diff --git a/libs/libvpx/AUTHORS b/libs/libvpx/AUTHORS index f89b6776a8..fcd5c534a8 100644 --- a/libs/libvpx/AUTHORS +++ b/libs/libvpx/AUTHORS @@ -24,6 +24,7 @@ changjun.yang Charles 'Buck' Krasic chm Christian Duvivier +Daniele Castagna Daniel Kang Deb Mukherjee Dim Temp @@ -56,10 +57,12 @@ James Zern Jan Gerber Jan Kratochvil Janne Salonen +Jean-Yves Avenard Jeff Faust Jeff Muizelaar Jeff Petkau Jia Jia +Jian Zhou Jim Bankoski Jingning Han Joey Parrish @@ -74,6 +77,7 @@ Justin Clift Justin Lebar KO Myung-Hun Lawrence Velázquez +Linfeng Zhang Lou Quillio Luca Barbato Makoto Kato @@ -107,9 +111,11 @@ Rob Bradford Ronald S. Bultje Rui Ueyama Sami Pietilä +Sasi Inguva Scott Graham Scott LaVarnway Sean McGovern +Sergey Kolomenkin Sergey Ulanov Shimon Doodkin Shunyao Li @@ -126,8 +132,10 @@ Timothy B. Terriberry Tom Finegan Vignesh Venkatasubramanian Yaowu Xu +Yi Luo Yongzhe Wang Yunqing Wang +Yury Gitman Zoe Liu Google Inc. The Mozilla Foundation diff --git a/libs/libvpx/CHANGELOG b/libs/libvpx/CHANGELOG index 7746cc6c4f..795d395f96 100644 --- a/libs/libvpx/CHANGELOG +++ b/libs/libvpx/CHANGELOG @@ -1,3 +1,33 @@ +2016-07-20 v1.6.0 "Khaki Campbell Duck" + This release improves upon the VP9 encoder and speeds up the encoding and + decoding processes. + + - Upgrading: + This release is ABI incompatible with 1.5.0 due to a new 'color_range' enum + in vpx_image and some minor changes to the VP8_COMP structure. + + The default key frame interval for VP9 has changed from 128 to 9999. + + - Enhancement: + A core focus has been performance for low end Intel processors. SSSE3 + instructions such as 'pshufb' have been avoided and instructions have been + reordered to better accommodate the more constrained pipelines. + + As a result, devices based on Celeron processors have seen substantial + decoding improvements. From Indian Runner Duck to Javan Whistling Duck, + decoding speed improved between 10 and 30%. 
Between Javan Whistling Duck + and Khaki Campbell Duck, it improved another 10 to 15%. + + While Celeron benefited most, Core-i5 also improved 5% and 10% between the + respective releases. + + Realtime performance for WebRTC for both speed and quality has received a + lot of attention. + + - Bug Fixes: + A number of fuzzing issues, found variously by Mozilla, Chromium and others, + have been fixed and we strongly recommend updating. + 2015-11-09 v1.5.0 "Javan Whistling Duck" This release improves upon the VP9 encoder and speeds up the encoding and decoding processes. diff --git a/libs/libvpx/README b/libs/libvpx/README index 979440eb70..b9266ca267 100644 --- a/libs/libvpx/README +++ b/libs/libvpx/README @@ -1,4 +1,4 @@ -README - 23 March 2015 +README - 20 July 2016 Welcome to the WebM VP8/VP9 Codec SDK! @@ -47,11 +47,8 @@ COMPILING THE APPLICATIONS/LIBRARIES: --help output of the configure script. As of this writing, the list of available targets is: - armv6-darwin-gcc - armv6-linux-rvct - armv6-linux-gcc - armv6-none-rvct arm64-darwin-gcc + arm64-linux-gcc armv7-android-gcc armv7-darwin-gcc armv7-linux-rvct @@ -61,6 +58,7 @@ COMPILING THE APPLICATIONS/LIBRARIES: armv7-win32-vs12 armv7-win32-vs14 armv7s-darwin-gcc + armv8-linux-gcc mips32-linux-gcc mips64-linux-gcc sparc-solaris-gcc @@ -74,15 +72,13 @@ COMPILING THE APPLICATIONS/LIBRARIES: x86-darwin12-gcc x86-darwin13-gcc x86-darwin14-gcc + x86-darwin15-gcc x86-iphonesimulator-gcc x86-linux-gcc x86-linux-icc x86-os2-gcc x86-solaris-gcc x86-win32-gcc - x86-win32-vs7 - x86-win32-vs8 - x86-win32-vs9 x86-win32-vs10 x86-win32-vs11 x86-win32-vs12 @@ -94,13 +90,12 @@ COMPILING THE APPLICATIONS/LIBRARIES: x86_64-darwin12-gcc x86_64-darwin13-gcc x86_64-darwin14-gcc + x86_64-darwin15-gcc x86_64-iphonesimulator-gcc x86_64-linux-gcc x86_64-linux-icc x86_64-solaris-gcc x86_64-win64-gcc - x86_64-win64-vs8 - x86_64-win64-vs9 x86_64-win64-vs10 x86_64-win64-vs11 x86_64-win64-vs12 @@ -133,7 +128,22 @@ VP8/VP9 TEST VECTORS: $ 
./configure --enable-unit-tests $ LIBVPX_TEST_DATA_PATH=../libvpx-test-data make testdata +CODE STYLE: + The coding style used by this project is enforced with clang-format using the + configuration contained in the .clang-format file in the root of the + repository. + + Before pushing changes for review you can format your code with: + # Apply clang-format to modified .c, .h and .cc files + $ clang-format -i --style=file \ + $(git diff --name-only --diff-filter=ACMR '*.[hc]' '*.cc') + + Check the .clang-format file for the version used to generate it if there is + any difference between your local formatting and the review system. + + See also: http://clang.llvm.org/docs/ClangFormat.html + SUPPORT This library is an open source project supported by its community. Please - please email webm-discuss@webmproject.org for help. + email webm-discuss@webmproject.org for help. diff --git a/libs/libvpx/args.c b/libs/libvpx/args.c index 14b031040a..bd1ede0388 100644 --- a/libs/libvpx/args.c +++ b/libs/libvpx/args.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include #include #include @@ -22,42 +21,36 @@ extern void die(const char *fmt, ...) __attribute__((noreturn)); extern void die(const char *fmt, ...); #endif - struct arg arg_init(char **argv) { struct arg a; - a.argv = argv; + a.argv = argv; a.argv_step = 1; - a.name = NULL; - a.val = NULL; - a.def = NULL; + a.name = NULL; + a.val = NULL; + a.def = NULL; return a; } int arg_match(struct arg *arg_, const struct arg_def *def, char **argv) { struct arg arg; - if (!argv[0] || argv[0][0] != '-') - return 0; + if (!argv[0] || argv[0][0] != '-') return 0; arg = arg_init(argv); - if (def->short_name - && strlen(arg.argv[0]) == strlen(def->short_name) + 1 - && !strcmp(arg.argv[0] + 1, def->short_name)) { - + if (def->short_name && strlen(arg.argv[0]) == strlen(def->short_name) + 1 && + !strcmp(arg.argv[0] + 1, def->short_name)) { arg.name = arg.argv[0] + 1; arg.val = def->has_val ? 
arg.argv[1] : NULL; arg.argv_step = def->has_val ? 2 : 1; } else if (def->long_name) { const size_t name_len = strlen(def->long_name); - if (strlen(arg.argv[0]) >= name_len + 2 - && arg.argv[0][1] == '-' - && !strncmp(arg.argv[0] + 2, def->long_name, name_len) - && (arg.argv[0][name_len + 2] == '=' - || arg.argv[0][name_len + 2] == '\0')) { - + if (strlen(arg.argv[0]) >= name_len + 2 && arg.argv[0][1] == '-' && + !strncmp(arg.argv[0] + 2, def->long_name, name_len) && + (arg.argv[0][name_len + 2] == '=' || + arg.argv[0][name_len + 2] == '\0')) { arg.name = arg.argv[0] + 2; arg.val = arg.name[name_len] == '=' ? arg.name + name_len + 1 : NULL; arg.argv_step = 1; @@ -70,8 +63,7 @@ int arg_match(struct arg *arg_, const struct arg_def *def, char **argv) { if (arg.name && arg.val && !def->has_val) die("Error: option %s requires no argument.\n", arg.name); - if (arg.name - && (arg.val || !def->has_val)) { + if (arg.name && (arg.val || !def->has_val)) { arg.def = def; *arg_ = arg; return 1; @@ -80,15 +72,12 @@ int arg_match(struct arg *arg_, const struct arg_def *def, char **argv) { return 0; } - const char *arg_next(struct arg *arg) { - if (arg->argv[0]) - arg->argv += arg->argv_step; + if (arg->argv[0]) arg->argv += arg->argv_step; return *arg->argv; } - char **argv_dup(int argc, const char **argv) { char **new_argv = malloc((argc + 1) * sizeof(*argv)); @@ -97,9 +86,8 @@ char **argv_dup(int argc, const char **argv) { return new_argv; } - void arg_show_usage(FILE *fp, const struct arg_def *const *defs) { - char option_text[40] = {0}; + char option_text[40] = { 0 }; for (; *defs; defs++) { const struct arg_def *def = *defs; @@ -109,15 +97,12 @@ void arg_show_usage(FILE *fp, const struct arg_def *const *defs) { if (def->short_name && def->long_name) { char *comma = def->has_val ? 
"," : ", "; - snprintf(option_text, 37, "-%s%s%s --%s%6s", - def->short_name, short_val, comma, - def->long_name, long_val); + snprintf(option_text, 37, "-%s%s%s --%s%6s", def->short_name, short_val, + comma, def->long_name, long_val); } else if (def->short_name) - snprintf(option_text, 37, "-%s%s", - def->short_name, short_val); + snprintf(option_text, 37, "-%s%s", def->short_name, short_val); else if (def->long_name) - snprintf(option_text, 37, " --%s%s", - def->long_name, long_val); + snprintf(option_text, 37, " --%s%s", def->long_name, long_val); fprintf(fp, " %-37s\t%s\n", option_text, def->desc); @@ -127,110 +112,103 @@ void arg_show_usage(FILE *fp, const struct arg_def *const *defs) { fprintf(fp, " %-37s\t ", ""); for (listptr = def->enums; listptr->name; listptr++) - fprintf(fp, "%s%s", listptr->name, - listptr[1].name ? ", " : "\n"); + fprintf(fp, "%s%s", listptr->name, listptr[1].name ? ", " : "\n"); } } } - unsigned int arg_parse_uint(const struct arg *arg) { - long int rawval; - char *endptr; + long int rawval; + char *endptr; rawval = strtol(arg->val, &endptr, 10); if (arg->val[0] != '\0' && endptr[0] == '\0') { - if (rawval >= 0 && rawval <= UINT_MAX) - return rawval; + if (rawval >= 0 && rawval <= UINT_MAX) return (unsigned int)rawval; - die("Option %s: Value %ld out of range for unsigned int\n", - arg->name, rawval); + die("Option %s: Value %ld out of range for unsigned int\n", arg->name, + rawval); } die("Option %s: Invalid character '%c'\n", arg->name, *endptr); return 0; } - int arg_parse_int(const struct arg *arg) { - long int rawval; - char *endptr; + long int rawval; + char *endptr; rawval = strtol(arg->val, &endptr, 10); if (arg->val[0] != '\0' && endptr[0] == '\0') { - if (rawval >= INT_MIN && rawval <= INT_MAX) - return rawval; + if (rawval >= INT_MIN && rawval <= INT_MAX) return (int)rawval; - die("Option %s: Value %ld out of range for signed int\n", - arg->name, rawval); + die("Option %s: Value %ld out of range for signed int\n", 
arg->name, + rawval); } die("Option %s: Invalid character '%c'\n", arg->name, *endptr); return 0; } - struct vpx_rational { int num; /**< fraction numerator */ int den; /**< fraction denominator */ }; struct vpx_rational arg_parse_rational(const struct arg *arg) { - long int rawval; - char *endptr; - struct vpx_rational rat; + long int rawval; + char *endptr; + struct vpx_rational rat; /* parse numerator */ rawval = strtol(arg->val, &endptr, 10); if (arg->val[0] != '\0' && endptr[0] == '/') { if (rawval >= INT_MIN && rawval <= INT_MAX) - rat.num = rawval; - else die("Option %s: Value %ld out of range for signed int\n", - arg->name, rawval); - } else die("Option %s: Expected / at '%c'\n", arg->name, *endptr); + rat.num = (int)rawval; + else + die("Option %s: Value %ld out of range for signed int\n", arg->name, + rawval); + } else + die("Option %s: Expected / at '%c'\n", arg->name, *endptr); /* parse denominator */ rawval = strtol(endptr + 1, &endptr, 10); if (arg->val[0] != '\0' && endptr[0] == '\0') { if (rawval >= INT_MIN && rawval <= INT_MAX) - rat.den = rawval; - else die("Option %s: Value %ld out of range for signed int\n", - arg->name, rawval); - } else die("Option %s: Invalid character '%c'\n", arg->name, *endptr); + rat.den = (int)rawval; + else + die("Option %s: Value %ld out of range for signed int\n", arg->name, + rawval); + } else + die("Option %s: Invalid character '%c'\n", arg->name, *endptr); return rat; } - int arg_parse_enum(const struct arg *arg) { const struct arg_enum_list *listptr; - long int rawval; - char *endptr; + long int rawval; + char *endptr; /* First see if the value can be parsed as a raw value */ rawval = strtol(arg->val, &endptr, 10); if (arg->val[0] != '\0' && endptr[0] == '\0') { /* Got a raw value, make sure it's valid */ for (listptr = arg->def->enums; listptr->name; listptr++) - if (listptr->val == rawval) - return rawval; + if (listptr->val == rawval) return (int)rawval; } /* Next see if it can be parsed as a string */ for 
(listptr = arg->def->enums; listptr->name; listptr++) - if (!strcmp(arg->val, listptr->name)) - return listptr->val; + if (!strcmp(arg->val, listptr->name)) return listptr->val; die("Option %s: Invalid value '%s'\n", arg->name, arg->val); return 0; } - int arg_parse_enum_or_int(const struct arg *arg) { - if (arg->def->enums) - return arg_parse_enum(arg); + if (arg->def->enums) return arg_parse_enum(arg); return arg_parse_int(arg); } diff --git a/libs/libvpx/args.h b/libs/libvpx/args.h index 1f37151a02..54abe04607 100644 --- a/libs/libvpx/args.h +++ b/libs/libvpx/args.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef ARGS_H_ #define ARGS_H_ #include @@ -18,29 +17,33 @@ extern "C" { #endif struct arg { - char **argv; - const char *name; - const char *val; - unsigned int argv_step; - const struct arg_def *def; + char **argv; + const char *name; + const char *val; + unsigned int argv_step; + const struct arg_def *def; }; struct arg_enum_list { const char *name; - int val; + int val; }; -#define ARG_ENUM_LIST_END {0} +#define ARG_ENUM_LIST_END \ + { 0 } typedef struct arg_def { const char *short_name; const char *long_name; - int has_val; + int has_val; const char *desc; const struct arg_enum_list *enums; } arg_def_t; -#define ARG_DEF(s,l,v,d) {s,l,v,d, NULL} -#define ARG_DEF_ENUM(s,l,v,d,e) {s,l,v,d,e} -#define ARG_DEF_LIST_END {0} +#define ARG_DEF(s, l, v, d) \ + { s, l, v, d, NULL } +#define ARG_DEF_ENUM(s, l, v, d, e) \ + { s, l, v, d, e } +#define ARG_DEF_LIST_END \ + { 0 } struct arg arg_init(char **argv); int arg_match(struct arg *arg_, const struct arg_def *def, char **argv); diff --git a/libs/libvpx/build/make/Android.mk b/libs/libvpx/build/make/Android.mk index df01dece67..36120170e8 100644 --- a/libs/libvpx/build/make/Android.mk +++ b/libs/libvpx/build/make/Android.mk @@ -29,11 +29,6 @@ # include $(CLEAR_VARS) # include jni/libvpx/build/make/Android.mk # -# There are currently two TARGET_ARCH_ABI targets for ARM. 
-# armeabi and armeabi-v7a. armeabi-v7a is selected by creating an -# Application.mk in the jni directory that contains: -# APP_ABI := armeabi-v7a -# # By default libvpx will detect at runtime the existance of NEON extension. # For this we import the 'cpufeatures' module from the NDK sources. # libvpx can also be configured without this runtime detection method. @@ -42,13 +37,29 @@ # --disable-neon-asm # will remove any NEON dependency. -# To change to building armeabi, run ./libvpx/configure again, but with -# --target=armv6-android-gcc and modify the Application.mk file to -# set APP_ABI := armeabi # # Running ndk-build will build libvpx and include it in your project. # +# Alternatively, building the examples and unit tests can be accomplished in the +# following way: +# +# Create a standalone toolchain from the NDK: +# https://developer.android.com/ndk/guides/standalone_toolchain.html +# +# For example - to test on arm64 devices with clang: +# $NDK/build/tools/make_standalone_toolchain.py \ +# --arch arm64 --install-dir=/tmp/my-android-toolchain +# export PATH=/tmp/my-android-toolchain/bin:$PATH +# CROSS=aarch64-linux-android- CC=clang CXX=clang++ /path/to/libvpx/configure \ +# --target=arm64-android-gcc +# +# Push the resulting binaries to a device and run them: +# adb push test_libvpx /data/tmp/test_libvpx +# adb shell /data/tmp/test_libvpx --gtest_filter=\*Sixtap\* +# +# Make sure to push the test data as well and set LIBVPX_TEST_DATA + CONFIG_DIR := $(LOCAL_PATH)/ LIBVPX_PATH := $(LOCAL_PATH)/libvpx ASM_CNV_PATH_LOCAL := $(TARGET_ARCH_ABI)/ads2gas @@ -59,9 +70,6 @@ ASM_CNV_PATH := $(LOCAL_PATH)/$(ASM_CNV_PATH_LOCAL) ifeq ($(TARGET_ARCH_ABI),armeabi-v7a) include $(CONFIG_DIR)libs-armv7-android-gcc.mk LOCAL_ARM_MODE := arm -else ifeq ($(TARGET_ARCH_ABI),armeabi) - include $(CONFIG_DIR)libs-armv6-android-gcc.mk - LOCAL_ARM_MODE := arm else ifeq ($(TARGET_ARCH_ABI),arm64-v8a) include $(CONFIG_DIR)libs-armv8-android-gcc.mk LOCAL_ARM_MODE := arm @@ -174,9 +182,6 
@@ endif ifeq ($(CONFIG_VP9), yes) $$(rtcd_dep_template_SRCS): vp9_rtcd.h endif -ifeq ($(CONFIG_VP10), yes) -$$(rtcd_dep_template_SRCS): vp10_rtcd.h -endif $$(rtcd_dep_template_SRCS): vpx_scale_rtcd.h $$(rtcd_dep_template_SRCS): vpx_dsp_rtcd.h diff --git a/libs/libvpx/build/make/Makefile b/libs/libvpx/build/make/Makefile index 3081a92680..469eb74c3a 100644 --- a/libs/libvpx/build/make/Makefile +++ b/libs/libvpx/build/make/Makefile @@ -26,7 +26,7 @@ test-no-data-check:: .DEFAULT testdata:: .DEFAULT utiltest: .DEFAULT exampletest-no-data-check utiltest-no-data-check: .DEFAULT - +test_%: .DEFAULT ; # Note: md5sum is not installed on OS X, but openssl is. Openssl may not be # installed on cygwin, so we need to autodetect here. @@ -119,29 +119,25 @@ utiltest: test-no-data-check:: exampletest-no-data-check utiltest-no-data-check: -# Add compiler flags for intrinsic files +# Force to realign stack always on OS/2 ifeq ($(TOOLCHAIN), x86-os2-gcc) -STACKREALIGN=-mstackrealign -else -STACKREALIGN= +CFLAGS += -mstackrealign endif $(BUILD_PFX)%_mmx.c.d: CFLAGS += -mmmx $(BUILD_PFX)%_mmx.c.o: CFLAGS += -mmmx -$(BUILD_PFX)%_sse2.c.d: CFLAGS += -msse2 $(STACKREALIGN) -$(BUILD_PFX)%_sse2.c.o: CFLAGS += -msse2 $(STACKREALIGN) -$(BUILD_PFX)%_sse3.c.d: CFLAGS += -msse3 $(STACKREALIGN) -$(BUILD_PFX)%_sse3.c.o: CFLAGS += -msse3 $(STACKREALIGN) -$(BUILD_PFX)%_ssse3.c.d: CFLAGS += -mssse3 $(STACKREALIGN) -$(BUILD_PFX)%_ssse3.c.o: CFLAGS += -mssse3 $(STACKREALIGN) -$(BUILD_PFX)%_sse4.c.d: CFLAGS += -msse4.1 $(STACKREALIGN) -$(BUILD_PFX)%_sse4.c.o: CFLAGS += -msse4.1 $(STACKREALIGN) -$(BUILD_PFX)%_avx.c.d: CFLAGS += -mavx $(STACKREALIGN) -$(BUILD_PFX)%_avx.c.o: CFLAGS += -mavx $(STACKREALIGN) -$(BUILD_PFX)%_avx2.c.d: CFLAGS += -mavx2 $(STACKREALIGN) -$(BUILD_PFX)%_avx2.c.o: CFLAGS += -mavx2 $(STACKREALIGN) -$(BUILD_PFX)%vp9_reconintra.c.d: CFLAGS += $(STACKREALIGN) -$(BUILD_PFX)%vp9_reconintra.c.o: CFLAGS += $(STACKREALIGN) +$(BUILD_PFX)%_sse2.c.d: CFLAGS += -msse2 +$(BUILD_PFX)%_sse2.c.o: 
CFLAGS += -msse2 +$(BUILD_PFX)%_sse3.c.d: CFLAGS += -msse3 +$(BUILD_PFX)%_sse3.c.o: CFLAGS += -msse3 +$(BUILD_PFX)%_ssse3.c.d: CFLAGS += -mssse3 +$(BUILD_PFX)%_ssse3.c.o: CFLAGS += -mssse3 +$(BUILD_PFX)%_sse4.c.d: CFLAGS += -msse4.1 +$(BUILD_PFX)%_sse4.c.o: CFLAGS += -msse4.1 +$(BUILD_PFX)%_avx.c.d: CFLAGS += -mavx +$(BUILD_PFX)%_avx.c.o: CFLAGS += -mavx +$(BUILD_PFX)%_avx2.c.d: CFLAGS += -mavx2 +$(BUILD_PFX)%_avx2.c.o: CFLAGS += -mavx2 $(BUILD_PFX)%.c.d: %.c $(if $(quiet),@echo " [DEP] $@") @@ -422,7 +418,6 @@ ifneq ($(call enabled,DIST-SRCS),) DIST-SRCS-yes += build/make/gen_asm_deps.sh DIST-SRCS-yes += build/make/Makefile DIST-SRCS-$(CONFIG_MSVS) += build/make/gen_msvs_def.sh - DIST-SRCS-$(CONFIG_MSVS) += build/make/gen_msvs_proj.sh DIST-SRCS-$(CONFIG_MSVS) += build/make/gen_msvs_sln.sh DIST-SRCS-$(CONFIG_MSVS) += build/make/gen_msvs_vcxproj.sh DIST-SRCS-$(CONFIG_MSVS) += build/make/msvs_common.sh @@ -453,3 +448,5 @@ all: $(BUILD_TARGETS) install:: $(INSTALL_TARGETS) dist: $(INSTALL_TARGETS) test:: + +.SUFFIXES: # Delete default suffix rules diff --git a/libs/libvpx/build/make/ads2gas_apple.pl b/libs/libvpx/build/make/ads2gas_apple.pl index a82f3eba8e..1a9e105ba8 100755 --- a/libs/libvpx/build/make/ads2gas_apple.pl +++ b/libs/libvpx/build/make/ads2gas_apple.pl @@ -18,12 +18,6 @@ # Usage: cat inputfile | perl ads2gas_apple.pl > outputfile # -my $chromium = 0; - -foreach my $arg (@ARGV) { - $chromium = 1 if ($arg eq "-chromium"); -} - print "@ This file was created from a .asm file\n"; print "@ using the ads2gas_apple.pl script.\n\n"; print "\t.set WIDE_REFERENCE, 0\n"; @@ -218,18 +212,5 @@ while () s/\bMEND\b/.endm/; # No need to tell it where to stop assembling next if /^\s*END\s*$/; - # Clang used by Chromium differs slightly from clang in XCode in what it - # will accept in the assembly. 
- if ($chromium) { - s/qsubaddx/qsax/i; - s/qaddsubx/qasx/i; - s/ldrneb/ldrbne/i; - s/ldrneh/ldrhne/i; - s/(vqshrun\.s16 .*, \#)0$/${1}8/i; - - # http://llvm.org/bugs/show_bug.cgi?id=16022 - s/\.include/#include/; - } - print; } diff --git a/libs/libvpx/build/make/configure.sh b/libs/libvpx/build/make/configure.sh index c8a3291b74..35609e89af 100644 --- a/libs/libvpx/build/make/configure.sh +++ b/libs/libvpx/build/make/configure.sh @@ -185,6 +185,7 @@ add_extralibs() { # # Boolean Manipulation Functions # + enable_feature(){ set_all yes $* } @@ -201,6 +202,20 @@ disabled(){ eval test "x\$$1" = "xno" } +enable_codec(){ + enabled "${1}" || echo " enabling ${1}" + enable_feature "${1}" + + is_in "${1}" vp8 vp9 && enable_feature "${1}_encoder" "${1}_decoder" +} + +disable_codec(){ + disabled "${1}" || echo " disabling ${1}" + disable_feature "${1}" + + is_in "${1}" vp8 vp9 && disable_feature "${1}_encoder" "${1}_decoder" +} + # Iterates through positional parameters, checks to confirm the parameter has # not been explicitly (force) disabled, and enables the setting controlled by # the parameter when the setting is not disabled. @@ -317,20 +332,10 @@ EOF } check_cflags() { - log check_cflags "$@" - - case "$CC" in - *gcc*|*clang) - check_cc -Werror "$@" </dev/null; then + if is_in ${option} ${ARCH_EXT_LIST}; then [ $action = "disable" ] && RTCD_OPTIONS="${RTCD_OPTIONS}--disable-${option} " elif [ $action = "disable" ] && ! disabled $option ; then - echo "${CMDLINE_SELECT}" | grep "^ *$option\$" >/dev/null || - die_unknown $opt + is_in ${option} ${CMDLINE_SELECT} || die_unknown $opt log_echo " disabling $option" elif [ $action = "enable" ] && ! 
enabled $option ; then - echo "${CMDLINE_SELECT}" | grep "^ *$option\$" >/dev/null || - die_unknown $opt + is_in ${option} ${CMDLINE_SELECT} || die_unknown $opt log_echo " enabling $option" fi ${action}_feature $option ;; --require-?*) eval `echo "$opt" | sed 's/--/action=/;s/-/ option=/;s/-/_/g'` - if echo "${ARCH_EXT_LIST}" none | grep "^ *$option\$" >/dev/null; then + if is_in ${option} ${ARCH_EXT_LIST}; then RTCD_OPTIONS="${RTCD_OPTIONS}${opt} " else die_unknown $opt @@ -648,14 +651,34 @@ show_darwin_sdk_major_version() { xcrun --sdk $1 --show-sdk-version 2>/dev/null | cut -d. -f1 } +# Print the Xcode version. +show_xcode_version() { + xcodebuild -version | head -n1 | cut -d' ' -f2 +} + +# Fails when Xcode version is less than 6.3. +check_xcode_minimum_version() { + xcode_major=$(show_xcode_version | cut -f1 -d.) + xcode_minor=$(show_xcode_version | cut -f2 -d.) + xcode_min_major=6 + xcode_min_minor=3 + if [ ${xcode_major} -lt ${xcode_min_major} ]; then + return 1 + fi + if [ ${xcode_major} -eq ${xcode_min_major} ] \ + && [ ${xcode_minor} -lt ${xcode_min_minor} ]; then + return 1 + fi +} + process_common_toolchain() { if [ -z "$toolchain" ]; then gcctarget="${CHOST:-$(gcc -dumpmachine 2> /dev/null)}" # detect tgt_isa case "$gcctarget" in - armv6*) - tgt_isa=armv6 + aarch64*) + tgt_isa=arm64 ;; armv7*-hardfloat* | armv7*-gnueabihf | arm-*-gnueabihf) tgt_isa=armv7 @@ -758,7 +781,14 @@ process_common_toolchain() { enabled shared && soft_enable pic # Minimum iOS version for all target platforms (darwin and iphonesimulator). - IOS_VERSION_MIN="6.0" + # Shared library framework builds are only possible on iOS 8 and later. + if enabled shared; then + IOS_VERSION_OPTIONS="--enable-shared" + IOS_VERSION_MIN="8.0" + else + IOS_VERSION_OPTIONS="" + IOS_VERSION_MIN="6.0" + fi # Handle darwin variants. Newer SDKs allow targeting older # platforms, so use the newest one available. 
@@ -850,36 +880,6 @@ process_common_toolchain() { if disabled neon && enabled neon_asm; then die "Disabling neon while keeping neon-asm is not supported" fi - case ${toolchain} in - # Apple iOS SDKs no longer support armv6 as of the version 9 - # release (coincides with release of Xcode 7). Only enable media - # when using earlier SDK releases. - *-darwin*) - if [ "$(show_darwin_sdk_major_version iphoneos)" -lt 9 ]; then - soft_enable media - else - soft_disable media - RTCD_OPTIONS="${RTCD_OPTIONS}--disable-media " - fi - ;; - *) - soft_enable media - ;; - esac - ;; - armv6) - case ${toolchain} in - *-darwin*) - if [ "$(show_darwin_sdk_major_version iphoneos)" -lt 9 ]; then - soft_enable media - else - die "Your iOS SDK does not support armv6." - fi - ;; - *) - soft_enable media - ;; - esac ;; esac @@ -908,6 +908,9 @@ EOF check_add_cflags -mfpu=neon #-ftree-vectorize check_add_asflags -mfpu=neon fi + elif [ ${tgt_isa} = "arm64" ] || [ ${tgt_isa} = "armv8" ]; then + check_add_cflags -march=armv8-a + check_add_asflags -march=armv8-a else check_add_cflags -march=${tgt_isa} check_add_asflags -march=${tgt_isa} @@ -975,43 +978,50 @@ EOF ;; android*) - SDK_PATH=${sdk_path} - COMPILER_LOCATION=`find "${SDK_PATH}" \ - -name "arm-linux-androideabi-gcc*" -print -quit` - TOOLCHAIN_PATH=${COMPILER_LOCATION%/*}/arm-linux-androideabi- - CC=${TOOLCHAIN_PATH}gcc - CXX=${TOOLCHAIN_PATH}g++ - AR=${TOOLCHAIN_PATH}ar - LD=${TOOLCHAIN_PATH}gcc - AS=${TOOLCHAIN_PATH}as - STRIP=${TOOLCHAIN_PATH}strip - NM=${TOOLCHAIN_PATH}nm + if [ -n "${sdk_path}" ]; then + SDK_PATH=${sdk_path} + COMPILER_LOCATION=`find "${SDK_PATH}" \ + -name "arm-linux-androideabi-gcc*" -print -quit` + TOOLCHAIN_PATH=${COMPILER_LOCATION%/*}/arm-linux-androideabi- + CC=${TOOLCHAIN_PATH}gcc + CXX=${TOOLCHAIN_PATH}g++ + AR=${TOOLCHAIN_PATH}ar + LD=${TOOLCHAIN_PATH}gcc + AS=${TOOLCHAIN_PATH}as + STRIP=${TOOLCHAIN_PATH}strip + NM=${TOOLCHAIN_PATH}nm - if [ -z "${alt_libc}" ]; then - alt_libc=`find "${SDK_PATH}" -name 
arch-arm -print | \ - awk '{n = split($0,a,"/"); \ + if [ -z "${alt_libc}" ]; then + alt_libc=`find "${SDK_PATH}" -name arch-arm -print | \ + awk '{n = split($0,a,"/"); \ split(a[n-1],b,"-"); \ print $0 " " b[2]}' | \ sort -g -k 2 | \ awk '{ print $1 }' | tail -1` - fi + fi - if [ -d "${alt_libc}" ]; then - add_cflags "--sysroot=${alt_libc}" - add_ldflags "--sysroot=${alt_libc}" - fi + if [ -d "${alt_libc}" ]; then + add_cflags "--sysroot=${alt_libc}" + add_ldflags "--sysroot=${alt_libc}" + fi - # linker flag that routes around a CPU bug in some - # Cortex-A8 implementations (NDK Dev Guide) - add_ldflags "-Wl,--fix-cortex-a8" + # linker flag that routes around a CPU bug in some + # Cortex-A8 implementations (NDK Dev Guide) + add_ldflags "-Wl,--fix-cortex-a8" - enable_feature pic - soft_enable realtime_only - if [ ${tgt_isa} = "armv7" ]; then - soft_enable runtime_cpu_detect - fi - if enabled runtime_cpu_detect; then - add_cflags "-I${SDK_PATH}/sources/android/cpufeatures" + enable_feature pic + soft_enable realtime_only + if [ ${tgt_isa} = "armv7" ]; then + soft_enable runtime_cpu_detect + fi + if enabled runtime_cpu_detect; then + add_cflags "-I${SDK_PATH}/sources/android/cpufeatures" + fi + else + echo "Assuming standalone build with NDK toolchain." + echo "See build/make/Android.mk for details." + check_add_ldflags -static + soft_enable unit_tests fi ;; @@ -1025,18 +1035,7 @@ EOF NM="$(${XCRUN_FIND} nm)" RANLIB="$(${XCRUN_FIND} ranlib)" AS_SFX=.s - - # Special handling of ld for armv6 because libclang_rt.ios.a does - # not contain armv6 support in Apple's clang package: - # Apple LLVM version 5.1 (clang-503.0.40) (based on LLVM 3.4svn). - # TODO(tomfinegan): Remove this. Our minimum iOS version (6.0) - # renders support for armv6 unnecessary because the 3GS and up - # support neon. 
- if [ "${tgt_isa}" = "armv6" ]; then - LD="$(${XCRUN_FIND} ld)" - else - LD="${CXX:-$(${XCRUN_FIND} ld)}" - fi + LD="${CXX:-$(${XCRUN_FIND} ld)}" # ASFLAGS is written here instead of using check_add_asflags # because we need to overwrite all of ASFLAGS and purge the @@ -1062,6 +1061,19 @@ EOF [ -d "${try_dir}" ] && add_ldflags -L"${try_dir}" done + case ${tgt_isa} in + armv7|armv7s|armv8|arm64) + if enabled neon && ! check_xcode_minimum_version; then + soft_disable neon + log_echo " neon disabled: upgrade Xcode (need v6.3+)." + if enabled neon_asm; then + soft_disable neon_asm + log_echo " neon_asm disabled: upgrade Xcode (need v6.3+)." + fi + fi + ;; + esac + asm_conversion_cmd="${source_path}/build/make/ads2gas_apple.pl" if [ "$(show_darwin_sdk_major_version iphoneos)" -gt 8 ]; then @@ -1076,7 +1088,7 @@ EOF if enabled rvct; then # Check if we have CodeSourcery GCC in PATH. Needed for # libraries - hash arm-none-linux-gnueabi-gcc 2>&- || \ + which arm-none-linux-gnueabi-gcc 2>&- || \ die "Couldn't find CodeSourcery GCC from PATH" # Use armcc as a linker to enable translation of @@ -1111,13 +1123,13 @@ EOF if [ -n "${tune_cpu}" ]; then case ${tune_cpu} in p5600) - check_add_cflags -mips32r5 -funroll-loops -mload-store-pairs + check_add_cflags -mips32r5 -mload-store-pairs check_add_cflags -msched-weight -mhard-float -mfp64 check_add_asflags -mips32r5 -mhard-float -mfp64 check_add_ldflags -mfp64 ;; - i6400) - check_add_cflags -mips64r6 -mabi=64 -funroll-loops -msched-weight + i6400|p6600) + check_add_cflags -mips64r6 -mabi=64 -msched-weight check_add_cflags -mload-store-pairs -mhard-float -mfp64 check_add_asflags -mips64r6 -mabi=64 -mhard-float -mfp64 check_add_ldflags -mips64r6 -mabi=64 -mfp64 @@ -1197,6 +1209,12 @@ EOF soft_disable avx2 ;; esac + case $vc_version in + 7|8|9) + echo "${tgt_cc} omits stdint.h, disabling webm-io..." 
+ soft_disable webm_io + ;; + esac ;; esac @@ -1348,10 +1366,6 @@ EOF fi fi - if [ "${tgt_isa}" = "x86_64" ] || [ "${tgt_isa}" = "x86" ]; then - soft_enable use_x86inc - fi - # Position Independent Code (PIC) support, for building relocatable # shared objects enabled gcc && enabled pic && check_add_cflags -fPIC @@ -1381,6 +1395,7 @@ EOF *-win*-vs*) ;; *-android-gcc) + # bionic includes basic pthread functionality, obviating -lpthread. ;; *) check_header pthread.h && add_extralibs -lpthread diff --git a/libs/libvpx/build/make/gen_msvs_proj.sh b/libs/libvpx/build/make/gen_msvs_proj.sh deleted file mode 100755 index 0cf335b3d2..0000000000 --- a/libs/libvpx/build/make/gen_msvs_proj.sh +++ /dev/null @@ -1,490 +0,0 @@ -#!/bin/bash -## -## Copyright (c) 2010 The WebM project authors. All Rights Reserved. -## -## Use of this source code is governed by a BSD-style license -## that can be found in the LICENSE file in the root of the source -## tree. An additional intellectual property rights grant can be found -## in the file PATENTS. All contributing project authors may -## be found in the AUTHORS file in the root of the source tree. -## - -self=$0 -self_basename=${self##*/} -self_dirname=$(dirname "$0") - -. "$self_dirname/msvs_common.sh"|| exit 127 - -show_help() { - cat <&2 - IFS=* - - open_tag Filter \ - Name=$name \ - Filter=$pats \ - UniqueIdentifier=`generate_uuid` \ - - file_list_sz=${#file_list[@]} - for i in ${!file_list[@]}; do - f=${file_list[i]} - for pat in ${pats//;/$IFS}; do - if [ "${f##*.}" == "$pat" ]; then - unset file_list[i] - - objf=$(echo ${f%.*}.obj \ - | sed -e "s,$src_path_bare,," \ - -e 's/^[\./]\+//g' -e 's,[:/ ],_,g') - open_tag File RelativePath="$f" - - if [ "$pat" == "asm" ] && $asm_use_custom_step; then - # Avoid object file name collisions, i.e. vpx_config.c and - # vpx_config.asm produce the same object file without - # this additional suffix. 
- objf=${objf%.obj}_asm.obj - for plat in "${platforms[@]}"; do - for cfg in Debug Release; do - open_tag FileConfiguration \ - Name="${cfg}|${plat}" \ - - tag Tool \ - Name="VCCustomBuildTool" \ - Description="Assembling \$(InputFileName)" \ - CommandLine="$(eval echo \$asm_${cfg}_cmdline) -o \$(IntDir)\\$objf" \ - Outputs="\$(IntDir)\\$objf" \ - - close_tag FileConfiguration - done - done - fi - if [ "$pat" == "c" ] || \ - [ "$pat" == "cc" ] || [ "$pat" == "cpp" ]; then - for plat in "${platforms[@]}"; do - for cfg in Debug Release; do - open_tag FileConfiguration \ - Name="${cfg}|${plat}" \ - - tag Tool \ - Name="VCCLCompilerTool" \ - ObjectFile="\$(IntDir)\\$objf" \ - - close_tag FileConfiguration - done - done - fi - close_tag File - - break - fi - done - done - - close_tag Filter - IFS="$saveIFS" -} - -# Process command line -unset target -for opt in "$@"; do - optval="${opt#*=}" - case "$opt" in - --help|-h) show_help - ;; - --target=*) target="${optval}" - ;; - --out=*) outfile="$optval" - ;; - --name=*) name="${optval}" - ;; - --proj-guid=*) guid="${optval}" - ;; - --module-def=*) link_opts="${link_opts} ModuleDefinitionFile=${optval}" - ;; - --exe) proj_kind="exe" - ;; - --dll) proj_kind="dll" - ;; - --lib) proj_kind="lib" - ;; - --src-path-bare=*) - src_path_bare=$(fix_path "$optval") - src_path_bare=${src_path_bare%/} - ;; - --static-crt) use_static_runtime=true - ;; - --ver=*) - vs_ver="$optval" - case "$optval" in - [789]) - ;; - *) die Unrecognized Visual Studio Version in $opt - ;; - esac - ;; - -I*) - opt=${opt##-I} - opt=$(fix_path "$opt") - opt="${opt%/}" - incs="${incs}${incs:+;}"${opt}"" - yasmincs="${yasmincs} -I"${opt}"" - ;; - -D*) defines="${defines}${defines:+;}${opt##-D}" - ;; - -L*) # fudge . to $(OutDir) - if [ "${opt##-L}" == "." 
]; then - libdirs="${libdirs}${libdirs:+;}"\$(OutDir)"" - else - # Also try directories for this platform/configuration - opt=${opt##-L} - opt=$(fix_path "$opt") - libdirs="${libdirs}${libdirs:+;}"${opt}"" - libdirs="${libdirs}${libdirs:+;}"${opt}/\$(PlatformName)/\$(ConfigurationName)"" - libdirs="${libdirs}${libdirs:+;}"${opt}/\$(PlatformName)"" - fi - ;; - -l*) libs="${libs}${libs:+ }${opt##-l}.lib" - ;; - -*) die_unknown $opt - ;; - *) - # The paths in file_list are fixed outside of the loop. - file_list[${#file_list[@]}]="$opt" - case "$opt" in - *.asm) uses_asm=true - ;; - esac - ;; - esac -done - -# Make one call to fix_path for file_list to improve performance. -fix_file_list - -outfile=${outfile:-/dev/stdout} -guid=${guid:-`generate_uuid`} -asm_use_custom_step=false -uses_asm=${uses_asm:-false} -case "${vs_ver:-8}" in - 7) vs_ver_id="7.10" - asm_use_custom_step=$uses_asm - warn_64bit='Detect64BitPortabilityProblems=true' - ;; - 8) vs_ver_id="8.00" - asm_use_custom_step=$uses_asm - warn_64bit='Detect64BitPortabilityProblems=true' - ;; - 9) vs_ver_id="9.00" - asm_use_custom_step=$uses_asm - warn_64bit='Detect64BitPortabilityProblems=false' - ;; -esac - -[ -n "$name" ] || die "Project name (--name) must be specified!" -[ -n "$target" ] || die "Target (--target) must be specified!" - -if ${use_static_runtime:-false}; then - release_runtime=0 - debug_runtime=1 - lib_sfx=mt -else - release_runtime=2 - debug_runtime=3 - lib_sfx=md -fi - -# Calculate debug lib names: If a lib ends in ${lib_sfx}.lib, then rename -# it to ${lib_sfx}d.lib. This precludes linking to release libs from a -# debug exe, so this may need to be refactored later. -for lib in ${libs}; do - if [ "$lib" != "${lib%${lib_sfx}.lib}" ]; then - lib=${lib%.lib}d.lib - fi - debug_libs="${debug_libs}${debug_libs:+ }${lib}" -done - - -# List Keyword for this target -case "$target" in - x86*) keyword="ManagedCProj" - ;; - *) die "Unsupported target $target!" 
-esac - -# List of all platforms supported for this target -case "$target" in - x86_64*) - platforms[0]="x64" - asm_Debug_cmdline="yasm -Xvc -g cv8 -f win64 ${yasmincs} "\$(InputPath)"" - asm_Release_cmdline="yasm -Xvc -f win64 ${yasmincs} "\$(InputPath)"" - ;; - x86*) - platforms[0]="Win32" - asm_Debug_cmdline="yasm -Xvc -g cv8 -f win32 ${yasmincs} "\$(InputPath)"" - asm_Release_cmdline="yasm -Xvc -f win32 ${yasmincs} "\$(InputPath)"" - ;; - *) die "Unsupported target $target!" - ;; -esac - -generate_vcproj() { - case "$proj_kind" in - exe) vs_ConfigurationType=1 - ;; - dll) vs_ConfigurationType=2 - ;; - *) vs_ConfigurationType=4 - ;; - esac - - echo "" - open_tag VisualStudioProject \ - ProjectType="Visual C++" \ - Version="${vs_ver_id}" \ - Name="${name}" \ - ProjectGUID="{${guid}}" \ - RootNamespace="${name}" \ - Keyword="${keyword}" \ - - open_tag Platforms - for plat in "${platforms[@]}"; do - tag Platform Name="$plat" - done - close_tag Platforms - - open_tag Configurations - for plat in "${platforms[@]}"; do - plat_no_ws=`echo $plat | sed 's/[^A-Za-z0-9_]/_/g'` - open_tag Configuration \ - Name="Debug|$plat" \ - OutputDirectory="\$(SolutionDir)$plat_no_ws/\$(ConfigurationName)" \ - IntermediateDirectory="$plat_no_ws/\$(ConfigurationName)/${name}" \ - ConfigurationType="$vs_ConfigurationType" \ - CharacterSet="1" \ - - case "$target" in - x86*) - case "$name" in - vpx) - tag Tool \ - Name="VCCLCompilerTool" \ - Optimization="0" \ - AdditionalIncludeDirectories="$incs" \ - PreprocessorDefinitions="WIN32;_DEBUG;_CRT_SECURE_NO_WARNINGS;_CRT_SECURE_NO_DEPRECATE;$defines" \ - RuntimeLibrary="$debug_runtime" \ - UsePrecompiledHeader="0" \ - WarningLevel="3" \ - DebugInformationFormat="2" \ - $warn_64bit \ - - $uses_asm && tag Tool Name="YASM" IncludePaths="$incs" Debug="true" - ;; - *) - tag Tool \ - Name="VCCLCompilerTool" \ - Optimization="0" \ - AdditionalIncludeDirectories="$incs" \ - 
PreprocessorDefinitions="WIN32;_DEBUG;_CRT_SECURE_NO_WARNINGS;_CRT_SECURE_NO_DEPRECATE;$defines" \ - RuntimeLibrary="$debug_runtime" \ - UsePrecompiledHeader="0" \ - WarningLevel="3" \ - DebugInformationFormat="2" \ - $warn_64bit \ - - $uses_asm && tag Tool Name="YASM" IncludePaths="$incs" Debug="true" - ;; - esac - ;; - esac - - case "$proj_kind" in - exe) - case "$target" in - x86*) - case "$name" in - *) - tag Tool \ - Name="VCLinkerTool" \ - AdditionalDependencies="$debug_libs \$(NoInherit)" \ - AdditionalLibraryDirectories="$libdirs" \ - GenerateDebugInformation="true" \ - ProgramDatabaseFile="\$(OutDir)/${name}.pdb" \ - ;; - esac - ;; - esac - ;; - lib) - case "$target" in - x86*) - tag Tool \ - Name="VCLibrarianTool" \ - OutputFile="\$(OutDir)/${name}${lib_sfx}d.lib" \ - - ;; - esac - ;; - dll) - tag Tool \ - Name="VCLinkerTool" \ - AdditionalDependencies="\$(NoInherit)" \ - LinkIncremental="2" \ - GenerateDebugInformation="true" \ - AssemblyDebug="1" \ - TargetMachine="1" \ - $link_opts \ - - ;; - esac - - close_tag Configuration - - open_tag Configuration \ - Name="Release|$plat" \ - OutputDirectory="\$(SolutionDir)$plat_no_ws/\$(ConfigurationName)" \ - IntermediateDirectory="$plat_no_ws/\$(ConfigurationName)/${name}" \ - ConfigurationType="$vs_ConfigurationType" \ - CharacterSet="1" \ - WholeProgramOptimization="0" \ - - case "$target" in - x86*) - case "$name" in - vpx) - tag Tool \ - Name="VCCLCompilerTool" \ - Optimization="2" \ - FavorSizeorSpeed="1" \ - AdditionalIncludeDirectories="$incs" \ - PreprocessorDefinitions="WIN32;NDEBUG;_CRT_SECURE_NO_WARNINGS;_CRT_SECURE_NO_DEPRECATE;$defines" \ - RuntimeLibrary="$release_runtime" \ - UsePrecompiledHeader="0" \ - WarningLevel="3" \ - DebugInformationFormat="0" \ - $warn_64bit \ - - $uses_asm && tag Tool Name="YASM" IncludePaths="$incs" - ;; - *) - tag Tool \ - Name="VCCLCompilerTool" \ - AdditionalIncludeDirectories="$incs" \ - Optimization="2" \ - FavorSizeorSpeed="1" \ - 
PreprocessorDefinitions="WIN32;NDEBUG;_CRT_SECURE_NO_WARNINGS;_CRT_SECURE_NO_DEPRECATE;$defines" \ - RuntimeLibrary="$release_runtime" \ - UsePrecompiledHeader="0" \ - WarningLevel="3" \ - DebugInformationFormat="0" \ - $warn_64bit \ - - $uses_asm && tag Tool Name="YASM" IncludePaths="$incs" - ;; - esac - ;; - esac - - case "$proj_kind" in - exe) - case "$target" in - x86*) - case "$name" in - *) - tag Tool \ - Name="VCLinkerTool" \ - AdditionalDependencies="$libs \$(NoInherit)" \ - AdditionalLibraryDirectories="$libdirs" \ - - ;; - esac - ;; - esac - ;; - lib) - case "$target" in - x86*) - tag Tool \ - Name="VCLibrarianTool" \ - OutputFile="\$(OutDir)/${name}${lib_sfx}.lib" \ - - ;; - esac - ;; - dll) # note differences to debug version: LinkIncremental, AssemblyDebug - tag Tool \ - Name="VCLinkerTool" \ - AdditionalDependencies="\$(NoInherit)" \ - LinkIncremental="1" \ - GenerateDebugInformation="true" \ - TargetMachine="1" \ - $link_opts \ - - ;; - esac - - close_tag Configuration - done - close_tag Configurations - - open_tag Files - generate_filter srcs "Source Files" "c;cc;cpp;def;odl;idl;hpj;bat;asm;asmx" - generate_filter hdrs "Header Files" "h;hm;inl;inc;xsd" - generate_filter resrcs "Resource Files" "rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav" - generate_filter resrcs "Build Files" "mk" - close_tag Files - - tag Globals - close_tag VisualStudioProject - - # This must be done from within the {} subshell - echo "Ignored files list (${#file_list[@]} items) is:" >&2 - for f in "${file_list[@]}"; do - echo " $f" >&2 - done -} - -generate_vcproj | - sed -e '/"/s;\([^ "]\)/;\1\\;g' > ${outfile} - -exit - diff --git a/libs/libvpx/build/make/gen_msvs_sln.sh b/libs/libvpx/build/make/gen_msvs_sln.sh index 664b404c91..7d5f468109 100755 --- a/libs/libvpx/build/make/gen_msvs_sln.sh +++ b/libs/libvpx/build/make/gen_msvs_sln.sh @@ -55,16 +55,11 @@ indent_pop() { parse_project() { local file=$1 - if [ "$sfx" = "vcproj" ]; then - local 
name=`grep Name "$file" | awk 'BEGIN {FS="\""}{if (NR==1) print $2}'` - local guid=`grep ProjectGUID "$file" | awk 'BEGIN {FS="\""}{if (NR==1) print $2}'` - else - local name=`grep RootNamespace "$file" | sed 's,.*<.*>\(.*\).*,\1,'` - local guid=`grep ProjectGuid "$file" | sed 's,.*<.*>\(.*\).*,\1,'` - fi + local name=`grep RootNamespace "$file" | sed 's,.*<.*>\(.*\).*,\1,'` + local guid=`grep ProjectGuid "$file" | sed 's,.*<.*>\(.*\).*,\1,'` # save the project GUID to a varaible, normalizing to the basename of the - # vcproj file without the extension + # vcxproj file without the extension local var var=${file##*/} var=${var%%.${sfx}} @@ -72,13 +67,8 @@ parse_project() { eval "${var}_name=$name" eval "${var}_guid=$guid" - if [ "$sfx" = "vcproj" ]; then - cur_config_list=`grep -A1 '/dev/null 2>&1 && echo yes) .nodevenv.once: ${TAB}@echo " * \$(MSBUILD_TOOL) not found in path." @@ -204,7 +171,7 @@ ${TAB}@echo " * You will have to build all configurations manually using the" ${TAB}@echo " * Visual Studio IDE. To allow make to build them automatically," ${TAB}@echo " * add the Common7/IDE directory of your Visual Studio" ${TAB}@echo " * installation to your path, eg:" -${TAB}@echo " * C:\Program Files\Microsoft Visual Studio 8\Common7\IDE" +${TAB}@echo " * C:\Program Files\Microsoft Visual Studio 10.0\Common7\IDE" ${TAB}@echo " * " ${TAB}@touch \$@ CLEAN-OBJS += \$(if \$(found_devenv),,.nodevenv.once) @@ -221,16 +188,9 @@ clean:: ${TAB}rm -rf "$platform"/"$config" .PHONY: $nows_sln_config ifneq (\$(found_devenv),) - ifeq (\$(CONFIG_VS_VERSION),7) -$nows_sln_config: $outfile -${TAB}\$(MSBUILD_TOOL) $outfile -build "$config" - - else $nows_sln_config: $outfile ${TAB}\$(MSBUILD_TOOL) $outfile -m -t:Build \\ ${TAB}${TAB}-p:Configuration="$config" -p:Platform="$platform" - - endif else $nows_sln_config: $outfile .nodevenv.once ${TAB}@echo " * Skipping build of $sln_config (\$(MSBUILD_TOOL) not in path)." 
@@ -255,23 +215,12 @@ for opt in "$@"; do ;; --ver=*) vs_ver="$optval" case $optval in - [789]|10|11|12|14) + 10|11|12|14) ;; *) die Unrecognized Visual Studio Version in $opt ;; esac ;; - --ver=*) vs_ver="$optval" - case $optval in - 7) sln_vers="8.00" - sln_vers_str="Visual Studio .NET 2003" - ;; - [89]) - ;; - *) die "Unrecognized Visual Studio Version '$optval' in $opt" - ;; - esac - ;; --target=*) target="${optval}" ;; -*) die_unknown $opt @@ -281,16 +230,7 @@ for opt in "$@"; do done outfile=${outfile:-/dev/stdout} mkoutfile=${mkoutfile:-/dev/stdout} -case "${vs_ver:-8}" in - 7) sln_vers="8.00" - sln_vers_str="Visual Studio .NET 2003" - ;; - 8) sln_vers="9.00" - sln_vers_str="Visual Studio 2005" - ;; - 9) sln_vers="10.00" - sln_vers_str="Visual Studio 2008" - ;; +case "${vs_ver:-10}" in 10) sln_vers="11.00" sln_vers_str="Visual Studio 2010" ;; @@ -304,14 +244,7 @@ case "${vs_ver:-8}" in sln_vers_str="Visual Studio 2015" ;; esac -case "${vs_ver:-8}" in - [789]) - sfx=vcproj - ;; - 10|11|12|14) - sfx=vcxproj - ;; -esac +sfx=vcxproj for f in "${file_list[@]}"; do parse_project $f diff --git a/libs/libvpx/build/make/gen_msvs_vcxproj.sh b/libs/libvpx/build/make/gen_msvs_vcxproj.sh index 182ea28fa7..e98611d102 100755 --- a/libs/libvpx/build/make/gen_msvs_vcxproj.sh +++ b/libs/libvpx/build/make/gen_msvs_vcxproj.sh @@ -211,7 +211,7 @@ for opt in "$@"; do done # Make one call to fix_path for file_list to improve performance. 
-fix_file_list +fix_file_list file_list outfile=${outfile:-/dev/stdout} guid=${guid:-`generate_uuid`} diff --git a/libs/libvpx/build/make/ios-Info.plist b/libs/libvpx/build/make/ios-Info.plist new file mode 100644 index 0000000000..d157b11a0d --- /dev/null +++ b/libs/libvpx/build/make/ios-Info.plist @@ -0,0 +1,37 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + VPX + CFBundleIdentifier + org.webmproject.VPX + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + VPX + CFBundlePackageType + FMWK + CFBundleShortVersionString + ${VERSION} + CFBundleSignature + ???? + CFBundleSupportedPlatforms + + iPhoneOS + + CFBundleVersion + ${VERSION} + MinimumOSVersion + ${IOS_VERSION_MIN} + UIDeviceFamily + + 1 + 2 + + VPXFullVersion + ${FULLVERSION} + + diff --git a/libs/libvpx/build/make/iosbuild.sh b/libs/libvpx/build/make/iosbuild.sh index ae5ba182d5..c703f22b0c 100755 --- a/libs/libvpx/build/make/iosbuild.sh +++ b/libs/libvpx/build/make/iosbuild.sh @@ -24,6 +24,7 @@ CONFIGURE_ARGS="--disable-docs --disable-unit-tests" DIST_DIR="_dist" FRAMEWORK_DIR="VPX.framework" +FRAMEWORK_LIB="VPX.framework/VPX" HEADER_DIR="${FRAMEWORK_DIR}/Headers/vpx" SCRIPT_DIR=$(dirname "$0") LIBVPX_SOURCE_DIR=$(cd ${SCRIPT_DIR}/../..; pwd) @@ -137,6 +138,44 @@ create_vpx_framework_config_shim() { printf "#endif // ${include_guard}" >> "${config_file}" } +# Verifies that $FRAMEWORK_LIB fat library contains requested builds. +verify_framework_targets() { + local requested_cpus="" + local cpu="" + + # Extract CPU from full target name. + for target; do + cpu="${target%%-*}" + if [ "${cpu}" = "x86" ]; then + # lipo -info outputs i386 for libvpx x86 targets. + cpu="i386" + fi + requested_cpus="${requested_cpus}${cpu} " + done + + # Get target CPUs present in framework library. + local targets_built=$(${LIPO} -info ${FRAMEWORK_LIB}) + + # $LIPO -info outputs a string like the following: + # Architectures in the fat file: $FRAMEWORK_LIB + # Capture only the architecture strings. 
+ targets_built=${targets_built##*: } + + # Sort CPU strings to make the next step a simple string compare. + local actual=$(echo ${targets_built} | tr " " "\n" | sort | tr "\n" " ") + local requested=$(echo ${requested_cpus} | tr " " "\n" | sort | tr "\n" " ") + + vlog "Requested ${FRAMEWORK_LIB} CPUs: ${requested}" + vlog "Actual ${FRAMEWORK_LIB} CPUs: ${actual}" + + if [ "${requested}" != "${actual}" ]; then + elog "Actual ${FRAMEWORK_LIB} targets do not match requested target list." + elog " Requested target CPUs: ${requested}" + elog " Actual target CPUs: ${actual}" + return 1 + fi +} + # Configures and builds each target specified by $1, and then builds # VPX.framework. build_framework() { @@ -157,7 +196,12 @@ build_framework() { for target in ${targets}; do build_target "${target}" target_dist_dir="${BUILD_ROOT}/${target}/${DIST_DIR}" - lib_list="${lib_list} ${target_dist_dir}/lib/libvpx.a" + if [ "${ENABLE_SHARED}" = "yes" ]; then + local suffix="dylib" + else + local suffix="a" + fi + lib_list="${lib_list} ${target_dist_dir}/lib/libvpx.${suffix}" done cd "${ORIG_PWD}" @@ -176,13 +220,25 @@ build_framework() { # Copy in vpx_version.h. cp -p "${BUILD_ROOT}/${target}/vpx_version.h" "${HEADER_DIR}" - vlog "Created fat library ${FRAMEWORK_DIR}/VPX containing:" + if [ "${ENABLE_SHARED}" = "yes" ]; then + # Adjust the dylib's name so dynamic linking in apps works as expected. + install_name_tool -id '@rpath/VPX.framework/VPX' ${FRAMEWORK_DIR}/VPX + + # Copy in Info.plist. + cat "${SCRIPT_DIR}/ios-Info.plist" \ + | sed "s/\${FULLVERSION}/${FULLVERSION}/g" \ + | sed "s/\${VERSION}/${VERSION}/g" \ + | sed "s/\${IOS_VERSION_MIN}/${IOS_VERSION_MIN}/g" \ + > "${FRAMEWORK_DIR}/Info.plist" + fi + + # Confirm VPX.framework/VPX contains the targets requested. 
+ verify_framework_targets ${targets} + + vlog "Created fat library ${FRAMEWORK_LIB} containing:" for lib in ${lib_list}; do vlog " $(echo ${lib} | awk -F / '{print $2, $NF}')" done - - # TODO(tomfinegan): Verify that expected targets are included within - # VPX.framework/VPX via lipo -info. } # Trap function. Cleans up the subtree used to build all targets contained in @@ -213,6 +269,7 @@ iosbuild_usage() { cat << EOF Usage: ${0##*/} [arguments] --help: Display this message and exit. + --enable-shared: Build a dynamic framework for use on iOS 8 or later. --extra-configure-args : Extra args to pass when configuring libvpx. --macosx: Uses darwin15 targets instead of iphonesimulator targets for x86 and x86_64. Allows linking to framework when builds target MacOSX @@ -251,6 +308,9 @@ while [ -n "$1" ]; do iosbuild_usage exit ;; + --enable-shared) + ENABLE_SHARED=yes + ;; --preserve-build-output) PRESERVE_BUILD_OUTPUT=yes ;; @@ -278,6 +338,21 @@ while [ -n "$1" ]; do shift done +if [ "${ENABLE_SHARED}" = "yes" ]; then + CONFIGURE_ARGS="--enable-shared ${CONFIGURE_ARGS}" +fi + +FULLVERSION=$("${SCRIPT_DIR}"/version.sh --bare "${LIBVPX_SOURCE_DIR}") +VERSION=$(echo "${FULLVERSION}" | sed -E 's/^v([0-9]+\.[0-9]+\.[0-9]+).*$/\1/') + +if [ "$ENABLE_SHARED" = "yes" ]; then + IOS_VERSION_OPTIONS="--enable-shared" + IOS_VERSION_MIN="8.0" +else + IOS_VERSION_OPTIONS="" + IOS_VERSION_MIN="6.0" +fi + if [ "${VERBOSE}" = "yes" ]; then cat << EOF BUILD_ROOT=${BUILD_ROOT} @@ -285,6 +360,7 @@ cat << EOF CONFIGURE_ARGS=${CONFIGURE_ARGS} EXTRA_CONFIGURE_ARGS=${EXTRA_CONFIGURE_ARGS} FRAMEWORK_DIR=${FRAMEWORK_DIR} + FRAMEWORK_LIB=${FRAMEWORK_LIB} HEADER_DIR=${HEADER_DIR} LIBVPX_SOURCE_DIR=${LIBVPX_SOURCE_DIR} LIPO=${LIPO} @@ -292,8 +368,13 @@ cat << EOF ORIG_PWD=${ORIG_PWD} PRESERVE_BUILD_OUTPUT=${PRESERVE_BUILD_OUTPUT} TARGETS="$(print_list "" ${TARGETS})" + ENABLE_SHARED=${ENABLE_SHARED} OSX_TARGETS="${OSX_TARGETS}" SIM_TARGETS="${SIM_TARGETS}" + SCRIPT_DIR="${SCRIPT_DIR}" + 
FULLVERSION="${FULLVERSION}" + VERSION="${VERSION}" + IOS_VERSION_MIN="${IOS_VERSION_MIN}" EOF fi diff --git a/libs/libvpx/build/make/msvs_common.sh b/libs/libvpx/build/make/msvs_common.sh index 90c14888c2..88f1cf9b57 100644 --- a/libs/libvpx/build/make/msvs_common.sh +++ b/libs/libvpx/build/make/msvs_common.sh @@ -39,11 +39,12 @@ fix_path() { } # Corrects the paths in file_list in one pass for efficiency. +# $1 is the name of the array to be modified. fix_file_list() { - # TODO(jzern): this could be more generic and take the array as a param. - files=$(fix_path "${file_list[@]}") + declare -n array_ref=$1 + files=$(fix_path "${array_ref[@]}") local IFS=$'\n' - file_list=($files) + array_ref=($files) } generate_uuid() { diff --git a/libs/libvpx/build/make/rtcd.pl b/libs/libvpx/build/make/rtcd.pl index 991b6abe7d..9e746c46d0 100755 --- a/libs/libvpx/build/make/rtcd.pl +++ b/libs/libvpx/build/make/rtcd.pl @@ -384,13 +384,8 @@ if ($opts{arch} eq 'x86') { } close CONFIG_FILE; mips; -} elsif ($opts{arch} eq 'armv6') { - @ALL_ARCHS = filter(qw/media/); - arm; } elsif ($opts{arch} =~ /armv7\w?/) { - @ALL_ARCHS = filter(qw/media neon_asm neon/); - @REQUIRES = filter(keys %required ? keys %required : qw/media/); - &require(@REQUIRES); + @ALL_ARCHS = filter(qw/neon_asm neon/); arm; } elsif ($opts{arch} eq 'armv8' || $opts{arch} eq 'arm64' ) { @ALL_ARCHS = filter(qw/neon/); diff --git a/libs/libvpx/build/make/version.sh b/libs/libvpx/build/make/version.sh index b340142c93..6967527771 100755 --- a/libs/libvpx/build/make/version.sh +++ b/libs/libvpx/build/make/version.sh @@ -24,8 +24,9 @@ out_file=${2} id=${3:-VERSION_STRING} git_version_id="" -if [ -d "${source_path}/.git" ]; then +if [ -e "${source_path}/.git" ]; then # Source Path is a git working copy. Check for local modifications. + # Note that git submodules may have a file as .git, not a directory. 
export GIT_DIR="${source_path}/.git" git_version_id=`git describe --match=v[0-9]* 2>/dev/null` fi diff --git a/libs/libvpx/codereview.settings b/libs/libvpx/codereview.settings index d7c8d395cb..34c6f1d9de 100644 --- a/libs/libvpx/codereview.settings +++ b/libs/libvpx/codereview.settings @@ -2,3 +2,4 @@ GERRIT_HOST: chromium-review.googlesource.com GERRIT_PORT: 29418 CODE_REVIEW_SERVER: chromium-review.googlesource.com +GERRIT_SQUASH_UPLOADS: False diff --git a/libs/libvpx/configure b/libs/libvpx/configure index 095cddf2db..95af2e395e 100755 --- a/libs/libvpx/configure +++ b/libs/libvpx/configure @@ -40,7 +40,6 @@ Advanced options: hardware decoder compatibility ${toggle_vp8} VP8 codec support ${toggle_vp9} VP9 codec support - ${toggle_vp10} VP10 codec support ${toggle_internal_stats} output of encoder internal stats for debug, if supported (encoders) ${toggle_postproc} postprocessing ${toggle_vp9_postproc} vp9 specific postprocessing @@ -98,11 +97,9 @@ EOF # all_platforms is a list of all supported target platforms. Maintain # alphabetically by architecture, generic-gnu last. 
-all_platforms="${all_platforms} armv6-darwin-gcc" -all_platforms="${all_platforms} armv6-linux-rvct" -all_platforms="${all_platforms} armv6-linux-gcc" -all_platforms="${all_platforms} armv6-none-rvct" +all_platforms="${all_platforms} arm64-android-gcc" all_platforms="${all_platforms} arm64-darwin-gcc" +all_platforms="${all_platforms} arm64-linux-gcc" all_platforms="${all_platforms} armv7-android-gcc" #neon Cortex-A8 all_platforms="${all_platforms} armv7-darwin-gcc" #neon Cortex-A8 all_platforms="${all_platforms} armv7-linux-rvct" #neon Cortex-A8 @@ -112,6 +109,7 @@ all_platforms="${all_platforms} armv7-win32-vs11" all_platforms="${all_platforms} armv7-win32-vs12" all_platforms="${all_platforms} armv7-win32-vs14" all_platforms="${all_platforms} armv7s-darwin-gcc" +all_platforms="${all_platforms} armv8-linux-gcc" all_platforms="${all_platforms} mips32-linux-gcc" all_platforms="${all_platforms} mips64-linux-gcc" all_platforms="${all_platforms} sparc-solaris-gcc" @@ -132,9 +130,6 @@ all_platforms="${all_platforms} x86-linux-icc" all_platforms="${all_platforms} x86-os2-gcc" all_platforms="${all_platforms} x86-solaris-gcc" all_platforms="${all_platforms} x86-win32-gcc" -all_platforms="${all_platforms} x86-win32-vs7" -all_platforms="${all_platforms} x86-win32-vs8" -all_platforms="${all_platforms} x86-win32-vs9" all_platforms="${all_platforms} x86-win32-vs10" all_platforms="${all_platforms} x86-win32-vs11" all_platforms="${all_platforms} x86-win32-vs12" @@ -152,8 +147,6 @@ all_platforms="${all_platforms} x86_64-linux-gcc" all_platforms="${all_platforms} x86_64-linux-icc" all_platforms="${all_platforms} x86_64-solaris-gcc" all_platforms="${all_platforms} x86_64-win64-gcc" -all_platforms="${all_platforms} x86_64-win64-vs8" -all_platforms="${all_platforms} x86_64-win64-vs9" all_platforms="${all_platforms} x86_64-win64-vs10" all_platforms="${all_platforms} x86_64-win64-vs11" all_platforms="${all_platforms} x86_64-win64-vs12" @@ -195,12 +188,8 @@ if [ ${doxy_major:-0} -ge 1 ]; 
then fi # disable codecs when their source directory does not exist -[ -d "${source_path}/vp8" ] || disable_feature vp8 -[ -d "${source_path}/vp9" ] || disable_feature vp9 -[ -d "${source_path}/vp10" ] || disable_feature vp10 - -# disable vp10 codec by default -disable_feature vp10 +[ -d "${source_path}/vp8" ] || disable_codec vp8 +[ -d "${source_path}/vp9" ] || disable_codec vp9 # install everything except the sources, by default. sources will have # to be enabled when doing dist builds, since that's no longer a common @@ -222,13 +211,10 @@ CODECS=" vp8_decoder vp9_encoder vp9_decoder - vp10_encoder - vp10_decoder " CODEC_FAMILIES=" vp8 vp9 - vp10 " ARCH_LIST=" @@ -248,8 +234,6 @@ ARCH_EXT_LIST_X86=" avx2 " ARCH_EXT_LIST=" - edsp - media neon neon_asm @@ -279,7 +263,6 @@ CONFIG_LIST=" install_bins install_libs install_srcs - use_x86inc debug gprof gcov @@ -341,7 +324,6 @@ CMDLINE_SELECT=" gprof gcov pic - use_x86inc optimizations ccache runtime_cpu_detect @@ -391,15 +373,19 @@ process_cmdline() { for opt do optval="${opt#*=}" case "$opt" in - --disable-codecs) for c in ${CODECS}; do disable_feature $c; done ;; + --disable-codecs) + for c in ${CODEC_FAMILIES}; do disable_codec $c; done + ;; --enable-?*|--disable-?*) eval `echo "$opt" | sed 's/--/action=/;s/-/ option=/;s/-/_/g'` - if echo "${EXPERIMENT_LIST}" | grep "^ *$option\$" >/dev/null; then + if is_in ${option} ${EXPERIMENT_LIST}; then if enabled experimental; then ${action}_feature $option else log_echo "Ignoring $opt -- not in experimental mode." fi + elif is_in ${option} "${CODECS} ${CODEC_FAMILIES}"; then + ${action}_codec ${option} else process_common_cmdline $opt fi @@ -413,14 +399,6 @@ process_cmdline() { post_process_cmdline() { c="" - # If the codec family is disabled, disable all components of that family. - # If the codec family is enabled, enable all components of that family. 
- log_echo "Configuring selected codecs" - for c in ${CODECS}; do - disabled ${c%%_*} && disable_feature ${c} - enabled ${c%%_*} && enable_feature ${c} - done - # Enable all detected codecs, if they haven't been disabled for c in ${CODECS}; do soft_enable $c; done @@ -515,13 +493,18 @@ process_detect() { # Can only build shared libs on a subset of platforms. Doing this check # here rather than at option parse time because the target auto-detect # magic happens after the command line has been parsed. - if ! enabled linux && ! enabled os2; then + case "${tgt_os}" in + linux|os2|darwin*|iphonesimulator*) + # Supported platforms + ;; + *) if enabled gnu; then echo "--enable-shared is only supported on ELF; assuming this is OK" else - die "--enable-shared only supported on ELF and OS/2 for now" + die "--enable-shared only supported on ELF, OS/2, and Darwin for now" fi - fi + ;; + esac fi if [ -z "$CC" ] || enabled external_build; then echo "Bypassing toolchain for environment detection." @@ -584,26 +567,19 @@ process_toolchain() { check_add_cflags -Wall check_add_cflags -Wdeclaration-after-statement check_add_cflags -Wdisabled-optimization + check_add_cflags -Wfloat-conversion check_add_cflags -Wpointer-arith check_add_cflags -Wtype-limits check_add_cflags -Wcast-qual check_add_cflags -Wvla check_add_cflags -Wimplicit-function-declaration check_add_cflags -Wuninitialized - check_add_cflags -Wunused-variable - case ${CC} in - *clang*) - # libvpx and/or clang have issues with aliasing: - # https://code.google.com/p/webm/issues/detail?id=603 - # work around them until they are fixed - check_add_cflags -fno-strict-aliasing - ;; - *) check_add_cflags -Wunused-but-set-variable ;; - esac + check_add_cflags -Wunused + # check_add_cflags also adds to cxxflags. gtest does not do well with + # -Wundef so add it explicitly to CFLAGS only. 
+ check_cflags -Wundef && add_cflags_only -Wundef if enabled mips || [ -z "${INLINE}" ]; then enabled extra_warnings || check_add_cflags -Wno-unused-function - else - check_add_cflags -Wunused-function fi fi @@ -652,17 +628,9 @@ process_toolchain() { vs*) enable_feature msvs enable_feature solution vs_version=${tgt_cc##vs} - case $vs_version in - [789]) - VCPROJ_SFX=vcproj - gen_vcproj_cmd=${source_path}/build/make/gen_msvs_proj.sh - ;; - 10|11|12|14) - VCPROJ_SFX=vcxproj - gen_vcproj_cmd=${source_path}/build/make/gen_msvs_vcxproj.sh - enabled werror && gen_vcproj_cmd="${gen_vcproj_cmd} --enable-werror" - ;; - esac + VCPROJ_SFX=vcxproj + gen_vcproj_cmd=${source_path}/build/make/gen_msvs_vcxproj.sh + enabled werror && gen_vcproj_cmd="${gen_vcproj_cmd} --enable-werror" all_targets="${all_targets} solution" INLINE="__forceinline" ;; diff --git a/libs/libvpx/examples.mk b/libs/libvpx/examples.mk index f10bec68c3..cc7fb1ddcf 100644 --- a/libs/libvpx/examples.mk +++ b/libs/libvpx/examples.mk @@ -36,21 +36,30 @@ LIBYUV_SRCS += third_party/libyuv/include/libyuv/basic_types.h \ third_party/libyuv/source/scale_neon64.cc \ third_party/libyuv/source/scale_win.cc \ -LIBWEBM_COMMON_SRCS += third_party/libwebm/webmids.hpp +LIBWEBM_COMMON_SRCS += third_party/libwebm/common/hdr_util.cc \ + third_party/libwebm/common/hdr_util.h \ + third_party/libwebm/common/webmids.h -LIBWEBM_MUXER_SRCS += third_party/libwebm/mkvmuxer.cpp \ - third_party/libwebm/mkvmuxerutil.cpp \ - third_party/libwebm/mkvwriter.cpp \ - third_party/libwebm/mkvmuxer.hpp \ - third_party/libwebm/mkvmuxertypes.hpp \ - third_party/libwebm/mkvmuxerutil.hpp \ - third_party/libwebm/mkvparser.hpp \ - third_party/libwebm/mkvwriter.hpp +LIBWEBM_MUXER_SRCS += third_party/libwebm/mkvmuxer/mkvmuxer.cc \ + third_party/libwebm/mkvmuxer/mkvmuxerutil.cc \ + third_party/libwebm/mkvmuxer/mkvwriter.cc \ + third_party/libwebm/mkvmuxer/mkvmuxer.h \ + third_party/libwebm/mkvmuxer/mkvmuxertypes.h \ + 
third_party/libwebm/mkvmuxer/mkvmuxerutil.h \ + third_party/libwebm/mkvparser/mkvparser.h \ + third_party/libwebm/mkvmuxer/mkvwriter.h + +LIBWEBM_PARSER_SRCS = third_party/libwebm/mkvparser/mkvparser.cc \ + third_party/libwebm/mkvparser/mkvreader.cc \ + third_party/libwebm/mkvparser/mkvparser.h \ + third_party/libwebm/mkvparser/mkvreader.h + +# Add compile flags and include path for libwebm sources. +ifeq ($(CONFIG_WEBM_IO),yes) + CXXFLAGS += -D__STDC_CONSTANT_MACROS -D__STDC_LIMIT_MACROS + INC_PATH-yes += $(SRC_PATH_BARE)/third_party/libwebm +endif -LIBWEBM_PARSER_SRCS = third_party/libwebm/mkvparser.cpp \ - third_party/libwebm/mkvreader.cpp \ - third_party/libwebm/mkvparser.hpp \ - third_party/libwebm/mkvreader.hpp # List of examples to build. UTILS are tools meant for distribution # while EXAMPLES demonstrate specific portions of the API. @@ -70,6 +79,7 @@ ifeq ($(CONFIG_LIBYUV),yes) endif ifeq ($(CONFIG_WEBM_IO),yes) vpxdec.SRCS += $(LIBWEBM_COMMON_SRCS) + vpxdec.SRCS += $(LIBWEBM_MUXER_SRCS) vpxdec.SRCS += $(LIBWEBM_PARSER_SRCS) vpxdec.SRCS += webmdec.cc webmdec.h endif @@ -93,6 +103,7 @@ endif ifeq ($(CONFIG_WEBM_IO),yes) vpxenc.SRCS += $(LIBWEBM_COMMON_SRCS) vpxenc.SRCS += $(LIBWEBM_MUXER_SRCS) + vpxenc.SRCS += $(LIBWEBM_PARSER_SRCS) vpxenc.SRCS += webmenc.cc webmenc.h endif vpxenc.GUID = 548DEC74-7A15-4B2B-AFC3-AA102E7C25C1 @@ -204,6 +215,17 @@ vp8cx_set_ref.SRCS += vpx_ports/msvc.h vp8cx_set_ref.GUID = C5E31F7F-96F6-48BD-BD3E-10EBF6E8057A vp8cx_set_ref.DESCRIPTION = VP8 set encoder reference frame +ifeq ($(CONFIG_VP9_ENCODER),yes) +ifeq ($(CONFIG_DECODERS),yes) +EXAMPLES-yes += vp9cx_set_ref.c +vp9cx_set_ref.SRCS += ivfenc.h ivfenc.c +vp9cx_set_ref.SRCS += tools_common.h tools_common.c +vp9cx_set_ref.SRCS += video_common.h +vp9cx_set_ref.SRCS += video_writer.h video_writer.c +vp9cx_set_ref.GUID = 65D7F14A-2EE6-4293-B958-AB5107A03B55 +vp9cx_set_ref.DESCRIPTION = VP9 set encoder reference frame +endif +endif ifeq ($(CONFIG_MULTI_RES_ENCODING),yes) ifeq 
($(CONFIG_LIBYUV),yes) diff --git a/libs/libvpx/examples/decode_to_md5.c b/libs/libvpx/examples/decode_to_md5.c index 1ae7a4b57f..51959f37df 100644 --- a/libs/libvpx/examples/decode_to_md5.c +++ b/libs/libvpx/examples/decode_to_md5.c @@ -65,8 +65,7 @@ static void get_image_md5(const vpx_image_t *img, unsigned char digest[16]) { static void print_md5(FILE *stream, unsigned char digest[16]) { int i; - for (i = 0; i < 16; ++i) - fprintf(stream, "%02x", digest[i]); + for (i = 0; i < 16; ++i) fprintf(stream, "%02x", digest[i]); } static const char *exec_name; @@ -86,12 +85,10 @@ int main(int argc, char **argv) { exec_name = argv[0]; - if (argc != 3) - die("Invalid number of arguments."); + if (argc != 3) die("Invalid number of arguments."); reader = vpx_video_reader_open(argv[1]); - if (!reader) - die("Failed to open %s for reading.", argv[1]); + if (!reader) die("Failed to open %s for reading.", argv[1]); if (!(outfile = fopen(argv[2], "wb"))) die("Failed to open %s for writing.", argv[2]); @@ -99,8 +96,7 @@ int main(int argc, char **argv) { info = vpx_video_reader_get_info(reader); decoder = get_vpx_decoder_by_fourcc(info->codec_fourcc); - if (!decoder) - die("Unknown input codec."); + if (!decoder) die("Unknown input codec."); printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface())); @@ -111,8 +107,8 @@ int main(int argc, char **argv) { vpx_codec_iter_t iter = NULL; vpx_image_t *img = NULL; size_t frame_size = 0; - const unsigned char *frame = vpx_video_reader_get_frame(reader, - &frame_size); + const unsigned char *frame = + vpx_video_reader_get_frame(reader, &frame_size); if (vpx_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 0)) die_codec(&codec, "Failed to decode frame"); @@ -121,14 +117,13 @@ int main(int argc, char **argv) { get_image_md5(img, digest); print_md5(outfile, digest); - fprintf(outfile, " img-%dx%d-%04d.i420\n", - img->d_w, img->d_h, ++frame_cnt); + fprintf(outfile, " img-%dx%d-%04d.i420\n", img->d_w, img->d_h, + 
++frame_cnt); } } printf("Processed %d frames.\n", frame_cnt); - if (vpx_codec_destroy(&codec)) - die_codec(&codec, "Failed to destroy codec."); + if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec."); vpx_video_reader_close(reader); diff --git a/libs/libvpx/examples/decode_with_drops.c b/libs/libvpx/examples/decode_with_drops.c index 2233e473d3..e69e2a9f9b 100644 --- a/libs/libvpx/examples/decode_with_drops.c +++ b/libs/libvpx/examples/decode_with_drops.c @@ -84,18 +84,16 @@ int main(int argc, char **argv) { exec_name = argv[0]; - if (argc != 4) - die("Invalid number of arguments."); + if (argc != 4) die("Invalid number of arguments."); reader = vpx_video_reader_open(argv[1]); - if (!reader) - die("Failed to open %s for reading.", argv[1]); + if (!reader) die("Failed to open %s for reading.", argv[1]); if (!(outfile = fopen(argv[2], "wb"))) die("Failed to open %s for writing.", argv[2]); - n = strtol(argv[3], &nptr, 0); - m = strtol(nptr + 1, NULL, 0); + n = (int)strtol(argv[3], &nptr, 0); + m = (int)strtol(nptr + 1, NULL, 0); is_range = (*nptr == '-'); if (!n || !m || (*nptr != '-' && *nptr != '/')) die("Couldn't parse pattern %s.\n", argv[3]); @@ -103,8 +101,7 @@ int main(int argc, char **argv) { info = vpx_video_reader_get_info(reader); decoder = get_vpx_decoder_by_fourcc(info->codec_fourcc); - if (!decoder) - die("Unknown input codec."); + if (!decoder) die("Unknown input codec."); printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface())); @@ -116,8 +113,8 @@ int main(int argc, char **argv) { vpx_image_t *img = NULL; size_t frame_size = 0; int skip; - const unsigned char *frame = vpx_video_reader_get_frame(reader, - &frame_size); + const unsigned char *frame = + vpx_video_reader_get_frame(reader, &frame_size); if (vpx_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 0)) die_codec(&codec, "Failed to decode frame."); @@ -139,8 +136,7 @@ int main(int argc, char **argv) { } printf("Processed %d frames.\n", frame_cnt); 
- if (vpx_codec_destroy(&codec)) - die_codec(&codec, "Failed to destroy codec."); + if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec."); printf("Play: ffplay -f rawvideo -pix_fmt yuv420p -s %dx%d %s\n", info->frame_width, info->frame_height, argv[2]); diff --git a/libs/libvpx/examples/postproc.c b/libs/libvpx/examples/postproc.c index a8ac208d9b..15713b946a 100644 --- a/libs/libvpx/examples/postproc.c +++ b/libs/libvpx/examples/postproc.c @@ -68,12 +68,10 @@ int main(int argc, char **argv) { exec_name = argv[0]; - if (argc != 3) - die("Invalid number of arguments."); + if (argc != 3) die("Invalid number of arguments."); reader = vpx_video_reader_open(argv[1]); - if (!reader) - die("Failed to open %s for reading.", argv[1]); + if (!reader) die("Failed to open %s for reading.", argv[1]); if (!(outfile = fopen(argv[2], "wb"))) die("Failed to open %s for writing", argv[2]); @@ -81,8 +79,7 @@ int main(int argc, char **argv) { info = vpx_video_reader_get_info(reader); decoder = get_vpx_decoder_by_fourcc(info->codec_fourcc); - if (!decoder) - die("Unknown input codec."); + if (!decoder) die("Unknown input codec."); printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface())); @@ -91,26 +88,25 @@ int main(int argc, char **argv) { if (res == VPX_CODEC_INCAPABLE) die_codec(&codec, "Postproc not supported by this decoder."); - if (res) - die_codec(&codec, "Failed to initialize decoder."); + if (res) die_codec(&codec, "Failed to initialize decoder."); while (vpx_video_reader_read_frame(reader)) { vpx_codec_iter_t iter = NULL; vpx_image_t *img = NULL; size_t frame_size = 0; - const unsigned char *frame = vpx_video_reader_get_frame(reader, - &frame_size); + const unsigned char *frame = + vpx_video_reader_get_frame(reader, &frame_size); ++frame_cnt; if (frame_cnt % 30 == 1) { - vp8_postproc_cfg_t pp = {0, 0, 0}; + vp8_postproc_cfg_t pp = { 0, 0, 0 }; - if (vpx_codec_control(&codec, VP8_SET_POSTPROC, &pp)) - die_codec(&codec, "Failed to turn off 
postproc."); + if (vpx_codec_control(&codec, VP8_SET_POSTPROC, &pp)) + die_codec(&codec, "Failed to turn off postproc."); } else if (frame_cnt % 30 == 16) { - vp8_postproc_cfg_t pp = {VP8_DEBLOCK | VP8_DEMACROBLOCK | VP8_MFQE, - 4, 0}; + vp8_postproc_cfg_t pp = { VP8_DEBLOCK | VP8_DEMACROBLOCK | VP8_MFQE, 4, + 0 }; if (vpx_codec_control(&codec, VP8_SET_POSTPROC, &pp)) die_codec(&codec, "Failed to turn on postproc."); }; @@ -125,8 +121,7 @@ int main(int argc, char **argv) { } printf("Processed %d frames.\n", frame_cnt); - if (vpx_codec_destroy(&codec)) - die_codec(&codec, "Failed to destroy codec"); + if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec"); printf("Play: ffplay -f rawvideo -pix_fmt yuv420p -s %dx%d %s\n", info->frame_width, info->frame_height, argv[2]); diff --git a/libs/libvpx/examples/resize_util.c b/libs/libvpx/examples/resize_util.c index e6fdd5bb2a..7e529b2e20 100644 --- a/libs/libvpx/examples/resize_util.c +++ b/libs/libvpx/examples/resize_util.c @@ -34,10 +34,8 @@ void usage_exit(void) { static int parse_dim(char *v, int *width, int *height) { char *x = strchr(v, 'x'); - if (x == NULL) - x = strchr(v, 'X'); - if (x == NULL) - return 0; + if (x == NULL) x = strchr(v, 'X'); + if (x == NULL) return 0; *width = atoi(v); *height = atoi(&x[1]); if (*width <= 0 || *height <= 0) @@ -93,30 +91,25 @@ int main(int argc, char *argv[]) { else frames = INT_MAX; - printf("Input size: %dx%d\n", - width, height); - printf("Target size: %dx%d, Frames: ", - target_width, target_height); + printf("Input size: %dx%d\n", width, height); + printf("Target size: %dx%d, Frames: ", target_width, target_height); if (frames == INT_MAX) printf("All\n"); else printf("%d\n", frames); - inbuf = (uint8_t*)malloc(width * height * 3 / 2); - outbuf = (uint8_t*)malloc(target_width * target_height * 3 / 2); + inbuf = (uint8_t *)malloc(width * height * 3 / 2); + outbuf = (uint8_t *)malloc(target_width * target_height * 3 / 2); inbuf_u = inbuf + width * height; 
inbuf_v = inbuf_u + width * height / 4; outbuf_u = outbuf + target_width * target_height; outbuf_v = outbuf_u + target_width * target_height / 4; f = 0; while (f < frames) { - if (fread(inbuf, width * height * 3 / 2, 1, fpin) != 1) - break; - vp9_resize_frame420(inbuf, width, inbuf_u, inbuf_v, width / 2, - height, width, - outbuf, target_width, outbuf_u, outbuf_v, - target_width / 2, - target_height, target_width); + if (fread(inbuf, width * height * 3 / 2, 1, fpin) != 1) break; + vp9_resize_frame420(inbuf, width, inbuf_u, inbuf_v, width / 2, height, + width, outbuf, target_width, outbuf_u, outbuf_v, + target_width / 2, target_height, target_width); fwrite(outbuf, target_width * target_height * 3 / 2, 1, fpout); f++; } diff --git a/libs/libvpx/examples/set_maps.c b/libs/libvpx/examples/set_maps.c index 1dc3ac0c98..c0c7d10e72 100644 --- a/libs/libvpx/examples/set_maps.c +++ b/libs/libvpx/examples/set_maps.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - // VP8 Set Active and ROI Maps // =========================== // @@ -86,8 +85,7 @@ static void set_roi_map(const vpx_codec_enc_cfg_t *cfg, roi.static_threshold[3] = 0; roi.roi_map = (uint8_t *)malloc(roi.rows * roi.cols); - for (i = 0; i < roi.rows * roi.cols; ++i) - roi.roi_map[i] = i % 4; + for (i = 0; i < roi.rows * roi.cols; ++i) roi.roi_map[i] = i % 4; if (vpx_codec_control(codec, VP8E_SET_ROI_MAP, &roi)) die_codec(codec, "Failed to set ROI map"); @@ -98,14 +96,13 @@ static void set_roi_map(const vpx_codec_enc_cfg_t *cfg, static void set_active_map(const vpx_codec_enc_cfg_t *cfg, vpx_codec_ctx_t *codec) { unsigned int i; - vpx_active_map_t map = {0, 0, 0}; + vpx_active_map_t map = { 0, 0, 0 }; map.rows = (cfg->g_h + 15) / 16; map.cols = (cfg->g_w + 15) / 16; map.active_map = (uint8_t *)malloc(map.rows * map.cols); - for (i = 0; i < map.rows * map.cols; ++i) - map.active_map[i] = i % 2; + for (i = 0; i < map.rows * map.cols; ++i) map.active_map[i] = i % 2; if 
(vpx_codec_control(codec, VP8E_SET_ACTIVEMAP, &map)) die_codec(codec, "Failed to set active map"); @@ -115,7 +112,7 @@ static void set_active_map(const vpx_codec_enc_cfg_t *cfg, static void unset_active_map(const vpx_codec_enc_cfg_t *cfg, vpx_codec_ctx_t *codec) { - vpx_active_map_t map = {0, 0, 0}; + vpx_active_map_t map = { 0, 0, 0 }; map.rows = (cfg->g_h + 15) / 16; map.cols = (cfg->g_w + 15) / 16; @@ -125,25 +122,21 @@ static void unset_active_map(const vpx_codec_enc_cfg_t *cfg, die_codec(codec, "Failed to set active map"); } -static int encode_frame(vpx_codec_ctx_t *codec, - vpx_image_t *img, - int frame_index, - VpxVideoWriter *writer) { +static int encode_frame(vpx_codec_ctx_t *codec, vpx_image_t *img, + int frame_index, VpxVideoWriter *writer) { int got_pkts = 0; vpx_codec_iter_t iter = NULL; const vpx_codec_cx_pkt_t *pkt = NULL; - const vpx_codec_err_t res = vpx_codec_encode(codec, img, frame_index, 1, 0, - VPX_DL_GOOD_QUALITY); - if (res != VPX_CODEC_OK) - die_codec(codec, "Failed to encode frame"); + const vpx_codec_err_t res = + vpx_codec_encode(codec, img, frame_index, 1, 0, VPX_DL_GOOD_QUALITY); + if (res != VPX_CODEC_OK) die_codec(codec, "Failed to encode frame"); while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) { got_pkts = 1; if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) { const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0; - if (!vpx_video_writer_write_frame(writer, - pkt->data.frame.buf, + if (!vpx_video_writer_write_frame(writer, pkt->data.frame.buf, pkt->data.frame.sz, pkt->data.frame.pts)) { die_codec(codec, "Failed to write compressed frame"); @@ -167,12 +160,11 @@ int main(int argc, char **argv) { VpxVideoInfo info; VpxVideoWriter *writer = NULL; const VpxInterface *encoder = NULL; - const int fps = 2; // TODO(dkovalev) add command line argument + const int fps = 2; // TODO(dkovalev) add command line argument const double bits_per_pixel_per_frame = 0.067; exec_name = argv[0]; - if (argc != 6) - die("Invalid number of 
arguments"); + if (argc != 6) die("Invalid number of arguments"); memset(&info, 0, sizeof(info)); @@ -182,40 +174,36 @@ int main(int argc, char **argv) { } assert(encoder != NULL); info.codec_fourcc = encoder->fourcc; - info.frame_width = strtol(argv[2], NULL, 0); - info.frame_height = strtol(argv[3], NULL, 0); + info.frame_width = (int)strtol(argv[2], NULL, 0); + info.frame_height = (int)strtol(argv[3], NULL, 0); info.time_base.numerator = 1; info.time_base.denominator = fps; - if (info.frame_width <= 0 || - info.frame_height <= 0 || - (info.frame_width % 2) != 0 || - (info.frame_height % 2) != 0) { + if (info.frame_width <= 0 || info.frame_height <= 0 || + (info.frame_width % 2) != 0 || (info.frame_height % 2) != 0) { die("Invalid frame size: %dx%d", info.frame_width, info.frame_height); } if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width, - info.frame_height, 1)) { + info.frame_height, 1)) { die("Failed to allocate image."); } printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface())); res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0); - if (res) - die_codec(&codec, "Failed to get default codec config."); + if (res) die_codec(&codec, "Failed to get default codec config."); cfg.g_w = info.frame_width; cfg.g_h = info.frame_height; cfg.g_timebase.num = info.time_base.numerator; cfg.g_timebase.den = info.time_base.denominator; - cfg.rc_target_bitrate = (unsigned int)(bits_per_pixel_per_frame * cfg.g_w * - cfg.g_h * fps / 1000); + cfg.rc_target_bitrate = + (unsigned int)(bits_per_pixel_per_frame * cfg.g_w * cfg.g_h * fps / 1000); cfg.g_lag_in_frames = 0; writer = vpx_video_writer_open(argv[5], kContainerIVF, &info); - if (!writer) - die("Failed to open %s for writing.", argv[5]); + if (!writer) die("Failed to open %s for writing.", argv[5]); if (!(infile = fopen(argv[4], "rb"))) die("Failed to open %s for reading.", argv[4]); @@ -239,15 +227,15 @@ int main(int argc, char **argv) { } // Flush encoder. 
- while (encode_frame(&codec, NULL, -1, writer)) {} + while (encode_frame(&codec, NULL, -1, writer)) { + } printf("\n"); fclose(infile); printf("Processed %d frames.\n", frame_count); vpx_img_free(&raw); - if (vpx_codec_destroy(&codec)) - die_codec(&codec, "Failed to destroy codec."); + if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec."); vpx_video_writer_close(writer); diff --git a/libs/libvpx/examples/simple_decoder.c b/libs/libvpx/examples/simple_decoder.c index 8ccc81035e..2bb1a05245 100644 --- a/libs/libvpx/examples/simple_decoder.c +++ b/libs/libvpx/examples/simple_decoder.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - // Simple Decoder // ============== // @@ -103,12 +102,10 @@ int main(int argc, char **argv) { exec_name = argv[0]; - if (argc != 3) - die("Invalid number of arguments."); + if (argc != 3) die("Invalid number of arguments."); reader = vpx_video_reader_open(argv[1]); - if (!reader) - die("Failed to open %s for reading.", argv[1]); + if (!reader) die("Failed to open %s for reading.", argv[1]); if (!(outfile = fopen(argv[2], "wb"))) die("Failed to open %s for writing.", argv[2]); @@ -116,8 +113,7 @@ int main(int argc, char **argv) { info = vpx_video_reader_get_info(reader); decoder = get_vpx_decoder_by_fourcc(info->codec_fourcc); - if (!decoder) - die("Unknown input codec."); + if (!decoder) die("Unknown input codec."); printf("Using %s\n", vpx_codec_iface_name(decoder->codec_interface())); @@ -128,8 +124,8 @@ int main(int argc, char **argv) { vpx_codec_iter_t iter = NULL; vpx_image_t *img = NULL; size_t frame_size = 0; - const unsigned char *frame = vpx_video_reader_get_frame(reader, - &frame_size); + const unsigned char *frame = + vpx_video_reader_get_frame(reader, &frame_size); if (vpx_codec_decode(&codec, frame, (unsigned int)frame_size, NULL, 0)) die_codec(&codec, "Failed to decode frame."); @@ -140,8 +136,7 @@ int main(int argc, char **argv) { } printf("Processed %d frames.\n", 
frame_cnt); - if (vpx_codec_destroy(&codec)) - die_codec(&codec, "Failed to destroy codec"); + if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec"); printf("Play: ffplay -f rawvideo -pix_fmt yuv420p -s %dx%d %s\n", info->frame_width, info->frame_height, argv[2]); diff --git a/libs/libvpx/examples/simple_encoder.c b/libs/libvpx/examples/simple_encoder.c index a307729731..dde6344f8d 100644 --- a/libs/libvpx/examples/simple_encoder.c +++ b/libs/libvpx/examples/simple_encoder.c @@ -109,32 +109,27 @@ static const char *exec_name; void usage_exit(void) { fprintf(stderr, "Usage: %s " - " []\nSee comments in " - "simple_encoder.c for more information.\n", + " \n" + "See comments in simple_encoder.c for more information.\n", exec_name); exit(EXIT_FAILURE); } -static int encode_frame(vpx_codec_ctx_t *codec, - vpx_image_t *img, - int frame_index, - int flags, - VpxVideoWriter *writer) { +static int encode_frame(vpx_codec_ctx_t *codec, vpx_image_t *img, + int frame_index, int flags, VpxVideoWriter *writer) { int got_pkts = 0; vpx_codec_iter_t iter = NULL; const vpx_codec_cx_pkt_t *pkt = NULL; - const vpx_codec_err_t res = vpx_codec_encode(codec, img, frame_index, 1, - flags, VPX_DL_GOOD_QUALITY); - if (res != VPX_CODEC_OK) - die_codec(codec, "Failed to encode frame"); + const vpx_codec_err_t res = + vpx_codec_encode(codec, img, frame_index, 1, flags, VPX_DL_GOOD_QUALITY); + if (res != VPX_CODEC_OK) die_codec(codec, "Failed to encode frame"); while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) { got_pkts = 1; if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) { const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0; - if (!vpx_video_writer_write_frame(writer, - pkt->data.frame.buf, + if (!vpx_video_writer_write_frame(writer, pkt->data.frame.buf, pkt->data.frame.sz, pkt->data.frame.pts)) { die_codec(codec, "Failed to write compressed frame"); @@ -147,6 +142,7 @@ static int encode_frame(vpx_codec_ctx_t *codec, return got_pkts; } +// 
TODO(tomfinegan): Improve command line parsing and add args for bitrate/fps. int main(int argc, char **argv) { FILE *infile = NULL; vpx_codec_ctx_t codec; @@ -154,15 +150,14 @@ int main(int argc, char **argv) { int frame_count = 0; vpx_image_t raw; vpx_codec_err_t res; - VpxVideoInfo info = {0}; + VpxVideoInfo info = { 0, 0, 0, { 0, 0 } }; VpxVideoWriter *writer = NULL; const VpxInterface *encoder = NULL; - const int fps = 30; // TODO(dkovalev) add command line argument - const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument + const int fps = 30; + const int bitrate = 200; int keyframe_interval = 0; - - // TODO(dkovalev): Add some simple command line parsing code to make the - // command line more flexible. + int max_frames = 0; + int frames_encoded = 0; const char *codec_arg = NULL; const char *width_arg = NULL; const char *height_arg = NULL; @@ -172,8 +167,7 @@ int main(int argc, char **argv) { exec_name = argv[0]; - if (argc < 7) - die("Invalid number of arguments"); + if (argc != 9) die("Invalid number of arguments"); codec_arg = argv[1]; width_arg = argv[2]; @@ -181,49 +175,44 @@ int main(int argc, char **argv) { infile_arg = argv[4]; outfile_arg = argv[5]; keyframe_interval_arg = argv[6]; + max_frames = (int)strtol(argv[8], NULL, 0); encoder = get_vpx_encoder_by_name(codec_arg); - if (!encoder) - die("Unsupported codec."); + if (!encoder) die("Unsupported codec."); info.codec_fourcc = encoder->fourcc; - info.frame_width = strtol(width_arg, NULL, 0); - info.frame_height = strtol(height_arg, NULL, 0); + info.frame_width = (int)strtol(width_arg, NULL, 0); + info.frame_height = (int)strtol(height_arg, NULL, 0); info.time_base.numerator = 1; info.time_base.denominator = fps; - if (info.frame_width <= 0 || - info.frame_height <= 0 || - (info.frame_width % 2) != 0 || - (info.frame_height % 2) != 0) { + if (info.frame_width <= 0 || info.frame_height <= 0 || + (info.frame_width % 2) != 0 || (info.frame_height % 2) != 0) { die("Invalid frame size: 
%dx%d", info.frame_width, info.frame_height); } if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width, - info.frame_height, 1)) { + info.frame_height, 1)) { die("Failed to allocate image."); } - keyframe_interval = strtol(keyframe_interval_arg, NULL, 0); - if (keyframe_interval < 0) - die("Invalid keyframe interval value."); + keyframe_interval = (int)strtol(keyframe_interval_arg, NULL, 0); + if (keyframe_interval < 0) die("Invalid keyframe interval value."); printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface())); res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0); - if (res) - die_codec(&codec, "Failed to get default codec config."); + if (res) die_codec(&codec, "Failed to get default codec config."); cfg.g_w = info.frame_width; cfg.g_h = info.frame_height; cfg.g_timebase.num = info.time_base.numerator; cfg.g_timebase.den = info.time_base.denominator; cfg.rc_target_bitrate = bitrate; - cfg.g_error_resilient = argc > 7 ? strtol(argv[7], NULL, 0) : 0; + cfg.g_error_resilient = (vpx_codec_er_flags_t)strtoul(argv[7], NULL, 0); writer = vpx_video_writer_open(outfile_arg, kContainerIVF, &info); - if (!writer) - die("Failed to open %s for writing.", outfile_arg); + if (!writer) die("Failed to open %s for writing.", outfile_arg); if (!(infile = fopen(infile_arg, "rb"))) die("Failed to open %s for reading.", infile_arg); @@ -237,18 +226,20 @@ int main(int argc, char **argv) { if (keyframe_interval > 0 && frame_count % keyframe_interval == 0) flags |= VPX_EFLAG_FORCE_KF; encode_frame(&codec, &raw, frame_count++, flags, writer); + frames_encoded++; + if (max_frames > 0 && frames_encoded >= max_frames) break; } // Flush encoder. 
- while (encode_frame(&codec, NULL, -1, 0, writer)) {}; + while (encode_frame(&codec, NULL, -1, 0, writer)) { + } printf("\n"); fclose(infile); printf("Processed %d frames.\n", frame_count); vpx_img_free(&raw); - if (vpx_codec_destroy(&codec)) - die_codec(&codec, "Failed to destroy codec."); + if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec."); vpx_video_writer_close(writer); diff --git a/libs/libvpx/examples/twopass_encoder.c b/libs/libvpx/examples/twopass_encoder.c index aecc11d3f4..4e63a7a6c9 100644 --- a/libs/libvpx/examples/twopass_encoder.c +++ b/libs/libvpx/examples/twopass_encoder.c @@ -59,25 +59,23 @@ static const char *exec_name; void usage_exit(void) { - fprintf(stderr, "Usage: %s \n", + fprintf(stderr, + "Usage: %s " + "\n", exec_name); exit(EXIT_FAILURE); } -static int get_frame_stats(vpx_codec_ctx_t *ctx, - const vpx_image_t *img, - vpx_codec_pts_t pts, - unsigned int duration, - vpx_enc_frame_flags_t flags, - unsigned int deadline, +static int get_frame_stats(vpx_codec_ctx_t *ctx, const vpx_image_t *img, + vpx_codec_pts_t pts, unsigned int duration, + vpx_enc_frame_flags_t flags, unsigned int deadline, vpx_fixed_buf_t *stats) { int got_pkts = 0; vpx_codec_iter_t iter = NULL; const vpx_codec_cx_pkt_t *pkt = NULL; - const vpx_codec_err_t res = vpx_codec_encode(ctx, img, pts, duration, flags, - deadline); - if (res != VPX_CODEC_OK) - die_codec(ctx, "Failed to get frame stats."); + const vpx_codec_err_t res = + vpx_codec_encode(ctx, img, pts, duration, flags, deadline); + if (res != VPX_CODEC_OK) die_codec(ctx, "Failed to get frame stats."); while ((pkt = vpx_codec_get_cx_data(ctx, &iter)) != NULL) { got_pkts = 1; @@ -94,20 +92,16 @@ static int get_frame_stats(vpx_codec_ctx_t *ctx, return got_pkts; } -static int encode_frame(vpx_codec_ctx_t *ctx, - const vpx_image_t *img, - vpx_codec_pts_t pts, - unsigned int duration, - vpx_enc_frame_flags_t flags, - unsigned int deadline, +static int encode_frame(vpx_codec_ctx_t *ctx, const 
vpx_image_t *img, + vpx_codec_pts_t pts, unsigned int duration, + vpx_enc_frame_flags_t flags, unsigned int deadline, VpxVideoWriter *writer) { int got_pkts = 0; vpx_codec_iter_t iter = NULL; const vpx_codec_cx_pkt_t *pkt = NULL; - const vpx_codec_err_t res = vpx_codec_encode(ctx, img, pts, duration, flags, - deadline); - if (res != VPX_CODEC_OK) - die_codec(ctx, "Failed to encode frame."); + const vpx_codec_err_t res = + vpx_codec_encode(ctx, img, pts, duration, flags, deadline); + if (res != VPX_CODEC_OK) die_codec(ctx, "Failed to encode frame."); while ((pkt = vpx_codec_get_cx_data(ctx, &iter)) != NULL) { got_pkts = 1; @@ -115,8 +109,8 @@ static int encode_frame(vpx_codec_ctx_t *ctx, const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0; if (!vpx_video_writer_write_frame(writer, pkt->data.frame.buf, - pkt->data.frame.sz, - pkt->data.frame.pts)) + pkt->data.frame.sz, + pkt->data.frame.pts)) die_codec(ctx, "Failed to write compressed frame."); printf(keyframe ? "K" : "."); fflush(stdout); @@ -126,13 +120,12 @@ static int encode_frame(vpx_codec_ctx_t *ctx, return got_pkts; } -static vpx_fixed_buf_t pass0(vpx_image_t *raw, - FILE *infile, +static vpx_fixed_buf_t pass0(vpx_image_t *raw, FILE *infile, const VpxInterface *encoder, - const vpx_codec_enc_cfg_t *cfg) { + const vpx_codec_enc_cfg_t *cfg, int max_frames) { vpx_codec_ctx_t codec; int frame_count = 0; - vpx_fixed_buf_t stats = {NULL, 0}; + vpx_fixed_buf_t stats = { NULL, 0 }; if (vpx_codec_enc_init(&codec, encoder->codec_interface(), cfg, 0)) die_codec(&codec, "Failed to initialize encoder"); @@ -142,37 +135,33 @@ static vpx_fixed_buf_t pass0(vpx_image_t *raw, ++frame_count; get_frame_stats(&codec, raw, frame_count, 1, 0, VPX_DL_GOOD_QUALITY, &stats); + if (max_frames > 0 && frame_count >= max_frames) break; } // Flush encoder. 
- while (get_frame_stats(&codec, NULL, frame_count, 1, 0, - VPX_DL_GOOD_QUALITY, &stats)) {} + while (get_frame_stats(&codec, NULL, frame_count, 1, 0, VPX_DL_GOOD_QUALITY, + &stats)) { + } printf("Pass 0 complete. Processed %d frames.\n", frame_count); - if (vpx_codec_destroy(&codec)) - die_codec(&codec, "Failed to destroy codec."); + if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec."); return stats; } -static void pass1(vpx_image_t *raw, - FILE *infile, - const char *outfile_name, - const VpxInterface *encoder, - const vpx_codec_enc_cfg_t *cfg) { - VpxVideoInfo info = { - encoder->fourcc, - cfg->g_w, - cfg->g_h, - {cfg->g_timebase.num, cfg->g_timebase.den} - }; +static void pass1(vpx_image_t *raw, FILE *infile, const char *outfile_name, + const VpxInterface *encoder, const vpx_codec_enc_cfg_t *cfg, + int max_frames) { + VpxVideoInfo info = { encoder->fourcc, + cfg->g_w, + cfg->g_h, + { cfg->g_timebase.num, cfg->g_timebase.den } }; VpxVideoWriter *writer = NULL; vpx_codec_ctx_t codec; int frame_count = 0; writer = vpx_video_writer_open(outfile_name, kContainerIVF, &info); - if (!writer) - die("Failed to open %s for writing", outfile_name); + if (!writer) die("Failed to open %s for writing", outfile_name); if (vpx_codec_enc_init(&codec, encoder->codec_interface(), cfg, 0)) die_codec(&codec, "Failed to initialize encoder"); @@ -181,15 +170,17 @@ static void pass1(vpx_image_t *raw, while (vpx_img_read(raw, infile)) { ++frame_count; encode_frame(&codec, raw, frame_count, 1, 0, VPX_DL_GOOD_QUALITY, writer); + + if (max_frames > 0 && frame_count >= max_frames) break; } // Flush encoder. 
- while (encode_frame(&codec, NULL, -1, 1, 0, VPX_DL_GOOD_QUALITY, writer)) {} + while (encode_frame(&codec, NULL, -1, 1, 0, VPX_DL_GOOD_QUALITY, writer)) { + } printf("\n"); - if (vpx_codec_destroy(&codec)) - die_codec(&codec, "Failed to destroy codec."); + if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec."); vpx_video_writer_close(writer); @@ -206,26 +197,27 @@ int main(int argc, char **argv) { vpx_fixed_buf_t stats; const VpxInterface *encoder = NULL; - const int fps = 30; // TODO(dkovalev) add command line argument - const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument + const int fps = 30; // TODO(dkovalev) add command line argument + const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument const char *const codec_arg = argv[1]; const char *const width_arg = argv[2]; const char *const height_arg = argv[3]; const char *const infile_arg = argv[4]; const char *const outfile_arg = argv[5]; + int max_frames = 0; exec_name = argv[0]; - if (argc != 6) - die("Invalid number of arguments."); + if (argc != 7) die("Invalid number of arguments."); + + max_frames = (int)strtol(argv[6], NULL, 0); encoder = get_vpx_encoder_by_name(codec_arg); - if (!encoder) - die("Unsupported codec."); + if (!encoder) die("Unsupported codec."); - w = strtol(width_arg, NULL, 0); - h = strtol(height_arg, NULL, 0); + w = (int)strtol(width_arg, NULL, 0); + h = (int)strtol(height_arg, NULL, 0); - if (w <= 0 || h <= 0 || (w % 2) != 0 || (h % 2) != 0) + if (w <= 0 || h <= 0 || (w % 2) != 0 || (h % 2) != 0) die("Invalid frame size: %dx%d", w, h); if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, w, h, 1)) @@ -235,8 +227,7 @@ int main(int argc, char **argv) { // Configuration res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0); - if (res) - die_codec(&codec, "Failed to get default codec config."); + if (res) die_codec(&codec, "Failed to get default codec config."); cfg.g_w = w; cfg.g_h = h; @@ -249,13 +240,13 @@ int 
main(int argc, char **argv) { // Pass 0 cfg.g_pass = VPX_RC_FIRST_PASS; - stats = pass0(&raw, infile, encoder, &cfg); + stats = pass0(&raw, infile, encoder, &cfg, max_frames); // Pass 1 rewind(infile); cfg.g_pass = VPX_RC_LAST_PASS; cfg.rc_twopass_stats_in = stats; - pass1(&raw, infile, outfile_arg, encoder, &cfg); + pass1(&raw, infile, outfile_arg, encoder, &cfg, max_frames); free(stats.buf); vpx_img_free(&raw); diff --git a/libs/libvpx/examples/vp8_multi_resolution_encoder.c b/libs/libvpx/examples/vp8_multi_resolution_encoder.c index 0248edede0..65308a0bd0 100644 --- a/libs/libvpx/examples/vp8_multi_resolution_encoder.c +++ b/libs/libvpx/examples/vp8_multi_resolution_encoder.c @@ -35,11 +35,9 @@ #include "vpx_ports/mem_ops.h" #include "../tools_common.h" #define interface (vpx_codec_vp8_cx()) -#define fourcc 0x30385056 +#define fourcc 0x30385056 -void usage_exit(void) { - exit(EXIT_FAILURE); -} +void usage_exit(void) { exit(EXIT_FAILURE); } /* * The input video frame is downsampled several times to generate a multi-level @@ -64,107 +62,100 @@ void usage_exit(void) { int (*read_frame_p)(FILE *f, vpx_image_t *img); static int read_frame(FILE *f, vpx_image_t *img) { - size_t nbytes, to_read; - int res = 1; + size_t nbytes, to_read; + int res = 1; - to_read = img->w*img->h*3/2; - nbytes = fread(img->planes[0], 1, to_read, f); - if(nbytes != to_read) { - res = 0; - if(nbytes > 0) - printf("Warning: Read partial frame. Check your width & height!\n"); - } - return res; + to_read = img->w * img->h * 3 / 2; + nbytes = fread(img->planes[0], 1, to_read, f); + if (nbytes != to_read) { + res = 0; + if (nbytes > 0) + printf("Warning: Read partial frame. Check your width & height!\n"); + } + return res; } static int read_frame_by_row(FILE *f, vpx_image_t *img) { - size_t nbytes, to_read; - int res = 1; - int plane; + size_t nbytes, to_read; + int res = 1; + int plane; - for (plane = 0; plane < 3; plane++) - { - unsigned char *ptr; - int w = (plane ? 
(1 + img->d_w) / 2 : img->d_w); - int h = (plane ? (1 + img->d_h) / 2 : img->d_h); - int r; + for (plane = 0; plane < 3; plane++) { + unsigned char *ptr; + int w = (plane ? (1 + img->d_w) / 2 : img->d_w); + int h = (plane ? (1 + img->d_h) / 2 : img->d_h); + int r; - /* Determine the correct plane based on the image format. The for-loop - * always counts in Y,U,V order, but this may not match the order of - * the data on disk. - */ - switch (plane) - { - case 1: - ptr = img->planes[img->fmt==VPX_IMG_FMT_YV12? VPX_PLANE_V : VPX_PLANE_U]; - break; - case 2: - ptr = img->planes[img->fmt==VPX_IMG_FMT_YV12?VPX_PLANE_U : VPX_PLANE_V]; - break; - default: - ptr = img->planes[plane]; - } - - for (r = 0; r < h; r++) - { - to_read = w; - - nbytes = fread(ptr, 1, to_read, f); - if(nbytes != to_read) { - res = 0; - if(nbytes > 0) - printf("Warning: Read partial frame. Check your width & height!\n"); - break; - } - - ptr += img->stride[plane]; - } - if (!res) - break; + /* Determine the correct plane based on the image format. The for-loop + * always counts in Y,U,V order, but this may not match the order of + * the data on disk. + */ + switch (plane) { + case 1: + ptr = img->planes[img->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_V + : VPX_PLANE_U]; + break; + case 2: + ptr = img->planes[img->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_U + : VPX_PLANE_V]; + break; + default: ptr = img->planes[plane]; } - return res; + for (r = 0; r < h; r++) { + to_read = w; + + nbytes = fread(ptr, 1, to_read, f); + if (nbytes != to_read) { + res = 0; + if (nbytes > 0) + printf("Warning: Read partial frame. 
Check your width & height!\n"); + break; + } + + ptr += img->stride[plane]; + } + if (!res) break; + } + + return res; } -static void write_ivf_file_header(FILE *outfile, - const vpx_codec_enc_cfg_t *cfg, +static void write_ivf_file_header(FILE *outfile, const vpx_codec_enc_cfg_t *cfg, int frame_cnt) { - char header[32]; + char header[32]; - if(cfg->g_pass != VPX_RC_ONE_PASS && cfg->g_pass != VPX_RC_LAST_PASS) - return; - header[0] = 'D'; - header[1] = 'K'; - header[2] = 'I'; - header[3] = 'F'; - mem_put_le16(header+4, 0); /* version */ - mem_put_le16(header+6, 32); /* headersize */ - mem_put_le32(header+8, fourcc); /* headersize */ - mem_put_le16(header+12, cfg->g_w); /* width */ - mem_put_le16(header+14, cfg->g_h); /* height */ - mem_put_le32(header+16, cfg->g_timebase.den); /* rate */ - mem_put_le32(header+20, cfg->g_timebase.num); /* scale */ - mem_put_le32(header+24, frame_cnt); /* length */ - mem_put_le32(header+28, 0); /* unused */ + if (cfg->g_pass != VPX_RC_ONE_PASS && cfg->g_pass != VPX_RC_LAST_PASS) return; + header[0] = 'D'; + header[1] = 'K'; + header[2] = 'I'; + header[3] = 'F'; + mem_put_le16(header + 4, 0); /* version */ + mem_put_le16(header + 6, 32); /* headersize */ + mem_put_le32(header + 8, fourcc); /* headersize */ + mem_put_le16(header + 12, cfg->g_w); /* width */ + mem_put_le16(header + 14, cfg->g_h); /* height */ + mem_put_le32(header + 16, cfg->g_timebase.den); /* rate */ + mem_put_le32(header + 20, cfg->g_timebase.num); /* scale */ + mem_put_le32(header + 24, frame_cnt); /* length */ + mem_put_le32(header + 28, 0); /* unused */ - (void) fwrite(header, 1, 32, outfile); + (void)fwrite(header, 1, 32, outfile); } static void write_ivf_frame_header(FILE *outfile, - const vpx_codec_cx_pkt_t *pkt) -{ - char header[12]; - vpx_codec_pts_t pts; + const vpx_codec_cx_pkt_t *pkt) { + char header[12]; + vpx_codec_pts_t pts; - if(pkt->kind != VPX_CODEC_CX_FRAME_PKT) - return; + if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) return; - pts = 
pkt->data.frame.pts; - mem_put_le32(header, pkt->data.frame.sz); - mem_put_le32(header+4, pts&0xFFFFFFFF); - mem_put_le32(header+8, pts >> 32); + pts = pkt->data.frame.pts; + mem_put_le32(header, pkt->data.frame.sz); + mem_put_le32(header + 4, pts & 0xFFFFFFFF); + mem_put_le32(header + 8, pts >> 32); - (void) fwrite(header, 1, 12, outfile); + (void)fwrite(header, 1, 12, outfile); } /* Temporal scaling parameters */ @@ -173,557 +164,504 @@ static void write_ivf_frame_header(FILE *outfile, * parameters will be passed in as user parameters in another version. */ static void set_temporal_layer_pattern(int num_temporal_layers, - vpx_codec_enc_cfg_t *cfg, - int bitrate, - int *layer_flags) -{ - assert(num_temporal_layers <= MAX_NUM_TEMPORAL_LAYERS); - switch (num_temporal_layers) - { - case 1: - { - /* 1-layer */ - cfg->ts_number_layers = 1; - cfg->ts_periodicity = 1; - cfg->ts_rate_decimator[0] = 1; - cfg->ts_layer_id[0] = 0; - cfg->ts_target_bitrate[0] = bitrate; + vpx_codec_enc_cfg_t *cfg, int bitrate, + int *layer_flags) { + assert(num_temporal_layers <= MAX_NUM_TEMPORAL_LAYERS); + switch (num_temporal_layers) { + case 1: { + /* 1-layer */ + cfg->ts_number_layers = 1; + cfg->ts_periodicity = 1; + cfg->ts_rate_decimator[0] = 1; + cfg->ts_layer_id[0] = 0; + cfg->ts_target_bitrate[0] = bitrate; - // Update L only. - layer_flags[0] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; - break; + // Update L only. + layer_flags[0] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; + break; } - case 2: - { - /* 2-layers, with sync point at first frame of layer 1. */ - cfg->ts_number_layers = 2; - cfg->ts_periodicity = 2; - cfg->ts_rate_decimator[0] = 2; - cfg->ts_rate_decimator[1] = 1; - cfg->ts_layer_id[0] = 0; - cfg->ts_layer_id[1] = 1; - // Use 60/40 bit allocation as example. - cfg->ts_target_bitrate[0] = 0.6f * bitrate; - cfg->ts_target_bitrate[1] = bitrate; + case 2: { + /* 2-layers, with sync point at first frame of layer 1. 
*/ + cfg->ts_number_layers = 2; + cfg->ts_periodicity = 2; + cfg->ts_rate_decimator[0] = 2; + cfg->ts_rate_decimator[1] = 1; + cfg->ts_layer_id[0] = 0; + cfg->ts_layer_id[1] = 1; + // Use 60/40 bit allocation as example. + cfg->ts_target_bitrate[0] = 0.6f * bitrate; + cfg->ts_target_bitrate[1] = bitrate; - /* 0=L, 1=GF */ - // ARF is used as predictor for all frames, and is only updated on - // key frame. Sync point every 8 frames. + /* 0=L, 1=GF */ + // ARF is used as predictor for all frames, and is only updated on + // key frame. Sync point every 8 frames. - // Layer 0: predict from L and ARF, update L and G. - layer_flags[0] = VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_UPD_ARF; + // Layer 0: predict from L and ARF, update L and G. + layer_flags[0] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF; - // Layer 1: sync point: predict from L and ARF, and update G. - layer_flags[1] = VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_ARF; + // Layer 1: sync point: predict from L and ARF, and update G. + layer_flags[1] = + VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF; - // Layer 0, predict from L and ARF, update L. - layer_flags[2] = VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF; + // Layer 0, predict from L and ARF, update L. + layer_flags[2] = + VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; - // Layer 1: predict from L, G and ARF, and update G. - layer_flags[3] = VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_ENTROPY; + // Layer 1: predict from L, G and ARF, and update G. 
+ layer_flags[3] = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST | + VP8_EFLAG_NO_UPD_ENTROPY; - // Layer 0 - layer_flags[4] = layer_flags[2]; + // Layer 0 + layer_flags[4] = layer_flags[2]; - // Layer 1 - layer_flags[5] = layer_flags[3]; + // Layer 1 + layer_flags[5] = layer_flags[3]; - // Layer 0 - layer_flags[6] = layer_flags[4]; + // Layer 0 + layer_flags[6] = layer_flags[4]; - // Layer 1 - layer_flags[7] = layer_flags[5]; - break; + // Layer 1 + layer_flags[7] = layer_flags[5]; + break; } case 3: - default: - { - // 3-layers structure where ARF is used as predictor for all frames, - // and is only updated on key frame. - // Sync points for layer 1 and 2 every 8 frames. - cfg->ts_number_layers = 3; - cfg->ts_periodicity = 4; - cfg->ts_rate_decimator[0] = 4; - cfg->ts_rate_decimator[1] = 2; - cfg->ts_rate_decimator[2] = 1; - cfg->ts_layer_id[0] = 0; - cfg->ts_layer_id[1] = 2; - cfg->ts_layer_id[2] = 1; - cfg->ts_layer_id[3] = 2; - // Use 40/20/40 bit allocation as example. - cfg->ts_target_bitrate[0] = 0.4f * bitrate; - cfg->ts_target_bitrate[1] = 0.6f * bitrate; - cfg->ts_target_bitrate[2] = bitrate; + default: { + // 3-layers structure where ARF is used as predictor for all frames, + // and is only updated on key frame. + // Sync points for layer 1 and 2 every 8 frames. + cfg->ts_number_layers = 3; + cfg->ts_periodicity = 4; + cfg->ts_rate_decimator[0] = 4; + cfg->ts_rate_decimator[1] = 2; + cfg->ts_rate_decimator[2] = 1; + cfg->ts_layer_id[0] = 0; + cfg->ts_layer_id[1] = 2; + cfg->ts_layer_id[2] = 1; + cfg->ts_layer_id[3] = 2; + // Use 40/20/40 bit allocation as example. + cfg->ts_target_bitrate[0] = 0.4f * bitrate; + cfg->ts_target_bitrate[1] = 0.6f * bitrate; + cfg->ts_target_bitrate[2] = bitrate; - /* 0=L, 1=GF, 2=ARF */ + /* 0=L, 1=GF, 2=ARF */ - // Layer 0: predict from L and ARF; update L and G. - layer_flags[0] = VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_REF_GF; + // Layer 0: predict from L and ARF; update L and G. 
+ layer_flags[0] = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF; - // Layer 2: sync point: predict from L and ARF; update none. - layer_flags[1] = VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_ENTROPY; + // Layer 2: sync point: predict from L and ARF; update none. + layer_flags[1] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | + VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST | + VP8_EFLAG_NO_UPD_ENTROPY; - // Layer 1: sync point: predict from L and ARF; update G. - layer_flags[2] = VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_UPD_LAST; + // Layer 1: sync point: predict from L and ARF; update G. + layer_flags[2] = + VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST; - // Layer 2: predict from L, G, ARF; update none. - layer_flags[3] = VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_ENTROPY; + // Layer 2: predict from L, G, ARF; update none. + layer_flags[3] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | + VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY; - // Layer 0: predict from L and ARF; update L. - layer_flags[4] = VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_REF_GF; + // Layer 0: predict from L and ARF; update L. + layer_flags[4] = + VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF; - // Layer 2: predict from L, G, ARF; update none. - layer_flags[5] = layer_flags[3]; + // Layer 2: predict from L, G, ARF; update none. + layer_flags[5] = layer_flags[3]; - // Layer 1: predict from L, G, ARF; update G. - layer_flags[6] = VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_UPD_LAST; + // Layer 1: predict from L, G, ARF; update G. + layer_flags[6] = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST; - // Layer 2: predict from L, G, ARF; update none. - layer_flags[7] = layer_flags[3]; - break; - } + // Layer 2: predict from L, G, ARF; update none. 
+ layer_flags[7] = layer_flags[3]; + break; } + } } /* The periodicity of the pattern given the number of temporal layers. */ -static int periodicity_to_num_layers[MAX_NUM_TEMPORAL_LAYERS] = {1, 8, 8}; +static int periodicity_to_num_layers[MAX_NUM_TEMPORAL_LAYERS] = { 1, 8, 8 }; -int main(int argc, char **argv) -{ - FILE *infile, *outfile[NUM_ENCODERS]; - FILE *downsampled_input[NUM_ENCODERS - 1]; - char filename[50]; - vpx_codec_ctx_t codec[NUM_ENCODERS]; - vpx_codec_enc_cfg_t cfg[NUM_ENCODERS]; - int frame_cnt = 0; - vpx_image_t raw[NUM_ENCODERS]; - vpx_codec_err_t res[NUM_ENCODERS]; +int main(int argc, char **argv) { + FILE *infile, *outfile[NUM_ENCODERS]; + FILE *downsampled_input[NUM_ENCODERS - 1]; + char filename[50]; + vpx_codec_ctx_t codec[NUM_ENCODERS]; + vpx_codec_enc_cfg_t cfg[NUM_ENCODERS]; + int frame_cnt = 0; + vpx_image_t raw[NUM_ENCODERS]; + vpx_codec_err_t res[NUM_ENCODERS]; - int i; - long width; - long height; - int length_frame; - int frame_avail; - int got_data; - int flags = 0; - int layer_id = 0; + int i; + long width; + long height; + int length_frame; + int frame_avail; + int got_data; + int flags = 0; + int layer_id = 0; - int layer_flags[VPX_TS_MAX_PERIODICITY * NUM_ENCODERS] - = {0}; - int flag_periodicity; + int layer_flags[VPX_TS_MAX_PERIODICITY * NUM_ENCODERS] = { 0 }; + int flag_periodicity; - /*Currently, only realtime mode is supported in multi-resolution encoding.*/ - int arg_deadline = VPX_DL_REALTIME; + /*Currently, only realtime mode is supported in multi-resolution encoding.*/ + int arg_deadline = VPX_DL_REALTIME; - /* Set show_psnr to 1/0 to show/not show PSNR. Choose show_psnr=0 if you - don't need to know PSNR, which will skip PSNR calculation and save - encoding time. 
*/ - int show_psnr = 0; - int key_frame_insert = 0; - uint64_t psnr_sse_total[NUM_ENCODERS] = {0}; - uint64_t psnr_samples_total[NUM_ENCODERS] = {0}; - double psnr_totals[NUM_ENCODERS][4] = {{0,0}}; - int psnr_count[NUM_ENCODERS] = {0}; + /* Set show_psnr to 1/0 to show/not show PSNR. Choose show_psnr=0 if you + don't need to know PSNR, which will skip PSNR calculation and save + encoding time. */ + int show_psnr = 0; + int key_frame_insert = 0; + uint64_t psnr_sse_total[NUM_ENCODERS] = { 0 }; + uint64_t psnr_samples_total[NUM_ENCODERS] = { 0 }; + double psnr_totals[NUM_ENCODERS][4] = { { 0, 0 } }; + int psnr_count[NUM_ENCODERS] = { 0 }; - double cx_time = 0; - struct timeval tv1, tv2, difftv; + int64_t cx_time = 0; - /* Set the required target bitrates for each resolution level. - * If target bitrate for highest-resolution level is set to 0, - * (i.e. target_bitrate[0]=0), we skip encoding at that level. + /* Set the required target bitrates for each resolution level. + * If target bitrate for highest-resolution level is set to 0, + * (i.e. target_bitrate[0]=0), we skip encoding at that level. + */ + unsigned int target_bitrate[NUM_ENCODERS] = { 1000, 500, 100 }; + + /* Enter the frame rate of the input video */ + int framerate = 30; + + /* Set down-sampling factor for each resolution level. + dsf[0] controls down sampling from level 0 to level 1; + dsf[1] controls down sampling from level 1 to level 2; + dsf[2] is not used. */ + vpx_rational_t dsf[NUM_ENCODERS] = { { 2, 1 }, { 2, 1 }, { 1, 1 } }; + + /* Set the number of temporal layers for each encoder/resolution level, + * starting from highest resoln down to lowest resoln. 
*/ + unsigned int num_temporal_layers[NUM_ENCODERS] = { 3, 3, 3 }; + + if (argc != (7 + 3 * NUM_ENCODERS)) + die("Usage: %s " + " \n", + argv[0]); + + printf("Using %s\n", vpx_codec_iface_name(interface)); + + width = strtol(argv[1], NULL, 0); + height = strtol(argv[2], NULL, 0); + framerate = strtol(argv[3], NULL, 0); + + if (width < 16 || width % 2 || height < 16 || height % 2) + die("Invalid resolution: %ldx%ld", width, height); + + /* Open input video file for encoding */ + if (!(infile = fopen(argv[4], "rb"))) + die("Failed to open %s for reading", argv[4]); + + /* Open output file for each encoder to output bitstreams */ + for (i = 0; i < NUM_ENCODERS; i++) { + if (!target_bitrate[i]) { + outfile[i] = NULL; + continue; + } + + if (!(outfile[i] = fopen(argv[i + 5], "wb"))) + die("Failed to open %s for writing", argv[i + 4]); + } + + // Bitrates per spatial layer: overwrite default rates above. + for (i = 0; i < NUM_ENCODERS; i++) { + target_bitrate[i] = strtol(argv[NUM_ENCODERS + 5 + i], NULL, 0); + } + + // Temporal layers per spatial layers: overwrite default settings above. + for (i = 0; i < NUM_ENCODERS; i++) { + num_temporal_layers[i] = strtol(argv[2 * NUM_ENCODERS + 5 + i], NULL, 0); + if (num_temporal_layers[i] < 1 || num_temporal_layers[i] > 3) + die("Invalid temporal layers: %d, Must be 1, 2, or 3. \n", + num_temporal_layers); + } + + /* Open file to write out each spatially downsampled input stream. */ + for (i = 0; i < NUM_ENCODERS - 1; i++) { + // Highest resoln is encoder 0. 
+ if (sprintf(filename, "ds%d.yuv", NUM_ENCODERS - i) < 0) { + return EXIT_FAILURE; + } + downsampled_input[i] = fopen(filename, "wb"); + } + + key_frame_insert = strtol(argv[3 * NUM_ENCODERS + 5], NULL, 0); + + show_psnr = strtol(argv[3 * NUM_ENCODERS + 6], NULL, 0); + + /* Populate default encoder configuration */ + for (i = 0; i < NUM_ENCODERS; i++) { + res[i] = vpx_codec_enc_config_default(interface, &cfg[i], 0); + if (res[i]) { + printf("Failed to get config: %s\n", vpx_codec_err_to_string(res[i])); + return EXIT_FAILURE; + } + } + + /* + * Update the default configuration according to needs of the application. + */ + /* Highest-resolution encoder settings */ + cfg[0].g_w = width; + cfg[0].g_h = height; + cfg[0].rc_dropframe_thresh = 0; + cfg[0].rc_end_usage = VPX_CBR; + cfg[0].rc_resize_allowed = 0; + cfg[0].rc_min_quantizer = 2; + cfg[0].rc_max_quantizer = 56; + cfg[0].rc_undershoot_pct = 100; + cfg[0].rc_overshoot_pct = 15; + cfg[0].rc_buf_initial_sz = 500; + cfg[0].rc_buf_optimal_sz = 600; + cfg[0].rc_buf_sz = 1000; + cfg[0].g_error_resilient = 1; /* Enable error resilient mode */ + cfg[0].g_lag_in_frames = 0; + + /* Disable automatic keyframe placement */ + /* Note: These 3 settings are copied to all levels. But, except the lowest + * resolution level, all other levels are set to VPX_KF_DISABLED internally. + */ + cfg[0].kf_mode = VPX_KF_AUTO; + cfg[0].kf_min_dist = 3000; + cfg[0].kf_max_dist = 3000; + + cfg[0].rc_target_bitrate = target_bitrate[0]; /* Set target bitrate */ + cfg[0].g_timebase.num = 1; /* Set fps */ + cfg[0].g_timebase.den = framerate; + + /* Other-resolution encoder settings */ + for (i = 1; i < NUM_ENCODERS; i++) { + memcpy(&cfg[i], &cfg[0], sizeof(vpx_codec_enc_cfg_t)); + + cfg[i].rc_target_bitrate = target_bitrate[i]; + + /* Note: Width & height of other-resolution encoders are calculated + * from the highest-resolution encoder's size and the corresponding + * down_sampling_factor. 
*/ - unsigned int target_bitrate[NUM_ENCODERS]={1000, 500, 100}; - - /* Enter the frame rate of the input video */ - int framerate = 30; - - /* Set down-sampling factor for each resolution level. - dsf[0] controls down sampling from level 0 to level 1; - dsf[1] controls down sampling from level 1 to level 2; - dsf[2] is not used. */ - vpx_rational_t dsf[NUM_ENCODERS] = {{2, 1}, {2, 1}, {1, 1}}; - - /* Set the number of temporal layers for each encoder/resolution level, - * starting from highest resoln down to lowest resoln. */ - unsigned int num_temporal_layers[NUM_ENCODERS] = {3, 3, 3}; - - if(argc!= (7 + 3 * NUM_ENCODERS)) - die("Usage: %s " - " \n", - argv[0]); - - printf("Using %s\n",vpx_codec_iface_name(interface)); - - width = strtol(argv[1], NULL, 0); - height = strtol(argv[2], NULL, 0); - framerate = strtol(argv[3], NULL, 0); - - if(width < 16 || width%2 || height <16 || height%2) - die("Invalid resolution: %ldx%ld", width, height); - - /* Open input video file for encoding */ - if(!(infile = fopen(argv[4], "rb"))) - die("Failed to open %s for reading", argv[4]); - - /* Open output file for each encoder to output bitstreams */ - for (i=0; i< NUM_ENCODERS; i++) { - if(!target_bitrate[i]) - { - outfile[i] = NULL; - continue; + unsigned int iw = cfg[i - 1].g_w * dsf[i - 1].den + dsf[i - 1].num - 1; + unsigned int ih = cfg[i - 1].g_h * dsf[i - 1].den + dsf[i - 1].num - 1; + cfg[i].g_w = iw / dsf[i - 1].num; + cfg[i].g_h = ih / dsf[i - 1].num; + } + + /* Make width & height to be multiplier of 2. */ + // Should support odd size ??? + if ((cfg[i].g_w) % 2) cfg[i].g_w++; + if ((cfg[i].g_h) % 2) cfg[i].g_h++; + } + + // Set the number of threads per encode/spatial layer. + // (1, 1, 1) means no encoder threading. 
+ cfg[0].g_threads = 2; + cfg[1].g_threads = 1; + cfg[2].g_threads = 1; + + /* Allocate image for each encoder */ + for (i = 0; i < NUM_ENCODERS; i++) + if (!vpx_img_alloc(&raw[i], VPX_IMG_FMT_I420, cfg[i].g_w, cfg[i].g_h, 32)) + die("Failed to allocate image", cfg[i].g_w, cfg[i].g_h); + + if (raw[0].stride[VPX_PLANE_Y] == raw[0].d_w) + read_frame_p = read_frame; + else + read_frame_p = read_frame_by_row; + + for (i = 0; i < NUM_ENCODERS; i++) + if (outfile[i]) write_ivf_file_header(outfile[i], &cfg[i], 0); + + /* Temporal layers settings */ + for (i = 0; i < NUM_ENCODERS; i++) { + set_temporal_layer_pattern(num_temporal_layers[i], &cfg[i], + cfg[i].rc_target_bitrate, + &layer_flags[i * VPX_TS_MAX_PERIODICITY]); + } + + /* Initialize multi-encoder */ + if (vpx_codec_enc_init_multi(&codec[0], interface, &cfg[0], NUM_ENCODERS, + (show_psnr ? VPX_CODEC_USE_PSNR : 0), &dsf[0])) + die_codec(&codec[0], "Failed to initialize encoder"); + + /* The extra encoding configuration parameters can be set as follows. */ + /* Set encoding speed */ + for (i = 0; i < NUM_ENCODERS; i++) { + int speed = -6; + /* Lower speed for the lowest resolution. */ + if (i == NUM_ENCODERS - 1) speed = -4; + if (vpx_codec_control(&codec[i], VP8E_SET_CPUUSED, speed)) + die_codec(&codec[i], "Failed to set cpu_used"); + } + + /* Set static threshold = 1 for all encoders */ + for (i = 0; i < NUM_ENCODERS; i++) { + if (vpx_codec_control(&codec[i], VP8E_SET_STATIC_THRESHOLD, 1)) + die_codec(&codec[i], "Failed to set static threshold"); + } + + /* Set NOISE_SENSITIVITY to do TEMPORAL_DENOISING */ + /* Enable denoising for the highest-resolution encoder. 
*/ + if (vpx_codec_control(&codec[0], VP8E_SET_NOISE_SENSITIVITY, 4)) + die_codec(&codec[0], "Failed to set noise_sensitivity"); + for (i = 1; i < NUM_ENCODERS; i++) { + if (vpx_codec_control(&codec[i], VP8E_SET_NOISE_SENSITIVITY, 0)) + die_codec(&codec[i], "Failed to set noise_sensitivity"); + } + + /* Set the number of token partitions */ + for (i = 0; i < NUM_ENCODERS; i++) { + if (vpx_codec_control(&codec[i], VP8E_SET_TOKEN_PARTITIONS, 1)) + die_codec(&codec[i], "Failed to set static threshold"); + } + + /* Set the max intra target bitrate */ + for (i = 0; i < NUM_ENCODERS; i++) { + unsigned int max_intra_size_pct = + (int)(((double)cfg[0].rc_buf_optimal_sz * 0.5) * framerate / 10); + if (vpx_codec_control(&codec[i], VP8E_SET_MAX_INTRA_BITRATE_PCT, + max_intra_size_pct)) + die_codec(&codec[i], "Failed to set static threshold"); + // printf("%d %d \n",i,max_intra_size_pct); + } + + frame_avail = 1; + got_data = 0; + + while (frame_avail || got_data) { + struct vpx_usec_timer timer; + vpx_codec_iter_t iter[NUM_ENCODERS] = { NULL }; + const vpx_codec_cx_pkt_t *pkt[NUM_ENCODERS]; + + flags = 0; + frame_avail = read_frame_p(infile, &raw[0]); + + if (frame_avail) { + for (i = 1; i < NUM_ENCODERS; i++) { + /*Scale the image down a number of times by downsampling factor*/ + /* FilterMode 1 or 2 give better psnr than FilterMode 0. */ + I420Scale( + raw[i - 1].planes[VPX_PLANE_Y], raw[i - 1].stride[VPX_PLANE_Y], + raw[i - 1].planes[VPX_PLANE_U], raw[i - 1].stride[VPX_PLANE_U], + raw[i - 1].planes[VPX_PLANE_V], raw[i - 1].stride[VPX_PLANE_V], + raw[i - 1].d_w, raw[i - 1].d_h, raw[i].planes[VPX_PLANE_Y], + raw[i].stride[VPX_PLANE_Y], raw[i].planes[VPX_PLANE_U], + raw[i].stride[VPX_PLANE_U], raw[i].planes[VPX_PLANE_V], + raw[i].stride[VPX_PLANE_V], raw[i].d_w, raw[i].d_h, 1); + /* Write out down-sampled input. 
*/ + length_frame = cfg[i].g_w * cfg[i].g_h * 3 / 2; + if (fwrite(raw[i].planes[0], 1, length_frame, + downsampled_input[NUM_ENCODERS - i - 1]) != length_frame) { + return EXIT_FAILURE; } - - if(!(outfile[i] = fopen(argv[i+5], "wb"))) - die("Failed to open %s for writing", argv[i+4]); + } } - // Bitrates per spatial layer: overwrite default rates above. - for (i=0; i< NUM_ENCODERS; i++) - { - target_bitrate[i] = strtol(argv[NUM_ENCODERS + 5 + i], NULL, 0); + /* Set the flags (reference and update) for all the encoders.*/ + for (i = 0; i < NUM_ENCODERS; i++) { + layer_id = cfg[i].ts_layer_id[frame_cnt % cfg[i].ts_periodicity]; + flags = 0; + flag_periodicity = periodicity_to_num_layers[num_temporal_layers[i] - 1]; + flags = layer_flags[i * VPX_TS_MAX_PERIODICITY + + frame_cnt % flag_periodicity]; + // Key frame flag for first frame. + if (frame_cnt == 0) { + flags |= VPX_EFLAG_FORCE_KF; + } + if (frame_cnt > 0 && frame_cnt == key_frame_insert) { + flags = VPX_EFLAG_FORCE_KF; + } + + vpx_codec_control(&codec[i], VP8E_SET_FRAME_FLAGS, flags); + vpx_codec_control(&codec[i], VP8E_SET_TEMPORAL_LAYER_ID, layer_id); } - // Temporal layers per spatial layers: overwrite default settings above. - for (i=0; i< NUM_ENCODERS; i++) - { - num_temporal_layers[i] = strtol(argv[2 * NUM_ENCODERS + 5 + i], NULL, 0); - if (num_temporal_layers[i] < 1 || num_temporal_layers[i] > 3) - die("Invalid temporal layers: %d, Must be 1, 2, or 3. \n", - num_temporal_layers); + /* Encode each frame at multi-levels */ + /* Note the flags must be set to 0 in the encode call if they are set + for each frame with the vpx_codec_control(), as done above. */ + vpx_usec_timer_start(&timer); + if (vpx_codec_encode(&codec[0], frame_avail ? &raw[0] : NULL, frame_cnt, 1, + 0, arg_deadline)) { + die_codec(&codec[0], "Failed to encode frame"); } + vpx_usec_timer_mark(&timer); + cx_time += vpx_usec_timer_elapsed(&timer); - /* Open file to write out each spatially downsampled input stream. 
*/ - for (i=0; i< NUM_ENCODERS - 1; i++) - { - // Highest resoln is encoder 0. - if (sprintf(filename,"ds%d.yuv",NUM_ENCODERS - i) < 0) - { - return EXIT_FAILURE; - } - downsampled_input[i] = fopen(filename,"wb"); - } + for (i = NUM_ENCODERS - 1; i >= 0; i--) { + got_data = 0; + while ((pkt[i] = vpx_codec_get_cx_data(&codec[i], &iter[i]))) { + got_data = 1; + switch (pkt[i]->kind) { + case VPX_CODEC_CX_FRAME_PKT: + write_ivf_frame_header(outfile[i], pkt[i]); + (void)fwrite(pkt[i]->data.frame.buf, 1, pkt[i]->data.frame.sz, + outfile[i]); + break; + case VPX_CODEC_PSNR_PKT: + if (show_psnr) { + int j; - key_frame_insert = strtol(argv[3 * NUM_ENCODERS + 5], NULL, 0); - - show_psnr = strtol(argv[3 * NUM_ENCODERS + 6], NULL, 0); - - - /* Populate default encoder configuration */ - for (i=0; i< NUM_ENCODERS; i++) - { - res[i] = vpx_codec_enc_config_default(interface, &cfg[i], 0); - if(res[i]) { - printf("Failed to get config: %s\n", vpx_codec_err_to_string(res[i])); - return EXIT_FAILURE; - } - } - - /* - * Update the default configuration according to needs of the application. - */ - /* Highest-resolution encoder settings */ - cfg[0].g_w = width; - cfg[0].g_h = height; - cfg[0].rc_dropframe_thresh = 0; - cfg[0].rc_end_usage = VPX_CBR; - cfg[0].rc_resize_allowed = 0; - cfg[0].rc_min_quantizer = 2; - cfg[0].rc_max_quantizer = 56; - cfg[0].rc_undershoot_pct = 100; - cfg[0].rc_overshoot_pct = 15; - cfg[0].rc_buf_initial_sz = 500; - cfg[0].rc_buf_optimal_sz = 600; - cfg[0].rc_buf_sz = 1000; - cfg[0].g_error_resilient = 1; /* Enable error resilient mode */ - cfg[0].g_lag_in_frames = 0; - - /* Disable automatic keyframe placement */ - /* Note: These 3 settings are copied to all levels. But, except the lowest - * resolution level, all other levels are set to VPX_KF_DISABLED internally. 
- */ - cfg[0].kf_mode = VPX_KF_AUTO; - cfg[0].kf_min_dist = 3000; - cfg[0].kf_max_dist = 3000; - - cfg[0].rc_target_bitrate = target_bitrate[0]; /* Set target bitrate */ - cfg[0].g_timebase.num = 1; /* Set fps */ - cfg[0].g_timebase.den = framerate; - - /* Other-resolution encoder settings */ - for (i=1; i< NUM_ENCODERS; i++) - { - memcpy(&cfg[i], &cfg[0], sizeof(vpx_codec_enc_cfg_t)); - - cfg[i].rc_target_bitrate = target_bitrate[i]; - - /* Note: Width & height of other-resolution encoders are calculated - * from the highest-resolution encoder's size and the corresponding - * down_sampling_factor. - */ - { - unsigned int iw = cfg[i-1].g_w*dsf[i-1].den + dsf[i-1].num - 1; - unsigned int ih = cfg[i-1].g_h*dsf[i-1].den + dsf[i-1].num - 1; - cfg[i].g_w = iw/dsf[i-1].num; - cfg[i].g_h = ih/dsf[i-1].num; - } - - /* Make width & height to be multiplier of 2. */ - // Should support odd size ??? - if((cfg[i].g_w)%2)cfg[i].g_w++; - if((cfg[i].g_h)%2)cfg[i].g_h++; - } - - - // Set the number of threads per encode/spatial layer. - // (1, 1, 1) means no encoder threading. 
- cfg[0].g_threads = 2; - cfg[1].g_threads = 1; - cfg[2].g_threads = 1; - - /* Allocate image for each encoder */ - for (i=0; i< NUM_ENCODERS; i++) - if(!vpx_img_alloc(&raw[i], VPX_IMG_FMT_I420, cfg[i].g_w, cfg[i].g_h, 32)) - die("Failed to allocate image", cfg[i].g_w, cfg[i].g_h); - - if (raw[0].stride[VPX_PLANE_Y] == raw[0].d_w) - read_frame_p = read_frame; - else - read_frame_p = read_frame_by_row; - - for (i=0; i< NUM_ENCODERS; i++) - if(outfile[i]) - write_ivf_file_header(outfile[i], &cfg[i], 0); - - /* Temporal layers settings */ - for ( i=0; i 0 && frame_cnt == key_frame_insert) - { - flags = VPX_EFLAG_FORCE_KF; + psnr_sse_total[i] += pkt[i]->data.psnr.sse[0]; + psnr_samples_total[i] += pkt[i]->data.psnr.samples[0]; + for (j = 0; j < 4; j++) { + psnr_totals[i][j] += pkt[i]->data.psnr.psnr[j]; + } + psnr_count[i]++; } - vpx_codec_control(&codec[i], VP8E_SET_FRAME_FLAGS, flags); - vpx_codec_control(&codec[i], VP8E_SET_TEMPORAL_LAYER_ID, layer_id); + break; + default: break; } - - gettimeofday(&tv1, NULL); - /* Encode each frame at multi-levels */ - /* Note the flags must be set to 0 in the encode call if they are set - for each frame with the vpx_codec_control(), as done above. */ - if(vpx_codec_encode(&codec[0], frame_avail? 
&raw[0] : NULL, - frame_cnt, 1, 0, arg_deadline)) - { - die_codec(&codec[0], "Failed to encode frame"); - } - gettimeofday(&tv2, NULL); - timersub(&tv2, &tv1, &difftv); - cx_time += (double)(difftv.tv_sec * 1000000 + difftv.tv_usec); - for (i=NUM_ENCODERS-1; i>=0 ; i--) - { - got_data = 0; - while( (pkt[i] = vpx_codec_get_cx_data(&codec[i], &iter[i])) ) - { - got_data = 1; - switch(pkt[i]->kind) { - case VPX_CODEC_CX_FRAME_PKT: - write_ivf_frame_header(outfile[i], pkt[i]); - (void) fwrite(pkt[i]->data.frame.buf, 1, - pkt[i]->data.frame.sz, outfile[i]); - break; - case VPX_CODEC_PSNR_PKT: - if (show_psnr) - { - int j; - - psnr_sse_total[i] += pkt[i]->data.psnr.sse[0]; - psnr_samples_total[i] += pkt[i]->data.psnr.samples[0]; - for (j = 0; j < 4; j++) - { - psnr_totals[i][j] += pkt[i]->data.psnr.psnr[j]; - } - psnr_count[i]++; - } - - break; - default: - break; - } - printf(pkt[i]->kind == VPX_CODEC_CX_FRAME_PKT - && (pkt[i]->data.frame.flags & VPX_FRAME_IS_KEY)? "K":""); - fflush(stdout); - } - } - frame_cnt++; + printf(pkt[i]->kind == VPX_CODEC_CX_FRAME_PKT && + (pkt[i]->data.frame.flags & VPX_FRAME_IS_KEY) + ? 
"K" + : ""); + fflush(stdout); + } } - printf("\n"); - printf("FPS for encoding %d %f %f \n", frame_cnt, (float)cx_time / 1000000, - 1000000 * (double)frame_cnt / (double)cx_time); + frame_cnt++; + } + printf("\n"); + printf("Frame cnt and encoding time/FPS stats for encoding: %d %f %f \n", + frame_cnt, 1000 * (float)cx_time / (double)(frame_cnt * 1000000), + 1000000 * (double)frame_cnt / (double)cx_time); - fclose(infile); + fclose(infile); - printf("Processed %ld frames.\n",(long int)frame_cnt-1); - for (i=0; i< NUM_ENCODERS; i++) - { - /* Calculate PSNR and print it out */ - if ( (show_psnr) && (psnr_count[i]>0) ) - { - int j; - double ovpsnr = sse_to_psnr(psnr_samples_total[i], 255.0, - psnr_sse_total[i]); + printf("Processed %ld frames.\n", (long int)frame_cnt - 1); + for (i = 0; i < NUM_ENCODERS; i++) { + /* Calculate PSNR and print it out */ + if ((show_psnr) && (psnr_count[i] > 0)) { + int j; + double ovpsnr = + sse_to_psnr(psnr_samples_total[i], 255.0, psnr_sse_total[i]); - fprintf(stderr, "\n ENC%d PSNR (Overall/Avg/Y/U/V)", i); + fprintf(stderr, "\n ENC%d PSNR (Overall/Avg/Y/U/V)", i); - fprintf(stderr, " %.3lf", ovpsnr); - for (j = 0; j < 4; j++) - { - fprintf(stderr, " %.3lf", psnr_totals[i][j]/psnr_count[i]); - } - } - - if(vpx_codec_destroy(&codec[i])) - die_codec(&codec[i], "Failed to destroy codec"); - - vpx_img_free(&raw[i]); - - if(!outfile[i]) - continue; - - /* Try to rewrite the file header with the actual frame count */ - if(!fseek(outfile[i], 0, SEEK_SET)) - write_ivf_file_header(outfile[i], &cfg[i], frame_cnt-1); - fclose(outfile[i]); + fprintf(stderr, " %.3lf", ovpsnr); + for (j = 0; j < 4; j++) { + fprintf(stderr, " %.3lf", psnr_totals[i][j] / psnr_count[i]); + } } - printf("\n"); - return EXIT_SUCCESS; + if (vpx_codec_destroy(&codec[i])) + die_codec(&codec[i], "Failed to destroy codec"); + + vpx_img_free(&raw[i]); + + if (!outfile[i]) continue; + + /* Try to rewrite the file header with the actual frame count */ + if (!fseek(outfile[i], 
0, SEEK_SET)) + write_ivf_file_header(outfile[i], &cfg[i], frame_cnt - 1); + fclose(outfile[i]); + } + printf("\n"); + + return EXIT_SUCCESS; } diff --git a/libs/libvpx/examples/vp8cx_set_ref.c b/libs/libvpx/examples/vp8cx_set_ref.c index 8b4cc303d3..846477c61e 100644 --- a/libs/libvpx/examples/vp8cx_set_ref.c +++ b/libs/libvpx/examples/vp8cx_set_ref.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - // VP8 Set Reference Frame // ======================= // @@ -52,6 +51,7 @@ #include "vpx/vp8cx.h" #include "vpx/vpx_encoder.h" +#include "vp8/common/common.h" #include "../tools_common.h" #include "../video_writer.h" @@ -64,25 +64,21 @@ void usage_exit(void) { exit(EXIT_FAILURE); } -static int encode_frame(vpx_codec_ctx_t *codec, - vpx_image_t *img, - int frame_index, - VpxVideoWriter *writer) { +static int encode_frame(vpx_codec_ctx_t *codec, vpx_image_t *img, + int frame_index, VpxVideoWriter *writer) { int got_pkts = 0; vpx_codec_iter_t iter = NULL; const vpx_codec_cx_pkt_t *pkt = NULL; - const vpx_codec_err_t res = vpx_codec_encode(codec, img, frame_index, 1, 0, - VPX_DL_GOOD_QUALITY); - if (res != VPX_CODEC_OK) - die_codec(codec, "Failed to encode frame"); + const vpx_codec_err_t res = + vpx_codec_encode(codec, img, frame_index, 1, 0, VPX_DL_GOOD_QUALITY); + if (res != VPX_CODEC_OK) die_codec(codec, "Failed to encode frame"); while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) { got_pkts = 1; if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) { const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0; - if (!vpx_video_writer_write_frame(writer, - pkt->data.frame.buf, + if (!vpx_video_writer_write_frame(writer, pkt->data.frame.buf, pkt->data.frame.sz, pkt->data.frame.pts)) { die_codec(codec, "Failed to write compressed frame"); @@ -98,55 +94,53 @@ static int encode_frame(vpx_codec_ctx_t *codec, int main(int argc, char **argv) { FILE *infile = NULL; - vpx_codec_ctx_t codec = {0}; - vpx_codec_enc_cfg_t cfg = {0}; + 
vpx_codec_ctx_t codec; + vpx_codec_enc_cfg_t cfg; int frame_count = 0; vpx_image_t raw; vpx_codec_err_t res; - VpxVideoInfo info = {0}; + VpxVideoInfo info; VpxVideoWriter *writer = NULL; const VpxInterface *encoder = NULL; int update_frame_num = 0; - const int fps = 30; // TODO(dkovalev) add command line argument - const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument + const int fps = 30; // TODO(dkovalev) add command line argument + const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument + + vp8_zero(codec); + vp8_zero(cfg); + vp8_zero(info); exec_name = argv[0]; - if (argc != 6) - die("Invalid number of arguments"); + if (argc != 6) die("Invalid number of arguments"); // TODO(dkovalev): add vp9 support and rename the file accordingly encoder = get_vpx_encoder_by_name("vp8"); - if (!encoder) - die("Unsupported codec."); + if (!encoder) die("Unsupported codec."); update_frame_num = atoi(argv[5]); - if (!update_frame_num) - die("Couldn't parse frame number '%s'\n", argv[5]); + if (!update_frame_num) die("Couldn't parse frame number '%s'\n", argv[5]); info.codec_fourcc = encoder->fourcc; - info.frame_width = strtol(argv[1], NULL, 0); - info.frame_height = strtol(argv[2], NULL, 0); + info.frame_width = (int)strtol(argv[1], NULL, 0); + info.frame_height = (int)strtol(argv[2], NULL, 0); info.time_base.numerator = 1; info.time_base.denominator = fps; - if (info.frame_width <= 0 || - info.frame_height <= 0 || - (info.frame_width % 2) != 0 || - (info.frame_height % 2) != 0) { + if (info.frame_width <= 0 || info.frame_height <= 0 || + (info.frame_width % 2) != 0 || (info.frame_height % 2) != 0) { die("Invalid frame size: %dx%d", info.frame_width, info.frame_height); } if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width, - info.frame_height, 1)) { + info.frame_height, 1)) { die("Failed to allocate image."); } printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface())); res = 
vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0); - if (res) - die_codec(&codec, "Failed to get default codec config."); + if (res) die_codec(&codec, "Failed to get default codec config."); cfg.g_w = info.frame_width; cfg.g_h = info.frame_height; @@ -155,8 +149,7 @@ int main(int argc, char **argv) { cfg.rc_target_bitrate = bitrate; writer = vpx_video_writer_open(argv[4], kContainerIVF, &info); - if (!writer) - die("Failed to open %s for writing.", argv[4]); + if (!writer) die("Failed to open %s for writing.", argv[4]); if (!(infile = fopen(argv[3], "rb"))) die("Failed to open %s for reading.", argv[3]); @@ -178,15 +171,15 @@ int main(int argc, char **argv) { } // Flush encoder. - while (encode_frame(&codec, NULL, -1, writer)) {} + while (encode_frame(&codec, NULL, -1, writer)) { + } printf("\n"); fclose(infile); printf("Processed %d frames.\n", frame_count); vpx_img_free(&raw); - if (vpx_codec_destroy(&codec)) - die_codec(&codec, "Failed to destroy codec."); + if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec."); vpx_video_writer_close(writer); diff --git a/libs/libvpx/examples/vp9_lossless_encoder.c b/libs/libvpx/examples/vp9_lossless_encoder.c index 8272516830..cb5ca6bfe0 100644 --- a/libs/libvpx/examples/vp9_lossless_encoder.c +++ b/libs/libvpx/examples/vp9_lossless_encoder.c @@ -14,6 +14,7 @@ #include "vpx/vpx_encoder.h" #include "vpx/vp8cx.h" +#include "vp9/common/vp9_common.h" #include "../tools_common.h" #include "../video_writer.h" @@ -21,32 +22,28 @@ static const char *exec_name; void usage_exit(void) { - fprintf(stderr, "vp9_lossless_encoder: Example demonstrating VP9 lossless " - "encoding feature. Supports raw input only.\n"); + fprintf(stderr, + "vp9_lossless_encoder: Example demonstrating VP9 lossless " + "encoding feature. 
Supports raw input only.\n"); fprintf(stderr, "Usage: %s \n", exec_name); exit(EXIT_FAILURE); } -static int encode_frame(vpx_codec_ctx_t *codec, - vpx_image_t *img, - int frame_index, - int flags, - VpxVideoWriter *writer) { +static int encode_frame(vpx_codec_ctx_t *codec, vpx_image_t *img, + int frame_index, int flags, VpxVideoWriter *writer) { int got_pkts = 0; vpx_codec_iter_t iter = NULL; const vpx_codec_cx_pkt_t *pkt = NULL; - const vpx_codec_err_t res = vpx_codec_encode(codec, img, frame_index, 1, - flags, VPX_DL_GOOD_QUALITY); - if (res != VPX_CODEC_OK) - die_codec(codec, "Failed to encode frame"); + const vpx_codec_err_t res = + vpx_codec_encode(codec, img, frame_index, 1, flags, VPX_DL_GOOD_QUALITY); + if (res != VPX_CODEC_OK) die_codec(codec, "Failed to encode frame"); while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) { got_pkts = 1; if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) { const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0; - if (!vpx_video_writer_write_frame(writer, - pkt->data.frame.buf, + if (!vpx_video_writer_write_frame(writer, pkt->data.frame.buf, pkt->data.frame.sz, pkt->data.frame.pts)) { die_codec(codec, "Failed to write compressed frame"); @@ -66,43 +63,40 @@ int main(int argc, char **argv) { int frame_count = 0; vpx_image_t raw; vpx_codec_err_t res; - VpxVideoInfo info = {0}; + VpxVideoInfo info; VpxVideoWriter *writer = NULL; const VpxInterface *encoder = NULL; const int fps = 30; + vp9_zero(info); + exec_name = argv[0]; - if (argc < 5) - die("Invalid number of arguments"); + if (argc < 5) die("Invalid number of arguments"); encoder = get_vpx_encoder_by_name("vp9"); - if (!encoder) - die("Unsupported codec."); + if (!encoder) die("Unsupported codec."); info.codec_fourcc = encoder->fourcc; - info.frame_width = strtol(argv[1], NULL, 0); - info.frame_height = strtol(argv[2], NULL, 0); + info.frame_width = (int)strtol(argv[1], NULL, 0); + info.frame_height = (int)strtol(argv[2], NULL, 0); info.time_base.numerator = 
1; info.time_base.denominator = fps; - if (info.frame_width <= 0 || - info.frame_height <= 0 || - (info.frame_width % 2) != 0 || - (info.frame_height % 2) != 0) { + if (info.frame_width <= 0 || info.frame_height <= 0 || + (info.frame_width % 2) != 0 || (info.frame_height % 2) != 0) { die("Invalid frame size: %dx%d", info.frame_width, info.frame_height); } if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width, - info.frame_height, 1)) { + info.frame_height, 1)) { die("Failed to allocate image."); } printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface())); res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0); - if (res) - die_codec(&codec, "Failed to get default codec config."); + if (res) die_codec(&codec, "Failed to get default codec config."); cfg.g_w = info.frame_width; cfg.g_h = info.frame_height; @@ -110,8 +104,7 @@ int main(int argc, char **argv) { cfg.g_timebase.den = info.time_base.denominator; writer = vpx_video_writer_open(argv[4], kContainerIVF, &info); - if (!writer) - die("Failed to open %s for writing.", argv[4]); + if (!writer) die("Failed to open %s for writing.", argv[4]); if (!(infile = fopen(argv[3], "rb"))) die("Failed to open %s for reading.", argv[3]); @@ -128,15 +121,15 @@ int main(int argc, char **argv) { } // Flush encoder. 
- while (encode_frame(&codec, NULL, -1, 0, writer)) {} + while (encode_frame(&codec, NULL, -1, 0, writer)) { + } printf("\n"); fclose(infile); printf("Processed %d frames.\n", frame_count); vpx_img_free(&raw); - if (vpx_codec_destroy(&codec)) - die_codec(&codec, "Failed to destroy codec."); + if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec."); vpx_video_writer_close(writer); diff --git a/libs/libvpx/examples/vp9_spatial_svc_encoder.c b/libs/libvpx/examples/vp9_spatial_svc_encoder.c index 271ab704b6..cecdce0804 100644 --- a/libs/libvpx/examples/vp9_spatial_svc_encoder.c +++ b/libs/libvpx/examples/vp9_spatial_svc_encoder.c @@ -20,7 +20,6 @@ #include #include - #include "../args.h" #include "../tools_common.h" #include "../video_writer.h" @@ -54,8 +53,9 @@ static const arg_def_t spatial_layers_arg = static const arg_def_t temporal_layers_arg = ARG_DEF("tl", "temporal-layers", 1, "number of temporal SVC layers"); static const arg_def_t temporal_layering_mode_arg = - ARG_DEF("tlm", "temporal-layering-mode", 1, "temporal layering scheme." - "VP9E_TEMPORAL_LAYERING_MODE"); + ARG_DEF("tlm", "temporal-layering-mode", 1, + "temporal layering scheme." 
+ "VP9E_TEMPORAL_LAYERING_MODE"); static const arg_def_t kf_dist_arg = ARG_DEF("k", "kf-dist", 1, "number of frames between keyframes"); static const arg_def_t scale_factors_arg = @@ -75,8 +75,9 @@ static const arg_def_t min_bitrate_arg = static const arg_def_t max_bitrate_arg = ARG_DEF(NULL, "max-bitrate", 1, "Maximum bitrate"); static const arg_def_t lag_in_frame_arg = - ARG_DEF(NULL, "lag-in-frames", 1, "Number of frame to input before " - "generating any outputs"); + ARG_DEF(NULL, "lag-in-frames", 1, + "Number of frame to input before " + "generating any outputs"); static const arg_def_t rc_end_usage_arg = ARG_DEF(NULL, "rc-end-usage", 1, "0 - 3: VBR, CBR, CQ, Q"); static const arg_def_t speed_arg = @@ -86,35 +87,44 @@ static const arg_def_t aqmode_arg = #if CONFIG_VP9_HIGHBITDEPTH static const struct arg_enum_list bitdepth_enum[] = { - {"8", VPX_BITS_8}, - {"10", VPX_BITS_10}, - {"12", VPX_BITS_12}, - {NULL, 0} + { "8", VPX_BITS_8 }, { "10", VPX_BITS_10 }, { "12", VPX_BITS_12 }, { NULL, 0 } }; -static const arg_def_t bitdepth_arg = - ARG_DEF_ENUM("d", "bit-depth", 1, "Bit depth for codec 8, 10 or 12. ", - bitdepth_enum); +static const arg_def_t bitdepth_arg = ARG_DEF_ENUM( + "d", "bit-depth", 1, "Bit depth for codec 8, 10 or 12. 
", bitdepth_enum); #endif // CONFIG_VP9_HIGHBITDEPTH - -static const arg_def_t *svc_args[] = { - &frames_arg, &width_arg, &height_arg, - &timebase_arg, &bitrate_arg, &skip_frames_arg, &spatial_layers_arg, - &kf_dist_arg, &scale_factors_arg, &passes_arg, &pass_arg, - &fpf_name_arg, &min_q_arg, &max_q_arg, &min_bitrate_arg, - &max_bitrate_arg, &temporal_layers_arg, &temporal_layering_mode_arg, - &lag_in_frame_arg, &threads_arg, &aqmode_arg, +static const arg_def_t *svc_args[] = { &frames_arg, + &width_arg, + &height_arg, + &timebase_arg, + &bitrate_arg, + &skip_frames_arg, + &spatial_layers_arg, + &kf_dist_arg, + &scale_factors_arg, + &passes_arg, + &pass_arg, + &fpf_name_arg, + &min_q_arg, + &max_q_arg, + &min_bitrate_arg, + &max_bitrate_arg, + &temporal_layers_arg, + &temporal_layering_mode_arg, + &lag_in_frame_arg, + &threads_arg, + &aqmode_arg, #if OUTPUT_RC_STATS - &output_rc_stats_arg, + &output_rc_stats_arg, #endif #if CONFIG_VP9_HIGHBITDEPTH - &bitdepth_arg, + &bitdepth_arg, #endif - &speed_arg, - &rc_end_usage_arg, NULL -}; + &speed_arg, + &rc_end_usage_arg, + NULL }; static const uint32_t default_frames_to_skip = 0; static const uint32_t default_frames_to_code = 60 * 60; @@ -128,7 +138,7 @@ static const uint32_t default_temporal_layers = 1; static const uint32_t default_kf_dist = 100; static const uint32_t default_temporal_layering_mode = 0; static const uint32_t default_output_rc_stats = 0; -static const int32_t default_speed = -1; // -1 means use library default. +static const int32_t default_speed = -1; // -1 means use library default. static const uint32_t default_threads = 0; // zero means use library default. 
typedef struct { @@ -155,7 +165,7 @@ void usage_exit(void) { static void parse_command_line(int argc, const char **argv_, AppInput *app_input, SvcContext *svc_ctx, vpx_codec_enc_cfg_t *enc_cfg) { - struct arg arg = {0}; + struct arg arg = { 0 }; char **argv = NULL; char **argi = NULL; char **argj = NULL; @@ -165,7 +175,7 @@ static void parse_command_line(int argc, const char **argv_, const char *fpf_file_name = NULL; unsigned int min_bitrate = 0; unsigned int max_bitrate = 0; - char string_options[1024] = {0}; + char string_options[1024] = { 0 }; // initialize SvcContext with parameters that will be passed to vpx_svc_init svc_ctx->log_level = SVC_LOG_DEBUG; @@ -229,8 +239,8 @@ static void parse_command_line(int argc, const char **argv_, } else if (arg_match(&arg, &threads_arg, argi)) { svc_ctx->threads = arg_parse_uint(&arg); } else if (arg_match(&arg, &temporal_layering_mode_arg, argi)) { - svc_ctx->temporal_layering_mode = - enc_cfg->temporal_layering_mode = arg_parse_int(&arg); + svc_ctx->temporal_layering_mode = enc_cfg->temporal_layering_mode = + arg_parse_int(&arg); if (svc_ctx->temporal_layering_mode) { enc_cfg->g_error_resilient = 1; } @@ -278,7 +288,7 @@ static void parse_command_line(int argc, const char **argv_, enc_cfg->g_input_bit_depth = 10; enc_cfg->g_profile = 2; break; - case VPX_BITS_12: + case VPX_BITS_12: enc_cfg->g_input_bit_depth = 12; enc_cfg->g_profile = 2; break; @@ -360,9 +370,8 @@ static void parse_command_line(int argc, const char **argv_, "num: %d, den: %d, bitrate: %d,\n" "gop size: %d\n", vpx_codec_iface_name(vpx_codec_vp9_cx()), app_input->frames_to_code, - app_input->frames_to_skip, - svc_ctx->spatial_layers, enc_cfg->g_w, enc_cfg->g_h, - enc_cfg->g_timebase.num, enc_cfg->g_timebase.den, + app_input->frames_to_skip, svc_ctx->spatial_layers, enc_cfg->g_w, + enc_cfg->g_h, enc_cfg->g_timebase.num, enc_cfg->g_timebase.den, enc_cfg->rc_target_bitrate, enc_cfg->kf_max_dist); } @@ -399,7 +408,7 @@ struct RateControlStats { // Note: these 
rate control stats assume only 1 key frame in the // sequence (i.e., first frame only). static void set_rate_control_stats(struct RateControlStats *rc, - vpx_codec_enc_cfg_t *cfg) { + vpx_codec_enc_cfg_t *cfg) { unsigned int sl, tl; // Set the layer (cumulative) framerate and the target layer (non-cumulative) // per-frame-bandwidth, for the rate control encoding stats below. @@ -412,18 +421,15 @@ static void set_rate_control_stats(struct RateControlStats *rc, if (cfg->ts_number_layers == 1) rc->layer_framerate[layer] = framerate; else - rc->layer_framerate[layer] = - framerate / cfg->ts_rate_decimator[tl]; + rc->layer_framerate[layer] = framerate / cfg->ts_rate_decimator[tl]; if (tl > 0) { - rc->layer_pfb[layer] = 1000.0 * - (cfg->layer_target_bitrate[layer] - - cfg->layer_target_bitrate[layer - 1]) / - (rc->layer_framerate[layer] - - rc->layer_framerate[layer - 1]); + rc->layer_pfb[layer] = + 1000.0 * (cfg->layer_target_bitrate[layer] - + cfg->layer_target_bitrate[layer - 1]) / + (rc->layer_framerate[layer] - rc->layer_framerate[layer - 1]); } else { - rc->layer_pfb[tlayer0] = 1000.0 * - cfg->layer_target_bitrate[tlayer0] / - rc->layer_framerate[tlayer0]; + rc->layer_pfb[tlayer0] = 1000.0 * cfg->layer_target_bitrate[tlayer0] / + rc->layer_framerate[tlayer0]; } rc->layer_input_frames[layer] = 0; rc->layer_enc_frames[layer] = 0; @@ -447,31 +453,33 @@ static void printout_rate_control_summary(struct RateControlStats *rc, double perc_fluctuation = 0.0; printf("Total number of processed frames: %d\n\n", frame_cnt - 1); printf("Rate control layer stats for sl%d tl%d layer(s):\n\n", - cfg->ss_number_layers, cfg->ts_number_layers); + cfg->ss_number_layers, cfg->ts_number_layers); for (sl = 0; sl < cfg->ss_number_layers; ++sl) { for (tl = 0; tl < cfg->ts_number_layers; ++tl) { const int layer = sl * cfg->ts_number_layers + tl; - const int num_dropped = (tl > 0) ? 
- (rc->layer_input_frames[layer] - rc->layer_enc_frames[layer]) : - (rc->layer_input_frames[layer] - rc->layer_enc_frames[layer] - 1); - if (!sl) - tot_num_frames += rc->layer_input_frames[layer]; + const int num_dropped = + (tl > 0) + ? (rc->layer_input_frames[layer] - rc->layer_enc_frames[layer]) + : (rc->layer_input_frames[layer] - rc->layer_enc_frames[layer] - + 1); + if (!sl) tot_num_frames += rc->layer_input_frames[layer]; rc->layer_encoding_bitrate[layer] = 0.001 * rc->layer_framerate[layer] * - rc->layer_encoding_bitrate[layer] / tot_num_frames; - rc->layer_avg_frame_size[layer] = rc->layer_avg_frame_size[layer] / - rc->layer_enc_frames[layer]; - rc->layer_avg_rate_mismatch[layer] = - 100.0 * rc->layer_avg_rate_mismatch[layer] / - rc->layer_enc_frames[layer]; + rc->layer_encoding_bitrate[layer] / + tot_num_frames; + rc->layer_avg_frame_size[layer] = + rc->layer_avg_frame_size[layer] / rc->layer_enc_frames[layer]; + rc->layer_avg_rate_mismatch[layer] = 100.0 * + rc->layer_avg_rate_mismatch[layer] / + rc->layer_enc_frames[layer]; printf("For layer#: sl%d tl%d \n", sl, tl); printf("Bitrate (target vs actual): %d %f.0 kbps\n", cfg->layer_target_bitrate[layer], rc->layer_encoding_bitrate[layer]); printf("Average frame size (target vs actual): %f %f bits\n", rc->layer_pfb[layer], rc->layer_avg_frame_size[layer]); - printf("Average rate_mismatch: %f\n", - rc->layer_avg_rate_mismatch[layer]); - printf("Number of input frames, encoded (non-key) frames, " + printf("Average rate_mismatch: %f\n", rc->layer_avg_rate_mismatch[layer]); + printf( + "Number of input frames, encoded (non-key) frames, " "and percent dropped frames: %d %d %f.0 \n", rc->layer_input_frames[layer], rc->layer_enc_frames[layer], 100.0 * num_dropped / rc->layer_input_frames[layer]); @@ -483,19 +491,18 @@ static void printout_rate_control_summary(struct RateControlStats *rc, rc->variance_st_encoding_bitrate / rc->window_count - (rc->avg_st_encoding_bitrate * rc->avg_st_encoding_bitrate); 
perc_fluctuation = 100.0 * sqrt(rc->variance_st_encoding_bitrate) / - rc->avg_st_encoding_bitrate; + rc->avg_st_encoding_bitrate; printf("Short-time stats, for window of %d frames: \n", rc->window_size); printf("Average, rms-variance, and percent-fluct: %f %f %f \n", - rc->avg_st_encoding_bitrate, - sqrt(rc->variance_st_encoding_bitrate), + rc->avg_st_encoding_bitrate, sqrt(rc->variance_st_encoding_bitrate), perc_fluctuation); if (frame_cnt != tot_num_frames) die("Error: Number of input frames not equal to output encoded frames != " - "%d tot_num_frames = %d\n", frame_cnt, tot_num_frames); + "%d tot_num_frames = %d\n", + frame_cnt, tot_num_frames); } -vpx_codec_err_t parse_superframe_index(const uint8_t *data, - size_t data_sz, +vpx_codec_err_t parse_superframe_index(const uint8_t *data, size_t data_sz, uint32_t sizes[8], int *count) { // A chunk ending with a byte matching 0xc0 is an invalid chunk unless // it is a super frame index. If the last byte of real video compression @@ -508,7 +515,6 @@ vpx_codec_err_t parse_superframe_index(const uint8_t *data, marker = *(data + data_sz - 1); *count = 0; - if ((marker & 0xe0) == 0xc0) { const uint32_t frames = (marker & 0x7) + 1; const uint32_t mag = ((marker >> 3) & 0x3) + 1; @@ -516,8 +522,7 @@ vpx_codec_err_t parse_superframe_index(const uint8_t *data, // This chunk is marked as having a superframe index but doesn't have // enough data for it, thus it's an invalid superframe index. - if (data_sz < index_sz) - return VPX_CODEC_CORRUPT_FRAME; + if (data_sz < index_sz) return VPX_CODEC_CORRUPT_FRAME; { const uint8_t marker2 = *(data + data_sz - index_sz); @@ -525,8 +530,7 @@ vpx_codec_err_t parse_superframe_index(const uint8_t *data, // This chunk is marked as having a superframe index but doesn't have // the matching marker byte at the front of the index therefore it's an // invalid chunk. 
- if (marker != marker2) - return VPX_CODEC_CORRUPT_FRAME; + if (marker != marker2) return VPX_CODEC_CORRUPT_FRAME; } { @@ -537,8 +541,7 @@ vpx_codec_err_t parse_superframe_index(const uint8_t *data, for (i = 0; i < frames; ++i) { uint32_t this_sz = 0; - for (j = 0; j < mag; ++j) - this_sz |= (*x++) << (j * 8); + for (j = 0; j < mag; ++j) this_sz |= (*x++) << (j * 8); sizes[i] = this_sz; } *count = frames; @@ -558,32 +561,27 @@ void set_frame_flags_bypass_mode(int sl, int tl, int num_spatial_layers, for (sl = 0; sl < num_spatial_layers; ++sl) { if (!tl) { if (!sl) { - ref_frame_config->frame_flags[sl] = VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_REF_ARF | - VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF; + ref_frame_config->frame_flags[sl] = + VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | + VP8_EFLAG_NO_UPD_ARF; } else { if (is_key_frame) { - ref_frame_config->frame_flags[sl] = VP8_EFLAG_NO_REF_LAST | - VP8_EFLAG_NO_REF_ARF | - VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF; + ref_frame_config->frame_flags[sl] = + VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_ARF | + VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; } else { - ref_frame_config->frame_flags[sl] = VP8_EFLAG_NO_REF_ARF | - VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF; + ref_frame_config->frame_flags[sl] = + VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; } } } else if (tl == 1) { if (!sl) { - ref_frame_config->frame_flags[sl] = VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_REF_ARF | - VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_GF; + ref_frame_config->frame_flags[sl] = + VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST | + VP8_EFLAG_NO_UPD_GF; } else { - ref_frame_config->frame_flags[sl] = VP8_EFLAG_NO_REF_ARF | - VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_GF; + ref_frame_config->frame_flags[sl] = + VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF; } } if (tl == 0) { @@ -602,9 +600,9 @@ void set_frame_flags_bypass_mode(int sl, int tl, int 
num_spatial_layers, } int main(int argc, const char **argv) { - AppInput app_input = {0}; + AppInput app_input = { 0 }; VpxVideoWriter *writer = NULL; - VpxVideoInfo info = {0}; + VpxVideoInfo info = { 0 }; vpx_codec_ctx_t codec; vpx_codec_enc_cfg_t enc_cfg; SvcContext svc_ctx; @@ -618,14 +616,14 @@ int main(int argc, const char **argv) { int end_of_stream = 0; int frames_received = 0; #if OUTPUT_RC_STATS - VpxVideoWriter *outfile[VPX_TS_MAX_LAYERS] = {NULL}; + VpxVideoWriter *outfile[VPX_TS_MAX_LAYERS] = { NULL }; struct RateControlStats rc; vpx_svc_layer_id_t layer_id; vpx_svc_ref_frame_config_t ref_frame_config; int sl, tl; double sum_bitrate = 0.0; double sum_bitrate2 = 0.0; - double framerate = 30.0; + double framerate = 30.0; #endif struct vpx_usec_timer timer; int64_t cx_time = 0; @@ -634,10 +632,10 @@ int main(int argc, const char **argv) { exec_name = argv[0]; parse_command_line(argc, argv, &app_input, &svc_ctx, &enc_cfg); - // Allocate image buffer +// Allocate image buffer #if CONFIG_VP9_HIGHBITDEPTH - if (!vpx_img_alloc(&raw, enc_cfg.g_input_bit_depth == 8 ? - VPX_IMG_FMT_I420 : VPX_IMG_FMT_I42016, + if (!vpx_img_alloc(&raw, enc_cfg.g_input_bit_depth == 8 ? 
VPX_IMG_FMT_I420 + : VPX_IMG_FMT_I42016, enc_cfg.g_w, enc_cfg.g_h, 32)) { die("Failed to allocate image %dx%d\n", enc_cfg.g_w, enc_cfg.g_h); } @@ -668,8 +666,8 @@ int main(int argc, const char **argv) { if (!(app_input.passes == 2 && app_input.pass == 1)) { // We don't save the bitstream for the 1st pass on two pass rate control - writer = vpx_video_writer_open(app_input.output_filename, kContainerIVF, - &info); + writer = + vpx_video_writer_open(app_input.output_filename, kContainerIVF, &info); if (!writer) die("Failed to open %s for writing\n", app_input.output_filename); } @@ -683,15 +681,13 @@ int main(int argc, const char **argv) { snprintf(file_name, sizeof(file_name), "%s_t%d.ivf", app_input.output_filename, tl); outfile[tl] = vpx_video_writer_open(file_name, kContainerIVF, &info); - if (!outfile[tl]) - die("Failed to open %s for writing", file_name); + if (!outfile[tl]) die("Failed to open %s for writing", file_name); } } #endif // skip initial frames - for (i = 0; i < app_input.frames_to_skip; ++i) - vpx_img_read(&raw, infile); + for (i = 0; i < app_input.frames_to_skip; ++i) vpx_img_read(&raw, infile); if (svc_ctx.speed != -1) vpx_codec_control(&codec, VP8E_SET_CPUUSED, svc_ctx.speed); @@ -700,7 +696,6 @@ int main(int argc, const char **argv) { if (svc_ctx.speed >= 5 && svc_ctx.aqmode == 1) vpx_codec_control(&codec, VP9E_SET_AQ_MODE, 3); - // Encode frames while (!end_of_stream) { vpx_codec_iter_t iter = NULL; @@ -729,8 +724,7 @@ int main(int argc, const char **argv) { // over all the spatial layers for the current superframe. 
vpx_codec_control(&codec, VP9E_SET_SVC_LAYER_ID, &layer_id); set_frame_flags_bypass_mode(sl, layer_id.temporal_layer_id, - svc_ctx.spatial_layers, - frame_cnt == 0, + svc_ctx.spatial_layers, frame_cnt == 0, &ref_frame_config); vpx_codec_control(&codec, VP9E_SET_SVC_REF_FRAME_CONFIG, &ref_frame_config); @@ -743,9 +737,9 @@ int main(int argc, const char **argv) { } vpx_usec_timer_start(&timer); - res = vpx_svc_encode(&svc_ctx, &codec, (end_of_stream ? NULL : &raw), - pts, frame_duration, svc_ctx.speed >= 5 ? - VPX_DL_REALTIME : VPX_DL_GOOD_QUALITY); + res = vpx_svc_encode( + &svc_ctx, &codec, (end_of_stream ? NULL : &raw), pts, frame_duration, + svc_ctx.speed >= 5 ? VPX_DL_REALTIME : VPX_DL_GOOD_QUALITY); vpx_usec_timer_mark(&timer); cx_time += vpx_usec_timer_elapsed(&timer); @@ -764,8 +758,7 @@ int main(int argc, const char **argv) { uint32_t sizes[8]; int count = 0; #endif - vpx_video_writer_write_frame(writer, - cx_pkt->data.frame.buf, + vpx_video_writer_write_frame(writer, cx_pkt->data.frame.buf, cx_pkt->data.frame.sz, cx_pkt->data.frame.pts); #if OUTPUT_RC_STATS @@ -782,20 +775,19 @@ int main(int argc, const char **argv) { VP9E_TEMPORAL_LAYERING_MODE_BYPASS) { for (sl = 0; sl < enc_cfg.ss_number_layers; ++sl) { ++rc.layer_input_frames[sl * enc_cfg.ts_number_layers + - layer_id.temporal_layer_id]; + layer_id.temporal_layer_id]; } } for (tl = layer_id.temporal_layer_id; - tl < enc_cfg.ts_number_layers; ++tl) { - vpx_video_writer_write_frame(outfile[tl], - cx_pkt->data.frame.buf, - cx_pkt->data.frame.sz, - cx_pkt->data.frame.pts); + tl < enc_cfg.ts_number_layers; ++tl) { + vpx_video_writer_write_frame( + outfile[tl], cx_pkt->data.frame.buf, cx_pkt->data.frame.sz, + cx_pkt->data.frame.pts); } for (sl = 0; sl < enc_cfg.ss_number_layers; ++sl) { for (tl = layer_id.temporal_layer_id; - tl < enc_cfg.ts_number_layers; ++tl) { + tl < enc_cfg.ts_number_layers; ++tl) { const int layer = sl * enc_cfg.ts_number_layers + tl; ++rc.layer_tot_enc_frames[layer]; 
rc.layer_encoding_bitrate[layer] += 8.0 * sizes[sl]; @@ -832,20 +824,20 @@ int main(int argc, const char **argv) { // Second shifted window. if (frame_cnt > rc.window_size + rc.window_size / 2) { - tl = layer_id.temporal_layer_id; - for (sl = 0; sl < enc_cfg.ss_number_layers; ++sl) { - sum_bitrate2 += 0.001 * 8.0 * sizes[sl] * framerate; - } + tl = layer_id.temporal_layer_id; + for (sl = 0; sl < enc_cfg.ss_number_layers; ++sl) { + sum_bitrate2 += 0.001 * 8.0 * sizes[sl] * framerate; + } - if (frame_cnt > 2 * rc.window_size && - frame_cnt % rc.window_size == 0) { - rc.window_count += 1; - rc.avg_st_encoding_bitrate += sum_bitrate2 / rc.window_size; - rc.variance_st_encoding_bitrate += - (sum_bitrate2 / rc.window_size) * - (sum_bitrate2 / rc.window_size); - sum_bitrate2 = 0.0; - } + if (frame_cnt > 2 * rc.window_size && + frame_cnt % rc.window_size == 0) { + rc.window_count += 1; + rc.avg_st_encoding_bitrate += sum_bitrate2 / rc.window_size; + rc.variance_st_encoding_bitrate += + (sum_bitrate2 / rc.window_size) * + (sum_bitrate2 / rc.window_size); + sum_bitrate2 = 0.0; + } } } #endif @@ -860,14 +852,11 @@ int main(int argc, const char **argv) { break; } case VPX_CODEC_STATS_PKT: { - stats_write(&app_input.rc_stats, - cx_pkt->data.twopass_stats.buf, + stats_write(&app_input.rc_stats, cx_pkt->data.twopass_stats.buf, cx_pkt->data.twopass_stats.sz); break; } - default: { - break; - } + default: { break; } } } @@ -880,8 +869,8 @@ int main(int argc, const char **argv) { // Compensate for the extra frame count for the bypass mode. 
if (svc_ctx.temporal_layering_mode == VP9E_TEMPORAL_LAYERING_MODE_BYPASS) { for (sl = 0; sl < enc_cfg.ss_number_layers; ++sl) { - const int layer = sl * enc_cfg.ts_number_layers + - layer_id.temporal_layer_id; + const int layer = + sl * enc_cfg.ts_number_layers + layer_id.temporal_layer_id; --rc.layer_input_frames[layer]; } } @@ -895,8 +884,7 @@ int main(int argc, const char **argv) { } #endif if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec"); - if (app_input.passes == 2) - stats_close(&app_input.rc_stats, 1); + if (app_input.passes == 2) stats_close(&app_input.rc_stats, 1); if (writer) { vpx_video_writer_close(writer); } @@ -908,8 +896,7 @@ int main(int argc, const char **argv) { } #endif printf("Frame cnt and encoding time/FPS stats for encoding: %d %f %f \n", - frame_cnt, - 1000 * (float)cx_time / (double)(frame_cnt * 1000000), + frame_cnt, 1000 * (float)cx_time / (double)(frame_cnt * 1000000), 1000000 * (double)frame_cnt / (double)cx_time); vpx_img_free(&raw); // display average size, psnr diff --git a/libs/libvpx/examples/vp9cx_set_ref.c b/libs/libvpx/examples/vp9cx_set_ref.c new file mode 100644 index 0000000000..798d7e3f2b --- /dev/null +++ b/libs/libvpx/examples/vp9cx_set_ref.c @@ -0,0 +1,438 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +// VP9 Set Reference Frame +// ============================ +// +// This is an example demonstrating how to overwrite the VP9 encoder's +// internal reference frame. In the sample we set the last frame to the +// current frame. This technique could be used to bounce between two cameras. 
+// +// The decoder would also have to set the reference frame to the same value +// on the same frame, or the video will become corrupt. The 'test_decode' +// variable is set to 1 in this example that tests if the encoder and decoder +// results are matching. +// +// Usage +// ----- +// This example encodes a raw video. And the last argument passed in specifies +// the frame number to update the reference frame on. For example, run +// examples/vp9cx_set_ref 352 288 in.yuv out.ivf 4 30 +// The parameter is parsed as follows: +// +// +// Extra Variables +// --------------- +// This example maintains the frame number passed on the command line +// in the `update_frame_num` variable. +// +// +// Configuration +// ------------- +// +// The reference frame is updated on the frame specified on the command +// line. +// +// Observing The Effects +// --------------------- +// The encoder and decoder results should be matching when the same reference +// frame setting operation is done in both encoder and decoder. Otherwise, +// the encoder/decoder mismatch would be seen. 
+ +#include +#include +#include + +#include "vpx/vp8cx.h" +#include "vpx/vpx_decoder.h" +#include "vpx/vpx_encoder.h" +#include "vp9/common/vp9_common.h" + +#include "./tools_common.h" +#include "./video_writer.h" + +static const char *exec_name; + +void usage_exit() { + fprintf(stderr, + "Usage: %s " + " \n", + exec_name); + exit(EXIT_FAILURE); +} + +static int compare_img(const vpx_image_t *const img1, + const vpx_image_t *const img2) { + uint32_t l_w = img1->d_w; + uint32_t c_w = (img1->d_w + img1->x_chroma_shift) >> img1->x_chroma_shift; + const uint32_t c_h = + (img1->d_h + img1->y_chroma_shift) >> img1->y_chroma_shift; + uint32_t i; + int match = 1; + + match &= (img1->fmt == img2->fmt); + match &= (img1->d_w == img2->d_w); + match &= (img1->d_h == img2->d_h); + + for (i = 0; i < img1->d_h; ++i) + match &= (memcmp(img1->planes[VPX_PLANE_Y] + i * img1->stride[VPX_PLANE_Y], + img2->planes[VPX_PLANE_Y] + i * img2->stride[VPX_PLANE_Y], + l_w) == 0); + + for (i = 0; i < c_h; ++i) + match &= (memcmp(img1->planes[VPX_PLANE_U] + i * img1->stride[VPX_PLANE_U], + img2->planes[VPX_PLANE_U] + i * img2->stride[VPX_PLANE_U], + c_w) == 0); + + for (i = 0; i < c_h; ++i) + match &= (memcmp(img1->planes[VPX_PLANE_V] + i * img1->stride[VPX_PLANE_V], + img2->planes[VPX_PLANE_V] + i * img2->stride[VPX_PLANE_V], + c_w) == 0); + + return match; +} + +#define mmin(a, b) ((a) < (b) ? 
(a) : (b)) +static void find_mismatch(const vpx_image_t *const img1, + const vpx_image_t *const img2, int yloc[4], + int uloc[4], int vloc[4]) { + const uint32_t bsize = 64; + const uint32_t bsizey = bsize >> img1->y_chroma_shift; + const uint32_t bsizex = bsize >> img1->x_chroma_shift; + const uint32_t c_w = + (img1->d_w + img1->x_chroma_shift) >> img1->x_chroma_shift; + const uint32_t c_h = + (img1->d_h + img1->y_chroma_shift) >> img1->y_chroma_shift; + int match = 1; + uint32_t i, j; + yloc[0] = yloc[1] = yloc[2] = yloc[3] = -1; + for (i = 0, match = 1; match && i < img1->d_h; i += bsize) { + for (j = 0; match && j < img1->d_w; j += bsize) { + int k, l; + const int si = mmin(i + bsize, img1->d_h) - i; + const int sj = mmin(j + bsize, img1->d_w) - j; + for (k = 0; match && k < si; ++k) { + for (l = 0; match && l < sj; ++l) { + if (*(img1->planes[VPX_PLANE_Y] + + (i + k) * img1->stride[VPX_PLANE_Y] + j + l) != + *(img2->planes[VPX_PLANE_Y] + + (i + k) * img2->stride[VPX_PLANE_Y] + j + l)) { + yloc[0] = i + k; + yloc[1] = j + l; + yloc[2] = *(img1->planes[VPX_PLANE_Y] + + (i + k) * img1->stride[VPX_PLANE_Y] + j + l); + yloc[3] = *(img2->planes[VPX_PLANE_Y] + + (i + k) * img2->stride[VPX_PLANE_Y] + j + l); + match = 0; + break; + } + } + } + } + } + + uloc[0] = uloc[1] = uloc[2] = uloc[3] = -1; + for (i = 0, match = 1; match && i < c_h; i += bsizey) { + for (j = 0; match && j < c_w; j += bsizex) { + int k, l; + const int si = mmin(i + bsizey, c_h - i); + const int sj = mmin(j + bsizex, c_w - j); + for (k = 0; match && k < si; ++k) { + for (l = 0; match && l < sj; ++l) { + if (*(img1->planes[VPX_PLANE_U] + + (i + k) * img1->stride[VPX_PLANE_U] + j + l) != + *(img2->planes[VPX_PLANE_U] + + (i + k) * img2->stride[VPX_PLANE_U] + j + l)) { + uloc[0] = i + k; + uloc[1] = j + l; + uloc[2] = *(img1->planes[VPX_PLANE_U] + + (i + k) * img1->stride[VPX_PLANE_U] + j + l); + uloc[3] = *(img2->planes[VPX_PLANE_U] + + (i + k) * img2->stride[VPX_PLANE_U] + j + l); + match = 0; + 
break; + } + } + } + } + } + vloc[0] = vloc[1] = vloc[2] = vloc[3] = -1; + for (i = 0, match = 1; match && i < c_h; i += bsizey) { + for (j = 0; match && j < c_w; j += bsizex) { + int k, l; + const int si = mmin(i + bsizey, c_h - i); + const int sj = mmin(j + bsizex, c_w - j); + for (k = 0; match && k < si; ++k) { + for (l = 0; match && l < sj; ++l) { + if (*(img1->planes[VPX_PLANE_V] + + (i + k) * img1->stride[VPX_PLANE_V] + j + l) != + *(img2->planes[VPX_PLANE_V] + + (i + k) * img2->stride[VPX_PLANE_V] + j + l)) { + vloc[0] = i + k; + vloc[1] = j + l; + vloc[2] = *(img1->planes[VPX_PLANE_V] + + (i + k) * img1->stride[VPX_PLANE_V] + j + l); + vloc[3] = *(img2->planes[VPX_PLANE_V] + + (i + k) * img2->stride[VPX_PLANE_V] + j + l); + match = 0; + break; + } + } + } + } + } +} + +static void testing_decode(vpx_codec_ctx_t *encoder, vpx_codec_ctx_t *decoder, + unsigned int frame_out, int *mismatch_seen) { + vpx_image_t enc_img, dec_img; + struct vp9_ref_frame ref_enc, ref_dec; + + if (*mismatch_seen) return; + + ref_enc.idx = 0; + ref_dec.idx = 0; + if (vpx_codec_control(encoder, VP9_GET_REFERENCE, &ref_enc)) + die_codec(encoder, "Failed to get encoder reference frame"); + enc_img = ref_enc.img; + if (vpx_codec_control(decoder, VP9_GET_REFERENCE, &ref_dec)) + die_codec(decoder, "Failed to get decoder reference frame"); + dec_img = ref_dec.img; + + if (!compare_img(&enc_img, &dec_img)) { + int y[4], u[4], v[4]; + + *mismatch_seen = 1; + + find_mismatch(&enc_img, &dec_img, y, u, v); + printf( + "Encode/decode mismatch on frame %d at" + " Y[%d, %d] {%d/%d}," + " U[%d, %d] {%d/%d}," + " V[%d, %d] {%d/%d}", + frame_out, y[0], y[1], y[2], y[3], u[0], u[1], u[2], u[3], v[0], v[1], + v[2], v[3]); + } + + vpx_img_free(&enc_img); + vpx_img_free(&dec_img); +} + +static int encode_frame(vpx_codec_ctx_t *ecodec, vpx_image_t *img, + unsigned int frame_in, VpxVideoWriter *writer, + int test_decode, vpx_codec_ctx_t *dcodec, + unsigned int *frame_out, int *mismatch_seen) { + int 
got_pkts = 0; + vpx_codec_iter_t iter = NULL; + const vpx_codec_cx_pkt_t *pkt = NULL; + int got_data; + const vpx_codec_err_t res = + vpx_codec_encode(ecodec, img, frame_in, 1, 0, VPX_DL_GOOD_QUALITY); + if (res != VPX_CODEC_OK) die_codec(ecodec, "Failed to encode frame"); + + got_data = 0; + + while ((pkt = vpx_codec_get_cx_data(ecodec, &iter)) != NULL) { + got_pkts = 1; + + if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) { + const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0; + + if (!(pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT)) { + *frame_out += 1; + } + + if (!vpx_video_writer_write_frame(writer, pkt->data.frame.buf, + pkt->data.frame.sz, + pkt->data.frame.pts)) { + die_codec(ecodec, "Failed to write compressed frame"); + } + printf(keyframe ? "K" : "."); + fflush(stdout); + got_data = 1; + + // Decode 1 frame. + if (test_decode) { + if (vpx_codec_decode(dcodec, pkt->data.frame.buf, + (unsigned int)pkt->data.frame.sz, NULL, 0)) + die_codec(dcodec, "Failed to decode frame."); + } + } + } + + // Mismatch checking + if (got_data && test_decode) { + testing_decode(ecodec, dcodec, *frame_out, mismatch_seen); + } + + return got_pkts; +} + +int main(int argc, char **argv) { + FILE *infile = NULL; + // Encoder + vpx_codec_ctx_t ecodec; + vpx_codec_enc_cfg_t cfg; + unsigned int frame_in = 0; + vpx_image_t raw; + vpx_codec_err_t res; + VpxVideoInfo info; + VpxVideoWriter *writer = NULL; + const VpxInterface *encoder = NULL; + + // Test encoder/decoder mismatch. 
+ int test_decode = 1; + // Decoder + vpx_codec_ctx_t dcodec; + unsigned int frame_out = 0; + + // The frame number to set reference frame on + unsigned int update_frame_num = 0; + int mismatch_seen = 0; + + const int fps = 30; + const int bitrate = 500; + + const char *width_arg = NULL; + const char *height_arg = NULL; + const char *infile_arg = NULL; + const char *outfile_arg = NULL; + unsigned int limit = 0; + + vp9_zero(ecodec); + vp9_zero(cfg); + vp9_zero(info); + + exec_name = argv[0]; + + if (argc < 6) die("Invalid number of arguments"); + + width_arg = argv[1]; + height_arg = argv[2]; + infile_arg = argv[3]; + outfile_arg = argv[4]; + + encoder = get_vpx_encoder_by_name("vp9"); + if (!encoder) die("Unsupported codec."); + + update_frame_num = atoi(argv[5]); + // In VP9, the reference buffers (cm->buffer_pool->frame_bufs[i].buf) are + // allocated while calling vpx_codec_encode(), thus, setting reference for + // 1st frame isn't supported. + if (update_frame_num <= 1) die("Couldn't parse frame number '%s'\n", argv[5]); + + if (argc > 6) { + limit = atoi(argv[6]); + if (update_frame_num > limit) + die("Update frame number couldn't larger than limit\n"); + } + + info.codec_fourcc = encoder->fourcc; + info.frame_width = (int)strtol(width_arg, NULL, 0); + info.frame_height = (int)strtol(height_arg, NULL, 0); + info.time_base.numerator = 1; + info.time_base.denominator = fps; + + if (info.frame_width <= 0 || info.frame_height <= 0 || + (info.frame_width % 2) != 0 || (info.frame_height % 2) != 0) { + die("Invalid frame size: %dx%d", info.frame_width, info.frame_height); + } + + if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width, + info.frame_height, 1)) { + die("Failed to allocate image."); + } + + printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface())); + + res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0); + if (res) die_codec(&ecodec, "Failed to get default codec config."); + + cfg.g_w = info.frame_width; + cfg.g_h 
= info.frame_height; + cfg.g_timebase.num = info.time_base.numerator; + cfg.g_timebase.den = info.time_base.denominator; + cfg.rc_target_bitrate = bitrate; + cfg.g_lag_in_frames = 3; + + writer = vpx_video_writer_open(outfile_arg, kContainerIVF, &info); + if (!writer) die("Failed to open %s for writing.", outfile_arg); + + if (!(infile = fopen(infile_arg, "rb"))) + die("Failed to open %s for reading.", infile_arg); + + if (vpx_codec_enc_init(&ecodec, encoder->codec_interface(), &cfg, 0)) + die_codec(&ecodec, "Failed to initialize encoder"); + + // Disable alt_ref. + if (vpx_codec_control(&ecodec, VP8E_SET_ENABLEAUTOALTREF, 0)) + die_codec(&ecodec, "Failed to set enable auto alt ref"); + + if (test_decode) { + const VpxInterface *decoder = get_vpx_decoder_by_name("vp9"); + if (vpx_codec_dec_init(&dcodec, decoder->codec_interface(), NULL, 0)) + die_codec(&dcodec, "Failed to initialize decoder."); + } + + // Encode frames. + while (vpx_img_read(&raw, infile)) { + if (limit && frame_in >= limit) break; + if (update_frame_num > 1 && frame_out + 1 == update_frame_num) { + vpx_ref_frame_t ref; + ref.frame_type = VP8_LAST_FRAME; + ref.img = raw; + // Set reference frame in encoder. + if (vpx_codec_control(&ecodec, VP8_SET_REFERENCE, &ref)) + die_codec(&ecodec, "Failed to set reference frame"); + printf(" "); + + // If set_reference in decoder is commented out, the enc/dec mismatch + // would be seen. + if (test_decode) { + if (vpx_codec_control(&dcodec, VP8_SET_REFERENCE, &ref)) + die_codec(&dcodec, "Failed to set reference frame"); + } + } + + encode_frame(&ecodec, &raw, frame_in, writer, test_decode, &dcodec, + &frame_out, &mismatch_seen); + frame_in++; + if (mismatch_seen) break; + } + + // Flush encoder. 
+ if (!mismatch_seen) + while (encode_frame(&ecodec, NULL, frame_in, writer, test_decode, &dcodec, + &frame_out, &mismatch_seen)) { + } + + printf("\n"); + fclose(infile); + printf("Processed %d frames.\n", frame_out); + + if (test_decode) { + if (!mismatch_seen) + printf("Encoder/decoder results are matching.\n"); + else + printf("Encoder/decoder results are NOT matching.\n"); + } + + if (test_decode) + if (vpx_codec_destroy(&dcodec)) + die_codec(&dcodec, "Failed to destroy decoder"); + + vpx_img_free(&raw); + if (vpx_codec_destroy(&ecodec)) + die_codec(&ecodec, "Failed to destroy encoder."); + + vpx_video_writer_close(writer); + + return EXIT_SUCCESS; +} diff --git a/libs/libvpx/examples/vpx_temporal_svc_encoder.c b/libs/libvpx/examples/vpx_temporal_svc_encoder.c index 16abb9deb0..309a2fe2e2 100644 --- a/libs/libvpx/examples/vpx_temporal_svc_encoder.c +++ b/libs/libvpx/examples/vpx_temporal_svc_encoder.c @@ -28,9 +28,7 @@ static const char *exec_name; -void usage_exit(void) { - exit(EXIT_FAILURE); -} +void usage_exit(void) { exit(EXIT_FAILURE); } // Denoiser states, for temporal denoising. enum denoiserState { @@ -41,7 +39,7 @@ enum denoiserState { kDenoiserOnAdaptive }; -static int mode_to_num_layers[13] = {1, 2, 2, 3, 3, 3, 3, 5, 2, 3, 3, 3, 3}; +static int mode_to_num_layers[13] = { 1, 2, 2, 3, 3, 3, 3, 5, 2, 3, 3, 3, 3 }; // For rate control encoding stats. struct RateControlMetrics { @@ -86,14 +84,14 @@ static void set_rate_control_metrics(struct RateControlMetrics *rc, // per-frame-bandwidth, for the rate control encoding stats below. 
const double framerate = cfg->g_timebase.den / cfg->g_timebase.num; rc->layer_framerate[0] = framerate / cfg->ts_rate_decimator[0]; - rc->layer_pfb[0] = 1000.0 * rc->layer_target_bitrate[0] / - rc->layer_framerate[0]; + rc->layer_pfb[0] = + 1000.0 * rc->layer_target_bitrate[0] / rc->layer_framerate[0]; for (i = 0; i < cfg->ts_number_layers; ++i) { if (i > 0) { rc->layer_framerate[i] = framerate / cfg->ts_rate_decimator[i]; - rc->layer_pfb[i] = 1000.0 * - (rc->layer_target_bitrate[i] - rc->layer_target_bitrate[i - 1]) / - (rc->layer_framerate[i] - rc->layer_framerate[i - 1]); + rc->layer_pfb[i] = 1000.0 * (rc->layer_target_bitrate[i] - + rc->layer_target_bitrate[i - 1]) / + (rc->layer_framerate[i] - rc->layer_framerate[i - 1]); } rc->layer_input_frames[i] = 0; rc->layer_enc_frames[i] = 0; @@ -114,29 +112,31 @@ static void printout_rate_control_summary(struct RateControlMetrics *rc, unsigned int i = 0; int tot_num_frames = 0; double perc_fluctuation = 0.0; - printf("Total number of processed frames: %d\n\n", frame_cnt -1); + printf("Total number of processed frames: %d\n\n", frame_cnt - 1); printf("Rate control layer stats for %d layer(s):\n\n", - cfg->ts_number_layers); + cfg->ts_number_layers); for (i = 0; i < cfg->ts_number_layers; ++i) { - const int num_dropped = (i > 0) ? - (rc->layer_input_frames[i] - rc->layer_enc_frames[i]) : - (rc->layer_input_frames[i] - rc->layer_enc_frames[i] - 1); + const int num_dropped = + (i > 0) ? 
(rc->layer_input_frames[i] - rc->layer_enc_frames[i]) + : (rc->layer_input_frames[i] - rc->layer_enc_frames[i] - 1); tot_num_frames += rc->layer_input_frames[i]; rc->layer_encoding_bitrate[i] = 0.001 * rc->layer_framerate[i] * - rc->layer_encoding_bitrate[i] / tot_num_frames; - rc->layer_avg_frame_size[i] = rc->layer_avg_frame_size[i] / - rc->layer_enc_frames[i]; - rc->layer_avg_rate_mismatch[i] = 100.0 * rc->layer_avg_rate_mismatch[i] / - rc->layer_enc_frames[i]; + rc->layer_encoding_bitrate[i] / + tot_num_frames; + rc->layer_avg_frame_size[i] = + rc->layer_avg_frame_size[i] / rc->layer_enc_frames[i]; + rc->layer_avg_rate_mismatch[i] = + 100.0 * rc->layer_avg_rate_mismatch[i] / rc->layer_enc_frames[i]; printf("For layer#: %d \n", i); printf("Bitrate (target vs actual): %d %f \n", rc->layer_target_bitrate[i], rc->layer_encoding_bitrate[i]); printf("Average frame size (target vs actual): %f %f \n", rc->layer_pfb[i], rc->layer_avg_frame_size[i]); printf("Average rate_mismatch: %f \n", rc->layer_avg_rate_mismatch[i]); - printf("Number of input frames, encoded (non-key) frames, " - "and perc dropped frames: %d %d %f \n", rc->layer_input_frames[i], - rc->layer_enc_frames[i], + printf( + "Number of input frames, encoded (non-key) frames, " + "and perc dropped frames: %d %d %f \n", + rc->layer_input_frames[i], rc->layer_enc_frames[i], 100.0 * num_dropped / rc->layer_input_frames[i]); printf("\n"); } @@ -145,11 +145,10 @@ static void printout_rate_control_summary(struct RateControlMetrics *rc, rc->variance_st_encoding_bitrate / rc->window_count - (rc->avg_st_encoding_bitrate * rc->avg_st_encoding_bitrate); perc_fluctuation = 100.0 * sqrt(rc->variance_st_encoding_bitrate) / - rc->avg_st_encoding_bitrate; - printf("Short-time stats, for window of %d frames: \n",rc->window_size); + rc->avg_st_encoding_bitrate; + printf("Short-time stats, for window of %d frames: \n", rc->window_size); printf("Average, rms-variance, and percent-fluct: %f %f %f \n", - 
rc->avg_st_encoding_bitrate, - sqrt(rc->variance_st_encoding_bitrate), + rc->avg_st_encoding_bitrate, sqrt(rc->variance_st_encoding_bitrate), perc_fluctuation); if ((frame_cnt - 1) != tot_num_frames) die("Error: Number of input frames not equal to output! \n"); @@ -167,20 +166,20 @@ static void set_temporal_layer_pattern(int layering_mode, switch (layering_mode) { case 0: { // 1-layer. - int ids[1] = {0}; + int ids[1] = { 0 }; cfg->ts_periodicity = 1; *flag_periodicity = 1; cfg->ts_number_layers = 1; cfg->ts_rate_decimator[0] = 1; memcpy(cfg->ts_layer_id, ids, sizeof(ids)); // Update L only. - layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF; + layer_flags[0] = + VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; break; } case 1: { // 2-layers, 2-frame period. - int ids[2] = {0, 1}; + int ids[2] = { 0, 1 }; cfg->ts_periodicity = 2; *flag_periodicity = 2; cfg->ts_number_layers = 2; @@ -189,22 +188,24 @@ static void set_temporal_layer_pattern(int layering_mode, memcpy(cfg->ts_layer_id, ids, sizeof(ids)); #if 1 // 0=L, 1=GF, Intra-layer prediction enabled. - layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF; - layer_flags[1] = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_REF_ARF; + layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_UPD_GF | + VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF | + VP8_EFLAG_NO_REF_ARF; + layer_flags[1] = + VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_REF_ARF; #else - // 0=L, 1=GF, Intra-layer prediction disabled. - layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF; + // 0=L, 1=GF, Intra-layer prediction disabled. 
+ layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_UPD_GF | + VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF | + VP8_EFLAG_NO_REF_ARF; layer_flags[1] = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_LAST; + VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_REF_LAST; #endif break; } case 2: { // 2-layers, 3-frame period. - int ids[3] = {0, 1, 1}; + int ids[3] = { 0, 1, 1 }; cfg->ts_periodicity = 3; *flag_periodicity = 3; cfg->ts_number_layers = 2; @@ -212,16 +213,17 @@ static void set_temporal_layer_pattern(int layering_mode, cfg->ts_rate_decimator[1] = 1; memcpy(cfg->ts_layer_id, ids, sizeof(ids)); // 0=L, 1=GF, Intra-layer prediction enabled. - layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; - layer_flags[1] = - layer_flags[2] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | - VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST; + layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF | + VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | + VP8_EFLAG_NO_UPD_ARF; + layer_flags[1] = layer_flags[2] = + VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | + VP8_EFLAG_NO_UPD_LAST; break; } case 3: { // 3-layers, 6-frame period. - int ids[6] = {0, 2, 2, 1, 2, 2}; + int ids[6] = { 0, 2, 2, 1, 2, 2 }; cfg->ts_periodicity = 6; *flag_periodicity = 6; cfg->ts_number_layers = 3; @@ -230,19 +232,18 @@ static void set_temporal_layer_pattern(int layering_mode, cfg->ts_rate_decimator[2] = 1; memcpy(cfg->ts_layer_id, ids, sizeof(ids)); // 0=L, 1=GF, 2=ARF, Intra-layer prediction enabled. 
- layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; - layer_flags[3] = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_UPD_LAST; - layer_flags[1] = - layer_flags[2] = - layer_flags[4] = - layer_flags[5] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_LAST; + layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF | + VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | + VP8_EFLAG_NO_UPD_ARF; + layer_flags[3] = + VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST; + layer_flags[1] = layer_flags[2] = layer_flags[4] = layer_flags[5] = + VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_LAST; break; } case 4: { // 3-layers, 4-frame period. - int ids[4] = {0, 2, 1, 2}; + int ids[4] = { 0, 2, 1, 2 }; cfg->ts_periodicity = 4; *flag_periodicity = 4; cfg->ts_number_layers = 3; @@ -251,39 +252,41 @@ static void set_temporal_layer_pattern(int layering_mode, cfg->ts_rate_decimator[2] = 1; memcpy(cfg->ts_layer_id, ids, sizeof(ids)); // 0=L, 1=GF, 2=ARF, Intra-layer prediction disabled. - layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; + layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF | + VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | + VP8_EFLAG_NO_UPD_ARF; layer_flags[2] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | - VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST; - layer_flags[1] = - layer_flags[3] = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; + VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST; + layer_flags[1] = layer_flags[3] = + VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | + VP8_EFLAG_NO_UPD_ARF; break; } case 5: { // 3-layers, 4-frame period. 
- int ids[4] = {0, 2, 1, 2}; + int ids[4] = { 0, 2, 1, 2 }; cfg->ts_periodicity = 4; *flag_periodicity = 4; - cfg->ts_number_layers = 3; + cfg->ts_number_layers = 3; cfg->ts_rate_decimator[0] = 4; cfg->ts_rate_decimator[1] = 2; cfg->ts_rate_decimator[2] = 1; memcpy(cfg->ts_layer_id, ids, sizeof(ids)); // 0=L, 1=GF, 2=ARF, Intra-layer prediction enabled in layer 1, disabled // in layer 2. - layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; - layer_flags[2] = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST | + layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF | + VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | + VP8_EFLAG_NO_UPD_ARF; + layer_flags[2] = + VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF; + layer_flags[1] = layer_flags[3] = + VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; - layer_flags[1] = - layer_flags[3] = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; break; } case 6: { // 3-layers, 4-frame period. - int ids[4] = {0, 2, 1, 2}; + int ids[4] = { 0, 2, 1, 2 }; cfg->ts_periodicity = 4; *flag_periodicity = 4; cfg->ts_number_layers = 3; @@ -292,18 +295,19 @@ static void set_temporal_layer_pattern(int layering_mode, cfg->ts_rate_decimator[2] = 1; memcpy(cfg->ts_layer_id, ids, sizeof(ids)); // 0=L, 1=GF, 2=ARF, Intra-layer prediction enabled. 
- layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; - layer_flags[2] = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_ARF; - layer_flags[1] = - layer_flags[3] = VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF; + layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF | + VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | + VP8_EFLAG_NO_UPD_ARF; + layer_flags[2] = + VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF; + layer_flags[1] = layer_flags[3] = + VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF; break; } case 7: { // NOTE: Probably of academic interest only. // 5-layers, 16-frame period. - int ids[16] = {0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4}; + int ids[16] = { 0, 4, 3, 4, 2, 4, 3, 4, 1, 4, 3, 4, 2, 4, 3, 4 }; cfg->ts_periodicity = 16; *flag_periodicity = 16; cfg->ts_number_layers = 5; @@ -313,28 +317,21 @@ static void set_temporal_layer_pattern(int layering_mode, cfg->ts_rate_decimator[3] = 2; cfg->ts_rate_decimator[4] = 1; memcpy(cfg->ts_layer_id, ids, sizeof(ids)); - layer_flags[0] = VPX_EFLAG_FORCE_KF; - layer_flags[1] = - layer_flags[3] = - layer_flags[5] = - layer_flags[7] = - layer_flags[9] = - layer_flags[11] = - layer_flags[13] = - layer_flags[15] = VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF; - layer_flags[2] = - layer_flags[6] = - layer_flags[10] = - layer_flags[14] = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_GF; - layer_flags[4] = - layer_flags[12] = VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_UPD_ARF; - layer_flags[8] = VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF; + layer_flags[0] = VPX_EFLAG_FORCE_KF; + layer_flags[1] = layer_flags[3] = layer_flags[5] = layer_flags[7] = + layer_flags[9] = layer_flags[11] = layer_flags[13] = layer_flags[15] = + VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | + VP8_EFLAG_NO_UPD_ARF; + layer_flags[2] = layer_flags[6] = layer_flags[10] = layer_flags[14] = + VP8_EFLAG_NO_UPD_ARF | 
VP8_EFLAG_NO_UPD_GF; + layer_flags[4] = layer_flags[12] = + VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_UPD_ARF; + layer_flags[8] = VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF; break; } case 8: { // 2-layers, with sync point at first frame of layer 1. - int ids[2] = {0, 1}; + int ids[2] = { 0, 1 }; cfg->ts_periodicity = 2; *flag_periodicity = 8; cfg->ts_number_layers = 2; @@ -346,17 +343,17 @@ static void set_temporal_layer_pattern(int layering_mode, // key frame. Sync point every 8 frames. // Layer 0: predict from L and ARF, update L and G. - layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_UPD_ARF; + layer_flags[0] = + VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF; // Layer 1: sync point: predict from L and ARF, and update G. - layer_flags[1] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_ARF; + layer_flags[1] = + VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF; // Layer 0, predict from L and ARF, update L. - layer_flags[2] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF; + layer_flags[2] = + VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; // Layer 1: predict from L, G and ARF, and update G. layer_flags[3] = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_ENTROPY; + VP8_EFLAG_NO_UPD_ENTROPY; // Layer 0. layer_flags[4] = layer_flags[2]; // Layer 1. @@ -365,11 +362,11 @@ static void set_temporal_layer_pattern(int layering_mode, layer_flags[6] = layer_flags[4]; // Layer 1. layer_flags[7] = layer_flags[5]; - break; + break; } case 9: { // 3-layers: Sync points for layer 1 and 2 every 8 frames. - int ids[4] = {0, 2, 1, 2}; + int ids[4] = { 0, 2, 1, 2 }; cfg->ts_periodicity = 4; *flag_periodicity = 8; cfg->ts_number_layers = 3; @@ -378,20 +375,21 @@ static void set_temporal_layer_pattern(int layering_mode, cfg->ts_rate_decimator[2] = 1; memcpy(cfg->ts_layer_id, ids, sizeof(ids)); // 0=L, 1=GF, 2=ARF. 
- layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; + layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_REF_GF | + VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_GF | + VP8_EFLAG_NO_UPD_ARF; layer_flags[1] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | + VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF; + layer_flags[2] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | + VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF; + layer_flags[3] = layer_flags[5] = VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF; - layer_flags[2] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | - VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF; - layer_flags[3] = - layer_flags[5] = VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF; layer_flags[4] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | - VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; - layer_flags[6] = VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_ARF; + VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; + layer_flags[6] = + VP8_EFLAG_NO_REF_ARF | VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ARF; layer_flags[7] = VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_ENTROPY; + VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_ENTROPY; break; } case 10: { @@ -399,7 +397,7 @@ static void set_temporal_layer_pattern(int layering_mode, // and is only updated on key frame. // Sync points for layer 1 and 2 every 8 frames. - int ids[4] = {0, 2, 1, 2}; + int ids[4] = { 0, 2, 1, 2 }; cfg->ts_periodicity = 4; *flag_periodicity = 8; cfg->ts_number_layers = 3; @@ -409,21 +407,21 @@ static void set_temporal_layer_pattern(int layering_mode, memcpy(cfg->ts_layer_id, ids, sizeof(ids)); // 0=L, 1=GF, 2=ARF. // Layer 0: predict from L and ARF; update L and G. 
- layer_flags[0] = VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_REF_GF; + layer_flags[0] = + VPX_EFLAG_FORCE_KF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF; // Layer 2: sync point: predict from L and ARF; update none. layer_flags[1] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_ENTROPY; + VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST | + VP8_EFLAG_NO_UPD_ENTROPY; // Layer 1: sync point: predict from L and ARF; update G. - layer_flags[2] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_UPD_LAST; + layer_flags[2] = + VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST; // Layer 2: predict from L, G, ARF; update none. layer_flags[3] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY; + VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY; // Layer 0: predict from L and ARF; update L. - layer_flags[4] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_REF_GF; + layer_flags[4] = + VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF; // Layer 2: predict from L, G, ARF; update none. layer_flags[5] = layer_flags[3]; // Layer 1: predict from L, G, ARF; update G. @@ -438,7 +436,7 @@ static void set_temporal_layer_pattern(int layering_mode, // This was added to compare with vp9_spatial_svc_encoder. // 3-layers, 4-frame period. - int ids[4] = {0, 2, 1, 2}; + int ids[4] = { 0, 2, 1, 2 }; cfg->ts_periodicity = 4; *flag_periodicity = 4; cfg->ts_number_layers = 3; @@ -448,20 +446,20 @@ static void set_temporal_layer_pattern(int layering_mode, memcpy(cfg->ts_layer_id, ids, sizeof(ids)); // 0=L, 1=GF, 2=ARF, Intra-layer prediction disabled. 
layer_flags[0] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | - VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; + VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; layer_flags[2] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | - VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST; + VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST; layer_flags[1] = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF | - VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF; + VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF; layer_flags[3] = VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_ARF | - VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF; + VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF; break; } case 12: default: { // 3-layers structure as in case 10, but no sync/refresh points for // layer 1 and 2. - int ids[4] = {0, 2, 1, 2}; + int ids[4] = { 0, 2, 1, 2 }; cfg->ts_periodicity = 4; *flag_periodicity = 8; cfg->ts_number_layers = 3; @@ -471,15 +469,15 @@ static void set_temporal_layer_pattern(int layering_mode, memcpy(cfg->ts_layer_id, ids, sizeof(ids)); // 0=L, 1=GF, 2=ARF. // Layer 0: predict from L and ARF; update L. - layer_flags[0] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_REF_GF; + layer_flags[0] = + VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF; layer_flags[4] = layer_flags[0]; // Layer 1: predict from L, G, ARF; update G. layer_flags[2] = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST; layer_flags[6] = layer_flags[2]; // Layer 2: predict from L, G, ARF; update none. 
layer_flags[1] = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY; + VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_ENTROPY; layer_flags[3] = layer_flags[1]; layer_flags[5] = layer_flags[1]; layer_flags[7] = layer_flags[1]; @@ -489,7 +487,7 @@ static void set_temporal_layer_pattern(int layering_mode, } int main(int argc, char **argv) { - VpxVideoWriter *outfile[VPX_TS_MAX_LAYERS] = {NULL}; + VpxVideoWriter *outfile[VPX_TS_MAX_LAYERS] = { NULL }; vpx_codec_ctx_t codec; vpx_codec_enc_cfg_t cfg; int frame_cnt = 0; @@ -502,15 +500,15 @@ int main(int argc, char **argv) { int got_data; int flags = 0; unsigned int i; - int pts = 0; // PTS starts at 0. + int pts = 0; // PTS starts at 0. int frame_duration = 1; // 1 timebase tick per frame. int layering_mode = 0; - int layer_flags[VPX_TS_MAX_PERIODICITY] = {0}; + int layer_flags[VPX_TS_MAX_PERIODICITY] = { 0 }; int flag_periodicity = 1; #if VPX_ENCODER_ABI_VERSION > (4 + VPX_CODEC_ABI_VERSION) - vpx_svc_layer_id_t layer_id = {0, 0}; + vpx_svc_layer_id_t layer_id = { 0, 0 }; #else - vpx_svc_layer_id_t layer_id = {0}; + vpx_svc_layer_id_t layer_id = { 0 }; #endif const VpxInterface *encoder = NULL; FILE *infile = NULL; @@ -526,7 +524,7 @@ int main(int argc, char **argv) { #endif // CONFIG_VP9_HIGHBITDEPTH double sum_bitrate = 0.0; double sum_bitrate2 = 0.0; - double framerate = 30.0; + double framerate = 30.0; exec_name = argv[0]; // Check usage and arguments. @@ -534,27 +532,28 @@ int main(int argc, char **argv) { #if CONFIG_VP9_HIGHBITDEPTH die("Usage: %s " " " - " ... \n", argv[0]); + " ... \n", + argv[0]); #else die("Usage: %s " " " - " ... \n", argv[0]); + " ... 
\n", + argv[0]); #endif // CONFIG_VP9_HIGHBITDEPTH } encoder = get_vpx_encoder_by_name(argv[3]); - if (!encoder) - die("Unsupported codec."); + if (!encoder) die("Unsupported codec."); printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface())); - width = strtol(argv[4], NULL, 0); - height = strtol(argv[5], NULL, 0); + width = (unsigned int)strtoul(argv[4], NULL, 0); + height = (unsigned int)strtoul(argv[5], NULL, 0); if (width < 16 || width % 2 || height < 16 || height % 2) { die("Invalid resolution: %d x %d", width, height); } - layering_mode = strtol(argv[10], NULL, 0); + layering_mode = (int)strtol(argv[10], NULL, 0); if (layering_mode < 0 || layering_mode > 13) { die("Invalid layering mode (0..12) %s", argv[10]); } @@ -564,7 +563,7 @@ int main(int argc, char **argv) { } #if CONFIG_VP9_HIGHBITDEPTH - switch (strtol(argv[argc-1], NULL, 0)) { + switch (strtol(argv[argc - 1], NULL, 0)) { case 8: bit_depth = VPX_BITS_8; input_bit_depth = 8; @@ -577,13 +576,11 @@ int main(int argc, char **argv) { bit_depth = VPX_BITS_12; input_bit_depth = 12; break; - default: - die("Invalid bit depth (8, 10, 12) %s", argv[argc-1]); + default: die("Invalid bit depth (8, 10, 12) %s", argv[argc - 1]); } - if (!vpx_img_alloc(&raw, - bit_depth == VPX_BITS_8 ? VPX_IMG_FMT_I420 : - VPX_IMG_FMT_I42016, - width, height, 32)) { + if (!vpx_img_alloc( + &raw, bit_depth == VPX_BITS_8 ? VPX_IMG_FMT_I420 : VPX_IMG_FMT_I42016, + width, height, 32)) { die("Failed to allocate image", width, height); } #else @@ -612,18 +609,17 @@ int main(int argc, char **argv) { #endif // CONFIG_VP9_HIGHBITDEPTH // Timebase format e.g. 30fps: numerator=1, demoninator = 30. 
- cfg.g_timebase.num = strtol(argv[6], NULL, 0); - cfg.g_timebase.den = strtol(argv[7], NULL, 0); + cfg.g_timebase.num = (int)strtol(argv[6], NULL, 0); + cfg.g_timebase.den = (int)strtol(argv[7], NULL, 0); - speed = strtol(argv[8], NULL, 0); + speed = (int)strtol(argv[8], NULL, 0); if (speed < 0) { die("Invalid speed setting: must be positive"); } for (i = min_args_base; - (int)i < min_args_base + mode_to_num_layers[layering_mode]; - ++i) { - rc.layer_target_bitrate[i - 11] = strtol(argv[i], NULL, 0); + (int)i < min_args_base + mode_to_num_layers[layering_mode]; ++i) { + rc.layer_target_bitrate[i - 11] = (int)strtol(argv[i], NULL, 0); if (strncmp(encoder->name, "vp8", 3) == 0) cfg.ts_target_bitrate[i - 11] = rc.layer_target_bitrate[i - 11]; else if (strncmp(encoder->name, "vp9", 3) == 0) @@ -631,12 +627,11 @@ int main(int argc, char **argv) { } // Real time parameters. - cfg.rc_dropframe_thresh = strtol(argv[9], NULL, 0); + cfg.rc_dropframe_thresh = (unsigned int)strtoul(argv[9], NULL, 0); cfg.rc_end_usage = VPX_CBR; cfg.rc_min_quantizer = 2; cfg.rc_max_quantizer = 56; - if (strncmp(encoder->name, "vp9", 3) == 0) - cfg.rc_max_quantizer = 52; + if (strncmp(encoder->name, "vp9", 3) == 0) cfg.rc_max_quantizer = 52; cfg.rc_undershoot_pct = 50; cfg.rc_overshoot_pct = 50; cfg.rc_buf_initial_sz = 500; @@ -651,7 +646,7 @@ int main(int argc, char **argv) { // Enable error resilient mode. cfg.g_error_resilient = 1; - cfg.g_lag_in_frames = 0; + cfg.g_lag_in_frames = 0; cfg.kf_mode = VPX_KF_AUTO; // Disable automatic keyframe placement. 
@@ -659,9 +654,7 @@ int main(int argc, char **argv) { cfg.temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS; - set_temporal_layer_pattern(layering_mode, - &cfg, - layer_flags, + set_temporal_layer_pattern(layering_mode, &cfg, layer_flags, &flag_periodicity); set_rate_control_metrics(&rc, &cfg); @@ -688,15 +681,14 @@ int main(int argc, char **argv) { snprintf(file_name, sizeof(file_name), "%s_%d.ivf", argv[2], i); outfile[i] = vpx_video_writer_open(file_name, kContainerIVF, &info); - if (!outfile[i]) - die("Failed to open %s for writing", file_name); + if (!outfile[i]) die("Failed to open %s for writing", file_name); assert(outfile[i] != NULL); } // No spatial layers in this encoder. cfg.ss_number_layers = 1; - // Initialize codec. +// Initialize codec. #if CONFIG_VP9_HIGHBITDEPTH if (vpx_codec_enc_init( &codec, encoder->codec_interface(), &cfg, @@ -712,14 +704,15 @@ int main(int argc, char **argv) { vpx_codec_control(&codec, VP8E_SET_STATIC_THRESHOLD, 1); } else if (strncmp(encoder->name, "vp9", 3) == 0) { vpx_svc_extra_cfg_t svc_params; + memset(&svc_params, 0, sizeof(svc_params)); vpx_codec_control(&codec, VP8E_SET_CPUUSED, speed); vpx_codec_control(&codec, VP9E_SET_AQ_MODE, 3); vpx_codec_control(&codec, VP9E_SET_FRAME_PERIODIC_BOOST, 0); - vpx_codec_control(&codec, VP9E_SET_NOISE_SENSITIVITY, 0); + vpx_codec_control(&codec, VP9E_SET_NOISE_SENSITIVITY, kDenoiserOff); vpx_codec_control(&codec, VP8E_SET_STATIC_THRESHOLD, 1); vpx_codec_control(&codec, VP9E_SET_TUNE_CONTENT, 0); vpx_codec_control(&codec, VP9E_SET_TILE_COLUMNS, (cfg.g_threads >> 1)); - if (vpx_codec_control(&codec, VP9E_SET_SVC, layering_mode > 0 ? 1: 0)) + if (vpx_codec_control(&codec, VP9E_SET_SVC, layering_mode > 0 ? 
1 : 0)) die_codec(&codec, "Failed to set SVC"); for (i = 0; i < cfg.ts_number_layers; ++i) { svc_params.max_quantizers[i] = cfg.rc_max_quantizer; @@ -760,14 +753,12 @@ int main(int argc, char **argv) { layer_id.temporal_layer_id); } flags = layer_flags[frame_cnt % flag_periodicity]; - if (layering_mode == 0) - flags = 0; + if (layering_mode == 0) flags = 0; frame_avail = vpx_img_read(&raw, infile); - if (frame_avail) - ++rc.layer_input_frames[layer_id.temporal_layer_id]; + if (frame_avail) ++rc.layer_input_frames[layer_id.temporal_layer_id]; vpx_usec_timer_start(&timer); - if (vpx_codec_encode(&codec, frame_avail? &raw : NULL, pts, 1, flags, - VPX_DL_REALTIME)) { + if (vpx_codec_encode(&codec, frame_avail ? &raw : NULL, pts, 1, flags, + VPX_DL_REALTIME)) { die_codec(&codec, "Failed to encode frame"); } vpx_usec_timer_mark(&timer); @@ -777,12 +768,12 @@ int main(int argc, char **argv) { layer_flags[0] &= ~VPX_EFLAG_FORCE_KF; } got_data = 0; - while ( (pkt = vpx_codec_get_cx_data(&codec, &iter)) ) { + while ((pkt = vpx_codec_get_cx_data(&codec, &iter))) { got_data = 1; switch (pkt->kind) { case VPX_CODEC_CX_FRAME_PKT: for (i = cfg.ts_layer_id[frame_cnt % cfg.ts_periodicity]; - i < cfg.ts_number_layers; ++i) { + i < cfg.ts_number_layers; ++i) { vpx_video_writer_write_frame(outfile[i], pkt->data.frame.buf, pkt->data.frame.sz, pts); ++rc.layer_tot_enc_frames[i]; @@ -825,8 +816,7 @@ int main(int argc, char **argv) { } } break; - default: - break; + default: break; } } ++frame_cnt; @@ -836,16 +826,13 @@ int main(int argc, char **argv) { printout_rate_control_summary(&rc, &cfg, frame_cnt); printf("\n"); printf("Frame cnt and encoding time/FPS stats for encoding: %d %f %f \n", - frame_cnt, - 1000 * (float)cx_time / (double)(frame_cnt * 1000000), - 1000000 * (double)frame_cnt / (double)cx_time); + frame_cnt, 1000 * (float)cx_time / (double)(frame_cnt * 1000000), + 1000000 * (double)frame_cnt / (double)cx_time); - if (vpx_codec_destroy(&codec)) - die_codec(&codec, "Failed to 
destroy codec"); + if (vpx_codec_destroy(&codec)) die_codec(&codec, "Failed to destroy codec"); // Try to rewrite the output file headers with the actual frame count. - for (i = 0; i < cfg.ts_number_layers; ++i) - vpx_video_writer_close(outfile[i]); + for (i = 0; i < cfg.ts_number_layers; ++i) vpx_video_writer_close(outfile[i]); vpx_img_free(&raw); return EXIT_SUCCESS; diff --git a/libs/libvpx/ivfdec.c b/libs/libvpx/ivfdec.c index 6dcd66f734..f64e594ab0 100644 --- a/libs/libvpx/ivfdec.c +++ b/libs/libvpx/ivfdec.c @@ -23,7 +23,7 @@ static void fix_framerate(int *num, int *den) { // we can guess the framerate using only the timebase in this // case. Other files would require reading ahead to guess the // timebase, like we do for webm. - if (*num < 1000) { + if (*den > 0 && *den < 1000000000 && *num > 0 && *num < 1000) { // Correct for the factor of 2 applied to the timebase in the encoder. if (*num & 1) *den *= 2; @@ -46,7 +46,8 @@ int file_is_ivf(struct VpxInputContext *input_ctx) { is_ivf = 1; if (mem_get_le16(raw_hdr + 4) != 0) { - fprintf(stderr, "Error: Unrecognized IVF version! This file may not" + fprintf(stderr, + "Error: Unrecognized IVF version! 
This file may not" " decode properly."); } @@ -69,14 +70,13 @@ int file_is_ivf(struct VpxInputContext *input_ctx) { return is_ivf; } -int ivf_read_frame(FILE *infile, uint8_t **buffer, - size_t *bytes_read, size_t *buffer_size) { - char raw_header[IVF_FRAME_HDR_SZ] = {0}; +int ivf_read_frame(FILE *infile, uint8_t **buffer, size_t *bytes_read, + size_t *buffer_size) { + char raw_header[IVF_FRAME_HDR_SZ] = { 0 }; size_t frame_size = 0; if (fread(raw_header, IVF_FRAME_HDR_SZ, 1, infile) != 1) { - if (!feof(infile)) - warn("Failed to read frame size\n"); + if (!feof(infile)) warn("Failed to read frame size\n"); } else { frame_size = mem_get_le32(raw_header); diff --git a/libs/libvpx/ivfdec.h b/libs/libvpx/ivfdec.h index dd29cc6174..af725572b4 100644 --- a/libs/libvpx/ivfdec.h +++ b/libs/libvpx/ivfdec.h @@ -18,11 +18,11 @@ extern "C" { int file_is_ivf(struct VpxInputContext *input); -int ivf_read_frame(FILE *infile, uint8_t **buffer, - size_t *bytes_read, size_t *buffer_size); +int ivf_read_frame(FILE *infile, uint8_t **buffer, size_t *bytes_read, + size_t *buffer_size); #ifdef __cplusplus -} /* extern "C" */ +} /* extern "C" */ #endif #endif // IVFDEC_H_ diff --git a/libs/libvpx/ivfenc.c b/libs/libvpx/ivfenc.c index 4a97c42731..a50d31839d 100644 --- a/libs/libvpx/ivfenc.c +++ b/libs/libvpx/ivfenc.c @@ -13,10 +13,8 @@ #include "vpx/vpx_encoder.h" #include "vpx_ports/mem_ops.h" -void ivf_write_file_header(FILE *outfile, - const struct vpx_codec_enc_cfg *cfg, - unsigned int fourcc, - int frame_cnt) { +void ivf_write_file_header(FILE *outfile, const struct vpx_codec_enc_cfg *cfg, + unsigned int fourcc, int frame_cnt) { char header[32]; header[0] = 'D'; diff --git a/libs/libvpx/ivfenc.h b/libs/libvpx/ivfenc.h index 6623687e84..ebdce47be8 100644 --- a/libs/libvpx/ivfenc.h +++ b/libs/libvpx/ivfenc.h @@ -19,17 +19,15 @@ struct vpx_codec_cx_pkt; extern "C" { #endif -void ivf_write_file_header(FILE *outfile, - const struct vpx_codec_enc_cfg *cfg, - uint32_t fourcc, - int 
frame_cnt); +void ivf_write_file_header(FILE *outfile, const struct vpx_codec_enc_cfg *cfg, + uint32_t fourcc, int frame_cnt); void ivf_write_frame_header(FILE *outfile, int64_t pts, size_t frame_size); void ivf_write_frame_size(FILE *outfile, size_t frame_size); #ifdef __cplusplus -} /* extern "C" */ +} /* extern "C" */ #endif #endif // IVFENC_H_ diff --git a/libs/libvpx/libs.mk b/libs/libvpx/libs.mk index 6129ff258b..9a6092a510 100644 --- a/libs/libvpx/libs.mk +++ b/libs/libvpx/libs.mk @@ -107,41 +107,7 @@ ifeq ($(CONFIG_VP9_DECODER),yes) endif VP9_PREFIX=vp9/ -$(BUILD_PFX)$(VP9_PREFIX)%.c.o: - -# VP10 make file -ifeq ($(CONFIG_VP10),yes) - VP10_PREFIX=vp10/ - include $(SRC_PATH_BARE)/$(VP10_PREFIX)vp10_common.mk -endif - -ifeq ($(CONFIG_VP10_ENCODER),yes) - VP10_PREFIX=vp10/ - include $(SRC_PATH_BARE)/$(VP10_PREFIX)vp10cx.mk - CODEC_SRCS-yes += $(addprefix $(VP10_PREFIX),$(call enabled,VP10_CX_SRCS)) - CODEC_EXPORTS-yes += $(addprefix $(VP10_PREFIX),$(VP10_CX_EXPORTS)) - CODEC_SRCS-yes += $(VP10_PREFIX)vp10cx.mk vpx/vp8.h vpx/vp8cx.h - INSTALL-LIBS-yes += include/vpx/vp8.h include/vpx/vp8cx.h - INSTALL-LIBS-$(CONFIG_SPATIAL_SVC) += include/vpx/svc_context.h - INSTALL_MAPS += include/vpx/% $(SRC_PATH_BARE)/$(VP10_PREFIX)/% - CODEC_DOC_SRCS += vpx/vp8.h vpx/vp8cx.h - CODEC_DOC_SECTIONS += vp9 vp9_encoder -endif - -ifeq ($(CONFIG_VP10_DECODER),yes) - VP10_PREFIX=vp10/ - include $(SRC_PATH_BARE)/$(VP10_PREFIX)vp10dx.mk - CODEC_SRCS-yes += $(addprefix $(VP10_PREFIX),$(call enabled,VP10_DX_SRCS)) - CODEC_EXPORTS-yes += $(addprefix $(VP10_PREFIX),$(VP10_DX_EXPORTS)) - CODEC_SRCS-yes += $(VP10_PREFIX)vp10dx.mk vpx/vp8.h vpx/vp8dx.h - INSTALL-LIBS-yes += include/vpx/vp8.h include/vpx/vp8dx.h - INSTALL_MAPS += include/vpx/% $(SRC_PATH_BARE)/$(VP10_PREFIX)/% - CODEC_DOC_SRCS += vpx/vp8.h vpx/vp8dx.h - CODEC_DOC_SECTIONS += vp9 vp9_decoder -endif - -VP10_PREFIX=vp10/ -$(BUILD_PFX)$(VP10_PREFIX)%.c.o: +$(BUILD_PFX)$(VP9_PREFIX)%.c.o: CFLAGS += -Wextra ifeq 
($(CONFIG_ENCODERS),yes) CODEC_DOC_SECTIONS += encoder @@ -183,6 +149,9 @@ INSTALL-SRCS-$(CONFIG_CODEC_SRCS) += third_party/x86inc/x86inc.asm endif CODEC_EXPORTS-yes += vpx/exports_com CODEC_EXPORTS-$(CONFIG_ENCODERS) += vpx/exports_enc +ifeq ($(CONFIG_SPATIAL_SVC),yes) +CODEC_EXPORTS-$(CONFIG_ENCODERS) += vpx/exports_spatial_svc +endif CODEC_EXPORTS-$(CONFIG_DECODERS) += vpx/exports_dec INSTALL-LIBS-yes += include/vpx/vpx_codec.h @@ -260,7 +229,7 @@ OBJS-yes += $(LIBVPX_OBJS) LIBS-$(if yes,$(CONFIG_STATIC)) += $(BUILD_PFX)libvpx.a $(BUILD_PFX)libvpx_g.a $(BUILD_PFX)libvpx_g.a: $(LIBVPX_OBJS) -SO_VERSION_MAJOR := 3 +SO_VERSION_MAJOR := 4 SO_VERSION_MINOR := 0 SO_VERSION_PATCH := 0 ifeq ($(filter darwin%,$(TGT_OS)),$(TGT_OS)) @@ -270,6 +239,12 @@ EXPORT_FILE := libvpx.syms LIBVPX_SO_SYMLINKS := $(addprefix $(LIBSUBDIR)/, \ libvpx.dylib ) else +ifeq ($(filter iphonesimulator%,$(TGT_OS)),$(TGT_OS)) +LIBVPX_SO := libvpx.$(SO_VERSION_MAJOR).dylib +SHARED_LIB_SUF := .dylib +EXPORT_FILE := libvpx.syms +LIBVPX_SO_SYMLINKS := $(addprefix $(LIBSUBDIR)/, libvpx.dylib) +else ifeq ($(filter os2%,$(TGT_OS)),$(TGT_OS)) LIBVPX_SO := libvpx$(SO_VERSION_MAJOR).dll SHARED_LIB_SUF := _dll.a @@ -285,6 +260,7 @@ LIBVPX_SO_SYMLINKS := $(addprefix $(LIBSUBDIR)/, \ libvpx.so.$(SO_VERSION_MAJOR).$(SO_VERSION_MINOR)) endif endif +endif LIBS-$(CONFIG_SHARED) += $(BUILD_PFX)$(LIBVPX_SO)\ $(notdir $(LIBVPX_SO_SYMLINKS)) \ @@ -394,6 +370,12 @@ $(filter %$(ASM).o,$(OBJS-yes)): $(BUILD_PFX)vpx_config.asm $(shell $(SRC_PATH_BARE)/build/make/version.sh "$(SRC_PATH_BARE)" $(BUILD_PFX)vpx_version.h) CLEAN-OBJS += $(BUILD_PFX)vpx_version.h +# +# Add include path for libwebm sources. +# +ifeq ($(CONFIG_WEBM_IO),yes) + CXXFLAGS += -I$(SRC_PATH_BARE)/third_party/libwebm +endif ## ## libvpx test directives @@ -469,6 +451,7 @@ test_libvpx.$(VCPROJ_SFX): $(LIBVPX_TEST_SRCS) vpx.$(VCPROJ_SFX) gtest.$(VCPROJ_ $(if $(CONFIG_STATIC_MSVCRT),--static-crt) \ --out=$@ $(INTERNAL_CFLAGS) $(CFLAGS) \ -I. 
-I"$(SRC_PATH_BARE)/third_party/googletest/src/include" \ + $(if $(CONFIG_WEBM_IO),-I"$(SRC_PATH_BARE)/third_party/libwebm") \ -L. -l$(CODEC_LIB) -l$(GTEST_LIB) $^ PROJECTS-$(CONFIG_MSVS) += test_libvpx.$(VCPROJ_SFX) diff --git a/libs/libvpx/md5_utils.c b/libs/libvpx/md5_utils.c index f4f893a2d6..093798b833 100644 --- a/libs/libvpx/md5_utils.c +++ b/libs/libvpx/md5_utils.c @@ -20,19 +20,17 @@ * Still in the public domain. */ -#include /* for memcpy() */ +#include /* for memcpy() */ #include "md5_utils.h" -static void -byteSwap(UWORD32 *buf, unsigned words) { +static void byteSwap(UWORD32 *buf, unsigned words) { md5byte *p; /* Only swap bytes for big endian machines */ int i = 1; - if (*(char *)&i == 1) - return; + if (*(char *)&i == 1) return; p = (md5byte *)buf; @@ -47,8 +45,7 @@ byteSwap(UWORD32 *buf, unsigned words) { * Start MD5 accumulation. Set bit count to 0 and buffer to mysterious * initialization constants. */ -void -MD5Init(struct MD5Context *ctx) { +void MD5Init(struct MD5Context *ctx) { ctx->buf[0] = 0x67452301; ctx->buf[1] = 0xefcdab89; ctx->buf[2] = 0x98badcfe; @@ -62,8 +59,7 @@ MD5Init(struct MD5Context *ctx) { * Update context to reflect the concatenation of another buffer full * of bytes. 
*/ -void -MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned len) { +void MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned len) { UWORD32 t; /* Update byte count */ @@ -71,9 +67,9 @@ MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned len) { t = ctx->bytes[0]; if ((ctx->bytes[0] = t + len) < t) - ctx->bytes[1]++; /* Carry from low to high */ + ctx->bytes[1]++; /* Carry from low to high */ - t = 64 - (t & 0x3f); /* Space available in ctx->in (at least 1) */ + t = 64 - (t & 0x3f); /* Space available in ctx->in (at least 1) */ if (t > len) { memcpy((md5byte *)ctx->in + 64 - t, buf, len); @@ -104,8 +100,7 @@ MD5Update(struct MD5Context *ctx, md5byte const *buf, unsigned len) { * Final wrapup - pad to 64-byte boundary with the bit pattern * 1 0* (64-bit count of bits processed, MSB-first) */ -void -MD5Final(md5byte digest[16], struct MD5Context *ctx) { +void MD5Final(md5byte digest[16], struct MD5Context *ctx) { int count = ctx->bytes[0] & 0x3f; /* Number of bytes in ctx->in */ md5byte *p = (md5byte *)ctx->in + count; @@ -115,7 +110,7 @@ MD5Final(md5byte digest[16], struct MD5Context *ctx) { /* Bytes of padding needed to make 56 bytes (-8..55) */ count = 56 - 1 - count; - if (count < 0) { /* Padding forces an extra block */ + if (count < 0) { /* Padding forces an extra block */ memset(p, 0, count + 8); byteSwap(ctx->in, 16); MD5Transform(ctx->buf, ctx->in); @@ -147,16 +142,27 @@ MD5Final(md5byte digest[16], struct MD5Context *ctx) { #define F4(x, y, z) (y ^ (x | ~z)) /* This is the central step in the MD5 algorithm. 
*/ -#define MD5STEP(f,w,x,y,z,in,s) \ - (w += f(x,y,z) + in, w = (w<>(32-s)) + x) +#define MD5STEP(f, w, x, y, z, in, s) \ + (w += f(x, y, z) + in, w = (w << s | w >> (32 - s)) + x) + +#if defined(__clang__) && defined(__has_attribute) +#if __has_attribute(no_sanitize) +#define VPX_NO_UNSIGNED_OVERFLOW_CHECK \ + __attribute__((no_sanitize("unsigned-integer-overflow"))) +#endif +#endif + +#ifndef VPX_NO_UNSIGNED_OVERFLOW_CHECK +#define VPX_NO_UNSIGNED_OVERFLOW_CHECK +#endif /* * The core of the MD5 algorithm, this alters an existing MD5 hash to * reflect the addition of 16 longwords of new data. MD5Update blocks * the data and converts bytes into longwords for this routine. */ -void -MD5Transform(UWORD32 buf[4], UWORD32 const in[16]) { +VPX_NO_UNSIGNED_OVERFLOW_CHECK void MD5Transform(UWORD32 buf[4], + UWORD32 const in[16]) { register UWORD32 a, b, c, d; a = buf[0]; @@ -238,4 +244,6 @@ MD5Transform(UWORD32 buf[4], UWORD32 const in[16]) { buf[3] += d; } +#undef VPX_NO_UNSIGNED_OVERFLOW_CHECK + #endif diff --git a/libs/libvpx/rate_hist.c b/libs/libvpx/rate_hist.c index a77222b161..872a10bae0 100644 --- a/libs/libvpx/rate_hist.c +++ b/libs/libvpx/rate_hist.c @@ -45,8 +45,7 @@ struct rate_hist *init_rate_histogram(const vpx_codec_enc_cfg_t *cfg, hist->samples = cfg->rc_buf_sz * 5 / 4 * fps->num / fps->den / 1000; // prevent division by zero - if (hist->samples == 0) - hist->samples = 1; + if (hist->samples == 0) hist->samples = 1; hist->frames = 0; hist->total = 0; @@ -78,18 +77,16 @@ void update_rate_histogram(struct rate_hist *hist, int64_t avg_bitrate = 0; int64_t sum_sz = 0; const int64_t now = pkt->data.frame.pts * 1000 * - (uint64_t)cfg->g_timebase.num / - (uint64_t)cfg->g_timebase.den; + (uint64_t)cfg->g_timebase.num / + (uint64_t)cfg->g_timebase.den; int idx = hist->frames++ % hist->samples; hist->pts[idx] = now; hist->sz[idx] = (int)pkt->data.frame.sz; - if (now < cfg->rc_buf_initial_sz) - return; + if (now < cfg->rc_buf_initial_sz) return; - if 
(!cfg->rc_target_bitrate) - return; + if (!cfg->rc_target_bitrate) return; then = now; @@ -98,20 +95,16 @@ void update_rate_histogram(struct rate_hist *hist, const int i_idx = (i - 1) % hist->samples; then = hist->pts[i_idx]; - if (now - then > cfg->rc_buf_sz) - break; + if (now - then > cfg->rc_buf_sz) break; sum_sz += hist->sz[i_idx]; } - if (now == then) - return; + if (now == then) return; avg_bitrate = sum_sz * 8 * 1000 / (now - then); idx = (int)(avg_bitrate * (RATE_BINS / 2) / (cfg->rc_target_bitrate * 1000)); - if (idx < 0) - idx = 0; - if (idx > RATE_BINS - 1) - idx = RATE_BINS - 1; + if (idx < 0) idx = 0; + if (idx > RATE_BINS - 1) idx = RATE_BINS - 1; if (hist->bucket[idx].low > avg_bitrate) hist->bucket[idx].low = (int)avg_bitrate; if (hist->bucket[idx].high < avg_bitrate) @@ -120,8 +113,8 @@ void update_rate_histogram(struct rate_hist *hist, hist->total++; } -static int merge_hist_buckets(struct hist_bucket *bucket, - int max_buckets, int *num_buckets) { +static int merge_hist_buckets(struct hist_bucket *bucket, int max_buckets, + int *num_buckets) { int small_bucket = 0, merge_bucket = INT_MAX, big_bucket = 0; int buckets = *num_buckets; int i; @@ -129,10 +122,8 @@ static int merge_hist_buckets(struct hist_bucket *bucket, /* Find the extrema for this list of buckets */ big_bucket = small_bucket = 0; for (i = 0; i < buckets; i++) { - if (bucket[i].count < bucket[small_bucket].count) - small_bucket = i; - if (bucket[i].count > bucket[big_bucket].count) - big_bucket = i; + if (bucket[i].count < bucket[small_bucket].count) small_bucket = i; + if (bucket[i].count > bucket[big_bucket].count) big_bucket = i; } /* If we have too many buckets, merge the smallest with an adjacent @@ -174,13 +165,10 @@ static int merge_hist_buckets(struct hist_bucket *bucket, */ big_bucket = small_bucket = 0; for (i = 0; i < buckets; i++) { - if (i > merge_bucket) - bucket[i] = bucket[i + 1]; + if (i > merge_bucket) bucket[i] = bucket[i + 1]; - if (bucket[i].count < 
bucket[small_bucket].count) - small_bucket = i; - if (bucket[i].count > bucket[big_bucket].count) - big_bucket = i; + if (bucket[i].count < bucket[small_bucket].count) small_bucket = i; + if (bucket[i].count > bucket[big_bucket].count) big_bucket = i; } } @@ -188,8 +176,8 @@ static int merge_hist_buckets(struct hist_bucket *bucket, return bucket[big_bucket].count; } -static void show_histogram(const struct hist_bucket *bucket, - int buckets, int total, int scale) { +static void show_histogram(const struct hist_bucket *bucket, int buckets, + int total, int scale) { const char *pat1, *pat2; int i; @@ -232,8 +220,7 @@ static void show_histogram(const struct hist_bucket *bucket, pct = (float)(100.0 * bucket[i].count / total); len = HIST_BAR_MAX * bucket[i].count / scale; - if (len < 1) - len = 1; + if (len < 1) len = 1; assert(len <= HIST_BAR_MAX); if (bucket[i].low == bucket[i].high) @@ -241,8 +228,7 @@ static void show_histogram(const struct hist_bucket *bucket, else fprintf(stderr, pat2, bucket[i].low, bucket[i].high); - for (j = 0; j < HIST_BAR_MAX; j++) - fprintf(stderr, j < len ? "=" : " "); + for (j = 0; j < HIST_BAR_MAX; j++) fprintf(stderr, j < len ? 
"=" : " "); fprintf(stderr, "\t%5d (%6.2f%%)\n", bucket[i].count, pct); } } @@ -268,14 +254,13 @@ void show_q_histogram(const int counts[64], int max_buckets) { show_histogram(bucket, buckets, total, scale); } -void show_rate_histogram(struct rate_hist *hist, - const vpx_codec_enc_cfg_t *cfg, int max_buckets) { +void show_rate_histogram(struct rate_hist *hist, const vpx_codec_enc_cfg_t *cfg, + int max_buckets) { int i, scale; int buckets = 0; for (i = 0; i < RATE_BINS; i++) { - if (hist->bucket[i].low == INT_MAX) - continue; + if (hist->bucket[i].low == INT_MAX) continue; hist->bucket[buckets++] = hist->bucket[i]; } diff --git a/libs/libvpx/test/acm_random.h b/libs/libvpx/test/acm_random.h index ff5c93ea1d..c2f6b0e410 100644 --- a/libs/libvpx/test/acm_random.h +++ b/libs/libvpx/test/acm_random.h @@ -23,15 +23,19 @@ class ACMRandom { explicit ACMRandom(int seed) : random_(seed) {} - void Reset(int seed) { - random_.Reseed(seed); - } + void Reset(int seed) { random_.Reseed(seed); } uint16_t Rand16(void) { const uint32_t value = random_.Generate(testing::internal::Random::kMaxRange); return (value >> 15) & 0xffff; } + int16_t Rand9Signed(void) { + // Use 9 bits: values between 255 (0x0FF) and -256 (0x100). + const uint32_t value = random_.Generate(512); + return static_cast(value) - 256; + } + uint8_t Rand8(void) { const uint32_t value = random_.Generate(testing::internal::Random::kMaxRange); @@ -46,17 +50,11 @@ class ACMRandom { return r < 128 ? 
r << 4 : r >> 4; } - int PseudoUniform(int range) { - return random_.Generate(range); - } + int PseudoUniform(int range) { return random_.Generate(range); } - int operator()(int n) { - return PseudoUniform(n); - } + int operator()(int n) { return PseudoUniform(n); } - static int DeterministicSeed(void) { - return 0xbaba; - } + static int DeterministicSeed(void) { return 0xbaba; } private: testing::internal::Random random_; diff --git a/libs/libvpx/test/active_map_refresh_test.cc b/libs/libvpx/test/active_map_refresh_test.cc index c94566143b..d893635505 100644 --- a/libs/libvpx/test/active_map_refresh_test.cc +++ b/libs/libvpx/test/active_map_refresh_test.cc @@ -17,8 +17,8 @@ namespace { // Check if any pixel in a 16x16 macroblock varies between frames. -int CheckMb(const vpx_image_t ¤t, const vpx_image_t &previous, - int mb_r, int mb_c) { +int CheckMb(const vpx_image_t ¤t, const vpx_image_t &previous, int mb_r, + int mb_c) { for (int plane = 0; plane < 3; plane++) { int r = 16 * mb_r; int c0 = 16 * mb_c; @@ -37,8 +37,9 @@ int CheckMb(const vpx_image_t ¤t, const vpx_image_t &previous, for (; r < r_top; ++r) { for (int c = c0; c < c_top; ++c) { if (current.planes[plane][current.stride[plane] * r + c] != - previous.planes[plane][previous.stride[plane] * r + c]) + previous.planes[plane][previous.stride[plane] * r + c]) { return 1; + } } } } diff --git a/libs/libvpx/test/active_map_test.cc b/libs/libvpx/test/active_map_test.cc index 0221995191..1d24f956f5 100644 --- a/libs/libvpx/test/active_map_test.cc +++ b/libs/libvpx/test/active_map_test.cc @@ -39,6 +39,7 @@ class ActiveMapTest encoder->Control(VP8E_SET_CPUUSED, cpu_used_); } else if (video->frame() == 3) { vpx_active_map_t map = vpx_active_map_t(); + /* clang-format off */ uint8_t active_map[9 * 13] = { 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, @@ -50,6 +51,7 @@ class ActiveMapTest 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, }; + /* clang-format 
on */ map.cols = (kWidth + 15) / 16; map.rows = (kHeight + 15) / 16; ASSERT_EQ(map.cols, 13u); @@ -77,13 +79,13 @@ TEST_P(ActiveMapTest, Test) { cfg_.rc_end_usage = VPX_CBR; cfg_.kf_max_dist = 90000; - ::libvpx_test::I420VideoSource video("hantro_odd.yuv", kWidth, kHeight, 30, - 1, 0, 20); + ::libvpx_test::I420VideoSource video("hantro_odd.yuv", kWidth, kHeight, 30, 1, + 0, 20); ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); } VP9_INSTANTIATE_TEST_CASE(ActiveMapTest, ::testing::Values(::libvpx_test::kRealTime), - ::testing::Range(0, 6)); + ::testing::Range(0, 9)); } // namespace diff --git a/libs/libvpx/test/add_noise_test.cc b/libs/libvpx/test/add_noise_test.cc new file mode 100644 index 0000000000..eae32c33bb --- /dev/null +++ b/libs/libvpx/test/add_noise_test.cc @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include +#include "test/clear_system_state.h" +#include "test/register_state_check.h" +#include "third_party/googletest/src/include/gtest/gtest.h" +#include "./vpx_dsp_rtcd.h" +#include "vpx/vpx_integer.h" +#include "vpx_dsp/postproc.h" +#include "vpx_mem/vpx_mem.h" + +namespace { + +static const int kNoiseSize = 3072; + +// TODO(jimbankoski): make width and height integers not unsigned. 
+typedef void (*AddNoiseFunc)(uint8_t *start, const int8_t *noise, + int blackclamp, int whiteclamp, int width, + int height, int pitch); + +class AddNoiseTest : public ::testing::TestWithParam { + public: + virtual void TearDown() { libvpx_test::ClearSystemState(); } + virtual ~AddNoiseTest() {} +}; + +double stddev6(char a, char b, char c, char d, char e, char f) { + const double n = (a + b + c + d + e + f) / 6.0; + const double v = ((a - n) * (a - n) + (b - n) * (b - n) + (c - n) * (c - n) + + (d - n) * (d - n) + (e - n) * (e - n) + (f - n) * (f - n)) / + 6.0; + return sqrt(v); +} + +TEST_P(AddNoiseTest, CheckNoiseAdded) { + const int width = 64; + const int height = 64; + const int image_size = width * height; + int8_t noise[kNoiseSize]; + const int clamp = vpx_setup_noise(4.4, noise, kNoiseSize); + uint8_t *const s = + reinterpret_cast(vpx_calloc(image_size, sizeof(*s))); + ASSERT_TRUE(s != NULL); + memset(s, 99, image_size * sizeof(*s)); + + ASM_REGISTER_STATE_CHECK( + GetParam()(s, noise, clamp, clamp, width, height, width)); + + // Check to make sure we don't end up having either the same or no added + // noise either vertically or horizontally. + for (int i = 0; i < image_size - 6 * width - 6; ++i) { + const double hd = stddev6(s[i] - 99, s[i + 1] - 99, s[i + 2] - 99, + s[i + 3] - 99, s[i + 4] - 99, s[i + 5] - 99); + const double vd = stddev6(s[i] - 99, s[i + width] - 99, + s[i + 2 * width] - 99, s[i + 3 * width] - 99, + s[i + 4 * width] - 99, s[i + 5 * width] - 99); + + EXPECT_NE(hd, 0); + EXPECT_NE(vd, 0); + } + + // Initialize pixels in the image to 255 and check for roll over. + memset(s, 255, image_size); + + ASM_REGISTER_STATE_CHECK( + GetParam()(s, noise, clamp, clamp, width, height, width)); + + // Check to make sure don't roll over. + for (int i = 0; i < image_size; ++i) { + EXPECT_GT(static_cast(s[i]), clamp) << "i = " << i; + } + + // Initialize pixels in the image to 0 and check for roll under. 
+ memset(s, 0, image_size); + + ASM_REGISTER_STATE_CHECK( + GetParam()(s, noise, clamp, clamp, width, height, width)); + + // Check to make sure don't roll under. + for (int i = 0; i < image_size; ++i) { + EXPECT_LT(static_cast(s[i]), 255 - clamp) << "i = " << i; + } + + vpx_free(s); +} + +TEST_P(AddNoiseTest, CheckCvsAssembly) { + const int width = 64; + const int height = 64; + const int image_size = width * height; + int8_t noise[kNoiseSize]; + const int clamp = vpx_setup_noise(4.4, noise, kNoiseSize); + + uint8_t *const s = reinterpret_cast(vpx_calloc(image_size, 1)); + uint8_t *const d = reinterpret_cast(vpx_calloc(image_size, 1)); + ASSERT_TRUE(s != NULL); + ASSERT_TRUE(d != NULL); + + memset(s, 99, image_size); + memset(d, 99, image_size); + + srand(0); + ASM_REGISTER_STATE_CHECK( + GetParam()(s, noise, clamp, clamp, width, height, width)); + srand(0); + ASM_REGISTER_STATE_CHECK( + vpx_plane_add_noise_c(d, noise, clamp, clamp, width, height, width)); + + for (int i = 0; i < image_size; ++i) { + EXPECT_EQ(static_cast(s[i]), static_cast(d[i])) << "i = " << i; + } + + vpx_free(d); + vpx_free(s); +} + +INSTANTIATE_TEST_CASE_P(C, AddNoiseTest, + ::testing::Values(vpx_plane_add_noise_c)); + +#if HAVE_SSE2 +INSTANTIATE_TEST_CASE_P(SSE2, AddNoiseTest, + ::testing::Values(vpx_plane_add_noise_sse2)); +#endif + +#if HAVE_MSA +INSTANTIATE_TEST_CASE_P(MSA, AddNoiseTest, + ::testing::Values(vpx_plane_add_noise_msa)); +#endif +} // namespace diff --git a/libs/libvpx/test/alt_ref_aq_segment_test.cc b/libs/libvpx/test/alt_ref_aq_segment_test.cc new file mode 100644 index 0000000000..64a3011eb9 --- /dev/null +++ b/libs/libvpx/test/alt_ref_aq_segment_test.cc @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2012 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "third_party/googletest/src/include/gtest/gtest.h" +#include "test/codec_factory.h" +#include "test/encode_test_driver.h" +#include "test/i420_video_source.h" +#include "test/util.h" + +namespace { + +class AltRefAqSegmentTest + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWith2Params { + protected: + AltRefAqSegmentTest() : EncoderTest(GET_PARAM(0)) {} + virtual ~AltRefAqSegmentTest() {} + + virtual void SetUp() { + InitializeConfig(); + SetMode(GET_PARAM(1)); + set_cpu_used_ = GET_PARAM(2); + aq_mode_ = 0; + alt_ref_aq_mode_ = 0; + } + + virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video, + ::libvpx_test::Encoder *encoder) { + if (video->frame() == 1) { + encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_); + encoder->Control(VP9E_SET_ALT_REF_AQ, alt_ref_aq_mode_); + encoder->Control(VP9E_SET_AQ_MODE, aq_mode_); + encoder->Control(VP8E_SET_MAX_INTRA_BITRATE_PCT, 100); + } + } + + int set_cpu_used_; + int aq_mode_; + int alt_ref_aq_mode_; +}; + +// Validate that this ALT_REF_AQ/AQ segmentation mode +// (ALT_REF_AQ=0, AQ=0/no_aq) +// encodes and decodes without a mismatch. +TEST_P(AltRefAqSegmentTest, TestNoMisMatchAltRefAQ0) { + cfg_.rc_min_quantizer = 8; + cfg_.rc_max_quantizer = 56; + cfg_.rc_end_usage = VPX_VBR; + cfg_.rc_buf_initial_sz = 500; + cfg_.rc_buf_optimal_sz = 500; + cfg_.rc_buf_sz = 1000; + cfg_.rc_target_bitrate = 300; + + aq_mode_ = 0; + alt_ref_aq_mode_ = 1; + + ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, + 30, 1, 0, 100); + + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); +} + +// Validate that this ALT_REF_AQ/AQ segmentation mode +// (ALT_REF_AQ=0, AQ=1/variance_aq) +// encodes and decodes without a mismatch. 
+TEST_P(AltRefAqSegmentTest, TestNoMisMatchAltRefAQ1) { + cfg_.rc_min_quantizer = 8; + cfg_.rc_max_quantizer = 56; + cfg_.rc_end_usage = VPX_VBR; + cfg_.rc_buf_initial_sz = 500; + cfg_.rc_buf_optimal_sz = 500; + cfg_.rc_buf_sz = 1000; + cfg_.rc_target_bitrate = 300; + + aq_mode_ = 1; + alt_ref_aq_mode_ = 1; + + ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, + 30, 1, 0, 100); + + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); +} + +// Validate that this ALT_REF_AQ/AQ segmentation mode +// (ALT_REF_AQ=0, AQ=2/complexity_aq) +// encodes and decodes without a mismatch. +TEST_P(AltRefAqSegmentTest, TestNoMisMatchAltRefAQ2) { + cfg_.rc_min_quantizer = 8; + cfg_.rc_max_quantizer = 56; + cfg_.rc_end_usage = VPX_VBR; + cfg_.rc_buf_initial_sz = 500; + cfg_.rc_buf_optimal_sz = 500; + cfg_.rc_buf_sz = 1000; + cfg_.rc_target_bitrate = 300; + + aq_mode_ = 2; + alt_ref_aq_mode_ = 1; + + ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, + 30, 1, 0, 100); + + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); +} + +// Validate that this ALT_REF_AQ/AQ segmentation mode +// (ALT_REF_AQ=0, AQ=3/cyclicrefresh_aq) +// encodes and decodes without a mismatch. +TEST_P(AltRefAqSegmentTest, TestNoMisMatchAltRefAQ3) { + cfg_.rc_min_quantizer = 8; + cfg_.rc_max_quantizer = 56; + cfg_.rc_end_usage = VPX_VBR; + cfg_.rc_buf_initial_sz = 500; + cfg_.rc_buf_optimal_sz = 500; + cfg_.rc_buf_sz = 1000; + cfg_.rc_target_bitrate = 300; + + aq_mode_ = 3; + alt_ref_aq_mode_ = 1; + + ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, + 30, 1, 0, 100); + + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); +} + +// Validate that this ALT_REF_AQ/AQ segmentation mode +// (ALT_REF_AQ=0, AQ=4/equator360_aq) +// encodes and decodes without a mismatch. 
+TEST_P(AltRefAqSegmentTest, TestNoMisMatchAltRefAQ4) { + cfg_.rc_min_quantizer = 8; + cfg_.rc_max_quantizer = 56; + cfg_.rc_end_usage = VPX_VBR; + cfg_.rc_buf_initial_sz = 500; + cfg_.rc_buf_optimal_sz = 500; + cfg_.rc_buf_sz = 1000; + cfg_.rc_target_bitrate = 300; + + aq_mode_ = 4; + alt_ref_aq_mode_ = 1; + + ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, + 30, 1, 0, 100); + + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); +} + +VP9_INSTANTIATE_TEST_CASE(AltRefAqSegmentTest, + ::testing::Values(::libvpx_test::kOnePassGood, + ::libvpx_test::kTwoPassGood), + ::testing::Range(2, 5)); +} // namespace diff --git a/libs/libvpx/test/altref_test.cc b/libs/libvpx/test/altref_test.cc index af25b72856..f9308c2717 100644 --- a/libs/libvpx/test/altref_test.cc +++ b/libs/libvpx/test/altref_test.cc @@ -14,12 +14,14 @@ #include "test/util.h" namespace { +#if CONFIG_VP8_ENCODER + // lookahead range: [kLookAheadMin, kLookAheadMax). const int kLookAheadMin = 5; const int kLookAheadMax = 26; class AltRefTest : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWithParam { + public ::libvpx_test::CodecTestWithParam { protected: AltRefTest() : EncoderTest(GET_PARAM(0)), altref_count_(0) {} virtual ~AltRefTest() {} @@ -29,9 +31,7 @@ class AltRefTest : public ::libvpx_test::EncoderTest, SetMode(libvpx_test::kTwoPassGood); } - virtual void BeginPassHook(unsigned int pass) { - altref_count_ = 0; - } + virtual void BeginPassHook(unsigned int /*pass*/) { altref_count_ = 0; } virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video, libvpx_test::Encoder *encoder) { @@ -63,7 +63,90 @@ TEST_P(AltRefTest, MonotonicTimestamps) { EXPECT_GE(altref_count(), 1); } - VP8_INSTANTIATE_TEST_CASE(AltRefTest, ::testing::Range(kLookAheadMin, kLookAheadMax)); + +#endif // CONFIG_VP8_ENCODER + +class AltRefForcedKeyTestLarge + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWith2Params { + protected: + AltRefForcedKeyTestLarge() + : 
EncoderTest(GET_PARAM(0)), encoding_mode_(GET_PARAM(1)), + cpu_used_(GET_PARAM(2)), forced_kf_frame_num_(1), frame_num_(0) {} + virtual ~AltRefForcedKeyTestLarge() {} + + virtual void SetUp() { + InitializeConfig(); + SetMode(encoding_mode_); + cfg_.rc_end_usage = VPX_VBR; + cfg_.g_threads = 0; + } + + virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video, + ::libvpx_test::Encoder *encoder) { + if (video->frame() == 0) { + encoder->Control(VP8E_SET_CPUUSED, cpu_used_); + encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1); +#if CONFIG_VP9_ENCODER + // override test default for tile columns if necessary. + if (GET_PARAM(0) == &libvpx_test::kVP9) { + encoder->Control(VP9E_SET_TILE_COLUMNS, 6); + } +#endif + } + frame_flags_ = + (video->frame() == forced_kf_frame_num_) ? VPX_EFLAG_FORCE_KF : 0; + } + + virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) { + if (frame_num_ == forced_kf_frame_num_) { + ASSERT_TRUE(!!(pkt->data.frame.flags & VPX_FRAME_IS_KEY)) + << "Frame #" << frame_num_ << " isn't a keyframe!"; + } + ++frame_num_; + } + + ::libvpx_test::TestMode encoding_mode_; + int cpu_used_; + unsigned int forced_kf_frame_num_; + unsigned int frame_num_; +}; + +TEST_P(AltRefForcedKeyTestLarge, Frame1IsKey) { + const vpx_rational timebase = { 1, 30 }; + const int lag_values[] = { 3, 15, 25, -1 }; + + forced_kf_frame_num_ = 1; + for (int i = 0; lag_values[i] != -1; ++i) { + frame_num_ = 0; + cfg_.g_lag_in_frames = lag_values[i]; + libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, + timebase.den, timebase.num, 0, 30); + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); + } +} + +TEST_P(AltRefForcedKeyTestLarge, ForcedFrameIsKey) { + const vpx_rational timebase = { 1, 30 }; + const int lag_values[] = { 3, 15, 25, -1 }; + + for (int i = 0; lag_values[i] != -1; ++i) { + frame_num_ = 0; + forced_kf_frame_num_ = lag_values[i] - 1; + cfg_.g_lag_in_frames = lag_values[i]; + libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 
288, + timebase.den, timebase.num, 0, 30); + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); + } +} + +VP8_INSTANTIATE_TEST_CASE(AltRefForcedKeyTestLarge, + ::testing::Values(::libvpx_test::kOnePassGood), + ::testing::Range(0, 9)); + +VP9_INSTANTIATE_TEST_CASE(AltRefForcedKeyTestLarge, + ::testing::Values(::libvpx_test::kOnePassGood), + ::testing::Range(0, 9)); } // namespace diff --git a/libs/libvpx/test/aq_segment_test.cc b/libs/libvpx/test/aq_segment_test.cc index 1b9c943562..1c2147fbb2 100644 --- a/libs/libvpx/test/aq_segment_test.cc +++ b/libs/libvpx/test/aq_segment_test.cc @@ -57,7 +57,7 @@ TEST_P(AqSegmentTest, TestNoMisMatchAQ1) { aq_mode_ = 1; ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, - 30, 1, 0, 100); + 30, 1, 0, 100); ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); } @@ -77,7 +77,7 @@ TEST_P(AqSegmentTest, TestNoMisMatchAQ2) { aq_mode_ = 2; ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, - 30, 1, 0, 100); + 30, 1, 0, 100); ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); } @@ -97,7 +97,7 @@ TEST_P(AqSegmentTest, TestNoMisMatchAQ3) { aq_mode_ = 3; ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, - 30, 1, 0, 100); + 30, 1, 0, 100); ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); } diff --git a/libs/libvpx/test/avg_test.cc b/libs/libvpx/test/avg_test.cc index 44d8dd7db5..867a77aa0d 100644 --- a/libs/libvpx/test/avg_test.cc +++ b/libs/libvpx/test/avg_test.cc @@ -31,7 +31,7 @@ class AverageTestBase : public ::testing::Test { AverageTestBase(int width, int height) : width_(width), height_(height) {} static void SetUpTestCase() { - source_data_ = reinterpret_cast( + source_data_ = reinterpret_cast( vpx_memalign(kDataAlignment, kDataBlockSize)); } @@ -40,9 +40,7 @@ class AverageTestBase : public ::testing::Test { source_data_ = NULL; } - virtual void TearDown() { - libvpx_test::ClearSystemState(); - } + virtual void TearDown() { libvpx_test::ClearSystemState(); } protected: // Handle blocks up 
to 4 blocks 64x64 with stride up to 128 @@ -55,47 +53,46 @@ class AverageTestBase : public ::testing::Test { } // Sum Pixels - unsigned int ReferenceAverage8x8(const uint8_t* source, int pitch) { + unsigned int ReferenceAverage8x8(const uint8_t *source, int pitch) { unsigned int average = 0; - for (int h = 0; h < 8; ++h) - for (int w = 0; w < 8; ++w) - average += source[h * pitch + w]; + for (int h = 0; h < 8; ++h) { + for (int w = 0; w < 8; ++w) average += source[h * pitch + w]; + } return ((average + 32) >> 6); } - unsigned int ReferenceAverage4x4(const uint8_t* source, int pitch) { + unsigned int ReferenceAverage4x4(const uint8_t *source, int pitch) { unsigned int average = 0; - for (int h = 0; h < 4; ++h) - for (int w = 0; w < 4; ++w) - average += source[h * pitch + w]; + for (int h = 0; h < 4; ++h) { + for (int w = 0; w < 4; ++w) average += source[h * pitch + w]; + } return ((average + 8) >> 4); } void FillConstant(uint8_t fill_constant) { for (int i = 0; i < width_ * height_; ++i) { - source_data_[i] = fill_constant; + source_data_[i] = fill_constant; } } void FillRandom() { for (int i = 0; i < width_ * height_; ++i) { - source_data_[i] = rnd_.Rand8(); + source_data_[i] = rnd_.Rand8(); } } int width_, height_; - static uint8_t* source_data_; + static uint8_t *source_data_; int source_stride_; ACMRandom rnd_; }; -typedef unsigned int (*AverageFunction)(const uint8_t* s, int pitch); +typedef unsigned int (*AverageFunction)(const uint8_t *s, int pitch); typedef std::tr1::tuple AvgFunc; -class AverageTest - : public AverageTestBase, - public ::testing::WithParamInterface{ +class AverageTest : public AverageTestBase, + public ::testing::WithParamInterface { public: AverageTest() : AverageTestBase(GET_PARAM(0), GET_PARAM(1)) {} @@ -103,17 +100,17 @@ class AverageTest void CheckAverages() { unsigned int expected = 0; if (GET_PARAM(3) == 8) { - expected = ReferenceAverage8x8(source_data_+ GET_PARAM(2), - source_stride_); - } else if (GET_PARAM(3) == 4) { - expected = 
ReferenceAverage4x4(source_data_+ GET_PARAM(2), - source_stride_); + expected = + ReferenceAverage8x8(source_data_ + GET_PARAM(2), source_stride_); + } else if (GET_PARAM(3) == 4) { + expected = + ReferenceAverage4x4(source_data_ + GET_PARAM(2), source_stride_); } - ASM_REGISTER_STATE_CHECK(GET_PARAM(4)(source_data_+ GET_PARAM(2), - source_stride_)); - unsigned int actual = GET_PARAM(4)(source_data_+ GET_PARAM(2), - source_stride_); + ASM_REGISTER_STATE_CHECK( + GET_PARAM(4)(source_data_ + GET_PARAM(2), source_stride_)); + unsigned int actual = + GET_PARAM(4)(source_data_ + GET_PARAM(2), source_stride_); EXPECT_EQ(expected, actual); } @@ -124,23 +121,20 @@ typedef void (*IntProRowFunc)(int16_t hbuf[16], uint8_t const *ref, typedef std::tr1::tuple IntProRowParam; -class IntProRowTest - : public AverageTestBase, - public ::testing::WithParamInterface { +class IntProRowTest : public AverageTestBase, + public ::testing::WithParamInterface { public: IntProRowTest() - : AverageTestBase(16, GET_PARAM(0)), - hbuf_asm_(NULL), - hbuf_c_(NULL) { + : AverageTestBase(16, GET_PARAM(0)), hbuf_asm_(NULL), hbuf_c_(NULL) { asm_func_ = GET_PARAM(1); c_func_ = GET_PARAM(2); } protected: virtual void SetUp() { - hbuf_asm_ = reinterpret_cast( + hbuf_asm_ = reinterpret_cast( vpx_memalign(kDataAlignment, sizeof(*hbuf_asm_) * 16)); - hbuf_c_ = reinterpret_cast( + hbuf_c_ = reinterpret_cast( vpx_memalign(kDataAlignment, sizeof(*hbuf_c_) * 16)); } @@ -169,9 +163,8 @@ typedef int16_t (*IntProColFunc)(uint8_t const *ref, const int width); typedef std::tr1::tuple IntProColParam; -class IntProColTest - : public AverageTestBase, - public ::testing::WithParamInterface { +class IntProColTest : public AverageTestBase, + public ::testing::WithParamInterface { public: IntProColTest() : AverageTestBase(GET_PARAM(0), 1), sum_asm_(0), sum_c_(0) { asm_func_ = GET_PARAM(1); @@ -195,15 +188,14 @@ class IntProColTest typedef int (*SatdFunc)(const int16_t *coeffs, int length); typedef std::tr1::tuple 
SatdTestParam; -class SatdTest - : public ::testing::Test, - public ::testing::WithParamInterface { +class SatdTest : public ::testing::Test, + public ::testing::WithParamInterface { protected: virtual void SetUp() { satd_size_ = GET_PARAM(0); satd_func_ = GET_PARAM(1); rnd_.Reset(ACMRandom::DeterministicSeed()); - src_ = reinterpret_cast( + src_ = reinterpret_cast( vpx_memalign(16, sizeof(*src_) * satd_size_)); ASSERT_TRUE(src_ != NULL); } @@ -235,7 +227,7 @@ class SatdTest ACMRandom rnd_; }; -uint8_t* AverageTestBase::source_data_ = NULL; +uint8_t *AverageTestBase::source_data_ = NULL; TEST_P(AverageTest, MinValue) { FillConstant(0); @@ -286,7 +278,6 @@ TEST_P(IntProColTest, Random) { RunComparison(); } - TEST_P(SatdTest, MinValue) { const int kMin = -32640; const int expected = -kMin * satd_size_; @@ -320,92 +311,86 @@ using std::tr1::make_tuple; INSTANTIATE_TEST_CASE_P( C, AverageTest, - ::testing::Values( - make_tuple(16, 16, 1, 8, &vpx_avg_8x8_c), - make_tuple(16, 16, 1, 4, &vpx_avg_4x4_c))); + ::testing::Values(make_tuple(16, 16, 1, 8, &vpx_avg_8x8_c), + make_tuple(16, 16, 1, 4, &vpx_avg_4x4_c))); -INSTANTIATE_TEST_CASE_P( - C, SatdTest, - ::testing::Values( - make_tuple(16, &vpx_satd_c), - make_tuple(64, &vpx_satd_c), - make_tuple(256, &vpx_satd_c), - make_tuple(1024, &vpx_satd_c))); +INSTANTIATE_TEST_CASE_P(C, SatdTest, + ::testing::Values(make_tuple(16, &vpx_satd_c), + make_tuple(64, &vpx_satd_c), + make_tuple(256, &vpx_satd_c), + make_tuple(1024, &vpx_satd_c))); #if HAVE_SSE2 INSTANTIATE_TEST_CASE_P( SSE2, AverageTest, - ::testing::Values( - make_tuple(16, 16, 0, 8, &vpx_avg_8x8_sse2), - make_tuple(16, 16, 5, 8, &vpx_avg_8x8_sse2), - make_tuple(32, 32, 15, 8, &vpx_avg_8x8_sse2), - make_tuple(16, 16, 0, 4, &vpx_avg_4x4_sse2), - make_tuple(16, 16, 5, 4, &vpx_avg_4x4_sse2), - make_tuple(32, 32, 15, 4, &vpx_avg_4x4_sse2))); + ::testing::Values(make_tuple(16, 16, 0, 8, &vpx_avg_8x8_sse2), + make_tuple(16, 16, 5, 8, &vpx_avg_8x8_sse2), + make_tuple(32, 32, 15, 
8, &vpx_avg_8x8_sse2), + make_tuple(16, 16, 0, 4, &vpx_avg_4x4_sse2), + make_tuple(16, 16, 5, 4, &vpx_avg_4x4_sse2), + make_tuple(32, 32, 15, 4, &vpx_avg_4x4_sse2))); INSTANTIATE_TEST_CASE_P( - SSE2, IntProRowTest, ::testing::Values( - make_tuple(16, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c), - make_tuple(32, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c), - make_tuple(64, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c))); + SSE2, IntProRowTest, + ::testing::Values(make_tuple(16, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c), + make_tuple(32, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c), + make_tuple(64, &vpx_int_pro_row_sse2, + &vpx_int_pro_row_c))); INSTANTIATE_TEST_CASE_P( - SSE2, IntProColTest, ::testing::Values( - make_tuple(16, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c), - make_tuple(32, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c), - make_tuple(64, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c))); + SSE2, IntProColTest, + ::testing::Values(make_tuple(16, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c), + make_tuple(32, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c), + make_tuple(64, &vpx_int_pro_col_sse2, + &vpx_int_pro_col_c))); -INSTANTIATE_TEST_CASE_P( - SSE2, SatdTest, - ::testing::Values( - make_tuple(16, &vpx_satd_sse2), - make_tuple(64, &vpx_satd_sse2), - make_tuple(256, &vpx_satd_sse2), - make_tuple(1024, &vpx_satd_sse2))); +INSTANTIATE_TEST_CASE_P(SSE2, SatdTest, + ::testing::Values(make_tuple(16, &vpx_satd_sse2), + make_tuple(64, &vpx_satd_sse2), + make_tuple(256, &vpx_satd_sse2), + make_tuple(1024, &vpx_satd_sse2))); #endif #if HAVE_NEON INSTANTIATE_TEST_CASE_P( NEON, AverageTest, - ::testing::Values( - make_tuple(16, 16, 0, 8, &vpx_avg_8x8_neon), - make_tuple(16, 16, 5, 8, &vpx_avg_8x8_neon), - make_tuple(32, 32, 15, 8, &vpx_avg_8x8_neon), - make_tuple(16, 16, 0, 4, &vpx_avg_4x4_neon), - make_tuple(16, 16, 5, 4, &vpx_avg_4x4_neon), - make_tuple(32, 32, 15, 4, &vpx_avg_4x4_neon))); + ::testing::Values(make_tuple(16, 16, 0, 8, &vpx_avg_8x8_neon), + make_tuple(16, 16, 5, 8, 
&vpx_avg_8x8_neon), + make_tuple(32, 32, 15, 8, &vpx_avg_8x8_neon), + make_tuple(16, 16, 0, 4, &vpx_avg_4x4_neon), + make_tuple(16, 16, 5, 4, &vpx_avg_4x4_neon), + make_tuple(32, 32, 15, 4, &vpx_avg_4x4_neon))); INSTANTIATE_TEST_CASE_P( - NEON, IntProRowTest, ::testing::Values( - make_tuple(16, &vpx_int_pro_row_neon, &vpx_int_pro_row_c), - make_tuple(32, &vpx_int_pro_row_neon, &vpx_int_pro_row_c), - make_tuple(64, &vpx_int_pro_row_neon, &vpx_int_pro_row_c))); + NEON, IntProRowTest, + ::testing::Values(make_tuple(16, &vpx_int_pro_row_neon, &vpx_int_pro_row_c), + make_tuple(32, &vpx_int_pro_row_neon, &vpx_int_pro_row_c), + make_tuple(64, &vpx_int_pro_row_neon, + &vpx_int_pro_row_c))); INSTANTIATE_TEST_CASE_P( - NEON, IntProColTest, ::testing::Values( - make_tuple(16, &vpx_int_pro_col_neon, &vpx_int_pro_col_c), - make_tuple(32, &vpx_int_pro_col_neon, &vpx_int_pro_col_c), - make_tuple(64, &vpx_int_pro_col_neon, &vpx_int_pro_col_c))); + NEON, IntProColTest, + ::testing::Values(make_tuple(16, &vpx_int_pro_col_neon, &vpx_int_pro_col_c), + make_tuple(32, &vpx_int_pro_col_neon, &vpx_int_pro_col_c), + make_tuple(64, &vpx_int_pro_col_neon, + &vpx_int_pro_col_c))); -INSTANTIATE_TEST_CASE_P( - NEON, SatdTest, - ::testing::Values( - make_tuple(16, &vpx_satd_neon), - make_tuple(64, &vpx_satd_neon), - make_tuple(256, &vpx_satd_neon), - make_tuple(1024, &vpx_satd_neon))); +INSTANTIATE_TEST_CASE_P(NEON, SatdTest, + ::testing::Values(make_tuple(16, &vpx_satd_neon), + make_tuple(64, &vpx_satd_neon), + make_tuple(256, &vpx_satd_neon), + make_tuple(1024, &vpx_satd_neon))); #endif #if HAVE_MSA INSTANTIATE_TEST_CASE_P( MSA, AverageTest, - ::testing::Values( - make_tuple(16, 16, 0, 8, &vpx_avg_8x8_msa), - make_tuple(16, 16, 5, 8, &vpx_avg_8x8_msa), - make_tuple(32, 32, 15, 8, &vpx_avg_8x8_msa), - make_tuple(16, 16, 0, 4, &vpx_avg_4x4_msa), - make_tuple(16, 16, 5, 4, &vpx_avg_4x4_msa), - make_tuple(32, 32, 15, 4, &vpx_avg_4x4_msa))); + ::testing::Values(make_tuple(16, 16, 0, 8, 
&vpx_avg_8x8_msa), + make_tuple(16, 16, 5, 8, &vpx_avg_8x8_msa), + make_tuple(32, 32, 15, 8, &vpx_avg_8x8_msa), + make_tuple(16, 16, 0, 4, &vpx_avg_4x4_msa), + make_tuple(16, 16, 5, 4, &vpx_avg_4x4_msa), + make_tuple(32, 32, 15, 4, &vpx_avg_4x4_msa))); #endif } // namespace diff --git a/libs/libvpx/test/blockiness_test.cc b/libs/libvpx/test/blockiness_test.cc index 0c60baaa38..2fa10192f1 100644 --- a/libs/libvpx/test/blockiness_test.cc +++ b/libs/libvpx/test/blockiness_test.cc @@ -26,11 +26,9 @@ #include "vpx_mem/vpx_mem.h" - -extern "C" -double vp9_get_blockiness(const unsigned char *img1, int img1_pitch, - const unsigned char *img2, int img2_pitch, - int width, int height); +extern "C" double vp9_get_blockiness(const unsigned char *img1, int img1_pitch, + const unsigned char *img2, int img2_pitch, + int width, int height); using libvpx_test::ACMRandom; @@ -40,9 +38,9 @@ class BlockinessTestBase : public ::testing::Test { BlockinessTestBase(int width, int height) : width_(width), height_(height) {} static void SetUpTestCase() { - source_data_ = reinterpret_cast( + source_data_ = reinterpret_cast( vpx_memalign(kDataAlignment, kDataBufferSize)); - reference_data_ = reinterpret_cast( + reference_data_ = reinterpret_cast( vpx_memalign(kDataAlignment, kDataBufferSize)); } @@ -53,14 +51,12 @@ class BlockinessTestBase : public ::testing::Test { reference_data_ = NULL; } - virtual void TearDown() { - libvpx_test::ClearSystemState(); - } + virtual void TearDown() { libvpx_test::ClearSystemState(); } protected: // Handle frames up to 640x480 static const int kDataAlignment = 16; - static const int kDataBufferSize = 640*480; + static const int kDataBufferSize = 640 * 480; virtual void SetUp() { source_stride_ = (width_ + 31) & ~31; @@ -68,8 +64,8 @@ class BlockinessTestBase : public ::testing::Test { rnd_.Reset(ACMRandom::DeterministicSeed()); } - void FillConstant(uint8_t *data, int stride, uint8_t fill_constant, - int width, int height) { + void FillConstant(uint8_t *data, 
int stride, uint8_t fill_constant, int width, + int height) { for (int h = 0; h < height; ++h) { for (int w = 0; w < width; ++w) { data[h * stride + w] = fill_constant; @@ -104,10 +100,11 @@ class BlockinessTestBase : public ::testing::Test { void FillCheckerboard(uint8_t *data, int stride) { for (int h = 0; h < height_; h += 4) { for (int w = 0; w < width_; w += 4) { - if (((h/4) ^ (w/4)) & 1) + if (((h / 4) ^ (w / 4)) & 1) { FillConstant(data + h * stride + w, stride, 255, 4, 4); - else + } else { FillConstant(data + h * stride + w, stride, 0, 4, 4); + } } } } @@ -135,9 +132,9 @@ class BlockinessTestBase : public ::testing::Test { } } int width_, height_; - static uint8_t* source_data_; + static uint8_t *source_data_; int source_stride_; - static uint8_t* reference_data_; + static uint8_t *reference_data_; int reference_stride_; ACMRandom rnd_; @@ -152,32 +149,32 @@ class BlockinessVP9Test BlockinessVP9Test() : BlockinessTestBase(GET_PARAM(0), GET_PARAM(1)) {} protected: - int CheckBlockiness() { - return vp9_get_blockiness(source_data_, source_stride_, - reference_data_, reference_stride_, - width_, height_); + double GetBlockiness() const { + return vp9_get_blockiness(source_data_, source_stride_, reference_data_, + reference_stride_, width_, height_); } }; #endif // CONFIG_VP9_ENCODER -uint8_t* BlockinessTestBase::source_data_ = NULL; -uint8_t* BlockinessTestBase::reference_data_ = NULL; +uint8_t *BlockinessTestBase::source_data_ = NULL; +uint8_t *BlockinessTestBase::reference_data_ = NULL; #if CONFIG_VP9_ENCODER TEST_P(BlockinessVP9Test, SourceBlockierThanReference) { // Source is blockier than reference. 
FillRandomBlocky(source_data_, source_stride_); FillConstant(reference_data_, reference_stride_, 128); - int super_blocky = CheckBlockiness(); + const double super_blocky = GetBlockiness(); - EXPECT_EQ(0, super_blocky) << "Blocky source should produce 0 blockiness."; + EXPECT_DOUBLE_EQ(0.0, super_blocky) + << "Blocky source should produce 0 blockiness."; } TEST_P(BlockinessVP9Test, ReferenceBlockierThanSource) { // Source is blockier than reference. FillConstant(source_data_, source_stride_, 128); FillRandomBlocky(reference_data_, reference_stride_); - int super_blocky = CheckBlockiness(); + const double super_blocky = GetBlockiness(); EXPECT_GT(super_blocky, 0.0) << "Blocky reference should score high for blockiness."; @@ -187,10 +184,10 @@ TEST_P(BlockinessVP9Test, BlurringDecreasesBlockiness) { // Source is blockier than reference. FillConstant(source_data_, source_stride_, 128); FillRandomBlocky(reference_data_, reference_stride_); - int super_blocky = CheckBlockiness(); + const double super_blocky = GetBlockiness(); Blur(reference_data_, reference_stride_, 4); - int less_blocky = CheckBlockiness(); + const double less_blocky = GetBlockiness(); EXPECT_GT(super_blocky, less_blocky) << "A straight blur should decrease blockiness."; @@ -201,17 +198,16 @@ TEST_P(BlockinessVP9Test, WorstCaseBlockiness) { FillConstant(source_data_, source_stride_, 128); FillCheckerboard(reference_data_, reference_stride_); - int super_blocky = CheckBlockiness(); + const double super_blocky = GetBlockiness(); Blur(reference_data_, reference_stride_, 4); - int less_blocky = CheckBlockiness(); + const double less_blocky = GetBlockiness(); EXPECT_GT(super_blocky, less_blocky) << "A straight blur should decrease blockiness."; } #endif // CONFIG_VP9_ENCODER - using std::tr1::make_tuple; //------------------------------------------------------------------------------ @@ -219,9 +215,7 @@ using std::tr1::make_tuple; #if CONFIG_VP9_ENCODER const BlockinessParam c_vp9_tests[] = { - 
make_tuple(320, 240), - make_tuple(318, 242), - make_tuple(318, 238), + make_tuple(320, 240), make_tuple(318, 242), make_tuple(318, 238), }; INSTANTIATE_TEST_CASE_P(C, BlockinessVP9Test, ::testing::ValuesIn(c_vp9_tests)); #endif diff --git a/libs/libvpx/test/borders_test.cc b/libs/libvpx/test/borders_test.cc index 6592375f80..e66ff02e25 100644 --- a/libs/libvpx/test/borders_test.cc +++ b/libs/libvpx/test/borders_test.cc @@ -17,8 +17,9 @@ namespace { -class BordersTest : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWithParam { +class BordersTest + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWithParam { protected: BordersTest() : EncoderTest(GET_PARAM(0)) {} virtual ~BordersTest() {} @@ -52,7 +53,7 @@ TEST_P(BordersTest, TestEncodeHighBitrate) { // extend into the border and test the border condition. cfg_.g_lag_in_frames = 25; cfg_.rc_2pass_vbr_minsection_pct = 5; - cfg_.rc_2pass_vbr_minsection_pct = 2000; + cfg_.rc_2pass_vbr_maxsection_pct = 2000; cfg_.rc_target_bitrate = 2000; cfg_.rc_max_quantizer = 10; @@ -78,9 +79,6 @@ TEST_P(BordersTest, TestLowBitrate) { ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); } -VP9_INSTANTIATE_TEST_CASE(BordersTest, ::testing::Values( - ::libvpx_test::kTwoPassGood)); - -VP10_INSTANTIATE_TEST_CASE(BordersTest, ::testing::Values( - ::libvpx_test::kTwoPassGood)); +VP9_INSTANTIATE_TEST_CASE(BordersTest, + ::testing::Values(::libvpx_test::kTwoPassGood)); } // namespace diff --git a/libs/libvpx/test/byte_alignment_test.cc b/libs/libvpx/test/byte_alignment_test.cc index 3a808b0467..d78294d10f 100644 --- a/libs/libvpx/test/byte_alignment_test.cc +++ b/libs/libvpx/test/byte_alignment_test.cc @@ -36,29 +36,26 @@ struct ByteAlignmentTestParam { }; const ByteAlignmentTestParam kBaTestParams[] = { - {kLegacyByteAlignment, VPX_CODEC_OK, true}, - {32, VPX_CODEC_OK, true}, - {64, VPX_CODEC_OK, true}, - {128, VPX_CODEC_OK, true}, - {256, VPX_CODEC_OK, true}, - {512, VPX_CODEC_OK, true}, - {1024, 
VPX_CODEC_OK, true}, - {1, VPX_CODEC_INVALID_PARAM, false}, - {-2, VPX_CODEC_INVALID_PARAM, false}, - {4, VPX_CODEC_INVALID_PARAM, false}, - {16, VPX_CODEC_INVALID_PARAM, false}, - {255, VPX_CODEC_INVALID_PARAM, false}, - {2048, VPX_CODEC_INVALID_PARAM, false}, + { kLegacyByteAlignment, VPX_CODEC_OK, true }, + { 32, VPX_CODEC_OK, true }, + { 64, VPX_CODEC_OK, true }, + { 128, VPX_CODEC_OK, true }, + { 256, VPX_CODEC_OK, true }, + { 512, VPX_CODEC_OK, true }, + { 1024, VPX_CODEC_OK, true }, + { 1, VPX_CODEC_INVALID_PARAM, false }, + { -2, VPX_CODEC_INVALID_PARAM, false }, + { 4, VPX_CODEC_INVALID_PARAM, false }, + { 16, VPX_CODEC_INVALID_PARAM, false }, + { 255, VPX_CODEC_INVALID_PARAM, false }, + { 2048, VPX_CODEC_INVALID_PARAM, false }, }; // Class for testing byte alignment of reference buffers. class ByteAlignmentTest : public ::testing::TestWithParam { protected: - ByteAlignmentTest() - : video_(NULL), - decoder_(NULL), - md5_file_(NULL) {} + ByteAlignmentTest() : video_(NULL), decoder_(NULL), md5_file_(NULL) {} virtual void SetUp() { video_ = new libvpx_test::WebMVideoSource(kVP9TestFile); @@ -74,8 +71,7 @@ class ByteAlignmentTest } virtual void TearDown() { - if (md5_file_ != NULL) - fclose(md5_file_); + if (md5_file_ != NULL) fclose(md5_file_); delete decoder_; delete video_; @@ -89,8 +85,7 @@ class ByteAlignmentTest const vpx_codec_err_t res = decoder_->DecodeFrame(video_->cxdata(), video_->frame_size()); CheckDecodedFrames(byte_alignment_to_check); - if (res == VPX_CODEC_OK) - video_->Next(); + if (res == VPX_CODEC_OK) video_->Next(); return res; } @@ -98,8 +93,7 @@ class ByteAlignmentTest for (; video_->cxdata() != NULL; video_->Next()) { const vpx_codec_err_t res = decoder_->DecodeFrame(video_->cxdata(), video_->frame_size()); - if (res != VPX_CODEC_OK) - return res; + if (res != VPX_CODEC_OK) return res; CheckDecodedFrames(byte_alignment_to_check); } return VPX_CODEC_OK; @@ -135,7 +129,7 @@ class ByteAlignmentTest void OpenMd5File(const std::string 
&md5_file_name_) { md5_file_ = libvpx_test::OpenTestDataFile(md5_file_name_); ASSERT_TRUE(md5_file_ != NULL) << "MD5 file open failed. Filename: " - << md5_file_name_; + << md5_file_name_; } void CheckMd5(const vpx_image_t &img) { @@ -163,8 +157,8 @@ class ByteAlignmentTest TEST_F(ByteAlignmentTest, SwitchByteAlignment) { const int num_elements = 14; - const int byte_alignments[] = { 0, 32, 64, 128, 256, 512, 1024, - 0, 1024, 32, 512, 64, 256, 128 }; + const int byte_alignments[] = { 0, 32, 64, 128, 256, 512, 1024, + 0, 1024, 32, 512, 64, 256, 128 }; for (int i = 0; i < num_elements; ++i) { SetByteAlignment(byte_alignments[i], VPX_CODEC_OK); diff --git a/libs/libvpx/test/clear_system_state.h b/libs/libvpx/test/clear_system_state.h index 5e76797443..044a5c7583 100644 --- a/libs/libvpx/test/clear_system_state.h +++ b/libs/libvpx/test/clear_system_state.h @@ -12,7 +12,7 @@ #include "./vpx_config.h" #if ARCH_X86 || ARCH_X86_64 -# include "vpx_ports/x86.h" +#include "vpx_ports/x86.h" #endif namespace libvpx_test { diff --git a/libs/libvpx/test/codec_factory.h b/libs/libvpx/test/codec_factory.h index 09c9cf9842..e867dacafe 100644 --- a/libs/libvpx/test/codec_factory.h +++ b/libs/libvpx/test/codec_factory.h @@ -13,10 +13,10 @@ #include "./vpx_config.h" #include "vpx/vpx_decoder.h" #include "vpx/vpx_encoder.h" -#if CONFIG_VP8_ENCODER || CONFIG_VP9_ENCODER || CONFIG_VP10_ENCODER +#if CONFIG_VP8_ENCODER || CONFIG_VP9_ENCODER #include "vpx/vp8cx.h" #endif -#if CONFIG_VP8_DECODER || CONFIG_VP9_DECODER || CONFIG_VP10_DECODER +#if CONFIG_VP8_DECODER || CONFIG_VP9_DECODER #include "vpx/vp8dx.h" #endif @@ -32,15 +32,12 @@ class CodecFactory { virtual ~CodecFactory() {} - virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg, - unsigned long deadline) const = 0; + virtual Decoder *CreateDecoder(vpx_codec_dec_cfg_t cfg) const = 0; - virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg, - const vpx_codec_flags_t flags, - unsigned long deadline) // NOLINT(runtime/int) - const = 0; 
+ virtual Decoder *CreateDecoder(vpx_codec_dec_cfg_t cfg, + const vpx_codec_flags_t flags) const = 0; - virtual Encoder* CreateEncoder(vpx_codec_enc_cfg_t cfg, + virtual Encoder *CreateEncoder(vpx_codec_enc_cfg_t cfg, unsigned long deadline, const unsigned long init_flags, TwopassStatsStore *stats) const = 0; @@ -53,20 +50,20 @@ class CodecFactory { * to avoid having to include a pointer to the CodecFactory in every test * definition. */ -template -class CodecTestWithParam : public ::testing::TestWithParam< - std::tr1::tuple< const libvpx_test::CodecFactory*, T1 > > { -}; +template +class CodecTestWithParam + : public ::testing::TestWithParam< + std::tr1::tuple > {}; -template -class CodecTestWith2Params : public ::testing::TestWithParam< - std::tr1::tuple< const libvpx_test::CodecFactory*, T1, T2 > > { -}; +template +class CodecTestWith2Params + : public ::testing::TestWithParam< + std::tr1::tuple > {}; -template -class CodecTestWith3Params : public ::testing::TestWithParam< - std::tr1::tuple< const libvpx_test::CodecFactory*, T1, T2, T3 > > { -}; +template +class CodecTestWith3Params + : public ::testing::TestWithParam< + std::tr1::tuple > {}; /* * VP8 Codec Definitions @@ -74,15 +71,13 @@ class CodecTestWith3Params : public ::testing::TestWithParam< #if CONFIG_VP8 class VP8Decoder : public Decoder { public: - VP8Decoder(vpx_codec_dec_cfg_t cfg, unsigned long deadline) - : Decoder(cfg, deadline) {} + explicit VP8Decoder(vpx_codec_dec_cfg_t cfg) : Decoder(cfg) {} - VP8Decoder(vpx_codec_dec_cfg_t cfg, const vpx_codec_flags_t flag, - unsigned long deadline) // NOLINT - : Decoder(cfg, flag, deadline) {} + VP8Decoder(vpx_codec_dec_cfg_t cfg, const vpx_codec_flags_t flag) + : Decoder(cfg, flag) {} protected: - virtual vpx_codec_iface_t* CodecInterface() const { + virtual vpx_codec_iface_t *CodecInterface() const { #if CONFIG_VP8_DECODER return &vpx_codec_vp8_dx_algo; #else @@ -98,7 +93,7 @@ class VP8Encoder : public Encoder { : Encoder(cfg, deadline, init_flags, stats) 
{} protected: - virtual vpx_codec_iface_t* CodecInterface() const { + virtual vpx_codec_iface_t *CodecInterface() const { #if CONFIG_VP8_ENCODER return &vpx_codec_vp8_cx_algo; #else @@ -111,22 +106,20 @@ class VP8CodecFactory : public CodecFactory { public: VP8CodecFactory() : CodecFactory() {} - virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg, - unsigned long deadline) const { - return CreateDecoder(cfg, 0, deadline); + virtual Decoder *CreateDecoder(vpx_codec_dec_cfg_t cfg) const { + return CreateDecoder(cfg, 0); } - virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg, - const vpx_codec_flags_t flags, - unsigned long deadline) const { // NOLINT + virtual Decoder *CreateDecoder(vpx_codec_dec_cfg_t cfg, + const vpx_codec_flags_t flags) const { #if CONFIG_VP8_DECODER - return new VP8Decoder(cfg, flags, deadline); + return new VP8Decoder(cfg, flags); #else return NULL; #endif } - virtual Encoder* CreateEncoder(vpx_codec_enc_cfg_t cfg, + virtual Encoder *CreateEncoder(vpx_codec_enc_cfg_t cfg, unsigned long deadline, const unsigned long init_flags, TwopassStatsStore *stats) const { @@ -149,32 +142,30 @@ class VP8CodecFactory : public CodecFactory { const libvpx_test::VP8CodecFactory kVP8; -#define VP8_INSTANTIATE_TEST_CASE(test, ...)\ - INSTANTIATE_TEST_CASE_P(VP8, test, \ - ::testing::Combine( \ - ::testing::Values(static_cast( \ - &libvpx_test::kVP8)), \ +#define VP8_INSTANTIATE_TEST_CASE(test, ...) \ + INSTANTIATE_TEST_CASE_P( \ + VP8, test, \ + ::testing::Combine( \ + ::testing::Values(static_cast( \ + &libvpx_test::kVP8)), \ __VA_ARGS__)) #else #define VP8_INSTANTIATE_TEST_CASE(test, ...) 
#endif // CONFIG_VP8 - /* * VP9 Codec Definitions */ #if CONFIG_VP9 class VP9Decoder : public Decoder { public: - VP9Decoder(vpx_codec_dec_cfg_t cfg, unsigned long deadline) - : Decoder(cfg, deadline) {} + explicit VP9Decoder(vpx_codec_dec_cfg_t cfg) : Decoder(cfg) {} - VP9Decoder(vpx_codec_dec_cfg_t cfg, const vpx_codec_flags_t flag, - unsigned long deadline) // NOLINT - : Decoder(cfg, flag, deadline) {} + VP9Decoder(vpx_codec_dec_cfg_t cfg, const vpx_codec_flags_t flag) + : Decoder(cfg, flag) {} protected: - virtual vpx_codec_iface_t* CodecInterface() const { + virtual vpx_codec_iface_t *CodecInterface() const { #if CONFIG_VP9_DECODER return &vpx_codec_vp9_dx_algo; #else @@ -190,7 +181,7 @@ class VP9Encoder : public Encoder { : Encoder(cfg, deadline, init_flags, stats) {} protected: - virtual vpx_codec_iface_t* CodecInterface() const { + virtual vpx_codec_iface_t *CodecInterface() const { #if CONFIG_VP9_ENCODER return &vpx_codec_vp9_cx_algo; #else @@ -203,22 +194,20 @@ class VP9CodecFactory : public CodecFactory { public: VP9CodecFactory() : CodecFactory() {} - virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg, - unsigned long deadline) const { - return CreateDecoder(cfg, 0, deadline); + virtual Decoder *CreateDecoder(vpx_codec_dec_cfg_t cfg) const { + return CreateDecoder(cfg, 0); } - virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg, - const vpx_codec_flags_t flags, - unsigned long deadline) const { // NOLINT + virtual Decoder *CreateDecoder(vpx_codec_dec_cfg_t cfg, + const vpx_codec_flags_t flags) const { #if CONFIG_VP9_DECODER - return new VP9Decoder(cfg, flags, deadline); + return new VP9Decoder(cfg, flags); #else return NULL; #endif } - virtual Encoder* CreateEncoder(vpx_codec_enc_cfg_t cfg, + virtual Encoder *CreateEncoder(vpx_codec_enc_cfg_t cfg, unsigned long deadline, const unsigned long init_flags, TwopassStatsStore *stats) const { @@ -233,8 +222,6 @@ class VP9CodecFactory : public CodecFactory { int usage) const { #if CONFIG_VP9_ENCODER 
return vpx_codec_enc_config_default(&vpx_codec_vp9_cx_algo, cfg, usage); -#elif CONFIG_VP10_ENCODER - return vpx_codec_enc_config_default(&vpx_codec_vp10_cx_algo, cfg, usage); #else return VPX_CODEC_INCAPABLE; #endif @@ -243,106 +230,16 @@ class VP9CodecFactory : public CodecFactory { const libvpx_test::VP9CodecFactory kVP9; -#define VP9_INSTANTIATE_TEST_CASE(test, ...)\ - INSTANTIATE_TEST_CASE_P(VP9, test, \ - ::testing::Combine( \ - ::testing::Values(static_cast( \ - &libvpx_test::kVP9)), \ +#define VP9_INSTANTIATE_TEST_CASE(test, ...) \ + INSTANTIATE_TEST_CASE_P( \ + VP9, test, \ + ::testing::Combine( \ + ::testing::Values(static_cast( \ + &libvpx_test::kVP9)), \ __VA_ARGS__)) #else #define VP9_INSTANTIATE_TEST_CASE(test, ...) #endif // CONFIG_VP9 -/* - * VP10 Codec Definitions - */ -#if CONFIG_VP10 -class VP10Decoder : public Decoder { - public: - VP10Decoder(vpx_codec_dec_cfg_t cfg, unsigned long deadline) - : Decoder(cfg, deadline) {} - - VP10Decoder(vpx_codec_dec_cfg_t cfg, const vpx_codec_flags_t flag, - unsigned long deadline) // NOLINT - : Decoder(cfg, flag, deadline) {} - - protected: - virtual vpx_codec_iface_t* CodecInterface() const { -#if CONFIG_VP10_DECODER - return &vpx_codec_vp10_dx_algo; -#else - return NULL; -#endif - } -}; - -class VP10Encoder : public Encoder { - public: - VP10Encoder(vpx_codec_enc_cfg_t cfg, unsigned long deadline, - const unsigned long init_flags, TwopassStatsStore *stats) - : Encoder(cfg, deadline, init_flags, stats) {} - - protected: - virtual vpx_codec_iface_t* CodecInterface() const { -#if CONFIG_VP10_ENCODER - return &vpx_codec_vp10_cx_algo; -#else - return NULL; -#endif - } -}; - -class VP10CodecFactory : public CodecFactory { - public: - VP10CodecFactory() : CodecFactory() {} - - virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg, - unsigned long deadline) const { - return CreateDecoder(cfg, 0, deadline); - } - - virtual Decoder* CreateDecoder(vpx_codec_dec_cfg_t cfg, - const vpx_codec_flags_t flags, - unsigned 
long deadline) const { // NOLINT -#if CONFIG_VP10_DECODER - return new VP10Decoder(cfg, flags, deadline); -#else - return NULL; -#endif - } - - virtual Encoder* CreateEncoder(vpx_codec_enc_cfg_t cfg, - unsigned long deadline, - const unsigned long init_flags, - TwopassStatsStore *stats) const { -#if CONFIG_VP10_ENCODER - return new VP10Encoder(cfg, deadline, init_flags, stats); -#else - return NULL; -#endif - } - - virtual vpx_codec_err_t DefaultEncoderConfig(vpx_codec_enc_cfg_t *cfg, - int usage) const { -#if CONFIG_VP10_ENCODER - return vpx_codec_enc_config_default(&vpx_codec_vp10_cx_algo, cfg, usage); -#else - return VPX_CODEC_INCAPABLE; -#endif - } -}; - -const libvpx_test::VP10CodecFactory kVP10; - -#define VP10_INSTANTIATE_TEST_CASE(test, ...)\ - INSTANTIATE_TEST_CASE_P(VP10, test, \ - ::testing::Combine( \ - ::testing::Values(static_cast( \ - &libvpx_test::kVP10)), \ - __VA_ARGS__)) -#else -#define VP10_INSTANTIATE_TEST_CASE(test, ...) -#endif // CONFIG_VP10 - } // namespace libvpx_test #endif // TEST_CODEC_FACTORY_H_ diff --git a/libs/libvpx/test/config_test.cc b/libs/libvpx/test/config_test.cc index 04931103d7..b2f8ea5ed3 100644 --- a/libs/libvpx/test/config_test.cc +++ b/libs/libvpx/test/config_test.cc @@ -15,11 +15,13 @@ namespace { -class ConfigTest : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWithParam { +class ConfigTest + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWithParam { protected: - ConfigTest() : EncoderTest(GET_PARAM(0)), - frame_count_in_(0), frame_count_out_(0), frame_count_max_(0) {} + ConfigTest() + : EncoderTest(GET_PARAM(0)), frame_count_in_(0), frame_count_out_(0), + frame_count_max_(0) {} virtual ~ConfigTest() {} virtual void SetUp() { @@ -32,12 +34,12 @@ class ConfigTest : public ::libvpx_test::EncoderTest, frame_count_out_ = 0; } - virtual void PreEncodeFrameHook(libvpx_test::VideoSource* /*video*/) { + virtual void PreEncodeFrameHook(libvpx_test::VideoSource * /*video*/) { 
++frame_count_in_; abort_ |= (frame_count_in_ >= frame_count_max_); } - virtual void FramePktHook(const vpx_codec_cx_pkt_t* /*pkt*/) { + virtual void FramePktHook(const vpx_codec_cx_pkt_t * /*pkt*/) { ++frame_count_out_; } diff --git a/libs/libvpx/test/consistency_test.cc b/libs/libvpx/test/consistency_test.cc index 9c2fd55084..37b4a45e54 100644 --- a/libs/libvpx/test/consistency_test.cc +++ b/libs/libvpx/test/consistency_test.cc @@ -26,12 +26,10 @@ #include "vpx_dsp/ssim.h" #include "vpx_mem/vpx_mem.h" -extern "C" -double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, - uint8_t *img2, int img2_pitch, - int width, int height, - Ssimv *sv2, Metrics *m, - int do_inconsistency); +extern "C" double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, + uint8_t *img2, int img2_pitch, int width, + int height, Ssimv *sv2, Metrics *m, + int do_inconsistency); using libvpx_test::ACMRandom; @@ -41,20 +39,18 @@ class ConsistencyTestBase : public ::testing::Test { ConsistencyTestBase(int width, int height) : width_(width), height_(height) {} static void SetUpTestCase() { - source_data_[0] = reinterpret_cast( + source_data_[0] = reinterpret_cast( vpx_memalign(kDataAlignment, kDataBufferSize)); - reference_data_[0] = reinterpret_cast( + reference_data_[0] = reinterpret_cast( vpx_memalign(kDataAlignment, kDataBufferSize)); - source_data_[1] = reinterpret_cast( + source_data_[1] = reinterpret_cast( vpx_memalign(kDataAlignment, kDataBufferSize)); - reference_data_[1] = reinterpret_cast( + reference_data_[1] = reinterpret_cast( vpx_memalign(kDataAlignment, kDataBufferSize)); ssim_array_ = new Ssimv[kDataBufferSize / 16]; } - static void ClearSsim() { - memset(ssim_array_, 0, kDataBufferSize / 16); - } + static void ClearSsim() { memset(ssim_array_, 0, kDataBufferSize / 16); } static void TearDownTestCase() { vpx_free(source_data_[0]); source_data_[0] = NULL; @@ -68,14 +64,12 @@ class ConsistencyTestBase : public ::testing::Test { delete[] ssim_array_; } - virtual void TearDown() 
{ - libvpx_test::ClearSystemState(); - } + virtual void TearDown() { libvpx_test::ClearSystemState(); } protected: // Handle frames up to 640x480 static const int kDataAlignment = 16; - static const int kDataBufferSize = 640*480; + static const int kDataBufferSize = 640 * 480; virtual void SetUp() { source_stride_ = (width_ + 31) & ~31; @@ -122,9 +116,9 @@ class ConsistencyTestBase : public ::testing::Test { } } int width_, height_; - static uint8_t* source_data_[2]; + static uint8_t *source_data_[2]; int source_stride_; - static uint8_t* reference_data_[2]; + static uint8_t *reference_data_[2]; int reference_stride_; static Ssimv *ssim_array_; Metrics metrics_; @@ -142,18 +136,17 @@ class ConsistencyVP9Test protected: double CheckConsistency(int frame) { - EXPECT_LT(frame, 2)<< "Frame to check has to be less than 2."; - return - vpx_get_ssim_metrics(source_data_[frame], source_stride_, - reference_data_[frame], reference_stride_, - width_, height_, ssim_array_, &metrics_, 1); + EXPECT_LT(frame, 2) << "Frame to check has to be less than 2."; + return vpx_get_ssim_metrics(source_data_[frame], source_stride_, + reference_data_[frame], reference_stride_, + width_, height_, ssim_array_, &metrics_, 1); } }; #endif // CONFIG_VP9_ENCODER -uint8_t* ConsistencyTestBase::source_data_[2] = {NULL, NULL}; -uint8_t* ConsistencyTestBase::reference_data_[2] = {NULL, NULL}; -Ssimv* ConsistencyTestBase::ssim_array_ = NULL; +uint8_t *ConsistencyTestBase::source_data_[2] = { NULL, NULL }; +uint8_t *ConsistencyTestBase::reference_data_[2] = { NULL, NULL }; +Ssimv *ConsistencyTestBase::ssim_array_ = NULL; #if CONFIG_VP9_ENCODER TEST_P(ConsistencyVP9Test, ConsistencyIsZero) { @@ -205,7 +198,6 @@ TEST_P(ConsistencyVP9Test, ConsistencyIsZero) { } #endif // CONFIG_VP9_ENCODER - using std::tr1::make_tuple; //------------------------------------------------------------------------------ @@ -213,9 +205,7 @@ using std::tr1::make_tuple; #if CONFIG_VP9_ENCODER const ConsistencyParam c_vp9_tests[] 
= { - make_tuple(320, 240), - make_tuple(318, 242), - make_tuple(318, 238), + make_tuple(320, 240), make_tuple(318, 242), make_tuple(318, 238), }; INSTANTIATE_TEST_CASE_P(C, ConsistencyVP9Test, ::testing::ValuesIn(c_vp9_tests)); diff --git a/libs/libvpx/test/convolve_test.cc b/libs/libvpx/test/convolve_test.cc index 12022be523..432d1b009e 100644 --- a/libs/libvpx/test/convolve_test.cc +++ b/libs/libvpx/test/convolve_test.cc @@ -37,14 +37,12 @@ typedef void (*ConvolveFunc)(const uint8_t *src, ptrdiff_t src_stride, int w, int h); struct ConvolveFunctions { - ConvolveFunctions(ConvolveFunc copy, ConvolveFunc avg, - ConvolveFunc h8, ConvolveFunc h8_avg, - ConvolveFunc v8, ConvolveFunc v8_avg, - ConvolveFunc hv8, ConvolveFunc hv8_avg, - ConvolveFunc sh8, ConvolveFunc sh8_avg, - ConvolveFunc sv8, ConvolveFunc sv8_avg, - ConvolveFunc shv8, ConvolveFunc shv8_avg, - int bd) + ConvolveFunctions(ConvolveFunc copy, ConvolveFunc avg, ConvolveFunc h8, + ConvolveFunc h8_avg, ConvolveFunc v8, ConvolveFunc v8_avg, + ConvolveFunc hv8, ConvolveFunc hv8_avg, ConvolveFunc sh8, + ConvolveFunc sh8_avg, ConvolveFunc sv8, + ConvolveFunc sv8_avg, ConvolveFunc shv8, + ConvolveFunc shv8_avg, int bd) : copy_(copy), avg_(avg), h8_(h8), v8_(v8), hv8_(hv8), h8_avg_(h8_avg), v8_avg_(v8_avg), hv8_avg_(hv8_avg), sh8_(sh8), sv8_(sv8), shv8_(shv8), sh8_avg_(sh8_avg), sv8_avg_(sv8_avg), shv8_avg_(shv8_avg), @@ -58,34 +56,35 @@ struct ConvolveFunctions { ConvolveFunc h8_avg_; ConvolveFunc v8_avg_; ConvolveFunc hv8_avg_; - ConvolveFunc sh8_; // scaled horiz - ConvolveFunc sv8_; // scaled vert - ConvolveFunc shv8_; // scaled horiz/vert - ConvolveFunc sh8_avg_; // scaled avg horiz - ConvolveFunc sv8_avg_; // scaled avg vert - ConvolveFunc shv8_avg_; // scaled avg horiz/vert + ConvolveFunc sh8_; // scaled horiz + ConvolveFunc sv8_; // scaled vert + ConvolveFunc shv8_; // scaled horiz/vert + ConvolveFunc sh8_avg_; // scaled avg horiz + ConvolveFunc sv8_avg_; // scaled avg vert + ConvolveFunc shv8_avg_; // 
scaled avg horiz/vert int use_highbd_; // 0 if high bitdepth not used, else the actual bit depth. }; typedef std::tr1::tuple ConvolveParam; +#define ALL_SIZES(convolve_fn) \ + make_tuple(4, 4, &convolve_fn), make_tuple(8, 4, &convolve_fn), \ + make_tuple(4, 8, &convolve_fn), make_tuple(8, 8, &convolve_fn), \ + make_tuple(16, 8, &convolve_fn), make_tuple(8, 16, &convolve_fn), \ + make_tuple(16, 16, &convolve_fn), make_tuple(32, 16, &convolve_fn), \ + make_tuple(16, 32, &convolve_fn), make_tuple(32, 32, &convolve_fn), \ + make_tuple(64, 32, &convolve_fn), make_tuple(32, 64, &convolve_fn), \ + make_tuple(64, 64, &convolve_fn) + // Reference 8-tap subpixel filter, slightly modified to fit into this test. #define VP9_FILTER_WEIGHT 128 #define VP9_FILTER_SHIFT 7 -uint8_t clip_pixel(int x) { - return x < 0 ? 0 : - x > 255 ? 255 : - x; -} +uint8_t clip_pixel(int x) { return x < 0 ? 0 : x > 255 ? 255 : x; } -void filter_block2d_8_c(const uint8_t *src_ptr, - const unsigned int src_stride, - const int16_t *HFilter, - const int16_t *VFilter, - uint8_t *dst_ptr, - unsigned int dst_stride, - unsigned int output_width, - unsigned int output_height) { +void filter_block2d_8_c(const uint8_t *src_ptr, const unsigned int src_stride, + const int16_t *HFilter, const int16_t *VFilter, + uint8_t *dst_ptr, unsigned int dst_stride, + unsigned int output_width, unsigned int output_height) { // Between passes, we use an intermediate buffer whose height is extended to // have enough horizontally filtered values as input for the vertical pass. // This buffer is allocated to be big enough for the largest block type we @@ -103,7 +102,8 @@ void filter_block2d_8_c(const uint8_t *src_ptr, // and filter_max_width = 16 // uint8_t intermediate_buffer[71 * kMaxDimension]; - const int intermediate_next_stride = 1 - intermediate_height * output_width; + const int intermediate_next_stride = + 1 - static_cast(intermediate_height * output_width); // Horizontal pass (src -> transposed intermediate). 
uint8_t *output_ptr = intermediate_buffer; @@ -112,15 +112,11 @@ void filter_block2d_8_c(const uint8_t *src_ptr, for (i = 0; i < intermediate_height; ++i) { for (j = 0; j < output_width; ++j) { // Apply filter... - const int temp = (src_ptr[0] * HFilter[0]) + - (src_ptr[1] * HFilter[1]) + - (src_ptr[2] * HFilter[2]) + - (src_ptr[3] * HFilter[3]) + - (src_ptr[4] * HFilter[4]) + - (src_ptr[5] * HFilter[5]) + - (src_ptr[6] * HFilter[6]) + - (src_ptr[7] * HFilter[7]) + - (VP9_FILTER_WEIGHT >> 1); // Rounding + const int temp = (src_ptr[0] * HFilter[0]) + (src_ptr[1] * HFilter[1]) + + (src_ptr[2] * HFilter[2]) + (src_ptr[3] * HFilter[3]) + + (src_ptr[4] * HFilter[4]) + (src_ptr[5] * HFilter[5]) + + (src_ptr[6] * HFilter[6]) + (src_ptr[7] * HFilter[7]) + + (VP9_FILTER_WEIGHT >> 1); // Rounding // Normalize back to 0-255... *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT); @@ -137,15 +133,11 @@ void filter_block2d_8_c(const uint8_t *src_ptr, for (i = 0; i < output_height; ++i) { for (j = 0; j < output_width; ++j) { // Apply filter... - const int temp = (src_ptr[0] * VFilter[0]) + - (src_ptr[1] * VFilter[1]) + - (src_ptr[2] * VFilter[2]) + - (src_ptr[3] * VFilter[3]) + - (src_ptr[4] * VFilter[4]) + - (src_ptr[5] * VFilter[5]) + - (src_ptr[6] * VFilter[6]) + - (src_ptr[7] * VFilter[7]) + - (VP9_FILTER_WEIGHT >> 1); // Rounding + const int temp = (src_ptr[0] * VFilter[0]) + (src_ptr[1] * VFilter[1]) + + (src_ptr[2] * VFilter[2]) + (src_ptr[3] * VFilter[3]) + + (src_ptr[4] * VFilter[4]) + (src_ptr[5] * VFilter[5]) + + (src_ptr[6] * VFilter[6]) + (src_ptr[7] * VFilter[7]) + + (VP9_FILTER_WEIGHT >> 1); // Rounding // Normalize back to 0-255... 
*dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT); @@ -156,12 +148,9 @@ void filter_block2d_8_c(const uint8_t *src_ptr, } } -void block2d_average_c(uint8_t *src, - unsigned int src_stride, - uint8_t *output_ptr, - unsigned int output_stride, - unsigned int output_width, - unsigned int output_height) { +void block2d_average_c(uint8_t *src, unsigned int src_stride, + uint8_t *output_ptr, unsigned int output_stride, + unsigned int output_width, unsigned int output_height) { unsigned int i, j; for (i = 0; i < output_height; ++i) { for (j = 0; j < output_width; ++j) { @@ -173,10 +162,8 @@ void block2d_average_c(uint8_t *src, void filter_average_block2d_8_c(const uint8_t *src_ptr, const unsigned int src_stride, - const int16_t *HFilter, - const int16_t *VFilter, - uint8_t *dst_ptr, - unsigned int dst_stride, + const int16_t *HFilter, const int16_t *VFilter, + uint8_t *dst_ptr, unsigned int dst_stride, unsigned int output_width, unsigned int output_height) { uint8_t tmp[kMaxDimension * kMaxDimension]; @@ -185,20 +172,16 @@ void filter_average_block2d_8_c(const uint8_t *src_ptr, assert(output_height <= kMaxDimension); filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64, output_width, output_height); - block2d_average_c(tmp, 64, dst_ptr, dst_stride, - output_width, output_height); + block2d_average_c(tmp, 64, dst_ptr, dst_stride, output_width, output_height); } #if CONFIG_VP9_HIGHBITDEPTH void highbd_filter_block2d_8_c(const uint16_t *src_ptr, const unsigned int src_stride, - const int16_t *HFilter, - const int16_t *VFilter, - uint16_t *dst_ptr, - unsigned int dst_stride, + const int16_t *HFilter, const int16_t *VFilter, + uint16_t *dst_ptr, unsigned int dst_stride, unsigned int output_width, - unsigned int output_height, - int bd) { + unsigned int output_height, int bd) { // Between passes, we use an intermediate buffer whose height is extended to // have enough horizontally filtered values as input for the vertical pass. 
// This buffer is allocated to be big enough for the largest block type we @@ -215,7 +198,8 @@ void highbd_filter_block2d_8_c(const uint16_t *src_ptr, * and filter_max_width = 16 */ uint16_t intermediate_buffer[71 * kMaxDimension]; - const int intermediate_next_stride = 1 - intermediate_height * output_width; + const int intermediate_next_stride = + 1 - static_cast(intermediate_height * output_width); // Horizontal pass (src -> transposed intermediate). { @@ -226,14 +210,10 @@ void highbd_filter_block2d_8_c(const uint16_t *src_ptr, for (i = 0; i < intermediate_height; ++i) { for (j = 0; j < output_width; ++j) { // Apply filter... - const int temp = (src_ptr[0] * HFilter[0]) + - (src_ptr[1] * HFilter[1]) + - (src_ptr[2] * HFilter[2]) + - (src_ptr[3] * HFilter[3]) + - (src_ptr[4] * HFilter[4]) + - (src_ptr[5] * HFilter[5]) + - (src_ptr[6] * HFilter[6]) + - (src_ptr[7] * HFilter[7]) + + const int temp = (src_ptr[0] * HFilter[0]) + (src_ptr[1] * HFilter[1]) + + (src_ptr[2] * HFilter[2]) + (src_ptr[3] * HFilter[3]) + + (src_ptr[4] * HFilter[4]) + (src_ptr[5] * HFilter[5]) + + (src_ptr[6] * HFilter[6]) + (src_ptr[7] * HFilter[7]) + (VP9_FILTER_WEIGHT >> 1); // Rounding // Normalize back to 0-255... @@ -254,14 +234,10 @@ void highbd_filter_block2d_8_c(const uint16_t *src_ptr, for (i = 0; i < output_height; ++i) { for (j = 0; j < output_width; ++j) { // Apply filter... - const int temp = (src_ptr[0] * VFilter[0]) + - (src_ptr[1] * VFilter[1]) + - (src_ptr[2] * VFilter[2]) + - (src_ptr[3] * VFilter[3]) + - (src_ptr[4] * VFilter[4]) + - (src_ptr[5] * VFilter[5]) + - (src_ptr[6] * VFilter[6]) + - (src_ptr[7] * VFilter[7]) + + const int temp = (src_ptr[0] * VFilter[0]) + (src_ptr[1] * VFilter[1]) + + (src_ptr[2] * VFilter[2]) + (src_ptr[3] * VFilter[3]) + + (src_ptr[4] * VFilter[4]) + (src_ptr[5] * VFilter[5]) + + (src_ptr[6] * VFilter[6]) + (src_ptr[7] * VFilter[7]) + (VP9_FILTER_WEIGHT >> 1); // Rounding // Normalize back to 0-255... 
@@ -274,10 +250,8 @@ void highbd_filter_block2d_8_c(const uint16_t *src_ptr, } } -void highbd_block2d_average_c(uint16_t *src, - unsigned int src_stride, - uint16_t *output_ptr, - unsigned int output_stride, +void highbd_block2d_average_c(uint16_t *src, unsigned int src_stride, + uint16_t *output_ptr, unsigned int output_stride, unsigned int output_width, unsigned int output_height) { unsigned int i, j; @@ -289,23 +263,19 @@ void highbd_block2d_average_c(uint16_t *src, } } -void highbd_filter_average_block2d_8_c(const uint16_t *src_ptr, - const unsigned int src_stride, - const int16_t *HFilter, - const int16_t *VFilter, - uint16_t *dst_ptr, - unsigned int dst_stride, - unsigned int output_width, - unsigned int output_height, - int bd) { +void highbd_filter_average_block2d_8_c( + const uint16_t *src_ptr, const unsigned int src_stride, + const int16_t *HFilter, const int16_t *VFilter, uint16_t *dst_ptr, + unsigned int dst_stride, unsigned int output_width, + unsigned int output_height, int bd) { uint16_t tmp[kMaxDimension * kMaxDimension]; assert(output_width <= kMaxDimension); assert(output_height <= kMaxDimension); highbd_filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, tmp, 64, output_width, output_height, bd); - highbd_block2d_average_c(tmp, 64, dst_ptr, dst_stride, - output_width, output_height); + highbd_block2d_average_c(tmp, 64, dst_ptr, dst_stride, output_width, + output_height); } #endif // CONFIG_VP9_HIGHBITDEPTH @@ -313,19 +283,20 @@ class ConvolveTest : public ::testing::TestWithParam { public: static void SetUpTestCase() { // Force input_ to be unaligned, output to be 16 byte aligned. 
- input_ = reinterpret_cast( - vpx_memalign(kDataAlignment, kInputBufferSize + 1)) + 1; - output_ = reinterpret_cast( + input_ = reinterpret_cast( + vpx_memalign(kDataAlignment, kInputBufferSize + 1)) + + 1; + output_ = reinterpret_cast( vpx_memalign(kDataAlignment, kOutputBufferSize)); - output_ref_ = reinterpret_cast( + output_ref_ = reinterpret_cast( vpx_memalign(kDataAlignment, kOutputBufferSize)); #if CONFIG_VP9_HIGHBITDEPTH - input16_ = reinterpret_cast( - vpx_memalign(kDataAlignment, - (kInputBufferSize + 1) * sizeof(uint16_t))) + 1; - output16_ = reinterpret_cast( + input16_ = reinterpret_cast(vpx_memalign( + kDataAlignment, (kInputBufferSize + 1) * sizeof(uint16_t))) + + 1; + output16_ = reinterpret_cast( vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t))); - output16_ref_ = reinterpret_cast( + output16_ref_ = reinterpret_cast( vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t))); #endif } @@ -375,17 +346,19 @@ class ConvolveTest : public ::testing::TestWithParam { virtual void SetUp() { UUT_ = GET_PARAM(2); #if CONFIG_VP9_HIGHBITDEPTH - if (UUT_->use_highbd_ != 0) + if (UUT_->use_highbd_ != 0) { mask_ = (1 << UUT_->use_highbd_) - 1; - else + } else { mask_ = 255; + } #endif /* Set up guard blocks for an inner block centered in the outer block */ for (int i = 0; i < kOutputBufferSize; ++i) { - if (IsIndexInBorder(i)) + if (IsIndexInBorder(i)) { output_[i] = 255; - else + } else { output_[i] = 0; + } } ::libvpx_test::ACMRandom prng; @@ -414,53 +387,53 @@ class ConvolveTest : public ::testing::TestWithParam { void CopyOutputToRef() { memcpy(output_ref_, output_, kOutputBufferSize); #if CONFIG_VP9_HIGHBITDEPTH - memcpy(output16_ref_, output16_, kOutputBufferSize); + memcpy(output16_ref_, output16_, + kOutputBufferSize * sizeof(output16_ref_[0])); #endif } void CheckGuardBlocks() { for (int i = 0; i < kOutputBufferSize; ++i) { - if (IsIndexInBorder(i)) - EXPECT_EQ(255, output_[i]); + if (IsIndexInBorder(i)) EXPECT_EQ(255, 
output_[i]); } } uint8_t *input() const { + const int offset = BorderTop() * kOuterBlockSize + BorderLeft(); #if CONFIG_VP9_HIGHBITDEPTH if (UUT_->use_highbd_ == 0) { - return input_ + BorderTop() * kOuterBlockSize + BorderLeft(); + return input_ + offset; } else { - return CONVERT_TO_BYTEPTR(input16_ + BorderTop() * kOuterBlockSize + - BorderLeft()); + return CONVERT_TO_BYTEPTR(input16_) + offset; } #else - return input_ + BorderTop() * kOuterBlockSize + BorderLeft(); + return input_ + offset; #endif } uint8_t *output() const { + const int offset = BorderTop() * kOuterBlockSize + BorderLeft(); #if CONFIG_VP9_HIGHBITDEPTH if (UUT_->use_highbd_ == 0) { - return output_ + BorderTop() * kOuterBlockSize + BorderLeft(); + return output_ + offset; } else { - return CONVERT_TO_BYTEPTR(output16_ + BorderTop() * kOuterBlockSize + - BorderLeft()); + return CONVERT_TO_BYTEPTR(output16_) + offset; } #else - return output_ + BorderTop() * kOuterBlockSize + BorderLeft(); + return output_ + offset; #endif } uint8_t *output_ref() const { + const int offset = BorderTop() * kOuterBlockSize + BorderLeft(); #if CONFIG_VP9_HIGHBITDEPTH if (UUT_->use_highbd_ == 0) { - return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft(); + return output_ref_ + offset; } else { - return CONVERT_TO_BYTEPTR(output16_ref_ + BorderTop() * kOuterBlockSize + - BorderLeft()); + return CONVERT_TO_BYTEPTR(output16_ref_) + offset; } #else - return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft(); + return output_ref_ + offset; #endif } @@ -479,180 +452,178 @@ class ConvolveTest : public ::testing::TestWithParam { void assign_val(uint8_t *list, int index, uint16_t val) const { #if CONFIG_VP9_HIGHBITDEPTH if (UUT_->use_highbd_ == 0) { - list[index] = (uint8_t) val; + list[index] = (uint8_t)val; } else { CONVERT_TO_SHORTPTR(list)[index] = val; } #else - list[index] = (uint8_t) val; + list[index] = (uint8_t)val; #endif } - void wrapper_filter_average_block2d_8_c(const uint8_t *src_ptr, - const 
unsigned int src_stride, - const int16_t *HFilter, - const int16_t *VFilter, - uint8_t *dst_ptr, - unsigned int dst_stride, - unsigned int output_width, - unsigned int output_height) { + void wrapper_filter_average_block2d_8_c( + const uint8_t *src_ptr, const unsigned int src_stride, + const int16_t *HFilter, const int16_t *VFilter, uint8_t *dst_ptr, + unsigned int dst_stride, unsigned int output_width, + unsigned int output_height) { #if CONFIG_VP9_HIGHBITDEPTH if (UUT_->use_highbd_ == 0) { - filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, - dst_ptr, dst_stride, output_width, - output_height); + filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr, + dst_stride, output_width, output_height); } else { - highbd_filter_average_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr), - src_stride, HFilter, VFilter, - CONVERT_TO_SHORTPTR(dst_ptr), - dst_stride, output_width, output_height, - UUT_->use_highbd_); + highbd_filter_average_block2d_8_c( + CONVERT_TO_SHORTPTR(src_ptr), src_stride, HFilter, VFilter, + CONVERT_TO_SHORTPTR(dst_ptr), dst_stride, output_width, output_height, + UUT_->use_highbd_); } #else - filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, - dst_ptr, dst_stride, output_width, - output_height); + filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr, + dst_stride, output_width, output_height); #endif } void wrapper_filter_block2d_8_c(const uint8_t *src_ptr, const unsigned int src_stride, const int16_t *HFilter, - const int16_t *VFilter, - uint8_t *dst_ptr, + const int16_t *VFilter, uint8_t *dst_ptr, unsigned int dst_stride, unsigned int output_width, unsigned int output_height) { #if CONFIG_VP9_HIGHBITDEPTH if (UUT_->use_highbd_ == 0) { - filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, - dst_ptr, dst_stride, output_width, output_height); + filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr, + dst_stride, output_width, output_height); } else { 
highbd_filter_block2d_8_c(CONVERT_TO_SHORTPTR(src_ptr), src_stride, - HFilter, VFilter, - CONVERT_TO_SHORTPTR(dst_ptr), dst_stride, - output_width, output_height, UUT_->use_highbd_); + HFilter, VFilter, CONVERT_TO_SHORTPTR(dst_ptr), + dst_stride, output_width, output_height, + UUT_->use_highbd_); } #else - filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, - dst_ptr, dst_stride, output_width, output_height); + filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr, + dst_stride, output_width, output_height); #endif } - const ConvolveFunctions* UUT_; - static uint8_t* input_; - static uint8_t* output_; - static uint8_t* output_ref_; + const ConvolveFunctions *UUT_; + static uint8_t *input_; + static uint8_t *output_; + static uint8_t *output_ref_; #if CONFIG_VP9_HIGHBITDEPTH - static uint16_t* input16_; - static uint16_t* output16_; - static uint16_t* output16_ref_; + static uint16_t *input16_; + static uint16_t *output16_; + static uint16_t *output16_ref_; int mask_; #endif }; -uint8_t* ConvolveTest::input_ = NULL; -uint8_t* ConvolveTest::output_ = NULL; -uint8_t* ConvolveTest::output_ref_ = NULL; +uint8_t *ConvolveTest::input_ = NULL; +uint8_t *ConvolveTest::output_ = NULL; +uint8_t *ConvolveTest::output_ref_ = NULL; #if CONFIG_VP9_HIGHBITDEPTH -uint16_t* ConvolveTest::input16_ = NULL; -uint16_t* ConvolveTest::output16_ = NULL; -uint16_t* ConvolveTest::output16_ref_ = NULL; +uint16_t *ConvolveTest::input16_ = NULL; +uint16_t *ConvolveTest::output16_ = NULL; +uint16_t *ConvolveTest::output16_ref_ = NULL; #endif -TEST_P(ConvolveTest, GuardBlocks) { - CheckGuardBlocks(); -} +TEST_P(ConvolveTest, GuardBlocks) { CheckGuardBlocks(); } TEST_P(ConvolveTest, Copy) { - uint8_t* const in = input(); - uint8_t* const out = output(); + uint8_t *const in = input(); + uint8_t *const out = output(); - ASM_REGISTER_STATE_CHECK( - UUT_->copy_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0, - Width(), Height())); + ASM_REGISTER_STATE_CHECK(UUT_->copy_(in, 
kInputStride, out, kOutputStride, + NULL, 0, NULL, 0, Width(), Height())); CheckGuardBlocks(); - for (int y = 0; y < Height(); ++y) + for (int y = 0; y < Height(); ++y) { for (int x = 0; x < Width(); ++x) ASSERT_EQ(lookup(out, y * kOutputStride + x), lookup(in, y * kInputStride + x)) << "(" << x << "," << y << ")"; + } } TEST_P(ConvolveTest, Avg) { - uint8_t* const in = input(); - uint8_t* const out = output(); - uint8_t* const out_ref = output_ref(); + uint8_t *const in = input(); + uint8_t *const out = output(); + uint8_t *const out_ref = output_ref(); CopyOutputToRef(); - ASM_REGISTER_STATE_CHECK( - UUT_->avg_(in, kInputStride, out, kOutputStride, NULL, 0, NULL, 0, - Width(), Height())); + ASM_REGISTER_STATE_CHECK(UUT_->avg_(in, kInputStride, out, kOutputStride, + NULL, 0, NULL, 0, Width(), Height())); CheckGuardBlocks(); - for (int y = 0; y < Height(); ++y) + for (int y = 0; y < Height(); ++y) { for (int x = 0; x < Width(); ++x) ASSERT_EQ(lookup(out, y * kOutputStride + x), ROUND_POWER_OF_TWO(lookup(in, y * kInputStride + x) + - lookup(out_ref, y * kOutputStride + x), 1)) + lookup(out_ref, y * kOutputStride + x), + 1)) << "(" << x << "," << y << ")"; + } } TEST_P(ConvolveTest, CopyHoriz) { - uint8_t* const in = input(); - uint8_t* const out = output(); - DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0}; + uint8_t *const in = input(); + uint8_t *const out = output(); + DECLARE_ALIGNED(256, const int16_t, + filter8[8]) = { 0, 0, 0, 128, 0, 0, 0, 0 }; - ASM_REGISTER_STATE_CHECK( - UUT_->sh8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16, - Width(), Height())); + ASM_REGISTER_STATE_CHECK(UUT_->sh8_(in, kInputStride, out, kOutputStride, + filter8, 16, filter8, 16, Width(), + Height())); CheckGuardBlocks(); - for (int y = 0; y < Height(); ++y) + for (int y = 0; y < Height(); ++y) { for (int x = 0; x < Width(); ++x) ASSERT_EQ(lookup(out, y * kOutputStride + x), lookup(in, y * kInputStride + x)) << "(" << x << "," << y << 
")"; + } } TEST_P(ConvolveTest, CopyVert) { - uint8_t* const in = input(); - uint8_t* const out = output(); - DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0}; + uint8_t *const in = input(); + uint8_t *const out = output(); + DECLARE_ALIGNED(256, const int16_t, + filter8[8]) = { 0, 0, 0, 128, 0, 0, 0, 0 }; - ASM_REGISTER_STATE_CHECK( - UUT_->sv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, 16, - Width(), Height())); + ASM_REGISTER_STATE_CHECK(UUT_->sv8_(in, kInputStride, out, kOutputStride, + filter8, 16, filter8, 16, Width(), + Height())); CheckGuardBlocks(); - for (int y = 0; y < Height(); ++y) + for (int y = 0; y < Height(); ++y) { for (int x = 0; x < Width(); ++x) ASSERT_EQ(lookup(out, y * kOutputStride + x), lookup(in, y * kInputStride + x)) << "(" << x << "," << y << ")"; + } } TEST_P(ConvolveTest, Copy2D) { - uint8_t* const in = input(); - uint8_t* const out = output(); - DECLARE_ALIGNED(256, const int16_t, filter8[8]) = {0, 0, 0, 128, 0, 0, 0, 0}; + uint8_t *const in = input(); + uint8_t *const out = output(); + DECLARE_ALIGNED(256, const int16_t, + filter8[8]) = { 0, 0, 0, 128, 0, 0, 0, 0 }; - ASM_REGISTER_STATE_CHECK( - UUT_->shv8_(in, kInputStride, out, kOutputStride, filter8, 16, filter8, - 16, Width(), Height())); + ASM_REGISTER_STATE_CHECK(UUT_->shv8_(in, kInputStride, out, kOutputStride, + filter8, 16, filter8, 16, Width(), + Height())); CheckGuardBlocks(); - for (int y = 0; y < Height(); ++y) + for (int y = 0; y < Height(); ++y) { for (int x = 0; x < Width(); ++x) ASSERT_EQ(lookup(out, y * kOutputStride + x), lookup(in, y * kInputStride + x)) << "(" << x << "," << y << ")"; + } } const int kNumFilterBanks = 4; @@ -682,12 +653,12 @@ TEST(ConvolveTest, FiltersWontSaturateWhenAddedPairwise) { const int16_t kInvalidFilter[8] = { 0 }; TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) { - uint8_t* const in = input(); - uint8_t* const out = output(); + uint8_t *const in = input(); + uint8_t *const out = 
output(); #if CONFIG_VP9_HIGHBITDEPTH uint8_t ref8[kOutputStride * kMaxDimension]; uint16_t ref16[kOutputStride * kMaxDimension]; - uint8_t* ref; + uint8_t *ref; if (UUT_->use_highbd_ == 0) { ref = ref8; } else { @@ -703,53 +674,49 @@ TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) { for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) { for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) { - wrapper_filter_block2d_8_c(in, kInputStride, - filters[filter_x], filters[filter_y], - ref, kOutputStride, + wrapper_filter_block2d_8_c(in, kInputStride, filters[filter_x], + filters[filter_y], ref, kOutputStride, Width(), Height()); if (filter_x && filter_y) - ASM_REGISTER_STATE_CHECK( - UUT_->hv8_(in, kInputStride, out, kOutputStride, - filters[filter_x], 16, filters[filter_y], 16, - Width(), Height())); + ASM_REGISTER_STATE_CHECK(UUT_->hv8_( + in, kInputStride, out, kOutputStride, filters[filter_x], 16, + filters[filter_y], 16, Width(), Height())); else if (filter_y) ASM_REGISTER_STATE_CHECK( - UUT_->v8_(in, kInputStride, out, kOutputStride, - kInvalidFilter, 16, filters[filter_y], 16, - Width(), Height())); + UUT_->v8_(in, kInputStride, out, kOutputStride, kInvalidFilter, + 16, filters[filter_y], 16, Width(), Height())); else if (filter_x) ASM_REGISTER_STATE_CHECK( - UUT_->h8_(in, kInputStride, out, kOutputStride, - filters[filter_x], 16, kInvalidFilter, 16, - Width(), Height())); + UUT_->h8_(in, kInputStride, out, kOutputStride, filters[filter_x], + 16, kInvalidFilter, 16, Width(), Height())); else ASM_REGISTER_STATE_CHECK( - UUT_->copy_(in, kInputStride, out, kOutputStride, - kInvalidFilter, 0, kInvalidFilter, 0, - Width(), Height())); + UUT_->copy_(in, kInputStride, out, kOutputStride, kInvalidFilter, + 0, kInvalidFilter, 0, Width(), Height())); CheckGuardBlocks(); - for (int y = 0; y < Height(); ++y) + for (int y = 0; y < Height(); ++y) { for (int x = 0; x < Width(); ++x) ASSERT_EQ(lookup(ref, y * kOutputStride + x), lookup(out, y * kOutputStride + 
x)) << "mismatch at (" << x << "," << y << "), " - << "filters (" << filter_bank << "," - << filter_x << "," << filter_y << ")"; + << "filters (" << filter_bank << "," << filter_x << "," + << filter_y << ")"; + } } } } } TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) { - uint8_t* const in = input(); - uint8_t* const out = output(); + uint8_t *const in = input(); + uint8_t *const out = output(); #if CONFIG_VP9_HIGHBITDEPTH uint8_t ref8[kOutputStride * kMaxDimension]; uint16_t ref16[kOutputStride * kMaxDimension]; - uint8_t* ref; + uint8_t *ref; if (UUT_->use_highbd_ == 0) { ref = ref8; } else { @@ -785,41 +752,37 @@ TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) { for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) { for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) { - wrapper_filter_average_block2d_8_c(in, kInputStride, - filters[filter_x], filters[filter_y], - ref, kOutputStride, - Width(), Height()); + wrapper_filter_average_block2d_8_c(in, kInputStride, filters[filter_x], + filters[filter_y], ref, + kOutputStride, Width(), Height()); if (filter_x && filter_y) - ASM_REGISTER_STATE_CHECK( - UUT_->hv8_avg_(in, kInputStride, out, kOutputStride, - filters[filter_x], 16, filters[filter_y], 16, - Width(), Height())); + ASM_REGISTER_STATE_CHECK(UUT_->hv8_avg_( + in, kInputStride, out, kOutputStride, filters[filter_x], 16, + filters[filter_y], 16, Width(), Height())); else if (filter_y) - ASM_REGISTER_STATE_CHECK( - UUT_->v8_avg_(in, kInputStride, out, kOutputStride, - kInvalidFilter, 16, filters[filter_y], 16, - Width(), Height())); + ASM_REGISTER_STATE_CHECK(UUT_->v8_avg_( + in, kInputStride, out, kOutputStride, kInvalidFilter, 16, + filters[filter_y], 16, Width(), Height())); else if (filter_x) - ASM_REGISTER_STATE_CHECK( - UUT_->h8_avg_(in, kInputStride, out, kOutputStride, - filters[filter_x], 16, kInvalidFilter, 16, - Width(), Height())); + ASM_REGISTER_STATE_CHECK(UUT_->h8_avg_( + in, kInputStride, out, kOutputStride, 
filters[filter_x], 16, + kInvalidFilter, 16, Width(), Height())); else ASM_REGISTER_STATE_CHECK( - UUT_->avg_(in, kInputStride, out, kOutputStride, - kInvalidFilter, 0, kInvalidFilter, 0, - Width(), Height())); + UUT_->avg_(in, kInputStride, out, kOutputStride, kInvalidFilter, + 0, kInvalidFilter, 0, Width(), Height())); CheckGuardBlocks(); - for (int y = 0; y < Height(); ++y) + for (int y = 0; y < Height(); ++y) { for (int x = 0; x < Width(); ++x) ASSERT_EQ(lookup(ref, y * kOutputStride + x), lookup(out, y * kOutputStride + x)) << "mismatch at (" << x << "," << y << "), " - << "filters (" << filter_bank << "," - << filter_x << "," << filter_y << ")"; + << "filters (" << filter_bank << "," << filter_x << "," + << filter_y << ")"; + } } } } @@ -866,18 +829,19 @@ TEST_P(ConvolveTest, FilterExtremes) { for (int y = 0; y < 8; ++y) { for (int x = 0; x < 8; ++x) { #if CONFIG_VP9_HIGHBITDEPTH - assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1, - ((seed_val >> (axis ? y : x)) & 1) * mask_); + assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1, + ((seed_val >> (axis ? y : x)) & 1) * mask_); #else - assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1, - ((seed_val >> (axis ? y : x)) & 1) * 255); + assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1, + ((seed_val >> (axis ? 
y : x)) & 1) * 255); #endif if (axis) seed_val++; } - if (axis) - seed_val-= 8; - else + if (axis) { + seed_val -= 8; + } else { seed_val++; + } } if (axis) seed_val += 8; @@ -886,38 +850,34 @@ TEST_P(ConvolveTest, FilterExtremes) { vp9_filter_kernels[static_cast(filter_bank)]; for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) { for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) { - wrapper_filter_block2d_8_c(in, kInputStride, - filters[filter_x], filters[filter_y], - ref, kOutputStride, + wrapper_filter_block2d_8_c(in, kInputStride, filters[filter_x], + filters[filter_y], ref, kOutputStride, Width(), Height()); if (filter_x && filter_y) - ASM_REGISTER_STATE_CHECK( - UUT_->hv8_(in, kInputStride, out, kOutputStride, - filters[filter_x], 16, filters[filter_y], 16, - Width(), Height())); + ASM_REGISTER_STATE_CHECK(UUT_->hv8_( + in, kInputStride, out, kOutputStride, filters[filter_x], 16, + filters[filter_y], 16, Width(), Height())); else if (filter_y) - ASM_REGISTER_STATE_CHECK( - UUT_->v8_(in, kInputStride, out, kOutputStride, - kInvalidFilter, 16, filters[filter_y], 16, - Width(), Height())); + ASM_REGISTER_STATE_CHECK(UUT_->v8_( + in, kInputStride, out, kOutputStride, kInvalidFilter, 16, + filters[filter_y], 16, Width(), Height())); else if (filter_x) - ASM_REGISTER_STATE_CHECK( - UUT_->h8_(in, kInputStride, out, kOutputStride, - filters[filter_x], 16, kInvalidFilter, 16, - Width(), Height())); + ASM_REGISTER_STATE_CHECK(UUT_->h8_( + in, kInputStride, out, kOutputStride, filters[filter_x], 16, + kInvalidFilter, 16, Width(), Height())); else - ASM_REGISTER_STATE_CHECK( - UUT_->copy_(in, kInputStride, out, kOutputStride, - kInvalidFilter, 0, kInvalidFilter, 0, - Width(), Height())); + ASM_REGISTER_STATE_CHECK(UUT_->copy_( + in, kInputStride, out, kOutputStride, kInvalidFilter, 0, + kInvalidFilter, 0, Width(), Height())); - for (int y = 0; y < Height(); ++y) + for (int y = 0; y < Height(); ++y) { for (int x = 0; x < Width(); ++x) ASSERT_EQ(lookup(ref, 
y * kOutputStride + x), lookup(out, y * kOutputStride + x)) << "mismatch at (" << x << "," << y << "), " - << "filters (" << filter_bank << "," - << filter_x << "," << filter_y << ")"; + << "filters (" << filter_bank << "," << filter_x << "," + << filter_y << ")"; + } } } } @@ -928,8 +888,8 @@ TEST_P(ConvolveTest, FilterExtremes) { /* This test exercises that enough rows and columns are filtered with every possible initial fractional positions and scaling steps. */ TEST_P(ConvolveTest, CheckScalingFiltering) { - uint8_t* const in = input(); - uint8_t* const out = output(); + uint8_t *const in = input(); + uint8_t *const out = output(); const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP]; SetConstantInput(127); @@ -938,9 +898,8 @@ TEST_P(ConvolveTest, CheckScalingFiltering) { for (int step = 1; step <= 32; ++step) { /* Test the horizontal and vertical filters in combination. */ ASM_REGISTER_STATE_CHECK(UUT_->shv8_(in, kInputStride, out, kOutputStride, - eighttap[frac], step, - eighttap[frac], step, - Width(), Height())); + eighttap[frac], step, eighttap[frac], + step, Width(), Height())); CheckGuardBlocks(); @@ -948,8 +907,8 @@ TEST_P(ConvolveTest, CheckScalingFiltering) { for (int x = 0; x < Width(); ++x) { ASSERT_EQ(lookup(in, y * kInputStride + x), lookup(out, y * kOutputStride + x)) - << "x == " << x << ", y == " << y - << ", frac == " << frac << ", step == " << step; + << "x == " << x << ", y == " << y << ", frac == " << frac + << ", step == " << step; } } } @@ -959,27 +918,21 @@ TEST_P(ConvolveTest, CheckScalingFiltering) { using std::tr1::make_tuple; #if CONFIG_VP9_HIGHBITDEPTH -#define WRAP(func, bd) \ -void wrap_ ## func ## _ ## bd(const uint8_t *src, ptrdiff_t src_stride, \ - uint8_t *dst, ptrdiff_t dst_stride, \ - const int16_t *filter_x, \ - int filter_x_stride, \ - const int16_t *filter_y, \ - int filter_y_stride, \ - int w, int h) { \ - vpx_highbd_ ## func(src, src_stride, dst, dst_stride, filter_x, \ - filter_x_stride, filter_y, 
filter_y_stride, \ - w, h, bd); \ -} +#define WRAP(func, bd) \ + void wrap_##func##_##bd( \ + const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, \ + ptrdiff_t dst_stride, const int16_t *filter_x, int filter_x_stride, \ + const int16_t *filter_y, int filter_y_stride, int w, int h) { \ + vpx_highbd_##func(src, src_stride, dst, dst_stride, filter_x, \ + filter_x_stride, filter_y, filter_y_stride, w, h, bd); \ + } #if HAVE_SSE2 && ARCH_X86_64 -#if CONFIG_USE_X86INC WRAP(convolve_copy_sse2, 8) WRAP(convolve_avg_sse2, 8) WRAP(convolve_copy_sse2, 10) WRAP(convolve_avg_sse2, 10) WRAP(convolve_copy_sse2, 12) WRAP(convolve_avg_sse2, 12) -#endif // CONFIG_USE_X86INC WRAP(convolve8_horiz_sse2, 8) WRAP(convolve8_avg_horiz_sse2, 8) WRAP(convolve8_vert_sse2, 8) @@ -1027,107 +980,45 @@ WRAP(convolve8_avg_c, 12) #undef WRAP const ConvolveFunctions convolve8_c( - wrap_convolve_copy_c_8, wrap_convolve_avg_c_8, + wrap_convolve_copy_c_8, wrap_convolve_avg_c_8, wrap_convolve8_horiz_c_8, + wrap_convolve8_avg_horiz_c_8, wrap_convolve8_vert_c_8, + wrap_convolve8_avg_vert_c_8, wrap_convolve8_c_8, wrap_convolve8_avg_c_8, wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8, - wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8, - wrap_convolve8_c_8, wrap_convolve8_avg_c_8, - wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8, - wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8, - wrap_convolve8_c_8, wrap_convolve8_avg_c_8, 8); -INSTANTIATE_TEST_CASE_P(C_8, ConvolveTest, ::testing::Values( - make_tuple(4, 4, &convolve8_c), - make_tuple(8, 4, &convolve8_c), - make_tuple(4, 8, &convolve8_c), - make_tuple(8, 8, &convolve8_c), - make_tuple(16, 8, &convolve8_c), - make_tuple(8, 16, &convolve8_c), - make_tuple(16, 16, &convolve8_c), - make_tuple(32, 16, &convolve8_c), - make_tuple(16, 32, &convolve8_c), - make_tuple(32, 32, &convolve8_c), - make_tuple(64, 32, &convolve8_c), - make_tuple(32, 64, &convolve8_c), - make_tuple(64, 64, &convolve8_c))); + wrap_convolve8_vert_c_8, 
wrap_convolve8_avg_vert_c_8, wrap_convolve8_c_8, + wrap_convolve8_avg_c_8, 8); const ConvolveFunctions convolve10_c( - wrap_convolve_copy_c_10, wrap_convolve_avg_c_10, + wrap_convolve_copy_c_10, wrap_convolve_avg_c_10, wrap_convolve8_horiz_c_10, + wrap_convolve8_avg_horiz_c_10, wrap_convolve8_vert_c_10, + wrap_convolve8_avg_vert_c_10, wrap_convolve8_c_10, wrap_convolve8_avg_c_10, wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10, - wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10, - wrap_convolve8_c_10, wrap_convolve8_avg_c_10, - wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10, - wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10, - wrap_convolve8_c_10, wrap_convolve8_avg_c_10, 10); -INSTANTIATE_TEST_CASE_P(C_10, ConvolveTest, ::testing::Values( - make_tuple(4, 4, &convolve10_c), - make_tuple(8, 4, &convolve10_c), - make_tuple(4, 8, &convolve10_c), - make_tuple(8, 8, &convolve10_c), - make_tuple(16, 8, &convolve10_c), - make_tuple(8, 16, &convolve10_c), - make_tuple(16, 16, &convolve10_c), - make_tuple(32, 16, &convolve10_c), - make_tuple(16, 32, &convolve10_c), - make_tuple(32, 32, &convolve10_c), - make_tuple(64, 32, &convolve10_c), - make_tuple(32, 64, &convolve10_c), - make_tuple(64, 64, &convolve10_c))); + wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10, wrap_convolve8_c_10, + wrap_convolve8_avg_c_10, 10); const ConvolveFunctions convolve12_c( - wrap_convolve_copy_c_12, wrap_convolve_avg_c_12, + wrap_convolve_copy_c_12, wrap_convolve_avg_c_12, wrap_convolve8_horiz_c_12, + wrap_convolve8_avg_horiz_c_12, wrap_convolve8_vert_c_12, + wrap_convolve8_avg_vert_c_12, wrap_convolve8_c_12, wrap_convolve8_avg_c_12, wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12, - wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12, - wrap_convolve8_c_12, wrap_convolve8_avg_c_12, - wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12, - wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12, - wrap_convolve8_c_12, wrap_convolve8_avg_c_12, 
12); -INSTANTIATE_TEST_CASE_P(C_12, ConvolveTest, ::testing::Values( - make_tuple(4, 4, &convolve12_c), - make_tuple(8, 4, &convolve12_c), - make_tuple(4, 8, &convolve12_c), - make_tuple(8, 8, &convolve12_c), - make_tuple(16, 8, &convolve12_c), - make_tuple(8, 16, &convolve12_c), - make_tuple(16, 16, &convolve12_c), - make_tuple(32, 16, &convolve12_c), - make_tuple(16, 32, &convolve12_c), - make_tuple(32, 32, &convolve12_c), - make_tuple(64, 32, &convolve12_c), - make_tuple(32, 64, &convolve12_c), - make_tuple(64, 64, &convolve12_c))); + wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12, wrap_convolve8_c_12, + wrap_convolve8_avg_c_12, 12); +const ConvolveParam kArrayConvolve_c[] = { + ALL_SIZES(convolve8_c), ALL_SIZES(convolve10_c), ALL_SIZES(convolve12_c) +}; #else - const ConvolveFunctions convolve8_c( - vpx_convolve_copy_c, vpx_convolve_avg_c, - vpx_convolve8_horiz_c, vpx_convolve8_avg_horiz_c, - vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c, - vpx_convolve8_c, vpx_convolve8_avg_c, - vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, - vpx_scaled_vert_c, vpx_scaled_avg_vert_c, + vpx_convolve_copy_c, vpx_convolve_avg_c, vpx_convolve8_horiz_c, + vpx_convolve8_avg_horiz_c, vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c, + vpx_convolve8_c, vpx_convolve8_avg_c, vpx_scaled_horiz_c, + vpx_scaled_avg_horiz_c, vpx_scaled_vert_c, vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0); - -INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::Values( - make_tuple(4, 4, &convolve8_c), - make_tuple(8, 4, &convolve8_c), - make_tuple(4, 8, &convolve8_c), - make_tuple(8, 8, &convolve8_c), - make_tuple(16, 8, &convolve8_c), - make_tuple(8, 16, &convolve8_c), - make_tuple(16, 16, &convolve8_c), - make_tuple(32, 16, &convolve8_c), - make_tuple(16, 32, &convolve8_c), - make_tuple(32, 32, &convolve8_c), - make_tuple(64, 32, &convolve8_c), - make_tuple(32, 64, &convolve8_c), - make_tuple(64, 64, &convolve8_c))); +const ConvolveParam kArrayConvolve_c[] = { ALL_SIZES(convolve8_c) 
}; #endif +INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::ValuesIn(kArrayConvolve_c)); #if HAVE_SSE2 && ARCH_X86_64 #if CONFIG_VP9_HIGHBITDEPTH const ConvolveFunctions convolve8_sse2( -#if CONFIG_USE_X86INC wrap_convolve_copy_sse2_8, wrap_convolve_avg_sse2_8, -#else - wrap_convolve_copy_c_8, wrap_convolve_avg_c_8, -#endif // CONFIG_USE_X86INC wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8, wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8, wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8, @@ -1135,11 +1026,7 @@ const ConvolveFunctions convolve8_sse2( wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8, wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8, 8); const ConvolveFunctions convolve10_sse2( -#if CONFIG_USE_X86INC wrap_convolve_copy_sse2_10, wrap_convolve_avg_sse2_10, -#else - wrap_convolve_copy_c_10, wrap_convolve_avg_c_10, -#endif // CONFIG_USE_X86INC wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10, wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10, wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10, @@ -1147,226 +1034,101 @@ const ConvolveFunctions convolve10_sse2( wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10, wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10, 10); const ConvolveFunctions convolve12_sse2( -#if CONFIG_USE_X86INC wrap_convolve_copy_sse2_12, wrap_convolve_avg_sse2_12, -#else - wrap_convolve_copy_c_12, wrap_convolve_avg_c_12, -#endif // CONFIG_USE_X86INC wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12, wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12, wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12, wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12, wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12, wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12, 12); -INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values( - make_tuple(4, 4, &convolve8_sse2), - make_tuple(8, 4, &convolve8_sse2), - make_tuple(4, 8, 
&convolve8_sse2), - make_tuple(8, 8, &convolve8_sse2), - make_tuple(16, 8, &convolve8_sse2), - make_tuple(8, 16, &convolve8_sse2), - make_tuple(16, 16, &convolve8_sse2), - make_tuple(32, 16, &convolve8_sse2), - make_tuple(16, 32, &convolve8_sse2), - make_tuple(32, 32, &convolve8_sse2), - make_tuple(64, 32, &convolve8_sse2), - make_tuple(32, 64, &convolve8_sse2), - make_tuple(64, 64, &convolve8_sse2), - make_tuple(4, 4, &convolve10_sse2), - make_tuple(8, 4, &convolve10_sse2), - make_tuple(4, 8, &convolve10_sse2), - make_tuple(8, 8, &convolve10_sse2), - make_tuple(16, 8, &convolve10_sse2), - make_tuple(8, 16, &convolve10_sse2), - make_tuple(16, 16, &convolve10_sse2), - make_tuple(32, 16, &convolve10_sse2), - make_tuple(16, 32, &convolve10_sse2), - make_tuple(32, 32, &convolve10_sse2), - make_tuple(64, 32, &convolve10_sse2), - make_tuple(32, 64, &convolve10_sse2), - make_tuple(64, 64, &convolve10_sse2), - make_tuple(4, 4, &convolve12_sse2), - make_tuple(8, 4, &convolve12_sse2), - make_tuple(4, 8, &convolve12_sse2), - make_tuple(8, 8, &convolve12_sse2), - make_tuple(16, 8, &convolve12_sse2), - make_tuple(8, 16, &convolve12_sse2), - make_tuple(16, 16, &convolve12_sse2), - make_tuple(32, 16, &convolve12_sse2), - make_tuple(16, 32, &convolve12_sse2), - make_tuple(32, 32, &convolve12_sse2), - make_tuple(64, 32, &convolve12_sse2), - make_tuple(32, 64, &convolve12_sse2), - make_tuple(64, 64, &convolve12_sse2))); +const ConvolveParam kArrayConvolve_sse2[] = { ALL_SIZES(convolve8_sse2), + ALL_SIZES(convolve10_sse2), + ALL_SIZES(convolve12_sse2) }; #else const ConvolveFunctions convolve8_sse2( -#if CONFIG_USE_X86INC - vpx_convolve_copy_sse2, vpx_convolve_avg_sse2, -#else - vpx_convolve_copy_c, vpx_convolve_avg_c, -#endif // CONFIG_USE_X86INC - vpx_convolve8_horiz_sse2, vpx_convolve8_avg_horiz_sse2, - vpx_convolve8_vert_sse2, vpx_convolve8_avg_vert_sse2, - vpx_convolve8_sse2, vpx_convolve8_avg_sse2, - vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, - vpx_scaled_vert_c, 
vpx_scaled_avg_vert_c, - vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0); + vpx_convolve_copy_sse2, vpx_convolve_avg_sse2, vpx_convolve8_horiz_sse2, + vpx_convolve8_avg_horiz_sse2, vpx_convolve8_vert_sse2, + vpx_convolve8_avg_vert_sse2, vpx_convolve8_sse2, vpx_convolve8_avg_sse2, + vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c, + vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0); -INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, ::testing::Values( - make_tuple(4, 4, &convolve8_sse2), - make_tuple(8, 4, &convolve8_sse2), - make_tuple(4, 8, &convolve8_sse2), - make_tuple(8, 8, &convolve8_sse2), - make_tuple(16, 8, &convolve8_sse2), - make_tuple(8, 16, &convolve8_sse2), - make_tuple(16, 16, &convolve8_sse2), - make_tuple(32, 16, &convolve8_sse2), - make_tuple(16, 32, &convolve8_sse2), - make_tuple(32, 32, &convolve8_sse2), - make_tuple(64, 32, &convolve8_sse2), - make_tuple(32, 64, &convolve8_sse2), - make_tuple(64, 64, &convolve8_sse2))); +const ConvolveParam kArrayConvolve_sse2[] = { ALL_SIZES(convolve8_sse2) }; #endif // CONFIG_VP9_HIGHBITDEPTH +INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest, + ::testing::ValuesIn(kArrayConvolve_sse2)); #endif #if HAVE_SSSE3 const ConvolveFunctions convolve8_ssse3( - vpx_convolve_copy_c, vpx_convolve_avg_c, - vpx_convolve8_horiz_ssse3, vpx_convolve8_avg_horiz_ssse3, - vpx_convolve8_vert_ssse3, vpx_convolve8_avg_vert_ssse3, - vpx_convolve8_ssse3, vpx_convolve8_avg_ssse3, - vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, - vpx_scaled_vert_c, vpx_scaled_avg_vert_c, - vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0); + vpx_convolve_copy_c, vpx_convolve_avg_c, vpx_convolve8_horiz_ssse3, + vpx_convolve8_avg_horiz_ssse3, vpx_convolve8_vert_ssse3, + vpx_convolve8_avg_vert_ssse3, vpx_convolve8_ssse3, vpx_convolve8_avg_ssse3, + vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c, + vpx_scaled_avg_vert_c, vpx_scaled_2d_ssse3, vpx_scaled_avg_2d_c, 0); -INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest, ::testing::Values( - make_tuple(4, 
4, &convolve8_ssse3), - make_tuple(8, 4, &convolve8_ssse3), - make_tuple(4, 8, &convolve8_ssse3), - make_tuple(8, 8, &convolve8_ssse3), - make_tuple(16, 8, &convolve8_ssse3), - make_tuple(8, 16, &convolve8_ssse3), - make_tuple(16, 16, &convolve8_ssse3), - make_tuple(32, 16, &convolve8_ssse3), - make_tuple(16, 32, &convolve8_ssse3), - make_tuple(32, 32, &convolve8_ssse3), - make_tuple(64, 32, &convolve8_ssse3), - make_tuple(32, 64, &convolve8_ssse3), - make_tuple(64, 64, &convolve8_ssse3))); +const ConvolveParam kArrayConvolve8_ssse3[] = { ALL_SIZES(convolve8_ssse3) }; +INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest, + ::testing::ValuesIn(kArrayConvolve8_ssse3)); #endif #if HAVE_AVX2 && HAVE_SSSE3 const ConvolveFunctions convolve8_avx2( - vpx_convolve_copy_c, vpx_convolve_avg_c, - vpx_convolve8_horiz_avx2, vpx_convolve8_avg_horiz_ssse3, - vpx_convolve8_vert_avx2, vpx_convolve8_avg_vert_ssse3, - vpx_convolve8_avx2, vpx_convolve8_avg_ssse3, - vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, - vpx_scaled_vert_c, vpx_scaled_avg_vert_c, - vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0); + vpx_convolve_copy_c, vpx_convolve_avg_c, vpx_convolve8_horiz_avx2, + vpx_convolve8_avg_horiz_ssse3, vpx_convolve8_vert_avx2, + vpx_convolve8_avg_vert_ssse3, vpx_convolve8_avx2, vpx_convolve8_avg_ssse3, + vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c, + vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0); -INSTANTIATE_TEST_CASE_P(AVX2, ConvolveTest, ::testing::Values( - make_tuple(4, 4, &convolve8_avx2), - make_tuple(8, 4, &convolve8_avx2), - make_tuple(4, 8, &convolve8_avx2), - make_tuple(8, 8, &convolve8_avx2), - make_tuple(8, 16, &convolve8_avx2), - make_tuple(16, 8, &convolve8_avx2), - make_tuple(16, 16, &convolve8_avx2), - make_tuple(32, 16, &convolve8_avx2), - make_tuple(16, 32, &convolve8_avx2), - make_tuple(32, 32, &convolve8_avx2), - make_tuple(64, 32, &convolve8_avx2), - make_tuple(32, 64, &convolve8_avx2), - make_tuple(64, 64, &convolve8_avx2))); +const 
ConvolveParam kArrayConvolve8_avx2[] = { ALL_SIZES(convolve8_avx2) }; +INSTANTIATE_TEST_CASE_P(AVX2, ConvolveTest, + ::testing::ValuesIn(kArrayConvolve8_avx2)); #endif // HAVE_AVX2 && HAVE_SSSE3 #if HAVE_NEON #if HAVE_NEON_ASM const ConvolveFunctions convolve8_neon( - vpx_convolve_copy_neon, vpx_convolve_avg_neon, - vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon, - vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon, - vpx_convolve8_neon, vpx_convolve8_avg_neon, - vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, - vpx_scaled_vert_c, vpx_scaled_avg_vert_c, - vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0); -#else // HAVE_NEON + vpx_convolve_copy_neon, vpx_convolve_avg_neon, vpx_convolve8_horiz_neon, + vpx_convolve8_avg_horiz_neon, vpx_convolve8_vert_neon, + vpx_convolve8_avg_vert_neon, vpx_convolve8_neon, vpx_convolve8_avg_neon, + vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c, + vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0); +#else // HAVE_NEON const ConvolveFunctions convolve8_neon( - vpx_convolve_copy_neon, vpx_convolve_avg_neon, - vpx_convolve8_horiz_neon, vpx_convolve8_avg_horiz_neon, - vpx_convolve8_vert_neon, vpx_convolve8_avg_vert_neon, - vpx_convolve8_neon, vpx_convolve8_avg_neon, - vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, - vpx_scaled_vert_c, vpx_scaled_avg_vert_c, - vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0); + vpx_convolve_copy_neon, vpx_convolve_avg_neon, vpx_convolve8_horiz_neon, + vpx_convolve8_avg_horiz_neon, vpx_convolve8_vert_neon, + vpx_convolve8_avg_vert_neon, vpx_convolve8_neon, vpx_convolve8_avg_neon, + vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c, + vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0); #endif // HAVE_NEON_ASM -INSTANTIATE_TEST_CASE_P(NEON, ConvolveTest, ::testing::Values( - make_tuple(4, 4, &convolve8_neon), - make_tuple(8, 4, &convolve8_neon), - make_tuple(4, 8, &convolve8_neon), - make_tuple(8, 8, &convolve8_neon), - make_tuple(16, 8, &convolve8_neon), - 
make_tuple(8, 16, &convolve8_neon), - make_tuple(16, 16, &convolve8_neon), - make_tuple(32, 16, &convolve8_neon), - make_tuple(16, 32, &convolve8_neon), - make_tuple(32, 32, &convolve8_neon), - make_tuple(64, 32, &convolve8_neon), - make_tuple(32, 64, &convolve8_neon), - make_tuple(64, 64, &convolve8_neon))); +const ConvolveParam kArrayConvolve8_neon[] = { ALL_SIZES(convolve8_neon) }; +INSTANTIATE_TEST_CASE_P(NEON, ConvolveTest, + ::testing::ValuesIn(kArrayConvolve8_neon)); #endif // HAVE_NEON #if HAVE_DSPR2 const ConvolveFunctions convolve8_dspr2( - vpx_convolve_copy_dspr2, vpx_convolve_avg_dspr2, - vpx_convolve8_horiz_dspr2, vpx_convolve8_avg_horiz_dspr2, - vpx_convolve8_vert_dspr2, vpx_convolve8_avg_vert_dspr2, - vpx_convolve8_dspr2, vpx_convolve8_avg_dspr2, - vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, - vpx_scaled_vert_c, vpx_scaled_avg_vert_c, - vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0); + vpx_convolve_copy_dspr2, vpx_convolve_avg_dspr2, vpx_convolve8_horiz_dspr2, + vpx_convolve8_avg_horiz_dspr2, vpx_convolve8_vert_dspr2, + vpx_convolve8_avg_vert_dspr2, vpx_convolve8_dspr2, vpx_convolve8_avg_dspr2, + vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c, + vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0); -INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest, ::testing::Values( - make_tuple(4, 4, &convolve8_dspr2), - make_tuple(8, 4, &convolve8_dspr2), - make_tuple(4, 8, &convolve8_dspr2), - make_tuple(8, 8, &convolve8_dspr2), - make_tuple(16, 8, &convolve8_dspr2), - make_tuple(8, 16, &convolve8_dspr2), - make_tuple(16, 16, &convolve8_dspr2), - make_tuple(32, 16, &convolve8_dspr2), - make_tuple(16, 32, &convolve8_dspr2), - make_tuple(32, 32, &convolve8_dspr2), - make_tuple(64, 32, &convolve8_dspr2), - make_tuple(32, 64, &convolve8_dspr2), - make_tuple(64, 64, &convolve8_dspr2))); -#endif +const ConvolveParam kArrayConvolve8_dspr2[] = { ALL_SIZES(convolve8_dspr2) }; +INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest, + 
::testing::ValuesIn(kArrayConvolve8_dspr2)); +#endif // HAVE_DSPR2 #if HAVE_MSA const ConvolveFunctions convolve8_msa( - vpx_convolve_copy_msa, vpx_convolve_avg_msa, - vpx_convolve8_horiz_msa, vpx_convolve8_avg_horiz_msa, - vpx_convolve8_vert_msa, vpx_convolve8_avg_vert_msa, - vpx_convolve8_msa, vpx_convolve8_avg_msa, - vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, - vpx_scaled_vert_c, vpx_scaled_avg_vert_c, - vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0); + vpx_convolve_copy_msa, vpx_convolve_avg_msa, vpx_convolve8_horiz_msa, + vpx_convolve8_avg_horiz_msa, vpx_convolve8_vert_msa, + vpx_convolve8_avg_vert_msa, vpx_convolve8_msa, vpx_convolve8_avg_msa, + vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c, + vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0); -INSTANTIATE_TEST_CASE_P(MSA, ConvolveTest, ::testing::Values( - make_tuple(4, 4, &convolve8_msa), - make_tuple(8, 4, &convolve8_msa), - make_tuple(4, 8, &convolve8_msa), - make_tuple(8, 8, &convolve8_msa), - make_tuple(16, 8, &convolve8_msa), - make_tuple(8, 16, &convolve8_msa), - make_tuple(16, 16, &convolve8_msa), - make_tuple(32, 16, &convolve8_msa), - make_tuple(16, 32, &convolve8_msa), - make_tuple(32, 32, &convolve8_msa), - make_tuple(64, 32, &convolve8_msa), - make_tuple(32, 64, &convolve8_msa), - make_tuple(64, 64, &convolve8_msa))); +const ConvolveParam kArrayConvolve8_msa[] = { ALL_SIZES(convolve8_msa) }; +INSTANTIATE_TEST_CASE_P(MSA, ConvolveTest, + ::testing::ValuesIn(kArrayConvolve8_msa)); #endif // HAVE_MSA } // namespace diff --git a/libs/libvpx/test/cpu_speed_test.cc b/libs/libvpx/test/cpu_speed_test.cc index 8baa2f9c89..404b5b44f4 100644 --- a/libs/libvpx/test/cpu_speed_test.cc +++ b/libs/libvpx/test/cpu_speed_test.cc @@ -23,10 +23,9 @@ class CpuSpeedTest public ::libvpx_test::CodecTestWith2Params { protected: CpuSpeedTest() - : EncoderTest(GET_PARAM(0)), - encoding_mode_(GET_PARAM(1)), - set_cpu_used_(GET_PARAM(2)), - min_psnr_(kMaxPSNR) {} + : EncoderTest(GET_PARAM(0)), 
encoding_mode_(GET_PARAM(1)), + set_cpu_used_(GET_PARAM(2)), min_psnr_(kMaxPSNR), + tune_content_(VP9E_CONTENT_DEFAULT) {} virtual ~CpuSpeedTest() {} virtual void SetUp() { @@ -41,14 +40,13 @@ class CpuSpeedTest } } - virtual void BeginPassHook(unsigned int /*pass*/) { - min_psnr_ = kMaxPSNR; - } + virtual void BeginPassHook(unsigned int /*pass*/) { min_psnr_ = kMaxPSNR; } virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video, ::libvpx_test::Encoder *encoder) { if (video->frame() == 1) { encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_); + encoder->Control(VP9E_SET_TUNE_CONTENT, tune_content_); if (encoding_mode_ != ::libvpx_test::kRealTime) { encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1); encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7); @@ -59,13 +57,13 @@ class CpuSpeedTest } virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) { - if (pkt->data.psnr.psnr[0] < min_psnr_) - min_psnr_ = pkt->data.psnr.psnr[0]; + if (pkt->data.psnr.psnr[0] < min_psnr_) min_psnr_ = pkt->data.psnr.psnr[0]; } ::libvpx_test::TestMode encoding_mode_; int set_cpu_used_; double min_psnr_; + int tune_content_; }; TEST_P(CpuSpeedTest, TestQ0) { @@ -74,7 +72,7 @@ TEST_P(CpuSpeedTest, TestQ0) { // the encoder to producing lots of big partitions which will likely // extend into the border and test the border condition. 
cfg_.rc_2pass_vbr_minsection_pct = 5; - cfg_.rc_2pass_vbr_minsection_pct = 2000; + cfg_.rc_2pass_vbr_maxsection_pct = 2000; cfg_.rc_target_bitrate = 400; cfg_.rc_max_quantizer = 0; cfg_.rc_min_quantizer = 0; @@ -92,7 +90,7 @@ TEST_P(CpuSpeedTest, TestScreencastQ0) { ::libvpx_test::Y4mVideoSource video("screendata.y4m", 0, 25); cfg_.g_timebase = video.timebase(); cfg_.rc_2pass_vbr_minsection_pct = 5; - cfg_.rc_2pass_vbr_minsection_pct = 2000; + cfg_.rc_2pass_vbr_maxsection_pct = 2000; cfg_.rc_target_bitrate = 400; cfg_.rc_max_quantizer = 0; cfg_.rc_min_quantizer = 0; @@ -103,13 +101,28 @@ TEST_P(CpuSpeedTest, TestScreencastQ0) { EXPECT_GE(min_psnr_, kMaxPSNR); } +TEST_P(CpuSpeedTest, TestTuneScreen) { + ::libvpx_test::Y4mVideoSource video("screendata.y4m", 0, 25); + cfg_.g_timebase = video.timebase(); + cfg_.rc_2pass_vbr_minsection_pct = 5; + cfg_.rc_2pass_vbr_minsection_pct = 2000; + cfg_.rc_target_bitrate = 2000; + cfg_.rc_max_quantizer = 63; + cfg_.rc_min_quantizer = 0; + tune_content_ = VP9E_CONTENT_SCREEN; + + init_flags_ = VPX_CODEC_USE_PSNR; + + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); +} + TEST_P(CpuSpeedTest, TestEncodeHighBitrate) { // Validate that this non multiple of 64 wide clip encodes and decodes // without a mismatch when passing in a very low max q. This pushes // the encoder to producing lots of big partitions which will likely // extend into the border and test the border condition. cfg_.rc_2pass_vbr_minsection_pct = 5; - cfg_.rc_2pass_vbr_minsection_pct = 2000; + cfg_.rc_2pass_vbr_maxsection_pct = 2000; cfg_.rc_target_bitrate = 12000; cfg_.rc_max_quantizer = 10; cfg_.rc_min_quantizer = 0; @@ -125,7 +138,7 @@ TEST_P(CpuSpeedTest, TestLowBitrate) { // when passing in a very high min q. This pushes the encoder to producing // lots of small partitions which might will test the other condition. 
cfg_.rc_2pass_vbr_minsection_pct = 5; - cfg_.rc_2pass_vbr_minsection_pct = 2000; + cfg_.rc_2pass_vbr_maxsection_pct = 2000; cfg_.rc_target_bitrate = 200; cfg_.rc_min_quantizer = 40; @@ -135,14 +148,9 @@ TEST_P(CpuSpeedTest, TestLowBitrate) { ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); } -VP9_INSTANTIATE_TEST_CASE( - CpuSpeedTest, - ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood, - ::libvpx_test::kRealTime), - ::testing::Range(0, 9)); - -VP10_INSTANTIATE_TEST_CASE( - CpuSpeedTest, - ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood), - ::testing::Range(0, 3)); +VP9_INSTANTIATE_TEST_CASE(CpuSpeedTest, + ::testing::Values(::libvpx_test::kTwoPassGood, + ::libvpx_test::kOnePassGood, + ::libvpx_test::kRealTime), + ::testing::Range(0, 9)); } // namespace diff --git a/libs/libvpx/test/cq_test.cc b/libs/libvpx/test/cq_test.cc index 4e8019a87c..20e1f0f3de 100644 --- a/libs/libvpx/test/cq_test.cc +++ b/libs/libvpx/test/cq_test.cc @@ -24,14 +24,12 @@ const int kCQLevelStep = 8; const unsigned int kCQTargetBitrate = 2000; class CQTest : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWithParam { + public ::libvpx_test::CodecTestWithParam { public: // maps the cqlevel to the bitrate produced. 
typedef std::map BitrateMap; - static void SetUpTestCase() { - bitrates_.clear(); - } + static void SetUpTestCase() { bitrates_.clear(); } static void TearDownTestCase() { ASSERT_TRUE(!HasFailure()) @@ -128,7 +126,6 @@ TEST_P(CQTest, LinearPSNRIsHigherForCQLevel) { EXPECT_GE(cq_psnr_lin, vbr_psnr_lin); } -VP8_INSTANTIATE_TEST_CASE(CQTest, - ::testing::Range(kCQLevelMin, kCQLevelMax, - kCQLevelStep)); +VP8_INSTANTIATE_TEST_CASE(CQTest, ::testing::Range(kCQLevelMin, kCQLevelMax, + kCQLevelStep)); } // namespace diff --git a/libs/libvpx/test/vp8cx_set_ref.sh b/libs/libvpx/test/cx_set_ref.sh similarity index 58% rename from libs/libvpx/test/vp8cx_set_ref.sh rename to libs/libvpx/test/cx_set_ref.sh index 5d760bcdec..0a58dc187b 100755 --- a/libs/libvpx/test/vp8cx_set_ref.sh +++ b/libs/libvpx/test/cx_set_ref.sh @@ -1,6 +1,6 @@ #!/bin/sh ## -## Copyright (c) 2014 The WebM project authors. All Rights Reserved. +## Copyright (c) 2016 The WebM project authors. All Rights Reserved. ## ## Use of this source code is governed by a BSD-style license ## that can be found in the LICENSE file in the root of the source @@ -8,30 +8,27 @@ ## in the file PATENTS. All contributing project authors may ## be found in the AUTHORS file in the root of the source tree. ## -## This file tests the libvpx vp8cx_set_ref example. To add new tests to this +## This file tests the libvpx cx_set_ref example. To add new tests to this ## file, do the following: ## 1. Write a shell function (this is your test). -## 2. Add the function to vp8cx_set_ref_tests (on a new line). +## 2. Add the function to cx_set_ref_tests (on a new line). ## . $(dirname $0)/tools_common.sh # Environment check: $YUV_RAW_INPUT is required. -vp8cx_set_ref_verify_environment() { +cx_set_ref_verify_environment() { if [ ! -e "${YUV_RAW_INPUT}" ]; then echo "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH." return 1 fi } -# Runs vp8cx_set_ref and updates the reference frame before encoding frame 90. 
-# $1 is the codec name, which vp8cx_set_ref does not support at present: It's -# currently used only to name the output file. -# TODO(tomfinegan): Pass the codec param once the example is updated to support -# VP9. +# Runs cx_set_ref and updates the reference frame before encoding frame 90. +# $1 is the codec name. vpx_set_ref() { - local encoder="${LIBVPX_BIN_PATH}/vp8cx_set_ref${VPX_TEST_EXE_SUFFIX}" local codec="$1" - local output_file="${VPX_TEST_OUTPUT_DIR}/vp8cx_set_ref_${codec}.ivf" + local encoder="${LIBVPX_BIN_PATH}/${codec}cx_set_ref${VPX_TEST_EXE_SUFFIX}" + local output_file="${VPX_TEST_OUTPUT_DIR}/${codec}cx_set_ref_${codec}.ivf" local ref_frame_num=90 if [ ! -x "${encoder}" ]; then @@ -46,12 +43,18 @@ vpx_set_ref() { [ -e "${output_file}" ] || return 1 } -vp8cx_set_ref_vp8() { +cx_set_ref_vp8() { if [ "$(vp8_encode_available)" = "yes" ]; then vpx_set_ref vp8 || return 1 fi } -vp8cx_set_ref_tests="vp8cx_set_ref_vp8" +cx_set_ref_vp9() { + if [ "$(vp9_encode_available)" = "yes" ]; then + vpx_set_ref vp9 || return 1 + fi +} -run_tests vp8cx_set_ref_verify_environment "${vp8cx_set_ref_tests}" +cx_set_ref_tests="cx_set_ref_vp8 cx_set_ref_vp9" + +run_tests cx_set_ref_verify_environment "${cx_set_ref_tests}" diff --git a/libs/libvpx/test/datarate_test.cc b/libs/libvpx/test/datarate_test.cc index 9d5074e303..67c78bc91c 100644 --- a/libs/libvpx/test/datarate_test.cc +++ b/libs/libvpx/test/datarate_test.cc @@ -18,8 +18,9 @@ namespace { -class DatarateTestLarge : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWithParam { +class DatarateTestLarge + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWithParam { public: DatarateTestLarge() : EncoderTest(GET_PARAM(0)) {} @@ -45,8 +46,9 @@ class DatarateTestLarge : public ::libvpx_test::EncoderTest, virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video, ::libvpx_test::Encoder *encoder) { - if (video->frame() == 0) + if (video->frame() == 0) { 
encoder->Control(VP8E_SET_NOISE_SENSITIVITY, denoiser_on_); + } if (denoiser_offon_test_) { ASSERT_GT(denoiser_offon_period_, 0) @@ -71,8 +73,7 @@ class DatarateTestLarge : public ::libvpx_test::EncoderTest, // http://code.google.com/p/webm/issues/detail?id=496 is fixed. // For now the codec assumes buffer starts at starting buffer rate // plus one frame's time. - if (last_pts_ == 0) - duration = 1; + if (last_pts_ == 0) duration = 1; // Add to the buffer the bits we'd expect from a constant bitrate server. bits_in_buffer_model_ += static_cast( @@ -83,14 +84,14 @@ class DatarateTestLarge : public ::libvpx_test::EncoderTest, * empty - and then stop showing frames until we've got enough bits to * show one. As noted in comment below (issue 495), this does not currently * apply to key frames. For now exclude key frames in condition below. */ - const bool key_frame = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) - ? true: false; + const bool key_frame = + (pkt->data.frame.flags & VPX_FRAME_IS_KEY) ? true : false; if (!key_frame) { ASSERT_GE(bits_in_buffer_model_, 0) << "Buffer Underrun at frame " - << pkt->data.frame.pts; + << pkt->data.frame.pts; } - const size_t frame_size_in_bits = pkt->data.frame.sz * 8; + const int64_t frame_size_in_bits = pkt->data.frame.sz * 8; // Subtract from the buffer the bits associated with a played back frame. bits_in_buffer_model_ -= frame_size_in_bits; @@ -99,8 +100,7 @@ class DatarateTestLarge : public ::libvpx_test::EncoderTest, bits_total_ += frame_size_in_bits; // If first drop not set and we have a drop set it to this time. - if (!first_drop_ && duration > 1) - first_drop_ = last_pts_ + 1; + if (!first_drop_ && duration > 1) first_drop_ = last_pts_ + 1; // Update the most recent pts. last_pts_ = pkt->data.frame.pts; @@ -119,8 +119,8 @@ class DatarateTestLarge : public ::libvpx_test::EncoderTest, duration_ = (last_pts_ + 1) * timebase_; // Effective file datarate includes the time spent prebuffering. 
- effective_datarate_ = (bits_total_ - bits_in_last_frame_) / 1000.0 - / (cfg_.rc_buf_initial_sz / 1000.0 + duration_); + effective_datarate_ = (bits_total_ - bits_in_last_frame_) / 1000.0 / + (cfg_.rc_buf_initial_sz / 1000.0 + duration_); file_datarate_ = file_size_in_kb / duration_; } @@ -135,7 +135,7 @@ class DatarateTestLarge : public ::libvpx_test::EncoderTest, double duration_; double file_datarate_; double effective_datarate_; - size_t bits_in_last_frame_; + int64_t bits_in_last_frame_; int denoiser_on_; int denoiser_offon_test_; int denoiser_offon_period_; @@ -256,8 +256,39 @@ TEST_P(DatarateTestLarge, ChangingDropFrameThresh) { } } -class DatarateTestVP9Large : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWith2Params { +// Disabled for tsan, see: +// https://bugs.chromium.org/p/webm/issues/detail?id=1049 +#if defined(__has_feature) +#if __has_feature(thread_sanitizer) +#define BUILDING_WITH_TSAN +#endif +#endif +#ifndef BUILDING_WITH_TSAN +TEST_P(DatarateTestLarge, DropFramesMultiThreads) { + denoiser_on_ = 0; + cfg_.rc_buf_initial_sz = 500; + cfg_.rc_dropframe_thresh = 30; + cfg_.rc_max_quantizer = 56; + cfg_.rc_end_usage = VPX_CBR; + // Encode using multiple threads. 
+ cfg_.g_threads = 2; + + ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, + 30, 1, 0, 140); + cfg_.rc_target_bitrate = 200; + ResetModel(); + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); + ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.95) + << " The datarate for the file exceeds the target!"; + + ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.3) + << " The datarate for the file missed the target!"; +} +#endif + +class DatarateTestVP9Large + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWith2Params { public: DatarateTestVP9Large() : EncoderTest(GET_PARAM(0)) {} @@ -307,8 +338,8 @@ class DatarateTestVP9Large : public ::libvpx_test::EncoderTest, if (num_temp_layers == 2) { if (frame_num % 2 == 0) { // Layer 0: predict from L and ARF, update L. - frame_flags = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF; + frame_flags = + VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; } else { // Layer 1: predict from L, G and ARF, and update G. frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST | @@ -317,15 +348,15 @@ class DatarateTestVP9Large : public ::libvpx_test::EncoderTest, } else if (num_temp_layers == 3) { if (frame_num % 4 == 0) { // Layer 0: predict from L and ARF; update L. - frame_flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_REF_GF; + frame_flags = + VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_REF_GF; } else if ((frame_num - 2) % 4 == 0) { // Layer 1: predict from L, G, ARF; update G. frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST; - } else if ((frame_num - 1) % 2 == 0) { + } else if ((frame_num - 1) % 2 == 0) { // Layer 2: predict from L, G, ARF; update none. 
- frame_flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_UPD_LAST; + frame_flags = + VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST; } } return frame_flags; @@ -353,8 +384,7 @@ class DatarateTestVP9Large : public ::libvpx_test::EncoderTest, virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video, ::libvpx_test::Encoder *encoder) { - if (video->frame() == 0) - encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_); + if (video->frame() == 0) encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_); if (denoiser_offon_test_) { ASSERT_GT(denoiser_offon_period_, 0) @@ -374,8 +404,8 @@ class DatarateTestVP9Large : public ::libvpx_test::EncoderTest, vpx_svc_layer_id_t layer_id; layer_id.spatial_layer_id = 0; frame_flags_ = SetFrameFlags(video->frame(), cfg_.ts_number_layers); - layer_id.temporal_layer_id = SetLayerId(video->frame(), - cfg_.ts_number_layers); + layer_id.temporal_layer_id = + SetLayerId(video->frame(), cfg_.ts_number_layers); encoder->Control(VP9E_SET_SVC_LAYER_ID, &layer_id); } const vpx_rational_t tb = video->timebase(); @@ -383,15 +413,13 @@ class DatarateTestVP9Large : public ::libvpx_test::EncoderTest, duration_ = 0; } - virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) { // Time since last timestamp = duration. vpx_codec_pts_t duration = pkt->data.frame.pts - last_pts_; if (duration > 1) { // If first drop not set and we have a drop set it to this time. - if (!first_drop_) - first_drop_ = last_pts_ + 1; + if (!first_drop_) first_drop_ = last_pts_ + 1; // Update the number of frame drops. num_drops_ += static_cast(duration - 1); // Update counter for total number of frames (#frames input to encoder). @@ -407,7 +435,7 @@ class DatarateTestVP9Large : public ::libvpx_test::EncoderTest, // Buffer should not go negative. 
ASSERT_GE(bits_in_buffer_model_, 0) << "Buffer Underrun at frame " - << pkt->data.frame.pts; + << pkt->data.frame.pts; const size_t frame_size_in_bits = pkt->data.frame.sz * 8; @@ -425,7 +453,7 @@ class DatarateTestVP9Large : public ::libvpx_test::EncoderTest, virtual void EndPassHook(void) { for (int layer = 0; layer < static_cast(cfg_.ts_number_layers); - ++layer) { + ++layer) { duration_ = (last_pts_ + 1) * timebase_; if (bits_total_[layer]) { // Effective file datarate: @@ -450,7 +478,55 @@ class DatarateTestVP9Large : public ::libvpx_test::EncoderTest, int denoiser_offon_period_; }; -// Check basic rate targeting, +// Check basic rate targeting for VBR mode with 0 lag. +TEST_P(DatarateTestVP9Large, BasicRateTargetingVBRLagZero) { + cfg_.rc_min_quantizer = 0; + cfg_.rc_max_quantizer = 63; + cfg_.g_error_resilient = 0; + cfg_.rc_end_usage = VPX_VBR; + cfg_.g_lag_in_frames = 0; + + ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, + 30, 1, 0, 300); + for (int i = 400; i <= 800; i += 400) { + cfg_.rc_target_bitrate = i; + ResetModel(); + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); + ASSERT_GE(effective_datarate_[0], cfg_.rc_target_bitrate * 0.75) + << " The datarate for the file is lower than target by too much!"; + ASSERT_LE(effective_datarate_[0], cfg_.rc_target_bitrate * 1.25) + << " The datarate for the file is greater than target by too much!"; + } +} + +// Check basic rate targeting for VBR mode with non-zero lag. +TEST_P(DatarateTestVP9Large, BasicRateTargetingVBRLagNonZero) { + cfg_.rc_min_quantizer = 0; + cfg_.rc_max_quantizer = 63; + cfg_.g_error_resilient = 0; + cfg_.rc_end_usage = VPX_VBR; + // For non-zero lag, rate control will work (be within bounds) for + // real-time mode. 
+ if (deadline_ == VPX_DL_REALTIME) { + cfg_.g_lag_in_frames = 15; + } else { + cfg_.g_lag_in_frames = 0; + } + + ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, + 30, 1, 0, 300); + for (int i = 400; i <= 800; i += 400) { + cfg_.rc_target_bitrate = i; + ResetModel(); + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); + ASSERT_GE(effective_datarate_[0], cfg_.rc_target_bitrate * 0.75) + << " The datarate for the file is lower than target by too much!"; + ASSERT_LE(effective_datarate_[0], cfg_.rc_target_bitrate * 1.25) + << " The datarate for the file is greater than target by too much!"; + } +} + +// Check basic rate targeting for CBR mode. TEST_P(DatarateTestVP9Large, BasicRateTargeting) { cfg_.rc_buf_initial_sz = 500; cfg_.rc_buf_optimal_sz = 500; @@ -474,7 +550,31 @@ TEST_P(DatarateTestVP9Large, BasicRateTargeting) { } } -// Check basic rate targeting, +// Check basic rate targeting for CBR mode, with 2 threads and dropped frames. +TEST_P(DatarateTestVP9Large, BasicRateTargetingDropFramesMultiThreads) { + cfg_.rc_buf_initial_sz = 500; + cfg_.rc_buf_optimal_sz = 500; + cfg_.rc_buf_sz = 1000; + cfg_.rc_dropframe_thresh = 30; + cfg_.rc_min_quantizer = 0; + cfg_.rc_max_quantizer = 63; + cfg_.rc_end_usage = VPX_CBR; + cfg_.g_lag_in_frames = 0; + // Encode using multiple threads. + cfg_.g_threads = 2; + + ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, + 30, 1, 0, 140); + cfg_.rc_target_bitrate = 200; + ResetModel(); + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); + ASSERT_GE(effective_datarate_[0], cfg_.rc_target_bitrate * 0.85) + << " The datarate for the file is lower than target by too much!"; + ASSERT_LE(effective_datarate_[0], cfg_.rc_target_bitrate * 1.15) + << " The datarate for the file is greater than target by too much!"; +} + +// Check basic rate targeting for CBR. 
TEST_P(DatarateTestVP9Large, BasicRateTargeting444) { ::libvpx_test::Y4mVideoSource video("rush_hour_444.y4m", 0, 140); @@ -499,7 +599,7 @@ TEST_P(DatarateTestVP9Large, BasicRateTargeting444) { ASSERT_LE(static_cast(cfg_.rc_target_bitrate), effective_datarate_[0] * 1.15) << " The datarate for the file missed the target!" - << cfg_.rc_target_bitrate << " "<< effective_datarate_; + << cfg_.rc_target_bitrate << " " << effective_datarate_; } } @@ -519,6 +619,9 @@ TEST_P(DatarateTestVP9Large, ChangingDropFrameThresh) { cfg_.rc_end_usage = VPX_CBR; cfg_.rc_target_bitrate = 200; cfg_.g_lag_in_frames = 0; + // TODO(marpan): Investigate datarate target failures with a smaller keyframe + // interval (128). + cfg_.kf_max_dist = 9999; ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, 30, 1, 0, 140); @@ -566,8 +669,7 @@ TEST_P(DatarateTestVP9Large, BasicRateTargeting2TemporalLayers) { cfg_.temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS; - if (deadline_ == VPX_DL_REALTIME) - cfg_.g_error_resilient = 1; + if (deadline_ == VPX_DL_REALTIME) cfg_.g_error_resilient = 1; ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, 30, 1, 0, 200); @@ -581,10 +683,12 @@ TEST_P(DatarateTestVP9Large, BasicRateTargeting2TemporalLayers) { for (int j = 0; j < static_cast(cfg_.ts_number_layers); ++j) { ASSERT_GE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 0.85) << " The datarate for the file is lower than target by too much, " - "for layer: " << j; + "for layer: " + << j; ASSERT_LE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 1.15) << " The datarate for the file is greater than target by too much, " - "for layer: " << j; + "for layer: " + << j; } } } @@ -624,12 +728,14 @@ TEST_P(DatarateTestVP9Large, BasicRateTargeting3TemporalLayers) { // Adjust the thresholds to be tighter than .75. 
ASSERT_GE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 0.75) << " The datarate for the file is lower than target by too much, " - "for layer: " << j; + "for layer: " + << j; // TODO(yaowu): Work out more stable rc control strategy and // Adjust the thresholds to be tighter than 1.25. ASSERT_LE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 1.25) << " The datarate for the file is greater than target by too much, " - "for layer: " << j; + "for layer: " + << j; } } } @@ -669,10 +775,12 @@ TEST_P(DatarateTestVP9Large, BasicRateTargeting3TemporalLayersFrameDropping) { for (int j = 0; j < static_cast(cfg_.ts_number_layers); ++j) { ASSERT_GE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 0.85) << " The datarate for the file is lower than target by too much, " - "for layer: " << j; + "for layer: " + << j; ASSERT_LE(effective_datarate_[j], cfg_.layer_target_bitrate[j] * 1.15) << " The datarate for the file is greater than target by too much, " - "for layer: " << j; + "for layer: " + << j; // Expect some frame drops in this test: for this 200 frames test, // expect at least 10% and not more than 60% drops. 
ASSERT_GE(num_drops_, 20); @@ -742,11 +850,15 @@ TEST_P(DatarateTestVP9Large, DenoiserOffOn) { } #endif // CONFIG_VP9_TEMPORAL_DENOISING -class DatarateOnePassCbrSvc : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWith2Params { +class DatarateOnePassCbrSvc + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWith2Params { public: - DatarateOnePassCbrSvc() : EncoderTest(GET_PARAM(0)) {} + DatarateOnePassCbrSvc() : EncoderTest(GET_PARAM(0)) { + memset(&svc_params_, 0, sizeof(svc_params_)); + } virtual ~DatarateOnePassCbrSvc() {} + protected: virtual void SetUp() { InitializeConfig(); @@ -764,8 +876,7 @@ class DatarateOnePassCbrSvc : public ::libvpx_test::EncoderTest, mismatch_psnr_ = 0.0; mismatch_nframes_ = 0; } - virtual void BeginPassHook(unsigned int /*pass*/) { - } + virtual void BeginPassHook(unsigned int /*pass*/) {} virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video, ::libvpx_test::Encoder *encoder) { if (video->frame() == 0) { @@ -774,10 +885,10 @@ class DatarateOnePassCbrSvc : public ::libvpx_test::EncoderTest, svc_params_.max_quantizers[i] = 63; svc_params_.min_quantizers[i] = 0; } - svc_params_.scaling_factor_num[0] = 144; - svc_params_.scaling_factor_den[0] = 288; - svc_params_.scaling_factor_num[1] = 288; - svc_params_.scaling_factor_den[1] = 288; + svc_params_.speed_per_layer[0] = 5; + for (i = 1; i < VPX_SS_MAX_LAYERS; ++i) { + svc_params_.speed_per_layer[i] = 7; + } encoder->Control(VP9E_SET_SVC, 1); encoder->Control(VP9E_SET_SVC_PARAMETERS, &svc_params_); encoder->Control(VP8E_SET_CPUUSED, speed_setting_); @@ -791,21 +902,19 @@ class DatarateOnePassCbrSvc : public ::libvpx_test::EncoderTest, } virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) { vpx_codec_pts_t duration = pkt->data.frame.pts - last_pts_; - if (last_pts_ == 0) - duration = 1; + if (last_pts_ == 0) duration = 1; bits_in_buffer_model_ += static_cast( duration * timebase_ * cfg_.rc_target_bitrate * 1000); - const bool 
key_frame = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) - ? true: false; + const bool key_frame = + (pkt->data.frame.flags & VPX_FRAME_IS_KEY) ? true : false; if (!key_frame) { ASSERT_GE(bits_in_buffer_model_, 0) << "Buffer Underrun at frame " - << pkt->data.frame.pts; + << pkt->data.frame.pts; } const size_t frame_size_in_bits = pkt->data.frame.sz * 8; bits_in_buffer_model_ -= frame_size_in_bits; bits_total_ += frame_size_in_bits; - if (!first_drop_ && duration > 1) - first_drop_ = last_pts_ + 1; + if (!first_drop_ && duration > 1) first_drop_ = last_pts_ + 1; last_pts_ = pkt->data.frame.pts; bits_in_last_frame_ = frame_size_in_bits; ++frame_number_; @@ -814,22 +923,17 @@ class DatarateOnePassCbrSvc : public ::libvpx_test::EncoderTest, if (bits_total_) { const double file_size_in_kb = bits_total_ / 1000.; // bits per kilobit duration_ = (last_pts_ + 1) * timebase_; - effective_datarate_ = (bits_total_ - bits_in_last_frame_) / 1000.0 - / (cfg_.rc_buf_initial_sz / 1000.0 + duration_); file_datarate_ = file_size_in_kb / duration_; } } - virtual void MismatchHook(const vpx_image_t *img1, - const vpx_image_t *img2) { + virtual void MismatchHook(const vpx_image_t *img1, const vpx_image_t *img2) { double mismatch_psnr = compute_psnr(img1, img2); mismatch_psnr_ += mismatch_psnr; ++mismatch_nframes_; } - unsigned int GetMismatchFrames() { - return mismatch_nframes_; - } + unsigned int GetMismatchFrames() { return mismatch_nframes_; } vpx_codec_pts_t last_pts_; int64_t bits_in_buffer_model_; @@ -839,7 +943,6 @@ class DatarateOnePassCbrSvc : public ::libvpx_test::EncoderTest, int64_t bits_total_; double duration_; double file_datarate_; - double effective_datarate_; size_t bits_in_last_frame_; vpx_svc_extra_cfg_t svc_params_; int speed_setting_; @@ -847,44 +950,80 @@ class DatarateOnePassCbrSvc : public ::libvpx_test::EncoderTest, int mismatch_nframes_; }; static void assign_layer_bitrates(vpx_codec_enc_cfg_t *const enc_cfg, - const vpx_svc_extra_cfg_t *svc_params, - int 
spatial_layers, - int temporal_layers, - int temporal_layering_mode) { + const vpx_svc_extra_cfg_t *svc_params, + int spatial_layers, int temporal_layers, + int temporal_layering_mode) { int sl, spatial_layer_target; float total = 0; - float alloc_ratio[VPX_MAX_LAYERS] = {0}; + float alloc_ratio[VPX_MAX_LAYERS] = { 0 }; for (sl = 0; sl < spatial_layers; ++sl) { if (svc_params->scaling_factor_den[sl] > 0) { - alloc_ratio[sl] = (float)(svc_params->scaling_factor_num[sl] * - 1.0 / svc_params->scaling_factor_den[sl]); + alloc_ratio[sl] = (float)(svc_params->scaling_factor_num[sl] * 1.0 / + svc_params->scaling_factor_den[sl]); total += alloc_ratio[sl]; } } for (sl = 0; sl < spatial_layers; ++sl) { enc_cfg->ss_target_bitrate[sl] = spatial_layer_target = - (unsigned int)(enc_cfg->rc_target_bitrate * - alloc_ratio[sl] / total); + (unsigned int)(enc_cfg->rc_target_bitrate * alloc_ratio[sl] / total); const int index = sl * temporal_layers; if (temporal_layering_mode == 3) { - enc_cfg->layer_target_bitrate[index] = - spatial_layer_target >> 1; + enc_cfg->layer_target_bitrate[index] = spatial_layer_target >> 1; enc_cfg->layer_target_bitrate[index + 1] = (spatial_layer_target >> 1) + (spatial_layer_target >> 2); - enc_cfg->layer_target_bitrate[index + 2] = - spatial_layer_target; + enc_cfg->layer_target_bitrate[index + 2] = spatial_layer_target; } else if (temporal_layering_mode == 2) { - enc_cfg->layer_target_bitrate[index] = - spatial_layer_target * 2 / 3; - enc_cfg->layer_target_bitrate[index + 1] = - spatial_layer_target; + enc_cfg->layer_target_bitrate[index] = spatial_layer_target * 2 / 3; + enc_cfg->layer_target_bitrate[index + 1] = spatial_layer_target; } } } // Check basic rate targeting for 1 pass CBR SVC: 2 spatial layers and // 3 temporal layers. Run CIF clip with 1 thread. 
-TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc) { +TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc2SpatialLayers) { + cfg_.rc_buf_initial_sz = 500; + cfg_.rc_buf_optimal_sz = 500; + cfg_.rc_buf_sz = 1000; + cfg_.rc_min_quantizer = 0; + cfg_.rc_max_quantizer = 63; + cfg_.rc_end_usage = VPX_CBR; + cfg_.g_lag_in_frames = 0; + cfg_.ss_number_layers = 2; + cfg_.ts_number_layers = 3; + cfg_.ts_rate_decimator[0] = 4; + cfg_.ts_rate_decimator[1] = 2; + cfg_.ts_rate_decimator[2] = 1; + cfg_.g_error_resilient = 1; + cfg_.g_threads = 1; + cfg_.temporal_layering_mode = 3; + svc_params_.scaling_factor_num[0] = 144; + svc_params_.scaling_factor_den[0] = 288; + svc_params_.scaling_factor_num[1] = 288; + svc_params_.scaling_factor_den[1] = 288; + cfg_.rc_dropframe_thresh = 10; + cfg_.kf_max_dist = 9999; + ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, + 30, 1, 0, 200); + // TODO(wonkap/marpan): Check that effective_datarate for each layer hits the + // layer target_bitrate. + for (int i = 200; i <= 800; i += 200) { + cfg_.rc_target_bitrate = i; + ResetModel(); + assign_layer_bitrates(&cfg_, &svc_params_, cfg_.ss_number_layers, + cfg_.ts_number_layers, cfg_.temporal_layering_mode); + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); + ASSERT_GE(cfg_.rc_target_bitrate, file_datarate_ * 0.85) + << " The datarate for the file exceeds the target by too much!"; + ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.15) + << " The datarate for the file is lower than the target by too much!"; + EXPECT_EQ(static_cast(0), GetMismatchFrames()); + } +} + +// Check basic rate targeting for 1 pass CBR SVC: 2 spatial layers and 3 +// temporal layers. Run CIF clip with 1 thread, and few short key frame periods. 
+TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc2SpatialLayersSmallKf) { cfg_.rc_buf_initial_sz = 500; cfg_.rc_buf_optimal_sz = 500; cfg_.rc_buf_sz = 1000; @@ -907,25 +1046,26 @@ TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc) { cfg_.rc_dropframe_thresh = 10; ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288, 30, 1, 0, 200); - // TODO(wonkap/marpan): Check that effective_datarate for each layer hits the - // layer target_bitrate. Also check if test can pass at lower bitrate (~200k). - for (int i = 400; i <= 800; i += 200) { - cfg_.rc_target_bitrate = i; + cfg_.rc_target_bitrate = 400; + // For this 3 temporal layer case, pattern repeats every 4 frames, so choose + // 4 key neighboring key frame periods (so key frame will land on 0-2-1-2). + for (int j = 64; j <= 67; j++) { + cfg_.kf_max_dist = j; ResetModel(); assign_layer_bitrates(&cfg_, &svc_params_, cfg_.ss_number_layers, - cfg_.ts_number_layers, cfg_.temporal_layering_mode); + cfg_.ts_number_layers, cfg_.temporal_layering_mode); ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); - ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.85) - << " The datarate for the file exceeds the target by too much!"; + ASSERT_GE(cfg_.rc_target_bitrate, file_datarate_ * 0.85) + << " The datarate for the file exceeds the target by too much!"; ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.15) << " The datarate for the file is lower than the target by too much!"; - EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0); + EXPECT_EQ(static_cast(0), GetMismatchFrames()); } } // Check basic rate targeting for 1 pass CBR SVC: 2 spatial layers and // 3 temporal layers. Run HD clip with 4 threads. 
-TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc4threads) { +TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc2SpatialLayers4threads) { cfg_.rc_buf_initial_sz = 500; cfg_.rc_buf_optimal_sz = 500; cfg_.rc_buf_sz = 1000; @@ -946,26 +1086,151 @@ TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc4threads) { svc_params_.scaling_factor_num[1] = 288; svc_params_.scaling_factor_den[1] = 288; cfg_.rc_dropframe_thresh = 10; - ::libvpx_test::I420VideoSource video("niklas_1280_720_30.y4m", 1280, 720, - 30, 1, 0, 300); + cfg_.kf_max_dist = 9999; + ::libvpx_test::I420VideoSource video("niklas_1280_720_30.y4m", 1280, 720, 30, + 1, 0, 300); cfg_.rc_target_bitrate = 800; ResetModel(); assign_layer_bitrates(&cfg_, &svc_params_, cfg_.ss_number_layers, - cfg_.ts_number_layers, cfg_.temporal_layering_mode); + cfg_.ts_number_layers, cfg_.temporal_layering_mode); ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); - ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.85) - << " The datarate for the file exceeds the target by too much!"; + ASSERT_GE(cfg_.rc_target_bitrate, file_datarate_ * 0.85) + << " The datarate for the file exceeds the target by too much!"; ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.15) << " The datarate for the file is lower than the target by too much!"; - EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0); + EXPECT_EQ(static_cast(0), GetMismatchFrames()); +} + +// Check basic rate targeting for 1 pass CBR SVC: 3 spatial layers and +// 3 temporal layers. Run CIF clip with 1 thread. 
+TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc3SpatialLayers) { + cfg_.rc_buf_initial_sz = 500; + cfg_.rc_buf_optimal_sz = 500; + cfg_.rc_buf_sz = 1000; + cfg_.rc_min_quantizer = 0; + cfg_.rc_max_quantizer = 63; + cfg_.rc_end_usage = VPX_CBR; + cfg_.g_lag_in_frames = 0; + cfg_.ss_number_layers = 3; + cfg_.ts_number_layers = 3; + cfg_.ts_rate_decimator[0] = 4; + cfg_.ts_rate_decimator[1] = 2; + cfg_.ts_rate_decimator[2] = 1; + cfg_.g_error_resilient = 1; + cfg_.g_threads = 1; + cfg_.temporal_layering_mode = 3; + svc_params_.scaling_factor_num[0] = 72; + svc_params_.scaling_factor_den[0] = 288; + svc_params_.scaling_factor_num[1] = 144; + svc_params_.scaling_factor_den[1] = 288; + svc_params_.scaling_factor_num[2] = 288; + svc_params_.scaling_factor_den[2] = 288; + cfg_.rc_dropframe_thresh = 10; + cfg_.kf_max_dist = 9999; + ::libvpx_test::I420VideoSource video("niklas_1280_720_30.y4m", 1280, 720, 30, + 1, 0, 300); + cfg_.rc_target_bitrate = 800; + ResetModel(); + assign_layer_bitrates(&cfg_, &svc_params_, cfg_.ss_number_layers, + cfg_.ts_number_layers, cfg_.temporal_layering_mode); + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); + ASSERT_GE(cfg_.rc_target_bitrate, file_datarate_ * 0.85) + << " The datarate for the file exceeds the target by too much!"; + ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.22) + << " The datarate for the file is lower than the target by too much!"; + EXPECT_EQ(static_cast(0), GetMismatchFrames()); +} + +// Check basic rate targeting for 1 pass CBR SVC: 3 spatial layers and 3 +// temporal layers. Run CIF clip with 1 thread, and few short key frame periods. 
+TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc3SpatialLayersSmallKf) { + cfg_.rc_buf_initial_sz = 500; + cfg_.rc_buf_optimal_sz = 500; + cfg_.rc_buf_sz = 1000; + cfg_.rc_min_quantizer = 0; + cfg_.rc_max_quantizer = 63; + cfg_.rc_end_usage = VPX_CBR; + cfg_.g_lag_in_frames = 0; + cfg_.ss_number_layers = 3; + cfg_.ts_number_layers = 3; + cfg_.ts_rate_decimator[0] = 4; + cfg_.ts_rate_decimator[1] = 2; + cfg_.ts_rate_decimator[2] = 1; + cfg_.g_error_resilient = 1; + cfg_.g_threads = 1; + cfg_.temporal_layering_mode = 3; + svc_params_.scaling_factor_num[0] = 72; + svc_params_.scaling_factor_den[0] = 288; + svc_params_.scaling_factor_num[1] = 144; + svc_params_.scaling_factor_den[1] = 288; + svc_params_.scaling_factor_num[2] = 288; + svc_params_.scaling_factor_den[2] = 288; + cfg_.rc_dropframe_thresh = 10; + ::libvpx_test::I420VideoSource video("niklas_1280_720_30.y4m", 1280, 720, 30, + 1, 0, 300); + cfg_.rc_target_bitrate = 800; + // For this 3 temporal layer case, pattern repeats every 4 frames, so choose + // 4 key neighboring key frame periods (so key frame will land on 0-2-1-2). + for (int j = 32; j <= 35; j++) { + cfg_.kf_max_dist = j; + ResetModel(); + assign_layer_bitrates(&cfg_, &svc_params_, cfg_.ss_number_layers, + cfg_.ts_number_layers, cfg_.temporal_layering_mode); + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); + ASSERT_GE(cfg_.rc_target_bitrate, file_datarate_ * 0.85) + << " The datarate for the file exceeds the target by too much!"; + ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.30) + << " The datarate for the file is lower than the target by too much!"; + EXPECT_EQ(static_cast(0), GetMismatchFrames()); + } +} + +// Check basic rate targeting for 1 pass CBR SVC: 3 spatial layers and +// 3 temporal layers. Run HD clip with 4 threads. 
+TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc3SpatialLayers4threads) { + cfg_.rc_buf_initial_sz = 500; + cfg_.rc_buf_optimal_sz = 500; + cfg_.rc_buf_sz = 1000; + cfg_.rc_min_quantizer = 0; + cfg_.rc_max_quantizer = 63; + cfg_.rc_end_usage = VPX_CBR; + cfg_.g_lag_in_frames = 0; + cfg_.ss_number_layers = 3; + cfg_.ts_number_layers = 3; + cfg_.ts_rate_decimator[0] = 4; + cfg_.ts_rate_decimator[1] = 2; + cfg_.ts_rate_decimator[2] = 1; + cfg_.g_error_resilient = 1; + cfg_.g_threads = 4; + cfg_.temporal_layering_mode = 3; + svc_params_.scaling_factor_num[0] = 72; + svc_params_.scaling_factor_den[0] = 288; + svc_params_.scaling_factor_num[1] = 144; + svc_params_.scaling_factor_den[1] = 288; + svc_params_.scaling_factor_num[2] = 288; + svc_params_.scaling_factor_den[2] = 288; + cfg_.rc_dropframe_thresh = 10; + cfg_.kf_max_dist = 9999; + ::libvpx_test::I420VideoSource video("niklas_1280_720_30.y4m", 1280, 720, 30, + 1, 0, 300); + cfg_.rc_target_bitrate = 800; + ResetModel(); + assign_layer_bitrates(&cfg_, &svc_params_, cfg_.ss_number_layers, + cfg_.ts_number_layers, cfg_.temporal_layering_mode); + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); + ASSERT_GE(cfg_.rc_target_bitrate, file_datarate_ * 0.85) + << " The datarate for the file exceeds the target by too much!"; + ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.22) + << " The datarate for the file is lower than the target by too much!"; + EXPECT_EQ(static_cast(0), GetMismatchFrames()); } VP8_INSTANTIATE_TEST_CASE(DatarateTestLarge, ALL_TEST_MODES); VP9_INSTANTIATE_TEST_CASE(DatarateTestVP9Large, ::testing::Values(::libvpx_test::kOnePassGood, ::libvpx_test::kRealTime), - ::testing::Range(2, 7)); + ::testing::Range(2, 9)); VP9_INSTANTIATE_TEST_CASE(DatarateOnePassCbrSvc, ::testing::Values(::libvpx_test::kRealTime), - ::testing::Range(5, 8)); + ::testing::Range(5, 9)); } // namespace diff --git a/libs/libvpx/test/dct16x16_test.cc b/libs/libvpx/test/dct16x16_test.cc index d6cc5e443b..f9745ed81f 100644 --- 
a/libs/libvpx/test/dct16x16_test.cc +++ b/libs/libvpx/test/dct16x16_test.cc @@ -25,20 +25,12 @@ #include "vpx/vpx_codec.h" #include "vpx/vpx_integer.h" #include "vpx_ports/mem.h" +#include "vpx_ports/msvc.h" // for round() using libvpx_test::ACMRandom; namespace { -#ifdef _MSC_VER -static int round(double x) { - if (x < 0) - return static_cast(ceil(x - 0.5)); - else - return static_cast(floor(x + 0.5)); -} -#endif - const int kNumCoeffs = 256; const double C1 = 0.995184726672197; const double C2 = 0.98078528040323; @@ -62,16 +54,16 @@ void butterfly_16x16_dct_1d(double input[16], double output[16]) { double temp1, temp2; // step 1 - step[ 0] = input[0] + input[15]; - step[ 1] = input[1] + input[14]; - step[ 2] = input[2] + input[13]; - step[ 3] = input[3] + input[12]; - step[ 4] = input[4] + input[11]; - step[ 5] = input[5] + input[10]; - step[ 6] = input[6] + input[ 9]; - step[ 7] = input[7] + input[ 8]; - step[ 8] = input[7] - input[ 8]; - step[ 9] = input[6] - input[ 9]; + step[0] = input[0] + input[15]; + step[1] = input[1] + input[14]; + step[2] = input[2] + input[13]; + step[3] = input[3] + input[12]; + step[4] = input[4] + input[11]; + step[5] = input[5] + input[10]; + step[6] = input[6] + input[9]; + step[7] = input[7] + input[8]; + step[8] = input[7] - input[8]; + step[9] = input[6] - input[9]; step[10] = input[5] - input[10]; step[11] = input[4] - input[11]; step[12] = input[3] - input[12]; @@ -89,13 +81,13 @@ void butterfly_16x16_dct_1d(double input[16], double output[16]) { output[6] = step[1] - step[6]; output[7] = step[0] - step[7]; - temp1 = step[ 8] * C7; + temp1 = step[8] * C7; temp2 = step[15] * C9; - output[ 8] = temp1 + temp2; + output[8] = temp1 + temp2; - temp1 = step[ 9] * C11; + temp1 = step[9] * C11; temp2 = step[14] * C5; - output[ 9] = temp1 - temp2; + output[9] = temp1 - temp2; temp1 = step[10] * C3; temp2 = step[13] * C13; @@ -113,40 +105,40 @@ void butterfly_16x16_dct_1d(double input[16], double output[16]) { temp2 = step[13] * C3; 
output[13] = temp2 - temp1; - temp1 = step[ 9] * C5; + temp1 = step[9] * C5; temp2 = step[14] * C11; output[14] = temp2 + temp1; - temp1 = step[ 8] * C9; + temp1 = step[8] * C9; temp2 = step[15] * C7; output[15] = temp2 - temp1; // step 3 - step[ 0] = output[0] + output[3]; - step[ 1] = output[1] + output[2]; - step[ 2] = output[1] - output[2]; - step[ 3] = output[0] - output[3]; + step[0] = output[0] + output[3]; + step[1] = output[1] + output[2]; + step[2] = output[1] - output[2]; + step[3] = output[0] - output[3]; temp1 = output[4] * C14; temp2 = output[7] * C2; - step[ 4] = temp1 + temp2; + step[4] = temp1 + temp2; temp1 = output[5] * C10; temp2 = output[6] * C6; - step[ 5] = temp1 + temp2; + step[5] = temp1 + temp2; temp1 = output[5] * C6; temp2 = output[6] * C10; - step[ 6] = temp2 - temp1; + step[6] = temp2 - temp1; temp1 = output[4] * C2; temp2 = output[7] * C14; - step[ 7] = temp2 - temp1; + step[7] = temp2 - temp1; - step[ 8] = output[ 8] + output[11]; - step[ 9] = output[ 9] + output[10]; - step[10] = output[ 9] - output[10]; - step[11] = output[ 8] - output[11]; + step[8] = output[8] + output[11]; + step[9] = output[9] + output[10]; + step[10] = output[9] - output[10]; + step[11] = output[8] - output[11]; step[12] = output[12] + output[15]; step[13] = output[13] + output[14]; @@ -154,25 +146,25 @@ void butterfly_16x16_dct_1d(double input[16], double output[16]) { step[15] = output[12] - output[15]; // step 4 - output[ 0] = (step[ 0] + step[ 1]); - output[ 8] = (step[ 0] - step[ 1]); + output[0] = (step[0] + step[1]); + output[8] = (step[0] - step[1]); temp1 = step[2] * C12; temp2 = step[3] * C4; temp1 = temp1 + temp2; - output[ 4] = 2*(temp1 * C8); + output[4] = 2 * (temp1 * C8); temp1 = step[2] * C4; temp2 = step[3] * C12; temp1 = temp2 - temp1; output[12] = 2 * (temp1 * C8); - output[ 2] = 2 * ((step[4] + step[ 5]) * C8); - output[14] = 2 * ((step[7] - step[ 6]) * C8); + output[2] = 2 * ((step[4] + step[5]) * C8); + output[14] = 2 * ((step[7] - 
step[6]) * C8); temp1 = step[4] - step[5]; temp2 = step[6] + step[7]; - output[ 6] = (temp1 + temp2); + output[6] = (temp1 + temp2); output[10] = (temp1 - temp2); intermediate[8] = step[8] + step[14]; @@ -188,18 +180,18 @@ void butterfly_16x16_dct_1d(double input[16], double output[16]) { temp1 = temp2 + temp1; output[13] = 2 * (temp1 * C8); - output[ 9] = 2 * ((step[10] + step[11]) * C8); + output[9] = 2 * ((step[10] + step[11]) * C8); intermediate[11] = step[10] - step[11]; intermediate[12] = step[12] + step[13]; intermediate[13] = step[12] - step[13]; - intermediate[14] = step[ 8] - step[14]; - intermediate[15] = step[ 9] - step[15]; + intermediate[14] = step[8] - step[14]; + intermediate[15] = step[9] - step[15]; output[15] = (intermediate[11] + intermediate[12]); - output[ 1] = -(intermediate[11] - intermediate[12]); + output[1] = -(intermediate[11] - intermediate[12]); - output[ 7] = 2 * (intermediate[13] * C8); + output[7] = 2 * (intermediate[13] * C8); temp1 = intermediate[14] * C12; temp2 = intermediate[15] * C4; @@ -209,28 +201,24 @@ void butterfly_16x16_dct_1d(double input[16], double output[16]) { temp1 = intermediate[14] * C4; temp2 = intermediate[15] * C12; temp1 = temp2 + temp1; - output[ 5] = 2 * (temp1 * C8); + output[5] = 2 * (temp1 * C8); } void reference_16x16_dct_2d(int16_t input[256], double output[256]) { // First transform columns for (int i = 0; i < 16; ++i) { double temp_in[16], temp_out[16]; - for (int j = 0; j < 16; ++j) - temp_in[j] = input[j * 16 + i]; + for (int j = 0; j < 16; ++j) temp_in[j] = input[j * 16 + i]; butterfly_16x16_dct_1d(temp_in, temp_out); - for (int j = 0; j < 16; ++j) - output[j * 16 + i] = temp_out[j]; + for (int j = 0; j < 16; ++j) output[j * 16 + i] = temp_out[j]; } // Then transform rows for (int i = 0; i < 16; ++i) { double temp_in[16], temp_out[16]; - for (int j = 0; j < 16; ++j) - temp_in[j] = output[j + i * 16]; + for (int j = 0; j < 16; ++j) temp_in[j] = output[j + i * 16]; butterfly_16x16_dct_1d(temp_in, 
temp_out); // Scale by some magic number - for (int j = 0; j < 16; ++j) - output[j + i * 16] = temp_out[j]/2; + for (int j = 0; j < 16; ++j) output[j + i * 16] = temp_out[j] / 2; } } @@ -256,8 +244,7 @@ void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride, vpx_idct16x16_256_add_c(in, dest, stride); } -void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride, - int tx_type) { +void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) { vp9_fht16x16_c(in, out, stride, tx_type); } @@ -359,11 +346,10 @@ class Trans16x16TestBase { } } - ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block, - test_temp_block, pitch_)); + ASM_REGISTER_STATE_CHECK( + RunFwdTxfm(test_input_block, test_temp_block, pitch_)); if (bit_depth_ == VPX_BITS_8) { - ASM_REGISTER_STATE_CHECK( - RunInvTxfm(test_temp_block, dst, pitch_)); + ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_)); #if CONFIG_VP9_HIGHBITDEPTH } else { ASM_REGISTER_STATE_CHECK( @@ -373,19 +359,18 @@ class Trans16x16TestBase { for (int j = 0; j < kNumCoeffs; ++j) { #if CONFIG_VP9_HIGHBITDEPTH - const uint32_t diff = - bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j]; + const int32_t diff = + bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j]; #else - const uint32_t diff = dst[j] - src[j]; + const int32_t diff = dst[j] - src[j]; #endif const uint32_t error = diff * diff; - if (max_error < error) - max_error = error; + if (max_error < error) max_error = error; total_error += error; } } - EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error) + EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error) << "Error: 16x16 FHT/IHT has an individual round trip error > 1"; EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error) @@ -401,8 +386,9 @@ class Trans16x16TestBase { for (int i = 0; i < count_test_block; ++i) { // Initialize a test block with input range [-mask_, mask_]. 
- for (int j = 0; j < kNumCoeffs; ++j) + for (int j = 0; j < kNumCoeffs; ++j) { input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_); + } fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_); ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_)); @@ -426,16 +412,14 @@ class Trans16x16TestBase { input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_; } if (i == 0) { - for (int j = 0; j < kNumCoeffs; ++j) - input_extreme_block[j] = mask_; + for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = mask_; } else if (i == 1) { - for (int j = 0; j < kNumCoeffs; ++j) - input_extreme_block[j] = -mask_; + for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = -mask_; } fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_); - ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block, - output_block, pitch_)); + ASM_REGISTER_STATE_CHECK( + RunFwdTxfm(input_extreme_block, output_block, pitch_)); // The minimum quant value is 4. for (int j = 0; j < kNumCoeffs; ++j) { @@ -464,12 +448,12 @@ class Trans16x16TestBase { for (int j = 0; j < kNumCoeffs; ++j) { input_extreme_block[j] = rnd.Rand8() % 2 ? 
mask_ : -mask_; } - if (i == 0) - for (int j = 0; j < kNumCoeffs; ++j) - input_extreme_block[j] = mask_; - if (i == 1) - for (int j = 0; j < kNumCoeffs; ++j) - input_extreme_block[j] = -mask_; + if (i == 0) { + for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = mask_; + } + if (i == 1) { + for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = -mask_; + } fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_); @@ -483,8 +467,9 @@ class Trans16x16TestBase { // quantization with maximum allowed step sizes output_ref_block[0] = (output_ref_block[0] / dc_thred) * dc_thred; - for (int j = 1; j < kNumCoeffs; ++j) + for (int j = 1; j < kNumCoeffs; ++j) { output_ref_block[j] = (output_ref_block[j] / ac_thred) * ac_thred; + } if (bit_depth_ == VPX_BITS_8) { inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_); ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_)); @@ -492,17 +477,15 @@ class Trans16x16TestBase { } else { inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_, tx_type_); - ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, - CONVERT_TO_BYTEPTR(dst16), pitch_)); + ASM_REGISTER_STATE_CHECK( + RunInvTxfm(output_ref_block, CONVERT_TO_BYTEPTR(dst16), pitch_)); #endif } if (bit_depth_ == VPX_BITS_8) { - for (int j = 0; j < kNumCoeffs; ++j) - EXPECT_EQ(ref[j], dst[j]); + for (int j = 0; j < kNumCoeffs; ++j) EXPECT_EQ(ref[j], dst[j]); #if CONFIG_VP9_HIGHBITDEPTH } else { - for (int j = 0; j < kNumCoeffs; ++j) - EXPECT_EQ(ref16[j], dst16[j]); + for (int j = 0; j < kNumCoeffs; ++j) EXPECT_EQ(ref16[j], dst16[j]); #endif } } @@ -539,15 +522,16 @@ class Trans16x16TestBase { } reference_16x16_dct_2d(in, out_r); - for (int j = 0; j < kNumCoeffs; ++j) + for (int j = 0; j < kNumCoeffs; ++j) { coeff[j] = static_cast(round(out_r[j])); + } if (bit_depth_ == VPX_BITS_8) { ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16)); #if CONFIG_VP9_HIGHBITDEPTH } else { - ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, 
CONVERT_TO_BYTEPTR(dst16), - 16)); + ASM_REGISTER_STATE_CHECK( + RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), 16)); #endif // CONFIG_VP9_HIGHBITDEPTH } @@ -559,9 +543,8 @@ class Trans16x16TestBase { const uint32_t diff = dst[j] - src[j]; #endif // CONFIG_VP9_HIGHBITDEPTH const uint32_t error = diff * diff; - EXPECT_GE(1u, error) - << "Error: 16x16 IDCT has error " << error - << " at index " << j; + EXPECT_GE(1u, error) << "Error: 16x16 IDCT has error " << error + << " at index " << j; } } } @@ -603,8 +586,8 @@ class Trans16x16TestBase { } else { #if CONFIG_VP9_HIGHBITDEPTH ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_); - ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), - pitch_)); + ASM_REGISTER_STATE_CHECK( + RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_)); #endif // CONFIG_VP9_HIGHBITDEPTH } @@ -616,9 +599,8 @@ class Trans16x16TestBase { const uint32_t diff = dst[j] - ref[j]; #endif // CONFIG_VP9_HIGHBITDEPTH const uint32_t error = diff * diff; - EXPECT_EQ(0u, error) - << "Error: 16x16 IDCT Comparison has error " << error - << " at index " << j; + EXPECT_EQ(0u, error) << "Error: 16x16 IDCT Comparison has error " + << error << " at index " << j; } } } @@ -631,32 +613,25 @@ class Trans16x16TestBase { IhtFunc inv_txfm_ref; }; -class Trans16x16DCT - : public Trans16x16TestBase, - public ::testing::TestWithParam { +class Trans16x16DCT : public Trans16x16TestBase, + public ::testing::TestWithParam { public: virtual ~Trans16x16DCT() {} virtual void SetUp() { fwd_txfm_ = GET_PARAM(0); inv_txfm_ = GET_PARAM(1); - tx_type_ = GET_PARAM(2); + tx_type_ = GET_PARAM(2); bit_depth_ = GET_PARAM(3); - pitch_ = 16; + pitch_ = 16; fwd_txfm_ref = fdct16x16_ref; inv_txfm_ref = idct16x16_ref; mask_ = (1 << bit_depth_) - 1; #if CONFIG_VP9_HIGHBITDEPTH switch (bit_depth_) { - case VPX_BITS_10: - inv_txfm_ref = idct16x16_10_ref; - break; - case VPX_BITS_12: - inv_txfm_ref = idct16x16_12_ref; - break; - default: - inv_txfm_ref = idct16x16_ref; - break; + 
case VPX_BITS_10: inv_txfm_ref = idct16x16_10_ref; break; + case VPX_BITS_12: inv_txfm_ref = idct16x16_12_ref; break; + default: inv_txfm_ref = idct16x16_ref; break; } #else inv_txfm_ref = idct16x16_ref; @@ -676,17 +651,11 @@ class Trans16x16DCT IdctFunc inv_txfm_; }; -TEST_P(Trans16x16DCT, AccuracyCheck) { - RunAccuracyCheck(); -} +TEST_P(Trans16x16DCT, AccuracyCheck) { RunAccuracyCheck(); } -TEST_P(Trans16x16DCT, CoeffCheck) { - RunCoeffCheck(); -} +TEST_P(Trans16x16DCT, CoeffCheck) { RunCoeffCheck(); } -TEST_P(Trans16x16DCT, MemCheck) { - RunMemCheck(); -} +TEST_P(Trans16x16DCT, MemCheck) { RunMemCheck(); } TEST_P(Trans16x16DCT, QuantCheck) { // Use maximally allowed quantization step sizes for DC and AC @@ -694,36 +663,27 @@ TEST_P(Trans16x16DCT, QuantCheck) { RunQuantCheck(1336, 1828); } -TEST_P(Trans16x16DCT, InvAccuracyCheck) { - RunInvAccuracyCheck(); -} +TEST_P(Trans16x16DCT, InvAccuracyCheck) { RunInvAccuracyCheck(); } -class Trans16x16HT - : public Trans16x16TestBase, - public ::testing::TestWithParam { +class Trans16x16HT : public Trans16x16TestBase, + public ::testing::TestWithParam { public: virtual ~Trans16x16HT() {} virtual void SetUp() { fwd_txfm_ = GET_PARAM(0); inv_txfm_ = GET_PARAM(1); - tx_type_ = GET_PARAM(2); + tx_type_ = GET_PARAM(2); bit_depth_ = GET_PARAM(3); - pitch_ = 16; + pitch_ = 16; fwd_txfm_ref = fht16x16_ref; inv_txfm_ref = iht16x16_ref; mask_ = (1 << bit_depth_) - 1; #if CONFIG_VP9_HIGHBITDEPTH switch (bit_depth_) { - case VPX_BITS_10: - inv_txfm_ref = iht16x16_10; - break; - case VPX_BITS_12: - inv_txfm_ref = iht16x16_12; - break; - default: - inv_txfm_ref = iht16x16_ref; - break; + case VPX_BITS_10: inv_txfm_ref = iht16x16_10; break; + case VPX_BITS_12: inv_txfm_ref = iht16x16_12; break; + default: inv_txfm_ref = iht16x16_ref; break; } #else inv_txfm_ref = iht16x16_ref; @@ -743,17 +703,11 @@ class Trans16x16HT IhtFunc inv_txfm_; }; -TEST_P(Trans16x16HT, AccuracyCheck) { - RunAccuracyCheck(); -} +TEST_P(Trans16x16HT, 
AccuracyCheck) { RunAccuracyCheck(); } -TEST_P(Trans16x16HT, CoeffCheck) { - RunCoeffCheck(); -} +TEST_P(Trans16x16HT, CoeffCheck) { RunCoeffCheck(); } -TEST_P(Trans16x16HT, MemCheck) { - RunMemCheck(); -} +TEST_P(Trans16x16HT, MemCheck) { RunMemCheck(); } TEST_P(Trans16x16HT, QuantCheck) { // The encoder skips any non-DC intra prediction modes, @@ -761,9 +715,8 @@ TEST_P(Trans16x16HT, QuantCheck) { RunQuantCheck(429, 729); } -class InvTrans16x16DCT - : public Trans16x16TestBase, - public ::testing::TestWithParam { +class InvTrans16x16DCT : public Trans16x16TestBase, + public ::testing::TestWithParam { public: virtual ~InvTrans16x16DCT() {} @@ -774,7 +727,7 @@ class InvTrans16x16DCT bit_depth_ = GET_PARAM(3); pitch_ = 16; mask_ = (1 << bit_depth_) - 1; -} + } virtual void TearDown() { libvpx_test::ClearSystemState(); } protected: @@ -792,6 +745,66 @@ TEST_P(InvTrans16x16DCT, CompareReference) { CompareInvReference(ref_txfm_, thresh_); } +class PartialTrans16x16Test : public ::testing::TestWithParam< + std::tr1::tuple > { + public: + virtual ~PartialTrans16x16Test() {} + virtual void SetUp() { + fwd_txfm_ = GET_PARAM(0); + bit_depth_ = GET_PARAM(1); + } + + virtual void TearDown() { libvpx_test::ClearSystemState(); } + + protected: + vpx_bit_depth_t bit_depth_; + FdctFunc fwd_txfm_; +}; + +TEST_P(PartialTrans16x16Test, Extremes) { +#if CONFIG_VP9_HIGHBITDEPTH + const int16_t maxval = + static_cast(clip_pixel_highbd(1 << 30, bit_depth_)); +#else + const int16_t maxval = 255; +#endif + const int minval = -maxval; + DECLARE_ALIGNED(16, int16_t, input[kNumCoeffs]); + DECLARE_ALIGNED(16, tran_low_t, output[kNumCoeffs]); + + for (int i = 0; i < kNumCoeffs; ++i) input[i] = maxval; + output[0] = 0; + ASM_REGISTER_STATE_CHECK(fwd_txfm_(input, output, 16)); + EXPECT_EQ((maxval * kNumCoeffs) >> 1, output[0]); + + for (int i = 0; i < kNumCoeffs; ++i) input[i] = minval; + output[0] = 0; + ASM_REGISTER_STATE_CHECK(fwd_txfm_(input, output, 16)); + EXPECT_EQ((minval * kNumCoeffs) 
>> 1, output[0]); +} + +TEST_P(PartialTrans16x16Test, Random) { +#if CONFIG_VP9_HIGHBITDEPTH + const int16_t maxval = + static_cast(clip_pixel_highbd(1 << 30, bit_depth_)); +#else + const int16_t maxval = 255; +#endif + DECLARE_ALIGNED(16, int16_t, input[kNumCoeffs]); + DECLARE_ALIGNED(16, tran_low_t, output[kNumCoeffs]); + ACMRandom rnd(ACMRandom::DeterministicSeed()); + + int sum = 0; + for (int i = 0; i < kNumCoeffs; ++i) { + const int val = (i & 1) ? -rnd(maxval + 1) : rnd(maxval + 1); + input[i] = val; + sum += val; + } + output[0] = 0; + ASM_REGISTER_STATE_CHECK(fwd_txfm_(input, output, 16)); + EXPECT_EQ(sum >> 1, output[0]); +} + using std::tr1::make_tuple; #if CONFIG_VP9_HIGHBITDEPTH @@ -802,10 +815,10 @@ INSTANTIATE_TEST_CASE_P( make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12), make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8))); #else -INSTANTIATE_TEST_CASE_P( - C, Trans16x16DCT, - ::testing::Values( - make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(C, Trans16x16DCT, + ::testing::Values(make_tuple(&vpx_fdct16x16_c, + &vpx_idct16x16_256_add_c, + 0, VPX_BITS_8))); #endif // CONFIG_VP9_HIGHBITDEPTH #if CONFIG_VP9_HIGHBITDEPTH @@ -824,6 +837,11 @@ INSTANTIATE_TEST_CASE_P( make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8), make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8), make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P( + C, PartialTrans16x16Test, + ::testing::Values(make_tuple(&vpx_highbd_fdct16x16_1_c, VPX_BITS_8), + make_tuple(&vpx_highbd_fdct16x16_1_c, VPX_BITS_10), + make_tuple(&vpx_highbd_fdct16x16_1_c, VPX_BITS_12))); #else INSTANTIATE_TEST_CASE_P( C, Trans16x16HT, @@ -832,49 +850,50 @@ INSTANTIATE_TEST_CASE_P( make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8), make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8), make_tuple(&vp9_fht16x16_c, 
&vp9_iht16x16_256_add_c, 3, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(C, PartialTrans16x16Test, + ::testing::Values(make_tuple(&vpx_fdct16x16_1_c, + VPX_BITS_8))); #endif // CONFIG_VP9_HIGHBITDEPTH -#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE +#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE INSTANTIATE_TEST_CASE_P( NEON, Trans16x16DCT, - ::testing::Values( - make_tuple(&vpx_fdct16x16_c, - &vpx_idct16x16_256_add_neon, 0, VPX_BITS_8))); + ::testing::Values(make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_neon, + 0, VPX_BITS_8))); #endif #if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE INSTANTIATE_TEST_CASE_P( SSE2, Trans16x16DCT, - ::testing::Values( - make_tuple(&vpx_fdct16x16_sse2, - &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8))); + ::testing::Values(make_tuple(&vpx_fdct16x16_sse2, + &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8))); INSTANTIATE_TEST_CASE_P( SSE2, Trans16x16HT, - ::testing::Values( - make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0, - VPX_BITS_8), - make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1, - VPX_BITS_8), - make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2, - VPX_BITS_8), - make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3, - VPX_BITS_8))); + ::testing::Values(make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, + 0, VPX_BITS_8), + make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, + 1, VPX_BITS_8), + make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, + 2, VPX_BITS_8), + make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, + 3, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(SSE2, PartialTrans16x16Test, + ::testing::Values(make_tuple(&vpx_fdct16x16_1_sse2, + VPX_BITS_8))); #endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE #if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE INSTANTIATE_TEST_CASE_P( SSE2, Trans16x16DCT, ::testing::Values( - 
make_tuple(&vpx_highbd_fdct16x16_sse2, - &idct16x16_10, 0, VPX_BITS_10), - make_tuple(&vpx_highbd_fdct16x16_c, - &idct16x16_256_add_10_sse2, 0, VPX_BITS_10), - make_tuple(&vpx_highbd_fdct16x16_sse2, - &idct16x16_12, 0, VPX_BITS_12), - make_tuple(&vpx_highbd_fdct16x16_c, - &idct16x16_256_add_12_sse2, 0, VPX_BITS_12), - make_tuple(&vpx_fdct16x16_sse2, - &vpx_idct16x16_256_add_c, 0, VPX_BITS_8))); + make_tuple(&vpx_highbd_fdct16x16_sse2, &idct16x16_10, 0, VPX_BITS_10), + make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_256_add_10_sse2, 0, + VPX_BITS_10), + make_tuple(&vpx_highbd_fdct16x16_sse2, &idct16x16_12, 0, VPX_BITS_12), + make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_256_add_12_sse2, 0, + VPX_BITS_12), + make_tuple(&vpx_fdct16x16_sse2, &vpx_idct16x16_256_add_c, 0, + VPX_BITS_8))); INSTANTIATE_TEST_CASE_P( SSE2, Trans16x16HT, ::testing::Values( @@ -887,23 +906,24 @@ INSTANTIATE_TEST_CASE_P( // that to test both branches. INSTANTIATE_TEST_CASE_P( SSE2, InvTrans16x16DCT, - ::testing::Values( - make_tuple(&idct16x16_10_add_10_c, - &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10), - make_tuple(&idct16x16_10, - &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10), - make_tuple(&idct16x16_10_add_12_c, - &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12), - make_tuple(&idct16x16_12, - &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12))); + ::testing::Values(make_tuple(&idct16x16_10_add_10_c, + &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10), + make_tuple(&idct16x16_10, &idct16x16_256_add_10_sse2, + 3167, VPX_BITS_10), + make_tuple(&idct16x16_10_add_12_c, + &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12), + make_tuple(&idct16x16_12, &idct16x16_256_add_12_sse2, + 3167, VPX_BITS_12))); +INSTANTIATE_TEST_CASE_P(SSE2, PartialTrans16x16Test, + ::testing::Values(make_tuple(&vpx_fdct16x16_1_sse2, + VPX_BITS_8))); #endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE #if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE -INSTANTIATE_TEST_CASE_P( - MSA, 
Trans16x16DCT, - ::testing::Values( - make_tuple(&vpx_fdct16x16_msa, - &vpx_idct16x16_256_add_msa, 0, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(MSA, Trans16x16DCT, + ::testing::Values(make_tuple(&vpx_fdct16x16_msa, + &vpx_idct16x16_256_add_msa, + 0, VPX_BITS_8))); INSTANTIATE_TEST_CASE_P( MSA, Trans16x16HT, ::testing::Values( @@ -912,5 +932,8 @@ INSTANTIATE_TEST_CASE_P( make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 2, VPX_BITS_8), make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 3, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(MSA, PartialTrans16x16Test, + ::testing::Values(make_tuple(&vpx_fdct16x16_1_msa, + VPX_BITS_8))); #endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE } // namespace diff --git a/libs/libvpx/test/dct32x32_test.cc b/libs/libvpx/test/dct32x32_test.cc index 2dac10bc1f..a168e690ee 100644 --- a/libs/libvpx/test/dct32x32_test.cc +++ b/libs/libvpx/test/dct32x32_test.cc @@ -25,18 +25,11 @@ #include "vpx/vpx_codec.h" #include "vpx/vpx_integer.h" #include "vpx_ports/mem.h" +#include "vpx_ports/msvc.h" // for round() using libvpx_test::ACMRandom; namespace { -#ifdef _MSC_VER -static int round(double x) { - if (x < 0) - return static_cast(ceil(x - 0.5)); - else - return static_cast(floor(x + 0.5)); -} -#endif const int kNumCoeffs = 1024; const double kPi = 3.141592653589793238462643383279502884; @@ -44,10 +37,10 @@ void reference_32x32_dct_1d(const double in[32], double out[32]) { const double kInvSqrt2 = 0.707106781186547524400844362104; for (int k = 0; k < 32; k++) { out[k] = 0.0; - for (int n = 0; n < 32; n++) + for (int n = 0; n < 32; n++) { out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 64.0); - if (k == 0) - out[k] = out[k] * kInvSqrt2; + } + if (k == 0) out[k] = out[k] * kInvSqrt2; } } @@ -56,21 +49,17 @@ void reference_32x32_dct_2d(const int16_t input[kNumCoeffs], // First transform columns for (int i = 0; i < 32; ++i) { double temp_in[32], temp_out[32]; - for (int j = 0; j < 32; ++j) - temp_in[j] = input[j*32 + 
i]; + for (int j = 0; j < 32; ++j) temp_in[j] = input[j * 32 + i]; reference_32x32_dct_1d(temp_in, temp_out); - for (int j = 0; j < 32; ++j) - output[j * 32 + i] = temp_out[j]; + for (int j = 0; j < 32; ++j) output[j * 32 + i] = temp_out[j]; } // Then transform rows for (int i = 0; i < 32; ++i) { double temp_in[32], temp_out[32]; - for (int j = 0; j < 32; ++j) - temp_in[j] = output[j + i*32]; + for (int j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32]; reference_32x32_dct_1d(temp_in, temp_out); // Scale by some magic number - for (int j = 0; j < 32; ++j) - output[j + i * 32] = temp_out[j] / 4; + for (int j = 0; j < 32; ++j) output[j + i * 32] = temp_out[j] / 4; } } @@ -96,8 +85,8 @@ class Trans32x32Test : public ::testing::TestWithParam { virtual void SetUp() { fwd_txfm_ = GET_PARAM(0); inv_txfm_ = GET_PARAM(1); - version_ = GET_PARAM(2); // 0: high precision forward transform - // 1: low precision version for rd loop + version_ = GET_PARAM(2); // 0: high precision forward transform + // 1: low precision version for rd loop bit_depth_ = GET_PARAM(3); mask_ = (1 << bit_depth_) - 1; } @@ -147,21 +136,20 @@ TEST_P(Trans32x32Test, AccuracyCheck) { ASM_REGISTER_STATE_CHECK(inv_txfm_(test_temp_block, dst, 32)); #if CONFIG_VP9_HIGHBITDEPTH } else { - ASM_REGISTER_STATE_CHECK(inv_txfm_(test_temp_block, - CONVERT_TO_BYTEPTR(dst16), 32)); + ASM_REGISTER_STATE_CHECK( + inv_txfm_(test_temp_block, CONVERT_TO_BYTEPTR(dst16), 32)); #endif } for (int j = 0; j < kNumCoeffs; ++j) { #if CONFIG_VP9_HIGHBITDEPTH - const uint32_t diff = + const int32_t diff = bit_depth_ == VPX_BITS_8 ? 
dst[j] - src[j] : dst16[j] - src16[j]; #else - const uint32_t diff = dst[j] - src[j]; + const int32_t diff = dst[j] - src[j]; #endif const uint32_t error = diff * diff; - if (max_error < error) - max_error = error; + if (max_error < error) max_error = error; total_error += error; } } @@ -187,8 +175,9 @@ TEST_P(Trans32x32Test, CoeffCheck) { DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]); for (int i = 0; i < count_test_block; ++i) { - for (int j = 0; j < kNumCoeffs; ++j) + for (int j = 0; j < kNumCoeffs; ++j) { input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_); + } const int stride = 32; vpx_fdct32x32_c(input_block, output_ref_block, stride); @@ -220,11 +209,9 @@ TEST_P(Trans32x32Test, MemCheck) { input_extreme_block[j] = rnd.Rand8() & 1 ? mask_ : -mask_; } if (i == 0) { - for (int j = 0; j < kNumCoeffs; ++j) - input_extreme_block[j] = mask_; + for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = mask_; } else if (i == 1) { - for (int j = 0; j < kNumCoeffs; ++j) - input_extreme_block[j] = -mask_; + for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = -mask_; } const int stride = 32; @@ -281,8 +268,9 @@ TEST_P(Trans32x32Test, InverseAccuracy) { } reference_32x32_dct_2d(in, out_r); - for (int j = 0; j < kNumCoeffs; ++j) + for (int j = 0; j < kNumCoeffs; ++j) { coeff[j] = static_cast(round(out_r[j])); + } if (bit_depth_ == VPX_BITS_8) { ASM_REGISTER_STATE_CHECK(inv_txfm_(coeff, dst, 32)); #if CONFIG_VP9_HIGHBITDEPTH @@ -298,59 +286,122 @@ TEST_P(Trans32x32Test, InverseAccuracy) { const int diff = dst[j] - src[j]; #endif const int error = diff * diff; - EXPECT_GE(1, error) - << "Error: 32x32 IDCT has error " << error - << " at index " << j; + EXPECT_GE(1, error) << "Error: 32x32 IDCT has error " << error + << " at index " << j; } } } +class PartialTrans32x32Test + : public ::testing::TestWithParam< + std::tr1::tuple > { + public: + virtual ~PartialTrans32x32Test() {} + virtual void SetUp() { + fwd_txfm_ = GET_PARAM(0); + 
bit_depth_ = GET_PARAM(1); + } + + virtual void TearDown() { libvpx_test::ClearSystemState(); } + + protected: + vpx_bit_depth_t bit_depth_; + FwdTxfmFunc fwd_txfm_; +}; + +TEST_P(PartialTrans32x32Test, Extremes) { +#if CONFIG_VP9_HIGHBITDEPTH + const int16_t maxval = + static_cast(clip_pixel_highbd(1 << 30, bit_depth_)); +#else + const int16_t maxval = 255; +#endif + const int minval = -maxval; + DECLARE_ALIGNED(16, int16_t, input[kNumCoeffs]); + DECLARE_ALIGNED(16, tran_low_t, output[kNumCoeffs]); + + for (int i = 0; i < kNumCoeffs; ++i) input[i] = maxval; + output[0] = 0; + ASM_REGISTER_STATE_CHECK(fwd_txfm_(input, output, 32)); + EXPECT_EQ((maxval * kNumCoeffs) >> 3, output[0]); + + for (int i = 0; i < kNumCoeffs; ++i) input[i] = minval; + output[0] = 0; + ASM_REGISTER_STATE_CHECK(fwd_txfm_(input, output, 32)); + EXPECT_EQ((minval * kNumCoeffs) >> 3, output[0]); +} + +TEST_P(PartialTrans32x32Test, Random) { +#if CONFIG_VP9_HIGHBITDEPTH + const int16_t maxval = + static_cast(clip_pixel_highbd(1 << 30, bit_depth_)); +#else + const int16_t maxval = 255; +#endif + DECLARE_ALIGNED(16, int16_t, input[kNumCoeffs]); + DECLARE_ALIGNED(16, tran_low_t, output[kNumCoeffs]); + ACMRandom rnd(ACMRandom::DeterministicSeed()); + + int sum = 0; + for (int i = 0; i < kNumCoeffs; ++i) { + const int val = (i & 1) ? 
-rnd(maxval + 1) : rnd(maxval + 1); + input[i] = val; + sum += val; + } + output[0] = 0; + ASM_REGISTER_STATE_CHECK(fwd_txfm_(input, output, 32)); + EXPECT_EQ(sum >> 3, output[0]); +} + using std::tr1::make_tuple; #if CONFIG_VP9_HIGHBITDEPTH INSTANTIATE_TEST_CASE_P( C, Trans32x32Test, ::testing::Values( - make_tuple(&vpx_highbd_fdct32x32_c, - &idct32x32_10, 0, VPX_BITS_10), - make_tuple(&vpx_highbd_fdct32x32_rd_c, - &idct32x32_10, 1, VPX_BITS_10), - make_tuple(&vpx_highbd_fdct32x32_c, - &idct32x32_12, 0, VPX_BITS_12), - make_tuple(&vpx_highbd_fdct32x32_rd_c, - &idct32x32_12, 1, VPX_BITS_12), - make_tuple(&vpx_fdct32x32_c, - &vpx_idct32x32_1024_add_c, 0, VPX_BITS_8), - make_tuple(&vpx_fdct32x32_rd_c, - &vpx_idct32x32_1024_add_c, 1, VPX_BITS_8))); + make_tuple(&vpx_highbd_fdct32x32_c, &idct32x32_10, 0, VPX_BITS_10), + make_tuple(&vpx_highbd_fdct32x32_rd_c, &idct32x32_10, 1, VPX_BITS_10), + make_tuple(&vpx_highbd_fdct32x32_c, &idct32x32_12, 0, VPX_BITS_12), + make_tuple(&vpx_highbd_fdct32x32_rd_c, &idct32x32_12, 1, VPX_BITS_12), + make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c, 0, VPX_BITS_8), + make_tuple(&vpx_fdct32x32_rd_c, &vpx_idct32x32_1024_add_c, 1, + VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P( + C, PartialTrans32x32Test, + ::testing::Values(make_tuple(&vpx_highbd_fdct32x32_1_c, VPX_BITS_8), + make_tuple(&vpx_highbd_fdct32x32_1_c, VPX_BITS_10), + make_tuple(&vpx_highbd_fdct32x32_1_c, VPX_BITS_12))); #else INSTANTIATE_TEST_CASE_P( C, Trans32x32Test, - ::testing::Values( - make_tuple(&vpx_fdct32x32_c, - &vpx_idct32x32_1024_add_c, 0, VPX_BITS_8), - make_tuple(&vpx_fdct32x32_rd_c, - &vpx_idct32x32_1024_add_c, 1, VPX_BITS_8))); + ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c, 0, + VPX_BITS_8), + make_tuple(&vpx_fdct32x32_rd_c, &vpx_idct32x32_1024_add_c, + 1, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(C, PartialTrans32x32Test, + ::testing::Values(make_tuple(&vpx_fdct32x32_1_c, + VPX_BITS_8))); #endif // CONFIG_VP9_HIGHBITDEPTH -#if 
HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE +#if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE INSTANTIATE_TEST_CASE_P( NEON, Trans32x32Test, - ::testing::Values( - make_tuple(&vpx_fdct32x32_c, - &vpx_idct32x32_1024_add_neon, 0, VPX_BITS_8), - make_tuple(&vpx_fdct32x32_rd_c, - &vpx_idct32x32_1024_add_neon, 1, VPX_BITS_8))); -#endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE + ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_neon, + 0, VPX_BITS_8), + make_tuple(&vpx_fdct32x32_rd_c, + &vpx_idct32x32_1024_add_neon, 1, VPX_BITS_8))); +#endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE #if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE INSTANTIATE_TEST_CASE_P( SSE2, Trans32x32Test, - ::testing::Values( - make_tuple(&vpx_fdct32x32_sse2, - &vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8), - make_tuple(&vpx_fdct32x32_rd_sse2, - &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8))); + ::testing::Values(make_tuple(&vpx_fdct32x32_sse2, + &vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8), + make_tuple(&vpx_fdct32x32_rd_sse2, + &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(SSE2, PartialTrans32x32Test, + ::testing::Values(make_tuple(&vpx_fdct32x32_1_sse2, + VPX_BITS_8))); #endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE #if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE @@ -367,25 +418,29 @@ INSTANTIATE_TEST_CASE_P( VPX_BITS_8), make_tuple(&vpx_fdct32x32_rd_sse2, &vpx_idct32x32_1024_add_c, 1, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(SSE2, PartialTrans32x32Test, + ::testing::Values(make_tuple(&vpx_fdct32x32_1_sse2, + VPX_BITS_8))); #endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE #if HAVE_AVX2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE INSTANTIATE_TEST_CASE_P( AVX2, Trans32x32Test, - ::testing::Values( - make_tuple(&vpx_fdct32x32_avx2, - 
&vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8), - make_tuple(&vpx_fdct32x32_rd_avx2, - &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8))); + ::testing::Values(make_tuple(&vpx_fdct32x32_avx2, + &vpx_idct32x32_1024_add_sse2, 0, VPX_BITS_8), + make_tuple(&vpx_fdct32x32_rd_avx2, + &vpx_idct32x32_1024_add_sse2, 1, VPX_BITS_8))); #endif // HAVE_AVX2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE #if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE INSTANTIATE_TEST_CASE_P( MSA, Trans32x32Test, - ::testing::Values( - make_tuple(&vpx_fdct32x32_msa, - &vpx_idct32x32_1024_add_msa, 0, VPX_BITS_8), - make_tuple(&vpx_fdct32x32_rd_msa, - &vpx_idct32x32_1024_add_msa, 1, VPX_BITS_8))); + ::testing::Values(make_tuple(&vpx_fdct32x32_msa, + &vpx_idct32x32_1024_add_msa, 0, VPX_BITS_8), + make_tuple(&vpx_fdct32x32_rd_msa, + &vpx_idct32x32_1024_add_msa, 1, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(MSA, PartialTrans32x32Test, + ::testing::Values(make_tuple(&vpx_fdct32x32_1_msa, + VPX_BITS_8))); #endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE } // namespace diff --git a/libs/libvpx/test/decode_api_test.cc b/libs/libvpx/test/decode_api_test.cc index 318351b73d..593637780e 100644 --- a/libs/libvpx/test/decode_api_test.cc +++ b/libs/libvpx/test/decode_api_test.cc @@ -26,12 +26,9 @@ TEST(DecodeAPI, InvalidParams) { #endif #if CONFIG_VP9_DECODER &vpx_codec_vp9_dx_algo, -#endif -#if CONFIG_VP10_DECODER - &vpx_codec_vp10_dx_algo, #endif }; - uint8_t buf[1] = {0}; + uint8_t buf[1] = { 0 }; vpx_codec_ctx_t dec; EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_dec_init(NULL, NULL, NULL, 0)); @@ -54,8 +51,7 @@ TEST(DecodeAPI, InvalidParams) { vpx_codec_decode(&dec, buf, NELEMENTS(buf), NULL, 0)); EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_decode(&dec, NULL, NELEMENTS(buf), NULL, 0)); - EXPECT_EQ(VPX_CODEC_INVALID_PARAM, - vpx_codec_decode(&dec, buf, 0, NULL, 0)); + EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_decode(&dec, buf, 0, NULL, 0)); 
EXPECT_EQ(VPX_CODEC_OK, vpx_codec_destroy(&dec)); } @@ -80,12 +76,9 @@ TEST(DecodeAPI, OptionalParams) { // Test VP9 codec controls after a decode error to ensure the code doesn't // misbehave. void TestVp9Controls(vpx_codec_ctx_t *dec) { - static const int kControls[] = { - VP8D_GET_LAST_REF_UPDATES, - VP8D_GET_FRAME_CORRUPTED, - VP9D_GET_DISPLAY_SIZE, - VP9D_GET_FRAME_SIZE - }; + static const int kControls[] = { VP8D_GET_LAST_REF_UPDATES, + VP8D_GET_FRAME_CORRUPTED, + VP9D_GET_DISPLAY_SIZE, VP9D_GET_FRAME_SIZE }; int val[2]; for (int i = 0; i < NELEMENTS(kControls); ++i) { @@ -94,9 +87,7 @@ void TestVp9Controls(vpx_codec_ctx_t *dec) { case VP8D_GET_FRAME_CORRUPTED: EXPECT_EQ(VPX_CODEC_ERROR, res) << kControls[i]; break; - default: - EXPECT_EQ(VPX_CODEC_OK, res) << kControls[i]; - break; + default: EXPECT_EQ(VPX_CODEC_OK, res) << kControls[i]; break; } EXPECT_EQ(VPX_CODEC_INVALID_PARAM, vpx_codec_control_(dec, kControls[i], NULL)); @@ -146,6 +137,39 @@ TEST(DecodeAPI, Vp9InvalidDecode) { TestVp9Controls(&dec); EXPECT_EQ(VPX_CODEC_OK, vpx_codec_destroy(&dec)); } + +TEST(DecodeAPI, Vp9PeekSI) { + const vpx_codec_iface_t *const codec = &vpx_codec_vp9_dx_algo; + // The first 9 bytes are valid and the rest of the bytes are made up. Until + // size 10, this should return VPX_CODEC_UNSUP_BITSTREAM and after that it + // should return VPX_CODEC_CORRUPT_FRAME. + const uint8_t data[32] = { + 0x85, 0xa4, 0xc1, 0xa1, 0x38, 0x81, 0xa3, 0x49, 0x83, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + }; + + for (uint32_t data_sz = 1; data_sz <= 32; ++data_sz) { + // Verify behavior of vpx_codec_decode. vpx_codec_decode doesn't even get + // to decoder_peek_si_internal on frames of size < 8. + if (data_sz >= 8) { + vpx_codec_ctx_t dec; + EXPECT_EQ(VPX_CODEC_OK, vpx_codec_dec_init(&dec, codec, NULL, 0)); + EXPECT_EQ( + (data_sz < 10) ? 
VPX_CODEC_UNSUP_BITSTREAM : VPX_CODEC_CORRUPT_FRAME, + vpx_codec_decode(&dec, data, data_sz, NULL, 0)); + vpx_codec_iter_t iter = NULL; + EXPECT_EQ(NULL, vpx_codec_get_frame(&dec, &iter)); + EXPECT_EQ(VPX_CODEC_OK, vpx_codec_destroy(&dec)); + } + + // Verify behavior of vpx_codec_peek_stream_info. + vpx_codec_stream_info_t si; + si.sz = sizeof(si); + EXPECT_EQ((data_sz < 10) ? VPX_CODEC_UNSUP_BITSTREAM : VPX_CODEC_OK, + vpx_codec_peek_stream_info(codec, data, data_sz, &si)); + } +} #endif // CONFIG_VP9_DECODER } // namespace diff --git a/libs/libvpx/test/decode_perf_test.cc b/libs/libvpx/test/decode_perf_test.cc index c24d517013..ee26c3c046 100644 --- a/libs/libvpx/test/decode_perf_test.cc +++ b/libs/libvpx/test/decode_perf_test.cc @@ -28,7 +28,6 @@ namespace { #define VIDEO_NAME 0 #define THREADS 1 -const int kMaxPsnr = 100; const double kUsecsInSec = 1000000.0; const char kNewEncodeOutputFile[] = "new_encode.ivf"; @@ -70,8 +69,7 @@ const DecodePerfParam kVP9DecodePerfVectors[] = { power/temp/min max frame decode times/etc */ -class DecodePerfTest : public ::testing::TestWithParam { -}; +class DecodePerfTest : public ::testing::TestWithParam {}; TEST_P(DecodePerfTest, PerfTest) { const char *const video_name = GET_PARAM(VIDEO_NAME); @@ -92,8 +90,7 @@ TEST_P(DecodePerfTest, PerfTest) { } vpx_usec_timer_mark(&t); - const double elapsed_secs = double(vpx_usec_timer_elapsed(&t)) - / kUsecsInSec; + const double elapsed_secs = double(vpx_usec_timer_elapsed(&t)) / kUsecsInSec; const unsigned frames = video.frame_number(); const double fps = double(frames) / elapsed_secs; @@ -111,17 +108,13 @@ TEST_P(DecodePerfTest, PerfTest) { INSTANTIATE_TEST_CASE_P(VP9, DecodePerfTest, ::testing::ValuesIn(kVP9DecodePerfVectors)); -class VP9NewEncodeDecodePerfTest : - public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWithParam { +class VP9NewEncodeDecodePerfTest + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWithParam { protected: 
VP9NewEncodeDecodePerfTest() - : EncoderTest(GET_PARAM(0)), - encoding_mode_(GET_PARAM(1)), - speed_(0), - outfile_(0), - out_frames_(0) { - } + : EncoderTest(GET_PARAM(0)), encoding_mode_(GET_PARAM(1)), speed_(0), + outfile_(0), out_frames_(0) {} virtual ~VP9NewEncodeDecodePerfTest() {} @@ -160,8 +153,9 @@ class VP9NewEncodeDecodePerfTest : virtual void EndPassHook() { if (outfile_ != NULL) { - if (!fseek(outfile_, 0, SEEK_SET)) + if (!fseek(outfile_, 0, SEEK_SET)) { ivf_write_file_header(outfile_, &cfg_, VP9_FOURCC, out_frames_); + } fclose(outfile_); outfile_ = NULL; } @@ -171,8 +165,9 @@ class VP9NewEncodeDecodePerfTest : ++out_frames_; // Write initial file header if first frame. - if (pkt->data.frame.pts == 0) + if (pkt->data.frame.pts == 0) { ivf_write_file_header(outfile_, &cfg_, VP9_FOURCC, out_frames_); + } // Write frame header and data. ivf_write_frame_header(outfile_, out_frames_, pkt->data.frame.sz); @@ -180,11 +175,9 @@ class VP9NewEncodeDecodePerfTest : pkt->data.frame.sz); } - virtual bool DoDecode() { return false; } + virtual bool DoDecode() const { return false; } - void set_speed(unsigned int speed) { - speed_ = speed; - } + void set_speed(unsigned int speed) { speed_ = speed; } private: libvpx_test::TestMode encoding_mode_; @@ -196,10 +189,7 @@ class VP9NewEncodeDecodePerfTest : struct EncodePerfTestVideo { EncodePerfTestVideo(const char *name_, uint32_t width_, uint32_t height_, uint32_t bitrate_, int frames_) - : name(name_), - width(width_), - height(height_), - bitrate(bitrate_), + : name(name_), width(width_), height(height_), bitrate(bitrate_), frames(frames_) {} const char *name; uint32_t width; @@ -225,10 +215,8 @@ TEST_P(VP9NewEncodeDecodePerfTest, PerfTest) { const char *video_name = kVP9EncodePerfTestVectors[i].name; libvpx_test::I420VideoSource video( - video_name, - kVP9EncodePerfTestVectors[i].width, - kVP9EncodePerfTestVectors[i].height, - timebase.den, timebase.num, 0, + video_name, kVP9EncodePerfTestVectors[i].width, + 
kVP9EncodePerfTestVectors[i].height, timebase.den, timebase.num, 0, kVP9EncodePerfTestVectors[i].frames); set_speed(2); @@ -268,6 +256,6 @@ TEST_P(VP9NewEncodeDecodePerfTest, PerfTest) { printf("}\n"); } -VP9_INSTANTIATE_TEST_CASE( - VP9NewEncodeDecodePerfTest, ::testing::Values(::libvpx_test::kTwoPassGood)); +VP9_INSTANTIATE_TEST_CASE(VP9NewEncodeDecodePerfTest, + ::testing::Values(::libvpx_test::kTwoPassGood)); } // namespace diff --git a/libs/libvpx/test/decode_test_driver.cc b/libs/libvpx/test/decode_test_driver.cc index ad861c3157..b738e0db1c 100644 --- a/libs/libvpx/test/decode_test_driver.cc +++ b/libs/libvpx/test/decode_test_driver.cc @@ -21,9 +21,8 @@ const char kVP8Name[] = "WebM Project VP8"; vpx_codec_err_t Decoder::PeekStream(const uint8_t *cxdata, size_t size, vpx_codec_stream_info_t *stream_info) { - return vpx_codec_peek_stream_info(CodecInterface(), - cxdata, static_cast(size), - stream_info); + return vpx_codec_peek_stream_info( + CodecInterface(), cxdata, static_cast(size), stream_info); } vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size) { @@ -35,9 +34,8 @@ vpx_codec_err_t Decoder::DecodeFrame(const uint8_t *cxdata, size_t size, vpx_codec_err_t res_dec; InitOnce(); API_REGISTER_STATE_CHECK( - res_dec = vpx_codec_decode(&decoder_, - cxdata, static_cast(size), - user_priv, 0)); + res_dec = vpx_codec_decode( + &decoder_, cxdata, static_cast(size), user_priv, 0)); return res_dec; } @@ -67,7 +65,7 @@ void DecoderTest::HandlePeekResult(Decoder *const decoder, void DecoderTest::RunLoop(CompressedVideoSource *video, const vpx_codec_dec_cfg_t &dec_cfg) { - Decoder* const decoder = codec_->CreateDecoder(dec_cfg, flags_, 0); + Decoder *const decoder = codec_->CreateDecoder(dec_cfg, flags_); ASSERT_TRUE(decoder != NULL); bool end_of_file = false; @@ -80,16 +78,14 @@ void DecoderTest::RunLoop(CompressedVideoSource *video, stream_info.sz = sizeof(stream_info); if (video->cxdata() != NULL) { - const vpx_codec_err_t res_peek = 
decoder->PeekStream(video->cxdata(), - video->frame_size(), - &stream_info); + const vpx_codec_err_t res_peek = decoder->PeekStream( + video->cxdata(), video->frame_size(), &stream_info); HandlePeekResult(decoder, video, res_peek); ASSERT_FALSE(::testing::Test::HasFailure()); - vpx_codec_err_t res_dec = decoder->DecodeFrame(video->cxdata(), - video->frame_size()); - if (!HandleDecodeResult(res_dec, *video, decoder)) - break; + vpx_codec_err_t res_dec = + decoder->DecodeFrame(video->cxdata(), video->frame_size()); + if (!HandleDecodeResult(res_dec, *video, decoder)) break; } else { // Signal end of the file to the decoder. const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0); @@ -101,8 +97,9 @@ void DecoderTest::RunLoop(CompressedVideoSource *video, const vpx_image_t *img = NULL; // Get decompressed data - while ((img = dec_iter.Next())) + while ((img = dec_iter.Next())) { DecompressedFrameHook(*img, video->frame_number()); + } } delete decoder; } @@ -116,8 +113,6 @@ void DecoderTest::set_cfg(const vpx_codec_dec_cfg_t &dec_cfg) { memcpy(&cfg_, &dec_cfg, sizeof(cfg_)); } -void DecoderTest::set_flags(const vpx_codec_flags_t flags) { - flags_ = flags; -} +void DecoderTest::set_flags(const vpx_codec_flags_t flags) { flags_ = flags; } } // namespace libvpx_test diff --git a/libs/libvpx/test/decode_test_driver.h b/libs/libvpx/test/decode_test_driver.h index f566c53c7d..644fc9e90d 100644 --- a/libs/libvpx/test/decode_test_driver.h +++ b/libs/libvpx/test/decode_test_driver.h @@ -26,13 +26,11 @@ class DxDataIterator { explicit DxDataIterator(vpx_codec_ctx_t *decoder) : decoder_(decoder), iter_(NULL) {} - const vpx_image_t *Next() { - return vpx_codec_get_frame(decoder_, &iter_); - } + const vpx_image_t *Next() { return vpx_codec_get_frame(decoder_, &iter_); } private: - vpx_codec_ctx_t *decoder_; - vpx_codec_iter_t iter_; + vpx_codec_ctx_t *decoder_; + vpx_codec_iter_t iter_; }; // Provides a simplified interface to manage one video decoding. 
@@ -40,20 +38,17 @@ class DxDataIterator { // as more tests are added. class Decoder { public: - Decoder(vpx_codec_dec_cfg_t cfg, unsigned long deadline) - : cfg_(cfg), flags_(0), deadline_(deadline), init_done_(false) { + explicit Decoder(vpx_codec_dec_cfg_t cfg) + : cfg_(cfg), flags_(0), init_done_(false) { memset(&decoder_, 0, sizeof(decoder_)); } - Decoder(vpx_codec_dec_cfg_t cfg, const vpx_codec_flags_t flag, - unsigned long deadline) // NOLINT - : cfg_(cfg), flags_(flag), deadline_(deadline), init_done_(false) { + Decoder(vpx_codec_dec_cfg_t cfg, const vpx_codec_flags_t flag) + : cfg_(cfg), flags_(flag), init_done_(false) { memset(&decoder_, 0, sizeof(decoder_)); } - virtual ~Decoder() { - vpx_codec_destroy(&decoder_); - } + virtual ~Decoder() { vpx_codec_destroy(&decoder_); } vpx_codec_err_t PeekStream(const uint8_t *cxdata, size_t size, vpx_codec_stream_info_t *stream_info); @@ -63,17 +58,9 @@ class Decoder { vpx_codec_err_t DecodeFrame(const uint8_t *cxdata, size_t size, void *user_priv); - DxDataIterator GetDxData() { - return DxDataIterator(&decoder_); - } + DxDataIterator GetDxData() { return DxDataIterator(&decoder_); } - void set_deadline(unsigned long deadline) { - deadline_ = deadline; - } - - void Control(int ctrl_id, int arg) { - Control(ctrl_id, arg, VPX_CODEC_OK); - } + void Control(int ctrl_id, int arg) { Control(ctrl_id, arg, VPX_CODEC_OK); } void Control(int ctrl_id, const void *arg) { InitOnce(); @@ -87,7 +74,7 @@ class Decoder { ASSERT_EQ(expected_value, res) << DecodeError(); } - const char* DecodeError() { + const char *DecodeError() { const char *detail = vpx_codec_error_detail(&decoder_); return detail ? 
detail : vpx_codec_error(&decoder_); } @@ -97,38 +84,34 @@ class Decoder { vpx_get_frame_buffer_cb_fn_t cb_get, vpx_release_frame_buffer_cb_fn_t cb_release, void *user_priv) { InitOnce(); - return vpx_codec_set_frame_buffer_functions( - &decoder_, cb_get, cb_release, user_priv); + return vpx_codec_set_frame_buffer_functions(&decoder_, cb_get, cb_release, + user_priv); } - const char* GetDecoderName() const { + const char *GetDecoderName() const { return vpx_codec_iface_name(CodecInterface()); } bool IsVP8() const; - vpx_codec_ctx_t * GetDecoder() { - return &decoder_; - } + vpx_codec_ctx_t *GetDecoder() { return &decoder_; } protected: - virtual vpx_codec_iface_t* CodecInterface() const = 0; + virtual vpx_codec_iface_t *CodecInterface() const = 0; void InitOnce() { if (!init_done_) { - const vpx_codec_err_t res = vpx_codec_dec_init(&decoder_, - CodecInterface(), - &cfg_, flags_); + const vpx_codec_err_t res = + vpx_codec_dec_init(&decoder_, CodecInterface(), &cfg_, flags_); ASSERT_EQ(VPX_CODEC_OK, res) << DecodeError(); init_done_ = true; } } - vpx_codec_ctx_t decoder_; + vpx_codec_ctx_t decoder_; vpx_codec_dec_cfg_t cfg_; - vpx_codec_flags_t flags_; - unsigned int deadline_; - bool init_done_; + vpx_codec_flags_t flags_; + bool init_done_; }; // Common test functionality for all Decoder tests. @@ -143,37 +126,35 @@ class DecoderTest { virtual void set_flags(const vpx_codec_flags_t flags); // Hook to be called before decompressing every frame. - virtual void PreDecodeFrameHook(const CompressedVideoSource& /*video*/, - Decoder* /*decoder*/) {} + virtual void PreDecodeFrameHook(const CompressedVideoSource & /*video*/, + Decoder * /*decoder*/) {} // Hook to be called to handle decode result. Return true to continue. 
virtual bool HandleDecodeResult(const vpx_codec_err_t res_dec, - const CompressedVideoSource& /*video*/, + const CompressedVideoSource & /*video*/, Decoder *decoder) { EXPECT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError(); return VPX_CODEC_OK == res_dec; } // Hook to be called on every decompressed frame. - virtual void DecompressedFrameHook(const vpx_image_t& /*img*/, + virtual void DecompressedFrameHook(const vpx_image_t & /*img*/, const unsigned int /*frame_number*/) {} // Hook to be called on peek result - virtual void HandlePeekResult(Decoder* const decoder, + virtual void HandlePeekResult(Decoder *const decoder, CompressedVideoSource *video, const vpx_codec_err_t res_peek); protected: explicit DecoderTest(const CodecFactory *codec) - : codec_(codec), - cfg_(), - flags_(0) {} + : codec_(codec), cfg_(), flags_(0) {} virtual ~DecoderTest() {} const CodecFactory *codec_; vpx_codec_dec_cfg_t cfg_; - vpx_codec_flags_t flags_; + vpx_codec_flags_t flags_; }; } // namespace libvpx_test diff --git a/libs/libvpx/test/encode_api_test.cc b/libs/libvpx/test/encode_api_test.cc index a7200e653a..419e38506c 100644 --- a/libs/libvpx/test/encode_api_test.cc +++ b/libs/libvpx/test/encode_api_test.cc @@ -25,12 +25,9 @@ TEST(EncodeAPI, InvalidParams) { #endif #if CONFIG_VP9_ENCODER &vpx_codec_vp9_cx_algo, -#endif -#if CONFIG_VP10_ENCODER - &vpx_codec_vp10_cx_algo, #endif }; - uint8_t buf[1] = {0}; + uint8_t buf[1] = { 0 }; vpx_image_t img; vpx_codec_ctx_t enc; vpx_codec_enc_cfg_t cfg; diff --git a/libs/libvpx/test/encode_perf_test.cc b/libs/libvpx/test/encode_perf_test.cc index 7e9f0d6c44..0bb435502b 100644 --- a/libs/libvpx/test/encode_perf_test.cc +++ b/libs/libvpx/test/encode_perf_test.cc @@ -26,10 +26,7 @@ const double kUsecsInSec = 1000000.0; struct EncodePerfTestVideo { EncodePerfTestVideo(const char *name_, uint32_t width_, uint32_t height_, uint32_t bitrate_, int frames_) - : name(name_), - width(width_), - height(height_), - bitrate(bitrate_), + : name(name_), 
width(width_), height(height_), bitrate(bitrate_), frames(frames_) {} const char *name; uint32_t width; @@ -45,8 +42,8 @@ const EncodePerfTestVideo kVP9EncodePerfTestVectors[] = { EncodePerfTestVideo("macmarcostationary_640_480_30.yuv", 640, 480, 200, 718), EncodePerfTestVideo("niklas_640_480_30.yuv", 640, 480, 200, 471), EncodePerfTestVideo("tacomanarrows_640_480_30.yuv", 640, 480, 200, 300), - EncodePerfTestVideo("tacomasmallcameramovement_640_480_30.yuv", - 640, 480, 200, 300), + EncodePerfTestVideo("tacomasmallcameramovement_640_480_30.yuv", 640, 480, 200, + 300), EncodePerfTestVideo("thaloundeskmtg_640_480_30.yuv", 640, 480, 200, 300), EncodePerfTestVideo("niklas_1280_720_30.yuv", 1280, 720, 600, 470), }; @@ -61,12 +58,8 @@ class VP9EncodePerfTest public ::libvpx_test::CodecTestWithParam { protected: VP9EncodePerfTest() - : EncoderTest(GET_PARAM(0)), - min_psnr_(kMaxPsnr), - nframes_(0), - encoding_mode_(GET_PARAM(1)), - speed_(0), - threads_(1) {} + : EncoderTest(GET_PARAM(0)), min_psnr_(kMaxPsnr), nframes_(0), + encoding_mode_(GET_PARAM(1)), speed_(0), threads_(1) {} virtual ~VP9EncodePerfTest() {} @@ -107,24 +100,18 @@ class VP9EncodePerfTest virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) { if (pkt->data.psnr.psnr[0] < min_psnr_) { - min_psnr_= pkt->data.psnr.psnr[0]; + min_psnr_ = pkt->data.psnr.psnr[0]; } } // for performance reasons don't decode - virtual bool DoDecode() { return 0; } + virtual bool DoDecode() const { return false; } - double min_psnr() const { - return min_psnr_; - } + double min_psnr() const { return min_psnr_; } - void set_speed(unsigned int speed) { - speed_ = speed; - } + void set_speed(unsigned int speed) { speed_ = speed; } - void set_threads(unsigned int threads) { - threads_ = threads; - } + void set_threads(unsigned int threads) { threads_ = threads; } private: double min_psnr_; @@ -139,11 +126,12 @@ TEST_P(VP9EncodePerfTest, PerfTest) { for (size_t j = 0; j < NELEMENTS(kEncodePerfTestSpeeds); ++j) { for (size_t k = 0; 
k < NELEMENTS(kEncodePerfTestThreads); ++k) { if (kVP9EncodePerfTestVectors[i].width < 512 && - kEncodePerfTestThreads[k] > 1) + kEncodePerfTestThreads[k] > 1) { continue; - else if (kVP9EncodePerfTestVectors[i].width < 1024 && - kEncodePerfTestThreads[k] > 2) + } else if (kVP9EncodePerfTestVectors[i].width < 1024 && + kEncodePerfTestThreads[k] > 2) { continue; + } set_threads(kEncodePerfTestThreads[k]); SetUp(); @@ -157,10 +145,8 @@ TEST_P(VP9EncodePerfTest, PerfTest) { const unsigned frames = kVP9EncodePerfTestVectors[i].frames; const char *video_name = kVP9EncodePerfTestVectors[i].name; libvpx_test::I420VideoSource video( - video_name, - kVP9EncodePerfTestVectors[i].width, - kVP9EncodePerfTestVectors[i].height, - timebase.den, timebase.num, 0, + video_name, kVP9EncodePerfTestVectors[i].width, + kVP9EncodePerfTestVectors[i].height, timebase.den, timebase.num, 0, kVP9EncodePerfTestVectors[i].frames); set_speed(kEncodePerfTestSpeeds[j]); @@ -197,6 +183,6 @@ TEST_P(VP9EncodePerfTest, PerfTest) { } } -VP9_INSTANTIATE_TEST_CASE( - VP9EncodePerfTest, ::testing::Values(::libvpx_test::kRealTime)); +VP9_INSTANTIATE_TEST_CASE(VP9EncodePerfTest, + ::testing::Values(::libvpx_test::kRealTime)); } // namespace diff --git a/libs/libvpx/test/encode_test_driver.cc b/libs/libvpx/test/encode_test_driver.cc index 128436ee91..632c98f05a 100644 --- a/libs/libvpx/test/encode_test_driver.cc +++ b/libs/libvpx/test/encode_test_driver.cc @@ -30,8 +30,7 @@ void Encoder::InitEncoder(VideoSource *video) { cfg_.g_timebase = video->timebase(); cfg_.rc_twopass_stats_in = stats_->buf(); - res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_, - init_flags_); + res = vpx_codec_enc_init(&encoder_, CodecInterface(), &cfg_, init_flags_); ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError(); #if CONFIG_VP9_ENCODER @@ -42,15 +41,6 @@ void Encoder::InitEncoder(VideoSource *video) { log2_tile_columns); ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError(); } else -#endif -#if CONFIG_VP10_ENCODER - if 
(CodecInterface() == &vpx_codec_vp10_cx_algo) { - // Default to 1 tile column for VP10. - const int log2_tile_columns = 0; - res = vpx_codec_control_(&encoder_, VP9E_SET_TILE_COLUMNS, - log2_tile_columns); - ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError(); - } else #endif { #if CONFIG_VP8_ENCODER @@ -62,17 +52,17 @@ void Encoder::InitEncoder(VideoSource *video) { } void Encoder::EncodeFrame(VideoSource *video, const unsigned long frame_flags) { - if (video->img()) + if (video->img()) { EncodeFrameInternal(*video, frame_flags); - else + } else { Flush(); + } // Handle twopass stats CxDataIterator iter = GetCxData(); while (const vpx_codec_cx_pkt_t *pkt = iter.Next()) { - if (pkt->kind != VPX_CODEC_STATS_PKT) - continue; + if (pkt->kind != VPX_CODEC_STATS_PKT) continue; stats_->Append(*pkt); } @@ -92,15 +82,15 @@ void Encoder::EncodeFrameInternal(const VideoSource &video, } // Encode the frame - API_REGISTER_STATE_CHECK( - res = vpx_codec_encode(&encoder_, img, video.pts(), video.duration(), - frame_flags, deadline_)); + API_REGISTER_STATE_CHECK(res = vpx_codec_encode(&encoder_, img, video.pts(), + video.duration(), frame_flags, + deadline_)); ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError(); } void Encoder::Flush() { - const vpx_codec_err_t res = vpx_codec_encode(&encoder_, NULL, 0, 0, 0, - deadline_); + const vpx_codec_err_t res = + vpx_codec_encode(&encoder_, NULL, 0, 0, 0, deadline_); if (!encoder_.priv) ASSERT_EQ(VPX_CODEC_ERROR, res) << EncoderError(); else @@ -115,60 +105,57 @@ void EncoderTest::InitializeConfig() { void EncoderTest::SetMode(TestMode mode) { switch (mode) { - case kRealTime: - deadline_ = VPX_DL_REALTIME; - break; + case kRealTime: deadline_ = VPX_DL_REALTIME; break; case kOnePassGood: - case kTwoPassGood: - deadline_ = VPX_DL_GOOD_QUALITY; - break; + case kTwoPassGood: deadline_ = VPX_DL_GOOD_QUALITY; break; case kOnePassBest: - case kTwoPassBest: - deadline_ = VPX_DL_BEST_QUALITY; - break; + case kTwoPassBest: deadline_ = VPX_DL_BEST_QUALITY; 
break; - default: - ASSERT_TRUE(false) << "Unexpected mode " << mode; + default: ASSERT_TRUE(false) << "Unexpected mode " << mode; } - if (mode == kTwoPassGood || mode == kTwoPassBest) + if (mode == kTwoPassGood || mode == kTwoPassBest) { passes_ = 2; - else + } else { passes_ = 1; + } } // The function should return "true" most of the time, therefore no early // break-out is implemented within the match checking process. -static bool compare_img(const vpx_image_t *img1, - const vpx_image_t *img2) { - bool match = (img1->fmt == img2->fmt) && - (img1->cs == img2->cs) && - (img1->d_w == img2->d_w) && - (img1->d_h == img2->d_h); +static bool compare_img(const vpx_image_t *img1, const vpx_image_t *img2) { + bool match = (img1->fmt == img2->fmt) && (img1->cs == img2->cs) && + (img1->d_w == img2->d_w) && (img1->d_h == img2->d_h); - const unsigned int width_y = img1->d_w; + const unsigned int width_y = img1->d_w; const unsigned int height_y = img1->d_h; unsigned int i; - for (i = 0; i < height_y; ++i) + for (i = 0; i < height_y; ++i) { match = (memcmp(img1->planes[VPX_PLANE_Y] + i * img1->stride[VPX_PLANE_Y], img2->planes[VPX_PLANE_Y] + i * img2->stride[VPX_PLANE_Y], - width_y) == 0) && match; - const unsigned int width_uv = (img1->d_w + 1) >> 1; + width_y) == 0) && + match; + } + const unsigned int width_uv = (img1->d_w + 1) >> 1; const unsigned int height_uv = (img1->d_h + 1) >> 1; - for (i = 0; i < height_uv; ++i) + for (i = 0; i < height_uv; ++i) { match = (memcmp(img1->planes[VPX_PLANE_U] + i * img1->stride[VPX_PLANE_U], img2->planes[VPX_PLANE_U] + i * img2->stride[VPX_PLANE_U], - width_uv) == 0) && match; - for (i = 0; i < height_uv; ++i) + width_uv) == 0) && + match; + } + for (i = 0; i < height_uv; ++i) { match = (memcmp(img1->planes[VPX_PLANE_V] + i * img1->stride[VPX_PLANE_V], img2->planes[VPX_PLANE_V] + i * img2->stride[VPX_PLANE_V], - width_uv) == 0) && match; + width_uv) == 0) && + match; + } return match; } -void EncoderTest::MismatchHook(const vpx_image_t* 
/*img1*/, - const vpx_image_t* /*img2*/) { +void EncoderTest::MismatchHook(const vpx_image_t * /*img1*/, + const vpx_image_t * /*img2*/) { ASSERT_TRUE(0) << "Encode/Decode mismatch found"; } @@ -181,34 +168,37 @@ void EncoderTest::RunLoop(VideoSource *video) { for (unsigned int pass = 0; pass < passes_; pass++) { last_pts_ = 0; - if (passes_ == 1) + if (passes_ == 1) { cfg_.g_pass = VPX_RC_ONE_PASS; - else if (pass == 0) + } else if (pass == 0) { cfg_.g_pass = VPX_RC_FIRST_PASS; - else + } else { cfg_.g_pass = VPX_RC_LAST_PASS; + } BeginPassHook(pass); - Encoder* const encoder = codec_->CreateEncoder(cfg_, deadline_, init_flags_, - &stats_); - ASSERT_TRUE(encoder != NULL); + testing::internal::scoped_ptr encoder( + codec_->CreateEncoder(cfg_, deadline_, init_flags_, &stats_)); + ASSERT_TRUE(encoder.get() != NULL); - video->Begin(); + ASSERT_NO_FATAL_FAILURE(video->Begin()); encoder->InitEncoder(video); ASSERT_FALSE(::testing::Test::HasFatalFailure()); unsigned long dec_init_flags = 0; // NOLINT // Use fragment decoder if encoder outputs partitions. // NOTE: fragment decoder and partition encoder are only supported by VP8. 
- if (init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION) + if (init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION) { dec_init_flags |= VPX_CODEC_USE_INPUT_FRAGMENTS; - Decoder* const decoder = codec_->CreateDecoder(dec_cfg, dec_init_flags, 0); + } + testing::internal::scoped_ptr decoder( + codec_->CreateDecoder(dec_cfg, dec_init_flags)); bool again; for (again = true; again; video->Next()) { again = (video->img() != NULL); PreEncodeFrameHook(video); - PreEncodeFrameHook(video, encoder); + PreEncodeFrameHook(video, encoder.get()); encoder->EncodeFrame(video, frame_flags_); CxDataIterator iter = encoder->GetCxData(); @@ -221,12 +211,11 @@ void EncoderTest::RunLoop(VideoSource *video) { switch (pkt->kind) { case VPX_CODEC_CX_FRAME_PKT: has_cxdata = true; - if (decoder && DoDecode()) { + if (decoder.get() != NULL && DoDecode()) { vpx_codec_err_t res_dec = decoder->DecodeFrame( - (const uint8_t*)pkt->data.frame.buf, pkt->data.frame.sz); + (const uint8_t *)pkt->data.frame.buf, pkt->data.frame.sz); - if (!HandleDecodeResult(res_dec, *video, decoder)) - break; + if (!HandleDecodeResult(res_dec, *video, decoder.get())) break; has_dxdata = true; } @@ -235,20 +224,16 @@ void EncoderTest::RunLoop(VideoSource *video) { FramePktHook(pkt); break; - case VPX_CODEC_PSNR_PKT: - PSNRPktHook(pkt); - break; + case VPX_CODEC_PSNR_PKT: PSNRPktHook(pkt); break; - default: - break; + default: break; } } // Flush the decoder when there are no more fragments. 
if ((init_flags_ & VPX_CODEC_USE_OUTPUT_PARTITION) && has_dxdata) { const vpx_codec_err_t res_dec = decoder->DecodeFrame(NULL, 0); - if (!HandleDecodeResult(res_dec, *video, decoder)) - break; + if (!HandleDecodeResult(res_dec, *video, decoder.get())) break; } if (has_dxdata && has_cxdata) { @@ -261,21 +246,14 @@ void EncoderTest::RunLoop(VideoSource *video) { MismatchHook(img_enc, img_dec); } } - if (img_dec) - DecompressedFrameHook(*img_dec, video->pts()); + if (img_dec) DecompressedFrameHook(*img_dec, video->pts()); } - if (!Continue()) - break; + if (!Continue()) break; } EndPassHook(); - if (decoder) - delete decoder; - delete encoder; - - if (!Continue()) - break; + if (!Continue()) break; } } diff --git a/libs/libvpx/test/encode_test_driver.h b/libs/libvpx/test/encode_test_driver.h index 6d0a72f980..09b1a78344 100644 --- a/libs/libvpx/test/encode_test_driver.h +++ b/libs/libvpx/test/encode_test_driver.h @@ -16,7 +16,7 @@ #include "third_party/googletest/src/include/gtest/gtest.h" #include "./vpx_config.h" -#if CONFIG_VP8_ENCODER || CONFIG_VP9_ENCODER || CONFIG_VP10_ENCODER +#if CONFIG_VP8_ENCODER || CONFIG_VP9_ENCODER #include "vpx/vp8cx.h" #endif #include "vpx/vpx_encoder.h" @@ -33,19 +33,17 @@ enum TestMode { kTwoPassGood, kTwoPassBest }; -#define ALL_TEST_MODES ::testing::Values(::libvpx_test::kRealTime, \ - ::libvpx_test::kOnePassGood, \ - ::libvpx_test::kOnePassBest, \ - ::libvpx_test::kTwoPassGood, \ - ::libvpx_test::kTwoPassBest) +#define ALL_TEST_MODES \ + ::testing::Values(::libvpx_test::kRealTime, ::libvpx_test::kOnePassGood, \ + ::libvpx_test::kOnePassBest, ::libvpx_test::kTwoPassGood, \ + ::libvpx_test::kTwoPassBest) -#define ONE_PASS_TEST_MODES ::testing::Values(::libvpx_test::kRealTime, \ - ::libvpx_test::kOnePassGood, \ - ::libvpx_test::kOnePassBest) - -#define TWO_PASS_TEST_MODES ::testing::Values(::libvpx_test::kTwoPassGood, \ - ::libvpx_test::kTwoPassBest) +#define ONE_PASS_TEST_MODES \ + ::testing::Values(::libvpx_test::kRealTime, 
::libvpx_test::kOnePassGood, \ + ::libvpx_test::kOnePassBest) +#define TWO_PASS_TEST_MODES \ + ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kTwoPassBest) // Provides an object to handle the libvpx get_cx_data() iteration pattern class CxDataIterator { @@ -58,8 +56,8 @@ class CxDataIterator { } private: - vpx_codec_ctx_t *encoder_; - vpx_codec_iter_t iter_; + vpx_codec_ctx_t *encoder_; + vpx_codec_iter_t iter_; }; // Implements an in-memory store for libvpx twopass statistics @@ -75,15 +73,12 @@ class TwopassStatsStore { return buf; } - void Reset() { - buffer_.clear(); - } + void Reset() { buffer_.clear(); } protected: - std::string buffer_; + std::string buffer_; }; - // Provides a simplified interface to manage one video encoding pass, given // a configuration and video source. // @@ -97,13 +92,9 @@ class Encoder { memset(&encoder_, 0, sizeof(encoder_)); } - virtual ~Encoder() { - vpx_codec_destroy(&encoder_); - } + virtual ~Encoder() { vpx_codec_destroy(&encoder_); } - CxDataIterator GetCxData() { - return CxDataIterator(&encoder_); - } + CxDataIterator GetCxData() { return CxDataIterator(&encoder_); } void InitEncoder(VideoSource *video); @@ -115,9 +106,7 @@ class Encoder { void EncodeFrame(VideoSource *video, const unsigned long frame_flags); // Convenience wrapper for EncodeFrame() - void EncodeFrame(VideoSource *video) { - EncodeFrame(video, 0); - } + void EncodeFrame(VideoSource *video) { EncodeFrame(video, 0); } void Control(int ctrl_id, int arg) { const vpx_codec_err_t res = vpx_codec_control_(&encoder_, ctrl_id, arg); @@ -143,7 +132,7 @@ class Encoder { const vpx_codec_err_t res = vpx_codec_control_(&encoder_, ctrl_id, arg); ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError(); } -#if CONFIG_VP8_ENCODER || CONFIG_VP9_ENCODER || CONFIG_VP10_ENCODER +#if CONFIG_VP8_ENCODER || CONFIG_VP9_ENCODER void Control(int ctrl_id, vpx_active_map_t *arg) { const vpx_codec_err_t res = vpx_codec_control_(&encoder_, ctrl_id, arg); ASSERT_EQ(VPX_CODEC_OK, res) 
<< EncoderError(); @@ -156,12 +145,10 @@ class Encoder { cfg_ = *cfg; } - void set_deadline(unsigned long deadline) { - deadline_ = deadline; - } + void set_deadline(unsigned long deadline) { deadline_ = deadline; } protected: - virtual vpx_codec_iface_t* CodecInterface() const = 0; + virtual vpx_codec_iface_t *CodecInterface() const = 0; const char *EncoderError() { const char *detail = vpx_codec_error_detail(&encoder_); @@ -175,11 +162,11 @@ class Encoder { // Flush the encoder on EOS void Flush(); - vpx_codec_ctx_t encoder_; - vpx_codec_enc_cfg_t cfg_; - unsigned long deadline_; - unsigned long init_flags_; - TwopassStatsStore *stats_; + vpx_codec_ctx_t encoder_; + vpx_codec_enc_cfg_t cfg_; + unsigned long deadline_; + unsigned long init_flags_; + TwopassStatsStore *stats_; }; // Common test functionality for all Encoder tests. @@ -221,36 +208,35 @@ class EncoderTest { virtual void EndPassHook() {} // Hook to be called before encoding a frame. - virtual void PreEncodeFrameHook(VideoSource* /*video*/) {} - virtual void PreEncodeFrameHook(VideoSource* /*video*/, - Encoder* /*encoder*/) {} + virtual void PreEncodeFrameHook(VideoSource * /*video*/) {} + virtual void PreEncodeFrameHook(VideoSource * /*video*/, + Encoder * /*encoder*/) {} // Hook to be called on every compressed data packet. - virtual void FramePktHook(const vpx_codec_cx_pkt_t* /*pkt*/) {} + virtual void FramePktHook(const vpx_codec_cx_pkt_t * /*pkt*/) {} // Hook to be called on every PSNR packet. - virtual void PSNRPktHook(const vpx_codec_cx_pkt_t* /*pkt*/) {} + virtual void PSNRPktHook(const vpx_codec_cx_pkt_t * /*pkt*/) {} // Hook to determine whether the encode loop should continue. 
virtual bool Continue() const { return !(::testing::Test::HasFatalFailure() || abort_); } - const CodecFactory *codec_; + const CodecFactory *codec_; // Hook to determine whether to decode frame after encoding virtual bool DoDecode() const { return 1; } // Hook to handle encode/decode mismatch - virtual void MismatchHook(const vpx_image_t *img1, - const vpx_image_t *img2); + virtual void MismatchHook(const vpx_image_t *img1, const vpx_image_t *img2); // Hook to be called on every decompressed frame. - virtual void DecompressedFrameHook(const vpx_image_t& /*img*/, + virtual void DecompressedFrameHook(const vpx_image_t & /*img*/, vpx_codec_pts_t /*pts*/) {} // Hook to be called to handle decode result. Return true to continue. virtual bool HandleDecodeResult(const vpx_codec_err_t res_dec, - const VideoSource& /*video*/, + const VideoSource & /*video*/, Decoder *decoder) { EXPECT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError(); return VPX_CODEC_OK == res_dec; @@ -262,15 +248,15 @@ class EncoderTest { return pkt; } - bool abort_; - vpx_codec_enc_cfg_t cfg_; - vpx_codec_dec_cfg_t dec_cfg_; - unsigned int passes_; - unsigned long deadline_; - TwopassStatsStore stats_; - unsigned long init_flags_; - unsigned long frame_flags_; - vpx_codec_pts_t last_pts_; + bool abort_; + vpx_codec_enc_cfg_t cfg_; + vpx_codec_dec_cfg_t dec_cfg_; + unsigned int passes_; + unsigned long deadline_; + TwopassStatsStore stats_; + unsigned long init_flags_; + unsigned long frame_flags_; + vpx_codec_pts_t last_pts_; }; } // namespace libvpx_test diff --git a/libs/libvpx/test/error_resilience_test.cc b/libs/libvpx/test/error_resilience_test.cc index cd0dca235a..4c7ede8cae 100644 --- a/libs/libvpx/test/error_resilience_test.cc +++ b/libs/libvpx/test/error_resilience_test.cc @@ -19,16 +19,13 @@ namespace { const int kMaxErrorFrames = 12; const int kMaxDroppableFrames = 12; -class ErrorResilienceTestLarge : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWith2Params { +class 
ErrorResilienceTestLarge + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWith2Params { protected: ErrorResilienceTestLarge() - : EncoderTest(GET_PARAM(0)), - svc_support_(GET_PARAM(2)), - psnr_(0.0), - nframes_(0), - mismatch_psnr_(0.0), - mismatch_nframes_(0), + : EncoderTest(GET_PARAM(0)), svc_support_(GET_PARAM(2)), psnr_(0.0), + nframes_(0), mismatch_psnr_(0.0), mismatch_nframes_(0), encoding_mode_(GET_PARAM(1)) { Reset(); } @@ -66,81 +63,70 @@ class ErrorResilienceTestLarge : public ::libvpx_test::EncoderTest, // LAST is updated on base/layer 0, GOLDEN updated on layer 1. // Non-zero pattern_switch parameter means pattern will switch to // not using LAST for frame_num >= pattern_switch. - int SetFrameFlags(int frame_num, - int num_temp_layers, - int pattern_switch) { + int SetFrameFlags(int frame_num, int num_temp_layers, int pattern_switch) { int frame_flags = 0; if (num_temp_layers == 2) { - if (frame_num % 2 == 0) { - if (frame_num < pattern_switch || pattern_switch == 0) { - // Layer 0: predict from LAST and ARF, update LAST. - frame_flags = VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF; - } else { - // Layer 0: predict from GF and ARF, update GF. - frame_flags = VP8_EFLAG_NO_REF_LAST | - VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_ARF; - } + if (frame_num % 2 == 0) { + if (frame_num < pattern_switch || pattern_switch == 0) { + // Layer 0: predict from LAST and ARF, update LAST. + frame_flags = + VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; } else { - if (frame_num < pattern_switch || pattern_switch == 0) { - // Layer 1: predict from L, GF, and ARF, update GF. - frame_flags = VP8_EFLAG_NO_UPD_ARF | - VP8_EFLAG_NO_UPD_LAST; - } else { - // Layer 1: predict from GF and ARF, update GF. - frame_flags = VP8_EFLAG_NO_REF_LAST | - VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_ARF; - } + // Layer 0: predict from GF and ARF, update GF. 
+ frame_flags = VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_UPD_LAST | + VP8_EFLAG_NO_UPD_ARF; } + } else { + if (frame_num < pattern_switch || pattern_switch == 0) { + // Layer 1: predict from L, GF, and ARF, update GF. + frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST; + } else { + // Layer 1: predict from GF and ARF, update GF. + frame_flags = VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_UPD_LAST | + VP8_EFLAG_NO_UPD_ARF; + } + } } return frame_flags; } virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video, ::libvpx_test::Encoder * /*encoder*/) { - frame_flags_ &= ~(VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF); + frame_flags_ &= + ~(VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF); // For temporal layer case. if (cfg_.ts_number_layers > 1) { - frame_flags_ = SetFrameFlags(video->frame(), - cfg_.ts_number_layers, - pattern_switch_); + frame_flags_ = + SetFrameFlags(video->frame(), cfg_.ts_number_layers, pattern_switch_); for (unsigned int i = 0; i < droppable_nframes_; ++i) { if (droppable_frames_[i] == video->frame()) { - std::cout << "Encoding droppable frame: " - << droppable_frames_[i] << "\n"; + std::cout << "Encoding droppable frame: " << droppable_frames_[i] + << "\n"; } } } else { - if (droppable_nframes_ > 0 && - (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) { - for (unsigned int i = 0; i < droppable_nframes_; ++i) { - if (droppable_frames_[i] == video->frame()) { - std::cout << "Encoding droppable frame: " - << droppable_frames_[i] << "\n"; - frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF); - return; - } - } - } + if (droppable_nframes_ > 0 && + (cfg_.g_pass == VPX_RC_LAST_PASS || cfg_.g_pass == VPX_RC_ONE_PASS)) { + for (unsigned int i = 0; i < droppable_nframes_; ++i) { + if (droppable_frames_[i] == video->frame()) { + std::cout << "Encoding droppable frame: " << droppable_frames_[i] + << "\n"; + frame_flags_ |= (VP8_EFLAG_NO_UPD_LAST | 
VP8_EFLAG_NO_UPD_GF | + VP8_EFLAG_NO_UPD_ARF); + return; + } + } + } } } double GetAveragePsnr() const { - if (nframes_) - return psnr_ / nframes_; + if (nframes_) return psnr_ / nframes_; return 0.0; } double GetAverageMismatchPsnr() const { - if (mismatch_nframes_) - return mismatch_psnr_ / mismatch_nframes_; + if (mismatch_nframes_) return mismatch_psnr_ / mismatch_nframes_; return 0.0; } @@ -158,8 +144,7 @@ class ErrorResilienceTestLarge : public ::libvpx_test::EncoderTest, return 1; } - virtual void MismatchHook(const vpx_image_t *img1, - const vpx_image_t *img2) { + virtual void MismatchHook(const vpx_image_t *img1, const vpx_image_t *img2) { double mismatch_psnr = compute_psnr(img1, img2); mismatch_psnr_ += mismatch_psnr; ++mismatch_nframes_; @@ -167,32 +152,32 @@ class ErrorResilienceTestLarge : public ::libvpx_test::EncoderTest, } void SetErrorFrames(int num, unsigned int *list) { - if (num > kMaxErrorFrames) + if (num > kMaxErrorFrames) { num = kMaxErrorFrames; - else if (num < 0) + } else if (num < 0) { num = 0; + } error_nframes_ = num; - for (unsigned int i = 0; i < error_nframes_; ++i) + for (unsigned int i = 0; i < error_nframes_; ++i) { error_frames_[i] = list[i]; + } } void SetDroppableFrames(int num, unsigned int *list) { - if (num > kMaxDroppableFrames) + if (num > kMaxDroppableFrames) { num = kMaxDroppableFrames; - else if (num < 0) + } else if (num < 0) { num = 0; + } droppable_nframes_ = num; - for (unsigned int i = 0; i < droppable_nframes_; ++i) + for (unsigned int i = 0; i < droppable_nframes_; ++i) { droppable_frames_[i] = list[i]; + } } - unsigned int GetMismatchFrames() { - return mismatch_nframes_; - } + unsigned int GetMismatchFrames() { return mismatch_nframes_; } - void SetPatternSwitch(int frame_switch) { - pattern_switch_ = frame_switch; - } + void SetPatternSwitch(int frame_switch) { pattern_switch_ = frame_switch; } bool svc_support_; @@ -265,15 +250,14 @@ TEST_P(ErrorResilienceTestLarge, DropFramesWithoutRecovery) { // In 
addition to isolated loss/drop, add a long consecutive series // (of size 9) of dropped frames. unsigned int num_droppable_frames = 11; - unsigned int droppable_frame_list[] = {5, 16, 22, 23, 24, 25, 26, 27, 28, - 29, 30}; + unsigned int droppable_frame_list[] = { 5, 16, 22, 23, 24, 25, + 26, 27, 28, 29, 30 }; SetDroppableFrames(num_droppable_frames, droppable_frame_list); SetErrorFrames(num_droppable_frames, droppable_frame_list); ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); // Test that no mismatches have been found - std::cout << " Mismatch frames: " - << GetMismatchFrames() << "\n"; - EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0); + std::cout << " Mismatch frames: " << GetMismatchFrames() << "\n"; + EXPECT_EQ(GetMismatchFrames(), (unsigned int)0); // Reset previously set of error/droppable frames. Reset(); @@ -306,8 +290,7 @@ TEST_P(ErrorResilienceTestLarge, DropFramesWithoutRecovery) { // layer, so successful decoding is expected. TEST_P(ErrorResilienceTestLarge, 2LayersDropEnhancement) { // This test doesn't run if SVC is not supported. - if (!svc_support_) - return; + if (!svc_support_) return; const vpx_rational timebase = { 33333333, 1000000000 }; cfg_.g_timebase = timebase; @@ -337,14 +320,13 @@ TEST_P(ErrorResilienceTestLarge, 2LayersDropEnhancement) { // The odd frames are the enhancement layer for 2 layer pattern, so set // those frames as droppable. Drop the last 7 frames. 
unsigned int num_droppable_frames = 7; - unsigned int droppable_frame_list[] = {27, 29, 31, 33, 35, 37, 39}; + unsigned int droppable_frame_list[] = { 27, 29, 31, 33, 35, 37, 39 }; SetDroppableFrames(num_droppable_frames, droppable_frame_list); SetErrorFrames(num_droppable_frames, droppable_frame_list); ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); // Test that no mismatches have been found - std::cout << " Mismatch frames: " - << GetMismatchFrames() << "\n"; - EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0); + std::cout << " Mismatch frames: " << GetMismatchFrames() << "\n"; + EXPECT_EQ(GetMismatchFrames(), (unsigned int)0); // Reset previously set of error/droppable frames. Reset(); @@ -355,8 +337,7 @@ TEST_P(ErrorResilienceTestLarge, 2LayersDropEnhancement) { // sequence, the LAST ref is not used anymore. TEST_P(ErrorResilienceTestLarge, 2LayersNoRefLast) { // This test doesn't run if SVC is not supported. - if (!svc_support_) - return; + if (!svc_support_) return; const vpx_rational timebase = { 33333333, 1000000000 }; cfg_.g_timebase = timebase; @@ -385,20 +366,19 @@ TEST_P(ErrorResilienceTestLarge, 2LayersNoRefLast) { ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); // Test that no mismatches have been found - std::cout << " Mismatch frames: " - << GetMismatchFrames() << "\n"; - EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0); + std::cout << " Mismatch frames: " << GetMismatchFrames() << "\n"; + EXPECT_EQ(GetMismatchFrames(), (unsigned int)0); // Reset previously set of error/droppable frames. 
Reset(); } -class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWithParam { +class ErrorResilienceTestLargeCodecControls + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWithParam { protected: ErrorResilienceTestLargeCodecControls() - : EncoderTest(GET_PARAM(0)), - encoding_mode_(GET_PARAM(1)) { + : EncoderTest(GET_PARAM(0)), encoding_mode_(GET_PARAM(1)) { Reset(); } @@ -437,8 +417,8 @@ class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest, if (num_temp_layers == 2) { if (frame_num % 2 == 0) { // Layer 0: predict from L and ARF, update L. - frame_flags = VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF; + frame_flags = + VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF; } else { // Layer 1: predict from L, G and ARF, and update G. frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST | @@ -451,9 +431,9 @@ class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest, VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF; } else if ((frame_num - 2) % 4 == 0) { // Layer 1: predict from L, G, update G. - frame_flags = VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST | - VP8_EFLAG_NO_REF_ARF; - } else if ((frame_num - 1) % 2 == 0) { + frame_flags = + VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_REF_ARF; + } else if ((frame_num - 1) % 2 == 0) { // Layer 2: predict from L, G, ARF; update ARG. 
frame_flags = VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_LAST; } @@ -467,7 +447,7 @@ class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest, if (frame_num % 2 == 0) { layer_id = 0; } else { - layer_id = 1; + layer_id = 1; } } else if (num_temp_layers == 3) { if (frame_num % 4 == 0) { @@ -484,16 +464,16 @@ class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest, virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video, libvpx_test::Encoder *encoder) { if (cfg_.ts_number_layers > 1) { - int layer_id = SetLayerId(video->frame(), cfg_.ts_number_layers); - int frame_flags = SetFrameFlags(video->frame(), cfg_.ts_number_layers); - if (video->frame() > 0) { - encoder->Control(VP8E_SET_TEMPORAL_LAYER_ID, layer_id); - encoder->Control(VP8E_SET_FRAME_FLAGS, frame_flags); - } - const vpx_rational_t tb = video->timebase(); - timebase_ = static_cast(tb.num) / tb.den; - duration_ = 0; - return; + int layer_id = SetLayerId(video->frame(), cfg_.ts_number_layers); + int frame_flags = SetFrameFlags(video->frame(), cfg_.ts_number_layers); + if (video->frame() > 0) { + encoder->Control(VP8E_SET_TEMPORAL_LAYER_ID, layer_id); + encoder->Control(VP8E_SET_FRAME_FLAGS, frame_flags); + } + const vpx_rational_t tb = video->timebase(); + timebase_ = static_cast(tb.num) / tb.den; + duration_ = 0; + return; } } @@ -519,26 +499,28 @@ class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest, virtual void EndPassHook(void) { duration_ = (last_pts_ + 1) * timebase_; - if (cfg_.ts_number_layers > 1) { + if (cfg_.ts_number_layers > 1) { for (int layer = 0; layer < static_cast(cfg_.ts_number_layers); - ++layer) { + ++layer) { if (bits_total_[layer]) { // Effective file datarate: - effective_datarate_[layer] = (bits_total_[layer] / 1000.0) / duration_; + effective_datarate_[layer] = + (bits_total_[layer] / 1000.0) / duration_; } } } } double effective_datarate_[3]; - private: - libvpx_test::TestMode encoding_mode_; - 
vpx_codec_pts_t last_pts_; - double timebase_; - int64_t bits_total_[3]; - double duration_; - int tot_frame_number_; - }; + + private: + libvpx_test::TestMode encoding_mode_; + vpx_codec_pts_t last_pts_; + double timebase_; + int64_t bits_total_[3]; + double duration_; + int tot_frame_number_; +}; // Check two codec controls used for: // (1) for setting temporal layer id, and (2) for settings encoder flags. @@ -582,10 +564,12 @@ TEST_P(ErrorResilienceTestLargeCodecControls, CodecControl3TemporalLayers) { for (int j = 0; j < static_cast(cfg_.ts_number_layers); ++j) { ASSERT_GE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 0.75) << " The datarate for the file is lower than target by too much, " - "for layer: " << j; + "for layer: " + << j; ASSERT_LE(effective_datarate_[j], cfg_.ts_target_bitrate[j] * 1.25) << " The datarate for the file is greater than target by too much, " - "for layer: " << j; + "for layer: " + << j; } } } @@ -596,7 +580,4 @@ VP8_INSTANTIATE_TEST_CASE(ErrorResilienceTestLargeCodecControls, ONE_PASS_TEST_MODES); VP9_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES, ::testing::Values(true)); -// SVC-related tests don't run for VP10 since SVC is not supported. -VP10_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES, - ::testing::Values(false)); } // namespace diff --git a/libs/libvpx/test/external_frame_buffer_test.cc b/libs/libvpx/test/external_frame_buffer_test.cc index 2570f44eb8..f9686695a7 100644 --- a/libs/libvpx/test/external_frame_buffer_test.cc +++ b/libs/libvpx/test/external_frame_buffer_test.cc @@ -34,21 +34,18 @@ struct ExternalFrameBuffer { // Class to manipulate a list of external frame buffers. 
class ExternalFrameBufferList { public: - ExternalFrameBufferList() - : num_buffers_(0), - ext_fb_list_(NULL) {} + ExternalFrameBufferList() : num_buffers_(0), ext_fb_list_(NULL) {} virtual ~ExternalFrameBufferList() { for (int i = 0; i < num_buffers_; ++i) { - delete [] ext_fb_list_[i].data; + delete[] ext_fb_list_[i].data; } - delete [] ext_fb_list_; + delete[] ext_fb_list_; } // Creates the list to hold the external buffers. Returns true on success. bool CreateBufferList(int num_buffers) { - if (num_buffers < 0) - return false; + if (num_buffers < 0) return false; num_buffers_ = num_buffers; ext_fb_list_ = new ExternalFrameBuffer[num_buffers_]; @@ -64,11 +61,10 @@ class ExternalFrameBufferList { int GetFreeFrameBuffer(size_t min_size, vpx_codec_frame_buffer_t *fb) { EXPECT_TRUE(fb != NULL); const int idx = FindFreeBufferIndex(); - if (idx == num_buffers_) - return -1; + if (idx == num_buffers_) return -1; if (ext_fb_list_[idx].size < min_size) { - delete [] ext_fb_list_[idx].data; + delete[] ext_fb_list_[idx].data; ext_fb_list_[idx].data = new uint8_t[min_size]; memset(ext_fb_list_[idx].data, 0, min_size); ext_fb_list_[idx].size = min_size; @@ -83,11 +79,10 @@ class ExternalFrameBufferList { int GetZeroFrameBuffer(size_t min_size, vpx_codec_frame_buffer_t *fb) { EXPECT_TRUE(fb != NULL); const int idx = FindFreeBufferIndex(); - if (idx == num_buffers_) - return -1; + if (idx == num_buffers_) return -1; if (ext_fb_list_[idx].size < min_size) { - delete [] ext_fb_list_[idx].data; + delete[] ext_fb_list_[idx].data; ext_fb_list_[idx].data = NULL; ext_fb_list_[idx].size = min_size; } @@ -104,7 +99,7 @@ class ExternalFrameBufferList { return -1; } ExternalFrameBuffer *const ext_fb = - reinterpret_cast(fb->priv); + reinterpret_cast(fb->priv); if (ext_fb == NULL) { EXPECT_TRUE(ext_fb != NULL); return -1; @@ -119,7 +114,7 @@ class ExternalFrameBufferList { void CheckXImageFrameBuffer(const vpx_image_t *img) { if (img->fb_priv != NULL) { const struct ExternalFrameBuffer 
*const ext_fb = - reinterpret_cast(img->fb_priv); + reinterpret_cast(img->fb_priv); ASSERT_TRUE(img->planes[0] >= ext_fb->data && img->planes[0] < (ext_fb->data + ext_fb->size)); @@ -133,8 +128,7 @@ class ExternalFrameBufferList { int i; // Find a free frame buffer. for (i = 0; i < num_buffers_; ++i) { - if (!ext_fb_list_[i].in_use) - break; + if (!ext_fb_list_[i].in_use) break; } return i; } @@ -161,16 +155,15 @@ class ExternalFrameBufferList { int get_vp9_frame_buffer(void *user_priv, size_t min_size, vpx_codec_frame_buffer_t *fb) { ExternalFrameBufferList *const fb_list = - reinterpret_cast(user_priv); + reinterpret_cast(user_priv); return fb_list->GetFreeFrameBuffer(min_size, fb); } // Callback used by libvpx to tell the application that |fb| is not needed // anymore. -int release_vp9_frame_buffer(void *user_priv, - vpx_codec_frame_buffer_t *fb) { +int release_vp9_frame_buffer(void *user_priv, vpx_codec_frame_buffer_t *fb) { ExternalFrameBufferList *const fb_list = - reinterpret_cast(user_priv); + reinterpret_cast(user_priv); return fb_list->ReturnFrameBuffer(fb); } @@ -178,7 +171,7 @@ int release_vp9_frame_buffer(void *user_priv, int get_vp9_zero_frame_buffer(void *user_priv, size_t min_size, vpx_codec_frame_buffer_t *fb) { ExternalFrameBufferList *const fb_list = - reinterpret_cast(user_priv); + reinterpret_cast(user_priv); return fb_list->GetZeroFrameBuffer(min_size, fb); } @@ -186,7 +179,7 @@ int get_vp9_zero_frame_buffer(void *user_priv, size_t min_size, int get_vp9_one_less_byte_frame_buffer(void *user_priv, size_t min_size, vpx_codec_frame_buffer_t *fb) { ExternalFrameBufferList *const fb_list = - reinterpret_cast(user_priv); + reinterpret_cast(user_priv); return fb_list->GetFreeFrameBuffer(min_size - 1, fb); } @@ -203,16 +196,14 @@ int do_not_release_vp9_frame_buffer(void *user_priv, // Class for testing passing in external frame buffers to libvpx. 
class ExternalFrameBufferMD5Test : public ::libvpx_test::DecoderTest, - public ::libvpx_test::CodecTestWithParam { + public ::libvpx_test::CodecTestWithParam { protected: ExternalFrameBufferMD5Test() : DecoderTest(GET_PARAM(::libvpx_test::kCodecFactoryParam)), - md5_file_(NULL), - num_buffers_(0) {} + md5_file_(NULL), num_buffers_(0) {} virtual ~ExternalFrameBufferMD5Test() { - if (md5_file_ != NULL) - fclose(md5_file_); + if (md5_file_ != NULL) fclose(md5_file_); } virtual void PreDecodeFrameHook( @@ -222,15 +213,15 @@ class ExternalFrameBufferMD5Test // Have libvpx use frame buffers we create. ASSERT_TRUE(fb_list_.CreateBufferList(num_buffers_)); ASSERT_EQ(VPX_CODEC_OK, - decoder->SetFrameBufferFunctions( - GetVP9FrameBuffer, ReleaseVP9FrameBuffer, this)); + decoder->SetFrameBufferFunctions(GetVP9FrameBuffer, + ReleaseVP9FrameBuffer, this)); } } void OpenMD5File(const std::string &md5_file_name_) { md5_file_ = libvpx_test::OpenTestDataFile(md5_file_name_); ASSERT_TRUE(md5_file_ != NULL) << "Md5 file open failed. Filename: " - << md5_file_name_; + << md5_file_name_; } virtual void DecompressedFrameHook(const vpx_image_t &img, @@ -258,7 +249,7 @@ class ExternalFrameBufferMD5Test static int GetVP9FrameBuffer(void *user_priv, size_t min_size, vpx_codec_frame_buffer_t *fb) { ExternalFrameBufferMD5Test *const md5Test = - reinterpret_cast(user_priv); + reinterpret_cast(user_priv); return md5Test->fb_list_.GetFreeFrameBuffer(min_size, fb); } @@ -267,7 +258,7 @@ class ExternalFrameBufferMD5Test static int ReleaseVP9FrameBuffer(void *user_priv, vpx_codec_frame_buffer_t *fb) { ExternalFrameBufferMD5Test *const md5Test = - reinterpret_cast(user_priv); + reinterpret_cast(user_priv); return md5Test->fb_list_.ReturnFrameBuffer(fb); } @@ -286,10 +277,7 @@ const char kVP9TestFile[] = "vp90-2-02-size-lf-1920x1080.webm"; // Class for testing passing in external frame buffers to libvpx. 
class ExternalFrameBufferTest : public ::testing::Test { protected: - ExternalFrameBufferTest() - : video_(NULL), - decoder_(NULL), - num_buffers_(0) {} + ExternalFrameBufferTest() : video_(NULL), decoder_(NULL), num_buffers_(0) {} virtual void SetUp() { video_ = new libvpx_test::WebMVideoSource(kVP9TestFile); @@ -309,8 +297,7 @@ class ExternalFrameBufferTest : public ::testing::Test { // Passes the external frame buffer information to libvpx. vpx_codec_err_t SetFrameBufferFunctions( - int num_buffers, - vpx_get_frame_buffer_cb_fn_t cb_get, + int num_buffers, vpx_get_frame_buffer_cb_fn_t cb_get, vpx_release_frame_buffer_cb_fn_t cb_release) { if (num_buffers > 0) { num_buffers_ = num_buffers; @@ -324,8 +311,7 @@ class ExternalFrameBufferTest : public ::testing::Test { const vpx_codec_err_t res = decoder_->DecodeFrame(video_->cxdata(), video_->frame_size()); CheckDecodedFrames(); - if (res == VPX_CODEC_OK) - video_->Next(); + if (res == VPX_CODEC_OK) video_->Next(); return res; } @@ -333,8 +319,7 @@ class ExternalFrameBufferTest : public ::testing::Test { for (; video_->cxdata() != NULL; video_->Next()) { const vpx_codec_err_t res = decoder_->DecodeFrame(video_->cxdata(), video_->frame_size()); - if (res != VPX_CODEC_OK) - return res; + if (res != VPX_CODEC_OK) return res; CheckDecodedFrames(); } return VPX_CODEC_OK; @@ -365,7 +350,6 @@ class ExternalFrameBufferTest : public ::testing::Test { // Otherwise, the test failed. TEST_P(ExternalFrameBufferMD5Test, ExtFBMD5Match) { const std::string filename = GET_PARAM(kVideoNameParam); - libvpx_test::CompressedVideoSource *video = NULL; // Number of buffers equals #VP9_MAXIMUM_REF_BUFFERS + // #VPX_MAXIMUM_WORK_BUFFERS + four jitter buffers. @@ -380,18 +364,19 @@ TEST_P(ExternalFrameBufferMD5Test, ExtFBMD5Match) { #endif // Open compressed video file. 
+ testing::internal::scoped_ptr video; if (filename.substr(filename.length() - 3, 3) == "ivf") { - video = new libvpx_test::IVFVideoSource(filename); + video.reset(new libvpx_test::IVFVideoSource(filename)); } else { #if CONFIG_WEBM_IO - video = new libvpx_test::WebMVideoSource(filename); + video.reset(new libvpx_test::WebMVideoSource(filename)); #else fprintf(stderr, "WebM IO is disabled, skipping test vector %s\n", filename.c_str()); return; #endif } - ASSERT_TRUE(video != NULL); + ASSERT_TRUE(video.get() != NULL); video->Init(); // Construct md5 file name. @@ -399,8 +384,7 @@ TEST_P(ExternalFrameBufferMD5Test, ExtFBMD5Match) { OpenMD5File(md5_filename); // Decode frame, and check the md5 matching. - ASSERT_NO_FATAL_FAILURE(RunLoop(video)); - delete video; + ASSERT_NO_FATAL_FAILURE(RunLoop(video.get())); } #if CONFIG_WEBM_IO @@ -409,8 +393,8 @@ TEST_F(ExternalFrameBufferTest, MinFrameBuffers) { // #VP9_MAXIMUM_REF_BUFFERS + #VPX_MAXIMUM_WORK_BUFFERS. const int num_buffers = VP9_MAXIMUM_REF_BUFFERS + VPX_MAXIMUM_WORK_BUFFERS; ASSERT_EQ(VPX_CODEC_OK, - SetFrameBufferFunctions( - num_buffers, get_vp9_frame_buffer, release_vp9_frame_buffer)); + SetFrameBufferFunctions(num_buffers, get_vp9_frame_buffer, + release_vp9_frame_buffer)); ASSERT_EQ(VPX_CODEC_OK, DecodeRemainingFrames()); } @@ -421,8 +405,8 @@ TEST_F(ExternalFrameBufferTest, EightJitterBuffers) { const int num_buffers = VP9_MAXIMUM_REF_BUFFERS + VPX_MAXIMUM_WORK_BUFFERS + jitter_buffers; ASSERT_EQ(VPX_CODEC_OK, - SetFrameBufferFunctions( - num_buffers, get_vp9_frame_buffer, release_vp9_frame_buffer)); + SetFrameBufferFunctions(num_buffers, get_vp9_frame_buffer, + release_vp9_frame_buffer)); ASSERT_EQ(VPX_CODEC_OK, DecodeRemainingFrames()); } @@ -432,8 +416,8 @@ TEST_F(ExternalFrameBufferTest, NotEnoughBuffers) { // only use 5 frame buffers at one time. 
const int num_buffers = 2; ASSERT_EQ(VPX_CODEC_OK, - SetFrameBufferFunctions( - num_buffers, get_vp9_frame_buffer, release_vp9_frame_buffer)); + SetFrameBufferFunctions(num_buffers, get_vp9_frame_buffer, + release_vp9_frame_buffer)); ASSERT_EQ(VPX_CODEC_OK, DecodeOneFrame()); ASSERT_EQ(VPX_CODEC_MEM_ERROR, DecodeRemainingFrames()); } @@ -457,18 +441,17 @@ TEST_F(ExternalFrameBufferTest, NullRealloc) { TEST_F(ExternalFrameBufferTest, ReallocOneLessByte) { const int num_buffers = VP9_MAXIMUM_REF_BUFFERS + VPX_MAXIMUM_WORK_BUFFERS; - ASSERT_EQ(VPX_CODEC_OK, - SetFrameBufferFunctions( - num_buffers, get_vp9_one_less_byte_frame_buffer, - release_vp9_frame_buffer)); + ASSERT_EQ(VPX_CODEC_OK, SetFrameBufferFunctions( + num_buffers, get_vp9_one_less_byte_frame_buffer, + release_vp9_frame_buffer)); ASSERT_EQ(VPX_CODEC_MEM_ERROR, DecodeOneFrame()); } TEST_F(ExternalFrameBufferTest, NullGetFunction) { const int num_buffers = VP9_MAXIMUM_REF_BUFFERS + VPX_MAXIMUM_WORK_BUFFERS; - ASSERT_EQ(VPX_CODEC_INVALID_PARAM, - SetFrameBufferFunctions(num_buffers, NULL, - release_vp9_frame_buffer)); + ASSERT_EQ( + VPX_CODEC_INVALID_PARAM, + SetFrameBufferFunctions(num_buffers, NULL, release_vp9_frame_buffer)); } TEST_F(ExternalFrameBufferTest, NullReleaseFunction) { @@ -481,13 +464,14 @@ TEST_F(ExternalFrameBufferTest, SetAfterDecode) { const int num_buffers = VP9_MAXIMUM_REF_BUFFERS + VPX_MAXIMUM_WORK_BUFFERS; ASSERT_EQ(VPX_CODEC_OK, DecodeOneFrame()); ASSERT_EQ(VPX_CODEC_ERROR, - SetFrameBufferFunctions( - num_buffers, get_vp9_frame_buffer, release_vp9_frame_buffer)); + SetFrameBufferFunctions(num_buffers, get_vp9_frame_buffer, + release_vp9_frame_buffer)); } #endif // CONFIG_WEBM_IO -VP9_INSTANTIATE_TEST_CASE(ExternalFrameBufferMD5Test, - ::testing::ValuesIn(libvpx_test::kVP9TestVectors, - libvpx_test::kVP9TestVectors + - libvpx_test::kNumVP9TestVectors)); +VP9_INSTANTIATE_TEST_CASE( + ExternalFrameBufferMD5Test, + ::testing::ValuesIn(libvpx_test::kVP9TestVectors, + 
libvpx_test::kVP9TestVectors + + libvpx_test::kNumVP9TestVectors)); } // namespace diff --git a/libs/libvpx/test/fdct4x4_test.cc b/libs/libvpx/test/fdct4x4_test.cc index 0c91aee214..1270dae94a 100644 --- a/libs/libvpx/test/fdct4x4_test.cc +++ b/libs/libvpx/test/fdct4x4_test.cc @@ -128,35 +128,33 @@ class Trans4x4TestBase { } } - ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block, - test_temp_block, pitch_)); + ASM_REGISTER_STATE_CHECK( + RunFwdTxfm(test_input_block, test_temp_block, pitch_)); if (bit_depth_ == VPX_BITS_8) { ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_)); #if CONFIG_VP9_HIGHBITDEPTH } else { - ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, - CONVERT_TO_BYTEPTR(dst16), pitch_)); + ASM_REGISTER_STATE_CHECK( + RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_)); #endif } for (int j = 0; j < kNumCoeffs; ++j) { #if CONFIG_VP9_HIGHBITDEPTH - const uint32_t diff = + const int diff = bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j]; #else ASSERT_EQ(VPX_BITS_8, bit_depth_); - const uint32_t diff = dst[j] - src[j]; + const int diff = dst[j] - src[j]; #endif const uint32_t error = diff * diff; - if (max_error < error) - max_error = error; + if (max_error < error) max_error = error; total_error += error; } } EXPECT_GE(static_cast(limit), max_error) - << "Error: 4x4 FHT/IHT has an individual round trip error > " - << limit; + << "Error: 4x4 FHT/IHT has an individual round trip error > " << limit; EXPECT_GE(count_test_block * limit, total_error) << "Error: 4x4 FHT/IHT has average round trip error > " << limit @@ -172,8 +170,9 @@ class Trans4x4TestBase { for (int i = 0; i < count_test_block; ++i) { // Initialize a test block with input range [-mask_, mask_]. 
- for (int j = 0; j < kNumCoeffs; ++j) + for (int j = 0; j < kNumCoeffs; ++j) { input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_); + } fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_); ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_)); @@ -197,16 +196,14 @@ class Trans4x4TestBase { input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_; } if (i == 0) { - for (int j = 0; j < kNumCoeffs; ++j) - input_extreme_block[j] = mask_; + for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = mask_; } else if (i == 1) { - for (int j = 0; j < kNumCoeffs; ++j) - input_extreme_block[j] = -mask_; + for (int j = 0; j < kNumCoeffs; ++j) input_extreme_block[j] = -mask_; } fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_); - ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block, - output_block, pitch_)); + ASM_REGISTER_STATE_CHECK( + RunFwdTxfm(input_extreme_block, output_block, pitch_)); // The minimum quant value is 4. for (int j = 0; j < kNumCoeffs; ++j) { @@ -251,22 +248,21 @@ class Trans4x4TestBase { ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_)); #if CONFIG_VP9_HIGHBITDEPTH } else { - ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), - pitch_)); + ASM_REGISTER_STATE_CHECK( + RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_)); #endif } for (int j = 0; j < kNumCoeffs; ++j) { #if CONFIG_VP9_HIGHBITDEPTH - const uint32_t diff = + const int diff = bit_depth_ == VPX_BITS_8 ? 
dst[j] - src[j] : dst16[j] - src16[j]; #else - const uint32_t diff = dst[j] - src[j]; + const int diff = dst[j] - src[j]; #endif const uint32_t error = diff * diff; EXPECT_GE(static_cast(limit), error) - << "Error: 4x4 IDCT has error " << error - << " at index " << j; + << "Error: 4x4 IDCT has error " << error << " at index " << j; } } } @@ -278,17 +274,16 @@ class Trans4x4TestBase { int mask_; }; -class Trans4x4DCT - : public Trans4x4TestBase, - public ::testing::TestWithParam { +class Trans4x4DCT : public Trans4x4TestBase, + public ::testing::TestWithParam { public: virtual ~Trans4x4DCT() {} virtual void SetUp() { fwd_txfm_ = GET_PARAM(0); inv_txfm_ = GET_PARAM(1); - tx_type_ = GET_PARAM(2); - pitch_ = 4; + tx_type_ = GET_PARAM(2); + pitch_ = 4; fwd_txfm_ref = fdct4x4_ref; bit_depth_ = GET_PARAM(3); mask_ = (1 << bit_depth_) - 1; @@ -307,33 +302,24 @@ class Trans4x4DCT IdctFunc inv_txfm_; }; -TEST_P(Trans4x4DCT, AccuracyCheck) { - RunAccuracyCheck(1); -} +TEST_P(Trans4x4DCT, AccuracyCheck) { RunAccuracyCheck(1); } -TEST_P(Trans4x4DCT, CoeffCheck) { - RunCoeffCheck(); -} +TEST_P(Trans4x4DCT, CoeffCheck) { RunCoeffCheck(); } -TEST_P(Trans4x4DCT, MemCheck) { - RunMemCheck(); -} +TEST_P(Trans4x4DCT, MemCheck) { RunMemCheck(); } -TEST_P(Trans4x4DCT, InvAccuracyCheck) { - RunInvAccuracyCheck(1); -} +TEST_P(Trans4x4DCT, InvAccuracyCheck) { RunInvAccuracyCheck(1); } -class Trans4x4HT - : public Trans4x4TestBase, - public ::testing::TestWithParam { +class Trans4x4HT : public Trans4x4TestBase, + public ::testing::TestWithParam { public: virtual ~Trans4x4HT() {} virtual void SetUp() { fwd_txfm_ = GET_PARAM(0); inv_txfm_ = GET_PARAM(1); - tx_type_ = GET_PARAM(2); - pitch_ = 4; + tx_type_ = GET_PARAM(2); + pitch_ = 4; fwd_txfm_ref = fht4x4_ref; bit_depth_ = GET_PARAM(3); mask_ = (1 << bit_depth_) - 1; @@ -353,33 +339,24 @@ class Trans4x4HT IhtFunc inv_txfm_; }; -TEST_P(Trans4x4HT, AccuracyCheck) { - RunAccuracyCheck(1); -} +TEST_P(Trans4x4HT, AccuracyCheck) { 
RunAccuracyCheck(1); } -TEST_P(Trans4x4HT, CoeffCheck) { - RunCoeffCheck(); -} +TEST_P(Trans4x4HT, CoeffCheck) { RunCoeffCheck(); } -TEST_P(Trans4x4HT, MemCheck) { - RunMemCheck(); -} +TEST_P(Trans4x4HT, MemCheck) { RunMemCheck(); } -TEST_P(Trans4x4HT, InvAccuracyCheck) { - RunInvAccuracyCheck(1); -} +TEST_P(Trans4x4HT, InvAccuracyCheck) { RunInvAccuracyCheck(1); } -class Trans4x4WHT - : public Trans4x4TestBase, - public ::testing::TestWithParam { +class Trans4x4WHT : public Trans4x4TestBase, + public ::testing::TestWithParam { public: virtual ~Trans4x4WHT() {} virtual void SetUp() { fwd_txfm_ = GET_PARAM(0); inv_txfm_ = GET_PARAM(1); - tx_type_ = GET_PARAM(2); - pitch_ = 4; + tx_type_ = GET_PARAM(2); + pitch_ = 4; fwd_txfm_ref = fwht4x4_ref; bit_depth_ = GET_PARAM(3); mask_ = (1 << bit_depth_) - 1; @@ -398,21 +375,13 @@ class Trans4x4WHT IdctFunc inv_txfm_; }; -TEST_P(Trans4x4WHT, AccuracyCheck) { - RunAccuracyCheck(0); -} +TEST_P(Trans4x4WHT, AccuracyCheck) { RunAccuracyCheck(0); } -TEST_P(Trans4x4WHT, CoeffCheck) { - RunCoeffCheck(); -} +TEST_P(Trans4x4WHT, CoeffCheck) { RunCoeffCheck(); } -TEST_P(Trans4x4WHT, MemCheck) { - RunMemCheck(); -} +TEST_P(Trans4x4WHT, MemCheck) { RunMemCheck(); } -TEST_P(Trans4x4WHT, InvAccuracyCheck) { - RunInvAccuracyCheck(0); -} +TEST_P(Trans4x4WHT, InvAccuracyCheck) { RunInvAccuracyCheck(0); } using std::tr1::make_tuple; #if CONFIG_VP9_HIGHBITDEPTH @@ -423,10 +392,10 @@ INSTANTIATE_TEST_CASE_P( make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12, 0, VPX_BITS_12), make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8))); #else -INSTANTIATE_TEST_CASE_P( - C, Trans4x4DCT, - ::testing::Values( - make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(C, Trans4x4DCT, + ::testing::Values(make_tuple(&vpx_fdct4x4_c, + &vpx_idct4x4_16_add_c, 0, + VPX_BITS_8))); #endif // CONFIG_VP9_HIGHBITDEPTH #if CONFIG_VP9_HIGHBITDEPTH @@ -463,18 +432,17 @@ INSTANTIATE_TEST_CASE_P( 
make_tuple(&vp9_highbd_fwht4x4_c, &iwht4x4_12, 0, VPX_BITS_12), make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8))); #else -INSTANTIATE_TEST_CASE_P( - C, Trans4x4WHT, - ::testing::Values( - make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(C, Trans4x4WHT, + ::testing::Values(make_tuple(&vp9_fwht4x4_c, + &vpx_iwht4x4_16_add_c, 0, + VPX_BITS_8))); #endif // CONFIG_VP9_HIGHBITDEPTH #if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE -INSTANTIATE_TEST_CASE_P( - NEON, Trans4x4DCT, - ::testing::Values( - make_tuple(&vpx_fdct4x4_c, - &vpx_idct4x4_16_add_neon, 0, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(NEON, Trans4x4DCT, + ::testing::Values(make_tuple(&vpx_fdct4x4_c, + &vpx_idct4x4_16_add_neon, + 0, VPX_BITS_8))); #endif // HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE #if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE @@ -487,28 +455,19 @@ INSTANTIATE_TEST_CASE_P( make_tuple(&vp9_fht4x4_c, &vp9_iht4x4_16_add_neon, 3, VPX_BITS_8))); #endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE -#if CONFIG_USE_X86INC && HAVE_MMX && !CONFIG_VP9_HIGHBITDEPTH && \ - !CONFIG_EMULATE_HARDWARE -INSTANTIATE_TEST_CASE_P( - MMX, Trans4x4WHT, - ::testing::Values( - make_tuple(&vp9_fwht4x4_mmx, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8))); -#endif - -#if CONFIG_USE_X86INC && HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && \ - !CONFIG_EMULATE_HARDWARE +#if HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE INSTANTIATE_TEST_CASE_P( SSE2, Trans4x4WHT, ::testing::Values( + make_tuple(&vp9_fwht4x4_sse2, &vpx_iwht4x4_16_add_c, 0, VPX_BITS_8), make_tuple(&vp9_fwht4x4_c, &vpx_iwht4x4_16_add_sse2, 0, VPX_BITS_8))); #endif #if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE -INSTANTIATE_TEST_CASE_P( - SSE2, Trans4x4DCT, - ::testing::Values( - make_tuple(&vpx_fdct4x4_sse2, - &vpx_idct4x4_16_add_sse2, 0, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(SSE2, Trans4x4DCT, + 
::testing::Values(make_tuple(&vpx_fdct4x4_sse2, + &vpx_idct4x4_16_add_sse2, + 0, VPX_BITS_8))); INSTANTIATE_TEST_CASE_P( SSE2, Trans4x4HT, ::testing::Values( @@ -522,12 +481,11 @@ INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P( SSE2, Trans4x4DCT, ::testing::Values( - make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10_sse2, 0, VPX_BITS_10), + make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_10_sse2, 0, VPX_BITS_10), make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_10_sse2, 0, VPX_BITS_10), - make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12_sse2, 0, VPX_BITS_12), + make_tuple(&vpx_highbd_fdct4x4_c, &idct4x4_12_sse2, 0, VPX_BITS_12), make_tuple(&vpx_highbd_fdct4x4_sse2, &idct4x4_12_sse2, 0, VPX_BITS_12), - make_tuple(&vpx_fdct4x4_sse2, &vpx_idct4x4_16_add_c, 0, - VPX_BITS_8))); + make_tuple(&vpx_fdct4x4_sse2, &vpx_idct4x4_16_add_c, 0, VPX_BITS_8))); INSTANTIATE_TEST_CASE_P( SSE2, Trans4x4HT, @@ -539,10 +497,10 @@ INSTANTIATE_TEST_CASE_P( #endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE #if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE -INSTANTIATE_TEST_CASE_P( - MSA, Trans4x4DCT, - ::testing::Values( - make_tuple(&vpx_fdct4x4_msa, &vpx_idct4x4_16_add_msa, 0, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(MSA, Trans4x4DCT, + ::testing::Values(make_tuple(&vpx_fdct4x4_msa, + &vpx_idct4x4_16_add_msa, 0, + VPX_BITS_8))); INSTANTIATE_TEST_CASE_P( MSA, Trans4x4HT, ::testing::Values( diff --git a/libs/libvpx/test/fdct8x8_test.cc b/libs/libvpx/test/fdct8x8_test.cc index edf4682169..3ac73c1250 100644 --- a/libs/libvpx/test/fdct8x8_test.cc +++ b/libs/libvpx/test/fdct8x8_test.cc @@ -51,10 +51,10 @@ void reference_8x8_dct_1d(const double in[8], double out[8]) { const double kInvSqrt2 = 0.707106781186547524400844362104; for (int k = 0; k < 8; k++) { out[k] = 0.0; - for (int n = 0; n < 8; n++) + for (int n = 0; n < 8; n++) { out[k] += in[n] * cos(kPi * (2 * n + 1) * k / 16.0); - if (k == 0) - out[k] = out[k] * kInvSqrt2; + } + if (k == 0) out[k] = out[k] * 
kInvSqrt2; } } @@ -63,25 +63,20 @@ void reference_8x8_dct_2d(const int16_t input[kNumCoeffs], // First transform columns for (int i = 0; i < 8; ++i) { double temp_in[8], temp_out[8]; - for (int j = 0; j < 8; ++j) - temp_in[j] = input[j*8 + i]; + for (int j = 0; j < 8; ++j) temp_in[j] = input[j * 8 + i]; reference_8x8_dct_1d(temp_in, temp_out); - for (int j = 0; j < 8; ++j) - output[j * 8 + i] = temp_out[j]; + for (int j = 0; j < 8; ++j) output[j * 8 + i] = temp_out[j]; } // Then transform rows for (int i = 0; i < 8; ++i) { double temp_in[8], temp_out[8]; - for (int j = 0; j < 8; ++j) - temp_in[j] = output[j + i*8]; + for (int j = 0; j < 8; ++j) temp_in[j] = output[j + i * 8]; reference_8x8_dct_1d(temp_in, temp_out); // Scale by some magic number - for (int j = 0; j < 8; ++j) - output[j + i * 8] = temp_out[j] * 2; + for (int j = 0; j < 8; ++j) output[j + i * 8] = temp_out[j] * 2; } } - void fdct8x8_ref(const int16_t *in, tran_low_t *out, int stride, int /*tx_type*/) { vpx_fdct8x8_c(in, out, stride); @@ -155,17 +150,19 @@ class FwdTrans8x8TestBase { for (int i = 0; i < count_test_block; ++i) { // Initialize a test block with input range [-255, 255]. - for (int j = 0; j < 64; ++j) + for (int j = 0; j < 64; ++j) { test_input_block[j] = ((rnd.Rand16() >> (16 - bit_depth_)) & mask_) - ((rnd.Rand16() >> (16 - bit_depth_)) & mask_); + } ASM_REGISTER_STATE_CHECK( RunFwdTxfm(test_input_block, test_output_block, pitch_)); for (int j = 0; j < 64; ++j) { - if (test_output_block[j] < 0) + if (test_output_block[j] < 0) { ++count_sign_block[j][0]; - else if (test_output_block[j] > 0) + } else if (test_output_block[j] > 0) { ++count_sign_block[j][1]; + } } } @@ -177,25 +174,26 @@ class FwdTrans8x8TestBase { << 1. 
* max_diff / count_test_block * 100 << "%" << " for input range [-255, 255] at index " << j << " count0: " << count_sign_block[j][0] - << " count1: " << count_sign_block[j][1] - << " diff: " << diff; + << " count1: " << count_sign_block[j][1] << " diff: " << diff; } memset(count_sign_block, 0, sizeof(count_sign_block)); for (int i = 0; i < count_test_block; ++i) { // Initialize a test block with input range [-mask_ / 16, mask_ / 16]. - for (int j = 0; j < 64; ++j) - test_input_block[j] = ((rnd.Rand16() & mask_) >> 4) - - ((rnd.Rand16() & mask_) >> 4); + for (int j = 0; j < 64; ++j) { + test_input_block[j] = + ((rnd.Rand16() & mask_) >> 4) - ((rnd.Rand16() & mask_) >> 4); + } ASM_REGISTER_STATE_CHECK( RunFwdTxfm(test_input_block, test_output_block, pitch_)); for (int j = 0; j < 64; ++j) { - if (test_output_block[j] < 0) + if (test_output_block[j] < 0) { ++count_sign_block[j][0]; - else if (test_output_block[j] > 0) + } else if (test_output_block[j] > 0) { ++count_sign_block[j][1]; + } } } @@ -207,8 +205,7 @@ class FwdTrans8x8TestBase { << 1. 
* max_diff / count_test_block * 100 << "%" << " for input range [-15, 15] at index " << j << " count0: " << count_sign_block[j][0] - << " count1: " << count_sign_block[j][1] - << " diff: " << diff; + << " count1: " << count_sign_block[j][1] << " diff: " << diff; } } @@ -245,19 +242,18 @@ class FwdTrans8x8TestBase { ASM_REGISTER_STATE_CHECK( RunFwdTxfm(test_input_block, test_temp_block, pitch_)); for (int j = 0; j < 64; ++j) { - if (test_temp_block[j] > 0) { - test_temp_block[j] += 2; - test_temp_block[j] /= 4; - test_temp_block[j] *= 4; - } else { - test_temp_block[j] -= 2; - test_temp_block[j] /= 4; - test_temp_block[j] *= 4; - } + if (test_temp_block[j] > 0) { + test_temp_block[j] += 2; + test_temp_block[j] /= 4; + test_temp_block[j] *= 4; + } else { + test_temp_block[j] -= 2; + test_temp_block[j] /= 4; + test_temp_block[j] *= 4; + } } if (bit_depth_ == VPX_BITS_8) { - ASM_REGISTER_STATE_CHECK( - RunInvTxfm(test_temp_block, dst, pitch_)); + ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_)); #if CONFIG_VP9_HIGHBITDEPTH } else { ASM_REGISTER_STATE_CHECK( @@ -273,19 +269,18 @@ class FwdTrans8x8TestBase { const int diff = dst[j] - src[j]; #endif const int error = diff * diff; - if (max_error < error) - max_error = error; + if (max_error < error) max_error = error; total_error += error; } } EXPECT_GE(1 << 2 * (bit_depth_ - 8), max_error) - << "Error: 8x8 FDCT/IDCT or FHT/IHT has an individual" - << " roundtrip error > 1"; + << "Error: 8x8 FDCT/IDCT or FHT/IHT has an individual" + << " roundtrip error > 1"; - EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error) - << "Error: 8x8 FDCT/IDCT or FHT/IHT has average roundtrip " - << "error > 1/5 per block"; + EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8)) / 5, total_error) + << "Error: 8x8 FDCT/IDCT or FHT/IHT has average roundtrip " + << "error > 1/5 per block"; } void RunExtremalCheck() { @@ -341,8 +336,7 @@ class FwdTrans8x8TestBase { ASM_REGISTER_STATE_CHECK( 
fwd_txfm_ref(test_input_block, ref_temp_block, pitch_, tx_type_)); if (bit_depth_ == VPX_BITS_8) { - ASM_REGISTER_STATE_CHECK( - RunInvTxfm(test_temp_block, dst, pitch_)); + ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_)); #if CONFIG_VP9_HIGHBITDEPTH } else { ASM_REGISTER_STATE_CHECK( @@ -358,8 +352,7 @@ class FwdTrans8x8TestBase { const int diff = dst[j] - src[j]; #endif const int error = diff * diff; - if (max_error < error) - max_error = error; + if (max_error < error) max_error = error; total_error += error; const int coeff_diff = test_temp_block[j] - ref_temp_block[j]; @@ -370,7 +363,7 @@ class FwdTrans8x8TestBase { << "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has" << "an individual roundtrip error > 1"; - EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8))/5, total_error) + EXPECT_GE((count_test_block << 2 * (bit_depth_ - 8)) / 5, total_error) << "Error: Extremal 8x8 FDCT/IDCT or FHT/IHT has average" << " roundtrip error > 1/5 per block"; @@ -411,29 +404,29 @@ class FwdTrans8x8TestBase { } reference_8x8_dct_2d(in, out_r); - for (int j = 0; j < kNumCoeffs; ++j) + for (int j = 0; j < kNumCoeffs; ++j) { coeff[j] = static_cast(round(out_r[j])); + } if (bit_depth_ == VPX_BITS_8) { ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_)); #if CONFIG_VP9_HIGHBITDEPTH } else { - ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), - pitch_)); + ASM_REGISTER_STATE_CHECK( + RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_)); #endif } for (int j = 0; j < kNumCoeffs; ++j) { #if CONFIG_VP9_HIGHBITDEPTH - const uint32_t diff = + const int diff = bit_depth_ == VPX_BITS_8 ? 
dst[j] - src[j] : dst16[j] - src16[j]; #else - const uint32_t diff = dst[j] - src[j]; + const int diff = dst[j] - src[j]; #endif const uint32_t error = diff * diff; EXPECT_GE(1u << 2 * (bit_depth_ - 8), error) - << "Error: 8x8 IDCT has error " << error - << " at index " << j; + << "Error: 8x8 IDCT has error " << error << " at index " << j; } } } @@ -449,25 +442,26 @@ class FwdTrans8x8TestBase { double out_r[kNumCoeffs]; // Initialize a test block with input range [-mask_, mask_]. - for (int j = 0; j < kNumCoeffs; ++j) + for (int j = 0; j < kNumCoeffs; ++j) { in[j] = rnd.Rand8() % 2 == 0 ? mask_ : -mask_; + } RunFwdTxfm(in, coeff, pitch_); reference_8x8_dct_2d(in, out_r); - for (int j = 0; j < kNumCoeffs; ++j) + for (int j = 0; j < kNumCoeffs; ++j) { coeff_r[j] = static_cast(round(out_r[j])); + } for (int j = 0; j < kNumCoeffs; ++j) { - const uint32_t diff = coeff[j] - coeff_r[j]; + const int32_t diff = coeff[j] - coeff_r[j]; const uint32_t error = diff * diff; EXPECT_GE(9u << 2 * (bit_depth_ - 8), error) - << "Error: 8x8 DCT has error " << error - << " at index " << j; + << "Error: 8x8 DCT has error " << error << " at index " << j; } } } -void CompareInvReference(IdctFunc ref_txfm, int thresh) { + void CompareInvReference(IdctFunc ref_txfm, int thresh) { ACMRandom rnd(ACMRandom::DeterministicSeed()); const int count_test_block = 10000; const int eob = 12; @@ -484,7 +478,7 @@ void CompareInvReference(IdctFunc ref_txfm, int thresh) { for (int j = 0; j < kNumCoeffs; ++j) { if (j < eob) { // Random values less than the threshold, either positive or negative - coeff[scan[j]] = rnd(thresh) * (1-2*(i%2)); + coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2)); } else { coeff[scan[j]] = 0; } @@ -504,22 +498,21 @@ void CompareInvReference(IdctFunc ref_txfm, int thresh) { #if CONFIG_VP9_HIGHBITDEPTH } else { ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_); - ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), - pitch_)); + ASM_REGISTER_STATE_CHECK( + 
RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_)); #endif } for (int j = 0; j < kNumCoeffs; ++j) { #if CONFIG_VP9_HIGHBITDEPTH - const uint32_t diff = + const int diff = bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j]; #else - const uint32_t diff = dst[j] - ref[j]; + const int diff = dst[j] - ref[j]; #endif const uint32_t error = diff * diff; - EXPECT_EQ(0u, error) - << "Error: 8x8 IDCT has error " << error - << " at index " << j; + EXPECT_EQ(0u, error) << "Error: 8x8 IDCT has error " << error + << " at index " << j; } } } @@ -530,17 +523,16 @@ void CompareInvReference(IdctFunc ref_txfm, int thresh) { int mask_; }; -class FwdTrans8x8DCT - : public FwdTrans8x8TestBase, - public ::testing::TestWithParam { +class FwdTrans8x8DCT : public FwdTrans8x8TestBase, + public ::testing::TestWithParam { public: virtual ~FwdTrans8x8DCT() {} virtual void SetUp() { fwd_txfm_ = GET_PARAM(0); inv_txfm_ = GET_PARAM(1); - tx_type_ = GET_PARAM(2); - pitch_ = 8; + tx_type_ = GET_PARAM(2); + pitch_ = 8; fwd_txfm_ref = fdct8x8_ref; bit_depth_ = GET_PARAM(3); mask_ = (1 << bit_depth_) - 1; @@ -560,37 +552,26 @@ class FwdTrans8x8DCT IdctFunc inv_txfm_; }; -TEST_P(FwdTrans8x8DCT, SignBiasCheck) { - RunSignBiasCheck(); -} +TEST_P(FwdTrans8x8DCT, SignBiasCheck) { RunSignBiasCheck(); } -TEST_P(FwdTrans8x8DCT, RoundTripErrorCheck) { - RunRoundTripErrorCheck(); -} +TEST_P(FwdTrans8x8DCT, RoundTripErrorCheck) { RunRoundTripErrorCheck(); } -TEST_P(FwdTrans8x8DCT, ExtremalCheck) { - RunExtremalCheck(); -} +TEST_P(FwdTrans8x8DCT, ExtremalCheck) { RunExtremalCheck(); } -TEST_P(FwdTrans8x8DCT, FwdAccuracyCheck) { - RunFwdAccuracyCheck(); -} +TEST_P(FwdTrans8x8DCT, FwdAccuracyCheck) { RunFwdAccuracyCheck(); } -TEST_P(FwdTrans8x8DCT, InvAccuracyCheck) { - RunInvAccuracyCheck(); -} +TEST_P(FwdTrans8x8DCT, InvAccuracyCheck) { RunInvAccuracyCheck(); } -class FwdTrans8x8HT - : public FwdTrans8x8TestBase, - public ::testing::TestWithParam { +class FwdTrans8x8HT : public 
FwdTrans8x8TestBase, + public ::testing::TestWithParam { public: virtual ~FwdTrans8x8HT() {} virtual void SetUp() { fwd_txfm_ = GET_PARAM(0); inv_txfm_ = GET_PARAM(1); - tx_type_ = GET_PARAM(2); - pitch_ = 8; + tx_type_ = GET_PARAM(2); + pitch_ = 8; fwd_txfm_ref = fht8x8_ref; bit_depth_ = GET_PARAM(3); mask_ = (1 << bit_depth_) - 1; @@ -610,21 +591,14 @@ class FwdTrans8x8HT IhtFunc inv_txfm_; }; -TEST_P(FwdTrans8x8HT, SignBiasCheck) { - RunSignBiasCheck(); -} +TEST_P(FwdTrans8x8HT, SignBiasCheck) { RunSignBiasCheck(); } -TEST_P(FwdTrans8x8HT, RoundTripErrorCheck) { - RunRoundTripErrorCheck(); -} +TEST_P(FwdTrans8x8HT, RoundTripErrorCheck) { RunRoundTripErrorCheck(); } -TEST_P(FwdTrans8x8HT, ExtremalCheck) { - RunExtremalCheck(); -} +TEST_P(FwdTrans8x8HT, ExtremalCheck) { RunExtremalCheck(); } -class InvTrans8x8DCT - : public FwdTrans8x8TestBase, - public ::testing::TestWithParam { +class InvTrans8x8DCT : public FwdTrans8x8TestBase, + public ::testing::TestWithParam { public: virtual ~InvTrans8x8DCT() {} @@ -664,10 +638,10 @@ INSTANTIATE_TEST_CASE_P( make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_10, 0, VPX_BITS_10), make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_12, 0, VPX_BITS_12))); #else -INSTANTIATE_TEST_CASE_P( - C, FwdTrans8x8DCT, - ::testing::Values( - make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(C, FwdTrans8x8DCT, + ::testing::Values(make_tuple(&vpx_fdct8x8_c, + &vpx_idct8x8_64_add_c, 0, + VPX_BITS_8))); #endif // CONFIG_VP9_HIGHBITDEPTH #if CONFIG_VP9_HIGHBITDEPTH @@ -697,11 +671,10 @@ INSTANTIATE_TEST_CASE_P( #endif // CONFIG_VP9_HIGHBITDEPTH #if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE -INSTANTIATE_TEST_CASE_P( - NEON, FwdTrans8x8DCT, - ::testing::Values( - make_tuple(&vpx_fdct8x8_neon, &vpx_idct8x8_64_add_neon, 0, - VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(NEON, FwdTrans8x8DCT, + ::testing::Values(make_tuple(&vpx_fdct8x8_neon, + &vpx_idct8x8_64_add_neon, + 0, VPX_BITS_8))); #endif // 
HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE #if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE @@ -715,11 +688,10 @@ INSTANTIATE_TEST_CASE_P( #endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE #if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE -INSTANTIATE_TEST_CASE_P( - SSE2, FwdTrans8x8DCT, - ::testing::Values( - make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_sse2, 0, - VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(SSE2, FwdTrans8x8DCT, + ::testing::Values(make_tuple(&vpx_fdct8x8_sse2, + &vpx_idct8x8_64_add_sse2, + 0, VPX_BITS_8))); INSTANTIATE_TEST_CASE_P( SSE2, FwdTrans8x8HT, ::testing::Values( @@ -732,16 +704,16 @@ INSTANTIATE_TEST_CASE_P( #if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE INSTANTIATE_TEST_CASE_P( SSE2, FwdTrans8x8DCT, - ::testing::Values( - make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_c, 0, VPX_BITS_8), - make_tuple(&vpx_highbd_fdct8x8_c, - &idct8x8_64_add_10_sse2, 12, VPX_BITS_10), - make_tuple(&vpx_highbd_fdct8x8_sse2, - &idct8x8_64_add_10_sse2, 12, VPX_BITS_10), - make_tuple(&vpx_highbd_fdct8x8_c, - &idct8x8_64_add_12_sse2, 12, VPX_BITS_12), - make_tuple(&vpx_highbd_fdct8x8_sse2, - &idct8x8_64_add_12_sse2, 12, VPX_BITS_12))); + ::testing::Values(make_tuple(&vpx_fdct8x8_sse2, &vpx_idct8x8_64_add_c, 0, + VPX_BITS_8), + make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_64_add_10_sse2, + 12, VPX_BITS_10), + make_tuple(&vpx_highbd_fdct8x8_sse2, + &idct8x8_64_add_10_sse2, 12, VPX_BITS_10), + make_tuple(&vpx_highbd_fdct8x8_c, &idct8x8_64_add_12_sse2, + 12, VPX_BITS_12), + make_tuple(&vpx_highbd_fdct8x8_sse2, + &idct8x8_64_add_12_sse2, 12, VPX_BITS_12))); INSTANTIATE_TEST_CASE_P( SSE2, FwdTrans8x8HT, @@ -756,30 +728,27 @@ INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P( SSE2, InvTrans8x8DCT, ::testing::Values( - make_tuple(&idct8x8_10_add_10_c, - &idct8x8_10_add_10_sse2, 6225, VPX_BITS_10), - make_tuple(&idct8x8_10, - &idct8x8_64_add_10_sse2, 
6225, VPX_BITS_10), - make_tuple(&idct8x8_10_add_12_c, - &idct8x8_10_add_12_sse2, 6225, VPX_BITS_12), - make_tuple(&idct8x8_12, - &idct8x8_64_add_12_sse2, 6225, VPX_BITS_12))); + make_tuple(&idct8x8_10_add_10_c, &idct8x8_10_add_10_sse2, 6225, + VPX_BITS_10), + make_tuple(&idct8x8_10, &idct8x8_64_add_10_sse2, 6225, VPX_BITS_10), + make_tuple(&idct8x8_10_add_12_c, &idct8x8_10_add_12_sse2, 6225, + VPX_BITS_12), + make_tuple(&idct8x8_12, &idct8x8_64_add_12_sse2, 6225, VPX_BITS_12))); #endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE -#if HAVE_SSSE3 && CONFIG_USE_X86INC && ARCH_X86_64 && \ - !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE -INSTANTIATE_TEST_CASE_P( - SSSE3, FwdTrans8x8DCT, - ::testing::Values( - make_tuple(&vpx_fdct8x8_ssse3, &vpx_idct8x8_64_add_ssse3, 0, - VPX_BITS_8))); +#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_VP9_HIGHBITDEPTH && \ + !CONFIG_EMULATE_HARDWARE +INSTANTIATE_TEST_CASE_P(SSSE3, FwdTrans8x8DCT, + ::testing::Values(make_tuple(&vpx_fdct8x8_ssse3, + &vpx_idct8x8_64_add_ssse3, + 0, VPX_BITS_8))); #endif #if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE -INSTANTIATE_TEST_CASE_P( - MSA, FwdTrans8x8DCT, - ::testing::Values( - make_tuple(&vpx_fdct8x8_msa, &vpx_idct8x8_64_add_msa, 0, VPX_BITS_8))); +INSTANTIATE_TEST_CASE_P(MSA, FwdTrans8x8DCT, + ::testing::Values(make_tuple(&vpx_fdct8x8_msa, + &vpx_idct8x8_64_add_msa, 0, + VPX_BITS_8))); INSTANTIATE_TEST_CASE_P( MSA, FwdTrans8x8HT, ::testing::Values( diff --git a/libs/libvpx/test/frame_size_tests.cc b/libs/libvpx/test/frame_size_tests.cc index d39c8f6ee9..5a9b166e5b 100644 --- a/libs/libvpx/test/frame_size_tests.cc +++ b/libs/libvpx/test/frame_size_tests.cc @@ -13,12 +13,11 @@ namespace { -class VP9FrameSizeTestsLarge - : public ::libvpx_test::EncoderTest, - public ::testing::Test { +class VP9FrameSizeTestsLarge : public ::libvpx_test::EncoderTest, + public ::testing::Test { protected: - VP9FrameSizeTestsLarge() : EncoderTest(&::libvpx_test::kVP9), - 
expected_res_(VPX_CODEC_OK) {} + VP9FrameSizeTestsLarge() + : EncoderTest(&::libvpx_test::kVP9), expected_res_(VPX_CODEC_OK) {} virtual ~VP9FrameSizeTestsLarge() {} virtual void SetUp() { @@ -27,7 +26,7 @@ class VP9FrameSizeTestsLarge } virtual bool HandleDecodeResult(const vpx_codec_err_t res_dec, - const libvpx_test::VideoSource& /*video*/, + const libvpx_test::VideoSource & /*video*/, libvpx_test::Decoder *decoder) { EXPECT_EQ(expected_res_, res_dec) << decoder->DecodeError(); return !::testing::Test::HasFailure(); @@ -67,13 +66,13 @@ TEST_F(VP9FrameSizeTestsLarge, ValidSizes) { expected_res_ = VPX_CODEC_OK; ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); #else - // This test produces a pretty large single frame allocation, (roughly - // 25 megabits). The encoder allocates a good number of these frames - // one for each lag in frames (for 2 pass), and then one for each possible - // reference buffer (8) - we can end up with up to 30 buffers of roughly this - // size or almost 1 gig of memory. - // In total the allocations will exceed 2GiB which may cause a failure with - // mingw + wine, use a smaller size in that case. +// This test produces a pretty large single frame allocation, (roughly +// 25 megabits). The encoder allocates a good number of these frames +// one for each lag in frames (for 2 pass), and then one for each possible +// reference buffer (8) - we can end up with up to 30 buffers of roughly this +// size or almost 1 gig of memory. +// In total the allocations will exceed 2GiB which may cause a failure with +// mingw + wine, use a smaller size in that case. #if defined(_WIN32) && !defined(_WIN64) || defined(__OS2__) video.SetSize(4096, 3072); #else diff --git a/libs/libvpx/test/hadamard_test.cc b/libs/libvpx/test/hadamard_test.cc new file mode 100644 index 0000000000..e7715958ed --- /dev/null +++ b/libs/libvpx/test/hadamard_test.cc @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include + +#include "third_party/googletest/src/include/gtest/gtest.h" + +#include "./vpx_dsp_rtcd.h" + +#include "test/acm_random.h" +#include "test/register_state_check.h" + +namespace { + +using ::libvpx_test::ACMRandom; + +typedef void (*HadamardFunc)(const int16_t *a, int a_stride, int16_t *b); + +void hadamard_loop(const int16_t *a, int a_stride, int16_t *out) { + int16_t b[8]; + for (int i = 0; i < 8; i += 2) { + b[i + 0] = a[i * a_stride] + a[(i + 1) * a_stride]; + b[i + 1] = a[i * a_stride] - a[(i + 1) * a_stride]; + } + int16_t c[8]; + for (int i = 0; i < 8; i += 4) { + c[i + 0] = b[i + 0] + b[i + 2]; + c[i + 1] = b[i + 1] + b[i + 3]; + c[i + 2] = b[i + 0] - b[i + 2]; + c[i + 3] = b[i + 1] - b[i + 3]; + } + out[0] = c[0] + c[4]; + out[7] = c[1] + c[5]; + out[3] = c[2] + c[6]; + out[4] = c[3] + c[7]; + out[2] = c[0] - c[4]; + out[6] = c[1] - c[5]; + out[1] = c[2] - c[6]; + out[5] = c[3] - c[7]; +} + +void reference_hadamard8x8(const int16_t *a, int a_stride, int16_t *b) { + int16_t buf[64]; + for (int i = 0; i < 8; ++i) { + hadamard_loop(a + i, a_stride, buf + i * 8); + } + + for (int i = 0; i < 8; ++i) { + hadamard_loop(buf + i, 8, b + i * 8); + } +} + +void reference_hadamard16x16(const int16_t *a, int a_stride, int16_t *b) { + /* The source is a 16x16 block. The destination is rearranged to 8x32. + * Input is 9 bit. */ + reference_hadamard8x8(a + 0 + 0 * a_stride, a_stride, b + 0); + reference_hadamard8x8(a + 8 + 0 * a_stride, a_stride, b + 64); + reference_hadamard8x8(a + 0 + 8 * a_stride, a_stride, b + 128); + reference_hadamard8x8(a + 8 + 8 * a_stride, a_stride, b + 192); + + /* Overlay the 8x8 blocks and combine. 
*/ + for (int i = 0; i < 64; ++i) { + /* 8x8 steps the range up to 15 bits. */ + const int16_t a0 = b[0]; + const int16_t a1 = b[64]; + const int16_t a2 = b[128]; + const int16_t a3 = b[192]; + + /* Prevent the result from escaping int16_t. */ + const int16_t b0 = (a0 + a1) >> 1; + const int16_t b1 = (a0 - a1) >> 1; + const int16_t b2 = (a2 + a3) >> 1; + const int16_t b3 = (a2 - a3) >> 1; + + /* Store a 16 bit value. */ + b[0] = b0 + b2; + b[64] = b1 + b3; + b[128] = b0 - b2; + b[192] = b1 - b3; + + ++b; + } +} + +class HadamardTestBase : public ::testing::TestWithParam { + public: + virtual void SetUp() { + h_func_ = GetParam(); + rnd_.Reset(ACMRandom::DeterministicSeed()); + } + + protected: + HadamardFunc h_func_; + ACMRandom rnd_; +}; + +class Hadamard8x8Test : public HadamardTestBase {}; + +TEST_P(Hadamard8x8Test, CompareReferenceRandom) { + DECLARE_ALIGNED(16, int16_t, a[64]); + DECLARE_ALIGNED(16, int16_t, b[64]); + int16_t b_ref[64]; + for (int i = 0; i < 64; ++i) { + a[i] = rnd_.Rand9Signed(); + } + memset(b, 0, sizeof(b)); + memset(b_ref, 0, sizeof(b_ref)); + + reference_hadamard8x8(a, 8, b_ref); + ASM_REGISTER_STATE_CHECK(h_func_(a, 8, b)); + + // The order of the output is not important. Sort before checking. + std::sort(b, b + 64); + std::sort(b_ref, b_ref + 64); + EXPECT_EQ(0, memcmp(b, b_ref, sizeof(b))); +} + +TEST_P(Hadamard8x8Test, VaryStride) { + DECLARE_ALIGNED(16, int16_t, a[64 * 8]); + DECLARE_ALIGNED(16, int16_t, b[64]); + int16_t b_ref[64]; + for (int i = 0; i < 64 * 8; ++i) { + a[i] = rnd_.Rand9Signed(); + } + + for (int i = 8; i < 64; i += 8) { + memset(b, 0, sizeof(b)); + memset(b_ref, 0, sizeof(b_ref)); + + reference_hadamard8x8(a, i, b_ref); + ASM_REGISTER_STATE_CHECK(h_func_(a, i, b)); + + // The order of the output is not important. Sort before checking. 
+ std::sort(b, b + 64); + std::sort(b_ref, b_ref + 64); + EXPECT_EQ(0, memcmp(b, b_ref, sizeof(b))); + } +} + +INSTANTIATE_TEST_CASE_P(C, Hadamard8x8Test, + ::testing::Values(&vpx_hadamard_8x8_c)); + +#if HAVE_SSE2 +INSTANTIATE_TEST_CASE_P(SSE2, Hadamard8x8Test, + ::testing::Values(&vpx_hadamard_8x8_sse2)); +#endif // HAVE_SSE2 + +#if HAVE_SSSE3 && ARCH_X86_64 +INSTANTIATE_TEST_CASE_P(SSSE3, Hadamard8x8Test, + ::testing::Values(&vpx_hadamard_8x8_ssse3)); +#endif // HAVE_SSSE3 && ARCH_X86_64 + +#if HAVE_NEON +INSTANTIATE_TEST_CASE_P(NEON, Hadamard8x8Test, + ::testing::Values(&vpx_hadamard_8x8_neon)); +#endif // HAVE_NEON + +class Hadamard16x16Test : public HadamardTestBase {}; + +TEST_P(Hadamard16x16Test, CompareReferenceRandom) { + DECLARE_ALIGNED(16, int16_t, a[16 * 16]); + DECLARE_ALIGNED(16, int16_t, b[16 * 16]); + int16_t b_ref[16 * 16]; + for (int i = 0; i < 16 * 16; ++i) { + a[i] = rnd_.Rand9Signed(); + } + memset(b, 0, sizeof(b)); + memset(b_ref, 0, sizeof(b_ref)); + + reference_hadamard16x16(a, 16, b_ref); + ASM_REGISTER_STATE_CHECK(h_func_(a, 16, b)); + + // The order of the output is not important. Sort before checking. + std::sort(b, b + 16 * 16); + std::sort(b_ref, b_ref + 16 * 16); + EXPECT_EQ(0, memcmp(b, b_ref, sizeof(b))); +} + +TEST_P(Hadamard16x16Test, VaryStride) { + DECLARE_ALIGNED(16, int16_t, a[16 * 16 * 8]); + DECLARE_ALIGNED(16, int16_t, b[16 * 16]); + int16_t b_ref[16 * 16]; + for (int i = 0; i < 16 * 16 * 8; ++i) { + a[i] = rnd_.Rand9Signed(); + } + + for (int i = 8; i < 64; i += 8) { + memset(b, 0, sizeof(b)); + memset(b_ref, 0, sizeof(b_ref)); + + reference_hadamard16x16(a, i, b_ref); + ASM_REGISTER_STATE_CHECK(h_func_(a, i, b)); + + // The order of the output is not important. Sort before checking. 
+ std::sort(b, b + 16 * 16); + std::sort(b_ref, b_ref + 16 * 16); + EXPECT_EQ(0, memcmp(b, b_ref, sizeof(b))); + } +} + +INSTANTIATE_TEST_CASE_P(C, Hadamard16x16Test, + ::testing::Values(&vpx_hadamard_16x16_c)); + +#if HAVE_SSE2 +INSTANTIATE_TEST_CASE_P(SSE2, Hadamard16x16Test, + ::testing::Values(&vpx_hadamard_16x16_sse2)); +#endif // HAVE_SSE2 + +#if HAVE_NEON +INSTANTIATE_TEST_CASE_P(NEON, Hadamard16x16Test, + ::testing::Values(&vpx_hadamard_16x16_neon)); +#endif // HAVE_NEON +} // namespace diff --git a/libs/libvpx/test/i420_video_source.h b/libs/libvpx/test/i420_video_source.h index 0a184805c2..49573823b4 100644 --- a/libs/libvpx/test/i420_video_source.h +++ b/libs/libvpx/test/i420_video_source.h @@ -21,14 +21,11 @@ namespace libvpx_test { // so that we can do actual file encodes. class I420VideoSource : public YUVVideoSource { public: - I420VideoSource(const std::string &file_name, - unsigned int width, unsigned int height, - int rate_numerator, int rate_denominator, + I420VideoSource(const std::string &file_name, unsigned int width, + unsigned int height, int rate_numerator, int rate_denominator, unsigned int start, int limit) - : YUVVideoSource(file_name, VPX_IMG_FMT_I420, - width, height, - rate_numerator, rate_denominator, - start, limit) {} + : YUVVideoSource(file_name, VPX_IMG_FMT_I420, width, height, + rate_numerator, rate_denominator, start, limit) {} }; } // namespace libvpx_test diff --git a/libs/libvpx/test/idct8x8_test.cc b/libs/libvpx/test/idct8x8_test.cc index 7f9d751d65..7951bb93c9 100644 --- a/libs/libvpx/test/idct8x8_test.cc +++ b/libs/libvpx/test/idct8x8_test.cc @@ -17,29 +17,21 @@ #include "./vpx_dsp_rtcd.h" #include "test/acm_random.h" #include "vpx/vpx_integer.h" +#include "vpx_ports/msvc.h" // for round() using libvpx_test::ACMRandom; namespace { -#ifdef _MSC_VER -static int round(double x) { - if (x < 0) - return static_cast(ceil(x - 0.5)); - else - return static_cast(floor(x + 0.5)); -} -#endif - void reference_dct_1d(double input[8], 
double output[8]) { const double kPi = 3.141592653589793238462643383279502884; const double kInvSqrt2 = 0.707106781186547524400844362104; for (int k = 0; k < 8; k++) { output[k] = 0.0; - for (int n = 0; n < 8; n++) - output[k] += input[n]*cos(kPi*(2*n+1)*k/16.0); - if (k == 0) - output[k] = output[k]*kInvSqrt2; + for (int n = 0; n < 8; n++) { + output[k] += input[n] * cos(kPi * (2 * n + 1) * k / 16.0); + } + if (k == 0) output[k] = output[k] * kInvSqrt2; } } @@ -47,24 +39,19 @@ void reference_dct_2d(int16_t input[64], double output[64]) { // First transform columns for (int i = 0; i < 8; ++i) { double temp_in[8], temp_out[8]; - for (int j = 0; j < 8; ++j) - temp_in[j] = input[j*8 + i]; + for (int j = 0; j < 8; ++j) temp_in[j] = input[j * 8 + i]; reference_dct_1d(temp_in, temp_out); - for (int j = 0; j < 8; ++j) - output[j*8 + i] = temp_out[j]; + for (int j = 0; j < 8; ++j) output[j * 8 + i] = temp_out[j]; } // Then transform rows for (int i = 0; i < 8; ++i) { double temp_in[8], temp_out[8]; - for (int j = 0; j < 8; ++j) - temp_in[j] = output[j + i*8]; + for (int j = 0; j < 8; ++j) temp_in[j] = output[j + i * 8]; reference_dct_1d(temp_in, temp_out); - for (int j = 0; j < 8; ++j) - output[j + i*8] = temp_out[j]; + for (int j = 0; j < 8; ++j) output[j + i * 8] = temp_out[j]; } // Scale by some magic number - for (int i = 0; i < 64; ++i) - output[i] *= 2; + for (int i = 0; i < 64; ++i) output[i] *= 2; } TEST(VP9Idct8x8Test, AccuracyCheck) { @@ -81,19 +68,18 @@ TEST(VP9Idct8x8Test, AccuracyCheck) { dst[j] = rnd.Rand8(); } // Initialize a test block with input range [-255, 255]. 
- for (int j = 0; j < 64; ++j) - input[j] = src[j] - dst[j]; + for (int j = 0; j < 64; ++j) input[j] = src[j] - dst[j]; reference_dct_2d(input, output_r); - for (int j = 0; j < 64; ++j) - coeff[j] = round(output_r[j]); + for (int j = 0; j < 64; ++j) { + coeff[j] = static_cast(round(output_r[j])); + } vpx_idct8x8_64_add_c(coeff, dst, 8); for (int j = 0; j < 64; ++j) { const int diff = dst[j] - src[j]; const int error = diff * diff; - EXPECT_GE(1, error) - << "Error: 8x8 FDCT/IDCT has error " << error - << " at index " << j; + EXPECT_GE(1, error) << "Error: 8x8 FDCT/IDCT has error " << error + << " at index " << j; } } } diff --git a/libs/libvpx/test/idct_test.cc b/libs/libvpx/test/idct_test.cc index 39db3e4c61..f54f2c005a 100644 --- a/libs/libvpx/test/idct_test.cc +++ b/libs/libvpx/test/idct_test.cc @@ -43,11 +43,12 @@ class IDCTTest : public ::testing::TestWithParam { TEST_P(IDCTTest, TestGuardBlocks) { int i; - for (i = 0; i < 256; i++) + for (i = 0; i < 256; i++) { if ((i & 0xF) < 4 && i < 64) EXPECT_EQ(0, output[i]) << i; else EXPECT_EQ(255, output[i]); + } } TEST_P(IDCTTest, TestAllZeros) { @@ -55,11 +56,12 @@ TEST_P(IDCTTest, TestAllZeros) { ASM_REGISTER_STATE_CHECK(UUT(input, output, 16, output, 16)); - for (i = 0; i < 256; i++) + for (i = 0; i < 256; i++) { if ((i & 0xF) < 4 && i < 64) EXPECT_EQ(0, output[i]) << "i==" << i; else EXPECT_EQ(255, output[i]) << "i==" << i; + } } TEST_P(IDCTTest, TestAllOnes) { @@ -68,11 +70,12 @@ TEST_P(IDCTTest, TestAllOnes) { input[0] = 4; ASM_REGISTER_STATE_CHECK(UUT(input, output, 16, output, 16)); - for (i = 0; i < 256; i++) + for (i = 0; i < 256; i++) { if ((i & 0xF) < 4 && i < 64) EXPECT_EQ(1, output[i]) << "i==" << i; else EXPECT_EQ(255, output[i]) << "i==" << i; + } } TEST_P(IDCTTest, TestAddOne) { @@ -82,11 +85,12 @@ TEST_P(IDCTTest, TestAddOne) { input[0] = 4; ASM_REGISTER_STATE_CHECK(UUT(input, predict, 16, output, 16)); - for (i = 0; i < 256; i++) + for (i = 0; i < 256; i++) { if ((i & 0xF) < 4 && i < 64) 
EXPECT_EQ(i + 1, output[i]) << "i==" << i; else EXPECT_EQ(255, output[i]) << "i==" << i; + } } TEST_P(IDCTTest, TestWithData) { @@ -96,7 +100,7 @@ TEST_P(IDCTTest, TestWithData) { ASM_REGISTER_STATE_CHECK(UUT(input, output, 16, output, 16)); - for (i = 0; i < 256; i++) + for (i = 0; i < 256; i++) { if ((i & 0xF) > 3 || i > 63) EXPECT_EQ(255, output[i]) << "i==" << i; else if (i == 0) @@ -107,6 +111,7 @@ TEST_P(IDCTTest, TestWithData) { EXPECT_EQ(3, output[i]) << "i==" << i; else EXPECT_EQ(0, output[i]) << "i==" << i; + } } INSTANTIATE_TEST_CASE_P(C, IDCTTest, ::testing::Values(vp8_short_idct4x4llm_c)); diff --git a/libs/libvpx/test/invalid_file_test.cc b/libs/libvpx/test/invalid_file_test.cc index f4241eb822..bebbb141d0 100644 --- a/libs/libvpx/test/invalid_file_test.cc +++ b/libs/libvpx/test/invalid_file_test.cc @@ -34,21 +34,19 @@ std::ostream &operator<<(std::ostream &os, const DecodeParam &dp) { return os << "threads: " << dp.threads << " file: " << dp.filename; } -class InvalidFileTest - : public ::libvpx_test::DecoderTest, - public ::libvpx_test::CodecTestWithParam { +class InvalidFileTest : public ::libvpx_test::DecoderTest, + public ::libvpx_test::CodecTestWithParam { protected: InvalidFileTest() : DecoderTest(GET_PARAM(0)), res_file_(NULL) {} virtual ~InvalidFileTest() { - if (res_file_ != NULL) - fclose(res_file_); + if (res_file_ != NULL) fclose(res_file_); } void OpenResFile(const std::string &res_file_name_) { res_file_ = libvpx_test::OpenTestDataFile(res_file_name_); ASSERT_TRUE(res_file_ != NULL) << "Result file open failed. Filename: " - << res_file_name_; + << res_file_name_; } virtual bool HandleDecodeResult( @@ -72,8 +70,9 @@ class InvalidFileTest EXPECT_TRUE(res_dec == expected_res_dec || res_dec == VPX_CODEC_CORRUPT_FRAME) << "Results don't match: frame number = " << video.frame_number() - << ". (" << decoder->DecodeError() << "). Expected: " - << expected_res_dec << " or " << VPX_CODEC_CORRUPT_FRAME; + << ". 
(" << decoder->DecodeError() + << "). Expected: " << expected_res_dec << " or " + << VPX_CODEC_CORRUPT_FRAME; } else { EXPECT_EQ(expected_res_dec, res_dec) << "Results don't match: frame number = " << video.frame_number() @@ -85,23 +84,24 @@ class InvalidFileTest void RunTest() { const DecodeParam input = GET_PARAM(1); - libvpx_test::CompressedVideoSource *video = NULL; vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t(); cfg.threads = input.threads; const std::string filename = input.filename; // Open compressed video file. + testing::internal::scoped_ptr video; if (filename.substr(filename.length() - 3, 3) == "ivf") { - video = new libvpx_test::IVFVideoSource(filename); + video.reset(new libvpx_test::IVFVideoSource(filename)); } else if (filename.substr(filename.length() - 4, 4) == "webm") { #if CONFIG_WEBM_IO - video = new libvpx_test::WebMVideoSource(filename); + video.reset(new libvpx_test::WebMVideoSource(filename)); #else fprintf(stderr, "WebM IO is disabled, skipping test vector %s\n", filename.c_str()); return; #endif } + ASSERT_TRUE(video.get() != NULL); video->Init(); // Construct result file name. The file holds a list of expected integer @@ -111,33 +111,36 @@ class InvalidFileTest OpenResFile(res_filename); // Decode frame, and check the md5 matching. 
- ASSERT_NO_FATAL_FAILURE(RunLoop(video, cfg)); - delete video; + ASSERT_NO_FATAL_FAILURE(RunLoop(video.get(), cfg)); } private: FILE *res_file_; }; -TEST_P(InvalidFileTest, ReturnCode) { - RunTest(); -} +TEST_P(InvalidFileTest, ReturnCode) { RunTest(); } const DecodeParam kVP9InvalidFileTests[] = { - {1, "invalid-vp90-02-v2.webm"}, + { 1, "invalid-vp90-02-v2.webm" }, #if CONFIG_VP9_HIGHBITDEPTH - {1, "invalid-vp90-2-00-quantizer-00.webm.ivf.s5861_r01-05_b6-.v2.ivf"}, + { 1, "invalid-vp90-2-00-quantizer-00.webm.ivf.s5861_r01-05_b6-.v2.ivf" }, #endif - {1, "invalid-vp90-03-v3.webm"}, - {1, "invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-.ivf"}, - {1, "invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-z.ivf"}, - {1, "invalid-vp90-2-12-droppable_1.ivf.s3676_r01-05_b6-.ivf"}, - {1, "invalid-vp90-2-05-resize.ivf.s59293_r01-05_b6-.ivf"}, - {1, "invalid-vp90-2-09-subpixel-00.ivf.s20492_r01-05_b6-.v2.ivf"}, - {1, "invalid-vp91-2-mixedrefcsp-444to420.ivf"}, - {1, "invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf"}, - {1, "invalid-vp90-2-03-size-224x196.webm.ivf.s44156_r01-05_b6-.ivf"}, - {1, "invalid-vp90-2-03-size-202x210.webm.ivf.s113306_r01-05_b6-.ivf"}, + { 1, "invalid-vp90-03-v3.webm" }, + { 1, "invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-.ivf" }, + { 1, "invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-z.ivf" }, +// This file will cause a large allocation which is expected to fail in 32-bit +// environments. Test x86 for coverage purposes as the allocation failure will +// be in platform agnostic code. 
+#if ARCH_X86 + { 1, "invalid-vp90-2-00-quantizer-63.ivf.kf_65527x61446.ivf" }, +#endif + { 1, "invalid-vp90-2-12-droppable_1.ivf.s3676_r01-05_b6-.ivf" }, + { 1, "invalid-vp90-2-05-resize.ivf.s59293_r01-05_b6-.ivf" }, + { 1, "invalid-vp90-2-09-subpixel-00.ivf.s20492_r01-05_b6-.v2.ivf" }, + { 1, "invalid-vp91-2-mixedrefcsp-444to420.ivf" }, + { 1, "invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf" }, + { 1, "invalid-vp90-2-03-size-224x196.webm.ivf.s44156_r01-05_b6-.ivf" }, + { 1, "invalid-vp90-2-03-size-202x210.webm.ivf.s113306_r01-05_b6-.ivf" }, }; VP9_INSTANTIATE_TEST_CASE(InvalidFileTest, @@ -149,34 +152,45 @@ class InvalidFileInvalidPeekTest : public InvalidFileTest { protected: InvalidFileInvalidPeekTest() : InvalidFileTest() {} virtual void HandlePeekResult(libvpx_test::Decoder *const /*decoder*/, - libvpx_test::CompressedVideoSource* /*video*/, + libvpx_test::CompressedVideoSource * /*video*/, const vpx_codec_err_t /*res_peek*/) {} }; -TEST_P(InvalidFileInvalidPeekTest, ReturnCode) { - RunTest(); -} +TEST_P(InvalidFileInvalidPeekTest, ReturnCode) { RunTest(); } +#if CONFIG_VP8_DECODER +const DecodeParam kVP8InvalidFileTests[] = { + { 1, "invalid-vp80-00-comprehensive-018.ivf.2kf_0x6.ivf" }, +}; + +VP8_INSTANTIATE_TEST_CASE(InvalidFileInvalidPeekTest, + ::testing::ValuesIn(kVP8InvalidFileTests)); +#endif // CONFIG_VP8_DECODER + +#if CONFIG_VP9_DECODER const DecodeParam kVP9InvalidFileInvalidPeekTests[] = { - {1, "invalid-vp90-01-v3.webm"}, + { 1, "invalid-vp90-01-v3.webm" }, }; VP9_INSTANTIATE_TEST_CASE(InvalidFileInvalidPeekTest, ::testing::ValuesIn(kVP9InvalidFileInvalidPeekTests)); const DecodeParam kMultiThreadedVP9InvalidFileTests[] = { - {4, "invalid-vp90-2-08-tile_1x4_frame_parallel_all_key.webm"}, - {4, "invalid-" - "vp90-2-08-tile_1x2_frame_parallel.webm.ivf.s47039_r01-05_b6-.ivf"}, - {4, "invalid-vp90-2-08-tile_1x8_frame_parallel.webm.ivf.s288_r01-05_b6-.ivf"}, - {2, "invalid-vp90-2-09-aq2.webm.ivf.s3984_r01-05_b6-.v2.ivf"}, - {4, 
"invalid-vp90-2-09-subpixel-00.ivf.s19552_r01-05_b6-.v2.ivf"}, + { 4, "invalid-vp90-2-08-tile_1x4_frame_parallel_all_key.webm" }, + { 4, + "invalid-" + "vp90-2-08-tile_1x2_frame_parallel.webm.ivf.s47039_r01-05_b6-.ivf" }, + { 4, + "invalid-vp90-2-08-tile_1x8_frame_parallel.webm.ivf.s288_r01-05_b6-.ivf" }, + { 2, "invalid-vp90-2-09-aq2.webm.ivf.s3984_r01-05_b6-.v2.ivf" }, + { 4, "invalid-vp90-2-09-subpixel-00.ivf.s19552_r01-05_b6-.v2.ivf" }, }; INSTANTIATE_TEST_CASE_P( VP9MultiThreaded, InvalidFileTest, ::testing::Combine( ::testing::Values( - static_cast(&libvpx_test::kVP9)), + static_cast(&libvpx_test::kVP9)), ::testing::ValuesIn(kMultiThreadedVP9InvalidFileTests))); +#endif // CONFIG_VP9_DECODER } // namespace diff --git a/libs/libvpx/test/ivf_video_source.h b/libs/libvpx/test/ivf_video_source.h index 824a39d7e6..b87624a11f 100644 --- a/libs/libvpx/test/ivf_video_source.h +++ b/libs/libvpx/test/ivf_video_source.h @@ -29,19 +29,13 @@ static unsigned int MemGetLe32(const uint8_t *mem) { class IVFVideoSource : public CompressedVideoSource { public: explicit IVFVideoSource(const std::string &file_name) - : file_name_(file_name), - input_file_(NULL), - compressed_frame_buf_(NULL), - frame_sz_(0), - frame_(0), - end_of_file_(false) { - } + : file_name_(file_name), input_file_(NULL), compressed_frame_buf_(NULL), + frame_sz_(0), frame_(0), end_of_file_(false) {} virtual ~IVFVideoSource() { delete[] compressed_frame_buf_; - if (input_file_) - fclose(input_file_); + if (input_file_) fclose(input_file_); } virtual void Init() { @@ -54,15 +48,16 @@ class IVFVideoSource : public CompressedVideoSource { virtual void Begin() { input_file_ = OpenTestDataFile(file_name_); ASSERT_TRUE(input_file_ != NULL) << "Input file open failed. 
Filename: " - << file_name_; + << file_name_; // Read file header uint8_t file_hdr[kIvfFileHdrSize]; ASSERT_EQ(kIvfFileHdrSize, fread(file_hdr, 1, kIvfFileHdrSize, input_file_)) << "File header read failed."; // Check file header - ASSERT_TRUE(file_hdr[0] == 'D' && file_hdr[1] == 'K' && file_hdr[2] == 'I' - && file_hdr[3] == 'F') << "Input is not an IVF file."; + ASSERT_TRUE(file_hdr[0] == 'D' && file_hdr[1] == 'K' && + file_hdr[2] == 'I' && file_hdr[3] == 'F') + << "Input is not an IVF file."; FillFrame(); } @@ -76,8 +71,8 @@ class IVFVideoSource : public CompressedVideoSource { ASSERT_TRUE(input_file_ != NULL); uint8_t frame_hdr[kIvfFrameHdrSize]; // Check frame header and read a frame from input_file. - if (fread(frame_hdr, 1, kIvfFrameHdrSize, input_file_) - != kIvfFrameHdrSize) { + if (fread(frame_hdr, 1, kIvfFrameHdrSize, input_file_) != + kIvfFrameHdrSize) { end_of_file_ = true; } else { end_of_file_ = false; diff --git a/libs/libvpx/test/keyframe_test.cc b/libs/libvpx/test/keyframe_test.cc index d8b21a14d2..38bd923b7d 100644 --- a/libs/libvpx/test/keyframe_test.cc +++ b/libs/libvpx/test/keyframe_test.cc @@ -17,8 +17,9 @@ namespace { -class KeyframeTest : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWithParam { +class KeyframeTest + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWithParam { protected: KeyframeTest() : EncoderTest(GET_PARAM(0)) {} virtual ~KeyframeTest() {} @@ -34,10 +35,12 @@ class KeyframeTest : public ::libvpx_test::EncoderTest, virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video, ::libvpx_test::Encoder *encoder) { - if (kf_do_force_kf_) + if (kf_do_force_kf_) { frame_flags_ = (video->frame() % 3) ? 
0 : VPX_EFLAG_FORCE_KF; - if (set_cpu_used_ && video->frame() == 1) + } + if (set_cpu_used_ && video->frame() == 1) { encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_); + } } virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) { @@ -65,8 +68,7 @@ TEST_P(KeyframeTest, TestRandomVideoSource) { // In realtime mode - auto placed keyframes are exceedingly rare, don't // bother with this check if(GetParam() > 0) - if (GET_PARAM(1) > 0) - EXPECT_GT(kf_count_, 1); + if (GET_PARAM(1) > 0) EXPECT_GT(kf_count_, 1); } TEST_P(KeyframeTest, TestDisableKeyframes) { @@ -114,8 +116,7 @@ TEST_P(KeyframeTest, TestAutoKeyframe) { // may not produce a keyframe like we expect. This is necessary when running // on very slow environments (like Valgrind). The step -11 was determined // experimentally as the fastest mode that still throws the keyframe. - if (deadline_ == VPX_DL_REALTIME) - set_cpu_used_ = -11; + if (deadline_ == VPX_DL_REALTIME) set_cpu_used_ = -11; // This clip has a cut scene every 30 frames -> Frame 0, 30, 60, 90, 120. // I check only the first 40 frames to make sure there's a keyframe at frame @@ -135,7 +136,7 @@ TEST_P(KeyframeTest, TestAutoKeyframe) { iter != kf_pts_list_.end(); ++iter) { if (deadline_ == VPX_DL_REALTIME && *iter > 0) EXPECT_EQ(0, (*iter - 1) % 30) << "Unexpected keyframe at frame " - << *iter; + << *iter; else EXPECT_EQ(0, *iter % 30) << "Unexpected keyframe at frame " << *iter; } diff --git a/libs/libvpx/test/level_test.cc b/libs/libvpx/test/level_test.cc new file mode 100644 index 0000000000..fbbb539df1 --- /dev/null +++ b/libs/libvpx/test/level_test.cc @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. 
All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ +#include "third_party/googletest/src/include/gtest/gtest.h" +#include "test/codec_factory.h" +#include "test/encode_test_driver.h" +#include "test/i420_video_source.h" +#include "test/util.h" + +namespace { +class LevelTest + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWith2Params { + protected: + LevelTest() + : EncoderTest(GET_PARAM(0)), encoding_mode_(GET_PARAM(1)), + cpu_used_(GET_PARAM(2)), min_gf_internal_(24), target_level_(0), + level_(0) {} + virtual ~LevelTest() {} + + virtual void SetUp() { + InitializeConfig(); + SetMode(encoding_mode_); + if (encoding_mode_ != ::libvpx_test::kRealTime) { + cfg_.g_lag_in_frames = 25; + cfg_.rc_end_usage = VPX_VBR; + } else { + cfg_.g_lag_in_frames = 0; + cfg_.rc_end_usage = VPX_CBR; + } + cfg_.rc_2pass_vbr_minsection_pct = 5; + cfg_.rc_2pass_vbr_maxsection_pct = 2000; + cfg_.rc_target_bitrate = 400; + cfg_.rc_max_quantizer = 63; + cfg_.rc_min_quantizer = 0; + } + + virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video, + ::libvpx_test::Encoder *encoder) { + if (video->frame() == 0) { + encoder->Control(VP8E_SET_CPUUSED, cpu_used_); + encoder->Control(VP9E_SET_TARGET_LEVEL, target_level_); + encoder->Control(VP9E_SET_MIN_GF_INTERVAL, min_gf_internal_); + if (encoding_mode_ != ::libvpx_test::kRealTime) { + encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1); + encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7); + encoder->Control(VP8E_SET_ARNR_STRENGTH, 5); + encoder->Control(VP8E_SET_ARNR_TYPE, 3); + } + } + encoder->Control(VP9E_GET_LEVEL, &level_); + ASSERT_LE(level_, 51); + ASSERT_GE(level_, 0); + } + + ::libvpx_test::TestMode encoding_mode_; + int cpu_used_; + int min_gf_internal_; + int target_level_; + int level_; +}; + +// Test for keeping level stats only +TEST_P(LevelTest, TestTargetLevel0) { + ::libvpx_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0, + 40); + 
target_level_ = 0; + min_gf_internal_ = 4; + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); + ASSERT_EQ(11, level_); + + cfg_.rc_target_bitrate = 1600; + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); + ASSERT_EQ(20, level_); +} + +// Test for level control being turned off +TEST_P(LevelTest, TestTargetLevel255) { + ::libvpx_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0, + 30); + target_level_ = 255; + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); +} + +TEST_P(LevelTest, TestTargetLevelApi) { + ::libvpx_test::I420VideoSource video("hantro_odd.yuv", 208, 144, 30, 1, 0, 1); + static const vpx_codec_iface_t *codec = &vpx_codec_vp9_cx_algo; + vpx_codec_ctx_t enc; + vpx_codec_enc_cfg_t cfg; + EXPECT_EQ(VPX_CODEC_OK, vpx_codec_enc_config_default(codec, &cfg, 0)); + EXPECT_EQ(VPX_CODEC_OK, vpx_codec_enc_init(&enc, codec, &cfg, 0)); + for (int level = 0; level <= 256; ++level) { + if (level == 10 || level == 11 || level == 20 || level == 21 || + level == 30 || level == 31 || level == 40 || level == 41 || + level == 50 || level == 51 || level == 52 || level == 60 || + level == 61 || level == 62 || level == 0 || level == 255) + EXPECT_EQ(VPX_CODEC_OK, + vpx_codec_control(&enc, VP9E_SET_TARGET_LEVEL, level)); + else + EXPECT_EQ(VPX_CODEC_INVALID_PARAM, + vpx_codec_control(&enc, VP9E_SET_TARGET_LEVEL, level)); + } + EXPECT_EQ(VPX_CODEC_OK, vpx_codec_destroy(&enc)); +} + +VP9_INSTANTIATE_TEST_CASE(LevelTest, + ::testing::Values(::libvpx_test::kTwoPassGood, + ::libvpx_test::kOnePassGood), + ::testing::Range(0, 9)); +} // namespace diff --git a/libs/libvpx/test/lpf_8_test.cc b/libs/libvpx/test/lpf_8_test.cc deleted file mode 100644 index 0bf6b0c232..0000000000 --- a/libs/libvpx/test/lpf_8_test.cc +++ /dev/null @@ -1,718 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include -#include - -#include "third_party/googletest/src/include/gtest/gtest.h" - -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" -#include "test/acm_random.h" -#include "test/clear_system_state.h" -#include "test/register_state_check.h" -#include "test/util.h" -#include "vp9/common/vp9_entropy.h" -#include "vp9/common/vp9_loopfilter.h" -#include "vpx/vpx_integer.h" - -using libvpx_test::ACMRandom; - -namespace { -// Horizontally and Vertically need 32x32: 8 Coeffs preceeding filtered section -// 16 Coefs within filtered section -// 8 Coeffs following filtered section -const int kNumCoeffs = 1024; - -const int number_of_iterations = 10000; - -#if CONFIG_VP9_HIGHBITDEPTH -typedef void (*loop_op_t)(uint16_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count, int bd); -typedef void (*dual_loop_op_t)(uint16_t *s, int p, const uint8_t *blimit0, - const uint8_t *limit0, const uint8_t *thresh0, - const uint8_t *blimit1, const uint8_t *limit1, - const uint8_t *thresh1, int bd); -#else -typedef void (*loop_op_t)(uint8_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count); -typedef void (*dual_loop_op_t)(uint8_t *s, int p, const uint8_t *blimit0, - const uint8_t *limit0, const uint8_t *thresh0, - const uint8_t *blimit1, const uint8_t *limit1, - const uint8_t *thresh1); -#endif // CONFIG_VP9_HIGHBITDEPTH - -typedef std::tr1::tuple loop8_param_t; -typedef std::tr1::tuple dualloop8_param_t; - -#if HAVE_SSE2 -#if CONFIG_VP9_HIGHBITDEPTH -void wrapper_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count, int bd) { - vpx_highbd_lpf_vertical_16_sse2(s, p, blimit, limit, thresh, bd); -} - -void 
wrapper_vertical_16_c(uint16_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count, int bd) { - vpx_highbd_lpf_vertical_16_c(s, p, blimit, limit, thresh, bd); -} - -void wrapper_vertical_16_dual_sse2(uint16_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count, int bd) { - vpx_highbd_lpf_vertical_16_dual_sse2(s, p, blimit, limit, thresh, bd); -} - -void wrapper_vertical_16_dual_c(uint16_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count, int bd) { - vpx_highbd_lpf_vertical_16_dual_c(s, p, blimit, limit, thresh, bd); -} -#else -void wrapper_vertical_16_sse2(uint8_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count) { - vpx_lpf_vertical_16_sse2(s, p, blimit, limit, thresh); -} - -void wrapper_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count) { - vpx_lpf_vertical_16_c(s, p, blimit, limit, thresh); -} - -void wrapper_vertical_16_dual_sse2(uint8_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count) { - vpx_lpf_vertical_16_dual_sse2(s, p, blimit, limit, thresh); -} - -void wrapper_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count) { - vpx_lpf_vertical_16_dual_c(s, p, blimit, limit, thresh); -} -#endif // CONFIG_VP9_HIGHBITDEPTH -#endif // HAVE_SSE2 - -#if HAVE_NEON_ASM -#if CONFIG_VP9_HIGHBITDEPTH -// No neon high bitdepth functions. 
-#else -void wrapper_vertical_16_neon(uint8_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count) { - vpx_lpf_vertical_16_neon(s, p, blimit, limit, thresh); -} - -void wrapper_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count) { - vpx_lpf_vertical_16_c(s, p, blimit, limit, thresh); -} - -void wrapper_vertical_16_dual_neon(uint8_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count) { - vpx_lpf_vertical_16_dual_neon(s, p, blimit, limit, thresh); -} - -void wrapper_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count) { - vpx_lpf_vertical_16_dual_c(s, p, blimit, limit, thresh); -} -#endif // CONFIG_VP9_HIGHBITDEPTH -#endif // HAVE_NEON_ASM - -#if HAVE_MSA && (!CONFIG_VP9_HIGHBITDEPTH) -void wrapper_vertical_16_msa(uint8_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count) { - vpx_lpf_vertical_16_msa(s, p, blimit, limit, thresh); -} - -void wrapper_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count) { - vpx_lpf_vertical_16_c(s, p, blimit, limit, thresh); -} -#endif // HAVE_MSA && (!CONFIG_VP9_HIGHBITDEPTH) - -class Loop8Test6Param : public ::testing::TestWithParam { - public: - virtual ~Loop8Test6Param() {} - virtual void SetUp() { - loopfilter_op_ = GET_PARAM(0); - ref_loopfilter_op_ = GET_PARAM(1); - bit_depth_ = GET_PARAM(2); - count_ = GET_PARAM(3); - mask_ = (1 << bit_depth_) - 1; - } - - virtual void TearDown() { libvpx_test::ClearSystemState(); } - - protected: - int bit_depth_; - int count_; - int mask_; - loop_op_t loopfilter_op_; - loop_op_t ref_loopfilter_op_; -}; - -class Loop8Test9Param : public ::testing::TestWithParam { - public: - virtual ~Loop8Test9Param() {} - virtual void SetUp() { - loopfilter_op_ = GET_PARAM(0); - 
ref_loopfilter_op_ = GET_PARAM(1); - bit_depth_ = GET_PARAM(2); - mask_ = (1 << bit_depth_) - 1; - } - - virtual void TearDown() { libvpx_test::ClearSystemState(); } - - protected: - int bit_depth_; - int mask_; - dual_loop_op_t loopfilter_op_; - dual_loop_op_t ref_loopfilter_op_; -}; - -TEST_P(Loop8Test6Param, OperationCheck) { - ACMRandom rnd(ACMRandom::DeterministicSeed()); - const int count_test_block = number_of_iterations; -#if CONFIG_VP9_HIGHBITDEPTH - int32_t bd = bit_depth_; - DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]); - DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]); -#else - DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]); - DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]); -#endif // CONFIG_VP9_HIGHBITDEPTH - int err_count_total = 0; - int first_failure = -1; - for (int i = 0; i < count_test_block; ++i) { - int err_count = 0; - uint8_t tmp = static_cast(rnd(3 * MAX_LOOP_FILTER + 4)); - DECLARE_ALIGNED(16, const uint8_t, blimit[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - tmp = static_cast(rnd(MAX_LOOP_FILTER)); - DECLARE_ALIGNED(16, const uint8_t, limit[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - tmp = rnd.Rand8(); - DECLARE_ALIGNED(16, const uint8_t, thresh[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - int32_t p = kNumCoeffs/32; - - uint16_t tmp_s[kNumCoeffs]; - int j = 0; - while (j < kNumCoeffs) { - uint8_t val = rnd.Rand8(); - if (val & 0x80) { // 50% chance to choose a new value. 
- tmp_s[j] = rnd.Rand16(); - j++; - } else { // 50% chance to repeat previous value in row X times - int k = 0; - while (k++ < ((val & 0x1f) + 1) && j < kNumCoeffs) { - if (j < 1) { - tmp_s[j] = rnd.Rand16(); - } else if (val & 0x20) { // Increment by an value within the limit - tmp_s[j] = (tmp_s[j - 1] + (*limit - 1)); - } else { // Decrement by an value within the limit - tmp_s[j] = (tmp_s[j - 1] - (*limit - 1)); - } - j++; - } - } - } - for (j = 0; j < kNumCoeffs; j++) { - if (i % 2) { - s[j] = tmp_s[j] & mask_; - } else { - s[j] = tmp_s[p * (j % p) + j / p] & mask_; - } - ref_s[j] = s[j]; - } -#if CONFIG_VP9_HIGHBITDEPTH - ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, count_, bd); - ASM_REGISTER_STATE_CHECK( - loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_, bd)); -#else - ref_loopfilter_op_(ref_s+8+p*8, p, blimit, limit, thresh, count_); - ASM_REGISTER_STATE_CHECK( - loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_)); -#endif // CONFIG_VP9_HIGHBITDEPTH - - for (int j = 0; j < kNumCoeffs; ++j) { - err_count += ref_s[j] != s[j]; - } - if (err_count && !err_count_total) { - first_failure = i; - } - err_count_total += err_count; - } - EXPECT_EQ(0, err_count_total) - << "Error: Loop8Test6Param, C output doesn't match SSE2 " - "loopfilter output. 
" - << "First failed at test case " << first_failure; -} - -TEST_P(Loop8Test6Param, ValueCheck) { - ACMRandom rnd(ACMRandom::DeterministicSeed()); - const int count_test_block = number_of_iterations; -#if CONFIG_VP9_HIGHBITDEPTH - const int32_t bd = bit_depth_; - DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]); - DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]); -#else - DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]); - DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]); -#endif // CONFIG_VP9_HIGHBITDEPTH - int err_count_total = 0; - int first_failure = -1; - - // NOTE: The code in vp9_loopfilter.c:update_sharpness computes mblim as a - // function of sharpness_lvl and the loopfilter lvl as: - // block_inside_limit = lvl >> ((sharpness_lvl > 0) + (sharpness_lvl > 4)); - // ... - // memset(lfi->lfthr[lvl].mblim, (2 * (lvl + 2) + block_inside_limit), - // SIMD_WIDTH); - // This means that the largest value for mblim will occur when sharpness_lvl - // is equal to 0, and lvl is equal to its greatest value (MAX_LOOP_FILTER). 
- // In this case block_inside_limit will be equal to MAX_LOOP_FILTER and - // therefore mblim will be equal to (2 * (lvl + 2) + block_inside_limit) = - // 2 * (MAX_LOOP_FILTER + 2) + MAX_LOOP_FILTER = 3 * MAX_LOOP_FILTER + 4 - - for (int i = 0; i < count_test_block; ++i) { - int err_count = 0; - uint8_t tmp = static_cast(rnd(3 * MAX_LOOP_FILTER + 4)); - DECLARE_ALIGNED(16, const uint8_t, blimit[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - tmp = static_cast(rnd(MAX_LOOP_FILTER)); - DECLARE_ALIGNED(16, const uint8_t, limit[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - tmp = rnd.Rand8(); - DECLARE_ALIGNED(16, const uint8_t, thresh[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - int32_t p = kNumCoeffs / 32; - for (int j = 0; j < kNumCoeffs; ++j) { - s[j] = rnd.Rand16() & mask_; - ref_s[j] = s[j]; - } -#if CONFIG_VP9_HIGHBITDEPTH - ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, count_, bd); - ASM_REGISTER_STATE_CHECK( - loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_, bd)); -#else - ref_loopfilter_op_(ref_s+8+p*8, p, blimit, limit, thresh, count_); - ASM_REGISTER_STATE_CHECK( - loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_)); -#endif // CONFIG_VP9_HIGHBITDEPTH - for (int j = 0; j < kNumCoeffs; ++j) { - err_count += ref_s[j] != s[j]; - } - if (err_count && !err_count_total) { - first_failure = i; - } - err_count_total += err_count; - } - EXPECT_EQ(0, err_count_total) - << "Error: Loop8Test6Param, C output doesn't match SSE2 " - "loopfilter output. 
" - << "First failed at test case " << first_failure; -} - -TEST_P(Loop8Test9Param, OperationCheck) { - ACMRandom rnd(ACMRandom::DeterministicSeed()); - const int count_test_block = number_of_iterations; -#if CONFIG_VP9_HIGHBITDEPTH - const int32_t bd = bit_depth_; - DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]); - DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]); -#else - DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]); - DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]); -#endif // CONFIG_VP9_HIGHBITDEPTH - int err_count_total = 0; - int first_failure = -1; - for (int i = 0; i < count_test_block; ++i) { - int err_count = 0; - uint8_t tmp = static_cast(rnd(3 * MAX_LOOP_FILTER + 4)); - DECLARE_ALIGNED(16, const uint8_t, blimit0[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - tmp = static_cast(rnd(MAX_LOOP_FILTER)); - DECLARE_ALIGNED(16, const uint8_t, limit0[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - tmp = rnd.Rand8(); - DECLARE_ALIGNED(16, const uint8_t, thresh0[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - tmp = static_cast(rnd(3 * MAX_LOOP_FILTER + 4)); - DECLARE_ALIGNED(16, const uint8_t, blimit1[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - tmp = static_cast(rnd(MAX_LOOP_FILTER)); - DECLARE_ALIGNED(16, const uint8_t, limit1[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - tmp = rnd.Rand8(); - DECLARE_ALIGNED(16, const uint8_t, thresh1[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - int32_t p = kNumCoeffs / 32; - uint16_t tmp_s[kNumCoeffs]; - int j = 0; - const uint8_t limit = *limit0 < *limit1 ? *limit0 : *limit1; - while (j < kNumCoeffs) { - uint8_t val = rnd.Rand8(); - if (val & 0x80) { // 50% chance to choose a new value. 
- tmp_s[j] = rnd.Rand16(); - j++; - } else { // 50% chance to repeat previous value in row X times. - int k = 0; - while (k++ < ((val & 0x1f) + 1) && j < kNumCoeffs) { - if (j < 1) { - tmp_s[j] = rnd.Rand16(); - } else if (val & 0x20) { // Increment by a value within the limit. - tmp_s[j] = (tmp_s[j - 1] + (limit - 1)); - } else { // Decrement by an value within the limit. - tmp_s[j] = (tmp_s[j - 1] - (limit - 1)); - } - j++; - } - } - } - for (j = 0; j < kNumCoeffs; j++) { - if (i % 2) { - s[j] = tmp_s[j] & mask_; - } else { - s[j] = tmp_s[p * (j % p) + j / p] & mask_; - } - ref_s[j] = s[j]; - } -#if CONFIG_VP9_HIGHBITDEPTH - ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, - blimit1, limit1, thresh1, bd); - ASM_REGISTER_STATE_CHECK( - loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0, - blimit1, limit1, thresh1, bd)); -#else - ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, - blimit1, limit1, thresh1); - ASM_REGISTER_STATE_CHECK( - loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0, - blimit1, limit1, thresh1)); -#endif // CONFIG_VP9_HIGHBITDEPTH - for (int j = 0; j < kNumCoeffs; ++j) { - err_count += ref_s[j] != s[j]; - } - if (err_count && !err_count_total) { - first_failure = i; - } - err_count_total += err_count; - } - EXPECT_EQ(0, err_count_total) - << "Error: Loop8Test9Param, C output doesn't match SSE2 " - "loopfilter output. 
" - << "First failed at test case " << first_failure; -} - -TEST_P(Loop8Test9Param, ValueCheck) { - ACMRandom rnd(ACMRandom::DeterministicSeed()); - const int count_test_block = number_of_iterations; -#if CONFIG_VP9_HIGHBITDEPTH - DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]); - DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]); -#else - DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]); - DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]); -#endif // CONFIG_VP9_HIGHBITDEPTH - int err_count_total = 0; - int first_failure = -1; - for (int i = 0; i < count_test_block; ++i) { - int err_count = 0; - uint8_t tmp = static_cast(rnd(3 * MAX_LOOP_FILTER + 4)); - DECLARE_ALIGNED(16, const uint8_t, blimit0[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - tmp = static_cast(rnd(MAX_LOOP_FILTER)); - DECLARE_ALIGNED(16, const uint8_t, limit0[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - tmp = rnd.Rand8(); - DECLARE_ALIGNED(16, const uint8_t, thresh0[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - tmp = static_cast(rnd(3 * MAX_LOOP_FILTER + 4)); - DECLARE_ALIGNED(16, const uint8_t, blimit1[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - tmp = static_cast(rnd(MAX_LOOP_FILTER)); - DECLARE_ALIGNED(16, const uint8_t, limit1[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - tmp = rnd.Rand8(); - DECLARE_ALIGNED(16, const uint8_t, thresh1[16]) = { - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, - tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp - }; - int32_t p = kNumCoeffs / 32; // TODO(pdlf) can we have non-square here? 
- for (int j = 0; j < kNumCoeffs; ++j) { - s[j] = rnd.Rand16() & mask_; - ref_s[j] = s[j]; - } -#if CONFIG_VP9_HIGHBITDEPTH - const int32_t bd = bit_depth_; - ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, - blimit1, limit1, thresh1, bd); - ASM_REGISTER_STATE_CHECK( - loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, - thresh0, blimit1, limit1, thresh1, bd)); -#else - ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, - blimit1, limit1, thresh1); - ASM_REGISTER_STATE_CHECK( - loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, thresh0, - blimit1, limit1, thresh1)); -#endif // CONFIG_VP9_HIGHBITDEPTH - for (int j = 0; j < kNumCoeffs; ++j) { - err_count += ref_s[j] != s[j]; - } - if (err_count && !err_count_total) { - first_failure = i; - } - err_count_total += err_count; - } - EXPECT_EQ(0, err_count_total) - << "Error: Loop8Test9Param, C output doesn't match SSE2" - "loopfilter output. " - << "First failed at test case " << first_failure; -} - -using std::tr1::make_tuple; - -#if HAVE_SSE2 -#if CONFIG_VP9_HIGHBITDEPTH -INSTANTIATE_TEST_CASE_P( - SSE2, Loop8Test6Param, - ::testing::Values( - make_tuple(&vpx_highbd_lpf_horizontal_4_sse2, - &vpx_highbd_lpf_horizontal_4_c, 8, 1), - make_tuple(&vpx_highbd_lpf_vertical_4_sse2, - &vpx_highbd_lpf_vertical_4_c, 8, 1), - make_tuple(&vpx_highbd_lpf_horizontal_8_sse2, - &vpx_highbd_lpf_horizontal_8_c, 8, 1), - make_tuple(&vpx_highbd_lpf_horizontal_16_sse2, - &vpx_highbd_lpf_horizontal_16_c, 8, 1), - make_tuple(&vpx_highbd_lpf_horizontal_16_sse2, - &vpx_highbd_lpf_horizontal_16_c, 8, 2), - make_tuple(&vpx_highbd_lpf_vertical_8_sse2, - &vpx_highbd_lpf_vertical_8_c, 8, 1), - make_tuple(&wrapper_vertical_16_sse2, - &wrapper_vertical_16_c, 8, 1), - make_tuple(&vpx_highbd_lpf_horizontal_4_sse2, - &vpx_highbd_lpf_horizontal_4_c, 10, 1), - make_tuple(&vpx_highbd_lpf_vertical_4_sse2, - &vpx_highbd_lpf_vertical_4_c, 10, 1), - make_tuple(&vpx_highbd_lpf_horizontal_8_sse2, - &vpx_highbd_lpf_horizontal_8_c, 
10, 1), - make_tuple(&vpx_highbd_lpf_horizontal_16_sse2, - &vpx_highbd_lpf_horizontal_16_c, 10, 1), - make_tuple(&vpx_highbd_lpf_horizontal_16_sse2, - &vpx_highbd_lpf_horizontal_16_c, 10, 2), - make_tuple(&vpx_highbd_lpf_vertical_8_sse2, - &vpx_highbd_lpf_vertical_8_c, 10, 1), - make_tuple(&wrapper_vertical_16_sse2, - &wrapper_vertical_16_c, 10, 1), - make_tuple(&vpx_highbd_lpf_horizontal_4_sse2, - &vpx_highbd_lpf_horizontal_4_c, 12, 1), - make_tuple(&vpx_highbd_lpf_vertical_4_sse2, - &vpx_highbd_lpf_vertical_4_c, 12, 1), - make_tuple(&vpx_highbd_lpf_horizontal_8_sse2, - &vpx_highbd_lpf_horizontal_8_c, 12, 1), - make_tuple(&vpx_highbd_lpf_horizontal_16_sse2, - &vpx_highbd_lpf_horizontal_16_c, 12, 1), - make_tuple(&vpx_highbd_lpf_horizontal_16_sse2, - &vpx_highbd_lpf_horizontal_16_c, 12, 2), - make_tuple(&vpx_highbd_lpf_vertical_8_sse2, - &vpx_highbd_lpf_vertical_8_c, 12, 1), - make_tuple(&wrapper_vertical_16_sse2, - &wrapper_vertical_16_c, 12, 1), - make_tuple(&wrapper_vertical_16_dual_sse2, - &wrapper_vertical_16_dual_c, 8, 1), - make_tuple(&wrapper_vertical_16_dual_sse2, - &wrapper_vertical_16_dual_c, 10, 1), - make_tuple(&wrapper_vertical_16_dual_sse2, - &wrapper_vertical_16_dual_c, 12, 1))); -#else -INSTANTIATE_TEST_CASE_P( - SSE2, Loop8Test6Param, - ::testing::Values( - make_tuple(&vpx_lpf_horizontal_8_sse2, &vpx_lpf_horizontal_8_c, 8, 1), - make_tuple(&vpx_lpf_horizontal_16_sse2, &vpx_lpf_horizontal_16_c, 8, 1), - make_tuple(&vpx_lpf_horizontal_16_sse2, &vpx_lpf_horizontal_16_c, 8, 2), - make_tuple(&vpx_lpf_vertical_8_sse2, &vpx_lpf_vertical_8_c, 8, 1), - make_tuple(&wrapper_vertical_16_sse2, &wrapper_vertical_16_c, 8, 1), - make_tuple(&wrapper_vertical_16_dual_sse2, - &wrapper_vertical_16_dual_c, 8, 1))); -#endif // CONFIG_VP9_HIGHBITDEPTH -#endif - -#if HAVE_AVX2 && (!CONFIG_VP9_HIGHBITDEPTH) -INSTANTIATE_TEST_CASE_P( - AVX2, Loop8Test6Param, - ::testing::Values( - make_tuple(&vpx_lpf_horizontal_16_avx2, &vpx_lpf_horizontal_16_c, 8, 1), - 
make_tuple(&vpx_lpf_horizontal_16_avx2, &vpx_lpf_horizontal_16_c, 8, - 2))); -#endif - -#if HAVE_SSE2 -#if CONFIG_VP9_HIGHBITDEPTH -INSTANTIATE_TEST_CASE_P( - SSE2, Loop8Test9Param, - ::testing::Values( - make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2, - &vpx_highbd_lpf_horizontal_4_dual_c, 8), - make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2, - &vpx_highbd_lpf_horizontal_8_dual_c, 8), - make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2, - &vpx_highbd_lpf_vertical_4_dual_c, 8), - make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2, - &vpx_highbd_lpf_vertical_8_dual_c, 8), - make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2, - &vpx_highbd_lpf_horizontal_4_dual_c, 10), - make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2, - &vpx_highbd_lpf_horizontal_8_dual_c, 10), - make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2, - &vpx_highbd_lpf_vertical_4_dual_c, 10), - make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2, - &vpx_highbd_lpf_vertical_8_dual_c, 10), - make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2, - &vpx_highbd_lpf_horizontal_4_dual_c, 12), - make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2, - &vpx_highbd_lpf_horizontal_8_dual_c, 12), - make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2, - &vpx_highbd_lpf_vertical_4_dual_c, 12), - make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2, - &vpx_highbd_lpf_vertical_8_dual_c, 12))); -#else -INSTANTIATE_TEST_CASE_P( - SSE2, Loop8Test9Param, - ::testing::Values( - make_tuple(&vpx_lpf_horizontal_4_dual_sse2, - &vpx_lpf_horizontal_4_dual_c, 8), - make_tuple(&vpx_lpf_horizontal_8_dual_sse2, - &vpx_lpf_horizontal_8_dual_c, 8), - make_tuple(&vpx_lpf_vertical_4_dual_sse2, - &vpx_lpf_vertical_4_dual_c, 8), - make_tuple(&vpx_lpf_vertical_8_dual_sse2, - &vpx_lpf_vertical_8_dual_c, 8))); -#endif // CONFIG_VP9_HIGHBITDEPTH -#endif - -#if HAVE_NEON -#if CONFIG_VP9_HIGHBITDEPTH -// No neon high bitdepth functions. 
-#else -INSTANTIATE_TEST_CASE_P( - NEON, Loop8Test6Param, - ::testing::Values( -#if HAVE_NEON_ASM -// Using #if inside the macro is unsupported on MSVS but the tests are not -// currently built for MSVS with ARM and NEON. - make_tuple(&vpx_lpf_horizontal_16_neon, - &vpx_lpf_horizontal_16_c, 8, 1), - make_tuple(&vpx_lpf_horizontal_16_neon, - &vpx_lpf_horizontal_16_c, 8, 2), - make_tuple(&wrapper_vertical_16_neon, - &wrapper_vertical_16_c, 8, 1), - make_tuple(&wrapper_vertical_16_dual_neon, - &wrapper_vertical_16_dual_c, 8, 1), -#endif // HAVE_NEON_ASM - make_tuple(&vpx_lpf_horizontal_8_neon, - &vpx_lpf_horizontal_8_c, 8, 1), - make_tuple(&vpx_lpf_vertical_8_neon, - &vpx_lpf_vertical_8_c, 8, 1), - make_tuple(&vpx_lpf_horizontal_4_neon, - &vpx_lpf_horizontal_4_c, 8, 1), - make_tuple(&vpx_lpf_vertical_4_neon, - &vpx_lpf_vertical_4_c, 8, 1))); -INSTANTIATE_TEST_CASE_P( - NEON, Loop8Test9Param, - ::testing::Values( -#if HAVE_NEON_ASM - make_tuple(&vpx_lpf_horizontal_8_dual_neon, - &vpx_lpf_horizontal_8_dual_c, 8), - make_tuple(&vpx_lpf_vertical_8_dual_neon, - &vpx_lpf_vertical_8_dual_c, 8), -#endif // HAVE_NEON_ASM - make_tuple(&vpx_lpf_horizontal_4_dual_neon, - &vpx_lpf_horizontal_4_dual_c, 8), - make_tuple(&vpx_lpf_vertical_4_dual_neon, - &vpx_lpf_vertical_4_dual_c, 8))); -#endif // CONFIG_VP9_HIGHBITDEPTH -#endif // HAVE_NEON - -#if HAVE_MSA && (!CONFIG_VP9_HIGHBITDEPTH) -INSTANTIATE_TEST_CASE_P( - MSA, Loop8Test6Param, - ::testing::Values( - make_tuple(&vpx_lpf_horizontal_8_msa, &vpx_lpf_horizontal_8_c, 8, 1), - make_tuple(&vpx_lpf_horizontal_16_msa, &vpx_lpf_horizontal_16_c, 8, 1), - make_tuple(&vpx_lpf_horizontal_16_msa, &vpx_lpf_horizontal_16_c, 8, 2), - make_tuple(&vpx_lpf_vertical_8_msa, &vpx_lpf_vertical_8_c, 8, 1), - make_tuple(&wrapper_vertical_16_msa, &wrapper_vertical_16_c, 8, 1))); - -INSTANTIATE_TEST_CASE_P( - MSA, Loop8Test9Param, - ::testing::Values( - make_tuple(&vpx_lpf_horizontal_4_dual_msa, - &vpx_lpf_horizontal_4_dual_c, 8), - 
make_tuple(&vpx_lpf_horizontal_8_dual_msa, - &vpx_lpf_horizontal_8_dual_c, 8), - make_tuple(&vpx_lpf_vertical_4_dual_msa, - &vpx_lpf_vertical_4_dual_c, 8), - make_tuple(&vpx_lpf_vertical_8_dual_msa, - &vpx_lpf_vertical_8_dual_c, 8))); -#endif // HAVE_MSA && (!CONFIG_VP9_HIGHBITDEPTH) - -} // namespace diff --git a/libs/libvpx/test/lpf_test.cc b/libs/libvpx/test/lpf_test.cc new file mode 100644 index 0000000000..552b5e33c2 --- /dev/null +++ b/libs/libvpx/test/lpf_test.cc @@ -0,0 +1,599 @@ +/* + * Copyright (c) 2014 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include +#include +#include + +#include "third_party/googletest/src/include/gtest/gtest.h" + +#include "./vpx_config.h" +#include "./vpx_dsp_rtcd.h" +#include "test/acm_random.h" +#include "test/clear_system_state.h" +#include "test/register_state_check.h" +#include "test/util.h" +#include "vp9/common/vp9_entropy.h" +#include "vp9/common/vp9_loopfilter.h" +#include "vpx/vpx_integer.h" + +using libvpx_test::ACMRandom; + +namespace { +// Horizontally and Vertically need 32x32: 8 Coeffs preceeding filtered section +// 16 Coefs within filtered section +// 8 Coeffs following filtered section +const int kNumCoeffs = 1024; + +const int number_of_iterations = 10000; + +#if CONFIG_VP9_HIGHBITDEPTH +typedef uint16_t Pixel; +#define PIXEL_WIDTH 16 + +typedef void (*loop_op_t)(Pixel *s, int p, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh, int bd); +typedef void (*dual_loop_op_t)(Pixel *s, int p, const uint8_t *blimit0, + const uint8_t *limit0, const uint8_t *thresh0, + const uint8_t *blimit1, const uint8_t *limit1, + const uint8_t *thresh1, int bd); +#else 
+typedef uint8_t Pixel; +#define PIXEL_WIDTH 8 + +typedef void (*loop_op_t)(Pixel *s, int p, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh); +typedef void (*dual_loop_op_t)(Pixel *s, int p, const uint8_t *blimit0, + const uint8_t *limit0, const uint8_t *thresh0, + const uint8_t *blimit1, const uint8_t *limit1, + const uint8_t *thresh1); +#endif // CONFIG_VP9_HIGHBITDEPTH + +typedef std::tr1::tuple loop8_param_t; +typedef std::tr1::tuple dualloop8_param_t; + +void InitInput(Pixel *s, Pixel *ref_s, ACMRandom *rnd, const uint8_t limit, + const int mask, const int32_t p, const int i) { + uint16_t tmp_s[kNumCoeffs]; + + for (int j = 0; j < kNumCoeffs;) { + const uint8_t val = rnd->Rand8(); + if (val & 0x80) { // 50% chance to choose a new value. + tmp_s[j] = rnd->Rand16(); + j++; + } else { // 50% chance to repeat previous value in row X times. + int k = 0; + while (k++ < ((val & 0x1f) + 1) && j < kNumCoeffs) { + if (j < 1) { + tmp_s[j] = rnd->Rand16(); + } else if (val & 0x20) { // Increment by a value within the limit. + tmp_s[j] = tmp_s[j - 1] + (limit - 1); + } else { // Decrement by a value within the limit. + tmp_s[j] = tmp_s[j - 1] - (limit - 1); + } + j++; + } + } + } + + for (int j = 0; j < kNumCoeffs;) { + const uint8_t val = rnd->Rand8(); + if (val & 0x80) { + j++; + } else { // 50% chance to repeat previous value in column X times. + int k = 0; + while (k++ < ((val & 0x1f) + 1) && j < kNumCoeffs) { + if (j < 1) { + tmp_s[j] = rnd->Rand16(); + } else if (val & 0x20) { // Increment by a value within the limit. + tmp_s[(j % 32) * 32 + j / 32] = + tmp_s[((j - 1) % 32) * 32 + (j - 1) / 32] + (limit - 1); + } else { // Decrement by a value within the limit. 
+ tmp_s[(j % 32) * 32 + j / 32] = + tmp_s[((j - 1) % 32) * 32 + (j - 1) / 32] - (limit - 1); + } + j++; + } + } + } + + for (int j = 0; j < kNumCoeffs; j++) { + if (i % 2) { + s[j] = tmp_s[j] & mask; + } else { + s[j] = tmp_s[p * (j % p) + j / p] & mask; + } + ref_s[j] = s[j]; + } +} + +class Loop8Test6Param : public ::testing::TestWithParam { + public: + virtual ~Loop8Test6Param() {} + virtual void SetUp() { + loopfilter_op_ = GET_PARAM(0); + ref_loopfilter_op_ = GET_PARAM(1); + bit_depth_ = GET_PARAM(2); + mask_ = (1 << bit_depth_) - 1; + } + + virtual void TearDown() { libvpx_test::ClearSystemState(); } + + protected: + int bit_depth_; + int mask_; + loop_op_t loopfilter_op_; + loop_op_t ref_loopfilter_op_; +}; + +class Loop8Test9Param : public ::testing::TestWithParam { + public: + virtual ~Loop8Test9Param() {} + virtual void SetUp() { + loopfilter_op_ = GET_PARAM(0); + ref_loopfilter_op_ = GET_PARAM(1); + bit_depth_ = GET_PARAM(2); + mask_ = (1 << bit_depth_) - 1; + } + + virtual void TearDown() { libvpx_test::ClearSystemState(); } + + protected: + int bit_depth_; + int mask_; + dual_loop_op_t loopfilter_op_; + dual_loop_op_t ref_loopfilter_op_; +}; + +TEST_P(Loop8Test6Param, OperationCheck) { + ACMRandom rnd(ACMRandom::DeterministicSeed()); + const int count_test_block = number_of_iterations; + const int32_t p = kNumCoeffs / 32; + DECLARE_ALIGNED(PIXEL_WIDTH, Pixel, s[kNumCoeffs]); + DECLARE_ALIGNED(PIXEL_WIDTH, Pixel, ref_s[kNumCoeffs]); + int err_count_total = 0; + int first_failure = -1; + for (int i = 0; i < count_test_block; ++i) { + int err_count = 0; + uint8_t tmp = static_cast(rnd(3 * MAX_LOOP_FILTER + 4)); + DECLARE_ALIGNED(16, const uint8_t, + blimit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + tmp = static_cast(rnd(MAX_LOOP_FILTER)); + DECLARE_ALIGNED(16, const uint8_t, + limit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + tmp = rnd.Rand8(); + 
DECLARE_ALIGNED(16, const uint8_t, + thresh[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + InitInput(s, ref_s, &rnd, *limit, mask_, p, i); +#if CONFIG_VP9_HIGHBITDEPTH + ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, bit_depth_); + ASM_REGISTER_STATE_CHECK( + loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, bit_depth_)); +#else + ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh); + ASM_REGISTER_STATE_CHECK( + loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh)); +#endif // CONFIG_VP9_HIGHBITDEPTH + + for (int j = 0; j < kNumCoeffs; ++j) { + err_count += ref_s[j] != s[j]; + } + if (err_count && !err_count_total) { + first_failure = i; + } + err_count_total += err_count; + } + EXPECT_EQ(0, err_count_total) + << "Error: Loop8Test6Param, C output doesn't match SSE2 " + "loopfilter output. " + << "First failed at test case " << first_failure; +} + +TEST_P(Loop8Test6Param, ValueCheck) { + ACMRandom rnd(ACMRandom::DeterministicSeed()); + const int count_test_block = number_of_iterations; + DECLARE_ALIGNED(PIXEL_WIDTH, Pixel, s[kNumCoeffs]); + DECLARE_ALIGNED(PIXEL_WIDTH, Pixel, ref_s[kNumCoeffs]); + int err_count_total = 0; + int first_failure = -1; + + // NOTE: The code in vp9_loopfilter.c:update_sharpness computes mblim as a + // function of sharpness_lvl and the loopfilter lvl as: + // block_inside_limit = lvl >> ((sharpness_lvl > 0) + (sharpness_lvl > 4)); + // ... + // memset(lfi->lfthr[lvl].mblim, (2 * (lvl + 2) + block_inside_limit), + // SIMD_WIDTH); + // This means that the largest value for mblim will occur when sharpness_lvl + // is equal to 0, and lvl is equal to its greatest value (MAX_LOOP_FILTER). 
+ // In this case block_inside_limit will be equal to MAX_LOOP_FILTER and + // therefore mblim will be equal to (2 * (lvl + 2) + block_inside_limit) = + // 2 * (MAX_LOOP_FILTER + 2) + MAX_LOOP_FILTER = 3 * MAX_LOOP_FILTER + 4 + + for (int i = 0; i < count_test_block; ++i) { + int err_count = 0; + uint8_t tmp = static_cast(rnd(3 * MAX_LOOP_FILTER + 4)); + DECLARE_ALIGNED(16, const uint8_t, + blimit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + tmp = static_cast(rnd(MAX_LOOP_FILTER)); + DECLARE_ALIGNED(16, const uint8_t, + limit[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + tmp = rnd.Rand8(); + DECLARE_ALIGNED(16, const uint8_t, + thresh[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + int32_t p = kNumCoeffs / 32; + for (int j = 0; j < kNumCoeffs; ++j) { + s[j] = rnd.Rand16() & mask_; + ref_s[j] = s[j]; + } +#if CONFIG_VP9_HIGHBITDEPTH + ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, bit_depth_); + ASM_REGISTER_STATE_CHECK( + loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, bit_depth_)); +#else + ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh); + ASM_REGISTER_STATE_CHECK( + loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh)); +#endif // CONFIG_VP9_HIGHBITDEPTH + + for (int j = 0; j < kNumCoeffs; ++j) { + err_count += ref_s[j] != s[j]; + } + if (err_count && !err_count_total) { + first_failure = i; + } + err_count_total += err_count; + } + EXPECT_EQ(0, err_count_total) + << "Error: Loop8Test6Param, C output doesn't match SSE2 " + "loopfilter output. 
" + << "First failed at test case " << first_failure; +} + +TEST_P(Loop8Test9Param, OperationCheck) { + ACMRandom rnd(ACMRandom::DeterministicSeed()); + const int count_test_block = number_of_iterations; + DECLARE_ALIGNED(PIXEL_WIDTH, Pixel, s[kNumCoeffs]); + DECLARE_ALIGNED(PIXEL_WIDTH, Pixel, ref_s[kNumCoeffs]); + int err_count_total = 0; + int first_failure = -1; + for (int i = 0; i < count_test_block; ++i) { + int err_count = 0; + uint8_t tmp = static_cast(rnd(3 * MAX_LOOP_FILTER + 4)); + DECLARE_ALIGNED(16, const uint8_t, + blimit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + tmp = static_cast(rnd(MAX_LOOP_FILTER)); + DECLARE_ALIGNED(16, const uint8_t, + limit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + tmp = rnd.Rand8(); + DECLARE_ALIGNED(16, const uint8_t, + thresh0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + tmp = static_cast(rnd(3 * MAX_LOOP_FILTER + 4)); + DECLARE_ALIGNED(16, const uint8_t, + blimit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + tmp = static_cast(rnd(MAX_LOOP_FILTER)); + DECLARE_ALIGNED(16, const uint8_t, + limit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + tmp = rnd.Rand8(); + DECLARE_ALIGNED(16, const uint8_t, + thresh1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + int32_t p = kNumCoeffs / 32; + const uint8_t limit = *limit0 < *limit1 ? 
*limit0 : *limit1; + InitInput(s, ref_s, &rnd, limit, mask_, p, i); +#if CONFIG_VP9_HIGHBITDEPTH + ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1, + limit1, thresh1, bit_depth_); + ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, + thresh0, blimit1, limit1, thresh1, + bit_depth_)); +#else + ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1, + limit1, thresh1); + ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, + thresh0, blimit1, limit1, thresh1)); +#endif // CONFIG_VP9_HIGHBITDEPTH + + for (int j = 0; j < kNumCoeffs; ++j) { + err_count += ref_s[j] != s[j]; + } + if (err_count && !err_count_total) { + first_failure = i; + } + err_count_total += err_count; + } + EXPECT_EQ(0, err_count_total) + << "Error: Loop8Test9Param, C output doesn't match SSE2 " + "loopfilter output. " + << "First failed at test case " << first_failure; +} + +TEST_P(Loop8Test9Param, ValueCheck) { + ACMRandom rnd(ACMRandom::DeterministicSeed()); + const int count_test_block = number_of_iterations; + DECLARE_ALIGNED(PIXEL_WIDTH, Pixel, s[kNumCoeffs]); + DECLARE_ALIGNED(PIXEL_WIDTH, Pixel, ref_s[kNumCoeffs]); + int err_count_total = 0; + int first_failure = -1; + for (int i = 0; i < count_test_block; ++i) { + int err_count = 0; + uint8_t tmp = static_cast(rnd(3 * MAX_LOOP_FILTER + 4)); + DECLARE_ALIGNED(16, const uint8_t, + blimit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + tmp = static_cast(rnd(MAX_LOOP_FILTER)); + DECLARE_ALIGNED(16, const uint8_t, + limit0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + tmp = rnd.Rand8(); + DECLARE_ALIGNED(16, const uint8_t, + thresh0[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + tmp = static_cast(rnd(3 * MAX_LOOP_FILTER + 4)); + DECLARE_ALIGNED(16, const uint8_t, + blimit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, 
tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + tmp = static_cast(rnd(MAX_LOOP_FILTER)); + DECLARE_ALIGNED(16, const uint8_t, + limit1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + tmp = rnd.Rand8(); + DECLARE_ALIGNED(16, const uint8_t, + thresh1[16]) = { tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp, + tmp, tmp, tmp, tmp, tmp, tmp, tmp, tmp }; + int32_t p = kNumCoeffs / 32; // TODO(pdlf) can we have non-square here? + for (int j = 0; j < kNumCoeffs; ++j) { + s[j] = rnd.Rand16() & mask_; + ref_s[j] = s[j]; + } +#if CONFIG_VP9_HIGHBITDEPTH + ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1, + limit1, thresh1, bit_depth_); + ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, + thresh0, blimit1, limit1, thresh1, + bit_depth_)); +#else + ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1, + limit1, thresh1); + ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0, + thresh0, blimit1, limit1, thresh1)); +#endif // CONFIG_VP9_HIGHBITDEPTH + + for (int j = 0; j < kNumCoeffs; ++j) { + err_count += ref_s[j] != s[j]; + } + if (err_count && !err_count_total) { + first_failure = i; + } + err_count_total += err_count; + } + EXPECT_EQ(0, err_count_total) + << "Error: Loop8Test9Param, C output doesn't match SSE2" + "loopfilter output. 
" + << "First failed at test case " << first_failure; +} + +using std::tr1::make_tuple; + +#if HAVE_SSE2 +#if CONFIG_VP9_HIGHBITDEPTH +INSTANTIATE_TEST_CASE_P( + SSE2, Loop8Test6Param, + ::testing::Values(make_tuple(&vpx_highbd_lpf_horizontal_4_sse2, + &vpx_highbd_lpf_horizontal_4_c, 8), + make_tuple(&vpx_highbd_lpf_vertical_4_sse2, + &vpx_highbd_lpf_vertical_4_c, 8), + make_tuple(&vpx_highbd_lpf_horizontal_8_sse2, + &vpx_highbd_lpf_horizontal_8_c, 8), + make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2, + &vpx_highbd_lpf_horizontal_edge_8_c, 8), + make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2, + &vpx_highbd_lpf_horizontal_edge_16_c, 8), + make_tuple(&vpx_highbd_lpf_vertical_8_sse2, + &vpx_highbd_lpf_vertical_8_c, 8), + make_tuple(&vpx_highbd_lpf_vertical_16_sse2, + &vpx_highbd_lpf_vertical_16_c, 8), + make_tuple(&vpx_highbd_lpf_horizontal_4_sse2, + &vpx_highbd_lpf_horizontal_4_c, 10), + make_tuple(&vpx_highbd_lpf_vertical_4_sse2, + &vpx_highbd_lpf_vertical_4_c, 10), + make_tuple(&vpx_highbd_lpf_horizontal_8_sse2, + &vpx_highbd_lpf_horizontal_8_c, 10), + make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2, + &vpx_highbd_lpf_horizontal_edge_8_c, 10), + make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2, + &vpx_highbd_lpf_horizontal_edge_16_c, 10), + make_tuple(&vpx_highbd_lpf_vertical_8_sse2, + &vpx_highbd_lpf_vertical_8_c, 10), + make_tuple(&vpx_highbd_lpf_vertical_16_sse2, + &vpx_highbd_lpf_vertical_16_c, 10), + make_tuple(&vpx_highbd_lpf_horizontal_4_sse2, + &vpx_highbd_lpf_horizontal_4_c, 12), + make_tuple(&vpx_highbd_lpf_vertical_4_sse2, + &vpx_highbd_lpf_vertical_4_c, 12), + make_tuple(&vpx_highbd_lpf_horizontal_8_sse2, + &vpx_highbd_lpf_horizontal_8_c, 12), + make_tuple(&vpx_highbd_lpf_horizontal_edge_8_sse2, + &vpx_highbd_lpf_horizontal_edge_8_c, 12), + make_tuple(&vpx_highbd_lpf_horizontal_edge_16_sse2, + &vpx_highbd_lpf_horizontal_edge_16_c, 12), + make_tuple(&vpx_highbd_lpf_vertical_8_sse2, + &vpx_highbd_lpf_vertical_8_c, 12), + 
make_tuple(&vpx_highbd_lpf_vertical_16_sse2, + &vpx_highbd_lpf_vertical_16_c, 12), + make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2, + &vpx_highbd_lpf_vertical_16_dual_c, 8), + make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2, + &vpx_highbd_lpf_vertical_16_dual_c, 10), + make_tuple(&vpx_highbd_lpf_vertical_16_dual_sse2, + &vpx_highbd_lpf_vertical_16_dual_c, 12))); +#else +INSTANTIATE_TEST_CASE_P( + SSE2, Loop8Test6Param, + ::testing::Values( + make_tuple(&vpx_lpf_horizontal_4_sse2, &vpx_lpf_horizontal_4_c, 8), + make_tuple(&vpx_lpf_horizontal_8_sse2, &vpx_lpf_horizontal_8_c, 8), + make_tuple(&vpx_lpf_horizontal_edge_8_sse2, + &vpx_lpf_horizontal_edge_8_c, 8), + make_tuple(&vpx_lpf_horizontal_edge_16_sse2, + &vpx_lpf_horizontal_edge_16_c, 8), + make_tuple(&vpx_lpf_vertical_4_sse2, &vpx_lpf_vertical_4_c, 8), + make_tuple(&vpx_lpf_vertical_8_sse2, &vpx_lpf_vertical_8_c, 8), + make_tuple(&vpx_lpf_vertical_16_sse2, &vpx_lpf_vertical_16_c, 8), + make_tuple(&vpx_lpf_vertical_16_dual_sse2, &vpx_lpf_vertical_16_dual_c, + 8))); +#endif // CONFIG_VP9_HIGHBITDEPTH +#endif + +#if HAVE_AVX2 && (!CONFIG_VP9_HIGHBITDEPTH) +INSTANTIATE_TEST_CASE_P( + AVX2, Loop8Test6Param, + ::testing::Values(make_tuple(&vpx_lpf_horizontal_edge_8_avx2, + &vpx_lpf_horizontal_edge_8_c, 8), + make_tuple(&vpx_lpf_horizontal_edge_16_avx2, + &vpx_lpf_horizontal_edge_16_c, 8))); +#endif + +#if HAVE_SSE2 +#if CONFIG_VP9_HIGHBITDEPTH +INSTANTIATE_TEST_CASE_P( + SSE2, Loop8Test9Param, + ::testing::Values(make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2, + &vpx_highbd_lpf_horizontal_4_dual_c, 8), + make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2, + &vpx_highbd_lpf_horizontal_8_dual_c, 8), + make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2, + &vpx_highbd_lpf_vertical_4_dual_c, 8), + make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2, + &vpx_highbd_lpf_vertical_8_dual_c, 8), + make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2, + &vpx_highbd_lpf_horizontal_4_dual_c, 10), + 
make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2, + &vpx_highbd_lpf_horizontal_8_dual_c, 10), + make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2, + &vpx_highbd_lpf_vertical_4_dual_c, 10), + make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2, + &vpx_highbd_lpf_vertical_8_dual_c, 10), + make_tuple(&vpx_highbd_lpf_horizontal_4_dual_sse2, + &vpx_highbd_lpf_horizontal_4_dual_c, 12), + make_tuple(&vpx_highbd_lpf_horizontal_8_dual_sse2, + &vpx_highbd_lpf_horizontal_8_dual_c, 12), + make_tuple(&vpx_highbd_lpf_vertical_4_dual_sse2, + &vpx_highbd_lpf_vertical_4_dual_c, 12), + make_tuple(&vpx_highbd_lpf_vertical_8_dual_sse2, + &vpx_highbd_lpf_vertical_8_dual_c, 12))); +#else +INSTANTIATE_TEST_CASE_P( + SSE2, Loop8Test9Param, + ::testing::Values(make_tuple(&vpx_lpf_horizontal_4_dual_sse2, + &vpx_lpf_horizontal_4_dual_c, 8), + make_tuple(&vpx_lpf_horizontal_8_dual_sse2, + &vpx_lpf_horizontal_8_dual_c, 8), + make_tuple(&vpx_lpf_vertical_4_dual_sse2, + &vpx_lpf_vertical_4_dual_c, 8), + make_tuple(&vpx_lpf_vertical_8_dual_sse2, + &vpx_lpf_vertical_8_dual_c, 8))); +#endif // CONFIG_VP9_HIGHBITDEPTH +#endif + +#if HAVE_NEON +#if CONFIG_VP9_HIGHBITDEPTH +// No neon high bitdepth functions. 
+#else +INSTANTIATE_TEST_CASE_P( + NEON, Loop8Test6Param, + ::testing::Values( + make_tuple(&vpx_lpf_horizontal_edge_8_neon, + &vpx_lpf_horizontal_edge_8_c, 8), + make_tuple(&vpx_lpf_horizontal_edge_16_neon, + &vpx_lpf_horizontal_edge_16_c, 8), + make_tuple(&vpx_lpf_vertical_16_neon, &vpx_lpf_vertical_16_c, 8), + make_tuple(&vpx_lpf_vertical_16_dual_neon, &vpx_lpf_vertical_16_dual_c, + 8), + make_tuple(&vpx_lpf_horizontal_8_neon, &vpx_lpf_horizontal_8_c, 8), + make_tuple(&vpx_lpf_vertical_8_neon, &vpx_lpf_vertical_8_c, 8), + make_tuple(&vpx_lpf_horizontal_4_neon, &vpx_lpf_horizontal_4_c, 8), + make_tuple(&vpx_lpf_vertical_4_neon, &vpx_lpf_vertical_4_c, 8))); +INSTANTIATE_TEST_CASE_P( + NEON, Loop8Test9Param, + ::testing::Values(make_tuple(&vpx_lpf_horizontal_8_dual_neon, + &vpx_lpf_horizontal_8_dual_c, 8), + make_tuple(&vpx_lpf_vertical_8_dual_neon, + &vpx_lpf_vertical_8_dual_c, 8), + make_tuple(&vpx_lpf_horizontal_4_dual_neon, + &vpx_lpf_horizontal_4_dual_c, 8), + make_tuple(&vpx_lpf_vertical_4_dual_neon, + &vpx_lpf_vertical_4_dual_c, 8))); +#endif // CONFIG_VP9_HIGHBITDEPTH +#endif // HAVE_NEON + +#if HAVE_DSPR2 && !CONFIG_VP9_HIGHBITDEPTH +INSTANTIATE_TEST_CASE_P( + DSPR2, Loop8Test6Param, + ::testing::Values( + make_tuple(&vpx_lpf_horizontal_4_dspr2, &vpx_lpf_horizontal_4_c, 8), + make_tuple(&vpx_lpf_horizontal_8_dspr2, &vpx_lpf_horizontal_8_c, 8), + make_tuple(&vpx_lpf_horizontal_edge_8, &vpx_lpf_horizontal_edge_8, 8), + make_tuple(&vpx_lpf_horizontal_edge_16, &vpx_lpf_horizontal_edge_16, 8), + make_tuple(&vpx_lpf_vertical_4_dspr2, &vpx_lpf_vertical_4_c, 8), + make_tuple(&vpx_lpf_vertical_8_dspr2, &vpx_lpf_vertical_8_c, 8), + make_tuple(&vpx_lpf_vertical_16_dspr2, &vpx_lpf_vertical_16_c, 8), + make_tuple(&vpx_lpf_vertical_16_dual_dspr2, &vpx_lpf_vertical_16_dual_c, + 8))); + +INSTANTIATE_TEST_CASE_P( + DSPR2, Loop8Test9Param, + ::testing::Values(make_tuple(&vpx_lpf_horizontal_4_dual_dspr2, + &vpx_lpf_horizontal_4_dual_c, 8), + 
make_tuple(&vpx_lpf_horizontal_8_dual_dspr2, + &vpx_lpf_horizontal_8_dual_c, 8), + make_tuple(&vpx_lpf_vertical_4_dual_dspr2, + &vpx_lpf_vertical_4_dual_c, 8), + make_tuple(&vpx_lpf_vertical_8_dual_dspr2, + &vpx_lpf_vertical_8_dual_c, 8))); +#endif // HAVE_DSPR2 && !CONFIG_VP9_HIGHBITDEPTH + +#if HAVE_MSA && (!CONFIG_VP9_HIGHBITDEPTH) +INSTANTIATE_TEST_CASE_P( + MSA, Loop8Test6Param, + ::testing::Values( + make_tuple(&vpx_lpf_horizontal_4_msa, &vpx_lpf_horizontal_4_c, 8), + make_tuple(&vpx_lpf_horizontal_8_msa, &vpx_lpf_horizontal_8_c, 8), + make_tuple(&vpx_lpf_horizontal_edge_8_msa, &vpx_lpf_horizontal_edge_8_c, + 8), + make_tuple(&vpx_lpf_horizontal_edge_16_msa, + &vpx_lpf_horizontal_edge_16_c, 8), + make_tuple(&vpx_lpf_vertical_4_msa, &vpx_lpf_vertical_4_c, 8), + make_tuple(&vpx_lpf_vertical_8_msa, &vpx_lpf_vertical_8_c, 8), + make_tuple(&vpx_lpf_vertical_16_msa, &vpx_lpf_vertical_16_c, 8))); + +INSTANTIATE_TEST_CASE_P( + MSA, Loop8Test9Param, + ::testing::Values(make_tuple(&vpx_lpf_horizontal_4_dual_msa, + &vpx_lpf_horizontal_4_dual_c, 8), + make_tuple(&vpx_lpf_horizontal_8_dual_msa, + &vpx_lpf_horizontal_8_dual_c, 8), + make_tuple(&vpx_lpf_vertical_4_dual_msa, + &vpx_lpf_vertical_4_dual_c, 8), + make_tuple(&vpx_lpf_vertical_8_dual_msa, + &vpx_lpf_vertical_8_dual_c, 8))); +#endif // HAVE_MSA && (!CONFIG_VP9_HIGHBITDEPTH) + +} // namespace diff --git a/libs/libvpx/test/md5_helper.h b/libs/libvpx/test/md5_helper.h index 742cf0b7b3..ef310a2d90 100644 --- a/libs/libvpx/test/md5_helper.h +++ b/libs/libvpx/test/md5_helper.h @@ -17,9 +17,7 @@ namespace libvpx_test { class MD5 { public: - MD5() { - MD5Init(&md5_); - } + MD5() { MD5Init(&md5_); } void Add(const vpx_image_t *img) { for (int plane = 0; plane < 3; ++plane) { @@ -30,10 +28,13 @@ class MD5 { // This works only for chroma_shift of 0 and 1. const int bytes_per_sample = (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1; - const int h = plane ? 
(img->d_h + img->y_chroma_shift) >> - img->y_chroma_shift : img->d_h; - const int w = (plane ? (img->d_w + img->x_chroma_shift) >> - img->x_chroma_shift : img->d_w) * bytes_per_sample; + const int h = + plane ? (img->d_h + img->y_chroma_shift) >> img->y_chroma_shift + : img->d_h; + const int w = + (plane ? (img->d_w + img->x_chroma_shift) >> img->x_chroma_shift + : img->d_w) * + bytes_per_sample; for (int y = 0; y < h; ++y) { MD5Update(&md5_, buf, w); @@ -56,8 +57,8 @@ class MD5 { MD5Final(tmp, &ctx_tmp); for (int i = 0; i < 16; i++) { - res_[i * 2 + 0] = hex[tmp[i] >> 4]; - res_[i * 2 + 1] = hex[tmp[i] & 0xf]; + res_[i * 2 + 0] = hex[tmp[i] >> 4]; + res_[i * 2 + 1] = hex[tmp[i] & 0xf]; } res_[32] = 0; diff --git a/libs/libvpx/test/minmax_test.cc b/libs/libvpx/test/minmax_test.cc new file mode 100644 index 0000000000..e51c9fd48c --- /dev/null +++ b/libs/libvpx/test/minmax_test.cc @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include +#include + +#include "third_party/googletest/src/include/gtest/gtest.h" + +#include "./vpx_dsp_rtcd.h" +#include "vpx/vpx_integer.h" + +#include "test/acm_random.h" +#include "test/register_state_check.h" + +namespace { + +using ::libvpx_test::ACMRandom; + +typedef void (*MinMaxFunc)(const uint8_t *a, int a_stride, const uint8_t *b, + int b_stride, int *min, int *max); + +class MinMaxTest : public ::testing::TestWithParam { + public: + virtual void SetUp() { + mm_func_ = GetParam(); + rnd_.Reset(ACMRandom::DeterministicSeed()); + } + + protected: + MinMaxFunc mm_func_; + ACMRandom rnd_; +}; + +void reference_minmax(const uint8_t *a, int a_stride, const uint8_t *b, + int b_stride, int *min_ret, int *max_ret) { + int min = 255; + int max = 0; + for (int i = 0; i < 8; i++) { + for (int j = 0; j < 8; j++) { + const int diff = abs(a[i * a_stride + j] - b[i * b_stride + j]); + if (min > diff) min = diff; + if (max < diff) max = diff; + } + } + + *min_ret = min; + *max_ret = max; +} + +TEST_P(MinMaxTest, MinValue) { + for (int i = 0; i < 64; i++) { + uint8_t a[64], b[64]; + memset(a, 0, sizeof(a)); + memset(b, 255, sizeof(b)); + b[i] = i; // Set a minimum difference of i. + + int min, max; + ASM_REGISTER_STATE_CHECK(mm_func_(a, 8, b, 8, &min, &max)); + EXPECT_EQ(255, max); + EXPECT_EQ(i, min); + } +} + +TEST_P(MinMaxTest, MaxValue) { + for (int i = 0; i < 64; i++) { + uint8_t a[64], b[64]; + memset(a, 0, sizeof(a)); + memset(b, 0, sizeof(b)); + b[i] = i; // Set a maximum difference of i. 
+ + int min, max; + ASM_REGISTER_STATE_CHECK(mm_func_(a, 8, b, 8, &min, &max)); + EXPECT_EQ(i, max); + EXPECT_EQ(0, min); + } +} + +TEST_P(MinMaxTest, CompareReference) { + uint8_t a[64], b[64]; + for (int j = 0; j < 64; j++) { + a[j] = rnd_.Rand8(); + b[j] = rnd_.Rand8(); + } + + int min_ref, max_ref, min, max; + reference_minmax(a, 8, b, 8, &min_ref, &max_ref); + ASM_REGISTER_STATE_CHECK(mm_func_(a, 8, b, 8, &min, &max)); + EXPECT_EQ(max_ref, max); + EXPECT_EQ(min_ref, min); +} + +TEST_P(MinMaxTest, CompareReferenceAndVaryStride) { + uint8_t a[8 * 64], b[8 * 64]; + for (int i = 0; i < 8 * 64; i++) { + a[i] = rnd_.Rand8(); + b[i] = rnd_.Rand8(); + } + for (int a_stride = 8; a_stride <= 64; a_stride += 8) { + for (int b_stride = 8; b_stride <= 64; b_stride += 8) { + int min_ref, max_ref, min, max; + reference_minmax(a, a_stride, b, b_stride, &min_ref, &max_ref); + ASM_REGISTER_STATE_CHECK(mm_func_(a, a_stride, b, b_stride, &min, &max)); + EXPECT_EQ(max_ref, max) << "when a_stride = " << a_stride + << " and b_stride = " << b_stride; + EXPECT_EQ(min_ref, min) << "when a_stride = " << a_stride + << " and b_stride = " << b_stride; + } + } +} + +INSTANTIATE_TEST_CASE_P(C, MinMaxTest, ::testing::Values(&vpx_minmax_8x8_c)); + +#if HAVE_SSE2 +INSTANTIATE_TEST_CASE_P(SSE2, MinMaxTest, + ::testing::Values(&vpx_minmax_8x8_sse2)); +#endif + +#if HAVE_NEON +INSTANTIATE_TEST_CASE_P(NEON, MinMaxTest, + ::testing::Values(&vpx_minmax_8x8_neon)); +#endif + +} // namespace diff --git a/libs/libvpx/test/partial_idct_test.cc b/libs/libvpx/test/partial_idct_test.cc index 6c824128b8..de6ff4a01b 100644 --- a/libs/libvpx/test/partial_idct_test.cc +++ b/libs/libvpx/test/partial_idct_test.cc @@ -29,10 +29,8 @@ using libvpx_test::ACMRandom; namespace { typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride); typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride); -typedef std::tr1::tuple PartialInvTxfmParam; +typedef std::tr1::tuple + 
PartialInvTxfmParam; const int kMaxNumCoeffs = 1024; class PartialIDctTest : public ::testing::TestWithParam { public: @@ -41,7 +39,7 @@ class PartialIDctTest : public ::testing::TestWithParam { ftxfm_ = GET_PARAM(0); full_itxfm_ = GET_PARAM(1); partial_itxfm_ = GET_PARAM(2); - tx_size_ = GET_PARAM(3); + tx_size_ = GET_PARAM(3); last_nonzero_ = GET_PARAM(4); } @@ -59,21 +57,11 @@ TEST_P(PartialIDctTest, RunQuantCheck) { ACMRandom rnd(ACMRandom::DeterministicSeed()); int size; switch (tx_size_) { - case TX_4X4: - size = 4; - break; - case TX_8X8: - size = 8; - break; - case TX_16X16: - size = 16; - break; - case TX_32X32: - size = 32; - break; - default: - FAIL() << "Wrong Size!"; - break; + case TX_4X4: size = 4; break; + case TX_8X8: size = 8; break; + case TX_16X16: size = 16; break; + case TX_32X32: size = 32; break; + default: FAIL() << "Wrong Size!"; break; } DECLARE_ALIGNED(16, tran_low_t, test_coef_block1[kMaxNumCoeffs]); DECLARE_ALIGNED(16, tran_low_t, test_coef_block2[kMaxNumCoeffs]); @@ -99,11 +87,9 @@ TEST_P(PartialIDctTest, RunQuantCheck) { for (int i = 0; i < count_test_block; ++i) { // Initialize a test block with input range [-255, 255]. if (i == 0) { - for (int j = 0; j < block_size; ++j) - input_extreme_block[j] = 255; + for (int j = 0; j < block_size; ++j) input_extreme_block[j] = 255; } else if (i == 1) { - for (int j = 0; j < block_size; ++j) - input_extreme_block[j] = -255; + for (int j = 0; j < block_size; ++j) input_extreme_block[j] = -255; } else { for (int j = 0; j < block_size; ++j) { input_extreme_block[j] = rnd.Rand8() % 2 ? 
255 : -255; @@ -114,9 +100,10 @@ TEST_P(PartialIDctTest, RunQuantCheck) { // quantization with maximum allowed step sizes test_coef_block1[0] = (output_ref_block[0] / 1336) * 1336; - for (int j = 1; j < last_nonzero_; ++j) - test_coef_block1[vp9_default_scan_orders[tx_size_].scan[j]] - = (output_ref_block[j] / 1828) * 1828; + for (int j = 1; j < last_nonzero_; ++j) { + test_coef_block1[vp9_default_scan_orders[tx_size_].scan[j]] = + (output_ref_block[j] / 1828) * 1828; + } } ASM_REGISTER_STATE_CHECK(full_itxfm_(test_coef_block1, dst1, size)); @@ -125,8 +112,7 @@ TEST_P(PartialIDctTest, RunQuantCheck) { for (int j = 0; j < block_size; ++j) { const int diff = dst1[j] - dst2[j]; const int error = diff * diff; - if (max_error < error) - max_error = error; + if (max_error < error) max_error = error; } } @@ -138,21 +124,11 @@ TEST_P(PartialIDctTest, ResultsMatch) { ACMRandom rnd(ACMRandom::DeterministicSeed()); int size; switch (tx_size_) { - case TX_4X4: - size = 4; - break; - case TX_8X8: - size = 8; - break; - case TX_16X16: - size = 16; - break; - case TX_32X32: - size = 32; - break; - default: - FAIL() << "Wrong Size!"; - break; + case TX_4X4: size = 4; break; + case TX_8X8: size = 8; break; + case TX_16X16: size = 16; break; + case TX_32X32: size = 32; break; + default: FAIL() << "Wrong Size!"; break; } DECLARE_ALIGNED(16, tran_low_t, test_coef_block1[kMaxNumCoeffs]); DECLARE_ALIGNED(16, tran_low_t, test_coef_block2[kMaxNumCoeffs]); @@ -189,8 +165,7 @@ TEST_P(PartialIDctTest, ResultsMatch) { for (int j = 0; j < block_size; ++j) { const int diff = dst1[j] - dst2[j]; const int error = diff * diff; - if (max_error < error) - max_error = error; + if (max_error < error) max_error = error; } } @@ -201,143 +176,82 @@ using std::tr1::make_tuple; INSTANTIATE_TEST_CASE_P( C, PartialIDctTest, - ::testing::Values( - make_tuple(&vpx_fdct32x32_c, - &vpx_idct32x32_1024_add_c, - &vpx_idct32x32_34_add_c, - TX_32X32, 34), - make_tuple(&vpx_fdct32x32_c, - &vpx_idct32x32_1024_add_c, - 
&vpx_idct32x32_1_add_c, - TX_32X32, 1), - make_tuple(&vpx_fdct16x16_c, - &vpx_idct16x16_256_add_c, - &vpx_idct16x16_10_add_c, - TX_16X16, 10), - make_tuple(&vpx_fdct16x16_c, - &vpx_idct16x16_256_add_c, - &vpx_idct16x16_1_add_c, - TX_16X16, 1), - make_tuple(&vpx_fdct8x8_c, - &vpx_idct8x8_64_add_c, - &vpx_idct8x8_12_add_c, - TX_8X8, 12), - make_tuple(&vpx_fdct8x8_c, - &vpx_idct8x8_64_add_c, - &vpx_idct8x8_1_add_c, - TX_8X8, 1), - make_tuple(&vpx_fdct4x4_c, - &vpx_idct4x4_16_add_c, - &vpx_idct4x4_1_add_c, - TX_4X4, 1))); + ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c, + &vpx_idct32x32_34_add_c, TX_32X32, 34), + make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c, + &vpx_idct32x32_1_add_c, TX_32X32, 1), + make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, + &vpx_idct16x16_10_add_c, TX_16X16, 10), + make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, + &vpx_idct16x16_1_add_c, TX_16X16, 1), + make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, + &vpx_idct8x8_12_add_c, TX_8X8, 12), + make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, + &vpx_idct8x8_1_add_c, TX_8X8, 1), + make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, + &vpx_idct4x4_1_add_c, TX_4X4, 1))); #if HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE INSTANTIATE_TEST_CASE_P( NEON, PartialIDctTest, - ::testing::Values( - make_tuple(&vpx_fdct32x32_c, - &vpx_idct32x32_1024_add_c, - &vpx_idct32x32_1_add_neon, - TX_32X32, 1), - make_tuple(&vpx_fdct16x16_c, - &vpx_idct16x16_256_add_c, - &vpx_idct16x16_10_add_neon, - TX_16X16, 10), - make_tuple(&vpx_fdct16x16_c, - &vpx_idct16x16_256_add_c, - &vpx_idct16x16_1_add_neon, - TX_16X16, 1), - make_tuple(&vpx_fdct8x8_c, - &vpx_idct8x8_64_add_c, - &vpx_idct8x8_12_add_neon, - TX_8X8, 12), - make_tuple(&vpx_fdct8x8_c, - &vpx_idct8x8_64_add_c, - &vpx_idct8x8_1_add_neon, - TX_8X8, 1), - make_tuple(&vpx_fdct4x4_c, - &vpx_idct4x4_16_add_c, - &vpx_idct4x4_1_add_neon, - TX_4X4, 1))); + ::testing::Values(make_tuple(&vpx_fdct32x32_c, 
&vpx_idct32x32_1024_add_c, + &vpx_idct32x32_1_add_neon, TX_32X32, 1), + make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, + &vpx_idct16x16_10_add_neon, TX_16X16, 10), + make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, + &vpx_idct16x16_1_add_neon, TX_16X16, 1), + make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, + &vpx_idct8x8_12_add_neon, TX_8X8, 12), + make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, + &vpx_idct8x8_1_add_neon, TX_8X8, 1), + make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, + &vpx_idct4x4_1_add_neon, TX_4X4, 1))); #endif // HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE #if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE INSTANTIATE_TEST_CASE_P( SSE2, PartialIDctTest, - ::testing::Values( - make_tuple(&vpx_fdct32x32_c, - &vpx_idct32x32_1024_add_c, - &vpx_idct32x32_34_add_sse2, - TX_32X32, 34), - make_tuple(&vpx_fdct32x32_c, - &vpx_idct32x32_1024_add_c, - &vpx_idct32x32_1_add_sse2, - TX_32X32, 1), - make_tuple(&vpx_fdct16x16_c, - &vpx_idct16x16_256_add_c, - &vpx_idct16x16_10_add_sse2, - TX_16X16, 10), - make_tuple(&vpx_fdct16x16_c, - &vpx_idct16x16_256_add_c, - &vpx_idct16x16_1_add_sse2, - TX_16X16, 1), - make_tuple(&vpx_fdct8x8_c, - &vpx_idct8x8_64_add_c, - &vpx_idct8x8_12_add_sse2, - TX_8X8, 12), - make_tuple(&vpx_fdct8x8_c, - &vpx_idct8x8_64_add_c, - &vpx_idct8x8_1_add_sse2, - TX_8X8, 1), - make_tuple(&vpx_fdct4x4_c, - &vpx_idct4x4_16_add_c, - &vpx_idct4x4_1_add_sse2, - TX_4X4, 1))); + ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c, + &vpx_idct32x32_34_add_sse2, TX_32X32, 34), + make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c, + &vpx_idct32x32_1_add_sse2, TX_32X32, 1), + make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, + &vpx_idct16x16_10_add_sse2, TX_16X16, 10), + make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, + &vpx_idct16x16_1_add_sse2, TX_16X16, 1), + make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, + &vpx_idct8x8_12_add_sse2, TX_8X8, 12), + 
make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, + &vpx_idct8x8_1_add_sse2, TX_8X8, 1), + make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, + &vpx_idct4x4_1_add_sse2, TX_4X4, 1))); #endif -#if HAVE_SSSE3 && CONFIG_USE_X86INC && ARCH_X86_64 && \ - !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE +#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_VP9_HIGHBITDEPTH && \ + !CONFIG_EMULATE_HARDWARE INSTANTIATE_TEST_CASE_P( SSSE3_64, PartialIDctTest, - ::testing::Values( - make_tuple(&vpx_fdct8x8_c, - &vpx_idct8x8_64_add_c, - &vpx_idct8x8_12_add_ssse3, - TX_8X8, 12))); + ::testing::Values(make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, + &vpx_idct8x8_12_add_ssse3, TX_8X8, 12))); #endif #if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE INSTANTIATE_TEST_CASE_P( MSA, PartialIDctTest, - ::testing::Values( - make_tuple(&vpx_fdct32x32_c, - &vpx_idct32x32_1024_add_c, - &vpx_idct32x32_34_add_msa, - TX_32X32, 34), - make_tuple(&vpx_fdct32x32_c, - &vpx_idct32x32_1024_add_c, - &vpx_idct32x32_1_add_msa, - TX_32X32, 1), - make_tuple(&vpx_fdct16x16_c, - &vpx_idct16x16_256_add_c, - &vpx_idct16x16_10_add_msa, - TX_16X16, 10), - make_tuple(&vpx_fdct16x16_c, - &vpx_idct16x16_256_add_c, - &vpx_idct16x16_1_add_msa, - TX_16X16, 1), - make_tuple(&vpx_fdct8x8_c, - &vpx_idct8x8_64_add_c, - &vpx_idct8x8_12_add_msa, - TX_8X8, 10), - make_tuple(&vpx_fdct8x8_c, - &vpx_idct8x8_64_add_c, - &vpx_idct8x8_1_add_msa, - TX_8X8, 1), - make_tuple(&vpx_fdct4x4_c, - &vpx_idct4x4_16_add_c, - &vpx_idct4x4_1_add_msa, - TX_4X4, 1))); + ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c, + &vpx_idct32x32_34_add_msa, TX_32X32, 34), + make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c, + &vpx_idct32x32_1_add_msa, TX_32X32, 1), + make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, + &vpx_idct16x16_10_add_msa, TX_16X16, 10), + make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, + &vpx_idct16x16_1_add_msa, TX_16X16, 1), + make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, 
+ &vpx_idct8x8_12_add_msa, TX_8X8, 10), + make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c, + &vpx_idct8x8_1_add_msa, TX_8X8, 1), + make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c, + &vpx_idct4x4_1_add_msa, TX_4X4, 1))); #endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE } // namespace diff --git a/libs/libvpx/test/pp_filter_test.cc b/libs/libvpx/test/pp_filter_test.cc index e4688dd8ce..2e34fed065 100644 --- a/libs/libvpx/test/pp_filter_test.cc +++ b/libs/libvpx/test/pp_filter_test.cc @@ -11,34 +11,28 @@ #include "test/register_state_check.h" #include "third_party/googletest/src/include/gtest/gtest.h" #include "./vpx_config.h" -#include "./vp8_rtcd.h" +#include "./vpx_dsp_rtcd.h" #include "vpx/vpx_integer.h" #include "vpx_mem/vpx_mem.h" -typedef void (*PostProcFunc)(unsigned char *src_ptr, - unsigned char *dst_ptr, - int src_pixels_per_line, - int dst_pixels_per_line, - int cols, - unsigned char *flimit, - int size); +typedef void (*PostProcFunc)(unsigned char *src_ptr, unsigned char *dst_ptr, + int src_pixels_per_line, int dst_pixels_per_line, + int cols, unsigned char *flimit, int size); namespace { -class VP8PostProcessingFilterTest +class VPxPostProcessingFilterTest : public ::testing::TestWithParam { public: - virtual void TearDown() { - libvpx_test::ClearSystemState(); - } + virtual void TearDown() { libvpx_test::ClearSystemState(); } }; -// Test routine for the VP8 post-processing function -// vp8_post_proc_down_and_across_mb_row_c. +// Test routine for the VPx post-processing function +// vpx_post_proc_down_and_across_mb_row_c. -TEST_P(VP8PostProcessingFilterTest, FilterOutputCheck) { +TEST_P(VPxPostProcessingFilterTest, FilterOutputCheck) { // Size of the underlying data block that will be filtered. - const int block_width = 16; + const int block_width = 16; const int block_height = 16; // 5-tap filter needs 2 padding rows above and below the block in the input. 
@@ -54,9 +48,9 @@ TEST_P(VP8PostProcessingFilterTest, FilterOutputCheck) { const int output_size = output_width * output_height; uint8_t *const src_image = - reinterpret_cast(vpx_calloc(input_size, 1)); + reinterpret_cast(vpx_calloc(input_size, 1)); uint8_t *const dst_image = - reinterpret_cast(vpx_calloc(output_size, 1)); + reinterpret_cast(vpx_calloc(output_size, 1)); // Pointers to top-left pixel of block in the input and output images. uint8_t *const src_image_ptr = src_image + (input_stride << 1); @@ -80,19 +74,18 @@ TEST_P(VP8PostProcessingFilterTest, FilterOutputCheck) { // Initialize pixels in the output to 99. (void)memset(dst_image, 99, output_size); - ASM_REGISTER_STATE_CHECK( - GetParam()(src_image_ptr, dst_image_ptr, input_stride, - output_stride, block_width, flimits, 16)); + ASM_REGISTER_STATE_CHECK(GetParam()(src_image_ptr, dst_image_ptr, + input_stride, output_stride, block_width, + flimits, 16)); - static const uint8_t expected_data[block_height] = { - 4, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 4 - }; + static const uint8_t expected_data[block_height] = { 4, 3, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 3, 4 }; pixel_ptr = dst_image_ptr; for (int i = 0; i < block_height; ++i) { for (int j = 0; j < block_width; ++j) { EXPECT_EQ(expected_data[i], pixel_ptr[j]) - << "VP8PostProcessingFilterTest failed with invalid filter output"; + << "VPxPostProcessingFilterTest failed with invalid filter output"; } pixel_ptr += output_stride; } @@ -102,17 +95,20 @@ TEST_P(VP8PostProcessingFilterTest, FilterOutputCheck) { vpx_free(flimits); }; -INSTANTIATE_TEST_CASE_P(C, VP8PostProcessingFilterTest, - ::testing::Values(vp8_post_proc_down_and_across_mb_row_c)); +INSTANTIATE_TEST_CASE_P( + C, VPxPostProcessingFilterTest, + ::testing::Values(vpx_post_proc_down_and_across_mb_row_c)); #if HAVE_SSE2 -INSTANTIATE_TEST_CASE_P(SSE2, VP8PostProcessingFilterTest, - ::testing::Values(vp8_post_proc_down_and_across_mb_row_sse2)); +INSTANTIATE_TEST_CASE_P( + SSE2, 
VPxPostProcessingFilterTest, + ::testing::Values(vpx_post_proc_down_and_across_mb_row_sse2)); #endif #if HAVE_MSA -INSTANTIATE_TEST_CASE_P(MSA, VP8PostProcessingFilterTest, - ::testing::Values(vp8_post_proc_down_and_across_mb_row_msa)); +INSTANTIATE_TEST_CASE_P( + MSA, VPxPostProcessingFilterTest, + ::testing::Values(vpx_post_proc_down_and_across_mb_row_msa)); #endif } // namespace diff --git a/libs/libvpx/test/predict_test.cc b/libs/libvpx/test/predict_test.cc new file mode 100644 index 0000000000..07841a1176 --- /dev/null +++ b/libs/libvpx/test/predict_test.cc @@ -0,0 +1,380 @@ +/* + * Copyright (c) 2013 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include +#include + +#include "third_party/googletest/src/include/gtest/gtest.h" + +#include "./vp8_rtcd.h" +#include "./vpx_config.h" +#include "test/acm_random.h" +#include "test/clear_system_state.h" +#include "test/register_state_check.h" +#include "test/util.h" +#include "vpx/vpx_integer.h" +#include "vpx_mem/vpx_mem.h" + +namespace { + +using libvpx_test::ACMRandom; +using std::tr1::make_tuple; + +typedef void (*PredictFunc)(uint8_t *src_ptr, int src_pixels_per_line, + int xoffset, int yoffset, uint8_t *dst_ptr, + int dst_pitch); + +typedef std::tr1::tuple PredictParam; + +class PredictTestBase : public ::testing::TestWithParam { + public: + PredictTestBase() + : width_(GET_PARAM(0)), height_(GET_PARAM(1)), predict_(GET_PARAM(2)), + src_(NULL), padded_dst_(NULL), dst_(NULL), dst_c_(NULL) {} + + virtual void SetUp() { + src_ = new uint8_t[kSrcSize]; + ASSERT_TRUE(src_ != NULL); + + // padded_dst_ provides a buffer of kBorderSize around the destination + // memory to 
facilitate detecting out of bounds writes. + dst_stride_ = kBorderSize + width_ + kBorderSize; + padded_dst_size_ = dst_stride_ * (kBorderSize + height_ + kBorderSize); + padded_dst_ = + reinterpret_cast(vpx_memalign(16, padded_dst_size_)); + ASSERT_TRUE(padded_dst_ != NULL); + dst_ = padded_dst_ + (kBorderSize * dst_stride_) + kBorderSize; + + dst_c_ = new uint8_t[16 * 16]; + ASSERT_TRUE(dst_c_ != NULL); + + memset(src_, 0, kSrcSize); + memset(padded_dst_, 128, padded_dst_size_); + memset(dst_c_, 0, 16 * 16); + } + + virtual void TearDown() { + delete[] src_; + src_ = NULL; + vpx_free(padded_dst_); + padded_dst_ = NULL; + dst_ = NULL; + delete[] dst_c_; + dst_c_ = NULL; + libvpx_test::ClearSystemState(); + } + + protected: + // Make reference arrays big enough for 16x16 functions. Six-tap filters need + // 5 extra pixels outside of the macroblock. + static const int kSrcStride = 21; + static const int kSrcSize = kSrcStride * kSrcStride; + static const int kBorderSize = 16; + + int width_; + int height_; + PredictFunc predict_; + uint8_t *src_; + uint8_t *padded_dst_; + uint8_t *dst_; + int padded_dst_size_; + uint8_t *dst_c_; + int dst_stride_; + + bool CompareBuffers(const uint8_t *a, int a_stride, const uint8_t *b, + int b_stride) const { + for (int height = 0; height < height_; ++height) { + EXPECT_EQ(0, memcmp(a + height * a_stride, b + height * b_stride, + sizeof(*a) * width_)) + << "Row " << height << " does not match."; + } + + return !HasFailure(); + } + + // Given a block of memory 'a' with size 'a_size', determine if all regions + // excepting block 'b' described by 'b_stride', 'b_height', and 'b_width' + // match pixel value 'c'. 
+ bool CheckBorder(const uint8_t *a, int a_size, const uint8_t *b, int b_width, + int b_height, int b_stride, uint8_t c) const { + const uint8_t *a_end = a + a_size; + const int b_size = (b_stride * b_height) + b_width; + const uint8_t *b_end = b + b_size; + const int left_border = (b_stride - b_width) / 2; + const int right_border = left_border + ((b_stride - b_width) % 2); + + EXPECT_GE(b - left_border, a) << "'b' does not start within 'a'"; + EXPECT_LE(b_end + right_border, a_end) << "'b' does not end within 'a'"; + + // Top border. + for (int pixel = 0; pixel < b - a - left_border; ++pixel) { + EXPECT_EQ(c, a[pixel]) << "Mismatch at " << pixel << " in top border."; + } + + // Left border. + for (int height = 0; height < b_height; ++height) { + for (int width = left_border; width > 0; --width) { + EXPECT_EQ(c, b[height * b_stride - width]) + << "Mismatch at row " << height << " column " << left_border - width + << " in left border."; + } + } + + // Right border. + for (int height = 0; height < b_height; ++height) { + for (int width = b_width; width < b_width + right_border; ++width) { + EXPECT_EQ(c, b[height * b_stride + width]) + << "Mismatch at row " << height << " column " << width - b_width + << " in right border."; + } + } + + // Bottom border. + for (int pixel = static_cast(b - a + b_size); pixel < a_size; + ++pixel) { + EXPECT_EQ(c, a[pixel]) << "Mismatch at " << pixel << " in bottom border."; + } + + return !HasFailure(); + } + + void TestWithRandomData(PredictFunc reference) { + ACMRandom rnd(ACMRandom::DeterministicSeed()); + + // Run tests for almost all possible offsets. + for (int xoffset = 0; xoffset < 8; ++xoffset) { + for (int yoffset = 0; yoffset < 8; ++yoffset) { + if (xoffset == 0 && yoffset == 0) { + // This represents a copy which is not required to be handled by this + // module. 
+ continue; + } + + for (int i = 0; i < kSrcSize; ++i) { + src_[i] = rnd.Rand8(); + } + reference(&src_[kSrcStride * 2 + 2], kSrcStride, xoffset, yoffset, + dst_c_, 16); + + ASM_REGISTER_STATE_CHECK(predict_(&src_[kSrcStride * 2 + 2], kSrcStride, + xoffset, yoffset, dst_, dst_stride_)); + + ASSERT_TRUE(CompareBuffers(dst_c_, 16, dst_, dst_stride_)); + ASSERT_TRUE(CheckBorder(padded_dst_, padded_dst_size_, dst_, width_, + height_, dst_stride_, 128)); + } + } + } + + void TestWithUnalignedDst(PredictFunc reference) { + ACMRandom rnd(ACMRandom::DeterministicSeed()); + + // Only the 4x4 need to be able to handle unaligned writes. + if (width_ == 4 && height_ == 4) { + for (int xoffset = 0; xoffset < 8; ++xoffset) { + for (int yoffset = 0; yoffset < 8; ++yoffset) { + if (xoffset == 0 && yoffset == 0) { + continue; + } + for (int i = 0; i < kSrcSize; ++i) { + src_[i] = rnd.Rand8(); + } + reference(&src_[kSrcStride * 2 + 2], kSrcStride, xoffset, yoffset, + dst_c_, 16); + + for (int i = 1; i < 4; ++i) { + memset(padded_dst_, 128, padded_dst_size_); + + ASM_REGISTER_STATE_CHECK(predict_(&src_[kSrcStride * 2 + 2], + kSrcStride, xoffset, yoffset, + dst_ + i, dst_stride_ + i)); + + ASSERT_TRUE(CompareBuffers(dst_c_, 16, dst_ + i, dst_stride_ + i)); + ASSERT_TRUE(CheckBorder(padded_dst_, padded_dst_size_, dst_ + i, + width_, height_, dst_stride_ + i, 128)); + } + } + } + } + } +}; + +class SixtapPredictTest : public PredictTestBase {}; + +TEST_P(SixtapPredictTest, TestWithRandomData) { + TestWithRandomData(vp8_sixtap_predict16x16_c); +} +TEST_P(SixtapPredictTest, TestWithUnalignedDst) { + TestWithUnalignedDst(vp8_sixtap_predict16x16_c); +} + +TEST_P(SixtapPredictTest, TestWithPresetData) { + // Test input + static const uint8_t kTestData[kSrcSize] = { + 184, 4, 191, 82, 92, 41, 0, 1, 226, 236, 172, 20, 182, 42, 226, + 177, 79, 94, 77, 179, 203, 206, 198, 22, 192, 19, 75, 17, 192, 44, + 233, 120, 48, 168, 203, 141, 210, 203, 143, 180, 184, 59, 201, 110, 102, + 171, 32, 182, 10, 
109, 105, 213, 60, 47, 236, 253, 67, 55, 14, 3, + 99, 247, 124, 148, 159, 71, 34, 114, 19, 177, 38, 203, 237, 239, 58, + 83, 155, 91, 10, 166, 201, 115, 124, 5, 163, 104, 2, 231, 160, 16, + 234, 4, 8, 103, 153, 167, 174, 187, 26, 193, 109, 64, 141, 90, 48, + 200, 174, 204, 36, 184, 114, 237, 43, 238, 242, 207, 86, 245, 182, 247, + 6, 161, 251, 14, 8, 148, 182, 182, 79, 208, 120, 188, 17, 6, 23, + 65, 206, 197, 13, 242, 126, 128, 224, 170, 110, 211, 121, 197, 200, 47, + 188, 207, 208, 184, 221, 216, 76, 148, 143, 156, 100, 8, 89, 117, 14, + 112, 183, 221, 54, 197, 208, 180, 69, 176, 94, 180, 131, 215, 121, 76, + 7, 54, 28, 216, 238, 249, 176, 58, 142, 64, 215, 242, 72, 49, 104, + 87, 161, 32, 52, 216, 230, 4, 141, 44, 181, 235, 224, 57, 195, 89, + 134, 203, 144, 162, 163, 126, 156, 84, 185, 42, 148, 145, 29, 221, 194, + 134, 52, 100, 166, 105, 60, 140, 110, 201, 184, 35, 181, 153, 93, 121, + 243, 227, 68, 131, 134, 232, 2, 35, 60, 187, 77, 209, 76, 106, 174, + 15, 241, 227, 115, 151, 77, 175, 36, 187, 121, 221, 223, 47, 118, 61, + 168, 105, 32, 237, 236, 167, 213, 238, 202, 17, 170, 24, 226, 247, 131, + 145, 6, 116, 117, 121, 11, 194, 41, 48, 126, 162, 13, 93, 209, 131, + 154, 122, 237, 187, 103, 217, 99, 60, 200, 45, 78, 115, 69, 49, 106, + 200, 194, 112, 60, 56, 234, 72, 251, 19, 120, 121, 182, 134, 215, 135, + 10, 114, 2, 247, 46, 105, 209, 145, 165, 153, 191, 243, 12, 5, 36, + 119, 206, 231, 231, 11, 32, 209, 83, 27, 229, 204, 149, 155, 83, 109, + 35, 93, 223, 37, 84, 14, 142, 37, 160, 52, 191, 96, 40, 204, 101, + 77, 67, 52, 53, 43, 63, 85, 253, 147, 113, 226, 96, 6, 125, 179, + 115, 161, 17, 83, 198, 101, 98, 85, 139, 3, 137, 75, 99, 178, 23, + 201, 255, 91, 253, 52, 134, 60, 138, 131, 208, 251, 101, 48, 2, 227, + 228, 118, 132, 245, 202, 75, 91, 44, 160, 231, 47, 41, 50, 147, 220, + 74, 92, 219, 165, 89, 16 + }; + + // Expected results for xoffset = 2 and yoffset = 2. 
+ static const int kExpectedDstStride = 16; + static const uint8_t kExpectedDst[256] = { + 117, 102, 74, 135, 42, 98, 175, 206, 70, 73, 222, 197, 50, 24, 39, + 49, 38, 105, 90, 47, 169, 40, 171, 215, 200, 73, 109, 141, 53, 85, + 177, 164, 79, 208, 124, 89, 212, 18, 81, 145, 151, 164, 217, 153, 91, + 154, 102, 102, 159, 75, 164, 152, 136, 51, 213, 219, 186, 116, 193, 224, + 186, 36, 231, 208, 84, 211, 155, 167, 35, 59, 42, 76, 216, 149, 73, + 201, 78, 149, 184, 100, 96, 196, 189, 198, 188, 235, 195, 117, 129, 120, + 129, 49, 25, 133, 113, 69, 221, 114, 70, 143, 99, 157, 108, 189, 140, + 78, 6, 55, 65, 240, 255, 245, 184, 72, 90, 100, 116, 131, 39, 60, + 234, 167, 33, 160, 88, 185, 200, 157, 159, 176, 127, 151, 138, 102, 168, + 106, 170, 86, 82, 219, 189, 76, 33, 115, 197, 106, 96, 198, 136, 97, + 141, 237, 151, 98, 137, 191, 185, 2, 57, 95, 142, 91, 255, 185, 97, + 137, 76, 162, 94, 173, 131, 193, 161, 81, 106, 72, 135, 222, 234, 137, + 66, 137, 106, 243, 210, 147, 95, 15, 137, 110, 85, 66, 16, 96, 167, + 147, 150, 173, 203, 140, 118, 196, 84, 147, 160, 19, 95, 101, 123, 74, + 132, 202, 82, 166, 12, 131, 166, 189, 170, 159, 85, 79, 66, 57, 152, + 132, 203, 194, 0, 1, 56, 146, 180, 224, 156, 28, 83, 181, 79, 76, + 80, 46, 160, 175, 59, 106, 43, 87, 75, 136, 85, 189, 46, 71, 200, + 90 + }; + + ASM_REGISTER_STATE_CHECK( + predict_(const_cast(kTestData) + kSrcStride * 2 + 2, + kSrcStride, 2, 2, dst_, dst_stride_)); + + ASSERT_TRUE( + CompareBuffers(kExpectedDst, kExpectedDstStride, dst_, dst_stride_)); +} + +INSTANTIATE_TEST_CASE_P( + C, SixtapPredictTest, + ::testing::Values(make_tuple(16, 16, &vp8_sixtap_predict16x16_c), + make_tuple(8, 8, &vp8_sixtap_predict8x8_c), + make_tuple(8, 4, &vp8_sixtap_predict8x4_c), + make_tuple(4, 4, &vp8_sixtap_predict4x4_c))); +#if HAVE_NEON +INSTANTIATE_TEST_CASE_P( + NEON, SixtapPredictTest, + ::testing::Values(make_tuple(16, 16, &vp8_sixtap_predict16x16_neon), + make_tuple(8, 8, &vp8_sixtap_predict8x8_neon), + make_tuple(8, 4, 
&vp8_sixtap_predict8x4_neon))); +#endif +#if HAVE_MMX +INSTANTIATE_TEST_CASE_P( + MMX, SixtapPredictTest, + ::testing::Values(make_tuple(16, 16, &vp8_sixtap_predict16x16_mmx), + make_tuple(8, 8, &vp8_sixtap_predict8x8_mmx), + make_tuple(8, 4, &vp8_sixtap_predict8x4_mmx), + make_tuple(4, 4, &vp8_sixtap_predict4x4_mmx))); +#endif +#if HAVE_SSE2 +INSTANTIATE_TEST_CASE_P( + SSE2, SixtapPredictTest, + ::testing::Values(make_tuple(16, 16, &vp8_sixtap_predict16x16_sse2), + make_tuple(8, 8, &vp8_sixtap_predict8x8_sse2), + make_tuple(8, 4, &vp8_sixtap_predict8x4_sse2))); +#endif +#if HAVE_SSSE3 +INSTANTIATE_TEST_CASE_P( + SSSE3, SixtapPredictTest, + ::testing::Values(make_tuple(16, 16, &vp8_sixtap_predict16x16_ssse3), + make_tuple(8, 8, &vp8_sixtap_predict8x8_ssse3), + make_tuple(8, 4, &vp8_sixtap_predict8x4_ssse3), + make_tuple(4, 4, &vp8_sixtap_predict4x4_ssse3))); +#endif +#if HAVE_MSA +INSTANTIATE_TEST_CASE_P( + MSA, SixtapPredictTest, + ::testing::Values(make_tuple(16, 16, &vp8_sixtap_predict16x16_msa), + make_tuple(8, 8, &vp8_sixtap_predict8x8_msa), + make_tuple(8, 4, &vp8_sixtap_predict8x4_msa), + make_tuple(4, 4, &vp8_sixtap_predict4x4_msa))); +#endif + +class BilinearPredictTest : public PredictTestBase {}; + +TEST_P(BilinearPredictTest, TestWithRandomData) { + TestWithRandomData(vp8_bilinear_predict16x16_c); +} +TEST_P(BilinearPredictTest, TestWithUnalignedDst) { + TestWithUnalignedDst(vp8_bilinear_predict16x16_c); +} + +INSTANTIATE_TEST_CASE_P( + C, BilinearPredictTest, + ::testing::Values(make_tuple(16, 16, &vp8_bilinear_predict16x16_c), + make_tuple(8, 8, &vp8_bilinear_predict8x8_c), + make_tuple(8, 4, &vp8_bilinear_predict8x4_c), + make_tuple(4, 4, &vp8_bilinear_predict4x4_c))); +#if HAVE_NEON +INSTANTIATE_TEST_CASE_P( + NEON, BilinearPredictTest, + ::testing::Values(make_tuple(16, 16, &vp8_bilinear_predict16x16_neon), + make_tuple(8, 8, &vp8_bilinear_predict8x8_neon), + make_tuple(8, 4, &vp8_bilinear_predict8x4_neon), + make_tuple(4, 4, 
&vp8_bilinear_predict4x4_neon))); +#endif +#if HAVE_MMX +INSTANTIATE_TEST_CASE_P( + MMX, BilinearPredictTest, + ::testing::Values(make_tuple(16, 16, &vp8_bilinear_predict16x16_mmx), + make_tuple(8, 8, &vp8_bilinear_predict8x8_mmx), + make_tuple(8, 4, &vp8_bilinear_predict8x4_mmx), + make_tuple(4, 4, &vp8_bilinear_predict4x4_mmx))); +#endif +#if HAVE_SSE2 +INSTANTIATE_TEST_CASE_P( + SSE2, BilinearPredictTest, + ::testing::Values(make_tuple(16, 16, &vp8_bilinear_predict16x16_sse2), + make_tuple(8, 8, &vp8_bilinear_predict8x8_sse2))); +#endif +#if HAVE_SSSE3 +INSTANTIATE_TEST_CASE_P( + SSSE3, BilinearPredictTest, + ::testing::Values(make_tuple(16, 16, &vp8_bilinear_predict16x16_ssse3), + make_tuple(8, 8, &vp8_bilinear_predict8x8_ssse3))); +#endif +#if HAVE_MSA +INSTANTIATE_TEST_CASE_P( + MSA, BilinearPredictTest, + ::testing::Values(make_tuple(16, 16, &vp8_bilinear_predict16x16_msa), + make_tuple(8, 8, &vp8_bilinear_predict8x8_msa), + make_tuple(8, 4, &vp8_bilinear_predict8x4_msa), + make_tuple(4, 4, &vp8_bilinear_predict4x4_msa))); +#endif +} // namespace diff --git a/libs/libvpx/test/realtime_test.cc b/libs/libvpx/test/realtime_test.cc new file mode 100644 index 0000000000..63f1ac3c29 --- /dev/null +++ b/libs/libvpx/test/realtime_test.cc @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include "test/codec_factory.h" +#include "test/encode_test_driver.h" +#include "test/util.h" +#include "test/video_source.h" +#include "third_party/googletest/src/include/gtest/gtest.h" + +namespace { + +const int kVideoSourceWidth = 320; +const int kVideoSourceHeight = 240; +const int kFramesToEncode = 2; + +class RealtimeTest + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWithParam { + protected: + RealtimeTest() : EncoderTest(GET_PARAM(0)), frame_packets_(0) {} + virtual ~RealtimeTest() {} + + virtual void SetUp() { + InitializeConfig(); + cfg_.g_lag_in_frames = 0; + SetMode(::libvpx_test::kRealTime); + } + + virtual void BeginPassHook(unsigned int /*pass*/) { + // TODO(tomfinegan): We're changing the pass value here to make sure + // we get frames when real time mode is combined with |g_pass| set to + // VPX_RC_FIRST_PASS. This is necessary because EncoderTest::RunLoop() sets + // the pass value based on the mode passed into EncoderTest::SetMode(), + // which overrides the one specified in SetUp() above. 
+ cfg_.g_pass = VPX_RC_FIRST_PASS; + } + virtual void FramePktHook(const vpx_codec_cx_pkt_t * /*pkt*/) { + frame_packets_++; + } + + int frame_packets_; +}; + +TEST_P(RealtimeTest, RealtimeFirstPassProducesFrames) { + ::libvpx_test::RandomVideoSource video; + video.SetSize(kVideoSourceWidth, kVideoSourceHeight); + video.set_limit(kFramesToEncode); + ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); + EXPECT_EQ(kFramesToEncode, frame_packets_); +} + +VP8_INSTANTIATE_TEST_CASE(RealtimeTest, + ::testing::Values(::libvpx_test::kRealTime)); +VP9_INSTANTIATE_TEST_CASE(RealtimeTest, + ::testing::Values(::libvpx_test::kRealTime)); + +} // namespace diff --git a/libs/libvpx/test/register_state_check.h b/libs/libvpx/test/register_state_check.h index 489c419424..b8ba34328c 100644 --- a/libs/libvpx/test/register_state_check.h +++ b/libs/libvpx/test/register_state_check.h @@ -36,16 +36,10 @@ #include #include -namespace testing { -namespace internal { - -inline bool operator==(const M128A& lhs, const M128A& rhs) { +inline bool operator==(const M128A &lhs, const M128A &rhs) { return (lhs.Low == rhs.Low && lhs.High == rhs.High); } -} // namespace internal -} // namespace testing - namespace libvpx_test { // Compares the state of xmm[6-15] at construction with their state at @@ -54,10 +48,10 @@ namespace libvpx_test { class RegisterStateCheck { public: RegisterStateCheck() { initialized_ = StoreRegisters(&pre_context_); } - ~RegisterStateCheck() { EXPECT_TRUE(Check()); } + ~RegisterStateCheck() { Check(); } private: - static bool StoreRegisters(CONTEXT* const context) { + static bool StoreRegisters(CONTEXT *const context) { const HANDLE this_thread = GetCurrentThread(); EXPECT_TRUE(this_thread != NULL); context->ContextFlags = CONTEXT_FLOATING_POINT; @@ -67,34 +61,34 @@ class RegisterStateCheck { } // Compares the register state. Returns true if the states match. 
- bool Check() const { - if (!initialized_) return false; + void Check() const { + ASSERT_TRUE(initialized_); CONTEXT post_context; - if (!StoreRegisters(&post_context)) return false; + ASSERT_TRUE(StoreRegisters(&post_context)); - const M128A* xmm_pre = &pre_context_.Xmm6; - const M128A* xmm_post = &post_context.Xmm6; + const M128A *xmm_pre = &pre_context_.Xmm6; + const M128A *xmm_post = &post_context.Xmm6; for (int i = 6; i <= 15; ++i) { EXPECT_EQ(*xmm_pre, *xmm_post) << "xmm" << i << " has been modified!"; ++xmm_pre; ++xmm_post; } - return !testing::Test::HasNonfatalFailure(); } bool initialized_; CONTEXT pre_context_; }; -#define ASM_REGISTER_STATE_CHECK(statement) do { \ - libvpx_test::RegisterStateCheck reg_check; \ - statement; \ -} while (false) +#define ASM_REGISTER_STATE_CHECK(statement) \ + do { \ + libvpx_test::RegisterStateCheck reg_check; \ + statement; \ + } while (false) } // namespace libvpx_test -#elif defined(CONFIG_SHARED) && defined(HAVE_NEON_ASM) && defined(CONFIG_VP9) \ - && !CONFIG_SHARED && HAVE_NEON_ASM && CONFIG_VP9 +#elif defined(CONFIG_SHARED) && defined(HAVE_NEON_ASM) && \ + defined(CONFIG_VP9) && !CONFIG_SHARED && HAVE_NEON_ASM && CONFIG_VP9 extern "C" { // Save the d8-d15 registers into store. @@ -108,35 +102,28 @@ namespace libvpx_test { // arm platform. class RegisterStateCheck { public: - RegisterStateCheck() { initialized_ = StoreRegisters(pre_store_); } - ~RegisterStateCheck() { EXPECT_TRUE(Check()); } + RegisterStateCheck() { vpx_push_neon(pre_store_); } + ~RegisterStateCheck() { Check(); } private: - static bool StoreRegisters(int64_t store[8]) { - vpx_push_neon(store); - return true; - } - // Compares the register state. Returns true if the states match. 
- bool Check() const { - if (!initialized_) return false; + void Check() const { int64_t post_store[8]; vpx_push_neon(post_store); for (int i = 0; i < 8; ++i) { - EXPECT_EQ(pre_store_[i], post_store[i]) << "d" - << i + 8 << " has been modified"; + EXPECT_EQ(pre_store_[i], post_store[i]) << "d" << i + 8 + << " has been modified"; } - return !testing::Test::HasNonfatalFailure(); } - bool initialized_; int64_t pre_store_[8]; }; -#define ASM_REGISTER_STATE_CHECK(statement) do { \ - libvpx_test::RegisterStateCheck reg_check; \ - statement; \ -} while (false) +#define ASM_REGISTER_STATE_CHECK(statement) \ + do { \ + libvpx_test::RegisterStateCheck reg_check; \ + statement; \ + } while (false) } // namespace libvpx_test @@ -162,12 +149,12 @@ class RegisterStateCheckMMX { RegisterStateCheckMMX() { __asm__ volatile("fstenv %0" : "=rm"(pre_fpu_env_)); } - ~RegisterStateCheckMMX() { EXPECT_TRUE(Check()); } + ~RegisterStateCheckMMX() { Check(); } private: // Checks the FPU tag word pre/post execution, returning false if not cleared // to 0xffff. 
- bool Check() const { + void Check() const { EXPECT_EQ(0xffff, pre_fpu_env_[4]) << "FPU was in an inconsistent state prior to call"; @@ -175,16 +162,16 @@ class RegisterStateCheckMMX { __asm__ volatile("fstenv %0" : "=rm"(post_fpu_env)); EXPECT_EQ(0xffff, post_fpu_env[4]) << "FPU was left in an inconsistent state after call"; - return !testing::Test::HasNonfatalFailure(); } uint16_t pre_fpu_env_[14]; }; -#define API_REGISTER_STATE_CHECK(statement) do { \ - libvpx_test::RegisterStateCheckMMX reg_check; \ - ASM_REGISTER_STATE_CHECK(statement); \ -} while (false) +#define API_REGISTER_STATE_CHECK(statement) \ + do { \ + libvpx_test::RegisterStateCheckMMX reg_check; \ + ASM_REGISTER_STATE_CHECK(statement); \ + } while (false) } // namespace libvpx_test diff --git a/libs/libvpx/test/resize_test.cc b/libs/libvpx/test/resize_test.cc index 017730899b..c9950dd433 100644 --- a/libs/libvpx/test/resize_test.cc +++ b/libs/libvpx/test/resize_test.cc @@ -7,6 +7,8 @@ * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. 
*/ +#include + #include #include #include "third_party/googletest/src/include/gtest/gtest.h" @@ -42,9 +44,9 @@ static void write_ivf_file_header(const vpx_codec_enc_cfg_t *const cfg, header[1] = 'K'; header[2] = 'I'; header[3] = 'F'; - mem_put_le16(header + 4, 0); /* version */ - mem_put_le16(header + 6, 32); /* headersize */ - mem_put_le32(header + 8, 0x30395056); /* fourcc (vp9) */ + mem_put_le16(header + 4, 0); /* version */ + mem_put_le16(header + 6, 32); /* headersize */ + mem_put_le32(header + 8, 0x30395056); /* fourcc (vp9) */ mem_put_le16(header + 12, cfg->g_w); /* width */ mem_put_le16(header + 14, cfg->g_h); /* height */ mem_put_le32(header + 16, cfg->g_timebase.den); /* rate */ @@ -66,8 +68,7 @@ static void write_ivf_frame_header(const vpx_codec_cx_pkt_t *const pkt, char header[12]; vpx_codec_pts_t pts; - if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) - return; + if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) return; pts = pkt->data.frame.pts; mem_put_le32(header, static_cast(pkt->data.frame.sz)); @@ -90,12 +91,9 @@ struct FrameInfo { unsigned int h; }; -void ScaleForFrameNumber(unsigned int frame, - unsigned int initial_w, - unsigned int initial_h, - unsigned int *w, - unsigned int *h, - int flag_codec) { +void ScaleForFrameNumber(unsigned int frame, unsigned int initial_w, + unsigned int initial_h, unsigned int *w, + unsigned int *h, int flag_codec) { if (frame < 10) { *w = initial_w; *h = initial_h; @@ -217,7 +215,7 @@ void ScaleForFrameNumber(unsigned int frame, return; } if (frame < 250) { - *w = initial_w / 2; + *w = initial_w / 2; *h = initial_h / 2; return; } @@ -266,8 +264,9 @@ class ResizingVideoSource : public ::libvpx_test::DummyVideoSource { } }; -class ResizeTest : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWithParam { +class ResizeTest + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWithParam { protected: ResizeTest() : EncoderTest(GET_PARAM(0)) {} @@ -283,7 +282,7 @@ class ResizeTest : public 
::libvpx_test::EncoderTest, frame_info_list_.push_back(FrameInfo(pts, img.d_w, img.d_h)); } - std::vector< FrameInfo > frame_info_list_; + std::vector frame_info_list_; }; TEST_P(ResizeTest, TestExternalResizeWorks) { @@ -297,12 +296,12 @@ TEST_P(ResizeTest, TestExternalResizeWorks) { const unsigned int frame = static_cast(info->pts); unsigned int expected_w; unsigned int expected_h; - ScaleForFrameNumber(frame, kInitialWidth, kInitialHeight, - &expected_w, &expected_h, 0); - EXPECT_EQ(expected_w, info->w) - << "Frame " << frame << " had unexpected width"; - EXPECT_EQ(expected_h, info->h) - << "Frame " << frame << " had unexpected height"; + ScaleForFrameNumber(frame, kInitialWidth, kInitialHeight, &expected_w, + &expected_h, 0); + EXPECT_EQ(expected_w, info->w) << "Frame " << frame + << " had unexpected width"; + EXPECT_EQ(expected_h, info->h) << "Frame " << frame + << " had unexpected height"; } } @@ -313,10 +312,7 @@ class ResizeInternalTest : public ResizeTest { protected: #if WRITE_COMPRESSED_STREAM ResizeInternalTest() - : ResizeTest(), - frame0_psnr_(0.0), - outfile_(NULL), - out_frames_(0) {} + : ResizeTest(), frame0_psnr_(0.0), outfile_(NULL), out_frames_(0) {} #else ResizeInternalTest() : ResizeTest(), frame0_psnr_(0.0) {} #endif @@ -345,30 +341,29 @@ class ResizeInternalTest : public ResizeTest { if (change_config_) { int new_q = 60; if (video->frame() == 0) { - struct vpx_scaling_mode mode = {VP8E_ONETWO, VP8E_ONETWO}; + struct vpx_scaling_mode mode = { VP8E_ONETWO, VP8E_ONETWO }; encoder->Control(VP8E_SET_SCALEMODE, &mode); } if (video->frame() == 1) { - struct vpx_scaling_mode mode = {VP8E_NORMAL, VP8E_NORMAL}; + struct vpx_scaling_mode mode = { VP8E_NORMAL, VP8E_NORMAL }; encoder->Control(VP8E_SET_SCALEMODE, &mode); cfg_.rc_min_quantizer = cfg_.rc_max_quantizer = new_q; encoder->Config(&cfg_); } } else { if (video->frame() == kStepDownFrame) { - struct vpx_scaling_mode mode = {VP8E_FOURFIVE, VP8E_THREEFIVE}; + struct vpx_scaling_mode mode = { 
VP8E_FOURFIVE, VP8E_THREEFIVE }; encoder->Control(VP8E_SET_SCALEMODE, &mode); } if (video->frame() == kStepUpFrame) { - struct vpx_scaling_mode mode = {VP8E_NORMAL, VP8E_NORMAL}; + struct vpx_scaling_mode mode = { VP8E_NORMAL, VP8E_NORMAL }; encoder->Control(VP8E_SET_SCALEMODE, &mode); } } } virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) { - if (!frame0_psnr_) - frame0_psnr_ = pkt->data.psnr.psnr[0]; + if (frame0_psnr_ == 0.) frame0_psnr_ = pkt->data.psnr.psnr[0]; EXPECT_NEAR(pkt->data.psnr.psnr[0], frame0_psnr_, 2.0); } @@ -377,8 +372,7 @@ class ResizeInternalTest : public ResizeTest { ++out_frames_; // Write initial file header if first frame. - if (pkt->data.frame.pts == 0) - write_ivf_file_header(&cfg_, 0, outfile_); + if (pkt->data.frame.pts == 0) write_ivf_file_header(&cfg_, 0, outfile_); // Write frame header and data. write_ivf_frame_header(pkt, outfile_); @@ -432,8 +426,9 @@ TEST_P(ResizeInternalTest, TestInternalResizeChangeConfig) { ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); } -class ResizeRealtimeTest : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWith2Params { +class ResizeRealtimeTest + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWith2Params { protected: ResizeRealtimeTest() : EncoderTest(GET_PARAM(0)) {} virtual ~ResizeRealtimeTest() {} @@ -463,6 +458,14 @@ class ResizeRealtimeTest : public ::libvpx_test::EncoderTest, frame_info_list_.push_back(FrameInfo(pts, img.d_w, img.d_h)); } + virtual void MismatchHook(const vpx_image_t *img1, const vpx_image_t *img2) { + double mismatch_psnr = compute_psnr(img1, img2); + mismatch_psnr_ += mismatch_psnr; + ++mismatch_nframes_; + } + + unsigned int GetMismatchFrames() { return mismatch_nframes_; } + void DefaultConfig() { cfg_.rc_buf_initial_sz = 500; cfg_.rc_buf_optimal_sz = 600; @@ -478,16 +481,18 @@ class ResizeRealtimeTest : public ::libvpx_test::EncoderTest, // Enable dropped frames. cfg_.rc_dropframe_thresh = 1; // Enable error_resilience mode. 
- cfg_.g_error_resilient = 1; + cfg_.g_error_resilient = 1; // Enable dynamic resizing. cfg_.rc_resize_allowed = 1; // Run at low bitrate. cfg_.rc_target_bitrate = 200; } - std::vector< FrameInfo > frame_info_list_; + std::vector frame_info_list_; int set_cpu_used_; bool change_bitrate_; + double mismatch_psnr_; + int mismatch_nframes_; }; TEST_P(ResizeRealtimeTest, TestExternalResizeWorks) { @@ -497,6 +502,8 @@ TEST_P(ResizeRealtimeTest, TestExternalResizeWorks) { // Disable internal resize for this test. cfg_.rc_resize_allowed = 0; change_bitrate_ = false; + mismatch_psnr_ = 0.0; + mismatch_nframes_ = 0; ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); for (std::vector::const_iterator info = frame_info_list_.begin(); @@ -504,12 +511,13 @@ TEST_P(ResizeRealtimeTest, TestExternalResizeWorks) { const unsigned int frame = static_cast(info->pts); unsigned int expected_w; unsigned int expected_h; - ScaleForFrameNumber(frame, kInitialWidth, kInitialHeight, - &expected_w, &expected_h, 1); - EXPECT_EQ(expected_w, info->w) - << "Frame " << frame << " had unexpected width"; - EXPECT_EQ(expected_h, info->h) - << "Frame " << frame << " had unexpected height"; + ScaleForFrameNumber(frame, kInitialWidth, kInitialHeight, &expected_w, + &expected_h, 1); + EXPECT_EQ(expected_w, info->w) << "Frame " << frame + << " had unexpected width"; + EXPECT_EQ(expected_h, info->h) << "Frame " << frame + << " had unexpected height"; + EXPECT_EQ(static_cast(0), GetMismatchFrames()); } } @@ -523,6 +531,8 @@ TEST_P(ResizeRealtimeTest, TestInternalResizeDown) { cfg_.g_w = 352; cfg_.g_h = 288; change_bitrate_ = false; + mismatch_psnr_ = 0.0; + mismatch_nframes_ = 0; ASSERT_NO_FATAL_FAILURE(RunLoop(&video)); unsigned int last_w = cfg_.g_w; @@ -540,8 +550,13 @@ TEST_P(ResizeRealtimeTest, TestInternalResizeDown) { } } +#if CONFIG_VP9_DECODER // Verify that we get 1 resize down event in this test. 
ASSERT_EQ(1, resize_count) << "Resizing should occur."; + EXPECT_EQ(static_cast(0), GetMismatchFrames()); +#else + printf("Warning: VP9 decoder unavailable, unable to check resize count!\n"); +#endif } // Verify the dynamic resizer behavior for real time, 1 pass CBR mode. @@ -554,6 +569,8 @@ TEST_P(ResizeRealtimeTest, TestInternalResizeDownUpChangeBitRate) { cfg_.g_w = 352; cfg_.g_h = 288; change_bitrate_ = true; + mismatch_psnr_ = 0.0; + mismatch_nframes_ = 0; // Disable dropped frames. cfg_.rc_dropframe_thresh = 0; // Starting bitrate low. @@ -581,15 +598,18 @@ TEST_P(ResizeRealtimeTest, TestInternalResizeDownUpChangeBitRate) { } } +#if CONFIG_VP9_DECODER // Verify that we get 2 resize events in this test. ASSERT_EQ(resize_count, 2) << "Resizing should occur twice."; + EXPECT_EQ(static_cast(0), GetMismatchFrames()); +#else + printf("Warning: VP9 decoder unavailable, unable to check resize count!\n"); +#endif } vpx_img_fmt_t CspForFrameNumber(int frame) { - if (frame < 10) - return VPX_IMG_FMT_I420; - if (frame < 20) - return VPX_IMG_FMT_I444; + if (frame < 10) return VPX_IMG_FMT_I420; + if (frame < 20) return VPX_IMG_FMT_I444; return VPX_IMG_FMT_I420; } @@ -597,10 +617,7 @@ class ResizeCspTest : public ResizeTest { protected: #if WRITE_COMPRESSED_STREAM ResizeCspTest() - : ResizeTest(), - frame0_psnr_(0.0), - outfile_(NULL), - out_frames_(0) {} + : ResizeTest(), frame0_psnr_(0.0), outfile_(NULL), out_frames_(0) {} #else ResizeCspTest() : ResizeTest(), frame0_psnr_(0.0) {} #endif @@ -639,8 +656,7 @@ class ResizeCspTest : public ResizeTest { } virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) { - if (!frame0_psnr_) - frame0_psnr_ = pkt->data.psnr.psnr[0]; + if (frame0_psnr_ == 0.) frame0_psnr_ = pkt->data.psnr.psnr[0]; EXPECT_NEAR(pkt->data.psnr.psnr[0], frame0_psnr_, 2.0); } @@ -649,8 +665,7 @@ class ResizeCspTest : public ResizeTest { ++out_frames_; // Write initial file header if first frame. 
- if (pkt->data.frame.pts == 0) - write_ivf_file_header(&cfg_, 0, outfile_); + if (pkt->data.frame.pts == 0) write_ivf_file_header(&cfg_, 0, outfile_); // Write frame header and data. write_ivf_frame_header(pkt, outfile_); diff --git a/libs/libvpx/test/sad_test.cc b/libs/libvpx/test/sad_test.cc index 3f0f74cae6..837b08fbdf 100644 --- a/libs/libvpx/test/sad_test.cc +++ b/libs/libvpx/test/sad_test.cc @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include #include #include @@ -25,50 +24,72 @@ #include "vpx_mem/vpx_mem.h" #include "vpx_ports/mem.h" -typedef unsigned int (*SadMxNFunc)(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride); -typedef std::tr1::tuple SadMxNParam; +template +struct TestParams { + TestParams(int w, int h, Function f, int bd = -1) + : width(w), height(h), bit_depth(bd), func(f) {} + int width, height, bit_depth; + Function func; +}; -typedef uint32_t (*SadMxNAvgFunc)(const uint8_t *src_ptr, - int src_stride, - const uint8_t *ref_ptr, - int ref_stride, - const uint8_t *second_pred); -typedef std::tr1::tuple SadMxNAvgParam; +typedef unsigned int (*SadMxNFunc)(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride); +typedef TestParams SadMxNParam; -typedef void (*SadMxNx4Func)(const uint8_t *src_ptr, - int src_stride, - const uint8_t *const ref_ptr[], - int ref_stride, - uint32_t *sad_array); -typedef std::tr1::tuple SadMxNx4Param; +typedef unsigned int (*SadMxNAvgFunc)(const uint8_t *src_ptr, int src_stride, + const uint8_t *ref_ptr, int ref_stride, + const uint8_t *second_pred); +typedef TestParams SadMxNAvgParam; + +typedef void (*SadMxNx4Func)(const uint8_t *src_ptr, int src_stride, + const uint8_t *const ref_ptr[], int ref_stride, + unsigned int *sad_array); +typedef TestParams SadMxNx4Param; using libvpx_test::ACMRandom; namespace { -class SADTestBase : public ::testing::Test { +template +class SADTestBase : public ::testing::TestWithParam { 
public: - SADTestBase(int width, int height, int bit_depth) : - width_(width), height_(height), bd_(bit_depth) {} + explicit SADTestBase(const ParamType ¶ms) : params_(params) {} - static void SetUpTestCase() { - source_data8_ = reinterpret_cast( + virtual void SetUp() { + source_data8_ = reinterpret_cast( vpx_memalign(kDataAlignment, kDataBlockSize)); - reference_data8_ = reinterpret_cast( + reference_data8_ = reinterpret_cast( vpx_memalign(kDataAlignment, kDataBufferSize)); - second_pred8_ = reinterpret_cast( - vpx_memalign(kDataAlignment, 64*64)); - source_data16_ = reinterpret_cast( - vpx_memalign(kDataAlignment, kDataBlockSize*sizeof(uint16_t))); - reference_data16_ = reinterpret_cast( - vpx_memalign(kDataAlignment, kDataBufferSize*sizeof(uint16_t))); - second_pred16_ = reinterpret_cast( - vpx_memalign(kDataAlignment, 64*64*sizeof(uint16_t))); + second_pred8_ = + reinterpret_cast(vpx_memalign(kDataAlignment, 64 * 64)); + source_data16_ = reinterpret_cast( + vpx_memalign(kDataAlignment, kDataBlockSize * sizeof(uint16_t))); + reference_data16_ = reinterpret_cast( + vpx_memalign(kDataAlignment, kDataBufferSize * sizeof(uint16_t))); + second_pred16_ = reinterpret_cast( + vpx_memalign(kDataAlignment, 64 * 64 * sizeof(uint16_t))); + + if (params_.bit_depth == -1) { + use_high_bit_depth_ = false; + bit_depth_ = VPX_BITS_8; + source_data_ = source_data8_; + reference_data_ = reference_data8_; + second_pred_ = second_pred8_; +#if CONFIG_VP9_HIGHBITDEPTH + } else { + use_high_bit_depth_ = true; + bit_depth_ = static_cast(params_.bit_depth); + source_data_ = CONVERT_TO_BYTEPTR(source_data16_); + reference_data_ = CONVERT_TO_BYTEPTR(reference_data16_); + second_pred_ = CONVERT_TO_BYTEPTR(second_pred16_); +#endif // CONFIG_VP9_HIGHBITDEPTH + } + mask_ = (1 << bit_depth_) - 1; + source_stride_ = (params_.width + 31) & ~31; + reference_stride_ = params_.width * 2; + rnd_.Reset(ACMRandom::DeterministicSeed()); } - static void TearDownTestCase() { + virtual void TearDown() { 
vpx_free(source_data8_); source_data8_ = NULL; vpx_free(reference_data8_); @@ -81,9 +102,7 @@ class SADTestBase : public ::testing::Test { reference_data16_ = NULL; vpx_free(second_pred16_); second_pred16_ = NULL; - } - virtual void TearDown() { libvpx_test::ClearSystemState(); } @@ -93,50 +112,29 @@ class SADTestBase : public ::testing::Test { static const int kDataBlockSize = 64 * 128; static const int kDataBufferSize = 4 * kDataBlockSize; - virtual void SetUp() { - if (bd_ == -1) { - use_high_bit_depth_ = false; - bit_depth_ = VPX_BITS_8; - source_data_ = source_data8_; - reference_data_ = reference_data8_; - second_pred_ = second_pred8_; + uint8_t *GetReference(int block_idx) const { #if CONFIG_VP9_HIGHBITDEPTH - } else { - use_high_bit_depth_ = true; - bit_depth_ = static_cast(bd_); - source_data_ = CONVERT_TO_BYTEPTR(source_data16_); - reference_data_ = CONVERT_TO_BYTEPTR(reference_data16_); - second_pred_ = CONVERT_TO_BYTEPTR(second_pred16_); -#endif // CONFIG_VP9_HIGHBITDEPTH - } - mask_ = (1 << bit_depth_) - 1; - source_stride_ = (width_ + 31) & ~31; - reference_stride_ = width_ * 2; - rnd_.Reset(ACMRandom::DeterministicSeed()); - } - - virtual uint8_t *GetReference(int block_idx) { -#if CONFIG_VP9_HIGHBITDEPTH - if (use_high_bit_depth_) + if (use_high_bit_depth_) { return CONVERT_TO_BYTEPTR(CONVERT_TO_SHORTPTR(reference_data_) + block_idx * kDataBlockSize); + } #endif // CONFIG_VP9_HIGHBITDEPTH return reference_data_ + block_idx * kDataBlockSize; } // Sum of Absolute Differences. Given two blocks, calculate the absolute // difference between two pixels in the same relative location; accumulate. 
- unsigned int ReferenceSAD(int block_idx) { - unsigned int sad = 0; - const uint8_t *const reference8 = GetReference(block_idx); - const uint8_t *const source8 = source_data_; + uint32_t ReferenceSAD(int block_idx) const { + uint32_t sad = 0; + const uint8_t *const reference8 = GetReference(block_idx); + const uint8_t *const source8 = source_data_; #if CONFIG_VP9_HIGHBITDEPTH - const uint16_t *const reference16 = - CONVERT_TO_SHORTPTR(GetReference(block_idx)); - const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_); + const uint16_t *const reference16 = + CONVERT_TO_SHORTPTR(GetReference(block_idx)); + const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_); #endif // CONFIG_VP9_HIGHBITDEPTH - for (int h = 0; h < height_; ++h) { - for (int w = 0; w < width_; ++w) { + for (int h = 0; h < params_.height; ++h) { + for (int w = 0; w < params_.width; ++w) { if (!use_high_bit_depth_) { sad += abs(source8[h * source_stride_ + w] - reference8[h * reference_stride_ + w]); @@ -154,7 +152,7 @@ class SADTestBase : public ::testing::Test { // Sum of Absolute Differences Average. Given two blocks, and a prediction // calculate the absolute difference between one pixel and average of the // corresponding and predicted pixels; accumulate. 
- unsigned int ReferenceSADavg(int block_idx) { + unsigned int ReferenceSADavg(int block_idx) const { unsigned int sad = 0; const uint8_t *const reference8 = GetReference(block_idx); const uint8_t *const source8 = source_data_; @@ -165,17 +163,17 @@ class SADTestBase : public ::testing::Test { const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_); const uint16_t *const second_pred16 = CONVERT_TO_SHORTPTR(second_pred_); #endif // CONFIG_VP9_HIGHBITDEPTH - for (int h = 0; h < height_; ++h) { - for (int w = 0; w < width_; ++w) { + for (int h = 0; h < params_.height; ++h) { + for (int w = 0; w < params_.width; ++w) { if (!use_high_bit_depth_) { - const int tmp = second_pred8[h * width_ + w] + - reference8[h * reference_stride_ + w]; + const int tmp = second_pred8[h * params_.width + w] + + reference8[h * reference_stride_ + w]; const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1); sad += abs(source8[h * source_stride_ + w] - comp_pred); #if CONFIG_VP9_HIGHBITDEPTH } else { - const int tmp = second_pred16[h * width_ + w] + - reference16[h * reference_stride_ + w]; + const int tmp = second_pred16[h * params_.width + w] + + reference16[h * reference_stride_ + w]; const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1); sad += abs(source16[h * source_stride_ + w] - comp_pred); #endif // CONFIG_VP9_HIGHBITDEPTH @@ -185,13 +183,13 @@ class SADTestBase : public ::testing::Test { return sad; } - void FillConstant(uint8_t *data, int stride, uint16_t fill_constant) { + void FillConstant(uint8_t *data, int stride, uint16_t fill_constant) const { uint8_t *data8 = data; #if CONFIG_VP9_HIGHBITDEPTH uint16_t *data16 = CONVERT_TO_SHORTPTR(data); #endif // CONFIG_VP9_HIGHBITDEPTH - for (int h = 0; h < height_; ++h) { - for (int w = 0; w < width_; ++w) { + for (int h = 0; h < params_.height; ++h) { + for (int w = 0; w < params_.width; ++w) { if (!use_high_bit_depth_) { data8[h * stride + w] = static_cast(fill_constant); #if CONFIG_VP9_HIGHBITDEPTH @@ -208,8 +206,8 @@ class 
SADTestBase : public ::testing::Test { #if CONFIG_VP9_HIGHBITDEPTH uint16_t *data16 = CONVERT_TO_SHORTPTR(data); #endif // CONFIG_VP9_HIGHBITDEPTH - for (int h = 0; h < height_; ++h) { - for (int w = 0; w < width_; ++w) { + for (int h = 0; h < params_.height; ++h) { + for (int w = 0; w < params_.width; ++w) { if (!use_high_bit_depth_) { data8[h * stride + w] = rnd_.Rand8(); #if CONFIG_VP9_HIGHBITDEPTH @@ -221,42 +219,41 @@ class SADTestBase : public ::testing::Test { } } - int width_, height_, mask_, bd_; + uint32_t mask_; vpx_bit_depth_t bit_depth_; - static uint8_t *source_data_; - static uint8_t *reference_data_; - static uint8_t *second_pred_; int source_stride_; - bool use_high_bit_depth_; - static uint8_t *source_data8_; - static uint8_t *reference_data8_; - static uint8_t *second_pred8_; - static uint16_t *source_data16_; - static uint16_t *reference_data16_; - static uint16_t *second_pred16_; int reference_stride_; + bool use_high_bit_depth_; + + uint8_t *source_data_; + uint8_t *reference_data_; + uint8_t *second_pred_; + uint8_t *source_data8_; + uint8_t *reference_data8_; + uint8_t *second_pred8_; + uint16_t *source_data16_; + uint16_t *reference_data16_; + uint16_t *second_pred16_; ACMRandom rnd_; + ParamType params_; }; -class SADx4Test - : public SADTestBase, - public ::testing::WithParamInterface { +class SADx4Test : public SADTestBase { public: - SADx4Test() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {} + SADx4Test() : SADTestBase(GetParam()) {} protected: - void SADs(unsigned int *results) { - const uint8_t *references[] = {GetReference(0), GetReference(1), - GetReference(2), GetReference(3)}; + void SADs(unsigned int *results) const { + const uint8_t *references[] = { GetReference(0), GetReference(1), + GetReference(2), GetReference(3) }; - ASM_REGISTER_STATE_CHECK(GET_PARAM(2)(source_data_, source_stride_, - references, reference_stride_, - results)); + ASM_REGISTER_STATE_CHECK(params_.func( + source_data_, source_stride_, 
references, reference_stride_, results)); } - void CheckSADs() { - unsigned int reference_sad, exp_sad[4]; + void CheckSADs() const { + uint32_t reference_sad, exp_sad[4]; SADs(exp_sad); for (int block = 0; block < 4; ++block) { @@ -267,23 +264,21 @@ class SADx4Test } }; -class SADTest - : public SADTestBase, - public ::testing::WithParamInterface { +class SADTest : public SADTestBase { public: - SADTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {} + SADTest() : SADTestBase(GetParam()) {} protected: - unsigned int SAD(int block_idx) { + unsigned int SAD(int block_idx) const { unsigned int ret; const uint8_t *const reference = GetReference(block_idx); - ASM_REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_, + ASM_REGISTER_STATE_CHECK(ret = params_.func(source_data_, source_stride_, reference, reference_stride_)); return ret; } - void CheckSAD() { + void CheckSAD() const { const unsigned int reference_sad = ReferenceSAD(0); const unsigned int exp_sad = SAD(0); @@ -291,24 +286,22 @@ class SADTest } }; -class SADavgTest - : public SADTestBase, - public ::testing::WithParamInterface { +class SADavgTest : public SADTestBase { public: - SADavgTest() : SADTestBase(GET_PARAM(0), GET_PARAM(1), GET_PARAM(3)) {} + SADavgTest() : SADTestBase(GetParam()) {} protected: - unsigned int SAD_avg(int block_idx) { + unsigned int SAD_avg(int block_idx) const { unsigned int ret; const uint8_t *const reference = GetReference(block_idx); - ASM_REGISTER_STATE_CHECK(ret = GET_PARAM(2)(source_data_, source_stride_, + ASM_REGISTER_STATE_CHECK(ret = params_.func(source_data_, source_stride_, reference, reference_stride_, second_pred_)); return ret; } - void CheckSAD() { + void CheckSAD() const { const unsigned int reference_sad = ReferenceSADavg(0); const unsigned int exp_sad = SAD_avg(0); @@ -316,16 +309,6 @@ class SADavgTest } }; -uint8_t *SADTestBase::source_data_ = NULL; -uint8_t *SADTestBase::reference_data_ = NULL; -uint8_t *SADTestBase::second_pred_ = 
NULL; -uint8_t *SADTestBase::source_data8_ = NULL; -uint8_t *SADTestBase::reference_data8_ = NULL; -uint8_t *SADTestBase::second_pred8_ = NULL; -uint16_t *SADTestBase::source_data16_ = NULL; -uint16_t *SADTestBase::reference_data16_ = NULL; -uint16_t *SADTestBase::second_pred16_ = NULL; - TEST_P(SADTest, MaxRef) { FillConstant(source_data_, source_stride_, 0); FillConstant(reference_data_, reference_stride_, mask_); @@ -370,13 +353,13 @@ TEST_P(SADTest, ShortSrc) { TEST_P(SADavgTest, MaxRef) { FillConstant(source_data_, source_stride_, 0); FillConstant(reference_data_, reference_stride_, mask_); - FillConstant(second_pred_, width_, 0); + FillConstant(second_pred_, params_.width, 0); CheckSAD(); } TEST_P(SADavgTest, MaxSrc) { FillConstant(source_data_, source_stride_, mask_); FillConstant(reference_data_, reference_stride_, 0); - FillConstant(second_pred_, width_, 0); + FillConstant(second_pred_, params_.width, 0); CheckSAD(); } @@ -385,7 +368,7 @@ TEST_P(SADavgTest, ShortRef) { reference_stride_ >>= 1; FillRandom(source_data_, source_stride_); FillRandom(reference_data_, reference_stride_); - FillRandom(second_pred_, width_); + FillRandom(second_pred_, params_.width); CheckSAD(); reference_stride_ = tmp_stride; } @@ -397,7 +380,7 @@ TEST_P(SADavgTest, UnalignedRef) { reference_stride_ -= 1; FillRandom(source_data_, source_stride_); FillRandom(reference_data_, reference_stride_); - FillRandom(second_pred_, width_); + FillRandom(second_pred_, params_.width); CheckSAD(); reference_stride_ = tmp_stride; } @@ -407,7 +390,7 @@ TEST_P(SADavgTest, ShortSrc) { source_stride_ >>= 1; FillRandom(source_data_, source_stride_); FillRandom(reference_data_, reference_stride_); - FillRandom(second_pred_, width_); + FillRandom(second_pred_, params_.width); CheckSAD(); source_stride_ = tmp_stride; } @@ -469,8 +452,8 @@ TEST_P(SADx4Test, ShortSrc) { } TEST_P(SADx4Test, SrcAlignedByWidth) { - uint8_t * tmp_source_data = source_data_; - source_data_ += width_; + uint8_t *tmp_source_data 
= source_data_; + source_data_ += params_.width; FillRandom(source_data_, source_stride_); FillRandom(GetReference(0), reference_stride_); FillRandom(GetReference(1), reference_stride_); @@ -480,390 +463,368 @@ TEST_P(SADx4Test, SrcAlignedByWidth) { source_data_ = tmp_source_data; } -using std::tr1::make_tuple; - //------------------------------------------------------------------------------ // C functions const SadMxNParam c_tests[] = { - make_tuple(64, 64, &vpx_sad64x64_c, -1), - make_tuple(64, 32, &vpx_sad64x32_c, -1), - make_tuple(32, 64, &vpx_sad32x64_c, -1), - make_tuple(32, 32, &vpx_sad32x32_c, -1), - make_tuple(32, 16, &vpx_sad32x16_c, -1), - make_tuple(16, 32, &vpx_sad16x32_c, -1), - make_tuple(16, 16, &vpx_sad16x16_c, -1), - make_tuple(16, 8, &vpx_sad16x8_c, -1), - make_tuple(8, 16, &vpx_sad8x16_c, -1), - make_tuple(8, 8, &vpx_sad8x8_c, -1), - make_tuple(8, 4, &vpx_sad8x4_c, -1), - make_tuple(4, 8, &vpx_sad4x8_c, -1), - make_tuple(4, 4, &vpx_sad4x4_c, -1), + SadMxNParam(64, 64, &vpx_sad64x64_c), + SadMxNParam(64, 32, &vpx_sad64x32_c), + SadMxNParam(32, 64, &vpx_sad32x64_c), + SadMxNParam(32, 32, &vpx_sad32x32_c), + SadMxNParam(32, 16, &vpx_sad32x16_c), + SadMxNParam(16, 32, &vpx_sad16x32_c), + SadMxNParam(16, 16, &vpx_sad16x16_c), + SadMxNParam(16, 8, &vpx_sad16x8_c), + SadMxNParam(8, 16, &vpx_sad8x16_c), + SadMxNParam(8, 8, &vpx_sad8x8_c), + SadMxNParam(8, 4, &vpx_sad8x4_c), + SadMxNParam(4, 8, &vpx_sad4x8_c), + SadMxNParam(4, 4, &vpx_sad4x4_c), #if CONFIG_VP9_HIGHBITDEPTH - make_tuple(64, 64, &vpx_highbd_sad64x64_c, 8), - make_tuple(64, 32, &vpx_highbd_sad64x32_c, 8), - make_tuple(32, 64, &vpx_highbd_sad32x64_c, 8), - make_tuple(32, 32, &vpx_highbd_sad32x32_c, 8), - make_tuple(32, 16, &vpx_highbd_sad32x16_c, 8), - make_tuple(16, 32, &vpx_highbd_sad16x32_c, 8), - make_tuple(16, 16, &vpx_highbd_sad16x16_c, 8), - make_tuple(16, 8, &vpx_highbd_sad16x8_c, 8), - make_tuple(8, 16, &vpx_highbd_sad8x16_c, 8), - make_tuple(8, 8, &vpx_highbd_sad8x8_c, 8), - 
make_tuple(8, 4, &vpx_highbd_sad8x4_c, 8), - make_tuple(4, 8, &vpx_highbd_sad4x8_c, 8), - make_tuple(4, 4, &vpx_highbd_sad4x4_c, 8), - make_tuple(64, 64, &vpx_highbd_sad64x64_c, 10), - make_tuple(64, 32, &vpx_highbd_sad64x32_c, 10), - make_tuple(32, 64, &vpx_highbd_sad32x64_c, 10), - make_tuple(32, 32, &vpx_highbd_sad32x32_c, 10), - make_tuple(32, 16, &vpx_highbd_sad32x16_c, 10), - make_tuple(16, 32, &vpx_highbd_sad16x32_c, 10), - make_tuple(16, 16, &vpx_highbd_sad16x16_c, 10), - make_tuple(16, 8, &vpx_highbd_sad16x8_c, 10), - make_tuple(8, 16, &vpx_highbd_sad8x16_c, 10), - make_tuple(8, 8, &vpx_highbd_sad8x8_c, 10), - make_tuple(8, 4, &vpx_highbd_sad8x4_c, 10), - make_tuple(4, 8, &vpx_highbd_sad4x8_c, 10), - make_tuple(4, 4, &vpx_highbd_sad4x4_c, 10), - make_tuple(64, 64, &vpx_highbd_sad64x64_c, 12), - make_tuple(64, 32, &vpx_highbd_sad64x32_c, 12), - make_tuple(32, 64, &vpx_highbd_sad32x64_c, 12), - make_tuple(32, 32, &vpx_highbd_sad32x32_c, 12), - make_tuple(32, 16, &vpx_highbd_sad32x16_c, 12), - make_tuple(16, 32, &vpx_highbd_sad16x32_c, 12), - make_tuple(16, 16, &vpx_highbd_sad16x16_c, 12), - make_tuple(16, 8, &vpx_highbd_sad16x8_c, 12), - make_tuple(8, 16, &vpx_highbd_sad8x16_c, 12), - make_tuple(8, 8, &vpx_highbd_sad8x8_c, 12), - make_tuple(8, 4, &vpx_highbd_sad8x4_c, 12), - make_tuple(4, 8, &vpx_highbd_sad4x8_c, 12), - make_tuple(4, 4, &vpx_highbd_sad4x4_c, 12), + SadMxNParam(64, 64, &vpx_highbd_sad64x64_c, 8), + SadMxNParam(64, 32, &vpx_highbd_sad64x32_c, 8), + SadMxNParam(32, 64, &vpx_highbd_sad32x64_c, 8), + SadMxNParam(32, 32, &vpx_highbd_sad32x32_c, 8), + SadMxNParam(32, 16, &vpx_highbd_sad32x16_c, 8), + SadMxNParam(16, 32, &vpx_highbd_sad16x32_c, 8), + SadMxNParam(16, 16, &vpx_highbd_sad16x16_c, 8), + SadMxNParam(16, 8, &vpx_highbd_sad16x8_c, 8), + SadMxNParam(8, 16, &vpx_highbd_sad8x16_c, 8), + SadMxNParam(8, 8, &vpx_highbd_sad8x8_c, 8), + SadMxNParam(8, 4, &vpx_highbd_sad8x4_c, 8), + SadMxNParam(4, 8, &vpx_highbd_sad4x8_c, 8), + SadMxNParam(4, 4, 
&vpx_highbd_sad4x4_c, 8), + SadMxNParam(64, 64, &vpx_highbd_sad64x64_c, 10), + SadMxNParam(64, 32, &vpx_highbd_sad64x32_c, 10), + SadMxNParam(32, 64, &vpx_highbd_sad32x64_c, 10), + SadMxNParam(32, 32, &vpx_highbd_sad32x32_c, 10), + SadMxNParam(32, 16, &vpx_highbd_sad32x16_c, 10), + SadMxNParam(16, 32, &vpx_highbd_sad16x32_c, 10), + SadMxNParam(16, 16, &vpx_highbd_sad16x16_c, 10), + SadMxNParam(16, 8, &vpx_highbd_sad16x8_c, 10), + SadMxNParam(8, 16, &vpx_highbd_sad8x16_c, 10), + SadMxNParam(8, 8, &vpx_highbd_sad8x8_c, 10), + SadMxNParam(8, 4, &vpx_highbd_sad8x4_c, 10), + SadMxNParam(4, 8, &vpx_highbd_sad4x8_c, 10), + SadMxNParam(4, 4, &vpx_highbd_sad4x4_c, 10), + SadMxNParam(64, 64, &vpx_highbd_sad64x64_c, 12), + SadMxNParam(64, 32, &vpx_highbd_sad64x32_c, 12), + SadMxNParam(32, 64, &vpx_highbd_sad32x64_c, 12), + SadMxNParam(32, 32, &vpx_highbd_sad32x32_c, 12), + SadMxNParam(32, 16, &vpx_highbd_sad32x16_c, 12), + SadMxNParam(16, 32, &vpx_highbd_sad16x32_c, 12), + SadMxNParam(16, 16, &vpx_highbd_sad16x16_c, 12), + SadMxNParam(16, 8, &vpx_highbd_sad16x8_c, 12), + SadMxNParam(8, 16, &vpx_highbd_sad8x16_c, 12), + SadMxNParam(8, 8, &vpx_highbd_sad8x8_c, 12), + SadMxNParam(8, 4, &vpx_highbd_sad8x4_c, 12), + SadMxNParam(4, 8, &vpx_highbd_sad4x8_c, 12), + SadMxNParam(4, 4, &vpx_highbd_sad4x4_c, 12), #endif // CONFIG_VP9_HIGHBITDEPTH }; INSTANTIATE_TEST_CASE_P(C, SADTest, ::testing::ValuesIn(c_tests)); const SadMxNAvgParam avg_c_tests[] = { - make_tuple(64, 64, &vpx_sad64x64_avg_c, -1), - make_tuple(64, 32, &vpx_sad64x32_avg_c, -1), - make_tuple(32, 64, &vpx_sad32x64_avg_c, -1), - make_tuple(32, 32, &vpx_sad32x32_avg_c, -1), - make_tuple(32, 16, &vpx_sad32x16_avg_c, -1), - make_tuple(16, 32, &vpx_sad16x32_avg_c, -1), - make_tuple(16, 16, &vpx_sad16x16_avg_c, -1), - make_tuple(16, 8, &vpx_sad16x8_avg_c, -1), - make_tuple(8, 16, &vpx_sad8x16_avg_c, -1), - make_tuple(8, 8, &vpx_sad8x8_avg_c, -1), - make_tuple(8, 4, &vpx_sad8x4_avg_c, -1), - make_tuple(4, 8, &vpx_sad4x8_avg_c, 
-1), - make_tuple(4, 4, &vpx_sad4x4_avg_c, -1), + SadMxNAvgParam(64, 64, &vpx_sad64x64_avg_c), + SadMxNAvgParam(64, 32, &vpx_sad64x32_avg_c), + SadMxNAvgParam(32, 64, &vpx_sad32x64_avg_c), + SadMxNAvgParam(32, 32, &vpx_sad32x32_avg_c), + SadMxNAvgParam(32, 16, &vpx_sad32x16_avg_c), + SadMxNAvgParam(16, 32, &vpx_sad16x32_avg_c), + SadMxNAvgParam(16, 16, &vpx_sad16x16_avg_c), + SadMxNAvgParam(16, 8, &vpx_sad16x8_avg_c), + SadMxNAvgParam(8, 16, &vpx_sad8x16_avg_c), + SadMxNAvgParam(8, 8, &vpx_sad8x8_avg_c), + SadMxNAvgParam(8, 4, &vpx_sad8x4_avg_c), + SadMxNAvgParam(4, 8, &vpx_sad4x8_avg_c), + SadMxNAvgParam(4, 4, &vpx_sad4x4_avg_c), #if CONFIG_VP9_HIGHBITDEPTH - make_tuple(64, 64, &vpx_highbd_sad64x64_avg_c, 8), - make_tuple(64, 32, &vpx_highbd_sad64x32_avg_c, 8), - make_tuple(32, 64, &vpx_highbd_sad32x64_avg_c, 8), - make_tuple(32, 32, &vpx_highbd_sad32x32_avg_c, 8), - make_tuple(32, 16, &vpx_highbd_sad32x16_avg_c, 8), - make_tuple(16, 32, &vpx_highbd_sad16x32_avg_c, 8), - make_tuple(16, 16, &vpx_highbd_sad16x16_avg_c, 8), - make_tuple(16, 8, &vpx_highbd_sad16x8_avg_c, 8), - make_tuple(8, 16, &vpx_highbd_sad8x16_avg_c, 8), - make_tuple(8, 8, &vpx_highbd_sad8x8_avg_c, 8), - make_tuple(8, 4, &vpx_highbd_sad8x4_avg_c, 8), - make_tuple(4, 8, &vpx_highbd_sad4x8_avg_c, 8), - make_tuple(4, 4, &vpx_highbd_sad4x4_avg_c, 8), - make_tuple(64, 64, &vpx_highbd_sad64x64_avg_c, 10), - make_tuple(64, 32, &vpx_highbd_sad64x32_avg_c, 10), - make_tuple(32, 64, &vpx_highbd_sad32x64_avg_c, 10), - make_tuple(32, 32, &vpx_highbd_sad32x32_avg_c, 10), - make_tuple(32, 16, &vpx_highbd_sad32x16_avg_c, 10), - make_tuple(16, 32, &vpx_highbd_sad16x32_avg_c, 10), - make_tuple(16, 16, &vpx_highbd_sad16x16_avg_c, 10), - make_tuple(16, 8, &vpx_highbd_sad16x8_avg_c, 10), - make_tuple(8, 16, &vpx_highbd_sad8x16_avg_c, 10), - make_tuple(8, 8, &vpx_highbd_sad8x8_avg_c, 10), - make_tuple(8, 4, &vpx_highbd_sad8x4_avg_c, 10), - make_tuple(4, 8, &vpx_highbd_sad4x8_avg_c, 10), - make_tuple(4, 4, 
&vpx_highbd_sad4x4_avg_c, 10), - make_tuple(64, 64, &vpx_highbd_sad64x64_avg_c, 12), - make_tuple(64, 32, &vpx_highbd_sad64x32_avg_c, 12), - make_tuple(32, 64, &vpx_highbd_sad32x64_avg_c, 12), - make_tuple(32, 32, &vpx_highbd_sad32x32_avg_c, 12), - make_tuple(32, 16, &vpx_highbd_sad32x16_avg_c, 12), - make_tuple(16, 32, &vpx_highbd_sad16x32_avg_c, 12), - make_tuple(16, 16, &vpx_highbd_sad16x16_avg_c, 12), - make_tuple(16, 8, &vpx_highbd_sad16x8_avg_c, 12), - make_tuple(8, 16, &vpx_highbd_sad8x16_avg_c, 12), - make_tuple(8, 8, &vpx_highbd_sad8x8_avg_c, 12), - make_tuple(8, 4, &vpx_highbd_sad8x4_avg_c, 12), - make_tuple(4, 8, &vpx_highbd_sad4x8_avg_c, 12), - make_tuple(4, 4, &vpx_highbd_sad4x4_avg_c, 12), + SadMxNAvgParam(64, 64, &vpx_highbd_sad64x64_avg_c, 8), + SadMxNAvgParam(64, 32, &vpx_highbd_sad64x32_avg_c, 8), + SadMxNAvgParam(32, 64, &vpx_highbd_sad32x64_avg_c, 8), + SadMxNAvgParam(32, 32, &vpx_highbd_sad32x32_avg_c, 8), + SadMxNAvgParam(32, 16, &vpx_highbd_sad32x16_avg_c, 8), + SadMxNAvgParam(16, 32, &vpx_highbd_sad16x32_avg_c, 8), + SadMxNAvgParam(16, 16, &vpx_highbd_sad16x16_avg_c, 8), + SadMxNAvgParam(16, 8, &vpx_highbd_sad16x8_avg_c, 8), + SadMxNAvgParam(8, 16, &vpx_highbd_sad8x16_avg_c, 8), + SadMxNAvgParam(8, 8, &vpx_highbd_sad8x8_avg_c, 8), + SadMxNAvgParam(8, 4, &vpx_highbd_sad8x4_avg_c, 8), + SadMxNAvgParam(4, 8, &vpx_highbd_sad4x8_avg_c, 8), + SadMxNAvgParam(4, 4, &vpx_highbd_sad4x4_avg_c, 8), + SadMxNAvgParam(64, 64, &vpx_highbd_sad64x64_avg_c, 10), + SadMxNAvgParam(64, 32, &vpx_highbd_sad64x32_avg_c, 10), + SadMxNAvgParam(32, 64, &vpx_highbd_sad32x64_avg_c, 10), + SadMxNAvgParam(32, 32, &vpx_highbd_sad32x32_avg_c, 10), + SadMxNAvgParam(32, 16, &vpx_highbd_sad32x16_avg_c, 10), + SadMxNAvgParam(16, 32, &vpx_highbd_sad16x32_avg_c, 10), + SadMxNAvgParam(16, 16, &vpx_highbd_sad16x16_avg_c, 10), + SadMxNAvgParam(16, 8, &vpx_highbd_sad16x8_avg_c, 10), + SadMxNAvgParam(8, 16, &vpx_highbd_sad8x16_avg_c, 10), + SadMxNAvgParam(8, 8, 
&vpx_highbd_sad8x8_avg_c, 10), + SadMxNAvgParam(8, 4, &vpx_highbd_sad8x4_avg_c, 10), + SadMxNAvgParam(4, 8, &vpx_highbd_sad4x8_avg_c, 10), + SadMxNAvgParam(4, 4, &vpx_highbd_sad4x4_avg_c, 10), + SadMxNAvgParam(64, 64, &vpx_highbd_sad64x64_avg_c, 12), + SadMxNAvgParam(64, 32, &vpx_highbd_sad64x32_avg_c, 12), + SadMxNAvgParam(32, 64, &vpx_highbd_sad32x64_avg_c, 12), + SadMxNAvgParam(32, 32, &vpx_highbd_sad32x32_avg_c, 12), + SadMxNAvgParam(32, 16, &vpx_highbd_sad32x16_avg_c, 12), + SadMxNAvgParam(16, 32, &vpx_highbd_sad16x32_avg_c, 12), + SadMxNAvgParam(16, 16, &vpx_highbd_sad16x16_avg_c, 12), + SadMxNAvgParam(16, 8, &vpx_highbd_sad16x8_avg_c, 12), + SadMxNAvgParam(8, 16, &vpx_highbd_sad8x16_avg_c, 12), + SadMxNAvgParam(8, 8, &vpx_highbd_sad8x8_avg_c, 12), + SadMxNAvgParam(8, 4, &vpx_highbd_sad8x4_avg_c, 12), + SadMxNAvgParam(4, 8, &vpx_highbd_sad4x8_avg_c, 12), + SadMxNAvgParam(4, 4, &vpx_highbd_sad4x4_avg_c, 12), #endif // CONFIG_VP9_HIGHBITDEPTH }; INSTANTIATE_TEST_CASE_P(C, SADavgTest, ::testing::ValuesIn(avg_c_tests)); const SadMxNx4Param x4d_c_tests[] = { - make_tuple(64, 64, &vpx_sad64x64x4d_c, -1), - make_tuple(64, 32, &vpx_sad64x32x4d_c, -1), - make_tuple(32, 64, &vpx_sad32x64x4d_c, -1), - make_tuple(32, 32, &vpx_sad32x32x4d_c, -1), - make_tuple(32, 16, &vpx_sad32x16x4d_c, -1), - make_tuple(16, 32, &vpx_sad16x32x4d_c, -1), - make_tuple(16, 16, &vpx_sad16x16x4d_c, -1), - make_tuple(16, 8, &vpx_sad16x8x4d_c, -1), - make_tuple(8, 16, &vpx_sad8x16x4d_c, -1), - make_tuple(8, 8, &vpx_sad8x8x4d_c, -1), - make_tuple(8, 4, &vpx_sad8x4x4d_c, -1), - make_tuple(4, 8, &vpx_sad4x8x4d_c, -1), - make_tuple(4, 4, &vpx_sad4x4x4d_c, -1), + SadMxNx4Param(64, 64, &vpx_sad64x64x4d_c), + SadMxNx4Param(64, 32, &vpx_sad64x32x4d_c), + SadMxNx4Param(32, 64, &vpx_sad32x64x4d_c), + SadMxNx4Param(32, 32, &vpx_sad32x32x4d_c), + SadMxNx4Param(32, 16, &vpx_sad32x16x4d_c), + SadMxNx4Param(16, 32, &vpx_sad16x32x4d_c), + SadMxNx4Param(16, 16, &vpx_sad16x16x4d_c), + SadMxNx4Param(16, 8, 
&vpx_sad16x8x4d_c), + SadMxNx4Param(8, 16, &vpx_sad8x16x4d_c), + SadMxNx4Param(8, 8, &vpx_sad8x8x4d_c), + SadMxNx4Param(8, 4, &vpx_sad8x4x4d_c), + SadMxNx4Param(4, 8, &vpx_sad4x8x4d_c), + SadMxNx4Param(4, 4, &vpx_sad4x4x4d_c), #if CONFIG_VP9_HIGHBITDEPTH - make_tuple(64, 64, &vpx_highbd_sad64x64x4d_c, 8), - make_tuple(64, 32, &vpx_highbd_sad64x32x4d_c, 8), - make_tuple(32, 64, &vpx_highbd_sad32x64x4d_c, 8), - make_tuple(32, 32, &vpx_highbd_sad32x32x4d_c, 8), - make_tuple(32, 16, &vpx_highbd_sad32x16x4d_c, 8), - make_tuple(16, 32, &vpx_highbd_sad16x32x4d_c, 8), - make_tuple(16, 16, &vpx_highbd_sad16x16x4d_c, 8), - make_tuple(16, 8, &vpx_highbd_sad16x8x4d_c, 8), - make_tuple(8, 16, &vpx_highbd_sad8x16x4d_c, 8), - make_tuple(8, 8, &vpx_highbd_sad8x8x4d_c, 8), - make_tuple(8, 4, &vpx_highbd_sad8x4x4d_c, 8), - make_tuple(4, 8, &vpx_highbd_sad4x8x4d_c, 8), - make_tuple(4, 4, &vpx_highbd_sad4x4x4d_c, 8), - make_tuple(64, 64, &vpx_highbd_sad64x64x4d_c, 10), - make_tuple(64, 32, &vpx_highbd_sad64x32x4d_c, 10), - make_tuple(32, 64, &vpx_highbd_sad32x64x4d_c, 10), - make_tuple(32, 32, &vpx_highbd_sad32x32x4d_c, 10), - make_tuple(32, 16, &vpx_highbd_sad32x16x4d_c, 10), - make_tuple(16, 32, &vpx_highbd_sad16x32x4d_c, 10), - make_tuple(16, 16, &vpx_highbd_sad16x16x4d_c, 10), - make_tuple(16, 8, &vpx_highbd_sad16x8x4d_c, 10), - make_tuple(8, 16, &vpx_highbd_sad8x16x4d_c, 10), - make_tuple(8, 8, &vpx_highbd_sad8x8x4d_c, 10), - make_tuple(8, 4, &vpx_highbd_sad8x4x4d_c, 10), - make_tuple(4, 8, &vpx_highbd_sad4x8x4d_c, 10), - make_tuple(4, 4, &vpx_highbd_sad4x4x4d_c, 10), - make_tuple(64, 64, &vpx_highbd_sad64x64x4d_c, 12), - make_tuple(64, 32, &vpx_highbd_sad64x32x4d_c, 12), - make_tuple(32, 64, &vpx_highbd_sad32x64x4d_c, 12), - make_tuple(32, 32, &vpx_highbd_sad32x32x4d_c, 12), - make_tuple(32, 16, &vpx_highbd_sad32x16x4d_c, 12), - make_tuple(16, 32, &vpx_highbd_sad16x32x4d_c, 12), - make_tuple(16, 16, &vpx_highbd_sad16x16x4d_c, 12), - make_tuple(16, 8, &vpx_highbd_sad16x8x4d_c, 
12), - make_tuple(8, 16, &vpx_highbd_sad8x16x4d_c, 12), - make_tuple(8, 8, &vpx_highbd_sad8x8x4d_c, 12), - make_tuple(8, 4, &vpx_highbd_sad8x4x4d_c, 12), - make_tuple(4, 8, &vpx_highbd_sad4x8x4d_c, 12), - make_tuple(4, 4, &vpx_highbd_sad4x4x4d_c, 12), + SadMxNx4Param(64, 64, &vpx_highbd_sad64x64x4d_c, 8), + SadMxNx4Param(64, 32, &vpx_highbd_sad64x32x4d_c, 8), + SadMxNx4Param(32, 64, &vpx_highbd_sad32x64x4d_c, 8), + SadMxNx4Param(32, 32, &vpx_highbd_sad32x32x4d_c, 8), + SadMxNx4Param(32, 16, &vpx_highbd_sad32x16x4d_c, 8), + SadMxNx4Param(16, 32, &vpx_highbd_sad16x32x4d_c, 8), + SadMxNx4Param(16, 16, &vpx_highbd_sad16x16x4d_c, 8), + SadMxNx4Param(16, 8, &vpx_highbd_sad16x8x4d_c, 8), + SadMxNx4Param(8, 16, &vpx_highbd_sad8x16x4d_c, 8), + SadMxNx4Param(8, 8, &vpx_highbd_sad8x8x4d_c, 8), + SadMxNx4Param(8, 4, &vpx_highbd_sad8x4x4d_c, 8), + SadMxNx4Param(4, 8, &vpx_highbd_sad4x8x4d_c, 8), + SadMxNx4Param(4, 4, &vpx_highbd_sad4x4x4d_c, 8), + SadMxNx4Param(64, 64, &vpx_highbd_sad64x64x4d_c, 10), + SadMxNx4Param(64, 32, &vpx_highbd_sad64x32x4d_c, 10), + SadMxNx4Param(32, 64, &vpx_highbd_sad32x64x4d_c, 10), + SadMxNx4Param(32, 32, &vpx_highbd_sad32x32x4d_c, 10), + SadMxNx4Param(32, 16, &vpx_highbd_sad32x16x4d_c, 10), + SadMxNx4Param(16, 32, &vpx_highbd_sad16x32x4d_c, 10), + SadMxNx4Param(16, 16, &vpx_highbd_sad16x16x4d_c, 10), + SadMxNx4Param(16, 8, &vpx_highbd_sad16x8x4d_c, 10), + SadMxNx4Param(8, 16, &vpx_highbd_sad8x16x4d_c, 10), + SadMxNx4Param(8, 8, &vpx_highbd_sad8x8x4d_c, 10), + SadMxNx4Param(8, 4, &vpx_highbd_sad8x4x4d_c, 10), + SadMxNx4Param(4, 8, &vpx_highbd_sad4x8x4d_c, 10), + SadMxNx4Param(4, 4, &vpx_highbd_sad4x4x4d_c, 10), + SadMxNx4Param(64, 64, &vpx_highbd_sad64x64x4d_c, 12), + SadMxNx4Param(64, 32, &vpx_highbd_sad64x32x4d_c, 12), + SadMxNx4Param(32, 64, &vpx_highbd_sad32x64x4d_c, 12), + SadMxNx4Param(32, 32, &vpx_highbd_sad32x32x4d_c, 12), + SadMxNx4Param(32, 16, &vpx_highbd_sad32x16x4d_c, 12), + SadMxNx4Param(16, 32, &vpx_highbd_sad16x32x4d_c, 12), + 
SadMxNx4Param(16, 16, &vpx_highbd_sad16x16x4d_c, 12), + SadMxNx4Param(16, 8, &vpx_highbd_sad16x8x4d_c, 12), + SadMxNx4Param(8, 16, &vpx_highbd_sad8x16x4d_c, 12), + SadMxNx4Param(8, 8, &vpx_highbd_sad8x8x4d_c, 12), + SadMxNx4Param(8, 4, &vpx_highbd_sad8x4x4d_c, 12), + SadMxNx4Param(4, 8, &vpx_highbd_sad4x8x4d_c, 12), + SadMxNx4Param(4, 4, &vpx_highbd_sad4x4x4d_c, 12), #endif // CONFIG_VP9_HIGHBITDEPTH }; INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::ValuesIn(x4d_c_tests)); //------------------------------------------------------------------------------ // ARM functions -#if HAVE_MEDIA -const SadMxNParam media_tests[] = { - make_tuple(16, 16, &vpx_sad16x16_media, -1), -}; -INSTANTIATE_TEST_CASE_P(MEDIA, SADTest, ::testing::ValuesIn(media_tests)); -#endif // HAVE_MEDIA - #if HAVE_NEON const SadMxNParam neon_tests[] = { - make_tuple(64, 64, &vpx_sad64x64_neon, -1), - make_tuple(32, 32, &vpx_sad32x32_neon, -1), - make_tuple(16, 16, &vpx_sad16x16_neon, -1), - make_tuple(16, 8, &vpx_sad16x8_neon, -1), - make_tuple(8, 16, &vpx_sad8x16_neon, -1), - make_tuple(8, 8, &vpx_sad8x8_neon, -1), - make_tuple(4, 4, &vpx_sad4x4_neon, -1), + SadMxNParam(64, 64, &vpx_sad64x64_neon), + SadMxNParam(32, 32, &vpx_sad32x32_neon), + SadMxNParam(16, 16, &vpx_sad16x16_neon), + SadMxNParam(16, 8, &vpx_sad16x8_neon), + SadMxNParam(8, 16, &vpx_sad8x16_neon), + SadMxNParam(8, 8, &vpx_sad8x8_neon), + SadMxNParam(4, 4, &vpx_sad4x4_neon), }; INSTANTIATE_TEST_CASE_P(NEON, SADTest, ::testing::ValuesIn(neon_tests)); const SadMxNx4Param x4d_neon_tests[] = { - make_tuple(64, 64, &vpx_sad64x64x4d_neon, -1), - make_tuple(32, 32, &vpx_sad32x32x4d_neon, -1), - make_tuple(16, 16, &vpx_sad16x16x4d_neon, -1), + SadMxNx4Param(64, 64, &vpx_sad64x64x4d_neon), + SadMxNx4Param(32, 32, &vpx_sad32x32x4d_neon), + SadMxNx4Param(16, 16, &vpx_sad16x16x4d_neon), }; INSTANTIATE_TEST_CASE_P(NEON, SADx4Test, ::testing::ValuesIn(x4d_neon_tests)); #endif // HAVE_NEON 
//------------------------------------------------------------------------------ // x86 functions -#if HAVE_MMX -const SadMxNParam mmx_tests[] = { - make_tuple(16, 16, &vpx_sad16x16_mmx, -1), - make_tuple(16, 8, &vpx_sad16x8_mmx, -1), - make_tuple(8, 16, &vpx_sad8x16_mmx, -1), - make_tuple(8, 8, &vpx_sad8x8_mmx, -1), - make_tuple(4, 4, &vpx_sad4x4_mmx, -1), -}; -INSTANTIATE_TEST_CASE_P(MMX, SADTest, ::testing::ValuesIn(mmx_tests)); -#endif // HAVE_MMX - #if HAVE_SSE2 -#if CONFIG_USE_X86INC const SadMxNParam sse2_tests[] = { - make_tuple(64, 64, &vpx_sad64x64_sse2, -1), - make_tuple(64, 32, &vpx_sad64x32_sse2, -1), - make_tuple(32, 64, &vpx_sad32x64_sse2, -1), - make_tuple(32, 32, &vpx_sad32x32_sse2, -1), - make_tuple(32, 16, &vpx_sad32x16_sse2, -1), - make_tuple(16, 32, &vpx_sad16x32_sse2, -1), - make_tuple(16, 16, &vpx_sad16x16_sse2, -1), - make_tuple(16, 8, &vpx_sad16x8_sse2, -1), - make_tuple(8, 16, &vpx_sad8x16_sse2, -1), - make_tuple(8, 8, &vpx_sad8x8_sse2, -1), - make_tuple(8, 4, &vpx_sad8x4_sse2, -1), - make_tuple(4, 8, &vpx_sad4x8_sse2, -1), - make_tuple(4, 4, &vpx_sad4x4_sse2, -1), + SadMxNParam(64, 64, &vpx_sad64x64_sse2), + SadMxNParam(64, 32, &vpx_sad64x32_sse2), + SadMxNParam(32, 64, &vpx_sad32x64_sse2), + SadMxNParam(32, 32, &vpx_sad32x32_sse2), + SadMxNParam(32, 16, &vpx_sad32x16_sse2), + SadMxNParam(16, 32, &vpx_sad16x32_sse2), + SadMxNParam(16, 16, &vpx_sad16x16_sse2), + SadMxNParam(16, 8, &vpx_sad16x8_sse2), + SadMxNParam(8, 16, &vpx_sad8x16_sse2), + SadMxNParam(8, 8, &vpx_sad8x8_sse2), + SadMxNParam(8, 4, &vpx_sad8x4_sse2), + SadMxNParam(4, 8, &vpx_sad4x8_sse2), + SadMxNParam(4, 4, &vpx_sad4x4_sse2), #if CONFIG_VP9_HIGHBITDEPTH - make_tuple(64, 64, &vpx_highbd_sad64x64_sse2, 8), - make_tuple(64, 32, &vpx_highbd_sad64x32_sse2, 8), - make_tuple(32, 64, &vpx_highbd_sad32x64_sse2, 8), - make_tuple(32, 32, &vpx_highbd_sad32x32_sse2, 8), - make_tuple(32, 16, &vpx_highbd_sad32x16_sse2, 8), - make_tuple(16, 32, &vpx_highbd_sad16x32_sse2, 8), - 
make_tuple(16, 16, &vpx_highbd_sad16x16_sse2, 8), - make_tuple(16, 8, &vpx_highbd_sad16x8_sse2, 8), - make_tuple(8, 16, &vpx_highbd_sad8x16_sse2, 8), - make_tuple(8, 8, &vpx_highbd_sad8x8_sse2, 8), - make_tuple(8, 4, &vpx_highbd_sad8x4_sse2, 8), - make_tuple(64, 64, &vpx_highbd_sad64x64_sse2, 10), - make_tuple(64, 32, &vpx_highbd_sad64x32_sse2, 10), - make_tuple(32, 64, &vpx_highbd_sad32x64_sse2, 10), - make_tuple(32, 32, &vpx_highbd_sad32x32_sse2, 10), - make_tuple(32, 16, &vpx_highbd_sad32x16_sse2, 10), - make_tuple(16, 32, &vpx_highbd_sad16x32_sse2, 10), - make_tuple(16, 16, &vpx_highbd_sad16x16_sse2, 10), - make_tuple(16, 8, &vpx_highbd_sad16x8_sse2, 10), - make_tuple(8, 16, &vpx_highbd_sad8x16_sse2, 10), - make_tuple(8, 8, &vpx_highbd_sad8x8_sse2, 10), - make_tuple(8, 4, &vpx_highbd_sad8x4_sse2, 10), - make_tuple(64, 64, &vpx_highbd_sad64x64_sse2, 12), - make_tuple(64, 32, &vpx_highbd_sad64x32_sse2, 12), - make_tuple(32, 64, &vpx_highbd_sad32x64_sse2, 12), - make_tuple(32, 32, &vpx_highbd_sad32x32_sse2, 12), - make_tuple(32, 16, &vpx_highbd_sad32x16_sse2, 12), - make_tuple(16, 32, &vpx_highbd_sad16x32_sse2, 12), - make_tuple(16, 16, &vpx_highbd_sad16x16_sse2, 12), - make_tuple(16, 8, &vpx_highbd_sad16x8_sse2, 12), - make_tuple(8, 16, &vpx_highbd_sad8x16_sse2, 12), - make_tuple(8, 8, &vpx_highbd_sad8x8_sse2, 12), - make_tuple(8, 4, &vpx_highbd_sad8x4_sse2, 12), + SadMxNParam(64, 64, &vpx_highbd_sad64x64_sse2, 8), + SadMxNParam(64, 32, &vpx_highbd_sad64x32_sse2, 8), + SadMxNParam(32, 64, &vpx_highbd_sad32x64_sse2, 8), + SadMxNParam(32, 32, &vpx_highbd_sad32x32_sse2, 8), + SadMxNParam(32, 16, &vpx_highbd_sad32x16_sse2, 8), + SadMxNParam(16, 32, &vpx_highbd_sad16x32_sse2, 8), + SadMxNParam(16, 16, &vpx_highbd_sad16x16_sse2, 8), + SadMxNParam(16, 8, &vpx_highbd_sad16x8_sse2, 8), + SadMxNParam(8, 16, &vpx_highbd_sad8x16_sse2, 8), + SadMxNParam(8, 8, &vpx_highbd_sad8x8_sse2, 8), + SadMxNParam(8, 4, &vpx_highbd_sad8x4_sse2, 8), + SadMxNParam(64, 64, 
&vpx_highbd_sad64x64_sse2, 10), + SadMxNParam(64, 32, &vpx_highbd_sad64x32_sse2, 10), + SadMxNParam(32, 64, &vpx_highbd_sad32x64_sse2, 10), + SadMxNParam(32, 32, &vpx_highbd_sad32x32_sse2, 10), + SadMxNParam(32, 16, &vpx_highbd_sad32x16_sse2, 10), + SadMxNParam(16, 32, &vpx_highbd_sad16x32_sse2, 10), + SadMxNParam(16, 16, &vpx_highbd_sad16x16_sse2, 10), + SadMxNParam(16, 8, &vpx_highbd_sad16x8_sse2, 10), + SadMxNParam(8, 16, &vpx_highbd_sad8x16_sse2, 10), + SadMxNParam(8, 8, &vpx_highbd_sad8x8_sse2, 10), + SadMxNParam(8, 4, &vpx_highbd_sad8x4_sse2, 10), + SadMxNParam(64, 64, &vpx_highbd_sad64x64_sse2, 12), + SadMxNParam(64, 32, &vpx_highbd_sad64x32_sse2, 12), + SadMxNParam(32, 64, &vpx_highbd_sad32x64_sse2, 12), + SadMxNParam(32, 32, &vpx_highbd_sad32x32_sse2, 12), + SadMxNParam(32, 16, &vpx_highbd_sad32x16_sse2, 12), + SadMxNParam(16, 32, &vpx_highbd_sad16x32_sse2, 12), + SadMxNParam(16, 16, &vpx_highbd_sad16x16_sse2, 12), + SadMxNParam(16, 8, &vpx_highbd_sad16x8_sse2, 12), + SadMxNParam(8, 16, &vpx_highbd_sad8x16_sse2, 12), + SadMxNParam(8, 8, &vpx_highbd_sad8x8_sse2, 12), + SadMxNParam(8, 4, &vpx_highbd_sad8x4_sse2, 12), #endif // CONFIG_VP9_HIGHBITDEPTH }; INSTANTIATE_TEST_CASE_P(SSE2, SADTest, ::testing::ValuesIn(sse2_tests)); const SadMxNAvgParam avg_sse2_tests[] = { - make_tuple(64, 64, &vpx_sad64x64_avg_sse2, -1), - make_tuple(64, 32, &vpx_sad64x32_avg_sse2, -1), - make_tuple(32, 64, &vpx_sad32x64_avg_sse2, -1), - make_tuple(32, 32, &vpx_sad32x32_avg_sse2, -1), - make_tuple(32, 16, &vpx_sad32x16_avg_sse2, -1), - make_tuple(16, 32, &vpx_sad16x32_avg_sse2, -1), - make_tuple(16, 16, &vpx_sad16x16_avg_sse2, -1), - make_tuple(16, 8, &vpx_sad16x8_avg_sse2, -1), - make_tuple(8, 16, &vpx_sad8x16_avg_sse2, -1), - make_tuple(8, 8, &vpx_sad8x8_avg_sse2, -1), - make_tuple(8, 4, &vpx_sad8x4_avg_sse2, -1), - make_tuple(4, 8, &vpx_sad4x8_avg_sse2, -1), - make_tuple(4, 4, &vpx_sad4x4_avg_sse2, -1), + SadMxNAvgParam(64, 64, &vpx_sad64x64_avg_sse2), + SadMxNAvgParam(64, 32, 
&vpx_sad64x32_avg_sse2), + SadMxNAvgParam(32, 64, &vpx_sad32x64_avg_sse2), + SadMxNAvgParam(32, 32, &vpx_sad32x32_avg_sse2), + SadMxNAvgParam(32, 16, &vpx_sad32x16_avg_sse2), + SadMxNAvgParam(16, 32, &vpx_sad16x32_avg_sse2), + SadMxNAvgParam(16, 16, &vpx_sad16x16_avg_sse2), + SadMxNAvgParam(16, 8, &vpx_sad16x8_avg_sse2), + SadMxNAvgParam(8, 16, &vpx_sad8x16_avg_sse2), + SadMxNAvgParam(8, 8, &vpx_sad8x8_avg_sse2), + SadMxNAvgParam(8, 4, &vpx_sad8x4_avg_sse2), + SadMxNAvgParam(4, 8, &vpx_sad4x8_avg_sse2), + SadMxNAvgParam(4, 4, &vpx_sad4x4_avg_sse2), #if CONFIG_VP9_HIGHBITDEPTH - make_tuple(64, 64, &vpx_highbd_sad64x64_avg_sse2, 8), - make_tuple(64, 32, &vpx_highbd_sad64x32_avg_sse2, 8), - make_tuple(32, 64, &vpx_highbd_sad32x64_avg_sse2, 8), - make_tuple(32, 32, &vpx_highbd_sad32x32_avg_sse2, 8), - make_tuple(32, 16, &vpx_highbd_sad32x16_avg_sse2, 8), - make_tuple(16, 32, &vpx_highbd_sad16x32_avg_sse2, 8), - make_tuple(16, 16, &vpx_highbd_sad16x16_avg_sse2, 8), - make_tuple(16, 8, &vpx_highbd_sad16x8_avg_sse2, 8), - make_tuple(8, 16, &vpx_highbd_sad8x16_avg_sse2, 8), - make_tuple(8, 8, &vpx_highbd_sad8x8_avg_sse2, 8), - make_tuple(8, 4, &vpx_highbd_sad8x4_avg_sse2, 8), - make_tuple(64, 64, &vpx_highbd_sad64x64_avg_sse2, 10), - make_tuple(64, 32, &vpx_highbd_sad64x32_avg_sse2, 10), - make_tuple(32, 64, &vpx_highbd_sad32x64_avg_sse2, 10), - make_tuple(32, 32, &vpx_highbd_sad32x32_avg_sse2, 10), - make_tuple(32, 16, &vpx_highbd_sad32x16_avg_sse2, 10), - make_tuple(16, 32, &vpx_highbd_sad16x32_avg_sse2, 10), - make_tuple(16, 16, &vpx_highbd_sad16x16_avg_sse2, 10), - make_tuple(16, 8, &vpx_highbd_sad16x8_avg_sse2, 10), - make_tuple(8, 16, &vpx_highbd_sad8x16_avg_sse2, 10), - make_tuple(8, 8, &vpx_highbd_sad8x8_avg_sse2, 10), - make_tuple(8, 4, &vpx_highbd_sad8x4_avg_sse2, 10), - make_tuple(64, 64, &vpx_highbd_sad64x64_avg_sse2, 12), - make_tuple(64, 32, &vpx_highbd_sad64x32_avg_sse2, 12), - make_tuple(32, 64, &vpx_highbd_sad32x64_avg_sse2, 12), - make_tuple(32, 32, 
&vpx_highbd_sad32x32_avg_sse2, 12), - make_tuple(32, 16, &vpx_highbd_sad32x16_avg_sse2, 12), - make_tuple(16, 32, &vpx_highbd_sad16x32_avg_sse2, 12), - make_tuple(16, 16, &vpx_highbd_sad16x16_avg_sse2, 12), - make_tuple(16, 8, &vpx_highbd_sad16x8_avg_sse2, 12), - make_tuple(8, 16, &vpx_highbd_sad8x16_avg_sse2, 12), - make_tuple(8, 8, &vpx_highbd_sad8x8_avg_sse2, 12), - make_tuple(8, 4, &vpx_highbd_sad8x4_avg_sse2, 12), + SadMxNAvgParam(64, 64, &vpx_highbd_sad64x64_avg_sse2, 8), + SadMxNAvgParam(64, 32, &vpx_highbd_sad64x32_avg_sse2, 8), + SadMxNAvgParam(32, 64, &vpx_highbd_sad32x64_avg_sse2, 8), + SadMxNAvgParam(32, 32, &vpx_highbd_sad32x32_avg_sse2, 8), + SadMxNAvgParam(32, 16, &vpx_highbd_sad32x16_avg_sse2, 8), + SadMxNAvgParam(16, 32, &vpx_highbd_sad16x32_avg_sse2, 8), + SadMxNAvgParam(16, 16, &vpx_highbd_sad16x16_avg_sse2, 8), + SadMxNAvgParam(16, 8, &vpx_highbd_sad16x8_avg_sse2, 8), + SadMxNAvgParam(8, 16, &vpx_highbd_sad8x16_avg_sse2, 8), + SadMxNAvgParam(8, 8, &vpx_highbd_sad8x8_avg_sse2, 8), + SadMxNAvgParam(8, 4, &vpx_highbd_sad8x4_avg_sse2, 8), + SadMxNAvgParam(64, 64, &vpx_highbd_sad64x64_avg_sse2, 10), + SadMxNAvgParam(64, 32, &vpx_highbd_sad64x32_avg_sse2, 10), + SadMxNAvgParam(32, 64, &vpx_highbd_sad32x64_avg_sse2, 10), + SadMxNAvgParam(32, 32, &vpx_highbd_sad32x32_avg_sse2, 10), + SadMxNAvgParam(32, 16, &vpx_highbd_sad32x16_avg_sse2, 10), + SadMxNAvgParam(16, 32, &vpx_highbd_sad16x32_avg_sse2, 10), + SadMxNAvgParam(16, 16, &vpx_highbd_sad16x16_avg_sse2, 10), + SadMxNAvgParam(16, 8, &vpx_highbd_sad16x8_avg_sse2, 10), + SadMxNAvgParam(8, 16, &vpx_highbd_sad8x16_avg_sse2, 10), + SadMxNAvgParam(8, 8, &vpx_highbd_sad8x8_avg_sse2, 10), + SadMxNAvgParam(8, 4, &vpx_highbd_sad8x4_avg_sse2, 10), + SadMxNAvgParam(64, 64, &vpx_highbd_sad64x64_avg_sse2, 12), + SadMxNAvgParam(64, 32, &vpx_highbd_sad64x32_avg_sse2, 12), + SadMxNAvgParam(32, 64, &vpx_highbd_sad32x64_avg_sse2, 12), + SadMxNAvgParam(32, 32, &vpx_highbd_sad32x32_avg_sse2, 12), + SadMxNAvgParam(32, 16, 
&vpx_highbd_sad32x16_avg_sse2, 12), + SadMxNAvgParam(16, 32, &vpx_highbd_sad16x32_avg_sse2, 12), + SadMxNAvgParam(16, 16, &vpx_highbd_sad16x16_avg_sse2, 12), + SadMxNAvgParam(16, 8, &vpx_highbd_sad16x8_avg_sse2, 12), + SadMxNAvgParam(8, 16, &vpx_highbd_sad8x16_avg_sse2, 12), + SadMxNAvgParam(8, 8, &vpx_highbd_sad8x8_avg_sse2, 12), + SadMxNAvgParam(8, 4, &vpx_highbd_sad8x4_avg_sse2, 12), #endif // CONFIG_VP9_HIGHBITDEPTH }; INSTANTIATE_TEST_CASE_P(SSE2, SADavgTest, ::testing::ValuesIn(avg_sse2_tests)); const SadMxNx4Param x4d_sse2_tests[] = { - make_tuple(64, 64, &vpx_sad64x64x4d_sse2, -1), - make_tuple(64, 32, &vpx_sad64x32x4d_sse2, -1), - make_tuple(32, 64, &vpx_sad32x64x4d_sse2, -1), - make_tuple(32, 32, &vpx_sad32x32x4d_sse2, -1), - make_tuple(32, 16, &vpx_sad32x16x4d_sse2, -1), - make_tuple(16, 32, &vpx_sad16x32x4d_sse2, -1), - make_tuple(16, 16, &vpx_sad16x16x4d_sse2, -1), - make_tuple(16, 8, &vpx_sad16x8x4d_sse2, -1), - make_tuple(8, 16, &vpx_sad8x16x4d_sse2, -1), - make_tuple(8, 8, &vpx_sad8x8x4d_sse2, -1), - make_tuple(8, 4, &vpx_sad8x4x4d_sse2, -1), - make_tuple(4, 8, &vpx_sad4x8x4d_sse2, -1), - make_tuple(4, 4, &vpx_sad4x4x4d_sse2, -1), + SadMxNx4Param(64, 64, &vpx_sad64x64x4d_sse2), + SadMxNx4Param(64, 32, &vpx_sad64x32x4d_sse2), + SadMxNx4Param(32, 64, &vpx_sad32x64x4d_sse2), + SadMxNx4Param(32, 32, &vpx_sad32x32x4d_sse2), + SadMxNx4Param(32, 16, &vpx_sad32x16x4d_sse2), + SadMxNx4Param(16, 32, &vpx_sad16x32x4d_sse2), + SadMxNx4Param(16, 16, &vpx_sad16x16x4d_sse2), + SadMxNx4Param(16, 8, &vpx_sad16x8x4d_sse2), + SadMxNx4Param(8, 16, &vpx_sad8x16x4d_sse2), + SadMxNx4Param(8, 8, &vpx_sad8x8x4d_sse2), + SadMxNx4Param(8, 4, &vpx_sad8x4x4d_sse2), + SadMxNx4Param(4, 8, &vpx_sad4x8x4d_sse2), + SadMxNx4Param(4, 4, &vpx_sad4x4x4d_sse2), #if CONFIG_VP9_HIGHBITDEPTH - make_tuple(64, 64, &vpx_highbd_sad64x64x4d_sse2, 8), - make_tuple(64, 32, &vpx_highbd_sad64x32x4d_sse2, 8), - make_tuple(32, 64, &vpx_highbd_sad32x64x4d_sse2, 8), - make_tuple(32, 32, 
&vpx_highbd_sad32x32x4d_sse2, 8), - make_tuple(32, 16, &vpx_highbd_sad32x16x4d_sse2, 8), - make_tuple(16, 32, &vpx_highbd_sad16x32x4d_sse2, 8), - make_tuple(16, 16, &vpx_highbd_sad16x16x4d_sse2, 8), - make_tuple(16, 8, &vpx_highbd_sad16x8x4d_sse2, 8), - make_tuple(8, 16, &vpx_highbd_sad8x16x4d_sse2, 8), - make_tuple(8, 8, &vpx_highbd_sad8x8x4d_sse2, 8), - make_tuple(8, 4, &vpx_highbd_sad8x4x4d_sse2, 8), - make_tuple(4, 8, &vpx_highbd_sad4x8x4d_sse2, 8), - make_tuple(4, 4, &vpx_highbd_sad4x4x4d_sse2, 8), - make_tuple(64, 64, &vpx_highbd_sad64x64x4d_sse2, 10), - make_tuple(64, 32, &vpx_highbd_sad64x32x4d_sse2, 10), - make_tuple(32, 64, &vpx_highbd_sad32x64x4d_sse2, 10), - make_tuple(32, 32, &vpx_highbd_sad32x32x4d_sse2, 10), - make_tuple(32, 16, &vpx_highbd_sad32x16x4d_sse2, 10), - make_tuple(16, 32, &vpx_highbd_sad16x32x4d_sse2, 10), - make_tuple(16, 16, &vpx_highbd_sad16x16x4d_sse2, 10), - make_tuple(16, 8, &vpx_highbd_sad16x8x4d_sse2, 10), - make_tuple(8, 16, &vpx_highbd_sad8x16x4d_sse2, 10), - make_tuple(8, 8, &vpx_highbd_sad8x8x4d_sse2, 10), - make_tuple(8, 4, &vpx_highbd_sad8x4x4d_sse2, 10), - make_tuple(4, 8, &vpx_highbd_sad4x8x4d_sse2, 10), - make_tuple(4, 4, &vpx_highbd_sad4x4x4d_sse2, 10), - make_tuple(64, 64, &vpx_highbd_sad64x64x4d_sse2, 12), - make_tuple(64, 32, &vpx_highbd_sad64x32x4d_sse2, 12), - make_tuple(32, 64, &vpx_highbd_sad32x64x4d_sse2, 12), - make_tuple(32, 32, &vpx_highbd_sad32x32x4d_sse2, 12), - make_tuple(32, 16, &vpx_highbd_sad32x16x4d_sse2, 12), - make_tuple(16, 32, &vpx_highbd_sad16x32x4d_sse2, 12), - make_tuple(16, 16, &vpx_highbd_sad16x16x4d_sse2, 12), - make_tuple(16, 8, &vpx_highbd_sad16x8x4d_sse2, 12), - make_tuple(8, 16, &vpx_highbd_sad8x16x4d_sse2, 12), - make_tuple(8, 8, &vpx_highbd_sad8x8x4d_sse2, 12), - make_tuple(8, 4, &vpx_highbd_sad8x4x4d_sse2, 12), - make_tuple(4, 8, &vpx_highbd_sad4x8x4d_sse2, 12), - make_tuple(4, 4, &vpx_highbd_sad4x4x4d_sse2, 12), + SadMxNx4Param(64, 64, &vpx_highbd_sad64x64x4d_sse2, 8), + 
SadMxNx4Param(64, 32, &vpx_highbd_sad64x32x4d_sse2, 8), + SadMxNx4Param(32, 64, &vpx_highbd_sad32x64x4d_sse2, 8), + SadMxNx4Param(32, 32, &vpx_highbd_sad32x32x4d_sse2, 8), + SadMxNx4Param(32, 16, &vpx_highbd_sad32x16x4d_sse2, 8), + SadMxNx4Param(16, 32, &vpx_highbd_sad16x32x4d_sse2, 8), + SadMxNx4Param(16, 16, &vpx_highbd_sad16x16x4d_sse2, 8), + SadMxNx4Param(16, 8, &vpx_highbd_sad16x8x4d_sse2, 8), + SadMxNx4Param(8, 16, &vpx_highbd_sad8x16x4d_sse2, 8), + SadMxNx4Param(8, 8, &vpx_highbd_sad8x8x4d_sse2, 8), + SadMxNx4Param(8, 4, &vpx_highbd_sad8x4x4d_sse2, 8), + SadMxNx4Param(4, 8, &vpx_highbd_sad4x8x4d_sse2, 8), + SadMxNx4Param(4, 4, &vpx_highbd_sad4x4x4d_sse2, 8), + SadMxNx4Param(64, 64, &vpx_highbd_sad64x64x4d_sse2, 10), + SadMxNx4Param(64, 32, &vpx_highbd_sad64x32x4d_sse2, 10), + SadMxNx4Param(32, 64, &vpx_highbd_sad32x64x4d_sse2, 10), + SadMxNx4Param(32, 32, &vpx_highbd_sad32x32x4d_sse2, 10), + SadMxNx4Param(32, 16, &vpx_highbd_sad32x16x4d_sse2, 10), + SadMxNx4Param(16, 32, &vpx_highbd_sad16x32x4d_sse2, 10), + SadMxNx4Param(16, 16, &vpx_highbd_sad16x16x4d_sse2, 10), + SadMxNx4Param(16, 8, &vpx_highbd_sad16x8x4d_sse2, 10), + SadMxNx4Param(8, 16, &vpx_highbd_sad8x16x4d_sse2, 10), + SadMxNx4Param(8, 8, &vpx_highbd_sad8x8x4d_sse2, 10), + SadMxNx4Param(8, 4, &vpx_highbd_sad8x4x4d_sse2, 10), + SadMxNx4Param(4, 8, &vpx_highbd_sad4x8x4d_sse2, 10), + SadMxNx4Param(4, 4, &vpx_highbd_sad4x4x4d_sse2, 10), + SadMxNx4Param(64, 64, &vpx_highbd_sad64x64x4d_sse2, 12), + SadMxNx4Param(64, 32, &vpx_highbd_sad64x32x4d_sse2, 12), + SadMxNx4Param(32, 64, &vpx_highbd_sad32x64x4d_sse2, 12), + SadMxNx4Param(32, 32, &vpx_highbd_sad32x32x4d_sse2, 12), + SadMxNx4Param(32, 16, &vpx_highbd_sad32x16x4d_sse2, 12), + SadMxNx4Param(16, 32, &vpx_highbd_sad16x32x4d_sse2, 12), + SadMxNx4Param(16, 16, &vpx_highbd_sad16x16x4d_sse2, 12), + SadMxNx4Param(16, 8, &vpx_highbd_sad16x8x4d_sse2, 12), + SadMxNx4Param(8, 16, &vpx_highbd_sad8x16x4d_sse2, 12), + SadMxNx4Param(8, 8, &vpx_highbd_sad8x8x4d_sse2, 
12), + SadMxNx4Param(8, 4, &vpx_highbd_sad8x4x4d_sse2, 12), + SadMxNx4Param(4, 8, &vpx_highbd_sad4x8x4d_sse2, 12), + SadMxNx4Param(4, 4, &vpx_highbd_sad4x4x4d_sse2, 12), #endif // CONFIG_VP9_HIGHBITDEPTH }; INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::ValuesIn(x4d_sse2_tests)); -#endif // CONFIG_USE_X86INC #endif // HAVE_SSE2 #if HAVE_SSE3 @@ -880,26 +841,26 @@ INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::ValuesIn(x4d_sse2_tests)); #if HAVE_AVX2 const SadMxNParam avx2_tests[] = { - make_tuple(64, 64, &vpx_sad64x64_avx2, -1), - make_tuple(64, 32, &vpx_sad64x32_avx2, -1), - make_tuple(32, 64, &vpx_sad32x64_avx2, -1), - make_tuple(32, 32, &vpx_sad32x32_avx2, -1), - make_tuple(32, 16, &vpx_sad32x16_avx2, -1), + SadMxNParam(64, 64, &vpx_sad64x64_avx2), + SadMxNParam(64, 32, &vpx_sad64x32_avx2), + SadMxNParam(32, 64, &vpx_sad32x64_avx2), + SadMxNParam(32, 32, &vpx_sad32x32_avx2), + SadMxNParam(32, 16, &vpx_sad32x16_avx2), }; INSTANTIATE_TEST_CASE_P(AVX2, SADTest, ::testing::ValuesIn(avx2_tests)); const SadMxNAvgParam avg_avx2_tests[] = { - make_tuple(64, 64, &vpx_sad64x64_avg_avx2, -1), - make_tuple(64, 32, &vpx_sad64x32_avg_avx2, -1), - make_tuple(32, 64, &vpx_sad32x64_avg_avx2, -1), - make_tuple(32, 32, &vpx_sad32x32_avg_avx2, -1), - make_tuple(32, 16, &vpx_sad32x16_avg_avx2, -1), + SadMxNAvgParam(64, 64, &vpx_sad64x64_avg_avx2), + SadMxNAvgParam(64, 32, &vpx_sad64x32_avg_avx2), + SadMxNAvgParam(32, 64, &vpx_sad32x64_avg_avx2), + SadMxNAvgParam(32, 32, &vpx_sad32x32_avg_avx2), + SadMxNAvgParam(32, 16, &vpx_sad32x16_avg_avx2), }; INSTANTIATE_TEST_CASE_P(AVX2, SADavgTest, ::testing::ValuesIn(avg_avx2_tests)); const SadMxNx4Param x4d_avx2_tests[] = { - make_tuple(64, 64, &vpx_sad64x64x4d_avx2, -1), - make_tuple(32, 32, &vpx_sad32x32x4d_avx2, -1), + SadMxNx4Param(64, 64, &vpx_sad64x64x4d_avx2), + SadMxNx4Param(32, 32, &vpx_sad32x32x4d_avx2), }; INSTANTIATE_TEST_CASE_P(AVX2, SADx4Test, ::testing::ValuesIn(x4d_avx2_tests)); #endif // HAVE_AVX2 @@ -908,53 
+869,53 @@ INSTANTIATE_TEST_CASE_P(AVX2, SADx4Test, ::testing::ValuesIn(x4d_avx2_tests)); // MIPS functions #if HAVE_MSA const SadMxNParam msa_tests[] = { - make_tuple(64, 64, &vpx_sad64x64_msa, -1), - make_tuple(64, 32, &vpx_sad64x32_msa, -1), - make_tuple(32, 64, &vpx_sad32x64_msa, -1), - make_tuple(32, 32, &vpx_sad32x32_msa, -1), - make_tuple(32, 16, &vpx_sad32x16_msa, -1), - make_tuple(16, 32, &vpx_sad16x32_msa, -1), - make_tuple(16, 16, &vpx_sad16x16_msa, -1), - make_tuple(16, 8, &vpx_sad16x8_msa, -1), - make_tuple(8, 16, &vpx_sad8x16_msa, -1), - make_tuple(8, 8, &vpx_sad8x8_msa, -1), - make_tuple(8, 4, &vpx_sad8x4_msa, -1), - make_tuple(4, 8, &vpx_sad4x8_msa, -1), - make_tuple(4, 4, &vpx_sad4x4_msa, -1), + SadMxNParam(64, 64, &vpx_sad64x64_msa), + SadMxNParam(64, 32, &vpx_sad64x32_msa), + SadMxNParam(32, 64, &vpx_sad32x64_msa), + SadMxNParam(32, 32, &vpx_sad32x32_msa), + SadMxNParam(32, 16, &vpx_sad32x16_msa), + SadMxNParam(16, 32, &vpx_sad16x32_msa), + SadMxNParam(16, 16, &vpx_sad16x16_msa), + SadMxNParam(16, 8, &vpx_sad16x8_msa), + SadMxNParam(8, 16, &vpx_sad8x16_msa), + SadMxNParam(8, 8, &vpx_sad8x8_msa), + SadMxNParam(8, 4, &vpx_sad8x4_msa), + SadMxNParam(4, 8, &vpx_sad4x8_msa), + SadMxNParam(4, 4, &vpx_sad4x4_msa), }; INSTANTIATE_TEST_CASE_P(MSA, SADTest, ::testing::ValuesIn(msa_tests)); const SadMxNAvgParam avg_msa_tests[] = { - make_tuple(64, 64, &vpx_sad64x64_avg_msa, -1), - make_tuple(64, 32, &vpx_sad64x32_avg_msa, -1), - make_tuple(32, 64, &vpx_sad32x64_avg_msa, -1), - make_tuple(32, 32, &vpx_sad32x32_avg_msa, -1), - make_tuple(32, 16, &vpx_sad32x16_avg_msa, -1), - make_tuple(16, 32, &vpx_sad16x32_avg_msa, -1), - make_tuple(16, 16, &vpx_sad16x16_avg_msa, -1), - make_tuple(16, 8, &vpx_sad16x8_avg_msa, -1), - make_tuple(8, 16, &vpx_sad8x16_avg_msa, -1), - make_tuple(8, 8, &vpx_sad8x8_avg_msa, -1), - make_tuple(8, 4, &vpx_sad8x4_avg_msa, -1), - make_tuple(4, 8, &vpx_sad4x8_avg_msa, -1), - make_tuple(4, 4, &vpx_sad4x4_avg_msa, -1), + SadMxNAvgParam(64, 
64, &vpx_sad64x64_avg_msa), + SadMxNAvgParam(64, 32, &vpx_sad64x32_avg_msa), + SadMxNAvgParam(32, 64, &vpx_sad32x64_avg_msa), + SadMxNAvgParam(32, 32, &vpx_sad32x32_avg_msa), + SadMxNAvgParam(32, 16, &vpx_sad32x16_avg_msa), + SadMxNAvgParam(16, 32, &vpx_sad16x32_avg_msa), + SadMxNAvgParam(16, 16, &vpx_sad16x16_avg_msa), + SadMxNAvgParam(16, 8, &vpx_sad16x8_avg_msa), + SadMxNAvgParam(8, 16, &vpx_sad8x16_avg_msa), + SadMxNAvgParam(8, 8, &vpx_sad8x8_avg_msa), + SadMxNAvgParam(8, 4, &vpx_sad8x4_avg_msa), + SadMxNAvgParam(4, 8, &vpx_sad4x8_avg_msa), + SadMxNAvgParam(4, 4, &vpx_sad4x4_avg_msa), }; INSTANTIATE_TEST_CASE_P(MSA, SADavgTest, ::testing::ValuesIn(avg_msa_tests)); const SadMxNx4Param x4d_msa_tests[] = { - make_tuple(64, 64, &vpx_sad64x64x4d_msa, -1), - make_tuple(64, 32, &vpx_sad64x32x4d_msa, -1), - make_tuple(32, 64, &vpx_sad32x64x4d_msa, -1), - make_tuple(32, 32, &vpx_sad32x32x4d_msa, -1), - make_tuple(32, 16, &vpx_sad32x16x4d_msa, -1), - make_tuple(16, 32, &vpx_sad16x32x4d_msa, -1), - make_tuple(16, 16, &vpx_sad16x16x4d_msa, -1), - make_tuple(16, 8, &vpx_sad16x8x4d_msa, -1), - make_tuple(8, 16, &vpx_sad8x16x4d_msa, -1), - make_tuple(8, 8, &vpx_sad8x8x4d_msa, -1), - make_tuple(8, 4, &vpx_sad8x4x4d_msa, -1), - make_tuple(4, 8, &vpx_sad4x8x4d_msa, -1), - make_tuple(4, 4, &vpx_sad4x4x4d_msa, -1), + SadMxNx4Param(64, 64, &vpx_sad64x64x4d_msa), + SadMxNx4Param(64, 32, &vpx_sad64x32x4d_msa), + SadMxNx4Param(32, 64, &vpx_sad32x64x4d_msa), + SadMxNx4Param(32, 32, &vpx_sad32x32x4d_msa), + SadMxNx4Param(32, 16, &vpx_sad32x16x4d_msa), + SadMxNx4Param(16, 32, &vpx_sad16x32x4d_msa), + SadMxNx4Param(16, 16, &vpx_sad16x16x4d_msa), + SadMxNx4Param(16, 8, &vpx_sad16x8x4d_msa), + SadMxNx4Param(8, 16, &vpx_sad8x16x4d_msa), + SadMxNx4Param(8, 8, &vpx_sad8x8x4d_msa), + SadMxNx4Param(8, 4, &vpx_sad8x4x4d_msa), + SadMxNx4Param(4, 8, &vpx_sad4x8x4d_msa), + SadMxNx4Param(4, 4, &vpx_sad4x4x4d_msa), }; INSTANTIATE_TEST_CASE_P(MSA, SADx4Test, ::testing::ValuesIn(x4d_msa_tests)); #endif 
// HAVE_MSA diff --git a/libs/libvpx/test/set_roi.cc b/libs/libvpx/test/set_roi.cc index fea8cca7a1..38711a806d 100644 --- a/libs/libvpx/test/set_roi.cc +++ b/libs/libvpx/test/set_roi.cc @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include #include #include @@ -33,14 +32,10 @@ TEST(VP8RoiMapTest, ParameterCheck) { unsigned int threshold[MAX_MB_SEGMENTS] = { 0, 100, 200, 300 }; const int internalq_trans[] = { - 0, 1, 2, 3, 4, 5, 7, 8, - 9, 10, 12, 13, 15, 17, 18, 19, - 20, 21, 23, 24, 25, 26, 27, 28, - 29, 30, 31, 33, 35, 37, 39, 41, - 43, 45, 47, 49, 51, 53, 55, 57, - 59, 61, 64, 67, 70, 73, 76, 79, - 82, 85, 88, 91, 94, 97, 100, 103, - 106, 109, 112, 115, 118, 121, 124, 127, + 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 17, 18, 19, + 20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41, + 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79, + 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127, }; // Initialize elements of cpi with valid defaults. @@ -60,17 +55,17 @@ TEST(VP8RoiMapTest, ParameterCheck) { // Allocate memory for the source memory map. unsigned char *roi_map = - reinterpret_cast(vpx_calloc(mbs, 1)); + reinterpret_cast(vpx_calloc(mbs, 1)); memset(&roi_map[mbs >> 2], 1, (mbs >> 2)); memset(&roi_map[mbs >> 1], 2, (mbs >> 2)); - memset(&roi_map[mbs -(mbs >> 2)], 3, (mbs >> 2)); + memset(&roi_map[mbs - (mbs >> 2)], 3, (mbs >> 2)); // Do a test call with valid parameters. - int roi_retval = vp8_set_roimap(&cpi, roi_map, cpi.common.mb_rows, - cpi.common.mb_cols, delta_q, delta_lf, - threshold); + int roi_retval = + vp8_set_roimap(&cpi, roi_map, cpi.common.mb_rows, cpi.common.mb_cols, + delta_q, delta_lf, threshold); EXPECT_EQ(0, roi_retval) - << "vp8_set_roimap roi failed with default test parameters"; + << "vp8_set_roimap roi failed with default test parameters"; // Check that the values in the cpi structure get set as expected. 
if (roi_retval == 0) { @@ -83,9 +78,9 @@ TEST(VP8RoiMapTest, ParameterCheck) { for (int i = 0; i < MAX_MB_SEGMENTS; ++i) { const int transq = internalq_trans[abs(delta_q[i])]; if (abs(cpi.segment_feature_data[MB_LVL_ALT_Q][i]) != transq) { - EXPECT_EQ(transq, cpi.segment_feature_data[MB_LVL_ALT_Q][i]) - << "segment delta_q error"; - break; + EXPECT_EQ(transq, cpi.segment_feature_data[MB_LVL_ALT_Q][i]) + << "segment delta_q error"; + break; } } @@ -93,7 +88,7 @@ TEST(VP8RoiMapTest, ParameterCheck) { for (int i = 0; i < MAX_MB_SEGMENTS; ++i) { if (cpi.segment_feature_data[MB_LVL_ALT_LF][i] != delta_lf[i]) { EXPECT_EQ(delta_lf[i], cpi.segment_feature_data[MB_LVL_ALT_LF][i]) - << "segment delta_lf error"; + << "segment delta_lf error"; break; } } @@ -101,23 +96,21 @@ TEST(VP8RoiMapTest, ParameterCheck) { // Check the breakout thresholds for (int i = 0; i < MAX_MB_SEGMENTS; ++i) { unsigned int breakout = - static_cast(cpi.segment_encode_breakout[i]); + static_cast(cpi.segment_encode_breakout[i]); if (threshold[i] != breakout) { - EXPECT_EQ(threshold[i], breakout) - << "breakout threshold error"; + EXPECT_EQ(threshold[i], breakout) << "breakout threshold error"; break; } } // Segmentation, and segmentation update flages should be set. 
EXPECT_EQ(1, cpi.mb.e_mbd.segmentation_enabled) - << "segmentation_enabled error"; + << "segmentation_enabled error"; EXPECT_EQ(1, cpi.mb.e_mbd.update_mb_segmentation_map) - << "update_mb_segmentation_map error"; + << "update_mb_segmentation_map error"; EXPECT_EQ(1, cpi.mb.e_mbd.update_mb_segmentation_data) - << "update_mb_segmentation_data error"; - + << "update_mb_segmentation_data error"; // Try a range of delta q and lf parameters (some legal, some not) for (int i = 0; i < 1000; ++i) { @@ -128,57 +121,54 @@ TEST(VP8RoiMapTest, ParameterCheck) { rand_deltas[2] = rnd(160) - 80; rand_deltas[3] = rnd(160) - 80; - deltas_valid = ((abs(rand_deltas[0]) <= 63) && - (abs(rand_deltas[1]) <= 63) && - (abs(rand_deltas[2]) <= 63) && - (abs(rand_deltas[3]) <= 63)) ? 0 : -1; + deltas_valid = + ((abs(rand_deltas[0]) <= 63) && (abs(rand_deltas[1]) <= 63) && + (abs(rand_deltas[2]) <= 63) && (abs(rand_deltas[3]) <= 63)) + ? 0 + : -1; // Test with random delta q values. - roi_retval = vp8_set_roimap(&cpi, roi_map, cpi.common.mb_rows, - cpi.common.mb_cols, rand_deltas, - delta_lf, threshold); + roi_retval = + vp8_set_roimap(&cpi, roi_map, cpi.common.mb_rows, cpi.common.mb_cols, + rand_deltas, delta_lf, threshold); EXPECT_EQ(deltas_valid, roi_retval) << "dq range check error"; // One delta_q error shown at a time - if (deltas_valid != roi_retval) - break; + if (deltas_valid != roi_retval) break; // Test with random loop filter values. - roi_retval = vp8_set_roimap(&cpi, roi_map, cpi.common.mb_rows, - cpi.common.mb_cols, delta_q, - rand_deltas, threshold); + roi_retval = + vp8_set_roimap(&cpi, roi_map, cpi.common.mb_rows, cpi.common.mb_cols, + delta_q, rand_deltas, threshold); EXPECT_EQ(deltas_valid, roi_retval) << "dlf range check error"; // One delta loop filter error shown at a time - if (deltas_valid != roi_retval) - break; + if (deltas_valid != roi_retval) break; } // Test that we report and error if cyclic refresh is enabled. 
cpi.cyclic_refresh_mode_enabled = 1; - roi_retval = vp8_set_roimap(&cpi, roi_map, cpi.common.mb_rows, - cpi.common.mb_cols, delta_q, - delta_lf, threshold); + roi_retval = + vp8_set_roimap(&cpi, roi_map, cpi.common.mb_rows, cpi.common.mb_cols, + delta_q, delta_lf, threshold); EXPECT_EQ(-1, roi_retval) << "cyclic refresh check error"; cpi.cyclic_refresh_mode_enabled = 0; // Test invalid number of rows or colums. - roi_retval = vp8_set_roimap(&cpi, roi_map, cpi.common.mb_rows + 1, - cpi.common.mb_cols, delta_q, - delta_lf, threshold); + roi_retval = + vp8_set_roimap(&cpi, roi_map, cpi.common.mb_rows + 1, + cpi.common.mb_cols, delta_q, delta_lf, threshold); EXPECT_EQ(-1, roi_retval) << "MB rows bounds check error"; - roi_retval = vp8_set_roimap(&cpi, roi_map, cpi.common.mb_rows, - cpi.common.mb_cols - 1, delta_q, - delta_lf, threshold); + roi_retval = + vp8_set_roimap(&cpi, roi_map, cpi.common.mb_rows, + cpi.common.mb_cols - 1, delta_q, delta_lf, threshold); EXPECT_EQ(-1, roi_retval) << "MB cols bounds check error"; } // Free allocated memory - if (cpi.segmentation_map) - vpx_free(cpi.segmentation_map); - if (roi_map) - vpx_free(roi_map); + if (cpi.segmentation_map) vpx_free(cpi.segmentation_map); + if (roi_map) vpx_free(roi_map); }; } // namespace diff --git a/libs/libvpx/test/simple_encoder.sh b/libs/libvpx/test/simple_encoder.sh index c4a6280303..ee633ae99e 100755 --- a/libs/libvpx/test/simple_encoder.sh +++ b/libs/libvpx/test/simple_encoder.sh @@ -23,7 +23,7 @@ simple_encoder_verify_environment() { fi } -# Runs simple_encoder using the codec specified by $1. +# Runs simple_encoder using the codec specified by $1 with a frame limit of 100. 
simple_encoder() { local encoder="${LIBVPX_BIN_PATH}/simple_encoder${VPX_TEST_EXE_SUFFIX}" local codec="$1" @@ -35,7 +35,7 @@ simple_encoder() { fi eval "${VPX_TEST_PREFIX}" "${encoder}" "${codec}" "${YUV_RAW_INPUT_WIDTH}" \ - "${YUV_RAW_INPUT_HEIGHT}" "${YUV_RAW_INPUT}" "${output_file}" 9999 \ + "${YUV_RAW_INPUT_HEIGHT}" "${YUV_RAW_INPUT}" "${output_file}" 9999 0 100 \ ${devnull} [ -e "${output_file}" ] || return 1 @@ -47,16 +47,13 @@ simple_encoder_vp8() { fi } -# TODO(tomfinegan): Add a frame limit param to simple_encoder and enable this -# test. VP9 is just too slow right now: This test takes 4m30s+ on a fast -# machine. -DISABLED_simple_encoder_vp9() { +simple_encoder_vp9() { if [ "$(vp9_encode_available)" = "yes" ]; then simple_encoder vp9 || return 1 fi } simple_encoder_tests="simple_encoder_vp8 - DISABLED_simple_encoder_vp9" + simple_encoder_vp9" run_tests simple_encoder_verify_environment "${simple_encoder_tests}" diff --git a/libs/libvpx/test/sixtap_predict_test.cc b/libs/libvpx/test/sixtap_predict_test.cc deleted file mode 100644 index 304a1484af..0000000000 --- a/libs/libvpx/test/sixtap_predict_test.cc +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Copyright (c) 2013 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include -#include - -#include "third_party/googletest/src/include/gtest/gtest.h" - -#include "./vpx_config.h" -#include "./vp8_rtcd.h" -#include "test/acm_random.h" -#include "test/clear_system_state.h" -#include "test/register_state_check.h" -#include "test/util.h" -#include "vpx/vpx_integer.h" -#include "vpx_mem/vpx_mem.h" - -namespace { - -typedef void (*SixtapPredictFunc)(uint8_t *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - uint8_t *dst_ptr, - int dst_pitch); - -typedef std::tr1::tuple SixtapPredictParam; - -class SixtapPredictTest - : public ::testing::TestWithParam { - public: - static void SetUpTestCase() { - src_ = reinterpret_cast(vpx_memalign(kDataAlignment, kSrcSize)); - dst_ = reinterpret_cast(vpx_memalign(kDataAlignment, kDstSize)); - dst_c_ = reinterpret_cast(vpx_memalign(kDataAlignment, kDstSize)); - } - - static void TearDownTestCase() { - vpx_free(src_); - src_ = NULL; - vpx_free(dst_); - dst_ = NULL; - vpx_free(dst_c_); - dst_c_ = NULL; - } - - virtual void TearDown() { - libvpx_test::ClearSystemState(); - } - - protected: - // Make test arrays big enough for 16x16 functions. Six-tap filters - // need 5 extra pixels outside of the macroblock. - static const int kSrcStride = 21; - static const int kDstStride = 16; - static const int kDataAlignment = 16; - static const int kSrcSize = kSrcStride * kSrcStride + 1; - static const int kDstSize = kDstStride * kDstStride; - - virtual void SetUp() { - width_ = GET_PARAM(0); - height_ = GET_PARAM(1); - sixtap_predict_ = GET_PARAM(2); - memset(src_, 0, kSrcSize); - memset(dst_, 0, kDstSize); - memset(dst_c_, 0, kDstSize); - } - - int width_; - int height_; - SixtapPredictFunc sixtap_predict_; - // The src stores the macroblock we will filter on, and makes it 1 byte larger - // in order to test unaligned access. The result is stored in dst and dst_c(c - // reference code result). 
- static uint8_t* src_; - static uint8_t* dst_; - static uint8_t* dst_c_; -}; - -uint8_t* SixtapPredictTest::src_ = NULL; -uint8_t* SixtapPredictTest::dst_ = NULL; -uint8_t* SixtapPredictTest::dst_c_ = NULL; - -TEST_P(SixtapPredictTest, TestWithPresetData) { - // Test input - static const uint8_t test_data[kSrcSize] = { - 216, 184, 4, 191, 82, 92, 41, 0, 1, 226, 236, 172, 20, 182, 42, 226, 177, - 79, 94, 77, 179, 203, 206, 198, 22, 192, 19, 75, 17, 192, 44, 233, 120, - 48, 168, 203, 141, 210, 203, 143, 180, 184, 59, 201, 110, 102, 171, 32, - 182, 10, 109, 105, 213, 60, 47, 236, 253, 67, 55, 14, 3, 99, 247, 124, - 148, 159, 71, 34, 114, 19, 177, 38, 203, 237, 239, 58, 83, 155, 91, 10, - 166, 201, 115, 124, 5, 163, 104, 2, 231, 160, 16, 234, 4, 8, 103, 153, - 167, 174, 187, 26, 193, 109, 64, 141, 90, 48, 200, 174, 204, 36, 184, - 114, 237, 43, 238, 242, 207, 86, 245, 182, 247, 6, 161, 251, 14, 8, 148, - 182, 182, 79, 208, 120, 188, 17, 6, 23, 65, 206, 197, 13, 242, 126, 128, - 224, 170, 110, 211, 121, 197, 200, 47, 188, 207, 208, 184, 221, 216, 76, - 148, 143, 156, 100, 8, 89, 117, 14, 112, 183, 221, 54, 197, 208, 180, 69, - 176, 94, 180, 131, 215, 121, 76, 7, 54, 28, 216, 238, 249, 176, 58, 142, - 64, 215, 242, 72, 49, 104, 87, 161, 32, 52, 216, 230, 4, 141, 44, 181, - 235, 224, 57, 195, 89, 134, 203, 144, 162, 163, 126, 156, 84, 185, 42, - 148, 145, 29, 221, 194, 134, 52, 100, 166, 105, 60, 140, 110, 201, 184, - 35, 181, 153, 93, 121, 243, 227, 68, 131, 134, 232, 2, 35, 60, 187, 77, - 209, 76, 106, 174, 15, 241, 227, 115, 151, 77, 175, 36, 187, 121, 221, - 223, 47, 118, 61, 168, 105, 32, 237, 236, 167, 213, 238, 202, 17, 170, - 24, 226, 247, 131, 145, 6, 116, 117, 121, 11, 194, 41, 48, 126, 162, 13, - 93, 209, 131, 154, 122, 237, 187, 103, 217, 99, 60, 200, 45, 78, 115, 69, - 49, 106, 200, 194, 112, 60, 56, 234, 72, 251, 19, 120, 121, 182, 134, 215, - 135, 10, 114, 2, 247, 46, 105, 209, 145, 165, 153, 191, 243, 12, 5, 36, - 119, 206, 231, 231, 11, 32, 209, 83, 27, 
229, 204, 149, 155, 83, 109, 35, - 93, 223, 37, 84, 14, 142, 37, 160, 52, 191, 96, 40, 204, 101, 77, 67, 52, - 53, 43, 63, 85, 253, 147, 113, 226, 96, 6, 125, 179, 115, 161, 17, 83, - 198, 101, 98, 85, 139, 3, 137, 75, 99, 178, 23, 201, 255, 91, 253, 52, - 134, 60, 138, 131, 208, 251, 101, 48, 2, 227, 228, 118, 132, 245, 202, - 75, 91, 44, 160, 231, 47, 41, 50, 147, 220, 74, 92, 219, 165, 89, 16 - }; - - // Expected result - static const uint8_t expected_dst[kDstSize] = { - 117, 102, 74, 135, 42, 98, 175, 206, 70, 73, 222, 197, 50, 24, 39, 49, 38, - 105, 90, 47, 169, 40, 171, 215, 200, 73, 109, 141, 53, 85, 177, 164, 79, - 208, 124, 89, 212, 18, 81, 145, 151, 164, 217, 153, 91, 154, 102, 102, - 159, 75, 164, 152, 136, 51, 213, 219, 186, 116, 193, 224, 186, 36, 231, - 208, 84, 211, 155, 167, 35, 59, 42, 76, 216, 149, 73, 201, 78, 149, 184, - 100, 96, 196, 189, 198, 188, 235, 195, 117, 129, 120, 129, 49, 25, 133, - 113, 69, 221, 114, 70, 143, 99, 157, 108, 189, 140, 78, 6, 55, 65, 240, - 255, 245, 184, 72, 90, 100, 116, 131, 39, 60, 234, 167, 33, 160, 88, 185, - 200, 157, 159, 176, 127, 151, 138, 102, 168, 106, 170, 86, 82, 219, 189, - 76, 33, 115, 197, 106, 96, 198, 136, 97, 141, 237, 151, 98, 137, 191, - 185, 2, 57, 95, 142, 91, 255, 185, 97, 137, 76, 162, 94, 173, 131, 193, - 161, 81, 106, 72, 135, 222, 234, 137, 66, 137, 106, 243, 210, 147, 95, - 15, 137, 110, 85, 66, 16, 96, 167, 147, 150, 173, 203, 140, 118, 196, - 84, 147, 160, 19, 95, 101, 123, 74, 132, 202, 82, 166, 12, 131, 166, - 189, 170, 159, 85, 79, 66, 57, 152, 132, 203, 194, 0, 1, 56, 146, 180, - 224, 156, 28, 83, 181, 79, 76, 80, 46, 160, 175, 59, 106, 43, 87, 75, - 136, 85, 189, 46, 71, 200, 90 - }; - - uint8_t *src = const_cast(test_data); - - ASM_REGISTER_STATE_CHECK( - sixtap_predict_(&src[kSrcStride * 2 + 2 + 1], kSrcStride, - 2, 2, dst_, kDstStride)); - - for (int i = 0; i < height_; ++i) - for (int j = 0; j < width_; ++j) - ASSERT_EQ(expected_dst[i * kDstStride + j], dst_[i * kDstStride + j]) 
- << "i==" << (i * width_ + j); -} - -using libvpx_test::ACMRandom; - -TEST_P(SixtapPredictTest, TestWithRandomData) { - ACMRandom rnd(ACMRandom::DeterministicSeed()); - for (int i = 0; i < kSrcSize; ++i) - src_[i] = rnd.Rand8(); - - // Run tests for all possible offsets. - for (int xoffset = 0; xoffset < 8; ++xoffset) { - for (int yoffset = 0; yoffset < 8; ++yoffset) { - // Call c reference function. - // Move start point to next pixel to test if the function reads - // unaligned data correctly. - vp8_sixtap_predict16x16_c(&src_[kSrcStride * 2 + 2 + 1], kSrcStride, - xoffset, yoffset, dst_c_, kDstStride); - - // Run test. - ASM_REGISTER_STATE_CHECK( - sixtap_predict_(&src_[kSrcStride * 2 + 2 + 1], kSrcStride, - xoffset, yoffset, dst_, kDstStride)); - - for (int i = 0; i < height_; ++i) - for (int j = 0; j < width_; ++j) - ASSERT_EQ(dst_c_[i * kDstStride + j], dst_[i * kDstStride + j]) - << "i==" << (i * width_ + j); - } - } -} - -using std::tr1::make_tuple; - -INSTANTIATE_TEST_CASE_P( - C, SixtapPredictTest, ::testing::Values( - make_tuple(16, 16, &vp8_sixtap_predict16x16_c), - make_tuple(8, 8, &vp8_sixtap_predict8x8_c), - make_tuple(8, 4, &vp8_sixtap_predict8x4_c), - make_tuple(4, 4, &vp8_sixtap_predict4x4_c))); -#if HAVE_NEON -INSTANTIATE_TEST_CASE_P( - NEON, SixtapPredictTest, ::testing::Values( - make_tuple(16, 16, &vp8_sixtap_predict16x16_neon), - make_tuple(8, 8, &vp8_sixtap_predict8x8_neon), - make_tuple(8, 4, &vp8_sixtap_predict8x4_neon))); -#endif -#if HAVE_MMX -INSTANTIATE_TEST_CASE_P( - MMX, SixtapPredictTest, ::testing::Values( - make_tuple(16, 16, &vp8_sixtap_predict16x16_mmx), - make_tuple(8, 8, &vp8_sixtap_predict8x8_mmx), - make_tuple(8, 4, &vp8_sixtap_predict8x4_mmx), - make_tuple(4, 4, &vp8_sixtap_predict4x4_mmx))); -#endif -#if HAVE_SSE2 -INSTANTIATE_TEST_CASE_P( - SSE2, SixtapPredictTest, ::testing::Values( - make_tuple(16, 16, &vp8_sixtap_predict16x16_sse2), - make_tuple(8, 8, &vp8_sixtap_predict8x8_sse2), - make_tuple(8, 4, 
&vp8_sixtap_predict8x4_sse2))); -#endif -#if HAVE_SSSE3 -INSTANTIATE_TEST_CASE_P( - SSSE3, SixtapPredictTest, ::testing::Values( - make_tuple(16, 16, &vp8_sixtap_predict16x16_ssse3), - make_tuple(8, 8, &vp8_sixtap_predict8x8_ssse3), - make_tuple(8, 4, &vp8_sixtap_predict8x4_ssse3), - make_tuple(4, 4, &vp8_sixtap_predict4x4_ssse3))); -#endif -#if HAVE_MSA -INSTANTIATE_TEST_CASE_P( - MSA, SixtapPredictTest, ::testing::Values( - make_tuple(16, 16, &vp8_sixtap_predict16x16_msa), - make_tuple(8, 8, &vp8_sixtap_predict8x8_msa), - make_tuple(8, 4, &vp8_sixtap_predict8x4_msa), - make_tuple(4, 4, &vp8_sixtap_predict4x4_msa))); -#endif -} // namespace diff --git a/libs/libvpx/test/sum_squares_test.cc b/libs/libvpx/test/sum_squares_test.cc new file mode 100644 index 0000000000..3aa43d2ce0 --- /dev/null +++ b/libs/libvpx/test/sum_squares_test.cc @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include +#include +#include + +#include "third_party/googletest/src/include/gtest/gtest.h" + +#include "./vpx_config.h" +#include "./vpx_dsp_rtcd.h" +#include "test/acm_random.h" +#include "test/clear_system_state.h" +#include "test/register_state_check.h" +#include "test/util.h" +#include "vpx_ports/mem.h" + +using libvpx_test::ACMRandom; + +namespace { +const int kNumIterations = 10000; + +typedef uint64_t (*SSI16Func)(const int16_t *src, int stride, int size); +typedef std::tr1::tuple SumSquaresParam; + +class SumSquaresTest : public ::testing::TestWithParam { + public: + virtual ~SumSquaresTest() {} + virtual void SetUp() { + ref_func_ = GET_PARAM(0); + tst_func_ = GET_PARAM(1); + } + + virtual void TearDown() { libvpx_test::ClearSystemState(); } + + protected: + SSI16Func ref_func_; + SSI16Func tst_func_; +}; + +TEST_P(SumSquaresTest, OperationCheck) { + ACMRandom rnd(ACMRandom::DeterministicSeed()); + DECLARE_ALIGNED(16, int16_t, src[256 * 256]); + const int msb = 11; // Up to 12 bit input + const int limit = 1 << (msb + 1); + + for (int k = 0; k < kNumIterations; k++) { + const int size = 4 << rnd(6); // Up to 128x128 + int stride = 4 << rnd(7); // Up to 256 stride + while (stride < size) { // Make sure it's valid + stride = 4 << rnd(7); + } + + for (int i = 0; i < size; ++i) { + for (int j = 0; j < size; ++j) { + src[i * stride + j] = rnd(2) ? 
rnd(limit) : -rnd(limit); + } + } + + const uint64_t res_ref = ref_func_(src, stride, size); + uint64_t res_tst; + ASM_REGISTER_STATE_CHECK(res_tst = tst_func_(src, stride, size)); + + ASSERT_EQ(res_ref, res_tst) << "Error: Sum Squares Test" + << " C output does not match optimized output."; + } +} + +TEST_P(SumSquaresTest, ExtremeValues) { + ACMRandom rnd(ACMRandom::DeterministicSeed()); + DECLARE_ALIGNED(16, int16_t, src[256 * 256]); + const int msb = 11; // Up to 12 bit input + const int limit = 1 << (msb + 1); + + for (int k = 0; k < kNumIterations; k++) { + const int size = 4 << rnd(6); // Up to 128x128 + int stride = 4 << rnd(7); // Up to 256 stride + while (stride < size) { // Make sure it's valid + stride = 4 << rnd(7); + } + + const int val = rnd(2) ? limit - 1 : -(limit - 1); + for (int i = 0; i < size; ++i) { + for (int j = 0; j < size; ++j) { + src[i * stride + j] = val; + } + } + + const uint64_t res_ref = ref_func_(src, stride, size); + uint64_t res_tst; + ASM_REGISTER_STATE_CHECK(res_tst = tst_func_(src, stride, size)); + + ASSERT_EQ(res_ref, res_tst) << "Error: Sum Squares Test" + << " C output does not match optimized output."; + } +} + +using std::tr1::make_tuple; + +#if HAVE_SSE2 +INSTANTIATE_TEST_CASE_P( + SSE2, SumSquaresTest, + ::testing::Values(make_tuple(&vpx_sum_squares_2d_i16_c, + &vpx_sum_squares_2d_i16_sse2))); +#endif // HAVE_SSE2 +} // namespace diff --git a/libs/libvpx/test/superframe_test.cc b/libs/libvpx/test/superframe_test.cc index 90aa75b41e..421dfccd60 100644 --- a/libs/libvpx/test/superframe_test.cc +++ b/libs/libvpx/test/superframe_test.cc @@ -17,31 +17,27 @@ namespace { const int kTestMode = 0; -const int kSuperframeSyntax = 1; -typedef std::tr1::tuple SuperframeTestParam; +typedef std::tr1::tuple SuperframeTestParam; -class SuperframeTest : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWithParam { +class SuperframeTest + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWithParam { 
protected: - SuperframeTest() : EncoderTest(GET_PARAM(0)), modified_buf_(NULL), - last_sf_pts_(0) {} + SuperframeTest() + : EncoderTest(GET_PARAM(0)), modified_buf_(NULL), last_sf_pts_(0) {} virtual ~SuperframeTest() {} virtual void SetUp() { InitializeConfig(); const SuperframeTestParam input = GET_PARAM(1); const libvpx_test::TestMode mode = std::tr1::get(input); - const int syntax = std::tr1::get(input); SetMode(mode); sf_count_ = 0; sf_count_max_ = INT_MAX; - is_vp10_style_superframe_ = syntax; } - virtual void TearDown() { - delete[] modified_buf_; - } + virtual void TearDown() { delete[] modified_buf_; } virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video, libvpx_test::Encoder *encoder) { @@ -50,26 +46,21 @@ class SuperframeTest : public ::libvpx_test::EncoderTest, } } - virtual const vpx_codec_cx_pkt_t * MutateEncoderOutputHook( + virtual const vpx_codec_cx_pkt_t *MutateEncoderOutputHook( const vpx_codec_cx_pkt_t *pkt) { - if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) - return pkt; + if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) return pkt; - const uint8_t *buffer = reinterpret_cast(pkt->data.frame.buf); + const uint8_t *buffer = reinterpret_cast(pkt->data.frame.buf); const uint8_t marker = buffer[pkt->data.frame.sz - 1]; const int frames = (marker & 0x7) + 1; const int mag = ((marker >> 3) & 3) + 1; - const unsigned int index_sz = - 2 + mag * (frames - is_vp10_style_superframe_); - if ((marker & 0xe0) == 0xc0 && - pkt->data.frame.sz >= index_sz && + const unsigned int index_sz = 2 + mag * frames; + if ((marker & 0xe0) == 0xc0 && pkt->data.frame.sz >= index_sz && buffer[pkt->data.frame.sz - index_sz] == marker) { // frame is a superframe. strip off the index. 
- if (modified_buf_) - delete[] modified_buf_; + if (modified_buf_) delete[] modified_buf_; modified_buf_ = new uint8_t[pkt->data.frame.sz - index_sz]; - memcpy(modified_buf_, pkt->data.frame.buf, - pkt->data.frame.sz - index_sz); + memcpy(modified_buf_, pkt->data.frame.buf, pkt->data.frame.sz - index_sz); modified_pkt_ = *pkt; modified_pkt_.data.frame.buf = modified_buf_; modified_pkt_.data.frame.sz -= index_sz; @@ -80,12 +71,11 @@ class SuperframeTest : public ::libvpx_test::EncoderTest, } // Make sure we do a few frames after the last SF - abort_ |= sf_count_ > sf_count_max_ && - pkt->data.frame.pts - last_sf_pts_ >= 5; + abort_ |= + sf_count_ > sf_count_max_ && pkt->data.frame.pts - last_sf_pts_ >= 5; return pkt; } - int is_vp10_style_superframe_; int sf_count_; int sf_count_max_; vpx_codec_cx_pkt_t modified_pkt_; @@ -103,11 +93,8 @@ TEST_P(SuperframeTest, TestSuperframeIndexIsOptional) { EXPECT_EQ(sf_count_, 1); } -VP9_INSTANTIATE_TEST_CASE(SuperframeTest, ::testing::Combine( - ::testing::Values(::libvpx_test::kTwoPassGood), - ::testing::Values(0))); - -VP10_INSTANTIATE_TEST_CASE(SuperframeTest, ::testing::Combine( - ::testing::Values(::libvpx_test::kTwoPassGood), - ::testing::Values(CONFIG_MISC_FIXES))); +VP9_INSTANTIATE_TEST_CASE( + SuperframeTest, + ::testing::Combine(::testing::Values(::libvpx_test::kTwoPassGood), + ::testing::Values(0))); } // namespace diff --git a/libs/libvpx/test/svc_test.cc b/libs/libvpx/test/svc_test.cc index b955cee659..949a820c4f 100644 --- a/libs/libvpx/test/svc_test.cc +++ b/libs/libvpx/test/svc_test.cc @@ -33,10 +33,8 @@ class SvcTest : public ::testing::Test { static const uint32_t kHeight = 288; SvcTest() - : codec_iface_(0), - test_file_name_("hantro_collage_w352h288.yuv"), - codec_initialized_(false), - decoder_(0) { + : codec_iface_(0), test_file_name_("hantro_collage_w352h288.yuv"), + codec_initialized_(false), decoder_(0) { memset(&svc_, 0, sizeof(svc_)); memset(&codec_, 0, sizeof(codec_)); memset(&codec_enc_, 0, 
sizeof(codec_enc_)); @@ -70,7 +68,7 @@ class SvcTest : public ::testing::Test { virtual void TearDown() { ReleaseEncoder(); - delete(decoder_); + delete (decoder_); } void InitializeEncoder() { @@ -97,7 +95,7 @@ class SvcTest : public ::testing::Test { if (cx_pkt->kind == VPX_CODEC_STATS_PKT) { EXPECT_GT(cx_pkt->data.twopass_stats.sz, 0U); ASSERT_TRUE(cx_pkt->data.twopass_stats.buf != NULL); - stats_buf->append(static_cast(cx_pkt->data.twopass_stats.buf), + stats_buf->append(static_cast(cx_pkt->data.twopass_stats.buf), cx_pkt->data.twopass_stats.sz); } } @@ -113,10 +111,9 @@ class SvcTest : public ::testing::Test { codec_enc_.g_pass = VPX_RC_FIRST_PASS; InitializeEncoder(); - libvpx_test::I420VideoSource video(test_file_name_, - codec_enc_.g_w, codec_enc_.g_h, - codec_enc_.g_timebase.den, - codec_enc_.g_timebase.num, 0, 30); + libvpx_test::I420VideoSource video( + test_file_name_, codec_enc_.g_w, codec_enc_.g_h, + codec_enc_.g_timebase.den, codec_enc_.g_timebase.num, 0, 30); video.Begin(); for (int i = 0; i < n; ++i) { @@ -128,8 +125,8 @@ class SvcTest : public ::testing::Test { } // Flush encoder and test EOS packet. 
- res = vpx_svc_encode(&svc_, &codec_, NULL, video.pts(), - video.duration(), VPX_DL_GOOD_QUALITY); + res = vpx_svc_encode(&svc_, &codec_, NULL, video.pts(), video.duration(), + VPX_DL_GOOD_QUALITY); ASSERT_EQ(VPX_CODEC_OK, res); GetStatsData(stats_buf); @@ -163,8 +160,8 @@ class SvcTest : public ::testing::Test { } } - void Pass2EncodeNFrames(std::string *const stats_buf, - const int n, const int layers, + void Pass2EncodeNFrames(std::string *const stats_buf, const int n, + const int layers, struct vpx_fixed_buf *const outputs) { vpx_codec_err_t res; size_t frame_received = 0; @@ -182,10 +179,9 @@ class SvcTest : public ::testing::Test { } InitializeEncoder(); - libvpx_test::I420VideoSource video(test_file_name_, - codec_enc_.g_w, codec_enc_.g_h, - codec_enc_.g_timebase.den, - codec_enc_.g_timebase.num, 0, 30); + libvpx_test::I420VideoSource video( + test_file_name_, codec_enc_.g_w, codec_enc_.g_h, + codec_enc_.g_timebase.den, codec_enc_.g_timebase.num, 0, 30); video.Begin(); for (int i = 0; i < n; ++i) { @@ -197,8 +193,8 @@ class SvcTest : public ::testing::Test { } // Flush encoder. 
- res = vpx_svc_encode(&svc_, &codec_, NULL, 0, - video.duration(), VPX_DL_GOOD_QUALITY); + res = vpx_svc_encode(&svc_, &codec_, NULL, 0, video.duration(), + VPX_DL_GOOD_QUALITY); EXPECT_EQ(VPX_CODEC_OK, res); StoreFrames(n, outputs, &frame_received); @@ -217,9 +213,8 @@ class SvcTest : public ::testing::Test { for (int i = 0; i < n; ++i) { ASSERT_TRUE(inputs[i].buf != NULL); ASSERT_GT(inputs[i].sz, 0U); - const vpx_codec_err_t res_dec = - decoder_->DecodeFrame(static_cast(inputs[i].buf), - inputs[i].sz); + const vpx_codec_err_t res_dec = decoder_->DecodeFrame( + static_cast(inputs[i].buf), inputs[i].sz); ASSERT_EQ(VPX_CODEC_OK, res_dec) << decoder_->DecodeError(); ++decoded_frames; @@ -240,17 +235,16 @@ class SvcTest : public ::testing::Test { ASSERT_GT(remained_spatial_layers, 0); for (int i = 0; i < num_super_frames; ++i) { - uint32_t frame_sizes[8] = {0}; + uint32_t frame_sizes[8] = { 0 }; int frame_count = 0; int frames_found = 0; int frame; ASSERT_TRUE(inputs[i].buf != NULL); ASSERT_GT(inputs[i].sz, 0U); - vpx_codec_err_t res = - vp9_parse_superframe_index(static_cast(inputs[i].buf), - inputs[i].sz, frame_sizes, &frame_count, - NULL, NULL); + vpx_codec_err_t res = vp9_parse_superframe_index( + static_cast(inputs[i].buf), inputs[i].sz, + frame_sizes, &frame_count, NULL, NULL); ASSERT_EQ(VPX_CODEC_OK, res); if (frame_count == 0) { @@ -258,28 +252,27 @@ class SvcTest : public ::testing::Test { ASSERT_EQ(1, remained_spatial_layers); } else { // Found a super frame. - uint8_t *frame_data = static_cast(inputs[i].buf); + uint8_t *frame_data = static_cast(inputs[i].buf); uint8_t *frame_start = frame_data; for (frame = 0; frame < frame_count; ++frame) { // Looking for a visible frame. if (frame_data[0] & 0x02) { ++frames_found; - if (frames_found == remained_spatial_layers) - break; + if (frames_found == remained_spatial_layers) break; } frame_data += frame_sizes[frame]; } - ASSERT_LT(frame, frame_count) << "Couldn't find a visible frame. 
" + ASSERT_LT(frame, frame_count) + << "Couldn't find a visible frame. " << "remained_spatial_layers: " << remained_spatial_layers << " super_frame: " << i; - if (frame == frame_count - 1) - continue; + if (frame == frame_count - 1) continue; frame_data += frame_sizes[frame]; // We need to add one more frame for multiple frame contexts. uint8_t marker = - static_cast(inputs[i].buf)[inputs[i].sz - 1]; + static_cast(inputs[i].buf)[inputs[i].sz - 1]; const uint32_t mag = ((marker >> 3) & 0x3) + 1; const size_t index_sz = 2 + mag * frame_count; const size_t new_index_sz = 2 + mag * (frame + 1); @@ -445,7 +438,7 @@ TEST_F(SvcTest, SetAutoAltRefOption) { // Test that decoder can handle an SVC frame as the first frame in a sequence. TEST_F(SvcTest, OnePassEncodeOneFrame) { codec_enc_.g_pass = VPX_RC_ONE_PASS; - vpx_fixed_buf output = {0}; + vpx_fixed_buf output = { 0 }; Pass2EncodeNFrames(NULL, 1, 2, &output); DecodeNFrames(&output, 1); FreeBitstreamBuffers(&output, 1); @@ -539,8 +532,7 @@ TEST_F(SvcTest, TwoPassEncode2SNRLayers) { // Second pass encode codec_enc_.g_pass = VPX_RC_LAST_PASS; - vpx_svc_set_options(&svc_, - "auto-alt-refs=1,1 scale-factors=1/1,1/1"); + vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 scale-factors=1/1,1/1"); vpx_fixed_buf outputs[20]; memset(&outputs[0], 0, sizeof(outputs)); Pass2EncodeNFrames(&stats_buf, 20, 2, &outputs[0]); @@ -556,8 +548,7 @@ TEST_F(SvcTest, TwoPassEncode3SNRLayersDecode321Layers) { // Second pass encode codec_enc_.g_pass = VPX_RC_LAST_PASS; - vpx_svc_set_options(&svc_, - "auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1"); + vpx_svc_set_options(&svc_, "auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1"); vpx_fixed_buf outputs[20]; memset(&outputs[0], 0, sizeof(outputs)); Pass2EncodeNFrames(&stats_buf, 20, 3, &outputs[0]); @@ -572,8 +563,7 @@ TEST_F(SvcTest, TwoPassEncode3SNRLayersDecode321Layers) { TEST_F(SvcTest, SetMultipleFrameContextsOption) { svc_.spatial_layers = 5; - vpx_codec_err_t res = - vpx_svc_set_options(&svc_, 
"multi-frame-contexts=1"); + vpx_codec_err_t res = vpx_svc_set_options(&svc_, "multi-frame-contexts=1"); EXPECT_EQ(VPX_CODEC_OK, res); res = vpx_svc_init(&svc_, &codec_, vpx_codec_vp9_cx(), &codec_enc_); EXPECT_EQ(VPX_CODEC_INVALID_PARAM, res); @@ -626,7 +616,8 @@ TEST_F(SvcTest, TwoPassEncode2SNRLayersWithMultipleFrameContexts) { // Second pass encode codec_enc_.g_pass = VPX_RC_LAST_PASS; codec_enc_.g_error_resilient = 0; - vpx_svc_set_options(&svc_, "auto-alt-refs=1,1 scale-factors=1/1,1/1 " + vpx_svc_set_options(&svc_, + "auto-alt-refs=1,1 scale-factors=1/1,1/1 " "multi-frame-contexts=1"); vpx_fixed_buf outputs[10]; memset(&outputs[0], 0, sizeof(outputs)); @@ -645,7 +636,8 @@ TEST_F(SvcTest, // Second pass encode codec_enc_.g_pass = VPX_RC_LAST_PASS; codec_enc_.g_error_resilient = 0; - vpx_svc_set_options(&svc_, "auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1 " + vpx_svc_set_options(&svc_, + "auto-alt-refs=1,1,1 scale-factors=1/1,1/1,1/1 " "multi-frame-contexts=1"); vpx_fixed_buf outputs[10]; memset(&outputs[0], 0, sizeof(outputs)); @@ -689,7 +681,8 @@ TEST_F(SvcTest, TwoPassEncode2TemporalLayersWithMultipleFrameContexts) { codec_enc_.g_pass = VPX_RC_LAST_PASS; svc_.temporal_layers = 2; codec_enc_.g_error_resilient = 0; - vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 " + vpx_svc_set_options(&svc_, + "auto-alt-refs=1 scale-factors=1/1 " "multi-frame-contexts=1"); vpx_fixed_buf outputs[10]; memset(&outputs[0], 0, sizeof(outputs)); @@ -714,8 +707,7 @@ TEST_F(SvcTest, TwoPassEncode2TemporalLayersDecodeBaseLayer) { Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]); vpx_fixed_buf base_layer[5]; - for (int i = 0; i < 5; ++i) - base_layer[i] = outputs[i * 2]; + for (int i = 0; i < 5; ++i) base_layer[i] = outputs[i * 2]; DecodeNFrames(&base_layer[0], 5); FreeBitstreamBuffers(&outputs[0], 10); @@ -733,15 +725,15 @@ TEST_F(SvcTest, codec_enc_.g_pass = VPX_RC_LAST_PASS; svc_.temporal_layers = 2; codec_enc_.g_error_resilient = 0; - vpx_svc_set_options(&svc_, 
"auto-alt-refs=1 scale-factors=1/1 " + vpx_svc_set_options(&svc_, + "auto-alt-refs=1 scale-factors=1/1 " "multi-frame-contexts=1"); vpx_fixed_buf outputs[10]; memset(&outputs[0], 0, sizeof(outputs)); Pass2EncodeNFrames(&stats_buf, 10, 1, &outputs[0]); vpx_fixed_buf base_layer[5]; - for (int i = 0; i < 5; ++i) - base_layer[i] = outputs[i * 2]; + for (int i = 0; i < 5; ++i) base_layer[i] = outputs[i * 2]; DecodeNFrames(&base_layer[0], 5); FreeBitstreamBuffers(&outputs[0], 10); @@ -769,8 +761,7 @@ TEST_F(SvcTest, TwoPassEncode2TemporalLayersWithTiles) { FreeBitstreamBuffers(&outputs[0], 10); } -TEST_F(SvcTest, - TwoPassEncode2TemporalLayersWithMultipleFrameContextsAndTiles) { +TEST_F(SvcTest, TwoPassEncode2TemporalLayersWithMultipleFrameContextsAndTiles) { // First pass encode std::string stats_buf; vpx_svc_set_options(&svc_, "scale-factors=1/1"); @@ -785,7 +776,8 @@ TEST_F(SvcTest, codec_enc_.g_h = 144; tile_columns_ = 1; tile_rows_ = 1; - vpx_svc_set_options(&svc_, "auto-alt-refs=1 scale-factors=1/1 " + vpx_svc_set_options(&svc_, + "auto-alt-refs=1 scale-factors=1/1 " "multi-frame-contexts=1"); vpx_fixed_buf outputs[10]; memset(&outputs[0], 0, sizeof(outputs)); diff --git a/libs/libvpx/test/test-data.mk b/libs/libvpx/test/test-data.mk index 05a0885ed2..da2fd77d42 100644 --- a/libs/libvpx/test/test-data.mk +++ b/libs/libvpx/test/test-data.mk @@ -730,6 +730,8 @@ LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp93-2-20-12bit-yuv444.webm.md5 endif # CONFIG_VP9_HIGHBITDEPTH # Invalid files for testing libvpx error checking. 
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += invalid-vp80-00-comprehensive-018.ivf.2kf_0x6.ivf +LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += invalid-vp80-00-comprehensive-018.ivf.2kf_0x6.ivf.res LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-01-v3.webm LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-01-v3.webm.res LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-02-v2.webm @@ -742,6 +744,8 @@ LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-00-quantizer-11.webm.iv LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-.ivf.res LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-z.ivf LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-z.ivf.res +LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-00-quantizer-63.ivf.kf_65527x61446.ivf +LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-00-quantizer-63.ivf.kf_65527x61446.ivf.res LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-03-size-202x210.webm.ivf.s113306_r01-05_b6-.ivf LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-03-size-202x210.webm.ivf.s113306_r01-05_b6-.ivf.res LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-03-size-224x196.webm.ivf.s44156_r01-05_b6-.ivf diff --git a/libs/libvpx/test/test-data.sha1 b/libs/libvpx/test/test-data.sha1 index a4ed1742fc..7dd4fcf156 100644 --- a/libs/libvpx/test/test-data.sha1 +++ b/libs/libvpx/test/test-data.sha1 @@ -834,3 +834,7 @@ f6856f19236ee46ed462bd0a2e7e72b9c3b9cea6 *vp90-2-21-resize_inter_640x480_5_1-2.w 7739bfca167b1b43fea72f807f01e097b7cb98d8 *vp90-2-21-resize_inter_640x480_7_1-2.webm.md5 7291af354b4418917eee00e3a7e366086a0b7a10 *vp90-2-21-resize_inter_640x480_7_3-4.webm 4a18b09ccb36564193f0215f599d745d95bb558c *vp90-2-21-resize_inter_640x480_7_3-4.webm.md5 +a000d568431d07379dd5a8ec066061c07e560b47 *invalid-vp90-2-00-quantizer-63.ivf.kf_65527x61446.ivf 
+1e75aad3433c5c21c194a7b53fc393970f0a8d7f *invalid-vp90-2-00-quantizer-63.ivf.kf_65527x61446.ivf.res +235182f9a1c5c8841552510dd4288487447bfc40 *invalid-vp80-00-comprehensive-018.ivf.2kf_0x6.ivf +787f04f0483320d536894282f3358a4f8cac1cf9 *invalid-vp80-00-comprehensive-018.ivf.2kf_0x6.ivf.res diff --git a/libs/libvpx/test/test.mk b/libs/libvpx/test/test.mk index e8e830489b..60218a780d 100644 --- a/libs/libvpx/test/test.mk +++ b/libs/libvpx/test/test.mk @@ -18,23 +18,24 @@ LIBVPX_TEST_SRCS-yes += video_source.h LIBVPX_TEST_SRCS-yes += ../md5_utils.h ../md5_utils.c LIBVPX_TEST_SRCS-$(CONFIG_DECODERS) += ivf_video_source.h LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += ../y4minput.h ../y4minput.c +LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += altref_test.cc LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += aq_segment_test.cc +LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += alt_ref_aq_segment_test.cc LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += datarate_test.cc LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += encode_api_test.cc LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += error_resilience_test.cc LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += i420_video_source.h +LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += realtime_test.cc LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += resize_test.cc LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += y4m_video_source.h LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += yuv_video_source.h -LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += altref_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += config_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += cq_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += keyframe_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += byte_alignment_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += external_frame_buffer_test.cc -LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += invalid_file_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += user_priv_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += vp9_frame_parallel_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += active_map_refresh_test.cc @@ -45,6 +46,7 @@ 
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += frame_size_tests.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_lossless_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_end_to_end_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_ethread_test.cc +LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += level_test.cc LIBVPX_TEST_SRCS-yes += decode_test_driver.cc LIBVPX_TEST_SRCS-yes += decode_test_driver.h @@ -59,10 +61,10 @@ LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += y4m_test.cc ../y4menc.c ../y4menc.h ## WebM Parsing ifeq ($(CONFIG_WEBM_IO), yes) -LIBWEBM_PARSER_SRCS += ../third_party/libwebm/mkvparser.cpp -LIBWEBM_PARSER_SRCS += ../third_party/libwebm/mkvreader.cpp -LIBWEBM_PARSER_SRCS += ../third_party/libwebm/mkvparser.hpp -LIBWEBM_PARSER_SRCS += ../third_party/libwebm/mkvreader.hpp +LIBWEBM_PARSER_SRCS += ../third_party/libwebm/mkvparser/mkvparser.cc +LIBWEBM_PARSER_SRCS += ../third_party/libwebm/mkvparser/mkvreader.cc +LIBWEBM_PARSER_SRCS += ../third_party/libwebm/mkvparser/mkvparser.h +LIBWEBM_PARSER_SRCS += ../third_party/libwebm/mkvparser/mkvreader.h LIBVPX_TEST_SRCS-$(CONFIG_DECODERS) += $(LIBWEBM_PARSER_SRCS) LIBVPX_TEST_SRCS-$(CONFIG_DECODERS) += ../tools_common.h LIBVPX_TEST_SRCS-$(CONFIG_DECODERS) += ../webmdec.cc @@ -86,6 +88,11 @@ ifeq ($(CONFIG_ENCODE_PERF_TESTS)$(CONFIG_VP9_ENCODER), yesyes) LIBVPX_TEST_SRCS-yes += encode_perf_test.cc endif +## Multi-codec blackbox tests. 
+ifeq ($(findstring yes,$(CONFIG_VP8_DECODER)$(CONFIG_VP9_DECODER)), yes) +LIBVPX_TEST_SRCS-yes += invalid_file_test.cc +endif + ## ## WHITE BOX TESTS ## @@ -103,6 +110,7 @@ LIBVPX_TEST_SRCS-yes += vp8_boolcoder_test.cc LIBVPX_TEST_SRCS-yes += vp8_fragments_test.cc endif +LIBVPX_TEST_SRCS-$(CONFIG_POSTPROC) += add_noise_test.cc LIBVPX_TEST_SRCS-$(CONFIG_POSTPROC) += pp_filter_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP8_DECODER) += vp8_decrypt_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += quantize_test.cc @@ -111,7 +119,7 @@ LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += variance_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += vp8_fdct4x4_test.cc LIBVPX_TEST_SRCS-yes += idct_test.cc -LIBVPX_TEST_SRCS-yes += sixtap_predict_test.cc +LIBVPX_TEST_SRCS-yes += predict_test.cc LIBVPX_TEST_SRCS-yes += vpx_scale_test.cc ifeq ($(CONFIG_VP8_ENCODER)$(CONFIG_TEMPORAL_DENOISING),yesyes) @@ -135,14 +143,17 @@ LIBVPX_TEST_SRCS-yes += vp9_encoder_parms_get_to_decoder.cc endif LIBVPX_TEST_SRCS-yes += convolve_test.cc -LIBVPX_TEST_SRCS-yes += lpf_8_test.cc +LIBVPX_TEST_SRCS-yes += lpf_test.cc LIBVPX_TEST_SRCS-yes += vp9_intrapred_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += vp9_decrypt_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += vp9_thread_test.cc +LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += avg_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += dct16x16_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += dct32x32_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += fdct4x4_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += fdct8x8_test.cc +LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += hadamard_test.cc +LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += minmax_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += variance_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_error_block_test.cc LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_quantize_test.cc @@ -161,21 +172,10 @@ LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_arf_freq_test.cc endif # VP9 -## VP10 -ifeq ($(CONFIG_VP10),yes) - 
-LIBVPX_TEST_SRCS-yes += vp10_inv_txfm_test.cc -LIBVPX_TEST_SRCS-$(CONFIG_VP10_ENCODER) += vp10_dct_test.cc - -endif # VP10 - ## Multi-codec / unconditional whitebox tests. -ifeq ($(findstring yes,$(CONFIG_VP9_ENCODER)$(CONFIG_VP10_ENCODER)),yes) -LIBVPX_TEST_SRCS-yes += avg_test.cc -endif - LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += sad_test.cc +LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += sum_squares_test.cc TEST_INTRA_PRED_SPEED_SRCS-yes := test_intra_pred_speed.cc TEST_INTRA_PRED_SPEED_SRCS-yes += ../md5_utils.h ../md5_utils.c diff --git a/libs/libvpx/test/test_intra_pred_speed.cc b/libs/libvpx/test/test_intra_pred_speed.cc index 3e65fecfb6..a896307534 100644 --- a/libs/libvpx/test/test_intra_pred_speed.cc +++ b/libs/libvpx/test/test_intra_pred_speed.cc @@ -31,9 +31,9 @@ typedef void (*VpxPredFunc)(uint8_t *dst, ptrdiff_t y_stride, const int kNumVp9IntraPredFuncs = 13; const char *kVp9IntraPredNames[kNumVp9IntraPredFuncs] = { - "DC_PRED", "DC_LEFT_PRED", "DC_TOP_PRED", "DC_128_PRED", "V_PRED", "H_PRED", - "D45_PRED", "D135_PRED", "D117_PRED", "D153_PRED", "D207_PRED", "D63_PRED", - "TM_PRED" + "DC_PRED", "DC_LEFT_PRED", "DC_TOP_PRED", "DC_128_PRED", "V_PRED", + "H_PRED", "D45_PRED", "D135_PRED", "D117_PRED", "D153_PRED", + "D207_PRED", "D63_PRED", "TM_PRED" }; void TestIntraPred(const char name[], VpxPredFunc const *pred_funcs, @@ -82,18 +82,12 @@ void TestIntraPred(const char name[], VpxPredFunc const *pred_funcs, void TestIntraPred4(VpxPredFunc const *pred_funcs) { static const int kNumVp9IntraFuncs = 13; static const char *const kSignatures[kNumVp9IntraFuncs] = { - "4334156168b34ab599d9b5b30f522fe9", - "bc4649d5ba47c7ff178d92e475960fb0", - "8d316e5933326dcac24e1064794b5d12", - "a27270fed024eafd762c95de85f4da51", - "c33dff000d4256c2b8f3bf9e9bab14d2", - "44d8cddc2ad8f79b8ed3306051722b4f", - "eb54839b2bad6699d8946f01ec041cd0", - "ecb0d56ae5f677ea45127ce9d5c058e4", - "0b7936841f6813da818275944895b574", - "9117972ef64f91a58ff73e1731c81db2", - 
"c56d5e8c729e46825f46dd5d3b5d508a", - "c0889e2039bcf7bcb5d2f33cdca69adc", + "4334156168b34ab599d9b5b30f522fe9", "bc4649d5ba47c7ff178d92e475960fb0", + "8d316e5933326dcac24e1064794b5d12", "a27270fed024eafd762c95de85f4da51", + "c33dff000d4256c2b8f3bf9e9bab14d2", "44d8cddc2ad8f79b8ed3306051722b4f", + "eb54839b2bad6699d8946f01ec041cd0", "ecb0d56ae5f677ea45127ce9d5c058e4", + "0b7936841f6813da818275944895b574", "9117972ef64f91a58ff73e1731c81db2", + "c56d5e8c729e46825f46dd5d3b5d508a", "c0889e2039bcf7bcb5d2f33cdca69adc", "309a618577b27c648f9c5ee45252bc8f", }; TestIntraPred("Intra4", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs, @@ -103,18 +97,12 @@ void TestIntraPred4(VpxPredFunc const *pred_funcs) { void TestIntraPred8(VpxPredFunc const *pred_funcs) { static const int kNumVp9IntraFuncs = 13; static const char *const kSignatures[kNumVp9IntraFuncs] = { - "7694ddeeefed887faf9d339d18850928", - "7d726b1213591b99f736be6dec65065b", - "19c5711281357a485591aaf9c96c0a67", - "ba6b66877a089e71cd938e3b8c40caac", - "802440c93317e0f8ba93fab02ef74265", - "9e09a47a15deb0b9d8372824f9805080", - "b7c2d8c662268c0c427da412d7b0311d", - "78339c1c60bb1d67d248ab8c4da08b7f", - "5c97d70f7d47de1882a6cd86c165c8a9", - "8182bf60688b42205acd95e59e967157", - "08323400005a297f16d7e57e7fe1eaac", - "95f7bfc262329a5849eda66d8f7c68ce", + "7694ddeeefed887faf9d339d18850928", "7d726b1213591b99f736be6dec65065b", + "19c5711281357a485591aaf9c96c0a67", "ba6b66877a089e71cd938e3b8c40caac", + "802440c93317e0f8ba93fab02ef74265", "9e09a47a15deb0b9d8372824f9805080", + "b7c2d8c662268c0c427da412d7b0311d", "78339c1c60bb1d67d248ab8c4da08b7f", + "5c97d70f7d47de1882a6cd86c165c8a9", "8182bf60688b42205acd95e59e967157", + "08323400005a297f16d7e57e7fe1eaac", "95f7bfc262329a5849eda66d8f7c68ce", "815b75c8e0d91cc1ae766dc5d3e445a3", }; TestIntraPred("Intra8", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs, @@ -124,18 +112,12 @@ void TestIntraPred8(VpxPredFunc const *pred_funcs) { void TestIntraPred16(VpxPredFunc const 
*pred_funcs) { static const int kNumVp9IntraFuncs = 13; static const char *const kSignatures[kNumVp9IntraFuncs] = { - "b40dbb555d5d16a043dc361e6694fe53", - "fb08118cee3b6405d64c1fd68be878c6", - "6c190f341475c837cc38c2e566b64875", - "db5c34ccbe2c7f595d9b08b0dc2c698c", - "a62cbfd153a1f0b9fed13e62b8408a7a", - "143df5b4c89335e281103f610f5052e4", - "d87feb124107cdf2cfb147655aa0bb3c", - "7841fae7d4d47b519322e6a03eeed9dc", - "f6ebed3f71cbcf8d6d0516ce87e11093", - "3cc480297dbfeed01a1c2d78dd03d0c5", - "b9f69fa6532b372c545397dcb78ef311", - "a8fe1c70432f09d0c20c67bdb6432c4d", + "b40dbb555d5d16a043dc361e6694fe53", "fb08118cee3b6405d64c1fd68be878c6", + "6c190f341475c837cc38c2e566b64875", "db5c34ccbe2c7f595d9b08b0dc2c698c", + "a62cbfd153a1f0b9fed13e62b8408a7a", "143df5b4c89335e281103f610f5052e4", + "d87feb124107cdf2cfb147655aa0bb3c", "7841fae7d4d47b519322e6a03eeed9dc", + "f6ebed3f71cbcf8d6d0516ce87e11093", "3cc480297dbfeed01a1c2d78dd03d0c5", + "b9f69fa6532b372c545397dcb78ef311", "a8fe1c70432f09d0c20c67bdb6432c4d", "b8a41aa968ec108af447af4217cba91b", }; TestIntraPred("Intra16", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs, @@ -145,18 +127,12 @@ void TestIntraPred16(VpxPredFunc const *pred_funcs) { void TestIntraPred32(VpxPredFunc const *pred_funcs) { static const int kNumVp9IntraFuncs = 13; static const char *const kSignatures[kNumVp9IntraFuncs] = { - "558541656d84f9ae7896db655826febe", - "b3587a1f9a01495fa38c8cd3c8e2a1bf", - "4c6501e64f25aacc55a2a16c7e8f0255", - "b3b01379ba08916ef6b1b35f7d9ad51c", - "0f1eb38b6cbddb3d496199ef9f329071", - "911c06efb9ed1c3b4c104b232b55812f", - "9225beb0ddfa7a1d24eaa1be430a6654", - "0a6d584a44f8db9aa7ade2e2fdb9fc9e", - "b01c9076525216925f3456f034fb6eee", - "d267e20ad9e5cd2915d1a47254d3d149", - "ed012a4a5da71f36c2393023184a0e59", - "f162b51ed618d28b936974cff4391da5", + "558541656d84f9ae7896db655826febe", "b3587a1f9a01495fa38c8cd3c8e2a1bf", + "4c6501e64f25aacc55a2a16c7e8f0255", "b3b01379ba08916ef6b1b35f7d9ad51c", + 
"0f1eb38b6cbddb3d496199ef9f329071", "911c06efb9ed1c3b4c104b232b55812f", + "9225beb0ddfa7a1d24eaa1be430a6654", "0a6d584a44f8db9aa7ade2e2fdb9fc9e", + "b01c9076525216925f3456f034fb6eee", "d267e20ad9e5cd2915d1a47254d3d149", + "ed012a4a5da71f36c2393023184a0e59", "f162b51ed618d28b936974cff4391da5", "9e1370c6d42e08d357d9612c93a71cfc", }; TestIntraPred("Intra32", pred_funcs, kVp9IntraPredNames, kNumVp9IntraFuncs, @@ -167,13 +143,13 @@ void TestIntraPred32(VpxPredFunc const *pred_funcs) { // Defines a test case for |arch| (e.g., C, SSE2, ...) passing the predictors // to |test_func|. The test name is 'arch.test_func', e.g., C.TestIntraPred4. -#define INTRA_PRED_TEST(arch, test_func, dc, dc_left, dc_top, dc_128, v, h, \ - d45, d135, d117, d153, d207, d63, tm) \ - TEST(arch, test_func) { \ - static const VpxPredFunc vpx_intra_pred[] = { \ - dc, dc_left, dc_top, dc_128, v, h, d45, \ - d135, d117, d153, d207, d63, tm}; \ - test_func(vpx_intra_pred); \ +#define INTRA_PRED_TEST(arch, test_func, dc, dc_left, dc_top, dc_128, v, h, \ + d45, d135, d117, d153, d207, d63, tm) \ + TEST(arch, test_func) { \ + static const VpxPredFunc vpx_intra_pred[] = { \ + dc, dc_left, dc_top, dc_128, v, h, d45, d135, d117, d153, d207, d63, tm \ + }; \ + test_func(vpx_intra_pred); \ } // ----------------------------------------------------------------------------- @@ -187,20 +163,20 @@ INTRA_PRED_TEST(C, TestIntraPred4, vpx_dc_predictor_4x4_c, vpx_d153_predictor_4x4_c, vpx_d207_predictor_4x4_c, vpx_d63_predictor_4x4_c, vpx_tm_predictor_4x4_c) -#if HAVE_SSE2 && CONFIG_USE_X86INC +#if HAVE_SSE2 INTRA_PRED_TEST(SSE2, TestIntraPred4, vpx_dc_predictor_4x4_sse2, vpx_dc_left_predictor_4x4_sse2, vpx_dc_top_predictor_4x4_sse2, vpx_dc_128_predictor_4x4_sse2, vpx_v_predictor_4x4_sse2, - vpx_h_predictor_4x4_sse2, NULL, NULL, NULL, NULL, NULL, NULL, + vpx_h_predictor_4x4_sse2, vpx_d45_predictor_4x4_sse2, NULL, + NULL, NULL, vpx_d207_predictor_4x4_sse2, NULL, vpx_tm_predictor_4x4_sse2) -#endif // HAVE_SSE2 && 
CONFIG_USE_X86INC +#endif // HAVE_SSE2 -#if HAVE_SSSE3 && CONFIG_USE_X86INC -INTRA_PRED_TEST(SSSE3, TestIntraPred4, NULL, NULL, NULL, NULL, NULL, - NULL, vpx_d45_predictor_4x4_ssse3, NULL, NULL, - vpx_d153_predictor_4x4_ssse3, vpx_d207_predictor_4x4_ssse3, +#if HAVE_SSSE3 +INTRA_PRED_TEST(SSSE3, TestIntraPred4, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, vpx_d153_predictor_4x4_ssse3, NULL, vpx_d63_predictor_4x4_ssse3, NULL) -#endif // HAVE_SSSE3 && CONFIG_USE_X86INC +#endif // HAVE_SSSE3 #if HAVE_DSPR2 INTRA_PRED_TEST(DSPR2, TestIntraPred4, vpx_dc_predictor_4x4_dspr2, NULL, NULL, @@ -221,8 +197,8 @@ INTRA_PRED_TEST(NEON, TestIntraPred4, vpx_dc_predictor_4x4_neon, INTRA_PRED_TEST(MSA, TestIntraPred4, vpx_dc_predictor_4x4_msa, vpx_dc_left_predictor_4x4_msa, vpx_dc_top_predictor_4x4_msa, vpx_dc_128_predictor_4x4_msa, vpx_v_predictor_4x4_msa, - vpx_h_predictor_4x4_msa, NULL, NULL, NULL, NULL, NULL, - NULL, vpx_tm_predictor_4x4_msa) + vpx_h_predictor_4x4_msa, NULL, NULL, NULL, NULL, NULL, NULL, + vpx_tm_predictor_4x4_msa) #endif // HAVE_MSA // ----------------------------------------------------------------------------- @@ -236,20 +212,19 @@ INTRA_PRED_TEST(C, TestIntraPred8, vpx_dc_predictor_8x8_c, vpx_d153_predictor_8x8_c, vpx_d207_predictor_8x8_c, vpx_d63_predictor_8x8_c, vpx_tm_predictor_8x8_c) -#if HAVE_SSE2 && CONFIG_USE_X86INC +#if HAVE_SSE2 INTRA_PRED_TEST(SSE2, TestIntraPred8, vpx_dc_predictor_8x8_sse2, vpx_dc_left_predictor_8x8_sse2, vpx_dc_top_predictor_8x8_sse2, vpx_dc_128_predictor_8x8_sse2, vpx_v_predictor_8x8_sse2, - vpx_h_predictor_8x8_sse2, NULL, NULL, NULL, NULL, NULL, - NULL, vpx_tm_predictor_8x8_sse2) -#endif // HAVE_SSE2 && CONFIG_USE_X86INC + vpx_h_predictor_8x8_sse2, vpx_d45_predictor_8x8_sse2, NULL, + NULL, NULL, NULL, NULL, vpx_tm_predictor_8x8_sse2) +#endif // HAVE_SSE2 -#if HAVE_SSSE3 && CONFIG_USE_X86INC -INTRA_PRED_TEST(SSSE3, TestIntraPred8, NULL, NULL, NULL, NULL, NULL, - NULL, vpx_d45_predictor_8x8_ssse3, NULL, NULL, - 
vpx_d153_predictor_8x8_ssse3, vpx_d207_predictor_8x8_ssse3, - vpx_d63_predictor_8x8_ssse3, NULL) -#endif // HAVE_SSSE3 && CONFIG_USE_X86INC +#if HAVE_SSSE3 +INTRA_PRED_TEST(SSSE3, TestIntraPred8, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, vpx_d153_predictor_8x8_ssse3, + vpx_d207_predictor_8x8_ssse3, vpx_d63_predictor_8x8_ssse3, NULL) +#endif // HAVE_SSSE3 #if HAVE_DSPR2 INTRA_PRED_TEST(DSPR2, TestIntraPred8, vpx_dc_predictor_8x8_dspr2, NULL, NULL, @@ -270,8 +245,8 @@ INTRA_PRED_TEST(NEON, TestIntraPred8, vpx_dc_predictor_8x8_neon, INTRA_PRED_TEST(MSA, TestIntraPred8, vpx_dc_predictor_8x8_msa, vpx_dc_left_predictor_8x8_msa, vpx_dc_top_predictor_8x8_msa, vpx_dc_128_predictor_8x8_msa, vpx_v_predictor_8x8_msa, - vpx_h_predictor_8x8_msa, NULL, NULL, NULL, NULL, NULL, - NULL, vpx_tm_predictor_8x8_msa) + vpx_h_predictor_8x8_msa, NULL, NULL, NULL, NULL, NULL, NULL, + vpx_tm_predictor_8x8_msa) #endif // HAVE_MSA // ----------------------------------------------------------------------------- @@ -285,22 +260,21 @@ INTRA_PRED_TEST(C, TestIntraPred16, vpx_dc_predictor_16x16_c, vpx_d153_predictor_16x16_c, vpx_d207_predictor_16x16_c, vpx_d63_predictor_16x16_c, vpx_tm_predictor_16x16_c) -#if HAVE_SSE2 && CONFIG_USE_X86INC +#if HAVE_SSE2 INTRA_PRED_TEST(SSE2, TestIntraPred16, vpx_dc_predictor_16x16_sse2, vpx_dc_left_predictor_16x16_sse2, vpx_dc_top_predictor_16x16_sse2, vpx_dc_128_predictor_16x16_sse2, vpx_v_predictor_16x16_sse2, vpx_h_predictor_16x16_sse2, NULL, NULL, NULL, NULL, NULL, NULL, vpx_tm_predictor_16x16_sse2) -#endif // HAVE_SSE2 && CONFIG_USE_X86INC +#endif // HAVE_SSE2 -#if HAVE_SSSE3 && CONFIG_USE_X86INC -INTRA_PRED_TEST(SSSE3, TestIntraPred16, NULL, NULL, NULL, NULL, NULL, - NULL, vpx_d45_predictor_16x16_ssse3, - NULL, NULL, vpx_d153_predictor_16x16_ssse3, - vpx_d207_predictor_16x16_ssse3, vpx_d63_predictor_16x16_ssse3, - NULL) -#endif // HAVE_SSSE3 && CONFIG_USE_X86INC +#if HAVE_SSSE3 +INTRA_PRED_TEST(SSSE3, TestIntraPred16, NULL, NULL, NULL, NULL, 
NULL, NULL, + vpx_d45_predictor_16x16_ssse3, NULL, NULL, + vpx_d153_predictor_16x16_ssse3, vpx_d207_predictor_16x16_ssse3, + vpx_d63_predictor_16x16_ssse3, NULL) +#endif // HAVE_SSSE3 #if HAVE_DSPR2 INTRA_PRED_TEST(DSPR2, TestIntraPred16, vpx_dc_predictor_16x16_dspr2, NULL, @@ -321,8 +295,8 @@ INTRA_PRED_TEST(NEON, TestIntraPred16, vpx_dc_predictor_16x16_neon, INTRA_PRED_TEST(MSA, TestIntraPred16, vpx_dc_predictor_16x16_msa, vpx_dc_left_predictor_16x16_msa, vpx_dc_top_predictor_16x16_msa, vpx_dc_128_predictor_16x16_msa, vpx_v_predictor_16x16_msa, - vpx_h_predictor_16x16_msa, NULL, NULL, NULL, NULL, NULL, - NULL, vpx_tm_predictor_16x16_msa) + vpx_h_predictor_16x16_msa, NULL, NULL, NULL, NULL, NULL, NULL, + vpx_tm_predictor_16x16_msa) #endif // HAVE_MSA // ----------------------------------------------------------------------------- @@ -336,21 +310,21 @@ INTRA_PRED_TEST(C, TestIntraPred32, vpx_dc_predictor_32x32_c, vpx_d153_predictor_32x32_c, vpx_d207_predictor_32x32_c, vpx_d63_predictor_32x32_c, vpx_tm_predictor_32x32_c) -#if HAVE_SSE2 && CONFIG_USE_X86INC +#if HAVE_SSE2 INTRA_PRED_TEST(SSE2, TestIntraPred32, vpx_dc_predictor_32x32_sse2, vpx_dc_left_predictor_32x32_sse2, vpx_dc_top_predictor_32x32_sse2, vpx_dc_128_predictor_32x32_sse2, vpx_v_predictor_32x32_sse2, - vpx_h_predictor_32x32_sse2, NULL, NULL, NULL, NULL, NULL, - NULL, vpx_tm_predictor_32x32_sse2) -#endif // HAVE_SSE2 && CONFIG_USE_X86INC + vpx_h_predictor_32x32_sse2, NULL, NULL, NULL, NULL, NULL, NULL, + vpx_tm_predictor_32x32_sse2) +#endif // HAVE_SSE2 -#if HAVE_SSSE3 && CONFIG_USE_X86INC -INTRA_PRED_TEST(SSSE3, TestIntraPred32, NULL, NULL, NULL, NULL, NULL, - NULL, vpx_d45_predictor_32x32_ssse3, NULL, NULL, +#if HAVE_SSSE3 +INTRA_PRED_TEST(SSSE3, TestIntraPred32, NULL, NULL, NULL, NULL, NULL, NULL, + vpx_d45_predictor_32x32_ssse3, NULL, NULL, vpx_d153_predictor_32x32_ssse3, vpx_d207_predictor_32x32_ssse3, vpx_d63_predictor_32x32_ssse3, NULL) -#endif // HAVE_SSSE3 && CONFIG_USE_X86INC +#endif // 
HAVE_SSSE3 #if HAVE_NEON INTRA_PRED_TEST(NEON, TestIntraPred32, vpx_dc_predictor_32x32_neon, @@ -365,8 +339,8 @@ INTRA_PRED_TEST(NEON, TestIntraPred32, vpx_dc_predictor_32x32_neon, INTRA_PRED_TEST(MSA, TestIntraPred32, vpx_dc_predictor_32x32_msa, vpx_dc_left_predictor_32x32_msa, vpx_dc_top_predictor_32x32_msa, vpx_dc_128_predictor_32x32_msa, vpx_v_predictor_32x32_msa, - vpx_h_predictor_32x32_msa, NULL, NULL, NULL, NULL, NULL, - NULL, vpx_tm_predictor_32x32_msa) + vpx_h_predictor_32x32_msa, NULL, NULL, NULL, NULL, NULL, NULL, + vpx_tm_predictor_32x32_msa) #endif // HAVE_MSA #include "test/test_libvpx.cc" diff --git a/libs/libvpx/test/test_libvpx.cc b/libs/libvpx/test/test_libvpx.cc index 005ea8d13d..8a70b4e28c 100644 --- a/libs/libvpx/test/test_libvpx.cc +++ b/libs/libvpx/test/test_libvpx.cc @@ -41,22 +41,18 @@ int main(int argc, char **argv) { #if ARCH_X86 || ARCH_X86_64 const int simd_caps = x86_simd_caps(); - if (!(simd_caps & HAS_MMX)) - append_negative_gtest_filter(":MMX.*:MMX/*"); - if (!(simd_caps & HAS_SSE)) - append_negative_gtest_filter(":SSE.*:SSE/*"); - if (!(simd_caps & HAS_SSE2)) - append_negative_gtest_filter(":SSE2.*:SSE2/*"); - if (!(simd_caps & HAS_SSE3)) - append_negative_gtest_filter(":SSE3.*:SSE3/*"); - if (!(simd_caps & HAS_SSSE3)) + if (!(simd_caps & HAS_MMX)) append_negative_gtest_filter(":MMX.*:MMX/*"); + if (!(simd_caps & HAS_SSE)) append_negative_gtest_filter(":SSE.*:SSE/*"); + if (!(simd_caps & HAS_SSE2)) append_negative_gtest_filter(":SSE2.*:SSE2/*"); + if (!(simd_caps & HAS_SSE3)) append_negative_gtest_filter(":SSE3.*:SSE3/*"); + if (!(simd_caps & HAS_SSSE3)) { append_negative_gtest_filter(":SSSE3.*:SSSE3/*"); - if (!(simd_caps & HAS_SSE4_1)) + } + if (!(simd_caps & HAS_SSE4_1)) { append_negative_gtest_filter(":SSE4_1.*:SSE4_1/*"); - if (!(simd_caps & HAS_AVX)) - append_negative_gtest_filter(":AVX.*:AVX/*"); - if (!(simd_caps & HAS_AVX2)) - append_negative_gtest_filter(":AVX2.*:AVX2/*"); + } + if (!(simd_caps & HAS_AVX)) 
append_negative_gtest_filter(":AVX.*:AVX/*"); + if (!(simd_caps & HAS_AVX2)) append_negative_gtest_filter(":AVX2.*:AVX2/*"); #endif // ARCH_X86 || ARCH_X86_64 #if !CONFIG_SHARED diff --git a/libs/libvpx/test/test_vector_test.cc b/libs/libvpx/test/test_vector_test.cc index f1aa4d7f79..6d43157404 100644 --- a/libs/libvpx/test/test_vector_test.cc +++ b/libs/libvpx/test/test_vector_test.cc @@ -28,43 +28,36 @@ namespace { -enum DecodeMode { - kSerialMode, - kFrameParallelMode -}; +enum DecodeMode { kSerialMode, kFrameParallelMode }; const int kDecodeMode = 0; const int kThreads = 1; const int kFileName = 2; -typedef std::tr1::tuple DecodeParam; +typedef std::tr1::tuple DecodeParam; class TestVectorTest : public ::libvpx_test::DecoderTest, - public ::libvpx_test::CodecTestWithParam { + public ::libvpx_test::CodecTestWithParam { protected: - TestVectorTest() - : DecoderTest(GET_PARAM(0)), - md5_file_(NULL) { + TestVectorTest() : DecoderTest(GET_PARAM(0)), md5_file_(NULL) { #if CONFIG_VP9_DECODER - resize_clips_.insert( - ::libvpx_test::kVP9TestVectorsResize, - ::libvpx_test::kVP9TestVectorsResize + - ::libvpx_test::kNumVP9TestVectorsResize); + resize_clips_.insert(::libvpx_test::kVP9TestVectorsResize, + ::libvpx_test::kVP9TestVectorsResize + + ::libvpx_test::kNumVP9TestVectorsResize); #endif } virtual ~TestVectorTest() { - if (md5_file_) - fclose(md5_file_); + if (md5_file_) fclose(md5_file_); } - void OpenMD5File(const std::string& md5_file_name_) { + void OpenMD5File(const std::string &md5_file_name_) { md5_file_ = libvpx_test::OpenTestDataFile(md5_file_name_); ASSERT_TRUE(md5_file_ != NULL) << "Md5 file open failed. 
Filename: " - << md5_file_name_; + << md5_file_name_; } - virtual void DecompressedFrameHook(const vpx_image_t& img, + virtual void DecompressedFrameHook(const vpx_image_t &img, const unsigned int frame_number) { ASSERT_TRUE(md5_file_ != NULL); char expected_md5[33]; @@ -101,7 +94,6 @@ TEST_P(TestVectorTest, MD5Match) { const std::string filename = std::tr1::get(input); const int threads = std::tr1::get(input); const int mode = std::tr1::get(input); - libvpx_test::CompressedVideoSource *video = NULL; vpx_codec_flags_t flags = 0; vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t(); char str[256]; @@ -121,22 +113,24 @@ TEST_P(TestVectorTest, MD5Match) { cfg.threads = threads; snprintf(str, sizeof(str) / sizeof(str[0]) - 1, - "file: %s mode: %s threads: %d", - filename.c_str(), mode == 0 ? "Serial" : "Parallel", threads); + "file: %s mode: %s threads: %d", filename.c_str(), + mode == 0 ? "Serial" : "Parallel", threads); SCOPED_TRACE(str); // Open compressed video file. + testing::internal::scoped_ptr video; if (filename.substr(filename.length() - 3, 3) == "ivf") { - video = new libvpx_test::IVFVideoSource(filename); + video.reset(new libvpx_test::IVFVideoSource(filename)); } else if (filename.substr(filename.length() - 4, 4) == "webm") { #if CONFIG_WEBM_IO - video = new libvpx_test::WebMVideoSource(filename); + video.reset(new libvpx_test::WebMVideoSource(filename)); #else fprintf(stderr, "WebM IO is disabled, skipping test vector %s\n", filename.c_str()); return; #endif } + ASSERT_TRUE(video.get() != NULL); video->Init(); // Construct md5 file name. @@ -148,8 +142,7 @@ TEST_P(TestVectorTest, MD5Match) { set_flags(flags); // Decode frame, and check the md5 matching. - ASSERT_NO_FATAL_FAILURE(RunLoop(video, cfg)); - delete video; + ASSERT_NO_FATAL_FAILURE(RunLoop(video.get(), cfg)); } // Test VP8 decode in serial mode with single thread. 
@@ -183,8 +176,8 @@ INSTANTIATE_TEST_CASE_P( ::testing::Values( static_cast(&libvpx_test::kVP9)), ::testing::Combine( - ::testing::Values(1), // Frame Parallel mode. - ::testing::Range(2, 9), // With 2 ~ 8 threads. + ::testing::Values(1), // Frame Parallel mode. + ::testing::Range(2, 9), // With 2 ~ 8 threads. ::testing::ValuesIn(libvpx_test::kVP9TestVectors, libvpx_test::kVP9TestVectors + libvpx_test::kNumVP9TestVectors)))); diff --git a/libs/libvpx/test/test_vectors.cc b/libs/libvpx/test/test_vectors.cc index c822479663..460c1f51b4 100644 --- a/libs/libvpx/test/test_vectors.cc +++ b/libs/libvpx/test/test_vectors.cc @@ -16,201 +16,324 @@ namespace libvpx_test { #if CONFIG_VP8_DECODER const char *const kVP8TestVectors[] = { - "vp80-00-comprehensive-001.ivf", - "vp80-00-comprehensive-002.ivf", "vp80-00-comprehensive-003.ivf", - "vp80-00-comprehensive-004.ivf", "vp80-00-comprehensive-005.ivf", - "vp80-00-comprehensive-006.ivf", "vp80-00-comprehensive-007.ivf", - "vp80-00-comprehensive-008.ivf", "vp80-00-comprehensive-009.ivf", - "vp80-00-comprehensive-010.ivf", "vp80-00-comprehensive-011.ivf", - "vp80-00-comprehensive-012.ivf", "vp80-00-comprehensive-013.ivf", - "vp80-00-comprehensive-014.ivf", "vp80-00-comprehensive-015.ivf", - "vp80-00-comprehensive-016.ivf", "vp80-00-comprehensive-017.ivf", - "vp80-00-comprehensive-018.ivf", "vp80-01-intra-1400.ivf", - "vp80-01-intra-1411.ivf", "vp80-01-intra-1416.ivf", - "vp80-01-intra-1417.ivf", "vp80-02-inter-1402.ivf", - "vp80-02-inter-1412.ivf", "vp80-02-inter-1418.ivf", - "vp80-02-inter-1424.ivf", "vp80-03-segmentation-01.ivf", - "vp80-03-segmentation-02.ivf", "vp80-03-segmentation-03.ivf", - "vp80-03-segmentation-04.ivf", "vp80-03-segmentation-1401.ivf", - "vp80-03-segmentation-1403.ivf", "vp80-03-segmentation-1407.ivf", - "vp80-03-segmentation-1408.ivf", "vp80-03-segmentation-1409.ivf", - "vp80-03-segmentation-1410.ivf", "vp80-03-segmentation-1413.ivf", - "vp80-03-segmentation-1414.ivf", "vp80-03-segmentation-1415.ivf", - 
"vp80-03-segmentation-1425.ivf", "vp80-03-segmentation-1426.ivf", - "vp80-03-segmentation-1427.ivf", "vp80-03-segmentation-1432.ivf", - "vp80-03-segmentation-1435.ivf", "vp80-03-segmentation-1436.ivf", - "vp80-03-segmentation-1437.ivf", "vp80-03-segmentation-1441.ivf", - "vp80-03-segmentation-1442.ivf", "vp80-04-partitions-1404.ivf", - "vp80-04-partitions-1405.ivf", "vp80-04-partitions-1406.ivf", - "vp80-05-sharpness-1428.ivf", "vp80-05-sharpness-1429.ivf", - "vp80-05-sharpness-1430.ivf", "vp80-05-sharpness-1431.ivf", - "vp80-05-sharpness-1433.ivf", "vp80-05-sharpness-1434.ivf", - "vp80-05-sharpness-1438.ivf", "vp80-05-sharpness-1439.ivf", - "vp80-05-sharpness-1440.ivf", "vp80-05-sharpness-1443.ivf", - "vp80-06-smallsize.ivf" + "vp80-00-comprehensive-001.ivf", "vp80-00-comprehensive-002.ivf", + "vp80-00-comprehensive-003.ivf", "vp80-00-comprehensive-004.ivf", + "vp80-00-comprehensive-005.ivf", "vp80-00-comprehensive-006.ivf", + "vp80-00-comprehensive-007.ivf", "vp80-00-comprehensive-008.ivf", + "vp80-00-comprehensive-009.ivf", "vp80-00-comprehensive-010.ivf", + "vp80-00-comprehensive-011.ivf", "vp80-00-comprehensive-012.ivf", + "vp80-00-comprehensive-013.ivf", "vp80-00-comprehensive-014.ivf", + "vp80-00-comprehensive-015.ivf", "vp80-00-comprehensive-016.ivf", + "vp80-00-comprehensive-017.ivf", "vp80-00-comprehensive-018.ivf", + "vp80-01-intra-1400.ivf", "vp80-01-intra-1411.ivf", + "vp80-01-intra-1416.ivf", "vp80-01-intra-1417.ivf", + "vp80-02-inter-1402.ivf", "vp80-02-inter-1412.ivf", + "vp80-02-inter-1418.ivf", "vp80-02-inter-1424.ivf", + "vp80-03-segmentation-01.ivf", "vp80-03-segmentation-02.ivf", + "vp80-03-segmentation-03.ivf", "vp80-03-segmentation-04.ivf", + "vp80-03-segmentation-1401.ivf", "vp80-03-segmentation-1403.ivf", + "vp80-03-segmentation-1407.ivf", "vp80-03-segmentation-1408.ivf", + "vp80-03-segmentation-1409.ivf", "vp80-03-segmentation-1410.ivf", + "vp80-03-segmentation-1413.ivf", "vp80-03-segmentation-1414.ivf", + "vp80-03-segmentation-1415.ivf", 
"vp80-03-segmentation-1425.ivf", + "vp80-03-segmentation-1426.ivf", "vp80-03-segmentation-1427.ivf", + "vp80-03-segmentation-1432.ivf", "vp80-03-segmentation-1435.ivf", + "vp80-03-segmentation-1436.ivf", "vp80-03-segmentation-1437.ivf", + "vp80-03-segmentation-1441.ivf", "vp80-03-segmentation-1442.ivf", + "vp80-04-partitions-1404.ivf", "vp80-04-partitions-1405.ivf", + "vp80-04-partitions-1406.ivf", "vp80-05-sharpness-1428.ivf", + "vp80-05-sharpness-1429.ivf", "vp80-05-sharpness-1430.ivf", + "vp80-05-sharpness-1431.ivf", "vp80-05-sharpness-1433.ivf", + "vp80-05-sharpness-1434.ivf", "vp80-05-sharpness-1438.ivf", + "vp80-05-sharpness-1439.ivf", "vp80-05-sharpness-1440.ivf", + "vp80-05-sharpness-1443.ivf", "vp80-06-smallsize.ivf" }; const int kNumVP8TestVectors = NELEMENTS(kVP8TestVectors); #endif // CONFIG_VP8_DECODER #if CONFIG_VP9_DECODER -#define RESIZE_TEST_VECTORS "vp90-2-21-resize_inter_320x180_5_1-2.webm", \ - "vp90-2-21-resize_inter_320x180_5_3-4.webm", \ - "vp90-2-21-resize_inter_320x180_7_1-2.webm", \ - "vp90-2-21-resize_inter_320x180_7_3-4.webm", \ - "vp90-2-21-resize_inter_320x240_5_1-2.webm", \ - "vp90-2-21-resize_inter_320x240_5_3-4.webm", \ - "vp90-2-21-resize_inter_320x240_7_1-2.webm", \ - "vp90-2-21-resize_inter_320x240_7_3-4.webm", \ - "vp90-2-21-resize_inter_640x360_5_1-2.webm", \ - "vp90-2-21-resize_inter_640x360_5_3-4.webm", \ - "vp90-2-21-resize_inter_640x360_7_1-2.webm", \ - "vp90-2-21-resize_inter_640x360_7_3-4.webm", \ - "vp90-2-21-resize_inter_640x480_5_1-2.webm", \ - "vp90-2-21-resize_inter_640x480_5_3-4.webm", \ - "vp90-2-21-resize_inter_640x480_7_1-2.webm", \ - "vp90-2-21-resize_inter_640x480_7_3-4.webm", \ - "vp90-2-21-resize_inter_1280x720_5_1-2.webm", \ - "vp90-2-21-resize_inter_1280x720_5_3-4.webm", \ - "vp90-2-21-resize_inter_1280x720_7_1-2.webm", \ - "vp90-2-21-resize_inter_1280x720_7_3-4.webm", \ - "vp90-2-21-resize_inter_1920x1080_5_1-2.webm", \ - "vp90-2-21-resize_inter_1920x1080_5_3-4.webm", \ - 
"vp90-2-21-resize_inter_1920x1080_7_1-2.webm", \ - "vp90-2-21-resize_inter_1920x1080_7_3-4.webm", +#define RESIZE_TEST_VECTORS \ + "vp90-2-21-resize_inter_320x180_5_1-2.webm", \ + "vp90-2-21-resize_inter_320x180_5_3-4.webm", \ + "vp90-2-21-resize_inter_320x180_7_1-2.webm", \ + "vp90-2-21-resize_inter_320x180_7_3-4.webm", \ + "vp90-2-21-resize_inter_320x240_5_1-2.webm", \ + "vp90-2-21-resize_inter_320x240_5_3-4.webm", \ + "vp90-2-21-resize_inter_320x240_7_1-2.webm", \ + "vp90-2-21-resize_inter_320x240_7_3-4.webm", \ + "vp90-2-21-resize_inter_640x360_5_1-2.webm", \ + "vp90-2-21-resize_inter_640x360_5_3-4.webm", \ + "vp90-2-21-resize_inter_640x360_7_1-2.webm", \ + "vp90-2-21-resize_inter_640x360_7_3-4.webm", \ + "vp90-2-21-resize_inter_640x480_5_1-2.webm", \ + "vp90-2-21-resize_inter_640x480_5_3-4.webm", \ + "vp90-2-21-resize_inter_640x480_7_1-2.webm", \ + "vp90-2-21-resize_inter_640x480_7_3-4.webm", \ + "vp90-2-21-resize_inter_1280x720_5_1-2.webm", \ + "vp90-2-21-resize_inter_1280x720_5_3-4.webm", \ + "vp90-2-21-resize_inter_1280x720_7_1-2.webm", \ + "vp90-2-21-resize_inter_1280x720_7_3-4.webm", \ + "vp90-2-21-resize_inter_1920x1080_5_1-2.webm", \ + "vp90-2-21-resize_inter_1920x1080_5_3-4.webm", \ + "vp90-2-21-resize_inter_1920x1080_7_1-2.webm", \ + "vp90-2-21-resize_inter_1920x1080_7_3-4.webm", const char *const kVP9TestVectors[] = { - "vp90-2-00-quantizer-00.webm", "vp90-2-00-quantizer-01.webm", - "vp90-2-00-quantizer-02.webm", "vp90-2-00-quantizer-03.webm", - "vp90-2-00-quantizer-04.webm", "vp90-2-00-quantizer-05.webm", - "vp90-2-00-quantizer-06.webm", "vp90-2-00-quantizer-07.webm", - "vp90-2-00-quantizer-08.webm", "vp90-2-00-quantizer-09.webm", - "vp90-2-00-quantizer-10.webm", "vp90-2-00-quantizer-11.webm", - "vp90-2-00-quantizer-12.webm", "vp90-2-00-quantizer-13.webm", - "vp90-2-00-quantizer-14.webm", "vp90-2-00-quantizer-15.webm", - "vp90-2-00-quantizer-16.webm", "vp90-2-00-quantizer-17.webm", - "vp90-2-00-quantizer-18.webm", "vp90-2-00-quantizer-19.webm", - 
"vp90-2-00-quantizer-20.webm", "vp90-2-00-quantizer-21.webm", - "vp90-2-00-quantizer-22.webm", "vp90-2-00-quantizer-23.webm", - "vp90-2-00-quantizer-24.webm", "vp90-2-00-quantizer-25.webm", - "vp90-2-00-quantizer-26.webm", "vp90-2-00-quantizer-27.webm", - "vp90-2-00-quantizer-28.webm", "vp90-2-00-quantizer-29.webm", - "vp90-2-00-quantizer-30.webm", "vp90-2-00-quantizer-31.webm", - "vp90-2-00-quantizer-32.webm", "vp90-2-00-quantizer-33.webm", - "vp90-2-00-quantizer-34.webm", "vp90-2-00-quantizer-35.webm", - "vp90-2-00-quantizer-36.webm", "vp90-2-00-quantizer-37.webm", - "vp90-2-00-quantizer-38.webm", "vp90-2-00-quantizer-39.webm", - "vp90-2-00-quantizer-40.webm", "vp90-2-00-quantizer-41.webm", - "vp90-2-00-quantizer-42.webm", "vp90-2-00-quantizer-43.webm", - "vp90-2-00-quantizer-44.webm", "vp90-2-00-quantizer-45.webm", - "vp90-2-00-quantizer-46.webm", "vp90-2-00-quantizer-47.webm", - "vp90-2-00-quantizer-48.webm", "vp90-2-00-quantizer-49.webm", - "vp90-2-00-quantizer-50.webm", "vp90-2-00-quantizer-51.webm", - "vp90-2-00-quantizer-52.webm", "vp90-2-00-quantizer-53.webm", - "vp90-2-00-quantizer-54.webm", "vp90-2-00-quantizer-55.webm", - "vp90-2-00-quantizer-56.webm", "vp90-2-00-quantizer-57.webm", - "vp90-2-00-quantizer-58.webm", "vp90-2-00-quantizer-59.webm", - "vp90-2-00-quantizer-60.webm", "vp90-2-00-quantizer-61.webm", - "vp90-2-00-quantizer-62.webm", "vp90-2-00-quantizer-63.webm", - "vp90-2-01-sharpness-1.webm", "vp90-2-01-sharpness-2.webm", - "vp90-2-01-sharpness-3.webm", "vp90-2-01-sharpness-4.webm", - "vp90-2-01-sharpness-5.webm", "vp90-2-01-sharpness-6.webm", - "vp90-2-01-sharpness-7.webm", "vp90-2-02-size-08x08.webm", - "vp90-2-02-size-08x10.webm", "vp90-2-02-size-08x16.webm", - "vp90-2-02-size-08x18.webm", "vp90-2-02-size-08x32.webm", - "vp90-2-02-size-08x34.webm", "vp90-2-02-size-08x64.webm", - "vp90-2-02-size-08x66.webm", "vp90-2-02-size-10x08.webm", - "vp90-2-02-size-10x10.webm", "vp90-2-02-size-10x16.webm", - "vp90-2-02-size-10x18.webm", 
"vp90-2-02-size-10x32.webm", - "vp90-2-02-size-10x34.webm", "vp90-2-02-size-10x64.webm", - "vp90-2-02-size-10x66.webm", "vp90-2-02-size-16x08.webm", - "vp90-2-02-size-16x10.webm", "vp90-2-02-size-16x16.webm", - "vp90-2-02-size-16x18.webm", "vp90-2-02-size-16x32.webm", - "vp90-2-02-size-16x34.webm", "vp90-2-02-size-16x64.webm", - "vp90-2-02-size-16x66.webm", "vp90-2-02-size-18x08.webm", - "vp90-2-02-size-18x10.webm", "vp90-2-02-size-18x16.webm", - "vp90-2-02-size-18x18.webm", "vp90-2-02-size-18x32.webm", - "vp90-2-02-size-18x34.webm", "vp90-2-02-size-18x64.webm", - "vp90-2-02-size-18x66.webm", "vp90-2-02-size-32x08.webm", - "vp90-2-02-size-32x10.webm", "vp90-2-02-size-32x16.webm", - "vp90-2-02-size-32x18.webm", "vp90-2-02-size-32x32.webm", - "vp90-2-02-size-32x34.webm", "vp90-2-02-size-32x64.webm", - "vp90-2-02-size-32x66.webm", "vp90-2-02-size-34x08.webm", - "vp90-2-02-size-34x10.webm", "vp90-2-02-size-34x16.webm", - "vp90-2-02-size-34x18.webm", "vp90-2-02-size-34x32.webm", - "vp90-2-02-size-34x34.webm", "vp90-2-02-size-34x64.webm", - "vp90-2-02-size-34x66.webm", "vp90-2-02-size-64x08.webm", - "vp90-2-02-size-64x10.webm", "vp90-2-02-size-64x16.webm", - "vp90-2-02-size-64x18.webm", "vp90-2-02-size-64x32.webm", - "vp90-2-02-size-64x34.webm", "vp90-2-02-size-64x64.webm", - "vp90-2-02-size-64x66.webm", "vp90-2-02-size-66x08.webm", - "vp90-2-02-size-66x10.webm", "vp90-2-02-size-66x16.webm", - "vp90-2-02-size-66x18.webm", "vp90-2-02-size-66x32.webm", - "vp90-2-02-size-66x34.webm", "vp90-2-02-size-66x64.webm", - "vp90-2-02-size-66x66.webm", "vp90-2-02-size-130x132.webm", - "vp90-2-02-size-132x130.webm", "vp90-2-02-size-132x132.webm", - "vp90-2-02-size-178x180.webm", "vp90-2-02-size-180x178.webm", - "vp90-2-02-size-180x180.webm", "vp90-2-03-size-196x196.webm", - "vp90-2-03-size-196x198.webm", "vp90-2-03-size-196x200.webm", - "vp90-2-03-size-196x202.webm", "vp90-2-03-size-196x208.webm", - "vp90-2-03-size-196x210.webm", "vp90-2-03-size-196x224.webm", - 
"vp90-2-03-size-196x226.webm", "vp90-2-03-size-198x196.webm", - "vp90-2-03-size-198x198.webm", "vp90-2-03-size-198x200.webm", - "vp90-2-03-size-198x202.webm", "vp90-2-03-size-198x208.webm", - "vp90-2-03-size-198x210.webm", "vp90-2-03-size-198x224.webm", - "vp90-2-03-size-198x226.webm", "vp90-2-03-size-200x196.webm", - "vp90-2-03-size-200x198.webm", "vp90-2-03-size-200x200.webm", - "vp90-2-03-size-200x202.webm", "vp90-2-03-size-200x208.webm", - "vp90-2-03-size-200x210.webm", "vp90-2-03-size-200x224.webm", - "vp90-2-03-size-200x226.webm", "vp90-2-03-size-202x196.webm", - "vp90-2-03-size-202x198.webm", "vp90-2-03-size-202x200.webm", - "vp90-2-03-size-202x202.webm", "vp90-2-03-size-202x208.webm", - "vp90-2-03-size-202x210.webm", "vp90-2-03-size-202x224.webm", - "vp90-2-03-size-202x226.webm", "vp90-2-03-size-208x196.webm", - "vp90-2-03-size-208x198.webm", "vp90-2-03-size-208x200.webm", - "vp90-2-03-size-208x202.webm", "vp90-2-03-size-208x208.webm", - "vp90-2-03-size-208x210.webm", "vp90-2-03-size-208x224.webm", - "vp90-2-03-size-208x226.webm", "vp90-2-03-size-210x196.webm", - "vp90-2-03-size-210x198.webm", "vp90-2-03-size-210x200.webm", - "vp90-2-03-size-210x202.webm", "vp90-2-03-size-210x208.webm", - "vp90-2-03-size-210x210.webm", "vp90-2-03-size-210x224.webm", - "vp90-2-03-size-210x226.webm", "vp90-2-03-size-224x196.webm", - "vp90-2-03-size-224x198.webm", "vp90-2-03-size-224x200.webm", - "vp90-2-03-size-224x202.webm", "vp90-2-03-size-224x208.webm", - "vp90-2-03-size-224x210.webm", "vp90-2-03-size-224x224.webm", - "vp90-2-03-size-224x226.webm", "vp90-2-03-size-226x196.webm", - "vp90-2-03-size-226x198.webm", "vp90-2-03-size-226x200.webm", - "vp90-2-03-size-226x202.webm", "vp90-2-03-size-226x208.webm", - "vp90-2-03-size-226x210.webm", "vp90-2-03-size-226x224.webm", - "vp90-2-03-size-226x226.webm", "vp90-2-03-size-352x288.webm", + "vp90-2-00-quantizer-00.webm", + "vp90-2-00-quantizer-01.webm", + "vp90-2-00-quantizer-02.webm", + "vp90-2-00-quantizer-03.webm", + 
"vp90-2-00-quantizer-04.webm", + "vp90-2-00-quantizer-05.webm", + "vp90-2-00-quantizer-06.webm", + "vp90-2-00-quantizer-07.webm", + "vp90-2-00-quantizer-08.webm", + "vp90-2-00-quantizer-09.webm", + "vp90-2-00-quantizer-10.webm", + "vp90-2-00-quantizer-11.webm", + "vp90-2-00-quantizer-12.webm", + "vp90-2-00-quantizer-13.webm", + "vp90-2-00-quantizer-14.webm", + "vp90-2-00-quantizer-15.webm", + "vp90-2-00-quantizer-16.webm", + "vp90-2-00-quantizer-17.webm", + "vp90-2-00-quantizer-18.webm", + "vp90-2-00-quantizer-19.webm", + "vp90-2-00-quantizer-20.webm", + "vp90-2-00-quantizer-21.webm", + "vp90-2-00-quantizer-22.webm", + "vp90-2-00-quantizer-23.webm", + "vp90-2-00-quantizer-24.webm", + "vp90-2-00-quantizer-25.webm", + "vp90-2-00-quantizer-26.webm", + "vp90-2-00-quantizer-27.webm", + "vp90-2-00-quantizer-28.webm", + "vp90-2-00-quantizer-29.webm", + "vp90-2-00-quantizer-30.webm", + "vp90-2-00-quantizer-31.webm", + "vp90-2-00-quantizer-32.webm", + "vp90-2-00-quantizer-33.webm", + "vp90-2-00-quantizer-34.webm", + "vp90-2-00-quantizer-35.webm", + "vp90-2-00-quantizer-36.webm", + "vp90-2-00-quantizer-37.webm", + "vp90-2-00-quantizer-38.webm", + "vp90-2-00-quantizer-39.webm", + "vp90-2-00-quantizer-40.webm", + "vp90-2-00-quantizer-41.webm", + "vp90-2-00-quantizer-42.webm", + "vp90-2-00-quantizer-43.webm", + "vp90-2-00-quantizer-44.webm", + "vp90-2-00-quantizer-45.webm", + "vp90-2-00-quantizer-46.webm", + "vp90-2-00-quantizer-47.webm", + "vp90-2-00-quantizer-48.webm", + "vp90-2-00-quantizer-49.webm", + "vp90-2-00-quantizer-50.webm", + "vp90-2-00-quantizer-51.webm", + "vp90-2-00-quantizer-52.webm", + "vp90-2-00-quantizer-53.webm", + "vp90-2-00-quantizer-54.webm", + "vp90-2-00-quantizer-55.webm", + "vp90-2-00-quantizer-56.webm", + "vp90-2-00-quantizer-57.webm", + "vp90-2-00-quantizer-58.webm", + "vp90-2-00-quantizer-59.webm", + "vp90-2-00-quantizer-60.webm", + "vp90-2-00-quantizer-61.webm", + "vp90-2-00-quantizer-62.webm", + "vp90-2-00-quantizer-63.webm", + 
"vp90-2-01-sharpness-1.webm", + "vp90-2-01-sharpness-2.webm", + "vp90-2-01-sharpness-3.webm", + "vp90-2-01-sharpness-4.webm", + "vp90-2-01-sharpness-5.webm", + "vp90-2-01-sharpness-6.webm", + "vp90-2-01-sharpness-7.webm", + "vp90-2-02-size-08x08.webm", + "vp90-2-02-size-08x10.webm", + "vp90-2-02-size-08x16.webm", + "vp90-2-02-size-08x18.webm", + "vp90-2-02-size-08x32.webm", + "vp90-2-02-size-08x34.webm", + "vp90-2-02-size-08x64.webm", + "vp90-2-02-size-08x66.webm", + "vp90-2-02-size-10x08.webm", + "vp90-2-02-size-10x10.webm", + "vp90-2-02-size-10x16.webm", + "vp90-2-02-size-10x18.webm", + "vp90-2-02-size-10x32.webm", + "vp90-2-02-size-10x34.webm", + "vp90-2-02-size-10x64.webm", + "vp90-2-02-size-10x66.webm", + "vp90-2-02-size-16x08.webm", + "vp90-2-02-size-16x10.webm", + "vp90-2-02-size-16x16.webm", + "vp90-2-02-size-16x18.webm", + "vp90-2-02-size-16x32.webm", + "vp90-2-02-size-16x34.webm", + "vp90-2-02-size-16x64.webm", + "vp90-2-02-size-16x66.webm", + "vp90-2-02-size-18x08.webm", + "vp90-2-02-size-18x10.webm", + "vp90-2-02-size-18x16.webm", + "vp90-2-02-size-18x18.webm", + "vp90-2-02-size-18x32.webm", + "vp90-2-02-size-18x34.webm", + "vp90-2-02-size-18x64.webm", + "vp90-2-02-size-18x66.webm", + "vp90-2-02-size-32x08.webm", + "vp90-2-02-size-32x10.webm", + "vp90-2-02-size-32x16.webm", + "vp90-2-02-size-32x18.webm", + "vp90-2-02-size-32x32.webm", + "vp90-2-02-size-32x34.webm", + "vp90-2-02-size-32x64.webm", + "vp90-2-02-size-32x66.webm", + "vp90-2-02-size-34x08.webm", + "vp90-2-02-size-34x10.webm", + "vp90-2-02-size-34x16.webm", + "vp90-2-02-size-34x18.webm", + "vp90-2-02-size-34x32.webm", + "vp90-2-02-size-34x34.webm", + "vp90-2-02-size-34x64.webm", + "vp90-2-02-size-34x66.webm", + "vp90-2-02-size-64x08.webm", + "vp90-2-02-size-64x10.webm", + "vp90-2-02-size-64x16.webm", + "vp90-2-02-size-64x18.webm", + "vp90-2-02-size-64x32.webm", + "vp90-2-02-size-64x34.webm", + "vp90-2-02-size-64x64.webm", + "vp90-2-02-size-64x66.webm", + "vp90-2-02-size-66x08.webm", + 
"vp90-2-02-size-66x10.webm", + "vp90-2-02-size-66x16.webm", + "vp90-2-02-size-66x18.webm", + "vp90-2-02-size-66x32.webm", + "vp90-2-02-size-66x34.webm", + "vp90-2-02-size-66x64.webm", + "vp90-2-02-size-66x66.webm", + "vp90-2-02-size-130x132.webm", + "vp90-2-02-size-132x130.webm", + "vp90-2-02-size-132x132.webm", + "vp90-2-02-size-178x180.webm", + "vp90-2-02-size-180x178.webm", + "vp90-2-02-size-180x180.webm", + "vp90-2-03-size-196x196.webm", + "vp90-2-03-size-196x198.webm", + "vp90-2-03-size-196x200.webm", + "vp90-2-03-size-196x202.webm", + "vp90-2-03-size-196x208.webm", + "vp90-2-03-size-196x210.webm", + "vp90-2-03-size-196x224.webm", + "vp90-2-03-size-196x226.webm", + "vp90-2-03-size-198x196.webm", + "vp90-2-03-size-198x198.webm", + "vp90-2-03-size-198x200.webm", + "vp90-2-03-size-198x202.webm", + "vp90-2-03-size-198x208.webm", + "vp90-2-03-size-198x210.webm", + "vp90-2-03-size-198x224.webm", + "vp90-2-03-size-198x226.webm", + "vp90-2-03-size-200x196.webm", + "vp90-2-03-size-200x198.webm", + "vp90-2-03-size-200x200.webm", + "vp90-2-03-size-200x202.webm", + "vp90-2-03-size-200x208.webm", + "vp90-2-03-size-200x210.webm", + "vp90-2-03-size-200x224.webm", + "vp90-2-03-size-200x226.webm", + "vp90-2-03-size-202x196.webm", + "vp90-2-03-size-202x198.webm", + "vp90-2-03-size-202x200.webm", + "vp90-2-03-size-202x202.webm", + "vp90-2-03-size-202x208.webm", + "vp90-2-03-size-202x210.webm", + "vp90-2-03-size-202x224.webm", + "vp90-2-03-size-202x226.webm", + "vp90-2-03-size-208x196.webm", + "vp90-2-03-size-208x198.webm", + "vp90-2-03-size-208x200.webm", + "vp90-2-03-size-208x202.webm", + "vp90-2-03-size-208x208.webm", + "vp90-2-03-size-208x210.webm", + "vp90-2-03-size-208x224.webm", + "vp90-2-03-size-208x226.webm", + "vp90-2-03-size-210x196.webm", + "vp90-2-03-size-210x198.webm", + "vp90-2-03-size-210x200.webm", + "vp90-2-03-size-210x202.webm", + "vp90-2-03-size-210x208.webm", + "vp90-2-03-size-210x210.webm", + "vp90-2-03-size-210x224.webm", + "vp90-2-03-size-210x226.webm", + 
"vp90-2-03-size-224x196.webm", + "vp90-2-03-size-224x198.webm", + "vp90-2-03-size-224x200.webm", + "vp90-2-03-size-224x202.webm", + "vp90-2-03-size-224x208.webm", + "vp90-2-03-size-224x210.webm", + "vp90-2-03-size-224x224.webm", + "vp90-2-03-size-224x226.webm", + "vp90-2-03-size-226x196.webm", + "vp90-2-03-size-226x198.webm", + "vp90-2-03-size-226x200.webm", + "vp90-2-03-size-226x202.webm", + "vp90-2-03-size-226x208.webm", + "vp90-2-03-size-226x210.webm", + "vp90-2-03-size-226x224.webm", + "vp90-2-03-size-226x226.webm", + "vp90-2-03-size-352x288.webm", "vp90-2-03-deltaq.webm", - "vp90-2-05-resize.ivf", "vp90-2-06-bilinear.webm", - "vp90-2-07-frame_parallel.webm", "vp90-2-08-tile_1x2_frame_parallel.webm", - "vp90-2-08-tile_1x2.webm", "vp90-2-08-tile_1x4_frame_parallel.webm", - "vp90-2-08-tile_1x4.webm", "vp90-2-08-tile_1x8_frame_parallel.webm", - "vp90-2-08-tile_1x8.webm", "vp90-2-08-tile-4x4.webm", - "vp90-2-08-tile-4x1.webm", "vp90-2-09-subpixel-00.ivf", - "vp90-2-02-size-lf-1920x1080.webm", "vp90-2-09-aq2.webm", - "vp90-2-09-lf_deltas.webm", "vp90-2-10-show-existing-frame.webm", + "vp90-2-05-resize.ivf", + "vp90-2-06-bilinear.webm", + "vp90-2-07-frame_parallel.webm", + "vp90-2-08-tile_1x2_frame_parallel.webm", + "vp90-2-08-tile_1x2.webm", + "vp90-2-08-tile_1x4_frame_parallel.webm", + "vp90-2-08-tile_1x4.webm", + "vp90-2-08-tile_1x8_frame_parallel.webm", + "vp90-2-08-tile_1x8.webm", + "vp90-2-08-tile-4x4.webm", + "vp90-2-08-tile-4x1.webm", + "vp90-2-09-subpixel-00.ivf", + "vp90-2-02-size-lf-1920x1080.webm", + "vp90-2-09-aq2.webm", + "vp90-2-09-lf_deltas.webm", + "vp90-2-10-show-existing-frame.webm", "vp90-2-10-show-existing-frame2.webm", - "vp90-2-11-size-351x287.webm", "vp90-2-11-size-351x288.webm", - "vp90-2-11-size-352x287.webm", "vp90-2-12-droppable_1.ivf", - "vp90-2-12-droppable_2.ivf", "vp90-2-12-droppable_3.ivf", + "vp90-2-11-size-351x287.webm", + "vp90-2-11-size-351x288.webm", + "vp90-2-11-size-352x287.webm", + "vp90-2-12-droppable_1.ivf", + 
"vp90-2-12-droppable_2.ivf", + "vp90-2-12-droppable_3.ivf", #if !CONFIG_SIZE_LIMIT || \ (DECODE_WIDTH_LIMIT >= 20400 && DECODE_HEIGHT_LIMIT >= 120) "vp90-2-13-largescaling.webm", #endif "vp90-2-14-resize-fp-tiles-1-16.webm", "vp90-2-14-resize-fp-tiles-1-2-4-8-16.webm", - "vp90-2-14-resize-fp-tiles-1-2.webm", "vp90-2-14-resize-fp-tiles-1-4.webm", - "vp90-2-14-resize-fp-tiles-16-1.webm", "vp90-2-14-resize-fp-tiles-16-2.webm", + "vp90-2-14-resize-fp-tiles-1-2.webm", + "vp90-2-14-resize-fp-tiles-1-4.webm", + "vp90-2-14-resize-fp-tiles-16-1.webm", + "vp90-2-14-resize-fp-tiles-16-2.webm", "vp90-2-14-resize-fp-tiles-16-4.webm", "vp90-2-14-resize-fp-tiles-16-8-4-2-1.webm", - "vp90-2-14-resize-fp-tiles-16-8.webm", "vp90-2-14-resize-fp-tiles-1-8.webm", - "vp90-2-14-resize-fp-tiles-2-16.webm", "vp90-2-14-resize-fp-tiles-2-1.webm", - "vp90-2-14-resize-fp-tiles-2-4.webm", "vp90-2-14-resize-fp-tiles-2-8.webm", - "vp90-2-14-resize-fp-tiles-4-16.webm", "vp90-2-14-resize-fp-tiles-4-1.webm", - "vp90-2-14-resize-fp-tiles-4-2.webm", "vp90-2-14-resize-fp-tiles-4-8.webm", - "vp90-2-14-resize-fp-tiles-8-16.webm", "vp90-2-14-resize-fp-tiles-8-1.webm", - "vp90-2-14-resize-fp-tiles-8-2.webm", "vp90-2-14-resize-fp-tiles-8-4.webm", + "vp90-2-14-resize-fp-tiles-16-8.webm", + "vp90-2-14-resize-fp-tiles-1-8.webm", + "vp90-2-14-resize-fp-tiles-2-16.webm", + "vp90-2-14-resize-fp-tiles-2-1.webm", + "vp90-2-14-resize-fp-tiles-2-4.webm", + "vp90-2-14-resize-fp-tiles-2-8.webm", + "vp90-2-14-resize-fp-tiles-4-16.webm", + "vp90-2-14-resize-fp-tiles-4-1.webm", + "vp90-2-14-resize-fp-tiles-4-2.webm", + "vp90-2-14-resize-fp-tiles-4-8.webm", + "vp90-2-14-resize-fp-tiles-8-16.webm", + "vp90-2-14-resize-fp-tiles-8-1.webm", + "vp90-2-14-resize-fp-tiles-8-2.webm", + "vp90-2-14-resize-fp-tiles-8-4.webm", "vp90-2-14-resize-10frames-fp-tiles-1-2-4-8.webm", "vp90-2-14-resize-10frames-fp-tiles-1-2.webm", "vp90-2-14-resize-10frames-fp-tiles-1-4.webm", @@ -225,25 +348,33 @@ const char *const kVP9TestVectors[] = { 
"vp90-2-14-resize-10frames-fp-tiles-8-2.webm", "vp90-2-14-resize-10frames-fp-tiles-8-4-2-1.webm", "vp90-2-14-resize-10frames-fp-tiles-8-4.webm", - "vp90-2-15-segkey.webm", "vp90-2-15-segkey_adpq.webm", - "vp90-2-16-intra-only.webm", "vp90-2-17-show-existing-frame.webm", - "vp90-2-18-resize.ivf", "vp90-2-19-skip.webm", - "vp90-2-19-skip-01.webm", "vp90-2-19-skip-02.webm", + "vp90-2-15-segkey.webm", + "vp90-2-15-segkey_adpq.webm", + "vp90-2-16-intra-only.webm", + "vp90-2-17-show-existing-frame.webm", + "vp90-2-18-resize.ivf", + "vp90-2-19-skip.webm", + "vp90-2-19-skip-01.webm", + "vp90-2-19-skip-02.webm", "vp91-2-04-yuv444.webm", - "vp91-2-04-yuv422.webm", "vp91-2-04-yuv440.webm", + "vp91-2-04-yuv422.webm", + "vp91-2-04-yuv440.webm", #if CONFIG_VP9_HIGHBITDEPTH - "vp92-2-20-10bit-yuv420.webm", "vp92-2-20-12bit-yuv420.webm", - "vp93-2-20-10bit-yuv422.webm", "vp93-2-20-12bit-yuv422.webm", - "vp93-2-20-10bit-yuv440.webm", "vp93-2-20-12bit-yuv440.webm", - "vp93-2-20-10bit-yuv444.webm", "vp93-2-20-12bit-yuv444.webm", + "vp92-2-20-10bit-yuv420.webm", + "vp92-2-20-12bit-yuv420.webm", + "vp93-2-20-10bit-yuv422.webm", + "vp93-2-20-12bit-yuv422.webm", + "vp93-2-20-10bit-yuv440.webm", + "vp93-2-20-12bit-yuv440.webm", + "vp93-2-20-10bit-yuv444.webm", + "vp93-2-20-12bit-yuv444.webm", #endif // CONFIG_VP9_HIGHBITDEPTH - "vp90-2-20-big_superframe-01.webm", "vp90-2-20-big_superframe-02.webm", + "vp90-2-20-big_superframe-01.webm", + "vp90-2-20-big_superframe-02.webm", RESIZE_TEST_VECTORS }; const int kNumVP9TestVectors = NELEMENTS(kVP9TestVectors); -const char *const kVP9TestVectorsResize[] = { - RESIZE_TEST_VECTORS -}; +const char *const kVP9TestVectorsResize[] = { RESIZE_TEST_VECTORS }; const int kNumVP9TestVectorsResize = NELEMENTS(kVP9TestVectorsResize); #undef RESIZE_TEST_VECTORS #endif // CONFIG_VP9_DECODER diff --git a/libs/libvpx/test/tile_independence_test.cc b/libs/libvpx/test/tile_independence_test.cc index 193bd45986..e24981c68d 100644 --- 
a/libs/libvpx/test/tile_independence_test.cc +++ b/libs/libvpx/test/tile_independence_test.cc @@ -24,9 +24,7 @@ class TileIndependenceTest : public ::libvpx_test::EncoderTest, public ::libvpx_test::CodecTestWithParam { protected: TileIndependenceTest() - : EncoderTest(GET_PARAM(0)), - md5_fw_order_(), - md5_inv_order_(), + : EncoderTest(GET_PARAM(0)), md5_fw_order_(), md5_inv_order_(), n_tiles_(GET_PARAM(1)) { init_flags_ = VPX_CODEC_USE_PSNR; vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t(); @@ -58,7 +56,7 @@ class TileIndependenceTest : public ::libvpx_test::EncoderTest, void UpdateMD5(::libvpx_test::Decoder *dec, const vpx_codec_cx_pkt_t *pkt, ::libvpx_test::MD5 *md5) { const vpx_codec_err_t res = dec->DecodeFrame( - reinterpret_cast(pkt->data.frame.buf), pkt->data.frame.sz); + reinterpret_cast(pkt->data.frame.buf), pkt->data.frame.sz); if (res != VPX_CODEC_OK) { abort_ = true; ASSERT_EQ(VPX_CODEC_OK, res); @@ -103,6 +101,4 @@ TEST_P(TileIndependenceTest, MD5Match) { } VP9_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Range(0, 2, 1)); - -VP10_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Range(0, 2, 1)); } // namespace diff --git a/libs/libvpx/test/twopass_encoder.sh b/libs/libvpx/test/twopass_encoder.sh index 1189e5131c..7a223f2afc 100755 --- a/libs/libvpx/test/twopass_encoder.sh +++ b/libs/libvpx/test/twopass_encoder.sh @@ -23,7 +23,8 @@ twopass_encoder_verify_environment() { fi } -# Runs twopass_encoder using the codec specified by $1. +# Runs twopass_encoder using the codec specified by $1 with a frame limit of +# 100. 
twopass_encoder() { local encoder="${LIBVPX_BIN_PATH}/twopass_encoder${VPX_TEST_EXE_SUFFIX}" local codec="$1" @@ -35,7 +36,7 @@ twopass_encoder() { fi eval "${VPX_TEST_PREFIX}" "${encoder}" "${codec}" "${YUV_RAW_INPUT_WIDTH}" \ - "${YUV_RAW_INPUT_HEIGHT}" "${YUV_RAW_INPUT}" "${output_file}" \ + "${YUV_RAW_INPUT_HEIGHT}" "${YUV_RAW_INPUT}" "${output_file}" 100 \ ${devnull} [ -e "${output_file}" ] || return 1 @@ -47,16 +48,13 @@ twopass_encoder_vp8() { fi } -# TODO(tomfinegan): Add a frame limit param to twopass_encoder and enable this -# test. VP9 is just too slow right now: This test takes 31m16s+ on a fast -# machine. -DISABLED_twopass_encoder_vp9() { +twopass_encoder_vp9() { if [ "$(vp9_encode_available)" = "yes" ]; then twopass_encoder vp9 || return 1 fi } twopass_encoder_tests="twopass_encoder_vp8 - DISABLED_twopass_encoder_vp9" + twopass_encoder_vp9" run_tests twopass_encoder_verify_environment "${twopass_encoder_tests}" diff --git a/libs/libvpx/test/user_priv_test.cc b/libs/libvpx/test/user_priv_test.cc index 8512d88cf4..4b5de094e9 100644 --- a/libs/libvpx/test/user_priv_test.cc +++ b/libs/libvpx/test/user_priv_test.cc @@ -34,8 +34,8 @@ using libvpx_test::ACMRandom; void CheckUserPrivateData(void *user_priv, int *target) { // actual pointer value should be the same as expected. - EXPECT_EQ(reinterpret_cast(target), user_priv) << - "user_priv pointer value does not match."; + EXPECT_EQ(reinterpret_cast(target), user_priv) + << "user_priv pointer value does not match."; } // Decodes |filename|. 
Passes in user_priv data when calling DecodeFrame and diff --git a/libs/libvpx/test/util.h b/libs/libvpx/test/util.h index b27bffa94b..1f2540ecf2 100644 --- a/libs/libvpx/test/util.h +++ b/libs/libvpx/test/util.h @@ -17,24 +17,24 @@ #include "vpx/vpx_image.h" // Macros -#define GET_PARAM(k) std::tr1::get< k >(GetParam()) +#define GET_PARAM(k) std::tr1::get(GetParam()) inline double compute_psnr(const vpx_image_t *img1, const vpx_image_t *img2) { - assert((img1->fmt == img2->fmt) && - (img1->d_w == img2->d_w) && + assert((img1->fmt == img2->fmt) && (img1->d_w == img2->d_w) && (img1->d_h == img2->d_h)); - const unsigned int width_y = img1->d_w; + const unsigned int width_y = img1->d_w; const unsigned int height_y = img1->d_h; unsigned int i, j; int64_t sqrerr = 0; - for (i = 0; i < height_y; ++i) + for (i = 0; i < height_y; ++i) { for (j = 0; j < width_y; ++j) { int64_t d = img1->planes[VPX_PLANE_Y][i * img1->stride[VPX_PLANE_Y] + j] - img2->planes[VPX_PLANE_Y][i * img2->stride[VPX_PLANE_Y] + j]; sqrerr += d * d; } + } double mse = static_cast(sqrerr) / (width_y * height_y); double psnr = 100.0; if (mse > 0.0) { diff --git a/libs/libvpx/test/variance_test.cc b/libs/libvpx/test/variance_test.cc index 6f50f78f2e..6e31165faf 100644 --- a/libs/libvpx/test/variance_test.cc +++ b/libs/libvpx/test/variance_test.cc @@ -41,10 +41,6 @@ typedef unsigned int (*Get4x4SseFunc)(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride); typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src); - -using ::std::tr1::get; -using ::std::tr1::make_tuple; -using ::std::tr1::tuple; using libvpx_test::ACMRandom; // Truncate high bit depth results by downshifting (with rounding) by: @@ -61,8 +57,7 @@ static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) { *se = (*se + 2) >> 2; break; case VPX_BITS_8: - default: - break; + default: break; } } @@ -74,10 +69,13 @@ static unsigned int mb_ss_ref(const int16_t *src) { return res; } -static uint32_t 
variance_ref(const uint8_t *src, const uint8_t *ref, - int l2w, int l2h, int src_stride_coeff, - int ref_stride_coeff, uint32_t *sse_ptr, - bool use_high_bit_depth_, +/* Note: + * Our codebase calculates the "diff" value in the variance algorithm by + * (src - ref). + */ +static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref, int l2w, + int l2h, int src_stride, int ref_stride, + uint32_t *sse_ptr, bool use_high_bit_depth_, vpx_bit_depth_t bit_depth) { int64_t se = 0; uint64_t sse = 0; @@ -87,14 +85,13 @@ static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref, for (int x = 0; x < w; x++) { int diff; if (!use_high_bit_depth_) { - diff = ref[w * y * ref_stride_coeff + x] - - src[w * y * src_stride_coeff + x]; + diff = src[y * src_stride + x] - ref[y * ref_stride + x]; se += diff; sse += diff * diff; #if CONFIG_VP9_HIGHBITDEPTH } else { - diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] - - CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x]; + diff = CONVERT_TO_SHORTPTR(src)[y * src_stride + x] - + CONVERT_TO_SHORTPTR(ref)[y * ref_stride + x]; se += diff; sse += diff * diff; #endif // CONFIG_VP9_HIGHBITDEPTH @@ -103,9 +100,8 @@ static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref, } RoundHighBitDepth(bit_depth, &se, &sse); *sse_ptr = static_cast(sse); - return static_cast(sse - - ((static_cast(se) * se) >> - (l2w + l2h))); + return static_cast( + sse - ((static_cast(se) * se) >> (l2w + l2h))); } /* The subpel reference functions differ from the codec version in one aspect: @@ -116,8 +112,7 @@ static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref, */ static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src, int l2w, int l2h, int xoff, int yoff, - uint32_t *sse_ptr, - bool use_high_bit_depth_, + uint32_t *sse_ptr, bool use_high_bit_depth_, vpx_bit_depth_t bit_depth) { int64_t se = 0; uint64_t sse = 0; @@ -161,18 +156,71 @@ static uint32_t subpel_variance_ref(const uint8_t *ref, 
const uint8_t *src, } RoundHighBitDepth(bit_depth, &se, &sse); *sse_ptr = static_cast(sse); - return static_cast(sse - - ((static_cast(se) * se) >> - (l2w + l2h))); + return static_cast( + sse - ((static_cast(se) * se) >> (l2w + l2h))); } +static uint32_t subpel_avg_variance_ref(const uint8_t *ref, const uint8_t *src, + const uint8_t *second_pred, int l2w, + int l2h, int xoff, int yoff, + uint32_t *sse_ptr, + bool use_high_bit_depth, + vpx_bit_depth_t bit_depth) { + int64_t se = 0; + uint64_t sse = 0; + const int w = 1 << l2w; + const int h = 1 << l2h; + + xoff <<= 1; + yoff <<= 1; + + for (int y = 0; y < h; y++) { + for (int x = 0; x < w; x++) { + // bilinear interpolation at a 16th pel step + if (!use_high_bit_depth) { + const int a1 = ref[(w + 1) * (y + 0) + x + 0]; + const int a2 = ref[(w + 1) * (y + 0) + x + 1]; + const int b1 = ref[(w + 1) * (y + 1) + x + 0]; + const int b2 = ref[(w + 1) * (y + 1) + x + 1]; + const int a = a1 + (((a2 - a1) * xoff + 8) >> 4); + const int b = b1 + (((b2 - b1) * xoff + 8) >> 4); + const int r = a + (((b - a) * yoff + 8) >> 4); + const int diff = + ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x]; + se += diff; + sse += diff * diff; +#if CONFIG_VP9_HIGHBITDEPTH + } else { + const uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref); + const uint16_t *src16 = CONVERT_TO_SHORTPTR(src); + const uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred); + const int a1 = ref16[(w + 1) * (y + 0) + x + 0]; + const int a2 = ref16[(w + 1) * (y + 0) + x + 1]; + const int b1 = ref16[(w + 1) * (y + 1) + x + 0]; + const int b2 = ref16[(w + 1) * (y + 1) + x + 1]; + const int a = a1 + (((a2 - a1) * xoff + 8) >> 4); + const int b = b1 + (((b2 - b1) * xoff + 8) >> 4); + const int r = a + (((b - a) * yoff + 8) >> 4); + const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x]; + se += diff; + sse += diff * diff; +#endif // CONFIG_VP9_HIGHBITDEPTH + } + } + } + RoundHighBitDepth(bit_depth, &se, &sse); + *sse_ptr = static_cast(sse); + return 
static_cast( + sse - ((static_cast(se) * se) >> (l2w + l2h))); +} + +//////////////////////////////////////////////////////////////////////////////// + class SumOfSquaresTest : public ::testing::TestWithParam { public: SumOfSquaresTest() : func_(GetParam()) {} - virtual ~SumOfSquaresTest() { - libvpx_test::ClearSystemState(); - } + virtual ~SumOfSquaresTest() { libvpx_test::ClearSystemState(); } protected: void ConstTest(); @@ -208,20 +256,298 @@ void SumOfSquaresTest::RefTest() { } } -template -class VarianceTest - : public ::testing::TestWithParam > { +//////////////////////////////////////////////////////////////////////////////// +// Encapsulating struct to store the function to test along with +// some testing context. +// Can be used for MSE, SSE, Variance, etc. + +template +struct TestParams { + TestParams(int log2w = 0, int log2h = 0, Func function = NULL, + int bit_depth_value = 0) + : log2width(log2w), log2height(log2h), func(function) { + use_high_bit_depth = (bit_depth_value > 0); + if (use_high_bit_depth) { + bit_depth = static_cast(bit_depth_value); + } else { + bit_depth = VPX_BITS_8; + } + width = 1 << log2width; + height = 1 << log2height; + block_size = width * height; + mask = (1u << bit_depth) - 1; + } + + int log2width, log2height; + int width, height; + int block_size; + Func func; + vpx_bit_depth_t bit_depth; + bool use_high_bit_depth; + uint32_t mask; +}; + +template +std::ostream &operator<<(std::ostream &os, const TestParams &p) { + return os << "log2width/height:" << p.log2width << "/" << p.log2height + << " function:" << reinterpret_cast(p.func) + << " bit-depth:" << p.bit_depth; +} + +// Main class for testing a function type +template +class MainTestClass + : public ::testing::TestWithParam > { public: virtual void SetUp() { - const tuple& params = this->GetParam(); - log2width_ = get<0>(params); + params_ = this->GetParam(); + + rnd_.Reset(ACMRandom::DeterministicSeed()); + const size_t unit = + use_high_bit_depth() ? 
sizeof(uint16_t) : sizeof(uint8_t); + src_ = reinterpret_cast(vpx_memalign(16, block_size() * unit)); + ref_ = new uint8_t[block_size() * unit]; + ASSERT_TRUE(src_ != NULL); + ASSERT_TRUE(ref_ != NULL); +#if CONFIG_VP9_HIGHBITDEPTH + if (use_high_bit_depth()) { + // TODO(skal): remove! + src_ = CONVERT_TO_BYTEPTR(src_); + ref_ = CONVERT_TO_BYTEPTR(ref_); + } +#endif + } + + virtual void TearDown() { +#if CONFIG_VP9_HIGHBITDEPTH + if (use_high_bit_depth()) { + // TODO(skal): remove! + src_ = reinterpret_cast(CONVERT_TO_SHORTPTR(src_)); + ref_ = reinterpret_cast(CONVERT_TO_SHORTPTR(ref_)); + } +#endif + + vpx_free(src_); + delete[] ref_; + src_ = NULL; + ref_ = NULL; + libvpx_test::ClearSystemState(); + } + + protected: + // We could sub-class MainTestClass into dedicated class for Variance + // and MSE/SSE, but it involves a lot of 'this->xxx' dereferencing + // to access top class fields xxx. That's cumbersome, so for now we'll just + // implement the testing methods here: + + // Variance tests + void ZeroTest(); + void RefTest(); + void RefStrideTest(); + void OneQuarterTest(); + + // MSE/SSE tests + void RefTestMse(); + void RefTestSse(); + void MaxTestMse(); + void MaxTestSse(); + + protected: + ACMRandom rnd_; + uint8_t *src_; + uint8_t *ref_; + TestParams params_; + + // some relay helpers + bool use_high_bit_depth() const { return params_.use_high_bit_depth; } + int byte_shift() const { return params_.bit_depth - 8; } + int block_size() const { return params_.block_size; } + int width() const { return params_.width; } + uint32_t mask() const { return params_.mask; } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Tests related to variance. 
+ +template +void MainTestClass::ZeroTest() { + for (int i = 0; i <= 255; ++i) { + if (!use_high_bit_depth()) { + memset(src_, i, block_size()); + } else { + uint16_t *const src16 = CONVERT_TO_SHORTPTR(src_); + for (int k = 0; k < block_size(); ++k) src16[k] = i << byte_shift(); + } + for (int j = 0; j <= 255; ++j) { + if (!use_high_bit_depth()) { + memset(ref_, j, block_size()); + } else { + uint16_t *const ref16 = CONVERT_TO_SHORTPTR(ref_); + for (int k = 0; k < block_size(); ++k) ref16[k] = j << byte_shift(); + } + unsigned int sse, var; + ASM_REGISTER_STATE_CHECK( + var = params_.func(src_, width(), ref_, width(), &sse)); + EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j; + } + } +} + +template +void MainTestClass::RefTest() { + for (int i = 0; i < 10; ++i) { + for (int j = 0; j < block_size(); j++) { + if (!use_high_bit_depth()) { + src_[j] = rnd_.Rand8(); + ref_[j] = rnd_.Rand8(); +#if CONFIG_VP9_HIGHBITDEPTH + } else { + CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask(); + CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask(); +#endif // CONFIG_VP9_HIGHBITDEPTH + } + } + unsigned int sse1, sse2, var1, var2; + const int stride = width(); + ASM_REGISTER_STATE_CHECK( + var1 = params_.func(src_, stride, ref_, stride, &sse1)); + var2 = + variance_ref(src_, ref_, params_.log2width, params_.log2height, stride, + stride, &sse2, use_high_bit_depth(), params_.bit_depth); + EXPECT_EQ(sse1, sse2) << "Error at test index: " << i; + EXPECT_EQ(var1, var2) << "Error at test index: " << i; + } +} + +template +void MainTestClass::RefStrideTest() { + for (int i = 0; i < 10; ++i) { + const int ref_stride = (i & 1) * width(); + const int src_stride = ((i >> 1) & 1) * width(); + for (int j = 0; j < block_size(); j++) { + const int ref_ind = (j / width()) * ref_stride + j % width(); + const int src_ind = (j / width()) * src_stride + j % width(); + if (!use_high_bit_depth()) { + src_[src_ind] = rnd_.Rand8(); + ref_[ref_ind] = rnd_.Rand8(); +#if 
CONFIG_VP9_HIGHBITDEPTH + } else { + CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask(); + CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask(); +#endif // CONFIG_VP9_HIGHBITDEPTH + } + } + unsigned int sse1, sse2; + unsigned int var1, var2; + + ASM_REGISTER_STATE_CHECK( + var1 = params_.func(src_, src_stride, ref_, ref_stride, &sse1)); + var2 = variance_ref(src_, ref_, params_.log2width, params_.log2height, + src_stride, ref_stride, &sse2, use_high_bit_depth(), + params_.bit_depth); + EXPECT_EQ(sse1, sse2) << "Error at test index: " << i; + EXPECT_EQ(var1, var2) << "Error at test index: " << i; + } +} + +template +void MainTestClass::OneQuarterTest() { + const int half = block_size() / 2; + if (!use_high_bit_depth()) { + memset(src_, 255, block_size()); + memset(ref_, 255, half); + memset(ref_ + half, 0, half); +#if CONFIG_VP9_HIGHBITDEPTH + } else { + vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << byte_shift(), block_size()); + vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << byte_shift(), half); + vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half); +#endif // CONFIG_VP9_HIGHBITDEPTH + } + unsigned int sse, var, expected; + ASM_REGISTER_STATE_CHECK( + var = params_.func(src_, width(), ref_, width(), &sse)); + expected = block_size() * 255 * 255 / 4; + EXPECT_EQ(expected, var); +} + +//////////////////////////////////////////////////////////////////////////////// +// Tests related to MSE / SSE. 
+ +template +void MainTestClass::RefTestMse() { + for (int i = 0; i < 10; ++i) { + for (int j = 0; j < block_size(); ++j) { + src_[j] = rnd_.Rand8(); + ref_[j] = rnd_.Rand8(); + } + unsigned int sse1, sse2; + const int stride = width(); + ASM_REGISTER_STATE_CHECK(params_.func(src_, stride, ref_, stride, &sse1)); + variance_ref(src_, ref_, params_.log2width, params_.log2height, stride, + stride, &sse2, false, VPX_BITS_8); + EXPECT_EQ(sse1, sse2); + } +} + +template +void MainTestClass::RefTestSse() { + for (int i = 0; i < 10; ++i) { + for (int j = 0; j < block_size(); ++j) { + src_[j] = rnd_.Rand8(); + ref_[j] = rnd_.Rand8(); + } + unsigned int sse2; + unsigned int var1; + const int stride = width(); + ASM_REGISTER_STATE_CHECK(var1 = params_.func(src_, stride, ref_, stride)); + variance_ref(src_, ref_, params_.log2width, params_.log2height, stride, + stride, &sse2, false, VPX_BITS_8); + EXPECT_EQ(var1, sse2); + } +} + +template +void MainTestClass::MaxTestMse() { + memset(src_, 255, block_size()); + memset(ref_, 0, block_size()); + unsigned int sse; + ASM_REGISTER_STATE_CHECK(params_.func(src_, width(), ref_, width(), &sse)); + const unsigned int expected = block_size() * 255 * 255; + EXPECT_EQ(expected, sse); +} + +template +void MainTestClass::MaxTestSse() { + memset(src_, 255, block_size()); + memset(ref_, 0, block_size()); + unsigned int var; + ASM_REGISTER_STATE_CHECK(var = params_.func(src_, width(), ref_, width())); + const unsigned int expected = block_size() * 255 * 255; + EXPECT_EQ(expected, var); +} + +//////////////////////////////////////////////////////////////////////////////// + +using ::std::tr1::get; +using ::std::tr1::make_tuple; +using ::std::tr1::tuple; + +template +class SubpelVarianceTest + : public ::testing::TestWithParam< + tuple > { + public: + virtual void SetUp() { + const tuple ¶ms = + this->GetParam(); + log2width_ = get<0>(params); width_ = 1 << log2width_; log2height_ = get<1>(params); height_ = 1 << log2height_; - variance_ = 
get<2>(params); + subpel_variance_ = get<2>(params); if (get<3>(params)) { - bit_depth_ = static_cast(get<3>(params)); + bit_depth_ = (vpx_bit_depth_t)get<3>(params); use_high_bit_depth_ = true; } else { bit_depth_ = VPX_BITS_8; @@ -229,337 +555,6 @@ class VarianceTest } mask_ = (1 << bit_depth_) - 1; - rnd_.Reset(ACMRandom::DeterministicSeed()); - block_size_ = width_ * height_; - if (!use_high_bit_depth_) { - src_ = reinterpret_cast(vpx_memalign(16, block_size_ * 2)); - ref_ = new uint8_t[block_size_ * 2]; -#if CONFIG_VP9_HIGHBITDEPTH - } else { - src_ = CONVERT_TO_BYTEPTR(reinterpret_cast( - vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t)))); - ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]); -#endif // CONFIG_VP9_HIGHBITDEPTH - } - ASSERT_TRUE(src_ != NULL); - ASSERT_TRUE(ref_ != NULL); - } - - virtual void TearDown() { - if (!use_high_bit_depth_) { - vpx_free(src_); - delete[] ref_; -#if CONFIG_VP9_HIGHBITDEPTH - } else { - vpx_free(CONVERT_TO_SHORTPTR(src_)); - delete[] CONVERT_TO_SHORTPTR(ref_); -#endif // CONFIG_VP9_HIGHBITDEPTH - } - libvpx_test::ClearSystemState(); - } - - protected: - void ZeroTest(); - void RefTest(); - void RefStrideTest(); - void OneQuarterTest(); - - ACMRandom rnd_; - uint8_t *src_; - uint8_t *ref_; - int width_, log2width_; - int height_, log2height_; - vpx_bit_depth_t bit_depth_; - int mask_; - bool use_high_bit_depth_; - int block_size_; - VarianceFunctionType variance_; -}; - -template -void VarianceTest::ZeroTest() { - for (int i = 0; i <= 255; ++i) { - if (!use_high_bit_depth_) { - memset(src_, i, block_size_); -#if CONFIG_VP9_HIGHBITDEPTH - } else { - vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8), - block_size_); -#endif // CONFIG_VP9_HIGHBITDEPTH - } - for (int j = 0; j <= 255; ++j) { - if (!use_high_bit_depth_) { - memset(ref_, j, block_size_); -#if CONFIG_VP9_HIGHBITDEPTH - } else { - vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8), - block_size_); -#endif // 
CONFIG_VP9_HIGHBITDEPTH - } - unsigned int sse; - unsigned int var; - ASM_REGISTER_STATE_CHECK( - var = variance_(src_, width_, ref_, width_, &sse)); - EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j; - } - } -} - -template -void VarianceTest::RefTest() { - for (int i = 0; i < 10; ++i) { - for (int j = 0; j < block_size_; j++) { - if (!use_high_bit_depth_) { - src_[j] = rnd_.Rand8(); - ref_[j] = rnd_.Rand8(); -#if CONFIG_VP9_HIGHBITDEPTH - } else { - CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() && mask_; - CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() && mask_; -#endif // CONFIG_VP9_HIGHBITDEPTH - } - } - unsigned int sse1, sse2; - unsigned int var1; - const int stride_coeff = 1; - ASM_REGISTER_STATE_CHECK( - var1 = variance_(src_, width_, ref_, width_, &sse1)); - const unsigned int var2 = variance_ref(src_, ref_, log2width_, - log2height_, stride_coeff, - stride_coeff, &sse2, - use_high_bit_depth_, bit_depth_); - EXPECT_EQ(sse1, sse2); - EXPECT_EQ(var1, var2); - } -} - -template -void VarianceTest::RefStrideTest() { - for (int i = 0; i < 10; ++i) { - int ref_stride_coeff = i % 2; - int src_stride_coeff = (i >> 1) % 2; - for (int j = 0; j < block_size_; j++) { - int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_; - int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_; - if (!use_high_bit_depth_) { - src_[src_ind] = rnd_.Rand8(); - ref_[ref_ind] = rnd_.Rand8(); -#if CONFIG_VP9_HIGHBITDEPTH - } else { - CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() && mask_; - CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() && mask_; -#endif // CONFIG_VP9_HIGHBITDEPTH - } - } - unsigned int sse1, sse2; - unsigned int var1; - - ASM_REGISTER_STATE_CHECK( - var1 = variance_(src_, width_ * src_stride_coeff, - ref_, width_ * ref_stride_coeff, &sse1)); - const unsigned int var2 = variance_ref(src_, ref_, log2width_, - log2height_, src_stride_coeff, - ref_stride_coeff, &sse2, - use_high_bit_depth_, bit_depth_); - EXPECT_EQ(sse1, 
sse2); - EXPECT_EQ(var1, var2); - } -} - -template -void VarianceTest::OneQuarterTest() { - const int half = block_size_ / 2; - if (!use_high_bit_depth_) { - memset(src_, 255, block_size_); - memset(ref_, 255, half); - memset(ref_ + half, 0, half); -#if CONFIG_VP9_HIGHBITDEPTH - } else { - vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8), - block_size_); - vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half); - vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half); -#endif // CONFIG_VP9_HIGHBITDEPTH - } - unsigned int sse; - unsigned int var; - ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse)); - const unsigned int expected = block_size_ * 255 * 255 / 4; - EXPECT_EQ(expected, var); -} - -template -class MseTest - : public ::testing::TestWithParam > { - public: - virtual void SetUp() { - const tuple& params = this->GetParam(); - log2width_ = get<0>(params); - width_ = 1 << log2width_; - log2height_ = get<1>(params); - height_ = 1 << log2height_; - mse_ = get<2>(params); - - rnd(ACMRandom::DeterministicSeed()); - block_size_ = width_ * height_; - src_ = reinterpret_cast(vpx_memalign(16, block_size_)); - ref_ = new uint8_t[block_size_]; - ASSERT_TRUE(src_ != NULL); - ASSERT_TRUE(ref_ != NULL); - } - - virtual void TearDown() { - vpx_free(src_); - delete[] ref_; - libvpx_test::ClearSystemState(); - } - - protected: - void RefTest_mse(); - void RefTest_sse(); - void MaxTest_mse(); - void MaxTest_sse(); - - ACMRandom rnd; - uint8_t* src_; - uint8_t* ref_; - int width_, log2width_; - int height_, log2height_; - int block_size_; - MseFunctionType mse_; -}; - -template -void MseTest::RefTest_mse() { - for (int i = 0; i < 10; ++i) { - for (int j = 0; j < block_size_; j++) { - src_[j] = rnd.Rand8(); - ref_[j] = rnd.Rand8(); - } - unsigned int sse1, sse2; - const int stride_coeff = 1; - ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse1)); - variance_ref(src_, ref_, log2width_, log2height_, stride_coeff, 
- stride_coeff, &sse2, false, VPX_BITS_8); - EXPECT_EQ(sse1, sse2); - } -} - -template -void MseTest::RefTest_sse() { - for (int i = 0; i < 10; ++i) { - for (int j = 0; j < block_size_; j++) { - src_[j] = rnd.Rand8(); - ref_[j] = rnd.Rand8(); - } - unsigned int sse2; - unsigned int var1; - const int stride_coeff = 1; - ASM_REGISTER_STATE_CHECK(var1 = mse_(src_, width_, ref_, width_)); - variance_ref(src_, ref_, log2width_, log2height_, stride_coeff, - stride_coeff, &sse2, false, VPX_BITS_8); - EXPECT_EQ(var1, sse2); - } -} - -template -void MseTest::MaxTest_mse() { - memset(src_, 255, block_size_); - memset(ref_, 0, block_size_); - unsigned int sse; - ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse)); - const unsigned int expected = block_size_ * 255 * 255; - EXPECT_EQ(expected, sse); -} - -template -void MseTest::MaxTest_sse() { - memset(src_, 255, block_size_); - memset(ref_, 0, block_size_); - unsigned int var; - ASM_REGISTER_STATE_CHECK(var = mse_(src_, width_, ref_, width_)); - const unsigned int expected = block_size_ * 255 * 255; - EXPECT_EQ(expected, var); -} - -static uint32_t subpel_avg_variance_ref(const uint8_t *ref, - const uint8_t *src, - const uint8_t *second_pred, - int l2w, int l2h, - int xoff, int yoff, - uint32_t *sse_ptr, - bool use_high_bit_depth, - vpx_bit_depth_t bit_depth) { - int64_t se = 0; - uint64_t sse = 0; - const int w = 1 << l2w; - const int h = 1 << l2h; - - xoff <<= 1; - yoff <<= 1; - - for (int y = 0; y < h; y++) { - for (int x = 0; x < w; x++) { - // bilinear interpolation at a 16th pel step - if (!use_high_bit_depth) { - const int a1 = ref[(w + 1) * (y + 0) + x + 0]; - const int a2 = ref[(w + 1) * (y + 0) + x + 1]; - const int b1 = ref[(w + 1) * (y + 1) + x + 0]; - const int b2 = ref[(w + 1) * (y + 1) + x + 1]; - const int a = a1 + (((a2 - a1) * xoff + 8) >> 4); - const int b = b1 + (((b2 - b1) * xoff + 8) >> 4); - const int r = a + (((b - a) * yoff + 8) >> 4); - const int diff = ((r + second_pred[w * y + x] + 1) 
>> 1) - src[w * y + x]; - se += diff; - sse += diff * diff; -#if CONFIG_VP9_HIGHBITDEPTH - } else { - uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref); - uint16_t *src16 = CONVERT_TO_SHORTPTR(src); - uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred); - const int a1 = ref16[(w + 1) * (y + 0) + x + 0]; - const int a2 = ref16[(w + 1) * (y + 0) + x + 1]; - const int b1 = ref16[(w + 1) * (y + 1) + x + 0]; - const int b2 = ref16[(w + 1) * (y + 1) + x + 1]; - const int a = a1 + (((a2 - a1) * xoff + 8) >> 4); - const int b = b1 + (((b2 - b1) * xoff + 8) >> 4); - const int r = a + (((b - a) * yoff + 8) >> 4); - const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x]; - se += diff; - sse += diff * diff; -#endif // CONFIG_VP9_HIGHBITDEPTH - } - } - } - RoundHighBitDepth(bit_depth, &se, &sse); - *sse_ptr = static_cast(sse); - return static_cast(sse - - ((static_cast(se) * se) >> - (l2w + l2h))); -} - -template -class SubpelVarianceTest - : public ::testing::TestWithParam > { - public: - virtual void SetUp() { - const tuple& params = - this->GetParam(); - log2width_ = get<0>(params); - width_ = 1 << log2width_; - log2height_ = get<1>(params); - height_ = 1 << log2height_; - subpel_variance_ = get<2>(params); - if (get<3>(params)) { - bit_depth_ = (vpx_bit_depth_t) get<3>(params); - use_high_bit_depth_ = true; - } else { - bit_depth_ = VPX_BITS_8; - use_high_bit_depth_ = false; - } - mask_ = (1 << bit_depth_)-1; - rnd_.Reset(ACMRandom::DeterministicSeed()); block_size_ = width_ * height_; if (!use_high_bit_depth_) { @@ -568,14 +563,12 @@ class SubpelVarianceTest ref_ = new uint8_t[block_size_ + width_ + height_ + 1]; #if CONFIG_VP9_HIGHBITDEPTH } else { - src_ = CONVERT_TO_BYTEPTR( - reinterpret_cast( - vpx_memalign(16, block_size_*sizeof(uint16_t)))); - sec_ = CONVERT_TO_BYTEPTR( - reinterpret_cast( - vpx_memalign(16, block_size_*sizeof(uint16_t)))); - ref_ = CONVERT_TO_BYTEPTR( - new uint16_t[block_size_ + width_ + height_ + 1]); + src_ = 
CONVERT_TO_BYTEPTR(reinterpret_cast( + vpx_memalign(16, block_size_ * sizeof(uint16_t)))); + sec_ = CONVERT_TO_BYTEPTR(reinterpret_cast( + vpx_memalign(16, block_size_ * sizeof(uint16_t)))); + ref_ = + CONVERT_TO_BYTEPTR(new uint16_t[block_size_ + width_ + height_ + 1]); #endif // CONFIG_VP9_HIGHBITDEPTH } ASSERT_TRUE(src_ != NULL); @@ -610,11 +603,11 @@ class SubpelVarianceTest vpx_bit_depth_t bit_depth_; int width_, log2width_; int height_, log2height_; - int block_size_, mask_; + int block_size_, mask_; SubpelVarianceFunctionType subpel_variance_; }; -template +template void SubpelVarianceTest::RefTest() { for (int x = 0; x < 8; ++x) { for (int y = 0; y < 8; ++y) { @@ -637,20 +630,18 @@ void SubpelVarianceTest::RefTest() { } unsigned int sse1, sse2; unsigned int var1; - ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y, - src_, width_, &sse1)); - const unsigned int var2 = subpel_variance_ref(ref_, src_, - log2width_, log2height_, - x, y, &sse2, - use_high_bit_depth_, - bit_depth_); + ASM_REGISTER_STATE_CHECK( + var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1)); + const unsigned int var2 = + subpel_variance_ref(ref_, src_, log2width_, log2height_, x, y, &sse2, + use_high_bit_depth_, bit_depth_); EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y; EXPECT_EQ(var1, var2) << "at position " << x << ", " << y; } } } -template +template void SubpelVarianceTest::ExtremeRefTest() { // Compare against reference. // Src: Set the first half of values to 0, the second half to the maximum. 
@@ -677,15 +668,15 @@ void SubpelVarianceTest::ExtremeRefTest() { ASM_REGISTER_STATE_CHECK( var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1)); const unsigned int var2 = - subpel_variance_ref(ref_, src_, log2width_, log2height_, - x, y, &sse2, use_high_bit_depth_, bit_depth_); + subpel_variance_ref(ref_, src_, log2width_, log2height_, x, y, &sse2, + use_high_bit_depth_, bit_depth_); EXPECT_EQ(sse1, sse2) << "for xoffset " << x << " and yoffset " << y; EXPECT_EQ(var1, var2) << "for xoffset " << x << " and yoffset " << y; } } } -template<> +template <> void SubpelVarianceTest::RefTest() { for (int x = 0; x < 8; ++x) { for (int y = 0; y < 8; ++y) { @@ -708,32 +699,30 @@ void SubpelVarianceTest::RefTest() { } #endif // CONFIG_VP9_HIGHBITDEPTH } - unsigned int sse1, sse2; - unsigned int var1; - ASM_REGISTER_STATE_CHECK( - var1 = subpel_variance_(ref_, width_ + 1, x, y, - src_, width_, &sse1, sec_)); - const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_, - log2width_, log2height_, - x, y, &sse2, - use_high_bit_depth_, - bit_depth_); + uint32_t sse1, sse2; + uint32_t var1, var2; + ASM_REGISTER_STATE_CHECK(var1 = + subpel_variance_(ref_, width_ + 1, x, y, + src_, width_, &sse1, sec_)); + var2 = subpel_avg_variance_ref(ref_, src_, sec_, log2width_, log2height_, + x, y, &sse2, use_high_bit_depth_, + static_cast(bit_depth_)); EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y; EXPECT_EQ(var1, var2) << "at position " << x << ", " << y; } } } -typedef MseTest VpxSseTest; -typedef MseTest VpxMseTest; -typedef VarianceTest VpxVarianceTest; +typedef MainTestClass VpxSseTest; +typedef MainTestClass VpxMseTest; +typedef MainTestClass VpxVarianceTest; typedef SubpelVarianceTest VpxSubpelVarianceTest; typedef SubpelVarianceTest VpxSubpelAvgVarianceTest; -TEST_P(VpxSseTest, Ref_sse) { RefTest_sse(); } -TEST_P(VpxSseTest, Max_sse) { MaxTest_sse(); } -TEST_P(VpxMseTest, Ref_mse) { RefTest_mse(); } -TEST_P(VpxMseTest, Max_mse) { MaxTest_mse(); } 
+TEST_P(VpxSseTest, RefSse) { RefTestSse(); } +TEST_P(VpxSseTest, MaxSse) { MaxTestSse(); } +TEST_P(VpxMseTest, RefMse) { RefTestMse(); } +TEST_P(VpxMseTest, MaxMse) { MaxTestMse(); } TEST_P(VpxVarianceTest, Zero) { ZeroTest(); } TEST_P(VpxVarianceTest, Ref) { RefTest(); } TEST_P(VpxVarianceTest, RefStride) { RefStrideTest(); } @@ -747,31 +736,34 @@ TEST_P(VpxSubpelAvgVarianceTest, Ref) { RefTest(); } INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest, ::testing::Values(vpx_get_mb_ss_c)); +typedef TestParams SseParams; INSTANTIATE_TEST_CASE_P(C, VpxSseTest, - ::testing::Values(make_tuple(2, 2, - &vpx_get4x4sse_cs_c))); + ::testing::Values(SseParams(2, 2, + &vpx_get4x4sse_cs_c))); +typedef TestParams MseParams; INSTANTIATE_TEST_CASE_P(C, VpxMseTest, - ::testing::Values(make_tuple(4, 4, &vpx_mse16x16_c), - make_tuple(4, 3, &vpx_mse16x8_c), - make_tuple(3, 4, &vpx_mse8x16_c), - make_tuple(3, 3, &vpx_mse8x8_c))); + ::testing::Values(MseParams(4, 4, &vpx_mse16x16_c), + MseParams(4, 3, &vpx_mse16x8_c), + MseParams(3, 4, &vpx_mse8x16_c), + MseParams(3, 3, &vpx_mse8x8_c))); +typedef TestParams VarianceParams; INSTANTIATE_TEST_CASE_P( C, VpxVarianceTest, - ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_c, 0), - make_tuple(6, 5, &vpx_variance64x32_c, 0), - make_tuple(5, 6, &vpx_variance32x64_c, 0), - make_tuple(5, 5, &vpx_variance32x32_c, 0), - make_tuple(5, 4, &vpx_variance32x16_c, 0), - make_tuple(4, 5, &vpx_variance16x32_c, 0), - make_tuple(4, 4, &vpx_variance16x16_c, 0), - make_tuple(4, 3, &vpx_variance16x8_c, 0), - make_tuple(3, 4, &vpx_variance8x16_c, 0), - make_tuple(3, 3, &vpx_variance8x8_c, 0), - make_tuple(3, 2, &vpx_variance8x4_c, 0), - make_tuple(2, 3, &vpx_variance4x8_c, 0), - make_tuple(2, 2, &vpx_variance4x4_c, 0))); + ::testing::Values(VarianceParams(6, 6, &vpx_variance64x64_c), + VarianceParams(6, 5, &vpx_variance64x32_c), + VarianceParams(5, 6, &vpx_variance32x64_c), + VarianceParams(5, 5, &vpx_variance32x32_c), + VarianceParams(5, 4, &vpx_variance32x16_c), 
+ VarianceParams(4, 5, &vpx_variance16x32_c), + VarianceParams(4, 4, &vpx_variance16x16_c), + VarianceParams(4, 3, &vpx_variance16x8_c), + VarianceParams(3, 4, &vpx_variance8x16_c), + VarianceParams(3, 3, &vpx_variance8x8_c), + VarianceParams(3, 2, &vpx_variance8x4_c), + VarianceParams(2, 3, &vpx_variance4x8_c), + VarianceParams(2, 2, &vpx_variance4x4_c))); INSTANTIATE_TEST_CASE_P( C, VpxSubpelVarianceTest, @@ -806,14 +798,13 @@ INSTANTIATE_TEST_CASE_P( make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_c, 0))); #if CONFIG_VP9_HIGHBITDEPTH -typedef MseTest VpxHBDMseTest; -typedef VarianceTest VpxHBDVarianceTest; +typedef MainTestClass VpxHBDMseTest; +typedef MainTestClass VpxHBDVarianceTest; typedef SubpelVarianceTest VpxHBDSubpelVarianceTest; -typedef SubpelVarianceTest - VpxHBDSubpelAvgVarianceTest; +typedef SubpelVarianceTest VpxHBDSubpelAvgVarianceTest; -TEST_P(VpxHBDMseTest, Ref_mse) { RefTest_mse(); } -TEST_P(VpxHBDMseTest, Max_mse) { MaxTest_mse(); } +TEST_P(VpxHBDMseTest, RefMse) { RefTestMse(); } +TEST_P(VpxHBDMseTest, MaxMse) { MaxTestMse(); } TEST_P(VpxHBDVarianceTest, Zero) { ZeroTest(); } TEST_P(VpxHBDVarianceTest, Ref) { RefTest(); } TEST_P(VpxHBDVarianceTest, RefStride) { RefStrideTest(); } @@ -841,45 +832,45 @@ INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P( C, VpxHBDVarianceTest, - ::testing::Values(make_tuple(6, 6, &vpx_highbd_12_variance64x64_c, 12), - make_tuple(6, 5, &vpx_highbd_12_variance64x32_c, 12), - make_tuple(5, 6, &vpx_highbd_12_variance32x64_c, 12), - make_tuple(5, 5, &vpx_highbd_12_variance32x32_c, 12), - make_tuple(5, 4, &vpx_highbd_12_variance32x16_c, 12), - make_tuple(4, 5, &vpx_highbd_12_variance16x32_c, 12), - make_tuple(4, 4, &vpx_highbd_12_variance16x16_c, 12), - make_tuple(4, 3, &vpx_highbd_12_variance16x8_c, 12), - make_tuple(3, 4, &vpx_highbd_12_variance8x16_c, 12), - make_tuple(3, 3, &vpx_highbd_12_variance8x8_c, 12), - make_tuple(3, 2, &vpx_highbd_12_variance8x4_c, 12), - make_tuple(2, 3, &vpx_highbd_12_variance4x8_c, 12), 
- make_tuple(2, 2, &vpx_highbd_12_variance4x4_c, 12), - make_tuple(6, 6, &vpx_highbd_10_variance64x64_c, 10), - make_tuple(6, 5, &vpx_highbd_10_variance64x32_c, 10), - make_tuple(5, 6, &vpx_highbd_10_variance32x64_c, 10), - make_tuple(5, 5, &vpx_highbd_10_variance32x32_c, 10), - make_tuple(5, 4, &vpx_highbd_10_variance32x16_c, 10), - make_tuple(4, 5, &vpx_highbd_10_variance16x32_c, 10), - make_tuple(4, 4, &vpx_highbd_10_variance16x16_c, 10), - make_tuple(4, 3, &vpx_highbd_10_variance16x8_c, 10), - make_tuple(3, 4, &vpx_highbd_10_variance8x16_c, 10), - make_tuple(3, 3, &vpx_highbd_10_variance8x8_c, 10), - make_tuple(3, 2, &vpx_highbd_10_variance8x4_c, 10), - make_tuple(2, 3, &vpx_highbd_10_variance4x8_c, 10), - make_tuple(2, 2, &vpx_highbd_10_variance4x4_c, 10), - make_tuple(6, 6, &vpx_highbd_8_variance64x64_c, 8), - make_tuple(6, 5, &vpx_highbd_8_variance64x32_c, 8), - make_tuple(5, 6, &vpx_highbd_8_variance32x64_c, 8), - make_tuple(5, 5, &vpx_highbd_8_variance32x32_c, 8), - make_tuple(5, 4, &vpx_highbd_8_variance32x16_c, 8), - make_tuple(4, 5, &vpx_highbd_8_variance16x32_c, 8), - make_tuple(4, 4, &vpx_highbd_8_variance16x16_c, 8), - make_tuple(4, 3, &vpx_highbd_8_variance16x8_c, 8), - make_tuple(3, 4, &vpx_highbd_8_variance8x16_c, 8), - make_tuple(3, 3, &vpx_highbd_8_variance8x8_c, 8), - make_tuple(3, 2, &vpx_highbd_8_variance8x4_c, 8), - make_tuple(2, 3, &vpx_highbd_8_variance4x8_c, 8), - make_tuple(2, 2, &vpx_highbd_8_variance4x4_c, 8))); + ::testing::Values(VarianceParams(6, 6, &vpx_highbd_12_variance64x64_c, 12), + VarianceParams(6, 5, &vpx_highbd_12_variance64x32_c, 12), + VarianceParams(5, 6, &vpx_highbd_12_variance32x64_c, 12), + VarianceParams(5, 5, &vpx_highbd_12_variance32x32_c, 12), + VarianceParams(5, 4, &vpx_highbd_12_variance32x16_c, 12), + VarianceParams(4, 5, &vpx_highbd_12_variance16x32_c, 12), + VarianceParams(4, 4, &vpx_highbd_12_variance16x16_c, 12), + VarianceParams(4, 3, &vpx_highbd_12_variance16x8_c, 12), + VarianceParams(3, 4, 
&vpx_highbd_12_variance8x16_c, 12), + VarianceParams(3, 3, &vpx_highbd_12_variance8x8_c, 12), + VarianceParams(3, 2, &vpx_highbd_12_variance8x4_c, 12), + VarianceParams(2, 3, &vpx_highbd_12_variance4x8_c, 12), + VarianceParams(2, 2, &vpx_highbd_12_variance4x4_c, 12), + VarianceParams(6, 6, &vpx_highbd_10_variance64x64_c, 10), + VarianceParams(6, 5, &vpx_highbd_10_variance64x32_c, 10), + VarianceParams(5, 6, &vpx_highbd_10_variance32x64_c, 10), + VarianceParams(5, 5, &vpx_highbd_10_variance32x32_c, 10), + VarianceParams(5, 4, &vpx_highbd_10_variance32x16_c, 10), + VarianceParams(4, 5, &vpx_highbd_10_variance16x32_c, 10), + VarianceParams(4, 4, &vpx_highbd_10_variance16x16_c, 10), + VarianceParams(4, 3, &vpx_highbd_10_variance16x8_c, 10), + VarianceParams(3, 4, &vpx_highbd_10_variance8x16_c, 10), + VarianceParams(3, 3, &vpx_highbd_10_variance8x8_c, 10), + VarianceParams(3, 2, &vpx_highbd_10_variance8x4_c, 10), + VarianceParams(2, 3, &vpx_highbd_10_variance4x8_c, 10), + VarianceParams(2, 2, &vpx_highbd_10_variance4x4_c, 10), + VarianceParams(6, 6, &vpx_highbd_8_variance64x64_c, 8), + VarianceParams(6, 5, &vpx_highbd_8_variance64x32_c, 8), + VarianceParams(5, 6, &vpx_highbd_8_variance32x64_c, 8), + VarianceParams(5, 5, &vpx_highbd_8_variance32x32_c, 8), + VarianceParams(5, 4, &vpx_highbd_8_variance32x16_c, 8), + VarianceParams(4, 5, &vpx_highbd_8_variance16x32_c, 8), + VarianceParams(4, 4, &vpx_highbd_8_variance16x16_c, 8), + VarianceParams(4, 3, &vpx_highbd_8_variance16x8_c, 8), + VarianceParams(3, 4, &vpx_highbd_8_variance8x16_c, 8), + VarianceParams(3, 3, &vpx_highbd_8_variance8x8_c, 8), + VarianceParams(3, 2, &vpx_highbd_8_variance8x4_c, 8), + VarianceParams(2, 3, &vpx_highbd_8_variance4x8_c, 8), + VarianceParams(2, 2, &vpx_highbd_8_variance4x4_c, 8))); INSTANTIATE_TEST_CASE_P( C, VpxHBDSubpelVarianceTest, @@ -968,57 +959,32 @@ INSTANTIATE_TEST_CASE_P( make_tuple(2, 2, &vpx_highbd_12_sub_pixel_avg_variance4x4_c, 12))); #endif // CONFIG_VP9_HIGHBITDEPTH -#if 
HAVE_MMX -INSTANTIATE_TEST_CASE_P(MMX, VpxMseTest, - ::testing::Values(make_tuple(4, 4, &vpx_mse16x16_mmx))); - -INSTANTIATE_TEST_CASE_P(MMX, SumOfSquaresTest, - ::testing::Values(vpx_get_mb_ss_mmx)); - -INSTANTIATE_TEST_CASE_P( - MMX, VpxVarianceTest, - ::testing::Values(make_tuple(4, 4, &vpx_variance16x16_mmx, 0), - make_tuple(4, 3, &vpx_variance16x8_mmx, 0), - make_tuple(3, 4, &vpx_variance8x16_mmx, 0), - make_tuple(3, 3, &vpx_variance8x8_mmx, 0), - make_tuple(2, 2, &vpx_variance4x4_mmx, 0))); - -INSTANTIATE_TEST_CASE_P( - MMX, VpxSubpelVarianceTest, - ::testing::Values(make_tuple(4, 4, &vpx_sub_pixel_variance16x16_mmx, 0), - make_tuple(4, 3, &vpx_sub_pixel_variance16x8_mmx, 0), - make_tuple(3, 4, &vpx_sub_pixel_variance8x16_mmx, 0), - make_tuple(3, 3, &vpx_sub_pixel_variance8x8_mmx, 0), - make_tuple(2, 2, &vpx_sub_pixel_variance4x4_mmx, 0))); -#endif // HAVE_MMX - #if HAVE_SSE2 INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest, ::testing::Values(vpx_get_mb_ss_sse2)); INSTANTIATE_TEST_CASE_P(SSE2, VpxMseTest, - ::testing::Values(make_tuple(4, 4, &vpx_mse16x16_sse2), - make_tuple(4, 3, &vpx_mse16x8_sse2), - make_tuple(3, 4, &vpx_mse8x16_sse2), - make_tuple(3, 3, &vpx_mse8x8_sse2))); + ::testing::Values(MseParams(4, 4, &vpx_mse16x16_sse2), + MseParams(4, 3, &vpx_mse16x8_sse2), + MseParams(3, 4, &vpx_mse8x16_sse2), + MseParams(3, 3, &vpx_mse8x8_sse2))); INSTANTIATE_TEST_CASE_P( SSE2, VpxVarianceTest, - ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_sse2, 0), - make_tuple(6, 5, &vpx_variance64x32_sse2, 0), - make_tuple(5, 6, &vpx_variance32x64_sse2, 0), - make_tuple(5, 5, &vpx_variance32x32_sse2, 0), - make_tuple(5, 4, &vpx_variance32x16_sse2, 0), - make_tuple(4, 5, &vpx_variance16x32_sse2, 0), - make_tuple(4, 4, &vpx_variance16x16_sse2, 0), - make_tuple(4, 3, &vpx_variance16x8_sse2, 0), - make_tuple(3, 4, &vpx_variance8x16_sse2, 0), - make_tuple(3, 3, &vpx_variance8x8_sse2, 0), - make_tuple(3, 2, &vpx_variance8x4_sse2, 0), - make_tuple(2, 3, 
&vpx_variance4x8_sse2, 0), - make_tuple(2, 2, &vpx_variance4x4_sse2, 0))); + ::testing::Values(VarianceParams(6, 6, &vpx_variance64x64_sse2), + VarianceParams(6, 5, &vpx_variance64x32_sse2), + VarianceParams(5, 6, &vpx_variance32x64_sse2), + VarianceParams(5, 5, &vpx_variance32x32_sse2), + VarianceParams(5, 4, &vpx_variance32x16_sse2), + VarianceParams(4, 5, &vpx_variance16x32_sse2), + VarianceParams(4, 4, &vpx_variance16x16_sse2), + VarianceParams(4, 3, &vpx_variance16x8_sse2), + VarianceParams(3, 4, &vpx_variance8x16_sse2), + VarianceParams(3, 3, &vpx_variance8x8_sse2), + VarianceParams(3, 2, &vpx_variance8x4_sse2), + VarianceParams(2, 3, &vpx_variance4x8_sse2), + VarianceParams(2, 2, &vpx_variance4x4_sse2))); -#if CONFIG_USE_X86INC INSTANTIATE_TEST_CASE_P( SSE2, VpxSubpelVarianceTest, ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_sse2, 0), @@ -1032,8 +998,8 @@ INSTANTIATE_TEST_CASE_P( make_tuple(3, 4, &vpx_sub_pixel_variance8x16_sse2, 0), make_tuple(3, 3, &vpx_sub_pixel_variance8x8_sse2, 0), make_tuple(3, 2, &vpx_sub_pixel_variance8x4_sse2, 0), - make_tuple(2, 3, &vpx_sub_pixel_variance4x8_sse, 0), - make_tuple(2, 2, &vpx_sub_pixel_variance4x4_sse, 0))); + make_tuple(2, 3, &vpx_sub_pixel_variance4x8_sse2, 0), + make_tuple(2, 2, &vpx_sub_pixel_variance4x4_sse2, 0))); INSTANTIATE_TEST_CASE_P( SSE2, VpxSubpelAvgVarianceTest, @@ -1049,62 +1015,61 @@ INSTANTIATE_TEST_CASE_P( make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_sse2, 0), make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_sse2, 0), make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_sse2, 0), - make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_sse, 0), - make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_sse, 0))); -#endif // CONFIG_USE_X86INC + make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_sse2, 0), + make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_sse2, 0))); #if CONFIG_VP9_HIGHBITDEPTH /* TODO(debargha): This test does not support the highbd version INSTANTIATE_TEST_CASE_P( SSE2, VpxHBDMseTest, 
- ::testing::Values(make_tuple(4, 4, &vpx_highbd_12_mse16x16_sse2), - make_tuple(4, 3, &vpx_highbd_12_mse16x8_sse2), - make_tuple(3, 4, &vpx_highbd_12_mse8x16_sse2), - make_tuple(3, 3, &vpx_highbd_12_mse8x8_sse2), - make_tuple(4, 4, &vpx_highbd_10_mse16x16_sse2), - make_tuple(4, 3, &vpx_highbd_10_mse16x8_sse2), - make_tuple(3, 4, &vpx_highbd_10_mse8x16_sse2), - make_tuple(3, 3, &vpx_highbd_10_mse8x8_sse2), - make_tuple(4, 4, &vpx_highbd_8_mse16x16_sse2), - make_tuple(4, 3, &vpx_highbd_8_mse16x8_sse2), - make_tuple(3, 4, &vpx_highbd_8_mse8x16_sse2), - make_tuple(3, 3, &vpx_highbd_8_mse8x8_sse2))); + ::testing::Values(MseParams(4, 4, &vpx_highbd_12_mse16x16_sse2), + MseParams(4, 3, &vpx_highbd_12_mse16x8_sse2), + MseParams(3, 4, &vpx_highbd_12_mse8x16_sse2), + MseParams(3, 3, &vpx_highbd_12_mse8x8_sse2), + MseParams(4, 4, &vpx_highbd_10_mse16x16_sse2), + MseParams(4, 3, &vpx_highbd_10_mse16x8_sse2), + MseParams(3, 4, &vpx_highbd_10_mse8x16_sse2), + MseParams(3, 3, &vpx_highbd_10_mse8x8_sse2), + MseParams(4, 4, &vpx_highbd_8_mse16x16_sse2), + MseParams(4, 3, &vpx_highbd_8_mse16x8_sse2), + MseParams(3, 4, &vpx_highbd_8_mse8x16_sse2), + MseParams(3, 3, &vpx_highbd_8_mse8x8_sse2))); */ INSTANTIATE_TEST_CASE_P( SSE2, VpxHBDVarianceTest, - ::testing::Values(make_tuple(6, 6, &vpx_highbd_12_variance64x64_sse2, 12), - make_tuple(6, 5, &vpx_highbd_12_variance64x32_sse2, 12), - make_tuple(5, 6, &vpx_highbd_12_variance32x64_sse2, 12), - make_tuple(5, 5, &vpx_highbd_12_variance32x32_sse2, 12), - make_tuple(5, 4, &vpx_highbd_12_variance32x16_sse2, 12), - make_tuple(4, 5, &vpx_highbd_12_variance16x32_sse2, 12), - make_tuple(4, 4, &vpx_highbd_12_variance16x16_sse2, 12), - make_tuple(4, 3, &vpx_highbd_12_variance16x8_sse2, 12), - make_tuple(3, 4, &vpx_highbd_12_variance8x16_sse2, 12), - make_tuple(3, 3, &vpx_highbd_12_variance8x8_sse2, 12), - make_tuple(6, 6, &vpx_highbd_10_variance64x64_sse2, 10), - make_tuple(6, 5, &vpx_highbd_10_variance64x32_sse2, 10), - make_tuple(5, 6, 
&vpx_highbd_10_variance32x64_sse2, 10), - make_tuple(5, 5, &vpx_highbd_10_variance32x32_sse2, 10), - make_tuple(5, 4, &vpx_highbd_10_variance32x16_sse2, 10), - make_tuple(4, 5, &vpx_highbd_10_variance16x32_sse2, 10), - make_tuple(4, 4, &vpx_highbd_10_variance16x16_sse2, 10), - make_tuple(4, 3, &vpx_highbd_10_variance16x8_sse2, 10), - make_tuple(3, 4, &vpx_highbd_10_variance8x16_sse2, 10), - make_tuple(3, 3, &vpx_highbd_10_variance8x8_sse2, 10), - make_tuple(6, 6, &vpx_highbd_8_variance64x64_sse2, 8), - make_tuple(6, 5, &vpx_highbd_8_variance64x32_sse2, 8), - make_tuple(5, 6, &vpx_highbd_8_variance32x64_sse2, 8), - make_tuple(5, 5, &vpx_highbd_8_variance32x32_sse2, 8), - make_tuple(5, 4, &vpx_highbd_8_variance32x16_sse2, 8), - make_tuple(4, 5, &vpx_highbd_8_variance16x32_sse2, 8), - make_tuple(4, 4, &vpx_highbd_8_variance16x16_sse2, 8), - make_tuple(4, 3, &vpx_highbd_8_variance16x8_sse2, 8), - make_tuple(3, 4, &vpx_highbd_8_variance8x16_sse2, 8), - make_tuple(3, 3, &vpx_highbd_8_variance8x8_sse2, 8))); + ::testing::Values( + VarianceParams(6, 6, &vpx_highbd_12_variance64x64_sse2, 12), + VarianceParams(6, 5, &vpx_highbd_12_variance64x32_sse2, 12), + VarianceParams(5, 6, &vpx_highbd_12_variance32x64_sse2, 12), + VarianceParams(5, 5, &vpx_highbd_12_variance32x32_sse2, 12), + VarianceParams(5, 4, &vpx_highbd_12_variance32x16_sse2, 12), + VarianceParams(4, 5, &vpx_highbd_12_variance16x32_sse2, 12), + VarianceParams(4, 4, &vpx_highbd_12_variance16x16_sse2, 12), + VarianceParams(4, 3, &vpx_highbd_12_variance16x8_sse2, 12), + VarianceParams(3, 4, &vpx_highbd_12_variance8x16_sse2, 12), + VarianceParams(3, 3, &vpx_highbd_12_variance8x8_sse2, 12), + VarianceParams(6, 6, &vpx_highbd_10_variance64x64_sse2, 10), + VarianceParams(6, 5, &vpx_highbd_10_variance64x32_sse2, 10), + VarianceParams(5, 6, &vpx_highbd_10_variance32x64_sse2, 10), + VarianceParams(5, 5, &vpx_highbd_10_variance32x32_sse2, 10), + VarianceParams(5, 4, &vpx_highbd_10_variance32x16_sse2, 10), + VarianceParams(4, 
5, &vpx_highbd_10_variance16x32_sse2, 10), + VarianceParams(4, 4, &vpx_highbd_10_variance16x16_sse2, 10), + VarianceParams(4, 3, &vpx_highbd_10_variance16x8_sse2, 10), + VarianceParams(3, 4, &vpx_highbd_10_variance8x16_sse2, 10), + VarianceParams(3, 3, &vpx_highbd_10_variance8x8_sse2, 10), + VarianceParams(6, 6, &vpx_highbd_8_variance64x64_sse2, 8), + VarianceParams(6, 5, &vpx_highbd_8_variance64x32_sse2, 8), + VarianceParams(5, 6, &vpx_highbd_8_variance32x64_sse2, 8), + VarianceParams(5, 5, &vpx_highbd_8_variance32x32_sse2, 8), + VarianceParams(5, 4, &vpx_highbd_8_variance32x16_sse2, 8), + VarianceParams(4, 5, &vpx_highbd_8_variance16x32_sse2, 8), + VarianceParams(4, 4, &vpx_highbd_8_variance16x16_sse2, 8), + VarianceParams(4, 3, &vpx_highbd_8_variance16x8_sse2, 8), + VarianceParams(3, 4, &vpx_highbd_8_variance8x16_sse2, 8), + VarianceParams(3, 3, &vpx_highbd_8_variance8x8_sse2, 8))); -#if CONFIG_USE_X86INC INSTANTIATE_TEST_CASE_P( SSE2, VpxHBDSubpelVarianceTest, ::testing::Values( @@ -1178,12 +1143,10 @@ INSTANTIATE_TEST_CASE_P( make_tuple(3, 4, &vpx_highbd_8_sub_pixel_avg_variance8x16_sse2, 8), make_tuple(3, 3, &vpx_highbd_8_sub_pixel_avg_variance8x8_sse2, 8), make_tuple(3, 2, &vpx_highbd_8_sub_pixel_avg_variance8x4_sse2, 8))); -#endif // CONFIG_USE_X86INC #endif // CONFIG_VP9_HIGHBITDEPTH #endif // HAVE_SSE2 #if HAVE_SSSE3 -#if CONFIG_USE_X86INC INSTANTIATE_TEST_CASE_P( SSSE3, VpxSubpelVarianceTest, ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_ssse3, 0), @@ -1216,21 +1179,19 @@ INSTANTIATE_TEST_CASE_P( make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_ssse3, 0), make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_ssse3, 0), make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_ssse3, 0))); -#endif // CONFIG_USE_X86INC #endif // HAVE_SSSE3 #if HAVE_AVX2 INSTANTIATE_TEST_CASE_P(AVX2, VpxMseTest, - ::testing::Values(make_tuple(4, 4, - &vpx_mse16x16_avx2))); + ::testing::Values(MseParams(4, 4, &vpx_mse16x16_avx2))); INSTANTIATE_TEST_CASE_P( AVX2, 
VpxVarianceTest, - ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_avx2, 0), - make_tuple(6, 5, &vpx_variance64x32_avx2, 0), - make_tuple(5, 5, &vpx_variance32x32_avx2, 0), - make_tuple(5, 4, &vpx_variance32x16_avx2, 0), - make_tuple(4, 4, &vpx_variance16x16_avx2, 0))); + ::testing::Values(VarianceParams(6, 6, &vpx_variance64x64_avx2), + VarianceParams(6, 5, &vpx_variance64x32_avx2), + VarianceParams(5, 5, &vpx_variance32x32_avx2), + VarianceParams(5, 4, &vpx_variance32x16_avx2), + VarianceParams(4, 4, &vpx_variance16x16_avx2))); INSTANTIATE_TEST_CASE_P( AVX2, VpxSubpelVarianceTest, @@ -1244,41 +1205,24 @@ INSTANTIATE_TEST_CASE_P( make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_avx2, 0))); #endif // HAVE_AVX2 -#if HAVE_MEDIA -INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest, - ::testing::Values(make_tuple(4, 4, - &vpx_mse16x16_media))); - -INSTANTIATE_TEST_CASE_P( - MEDIA, VpxVarianceTest, - ::testing::Values(make_tuple(4, 4, &vpx_variance16x16_media, 0), - make_tuple(3, 3, &vpx_variance8x8_media, 0))); - -INSTANTIATE_TEST_CASE_P( - MEDIA, VpxSubpelVarianceTest, - ::testing::Values(make_tuple(4, 4, &vpx_sub_pixel_variance16x16_media, 0), - make_tuple(3, 3, &vpx_sub_pixel_variance8x8_media, 0))); -#endif // HAVE_MEDIA - #if HAVE_NEON INSTANTIATE_TEST_CASE_P(NEON, VpxSseTest, - ::testing::Values(make_tuple(2, 2, - &vpx_get4x4sse_cs_neon))); + ::testing::Values(SseParams(2, 2, + &vpx_get4x4sse_cs_neon))); INSTANTIATE_TEST_CASE_P(NEON, VpxMseTest, - ::testing::Values(make_tuple(4, 4, - &vpx_mse16x16_neon))); + ::testing::Values(MseParams(4, 4, &vpx_mse16x16_neon))); INSTANTIATE_TEST_CASE_P( NEON, VpxVarianceTest, - ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_neon, 0), - make_tuple(6, 5, &vpx_variance64x32_neon, 0), - make_tuple(5, 6, &vpx_variance32x64_neon, 0), - make_tuple(5, 5, &vpx_variance32x32_neon, 0), - make_tuple(4, 4, &vpx_variance16x16_neon, 0), - make_tuple(4, 3, &vpx_variance16x8_neon, 0), - make_tuple(3, 4, &vpx_variance8x16_neon, 0), - 
make_tuple(3, 3, &vpx_variance8x8_neon, 0))); + ::testing::Values(VarianceParams(6, 6, &vpx_variance64x64_neon), + VarianceParams(6, 5, &vpx_variance64x32_neon), + VarianceParams(5, 6, &vpx_variance32x64_neon), + VarianceParams(5, 5, &vpx_variance32x32_neon), + VarianceParams(4, 4, &vpx_variance16x16_neon), + VarianceParams(4, 3, &vpx_variance16x8_neon), + VarianceParams(3, 4, &vpx_variance8x16_neon), + VarianceParams(3, 3, &vpx_variance8x8_neon))); INSTANTIATE_TEST_CASE_P( NEON, VpxSubpelVarianceTest, @@ -1293,30 +1237,30 @@ INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest, ::testing::Values(vpx_get_mb_ss_msa)); INSTANTIATE_TEST_CASE_P(MSA, VpxSseTest, - ::testing::Values(make_tuple(2, 2, - &vpx_get4x4sse_cs_msa))); + ::testing::Values(SseParams(2, 2, + &vpx_get4x4sse_cs_msa))); INSTANTIATE_TEST_CASE_P(MSA, VpxMseTest, - ::testing::Values(make_tuple(4, 4, &vpx_mse16x16_msa), - make_tuple(4, 3, &vpx_mse16x8_msa), - make_tuple(3, 4, &vpx_mse8x16_msa), - make_tuple(3, 3, &vpx_mse8x8_msa))); + ::testing::Values(MseParams(4, 4, &vpx_mse16x16_msa), + MseParams(4, 3, &vpx_mse16x8_msa), + MseParams(3, 4, &vpx_mse8x16_msa), + MseParams(3, 3, &vpx_mse8x8_msa))); INSTANTIATE_TEST_CASE_P( MSA, VpxVarianceTest, - ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_msa, 0), - make_tuple(6, 5, &vpx_variance64x32_msa, 0), - make_tuple(5, 6, &vpx_variance32x64_msa, 0), - make_tuple(5, 5, &vpx_variance32x32_msa, 0), - make_tuple(5, 4, &vpx_variance32x16_msa, 0), - make_tuple(4, 5, &vpx_variance16x32_msa, 0), - make_tuple(4, 4, &vpx_variance16x16_msa, 0), - make_tuple(4, 3, &vpx_variance16x8_msa, 0), - make_tuple(3, 4, &vpx_variance8x16_msa, 0), - make_tuple(3, 3, &vpx_variance8x8_msa, 0), - make_tuple(3, 2, &vpx_variance8x4_msa, 0), - make_tuple(2, 3, &vpx_variance4x8_msa, 0), - make_tuple(2, 2, &vpx_variance4x4_msa, 0))); + ::testing::Values(VarianceParams(6, 6, &vpx_variance64x64_msa), + VarianceParams(6, 5, &vpx_variance64x32_msa), + VarianceParams(5, 6, &vpx_variance32x64_msa), 
+ VarianceParams(5, 5, &vpx_variance32x32_msa), + VarianceParams(5, 4, &vpx_variance32x16_msa), + VarianceParams(4, 5, &vpx_variance16x32_msa), + VarianceParams(4, 4, &vpx_variance16x16_msa), + VarianceParams(4, 3, &vpx_variance16x8_msa), + VarianceParams(3, 4, &vpx_variance8x16_msa), + VarianceParams(3, 3, &vpx_variance8x8_msa), + VarianceParams(3, 2, &vpx_variance8x4_msa), + VarianceParams(2, 3, &vpx_variance4x8_msa), + VarianceParams(2, 2, &vpx_variance4x4_msa))); INSTANTIATE_TEST_CASE_P( MSA, VpxSubpelVarianceTest, diff --git a/libs/libvpx/test/video_source.h b/libs/libvpx/test/video_source.h index ade323e7c3..424bb2cfb4 100644 --- a/libs/libvpx/test/video_source.h +++ b/libs/libvpx/test/video_source.h @@ -51,7 +51,7 @@ static std::string GetDataPath() { #undef TO_STRING #undef STRINGIFY -inline FILE *OpenTestDataFile(const std::string& file_name) { +inline FILE *OpenTestDataFile(const std::string &file_name) { const std::string path_to_source = GetDataPath() + "/" + file_name; return fopen(path_to_source.c_str(), "rb"); } @@ -76,21 +76,15 @@ static FILE *GetTempOutFile(std::string *file_name) { class TempOutFile { public: - TempOutFile() { - file_ = GetTempOutFile(&file_name_); - } + TempOutFile() { file_ = GetTempOutFile(&file_name_); } ~TempOutFile() { CloseFile(); if (!file_name_.empty()) { EXPECT_EQ(0, remove(file_name_.c_str())); } } - FILE *file() { - return file_; - } - const std::string& file_name() { - return file_name_; - } + FILE *file() { return file_; } + const std::string &file_name() { return file_name_; } protected: void CloseFile() { @@ -134,14 +128,10 @@ class VideoSource { virtual unsigned int limit() const = 0; }; - class DummyVideoSource : public VideoSource { public: DummyVideoSource() - : img_(NULL), - limit_(100), - width_(80), - height_(64), + : img_(NULL), limit_(100), width_(80), height_(64), format_(VPX_IMG_FMT_I420) { ReallocImage(); } @@ -158,9 +148,7 @@ class DummyVideoSource : public VideoSource { FillFrame(); } - virtual 
vpx_image_t *img() const { - return (frame_ < limit_) ? img_ : NULL; - } + virtual vpx_image_t *img() const { return (frame_ < limit_) ? img_ : NULL; } // Models a stream where Timebase = 1/FPS, so pts == frame. virtual vpx_codec_pts_t pts() const { return frame_; } @@ -168,7 +156,7 @@ class DummyVideoSource : public VideoSource { virtual unsigned long duration() const { return 1; } virtual vpx_rational_t timebase() const { - const vpx_rational_t t = {1, 30}; + const vpx_rational_t t = { 1, 30 }; return t; } @@ -176,9 +164,7 @@ class DummyVideoSource : public VideoSource { virtual unsigned int limit() const { return limit_; } - void set_limit(unsigned int limit) { - limit_ = limit; - } + void set_limit(unsigned int limit) { limit_ = limit; } void SetSize(unsigned int width, unsigned int height) { if (width != width_ || height != height_) { @@ -196,7 +182,9 @@ class DummyVideoSource : public VideoSource { } protected: - virtual void FillFrame() { if (img_) memset(img_->img_data, 0, raw_sz_); } + virtual void FillFrame() { + if (img_) memset(img_->img_data, 0, raw_sz_); + } void ReallocImage() { vpx_img_free(img_); @@ -205,7 +193,7 @@ class DummyVideoSource : public VideoSource { } vpx_image_t *img_; - size_t raw_sz_; + size_t raw_sz_; unsigned int limit_; unsigned int frame_; unsigned int width_; @@ -213,12 +201,10 @@ class DummyVideoSource : public VideoSource { vpx_img_fmt_t format_; }; - class RandomVideoSource : public DummyVideoSource { public: RandomVideoSource(int seed = ACMRandom::DeterministicSeed()) - : rnd_(seed), - seed_(seed) { } + : rnd_(seed), seed_(seed) {} protected: // Reset the RNG to get a matching stream for the second pass @@ -232,11 +218,11 @@ class RandomVideoSource : public DummyVideoSource { // than holding previous frames to encourage keyframes to be thrown. 
virtual void FillFrame() { if (img_) { - if (frame_ % 30 < 15) - for (size_t i = 0; i < raw_sz_; ++i) - img_->img_data[i] = rnd_.Rand8(); - else + if (frame_ % 30 < 15) { + for (size_t i = 0; i < raw_sz_; ++i) img_->img_data[i] = rnd_.Rand8(); + } else { memset(img_->img_data, 0, raw_sz_); + } } } diff --git a/libs/libvpx/test/vp10_dct_test.cc b/libs/libvpx/test/vp10_dct_test.cc deleted file mode 100644 index b2c301ae39..0000000000 --- a/libs/libvpx/test/vp10_dct_test.cc +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include -#include - -#include "third_party/googletest/src/include/gtest/gtest.h" -#include "test/acm_random.h" -#include "test/util.h" -#include "./vpx_config.h" -#include "vpx_ports/msvc.h" - -#undef CONFIG_COEFFICIENT_RANGE_CHECKING -#define CONFIG_COEFFICIENT_RANGE_CHECKING 1 -#include "vp10/encoder/dct.c" - -using libvpx_test::ACMRandom; - -namespace { -void reference_dct_1d(const double *in, double *out, int size) { - const double PI = 3.141592653589793238462643383279502884; - const double kInvSqrt2 = 0.707106781186547524400844362104; - for (int k = 0; k < size; ++k) { - out[k] = 0; - for (int n = 0; n < size; ++n) { - out[k] += in[n] * cos(PI * (2 * n + 1) * k / (2 * size)); - } - if (k == 0) - out[k] = out[k] * kInvSqrt2; - } -} - -typedef void (*FdctFuncRef)(const double *in, double *out, int size); -typedef void (*IdctFuncRef)(const double *in, double *out, int size); -typedef void (*FdctFunc)(const tran_low_t *in, tran_low_t *out); -typedef void (*IdctFunc)(const tran_low_t *in, tran_low_t *out); - -class TransTestBase { - public: - 
virtual ~TransTestBase() {} - - protected: - void RunFwdAccuracyCheck() { - tran_low_t *input = new tran_low_t[txfm_size_]; - tran_low_t *output = new tran_low_t[txfm_size_]; - double *ref_input = new double[txfm_size_]; - double *ref_output = new double[txfm_size_]; - - ACMRandom rnd(ACMRandom::DeterministicSeed()); - const int count_test_block = 5000; - for (int ti = 0; ti < count_test_block; ++ti) { - for (int ni = 0; ni < txfm_size_; ++ni) { - input[ni] = rnd.Rand8() - rnd.Rand8(); - ref_input[ni] = static_cast(input[ni]); - } - - fwd_txfm_(input, output); - fwd_txfm_ref_(ref_input, ref_output, txfm_size_); - - for (int ni = 0; ni < txfm_size_; ++ni) { - EXPECT_LE( - abs(output[ni] - static_cast(round(ref_output[ni]))), - max_error_); - } - } - - delete[] input; - delete[] output; - delete[] ref_input; - delete[] ref_output; - } - - double max_error_; - int txfm_size_; - FdctFunc fwd_txfm_; - FdctFuncRef fwd_txfm_ref_; -}; - -typedef std::tr1::tuple FdctParam; -class Vp10FwdTxfm - : public TransTestBase, - public ::testing::TestWithParam { - public: - virtual void SetUp() { - fwd_txfm_ = GET_PARAM(0); - fwd_txfm_ref_ = GET_PARAM(1); - txfm_size_ = GET_PARAM(2); - max_error_ = GET_PARAM(3); - } - virtual void TearDown() {} -}; - -TEST_P(Vp10FwdTxfm, RunFwdAccuracyCheck) { - RunFwdAccuracyCheck(); -} - -INSTANTIATE_TEST_CASE_P( - C, Vp10FwdTxfm, - ::testing::Values( - FdctParam(&fdct4, &reference_dct_1d, 4, 1), - FdctParam(&fdct8, &reference_dct_1d, 8, 1), - FdctParam(&fdct16, &reference_dct_1d, 16, 2))); -} // namespace diff --git a/libs/libvpx/test/vp10_inv_txfm_test.cc b/libs/libvpx/test/vp10_inv_txfm_test.cc deleted file mode 100644 index c49081ef85..0000000000 --- a/libs/libvpx/test/vp10_inv_txfm_test.cc +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Copyright (c) 2013 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include -#include - -#include "third_party/googletest/src/include/gtest/gtest.h" - -#include "./vp10_rtcd.h" -#include "./vpx_dsp_rtcd.h" -#include "test/acm_random.h" -#include "test/clear_system_state.h" -#include "test/register_state_check.h" -#include "test/util.h" -#include "vp10/common/blockd.h" -#include "vp10/common/scan.h" -#include "vpx/vpx_integer.h" -#include "vp10/common/vp10_inv_txfm.h" - -using libvpx_test::ACMRandom; - -namespace { -const double PI = 3.141592653589793238462643383279502884; -const double kInvSqrt2 = 0.707106781186547524400844362104; - -void reference_idct_1d(const double *in, double *out, int size) { - for (int n = 0; n < size; ++n) { - out[n] = 0; - for (int k = 0; k < size; ++k) { - if (k == 0) - out[n] += kInvSqrt2 * in[k] * cos(PI * (2 * n + 1) * k / (2 * size)); - else - out[n] += in[k] * cos(PI * (2 * n + 1) * k / (2 * size)); - } - } -} - -typedef void (*IdctFuncRef)(const double *in, double *out, int size); -typedef void (*IdctFunc)(const tran_low_t *in, tran_low_t *out); - -class TransTestBase { - public: - virtual ~TransTestBase() {} - - protected: - void RunInvAccuracyCheck() { - tran_low_t *input = new tran_low_t[txfm_size_]; - tran_low_t *output = new tran_low_t[txfm_size_]; - double *ref_input = new double[txfm_size_]; - double *ref_output = new double[txfm_size_]; - - ACMRandom rnd(ACMRandom::DeterministicSeed()); - const int count_test_block = 5000; - for (int ti = 0; ti < count_test_block; ++ti) { - for (int ni = 0; ni < txfm_size_; ++ni) { - input[ni] = rnd.Rand8() - rnd.Rand8(); - ref_input[ni] = static_cast(input[ni]); - } - - fwd_txfm_(input, output); - fwd_txfm_ref_(ref_input, ref_output, txfm_size_); - - for (int ni = 0; ni < txfm_size_; ++ni) { - EXPECT_LE( - abs(output[ni] - 
static_cast(round(ref_output[ni]))), - max_error_); - } - } - - delete[] input; - delete[] output; - delete[] ref_input; - delete[] ref_output; - } - - double max_error_; - int txfm_size_; - IdctFunc fwd_txfm_; - IdctFuncRef fwd_txfm_ref_; -}; - -typedef std::tr1::tuple IdctParam; -class Vp10InvTxfm - : public TransTestBase, - public ::testing::TestWithParam { - public: - virtual void SetUp() { - fwd_txfm_ = GET_PARAM(0); - fwd_txfm_ref_ = GET_PARAM(1); - txfm_size_ = GET_PARAM(2); - max_error_ = GET_PARAM(3); - } - virtual void TearDown() {} -}; - -TEST_P(Vp10InvTxfm, RunInvAccuracyCheck) { - RunInvAccuracyCheck(); -} - -INSTANTIATE_TEST_CASE_P( - C, Vp10InvTxfm, - ::testing::Values( - IdctParam(&vp10_idct4_c, &reference_idct_1d, 4, 1), - IdctParam(&vp10_idct8_c, &reference_idct_1d, 8, 2), - IdctParam(&vp10_idct16_c, &reference_idct_1d, 16, 4), - IdctParam(&vp10_idct32_c, &reference_idct_1d, 32, 6)) -); - -typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride); -typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride); -typedef std::tr1::tuple PartialInvTxfmParam; -const int kMaxNumCoeffs = 1024; -class Vp10PartialIDctTest - : public ::testing::TestWithParam { - public: - virtual ~Vp10PartialIDctTest() {} - virtual void SetUp() { - ftxfm_ = GET_PARAM(0); - full_itxfm_ = GET_PARAM(1); - partial_itxfm_ = GET_PARAM(2); - tx_size_ = GET_PARAM(3); - last_nonzero_ = GET_PARAM(4); - } - - virtual void TearDown() { libvpx_test::ClearSystemState(); } - - protected: - int last_nonzero_; - TX_SIZE tx_size_; - FwdTxfmFunc ftxfm_; - InvTxfmFunc full_itxfm_; - InvTxfmFunc partial_itxfm_; -}; - -TEST_P(Vp10PartialIDctTest, RunQuantCheck) { - ACMRandom rnd(ACMRandom::DeterministicSeed()); - int size; - switch (tx_size_) { - case TX_4X4: - size = 4; - break; - case TX_8X8: - size = 8; - break; - case TX_16X16: - size = 16; - break; - case TX_32X32: - size = 32; - break; - default: - FAIL() << "Wrong Size!"; - break; - } - DECLARE_ALIGNED(16, 
tran_low_t, test_coef_block1[kMaxNumCoeffs]); - DECLARE_ALIGNED(16, tran_low_t, test_coef_block2[kMaxNumCoeffs]); - DECLARE_ALIGNED(16, uint8_t, dst1[kMaxNumCoeffs]); - DECLARE_ALIGNED(16, uint8_t, dst2[kMaxNumCoeffs]); - - const int count_test_block = 1000; - const int block_size = size * size; - - DECLARE_ALIGNED(16, int16_t, input_extreme_block[kMaxNumCoeffs]); - DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kMaxNumCoeffs]); - - int max_error = 0; - for (int i = 0; i < count_test_block; ++i) { - // clear out destination buffer - memset(dst1, 0, sizeof(*dst1) * block_size); - memset(dst2, 0, sizeof(*dst2) * block_size); - memset(test_coef_block1, 0, sizeof(*test_coef_block1) * block_size); - memset(test_coef_block2, 0, sizeof(*test_coef_block2) * block_size); - - ACMRandom rnd(ACMRandom::DeterministicSeed()); - - for (int i = 0; i < count_test_block; ++i) { - // Initialize a test block with input range [-255, 255]. - if (i == 0) { - for (int j = 0; j < block_size; ++j) - input_extreme_block[j] = 255; - } else if (i == 1) { - for (int j = 0; j < block_size; ++j) - input_extreme_block[j] = -255; - } else { - for (int j = 0; j < block_size; ++j) { - input_extreme_block[j] = rnd.Rand8() % 2 ? 
255 : -255; - } - } - - ftxfm_(input_extreme_block, output_ref_block, size); - - // quantization with maximum allowed step sizes - test_coef_block1[0] = (output_ref_block[0] / 1336) * 1336; - for (int j = 1; j < last_nonzero_; ++j) - test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]] - = (output_ref_block[j] / 1828) * 1828; - } - - ASM_REGISTER_STATE_CHECK(full_itxfm_(test_coef_block1, dst1, size)); - ASM_REGISTER_STATE_CHECK(partial_itxfm_(test_coef_block1, dst2, size)); - - for (int j = 0; j < block_size; ++j) { - const int diff = dst1[j] - dst2[j]; - const int error = diff * diff; - if (max_error < error) - max_error = error; - } - } - - EXPECT_EQ(0, max_error) - << "Error: partial inverse transform produces different results"; -} - -TEST_P(Vp10PartialIDctTest, ResultsMatch) { - ACMRandom rnd(ACMRandom::DeterministicSeed()); - int size; - switch (tx_size_) { - case TX_4X4: - size = 4; - break; - case TX_8X8: - size = 8; - break; - case TX_16X16: - size = 16; - break; - case TX_32X32: - size = 32; - break; - default: - FAIL() << "Wrong Size!"; - break; - } - DECLARE_ALIGNED(16, tran_low_t, test_coef_block1[kMaxNumCoeffs]); - DECLARE_ALIGNED(16, tran_low_t, test_coef_block2[kMaxNumCoeffs]); - DECLARE_ALIGNED(16, uint8_t, dst1[kMaxNumCoeffs]); - DECLARE_ALIGNED(16, uint8_t, dst2[kMaxNumCoeffs]); - const int count_test_block = 1000; - const int max_coeff = 32766 / 4; - const int block_size = size * size; - int max_error = 0; - for (int i = 0; i < count_test_block; ++i) { - // clear out destination buffer - memset(dst1, 0, sizeof(*dst1) * block_size); - memset(dst2, 0, sizeof(*dst2) * block_size); - memset(test_coef_block1, 0, sizeof(*test_coef_block1) * block_size); - memset(test_coef_block2, 0, sizeof(*test_coef_block2) * block_size); - int max_energy_leftover = max_coeff * max_coeff; - for (int j = 0; j < last_nonzero_; ++j) { - int16_t coef = static_cast(sqrt(1.0 * max_energy_leftover) * - (rnd.Rand16() - 32768) / 65536); - max_energy_leftover -= coef * 
coef; - if (max_energy_leftover < 0) { - max_energy_leftover = 0; - coef = 0; - } - test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]] = coef; - } - - memcpy(test_coef_block2, test_coef_block1, - sizeof(*test_coef_block2) * block_size); - - ASM_REGISTER_STATE_CHECK(full_itxfm_(test_coef_block1, dst1, size)); - ASM_REGISTER_STATE_CHECK(partial_itxfm_(test_coef_block2, dst2, size)); - - for (int j = 0; j < block_size; ++j) { - const int diff = dst1[j] - dst2[j]; - const int error = diff * diff; - if (max_error < error) - max_error = error; - } - } - - EXPECT_EQ(0, max_error) - << "Error: partial inverse transform produces different results"; -} -using std::tr1::make_tuple; - -INSTANTIATE_TEST_CASE_P( - C, Vp10PartialIDctTest, - ::testing::Values( - make_tuple(&vpx_fdct32x32_c, - &vp10_idct32x32_1024_add_c, - &vp10_idct32x32_34_add_c, - TX_32X32, 34), - make_tuple(&vpx_fdct32x32_c, - &vp10_idct32x32_1024_add_c, - &vp10_idct32x32_1_add_c, - TX_32X32, 1), - make_tuple(&vpx_fdct16x16_c, - &vp10_idct16x16_256_add_c, - &vp10_idct16x16_10_add_c, - TX_16X16, 10), - make_tuple(&vpx_fdct16x16_c, - &vp10_idct16x16_256_add_c, - &vp10_idct16x16_1_add_c, - TX_16X16, 1), - make_tuple(&vpx_fdct8x8_c, - &vp10_idct8x8_64_add_c, - &vp10_idct8x8_12_add_c, - TX_8X8, 12), - make_tuple(&vpx_fdct8x8_c, - &vp10_idct8x8_64_add_c, - &vp10_idct8x8_1_add_c, - TX_8X8, 1), - make_tuple(&vpx_fdct4x4_c, - &vp10_idct4x4_16_add_c, - &vp10_idct4x4_1_add_c, - TX_4X4, 1))); -} // namespace diff --git a/libs/libvpx/test/vp8_boolcoder_test.cc b/libs/libvpx/test/vp8_boolcoder_test.cc index 02d7162ac8..9d81f9382a 100644 --- a/libs/libvpx/test/vp8_boolcoder_test.cc +++ b/libs/libvpx/test/vp8_boolcoder_test.cc @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include #include #include @@ -41,9 +40,9 @@ void encrypt_buffer(uint8_t *buffer, size_t size) { } } -void test_decrypt_cb(void *decrypt_state, const uint8_t *input, - uint8_t *output, int count) { - const size_t offset = input - reinterpret_cast(decrypt_state); +void test_decrypt_cb(void *decrypt_state, const uint8_t *input, uint8_t *output, + int count) { + const size_t offset = input - reinterpret_cast(decrypt_state); for (int i = 0; i < count; i++) { output[i] = input[i] ^ secret_key[(offset + i) & 15]; } @@ -56,12 +55,13 @@ using libvpx_test::ACMRandom; TEST(VP8, TestBitIO) { ACMRandom rnd(ACMRandom::DeterministicSeed()); for (int n = 0; n < num_tests; ++n) { - for (int method = 0; method <= 7; ++method) { // we generate various proba + for (int method = 0; method <= 7; ++method) { // we generate various proba const int kBitsToTest = 1000; uint8_t probas[kBitsToTest]; for (int i = 0; i < kBitsToTest; ++i) { const int parity = i & 1; + /* clang-format off */ probas[i] = (method == 0) ? 0 : (method == 1) ? 255 : (method == 2) ? 128 : @@ -72,6 +72,7 @@ TEST(VP8, TestBitIO) { (method == 6) ? (parity ? rnd(64) : 255 - rnd(64)) : (parity ? 
rnd(32) : 255 - rnd(32)); + /* clang-format on */ } for (int bit_method = 0; bit_method <= 3; ++bit_method) { const int random_seed = 6432; @@ -95,8 +96,7 @@ TEST(VP8, TestBitIO) { BOOL_DECODER br; encrypt_buffer(bw_buffer, kBufferSize); - vp8dx_start_decode(&br, bw_buffer, kBufferSize, - test_decrypt_cb, + vp8dx_start_decode(&br, bw_buffer, kBufferSize, test_decrypt_cb, reinterpret_cast(bw_buffer)); bit_rnd.Reset(random_seed); for (int i = 0; i < kBitsToTest; ++i) { @@ -106,9 +106,8 @@ TEST(VP8, TestBitIO) { bit = bit_rnd(2); } GTEST_ASSERT_EQ(vp8dx_decode_bool(&br, probas[i]), bit) - << "pos: "<< i << " / " << kBitsToTest - << " bit_method: " << bit_method - << " method: " << method; + << "pos: " << i << " / " << kBitsToTest + << " bit_method: " << bit_method << " method: " << method; } } } diff --git a/libs/libvpx/test/vp8_decrypt_test.cc b/libs/libvpx/test/vp8_decrypt_test.cc index 972a1d9a3d..bcac9d1a82 100644 --- a/libs/libvpx/test/vp8_decrypt_test.cc +++ b/libs/libvpx/test/vp8_decrypt_test.cc @@ -21,10 +21,8 @@ namespace { // with whatever internal state the decryptor uses. For testing we'll just // xor with a constant key, and decrypt_state will point to the start of // the original buffer. 
-const uint8_t test_key[16] = { - 0x01, 0x12, 0x23, 0x34, 0x45, 0x56, 0x67, 0x78, - 0x89, 0x9a, 0xab, 0xbc, 0xcd, 0xde, 0xef, 0xf0 -}; +const uint8_t test_key[16] = { 0x01, 0x12, 0x23, 0x34, 0x45, 0x56, 0x67, 0x78, + 0x89, 0x9a, 0xab, 0xbc, 0xcd, 0xde, 0xef, 0xf0 }; void encrypt_buffer(const uint8_t *src, uint8_t *dst, size_t size, ptrdiff_t offset) { @@ -33,8 +31,8 @@ void encrypt_buffer(const uint8_t *src, uint8_t *dst, size_t size, } } -void test_decrypt_cb(void *decrypt_state, const uint8_t *input, - uint8_t *output, int count) { +void test_decrypt_cb(void *decrypt_state, const uint8_t *input, uint8_t *output, + int count) { encrypt_buffer(input, output, count, input - reinterpret_cast(decrypt_state)); } diff --git a/libs/libvpx/test/vp8_denoiser_sse2_test.cc b/libs/libvpx/test/vp8_denoiser_sse2_test.cc index e8ca8d3986..2cbcf04149 100644 --- a/libs/libvpx/test/vp8_denoiser_sse2_test.cc +++ b/libs/libvpx/test/vp8_denoiser_sse2_test.cc @@ -32,9 +32,7 @@ class VP8DenoiserTest : public ::testing::TestWithParam { public: virtual ~VP8DenoiserTest() {} - virtual void SetUp() { - increase_denoising_ = GetParam(); - } + virtual void SetUp() { increase_denoising_ = GetParam(); } virtual void TearDown() { libvpx_test::ClearSystemState(); } @@ -71,8 +69,8 @@ TEST_P(VP8DenoiserTest, BitexactCheck) { sig_block_sse2[j] = sig_block_c[j] = rnd.Rand8(); // The pixels in mc_avg_block are generated by adding a random // number in range [-19, 19] to corresponding pixels in sig_block. - temp = sig_block_c[j] + (rnd.Rand8() % 2 == 0 ? -1 : 1) * - (rnd.Rand8() % 20); + temp = + sig_block_c[j] + (rnd.Rand8() % 2 == 0 ? -1 : 1) * (rnd.Rand8() % 20); // Clip. mc_avg_block[j] = (temp < 0) ? 0 : ((temp > 255) ? 
255 : temp); } diff --git a/libs/libvpx/test/vp8_fdct4x4_test.cc b/libs/libvpx/test/vp8_fdct4x4_test.cc index 11a653decc..da4f0caa1e 100644 --- a/libs/libvpx/test/vp8_fdct4x4_test.cc +++ b/libs/libvpx/test/vp8_fdct4x4_test.cc @@ -80,51 +80,59 @@ TEST(VP8FdctTest, SignBiasCheck) { for (int i = 0; i < count_test_block; ++i) { // Initialize a test block with input range [-255, 255]. - for (int j = 0; j < 16; ++j) + for (int j = 0; j < 16; ++j) { test_input_block[j] = rnd.Rand8() - rnd.Rand8(); + } vp8_short_fdct4x4_c(test_input_block, test_output_block, pitch); for (int j = 0; j < 16; ++j) { - if (test_output_block[j] < 0) + if (test_output_block[j] < 0) { ++count_sign_block[j][0]; - else if (test_output_block[j] > 0) + } else if (test_output_block[j] > 0) { ++count_sign_block[j][1]; + } } } bool bias_acceptable = true; - for (int j = 0; j < 16; ++j) - bias_acceptable = bias_acceptable && - (abs(count_sign_block[j][0] - count_sign_block[j][1]) < 10000); + for (int j = 0; j < 16; ++j) { + bias_acceptable = + bias_acceptable && + (abs(count_sign_block[j][0] - count_sign_block[j][1]) < 10000); + } EXPECT_EQ(true, bias_acceptable) - << "Error: 4x4 FDCT has a sign bias > 1% for input range [-255, 255]"; + << "Error: 4x4 FDCT has a sign bias > 1% for input range [-255, 255]"; memset(count_sign_block, 0, sizeof(count_sign_block)); for (int i = 0; i < count_test_block; ++i) { // Initialize a test block with input range [-15, 15]. 
- for (int j = 0; j < 16; ++j) + for (int j = 0; j < 16; ++j) { test_input_block[j] = (rnd.Rand8() >> 4) - (rnd.Rand8() >> 4); + } vp8_short_fdct4x4_c(test_input_block, test_output_block, pitch); for (int j = 0; j < 16; ++j) { - if (test_output_block[j] < 0) + if (test_output_block[j] < 0) { ++count_sign_block[j][0]; - else if (test_output_block[j] > 0) + } else if (test_output_block[j] > 0) { ++count_sign_block[j][1]; + } } } bias_acceptable = true; - for (int j = 0; j < 16; ++j) - bias_acceptable = bias_acceptable && - (abs(count_sign_block[j][0] - count_sign_block[j][1]) < 100000); + for (int j = 0; j < 16; ++j) { + bias_acceptable = + bias_acceptable && + (abs(count_sign_block[j][0] - count_sign_block[j][1]) < 100000); + } EXPECT_EQ(true, bias_acceptable) - << "Error: 4x4 FDCT has a sign bias > 10% for input range [-15, 15]"; + << "Error: 4x4 FDCT has a sign bias > 10% for input range [-15, 15]"; }; TEST(VP8FdctTest, RoundTripErrorCheck) { @@ -138,8 +146,9 @@ TEST(VP8FdctTest, RoundTripErrorCheck) { int16_t test_output_block[16]; // Initialize a test block with input range [-255, 255]. 
- for (int j = 0; j < 16; ++j) + for (int j = 0; j < 16; ++j) { test_input_block[j] = rnd.Rand8() - rnd.Rand8(); + } const int pitch = 8; vp8_short_fdct4x4_c(test_input_block, test_temp_block, pitch); @@ -148,17 +157,16 @@ TEST(VP8FdctTest, RoundTripErrorCheck) { for (int j = 0; j < 16; ++j) { const int diff = test_input_block[j] - test_output_block[j]; const int error = diff * diff; - if (max_error < error) - max_error = error; + if (max_error < error) max_error = error; total_error += error; } } - EXPECT_GE(1, max_error ) - << "Error: FDCT/IDCT has an individual roundtrip error > 1"; + EXPECT_GE(1, max_error) + << "Error: FDCT/IDCT has an individual roundtrip error > 1"; EXPECT_GE(count_test_block, total_error) - << "Error: FDCT/IDCT has average roundtrip error > 1 per block"; + << "Error: FDCT/IDCT has average roundtrip error > 1 per block"; }; } // namespace diff --git a/libs/libvpx/test/vp8_fragments_test.cc b/libs/libvpx/test/vp8_fragments_test.cc index cb0d1a155e..ac967d1b7e 100644 --- a/libs/libvpx/test/vp8_fragments_test.cc +++ b/libs/libvpx/test/vp8_fragments_test.cc @@ -13,9 +13,8 @@ namespace { -class VP8FramgmentsTest - : public ::libvpx_test::EncoderTest, - public ::testing::Test { +class VP8FramgmentsTest : public ::libvpx_test::EncoderTest, + public ::testing::Test { protected: VP8FramgmentsTest() : EncoderTest(&::libvpx_test::kVP8) {} virtual ~VP8FramgmentsTest() {} diff --git a/libs/libvpx/test/vp9_arf_freq_test.cc b/libs/libvpx/test/vp9_arf_freq_test.cc index 89200d4086..48a4ca7392 100644 --- a/libs/libvpx/test/vp9_arf_freq_test.cc +++ b/libs/libvpx/test/vp9_arf_freq_test.cc @@ -22,8 +22,8 @@ namespace { const unsigned int kFrames = 100; const int kBitrate = 500; -#define ARF_NOT_SEEN 1000001 -#define ARF_SEEN_ONCE 1000000 +#define ARF_NOT_SEEN 1000001 +#define ARF_SEEN_ONCE 1000000 typedef struct { const char *filename; @@ -44,24 +44,20 @@ typedef struct { const TestVideoParam kTestVectors[] = { // artificially increase framerate to trigger 
default check - {"hantro_collage_w352h288.yuv", 352, 288, 5000, 1, - 8, VPX_IMG_FMT_I420, VPX_BITS_8, 0}, - {"hantro_collage_w352h288.yuv", 352, 288, 30, 1, - 8, VPX_IMG_FMT_I420, VPX_BITS_8, 0}, - {"rush_hour_444.y4m", 352, 288, 30, 1, - 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1}, + { "hantro_collage_w352h288.yuv", 352, 288, 5000, 1, 8, VPX_IMG_FMT_I420, + VPX_BITS_8, 0 }, + { "hantro_collage_w352h288.yuv", 352, 288, 30, 1, 8, VPX_IMG_FMT_I420, + VPX_BITS_8, 0 }, + { "rush_hour_444.y4m", 352, 288, 30, 1, 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1 }, #if CONFIG_VP9_HIGHBITDEPTH - // Add list of profile 2/3 test videos here ... +// Add list of profile 2/3 test videos here ... #endif // CONFIG_VP9_HIGHBITDEPTH }; const TestEncodeParam kEncodeVectors[] = { - {::libvpx_test::kOnePassGood, 2}, - {::libvpx_test::kOnePassGood, 5}, - {::libvpx_test::kTwoPassGood, 1}, - {::libvpx_test::kTwoPassGood, 2}, - {::libvpx_test::kTwoPassGood, 5}, - {::libvpx_test::kRealTime, 5}, + { ::libvpx_test::kOnePassGood, 2 }, { ::libvpx_test::kOnePassGood, 5 }, + { ::libvpx_test::kTwoPassGood, 1 }, { ::libvpx_test::kTwoPassGood, 2 }, + { ::libvpx_test::kTwoPassGood, 5 }, { ::libvpx_test::kRealTime, 5 }, }; const int kMinArfVectors[] = { @@ -72,23 +68,21 @@ const int kMinArfVectors[] = { int is_extension_y4m(const char *filename) { const char *dot = strrchr(filename, '.'); - if (!dot || dot == filename) + if (!dot || dot == filename) { return 0; - else + } else { return !strcmp(dot, ".y4m"); + } } class ArfFreqTest : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWith3Params { protected: ArfFreqTest() - : EncoderTest(GET_PARAM(0)), - test_video_param_(GET_PARAM(1)), - test_encode_param_(GET_PARAM(2)), - min_arf_requested_(GET_PARAM(3)) { - } + : EncoderTest(GET_PARAM(0)), test_video_param_(GET_PARAM(1)), + test_encode_param_(GET_PARAM(2)), min_arf_requested_(GET_PARAM(3)) {} virtual ~ArfFreqTest() {} @@ -114,17 +108,16 @@ class ArfFreqTest } int GetNumFramesInPkt(const 
vpx_codec_cx_pkt_t *pkt) { - const uint8_t *buffer = reinterpret_cast(pkt->data.frame.buf); + const uint8_t *buffer = reinterpret_cast(pkt->data.frame.buf); const uint8_t marker = buffer[pkt->data.frame.sz - 1]; const int mag = ((marker >> 3) & 3) + 1; int frames = (marker & 0x7) + 1; - const unsigned int index_sz = 2 + mag * frames; + const unsigned int index_sz = 2 + mag * frames; // Check for superframe or not. // Assume superframe has only one visible frame, the rest being // invisible. If superframe index is not found, then there is only // one frame. - if (!((marker & 0xe0) == 0xc0 && - pkt->data.frame.sz >= index_sz && + if (!((marker & 0xe0) == 0xc0 && pkt->data.frame.sz >= index_sz && buffer[pkt->data.frame.sz - index_sz] == marker)) { frames = 1; } @@ -132,8 +125,7 @@ class ArfFreqTest } virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) { - if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) - return; + if (pkt->kind != VPX_CODEC_CX_FRAME_PKT) return; const int frames = GetNumFramesInPkt(pkt); if (frames == 1) { run_of_visible_frames_++; @@ -167,18 +159,17 @@ class ArfFreqTest } } - int GetMinVisibleRun() const { - return min_run_; - } + int GetMinVisibleRun() const { return min_run_; } int GetMinArfDistanceRequested() const { - if (min_arf_requested_) + if (min_arf_requested_) { return min_arf_requested_; - else + } else { return vp9_rc_get_default_min_gf_interval( test_video_param_.width, test_video_param_.height, (double)test_video_param_.framerate_num / - test_video_param_.framerate_den); + test_video_param_.framerate_den); + } } TestVideoParam test_video_param_; @@ -197,56 +188,30 @@ TEST_P(ArfFreqTest, MinArfFreqTest) { cfg_.g_input_bit_depth = test_video_param_.input_bit_depth; cfg_.g_bit_depth = test_video_param_.bit_depth; init_flags_ = VPX_CODEC_USE_PSNR; - if (cfg_.g_bit_depth > 8) - init_flags_ |= VPX_CODEC_USE_HIGHBITDEPTH; + if (cfg_.g_bit_depth > 8) init_flags_ |= VPX_CODEC_USE_HIGHBITDEPTH; - libvpx_test::VideoSource *video; + 
testing::internal::scoped_ptr video; if (is_extension_y4m(test_video_param_.filename)) { - video = new libvpx_test::Y4mVideoSource(test_video_param_.filename, - 0, kFrames); + video.reset(new libvpx_test::Y4mVideoSource(test_video_param_.filename, 0, + kFrames)); } else { - video = new libvpx_test::YUVVideoSource(test_video_param_.filename, - test_video_param_.fmt, - test_video_param_.width, - test_video_param_.height, - test_video_param_.framerate_num, - test_video_param_.framerate_den, - 0, kFrames); + video.reset(new libvpx_test::YUVVideoSource( + test_video_param_.filename, test_video_param_.fmt, + test_video_param_.width, test_video_param_.height, + test_video_param_.framerate_num, test_video_param_.framerate_den, 0, + kFrames)); } - ASSERT_NO_FATAL_FAILURE(RunLoop(video)); + ASSERT_NO_FATAL_FAILURE(RunLoop(video.get())); const int min_run = GetMinVisibleRun(); const int min_arf_dist_requested = GetMinArfDistanceRequested(); if (min_run != ARF_NOT_SEEN && min_run != ARF_SEEN_ONCE) { const int min_arf_dist = min_run + 1; EXPECT_GE(min_arf_dist, min_arf_dist_requested); } - delete(video); } -VP9_INSTANTIATE_TEST_CASE( - ArfFreqTest, - ::testing::ValuesIn(kTestVectors), - ::testing::ValuesIn(kEncodeVectors), - ::testing::ValuesIn(kMinArfVectors)); - -#if CONFIG_VP9_HIGHBITDEPTH -# if CONFIG_VP10_ENCODER -// TODO(angiebird): 25-29 fail in high bitdepth mode. 
-INSTANTIATE_TEST_CASE_P( - DISABLED_VP10, ArfFreqTest, - ::testing::Combine( - ::testing::Values(static_cast( - &libvpx_test::kVP10)), - ::testing::ValuesIn(kTestVectors), - ::testing::ValuesIn(kEncodeVectors), - ::testing::ValuesIn(kMinArfVectors))); -# endif // CONFIG_VP10_ENCODER -#else -VP10_INSTANTIATE_TEST_CASE( - ArfFreqTest, - ::testing::ValuesIn(kTestVectors), - ::testing::ValuesIn(kEncodeVectors), - ::testing::ValuesIn(kMinArfVectors)); -#endif // CONFIG_VP9_HIGHBITDEPTH +VP9_INSTANTIATE_TEST_CASE(ArfFreqTest, ::testing::ValuesIn(kTestVectors), + ::testing::ValuesIn(kEncodeVectors), + ::testing::ValuesIn(kMinArfVectors)); } // namespace diff --git a/libs/libvpx/test/vp9_boolcoder_test.cc b/libs/libvpx/test/vp9_boolcoder_test.cc index c61bb4ab96..5dbfd5ca59 100644 --- a/libs/libvpx/test/vp9_boolcoder_test.cc +++ b/libs/libvpx/test/vp9_boolcoder_test.cc @@ -28,12 +28,13 @@ const int num_tests = 10; TEST(VP9, TestBitIO) { ACMRandom rnd(ACMRandom::DeterministicSeed()); for (int n = 0; n < num_tests; ++n) { - for (int method = 0; method <= 7; ++method) { // we generate various proba + for (int method = 0; method <= 7; ++method) { // we generate various proba const int kBitsToTest = 1000; uint8_t probas[kBitsToTest]; for (int i = 0; i < kBitsToTest; ++i) { const int parity = i & 1; + /* clang-format off */ probas[i] = (method == 0) ? 0 : (method == 1) ? 255 : (method == 2) ? 128 : @@ -44,6 +45,7 @@ TEST(VP9, TestBitIO) { (method == 6) ? (parity ? rnd(64) : 255 - rnd(64)) : (parity ? 
rnd(32) : 255 - rnd(32)); + /* clang-format on */ } for (int bit_method = 0; bit_method <= 3; ++bit_method) { const int random_seed = 6432; @@ -79,8 +81,7 @@ TEST(VP9, TestBitIO) { } GTEST_ASSERT_EQ(vpx_read(&br, probas[i]), bit) << "pos: " << i << " / " << kBitsToTest - << " bit_method: " << bit_method - << " method: " << method; + << " bit_method: " << bit_method << " method: " << method; } } } diff --git a/libs/libvpx/test/vp9_decrypt_test.cc b/libs/libvpx/test/vp9_decrypt_test.cc index d988612070..1874d23117 100644 --- a/libs/libvpx/test/vp9_decrypt_test.cc +++ b/libs/libvpx/test/vp9_decrypt_test.cc @@ -21,10 +21,8 @@ namespace { // with whatever internal state the decryptor uses. For testing we'll just // xor with a constant key, and decrypt_state will point to the start of // the original buffer. -const uint8_t test_key[16] = { - 0x01, 0x12, 0x23, 0x34, 0x45, 0x56, 0x67, 0x78, - 0x89, 0x9a, 0xab, 0xbc, 0xcd, 0xde, 0xef, 0xf0 -}; +const uint8_t test_key[16] = { 0x01, 0x12, 0x23, 0x34, 0x45, 0x56, 0x67, 0x78, + 0x89, 0x9a, 0xab, 0xbc, 0xcd, 0xde, 0xef, 0xf0 }; void encrypt_buffer(const uint8_t *src, uint8_t *dst, size_t size, ptrdiff_t offset) { @@ -33,8 +31,8 @@ void encrypt_buffer(const uint8_t *src, uint8_t *dst, size_t size, } } -void test_decrypt_cb(void *decrypt_state, const uint8_t *input, - uint8_t *output, int count) { +void test_decrypt_cb(void *decrypt_state, const uint8_t *input, uint8_t *output, + int count) { encrypt_buffer(input, output, count, input - reinterpret_cast(decrypt_state)); } diff --git a/libs/libvpx/test/vp9_denoiser_sse2_test.cc b/libs/libvpx/test/vp9_denoiser_sse2_test.cc index 17c799dffb..2a50b77355 100644 --- a/libs/libvpx/test/vp9_denoiser_sse2_test.cc +++ b/libs/libvpx/test/vp9_denoiser_sse2_test.cc @@ -33,9 +33,7 @@ class VP9DenoiserTest : public ::testing::TestWithParam { public: virtual ~VP9DenoiserTest() {} - virtual void SetUp() { - bs_ = GetParam(); - } + virtual void SetUp() { bs_ = GetParam(); } virtual void TearDown() 
{ libvpx_test::ClearSystemState(); } @@ -68,19 +66,19 @@ TEST_P(VP9DenoiserTest, BitexactCheck) { sig_block[j] = rnd.Rand8(); // The pixels in mc_avg_block are generated by adding a random // number in range [-19, 19] to corresponding pixels in sig_block. - temp = sig_block[j] + ((rnd.Rand8() % 2 == 0) ? -1 : 1) * - (rnd.Rand8() % 20); + temp = + sig_block[j] + ((rnd.Rand8() % 2 == 0) ? -1 : 1) * (rnd.Rand8() % 20); // Clip. mc_avg_block[j] = (temp < 0) ? 0 : ((temp > 255) ? 255 : temp); } - ASM_REGISTER_STATE_CHECK(vp9_denoiser_filter_c( - sig_block, 64, mc_avg_block, 64, avg_block_c, - 64, 0, bs_, motion_magnitude_random)); + ASM_REGISTER_STATE_CHECK(vp9_denoiser_filter_c(sig_block, 64, mc_avg_block, + 64, avg_block_c, 64, 0, bs_, + motion_magnitude_random)); ASM_REGISTER_STATE_CHECK(vp9_denoiser_filter_sse2( - sig_block, 64, mc_avg_block, 64, avg_block_sse2, - 64, 0, bs_, motion_magnitude_random)); + sig_block, 64, mc_avg_block, 64, avg_block_sse2, 64, 0, bs_, + motion_magnitude_random)); // Test bitexactness. for (int h = 0; h < (4 << b_height_log2_lookup[bs_]); ++h) { @@ -92,10 +90,9 @@ TEST_P(VP9DenoiserTest, BitexactCheck) { } // Test for all block size. 
-INSTANTIATE_TEST_CASE_P( - SSE2, VP9DenoiserTest, - ::testing::Values(BLOCK_4X4, BLOCK_4X8, BLOCK_8X4, BLOCK_8X8, - BLOCK_8X16, BLOCK_16X8, BLOCK_16X16, BLOCK_16X32, - BLOCK_32X16, BLOCK_32X32, BLOCK_32X64, BLOCK_64X32, - BLOCK_64X64)); +INSTANTIATE_TEST_CASE_P(SSE2, VP9DenoiserTest, + ::testing::Values(BLOCK_8X8, BLOCK_8X16, BLOCK_16X8, + BLOCK_16X16, BLOCK_16X32, BLOCK_32X16, + BLOCK_32X32, BLOCK_32X64, BLOCK_64X32, + BLOCK_64X64)); } // namespace diff --git a/libs/libvpx/test/vp9_encoder_parms_get_to_decoder.cc b/libs/libvpx/test/vp9_encoder_parms_get_to_decoder.cc index bd84098791..53dc8c9fe4 100644 --- a/libs/libvpx/test/vp9_encoder_parms_get_to_decoder.cc +++ b/libs/libvpx/test/vp9_encoder_parms_get_to_decoder.cc @@ -29,7 +29,7 @@ struct EncodePerfTestVideo { }; const EncodePerfTestVideo kVP9EncodePerfTestVectors[] = { - {"niklas_1280_720_30.y4m", 1280, 720, 600, 10}, + { "niklas_1280_720_30.y4m", 1280, 720, 600, 10 }, }; struct EncodeParameters { @@ -45,10 +45,10 @@ struct EncodeParameters { }; const EncodeParameters kVP9EncodeParameterSet[] = { - {0, 0, 0, 1, 0, VPX_CR_STUDIO_RANGE, VPX_CS_BT_601, { 0, 0 }}, - {0, 0, 0, 0, 0, VPX_CR_FULL_RANGE, VPX_CS_BT_709, { 0, 0 }}, - {0, 0, 1, 0, 0, VPX_CR_FULL_RANGE, VPX_CS_BT_2020, { 0, 0 }}, - {0, 2, 0, 0, 1, VPX_CR_STUDIO_RANGE, VPX_CS_UNKNOWN, { 640, 480 }}, + { 0, 0, 0, 1, 0, VPX_CR_STUDIO_RANGE, VPX_CS_BT_601, { 0, 0 } }, + { 0, 0, 0, 0, 0, VPX_CR_FULL_RANGE, VPX_CS_BT_709, { 0, 0 } }, + { 0, 0, 1, 0, 0, VPX_CR_FULL_RANGE, VPX_CS_BT_2020, { 0, 0 } }, + { 0, 2, 0, 0, 1, VPX_CR_STUDIO_RANGE, VPX_CS_UNKNOWN, { 640, 480 } }, // TODO(JBB): Test profiles (requires more work). 
}; @@ -87,8 +87,9 @@ class VpxEncoderParmsGetToDecoder encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7); encoder->Control(VP8E_SET_ARNR_STRENGTH, 5); encoder->Control(VP8E_SET_ARNR_TYPE, 3); - if (encode_parms.render_size[0] > 0 && encode_parms.render_size[1] > 0) + if (encode_parms.render_size[0] > 0 && encode_parms.render_size[1] > 0) { encoder->Control(VP9E_SET_RENDER_SIZE, encode_parms.render_size); + } } } @@ -139,12 +140,11 @@ class VpxEncoderParmsGetToDecoder TEST_P(VpxEncoderParmsGetToDecoder, BitstreamParms) { init_flags_ = VPX_CODEC_USE_PSNR; - libvpx_test::VideoSource *const video = - new libvpx_test::Y4mVideoSource(test_video_.name, 0, test_video_.frames); - ASSERT_TRUE(video != NULL); + testing::internal::scoped_ptr video( + new libvpx_test::Y4mVideoSource(test_video_.name, 0, test_video_.frames)); + ASSERT_TRUE(video.get() != NULL); - ASSERT_NO_FATAL_FAILURE(RunLoop(video)); - delete video; + ASSERT_NO_FATAL_FAILURE(RunLoop(video.get())); } VP9_INSTANTIATE_TEST_CASE(VpxEncoderParmsGetToDecoder, diff --git a/libs/libvpx/test/vp9_end_to_end_test.cc b/libs/libvpx/test/vp9_end_to_end_test.cc index be1fa68c0e..955f567ce2 100644 --- a/libs/libvpx/test/vp9_end_to_end_test.cc +++ b/libs/libvpx/test/vp9_end_to_end_test.cc @@ -18,21 +18,17 @@ namespace { -const unsigned int kWidth = 160; +const unsigned int kWidth = 160; const unsigned int kHeight = 90; const unsigned int kFramerate = 50; const unsigned int kFrames = 10; const int kBitrate = 500; // List of psnr thresholds for speed settings 0-7 and 5 encoding modes const double kPsnrThreshold[][5] = { - { 36.0, 37.0, 37.0, 37.0, 37.0 }, - { 35.0, 36.0, 36.0, 36.0, 36.0 }, - { 34.0, 35.0, 35.0, 35.0, 35.0 }, - { 33.0, 34.0, 34.0, 34.0, 34.0 }, - { 32.0, 33.0, 33.0, 33.0, 33.0 }, - { 31.0, 32.0, 32.0, 32.0, 32.0 }, - { 30.0, 31.0, 31.0, 31.0, 31.0 }, - { 29.0, 30.0, 30.0, 30.0, 30.0 }, + { 36.0, 37.0, 37.0, 37.0, 37.0 }, { 35.0, 36.0, 36.0, 36.0, 36.0 }, + { 34.0, 35.0, 35.0, 35.0, 35.0 }, { 33.0, 34.0, 34.0, 34.0, 
34.0 }, + { 32.0, 33.0, 33.0, 33.0, 33.0 }, { 31.0, 32.0, 32.0, 32.0, 32.0 }, + { 30.0, 31.0, 31.0, 31.0, 31.0 }, { 29.0, 30.0, 30.0, 30.0, 30.0 }, }; typedef struct { @@ -44,53 +40,49 @@ typedef struct { } TestVideoParam; const TestVideoParam kTestVectors[] = { - {"park_joy_90p_8_420.y4m", 8, VPX_IMG_FMT_I420, VPX_BITS_8, 0}, - {"park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422, VPX_BITS_8, 1}, - {"park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1}, - {"park_joy_90p_8_440.yuv", 8, VPX_IMG_FMT_I440, VPX_BITS_8, 1}, + { "park_joy_90p_8_420.y4m", 8, VPX_IMG_FMT_I420, VPX_BITS_8, 0 }, + { "park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422, VPX_BITS_8, 1 }, + { "park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1 }, + { "park_joy_90p_8_440.yuv", 8, VPX_IMG_FMT_I440, VPX_BITS_8, 1 }, #if CONFIG_VP9_HIGHBITDEPTH - {"park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016, VPX_BITS_10, 2}, - {"park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216, VPX_BITS_10, 3}, - {"park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416, VPX_BITS_10, 3}, - {"park_joy_90p_10_440.yuv", 10, VPX_IMG_FMT_I44016, VPX_BITS_10, 3}, - {"park_joy_90p_12_420.y4m", 12, VPX_IMG_FMT_I42016, VPX_BITS_12, 2}, - {"park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216, VPX_BITS_12, 3}, - {"park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416, VPX_BITS_12, 3}, - {"park_joy_90p_12_440.yuv", 12, VPX_IMG_FMT_I44016, VPX_BITS_12, 3}, + { "park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016, VPX_BITS_10, 2 }, + { "park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216, VPX_BITS_10, 3 }, + { "park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416, VPX_BITS_10, 3 }, + { "park_joy_90p_10_440.yuv", 10, VPX_IMG_FMT_I44016, VPX_BITS_10, 3 }, + { "park_joy_90p_12_420.y4m", 12, VPX_IMG_FMT_I42016, VPX_BITS_12, 2 }, + { "park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216, VPX_BITS_12, 3 }, + { "park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416, VPX_BITS_12, 3 }, + { "park_joy_90p_12_440.yuv", 12, VPX_IMG_FMT_I44016, VPX_BITS_12, 3 }, #endif // 
CONFIG_VP9_HIGHBITDEPTH }; // Encoding modes tested const libvpx_test::TestMode kEncodingModeVectors[] = { - ::libvpx_test::kTwoPassGood, - ::libvpx_test::kOnePassGood, + ::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood, ::libvpx_test::kRealTime, }; // Speed settings tested -const int kCpuUsedVectors[] = {1, 2, 3, 5, 6}; +const int kCpuUsedVectors[] = { 1, 2, 3, 5, 6 }; int is_extension_y4m(const char *filename) { const char *dot = strrchr(filename, '.'); - if (!dot || dot == filename) + if (!dot || dot == filename) { return 0; - else + } else { return !strcmp(dot, ".y4m"); + } } class EndToEndTestLarge : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWith3Params { protected: EndToEndTestLarge() - : EncoderTest(GET_PARAM(0)), - test_video_param_(GET_PARAM(2)), - cpu_used_(GET_PARAM(3)), - psnr_(0.0), - nframes_(0), - encoding_mode_(GET_PARAM(1)) { - } + : EncoderTest(GET_PARAM(0)), test_video_param_(GET_PARAM(2)), + cpu_used_(GET_PARAM(3)), psnr_(0.0), nframes_(0), + encoding_mode_(GET_PARAM(1)) {} virtual ~EndToEndTestLarge() {} @@ -136,8 +128,7 @@ class EndToEndTestLarge } double GetAveragePsnr() const { - if (nframes_) - return psnr_ / nframes_; + if (nframes_) return psnr_ / nframes_; return 0.0; } @@ -161,49 +152,26 @@ TEST_P(EndToEndTestLarge, EndtoEndPSNRTest) { cfg_.g_input_bit_depth = test_video_param_.input_bit_depth; cfg_.g_bit_depth = test_video_param_.bit_depth; init_flags_ = VPX_CODEC_USE_PSNR; - if (cfg_.g_bit_depth > 8) - init_flags_ |= VPX_CODEC_USE_HIGHBITDEPTH; + if (cfg_.g_bit_depth > 8) init_flags_ |= VPX_CODEC_USE_HIGHBITDEPTH; - libvpx_test::VideoSource *video; + testing::internal::scoped_ptr video; if (is_extension_y4m(test_video_param_.filename)) { - video = new libvpx_test::Y4mVideoSource(test_video_param_.filename, - 0, kFrames); + video.reset(new libvpx_test::Y4mVideoSource(test_video_param_.filename, 0, + kFrames)); } else { - video = new libvpx_test::YUVVideoSource(test_video_param_.filename, - 
test_video_param_.fmt, - kWidth, kHeight, - kFramerate, 1, 0, kFrames); + video.reset(new libvpx_test::YUVVideoSource( + test_video_param_.filename, test_video_param_.fmt, kWidth, kHeight, + kFramerate, 1, 0, kFrames)); } + ASSERT_TRUE(video.get() != NULL); - ASSERT_NO_FATAL_FAILURE(RunLoop(video)); + ASSERT_NO_FATAL_FAILURE(RunLoop(video.get())); const double psnr = GetAveragePsnr(); EXPECT_GT(psnr, GetPsnrThreshold()); - delete(video); } -VP9_INSTANTIATE_TEST_CASE( - EndToEndTestLarge, - ::testing::ValuesIn(kEncodingModeVectors), - ::testing::ValuesIn(kTestVectors), - ::testing::ValuesIn(kCpuUsedVectors)); - -#if CONFIG_VP9_HIGHBITDEPTH -# if CONFIG_VP10_ENCODER -// TODO(angiebird): many fail in high bitdepth mode. -INSTANTIATE_TEST_CASE_P( - DISABLED_VP10, EndToEndTestLarge, - ::testing::Combine( - ::testing::Values(static_cast( - &libvpx_test::kVP10)), - ::testing::ValuesIn(kEncodingModeVectors), - ::testing::ValuesIn(kTestVectors), - ::testing::ValuesIn(kCpuUsedVectors))); -# endif // CONFIG_VP10_ENCODER -#else -VP10_INSTANTIATE_TEST_CASE( - EndToEndTestLarge, - ::testing::ValuesIn(kEncodingModeVectors), - ::testing::ValuesIn(kTestVectors), - ::testing::ValuesIn(kCpuUsedVectors)); -#endif // CONFIG_VP9_HIGHBITDEPTH +VP9_INSTANTIATE_TEST_CASE(EndToEndTestLarge, + ::testing::ValuesIn(kEncodingModeVectors), + ::testing::ValuesIn(kTestVectors), + ::testing::ValuesIn(kCpuUsedVectors)); } // namespace diff --git a/libs/libvpx/test/vp9_error_block_test.cc b/libs/libvpx/test/vp9_error_block_test.cc index 23a249e2b0..74436c09e7 100644 --- a/libs/libvpx/test/vp9_error_block_test.cc +++ b/libs/libvpx/test/vp9_error_block_test.cc @@ -32,20 +32,31 @@ const int kNumIterations = 1000; typedef int64_t (*ErrorBlockFunc)(const tran_low_t *coeff, const tran_low_t *dqcoeff, - intptr_t block_size, - int64_t *ssz, int bps); + intptr_t block_size, int64_t *ssz, int bps); typedef std::tr1::tuple - ErrorBlockParam; + ErrorBlockParam; -class ErrorBlockTest - : public 
::testing::TestWithParam { +// wrapper for 8-bit block error functions without a 'bps' param. +typedef int64_t (*HighBdBlockError8bit)(const tran_low_t *coeff, + const tran_low_t *dqcoeff, + intptr_t block_size, int64_t *ssz); +template +int64_t HighBdBlockError8bitWrapper(const tran_low_t *coeff, + const tran_low_t *dqcoeff, + intptr_t block_size, int64_t *ssz, + int bps) { + EXPECT_EQ(8, bps); + return fn(coeff, dqcoeff, block_size, ssz); +} + +class ErrorBlockTest : public ::testing::TestWithParam { public: virtual ~ErrorBlockTest() {} virtual void SetUp() { - error_block_op_ = GET_PARAM(0); + error_block_op_ = GET_PARAM(0); ref_error_block_op_ = GET_PARAM(1); - bit_depth_ = GET_PARAM(2); + bit_depth_ = GET_PARAM(2); } virtual void TearDown() { libvpx_test::ClearSystemState(); } @@ -76,18 +87,18 @@ TEST_P(ErrorBlockTest, OperationCheck) { // can be used for optimization, so generate test input precisely. if (rnd(2)) { // Positive number - coeff[j] = rnd(1 << msb); + coeff[j] = rnd(1 << msb); dqcoeff[j] = rnd(1 << msb); } else { // Negative number - coeff[j] = -rnd(1 << msb); + coeff[j] = -rnd(1 << msb); dqcoeff[j] = -rnd(1 << msb); } } - ref_ret = ref_error_block_op_(coeff, dqcoeff, block_size, &ref_ssz, - bit_depth_); - ASM_REGISTER_STATE_CHECK(ret = error_block_op_(coeff, dqcoeff, block_size, - &ssz, bit_depth_)); + ref_ret = + ref_error_block_op_(coeff, dqcoeff, block_size, &ref_ssz, bit_depth_); + ASM_REGISTER_STATE_CHECK( + ret = error_block_op_(coeff, dqcoeff, block_size, &ssz, bit_depth_)); err_count += (ref_ret != ret) | (ref_ssz != ssz); if (err_count && !err_count_total) { first_failure = i; @@ -117,35 +128,35 @@ TEST_P(ErrorBlockTest, ExtremeValues) { int k = (i / 9) % 9; // Change the maximum coeff value, to test different bit boundaries - if ( k == 8 && (i % 9) == 0 ) { + if (k == 8 && (i % 9) == 0) { max_val >>= 1; } block_size = 16 << (i % 9); // All block sizes from 4x4, 8x4 ..64x64 for (int j = 0; j < block_size; j++) { if (k < 4) { // Test at 
positive maximum values - coeff[j] = k % 2 ? max_val : 0; + coeff[j] = k % 2 ? max_val : 0; dqcoeff[j] = (k >> 1) % 2 ? max_val : 0; } else if (k < 8) { // Test at negative maximum values - coeff[j] = k % 2 ? -max_val : 0; + coeff[j] = k % 2 ? -max_val : 0; dqcoeff[j] = (k >> 1) % 2 ? -max_val : 0; } else { if (rnd(2)) { // Positive number - coeff[j] = rnd(1 << 14); + coeff[j] = rnd(1 << 14); dqcoeff[j] = rnd(1 << 14); } else { // Negative number - coeff[j] = -rnd(1 << 14); + coeff[j] = -rnd(1 << 14); dqcoeff[j] = -rnd(1 << 14); } } } - ref_ret = ref_error_block_op_(coeff, dqcoeff, block_size, &ref_ssz, - bit_depth_); - ASM_REGISTER_STATE_CHECK(ret = error_block_op_(coeff, dqcoeff, block_size, - &ssz, bit_depth_)); + ref_ret = + ref_error_block_op_(coeff, dqcoeff, block_size, &ref_ssz, bit_depth_); + ASM_REGISTER_STATE_CHECK( + ret = error_block_op_(coeff, dqcoeff, block_size, &ssz, bit_depth_)); err_count += (ref_ret != ret) | (ref_ssz != ssz); if (err_count && !err_count_total) { first_failure = i; @@ -159,53 +170,30 @@ TEST_P(ErrorBlockTest, ExtremeValues) { using std::tr1::make_tuple; -#if CONFIG_USE_X86INC -int64_t wrap_vp9_highbd_block_error_8bit_c(const tran_low_t *coeff, - const tran_low_t *dqcoeff, - intptr_t block_size, - int64_t *ssz, int bps) { - EXPECT_EQ(8, bps); - return vp9_highbd_block_error_8bit_c(coeff, dqcoeff, block_size, ssz); -} - #if HAVE_SSE2 -int64_t wrap_vp9_highbd_block_error_8bit_sse2(const tran_low_t *coeff, - const tran_low_t *dqcoeff, - intptr_t block_size, - int64_t *ssz, int bps) { - EXPECT_EQ(8, bps); - return vp9_highbd_block_error_8bit_sse2(coeff, dqcoeff, block_size, ssz); -} - INSTANTIATE_TEST_CASE_P( SSE2, ErrorBlockTest, ::testing::Values( - make_tuple(&vp9_highbd_block_error_sse2, - &vp9_highbd_block_error_c, VPX_BITS_10), - make_tuple(&vp9_highbd_block_error_sse2, - &vp9_highbd_block_error_c, VPX_BITS_12), - make_tuple(&vp9_highbd_block_error_sse2, - &vp9_highbd_block_error_c, VPX_BITS_8), - 
make_tuple(&wrap_vp9_highbd_block_error_8bit_sse2, - &wrap_vp9_highbd_block_error_8bit_c, VPX_BITS_8))); + make_tuple(&vp9_highbd_block_error_sse2, &vp9_highbd_block_error_c, + VPX_BITS_10), + make_tuple(&vp9_highbd_block_error_sse2, &vp9_highbd_block_error_c, + VPX_BITS_12), + make_tuple(&vp9_highbd_block_error_sse2, &vp9_highbd_block_error_c, + VPX_BITS_8), + make_tuple( + &HighBdBlockError8bitWrapper, + &HighBdBlockError8bitWrapper, + VPX_BITS_8))); #endif // HAVE_SSE2 #if HAVE_AVX -int64_t wrap_vp9_highbd_block_error_8bit_avx(const tran_low_t *coeff, - const tran_low_t *dqcoeff, - intptr_t block_size, - int64_t *ssz, int bps) { - EXPECT_EQ(8, bps); - return vp9_highbd_block_error_8bit_avx(coeff, dqcoeff, block_size, ssz); -} - INSTANTIATE_TEST_CASE_P( AVX, ErrorBlockTest, - ::testing::Values( - make_tuple(&wrap_vp9_highbd_block_error_8bit_avx, - &wrap_vp9_highbd_block_error_8bit_c, VPX_BITS_8))); + ::testing::Values(make_tuple( + &HighBdBlockError8bitWrapper, + &HighBdBlockError8bitWrapper, + VPX_BITS_8))); #endif // HAVE_AVX -#endif // CONFIG_USE_X86INC #endif // CONFIG_VP9_HIGHBITDEPTH } // namespace diff --git a/libs/libvpx/test/vp9_ethread_test.cc b/libs/libvpx/test/vp9_ethread_test.cc index 1e270e039b..0590487fc0 100644 --- a/libs/libvpx/test/vp9_ethread_test.cc +++ b/libs/libvpx/test/vp9_ethread_test.cc @@ -23,22 +23,12 @@ class VPxEncoderThreadTest public ::libvpx_test::CodecTestWith2Params { protected: VPxEncoderThreadTest() - : EncoderTest(GET_PARAM(0)), - encoder_initialized_(false), - tiles_(2), - encoding_mode_(GET_PARAM(1)), - set_cpu_used_(GET_PARAM(2)) { + : EncoderTest(GET_PARAM(0)), encoder_initialized_(false), tiles_(2), + encoding_mode_(GET_PARAM(1)), set_cpu_used_(GET_PARAM(2)) { init_flags_ = VPX_CODEC_USE_PSNR; - vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t(); - cfg.w = 1280; - cfg.h = 720; - decoder_ = codec_->CreateDecoder(cfg, 0); - md5_.clear(); } - virtual ~VPxEncoderThreadTest() { - delete decoder_; - } + virtual 
~VPxEncoderThreadTest() {} virtual void SetUp() { InitializeConfig(); @@ -48,7 +38,7 @@ class VPxEncoderThreadTest cfg_.g_lag_in_frames = 3; cfg_.rc_end_usage = VPX_VBR; cfg_.rc_2pass_vbr_minsection_pct = 5; - cfg_.rc_2pass_vbr_minsection_pct = 2000; + cfg_.rc_2pass_vbr_maxsection_pct = 2000; } else { cfg_.g_lag_in_frames = 0; cfg_.rc_end_usage = VPX_CBR; @@ -81,27 +71,28 @@ class VPxEncoderThreadTest } } - virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) { - const vpx_codec_err_t res = decoder_->DecodeFrame( - reinterpret_cast(pkt->data.frame.buf), pkt->data.frame.sz); - if (res != VPX_CODEC_OK) { - abort_ = true; - ASSERT_EQ(VPX_CODEC_OK, res); - } - const vpx_image_t *img = decoder_->GetDxData().Next(); + virtual void DecompressedFrameHook(const vpx_image_t &img, + vpx_codec_pts_t /*pts*/) { + ::libvpx_test::MD5 md5_res; + md5_res.Add(&img); + md5_.push_back(md5_res.Get()); + } - if (img) { - ::libvpx_test::MD5 md5_res; - md5_res.Add(img); - md5_.push_back(md5_res.Get()); + virtual bool HandleDecodeResult(const vpx_codec_err_t res, + const libvpx_test::VideoSource & /*video*/, + libvpx_test::Decoder * /*decoder*/) { + if (res != VPX_CODEC_OK) { + EXPECT_EQ(VPX_CODEC_OK, res); + return false; } + + return true; } bool encoder_initialized_; int tiles_; ::libvpx_test::TestMode encoding_mode_; int set_cpu_used_; - ::libvpx_test::Decoder *decoder_; std::vector md5_; }; @@ -129,14 +120,9 @@ TEST_P(VPxEncoderThreadTest, EncoderResultTest) { ASSERT_EQ(single_thr_md5, multi_thr_md5); } -VP9_INSTANTIATE_TEST_CASE( - VPxEncoderThreadTest, - ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood, - ::libvpx_test::kRealTime), - ::testing::Range(1, 9)); - -VP10_INSTANTIATE_TEST_CASE( - VPxEncoderThreadTest, - ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood), - ::testing::Range(1, 3)); +VP9_INSTANTIATE_TEST_CASE(VPxEncoderThreadTest, + ::testing::Values(::libvpx_test::kTwoPassGood, + ::libvpx_test::kOnePassGood, + 
::libvpx_test::kRealTime), + ::testing::Range(1, 9)); } // namespace diff --git a/libs/libvpx/test/vp9_frame_parallel_test.cc b/libs/libvpx/test/vp9_frame_parallel_test.cc index f0df88afa9..670cd4d721 100644 --- a/libs/libvpx/test/vp9_frame_parallel_test.cc +++ b/libs/libvpx/test/vp9_frame_parallel_test.cc @@ -46,11 +46,11 @@ string DecodeFileWithPause(const string &filename, int num_threads, int in_frames = 0; int out_frames = 0; - vpx_codec_dec_cfg_t cfg = {0}; + vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t(); cfg.threads = num_threads; vpx_codec_flags_t flags = 0; flags |= VPX_CODEC_USE_FRAME_THREADING; - libvpx_test::VP9Decoder decoder(cfg, flags, 0); + libvpx_test::VP9Decoder decoder(cfg, flags); libvpx_test::MD5 md5; video.Begin(); @@ -74,8 +74,7 @@ string DecodeFileWithPause(const string &filename, int num_threads, } // Flush the decoder at the end of the video. - if (!video.cxdata()) - decoder.DecodeFrame(NULL, 0); + if (!video.cxdata()) decoder.DecodeFrame(NULL, 0); libvpx_test::DxDataIterator dec_iter = decoder.GetDxData(); const vpx_image_t *img; @@ -87,8 +86,8 @@ string DecodeFileWithPause(const string &filename, int num_threads, } } while (video.cxdata() != NULL); - EXPECT_EQ(in_frames, out_frames) << - "Input frame count does not match output frame count"; + EXPECT_EQ(in_frames, out_frames) + << "Input frame count does not match output frame count"; return string(md5.Get()); } @@ -108,12 +107,12 @@ TEST(VP9MultiThreadedFrameParallel, PauseSeekResume) { // vp90-2-07-frame_parallel-1.webm is a 40 frame video file with // one key frame for every ten frames. 
static const PauseFileList files[] = { - { "vp90-2-07-frame_parallel-1.webm", - "6ea7c3875d67252e7caf2bc6e75b36b1", 6 }, - { "vp90-2-07-frame_parallel-1.webm", - "4bb634160c7356a8d7d4299b6dc83a45", 12 }, - { "vp90-2-07-frame_parallel-1.webm", - "89772591e6ef461f9fa754f916c78ed8", 26 }, + { "vp90-2-07-frame_parallel-1.webm", "6ea7c3875d67252e7caf2bc6e75b36b1", + 6 }, + { "vp90-2-07-frame_parallel-1.webm", "4bb634160c7356a8d7d4299b6dc83a45", + 12 }, + { "vp90-2-07-frame_parallel-1.webm", "89772591e6ef461f9fa754f916c78ed8", + 26 }, { NULL, NULL, 0 }, }; DecodeFilesWithPause(files); @@ -137,7 +136,7 @@ string DecodeFile(const string &filename, int num_threads, vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t(); cfg.threads = num_threads; const vpx_codec_flags_t flags = VPX_CODEC_USE_FRAME_THREADING; - libvpx_test::VP9Decoder decoder(cfg, flags, 0); + libvpx_test::VP9Decoder decoder(cfg, flags); libvpx_test::MD5 md5; video.Begin(); @@ -155,8 +154,7 @@ string DecodeFile(const string &filename, int num_threads, video.Next(); // Flush the decoder at the end of the video. 
- if (!video.cxdata()) - decoder.DecodeFrame(NULL, 0); + if (!video.cxdata()) decoder.DecodeFrame(NULL, 0); libvpx_test::DxDataIterator dec_iter = decoder.GetDxData(); const vpx_image_t *img; @@ -168,8 +166,8 @@ string DecodeFile(const string &filename, int num_threads, } } while (video.cxdata() != NULL); - EXPECT_EQ(expected_frame_count, out_frames) << - "Input frame count does not match expected output frame count"; + EXPECT_EQ(expected_frame_count, out_frames) + << "Input frame count does not match expected output frame count"; return string(md5.Get()); } @@ -209,8 +207,7 @@ TEST(VP9MultiThreadedFrameParallel, InvalidFileTest) { TEST(VP9MultiThreadedFrameParallel, ValidFileTest) { static const FileList files[] = { #if CONFIG_VP9_HIGHBITDEPTH - { "vp92-2-20-10bit-yuv420.webm", - "a16b99df180c584e8db2ffeda987d293", 10 }, + { "vp92-2-20-10bit-yuv420.webm", "a16b99df180c584e8db2ffeda987d293", 10 }, #endif { NULL, NULL, 0 }, }; diff --git a/libs/libvpx/test/vp9_intrapred_test.cc b/libs/libvpx/test/vp9_intrapred_test.cc index 416f3c322e..1c4a3392e4 100644 --- a/libs/libvpx/test/vp9_intrapred_test.cc +++ b/libs/libvpx/test/vp9_intrapred_test.cc @@ -28,45 +28,42 @@ using libvpx_test::ACMRandom; const int count_test_block = 100000; -// Base class for VP9 intra prediction tests. 
-class VP9IntraPredBase { +typedef void (*IntraPred)(uint16_t *dst, ptrdiff_t stride, + const uint16_t *above, const uint16_t *left, int bps); + +struct IntraPredFunc { + IntraPredFunc(IntraPred pred = NULL, IntraPred ref = NULL, + int block_size_value = 0, int bit_depth_value = 0) + : pred_fn(pred), ref_fn(ref), block_size(block_size_value), + bit_depth(bit_depth_value) {} + + IntraPred pred_fn; + IntraPred ref_fn; + int block_size; + int bit_depth; +}; + +class VP9IntraPredTest : public ::testing::TestWithParam { public: - virtual ~VP9IntraPredBase() { libvpx_test::ClearSystemState(); } - - protected: - virtual void Predict() = 0; - - void CheckPrediction(int test_case_number, int *error_count) const { - // For each pixel ensure that the calculated value is the same as reference. - for (int y = 0; y < block_size_; y++) { - for (int x = 0; x < block_size_; x++) { - *error_count += ref_dst_[x + y * stride_] != dst_[x + y * stride_]; - if (*error_count == 1) { - ASSERT_EQ(ref_dst_[x + y * stride_], dst_[x + y * stride_]) - << " Failed on Test Case Number "<< test_case_number; - } - } - } - } - - void RunTest(uint16_t* left_col, uint16_t* above_data, - uint16_t* dst, uint16_t* ref_dst) { + void RunTest(uint16_t *left_col, uint16_t *above_data, uint16_t *dst, + uint16_t *ref_dst) { ACMRandom rnd(ACMRandom::DeterministicSeed()); + const int block_size = params_.block_size; + above_row_ = above_data + 16; left_col_ = left_col; dst_ = dst; ref_dst_ = ref_dst; - above_row_ = above_data + 16; int error_count = 0; for (int i = 0; i < count_test_block; ++i) { // Fill edges with random data, try first with saturated values. 
- for (int x = -1; x <= block_size_*2; x++) { + for (int x = -1; x <= block_size * 2; x++) { if (i == 0) { above_row_[x] = mask_; } else { above_row_[x] = rnd.Rand16() & mask_; } } - for (int y = 0; y < block_size_; y++) { + for (int y = 0; y < block_size; y++) { if (i == 0) { left_col_[y] = mask_; } else { @@ -79,153 +76,136 @@ class VP9IntraPredBase { ASSERT_EQ(0, error_count); } - int block_size_; + protected: + virtual void SetUp() { + params_ = GetParam(); + stride_ = params_.block_size * 3; + mask_ = (1 << params_.bit_depth) - 1; + } + + void Predict() { + const int bit_depth = params_.bit_depth; + params_.ref_fn(ref_dst_, stride_, above_row_, left_col_, bit_depth); + ASM_REGISTER_STATE_CHECK( + params_.pred_fn(dst_, stride_, above_row_, left_col_, bit_depth)); + } + + void CheckPrediction(int test_case_number, int *error_count) const { + // For each pixel ensure that the calculated value is the same as reference. + const int block_size = params_.block_size; + for (int y = 0; y < block_size; y++) { + for (int x = 0; x < block_size; x++) { + *error_count += ref_dst_[x + y * stride_] != dst_[x + y * stride_]; + if (*error_count == 1) { + ASSERT_EQ(ref_dst_[x + y * stride_], dst_[x + y * stride_]) + << " Failed on Test Case Number " << test_case_number; + } + } + } + } + uint16_t *above_row_; uint16_t *left_col_; uint16_t *dst_; uint16_t *ref_dst_; ptrdiff_t stride_; int mask_; -}; -typedef void (*intra_pred_fn_t)( - uint16_t *dst, ptrdiff_t stride, const uint16_t *above, - const uint16_t *left, int bps); -typedef std::tr1::tuple intra_pred_params_t; -class VP9IntraPredTest - : public VP9IntraPredBase, - public ::testing::TestWithParam { - - virtual void SetUp() { - pred_fn_ = GET_PARAM(0); - ref_fn_ = GET_PARAM(1); - block_size_ = GET_PARAM(2); - bit_depth_ = GET_PARAM(3); - stride_ = block_size_ * 3; - mask_ = (1 << bit_depth_) - 1; - } - - virtual void Predict() { - const uint16_t *const_above_row = above_row_; - const uint16_t *const_left_col = left_col_; - 
ref_fn_(ref_dst_, stride_, const_above_row, const_left_col, bit_depth_); - ASM_REGISTER_STATE_CHECK(pred_fn_(dst_, stride_, const_above_row, - const_left_col, bit_depth_)); - } - intra_pred_fn_t pred_fn_; - intra_pred_fn_t ref_fn_; - int bit_depth_; + IntraPredFunc params_; }; TEST_P(VP9IntraPredTest, IntraPredTests) { // max block size is 32 - DECLARE_ALIGNED(16, uint16_t, left_col[2*32]); - DECLARE_ALIGNED(16, uint16_t, above_data[2*32+32]); + DECLARE_ALIGNED(16, uint16_t, left_col[2 * 32]); + DECLARE_ALIGNED(16, uint16_t, above_data[2 * 32 + 32]); DECLARE_ALIGNED(16, uint16_t, dst[3 * 32 * 32]); DECLARE_ALIGNED(16, uint16_t, ref_dst[3 * 32 * 32]); RunTest(left_col, above_data, dst, ref_dst); } -using std::tr1::make_tuple; - #if HAVE_SSE2 #if CONFIG_VP9_HIGHBITDEPTH -#if CONFIG_USE_X86INC -INSTANTIATE_TEST_CASE_P(SSE2_TO_C_8, VP9IntraPredTest, - ::testing::Values( - make_tuple(&vpx_highbd_dc_predictor_32x32_sse2, - &vpx_highbd_dc_predictor_32x32_c, 32, 8), - make_tuple(&vpx_highbd_tm_predictor_16x16_sse2, - &vpx_highbd_tm_predictor_16x16_c, 16, 8), - make_tuple(&vpx_highbd_tm_predictor_32x32_sse2, - &vpx_highbd_tm_predictor_32x32_c, 32, 8), - make_tuple(&vpx_highbd_dc_predictor_4x4_sse2, - &vpx_highbd_dc_predictor_4x4_c, 4, 8), - make_tuple(&vpx_highbd_dc_predictor_8x8_sse2, - &vpx_highbd_dc_predictor_8x8_c, 8, 8), - make_tuple(&vpx_highbd_dc_predictor_16x16_sse2, - &vpx_highbd_dc_predictor_16x16_c, 16, 8), - make_tuple(&vpx_highbd_v_predictor_4x4_sse2, - &vpx_highbd_v_predictor_4x4_c, 4, 8), - make_tuple(&vpx_highbd_v_predictor_8x8_sse2, - &vpx_highbd_v_predictor_8x8_c, 8, 8), - make_tuple(&vpx_highbd_v_predictor_16x16_sse2, - &vpx_highbd_v_predictor_16x16_c, 16, 8), - make_tuple(&vpx_highbd_v_predictor_32x32_sse2, - &vpx_highbd_v_predictor_32x32_c, 32, 8), - make_tuple(&vpx_highbd_tm_predictor_4x4_sse2, - &vpx_highbd_tm_predictor_4x4_c, 4, 8), - make_tuple(&vpx_highbd_tm_predictor_8x8_sse2, - &vpx_highbd_tm_predictor_8x8_c, 8, 8))); +INSTANTIATE_TEST_CASE_P( + 
SSE2_TO_C_8, VP9IntraPredTest, + ::testing::Values(IntraPredFunc(&vpx_highbd_dc_predictor_32x32_sse2, + &vpx_highbd_dc_predictor_32x32_c, 32, 8), + IntraPredFunc(&vpx_highbd_tm_predictor_16x16_sse2, + &vpx_highbd_tm_predictor_16x16_c, 16, 8), + IntraPredFunc(&vpx_highbd_tm_predictor_32x32_sse2, + &vpx_highbd_tm_predictor_32x32_c, 32, 8), + IntraPredFunc(&vpx_highbd_dc_predictor_4x4_sse2, + &vpx_highbd_dc_predictor_4x4_c, 4, 8), + IntraPredFunc(&vpx_highbd_dc_predictor_8x8_sse2, + &vpx_highbd_dc_predictor_8x8_c, 8, 8), + IntraPredFunc(&vpx_highbd_dc_predictor_16x16_sse2, + &vpx_highbd_dc_predictor_16x16_c, 16, 8), + IntraPredFunc(&vpx_highbd_v_predictor_4x4_sse2, + &vpx_highbd_v_predictor_4x4_c, 4, 8), + IntraPredFunc(&vpx_highbd_v_predictor_8x8_sse2, + &vpx_highbd_v_predictor_8x8_c, 8, 8), + IntraPredFunc(&vpx_highbd_v_predictor_16x16_sse2, + &vpx_highbd_v_predictor_16x16_c, 16, 8), + IntraPredFunc(&vpx_highbd_v_predictor_32x32_sse2, + &vpx_highbd_v_predictor_32x32_c, 32, 8), + IntraPredFunc(&vpx_highbd_tm_predictor_4x4_sse2, + &vpx_highbd_tm_predictor_4x4_c, 4, 8), + IntraPredFunc(&vpx_highbd_tm_predictor_8x8_sse2, + &vpx_highbd_tm_predictor_8x8_c, 8, 8))); -INSTANTIATE_TEST_CASE_P(SSE2_TO_C_10, VP9IntraPredTest, - ::testing::Values( - make_tuple(&vpx_highbd_dc_predictor_32x32_sse2, - &vpx_highbd_dc_predictor_32x32_c, 32, - 10), - make_tuple(&vpx_highbd_tm_predictor_16x16_sse2, - &vpx_highbd_tm_predictor_16x16_c, 16, - 10), - make_tuple(&vpx_highbd_tm_predictor_32x32_sse2, - &vpx_highbd_tm_predictor_32x32_c, 32, - 10), - make_tuple(&vpx_highbd_dc_predictor_4x4_sse2, - &vpx_highbd_dc_predictor_4x4_c, 4, 10), - make_tuple(&vpx_highbd_dc_predictor_8x8_sse2, - &vpx_highbd_dc_predictor_8x8_c, 8, 10), - make_tuple(&vpx_highbd_dc_predictor_16x16_sse2, - &vpx_highbd_dc_predictor_16x16_c, 16, - 10), - make_tuple(&vpx_highbd_v_predictor_4x4_sse2, - &vpx_highbd_v_predictor_4x4_c, 4, 10), - make_tuple(&vpx_highbd_v_predictor_8x8_sse2, - &vpx_highbd_v_predictor_8x8_c, 8, 10), 
- make_tuple(&vpx_highbd_v_predictor_16x16_sse2, - &vpx_highbd_v_predictor_16x16_c, 16, - 10), - make_tuple(&vpx_highbd_v_predictor_32x32_sse2, - &vpx_highbd_v_predictor_32x32_c, 32, - 10), - make_tuple(&vpx_highbd_tm_predictor_4x4_sse2, - &vpx_highbd_tm_predictor_4x4_c, 4, 10), - make_tuple(&vpx_highbd_tm_predictor_8x8_sse2, - &vpx_highbd_tm_predictor_8x8_c, 8, 10))); +INSTANTIATE_TEST_CASE_P( + SSE2_TO_C_10, VP9IntraPredTest, + ::testing::Values(IntraPredFunc(&vpx_highbd_dc_predictor_32x32_sse2, + &vpx_highbd_dc_predictor_32x32_c, 32, 10), + IntraPredFunc(&vpx_highbd_tm_predictor_16x16_sse2, + &vpx_highbd_tm_predictor_16x16_c, 16, 10), + IntraPredFunc(&vpx_highbd_tm_predictor_32x32_sse2, + &vpx_highbd_tm_predictor_32x32_c, 32, 10), + IntraPredFunc(&vpx_highbd_dc_predictor_4x4_sse2, + &vpx_highbd_dc_predictor_4x4_c, 4, 10), + IntraPredFunc(&vpx_highbd_dc_predictor_8x8_sse2, + &vpx_highbd_dc_predictor_8x8_c, 8, 10), + IntraPredFunc(&vpx_highbd_dc_predictor_16x16_sse2, + &vpx_highbd_dc_predictor_16x16_c, 16, 10), + IntraPredFunc(&vpx_highbd_v_predictor_4x4_sse2, + &vpx_highbd_v_predictor_4x4_c, 4, 10), + IntraPredFunc(&vpx_highbd_v_predictor_8x8_sse2, + &vpx_highbd_v_predictor_8x8_c, 8, 10), + IntraPredFunc(&vpx_highbd_v_predictor_16x16_sse2, + &vpx_highbd_v_predictor_16x16_c, 16, 10), + IntraPredFunc(&vpx_highbd_v_predictor_32x32_sse2, + &vpx_highbd_v_predictor_32x32_c, 32, 10), + IntraPredFunc(&vpx_highbd_tm_predictor_4x4_sse2, + &vpx_highbd_tm_predictor_4x4_c, 4, 10), + IntraPredFunc(&vpx_highbd_tm_predictor_8x8_sse2, + &vpx_highbd_tm_predictor_8x8_c, 8, 10))); -INSTANTIATE_TEST_CASE_P(SSE2_TO_C_12, VP9IntraPredTest, - ::testing::Values( - make_tuple(&vpx_highbd_dc_predictor_32x32_sse2, - &vpx_highbd_dc_predictor_32x32_c, 32, - 12), - make_tuple(&vpx_highbd_tm_predictor_16x16_sse2, - &vpx_highbd_tm_predictor_16x16_c, 16, - 12), - make_tuple(&vpx_highbd_tm_predictor_32x32_sse2, - &vpx_highbd_tm_predictor_32x32_c, 32, - 12), - 
make_tuple(&vpx_highbd_dc_predictor_4x4_sse2, - &vpx_highbd_dc_predictor_4x4_c, 4, 12), - make_tuple(&vpx_highbd_dc_predictor_8x8_sse2, - &vpx_highbd_dc_predictor_8x8_c, 8, 12), - make_tuple(&vpx_highbd_dc_predictor_16x16_sse2, - &vpx_highbd_dc_predictor_16x16_c, 16, - 12), - make_tuple(&vpx_highbd_v_predictor_4x4_sse2, - &vpx_highbd_v_predictor_4x4_c, 4, 12), - make_tuple(&vpx_highbd_v_predictor_8x8_sse2, - &vpx_highbd_v_predictor_8x8_c, 8, 12), - make_tuple(&vpx_highbd_v_predictor_16x16_sse2, - &vpx_highbd_v_predictor_16x16_c, 16, - 12), - make_tuple(&vpx_highbd_v_predictor_32x32_sse2, - &vpx_highbd_v_predictor_32x32_c, 32, - 12), - make_tuple(&vpx_highbd_tm_predictor_4x4_sse2, - &vpx_highbd_tm_predictor_4x4_c, 4, 12), - make_tuple(&vpx_highbd_tm_predictor_8x8_sse2, - &vpx_highbd_tm_predictor_8x8_c, 8, 12))); +INSTANTIATE_TEST_CASE_P( + SSE2_TO_C_12, VP9IntraPredTest, + ::testing::Values(IntraPredFunc(&vpx_highbd_dc_predictor_32x32_sse2, + &vpx_highbd_dc_predictor_32x32_c, 32, 12), + IntraPredFunc(&vpx_highbd_tm_predictor_16x16_sse2, + &vpx_highbd_tm_predictor_16x16_c, 16, 12), + IntraPredFunc(&vpx_highbd_tm_predictor_32x32_sse2, + &vpx_highbd_tm_predictor_32x32_c, 32, 12), + IntraPredFunc(&vpx_highbd_dc_predictor_4x4_sse2, + &vpx_highbd_dc_predictor_4x4_c, 4, 12), + IntraPredFunc(&vpx_highbd_dc_predictor_8x8_sse2, + &vpx_highbd_dc_predictor_8x8_c, 8, 12), + IntraPredFunc(&vpx_highbd_dc_predictor_16x16_sse2, + &vpx_highbd_dc_predictor_16x16_c, 16, 12), + IntraPredFunc(&vpx_highbd_v_predictor_4x4_sse2, + &vpx_highbd_v_predictor_4x4_c, 4, 12), + IntraPredFunc(&vpx_highbd_v_predictor_8x8_sse2, + &vpx_highbd_v_predictor_8x8_c, 8, 12), + IntraPredFunc(&vpx_highbd_v_predictor_16x16_sse2, + &vpx_highbd_v_predictor_16x16_c, 16, 12), + IntraPredFunc(&vpx_highbd_v_predictor_32x32_sse2, + &vpx_highbd_v_predictor_32x32_c, 32, 12), + IntraPredFunc(&vpx_highbd_tm_predictor_4x4_sse2, + &vpx_highbd_tm_predictor_4x4_c, 4, 12), + IntraPredFunc(&vpx_highbd_tm_predictor_8x8_sse2, + 
&vpx_highbd_tm_predictor_8x8_c, 8, 12))); -#endif // CONFIG_USE_X86INC #endif // CONFIG_VP9_HIGHBITDEPTH #endif // HAVE_SSE2 } // namespace diff --git a/libs/libvpx/test/vp9_lossless_test.cc b/libs/libvpx/test/vp9_lossless_test.cc index 09c1070c6b..703b55e9bd 100644 --- a/libs/libvpx/test/vp9_lossless_test.cc +++ b/libs/libvpx/test/vp9_lossless_test.cc @@ -21,15 +21,13 @@ namespace { const int kMaxPsnr = 100; -class LosslessTest : public ::libvpx_test::EncoderTest, - public ::libvpx_test::CodecTestWithParam { +class LosslessTest + : public ::libvpx_test::EncoderTest, + public ::libvpx_test::CodecTestWithParam { protected: LosslessTest() - : EncoderTest(GET_PARAM(0)), - psnr_(kMaxPsnr), - nframes_(0), - encoding_mode_(GET_PARAM(1)) { - } + : EncoderTest(GET_PARAM(0)), psnr_(kMaxPsnr), nframes_(0), + encoding_mode_(GET_PARAM(1)) {} virtual ~LosslessTest() {} @@ -55,13 +53,10 @@ class LosslessTest : public ::libvpx_test::EncoderTest, } virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) { - if (pkt->data.psnr.psnr[0] < psnr_) - psnr_= pkt->data.psnr.psnr[0]; + if (pkt->data.psnr.psnr[0] < psnr_) psnr_ = pkt->data.psnr.psnr[0]; } - double GetMinPsnr() const { - return psnr_; - } + double GetMinPsnr() const { return psnr_; } private: double psnr_; @@ -127,8 +122,4 @@ VP9_INSTANTIATE_TEST_CASE(LosslessTest, ::testing::Values(::libvpx_test::kRealTime, ::libvpx_test::kOnePassGood, ::libvpx_test::kTwoPassGood)); - -VP10_INSTANTIATE_TEST_CASE(LosslessTest, - ::testing::Values(::libvpx_test::kOnePassGood, - ::libvpx_test::kTwoPassGood)); } // namespace diff --git a/libs/libvpx/test/vp9_quantize_test.cc b/libs/libvpx/test/vp9_quantize_test.cc index 81d31fd1b2..4643895021 100644 --- a/libs/libvpx/test/vp9_quantize_test.cc +++ b/libs/libvpx/test/vp9_quantize_test.cc @@ -34,9 +34,8 @@ const int number_of_iterations = 100; typedef void (*QuantizeFunc)(const tran_low_t *coeff, intptr_t count, int skip_block, const int16_t *zbin, const int16_t *round, const int16_t *quant, - 
const int16_t *quant_shift, - tran_low_t *qcoeff, tran_low_t *dqcoeff, - const int16_t *dequant, + const int16_t *quant_shift, tran_low_t *qcoeff, + tran_low_t *dqcoeff, const int16_t *dequant, uint16_t *eob, const int16_t *scan, const int16_t *iscan); typedef std::tr1::tuple @@ -46,9 +45,9 @@ class VP9QuantizeTest : public ::testing::TestWithParam { public: virtual ~VP9QuantizeTest() {} virtual void SetUp() { - quantize_op_ = GET_PARAM(0); + quantize_op_ = GET_PARAM(0); ref_quantize_op_ = GET_PARAM(1); - bit_depth_ = GET_PARAM(2); + bit_depth_ = GET_PARAM(2); mask_ = (1 << bit_depth_) - 1; } @@ -65,9 +64,9 @@ class VP9Quantize32Test : public ::testing::TestWithParam { public: virtual ~VP9Quantize32Test() {} virtual void SetUp() { - quantize_op_ = GET_PARAM(0); + quantize_op_ = GET_PARAM(0); ref_quantize_op_ = GET_PARAM(1); - bit_depth_ = GET_PARAM(2); + bit_depth_ = GET_PARAM(2); mask_ = (1 << bit_depth_) - 1; } @@ -106,10 +105,10 @@ TEST_P(VP9QuantizeTest, OperationCheck) { *eob_ptr = rnd.Rand16(); *ref_eob_ptr = *eob_ptr; for (int j = 0; j < count; j++) { - coeff_ptr[j] = rnd.Rand16()&mask_; + coeff_ptr[j] = rnd.Rand16() & mask_; } for (int j = 0; j < 2; j++) { - zbin_ptr[j] = rnd.Rand16()&mask_; + zbin_ptr[j] = rnd.Rand16() & mask_; round_ptr[j] = rnd.Rand16(); quant_ptr[j] = rnd.Rand16(); quant_shift_ptr[j] = rnd.Rand16(); @@ -117,16 +116,15 @@ TEST_P(VP9QuantizeTest, OperationCheck) { } ref_quantize_op_(coeff_ptr, count, skip_block, zbin_ptr, round_ptr, quant_ptr, quant_shift_ptr, ref_qcoeff_ptr, - ref_dqcoeff_ptr, dequant_ptr, - ref_eob_ptr, scan_order->scan, scan_order->iscan); - ASM_REGISTER_STATE_CHECK(quantize_op_(coeff_ptr, count, skip_block, - zbin_ptr, round_ptr, quant_ptr, - quant_shift_ptr, qcoeff_ptr, - dqcoeff_ptr, dequant_ptr, eob_ptr, - scan_order->scan, scan_order->iscan)); + ref_dqcoeff_ptr, dequant_ptr, ref_eob_ptr, + scan_order->scan, scan_order->iscan); + ASM_REGISTER_STATE_CHECK(quantize_op_( + coeff_ptr, count, skip_block, zbin_ptr, 
round_ptr, quant_ptr, + quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr, dequant_ptr, eob_ptr, + scan_order->scan, scan_order->iscan)); for (int j = 0; j < sz; ++j) { - err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) | - (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]); + err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) | + (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]); } err_count += (*ref_eob_ptr != *eob_ptr); if (err_count && !err_count_total) { @@ -165,10 +163,10 @@ TEST_P(VP9Quantize32Test, OperationCheck) { *eob_ptr = rnd.Rand16(); *ref_eob_ptr = *eob_ptr; for (int j = 0; j < count; j++) { - coeff_ptr[j] = rnd.Rand16()&mask_; + coeff_ptr[j] = rnd.Rand16() & mask_; } for (int j = 0; j < 2; j++) { - zbin_ptr[j] = rnd.Rand16()&mask_; + zbin_ptr[j] = rnd.Rand16() & mask_; round_ptr[j] = rnd.Rand16(); quant_ptr[j] = rnd.Rand16(); quant_shift_ptr[j] = rnd.Rand16(); @@ -176,16 +174,15 @@ TEST_P(VP9Quantize32Test, OperationCheck) { } ref_quantize_op_(coeff_ptr, count, skip_block, zbin_ptr, round_ptr, quant_ptr, quant_shift_ptr, ref_qcoeff_ptr, - ref_dqcoeff_ptr, dequant_ptr, - ref_eob_ptr, scan_order->scan, scan_order->iscan); - ASM_REGISTER_STATE_CHECK(quantize_op_(coeff_ptr, count, skip_block, - zbin_ptr, round_ptr, quant_ptr, - quant_shift_ptr, qcoeff_ptr, - dqcoeff_ptr, dequant_ptr, eob_ptr, - scan_order->scan, scan_order->iscan)); + ref_dqcoeff_ptr, dequant_ptr, ref_eob_ptr, + scan_order->scan, scan_order->iscan); + ASM_REGISTER_STATE_CHECK(quantize_op_( + coeff_ptr, count, skip_block, zbin_ptr, round_ptr, quant_ptr, + quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr, dequant_ptr, eob_ptr, + scan_order->scan, scan_order->iscan)); for (int j = 0; j < sz; ++j) { - err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) | - (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]); + err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) | + (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]); } err_count += (*ref_eob_ptr != *eob_ptr); if (err_count && !err_count_total) { @@ -227,10 +224,10 @@ TEST_P(VP9QuantizeTest, EOBCheck) { for 
(int j = 0; j < count; j++) { coeff_ptr[j] = 0; } - coeff_ptr[rnd(count)] = rnd.Rand16()&mask_; - coeff_ptr[rnd(count)] = rnd.Rand16()&mask_; + coeff_ptr[rnd(count)] = rnd.Rand16() & mask_; + coeff_ptr[rnd(count)] = rnd.Rand16() & mask_; for (int j = 0; j < 2; j++) { - zbin_ptr[j] = rnd.Rand16()&mask_; + zbin_ptr[j] = rnd.Rand16() & mask_; round_ptr[j] = rnd.Rand16(); quant_ptr[j] = rnd.Rand16(); quant_shift_ptr[j] = rnd.Rand16(); @@ -239,17 +236,16 @@ TEST_P(VP9QuantizeTest, EOBCheck) { ref_quantize_op_(coeff_ptr, count, skip_block, zbin_ptr, round_ptr, quant_ptr, quant_shift_ptr, ref_qcoeff_ptr, - ref_dqcoeff_ptr, dequant_ptr, - ref_eob_ptr, scan_order->scan, scan_order->iscan); - ASM_REGISTER_STATE_CHECK(quantize_op_(coeff_ptr, count, skip_block, - zbin_ptr, round_ptr, quant_ptr, - quant_shift_ptr, qcoeff_ptr, - dqcoeff_ptr, dequant_ptr, eob_ptr, - scan_order->scan, scan_order->iscan)); + ref_dqcoeff_ptr, dequant_ptr, ref_eob_ptr, + scan_order->scan, scan_order->iscan); + ASM_REGISTER_STATE_CHECK(quantize_op_( + coeff_ptr, count, skip_block, zbin_ptr, round_ptr, quant_ptr, + quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr, dequant_ptr, eob_ptr, + scan_order->scan, scan_order->iscan)); for (int j = 0; j < sz; ++j) { - err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) | - (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]); + err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) | + (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]); } err_count += (*ref_eob_ptr != *eob_ptr); if (err_count && !err_count_total) { @@ -291,10 +287,10 @@ TEST_P(VP9Quantize32Test, EOBCheck) { coeff_ptr[j] = 0; } // Two random entries - coeff_ptr[rnd(count)] = rnd.Rand16()&mask_; - coeff_ptr[rnd(count)] = rnd.Rand16()&mask_; + coeff_ptr[rnd(count)] = rnd.Rand16() & mask_; + coeff_ptr[rnd(count)] = rnd.Rand16() & mask_; for (int j = 0; j < 2; j++) { - zbin_ptr[j] = rnd.Rand16()&mask_; + zbin_ptr[j] = rnd.Rand16() & mask_; round_ptr[j] = rnd.Rand16(); quant_ptr[j] = rnd.Rand16(); quant_shift_ptr[j] = rnd.Rand16(); @@ 
-303,17 +299,16 @@ TEST_P(VP9Quantize32Test, EOBCheck) { ref_quantize_op_(coeff_ptr, count, skip_block, zbin_ptr, round_ptr, quant_ptr, quant_shift_ptr, ref_qcoeff_ptr, - ref_dqcoeff_ptr, dequant_ptr, - ref_eob_ptr, scan_order->scan, scan_order->iscan); - ASM_REGISTER_STATE_CHECK(quantize_op_(coeff_ptr, count, skip_block, - zbin_ptr, round_ptr, quant_ptr, - quant_shift_ptr, qcoeff_ptr, - dqcoeff_ptr, dequant_ptr, eob_ptr, - scan_order->scan, scan_order->iscan)); + ref_dqcoeff_ptr, dequant_ptr, ref_eob_ptr, + scan_order->scan, scan_order->iscan); + ASM_REGISTER_STATE_CHECK(quantize_op_( + coeff_ptr, count, skip_block, zbin_ptr, round_ptr, quant_ptr, + quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr, dequant_ptr, eob_ptr, + scan_order->scan, scan_order->iscan)); for (int j = 0; j < sz; ++j) { - err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) | - (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]); + err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) | + (ref_dqcoeff_ptr[j] != dqcoeff_ptr[j]); } err_count += (*ref_eob_ptr != *eob_ptr); if (err_count && !err_count_total) { @@ -330,22 +325,20 @@ using std::tr1::make_tuple; #if HAVE_SSE2 INSTANTIATE_TEST_CASE_P( SSE2, VP9QuantizeTest, - ::testing::Values( - make_tuple(&vpx_highbd_quantize_b_sse2, - &vpx_highbd_quantize_b_c, VPX_BITS_8), - make_tuple(&vpx_highbd_quantize_b_sse2, - &vpx_highbd_quantize_b_c, VPX_BITS_10), - make_tuple(&vpx_highbd_quantize_b_sse2, - &vpx_highbd_quantize_b_c, VPX_BITS_12))); + ::testing::Values(make_tuple(&vpx_highbd_quantize_b_sse2, + &vpx_highbd_quantize_b_c, VPX_BITS_8), + make_tuple(&vpx_highbd_quantize_b_sse2, + &vpx_highbd_quantize_b_c, VPX_BITS_10), + make_tuple(&vpx_highbd_quantize_b_sse2, + &vpx_highbd_quantize_b_c, VPX_BITS_12))); INSTANTIATE_TEST_CASE_P( SSE2, VP9Quantize32Test, - ::testing::Values( - make_tuple(&vpx_highbd_quantize_b_32x32_sse2, - &vpx_highbd_quantize_b_32x32_c, VPX_BITS_8), - make_tuple(&vpx_highbd_quantize_b_32x32_sse2, - &vpx_highbd_quantize_b_32x32_c, VPX_BITS_10), - 
make_tuple(&vpx_highbd_quantize_b_32x32_sse2, - &vpx_highbd_quantize_b_32x32_c, VPX_BITS_12))); + ::testing::Values(make_tuple(&vpx_highbd_quantize_b_32x32_sse2, + &vpx_highbd_quantize_b_32x32_c, VPX_BITS_8), + make_tuple(&vpx_highbd_quantize_b_32x32_sse2, + &vpx_highbd_quantize_b_32x32_c, VPX_BITS_10), + make_tuple(&vpx_highbd_quantize_b_32x32_sse2, + &vpx_highbd_quantize_b_32x32_c, VPX_BITS_12))); #endif // HAVE_SSE2 #endif // CONFIG_VP9_HIGHBITDEPTH } // namespace diff --git a/libs/libvpx/test/vp9_skip_loopfilter_test.cc b/libs/libvpx/test/vp9_skip_loopfilter_test.cc index b0cc7ba41c..e847bbddf3 100644 --- a/libs/libvpx/test/vp9_skip_loopfilter_test.cc +++ b/libs/libvpx/test/vp9_skip_loopfilter_test.cc @@ -24,14 +24,10 @@ const char kVp9Md5File[] = "vp90-2-08-tile_1x8_frame_parallel.webm.md5"; // Class for testing shutting off the loop filter. class SkipLoopFilterTest { public: - SkipLoopFilterTest() - : video_(NULL), - decoder_(NULL), - md5_file_(NULL) {} + SkipLoopFilterTest() : video_(NULL), decoder_(NULL), md5_file_(NULL) {} ~SkipLoopFilterTest() { - if (md5_file_ != NULL) - fclose(md5_file_); + if (md5_file_ != NULL) fclose(md5_file_); delete decoder_; delete video_; } @@ -46,8 +42,7 @@ class SkipLoopFilterTest { video_->Begin(); vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t(); - if (num_threads > 0) - cfg.threads = num_threads; + if (num_threads > 0) cfg.threads = num_threads; decoder_ = new libvpx_test::VP9Decoder(cfg, 0); ASSERT_TRUE(decoder_ != NULL); @@ -73,8 +68,7 @@ class SkipLoopFilterTest { for (; video_->cxdata() != NULL; video_->Next()) { const vpx_codec_err_t res = decoder_->DecodeFrame(video_->cxdata(), video_->frame_size()); - if (res != VPX_CODEC_OK) - return res; + if (res != VPX_CODEC_OK) return res; ReadMd5(); } return VPX_CODEC_OK; @@ -92,7 +86,7 @@ class SkipLoopFilterTest { void OpenMd5File(const std::string &md5_file_name) { md5_file_ = libvpx_test::OpenTestDataFile(md5_file_name); ASSERT_TRUE(md5_file_ != NULL) << "MD5 file open 
failed. Filename: " - << md5_file_name; + << md5_file_name; } // Reads the next line of the MD5 file. diff --git a/libs/libvpx/test/vp9_subtract_test.cc b/libs/libvpx/test/vp9_subtract_test.cc index 3cad4d7e6d..19ed304315 100644 --- a/libs/libvpx/test/vp9_subtract_test.cc +++ b/libs/libvpx/test/vp9_subtract_test.cc @@ -19,18 +19,16 @@ #include "vp9/common/vp9_blockd.h" #include "vpx_mem/vpx_mem.h" -typedef void (*SubtractFunc)(int rows, int cols, - int16_t *diff_ptr, ptrdiff_t diff_stride, - const uint8_t *src_ptr, ptrdiff_t src_stride, - const uint8_t *pred_ptr, ptrdiff_t pred_stride); +typedef void (*SubtractFunc)(int rows, int cols, int16_t *diff_ptr, + ptrdiff_t diff_stride, const uint8_t *src_ptr, + ptrdiff_t src_stride, const uint8_t *pred_ptr, + ptrdiff_t pred_stride); namespace vp9 { class VP9SubtractBlockTest : public ::testing::TestWithParam { public: - virtual void TearDown() { - libvpx_test::ClearSystemState(); - } + virtual void TearDown() { libvpx_test::ClearSystemState(); } }; using libvpx_test::ACMRandom; @@ -47,7 +45,7 @@ TEST_P(VP9SubtractBlockTest, SimpleSubtract) { vpx_memalign(16, sizeof(*diff) * block_width * block_height * 2)); uint8_t *pred = reinterpret_cast( vpx_memalign(16, block_width * block_height * 2)); - uint8_t *src = reinterpret_cast( + uint8_t *src = reinterpret_cast( vpx_memalign(16, block_width * block_height * 2)); for (int n = 0; n < 100; n++) { @@ -58,29 +56,26 @@ TEST_P(VP9SubtractBlockTest, SimpleSubtract) { } } - GetParam()(block_height, block_width, diff, block_width, - src, block_width, pred, block_width); + GetParam()(block_height, block_width, diff, block_width, src, block_width, + pred, block_width); for (int r = 0; r < block_height; ++r) { for (int c = 0; c < block_width; ++c) { EXPECT_EQ(diff[r * block_width + c], - (src[r * block_width + c] - - pred[r * block_width + c])) << "r = " << r - << ", c = " << c - << ", bs = " << bsize; + (src[r * block_width + c] - pred[r * block_width + c])) + << "r = " << r << ", c = " 
<< c << ", bs = " << bsize; } } - GetParam()(block_height, block_width, diff, block_width * 2, - src, block_width * 2, pred, block_width * 2); + GetParam()(block_height, block_width, diff, block_width * 2, src, + block_width * 2, pred, block_width * 2); for (int r = 0; r < block_height; ++r) { for (int c = 0; c < block_width; ++c) { - EXPECT_EQ(diff[r * block_width * 2 + c], - (src[r * block_width * 2 + c] - - pred[r * block_width * 2 + c])) << "r = " << r - << ", c = " << c - << ", bs = " << bsize; + EXPECT_EQ( + diff[r * block_width * 2 + c], + (src[r * block_width * 2 + c] - pred[r * block_width * 2 + c])) + << "r = " << r << ", c = " << c << ", bs = " << bsize; } } } @@ -93,7 +88,7 @@ TEST_P(VP9SubtractBlockTest, SimpleSubtract) { INSTANTIATE_TEST_CASE_P(C, VP9SubtractBlockTest, ::testing::Values(vpx_subtract_block_c)); -#if HAVE_SSE2 && CONFIG_USE_X86INC +#if HAVE_SSE2 INSTANTIATE_TEST_CASE_P(SSE2, VP9SubtractBlockTest, ::testing::Values(vpx_subtract_block_sse2)); #endif diff --git a/libs/libvpx/test/vp9_thread_test.cc b/libs/libvpx/test/vp9_thread_test.cc index 92e4b9688b..3e3fd25acb 100644 --- a/libs/libvpx/test/vp9_thread_test.cc +++ b/libs/libvpx/test/vp9_thread_test.cc @@ -27,15 +27,11 @@ using std::string; class VPxWorkerThreadTest : public ::testing::TestWithParam { protected: virtual ~VPxWorkerThreadTest() {} - virtual void SetUp() { - vpx_get_worker_interface()->init(&worker_); - } + virtual void SetUp() { vpx_get_worker_interface()->init(&worker_); } - virtual void TearDown() { - vpx_get_worker_interface()->end(&worker_); - } + virtual void TearDown() { vpx_get_worker_interface()->end(&worker_); } - void Run(VPxWorker* worker) { + void Run(VPxWorker *worker) { const bool synchronous = GetParam(); if (synchronous) { vpx_get_worker_interface()->execute(worker); @@ -47,10 +43,10 @@ class VPxWorkerThreadTest : public ::testing::TestWithParam { VPxWorker worker_; }; -int ThreadHook(void* data, void* return_value) { - int* const hook_data = 
reinterpret_cast(data); +int ThreadHook(void *data, void *return_value) { + int *const hook_data = reinterpret_cast(data); *hook_data = 5; - return *reinterpret_cast(return_value); + return *reinterpret_cast(return_value); } TEST_P(VPxWorkerThreadTest, HookSuccess) { @@ -159,7 +155,7 @@ struct FileList { }; // Decodes |filename| with |num_threads|. Returns the md5 of the decoded frames. -string DecodeFile(const string& filename, int num_threads) { +string DecodeFile(const string &filename, int num_threads) { libvpx_test::WebMVideoSource video(filename); video.Init(); @@ -191,8 +187,8 @@ void DecodeFiles(const FileList files[]) { for (const FileList *iter = files; iter->name != NULL; ++iter) { SCOPED_TRACE(iter->name); for (int t = 1; t <= 8; ++t) { - EXPECT_EQ(iter->expected_md5, DecodeFile(iter->name, t)) - << "threads = " << t; + EXPECT_EQ(iter->expected_md5, DecodeFile(iter->name, t)) << "threads = " + << t; } } } @@ -242,15 +238,13 @@ TEST(VP9DecodeMultiThreadedTest, NoTilesNonFrameParallel) { } TEST(VP9DecodeMultiThreadedTest, FrameParallel) { - static const FileList files[] = { - { "vp90-2-08-tile_1x2_frame_parallel.webm", - "68ede6abd66bae0a2edf2eb9232241b6" }, - { "vp90-2-08-tile_1x4_frame_parallel.webm", - "368ebc6ebf3a5e478d85b2c3149b2848" }, - { "vp90-2-08-tile_1x8_frame_parallel.webm", - "17e439da2388aff3a0f69cb22579c6c1" }, - { NULL, NULL } - }; + static const FileList files[] = { { "vp90-2-08-tile_1x2_frame_parallel.webm", + "68ede6abd66bae0a2edf2eb9232241b6" }, + { "vp90-2-08-tile_1x4_frame_parallel.webm", + "368ebc6ebf3a5e478d85b2c3149b2848" }, + { "vp90-2-08-tile_1x8_frame_parallel.webm", + "17e439da2388aff3a0f69cb22579c6c1" }, + { NULL, NULL } }; DecodeFiles(files); } diff --git a/libs/libvpx/test/vpx_scale_test.cc b/libs/libvpx/test/vpx_scale_test.cc index ef716fc80f..81773fe5b6 100644 --- a/libs/libvpx/test/vpx_scale_test.cc +++ b/libs/libvpx/test/vpx_scale_test.cc @@ -25,9 +25,7 @@ typedef void (*CopyFrameFunc)(const YV12_BUFFER_CONFIG 
*src_ybf, class VpxScaleBase { public: - virtual ~VpxScaleBase() { - libvpx_test::ClearSystemState(); - } + virtual ~VpxScaleBase() { libvpx_test::ClearSystemState(); } void ResetImage(int width, int height) { width_ = width; @@ -105,28 +103,22 @@ class VpxScaleBase { } uint8_t *bottom = left + (crop_height * stride); - for (int y = 0; y < bottom_extend; ++y) { + for (int y = 0; y < bottom_extend; ++y) { memcpy(bottom, left + (crop_height - 1) * stride, extend_width); bottom += stride; } } void ReferenceExtendBorder() { - ExtendPlane(ref_img_.y_buffer, - ref_img_.y_crop_width, ref_img_.y_crop_height, - ref_img_.y_width, ref_img_.y_height, - ref_img_.y_stride, - ref_img_.border); - ExtendPlane(ref_img_.u_buffer, - ref_img_.uv_crop_width, ref_img_.uv_crop_height, - ref_img_.uv_width, ref_img_.uv_height, - ref_img_.uv_stride, - ref_img_.border / 2); - ExtendPlane(ref_img_.v_buffer, - ref_img_.uv_crop_width, ref_img_.uv_crop_height, - ref_img_.uv_width, ref_img_.uv_height, - ref_img_.uv_stride, - ref_img_.border / 2); + ExtendPlane(ref_img_.y_buffer, ref_img_.y_crop_width, + ref_img_.y_crop_height, ref_img_.y_width, ref_img_.y_height, + ref_img_.y_stride, ref_img_.border); + ExtendPlane(ref_img_.u_buffer, ref_img_.uv_crop_width, + ref_img_.uv_crop_height, ref_img_.uv_width, ref_img_.uv_height, + ref_img_.uv_stride, ref_img_.border / 2); + ExtendPlane(ref_img_.v_buffer, ref_img_.uv_crop_width, + ref_img_.uv_crop_height, ref_img_.uv_width, ref_img_.uv_height, + ref_img_.uv_stride, ref_img_.border / 2); } void ReferenceCopyFrame() { @@ -172,13 +164,9 @@ class ExtendBorderTest virtual ~ExtendBorderTest() {} protected: - virtual void SetUp() { - extend_fn_ = GetParam(); - } + virtual void SetUp() { extend_fn_ = GetParam(); } - void ExtendBorder() { - ASM_REGISTER_STATE_CHECK(extend_fn_(&img_)); - } + void ExtendBorder() { ASM_REGISTER_STATE_CHECK(extend_fn_(&img_)); } void RunTest() { #if ARCH_ARM @@ -187,7 +175,7 @@ class ExtendBorderTest #else static const int 
kNumSizesToTest = 7; #endif - static const int kSizesToTest[] = {1, 15, 33, 145, 512, 1025, 16383}; + static const int kSizesToTest[] = { 1, 15, 33, 145, 512, 1025, 16383 }; for (int h = 0; h < kNumSizesToTest; ++h) { for (int w = 0; w < kNumSizesToTest; ++w) { ResetImage(kSizesToTest[w], kSizesToTest[h]); @@ -202,23 +190,18 @@ class ExtendBorderTest ExtendFrameBorderFunc extend_fn_; }; -TEST_P(ExtendBorderTest, ExtendBorder) { - ASSERT_NO_FATAL_FAILURE(RunTest()); -} +TEST_P(ExtendBorderTest, ExtendBorder) { ASSERT_NO_FATAL_FAILURE(RunTest()); } INSTANTIATE_TEST_CASE_P(C, ExtendBorderTest, ::testing::Values(vp8_yv12_extend_frame_borders_c)); -class CopyFrameTest - : public VpxScaleBase, - public ::testing::TestWithParam { +class CopyFrameTest : public VpxScaleBase, + public ::testing::TestWithParam { public: virtual ~CopyFrameTest() {} protected: - virtual void SetUp() { - copy_frame_fn_ = GetParam(); - } + virtual void SetUp() { copy_frame_fn_ = GetParam(); } void CopyFrame() { ASM_REGISTER_STATE_CHECK(copy_frame_fn_(&img_, &cpy_img_)); @@ -231,7 +214,7 @@ class CopyFrameTest #else static const int kNumSizesToTest = 7; #endif - static const int kSizesToTest[] = {1, 15, 33, 145, 512, 1025, 16383}; + static const int kSizesToTest[] = { 1, 15, 33, 145, 512, 1025, 16383 }; for (int h = 0; h < kNumSizesToTest; ++h) { for (int w = 0; w < kNumSizesToTest; ++w) { ResetImage(kSizesToTest[w], kSizesToTest[h]); @@ -246,9 +229,7 @@ class CopyFrameTest CopyFrameFunc copy_frame_fn_; }; -TEST_P(CopyFrameTest, CopyFrame) { - ASSERT_NO_FATAL_FAILURE(RunTest()); -} +TEST_P(CopyFrameTest, CopyFrame) { ASSERT_NO_FATAL_FAILURE(RunTest()); } INSTANTIATE_TEST_CASE_P(C, CopyFrameTest, ::testing::Values(vp8_yv12_copy_frame_c)); diff --git a/libs/libvpx/test/webm_video_source.h b/libs/libvpx/test/webm_video_source.h index 650bc52dce..53713618ee 100644 --- a/libs/libvpx/test/webm_video_source.h +++ b/libs/libvpx/test/webm_video_source.h @@ -25,30 +25,23 @@ namespace libvpx_test { class 
WebMVideoSource : public CompressedVideoSource { public: explicit WebMVideoSource(const std::string &file_name) - : file_name_(file_name), - vpx_ctx_(new VpxInputContext()), - webm_ctx_(new WebmInputContext()), - buf_(NULL), - buf_sz_(0), - frame_(0), - end_of_file_(false) { - } + : file_name_(file_name), vpx_ctx_(new VpxInputContext()), + webm_ctx_(new WebmInputContext()), buf_(NULL), buf_sz_(0), frame_(0), + end_of_file_(false) {} virtual ~WebMVideoSource() { - if (vpx_ctx_->file != NULL) - fclose(vpx_ctx_->file); + if (vpx_ctx_->file != NULL) fclose(vpx_ctx_->file); webm_free(webm_ctx_); delete vpx_ctx_; delete webm_ctx_; } - virtual void Init() { - } + virtual void Init() {} virtual void Begin() { vpx_ctx_->file = OpenTestDataFile(file_name_); ASSERT_TRUE(vpx_ctx_->file != NULL) << "Input file open failed. Filename: " - << file_name_; + << file_name_; ASSERT_EQ(file_is_webm(webm_ctx_, vpx_ctx_), 1) << "file is not WebM"; @@ -62,7 +55,7 @@ class WebMVideoSource : public CompressedVideoSource { void FillFrame() { ASSERT_TRUE(vpx_ctx_->file != NULL); - const int status = webm_read_frame(webm_ctx_, &buf_, &buf_sz_, &buf_sz_); + const int status = webm_read_frame(webm_ctx_, &buf_, &buf_sz_); ASSERT_GE(status, 0) << "webm_read_frame failed"; if (status == 1) { end_of_file_ = true; @@ -72,7 +65,7 @@ class WebMVideoSource : public CompressedVideoSource { void SeekToNextKeyFrame() { ASSERT_TRUE(vpx_ctx_->file != NULL); do { - const int status = webm_read_frame(webm_ctx_, &buf_, &buf_sz_, &buf_sz_); + const int status = webm_read_frame(webm_ctx_, &buf_, &buf_sz_); ASSERT_GE(status, 0) << "webm_read_frame failed"; ++frame_; if (status == 1) { @@ -81,9 +74,7 @@ class WebMVideoSource : public CompressedVideoSource { } while (!webm_ctx_->is_key_frame && !end_of_file_); } - virtual const uint8_t *cxdata() const { - return end_of_file_ ? NULL : buf_; - } + virtual const uint8_t *cxdata() const { return end_of_file_ ? 
NULL : buf_; } virtual size_t frame_size() const { return buf_sz_; } virtual unsigned int frame_number() const { return frame_; } diff --git a/libs/libvpx/test/y4m_test.cc b/libs/libvpx/test/y4m_test.cc index a5553292c0..ced717a7c1 100644 --- a/libs/libvpx/test/y4m_test.cc +++ b/libs/libvpx/test/y4m_test.cc @@ -22,7 +22,7 @@ namespace { using std::string; -static const unsigned int kWidth = 160; +static const unsigned int kWidth = 160; static const unsigned int kHeight = 90; static const unsigned int kFrames = 10; @@ -34,24 +34,24 @@ struct Y4mTestParam { }; const Y4mTestParam kY4mTestVectors[] = { - {"park_joy_90p_8_420.y4m", 8, VPX_IMG_FMT_I420, - "e5406275b9fc6bb3436c31d4a05c1cab"}, - {"park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422, - "284a47a47133b12884ec3a14e959a0b6"}, - {"park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444, - "90517ff33843d85de712fd4fe60dbed0"}, - {"park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016, - "63f21f9f717d8b8631bd2288ee87137b"}, - {"park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216, - "48ab51fb540aed07f7ff5af130c9b605"}, - {"park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416, - "067bfd75aa85ff9bae91fa3e0edd1e3e"}, - {"park_joy_90p_12_420.y4m", 12, VPX_IMG_FMT_I42016, - "9e6d8f6508c6e55625f6b697bc461cef"}, - {"park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216, - "b239c6b301c0b835485be349ca83a7e3"}, - {"park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416, - "5a6481a550821dab6d0192f5c63845e9"}, + { "park_joy_90p_8_420.y4m", 8, VPX_IMG_FMT_I420, + "e5406275b9fc6bb3436c31d4a05c1cab" }, + { "park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422, + "284a47a47133b12884ec3a14e959a0b6" }, + { "park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444, + "90517ff33843d85de712fd4fe60dbed0" }, + { "park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016, + "63f21f9f717d8b8631bd2288ee87137b" }, + { "park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216, + "48ab51fb540aed07f7ff5af130c9b605" }, + { "park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416, + "067bfd75aa85ff9bae91fa3e0edd1e3e" }, + { 
"park_joy_90p_12_420.y4m", 12, VPX_IMG_FMT_I42016, + "9e6d8f6508c6e55625f6b697bc461cef" }, + { "park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216, + "b239c6b301c0b835485be349ca83a7e3" }, + { "park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416, + "5a6481a550821dab6d0192f5c63845e9" }, }; static void write_image_file(const vpx_image_t *img, FILE *file) { @@ -60,10 +60,12 @@ static void write_image_file(const vpx_image_t *img, FILE *file) { const unsigned char *buf = img->planes[plane]; const int stride = img->stride[plane]; const int bytes_per_sample = (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1; - const int h = (plane ? (img->d_h + img->y_chroma_shift) >> - img->y_chroma_shift : img->d_h); - const int w = (plane ? (img->d_w + img->x_chroma_shift) >> - img->x_chroma_shift : img->d_w); + const int h = + (plane ? (img->d_h + img->y_chroma_shift) >> img->y_chroma_shift + : img->d_h); + const int w = + (plane ? (img->d_w + img->x_chroma_shift) >> img->x_chroma_shift + : img->d_w); for (y = 0; y < h; ++y) { fwrite(buf, bytes_per_sample, w, file); buf += stride; @@ -71,15 +73,12 @@ static void write_image_file(const vpx_image_t *img, FILE *file) { } } -class Y4mVideoSourceTest - : public ::testing::TestWithParam, - public ::libvpx_test::Y4mVideoSource { +class Y4mVideoSourceTest : public ::testing::TestWithParam, + public ::libvpx_test::Y4mVideoSource { protected: Y4mVideoSourceTest() : Y4mVideoSource("", 0, 0) {} - virtual ~Y4mVideoSourceTest() { - CloseSource(); - } + virtual ~Y4mVideoSourceTest() { CloseSource(); } virtual void Init(const std::string &file_name, int limit) { file_name_ = file_name; @@ -137,10 +136,9 @@ TEST_P(Y4mVideoSourceTest, SourceTest) { INSTANTIATE_TEST_CASE_P(C, Y4mVideoSourceTest, ::testing::ValuesIn(kY4mTestVectors)); -class Y4mVideoWriteTest - : public Y4mVideoSourceTest { +class Y4mVideoWriteTest : public Y4mVideoSourceTest { protected: - Y4mVideoWriteTest() {} + Y4mVideoWriteTest() : tmpfile_(NULL) {} virtual ~Y4mVideoWriteTest() { delete 
tmpfile_; @@ -158,14 +156,12 @@ class Y4mVideoWriteTest // Writes out a y4m file and then reads it back void WriteY4mAndReadBack() { ASSERT_TRUE(input_file_ != NULL); - char buf[Y4M_BUFFER_SIZE] = {0}; - const struct VpxRational framerate = {y4m_.fps_n, y4m_.fps_d}; + char buf[Y4M_BUFFER_SIZE] = { 0 }; + const struct VpxRational framerate = { y4m_.fps_n, y4m_.fps_d }; tmpfile_ = new libvpx_test::TempOutFile; ASSERT_TRUE(tmpfile_->file() != NULL); - y4m_write_file_header(buf, sizeof(buf), - kWidth, kHeight, - &framerate, y4m_.vpx_fmt, - y4m_.bit_depth); + y4m_write_file_header(buf, sizeof(buf), kWidth, kHeight, &framerate, + y4m_.vpx_fmt, y4m_.bit_depth); fputs(buf, tmpfile_->file()); for (unsigned int i = start_; i < limit_; i++) { y4m_write_frame_header(buf, sizeof(buf)); diff --git a/libs/libvpx/test/y4m_video_source.h b/libs/libvpx/test/y4m_video_source.h index 03d9388db8..2682ddde3d 100644 --- a/libs/libvpx/test/y4m_video_source.h +++ b/libs/libvpx/test/y4m_video_source.h @@ -21,18 +21,10 @@ namespace libvpx_test { // so that we can do actual file encodes. 
class Y4mVideoSource : public VideoSource { public: - Y4mVideoSource(const std::string &file_name, - unsigned int start, int limit) - : file_name_(file_name), - input_file_(NULL), - img_(new vpx_image_t()), - start_(start), - limit_(limit), - frame_(0), - framerate_numerator_(0), - framerate_denominator_(0), - y4m_() { - } + Y4mVideoSource(const std::string &file_name, unsigned int start, int limit) + : file_name_(file_name), input_file_(NULL), img_(new vpx_image_t()), + start_(start), limit_(limit), frame_(0), framerate_numerator_(0), + framerate_denominator_(0), y4m_() {} virtual ~Y4mVideoSource() { vpx_img_free(img_.get()); diff --git a/libs/libvpx/test/yuv_video_source.h b/libs/libvpx/test/yuv_video_source.h index 3c852b2426..71ad2ab9a6 100644 --- a/libs/libvpx/test/yuv_video_source.h +++ b/libs/libvpx/test/yuv_video_source.h @@ -25,19 +25,11 @@ namespace libvpx_test { class YUVVideoSource : public VideoSource { public: YUVVideoSource(const std::string &file_name, vpx_img_fmt format, - unsigned int width, unsigned int height, - int rate_numerator, int rate_denominator, - unsigned int start, int limit) - : file_name_(file_name), - input_file_(NULL), - img_(NULL), - start_(start), - limit_(limit), - frame_(0), - width_(0), - height_(0), - format_(VPX_IMG_FMT_NONE), - framerate_numerator_(rate_numerator), + unsigned int width, unsigned int height, int rate_numerator, + int rate_denominator, unsigned int start, int limit) + : file_name_(file_name), input_file_(NULL), img_(NULL), start_(start), + limit_(limit), frame_(0), width_(0), height_(0), + format_(VPX_IMG_FMT_NONE), framerate_numerator_(rate_numerator), framerate_denominator_(rate_denominator) { // This initializes format_, raw_size_, width_, height_ and allocates img. 
SetSize(width, height, format); @@ -45,18 +37,17 @@ class YUVVideoSource : public VideoSource { virtual ~YUVVideoSource() { vpx_img_free(img_); - if (input_file_) - fclose(input_file_); + if (input_file_) fclose(input_file_); } virtual void Begin() { - if (input_file_) - fclose(input_file_); + if (input_file_) fclose(input_file_); input_file_ = OpenTestDataFile(file_name_); ASSERT_TRUE(input_file_ != NULL) << "Input file open failed. Filename: " << file_name_; - if (start_) + if (start_) { fseek(input_file_, static_cast(raw_size_) * start_, SEEK_SET); + } frame_ = start_; FillFrame(); @@ -67,7 +58,7 @@ class YUVVideoSource : public VideoSource { FillFrame(); } - virtual vpx_image_t *img() const { return (frame_ < limit_) ? img_ : NULL; } + virtual vpx_image_t *img() const { return (frame_ < limit_) ? img_ : NULL; } // Models a stream where Timebase = 1/FPS, so pts == frame. virtual vpx_codec_pts_t pts() const { return frame_; } @@ -93,32 +84,15 @@ class YUVVideoSource : public VideoSource { height_ = height; format_ = format; switch (format) { - case VPX_IMG_FMT_I420: - raw_size_ = width * height * 3 / 2; - break; - case VPX_IMG_FMT_I422: - raw_size_ = width * height * 2; - break; - case VPX_IMG_FMT_I440: - raw_size_ = width * height * 2; - break; - case VPX_IMG_FMT_I444: - raw_size_ = width * height * 3; - break; - case VPX_IMG_FMT_I42016: - raw_size_ = width * height * 3; - break; - case VPX_IMG_FMT_I42216: - raw_size_ = width * height * 4; - break; - case VPX_IMG_FMT_I44016: - raw_size_ = width * height * 4; - break; - case VPX_IMG_FMT_I44416: - raw_size_ = width * height * 6; - break; - default: - ASSERT_TRUE(0); + case VPX_IMG_FMT_I420: raw_size_ = width * height * 3 / 2; break; + case VPX_IMG_FMT_I422: raw_size_ = width * height * 2; break; + case VPX_IMG_FMT_I440: raw_size_ = width * height * 2; break; + case VPX_IMG_FMT_I444: raw_size_ = width * height * 3; break; + case VPX_IMG_FMT_I42016: raw_size_ = width * height * 3; break; + case VPX_IMG_FMT_I42216: 
raw_size_ = width * height * 4; break; + case VPX_IMG_FMT_I44016: raw_size_ = width * height * 4; break; + case VPX_IMG_FMT_I44416: raw_size_ = width * height * 6; break; + default: ASSERT_TRUE(0); } } } diff --git a/libs/libvpx/third_party/googletest/README.libvpx b/libs/libvpx/third_party/googletest/README.libvpx index 7201a67d3d..0e3b8b9377 100644 --- a/libs/libvpx/third_party/googletest/README.libvpx +++ b/libs/libvpx/third_party/googletest/README.libvpx @@ -12,4 +12,10 @@ failures, various options for running the tests, and XML test report generation. Local Modifications: -Removed unused declarations of kPathSeparatorString to have warning free build. \ No newline at end of file +- Removed unused declarations of kPathSeparatorString to have warning + free build. +- Added GTEST_ATTRIBUTE_UNUSED_ to test registering dummies in TEST_P + and INSTANTIATE_TEST_CASE_P to remove warnings about unused variables + under GCC 5. +- Only define g_in_fast_death_test_child for non-Windows builds; quiets an + unused variable warning. diff --git a/libs/libvpx/third_party/googletest/src/include/gtest/gtest.h b/libs/libvpx/third_party/googletest/src/include/gtest/gtest.h index 4f3804f703..581a44e95f 100644 --- a/libs/libvpx/third_party/googletest/src/include/gtest/gtest.h +++ b/libs/libvpx/third_party/googletest/src/include/gtest/gtest.h @@ -16960,7 +16960,7 @@ internal::CartesianProductHolder10()); \ return 0; \ } \ - static int gtest_registering_dummy_; \ + static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \ GTEST_DISALLOW_COPY_AND_ASSIGN_(\ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \ }; \ @@ -16972,7 +16972,7 @@ internal::CartesianProductHolder10 \ gtest_##prefix##test_case_name##_EvalGenerator_() { return generator; } \ - int gtest_##prefix##test_case_name##_dummy_ = \ + int gtest_##prefix##test_case_name##_dummy_ GTEST_ATTRIBUTE_UNUSED_ = \ ::testing::UnitTest::GetInstance()->parameterized_test_registry(). 
\ GetTestCasePatternHolder(\ #test_case_name, __FILE__, __LINE__)->AddTestCaseInstantiation(\ diff --git a/libs/libvpx/third_party/googletest/src/src/gtest-all.cc b/libs/libvpx/third_party/googletest/src/src/gtest-all.cc index 8d906279ab..912868148e 100644 --- a/libs/libvpx/third_party/googletest/src/src/gtest-all.cc +++ b/libs/libvpx/third_party/googletest/src/src/gtest-all.cc @@ -6612,9 +6612,11 @@ GTEST_DEFINE_string_( namespace internal { +# if !GTEST_OS_WINDOWS // Valid only for fast death tests. Indicates the code is running in the // child process of a fast style death test. static bool g_in_fast_death_test_child = false; +# endif // !GTEST_OS_WINDOWS // Returns a Boolean value indicating whether the caller is currently // executing in the context of the death test child process. Tools such as diff --git a/libs/libvpx/third_party/libwebm/Android.mk b/libs/libvpx/third_party/libwebm/Android.mk index be9d77deed..8149a083f4 100644 --- a/libs/libvpx/third_party/libwebm/Android.mk +++ b/libs/libvpx/third_party/libwebm/Android.mk @@ -2,9 +2,16 @@ LOCAL_PATH:= $(call my-dir) include $(CLEAR_VARS) LOCAL_MODULE:= libwebm -LOCAL_SRC_FILES:= mkvparser.cpp \ - mkvreader.cpp \ - mkvmuxer.cpp \ - mkvmuxerutil.cpp \ - mkvwriter.cpp +LOCAL_CPPFLAGS:=-D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS +LOCAL_CPPFLAGS+=-D__STDC_LIMIT_MACROS -Wno-extern-c-compat +LOCAL_C_INCLUDES:= $(LOCAL_PATH) +LOCAL_EXPORT_C_INCLUDES:= $(LOCAL_PATH) + +LOCAL_SRC_FILES:= common/file_util.cc \ + common/hdr_util.cc \ + mkvparser/mkvparser.cc \ + mkvparser/mkvreader.cc \ + mkvmuxer/mkvmuxer.cc \ + mkvmuxer/mkvmuxerutil.cc \ + mkvmuxer/mkvwriter.cc include $(BUILD_STATIC_LIBRARY) diff --git a/libs/libvpx/third_party/libwebm/README.libvpx b/libs/libvpx/third_party/libwebm/README.libvpx index 2989d3d89a..73f8303222 100644 --- a/libs/libvpx/third_party/libwebm/README.libvpx +++ b/libs/libvpx/third_party/libwebm/README.libvpx @@ -1,5 +1,5 @@ URL: https://chromium.googlesource.com/webm/libwebm -Version: 
476366249e1fda7710a389cd41c57db42305e0d4 +Version: 32d5ac49414a8914ec1e1f285f3f927c6e8ec29d License: BSD License File: LICENSE.txt diff --git a/libs/libvpx/third_party/libwebm/RELEASE.TXT b/libs/libvpx/third_party/libwebm/RELEASE.TXT deleted file mode 100644 index db1b77117c..0000000000 --- a/libs/libvpx/third_party/libwebm/RELEASE.TXT +++ /dev/null @@ -1,34 +0,0 @@ -1.0.0.5 - * Handled case when no duration - * Handled empty clusters - * Handled empty clusters when seeking - * Implemented check lacing bits - -1.0.0.4 - * Made Cues member variables mutables - * Defined against badly-formatted cue points - * Segment::GetCluster returns CuePoint too - * Separated cue-based searches - -1.0.0.3 - * Added Block::GetOffset() to get a frame's offset in a block - * Changed cluster count type from size_t to long - * Parsed SeekHead to find cues - * Allowed seeking beyond end of cluster cache - * Added not to attempt to reparse cues element - * Restructured Segment::LoadCluster - * Marked position of cues without parsing cues element - * Allowed cue points to be loaded incrementally - * Implemented to load lazily cue points as they're searched - * Merged Cues::LoadCuePoint into Cues::Find - * Lazy init cues - * Loaded cue point during find - -1.0.0.2 - * added support for Cues element - * seeking was improved - -1.0.0.1 - * fixed item 141 - * added item 142 - * added this file, RELEASE.TXT, to repository diff --git a/libs/libvpx/third_party/libwebm/common/file_util.cc b/libs/libvpx/third_party/libwebm/common/file_util.cc new file mode 100644 index 0000000000..4f91318f3e --- /dev/null +++ b/libs/libvpx/third_party/libwebm/common/file_util.cc @@ -0,0 +1,67 @@ +// Copyright (c) 2016 The WebM project authors. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. 
All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +#include "common/file_util.h" + +#include +#ifndef _MSC_VER +#include // close() +#endif + +#include +#include +#include +#include + +namespace libwebm { + +std::string GetTempFileName() { +#if !defined _MSC_VER && !defined __MINGW32__ + char temp_file_name_template[] = "libwebm_temp.XXXXXX"; + int fd = mkstemp(temp_file_name_template); + if (fd != -1) { + close(fd); + return std::string(temp_file_name_template); + } + return std::string(); +#else + char tmp_file_name[_MAX_PATH]; + errno_t err = tmpnam_s(tmp_file_name); + if (err == 0) { + return std::string(tmp_file_name); + } + return std::string(); +#endif +} + +uint64_t GetFileSize(const std::string& file_name) { + uint64_t file_size = 0; +#ifndef _MSC_VER + struct stat st; + st.st_size = 0; + if (stat(file_name.c_str(), &st) == 0) { +#else + struct _stat st; + st.st_size = 0; + if (_stat(file_name.c_str(), &st) == 0) { +#endif + file_size = st.st_size; + } + return file_size; +} + +TempFileDeleter::TempFileDeleter() { file_name_ = GetTempFileName(); } + +TempFileDeleter::~TempFileDeleter() { + std::ifstream file(file_name_.c_str()); + if (file.good()) { + file.close(); + std::remove(file_name_.c_str()); + } +} + +} // namespace libwebm diff --git a/libs/libvpx/third_party/libwebm/common/file_util.h b/libs/libvpx/third_party/libwebm/common/file_util.h new file mode 100644 index 0000000000..0e71eac11e --- /dev/null +++ b/libs/libvpx/third_party/libwebm/common/file_util.h @@ -0,0 +1,41 @@ +// Copyright (c) 2016 The WebM project authors. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+#ifndef LIBWEBM_COMMON_FILE_UTIL_H_ +#define LIBWEBM_COMMON_FILE_UTIL_H_ + +#include + +#include + +#include "mkvmuxer/mkvmuxertypes.h" // LIBWEBM_DISALLOW_COPY_AND_ASSIGN() + +namespace libwebm { + +// Returns a temporary file name. +std::string GetTempFileName(); + +// Returns size of file specified by |file_name|, or 0 upon failure. +uint64_t GetFileSize(const std::string& file_name); + +// Manages life of temporary file specified at time of construction. Deletes +// file upon destruction. +class TempFileDeleter { + public: + TempFileDeleter(); + explicit TempFileDeleter(std::string file_name) : file_name_(file_name) {} + ~TempFileDeleter(); + const std::string& name() const { return file_name_; } + + private: + std::string file_name_; + LIBWEBM_DISALLOW_COPY_AND_ASSIGN(TempFileDeleter); +}; + +} // namespace libwebm + +#endif // LIBWEBM_COMMON_FILE_UTIL_H_ \ No newline at end of file diff --git a/libs/libvpx/third_party/libwebm/common/hdr_util.cc b/libs/libvpx/third_party/libwebm/common/hdr_util.cc new file mode 100644 index 0000000000..e1a9842fb6 --- /dev/null +++ b/libs/libvpx/third_party/libwebm/common/hdr_util.cc @@ -0,0 +1,182 @@ +// Copyright (c) 2016 The WebM project authors. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+#include "hdr_util.h" + +#include +#include + +#include "mkvparser/mkvparser.h" + +namespace libwebm { +bool CopyPrimaryChromaticity(const mkvparser::PrimaryChromaticity& parser_pc, + PrimaryChromaticityPtr* muxer_pc) { + muxer_pc->reset(new (std::nothrow) + mkvmuxer::PrimaryChromaticity(parser_pc.x, parser_pc.y)); + if (!muxer_pc->get()) + return false; + return true; +} + +bool MasteringMetadataValuePresent(double value) { + return value != mkvparser::MasteringMetadata::kValueNotPresent; +} + +bool CopyMasteringMetadata(const mkvparser::MasteringMetadata& parser_mm, + mkvmuxer::MasteringMetadata* muxer_mm) { + if (MasteringMetadataValuePresent(parser_mm.luminance_max)) + muxer_mm->luminance_max = parser_mm.luminance_max; + if (MasteringMetadataValuePresent(parser_mm.luminance_min)) + muxer_mm->luminance_min = parser_mm.luminance_min; + + PrimaryChromaticityPtr r_ptr(NULL); + PrimaryChromaticityPtr g_ptr(NULL); + PrimaryChromaticityPtr b_ptr(NULL); + PrimaryChromaticityPtr wp_ptr(NULL); + + if (parser_mm.r) { + if (!CopyPrimaryChromaticity(*parser_mm.r, &r_ptr)) + return false; + } + if (parser_mm.g) { + if (!CopyPrimaryChromaticity(*parser_mm.g, &g_ptr)) + return false; + } + if (parser_mm.b) { + if (!CopyPrimaryChromaticity(*parser_mm.b, &b_ptr)) + return false; + } + if (parser_mm.white_point) { + if (!CopyPrimaryChromaticity(*parser_mm.white_point, &wp_ptr)) + return false; + } + + if (!muxer_mm->SetChromaticity(r_ptr.get(), g_ptr.get(), b_ptr.get(), + wp_ptr.get())) { + return false; + } + + return true; +} + +bool ColourValuePresent(long long value) { + return value != mkvparser::Colour::kValueNotPresent; +} + +bool CopyColour(const mkvparser::Colour& parser_colour, + mkvmuxer::Colour* muxer_colour) { + if (!muxer_colour) + return false; + + if (ColourValuePresent(parser_colour.matrix_coefficients)) + muxer_colour->matrix_coefficients = parser_colour.matrix_coefficients; + if (ColourValuePresent(parser_colour.bits_per_channel)) + 
muxer_colour->bits_per_channel = parser_colour.bits_per_channel; + if (ColourValuePresent(parser_colour.chroma_subsampling_horz)) + muxer_colour->chroma_subsampling_horz = + parser_colour.chroma_subsampling_horz; + if (ColourValuePresent(parser_colour.chroma_subsampling_vert)) + muxer_colour->chroma_subsampling_vert = + parser_colour.chroma_subsampling_vert; + if (ColourValuePresent(parser_colour.cb_subsampling_horz)) + muxer_colour->cb_subsampling_horz = parser_colour.cb_subsampling_horz; + if (ColourValuePresent(parser_colour.cb_subsampling_vert)) + muxer_colour->cb_subsampling_vert = parser_colour.cb_subsampling_vert; + if (ColourValuePresent(parser_colour.chroma_siting_horz)) + muxer_colour->chroma_siting_horz = parser_colour.chroma_siting_horz; + if (ColourValuePresent(parser_colour.chroma_siting_vert)) + muxer_colour->chroma_siting_vert = parser_colour.chroma_siting_vert; + if (ColourValuePresent(parser_colour.range)) + muxer_colour->range = parser_colour.range; + if (ColourValuePresent(parser_colour.transfer_characteristics)) + muxer_colour->transfer_characteristics = + parser_colour.transfer_characteristics; + if (ColourValuePresent(parser_colour.primaries)) + muxer_colour->primaries = parser_colour.primaries; + if (ColourValuePresent(parser_colour.max_cll)) + muxer_colour->max_cll = parser_colour.max_cll; + if (ColourValuePresent(parser_colour.max_fall)) + muxer_colour->max_fall = parser_colour.max_fall; + + if (parser_colour.mastering_metadata) { + mkvmuxer::MasteringMetadata muxer_mm; + if (!CopyMasteringMetadata(*parser_colour.mastering_metadata, &muxer_mm)) + return false; + if (!muxer_colour->SetMasteringMetadata(muxer_mm)) + return false; + } + return true; +} + +// Format of VPx private data: +// +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | ID Byte | Length | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | +// | | +// : Bytes 1..Length of 
Codec Feature : +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// +// ID Byte Format +// ID byte is an unsigned byte. +// 0 1 2 3 4 5 6 7 +// +-+-+-+-+-+-+-+-+ +// |X| ID | +// +-+-+-+-+-+-+-+-+ +// +// The X bit is reserved. +// +// Currently only profile level is supported. ID byte must be set to 1, and +// length must be 1. Supported values are: +// +// 10: Level 1 +// 11: Level 1.1 +// 20: Level 2 +// 21: Level 2.1 +// 30: Level 3 +// 31: Level 3.1 +// 40: Level 4 +// 41: Level 4.1 +// 50: Level 5 +// 51: Level 5.1 +// 52: Level 5.2 +// 60: Level 6 +// 61: Level 6.1 +// 62: Level 6.2 +// +// See the following link for more information: +// http://www.webmproject.org/vp9/profiles/ +int ParseVpxCodecPrivate(const uint8_t* private_data, int32_t length) { + const int kVpxCodecPrivateLength = 3; + if (!private_data || length != kVpxCodecPrivateLength) + return 0; + + const uint8_t id_byte = *private_data; + if (id_byte != 1) + return 0; + + const int kVpxProfileLength = 1; + const uint8_t length_byte = private_data[1]; + if (length_byte != kVpxProfileLength) + return 0; + + const int level = static_cast(private_data[2]); + + const int kNumLevels = 14; + const int levels[kNumLevels] = {10, 11, 20, 21, 30, 31, 40, + 41, 50, 51, 52, 60, 61, 62}; + + for (int i = 0; i < kNumLevels; ++i) { + if (level == levels[i]) + return level; + } + + return 0; +} +} // namespace libwebm diff --git a/libs/libvpx/third_party/libwebm/common/hdr_util.h b/libs/libvpx/third_party/libwebm/common/hdr_util.h new file mode 100644 index 0000000000..d30c2b9f2a --- /dev/null +++ b/libs/libvpx/third_party/libwebm/common/hdr_util.h @@ -0,0 +1,51 @@ +// Copyright (c) 2016 The WebM project authors. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. 
All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +#ifndef LIBWEBM_COMMON_HDR_UTIL_H_ +#define LIBWEBM_COMMON_HDR_UTIL_H_ + +#include + +#include + +#include "mkvmuxer/mkvmuxer.h" + +namespace mkvparser { +struct Colour; +struct MasteringMetadata; +struct PrimaryChromaticity; +} // namespace mkvparser + +namespace libwebm { +// Utility types and functions for working with the Colour element and its +// children. Copiers return true upon success. Presence functions return true +// when the specified element is present. + +// TODO(tomfinegan): These should be moved to libwebm_utils once c++11 is +// required by libwebm. + +typedef std::auto_ptr PrimaryChromaticityPtr; + +bool CopyPrimaryChromaticity(const mkvparser::PrimaryChromaticity& parser_pc, + PrimaryChromaticityPtr* muxer_pc); + +bool MasteringMetadataValuePresent(double value); + +bool CopyMasteringMetadata(const mkvparser::MasteringMetadata& parser_mm, + mkvmuxer::MasteringMetadata* muxer_mm); + +bool ColourValuePresent(long long value); + +bool CopyColour(const mkvparser::Colour& parser_colour, + mkvmuxer::Colour* muxer_colour); + +// Returns VP9 profile upon success or 0 upon failure. +int ParseVpxCodecPrivate(const uint8_t* private_data, int32_t length); + +} // namespace libwebm + +#endif // LIBWEBM_COMMON_HDR_UTIL_H_ diff --git a/libs/libvpx/third_party/libwebm/webmids.hpp b/libs/libvpx/third_party/libwebm/common/webmids.h similarity index 79% rename from libs/libvpx/third_party/libwebm/webmids.hpp rename to libs/libvpx/third_party/libwebm/common/webmids.h index ad4ab57388..32a0c5fb91 100644 --- a/libs/libvpx/third_party/libwebm/webmids.hpp +++ b/libs/libvpx/third_party/libwebm/common/webmids.h @@ -6,10 +6,10 @@ // in the file PATENTS. All contributing project authors may // be found in the AUTHORS file in the root of the source tree. 
-#ifndef WEBMIDS_HPP -#define WEBMIDS_HPP +#ifndef COMMON_WEBMIDS_H_ +#define COMMON_WEBMIDS_H_ -namespace mkvmuxer { +namespace libwebm { enum MkvId { kMkvEBML = 0x1A45DFA3, @@ -95,6 +95,35 @@ enum MkvId { kMkvAspectRatioType = 0x54B3, kMkvFrameRate = 0x2383E3, // end video + // colour + kMkvColour = 0x55B0, + kMkvMatrixCoefficients = 0x55B1, + kMkvBitsPerChannel = 0x55B2, + kMkvChromaSubsamplingHorz = 0x55B3, + kMkvChromaSubsamplingVert = 0x55B4, + kMkvCbSubsamplingHorz = 0x55B5, + kMkvCbSubsamplingVert = 0x55B6, + kMkvChromaSitingHorz = 0x55B7, + kMkvChromaSitingVert = 0x55B8, + kMkvRange = 0x55B9, + kMkvTransferCharacteristics = 0x55BA, + kMkvPrimaries = 0x55BB, + kMkvMaxCLL = 0x55BC, + kMkvMaxFALL = 0x55BD, + // mastering metadata + kMkvMasteringMetadata = 0x55D0, + kMkvPrimaryRChromaticityX = 0x55D1, + kMkvPrimaryRChromaticityY = 0x55D2, + kMkvPrimaryGChromaticityX = 0x55D3, + kMkvPrimaryGChromaticityY = 0x55D4, + kMkvPrimaryBChromaticityX = 0x55D5, + kMkvPrimaryBChromaticityY = 0x55D6, + kMkvWhitePointChromaticityX = 0x55D7, + kMkvWhitePointChromaticityY = 0x55D8, + kMkvLuminanceMax = 0x55D9, + kMkvLuminanceMin = 0x55DA, + // end mastering metadata + // end colour // audio kMkvAudio = 0xE1, kMkvSamplingFrequency = 0xB5, @@ -150,6 +179,6 @@ enum MkvId { kMkvTagString = 0x4487 }; -} // end namespace mkvmuxer +} // namespace libwebm -#endif // WEBMIDS_HPP +#endif // COMMON_WEBMIDS_H_ diff --git a/libs/libvpx/third_party/libwebm/mkvmuxer.cpp b/libs/libvpx/third_party/libwebm/mkvmuxer/mkvmuxer.cc similarity index 56% rename from libs/libvpx/third_party/libwebm/mkvmuxer.cpp rename to libs/libvpx/third_party/libwebm/mkvmuxer/mkvmuxer.cc index 9be3119a46..c79ce24ed3 100644 --- a/libs/libvpx/third_party/libwebm/mkvmuxer.cpp +++ b/libs/libvpx/third_party/libwebm/mkvmuxer/mkvmuxer.cc @@ -6,27 +6,28 @@ // in the file PATENTS. All contributing project authors may // be found in the AUTHORS file in the root of the source tree. 
-#include "mkvmuxer.hpp" +#include "mkvmuxer/mkvmuxer.h" +#include #include #include #include #include #include +#include #include +#include -#include "mkvmuxerutil.hpp" -#include "mkvparser.hpp" -#include "mkvwriter.hpp" -#include "webmids.hpp" - -#ifdef _MSC_VER -// Disable MSVC warnings that suggest making code non-portable. -#pragma warning(disable : 4996) -#endif +#include "common/webmids.h" +#include "mkvmuxer/mkvmuxerutil.h" +#include "mkvmuxer/mkvwriter.h" +#include "mkvparser/mkvparser.h" namespace mkvmuxer { +const float MasteringMetadata::kValueNotPresent = FLT_MAX; +const uint64_t Colour::kValueNotPresent = UINT64_MAX; + namespace { // Deallocate the string designated by |dst|, and then copy the |src| // string to |dst|. The caller owns both the |src| string and the @@ -55,6 +56,20 @@ bool StrCpy(const char* src, char** dst_ptr) { strcpy(dst, src); // NOLINT return true; } + +typedef std::auto_ptr PrimaryChromaticityPtr; +bool CopyChromaticity(const PrimaryChromaticity* src, + PrimaryChromaticityPtr* dst) { + if (!dst) + return false; + + dst->reset(new (std::nothrow) PrimaryChromaticity(src->x, src->y)); + if (!dst->get()) + return false; + + return true; +} + } // namespace /////////////////////////////////////////////////////////////// @@ -65,31 +80,31 @@ IMkvWriter::IMkvWriter() {} IMkvWriter::~IMkvWriter() {} -bool WriteEbmlHeader(IMkvWriter* writer, uint64 doc_type_version) { +bool WriteEbmlHeader(IMkvWriter* writer, uint64_t doc_type_version) { // Level 0 - uint64 size = EbmlElementSize(kMkvEBMLVersion, 1ULL); - size += EbmlElementSize(kMkvEBMLReadVersion, 1ULL); - size += EbmlElementSize(kMkvEBMLMaxIDLength, 4ULL); - size += EbmlElementSize(kMkvEBMLMaxSizeLength, 8ULL); - size += EbmlElementSize(kMkvDocType, "webm"); - size += EbmlElementSize(kMkvDocTypeVersion, doc_type_version); - size += EbmlElementSize(kMkvDocTypeReadVersion, 2ULL); + uint64_t size = EbmlElementSize(libwebm::kMkvEBMLVersion, UINT64_C(1)); + size += 
EbmlElementSize(libwebm::kMkvEBMLReadVersion, UINT64_C(1)); + size += EbmlElementSize(libwebm::kMkvEBMLMaxIDLength, UINT64_C(4)); + size += EbmlElementSize(libwebm::kMkvEBMLMaxSizeLength, UINT64_C(8)); + size += EbmlElementSize(libwebm::kMkvDocType, "webm"); + size += EbmlElementSize(libwebm::kMkvDocTypeVersion, doc_type_version); + size += EbmlElementSize(libwebm::kMkvDocTypeReadVersion, UINT64_C(2)); - if (!WriteEbmlMasterElement(writer, kMkvEBML, size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvEBML, size)) return false; - if (!WriteEbmlElement(writer, kMkvEBMLVersion, 1ULL)) + if (!WriteEbmlElement(writer, libwebm::kMkvEBMLVersion, UINT64_C(1))) return false; - if (!WriteEbmlElement(writer, kMkvEBMLReadVersion, 1ULL)) + if (!WriteEbmlElement(writer, libwebm::kMkvEBMLReadVersion, UINT64_C(1))) return false; - if (!WriteEbmlElement(writer, kMkvEBMLMaxIDLength, 4ULL)) + if (!WriteEbmlElement(writer, libwebm::kMkvEBMLMaxIDLength, UINT64_C(4))) return false; - if (!WriteEbmlElement(writer, kMkvEBMLMaxSizeLength, 8ULL)) + if (!WriteEbmlElement(writer, libwebm::kMkvEBMLMaxSizeLength, UINT64_C(8))) return false; - if (!WriteEbmlElement(writer, kMkvDocType, "webm")) + if (!WriteEbmlElement(writer, libwebm::kMkvDocType, "webm")) return false; - if (!WriteEbmlElement(writer, kMkvDocTypeVersion, doc_type_version)) + if (!WriteEbmlElement(writer, libwebm::kMkvDocTypeVersion, doc_type_version)) return false; - if (!WriteEbmlElement(writer, kMkvDocTypeReadVersion, 2ULL)) + if (!WriteEbmlElement(writer, libwebm::kMkvDocTypeReadVersion, UINT64_C(2))) return false; return true; @@ -100,16 +115,16 @@ bool WriteEbmlHeader(IMkvWriter* writer) { } bool ChunkedCopy(mkvparser::IMkvReader* source, mkvmuxer::IMkvWriter* dst, - mkvmuxer::int64 start, int64 size) { + int64_t start, int64_t size) { // TODO(vigneshv): Check if this is a reasonable value. 
- const uint32 kBufSize = 2048; - uint8* buf = new uint8[kBufSize]; - int64 offset = start; + const uint32_t kBufSize = 2048; + uint8_t* buf = new uint8_t[kBufSize]; + int64_t offset = start; while (size > 0) { - const int64 read_len = (size > kBufSize) ? kBufSize : size; + const int64_t read_len = (size > kBufSize) ? kBufSize : size; if (source->Read(offset, static_cast(read_len), buf)) return false; - dst->Write(buf, static_cast(read_len)); + dst->Write(buf, static_cast(read_len)); offset += read_len; size -= read_len; } @@ -126,6 +141,7 @@ Frame::Frame() additional_(NULL), additional_length_(0), duration_(0), + duration_set_(false), frame_(NULL), is_key_(false), length_(0), @@ -158,16 +174,19 @@ bool Frame::CopyFrom(const Frame& frame) { return false; } duration_ = frame.duration(); + duration_set_ = frame.duration_set(); is_key_ = frame.is_key(); track_number_ = frame.track_number(); timestamp_ = frame.timestamp(); discard_padding_ = frame.discard_padding(); + reference_block_timestamp_ = frame.reference_block_timestamp(); + reference_block_timestamp_set_ = frame.reference_block_timestamp_set(); return true; } -bool Frame::Init(const uint8* frame, uint64 length) { - uint8* const data = - new (std::nothrow) uint8[static_cast(length)]; // NOLINT +bool Frame::Init(const uint8_t* frame, uint64_t length) { + uint8_t* const data = + new (std::nothrow) uint8_t[static_cast(length)]; // NOLINT if (!data) return false; @@ -179,10 +198,10 @@ bool Frame::Init(const uint8* frame, uint64 length) { return true; } -bool Frame::AddAdditionalData(const uint8* additional, uint64 length, - uint64 add_id) { - uint8* const data = - new (std::nothrow) uint8[static_cast(length)]; // NOLINT +bool Frame::AddAdditionalData(const uint8_t* additional, uint64_t length, + uint64_t add_id) { + uint8_t* const data = + new (std::nothrow) uint8_t[static_cast(length)]; // NOLINT if (!data) return false; @@ -216,7 +235,12 @@ bool Frame::CanBeSimpleBlock() const { return additional_ == NULL && 
discard_padding_ == 0 && duration_ == 0; } -void Frame::set_reference_block_timestamp(int64 reference_block_timestamp) { +void Frame::set_duration(uint64_t duration) { + duration_ = duration; + duration_set_ = true; +} + +void Frame::set_reference_block_timestamp(int64_t reference_block_timestamp) { reference_block_timestamp_ = reference_block_timestamp; reference_block_timestamp_set_ = true; } @@ -238,61 +262,64 @@ bool CuePoint::Write(IMkvWriter* writer) const { if (!writer || track_ < 1 || cluster_pos_ < 1) return false; - uint64 size = EbmlElementSize(kMkvCueClusterPosition, cluster_pos_); - size += EbmlElementSize(kMkvCueTrack, track_); + uint64_t size = + EbmlElementSize(libwebm::kMkvCueClusterPosition, cluster_pos_); + size += EbmlElementSize(libwebm::kMkvCueTrack, track_); if (output_block_number_ && block_number_ > 1) - size += EbmlElementSize(kMkvCueBlockNumber, block_number_); - const uint64 track_pos_size = - EbmlMasterElementSize(kMkvCueTrackPositions, size) + size; - const uint64 payload_size = - EbmlElementSize(kMkvCueTime, time_) + track_pos_size; + size += EbmlElementSize(libwebm::kMkvCueBlockNumber, block_number_); + const uint64_t track_pos_size = + EbmlMasterElementSize(libwebm::kMkvCueTrackPositions, size) + size; + const uint64_t payload_size = + EbmlElementSize(libwebm::kMkvCueTime, time_) + track_pos_size; - if (!WriteEbmlMasterElement(writer, kMkvCuePoint, payload_size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvCuePoint, payload_size)) return false; - const int64 payload_position = writer->Position(); + const int64_t payload_position = writer->Position(); if (payload_position < 0) return false; - if (!WriteEbmlElement(writer, kMkvCueTime, time_)) + if (!WriteEbmlElement(writer, libwebm::kMkvCueTime, time_)) return false; - if (!WriteEbmlMasterElement(writer, kMkvCueTrackPositions, size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvCueTrackPositions, size)) return false; - if (!WriteEbmlElement(writer, kMkvCueTrack, track_)) 
+ if (!WriteEbmlElement(writer, libwebm::kMkvCueTrack, track_)) return false; - if (!WriteEbmlElement(writer, kMkvCueClusterPosition, cluster_pos_)) + if (!WriteEbmlElement(writer, libwebm::kMkvCueClusterPosition, cluster_pos_)) return false; if (output_block_number_ && block_number_ > 1) - if (!WriteEbmlElement(writer, kMkvCueBlockNumber, block_number_)) + if (!WriteEbmlElement(writer, libwebm::kMkvCueBlockNumber, block_number_)) return false; - const int64 stop_position = writer->Position(); + const int64_t stop_position = writer->Position(); if (stop_position < 0) return false; - if (stop_position - payload_position != static_cast(payload_size)) + if (stop_position - payload_position != static_cast(payload_size)) return false; return true; } -uint64 CuePoint::PayloadSize() const { - uint64 size = EbmlElementSize(kMkvCueClusterPosition, cluster_pos_); - size += EbmlElementSize(kMkvCueTrack, track_); +uint64_t CuePoint::PayloadSize() const { + uint64_t size = + EbmlElementSize(libwebm::kMkvCueClusterPosition, cluster_pos_); + size += EbmlElementSize(libwebm::kMkvCueTrack, track_); if (output_block_number_ && block_number_ > 1) - size += EbmlElementSize(kMkvCueBlockNumber, block_number_); - const uint64 track_pos_size = - EbmlMasterElementSize(kMkvCueTrackPositions, size) + size; - const uint64 payload_size = - EbmlElementSize(kMkvCueTime, time_) + track_pos_size; + size += EbmlElementSize(libwebm::kMkvCueBlockNumber, block_number_); + const uint64_t track_pos_size = + EbmlMasterElementSize(libwebm::kMkvCueTrackPositions, size) + size; + const uint64_t payload_size = + EbmlElementSize(libwebm::kMkvCueTime, time_) + track_pos_size; return payload_size; } -uint64 CuePoint::Size() const { - const uint64 payload_size = PayloadSize(); - return EbmlMasterElementSize(kMkvCuePoint, payload_size) + payload_size; +uint64_t CuePoint::Size() const { + const uint64_t payload_size = PayloadSize(); + return EbmlMasterElementSize(libwebm::kMkvCuePoint, payload_size) + + 
payload_size; } /////////////////////////////////////////////////////////////// @@ -307,7 +334,7 @@ Cues::Cues() Cues::~Cues() { if (cue_entries_) { - for (int32 i = 0; i < cue_entries_size_; ++i) { + for (int32_t i = 0; i < cue_entries_size_; ++i) { CuePoint* const cue = cue_entries_[i]; delete cue; } @@ -321,7 +348,7 @@ bool Cues::AddCue(CuePoint* cue) { if ((cue_entries_size_ + 1) > cue_entries_capacity_) { // Add more CuePoints. - const int32 new_capacity = + const int32_t new_capacity = (!cue_entries_capacity_) ? 2 : cue_entries_capacity_ * 2; if (new_capacity < 1) @@ -332,7 +359,7 @@ bool Cues::AddCue(CuePoint* cue) { if (!cues) return false; - for (int32 i = 0; i < cue_entries_size_; ++i) { + for (int32_t i = 0; i < cue_entries_size_; ++i) { cues[i] = cue_entries_[i]; } @@ -347,7 +374,7 @@ bool Cues::AddCue(CuePoint* cue) { return true; } -CuePoint* Cues::GetCueByIndex(int32 index) const { +CuePoint* Cues::GetCueByIndex(int32_t index) const { if (cue_entries_ == NULL) return NULL; @@ -357,11 +384,11 @@ CuePoint* Cues::GetCueByIndex(int32 index) const { return cue_entries_[index]; } -uint64 Cues::Size() { - uint64 size = 0; - for (int32 i = 0; i < cue_entries_size_; ++i) +uint64_t Cues::Size() { + uint64_t size = 0; + for (int32_t i = 0; i < cue_entries_size_; ++i) size += GetCueByIndex(i)->Size(); - size += EbmlMasterElementSize(kMkvCues, size); + size += EbmlMasterElementSize(libwebm::kMkvCues, size); return size; } @@ -369,8 +396,8 @@ bool Cues::Write(IMkvWriter* writer) const { if (!writer) return false; - uint64 size = 0; - for (int32 i = 0; i < cue_entries_size_; ++i) { + uint64_t size = 0; + for (int32_t i = 0; i < cue_entries_size_; ++i) { const CuePoint* const cue = GetCueByIndex(i); if (!cue) @@ -379,25 +406,25 @@ bool Cues::Write(IMkvWriter* writer) const { size += cue->Size(); } - if (!WriteEbmlMasterElement(writer, kMkvCues, size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvCues, size)) return false; - const int64 payload_position = 
writer->Position(); + const int64_t payload_position = writer->Position(); if (payload_position < 0) return false; - for (int32 i = 0; i < cue_entries_size_; ++i) { + for (int32_t i = 0; i < cue_entries_size_; ++i) { const CuePoint* const cue = GetCueByIndex(i); if (!cue->Write(writer)) return false; } - const int64 stop_position = writer->Position(); + const int64_t stop_position = writer->Position(); if (stop_position < 0) return false; - if (stop_position - payload_position != static_cast(size)) + if (stop_position - payload_position != static_cast(size)) return false; return true; @@ -409,36 +436,40 @@ bool Cues::Write(IMkvWriter* writer) const { ContentEncAESSettings::ContentEncAESSettings() : cipher_mode_(kCTR) {} -uint64 ContentEncAESSettings::Size() const { - const uint64 payload = PayloadSize(); - const uint64 size = - EbmlMasterElementSize(kMkvContentEncAESSettings, payload) + payload; +uint64_t ContentEncAESSettings::Size() const { + const uint64_t payload = PayloadSize(); + const uint64_t size = + EbmlMasterElementSize(libwebm::kMkvContentEncAESSettings, payload) + + payload; return size; } bool ContentEncAESSettings::Write(IMkvWriter* writer) const { - const uint64 payload = PayloadSize(); + const uint64_t payload = PayloadSize(); - if (!WriteEbmlMasterElement(writer, kMkvContentEncAESSettings, payload)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvContentEncAESSettings, + payload)) return false; - const int64 payload_position = writer->Position(); + const int64_t payload_position = writer->Position(); if (payload_position < 0) return false; - if (!WriteEbmlElement(writer, kMkvAESSettingsCipherMode, cipher_mode_)) + if (!WriteEbmlElement(writer, libwebm::kMkvAESSettingsCipherMode, + cipher_mode_)) return false; - const int64 stop_position = writer->Position(); + const int64_t stop_position = writer->Position(); if (stop_position < 0 || - stop_position - payload_position != static_cast(payload)) + stop_position - payload_position != 
static_cast(payload)) return false; return true; } -uint64 ContentEncAESSettings::PayloadSize() const { - uint64 size = EbmlElementSize(kMkvAESSettingsCipherMode, cipher_mode_); +uint64_t ContentEncAESSettings::PayloadSize() const { + uint64_t size = + EbmlElementSize(libwebm::kMkvAESSettingsCipherMode, cipher_mode_); return size; } @@ -456,14 +487,14 @@ ContentEncoding::ContentEncoding() ContentEncoding::~ContentEncoding() { delete[] enc_key_id_; } -bool ContentEncoding::SetEncryptionID(const uint8* id, uint64 length) { +bool ContentEncoding::SetEncryptionID(const uint8_t* id, uint64_t length) { if (!id || length < 1) return false; delete[] enc_key_id_; enc_key_id_ = - new (std::nothrow) uint8[static_cast(length)]; // NOLINT + new (std::nothrow) uint8_t[static_cast(length)]; // NOLINT if (!enc_key_id_) return false; @@ -473,79 +504,89 @@ bool ContentEncoding::SetEncryptionID(const uint8* id, uint64 length) { return true; } -uint64 ContentEncoding::Size() const { - const uint64 encryption_size = EncryptionSize(); - const uint64 encoding_size = EncodingSize(0, encryption_size); - const uint64 encodings_size = - EbmlMasterElementSize(kMkvContentEncoding, encoding_size) + encoding_size; +uint64_t ContentEncoding::Size() const { + const uint64_t encryption_size = EncryptionSize(); + const uint64_t encoding_size = EncodingSize(0, encryption_size); + const uint64_t encodings_size = + EbmlMasterElementSize(libwebm::kMkvContentEncoding, encoding_size) + + encoding_size; return encodings_size; } bool ContentEncoding::Write(IMkvWriter* writer) const { - const uint64 encryption_size = EncryptionSize(); - const uint64 encoding_size = EncodingSize(0, encryption_size); - const uint64 size = - EbmlMasterElementSize(kMkvContentEncoding, encoding_size) + encoding_size; + const uint64_t encryption_size = EncryptionSize(); + const uint64_t encoding_size = EncodingSize(0, encryption_size); + const uint64_t size = + EbmlMasterElementSize(libwebm::kMkvContentEncoding, encoding_size) + + 
encoding_size; - const int64 payload_position = writer->Position(); + const int64_t payload_position = writer->Position(); if (payload_position < 0) return false; - if (!WriteEbmlMasterElement(writer, kMkvContentEncoding, encoding_size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvContentEncoding, + encoding_size)) return false; - if (!WriteEbmlElement(writer, kMkvContentEncodingOrder, encoding_order_)) + if (!WriteEbmlElement(writer, libwebm::kMkvContentEncodingOrder, + encoding_order_)) return false; - if (!WriteEbmlElement(writer, kMkvContentEncodingScope, encoding_scope_)) + if (!WriteEbmlElement(writer, libwebm::kMkvContentEncodingScope, + encoding_scope_)) return false; - if (!WriteEbmlElement(writer, kMkvContentEncodingType, encoding_type_)) + if (!WriteEbmlElement(writer, libwebm::kMkvContentEncodingType, + encoding_type_)) return false; - if (!WriteEbmlMasterElement(writer, kMkvContentEncryption, encryption_size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvContentEncryption, + encryption_size)) return false; - if (!WriteEbmlElement(writer, kMkvContentEncAlgo, enc_algo_)) + if (!WriteEbmlElement(writer, libwebm::kMkvContentEncAlgo, enc_algo_)) return false; - if (!WriteEbmlElement(writer, kMkvContentEncKeyID, enc_key_id_, + if (!WriteEbmlElement(writer, libwebm::kMkvContentEncKeyID, enc_key_id_, enc_key_id_length_)) return false; if (!enc_aes_settings_.Write(writer)) return false; - const int64 stop_position = writer->Position(); + const int64_t stop_position = writer->Position(); if (stop_position < 0 || - stop_position - payload_position != static_cast(size)) + stop_position - payload_position != static_cast(size)) return false; return true; } -uint64 ContentEncoding::EncodingSize(uint64 compresion_size, - uint64 encryption_size) const { +uint64_t ContentEncoding::EncodingSize(uint64_t compresion_size, + uint64_t encryption_size) const { // TODO(fgalligan): Add support for compression settings. 
if (compresion_size != 0) return 0; - uint64 encoding_size = 0; + uint64_t encoding_size = 0; if (encryption_size > 0) { encoding_size += - EbmlMasterElementSize(kMkvContentEncryption, encryption_size) + + EbmlMasterElementSize(libwebm::kMkvContentEncryption, encryption_size) + encryption_size; } - encoding_size += EbmlElementSize(kMkvContentEncodingType, encoding_type_); - encoding_size += EbmlElementSize(kMkvContentEncodingScope, encoding_scope_); - encoding_size += EbmlElementSize(kMkvContentEncodingOrder, encoding_order_); + encoding_size += + EbmlElementSize(libwebm::kMkvContentEncodingType, encoding_type_); + encoding_size += + EbmlElementSize(libwebm::kMkvContentEncodingScope, encoding_scope_); + encoding_size += + EbmlElementSize(libwebm::kMkvContentEncodingOrder, encoding_order_); return encoding_size; } -uint64 ContentEncoding::EncryptionSize() const { - const uint64 aes_size = enc_aes_settings_.Size(); +uint64_t ContentEncoding::EncryptionSize() const { + const uint64_t aes_size = enc_aes_settings_.Size(); - uint64 encryption_size = - EbmlElementSize(kMkvContentEncKeyID, enc_key_id_, enc_key_id_length_); - encryption_size += EbmlElementSize(kMkvContentEncAlgo, enc_algo_); + uint64_t encryption_size = EbmlElementSize(libwebm::kMkvContentEncKeyID, + enc_key_id_, enc_key_id_length_); + encryption_size += EbmlElementSize(libwebm::kMkvContentEncAlgo, enc_algo_); return encryption_size + aes_size; } @@ -577,7 +618,7 @@ Track::~Track() { delete[] name_; if (content_encoding_entries_) { - for (uint32 i = 0; i < content_encoding_entries_size_; ++i) { + for (uint32_t i = 0; i < content_encoding_entries_size_; ++i) { ContentEncoding* const encoding = content_encoding_entries_[i]; delete encoding; } @@ -586,7 +627,7 @@ Track::~Track() { } bool Track::AddContentEncoding() { - const uint32 count = content_encoding_entries_size_ + 1; + const uint32_t count = content_encoding_entries_size_ + 1; ContentEncoding** const content_encoding_entries = new (std::nothrow) 
ContentEncoding*[count]; // NOLINT @@ -600,7 +641,7 @@ bool Track::AddContentEncoding() { return false; } - for (uint32 i = 0; i < content_encoding_entries_size_; ++i) { + for (uint32_t i = 0; i < content_encoding_entries_size_; ++i) { content_encoding_entries[i] = content_encoding_entries_[i]; } @@ -612,7 +653,7 @@ bool Track::AddContentEncoding() { return true; } -ContentEncoding* Track::GetContentEncodingByIndex(uint32 index) const { +ContentEncoding* Track::GetContentEncodingByIndex(uint32_t index) const { if (content_encoding_entries_ == NULL) return NULL; @@ -622,46 +663,47 @@ ContentEncoding* Track::GetContentEncodingByIndex(uint32 index) const { return content_encoding_entries_[index]; } -uint64 Track::PayloadSize() const { - uint64 size = EbmlElementSize(kMkvTrackNumber, number_); - size += EbmlElementSize(kMkvTrackUID, uid_); - size += EbmlElementSize(kMkvTrackType, type_); +uint64_t Track::PayloadSize() const { + uint64_t size = EbmlElementSize(libwebm::kMkvTrackNumber, number_); + size += EbmlElementSize(libwebm::kMkvTrackUID, uid_); + size += EbmlElementSize(libwebm::kMkvTrackType, type_); if (codec_id_) - size += EbmlElementSize(kMkvCodecID, codec_id_); + size += EbmlElementSize(libwebm::kMkvCodecID, codec_id_); if (codec_private_) - size += EbmlElementSize(kMkvCodecPrivate, codec_private_, + size += EbmlElementSize(libwebm::kMkvCodecPrivate, codec_private_, codec_private_length_); if (language_) - size += EbmlElementSize(kMkvLanguage, language_); + size += EbmlElementSize(libwebm::kMkvLanguage, language_); if (name_) - size += EbmlElementSize(kMkvName, name_); + size += EbmlElementSize(libwebm::kMkvName, name_); if (max_block_additional_id_) - size += EbmlElementSize(kMkvMaxBlockAdditionID, max_block_additional_id_); + size += EbmlElementSize(libwebm::kMkvMaxBlockAdditionID, + max_block_additional_id_); if (codec_delay_) - size += EbmlElementSize(kMkvCodecDelay, codec_delay_); + size += EbmlElementSize(libwebm::kMkvCodecDelay, codec_delay_); if 
(seek_pre_roll_) - size += EbmlElementSize(kMkvSeekPreRoll, seek_pre_roll_); + size += EbmlElementSize(libwebm::kMkvSeekPreRoll, seek_pre_roll_); if (default_duration_) - size += EbmlElementSize(kMkvDefaultDuration, default_duration_); + size += EbmlElementSize(libwebm::kMkvDefaultDuration, default_duration_); if (content_encoding_entries_size_ > 0) { - uint64 content_encodings_size = 0; - for (uint32 i = 0; i < content_encoding_entries_size_; ++i) { + uint64_t content_encodings_size = 0; + for (uint32_t i = 0; i < content_encoding_entries_size_; ++i) { ContentEncoding* const encoding = content_encoding_entries_[i]; content_encodings_size += encoding->Size(); } - size += - EbmlMasterElementSize(kMkvContentEncodings, content_encodings_size) + - content_encodings_size; + size += EbmlMasterElementSize(libwebm::kMkvContentEncodings, + content_encodings_size) + + content_encodings_size; } return size; } -uint64 Track::Size() const { - uint64 size = PayloadSize(); - size += EbmlMasterElementSize(kMkvTrackEntry, size); +uint64_t Track::Size() const { + uint64_t size = PayloadSize(); + size += EbmlMasterElementSize(libwebm::kMkvTrackEntry, size); return size; } @@ -675,95 +717,97 @@ bool Track::Write(IMkvWriter* writer) const { // |size| may be bigger than what is written out in this function because // derived classes may write out more data in the Track element. 
- const uint64 payload_size = PayloadSize(); + const uint64_t payload_size = PayloadSize(); - if (!WriteEbmlMasterElement(writer, kMkvTrackEntry, payload_size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvTrackEntry, payload_size)) return false; - uint64 size = EbmlElementSize(kMkvTrackNumber, number_); - size += EbmlElementSize(kMkvTrackUID, uid_); - size += EbmlElementSize(kMkvTrackType, type_); + uint64_t size = EbmlElementSize(libwebm::kMkvTrackNumber, number_); + size += EbmlElementSize(libwebm::kMkvTrackUID, uid_); + size += EbmlElementSize(libwebm::kMkvTrackType, type_); if (codec_id_) - size += EbmlElementSize(kMkvCodecID, codec_id_); + size += EbmlElementSize(libwebm::kMkvCodecID, codec_id_); if (codec_private_) - size += EbmlElementSize(kMkvCodecPrivate, codec_private_, + size += EbmlElementSize(libwebm::kMkvCodecPrivate, codec_private_, codec_private_length_); if (language_) - size += EbmlElementSize(kMkvLanguage, language_); + size += EbmlElementSize(libwebm::kMkvLanguage, language_); if (name_) - size += EbmlElementSize(kMkvName, name_); + size += EbmlElementSize(libwebm::kMkvName, name_); if (max_block_additional_id_) - size += EbmlElementSize(kMkvMaxBlockAdditionID, max_block_additional_id_); + size += EbmlElementSize(libwebm::kMkvMaxBlockAdditionID, + max_block_additional_id_); if (codec_delay_) - size += EbmlElementSize(kMkvCodecDelay, codec_delay_); + size += EbmlElementSize(libwebm::kMkvCodecDelay, codec_delay_); if (seek_pre_roll_) - size += EbmlElementSize(kMkvSeekPreRoll, seek_pre_roll_); + size += EbmlElementSize(libwebm::kMkvSeekPreRoll, seek_pre_roll_); if (default_duration_) - size += EbmlElementSize(kMkvDefaultDuration, default_duration_); + size += EbmlElementSize(libwebm::kMkvDefaultDuration, default_duration_); - const int64 payload_position = writer->Position(); + const int64_t payload_position = writer->Position(); if (payload_position < 0) return false; - if (!WriteEbmlElement(writer, kMkvTrackNumber, number_)) + if 
(!WriteEbmlElement(writer, libwebm::kMkvTrackNumber, number_)) return false; - if (!WriteEbmlElement(writer, kMkvTrackUID, uid_)) + if (!WriteEbmlElement(writer, libwebm::kMkvTrackUID, uid_)) return false; - if (!WriteEbmlElement(writer, kMkvTrackType, type_)) + if (!WriteEbmlElement(writer, libwebm::kMkvTrackType, type_)) return false; if (max_block_additional_id_) { - if (!WriteEbmlElement(writer, kMkvMaxBlockAdditionID, + if (!WriteEbmlElement(writer, libwebm::kMkvMaxBlockAdditionID, max_block_additional_id_)) { return false; } } if (codec_delay_) { - if (!WriteEbmlElement(writer, kMkvCodecDelay, codec_delay_)) + if (!WriteEbmlElement(writer, libwebm::kMkvCodecDelay, codec_delay_)) return false; } if (seek_pre_roll_) { - if (!WriteEbmlElement(writer, kMkvSeekPreRoll, seek_pre_roll_)) + if (!WriteEbmlElement(writer, libwebm::kMkvSeekPreRoll, seek_pre_roll_)) return false; } if (default_duration_) { - if (!WriteEbmlElement(writer, kMkvDefaultDuration, default_duration_)) + if (!WriteEbmlElement(writer, libwebm::kMkvDefaultDuration, + default_duration_)) return false; } if (codec_id_) { - if (!WriteEbmlElement(writer, kMkvCodecID, codec_id_)) + if (!WriteEbmlElement(writer, libwebm::kMkvCodecID, codec_id_)) return false; } if (codec_private_) { - if (!WriteEbmlElement(writer, kMkvCodecPrivate, codec_private_, + if (!WriteEbmlElement(writer, libwebm::kMkvCodecPrivate, codec_private_, codec_private_length_)) return false; } if (language_) { - if (!WriteEbmlElement(writer, kMkvLanguage, language_)) + if (!WriteEbmlElement(writer, libwebm::kMkvLanguage, language_)) return false; } if (name_) { - if (!WriteEbmlElement(writer, kMkvName, name_)) + if (!WriteEbmlElement(writer, libwebm::kMkvName, name_)) return false; } - int64 stop_position = writer->Position(); + int64_t stop_position = writer->Position(); if (stop_position < 0 || - stop_position - payload_position != static_cast(size)) + stop_position - payload_position != static_cast(size)) return false; if 
(content_encoding_entries_size_ > 0) { - uint64 content_encodings_size = 0; - for (uint32 i = 0; i < content_encoding_entries_size_; ++i) { + uint64_t content_encodings_size = 0; + for (uint32_t i = 0; i < content_encoding_entries_size_; ++i) { ContentEncoding* const encoding = content_encoding_entries_[i]; content_encodings_size += encoding->Size(); } - if (!WriteEbmlMasterElement(writer, kMkvContentEncodings, + if (!WriteEbmlMasterElement(writer, libwebm::kMkvContentEncodings, content_encodings_size)) return false; - for (uint32 i = 0; i < content_encoding_entries_size_; ++i) { + for (uint32_t i = 0; i < content_encoding_entries_size_; ++i) { ContentEncoding* const encoding = content_encoding_entries_[i]; if (!encoding->Write(writer)) return false; @@ -776,14 +820,14 @@ bool Track::Write(IMkvWriter* writer) const { return true; } -bool Track::SetCodecPrivate(const uint8* codec_private, uint64 length) { +bool Track::SetCodecPrivate(const uint8_t* codec_private, uint64_t length) { if (!codec_private || length < 1) return false; delete[] codec_private_; codec_private_ = - new (std::nothrow) uint8[static_cast(length)]; // NOLINT + new (std::nothrow) uint8_t[static_cast(length)]; // NOLINT if (!codec_private_) return false; @@ -842,6 +886,279 @@ void Track::set_name(const char* name) { } } +/////////////////////////////////////////////////////////////// +// +// Colour and its child elements + +uint64_t PrimaryChromaticity::PrimaryChromaticityPayloadSize( + libwebm::MkvId x_id, libwebm::MkvId y_id) const { + return EbmlElementSize(x_id, x) + EbmlElementSize(y_id, y); +} + +bool PrimaryChromaticity::Write(IMkvWriter* writer, libwebm::MkvId x_id, + libwebm::MkvId y_id) const { + return WriteEbmlElement(writer, x_id, x) && WriteEbmlElement(writer, y_id, y); +} + +uint64_t MasteringMetadata::MasteringMetadataSize() const { + uint64_t size = PayloadSize(); + + if (size > 0) + size += EbmlMasterElementSize(libwebm::kMkvMasteringMetadata, size); + + return size; +} + +bool 
MasteringMetadata::Write(IMkvWriter* writer) const { + const uint64_t size = PayloadSize(); + + // Don't write an empty element. + if (size == 0) + return true; + + if (!WriteEbmlMasterElement(writer, libwebm::kMkvMasteringMetadata, size)) + return false; + if (luminance_max != kValueNotPresent && + !WriteEbmlElement(writer, libwebm::kMkvLuminanceMax, luminance_max)) { + return false; + } + if (luminance_min != kValueNotPresent && + !WriteEbmlElement(writer, libwebm::kMkvLuminanceMin, luminance_min)) { + return false; + } + if (r_ && + !r_->Write(writer, libwebm::kMkvPrimaryRChromaticityX, + libwebm::kMkvPrimaryRChromaticityY)) { + return false; + } + if (g_ && + !g_->Write(writer, libwebm::kMkvPrimaryGChromaticityX, + libwebm::kMkvPrimaryGChromaticityY)) { + return false; + } + if (b_ && + !b_->Write(writer, libwebm::kMkvPrimaryBChromaticityX, + libwebm::kMkvPrimaryBChromaticityY)) { + return false; + } + if (white_point_ && + !white_point_->Write(writer, libwebm::kMkvWhitePointChromaticityX, + libwebm::kMkvWhitePointChromaticityY)) { + return false; + } + + return true; +} + +bool MasteringMetadata::SetChromaticity( + const PrimaryChromaticity* r, const PrimaryChromaticity* g, + const PrimaryChromaticity* b, const PrimaryChromaticity* white_point) { + PrimaryChromaticityPtr r_ptr(NULL); + if (r) { + if (!CopyChromaticity(r, &r_ptr)) + return false; + } + PrimaryChromaticityPtr g_ptr(NULL); + if (g) { + if (!CopyChromaticity(g, &g_ptr)) + return false; + } + PrimaryChromaticityPtr b_ptr(NULL); + if (b) { + if (!CopyChromaticity(b, &b_ptr)) + return false; + } + PrimaryChromaticityPtr wp_ptr(NULL); + if (white_point) { + if (!CopyChromaticity(white_point, &wp_ptr)) + return false; + } + + r_ = r_ptr.release(); + g_ = g_ptr.release(); + b_ = b_ptr.release(); + white_point_ = wp_ptr.release(); + return true; +} + +uint64_t MasteringMetadata::PayloadSize() const { + uint64_t size = 0; + + if (luminance_max != kValueNotPresent) + size += 
EbmlElementSize(libwebm::kMkvLuminanceMax, luminance_max); + if (luminance_min != kValueNotPresent) + size += EbmlElementSize(libwebm::kMkvLuminanceMin, luminance_min); + + if (r_) { + size += r_->PrimaryChromaticityPayloadSize( + libwebm::kMkvPrimaryRChromaticityX, libwebm::kMkvPrimaryRChromaticityY); + } + if (g_) { + size += g_->PrimaryChromaticityPayloadSize( + libwebm::kMkvPrimaryGChromaticityX, libwebm::kMkvPrimaryGChromaticityY); + } + if (b_) { + size += b_->PrimaryChromaticityPayloadSize( + libwebm::kMkvPrimaryBChromaticityX, libwebm::kMkvPrimaryBChromaticityY); + } + if (white_point_) { + size += white_point_->PrimaryChromaticityPayloadSize( + libwebm::kMkvWhitePointChromaticityX, + libwebm::kMkvWhitePointChromaticityY); + } + + return size; +} + +uint64_t Colour::ColourSize() const { + uint64_t size = PayloadSize(); + + if (size > 0) + size += EbmlMasterElementSize(libwebm::kMkvColour, size); + + return size; +} + +bool Colour::Write(IMkvWriter* writer) const { + const uint64_t size = PayloadSize(); + + // Don't write an empty element. 
+ if (size == 0) + return true; + + if (!WriteEbmlMasterElement(writer, libwebm::kMkvColour, size)) + return false; + + if (matrix_coefficients != kValueNotPresent && + !WriteEbmlElement(writer, libwebm::kMkvMatrixCoefficients, + matrix_coefficients)) { + return false; + } + if (bits_per_channel != kValueNotPresent && + !WriteEbmlElement(writer, libwebm::kMkvBitsPerChannel, + bits_per_channel)) { + return false; + } + if (chroma_subsampling_horz != kValueNotPresent && + !WriteEbmlElement(writer, libwebm::kMkvChromaSubsamplingHorz, + chroma_subsampling_horz)) { + return false; + } + if (chroma_subsampling_vert != kValueNotPresent && + !WriteEbmlElement(writer, libwebm::kMkvChromaSubsamplingVert, + chroma_subsampling_vert)) { + return false; + } + + if (cb_subsampling_horz != kValueNotPresent && + !WriteEbmlElement(writer, libwebm::kMkvCbSubsamplingHorz, + cb_subsampling_horz)) { + return false; + } + if (cb_subsampling_vert != kValueNotPresent && + !WriteEbmlElement(writer, libwebm::kMkvCbSubsamplingVert, + cb_subsampling_vert)) { + return false; + } + if (chroma_siting_horz != kValueNotPresent && + !WriteEbmlElement(writer, libwebm::kMkvChromaSitingHorz, + chroma_siting_horz)) { + return false; + } + if (chroma_siting_vert != kValueNotPresent && + !WriteEbmlElement(writer, libwebm::kMkvChromaSitingVert, + chroma_siting_vert)) { + return false; + } + if (range != kValueNotPresent && + !WriteEbmlElement(writer, libwebm::kMkvRange, range)) { + return false; + } + if (transfer_characteristics != kValueNotPresent && + !WriteEbmlElement(writer, libwebm::kMkvTransferCharacteristics, + transfer_characteristics)) { + return false; + } + if (primaries != kValueNotPresent && + !WriteEbmlElement(writer, libwebm::kMkvPrimaries, primaries)) { + return false; + } + if (max_cll != kValueNotPresent && + !WriteEbmlElement(writer, libwebm::kMkvMaxCLL, max_cll)) { + return false; + } + if (max_fall != kValueNotPresent && + !WriteEbmlElement(writer, libwebm::kMkvMaxFALL, max_fall)) { + 
return false; + } + + if (mastering_metadata_ && !mastering_metadata_->Write(writer)) + return false; + + return true; +} + +bool Colour::SetMasteringMetadata(const MasteringMetadata& mastering_metadata) { + std::auto_ptr mm_ptr(new MasteringMetadata()); + if (!mm_ptr.get()) + return false; + + mm_ptr->luminance_max = mastering_metadata.luminance_max; + mm_ptr->luminance_min = mastering_metadata.luminance_min; + + if (!mm_ptr->SetChromaticity(mastering_metadata.r(), mastering_metadata.g(), + mastering_metadata.b(), + mastering_metadata.white_point())) { + return false; + } + + delete mastering_metadata_; + mastering_metadata_ = mm_ptr.release(); + return true; +} + +uint64_t Colour::PayloadSize() const { + uint64_t size = 0; + + if (matrix_coefficients != kValueNotPresent) + size += + EbmlElementSize(libwebm::kMkvMatrixCoefficients, matrix_coefficients); + if (bits_per_channel != kValueNotPresent) + size += EbmlElementSize(libwebm::kMkvBitsPerChannel, bits_per_channel); + if (chroma_subsampling_horz != kValueNotPresent) + size += EbmlElementSize(libwebm::kMkvChromaSubsamplingHorz, + chroma_subsampling_horz); + if (chroma_subsampling_vert != kValueNotPresent) + size += EbmlElementSize(libwebm::kMkvChromaSubsamplingVert, + chroma_subsampling_vert); + if (cb_subsampling_horz != kValueNotPresent) + size += + EbmlElementSize(libwebm::kMkvCbSubsamplingHorz, cb_subsampling_horz); + if (cb_subsampling_vert != kValueNotPresent) + size += + EbmlElementSize(libwebm::kMkvCbSubsamplingVert, cb_subsampling_vert); + if (chroma_siting_horz != kValueNotPresent) + size += EbmlElementSize(libwebm::kMkvChromaSitingHorz, chroma_siting_horz); + if (chroma_siting_vert != kValueNotPresent) + size += EbmlElementSize(libwebm::kMkvChromaSitingVert, chroma_siting_vert); + if (range != kValueNotPresent) + size += EbmlElementSize(libwebm::kMkvRange, range); + if (transfer_characteristics != kValueNotPresent) + size += EbmlElementSize(libwebm::kMkvTransferCharacteristics, + 
transfer_characteristics); + if (primaries != kValueNotPresent) + size += EbmlElementSize(libwebm::kMkvPrimaries, primaries); + if (max_cll != kValueNotPresent) + size += EbmlElementSize(libwebm::kMkvMaxCLL, max_cll); + if (max_fall != kValueNotPresent) + size += EbmlElementSize(libwebm::kMkvMaxFALL, max_fall); + + if (mastering_metadata_) + size += mastering_metadata_->MasteringMetadataSize(); + + return size; +} + /////////////////////////////////////////////////////////////// // // VideoTrack Class @@ -858,11 +1175,12 @@ VideoTrack::VideoTrack(unsigned int* seed) height_(0), stereo_mode_(0), alpha_mode_(0), - width_(0) {} + width_(0), + colour_(NULL) {} -VideoTrack::~VideoTrack() {} +VideoTrack::~VideoTrack() { delete colour_; } -bool VideoTrack::SetStereoMode(uint64 stereo_mode) { +bool VideoTrack::SetStereoMode(uint64_t stereo_mode) { if (stereo_mode != kMono && stereo_mode != kSideBySideLeftIsFirst && stereo_mode != kTopBottomRightIsFirst && stereo_mode != kTopBottomLeftIsFirst && @@ -873,7 +1191,7 @@ bool VideoTrack::SetStereoMode(uint64 stereo_mode) { return true; } -bool VideoTrack::SetAlphaMode(uint64 alpha_mode) { +bool VideoTrack::SetAlphaMode(uint64_t alpha_mode) { if (alpha_mode != kNoAlpha && alpha_mode != kAlpha) return false; @@ -881,11 +1199,11 @@ bool VideoTrack::SetAlphaMode(uint64 alpha_mode) { return true; } -uint64 VideoTrack::PayloadSize() const { - const uint64 parent_size = Track::PayloadSize(); +uint64_t VideoTrack::PayloadSize() const { + const uint64_t parent_size = Track::PayloadSize(); - uint64 size = VideoPayloadSize(); - size += EbmlMasterElementSize(kMkvVideo, size); + uint64_t size = VideoPayloadSize(); + size += EbmlMasterElementSize(libwebm::kMkvVideo, size); return parent_size + size; } @@ -894,88 +1212,122 @@ bool VideoTrack::Write(IMkvWriter* writer) const { if (!Track::Write(writer)) return false; - const uint64 size = VideoPayloadSize(); + const uint64_t size = VideoPayloadSize(); - if (!WriteEbmlMasterElement(writer, 
kMkvVideo, size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvVideo, size)) return false; - const int64 payload_position = writer->Position(); + const int64_t payload_position = writer->Position(); if (payload_position < 0) return false; - if (!WriteEbmlElement(writer, kMkvPixelWidth, width_)) + if (!WriteEbmlElement(writer, libwebm::kMkvPixelWidth, width_)) return false; - if (!WriteEbmlElement(writer, kMkvPixelHeight, height_)) + if (!WriteEbmlElement(writer, libwebm::kMkvPixelHeight, height_)) return false; if (display_width_ > 0) { - if (!WriteEbmlElement(writer, kMkvDisplayWidth, display_width_)) + if (!WriteEbmlElement(writer, libwebm::kMkvDisplayWidth, display_width_)) return false; } if (display_height_ > 0) { - if (!WriteEbmlElement(writer, kMkvDisplayHeight, display_height_)) + if (!WriteEbmlElement(writer, libwebm::kMkvDisplayHeight, display_height_)) return false; } if (crop_left_ > 0) { - if (!WriteEbmlElement(writer, kMkvPixelCropLeft, crop_left_)) + if (!WriteEbmlElement(writer, libwebm::kMkvPixelCropLeft, crop_left_)) return false; } if (crop_right_ > 0) { - if (!WriteEbmlElement(writer, kMkvPixelCropRight, crop_right_)) + if (!WriteEbmlElement(writer, libwebm::kMkvPixelCropRight, crop_right_)) return false; } if (crop_top_ > 0) { - if (!WriteEbmlElement(writer, kMkvPixelCropTop, crop_top_)) + if (!WriteEbmlElement(writer, libwebm::kMkvPixelCropTop, crop_top_)) return false; } if (crop_bottom_ > 0) { - if (!WriteEbmlElement(writer, kMkvPixelCropBottom, crop_bottom_)) + if (!WriteEbmlElement(writer, libwebm::kMkvPixelCropBottom, crop_bottom_)) return false; } if (stereo_mode_ > kMono) { - if (!WriteEbmlElement(writer, kMkvStereoMode, stereo_mode_)) + if (!WriteEbmlElement(writer, libwebm::kMkvStereoMode, stereo_mode_)) return false; } if (alpha_mode_ > kNoAlpha) { - if (!WriteEbmlElement(writer, kMkvAlphaMode, alpha_mode_)) + if (!WriteEbmlElement(writer, libwebm::kMkvAlphaMode, alpha_mode_)) return false; } if (frame_rate_ > 0.0) { - if 
(!WriteEbmlElement(writer, kMkvFrameRate, + if (!WriteEbmlElement(writer, libwebm::kMkvFrameRate, static_cast(frame_rate_))) { return false; } } + if (colour_) { + if (!colour_->Write(writer)) + return false; + } - const int64 stop_position = writer->Position(); + const int64_t stop_position = writer->Position(); if (stop_position < 0 || - stop_position - payload_position != static_cast(size)) { + stop_position - payload_position != static_cast(size)) { return false; } return true; } -uint64 VideoTrack::VideoPayloadSize() const { - uint64 size = EbmlElementSize(kMkvPixelWidth, width_); - size += EbmlElementSize(kMkvPixelHeight, height_); +bool VideoTrack::SetColour(const Colour& colour) { + std::auto_ptr colour_ptr(new Colour()); + if (!colour_ptr.get()) + return false; + + if (colour.mastering_metadata()) { + if (!colour_ptr->SetMasteringMetadata(*colour.mastering_metadata())) + return false; + } + + colour_ptr->matrix_coefficients = colour.matrix_coefficients; + colour_ptr->bits_per_channel = colour.bits_per_channel; + colour_ptr->chroma_subsampling_horz = colour.chroma_subsampling_horz; + colour_ptr->chroma_subsampling_vert = colour.chroma_subsampling_vert; + colour_ptr->cb_subsampling_horz = colour.cb_subsampling_horz; + colour_ptr->cb_subsampling_vert = colour.cb_subsampling_vert; + colour_ptr->chroma_siting_horz = colour.chroma_siting_horz; + colour_ptr->chroma_siting_vert = colour.chroma_siting_vert; + colour_ptr->range = colour.range; + colour_ptr->transfer_characteristics = colour.transfer_characteristics; + colour_ptr->primaries = colour.primaries; + colour_ptr->max_cll = colour.max_cll; + colour_ptr->max_fall = colour.max_fall; + colour_ = colour_ptr.release(); + return true; +} + +uint64_t VideoTrack::VideoPayloadSize() const { + uint64_t size = EbmlElementSize(libwebm::kMkvPixelWidth, width_); + size += EbmlElementSize(libwebm::kMkvPixelHeight, height_); if (display_width_ > 0) - size += EbmlElementSize(kMkvDisplayWidth, display_width_); + size += 
EbmlElementSize(libwebm::kMkvDisplayWidth, display_width_); if (display_height_ > 0) - size += EbmlElementSize(kMkvDisplayHeight, display_height_); + size += EbmlElementSize(libwebm::kMkvDisplayHeight, display_height_); if (crop_left_ > 0) - size += EbmlElementSize(kMkvPixelCropLeft, crop_left_); + size += EbmlElementSize(libwebm::kMkvPixelCropLeft, crop_left_); if (crop_right_ > 0) - size += EbmlElementSize(kMkvPixelCropRight, crop_right_); + size += EbmlElementSize(libwebm::kMkvPixelCropRight, crop_right_); if (crop_top_ > 0) - size += EbmlElementSize(kMkvPixelCropTop, crop_top_); + size += EbmlElementSize(libwebm::kMkvPixelCropTop, crop_top_); if (crop_bottom_ > 0) - size += EbmlElementSize(kMkvPixelCropBottom, crop_bottom_); + size += EbmlElementSize(libwebm::kMkvPixelCropBottom, crop_bottom_); if (stereo_mode_ > kMono) - size += EbmlElementSize(kMkvStereoMode, stereo_mode_); + size += EbmlElementSize(libwebm::kMkvStereoMode, stereo_mode_); if (alpha_mode_ > kNoAlpha) - size += EbmlElementSize(kMkvAlphaMode, alpha_mode_); + size += EbmlElementSize(libwebm::kMkvAlphaMode, alpha_mode_); if (frame_rate_ > 0.0) - size += EbmlElementSize(kMkvFrameRate, static_cast(frame_rate_)); + size += EbmlElementSize(libwebm::kMkvFrameRate, + static_cast(frame_rate_)); + if (colour_) + size += colour_->ColourSize(); return size; } @@ -989,15 +1341,15 @@ AudioTrack::AudioTrack(unsigned int* seed) AudioTrack::~AudioTrack() {} -uint64 AudioTrack::PayloadSize() const { - const uint64 parent_size = Track::PayloadSize(); +uint64_t AudioTrack::PayloadSize() const { + const uint64_t parent_size = Track::PayloadSize(); - uint64 size = - EbmlElementSize(kMkvSamplingFrequency, static_cast(sample_rate_)); - size += EbmlElementSize(kMkvChannels, channels_); + uint64_t size = EbmlElementSize(libwebm::kMkvSamplingFrequency, + static_cast(sample_rate_)); + size += EbmlElementSize(libwebm::kMkvChannels, channels_); if (bit_depth_ > 0) - size += EbmlElementSize(kMkvBitDepth, bit_depth_); - size 
+= EbmlMasterElementSize(kMkvAudio, size); + size += EbmlElementSize(libwebm::kMkvBitDepth, bit_depth_); + size += EbmlMasterElementSize(libwebm::kMkvAudio, size); return parent_size + size; } @@ -1007,31 +1359,31 @@ bool AudioTrack::Write(IMkvWriter* writer) const { return false; // Calculate AudioSettings size. - uint64 size = - EbmlElementSize(kMkvSamplingFrequency, static_cast(sample_rate_)); - size += EbmlElementSize(kMkvChannels, channels_); + uint64_t size = EbmlElementSize(libwebm::kMkvSamplingFrequency, + static_cast(sample_rate_)); + size += EbmlElementSize(libwebm::kMkvChannels, channels_); if (bit_depth_ > 0) - size += EbmlElementSize(kMkvBitDepth, bit_depth_); + size += EbmlElementSize(libwebm::kMkvBitDepth, bit_depth_); - if (!WriteEbmlMasterElement(writer, kMkvAudio, size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvAudio, size)) return false; - const int64 payload_position = writer->Position(); + const int64_t payload_position = writer->Position(); if (payload_position < 0) return false; - if (!WriteEbmlElement(writer, kMkvSamplingFrequency, + if (!WriteEbmlElement(writer, libwebm::kMkvSamplingFrequency, static_cast(sample_rate_))) return false; - if (!WriteEbmlElement(writer, kMkvChannels, channels_)) + if (!WriteEbmlElement(writer, libwebm::kMkvChannels, channels_)) return false; if (bit_depth_ > 0) - if (!WriteEbmlElement(writer, kMkvBitDepth, bit_depth_)) + if (!WriteEbmlElement(writer, libwebm::kMkvBitDepth, bit_depth_)) return false; - const int64 stop_position = writer->Position(); + const int64_t stop_position = writer->Position(); if (stop_position < 0 || - stop_position - payload_position != static_cast(size)) + stop_position - payload_position != static_cast(size)) return false; return true; @@ -1047,11 +1399,12 @@ const char Tracks::kVp8CodecId[] = "V_VP8"; const char Tracks::kVp9CodecId[] = "V_VP9"; const char Tracks::kVp10CodecId[] = "V_VP10"; -Tracks::Tracks() : track_entries_(NULL), track_entries_size_(0) {} +Tracks::Tracks() 
+ : track_entries_(NULL), track_entries_size_(0), wrote_tracks_(false) {} Tracks::~Tracks() { if (track_entries_) { - for (uint32 i = 0; i < track_entries_size_; ++i) { + for (uint32_t i = 0; i < track_entries_size_; ++i) { Track* const track = track_entries_[i]; delete track; } @@ -1059,8 +1412,8 @@ Tracks::~Tracks() { } } -bool Tracks::AddTrack(Track* track, int32 number) { - if (number < 0) +bool Tracks::AddTrack(Track* track, int32_t number) { + if (number < 0 || wrote_tracks_) return false; // This muxer only supports track numbers in the range [1, 126], in @@ -1071,23 +1424,23 @@ bool Tracks::AddTrack(Track* track, int32 number) { if (number > 0x7E) return false; - uint32 track_num = number; + uint32_t track_num = number; if (track_num > 0) { // Check to make sure a track does not already have |track_num|. - for (uint32 i = 0; i < track_entries_size_; ++i) { + for (uint32_t i = 0; i < track_entries_size_; ++i) { if (track_entries_[i]->number() == track_num) return false; } } - const uint32 count = track_entries_size_ + 1; + const uint32_t count = track_entries_size_ + 1; Track** const track_entries = new (std::nothrow) Track*[count]; // NOLINT if (!track_entries) return false; - for (uint32 i = 0; i < track_entries_size_; ++i) { + for (uint32_t i = 0; i < track_entries_size_; ++i) { track_entries[i] = track_entries_[i]; } @@ -1101,7 +1454,7 @@ bool Tracks::AddTrack(Track* track, int32 number) { bool exit = false; do { exit = true; - for (uint32 i = 0; i < track_entries_size_; ++i) { + for (uint32_t i = 0; i < track_entries_size_; ++i) { if (track_entries[i]->number() == track_num) { track_num++; exit = false; @@ -1118,7 +1471,7 @@ bool Tracks::AddTrack(Track* track, int32 number) { return true; } -const Track* Tracks::GetTrackByIndex(uint32 index) const { +const Track* Tracks::GetTrackByIndex(uint32_t index) const { if (track_entries_ == NULL) return NULL; @@ -1128,9 +1481,9 @@ const Track* Tracks::GetTrackByIndex(uint32 index) const { return 
track_entries_[index]; } -Track* Tracks::GetTrackByNumber(uint64 track_number) const { - const int32 count = track_entries_size(); - for (int32 i = 0; i < count; ++i) { +Track* Tracks::GetTrackByNumber(uint64_t track_number) const { + const int32_t count = track_entries_size(); + for (int32_t i = 0; i < count; ++i) { if (track_entries_[i]->number() == track_number) return track_entries_[i]; } @@ -1138,7 +1491,7 @@ Track* Tracks::GetTrackByNumber(uint64 track_number) const { return NULL; } -bool Tracks::TrackIsAudio(uint64 track_number) const { +bool Tracks::TrackIsAudio(uint64_t track_number) const { const Track* const track = GetTrackByNumber(track_number); if (track->type() == kAudio) @@ -1147,7 +1500,7 @@ bool Tracks::TrackIsAudio(uint64 track_number) const { return false; } -bool Tracks::TrackIsVideo(uint64 track_number) const { +bool Tracks::TrackIsVideo(uint64_t track_number) const { const Track* const track = GetTrackByNumber(track_number); if (track->type() == kVideo) @@ -1157,9 +1510,9 @@ bool Tracks::TrackIsVideo(uint64 track_number) const { } bool Tracks::Write(IMkvWriter* writer) const { - uint64 size = 0; - const int32 count = track_entries_size(); - for (int32 i = 0; i < count; ++i) { + uint64_t size = 0; + const int32_t count = track_entries_size(); + for (int32_t i = 0; i < count; ++i) { const Track* const track = GetTrackByIndex(i); if (!track) @@ -1168,24 +1521,25 @@ bool Tracks::Write(IMkvWriter* writer) const { size += track->Size(); } - if (!WriteEbmlMasterElement(writer, kMkvTracks, size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvTracks, size)) return false; - const int64 payload_position = writer->Position(); + const int64_t payload_position = writer->Position(); if (payload_position < 0) return false; - for (int32 i = 0; i < count; ++i) { + for (int32_t i = 0; i < count; ++i) { const Track* const track = GetTrackByIndex(i); if (!track->Write(writer)) return false; } - const int64 stop_position = writer->Position(); + const int64_t 
stop_position = writer->Position(); if (stop_position < 0 || - stop_position - payload_position != static_cast(size)) + stop_position - payload_position != static_cast(size)) return false; + wrote_tracks_ = true; return true; } @@ -1195,9 +1549,10 @@ bool Tracks::Write(IMkvWriter* writer) const { bool Chapter::set_id(const char* id) { return StrCpy(id, &id_); } -void Chapter::set_time(const Segment& segment, uint64 start_ns, uint64 end_ns) { +void Chapter::set_time(const Segment& segment, uint64_t start_ns, + uint64_t end_ns) { const SegmentInfo* const info = segment.GetSegmentInfo(); - const uint64 timecode_scale = info->timecode_scale(); + const uint64_t timecode_scale = info->timecode_scale(); start_timecode_ = start_ns / timecode_scale; end_timecode_ = end_ns / timecode_scale; } @@ -1292,38 +1647,40 @@ bool Chapter::ExpandDisplaysArray() { return true; } -uint64 Chapter::WriteAtom(IMkvWriter* writer) const { - uint64 payload_size = EbmlElementSize(kMkvChapterStringUID, id_) + - EbmlElementSize(kMkvChapterUID, uid_) + - EbmlElementSize(kMkvChapterTimeStart, start_timecode_) + - EbmlElementSize(kMkvChapterTimeEnd, end_timecode_); +uint64_t Chapter::WriteAtom(IMkvWriter* writer) const { + uint64_t payload_size = + EbmlElementSize(libwebm::kMkvChapterStringUID, id_) + + EbmlElementSize(libwebm::kMkvChapterUID, uid_) + + EbmlElementSize(libwebm::kMkvChapterTimeStart, start_timecode_) + + EbmlElementSize(libwebm::kMkvChapterTimeEnd, end_timecode_); for (int idx = 0; idx < displays_count_; ++idx) { const Display& d = displays_[idx]; payload_size += d.WriteDisplay(NULL); } - const uint64 atom_size = - EbmlMasterElementSize(kMkvChapterAtom, payload_size) + payload_size; + const uint64_t atom_size = + EbmlMasterElementSize(libwebm::kMkvChapterAtom, payload_size) + + payload_size; if (writer == NULL) return atom_size; - const int64 start = writer->Position(); + const int64_t start = writer->Position(); - if (!WriteEbmlMasterElement(writer, kMkvChapterAtom, payload_size)) 
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvChapterAtom, payload_size)) return 0; - if (!WriteEbmlElement(writer, kMkvChapterStringUID, id_)) + if (!WriteEbmlElement(writer, libwebm::kMkvChapterStringUID, id_)) return 0; - if (!WriteEbmlElement(writer, kMkvChapterUID, uid_)) + if (!WriteEbmlElement(writer, libwebm::kMkvChapterUID, uid_)) return 0; - if (!WriteEbmlElement(writer, kMkvChapterTimeStart, start_timecode_)) + if (!WriteEbmlElement(writer, libwebm::kMkvChapterTimeStart, start_timecode_)) return 0; - if (!WriteEbmlElement(writer, kMkvChapterTimeEnd, end_timecode_)) + if (!WriteEbmlElement(writer, libwebm::kMkvChapterTimeEnd, end_timecode_)) return 0; for (int idx = 0; idx < displays_count_; ++idx) { @@ -1333,9 +1690,9 @@ uint64 Chapter::WriteAtom(IMkvWriter* writer) const { return 0; } - const int64 stop = writer->Position(); + const int64_t stop = writer->Position(); - if (stop >= start && uint64(stop - start) != atom_size) + if (stop >= start && uint64_t(stop - start) != atom_size) return 0; return atom_size; @@ -1365,42 +1722,44 @@ bool Chapter::Display::set_country(const char* country) { return StrCpy(country, &country_); } -uint64 Chapter::Display::WriteDisplay(IMkvWriter* writer) const { - uint64 payload_size = EbmlElementSize(kMkvChapString, title_); +uint64_t Chapter::Display::WriteDisplay(IMkvWriter* writer) const { + uint64_t payload_size = EbmlElementSize(libwebm::kMkvChapString, title_); if (language_) - payload_size += EbmlElementSize(kMkvChapLanguage, language_); + payload_size += EbmlElementSize(libwebm::kMkvChapLanguage, language_); if (country_) - payload_size += EbmlElementSize(kMkvChapCountry, country_); + payload_size += EbmlElementSize(libwebm::kMkvChapCountry, country_); - const uint64 display_size = - EbmlMasterElementSize(kMkvChapterDisplay, payload_size) + payload_size; + const uint64_t display_size = + EbmlMasterElementSize(libwebm::kMkvChapterDisplay, payload_size) + + payload_size; if (writer == NULL) return display_size; 
- const int64 start = writer->Position(); + const int64_t start = writer->Position(); - if (!WriteEbmlMasterElement(writer, kMkvChapterDisplay, payload_size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvChapterDisplay, + payload_size)) return 0; - if (!WriteEbmlElement(writer, kMkvChapString, title_)) + if (!WriteEbmlElement(writer, libwebm::kMkvChapString, title_)) return 0; if (language_) { - if (!WriteEbmlElement(writer, kMkvChapLanguage, language_)) + if (!WriteEbmlElement(writer, libwebm::kMkvChapLanguage, language_)) return 0; } if (country_) { - if (!WriteEbmlElement(writer, kMkvChapCountry, country_)) + if (!WriteEbmlElement(writer, libwebm::kMkvChapCountry, country_)) return 0; } - const int64 stop = writer->Position(); + const int64_t stop = writer->Position(); - if (stop >= start && uint64(stop - start) != display_size) + if (stop >= start && uint64_t(stop - start) != display_size) return 0; return display_size; @@ -1438,19 +1797,19 @@ bool Chapters::Write(IMkvWriter* writer) const { if (writer == NULL) return false; - const uint64 payload_size = WriteEdition(NULL); // return size only + const uint64_t payload_size = WriteEdition(NULL); // return size only - if (!WriteEbmlMasterElement(writer, kMkvChapters, payload_size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvChapters, payload_size)) return false; - const int64 start = writer->Position(); + const int64_t start = writer->Position(); if (WriteEdition(writer) == 0) // error return false; - const int64 stop = writer->Position(); + const int64_t stop = writer->Position(); - if (stop >= start && uint64(stop - start) != payload_size) + if (stop >= start && uint64_t(stop - start) != payload_size) return false; return true; @@ -1480,36 +1839,37 @@ bool Chapters::ExpandChaptersArray() { return true; } -uint64 Chapters::WriteEdition(IMkvWriter* writer) const { - uint64 payload_size = 0; +uint64_t Chapters::WriteEdition(IMkvWriter* writer) const { + uint64_t payload_size = 0; for (int idx = 0; 
idx < chapters_count_; ++idx) { const Chapter& chapter = chapters_[idx]; payload_size += chapter.WriteAtom(NULL); } - const uint64 edition_size = - EbmlMasterElementSize(kMkvEditionEntry, payload_size) + payload_size; + const uint64_t edition_size = + EbmlMasterElementSize(libwebm::kMkvEditionEntry, payload_size) + + payload_size; if (writer == NULL) // return size only return edition_size; - const int64 start = writer->Position(); + const int64_t start = writer->Position(); - if (!WriteEbmlMasterElement(writer, kMkvEditionEntry, payload_size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvEditionEntry, payload_size)) return 0; // error for (int idx = 0; idx < chapters_count_; ++idx) { const Chapter& chapter = chapters_[idx]; - const uint64 chapter_size = chapter.WriteAtom(writer); + const uint64_t chapter_size = chapter.WriteAtom(writer); if (chapter_size == 0) // error return 0; } - const int64 stop = writer->Position(); + const int64_t stop = writer->Position(); - if (stop >= start && uint64(stop - start) != edition_size) + if (stop >= start && uint64_t(stop - start) != edition_size) return 0; return edition_size; @@ -1581,23 +1941,23 @@ bool Tag::ExpandSimpleTagsArray() { return true; } -uint64 Tag::Write(IMkvWriter* writer) const { - uint64 payload_size = 0; +uint64_t Tag::Write(IMkvWriter* writer) const { + uint64_t payload_size = 0; for (int idx = 0; idx < simple_tags_count_; ++idx) { const SimpleTag& st = simple_tags_[idx]; payload_size += st.Write(NULL); } - const uint64 tag_size = - EbmlMasterElementSize(kMkvTag, payload_size) + payload_size; + const uint64_t tag_size = + EbmlMasterElementSize(libwebm::kMkvTag, payload_size) + payload_size; if (writer == NULL) return tag_size; - const int64 start = writer->Position(); + const int64_t start = writer->Position(); - if (!WriteEbmlMasterElement(writer, kMkvTag, payload_size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvTag, payload_size)) return 0; for (int idx = 0; idx < simple_tags_count_; 
++idx) { @@ -1607,9 +1967,9 @@ uint64 Tag::Write(IMkvWriter* writer) const { return 0; } - const int64 stop = writer->Position(); + const int64_t stop = writer->Position(); - if (stop >= start && uint64(stop - start) != tag_size) + if (stop >= start && uint64_t(stop - start) != tag_size) return 0; return tag_size; @@ -1635,31 +1995,32 @@ bool Tag::SimpleTag::set_tag_string(const char* tag_string) { return StrCpy(tag_string, &tag_string_); } -uint64 Tag::SimpleTag::Write(IMkvWriter* writer) const { - uint64 payload_size = EbmlElementSize(kMkvTagName, tag_name_); +uint64_t Tag::SimpleTag::Write(IMkvWriter* writer) const { + uint64_t payload_size = EbmlElementSize(libwebm::kMkvTagName, tag_name_); - payload_size += EbmlElementSize(kMkvTagString, tag_string_); + payload_size += EbmlElementSize(libwebm::kMkvTagString, tag_string_); - const uint64 simple_tag_size = - EbmlMasterElementSize(kMkvSimpleTag, payload_size) + payload_size; + const uint64_t simple_tag_size = + EbmlMasterElementSize(libwebm::kMkvSimpleTag, payload_size) + + payload_size; if (writer == NULL) return simple_tag_size; - const int64 start = writer->Position(); + const int64_t start = writer->Position(); - if (!WriteEbmlMasterElement(writer, kMkvSimpleTag, payload_size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvSimpleTag, payload_size)) return 0; - if (!WriteEbmlElement(writer, kMkvTagName, tag_name_)) + if (!WriteEbmlElement(writer, libwebm::kMkvTagName, tag_name_)) return 0; - if (!WriteEbmlElement(writer, kMkvTagString, tag_string_)) + if (!WriteEbmlElement(writer, libwebm::kMkvTagString, tag_string_)) return 0; - const int64 stop = writer->Position(); + const int64_t stop = writer->Position(); - if (stop >= start && uint64(stop - start) != simple_tag_size) + if (stop >= start && uint64_t(stop - start) != simple_tag_size) return 0; return simple_tag_size; @@ -1694,29 +2055,29 @@ bool Tags::Write(IMkvWriter* writer) const { if (writer == NULL) return false; - uint64 payload_size = 0; + 
uint64_t payload_size = 0; for (int idx = 0; idx < tags_count_; ++idx) { const Tag& tag = tags_[idx]; payload_size += tag.Write(NULL); } - if (!WriteEbmlMasterElement(writer, kMkvTags, payload_size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvTags, payload_size)) return false; - const int64 start = writer->Position(); + const int64_t start = writer->Position(); for (int idx = 0; idx < tags_count_; ++idx) { const Tag& tag = tags_[idx]; - const uint64 tag_size = tag.Write(writer); + const uint64_t tag_size = tag.Write(writer); if (tag_size == 0) // error return 0; } - const int64 stop = writer->Position(); + const int64_t stop = writer->Position(); - if (stop >= start && uint64(stop - start) != payload_size) + if (stop >= start && uint64_t(stop - start) != payload_size) return false; return true; @@ -1750,15 +2111,18 @@ bool Tags::ExpandTagsArray() { // // Cluster class -Cluster::Cluster(uint64 timecode, int64 cues_pos, uint64 timecode_scale) +Cluster::Cluster(uint64_t timecode, int64_t cues_pos, uint64_t timecode_scale, + bool write_last_frame_with_duration, bool fixed_size_timecode) : blocks_added_(0), finalized_(false), + fixed_size_timecode_(fixed_size_timecode), header_written_(false), payload_size_(0), position_for_cues_(cues_pos), size_position_(-1), timecode_(timecode), timecode_scale_(timecode_scale), + write_last_frame_with_duration_(write_last_frame_with_duration), writer_(NULL) {} Cluster::~Cluster() {} @@ -1771,24 +2135,27 @@ bool Cluster::Init(IMkvWriter* ptr_writer) { return true; } -bool Cluster::AddFrame(const Frame* const frame) { return DoWriteFrame(frame); } +bool Cluster::AddFrame(const Frame* const frame) { + return QueueOrWriteFrame(frame); +} -bool Cluster::AddFrame(const uint8* data, uint64 length, uint64 track_number, - uint64 abs_timecode, bool is_key) { +bool Cluster::AddFrame(const uint8_t* data, uint64_t length, + uint64_t track_number, uint64_t abs_timecode, + bool is_key) { Frame frame; if (!frame.Init(data, length)) return 
false; frame.set_track_number(track_number); frame.set_timestamp(abs_timecode); frame.set_is_key(is_key); - return DoWriteFrame(&frame); + return QueueOrWriteFrame(&frame); } -bool Cluster::AddFrameWithAdditional(const uint8* data, uint64 length, - const uint8* additional, - uint64 additional_length, uint64 add_id, - uint64 track_number, uint64 abs_timecode, - bool is_key) { +bool Cluster::AddFrameWithAdditional(const uint8_t* data, uint64_t length, + const uint8_t* additional, + uint64_t additional_length, + uint64_t add_id, uint64_t track_number, + uint64_t abs_timecode, bool is_key) { if (!additional || additional_length == 0) { return false; } @@ -1800,13 +2167,13 @@ bool Cluster::AddFrameWithAdditional(const uint8* data, uint64 length, frame.set_track_number(track_number); frame.set_timestamp(abs_timecode); frame.set_is_key(is_key); - return DoWriteFrame(&frame); + return QueueOrWriteFrame(&frame); } -bool Cluster::AddFrameWithDiscardPadding(const uint8* data, uint64 length, - int64 discard_padding, - uint64 track_number, - uint64 abs_timecode, bool is_key) { +bool Cluster::AddFrameWithDiscardPadding(const uint8_t* data, uint64_t length, + int64_t discard_padding, + uint64_t track_number, + uint64_t abs_timecode, bool is_key) { Frame frame; if (!frame.Init(data, length)) return false; @@ -1814,11 +2181,12 @@ bool Cluster::AddFrameWithDiscardPadding(const uint8* data, uint64 length, frame.set_track_number(track_number); frame.set_timestamp(abs_timecode); frame.set_is_key(is_key); - return DoWriteFrame(&frame); + return QueueOrWriteFrame(&frame); } -bool Cluster::AddMetadata(const uint8* data, uint64 length, uint64 track_number, - uint64 abs_timecode, uint64 duration_timecode) { +bool Cluster::AddMetadata(const uint8_t* data, uint64_t length, + uint64_t track_number, uint64_t abs_timecode, + uint64_t duration_timecode) { Frame frame; if (!frame.Init(data, length)) return false; @@ -1826,17 +2194,62 @@ bool Cluster::AddMetadata(const uint8* data, uint64 length, 
uint64 track_number, frame.set_timestamp(abs_timecode); frame.set_duration(duration_timecode); frame.set_is_key(true); // All metadata blocks are keyframes. - return DoWriteFrame(&frame); + return QueueOrWriteFrame(&frame); } -void Cluster::AddPayloadSize(uint64 size) { payload_size_ += size; } +void Cluster::AddPayloadSize(uint64_t size) { payload_size_ += size; } bool Cluster::Finalize() { - if (!writer_ || finalized_ || size_position_ == -1) + return !write_last_frame_with_duration_ && Finalize(false, 0); +} + +bool Cluster::Finalize(bool set_last_frame_duration, uint64_t duration) { + if (!writer_ || finalized_) + return false; + + if (write_last_frame_with_duration_) { + // Write out held back Frames. This essentially performs a k-way merge + // across all tracks in the increasing order of timestamps. + while (!stored_frames_.empty()) { + Frame* frame = stored_frames_.begin()->second.front(); + + // Get the next frame to write (frame with least timestamp across all + // tracks). + for (FrameMapIterator frames_iterator = ++stored_frames_.begin(); + frames_iterator != stored_frames_.end(); ++frames_iterator) { + if (frames_iterator->second.front()->timestamp() < frame->timestamp()) { + frame = frames_iterator->second.front(); + } + } + + // Set the duration if it's the last frame for the track. + if (set_last_frame_duration && + stored_frames_[frame->track_number()].size() == 1 && + !frame->duration_set()) { + frame->set_duration(duration - frame->timestamp()); + if (!frame->is_key() && !frame->reference_block_timestamp_set()) { + frame->set_reference_block_timestamp( + last_block_timestamp_[frame->track_number()]); + } + } + + // Write the frame and remove it from |stored_frames_|. 
+ const bool wrote_frame = DoWriteFrame(frame); + stored_frames_[frame->track_number()].pop_front(); + if (stored_frames_[frame->track_number()].empty()) { + stored_frames_.erase(frame->track_number()); + } + delete frame; + if (!wrote_frame) + return false; + } + } + + if (size_position_ == -1) return false; if (writer_->Seekable()) { - const int64 pos = writer_->Position(); + const int64_t pos = writer_->Position(); if (writer_->Position(size_position_)) return false; @@ -1853,9 +2266,10 @@ bool Cluster::Finalize() { return true; } -uint64 Cluster::Size() const { - const uint64 element_size = - EbmlMasterElementSize(kMkvCluster, 0xFFFFFFFFFFFFFFFFULL) + payload_size_; +uint64_t Cluster::Size() const { + const uint64_t element_size = + EbmlMasterElementSize(libwebm::kMkvCluster, 0xFFFFFFFFFFFFFFFFULL) + + payload_size_; return element_size; } @@ -1871,15 +2285,15 @@ bool Cluster::PreWriteBlock() { return true; } -void Cluster::PostWriteBlock(uint64 element_size) { +void Cluster::PostWriteBlock(uint64_t element_size) { AddPayloadSize(element_size); ++blocks_added_; } -int64 Cluster::GetRelativeTimecode(int64 abs_timecode) const { - const int64 cluster_timecode = this->Cluster::timecode(); - const int64 rel_timecode = - static_cast(abs_timecode) - cluster_timecode; +int64_t Cluster::GetRelativeTimecode(int64_t abs_timecode) const { + const int64_t cluster_timecode = this->Cluster::timecode(); + const int64_t rel_timecode = + static_cast(abs_timecode) - cluster_timecode; if (rel_timecode < 0 || rel_timecode > kMaxBlockTimecode) return -1; @@ -1894,11 +2308,67 @@ bool Cluster::DoWriteFrame(const Frame* const frame) { if (!PreWriteBlock()) return false; - const uint64 element_size = WriteFrame(writer_, frame, this); + const uint64_t element_size = WriteFrame(writer_, frame, this); if (element_size == 0) return false; PostWriteBlock(element_size); + last_block_timestamp_[frame->track_number()] = frame->timestamp(); + return true; +} + +bool 
Cluster::QueueOrWriteFrame(const Frame* const frame) { + if (!frame || !frame->IsValid()) + return false; + + // If |write_last_frame_with_duration_| is not set, then write the frame right + // away. + if (!write_last_frame_with_duration_) { + return DoWriteFrame(frame); + } + + // Queue the current frame. + uint64_t track_number = frame->track_number(); + Frame* const frame_to_store = new Frame(); + frame_to_store->CopyFrom(*frame); + stored_frames_[track_number].push_back(frame_to_store); + + // Iterate through all queued frames in the current track except the last one + // and write it if it is okay to do so (i.e.) no other track has an held back + // frame with timestamp <= the timestamp of the frame in question. + std::vector::iterator> frames_to_erase; + for (std::list::iterator + current_track_iterator = stored_frames_[track_number].begin(), + end = --stored_frames_[track_number].end(); + current_track_iterator != end; ++current_track_iterator) { + const Frame* const frame_to_write = *current_track_iterator; + bool okay_to_write = true; + for (FrameMapIterator track_iterator = stored_frames_.begin(); + track_iterator != stored_frames_.end(); ++track_iterator) { + if (track_iterator->first == track_number) { + continue; + } + if (track_iterator->second.front()->timestamp() < + frame_to_write->timestamp()) { + okay_to_write = false; + break; + } + } + if (okay_to_write) { + const bool wrote_frame = DoWriteFrame(frame_to_write); + delete frame_to_write; + if (!wrote_frame) + return false; + frames_to_erase.push_back(current_track_iterator); + } else { + break; + } + } + for (std::vector::iterator>::iterator iterator = + frames_to_erase.begin(); + iterator != frames_to_erase.end(); ++iterator) { + stored_frames_[track_number].erase(*iterator); + } return true; } @@ -1906,7 +2376,7 @@ bool Cluster::WriteClusterHeader() { if (finalized_) return false; - if (WriteID(writer_, kMkvCluster)) + if (WriteID(writer_, libwebm::kMkvCluster)) return false; // Save for 
later. @@ -1917,9 +2387,12 @@ bool Cluster::WriteClusterHeader() { if (SerializeInt(writer_, kEbmlUnknownValue, 8)) return false; - if (!WriteEbmlElement(writer_, kMkvTimecode, timecode())) + if (!WriteEbmlElement(writer_, libwebm::kMkvTimecode, timecode(), + fixed_size_timecode_ ? 8 : 0)) { return false; - AddPayloadSize(EbmlElementSize(kMkvTimecode, timecode())); + } + AddPayloadSize(EbmlElementSize(libwebm::kMkvTimecode, timecode(), + fixed_size_timecode_ ? 8 : 0)); header_written_ = true; return true; @@ -1930,7 +2403,7 @@ bool Cluster::WriteClusterHeader() { // SeekHead Class SeekHead::SeekHead() : start_pos_(0ULL) { - for (int32 i = 0; i < kSeekEntryCount; ++i) { + for (int32_t i = 0; i < kSeekEntryCount; ++i) { seek_entry_id_[i] = 0; seek_entry_pos_[i] = 0; } @@ -1943,17 +2416,19 @@ bool SeekHead::Finalize(IMkvWriter* writer) const { if (start_pos_ == -1) return false; - uint64 payload_size = 0; - uint64 entry_size[kSeekEntryCount]; + uint64_t payload_size = 0; + uint64_t entry_size[kSeekEntryCount]; - for (int32 i = 0; i < kSeekEntryCount; ++i) { + for (int32_t i = 0; i < kSeekEntryCount; ++i) { if (seek_entry_id_[i] != 0) { - entry_size[i] = - EbmlElementSize(kMkvSeekID, static_cast(seek_entry_id_[i])); - entry_size[i] += EbmlElementSize(kMkvSeekPosition, seek_entry_pos_[i]); + entry_size[i] = EbmlElementSize( + libwebm::kMkvSeekID, static_cast(seek_entry_id_[i])); + entry_size[i] += + EbmlElementSize(libwebm::kMkvSeekPosition, seek_entry_pos_[i]); payload_size += - EbmlMasterElementSize(kMkvSeek, entry_size[i]) + entry_size[i]; + EbmlMasterElementSize(libwebm::kMkvSeek, entry_size[i]) + + entry_size[i]; } } @@ -1961,34 +2436,35 @@ bool SeekHead::Finalize(IMkvWriter* writer) const { if (payload_size == 0) return true; - const int64 pos = writer->Position(); + const int64_t pos = writer->Position(); if (writer->Position(start_pos_)) return false; - if (!WriteEbmlMasterElement(writer, kMkvSeekHead, payload_size)) + if (!WriteEbmlMasterElement(writer, 
libwebm::kMkvSeekHead, payload_size)) return false; - for (int32 i = 0; i < kSeekEntryCount; ++i) { + for (int32_t i = 0; i < kSeekEntryCount; ++i) { if (seek_entry_id_[i] != 0) { - if (!WriteEbmlMasterElement(writer, kMkvSeek, entry_size[i])) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvSeek, entry_size[i])) return false; - if (!WriteEbmlElement(writer, kMkvSeekID, - static_cast(seek_entry_id_[i]))) + if (!WriteEbmlElement(writer, libwebm::kMkvSeekID, + static_cast(seek_entry_id_[i]))) return false; - if (!WriteEbmlElement(writer, kMkvSeekPosition, seek_entry_pos_[i])) + if (!WriteEbmlElement(writer, libwebm::kMkvSeekPosition, + seek_entry_pos_[i])) return false; } } - const uint64 total_entry_size = kSeekEntryCount * MaxEntrySize(); - const uint64 total_size = - EbmlMasterElementSize(kMkvSeekHead, total_entry_size) + + const uint64_t total_entry_size = kSeekEntryCount * MaxEntrySize(); + const uint64_t total_size = + EbmlMasterElementSize(libwebm::kMkvSeekHead, total_entry_size) + total_entry_size; - const int64 size_left = total_size - (writer->Position() - start_pos_); + const int64_t size_left = total_size - (writer->Position() - start_pos_); - const uint64 bytes_written = WriteVoidElement(writer, size_left); + const uint64_t bytes_written = WriteVoidElement(writer, size_left); if (!bytes_written) return false; @@ -2000,20 +2476,21 @@ bool SeekHead::Finalize(IMkvWriter* writer) const { } bool SeekHead::Write(IMkvWriter* writer) { - const uint64 entry_size = kSeekEntryCount * MaxEntrySize(); - const uint64 size = EbmlMasterElementSize(kMkvSeekHead, entry_size); + const uint64_t entry_size = kSeekEntryCount * MaxEntrySize(); + const uint64_t size = + EbmlMasterElementSize(libwebm::kMkvSeekHead, entry_size); start_pos_ = writer->Position(); - const uint64 bytes_written = WriteVoidElement(writer, size + entry_size); + const uint64_t bytes_written = WriteVoidElement(writer, size + entry_size); if (!bytes_written) return false; return true; } -bool 
SeekHead::AddSeekEntry(uint32 id, uint64 pos) { - for (int32 i = 0; i < kSeekEntryCount; ++i) { +bool SeekHead::AddSeekEntry(uint32_t id, uint64_t pos) { + for (int32_t i = 0; i < kSeekEntryCount; ++i) { if (seek_entry_id_[i] == 0) { seek_entry_id_[i] = id; seek_entry_pos_[i] = pos; @@ -2023,19 +2500,19 @@ bool SeekHead::AddSeekEntry(uint32 id, uint64 pos) { return false; } -uint32 SeekHead::GetId(int index) const { +uint32_t SeekHead::GetId(int index) const { if (index < 0 || index >= kSeekEntryCount) return UINT_MAX; return seek_entry_id_[index]; } -uint64 SeekHead::GetPosition(int index) const { +uint64_t SeekHead::GetPosition(int index) const { if (index < 0 || index >= kSeekEntryCount) return ULLONG_MAX; return seek_entry_pos_[index]; } -bool SeekHead::SetSeekEntry(int index, uint32 id, uint64 position) { +bool SeekHead::SetSeekEntry(int index, uint32_t id, uint64_t position) { if (index < 0 || index >= kSeekEntryCount) return false; seek_entry_id_[index] = id; @@ -2043,12 +2520,12 @@ bool SeekHead::SetSeekEntry(int index, uint32 id, uint64 position) { return true; } -uint64 SeekHead::MaxEntrySize() const { - const uint64 max_entry_payload_size = - EbmlElementSize(kMkvSeekID, 0xffffffffULL) + - EbmlElementSize(kMkvSeekPosition, 0xffffffffffffffffULL); - const uint64 max_entry_size = - EbmlMasterElementSize(kMkvSeek, max_entry_payload_size) + +uint64_t SeekHead::MaxEntrySize() const { + const uint64_t max_entry_payload_size = + EbmlElementSize(libwebm::kMkvSeekID, UINT64_C(0xffffffff)) + + EbmlElementSize(libwebm::kMkvSeekPosition, UINT64_C(0xffffffffffffffff)); + const uint64_t max_entry_size = + EbmlMasterElementSize(libwebm::kMkvSeek, max_entry_payload_size) + max_entry_payload_size; return max_entry_size; @@ -2072,10 +2549,10 @@ SegmentInfo::~SegmentInfo() { } bool SegmentInfo::Init() { - int32 major; - int32 minor; - int32 build; - int32 revision; + int32_t major; + int32_t minor; + int32_t build; + int32_t revision; GetVersion(&major, &minor, &build, 
&revision); char temp[256]; #ifdef _MSC_VER @@ -2115,12 +2592,12 @@ bool SegmentInfo::Finalize(IMkvWriter* writer) const { if (duration_pos_ == -1) return false; - const int64 pos = writer->Position(); + const int64_t pos = writer->Position(); if (writer->Position(duration_pos_)) return false; - if (!WriteEbmlElement(writer, kMkvDuration, + if (!WriteEbmlElement(writer, libwebm::kMkvDuration, static_cast(duration_))) return false; @@ -2136,43 +2613,45 @@ bool SegmentInfo::Write(IMkvWriter* writer) { if (!writer || !muxing_app_ || !writing_app_) return false; - uint64 size = EbmlElementSize(kMkvTimecodeScale, timecode_scale_); + uint64_t size = EbmlElementSize(libwebm::kMkvTimecodeScale, timecode_scale_); if (duration_ > 0.0) - size += EbmlElementSize(kMkvDuration, static_cast(duration_)); + size += + EbmlElementSize(libwebm::kMkvDuration, static_cast(duration_)); if (date_utc_ != LLONG_MIN) - size += EbmlDateElementSize(kMkvDateUTC); - size += EbmlElementSize(kMkvMuxingApp, muxing_app_); - size += EbmlElementSize(kMkvWritingApp, writing_app_); + size += EbmlDateElementSize(libwebm::kMkvDateUTC); + size += EbmlElementSize(libwebm::kMkvMuxingApp, muxing_app_); + size += EbmlElementSize(libwebm::kMkvWritingApp, writing_app_); - if (!WriteEbmlMasterElement(writer, kMkvInfo, size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvInfo, size)) return false; - const int64 payload_position = writer->Position(); + const int64_t payload_position = writer->Position(); if (payload_position < 0) return false; - if (!WriteEbmlElement(writer, kMkvTimecodeScale, timecode_scale_)) + if (!WriteEbmlElement(writer, libwebm::kMkvTimecodeScale, timecode_scale_)) return false; if (duration_ > 0.0) { // Save for later duration_pos_ = writer->Position(); - if (!WriteEbmlElement(writer, kMkvDuration, static_cast(duration_))) + if (!WriteEbmlElement(writer, libwebm::kMkvDuration, + static_cast(duration_))) return false; } if (date_utc_ != LLONG_MIN) - WriteEbmlDateElement(writer, 
kMkvDateUTC, date_utc_); + WriteEbmlDateElement(writer, libwebm::kMkvDateUTC, date_utc_); - if (!WriteEbmlElement(writer, kMkvMuxingApp, muxing_app_)) + if (!WriteEbmlElement(writer, libwebm::kMkvMuxingApp, muxing_app_)) return false; - if (!WriteEbmlElement(writer, kMkvWritingApp, writing_app_)) + if (!WriteEbmlElement(writer, libwebm::kMkvWritingApp, writing_app_)) return false; - const int64 stop_position = writer->Position(); + const int64_t stop_position = writer->Position(); if (stop_position < 0 || - stop_position - payload_position != static_cast(size)) + stop_position - payload_position != static_cast(size)) return false; return true; @@ -2244,6 +2723,8 @@ Segment::Segment() mode_(kFile), new_cuepoint_(false), output_cues_(true), + accurate_cluster_duration_(false), + fixed_size_cluster_timecode_(false), payload_pos_(0), size_position_(0), doc_type_version_(kDefaultDocTypeVersion), @@ -2260,7 +2741,7 @@ Segment::Segment() Segment::~Segment() { if (cluster_list_) { - for (int32 i = 0; i < cluster_list_size_; ++i) { + for (int32_t i = 0; i < cluster_list_size_; ++i) { Cluster* const cluster = cluster_list_[i]; delete cluster; } @@ -2268,7 +2749,7 @@ Segment::~Segment() { } if (frames_) { - for (int32 i = 0; i < frames_size_; ++i) { + for (int32_t i = 0; i < frames_size_; ++i) { Frame* const frame = frames_[i]; delete frame; } @@ -2292,13 +2773,13 @@ Segment::~Segment() { } } -void Segment::MoveCuesBeforeClustersHelper(uint64 diff, int32 index, - uint64* cues_size) { +void Segment::MoveCuesBeforeClustersHelper(uint64_t diff, int32_t index, + uint64_t* cues_size) { CuePoint* const cue_point = cues_.GetCueByIndex(index); if (cue_point == NULL) return; - const uint64 old_cue_point_size = cue_point->Size(); - const uint64 cluster_pos = cue_point->cluster_pos() + diff; + const uint64_t old_cue_point_size = cue_point->Size(); + const uint64_t cluster_pos = cue_point->cluster_pos() + diff; cue_point->set_cluster_pos(cluster_pos); // update the new cluster position 
// New size of the cue is computed as follows // Let a = current sum of size of all CuePoints @@ -2308,40 +2789,40 @@ void Segment::MoveCuesBeforeClustersHelper(uint64 diff, int32 index, // Let d = b + c. Now d is the |diff| passed to the next recursive call. // Let e = a + b. Now e is the |cues_size| passed to the next recursive // call. - const uint64 cue_point_size_diff = cue_point->Size() - old_cue_point_size; - const uint64 cue_size_diff = + const uint64_t cue_point_size_diff = cue_point->Size() - old_cue_point_size; + const uint64_t cue_size_diff = GetCodedUIntSize(*cues_size + cue_point_size_diff) - GetCodedUIntSize(*cues_size); *cues_size += cue_point_size_diff; diff = cue_size_diff + cue_point_size_diff; if (diff > 0) { - for (int32 i = 0; i < cues_.cue_entries_size(); ++i) { + for (int32_t i = 0; i < cues_.cue_entries_size(); ++i) { MoveCuesBeforeClustersHelper(diff, i, cues_size); } } } void Segment::MoveCuesBeforeClusters() { - const uint64 current_cue_size = cues_.Size(); - uint64 cue_size = 0; - for (int32 i = 0; i < cues_.cue_entries_size(); ++i) + const uint64_t current_cue_size = cues_.Size(); + uint64_t cue_size = 0; + for (int32_t i = 0; i < cues_.cue_entries_size(); ++i) cue_size += cues_.GetCueByIndex(i)->Size(); - for (int32 i = 0; i < cues_.cue_entries_size(); ++i) + for (int32_t i = 0; i < cues_.cue_entries_size(); ++i) MoveCuesBeforeClustersHelper(current_cue_size, i, &cue_size); // Adjust the Seek Entry to reflect the change in position // of Cluster and Cues - int32 cluster_index = 0; - int32 cues_index = 0; - for (int32 i = 0; i < SeekHead::kSeekEntryCount; ++i) { - if (seek_head_.GetId(i) == kMkvCluster) + int32_t cluster_index = 0; + int32_t cues_index = 0; + for (int32_t i = 0; i < SeekHead::kSeekEntryCount; ++i) { + if (seek_head_.GetId(i) == libwebm::kMkvCluster) cluster_index = i; - if (seek_head_.GetId(i) == kMkvCues) + if (seek_head_.GetId(i) == libwebm::kMkvCues) cues_index = i; } - seek_head_.SetSeekEntry(cues_index, kMkvCues, 
+ seek_head_.SetSeekEntry(cues_index, libwebm::kMkvCues, seek_head_.GetPosition(cluster_index)); - seek_head_.SetSeekEntry(cluster_index, kMkvCluster, + seek_head_.SetSeekEntry(cluster_index, libwebm::kMkvCluster, cues_.Size() + seek_head_.GetPosition(cues_index)); } @@ -2359,8 +2840,8 @@ bool Segment::CopyAndMoveCuesBeforeClusters(mkvparser::IMkvReader* reader, IMkvWriter* writer) { if (!writer->Seekable() || chunking_) return false; - const int64 cluster_offset = - cluster_list_[0]->size_position() - GetUIntSize(kMkvCluster); + const int64_t cluster_offset = + cluster_list_[0]->size_position() - GetUIntSize(libwebm::kMkvCluster); // Copy the headers. if (!ChunkedCopy(reader, writer, 0, cluster_offset)) @@ -2383,8 +2864,8 @@ bool Segment::CopyAndMoveCuesBeforeClusters(mkvparser::IMkvReader* reader, return false; // Update the Segment size in case the Cues size has changed. - const int64 pos = writer->Position(); - const int64 segment_size = writer->Position() - payload_pos_; + const int64_t pos = writer->Position(); + const int64_t segment_size = writer->Position() - payload_pos_; if (writer->Position(size_position_) || WriteUIntSize(writer, segment_size, 8) || writer->Position(pos)) return false; @@ -2395,15 +2876,17 @@ bool Segment::Finalize() { if (WriteFramesAll() < 0) return false; + if (cluster_list_size_ > 0) { + // Update last cluster's size + Cluster* const old_cluster = cluster_list_[cluster_list_size_ - 1]; + + // For the last frame of the last Cluster, we don't write it as a BlockGroup + // with Duration unless the frame itself has duration set explicitly. 
+ if (!old_cluster || !old_cluster->Finalize(false, 0)) + return false; + } + if (mode_ == kFile) { - if (cluster_list_size_ > 0) { - // Update last cluster's size - Cluster* const old_cluster = cluster_list_[cluster_list_size_ - 1]; - - if (!old_cluster || !old_cluster->Finalize()) - return false; - } - if (chunking_ && chunk_writer_cluster_) { chunk_writer_cluster_->Close(); chunk_count_++; @@ -2417,7 +2900,7 @@ bool Segment::Finalize() { return false; if (output_cues_) - if (!seek_head_.AddSeekEntry(kMkvCues, MaxOffset())) + if (!seek_head_.AddSeekEntry(libwebm::kMkvCues, MaxOffset())) return false; if (chunking_) { @@ -2448,11 +2931,11 @@ bool Segment::Finalize() { if (size_position_ == -1) return false; - const int64 segment_size = MaxOffset(); + const int64_t segment_size = MaxOffset(); if (segment_size < 1) return false; - const int64 pos = writer_header_->Position(); + const int64_t pos = writer_header_->Position(); UpdateDocTypeVersion(); if (doc_type_version_ != doc_type_version_written_) { if (writer_header_->Position(0)) @@ -2490,7 +2973,7 @@ bool Segment::Finalize() { return true; } -Track* Segment::AddTrack(int32 number) { +Track* Segment::AddTrack(int32_t number) { Track* const track = new (std::nothrow) Track(&seed_); // NOLINT if (!track) @@ -2508,7 +2991,7 @@ Chapter* Segment::AddChapter() { return chapters_.AddChapter(&seed_); } Tag* Segment::AddTag() { return tags_.AddTag(); } -uint64 Segment::AddVideoTrack(int32 width, int32 height, int32 number) { +uint64_t Segment::AddVideoTrack(int32_t width, int32_t height, int32_t number) { VideoTrack* const track = new (std::nothrow) VideoTrack(&seed_); // NOLINT if (!track) return 0; @@ -2524,7 +3007,7 @@ uint64 Segment::AddVideoTrack(int32 width, int32 height, int32 number) { return track->number(); } -bool Segment::AddCuePoint(uint64 timestamp, uint64 track) { +bool Segment::AddCuePoint(uint64_t timestamp, uint64_t track) { if (cluster_list_size_ < 1) return false; @@ -2547,7 +3030,8 @@ bool 
Segment::AddCuePoint(uint64 timestamp, uint64 track) { return true; } -uint64 Segment::AddAudioTrack(int32 sample_rate, int32 channels, int32 number) { +uint64_t Segment::AddAudioTrack(int32_t sample_rate, int32_t channels, + int32_t number) { AudioTrack* const track = new (std::nothrow) AudioTrack(&seed_); // NOLINT if (!track) return 0; @@ -2562,8 +3046,8 @@ uint64 Segment::AddAudioTrack(int32 sample_rate, int32 channels, int32 number) { return track->number(); } -bool Segment::AddFrame(const uint8* data, uint64 length, uint64 track_number, - uint64 timestamp, bool is_key) { +bool Segment::AddFrame(const uint8_t* data, uint64_t length, + uint64_t track_number, uint64_t timestamp, bool is_key) { if (!data) return false; @@ -2576,11 +3060,11 @@ bool Segment::AddFrame(const uint8* data, uint64 length, uint64 track_number, return AddGenericFrame(&frame); } -bool Segment::AddFrameWithAdditional(const uint8* data, uint64 length, - const uint8* additional, - uint64 additional_length, uint64 add_id, - uint64 track_number, uint64 timestamp, - bool is_key) { +bool Segment::AddFrameWithAdditional(const uint8_t* data, uint64_t length, + const uint8_t* additional, + uint64_t additional_length, + uint64_t add_id, uint64_t track_number, + uint64_t timestamp, bool is_key) { if (!data || !additional) return false; @@ -2595,10 +3079,10 @@ bool Segment::AddFrameWithAdditional(const uint8* data, uint64 length, return AddGenericFrame(&frame); } -bool Segment::AddFrameWithDiscardPadding(const uint8* data, uint64 length, - int64 discard_padding, - uint64 track_number, uint64 timestamp, - bool is_key) { +bool Segment::AddFrameWithDiscardPadding(const uint8_t* data, uint64_t length, + int64_t discard_padding, + uint64_t track_number, + uint64_t timestamp, bool is_key) { if (!data) return false; @@ -2612,8 +3096,9 @@ bool Segment::AddFrameWithDiscardPadding(const uint8* data, uint64 length, return AddGenericFrame(&frame); } -bool Segment::AddMetadata(const uint8* data, uint64 length, 
uint64 track_number, - uint64 timestamp_ns, uint64 duration_ns) { +bool Segment::AddMetadata(const uint8_t* data, uint64_t length, + uint64_t track_number, uint64_t timestamp_ns, + uint64_t duration_ns) { if (!data) return false; @@ -2702,6 +3187,14 @@ bool Segment::AddGenericFrame(const Frame* frame) { void Segment::OutputCues(bool output_cues) { output_cues_ = output_cues; } +void Segment::AccurateClusterDuration(bool accurate_cluster_duration) { + accurate_cluster_duration_ = accurate_cluster_duration; +} + +void Segment::UseFixedSizeClusterTimecode(bool fixed_size_cluster_timecode) { + fixed_size_cluster_timecode_ = fixed_size_cluster_timecode; +} + bool Segment::SetChunking(bool chunking, const char* filename) { if (chunk_count_ > 0) return false; @@ -2781,7 +3274,7 @@ bool Segment::SetChunking(bool chunking, const char* filename) { return true; } -bool Segment::CuesTrack(uint64 track_number) { +bool Segment::CuesTrack(uint64_t track_number) { const Track* const track = GetTrackByNumber(track_number); if (!track) return false; @@ -2792,7 +3285,7 @@ bool Segment::CuesTrack(uint64 track_number) { void Segment::ForceNewClusterOnNextFrame() { force_new_cluster_ = true; } -Track* Segment::GetTrackByNumber(uint64 track_number) const { +Track* Segment::GetTrackByNumber(uint64_t track_number) const { return tracks_.GetTrackByNumber(track_number); } @@ -2803,11 +3296,11 @@ bool Segment::WriteSegmentHeader() { if (!WriteEbmlHeader(writer_header_, doc_type_version_)) return false; doc_type_version_written_ = doc_type_version_; - ebml_header_size_ = static_cast(writer_header_->Position()); + ebml_header_size_ = static_cast(writer_header_->Position()); // Write "unknown" (-1) as segment size value. If mode is kFile, Segment // will write over duration when the file is finalized. - if (WriteID(writer_header_, kMkvSegment)) + if (WriteID(writer_header_, libwebm::kMkvSegment)) return false; // Save for later. 
@@ -2831,25 +3324,25 @@ bool Segment::WriteSegmentHeader() { return false; } - if (!seek_head_.AddSeekEntry(kMkvInfo, MaxOffset())) + if (!seek_head_.AddSeekEntry(libwebm::kMkvInfo, MaxOffset())) return false; if (!segment_info_.Write(writer_header_)) return false; - if (!seek_head_.AddSeekEntry(kMkvTracks, MaxOffset())) + if (!seek_head_.AddSeekEntry(libwebm::kMkvTracks, MaxOffset())) return false; if (!tracks_.Write(writer_header_)) return false; if (chapters_.Count() > 0) { - if (!seek_head_.AddSeekEntry(kMkvChapters, MaxOffset())) + if (!seek_head_.AddSeekEntry(libwebm::kMkvChapters, MaxOffset())) return false; if (!chapters_.Write(writer_header_)) return false; } if (tags_.Count() > 0) { - if (!seek_head_.AddSeekEntry(kMkvTags, MaxOffset())) + if (!seek_head_.AddSeekEntry(libwebm::kMkvTags, MaxOffset())) return false; if (!tags_.Write(writer_header_)) return false; @@ -2870,7 +3363,7 @@ bool Segment::WriteSegmentHeader() { // Here we are testing whether to create a new cluster, given a frame // having time frame_timestamp_ns. // -int Segment::TestFrame(uint64 track_number, uint64 frame_timestamp_ns, +int Segment::TestFrame(uint64_t track_number, uint64_t frame_timestamp_ns, bool is_key) const { if (force_new_cluster_) return 1; @@ -2888,11 +3381,11 @@ int Segment::TestFrame(uint64 track_number, uint64 frame_timestamp_ns, // written to the existing cluster, or that a new cluster should be // created. 
- const uint64 timecode_scale = segment_info_.timecode_scale(); - const uint64 frame_timecode = frame_timestamp_ns / timecode_scale; + const uint64_t timecode_scale = segment_info_.timecode_scale(); + const uint64_t frame_timecode = frame_timestamp_ns / timecode_scale; const Cluster* const last_cluster = cluster_list_[cluster_list_size_ - 1]; - const uint64 last_cluster_timecode = last_cluster->timecode(); + const uint64_t last_cluster_timecode = last_cluster->timecode(); // For completeness we test for the case when the frame's timecode // is less than the cluster's timecode. Although in principle that @@ -2907,7 +3400,7 @@ int Segment::TestFrame(uint64 track_number, uint64 frame_timestamp_ns, // using a 16-bit signed integer), then we cannot write this frame // to that cluster, and so we must create a new cluster. - const int64 delta_timecode = frame_timecode - last_cluster_timecode; + const int64_t delta_timecode = frame_timecode - last_cluster_timecode; if (delta_timecode > kMaxBlockTimecode) return 2; @@ -2923,7 +3416,7 @@ int Segment::TestFrame(uint64 track_number, uint64 frame_timestamp_ns, // already, where "too many" is defined as "the total time of frames // in the cluster exceeds a threshold". - const uint64 delta_ns = delta_timecode * timecode_scale; + const uint64_t delta_ns = delta_timecode * timecode_scale; if (max_cluster_duration_ > 0 && delta_ns >= max_cluster_duration_) return 1; @@ -2932,7 +3425,7 @@ int Segment::TestFrame(uint64 track_number, uint64 frame_timestamp_ns, // cluster is created when the size of the current cluster exceeds a // threshold. 
- const uint64 cluster_size = last_cluster->payload_size(); + const uint64_t cluster_size = last_cluster->payload_size(); if (max_cluster_size_ > 0 && cluster_size >= max_cluster_size_) return 1; @@ -2942,19 +3435,19 @@ int Segment::TestFrame(uint64 track_number, uint64 frame_timestamp_ns, return 0; } -bool Segment::MakeNewCluster(uint64 frame_timestamp_ns) { - const int32 new_size = cluster_list_size_ + 1; +bool Segment::MakeNewCluster(uint64_t frame_timestamp_ns) { + const int32_t new_size = cluster_list_size_ + 1; if (new_size > cluster_list_capacity_) { // Add more clusters. - const int32 new_capacity = + const int32_t new_capacity = (cluster_list_capacity_ <= 0) ? 1 : cluster_list_capacity_ * 2; Cluster** const clusters = new (std::nothrow) Cluster*[new_capacity]; // NOLINT if (!clusters) return false; - for (int32 i = 0; i < cluster_list_size_; ++i) { + for (int32_t i = 0; i < cluster_list_size_; ++i) { clusters[i] = cluster_list_[i]; } @@ -2967,19 +3460,17 @@ bool Segment::MakeNewCluster(uint64 frame_timestamp_ns) { if (!WriteFramesLessThan(frame_timestamp_ns)) return false; - if (mode_ == kFile) { - if (cluster_list_size_ > 0) { - // Update old cluster's size - Cluster* const old_cluster = cluster_list_[cluster_list_size_ - 1]; + if (cluster_list_size_ > 0) { + // Update old cluster's size + Cluster* const old_cluster = cluster_list_[cluster_list_size_ - 1]; - if (!old_cluster || !old_cluster->Finalize()) - return false; - } - - if (output_cues_) - new_cuepoint_ = true; + if (!old_cluster || !old_cluster->Finalize(true, frame_timestamp_ns)) + return false; } + if (output_cues_) + new_cuepoint_ = true; + if (chunking_ && cluster_list_size_ > 0) { chunk_writer_cluster_->Close(); chunk_count_++; @@ -2990,24 +3481,25 @@ bool Segment::MakeNewCluster(uint64 frame_timestamp_ns) { return false; } - const uint64 timecode_scale = segment_info_.timecode_scale(); - const uint64 frame_timecode = frame_timestamp_ns / timecode_scale; + const uint64_t timecode_scale = 
segment_info_.timecode_scale(); + const uint64_t frame_timecode = frame_timestamp_ns / timecode_scale; - uint64 cluster_timecode = frame_timecode; + uint64_t cluster_timecode = frame_timecode; if (frames_size_ > 0) { const Frame* const f = frames_[0]; // earliest queued frame - const uint64 ns = f->timestamp(); - const uint64 tc = ns / timecode_scale; + const uint64_t ns = f->timestamp(); + const uint64_t tc = ns / timecode_scale; if (tc < cluster_timecode) cluster_timecode = tc; } Cluster*& cluster = cluster_list_[cluster_list_size_]; - const int64 offset = MaxOffset(); - cluster = new (std::nothrow) Cluster(cluster_timecode, // NOLINT - offset, segment_info_.timecode_scale()); + const int64_t offset = MaxOffset(); + cluster = new (std::nothrow) + Cluster(cluster_timecode, offset, segment_info_.timecode_scale(), + accurate_cluster_duration_, fixed_size_cluster_timecode_); if (!cluster) return false; @@ -3018,8 +3510,8 @@ bool Segment::MakeNewCluster(uint64 frame_timestamp_ns) { return true; } -bool Segment::DoNewClusterProcessing(uint64 track_number, - uint64 frame_timestamp_ns, bool is_key) { +bool Segment::DoNewClusterProcessing(uint64_t track_number, + uint64_t frame_timestamp_ns, bool is_key) { for (;;) { // Based on the characteristics of the current frame and current // cluster, decide whether to create a new cluster. 
@@ -3055,12 +3547,12 @@ bool Segment::CheckHeaderInfo() { if (!WriteSegmentHeader()) return false; - if (!seek_head_.AddSeekEntry(kMkvCluster, MaxOffset())) + if (!seek_head_.AddSeekEntry(libwebm::kMkvCluster, MaxOffset())) return false; if (output_cues_ && cues_track_ == 0) { // Check for a video track - for (uint32 i = 0; i < tracks_.track_entries_size(); ++i) { + for (uint32_t i = 0; i < tracks_.track_entries_size(); ++i) { const Track* const track = tracks_.GetTrackByIndex(i); if (!track) return false; @@ -3085,7 +3577,7 @@ bool Segment::CheckHeaderInfo() { } void Segment::UpdateDocTypeVersion() { - for (uint32 index = 0; index < tracks_.track_entries_size(); ++index) { + for (uint32_t index = 0; index < tracks_.track_entries_size(); ++index) { const Track* track = tracks_.GetTrackByIndex(index); if (track == NULL) break; @@ -3127,14 +3619,14 @@ bool Segment::UpdateChunkName(const char* ext, char** name) const { return true; } -int64 Segment::MaxOffset() { +int64_t Segment::MaxOffset() { if (!writer_header_) return -1; - int64 offset = writer_header_->Position() - payload_pos_; + int64_t offset = writer_header_->Position() - payload_pos_; if (chunking_) { - for (int32 i = 0; i < cluster_list_size_; ++i) { + for (int32_t i = 0; i < cluster_list_size_; ++i) { Cluster* const cluster = cluster_list_[i]; offset += cluster->Size(); } @@ -3147,11 +3639,11 @@ int64 Segment::MaxOffset() { } bool Segment::QueueFrame(Frame* frame) { - const int32 new_size = frames_size_ + 1; + const int32_t new_size = frames_size_ + 1; if (new_size > frames_capacity_) { // Add more frames. - const int32 new_capacity = (!frames_capacity_) ? 2 : frames_capacity_ * 2; + const int32_t new_capacity = (!frames_capacity_) ? 
2 : frames_capacity_ * 2; if (new_capacity < 1) return false; @@ -3160,7 +3652,7 @@ bool Segment::QueueFrame(Frame* frame) { if (!frames) return false; - for (int32 i = 0; i < frames_size_; ++i) { + for (int32_t i = 0; i < frames_size_; ++i) { frames[i] = frames_[i]; } @@ -3186,7 +3678,7 @@ int Segment::WriteFramesAll() { if (!cluster) return -1; - for (int32 i = 0; i < frames_size_; ++i) { + for (int32_t i = 0; i < frames_size_; ++i) { Frame*& frame = frames_[i]; // TODO(jzern/vigneshv): using Segment::AddGenericFrame here would limit the // places where |doc_type_version_| needs to be updated. @@ -3215,7 +3707,7 @@ int Segment::WriteFramesAll() { return result; } -bool Segment::WriteFramesLessThan(uint64 timestamp) { +bool Segment::WriteFramesLessThan(uint64_t timestamp) { // Check |cluster_list_size_| to see if this is the first cluster. If it is // the first cluster the audio frames that are less than the first video // timesatmp will be written in a later step. @@ -3227,11 +3719,11 @@ bool Segment::WriteFramesLessThan(uint64 timestamp) { if (!cluster) return false; - int32 shift_left = 0; + int32_t shift_left = 0; // TODO(fgalligan): Change this to use the durations of frames instead of // the next frame's start time if the duration is accurate. 
- for (int32 i = 1; i < frames_size_; ++i) { + for (int32_t i = 1; i < frames_size_; ++i) { const Frame* const frame_curr = frames_[i]; if (frame_curr->timestamp() > timestamp) @@ -3262,8 +3754,8 @@ bool Segment::WriteFramesLessThan(uint64 timestamp) { if (shift_left >= frames_size_) return false; - const int32 new_frames_size = frames_size_ - shift_left; - for (int32 i = 0; i < new_frames_size; ++i) { + const int32_t new_frames_size = frames_size_ - shift_left; + for (int32_t i = 0; i < new_frames_size; ++i) { frames_[i] = frames_[i + shift_left]; } diff --git a/libs/libvpx/third_party/libwebm/mkvmuxer.hpp b/libs/libvpx/third_party/libwebm/mkvmuxer/mkvmuxer.h similarity index 68% rename from libs/libvpx/third_party/libwebm/mkvmuxer.hpp rename to libs/libvpx/third_party/libwebm/mkvmuxer/mkvmuxer.h index 03a002c93b..55ba07196d 100644 --- a/libs/libvpx/third_party/libwebm/mkvmuxer.hpp +++ b/libs/libvpx/third_party/libwebm/mkvmuxer/mkvmuxer.h @@ -6,24 +6,31 @@ // in the file PATENTS. All contributing project authors may // be found in the AUTHORS file in the root of the source tree. -#ifndef MKVMUXER_HPP -#define MKVMUXER_HPP +#ifndef MKVMUXER_MKVMUXER_H_ +#define MKVMUXER_MKVMUXER_H_ -#include "mkvmuxertypes.hpp" +#include + +#include +#include +#include + +#include "common/webmids.h" +#include "mkvmuxer/mkvmuxertypes.h" // For a description of the WebM elements see // http://www.webmproject.org/code/specs/container/. namespace mkvparser { class IMkvReader; -} // end namespace +} // namespace mkvparser namespace mkvmuxer { class MkvWriter; class Segment; -const uint64 kMaxTrackNumber = 126; +const uint64_t kMaxTrackNumber = 126; /////////////////////////////////////////////////////////////// // Interface used by the mkvmuxer to write out the Mkv data. @@ -59,15 +66,15 @@ class IMkvWriter { // Writes out the EBML header for a WebM file. This function must be called // before any other libwebm writing functions are called. 
-bool WriteEbmlHeader(IMkvWriter* writer, uint64 doc_type_version); +bool WriteEbmlHeader(IMkvWriter* writer, uint64_t doc_type_version); // Deprecated. Writes out EBML header with doc_type_version as // kDefaultDocTypeVersion. Exists for backward compatibility. bool WriteEbmlHeader(IMkvWriter* writer); // Copies in Chunk from source to destination between the given byte positions -bool ChunkedCopy(mkvparser::IMkvReader* source, IMkvWriter* dst, int64 start, - int64 size); +bool ChunkedCopy(mkvparser::IMkvReader* source, IMkvWriter* dst, int64_t start, + int64_t size); /////////////////////////////////////////////////////////////// // Class to hold data the will be written to a block. @@ -81,10 +88,11 @@ class Frame { bool CopyFrom(const Frame& frame); // Copies |frame| data into |frame_|. Returns true on success. - bool Init(const uint8* frame, uint64 length); + bool Init(const uint8_t* frame, uint64_t length); // Copies |additional| data into |additional_|. Returns true on success. - bool AddAdditionalData(const uint8* additional, uint64 length, uint64 add_id); + bool AddAdditionalData(const uint8_t* additional, uint64_t length, + uint64_t add_id); // Returns true if the frame has valid parameters. bool IsValid() const; @@ -93,62 +101,70 @@ class Frame { // parameters. 
bool CanBeSimpleBlock() const; - uint64 add_id() const { return add_id_; } - const uint8* additional() const { return additional_; } - uint64 additional_length() const { return additional_length_; } - void set_duration(uint64 duration) { duration_ = duration; } - uint64 duration() const { return duration_; } - const uint8* frame() const { return frame_; } + uint64_t add_id() const { return add_id_; } + const uint8_t* additional() const { return additional_; } + uint64_t additional_length() const { return additional_length_; } + void set_duration(uint64_t duration); + uint64_t duration() const { return duration_; } + bool duration_set() const { return duration_set_; } + const uint8_t* frame() const { return frame_; } void set_is_key(bool key) { is_key_ = key; } bool is_key() const { return is_key_; } - uint64 length() const { return length_; } - void set_track_number(uint64 track_number) { track_number_ = track_number; } - uint64 track_number() const { return track_number_; } - void set_timestamp(uint64 timestamp) { timestamp_ = timestamp; } - uint64 timestamp() const { return timestamp_; } - void set_discard_padding(int64 discard_padding) { + uint64_t length() const { return length_; } + void set_track_number(uint64_t track_number) { track_number_ = track_number; } + uint64_t track_number() const { return track_number_; } + void set_timestamp(uint64_t timestamp) { timestamp_ = timestamp; } + uint64_t timestamp() const { return timestamp_; } + void set_discard_padding(int64_t discard_padding) { discard_padding_ = discard_padding; } - int64 discard_padding() const { return discard_padding_; } - void set_reference_block_timestamp(int64 reference_block_timestamp); - int64 reference_block_timestamp() const { return reference_block_timestamp_; } + int64_t discard_padding() const { return discard_padding_; } + void set_reference_block_timestamp(int64_t reference_block_timestamp); + int64_t reference_block_timestamp() const { + return reference_block_timestamp_; + } bool 
reference_block_timestamp_set() const { return reference_block_timestamp_set_; } private: // Id of the Additional data. - uint64 add_id_; + uint64_t add_id_; // Pointer to additional data. Owned by this class. - uint8* additional_; + uint8_t* additional_; // Length of the additional data. - uint64 additional_length_; + uint64_t additional_length_; // Duration of the frame in nanoseconds. - uint64 duration_; + uint64_t duration_; + + // Flag indicating that |duration_| has been set. Setting duration causes the + // frame to be written out as a Block with BlockDuration instead of as a + // SimpleBlock. + bool duration_set_; // Pointer to the data. Owned by this class. - uint8* frame_; + uint8_t* frame_; // Flag telling if the data should set the key flag of a block. bool is_key_; // Length of the data. - uint64 length_; + uint64_t length_; // Mkv track number the data is associated with. - uint64 track_number_; + uint64_t track_number_; // Timestamp of the data in nanoseconds. - uint64 timestamp_; + uint64_t timestamp_; // Discard padding for the frame. - int64 discard_padding_; + int64_t discard_padding_; // Reference block timestamp. - int64 reference_block_timestamp_; + int64_t reference_block_timestamp_; // Flag indicating if |reference_block_timestamp_| has been set. bool reference_block_timestamp_set_; @@ -164,19 +180,19 @@ class CuePoint { ~CuePoint(); // Returns the size in bytes for the entire CuePoint element. - uint64 Size() const; + uint64_t Size() const; // Output the CuePoint element to the writer. Returns true on success. 
bool Write(IMkvWriter* writer) const; - void set_time(uint64 time) { time_ = time; } - uint64 time() const { return time_; } - void set_track(uint64 track) { track_ = track; } - uint64 track() const { return track_; } - void set_cluster_pos(uint64 cluster_pos) { cluster_pos_ = cluster_pos; } - uint64 cluster_pos() const { return cluster_pos_; } - void set_block_number(uint64 block_number) { block_number_ = block_number; } - uint64 block_number() const { return block_number_; } + void set_time(uint64_t time) { time_ = time; } + uint64_t time() const { return time_; } + void set_track(uint64_t track) { track_ = track; } + uint64_t track() const { return track_; } + void set_cluster_pos(uint64_t cluster_pos) { cluster_pos_ = cluster_pos; } + uint64_t cluster_pos() const { return cluster_pos_; } + void set_block_number(uint64_t block_number) { block_number_ = block_number; } + uint64_t block_number() const { return block_number_; } void set_output_block_number(bool output_block_number) { output_block_number_ = output_block_number; } @@ -184,19 +200,19 @@ class CuePoint { private: // Returns the size in bytes for the payload of the CuePoint element. - uint64 PayloadSize() const; + uint64_t PayloadSize() const; // Absolute timecode according to the segment time base. - uint64 time_; + uint64_t time_; // The Track element associated with the CuePoint. - uint64 track_; + uint64_t track_; // The position of the Cluster containing the Block. - uint64 cluster_pos_; + uint64_t cluster_pos_; // Number of the Block within the Cluster, starting from 1. - uint64 block_number_; + uint64_t block_number_; // If true the muxer will write out the block number for the cue if the // block number is different than the default of 1. Default is set to true. @@ -217,15 +233,15 @@ class Cues { // Returns the cue point by index. Returns NULL if there is no cue point // match. 
- CuePoint* GetCueByIndex(int32 index) const; + CuePoint* GetCueByIndex(int32_t index) const; // Returns the total size of the Cues element - uint64 Size(); + uint64_t Size(); // Output the Cues element to the writer. Returns true on success. bool Write(IMkvWriter* writer) const; - int32 cue_entries_size() const { return cue_entries_size_; } + int32_t cue_entries_size() const { return cue_entries_size_; } void set_output_block_number(bool output_block_number) { output_block_number_ = output_block_number; } @@ -233,10 +249,10 @@ class Cues { private: // Number of allocated elements in |cue_entries_|. - int32 cue_entries_capacity_; + int32_t cue_entries_capacity_; // Number of CuePoints in |cue_entries_|. - int32 cue_entries_size_; + int32_t cue_entries_size_; // CuePoint list. CuePoint** cue_entries_; @@ -258,21 +274,21 @@ class ContentEncAESSettings { ~ContentEncAESSettings() {} // Returns the size in bytes for the ContentEncAESSettings element. - uint64 Size() const; + uint64_t Size() const; // Writes out the ContentEncAESSettings element to |writer|. Returns true on // success. bool Write(IMkvWriter* writer) const; - uint64 cipher_mode() const { return cipher_mode_; } + uint64_t cipher_mode() const { return cipher_mode_; } private: // Returns the size in bytes for the payload of the ContentEncAESSettings // element. - uint64 PayloadSize() const; + uint64_t PayloadSize() const; // Sub elements - uint64 cipher_mode_; + uint64_t cipher_mode_; LIBWEBM_DISALLOW_COPY_AND_ASSIGN(ContentEncAESSettings); }; @@ -291,44 +307,157 @@ class ContentEncoding { // Sets the content encryption id. Copies |length| bytes from |id| to // |enc_key_id_|. Returns true on success. - bool SetEncryptionID(const uint8* id, uint64 length); + bool SetEncryptionID(const uint8_t* id, uint64_t length); // Returns the size in bytes for the ContentEncoding element. - uint64 Size() const; + uint64_t Size() const; // Writes out the ContentEncoding element to |writer|. Returns true on // success. 
bool Write(IMkvWriter* writer) const; - uint64 enc_algo() const { return enc_algo_; } - uint64 encoding_order() const { return encoding_order_; } - uint64 encoding_scope() const { return encoding_scope_; } - uint64 encoding_type() const { return encoding_type_; } + uint64_t enc_algo() const { return enc_algo_; } + uint64_t encoding_order() const { return encoding_order_; } + uint64_t encoding_scope() const { return encoding_scope_; } + uint64_t encoding_type() const { return encoding_type_; } ContentEncAESSettings* enc_aes_settings() { return &enc_aes_settings_; } private: // Returns the size in bytes for the encoding elements. - uint64 EncodingSize(uint64 compresion_size, uint64 encryption_size) const; + uint64_t EncodingSize(uint64_t compresion_size, + uint64_t encryption_size) const; // Returns the size in bytes for the encryption elements. - uint64 EncryptionSize() const; + uint64_t EncryptionSize() const; // Track element names - uint64 enc_algo_; - uint8* enc_key_id_; - uint64 encoding_order_; - uint64 encoding_scope_; - uint64 encoding_type_; + uint64_t enc_algo_; + uint8_t* enc_key_id_; + uint64_t encoding_order_; + uint64_t encoding_scope_; + uint64_t encoding_type_; // ContentEncAESSettings element. ContentEncAESSettings enc_aes_settings_; // Size of the ContentEncKeyID data in bytes. - uint64 enc_key_id_length_; + uint64_t enc_key_id_length_; LIBWEBM_DISALLOW_COPY_AND_ASSIGN(ContentEncoding); }; +/////////////////////////////////////////////////////////////// +// Colour element. 
+struct PrimaryChromaticity { + PrimaryChromaticity(float x_val, float y_val) : x(x_val), y(y_val) {} + PrimaryChromaticity() : x(0), y(0) {} + ~PrimaryChromaticity() {} + uint64_t PrimaryChromaticityPayloadSize(libwebm::MkvId x_id, + libwebm::MkvId y_id) const; + bool Write(IMkvWriter* writer, libwebm::MkvId x_id, + libwebm::MkvId y_id) const; + + float x; + float y; +}; + +class MasteringMetadata { + public: + static const float kValueNotPresent; + + MasteringMetadata() + : luminance_max(kValueNotPresent), + luminance_min(kValueNotPresent), + r_(NULL), + g_(NULL), + b_(NULL), + white_point_(NULL) {} + ~MasteringMetadata() { + delete r_; + delete g_; + delete b_; + delete white_point_; + } + + // Returns total size of the MasteringMetadata element. + uint64_t MasteringMetadataSize() const; + bool Write(IMkvWriter* writer) const; + + // Copies non-null chromaticity. + bool SetChromaticity(const PrimaryChromaticity* r, + const PrimaryChromaticity* g, + const PrimaryChromaticity* b, + const PrimaryChromaticity* white_point); + const PrimaryChromaticity* r() const { return r_; } + const PrimaryChromaticity* g() const { return g_; } + const PrimaryChromaticity* b() const { return b_; } + const PrimaryChromaticity* white_point() const { return white_point_; } + + float luminance_max; + float luminance_min; + + private: + // Returns size of MasteringMetadata child elements. 
+ uint64_t PayloadSize() const; + + PrimaryChromaticity* r_; + PrimaryChromaticity* g_; + PrimaryChromaticity* b_; + PrimaryChromaticity* white_point_; +}; + +class Colour { + public: + static const uint64_t kValueNotPresent; + Colour() + : matrix_coefficients(kValueNotPresent), + bits_per_channel(kValueNotPresent), + chroma_subsampling_horz(kValueNotPresent), + chroma_subsampling_vert(kValueNotPresent), + cb_subsampling_horz(kValueNotPresent), + cb_subsampling_vert(kValueNotPresent), + chroma_siting_horz(kValueNotPresent), + chroma_siting_vert(kValueNotPresent), + range(kValueNotPresent), + transfer_characteristics(kValueNotPresent), + primaries(kValueNotPresent), + max_cll(kValueNotPresent), + max_fall(kValueNotPresent), + mastering_metadata_(NULL) {} + ~Colour() { delete mastering_metadata_; } + + // Returns total size of the Colour element. + uint64_t ColourSize() const; + bool Write(IMkvWriter* writer) const; + + // Deep copies |mastering_metadata|. + bool SetMasteringMetadata(const MasteringMetadata& mastering_metadata); + + const MasteringMetadata* mastering_metadata() const { + return mastering_metadata_; + } + + uint64_t matrix_coefficients; + uint64_t bits_per_channel; + uint64_t chroma_subsampling_horz; + uint64_t chroma_subsampling_vert; + uint64_t cb_subsampling_horz; + uint64_t cb_subsampling_vert; + uint64_t chroma_siting_horz; + uint64_t chroma_siting_vert; + uint64_t range; + uint64_t transfer_characteristics; + uint64_t primaries; + uint64_t max_cll; + uint64_t max_fall; + + private: + // Returns size of Colour child elements. + uint64_t PayloadSize() const; + + MasteringMetadata* mastering_metadata_; +}; + /////////////////////////////////////////////////////////////// // Track element. class Track { @@ -342,76 +471,76 @@ class Track { // Returns the ContentEncoding by index. Returns NULL if there is no // ContentEncoding match. 
- ContentEncoding* GetContentEncodingByIndex(uint32 index) const; + ContentEncoding* GetContentEncodingByIndex(uint32_t index) const; // Returns the size in bytes for the payload of the Track element. - virtual uint64 PayloadSize() const; + virtual uint64_t PayloadSize() const; // Returns the size in bytes of the Track element. - virtual uint64 Size() const; + virtual uint64_t Size() const; // Output the Track element to the writer. Returns true on success. virtual bool Write(IMkvWriter* writer) const; // Sets the CodecPrivate element of the Track element. Copies |length| // bytes from |codec_private| to |codec_private_|. Returns true on success. - bool SetCodecPrivate(const uint8* codec_private, uint64 length); + bool SetCodecPrivate(const uint8_t* codec_private, uint64_t length); void set_codec_id(const char* codec_id); const char* codec_id() const { return codec_id_; } - const uint8* codec_private() const { return codec_private_; } + const uint8_t* codec_private() const { return codec_private_; } void set_language(const char* language); const char* language() const { return language_; } - void set_max_block_additional_id(uint64 max_block_additional_id) { + void set_max_block_additional_id(uint64_t max_block_additional_id) { max_block_additional_id_ = max_block_additional_id; } - uint64 max_block_additional_id() const { return max_block_additional_id_; } + uint64_t max_block_additional_id() const { return max_block_additional_id_; } void set_name(const char* name); const char* name() const { return name_; } - void set_number(uint64 number) { number_ = number; } - uint64 number() const { return number_; } - void set_type(uint64 type) { type_ = type; } - uint64 type() const { return type_; } - void set_uid(uint64 uid) { uid_ = uid; } - uint64 uid() const { return uid_; } - void set_codec_delay(uint64 codec_delay) { codec_delay_ = codec_delay; } - uint64 codec_delay() const { return codec_delay_; } - void set_seek_pre_roll(uint64 seek_pre_roll) { + void 
set_number(uint64_t number) { number_ = number; } + uint64_t number() const { return number_; } + void set_type(uint64_t type) { type_ = type; } + uint64_t type() const { return type_; } + void set_uid(uint64_t uid) { uid_ = uid; } + uint64_t uid() const { return uid_; } + void set_codec_delay(uint64_t codec_delay) { codec_delay_ = codec_delay; } + uint64_t codec_delay() const { return codec_delay_; } + void set_seek_pre_roll(uint64_t seek_pre_roll) { seek_pre_roll_ = seek_pre_roll; } - uint64 seek_pre_roll() const { return seek_pre_roll_; } - void set_default_duration(uint64 default_duration) { + uint64_t seek_pre_roll() const { return seek_pre_roll_; } + void set_default_duration(uint64_t default_duration) { default_duration_ = default_duration; } - uint64 default_duration() const { return default_duration_; } + uint64_t default_duration() const { return default_duration_; } - uint64 codec_private_length() const { return codec_private_length_; } - uint32 content_encoding_entries_size() const { + uint64_t codec_private_length() const { return codec_private_length_; } + uint32_t content_encoding_entries_size() const { return content_encoding_entries_size_; } private: // Track element names. char* codec_id_; - uint8* codec_private_; + uint8_t* codec_private_; char* language_; - uint64 max_block_additional_id_; + uint64_t max_block_additional_id_; char* name_; - uint64 number_; - uint64 type_; - uint64 uid_; - uint64 codec_delay_; - uint64 seek_pre_roll_; - uint64 default_duration_; + uint64_t number_; + uint64_t type_; + uint64_t uid_; + uint64_t codec_delay_; + uint64_t seek_pre_roll_; + uint64_t default_duration_; // Size of the CodecPrivate data in bytes. - uint64 codec_private_length_; + uint64_t codec_private_length_; // ContentEncoding element list. ContentEncoding** content_encoding_entries_; // Number of ContentEncoding elements added. 
- uint32 content_encoding_entries_size_; + uint32_t content_encoding_entries_size_; LIBWEBM_DISALLOW_COPY_AND_ASSIGN(Track); }; @@ -437,56 +566,63 @@ class VideoTrack : public Track { // Returns the size in bytes for the payload of the Track element plus the // video specific elements. - virtual uint64 PayloadSize() const; + virtual uint64_t PayloadSize() const; // Output the VideoTrack element to the writer. Returns true on success. virtual bool Write(IMkvWriter* writer) const; // Sets the video's stereo mode. Returns true on success. - bool SetStereoMode(uint64 stereo_mode); + bool SetStereoMode(uint64_t stereo_mode); // Sets the video's alpha mode. Returns true on success. - bool SetAlphaMode(uint64 alpha_mode); + bool SetAlphaMode(uint64_t alpha_mode); - void set_display_height(uint64 height) { display_height_ = height; } - uint64 display_height() const { return display_height_; } - void set_display_width(uint64 width) { display_width_ = width; } - uint64 display_width() const { return display_width_; } + void set_display_height(uint64_t height) { display_height_ = height; } + uint64_t display_height() const { return display_height_; } + void set_display_width(uint64_t width) { display_width_ = width; } + uint64_t display_width() const { return display_width_; } - void set_crop_left(uint64 crop_left) { crop_left_ = crop_left; } - uint64 crop_left() const { return crop_left_; } - void set_crop_right(uint64 crop_right) { crop_right_ = crop_right; } - uint64 crop_right() const { return crop_right_; } - void set_crop_top(uint64 crop_top) { crop_top_ = crop_top; } - uint64 crop_top() const { return crop_top_; } - void set_crop_bottom(uint64 crop_bottom) { crop_bottom_ = crop_bottom; } - uint64 crop_bottom() const { return crop_bottom_; } + void set_crop_left(uint64_t crop_left) { crop_left_ = crop_left; } + uint64_t crop_left() const { return crop_left_; } + void set_crop_right(uint64_t crop_right) { crop_right_ = crop_right; } + uint64_t crop_right() const { return 
crop_right_; } + void set_crop_top(uint64_t crop_top) { crop_top_ = crop_top; } + uint64_t crop_top() const { return crop_top_; } + void set_crop_bottom(uint64_t crop_bottom) { crop_bottom_ = crop_bottom; } + uint64_t crop_bottom() const { return crop_bottom_; } void set_frame_rate(double frame_rate) { frame_rate_ = frame_rate; } double frame_rate() const { return frame_rate_; } - void set_height(uint64 height) { height_ = height; } - uint64 height() const { return height_; } - uint64 stereo_mode() { return stereo_mode_; } - uint64 alpha_mode() { return alpha_mode_; } - void set_width(uint64 width) { width_ = width; } - uint64 width() const { return width_; } + void set_height(uint64_t height) { height_ = height; } + uint64_t height() const { return height_; } + uint64_t stereo_mode() { return stereo_mode_; } + uint64_t alpha_mode() { return alpha_mode_; } + void set_width(uint64_t width) { width_ = width; } + uint64_t width() const { return width_; } + + Colour* colour() { return colour_; } + + // Deep copies |colour|. + bool SetColour(const Colour& colour); private: // Returns the size in bytes of the Video element. - uint64 VideoPayloadSize() const; + uint64_t VideoPayloadSize() const; // Video track element names. - uint64 display_height_; - uint64 display_width_; - uint64 crop_left_; - uint64 crop_right_; - uint64 crop_top_; - uint64 crop_bottom_; + uint64_t display_height_; + uint64_t display_width_; + uint64_t crop_left_; + uint64_t crop_right_; + uint64_t crop_top_; + uint64_t crop_bottom_; double frame_rate_; - uint64 height_; - uint64 stereo_mode_; - uint64 alpha_mode_; - uint64 width_; + uint64_t height_; + uint64_t stereo_mode_; + uint64_t alpha_mode_; + uint64_t width_; + + Colour* colour_; LIBWEBM_DISALLOW_COPY_AND_ASSIGN(VideoTrack); }; @@ -501,22 +637,22 @@ class AudioTrack : public Track { // Returns the size in bytes for the payload of the Track element plus the // audio specific elements. 
- virtual uint64 PayloadSize() const; + virtual uint64_t PayloadSize() const; // Output the AudioTrack element to the writer. Returns true on success. virtual bool Write(IMkvWriter* writer) const; - void set_bit_depth(uint64 bit_depth) { bit_depth_ = bit_depth; } - uint64 bit_depth() const { return bit_depth_; } - void set_channels(uint64 channels) { channels_ = channels; } - uint64 channels() const { return channels_; } + void set_bit_depth(uint64_t bit_depth) { bit_depth_ = bit_depth; } + uint64_t bit_depth() const { return bit_depth_; } + void set_channels(uint64_t channels) { channels_ = channels; } + uint64_t channels() const { return channels_; } void set_sample_rate(double sample_rate) { sample_rate_ = sample_rate; } double sample_rate() const { return sample_rate_; } private: // Audio track element names. - uint64 bit_depth_; - uint64 channels_; + uint64_t bit_depth_; + uint64_t channels_; double sample_rate_; LIBWEBM_DISALLOW_COPY_AND_ASSIGN(AudioTrack); @@ -542,32 +678,35 @@ class Tracks { // deleted by the Tracks object. Returns true on success. |number| is the // number to use for the track. |number| must be >= 0. If |number| == 0 // then the muxer will decide on the track number. - bool AddTrack(Track* track, int32 number); + bool AddTrack(Track* track, int32_t number); // Returns the track by index. Returns NULL if there is no track match. - const Track* GetTrackByIndex(uint32 idx) const; + const Track* GetTrackByIndex(uint32_t idx) const; // Search the Tracks and return the track that matches |tn|. Returns NULL // if there is no track match. - Track* GetTrackByNumber(uint64 track_number) const; + Track* GetTrackByNumber(uint64_t track_number) const; // Returns true if the track number is an audio track. - bool TrackIsAudio(uint64 track_number) const; + bool TrackIsAudio(uint64_t track_number) const; // Returns true if the track number is a video track. 
- bool TrackIsVideo(uint64 track_number) const; + bool TrackIsVideo(uint64_t track_number) const; // Output the Tracks element to the writer. Returns true on success. bool Write(IMkvWriter* writer) const; - uint32 track_entries_size() const { return track_entries_size_; } + uint32_t track_entries_size() const { return track_entries_size_; } private: // Track element list. Track** track_entries_; // Number of Track elements added. - uint32 track_entries_size_; + uint32_t track_entries_size_; + + // Whether or not Tracks element has already been written via IMkvWriter. + mutable bool wrote_tracks_; LIBWEBM_DISALLOW_COPY_AND_ASSIGN(Tracks); }; @@ -585,12 +724,12 @@ class Chapter { // Converts the nanosecond start and stop times of this chapter to // their corresponding timecode values, and stores them that way. - void set_time(const Segment& segment, uint64 start_time_ns, - uint64 end_time_ns); + void set_time(const Segment& segment, uint64_t start_time_ns, + uint64_t end_time_ns); // Sets the uid for this chapter. Primarily used to enable // deterministic output from the muxer. - void set_uid(const uint64 uid) { uid_ = uid; } + void set_uid(const uint64_t uid) { uid_ = uid; } // Add a title string to this chapter, per the semantics described // here: @@ -637,7 +776,7 @@ class Chapter { // If |writer| is non-NULL, serialize the Display sub-element of // the Atom into the stream. Returns the Display element size on // success, 0 if error. - uint64 WriteDisplay(IMkvWriter* writer) const; + uint64_t WriteDisplay(IMkvWriter* writer) const; private: char* title_; @@ -670,20 +809,20 @@ class Chapter { // If |writer| is non-NULL, serialize the Atom sub-element into the // stream. Returns the total size of the element on success, 0 if // error. - uint64 WriteAtom(IMkvWriter* writer) const; + uint64_t WriteAtom(IMkvWriter* writer) const; // The string identifier for this chapter (corresponds to WebVTT cue // identifier). char* id_; // Start timecode of the chapter. 
- uint64 start_timecode_; + uint64_t start_timecode_; // Stop timecode of the chapter. - uint64 end_timecode_; + uint64_t end_timecode_; // The binary identifier for this chapter. - uint64 uid_; + uint64_t uid_; // The Atom element can contain multiple Display sub-elements, as // the same logical title can be rendered in different languages. @@ -723,7 +862,7 @@ class Chapters { // If |writer| is non-NULL, serialize the Edition sub-element of the // Chapters element into the stream. Returns the Edition element // size on success, 0 if error. - uint64 WriteEdition(IMkvWriter* writer) const; + uint64_t WriteEdition(IMkvWriter* writer) const; // Total length of the chapters_ array. int chapters_size_; @@ -768,7 +907,7 @@ class Tag { // If |writer| is non-NULL, serialize the SimpleTag sub-element of // the Atom into the stream. Returns the SimpleTag element size on // success, 0 if error. - uint64 Write(IMkvWriter* writer) const; + uint64_t Write(IMkvWriter* writer) const; private: char* tag_name_; @@ -795,7 +934,7 @@ class Tag { // If |writer| is non-NULL, serialize the Tag sub-element into the // stream. Returns the total size of the element on success, 0 if // error. - uint64 Write(IMkvWriter* writer) const; + uint64_t Write(IMkvWriter* writer) const; // The Atom element can contain multiple SimpleTag sub-elements SimpleTag* simple_tags_; @@ -853,7 +992,9 @@ class Cluster { // |timecode| is the absolute timecode of the cluster. |cues_pos| is the // position for the cluster within the segment that should be written in // the cues element. |timecode_scale| is the timecode scale of the segment. 
- Cluster(uint64 timecode, int64 cues_pos, uint64 timecode_scale); + Cluster(uint64_t timecode, int64_t cues_pos, uint64_t timecode_scale, + bool write_last_frame_with_duration = false, + bool fixed_size_timecode = false); ~Cluster(); bool Init(IMkvWriter* ptr_writer); @@ -872,8 +1013,8 @@ class Cluster { // timecode: Absolute (not relative to cluster) timestamp of the // frame, expressed in timecode units. // is_key: Flag telling whether or not this frame is a key frame. - bool AddFrame(const uint8* data, uint64 length, uint64 track_number, - uint64 timecode, // timecode units (absolute) + bool AddFrame(const uint8_t* data, uint64_t length, uint64_t track_number, + uint64_t timecode, // timecode units (absolute) bool is_key); // Adds a frame to be output in the file. The frame is written out through @@ -889,10 +1030,11 @@ class Cluster { // abs_timecode: Absolute (not relative to cluster) timestamp of the // frame, expressed in timecode units. // is_key: Flag telling whether or not this frame is a key frame. - bool AddFrameWithAdditional(const uint8* data, uint64 length, - const uint8* additional, uint64 additional_length, - uint64 add_id, uint64 track_number, - uint64 abs_timecode, bool is_key); + bool AddFrameWithAdditional(const uint8_t* data, uint64_t length, + const uint8_t* additional, + uint64_t additional_length, uint64_t add_id, + uint64_t track_number, uint64_t abs_timecode, + bool is_key); // Adds a frame to be output in the file. The frame is written out through // |writer_| if successful. Returns true on success. @@ -905,9 +1047,10 @@ class Cluster { // abs_timecode: Absolute (not relative to cluster) timestamp of the // frame, expressed in timecode units. // is_key: Flag telling whether or not this frame is a key frame. 
- bool AddFrameWithDiscardPadding(const uint8* data, uint64 length, - int64 discard_padding, uint64 track_number, - uint64 abs_timecode, bool is_key); + bool AddFrameWithDiscardPadding(const uint8_t* data, uint64_t length, + int64_t discard_padding, + uint64_t track_number, uint64_t abs_timecode, + bool is_key); // Writes a frame of metadata to the output medium; returns true on // success. @@ -923,31 +1066,53 @@ class Cluster { // The metadata frame is written as a block group, with a duration // sub-element but no reference time sub-elements (indicating that // it is considered a keyframe, per Matroska semantics). - bool AddMetadata(const uint8* data, uint64 length, uint64 track_number, - uint64 timecode, uint64 duration); + bool AddMetadata(const uint8_t* data, uint64_t length, uint64_t track_number, + uint64_t timecode, uint64_t duration); // Increments the size of the cluster's data in bytes. - void AddPayloadSize(uint64 size); + void AddPayloadSize(uint64_t size); + + // Closes the cluster so no more data can be written to it. Will update the + // cluster's size if |writer_| is seekable. Returns true on success. This + // variant of Finalize() fails when |write_last_frame_with_duration_| is set + // to true. + bool Finalize(); // Closes the cluster so no more data can be written to it. Will update the // cluster's size if |writer_| is seekable. Returns true on success. - bool Finalize(); + // Inputs: + // set_last_frame_duration: Boolean indicating whether or not the duration + // of the last frame should be set. If set to + // false, the |duration| value is ignored and + // |write_last_frame_with_duration_| will not be + // honored. + // duration: Duration of the Cluster in timecode scale. + bool Finalize(bool set_last_frame_duration, uint64_t duration); // Returns the size in bytes for the entire Cluster element. - uint64 Size() const; + uint64_t Size() const; // Given |abs_timecode|, calculates timecode relative to most recent timecode. 
// Returns -1 on failure, or a relative timecode. - int64 GetRelativeTimecode(int64 abs_timecode) const; + int64_t GetRelativeTimecode(int64_t abs_timecode) const; - int64 size_position() const { return size_position_; } - int32 blocks_added() const { return blocks_added_; } - uint64 payload_size() const { return payload_size_; } - int64 position_for_cues() const { return position_for_cues_; } - uint64 timecode() const { return timecode_; } - uint64 timecode_scale() const { return timecode_scale_; } + int64_t size_position() const { return size_position_; } + int32_t blocks_added() const { return blocks_added_; } + uint64_t payload_size() const { return payload_size_; } + int64_t position_for_cues() const { return position_for_cues_; } + uint64_t timecode() const { return timecode_; } + uint64_t timecode_scale() const { return timecode_scale_; } + void set_write_last_frame_with_duration(bool write_last_frame_with_duration) { + write_last_frame_with_duration_ = write_last_frame_with_duration; + } + bool write_last_frame_with_duration() const { + return write_last_frame_with_duration_; + } private: + // Iterator type for the |stored_frames_| map. + typedef std::map >::iterator FrameMapIterator; + // Utility method that confirms that blocks can still be added, and that the // cluster header has been written. Used by |DoWriteFrame*|. Returns true // when successful. @@ -955,37 +1120,58 @@ class Cluster { // Utility method used by the |DoWriteFrame*| methods that handles the book // keeping required after each block is written. - void PostWriteBlock(uint64 element_size); + void PostWriteBlock(uint64_t element_size); // Does some verification and calls WriteFrame. bool DoWriteFrame(const Frame* const frame); + // Either holds back the given frame, or writes it out depending on whether or + // not |write_last_frame_with_duration_| is set. + bool QueueOrWriteFrame(const Frame* const frame); + // Outputs the Cluster header to |writer_|. Returns true on success. 
bool WriteClusterHeader(); // Number of blocks added to the cluster. - int32 blocks_added_; + int32_t blocks_added_; // Flag telling if the cluster has been closed. bool finalized_; + // Flag indicating whether the cluster's timecode will always be written out + // using 8 bytes. + bool fixed_size_timecode_; + // Flag telling if the cluster's header has been written. bool header_written_; // The size of the cluster elements in bytes. - uint64 payload_size_; + uint64_t payload_size_; // The file position used for cue points. - const int64 position_for_cues_; + const int64_t position_for_cues_; // The file position of the cluster's size element. - int64 size_position_; + int64_t size_position_; // The absolute timecode of the cluster. - const uint64 timecode_; + const uint64_t timecode_; // The timecode scale of the Segment containing the cluster. - const uint64 timecode_scale_; + const uint64_t timecode_scale_; + + // Flag indicating whether the last frame of the cluster should be written as + // a Block with Duration. If set to true, then it will result in holding back + // of frames and the parameterized version of Finalize() must be called to + // finish writing the Cluster. + bool write_last_frame_with_duration_; + + // Map used to hold back frames, if required. Track number is the key. + std::map > stored_frames_; + + // Map from track number to the timestamp of the last block written for that + // track. + std::map last_block_timestamp_; // Pointer to the writer object. Not owned by this class. IMkvWriter* writer_; @@ -1006,42 +1192,42 @@ class SeekHead { // Adds a seek entry to be written out when the element is finalized. |id| // must be the coded mkv element id. |pos| is the file position of the // element. Returns true on success. - bool AddSeekEntry(uint32 id, uint64 pos); + bool AddSeekEntry(uint32_t id, uint64_t pos); // Writes out SeekHead and SeekEntry elements. Returns true on success. 
bool Finalize(IMkvWriter* writer) const; // Returns the id of the Seek Entry at the given index. Returns -1 if index is // out of range. - uint32 GetId(int index) const; + uint32_t GetId(int index) const; // Returns the position of the Seek Entry at the given index. Returns -1 if // index is out of range. - uint64 GetPosition(int index) const; + uint64_t GetPosition(int index) const; // Sets the Seek Entry id and position at given index. // Returns true on success. - bool SetSeekEntry(int index, uint32 id, uint64 position); + bool SetSeekEntry(int index, uint32_t id, uint64_t position); // Reserves space by writing out a Void element which will be updated with // a SeekHead element later. Returns true on success. bool Write(IMkvWriter* writer); // We are going to put a cap on the number of Seek Entries. - const static int32 kSeekEntryCount = 5; + const static int32_t kSeekEntryCount = 5; private: // Returns the maximum size in bytes of one seek entry. - uint64 MaxEntrySize() const; + uint64_t MaxEntrySize() const; // Seek entry id element list. - uint32 seek_entry_id_[kSeekEntryCount]; + uint32_t seek_entry_id_[kSeekEntryCount]; // Seek entry pos element list. - uint64 seek_entry_pos_[kSeekEntryCount]; + uint64_t seek_entry_pos_[kSeekEntryCount]; // The file position of SeekHead element. 
- int64 start_pos_; + int64_t start_pos_; LIBWEBM_DISALLOW_COPY_AND_ASSIGN(SeekHead); }; @@ -1067,12 +1253,12 @@ class SegmentInfo { double duration() const { return duration_; } void set_muxing_app(const char* app); const char* muxing_app() const { return muxing_app_; } - void set_timecode_scale(uint64 scale) { timecode_scale_ = scale; } - uint64 timecode_scale() const { return timecode_scale_; } + void set_timecode_scale(uint64_t scale) { timecode_scale_ = scale; } + uint64_t timecode_scale() const { return timecode_scale_; } void set_writing_app(const char* app); const char* writing_app() const { return writing_app_; } - void set_date_utc(int64 date_utc) { date_utc_ = date_utc; } - int64 date_utc() const { return date_utc_; } + void set_date_utc(int64_t date_utc) { date_utc_ = date_utc; } + int64_t date_utc() const { return date_utc_; } private: // Segment Information element names. @@ -1081,14 +1267,14 @@ class SegmentInfo { double duration_; // Set to libwebm-%d.%d.%d.%d, major, minor, build, revision. char* muxing_app_; - uint64 timecode_scale_; + uint64_t timecode_scale_; // Initially set to libwebm-%d.%d.%d.%d, major, minor, build, revision. char* writing_app_; // LLONG_MIN when DateUTC is not set. - int64 date_utc_; + int64_t date_utc_; // The file position of the duration element. - int64 duration_pos_; + int64_t duration_pos_; LIBWEBM_DISALLOW_COPY_AND_ASSIGN(SegmentInfo); }; @@ -1108,8 +1294,8 @@ class Segment { kBeforeClusters = 0x1 // Position Cues before Clusters }; - const static uint32 kDefaultDocTypeVersion = 2; - const static uint64 kDefaultMaxClusterDuration = 30000000000ULL; + const static uint32_t kDefaultDocTypeVersion = 2; + const static uint64_t kDefaultMaxClusterDuration = 30000000000ULL; Segment(); ~Segment(); @@ -1123,13 +1309,13 @@ class Segment { // error. |number| is the number to use for the track. |number| // must be >= 0. If |number| == 0 then the muxer will decide on the // track number. 
- Track* AddTrack(int32 number); + Track* AddTrack(int32_t number); // Adds a Vorbis audio track to the segment. Returns the number of the track // on success, 0 on error. |number| is the number to use for the audio track. // |number| must be >= 0. If |number| == 0 then the muxer will decide on // the track number. - uint64 AddAudioTrack(int32 sample_rate, int32 channels, int32 number); + uint64_t AddAudioTrack(int32_t sample_rate, int32_t channels, int32_t number); // Adds an empty chapter to the chapters of this segment. Returns // non-NULL on success. After adding the chapter, the caller should @@ -1145,7 +1331,7 @@ class Segment { // nanoseconds of the cue's time. |track| is the Track of the Cue. This // function must be called after AddFrame to calculate the correct // BlockNumber for the CuePoint. Returns true on success. - bool AddCuePoint(uint64 timestamp, uint64 track); + bool AddCuePoint(uint64_t timestamp, uint64_t track); // Adds a frame to be output in the file. Returns true on success. // Inputs: @@ -1155,8 +1341,8 @@ class Segment { // functions. // timestamp: Timestamp of the frame in nanoseconds from 0. // is_key: Flag telling whether or not this frame is a key frame. - bool AddFrame(const uint8* data, uint64 length, uint64 track_number, - uint64 timestamp_ns, bool is_key); + bool AddFrame(const uint8_t* data, uint64_t length, uint64_t track_number, + uint64_t timestamp_ns, bool is_key); // Writes a frame of metadata to the output medium; returns true on // success. @@ -1172,8 +1358,8 @@ class Segment { // The metadata frame is written as a block group, with a duration // sub-element but no reference time sub-elements (indicating that // it is considered a keyframe, per Matroska semantics). 
- bool AddMetadata(const uint8* data, uint64 length, uint64 track_number, - uint64 timestamp_ns, uint64 duration_ns); + bool AddMetadata(const uint8_t* data, uint64_t length, uint64_t track_number, + uint64_t timestamp_ns, uint64_t duration_ns); // Writes a frame with additional data to the output medium; returns true on // success. @@ -1188,10 +1374,11 @@ class Segment { // timestamp: Absolute timestamp of the frame, expressed in nanosecond // units. // is_key: Flag telling whether or not this frame is a key frame. - bool AddFrameWithAdditional(const uint8* data, uint64 length, - const uint8* additional, uint64 additional_length, - uint64 add_id, uint64 track_number, - uint64 timestamp, bool is_key); + bool AddFrameWithAdditional(const uint8_t* data, uint64_t length, + const uint8_t* additional, + uint64_t additional_length, uint64_t add_id, + uint64_t track_number, uint64_t timestamp, + bool is_key); // Writes a frame with DiscardPadding to the output medium; returns true on // success. @@ -1204,9 +1391,10 @@ class Segment { // timestamp: Absolute timestamp of the frame, expressed in nanosecond // units. // is_key: Flag telling whether or not this frame is a key frame. - bool AddFrameWithDiscardPadding(const uint8* data, uint64 length, - int64 discard_padding, uint64 track_number, - uint64 timestamp, bool is_key); + bool AddFrameWithDiscardPadding(const uint8_t* data, uint64_t length, + int64_t discard_padding, + uint64_t track_number, uint64_t timestamp, + bool is_key); // Writes a Frame to the output medium. Chooses the correct way of writing // the frame (Block vs SimpleBlock) based on the parameters passed. @@ -1218,7 +1406,7 @@ class Segment { // success, 0 on error. |number| is the number to use for the video track. // |number| must be >= 0. If |number| == 0 then the muxer will decide on // the track number. 
- uint64 AddVideoTrack(int32 width, int32 height, int32 number); + uint64_t AddVideoTrack(int32_t width, int32_t height, int32_t number); // This function must be called after Finalize() if you need a copy of the // output with Cues written before the Clusters. It will return false if the @@ -1237,7 +1425,7 @@ class Segment { // Sets which track to use for the Cues element. Must have added the track // before calling this function. Returns true on success. |track_number| is // returned by the Add track functions. - bool CuesTrack(uint64 track_number); + bool CuesTrack(uint64_t track_number); // This will force the muxer to create a new Cluster when the next frame is // added. @@ -1257,11 +1445,17 @@ class Segment { // Search the Tracks and return the track that matches |track_number|. // Returns NULL if there is no track match. - Track* GetTrackByNumber(uint64 track_number) const; + Track* GetTrackByNumber(uint64_t track_number) const; // Toggles whether to output a cues element. void OutputCues(bool output_cues); + // Toggles whether to write the last frame in each Cluster with Duration. + void AccurateClusterDuration(bool accurate_cluster_duration); + + // Toggles whether to write the Cluster Timecode using exactly 8 bytes. + void UseFixedSizeClusterTimecode(bool fixed_size_cluster_timecode); + // Sets if the muxer will output files in chunks or not. |chunking| is a // flag telling whether or not to turn on chunking. |filename| is the base // filename for the chunk files. 
The header chunk file will be named @@ -1274,15 +1468,15 @@ class Segment { bool SetChunking(bool chunking, const char* filename); bool chunking() const { return chunking_; } - uint64 cues_track() const { return cues_track_; } - void set_max_cluster_duration(uint64 max_cluster_duration) { + uint64_t cues_track() const { return cues_track_; } + void set_max_cluster_duration(uint64_t max_cluster_duration) { max_cluster_duration_ = max_cluster_duration; } - uint64 max_cluster_duration() const { return max_cluster_duration_; } - void set_max_cluster_size(uint64 max_cluster_size) { + uint64_t max_cluster_duration() const { return max_cluster_duration_; } + void set_max_cluster_size(uint64_t max_cluster_size) { max_cluster_size_ = max_cluster_size; } - uint64 max_cluster_size() const { return max_cluster_size_; } + uint64_t max_cluster_size() const { return max_cluster_size_; } void set_mode(Mode mode) { mode_ = mode; } Mode mode() const { return mode_; } CuesPosition cues_position() const { return cues_position_; } @@ -1306,7 +1500,7 @@ class Segment { // Returns the maximum offset within the segment's payload. When chunking // this function is needed to determine offsets of elements within the // chunked files. Returns -1 on error. - int64 MaxOffset(); + int64_t MaxOffset(); // Adds the frame to our frame array. bool QueueFrame(Frame* frame); @@ -1318,7 +1512,7 @@ class Segment { // Output all frames that are queued that have an end time that is less // then |timestamp|. Returns true on success and if there are no frames // queued. - bool WriteFramesLessThan(uint64 timestamp); + bool WriteFramesLessThan(uint64_t timestamp); // Outputs the segment header, Segment Information element, SeekHead element, // and Tracks element to |writer_|. 
@@ -1332,16 +1526,17 @@ class Segment { // 0 = do not create a new cluster, and write frame to the existing cluster // 1 = create a new cluster, and write frame to that new cluster // 2 = create a new cluster, and re-run test - int TestFrame(uint64 track_num, uint64 timestamp_ns, bool key) const; + int TestFrame(uint64_t track_num, uint64_t timestamp_ns, bool key) const; // Create a new cluster, using the earlier of the first enqueued // frame, or the indicated time. Returns true on success. - bool MakeNewCluster(uint64 timestamp_ns); + bool MakeNewCluster(uint64_t timestamp_ns); // Checks whether a new cluster needs to be created, and if so // creates a new cluster. Returns false if creation of a new cluster // was necessary but creation was not successful. - bool DoNewClusterProcessing(uint64 track_num, uint64 timestamp_ns, bool key); + bool DoNewClusterProcessing(uint64_t track_num, uint64_t timestamp_ns, + bool key); // Adjusts Cue Point values (to place Cues before Clusters) so that they // reflect the correct offsets. @@ -1355,7 +1550,8 @@ class Segment { // accounted for. // index - index in the list of Cues which is currently being adjusted. // cue_size - sum of size of all the CuePoint elements. - void MoveCuesBeforeClustersHelper(uint64 diff, int index, uint64* cue_size); + void MoveCuesBeforeClustersHelper(uint64_t diff, int index, + uint64_t* cue_size); // Seeds the random number generator used to make UIDs. unsigned int seed_; @@ -1394,22 +1590,22 @@ class Segment { char* chunking_base_name_; // File position offset where the Clusters end. - int64 cluster_end_offset_; + int64_t cluster_end_offset_; // List of clusters. Cluster** cluster_list_; // Number of cluster pointers allocated in the cluster list. - int32 cluster_list_capacity_; + int32_t cluster_list_capacity_; // Number of clusters in the cluster list. 
- int32 cluster_list_size_; + int32_t cluster_list_size_; // Indicates whether Cues should be written before or after Clusters CuesPosition cues_position_; // Track number that is associated with the cues element for this segment. - uint64 cues_track_; + uint64_t cues_track_; // Tells the muxer to force a new cluster on the next Block. bool force_new_cluster_; @@ -1421,10 +1617,10 @@ class Segment { Frame** frames_; // Number of frame pointers allocated in the frame list. - int32 frames_capacity_; + int32_t frames_capacity_; // Number of frames in the frame list. - int32 frames_size_; + int32_t frames_size_; // Flag telling if a video track has been added to the segment. bool has_video_; @@ -1433,23 +1629,23 @@ class Segment { bool header_written_; // Duration of the last block in nanoseconds. - uint64 last_block_duration_; + uint64_t last_block_duration_; // Last timestamp in nanoseconds added to a cluster. - uint64 last_timestamp_; + uint64_t last_timestamp_; // Last timestamp in nanoseconds by track number added to a cluster. - uint64 last_track_timestamp_[kMaxTrackNumber]; + uint64_t last_track_timestamp_[kMaxTrackNumber]; // Maximum time in nanoseconds for a cluster duration. This variable is a // guideline and some clusters may have a longer duration. Default is 30 // seconds. - uint64 max_cluster_duration_; + uint64_t max_cluster_duration_; // Maximum size in bytes for a cluster. This variable is a guideline and // some clusters may have a larger size. Default is 0 which signifies that // the muxer will decide the size. - uint64 max_cluster_size_; + uint64_t max_cluster_size_; // The mode that segment is in. If set to |kLive| the writer must not // seek backwards. @@ -1462,22 +1658,29 @@ class Segment { // Flag whether or not the muxer should output a Cues element. bool output_cues_; + // Flag whether or not the last frame in each Cluster will have a Duration + // element in it. 
+ bool accurate_cluster_duration_; + + // Flag whether or not to write the Cluster Timecode using exactly 8 bytes. + bool fixed_size_cluster_timecode_; + // The size of the EBML header, used to validate the header if // WriteEbmlHeader() is called more than once. - int32 ebml_header_size_; + int32_t ebml_header_size_; // The file position of the segment's payload. - int64 payload_pos_; + int64_t payload_pos_; // The file position of the element's size. - int64 size_position_; + int64_t size_position_; // Current DocTypeVersion (|doc_type_version_|) and that written in // WriteSegmentHeader(). // WriteEbmlHeader() will be called from Finalize() if |doc_type_version_| // differs from |doc_type_version_written_|. - uint32 doc_type_version_; - uint32 doc_type_version_written_; + uint32_t doc_type_version_; + uint32_t doc_type_version_written_; // Pointer to the writer objects. Not owned by this class. IMkvWriter* writer_cluster_; @@ -1487,6 +1690,6 @@ class Segment { LIBWEBM_DISALLOW_COPY_AND_ASSIGN(Segment); }; -} // end namespace mkvmuxer +} // namespace mkvmuxer -#endif // MKVMUXER_HPP +#endif // MKVMUXER_MKVMUXER_H_ diff --git a/libs/libvpx/third_party/libwebm/mkvmuxertypes.hpp b/libs/libvpx/third_party/libwebm/mkvmuxer/mkvmuxertypes.h similarity index 87% rename from libs/libvpx/third_party/libwebm/mkvmuxertypes.hpp rename to libs/libvpx/third_party/libwebm/mkvmuxer/mkvmuxertypes.h index d0fc9fec88..e5db121605 100644 --- a/libs/libvpx/third_party/libwebm/mkvmuxertypes.hpp +++ b/libs/libvpx/third_party/libwebm/mkvmuxer/mkvmuxertypes.h @@ -6,8 +6,17 @@ // in the file PATENTS. All contributing project authors may // be found in the AUTHORS file in the root of the source tree. 
-#ifndef MKVMUXERTYPES_HPP -#define MKVMUXERTYPES_HPP +#ifndef MKVMUXER_MKVMUXERTYPES_H_ +#define MKVMUXER_MKVMUXERTYPES_H_ + +namespace mkvmuxer { +typedef unsigned char uint8; +typedef short int16; +typedef int int32; +typedef unsigned int uint32; +typedef long long int64; +typedef unsigned long long uint64; +} // namespace mkvmuxer // Copied from Chromium basictypes.h // A macro to disallow the copy constructor and operator= functions @@ -16,15 +25,4 @@ TypeName(const TypeName&); \ void operator=(const TypeName&) -namespace mkvmuxer { - -typedef unsigned char uint8; -typedef short int16; -typedef int int32; -typedef unsigned int uint32; -typedef long long int64; -typedef unsigned long long uint64; - -} // end namespace mkvmuxer - -#endif // MKVMUXERTYPES_HPP +#endif // MKVMUXER_MKVMUXERTYPES_HPP_ diff --git a/libs/libvpx/third_party/libwebm/mkvmuxerutil.cpp b/libs/libvpx/third_party/libwebm/mkvmuxer/mkvmuxerutil.cc similarity index 50% rename from libs/libvpx/third_party/libwebm/mkvmuxerutil.cpp rename to libs/libvpx/third_party/libwebm/mkvmuxer/mkvmuxerutil.cc index 27ab15d51f..3562b8ab82 100644 --- a/libs/libvpx/third_party/libwebm/mkvmuxerutil.cpp +++ b/libs/libvpx/third_party/libwebm/mkvmuxer/mkvmuxerutil.cc @@ -6,7 +6,7 @@ // in the file PATENTS. All contributing project authors may // be found in the AUTHORS file in the root of the source tree. -#include "mkvmuxerutil.hpp" +#include "mkvmuxer/mkvmuxerutil.h" #ifdef __ANDROID__ #include @@ -20,13 +20,9 @@ #include #include -#include "mkvwriter.hpp" -#include "webmids.hpp" - -#ifdef _MSC_VER -// Disable MSVC warnings that suggest making code non-portable. -#pragma warning(disable : 4996) -#endif +#include "common/webmids.h" +#include "mkvmuxer/mkvmuxer.h" +#include "mkvmuxer/mkvwriter.h" namespace mkvmuxer { @@ -35,64 +31,68 @@ namespace { // Date elements are always 8 octets in size. 
const int kDateElementSize = 8; -uint64 WriteBlock(IMkvWriter* writer, const Frame* const frame, int64 timecode, - uint64 timecode_scale) { - uint64 block_additional_elem_size = 0; - uint64 block_addid_elem_size = 0; - uint64 block_more_payload_size = 0; - uint64 block_more_elem_size = 0; - uint64 block_additions_payload_size = 0; - uint64 block_additions_elem_size = 0; +uint64_t WriteBlock(IMkvWriter* writer, const Frame* const frame, + int64_t timecode, uint64_t timecode_scale) { + uint64_t block_additional_elem_size = 0; + uint64_t block_addid_elem_size = 0; + uint64_t block_more_payload_size = 0; + uint64_t block_more_elem_size = 0; + uint64_t block_additions_payload_size = 0; + uint64_t block_additions_elem_size = 0; if (frame->additional()) { - block_additional_elem_size = EbmlElementSize( - kMkvBlockAdditional, frame->additional(), frame->additional_length()); - block_addid_elem_size = EbmlElementSize(kMkvBlockAddID, frame->add_id()); + block_additional_elem_size = + EbmlElementSize(libwebm::kMkvBlockAdditional, frame->additional(), + frame->additional_length()); + block_addid_elem_size = + EbmlElementSize(libwebm::kMkvBlockAddID, frame->add_id()); block_more_payload_size = block_addid_elem_size + block_additional_elem_size; block_more_elem_size = - EbmlMasterElementSize(kMkvBlockMore, block_more_payload_size) + + EbmlMasterElementSize(libwebm::kMkvBlockMore, block_more_payload_size) + block_more_payload_size; block_additions_payload_size = block_more_elem_size; block_additions_elem_size = - EbmlMasterElementSize(kMkvBlockAdditions, + EbmlMasterElementSize(libwebm::kMkvBlockAdditions, block_additions_payload_size) + block_additions_payload_size; } - uint64 discard_padding_elem_size = 0; + uint64_t discard_padding_elem_size = 0; if (frame->discard_padding() != 0) { discard_padding_elem_size = - EbmlElementSize(kMkvDiscardPadding, frame->discard_padding()); + EbmlElementSize(libwebm::kMkvDiscardPadding, frame->discard_padding()); } - const uint64 
reference_block_timestamp = + const uint64_t reference_block_timestamp = frame->reference_block_timestamp() / timecode_scale; - uint64 reference_block_elem_size = 0; + uint64_t reference_block_elem_size = 0; if (!frame->is_key()) { reference_block_elem_size = - EbmlElementSize(kMkvReferenceBlock, reference_block_timestamp); + EbmlElementSize(libwebm::kMkvReferenceBlock, reference_block_timestamp); } - const uint64 duration = frame->duration() / timecode_scale; - uint64 block_duration_elem_size = 0; + const uint64_t duration = frame->duration() / timecode_scale; + uint64_t block_duration_elem_size = 0; if (duration > 0) - block_duration_elem_size = EbmlElementSize(kMkvBlockDuration, duration); + block_duration_elem_size = + EbmlElementSize(libwebm::kMkvBlockDuration, duration); - const uint64 block_payload_size = 4 + frame->length(); - const uint64 block_elem_size = - EbmlMasterElementSize(kMkvBlock, block_payload_size) + block_payload_size; + const uint64_t block_payload_size = 4 + frame->length(); + const uint64_t block_elem_size = + EbmlMasterElementSize(libwebm::kMkvBlock, block_payload_size) + + block_payload_size; - const uint64 block_group_payload_size = + const uint64_t block_group_payload_size = block_elem_size + block_additions_elem_size + block_duration_elem_size + discard_padding_elem_size + reference_block_elem_size; - if (!WriteEbmlMasterElement(writer, kMkvBlockGroup, + if (!WriteEbmlMasterElement(writer, libwebm::kMkvBlockGroup, block_group_payload_size)) { return 0; } - if (!WriteEbmlMasterElement(writer, kMkvBlock, block_payload_size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvBlock, block_payload_size)) return 0; if (WriteUInt(writer, frame->track_number())) @@ -105,77 +105,81 @@ uint64 WriteBlock(IMkvWriter* writer, const Frame* const frame, int64 timecode, if (SerializeInt(writer, 0, 1)) return 0; - if (writer->Write(frame->frame(), static_cast(frame->length()))) + if (writer->Write(frame->frame(), static_cast(frame->length()))) return 
0; if (frame->additional()) { - if (!WriteEbmlMasterElement(writer, kMkvBlockAdditions, + if (!WriteEbmlMasterElement(writer, libwebm::kMkvBlockAdditions, block_additions_payload_size)) { return 0; } - if (!WriteEbmlMasterElement(writer, kMkvBlockMore, block_more_payload_size)) + if (!WriteEbmlMasterElement(writer, libwebm::kMkvBlockMore, + block_more_payload_size)) return 0; - if (!WriteEbmlElement(writer, kMkvBlockAddID, frame->add_id())) + if (!WriteEbmlElement(writer, libwebm::kMkvBlockAddID, frame->add_id())) return 0; - if (!WriteEbmlElement(writer, kMkvBlockAdditional, frame->additional(), - frame->additional_length())) { + if (!WriteEbmlElement(writer, libwebm::kMkvBlockAdditional, + frame->additional(), frame->additional_length())) { return 0; } } if (frame->discard_padding() != 0 && - !WriteEbmlElement(writer, kMkvDiscardPadding, frame->discard_padding())) { + !WriteEbmlElement(writer, libwebm::kMkvDiscardPadding, + frame->discard_padding())) { return false; } if (!frame->is_key() && - !WriteEbmlElement(writer, kMkvReferenceBlock, + !WriteEbmlElement(writer, libwebm::kMkvReferenceBlock, reference_block_timestamp)) { return false; } - if (duration > 0 && !WriteEbmlElement(writer, kMkvBlockDuration, duration)) { + if (duration > 0 && + !WriteEbmlElement(writer, libwebm::kMkvBlockDuration, duration)) { return false; } - return EbmlMasterElementSize(kMkvBlockGroup, block_group_payload_size) + + return EbmlMasterElementSize(libwebm::kMkvBlockGroup, + block_group_payload_size) + block_group_payload_size; } -uint64 WriteSimpleBlock(IMkvWriter* writer, const Frame* const frame, - int64 timecode) { - if (WriteID(writer, kMkvSimpleBlock)) +uint64_t WriteSimpleBlock(IMkvWriter* writer, const Frame* const frame, + int64_t timecode) { + if (WriteID(writer, libwebm::kMkvSimpleBlock)) return 0; - const int32 size = static_cast(frame->length()) + 4; + const int32_t size = static_cast(frame->length()) + 4; if (WriteUInt(writer, size)) return 0; - if (WriteUInt(writer, 
static_cast(frame->track_number()))) + if (WriteUInt(writer, static_cast(frame->track_number()))) return 0; if (SerializeInt(writer, timecode, 2)) return 0; - uint64 flags = 0; + uint64_t flags = 0; if (frame->is_key()) flags |= 0x80; if (SerializeInt(writer, flags, 1)) return 0; - if (writer->Write(frame->frame(), static_cast(frame->length()))) + if (writer->Write(frame->frame(), static_cast(frame->length()))) return 0; - return GetUIntSize(kMkvSimpleBlock) + GetCodedUIntSize(size) + 4 + - frame->length(); + return static_cast(GetUIntSize(libwebm::kMkvSimpleBlock) + + GetCodedUIntSize(size) + 4 + frame->length()); } } // namespace -int32 GetCodedUIntSize(uint64 value) { +int32_t GetCodedUIntSize(uint64_t value) { if (value < 0x000000000000007FULL) return 1; else if (value < 0x0000000000003FFFULL) @@ -193,7 +197,7 @@ int32 GetCodedUIntSize(uint64 value) { return 8; } -int32 GetUIntSize(uint64 value) { +int32_t GetUIntSize(uint64_t value) { if (value < 0x0000000000000100ULL) return 1; else if (value < 0x0000000000010000ULL) @@ -211,26 +215,26 @@ int32 GetUIntSize(uint64 value) { return 8; } -int32 GetIntSize(int64 value) { +int32_t GetIntSize(int64_t value) { // Doubling the requested value ensures positive values with their high bit // set are written with 0-padding to avoid flipping the signedness. - const uint64 v = (value < 0) ? value ^ -1LL : value; + const uint64_t v = (value < 0) ? 
value ^ -1LL : value; return GetUIntSize(2 * v); } -uint64 EbmlMasterElementSize(uint64 type, uint64 value) { +uint64_t EbmlMasterElementSize(uint64_t type, uint64_t value) { // Size of EBML ID - int32 ebml_size = GetUIntSize(type); + int32_t ebml_size = GetUIntSize(type); // Datasize ebml_size += GetCodedUIntSize(value); - return ebml_size; + return static_cast(ebml_size); } -uint64 EbmlElementSize(uint64 type, int64 value) { +uint64_t EbmlElementSize(uint64_t type, int64_t value) { // Size of EBML ID - int32 ebml_size = GetUIntSize(type); + int32_t ebml_size = GetUIntSize(type); // Datasize ebml_size += GetIntSize(value); @@ -238,15 +242,20 @@ uint64 EbmlElementSize(uint64 type, int64 value) { // Size of Datasize ebml_size++; - return ebml_size; + return static_cast(ebml_size); } -uint64 EbmlElementSize(uint64 type, uint64 value) { +uint64_t EbmlElementSize(uint64_t type, uint64_t value) { + return EbmlElementSize(type, value, 0); +} + +uint64_t EbmlElementSize(uint64_t type, uint64_t value, uint64_t fixed_size) { // Size of EBML ID - int32 ebml_size = GetUIntSize(type); + uint64_t ebml_size = static_cast(GetUIntSize(type)); // Datasize - ebml_size += GetUIntSize(value); + ebml_size += + (fixed_size > 0) ? 
fixed_size : static_cast(GetUIntSize(value)); // Size of Datasize ebml_size++; @@ -254,9 +263,9 @@ uint64 EbmlElementSize(uint64 type, uint64 value) { return ebml_size; } -uint64 EbmlElementSize(uint64 type, float /* value */) { +uint64_t EbmlElementSize(uint64_t type, float /* value */) { // Size of EBML ID - uint64 ebml_size = GetUIntSize(type); + uint64_t ebml_size = static_cast(GetUIntSize(type)); // Datasize ebml_size += sizeof(float); @@ -267,12 +276,12 @@ uint64 EbmlElementSize(uint64 type, float /* value */) { return ebml_size; } -uint64 EbmlElementSize(uint64 type, const char* value) { +uint64_t EbmlElementSize(uint64_t type, const char* value) { if (!value) return 0; // Size of EBML ID - uint64 ebml_size = GetUIntSize(type); + uint64_t ebml_size = static_cast(GetUIntSize(type)); // Datasize ebml_size += strlen(value); @@ -283,12 +292,12 @@ uint64 EbmlElementSize(uint64 type, const char* value) { return ebml_size; } -uint64 EbmlElementSize(uint64 type, const uint8* value, uint64 size) { +uint64_t EbmlElementSize(uint64_t type, const uint8_t* value, uint64_t size) { if (!value) return 0; // Size of EBML ID - uint64 ebml_size = GetUIntSize(type); + uint64_t ebml_size = static_cast(GetUIntSize(type)); // Datasize ebml_size += size; @@ -299,9 +308,9 @@ uint64 EbmlElementSize(uint64 type, const uint8* value, uint64 size) { return ebml_size; } -uint64 EbmlDateElementSize(uint64 type) { +uint64_t EbmlDateElementSize(uint64_t type) { // Size of EBML ID - uint64 ebml_size = GetUIntSize(type); + uint64_t ebml_size = static_cast(GetUIntSize(type)); // Datasize ebml_size += kDateElementSize; @@ -312,18 +321,18 @@ uint64 EbmlDateElementSize(uint64 type) { return ebml_size; } -int32 SerializeInt(IMkvWriter* writer, int64 value, int32 size) { +int32_t SerializeInt(IMkvWriter* writer, int64_t value, int32_t size) { if (!writer || size < 1 || size > 8) return -1; - for (int32 i = 1; i <= size; ++i) { - const int32 byte_count = size - i; - const int32 bit_count = byte_count 
* 8; + for (int32_t i = 1; i <= size; ++i) { + const int32_t byte_count = size - i; + const int32_t bit_count = byte_count * 8; - const int64 bb = value >> bit_count; - const uint8 b = static_cast(bb); + const int64_t bb = value >> bit_count; + const uint8_t b = static_cast(bb); - const int32 status = writer->Write(&b, 1); + const int32_t status = writer->Write(&b, 1); if (status < 0) return status; @@ -332,26 +341,26 @@ int32 SerializeInt(IMkvWriter* writer, int64 value, int32 size) { return 0; } -int32 SerializeFloat(IMkvWriter* writer, float f) { +int32_t SerializeFloat(IMkvWriter* writer, float f) { if (!writer) return -1; - assert(sizeof(uint32) == sizeof(float)); + assert(sizeof(uint32_t) == sizeof(float)); // This union is merely used to avoid a reinterpret_cast from float& to // uint32& which will result in violation of strict aliasing. union U32 { - uint32 u32; + uint32_t u32; float f; } value; value.f = f; - for (int32 i = 1; i <= 4; ++i) { - const int32 byte_count = 4 - i; - const int32 bit_count = byte_count * 8; + for (int32_t i = 1; i <= 4; ++i) { + const int32_t byte_count = 4 - i; + const int32_t bit_count = byte_count * 8; - const uint8 byte = static_cast(value.u32 >> bit_count); + const uint8_t byte = static_cast(value.u32 >> bit_count); - const int32 status = writer->Write(&byte, 1); + const int32_t status = writer->Write(&byte, 1); if (status < 0) return status; @@ -360,21 +369,21 @@ int32 SerializeFloat(IMkvWriter* writer, float f) { return 0; } -int32 WriteUInt(IMkvWriter* writer, uint64 value) { +int32_t WriteUInt(IMkvWriter* writer, uint64_t value) { if (!writer) return -1; - int32 size = GetCodedUIntSize(value); + int32_t size = GetCodedUIntSize(value); return WriteUIntSize(writer, value, size); } -int32 WriteUIntSize(IMkvWriter* writer, uint64 value, int32 size) { +int32_t WriteUIntSize(IMkvWriter* writer, uint64_t value, int32_t size) { if (!writer || size < 0 || size > 8) return -1; if (size > 0) { - const uint64 bit = 1LL << (size * 7); 
+ const uint64_t bit = 1LL << (size * 7); if (value > (bit - 2)) return -1; @@ -382,11 +391,11 @@ int32 WriteUIntSize(IMkvWriter* writer, uint64 value, int32 size) { value |= bit; } else { size = 1; - int64 bit; + int64_t bit; for (;;) { bit = 1LL << (size * 7); - const uint64 max = bit - 2; + const uint64_t max = bit - 2; if (value <= max) break; @@ -403,18 +412,18 @@ int32 WriteUIntSize(IMkvWriter* writer, uint64 value, int32 size) { return SerializeInt(writer, value, size); } -int32 WriteID(IMkvWriter* writer, uint64 type) { +int32_t WriteID(IMkvWriter* writer, uint64_t type) { if (!writer) return -1; writer->ElementStartNotify(type, writer->Position()); - const int32 size = GetUIntSize(type); + const int32_t size = GetUIntSize(type); return SerializeInt(writer, type, size); } -bool WriteEbmlMasterElement(IMkvWriter* writer, uint64 type, uint64 size) { +bool WriteEbmlMasterElement(IMkvWriter* writer, uint64_t type, uint64_t size) { if (!writer) return false; @@ -427,41 +436,51 @@ bool WriteEbmlMasterElement(IMkvWriter* writer, uint64 type, uint64 size) { return true; } -bool WriteEbmlElement(IMkvWriter* writer, uint64 type, uint64 value) { +bool WriteEbmlElement(IMkvWriter* writer, uint64_t type, uint64_t value) { + return WriteEbmlElement(writer, type, value, 0); +} + +bool WriteEbmlElement(IMkvWriter* writer, uint64_t type, uint64_t value, + uint64_t fixed_size) { if (!writer) return false; if (WriteID(writer, type)) return false; - const uint64 size = GetUIntSize(value); + uint64_t size = static_cast(GetUIntSize(value)); + if (fixed_size > 0) { + if (size > fixed_size) + return false; + size = fixed_size; + } if (WriteUInt(writer, size)) return false; - if (SerializeInt(writer, value, static_cast(size))) + if (SerializeInt(writer, value, static_cast(size))) return false; return true; } -bool WriteEbmlElement(IMkvWriter* writer, uint64 type, int64 value) { +bool WriteEbmlElement(IMkvWriter* writer, uint64_t type, int64_t value) { if (!writer) return false; if 
(WriteID(writer, type)) return 0; - const uint64 size = GetIntSize(value); + const uint64_t size = GetIntSize(value); if (WriteUInt(writer, size)) return false; - if (SerializeInt(writer, value, static_cast(size))) + if (SerializeInt(writer, value, static_cast(size))) return false; return true; } -bool WriteEbmlElement(IMkvWriter* writer, uint64 type, float value) { +bool WriteEbmlElement(IMkvWriter* writer, uint64_t type, float value) { if (!writer) return false; @@ -477,25 +496,25 @@ bool WriteEbmlElement(IMkvWriter* writer, uint64 type, float value) { return true; } -bool WriteEbmlElement(IMkvWriter* writer, uint64 type, const char* value) { +bool WriteEbmlElement(IMkvWriter* writer, uint64_t type, const char* value) { if (!writer || !value) return false; if (WriteID(writer, type)) return false; - const uint64 length = strlen(value); + const uint64_t length = strlen(value); if (WriteUInt(writer, length)) return false; - if (writer->Write(value, static_cast(length))) + if (writer->Write(value, static_cast(length))) return false; return true; } -bool WriteEbmlElement(IMkvWriter* writer, uint64 type, const uint8* value, - uint64 size) { +bool WriteEbmlElement(IMkvWriter* writer, uint64_t type, const uint8_t* value, + uint64_t size) { if (!writer || !value || size < 1) return false; @@ -505,13 +524,13 @@ bool WriteEbmlElement(IMkvWriter* writer, uint64 type, const uint8* value, if (WriteUInt(writer, size)) return false; - if (writer->Write(value, static_cast(size))) + if (writer->Write(value, static_cast(size))) return false; return true; } -bool WriteEbmlDateElement(IMkvWriter* writer, uint64 type, int64 value) { +bool WriteEbmlDateElement(IMkvWriter* writer, uint64_t type, int64_t value) { if (!writer) return false; @@ -527,8 +546,8 @@ bool WriteEbmlDateElement(IMkvWriter* writer, uint64 type, int64 value) { return true; } -uint64 WriteFrame(IMkvWriter* writer, const Frame* const frame, - Cluster* cluster) { +uint64_t WriteFrame(IMkvWriter* writer, const Frame* 
const frame, + Cluster* cluster) { if (!writer || !frame || !frame->IsValid() || !cluster || !cluster->timecode_scale()) return 0; @@ -537,7 +556,7 @@ uint64 WriteFrame(IMkvWriter* writer, const Frame* const frame, // timecode for the cluster itself (remember that block timecode // is a signed, 16-bit integer). However, as a simplification we // only permit non-negative cluster-relative timecodes for blocks. - const int64 relative_timecode = cluster->GetRelativeTimecode( + const int64_t relative_timecode = cluster->GetRelativeTimecode( frame->timestamp() / cluster->timecode_scale()); if (relative_timecode < 0 || relative_timecode > kMaxBlockTimecode) return 0; @@ -548,53 +567,53 @@ uint64 WriteFrame(IMkvWriter* writer, const Frame* const frame, cluster->timecode_scale()); } -uint64 WriteVoidElement(IMkvWriter* writer, uint64 size) { +uint64_t WriteVoidElement(IMkvWriter* writer, uint64_t size) { if (!writer) return false; // Subtract one for the void ID and the coded size. - uint64 void_entry_size = size - 1 - GetCodedUIntSize(size - 1); - uint64 void_size = - EbmlMasterElementSize(kMkvVoid, void_entry_size) + void_entry_size; + uint64_t void_entry_size = size - 1 - GetCodedUIntSize(size - 1); + uint64_t void_size = + EbmlMasterElementSize(libwebm::kMkvVoid, void_entry_size) + + void_entry_size; if (void_size != size) return 0; - const int64 payload_position = writer->Position(); + const int64_t payload_position = writer->Position(); if (payload_position < 0) return 0; - if (WriteID(writer, kMkvVoid)) + if (WriteID(writer, libwebm::kMkvVoid)) return 0; if (WriteUInt(writer, void_entry_size)) return 0; - const uint8 value = 0; - for (int32 i = 0; i < static_cast(void_entry_size); ++i) { + const uint8_t value = 0; + for (int32_t i = 0; i < static_cast(void_entry_size); ++i) { if (writer->Write(&value, 1)) return 0; } - const int64 stop_position = writer->Position(); + const int64_t stop_position = writer->Position(); if (stop_position < 0 || - stop_position - 
payload_position != static_cast(void_size)) + stop_position - payload_position != static_cast(void_size)) return 0; return void_size; } -void GetVersion(int32* major, int32* minor, int32* build, int32* revision) { +void GetVersion(int32_t* major, int32_t* minor, int32_t* build, + int32_t* revision) { *major = 0; *minor = 2; *build = 1; *revision = 0; } -} // namespace mkvmuxer - -mkvmuxer::uint64 mkvmuxer::MakeUID(unsigned int* seed) { - uint64 uid = 0; +uint64_t MakeUID(unsigned int* seed) { + uint64_t uid = 0; #ifdef __MINGW32__ srand(*seed); @@ -606,24 +625,26 @@ mkvmuxer::uint64 mkvmuxer::MakeUID(unsigned int* seed) { // TODO(fgalligan): Move random number generation to platform specific code. #ifdef _MSC_VER (void)seed; - const int32 nn = rand(); + const int32_t nn = rand(); #elif __ANDROID__ - int32 temp_num = 1; + int32_t temp_num = 1; int fd = open("/dev/urandom", O_RDONLY); if (fd != -1) { - read(fd, &temp_num, sizeof(int32)); + read(fd, &temp_num, sizeof(temp_num)); close(fd); } - const int32 nn = temp_num; + const int32_t nn = temp_num; #elif defined __MINGW32__ - const int32 nn = rand(); + const int32_t nn = rand(); #else - const int32 nn = rand_r(seed); + const int32_t nn = rand_r(seed); #endif - const int32 n = 0xFF & (nn >> 4); // throw away low-order bits + const int32_t n = 0xFF & (nn >> 4); // throw away low-order bits uid |= n; } return uid; } + +} // namespace mkvmuxer diff --git a/libs/libvpx/third_party/libwebm/mkvmuxer/mkvmuxerutil.h b/libs/libvpx/third_party/libwebm/mkvmuxer/mkvmuxerutil.h new file mode 100644 index 0000000000..0e21a2dcbe --- /dev/null +++ b/libs/libvpx/third_party/libwebm/mkvmuxer/mkvmuxerutil.h @@ -0,0 +1,95 @@ +// Copyright (c) 2012 The WebM project authors. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. 
All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +#ifndef MKVMUXER_MKVMUXERUTIL_H_ +#define MKVMUXER_MKVMUXERUTIL_H_ + +#include + +namespace mkvmuxer { +class Cluster; +class Frame; +class IMkvWriter; + +const uint64_t kEbmlUnknownValue = 0x01FFFFFFFFFFFFFFULL; +const int64_t kMaxBlockTimecode = 0x07FFFLL; + +// Writes out |value| in Big Endian order. Returns 0 on success. +int32_t SerializeInt(IMkvWriter* writer, int64_t value, int32_t size); + +// Returns the size in bytes of the element. +int32_t GetUIntSize(uint64_t value); +int32_t GetIntSize(int64_t value); +int32_t GetCodedUIntSize(uint64_t value); +uint64_t EbmlMasterElementSize(uint64_t type, uint64_t value); +uint64_t EbmlElementSize(uint64_t type, int64_t value); +uint64_t EbmlElementSize(uint64_t type, uint64_t value); +uint64_t EbmlElementSize(uint64_t type, float value); +uint64_t EbmlElementSize(uint64_t type, const char* value); +uint64_t EbmlElementSize(uint64_t type, const uint8_t* value, uint64_t size); +uint64_t EbmlDateElementSize(uint64_t type); + +// Returns the size in bytes of the element assuming that the element was +// written using |fixed_size| bytes. If |fixed_size| is set to zero, then it +// computes the necessary number of bytes based on |value|. +uint64_t EbmlElementSize(uint64_t type, uint64_t value, uint64_t fixed_size); + +// Creates an EBML coded number from |value| and writes it out. The size of +// the coded number is determined by the value of |value|. |value| must not +// be in a coded form. Returns 0 on success. +int32_t WriteUInt(IMkvWriter* writer, uint64_t value); + +// Creates an EBML coded number from |value| and writes it out. The size of +// the coded number is determined by the value of |size|. |value| must not +// be in a coded form. Returns 0 on success. +int32_t WriteUIntSize(IMkvWriter* writer, uint64_t value, int32_t size); + +// Output an Mkv master element. Returns true if the element was written. 
+bool WriteEbmlMasterElement(IMkvWriter* writer, uint64_t value, uint64_t size); + +// Outputs an Mkv ID, calls |IMkvWriter::ElementStartNotify|, and passes the +// ID to |SerializeInt|. Returns 0 on success. +int32_t WriteID(IMkvWriter* writer, uint64_t type); + +// Output an Mkv non-master element. Returns true if the element was written. +bool WriteEbmlElement(IMkvWriter* writer, uint64_t type, uint64_t value); +bool WriteEbmlElement(IMkvWriter* writer, uint64_t type, int64_t value); +bool WriteEbmlElement(IMkvWriter* writer, uint64_t type, float value); +bool WriteEbmlElement(IMkvWriter* writer, uint64_t type, const char* value); +bool WriteEbmlElement(IMkvWriter* writer, uint64_t type, const uint8_t* value, + uint64_t size); +bool WriteEbmlDateElement(IMkvWriter* writer, uint64_t type, int64_t value); + +// Output an Mkv non-master element using fixed size. The element will be +// written out using exactly |fixed_size| bytes. If |fixed_size| is set to zero +// then it computes the necessary number of bytes based on |value|. Returns true +// if the element was written. +bool WriteEbmlElement(IMkvWriter* writer, uint64_t type, uint64_t value, + uint64_t fixed_size); + +// Output a Mkv Frame. It decides the correct element to write (Block vs +// SimpleBlock) based on the parameters of the Frame. +uint64_t WriteFrame(IMkvWriter* writer, const Frame* const frame, + Cluster* cluster); + +// Output a void element. |size| must be the entire size in bytes that will be +// void. The function will calculate the size of the void header and subtract +// it from |size|. +uint64_t WriteVoidElement(IMkvWriter* writer, uint64_t size); + +// Returns the version number of the muxer in |major|, |minor|, |build|, +// and |revision|. +void GetVersion(int32_t* major, int32_t* minor, int32_t* build, + int32_t* revision); + +// Returns a random number to be used for UID, using |seed| to seed +// the random-number generator (see POSIX rand_r() for semantics). 
+uint64_t MakeUID(unsigned int* seed); + +} // namespace mkvmuxer + +#endif // MKVMUXER_MKVMUXERUTIL_H_ diff --git a/libs/libvpx/third_party/libwebm/mkvwriter.cpp b/libs/libvpx/third_party/libwebm/mkvmuxer/mkvwriter.cc similarity index 97% rename from libs/libvpx/third_party/libwebm/mkvwriter.cpp rename to libs/libvpx/third_party/libwebm/mkvmuxer/mkvwriter.cc index 75d4350c70..ca48e149c6 100644 --- a/libs/libvpx/third_party/libwebm/mkvwriter.cpp +++ b/libs/libvpx/third_party/libwebm/mkvmuxer/mkvwriter.cc @@ -6,14 +6,12 @@ // in the file PATENTS. All contributing project authors may // be found in the AUTHORS file in the root of the source tree. -#include "mkvwriter.hpp" +#include "mkvmuxer/mkvwriter.h" #ifdef _MSC_VER #include // for _SH_DENYWR #endif -#include - namespace mkvmuxer { MkvWriter::MkvWriter() : file_(NULL), writer_owns_file_(true) {} diff --git a/libs/libvpx/third_party/libwebm/mkvwriter.hpp b/libs/libvpx/third_party/libwebm/mkvmuxer/mkvwriter.h similarity index 87% rename from libs/libvpx/third_party/libwebm/mkvwriter.hpp rename to libs/libvpx/third_party/libwebm/mkvmuxer/mkvwriter.h index 684560c92d..4227c63748 100644 --- a/libs/libvpx/third_party/libwebm/mkvwriter.hpp +++ b/libs/libvpx/third_party/libwebm/mkvmuxer/mkvwriter.h @@ -6,13 +6,13 @@ // in the file PATENTS. All contributing project authors may // be found in the AUTHORS file in the root of the source tree. 
-#ifndef MKVWRITER_HPP -#define MKVWRITER_HPP +#ifndef MKVMUXER_MKVWRITER_H_ +#define MKVMUXER_MKVWRITER_H_ #include -#include "mkvmuxer.hpp" -#include "mkvmuxertypes.hpp" +#include "mkvmuxer/mkvmuxer.h" +#include "mkvmuxer/mkvmuxertypes.h" namespace mkvmuxer { @@ -46,6 +46,6 @@ class MkvWriter : public IMkvWriter { LIBWEBM_DISALLOW_COPY_AND_ASSIGN(MkvWriter); }; -} // end namespace mkvmuxer +} // namespace mkvmuxer -#endif // MKVWRITER_HPP +#endif // MKVMUXER_MKVWRITER_H_ diff --git a/libs/libvpx/third_party/libwebm/mkvmuxerutil.hpp b/libs/libvpx/third_party/libwebm/mkvmuxerutil.hpp deleted file mode 100644 index e318576942..0000000000 --- a/libs/libvpx/third_party/libwebm/mkvmuxerutil.hpp +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2012 The WebM project authors. All Rights Reserved. -// -// Use of this source code is governed by a BSD-style license -// that can be found in the LICENSE file in the root of the source -// tree. An additional intellectual property rights grant can be found -// in the file PATENTS. All contributing project authors may -// be found in the AUTHORS file in the root of the source tree. - -#ifndef MKVMUXERUTIL_HPP -#define MKVMUXERUTIL_HPP - -#include "mkvmuxer.hpp" -#include "mkvmuxertypes.hpp" - -namespace mkvmuxer { - -class IMkvWriter; - -const uint64 kEbmlUnknownValue = 0x01FFFFFFFFFFFFFFULL; -const int64 kMaxBlockTimecode = 0x07FFFLL; - -// Writes out |value| in Big Endian order. Returns 0 on success. -int32 SerializeInt(IMkvWriter* writer, int64 value, int32 size); - -// Returns the size in bytes of the element. 
-int32 GetUIntSize(uint64 value); -int32 GetIntSize(int64 value); -int32 GetCodedUIntSize(uint64 value); -uint64 EbmlMasterElementSize(uint64 type, uint64 value); -uint64 EbmlElementSize(uint64 type, int64 value); -uint64 EbmlElementSize(uint64 type, uint64 value); -uint64 EbmlElementSize(uint64 type, float value); -uint64 EbmlElementSize(uint64 type, const char* value); -uint64 EbmlElementSize(uint64 type, const uint8* value, uint64 size); -uint64 EbmlDateElementSize(uint64 type); - -// Creates an EBML coded number from |value| and writes it out. The size of -// the coded number is determined by the value of |value|. |value| must not -// be in a coded form. Returns 0 on success. -int32 WriteUInt(IMkvWriter* writer, uint64 value); - -// Creates an EBML coded number from |value| and writes it out. The size of -// the coded number is determined by the value of |size|. |value| must not -// be in a coded form. Returns 0 on success. -int32 WriteUIntSize(IMkvWriter* writer, uint64 value, int32 size); - -// Output an Mkv master element. Returns true if the element was written. -bool WriteEbmlMasterElement(IMkvWriter* writer, uint64 value, uint64 size); - -// Outputs an Mkv ID, calls |IMkvWriter::ElementStartNotify|, and passes the -// ID to |SerializeInt|. Returns 0 on success. -int32 WriteID(IMkvWriter* writer, uint64 type); - -// Output an Mkv non-master element. Returns true if the element was written. -bool WriteEbmlElement(IMkvWriter* writer, uint64 type, uint64 value); -bool WriteEbmlElement(IMkvWriter* writer, uint64 type, int64 value); -bool WriteEbmlElement(IMkvWriter* writer, uint64 type, float value); -bool WriteEbmlElement(IMkvWriter* writer, uint64 type, const char* value); -bool WriteEbmlElement(IMkvWriter* writer, uint64 type, const uint8* value, - uint64 size); -bool WriteEbmlDateElement(IMkvWriter* writer, uint64 type, int64 value); - -// Output a Mkv Frame. 
It decides the correct element to write (Block vs -// SimpleBlock) based on the parameters of the Frame. -uint64 WriteFrame(IMkvWriter* writer, const Frame* const frame, - Cluster* cluster); - -// Output a void element. |size| must be the entire size in bytes that will be -// void. The function will calculate the size of the void header and subtract -// it from |size|. -uint64 WriteVoidElement(IMkvWriter* writer, uint64 size); - -// Returns the version number of the muxer in |major|, |minor|, |build|, -// and |revision|. -void GetVersion(int32* major, int32* minor, int32* build, int32* revision); - -// Returns a random number to be used for UID, using |seed| to seed -// the random-number generator (see POSIX rand_r() for semantics). -uint64 MakeUID(unsigned int* seed); - -} // end namespace mkvmuxer - -#endif // MKVMUXERUTIL_HPP diff --git a/libs/libvpx/third_party/libwebm/mkvparser.cpp b/libs/libvpx/third_party/libwebm/mkvparser/mkvparser.cc similarity index 92% rename from libs/libvpx/third_party/libwebm/mkvparser.cpp rename to libs/libvpx/third_party/libwebm/mkvparser/mkvparser.cc index f2855d5066..21801154d9 100644 --- a/libs/libvpx/third_party/libwebm/mkvparser.cpp +++ b/libs/libvpx/third_party/libwebm/mkvparser/mkvparser.cc @@ -5,8 +5,7 @@ // tree. An additional intellectual property rights grant can be found // in the file PATENTS. All contributing project authors may // be found in the AUTHORS file in the root of the source tree. - -#include "mkvparser.hpp" +#include "mkvparser/mkvparser.h" #if defined(_MSC_VER) && _MSC_VER < 1800 #include // _isnan() / _finite() @@ -14,19 +13,18 @@ #endif #include +#include #include #include #include +#include #include -#include "webmids.hpp" - -#ifdef _MSC_VER -// Disable MSVC warnings that suggest making code non-portable. 
-#pragma warning(disable : 4996) -#endif +#include "common/webmids.h" namespace mkvparser { +const float MasteringMetadata::kValueNotPresent = FLT_MAX; +const long long Colour::kValueNotPresent = LLONG_MAX; #ifdef MSC_COMPAT inline bool isnan(double val) { return !!_isnan(val); } @@ -38,8 +36,9 @@ inline bool isinf(double val) { return std::isinf(val); } IMkvReader::~IMkvReader() {} -template Type* SafeArrayAlloc(unsigned long long num_elements, - unsigned long long element_size) { +template +Type* SafeArrayAlloc(unsigned long long num_elements, + unsigned long long element_size) { if (num_elements == 0 || element_size == 0) return NULL; @@ -350,9 +349,8 @@ long UnserializeString(IMkvReader* pReader, long long pos, long long size, return 0; } -long ParseElementHeader(IMkvReader* pReader, long long& pos, - long long stop, long long& id, - long long& size) { +long ParseElementHeader(IMkvReader* pReader, long long& pos, long long stop, + long long& id, long long& size) { if (stop >= 0 && pos >= stop) return E_FILE_FORMAT_INVALID; @@ -386,7 +384,7 @@ long ParseElementHeader(IMkvReader* pReader, long long& pos, // pos now designates payload - if (stop >= 0 && pos >= stop) + if (stop >= 0 && pos > stop) return E_FILE_FORMAT_INVALID; return 0; // success @@ -520,7 +518,6 @@ long long EBMLHeader::Parse(IMkvReader* pReader, long long& pos) { return status; pos = 0; - long long end = (available >= 1024) ? 1024 : available; // Scan until we find what looks like the first byte of the EBML header. const long long kMaxScanBytes = (available >= 1024) ? 1024 : available; @@ -544,8 +541,10 @@ long long EBMLHeader::Parse(IMkvReader* pReader, long long& pos) { long len = 0; const long long ebml_id = ReadID(pReader, pos, len); - // TODO(tomfinegan): Move Matroska ID constants into a common namespace. 
- if (len != 4 || ebml_id != mkvmuxer::kMkvEBML) + if (ebml_id == E_BUFFER_NOT_FULL) + return E_BUFFER_NOT_FULL; + + if (len != 4 || ebml_id != libwebm::kMkvEBML) return E_FILE_FORMAT_INVALID; // Move read pos forward to the EBML header size field. @@ -584,7 +583,7 @@ long long EBMLHeader::Parse(IMkvReader* pReader, long long& pos) { if ((available - pos) < result) return pos + result; - end = pos + result; + const long long end = pos + result; Init(); @@ -599,27 +598,27 @@ long long EBMLHeader::Parse(IMkvReader* pReader, long long& pos) { if (size == 0) return E_FILE_FORMAT_INVALID; - if (id == mkvmuxer::kMkvEBMLVersion) { + if (id == libwebm::kMkvEBMLVersion) { m_version = UnserializeUInt(pReader, pos, size); if (m_version <= 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvEBMLReadVersion) { + } else if (id == libwebm::kMkvEBMLReadVersion) { m_readVersion = UnserializeUInt(pReader, pos, size); if (m_readVersion <= 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvEBMLMaxIDLength) { + } else if (id == libwebm::kMkvEBMLMaxIDLength) { m_maxIdLength = UnserializeUInt(pReader, pos, size); if (m_maxIdLength <= 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvEBMLMaxSizeLength) { + } else if (id == libwebm::kMkvEBMLMaxSizeLength) { m_maxSizeLength = UnserializeUInt(pReader, pos, size); if (m_maxSizeLength <= 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvDocType) { + } else if (id == libwebm::kMkvDocType) { if (m_docType) return E_FILE_FORMAT_INVALID; @@ -627,12 +626,12 @@ long long EBMLHeader::Parse(IMkvReader* pReader, long long& pos) { if (status) // error return status; - } else if (id == mkvmuxer::kMkvDocTypeVersion) { + } else if (id == libwebm::kMkvDocTypeVersion) { m_docTypeVersion = UnserializeUInt(pReader, pos, size); if (m_docTypeVersion <= 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvDocTypeReadVersion) { + } else if (id == libwebm::kMkvDocTypeReadVersion) { 
m_docTypeReadVersion = UnserializeUInt(pReader, pos, size); if (m_docTypeReadVersion <= 0) @@ -650,8 +649,8 @@ long long EBMLHeader::Parse(IMkvReader* pReader, long long& pos) { return E_FILE_FORMAT_INVALID; // Make sure EBMLMaxIDLength and EBMLMaxSizeLength are valid. - if (m_maxIdLength <= 0 || m_maxIdLength > 4 || - m_maxSizeLength <= 0 || m_maxSizeLength > 8) + if (m_maxIdLength <= 0 || m_maxIdLength > 4 || m_maxSizeLength <= 0 || + m_maxSizeLength > 8) return E_FILE_FORMAT_INVALID; return 0; @@ -786,7 +785,7 @@ long long Segment::CreateInstance(IMkvReader* pReader, long long pos, // Handle "unknown size" for live streaming of webm files. const long long unknown_size = (1LL << (7 * len)) - 1; - if (id == mkvmuxer::kMkvSegment) { + if (id == libwebm::kMkvSegment) { if (size == unknown_size) size = -1; @@ -878,7 +877,7 @@ long long Segment::ParseHeaders() { if (id < 0) return E_FILE_FORMAT_INVALID; - if (id == mkvmuxer::kMkvCluster) + if (id == libwebm::kMkvCluster) break; pos += len; // consume ID @@ -930,7 +929,7 @@ long long Segment::ParseHeaders() { if ((pos + size) > available) return pos + size; - if (id == mkvmuxer::kMkvInfo) { + if (id == libwebm::kMkvInfo) { if (m_pInfo) return E_FILE_FORMAT_INVALID; @@ -944,7 +943,7 @@ long long Segment::ParseHeaders() { if (status) return status; - } else if (id == mkvmuxer::kMkvTracks) { + } else if (id == libwebm::kMkvTracks) { if (m_pTracks) return E_FILE_FORMAT_INVALID; @@ -958,7 +957,7 @@ long long Segment::ParseHeaders() { if (status) return status; - } else if (id == mkvmuxer::kMkvCues) { + } else if (id == libwebm::kMkvCues) { if (m_pCues == NULL) { m_pCues = new (std::nothrow) Cues(this, pos, size, element_start, element_size); @@ -966,7 +965,7 @@ long long Segment::ParseHeaders() { if (m_pCues == NULL) return -1; } - } else if (id == mkvmuxer::kMkvSeekHead) { + } else if (id == libwebm::kMkvSeekHead) { if (m_pSeekHead == NULL) { m_pSeekHead = new (std::nothrow) SeekHead(this, pos, size, element_start, 
element_size); @@ -979,7 +978,7 @@ long long Segment::ParseHeaders() { if (status) return status; } - } else if (id == mkvmuxer::kMkvChapters) { + } else if (id == libwebm::kMkvChapters) { if (m_pChapters == NULL) { m_pChapters = new (std::nothrow) Chapters(this, pos, size, element_start, element_size); @@ -992,7 +991,7 @@ long long Segment::ParseHeaders() { if (status) return status; } - } else if (id == mkvmuxer::kMkvTags) { + } else if (id == libwebm::kMkvTags) { if (m_pTags == NULL) { m_pTags = new (std::nothrow) Tags(this, pos, size, element_start, element_size); @@ -1131,7 +1130,7 @@ long Segment::DoLoadCluster(long long& pos, long& len) { return E_FILE_FORMAT_INVALID; } - if (id == mkvmuxer::kMkvCues) { + if (id == libwebm::kMkvCues) { if (size == unknown_size) { // Cues element of unknown size: Not supported. return E_FILE_FORMAT_INVALID; @@ -1149,7 +1148,7 @@ long Segment::DoLoadCluster(long long& pos, long& len) { continue; } - if (id != mkvmuxer::kMkvCluster) { + if (id != libwebm::kMkvCluster) { // Besides the Segment, Libwebm allows only cluster elements of unknown // size. Fail the parse upon encountering a non-cluster element reporting // unknown size. 
@@ -1466,7 +1465,7 @@ long Segment::Load() { return E_FILE_FORMAT_INVALID; for (;;) { - const int status = LoadCluster(); + const long status = LoadCluster(); if (status < 0) // error return status; @@ -1512,9 +1511,9 @@ long SeekHead::Parse() { if (status < 0) // error return status; - if (id == mkvmuxer::kMkvSeek) + if (id == libwebm::kMkvSeek) ++entry_count; - else if (id == mkvmuxer::kMkvVoid) + else if (id == libwebm::kMkvVoid) ++void_element_count; pos += size; // consume payload @@ -1553,14 +1552,14 @@ long SeekHead::Parse() { if (status < 0) // error return status; - if (id == mkvmuxer::kMkvSeek) { + if (id == libwebm::kMkvSeek) { if (ParseEntry(pReader, pos, size, pEntry)) { Entry& e = *pEntry++; e.element_start = idpos; e.element_size = (pos + size) - idpos; } - } else if (id == mkvmuxer::kMkvVoid) { + } else if (id == libwebm::kMkvVoid) { VoidElement& e = *pVoidElement++; e.element_start = idpos; @@ -1664,7 +1663,7 @@ long Segment::ParseCues(long long off, long long& pos, long& len) { const long long id = ReadID(m_pReader, idpos, len); - if (id != mkvmuxer::kMkvCues) + if (id != libwebm::kMkvCues) return E_FILE_FORMAT_INVALID; pos += len; // consume ID @@ -1746,7 +1745,7 @@ bool SeekHead::ParseEntry(IMkvReader* pReader, long long start, long long size_, if (seekIdId < 0) return false; - if (seekIdId != mkvmuxer::kMkvSeekID) + if (seekIdId != libwebm::kMkvSeekID) return false; if ((pos + len) > stop) @@ -1790,7 +1789,7 @@ bool SeekHead::ParseEntry(IMkvReader* pReader, long long start, long long size_, const long long seekPosId = ReadID(pReader, pos, len); - if (seekPosId != mkvmuxer::kMkvSeekPosition) + if (seekPosId != libwebm::kMkvSeekPosition) return false; if ((pos + len) > stop) @@ -1900,7 +1899,7 @@ bool Cues::Init() const { return false; } - if (id == mkvmuxer::kMkvCuePoint) { + if (id == libwebm::kMkvCuePoint) { if (!PreloadCuePoint(cue_points_size, idpos)) return false; } @@ -1975,7 +1974,7 @@ bool Cues::LoadCuePoint() const { if ((m_pos + size) 
> stop) return false; - if (id != mkvmuxer::kMkvCuePoint) { + if (id != libwebm::kMkvCuePoint) { m_pos += size; // consume payload if (m_pos > stop) return false; @@ -2105,8 +2104,8 @@ const CuePoint* Cues::GetLast() const { } const CuePoint* Cues::GetNext(const CuePoint* pCurr) const { - if (pCurr == NULL || pCurr->GetTimeCode() < 0 || - m_cue_points == NULL || m_count < 1) { + if (pCurr == NULL || pCurr->GetTimeCode() < 0 || m_cue_points == NULL || + m_count < 1) { return NULL; } @@ -2286,7 +2285,7 @@ bool CuePoint::Load(IMkvReader* pReader) { long len; const long long id = ReadID(pReader, pos_, len); - if (id != mkvmuxer::kMkvCuePoint) + if (id != libwebm::kMkvCuePoint) return false; pos_ += len; // consume ID @@ -2326,10 +2325,10 @@ bool CuePoint::Load(IMkvReader* pReader) { return false; } - if (id == mkvmuxer::kMkvCueTime) + if (id == libwebm::kMkvCueTime) m_timecode = UnserializeUInt(pReader, pos, size); - else if (id == mkvmuxer::kMkvCueTrackPositions) + else if (id == libwebm::kMkvCueTrackPositions) ++m_track_positions_count; pos += size; // consume payload @@ -2368,7 +2367,7 @@ bool CuePoint::Load(IMkvReader* pReader) { pos += len; // consume Size field assert((pos + size) <= stop); - if (id == mkvmuxer::kMkvCueTrackPositions) { + if (id == libwebm::kMkvCueTrackPositions) { TrackPosition& tp = *p++; if (!tp.Parse(pReader, pos, size)) { return false; @@ -2417,11 +2416,11 @@ bool CuePoint::TrackPosition::Parse(IMkvReader* pReader, long long start_, return false; } - if (id == mkvmuxer::kMkvCueTrack) + if (id == libwebm::kMkvCueTrack) m_track = UnserializeUInt(pReader, pos, size); - else if (id == mkvmuxer::kMkvCueClusterPosition) + else if (id == libwebm::kMkvCueClusterPosition) m_pos = UnserializeUInt(pReader, pos, size); - else if (id == mkvmuxer::kMkvCueBlockNumber) + else if (id == libwebm::kMkvCueBlockNumber) m_block = UnserializeUInt(pReader, pos, size); pos += size; // consume payload @@ -2555,7 +2554,7 @@ const Cluster* Segment::GetNext(const 
Cluster* pCurr) { return NULL; const long long id = ReadID(m_pReader, pos, len); - if (id != mkvmuxer::kMkvCluster) + if (id != libwebm::kMkvCluster) return NULL; pos += len; // consume ID @@ -2612,7 +2611,7 @@ const Cluster* Segment::GetNext(const Cluster* pCurr) { if (size == 0) // weird continue; - if (id == mkvmuxer::kMkvCluster) { + if (id == libwebm::kMkvCluster) { const long long off_next_ = idpos - m_start; long long pos_; @@ -2762,7 +2761,7 @@ long Segment::ParseNext(const Cluster* pCurr, const Cluster*& pResult, const long long id = ReadUInt(m_pReader, pos, len); - if (id != mkvmuxer::kMkvCluster) + if (id != libwebm::kMkvCluster) return -1; pos += len; // consume ID @@ -2927,7 +2926,7 @@ long Segment::DoParseNext(const Cluster*& pResult, long long& pos, long& len) { return E_FILE_FORMAT_INVALID; } - if (id == mkvmuxer::kMkvCues) { + if (id == libwebm::kMkvCues) { if (size == unknown_size) return E_FILE_FORMAT_INVALID; @@ -2953,7 +2952,7 @@ long Segment::DoParseNext(const Cluster*& pResult, long long& pos, long& len) { continue; } - if (id != mkvmuxer::kMkvCluster) { // not a Cluster ID + if (id != libwebm::kMkvCluster) { // not a Cluster ID if (size == unknown_size) return E_FILE_FORMAT_INVALID; @@ -3091,7 +3090,7 @@ long Segment::DoParseNext(const Cluster*& pResult, long long& pos, long& len) { // that we have exhausted the sub-element's inside the cluster // whose ID we parsed earlier. 
- if (id == mkvmuxer::kMkvCluster || id == mkvmuxer::kMkvCues) + if (id == libwebm::kMkvCluster || id == libwebm::kMkvCues) break; pos += len; // consume ID (of sub-element) @@ -3259,7 +3258,7 @@ long Chapters::Parse() { if (size == 0) // weird continue; - if (id == mkvmuxer::kMkvEditionEntry) { + if (id == libwebm::kMkvEditionEntry) { status = ParseEdition(pos, size); if (status < 0) // error @@ -3375,7 +3374,7 @@ long Chapters::Edition::Parse(IMkvReader* pReader, long long pos, if (size == 0) continue; - if (id == mkvmuxer::kMkvChapterAtom) { + if (id == libwebm::kMkvChapterAtom) { status = ParseAtom(pReader, pos, size); if (status < 0) // error @@ -3508,17 +3507,17 @@ long Chapters::Atom::Parse(IMkvReader* pReader, long long pos, long long size) { if (size == 0) // 0 length payload, skip. continue; - if (id == mkvmuxer::kMkvChapterDisplay) { + if (id == libwebm::kMkvChapterDisplay) { status = ParseDisplay(pReader, pos, size); if (status < 0) // error return status; - } else if (id == mkvmuxer::kMkvChapterStringUID) { + } else if (id == libwebm::kMkvChapterStringUID) { status = UnserializeString(pReader, pos, size, m_string_uid); if (status < 0) // error return status; - } else if (id == mkvmuxer::kMkvChapterUID) { + } else if (id == libwebm::kMkvChapterUID) { long long val; status = UnserializeInt(pReader, pos, size, val); @@ -3526,14 +3525,14 @@ long Chapters::Atom::Parse(IMkvReader* pReader, long long pos, long long size) { return status; m_uid = static_cast(val); - } else if (id == mkvmuxer::kMkvChapterTimeStart) { + } else if (id == libwebm::kMkvChapterTimeStart) { const long long val = UnserializeUInt(pReader, pos, size); if (val < 0) // error return static_cast(val); m_start_timecode = val; - } else if (id == mkvmuxer::kMkvChapterTimeEnd) { + } else if (id == libwebm::kMkvChapterTimeEnd) { const long long val = UnserializeUInt(pReader, pos, size); if (val < 0) // error @@ -3661,17 +3660,17 @@ long Chapters::Display::Parse(IMkvReader* pReader, long long 
pos, if (size == 0) // No payload. continue; - if (id == mkvmuxer::kMkvChapString) { + if (id == libwebm::kMkvChapString) { status = UnserializeString(pReader, pos, size, m_string); if (status) return status; - } else if (id == mkvmuxer::kMkvChapLanguage) { + } else if (id == libwebm::kMkvChapLanguage) { status = UnserializeString(pReader, pos, size, m_language); if (status) return status; - } else if (id == mkvmuxer::kMkvChapCountry) { + } else if (id == libwebm::kMkvChapCountry) { status = UnserializeString(pReader, pos, size, m_country); if (status) @@ -3724,7 +3723,7 @@ long Tags::Parse() { if (size == 0) // 0 length tag, read another continue; - if (id == mkvmuxer::kMkvTag) { + if (id == libwebm::kMkvTag) { status = ParseTag(pos, size); if (status < 0) @@ -3840,7 +3839,7 @@ long Tags::Tag::Parse(IMkvReader* pReader, long long pos, long long size) { if (size == 0) // 0 length tag, read another continue; - if (id == mkvmuxer::kMkvSimpleTag) { + if (id == libwebm::kMkvSimpleTag) { status = ParseSimpleTag(pReader, pos, size); if (status < 0) @@ -3931,12 +3930,12 @@ long Tags::SimpleTag::Parse(IMkvReader* pReader, long long pos, if (size == 0) // weird continue; - if (id == mkvmuxer::kMkvTagName) { + if (id == libwebm::kMkvTagName) { status = UnserializeString(pReader, pos, size, m_tag_name); if (status) return status; - } else if (id == mkvmuxer::kMkvTagString) { + } else if (id == libwebm::kMkvTagString) { status = UnserializeString(pReader, pos, size, m_tag_string); if (status) @@ -3996,12 +3995,12 @@ long SegmentInfo::Parse() { if (status < 0) // error return status; - if (id == mkvmuxer::kMkvTimecodeScale) { + if (id == libwebm::kMkvTimecodeScale) { m_timecodeScale = UnserializeUInt(pReader, pos, size); if (m_timecodeScale <= 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvDuration) { + } else if (id == libwebm::kMkvDuration) { const long status = UnserializeFloat(pReader, pos, size, m_duration); if (status < 0) @@ -4009,19 +4008,19 @@ long 
SegmentInfo::Parse() { if (m_duration < 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvMuxingApp) { + } else if (id == libwebm::kMkvMuxingApp) { const long status = UnserializeString(pReader, pos, size, m_pMuxingAppAsUTF8); if (status) return status; - } else if (id == mkvmuxer::kMkvWritingApp) { + } else if (id == libwebm::kMkvWritingApp) { const long status = UnserializeString(pReader, pos, size, m_pWritingAppAsUTF8); if (status) return status; - } else if (id == mkvmuxer::kMkvTitle) { + } else if (id == libwebm::kMkvTitle) { const long status = UnserializeString(pReader, pos, size, m_pTitleAsUTF8); if (status) @@ -4176,7 +4175,7 @@ long ContentEncoding::ParseContentEncAESSettingsEntry( if (status < 0) // error return status; - if (id == mkvmuxer::kMkvAESSettingsCipherMode) { + if (id == libwebm::kMkvAESSettingsCipherMode) { aes->cipher_mode = UnserializeUInt(pReader, pos, size); if (aes->cipher_mode != 1) return E_FILE_FORMAT_INVALID; @@ -4207,10 +4206,10 @@ long ContentEncoding::ParseContentEncodingEntry(long long start, long long size, if (status < 0) // error return status; - if (id == mkvmuxer::kMkvContentCompression) + if (id == libwebm::kMkvContentCompression) ++compression_count; - if (id == mkvmuxer::kMkvContentEncryption) + if (id == libwebm::kMkvContentEncryption) ++encryption_count; pos += size; // consume payload @@ -4246,15 +4245,15 @@ long ContentEncoding::ParseContentEncodingEntry(long long start, long long size, if (status < 0) // error return status; - if (id == mkvmuxer::kMkvContentEncodingOrder) { + if (id == libwebm::kMkvContentEncodingOrder) { encoding_order_ = UnserializeUInt(pReader, pos, size); - } else if (id == mkvmuxer::kMkvContentEncodingScope) { + } else if (id == libwebm::kMkvContentEncodingScope) { encoding_scope_ = UnserializeUInt(pReader, pos, size); if (encoding_scope_ < 1) return -1; - } else if (id == mkvmuxer::kMkvContentEncodingType) { + } else if (id == libwebm::kMkvContentEncodingType) { encoding_type_ = 
UnserializeUInt(pReader, pos, size); - } else if (id == mkvmuxer::kMkvContentCompression) { + } else if (id == libwebm::kMkvContentCompression) { ContentCompression* const compression = new (std::nothrow) ContentCompression(); if (!compression) @@ -4266,7 +4265,7 @@ long ContentEncoding::ParseContentEncodingEntry(long long start, long long size, return status; } *compression_entries_end_++ = compression; - } else if (id == mkvmuxer::kMkvContentEncryption) { + } else if (id == libwebm::kMkvContentEncryption) { ContentEncryption* const encryption = new (std::nothrow) ContentEncryption(); if (!encryption) @@ -4307,13 +4306,13 @@ long ContentEncoding::ParseCompressionEntry(long long start, long long size, if (status < 0) // error return status; - if (id == mkvmuxer::kMkvContentCompAlgo) { + if (id == libwebm::kMkvContentCompAlgo) { long long algo = UnserializeUInt(pReader, pos, size); if (algo < 0) return E_FILE_FORMAT_INVALID; compression->algo = algo; valid = true; - } else if (id == mkvmuxer::kMkvContentCompSettings) { + } else if (id == libwebm::kMkvContentCompSettings) { if (size <= 0) return E_FILE_FORMAT_INVALID; @@ -4360,11 +4359,11 @@ long ContentEncoding::ParseEncryptionEntry(long long start, long long size, if (status < 0) // error return status; - if (id == mkvmuxer::kMkvContentEncAlgo) { + if (id == libwebm::kMkvContentEncAlgo) { encryption->algo = UnserializeUInt(pReader, pos, size); if (encryption->algo != 5) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvContentEncKeyID) { + } else if (id == libwebm::kMkvContentEncKeyID) { delete[] encryption->key_id; encryption->key_id = NULL; encryption->key_id_len = 0; @@ -4386,7 +4385,7 @@ long ContentEncoding::ParseEncryptionEntry(long long start, long long size, encryption->key_id = buf; encryption->key_id_len = buflen; - } else if (id == mkvmuxer::kMkvContentSignature) { + } else if (id == libwebm::kMkvContentSignature) { delete[] encryption->signature; encryption->signature = NULL; 
encryption->signature_len = 0; @@ -4408,7 +4407,7 @@ long ContentEncoding::ParseEncryptionEntry(long long start, long long size, encryption->signature = buf; encryption->signature_len = buflen; - } else if (id == mkvmuxer::kMkvContentSigKeyID) { + } else if (id == libwebm::kMkvContentSigKeyID) { delete[] encryption->sig_key_id; encryption->sig_key_id = NULL; encryption->sig_key_id_len = 0; @@ -4430,11 +4429,11 @@ long ContentEncoding::ParseEncryptionEntry(long long start, long long size, encryption->sig_key_id = buf; encryption->sig_key_id_len = buflen; - } else if (id == mkvmuxer::kMkvContentSigAlgo) { + } else if (id == libwebm::kMkvContentSigAlgo) { encryption->sig_algo = UnserializeUInt(pReader, pos, size); - } else if (id == mkvmuxer::kMkvContentSigHashAlgo) { + } else if (id == libwebm::kMkvContentSigHashAlgo) { encryption->sig_hash_algo = UnserializeUInt(pReader, pos, size); - } else if (id == mkvmuxer::kMkvContentEncAESSettings) { + } else if (id == libwebm::kMkvContentEncAESSettings) { const long status = ParseContentEncAESSettingsEntry( pos, size, pReader, &encryption->aes_settings); if (status) @@ -4921,7 +4920,7 @@ long Track::ParseContentEncodingsEntry(long long start, long long size) { return status; // pos now designates start of element - if (id == mkvmuxer::kMkvContentEncoding) + if (id == libwebm::kMkvContentEncoding) ++count; pos += size; // consume payload @@ -4946,7 +4945,7 @@ long Track::ParseContentEncodingsEntry(long long start, long long size) { return status; // pos now designates start of element - if (id == mkvmuxer::kMkvContentEncoding) { + if (id == libwebm::kMkvContentEncoding) { ContentEncoding* const content_encoding = new (std::nothrow) ContentEncoding(); if (!content_encoding) @@ -4978,9 +4977,222 @@ BlockEntry::Kind Track::EOSBlock::GetKind() const { return kBlockEOS; } const Block* Track::EOSBlock::GetBlock() const { return NULL; } +bool PrimaryChromaticity::Parse(IMkvReader* reader, long long read_pos, + long long value_size, 
bool is_x, + PrimaryChromaticity** chromaticity) { + if (!reader) + return false; + + std::auto_ptr chromaticity_ptr; + + if (!*chromaticity) { + chromaticity_ptr.reset(new PrimaryChromaticity()); + } else { + chromaticity_ptr.reset(*chromaticity); + } + + if (!chromaticity_ptr.get()) + return false; + + float* value = is_x ? &chromaticity_ptr->x : &chromaticity_ptr->y; + + double parser_value = 0; + const long long value_parse_status = + UnserializeFloat(reader, read_pos, value_size, parser_value); + + *value = static_cast(parser_value); + + if (value_parse_status < 0 || *value < 0.0 || *value > 1.0) + return false; + + *chromaticity = chromaticity_ptr.release(); + return true; +} + +bool MasteringMetadata::Parse(IMkvReader* reader, long long mm_start, + long long mm_size, MasteringMetadata** mm) { + if (!reader || *mm) + return false; + + std::auto_ptr mm_ptr(new MasteringMetadata()); + if (!mm_ptr.get()) + return false; + + const long long mm_end = mm_start + mm_size; + long long read_pos = mm_start; + + while (read_pos < mm_end) { + long long child_id = 0; + long long child_size = 0; + + const long long status = + ParseElementHeader(reader, read_pos, mm_end, child_id, child_size); + if (status < 0) + return false; + + if (child_id == libwebm::kMkvLuminanceMax) { + double value = 0; + const long long value_parse_status = + UnserializeFloat(reader, read_pos, child_size, value); + mm_ptr->luminance_max = static_cast(value); + if (value_parse_status < 0 || mm_ptr->luminance_max < 0.0 || + mm_ptr->luminance_max > 9999.99) { + return false; + } + } else if (child_id == libwebm::kMkvLuminanceMin) { + double value = 0; + const long long value_parse_status = + UnserializeFloat(reader, read_pos, child_size, value); + mm_ptr->luminance_min = static_cast(value); + if (value_parse_status < 0 || mm_ptr->luminance_min < 0.0 || + mm_ptr->luminance_min > 999.9999) { + return false; + } + } else { + bool is_x = false; + PrimaryChromaticity** chromaticity; + switch (child_id) { + 
case libwebm::kMkvPrimaryRChromaticityX: + case libwebm::kMkvPrimaryRChromaticityY: + is_x = child_id == libwebm::kMkvPrimaryRChromaticityX; + chromaticity = &mm_ptr->r; + break; + case libwebm::kMkvPrimaryGChromaticityX: + case libwebm::kMkvPrimaryGChromaticityY: + is_x = child_id == libwebm::kMkvPrimaryGChromaticityX; + chromaticity = &mm_ptr->g; + break; + case libwebm::kMkvPrimaryBChromaticityX: + case libwebm::kMkvPrimaryBChromaticityY: + is_x = child_id == libwebm::kMkvPrimaryBChromaticityX; + chromaticity = &mm_ptr->b; + break; + case libwebm::kMkvWhitePointChromaticityX: + case libwebm::kMkvWhitePointChromaticityY: + is_x = child_id == libwebm::kMkvWhitePointChromaticityX; + chromaticity = &mm_ptr->white_point; + break; + default: + return false; + } + const bool value_parse_status = PrimaryChromaticity::Parse( + reader, read_pos, child_size, is_x, chromaticity); + if (!value_parse_status) + return false; + } + + read_pos += child_size; + if (read_pos > mm_end) + return false; + } + + *mm = mm_ptr.release(); + return true; +} + +bool Colour::Parse(IMkvReader* reader, long long colour_start, + long long colour_size, Colour** colour) { + if (!reader || *colour) + return false; + + std::auto_ptr colour_ptr(new Colour()); + if (!colour_ptr.get()) + return false; + + const long long colour_end = colour_start + colour_size; + long long read_pos = colour_start; + + while (read_pos < colour_end) { + long long child_id = 0; + long long child_size = 0; + + const long status = + ParseElementHeader(reader, read_pos, colour_end, child_id, child_size); + if (status < 0) + return false; + + if (child_id == libwebm::kMkvMatrixCoefficients) { + colour_ptr->matrix_coefficients = + UnserializeUInt(reader, read_pos, child_size); + if (colour_ptr->matrix_coefficients < 0) + return false; + } else if (child_id == libwebm::kMkvBitsPerChannel) { + colour_ptr->bits_per_channel = + UnserializeUInt(reader, read_pos, child_size); + if (colour_ptr->bits_per_channel < 0) + return false; 
+ } else if (child_id == libwebm::kMkvChromaSubsamplingHorz) { + colour_ptr->chroma_subsampling_horz = + UnserializeUInt(reader, read_pos, child_size); + if (colour_ptr->chroma_subsampling_horz < 0) + return false; + } else if (child_id == libwebm::kMkvChromaSubsamplingVert) { + colour_ptr->chroma_subsampling_vert = + UnserializeUInt(reader, read_pos, child_size); + if (colour_ptr->chroma_subsampling_vert < 0) + return false; + } else if (child_id == libwebm::kMkvCbSubsamplingHorz) { + colour_ptr->cb_subsampling_horz = + UnserializeUInt(reader, read_pos, child_size); + if (colour_ptr->cb_subsampling_horz < 0) + return false; + } else if (child_id == libwebm::kMkvCbSubsamplingVert) { + colour_ptr->cb_subsampling_vert = + UnserializeUInt(reader, read_pos, child_size); + if (colour_ptr->cb_subsampling_vert < 0) + return false; + } else if (child_id == libwebm::kMkvChromaSitingHorz) { + colour_ptr->chroma_siting_horz = + UnserializeUInt(reader, read_pos, child_size); + if (colour_ptr->chroma_siting_horz < 0) + return false; + } else if (child_id == libwebm::kMkvChromaSitingVert) { + colour_ptr->chroma_siting_vert = + UnserializeUInt(reader, read_pos, child_size); + if (colour_ptr->chroma_siting_vert < 0) + return false; + } else if (child_id == libwebm::kMkvRange) { + colour_ptr->range = UnserializeUInt(reader, read_pos, child_size); + if (colour_ptr->range < 0) + return false; + } else if (child_id == libwebm::kMkvTransferCharacteristics) { + colour_ptr->transfer_characteristics = + UnserializeUInt(reader, read_pos, child_size); + if (colour_ptr->transfer_characteristics < 0) + return false; + } else if (child_id == libwebm::kMkvPrimaries) { + colour_ptr->primaries = UnserializeUInt(reader, read_pos, child_size); + if (colour_ptr->primaries < 0) + return false; + } else if (child_id == libwebm::kMkvMaxCLL) { + colour_ptr->max_cll = UnserializeUInt(reader, read_pos, child_size); + if (colour_ptr->max_cll < 0) + return false; + } else if (child_id == 
libwebm::kMkvMaxFALL) { + colour_ptr->max_fall = UnserializeUInt(reader, read_pos, child_size); + if (colour_ptr->max_fall < 0) + return false; + } else if (child_id == libwebm::kMkvMasteringMetadata) { + if (!MasteringMetadata::Parse(reader, read_pos, child_size, + &colour_ptr->mastering_metadata)) + return false; + } else { + return false; + } + + read_pos += child_size; + if (read_pos > colour_end) + return false; + } + *colour = colour_ptr.release(); + return true; +} + VideoTrack::VideoTrack(Segment* pSegment, long long element_start, long long element_size) - : Track(pSegment, element_start, element_size) {} + : Track(pSegment, element_start, element_size), m_colour(NULL) {} + +VideoTrack::~VideoTrack() { delete m_colour; } long VideoTrack::Parse(Segment* pSegment, const Info& info, long long element_start, long long element_size, @@ -5011,6 +5223,8 @@ long VideoTrack::Parse(Segment* pSegment, const Info& info, const long long stop = pos + s.size; + Colour* colour = NULL; + while (pos < stop) { long long id, size; @@ -5019,37 +5233,37 @@ long VideoTrack::Parse(Segment* pSegment, const Info& info, if (status < 0) // error return status; - if (id == mkvmuxer::kMkvPixelWidth) { + if (id == libwebm::kMkvPixelWidth) { width = UnserializeUInt(pReader, pos, size); if (width <= 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvPixelHeight) { + } else if (id == libwebm::kMkvPixelHeight) { height = UnserializeUInt(pReader, pos, size); if (height <= 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvDisplayWidth) { + } else if (id == libwebm::kMkvDisplayWidth) { display_width = UnserializeUInt(pReader, pos, size); if (display_width <= 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvDisplayHeight) { + } else if (id == libwebm::kMkvDisplayHeight) { display_height = UnserializeUInt(pReader, pos, size); if (display_height <= 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvDisplayUnit) { + } else if (id == 
libwebm::kMkvDisplayUnit) { display_unit = UnserializeUInt(pReader, pos, size); if (display_unit < 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvStereoMode) { + } else if (id == libwebm::kMkvStereoMode) { stereo_mode = UnserializeUInt(pReader, pos, size); if (stereo_mode < 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvFrameRate) { + } else if (id == libwebm::kMkvFrameRate) { const long status = UnserializeFloat(pReader, pos, size, rate); if (status < 0) @@ -5057,6 +5271,9 @@ long VideoTrack::Parse(Segment* pSegment, const Info& info, if (rate <= 0) return E_FILE_FORMAT_INVALID; + } else if (id == libwebm::kMkvColour) { + if (!Colour::Parse(pReader, pos, size, &colour)) + return E_FILE_FORMAT_INVALID; } pos += size; // consume payload @@ -5087,6 +5304,7 @@ long VideoTrack::Parse(Segment* pSegment, const Info& info, pTrack->m_display_unit = display_unit; pTrack->m_stereo_mode = stereo_mode; pTrack->m_rate = rate; + pTrack->m_colour = colour; pResult = pTrack; return 0; // success @@ -5185,6 +5403,8 @@ long VideoTrack::Seek(long long time_ns, const BlockEntry*& pResult) const { return 0; } +Colour* VideoTrack::GetColour() const { return m_colour; } + long long VideoTrack::GetWidth() const { return m_width; } long long VideoTrack::GetHeight() const { return m_height; } @@ -5239,7 +5459,7 @@ long AudioTrack::Parse(Segment* pSegment, const Info& info, if (status < 0) // error return status; - if (id == mkvmuxer::kMkvSamplingFrequency) { + if (id == libwebm::kMkvSamplingFrequency) { status = UnserializeFloat(pReader, pos, size, rate); if (status < 0) @@ -5247,12 +5467,12 @@ long AudioTrack::Parse(Segment* pSegment, const Info& info, if (rate <= 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvChannels) { + } else if (id == libwebm::kMkvChannels) { channels = UnserializeUInt(pReader, pos, size); if (channels <= 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvBitDepth) { + } else if (id == 
libwebm::kMkvBitDepth) { bit_depth = UnserializeUInt(pReader, pos, size); if (bit_depth <= 0) @@ -5325,7 +5545,7 @@ long Tracks::Parse() { if (size == 0) // weird continue; - if (id == mkvmuxer::kMkvTrackEntry) + if (id == libwebm::kMkvTrackEntry) ++count; pos += size; // consume payload @@ -5367,7 +5587,7 @@ long Tracks::Parse() { const long long element_size = payload_stop - element_start; - if (id == mkvmuxer::kMkvTrackEntry) { + if (id == libwebm::kMkvTrackEntry) { Track*& pTrack = *m_trackEntriesEnd; pTrack = NULL; @@ -5443,16 +5663,16 @@ long Tracks::ParseTrackEntry(long long track_start, long long track_size, const long long start = pos; - if (id == mkvmuxer::kMkvVideo) { + if (id == libwebm::kMkvVideo) { v.start = start; v.size = size; - } else if (id == mkvmuxer::kMkvAudio) { + } else if (id == libwebm::kMkvAudio) { a.start = start; a.size = size; - } else if (id == mkvmuxer::kMkvContentEncodings) { + } else if (id == libwebm::kMkvContentEncodings) { e.start = start; e.size = size; - } else if (id == mkvmuxer::kMkvTrackUID) { + } else if (id == libwebm::kMkvTrackUID) { if (size > 8) return E_FILE_FORMAT_INVALID; @@ -5474,49 +5694,49 @@ long Tracks::ParseTrackEntry(long long track_start, long long track_size, ++pos_; } - } else if (id == mkvmuxer::kMkvTrackNumber) { + } else if (id == libwebm::kMkvTrackNumber) { const long long num = UnserializeUInt(pReader, pos, size); if ((num <= 0) || (num > 127)) return E_FILE_FORMAT_INVALID; info.number = static_cast(num); - } else if (id == mkvmuxer::kMkvTrackType) { + } else if (id == libwebm::kMkvTrackType) { const long long type = UnserializeUInt(pReader, pos, size); if ((type <= 0) || (type > 254)) return E_FILE_FORMAT_INVALID; info.type = static_cast(type); - } else if (id == mkvmuxer::kMkvName) { + } else if (id == libwebm::kMkvName) { const long status = UnserializeString(pReader, pos, size, info.nameAsUTF8); if (status) return status; - } else if (id == mkvmuxer::kMkvLanguage) { + } else if (id == 
libwebm::kMkvLanguage) { const long status = UnserializeString(pReader, pos, size, info.language); if (status) return status; - } else if (id == mkvmuxer::kMkvDefaultDuration) { + } else if (id == libwebm::kMkvDefaultDuration) { const long long duration = UnserializeUInt(pReader, pos, size); if (duration < 0) return E_FILE_FORMAT_INVALID; info.defaultDuration = static_cast(duration); - } else if (id == mkvmuxer::kMkvCodecID) { + } else if (id == libwebm::kMkvCodecID) { const long status = UnserializeString(pReader, pos, size, info.codecId); if (status) return status; - } else if (id == mkvmuxer::kMkvFlagLacing) { + } else if (id == libwebm::kMkvFlagLacing) { lacing = UnserializeUInt(pReader, pos, size); if ((lacing < 0) || (lacing > 1)) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvCodecPrivate) { + } else if (id == libwebm::kMkvCodecPrivate) { delete[] info.codecPrivate; info.codecPrivate = NULL; info.codecPrivateSize = 0; @@ -5539,15 +5759,15 @@ long Tracks::ParseTrackEntry(long long track_start, long long track_size, info.codecPrivate = buf; info.codecPrivateSize = buflen; } - } else if (id == mkvmuxer::kMkvCodecName) { + } else if (id == libwebm::kMkvCodecName) { const long status = UnserializeString(pReader, pos, size, info.codecNameAsUTF8); if (status) return status; - } else if (id == mkvmuxer::kMkvCodecDelay) { + } else if (id == libwebm::kMkvCodecDelay) { info.codecDelay = UnserializeUInt(pReader, pos, size); - } else if (id == mkvmuxer::kMkvSeekPreRoll) { + } else if (id == libwebm::kMkvSeekPreRoll) { info.seekPreRoll = UnserializeUInt(pReader, pos, size); } @@ -5730,7 +5950,7 @@ long Cluster::Load(long long& pos, long& len) const { if (id_ < 0) // error return static_cast(id_); - if (id_ != mkvmuxer::kMkvCluster) + if (id_ != libwebm::kMkvCluster) return E_FILE_FORMAT_INVALID; pos += len; // consume id @@ -5812,10 +6032,10 @@ long Cluster::Load(long long& pos, long& len) const { // that we have exhausted the sub-element's inside the 
cluster // whose ID we parsed earlier. - if (id == mkvmuxer::kMkvCluster) + if (id == libwebm::kMkvCluster) break; - if (id == mkvmuxer::kMkvCues) + if (id == libwebm::kMkvCues) break; pos += len; // consume ID field @@ -5864,7 +6084,7 @@ long Cluster::Load(long long& pos, long& len) const { if ((cluster_stop >= 0) && ((pos + size) > cluster_stop)) return E_FILE_FORMAT_INVALID; - if (id == mkvmuxer::kMkvTimecode) { + if (id == libwebm::kMkvTimecode) { len = static_cast(size); if ((pos + size) > avail) @@ -5879,10 +6099,10 @@ long Cluster::Load(long long& pos, long& len) const { if (bBlock) break; - } else if (id == mkvmuxer::kMkvBlockGroup) { + } else if (id == libwebm::kMkvBlockGroup) { bBlock = true; break; - } else if (id == mkvmuxer::kMkvSimpleBlock) { + } else if (id == libwebm::kMkvSimpleBlock) { bBlock = true; break; } @@ -5980,7 +6200,7 @@ long Cluster::Parse(long long& pos, long& len) const { // that we have exhausted the sub-element's inside the cluster // whose ID we parsed earlier. 
- if ((id == mkvmuxer::kMkvCluster) || (id == mkvmuxer::kMkvCues)) { + if ((id == libwebm::kMkvCluster) || (id == libwebm::kMkvCues)) { if (m_element_size < 0) m_element_size = pos - m_element_start; @@ -6035,8 +6255,7 @@ long Cluster::Parse(long long& pos, long& len) const { if (cluster_stop >= 0) { if (block_stop > cluster_stop) { - if (id == mkvmuxer::kMkvBlockGroup || - id == mkvmuxer::kMkvSimpleBlock) { + if (id == libwebm::kMkvBlockGroup || id == libwebm::kMkvSimpleBlock) { return E_FILE_FORMAT_INVALID; } @@ -6054,10 +6273,10 @@ long Cluster::Parse(long long& pos, long& len) const { Cluster* const this_ = const_cast(this); - if (id == mkvmuxer::kMkvBlockGroup) + if (id == libwebm::kMkvBlockGroup) return this_->ParseBlockGroup(size, pos, len); - if (id == mkvmuxer::kMkvSimpleBlock) + if (id == libwebm::kMkvSimpleBlock) return this_->ParseSimpleBlock(size, pos, len); pos += size; // consume payload @@ -6188,8 +6407,7 @@ long Cluster::ParseSimpleBlock(long long block_size, long long& pos, return E_BUFFER_NOT_FULL; } - status = CreateBlock(mkvmuxer::kMkvSimpleBlock, - block_start, block_size, + status = CreateBlock(libwebm::kMkvSimpleBlock, block_start, block_size, 0); // DiscardPadding if (status != 0) @@ -6299,14 +6517,14 @@ long Cluster::ParseBlockGroup(long long payload_size, long long& pos, if (size == unknown_size) return E_FILE_FORMAT_INVALID; - if (id == mkvmuxer::kMkvDiscardPadding) { + if (id == libwebm::kMkvDiscardPadding) { status = UnserializeInt(pReader, pos, size, discard_padding); if (status < 0) // error return status; } - if (id != mkvmuxer::kMkvBlock) { + if (id != libwebm::kMkvBlock) { pos += size; // consume sub-part of block group if (pos > payload_stop) @@ -6399,8 +6617,8 @@ long Cluster::ParseBlockGroup(long long payload_size, long long& pos, if (pos != payload_stop) return E_FILE_FORMAT_INVALID; - status = CreateBlock(mkvmuxer::kMkvBlockGroup, - payload_start, payload_size, discard_padding); + status = CreateBlock(libwebm::kMkvBlockGroup, 
payload_start, payload_size, + discard_padding); if (status != 0) return status; @@ -6565,7 +6783,7 @@ long Cluster::HasBlockEntries( if (id < 0) // error return static_cast(id); - if (id != mkvmuxer::kMkvCluster) + if (id != libwebm::kMkvCluster) return E_PARSE_FAILED; pos += len; // consume Cluster ID field @@ -6653,10 +6871,10 @@ long Cluster::HasBlockEntries( // that we have exhausted the sub-element's inside the cluster // whose ID we parsed earlier. - if (id == mkvmuxer::kMkvCluster) + if (id == libwebm::kMkvCluster) return 0; // no entries found - if (id == mkvmuxer::kMkvCues) + if (id == libwebm::kMkvCues) return 0; // no entries found pos += len; // consume id field @@ -6708,10 +6926,10 @@ long Cluster::HasBlockEntries( if ((cluster_stop >= 0) && ((pos + size) > cluster_stop)) return E_FILE_FORMAT_INVALID; - if (id == mkvmuxer::kMkvBlockGroup) + if (id == libwebm::kMkvBlockGroup) return 1; // have at least one entry - if (id == mkvmuxer::kMkvSimpleBlock) + if (id == libwebm::kMkvSimpleBlock) return 1; // have at least one entry pos += size; // consume payload @@ -6786,7 +7004,7 @@ long long Cluster::GetLastTime() const { long Cluster::CreateBlock(long long id, long long pos, // absolute pos of payload long long size, long long discard_padding) { - if (id != mkvmuxer::kMkvBlockGroup && id != mkvmuxer::kMkvSimpleBlock) + if (id != libwebm::kMkvBlockGroup && id != libwebm::kMkvSimpleBlock) return E_PARSE_FAILED; if (m_entries_count < 0) { // haven't parsed anything yet @@ -6826,7 +7044,7 @@ long Cluster::CreateBlock(long long id, } } - if (id == mkvmuxer::kMkvBlockGroup) + if (id == libwebm::kMkvBlockGroup) return CreateBlockGroup(pos, size, discard_padding); else return CreateSimpleBlock(pos, size); @@ -6871,12 +7089,12 @@ long Cluster::CreateBlockGroup(long long start_offset, long long size, pos += len; // consume size - if (id == mkvmuxer::kMkvBlock) { + if (id == libwebm::kMkvBlock) { if (bpos < 0) { // Block ID bpos = pos; bsize = size; } - } else if (id 
== mkvmuxer::kMkvBlockDuration) { + } else if (id == libwebm::kMkvBlockDuration) { if (size > 8) return E_FILE_FORMAT_INVALID; @@ -6884,7 +7102,7 @@ long Cluster::CreateBlockGroup(long long start_offset, long long size, if (duration < 0) return E_FILE_FORMAT_INVALID; - } else if (id == mkvmuxer::kMkvReferenceBlock) { + } else if (id == libwebm::kMkvReferenceBlock) { if (size > 8 || size <= 0) return E_FILE_FORMAT_INVALID; const long size_ = static_cast(size); @@ -7231,7 +7449,6 @@ const BlockEntry* Cluster::GetEntry(const CuePoint& cp, BlockEntry::BlockEntry(Cluster* p, long idx) : m_pCluster(p), m_index(idx) {} BlockEntry::~BlockEntry() {} -bool BlockEntry::EOS() const { return (GetKind() == kBlockEOS); } const Cluster* BlockEntry::GetCluster() const { return m_pCluster; } long BlockEntry::GetIndex() const { return m_index; } @@ -7555,7 +7772,6 @@ long Block::Parse(const Cluster* pCluster) { if (pf >= pf_end) return E_FILE_FORMAT_INVALID; - const Frame& prev = *pf++; assert(prev.len == frame_size); if (prev.len != frame_size) @@ -7581,7 +7797,7 @@ long Block::Parse(const Cluster* pCluster) { if (pos > stop) return E_FILE_FORMAT_INVALID; - const int exp = 7 * len - 1; + const long exp = 7 * len - 1; const long long bias = (1LL << exp) - 1LL; const long long delta_size = delta_size_ - bias; @@ -7721,4 +7937,4 @@ long Block::Frame::Read(IMkvReader* pReader, unsigned char* buf) const { long long Block::GetDiscardPadding() const { return m_discard_padding; } -} // end namespace mkvparser +} // namespace mkvparser diff --git a/libs/libvpx/third_party/libwebm/mkvparser.hpp b/libs/libvpx/third_party/libwebm/mkvparser/mkvparser.h similarity index 90% rename from libs/libvpx/third_party/libwebm/mkvparser.hpp rename to libs/libvpx/third_party/libwebm/mkvparser/mkvparser.h index 75ef69d76d..42e6e88ab4 100644 --- a/libs/libvpx/third_party/libwebm/mkvparser.hpp +++ b/libs/libvpx/third_party/libwebm/mkvparser/mkvparser.h @@ -5,13 +5,10 @@ // tree. 
An additional intellectual property rights grant can be found // in the file PATENTS. All contributing project authors may // be found in the AUTHORS file in the root of the source tree. - -#ifndef MKVPARSER_HPP -#define MKVPARSER_HPP +#ifndef MKVPARSER_MKVPARSER_H_ +#define MKVPARSER_MKVPARSER_H_ #include -#include -#include namespace mkvparser { @@ -28,8 +25,9 @@ class IMkvReader { virtual ~IMkvReader(); }; -template Type* SafeArrayAlloc(unsigned long long num_elements, - unsigned long long element_size); +template +Type* SafeArrayAlloc(unsigned long long num_elements, + unsigned long long element_size); long long GetUIntLength(IMkvReader*, long long, long&); long long ReadUInt(IMkvReader*, long long, long&); long long ReadID(IMkvReader* pReader, long long pos, long& len); @@ -128,7 +126,7 @@ class BlockEntry { public: virtual ~BlockEntry(); - bool EOS() const; + bool EOS() const { return (GetKind() == kBlockEOS); } const Cluster* GetCluster() const; long GetIndex() const; virtual const Block* GetBlock() const = 0; @@ -391,6 +389,90 @@ class Track { ContentEncoding** content_encoding_entries_end_; }; +struct PrimaryChromaticity { + PrimaryChromaticity() : x(0), y(0) {} + ~PrimaryChromaticity() {} + static bool Parse(IMkvReader* reader, long long read_pos, + long long value_size, bool is_x, + PrimaryChromaticity** chromaticity); + float x; + float y; +}; + +struct MasteringMetadata { + static const float kValueNotPresent; + + MasteringMetadata() + : r(NULL), + g(NULL), + b(NULL), + white_point(NULL), + luminance_max(kValueNotPresent), + luminance_min(kValueNotPresent) {} + ~MasteringMetadata() { + delete r; + delete g; + delete b; + delete white_point; + } + + static bool Parse(IMkvReader* reader, long long element_start, + long long element_size, + MasteringMetadata** mastering_metadata); + + PrimaryChromaticity* r; + PrimaryChromaticity* g; + PrimaryChromaticity* b; + PrimaryChromaticity* white_point; + float luminance_max; + float luminance_min; +}; + +struct 
Colour { + static const long long kValueNotPresent; + + // Unless otherwise noted all values assigned upon construction are the + // equivalent of unspecified/default. + Colour() + : matrix_coefficients(kValueNotPresent), + bits_per_channel(kValueNotPresent), + chroma_subsampling_horz(kValueNotPresent), + chroma_subsampling_vert(kValueNotPresent), + cb_subsampling_horz(kValueNotPresent), + cb_subsampling_vert(kValueNotPresent), + chroma_siting_horz(kValueNotPresent), + chroma_siting_vert(kValueNotPresent), + range(kValueNotPresent), + transfer_characteristics(kValueNotPresent), + primaries(kValueNotPresent), + max_cll(kValueNotPresent), + max_fall(kValueNotPresent), + mastering_metadata(NULL) {} + ~Colour() { + delete mastering_metadata; + mastering_metadata = NULL; + } + + static bool Parse(IMkvReader* reader, long long element_start, + long long element_size, Colour** colour); + + long long matrix_coefficients; + long long bits_per_channel; + long long chroma_subsampling_horz; + long long chroma_subsampling_vert; + long long cb_subsampling_horz; + long long cb_subsampling_vert; + long long chroma_siting_horz; + long long chroma_siting_vert; + long long range; + long long transfer_characteristics; + long long primaries; + long long max_cll; + long long max_fall; + + MasteringMetadata* mastering_metadata; +}; + class VideoTrack : public Track { VideoTrack(const VideoTrack&); VideoTrack& operator=(const VideoTrack&); @@ -398,6 +480,7 @@ class VideoTrack : public Track { VideoTrack(Segment*, long long element_start, long long element_size); public: + virtual ~VideoTrack(); static long Parse(Segment*, const Info&, long long element_start, long long element_size, VideoTrack*&); @@ -412,6 +495,8 @@ class VideoTrack : public Track { bool VetEntry(const BlockEntry*) const; long Seek(long long time_ns, const BlockEntry*&) const; + Colour* GetColour() const; + private: long long m_width; long long m_height; @@ -421,6 +506,8 @@ class VideoTrack : public Track { long long 
m_stereo_mode; double m_rate; + + Colour* m_colour; }; class AudioTrack : public Track { @@ -1013,7 +1100,7 @@ class Segment { const BlockEntry* GetBlock(const CuePoint&, const CuePoint::TrackPosition&); }; -} // end namespace mkvparser +} // namespace mkvparser inline long mkvparser::Segment::LoadCluster() { long long pos; @@ -1022,4 +1109,4 @@ inline long mkvparser::Segment::LoadCluster() { return LoadCluster(pos, size); } -#endif // MKVPARSER_HPP +#endif // MKVPARSER_MKVPARSER_H_ diff --git a/libs/libvpx/third_party/libwebm/mkvreader.cpp b/libs/libvpx/third_party/libwebm/mkvparser/mkvreader.cc similarity index 97% rename from libs/libvpx/third_party/libwebm/mkvreader.cpp rename to libs/libvpx/third_party/libwebm/mkvparser/mkvreader.cc index eaf9e0a799..9f90d8c4f8 100644 --- a/libs/libvpx/third_party/libwebm/mkvreader.cpp +++ b/libs/libvpx/third_party/libwebm/mkvparser/mkvreader.cc @@ -5,8 +5,7 @@ // tree. An additional intellectual property rights grant can be found // in the file PATENTS. All contributing project authors may // be found in the AUTHORS file in the root of the source tree. - -#include "mkvreader.hpp" +#include "mkvparser/mkvreader.h" #include @@ -129,4 +128,4 @@ int MkvReader::Read(long long offset, long len, unsigned char* buffer) { return 0; // success } -} // end namespace mkvparser +} // namespace mkvparser \ No newline at end of file diff --git a/libs/libvpx/third_party/libwebm/mkvreader.hpp b/libs/libvpx/third_party/libwebm/mkvparser/mkvreader.h similarity index 87% rename from libs/libvpx/third_party/libwebm/mkvreader.hpp rename to libs/libvpx/third_party/libwebm/mkvparser/mkvreader.h index 82ebad5444..9831ecf645 100644 --- a/libs/libvpx/third_party/libwebm/mkvreader.hpp +++ b/libs/libvpx/third_party/libwebm/mkvparser/mkvreader.h @@ -5,13 +5,13 @@ // tree. An additional intellectual property rights grant can be found // in the file PATENTS. All contributing project authors may // be found in the AUTHORS file in the root of the source tree. 
+#ifndef MKVPARSER_MKVREADER_H_ +#define MKVPARSER_MKVREADER_H_ -#ifndef MKVREADER_HPP -#define MKVREADER_HPP - -#include "mkvparser.hpp" #include +#include "mkvparser/mkvparser.h" + namespace mkvparser { class MkvReader : public IMkvReader { @@ -40,6 +40,6 @@ class MkvReader : public IMkvReader { bool reader_owns_file_; }; -} // end namespace mkvparser +} // namespace mkvparser -#endif // MKVREADER_HPP +#endif // MKVPARSER_MKVREADER_H_ diff --git a/libs/libvpx/third_party/libyuv/README.libvpx b/libs/libvpx/third_party/libyuv/README.libvpx index 09693c1f2c..485f79c0ff 100644 --- a/libs/libvpx/third_party/libyuv/README.libvpx +++ b/libs/libvpx/third_party/libyuv/README.libvpx @@ -1,6 +1,6 @@ Name: libyuv -URL: http://code.google.com/p/libyuv/ -Version: 1456 +URL: https://chromium.googlesource.com/libyuv/libyuv +Version: de944ed8c74909ea6fbd743a22efe1e55e851b83 License: BSD License File: LICENSE @@ -13,3 +13,10 @@ which down-samples the original input video (f.g. 1280x720) a number of times in order to encode multiple resolution bit streams. 
Local Modifications: +rm -rf .gitignore .gn AUTHORS Android.mk BUILD.gn CMakeLists.txt DEPS LICENSE \ + LICENSE_THIRD_PARTY OWNERS PATENTS PRESUBMIT.py README.chromium README.md \ + all.gyp build_overrides/ chromium/ codereview.settings docs/ \ + download_vs_toolchain.py gyp_libyuv gyp_libyuv.py include/libyuv.h \ + include/libyuv/compare_row.h libyuv.gyp libyuv.gypi libyuv_nacl.gyp \ + libyuv_test.gyp linux.mk public.mk setup_links.py sync_chromium.py \ + third_party/ tools/ unit_test/ util/ winarm.mk diff --git a/libs/libvpx/third_party/libyuv/include/libyuv/basic_types.h b/libs/libvpx/third_party/libyuv/include/libyuv/basic_types.h index beb750ba65..54a2181430 100644 --- a/libs/libvpx/third_party/libyuv/include/libyuv/basic_types.h +++ b/libs/libvpx/third_party/libyuv/include/libyuv/basic_types.h @@ -13,7 +13,7 @@ #include // for NULL, size_t -#if defined(__ANDROID__) || (defined(_MSC_VER) && (_MSC_VER < 1600)) +#if defined(_MSC_VER) && (_MSC_VER < 1600) #include // for uintptr_t on x86 #else #include // for uintptr_t diff --git a/libs/libvpx/third_party/libyuv/include/libyuv/convert.h b/libs/libvpx/third_party/libyuv/include/libyuv/convert.h index a8d3fa07ac..fcfcf544e1 100644 --- a/libs/libvpx/third_party/libyuv/include/libyuv/convert.h +++ b/libs/libvpx/third_party/libyuv/include/libyuv/convert.h @@ -12,10 +12,13 @@ #define INCLUDE_LIBYUV_CONVERT_H_ #include "libyuv/basic_types.h" -// TODO(fbarchard): Remove the following headers includes. -#include "libyuv/convert_from.h" -#include "libyuv/planar_functions.h" -#include "libyuv/rotate.h" + +#include "libyuv/rotate.h" // For enum RotationMode. + +// TODO(fbarchard): fix WebRTC source to include following libyuv headers: +#include "libyuv/convert_argb.h" // For WebRTC I420ToARGB. b/620 +#include "libyuv/convert_from.h" // For WebRTC ConvertFromI420. b/620 +#include "libyuv/planar_functions.h" // For WebRTC I420Rect, CopyPlane. 
b/618 #ifdef __cplusplus namespace libyuv { @@ -115,6 +118,17 @@ int M420ToI420(const uint8* src_m420, int src_stride_m420, uint8* dst_v, int dst_stride_v, int width, int height); +// Convert Android420 to I420. +LIBYUV_API +int Android420ToI420(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + int pixel_stride_uv, + uint8* dst_y, int dst_stride_y, + uint8* dst_u, int dst_stride_u, + uint8* dst_v, int dst_stride_v, + int width, int height); + // ARGB little endian (bgra in memory) to I420. LIBYUV_API int ARGBToI420(const uint8* src_frame, int src_stride_frame, diff --git a/libs/libvpx/third_party/libyuv/include/libyuv/convert_argb.h b/libs/libvpx/third_party/libyuv/include/libyuv/convert_argb.h index 360c6d3593..19672f3269 100644 --- a/libs/libvpx/third_party/libyuv/include/libyuv/convert_argb.h +++ b/libs/libvpx/third_party/libyuv/include/libyuv/convert_argb.h @@ -12,10 +12,8 @@ #define INCLUDE_LIBYUV_CONVERT_ARGB_H_ #include "libyuv/basic_types.h" -// TODO(fbarchard): Remove the following headers includes -#include "libyuv/convert_from.h" -#include "libyuv/planar_functions.h" -#include "libyuv/rotate.h" + +#include "libyuv/rotate.h" // For enum RotationMode. // TODO(fbarchard): This set of functions should exactly match convert.h // TODO(fbarchard): Add tests. Create random content of right size and convert @@ -44,6 +42,14 @@ int I420ToARGB(const uint8* src_y, int src_stride_y, uint8* dst_argb, int dst_stride_argb, int width, int height); +// Duplicate prototype for function in convert_from.h for remoting. +LIBYUV_API +int I420ToABGR(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_argb, int dst_stride_argb, + int width, int height); + // Convert I422 to ARGB. 
LIBYUV_API int I422ToARGB(const uint8* src_y, int src_stride_y, @@ -60,6 +66,22 @@ int I444ToARGB(const uint8* src_y, int src_stride_y, uint8* dst_argb, int dst_stride_argb, int width, int height); +// Convert J444 to ARGB. +LIBYUV_API +int J444ToARGB(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_argb, int dst_stride_argb, + int width, int height); + +// Convert I444 to ABGR. +LIBYUV_API +int I444ToABGR(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_abgr, int dst_stride_abgr, + int width, int height); + // Convert I411 to ARGB. LIBYUV_API int I411ToARGB(const uint8* src_y, int src_stride_y, @@ -68,6 +90,24 @@ int I411ToARGB(const uint8* src_y, int src_stride_y, uint8* dst_argb, int dst_stride_argb, int width, int height); +// Convert I420 with Alpha to preattenuated ARGB. +LIBYUV_API +int I420AlphaToARGB(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + const uint8* src_a, int src_stride_a, + uint8* dst_argb, int dst_stride_argb, + int width, int height, int attenuate); + +// Convert I420 with Alpha to preattenuated ABGR. +LIBYUV_API +int I420AlphaToABGR(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + const uint8* src_a, int src_stride_a, + uint8* dst_abgr, int dst_stride_abgr, + int width, int height, int attenuate); + // Convert I400 (grey) to ARGB. Reverse of ARGBToI400. LIBYUV_API int I400ToARGB(const uint8* src_y, int src_stride_y, @@ -131,6 +171,54 @@ int J422ToARGB(const uint8* src_y, int src_stride_y, uint8* dst_argb, int dst_stride_argb, int width, int height); +// Convert J420 to ABGR. 
+LIBYUV_API +int J420ToABGR(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_abgr, int dst_stride_abgr, + int width, int height); + +// Convert J422 to ABGR. +LIBYUV_API +int J422ToABGR(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_abgr, int dst_stride_abgr, + int width, int height); + +// Convert H420 to ARGB. +LIBYUV_API +int H420ToARGB(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_argb, int dst_stride_argb, + int width, int height); + +// Convert H422 to ARGB. +LIBYUV_API +int H422ToARGB(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_argb, int dst_stride_argb, + int width, int height); + +// Convert H420 to ABGR. +LIBYUV_API +int H420ToABGR(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_abgr, int dst_stride_abgr, + int width, int height); + +// Convert H422 to ABGR. +LIBYUV_API +int H422ToABGR(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_abgr, int dst_stride_abgr, + int width, int height); + // BGRA little endian (argb in memory) to ARGB. 
LIBYUV_API int BGRAToARGB(const uint8* src_frame, int src_stride_frame, diff --git a/libs/libvpx/third_party/libyuv/include/libyuv/convert_from.h b/libs/libvpx/third_party/libyuv/include/libyuv/convert_from.h index 9fd8d4de5f..39e1578a0e 100644 --- a/libs/libvpx/third_party/libyuv/include/libyuv/convert_from.h +++ b/libs/libvpx/third_party/libyuv/include/libyuv/convert_from.h @@ -56,8 +56,6 @@ int I400Copy(const uint8* src_y, int src_stride_y, uint8* dst_y, int dst_stride_y, int width, int height); -// TODO(fbarchard): I420ToM420 - LIBYUV_API int I420ToNV12(const uint8* src_y, int src_stride_y, const uint8* src_u, int src_stride_u, diff --git a/libs/libvpx/third_party/libyuv/include/libyuv/cpu_id.h b/libs/libvpx/third_party/libyuv/include/libyuv/cpu_id.h index dc858a814a..dfb7445e2f 100644 --- a/libs/libvpx/third_party/libyuv/include/libyuv/cpu_id.h +++ b/libs/libvpx/third_party/libyuv/include/libyuv/cpu_id.h @@ -18,9 +18,8 @@ namespace libyuv { extern "C" { #endif -// TODO(fbarchard): Consider overlapping bits for different architectures. // Internal flag to indicate cpuid requires initialization. -#define kCpuInit 0x1 +static const int kCpuInitialized = 0x1; // These flags are only valid on ARM processors. static const int kCpuHasARM = 0x2; @@ -37,12 +36,12 @@ static const int kCpuHasAVX = 0x200; static const int kCpuHasAVX2 = 0x400; static const int kCpuHasERMS = 0x800; static const int kCpuHasFMA3 = 0x1000; +static const int kCpuHasAVX3 = 0x2000; // 0x2000, 0x4000, 0x8000 reserved for future X86 flags. // These flags are only valid on MIPS processors. static const int kCpuHasMIPS = 0x10000; -static const int kCpuHasMIPS_DSP = 0x20000; -static const int kCpuHasMIPS_DSPR2 = 0x40000; +static const int kCpuHasDSPR2 = 0x20000; // Internal function used to auto-init. 
LIBYUV_API @@ -57,13 +56,13 @@ int ArmCpuCaps(const char* cpuinfo_name); // returns non-zero if instruction set is detected static __inline int TestCpuFlag(int test_flag) { LIBYUV_API extern int cpu_info_; - return (cpu_info_ == kCpuInit ? InitCpuFlags() : cpu_info_) & test_flag; + return (!cpu_info_ ? InitCpuFlags() : cpu_info_) & test_flag; } // For testing, allow CPU flags to be disabled. // ie MaskCpuFlags(~kCpuHasSSSE3) to disable SSSE3. // MaskCpuFlags(-1) to enable all cpu specific optimizations. -// MaskCpuFlags(0) to disable all cpu specific optimizations. +// MaskCpuFlags(1) to disable all cpu specific optimizations. LIBYUV_API void MaskCpuFlags(int enable_flags); diff --git a/libs/libvpx/third_party/libyuv/include/libyuv/planar_functions.h b/libs/libvpx/third_party/libyuv/include/libyuv/planar_functions.h index ae994db899..9662516c57 100644 --- a/libs/libvpx/third_party/libyuv/include/libyuv/planar_functions.h +++ b/libs/libvpx/third_party/libyuv/include/libyuv/planar_functions.h @@ -39,6 +39,20 @@ void SetPlane(uint8* dst_y, int dst_stride_y, int width, int height, uint32 value); +// Split interleaved UV plane into separate U and V planes. +LIBYUV_API +void SplitUVPlane(const uint8* src_uv, int src_stride_uv, + uint8* dst_u, int dst_stride_u, + uint8* dst_v, int dst_stride_v, + int width, int height); + +// Merge separate U and V planes into one interleaved UV plane. +LIBYUV_API +void MergeUVPlane(const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_uv, int dst_stride_uv, + int width, int height); + // Copy I400. Supports inverting. LIBYUV_API int I400ToI400(const uint8* src_y, int src_stride_y, @@ -145,13 +159,6 @@ int NV12ToRGB565(const uint8* src_y, int src_stride_y, uint8* dst_rgb565, int dst_stride_rgb565, int width, int height); -// Convert NV21 to RGB565. 
-LIBYUV_API -int NV21ToRGB565(const uint8* src_y, int src_stride_y, - const uint8* src_uv, int src_stride_uv, - uint8* dst_rgb565, int dst_stride_rgb565, - int width, int height); - // I422ToARGB is in convert_argb.h // Convert I422 to BGRA. LIBYUV_API @@ -177,6 +184,14 @@ int I422ToRGBA(const uint8* src_y, int src_stride_y, uint8* dst_rgba, int dst_stride_rgba, int width, int height); +// Alias +#define RGB24ToRAW RAWToRGB24 + +LIBYUV_API +int RAWToRGB24(const uint8* src_raw, int src_stride_raw, + uint8* dst_rgb24, int dst_stride_rgb24, + int width, int height); + // Draw a rectangle into I420. LIBYUV_API int I420Rect(uint8* dst_y, int dst_stride_y, @@ -281,13 +296,19 @@ int ARGBCopy(const uint8* src_argb, int src_stride_argb, uint8* dst_argb, int dst_stride_argb, int width, int height); -// Copy ARGB to ARGB. +// Copy Alpha channel of ARGB to alpha of ARGB. LIBYUV_API int ARGBCopyAlpha(const uint8* src_argb, int src_stride_argb, uint8* dst_argb, int dst_stride_argb, int width, int height); -// Copy ARGB to ARGB. +// Extract the alpha channel from ARGB. +LIBYUV_API +int ARGBExtractAlpha(const uint8* src_argb, int src_stride_argb, + uint8* dst_a, int dst_stride_a, + int width, int height); + +// Copy Y channel to Alpha of ARGB. LIBYUV_API int ARGBCopyYToAlpha(const uint8* src_y, int src_stride_y, uint8* dst_argb, int dst_stride_argb, @@ -301,6 +322,7 @@ LIBYUV_API ARGBBlendRow GetARGBBlend(); // Alpha Blend ARGB images and store to destination. +// Source is pre-multiplied by alpha using ARGBAttenuate. // Alpha of destination is set to 255. LIBYUV_API int ARGBBlend(const uint8* src_argb0, int src_stride_argb0, @@ -308,6 +330,31 @@ int ARGBBlend(const uint8* src_argb0, int src_stride_argb0, uint8* dst_argb, int dst_stride_argb, int width, int height); +// Alpha Blend plane and store to destination. +// Source is not pre-multiplied by alpha. 
+LIBYUV_API +int BlendPlane(const uint8* src_y0, int src_stride_y0, + const uint8* src_y1, int src_stride_y1, + const uint8* alpha, int alpha_stride, + uint8* dst_y, int dst_stride_y, + int width, int height); + +// Alpha Blend YUV images and store to destination. +// Source is not pre-multiplied by alpha. +// Alpha is full width x height and subsampled to half size to apply to UV. +LIBYUV_API +int I420Blend(const uint8* src_y0, int src_stride_y0, + const uint8* src_u0, int src_stride_u0, + const uint8* src_v0, int src_stride_v0, + const uint8* src_y1, int src_stride_y1, + const uint8* src_u1, int src_stride_u1, + const uint8* src_v1, int src_stride_v1, + const uint8* alpha, int alpha_stride, + uint8* dst_y, int dst_stride_y, + uint8* dst_u, int dst_stride_u, + uint8* dst_v, int dst_stride_v, + int width, int height); + // Multiply ARGB image by ARGB image. Shifted down by 8. Saturates to 255. LIBYUV_API int ARGBMultiply(const uint8* src_argb0, int src_stride_argb0, @@ -357,12 +404,6 @@ int ARGBUnattenuate(const uint8* src_argb, int src_stride_argb, uint8* dst_argb, int dst_stride_argb, int width, int height); -// Convert MJPG to ARGB. -LIBYUV_API -int MJPGToARGB(const uint8* sample, size_t sample_size, - uint8* argb, int argb_stride, - int w, int h, int dw, int dh); - // Internal function - do not call directly. // Computes table of cumulative sum for image where the value is the sum // of all values above and to the left of the entry. Used by ARGBBlur. @@ -389,22 +430,49 @@ int ARGBShade(const uint8* src_argb, int src_stride_argb, uint8* dst_argb, int dst_stride_argb, int width, int height, uint32 value); -// Interpolate between two ARGB images using specified amount of interpolation +// Interpolate between two images using specified amount of interpolation // (0 to 255) and store to destination. -// 'interpolation' is specified as 8 bit fraction where 0 means 100% src_argb0 -// and 255 means 1% src_argb0 and 99% src_argb1. 
-// Internally uses ARGBScale bilinear filtering. -// Caveat: This function will write up to 16 bytes beyond the end of dst_argb. +// 'interpolation' is specified as 8 bit fraction where 0 means 100% src0 +// and 255 means 1% src0 and 99% src1. +LIBYUV_API +int InterpolatePlane(const uint8* src0, int src_stride0, + const uint8* src1, int src_stride1, + uint8* dst, int dst_stride, + int width, int height, int interpolation); + +// Interpolate between two ARGB images using specified amount of interpolation +// Internally calls InterpolatePlane with width * 4 (bpp). LIBYUV_API int ARGBInterpolate(const uint8* src_argb0, int src_stride_argb0, const uint8* src_argb1, int src_stride_argb1, uint8* dst_argb, int dst_stride_argb, int width, int height, int interpolation); +// Interpolate between two YUV images using specified amount of interpolation +// Internally calls InterpolatePlane on each plane where the U and V planes +// are half width and half height. +LIBYUV_API +int I420Interpolate(const uint8* src0_y, int src0_stride_y, + const uint8* src0_u, int src0_stride_u, + const uint8* src0_v, int src0_stride_v, + const uint8* src1_y, int src1_stride_y, + const uint8* src1_u, int src1_stride_u, + const uint8* src1_v, int src1_stride_v, + uint8* dst_y, int dst_stride_y, + uint8* dst_u, int dst_stride_u, + uint8* dst_v, int dst_stride_v, + int width, int height, int interpolation); + #if defined(__pnacl__) || defined(__CLR_VER) || \ (defined(__i386__) && !defined(__SSE2__)) #define LIBYUV_DISABLE_X86 #endif +// MemorySanitizer does not support assembly code yet. 
http://crbug.com/344505 +#if defined(__has_feature) +#if __has_feature(memory_sanitizer) +#define LIBYUV_DISABLE_X86 +#endif +#endif // The following are available on all x86 platforms: #if !defined(LIBYUV_DISABLE_X86) && \ (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__)) diff --git a/libs/libvpx/third_party/libyuv/include/libyuv/rotate_row.h b/libs/libvpx/third_party/libyuv/include/libyuv/rotate_row.h index c41cf32735..ebc487f9ab 100644 --- a/libs/libvpx/third_party/libyuv/include/libyuv/rotate_row.h +++ b/libs/libvpx/third_party/libyuv/include/libyuv/rotate_row.h @@ -22,53 +22,24 @@ extern "C" { (defined(__i386__) && !defined(__SSE2__)) #define LIBYUV_DISABLE_X86 #endif - -// Visual C 2012 required for AVX2. -#if defined(_M_IX86) && !defined(__clang__) && \ - defined(_MSC_VER) && _MSC_VER >= 1700 -#define VISUALC_HAS_AVX2 1 -#endif // VisualStudio >= 2012 - -// TODO(fbarchard): switch to standard form of inline; fails on clangcl. -#if !defined(LIBYUV_DISABLE_X86) && \ - (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__)) -#if defined(__APPLE__) && defined(__i386__) -#define DECLARE_FUNCTION(name) \ - ".text \n" \ - ".private_extern _" #name " \n" \ - ".align 4,0x90 \n" \ -"_" #name ": \n" -#elif defined(__MINGW32__) || defined(__CYGWIN__) && defined(__i386__) -#define DECLARE_FUNCTION(name) \ - ".text \n" \ - ".align 4,0x90 \n" \ -"_" #name ": \n" -#else -#define DECLARE_FUNCTION(name) \ - ".text \n" \ - ".align 4,0x90 \n" \ -#name ": \n" +// MemorySanitizer does not support assembly code yet. 
http://crbug.com/344505 +#if defined(__has_feature) +#if __has_feature(memory_sanitizer) +#define LIBYUV_DISABLE_X86 #endif #endif - -// The following are available for Visual C: -#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && \ - defined(_MSC_VER) && !defined(__clang__) +// The following are available for Visual C and clangcl 32 bit: +#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) #define HAS_TRANSPOSEWX8_SSSE3 #define HAS_TRANSPOSEUVWX8_SSE2 #endif -// The following are available for GCC but not NaCL: +// The following are available for GCC 32 or 64 bit but not NaCL for 64 bit: #if !defined(LIBYUV_DISABLE_X86) && \ (defined(__i386__) || (defined(__x86_64__) && !defined(__native_client__))) #define HAS_TRANSPOSEWX8_SSSE3 #endif -// The following are available for 32 bit GCC: -#if !defined(LIBYUV_DISABLE_X86) && defined(__i386__) && !defined(__clang__) -#define HAS_TRANSPOSEUVWX8_SSE2 -#endif - // The following are available for 64 bit GCC but not NaCL: #if !defined(LIBYUV_DISABLE_X86) && !defined(__native_client__) && \ defined(__x86_64__) @@ -85,8 +56,8 @@ extern "C" { #if !defined(LIBYUV_DISABLE_MIPS) && !defined(__native_client__) && \ defined(__mips__) && \ defined(__mips_dsp) && (__mips_dsp_rev >= 2) -#define HAS_TRANSPOSEWX8_MIPS_DSPR2 -#define HAS_TRANSPOSEUVWx8_MIPS_DSPR2 +#define HAS_TRANSPOSEWX8_DSPR2 +#define HAS_TRANSPOSEUVWX8_DSPR2 #endif // defined(__mips__) void TransposeWxH_C(const uint8* src, int src_stride, @@ -100,7 +71,9 @@ void TransposeWx8_SSSE3(const uint8* src, int src_stride, uint8* dst, int dst_stride, int width); void TransposeWx8_Fast_SSSE3(const uint8* src, int src_stride, uint8* dst, int dst_stride, int width); -void TransposeWx8_MIPS_DSPR2(const uint8* src, int src_stride, +void TransposeWx8_DSPR2(const uint8* src, int src_stride, + uint8* dst, int dst_stride, int width); +void TransposeWx8_Fast_DSPR2(const uint8* src, int src_stride, uint8* dst, int dst_stride, int width); void TransposeWx8_Any_NEON(const uint8* src, 
int src_stride, @@ -109,8 +82,8 @@ void TransposeWx8_Any_SSSE3(const uint8* src, int src_stride, uint8* dst, int dst_stride, int width); void TransposeWx8_Fast_Any_SSSE3(const uint8* src, int src_stride, uint8* dst, int dst_stride, int width); -void TransposeWx8_Any_MIPS_DSPR2(const uint8* src, int src_stride, - uint8* dst, int dst_stride, int width); +void TransposeWx8_Any_DSPR2(const uint8* src, int src_stride, + uint8* dst, int dst_stride, int width); void TransposeUVWxH_C(const uint8* src, int src_stride, uint8* dst_a, int dst_stride_a, @@ -126,9 +99,19 @@ void TransposeUVWx8_SSE2(const uint8* src, int src_stride, void TransposeUVWx8_NEON(const uint8* src, int src_stride, uint8* dst_a, int dst_stride_a, uint8* dst_b, int dst_stride_b, int width); -void TransposeUVWx8_MIPS_DSPR2(const uint8* src, int src_stride, - uint8* dst_a, int dst_stride_a, - uint8* dst_b, int dst_stride_b, int width); +void TransposeUVWx8_DSPR2(const uint8* src, int src_stride, + uint8* dst_a, int dst_stride_a, + uint8* dst_b, int dst_stride_b, int width); + +void TransposeUVWx8_Any_SSE2(const uint8* src, int src_stride, + uint8* dst_a, int dst_stride_a, + uint8* dst_b, int dst_stride_b, int width); +void TransposeUVWx8_Any_NEON(const uint8* src, int src_stride, + uint8* dst_a, int dst_stride_a, + uint8* dst_b, int dst_stride_b, int width); +void TransposeUVWx8_Any_DSPR2(const uint8* src, int src_stride, + uint8* dst_a, int dst_stride_a, + uint8* dst_b, int dst_stride_b, int width); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/third_party/libyuv/include/libyuv/row.h b/libs/libvpx/third_party/libyuv/include/libyuv/row.h index ebae3e7195..013a7e53e3 100644 --- a/libs/libvpx/third_party/libyuv/include/libyuv/row.h +++ b/libs/libvpx/third_party/libyuv/include/libyuv/row.h @@ -41,6 +41,12 @@ extern "C" { (defined(__i386__) && !defined(__SSE2__)) #define LIBYUV_DISABLE_X86 #endif +// MemorySanitizer does not support assembly code yet. 
http://crbug.com/344505 +#if defined(__has_feature) +#if __has_feature(memory_sanitizer) +#define LIBYUV_DISABLE_X86 +#endif +#endif // True if compiling for SSSE3 as a requirement. #if defined(__SSSE3__) || (defined(_M_IX86_FP) && (_M_IX86_FP >= 3)) #define LIBYUV_SSSE3_ONLY @@ -56,6 +62,26 @@ extern "C" { #endif // clang >= 3.5 #endif // __clang__ +// GCC >= 4.7.0 required for AVX2. +#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) +#if (__GNUC__ > 4) || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 7)) +#define GCC_HAS_AVX2 1 +#endif // GNUC >= 4.7 +#endif // __GNUC__ + +// clang >= 3.4.0 required for AVX2. +#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__)) +#if (__clang_major__ > 3) || (__clang_major__ == 3 && (__clang_minor__ >= 4)) +#define CLANG_HAS_AVX2 1 +#endif // clang >= 3.4 +#endif // __clang__ + +// Visual C 2012 required for AVX2. +#if defined(_M_IX86) && !defined(__clang__) && \ + defined(_MSC_VER) && _MSC_VER >= 1700 +#define VISUALC_HAS_AVX2 1 +#endif // VisualStudio >= 2012 + // The following are available on all x86 platforms: #if !defined(LIBYUV_DISABLE_X86) && \ (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__)) @@ -71,25 +97,23 @@ extern "C" { #define HAS_ARGBTOARGB4444ROW_SSE2 #define HAS_ARGBTORAWROW_SSSE3 #define HAS_ARGBTORGB24ROW_SSSE3 +#define HAS_ARGBTORGB565DITHERROW_SSE2 #define HAS_ARGBTORGB565ROW_SSE2 -#define HAS_ARGBTOUV422ROW_SSSE3 #define HAS_ARGBTOUV444ROW_SSSE3 #define HAS_ARGBTOUVJROW_SSSE3 #define HAS_ARGBTOUVROW_SSSE3 #define HAS_ARGBTOYJROW_SSSE3 #define HAS_ARGBTOYROW_SSSE3 +#define HAS_ARGBEXTRACTALPHAROW_SSE2 #define HAS_BGRATOUVROW_SSSE3 #define HAS_BGRATOYROW_SSSE3 #define HAS_COPYROW_ERMS #define HAS_COPYROW_SSE2 +#define HAS_H422TOARGBROW_SSSE3 #define HAS_I400TOARGBROW_SSE2 -#define HAS_I411TOARGBROW_SSSE3 -#define HAS_I422TOABGRROW_SSSE3 #define HAS_I422TOARGB1555ROW_SSSE3 #define HAS_I422TOARGB4444ROW_SSSE3 #define HAS_I422TOARGBROW_SSSE3 -#define HAS_I422TOBGRAROW_SSSE3 
-#define HAS_I422TORAWROW_SSSE3 #define HAS_I422TORGB24ROW_SSSE3 #define HAS_I422TORGB565ROW_SSSE3 #define HAS_I422TORGBAROW_SSSE3 @@ -99,15 +123,13 @@ extern "C" { #define HAS_J400TOARGBROW_SSE2 #define HAS_J422TOARGBROW_SSSE3 #define HAS_MERGEUVROW_SSE2 -#define HAS_MIRRORROW_SSE2 #define HAS_MIRRORROW_SSSE3 -#define HAS_MIRRORROW_UV_SSSE3 #define HAS_MIRRORUVROW_SSSE3 #define HAS_NV12TOARGBROW_SSSE3 #define HAS_NV12TORGB565ROW_SSSE3 #define HAS_NV21TOARGBROW_SSSE3 -#define HAS_NV21TORGB565ROW_SSSE3 #define HAS_RAWTOARGBROW_SSSE3 +#define HAS_RAWTORGB24ROW_SSSE3 #define HAS_RAWTOYROW_SSSE3 #define HAS_RGB24TOARGBROW_SSSE3 #define HAS_RGB24TOYROW_SSSE3 @@ -145,9 +167,9 @@ extern "C" { #define HAS_ARGBSHADEROW_SSE2 #define HAS_ARGBSUBTRACTROW_SSE2 #define HAS_ARGBUNATTENUATEROW_SSE2 +#define HAS_BLENDPLANEROW_SSSE3 #define HAS_COMPUTECUMULATIVESUMROW_SSE2 #define HAS_CUMULATIVESUMTOAVERAGEROW_SSE2 -#define HAS_INTERPOLATEROW_SSE2 #define HAS_INTERPOLATEROW_SSSE3 #define HAS_RGBCOLORTABLEROW_X86 #define HAS_SOBELROW_SSE2 @@ -155,54 +177,18 @@ extern "C" { #define HAS_SOBELXROW_SSE2 #define HAS_SOBELXYROW_SSE2 #define HAS_SOBELYROW_SSE2 + +// The following functions fail on gcc/clang 32 bit with fpic and framepointer. +// caveat: clangcl uses row_win.cc which works. +#if defined(NDEBUG) || !(defined(_DEBUG) && defined(__i386__)) || \ + !defined(__i386__) || defined(_MSC_VER) +// TODO(fbarchard): fix build error on x86 debug +// https://code.google.com/p/libyuv/issues/detail?id=524 +#define HAS_I411TOARGBROW_SSSE3 +// TODO(fbarchard): fix build error on android_full_debug=1 +// https://code.google.com/p/libyuv/issues/detail?id=517 +#define HAS_I422ALPHATOARGBROW_SSSE3 #endif - -// The following are available on x64 Visual C and clangcl. -#if !defined(LIBYUV_DISABLE_X86) && defined (_M_X64) && \ - (!defined(__clang__) || defined(__SSSE3__)) -#define HAS_I422TOARGBROW_SSSE3 -#endif - -// GCC >= 4.7.0 required for AVX2. 
-#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) -#if (__GNUC__ > 4) || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 7)) -#define GCC_HAS_AVX2 1 -#endif // GNUC >= 4.7 -#endif // __GNUC__ - -// clang >= 3.4.0 required for AVX2. -#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__)) -#if (__clang_major__ > 3) || (__clang_major__ == 3 && (__clang_minor__ >= 4)) -#define CLANG_HAS_AVX2 1 -#endif // clang >= 3.4 -#endif // __clang__ - -// Visual C 2012 required for AVX2. -#if defined(_M_IX86) && !defined(__clang__) && \ - defined(_MSC_VER) && _MSC_VER >= 1700 -#define VISUALC_HAS_AVX2 1 -#endif // VisualStudio >= 2012 - -// The following are available require VS2012. Port to GCC. -#if !defined(LIBYUV_DISABLE_X86) && defined(VISUALC_HAS_AVX2) -#define HAS_ARGB1555TOARGBROW_AVX2 -#define HAS_ARGB4444TOARGBROW_AVX2 -#define HAS_ARGBTOARGB1555ROW_AVX2 -#define HAS_ARGBTOARGB4444ROW_AVX2 -#define HAS_ARGBTORGB565DITHERROW_AVX2 -#define HAS_ARGBTORGB565DITHERROW_SSE2 -#define HAS_ARGBTORGB565ROW_AVX2 -#define HAS_I411TOARGBROW_AVX2 -#define HAS_I422TOARGB1555ROW_AVX2 -#define HAS_I422TOARGB4444ROW_AVX2 -#define HAS_I422TORGB565ROW_AVX2 -#define HAS_I444TOARGBROW_AVX2 -#define HAS_J400TOARGBROW_AVX2 -#define HAS_NV12TOARGBROW_AVX2 -#define HAS_NV12TORGB565ROW_AVX2 -#define HAS_NV21TOARGBROW_AVX2 -#define HAS_NV21TORGB565ROW_AVX2 -#define HAS_RGB565TOARGBROW_AVX2 #endif // The following are available on all x86 platforms, but @@ -215,21 +201,34 @@ extern "C" { #define HAS_ARGBMIRRORROW_AVX2 #define HAS_ARGBPOLYNOMIALROW_AVX2 #define HAS_ARGBSHUFFLEROW_AVX2 +#define HAS_ARGBTORGB565DITHERROW_AVX2 +#define HAS_ARGBTOUVJROW_AVX2 #define HAS_ARGBTOUVROW_AVX2 #define HAS_ARGBTOYJROW_AVX2 #define HAS_ARGBTOYROW_AVX2 #define HAS_COPYROW_AVX +#define HAS_H422TOARGBROW_AVX2 #define HAS_I400TOARGBROW_AVX2 -#define HAS_I422TOABGRROW_AVX2 +#if !(defined(_DEBUG) && defined(__i386__)) +// TODO(fbarchard): fix build error on android_full_debug=1 +// 
https://code.google.com/p/libyuv/issues/detail?id=517 +#define HAS_I422ALPHATOARGBROW_AVX2 +#endif +#define HAS_I411TOARGBROW_AVX2 +#define HAS_I422TOARGB1555ROW_AVX2 +#define HAS_I422TOARGB4444ROW_AVX2 #define HAS_I422TOARGBROW_AVX2 -#define HAS_I422TOBGRAROW_AVX2 -#define HAS_I422TORAWROW_AVX2 #define HAS_I422TORGB24ROW_AVX2 +#define HAS_I422TORGB565ROW_AVX2 #define HAS_I422TORGBAROW_AVX2 +#define HAS_I444TOARGBROW_AVX2 #define HAS_INTERPOLATEROW_AVX2 #define HAS_J422TOARGBROW_AVX2 #define HAS_MERGEUVROW_AVX2 #define HAS_MIRRORROW_AVX2 +#define HAS_NV12TOARGBROW_AVX2 +#define HAS_NV12TORGB565ROW_AVX2 +#define HAS_NV21TOARGBROW_AVX2 #define HAS_SPLITUVROW_AVX2 #define HAS_UYVYTOARGBROW_AVX2 #define HAS_UYVYTOUV422ROW_AVX2 @@ -246,15 +245,27 @@ extern "C" { #define HAS_ARGBMULTIPLYROW_AVX2 #define HAS_ARGBSUBTRACTROW_AVX2 #define HAS_ARGBUNATTENUATEROW_AVX2 +#define HAS_BLENDPLANEROW_AVX2 #endif -// The following are disabled when SSSE3 is available: -#if !defined(LIBYUV_DISABLE_X86) && \ - (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__)) && \ - !defined(LIBYUV_SSSE3_ONLY) -#define HAS_ARGBATTENUATEROW_SSE2 -#define HAS_ARGBBLENDROW_SSE2 -#define HAS_MIRRORROW_SSE2 +// The following are available for AVX2 Visual C and clangcl 32 bit: +// TODO(fbarchard): Port to gcc. +#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && \ + (defined(VISUALC_HAS_AVX2) || defined(CLANG_HAS_AVX2)) +#define HAS_ARGB1555TOARGBROW_AVX2 +#define HAS_ARGB4444TOARGBROW_AVX2 +#define HAS_ARGBTOARGB1555ROW_AVX2 +#define HAS_ARGBTOARGB4444ROW_AVX2 +#define HAS_ARGBTORGB565ROW_AVX2 +#define HAS_J400TOARGBROW_AVX2 +#define HAS_RGB565TOARGBROW_AVX2 +#endif + +// The following are also available on x64 Visual C. 
+#if !defined(LIBYUV_DISABLE_X86) && defined(_MSC_VER) && defined(_M_X64) && \ + (!defined(__clang__) || defined(__SSSE3__)) +#define HAS_I422ALPHATOARGBROW_SSSE3 +#define HAS_I422TOARGBROW_SSSE3 #endif // The following are available on Neon platforms: @@ -268,43 +279,44 @@ extern "C" { #define HAS_ARGB4444TOARGBROW_NEON #define HAS_ARGB4444TOUVROW_NEON #define HAS_ARGB4444TOYROW_NEON +#define HAS_ARGBSETROW_NEON #define HAS_ARGBTOARGB1555ROW_NEON #define HAS_ARGBTOARGB4444ROW_NEON #define HAS_ARGBTORAWROW_NEON #define HAS_ARGBTORGB24ROW_NEON +#define HAS_ARGBTORGB565DITHERROW_NEON #define HAS_ARGBTORGB565ROW_NEON #define HAS_ARGBTOUV411ROW_NEON -#define HAS_ARGBTOUV422ROW_NEON #define HAS_ARGBTOUV444ROW_NEON #define HAS_ARGBTOUVJROW_NEON #define HAS_ARGBTOUVROW_NEON #define HAS_ARGBTOYJROW_NEON #define HAS_ARGBTOYROW_NEON +#define HAS_ARGBEXTRACTALPHAROW_NEON #define HAS_BGRATOUVROW_NEON #define HAS_BGRATOYROW_NEON #define HAS_COPYROW_NEON -#define HAS_J400TOARGBROW_NEON +#define HAS_I400TOARGBROW_NEON #define HAS_I411TOARGBROW_NEON -#define HAS_I422TOABGRROW_NEON +#define HAS_I422ALPHATOARGBROW_NEON #define HAS_I422TOARGB1555ROW_NEON #define HAS_I422TOARGB4444ROW_NEON #define HAS_I422TOARGBROW_NEON -#define HAS_I422TOBGRAROW_NEON -#define HAS_I422TORAWROW_NEON #define HAS_I422TORGB24ROW_NEON #define HAS_I422TORGB565ROW_NEON #define HAS_I422TORGBAROW_NEON #define HAS_I422TOUYVYROW_NEON #define HAS_I422TOYUY2ROW_NEON #define HAS_I444TOARGBROW_NEON +#define HAS_J400TOARGBROW_NEON #define HAS_MERGEUVROW_NEON #define HAS_MIRRORROW_NEON #define HAS_MIRRORUVROW_NEON #define HAS_NV12TOARGBROW_NEON #define HAS_NV12TORGB565ROW_NEON #define HAS_NV21TOARGBROW_NEON -#define HAS_NV21TORGB565ROW_NEON #define HAS_RAWTOARGBROW_NEON +#define HAS_RAWTORGB24ROW_NEON #define HAS_RAWTOUVROW_NEON #define HAS_RAWTOYROW_NEON #define HAS_RGB24TOARGBROW_NEON @@ -316,29 +328,28 @@ extern "C" { #define HAS_RGBATOUVROW_NEON #define HAS_RGBATOYROW_NEON #define HAS_SETROW_NEON -#define 
HAS_ARGBSETROW_NEON #define HAS_SPLITUVROW_NEON #define HAS_UYVYTOARGBROW_NEON #define HAS_UYVYTOUV422ROW_NEON #define HAS_UYVYTOUVROW_NEON #define HAS_UYVYTOYROW_NEON -#define HAS_I400TOARGBROW_NEON #define HAS_YUY2TOARGBROW_NEON #define HAS_YUY2TOUV422ROW_NEON #define HAS_YUY2TOUVROW_NEON #define HAS_YUY2TOYROW_NEON -#define HAS_ARGBTORGB565DITHERROW_NEON // Effects: #define HAS_ARGBADDROW_NEON #define HAS_ARGBATTENUATEROW_NEON #define HAS_ARGBBLENDROW_NEON +#define HAS_ARGBCOLORMATRIXROW_NEON #define HAS_ARGBGRAYROW_NEON #define HAS_ARGBMIRRORROW_NEON #define HAS_ARGBMULTIPLYROW_NEON #define HAS_ARGBQUANTIZEROW_NEON #define HAS_ARGBSEPIAROW_NEON #define HAS_ARGBSHADEROW_NEON +#define HAS_ARGBSHUFFLEROW_NEON #define HAS_ARGBSUBTRACTROW_NEON #define HAS_INTERPOLATEROW_NEON #define HAS_SOBELROW_NEON @@ -346,8 +357,6 @@ extern "C" { #define HAS_SOBELXROW_NEON #define HAS_SOBELXYROW_NEON #define HAS_SOBELYROW_NEON -#define HAS_ARGBCOLORMATRIXROW_NEON -#define HAS_ARGBSHUFFLEROW_NEON #endif // The following are available on Mips platforms: @@ -355,19 +364,20 @@ extern "C" { (_MIPS_SIM == _MIPS_SIM_ABI32) && (__mips_isa_rev < 6) #define HAS_COPYROW_MIPS #if defined(__mips_dsp) && (__mips_dsp_rev >= 2) -#define HAS_I422TOABGRROW_MIPS_DSPR2 -#define HAS_I422TOARGBROW_MIPS_DSPR2 -#define HAS_I422TOBGRAROW_MIPS_DSPR2 -#define HAS_INTERPOLATEROW_MIPS_DSPR2 -#define HAS_MIRRORROW_MIPS_DSPR2 -#define HAS_MIRRORUVROW_MIPS_DSPR2 -#define HAS_SPLITUVROW_MIPS_DSPR2 +#define HAS_I422TOARGBROW_DSPR2 +#define HAS_INTERPOLATEROW_DSPR2 +#define HAS_MIRRORROW_DSPR2 +#define HAS_MIRRORUVROW_DSPR2 +#define HAS_SPLITUVROW_DSPR2 #endif #endif -#if defined(_MSC_VER) && !defined(__CLR_VER) +#if defined(_MSC_VER) && !defined(__CLR_VER) && !defined(__clang__) +#if defined(VISUALC_HAS_AVX2) +#define SIMD_ALIGNED(var) __declspec(align(32)) var +#else #define SIMD_ALIGNED(var) __declspec(align(16)) var -#define SIMD_ALIGNED32(var) __declspec(align(64)) var +#endif typedef __declspec(align(16)) 
int16 vec16[8]; typedef __declspec(align(16)) int32 vec32[4]; typedef __declspec(align(16)) int8 vec8[16]; @@ -380,10 +390,13 @@ typedef __declspec(align(32)) int8 lvec8[32]; typedef __declspec(align(32)) uint16 ulvec16[16]; typedef __declspec(align(32)) uint32 ulvec32[8]; typedef __declspec(align(32)) uint8 ulvec8[32]; -#elif defined(__GNUC__) +#elif !defined(__pnacl__) && (defined(__GNUC__) || defined(__clang__)) // Caveat GCC 4.2 to 4.7 have a known issue using vectors with const. +#if defined(CLANG_HAS_AVX2) || defined(GCC_HAS_AVX2) +#define SIMD_ALIGNED(var) var __attribute__((aligned(32))) +#else #define SIMD_ALIGNED(var) var __attribute__((aligned(16))) -#define SIMD_ALIGNED32(var) var __attribute__((aligned(64))) +#endif typedef int16 __attribute__((vector_size(16))) vec16; typedef int32 __attribute__((vector_size(16))) vec32; typedef int8 __attribute__((vector_size(16))) vec8; @@ -398,7 +411,6 @@ typedef uint32 __attribute__((vector_size(32))) ulvec32; typedef uint8 __attribute__((vector_size(32))) ulvec8; #else #define SIMD_ALIGNED(var) var -#define SIMD_ALIGNED32(var) var typedef int16 vec16[8]; typedef int32 vec32[4]; typedef int8 vec8[16]; @@ -413,6 +425,56 @@ typedef uint32 ulvec32[8]; typedef uint8 ulvec8[32]; #endif +#if defined(__aarch64__) +// This struct is for Arm64 color conversion. +struct YuvConstants { + uvec16 kUVToRB; + uvec16 kUVToRB2; + uvec16 kUVToG; + uvec16 kUVToG2; + vec16 kUVBiasBGR; + vec32 kYToRgb; +}; +#elif defined(__arm__) +// This struct is for ArmV7 color conversion. +struct YuvConstants { + uvec8 kUVToRB; + uvec8 kUVToG; + vec16 kUVBiasBGR; + vec32 kYToRgb; +}; +#else +// This struct is for Intel color conversion. 
+struct YuvConstants { + int8 kUVToB[32]; + int8 kUVToG[32]; + int8 kUVToR[32]; + int16 kUVBiasB[16]; + int16 kUVBiasG[16]; + int16 kUVBiasR[16]; + int16 kYToRgb[16]; +}; + +// Offsets into YuvConstants structure +#define KUVTOB 0 +#define KUVTOG 32 +#define KUVTOR 64 +#define KUVBIASB 96 +#define KUVBIASG 128 +#define KUVBIASR 160 +#define KYTORGB 192 +#endif + +// Conversion matrix for YUV to RGB +extern const struct YuvConstants SIMD_ALIGNED(kYuvI601Constants); // BT.601 +extern const struct YuvConstants SIMD_ALIGNED(kYuvJPEGConstants); // JPeg +extern const struct YuvConstants SIMD_ALIGNED(kYuvH709Constants); // BT.709 + +// Conversion matrix for YVU to BGR +extern const struct YuvConstants SIMD_ALIGNED(kYvuI601Constants); // BT.601 +extern const struct YuvConstants SIMD_ALIGNED(kYvuJPEGConstants); // JPeg +extern const struct YuvConstants SIMD_ALIGNED(kYvuH709Constants); // BT.709 + #if defined(__APPLE__) || defined(__x86_64__) || defined(__llvm__) #define OMITFP #else @@ -502,159 +564,166 @@ void I444ToARGBRow_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToARGBRow_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void I422AlphaToARGBRow_NEON(const uint8* y_buf, + const uint8* u_buf, + const uint8* v_buf, + const uint8* a_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void I422ToARGBRow_NEON(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I411ToARGBRow_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, - int width); -void I422ToBGRARow_NEON(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_bgra, - int width); -void I422ToABGRRow_NEON(const uint8* src_y, - const uint8* src_u, - const 
uint8* src_v, - uint8* dst_abgr, + const struct YuvConstants* yuvconstants, int width); void I422ToRGBARow_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgba, + const struct YuvConstants* yuvconstants, int width); void I422ToRGB24Row_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgb24, + const struct YuvConstants* yuvconstants, int width); -void I422ToRAWRow_NEON(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_raw, - int width); void I422ToRGB565Row_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgb565, + const struct YuvConstants* yuvconstants, int width); void I422ToARGB1555Row_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb1555, + const struct YuvConstants* yuvconstants, int width); void I422ToARGB4444Row_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb4444, + const struct YuvConstants* yuvconstants, int width); void NV12ToARGBRow_NEON(const uint8* src_y, const uint8* src_uv, uint8* dst_argb, - int width); -void NV21ToARGBRow_NEON(const uint8* src_y, - const uint8* src_vu, - uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void NV12ToRGB565Row_NEON(const uint8* src_y, const uint8* src_uv, uint8* dst_rgb565, + const struct YuvConstants* yuvconstants, int width); -void NV21ToRGB565Row_NEON(const uint8* src_y, - const uint8* src_vu, - uint8* dst_rgb565, - int width); +void NV21ToARGBRow_NEON(const uint8* src_y, + const uint8* src_vu, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); void YUY2ToARGBRow_NEON(const uint8* src_yuy2, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void UYVYToARGBRow_NEON(const uint8* src_uyvy, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); -void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix); -void ARGBToYRow_Any_AVX2(const uint8* src_argb, 
uint8* dst_y, int pix); -void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix); -void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix); -void ARGBToYJRow_Any_AVX2(const uint8* src_argb, uint8* dst_y, int pix); -void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix); -void BGRAToYRow_SSSE3(const uint8* src_bgra, uint8* dst_y, int pix); -void ABGRToYRow_SSSE3(const uint8* src_abgr, uint8* dst_y, int pix); -void RGBAToYRow_SSSE3(const uint8* src_rgba, uint8* dst_y, int pix); -void RGB24ToYRow_SSSE3(const uint8* src_rgb24, uint8* dst_y, int pix); -void RAWToYRow_SSSE3(const uint8* src_raw, uint8* dst_y, int pix); -void ARGBToYRow_NEON(const uint8* src_argb, uint8* dst_y, int pix); -void ARGBToYJRow_NEON(const uint8* src_argb, uint8* dst_y, int pix); +void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int width); +void ARGBToYRow_Any_AVX2(const uint8* src_argb, uint8* dst_y, int width); +void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width); +void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int width); +void ARGBToYJRow_Any_AVX2(const uint8* src_argb, uint8* dst_y, int width); +void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width); +void BGRAToYRow_SSSE3(const uint8* src_bgra, uint8* dst_y, int width); +void ABGRToYRow_SSSE3(const uint8* src_abgr, uint8* dst_y, int width); +void RGBAToYRow_SSSE3(const uint8* src_rgba, uint8* dst_y, int width); +void RGB24ToYRow_SSSE3(const uint8* src_rgb24, uint8* dst_y, int width); +void RAWToYRow_SSSE3(const uint8* src_raw, uint8* dst_y, int width); +void ARGBToYRow_NEON(const uint8* src_argb, uint8* dst_y, int width); +void ARGBToYJRow_NEON(const uint8* src_argb, uint8* dst_y, int width); void ARGBToUV444Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix); -void ARGBToUV422Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix); + int width); void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* 
dst_v, - int pix); + int width); void ARGBToUVRow_NEON(const uint8* src_argb, int src_stride_argb, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void ARGBToUVJRow_NEON(const uint8* src_argb, int src_stride_argb, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void BGRAToUVRow_NEON(const uint8* src_bgra, int src_stride_bgra, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void ABGRToUVRow_NEON(const uint8* src_abgr, int src_stride_abgr, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void RGBAToUVRow_NEON(const uint8* src_rgba, int src_stride_rgba, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void RGB24ToUVRow_NEON(const uint8* src_rgb24, int src_stride_rgb24, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void RAWToUVRow_NEON(const uint8* src_raw, int src_stride_raw, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void RGB565ToUVRow_NEON(const uint8* src_rgb565, int src_stride_rgb565, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void ARGB1555ToUVRow_NEON(const uint8* src_argb1555, int src_stride_argb1555, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void ARGB4444ToUVRow_NEON(const uint8* src_argb4444, int src_stride_argb4444, - uint8* dst_u, uint8* dst_v, int pix); -void BGRAToYRow_NEON(const uint8* src_bgra, uint8* dst_y, int pix); -void ABGRToYRow_NEON(const uint8* src_abgr, uint8* dst_y, int pix); -void RGBAToYRow_NEON(const uint8* src_rgba, uint8* dst_y, int pix); -void RGB24ToYRow_NEON(const uint8* src_rgb24, uint8* dst_y, int pix); -void RAWToYRow_NEON(const uint8* src_raw, uint8* dst_y, int pix); -void RGB565ToYRow_NEON(const uint8* src_rgb565, uint8* dst_y, int pix); -void ARGB1555ToYRow_NEON(const uint8* src_argb1555, uint8* dst_y, int pix); 
-void ARGB4444ToYRow_NEON(const uint8* src_argb4444, uint8* dst_y, int pix); -void ARGBToYRow_C(const uint8* src_argb, uint8* dst_y, int pix); -void ARGBToYJRow_C(const uint8* src_argb, uint8* dst_y, int pix); -void BGRAToYRow_C(const uint8* src_bgra, uint8* dst_y, int pix); -void ABGRToYRow_C(const uint8* src_abgr, uint8* dst_y, int pix); -void RGBAToYRow_C(const uint8* src_rgba, uint8* dst_y, int pix); -void RGB24ToYRow_C(const uint8* src_rgb24, uint8* dst_y, int pix); -void RAWToYRow_C(const uint8* src_raw, uint8* dst_y, int pix); -void RGB565ToYRow_C(const uint8* src_rgb565, uint8* dst_y, int pix); -void ARGB1555ToYRow_C(const uint8* src_argb1555, uint8* dst_y, int pix); -void ARGB4444ToYRow_C(const uint8* src_argb4444, uint8* dst_y, int pix); -void ARGBToYRow_Any_SSSE3(const uint8* src_argb, uint8* dst_y, int pix); -void ARGBToYJRow_Any_SSSE3(const uint8* src_argb, uint8* dst_y, int pix); -void BGRAToYRow_Any_SSSE3(const uint8* src_bgra, uint8* dst_y, int pix); -void ABGRToYRow_Any_SSSE3(const uint8* src_abgr, uint8* dst_y, int pix); -void RGBAToYRow_Any_SSSE3(const uint8* src_rgba, uint8* dst_y, int pix); -void RGB24ToYRow_Any_SSSE3(const uint8* src_rgb24, uint8* dst_y, int pix); -void RAWToYRow_Any_SSSE3(const uint8* src_raw, uint8* dst_y, int pix); -void ARGBToYRow_Any_NEON(const uint8* src_argb, uint8* dst_y, int pix); -void ARGBToYJRow_Any_NEON(const uint8* src_argb, uint8* dst_y, int pix); -void BGRAToYRow_Any_NEON(const uint8* src_bgra, uint8* dst_y, int pix); -void ABGRToYRow_Any_NEON(const uint8* src_abgr, uint8* dst_y, int pix); -void RGBAToYRow_Any_NEON(const uint8* src_rgba, uint8* dst_y, int pix); -void RGB24ToYRow_Any_NEON(const uint8* src_rgb24, uint8* dst_y, int pix); -void RAWToYRow_Any_NEON(const uint8* src_raw, uint8* dst_y, int pix); -void RGB565ToYRow_Any_NEON(const uint8* src_rgb565, uint8* dst_y, int pix); -void ARGB1555ToYRow_Any_NEON(const uint8* src_argb1555, uint8* dst_y, int pix); -void ARGB4444ToYRow_Any_NEON(const uint8* 
src_argb4444, uint8* dst_y, int pix); + uint8* dst_u, uint8* dst_v, int width); +void BGRAToYRow_NEON(const uint8* src_bgra, uint8* dst_y, int width); +void ABGRToYRow_NEON(const uint8* src_abgr, uint8* dst_y, int width); +void RGBAToYRow_NEON(const uint8* src_rgba, uint8* dst_y, int width); +void RGB24ToYRow_NEON(const uint8* src_rgb24, uint8* dst_y, int width); +void RAWToYRow_NEON(const uint8* src_raw, uint8* dst_y, int width); +void RGB565ToYRow_NEON(const uint8* src_rgb565, uint8* dst_y, int width); +void ARGB1555ToYRow_NEON(const uint8* src_argb1555, uint8* dst_y, int width); +void ARGB4444ToYRow_NEON(const uint8* src_argb4444, uint8* dst_y, int width); +void ARGBToYRow_C(const uint8* src_argb, uint8* dst_y, int width); +void ARGBToYJRow_C(const uint8* src_argb, uint8* dst_y, int width); +void BGRAToYRow_C(const uint8* src_bgra, uint8* dst_y, int width); +void ABGRToYRow_C(const uint8* src_abgr, uint8* dst_y, int width); +void RGBAToYRow_C(const uint8* src_rgba, uint8* dst_y, int width); +void RGB24ToYRow_C(const uint8* src_rgb24, uint8* dst_y, int width); +void RAWToYRow_C(const uint8* src_raw, uint8* dst_y, int width); +void RGB565ToYRow_C(const uint8* src_rgb565, uint8* dst_y, int width); +void ARGB1555ToYRow_C(const uint8* src_argb1555, uint8* dst_y, int width); +void ARGB4444ToYRow_C(const uint8* src_argb4444, uint8* dst_y, int width); +void ARGBToYRow_Any_SSSE3(const uint8* src_argb, uint8* dst_y, int width); +void ARGBToYJRow_Any_SSSE3(const uint8* src_argb, uint8* dst_y, int width); +void BGRAToYRow_Any_SSSE3(const uint8* src_bgra, uint8* dst_y, int width); +void ABGRToYRow_Any_SSSE3(const uint8* src_abgr, uint8* dst_y, int width); +void RGBAToYRow_Any_SSSE3(const uint8* src_rgba, uint8* dst_y, int width); +void RGB24ToYRow_Any_SSSE3(const uint8* src_rgb24, uint8* dst_y, int width); +void RAWToYRow_Any_SSSE3(const uint8* src_raw, uint8* dst_y, int width); +void ARGBToYRow_Any_NEON(const uint8* src_argb, uint8* dst_y, int width); +void 
ARGBToYJRow_Any_NEON(const uint8* src_argb, uint8* dst_y, int width); +void BGRAToYRow_Any_NEON(const uint8* src_bgra, uint8* dst_y, int width); +void ABGRToYRow_Any_NEON(const uint8* src_abgr, uint8* dst_y, int width); +void RGBAToYRow_Any_NEON(const uint8* src_rgba, uint8* dst_y, int width); +void RGB24ToYRow_Any_NEON(const uint8* src_rgb24, uint8* dst_y, int width); +void RAWToYRow_Any_NEON(const uint8* src_raw, uint8* dst_y, int width); +void RGB565ToYRow_Any_NEON(const uint8* src_rgb565, uint8* dst_y, int width); +void ARGB1555ToYRow_Any_NEON(const uint8* src_argb1555, uint8* dst_y, + int width); +void ARGB4444ToYRow_Any_NEON(const uint8* src_argb4444, uint8* dst_y, + int width); void ARGBToUVRow_AVX2(const uint8* src_argb, int src_stride_argb, uint8* dst_u, uint8* dst_v, int width); -void ARGBToUVRow_Any_AVX2(const uint8* src_argb, int src_stride_argb, - uint8* dst_u, uint8* dst_v, int width); +void ARGBToUVJRow_AVX2(const uint8* src_argb, int src_stride_argb, + uint8* dst_u, uint8* dst_v, int width); void ARGBToUVRow_SSSE3(const uint8* src_argb, int src_stride_argb, uint8* dst_u, uint8* dst_v, int width); void ARGBToUVJRow_SSSE3(const uint8* src_argb, int src_stride_argb, @@ -665,6 +734,10 @@ void ABGRToUVRow_SSSE3(const uint8* src_abgr, int src_stride_abgr, uint8* dst_u, uint8* dst_v, int width); void RGBAToUVRow_SSSE3(const uint8* src_rgba, int src_stride_rgba, uint8* dst_u, uint8* dst_v, int width); +void ARGBToUVRow_Any_AVX2(const uint8* src_argb, int src_stride_argb, + uint8* dst_u, uint8* dst_v, int width); +void ARGBToUVJRow_Any_AVX2(const uint8* src_argb, int src_stride_argb, + uint8* dst_u, uint8* dst_v, int width); void ARGBToUVRow_Any_SSSE3(const uint8* src_argb, int src_stride_argb, uint8* dst_u, uint8* dst_v, int width); void ARGBToUVJRow_Any_SSSE3(const uint8* src_argb, int src_stride_argb, @@ -676,33 +749,31 @@ void ABGRToUVRow_Any_SSSE3(const uint8* src_abgr, int src_stride_abgr, void RGBAToUVRow_Any_SSSE3(const uint8* src_rgba, int 
src_stride_rgba, uint8* dst_u, uint8* dst_v, int width); void ARGBToUV444Row_Any_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix); -void ARGBToUV422Row_Any_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix); + int width); void ARGBToUV411Row_Any_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix); + int width); void ARGBToUVRow_Any_NEON(const uint8* src_argb, int src_stride_argb, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void ARGBToUVJRow_Any_NEON(const uint8* src_argb, int src_stride_argb, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void BGRAToUVRow_Any_NEON(const uint8* src_bgra, int src_stride_bgra, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void ABGRToUVRow_Any_NEON(const uint8* src_abgr, int src_stride_abgr, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void RGBAToUVRow_Any_NEON(const uint8* src_rgba, int src_stride_rgba, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void RGB24ToUVRow_Any_NEON(const uint8* src_rgb24, int src_stride_rgb24, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void RAWToUVRow_Any_NEON(const uint8* src_raw, int src_stride_raw, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void RGB565ToUVRow_Any_NEON(const uint8* src_rgb565, int src_stride_rgb565, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void ARGB1555ToUVRow_Any_NEON(const uint8* src_argb1555, int src_stride_argb1555, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void ARGB4444ToUVRow_Any_NEON(const uint8* src_argb4444, int src_stride_argb4444, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void ARGBToUVRow_C(const uint8* src_argb, int src_stride_argb, uint8* dst_u, uint8* 
dst_v, int width); void ARGBToUVJRow_C(const uint8* src_argb, int src_stride_argb, @@ -729,25 +800,15 @@ void ARGBToUV444Row_SSSE3(const uint8* src_argb, void ARGBToUV444Row_Any_SSSE3(const uint8* src_argb, uint8* dst_u, uint8* dst_v, int width); -void ARGBToUV422Row_SSSE3(const uint8* src_argb, - uint8* dst_u, uint8* dst_v, int width); -void ARGBToUV422Row_Any_SSSE3(const uint8* src_argb, - uint8* dst_u, uint8* dst_v, int width); - void ARGBToUV444Row_C(const uint8* src_argb, uint8* dst_u, uint8* dst_v, int width); -void ARGBToUV422Row_C(const uint8* src_argb, - uint8* dst_u, uint8* dst_v, int width); void ARGBToUV411Row_C(const uint8* src_argb, uint8* dst_u, uint8* dst_v, int width); -void ARGBToUVJ422Row_C(const uint8* src_argb, - uint8* dst_u, uint8* dst_v, int width); void MirrorRow_AVX2(const uint8* src, uint8* dst, int width); void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width); -void MirrorRow_SSE2(const uint8* src, uint8* dst, int width); void MirrorRow_NEON(const uint8* src, uint8* dst, int width); -void MirrorRow_MIPS_DSPR2(const uint8* src, uint8* dst, int width); +void MirrorRow_DSPR2(const uint8* src, uint8* dst, int width); void MirrorRow_C(const uint8* src, uint8* dst, int width); void MirrorRow_Any_AVX2(const uint8* src, uint8* dst, int width); void MirrorRow_Any_SSSE3(const uint8* src, uint8* dst, int width); @@ -758,10 +819,9 @@ void MirrorUVRow_SSSE3(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int width); void MirrorUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int width); -void MirrorUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, - int width); -void MirrorUVRow_C(const uint8* src_uv, uint8* dst_u, uint8* dst_v, - int width); +void MirrorUVRow_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, + int width); +void MirrorUVRow_C(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int width); void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width); void ARGBMirrorRow_SSE2(const uint8* src, 
uint8* dst, int width); @@ -771,20 +831,23 @@ void ARGBMirrorRow_Any_AVX2(const uint8* src, uint8* dst, int width); void ARGBMirrorRow_Any_SSE2(const uint8* src, uint8* dst, int width); void ARGBMirrorRow_Any_NEON(const uint8* src, uint8* dst, int width); -void SplitUVRow_C(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix); -void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix); -void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix); -void SplitUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix); -void SplitUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, - int pix); +void SplitUVRow_C(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int width); +void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, + int width); +void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, + int width); +void SplitUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v, + int width); +void SplitUVRow_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, + int width); void SplitUVRow_Any_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, - int pix); + int width); void SplitUVRow_Any_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, - int pix); + int width); void SplitUVRow_Any_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v, - int pix); -void SplitUVRow_Any_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, - int pix); + int width); +void SplitUVRow_Any_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, + int width); void MergeUVRow_C(const uint8* src_u, const uint8* src_v, uint8* dst_uv, int width); @@ -816,10 +879,26 @@ void CopyRow_16_C(const uint16* src, uint16* dst, int count); void ARGBCopyAlphaRow_C(const uint8* src_argb, uint8* dst_argb, int width); void ARGBCopyAlphaRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width); void ARGBCopyAlphaRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width); +void 
ARGBCopyAlphaRow_Any_SSE2(const uint8* src_argb, uint8* dst_argb, + int width); +void ARGBCopyAlphaRow_Any_AVX2(const uint8* src_argb, uint8* dst_argb, + int width); + +void ARGBExtractAlphaRow_C(const uint8* src_argb, uint8* dst_a, int width); +void ARGBExtractAlphaRow_SSE2(const uint8* src_argb, uint8* dst_a, int width); +void ARGBExtractAlphaRow_NEON(const uint8* src_argb, uint8* dst_a, int width); +void ARGBExtractAlphaRow_Any_SSE2(const uint8* src_argb, uint8* dst_a, + int width); +void ARGBExtractAlphaRow_Any_NEON(const uint8* src_argb, uint8* dst_a, + int width); void ARGBCopyYToAlphaRow_C(const uint8* src_y, uint8* dst_argb, int width); void ARGBCopyYToAlphaRow_SSE2(const uint8* src_y, uint8* dst_argb, int width); void ARGBCopyYToAlphaRow_AVX2(const uint8* src_y, uint8* dst_argb, int width); +void ARGBCopyYToAlphaRow_Any_SSE2(const uint8* src_y, uint8* dst_argb, + int width); +void ARGBCopyYToAlphaRow_Any_AVX2(const uint8* src_y, uint8* dst_argb, + int width); void SetRow_C(uint8* dst, uint8 v8, int count); void SetRow_X86(uint8* dst, uint8 v8, int count); @@ -835,524 +914,541 @@ void ARGBSetRow_Any_NEON(uint8* dst_argb, uint32 v32, int count); // ARGBShufflers for BGRAToARGB etc. 
void ARGBShuffleRow_C(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix); + const uint8* shuffler, int width); void ARGBShuffleRow_SSE2(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix); + const uint8* shuffler, int width); void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix); + const uint8* shuffler, int width); void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix); + const uint8* shuffler, int width); void ARGBShuffleRow_NEON(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix); + const uint8* shuffler, int width); void ARGBShuffleRow_Any_SSE2(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix); + const uint8* shuffler, int width); void ARGBShuffleRow_Any_SSSE3(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix); + const uint8* shuffler, int width); void ARGBShuffleRow_Any_AVX2(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix); + const uint8* shuffler, int width); void ARGBShuffleRow_Any_NEON(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix); + const uint8* shuffler, int width); -void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int pix); -void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb, int pix); -void RGB565ToARGBRow_SSE2(const uint8* src_rgb565, uint8* dst_argb, int pix); +void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int width); +void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb, int width); +void RAWToRGB24Row_SSSE3(const uint8* src_raw, uint8* dst_rgb24, int width); +void RGB565ToARGBRow_SSE2(const uint8* src_rgb565, uint8* dst_argb, int width); void ARGB1555ToARGBRow_SSE2(const uint8* src_argb1555, uint8* dst_argb, - int pix); + int width); void ARGB4444ToARGBRow_SSE2(const uint8* src_argb4444, uint8* dst_argb, - int pix); -void 
RGB565ToARGBRow_AVX2(const uint8* src_rgb565, uint8* dst_argb, int pix); + int width); +void RGB565ToARGBRow_AVX2(const uint8* src_rgb565, uint8* dst_argb, int width); void ARGB1555ToARGBRow_AVX2(const uint8* src_argb1555, uint8* dst_argb, - int pix); + int width); void ARGB4444ToARGBRow_AVX2(const uint8* src_argb4444, uint8* dst_argb, - int pix); + int width); -void RGB24ToARGBRow_NEON(const uint8* src_rgb24, uint8* dst_argb, int pix); -void RAWToARGBRow_NEON(const uint8* src_raw, uint8* dst_argb, int pix); -void RGB565ToARGBRow_NEON(const uint8* src_rgb565, uint8* dst_argb, int pix); +void RGB24ToARGBRow_NEON(const uint8* src_rgb24, uint8* dst_argb, int width); +void RAWToARGBRow_NEON(const uint8* src_raw, uint8* dst_argb, int width); +void RAWToRGB24Row_NEON(const uint8* src_raw, uint8* dst_rgb24, int width); +void RGB565ToARGBRow_NEON(const uint8* src_rgb565, uint8* dst_argb, int width); void ARGB1555ToARGBRow_NEON(const uint8* src_argb1555, uint8* dst_argb, - int pix); + int width); void ARGB4444ToARGBRow_NEON(const uint8* src_argb4444, uint8* dst_argb, - int pix); -void RGB24ToARGBRow_C(const uint8* src_rgb24, uint8* dst_argb, int pix); -void RAWToARGBRow_C(const uint8* src_raw, uint8* dst_argb, int pix); -void RGB565ToARGBRow_C(const uint8* src_rgb, uint8* dst_argb, int pix); -void ARGB1555ToARGBRow_C(const uint8* src_argb, uint8* dst_argb, int pix); -void ARGB4444ToARGBRow_C(const uint8* src_argb, uint8* dst_argb, int pix); -void RGB24ToARGBRow_Any_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int pix); -void RAWToARGBRow_Any_SSSE3(const uint8* src_raw, uint8* dst_argb, int pix); + int width); +void RGB24ToARGBRow_C(const uint8* src_rgb24, uint8* dst_argb, int width); +void RAWToARGBRow_C(const uint8* src_raw, uint8* dst_argb, int width); +void RAWToRGB24Row_C(const uint8* src_raw, uint8* dst_rgb24, int width); +void RGB565ToARGBRow_C(const uint8* src_rgb, uint8* dst_argb, int width); +void ARGB1555ToARGBRow_C(const uint8* src_argb, uint8* dst_argb, int 
width); +void ARGB4444ToARGBRow_C(const uint8* src_argb, uint8* dst_argb, int width); +void RGB24ToARGBRow_Any_SSSE3(const uint8* src_rgb24, uint8* dst_argb, + int width); +void RAWToARGBRow_Any_SSSE3(const uint8* src_raw, uint8* dst_argb, int width); +void RAWToRGB24Row_Any_SSSE3(const uint8* src_raw, uint8* dst_rgb24, int width); void RGB565ToARGBRow_Any_SSE2(const uint8* src_rgb565, uint8* dst_argb, - int pix); + int width); void ARGB1555ToARGBRow_Any_SSE2(const uint8* src_argb1555, uint8* dst_argb, - int pix); + int width); void ARGB4444ToARGBRow_Any_SSE2(const uint8* src_argb4444, uint8* dst_argb, - int pix); + int width); void RGB565ToARGBRow_Any_AVX2(const uint8* src_rgb565, uint8* dst_argb, - int pix); + int width); void ARGB1555ToARGBRow_Any_AVX2(const uint8* src_argb1555, uint8* dst_argb, - int pix); + int width); void ARGB4444ToARGBRow_Any_AVX2(const uint8* src_argb4444, uint8* dst_argb, - int pix); + int width); -void RGB24ToARGBRow_Any_NEON(const uint8* src_rgb24, uint8* dst_argb, int pix); -void RAWToARGBRow_Any_NEON(const uint8* src_raw, uint8* dst_argb, int pix); +void RGB24ToARGBRow_Any_NEON(const uint8* src_rgb24, uint8* dst_argb, + int width); +void RAWToARGBRow_Any_NEON(const uint8* src_raw, uint8* dst_argb, int width); +void RAWToRGB24Row_Any_NEON(const uint8* src_raw, uint8* dst_rgb24, int width); void RGB565ToARGBRow_Any_NEON(const uint8* src_rgb565, uint8* dst_argb, - int pix); + int width); void ARGB1555ToARGBRow_Any_NEON(const uint8* src_argb1555, uint8* dst_argb, - int pix); + int width); void ARGB4444ToARGBRow_Any_NEON(const uint8* src_argb4444, uint8* dst_argb, - int pix); + int width); -void ARGBToRGB24Row_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToRAWRow_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToRGB565Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToARGB1555Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToARGB4444Row_SSE2(const uint8* src_argb, 
uint8* dst_rgb, int pix); +void ARGBToRGB24Row_SSSE3(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToRAWRow_SSSE3(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToRGB565Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToARGB1555Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToARGB4444Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int width); void ARGBToRGB565DitherRow_C(const uint8* src_argb, uint8* dst_rgb, - const uint32 dither4, int pix); + const uint32 dither4, int width); void ARGBToRGB565DitherRow_SSE2(const uint8* src_argb, uint8* dst_rgb, - const uint32 dither4, int pix); + const uint32 dither4, int width); void ARGBToRGB565DitherRow_AVX2(const uint8* src_argb, uint8* dst_rgb, - const uint32 dither4, int pix); + const uint32 dither4, int width); -void ARGBToRGB565Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToARGB1555Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToARGB4444Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix); +void ARGBToRGB565Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToARGB1555Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToARGB4444Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int width); -void ARGBToRGB24Row_NEON(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToRAWRow_NEON(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToRGB565Row_NEON(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToARGB1555Row_NEON(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToARGB4444Row_NEON(const uint8* src_argb, uint8* dst_rgb, int pix); +void ARGBToRGB24Row_NEON(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToRAWRow_NEON(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToRGB565Row_NEON(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToARGB1555Row_NEON(const uint8* src_argb, uint8* dst_rgb, int width); 
+void ARGBToARGB4444Row_NEON(const uint8* src_argb, uint8* dst_rgb, int width); void ARGBToRGB565DitherRow_NEON(const uint8* src_argb, uint8* dst_rgb, const uint32 dither4, int width); -void ARGBToRGBARow_C(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToRGB24Row_C(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToRAWRow_C(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToRGB565Row_C(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToARGB1555Row_C(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToARGB4444Row_C(const uint8* src_argb, uint8* dst_rgb, int pix); +void ARGBToRGBARow_C(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToRGB24Row_C(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToRAWRow_C(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToRGB565Row_C(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToARGB1555Row_C(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToARGB4444Row_C(const uint8* src_argb, uint8* dst_rgb, int width); -void J400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix); -void J400ToARGBRow_AVX2(const uint8* src_y, uint8* dst_argb, int pix); -void J400ToARGBRow_NEON(const uint8* src_y, uint8* dst_argb, int pix); -void J400ToARGBRow_C(const uint8* src_y, uint8* dst_argb, int pix); -void J400ToARGBRow_Any_SSE2(const uint8* src_y, uint8* dst_argb, int pix); -void J400ToARGBRow_Any_AVX2(const uint8* src_y, uint8* dst_argb, int pix); -void J400ToARGBRow_Any_NEON(const uint8* src_y, uint8* dst_argb, int pix); +void J400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int width); +void J400ToARGBRow_AVX2(const uint8* src_y, uint8* dst_argb, int width); +void J400ToARGBRow_NEON(const uint8* src_y, uint8* dst_argb, int width); +void J400ToARGBRow_C(const uint8* src_y, uint8* dst_argb, int width); +void J400ToARGBRow_Any_SSE2(const uint8* src_y, uint8* dst_argb, int width); +void J400ToARGBRow_Any_AVX2(const uint8* 
src_y, uint8* dst_argb, int width); +void J400ToARGBRow_Any_NEON(const uint8* src_y, uint8* dst_argb, int width); void I444ToARGBRow_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToARGBRow_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); +void I422ToARGBRow_C(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void I422AlphaToARGBRow_C(const uint8* y_buf, + const uint8* u_buf, + const uint8* v_buf, + const uint8* a_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); void I411ToARGBRow_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void NV12ToARGBRow_C(const uint8* src_y, const uint8* src_uv, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); -void NV21ToRGB565Row_C(const uint8* src_y, - const uint8* src_vu, - uint8* dst_argb, - int width); void NV12ToRGB565Row_C(const uint8* src_y, const uint8* src_uv, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void NV21ToARGBRow_C(const uint8* src_y, - const uint8* src_vu, + const uint8* src_uv, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void YUY2ToARGBRow_C(const uint8* src_yuy2, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void UYVYToARGBRow_C(const uint8* src_uyvy, uint8* dst_argb, - int width); -void J422ToARGBRow_C(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, - int width); -void I422ToBGRARow_C(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_bgra, - int width); -void I422ToABGRRow_C(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_abgr, + const 
struct YuvConstants* yuvconstants, int width); void I422ToRGBARow_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgba, + const struct YuvConstants* yuvconstants, int width); void I422ToRGB24Row_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgb24, + const struct YuvConstants* yuvconstants, int width); -void I422ToRAWRow_C(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_raw, - int width); void I422ToARGB4444Row_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb4444, + const struct YuvConstants* yuvconstants, int width); void I422ToARGB1555Row_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb4444, + const struct YuvConstants* yuvconstants, int width); void I422ToRGB565Row_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgb565, + const struct YuvConstants* yuvconstants, int width); void I422ToARGBRow_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); -void I422ToBGRARow_AVX2(const uint8* src_y, +void I422ToARGBRow_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToRGBARow_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, - int width); -void I422ToABGRRow_AVX2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I444ToARGBRow_SSSE3(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I444ToARGBRow_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void I444ToARGBRow_SSSE3(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + 
uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void I444ToARGBRow_AVX2(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToARGBRow_SSSE3(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void I422AlphaToARGBRow_SSSE3(const uint8* y_buf, + const uint8* u_buf, + const uint8* v_buf, + const uint8* a_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void I422AlphaToARGBRow_AVX2(const uint8* y_buf, + const uint8* u_buf, + const uint8* v_buf, + const uint8* a_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void I422ToARGBRow_SSSE3(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I411ToARGBRow_SSSE3(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I411ToARGBRow_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void NV12ToARGBRow_SSSE3(const uint8* src_y, const uint8* src_uv, uint8* dst_argb, - int width); -void NV21ToARGBRow_SSSE3(const uint8* src_y, - const uint8* src_vu, - uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void NV12ToARGBRow_AVX2(const uint8* src_y, const uint8* src_uv, uint8* dst_argb, - int width); -void NV21ToARGBRow_AVX2(const uint8* src_y, - const uint8* src_vu, - uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void NV12ToRGB565Row_SSSE3(const uint8* src_y, const uint8* src_uv, uint8* dst_argb, - int width); -void NV21ToRGB565Row_SSSE3(const uint8* src_y, - const uint8* src_vu, - uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void 
NV12ToRGB565Row_AVX2(const uint8* src_y, const uint8* src_uv, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); -void NV21ToRGB565Row_AVX2(const uint8* src_y, - const uint8* src_vu, - uint8* dst_argb, - int width); +void NV21ToARGBRow_SSSE3(const uint8* src_y, + const uint8* src_uv, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void NV21ToARGBRow_AVX2(const uint8* src_y, + const uint8* src_uv, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); void YUY2ToARGBRow_SSSE3(const uint8* src_yuy2, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void UYVYToARGBRow_SSSE3(const uint8* src_uyvy, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void YUY2ToARGBRow_AVX2(const uint8* src_yuy2, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void UYVYToARGBRow_AVX2(const uint8* src_uyvy, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); -void J422ToARGBRow_SSSE3(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, - int width); -void J422ToARGBRow_AVX2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, - int width); -void I422ToBGRARow_SSSE3(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_bgra, - int width); -void I422ToABGRRow_SSSE3(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_abgr, - int width); void I422ToRGBARow_SSSE3(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgba, + const struct YuvConstants* yuvconstants, int width); void I422ToARGB4444Row_SSSE3(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToARGB4444Row_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void 
I422ToARGB1555Row_SSSE3(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToARGB1555Row_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToRGB565Row_SSSE3(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToRGB565Row_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToRGB24Row_SSSE3(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgb24, + const struct YuvConstants* yuvconstants, int width); void I422ToRGB24Row_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgb24, + const struct YuvConstants* yuvconstants, int width); -void I422ToRAWRow_SSSE3(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_raw, - int width); -void I422ToRAWRow_AVX2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_raw, - int width); void I422ToARGBRow_Any_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, - int width); -void I422ToBGRARow_Any_AVX2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToRGBARow_Any_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, - int width); -void I422ToABGRRow_Any_AVX2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I444ToARGBRow_Any_SSSE3(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I444ToARGBRow_Any_AVX2(const uint8* src_y, const uint8* src_u, const 
uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToARGBRow_Any_SSSE3(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); +void I422AlphaToARGBRow_Any_SSSE3(const uint8* y_buf, + const uint8* u_buf, + const uint8* v_buf, + const uint8* a_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void I422AlphaToARGBRow_Any_AVX2(const uint8* y_buf, + const uint8* u_buf, + const uint8* v_buf, + const uint8* a_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); void I411ToARGBRow_Any_SSSE3(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I411ToARGBRow_Any_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void NV12ToARGBRow_Any_SSSE3(const uint8* src_y, const uint8* src_uv, uint8* dst_argb, - int width); -void NV21ToARGBRow_Any_SSSE3(const uint8* src_y, - const uint8* src_vu, - uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void NV12ToARGBRow_Any_AVX2(const uint8* src_y, const uint8* src_uv, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); +void NV21ToARGBRow_Any_SSSE3(const uint8* src_y, + const uint8* src_vu, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); void NV21ToARGBRow_Any_AVX2(const uint8* src_y, const uint8* src_vu, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void NV12ToRGB565Row_Any_SSSE3(const uint8* src_y, const uint8* src_uv, uint8* dst_argb, - int width); -void NV21ToRGB565Row_Any_SSSE3(const uint8* src_y, - const uint8* src_vu, - uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void NV12ToRGB565Row_Any_AVX2(const uint8* src_y, const uint8* src_uv, uint8* dst_argb, - int width); -void 
NV21ToRGB565Row_Any_AVX2(const uint8* src_y, - const uint8* src_vu, - uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void YUY2ToARGBRow_Any_SSSE3(const uint8* src_yuy2, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void UYVYToARGBRow_Any_SSSE3(const uint8* src_uyvy, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void YUY2ToARGBRow_Any_AVX2(const uint8* src_yuy2, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void UYVYToARGBRow_Any_AVX2(const uint8* src_uyvy, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); -void J422ToARGBRow_Any_SSSE3(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, - int width); -void J422ToARGBRow_Any_AVX2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, - int width); -void I422ToBGRARow_Any_SSSE3(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_bgra, - int width); -void I422ToABGRRow_Any_SSSE3(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_abgr, - int width); void I422ToRGBARow_Any_SSSE3(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgba, + const struct YuvConstants* yuvconstants, int width); void I422ToARGB4444Row_Any_SSSE3(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgba, + const struct YuvConstants* yuvconstants, int width); void I422ToARGB4444Row_Any_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgba, + const struct YuvConstants* yuvconstants, int width); void I422ToARGB1555Row_Any_SSSE3(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgba, + const struct YuvConstants* yuvconstants, int width); void I422ToARGB1555Row_Any_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgba, + const struct YuvConstants* yuvconstants, int width); void I422ToRGB565Row_Any_SSSE3(const 
uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgba, + const struct YuvConstants* yuvconstants, int width); void I422ToRGB565Row_Any_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgba, + const struct YuvConstants* yuvconstants, int width); void I422ToRGB24Row_Any_SSSE3(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToRGB24Row_Any_AVX2(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); -void I422ToRAWRow_Any_SSSE3(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, - int width); -void I422ToRAWRow_Any_AVX2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, - int width); void I400ToARGBRow_C(const uint8* src_y, uint8* dst_argb, int width); void I400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int width); @@ -1365,13 +1461,23 @@ void I400ToARGBRow_Any_NEON(const uint8* src_y, uint8* dst_argb, int width); // ARGB preattenuated alpha blend. void ARGBBlendRow_SSSE3(const uint8* src_argb, const uint8* src_argb1, uint8* dst_argb, int width); -void ARGBBlendRow_SSE2(const uint8* src_argb, const uint8* src_argb1, - uint8* dst_argb, int width); void ARGBBlendRow_NEON(const uint8* src_argb, const uint8* src_argb1, uint8* dst_argb, int width); void ARGBBlendRow_C(const uint8* src_argb, const uint8* src_argb1, uint8* dst_argb, int width); +// Unattenuated planar alpha blend. 
+void BlendPlaneRow_SSSE3(const uint8* src0, const uint8* src1, + const uint8* alpha, uint8* dst, int width); +void BlendPlaneRow_Any_SSSE3(const uint8* src0, const uint8* src1, + const uint8* alpha, uint8* dst, int width); +void BlendPlaneRow_AVX2(const uint8* src0, const uint8* src1, + const uint8* alpha, uint8* dst, int width); +void BlendPlaneRow_Any_AVX2(const uint8* src0, const uint8* src1, + const uint8* alpha, uint8* dst, int width); +void BlendPlaneRow_C(const uint8* src0, const uint8* src1, + const uint8* alpha, uint8* dst, int width); + // ARGB multiply images. Same API as Blend, but these require // pointer and width alignment for SSE2. void ARGBMultiplyRow_C(const uint8* src_argb, const uint8* src_argb1, @@ -1422,26 +1528,32 @@ void ARGBSubtractRow_NEON(const uint8* src_argb, const uint8* src_argb1, void ARGBSubtractRow_Any_NEON(const uint8* src_argb, const uint8* src_argb1, uint8* dst_argb, int width); -void ARGBToRGB24Row_Any_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToRAWRow_Any_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToRGB565Row_Any_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToARGB1555Row_Any_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToARGB4444Row_Any_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix); +void ARGBToRGB24Row_Any_SSSE3(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToRAWRow_Any_SSSE3(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToRGB565Row_Any_SSE2(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToARGB1555Row_Any_SSE2(const uint8* src_argb, uint8* dst_rgb, + int width); +void ARGBToARGB4444Row_Any_SSE2(const uint8* src_argb, uint8* dst_rgb, + int width); void ARGBToRGB565DitherRow_Any_SSE2(const uint8* src_argb, uint8* dst_rgb, - const uint32 dither4, int pix); + const uint32 dither4, int width); void ARGBToRGB565DitherRow_Any_AVX2(const uint8* src_argb, uint8* dst_rgb, - const uint32 dither4, int 
pix); + const uint32 dither4, int width); -void ARGBToRGB565Row_Any_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToARGB1555Row_Any_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToARGB4444Row_Any_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix); +void ARGBToRGB565Row_Any_AVX2(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToARGB1555Row_Any_AVX2(const uint8* src_argb, uint8* dst_rgb, + int width); +void ARGBToARGB4444Row_Any_AVX2(const uint8* src_argb, uint8* dst_rgb, + int width); -void ARGBToRGB24Row_Any_NEON(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToRAWRow_Any_NEON(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToRGB565Row_Any_NEON(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToARGB1555Row_Any_NEON(const uint8* src_argb, uint8* dst_rgb, int pix); -void ARGBToARGB4444Row_Any_NEON(const uint8* src_argb, uint8* dst_rgb, int pix); +void ARGBToRGB24Row_Any_NEON(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToRAWRow_Any_NEON(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToRGB565Row_Any_NEON(const uint8* src_argb, uint8* dst_rgb, int width); +void ARGBToARGB1555Row_Any_NEON(const uint8* src_argb, uint8* dst_rgb, + int width); +void ARGBToARGB4444Row_Any_NEON(const uint8* src_argb, uint8* dst_rgb, + int width); void ARGBToRGB565DitherRow_Any_NEON(const uint8* src_argb, uint8* dst_rgb, const uint32 dither4, int width); @@ -1449,186 +1561,169 @@ void I444ToARGBRow_Any_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToARGBRow_Any_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); +void I422AlphaToARGBRow_Any_NEON(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + const uint8* src_a, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int 
width); void I411ToARGBRow_Any_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, - int width); -void I422ToBGRARow_Any_NEON(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, - int width); -void I422ToABGRRow_Any_NEON(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToRGBARow_Any_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToRGB24Row_Any_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); -void I422ToRAWRow_Any_NEON(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, - int width); void I422ToARGB4444Row_Any_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToARGB1555Row_Any_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void I422ToRGB565Row_Any_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void NV12ToARGBRow_Any_NEON(const uint8* src_y, const uint8* src_uv, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void NV21ToARGBRow_Any_NEON(const uint8* src_y, - const uint8* src_uv, + const uint8* src_vu, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void NV12ToRGB565Row_Any_NEON(const uint8* src_y, const uint8* src_uv, uint8* dst_argb, - int width); -void NV21ToRGB565Row_Any_NEON(const uint8* src_y, - const uint8* src_uv, - uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); void YUY2ToARGBRow_Any_NEON(const uint8* src_yuy2, uint8* dst_argb, + const struct 
YuvConstants* yuvconstants, int width); void UYVYToARGBRow_Any_NEON(const uint8* src_uyvy, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width); -void I422ToARGBRow_MIPS_DSPR2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, - int width); -void I422ToBGRARow_MIPS_DSPR2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, - int width); -void I422ToABGRRow_MIPS_DSPR2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, - int width); -void I422ToARGBRow_MIPS_DSPR2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, - int width); -void I422ToBGRARow_MIPS_DSPR2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, - int width); -void I422ToABGRRow_MIPS_DSPR2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb, - int width); +void I422ToARGBRow_DSPR2(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); +void I422ToARGBRow_DSPR2(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width); -void YUY2ToYRow_AVX2(const uint8* src_yuy2, uint8* dst_y, int pix); +void YUY2ToYRow_AVX2(const uint8* src_yuy2, uint8* dst_y, int width); void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void YUY2ToUV422Row_AVX2(const uint8* src_yuy2, - uint8* dst_u, uint8* dst_v, int pix); -void YUY2ToYRow_SSE2(const uint8* src_yuy2, uint8* dst_y, int pix); + uint8* dst_u, uint8* dst_v, int width); +void YUY2ToYRow_SSE2(const uint8* src_yuy2, uint8* dst_y, int width); void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void YUY2ToUV422Row_SSE2(const uint8* 
src_yuy2, - uint8* dst_u, uint8* dst_v, int pix); -void YUY2ToYRow_NEON(const uint8* src_yuy2, uint8* dst_y, int pix); + uint8* dst_u, uint8* dst_v, int width); +void YUY2ToYRow_NEON(const uint8* src_yuy2, uint8* dst_y, int width); void YUY2ToUVRow_NEON(const uint8* src_yuy2, int stride_yuy2, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void YUY2ToUV422Row_NEON(const uint8* src_yuy2, - uint8* dst_u, uint8* dst_v, int pix); -void YUY2ToYRow_C(const uint8* src_yuy2, uint8* dst_y, int pix); + uint8* dst_u, uint8* dst_v, int width); +void YUY2ToYRow_C(const uint8* src_yuy2, uint8* dst_y, int width); void YUY2ToUVRow_C(const uint8* src_yuy2, int stride_yuy2, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void YUY2ToUV422Row_C(const uint8* src_yuy2, - uint8* dst_u, uint8* dst_v, int pix); -void YUY2ToYRow_Any_AVX2(const uint8* src_yuy2, uint8* dst_y, int pix); + uint8* dst_u, uint8* dst_v, int width); +void YUY2ToYRow_Any_AVX2(const uint8* src_yuy2, uint8* dst_y, int width); void YUY2ToUVRow_Any_AVX2(const uint8* src_yuy2, int stride_yuy2, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void YUY2ToUV422Row_Any_AVX2(const uint8* src_yuy2, - uint8* dst_u, uint8* dst_v, int pix); -void YUY2ToYRow_Any_SSE2(const uint8* src_yuy2, uint8* dst_y, int pix); + uint8* dst_u, uint8* dst_v, int width); +void YUY2ToYRow_Any_SSE2(const uint8* src_yuy2, uint8* dst_y, int width); void YUY2ToUVRow_Any_SSE2(const uint8* src_yuy2, int stride_yuy2, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void YUY2ToUV422Row_Any_SSE2(const uint8* src_yuy2, - uint8* dst_u, uint8* dst_v, int pix); -void YUY2ToYRow_Any_NEON(const uint8* src_yuy2, uint8* dst_y, int pix); + uint8* dst_u, uint8* dst_v, int width); +void YUY2ToYRow_Any_NEON(const uint8* src_yuy2, uint8* dst_y, int width); void YUY2ToUVRow_Any_NEON(const uint8* src_yuy2, int stride_yuy2, - uint8* dst_u, 
uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void YUY2ToUV422Row_Any_NEON(const uint8* src_yuy2, - uint8* dst_u, uint8* dst_v, int pix); -void UYVYToYRow_AVX2(const uint8* src_uyvy, uint8* dst_y, int pix); + uint8* dst_u, uint8* dst_v, int width); +void UYVYToYRow_AVX2(const uint8* src_uyvy, uint8* dst_y, int width); void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void UYVYToUV422Row_AVX2(const uint8* src_uyvy, - uint8* dst_u, uint8* dst_v, int pix); -void UYVYToYRow_SSE2(const uint8* src_uyvy, uint8* dst_y, int pix); + uint8* dst_u, uint8* dst_v, int width); +void UYVYToYRow_SSE2(const uint8* src_uyvy, uint8* dst_y, int width); void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void UYVYToUV422Row_SSE2(const uint8* src_uyvy, - uint8* dst_u, uint8* dst_v, int pix); -void UYVYToYRow_AVX2(const uint8* src_uyvy, uint8* dst_y, int pix); + uint8* dst_u, uint8* dst_v, int width); +void UYVYToYRow_AVX2(const uint8* src_uyvy, uint8* dst_y, int width); void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void UYVYToUV422Row_AVX2(const uint8* src_uyvy, - uint8* dst_u, uint8* dst_v, int pix); -void UYVYToYRow_NEON(const uint8* src_uyvy, uint8* dst_y, int pix); + uint8* dst_u, uint8* dst_v, int width); +void UYVYToYRow_NEON(const uint8* src_uyvy, uint8* dst_y, int width); void UYVYToUVRow_NEON(const uint8* src_uyvy, int stride_uyvy, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void UYVYToUV422Row_NEON(const uint8* src_uyvy, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); -void UYVYToYRow_C(const uint8* src_uyvy, uint8* dst_y, int pix); +void UYVYToYRow_C(const uint8* src_uyvy, uint8* dst_y, int width); void 
UYVYToUVRow_C(const uint8* src_uyvy, int stride_uyvy, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void UYVYToUV422Row_C(const uint8* src_uyvy, - uint8* dst_u, uint8* dst_v, int pix); -void UYVYToYRow_Any_AVX2(const uint8* src_uyvy, uint8* dst_y, int pix); + uint8* dst_u, uint8* dst_v, int width); +void UYVYToYRow_Any_AVX2(const uint8* src_uyvy, uint8* dst_y, int width); void UYVYToUVRow_Any_AVX2(const uint8* src_uyvy, int stride_uyvy, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void UYVYToUV422Row_Any_AVX2(const uint8* src_uyvy, - uint8* dst_u, uint8* dst_v, int pix); -void UYVYToYRow_Any_SSE2(const uint8* src_uyvy, uint8* dst_y, int pix); + uint8* dst_u, uint8* dst_v, int width); +void UYVYToYRow_Any_SSE2(const uint8* src_uyvy, uint8* dst_y, int width); void UYVYToUVRow_Any_SSE2(const uint8* src_uyvy, int stride_uyvy, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void UYVYToUV422Row_Any_SSE2(const uint8* src_uyvy, - uint8* dst_u, uint8* dst_v, int pix); -void UYVYToYRow_Any_NEON(const uint8* src_uyvy, uint8* dst_y, int pix); + uint8* dst_u, uint8* dst_v, int width); +void UYVYToYRow_Any_NEON(const uint8* src_uyvy, uint8* dst_y, int width); void UYVYToUVRow_Any_NEON(const uint8* src_uyvy, int stride_uyvy, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void UYVYToUV422Row_Any_NEON(const uint8* src_uyvy, - uint8* dst_u, uint8* dst_v, int pix); + uint8* dst_u, uint8* dst_v, int width); void I422ToYUY2Row_C(const uint8* src_y, const uint8* src_u, @@ -1673,7 +1768,6 @@ void I422ToUYVYRow_Any_NEON(const uint8* src_y, // Effects related row functions. 
void ARGBAttenuateRow_C(const uint8* src_argb, uint8* dst_argb, int width); -void ARGBAttenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width); void ARGBAttenuateRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width); void ARGBAttenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width); void ARGBAttenuateRow_NEON(const uint8* src_argb, uint8* dst_argb, int width); @@ -1753,9 +1847,6 @@ void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride, void InterpolateRow_C(uint8* dst_ptr, const uint8* src_ptr, ptrdiff_t src_stride_ptr, int width, int source_y_fraction); -void InterpolateRow_SSE2(uint8* dst_ptr, const uint8* src_ptr, - ptrdiff_t src_stride_ptr, int width, - int source_y_fraction); void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr, ptrdiff_t src_stride_ptr, int width, int source_y_fraction); @@ -1765,24 +1856,21 @@ void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr, void InterpolateRow_NEON(uint8* dst_ptr, const uint8* src_ptr, ptrdiff_t src_stride_ptr, int width, int source_y_fraction); -void InterpolateRow_MIPS_DSPR2(uint8* dst_ptr, const uint8* src_ptr, - ptrdiff_t src_stride_ptr, int width, - int source_y_fraction); +void InterpolateRow_DSPR2(uint8* dst_ptr, const uint8* src_ptr, + ptrdiff_t src_stride_ptr, int width, + int source_y_fraction); void InterpolateRow_Any_NEON(uint8* dst_ptr, const uint8* src_ptr, ptrdiff_t src_stride_ptr, int width, int source_y_fraction); -void InterpolateRow_Any_SSE2(uint8* dst_ptr, const uint8* src_ptr, - ptrdiff_t src_stride_ptr, int width, - int source_y_fraction); void InterpolateRow_Any_SSSE3(uint8* dst_ptr, const uint8* src_ptr, ptrdiff_t src_stride_ptr, int width, int source_y_fraction); void InterpolateRow_Any_AVX2(uint8* dst_ptr, const uint8* src_ptr, ptrdiff_t src_stride_ptr, int width, int source_y_fraction); -void InterpolateRow_Any_MIPS_DSPR2(uint8* dst_ptr, const uint8* src_ptr, - ptrdiff_t src_stride_ptr, int width, - int source_y_fraction); +void 
InterpolateRow_Any_DSPR2(uint8* dst_ptr, const uint8* src_ptr, + ptrdiff_t src_stride_ptr, int width, + int source_y_fraction); void InterpolateRow_16_C(uint16* dst_ptr, const uint16* src_ptr, ptrdiff_t src_stride_ptr, diff --git a/libs/libvpx/third_party/libyuv/include/libyuv/scale_argb.h b/libs/libvpx/third_party/libyuv/include/libyuv/scale_argb.h index 0c9b362575..b56cf52099 100644 --- a/libs/libvpx/third_party/libyuv/include/libyuv/scale_argb.h +++ b/libs/libvpx/third_party/libyuv/include/libyuv/scale_argb.h @@ -35,7 +35,6 @@ int ARGBScaleClip(const uint8* src_argb, int src_stride_argb, int clip_x, int clip_y, int clip_width, int clip_height, enum FilterMode filtering); -// TODO(fbarchard): Implement this. // Scale with YUV conversion to ARGB and clipping. LIBYUV_API int YUVToARGBScaleClip(const uint8* src_y, int src_stride_y, diff --git a/libs/libvpx/third_party/libyuv/include/libyuv/scale_row.h b/libs/libvpx/third_party/libyuv/include/libyuv/scale_row.h index 94ad9cf86b..df699e6c22 100644 --- a/libs/libvpx/third_party/libyuv/include/libyuv/scale_row.h +++ b/libs/libvpx/third_party/libyuv/include/libyuv/scale_row.h @@ -23,6 +23,26 @@ extern "C" { (defined(__i386__) && !defined(__SSE2__)) #define LIBYUV_DISABLE_X86 #endif +// MemorySanitizer does not support assembly code yet. http://crbug.com/344505 +#if defined(__has_feature) +#if __has_feature(memory_sanitizer) +#define LIBYUV_DISABLE_X86 +#endif +#endif + +// GCC >= 4.7.0 required for AVX2. +#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) +#if (__GNUC__ > 4) || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 7)) +#define GCC_HAS_AVX2 1 +#endif // GNUC >= 4.7 +#endif // __GNUC__ + +// clang >= 3.4.0 required for AVX2. +#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__)) +#if (__clang_major__ > 3) || (__clang_major__ == 3 && (__clang_minor__ >= 4)) +#define CLANG_HAS_AVX2 1 +#endif // clang >= 3.4 +#endif // __clang__ // Visual C 2012 required for AVX2. 
#if defined(_M_IX86) && !defined(__clang__) && \ @@ -42,24 +62,23 @@ extern "C" { #define HAS_SCALEARGBROWDOWNEVEN_SSE2 #define HAS_SCALECOLSUP2_SSE2 #define HAS_SCALEFILTERCOLS_SSSE3 -#define HAS_SCALEROWDOWN2_SSE2 +#define HAS_SCALEROWDOWN2_SSSE3 #define HAS_SCALEROWDOWN34_SSSE3 #define HAS_SCALEROWDOWN38_SSSE3 -#define HAS_SCALEROWDOWN4_SSE2 +#define HAS_SCALEROWDOWN4_SSSE3 +#define HAS_SCALEADDROW_SSE2 #endif -// The following are available on VS2012: -#if !defined(LIBYUV_DISABLE_X86) && defined(VISUALC_HAS_AVX2) +// The following are available on all x86 platforms, but +// require VS2012, clang 3.4 or gcc 4.7. +// The code supports NaCL but requires a new compiler and validator. +#if !defined(LIBYUV_DISABLE_X86) && (defined(VISUALC_HAS_AVX2) || \ + defined(CLANG_HAS_AVX2) || defined(GCC_HAS_AVX2)) #define HAS_SCALEADDROW_AVX2 #define HAS_SCALEROWDOWN2_AVX2 #define HAS_SCALEROWDOWN4_AVX2 #endif -// The following are available on Visual C: -#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && !defined(__clang__) -#define HAS_SCALEADDROW_SSE2 -#endif - // The following are available on Neon platforms: #if !defined(LIBYUV_DISABLE_NEON) && !defined(__native_client__) && \ (defined(__ARM_NEON__) || defined(LIBYUV_NEON) || defined(__aarch64__)) @@ -77,10 +96,10 @@ extern "C" { // The following are available on Mips platforms: #if !defined(LIBYUV_DISABLE_MIPS) && !defined(__native_client__) && \ defined(__mips__) && defined(__mips_dsp) && (__mips_dsp_rev >= 2) -#define HAS_SCALEROWDOWN2_MIPS_DSPR2 -#define HAS_SCALEROWDOWN4_MIPS_DSPR2 -#define HAS_SCALEROWDOWN34_MIPS_DSPR2 -#define HAS_SCALEROWDOWN38_MIPS_DSPR2 +#define HAS_SCALEROWDOWN2_DSPR2 +#define HAS_SCALEROWDOWN4_DSPR2 +#define HAS_SCALEROWDOWN34_DSPR2 +#define HAS_SCALEROWDOWN38_DSPR2 #endif // Scale ARGB vertically with bilinear interpolation. 
@@ -133,6 +152,8 @@ void ScaleRowDown2Linear_16_C(const uint16* src_ptr, ptrdiff_t src_stride, uint16* dst, int dst_width); void ScaleRowDown2Box_C(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); +void ScaleRowDown2Box_Odd_C(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst, int dst_width); void ScaleRowDown2Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride, uint16* dst, int dst_width); void ScaleRowDown4_C(const uint8* src_ptr, ptrdiff_t src_stride, @@ -214,22 +235,22 @@ void ScaleARGBFilterCols64_C(uint8* dst_argb, const uint8* src_argb, int dst_width, int x, int dx); // Specialized scalers for x86. -void ScaleRowDown2_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width); -void ScaleRowDown2Linear_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width); -void ScaleRowDown2Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width); +void ScaleRowDown2_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width); +void ScaleRowDown2Linear_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width); +void ScaleRowDown2Box_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width); void ScaleRowDown2_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown2Linear_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown2Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); -void ScaleRowDown4_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width); -void ScaleRowDown4Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width); +void ScaleRowDown4_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width); +void ScaleRowDown4Box_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int 
dst_width); void ScaleRowDown4_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown4Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, @@ -251,22 +272,26 @@ void ScaleRowDown38_3_Box_SSSE3(const uint8* src_ptr, void ScaleRowDown38_2_Box_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); -void ScaleRowDown2_Any_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width); -void ScaleRowDown2Linear_Any_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width); -void ScaleRowDown2Box_Any_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width); +void ScaleRowDown2_Any_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width); +void ScaleRowDown2Linear_Any_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width); +void ScaleRowDown2Box_Any_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width); +void ScaleRowDown2Box_Odd_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width); void ScaleRowDown2_Any_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown2Linear_Any_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown2Box_Any_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width); -void ScaleRowDown4_Any_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width); -void ScaleRowDown4Box_Any_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); +void ScaleRowDown2Box_Odd_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width); +void ScaleRowDown4_Any_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width); +void ScaleRowDown4Box_Any_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int 
dst_width); void ScaleRowDown4_Any_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown4Box_Any_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, @@ -418,6 +443,8 @@ void ScaleRowDown2Linear_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); void ScaleRowDown2Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width); +void ScaleRowDown2Box_Odd_NEON(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst, int dst_width); void ScaleRowDown4_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width); void ScaleRowDown4Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride, @@ -447,28 +474,26 @@ void ScaleFilterCols_NEON(uint8* dst_ptr, const uint8* src_ptr, void ScaleFilterCols_Any_NEON(uint8* dst_ptr, const uint8* src_ptr, int dst_width, int x, int dx); - -void ScaleRowDown2_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst, int dst_width); -void ScaleRowDown2Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst, int dst_width); -void ScaleRowDown4_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst, int dst_width); -void ScaleRowDown4Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst, int dst_width); -void ScaleRowDown34_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst, int dst_width); -void ScaleRowDown34_0_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* d, int dst_width); -void ScaleRowDown34_1_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* d, int dst_width); -void ScaleRowDown38_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst, int dst_width); -void ScaleRowDown38_2_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width); -void ScaleRowDown38_3_Box_MIPS_DSPR2(const uint8* src_ptr, - ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width); +void 
ScaleRowDown2_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst, int dst_width); +void ScaleRowDown2Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst, int dst_width); +void ScaleRowDown4_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst, int dst_width); +void ScaleRowDown4Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst, int dst_width); +void ScaleRowDown34_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst, int dst_width); +void ScaleRowDown34_0_Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* d, int dst_width); +void ScaleRowDown34_1_Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* d, int dst_width); +void ScaleRowDown38_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst, int dst_width); +void ScaleRowDown38_2_Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width); +void ScaleRowDown38_3_Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/third_party/libyuv/include/libyuv/version.h b/libs/libvpx/third_party/libyuv/include/libyuv/version.h index 9d1d746c22..0fbdc022d5 100644 --- a/libs/libvpx/third_party/libyuv/include/libyuv/version.h +++ b/libs/libvpx/third_party/libyuv/include/libyuv/version.h @@ -11,6 +11,6 @@ #ifndef INCLUDE_LIBYUV_VERSION_H_ // NOLINT #define INCLUDE_LIBYUV_VERSION_H_ -#define LIBYUV_VERSION 1456 +#define LIBYUV_VERSION 1616 #endif // INCLUDE_LIBYUV_VERSION_H_ NOLINT diff --git a/libs/libvpx/third_party/libyuv/include/libyuv/video_common.h b/libs/libvpx/third_party/libyuv/include/libyuv/video_common.h index cb6582f24d..ad934e4241 100644 --- a/libs/libvpx/third_party/libyuv/include/libyuv/video_common.h +++ b/libs/libvpx/third_party/libyuv/include/libyuv/video_common.h @@ -62,7 +62,7 @@ enum FourCC { // 2 Secondary YUV formats: row biplanar. 
FOURCC_M420 = FOURCC('M', '4', '2', '0'), - FOURCC_Q420 = FOURCC('Q', '4', '2', '0'), // deprecated. + FOURCC_Q420 = FOURCC('Q', '4', '2', '0'), // deprecated. // 9 Primary RGB formats: 4 32 bpp, 2 24 bpp, 3 16 bpp. FOURCC_ARGB = FOURCC('A', 'R', 'G', 'B'), @@ -90,7 +90,8 @@ enum FourCC { FOURCC_YV24 = FOURCC('Y', 'V', '2', '4'), FOURCC_YU12 = FOURCC('Y', 'U', '1', '2'), // Linux version of I420. FOURCC_J420 = FOURCC('J', '4', '2', '0'), - FOURCC_J400 = FOURCC('J', '4', '0', '0'), + FOURCC_J400 = FOURCC('J', '4', '0', '0'), // unofficial fourcc + FOURCC_H420 = FOURCC('H', '4', '2', '0'), // unofficial fourcc // 14 Auxiliary aliases. CanonicalFourCC() maps these to canonical fourcc. FOURCC_IYUV = FOURCC('I', 'Y', 'U', 'V'), // Alias for I420. @@ -150,6 +151,7 @@ enum FourCCBpp { FOURCC_BPP_YU12 = 12, FOURCC_BPP_J420 = 12, FOURCC_BPP_J400 = 8, + FOURCC_BPP_H420 = 12, FOURCC_BPP_MJPG = 0, // 0 means unknown. FOURCC_BPP_H264 = 0, FOURCC_BPP_IYUV = 12, diff --git a/libs/libvpx/third_party/libyuv/source/compare.cc b/libs/libvpx/third_party/libyuv/source/compare.cc index 46aa8473d2..e3846bdfdd 100644 --- a/libs/libvpx/third_party/libyuv/source/compare.cc +++ b/libs/libvpx/third_party/libyuv/source/compare.cc @@ -17,6 +17,7 @@ #endif #include "libyuv/basic_types.h" +#include "libyuv/compare_row.h" #include "libyuv/cpu_id.h" #include "libyuv/row.h" #include "libyuv/video_common.h" @@ -26,30 +27,13 @@ namespace libyuv { extern "C" { #endif -// hash seed of 5381 recommended. -// Internal C version of HashDjb2 with int sized count for efficiency. 
-uint32 HashDjb2_C(const uint8* src, int count, uint32 seed); - -// This module is for Visual C x86 -#if !defined(LIBYUV_DISABLE_X86) && \ - (defined(_M_IX86) || \ - (defined(__x86_64__) || (defined(__i386__) && !defined(__pic__)))) -#define HAS_HASHDJB2_SSE41 -uint32 HashDjb2_SSE41(const uint8* src, int count, uint32 seed); - -#ifdef VISUALC_HAS_AVX2 -#define HAS_HASHDJB2_AVX2 -uint32 HashDjb2_AVX2(const uint8* src, int count, uint32 seed); -#endif - -#endif // HAS_HASHDJB2_SSE41 - // hash seed of 5381 recommended. LIBYUV_API uint32 HashDjb2(const uint8* src, uint64 count, uint32 seed) { const int kBlockSize = 1 << 15; // 32768; int remainder; - uint32 (*HashDjb2_SSE)(const uint8* src, int count, uint32 seed) = HashDjb2_C; + uint32 (*HashDjb2_SSE)(const uint8* src, int count, uint32 seed) = + HashDjb2_C; #if defined(HAS_HASHDJB2_SSE41) if (TestCpuFlag(kCpuHasSSE41)) { HashDjb2_SSE = HashDjb2_SSE41; @@ -127,23 +111,6 @@ uint32 ARGBDetect(const uint8* argb, int stride_argb, int width, int height) { return fourcc; } -uint32 SumSquareError_C(const uint8* src_a, const uint8* src_b, int count); -#if !defined(LIBYUV_DISABLE_NEON) && \ - (defined(__ARM_NEON__) || defined(LIBYUV_NEON) || defined(__aarch64__)) -#define HAS_SUMSQUAREERROR_NEON -uint32 SumSquareError_NEON(const uint8* src_a, const uint8* src_b, int count); -#endif -#if !defined(LIBYUV_DISABLE_X86) && \ - (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__)) -#define HAS_SUMSQUAREERROR_SSE2 -uint32 SumSquareError_SSE2(const uint8* src_a, const uint8* src_b, int count); -#endif - -#ifdef VISUALC_HAS_AVX2 -#define HAS_SUMSQUAREERROR_AVX2 -uint32 SumSquareError_AVX2(const uint8* src_a, const uint8* src_b, int count); -#endif - // TODO(fbarchard): Refactor into row function. 
LIBYUV_API uint64 ComputeSumSquareError(const uint8* src_a, const uint8* src_b, diff --git a/libs/libvpx/third_party/libyuv/source/compare_common.cc b/libs/libvpx/third_party/libyuv/source/compare_common.cc index c546b51829..42fc589354 100644 --- a/libs/libvpx/third_party/libyuv/source/compare_common.cc +++ b/libs/libvpx/third_party/libyuv/source/compare_common.cc @@ -10,6 +10,8 @@ #include "libyuv/basic_types.h" +#include "libyuv/compare_row.h" + #ifdef __cplusplus namespace libyuv { extern "C" { diff --git a/libs/libvpx/third_party/libyuv/source/compare_gcc.cc b/libs/libvpx/third_party/libyuv/source/compare_gcc.cc index 247cb33bba..1b83edb166 100644 --- a/libs/libvpx/third_party/libyuv/source/compare_gcc.cc +++ b/libs/libvpx/third_party/libyuv/source/compare_gcc.cc @@ -9,6 +9,8 @@ */ #include "libyuv/basic_types.h" + +#include "libyuv/compare_row.h" #include "libyuv/row.h" #ifdef __cplusplus @@ -16,11 +18,13 @@ namespace libyuv { extern "C" { #endif -#if !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__)) +// This module is for GCC x86 and x64. 
+#if !defined(LIBYUV_DISABLE_X86) && \ + (defined(__x86_64__) || (defined(__i386__) && !defined(_MSC_VER))) uint32 SumSquareError_SSE2(const uint8* src_a, const uint8* src_b, int count) { uint32 sse; - asm volatile ( // NOLINT + asm volatile ( "pxor %%xmm0,%%xmm0 \n" "pxor %%xmm5,%%xmm5 \n" LABELALIGN @@ -54,15 +58,10 @@ uint32 SumSquareError_SSE2(const uint8* src_a, const uint8* src_b, int count) { "+r"(count), // %2 "=g"(sse) // %3 :: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" - ); // NOLINT + ); return sse; } -#endif // defined(__x86_64__) || defined(__i386__) - -#if !defined(LIBYUV_DISABLE_X86) && \ - (defined(__x86_64__) || (defined(__i386__) && !defined(__pic__))) -#define HAS_HASHDJB2_SSE41 static uvec32 kHash16x33 = { 0x92d9e201, 0, 0, 0 }; // 33 ^ 16 static uvec32 kHashMul0 = { 0x0c3525e1, // 33 ^ 15 @@ -91,7 +90,7 @@ static uvec32 kHashMul3 = { uint32 HashDjb2_SSE41(const uint8* src, int count, uint32 seed) { uint32 hash; - asm volatile ( // NOLINT + asm volatile ( "movd %2,%%xmm0 \n" "pxor %%xmm7,%%xmm7 \n" "movdqa %4,%%xmm6 \n" @@ -140,7 +139,7 @@ uint32 HashDjb2_SSE41(const uint8* src, int count, uint32 seed) { "m"(kHashMul3) // %8 : "memory", "cc" , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" - ); // NOLINT + ); return hash; } #endif // defined(__x86_64__) || (defined(__i386__) && !defined(__pic__))) diff --git a/libs/libvpx/third_party/libyuv/source/compare_neon.cc b/libs/libvpx/third_party/libyuv/source/compare_neon.cc index ef006ec41c..49aa3b4eef 100644 --- a/libs/libvpx/third_party/libyuv/source/compare_neon.cc +++ b/libs/libvpx/third_party/libyuv/source/compare_neon.cc @@ -9,6 +9,8 @@ */ #include "libyuv/basic_types.h" + +#include "libyuv/compare_row.h" #include "libyuv/row.h" #ifdef __cplusplus @@ -27,7 +29,6 @@ uint32 SumSquareError_NEON(const uint8* src_a, const uint8* src_b, int count) { "vmov.u8 q9, #0 \n" "vmov.u8 q11, #0 \n" - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0}, [%0]! 
\n" diff --git a/libs/libvpx/third_party/libyuv/source/compare_neon64.cc b/libs/libvpx/third_party/libyuv/source/compare_neon64.cc index 6d1e5e1bc9..f9c7df98c8 100644 --- a/libs/libvpx/third_party/libyuv/source/compare_neon64.cc +++ b/libs/libvpx/third_party/libyuv/source/compare_neon64.cc @@ -9,6 +9,8 @@ */ #include "libyuv/basic_types.h" + +#include "libyuv/compare_row.h" #include "libyuv/row.h" #ifdef __cplusplus @@ -26,7 +28,6 @@ uint32 SumSquareError_NEON(const uint8* src_a, const uint8* src_b, int count) { "eor v17.16b, v17.16b, v17.16b \n" "eor v19.16b, v19.16b, v19.16b \n" - ".p2align 2 \n" "1: \n" MEMACCESS(0) "ld1 {v0.16b}, [%0], #16 \n" diff --git a/libs/libvpx/third_party/libyuv/source/compare_win.cc b/libs/libvpx/third_party/libyuv/source/compare_win.cc index 19806f2750..dc86fe25b1 100644 --- a/libs/libvpx/third_party/libyuv/source/compare_win.cc +++ b/libs/libvpx/third_party/libyuv/source/compare_win.cc @@ -9,6 +9,8 @@ */ #include "libyuv/basic_types.h" + +#include "libyuv/compare_row.h" #include "libyuv/row.h" #ifdef __cplusplus @@ -16,9 +18,8 @@ namespace libyuv { extern "C" { #endif -// This module is for Visual C x86. 
-#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && \ - defined(_MSC_VER) && !defined(__clang__) +// This module is for 32 bit Visual C x86 and clangcl +#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) __declspec(naked) uint32 SumSquareError_SSE2(const uint8* src_a, const uint8* src_b, int count) { @@ -100,41 +101,32 @@ uint32 SumSquareError_AVX2(const uint8* src_a, const uint8* src_b, int count) { } #endif // _MSC_VER >= 1700 -#define HAS_HASHDJB2_SSE41 -static uvec32 kHash16x33 = { 0x92d9e201, 0, 0, 0 }; // 33 ^ 16 -static uvec32 kHashMul0 = { +uvec32 kHash16x33 = { 0x92d9e201, 0, 0, 0 }; // 33 ^ 16 +uvec32 kHashMul0 = { 0x0c3525e1, // 33 ^ 15 0xa3476dc1, // 33 ^ 14 0x3b4039a1, // 33 ^ 13 0x4f5f0981, // 33 ^ 12 }; -static uvec32 kHashMul1 = { +uvec32 kHashMul1 = { 0x30f35d61, // 33 ^ 11 0x855cb541, // 33 ^ 10 0x040a9121, // 33 ^ 9 0x747c7101, // 33 ^ 8 }; -static uvec32 kHashMul2 = { +uvec32 kHashMul2 = { 0xec41d4e1, // 33 ^ 7 0x4cfa3cc1, // 33 ^ 6 0x025528a1, // 33 ^ 5 0x00121881, // 33 ^ 4 }; -static uvec32 kHashMul3 = { +uvec32 kHashMul3 = { 0x00008c61, // 33 ^ 3 0x00000441, // 33 ^ 2 0x00000021, // 33 ^ 1 0x00000001, // 33 ^ 0 }; -// 27: 66 0F 38 40 C6 pmulld xmm0,xmm6 -// 44: 66 0F 38 40 DD pmulld xmm3,xmm5 -// 59: 66 0F 38 40 E5 pmulld xmm4,xmm5 -// 72: 66 0F 38 40 D5 pmulld xmm2,xmm5 -// 83: 66 0F 38 40 CD pmulld xmm1,xmm5 -#define pmulld(reg) _asm _emit 0x66 _asm _emit 0x0F _asm _emit 0x38 \ - _asm _emit 0x40 _asm _emit reg - __declspec(naked) uint32 HashDjb2_SSE41(const uint8* src, int count, uint32 seed) { __asm { @@ -143,30 +135,30 @@ uint32 HashDjb2_SSE41(const uint8* src, int count, uint32 seed) { movd xmm0, [esp + 12] // seed pxor xmm7, xmm7 // constant 0 for unpck - movdqa xmm6, kHash16x33 + movdqa xmm6, xmmword ptr kHash16x33 wloop: movdqu xmm1, [eax] // src[0-15] lea eax, [eax + 16] - pmulld(0xc6) // pmulld xmm0,xmm6 hash *= 33 ^ 16 - movdqa xmm5, kHashMul0 + pmulld xmm0, xmm6 // hash *= 33 ^ 16 + movdqa xmm5, xmmword ptr kHashMul0 
movdqa xmm2, xmm1 punpcklbw xmm2, xmm7 // src[0-7] movdqa xmm3, xmm2 punpcklwd xmm3, xmm7 // src[0-3] - pmulld(0xdd) // pmulld xmm3, xmm5 - movdqa xmm5, kHashMul1 + pmulld xmm3, xmm5 + movdqa xmm5, xmmword ptr kHashMul1 movdqa xmm4, xmm2 punpckhwd xmm4, xmm7 // src[4-7] - pmulld(0xe5) // pmulld xmm4, xmm5 - movdqa xmm5, kHashMul2 + pmulld xmm4, xmm5 + movdqa xmm5, xmmword ptr kHashMul2 punpckhbw xmm1, xmm7 // src[8-15] movdqa xmm2, xmm1 punpcklwd xmm2, xmm7 // src[8-11] - pmulld(0xd5) // pmulld xmm2, xmm5 - movdqa xmm5, kHashMul3 + pmulld xmm2, xmm5 + movdqa xmm5, xmmword ptr kHashMul3 punpckhwd xmm1, xmm7 // src[12-15] - pmulld(0xcd) // pmulld xmm1, xmm5 + pmulld xmm1, xmm5 paddd xmm3, xmm4 // add 16 results paddd xmm1, xmm2 paddd xmm1, xmm3 @@ -191,36 +183,37 @@ uint32 HashDjb2_AVX2(const uint8* src, int count, uint32 seed) { __asm { mov eax, [esp + 4] // src mov ecx, [esp + 8] // count - movd xmm0, [esp + 12] // seed - movdqa xmm6, kHash16x33 + vmovd xmm0, [esp + 12] // seed wloop: - vpmovzxbd xmm3, dword ptr [eax] // src[0-3] - pmulld xmm0, xmm6 // hash *= 33 ^ 16 - vpmovzxbd xmm4, dword ptr [eax + 4] // src[4-7] - pmulld xmm3, kHashMul0 - vpmovzxbd xmm2, dword ptr [eax + 8] // src[8-11] - pmulld xmm4, kHashMul1 - vpmovzxbd xmm1, dword ptr [eax + 12] // src[12-15] - pmulld xmm2, kHashMul2 + vpmovzxbd xmm3, [eax] // src[0-3] + vpmulld xmm0, xmm0, xmmword ptr kHash16x33 // hash *= 33 ^ 16 + vpmovzxbd xmm4, [eax + 4] // src[4-7] + vpmulld xmm3, xmm3, xmmword ptr kHashMul0 + vpmovzxbd xmm2, [eax + 8] // src[8-11] + vpmulld xmm4, xmm4, xmmword ptr kHashMul1 + vpmovzxbd xmm1, [eax + 12] // src[12-15] + vpmulld xmm2, xmm2, xmmword ptr kHashMul2 lea eax, [eax + 16] - pmulld xmm1, kHashMul3 - paddd xmm3, xmm4 // add 16 results - paddd xmm1, xmm2 - paddd xmm1, xmm3 - pshufd xmm2, xmm1, 0x0e // upper 2 dwords - paddd xmm1, xmm2 - pshufd xmm2, xmm1, 0x01 - paddd xmm1, xmm2 - paddd xmm0, xmm1 + vpmulld xmm1, xmm1, xmmword ptr kHashMul3 + vpaddd xmm3, xmm3, xmm4 // add 16 
results + vpaddd xmm1, xmm1, xmm2 + vpaddd xmm1, xmm1, xmm3 + vpshufd xmm2, xmm1, 0x0e // upper 2 dwords + vpaddd xmm1, xmm1,xmm2 + vpshufd xmm2, xmm1, 0x01 + vpaddd xmm1, xmm1, xmm2 + vpaddd xmm0, xmm0, xmm1 sub ecx, 16 jg wloop - movd eax, xmm0 // return hash + vmovd eax, xmm0 // return hash + vzeroupper ret } } #endif // _MSC_VER >= 1700 + #endif // !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) #ifdef __cplusplus diff --git a/libs/libvpx/third_party/libyuv/source/convert.cc b/libs/libvpx/third_party/libyuv/source/convert.cc index 3ad6bd7a4b..a33742d24d 100644 --- a/libs/libvpx/third_party/libyuv/source/convert.cc +++ b/libs/libvpx/third_party/libyuv/source/convert.cc @@ -40,13 +40,14 @@ static int I4xxToI420(const uint8* src_y, int src_stride_y, const int dst_y_height = Abs(src_y_height); const int dst_uv_width = SUBSAMPLE(dst_y_width, 1, 1); const int dst_uv_height = SUBSAMPLE(dst_y_height, 1, 1); - if (src_y_width == 0 || src_y_height == 0 || - src_uv_width == 0 || src_uv_height == 0) { + if (src_uv_width == 0 || src_uv_height == 0) { return -1; } - ScalePlane(src_y, src_stride_y, src_y_width, src_y_height, - dst_y, dst_stride_y, dst_y_width, dst_y_height, - kFilterBilinear); + if (dst_y) { + ScalePlane(src_y, src_stride_y, src_y_width, src_y_height, + dst_y, dst_stride_y, dst_y_width, dst_y_height, + kFilterBilinear); + } ScalePlane(src_u, src_stride_u, src_uv_width, src_uv_height, dst_u, dst_stride_u, dst_uv_width, dst_uv_height, kFilterBilinear); @@ -69,8 +70,8 @@ int I420Copy(const uint8* src_y, int src_stride_y, int width, int height) { int halfwidth = (width + 1) >> 1; int halfheight = (height + 1) >> 1; - if (!src_y || !src_u || !src_v || - !dst_y || !dst_u || !dst_v || + if (!src_u || !src_v || + !dst_u || !dst_v || width <= 0 || height == 0) { return -1; } @@ -166,7 +167,7 @@ int I400ToI420(const uint8* src_y, int src_stride_y, int width, int height) { int halfwidth = (width + 1) >> 1; int halfheight = (height + 1) >> 1; - if (!src_y || !dst_y || 
!dst_u || !dst_v || + if (!dst_u || !dst_v || width <= 0 || height == 0) { return -1; } @@ -177,7 +178,9 @@ int I400ToI420(const uint8* src_y, int src_stride_y, src_y = src_y + (height - 1) * src_stride_y; src_stride_y = -src_stride_y; } - CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height); + if (dst_y) { + CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height); + } SetPlane(dst_u, dst_stride_u, halfwidth, halfheight, 128); SetPlane(dst_v, dst_stride_v, halfwidth, halfheight, 128); return 0; @@ -242,13 +245,9 @@ static int X420ToI420(const uint8* src_y, uint8* dst_u, int dst_stride_u, uint8* dst_v, int dst_stride_v, int width, int height) { - int y; int halfwidth = (width + 1) >> 1; int halfheight = (height + 1) >> 1; - void (*SplitUVRow)(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) = - SplitUVRow_C; - if (!src_y || !src_uv || - !dst_y || !dst_u || !dst_v || + if (!src_uv || !dst_u || !dst_v || width <= 0 || height == 0) { return -1; } @@ -256,7 +255,9 @@ static int X420ToI420(const uint8* src_y, if (height < 0) { height = -height; halfheight = (height + 1) >> 1; - dst_y = dst_y + (height - 1) * dst_stride_y; + if (dst_y) { + dst_y = dst_y + (height - 1) * dst_stride_y; + } dst_u = dst_u + (halfheight - 1) * dst_stride_u; dst_v = dst_v + (halfheight - 1) * dst_stride_v; dst_stride_y = -dst_stride_y; @@ -279,41 +280,6 @@ static int X420ToI420(const uint8* src_y, halfheight = 1; src_stride_uv = dst_stride_u = dst_stride_v = 0; } -#if defined(HAS_SPLITUVROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - SplitUVRow = SplitUVRow_Any_SSE2; - if (IS_ALIGNED(halfwidth, 16)) { - SplitUVRow = SplitUVRow_SSE2; - } - } -#endif -#if defined(HAS_SPLITUVROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2)) { - SplitUVRow = SplitUVRow_Any_AVX2; - if (IS_ALIGNED(halfwidth, 32)) { - SplitUVRow = SplitUVRow_AVX2; - } - } -#endif -#if defined(HAS_SPLITUVROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - SplitUVRow = SplitUVRow_Any_NEON; - if (IS_ALIGNED(halfwidth, 
16)) { - SplitUVRow = SplitUVRow_NEON; - } - } -#endif -#if defined(HAS_SPLITUVROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && - IS_ALIGNED(src_uv, 4) && IS_ALIGNED(src_stride_uv, 4) && - IS_ALIGNED(dst_u, 4) && IS_ALIGNED(dst_stride_u, 4) && - IS_ALIGNED(dst_v, 4) && IS_ALIGNED(dst_stride_v, 4)) { - SplitUVRow = SplitUVRow_Any_MIPS_DSPR2; - if (IS_ALIGNED(halfwidth, 16)) { - SplitUVRow = SplitUVRow_MIPS_DSPR2; - } - } -#endif if (dst_y) { if (src_stride_y0 == src_stride_y1) { @@ -324,13 +290,10 @@ static int X420ToI420(const uint8* src_y, } } - for (y = 0; y < halfheight; ++y) { - // Copy a row of UV. - SplitUVRow(src_uv, dst_u, dst_v, halfwidth); - dst_u += dst_stride_u; - dst_v += dst_stride_v; - src_uv += src_stride_uv; - } + // Split UV plane - NV12 / NV21 + SplitUVPlane(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v, dst_stride_v, + halfwidth, halfheight); + return 0; } @@ -390,9 +353,9 @@ int YUY2ToI420(const uint8* src_yuy2, int src_stride_yuy2, int width, int height) { int y; void (*YUY2ToUVRow)(const uint8* src_yuy2, int src_stride_yuy2, - uint8* dst_u, uint8* dst_v, int pix) = YUY2ToUVRow_C; + uint8* dst_u, uint8* dst_v, int width) = YUY2ToUVRow_C; void (*YUY2ToYRow)(const uint8* src_yuy2, - uint8* dst_y, int pix) = YUY2ToYRow_C; + uint8* dst_y, int width) = YUY2ToYRow_C; // Negative height means invert the image. if (height < 0) { height = -height; @@ -455,9 +418,9 @@ int UYVYToI420(const uint8* src_uyvy, int src_stride_uyvy, int width, int height) { int y; void (*UYVYToUVRow)(const uint8* src_uyvy, int src_stride_uyvy, - uint8* dst_u, uint8* dst_v, int pix) = UYVYToUVRow_C; + uint8* dst_u, uint8* dst_v, int width) = UYVYToUVRow_C; void (*UYVYToYRow)(const uint8* src_uyvy, - uint8* dst_y, int pix) = UYVYToYRow_C; + uint8* dst_y, int width) = UYVYToYRow_C; // Negative height means invert the image. 
if (height < 0) { height = -height; @@ -521,7 +484,7 @@ int ARGBToI420(const uint8* src_argb, int src_stride_argb, int y; void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb, uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C; - void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) = + void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) = ARGBToYRow_C; if (!src_argb || !dst_y || !dst_u || !dst_v || @@ -597,7 +560,7 @@ int BGRAToI420(const uint8* src_bgra, int src_stride_bgra, int y; void (*BGRAToUVRow)(const uint8* src_bgra0, int src_stride_bgra, uint8* dst_u, uint8* dst_v, int width) = BGRAToUVRow_C; - void (*BGRAToYRow)(const uint8* src_bgra, uint8* dst_y, int pix) = + void (*BGRAToYRow)(const uint8* src_bgra, uint8* dst_y, int width) = BGRAToYRow_C; if (!src_bgra || !dst_y || !dst_u || !dst_v || @@ -663,7 +626,7 @@ int ABGRToI420(const uint8* src_abgr, int src_stride_abgr, int y; void (*ABGRToUVRow)(const uint8* src_abgr0, int src_stride_abgr, uint8* dst_u, uint8* dst_v, int width) = ABGRToUVRow_C; - void (*ABGRToYRow)(const uint8* src_abgr, uint8* dst_y, int pix) = + void (*ABGRToYRow)(const uint8* src_abgr, uint8* dst_y, int width) = ABGRToYRow_C; if (!src_abgr || !dst_y || !dst_u || !dst_v || @@ -729,7 +692,7 @@ int RGBAToI420(const uint8* src_rgba, int src_stride_rgba, int y; void (*RGBAToUVRow)(const uint8* src_rgba0, int src_stride_rgba, uint8* dst_u, uint8* dst_v, int width) = RGBAToUVRow_C; - void (*RGBAToYRow)(const uint8* src_rgba, uint8* dst_y, int pix) = + void (*RGBAToYRow)(const uint8* src_rgba, uint8* dst_y, int width) = RGBAToYRow_C; if (!src_rgba || !dst_y || !dst_u || !dst_v || @@ -796,14 +759,14 @@ int RGB24ToI420(const uint8* src_rgb24, int src_stride_rgb24, #if defined(HAS_RGB24TOYROW_NEON) void (*RGB24ToUVRow)(const uint8* src_rgb24, int src_stride_rgb24, uint8* dst_u, uint8* dst_v, int width) = RGB24ToUVRow_C; - void (*RGB24ToYRow)(const uint8* src_rgb24, uint8* dst_y, int pix) = + void 
(*RGB24ToYRow)(const uint8* src_rgb24, uint8* dst_y, int width) = RGB24ToYRow_C; #else - void (*RGB24ToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int pix) = + void (*RGB24ToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int width) = RGB24ToARGBRow_C; void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb, uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C; - void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) = + void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) = ARGBToYRow_C; #endif if (!src_rgb24 || !dst_y || !dst_u || !dst_v || @@ -910,14 +873,14 @@ int RAWToI420(const uint8* src_raw, int src_stride_raw, #if defined(HAS_RAWTOYROW_NEON) void (*RAWToUVRow)(const uint8* src_raw, int src_stride_raw, uint8* dst_u, uint8* dst_v, int width) = RAWToUVRow_C; - void (*RAWToYRow)(const uint8* src_raw, uint8* dst_y, int pix) = + void (*RAWToYRow)(const uint8* src_raw, uint8* dst_y, int width) = RAWToYRow_C; #else - void (*RAWToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int pix) = + void (*RAWToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int width) = RAWToARGBRow_C; void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb, uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C; - void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) = + void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) = ARGBToYRow_C; #endif if (!src_raw || !dst_y || !dst_u || !dst_v || @@ -1024,14 +987,14 @@ int RGB565ToI420(const uint8* src_rgb565, int src_stride_rgb565, #if defined(HAS_RGB565TOYROW_NEON) void (*RGB565ToUVRow)(const uint8* src_rgb565, int src_stride_rgb565, uint8* dst_u, uint8* dst_v, int width) = RGB565ToUVRow_C; - void (*RGB565ToYRow)(const uint8* src_rgb565, uint8* dst_y, int pix) = + void (*RGB565ToYRow)(const uint8* src_rgb565, uint8* dst_y, int width) = RGB565ToYRow_C; #else - void (*RGB565ToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int pix) = + void (*RGB565ToARGBRow)(const uint8* 
src_rgb, uint8* dst_argb, int width) = RGB565ToARGBRow_C; void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb, uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C; - void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) = + void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) = ARGBToYRow_C; #endif if (!src_rgb565 || !dst_y || !dst_u || !dst_v || @@ -1146,14 +1109,14 @@ int ARGB1555ToI420(const uint8* src_argb1555, int src_stride_argb1555, #if defined(HAS_ARGB1555TOYROW_NEON) void (*ARGB1555ToUVRow)(const uint8* src_argb1555, int src_stride_argb1555, uint8* dst_u, uint8* dst_v, int width) = ARGB1555ToUVRow_C; - void (*ARGB1555ToYRow)(const uint8* src_argb1555, uint8* dst_y, int pix) = + void (*ARGB1555ToYRow)(const uint8* src_argb1555, uint8* dst_y, int width) = ARGB1555ToYRow_C; #else - void (*ARGB1555ToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int pix) = + void (*ARGB1555ToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int width) = ARGB1555ToARGBRow_C; void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb, uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C; - void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) = + void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) = ARGBToYRow_C; #endif if (!src_argb1555 || !dst_y || !dst_u || !dst_v || @@ -1270,14 +1233,14 @@ int ARGB4444ToI420(const uint8* src_argb4444, int src_stride_argb4444, #if defined(HAS_ARGB4444TOYROW_NEON) void (*ARGB4444ToUVRow)(const uint8* src_argb4444, int src_stride_argb4444, uint8* dst_u, uint8* dst_v, int width) = ARGB4444ToUVRow_C; - void (*ARGB4444ToYRow)(const uint8* src_argb4444, uint8* dst_y, int pix) = + void (*ARGB4444ToYRow)(const uint8* src_argb4444, uint8* dst_y, int width) = ARGB4444ToYRow_C; #else - void (*ARGB4444ToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int pix) = + void (*ARGB4444ToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int width) = ARGB4444ToARGBRow_C; void 
(*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb, uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C; - void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) = + void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) = ARGBToYRow_C; #endif if (!src_argb4444 || !dst_y || !dst_u || !dst_v || @@ -1383,6 +1346,81 @@ int ARGB4444ToI420(const uint8* src_argb4444, int src_stride_argb4444, return 0; } +static void SplitPixels(const uint8* src_u, int src_pixel_stride_uv, + uint8* dst_u, int width) { + int i; + for (i = 0; i < width; ++i) { + *dst_u = *src_u; + ++dst_u; + src_u += src_pixel_stride_uv; + } +} + +// Convert Android420 to I420. +LIBYUV_API +int Android420ToI420(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + int src_pixel_stride_uv, + uint8* dst_y, int dst_stride_y, + uint8* dst_u, int dst_stride_u, + uint8* dst_v, int dst_stride_v, + int width, int height) { + int y; + const int vu_off = src_v - src_u; + int halfwidth = (width + 1) >> 1; + int halfheight = (height + 1) >> 1; + if (!src_u || !src_v || + !dst_u || !dst_v || + width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. 
+ if (height < 0) { + height = -height; + halfheight = (height + 1) >> 1; + src_y = src_y + (height - 1) * src_stride_y; + src_u = src_u + (halfheight - 1) * src_stride_u; + src_v = src_v + (halfheight - 1) * src_stride_v; + src_stride_y = -src_stride_y; + src_stride_u = -src_stride_u; + src_stride_v = -src_stride_v; + } + + if (dst_y) { + CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height); + } + + // Copy UV planes as is - I420 + if (src_pixel_stride_uv == 1) { + CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth, halfheight); + CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth, halfheight); + return 0; + // Split UV planes - NV21 + } else if (src_pixel_stride_uv == 2 && vu_off == -1 && + src_stride_u == src_stride_v) { + SplitUVPlane(src_v, src_stride_v, dst_v, dst_stride_v, dst_u, dst_stride_u, + halfwidth, halfheight); + return 0; + // Split UV planes - NV12 + } else if (src_pixel_stride_uv == 2 && vu_off == 1 && + src_stride_u == src_stride_v) { + SplitUVPlane(src_u, src_stride_u, dst_u, dst_stride_u, dst_v, dst_stride_v, + halfwidth, halfheight); + return 0; + } + + for (y = 0; y < halfheight; ++y) { + SplitPixels(src_u, src_pixel_stride_uv, dst_u, halfwidth); + SplitPixels(src_v, src_pixel_stride_uv, dst_v, halfwidth); + src_u += src_stride_u; + src_v += src_stride_v; + dst_u += dst_stride_u; + dst_v += dst_stride_v; + } + return 0; +} + #ifdef __cplusplus } // extern "C" } // namespace libyuv diff --git a/libs/libvpx/third_party/libyuv/source/convert_argb.cc b/libs/libvpx/third_party/libyuv/source/convert_argb.cc index 44756bc41c..fb9582d627 100644 --- a/libs/libvpx/third_party/libyuv/source/convert_argb.cc +++ b/libs/libvpx/third_party/libyuv/source/convert_argb.cc @@ -14,6 +14,7 @@ #ifdef HAVE_JPEG #include "libyuv/mjpeg_decoder.h" #endif +#include "libyuv/planar_functions.h" // For CopyPlane and ARGBShuffle. 
#include "libyuv/rotate_argb.h" #include "libyuv/row.h" #include "libyuv/video_common.h" @@ -44,18 +45,347 @@ int ARGBCopy(const uint8* src_argb, int src_stride_argb, return 0; } -// Convert I444 to ARGB. +// Convert I422 to ARGB with matrix +static int I420ToARGBMatrix(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_argb, int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, int height) { + int y; + void (*I422ToARGBRow)(const uint8* y_buf, + const uint8* u_buf, + const uint8* v_buf, + uint8* rgb_buf, + const struct YuvConstants* yuvconstants, + int width) = I422ToARGBRow_C; + if (!src_y || !src_u || !src_v || !dst_argb || + width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_argb = dst_argb + (height - 1) * dst_stride_argb; + dst_stride_argb = -dst_stride_argb; + } +#if defined(HAS_I422TOARGBROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I422ToARGBRow = I422ToARGBRow_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + I422ToARGBRow = I422ToARGBRow_SSSE3; + } + } +#endif +#if defined(HAS_I422TOARGBROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I422ToARGBRow = I422ToARGBRow_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + I422ToARGBRow = I422ToARGBRow_AVX2; + } + } +#endif +#if defined(HAS_I422TOARGBROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + I422ToARGBRow = I422ToARGBRow_Any_NEON; + if (IS_ALIGNED(width, 8)) { + I422ToARGBRow = I422ToARGBRow_NEON; + } + } +#endif +#if defined(HAS_I422TOARGBROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(width, 4) && + IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && + IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && + IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && + IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) { + I422ToARGBRow = I422ToARGBRow_DSPR2; + } +#endif + + for (y = 0; y < height; ++y) { + 
I422ToARGBRow(src_y, src_u, src_v, dst_argb, yuvconstants, width); + dst_argb += dst_stride_argb; + src_y += src_stride_y; + if (y & 1) { + src_u += src_stride_u; + src_v += src_stride_v; + } + } + return 0; +} + +// Convert I420 to ARGB. LIBYUV_API -int I444ToARGB(const uint8* src_y, int src_stride_y, +int I420ToARGB(const uint8* src_y, int src_stride_y, const uint8* src_u, int src_stride_u, const uint8* src_v, int src_stride_v, uint8* dst_argb, int dst_stride_argb, int width, int height) { + return I420ToARGBMatrix(src_y, src_stride_y, + src_u, src_stride_u, + src_v, src_stride_v, + dst_argb, dst_stride_argb, + &kYuvI601Constants, + width, height); +} + +// Convert I420 to ABGR. +LIBYUV_API +int I420ToABGR(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_abgr, int dst_stride_abgr, + int width, int height) { + return I420ToARGBMatrix(src_y, src_stride_y, + src_v, src_stride_v, // Swap U and V + src_u, src_stride_u, + dst_abgr, dst_stride_abgr, + &kYvuI601Constants, // Use Yvu matrix + width, height); +} + +// Convert J420 to ARGB. +LIBYUV_API +int J420ToARGB(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_argb, int dst_stride_argb, + int width, int height) { + return I420ToARGBMatrix(src_y, src_stride_y, + src_u, src_stride_u, + src_v, src_stride_v, + dst_argb, dst_stride_argb, + &kYuvJPEGConstants, + width, height); +} + +// Convert J420 to ABGR. +LIBYUV_API +int J420ToABGR(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_abgr, int dst_stride_abgr, + int width, int height) { + return I420ToARGBMatrix(src_y, src_stride_y, + src_v, src_stride_v, // Swap U and V + src_u, src_stride_u, + dst_abgr, dst_stride_abgr, + &kYvuJPEGConstants, // Use Yvu matrix + width, height); +} + +// Convert H420 to ARGB. 
+LIBYUV_API +int H420ToARGB(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_argb, int dst_stride_argb, + int width, int height) { + return I420ToARGBMatrix(src_y, src_stride_y, + src_u, src_stride_u, + src_v, src_stride_v, + dst_argb, dst_stride_argb, + &kYuvH709Constants, + width, height); +} + +// Convert H420 to ABGR. +LIBYUV_API +int H420ToABGR(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_abgr, int dst_stride_abgr, + int width, int height) { + return I420ToARGBMatrix(src_y, src_stride_y, + src_v, src_stride_v, // Swap U and V + src_u, src_stride_u, + dst_abgr, dst_stride_abgr, + &kYvuH709Constants, // Use Yvu matrix + width, height); +} + +// Convert I422 to ARGB with matrix +static int I422ToARGBMatrix(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_argb, int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, int height) { + int y; + void (*I422ToARGBRow)(const uint8* y_buf, + const uint8* u_buf, + const uint8* v_buf, + uint8* rgb_buf, + const struct YuvConstants* yuvconstants, + int width) = I422ToARGBRow_C; + if (!src_y || !src_u || !src_v || + !dst_argb || + width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_argb = dst_argb + (height - 1) * dst_stride_argb; + dst_stride_argb = -dst_stride_argb; + } + // Coalesce rows. 
+ if (src_stride_y == width && + src_stride_u * 2 == width && + src_stride_v * 2 == width && + dst_stride_argb == width * 4) { + width *= height; + height = 1; + src_stride_y = src_stride_u = src_stride_v = dst_stride_argb = 0; + } +#if defined(HAS_I422TOARGBROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I422ToARGBRow = I422ToARGBRow_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + I422ToARGBRow = I422ToARGBRow_SSSE3; + } + } +#endif +#if defined(HAS_I422TOARGBROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I422ToARGBRow = I422ToARGBRow_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + I422ToARGBRow = I422ToARGBRow_AVX2; + } + } +#endif +#if defined(HAS_I422TOARGBROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + I422ToARGBRow = I422ToARGBRow_Any_NEON; + if (IS_ALIGNED(width, 8)) { + I422ToARGBRow = I422ToARGBRow_NEON; + } + } +#endif +#if defined(HAS_I422TOARGBROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(width, 4) && + IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && + IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && + IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && + IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) { + I422ToARGBRow = I422ToARGBRow_DSPR2; + } +#endif + + for (y = 0; y < height; ++y) { + I422ToARGBRow(src_y, src_u, src_v, dst_argb, yuvconstants, width); + dst_argb += dst_stride_argb; + src_y += src_stride_y; + src_u += src_stride_u; + src_v += src_stride_v; + } + return 0; +} + +// Convert I422 to ARGB. +LIBYUV_API +int I422ToARGB(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_argb, int dst_stride_argb, + int width, int height) { + return I422ToARGBMatrix(src_y, src_stride_y, + src_u, src_stride_u, + src_v, src_stride_v, + dst_argb, dst_stride_argb, + &kYuvI601Constants, + width, height); +} + +// Convert I422 to ABGR. 
+LIBYUV_API +int I422ToABGR(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_abgr, int dst_stride_abgr, + int width, int height) { + return I422ToARGBMatrix(src_y, src_stride_y, + src_v, src_stride_v, // Swap U and V + src_u, src_stride_u, + dst_abgr, dst_stride_abgr, + &kYvuI601Constants, // Use Yvu matrix + width, height); +} + +// Convert J422 to ARGB. +LIBYUV_API +int J422ToARGB(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_argb, int dst_stride_argb, + int width, int height) { + return I422ToARGBMatrix(src_y, src_stride_y, + src_u, src_stride_u, + src_v, src_stride_v, + dst_argb, dst_stride_argb, + &kYuvJPEGConstants, + width, height); +} + +// Convert J422 to ABGR. +LIBYUV_API +int J422ToABGR(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_abgr, int dst_stride_abgr, + int width, int height) { + return I422ToARGBMatrix(src_y, src_stride_y, + src_v, src_stride_v, // Swap U and V + src_u, src_stride_u, + dst_abgr, dst_stride_abgr, + &kYvuJPEGConstants, // Use Yvu matrix + width, height); +} + +// Convert H422 to ARGB. +LIBYUV_API +int H422ToARGB(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_argb, int dst_stride_argb, + int width, int height) { + return I422ToARGBMatrix(src_y, src_stride_y, + src_u, src_stride_u, + src_v, src_stride_v, + dst_argb, dst_stride_argb, + &kYuvH709Constants, + width, height); +} + +// Convert H422 to ABGR. 
+LIBYUV_API +int H422ToABGR(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_abgr, int dst_stride_abgr, + int width, int height) { + return I422ToARGBMatrix(src_y, src_stride_y, + src_v, src_stride_v, // Swap U and V + src_u, src_stride_u, + dst_abgr, dst_stride_abgr, + &kYvuH709Constants, // Use Yvu matrix + width, height); +} + +// Convert I444 to ARGB with matrix +static int I444ToARGBMatrix(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_argb, int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, int height) { int y; void (*I444ToARGBRow)(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = I444ToARGBRow_C; if (!src_y || !src_u || !src_v || !dst_argb || @@ -103,7 +433,7 @@ int I444ToARGB(const uint8* src_y, int src_stride_y, #endif for (y = 0; y < height; ++y) { - I444ToARGBRow(src_y, src_u, src_v, dst_argb, width); + I444ToARGBRow(src_y, src_u, src_v, dst_argb, yuvconstants, width); dst_argb += dst_stride_argb; src_y += src_stride_y; src_u += src_stride_u; @@ -112,81 +442,49 @@ int I444ToARGB(const uint8* src_y, int src_stride_y, return 0; } -// Convert I422 to ARGB. +// Convert I444 to ARGB. LIBYUV_API -int I422ToARGB(const uint8* src_y, int src_stride_y, +int I444ToARGB(const uint8* src_y, int src_stride_y, const uint8* src_u, int src_stride_u, const uint8* src_v, int src_stride_v, uint8* dst_argb, int dst_stride_argb, int width, int height) { - int y; - void (*I422ToARGBRow)(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* rgb_buf, - int width) = I422ToARGBRow_C; - if (!src_y || !src_u || !src_v || - !dst_argb || - width <= 0 || height == 0) { - return -1; - } - // Negative height means invert the image. 
- if (height < 0) { - height = -height; - dst_argb = dst_argb + (height - 1) * dst_stride_argb; - dst_stride_argb = -dst_stride_argb; - } - // Coalesce rows. - if (src_stride_y == width && - src_stride_u * 2 == width && - src_stride_v * 2 == width && - dst_stride_argb == width * 4) { - width *= height; - height = 1; - src_stride_y = src_stride_u = src_stride_v = dst_stride_argb = 0; - } -#if defined(HAS_I422TOARGBROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - I422ToARGBRow = I422ToARGBRow_Any_SSSE3; - if (IS_ALIGNED(width, 8)) { - I422ToARGBRow = I422ToARGBRow_SSSE3; - } - } -#endif -#if defined(HAS_I422TOARGBROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2)) { - I422ToARGBRow = I422ToARGBRow_Any_AVX2; - if (IS_ALIGNED(width, 16)) { - I422ToARGBRow = I422ToARGBRow_AVX2; - } - } -#endif -#if defined(HAS_I422TOARGBROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - I422ToARGBRow = I422ToARGBRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - I422ToARGBRow = I422ToARGBRow_NEON; - } - } -#endif -#if defined(HAS_I422TOARGBROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) && - IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && - IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && - IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && - IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) { - I422ToARGBRow = I422ToARGBRow_MIPS_DSPR2; - } -#endif + return I444ToARGBMatrix(src_y, src_stride_y, + src_u, src_stride_u, + src_v, src_stride_v, + dst_argb, dst_stride_argb, + &kYuvI601Constants, + width, height); +} - for (y = 0; y < height; ++y) { - I422ToARGBRow(src_y, src_u, src_v, dst_argb, width); - dst_argb += dst_stride_argb; - src_y += src_stride_y; - src_u += src_stride_u; - src_v += src_stride_v; - } - return 0; +// Convert I444 to ABGR. 
+LIBYUV_API +int I444ToABGR(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_abgr, int dst_stride_abgr, + int width, int height) { + return I444ToARGBMatrix(src_y, src_stride_y, + src_v, src_stride_v, // Swap U and V + src_u, src_stride_u, + dst_abgr, dst_stride_abgr, + &kYvuI601Constants, // Use Yvu matrix + width, height); +} + +// Convert J444 to ARGB. +LIBYUV_API +int J444ToARGB(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_argb, int dst_stride_argb, + int width, int height) { + return I444ToARGBMatrix(src_y, src_stride_y, + src_u, src_stride_u, + src_v, src_stride_v, + dst_argb, dst_stride_argb, + &kYuvJPEGConstants, + width, height); } // Convert I411 to ARGB. @@ -201,6 +499,7 @@ int I411ToARGB(const uint8* src_y, int src_stride_y, const uint8* u_buf, const uint8* v_buf, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = I411ToARGBRow_C; if (!src_y || !src_u || !src_v || !dst_argb || @@ -248,7 +547,7 @@ int I411ToARGB(const uint8* src_y, int src_stride_y, #endif for (y = 0; y < height; ++y) { - I411ToARGBRow(src_y, src_u, src_v, dst_argb, width); + I411ToARGBRow(src_y, src_u, src_v, dst_argb, &kYuvI601Constants, width); dst_argb += dst_stride_argb; src_y += src_stride_y; src_u += src_stride_u; @@ -257,6 +556,143 @@ int I411ToARGB(const uint8* src_y, int src_stride_y, return 0; } +// Convert I420 with Alpha to preattenuated ARGB. 
+static int I420AlphaToARGBMatrix(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + const uint8* src_a, int src_stride_a, + uint8* dst_argb, int dst_stride_argb, + const struct YuvConstants* yuvconstants, + int width, int height, int attenuate) { + int y; + void (*I422AlphaToARGBRow)(const uint8* y_buf, + const uint8* u_buf, + const uint8* v_buf, + const uint8* a_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) = I422AlphaToARGBRow_C; + void (*ARGBAttenuateRow)(const uint8* src_argb, uint8* dst_argb, + int width) = ARGBAttenuateRow_C; + if (!src_y || !src_u || !src_v || !dst_argb || + width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_argb = dst_argb + (height - 1) * dst_stride_argb; + dst_stride_argb = -dst_stride_argb; + } +#if defined(HAS_I422ALPHATOARGBROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + I422AlphaToARGBRow = I422AlphaToARGBRow_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + I422AlphaToARGBRow = I422AlphaToARGBRow_SSSE3; + } + } +#endif +#if defined(HAS_I422ALPHATOARGBROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + I422AlphaToARGBRow = I422AlphaToARGBRow_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + I422AlphaToARGBRow = I422AlphaToARGBRow_AVX2; + } + } +#endif +#if defined(HAS_I422ALPHATOARGBROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + I422AlphaToARGBRow = I422AlphaToARGBRow_Any_NEON; + if (IS_ALIGNED(width, 8)) { + I422AlphaToARGBRow = I422AlphaToARGBRow_NEON; + } + } +#endif +#if defined(HAS_I422ALPHATOARGBROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(width, 4) && + IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && + IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && + IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && + IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) { + I422AlphaToARGBRow = I422AlphaToARGBRow_DSPR2; + } +#endif +#if 
defined(HAS_ARGBATTENUATEROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_SSSE3; + if (IS_ALIGNED(width, 4)) { + ARGBAttenuateRow = ARGBAttenuateRow_SSSE3; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_AVX2; + if (IS_ALIGNED(width, 8)) { + ARGBAttenuateRow = ARGBAttenuateRow_AVX2; + } + } +#endif +#if defined(HAS_ARGBATTENUATEROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ARGBAttenuateRow = ARGBAttenuateRow_Any_NEON; + if (IS_ALIGNED(width, 8)) { + ARGBAttenuateRow = ARGBAttenuateRow_NEON; + } + } +#endif + + for (y = 0; y < height; ++y) { + I422AlphaToARGBRow(src_y, src_u, src_v, src_a, dst_argb, yuvconstants, + width); + if (attenuate) { + ARGBAttenuateRow(dst_argb, dst_argb, width); + } + dst_argb += dst_stride_argb; + src_a += src_stride_a; + src_y += src_stride_y; + if (y & 1) { + src_u += src_stride_u; + src_v += src_stride_v; + } + } + return 0; +} + +// Convert I420 with Alpha to ARGB. +LIBYUV_API +int I420AlphaToARGB(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + const uint8* src_a, int src_stride_a, + uint8* dst_argb, int dst_stride_argb, + int width, int height, int attenuate) { + return I420AlphaToARGBMatrix(src_y, src_stride_y, + src_u, src_stride_u, + src_v, src_stride_v, + src_a, src_stride_a, + dst_argb, dst_stride_argb, + &kYuvI601Constants, + width, height, attenuate); +} + +// Convert I420 with Alpha to ABGR. 
+LIBYUV_API +int I420AlphaToABGR(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + const uint8* src_a, int src_stride_a, + uint8* dst_abgr, int dst_stride_abgr, + int width, int height, int attenuate) { + return I420AlphaToARGBMatrix(src_y, src_stride_y, + src_v, src_stride_v, // Swap U and V + src_u, src_stride_u, + src_a, src_stride_a, + dst_abgr, dst_stride_abgr, + &kYvuI601Constants, // Use Yvu matrix + width, height, attenuate); +} + // Convert I400 to ARGB. LIBYUV_API int I400ToARGB(const uint8* src_y, int src_stride_y, @@ -322,7 +758,7 @@ int J400ToARGB(const uint8* src_y, int src_stride_y, uint8* dst_argb, int dst_stride_argb, int width, int height) { int y; - void (*J400ToARGBRow)(const uint8* src_y, uint8* dst_argb, int pix) = + void (*J400ToARGBRow)(const uint8* src_y, uint8* dst_argb, int width) = J400ToARGBRow_C; if (!src_y || !dst_argb || width <= 0 || height == 0) { @@ -449,7 +885,7 @@ int RGB24ToARGB(const uint8* src_rgb24, int src_stride_rgb24, uint8* dst_argb, int dst_stride_argb, int width, int height) { int y; - void (*RGB24ToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int pix) = + void (*RGB24ToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int width) = RGB24ToARGBRow_C; if (!src_rgb24 || !dst_argb || width <= 0 || height == 0) { @@ -499,7 +935,7 @@ int RAWToARGB(const uint8* src_raw, int src_stride_raw, uint8* dst_argb, int dst_stride_argb, int width, int height) { int y; - void (*RAWToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int pix) = + void (*RAWToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int width) = RAWToARGBRow_C; if (!src_raw || !dst_argb || width <= 0 || height == 0) { @@ -549,7 +985,7 @@ int RGB565ToARGB(const uint8* src_rgb565, int src_stride_rgb565, uint8* dst_argb, int dst_stride_argb, int width, int height) { int y; - void (*RGB565ToARGBRow)(const uint8* src_rgb565, uint8* dst_argb, int pix) = + void (*RGB565ToARGBRow)(const uint8* src_rgb565, 
uint8* dst_argb, int width) = RGB565ToARGBRow_C; if (!src_rgb565 || !dst_argb || width <= 0 || height == 0) { @@ -608,7 +1044,7 @@ int ARGB1555ToARGB(const uint8* src_argb1555, int src_stride_argb1555, int width, int height) { int y; void (*ARGB1555ToARGBRow)(const uint8* src_argb1555, uint8* dst_argb, - int pix) = ARGB1555ToARGBRow_C; + int width) = ARGB1555ToARGBRow_C; if (!src_argb1555 || !dst_argb || width <= 0 || height == 0) { return -1; @@ -666,7 +1102,7 @@ int ARGB4444ToARGB(const uint8* src_argb4444, int src_stride_argb4444, int width, int height) { int y; void (*ARGB4444ToARGBRow)(const uint8* src_argb4444, uint8* dst_argb, - int pix) = ARGB4444ToARGBRow_C; + int width) = ARGB4444ToARGBRow_C; if (!src_argb4444 || !dst_argb || width <= 0 || height == 0) { return -1; @@ -727,6 +1163,7 @@ int NV12ToARGB(const uint8* src_y, int src_stride_y, void (*NV12ToARGBRow)(const uint8* y_buf, const uint8* uv_buf, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = NV12ToARGBRow_C; if (!src_y || !src_uv || !dst_argb || width <= 0 || height == 0) { @@ -764,7 +1201,7 @@ int NV12ToARGB(const uint8* src_y, int src_stride_y, #endif for (y = 0; y < height; ++y) { - NV12ToARGBRow(src_y, src_uv, dst_argb, width); + NV12ToARGBRow(src_y, src_uv, dst_argb, &kYuvI601Constants, width); dst_argb += dst_stride_argb; src_y += src_stride_y; if (y & 1) { @@ -784,6 +1221,7 @@ int NV21ToARGB(const uint8* src_y, int src_stride_y, void (*NV21ToARGBRow)(const uint8* y_buf, const uint8* uv_buf, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = NV21ToARGBRow_C; if (!src_y || !src_uv || !dst_argb || width <= 0 || height == 0) { @@ -821,7 +1259,7 @@ int NV21ToARGB(const uint8* src_y, int src_stride_y, #endif for (y = 0; y < height; ++y) { - NV21ToARGBRow(src_y, src_uv, dst_argb, width); + NV21ToARGBRow(src_y, src_uv, dst_argb, &kYuvI601Constants, width); dst_argb += dst_stride_argb; src_y += src_stride_y; if (y & 1) { @@ -840,6 +1278,7 @@ int 
M420ToARGB(const uint8* src_m420, int src_stride_m420, void (*NV12ToARGBRow)(const uint8* y_buf, const uint8* uv_buf, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = NV12ToARGBRow_C; if (!src_m420 || !dst_argb || width <= 0 || height == 0) { @@ -877,14 +1316,16 @@ int M420ToARGB(const uint8* src_m420, int src_stride_m420, #endif for (y = 0; y < height - 1; y += 2) { - NV12ToARGBRow(src_m420, src_m420 + src_stride_m420 * 2, dst_argb, width); + NV12ToARGBRow(src_m420, src_m420 + src_stride_m420 * 2, dst_argb, + &kYuvI601Constants, width); NV12ToARGBRow(src_m420 + src_stride_m420, src_m420 + src_stride_m420 * 2, - dst_argb + dst_stride_argb, width); + dst_argb + dst_stride_argb, &kYuvI601Constants, width); dst_argb += dst_stride_argb * 2; src_m420 += src_stride_m420 * 3; } if (height & 1) { - NV12ToARGBRow(src_m420, src_m420 + src_stride_m420 * 2, dst_argb, width); + NV12ToARGBRow(src_m420, src_m420 + src_stride_m420 * 2, dst_argb, + &kYuvI601Constants, width); } return 0; } @@ -895,7 +1336,10 @@ int YUY2ToARGB(const uint8* src_yuy2, int src_stride_yuy2, uint8* dst_argb, int dst_stride_argb, int width, int height) { int y; - void (*YUY2ToARGBRow)(const uint8* src_yuy2, uint8* dst_argb, int pix) = + void (*YUY2ToARGBRow)(const uint8* src_yuy2, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) = YUY2ToARGBRow_C; if (!src_yuy2 || !dst_argb || width <= 0 || height == 0) { @@ -939,7 +1383,7 @@ int YUY2ToARGB(const uint8* src_yuy2, int src_stride_yuy2, } #endif for (y = 0; y < height; ++y) { - YUY2ToARGBRow(src_yuy2, dst_argb, width); + YUY2ToARGBRow(src_yuy2, dst_argb, &kYuvI601Constants, width); src_yuy2 += src_stride_yuy2; dst_argb += dst_stride_argb; } @@ -952,7 +1396,10 @@ int UYVYToARGB(const uint8* src_uyvy, int src_stride_uyvy, uint8* dst_argb, int dst_stride_argb, int width, int height) { int y; - void (*UYVYToARGBRow)(const uint8* src_uyvy, uint8* dst_argb, int pix) = + void (*UYVYToARGBRow)(const uint8* src_uyvy, + 
uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) = UYVYToARGBRow_C; if (!src_uyvy || !dst_argb || width <= 0 || height == 0) { @@ -996,159 +1443,13 @@ int UYVYToARGB(const uint8* src_uyvy, int src_stride_uyvy, } #endif for (y = 0; y < height; ++y) { - UYVYToARGBRow(src_uyvy, dst_argb, width); + UYVYToARGBRow(src_uyvy, dst_argb, &kYuvI601Constants, width); src_uyvy += src_stride_uyvy; dst_argb += dst_stride_argb; } return 0; } -// Convert J420 to ARGB. -LIBYUV_API -int J420ToARGB(const uint8* src_y, int src_stride_y, - const uint8* src_u, int src_stride_u, - const uint8* src_v, int src_stride_v, - uint8* dst_argb, int dst_stride_argb, - int width, int height) { - int y; - void (*J422ToARGBRow)(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* rgb_buf, - int width) = J422ToARGBRow_C; - if (!src_y || !src_u || !src_v || !dst_argb || - width <= 0 || height == 0) { - return -1; - } - // Negative height means invert the image. - if (height < 0) { - height = -height; - dst_argb = dst_argb + (height - 1) * dst_stride_argb; - dst_stride_argb = -dst_stride_argb; - } -#if defined(HAS_J422TOARGBROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - J422ToARGBRow = J422ToARGBRow_Any_SSSE3; - if (IS_ALIGNED(width, 8)) { - J422ToARGBRow = J422ToARGBRow_SSSE3; - } - } -#endif -#if defined(HAS_J422TOARGBROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2)) { - J422ToARGBRow = J422ToARGBRow_Any_AVX2; - if (IS_ALIGNED(width, 16)) { - J422ToARGBRow = J422ToARGBRow_AVX2; - } - } -#endif -#if defined(HAS_J422TOARGBROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - J422ToARGBRow = J422ToARGBRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - J422ToARGBRow = J422ToARGBRow_NEON; - } - } -#endif -#if defined(HAS_J422TOARGBROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) && - IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && - IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && - IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && - 
IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) { - J422ToARGBRow = J422ToARGBRow_MIPS_DSPR2; - } -#endif - - for (y = 0; y < height; ++y) { - J422ToARGBRow(src_y, src_u, src_v, dst_argb, width); - dst_argb += dst_stride_argb; - src_y += src_stride_y; - if (y & 1) { - src_u += src_stride_u; - src_v += src_stride_v; - } - } - return 0; -} - -// Convert J422 to ARGB. -LIBYUV_API -int J422ToARGB(const uint8* src_y, int src_stride_y, - const uint8* src_u, int src_stride_u, - const uint8* src_v, int src_stride_v, - uint8* dst_argb, int dst_stride_argb, - int width, int height) { - int y; - void (*J422ToARGBRow)(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* rgb_buf, - int width) = J422ToARGBRow_C; - if (!src_y || !src_u || !src_v || - !dst_argb || - width <= 0 || height == 0) { - return -1; - } - // Negative height means invert the image. - if (height < 0) { - height = -height; - dst_argb = dst_argb + (height - 1) * dst_stride_argb; - dst_stride_argb = -dst_stride_argb; - } - // Coalesce rows. 
- if (src_stride_y == width && - src_stride_u * 2 == width && - src_stride_v * 2 == width && - dst_stride_argb == width * 4) { - width *= height; - height = 1; - src_stride_y = src_stride_u = src_stride_v = dst_stride_argb = 0; - } -#if defined(HAS_J422TOARGBROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - J422ToARGBRow = J422ToARGBRow_Any_SSSE3; - if (IS_ALIGNED(width, 8)) { - J422ToARGBRow = J422ToARGBRow_SSSE3; - } - } -#endif -#if defined(HAS_J422TOARGBROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2)) { - J422ToARGBRow = J422ToARGBRow_Any_AVX2; - if (IS_ALIGNED(width, 16)) { - J422ToARGBRow = J422ToARGBRow_AVX2; - } - } -#endif -#if defined(HAS_J422TOARGBROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - J422ToARGBRow = J422ToARGBRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - J422ToARGBRow = J422ToARGBRow_NEON; - } - } -#endif -#if defined(HAS_J422TOARGBROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) && - IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && - IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && - IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && - IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) { - J422ToARGBRow = J422ToARGBRow_MIPS_DSPR2; - } -#endif - - for (y = 0; y < height; ++y) { - J422ToARGBRow(src_y, src_u, src_v, dst_argb, width); - dst_argb += dst_stride_argb; - src_y += src_stride_y; - src_u += src_stride_u; - src_v += src_stride_v; - } - return 0; -} - #ifdef __cplusplus } // extern "C" } // namespace libyuv diff --git a/libs/libvpx/third_party/libyuv/source/convert_from.cc b/libs/libvpx/third_party/libyuv/source/convert_from.cc index 31f1ac992a..3b2dca8163 100644 --- a/libs/libvpx/third_party/libyuv/source/convert_from.cc +++ b/libs/libvpx/third_party/libyuv/source/convert_from.cc @@ -46,9 +46,11 @@ static int I420ToI4xx(const uint8* src_y, int src_stride_y, dst_uv_width <= 0 || dst_uv_height <= 0) { return -1; } - ScalePlane(src_y, src_stride_y, src_y_width, src_y_height, - dst_y, dst_stride_y, 
dst_y_width, dst_y_height, - kFilterBilinear); + if (dst_y) { + ScalePlane(src_y, src_stride_y, src_y_width, src_y_height, + dst_y, dst_stride_y, dst_y_width, dst_y_height, + kFilterBilinear); + } ScalePlane(src_u, src_stride_u, src_uv_width, src_uv_height, dst_u, dst_stride_u, dst_uv_width, dst_uv_height, kFilterBilinear); @@ -359,6 +361,7 @@ int I420ToUYVY(const uint8* src_y, int src_stride_y, return 0; } +// TODO(fbarchard): test negative height for invert. LIBYUV_API int I420ToNV12(const uint8* src_y, int src_stride_y, const uint8* src_u, int src_stride_u, @@ -366,72 +369,19 @@ int I420ToNV12(const uint8* src_y, int src_stride_y, uint8* dst_y, int dst_stride_y, uint8* dst_uv, int dst_stride_uv, int width, int height) { - int y; - void (*MergeUVRow_)(const uint8* src_u, const uint8* src_v, uint8* dst_uv, - int width) = MergeUVRow_C; - // Coalesce rows. - int halfwidth = (width + 1) >> 1; - int halfheight = (height + 1) >> 1; if (!src_y || !src_u || !src_v || !dst_y || !dst_uv || width <= 0 || height == 0) { return -1; } - // Negative height means invert the image. - if (height < 0) { - height = -height; - halfheight = (height + 1) >> 1; - dst_y = dst_y + (height - 1) * dst_stride_y; - dst_uv = dst_uv + (halfheight - 1) * dst_stride_uv; - dst_stride_y = -dst_stride_y; - dst_stride_uv = -dst_stride_uv; - } - if (src_stride_y == width && - dst_stride_y == width) { - width *= height; - height = 1; - src_stride_y = dst_stride_y = 0; - } - // Coalesce rows. 
- if (src_stride_u == halfwidth && - src_stride_v == halfwidth && - dst_stride_uv == halfwidth * 2) { - halfwidth *= halfheight; - halfheight = 1; - src_stride_u = src_stride_v = dst_stride_uv = 0; - } -#if defined(HAS_MERGEUVROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - MergeUVRow_ = MergeUVRow_Any_SSE2; - if (IS_ALIGNED(halfwidth, 16)) { - MergeUVRow_ = MergeUVRow_SSE2; - } - } -#endif -#if defined(HAS_MERGEUVROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2)) { - MergeUVRow_ = MergeUVRow_Any_AVX2; - if (IS_ALIGNED(halfwidth, 32)) { - MergeUVRow_ = MergeUVRow_AVX2; - } - } -#endif -#if defined(HAS_MERGEUVROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - MergeUVRow_ = MergeUVRow_Any_NEON; - if (IS_ALIGNED(halfwidth, 16)) { - MergeUVRow_ = MergeUVRow_NEON; - } - } -#endif - - CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height); - for (y = 0; y < halfheight; ++y) { - // Merge a row of U and V into a row of UV. - MergeUVRow_(src_u, src_v, dst_uv, halfwidth); - src_u += src_stride_u; - src_v += src_stride_v; - dst_uv += dst_stride_uv; + int halfwidth = (width + 1) / 2; + int halfheight = height > 0 ? (height + 1) / 2 : (height - 1) / 2; + if (dst_y) { + CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height); } + MergeUVPlane(src_u, src_stride_u, + src_v, src_stride_v, + dst_uv, dst_stride_uv, + halfwidth, halfheight); return 0; } @@ -445,221 +395,24 @@ int I420ToNV21(const uint8* src_y, int src_stride_y, return I420ToNV12(src_y, src_stride_y, src_v, src_stride_v, src_u, src_stride_u, - dst_y, src_stride_y, + dst_y, dst_stride_y, dst_vu, dst_stride_vu, width, height); } -// Convert I420 to ARGB. 
-LIBYUV_API -int I420ToARGB(const uint8* src_y, int src_stride_y, - const uint8* src_u, int src_stride_u, - const uint8* src_v, int src_stride_v, - uint8* dst_argb, int dst_stride_argb, - int width, int height) { - int y; - void (*I422ToARGBRow)(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* rgb_buf, - int width) = I422ToARGBRow_C; - if (!src_y || !src_u || !src_v || !dst_argb || - width <= 0 || height == 0) { - return -1; - } - // Negative height means invert the image. - if (height < 0) { - height = -height; - dst_argb = dst_argb + (height - 1) * dst_stride_argb; - dst_stride_argb = -dst_stride_argb; - } -#if defined(HAS_I422TOARGBROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - I422ToARGBRow = I422ToARGBRow_Any_SSSE3; - if (IS_ALIGNED(width, 8)) { - I422ToARGBRow = I422ToARGBRow_SSSE3; - } - } -#endif -#if defined(HAS_I422TOARGBROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2)) { - I422ToARGBRow = I422ToARGBRow_Any_AVX2; - if (IS_ALIGNED(width, 16)) { - I422ToARGBRow = I422ToARGBRow_AVX2; - } - } -#endif -#if defined(HAS_I422TOARGBROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - I422ToARGBRow = I422ToARGBRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - I422ToARGBRow = I422ToARGBRow_NEON; - } - } -#endif -#if defined(HAS_I422TOARGBROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) && - IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && - IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && - IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && - IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) { - I422ToARGBRow = I422ToARGBRow_MIPS_DSPR2; - } -#endif - - for (y = 0; y < height; ++y) { - I422ToARGBRow(src_y, src_u, src_v, dst_argb, width); - dst_argb += dst_stride_argb; - src_y += src_stride_y; - if (y & 1) { - src_u += src_stride_u; - src_v += src_stride_v; - } - } - return 0; -} - -// Convert I420 to BGRA. 
-LIBYUV_API -int I420ToBGRA(const uint8* src_y, int src_stride_y, - const uint8* src_u, int src_stride_u, - const uint8* src_v, int src_stride_v, - uint8* dst_bgra, int dst_stride_bgra, - int width, int height) { - int y; - void (*I422ToBGRARow)(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* rgb_buf, - int width) = I422ToBGRARow_C; - if (!src_y || !src_u || !src_v || !dst_bgra || - width <= 0 || height == 0) { - return -1; - } - // Negative height means invert the image. - if (height < 0) { - height = -height; - dst_bgra = dst_bgra + (height - 1) * dst_stride_bgra; - dst_stride_bgra = -dst_stride_bgra; - } -#if defined(HAS_I422TOBGRAROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - I422ToBGRARow = I422ToBGRARow_Any_SSSE3; - if (IS_ALIGNED(width, 8)) { - I422ToBGRARow = I422ToBGRARow_SSSE3; - } - } -#endif -#if defined(HAS_I422TOBGRAROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2)) { - I422ToBGRARow = I422ToBGRARow_Any_AVX2; - if (IS_ALIGNED(width, 16)) { - I422ToBGRARow = I422ToBGRARow_AVX2; - } - } -#endif -#if defined(HAS_I422TOBGRAROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - I422ToBGRARow = I422ToBGRARow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - I422ToBGRARow = I422ToBGRARow_NEON; - } - } -#endif -#if defined(HAS_I422TOBGRAROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) && - IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && - IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && - IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && - IS_ALIGNED(dst_bgra, 4) && IS_ALIGNED(dst_stride_bgra, 4)) { - I422ToBGRARow = I422ToBGRARow_MIPS_DSPR2; - } -#endif - - for (y = 0; y < height; ++y) { - I422ToBGRARow(src_y, src_u, src_v, dst_bgra, width); - dst_bgra += dst_stride_bgra; - src_y += src_stride_y; - if (y & 1) { - src_u += src_stride_u; - src_v += src_stride_v; - } - } - return 0; -} - -// Convert I420 to ABGR. 
-LIBYUV_API -int I420ToABGR(const uint8* src_y, int src_stride_y, - const uint8* src_u, int src_stride_u, - const uint8* src_v, int src_stride_v, - uint8* dst_abgr, int dst_stride_abgr, - int width, int height) { - int y; - void (*I422ToABGRRow)(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* rgb_buf, - int width) = I422ToABGRRow_C; - if (!src_y || !src_u || !src_v || !dst_abgr || - width <= 0 || height == 0) { - return -1; - } - // Negative height means invert the image. - if (height < 0) { - height = -height; - dst_abgr = dst_abgr + (height - 1) * dst_stride_abgr; - dst_stride_abgr = -dst_stride_abgr; - } -#if defined(HAS_I422TOABGRROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - I422ToABGRRow = I422ToABGRRow_Any_SSSE3; - if (IS_ALIGNED(width, 8)) { - I422ToABGRRow = I422ToABGRRow_SSSE3; - } - } -#endif -#if defined(HAS_I422TOABGRROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2)) { - I422ToABGRRow = I422ToABGRRow_Any_AVX2; - if (IS_ALIGNED(width, 16)) { - I422ToABGRRow = I422ToABGRRow_AVX2; - } - } -#endif -#if defined(HAS_I422TOABGRROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - I422ToABGRRow = I422ToABGRRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - I422ToABGRRow = I422ToABGRRow_NEON; - } - } -#endif - - for (y = 0; y < height; ++y) { - I422ToABGRRow(src_y, src_u, src_v, dst_abgr, width); - dst_abgr += dst_stride_abgr; - src_y += src_stride_y; - if (y & 1) { - src_u += src_stride_u; - src_v += src_stride_v; - } - } - return 0; -} - -// Convert I420 to RGBA. 
-LIBYUV_API -int I420ToRGBA(const uint8* src_y, int src_stride_y, - const uint8* src_u, int src_stride_u, - const uint8* src_v, int src_stride_v, - uint8* dst_rgba, int dst_stride_rgba, - int width, int height) { +// Convert I422 to RGBA with matrix +static int I420ToRGBAMatrix(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_rgba, int dst_stride_rgba, + const struct YuvConstants* yuvconstants, + int width, int height) { int y; void (*I422ToRGBARow)(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = I422ToRGBARow_C; if (!src_y || !src_u || !src_v || !dst_rgba || width <= 0 || height == 0) { @@ -695,9 +448,18 @@ int I420ToRGBA(const uint8* src_y, int src_stride_y, } } #endif +#if defined(HAS_I422TORGBAROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(width, 4) && + IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && + IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && + IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && + IS_ALIGNED(dst_rgba, 4) && IS_ALIGNED(dst_stride_rgba, 4)) { + I422ToRGBARow = I422ToRGBARow_DSPR2; + } +#endif for (y = 0; y < height; ++y) { - I422ToRGBARow(src_y, src_u, src_v, dst_rgba, width); + I422ToRGBARow(src_y, src_u, src_v, dst_rgba, yuvconstants, width); dst_rgba += dst_stride_rgba; src_y += src_stride_y; if (y & 1) { @@ -708,18 +470,49 @@ int I420ToRGBA(const uint8* src_y, int src_stride_y, return 0; } -// Convert I420 to RGB24. +// Convert I420 to RGBA. 
LIBYUV_API -int I420ToRGB24(const uint8* src_y, int src_stride_y, - const uint8* src_u, int src_stride_u, - const uint8* src_v, int src_stride_v, - uint8* dst_rgb24, int dst_stride_rgb24, - int width, int height) { +int I420ToRGBA(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_rgba, int dst_stride_rgba, + int width, int height) { + return I420ToRGBAMatrix(src_y, src_stride_y, + src_u, src_stride_u, + src_v, src_stride_v, + dst_rgba, dst_stride_rgba, + &kYuvI601Constants, + width, height); +} + +// Convert I420 to BGRA. +LIBYUV_API +int I420ToBGRA(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_bgra, int dst_stride_bgra, + int width, int height) { + return I420ToRGBAMatrix(src_y, src_stride_y, + src_v, src_stride_v, // Swap U and V + src_u, src_stride_u, + dst_bgra, dst_stride_bgra, + &kYvuI601Constants, // Use Yvu matrix + width, height); +} + +// Convert I420 to RGB24 with matrix +static int I420ToRGB24Matrix(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_rgb24, int dst_stride_rgb24, + const struct YuvConstants* yuvconstants, + int width, int height) { int y; void (*I422ToRGB24Row)(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = I422ToRGB24Row_C; if (!src_y || !src_u || !src_v || !dst_rgb24 || width <= 0 || height == 0) { @@ -757,7 +550,7 @@ int I420ToRGB24(const uint8* src_y, int src_stride_y, #endif for (y = 0; y < height; ++y) { - I422ToRGB24Row(src_y, src_u, src_v, dst_rgb24, width); + I422ToRGB24Row(src_y, src_u, src_v, dst_rgb24, yuvconstants, width); dst_rgb24 += dst_stride_rgb24; src_y += src_stride_y; if (y & 1) { @@ -768,64 +561,34 @@ int I420ToRGB24(const uint8* src_y, int src_stride_y, return 0; } +// Convert I420 to RGB24. 
+LIBYUV_API +int I420ToRGB24(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_rgb24, int dst_stride_rgb24, + int width, int height) { + return I420ToRGB24Matrix(src_y, src_stride_y, + src_u, src_stride_u, + src_v, src_stride_v, + dst_rgb24, dst_stride_rgb24, + &kYuvI601Constants, + width, height); +} + // Convert I420 to RAW. LIBYUV_API int I420ToRAW(const uint8* src_y, int src_stride_y, - const uint8* src_u, int src_stride_u, - const uint8* src_v, int src_stride_v, - uint8* dst_raw, int dst_stride_raw, - int width, int height) { - int y; - void (*I422ToRAWRow)(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* rgb_buf, - int width) = I422ToRAWRow_C; - if (!src_y || !src_u || !src_v || !dst_raw || - width <= 0 || height == 0) { - return -1; - } - // Negative height means invert the image. - if (height < 0) { - height = -height; - dst_raw = dst_raw + (height - 1) * dst_stride_raw; - dst_stride_raw = -dst_stride_raw; - } -#if defined(HAS_I422TORAWROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - I422ToRAWRow = I422ToRAWRow_Any_SSSE3; - if (IS_ALIGNED(width, 8)) { - I422ToRAWRow = I422ToRAWRow_SSSE3; - } - } -#endif -#if defined(HAS_I422TORAWROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2)) { - I422ToRAWRow = I422ToRAWRow_Any_AVX2; - if (IS_ALIGNED(width, 16)) { - I422ToRAWRow = I422ToRAWRow_AVX2; - } - } -#endif -#if defined(HAS_I422TORAWROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - I422ToRAWRow = I422ToRAWRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - I422ToRAWRow = I422ToRAWRow_NEON; - } - } -#endif - - for (y = 0; y < height; ++y) { - I422ToRAWRow(src_y, src_u, src_v, dst_raw, width); - dst_raw += dst_stride_raw; - src_y += src_stride_y; - if (y & 1) { - src_u += src_stride_u; - src_v += src_stride_v; - } - } - return 0; + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_raw, int dst_stride_raw, + int width, int height) { + return 
I420ToRGB24Matrix(src_y, src_stride_y, + src_v, src_stride_v, // Swap U and V + src_u, src_stride_u, + dst_raw, dst_stride_raw, + &kYvuI601Constants, // Use Yvu matrix + width, height); } // Convert I420 to ARGB1555. @@ -840,6 +603,7 @@ int I420ToARGB1555(const uint8* src_y, int src_stride_y, const uint8* u_buf, const uint8* v_buf, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = I422ToARGB1555Row_C; if (!src_y || !src_u || !src_v || !dst_argb1555 || width <= 0 || height == 0) { @@ -877,7 +641,8 @@ int I420ToARGB1555(const uint8* src_y, int src_stride_y, #endif for (y = 0; y < height; ++y) { - I422ToARGB1555Row(src_y, src_u, src_v, dst_argb1555, width); + I422ToARGB1555Row(src_y, src_u, src_v, dst_argb1555, &kYuvI601Constants, + width); dst_argb1555 += dst_stride_argb1555; src_y += src_stride_y; if (y & 1) { @@ -901,6 +666,7 @@ int I420ToARGB4444(const uint8* src_y, int src_stride_y, const uint8* u_buf, const uint8* v_buf, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = I422ToARGB4444Row_C; if (!src_y || !src_u || !src_v || !dst_argb4444 || width <= 0 || height == 0) { @@ -938,7 +704,8 @@ int I420ToARGB4444(const uint8* src_y, int src_stride_y, #endif for (y = 0; y < height; ++y) { - I422ToARGB4444Row(src_y, src_u, src_v, dst_argb4444, width); + I422ToARGB4444Row(src_y, src_u, src_v, dst_argb4444, &kYuvI601Constants, + width); dst_argb4444 += dst_stride_argb4444; src_y += src_stride_y; if (y & 1) { @@ -961,6 +728,7 @@ int I420ToRGB565(const uint8* src_y, int src_stride_y, const uint8* u_buf, const uint8* v_buf, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = I422ToRGB565Row_C; if (!src_y || !src_u || !src_v || !dst_rgb565 || width <= 0 || height == 0) { @@ -998,7 +766,7 @@ int I420ToRGB565(const uint8* src_y, int src_stride_y, #endif for (y = 0; y < height; ++y) { - I422ToRGB565Row(src_y, src_u, src_v, dst_rgb565, width); + I422ToRGB565Row(src_y, src_u, src_v, dst_rgb565, &kYuvI601Constants, 
width); dst_rgb565 += dst_stride_rgb565; src_y += src_stride_y; if (y & 1) { @@ -1029,9 +797,10 @@ int I420ToRGB565Dither(const uint8* src_y, int src_stride_y, const uint8* u_buf, const uint8* v_buf, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = I422ToARGBRow_C; void (*ARGBToRGB565DitherRow)(const uint8* src_argb, uint8* dst_rgb, - const uint32 dither4, int pix) = ARGBToRGB565DitherRow_C; + const uint32 dither4, int width) = ARGBToRGB565DitherRow_C; if (!src_y || !src_u || !src_v || !dst_rgb565 || width <= 0 || height == 0) { return -1; @@ -1069,12 +838,12 @@ int I420ToRGB565Dither(const uint8* src_y, int src_stride_y, } } #endif -#if defined(HAS_I422TOARGBROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) && +#if defined(HAS_I422TOARGBROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(width, 4) && IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2)) { - I422ToARGBRow = I422ToARGBRow_MIPS_DSPR2; + I422ToARGBRow = I422ToARGBRow_DSPR2; } #endif #if defined(HAS_ARGBTORGB565DITHERROW_SSE2) @@ -1105,7 +874,7 @@ int I420ToRGB565Dither(const uint8* src_y, int src_stride_y, // Allocate a row of argb. 
align_buffer_64(row_argb, width * 4); for (y = 0; y < height; ++y) { - I422ToARGBRow(src_y, src_u, src_v, row_argb, width); + I422ToARGBRow(src_y, src_u, src_v, row_argb, &kYuvI601Constants, width); ARGBToRGB565DitherRow(row_argb, dst_rgb565, *(uint32*)(dither4x4 + ((y & 3) << 2)), width); dst_rgb565 += dst_stride_rgb565; @@ -1258,7 +1027,6 @@ int ConvertFromI420(const uint8* y, int y_stride, // Triplanar formats // TODO(fbarchard): halfstride instead of halfwidth case FOURCC_I420: - case FOURCC_YU12: case FOURCC_YV12: { int halfwidth = (width + 1) / 2; int halfheight = (height + 1) / 2; diff --git a/libs/libvpx/third_party/libyuv/source/convert_from_argb.cc b/libs/libvpx/third_party/libyuv/source/convert_from_argb.cc index 8d1e97aec2..2a8682b7eb 100644 --- a/libs/libvpx/third_party/libyuv/source/convert_from_argb.cc +++ b/libs/libvpx/third_party/libyuv/source/convert_from_argb.cc @@ -28,10 +28,10 @@ int ARGBToI444(const uint8* src_argb, int src_stride_argb, uint8* dst_v, int dst_stride_v, int width, int height) { int y; - void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) = + void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) = ARGBToYRow_C; void (*ARGBToUV444Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix) = ARGBToUV444Row_C; + int width) = ARGBToUV444Row_C; if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) { return -1; } @@ -109,13 +109,16 @@ int ARGBToI422(const uint8* src_argb, int src_stride_argb, uint8* dst_v, int dst_stride_v, int width, int height) { int y; - void (*ARGBToUV422Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix) = ARGBToUV422Row_C; - void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) = + void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb, + uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C; + void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) = ARGBToYRow_C; - if (!src_argb || !dst_y || !dst_u || !dst_v || 
width <= 0 || height == 0) { + if (!src_argb || + !dst_y || !dst_u || !dst_v || + width <= 0 || height == 0) { return -1; } + // Negative height means invert the image. if (height < 0) { height = -height; src_argb = src_argb + (height - 1) * src_stride_argb; @@ -130,34 +133,22 @@ int ARGBToI422(const uint8* src_argb, int src_stride_argb, height = 1; src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0; } -#if defined(HAS_ARGBTOUV422ROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUV422Row = ARGBToUV422Row_Any_SSSE3; - if (IS_ALIGNED(width, 16)) { - ARGBToUV422Row = ARGBToUV422Row_SSSE3; - } - } -#endif -#if defined(HAS_ARGBTOUV422ROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - ARGBToUV422Row = ARGBToUV422Row_Any_NEON; - if (IS_ALIGNED(width, 16)) { - ARGBToUV422Row = ARGBToUV422Row_NEON; - } - } -#endif -#if defined(HAS_ARGBTOYROW_SSSE3) +#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVRow = ARGBToUVRow_Any_SSSE3; ARGBToYRow = ARGBToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_SSSE3; ARGBToYRow = ARGBToYRow_SSSE3; } } #endif -#if defined(HAS_ARGBTOYROW_AVX2) +#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVRow = ARGBToUVRow_Any_AVX2; ARGBToYRow = ARGBToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { + ARGBToUVRow = ARGBToUVRow_AVX2; ARGBToYRow = ARGBToYRow_AVX2; } } @@ -170,9 +161,17 @@ int ARGBToI422(const uint8* src_argb, int src_stride_argb, } } #endif +#if defined(HAS_ARGBTOUVROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ARGBToUVRow = ARGBToUVRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_NEON; + } + } +#endif for (y = 0; y < height; ++y) { - ARGBToUV422Row(src_argb, dst_u, dst_v, width); + ARGBToUVRow(src_argb, 0, dst_u, dst_v, width); ARGBToYRow(src_argb, dst_y, width); src_argb += src_stride_argb; dst_y += dst_stride_y; @@ -191,8 +190,8 @@ int ARGBToI411(const uint8* 
src_argb, int src_stride_argb, int width, int height) { int y; void (*ARGBToUV411Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix) = ARGBToUV411Row_C; - void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) = + int width) = ARGBToUV411Row_C; + void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) = ARGBToYRow_C; if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) { return -1; @@ -264,7 +263,7 @@ int ARGBToNV12(const uint8* src_argb, int src_stride_argb, int halfwidth = (width + 1) >> 1; void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb, uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C; - void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) = + void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) = ARGBToYRow_C; void (*MergeUVRow_)(const uint8* src_u, const uint8* src_v, uint8* dst_uv, int width) = MergeUVRow_C; @@ -373,7 +372,7 @@ int ARGBToNV21(const uint8* src_argb, int src_stride_argb, int halfwidth = (width + 1) >> 1; void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb, uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C; - void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) = + void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) = ARGBToYRow_C; void (*MergeUVRow_)(const uint8* src_u, const uint8* src_v, uint8* dst_uv, int width) = MergeUVRow_C; @@ -478,9 +477,9 @@ int ARGBToYUY2(const uint8* src_argb, int src_stride_argb, uint8* dst_yuy2, int dst_stride_yuy2, int width, int height) { int y; - void (*ARGBToUV422Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix) = ARGBToUV422Row_C; - void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) = + void (*ARGBToUVRow)(const uint8* src_argb, int src_stride_argb, + uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C; + void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) = ARGBToYRow_C; void (*I422ToYUY2Row)(const uint8* src_y, 
const uint8* src_u, const uint8* src_v, uint8* dst_yuy2, int width) = I422ToYUY2Row_C; @@ -502,34 +501,22 @@ int ARGBToYUY2(const uint8* src_argb, int src_stride_argb, height = 1; src_stride_argb = dst_stride_yuy2 = 0; } -#if defined(HAS_ARGBTOUV422ROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUV422Row = ARGBToUV422Row_Any_SSSE3; - if (IS_ALIGNED(width, 16)) { - ARGBToUV422Row = ARGBToUV422Row_SSSE3; - } - } -#endif -#if defined(HAS_ARGBTOUV422ROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - ARGBToUV422Row = ARGBToUV422Row_Any_NEON; - if (IS_ALIGNED(width, 16)) { - ARGBToUV422Row = ARGBToUV422Row_NEON; - } - } -#endif -#if defined(HAS_ARGBTOYROW_SSSE3) +#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVRow = ARGBToUVRow_Any_SSSE3; ARGBToYRow = ARGBToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_SSSE3; ARGBToYRow = ARGBToYRow_SSSE3; } } #endif -#if defined(HAS_ARGBTOYROW_AVX2) +#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVRow = ARGBToUVRow_Any_AVX2; ARGBToYRow = ARGBToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { + ARGBToUVRow = ARGBToUVRow_AVX2; ARGBToYRow = ARGBToYRow_AVX2; } } @@ -542,7 +529,14 @@ int ARGBToYUY2(const uint8* src_argb, int src_stride_argb, } } #endif - +#if defined(HAS_ARGBTOUVROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ARGBToUVRow = ARGBToUVRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_NEON; + } + } +#endif #if defined(HAS_I422TOYUY2ROW_SSE2) if (TestCpuFlag(kCpuHasSSE2)) { I422ToYUY2Row = I422ToYUY2Row_Any_SSE2; @@ -567,7 +561,7 @@ int ARGBToYUY2(const uint8* src_argb, int src_stride_argb, uint8* row_v = row_u + ((width + 63) & ~63) / 2; for (y = 0; y < height; ++y) { - ARGBToUV422Row(src_argb, row_u, row_v, width); + ARGBToUVRow(src_argb, 0, row_u, row_v, width); ARGBToYRow(src_argb, row_y, width); I422ToYUY2Row(row_y, row_u, row_v, dst_yuy2, width); src_argb += 
src_stride_argb; @@ -585,9 +579,9 @@ int ARGBToUYVY(const uint8* src_argb, int src_stride_argb, uint8* dst_uyvy, int dst_stride_uyvy, int width, int height) { int y; - void (*ARGBToUV422Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix) = ARGBToUV422Row_C; - void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) = + void (*ARGBToUVRow)(const uint8* src_argb, int src_stride_argb, + uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C; + void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) = ARGBToYRow_C; void (*I422ToUYVYRow)(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_uyvy, int width) = I422ToUYVYRow_C; @@ -609,34 +603,22 @@ int ARGBToUYVY(const uint8* src_argb, int src_stride_argb, height = 1; src_stride_argb = dst_stride_uyvy = 0; } -#if defined(HAS_ARGBTOUV422ROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUV422Row = ARGBToUV422Row_Any_SSSE3; - if (IS_ALIGNED(width, 16)) { - ARGBToUV422Row = ARGBToUV422Row_SSSE3; - } - } -#endif -#if defined(HAS_ARGBTOUV422ROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - ARGBToUV422Row = ARGBToUV422Row_Any_NEON; - if (IS_ALIGNED(width, 16)) { - ARGBToUV422Row = ARGBToUV422Row_NEON; - } - } -#endif -#if defined(HAS_ARGBTOYROW_SSSE3) +#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVRow = ARGBToUVRow_Any_SSSE3; ARGBToYRow = ARGBToYRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_SSSE3; ARGBToYRow = ARGBToYRow_SSSE3; } } #endif -#if defined(HAS_ARGBTOYROW_AVX2) +#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { + ARGBToUVRow = ARGBToUVRow_Any_AVX2; ARGBToYRow = ARGBToYRow_Any_AVX2; if (IS_ALIGNED(width, 32)) { + ARGBToUVRow = ARGBToUVRow_AVX2; ARGBToYRow = ARGBToYRow_AVX2; } } @@ -649,7 +631,14 @@ int ARGBToUYVY(const uint8* src_argb, int src_stride_argb, } } #endif - +#if defined(HAS_ARGBTOUVROW_NEON) + if 
(TestCpuFlag(kCpuHasNEON)) { + ARGBToUVRow = ARGBToUVRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + ARGBToUVRow = ARGBToUVRow_NEON; + } + } +#endif #if defined(HAS_I422TOUYVYROW_SSE2) if (TestCpuFlag(kCpuHasSSE2)) { I422ToUYVYRow = I422ToUYVYRow_Any_SSE2; @@ -674,7 +663,7 @@ int ARGBToUYVY(const uint8* src_argb, int src_stride_argb, uint8* row_v = row_u + ((width + 63) & ~63) / 2; for (y = 0; y < height; ++y) { - ARGBToUV422Row(src_argb, row_u, row_v, width); + ARGBToUVRow(src_argb, 0, row_u, row_v, width); ARGBToYRow(src_argb, row_y, width); I422ToUYVYRow(row_y, row_u, row_v, dst_uyvy, width); src_argb += src_stride_argb; @@ -692,7 +681,7 @@ int ARGBToI400(const uint8* src_argb, int src_stride_argb, uint8* dst_y, int dst_stride_y, int width, int height) { int y; - void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) = + void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) = ARGBToYRow_C; if (!src_argb || !dst_y || width <= 0 || height == 0) { return -1; @@ -764,7 +753,7 @@ int ARGBToRGB24(const uint8* src_argb, int src_stride_argb, uint8* dst_rgb24, int dst_stride_rgb24, int width, int height) { int y; - void (*ARGBToRGB24Row)(const uint8* src_argb, uint8* dst_rgb, int pix) = + void (*ARGBToRGB24Row)(const uint8* src_argb, uint8* dst_rgb, int width) = ARGBToRGB24Row_C; if (!src_argb || !dst_rgb24 || width <= 0 || height == 0) { return -1; @@ -812,7 +801,7 @@ int ARGBToRAW(const uint8* src_argb, int src_stride_argb, uint8* dst_raw, int dst_stride_raw, int width, int height) { int y; - void (*ARGBToRAWRow)(const uint8* src_argb, uint8* dst_rgb, int pix) = + void (*ARGBToRAWRow)(const uint8* src_argb, uint8* dst_rgb, int width) = ARGBToRAWRow_C; if (!src_argb || !dst_raw || width <= 0 || height == 0) { return -1; @@ -869,7 +858,7 @@ int ARGBToRGB565Dither(const uint8* src_argb, int src_stride_argb, const uint8* dither4x4, int width, int height) { int y; void (*ARGBToRGB565DitherRow)(const uint8* src_argb, uint8* dst_rgb, - const uint32 
dither4, int pix) = ARGBToRGB565DitherRow_C; + const uint32 dither4, int width) = ARGBToRGB565DitherRow_C; if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) { return -1; } @@ -921,7 +910,7 @@ int ARGBToRGB565(const uint8* src_argb, int src_stride_argb, uint8* dst_rgb565, int dst_stride_rgb565, int width, int height) { int y; - void (*ARGBToRGB565Row)(const uint8* src_argb, uint8* dst_rgb, int pix) = + void (*ARGBToRGB565Row)(const uint8* src_argb, uint8* dst_rgb, int width) = ARGBToRGB565Row_C; if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) { return -1; @@ -977,7 +966,7 @@ int ARGBToARGB1555(const uint8* src_argb, int src_stride_argb, uint8* dst_argb1555, int dst_stride_argb1555, int width, int height) { int y; - void (*ARGBToARGB1555Row)(const uint8* src_argb, uint8* dst_rgb, int pix) = + void (*ARGBToARGB1555Row)(const uint8* src_argb, uint8* dst_rgb, int width) = ARGBToARGB1555Row_C; if (!src_argb || !dst_argb1555 || width <= 0 || height == 0) { return -1; @@ -1033,7 +1022,7 @@ int ARGBToARGB4444(const uint8* src_argb, int src_stride_argb, uint8* dst_argb4444, int dst_stride_argb4444, int width, int height) { int y; - void (*ARGBToARGB4444Row)(const uint8* src_argb, uint8* dst_rgb, int pix) = + void (*ARGBToARGB4444Row)(const uint8* src_argb, uint8* dst_rgb, int width) = ARGBToARGB4444Row_C; if (!src_argb || !dst_argb4444 || width <= 0 || height == 0) { return -1; @@ -1093,7 +1082,7 @@ int ARGBToJ420(const uint8* src_argb, int src_stride_argb, int y; void (*ARGBToUVJRow)(const uint8* src_argb0, int src_stride_argb, uint8* dst_u, uint8* dst_v, int width) = ARGBToUVJRow_C; - void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int pix) = + void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int width) = ARGBToYJRow_C; if (!src_argb || !dst_yj || !dst_u || !dst_v || @@ -1157,21 +1146,24 @@ int ARGBToJ420(const uint8* src_argb, int src_stride_argb, return 0; } -// ARGB little endian (bgra in memory) to J422 +// Convert ARGB to J422. 
(JPeg full range I422). LIBYUV_API int ARGBToJ422(const uint8* src_argb, int src_stride_argb, - uint8* dst_y, int dst_stride_y, + uint8* dst_yj, int dst_stride_yj, uint8* dst_u, int dst_stride_u, uint8* dst_v, int dst_stride_v, int width, int height) { int y; - void (*ARGBToUVJ422Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix) = ARGBToUVJ422Row_C; - void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_y, int pix) = + void (*ARGBToUVJRow)(const uint8* src_argb0, int src_stride_argb, + uint8* dst_u, uint8* dst_v, int width) = ARGBToUVJRow_C; + void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int width) = ARGBToYJRow_C; - if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) { + if (!src_argb || + !dst_yj || !dst_u || !dst_v || + width <= 0 || height == 0) { return -1; } + // Negative height means invert the image. if (height < 0) { height = -height; src_argb = src_argb + (height - 1) * src_stride_argb; @@ -1179,34 +1171,19 @@ int ARGBToJ422(const uint8* src_argb, int src_stride_argb, } // Coalesce rows. 
if (src_stride_argb == width * 4 && - dst_stride_y == width && + dst_stride_yj == width && dst_stride_u * 2 == width && dst_stride_v * 2 == width) { width *= height; height = 1; - src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0; + src_stride_argb = dst_stride_yj = dst_stride_u = dst_stride_v = 0; } -#if defined(HAS_ARGBTOUVJ422ROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - ARGBToUVJ422Row = ARGBToUVJ422Row_Any_SSSE3; - if (IS_ALIGNED(width, 16)) { - ARGBToUVJ422Row = ARGBToUVJ422Row_SSSE3; - } - } -#endif -#if defined(HAS_ARGBTOUVJ422ROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - ARGBToUVJ422Row = ARGBToUVJ422Row_Any_NEON; - if (IS_ALIGNED(width, 16)) { - ARGBToUVJ422Row = ARGBToUVJ422Row_NEON; - } - } -#endif - -#if defined(HAS_ARGBTOYJROW_SSSE3) +#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { + ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3; ARGBToYJRow = ARGBToYJRow_Any_SSSE3; if (IS_ALIGNED(width, 16)) { + ARGBToUVJRow = ARGBToUVJRow_SSSE3; ARGBToYJRow = ARGBToYJRow_SSSE3; } } @@ -1227,12 +1204,20 @@ int ARGBToJ422(const uint8* src_argb, int src_stride_argb, } } #endif +#if defined(HAS_ARGBTOUVJROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ARGBToUVJRow = ARGBToUVJRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + ARGBToUVJRow = ARGBToUVJRow_NEON; + } + } +#endif for (y = 0; y < height; ++y) { - ARGBToUVJ422Row(src_argb, dst_u, dst_v, width); - ARGBToYJRow(src_argb, dst_y, width); + ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width); + ARGBToYJRow(src_argb, dst_yj, width); src_argb += src_stride_argb; - dst_y += dst_stride_y; + dst_yj += dst_stride_yj; dst_u += dst_stride_u; dst_v += dst_stride_v; } @@ -1245,7 +1230,7 @@ int ARGBToJ400(const uint8* src_argb, int src_stride_argb, uint8* dst_yj, int dst_stride_yj, int width, int height) { int y; - void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int pix) = + void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int width) = ARGBToYJRow_C; if 
(!src_argb || !dst_yj || width <= 0 || height == 0) { return -1; diff --git a/libs/libvpx/third_party/libyuv/source/convert_jpeg.cc b/libs/libvpx/third_party/libyuv/source/convert_jpeg.cc index bcb980f7f1..90f550a26a 100644 --- a/libs/libvpx/third_party/libyuv/source/convert_jpeg.cc +++ b/libs/libvpx/third_party/libyuv/source/convert_jpeg.cc @@ -9,6 +9,7 @@ */ #include "libyuv/convert.h" +#include "libyuv/convert_argb.h" #ifdef HAVE_JPEG #include "libyuv/mjpeg_decoder.h" diff --git a/libs/libvpx/third_party/libyuv/source/convert_to_argb.cc b/libs/libvpx/third_party/libyuv/source/convert_to_argb.cc index af829fbd32..aecdc80fde 100644 --- a/libs/libvpx/third_party/libyuv/source/convert_to_argb.cc +++ b/libs/libvpx/third_party/libyuv/source/convert_to_argb.cc @@ -23,7 +23,7 @@ namespace libyuv { extern "C" { #endif -// Convert camera sample to I420 with cropping, rotation and vertical flip. +// Convert camera sample to ARGB with cropping, rotation and vertical flip. // src_width is used for source stride computation // src_height is used to compute location of planes, and indicate inversion // sample_size is measured in bytes and is the size of the frame. @@ -51,8 +51,8 @@ int ConvertToARGB(const uint8* sample, size_t sample_size, // also enable temporary buffer. LIBYUV_BOOL need_buf = (rotation && format != FOURCC_ARGB) || crop_argb == sample; - uint8* tmp_argb = crop_argb; - int tmp_argb_stride = argb_stride; + uint8* dest_argb = crop_argb; + int dest_argb_stride = argb_stride; uint8* rotate_buffer = NULL; int abs_crop_height = (crop_height < 0) ? -crop_height : crop_height; @@ -66,13 +66,13 @@ int ConvertToARGB(const uint8* sample, size_t sample_size, } if (need_buf) { - int argb_size = crop_width * abs_crop_height * 4; + int argb_size = crop_width * 4 * abs_crop_height; rotate_buffer = (uint8*)malloc(argb_size); if (!rotate_buffer) { return 1; // Out of memory runtime error. 
} crop_argb = rotate_buffer; - argb_stride = crop_width; + argb_stride = crop_width * 4; } switch (format) { @@ -176,7 +176,6 @@ int ConvertToARGB(const uint8* sample, size_t sample_size, break; // Triplanar formats case FOURCC_I420: - case FOURCC_YU12: case FOURCC_YV12: { const uint8* src_y = sample + (src_width * crop_y + crop_x); const uint8* src_u; @@ -291,7 +290,7 @@ int ConvertToARGB(const uint8* sample, size_t sample_size, if (need_buf) { if (!r) { r = ARGBRotate(crop_argb, argb_stride, - tmp_argb, tmp_argb_stride, + dest_argb, dest_argb_stride, crop_width, abs_crop_height, rotation); } free(rotate_buffer); diff --git a/libs/libvpx/third_party/libyuv/source/convert_to_i420.cc b/libs/libvpx/third_party/libyuv/source/convert_to_i420.cc index 5e75369b55..e5f307c446 100644 --- a/libs/libvpx/third_party/libyuv/source/convert_to_i420.cc +++ b/libs/libvpx/third_party/libyuv/source/convert_to_i420.cc @@ -39,12 +39,13 @@ int ConvertToI420(const uint8* sample, int aligned_src_width = (src_width + 1) & ~1; const uint8* src; const uint8* src_uv; - int abs_src_height = (src_height < 0) ? -src_height : src_height; - int inv_crop_height = (crop_height < 0) ? -crop_height : crop_height; + const int abs_src_height = (src_height < 0) ? -src_height : src_height; + // TODO(nisse): Why allow crop_height < 0? + const int abs_crop_height = (crop_height < 0) ? -crop_height : crop_height; int r = 0; LIBYUV_BOOL need_buf = (rotation && format != FOURCC_I420 && format != FOURCC_NV12 && format != FOURCC_NV21 && - format != FOURCC_YU12 && format != FOURCC_YV12) || y == sample; + format != FOURCC_YV12) || y == sample; uint8* tmp_y = y; uint8* tmp_u = u; uint8* tmp_v = v; @@ -52,16 +53,14 @@ int ConvertToI420(const uint8* sample, int tmp_u_stride = u_stride; int tmp_v_stride = v_stride; uint8* rotate_buffer = NULL; - int abs_crop_height = (crop_height < 0) ? -crop_height : crop_height; + const int inv_crop_height = + (src_height < 0) ? 
-abs_crop_height : abs_crop_height; if (!y || !u || !v || !sample || src_width <= 0 || crop_width <= 0 || src_height == 0 || crop_height == 0) { return -1; } - if (src_height < 0) { - inv_crop_height = -inv_crop_height; - } // One pass rotation is available for some formats. For the rest, convert // to I420 (with optional vertical flipping) into a temporary I420 buffer, @@ -214,7 +213,6 @@ int ConvertToI420(const uint8* sample, break; // Triplanar formats case FOURCC_I420: - case FOURCC_YU12: case FOURCC_YV12: { const uint8* src_y = sample + (src_width * crop_y + crop_x); const uint8* src_u; diff --git a/libs/libvpx/third_party/libyuv/source/cpu_id.cc b/libs/libvpx/third_party/libyuv/source/cpu_id.cc index 8a10b00835..84927ebc3e 100644 --- a/libs/libvpx/third_party/libyuv/source/cpu_id.cc +++ b/libs/libvpx/third_party/libyuv/source/cpu_id.cc @@ -10,12 +10,12 @@ #include "libyuv/cpu_id.h" -#if (defined(_MSC_VER) && !defined(__clang__)) && !defined(__clang__) +#if defined(_MSC_VER) #include // For __cpuidex() #endif #if !defined(__pnacl__) && !defined(__CLR_VER) && \ !defined(__native_client__) && (defined(_M_IX86) || defined(_M_X64)) && \ - defined(_MSC_VER) && !defined(__clang__) && (_MSC_FULL_VER >= 160040219) + defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219) #include // For _xgetbv() #endif @@ -36,7 +36,8 @@ extern "C" { // For functions that use the stack and have runtime checks for overflow, // use SAFEBUFFERS to avoid additional check. 
-#if (defined(_MSC_VER) && !defined(__clang__)) && (_MSC_FULL_VER >= 160040219) +#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219) && \ + !defined(__clang__) #define SAFEBUFFERS __declspec(safebuffers) #else #define SAFEBUFFERS @@ -48,9 +49,9 @@ extern "C" { !defined(__pnacl__) && !defined(__CLR_VER) LIBYUV_API void CpuId(uint32 info_eax, uint32 info_ecx, uint32* cpu_info) { -#if (defined(_MSC_VER) && !defined(__clang__)) && !defined(__clang__) +#if defined(_MSC_VER) // Visual C version uses intrinsic or inline x86 assembly. -#if (_MSC_FULL_VER >= 160040219) +#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219) __cpuidex((int*)(cpu_info), info_eax, info_ecx); #elif defined(_M_IX86) __asm { @@ -63,7 +64,7 @@ void CpuId(uint32 info_eax, uint32 info_ecx, uint32* cpu_info) { mov [edi + 8], ecx mov [edi + 12], edx } -#else +#else // Visual C but not x86 if (info_ecx == 0) { __cpuid((int*)(cpu_info), info_eax); } else { @@ -71,9 +72,9 @@ void CpuId(uint32 info_eax, uint32 info_ecx, uint32* cpu_info) { } #endif // GCC version uses inline x86 assembly. -#else // (defined(_MSC_VER) && !defined(__clang__)) && !defined(__clang__) +#else // defined(_MSC_VER) uint32 info_ebx, info_edx; - asm volatile ( // NOLINT + asm volatile ( #if defined( __i386__) && defined(__PIC__) // Preserve ebx for fpic 32 bit. "mov %%ebx, %%edi \n" @@ -89,7 +90,7 @@ void CpuId(uint32 info_eax, uint32 info_ecx, uint32* cpu_info) { cpu_info[1] = info_ebx; cpu_info[2] = info_ecx; cpu_info[3] = info_edx; -#endif // (defined(_MSC_VER) && !defined(__clang__)) && !defined(__clang__) +#endif // defined(_MSC_VER) } #else // (defined(_M_IX86) || defined(_M_X64) ... LIBYUV_API @@ -98,28 +99,37 @@ void CpuId(uint32 eax, uint32 ecx, uint32* cpu_info) { } #endif -// TODO(fbarchard): Enable xgetbv when validator supports it. +// For VS2010 and earlier emit can be used: +// _asm _emit 0x0f _asm _emit 0x01 _asm _emit 0xd0 // For VS2010 and earlier. 
+// __asm { +// xor ecx, ecx // xcr 0 +// xgetbv +// mov xcr0, eax +// } +// For VS2013 and earlier 32 bit, the _xgetbv(0) optimizer produces bad code. +// https://code.google.com/p/libyuv/issues/detail?id=529 +#if defined(_M_IX86) && (_MSC_VER < 1900) +#pragma optimize("g", off) +#endif #if (defined(_M_IX86) || defined(_M_X64) || \ defined(__i386__) || defined(__x86_64__)) && \ !defined(__pnacl__) && !defined(__CLR_VER) && !defined(__native_client__) #define HAS_XGETBV // X86 CPUs have xgetbv to detect OS saves high parts of ymm registers. -int TestOsSaveYmm() { +int GetXCR0() { uint32 xcr0 = 0u; -#if (defined(_MSC_VER) && !defined(__clang__)) && (_MSC_FULL_VER >= 160040219) +#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219) xcr0 = (uint32)(_xgetbv(0)); // VS2010 SP1 required. -#elif defined(_M_IX86) && defined(_MSC_VER) && !defined(__clang__) - __asm { - xor ecx, ecx // xcr 0 - _asm _emit 0x0f _asm _emit 0x01 _asm _emit 0xd0 // For VS2010 and earlier. - mov xcr0, eax - } #elif defined(__i386__) || defined(__x86_64__) asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcr0) : "c" (0) : "%edx"); #endif // defined(__i386__) || defined(__x86_64__) - return((xcr0 & 6) == 6); // Is ymm saved? + return xcr0; } #endif // defined(_M_IX86) || defined(_M_X64) .. +// Return optimization to previous setting. +#if defined(_M_IX86) && (_MSC_VER < 1900) +#pragma optimize("g", on) +#endif // based on libvpx arm_cpudetect.c // For Arm, but public to allow testing on any CPU @@ -151,30 +161,9 @@ int ArmCpuCaps(const char* cpuinfo_name) { return 0; } -#if defined(__mips__) && defined(__linux__) -static int MipsCpuCaps(const char* search_string) { - char cpuinfo_line[512]; - const char* file_name = "/proc/cpuinfo"; - FILE* f = fopen(file_name, "r"); - if (!f) { - // Assume DSP if /proc/cpuinfo is unavailable. - // This will occur for Chrome sandbox for Pepper or Render process. 
- return kCpuHasMIPS_DSP; - } - while (fgets(cpuinfo_line, sizeof(cpuinfo_line) - 1, f) != NULL) { - if (strstr(cpuinfo_line, search_string) != NULL) { - fclose(f); - return kCpuHasMIPS_DSP; - } - } - fclose(f); - return 0; -} -#endif - // CPU detect function for SIMD instruction sets. LIBYUV_API -int cpu_info_ = kCpuInit; // cpu_info is not initialized yet. +int cpu_info_ = 0; // cpu_info is not initialized yet. // Test environment variable for disabling CPU features. Any non-zero value // to disable. Zero ignored to make it easy to set the variable on/off. @@ -197,8 +186,9 @@ static LIBYUV_BOOL TestEnv(const char*) { LIBYUV_API SAFEBUFFERS int InitCpuFlags(void) { + // TODO(fbarchard): swap kCpuInit logic so 0 means uninitialized. + int cpu_info = 0; #if !defined(__pnacl__) && !defined(__CLR_VER) && defined(CPU_X86) - uint32 cpu_info0[4] = { 0, 0, 0, 0 }; uint32 cpu_info1[4] = { 0, 0, 0, 0 }; uint32 cpu_info7[4] = { 0, 0, 0, 0 }; @@ -207,66 +197,66 @@ int InitCpuFlags(void) { if (cpu_info0[0] >= 7) { CpuId(7, 0, cpu_info7); } - cpu_info_ = ((cpu_info1[3] & 0x04000000) ? kCpuHasSSE2 : 0) | - ((cpu_info1[2] & 0x00000200) ? kCpuHasSSSE3 : 0) | - ((cpu_info1[2] & 0x00080000) ? kCpuHasSSE41 : 0) | - ((cpu_info1[2] & 0x00100000) ? kCpuHasSSE42 : 0) | - ((cpu_info7[1] & 0x00000200) ? kCpuHasERMS : 0) | - ((cpu_info1[2] & 0x00001000) ? kCpuHasFMA3 : 0) | - kCpuHasX86; + cpu_info = ((cpu_info1[3] & 0x04000000) ? kCpuHasSSE2 : 0) | + ((cpu_info1[2] & 0x00000200) ? kCpuHasSSSE3 : 0) | + ((cpu_info1[2] & 0x00080000) ? kCpuHasSSE41 : 0) | + ((cpu_info1[2] & 0x00100000) ? kCpuHasSSE42 : 0) | + ((cpu_info7[1] & 0x00000200) ? kCpuHasERMS : 0) | + ((cpu_info1[2] & 0x00001000) ? kCpuHasFMA3 : 0) | + kCpuHasX86; #ifdef HAS_XGETBV - if ((cpu_info1[2] & 0x18000000) == 0x18000000 && // AVX and OSSave - TestOsSaveYmm()) { // Saves YMM. - cpu_info_ |= ((cpu_info7[1] & 0x00000020) ? 
kCpuHasAVX2 : 0) | - kCpuHasAVX; + // AVX requires CPU has AVX, XSAVE and OSXSave for xgetbv + if (((cpu_info1[2] & 0x1c000000) == 0x1c000000) && // AVX and OSXSave + ((GetXCR0() & 6) == 6)) { // Test OS saves YMM registers + cpu_info |= ((cpu_info7[1] & 0x00000020) ? kCpuHasAVX2 : 0) | kCpuHasAVX; + + // Detect AVX512bw + if ((GetXCR0() & 0xe0) == 0xe0) { + cpu_info |= (cpu_info7[1] & 0x40000000) ? kCpuHasAVX3 : 0; + } } #endif + // Environment variable overrides for testing. if (TestEnv("LIBYUV_DISABLE_X86")) { - cpu_info_ &= ~kCpuHasX86; + cpu_info &= ~kCpuHasX86; } if (TestEnv("LIBYUV_DISABLE_SSE2")) { - cpu_info_ &= ~kCpuHasSSE2; + cpu_info &= ~kCpuHasSSE2; } if (TestEnv("LIBYUV_DISABLE_SSSE3")) { - cpu_info_ &= ~kCpuHasSSSE3; + cpu_info &= ~kCpuHasSSSE3; } if (TestEnv("LIBYUV_DISABLE_SSE41")) { - cpu_info_ &= ~kCpuHasSSE41; + cpu_info &= ~kCpuHasSSE41; } if (TestEnv("LIBYUV_DISABLE_SSE42")) { - cpu_info_ &= ~kCpuHasSSE42; + cpu_info &= ~kCpuHasSSE42; } if (TestEnv("LIBYUV_DISABLE_AVX")) { - cpu_info_ &= ~kCpuHasAVX; + cpu_info &= ~kCpuHasAVX; } if (TestEnv("LIBYUV_DISABLE_AVX2")) { - cpu_info_ &= ~kCpuHasAVX2; + cpu_info &= ~kCpuHasAVX2; } if (TestEnv("LIBYUV_DISABLE_ERMS")) { - cpu_info_ &= ~kCpuHasERMS; + cpu_info &= ~kCpuHasERMS; } if (TestEnv("LIBYUV_DISABLE_FMA3")) { - cpu_info_ &= ~kCpuHasFMA3; + cpu_info &= ~kCpuHasFMA3; + } + if (TestEnv("LIBYUV_DISABLE_AVX3")) { + cpu_info &= ~kCpuHasAVX3; } #endif #if defined(__mips__) && defined(__linux__) - // Linux mips parse text file for dsp detect. - cpu_info_ = MipsCpuCaps("dsp"); // set kCpuHasMIPS_DSP. 
#if defined(__mips_dspr2) - cpu_info_ |= kCpuHasMIPS_DSPR2; + cpu_info |= kCpuHasDSPR2; #endif - cpu_info_ |= kCpuHasMIPS; - - if (getenv("LIBYUV_DISABLE_MIPS")) { - cpu_info_ &= ~kCpuHasMIPS; - } - if (getenv("LIBYUV_DISABLE_MIPS_DSP")) { - cpu_info_ &= ~kCpuHasMIPS_DSP; - } - if (getenv("LIBYUV_DISABLE_MIPS_DSPR2")) { - cpu_info_ &= ~kCpuHasMIPS_DSPR2; + cpu_info |= kCpuHasMIPS; + if (getenv("LIBYUV_DISABLE_DSPR2")) { + cpu_info &= ~kCpuHasDSPR2; } #endif #if defined(__arm__) || defined(__aarch64__) @@ -274,28 +264,31 @@ int InitCpuFlags(void) { // __ARM_NEON__ generates code that requires Neon. NaCL also requires Neon. // For Linux, /proc/cpuinfo can be tested but without that assume Neon. #if defined(__ARM_NEON__) || defined(__native_client__) || !defined(__linux__) - cpu_info_ = kCpuHasNEON; + cpu_info = kCpuHasNEON; // For aarch64(arm64), /proc/cpuinfo's feature is not complete, e.g. no neon // flag in it. // So for aarch64, neon enabling is hard coded here. #endif #if defined(__aarch64__) - cpu_info_ = kCpuHasNEON; + cpu_info = kCpuHasNEON; #else // Linux arm parse text file for neon detect. - cpu_info_ = ArmCpuCaps("/proc/cpuinfo"); + cpu_info = ArmCpuCaps("/proc/cpuinfo"); #endif - cpu_info_ |= kCpuHasARM; + cpu_info |= kCpuHasARM; if (TestEnv("LIBYUV_DISABLE_NEON")) { - cpu_info_ &= ~kCpuHasNEON; + cpu_info &= ~kCpuHasNEON; } #endif // __arm__ if (TestEnv("LIBYUV_DISABLE_ASM")) { - cpu_info_ = 0; + cpu_info = 0; } - return cpu_info_; + cpu_info |= kCpuInitialized; + cpu_info_ = cpu_info; + return cpu_info; } +// Note that use of this function is not thread safe. 
LIBYUV_API void MaskCpuFlags(int enable_flags) { cpu_info_ = InitCpuFlags() & enable_flags; diff --git a/libs/libvpx/third_party/libyuv/source/mjpeg_decoder.cc b/libs/libvpx/third_party/libyuv/source/mjpeg_decoder.cc index 75f8a610e3..22025ad04a 100644 --- a/libs/libvpx/third_party/libyuv/source/mjpeg_decoder.cc +++ b/libs/libvpx/third_party/libyuv/source/mjpeg_decoder.cc @@ -59,10 +59,10 @@ const int MJpegDecoder::kColorSpaceYCCK = JCS_YCCK; // Methods that are passed to jpeglib. boolean fill_input_buffer(jpeg_decompress_struct* cinfo); void init_source(jpeg_decompress_struct* cinfo); -void skip_input_data(jpeg_decompress_struct* cinfo, - long num_bytes); // NOLINT +void skip_input_data(jpeg_decompress_struct* cinfo, long num_bytes); // NOLINT void term_source(jpeg_decompress_struct* cinfo); void ErrorHandler(jpeg_common_struct* cinfo); +void OutputHandler(jpeg_common_struct* cinfo); MJpegDecoder::MJpegDecoder() : has_scanline_padding_(LIBYUV_FALSE), @@ -78,6 +78,7 @@ MJpegDecoder::MJpegDecoder() decompress_struct_->err = jpeg_std_error(&error_mgr_->base); // Override standard exit()-based error handler. error_mgr_->base.error_exit = &ErrorHandler; + error_mgr_->base.output_message = &OutputHandler; #endif decompress_struct_->client_data = NULL; source_mgr_->init_source = &init_source; @@ -429,8 +430,7 @@ boolean fill_input_buffer(j_decompress_ptr cinfo) { return TRUE; } -void skip_input_data(j_decompress_ptr cinfo, - long num_bytes) { // NOLINT +void skip_input_data(j_decompress_ptr cinfo, long num_bytes) { // NOLINT cinfo->src->next_input_byte += num_bytes; } @@ -458,7 +458,12 @@ void ErrorHandler(j_common_ptr cinfo) { // and causes it to return (for a second time) with value 1. longjmp(mgr->setjmp_buffer, 1); } -#endif + +void OutputHandler(j_common_ptr cinfo) { + // Suppress fprintf warnings. 
+} + +#endif // HAVE_SETJMP void MJpegDecoder::AllocOutputBuffers(int num_outbufs) { if (num_outbufs != num_outbufs_) { diff --git a/libs/libvpx/third_party/libyuv/source/mjpeg_validate.cc b/libs/libvpx/third_party/libyuv/source/mjpeg_validate.cc index 8edfbe1e74..9c48832045 100644 --- a/libs/libvpx/third_party/libyuv/source/mjpeg_validate.cc +++ b/libs/libvpx/third_party/libyuv/source/mjpeg_validate.cc @@ -17,51 +17,22 @@ namespace libyuv { extern "C" { #endif -// Enable this to try scasb implementation. -// #define ENABLE_SCASB 1 - -#ifdef ENABLE_SCASB - -// Multiple of 1. -__declspec(naked) -const uint8* ScanRow_ERMS(const uint8* src, uint32 val, int count) { - __asm { - mov edx, edi - mov edi, [esp + 4] // src - mov eax, [esp + 8] // val - mov ecx, [esp + 12] // count - repne scasb - jne sr99 - mov eax, edi - sub eax, 1 - mov edi, edx - ret - - sr99: - mov eax, 0 - mov edi, edx - ret - } -} -#endif - -// Helper function to scan for EOI marker. +// Helper function to scan for EOI marker (0xff 0xd9). static LIBYUV_BOOL ScanEOI(const uint8* sample, size_t sample_size) { - const uint8* end = sample + sample_size - 1; - const uint8* it = sample; - for (;;) { -#ifdef ENABLE_SCASB - it = ScanRow_ERMS(it, 0xff, end - it); -#else - it = static_cast(memchr(it, 0xff, end - it)); -#endif - if (it == NULL) { - break; + if (sample_size >= 2) { + const uint8* end = sample + sample_size - 1; + const uint8* it = sample; + while (it < end) { + // TODO(fbarchard): scan for 0xd9 instead. + it = static_cast(memchr(it, 0xff, end - it)); + if (it == NULL) { + break; + } + if (it[1] == 0xd9) { + return LIBYUV_TRUE; // Success: Valid jpeg. + } + ++it; // Skip over current 0xff. } - if (it[1] == 0xd9) { - return LIBYUV_TRUE; // Success: Valid jpeg. - } - ++it; // Skip over current 0xff. } // ERROR: Invalid jpeg end code not found. 
Size sample_size return LIBYUV_FALSE; @@ -69,20 +40,19 @@ static LIBYUV_BOOL ScanEOI(const uint8* sample, size_t sample_size) { // Helper function to validate the jpeg appears intact. LIBYUV_BOOL ValidateJpeg(const uint8* sample, size_t sample_size) { + // Maximum size that ValidateJpeg will consider valid. + const size_t kMaxJpegSize = 0x7fffffffull; const size_t kBackSearchSize = 1024; - if (sample_size < 64) { + if (sample_size < 64 || sample_size > kMaxJpegSize || !sample) { // ERROR: Invalid jpeg size: sample_size return LIBYUV_FALSE; } - if (sample[0] != 0xff || sample[1] != 0xd8) { // Start Of Image + if (sample[0] != 0xff || sample[1] != 0xd8) { // SOI marker // ERROR: Invalid jpeg initial start code return LIBYUV_FALSE; } - // Step over SOI marker. - sample += 2; - sample_size -= 2; - // Look for the End Of Image (EOI) marker in the end kilobyte of the buffer. + // Look for the End Of Image (EOI) marker near the end of the buffer. if (sample_size > kBackSearchSize) { if (ScanEOI(sample + sample_size - kBackSearchSize, kBackSearchSize)) { return LIBYUV_TRUE; // Success: Valid jpeg. @@ -90,8 +60,8 @@ LIBYUV_BOOL ValidateJpeg(const uint8* sample, size_t sample_size) { // Reduce search size for forward search. sample_size = sample_size - kBackSearchSize + 1; } - return ScanEOI(sample, sample_size); - + // Step over SOI marker and scan for EOI. 
+ return ScanEOI(sample + 2, sample_size - 2); } #ifdef __cplusplus diff --git a/libs/libvpx/third_party/libyuv/source/planar_functions.cc b/libs/libvpx/third_party/libyuv/source/planar_functions.cc index b96bd50206..a764f8da47 100644 --- a/libs/libvpx/third_party/libyuv/source/planar_functions.cc +++ b/libs/libvpx/third_party/libyuv/source/planar_functions.cc @@ -17,6 +17,7 @@ #include "libyuv/mjpeg_decoder.h" #endif #include "libyuv/row.h" +#include "libyuv/scale_row.h" // for ScaleRowDown2 #ifdef __cplusplus namespace libyuv { @@ -30,6 +31,12 @@ void CopyPlane(const uint8* src_y, int src_stride_y, int width, int height) { int y; void (*CopyRow)(const uint8* src, uint8* dst, int width) = CopyRow_C; + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_y = dst_y + (height - 1) * dst_stride_y; + dst_stride_y = -dst_stride_y; + } // Coalesce rows. if (src_stride_y == width && dst_stride_y == width) { @@ -75,6 +82,7 @@ void CopyPlane(const uint8* src_y, int src_stride_y, } } +// TODO(fbarchard): Consider support for negative height. 
LIBYUV_API void CopyPlane_16(const uint16* src_y, int src_stride_y, uint16* dst_y, int dst_stride_y, @@ -127,8 +135,8 @@ int I422Copy(const uint8* src_y, int src_stride_y, uint8* dst_v, int dst_stride_v, int width, int height) { int halfwidth = (width + 1) >> 1; - if (!src_y || !src_u || !src_v || - !dst_y || !dst_u || !dst_v || + if (!src_u || !src_v || + !dst_u || !dst_v || width <= 0 || height == 0) { return -1; } @@ -142,7 +150,10 @@ int I422Copy(const uint8* src_y, int src_stride_y, src_stride_u = -src_stride_u; src_stride_v = -src_stride_v; } - CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height); + + if (dst_y) { + CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height); + } CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth, height); CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth, height); return 0; @@ -157,8 +168,8 @@ int I444Copy(const uint8* src_y, int src_stride_y, uint8* dst_u, int dst_stride_u, uint8* dst_v, int dst_stride_v, int width, int height) { - if (!src_y || !src_u || !src_v || - !dst_y || !dst_u || !dst_v || + if (!src_u || !src_v || + !dst_u || !dst_v || width <= 0 || height == 0) { return -1; } @@ -173,7 +184,9 @@ int I444Copy(const uint8* src_y, int src_stride_y, src_stride_v = -src_stride_v; } - CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height); + if (dst_y) { + CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height); + } CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height); CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width, height); return 0; @@ -213,10 +226,138 @@ int I420ToI400(const uint8* src_y, int src_stride_y, src_y = src_y + (height - 1) * src_stride_y; src_stride_y = -src_stride_y; } + CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height); return 0; } +// Support function for NV12 etc UV channels. +// Width and height are plane sizes (typically half pixel width). 
+LIBYUV_API +void SplitUVPlane(const uint8* src_uv, int src_stride_uv, + uint8* dst_u, int dst_stride_u, + uint8* dst_v, int dst_stride_v, + int width, int height) { + int y; + void (*SplitUVRow)(const uint8* src_uv, uint8* dst_u, uint8* dst_v, + int width) = SplitUVRow_C; + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_u = dst_u + (height - 1) * dst_stride_u; + dst_v = dst_v + (height - 1) * dst_stride_v; + dst_stride_u = -dst_stride_u; + dst_stride_v = -dst_stride_v; + } + // Coalesce rows. + if (src_stride_uv == width * 2 && + dst_stride_u == width && + dst_stride_v == width) { + width *= height; + height = 1; + src_stride_uv = dst_stride_u = dst_stride_v = 0; + } +#if defined(HAS_SPLITUVROW_SSE2) + if (TestCpuFlag(kCpuHasSSE2)) { + SplitUVRow = SplitUVRow_Any_SSE2; + if (IS_ALIGNED(width, 16)) { + SplitUVRow = SplitUVRow_SSE2; + } + } +#endif +#if defined(HAS_SPLITUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + SplitUVRow = SplitUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + SplitUVRow = SplitUVRow_AVX2; + } + } +#endif +#if defined(HAS_SPLITUVROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + SplitUVRow = SplitUVRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + SplitUVRow = SplitUVRow_NEON; + } + } +#endif +#if defined(HAS_SPLITUVROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && + IS_ALIGNED(dst_u, 4) && IS_ALIGNED(dst_stride_u, 4) && + IS_ALIGNED(dst_v, 4) && IS_ALIGNED(dst_stride_v, 4)) { + SplitUVRow = SplitUVRow_Any_DSPR2; + if (IS_ALIGNED(width, 16)) { + SplitUVRow = SplitUVRow_DSPR2; + } + } +#endif + + for (y = 0; y < height; ++y) { + // Copy a row of UV. 
+ SplitUVRow(src_uv, dst_u, dst_v, width); + dst_u += dst_stride_u; + dst_v += dst_stride_v; + src_uv += src_stride_uv; + } +} + +LIBYUV_API +void MergeUVPlane(const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_uv, int dst_stride_uv, + int width, int height) { + int y; + void (*MergeUVRow)(const uint8* src_u, const uint8* src_v, uint8* dst_uv, + int width) = MergeUVRow_C; + // Coalesce rows. + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_uv = dst_uv + (height - 1) * dst_stride_uv; + dst_stride_uv = -dst_stride_uv; + } + // Coalesce rows. + if (src_stride_u == width && + src_stride_v == width && + dst_stride_uv == width * 2) { + width *= height; + height = 1; + src_stride_u = src_stride_v = dst_stride_uv = 0; + } +#if defined(HAS_MERGEUVROW_SSE2) + if (TestCpuFlag(kCpuHasSSE2)) { + MergeUVRow = MergeUVRow_Any_SSE2; + if (IS_ALIGNED(width, 16)) { + MergeUVRow = MergeUVRow_SSE2; + } + } +#endif +#if defined(HAS_MERGEUVROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + MergeUVRow = MergeUVRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + MergeUVRow = MergeUVRow_AVX2; + } + } +#endif +#if defined(HAS_MERGEUVROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + MergeUVRow = MergeUVRow_Any_NEON; + if (IS_ALIGNED(width, 16)) { + MergeUVRow = MergeUVRow_NEON; + } + } +#endif + + for (y = 0; y < height; ++y) { + // Merge a row of U and V into a row of UV. + MergeUVRow(src_u, src_v, dst_uv, width); + src_u += src_stride_u; + src_v += src_stride_v; + dst_uv += dst_stride_uv; + } +} + // Mirror a plane of data. 
void MirrorPlane(const uint8* src_y, int src_stride_y, uint8* dst_y, int dst_stride_y, @@ -237,14 +378,6 @@ void MirrorPlane(const uint8* src_y, int src_stride_y, } } #endif -#if defined(HAS_MIRRORROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - MirrorRow = MirrorRow_Any_SSE2; - if (IS_ALIGNED(width, 16)) { - MirrorRow = MirrorRow_SSE2; - } - } -#endif #if defined(HAS_MIRRORROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { MirrorRow = MirrorRow_Any_SSSE3; @@ -262,11 +395,11 @@ void MirrorPlane(const uint8* src_y, int src_stride_y, } #endif // TODO(fbarchard): Mirror on mips handle unaligned memory. -#if defined(HAS_MIRRORROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && +#if defined(HAS_MIRRORROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && IS_ALIGNED(dst_y, 4) && IS_ALIGNED(dst_stride_y, 4)) { - MirrorRow = MirrorRow_MIPS_DSPR2; + MirrorRow = MirrorRow_DSPR2; } #endif @@ -287,9 +420,9 @@ int YUY2ToI422(const uint8* src_yuy2, int src_stride_yuy2, int width, int height) { int y; void (*YUY2ToUV422Row)(const uint8* src_yuy2, - uint8* dst_u, uint8* dst_v, int pix) = + uint8* dst_u, uint8* dst_v, int width) = YUY2ToUV422Row_C; - void (*YUY2ToYRow)(const uint8* src_yuy2, uint8* dst_y, int pix) = + void (*YUY2ToYRow)(const uint8* src_yuy2, uint8* dst_y, int width) = YUY2ToYRow_C; // Negative height means invert the image. if (height < 0) { @@ -359,10 +492,10 @@ int UYVYToI422(const uint8* src_uyvy, int src_stride_uyvy, int width, int height) { int y; void (*UYVYToUV422Row)(const uint8* src_uyvy, - uint8* dst_u, uint8* dst_v, int pix) = + uint8* dst_u, uint8* dst_v, int width) = UYVYToUV422Row_C; void (*UYVYToYRow)(const uint8* src_uyvy, - uint8* dst_y, int pix) = UYVYToYRow_C; + uint8* dst_y, int width) = UYVYToYRow_C; // Negative height means invert the image. 
if (height < 0) { height = -height; @@ -541,11 +674,6 @@ ARGBBlendRow GetARGBBlend() { return ARGBBlendRow; } #endif -#if defined(HAS_ARGBBLENDROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - ARGBBlendRow = ARGBBlendRow_SSE2; - } -#endif #if defined(HAS_ARGBBLENDROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { ARGBBlendRow = ARGBBlendRow_NEON; @@ -590,6 +718,179 @@ int ARGBBlend(const uint8* src_argb0, int src_stride_argb0, return 0; } +// Alpha Blend plane and store to destination. +LIBYUV_API +int BlendPlane(const uint8* src_y0, int src_stride_y0, + const uint8* src_y1, int src_stride_y1, + const uint8* alpha, int alpha_stride, + uint8* dst_y, int dst_stride_y, + int width, int height) { + int y; + void (*BlendPlaneRow)(const uint8* src0, const uint8* src1, + const uint8* alpha, uint8* dst, int width) = BlendPlaneRow_C; + if (!src_y0 || !src_y1 || !alpha || !dst_y || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + dst_y = dst_y + (height - 1) * dst_stride_y; + dst_stride_y = -dst_stride_y; + } + + // Coalesce rows for Y plane. 
+ if (src_stride_y0 == width && + src_stride_y1 == width && + alpha_stride == width && + dst_stride_y == width) { + width *= height; + height = 1; + src_stride_y0 = src_stride_y1 = alpha_stride = dst_stride_y = 0; + } + +#if defined(HAS_BLENDPLANEROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + BlendPlaneRow = BlendPlaneRow_Any_SSSE3; + if (IS_ALIGNED(width, 8)) { + BlendPlaneRow = BlendPlaneRow_SSSE3; + } + } +#endif +#if defined(HAS_BLENDPLANEROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + BlendPlaneRow = BlendPlaneRow_Any_AVX2; + if (IS_ALIGNED(width, 32)) { + BlendPlaneRow = BlendPlaneRow_AVX2; + } + } +#endif + + for (y = 0; y < height; ++y) { + BlendPlaneRow(src_y0, src_y1, alpha, dst_y, width); + src_y0 += src_stride_y0; + src_y1 += src_stride_y1; + alpha += alpha_stride; + dst_y += dst_stride_y; + } + return 0; +} + +#define MAXTWIDTH 2048 +// Alpha Blend YUV images and store to destination. +LIBYUV_API +int I420Blend(const uint8* src_y0, int src_stride_y0, + const uint8* src_u0, int src_stride_u0, + const uint8* src_v0, int src_stride_v0, + const uint8* src_y1, int src_stride_y1, + const uint8* src_u1, int src_stride_u1, + const uint8* src_v1, int src_stride_v1, + const uint8* alpha, int alpha_stride, + uint8* dst_y, int dst_stride_y, + uint8* dst_u, int dst_stride_u, + uint8* dst_v, int dst_stride_v, + int width, int height) { + int y; + // Half width/height for UV. + int halfwidth = (width + 1) >> 1; + void (*BlendPlaneRow)(const uint8* src0, const uint8* src1, + const uint8* alpha, uint8* dst, int width) = BlendPlaneRow_C; + void (*ScaleRowDown2)(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width) = ScaleRowDown2Box_C; + if (!src_y0 || !src_u0 || !src_v0 || !src_y1 || !src_u1 || !src_v1 || + !alpha || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) { + return -1; + } + + // Negative height means invert the image. 
+ if (height < 0) { + height = -height; + dst_y = dst_y + (height - 1) * dst_stride_y; + dst_stride_y = -dst_stride_y; + } + + // Blend Y plane. + BlendPlane(src_y0, src_stride_y0, + src_y1, src_stride_y1, + alpha, alpha_stride, + dst_y, dst_stride_y, + width, height); + +#if defined(HAS_BLENDPLANEROW_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + BlendPlaneRow = BlendPlaneRow_Any_SSSE3; + if (IS_ALIGNED(halfwidth, 8)) { + BlendPlaneRow = BlendPlaneRow_SSSE3; + } + } +#endif +#if defined(HAS_BLENDPLANEROW_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + BlendPlaneRow = BlendPlaneRow_Any_AVX2; + if (IS_ALIGNED(halfwidth, 32)) { + BlendPlaneRow = BlendPlaneRow_AVX2; + } + } +#endif + if (!IS_ALIGNED(width, 2)) { + ScaleRowDown2 = ScaleRowDown2Box_Odd_C; + } +#if defined(HAS_SCALEROWDOWN2_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ScaleRowDown2 = ScaleRowDown2Box_Odd_NEON; + if (IS_ALIGNED(width, 2)) { + ScaleRowDown2 = ScaleRowDown2Box_Any_NEON; + if (IS_ALIGNED(halfwidth, 16)) { + ScaleRowDown2 = ScaleRowDown2Box_NEON; + } + } + } +#endif +#if defined(HAS_SCALEROWDOWN2_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ScaleRowDown2 = ScaleRowDown2Box_Odd_SSSE3; + if (IS_ALIGNED(width, 2)) { + ScaleRowDown2 = ScaleRowDown2Box_Any_SSSE3; + if (IS_ALIGNED(halfwidth, 16)) { + ScaleRowDown2 = ScaleRowDown2Box_SSSE3; + } + } + } +#endif +#if defined(HAS_SCALEROWDOWN2_AVX2) + if (TestCpuFlag(kCpuHasAVX2)) { + ScaleRowDown2 = ScaleRowDown2Box_Odd_AVX2; + if (IS_ALIGNED(width, 2)) { + ScaleRowDown2 = ScaleRowDown2Box_Any_AVX2; + if (IS_ALIGNED(halfwidth, 32)) { + ScaleRowDown2 = ScaleRowDown2Box_AVX2; + } + } + } +#endif + + // Row buffer for intermediate alpha pixels. + align_buffer_64(halfalpha, halfwidth); + for (y = 0; y < height; y += 2) { + // last row of odd height image use 1 row of alpha instead of 2. + if (y == (height - 1)) { + alpha_stride = 0; + } + // Subsample 2 rows of UV to half width and half height. 
+ ScaleRowDown2(alpha, alpha_stride, halfalpha, halfwidth); + alpha += alpha_stride * 2; + BlendPlaneRow(src_u0, src_u1, halfalpha, dst_u, halfwidth); + BlendPlaneRow(src_v0, src_v1, halfalpha, dst_v, halfwidth); + src_u0 += src_stride_u0; + src_u1 += src_stride_u1; + dst_u += dst_stride_u; + src_v0 += src_stride_v0; + src_v1 += src_stride_v1; + dst_v += dst_stride_v; + } + free_aligned_buffer_64(halfalpha); + return 0; +} + // Multiply 2 ARGB images and store to destination. LIBYUV_API int ARGBMultiply(const uint8* src_argb0, int src_stride_argb0, @@ -777,167 +1078,21 @@ int ARGBSubtract(const uint8* src_argb0, int src_stride_argb0, } return 0; } - -// Convert I422 to BGRA. -LIBYUV_API -int I422ToBGRA(const uint8* src_y, int src_stride_y, - const uint8* src_u, int src_stride_u, - const uint8* src_v, int src_stride_v, - uint8* dst_bgra, int dst_stride_bgra, - int width, int height) { - int y; - void (*I422ToBGRARow)(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* rgb_buf, - int width) = I422ToBGRARow_C; - if (!src_y || !src_u || !src_v || - !dst_bgra || - width <= 0 || height == 0) { - return -1; - } - // Negative height means invert the image. - if (height < 0) { - height = -height; - dst_bgra = dst_bgra + (height - 1) * dst_stride_bgra; - dst_stride_bgra = -dst_stride_bgra; - } - // Coalesce rows. 
- if (src_stride_y == width && - src_stride_u * 2 == width && - src_stride_v * 2 == width && - dst_stride_bgra == width * 4) { - width *= height; - height = 1; - src_stride_y = src_stride_u = src_stride_v = dst_stride_bgra = 0; - } -#if defined(HAS_I422TOBGRAROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - I422ToBGRARow = I422ToBGRARow_Any_SSSE3; - if (IS_ALIGNED(width, 8)) { - I422ToBGRARow = I422ToBGRARow_SSSE3; - } - } -#endif -#if defined(HAS_I422TOBGRAROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2)) { - I422ToBGRARow = I422ToBGRARow_Any_AVX2; - if (IS_ALIGNED(width, 16)) { - I422ToBGRARow = I422ToBGRARow_AVX2; - } - } -#endif -#if defined(HAS_I422TOBGRAROW_NEON) - if (TestCpuFlag(kCpuHasNEON)) { - I422ToBGRARow = I422ToBGRARow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - I422ToBGRARow = I422ToBGRARow_NEON; - } - } -#endif -#if defined(HAS_I422TOBGRAROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) && - IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && - IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && - IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && - IS_ALIGNED(dst_bgra, 4) && IS_ALIGNED(dst_stride_bgra, 4)) { - I422ToBGRARow = I422ToBGRARow_MIPS_DSPR2; - } -#endif - - for (y = 0; y < height; ++y) { - I422ToBGRARow(src_y, src_u, src_v, dst_bgra, width); - dst_bgra += dst_stride_bgra; - src_y += src_stride_y; - src_u += src_stride_u; - src_v += src_stride_v; - } - return 0; -} - -// Convert I422 to ABGR. -LIBYUV_API -int I422ToABGR(const uint8* src_y, int src_stride_y, - const uint8* src_u, int src_stride_u, - const uint8* src_v, int src_stride_v, - uint8* dst_abgr, int dst_stride_abgr, - int width, int height) { - int y; - void (*I422ToABGRRow)(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* rgb_buf, - int width) = I422ToABGRRow_C; - if (!src_y || !src_u || !src_v || - !dst_abgr || - width <= 0 || height == 0) { - return -1; - } - // Negative height means invert the image. 
- if (height < 0) { - height = -height; - dst_abgr = dst_abgr + (height - 1) * dst_stride_abgr; - dst_stride_abgr = -dst_stride_abgr; - } - // Coalesce rows. - if (src_stride_y == width && - src_stride_u * 2 == width && - src_stride_v * 2 == width && - dst_stride_abgr == width * 4) { - width *= height; - height = 1; - src_stride_y = src_stride_u = src_stride_v = dst_stride_abgr = 0; - } -#if defined(HAS_I422TOABGRROW_NEON) - if (TestCpuFlag(kCpuHasNEON) && width >= 8) { - I422ToABGRRow = I422ToABGRRow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - I422ToABGRRow = I422ToABGRRow_NEON; - } - } -#endif -#if defined(HAS_I422TOABGRROW_SSSE3) - if (TestCpuFlag(kCpuHasSSSE3)) { - I422ToABGRRow = I422ToABGRRow_Any_SSSE3; - if (IS_ALIGNED(width, 8)) { - I422ToABGRRow = I422ToABGRRow_SSSE3; - } - } -#endif -#if defined(HAS_I422TOABGRROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2)) { - I422ToABGRRow = I422ToABGRRow_Any_AVX2; - if (IS_ALIGNED(width, 16)) { - I422ToABGRRow = I422ToABGRRow_AVX2; - } - } -#endif - - for (y = 0; y < height; ++y) { - I422ToABGRRow(src_y, src_u, src_v, dst_abgr, width); - dst_abgr += dst_stride_abgr; - src_y += src_stride_y; - src_u += src_stride_u; - src_v += src_stride_v; - } - return 0; -} - -// Convert I422 to RGBA. 
-LIBYUV_API -int I422ToRGBA(const uint8* src_y, int src_stride_y, - const uint8* src_u, int src_stride_u, - const uint8* src_v, int src_stride_v, - uint8* dst_rgba, int dst_stride_rgba, - int width, int height) { +// Convert I422 to RGBA with matrix +static int I422ToRGBAMatrix(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_rgba, int dst_stride_rgba, + const struct YuvConstants* yuvconstants, + int width, int height) { int y; void (*I422ToRGBARow)(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = I422ToRGBARow_C; - if (!src_y || !src_u || !src_v || - !dst_rgba || + if (!src_y || !src_u || !src_v || !dst_rgba || width <= 0 || height == 0) { return -1; } @@ -947,23 +1102,6 @@ int I422ToRGBA(const uint8* src_y, int src_stride_y, dst_rgba = dst_rgba + (height - 1) * dst_stride_rgba; dst_stride_rgba = -dst_stride_rgba; } - // Coalesce rows. 
- if (src_stride_y == width && - src_stride_u * 2 == width && - src_stride_v * 2 == width && - dst_stride_rgba == width * 4) { - width *= height; - height = 1; - src_stride_y = src_stride_u = src_stride_v = dst_stride_rgba = 0; - } -#if defined(HAS_I422TORGBAROW_NEON) - if (TestCpuFlag(kCpuHasNEON) && width >= 8) { - I422ToRGBARow = I422ToRGBARow_Any_NEON; - if (IS_ALIGNED(width, 8)) { - I422ToRGBARow = I422ToRGBARow_NEON; - } - } -#endif #if defined(HAS_I422TORGBAROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { I422ToRGBARow = I422ToRGBARow_Any_SSSE3; @@ -980,9 +1118,26 @@ int I422ToRGBA(const uint8* src_y, int src_stride_y, } } #endif +#if defined(HAS_I422TORGBAROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + I422ToRGBARow = I422ToRGBARow_Any_NEON; + if (IS_ALIGNED(width, 8)) { + I422ToRGBARow = I422ToRGBARow_NEON; + } + } +#endif +#if defined(HAS_I422TORGBAROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(width, 4) && + IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && + IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && + IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && + IS_ALIGNED(dst_rgba, 4) && IS_ALIGNED(dst_stride_rgba, 4)) { + I422ToRGBARow = I422ToRGBARow_DSPR2; + } +#endif for (y = 0; y < height; ++y) { - I422ToRGBARow(src_y, src_u, src_v, dst_rgba, width); + I422ToRGBARow(src_y, src_u, src_v, dst_rgba, yuvconstants, width); dst_rgba += dst_stride_rgba; src_y += src_stride_y; src_u += src_stride_u; @@ -991,6 +1146,36 @@ int I422ToRGBA(const uint8* src_y, int src_stride_y, return 0; } +// Convert I422 to RGBA. +LIBYUV_API +int I422ToRGBA(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_rgba, int dst_stride_rgba, + int width, int height) { + return I422ToRGBAMatrix(src_y, src_stride_y, + src_u, src_stride_u, + src_v, src_stride_v, + dst_rgba, dst_stride_rgba, + &kYuvI601Constants, + width, height); +} + +// Convert I422 to BGRA. 
+LIBYUV_API +int I422ToBGRA(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint8* dst_bgra, int dst_stride_bgra, + int width, int height) { + return I422ToRGBAMatrix(src_y, src_stride_y, + src_v, src_stride_v, // Swap U and V + src_u, src_stride_u, + dst_bgra, dst_stride_bgra, + &kYvuI601Constants, // Use Yvu matrix + width, height); +} + // Convert NV12 to RGB565. LIBYUV_API int NV12ToRGB565(const uint8* src_y, int src_stride_y, @@ -1001,6 +1186,7 @@ int NV12ToRGB565(const uint8* src_y, int src_stride_y, void (*NV12ToRGB565Row)(const uint8* y_buf, const uint8* uv_buf, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) = NV12ToRGB565Row_C; if (!src_y || !src_uv || !dst_rgb565 || width <= 0 || height == 0) { @@ -1038,7 +1224,7 @@ int NV12ToRGB565(const uint8* src_y, int src_stride_y, #endif for (y = 0; y < height; ++y) { - NV12ToRGB565Row(src_y, src_uv, dst_rgb565, width); + NV12ToRGB565Row(src_y, src_uv, dst_rgb565, &kYuvI601Constants, width); dst_rgb565 += dst_stride_rgb565; src_y += src_stride_y; if (y & 1) { @@ -1048,59 +1234,52 @@ int NV12ToRGB565(const uint8* src_y, int src_stride_y, return 0; } -// Convert NV21 to RGB565. +// Convert RAW to RGB24. LIBYUV_API -int NV21ToRGB565(const uint8* src_y, int src_stride_y, - const uint8* src_vu, int src_stride_vu, - uint8* dst_rgb565, int dst_stride_rgb565, - int width, int height) { +int RAWToRGB24(const uint8* src_raw, int src_stride_raw, + uint8* dst_rgb24, int dst_stride_rgb24, + int width, int height) { int y; - void (*NV21ToRGB565Row)(const uint8* y_buf, - const uint8* src_vu, - uint8* rgb_buf, - int width) = NV21ToRGB565Row_C; - if (!src_y || !src_vu || !dst_rgb565 || + void (*RAWToRGB24Row)(const uint8* src_rgb, uint8* dst_rgb24, int width) = + RAWToRGB24Row_C; + if (!src_raw || !dst_rgb24 || width <= 0 || height == 0) { return -1; } // Negative height means invert the image. 
if (height < 0) { height = -height; - dst_rgb565 = dst_rgb565 + (height - 1) * dst_stride_rgb565; - dst_stride_rgb565 = -dst_stride_rgb565; + src_raw = src_raw + (height - 1) * src_stride_raw; + src_stride_raw = -src_stride_raw; } -#if defined(HAS_NV21TORGB565ROW_SSSE3) + // Coalesce rows. + if (src_stride_raw == width * 3 && + dst_stride_rgb24 == width * 3) { + width *= height; + height = 1; + src_stride_raw = dst_stride_rgb24 = 0; + } +#if defined(HAS_RAWTORGB24ROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { - NV21ToRGB565Row = NV21ToRGB565Row_Any_SSSE3; + RAWToRGB24Row = RAWToRGB24Row_Any_SSSE3; if (IS_ALIGNED(width, 8)) { - NV21ToRGB565Row = NV21ToRGB565Row_SSSE3; + RAWToRGB24Row = RAWToRGB24Row_SSSE3; } } #endif -#if defined(HAS_NV21TORGB565ROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2)) { - NV21ToRGB565Row = NV21ToRGB565Row_Any_AVX2; - if (IS_ALIGNED(width, 16)) { - NV21ToRGB565Row = NV21ToRGB565Row_AVX2; - } - } -#endif -#if defined(HAS_NV21TORGB565ROW_NEON) +#if defined(HAS_RAWTORGB24ROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { - NV21ToRGB565Row = NV21ToRGB565Row_Any_NEON; + RAWToRGB24Row = RAWToRGB24Row_Any_NEON; if (IS_ALIGNED(width, 8)) { - NV21ToRGB565Row = NV21ToRGB565Row_NEON; + RAWToRGB24Row = RAWToRGB24Row_NEON; } } #endif for (y = 0; y < height; ++y) { - NV21ToRGB565Row(src_y, src_vu, dst_rgb565, width); - dst_rgb565 += dst_stride_rgb565; - src_y += src_stride_y; - if (y & 1) { - src_vu += src_stride_vu; - } + RAWToRGB24Row(src_raw, dst_rgb24, width); + src_raw += src_stride_raw; + dst_rgb24 += dst_stride_rgb24; } return 0; } @@ -1110,7 +1289,7 @@ void SetPlane(uint8* dst_y, int dst_stride_y, int width, int height, uint32 value) { int y; - void (*SetRow)(uint8* dst, uint8 value, int pix) = SetRow_C; + void (*SetRow)(uint8* dst, uint8 value, int width) = SetRow_C; if (height < 0) { height = -height; dst_y = dst_y + (height - 1) * dst_stride_y; @@ -1186,7 +1365,7 @@ int ARGBRect(uint8* dst_argb, int dst_stride_argb, int width, int height, uint32 value) { int y; - 
void (*ARGBSetRow)(uint8* dst_argb, uint32 value, int pix) = ARGBSetRow_C; + void (*ARGBSetRow)(uint8* dst_argb, uint32 value, int width) = ARGBSetRow_C; if (!dst_argb || width <= 0 || height == 0 || dst_x < 0 || dst_y < 0) { @@ -1262,14 +1441,6 @@ int ARGBAttenuate(const uint8* src_argb, int src_stride_argb, height = 1; src_stride_argb = dst_stride_argb = 0; } -#if defined(HAS_ARGBATTENUATEROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - ARGBAttenuateRow = ARGBAttenuateRow_Any_SSE2; - if (IS_ALIGNED(width, 4)) { - ARGBAttenuateRow = ARGBAttenuateRow_SSE2; - } - } -#endif #if defined(HAS_ARGBATTENUATEROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { ARGBAttenuateRow = ARGBAttenuateRow_Any_SSSE3; @@ -1824,45 +1995,37 @@ int ARGBShade(const uint8* src_argb, int src_stride_argb, return 0; } -// Interpolate 2 ARGB images by specified amount (0 to 255). +// Interpolate 2 planes by specified amount (0 to 255). LIBYUV_API -int ARGBInterpolate(const uint8* src_argb0, int src_stride_argb0, - const uint8* src_argb1, int src_stride_argb1, - uint8* dst_argb, int dst_stride_argb, - int width, int height, int interpolation) { +int InterpolatePlane(const uint8* src0, int src_stride0, + const uint8* src1, int src_stride1, + uint8* dst, int dst_stride, + int width, int height, int interpolation) { int y; void (*InterpolateRow)(uint8* dst_ptr, const uint8* src_ptr, ptrdiff_t src_stride, int dst_width, int source_y_fraction) = InterpolateRow_C; - if (!src_argb0 || !src_argb1 || !dst_argb || width <= 0 || height == 0) { + if (!src0 || !src1 || !dst || width <= 0 || height == 0) { return -1; } // Negative height means invert the image. if (height < 0) { height = -height; - dst_argb = dst_argb + (height - 1) * dst_stride_argb; - dst_stride_argb = -dst_stride_argb; + dst = dst + (height - 1) * dst_stride; + dst_stride = -dst_stride; } // Coalesce rows. 
- if (src_stride_argb0 == width * 4 && - src_stride_argb1 == width * 4 && - dst_stride_argb == width * 4) { + if (src_stride0 == width && + src_stride1 == width && + dst_stride == width) { width *= height; height = 1; - src_stride_argb0 = src_stride_argb1 = dst_stride_argb = 0; + src_stride0 = src_stride1 = dst_stride = 0; } -#if defined(HAS_INTERPOLATEROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - InterpolateRow = InterpolateRow_Any_SSE2; - if (IS_ALIGNED(width, 4)) { - InterpolateRow = InterpolateRow_SSE2; - } - } -#endif #if defined(HAS_INTERPOLATEROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { InterpolateRow = InterpolateRow_Any_SSSE3; - if (IS_ALIGNED(width, 4)) { + if (IS_ALIGNED(width, 16)) { InterpolateRow = InterpolateRow_SSSE3; } } @@ -1870,7 +2033,7 @@ int ARGBInterpolate(const uint8* src_argb0, int src_stride_argb0, #if defined(HAS_INTERPOLATEROW_AVX2) if (TestCpuFlag(kCpuHasAVX2)) { InterpolateRow = InterpolateRow_Any_AVX2; - if (IS_ALIGNED(width, 8)) { + if (IS_ALIGNED(width, 32)) { InterpolateRow = InterpolateRow_AVX2; } } @@ -1878,30 +2041,77 @@ int ARGBInterpolate(const uint8* src_argb0, int src_stride_argb0, #if defined(HAS_INTERPOLATEROW_NEON) if (TestCpuFlag(kCpuHasNEON)) { InterpolateRow = InterpolateRow_Any_NEON; - if (IS_ALIGNED(width, 4)) { + if (IS_ALIGNED(width, 16)) { InterpolateRow = InterpolateRow_NEON; } } #endif -#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && - IS_ALIGNED(src_argb0, 4) && IS_ALIGNED(src_stride_argb0, 4) && - IS_ALIGNED(src_argb1, 4) && IS_ALIGNED(src_stride_argb1, 4) && - IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) { - InterpolateRow = InterpolateRow_MIPS_DSPR2; +#if defined(HAS_INTERPOLATEROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && + IS_ALIGNED(src0, 4) && IS_ALIGNED(src_stride0, 4) && + IS_ALIGNED(src1, 4) && IS_ALIGNED(src_stride1, 4) && + IS_ALIGNED(dst, 4) && IS_ALIGNED(dst_stride, 4) && + IS_ALIGNED(width, 4)) { + InterpolateRow = InterpolateRow_DSPR2; } #endif for 
(y = 0; y < height; ++y) { - InterpolateRow(dst_argb, src_argb0, src_argb1 - src_argb0, - width * 4, interpolation); - src_argb0 += src_stride_argb0; - src_argb1 += src_stride_argb1; - dst_argb += dst_stride_argb; + InterpolateRow(dst, src0, src1 - src0, width, interpolation); + src0 += src_stride0; + src1 += src_stride1; + dst += dst_stride; } return 0; } +// Interpolate 2 ARGB images by specified amount (0 to 255). +LIBYUV_API +int ARGBInterpolate(const uint8* src_argb0, int src_stride_argb0, + const uint8* src_argb1, int src_stride_argb1, + uint8* dst_argb, int dst_stride_argb, + int width, int height, int interpolation) { + return InterpolatePlane(src_argb0, src_stride_argb0, + src_argb1, src_stride_argb1, + dst_argb, dst_stride_argb, + width * 4, height, interpolation); +} + +// Interpolate 2 YUV images by specified amount (0 to 255). +LIBYUV_API +int I420Interpolate(const uint8* src0_y, int src0_stride_y, + const uint8* src0_u, int src0_stride_u, + const uint8* src0_v, int src0_stride_v, + const uint8* src1_y, int src1_stride_y, + const uint8* src1_u, int src1_stride_u, + const uint8* src1_v, int src1_stride_v, + uint8* dst_y, int dst_stride_y, + uint8* dst_u, int dst_stride_u, + uint8* dst_v, int dst_stride_v, + int width, int height, int interpolation) { + int halfwidth = (width + 1) >> 1; + int halfheight = (height + 1) >> 1; + if (!src0_y || !src0_u || !src0_v || + !src1_y || !src1_u || !src1_v || + !dst_y || !dst_u || !dst_v || + width <= 0 || height == 0) { + return -1; + } + InterpolatePlane(src0_y, src0_stride_y, + src1_y, src1_stride_y, + dst_y, dst_stride_y, + width, height, interpolation); + InterpolatePlane(src0_u, src0_stride_u, + src1_u, src1_stride_u, + dst_u, dst_stride_u, + halfwidth, halfheight, interpolation); + InterpolatePlane(src0_v, src0_stride_v, + src1_v, src1_stride_v, + dst_v, dst_stride_v, + halfwidth, halfheight, interpolation); + return 0; +} + // Shuffle ARGB channel order. e.g. BGRA to ARGB. 
LIBYUV_API int ARGBShuffle(const uint8* src_bgra, int src_stride_bgra, @@ -1909,7 +2119,7 @@ int ARGBShuffle(const uint8* src_bgra, int src_stride_bgra, const uint8* shuffler, int width, int height) { int y; void (*ARGBShuffleRow)(const uint8* src_bgra, uint8* dst_argb, - const uint8* shuffler, int pix) = ARGBShuffleRow_C; + const uint8* shuffler, int width) = ARGBShuffleRow_C; if (!src_bgra || !dst_argb || width <= 0 || height == 0) { return -1; @@ -1976,7 +2186,7 @@ static int ARGBSobelize(const uint8* src_argb, int src_stride_argb, const uint8* src_sobely, uint8* dst, int width)) { int y; - void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_g, int pix) = + void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_g, int width) = ARGBToYJRow_C; void (*SobelYRow)(const uint8* src_y0, const uint8* src_y1, uint8* dst_sobely, int width) = SobelYRow_C; @@ -2280,13 +2490,19 @@ int ARGBCopyAlpha(const uint8* src_argb, int src_stride_argb, src_stride_argb = dst_stride_argb = 0; } #if defined(HAS_ARGBCOPYALPHAROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 8)) { - ARGBCopyAlphaRow = ARGBCopyAlphaRow_SSE2; + if (TestCpuFlag(kCpuHasSSE2)) { + ARGBCopyAlphaRow = ARGBCopyAlphaRow_Any_SSE2; + if (IS_ALIGNED(width, 8)) { + ARGBCopyAlphaRow = ARGBCopyAlphaRow_SSE2; + } } #endif #if defined(HAS_ARGBCOPYALPHAROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2) && IS_ALIGNED(width, 16)) { - ARGBCopyAlphaRow = ARGBCopyAlphaRow_AVX2; + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBCopyAlphaRow = ARGBCopyAlphaRow_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + ARGBCopyAlphaRow = ARGBCopyAlphaRow_AVX2; + } } #endif @@ -2298,6 +2514,49 @@ int ARGBCopyAlpha(const uint8* src_argb, int src_stride_argb, return 0; } +// Extract just the alpha channel from ARGB. 
+LIBYUV_API +int ARGBExtractAlpha(const uint8* src_argb, int src_stride, + uint8* dst_a, int dst_stride, + int width, int height) { + if (!src_argb || !dst_a || width <= 0 || height == 0) { + return -1; + } + // Negative height means invert the image. + if (height < 0) { + height = -height; + src_argb += (height - 1) * src_stride; + src_stride = -src_stride; + } + // Coalesce rows. + if (src_stride == width * 4 && dst_stride == width) { + width *= height; + height = 1; + src_stride = dst_stride = 0; + } + void (*ARGBExtractAlphaRow)(const uint8 *src_argb, uint8 *dst_a, int width) = + ARGBExtractAlphaRow_C; +#if defined(HAS_ARGBEXTRACTALPHAROW_SSE2) + if (TestCpuFlag(kCpuHasSSE2)) { + ARGBExtractAlphaRow = IS_ALIGNED(width, 8) ? ARGBExtractAlphaRow_SSE2 + : ARGBExtractAlphaRow_Any_SSE2; + } +#endif +#if defined(HAS_ARGBEXTRACTALPHAROW_NEON) + if (TestCpuFlag(kCpuHasNEON)) { + ARGBExtractAlphaRow = IS_ALIGNED(width, 16) ? ARGBExtractAlphaRow_NEON + : ARGBExtractAlphaRow_Any_NEON; + } +#endif + + for (int y = 0; y < height; ++y) { + ARGBExtractAlphaRow(src_argb, dst_a, width); + src_argb += src_stride; + dst_a += dst_stride; + } + return 0; +} + // Copy a planar Y channel to the alpha channel of a destination ARGB image. 
LIBYUV_API int ARGBCopyYToAlpha(const uint8* src_y, int src_stride_y, @@ -2323,13 +2582,19 @@ int ARGBCopyYToAlpha(const uint8* src_y, int src_stride_y, src_stride_y = dst_stride_argb = 0; } #if defined(HAS_ARGBCOPYYTOALPHAROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 8)) { - ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_SSE2; + if (TestCpuFlag(kCpuHasSSE2)) { + ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_Any_SSE2; + if (IS_ALIGNED(width, 8)) { + ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_SSE2; + } } #endif #if defined(HAS_ARGBCOPYYTOALPHAROW_AVX2) - if (TestCpuFlag(kCpuHasAVX2) && IS_ALIGNED(width, 16)) { - ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_AVX2; + if (TestCpuFlag(kCpuHasAVX2)) { + ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_Any_AVX2; + if (IS_ALIGNED(width, 16)) { + ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_AVX2; + } } #endif @@ -2341,6 +2606,9 @@ int ARGBCopyYToAlpha(const uint8* src_y, int src_stride_y, return 0; } +// TODO(fbarchard): Consider if width is even Y channel can be split +// directly. A SplitUVRow_Odd function could copy the remaining chroma. 
+ LIBYUV_API int YUY2ToNV12(const uint8* src_yuy2, int src_stride_yuy2, uint8* dst_y, int dst_stride_y, @@ -2348,8 +2616,8 @@ int YUY2ToNV12(const uint8* src_yuy2, int src_stride_yuy2, int width, int height) { int y; int halfwidth = (width + 1) >> 1; - void (*SplitUVRow)(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) = - SplitUVRow_C; + void (*SplitUVRow)(const uint8* src_uv, uint8* dst_u, uint8* dst_v, + int width) = SplitUVRow_C; void (*InterpolateRow)(uint8* dst_ptr, const uint8* src_ptr, ptrdiff_t src_stride, int dst_width, int source_y_fraction) = InterpolateRow_C; @@ -2388,14 +2656,6 @@ int YUY2ToNV12(const uint8* src_yuy2, int src_stride_yuy2, } } #endif -#if defined(HAS_INTERPOLATEROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - InterpolateRow = InterpolateRow_Any_SSE2; - if (IS_ALIGNED(width, 16)) { - InterpolateRow = InterpolateRow_SSE2; - } - } -#endif #if defined(HAS_INTERPOLATEROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { InterpolateRow = InterpolateRow_Any_SSSE3; @@ -2423,22 +2683,24 @@ int YUY2ToNV12(const uint8* src_yuy2, int src_stride_yuy2, { int awidth = halfwidth * 2; - // 2 rows of uv - align_buffer_64(rows, awidth * 2); + // row of y and 2 rows of uv + align_buffer_64(rows, awidth * 3); for (y = 0; y < height - 1; y += 2) { // Split Y from UV. - SplitUVRow(src_yuy2, dst_y, rows, awidth); - SplitUVRow(src_yuy2 + src_stride_yuy2, dst_y + dst_stride_y, - rows + awidth, awidth); - InterpolateRow(dst_uv, rows, awidth, awidth, 128); + SplitUVRow(src_yuy2, rows, rows + awidth, awidth); + memcpy(dst_y, rows, width); + SplitUVRow(src_yuy2 + src_stride_yuy2, rows, rows + awidth * 2, awidth); + memcpy(dst_y + dst_stride_y, rows, width); + InterpolateRow(dst_uv, rows + awidth, awidth, awidth, 128); src_yuy2 += src_stride_yuy2 * 2; dst_y += dst_stride_y * 2; dst_uv += dst_stride_uv; } if (height & 1) { // Split Y from UV. 
- SplitUVRow(src_yuy2, dst_y, dst_uv, width); + SplitUVRow(src_yuy2, rows, dst_uv, awidth); + memcpy(dst_y, rows, width); } free_aligned_buffer_64(rows); } @@ -2452,8 +2714,8 @@ int UYVYToNV12(const uint8* src_uyvy, int src_stride_uyvy, int width, int height) { int y; int halfwidth = (width + 1) >> 1; - void (*SplitUVRow)(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) = - SplitUVRow_C; + void (*SplitUVRow)(const uint8* src_uv, uint8* dst_u, uint8* dst_v, + int width) = SplitUVRow_C; void (*InterpolateRow)(uint8* dst_ptr, const uint8* src_ptr, ptrdiff_t src_stride, int dst_width, int source_y_fraction) = InterpolateRow_C; @@ -2492,14 +2754,6 @@ int UYVYToNV12(const uint8* src_uyvy, int src_stride_uyvy, } } #endif -#if defined(HAS_INTERPOLATEROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - InterpolateRow = InterpolateRow_Any_SSE2; - if (IS_ALIGNED(width, 16)) { - InterpolateRow = InterpolateRow_SSE2; - } - } -#endif #if defined(HAS_INTERPOLATEROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { InterpolateRow = InterpolateRow_Any_SSSE3; @@ -2527,22 +2781,24 @@ int UYVYToNV12(const uint8* src_uyvy, int src_stride_uyvy, { int awidth = halfwidth * 2; - // 2 rows of uv - align_buffer_64(rows, awidth * 2); + // row of y and 2 rows of uv + align_buffer_64(rows, awidth * 3); for (y = 0; y < height - 1; y += 2) { // Split Y from UV. - SplitUVRow(src_uyvy, rows, dst_y, awidth); - SplitUVRow(src_uyvy + src_stride_uyvy, rows + awidth, - dst_y + dst_stride_y, awidth); - InterpolateRow(dst_uv, rows, awidth, awidth, 128); + SplitUVRow(src_uyvy, rows + awidth, rows, awidth); + memcpy(dst_y, rows, width); + SplitUVRow(src_uyvy + src_stride_uyvy, rows + awidth * 2, rows, awidth); + memcpy(dst_y + dst_stride_y, rows, width); + InterpolateRow(dst_uv, rows + awidth, awidth, awidth, 128); src_uyvy += src_stride_uyvy * 2; dst_y += dst_stride_y * 2; dst_uv += dst_stride_uv; } if (height & 1) { // Split Y from UV. 
- SplitUVRow(src_uyvy, dst_y, dst_uv, width); + SplitUVRow(src_uyvy, dst_uv, rows, awidth); + memcpy(dst_y, rows, width); } free_aligned_buffer_64(rows); } diff --git a/libs/libvpx/third_party/libyuv/source/rotate.cc b/libs/libvpx/third_party/libyuv/source/rotate.cc index be3d589207..01ea5c4074 100644 --- a/libs/libvpx/third_party/libyuv/source/rotate.cc +++ b/libs/libvpx/third_party/libyuv/source/rotate.cc @@ -49,13 +49,13 @@ void TransposePlane(const uint8* src, int src_stride, } } #endif -#if defined(HAS_TRANSPOSEWX8_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2)) { +#if defined(HAS_TRANSPOSEWX8_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2)) { if (IS_ALIGNED(width, 4) && IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4)) { - TransposeWx8 = TransposeWx8_Fast_MIPS_DSPR2; + TransposeWx8 = TransposeWx8_Fast_DSPR2; } else { - TransposeWx8 = TransposeWx8_MIPS_DSPR2; + TransposeWx8 = TransposeWx8_DSPR2; } } #endif @@ -117,14 +117,6 @@ void RotatePlane180(const uint8* src, int src_stride, } } #endif -#if defined(HAS_MIRRORROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - MirrorRow = MirrorRow_Any_SSE2; - if (IS_ALIGNED(width, 16)) { - MirrorRow = MirrorRow_SSE2; - } - } -#endif #if defined(HAS_MIRRORROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { MirrorRow = MirrorRow_Any_SSSE3; @@ -142,11 +134,11 @@ void RotatePlane180(const uint8* src, int src_stride, } #endif // TODO(fbarchard): Mirror on mips handle unaligned memory. 
-#if defined(HAS_MIRRORROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && +#if defined(HAS_MIRRORROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4) && IS_ALIGNED(dst, 4) && IS_ALIGNED(dst_stride, 4)) { - MirrorRow = MirrorRow_MIPS_DSPR2; + MirrorRow = MirrorRow_DSPR2; } #endif #if defined(HAS_COPYROW_SSE2) @@ -204,14 +196,17 @@ void TransposeUV(const uint8* src, int src_stride, } #endif #if defined(HAS_TRANSPOSEUVWX8_SSE2) - if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 8)) { - TransposeUVWx8 = TransposeUVWx8_SSE2; + if (TestCpuFlag(kCpuHasSSE2)) { + TransposeUVWx8 = TransposeUVWx8_Any_SSE2; + if (IS_ALIGNED(width, 8)) { + TransposeUVWx8 = TransposeUVWx8_SSE2; + } } #endif -#if defined(HAS_TRANSPOSEUVWx8_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 2) && +#if defined(HAS_TRANSPOSEUVWX8_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(width, 2) && IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4)) { - TransposeUVWx8 = TransposeUVWx8_MIPS_DSPR2; + TransposeUVWx8 = TransposeUVWx8_DSPR2; } #endif @@ -272,22 +267,22 @@ void RotateUV180(const uint8* src, int src_stride, uint8* dst_b, int dst_stride_b, int width, int height) { int i; - void (*MirrorRowUV)(const uint8* src, uint8* dst_u, uint8* dst_v, int width) = + void (*MirrorUVRow)(const uint8* src, uint8* dst_u, uint8* dst_v, int width) = MirrorUVRow_C; #if defined(HAS_MIRRORUVROW_NEON) if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 8)) { - MirrorRowUV = MirrorUVRow_NEON; + MirrorUVRow = MirrorUVRow_NEON; } #endif -#if defined(HAS_MIRRORROW_UV_SSSE3) +#if defined(HAS_MIRRORUVROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 16)) { - MirrorRowUV = MirrorUVRow_SSSE3; + MirrorUVRow = MirrorUVRow_SSSE3; } #endif -#if defined(HAS_MIRRORUVROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && +#if defined(HAS_MIRRORUVROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4)) { - MirrorRowUV = 
MirrorUVRow_MIPS_DSPR2; + MirrorUVRow = MirrorUVRow_DSPR2; } #endif @@ -295,7 +290,7 @@ void RotateUV180(const uint8* src, int src_stride, dst_b += dst_stride_b * (height - 1); for (i = 0; i < height; ++i) { - MirrorRowUV(src, dst_a, dst_b, width); + MirrorUVRow(src, dst_a, dst_b, width); src += src_stride; dst_a -= dst_stride_a; dst_b -= dst_stride_b; diff --git a/libs/libvpx/third_party/libyuv/source/rotate_any.cc b/libs/libvpx/third_party/libyuv/source/rotate_any.cc index 4d6eb34e18..31a74c3155 100644 --- a/libs/libvpx/third_party/libyuv/source/rotate_any.cc +++ b/libs/libvpx/third_party/libyuv/source/rotate_any.cc @@ -18,7 +18,7 @@ namespace libyuv { extern "C" { #endif -#define TANY(NAMEANY, TPOS_SIMD, TPOS_C, MASK) \ +#define TANY(NAMEANY, TPOS_SIMD, MASK) \ void NAMEANY(const uint8* src, int src_stride, \ uint8* dst, int dst_stride, int width) { \ int r = width & MASK; \ @@ -26,24 +26,49 @@ extern "C" { if (n > 0) { \ TPOS_SIMD(src, src_stride, dst, dst_stride, n); \ } \ - TPOS_C(src + n, src_stride, dst + n * dst_stride, dst_stride, r); \ + TransposeWx8_C(src + n, src_stride, dst + n * dst_stride, dst_stride, r);\ } #ifdef HAS_TRANSPOSEWX8_NEON -TANY(TransposeWx8_Any_NEON, TransposeWx8_NEON, TransposeWx8_C, 7) +TANY(TransposeWx8_Any_NEON, TransposeWx8_NEON, 7) #endif #ifdef HAS_TRANSPOSEWX8_SSSE3 -TANY(TransposeWx8_Any_SSSE3, TransposeWx8_SSSE3, TransposeWx8_C, 7) +TANY(TransposeWx8_Any_SSSE3, TransposeWx8_SSSE3, 7) #endif #ifdef HAS_TRANSPOSEWX8_FAST_SSSE3 -TANY(TransposeWx8_Fast_Any_SSSE3, TransposeWx8_Fast_SSSE3, TransposeWx8_C, 15) +TANY(TransposeWx8_Fast_Any_SSSE3, TransposeWx8_Fast_SSSE3, 15) #endif -#ifdef HAS_TRANSPOSEWX8_MIPS_DSPR2 -TANY(TransposeWx8_Any_MIPS_DSPR2, TransposeWx8_MIPS_DSPR2, TransposeWx8_C, 7) +#ifdef HAS_TRANSPOSEWX8_DSPR2 +TANY(TransposeWx8_Any_DSPR2, TransposeWx8_DSPR2, 7) #endif - #undef TANY +#define TUVANY(NAMEANY, TPOS_SIMD, MASK) \ + void NAMEANY(const uint8* src, int src_stride, \ + uint8* dst_a, int dst_stride_a, \ + 
uint8* dst_b, int dst_stride_b, int width) { \ + int r = width & MASK; \ + int n = width - r; \ + if (n > 0) { \ + TPOS_SIMD(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b, \ + n); \ + } \ + TransposeUVWx8_C(src + n * 2, src_stride, \ + dst_a + n * dst_stride_a, dst_stride_a, \ + dst_b + n * dst_stride_b, dst_stride_b, r); \ + } + +#ifdef HAS_TRANSPOSEUVWX8_NEON +TUVANY(TransposeUVWx8_Any_NEON, TransposeUVWx8_NEON, 7) +#endif +#ifdef HAS_TRANSPOSEUVWX8_SSE2 +TUVANY(TransposeUVWx8_Any_SSE2, TransposeUVWx8_SSE2, 7) +#endif +#ifdef HAS_TRANSPOSEUVWX8_DSPR2 +TUVANY(TransposeUVWx8_Any_DSPR2, TransposeUVWx8_DSPR2, 7) +#endif +#undef TUVANY + #ifdef __cplusplus } // extern "C" } // namespace libyuv diff --git a/libs/libvpx/third_party/libyuv/source/rotate_gcc.cc b/libs/libvpx/third_party/libyuv/source/rotate_gcc.cc index fd385bcd30..cbe870caa7 100644 --- a/libs/libvpx/third_party/libyuv/source/rotate_gcc.cc +++ b/libs/libvpx/third_party/libyuv/source/rotate_gcc.cc @@ -17,16 +17,17 @@ extern "C" { #endif // This module is for GCC x86 and x64. -#if !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__)) - #if !defined(LIBYUV_DISABLE_X86) && \ - (defined(__i386__) || (defined(__x86_64__) && !defined(__native_client__))) + (defined(__x86_64__) || (defined(__i386__) && !defined(_MSC_VER))) + +// Transpose 8x8. 32 or 64 bit, but not NaCL for 64 bit. +#if defined(HAS_TRANSPOSEWX8_SSSE3) void TransposeWx8_SSSE3(const uint8* src, int src_stride, uint8* dst, int dst_stride, int width) { asm volatile ( // Read in the data from the source pointer. // First round of bit swap. 
- ".p2align 2 \n" + LABELALIGN "1: \n" "movq (%0),%%xmm0 \n" "movq (%0,%3),%%xmm1 \n" @@ -105,386 +106,260 @@ void TransposeWx8_SSSE3(const uint8* src, int src_stride, "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" ); } +#endif // defined(HAS_TRANSPOSEWX8_SSSE3) -#if !defined(LIBYUV_DISABLE_X86) && defined(__i386__) && !defined(__clang__) -void TransposeUVWx8_SSE2(const uint8* src, int src_stride, - uint8* dst_a, int dst_stride_a, - uint8* dst_b, int dst_stride_b, int width); - asm ( - DECLARE_FUNCTION(TransposeUVWx8_SSE2) - "push %ebx \n" - "push %esi \n" - "push %edi \n" - "push %ebp \n" - "mov 0x14(%esp),%eax \n" - "mov 0x18(%esp),%edi \n" - "mov 0x1c(%esp),%edx \n" - "mov 0x20(%esp),%esi \n" - "mov 0x24(%esp),%ebx \n" - "mov 0x28(%esp),%ebp \n" - "mov %esp,%ecx \n" - "sub $0x14,%esp \n" - "and $0xfffffff0,%esp \n" - "mov %ecx,0x10(%esp) \n" - "mov 0x2c(%ecx),%ecx \n" - -"1: \n" - "movdqu (%eax),%xmm0 \n" - "movdqu (%eax,%edi,1),%xmm1 \n" - "lea (%eax,%edi,2),%eax \n" - "movdqa %xmm0,%xmm7 \n" - "punpcklbw %xmm1,%xmm0 \n" - "punpckhbw %xmm1,%xmm7 \n" - "movdqa %xmm7,%xmm1 \n" - "movdqu (%eax),%xmm2 \n" - "movdqu (%eax,%edi,1),%xmm3 \n" - "lea (%eax,%edi,2),%eax \n" - "movdqa %xmm2,%xmm7 \n" - "punpcklbw %xmm3,%xmm2 \n" - "punpckhbw %xmm3,%xmm7 \n" - "movdqa %xmm7,%xmm3 \n" - "movdqu (%eax),%xmm4 \n" - "movdqu (%eax,%edi,1),%xmm5 \n" - "lea (%eax,%edi,2),%eax \n" - "movdqa %xmm4,%xmm7 \n" - "punpcklbw %xmm5,%xmm4 \n" - "punpckhbw %xmm5,%xmm7 \n" - "movdqa %xmm7,%xmm5 \n" - "movdqu (%eax),%xmm6 \n" - "movdqu (%eax,%edi,1),%xmm7 \n" - "lea (%eax,%edi,2),%eax \n" - "movdqu %xmm5,(%esp) \n" - "neg %edi \n" - "movdqa %xmm6,%xmm5 \n" - "punpcklbw %xmm7,%xmm6 \n" - "punpckhbw %xmm7,%xmm5 \n" - "movdqa %xmm5,%xmm7 \n" - "lea 0x10(%eax,%edi,8),%eax \n" - "neg %edi \n" - "movdqa %xmm0,%xmm5 \n" - "punpcklwd %xmm2,%xmm0 \n" - "punpckhwd %xmm2,%xmm5 \n" - "movdqa %xmm5,%xmm2 \n" - "movdqa %xmm1,%xmm5 \n" - "punpcklwd %xmm3,%xmm1 \n" - "punpckhwd %xmm3,%xmm5 
\n" - "movdqa %xmm5,%xmm3 \n" - "movdqa %xmm4,%xmm5 \n" - "punpcklwd %xmm6,%xmm4 \n" - "punpckhwd %xmm6,%xmm5 \n" - "movdqa %xmm5,%xmm6 \n" - "movdqu (%esp),%xmm5 \n" - "movdqu %xmm6,(%esp) \n" - "movdqa %xmm5,%xmm6 \n" - "punpcklwd %xmm7,%xmm5 \n" - "punpckhwd %xmm7,%xmm6 \n" - "movdqa %xmm6,%xmm7 \n" - "movdqa %xmm0,%xmm6 \n" - "punpckldq %xmm4,%xmm0 \n" - "punpckhdq %xmm4,%xmm6 \n" - "movdqa %xmm6,%xmm4 \n" - "movdqu (%esp),%xmm6 \n" - "movlpd %xmm0,(%edx) \n" - "movhpd %xmm0,(%ebx) \n" - "movlpd %xmm4,(%edx,%esi,1) \n" - "lea (%edx,%esi,2),%edx \n" - "movhpd %xmm4,(%ebx,%ebp,1) \n" - "lea (%ebx,%ebp,2),%ebx \n" - "movdqa %xmm2,%xmm0 \n" - "punpckldq %xmm6,%xmm2 \n" - "movlpd %xmm2,(%edx) \n" - "movhpd %xmm2,(%ebx) \n" - "punpckhdq %xmm6,%xmm0 \n" - "movlpd %xmm0,(%edx,%esi,1) \n" - "lea (%edx,%esi,2),%edx \n" - "movhpd %xmm0,(%ebx,%ebp,1) \n" - "lea (%ebx,%ebp,2),%ebx \n" - "movdqa %xmm1,%xmm0 \n" - "punpckldq %xmm5,%xmm1 \n" - "movlpd %xmm1,(%edx) \n" - "movhpd %xmm1,(%ebx) \n" - "punpckhdq %xmm5,%xmm0 \n" - "movlpd %xmm0,(%edx,%esi,1) \n" - "lea (%edx,%esi,2),%edx \n" - "movhpd %xmm0,(%ebx,%ebp,1) \n" - "lea (%ebx,%ebp,2),%ebx \n" - "movdqa %xmm3,%xmm0 \n" - "punpckldq %xmm7,%xmm3 \n" - "movlpd %xmm3,(%edx) \n" - "movhpd %xmm3,(%ebx) \n" - "punpckhdq %xmm7,%xmm0 \n" - "sub $0x8,%ecx \n" - "movlpd %xmm0,(%edx,%esi,1) \n" - "lea (%edx,%esi,2),%edx \n" - "movhpd %xmm0,(%ebx,%ebp,1) \n" - "lea (%ebx,%ebp,2),%ebx \n" - "jg 1b \n" - "mov 0x10(%esp),%esp \n" - "pop %ebp \n" - "pop %edi \n" - "pop %esi \n" - "pop %ebx \n" -#if defined(__native_client__) - "pop %ecx \n" - "and $0xffffffe0,%ecx \n" - "jmp *%ecx \n" -#else - "ret \n" -#endif -); -#endif -#if !defined(LIBYUV_DISABLE_X86) && !defined(__native_client__) && \ - defined(__x86_64__) -// 64 bit version has enough registers to do 16x8 to 8x16 at a time. +// Transpose 16x8. 
64 bit +#if defined(HAS_TRANSPOSEWX8_FAST_SSSE3) void TransposeWx8_Fast_SSSE3(const uint8* src, int src_stride, uint8* dst, int dst_stride, int width) { asm volatile ( - // Read in the data from the source pointer. - // First round of bit swap. - ".p2align 2 \n" -"1: \n" - "movdqu (%0),%%xmm0 \n" - "movdqu (%0,%3),%%xmm1 \n" - "lea (%0,%3,2),%0 \n" - "movdqa %%xmm0,%%xmm8 \n" - "punpcklbw %%xmm1,%%xmm0 \n" - "punpckhbw %%xmm1,%%xmm8 \n" - "movdqu (%0),%%xmm2 \n" - "movdqa %%xmm0,%%xmm1 \n" - "movdqa %%xmm8,%%xmm9 \n" - "palignr $0x8,%%xmm1,%%xmm1 \n" - "palignr $0x8,%%xmm9,%%xmm9 \n" - "movdqu (%0,%3),%%xmm3 \n" - "lea (%0,%3,2),%0 \n" - "movdqa %%xmm2,%%xmm10 \n" - "punpcklbw %%xmm3,%%xmm2 \n" - "punpckhbw %%xmm3,%%xmm10 \n" - "movdqa %%xmm2,%%xmm3 \n" - "movdqa %%xmm10,%%xmm11 \n" - "movdqu (%0),%%xmm4 \n" - "palignr $0x8,%%xmm3,%%xmm3 \n" - "palignr $0x8,%%xmm11,%%xmm11 \n" - "movdqu (%0,%3),%%xmm5 \n" - "lea (%0,%3,2),%0 \n" - "movdqa %%xmm4,%%xmm12 \n" - "punpcklbw %%xmm5,%%xmm4 \n" - "punpckhbw %%xmm5,%%xmm12 \n" - "movdqa %%xmm4,%%xmm5 \n" - "movdqa %%xmm12,%%xmm13 \n" - "movdqu (%0),%%xmm6 \n" - "palignr $0x8,%%xmm5,%%xmm5 \n" - "palignr $0x8,%%xmm13,%%xmm13 \n" - "movdqu (%0,%3),%%xmm7 \n" - "lea (%0,%3,2),%0 \n" - "movdqa %%xmm6,%%xmm14 \n" - "punpcklbw %%xmm7,%%xmm6 \n" - "punpckhbw %%xmm7,%%xmm14 \n" - "neg %3 \n" - "movdqa %%xmm6,%%xmm7 \n" - "movdqa %%xmm14,%%xmm15 \n" - "lea 0x10(%0,%3,8),%0 \n" - "palignr $0x8,%%xmm7,%%xmm7 \n" - "palignr $0x8,%%xmm15,%%xmm15 \n" - "neg %3 \n" - // Second round of bit swap. 
- "punpcklwd %%xmm2,%%xmm0 \n" - "punpcklwd %%xmm3,%%xmm1 \n" - "movdqa %%xmm0,%%xmm2 \n" - "movdqa %%xmm1,%%xmm3 \n" - "palignr $0x8,%%xmm2,%%xmm2 \n" - "palignr $0x8,%%xmm3,%%xmm3 \n" - "punpcklwd %%xmm6,%%xmm4 \n" - "punpcklwd %%xmm7,%%xmm5 \n" - "movdqa %%xmm4,%%xmm6 \n" - "movdqa %%xmm5,%%xmm7 \n" - "palignr $0x8,%%xmm6,%%xmm6 \n" - "palignr $0x8,%%xmm7,%%xmm7 \n" - "punpcklwd %%xmm10,%%xmm8 \n" - "punpcklwd %%xmm11,%%xmm9 \n" - "movdqa %%xmm8,%%xmm10 \n" - "movdqa %%xmm9,%%xmm11 \n" - "palignr $0x8,%%xmm10,%%xmm10 \n" - "palignr $0x8,%%xmm11,%%xmm11 \n" - "punpcklwd %%xmm14,%%xmm12 \n" - "punpcklwd %%xmm15,%%xmm13 \n" - "movdqa %%xmm12,%%xmm14 \n" - "movdqa %%xmm13,%%xmm15 \n" - "palignr $0x8,%%xmm14,%%xmm14 \n" - "palignr $0x8,%%xmm15,%%xmm15 \n" - // Third round of bit swap. - // Write to the destination pointer. - "punpckldq %%xmm4,%%xmm0 \n" - "movq %%xmm0,(%1) \n" - "movdqa %%xmm0,%%xmm4 \n" - "palignr $0x8,%%xmm4,%%xmm4 \n" - "movq %%xmm4,(%1,%4) \n" - "lea (%1,%4,2),%1 \n" - "punpckldq %%xmm6,%%xmm2 \n" - "movdqa %%xmm2,%%xmm6 \n" - "movq %%xmm2,(%1) \n" - "palignr $0x8,%%xmm6,%%xmm6 \n" - "punpckldq %%xmm5,%%xmm1 \n" - "movq %%xmm6,(%1,%4) \n" - "lea (%1,%4,2),%1 \n" - "movdqa %%xmm1,%%xmm5 \n" - "movq %%xmm1,(%1) \n" - "palignr $0x8,%%xmm5,%%xmm5 \n" - "movq %%xmm5,(%1,%4) \n" - "lea (%1,%4,2),%1 \n" - "punpckldq %%xmm7,%%xmm3 \n" - "movq %%xmm3,(%1) \n" - "movdqa %%xmm3,%%xmm7 \n" - "palignr $0x8,%%xmm7,%%xmm7 \n" - "movq %%xmm7,(%1,%4) \n" - "lea (%1,%4,2),%1 \n" - "punpckldq %%xmm12,%%xmm8 \n" - "movq %%xmm8,(%1) \n" - "movdqa %%xmm8,%%xmm12 \n" - "palignr $0x8,%%xmm12,%%xmm12 \n" - "movq %%xmm12,(%1,%4) \n" - "lea (%1,%4,2),%1 \n" - "punpckldq %%xmm14,%%xmm10 \n" - "movdqa %%xmm10,%%xmm14 \n" - "movq %%xmm10,(%1) \n" - "palignr $0x8,%%xmm14,%%xmm14 \n" - "punpckldq %%xmm13,%%xmm9 \n" - "movq %%xmm14,(%1,%4) \n" - "lea (%1,%4,2),%1 \n" - "movdqa %%xmm9,%%xmm13 \n" - "movq %%xmm9,(%1) \n" - "palignr $0x8,%%xmm13,%%xmm13 \n" - "movq %%xmm13,(%1,%4) 
\n" - "lea (%1,%4,2),%1 \n" - "punpckldq %%xmm15,%%xmm11 \n" - "movq %%xmm11,(%1) \n" - "movdqa %%xmm11,%%xmm15 \n" - "palignr $0x8,%%xmm15,%%xmm15 \n" - "sub $0x10,%2 \n" - "movq %%xmm15,(%1,%4) \n" - "lea (%1,%4,2),%1 \n" - "jg 1b \n" - : "+r"(src), // %0 - "+r"(dst), // %1 - "+r"(width) // %2 - : "r"((intptr_t)(src_stride)), // %3 - "r"((intptr_t)(dst_stride)) // %4 - : "memory", "cc", - "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", - "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" -); + // Read in the data from the source pointer. + // First round of bit swap. + LABELALIGN + "1: \n" + "movdqu (%0),%%xmm0 \n" + "movdqu (%0,%3),%%xmm1 \n" + "lea (%0,%3,2),%0 \n" + "movdqa %%xmm0,%%xmm8 \n" + "punpcklbw %%xmm1,%%xmm0 \n" + "punpckhbw %%xmm1,%%xmm8 \n" + "movdqu (%0),%%xmm2 \n" + "movdqa %%xmm0,%%xmm1 \n" + "movdqa %%xmm8,%%xmm9 \n" + "palignr $0x8,%%xmm1,%%xmm1 \n" + "palignr $0x8,%%xmm9,%%xmm9 \n" + "movdqu (%0,%3),%%xmm3 \n" + "lea (%0,%3,2),%0 \n" + "movdqa %%xmm2,%%xmm10 \n" + "punpcklbw %%xmm3,%%xmm2 \n" + "punpckhbw %%xmm3,%%xmm10 \n" + "movdqa %%xmm2,%%xmm3 \n" + "movdqa %%xmm10,%%xmm11 \n" + "movdqu (%0),%%xmm4 \n" + "palignr $0x8,%%xmm3,%%xmm3 \n" + "palignr $0x8,%%xmm11,%%xmm11 \n" + "movdqu (%0,%3),%%xmm5 \n" + "lea (%0,%3,2),%0 \n" + "movdqa %%xmm4,%%xmm12 \n" + "punpcklbw %%xmm5,%%xmm4 \n" + "punpckhbw %%xmm5,%%xmm12 \n" + "movdqa %%xmm4,%%xmm5 \n" + "movdqa %%xmm12,%%xmm13 \n" + "movdqu (%0),%%xmm6 \n" + "palignr $0x8,%%xmm5,%%xmm5 \n" + "palignr $0x8,%%xmm13,%%xmm13 \n" + "movdqu (%0,%3),%%xmm7 \n" + "lea (%0,%3,2),%0 \n" + "movdqa %%xmm6,%%xmm14 \n" + "punpcklbw %%xmm7,%%xmm6 \n" + "punpckhbw %%xmm7,%%xmm14 \n" + "neg %3 \n" + "movdqa %%xmm6,%%xmm7 \n" + "movdqa %%xmm14,%%xmm15 \n" + "lea 0x10(%0,%3,8),%0 \n" + "palignr $0x8,%%xmm7,%%xmm7 \n" + "palignr $0x8,%%xmm15,%%xmm15 \n" + "neg %3 \n" + // Second round of bit swap. 
+ "punpcklwd %%xmm2,%%xmm0 \n" + "punpcklwd %%xmm3,%%xmm1 \n" + "movdqa %%xmm0,%%xmm2 \n" + "movdqa %%xmm1,%%xmm3 \n" + "palignr $0x8,%%xmm2,%%xmm2 \n" + "palignr $0x8,%%xmm3,%%xmm3 \n" + "punpcklwd %%xmm6,%%xmm4 \n" + "punpcklwd %%xmm7,%%xmm5 \n" + "movdqa %%xmm4,%%xmm6 \n" + "movdqa %%xmm5,%%xmm7 \n" + "palignr $0x8,%%xmm6,%%xmm6 \n" + "palignr $0x8,%%xmm7,%%xmm7 \n" + "punpcklwd %%xmm10,%%xmm8 \n" + "punpcklwd %%xmm11,%%xmm9 \n" + "movdqa %%xmm8,%%xmm10 \n" + "movdqa %%xmm9,%%xmm11 \n" + "palignr $0x8,%%xmm10,%%xmm10 \n" + "palignr $0x8,%%xmm11,%%xmm11 \n" + "punpcklwd %%xmm14,%%xmm12 \n" + "punpcklwd %%xmm15,%%xmm13 \n" + "movdqa %%xmm12,%%xmm14 \n" + "movdqa %%xmm13,%%xmm15 \n" + "palignr $0x8,%%xmm14,%%xmm14 \n" + "palignr $0x8,%%xmm15,%%xmm15 \n" + // Third round of bit swap. + // Write to the destination pointer. + "punpckldq %%xmm4,%%xmm0 \n" + "movq %%xmm0,(%1) \n" + "movdqa %%xmm0,%%xmm4 \n" + "palignr $0x8,%%xmm4,%%xmm4 \n" + "movq %%xmm4,(%1,%4) \n" + "lea (%1,%4,2),%1 \n" + "punpckldq %%xmm6,%%xmm2 \n" + "movdqa %%xmm2,%%xmm6 \n" + "movq %%xmm2,(%1) \n" + "palignr $0x8,%%xmm6,%%xmm6 \n" + "punpckldq %%xmm5,%%xmm1 \n" + "movq %%xmm6,(%1,%4) \n" + "lea (%1,%4,2),%1 \n" + "movdqa %%xmm1,%%xmm5 \n" + "movq %%xmm1,(%1) \n" + "palignr $0x8,%%xmm5,%%xmm5 \n" + "movq %%xmm5,(%1,%4) \n" + "lea (%1,%4,2),%1 \n" + "punpckldq %%xmm7,%%xmm3 \n" + "movq %%xmm3,(%1) \n" + "movdqa %%xmm3,%%xmm7 \n" + "palignr $0x8,%%xmm7,%%xmm7 \n" + "movq %%xmm7,(%1,%4) \n" + "lea (%1,%4,2),%1 \n" + "punpckldq %%xmm12,%%xmm8 \n" + "movq %%xmm8,(%1) \n" + "movdqa %%xmm8,%%xmm12 \n" + "palignr $0x8,%%xmm12,%%xmm12 \n" + "movq %%xmm12,(%1,%4) \n" + "lea (%1,%4,2),%1 \n" + "punpckldq %%xmm14,%%xmm10 \n" + "movdqa %%xmm10,%%xmm14 \n" + "movq %%xmm10,(%1) \n" + "palignr $0x8,%%xmm14,%%xmm14 \n" + "punpckldq %%xmm13,%%xmm9 \n" + "movq %%xmm14,(%1,%4) \n" + "lea (%1,%4,2),%1 \n" + "movdqa %%xmm9,%%xmm13 \n" + "movq %%xmm9,(%1) \n" + "palignr $0x8,%%xmm13,%%xmm13 \n" + "movq %%xmm13,(%1,%4) 
\n" + "lea (%1,%4,2),%1 \n" + "punpckldq %%xmm15,%%xmm11 \n" + "movq %%xmm11,(%1) \n" + "movdqa %%xmm11,%%xmm15 \n" + "palignr $0x8,%%xmm15,%%xmm15 \n" + "sub $0x10,%2 \n" + "movq %%xmm15,(%1,%4) \n" + "lea (%1,%4,2),%1 \n" + "jg 1b \n" + : "+r"(src), // %0 + "+r"(dst), // %1 + "+r"(width) // %2 + : "r"((intptr_t)(src_stride)), // %3 + "r"((intptr_t)(dst_stride)) // %4 + : "memory", "cc", + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", + "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" + ); } +#endif // defined(HAS_TRANSPOSEWX8_FAST_SSSE3) +// Transpose UV 8x8. 64 bit. +#if defined(HAS_TRANSPOSEUVWX8_SSE2) void TransposeUVWx8_SSE2(const uint8* src, int src_stride, uint8* dst_a, int dst_stride_a, uint8* dst_b, int dst_stride_b, int width) { asm volatile ( - // Read in the data from the source pointer. - // First round of bit swap. - ".p2align 2 \n" -"1: \n" - "movdqu (%0),%%xmm0 \n" - "movdqu (%0,%4),%%xmm1 \n" - "lea (%0,%4,2),%0 \n" - "movdqa %%xmm0,%%xmm8 \n" - "punpcklbw %%xmm1,%%xmm0 \n" - "punpckhbw %%xmm1,%%xmm8 \n" - "movdqa %%xmm8,%%xmm1 \n" - "movdqu (%0),%%xmm2 \n" - "movdqu (%0,%4),%%xmm3 \n" - "lea (%0,%4,2),%0 \n" - "movdqa %%xmm2,%%xmm8 \n" - "punpcklbw %%xmm3,%%xmm2 \n" - "punpckhbw %%xmm3,%%xmm8 \n" - "movdqa %%xmm8,%%xmm3 \n" - "movdqu (%0),%%xmm4 \n" - "movdqu (%0,%4),%%xmm5 \n" - "lea (%0,%4,2),%0 \n" - "movdqa %%xmm4,%%xmm8 \n" - "punpcklbw %%xmm5,%%xmm4 \n" - "punpckhbw %%xmm5,%%xmm8 \n" - "movdqa %%xmm8,%%xmm5 \n" - "movdqu (%0),%%xmm6 \n" - "movdqu (%0,%4),%%xmm7 \n" - "lea (%0,%4,2),%0 \n" - "movdqa %%xmm6,%%xmm8 \n" - "punpcklbw %%xmm7,%%xmm6 \n" - "neg %4 \n" - "lea 0x10(%0,%4,8),%0 \n" - "punpckhbw %%xmm7,%%xmm8 \n" - "movdqa %%xmm8,%%xmm7 \n" - "neg %4 \n" - // Second round of bit swap. 
- "movdqa %%xmm0,%%xmm8 \n" - "movdqa %%xmm1,%%xmm9 \n" - "punpckhwd %%xmm2,%%xmm8 \n" - "punpckhwd %%xmm3,%%xmm9 \n" - "punpcklwd %%xmm2,%%xmm0 \n" - "punpcklwd %%xmm3,%%xmm1 \n" - "movdqa %%xmm8,%%xmm2 \n" - "movdqa %%xmm9,%%xmm3 \n" - "movdqa %%xmm4,%%xmm8 \n" - "movdqa %%xmm5,%%xmm9 \n" - "punpckhwd %%xmm6,%%xmm8 \n" - "punpckhwd %%xmm7,%%xmm9 \n" - "punpcklwd %%xmm6,%%xmm4 \n" - "punpcklwd %%xmm7,%%xmm5 \n" - "movdqa %%xmm8,%%xmm6 \n" - "movdqa %%xmm9,%%xmm7 \n" - // Third round of bit swap. - // Write to the destination pointer. - "movdqa %%xmm0,%%xmm8 \n" - "punpckldq %%xmm4,%%xmm0 \n" - "movlpd %%xmm0,(%1) \n" // Write back U channel - "movhpd %%xmm0,(%2) \n" // Write back V channel - "punpckhdq %%xmm4,%%xmm8 \n" - "movlpd %%xmm8,(%1,%5) \n" - "lea (%1,%5,2),%1 \n" - "movhpd %%xmm8,(%2,%6) \n" - "lea (%2,%6,2),%2 \n" - "movdqa %%xmm2,%%xmm8 \n" - "punpckldq %%xmm6,%%xmm2 \n" - "movlpd %%xmm2,(%1) \n" - "movhpd %%xmm2,(%2) \n" - "punpckhdq %%xmm6,%%xmm8 \n" - "movlpd %%xmm8,(%1,%5) \n" - "lea (%1,%5,2),%1 \n" - "movhpd %%xmm8,(%2,%6) \n" - "lea (%2,%6,2),%2 \n" - "movdqa %%xmm1,%%xmm8 \n" - "punpckldq %%xmm5,%%xmm1 \n" - "movlpd %%xmm1,(%1) \n" - "movhpd %%xmm1,(%2) \n" - "punpckhdq %%xmm5,%%xmm8 \n" - "movlpd %%xmm8,(%1,%5) \n" - "lea (%1,%5,2),%1 \n" - "movhpd %%xmm8,(%2,%6) \n" - "lea (%2,%6,2),%2 \n" - "movdqa %%xmm3,%%xmm8 \n" - "punpckldq %%xmm7,%%xmm3 \n" - "movlpd %%xmm3,(%1) \n" - "movhpd %%xmm3,(%2) \n" - "punpckhdq %%xmm7,%%xmm8 \n" - "sub $0x8,%3 \n" - "movlpd %%xmm8,(%1,%5) \n" - "lea (%1,%5,2),%1 \n" - "movhpd %%xmm8,(%2,%6) \n" - "lea (%2,%6,2),%2 \n" - "jg 1b \n" - : "+r"(src), // %0 - "+r"(dst_a), // %1 - "+r"(dst_b), // %2 - "+r"(width) // %3 - : "r"((intptr_t)(src_stride)), // %4 - "r"((intptr_t)(dst_stride_a)), // %5 - "r"((intptr_t)(dst_stride_b)) // %6 - : "memory", "cc", - "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", - "xmm8", "xmm9" -); + // Read in the data from the source pointer. + // First round of bit swap. 
+ LABELALIGN + "1: \n" + "movdqu (%0),%%xmm0 \n" + "movdqu (%0,%4),%%xmm1 \n" + "lea (%0,%4,2),%0 \n" + "movdqa %%xmm0,%%xmm8 \n" + "punpcklbw %%xmm1,%%xmm0 \n" + "punpckhbw %%xmm1,%%xmm8 \n" + "movdqa %%xmm8,%%xmm1 \n" + "movdqu (%0),%%xmm2 \n" + "movdqu (%0,%4),%%xmm3 \n" + "lea (%0,%4,2),%0 \n" + "movdqa %%xmm2,%%xmm8 \n" + "punpcklbw %%xmm3,%%xmm2 \n" + "punpckhbw %%xmm3,%%xmm8 \n" + "movdqa %%xmm8,%%xmm3 \n" + "movdqu (%0),%%xmm4 \n" + "movdqu (%0,%4),%%xmm5 \n" + "lea (%0,%4,2),%0 \n" + "movdqa %%xmm4,%%xmm8 \n" + "punpcklbw %%xmm5,%%xmm4 \n" + "punpckhbw %%xmm5,%%xmm8 \n" + "movdqa %%xmm8,%%xmm5 \n" + "movdqu (%0),%%xmm6 \n" + "movdqu (%0,%4),%%xmm7 \n" + "lea (%0,%4,2),%0 \n" + "movdqa %%xmm6,%%xmm8 \n" + "punpcklbw %%xmm7,%%xmm6 \n" + "neg %4 \n" + "lea 0x10(%0,%4,8),%0 \n" + "punpckhbw %%xmm7,%%xmm8 \n" + "movdqa %%xmm8,%%xmm7 \n" + "neg %4 \n" + // Second round of bit swap. + "movdqa %%xmm0,%%xmm8 \n" + "movdqa %%xmm1,%%xmm9 \n" + "punpckhwd %%xmm2,%%xmm8 \n" + "punpckhwd %%xmm3,%%xmm9 \n" + "punpcklwd %%xmm2,%%xmm0 \n" + "punpcklwd %%xmm3,%%xmm1 \n" + "movdqa %%xmm8,%%xmm2 \n" + "movdqa %%xmm9,%%xmm3 \n" + "movdqa %%xmm4,%%xmm8 \n" + "movdqa %%xmm5,%%xmm9 \n" + "punpckhwd %%xmm6,%%xmm8 \n" + "punpckhwd %%xmm7,%%xmm9 \n" + "punpcklwd %%xmm6,%%xmm4 \n" + "punpcklwd %%xmm7,%%xmm5 \n" + "movdqa %%xmm8,%%xmm6 \n" + "movdqa %%xmm9,%%xmm7 \n" + // Third round of bit swap. + // Write to the destination pointer. 
+ "movdqa %%xmm0,%%xmm8 \n" + "punpckldq %%xmm4,%%xmm0 \n" + "movlpd %%xmm0,(%1) \n" // Write back U channel + "movhpd %%xmm0,(%2) \n" // Write back V channel + "punpckhdq %%xmm4,%%xmm8 \n" + "movlpd %%xmm8,(%1,%5) \n" + "lea (%1,%5,2),%1 \n" + "movhpd %%xmm8,(%2,%6) \n" + "lea (%2,%6,2),%2 \n" + "movdqa %%xmm2,%%xmm8 \n" + "punpckldq %%xmm6,%%xmm2 \n" + "movlpd %%xmm2,(%1) \n" + "movhpd %%xmm2,(%2) \n" + "punpckhdq %%xmm6,%%xmm8 \n" + "movlpd %%xmm8,(%1,%5) \n" + "lea (%1,%5,2),%1 \n" + "movhpd %%xmm8,(%2,%6) \n" + "lea (%2,%6,2),%2 \n" + "movdqa %%xmm1,%%xmm8 \n" + "punpckldq %%xmm5,%%xmm1 \n" + "movlpd %%xmm1,(%1) \n" + "movhpd %%xmm1,(%2) \n" + "punpckhdq %%xmm5,%%xmm8 \n" + "movlpd %%xmm8,(%1,%5) \n" + "lea (%1,%5,2),%1 \n" + "movhpd %%xmm8,(%2,%6) \n" + "lea (%2,%6,2),%2 \n" + "movdqa %%xmm3,%%xmm8 \n" + "punpckldq %%xmm7,%%xmm3 \n" + "movlpd %%xmm3,(%1) \n" + "movhpd %%xmm3,(%2) \n" + "punpckhdq %%xmm7,%%xmm8 \n" + "sub $0x8,%3 \n" + "movlpd %%xmm8,(%1,%5) \n" + "lea (%1,%5,2),%1 \n" + "movhpd %%xmm8,(%2,%6) \n" + "lea (%2,%6,2),%2 \n" + "jg 1b \n" + : "+r"(src), // %0 + "+r"(dst_a), // %1 + "+r"(dst_b), // %2 + "+r"(width) // %3 + : "r"((intptr_t)(src_stride)), // %4 + "r"((intptr_t)(dst_stride_a)), // %5 + "r"((intptr_t)(dst_stride_b)) // %6 + : "memory", "cc", + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", + "xmm8", "xmm9" + ); } -#endif -#endif - +#endif // defined(HAS_TRANSPOSEUVWX8_SSE2) #endif // defined(__x86_64__) || defined(__i386__) #ifdef __cplusplus diff --git a/libs/libvpx/third_party/libyuv/source/rotate_mips.cc b/libs/libvpx/third_party/libyuv/source/rotate_mips.cc index efe6bd909e..1e8ce25197 100644 --- a/libs/libvpx/third_party/libyuv/source/rotate_mips.cc +++ b/libs/libvpx/third_party/libyuv/source/rotate_mips.cc @@ -22,8 +22,8 @@ extern "C" { defined(__mips_dsp) && (__mips_dsp_rev >= 2) && \ (_MIPS_SIM == _MIPS_SIM_ABI32) -void TransposeWx8_MIPS_DSPR2(const uint8* src, int src_stride, - uint8* dst, int dst_stride, int 
width) { +void TransposeWx8_DSPR2(const uint8* src, int src_stride, + uint8* dst, int dst_stride, int width) { __asm__ __volatile__ ( ".set push \n" ".set noreorder \n" @@ -106,8 +106,8 @@ void TransposeWx8_MIPS_DSPR2(const uint8* src, int src_stride, ); } -void TransposeWx8_Fast_MIPS_DSPR2(const uint8* src, int src_stride, - uint8* dst, int dst_stride, int width) { +void TransposeWx8_Fast_DSPR2(const uint8* src, int src_stride, + uint8* dst, int dst_stride, int width) { __asm__ __volatile__ ( ".set noat \n" ".set push \n" @@ -308,10 +308,10 @@ void TransposeWx8_Fast_MIPS_DSPR2(const uint8* src, int src_stride, ); } -void TransposeUVWx8_MIPS_DSPR2(const uint8* src, int src_stride, - uint8* dst_a, int dst_stride_a, - uint8* dst_b, int dst_stride_b, - int width) { +void TransposeUVWx8_DSPR2(const uint8* src, int src_stride, + uint8* dst_a, int dst_stride_a, + uint8* dst_b, int dst_stride_b, + int width) { __asm__ __volatile__ ( ".set push \n" ".set noreorder \n" diff --git a/libs/libvpx/third_party/libyuv/source/rotate_neon.cc b/libs/libvpx/third_party/libyuv/source/rotate_neon.cc index 76043b3b3c..1c22b472bc 100644 --- a/libs/libvpx/third_party/libyuv/source/rotate_neon.cc +++ b/libs/libvpx/third_party/libyuv/source/rotate_neon.cc @@ -27,7 +27,7 @@ static uvec8 kVTbl4x4Transpose = void TransposeWx8_NEON(const uint8* src, int src_stride, uint8* dst, int dst_stride, int width) { - const uint8* src_temp = NULL; + const uint8* src_temp; asm volatile ( // loops are on blocks of 8. loop will stop when // counter gets to or below 0. starting the counter @@ -35,7 +35,6 @@ void TransposeWx8_NEON(const uint8* src, int src_stride, "sub %5, #8 \n" // handle 8x8 blocks. 
this should be the majority of the plane - ".p2align 2 \n" "1: \n" "mov %0, %1 \n" @@ -230,7 +229,7 @@ void TransposeWx8_NEON(const uint8* src, int src_stride, "4: \n" - : "+r"(src_temp), // %0 + : "=&r"(src_temp), // %0 "+r"(src), // %1 "+r"(src_stride), // %2 "+r"(dst), // %3 @@ -248,7 +247,7 @@ void TransposeUVWx8_NEON(const uint8* src, int src_stride, uint8* dst_a, int dst_stride_a, uint8* dst_b, int dst_stride_b, int width) { - const uint8* src_temp = NULL; + const uint8* src_temp; asm volatile ( // loops are on blocks of 8. loop will stop when // counter gets to or below 0. starting the counter @@ -256,7 +255,6 @@ void TransposeUVWx8_NEON(const uint8* src, int src_stride, "sub %7, #8 \n" // handle 8x8 blocks. this should be the majority of the plane - ".p2align 2 \n" "1: \n" "mov %0, %1 \n" @@ -514,7 +512,7 @@ void TransposeUVWx8_NEON(const uint8* src, int src_stride, "4: \n" - : "+r"(src_temp), // %0 + : "=&r"(src_temp), // %0 "+r"(src), // %1 "+r"(src_stride), // %2 "+r"(dst_a), // %3 diff --git a/libs/libvpx/third_party/libyuv/source/rotate_neon64.cc b/libs/libvpx/third_party/libyuv/source/rotate_neon64.cc index f52c082b3f..1ab448f3ab 100644 --- a/libs/libvpx/third_party/libyuv/source/rotate_neon64.cc +++ b/libs/libvpx/third_party/libyuv/source/rotate_neon64.cc @@ -26,7 +26,7 @@ static uvec8 kVTbl4x4Transpose = void TransposeWx8_NEON(const uint8* src, int src_stride, uint8* dst, int dst_stride, int width) { - const uint8* src_temp = NULL; + const uint8* src_temp; int64 width64 = (int64) width; // Work around clang 3.4 warning. asm volatile ( // loops are on blocks of 8. 
loop will stop when @@ -235,7 +235,7 @@ void TransposeWx8_NEON(const uint8* src, int src_stride, "4: \n" - : "+r"(src_temp), // %0 + : "=&r"(src_temp), // %0 "+r"(src), // %1 "+r"(dst), // %2 "+r"(width64) // %3 @@ -255,7 +255,7 @@ void TransposeUVWx8_NEON(const uint8* src, int src_stride, uint8* dst_a, int dst_stride_a, uint8* dst_b, int dst_stride_b, int width) { - const uint8* src_temp = NULL; + const uint8* src_temp; int64 width64 = (int64) width; // Work around clang 3.4 warning. asm volatile ( // loops are on blocks of 8. loop will stop when @@ -520,7 +520,7 @@ void TransposeUVWx8_NEON(const uint8* src, int src_stride, "4: \n" - : "+r"(src_temp), // %0 + : "=&r"(src_temp), // %0 "+r"(src), // %1 "+r"(dst_a), // %2 "+r"(dst_b), // %3 diff --git a/libs/libvpx/third_party/libyuv/source/rotate_win.cc b/libs/libvpx/third_party/libyuv/source/rotate_win.cc index 2760066dfd..1300fc0feb 100644 --- a/libs/libvpx/third_party/libyuv/source/rotate_win.cc +++ b/libs/libvpx/third_party/libyuv/source/rotate_win.cc @@ -16,9 +16,8 @@ namespace libyuv { extern "C" { #endif -// This module is for Visual C x86. -#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && \ - defined(_MSC_VER) && !defined(__clang__) +// This module is for 32 bit Visual C x86 and clangcl +#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) __declspec(naked) void TransposeWx8_SSSE3(const uint8* src, int src_stride, diff --git a/libs/libvpx/third_party/libyuv/source/row_any.cc b/libs/libvpx/third_party/libyuv/source/row_any.cc index 1cb1f6b930..494164fd02 100644 --- a/libs/libvpx/third_party/libyuv/source/row_any.cc +++ b/libs/libvpx/third_party/libyuv/source/row_any.cc @@ -22,6 +22,39 @@ extern "C" { // Subsampled source needs to be increase by 1 of not even. 
#define SS(width, shift) (((width) + (1 << (shift)) - 1) >> (shift)) +// Any 4 planes to 1 with yuvconstants +#define ANY41C(NAMEANY, ANY_SIMD, UVSHIFT, DUVSHIFT, BPP, MASK) \ + void NAMEANY(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, \ + const uint8* a_buf, uint8* dst_ptr, \ + const struct YuvConstants* yuvconstants, int width) { \ + SIMD_ALIGNED(uint8 temp[64 * 5]); \ + memset(temp, 0, 64 * 4); /* for msan */ \ + int r = width & MASK; \ + int n = width & ~MASK; \ + if (n > 0) { \ + ANY_SIMD(y_buf, u_buf, v_buf, a_buf, dst_ptr, yuvconstants, n); \ + } \ + memcpy(temp, y_buf + n, r); \ + memcpy(temp + 64, u_buf + (n >> UVSHIFT), SS(r, UVSHIFT)); \ + memcpy(temp + 128, v_buf + (n >> UVSHIFT), SS(r, UVSHIFT)); \ + memcpy(temp + 192, a_buf + n, r); \ + ANY_SIMD(temp, temp + 64, temp + 128, temp + 192, temp + 256, \ + yuvconstants, MASK + 1); \ + memcpy(dst_ptr + (n >> DUVSHIFT) * BPP, temp + 256, \ + SS(r, DUVSHIFT) * BPP); \ + } + +#ifdef HAS_I422ALPHATOARGBROW_SSSE3 +ANY41C(I422AlphaToARGBRow_Any_SSSE3, I422AlphaToARGBRow_SSSE3, 1, 0, 4, 7) +#endif +#ifdef HAS_I422ALPHATOARGBROW_AVX2 +ANY41C(I422AlphaToARGBRow_Any_AVX2, I422AlphaToARGBRow_AVX2, 1, 0, 4, 15) +#endif +#ifdef HAS_I422ALPHATOARGBROW_NEON +ANY41C(I422AlphaToARGBRow_Any_NEON, I422AlphaToARGBRow_NEON, 1, 0, 4, 7) +#endif +#undef ANY41C + // Any 3 planes to 1. 
#define ANY31(NAMEANY, ANY_SIMD, UVSHIFT, DUVSHIFT, BPP, MASK) \ void NAMEANY(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, \ @@ -40,75 +73,9 @@ extern "C" { memcpy(dst_ptr + (n >> DUVSHIFT) * BPP, temp + 192, \ SS(r, DUVSHIFT) * BPP); \ } - -#ifdef HAS_I422TOARGBROW_SSSE3 -ANY31(I422ToARGBRow_Any_SSSE3, I422ToARGBRow_SSSE3, 1, 0, 4, 7) -#endif -#ifdef HAS_I444TOARGBROW_SSSE3 -ANY31(I444ToARGBRow_Any_SSSE3, I444ToARGBRow_SSSE3, 0, 0, 4, 7) -ANY31(I411ToARGBRow_Any_SSSE3, I411ToARGBRow_SSSE3, 2, 0, 4, 7) -ANY31(I422ToBGRARow_Any_SSSE3, I422ToBGRARow_SSSE3, 1, 0, 4, 7) -ANY31(I422ToABGRRow_Any_SSSE3, I422ToABGRRow_SSSE3, 1, 0, 4, 7) -ANY31(I422ToRGBARow_Any_SSSE3, I422ToRGBARow_SSSE3, 1, 0, 4, 7) -ANY31(I422ToARGB4444Row_Any_SSSE3, I422ToARGB4444Row_SSSE3, 1, 0, 2, 7) -ANY31(I422ToARGB1555Row_Any_SSSE3, I422ToARGB1555Row_SSSE3, 1, 0, 2, 7) -ANY31(I422ToRGB565Row_Any_SSSE3, I422ToRGB565Row_SSSE3, 1, 0, 2, 7) -ANY31(I422ToRGB24Row_Any_SSSE3, I422ToRGB24Row_SSSE3, 1, 0, 3, 7) -ANY31(I422ToRAWRow_Any_SSSE3, I422ToRAWRow_SSSE3, 1, 0, 3, 7) +#ifdef HAS_I422TOYUY2ROW_SSE2 ANY31(I422ToYUY2Row_Any_SSE2, I422ToYUY2Row_SSE2, 1, 1, 4, 15) ANY31(I422ToUYVYRow_Any_SSE2, I422ToUYVYRow_SSE2, 1, 1, 4, 15) -#endif // HAS_I444TOARGBROW_SSSE3 -#ifdef HAS_I422TORGB24ROW_AVX2 -ANY31(I422ToRGB24Row_Any_AVX2, I422ToRGB24Row_AVX2, 1, 0, 3, 15) -#endif -#ifdef HAS_I422TORAWROW_AVX2 -ANY31(I422ToRAWRow_Any_AVX2, I422ToRAWRow_AVX2, 1, 0, 3, 15) -#endif -#ifdef HAS_J422TOARGBROW_SSSE3 -ANY31(J422ToARGBRow_Any_SSSE3, J422ToARGBRow_SSSE3, 1, 0, 4, 7) -#endif -#ifdef HAS_J422TOARGBROW_AVX2 -ANY31(J422ToARGBRow_Any_AVX2, J422ToARGBRow_AVX2, 1, 0, 4, 15) -#endif -#ifdef HAS_I422TOARGBROW_AVX2 -ANY31(I422ToARGBRow_Any_AVX2, I422ToARGBRow_AVX2, 1, 0, 4, 15) -#endif -#ifdef HAS_I422TOBGRAROW_AVX2 -ANY31(I422ToBGRARow_Any_AVX2, I422ToBGRARow_AVX2, 1, 0, 4, 15) -#endif -#ifdef HAS_I422TORGBAROW_AVX2 -ANY31(I422ToRGBARow_Any_AVX2, I422ToRGBARow_AVX2, 1, 0, 4, 15) -#endif -#ifdef 
HAS_I422TOABGRROW_AVX2 -ANY31(I422ToABGRRow_Any_AVX2, I422ToABGRRow_AVX2, 1, 0, 4, 15) -#endif -#ifdef HAS_I444TOARGBROW_AVX2 -ANY31(I444ToARGBRow_Any_AVX2, I444ToARGBRow_AVX2, 0, 0, 4, 15) -#endif -#ifdef HAS_I411TOARGBROW_AVX2 -ANY31(I411ToARGBRow_Any_AVX2, I411ToARGBRow_AVX2, 2, 0, 4, 15) -#endif -#ifdef HAS_I422TOARGB4444ROW_AVX2 -ANY31(I422ToARGB4444Row_Any_AVX2, I422ToARGB4444Row_AVX2, 1, 0, 2, 7) -#endif -#ifdef HAS_I422TOARGB1555ROW_AVX2 -ANY31(I422ToARGB1555Row_Any_AVX2, I422ToARGB1555Row_AVX2, 1, 0, 2, 7) -#endif -#ifdef HAS_I422TORGB565ROW_AVX2 -ANY31(I422ToRGB565Row_Any_AVX2, I422ToRGB565Row_AVX2, 1, 0, 2, 7) -#endif -#ifdef HAS_I422TOARGBROW_NEON -ANY31(I444ToARGBRow_Any_NEON, I444ToARGBRow_NEON, 0, 0, 4, 7) -ANY31(I422ToARGBRow_Any_NEON, I422ToARGBRow_NEON, 1, 0, 4, 7) -ANY31(I411ToARGBRow_Any_NEON, I411ToARGBRow_NEON, 2, 0, 4, 7) -ANY31(I422ToBGRARow_Any_NEON, I422ToBGRARow_NEON, 1, 0, 4, 7) -ANY31(I422ToABGRRow_Any_NEON, I422ToABGRRow_NEON, 1, 0, 4, 7) -ANY31(I422ToRGBARow_Any_NEON, I422ToRGBARow_NEON, 1, 0, 4, 7) -ANY31(I422ToRGB24Row_Any_NEON, I422ToRGB24Row_NEON, 1, 0, 3, 7) -ANY31(I422ToRAWRow_Any_NEON, I422ToRAWRow_NEON, 1, 0, 3, 7) -ANY31(I422ToARGB4444Row_Any_NEON, I422ToARGB4444Row_NEON, 1, 0, 2, 7) -ANY31(I422ToARGB1555Row_Any_NEON, I422ToARGB1555Row_NEON, 1, 0, 2, 7) -ANY31(I422ToRGB565Row_Any_NEON, I422ToRGB565Row_NEON, 1, 0, 2, 7) #endif #ifdef HAS_I422TOYUY2ROW_NEON ANY31(I422ToYUY2Row_Any_NEON, I422ToYUY2Row_NEON, 1, 1, 4, 15) @@ -116,8 +83,91 @@ ANY31(I422ToYUY2Row_Any_NEON, I422ToYUY2Row_NEON, 1, 1, 4, 15) #ifdef HAS_I422TOUYVYROW_NEON ANY31(I422ToUYVYRow_Any_NEON, I422ToUYVYRow_NEON, 1, 1, 4, 15) #endif +#ifdef HAS_BLENDPLANEROW_AVX2 +ANY31(BlendPlaneRow_Any_AVX2, BlendPlaneRow_AVX2, 0, 0, 1, 31) +#endif +#ifdef HAS_BLENDPLANEROW_SSSE3 +ANY31(BlendPlaneRow_Any_SSSE3, BlendPlaneRow_SSSE3, 0, 0, 1, 7) +#endif #undef ANY31 +// Note that odd width replication includes 444 due to implementation +// on arm that subsamples 444 to 422 
internally. +// Any 3 planes to 1 with yuvconstants +#define ANY31C(NAMEANY, ANY_SIMD, UVSHIFT, DUVSHIFT, BPP, MASK) \ + void NAMEANY(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, \ + uint8* dst_ptr, const struct YuvConstants* yuvconstants, \ + int width) { \ + SIMD_ALIGNED(uint8 temp[64 * 4]); \ + memset(temp, 0, 64 * 3); /* for YUY2 and msan */ \ + int r = width & MASK; \ + int n = width & ~MASK; \ + if (n > 0) { \ + ANY_SIMD(y_buf, u_buf, v_buf, dst_ptr, yuvconstants, n); \ + } \ + memcpy(temp, y_buf + n, r); \ + memcpy(temp + 64, u_buf + (n >> UVSHIFT), SS(r, UVSHIFT)); \ + memcpy(temp + 128, v_buf + (n >> UVSHIFT), SS(r, UVSHIFT)); \ + if (width & 1) { \ + temp[64 + SS(r, UVSHIFT)] = temp[64 + SS(r, UVSHIFT) - 1]; \ + temp[128 + SS(r, UVSHIFT)] = temp[128 + SS(r, UVSHIFT) - 1]; \ + } \ + ANY_SIMD(temp, temp + 64, temp + 128, temp + 192, \ + yuvconstants, MASK + 1); \ + memcpy(dst_ptr + (n >> DUVSHIFT) * BPP, temp + 192, \ + SS(r, DUVSHIFT) * BPP); \ + } + +#ifdef HAS_I422TOARGBROW_SSSE3 +ANY31C(I422ToARGBRow_Any_SSSE3, I422ToARGBRow_SSSE3, 1, 0, 4, 7) +#endif +#ifdef HAS_I411TOARGBROW_SSSE3 +ANY31C(I411ToARGBRow_Any_SSSE3, I411ToARGBRow_SSSE3, 2, 0, 4, 7) +#endif +#ifdef HAS_I444TOARGBROW_SSSE3 +ANY31C(I444ToARGBRow_Any_SSSE3, I444ToARGBRow_SSSE3, 0, 0, 4, 7) +ANY31C(I422ToRGBARow_Any_SSSE3, I422ToRGBARow_SSSE3, 1, 0, 4, 7) +ANY31C(I422ToARGB4444Row_Any_SSSE3, I422ToARGB4444Row_SSSE3, 1, 0, 2, 7) +ANY31C(I422ToARGB1555Row_Any_SSSE3, I422ToARGB1555Row_SSSE3, 1, 0, 2, 7) +ANY31C(I422ToRGB565Row_Any_SSSE3, I422ToRGB565Row_SSSE3, 1, 0, 2, 7) +ANY31C(I422ToRGB24Row_Any_SSSE3, I422ToRGB24Row_SSSE3, 1, 0, 3, 7) +#endif // HAS_I444TOARGBROW_SSSE3 +#ifdef HAS_I422TORGB24ROW_AVX2 +ANY31C(I422ToRGB24Row_Any_AVX2, I422ToRGB24Row_AVX2, 1, 0, 3, 15) +#endif +#ifdef HAS_I422TOARGBROW_AVX2 +ANY31C(I422ToARGBRow_Any_AVX2, I422ToARGBRow_AVX2, 1, 0, 4, 15) +#endif +#ifdef HAS_I422TORGBAROW_AVX2 +ANY31C(I422ToRGBARow_Any_AVX2, I422ToRGBARow_AVX2, 1, 0, 4, 15) 
+#endif +#ifdef HAS_I444TOARGBROW_AVX2 +ANY31C(I444ToARGBRow_Any_AVX2, I444ToARGBRow_AVX2, 0, 0, 4, 15) +#endif +#ifdef HAS_I411TOARGBROW_AVX2 +ANY31C(I411ToARGBRow_Any_AVX2, I411ToARGBRow_AVX2, 2, 0, 4, 15) +#endif +#ifdef HAS_I422TOARGB4444ROW_AVX2 +ANY31C(I422ToARGB4444Row_Any_AVX2, I422ToARGB4444Row_AVX2, 1, 0, 2, 7) +#endif +#ifdef HAS_I422TOARGB1555ROW_AVX2 +ANY31C(I422ToARGB1555Row_Any_AVX2, I422ToARGB1555Row_AVX2, 1, 0, 2, 7) +#endif +#ifdef HAS_I422TORGB565ROW_AVX2 +ANY31C(I422ToRGB565Row_Any_AVX2, I422ToRGB565Row_AVX2, 1, 0, 2, 7) +#endif +#ifdef HAS_I422TOARGBROW_NEON +ANY31C(I444ToARGBRow_Any_NEON, I444ToARGBRow_NEON, 0, 0, 4, 7) +ANY31C(I422ToARGBRow_Any_NEON, I422ToARGBRow_NEON, 1, 0, 4, 7) +ANY31C(I411ToARGBRow_Any_NEON, I411ToARGBRow_NEON, 2, 0, 4, 7) +ANY31C(I422ToRGBARow_Any_NEON, I422ToRGBARow_NEON, 1, 0, 4, 7) +ANY31C(I422ToRGB24Row_Any_NEON, I422ToRGB24Row_NEON, 1, 0, 3, 7) +ANY31C(I422ToARGB4444Row_Any_NEON, I422ToARGB4444Row_NEON, 1, 0, 2, 7) +ANY31C(I422ToARGB1555Row_Any_NEON, I422ToARGB1555Row_NEON, 1, 0, 2, 7) +ANY31C(I422ToRGB565Row_Any_NEON, I422ToRGB565Row_NEON, 1, 0, 2, 7) +#endif +#undef ANY31C + // Any 2 planes to 1. #define ANY21(NAMEANY, ANY_SIMD, UVSHIFT, SBPP, SBPP2, BPP, MASK) \ void NAMEANY(const uint8* y_buf, const uint8* uv_buf, \ @@ -136,32 +186,6 @@ ANY31(I422ToUYVYRow_Any_NEON, I422ToUYVYRow_NEON, 1, 1, 4, 15) memcpy(dst_ptr + n * BPP, temp + 128, r * BPP); \ } -// Biplanar to RGB. 
-#ifdef HAS_NV12TOARGBROW_SSSE3 -ANY21(NV12ToARGBRow_Any_SSSE3, NV12ToARGBRow_SSSE3, 1, 1, 2, 4, 7) -ANY21(NV21ToARGBRow_Any_SSSE3, NV21ToARGBRow_SSSE3, 1, 1, 2, 4, 7) -#endif -#ifdef HAS_NV12TOARGBROW_AVX2 -ANY21(NV12ToARGBRow_Any_AVX2, NV12ToARGBRow_AVX2, 1, 1, 2, 4, 15) -ANY21(NV21ToARGBRow_Any_AVX2, NV21ToARGBRow_AVX2, 1, 1, 2, 4, 15) -#endif -#ifdef HAS_NV12TOARGBROW_NEON -ANY21(NV12ToARGBRow_Any_NEON, NV12ToARGBRow_NEON, 1, 1, 2, 4, 7) -ANY21(NV21ToARGBRow_Any_NEON, NV21ToARGBRow_NEON, 1, 1, 2, 4, 7) -#endif -#ifdef HAS_NV12TORGB565ROW_SSSE3 -ANY21(NV12ToRGB565Row_Any_SSSE3, NV12ToRGB565Row_SSSE3, 1, 1, 2, 2, 7) -ANY21(NV21ToRGB565Row_Any_SSSE3, NV21ToRGB565Row_SSSE3, 1, 1, 2, 2, 7) -#endif -#ifdef HAS_NV12TORGB565ROW_AVX2 -ANY21(NV12ToRGB565Row_Any_AVX2, NV12ToRGB565Row_AVX2, 1, 1, 2, 2, 15) -ANY21(NV21ToRGB565Row_Any_AVX2, NV21ToRGB565Row_AVX2, 1, 1, 2, 2, 15) -#endif -#ifdef HAS_NV12TORGB565ROW_NEON -ANY21(NV12ToRGB565Row_Any_NEON, NV12ToRGB565Row_NEON, 1, 1, 2, 2, 7) -ANY21(NV21ToRGB565Row_Any_NEON, NV21ToRGB565Row_NEON, 1, 1, 2, 2, 7) -#endif - // Merge functions. 
#ifdef HAS_MERGEUVROW_SSE2 ANY21(MergeUVRow_Any_SSE2, MergeUVRow_SSE2, 0, 1, 1, 2, 15) @@ -221,6 +245,55 @@ ANY21(SobelXYRow_Any_NEON, SobelXYRow_NEON, 0, 1, 1, 4, 7) #endif #undef ANY21 +// Any 2 planes to 1 with yuvconstants +#define ANY21C(NAMEANY, ANY_SIMD, UVSHIFT, SBPP, SBPP2, BPP, MASK) \ + void NAMEANY(const uint8* y_buf, const uint8* uv_buf, \ + uint8* dst_ptr, const struct YuvConstants* yuvconstants, \ + int width) { \ + SIMD_ALIGNED(uint8 temp[64 * 3]); \ + memset(temp, 0, 64 * 2); /* for msan */ \ + int r = width & MASK; \ + int n = width & ~MASK; \ + if (n > 0) { \ + ANY_SIMD(y_buf, uv_buf, dst_ptr, yuvconstants, n); \ + } \ + memcpy(temp, y_buf + n * SBPP, r * SBPP); \ + memcpy(temp + 64, uv_buf + (n >> UVSHIFT) * SBPP2, \ + SS(r, UVSHIFT) * SBPP2); \ + ANY_SIMD(temp, temp + 64, temp + 128, yuvconstants, MASK + 1); \ + memcpy(dst_ptr + n * BPP, temp + 128, r * BPP); \ + } + +// Biplanar to RGB. +#ifdef HAS_NV12TOARGBROW_SSSE3 +ANY21C(NV12ToARGBRow_Any_SSSE3, NV12ToARGBRow_SSSE3, 1, 1, 2, 4, 7) +#endif +#ifdef HAS_NV12TOARGBROW_AVX2 +ANY21C(NV12ToARGBRow_Any_AVX2, NV12ToARGBRow_AVX2, 1, 1, 2, 4, 15) +#endif +#ifdef HAS_NV12TOARGBROW_NEON +ANY21C(NV12ToARGBRow_Any_NEON, NV12ToARGBRow_NEON, 1, 1, 2, 4, 7) +#endif +#ifdef HAS_NV21TOARGBROW_SSSE3 +ANY21C(NV21ToARGBRow_Any_SSSE3, NV21ToARGBRow_SSSE3, 1, 1, 2, 4, 7) +#endif +#ifdef HAS_NV21TOARGBROW_AVX2 +ANY21C(NV21ToARGBRow_Any_AVX2, NV21ToARGBRow_AVX2, 1, 1, 2, 4, 15) +#endif +#ifdef HAS_NV21TOARGBROW_NEON +ANY21C(NV21ToARGBRow_Any_NEON, NV21ToARGBRow_NEON, 1, 1, 2, 4, 7) +#endif +#ifdef HAS_NV12TORGB565ROW_SSSE3 +ANY21C(NV12ToRGB565Row_Any_SSSE3, NV12ToRGB565Row_SSSE3, 1, 1, 2, 2, 7) +#endif +#ifdef HAS_NV12TORGB565ROW_AVX2 +ANY21C(NV12ToRGB565Row_Any_AVX2, NV12ToRGB565Row_AVX2, 1, 1, 2, 2, 15) +#endif +#ifdef HAS_NV12TORGB565ROW_NEON +ANY21C(NV12ToRGB565Row_Any_NEON, NV12ToRGB565Row_NEON, 1, 1, 2, 2, 7) +#endif +#undef ANY21C + // Any 1 to 1. 
#define ANY11(NAMEANY, ANY_SIMD, UVSHIFT, SBPP, BPP, MASK) \ void NAMEANY(const uint8* src_ptr, uint8* dst_ptr, int width) { \ @@ -252,8 +325,10 @@ ANY11(ARGBToRGB565Row_Any_SSE2, ARGBToRGB565Row_SSE2, 0, 4, 2, 3) ANY11(ARGBToARGB1555Row_Any_SSE2, ARGBToARGB1555Row_SSE2, 0, 4, 2, 3) ANY11(ARGBToARGB4444Row_Any_SSE2, ARGBToARGB4444Row_SSE2, 0, 4, 2, 3) #endif -#if defined(HAS_ARGBTOARGB4444ROW_AVX2) +#if defined(HAS_ARGBTORGB565ROW_AVX2) ANY11(ARGBToRGB565Row_Any_AVX2, ARGBToRGB565Row_AVX2, 0, 4, 2, 7) +#endif +#if defined(HAS_ARGBTOARGB4444ROW_AVX2) ANY11(ARGBToARGB1555Row_Any_AVX2, ARGBToARGB1555Row_AVX2, 0, 4, 2, 7) ANY11(ARGBToARGB4444Row_Any_AVX2, ARGBToARGB4444Row_AVX2, 0, 4, 2, 7) #endif @@ -269,15 +344,16 @@ ANY11(I400ToARGBRow_Any_SSE2, I400ToARGBRow_SSE2, 0, 1, 4, 7) #if defined(HAS_I400TOARGBROW_AVX2) ANY11(I400ToARGBRow_Any_AVX2, I400ToARGBRow_AVX2, 0, 1, 4, 15) #endif -#if defined(HAS_YUY2TOARGBROW_SSSE3) -ANY11(YUY2ToARGBRow_Any_SSSE3, YUY2ToARGBRow_SSSE3, 1, 4, 4, 15) -ANY11(UYVYToARGBRow_Any_SSSE3, UYVYToARGBRow_SSSE3, 1, 4, 4, 15) +#if defined(HAS_RGB24TOARGBROW_SSSE3) ANY11(RGB24ToARGBRow_Any_SSSE3, RGB24ToARGBRow_SSSE3, 0, 3, 4, 15) ANY11(RAWToARGBRow_Any_SSSE3, RAWToARGBRow_SSSE3, 0, 3, 4, 15) ANY11(RGB565ToARGBRow_Any_SSE2, RGB565ToARGBRow_SSE2, 0, 2, 4, 7) ANY11(ARGB1555ToARGBRow_Any_SSE2, ARGB1555ToARGBRow_SSE2, 0, 2, 4, 7) ANY11(ARGB4444ToARGBRow_Any_SSE2, ARGB4444ToARGBRow_SSE2, 0, 2, 4, 7) #endif +#if defined(HAS_RAWTORGB24ROW_SSSE3) +ANY11(RAWToRGB24Row_Any_SSSE3, RAWToRGB24Row_SSSE3, 0, 3, 3, 7) +#endif #if defined(HAS_RGB565TOARGBROW_AVX2) ANY11(RGB565ToARGBRow_Any_AVX2, RGB565ToARGBRow_AVX2, 0, 2, 4, 15) #endif @@ -287,10 +363,6 @@ ANY11(ARGB1555ToARGBRow_Any_AVX2, ARGB1555ToARGBRow_AVX2, 0, 2, 4, 15) #if defined(HAS_ARGB4444TOARGBROW_AVX2) ANY11(ARGB4444ToARGBRow_Any_AVX2, ARGB4444ToARGBRow_AVX2, 0, 2, 4, 15) #endif -#if defined(HAS_YUY2TOARGBROW_AVX2) -ANY11(YUY2ToARGBRow_Any_AVX2, YUY2ToARGBRow_AVX2, 1, 4, 4, 31) 
-ANY11(UYVYToARGBRow_Any_AVX2, UYVYToARGBRow_AVX2, 1, 4, 4, 31) -#endif #if defined(HAS_ARGBTORGB24ROW_NEON) ANY11(ARGBToRGB24Row_Any_NEON, ARGBToRGB24Row_NEON, 0, 4, 3, 7) ANY11(ARGBToRAWRow_Any_NEON, ARGBToRAWRow_NEON, 0, 4, 3, 7) @@ -299,8 +371,9 @@ ANY11(ARGBToARGB1555Row_Any_NEON, ARGBToARGB1555Row_NEON, 0, 4, 2, 7) ANY11(ARGBToARGB4444Row_Any_NEON, ARGBToARGB4444Row_NEON, 0, 4, 2, 7) ANY11(J400ToARGBRow_Any_NEON, J400ToARGBRow_NEON, 0, 1, 4, 7) ANY11(I400ToARGBRow_Any_NEON, I400ToARGBRow_NEON, 0, 1, 4, 7) -ANY11(YUY2ToARGBRow_Any_NEON, YUY2ToARGBRow_NEON, 1, 4, 4, 7) -ANY11(UYVYToARGBRow_Any_NEON, UYVYToARGBRow_NEON, 1, 4, 4, 7) +#endif +#if defined(HAS_RAWTORGB24ROW_NEON) +ANY11(RAWToRGB24Row_Any_NEON, RAWToRGB24Row_NEON, 0, 3, 3, 7) #endif #ifdef HAS_ARGBTOYROW_AVX2 ANY11(ARGBToYRow_Any_AVX2, ARGBToYRow_AVX2, 0, 4, 1, 31) @@ -381,9 +454,6 @@ ANY11(ARGB4444ToARGBRow_Any_NEON, ARGB4444ToARGBRow_NEON, 0, 2, 4, 7) #ifdef HAS_ARGBATTENUATEROW_SSSE3 ANY11(ARGBAttenuateRow_Any_SSSE3, ARGBAttenuateRow_SSSE3, 0, 4, 4, 3) #endif -#ifdef HAS_ARGBATTENUATEROW_SSE2 -ANY11(ARGBAttenuateRow_Any_SSE2, ARGBAttenuateRow_SSE2, 0, 4, 4, 3) -#endif #ifdef HAS_ARGBUNATTENUATEROW_SSE2 ANY11(ARGBUnattenuateRow_Any_SSE2, ARGBUnattenuateRow_SSE2, 0, 4, 4, 3) #endif @@ -396,8 +466,44 @@ ANY11(ARGBUnattenuateRow_Any_AVX2, ARGBUnattenuateRow_AVX2, 0, 4, 4, 7) #ifdef HAS_ARGBATTENUATEROW_NEON ANY11(ARGBAttenuateRow_Any_NEON, ARGBAttenuateRow_NEON, 0, 4, 4, 7) #endif +#ifdef HAS_ARGBEXTRACTALPHAROW_SSE2 +ANY11(ARGBExtractAlphaRow_Any_SSE2, ARGBExtractAlphaRow_SSE2, 0, 4, 1, 7) +#endif +#ifdef HAS_ARGBEXTRACTALPHAROW_NEON +ANY11(ARGBExtractAlphaRow_Any_NEON, ARGBExtractAlphaRow_NEON, 0, 4, 1, 15) +#endif #undef ANY11 +// Any 1 to 1 blended. Destination is read, modify, write. 
+#define ANY11B(NAMEANY, ANY_SIMD, UVSHIFT, SBPP, BPP, MASK) \ + void NAMEANY(const uint8* src_ptr, uint8* dst_ptr, int width) { \ + SIMD_ALIGNED(uint8 temp[128 * 2]); \ + memset(temp, 0, 128 * 2); /* for YUY2 and msan */ \ + int r = width & MASK; \ + int n = width & ~MASK; \ + if (n > 0) { \ + ANY_SIMD(src_ptr, dst_ptr, n); \ + } \ + memcpy(temp, src_ptr + (n >> UVSHIFT) * SBPP, SS(r, UVSHIFT) * SBPP); \ + memcpy(temp + 128, dst_ptr + n * BPP, r * BPP); \ + ANY_SIMD(temp, temp + 128, MASK + 1); \ + memcpy(dst_ptr + n * BPP, temp + 128, r * BPP); \ + } + +#ifdef HAS_ARGBCOPYALPHAROW_AVX2 +ANY11B(ARGBCopyAlphaRow_Any_AVX2, ARGBCopyAlphaRow_AVX2, 0, 4, 4, 15) +#endif +#ifdef HAS_ARGBCOPYALPHAROW_SSE2 +ANY11B(ARGBCopyAlphaRow_Any_SSE2, ARGBCopyAlphaRow_SSE2, 0, 4, 4, 7) +#endif +#ifdef HAS_ARGBCOPYYTOALPHAROW_AVX2 +ANY11B(ARGBCopyYToAlphaRow_Any_AVX2, ARGBCopyYToAlphaRow_AVX2, 0, 1, 4, 15) +#endif +#ifdef HAS_ARGBCOPYYTOALPHAROW_SSE2 +ANY11B(ARGBCopyYToAlphaRow_Any_SSE2, ARGBCopyYToAlphaRow_SSE2, 0, 1, 4, 7) +#endif +#undef ANY11B + // Any 1 to 1 with parameter. 
#define ANY11P(NAMEANY, ANY_SIMD, T, SBPP, BPP, MASK) \ void NAMEANY(const uint8* src_ptr, uint8* dst_ptr, \ @@ -440,6 +546,35 @@ ANY11P(ARGBShuffleRow_Any_NEON, ARGBShuffleRow_NEON, const uint8*, 4, 4, 3) #endif #undef ANY11P +// Any 1 to 1 with yuvconstants +#define ANY11C(NAMEANY, ANY_SIMD, UVSHIFT, SBPP, BPP, MASK) \ + void NAMEANY(const uint8* src_ptr, uint8* dst_ptr, \ + const struct YuvConstants* yuvconstants, int width) { \ + SIMD_ALIGNED(uint8 temp[128 * 2]); \ + memset(temp, 0, 128); /* for YUY2 and msan */ \ + int r = width & MASK; \ + int n = width & ~MASK; \ + if (n > 0) { \ + ANY_SIMD(src_ptr, dst_ptr, yuvconstants, n); \ + } \ + memcpy(temp, src_ptr + (n >> UVSHIFT) * SBPP, SS(r, UVSHIFT) * SBPP); \ + ANY_SIMD(temp, temp + 128, yuvconstants, MASK + 1); \ + memcpy(dst_ptr + n * BPP, temp + 128, r * BPP); \ + } +#if defined(HAS_YUY2TOARGBROW_SSSE3) +ANY11C(YUY2ToARGBRow_Any_SSSE3, YUY2ToARGBRow_SSSE3, 1, 4, 4, 15) +ANY11C(UYVYToARGBRow_Any_SSSE3, UYVYToARGBRow_SSSE3, 1, 4, 4, 15) +#endif +#if defined(HAS_YUY2TOARGBROW_AVX2) +ANY11C(YUY2ToARGBRow_Any_AVX2, YUY2ToARGBRow_AVX2, 1, 4, 4, 31) +ANY11C(UYVYToARGBRow_Any_AVX2, UYVYToARGBRow_AVX2, 1, 4, 4, 31) +#endif +#if defined(HAS_YUY2TOARGBROW_NEON) +ANY11C(YUY2ToARGBRow_Any_NEON, YUY2ToARGBRow_NEON, 1, 4, 4, 7) +ANY11C(UYVYToARGBRow_Any_NEON, UYVYToARGBRow_NEON, 1, 4, 4, 7) +#endif +#undef ANY11C + // Any 1 to 1 interpolate. Takes 2 rows of source via stride. 
#define ANY11T(NAMEANY, ANY_SIMD, SBPP, BPP, MASK) \ void NAMEANY(uint8* dst_ptr, const uint8* src_ptr, \ @@ -464,14 +599,11 @@ ANY11T(InterpolateRow_Any_AVX2, InterpolateRow_AVX2, 1, 1, 31) #ifdef HAS_INTERPOLATEROW_SSSE3 ANY11T(InterpolateRow_Any_SSSE3, InterpolateRow_SSSE3, 1, 1, 15) #endif -#ifdef HAS_INTERPOLATEROW_SSE2 -ANY11T(InterpolateRow_Any_SSE2, InterpolateRow_SSE2, 1, 1, 15) -#endif #ifdef HAS_INTERPOLATEROW_NEON ANY11T(InterpolateRow_Any_NEON, InterpolateRow_NEON, 1, 1, 15) #endif -#ifdef HAS_INTERPOLATEROW_MIPS_DSPR2 -ANY11T(InterpolateRow_Any_MIPS_DSPR2, InterpolateRow_MIPS_DSPR2, 1, 1, 3) +#ifdef HAS_INTERPOLATEROW_DSPR2 +ANY11T(InterpolateRow_Any_DSPR2, InterpolateRow_DSPR2, 1, 1, 3) #endif #undef ANY11T @@ -496,9 +628,6 @@ ANY11M(MirrorRow_Any_AVX2, MirrorRow_AVX2, 1, 31) #ifdef HAS_MIRRORROW_SSSE3 ANY11M(MirrorRow_Any_SSSE3, MirrorRow_SSSE3, 1, 15) #endif -#ifdef HAS_MIRRORROW_SSE2 -ANY11M(MirrorRow_Any_SSE2, MirrorRow_SSE2, 1, 15) -#endif #ifdef HAS_MIRRORROW_NEON ANY11M(MirrorRow_Any_NEON, MirrorRow_NEON, 1, 15) #endif @@ -548,9 +677,25 @@ ANY1(ARGBSetRow_Any_NEON, ARGBSetRow_NEON, uint32, 4, 3) ANY_SIMD(src_ptr, dst_u, dst_v, n); \ } \ memcpy(temp, src_ptr + (n >> UVSHIFT) * BPP, SS(r, UVSHIFT) * BPP); \ - if ((width & 1) && BPP == 4) { /* repeat last 4 bytes for subsampler */ \ + /* repeat last 4 bytes for 422 subsampler */ \ + if ((width & 1) && BPP == 4 && DUVSHIFT == 1) { \ memcpy(temp + SS(r, UVSHIFT) * BPP, \ - temp + SS(r, UVSHIFT) * BPP - BPP, 4); \ + temp + SS(r, UVSHIFT) * BPP - BPP, BPP); \ + } \ + /* repeat last 4 - 12 bytes for 411 subsampler */ \ + if (((width & 3) == 1) && BPP == 4 && DUVSHIFT == 2) { \ + memcpy(temp + SS(r, UVSHIFT) * BPP, \ + temp + SS(r, UVSHIFT) * BPP - BPP, BPP); \ + memcpy(temp + SS(r, UVSHIFT) * BPP + BPP, \ + temp + SS(r, UVSHIFT) * BPP - BPP, BPP * 2); \ + } \ + if (((width & 3) == 2) && BPP == 4 && DUVSHIFT == 2) { \ + memcpy(temp + SS(r, UVSHIFT) * BPP, \ + temp + SS(r, UVSHIFT) * BPP - BPP * 2, BPP 
* 2); \ + } \ + if (((width & 3) == 3) && BPP == 4 && DUVSHIFT == 2) { \ + memcpy(temp + SS(r, UVSHIFT) * BPP, \ + temp + SS(r, UVSHIFT) * BPP - BPP, BPP); \ } \ ANY_SIMD(temp, temp + 128, temp + 256, MASK + 1); \ memcpy(dst_u + (n >> DUVSHIFT), temp + 128, SS(r, DUVSHIFT)); \ @@ -566,8 +711,8 @@ ANY12(SplitUVRow_Any_AVX2, SplitUVRow_AVX2, 0, 2, 0, 31) #ifdef HAS_SPLITUVROW_NEON ANY12(SplitUVRow_Any_NEON, SplitUVRow_NEON, 0, 2, 0, 15) #endif -#ifdef HAS_SPLITUVROW_MIPS_DSPR2 -ANY12(SplitUVRow_Any_MIPS_DSPR2, SplitUVRow_MIPS_DSPR2, 0, 2, 0, 15) +#ifdef HAS_SPLITUVROW_DSPR2 +ANY12(SplitUVRow_Any_DSPR2, SplitUVRow_DSPR2, 0, 2, 0, 15) #endif #ifdef HAS_ARGBTOUV444ROW_SSSE3 ANY12(ARGBToUV444Row_Any_SSSE3, ARGBToUV444Row_SSSE3, 0, 4, 0, 15) @@ -576,16 +721,12 @@ ANY12(ARGBToUV444Row_Any_SSSE3, ARGBToUV444Row_SSSE3, 0, 4, 0, 15) ANY12(YUY2ToUV422Row_Any_AVX2, YUY2ToUV422Row_AVX2, 1, 4, 1, 31) ANY12(UYVYToUV422Row_Any_AVX2, UYVYToUV422Row_AVX2, 1, 4, 1, 31) #endif -#ifdef HAS_ARGBTOUV422ROW_SSSE3 -ANY12(ARGBToUV422Row_Any_SSSE3, ARGBToUV422Row_SSSE3, 0, 4, 1, 15) -#endif #ifdef HAS_YUY2TOUV422ROW_SSE2 ANY12(YUY2ToUV422Row_Any_SSE2, YUY2ToUV422Row_SSE2, 1, 4, 1, 15) ANY12(UYVYToUV422Row_Any_SSE2, UYVYToUV422Row_SSE2, 1, 4, 1, 15) #endif #ifdef HAS_YUY2TOUV422ROW_NEON ANY12(ARGBToUV444Row_Any_NEON, ARGBToUV444Row_NEON, 0, 4, 0, 7) -ANY12(ARGBToUV422Row_Any_NEON, ARGBToUV422Row_NEON, 0, 4, 1, 15) ANY12(ARGBToUV411Row_Any_NEON, ARGBToUV411Row_NEON, 0, 4, 2, 31) ANY12(YUY2ToUV422Row_Any_NEON, YUY2ToUV422Row_NEON, 1, 4, 1, 15) ANY12(UYVYToUV422Row_Any_NEON, UYVYToUV422Row_NEON, 1, 4, 1, 15) @@ -607,11 +748,11 @@ ANY12(UYVYToUV422Row_Any_NEON, UYVYToUV422Row_NEON, 1, 4, 1, 15) memcpy(temp, src_ptr + (n >> UVSHIFT) * BPP, SS(r, UVSHIFT) * BPP); \ memcpy(temp + 128, src_ptr + src_stride_ptr + (n >> UVSHIFT) * BPP, \ SS(r, UVSHIFT) * BPP); \ - if ((width & 1) && BPP == 4) { /* repeat last 4 bytes for subsampler */ \ + if ((width & 1) && UVSHIFT == 0) { /* repeat last pixel for 
subsample */\ memcpy(temp + SS(r, UVSHIFT) * BPP, \ - temp + SS(r, UVSHIFT) * BPP - BPP, 4); \ + temp + SS(r, UVSHIFT) * BPP - BPP, BPP); \ memcpy(temp + 128 + SS(r, UVSHIFT) * BPP, \ - temp + 128 + SS(r, UVSHIFT) * BPP - BPP, 4); \ + temp + 128 + SS(r, UVSHIFT) * BPP - BPP, BPP); \ } \ ANY_SIMD(temp, 128, temp + 256, temp + 384, MASK + 1); \ memcpy(dst_u + (n >> 1), temp + 256, SS(r, 1)); \ @@ -621,6 +762,9 @@ ANY12(UYVYToUV422Row_Any_NEON, UYVYToUV422Row_NEON, 1, 4, 1, 15) #ifdef HAS_ARGBTOUVROW_AVX2 ANY12S(ARGBToUVRow_Any_AVX2, ARGBToUVRow_AVX2, 0, 4, 31) #endif +#ifdef HAS_ARGBTOUVJROW_AVX2 +ANY12S(ARGBToUVJRow_Any_AVX2, ARGBToUVJRow_AVX2, 0, 4, 31) +#endif #ifdef HAS_ARGBTOUVROW_SSSE3 ANY12S(ARGBToUVRow_Any_SSSE3, ARGBToUVRow_SSSE3, 0, 4, 15) ANY12S(ARGBToUVJRow_Any_SSSE3, ARGBToUVJRow_SSSE3, 0, 4, 15) diff --git a/libs/libvpx/third_party/libyuv/source/row_common.cc b/libs/libvpx/third_party/libyuv/source/row_common.cc index 49875894fe..aefa38c495 100644 --- a/libs/libvpx/third_party/libyuv/source/row_common.cc +++ b/libs/libvpx/third_party/libyuv/source/row_common.cc @@ -100,6 +100,20 @@ void RAWToARGBRow_C(const uint8* src_raw, uint8* dst_argb, int width) { } } +void RAWToRGB24Row_C(const uint8* src_raw, uint8* dst_rgb24, int width) { + int x; + for (x = 0; x < width; ++x) { + uint8 r = src_raw[0]; + uint8 g = src_raw[1]; + uint8 b = src_raw[2]; + dst_rgb24[0] = b; + dst_rgb24[1] = g; + dst_rgb24[2] = r; + dst_rgb24 += 3; + src_raw += 3; + } +} + void RGB565ToARGBRow_C(const uint8* src_rgb565, uint8* dst_argb, int width) { int x; for (x = 0; x < width; ++x) { @@ -419,28 +433,6 @@ void NAME ## ToUVJRow_C(const uint8* src_rgb0, int src_stride_rgb, \ MAKEROWYJ(ARGB, 2, 1, 0, 4) #undef MAKEROWYJ -void ARGBToUVJ422Row_C(const uint8* src_argb, - uint8* dst_u, uint8* dst_v, int width) { - int x; - for (x = 0; x < width - 1; x += 2) { - uint8 ab = (src_argb[0] + src_argb[4]) >> 1; - uint8 ag = (src_argb[1] + src_argb[5]) >> 1; - uint8 ar = (src_argb[2] + 
src_argb[6]) >> 1; - dst_u[0] = RGBToUJ(ar, ag, ab); - dst_v[0] = RGBToVJ(ar, ag, ab); - src_argb += 8; - dst_u += 1; - dst_v += 1; - } - if (width & 1) { - uint8 ab = src_argb[0]; - uint8 ag = src_argb[1]; - uint8 ar = src_argb[2]; - dst_u[0] = RGBToUJ(ar, ag, ab); - dst_v[0] = RGBToVJ(ar, ag, ab); - } -} - void RGB565ToYRow_C(const uint8* src_rgb565, uint8* dst_y, int width) { int x; for (x = 0; x < width; ++x) { @@ -644,28 +636,6 @@ void ARGBToUV444Row_C(const uint8* src_argb, } } -void ARGBToUV422Row_C(const uint8* src_argb, - uint8* dst_u, uint8* dst_v, int width) { - int x; - for (x = 0; x < width - 1; x += 2) { - uint8 ab = (src_argb[0] + src_argb[4]) >> 1; - uint8 ag = (src_argb[1] + src_argb[5]) >> 1; - uint8 ar = (src_argb[2] + src_argb[6]) >> 1; - dst_u[0] = RGBToU(ar, ag, ab); - dst_v[0] = RGBToV(ar, ag, ab); - src_argb += 8; - dst_u += 1; - dst_v += 1; - } - if (width & 1) { - uint8 ab = src_argb[0]; - uint8 ag = src_argb[1]; - uint8 ar = src_argb[2]; - dst_u[0] = RGBToU(ar, ag, ab); - dst_v[0] = RGBToV(ar, ag, ab); - } -} - void ARGBToUV411Row_C(const uint8* src_argb, uint8* dst_u, uint8* dst_v, int width) { int x; @@ -679,10 +649,11 @@ void ARGBToUV411Row_C(const uint8* src_argb, dst_u += 1; dst_v += 1; } + // Odd width handling mimics 'any' function which replicates last pixel. 
if ((width & 3) == 3) { - uint8 ab = (src_argb[0] + src_argb[4] + src_argb[8]) / 3; - uint8 ag = (src_argb[1] + src_argb[5] + src_argb[9]) / 3; - uint8 ar = (src_argb[2] + src_argb[6] + src_argb[10]) / 3; + uint8 ab = (src_argb[0] + src_argb[4] + src_argb[8] + src_argb[8]) >> 2; + uint8 ag = (src_argb[1] + src_argb[5] + src_argb[9] + src_argb[9]) >> 2; + uint8 ar = (src_argb[2] + src_argb[6] + src_argb[10] + src_argb[10]) >> 2; dst_u[0] = RGBToU(ar, ag, ab); dst_v[0] = RGBToV(ar, ag, ab); } else if ((width & 3) == 2) { @@ -994,13 +965,15 @@ void J400ToARGBRow_C(const uint8* src_y, uint8* dst_argb, int width) { } } +// TODO(fbarchard): Unify these structures to be platform independent. +// TODO(fbarchard): Generate SIMD structures from float matrix. + // BT.601 YUV to RGB reference // R = (Y - 16) * 1.164 - V * -1.596 // G = (Y - 16) * 1.164 - U * 0.391 - V * 0.813 // B = (Y - 16) * 1.164 - U * -2.018 // Y contribution to R,G,B. Scale and bias. -// TODO(fbarchard): Consider moving constants into a common header. #define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */ #define YGB -1160 /* 1.164 * 64 * -16 + 64 / 2 */ @@ -1011,19 +984,295 @@ void J400ToARGBRow_C(const uint8* src_y, uint8* dst_argb, int width) { #define VR -102 /* round(-1.596 * 64) */ // Bias values to subtract 16 from Y and 128 from U and V. 
-#define BB (UB * 128 + YGB) +#define BB (UB * 128 + YGB) #define BG (UG * 128 + VG * 128 + YGB) -#define BR (VR * 128 + YGB) +#define BR (VR * 128 + YGB) + +#if defined(__aarch64__) // 64 bit arm +const struct YuvConstants SIMD_ALIGNED(kYuvI601Constants) = { + { -UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR }, + { -UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR }, + { UG, VG, UG, VG, UG, VG, UG, VG }, + { UG, VG, UG, VG, UG, VG, UG, VG }, + { BB, BG, BR, 0, 0, 0, 0, 0 }, + { 0x0101 * YG, 0, 0, 0 } +}; +const struct YuvConstants SIMD_ALIGNED(kYvuI601Constants) = { + { -VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB }, + { -VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB }, + { VG, UG, VG, UG, VG, UG, VG, UG }, + { VG, UG, VG, UG, VG, UG, VG, UG }, + { BR, BG, BB, 0, 0, 0, 0, 0 }, + { 0x0101 * YG, 0, 0, 0 } +}; +#elif defined(__arm__) // 32 bit arm +const struct YuvConstants SIMD_ALIGNED(kYuvI601Constants) = { + { -UB, -UB, -UB, -UB, -VR, -VR, -VR, -VR, 0, 0, 0, 0, 0, 0, 0, 0 }, + { UG, UG, UG, UG, VG, VG, VG, VG, 0, 0, 0, 0, 0, 0, 0, 0 }, + { BB, BG, BR, 0, 0, 0, 0, 0 }, + { 0x0101 * YG, 0, 0, 0 } +}; +const struct YuvConstants SIMD_ALIGNED(kYvuI601Constants) = { + { -VR, -VR, -VR, -VR, -UB, -UB, -UB, -UB, 0, 0, 0, 0, 0, 0, 0, 0 }, + { VG, VG, VG, VG, UG, UG, UG, UG, 0, 0, 0, 0, 0, 0, 0, 0 }, + { BR, BG, BB, 0, 0, 0, 0, 0 }, + { 0x0101 * YG, 0, 0, 0 } +}; +#else +const struct YuvConstants SIMD_ALIGNED(kYuvI601Constants) = { + { UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, + UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0 }, + { UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, + UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG }, + { 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, + 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR }, + { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, + { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, + { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, 
+ { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } +}; +const struct YuvConstants SIMD_ALIGNED(kYvuI601Constants) = { + { VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, + VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0 }, + { VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, + VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG }, + { 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, + 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB }, + { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, + { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, + { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, + { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } +}; +#endif + +#undef BB +#undef BG +#undef BR +#undef YGB +#undef UB +#undef UG +#undef VG +#undef VR +#undef YG + +// JPEG YUV to RGB reference +// * R = Y - V * -1.40200 +// * G = Y - U * 0.34414 - V * 0.71414 +// * B = Y - U * -1.77200 + +// Y contribution to R,G,B. Scale and bias. +#define YG 16320 /* round(1.000 * 64 * 256 * 256 / 257) */ +#define YGB 32 /* 64 / 2 */ + +// U and V contributions to R,G,B. +#define UB -113 /* round(-1.77200 * 64) */ +#define UG 22 /* round(0.34414 * 64) */ +#define VG 46 /* round(0.71414 * 64) */ +#define VR -90 /* round(-1.40200 * 64) */ + +// Bias values to round, and subtract 128 from U and V. 
+#define BB (UB * 128 + YGB) +#define BG (UG * 128 + VG * 128 + YGB) +#define BR (VR * 128 + YGB) + +#if defined(__aarch64__) +const struct YuvConstants SIMD_ALIGNED(kYuvJPEGConstants) = { + { -UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR }, + { -UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR }, + { UG, VG, UG, VG, UG, VG, UG, VG }, + { UG, VG, UG, VG, UG, VG, UG, VG }, + { BB, BG, BR, 0, 0, 0, 0, 0 }, + { 0x0101 * YG, 0, 0, 0 } +}; +const struct YuvConstants SIMD_ALIGNED(kYvuJPEGConstants) = { + { -VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB }, + { -VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB }, + { VG, UG, VG, UG, VG, UG, VG, UG }, + { VG, UG, VG, UG, VG, UG, VG, UG }, + { BR, BG, BB, 0, 0, 0, 0, 0 }, + { 0x0101 * YG, 0, 0, 0 } +}; +#elif defined(__arm__) +const struct YuvConstants SIMD_ALIGNED(kYuvJPEGConstants) = { + { -UB, -UB, -UB, -UB, -VR, -VR, -VR, -VR, 0, 0, 0, 0, 0, 0, 0, 0 }, + { UG, UG, UG, UG, VG, VG, VG, VG, 0, 0, 0, 0, 0, 0, 0, 0 }, + { BB, BG, BR, 0, 0, 0, 0, 0 }, + { 0x0101 * YG, 0, 0, 0 } +}; +const struct YuvConstants SIMD_ALIGNED(kYvuJPEGConstants) = { + { -VR, -VR, -VR, -VR, -UB, -UB, -UB, -UB, 0, 0, 0, 0, 0, 0, 0, 0 }, + { VG, VG, VG, VG, UG, UG, UG, UG, 0, 0, 0, 0, 0, 0, 0, 0 }, + { BR, BG, BB, 0, 0, 0, 0, 0 }, + { 0x0101 * YG, 0, 0, 0 } +}; +#else +const struct YuvConstants SIMD_ALIGNED(kYuvJPEGConstants) = { + { UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, + UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0 }, + { UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, + UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG }, + { 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, + 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR }, + { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, + { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, + { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, + { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } +}; +const 
struct YuvConstants SIMD_ALIGNED(kYvuJPEGConstants) = { + { VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, + VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0 }, + { VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, + VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG }, + { 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, + 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB }, + { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, + { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, + { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, + { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } +}; +#endif + +#undef BB +#undef BG +#undef BR +#undef YGB +#undef UB +#undef UG +#undef VG +#undef VR +#undef YG + +// BT.709 YUV to RGB reference +// * R = Y - V * -1.28033 +// * G = Y - U * 0.21482 - V * 0.38059 +// * B = Y - U * -2.12798 + +// Y contribution to R,G,B. Scale and bias. +#define YG 16320 /* round(1.000 * 64 * 256 * 256 / 257) */ +#define YGB 32 /* 64 / 2 */ + +// TODO(fbarchard): Find way to express 2.12 instead of 2.0. +// U and V contributions to R,G,B. +#define UB -128 /* max(-128, round(-2.12798 * 64)) */ +#define UG 14 /* round(0.21482 * 64) */ +#define VG 24 /* round(0.38059 * 64) */ +#define VR -82 /* round(-1.28033 * 64) */ + +// Bias values to round, and subtract 128 from U and V. 
+#define BB (UB * 128 + YGB) +#define BG (UG * 128 + VG * 128 + YGB) +#define BR (VR * 128 + YGB) + +#if defined(__aarch64__) +const struct YuvConstants SIMD_ALIGNED(kYuvH709Constants) = { + { -UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR }, + { -UB, -VR, -UB, -VR, -UB, -VR, -UB, -VR }, + { UG, VG, UG, VG, UG, VG, UG, VG }, + { UG, VG, UG, VG, UG, VG, UG, VG }, + { BB, BG, BR, 0, 0, 0, 0, 0 }, + { 0x0101 * YG, 0, 0, 0 } +}; +const struct YuvConstants SIMD_ALIGNED(kYvuH709Constants) = { + { -VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB }, + { -VR, -UB, -VR, -UB, -VR, -UB, -VR, -UB }, + { VG, UG, VG, UG, VG, UG, VG, UG }, + { VG, UG, VG, UG, VG, UG, VG, UG }, + { BR, BG, BB, 0, 0, 0, 0, 0 }, + { 0x0101 * YG, 0, 0, 0 } +}; +#elif defined(__arm__) +const struct YuvConstants SIMD_ALIGNED(kYuvH709Constants) = { + { -UB, -UB, -UB, -UB, -VR, -VR, -VR, -VR, 0, 0, 0, 0, 0, 0, 0, 0 }, + { UG, UG, UG, UG, VG, VG, VG, VG, 0, 0, 0, 0, 0, 0, 0, 0 }, + { BB, BG, BR, 0, 0, 0, 0, 0 }, + { 0x0101 * YG, 0, 0, 0 } +}; +const struct YuvConstants SIMD_ALIGNED(kYvuH709Constants) = { + { -VR, -VR, -VR, -VR, -UB, -UB, -UB, -UB, 0, 0, 0, 0, 0, 0, 0, 0 }, + { VG, VG, VG, VG, UG, UG, UG, UG, 0, 0, 0, 0, 0, 0, 0, 0 }, + { BR, BG, BB, 0, 0, 0, 0, 0 }, + { 0x0101 * YG, 0, 0, 0 } +}; +#else +const struct YuvConstants SIMD_ALIGNED(kYuvH709Constants) = { + { UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, + UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0 }, + { UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, + UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG }, + { 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, + 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR }, + { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, + { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, + { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, + { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } +}; +const 
struct YuvConstants SIMD_ALIGNED(kYvuH709Constants) = { + { VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, + VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0 }, + { VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, + VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG }, + { 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, + 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB }, + { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, + { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, + { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, + { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } +}; +#endif + +#undef BB +#undef BG +#undef BR +#undef YGB +#undef UB +#undef UG +#undef VG +#undef VR +#undef YG // C reference code that mimics the YUV assembly. static __inline void YuvPixel(uint8 y, uint8 u, uint8 v, - uint8* b, uint8* g, uint8* r) { - uint32 y1 = (uint32)(y * 0x0101 * YG) >> 16; - *b = Clamp((int32)(-(u * UB) + y1 + BB) >> 6); - *g = Clamp((int32)(-(v * VG + u * UG) + y1 + BG) >> 6); - *r = Clamp((int32)(-(v * VR)+ y1 + BR) >> 6); + uint8* b, uint8* g, uint8* r, + const struct YuvConstants* yuvconstants) { +#if defined(__aarch64__) + int ub = -yuvconstants->kUVToRB[0]; + int ug = yuvconstants->kUVToG[0]; + int vg = yuvconstants->kUVToG[1]; + int vr = -yuvconstants->kUVToRB[1]; + int bb = yuvconstants->kUVBiasBGR[0]; + int bg = yuvconstants->kUVBiasBGR[1]; + int br = yuvconstants->kUVBiasBGR[2]; + int yg = yuvconstants->kYToRgb[0] / 0x0101; +#elif defined(__arm__) + int ub = -yuvconstants->kUVToRB[0]; + int ug = yuvconstants->kUVToG[0]; + int vg = yuvconstants->kUVToG[4]; + int vr = -yuvconstants->kUVToRB[4]; + int bb = yuvconstants->kUVBiasBGR[0]; + int bg = yuvconstants->kUVBiasBGR[1]; + int br = yuvconstants->kUVBiasBGR[2]; + int yg = yuvconstants->kYToRgb[0] / 0x0101; +#else + int ub = yuvconstants->kUVToB[0]; + int ug = 
yuvconstants->kUVToG[0]; + int vg = yuvconstants->kUVToG[1]; + int vr = yuvconstants->kUVToR[1]; + int bb = yuvconstants->kUVBiasB[0]; + int bg = yuvconstants->kUVBiasG[0]; + int br = yuvconstants->kUVBiasR[0]; + int yg = yuvconstants->kYToRgb[0]; +#endif + + uint32 y1 = (uint32)(y * 0x0101 * yg) >> 16; + *b = Clamp((int32)(-(u * ub) + y1 + bb) >> 6); + *g = Clamp((int32)(-(u * ug + v * vg) + y1 + bg) >> 6); + *r = Clamp((int32) (-(v * vr) + y1 + br) >> 6); } +// Y contribution to R,G,B. Scale and bias. +#define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */ +#define YGB -1160 /* 1.164 * 64 * -16 + 64 / 2 */ + // C reference code that mimics the YUV assembly. static __inline void YPixel(uint8 y, uint8* b, uint8* g, uint8* r) { uint32 y1 = (uint32)(y * 0x0101 * YG) >> 16; @@ -1034,53 +1283,6 @@ static __inline void YPixel(uint8 y, uint8* b, uint8* g, uint8* r) { #undef YG #undef YGB -#undef UB -#undef UG -#undef VG -#undef VR -#undef BB -#undef BG -#undef BR - -// JPEG YUV to RGB reference -// * R = Y - V * -1.40200 -// * G = Y - U * 0.34414 - V * 0.71414 -// * B = Y - U * -1.77200 - -// Y contribution to R,G,B. Scale and bias. -// TODO(fbarchard): Consider moving constants into a common header. -#define YGJ 16320 /* round(1.000 * 64 * 256 * 256 / 257) */ -#define YGBJ 32 /* 64 / 2 */ - -// U and V contributions to R,G,B. -#define UBJ -113 /* round(-1.77200 * 64) */ -#define UGJ 22 /* round(0.34414 * 64) */ -#define VGJ 46 /* round(0.71414 * 64) */ -#define VRJ -90 /* round(-1.40200 * 64) */ - -// Bias values to subtract 16 from Y and 128 from U and V. -#define BBJ (UBJ * 128 + YGBJ) -#define BGJ (UGJ * 128 + VGJ * 128 + YGBJ) -#define BRJ (VRJ * 128 + YGBJ) - -// C reference code that mimics the YUV assembly. 
-static __inline void YuvJPixel(uint8 y, uint8 u, uint8 v, - uint8* b, uint8* g, uint8* r) { - uint32 y1 = (uint32)(y * 0x0101 * YGJ) >> 16; - *b = Clamp((int32)(-(u * UBJ) + y1 + BBJ) >> 6); - *g = Clamp((int32)(-(v * VGJ + u * UGJ) + y1 + BGJ) >> 6); - *r = Clamp((int32)(-(v * VRJ) + y1 + BRJ) >> 6); -} - -#undef YGJ -#undef YGBJ -#undef UBJ -#undef UGJ -#undef VGJ -#undef VRJ -#undef BBJ -#undef BGJ -#undef BRJ #if !defined(LIBYUV_DISABLE_NEON) && \ (defined(__ARM_NEON__) || defined(__aarch64__) || defined(LIBYUV_NEON)) @@ -1090,14 +1292,17 @@ void I444ToARGBRow_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) { int x; for (x = 0; x < width - 1; x += 2) { uint8 u = (src_u[0] + src_u[1] + 1) >> 1; uint8 v = (src_v[0] + src_v[1] + 1) >> 1; - YuvPixel(src_y[0], u, v, rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + YuvPixel(src_y[0], u, v, rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, + yuvconstants); rgb_buf[3] = 255; - YuvPixel(src_y[1], u, v, rgb_buf + 4, rgb_buf + 5, rgb_buf + 6); + YuvPixel(src_y[1], u, v, rgb_buf + 4, rgb_buf + 5, rgb_buf + 6, + yuvconstants); rgb_buf[7] = 255; src_y += 2; src_u += 2; @@ -1106,7 +1311,8 @@ void I444ToARGBRow_C(const uint8* src_y, } if (width & 1) { YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); + rgb_buf[3] = 255; } } #else @@ -1114,11 +1320,12 @@ void I444ToARGBRow_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) { int x; for (x = 0; x < width; ++x) { YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); rgb_buf[3] = 255; src_y += 1; src_u += 1; @@ -1133,14 +1340,15 @@ void I422ToARGBRow_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, 
int width) { int x; for (x = 0; x < width - 1; x += 2) { YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); rgb_buf[3] = 255; YuvPixel(src_y[1], src_u[0], src_v[0], - rgb_buf + 4, rgb_buf + 5, rgb_buf + 6); + rgb_buf + 4, rgb_buf + 5, rgb_buf + 6, yuvconstants); rgb_buf[7] = 255; src_y += 2; src_u += 1; @@ -1149,33 +1357,36 @@ void I422ToARGBRow_C(const uint8* src_y, } if (width & 1) { YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); rgb_buf[3] = 255; } } -void J422ToARGBRow_C(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* rgb_buf, - int width) { +void I422AlphaToARGBRow_C(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + const uint8* src_a, + uint8* rgb_buf, + const struct YuvConstants* yuvconstants, + int width) { int x; for (x = 0; x < width - 1; x += 2) { - YuvJPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); - rgb_buf[3] = 255; - YuvJPixel(src_y[1], src_u[0], src_v[0], - rgb_buf + 4, rgb_buf + 5, rgb_buf + 6); - rgb_buf[7] = 255; + YuvPixel(src_y[0], src_u[0], src_v[0], + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); + rgb_buf[3] = src_a[0]; + YuvPixel(src_y[1], src_u[0], src_v[0], + rgb_buf + 4, rgb_buf + 5, rgb_buf + 6, yuvconstants); + rgb_buf[7] = src_a[1]; src_y += 2; src_u += 1; src_v += 1; + src_a += 2; rgb_buf += 8; // Advance 2 pixels. 
} if (width & 1) { - YuvJPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); - rgb_buf[3] = 255; + YuvPixel(src_y[0], src_u[0], src_v[0], + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); + rgb_buf[3] = src_a[0]; } } @@ -1183,13 +1394,14 @@ void I422ToRGB24Row_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) { int x; for (x = 0; x < width - 1; x += 2) { YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); YuvPixel(src_y[1], src_u[0], src_v[0], - rgb_buf + 3, rgb_buf + 4, rgb_buf + 5); + rgb_buf + 3, rgb_buf + 4, rgb_buf + 5, yuvconstants); src_y += 2; src_u += 1; src_v += 1; @@ -1197,29 +1409,7 @@ void I422ToRGB24Row_C(const uint8* src_y, } if (width & 1) { YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); - } -} - -void I422ToRAWRow_C(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* rgb_buf, - int width) { - int x; - for (x = 0; x < width - 1; x += 2) { - YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 2, rgb_buf + 1, rgb_buf + 0); - YuvPixel(src_y[1], src_u[0], src_v[0], - rgb_buf + 5, rgb_buf + 4, rgb_buf + 3); - src_y += 2; - src_u += 1; - src_v += 1; - rgb_buf += 6; // Advance 2 pixels. 
- } - if (width & 1) { - YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 2, rgb_buf + 1, rgb_buf + 0); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); } } @@ -1227,6 +1417,7 @@ void I422ToARGB4444Row_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb4444, + const struct YuvConstants* yuvconstants, int width) { uint8 b0; uint8 g0; @@ -1236,8 +1427,8 @@ void I422ToARGB4444Row_C(const uint8* src_y, uint8 r1; int x; for (x = 0; x < width - 1; x += 2) { - YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0); - YuvPixel(src_y[1], src_u[0], src_v[0], &b1, &g1, &r1); + YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0, yuvconstants); + YuvPixel(src_y[1], src_u[0], src_v[0], &b1, &g1, &r1, yuvconstants); b0 = b0 >> 4; g0 = g0 >> 4; r0 = r0 >> 4; @@ -1252,7 +1443,7 @@ void I422ToARGB4444Row_C(const uint8* src_y, dst_argb4444 += 4; // Advance 2 pixels. } if (width & 1) { - YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0); + YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0, yuvconstants); b0 = b0 >> 4; g0 = g0 >> 4; r0 = r0 >> 4; @@ -1265,6 +1456,7 @@ void I422ToARGB1555Row_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb1555, + const struct YuvConstants* yuvconstants, int width) { uint8 b0; uint8 g0; @@ -1274,8 +1466,8 @@ void I422ToARGB1555Row_C(const uint8* src_y, uint8 r1; int x; for (x = 0; x < width - 1; x += 2) { - YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0); - YuvPixel(src_y[1], src_u[0], src_v[0], &b1, &g1, &r1); + YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0, yuvconstants); + YuvPixel(src_y[1], src_u[0], src_v[0], &b1, &g1, &r1, yuvconstants); b0 = b0 >> 3; g0 = g0 >> 3; r0 = r0 >> 3; @@ -1290,7 +1482,7 @@ void I422ToARGB1555Row_C(const uint8* src_y, dst_argb1555 += 4; // Advance 2 pixels. 
} if (width & 1) { - YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0); + YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0, yuvconstants); b0 = b0 >> 3; g0 = g0 >> 3; r0 = r0 >> 3; @@ -1303,6 +1495,7 @@ void I422ToRGB565Row_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgb565, + const struct YuvConstants* yuvconstants, int width) { uint8 b0; uint8 g0; @@ -1312,8 +1505,8 @@ void I422ToRGB565Row_C(const uint8* src_y, uint8 r1; int x; for (x = 0; x < width - 1; x += 2) { - YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0); - YuvPixel(src_y[1], src_u[0], src_v[0], &b1, &g1, &r1); + YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0, yuvconstants); + YuvPixel(src_y[1], src_u[0], src_v[0], &b1, &g1, &r1, yuvconstants); b0 = b0 >> 3; g0 = g0 >> 2; r0 = r0 >> 3; @@ -1328,7 +1521,7 @@ void I422ToRGB565Row_C(const uint8* src_y, dst_rgb565 += 4; // Advance 2 pixels. } if (width & 1) { - YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0); + YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0, yuvconstants); b0 = b0 >> 3; g0 = g0 >> 2; r0 = r0 >> 3; @@ -1340,20 +1533,21 @@ void I411ToARGBRow_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) { int x; for (x = 0; x < width - 3; x += 4) { YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); rgb_buf[3] = 255; YuvPixel(src_y[1], src_u[0], src_v[0], - rgb_buf + 4, rgb_buf + 5, rgb_buf + 6); + rgb_buf + 4, rgb_buf + 5, rgb_buf + 6, yuvconstants); rgb_buf[7] = 255; YuvPixel(src_y[2], src_u[0], src_v[0], - rgb_buf + 8, rgb_buf + 9, rgb_buf + 10); + rgb_buf + 8, rgb_buf + 9, rgb_buf + 10, yuvconstants); rgb_buf[11] = 255; YuvPixel(src_y[3], src_u[0], src_v[0], - rgb_buf + 12, rgb_buf + 13, rgb_buf + 14); + rgb_buf + 12, rgb_buf + 13, rgb_buf + 14, yuvconstants); rgb_buf[15] = 255; src_y += 4; src_u += 1; @@ -1362,17 +1556,17 @@ 
void I411ToARGBRow_C(const uint8* src_y, } if (width & 2) { YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); rgb_buf[3] = 255; YuvPixel(src_y[1], src_u[0], src_v[0], - rgb_buf + 4, rgb_buf + 5, rgb_buf + 6); + rgb_buf + 4, rgb_buf + 5, rgb_buf + 6, yuvconstants); rgb_buf[7] = 255; src_y += 2; rgb_buf += 8; // Advance 2 pixels. } if (width & 1) { YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); rgb_buf[3] = 255; } } @@ -1380,14 +1574,15 @@ void I411ToARGBRow_C(const uint8* src_y, void NV12ToARGBRow_C(const uint8* src_y, const uint8* src_uv, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) { int x; for (x = 0; x < width - 1; x += 2) { YuvPixel(src_y[0], src_uv[0], src_uv[1], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); rgb_buf[3] = 255; YuvPixel(src_y[1], src_uv[0], src_uv[1], - rgb_buf + 4, rgb_buf + 5, rgb_buf + 6); + rgb_buf + 4, rgb_buf + 5, rgb_buf + 6, yuvconstants); rgb_buf[7] = 255; src_y += 2; src_uv += 2; @@ -1395,7 +1590,7 @@ void NV12ToARGBRow_C(const uint8* src_y, } if (width & 1) { YuvPixel(src_y[0], src_uv[0], src_uv[1], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); rgb_buf[3] = 255; } } @@ -1403,24 +1598,23 @@ void NV12ToARGBRow_C(const uint8* src_y, void NV21ToARGBRow_C(const uint8* src_y, const uint8* src_vu, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) { int x; for (x = 0; x < width - 1; x += 2) { YuvPixel(src_y[0], src_vu[1], src_vu[0], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); rgb_buf[3] = 255; - YuvPixel(src_y[1], src_vu[1], src_vu[0], - rgb_buf + 4, rgb_buf + 5, rgb_buf + 6); + rgb_buf + 4, rgb_buf + 5, rgb_buf + 6, yuvconstants); rgb_buf[7] = 255; - src_y += 2; src_vu 
+= 2; rgb_buf += 8; // Advance 2 pixels. } if (width & 1) { YuvPixel(src_y[0], src_vu[1], src_vu[0], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); rgb_buf[3] = 255; } } @@ -1428,6 +1622,7 @@ void NV21ToARGBRow_C(const uint8* src_y, void NV12ToRGB565Row_C(const uint8* src_y, const uint8* src_uv, uint8* dst_rgb565, + const struct YuvConstants* yuvconstants, int width) { uint8 b0; uint8 g0; @@ -1437,8 +1632,8 @@ void NV12ToRGB565Row_C(const uint8* src_y, uint8 r1; int x; for (x = 0; x < width - 1; x += 2) { - YuvPixel(src_y[0], src_uv[0], src_uv[1], &b0, &g0, &r0); - YuvPixel(src_y[1], src_uv[0], src_uv[1], &b1, &g1, &r1); + YuvPixel(src_y[0], src_uv[0], src_uv[1], &b0, &g0, &r0, yuvconstants); + YuvPixel(src_y[1], src_uv[0], src_uv[1], &b1, &g1, &r1, yuvconstants); b0 = b0 >> 3; g0 = g0 >> 2; r0 = r0 >> 3; @@ -1452,42 +1647,7 @@ void NV12ToRGB565Row_C(const uint8* src_y, dst_rgb565 += 4; // Advance 2 pixels. } if (width & 1) { - YuvPixel(src_y[0], src_uv[0], src_uv[1], &b0, &g0, &r0); - b0 = b0 >> 3; - g0 = g0 >> 2; - r0 = r0 >> 3; - *(uint16*)(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11); - } -} - -void NV21ToRGB565Row_C(const uint8* src_y, - const uint8* vsrc_u, - uint8* dst_rgb565, - int width) { - uint8 b0; - uint8 g0; - uint8 r0; - uint8 b1; - uint8 g1; - uint8 r1; - int x; - for (x = 0; x < width - 1; x += 2) { - YuvPixel(src_y[0], vsrc_u[1], vsrc_u[0], &b0, &g0, &r0); - YuvPixel(src_y[1], vsrc_u[1], vsrc_u[0], &b1, &g1, &r1); - b0 = b0 >> 3; - g0 = g0 >> 2; - r0 = r0 >> 3; - b1 = b1 >> 3; - g1 = g1 >> 2; - r1 = r1 >> 3; - *(uint32*)(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11) | - (b1 << 16) | (g1 << 21) | (r1 << 27); - src_y += 2; - vsrc_u += 2; - dst_rgb565 += 4; // Advance 2 pixels. 
- } - if (width & 1) { - YuvPixel(src_y[0], vsrc_u[1], vsrc_u[0], &b0, &g0, &r0); + YuvPixel(src_y[0], src_uv[0], src_uv[1], &b0, &g0, &r0, yuvconstants); b0 = b0 >> 3; g0 = g0 >> 2; r0 = r0 >> 3; @@ -1497,92 +1657,44 @@ void NV21ToRGB565Row_C(const uint8* src_y, void YUY2ToARGBRow_C(const uint8* src_yuy2, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) { int x; for (x = 0; x < width - 1; x += 2) { YuvPixel(src_yuy2[0], src_yuy2[1], src_yuy2[3], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); rgb_buf[3] = 255; YuvPixel(src_yuy2[2], src_yuy2[1], src_yuy2[3], - rgb_buf + 4, rgb_buf + 5, rgb_buf + 6); + rgb_buf + 4, rgb_buf + 5, rgb_buf + 6, yuvconstants); rgb_buf[7] = 255; src_yuy2 += 4; rgb_buf += 8; // Advance 2 pixels. } if (width & 1) { YuvPixel(src_yuy2[0], src_yuy2[1], src_yuy2[3], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); rgb_buf[3] = 255; } } void UYVYToARGBRow_C(const uint8* src_uyvy, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) { int x; for (x = 0; x < width - 1; x += 2) { YuvPixel(src_uyvy[1], src_uyvy[0], src_uyvy[2], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); rgb_buf[3] = 255; YuvPixel(src_uyvy[3], src_uyvy[0], src_uyvy[2], - rgb_buf + 4, rgb_buf + 5, rgb_buf + 6); + rgb_buf + 4, rgb_buf + 5, rgb_buf + 6, yuvconstants); rgb_buf[7] = 255; src_uyvy += 4; rgb_buf += 8; // Advance 2 pixels. 
} if (width & 1) { YuvPixel(src_uyvy[1], src_uyvy[0], src_uyvy[2], - rgb_buf + 0, rgb_buf + 1, rgb_buf + 2); - rgb_buf[3] = 255; - } -} - -void I422ToBGRARow_C(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* rgb_buf, - int width) { - int x; - for (x = 0; x < width - 1; x += 2) { - YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 3, rgb_buf + 2, rgb_buf + 1); - rgb_buf[0] = 255; - YuvPixel(src_y[1], src_u[0], src_v[0], - rgb_buf + 7, rgb_buf + 6, rgb_buf + 5); - rgb_buf[4] = 255; - src_y += 2; - src_u += 1; - src_v += 1; - rgb_buf += 8; // Advance 2 pixels. - } - if (width & 1) { - YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 3, rgb_buf + 2, rgb_buf + 1); - rgb_buf[0] = 255; - } -} - -void I422ToABGRRow_C(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* rgb_buf, - int width) { - int x; - for (x = 0; x < width - 1; x += 2) { - YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 2, rgb_buf + 1, rgb_buf + 0); - rgb_buf[3] = 255; - YuvPixel(src_y[1], src_u[0], src_v[0], - rgb_buf + 6, rgb_buf + 5, rgb_buf + 4); - rgb_buf[7] = 255; - src_y += 2; - src_u += 1; - src_v += 1; - rgb_buf += 8; // Advance 2 pixels. 
- } - if (width & 1) { - YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 2, rgb_buf + 1, rgb_buf + 0); + rgb_buf + 0, rgb_buf + 1, rgb_buf + 2, yuvconstants); rgb_buf[3] = 255; } } @@ -1591,14 +1703,15 @@ void I422ToRGBARow_C(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* rgb_buf, + const struct YuvConstants* yuvconstants, int width) { int x; for (x = 0; x < width - 1; x += 2) { YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 1, rgb_buf + 2, rgb_buf + 3); + rgb_buf + 1, rgb_buf + 2, rgb_buf + 3, yuvconstants); rgb_buf[0] = 255; YuvPixel(src_y[1], src_u[0], src_v[0], - rgb_buf + 5, rgb_buf + 6, rgb_buf + 7); + rgb_buf + 5, rgb_buf + 6, rgb_buf + 7, yuvconstants); rgb_buf[4] = 255; src_y += 2; src_u += 1; @@ -1607,7 +1720,7 @@ void I422ToRGBARow_C(const uint8* src_y, } if (width & 1) { YuvPixel(src_y[0], src_u[0], src_v[0], - rgb_buf + 1, rgb_buf + 2, rgb_buf + 3); + rgb_buf + 1, rgb_buf + 2, rgb_buf + 3, yuvconstants); rgb_buf[0] = 255; } } @@ -1859,6 +1972,25 @@ void ARGBBlendRow_C(const uint8* src_argb0, const uint8* src_argb1, } } #undef BLEND + +#define UBLEND(f, b, a) (((a) * f) + ((255 - a) * b) + 255) >> 8 +void BlendPlaneRow_C(const uint8* src0, const uint8* src1, + const uint8* alpha, uint8* dst, int width) { + int x; + for (x = 0; x < width - 1; x += 2) { + dst[0] = UBLEND(src0[0], src1[0], alpha[0]); + dst[1] = UBLEND(src0[1], src1[1], alpha[1]); + src0 += 2; + src1 += 2; + alpha += 2; + dst += 2; + } + if (width & 1) { + dst[0] = UBLEND(src0[0], src1[0], alpha[0]); + } +} +#undef UBLEND + #define ATTENUATE(f, a) (a | (a << 8)) * (f | (f << 8)) >> 24 // Multiply source RGB by alpha and store to destination. @@ -2015,18 +2147,18 @@ void ARGBAffineRow_C(const uint8* src_argb, int src_argb_stride, } // Blend 2 rows into 1. 
-static void HalfRow_C(const uint8* src_uv, int src_uv_stride, - uint8* dst_uv, int pix) { +static void HalfRow_C(const uint8* src_uv, ptrdiff_t src_uv_stride, + uint8* dst_uv, int width) { int x; - for (x = 0; x < pix; ++x) { + for (x = 0; x < width; ++x) { dst_uv[x] = (src_uv[x] + src_uv[src_uv_stride + x] + 1) >> 1; } } -static void HalfRow_16_C(const uint16* src_uv, int src_uv_stride, - uint16* dst_uv, int pix) { +static void HalfRow_16_C(const uint16* src_uv, ptrdiff_t src_uv_stride, + uint16* dst_uv, int width) { int x; - for (x = 0; x < pix; ++x) { + for (x = 0; x < width; ++x) { dst_uv[x] = (src_uv[x] + src_uv[src_uv_stride + x] + 1) >> 1; } } @@ -2039,23 +2171,26 @@ void InterpolateRow_C(uint8* dst_ptr, const uint8* src_ptr, int y0_fraction = 256 - y1_fraction; const uint8* src_ptr1 = src_ptr + src_stride; int x; - if (source_y_fraction == 0) { + if (y1_fraction == 0) { memcpy(dst_ptr, src_ptr, width); return; } - if (source_y_fraction == 128) { - HalfRow_C(src_ptr, (int)(src_stride), dst_ptr, width); + if (y1_fraction == 128) { + HalfRow_C(src_ptr, src_stride, dst_ptr, width); return; } for (x = 0; x < width - 1; x += 2) { - dst_ptr[0] = (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction) >> 8; - dst_ptr[1] = (src_ptr[1] * y0_fraction + src_ptr1[1] * y1_fraction) >> 8; + dst_ptr[0] = + (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction + 128) >> 8; + dst_ptr[1] = + (src_ptr[1] * y0_fraction + src_ptr1[1] * y1_fraction + 128) >> 8; src_ptr += 2; src_ptr1 += 2; dst_ptr += 2; } if (width & 1) { - dst_ptr[0] = (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction) >> 8; + dst_ptr[0] = + (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction + 128) >> 8; } } @@ -2071,7 +2206,7 @@ void InterpolateRow_16_C(uint16* dst_ptr, const uint16* src_ptr, return; } if (source_y_fraction == 128) { - HalfRow_16_C(src_ptr, (int)(src_stride), dst_ptr, width); + HalfRow_16_C(src_ptr, src_stride, dst_ptr, width); return; } for (x = 0; x < width - 1; x += 2) { @@ -2088,14 
+2223,14 @@ void InterpolateRow_16_C(uint16* dst_ptr, const uint16* src_ptr, // Use first 4 shuffler values to reorder ARGB channels. void ARGBShuffleRow_C(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix) { + const uint8* shuffler, int width) { int index0 = shuffler[0]; int index1 = shuffler[1]; int index2 = shuffler[2]; int index3 = shuffler[3]; // Shuffle a row of ARGB. int x; - for (x = 0; x < pix; ++x) { + for (x = 0; x < width; ++x) { // To support in-place conversion. uint8 b = src_argb[index0]; uint8 g = src_argb[index1]; @@ -2156,321 +2291,10 @@ void I422ToUYVYRow_C(const uint8* src_y, } } -// Maximum temporary width for wrappers to process at a time, in pixels. -#define MAXTWIDTH 2048 - -#if !(defined(_MSC_VER) && !defined(__clang__)) && \ - defined(HAS_I422TORGB565ROW_SSSE3) -// row_win.cc has asm version, but GCC uses 2 step wrapper. -void I422ToRGB565Row_SSSE3(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_rgb565, - int width) { - SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; - I422ToARGBRow_SSSE3(src_y, src_u, src_v, row, twidth); - ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth); - src_y += twidth; - src_u += twidth / 2; - src_v += twidth / 2; - dst_rgb565 += twidth * 2; - width -= twidth; - } -} -#endif - -#if defined(HAS_I422TOARGB1555ROW_SSSE3) -void I422ToARGB1555Row_SSSE3(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb1555, - int width) { - // Row buffer for intermediate ARGB pixels. - SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? 
MAXTWIDTH : width; - I422ToARGBRow_SSSE3(src_y, src_u, src_v, row, twidth); - ARGBToARGB1555Row_SSE2(row, dst_argb1555, twidth); - src_y += twidth; - src_u += twidth / 2; - src_v += twidth / 2; - dst_argb1555 += twidth * 2; - width -= twidth; - } -} -#endif - -#if defined(HAS_I422TOARGB4444ROW_SSSE3) -void I422ToARGB4444Row_SSSE3(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb4444, - int width) { - // Row buffer for intermediate ARGB pixels. - SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; - I422ToARGBRow_SSSE3(src_y, src_u, src_v, row, twidth); - ARGBToARGB4444Row_SSE2(row, dst_argb4444, twidth); - src_y += twidth; - src_u += twidth / 2; - src_v += twidth / 2; - dst_argb4444 += twidth * 2; - width -= twidth; - } -} -#endif - -#if defined(HAS_NV12TORGB565ROW_SSSE3) -void NV12ToRGB565Row_SSSE3(const uint8* src_y, const uint8* src_uv, - uint8* dst_rgb565, int width) { - // Row buffer for intermediate ARGB pixels. - SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; - NV12ToARGBRow_SSSE3(src_y, src_uv, row, twidth); - ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth); - src_y += twidth; - src_uv += twidth; - dst_rgb565 += twidth * 2; - width -= twidth; - } -} -#endif - -#if defined(HAS_NV21TORGB565ROW_SSSE3) -void NV21ToRGB565Row_SSSE3(const uint8* src_y, const uint8* src_vu, - uint8* dst_rgb565, int width) { - // Row buffer for intermediate ARGB pixels. - SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? 
MAXTWIDTH : width; - NV21ToARGBRow_SSSE3(src_y, src_vu, row, twidth); - ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth); - src_y += twidth; - src_vu += twidth; - dst_rgb565 += twidth * 2; - width -= twidth; - } -} -#endif - -#if defined(HAS_YUY2TOARGBROW_SSSE3) -void YUY2ToARGBRow_SSSE3(const uint8* src_yuy2, uint8* dst_argb, int width) { - // Row buffers for intermediate YUV pixels. - SIMD_ALIGNED(uint8 row_y[MAXTWIDTH]); - SIMD_ALIGNED(uint8 row_u[MAXTWIDTH / 2]); - SIMD_ALIGNED(uint8 row_v[MAXTWIDTH / 2]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; - YUY2ToUV422Row_SSE2(src_yuy2, row_u, row_v, twidth); - YUY2ToYRow_SSE2(src_yuy2, row_y, twidth); - I422ToARGBRow_SSSE3(row_y, row_u, row_v, dst_argb, twidth); - src_yuy2 += twidth * 2; - dst_argb += twidth * 4; - width -= twidth; - } -} -#endif - -#if defined(HAS_UYVYTOARGBROW_SSSE3) -void UYVYToARGBRow_SSSE3(const uint8* src_uyvy, uint8* dst_argb, int width) { - // Row buffers for intermediate YUV pixels. - SIMD_ALIGNED(uint8 row_y[MAXTWIDTH]); - SIMD_ALIGNED(uint8 row_u[MAXTWIDTH / 2]); - SIMD_ALIGNED(uint8 row_v[MAXTWIDTH / 2]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; - UYVYToUV422Row_SSE2(src_uyvy, row_u, row_v, twidth); - UYVYToYRow_SSE2(src_uyvy, row_y, twidth); - I422ToARGBRow_SSSE3(row_y, row_u, row_v, dst_argb, twidth); - src_uyvy += twidth * 2; - dst_argb += twidth * 4; - width -= twidth; - } -} -#endif // !defined(LIBYUV_DISABLE_X86) - -#if defined(HAS_I422TORGB565ROW_AVX2) -void I422ToRGB565Row_AVX2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_rgb565, - int width) { - SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? 
MAXTWIDTH : width; - I422ToARGBRow_AVX2(src_y, src_u, src_v, row, twidth); - ARGBToRGB565Row_AVX2(row, dst_rgb565, twidth); - src_y += twidth; - src_u += twidth / 2; - src_v += twidth / 2; - dst_rgb565 += twidth * 2; - width -= twidth; - } -} -#endif - -#if defined(HAS_I422TOARGB1555ROW_AVX2) -void I422ToARGB1555Row_AVX2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb1555, - int width) { - // Row buffer for intermediate ARGB pixels. - SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; - I422ToARGBRow_AVX2(src_y, src_u, src_v, row, twidth); - ARGBToARGB1555Row_AVX2(row, dst_argb1555, twidth); - src_y += twidth; - src_u += twidth / 2; - src_v += twidth / 2; - dst_argb1555 += twidth * 2; - width -= twidth; - } -} -#endif - -#if defined(HAS_I422TOARGB4444ROW_AVX2) -void I422ToARGB4444Row_AVX2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_argb4444, - int width) { - // Row buffer for intermediate ARGB pixels. - SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; - I422ToARGBRow_AVX2(src_y, src_u, src_v, row, twidth); - ARGBToARGB4444Row_AVX2(row, dst_argb4444, twidth); - src_y += twidth; - src_u += twidth / 2; - src_v += twidth / 2; - dst_argb4444 += twidth * 2; - width -= twidth; - } -} -#endif - -#if defined(HAS_I422TORGB24ROW_AVX2) -void I422ToRGB24Row_AVX2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_rgb24, - int width) { - // Row buffer for intermediate ARGB pixels. - SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? 
MAXTWIDTH : width; - I422ToARGBRow_AVX2(src_y, src_u, src_v, row, twidth); - // TODO(fbarchard): ARGBToRGB24Row_AVX2 - ARGBToRGB24Row_SSSE3(row, dst_rgb24, twidth); - src_y += twidth; - src_u += twidth / 2; - src_v += twidth / 2; - dst_rgb24 += twidth * 3; - width -= twidth; - } -} -#endif - -#if defined(HAS_I422TORAWROW_AVX2) -void I422ToRAWRow_AVX2(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_raw, - int width) { - // Row buffer for intermediate ARGB pixels. - SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; - I422ToARGBRow_AVX2(src_y, src_u, src_v, row, twidth); - // TODO(fbarchard): ARGBToRAWRow_AVX2 - ARGBToRAWRow_SSSE3(row, dst_raw, twidth); - src_y += twidth; - src_u += twidth / 2; - src_v += twidth / 2; - dst_raw += twidth * 3; - width -= twidth; - } -} -#endif - -#if defined(HAS_NV12TORGB565ROW_AVX2) -void NV12ToRGB565Row_AVX2(const uint8* src_y, const uint8* src_uv, - uint8* dst_rgb565, int width) { - // Row buffer for intermediate ARGB pixels. - SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; - NV12ToARGBRow_AVX2(src_y, src_uv, row, twidth); - ARGBToRGB565Row_AVX2(row, dst_rgb565, twidth); - src_y += twidth; - src_uv += twidth; - dst_rgb565 += twidth * 2; - width -= twidth; - } -} -#endif - -#if defined(HAS_NV21TORGB565ROW_AVX2) -void NV21ToRGB565Row_AVX2(const uint8* src_y, const uint8* src_vu, - uint8* dst_rgb565, int width) { - // Row buffer for intermediate ARGB pixels. - SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? 
MAXTWIDTH : width; - NV21ToARGBRow_AVX2(src_y, src_vu, row, twidth); - ARGBToRGB565Row_AVX2(row, dst_rgb565, twidth); - src_y += twidth; - src_vu += twidth; - dst_rgb565 += twidth * 2; - width -= twidth; - } -} -#endif - -#if defined(HAS_YUY2TOARGBROW_AVX2) -void YUY2ToARGBRow_AVX2(const uint8* src_yuy2, uint8* dst_argb, int width) { - // Row buffers for intermediate YUV pixels. - SIMD_ALIGNED32(uint8 row_y[MAXTWIDTH]); - SIMD_ALIGNED32(uint8 row_u[MAXTWIDTH / 2]); - SIMD_ALIGNED32(uint8 row_v[MAXTWIDTH / 2]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; - YUY2ToUV422Row_AVX2(src_yuy2, row_u, row_v, twidth); - YUY2ToYRow_AVX2(src_yuy2, row_y, twidth); - I422ToARGBRow_AVX2(row_y, row_u, row_v, dst_argb, twidth); - src_yuy2 += twidth * 2; - dst_argb += twidth * 4; - width -= twidth; - } -} -#endif - -#if defined(HAS_UYVYTOARGBROW_AVX2) -void UYVYToARGBRow_AVX2(const uint8* src_uyvy, uint8* dst_argb, int width) { - // Row buffers for intermediate YUV pixels. - SIMD_ALIGNED32(uint8 row_y[MAXTWIDTH]); - SIMD_ALIGNED32(uint8 row_u[MAXTWIDTH / 2]); - SIMD_ALIGNED32(uint8 row_v[MAXTWIDTH / 2]); - while (width > 0) { - int twidth = width > MAXTWIDTH ? 
MAXTWIDTH : width; - UYVYToUV422Row_AVX2(src_uyvy, row_u, row_v, twidth); - UYVYToYRow_AVX2(src_uyvy, row_y, twidth); - I422ToARGBRow_AVX2(row_y, row_u, row_v, dst_argb, twidth); - src_uyvy += twidth * 2; - dst_argb += twidth * 4; - width -= twidth; - } -} -#endif // !defined(LIBYUV_DISABLE_X86) void ARGBPolynomialRow_C(const uint8* src_argb, - uint8* dst_argb, const float* poly, + uint8* dst_argb, + const float* poly, int width) { int i; for (i = 0; i < width; ++i) { @@ -2557,6 +2381,19 @@ void ARGBCopyAlphaRow_C(const uint8* src, uint8* dst, int width) { } } +void ARGBExtractAlphaRow_C(const uint8* src_argb, uint8* dst_a, int width) { + int i; + for (i = 0; i < width - 1; i += 2) { + dst_a[0] = src_argb[3]; + dst_a[1] = src_argb[7]; + dst_a += 2; + src_argb += 8; + } + if (width & 1) { + dst_a[0] = src_argb[3]; + } +} + void ARGBCopyYToAlphaRow_C(const uint8* src, uint8* dst, int width) { int i; for (i = 0; i < width - 1; i += 2) { @@ -2570,6 +2407,220 @@ void ARGBCopyYToAlphaRow_C(const uint8* src, uint8* dst, int width) { } } +// Maximum temporary width for wrappers to process at a time, in pixels. +#define MAXTWIDTH 2048 + +#if !(defined(_MSC_VER) && defined(_M_IX86)) && \ + defined(HAS_I422TORGB565ROW_SSSE3) +// row_win.cc has asm version, but GCC uses 2 step wrapper. +void I422ToRGB565Row_SSSE3(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + uint8* dst_rgb565, + const struct YuvConstants* yuvconstants, + int width) { + SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); + while (width > 0) { + int twidth = width > MAXTWIDTH ? 
MAXTWIDTH : width; + I422ToARGBRow_SSSE3(src_y, src_u, src_v, row, yuvconstants, twidth); + ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth); + src_y += twidth; + src_u += twidth / 2; + src_v += twidth / 2; + dst_rgb565 += twidth * 2; + width -= twidth; + } +} +#endif + +#if defined(HAS_I422TOARGB1555ROW_SSSE3) +void I422ToARGB1555Row_SSSE3(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + uint8* dst_argb1555, + const struct YuvConstants* yuvconstants, + int width) { + // Row buffer for intermediate ARGB pixels. + SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); + while (width > 0) { + int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; + I422ToARGBRow_SSSE3(src_y, src_u, src_v, row, yuvconstants, twidth); + ARGBToARGB1555Row_SSE2(row, dst_argb1555, twidth); + src_y += twidth; + src_u += twidth / 2; + src_v += twidth / 2; + dst_argb1555 += twidth * 2; + width -= twidth; + } +} +#endif + +#if defined(HAS_I422TOARGB4444ROW_SSSE3) +void I422ToARGB4444Row_SSSE3(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + uint8* dst_argb4444, + const struct YuvConstants* yuvconstants, + int width) { + // Row buffer for intermediate ARGB pixels. + SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); + while (width > 0) { + int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; + I422ToARGBRow_SSSE3(src_y, src_u, src_v, row, yuvconstants, twidth); + ARGBToARGB4444Row_SSE2(row, dst_argb4444, twidth); + src_y += twidth; + src_u += twidth / 2; + src_v += twidth / 2; + dst_argb4444 += twidth * 2; + width -= twidth; + } +} +#endif + +#if defined(HAS_NV12TORGB565ROW_SSSE3) +void NV12ToRGB565Row_SSSE3(const uint8* src_y, + const uint8* src_uv, + uint8* dst_rgb565, + const struct YuvConstants* yuvconstants, + int width) { + // Row buffer for intermediate ARGB pixels. + SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); + while (width > 0) { + int twidth = width > MAXTWIDTH ? 
MAXTWIDTH : width; + NV12ToARGBRow_SSSE3(src_y, src_uv, row, yuvconstants, twidth); + ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth); + src_y += twidth; + src_uv += twidth; + dst_rgb565 += twidth * 2; + width -= twidth; + } +} +#endif + +#if defined(HAS_I422TORGB565ROW_AVX2) +void I422ToRGB565Row_AVX2(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + uint8* dst_rgb565, + const struct YuvConstants* yuvconstants, + int width) { + SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); + while (width > 0) { + int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; + I422ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth); +#if defined(HAS_ARGBTORGB565ROW_AVX2) + ARGBToRGB565Row_AVX2(row, dst_rgb565, twidth); +#else + ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth); +#endif + src_y += twidth; + src_u += twidth / 2; + src_v += twidth / 2; + dst_rgb565 += twidth * 2; + width -= twidth; + } +} +#endif + +#if defined(HAS_I422TOARGB1555ROW_AVX2) +void I422ToARGB1555Row_AVX2(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + uint8* dst_argb1555, + const struct YuvConstants* yuvconstants, + int width) { + // Row buffer for intermediate ARGB pixels. + SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); + while (width > 0) { + int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; + I422ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth); +#if defined(HAS_ARGBTOARGB1555ROW_AVX2) + ARGBToARGB1555Row_AVX2(row, dst_argb1555, twidth); +#else + ARGBToARGB1555Row_SSE2(row, dst_argb1555, twidth); +#endif + src_y += twidth; + src_u += twidth / 2; + src_v += twidth / 2; + dst_argb1555 += twidth * 2; + width -= twidth; + } +} +#endif + +#if defined(HAS_I422TOARGB4444ROW_AVX2) +void I422ToARGB4444Row_AVX2(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + uint8* dst_argb4444, + const struct YuvConstants* yuvconstants, + int width) { + // Row buffer for intermediate ARGB pixels. 
+ SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); + while (width > 0) { + int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; + I422ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth); +#if defined(HAS_ARGBTOARGB4444ROW_AVX2) + ARGBToARGB4444Row_AVX2(row, dst_argb4444, twidth); +#else + ARGBToARGB4444Row_SSE2(row, dst_argb4444, twidth); +#endif + src_y += twidth; + src_u += twidth / 2; + src_v += twidth / 2; + dst_argb4444 += twidth * 2; + width -= twidth; + } +} +#endif + +#if defined(HAS_I422TORGB24ROW_AVX2) +void I422ToRGB24Row_AVX2(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + uint8* dst_rgb24, + const struct YuvConstants* yuvconstants, + int width) { + // Row buffer for intermediate ARGB pixels. + SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); + while (width > 0) { + int twidth = width > MAXTWIDTH ? MAXTWIDTH : width; + I422ToARGBRow_AVX2(src_y, src_u, src_v, row, yuvconstants, twidth); + // TODO(fbarchard): ARGBToRGB24Row_AVX2 + ARGBToRGB24Row_SSSE3(row, dst_rgb24, twidth); + src_y += twidth; + src_u += twidth / 2; + src_v += twidth / 2; + dst_rgb24 += twidth * 3; + width -= twidth; + } +} +#endif + +#if defined(HAS_NV12TORGB565ROW_AVX2) +void NV12ToRGB565Row_AVX2(const uint8* src_y, + const uint8* src_uv, + uint8* dst_rgb565, + const struct YuvConstants* yuvconstants, + int width) { + // Row buffer for intermediate ARGB pixels. + SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]); + while (width > 0) { + int twidth = width > MAXTWIDTH ? 
MAXTWIDTH : width; + NV12ToARGBRow_AVX2(src_y, src_uv, row, yuvconstants, twidth); +#if defined(HAS_ARGBTORGB565ROW_AVX2) + ARGBToRGB565Row_AVX2(row, dst_rgb565, twidth); +#else + ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth); +#endif + src_y += twidth; + src_uv += twidth; + dst_rgb565 += twidth * 2; + width -= twidth; + } +} +#endif + #ifdef __cplusplus } // extern "C" } // namespace libyuv diff --git a/libs/libvpx/third_party/libyuv/source/row_gcc.cc b/libs/libvpx/third_party/libyuv/source/row_gcc.cc index 820de0a1c6..1ac7ef1aa3 100644 --- a/libs/libvpx/third_party/libyuv/source/row_gcc.cc +++ b/libs/libvpx/third_party/libyuv/source/row_gcc.cc @@ -17,7 +17,8 @@ extern "C" { #endif // This module is for GCC x86 and x64. -#if !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__)) +#if !defined(LIBYUV_DISABLE_X86) && \ + (defined(__x86_64__) || (defined(__i386__) && !defined(_MSC_VER))) #if defined(HAS_ARGBTOYROW_SSSE3) || defined(HAS_ARGBGRAYROW_SSSE3) @@ -120,6 +121,24 @@ static uvec8 kShuffleMaskRAWToARGB = { 2u, 1u, 0u, 12u, 5u, 4u, 3u, 13u, 8u, 7u, 6u, 14u, 11u, 10u, 9u, 15u }; +// Shuffle table for converting RAW to RGB24. First 8. +static const uvec8 kShuffleMaskRAWToRGB24_0 = { + 2u, 1u, 0u, 5u, 4u, 3u, 8u, 7u, + 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u +}; + +// Shuffle table for converting RAW to RGB24. Middle 8. +static const uvec8 kShuffleMaskRAWToRGB24_1 = { + 2u, 7u, 6u, 5u, 10u, 9u, 8u, 13u, + 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u +}; + +// Shuffle table for converting RAW to RGB24. Last 8. +static const uvec8 kShuffleMaskRAWToRGB24_2 = { + 8u, 7u, 12u, 11u, 10u, 15u, 14u, 13u, + 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u +}; + // Shuffle table for converting ARGB to RGB24. 
static uvec8 kShuffleMaskARGBToRGB24 = { 0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 10u, 12u, 13u, 14u, 128u, 128u, 128u, 128u @@ -135,109 +154,39 @@ static uvec8 kShuffleMaskARGBToRGB24_0 = { 0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 128u, 128u, 128u, 128u, 10u, 12u, 13u, 14u }; -// Shuffle table for converting ARGB to RAW. -static uvec8 kShuffleMaskARGBToRAW_0 = { - 2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 128u, 128u, 128u, 128u, 8u, 14u, 13u, 12u +// YUY2 shuf 16 Y to 32 Y. +static const lvec8 kShuffleYUY2Y = { + 0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14, + 0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14 +}; + +// YUY2 shuf 8 UV to 16 UV. +static const lvec8 kShuffleYUY2UV = { + 1, 3, 1, 3, 5, 7, 5, 7, 9, 11, 9, 11, 13, 15, 13, 15, + 1, 3, 1, 3, 5, 7, 5, 7, 9, 11, 9, 11, 13, 15, 13, 15 +}; + +// UYVY shuf 16 Y to 32 Y. +static const lvec8 kShuffleUYVYY = { + 1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15, + 1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15 +}; + +// UYVY shuf 8 UV to 16 UV. +static const lvec8 kShuffleUYVYUV = { + 0, 2, 0, 2, 4, 6, 4, 6, 8, 10, 8, 10, 12, 14, 12, 14, + 0, 2, 0, 2, 4, 6, 4, 6, 8, 10, 8, 10, 12, 14, 12, 14 +}; + +// NV21 shuf 8 VU to 16 UV. 
+static const lvec8 kShuffleNV21 = { + 1, 0, 1, 0, 3, 2, 3, 2, 5, 4, 5, 4, 7, 6, 7, 6, + 1, 0, 1, 0, 3, 2, 3, 2, 5, 4, 5, 4, 7, 6, 7, 6, }; #endif // HAS_RGB24TOARGBROW_SSSE3 -#if defined(TESTING) && defined(__x86_64__) -void TestRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix) { - asm volatile ( - ".p2align 5 \n" - "mov %%eax,%%eax \n" - "mov %%ebx,%%ebx \n" - "mov %%ecx,%%ecx \n" - "mov %%edx,%%edx \n" - "mov %%esi,%%esi \n" - "mov %%edi,%%edi \n" - "mov %%ebp,%%ebp \n" - "mov %%esp,%%esp \n" - ".p2align 5 \n" - "mov %%r8d,%%r8d \n" - "mov %%r9d,%%r9d \n" - "mov %%r10d,%%r10d \n" - "mov %%r11d,%%r11d \n" - "mov %%r12d,%%r12d \n" - "mov %%r13d,%%r13d \n" - "mov %%r14d,%%r14d \n" - "mov %%r15d,%%r15d \n" - ".p2align 5 \n" - "lea (%%rax),%%eax \n" - "lea (%%rbx),%%ebx \n" - "lea (%%rcx),%%ecx \n" - "lea (%%rdx),%%edx \n" - "lea (%%rsi),%%esi \n" - "lea (%%rdi),%%edi \n" - "lea (%%rbp),%%ebp \n" - "lea (%%rsp),%%esp \n" - ".p2align 5 \n" - "lea (%%r8),%%r8d \n" - "lea (%%r9),%%r9d \n" - "lea (%%r10),%%r10d \n" - "lea (%%r11),%%r11d \n" - "lea (%%r12),%%r12d \n" - "lea (%%r13),%%r13d \n" - "lea (%%r14),%%r14d \n" - "lea (%%r15),%%r15d \n" - - ".p2align 5 \n" - "lea 0x10(%%rax),%%eax \n" - "lea 0x10(%%rbx),%%ebx \n" - "lea 0x10(%%rcx),%%ecx \n" - "lea 0x10(%%rdx),%%edx \n" - "lea 0x10(%%rsi),%%esi \n" - "lea 0x10(%%rdi),%%edi \n" - "lea 0x10(%%rbp),%%ebp \n" - "lea 0x10(%%rsp),%%esp \n" - ".p2align 5 \n" - "lea 0x10(%%r8),%%r8d \n" - "lea 0x10(%%r9),%%r9d \n" - "lea 0x10(%%r10),%%r10d \n" - "lea 0x10(%%r11),%%r11d \n" - "lea 0x10(%%r12),%%r12d \n" - "lea 0x10(%%r13),%%r13d \n" - "lea 0x10(%%r14),%%r14d \n" - "lea 0x10(%%r15),%%r15d \n" - - ".p2align 5 \n" - "add 0x10,%%eax \n" - "add 0x10,%%ebx \n" - "add 0x10,%%ecx \n" - "add 0x10,%%edx \n" - "add 0x10,%%esi \n" - "add 0x10,%%edi \n" - "add 0x10,%%ebp \n" - "add 0x10,%%esp \n" - ".p2align 5 \n" - "add 0x10,%%r8d \n" - "add 0x10,%%r9d \n" - "add 0x10,%%r10d \n" - "add 0x10,%%r11d \n" - "add 0x10,%%r12d \n" - "add 
0x10,%%r13d \n" - "add 0x10,%%r14d \n" - "add 0x10,%%r15d \n" - - ".p2align 2 \n" - "1: \n" - "movq " MEMACCESS(0) ",%%xmm0 \n" - "lea " MEMLEA(0x8,0) ",%0 \n" - "movdqu %%xmm0," MEMACCESS(1) " \n" - "lea " MEMLEA(0x20,1) ",%1 \n" - "sub $0x8,%2 \n" - "jg 1b \n" - : "+r"(src_y), // %0 - "+r"(dst_argb), // %1 - "+r"(pix) // %2 - : - : "memory", "cc", "xmm0", "xmm1", "xmm5" - ); -} -#endif // TESTING - #ifdef HAS_J400TOARGBROW_SSE2 -void J400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix) { +void J400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int width) { asm volatile ( "pcmpeqb %%xmm5,%%xmm5 \n" "pslld $0x18,%%xmm5 \n" @@ -258,14 +207,14 @@ void J400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix) { "jg 1b \n" : "+r"(src_y), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 :: "memory", "cc", "xmm0", "xmm1", "xmm5" ); } #endif // HAS_J400TOARGBROW_SSE2 #ifdef HAS_RGB24TOARGBROW_SSSE3 -void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int pix) { +void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int width) { asm volatile ( "pcmpeqb %%xmm5,%%xmm5 \n" // generate mask 0xff000000 "pslld $0x18,%%xmm5 \n" @@ -297,13 +246,13 @@ void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int pix) { "jg 1b \n" : "+r"(src_rgb24), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : "m"(kShuffleMaskRGB24ToARGB) // %3 : "memory", "cc" , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } -void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb, int pix) { +void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb, int width) { asm volatile ( "pcmpeqb %%xmm5,%%xmm5 \n" // generate mask 0xff000000 "pslld $0x18,%%xmm5 \n" @@ -335,13 +284,43 @@ void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb, int pix) { "jg 1b \n" : "+r"(src_raw), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : "m"(kShuffleMaskRAWToARGB) // %3 : "memory", "cc", 
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } -void RGB565ToARGBRow_SSE2(const uint8* src, uint8* dst, int pix) { +void RAWToRGB24Row_SSSE3(const uint8* src_raw, uint8* dst_rgb24, int width) { + asm volatile ( + "movdqa %3,%%xmm3 \n" + "movdqa %4,%%xmm4 \n" + "movdqa %5,%%xmm5 \n" + LABELALIGN + "1: \n" + "movdqu " MEMACCESS(0) ",%%xmm0 \n" + "movdqu " MEMACCESS2(0x4,0) ",%%xmm1 \n" + "movdqu " MEMACCESS2(0x8,0) ",%%xmm2 \n" + "lea " MEMLEA(0x18,0) ",%0 \n" + "pshufb %%xmm3,%%xmm0 \n" + "pshufb %%xmm4,%%xmm1 \n" + "pshufb %%xmm5,%%xmm2 \n" + "movq %%xmm0," MEMACCESS(1) " \n" + "movq %%xmm1," MEMACCESS2(0x8,1) " \n" + "movq %%xmm2," MEMACCESS2(0x10,1) " \n" + "lea " MEMLEA(0x18,1) ",%1 \n" + "sub $0x8,%2 \n" + "jg 1b \n" + : "+r"(src_raw), // %0 + "+r"(dst_rgb24), // %1 + "+r"(width) // %2 + : "m"(kShuffleMaskRAWToRGB24_0), // %3 + "m"(kShuffleMaskRAWToRGB24_1), // %4 + "m"(kShuffleMaskRAWToRGB24_2) // %5 + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" + ); +} + +void RGB565ToARGBRow_SSE2(const uint8* src, uint8* dst, int width) { asm volatile ( "mov $0x1080108,%%eax \n" "movd %%eax,%%xmm5 \n" @@ -382,14 +361,14 @@ void RGB565ToARGBRow_SSE2(const uint8* src, uint8* dst, int pix) { "jg 1b \n" : "+r"(src), // %0 "+r"(dst), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "memory", "cc", "eax", NACL_R14 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" ); } -void ARGB1555ToARGBRow_SSE2(const uint8* src, uint8* dst, int pix) { +void ARGB1555ToARGBRow_SSE2(const uint8* src, uint8* dst, int width) { asm volatile ( "mov $0x1080108,%%eax \n" "movd %%eax,%%xmm5 \n" @@ -433,14 +412,14 @@ void ARGB1555ToARGBRow_SSE2(const uint8* src, uint8* dst, int pix) { "jg 1b \n" : "+r"(src), // %0 "+r"(dst), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "memory", "cc", "eax", NACL_R14 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" ); } -void ARGB4444ToARGBRow_SSE2(const uint8* src, uint8* dst, int pix) { +void 
ARGB4444ToARGBRow_SSE2(const uint8* src, uint8* dst, int width) { asm volatile ( "mov $0xf0f0f0f,%%eax \n" "movd %%eax,%%xmm4 \n" @@ -471,14 +450,14 @@ void ARGB4444ToARGBRow_SSE2(const uint8* src, uint8* dst, int pix) { "jg 1b \n" : "+r"(src), // %0 "+r"(dst), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "memory", "cc", "eax", NACL_R14 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } -void ARGBToRGB24Row_SSSE3(const uint8* src, uint8* dst, int pix) { +void ARGBToRGB24Row_SSSE3(const uint8* src, uint8* dst, int width) { asm volatile ( "movdqa %3,%%xmm6 \n" LABELALIGN @@ -510,13 +489,13 @@ void ARGBToRGB24Row_SSSE3(const uint8* src, uint8* dst, int pix) { "jg 1b \n" : "+r"(src), // %0 "+r"(dst), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : "m"(kShuffleMaskARGBToRGB24) // %3 : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6" ); } -void ARGBToRAWRow_SSSE3(const uint8* src, uint8* dst, int pix) { +void ARGBToRAWRow_SSSE3(const uint8* src, uint8* dst, int width) { asm volatile ( "movdqa %3,%%xmm6 \n" LABELALIGN @@ -548,13 +527,13 @@ void ARGBToRAWRow_SSSE3(const uint8* src, uint8* dst, int pix) { "jg 1b \n" : "+r"(src), // %0 "+r"(dst), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : "m"(kShuffleMaskARGBToRAW) // %3 : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6" ); } -void ARGBToRGB565Row_SSE2(const uint8* src, uint8* dst, int pix) { +void ARGBToRGB565Row_SSE2(const uint8* src, uint8* dst, int width) { asm volatile ( "pcmpeqb %%xmm3,%%xmm3 \n" "psrld $0x1b,%%xmm3 \n" @@ -585,12 +564,104 @@ void ARGBToRGB565Row_SSE2(const uint8* src, uint8* dst, int pix) { "jg 1b \n" : "+r"(src), // %0 "+r"(dst), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 :: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } -void ARGBToARGB1555Row_SSE2(const uint8* src, uint8* dst, int pix) { +void ARGBToRGB565DitherRow_SSE2(const uint8* src, uint8* dst, + const uint32 dither4, int width) { + asm volatile ( + "movd %3,%%xmm6 \n" + 
"punpcklbw %%xmm6,%%xmm6 \n" + "movdqa %%xmm6,%%xmm7 \n" + "punpcklwd %%xmm6,%%xmm6 \n" + "punpckhwd %%xmm7,%%xmm7 \n" + "pcmpeqb %%xmm3,%%xmm3 \n" + "psrld $0x1b,%%xmm3 \n" + "pcmpeqb %%xmm4,%%xmm4 \n" + "psrld $0x1a,%%xmm4 \n" + "pslld $0x5,%%xmm4 \n" + "pcmpeqb %%xmm5,%%xmm5 \n" + "pslld $0xb,%%xmm5 \n" + + LABELALIGN + "1: \n" + "movdqu (%0),%%xmm0 \n" + "paddusb %%xmm6,%%xmm0 \n" + "movdqa %%xmm0,%%xmm1 \n" + "movdqa %%xmm0,%%xmm2 \n" + "pslld $0x8,%%xmm0 \n" + "psrld $0x3,%%xmm1 \n" + "psrld $0x5,%%xmm2 \n" + "psrad $0x10,%%xmm0 \n" + "pand %%xmm3,%%xmm1 \n" + "pand %%xmm4,%%xmm2 \n" + "pand %%xmm5,%%xmm0 \n" + "por %%xmm2,%%xmm1 \n" + "por %%xmm1,%%xmm0 \n" + "packssdw %%xmm0,%%xmm0 \n" + "lea 0x10(%0),%0 \n" + "movq %%xmm0,(%1) \n" + "lea 0x8(%1),%1 \n" + "sub $0x4,%2 \n" + "jg 1b \n" + : "+r"(src), // %0 + "+r"(dst), // %1 + "+r"(width) // %2 + : "m"(dither4) // %3 + : "memory", "cc", + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" + ); +} + +#ifdef HAS_ARGBTORGB565DITHERROW_AVX2 +void ARGBToRGB565DitherRow_AVX2(const uint8* src, uint8* dst, + const uint32 dither4, int width) { + asm volatile ( + "vbroadcastss %3,%%xmm6 \n" + "vpunpcklbw %%xmm6,%%xmm6,%%xmm6 \n" + "vpermq $0xd8,%%ymm6,%%ymm6 \n" + "vpunpcklwd %%ymm6,%%ymm6,%%ymm6 \n" + "vpcmpeqb %%ymm3,%%ymm3,%%ymm3 \n" + "vpsrld $0x1b,%%ymm3,%%ymm3 \n" + "vpcmpeqb %%ymm4,%%ymm4,%%ymm4 \n" + "vpsrld $0x1a,%%ymm4,%%ymm4 \n" + "vpslld $0x5,%%ymm4,%%ymm4 \n" + "vpslld $0xb,%%ymm3,%%ymm5 \n" + + LABELALIGN + "1: \n" + "vmovdqu (%0),%%ymm0 \n" + "vpaddusb %%ymm6,%%ymm0,%%ymm0 \n" + "vpsrld $0x5,%%ymm0,%%ymm2 \n" + "vpsrld $0x3,%%ymm0,%%ymm1 \n" + "vpsrld $0x8,%%ymm0,%%ymm0 \n" + "vpand %%ymm4,%%ymm2,%%ymm2 \n" + "vpand %%ymm3,%%ymm1,%%ymm1 \n" + "vpand %%ymm5,%%ymm0,%%ymm0 \n" + "vpor %%ymm2,%%ymm1,%%ymm1 \n" + "vpor %%ymm1,%%ymm0,%%ymm0 \n" + "vpackusdw %%ymm0,%%ymm0,%%ymm0 \n" + "vpermq $0xd8,%%ymm0,%%ymm0 \n" + "lea 0x20(%0),%0 \n" + "vmovdqu %%xmm0,(%1) \n" + "lea 0x10(%1),%1 \n" + "sub 
$0x8,%2 \n" + "jg 1b \n" + "vzeroupper \n" + : "+r"(src), // %0 + "+r"(dst), // %1 + "+r"(width) // %2 + : "m"(dither4) // %3 + : "memory", "cc", + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" + ); +} +#endif // HAS_ARGBTORGB565DITHERROW_AVX2 + + +void ARGBToARGB1555Row_SSE2(const uint8* src, uint8* dst, int width) { asm volatile ( "pcmpeqb %%xmm4,%%xmm4 \n" "psrld $0x1b,%%xmm4 \n" @@ -625,13 +696,13 @@ void ARGBToARGB1555Row_SSE2(const uint8* src, uint8* dst, int pix) { "jg 1b \n" : "+r"(src), // %0 "+r"(dst), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 :: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" ); } -void ARGBToARGB4444Row_SSE2(const uint8* src, uint8* dst, int pix) { +void ARGBToARGB4444Row_SSE2(const uint8* src, uint8* dst, int width) { asm volatile ( "pcmpeqb %%xmm4,%%xmm4 \n" "psllw $0xc,%%xmm4 \n" @@ -654,7 +725,7 @@ void ARGBToARGB4444Row_SSE2(const uint8* src, uint8* dst, int pix) { "jg 1b \n" : "+r"(src), // %0 "+r"(dst), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 :: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4" ); } @@ -662,7 +733,7 @@ void ARGBToARGB4444Row_SSE2(const uint8* src, uint8* dst, int pix) { #ifdef HAS_ARGBTOYROW_SSSE3 // Convert 16 ARGB pixels (64 bytes) to 16 Y values. -void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) { +void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width) { asm volatile ( "movdqa %3,%%xmm4 \n" "movdqa %4,%%xmm5 \n" @@ -689,7 +760,7 @@ void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) { "jg 1b \n" : "+r"(src_argb), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : "m"(kARGBToY), // %3 "m"(kAddY16) // %4 : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" @@ -700,7 +771,7 @@ void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) { #ifdef HAS_ARGBTOYJROW_SSSE3 // Convert 16 ARGB pixels (64 bytes) to 16 YJ values. 
// Same as ARGBToYRow but different coefficients, no add 16, but do rounding. -void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) { +void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width) { asm volatile ( "movdqa %3,%%xmm4 \n" "movdqa %4,%%xmm5 \n" @@ -728,7 +799,7 @@ void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) { "jg 1b \n" : "+r"(src_argb), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : "m"(kARGBToYJ), // %3 "m"(kAddYJ64) // %4 : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" @@ -743,7 +814,7 @@ static const lvec32 kPermdARGBToY_AVX = { }; // Convert 32 ARGB pixels (128 bytes) to 32 Y values. -void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) { +void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int width) { asm volatile ( "vbroadcastf128 %3,%%ymm4 \n" "vbroadcastf128 %4,%%ymm5 \n" @@ -773,7 +844,7 @@ void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) { "vzeroupper \n" : "+r"(src_argb), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : "m"(kARGBToY), // %3 "m"(kAddY16), // %4 "m"(kPermdARGBToY_AVX) // %5 @@ -784,7 +855,7 @@ void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) { #ifdef HAS_ARGBTOYJROW_AVX2 // Convert 32 ARGB pixels (128 bytes) to 32 Y values. 
-void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) { +void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int width) { asm volatile ( "vbroadcastf128 %3,%%ymm4 \n" "vbroadcastf128 %4,%%ymm5 \n" @@ -815,7 +886,7 @@ void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) { "vzeroupper \n" : "+r"(src_argb), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : "m"(kARGBToYJ), // %3 "m"(kAddYJ64), // %4 "m"(kPermdARGBToY_AVX) // %5 @@ -952,6 +1023,67 @@ void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb, } #endif // HAS_ARGBTOUVROW_AVX2 +#ifdef HAS_ARGBTOUVJROW_AVX2 +void ARGBToUVJRow_AVX2(const uint8* src_argb0, int src_stride_argb, + uint8* dst_u, uint8* dst_v, int width) { + asm volatile ( + "vbroadcastf128 %5,%%ymm5 \n" + "vbroadcastf128 %6,%%ymm6 \n" + "vbroadcastf128 %7,%%ymm7 \n" + "sub %1,%2 \n" + LABELALIGN + "1: \n" + "vmovdqu " MEMACCESS(0) ",%%ymm0 \n" + "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n" + "vmovdqu " MEMACCESS2(0x40,0) ",%%ymm2 \n" + "vmovdqu " MEMACCESS2(0x60,0) ",%%ymm3 \n" + VMEMOPREG(vpavgb,0x00,0,4,1,ymm0,ymm0) // vpavgb (%0,%4,1),%%ymm0,%%ymm0 + VMEMOPREG(vpavgb,0x20,0,4,1,ymm1,ymm1) + VMEMOPREG(vpavgb,0x40,0,4,1,ymm2,ymm2) + VMEMOPREG(vpavgb,0x60,0,4,1,ymm3,ymm3) + "lea " MEMLEA(0x80,0) ",%0 \n" + "vshufps $0x88,%%ymm1,%%ymm0,%%ymm4 \n" + "vshufps $0xdd,%%ymm1,%%ymm0,%%ymm0 \n" + "vpavgb %%ymm4,%%ymm0,%%ymm0 \n" + "vshufps $0x88,%%ymm3,%%ymm2,%%ymm4 \n" + "vshufps $0xdd,%%ymm3,%%ymm2,%%ymm2 \n" + "vpavgb %%ymm4,%%ymm2,%%ymm2 \n" + + "vpmaddubsw %%ymm7,%%ymm0,%%ymm1 \n" + "vpmaddubsw %%ymm7,%%ymm2,%%ymm3 \n" + "vpmaddubsw %%ymm6,%%ymm0,%%ymm0 \n" + "vpmaddubsw %%ymm6,%%ymm2,%%ymm2 \n" + "vphaddw %%ymm3,%%ymm1,%%ymm1 \n" + "vphaddw %%ymm2,%%ymm0,%%ymm0 \n" + "vpaddw %%ymm5,%%ymm0,%%ymm0 \n" + "vpaddw %%ymm5,%%ymm1,%%ymm1 \n" + "vpsraw $0x8,%%ymm1,%%ymm1 \n" + "vpsraw $0x8,%%ymm0,%%ymm0 \n" + "vpacksswb %%ymm0,%%ymm1,%%ymm0 \n" + "vpermq $0xd8,%%ymm0,%%ymm0 \n" + "vpshufb 
%8,%%ymm0,%%ymm0 \n" + + "vextractf128 $0x0,%%ymm0," MEMACCESS(1) " \n" + VEXTOPMEM(vextractf128,1,ymm0,0x0,1,2,1) // vextractf128 $1,%%ymm0,(%1,%2,1) + "lea " MEMLEA(0x10,1) ",%1 \n" + "sub $0x20,%3 \n" + "jg 1b \n" + "vzeroupper \n" + : "+r"(src_argb0), // %0 + "+r"(dst_u), // %1 + "+r"(dst_v), // %2 + "+rm"(width) // %3 + : "r"((intptr_t)(src_stride_argb)), // %4 + "m"(kAddUVJ128), // %5 + "m"(kARGBToVJ), // %6 + "m"(kARGBToUJ), // %7 + "m"(kShufARGBToUV_AVX) // %8 + : "memory", "cc", NACL_R14 + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" + ); +} +#endif // HAS_ARGBTOUVJROW_AVX2 + #ifdef HAS_ARGBTOUVJROW_SSSE3 void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb, uint8* dst_u, uint8* dst_v, int width) { @@ -1073,60 +1205,7 @@ void ARGBToUV444Row_SSSE3(const uint8* src_argb, uint8* dst_u, uint8* dst_v, } #endif // HAS_ARGBTOUV444ROW_SSSE3 -#ifdef HAS_ARGBTOUV422ROW_SSSE3 -void ARGBToUV422Row_SSSE3(const uint8* src_argb0, - uint8* dst_u, uint8* dst_v, int width) { - asm volatile ( - "movdqa %4,%%xmm3 \n" - "movdqa %5,%%xmm4 \n" - "movdqa %6,%%xmm5 \n" - "sub %1,%2 \n" - LABELALIGN - "1: \n" - "movdqu " MEMACCESS(0) ",%%xmm0 \n" - "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n" - "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n" - "movdqu " MEMACCESS2(0x30,0) ",%%xmm6 \n" - "lea " MEMLEA(0x40,0) ",%0 \n" - "movdqa %%xmm0,%%xmm7 \n" - "shufps $0x88,%%xmm1,%%xmm0 \n" - "shufps $0xdd,%%xmm1,%%xmm7 \n" - "pavgb %%xmm7,%%xmm0 \n" - "movdqa %%xmm2,%%xmm7 \n" - "shufps $0x88,%%xmm6,%%xmm2 \n" - "shufps $0xdd,%%xmm6,%%xmm7 \n" - "pavgb %%xmm7,%%xmm2 \n" - "movdqa %%xmm0,%%xmm1 \n" - "movdqa %%xmm2,%%xmm6 \n" - "pmaddubsw %%xmm4,%%xmm0 \n" - "pmaddubsw %%xmm4,%%xmm2 \n" - "pmaddubsw %%xmm3,%%xmm1 \n" - "pmaddubsw %%xmm3,%%xmm6 \n" - "phaddw %%xmm2,%%xmm0 \n" - "phaddw %%xmm6,%%xmm1 \n" - "psraw $0x8,%%xmm0 \n" - "psraw $0x8,%%xmm1 \n" - "packsswb %%xmm1,%%xmm0 \n" - "paddb %%xmm5,%%xmm0 \n" - "movlps %%xmm0," MEMACCESS(1) " \n" - 
MEMOPMEM(movhps,xmm0,0x00,1,2,1) // movhps %%xmm0,(%1,%2,1) - "lea " MEMLEA(0x8,1) ",%1 \n" - "sub $0x10,%3 \n" - "jg 1b \n" - : "+r"(src_argb0), // %0 - "+r"(dst_u), // %1 - "+r"(dst_v), // %2 - "+rm"(width) // %3 - : "m"(kARGBToV), // %4 - "m"(kARGBToU), // %5 - "m"(kAddUV128) // %6 - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm6", "xmm7" - ); -} -#endif // HAS_ARGBTOUV422ROW_SSSE3 - -void BGRAToYRow_SSSE3(const uint8* src_bgra, uint8* dst_y, int pix) { +void BGRAToYRow_SSSE3(const uint8* src_bgra, uint8* dst_y, int width) { asm volatile ( "movdqa %4,%%xmm5 \n" "movdqa %3,%%xmm4 \n" @@ -1153,7 +1232,7 @@ void BGRAToYRow_SSSE3(const uint8* src_bgra, uint8* dst_y, int pix) { "jg 1b \n" : "+r"(src_bgra), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : "m"(kBGRAToY), // %3 "m"(kAddY16) // %4 : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" @@ -1221,7 +1300,7 @@ void BGRAToUVRow_SSSE3(const uint8* src_bgra0, int src_stride_bgra, ); } -void ABGRToYRow_SSSE3(const uint8* src_abgr, uint8* dst_y, int pix) { +void ABGRToYRow_SSSE3(const uint8* src_abgr, uint8* dst_y, int width) { asm volatile ( "movdqa %4,%%xmm5 \n" "movdqa %3,%%xmm4 \n" @@ -1248,14 +1327,14 @@ void ABGRToYRow_SSSE3(const uint8* src_abgr, uint8* dst_y, int pix) { "jg 1b \n" : "+r"(src_abgr), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : "m"(kABGRToY), // %3 "m"(kAddY16) // %4 : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } -void RGBAToYRow_SSSE3(const uint8* src_rgba, uint8* dst_y, int pix) { +void RGBAToYRow_SSSE3(const uint8* src_rgba, uint8* dst_y, int width) { asm volatile ( "movdqa %4,%%xmm5 \n" "movdqa %3,%%xmm4 \n" @@ -1282,7 +1361,7 @@ void RGBAToYRow_SSSE3(const uint8* src_rgba, uint8* dst_y, int pix) { "jg 1b \n" : "+r"(src_rgba), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : "m"(kRGBAToY), // %3 "m"(kAddY16) // %4 : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" @@ -1413,132 
+1492,15 @@ void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba, #if defined(HAS_I422TOARGBROW_SSSE3) || defined(HAS_I422TOARGBROW_AVX2) -struct YuvConstants { - lvec8 kUVToB; // 0 - lvec8 kUVToG; // 32 - lvec8 kUVToR; // 64 - lvec16 kUVBiasB; // 96 - lvec16 kUVBiasG; // 128 - lvec16 kUVBiasR; // 160 - lvec16 kYToRgb; // 192 -}; - -// BT.601 YUV to RGB reference -// R = (Y - 16) * 1.164 - V * -1.596 -// G = (Y - 16) * 1.164 - U * 0.391 - V * 0.813 -// B = (Y - 16) * 1.164 - U * -2.018 - -// Y contribution to R,G,B. Scale and bias. -// TODO(fbarchard): Consider moving constants into a common header. -#define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */ -#define YGB -1160 /* 1.164 * 64 * -16 + 64 / 2 */ - -// U and V contributions to R,G,B. -#define UB -128 /* max(-128, round(-2.018 * 64)) */ -#define UG 25 /* round(0.391 * 64) */ -#define VG 52 /* round(0.813 * 64) */ -#define VR -102 /* round(-1.596 * 64) */ - -// Bias values to subtract 16 from Y and 128 from U and V. -#define BB (UB * 128 + YGB) -#define BG (UG * 128 + VG * 128 + YGB) -#define BR (VR * 128 + YGB) - -// BT601 constants for YUV to RGB. -static YuvConstants SIMD_ALIGNED(kYuvConstants) = { - { UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, - UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0 }, - { UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, - UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG }, - { 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, - 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR }, - { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, - { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, - { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, - { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } -}; - -// BT601 constants for NV21 where chroma plane is VU instead of UV. 
-static YuvConstants SIMD_ALIGNED(kYvuConstants) = { - { 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, - 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB }, - { VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, - VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG }, - { VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, - VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0 }, - { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, - { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, - { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, - { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } -}; - -#undef YG -#undef YGB -#undef UB -#undef UG -#undef VG -#undef VR -#undef BB -#undef BG -#undef BR - -// JPEG YUV to RGB reference -// * R = Y - V * -1.40200 -// * G = Y - U * 0.34414 - V * 0.71414 -// * B = Y - U * -1.77200 - -// Y contribution to R,G,B. Scale and bias. -// TODO(fbarchard): Consider moving constants into a common header. -#define YGJ 16320 /* round(1.000 * 64 * 256 * 256 / 257) */ -#define YGBJ 32 /* 64 / 2 */ - -// U and V contributions to R,G,B. -#define UBJ -113 /* round(-1.77200 * 64) */ -#define UGJ 22 /* round(0.34414 * 64) */ -#define VGJ 46 /* round(0.71414 * 64) */ -#define VRJ -90 /* round(-1.40200 * 64) */ - -// Bias values to subtract 16 from Y and 128 from U and V. -#define BBJ (UBJ * 128 + YGBJ) -#define BGJ (UGJ * 128 + VGJ * 128 + YGBJ) -#define BRJ (VRJ * 128 + YGBJ) - -// JPEG constants for YUV to RGB. 
-YuvConstants SIMD_ALIGNED(kYuvJConstants) = { - { UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, - UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0 }, - { UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, - UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, - UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, - UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ }, - { 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, - 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ }, - { BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, - BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ }, - { BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, - BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ }, - { BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, - BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ }, - { YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, - YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ } -}; - -#undef YGJ -#undef YGBJ -#undef UBJ -#undef UGJ -#undef VGJ -#undef VRJ -#undef BBJ -#undef BGJ -#undef BRJ - -// Read 8 UV from 411 +// Read 8 UV from 444 #define READYUV444 \ "movq " MEMACCESS([u_buf]) ",%%xmm0 \n" \ MEMOPREG(movq, 0x00, [u_buf], [v_buf], 1, xmm1) \ "lea " MEMLEA(0x8, [u_buf]) ",%[u_buf] \n" \ - "punpcklbw %%xmm1,%%xmm0 \n" + "punpcklbw %%xmm1,%%xmm0 \n" \ + "movq " MEMACCESS([y_buf]) ",%%xmm4 \n" \ + "punpcklbw %%xmm4,%%xmm4 \n" \ + "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n" // Read 4 UV from 422, upsample to 8 UV #define READYUV422 \ @@ -1546,52 +1508,144 @@ YuvConstants SIMD_ALIGNED(kYuvJConstants) = { MEMOPREG(movd, 0x00, [u_buf], [v_buf], 1, xmm1) \ "lea " MEMLEA(0x4, [u_buf]) ",%[u_buf] \n" \ "punpcklbw %%xmm1,%%xmm0 \n" \ - "punpcklwd %%xmm0,%%xmm0 \n" + "punpcklwd %%xmm0,%%xmm0 \n" \ + "movq " MEMACCESS([y_buf]) ",%%xmm4 \n" \ + "punpcklbw %%xmm4,%%xmm4 \n" \ + "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n" -// Read 2 UV from 411, upsample to 8 UV -#define READYUV411 \ +// Read 4 UV from 422, upsample to 8 UV. With 8 Alpha. 
+#define READYUVA422 \ "movd " MEMACCESS([u_buf]) ",%%xmm0 \n" \ MEMOPREG(movd, 0x00, [u_buf], [v_buf], 1, xmm1) \ + "lea " MEMLEA(0x4, [u_buf]) ",%[u_buf] \n" \ + "punpcklbw %%xmm1,%%xmm0 \n" \ + "punpcklwd %%xmm0,%%xmm0 \n" \ + "movq " MEMACCESS([y_buf]) ",%%xmm4 \n" \ + "punpcklbw %%xmm4,%%xmm4 \n" \ + "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n" \ + "movq " MEMACCESS([a_buf]) ",%%xmm5 \n" \ + "lea " MEMLEA(0x8, [a_buf]) ",%[a_buf] \n" + +// Read 2 UV from 411, upsample to 8 UV. +// reading 4 bytes is an msan violation. +// "movd " MEMACCESS([u_buf]) ",%%xmm0 \n" +// MEMOPREG(movd, 0x00, [u_buf], [v_buf], 1, xmm1) +// pinsrw fails with drmemory +// __asm pinsrw xmm0, [esi], 0 /* U */ +// __asm pinsrw xmm1, [esi + edi], 0 /* V */ +#define READYUV411_TEMP \ + "movzwl " MEMACCESS([u_buf]) ",%[temp] \n" \ + "movd %[temp],%%xmm0 \n" \ + MEMOPARG(movzwl, 0x00, [u_buf], [v_buf], 1, [temp]) " \n" \ + "movd %[temp],%%xmm1 \n" \ "lea " MEMLEA(0x2, [u_buf]) ",%[u_buf] \n" \ "punpcklbw %%xmm1,%%xmm0 \n" \ "punpcklwd %%xmm0,%%xmm0 \n" \ - "punpckldq %%xmm0,%%xmm0 \n" + "punpckldq %%xmm0,%%xmm0 \n" \ + "movq " MEMACCESS([y_buf]) ",%%xmm4 \n" \ + "punpcklbw %%xmm4,%%xmm4 \n" \ + "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n" // Read 4 UV from NV12, upsample to 8 UV #define READNV12 \ "movq " MEMACCESS([uv_buf]) ",%%xmm0 \n" \ "lea " MEMLEA(0x8, [uv_buf]) ",%[uv_buf] \n" \ - "punpcklwd %%xmm0,%%xmm0 \n" + "punpcklwd %%xmm0,%%xmm0 \n" \ + "movq " MEMACCESS([y_buf]) ",%%xmm4 \n" \ + "punpcklbw %%xmm4,%%xmm4 \n" \ + "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n" +// Read 4 VU from NV21, upsample to 8 UV +#define READNV21 \ + "movq " MEMACCESS([vu_buf]) ",%%xmm0 \n" \ + "lea " MEMLEA(0x8, [vu_buf]) ",%[vu_buf] \n" \ + "pshufb %[kShuffleNV21], %%xmm0 \n" \ + "movq " MEMACCESS([y_buf]) ",%%xmm4 \n" \ + "punpcklbw %%xmm4,%%xmm4 \n" \ + "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n" + +// Read 4 YUY2 with 8 Y and update 4 UV to 8 UV. 
+#define READYUY2 \ + "movdqu " MEMACCESS([yuy2_buf]) ",%%xmm4 \n" \ + "pshufb %[kShuffleYUY2Y], %%xmm4 \n" \ + "movdqu " MEMACCESS([yuy2_buf]) ",%%xmm0 \n" \ + "pshufb %[kShuffleYUY2UV], %%xmm0 \n" \ + "lea " MEMLEA(0x10, [yuy2_buf]) ",%[yuy2_buf] \n" + +// Read 4 UYVY with 8 Y and update 4 UV to 8 UV. +#define READUYVY \ + "movdqu " MEMACCESS([uyvy_buf]) ",%%xmm4 \n" \ + "pshufb %[kShuffleUYVYY], %%xmm4 \n" \ + "movdqu " MEMACCESS([uyvy_buf]) ",%%xmm0 \n" \ + "pshufb %[kShuffleUYVYUV], %%xmm0 \n" \ + "lea " MEMLEA(0x10, [uyvy_buf]) ",%[uyvy_buf] \n" + +#if defined(__x86_64__) +#define YUVTORGB_SETUP(yuvconstants) \ + "movdqa " MEMACCESS([yuvconstants]) ",%%xmm8 \n" \ + "movdqa " MEMACCESS2(32, [yuvconstants]) ",%%xmm9 \n" \ + "movdqa " MEMACCESS2(64, [yuvconstants]) ",%%xmm10 \n" \ + "movdqa " MEMACCESS2(96, [yuvconstants]) ",%%xmm11 \n" \ + "movdqa " MEMACCESS2(128, [yuvconstants]) ",%%xmm12 \n" \ + "movdqa " MEMACCESS2(160, [yuvconstants]) ",%%xmm13 \n" \ + "movdqa " MEMACCESS2(192, [yuvconstants]) ",%%xmm14 \n" // Convert 8 pixels: 8 UV and 8 Y -#define YUVTORGB(YuvConstants) \ +#define YUVTORGB(yuvconstants) \ "movdqa %%xmm0,%%xmm1 \n" \ "movdqa %%xmm0,%%xmm2 \n" \ "movdqa %%xmm0,%%xmm3 \n" \ - "movdqa " MEMACCESS2(96, [YuvConstants]) ",%%xmm0 \n" \ - "pmaddubsw " MEMACCESS([YuvConstants]) ",%%xmm1 \n" \ + "movdqa %%xmm11,%%xmm0 \n" \ + "pmaddubsw %%xmm8,%%xmm1 \n" \ "psubw %%xmm1,%%xmm0 \n" \ - "movdqa " MEMACCESS2(128, [YuvConstants]) ",%%xmm1 \n" \ - "pmaddubsw " MEMACCESS2(32, [YuvConstants]) ",%%xmm2 \n" \ + "movdqa %%xmm12,%%xmm1 \n" \ + "pmaddubsw %%xmm9,%%xmm2 \n" \ "psubw %%xmm2,%%xmm1 \n" \ - "movdqa " MEMACCESS2(160, [YuvConstants]) ",%%xmm2 \n" \ - "pmaddubsw " MEMACCESS2(64, [YuvConstants]) ",%%xmm3 \n" \ + "movdqa %%xmm13,%%xmm2 \n" \ + "pmaddubsw %%xmm10,%%xmm3 \n" \ "psubw %%xmm3,%%xmm2 \n" \ - "movq " MEMACCESS([y_buf]) ",%%xmm3 \n" \ - "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n" \ - "punpcklbw %%xmm3,%%xmm3 \n" \ - "pmulhuw " MEMACCESS2(192, 
[YuvConstants]) ",%%xmm3 \n" \ - "paddsw %%xmm3,%%xmm0 \n" \ - "paddsw %%xmm3,%%xmm1 \n" \ - "paddsw %%xmm3,%%xmm2 \n" \ + "pmulhuw %%xmm14,%%xmm4 \n" \ + "paddsw %%xmm4,%%xmm0 \n" \ + "paddsw %%xmm4,%%xmm1 \n" \ + "paddsw %%xmm4,%%xmm2 \n" \ "psraw $0x6,%%xmm0 \n" \ "psraw $0x6,%%xmm1 \n" \ "psraw $0x6,%%xmm2 \n" \ "packuswb %%xmm0,%%xmm0 \n" \ "packuswb %%xmm1,%%xmm1 \n" \ "packuswb %%xmm2,%%xmm2 \n" +#define YUVTORGB_REGS \ + "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", -// Store 8 ARGB values. Assumes XMM5 is zero. +#else +#define YUVTORGB_SETUP(yuvconstants) +// Convert 8 pixels: 8 UV and 8 Y +#define YUVTORGB(yuvconstants) \ + "movdqa %%xmm0,%%xmm1 \n" \ + "movdqa %%xmm0,%%xmm2 \n" \ + "movdqa %%xmm0,%%xmm3 \n" \ + "movdqa " MEMACCESS2(96, [yuvconstants]) ",%%xmm0 \n" \ + "pmaddubsw " MEMACCESS([yuvconstants]) ",%%xmm1 \n" \ + "psubw %%xmm1,%%xmm0 \n" \ + "movdqa " MEMACCESS2(128, [yuvconstants]) ",%%xmm1 \n" \ + "pmaddubsw " MEMACCESS2(32, [yuvconstants]) ",%%xmm2 \n" \ + "psubw %%xmm2,%%xmm1 \n" \ + "movdqa " MEMACCESS2(160, [yuvconstants]) ",%%xmm2 \n" \ + "pmaddubsw " MEMACCESS2(64, [yuvconstants]) ",%%xmm3 \n" \ + "psubw %%xmm3,%%xmm2 \n" \ + "pmulhuw " MEMACCESS2(192, [yuvconstants]) ",%%xmm4 \n" \ + "paddsw %%xmm4,%%xmm0 \n" \ + "paddsw %%xmm4,%%xmm1 \n" \ + "paddsw %%xmm4,%%xmm2 \n" \ + "psraw $0x6,%%xmm0 \n" \ + "psraw $0x6,%%xmm1 \n" \ + "psraw $0x6,%%xmm2 \n" \ + "packuswb %%xmm0,%%xmm0 \n" \ + "packuswb %%xmm1,%%xmm1 \n" \ + "packuswb %%xmm2,%%xmm2 \n" +#define YUVTORGB_REGS +#endif + +// Store 8 ARGB values. #define STOREARGB \ "punpcklbw %%xmm1,%%xmm0 \n" \ "punpcklbw %%xmm5,%%xmm2 \n" \ @@ -1602,30 +1656,7 @@ YuvConstants SIMD_ALIGNED(kYuvJConstants) = { "movdqu %%xmm1," MEMACCESS2(0x10, [dst_argb]) " \n" \ "lea " MEMLEA(0x20, [dst_argb]) ", %[dst_argb] \n" -// Store 8 BGRA values. Assumes XMM5 is zero. 
-#define STOREBGRA \ - "pcmpeqb %%xmm5,%%xmm5 \n" \ - "punpcklbw %%xmm0,%%xmm1 \n" \ - "punpcklbw %%xmm2,%%xmm5 \n" \ - "movdqa %%xmm5,%%xmm0 \n" \ - "punpcklwd %%xmm1,%%xmm5 \n" \ - "punpckhwd %%xmm1,%%xmm0 \n" \ - "movdqu %%xmm5," MEMACCESS([dst_bgra]) " \n" \ - "movdqu %%xmm0," MEMACCESS2(0x10, [dst_bgra]) " \n" \ - "lea " MEMLEA(0x20, [dst_bgra]) ", %[dst_bgra] \n" - -// Store 8 ABGR values. Assumes XMM5 is zero. -#define STOREABGR \ - "punpcklbw %%xmm1,%%xmm2 \n" \ - "punpcklbw %%xmm5,%%xmm0 \n" \ - "movdqa %%xmm2,%%xmm1 \n" \ - "punpcklwd %%xmm0,%%xmm2 \n" \ - "punpckhwd %%xmm0,%%xmm1 \n" \ - "movdqu %%xmm2," MEMACCESS([dst_abgr]) " \n" \ - "movdqu %%xmm1," MEMACCESS2(0x10, [dst_abgr]) " \n" \ - "lea " MEMLEA(0x20, [dst_abgr]) ", %[dst_abgr] \n" - -// Store 8 RGBA values. Assumes XMM5 is zero. +// Store 8 RGBA values. #define STORERGBA \ "pcmpeqb %%xmm5,%%xmm5 \n" \ "punpcklbw %%xmm2,%%xmm1 \n" \ @@ -1641,14 +1672,16 @@ void OMITFP I444ToARGBRow_SSSE3(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( + YUVTORGB_SETUP(yuvconstants) "sub %[u_buf],%[v_buf] \n" "pcmpeqb %%xmm5,%%xmm5 \n" LABELALIGN "1: \n" READYUV444 - YUVTORGB(kYuvConstants) + YUVTORGB(yuvconstants) STOREARGB "sub $0x8,%[width] \n" "jg 1b \n" @@ -1657,26 +1690,27 @@ void OMITFP I444ToARGBRow_SSSE3(const uint8* y_buf, [v_buf]"+r"(v_buf), // %[v_buf] [dst_argb]"+r"(dst_argb), // %[dst_argb] [width]"+rm"(width) // %[width] - : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants] - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" + : [yuvconstants]"r"(yuvconstants) // %[yuvconstants] + : "memory", "cc", NACL_R14 YUVTORGB_REGS + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } -// TODO(fbarchard): Consider putting masks into constants. 
void OMITFP I422ToRGB24Row_SSSE3(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_rgb24, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( + YUVTORGB_SETUP(yuvconstants) "movdqa %[kShuffleMaskARGBToRGB24_0],%%xmm5 \n" "movdqa %[kShuffleMaskARGBToRGB24],%%xmm6 \n" "sub %[u_buf],%[v_buf] \n" LABELALIGN "1: \n" READYUV422 - YUVTORGB(kYuvConstants) + YUVTORGB(yuvconstants) "punpcklbw %%xmm1,%%xmm0 \n" "punpcklbw %%xmm2,%%xmm2 \n" "movdqa %%xmm0,%%xmm1 \n" @@ -1694,61 +1728,16 @@ void OMITFP I422ToRGB24Row_SSSE3(const uint8* y_buf, [u_buf]"+r"(u_buf), // %[u_buf] [v_buf]"+r"(v_buf), // %[v_buf] [dst_rgb24]"+r"(dst_rgb24), // %[dst_rgb24] -// TODO(fbarchard): Make width a register for 32 bit. #if defined(__i386__) && defined(__pic__) [width]"+m"(width) // %[width] #else [width]"+rm"(width) // %[width] #endif - : [kYuvConstants]"r"(&kYuvConstants.kUVToB), + : [yuvconstants]"r"(yuvconstants), // %[yuvconstants] [kShuffleMaskARGBToRGB24_0]"m"(kShuffleMaskARGBToRGB24_0), [kShuffleMaskARGBToRGB24]"m"(kShuffleMaskARGBToRGB24) - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm5", "xmm6" - ); -} - -void OMITFP I422ToRAWRow_SSSE3(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* dst_raw, - int width) { - asm volatile ( - "movdqa %[kShuffleMaskARGBToRAW_0],%%xmm5 \n" - "movdqa %[kShuffleMaskARGBToRAW],%%xmm6 \n" - "sub %[u_buf],%[v_buf] \n" - LABELALIGN - "1: \n" - READYUV422 - YUVTORGB(kYuvConstants) - "punpcklbw %%xmm1,%%xmm0 \n" - "punpcklbw %%xmm2,%%xmm2 \n" - "movdqa %%xmm0,%%xmm1 \n" - "punpcklwd %%xmm2,%%xmm0 \n" - "punpckhwd %%xmm2,%%xmm1 \n" - "pshufb %%xmm5,%%xmm0 \n" - "pshufb %%xmm6,%%xmm1 \n" - "palignr $0xc,%%xmm0,%%xmm1 \n" - "movq %%xmm0," MEMACCESS([dst_raw]) " \n" - "movdqu %%xmm1," MEMACCESS2(0x8,[dst_raw]) "\n" - "lea " MEMLEA(0x18,[dst_raw]) ",%[dst_raw] \n" - "subl $0x8,%[width] \n" - "jg 1b \n" - : [y_buf]"+r"(y_buf), // %[y_buf] - [u_buf]"+r"(u_buf), // %[u_buf] - 
[v_buf]"+r"(v_buf), // %[v_buf] - [dst_raw]"+r"(dst_raw), // %[dst_raw] -// TODO(fbarchard): Make width a register for 32 bit. -#if defined(__i386__) && defined(__pic__) - [width]"+m"(width) // %[width] -#else - [width]"+rm"(width) // %[width] -#endif - : [kYuvConstants]"r"(&kYuvConstants.kUVToB), - [kShuffleMaskARGBToRAW_0]"m"(kShuffleMaskARGBToRAW_0), - [kShuffleMaskARGBToRAW]"m"(kShuffleMaskARGBToRAW) - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm5", "xmm6" + : "memory", "cc", NACL_R14 YUVTORGB_REGS + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6" ); } @@ -1756,14 +1745,16 @@ void OMITFP I422ToARGBRow_SSSE3(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( + YUVTORGB_SETUP(yuvconstants) "sub %[u_buf],%[v_buf] \n" "pcmpeqb %%xmm5,%%xmm5 \n" LABELALIGN "1: \n" READYUV422 - YUVTORGB(kYuvConstants) + YUVTORGB(yuvconstants) STOREARGB "sub $0x8,%[width] \n" "jg 1b \n" @@ -1772,74 +1763,95 @@ void OMITFP I422ToARGBRow_SSSE3(const uint8* y_buf, [v_buf]"+r"(v_buf), // %[v_buf] [dst_argb]"+r"(dst_argb), // %[dst_argb] [width]"+rm"(width) // %[width] - : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants] - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" + : [yuvconstants]"r"(yuvconstants) // %[yuvconstants] + : "memory", "cc", NACL_R14 YUVTORGB_REGS + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } -void OMITFP J422ToARGBRow_SSSE3(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* dst_argb, - int width) { +#ifdef HAS_I422ALPHATOARGBROW_SSSE3 +void OMITFP I422AlphaToARGBRow_SSSE3(const uint8* y_buf, + const uint8* u_buf, + const uint8* v_buf, + const uint8* a_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { asm volatile ( + YUVTORGB_SETUP(yuvconstants) "sub %[u_buf],%[v_buf] \n" - "pcmpeqb %%xmm5,%%xmm5 \n" LABELALIGN "1: \n" - READYUV422 - 
YUVTORGB(kYuvConstants) + READYUVA422 + YUVTORGB(yuvconstants) STOREARGB - "sub $0x8,%[width] \n" + "subl $0x8,%[width] \n" "jg 1b \n" : [y_buf]"+r"(y_buf), // %[y_buf] [u_buf]"+r"(u_buf), // %[u_buf] [v_buf]"+r"(v_buf), // %[v_buf] + [a_buf]"+r"(a_buf), // %[a_buf] [dst_argb]"+r"(dst_argb), // %[dst_argb] +#if defined(__i386__) && defined(__pic__) + [width]"+m"(width) // %[width] +#else [width]"+rm"(width) // %[width] - : [kYuvConstants]"r"(&kYuvJConstants.kUVToB) // %[kYuvConstants] - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" +#endif + : [yuvconstants]"r"(yuvconstants) // %[yuvconstants] + : "memory", "cc", NACL_R14 YUVTORGB_REGS + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } +#endif // HAS_I422ALPHATOARGBROW_SSSE3 +#ifdef HAS_I411TOARGBROW_SSSE3 void OMITFP I411ToARGBRow_SSSE3(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { + int temp; asm volatile ( + YUVTORGB_SETUP(yuvconstants) "sub %[u_buf],%[v_buf] \n" "pcmpeqb %%xmm5,%%xmm5 \n" LABELALIGN "1: \n" - READYUV411 - YUVTORGB(kYuvConstants) + READYUV411_TEMP + YUVTORGB(yuvconstants) STOREARGB - "sub $0x8,%[width] \n" + "subl $0x8,%[width] \n" "jg 1b \n" - : [y_buf]"+r"(y_buf), // %[y_buf] - [u_buf]"+r"(u_buf), // %[u_buf] - [v_buf]"+r"(v_buf), // %[v_buf] + : [y_buf]"+r"(y_buf), // %[y_buf] + [u_buf]"+r"(u_buf), // %[u_buf] + [v_buf]"+r"(v_buf), // %[v_buf] [dst_argb]"+r"(dst_argb), // %[dst_argb] - [width]"+rm"(width) // %[width] - : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants] - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" + [temp]"=&r"(temp), // %[temp] +#if defined(__i386__) && defined(__pic__) + [width]"+m"(width) // %[width] +#else + [width]"+rm"(width) // %[width] +#endif + : [yuvconstants]"r"(yuvconstants) // %[yuvconstants] + : "memory", "cc", NACL_R14 YUVTORGB_REGS + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } +#endif void OMITFP 
NV12ToARGBRow_SSSE3(const uint8* y_buf, const uint8* uv_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( + YUVTORGB_SETUP(yuvconstants) "pcmpeqb %%xmm5,%%xmm5 \n" LABELALIGN "1: \n" READNV12 - YUVTORGB(kYuvConstants) + YUVTORGB(yuvconstants) STOREARGB "sub $0x8,%[width] \n" "jg 1b \n" @@ -1847,84 +1859,85 @@ void OMITFP NV12ToARGBRow_SSSE3(const uint8* y_buf, [uv_buf]"+r"(uv_buf), // %[uv_buf] [dst_argb]"+r"(dst_argb), // %[dst_argb] [width]"+rm"(width) // %[width] - : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants] - // Does not use r14. - : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" + : [yuvconstants]"r"(yuvconstants) // %[yuvconstants] + : "memory", "cc", YUVTORGB_REGS // Does not use r14. + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } void OMITFP NV21ToARGBRow_SSSE3(const uint8* y_buf, - const uint8* uv_buf, + const uint8* vu_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( + YUVTORGB_SETUP(yuvconstants) "pcmpeqb %%xmm5,%%xmm5 \n" LABELALIGN "1: \n" - READNV12 - YUVTORGB(kYuvConstants) + READNV21 + YUVTORGB(yuvconstants) STOREARGB "sub $0x8,%[width] \n" "jg 1b \n" : [y_buf]"+r"(y_buf), // %[y_buf] - [uv_buf]"+r"(uv_buf), // %[uv_buf] + [vu_buf]"+r"(vu_buf), // %[vu_buf] [dst_argb]"+r"(dst_argb), // %[dst_argb] [width]"+rm"(width) // %[width] - : [kYuvConstants]"r"(&kYvuConstants.kUVToB) // %[kYuvConstants] - // Does not use r14. - : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" + : [yuvconstants]"r"(yuvconstants), // %[yuvconstants] + [kShuffleNV21]"m"(kShuffleNV21) + : "memory", "cc", YUVTORGB_REGS // Does not use r14. 
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } -void OMITFP I422ToBGRARow_SSSE3(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* dst_bgra, +void OMITFP YUY2ToARGBRow_SSSE3(const uint8* yuy2_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - "sub %[u_buf],%[v_buf] \n" + YUVTORGB_SETUP(yuvconstants) "pcmpeqb %%xmm5,%%xmm5 \n" LABELALIGN "1: \n" - READYUV422 - YUVTORGB(kYuvConstants) - STOREBGRA + READYUY2 + YUVTORGB(yuvconstants) + STOREARGB "sub $0x8,%[width] \n" "jg 1b \n" - : [y_buf]"+r"(y_buf), // %[y_buf] - [u_buf]"+r"(u_buf), // %[u_buf] - [v_buf]"+r"(v_buf), // %[v_buf] - [dst_bgra]"+r"(dst_bgra), // %[dst_bgra] + : [yuy2_buf]"+r"(yuy2_buf), // %[yuy2_buf] + [dst_argb]"+r"(dst_argb), // %[dst_argb] [width]"+rm"(width) // %[width] - : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants] - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" + : [yuvconstants]"r"(yuvconstants), // %[yuvconstants] + [kShuffleYUY2Y]"m"(kShuffleYUY2Y), + [kShuffleYUY2UV]"m"(kShuffleYUY2UV) + : "memory", "cc", YUVTORGB_REGS // Does not use r14. 
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } -void OMITFP I422ToABGRRow_SSSE3(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* dst_abgr, +void OMITFP UYVYToARGBRow_SSSE3(const uint8* uyvy_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - "sub %[u_buf],%[v_buf] \n" + YUVTORGB_SETUP(yuvconstants) "pcmpeqb %%xmm5,%%xmm5 \n" LABELALIGN "1: \n" - READYUV422 - YUVTORGB(kYuvConstants) - STOREABGR + READUYVY + YUVTORGB(yuvconstants) + STOREARGB "sub $0x8,%[width] \n" "jg 1b \n" - : [y_buf]"+r"(y_buf), // %[y_buf] - [u_buf]"+r"(u_buf), // %[u_buf] - [v_buf]"+r"(v_buf), // %[v_buf] - [dst_abgr]"+r"(dst_abgr), // %[dst_abgr] + : [uyvy_buf]"+r"(uyvy_buf), // %[uyvy_buf] + [dst_argb]"+r"(dst_argb), // %[dst_argb] [width]"+rm"(width) // %[width] - : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants] - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" + : [yuvconstants]"r"(yuvconstants), // %[yuvconstants] + [kShuffleUYVYY]"m"(kShuffleUYVYY), + [kShuffleUYVYUV]"m"(kShuffleUYVYUV) + : "memory", "cc", YUVTORGB_REGS // Does not use r14. 
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } @@ -1932,14 +1945,16 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_rgba, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( + YUVTORGB_SETUP(yuvconstants) "sub %[u_buf],%[v_buf] \n" "pcmpeqb %%xmm5,%%xmm5 \n" LABELALIGN "1: \n" READYUV422 - YUVTORGB(kYuvConstants) + YUVTORGB(yuvconstants) STORERGBA "sub $0x8,%[width] \n" "jg 1b \n" @@ -1948,90 +1963,238 @@ void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf, [v_buf]"+r"(v_buf), // %[v_buf] [dst_rgba]"+r"(dst_rgba), // %[dst_rgba] [width]"+rm"(width) // %[width] - : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants] - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" + : [yuvconstants]"r"(yuvconstants) // %[yuvconstants] + : "memory", "cc", NACL_R14 YUVTORGB_REGS + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } #endif // HAS_I422TOARGBROW_SSSE3 +// Read 16 UV from 444 +#define READYUV444_AVX2 \ + "vmovdqu " MEMACCESS([u_buf]) ",%%xmm0 \n" \ + MEMOPREG(vmovdqu, 0x00, [u_buf], [v_buf], 1, xmm1) \ + "lea " MEMLEA(0x10, [u_buf]) ",%[u_buf] \n" \ + "vpermq $0xd8,%%ymm0,%%ymm0 \n" \ + "vpermq $0xd8,%%ymm1,%%ymm1 \n" \ + "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" \ + "vmovdqu " MEMACCESS([y_buf]) ",%%xmm4 \n" \ + "vpermq $0xd8,%%ymm4,%%ymm4 \n" \ + "vpunpcklbw %%ymm4,%%ymm4,%%ymm4 \n" \ + "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n" + // Read 8 UV from 422, upsample to 16 UV. 
#define READYUV422_AVX2 \ - "vmovq " MEMACCESS([u_buf]) ",%%xmm0 \n" \ + "vmovq " MEMACCESS([u_buf]) ",%%xmm0 \n" \ MEMOPREG(vmovq, 0x00, [u_buf], [v_buf], 1, xmm1) \ "lea " MEMLEA(0x8, [u_buf]) ",%[u_buf] \n" \ "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" \ "vpermq $0xd8,%%ymm0,%%ymm0 \n" \ - "vpunpcklwd %%ymm0,%%ymm0,%%ymm0 \n" + "vpunpcklwd %%ymm0,%%ymm0,%%ymm0 \n" \ + "vmovdqu " MEMACCESS([y_buf]) ",%%xmm4 \n" \ + "vpermq $0xd8,%%ymm4,%%ymm4 \n" \ + "vpunpcklbw %%ymm4,%%ymm4,%%ymm4 \n" \ + "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n" -// Convert 16 pixels: 16 UV and 16 Y. -#define YUVTORGB_AVX2(YuvConstants) \ - "vpmaddubsw " MEMACCESS2(64, [YuvConstants]) ",%%ymm0,%%ymm2 \n" \ - "vpmaddubsw " MEMACCESS2(32, [YuvConstants]) ",%%ymm0,%%ymm1 \n" \ - "vpmaddubsw " MEMACCESS([YuvConstants]) ",%%ymm0,%%ymm0 \n" \ - "vmovdqu " MEMACCESS2(160, [YuvConstants]) ",%%ymm3 \n" \ +// Read 8 UV from 422, upsample to 16 UV. With 16 Alpha. +#define READYUVA422_AVX2 \ + "vmovq " MEMACCESS([u_buf]) ",%%xmm0 \n" \ + MEMOPREG(vmovq, 0x00, [u_buf], [v_buf], 1, xmm1) \ + "lea " MEMLEA(0x8, [u_buf]) ",%[u_buf] \n" \ + "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" \ + "vpermq $0xd8,%%ymm0,%%ymm0 \n" \ + "vpunpcklwd %%ymm0,%%ymm0,%%ymm0 \n" \ + "vmovdqu " MEMACCESS([y_buf]) ",%%xmm4 \n" \ + "vpermq $0xd8,%%ymm4,%%ymm4 \n" \ + "vpunpcklbw %%ymm4,%%ymm4,%%ymm4 \n" \ + "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n" \ + "vmovdqu " MEMACCESS([a_buf]) ",%%xmm5 \n" \ + "vpermq $0xd8,%%ymm5,%%ymm5 \n" \ + "lea " MEMLEA(0x10, [a_buf]) ",%[a_buf] \n" + +// Read 4 UV from 411, upsample to 16 UV. 
+#define READYUV411_AVX2 \ + "vmovd " MEMACCESS([u_buf]) ",%%xmm0 \n" \ + MEMOPREG(vmovd, 0x00, [u_buf], [v_buf], 1, xmm1) \ + "lea " MEMLEA(0x4, [u_buf]) ",%[u_buf] \n" \ + "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" \ + "vpunpcklwd %%ymm0,%%ymm0,%%ymm0 \n" \ + "vpermq $0xd8,%%ymm0,%%ymm0 \n" \ + "vpunpckldq %%ymm0,%%ymm0,%%ymm0 \n" \ + "vmovdqu " MEMACCESS([y_buf]) ",%%xmm4 \n" \ + "vpermq $0xd8,%%ymm4,%%ymm4 \n" \ + "vpunpcklbw %%ymm4,%%ymm4,%%ymm4 \n" \ + "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n" + +// Read 8 UV from NV12, upsample to 16 UV. +#define READNV12_AVX2 \ + "vmovdqu " MEMACCESS([uv_buf]) ",%%xmm0 \n" \ + "lea " MEMLEA(0x10, [uv_buf]) ",%[uv_buf] \n" \ + "vpermq $0xd8,%%ymm0,%%ymm0 \n" \ + "vpunpcklwd %%ymm0,%%ymm0,%%ymm0 \n" \ + "vmovdqu " MEMACCESS([y_buf]) ",%%xmm4 \n" \ + "vpermq $0xd8,%%ymm4,%%ymm4 \n" \ + "vpunpcklbw %%ymm4,%%ymm4,%%ymm4 \n" \ + "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n" + +// Read 8 VU from NV21, upsample to 16 UV. +#define READNV21_AVX2 \ + "vmovdqu " MEMACCESS([vu_buf]) ",%%xmm0 \n" \ + "lea " MEMLEA(0x10, [vu_buf]) ",%[vu_buf] \n" \ + "vpermq $0xd8,%%ymm0,%%ymm0 \n" \ + "vpshufb %[kShuffleNV21], %%ymm0, %%ymm0 \n" \ + "vmovdqu " MEMACCESS([y_buf]) ",%%xmm4 \n" \ + "vpermq $0xd8,%%ymm4,%%ymm4 \n" \ + "vpunpcklbw %%ymm4,%%ymm4,%%ymm4 \n" \ + "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n" + +// Read 8 YUY2 with 16 Y and upsample 8 UV to 16 UV. +#define READYUY2_AVX2 \ + "vmovdqu " MEMACCESS([yuy2_buf]) ",%%ymm4 \n" \ + "vpshufb %[kShuffleYUY2Y], %%ymm4, %%ymm4 \n" \ + "vmovdqu " MEMACCESS([yuy2_buf]) ",%%ymm0 \n" \ + "vpshufb %[kShuffleYUY2UV], %%ymm0, %%ymm0 \n" \ + "lea " MEMLEA(0x20, [yuy2_buf]) ",%[yuy2_buf] \n" + +// Read 8 UYVY with 16 Y and upsample 8 UV to 16 UV. 
+#define READUYVY_AVX2 \ + "vmovdqu " MEMACCESS([uyvy_buf]) ",%%ymm4 \n" \ + "vpshufb %[kShuffleUYVYY], %%ymm4, %%ymm4 \n" \ + "vmovdqu " MEMACCESS([uyvy_buf]) ",%%ymm0 \n" \ + "vpshufb %[kShuffleUYVYUV], %%ymm0, %%ymm0 \n" \ + "lea " MEMLEA(0x20, [uyvy_buf]) ",%[uyvy_buf] \n" + +#if defined(__x86_64__) +#define YUVTORGB_SETUP_AVX2(yuvconstants) \ + "vmovdqa " MEMACCESS([yuvconstants]) ",%%ymm8 \n" \ + "vmovdqa " MEMACCESS2(32, [yuvconstants]) ",%%ymm9 \n" \ + "vmovdqa " MEMACCESS2(64, [yuvconstants]) ",%%ymm10 \n" \ + "vmovdqa " MEMACCESS2(96, [yuvconstants]) ",%%ymm11 \n" \ + "vmovdqa " MEMACCESS2(128, [yuvconstants]) ",%%ymm12 \n" \ + "vmovdqa " MEMACCESS2(160, [yuvconstants]) ",%%ymm13 \n" \ + "vmovdqa " MEMACCESS2(192, [yuvconstants]) ",%%ymm14 \n" +#define YUVTORGB_AVX2(yuvconstants) \ + "vpmaddubsw %%ymm10,%%ymm0,%%ymm2 \n" \ + "vpmaddubsw %%ymm9,%%ymm0,%%ymm1 \n" \ + "vpmaddubsw %%ymm8,%%ymm0,%%ymm0 \n" \ + "vpsubw %%ymm2,%%ymm13,%%ymm2 \n" \ + "vpsubw %%ymm1,%%ymm12,%%ymm1 \n" \ + "vpsubw %%ymm0,%%ymm11,%%ymm0 \n" \ + "vpmulhuw %%ymm14,%%ymm4,%%ymm4 \n" \ + "vpaddsw %%ymm4,%%ymm0,%%ymm0 \n" \ + "vpaddsw %%ymm4,%%ymm1,%%ymm1 \n" \ + "vpaddsw %%ymm4,%%ymm2,%%ymm2 \n" \ + "vpsraw $0x6,%%ymm0,%%ymm0 \n" \ + "vpsraw $0x6,%%ymm1,%%ymm1 \n" \ + "vpsraw $0x6,%%ymm2,%%ymm2 \n" \ + "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n" \ + "vpackuswb %%ymm1,%%ymm1,%%ymm1 \n" \ + "vpackuswb %%ymm2,%%ymm2,%%ymm2 \n" +#define YUVTORGB_REGS_AVX2 \ + "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", +#else // Convert 16 pixels: 16 UV and 16 Y. 
+#define YUVTORGB_SETUP_AVX2(yuvconstants) +#define YUVTORGB_AVX2(yuvconstants) \ + "vpmaddubsw " MEMACCESS2(64, [yuvconstants]) ",%%ymm0,%%ymm2 \n" \ + "vpmaddubsw " MEMACCESS2(32, [yuvconstants]) ",%%ymm0,%%ymm1 \n" \ + "vpmaddubsw " MEMACCESS([yuvconstants]) ",%%ymm0,%%ymm0 \n" \ + "vmovdqu " MEMACCESS2(160, [yuvconstants]) ",%%ymm3 \n" \ "vpsubw %%ymm2,%%ymm3,%%ymm2 \n" \ - "vmovdqu " MEMACCESS2(128, [YuvConstants]) ",%%ymm3 \n" \ + "vmovdqu " MEMACCESS2(128, [yuvconstants]) ",%%ymm3 \n" \ "vpsubw %%ymm1,%%ymm3,%%ymm1 \n" \ - "vmovdqu " MEMACCESS2(96, [YuvConstants]) ",%%ymm3 \n" \ + "vmovdqu " MEMACCESS2(96, [yuvconstants]) ",%%ymm3 \n" \ "vpsubw %%ymm0,%%ymm3,%%ymm0 \n" \ - "vmovdqu " MEMACCESS([y_buf]) ",%%xmm3 \n" \ - "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n" \ - "vpermq $0xd8,%%ymm3,%%ymm3 \n" \ - "vpunpcklbw %%ymm3,%%ymm3,%%ymm3 \n" \ - "vpmulhuw " MEMACCESS2(192, [YuvConstants]) ",%%ymm3,%%ymm3 \n" \ - "vpaddsw %%ymm3,%%ymm0,%%ymm0 \n" \ - "vpaddsw %%ymm3,%%ymm1,%%ymm1 \n" \ - "vpaddsw %%ymm3,%%ymm2,%%ymm2 \n" \ - "vpsraw $0x6,%%ymm0,%%ymm0 \n" \ - "vpsraw $0x6,%%ymm1,%%ymm1 \n" \ - "vpsraw $0x6,%%ymm2,%%ymm2 \n" \ - "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n" \ - "vpackuswb %%ymm1,%%ymm1,%%ymm1 \n" \ - "vpackuswb %%ymm2,%%ymm2,%%ymm2 \n" + "vpmulhuw " MEMACCESS2(192, [yuvconstants]) ",%%ymm4,%%ymm4 \n" \ + "vpaddsw %%ymm4,%%ymm0,%%ymm0 \n" \ + "vpaddsw %%ymm4,%%ymm1,%%ymm1 \n" \ + "vpaddsw %%ymm4,%%ymm2,%%ymm2 \n" \ + "vpsraw $0x6,%%ymm0,%%ymm0 \n" \ + "vpsraw $0x6,%%ymm1,%%ymm1 \n" \ + "vpsraw $0x6,%%ymm2,%%ymm2 \n" \ + "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n" \ + "vpackuswb %%ymm1,%%ymm1,%%ymm1 \n" \ + "vpackuswb %%ymm2,%%ymm2,%%ymm2 \n" +#define YUVTORGB_REGS_AVX2 +#endif -#if defined(HAS_I422TOBGRAROW_AVX2) +// Store 16 ARGB values. 
+#define STOREARGB_AVX2 \ + "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" \ + "vpermq $0xd8,%%ymm0,%%ymm0 \n" \ + "vpunpcklbw %%ymm5,%%ymm2,%%ymm2 \n" \ + "vpermq $0xd8,%%ymm2,%%ymm2 \n" \ + "vpunpcklwd %%ymm2,%%ymm0,%%ymm1 \n" \ + "vpunpckhwd %%ymm2,%%ymm0,%%ymm0 \n" \ + "vmovdqu %%ymm1," MEMACCESS([dst_argb]) " \n" \ + "vmovdqu %%ymm0," MEMACCESS2(0x20, [dst_argb]) " \n" \ + "lea " MEMLEA(0x40, [dst_argb]) ", %[dst_argb] \n" + +#ifdef HAS_I444TOARGBROW_AVX2 // 16 pixels -// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 BGRA (64 bytes). -void OMITFP I422ToBGRARow_AVX2(const uint8* y_buf, +// 16 UV values with 16 Y producing 16 ARGB (64 bytes). +void OMITFP I444ToARGBRow_AVX2(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, - uint8* dst_bgra, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( + YUVTORGB_SETUP_AVX2(yuvconstants) "sub %[u_buf],%[v_buf] \n" - "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" + "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" LABELALIGN "1: \n" - READYUV422_AVX2 - YUVTORGB_AVX2(kYuvConstants) - - // Step 3: Weave into BGRA - "vpunpcklbw %%ymm0,%%ymm1,%%ymm1 \n" // GB - "vpermq $0xd8,%%ymm1,%%ymm1 \n" - "vpunpcklbw %%ymm2,%%ymm5,%%ymm2 \n" // AR - "vpermq $0xd8,%%ymm2,%%ymm2 \n" - "vpunpcklwd %%ymm1,%%ymm2,%%ymm0 \n" // ARGB first 8 pixels - "vpunpckhwd %%ymm1,%%ymm2,%%ymm2 \n" // ARGB next 8 pixels - - "vmovdqu %%ymm0," MEMACCESS([dst_bgra]) "\n" - "vmovdqu %%ymm2," MEMACCESS2(0x20,[dst_bgra]) "\n" - "lea " MEMLEA(0x40,[dst_bgra]) ",%[dst_bgra] \n" + READYUV444_AVX2 + YUVTORGB_AVX2(yuvconstants) + STOREARGB_AVX2 "sub $0x10,%[width] \n" "jg 1b \n" "vzeroupper \n" : [y_buf]"+r"(y_buf), // %[y_buf] [u_buf]"+r"(u_buf), // %[u_buf] [v_buf]"+r"(v_buf), // %[v_buf] - [dst_bgra]"+r"(dst_bgra), // %[dst_bgra] + [dst_argb]"+r"(dst_argb), // %[dst_argb] [width]"+rm"(width) // %[width] - : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants] - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" + 
: [yuvconstants]"r"(yuvconstants) // %[yuvconstants] + : "memory", "cc", NACL_R14 YUVTORGB_REGS_AVX2 + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } -#endif // HAS_I422TOBGRAROW_AVX2 +#endif // HAS_I444TOARGBROW_AVX2 + +#ifdef HAS_I411TOARGBROW_AVX2 +// 16 pixels +// 4 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes). +void OMITFP I411ToARGBRow_AVX2(const uint8* y_buf, + const uint8* u_buf, + const uint8* v_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + asm volatile ( + YUVTORGB_SETUP_AVX2(yuvconstants) + "sub %[u_buf],%[v_buf] \n" + "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" + LABELALIGN + "1: \n" + READYUV411_AVX2 + YUVTORGB_AVX2(yuvconstants) + STOREARGB_AVX2 + "sub $0x10,%[width] \n" + "jg 1b \n" + "vzeroupper \n" + : [y_buf]"+r"(y_buf), // %[y_buf] + [u_buf]"+r"(u_buf), // %[u_buf] + [v_buf]"+r"(v_buf), // %[v_buf] + [dst_argb]"+r"(dst_argb), // %[dst_argb] + [width]"+rm"(width) // %[width] + : [yuvconstants]"r"(yuvconstants) // %[yuvconstants] + : "memory", "cc", NACL_R14 YUVTORGB_REGS_AVX2 + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" + ); +} +#endif // HAS_I411TOARGBROW_AVX2 #if defined(HAS_I422TOARGBROW_AVX2) // 16 pixels @@ -2040,26 +2203,17 @@ void OMITFP I422ToARGBRow_AVX2(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( + YUVTORGB_SETUP_AVX2(yuvconstants) "sub %[u_buf],%[v_buf] \n" - "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" + "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" LABELALIGN "1: \n" READYUV422_AVX2 - YUVTORGB_AVX2(kYuvConstants) - - // Step 3: Weave into ARGB - "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" // BG - "vpermq $0xd8,%%ymm0,%%ymm0 \n" - "vpunpcklbw %%ymm5,%%ymm2,%%ymm2 \n" // RA - "vpermq $0xd8,%%ymm2,%%ymm2 \n" - "vpunpcklwd %%ymm2,%%ymm0,%%ymm1 \n" // BGRA first 8 pixels - "vpunpckhwd %%ymm2,%%ymm0,%%ymm0 \n" // BGRA next 8 pixels - - "vmovdqu %%ymm1," MEMACCESS([dst_argb]) "\n" - "vmovdqu %%ymm0," 
MEMACCESS2(0x20,[dst_argb]) "\n" - "lea " MEMLEA(0x40,[dst_argb]) ",%[dst_argb] \n" + YUVTORGB_AVX2(yuvconstants) + STOREARGB_AVX2 "sub $0x10,%[width] \n" "jg 1b \n" "vzeroupper \n" @@ -2068,95 +2222,50 @@ void OMITFP I422ToARGBRow_AVX2(const uint8* y_buf, [v_buf]"+r"(v_buf), // %[v_buf] [dst_argb]"+r"(dst_argb), // %[dst_argb] [width]"+rm"(width) // %[width] - : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants] - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" + : [yuvconstants]"r"(yuvconstants) // %[yuvconstants] + : "memory", "cc", NACL_R14 YUVTORGB_REGS_AVX2 + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } #endif // HAS_I422TOARGBROW_AVX2 -#if defined(HAS_J422TOARGBROW_AVX2) +#if defined(HAS_I422ALPHATOARGBROW_AVX2) // 16 pixels -// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes). -void OMITFP J422ToARGBRow_AVX2(const uint8* y_buf, +// 8 UV values upsampled to 16 UV, mixed with 16 Y and 16 A producing 16 ARGB. +void OMITFP I422AlphaToARGBRow_AVX2(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, + const uint8* a_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( + YUVTORGB_SETUP_AVX2(yuvconstants) "sub %[u_buf],%[v_buf] \n" - "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" LABELALIGN "1: \n" - READYUV422_AVX2 - YUVTORGB_AVX2(kYuvConstants) - - // Step 3: Weave into ARGB - "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" // BG - "vpermq $0xd8,%%ymm0,%%ymm0 \n" - "vpunpcklbw %%ymm5,%%ymm2,%%ymm2 \n" // RA - "vpermq $0xd8,%%ymm2,%%ymm2 \n" - "vpunpcklwd %%ymm2,%%ymm0,%%ymm1 \n" // BGRA first 8 pixels - "vpunpckhwd %%ymm2,%%ymm0,%%ymm0 \n" // BGRA next 8 pixels - - "vmovdqu %%ymm1," MEMACCESS([dst_argb]) "\n" - "vmovdqu %%ymm0," MEMACCESS2(0x20,[dst_argb]) "\n" - "lea " MEMLEA(0x40,[dst_argb]) ",%[dst_argb] \n" - "sub $0x10,%[width] \n" + READYUVA422_AVX2 + YUVTORGB_AVX2(yuvconstants) + STOREARGB_AVX2 + "subl $0x10,%[width] \n" "jg 1b \n" "vzeroupper \n" : 
[y_buf]"+r"(y_buf), // %[y_buf] [u_buf]"+r"(u_buf), // %[u_buf] [v_buf]"+r"(v_buf), // %[v_buf] + [a_buf]"+r"(a_buf), // %[a_buf] [dst_argb]"+r"(dst_argb), // %[dst_argb] +#if defined(__i386__) && defined(__pic__) + [width]"+m"(width) // %[width] +#else [width]"+rm"(width) // %[width] - : [kYuvConstants]"r"(&kYuvJConstants.kUVToB) // %[kYuvConstants] - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" +#endif + : [yuvconstants]"r"(yuvconstants) // %[yuvconstants] + : "memory", "cc", NACL_R14 YUVTORGB_REGS_AVX2 + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } -#endif // HAS_J422TOARGBROW_AVX2 - -#if defined(HAS_I422TOABGRROW_AVX2) -// 16 pixels -// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ABGR (64 bytes). -void OMITFP I422ToABGRRow_AVX2(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* dst_argb, - int width) { - asm volatile ( - "sub %[u_buf],%[v_buf] \n" - "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" - LABELALIGN - "1: \n" - READYUV422_AVX2 - YUVTORGB_AVX2(kYuvConstants) - - // Step 3: Weave into ABGR - "vpunpcklbw %%ymm1,%%ymm2,%%ymm1 \n" // RG - "vpermq $0xd8,%%ymm1,%%ymm1 \n" - "vpunpcklbw %%ymm5,%%ymm0,%%ymm2 \n" // BA - "vpermq $0xd8,%%ymm2,%%ymm2 \n" - "vpunpcklwd %%ymm2,%%ymm1,%%ymm0 \n" // RGBA first 8 pixels - "vpunpckhwd %%ymm2,%%ymm1,%%ymm1 \n" // RGBA next 8 pixels - "vmovdqu %%ymm0," MEMACCESS([dst_argb]) "\n" - "vmovdqu %%ymm1," MEMACCESS2(0x20,[dst_argb]) "\n" - "lea " MEMLEA(0x40,[dst_argb]) ",%[dst_argb] \n" - "sub $0x10,%[width] \n" - "jg 1b \n" - "vzeroupper \n" - : [y_buf]"+r"(y_buf), // %[y_buf] - [u_buf]"+r"(u_buf), // %[u_buf] - [v_buf]"+r"(v_buf), // %[v_buf] - [dst_argb]"+r"(dst_argb), // %[dst_argb] - [width]"+rm"(width) // %[width] - : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants] - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" - ); -} -#endif // HAS_I422TOABGRROW_AVX2 +#endif // HAS_I422ALPHATOARGBROW_AVX2 #if defined(HAS_I422TORGBAROW_AVX2) // 
16 pixels @@ -2165,14 +2274,16 @@ void OMITFP I422ToRGBARow_AVX2(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( + YUVTORGB_SETUP_AVX2(yuvconstants) "sub %[u_buf],%[v_buf] \n" "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" LABELALIGN "1: \n" READYUV422_AVX2 - YUVTORGB_AVX2(kYuvConstants) + YUVTORGB_AVX2(yuvconstants) // Step 3: Weave into RGBA "vpunpcklbw %%ymm2,%%ymm1,%%ymm1 \n" @@ -2192,13 +2303,134 @@ void OMITFP I422ToRGBARow_AVX2(const uint8* y_buf, [v_buf]"+r"(v_buf), // %[v_buf] [dst_argb]"+r"(dst_argb), // %[dst_argb] [width]"+rm"(width) // %[width] - : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants] - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" + : [yuvconstants]"r"(yuvconstants) // %[yuvconstants] + : "memory", "cc", NACL_R14 YUVTORGB_REGS_AVX2 + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } #endif // HAS_I422TORGBAROW_AVX2 +#if defined(HAS_NV12TOARGBROW_AVX2) +// 16 pixels. +// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes). +void OMITFP NV12ToARGBRow_AVX2(const uint8* y_buf, + const uint8* uv_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + asm volatile ( + YUVTORGB_SETUP_AVX2(yuvconstants) + "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" + LABELALIGN + "1: \n" + READNV12_AVX2 + YUVTORGB_AVX2(yuvconstants) + STOREARGB_AVX2 + "sub $0x10,%[width] \n" + "jg 1b \n" + "vzeroupper \n" + : [y_buf]"+r"(y_buf), // %[y_buf] + [uv_buf]"+r"(uv_buf), // %[uv_buf] + [dst_argb]"+r"(dst_argb), // %[dst_argb] + [width]"+rm"(width) // %[width] + : [yuvconstants]"r"(yuvconstants) // %[yuvconstants] + : "memory", "cc", YUVTORGB_REGS_AVX2 // Does not use r14. + "xmm0", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" + ); +} +#endif // HAS_NV12TOARGBROW_AVX2 + +#if defined(HAS_NV21TOARGBROW_AVX2) +// 16 pixels. +// 8 VU values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes). 
+void OMITFP NV21ToARGBRow_AVX2(const uint8* y_buf, + const uint8* vu_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + asm volatile ( + YUVTORGB_SETUP_AVX2(yuvconstants) + "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" + LABELALIGN + "1: \n" + READNV21_AVX2 + YUVTORGB_AVX2(yuvconstants) + STOREARGB_AVX2 + "sub $0x10,%[width] \n" + "jg 1b \n" + "vzeroupper \n" + : [y_buf]"+r"(y_buf), // %[y_buf] + [vu_buf]"+r"(vu_buf), // %[vu_buf] + [dst_argb]"+r"(dst_argb), // %[dst_argb] + [width]"+rm"(width) // %[width] + : [yuvconstants]"r"(yuvconstants), // %[yuvconstants] + [kShuffleNV21]"m"(kShuffleNV21) + : "memory", "cc", YUVTORGB_REGS_AVX2 // Does not use r14. + "xmm0", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" + ); +} +#endif // HAS_NV21TOARGBROW_AVX2 + +#if defined(HAS_YUY2TOARGBROW_AVX2) +// 16 pixels. +// 8 YUY2 values with 16 Y and 8 UV producing 16 ARGB (64 bytes). +void OMITFP YUY2ToARGBRow_AVX2(const uint8* yuy2_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + asm volatile ( + YUVTORGB_SETUP_AVX2(yuvconstants) + "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" + LABELALIGN + "1: \n" + READYUY2_AVX2 + YUVTORGB_AVX2(yuvconstants) + STOREARGB_AVX2 + "sub $0x10,%[width] \n" + "jg 1b \n" + "vzeroupper \n" + : [yuy2_buf]"+r"(yuy2_buf), // %[yuy2_buf] + [dst_argb]"+r"(dst_argb), // %[dst_argb] + [width]"+rm"(width) // %[width] + : [yuvconstants]"r"(yuvconstants), // %[yuvconstants] + [kShuffleYUY2Y]"m"(kShuffleYUY2Y), + [kShuffleYUY2UV]"m"(kShuffleYUY2UV) + : "memory", "cc", YUVTORGB_REGS_AVX2 // Does not use r14. + "xmm0", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" + ); +} +#endif // HAS_YUY2TOARGBROW_AVX2 + +#if defined(HAS_UYVYTOARGBROW_AVX2) +// 16 pixels. +// 8 UYVY values with 16 Y and 8 UV producing 16 ARGB (64 bytes). 
+void OMITFP UYVYToARGBRow_AVX2(const uint8* uyvy_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + asm volatile ( + YUVTORGB_SETUP_AVX2(yuvconstants) + "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" + LABELALIGN + "1: \n" + READUYVY_AVX2 + YUVTORGB_AVX2(yuvconstants) + STOREARGB_AVX2 + "sub $0x10,%[width] \n" + "jg 1b \n" + "vzeroupper \n" + : [uyvy_buf]"+r"(uyvy_buf), // %[uyvy_buf] + [dst_argb]"+r"(dst_argb), // %[dst_argb] + [width]"+rm"(width) // %[width] + : [yuvconstants]"r"(yuvconstants), // %[yuvconstants] + [kShuffleUYVYY]"m"(kShuffleUYVYY), + [kShuffleUYVYUV]"m"(kShuffleUYVYUV) + : "memory", "cc", YUVTORGB_REGS_AVX2 // Does not use r14. + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" + ); +} +#endif // HAS_UYVYTOARGBROW_AVX2 + #ifdef HAS_I400TOARGBROW_SSE2 void I400ToARGBRow_SSE2(const uint8* y_buf, uint8* dst_argb, int width) { asm volatile ( @@ -2344,35 +2576,7 @@ void MirrorRow_AVX2(const uint8* src, uint8* dst, int width) { } #endif // HAS_MIRRORROW_AVX2 -#ifdef HAS_MIRRORROW_SSE2 -void MirrorRow_SSE2(const uint8* src, uint8* dst, int width) { - intptr_t temp_width = (intptr_t)(width); - asm volatile ( - LABELALIGN - "1: \n" - MEMOPREG(movdqu,-0x10,0,2,1,xmm0) // movdqu -0x10(%0,%2),%%xmm0 - "movdqa %%xmm0,%%xmm1 \n" - "psllw $0x8,%%xmm0 \n" - "psrlw $0x8,%%xmm1 \n" - "por %%xmm1,%%xmm0 \n" - "pshuflw $0x1b,%%xmm0,%%xmm0 \n" - "pshufhw $0x1b,%%xmm0,%%xmm0 \n" - "pshufd $0x4e,%%xmm0,%%xmm0 \n" - "movdqu %%xmm0," MEMACCESS(1) " \n" - "lea " MEMLEA(0x10,1)",%1 \n" - "sub $0x10,%2 \n" - "jg 1b \n" - : "+r"(src), // %0 - "+r"(dst), // %1 - "+r"(temp_width) // %2 - : - : "memory", "cc", NACL_R14 - "xmm0", "xmm1" - ); -} -#endif // HAS_MIRRORROW_SSE2 - -#ifdef HAS_MIRRORROW_UV_SSSE3 +#ifdef HAS_MIRRORUVROW_SSSE3 // Shuffle table for reversing the bytes of UV channels. 
static uvec8 kShuffleMirrorUV = { 14u, 12u, 10u, 8u, 6u, 4u, 2u, 0u, 15u, 13u, 11u, 9u, 7u, 5u, 3u, 1u @@ -2403,7 +2607,7 @@ void MirrorUVRow_SSSE3(const uint8* src, uint8* dst_u, uint8* dst_v, "xmm0", "xmm1" ); } -#endif // HAS_MIRRORROW_UV_SSSE3 +#endif // HAS_MIRRORUVROW_SSSE3 #ifdef HAS_ARGBMIRRORROW_SSE2 @@ -2458,7 +2662,8 @@ void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width) { #endif // HAS_ARGBMIRRORROW_AVX2 #ifdef HAS_SPLITUVROW_AVX2 -void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) { +void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, + int width) { asm volatile ( "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" "vpsrlw $0x8,%%ymm5,%%ymm5 \n" @@ -2485,7 +2690,7 @@ void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) { : "+r"(src_uv), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : : "memory", "cc", NACL_R14 "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" @@ -2494,7 +2699,8 @@ void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) { #endif // HAS_SPLITUVROW_AVX2 #ifdef HAS_SPLITUVROW_SSE2 -void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) { +void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, + int width) { asm volatile ( "pcmpeqb %%xmm5,%%xmm5 \n" "psrlw $0x8,%%xmm5 \n" @@ -2520,7 +2726,7 @@ void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) { : "+r"(src_uv), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : : "memory", "cc", NACL_R14 "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" @@ -2591,8 +2797,23 @@ void MergeUVRow_SSE2(const uint8* src_u, const uint8* src_v, uint8* dst_uv, #ifdef HAS_COPYROW_SSE2 void CopyRow_SSE2(const uint8* src, uint8* dst, int count) { asm volatile ( + "test $0xf,%0 \n" + "jne 2f \n" + "test $0xf,%1 \n" + "jne 2f \n" LABELALIGN "1: \n" + "movdqa " MEMACCESS(0) ",%%xmm0 \n" + "movdqa " MEMACCESS2(0x10,0) 
",%%xmm1 \n" + "lea " MEMLEA(0x20,0) ",%0 \n" + "movdqa %%xmm0," MEMACCESS(1) " \n" + "movdqa %%xmm1," MEMACCESS2(0x10,1) " \n" + "lea " MEMLEA(0x20,1) ",%1 \n" + "sub $0x20,%2 \n" + "jg 1b \n" + "jmp 9f \n" + LABELALIGN + "2: \n" "movdqu " MEMACCESS(0) ",%%xmm0 \n" "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n" "lea " MEMLEA(0x20,0) ",%0 \n" @@ -2600,7 +2821,8 @@ void CopyRow_SSE2(const uint8* src, uint8* dst, int count) { "movdqu %%xmm1," MEMACCESS2(0x10,1) " \n" "lea " MEMLEA(0x20,1) ",%1 \n" "sub $0x20,%2 \n" - "jg 1b \n" + "jg 2b \n" + "9: \n" : "+r"(src), // %0 "+r"(dst), // %1 "+r"(count) // %2 @@ -2714,6 +2936,33 @@ void ARGBCopyAlphaRow_AVX2(const uint8* src, uint8* dst, int width) { } #endif // HAS_ARGBCOPYALPHAROW_AVX2 +#ifdef HAS_ARGBEXTRACTALPHAROW_SSE2 +// width in pixels +void ARGBExtractAlphaRow_SSE2(const uint8* src_argb, uint8* dst_a, int width) { + asm volatile ( + LABELALIGN + "1: \n" + "movdqu " MEMACCESS(0) ", %%xmm0 \n" + "movdqu " MEMACCESS2(0x10, 0) ", %%xmm1 \n" + "lea " MEMLEA(0x20, 0) ", %0 \n" + "psrld $0x18, %%xmm0 \n" + "psrld $0x18, %%xmm1 \n" + "packssdw %%xmm1, %%xmm0 \n" + "packuswb %%xmm0, %%xmm0 \n" + "movq %%xmm0," MEMACCESS(1) " \n" + "lea " MEMLEA(0x8, 1) ", %1 \n" + "sub $0x8, %2 \n" + "jg 1b \n" + : "+r"(src_argb), // %0 + "+r"(dst_a), // %1 + "+rm"(width) // %2 + : + : "memory", "cc" + , "xmm0", "xmm1" + ); +} +#endif // HAS_ARGBEXTRACTALPHAROW_SSE2 + #ifdef HAS_ARGBCOPYYTOALPHAROW_SSE2 // width in pixels void ARGBCopyYToAlphaRow_SSE2(const uint8* src, uint8* dst, int width) { @@ -2786,7 +3035,7 @@ void ARGBCopyYToAlphaRow_AVX2(const uint8* src, uint8* dst, int width) { #ifdef HAS_SETROW_X86 void SetRow_X86(uint8* dst, uint8 v8, int width) { size_t width_tmp = (size_t)(width >> 2); - const uint32 v32 = v8 * 0x01010101; // Duplicate byte to all bytes. + const uint32 v32 = v8 * 0x01010101u; // Duplicate byte to all bytes. 
asm volatile ( "rep stosl " MEMSTORESTRING(eax,0) " \n" : "+D"(dst), // %0 @@ -2817,7 +3066,7 @@ void ARGBSetRow_X86(uint8* dst_argb, uint32 v32, int width) { #endif // HAS_SETROW_X86 #ifdef HAS_YUY2TOYROW_SSE2 -void YUY2ToYRow_SSE2(const uint8* src_yuy2, uint8* dst_y, int pix) { +void YUY2ToYRow_SSE2(const uint8* src_yuy2, uint8* dst_y, int width) { asm volatile ( "pcmpeqb %%xmm5,%%xmm5 \n" "psrlw $0x8,%%xmm5 \n" @@ -2835,7 +3084,7 @@ void YUY2ToYRow_SSE2(const uint8* src_yuy2, uint8* dst_y, int pix) { "jg 1b \n" : "+r"(src_yuy2), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "memory", "cc" , "xmm0", "xmm1", "xmm5" @@ -2843,7 +3092,7 @@ void YUY2ToYRow_SSE2(const uint8* src_yuy2, uint8* dst_y, int pix) { } void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "pcmpeqb %%xmm5,%%xmm5 \n" "psrlw $0x8,%%xmm5 \n" @@ -2873,7 +3122,7 @@ void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2, : "+r"(src_yuy2), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : "r"((intptr_t)(stride_yuy2)) // %4 : "memory", "cc", NACL_R14 "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" @@ -2881,7 +3130,7 @@ void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2, } void YUY2ToUV422Row_SSE2(const uint8* src_yuy2, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "pcmpeqb %%xmm5,%%xmm5 \n" "psrlw $0x8,%%xmm5 \n" @@ -2907,14 +3156,14 @@ void YUY2ToUV422Row_SSE2(const uint8* src_yuy2, : "+r"(src_yuy2), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : : "memory", "cc", NACL_R14 "xmm0", "xmm1", "xmm5" ); } -void UYVYToYRow_SSE2(const uint8* src_uyvy, uint8* dst_y, int pix) { +void UYVYToYRow_SSE2(const uint8* src_uyvy, uint8* dst_y, int width) { asm volatile ( LABELALIGN "1: \n" @@ -2930,7 +3179,7 @@ void UYVYToYRow_SSE2(const uint8* src_uyvy, uint8* 
dst_y, int pix) { "jg 1b \n" : "+r"(src_uyvy), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "memory", "cc" , "xmm0", "xmm1" @@ -2938,7 +3187,7 @@ void UYVYToYRow_SSE2(const uint8* src_uyvy, uint8* dst_y, int pix) { } void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "pcmpeqb %%xmm5,%%xmm5 \n" "psrlw $0x8,%%xmm5 \n" @@ -2968,7 +3217,7 @@ void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy, : "+r"(src_uyvy), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : "r"((intptr_t)(stride_uyvy)) // %4 : "memory", "cc", NACL_R14 "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" @@ -2976,7 +3225,7 @@ void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy, } void UYVYToUV422Row_SSE2(const uint8* src_uyvy, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "pcmpeqb %%xmm5,%%xmm5 \n" "psrlw $0x8,%%xmm5 \n" @@ -3002,7 +3251,7 @@ void UYVYToUV422Row_SSE2(const uint8* src_uyvy, : "+r"(src_uyvy), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : : "memory", "cc", NACL_R14 "xmm0", "xmm1", "xmm5" @@ -3011,7 +3260,7 @@ void UYVYToUV422Row_SSE2(const uint8* src_uyvy, #endif // HAS_YUY2TOYROW_SSE2 #ifdef HAS_YUY2TOYROW_AVX2 -void YUY2ToYRow_AVX2(const uint8* src_yuy2, uint8* dst_y, int pix) { +void YUY2ToYRow_AVX2(const uint8* src_yuy2, uint8* dst_y, int width) { asm volatile ( "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" "vpsrlw $0x8,%%ymm5,%%ymm5 \n" @@ -3031,7 +3280,7 @@ void YUY2ToYRow_AVX2(const uint8* src_yuy2, uint8* dst_y, int pix) { "vzeroupper \n" : "+r"(src_yuy2), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "memory", "cc" , "xmm0", "xmm1", "xmm5" @@ -3039,7 +3288,7 @@ void YUY2ToYRow_AVX2(const uint8* src_yuy2, uint8* dst_y, int pix) { } void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2, - uint8* dst_u, 
uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" "vpsrlw $0x8,%%ymm5,%%ymm5 \n" @@ -3070,7 +3319,7 @@ void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2, : "+r"(src_yuy2), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : "r"((intptr_t)(stride_yuy2)) // %4 : "memory", "cc", NACL_R14 "xmm0", "xmm1", "xmm5" @@ -3078,7 +3327,7 @@ void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2, } void YUY2ToUV422Row_AVX2(const uint8* src_yuy2, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" "vpsrlw $0x8,%%ymm5,%%ymm5 \n" @@ -3107,14 +3356,14 @@ void YUY2ToUV422Row_AVX2(const uint8* src_yuy2, : "+r"(src_yuy2), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : : "memory", "cc", NACL_R14 "xmm0", "xmm1", "xmm5" ); } -void UYVYToYRow_AVX2(const uint8* src_uyvy, uint8* dst_y, int pix) { +void UYVYToYRow_AVX2(const uint8* src_uyvy, uint8* dst_y, int width) { asm volatile ( LABELALIGN "1: \n" @@ -3132,14 +3381,14 @@ void UYVYToYRow_AVX2(const uint8* src_uyvy, uint8* dst_y, int pix) { "vzeroupper \n" : "+r"(src_uyvy), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "memory", "cc" , "xmm0", "xmm1", "xmm5" ); } void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" "vpsrlw $0x8,%%ymm5,%%ymm5 \n" @@ -3171,7 +3420,7 @@ void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy, : "+r"(src_uyvy), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : "r"((intptr_t)(stride_uyvy)) // %4 : "memory", "cc", NACL_R14 "xmm0", "xmm1", "xmm5" @@ -3179,7 +3428,7 @@ void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy, } void UYVYToUV422Row_AVX2(const uint8* src_uyvy, - 
uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" "vpsrlw $0x8,%%ymm5,%%ymm5 \n" @@ -3208,7 +3457,7 @@ void UYVYToUV422Row_AVX2(const uint8* src_uyvy, : "+r"(src_uyvy), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : : "memory", "cc", NACL_R14 "xmm0", "xmm1", "xmm5" @@ -3216,92 +3465,6 @@ void UYVYToUV422Row_AVX2(const uint8* src_uyvy, } #endif // HAS_YUY2TOYROW_AVX2 -#ifdef HAS_ARGBBLENDROW_SSE2 -// Blend 8 pixels at a time. -void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width) { - asm volatile ( - "pcmpeqb %%xmm7,%%xmm7 \n" - "psrlw $0xf,%%xmm7 \n" - "pcmpeqb %%xmm6,%%xmm6 \n" - "psrlw $0x8,%%xmm6 \n" - "pcmpeqb %%xmm5,%%xmm5 \n" - "psllw $0x8,%%xmm5 \n" - "pcmpeqb %%xmm4,%%xmm4 \n" - "pslld $0x18,%%xmm4 \n" - "sub $0x4,%3 \n" - "jl 49f \n" - - // 4 pixel loop. - LABELALIGN - "41: \n" - "movdqu " MEMACCESS(0) ",%%xmm3 \n" - "lea " MEMLEA(0x10,0) ",%0 \n" - "movdqa %%xmm3,%%xmm0 \n" - "pxor %%xmm4,%%xmm3 \n" - "movdqu " MEMACCESS(1) ",%%xmm2 \n" - "psrlw $0x8,%%xmm3 \n" - "pshufhw $0xf5,%%xmm3,%%xmm3 \n" - "pshuflw $0xf5,%%xmm3,%%xmm3 \n" - "pand %%xmm6,%%xmm2 \n" - "paddw %%xmm7,%%xmm3 \n" - "pmullw %%xmm3,%%xmm2 \n" - "movdqu " MEMACCESS(1) ",%%xmm1 \n" - "lea " MEMLEA(0x10,1) ",%1 \n" - "psrlw $0x8,%%xmm1 \n" - "por %%xmm4,%%xmm0 \n" - "pmullw %%xmm3,%%xmm1 \n" - "psrlw $0x8,%%xmm2 \n" - "paddusb %%xmm2,%%xmm0 \n" - "pand %%xmm5,%%xmm1 \n" - "paddusb %%xmm1,%%xmm0 \n" - "movdqu %%xmm0," MEMACCESS(2) " \n" - "lea " MEMLEA(0x10,2) ",%2 \n" - "sub $0x4,%3 \n" - "jge 41b \n" - - "49: \n" - "add $0x3,%3 \n" - "jl 99f \n" - - // 1 pixel loop. 
- "91: \n" - "movd " MEMACCESS(0) ",%%xmm3 \n" - "lea " MEMLEA(0x4,0) ",%0 \n" - "movdqa %%xmm3,%%xmm0 \n" - "pxor %%xmm4,%%xmm3 \n" - "movd " MEMACCESS(1) ",%%xmm2 \n" - "psrlw $0x8,%%xmm3 \n" - "pshufhw $0xf5,%%xmm3,%%xmm3 \n" - "pshuflw $0xf5,%%xmm3,%%xmm3 \n" - "pand %%xmm6,%%xmm2 \n" - "paddw %%xmm7,%%xmm3 \n" - "pmullw %%xmm3,%%xmm2 \n" - "movd " MEMACCESS(1) ",%%xmm1 \n" - "lea " MEMLEA(0x4,1) ",%1 \n" - "psrlw $0x8,%%xmm1 \n" - "por %%xmm4,%%xmm0 \n" - "pmullw %%xmm3,%%xmm1 \n" - "psrlw $0x8,%%xmm2 \n" - "paddusb %%xmm2,%%xmm0 \n" - "pand %%xmm5,%%xmm1 \n" - "paddusb %%xmm1,%%xmm0 \n" - "movd %%xmm0," MEMACCESS(2) " \n" - "lea " MEMLEA(0x4,2) ",%2 \n" - "sub $0x1,%3 \n" - "jge 91b \n" - "99: \n" - : "+r"(src_argb0), // %0 - "+r"(src_argb1), // %1 - "+r"(dst_argb), // %2 - "+r"(width) // %3 - : - : "memory", "cc" - , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" - ); -} -#endif // HAS_ARGBBLENDROW_SSE2 - #ifdef HAS_ARGBBLENDROW_SSSE3 // Shuffle table for isolating alpha. static uvec8 kShuffleAlpha = { @@ -3310,15 +3473,6 @@ static uvec8 kShuffleAlpha = { }; // Blend 8 pixels at a time -// Shuffle table for reversing the bytes. - -// Same as SSE2, but replaces -// psrlw xmm3, 8 // alpha -// pshufhw xmm3, xmm3,0F5h // 8 alpha words -// pshuflw xmm3, xmm3,0F5h -// with.. -// pshufb xmm3, kShuffleAlpha // alpha - void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1, uint8* dst_argb, int width) { asm volatile ( @@ -3399,49 +3553,112 @@ void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1, } #endif // HAS_ARGBBLENDROW_SSSE3 -#ifdef HAS_ARGBATTENUATEROW_SSE2 -// Attenuate 4 pixels at a time. -void ARGBAttenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width) { +#ifdef HAS_BLENDPLANEROW_SSSE3 +// Blend 8 pixels at a time. 
+// unsigned version of math +// =((A2*C2)+(B2*(255-C2))+255)/256 +// signed version of math +// =(((A2-128)*C2)+((B2-128)*(255-C2))+32768+127)/256 +void BlendPlaneRow_SSSE3(const uint8* src0, const uint8* src1, + const uint8* alpha, uint8* dst, int width) { asm volatile ( - "pcmpeqb %%xmm4,%%xmm4 \n" - "pslld $0x18,%%xmm4 \n" - "pcmpeqb %%xmm5,%%xmm5 \n" - "psrld $0x8,%%xmm5 \n" + "pcmpeqb %%xmm5,%%xmm5 \n" + "psllw $0x8,%%xmm5 \n" + "mov $0x80808080,%%eax \n" + "movd %%eax,%%xmm6 \n" + "pshufd $0x0,%%xmm6,%%xmm6 \n" + "mov $0x807f807f,%%eax \n" + "movd %%eax,%%xmm7 \n" + "pshufd $0x0,%%xmm7,%%xmm7 \n" + "sub %2,%0 \n" + "sub %2,%1 \n" + "sub %2,%3 \n" - // 4 pixel loop. + // 8 pixel loop. LABELALIGN "1: \n" - "movdqu " MEMACCESS(0) ",%%xmm0 \n" - "punpcklbw %%xmm0,%%xmm0 \n" - "pshufhw $0xff,%%xmm0,%%xmm2 \n" - "pshuflw $0xff,%%xmm2,%%xmm2 \n" - "pmulhuw %%xmm2,%%xmm0 \n" - "movdqu " MEMACCESS(0) ",%%xmm1 \n" - "punpckhbw %%xmm1,%%xmm1 \n" - "pshufhw $0xff,%%xmm1,%%xmm2 \n" - "pshuflw $0xff,%%xmm2,%%xmm2 \n" - "pmulhuw %%xmm2,%%xmm1 \n" - "movdqu " MEMACCESS(0) ",%%xmm2 \n" - "lea " MEMLEA(0x10,0) ",%0 \n" - "psrlw $0x8,%%xmm0 \n" - "pand %%xmm4,%%xmm2 \n" - "psrlw $0x8,%%xmm1 \n" - "packuswb %%xmm1,%%xmm0 \n" - "pand %%xmm5,%%xmm0 \n" - "por %%xmm2,%%xmm0 \n" - "movdqu %%xmm0," MEMACCESS(1) " \n" - "lea " MEMLEA(0x10,1) ",%1 \n" - "sub $0x4,%2 \n" + "movq (%2),%%xmm0 \n" + "punpcklbw %%xmm0,%%xmm0 \n" + "pxor %%xmm5,%%xmm0 \n" + "movq (%0,%2,1),%%xmm1 \n" + "movq (%1,%2,1),%%xmm2 \n" + "punpcklbw %%xmm2,%%xmm1 \n" + "psubb %%xmm6,%%xmm1 \n" + "pmaddubsw %%xmm1,%%xmm0 \n" + "paddw %%xmm7,%%xmm0 \n" + "psrlw $0x8,%%xmm0 \n" + "packuswb %%xmm0,%%xmm0 \n" + "movq %%xmm0,(%3,%2,1) \n" + "lea 0x8(%2),%2 \n" + "sub $0x8,%4 \n" "jg 1b \n" - : "+r"(src_argb), // %0 - "+r"(dst_argb), // %1 - "+r"(width) // %2 - : - : "memory", "cc" - , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" + : "+r"(src0), // %0 + "+r"(src1), // %1 + "+r"(alpha), // %2 + "+r"(dst), // %3 + 
"+rm"(width) // %4 + :: "memory", "cc", "eax", "xmm0", "xmm1", "xmm2", "xmm5", "xmm6", "xmm7" ); } -#endif // HAS_ARGBATTENUATEROW_SSE2 +#endif // HAS_BLENDPLANEROW_SSSE3 + +#ifdef HAS_BLENDPLANEROW_AVX2 +// Blend 32 pixels at a time. +// unsigned version of math +// =((A2*C2)+(B2*(255-C2))+255)/256 +// signed version of math +// =(((A2-128)*C2)+((B2-128)*(255-C2))+32768+127)/256 +void BlendPlaneRow_AVX2(const uint8* src0, const uint8* src1, + const uint8* alpha, uint8* dst, int width) { + asm volatile ( + "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" + "vpsllw $0x8,%%ymm5,%%ymm5 \n" + "mov $0x80808080,%%eax \n" + "vmovd %%eax,%%xmm6 \n" + "vbroadcastss %%xmm6,%%ymm6 \n" + "mov $0x807f807f,%%eax \n" + "vmovd %%eax,%%xmm7 \n" + "vbroadcastss %%xmm7,%%ymm7 \n" + "sub %2,%0 \n" + "sub %2,%1 \n" + "sub %2,%3 \n" + + // 32 pixel loop. + LABELALIGN + "1: \n" + "vmovdqu (%2),%%ymm0 \n" + "vpunpckhbw %%ymm0,%%ymm0,%%ymm3 \n" + "vpunpcklbw %%ymm0,%%ymm0,%%ymm0 \n" + "vpxor %%ymm5,%%ymm3,%%ymm3 \n" + "vpxor %%ymm5,%%ymm0,%%ymm0 \n" + "vmovdqu (%0,%2,1),%%ymm1 \n" + "vmovdqu (%1,%2,1),%%ymm2 \n" + "vpunpckhbw %%ymm2,%%ymm1,%%ymm4 \n" + "vpunpcklbw %%ymm2,%%ymm1,%%ymm1 \n" + "vpsubb %%ymm6,%%ymm4,%%ymm4 \n" + "vpsubb %%ymm6,%%ymm1,%%ymm1 \n" + "vpmaddubsw %%ymm4,%%ymm3,%%ymm3 \n" + "vpmaddubsw %%ymm1,%%ymm0,%%ymm0 \n" + "vpaddw %%ymm7,%%ymm3,%%ymm3 \n" + "vpaddw %%ymm7,%%ymm0,%%ymm0 \n" + "vpsrlw $0x8,%%ymm3,%%ymm3 \n" + "vpsrlw $0x8,%%ymm0,%%ymm0 \n" + "vpackuswb %%ymm3,%%ymm0,%%ymm0 \n" + "vmovdqu %%ymm0,(%3,%2,1) \n" + "lea 0x20(%2),%2 \n" + "sub $0x20,%4 \n" + "jg 1b \n" + "vzeroupper \n" + : "+r"(src0), // %0 + "+r"(src1), // %1 + "+r"(alpha), // %2 + "+r"(dst), // %3 + "+rm"(width) // %4 + :: "memory", "cc", "eax", + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" + ); +} +#endif // HAS_BLENDPLANEROW_AVX2 #ifdef HAS_ARGBATTENUATEROW_SSSE3 // Shuffle table duplicating alpha @@ -3542,7 +3759,7 @@ void ARGBAttenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int 
width) { // Unattenuate 4 pixels at a time. void ARGBUnattenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width) { - uintptr_t alpha = 0; + uintptr_t alpha; asm volatile ( // 4 pixel loop. LABELALIGN @@ -3573,10 +3790,10 @@ void ARGBUnattenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb, "lea " MEMLEA(0x10,1) ",%1 \n" "sub $0x4,%2 \n" "jg 1b \n" - : "+r"(src_argb), // %0 - "+r"(dst_argb), // %1 - "+r"(width), // %2 - "+r"(alpha) // %3 + : "+r"(src_argb), // %0 + "+r"(dst_argb), // %1 + "+r"(width), // %2 + "=&r"(alpha) // %3 : "r"(fixed_invtbl8) // %4 : "memory", "cc", NACL_R14 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" @@ -3592,7 +3809,7 @@ static const uvec8 kUnattenShuffleAlpha_AVX2 = { // Unattenuate 8 pixels at a time. void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width) { - uintptr_t alpha = 0; + uintptr_t alpha; asm volatile ( "sub %0,%1 \n" "vbroadcastf128 %5,%%ymm5 \n" @@ -3641,10 +3858,10 @@ void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, "sub $0x8,%2 \n" "jg 1b \n" "vzeroupper \n" - : "+r"(src_argb), // %0 - "+r"(dst_argb), // %1 - "+r"(width), // %2 - "+r"(alpha) // %3 + : "+r"(src_argb), // %0 + "+r"(dst_argb), // %1 + "+r"(width), // %2 + "=&r"(alpha) // %3 : "r"(fixed_invtbl8), // %4 "m"(kUnattenShuffleAlpha_AVX2) // %5 : "memory", "cc", NACL_R14 @@ -4569,7 +4786,7 @@ LIBYUV_API void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride, uint8* dst_argb, const float* src_dudv, int width) { intptr_t src_argb_stride_temp = src_argb_stride; - intptr_t temp = 0; + intptr_t temp; asm volatile ( "movq " MEMACCESS(3) ",%%xmm2 \n" "movq " MEMACCESS2(0x08,3) ",%%xmm7 \n" @@ -4641,7 +4858,7 @@ void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride, "+r"(dst_argb), // %2 "+r"(src_dudv), // %3 "+rm"(width), // %4 - "+r"(temp) // %5 + "=&r"(temp) // %5 : : "memory", "cc", NACL_R14 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" @@ -4656,56 +4873,47 @@ void 
InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr, int source_y_fraction) { asm volatile ( "sub %1,%0 \n" - "shr %3 \n" "cmp $0x0,%3 \n" "je 100f \n" - "cmp $0x20,%3 \n" - "je 75f \n" - "cmp $0x40,%3 \n" + "cmp $0x80,%3 \n" "je 50f \n" - "cmp $0x60,%3 \n" - "je 25f \n" "movd %3,%%xmm0 \n" "neg %3 \n" - "add $0x80,%3 \n" + "add $0x100,%3 \n" "movd %3,%%xmm5 \n" "punpcklbw %%xmm0,%%xmm5 \n" "punpcklwd %%xmm5,%%xmm5 \n" "pshufd $0x0,%%xmm5,%%xmm5 \n" + "mov $0x80808080,%%eax \n" + "movd %%eax,%%xmm4 \n" + "pshufd $0x0,%%xmm4,%%xmm4 \n" // General purpose row blend. LABELALIGN "1: \n" "movdqu " MEMACCESS(1) ",%%xmm0 \n" MEMOPREG(movdqu,0x00,1,4,1,xmm2) - "movdqa %%xmm0,%%xmm1 \n" - "punpcklbw %%xmm2,%%xmm0 \n" - "punpckhbw %%xmm2,%%xmm1 \n" - "pmaddubsw %%xmm5,%%xmm0 \n" - "pmaddubsw %%xmm5,%%xmm1 \n" - "psrlw $0x7,%%xmm0 \n" - "psrlw $0x7,%%xmm1 \n" - "packuswb %%xmm1,%%xmm0 \n" - MEMOPMEM(movdqu,xmm0,0x00,1,0,1) + "movdqa %%xmm0,%%xmm1 \n" + "punpcklbw %%xmm2,%%xmm0 \n" + "punpckhbw %%xmm2,%%xmm1 \n" + "psubb %%xmm4,%%xmm0 \n" + "psubb %%xmm4,%%xmm1 \n" + "movdqa %%xmm5,%%xmm2 \n" + "movdqa %%xmm5,%%xmm3 \n" + "pmaddubsw %%xmm0,%%xmm2 \n" + "pmaddubsw %%xmm1,%%xmm3 \n" + "paddw %%xmm4,%%xmm2 \n" + "paddw %%xmm4,%%xmm3 \n" + "psrlw $0x8,%%xmm2 \n" + "psrlw $0x8,%%xmm3 \n" + "packuswb %%xmm3,%%xmm2 \n" + MEMOPMEM(movdqu,xmm2,0x00,1,0,1) "lea " MEMLEA(0x10,1) ",%1 \n" "sub $0x10,%2 \n" "jg 1b \n" "jmp 99f \n" - // Blend 25 / 75. - LABELALIGN - "25: \n" - "movdqu " MEMACCESS(1) ",%%xmm0 \n" - MEMOPREG(movdqu,0x00,1,4,1,xmm1) - "pavgb %%xmm1,%%xmm0 \n" - "pavgb %%xmm1,%%xmm0 \n" - MEMOPMEM(movdqu,xmm0,0x00,1,0,1) - "lea " MEMLEA(0x10,1) ",%1 \n" - "sub $0x10,%2 \n" - "jg 25b \n" - "jmp 99f \n" - // Blend 50 / 50. LABELALIGN "50: \n" @@ -4718,19 +4926,6 @@ void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr, "jg 50b \n" "jmp 99f \n" - // Blend 75 / 25. 
- LABELALIGN - "75: \n" - "movdqu " MEMACCESS(1) ",%%xmm1 \n" - MEMOPREG(movdqu,0x00,1,4,1,xmm0) - "pavgb %%xmm1,%%xmm0 \n" - "pavgb %%xmm1,%%xmm0 \n" - MEMOPMEM(movdqu,xmm0,0x00,1,0,1) - "lea " MEMLEA(0x10,1) ",%1 \n" - "sub $0x10,%2 \n" - "jg 75b \n" - "jmp 99f \n" - // Blend 100 / 0 - Copy row unchanged. LABELALIGN "100: \n" @@ -4741,13 +4936,13 @@ void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr, "jg 100b \n" "99: \n" - : "+r"(dst_ptr), // %0 - "+r"(src_ptr), // %1 - "+r"(dst_width), // %2 + : "+r"(dst_ptr), // %0 + "+r"(src_ptr), // %1 + "+rm"(dst_width), // %2 "+r"(source_y_fraction) // %3 : "r"((intptr_t)(src_stride)) // %4 - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm5" + : "memory", "cc", "eax", NACL_R14 + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } #endif // HAS_INTERPOLATEROW_SSSE3 @@ -4758,25 +4953,22 @@ void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr, ptrdiff_t src_stride, int dst_width, int source_y_fraction) { asm volatile ( - "shr %3 \n" "cmp $0x0,%3 \n" "je 100f \n" "sub %1,%0 \n" - "cmp $0x20,%3 \n" - "je 75f \n" - "cmp $0x40,%3 \n" + "cmp $0x80,%3 \n" "je 50f \n" - "cmp $0x60,%3 \n" - "je 25f \n" "vmovd %3,%%xmm0 \n" "neg %3 \n" - "add $0x80,%3 \n" + "add $0x100,%3 \n" "vmovd %3,%%xmm5 \n" "vpunpcklbw %%xmm0,%%xmm5,%%xmm5 \n" "vpunpcklwd %%xmm5,%%xmm5,%%xmm5 \n" - "vpxor %%ymm0,%%ymm0,%%ymm0 \n" - "vpermd %%ymm5,%%ymm0,%%ymm5 \n" + "vbroadcastss %%xmm5,%%ymm5 \n" + "mov $0x80808080,%%eax \n" + "vmovd %%eax,%%xmm4 \n" + "vbroadcastss %%xmm4,%%ymm4 \n" // General purpose row blend. 
LABELALIGN @@ -4785,10 +4977,14 @@ void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr, MEMOPREG(vmovdqu,0x00,1,4,1,ymm2) "vpunpckhbw %%ymm2,%%ymm0,%%ymm1 \n" "vpunpcklbw %%ymm2,%%ymm0,%%ymm0 \n" - "vpmaddubsw %%ymm5,%%ymm0,%%ymm0 \n" - "vpmaddubsw %%ymm5,%%ymm1,%%ymm1 \n" - "vpsrlw $0x7,%%ymm0,%%ymm0 \n" - "vpsrlw $0x7,%%ymm1,%%ymm1 \n" + "vpsubb %%ymm4,%%ymm1,%%ymm1 \n" + "vpsubb %%ymm4,%%ymm0,%%ymm0 \n" + "vpmaddubsw %%ymm1,%%ymm5,%%ymm1 \n" + "vpmaddubsw %%ymm0,%%ymm5,%%ymm0 \n" + "vpaddw %%ymm4,%%ymm1,%%ymm1 \n" + "vpaddw %%ymm4,%%ymm0,%%ymm0 \n" + "vpsrlw $0x8,%%ymm1,%%ymm1 \n" + "vpsrlw $0x8,%%ymm0,%%ymm0 \n" "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n" MEMOPMEM(vmovdqu,ymm0,0x00,1,0,1) "lea " MEMLEA(0x20,1) ",%1 \n" @@ -4796,19 +4992,6 @@ void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr, "jg 1b \n" "jmp 99f \n" - // Blend 25 / 75. - LABELALIGN - "25: \n" - "vmovdqu " MEMACCESS(1) ",%%ymm0 \n" - MEMOPREG(vmovdqu,0x00,1,4,1,ymm1) - "vpavgb %%ymm1,%%ymm0,%%ymm0 \n" - "vpavgb %%ymm1,%%ymm0,%%ymm0 \n" - MEMOPMEM(vmovdqu,ymm0,0x00,1,0,1) - "lea " MEMLEA(0x20,1) ",%1 \n" - "sub $0x20,%2 \n" - "jg 25b \n" - "jmp 99f \n" - // Blend 50 / 50. LABELALIGN "50: \n" @@ -4820,19 +5003,6 @@ void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr, "jg 50b \n" "jmp 99f \n" - // Blend 75 / 25. - LABELALIGN - "75: \n" - "vmovdqu " MEMACCESS(1) ",%%ymm1 \n" - MEMOPREG(vmovdqu,0x00,1,4,1,ymm0) - "vpavgb %%ymm1,%%ymm0,%%ymm0 \n" - "vpavgb %%ymm1,%%ymm0,%%ymm0 \n" - MEMOPMEM(vmovdqu,ymm0,0x00,1,0,1) - "lea " MEMLEA(0x20,1) ",%1 \n" - "sub $0x20,%2 \n" - "jg 75b \n" - "jmp 99f \n" - // Blend 100 / 0 - Copy row unchanged. 
LABELALIGN "100: \n" @@ -4844,130 +5014,19 @@ void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr, "999: \n" : "+D"(dst_ptr), // %0 "+S"(src_ptr), // %1 - "+c"(dst_width), // %2 + "+cm"(dst_width), // %2 "+r"(source_y_fraction) // %3 : "r"((intptr_t)(src_stride)) // %4 - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm5" + : "memory", "cc", "eax", NACL_R14 + "xmm0", "xmm1", "xmm2", "xmm4", "xmm5" ); } #endif // HAS_INTERPOLATEROW_AVX2 -#ifdef HAS_INTERPOLATEROW_SSE2 -// Bilinear filter 16x2 -> 16x1 -void InterpolateRow_SSE2(uint8* dst_ptr, const uint8* src_ptr, - ptrdiff_t src_stride, int dst_width, - int source_y_fraction) { - asm volatile ( - "sub %1,%0 \n" - "shr %3 \n" - "cmp $0x0,%3 \n" - "je 100f \n" - "cmp $0x20,%3 \n" - "je 75f \n" - "cmp $0x40,%3 \n" - "je 50f \n" - "cmp $0x60,%3 \n" - "je 25f \n" - - "movd %3,%%xmm0 \n" - "neg %3 \n" - "add $0x80,%3 \n" - "movd %3,%%xmm5 \n" - "punpcklbw %%xmm0,%%xmm5 \n" - "punpcklwd %%xmm5,%%xmm5 \n" - "pshufd $0x0,%%xmm5,%%xmm5 \n" - "pxor %%xmm4,%%xmm4 \n" - - // General purpose row blend. - LABELALIGN - "1: \n" - "movdqu " MEMACCESS(1) ",%%xmm0 \n" - MEMOPREG(movdqu,0x00,1,4,1,xmm2) // movdqu (%1,%4,1),%%xmm2 - "movdqa %%xmm0,%%xmm1 \n" - "movdqa %%xmm2,%%xmm3 \n" - "punpcklbw %%xmm4,%%xmm2 \n" - "punpckhbw %%xmm4,%%xmm3 \n" - "punpcklbw %%xmm4,%%xmm0 \n" - "punpckhbw %%xmm4,%%xmm1 \n" - "psubw %%xmm0,%%xmm2 \n" - "psubw %%xmm1,%%xmm3 \n" - "paddw %%xmm2,%%xmm2 \n" - "paddw %%xmm3,%%xmm3 \n" - "pmulhw %%xmm5,%%xmm2 \n" - "pmulhw %%xmm5,%%xmm3 \n" - "paddw %%xmm2,%%xmm0 \n" - "paddw %%xmm3,%%xmm1 \n" - "packuswb %%xmm1,%%xmm0 \n" - MEMOPMEM(movdqu,xmm0,0x00,1,0,1) // movdqu %%xmm0,(%1,%0,1) - "lea " MEMLEA(0x10,1) ",%1 \n" - "sub $0x10,%2 \n" - "jg 1b \n" - "jmp 99f \n" - - // Blend 25 / 75. 
- LABELALIGN - "25: \n" - "movdqu " MEMACCESS(1) ",%%xmm0 \n" - MEMOPREG(movdqu,0x00,1,4,1,xmm1) // movdqu (%1,%4,1),%%xmm1 - "pavgb %%xmm1,%%xmm0 \n" - "pavgb %%xmm1,%%xmm0 \n" - MEMOPMEM(movdqu,xmm0,0x00,1,0,1) // movdqu %%xmm0,(%1,%0,1) - "lea " MEMLEA(0x10,1) ",%1 \n" - "sub $0x10,%2 \n" - "jg 25b \n" - "jmp 99f \n" - - // Blend 50 / 50. - LABELALIGN - "50: \n" - "movdqu " MEMACCESS(1) ",%%xmm0 \n" - MEMOPREG(movdqu,0x00,1,4,1,xmm1) // movdqu (%1,%4,1),%%xmm1 - "pavgb %%xmm1,%%xmm0 \n" - MEMOPMEM(movdqu,xmm0,0x00,1,0,1) // movdqu %%xmm0,(%1,%0,1) - "lea " MEMLEA(0x10,1) ",%1 \n" - "sub $0x10,%2 \n" - "jg 50b \n" - "jmp 99f \n" - - // Blend 75 / 25. - LABELALIGN - "75: \n" - "movdqu " MEMACCESS(1) ",%%xmm1 \n" - MEMOPREG(movdqu,0x00,1,4,1,xmm0) // movdqu (%1,%4,1),%%xmm0 - "pavgb %%xmm1,%%xmm0 \n" - "pavgb %%xmm1,%%xmm0 \n" - MEMOPMEM(movdqu,xmm0,0x00,1,0,1) // movdqu %%xmm0,(%1,%0,1) - "lea " MEMLEA(0x10,1) ",%1 \n" - "sub $0x10,%2 \n" - "jg 75b \n" - "jmp 99f \n" - - // Blend 100 / 0 - Copy row unchanged. - LABELALIGN - "100: \n" - "movdqu " MEMACCESS(1) ",%%xmm0 \n" - MEMOPMEM(movdqu,xmm0,0x00,1,0,1) // movdqu %%xmm0,(%1,%0,1) - "lea " MEMLEA(0x10,1) ",%1 \n" - "sub $0x10,%2 \n" - "jg 100b \n" - - "99: \n" - : "+r"(dst_ptr), // %0 - "+r"(src_ptr), // %1 - "+r"(dst_width), // %2 - "+r"(source_y_fraction) // %3 - : "r"((intptr_t)(src_stride)) // %4 - : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" - ); -} -#endif // HAS_INTERPOLATEROW_SSE2 - #ifdef HAS_ARGBSHUFFLEROW_SSSE3 // For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA. 
void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix) { + const uint8* shuffler, int width) { asm volatile ( "movdqu " MEMACCESS(3) ",%%xmm5 \n" LABELALIGN @@ -4984,7 +5043,7 @@ void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb, "jg 1b \n" : "+r"(src_argb), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : "r"(shuffler) // %3 : "memory", "cc" , "xmm0", "xmm1", "xmm5" @@ -4995,7 +5054,7 @@ void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb, #ifdef HAS_ARGBSHUFFLEROW_AVX2 // For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA. void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix) { + const uint8* shuffler, int width) { asm volatile ( "vbroadcastf128 " MEMACCESS(3) ",%%ymm5 \n" LABELALIGN @@ -5013,7 +5072,7 @@ void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb, "vzeroupper \n" : "+r"(src_argb), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : "r"(shuffler) // %3 : "memory", "cc" , "xmm0", "xmm1", "xmm5" @@ -5024,8 +5083,8 @@ void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb, #ifdef HAS_ARGBSHUFFLEROW_SSE2 // For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA. 
void ARGBShuffleRow_SSE2(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix) { - uintptr_t pixel_temp = 0u; + const uint8* shuffler, int width) { + uintptr_t pixel_temp; asm volatile ( "pxor %%xmm5,%%xmm5 \n" "mov " MEMACCESS(4) ",%k2 \n" @@ -5130,11 +5189,11 @@ void ARGBShuffleRow_SSE2(const uint8* src_argb, uint8* dst_argb, "jg 3012b \n" "99: \n" - : "+r"(src_argb), // %0 - "+r"(dst_argb), // %1 - "+d"(pixel_temp), // %2 - "+r"(pix) // %3 - : "r"(shuffler) // %4 + : "+r"(src_argb), // %0 + "+r"(dst_argb), // %1 + "=&d"(pixel_temp), // %2 + "+r"(width) // %3 + : "r"(shuffler) // %4 : "memory", "cc", NACL_R14 "xmm0", "xmm1", "xmm5" ); @@ -5311,7 +5370,7 @@ void ARGBPolynomialRow_AVX2(const uint8* src_argb, // Tranform ARGB pixels with color table. void ARGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, int width) { - uintptr_t pixel_temp = 0u; + uintptr_t pixel_temp; asm volatile ( // 1 pixel loop. LABELALIGN @@ -5331,10 +5390,10 @@ void ARGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, "mov %b1," MEMACCESS2(-0x1,0) " \n" "dec %2 \n" "jg 1b \n" - : "+r"(dst_argb), // %0 - "+d"(pixel_temp), // %1 - "+r"(width) // %2 - : "r"(table_argb) // %3 + : "+r"(dst_argb), // %0 + "=&d"(pixel_temp), // %1 + "+r"(width) // %2 + : "r"(table_argb) // %3 : "memory", "cc"); } #endif // HAS_ARGBCOLORTABLEROW_X86 @@ -5342,7 +5401,7 @@ void ARGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, #ifdef HAS_RGBCOLORTABLEROW_X86 // Tranform RGB pixels with color table. void RGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, int width) { - uintptr_t pixel_temp = 0u; + uintptr_t pixel_temp; asm volatile ( // 1 pixel loop. 
LABELALIGN @@ -5359,10 +5418,10 @@ void RGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, int width) { "mov %b1," MEMACCESS2(-0x2,0) " \n" "dec %2 \n" "jg 1b \n" - : "+r"(dst_argb), // %0 - "+d"(pixel_temp), // %1 - "+r"(width) // %2 - : "r"(table_argb) // %3 + : "+r"(dst_argb), // %0 + "=&d"(pixel_temp), // %1 + "+r"(width) // %2 + : "r"(table_argb) // %3 : "memory", "cc"); } #endif // HAS_RGBCOLORTABLEROW_X86 @@ -5372,8 +5431,8 @@ void RGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, int width) { void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width, const uint8* luma, uint32 lumacoeff) { - uintptr_t pixel_temp = 0u; - uintptr_t table_temp = 0u; + uintptr_t pixel_temp; + uintptr_t table_temp; asm volatile ( "movd %6,%%xmm3 \n" "pshufd $0x0,%%xmm3,%%xmm3 \n" @@ -5455,13 +5514,13 @@ void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb, uint8* dst_argb, "lea " MEMLEA(0x10,3) ",%3 \n" "sub $0x4,%4 \n" "jg 1b \n" - : "+d"(pixel_temp), // %0 - "+a"(table_temp), // %1 - "+r"(src_argb), // %2 - "+r"(dst_argb), // %3 - "+rm"(width) // %4 - : "r"(luma), // %5 - "rm"(lumacoeff) // %6 + : "=&d"(pixel_temp), // %0 + "=&a"(table_temp), // %1 + "+r"(src_argb), // %2 + "+r"(dst_argb), // %3 + "+rm"(width) // %4 + : "r"(luma), // %5 + "rm"(lumacoeff) // %6 : "memory", "cc", "xmm0", "xmm3", "xmm4", "xmm5" ); } diff --git a/libs/libvpx/third_party/libyuv/source/row_mips.cc b/libs/libvpx/third_party/libyuv/source/row_mips.cc index cfc9ffe036..285f0b5adc 100644 --- a/libs/libvpx/third_party/libyuv/source/row_mips.cc +++ b/libs/libvpx/third_party/libyuv/source/row_mips.cc @@ -375,13 +375,13 @@ void CopyRow_MIPS(const uint8* src, uint8* dst, int count) { } #endif // HAS_COPYROW_MIPS -// MIPS DSPR2 functions +// DSPR2 functions #if !defined(LIBYUV_DISABLE_MIPS) && defined(__mips_dsp) && \ (__mips_dsp_rev >= 2) && \ (_MIPS_SIM == _MIPS_SIM_ABI32) && (__mips_isa_rev < 6) -void SplitUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* 
dst_u, uint8* dst_v, - int width) { +void SplitUVRow_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, + int width) { __asm__ __volatile__ ( ".set push \n" ".set noreorder \n" @@ -389,7 +389,6 @@ void SplitUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, "blez $t4, 2f \n" " andi %[width], %[width], 0xf \n" // residual - ".p2align 2 \n" "1: \n" "addiu $t4, $t4, -1 \n" "lw $t0, 0(%[src_uv]) \n" // V1 | U1 | V0 | U0 @@ -447,7 +446,7 @@ void SplitUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, ); } -void MirrorRow_MIPS_DSPR2(const uint8* src, uint8* dst, int width) { +void MirrorRow_DSPR2(const uint8* src, uint8* dst, int width) { __asm__ __volatile__ ( ".set push \n" ".set noreorder \n" @@ -457,7 +456,6 @@ void MirrorRow_MIPS_DSPR2(const uint8* src, uint8* dst, int width) { "blez $t4, 2f \n" " addu %[src], %[src], %[width] \n" // src += width - ".p2align 2 \n" "1: \n" "lw $t0, -16(%[src]) \n" // |3|2|1|0| "lw $t1, -12(%[src]) \n" // |7|6|5|4| @@ -498,10 +496,10 @@ void MirrorRow_MIPS_DSPR2(const uint8* src, uint8* dst, int width) { ); } -void MirrorUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, - int width) { - int x = 0; - int y = 0; +void MirrorUVRow_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, + int width) { + int x; + int y; __asm__ __volatile__ ( ".set push \n" ".set noreorder \n" @@ -512,7 +510,6 @@ void MirrorUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, "blez %[x], 2f \n" " addu %[src_uv], %[src_uv], $t4 \n" - ".p2align 2 \n" "1: \n" "lw $t0, -32(%[src_uv]) \n" // |3|2|1|0| "lw $t1, -28(%[src_uv]) \n" // |7|6|5|4| @@ -582,7 +579,7 @@ void MirrorUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, [dst_u] "+r" (dst_u), [dst_v] "+r" (dst_v), [x] "=&r" (x), - [y] "+r" (y) + [y] "=&r" (y) : [width] "r" (width) : "t0", "t1", "t2", "t3", "t4", "t5", "t7", "t8", "t9" @@ -596,7 +593,7 @@ void MirrorUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, // 
t8 = | 0 | G1 | 0 | g1 | // t2 = | 0 | R0 | 0 | r0 | // t1 = | 0 | R1 | 0 | r1 | -#define I422ToTransientMipsRGB \ +#define YUVTORGB \ "lw $t0, 0(%[y_buf]) \n" \ "lhu $t1, 0(%[u_buf]) \n" \ "lhu $t2, 0(%[v_buf]) \n" \ @@ -655,11 +652,13 @@ void MirrorUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, "addu.ph $t2, $t2, $s5 \n" \ "addu.ph $t1, $t1, $s5 \n" -void I422ToARGBRow_MIPS_DSPR2(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* rgb_buf, - int width) { +// TODO(fbarchard): accept yuv conversion constants. +void I422ToARGBRow_DSPR2(const uint8* y_buf, + const uint8* u_buf, + const uint8* v_buf, + uint8* rgb_buf, + const struct YuvConstants* yuvconstants, + int width) { __asm__ __volatile__ ( ".set push \n" ".set noreorder \n" @@ -673,9 +672,8 @@ void I422ToARGBRow_MIPS_DSPR2(const uint8* y_buf, "lui $s6, 0xff00 \n" "ori $s6, 0xff00 \n" // |ff|00|ff|00|ff| - ".p2align 2 \n" "1: \n" - I422ToTransientMipsRGB + YUVTORGB // Arranging into argb format "precr.qb.ph $t4, $t8, $t4 \n" // |G1|g1|B1|b1| "precr.qb.ph $t5, $t9, $t5 \n" // |G0|g0|B0|b0| @@ -717,136 +715,10 @@ void I422ToARGBRow_MIPS_DSPR2(const uint8* y_buf, ); } -void I422ToABGRRow_MIPS_DSPR2(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* rgb_buf, - int width) { - __asm__ __volatile__ ( - ".set push \n" - ".set noreorder \n" - "beqz %[width], 2f \n" - " repl.ph $s0, 74 \n" // |YG|YG| = |74|74| - "repl.ph $s1, -25 \n" // |UG|UG| = |-25|-25| - "repl.ph $s2, -52 \n" // |VG|VG| = |-52|-52| - "repl.ph $s3, 102 \n" // |VR|VR| = |102|102| - "repl.ph $s4, 16 \n" // |0|16|0|16| - "repl.ph $s5, 128 \n" // |128|128| - "lui $s6, 0xff00 \n" - "ori $s6, 0xff00 \n" // |ff|00|ff|00| - - ".p2align 2 \n" - "1: \n" - I422ToTransientMipsRGB -// Arranging into abgr format - "precr.qb.ph $t0, $t8, $t1 \n" // |G1|g1|R1|r1| - "precr.qb.ph $t3, $t9, $t2 \n" // |G0|g0|R0|r0| - "precrq.qb.ph $t8, $t0, $t3 \n" // |G1|R1|G0|R0| - "precr.qb.ph $t9, $t0, $t3 \n" // 
|g1|r1|g0|r0| - - "precr.qb.ph $t2, $t4, $t5 \n" // |B1|b1|B0|b0| - "addiu %[width], -4 \n" - "addiu %[y_buf], 4 \n" - "preceu.ph.qbla $t1, $t2 \n" // |0 |B1|0 |B0| - "preceu.ph.qbra $t2, $t2 \n" // |0 |b1|0 |b0| - "or $t1, $t1, $s6 \n" // |ff|B1|ff|B0| - "or $t2, $t2, $s6 \n" // |ff|b1|ff|b0| - "precrq.ph.w $t0, $t2, $t9 \n" // |ff|b1|g1|r1| - "precrq.ph.w $t3, $t1, $t8 \n" // |ff|B1|G1|R1| - "sll $t9, $t9, 16 \n" - "sll $t8, $t8, 16 \n" - "packrl.ph $t2, $t2, $t9 \n" // |ff|b0|g0|r0| - "packrl.ph $t1, $t1, $t8 \n" // |ff|B0|G0|R0| -// Store results. - "sw $t2, 0(%[rgb_buf]) \n" - "sw $t0, 4(%[rgb_buf]) \n" - "sw $t1, 8(%[rgb_buf]) \n" - "sw $t3, 12(%[rgb_buf]) \n" - "bnez %[width], 1b \n" - " addiu %[rgb_buf], 16 \n" - "2: \n" - ".set pop \n" - :[y_buf] "+r" (y_buf), - [u_buf] "+r" (u_buf), - [v_buf] "+r" (v_buf), - [width] "+r" (width), - [rgb_buf] "+r" (rgb_buf) - : - : "t0", "t1", "t2", "t3", "t4", "t5", - "t6", "t7", "t8", "t9", - "s0", "s1", "s2", "s3", - "s4", "s5", "s6" - ); -} - -void I422ToBGRARow_MIPS_DSPR2(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* rgb_buf, - int width) { - __asm__ __volatile__ ( - ".set push \n" - ".set noreorder \n" - "beqz %[width], 2f \n" - " repl.ph $s0, 74 \n" // |YG|YG| = |74 |74 | - "repl.ph $s1, -25 \n" // |UG|UG| = |-25|-25| - "repl.ph $s2, -52 \n" // |VG|VG| = |-52|-52| - "repl.ph $s3, 102 \n" // |VR|VR| = |102|102| - "repl.ph $s4, 16 \n" // |0|16|0|16| - "repl.ph $s5, 128 \n" // |128|128| - "lui $s6, 0xff \n" - "ori $s6, 0xff \n" // |00|ff|00|ff| - - ".p2align 2 \n" - "1: \n" - I422ToTransientMipsRGB - // Arranging into bgra format - "precr.qb.ph $t4, $t4, $t8 \n" // |B1|b1|G1|g1| - "precr.qb.ph $t5, $t5, $t9 \n" // |B0|b0|G0|g0| - "precrq.qb.ph $t8, $t4, $t5 \n" // |B1|G1|B0|G0| - "precr.qb.ph $t9, $t4, $t5 \n" // |b1|g1|b0|g0| - - "precr.qb.ph $t2, $t1, $t2 \n" // |R1|r1|R0|r0| - "addiu %[width], -4 \n" - "addiu %[y_buf], 4 \n" - "preceu.ph.qbla $t1, $t2 \n" // |0 |R1|0 |R0| - "preceu.ph.qbra 
$t2, $t2 \n" // |0 |r1|0 |r0| - "sll $t1, $t1, 8 \n" // |R1|0 |R0|0 | - "sll $t2, $t2, 8 \n" // |r1|0 |r0|0 | - "or $t1, $t1, $s6 \n" // |R1|ff|R0|ff| - "or $t2, $t2, $s6 \n" // |r1|ff|r0|ff| - "precrq.ph.w $t0, $t9, $t2 \n" // |b1|g1|r1|ff| - "precrq.ph.w $t3, $t8, $t1 \n" // |B1|G1|R1|ff| - "sll $t1, $t1, 16 \n" - "sll $t2, $t2, 16 \n" - "packrl.ph $t2, $t9, $t2 \n" // |b0|g0|r0|ff| - "packrl.ph $t1, $t8, $t1 \n" // |B0|G0|R0|ff| -// Store results. - "sw $t2, 0(%[rgb_buf]) \n" - "sw $t0, 4(%[rgb_buf]) \n" - "sw $t1, 8(%[rgb_buf]) \n" - "sw $t3, 12(%[rgb_buf]) \n" - "bnez %[width], 1b \n" - " addiu %[rgb_buf], 16 \n" - "2: \n" - ".set pop \n" - :[y_buf] "+r" (y_buf), - [u_buf] "+r" (u_buf), - [v_buf] "+r" (v_buf), - [width] "+r" (width), - [rgb_buf] "+r" (rgb_buf) - : - : "t0", "t1", "t2", "t3", "t4", "t5", - "t6", "t7", "t8", "t9", - "s0", "s1", "s2", "s3", - "s4", "s5", "s6" - ); -} - // Bilinear filter 8x2 -> 8x1 -void InterpolateRow_MIPS_DSPR2(uint8* dst_ptr, const uint8* src_ptr, - ptrdiff_t src_stride, int dst_width, - int source_y_fraction) { +void InterpolateRow_DSPR2(uint8* dst_ptr, const uint8* src_ptr, + ptrdiff_t src_stride, int dst_width, + int source_y_fraction) { int y0_fraction = 256 - source_y_fraction; const uint8* src_ptr1 = src_ptr + src_stride; @@ -857,7 +729,6 @@ void InterpolateRow_MIPS_DSPR2(uint8* dst_ptr, const uint8* src_ptr, "replv.ph $t0, %[y0_fraction] \n" "replv.ph $t1, %[source_y_fraction] \n" - ".p2align 2 \n" "1: \n" "lw $t2, 0(%[src_ptr]) \n" "lw $t3, 0(%[src_ptr1]) \n" diff --git a/libs/libvpx/third_party/libyuv/source/row_neon.cc b/libs/libvpx/third_party/libyuv/source/row_neon.cc index 1a72eb9039..909df060c6 100644 --- a/libs/libvpx/third_party/libyuv/source/row_neon.cc +++ b/libs/libvpx/third_party/libyuv/source/row_neon.cc @@ -93,7 +93,7 @@ extern "C" { "vuzp.u8 d2, d3 \n" \ "vtrn.u32 d2, d3 \n" -#define YUV422TORGB_SETUP_REG \ +#define YUVTORGB_SETUP \ MEMACCESS([kUVToRB]) \ "vld1.8 {d24}, [%[kUVToRB]] \n" \ 
MEMACCESS([kUVToG]) \ @@ -107,7 +107,7 @@ extern "C" { MEMACCESS([kYToRgb]) \ "vld1.32 {d30[], d31[]}, [%[kYToRgb]] \n" -#define YUV422TORGB \ +#define YUVTORGB \ "vmull.u8 q8, d2, d24 \n" /* u/v B/R component */\ "vmull.u8 q9, d2, d25 \n" /* u/v G component */\ "vmovl.u8 q0, d0 \n" /* Y */\ @@ -134,52 +134,19 @@ extern "C" { "vqshrun.s16 d22, q9, #6 \n" /* R */ \ "vqshrun.s16 d21, q0, #6 \n" /* G */ -// YUV to RGB conversion constants. -// Y contribution to R,G,B. Scale and bias. -#define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */ -#define YGB 1160 /* 1.164 * 64 * 16 - adjusted for even error distribution */ - -// U and V contributions to R,G,B. -#define UB -128 /* -min(128, round(2.018 * 64)) */ -#define UG 25 /* -round(-0.391 * 64) */ -#define VG 52 /* -round(-0.813 * 64) */ -#define VR -102 /* -round(1.596 * 64) */ - -// Bias values to subtract 16 from Y and 128 from U and V. -#define BB (UB * 128 - YGB) -#define BG (UG * 128 + VG * 128 - YGB) -#define BR (VR * 128 - YGB) - -static uvec8 kUVToRB = { 128, 128, 128, 128, 102, 102, 102, 102, - 0, 0, 0, 0, 0, 0, 0, 0 }; -static uvec8 kUVToG = { 25, 25, 25, 25, 52, 52, 52, 52, - 0, 0, 0, 0, 0, 0, 0, 0 }; -static vec16 kUVBiasBGR = { BB, BG, BR, 0, 0, 0, 0, 0 }; -static vec32 kYToRgb = { 0x0101 * YG, 0, 0, 0 }; - -#undef YG -#undef YGB -#undef UB -#undef UG -#undef VG -#undef VR -#undef BB -#undef BG -#undef BR - void I444ToARGBRow_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" + YUVTORGB_SETUP + "vmov.u8 d23, #255 \n" "1: \n" READYUV444 - YUV422TORGB + YUVTORGB "subs %4, %4, #8 \n" - "vmov.u8 d23, #255 \n" MEMACCESS(3) "vst4.8 {d20, d21, d22, d23}, [%3]! 
\n" "bgt 1b \n" @@ -188,10 +155,10 @@ void I444ToARGBRow_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_argb), // %3 "+r"(width) // %4 - : [kUVToRB]"r"(&kUVToRB), // %5 - [kUVToG]"r"(&kUVToG), // %6 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); @@ -201,15 +168,15 @@ void I422ToARGBRow_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" + YUVTORGB_SETUP + "vmov.u8 d23, #255 \n" "1: \n" READYUV422 - YUV422TORGB + YUVTORGB "subs %4, %4, #8 \n" - "vmov.u8 d23, #255 \n" MEMACCESS(3) "vst4.8 {d20, d21, d22, d23}, [%3]! \n" "bgt 1b \n" @@ -218,10 +185,43 @@ void I422ToARGBRow_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_argb), // %3 "+r"(width) // %4 - : [kUVToRB]"r"(&kUVToRB), // %5 - [kUVToG]"r"(&kUVToG), // %6 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) + : "cc", "memory", "q0", "q1", "q2", "q3", "q4", + "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" + ); +} + +void I422AlphaToARGBRow_NEON(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + const uint8* src_a, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + asm volatile ( + YUVTORGB_SETUP + "1: \n" + READYUV422 + YUVTORGB + "subs %5, %5, #8 \n" + MEMACCESS(3) + "vld1.8 {d23}, [%3]! \n" + MEMACCESS(4) + "vst4.8 {d20, d21, d22, d23}, [%4]! 
\n" + "bgt 1b \n" + : "+r"(src_y), // %0 + "+r"(src_u), // %1 + "+r"(src_v), // %2 + "+r"(src_a), // %3 + "+r"(dst_argb), // %4 + "+r"(width) // %5 + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); @@ -231,15 +231,15 @@ void I411ToARGBRow_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" + YUVTORGB_SETUP + "vmov.u8 d23, #255 \n" "1: \n" READYUV411 - YUV422TORGB + YUVTORGB "subs %4, %4, #8 \n" - "vmov.u8 d23, #255 \n" MEMACCESS(3) "vst4.8 {d20, d21, d22, d23}, [%3]! \n" "bgt 1b \n" @@ -248,72 +248,10 @@ void I411ToARGBRow_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_argb), // %3 "+r"(width) // %4 - : [kUVToRB]"r"(&kUVToRB), // %5 - [kUVToG]"r"(&kUVToG), // %6 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) - : "cc", "memory", "q0", "q1", "q2", "q3", "q4", - "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" - ); -} - -void I422ToBGRARow_NEON(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_bgra, - int width) { - asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" - "1: \n" - READYUV422 - YUV422TORGB - "subs %4, %4, #8 \n" - "vswp.u8 d20, d22 \n" - "vmov.u8 d19, #255 \n" - MEMACCESS(3) - "vst4.8 {d19, d20, d21, d22}, [%3]! 
\n" - "bgt 1b \n" - : "+r"(src_y), // %0 - "+r"(src_u), // %1 - "+r"(src_v), // %2 - "+r"(dst_bgra), // %3 - "+r"(width) // %4 - : [kUVToRB]"r"(&kUVToRB), // %5 - [kUVToG]"r"(&kUVToG), // %6 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) - : "cc", "memory", "q0", "q1", "q2", "q3", "q4", - "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" - ); -} - -void I422ToABGRRow_NEON(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_abgr, - int width) { - asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" - "1: \n" - READYUV422 - YUV422TORGB - "subs %4, %4, #8 \n" - "vswp.u8 d20, d22 \n" - "vmov.u8 d23, #255 \n" - MEMACCESS(3) - "vst4.8 {d20, d21, d22, d23}, [%3]! \n" - "bgt 1b \n" - : "+r"(src_y), // %0 - "+r"(src_u), // %1 - "+r"(src_v), // %2 - "+r"(dst_abgr), // %3 - "+r"(width) // %4 - : [kUVToRB]"r"(&kUVToRB), // %5 - [kUVToG]"r"(&kUVToG), // %6 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); @@ -323,15 +261,15 @@ void I422ToRGBARow_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgba, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" + YUVTORGB_SETUP "1: \n" READYUV422 - YUV422TORGB + YUVTORGB "subs %4, %4, #8 \n" - "vmov.u8 d19, #255 \n" + "vmov.u8 d19, #255 \n" // d19 modified by YUVTORGB MEMACCESS(3) "vst4.8 {d19, d20, d21, d22}, [%3]! 
\n" "bgt 1b \n" @@ -340,10 +278,10 @@ void I422ToRGBARow_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_rgba), // %3 "+r"(width) // %4 - : [kUVToRB]"r"(&kUVToRB), // %5 - [kUVToG]"r"(&kUVToG), // %6 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); @@ -353,13 +291,13 @@ void I422ToRGB24Row_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgb24, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" + YUVTORGB_SETUP "1: \n" READYUV422 - YUV422TORGB + YUVTORGB "subs %4, %4, #8 \n" MEMACCESS(3) "vst3.8 {d20, d21, d22}, [%3]! \n" @@ -369,68 +307,33 @@ void I422ToRGB24Row_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_rgb24), // %3 "+r"(width) // %4 - : [kUVToRB]"r"(&kUVToRB), // %5 - [kUVToG]"r"(&kUVToG), // %6 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) - : "cc", "memory", "q0", "q1", "q2", "q3", "q4", - "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" - ); -} - -void I422ToRAWRow_NEON(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_raw, - int width) { - asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" - "1: \n" - READYUV422 - YUV422TORGB - "subs %4, %4, #8 \n" - "vswp.u8 d20, d22 \n" - MEMACCESS(3) - "vst3.8 {d20, d21, d22}, [%3]! 
\n" - "bgt 1b \n" - : "+r"(src_y), // %0 - "+r"(src_u), // %1 - "+r"(src_v), // %2 - "+r"(dst_raw), // %3 - "+r"(width) // %4 - : [kUVToRB]"r"(&kUVToRB), // %5 - [kUVToG]"r"(&kUVToG), // %6 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #define ARGBTORGB565 \ - "vshr.u8 d20, d20, #3 \n" /* B */ \ - "vshr.u8 d21, d21, #2 \n" /* G */ \ - "vshr.u8 d22, d22, #3 \n" /* R */ \ - "vmovl.u8 q8, d20 \n" /* B */ \ - "vmovl.u8 q9, d21 \n" /* G */ \ - "vmovl.u8 q10, d22 \n" /* R */ \ - "vshl.u16 q9, q9, #5 \n" /* G */ \ - "vshl.u16 q10, q10, #11 \n" /* R */ \ - "vorr q0, q8, q9 \n" /* BG */ \ - "vorr q0, q0, q10 \n" /* BGR */ + "vshll.u8 q0, d22, #8 \n" /* R */ \ + "vshll.u8 q8, d21, #8 \n" /* G */ \ + "vshll.u8 q9, d20, #8 \n" /* B */ \ + "vsri.16 q0, q8, #5 \n" /* RG */ \ + "vsri.16 q0, q9, #11 \n" /* RGB */ void I422ToRGB565Row_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgb565, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" + YUVTORGB_SETUP "1: \n" READYUV422 - YUV422TORGB + YUVTORGB "subs %4, %4, #8 \n" ARGBTORGB565 MEMACCESS(3) @@ -441,41 +344,35 @@ void I422ToRGB565Row_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_rgb565), // %3 "+r"(width) // %4 - : [kUVToRB]"r"(&kUVToRB), // %5 - [kUVToG]"r"(&kUVToG), // %6 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #define ARGBTOARGB1555 \ - "vshr.u8 q10, q10, #3 \n" /* B */ 
\ - "vshr.u8 d22, d22, #3 \n" /* R */ \ - "vshr.u8 d23, d23, #7 \n" /* A */ \ - "vmovl.u8 q8, d20 \n" /* B */ \ - "vmovl.u8 q9, d21 \n" /* G */ \ - "vmovl.u8 q10, d22 \n" /* R */ \ - "vmovl.u8 q11, d23 \n" /* A */ \ - "vshl.u16 q9, q9, #5 \n" /* G */ \ - "vshl.u16 q10, q10, #10 \n" /* R */ \ - "vshl.u16 q11, q11, #15 \n" /* A */ \ - "vorr q0, q8, q9 \n" /* BG */ \ - "vorr q1, q10, q11 \n" /* RA */ \ - "vorr q0, q0, q1 \n" /* BGRA */ + "vshll.u8 q0, d23, #8 \n" /* A */ \ + "vshll.u8 q8, d22, #8 \n" /* R */ \ + "vshll.u8 q9, d21, #8 \n" /* G */ \ + "vshll.u8 q10, d20, #8 \n" /* B */ \ + "vsri.16 q0, q8, #1 \n" /* AR */ \ + "vsri.16 q0, q9, #6 \n" /* ARG */ \ + "vsri.16 q0, q10, #11 \n" /* ARGB */ void I422ToARGB1555Row_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb1555, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" + YUVTORGB_SETUP "1: \n" READYUV422 - YUV422TORGB + YUVTORGB "subs %4, %4, #8 \n" "vmov.u8 d23, #255 \n" ARGBTOARGB1555 @@ -487,10 +384,10 @@ void I422ToARGB1555Row_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_argb1555), // %3 "+r"(width) // %4 - : [kUVToRB]"r"(&kUVToRB), // %5 - [kUVToG]"r"(&kUVToG), // %6 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); @@ -509,14 +406,14 @@ void I422ToARGB4444Row_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb4444, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG + YUVTORGB_SETUP "vmov.u8 d4, #0x0f \n" // bits to clear with vbic. 
- ".p2align 2 \n" "1: \n" READYUV422 - YUV422TORGB + YUVTORGB "subs %4, %4, #8 \n" "vmov.u8 d23, #255 \n" ARGBTOARGB4444 @@ -528,10 +425,10 @@ void I422ToARGB4444Row_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_argb4444), // %3 "+r"(width) // %4 - : [kUVToRB]"r"(&kUVToRB), // %5 - [kUVToG]"r"(&kUVToG), // %6 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); @@ -541,23 +438,22 @@ void I400ToARGBRow_NEON(const uint8* src_y, uint8* dst_argb, int width) { asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" + YUVTORGB_SETUP + "vmov.u8 d23, #255 \n" "1: \n" READYUV400 - YUV422TORGB + YUVTORGB "subs %2, %2, #8 \n" - "vmov.u8 d23, #255 \n" MEMACCESS(1) "vst4.8 {d20, d21, d22, d23}, [%1]! \n" "bgt 1b \n" : "+r"(src_y), // %0 "+r"(dst_argb), // %1 "+r"(width) // %2 - : [kUVToRB]"r"(&kUVToRB), // %3 - [kUVToG]"r"(&kUVToG), // %4 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&kYuvI601Constants.kUVToRB), + [kUVToG]"r"(&kYuvI601Constants.kUVToG), + [kUVBiasBGR]"r"(&kYuvI601Constants.kUVBiasBGR), + [kYToRgb]"r"(&kYuvI601Constants.kYToRgb) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); @@ -568,7 +464,6 @@ void J400ToARGBRow_NEON(const uint8* src_y, int width) { asm volatile ( "vmov.u8 d23, #255 \n" - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {d20}, [%0]! 
\n" @@ -589,15 +484,15 @@ void J400ToARGBRow_NEON(const uint8* src_y, void NV12ToARGBRow_NEON(const uint8* src_y, const uint8* src_uv, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" + YUVTORGB_SETUP + "vmov.u8 d23, #255 \n" "1: \n" READNV12 - YUV422TORGB + YUVTORGB "subs %3, %3, #8 \n" - "vmov.u8 d23, #255 \n" MEMACCESS(2) "vst4.8 {d20, d21, d22, d23}, [%2]! \n" "bgt 1b \n" @@ -605,38 +500,38 @@ void NV12ToARGBRow_NEON(const uint8* src_y, "+r"(src_uv), // %1 "+r"(dst_argb), // %2 "+r"(width) // %3 - : [kUVToRB]"r"(&kUVToRB), // %4 - [kUVToG]"r"(&kUVToG), // %5 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } void NV21ToARGBRow_NEON(const uint8* src_y, - const uint8* src_uv, + const uint8* src_vu, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" + YUVTORGB_SETUP + "vmov.u8 d23, #255 \n" "1: \n" READNV21 - YUV422TORGB + YUVTORGB "subs %3, %3, #8 \n" - "vmov.u8 d23, #255 \n" MEMACCESS(2) "vst4.8 {d20, d21, d22, d23}, [%2]! 
\n" "bgt 1b \n" : "+r"(src_y), // %0 - "+r"(src_uv), // %1 + "+r"(src_vu), // %1 "+r"(dst_argb), // %2 "+r"(width) // %3 - : [kUVToRB]"r"(&kUVToRB), // %4 - [kUVToG]"r"(&kUVToG), // %5 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); @@ -645,13 +540,13 @@ void NV21ToARGBRow_NEON(const uint8* src_y, void NV12ToRGB565Row_NEON(const uint8* src_y, const uint8* src_uv, uint8* dst_rgb565, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" + YUVTORGB_SETUP "1: \n" READNV12 - YUV422TORGB + YUVTORGB "subs %3, %3, #8 \n" ARGBTORGB565 MEMACCESS(2) @@ -661,38 +556,10 @@ void NV12ToRGB565Row_NEON(const uint8* src_y, "+r"(src_uv), // %1 "+r"(dst_rgb565), // %2 "+r"(width) // %3 - : [kUVToRB]"r"(&kUVToRB), // %4 - [kUVToG]"r"(&kUVToG), // %5 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) - : "cc", "memory", "q0", "q1", "q2", "q3", "q4", - "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" - ); -} - -void NV21ToRGB565Row_NEON(const uint8* src_y, - const uint8* src_uv, - uint8* dst_rgb565, - int width) { - asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" - "1: \n" - READNV21 - YUV422TORGB - "subs %3, %3, #8 \n" - ARGBTORGB565 - MEMACCESS(2) - "vst1.8 {q0}, [%2]! \n" // store 8 pixels RGB565. 
- "bgt 1b \n" - : "+r"(src_y), // %0 - "+r"(src_uv), // %1 - "+r"(dst_rgb565), // %2 - "+r"(width) // %3 - : [kUVToRB]"r"(&kUVToRB), // %4 - [kUVToG]"r"(&kUVToG), // %5 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); @@ -700,25 +567,25 @@ void NV21ToRGB565Row_NEON(const uint8* src_y, void YUY2ToARGBRow_NEON(const uint8* src_yuy2, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" + YUVTORGB_SETUP + "vmov.u8 d23, #255 \n" "1: \n" READYUY2 - YUV422TORGB + YUVTORGB "subs %2, %2, #8 \n" - "vmov.u8 d23, #255 \n" MEMACCESS(1) "vst4.8 {d20, d21, d22, d23}, [%1]! \n" "bgt 1b \n" : "+r"(src_yuy2), // %0 "+r"(dst_argb), // %1 "+r"(width) // %2 - : [kUVToRB]"r"(&kUVToRB), // %3 - [kUVToG]"r"(&kUVToG), // %4 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); @@ -726,25 +593,25 @@ void YUY2ToARGBRow_NEON(const uint8* src_yuy2, void UYVYToARGBRow_NEON(const uint8* src_uyvy, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG - ".p2align 2 \n" + YUVTORGB_SETUP + "vmov.u8 d23, #255 \n" "1: \n" READUYVY - YUV422TORGB + YUVTORGB "subs %2, %2, #8 \n" - "vmov.u8 d23, #255 \n" MEMACCESS(1) "vst4.8 {d20, d21, d22, d23}, [%1]! 
\n" "bgt 1b \n" : "+r"(src_uyvy), // %0 "+r"(dst_argb), // %1 "+r"(width) // %2 - : [kUVToRB]"r"(&kUVToRB), // %3 - [kUVToG]"r"(&kUVToG), // %4 - [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); @@ -754,7 +621,6 @@ void UYVYToARGBRow_NEON(const uint8* src_uyvy, void SplitUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld2.8 {q0, q1}, [%0]! \n" // load 16 pairs of UV @@ -777,7 +643,6 @@ void SplitUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v, void MergeUVRow_NEON(const uint8* src_u, const uint8* src_v, uint8* dst_uv, int width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0}, [%0]! \n" // load U @@ -800,7 +665,6 @@ void MergeUVRow_NEON(const uint8* src_u, const uint8* src_v, uint8* dst_uv, // Copy multiple of 32. vld4.8 allow unaligned and is fastest on a15. void CopyRow_NEON(const uint8* src, uint8* dst, int count) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {d0, d1, d2, d3}, [%0]! 
\n" // load 32 @@ -855,7 +719,6 @@ void MirrorRow_NEON(const uint8* src, uint8* dst, int width) { "add %0, %0, %2 \n" "sub %0, #16 \n" - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0}, [%0], r3 \n" // src -= 16 @@ -882,7 +745,6 @@ void MirrorUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v, "add %0, %0, %3, lsl #1 \n" "sub %0, #16 \n" - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld2.8 {d0, d1}, [%0], r12 \n" // src -= 16 @@ -909,7 +771,6 @@ void ARGBMirrorRow_NEON(const uint8* src, uint8* dst, int width) { "add %0, %0, %2, lsl #2 \n" "sub %0, #16 \n" - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0}, [%0], r3 \n" // src -= 16 @@ -928,10 +789,9 @@ void ARGBMirrorRow_NEON(const uint8* src, uint8* dst, int width) { ); } -void RGB24ToARGBRow_NEON(const uint8* src_rgb24, uint8* dst_argb, int pix) { +void RGB24ToARGBRow_NEON(const uint8* src_rgb24, uint8* dst_argb, int width) { asm volatile ( "vmov.u8 d4, #255 \n" // Alpha - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld3.8 {d1, d2, d3}, [%0]! \n" // load 8 pixels of RGB24. @@ -941,16 +801,15 @@ void RGB24ToARGBRow_NEON(const uint8* src_rgb24, uint8* dst_argb, int pix) { "bgt 1b \n" : "+r"(src_rgb24), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "d1", "d2", "d3", "d4" // Clobber List ); } -void RAWToARGBRow_NEON(const uint8* src_raw, uint8* dst_argb, int pix) { +void RAWToARGBRow_NEON(const uint8* src_raw, uint8* dst_argb, int width) { asm volatile ( "vmov.u8 d4, #255 \n" // Alpha - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld3.8 {d1, d2, d3}, [%0]! \n" // load 8 pixels of RAW. @@ -961,12 +820,30 @@ void RAWToARGBRow_NEON(const uint8* src_raw, uint8* dst_argb, int pix) { "bgt 1b \n" : "+r"(src_raw), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "d1", "d2", "d3", "d4" // Clobber List ); } +void RAWToRGB24Row_NEON(const uint8* src_raw, uint8* dst_rgb24, int width) { + asm volatile ( + "1: \n" + MEMACCESS(0) + "vld3.8 {d1, d2, d3}, [%0]! 
\n" // load 8 pixels of RAW. + "subs %2, %2, #8 \n" // 8 processed per loop. + "vswp.u8 d1, d3 \n" // swap R, B + MEMACCESS(1) + "vst3.8 {d1, d2, d3}, [%1]! \n" // store 8 pixels of RGB24. + "bgt 1b \n" + : "+r"(src_raw), // %0 + "+r"(dst_rgb24), // %1 + "+r"(width) // %2 + : + : "cc", "memory", "d1", "d2", "d3" // Clobber List + ); +} + #define RGB565TOARGB \ "vshrn.u16 d6, q0, #5 \n" /* G xxGGGGGG */ \ "vuzp.u8 d0, d1 \n" /* d0 xxxBBBBB RRRRRxxx */ \ @@ -979,10 +856,9 @@ void RAWToARGBRow_NEON(const uint8* src_raw, uint8* dst_argb, int pix) { "vorr.u8 d2, d1, d5 \n" /* R */ \ "vorr.u8 d1, d4, d6 \n" /* G */ -void RGB565ToARGBRow_NEON(const uint8* src_rgb565, uint8* dst_argb, int pix) { +void RGB565ToARGBRow_NEON(const uint8* src_rgb565, uint8* dst_argb, int width) { asm volatile ( "vmov.u8 d3, #255 \n" // Alpha - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0}, [%0]! \n" // load 8 RGB565 pixels. @@ -993,7 +869,7 @@ void RGB565ToARGBRow_NEON(const uint8* src_rgb565, uint8* dst_argb, int pix) { "bgt 1b \n" : "+r"(src_rgb565), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "q0", "q1", "q2", "q3" // Clobber List ); @@ -1027,10 +903,9 @@ void RGB565ToARGBRow_NEON(const uint8* src_rgb565, uint8* dst_argb, int pix) { "vorr.u8 d1, d4, d6 \n" /* G */ void ARGB1555ToARGBRow_NEON(const uint8* src_argb1555, uint8* dst_argb, - int pix) { + int width) { asm volatile ( "vmov.u8 d3, #255 \n" // Alpha - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0}, [%0]! \n" // load 8 ARGB1555 pixels. 
@@ -1041,7 +916,7 @@ void ARGB1555ToARGBRow_NEON(const uint8* src_argb1555, uint8* dst_argb, "bgt 1b \n" : "+r"(src_argb1555), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "q0", "q1", "q2", "q3" // Clobber List ); @@ -1058,10 +933,9 @@ void ARGB1555ToARGBRow_NEON(const uint8* src_argb1555, uint8* dst_argb, "vswp.u8 d1, d2 \n" /* B,R,G,A -> B,G,R,A */ void ARGB4444ToARGBRow_NEON(const uint8* src_argb4444, uint8* dst_argb, - int pix) { + int width) { asm volatile ( "vmov.u8 d3, #255 \n" // Alpha - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0}, [%0]! \n" // load 8 ARGB4444 pixels. @@ -1072,15 +946,14 @@ void ARGB4444ToARGBRow_NEON(const uint8* src_argb4444, uint8* dst_argb, "bgt 1b \n" : "+r"(src_argb4444), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "q0", "q1", "q2" // Clobber List ); } -void ARGBToRGB24Row_NEON(const uint8* src_argb, uint8* dst_rgb24, int pix) { +void ARGBToRGB24Row_NEON(const uint8* src_argb, uint8* dst_rgb24, int width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d1, d2, d3, d4}, [%0]! \n" // load 8 pixels of ARGB. @@ -1090,15 +963,14 @@ void ARGBToRGB24Row_NEON(const uint8* src_argb, uint8* dst_rgb24, int pix) { "bgt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_rgb24), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "d1", "d2", "d3", "d4" // Clobber List ); } -void ARGBToRAWRow_NEON(const uint8* src_argb, uint8* dst_raw, int pix) { +void ARGBToRAWRow_NEON(const uint8* src_argb, uint8* dst_raw, int width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d1, d2, d3, d4}, [%0]! \n" // load 8 pixels of ARGB. 
@@ -1109,15 +981,14 @@ void ARGBToRAWRow_NEON(const uint8* src_argb, uint8* dst_raw, int pix) { "bgt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_raw), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "d1", "d2", "d3", "d4" // Clobber List ); } -void YUY2ToYRow_NEON(const uint8* src_yuy2, uint8* dst_y, int pix) { +void YUY2ToYRow_NEON(const uint8* src_yuy2, uint8* dst_y, int width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld2.8 {q0, q1}, [%0]! \n" // load 16 pixels of YUY2. @@ -1127,15 +998,14 @@ void YUY2ToYRow_NEON(const uint8* src_yuy2, uint8* dst_y, int pix) { "bgt 1b \n" : "+r"(src_yuy2), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "q0", "q1" // Clobber List ); } -void UYVYToYRow_NEON(const uint8* src_uyvy, uint8* dst_y, int pix) { +void UYVYToYRow_NEON(const uint8* src_uyvy, uint8* dst_y, int width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld2.8 {q0, q1}, [%0]! \n" // load 16 pixels of UYVY. @@ -1145,16 +1015,15 @@ void UYVYToYRow_NEON(const uint8* src_uyvy, uint8* dst_y, int pix) { "bgt 1b \n" : "+r"(src_uyvy), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "q0", "q1" // Clobber List ); } void YUY2ToUV422Row_NEON(const uint8* src_yuy2, uint8* dst_u, uint8* dst_v, - int pix) { + int width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 16 pixels of YUY2. @@ -1167,16 +1036,15 @@ void YUY2ToUV422Row_NEON(const uint8* src_yuy2, uint8* dst_u, uint8* dst_v, : "+r"(src_yuy2), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : : "cc", "memory", "d0", "d1", "d2", "d3" // Clobber List ); } void UYVYToUV422Row_NEON(const uint8* src_uyvy, uint8* dst_u, uint8* dst_v, - int pix) { + int width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 16 pixels of UYVY. 
@@ -1189,17 +1057,16 @@ void UYVYToUV422Row_NEON(const uint8* src_uyvy, uint8* dst_u, uint8* dst_v, : "+r"(src_uyvy), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : : "cc", "memory", "d0", "d1", "d2", "d3" // Clobber List ); } void YUY2ToUVRow_NEON(const uint8* src_yuy2, int stride_yuy2, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "add %1, %0, %1 \n" // stride + src_yuy2 - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 16 pixels of YUY2. @@ -1217,17 +1084,16 @@ void YUY2ToUVRow_NEON(const uint8* src_yuy2, int stride_yuy2, "+r"(stride_yuy2), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7" // Clobber List ); } void UYVYToUVRow_NEON(const uint8* src_uyvy, int stride_uyvy, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "add %1, %0, %1 \n" // stride + src_uyvy - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 16 pixels of UYVY. @@ -1245,7 +1111,7 @@ void UYVYToUVRow_NEON(const uint8* src_uyvy, int stride_uyvy, "+r"(stride_uyvy), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7" // Clobber List ); @@ -1253,7 +1119,7 @@ void UYVYToUVRow_NEON(const uint8* src_uyvy, int stride_uyvy, // For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA. 
void ARGBShuffleRow_NEON(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix) { + const uint8* shuffler, int width) { asm volatile ( MEMACCESS(3) "vld1.8 {q2}, [%3] \n" // shuffler @@ -1268,7 +1134,7 @@ void ARGBShuffleRow_NEON(const uint8* src_argb, uint8* dst_argb, "bgt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : "r"(shuffler) // %3 : "cc", "memory", "q0", "q1", "q2" // Clobber List ); @@ -1279,7 +1145,6 @@ void I422ToYUY2Row_NEON(const uint8* src_y, const uint8* src_v, uint8* dst_yuy2, int width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld2.8 {d0, d2}, [%0]! \n" // load 16 Ys @@ -1306,7 +1171,6 @@ void I422ToUYVYRow_NEON(const uint8* src_y, const uint8* src_v, uint8* dst_uyvy, int width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld2.8 {d1, d3}, [%0]! \n" // load 16 Ys @@ -1328,9 +1192,8 @@ void I422ToUYVYRow_NEON(const uint8* src_y, ); } -void ARGBToRGB565Row_NEON(const uint8* src_argb, uint8* dst_rgb565, int pix) { +void ARGBToRGB565Row_NEON(const uint8* src_argb, uint8* dst_rgb565, int width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d20, d21, d22, d23}, [%0]! \n" // load 8 pixels of ARGB. 
@@ -1341,7 +1204,7 @@ void ARGBToRGB565Row_NEON(const uint8* src_argb, uint8* dst_rgb565, int pix) { "bgt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_rgb565), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "q0", "q8", "q9", "q10", "q11" ); @@ -1350,7 +1213,6 @@ void ARGBToRGB565Row_NEON(const uint8* src_argb, uint8* dst_rgb565, int pix) { void ARGBToRGB565DitherRow_NEON(const uint8* src_argb, uint8* dst_rgb, const uint32 dither4, int width) { asm volatile ( - ".p2align 2 \n" "vdup.32 d2, %2 \n" // dither4 "1: \n" MEMACCESS(1) @@ -1372,9 +1234,8 @@ void ARGBToRGB565DitherRow_NEON(const uint8* src_argb, uint8* dst_rgb, } void ARGBToARGB1555Row_NEON(const uint8* src_argb, uint8* dst_argb1555, - int pix) { + int width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d20, d21, d22, d23}, [%0]! \n" // load 8 pixels of ARGB. @@ -1385,17 +1246,16 @@ void ARGBToARGB1555Row_NEON(const uint8* src_argb, uint8* dst_argb1555, "bgt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_argb1555), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "q0", "q8", "q9", "q10", "q11" ); } void ARGBToARGB4444Row_NEON(const uint8* src_argb, uint8* dst_argb4444, - int pix) { + int width) { asm volatile ( "vmov.u8 d4, #0x0f \n" // bits to clear with vbic. - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d20, d21, d22, d23}, [%0]! \n" // load 8 pixels of ARGB. 
@@ -1406,19 +1266,18 @@ void ARGBToARGB4444Row_NEON(const uint8* src_argb, uint8* dst_argb4444, "bgt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_argb4444), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "q0", "q8", "q9", "q10", "q11" ); } -void ARGBToYRow_NEON(const uint8* src_argb, uint8* dst_y, int pix) { +void ARGBToYRow_NEON(const uint8* src_argb, uint8* dst_y, int width) { asm volatile ( "vmov.u8 d24, #13 \n" // B * 0.1016 coefficient "vmov.u8 d25, #65 \n" // G * 0.5078 coefficient "vmov.u8 d26, #33 \n" // R * 0.2578 coefficient "vmov.u8 d27, #16 \n" // Add 16 constant - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 ARGB pixels. @@ -1433,18 +1292,35 @@ void ARGBToYRow_NEON(const uint8* src_argb, uint8* dst_y, int pix) { "bgt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "q0", "q1", "q2", "q12", "q13" ); } -void ARGBToYJRow_NEON(const uint8* src_argb, uint8* dst_y, int pix) { +void ARGBExtractAlphaRow_NEON(const uint8* src_argb, uint8* dst_a, int width) { + asm volatile ( + "1: \n" + MEMACCESS(0) + "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels + "vld4.8 {d1, d3, d5, d7}, [%0]! \n" // load next 8 ARGB pixels + "subs %2, %2, #16 \n" // 16 processed per loop + MEMACCESS(1) + "vst1.8 {q3}, [%1]! \n" // store 16 A's. + "bgt 1b \n" + : "+r"(src_argb), // %0 + "+r"(dst_a), // %1 + "+r"(width) // %2 + : + : "cc", "memory", "q0", "q1", "q2", "q3" // Clobber List + ); +} + +void ARGBToYJRow_NEON(const uint8* src_argb, uint8* dst_y, int width) { asm volatile ( "vmov.u8 d24, #15 \n" // B * 0.11400 coefficient "vmov.u8 d25, #75 \n" // G * 0.58700 coefficient "vmov.u8 d26, #38 \n" // R * 0.29900 coefficient - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 ARGB pixels. 
@@ -1458,7 +1334,7 @@ void ARGBToYJRow_NEON(const uint8* src_argb, uint8* dst_y, int pix) { "bgt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "q0", "q1", "q2", "q12", "q13" ); @@ -1466,7 +1342,7 @@ void ARGBToYJRow_NEON(const uint8* src_argb, uint8* dst_y, int pix) { // 8x1 pixels. void ARGBToUV444Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix) { + int width) { asm volatile ( "vmov.u8 d24, #112 \n" // UB / VR 0.875 coefficient "vmov.u8 d25, #74 \n" // UG -0.5781 coefficient @@ -1474,7 +1350,6 @@ void ARGBToUV444Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, "vmov.u8 d27, #18 \n" // VB -0.1406 coefficient "vmov.u8 d28, #94 \n" // VG -0.7344 coefficient "vmov.u16 q15, #0x8080 \n" // 128.5 - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 ARGB pixels. @@ -1500,65 +1375,15 @@ void ARGBToUV444Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, : "+r"(src_argb), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q12", "q13", "q14", "q15" ); } -// 16x1 pixels -> 8x1. pix is number of argb pixels. e.g. 16. -void ARGBToUV422Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix) { - asm volatile ( - "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient - "vmov.s16 q11, #74 / 2 \n" // UG -0.5781 coefficient - "vmov.s16 q12, #38 / 2 \n" // UR -0.2969 coefficient - "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient - "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient - "vmov.u16 q15, #0x8080 \n" // 128.5 - ".p2align 2 \n" - "1: \n" - MEMACCESS(0) - "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels. - MEMACCESS(0) - "vld4.8 {d1, d3, d5, d7}, [%0]! \n" // load next 8 ARGB pixels. - - "vpaddl.u8 q0, q0 \n" // B 16 bytes -> 8 shorts. - "vpaddl.u8 q1, q1 \n" // G 16 bytes -> 8 shorts. 
- "vpaddl.u8 q2, q2 \n" // R 16 bytes -> 8 shorts. - - "subs %3, %3, #16 \n" // 16 processed per loop. - "vmul.s16 q8, q0, q10 \n" // B - "vmls.s16 q8, q1, q11 \n" // G - "vmls.s16 q8, q2, q12 \n" // R - "vadd.u16 q8, q8, q15 \n" // +128 -> unsigned - - "vmul.s16 q9, q2, q10 \n" // R - "vmls.s16 q9, q1, q14 \n" // G - "vmls.s16 q9, q0, q13 \n" // B - "vadd.u16 q9, q9, q15 \n" // +128 -> unsigned - - "vqshrn.u16 d0, q8, #8 \n" // 16 bit to 8 bit U - "vqshrn.u16 d1, q9, #8 \n" // 16 bit to 8 bit V - - MEMACCESS(1) - "vst1.8 {d0}, [%1]! \n" // store 8 pixels U. - MEMACCESS(2) - "vst1.8 {d1}, [%2]! \n" // store 8 pixels V. - "bgt 1b \n" - : "+r"(src_argb), // %0 - "+r"(dst_u), // %1 - "+r"(dst_v), // %2 - "+r"(pix) // %3 - : - : "cc", "memory", "q0", "q1", "q2", "q3", - "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" - ); -} - -// 32x1 pixels -> 8x1. pix is number of argb pixels. e.g. 32. +// 32x1 pixels -> 8x1. width is number of argb pixels. e.g. 32. void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix) { + int width) { asm volatile ( "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient "vmov.s16 q11, #74 / 2 \n" // UG -0.5781 coefficient @@ -1566,7 +1391,6 @@ void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient "vmov.u16 q15, #0x8080 \n" // 128.5 - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels. @@ -1613,14 +1437,14 @@ void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, : "+r"(src_argb), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } -// 16x2 pixels -> 8x1. pix is number of argb pixels. e.g. 16. +// 16x2 pixels -> 8x1. width is number of argb pixels. e.g. 16. 
#define RGBTOUV(QB, QG, QR) \ "vmul.s16 q8, " #QB ", q10 \n" /* B */ \ "vmls.s16 q8, " #QG ", q11 \n" /* G */ \ @@ -1635,7 +1459,7 @@ void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, // TODO(fbarchard): Consider vhadd vertical, then vpaddl horizontal, avoid shr. void ARGBToUVRow_NEON(const uint8* src_argb, int src_stride_argb, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "add %1, %0, %1 \n" // src_stride + src_argb "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient @@ -1644,7 +1468,6 @@ void ARGBToUVRow_NEON(const uint8* src_argb, int src_stride_argb, "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient "vmov.u16 q15, #0x8080 \n" // 128.5 - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels. @@ -1676,7 +1499,7 @@ void ARGBToUVRow_NEON(const uint8* src_argb, int src_stride_argb, "+r"(src_stride_argb), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" @@ -1685,7 +1508,7 @@ void ARGBToUVRow_NEON(const uint8* src_argb, int src_stride_argb, // TODO(fbarchard): Subsample match C code. void ARGBToUVJRow_NEON(const uint8* src_argb, int src_stride_argb, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "add %1, %0, %1 \n" // src_stride + src_argb "vmov.s16 q10, #127 / 2 \n" // UB / VR 0.500 coefficient @@ -1694,7 +1517,6 @@ void ARGBToUVJRow_NEON(const uint8* src_argb, int src_stride_argb, "vmov.s16 q13, #20 / 2 \n" // VB -0.08131 coefficient "vmov.s16 q14, #107 / 2 \n" // VG -0.41869 coefficient "vmov.u16 q15, #0x8080 \n" // 128.5 - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels. 
@@ -1726,7 +1548,7 @@ void ARGBToUVJRow_NEON(const uint8* src_argb, int src_stride_argb, "+r"(src_stride_argb), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" @@ -1734,7 +1556,7 @@ void ARGBToUVJRow_NEON(const uint8* src_argb, int src_stride_argb, } void BGRAToUVRow_NEON(const uint8* src_bgra, int src_stride_bgra, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "add %1, %0, %1 \n" // src_stride + src_bgra "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient @@ -1743,7 +1565,6 @@ void BGRAToUVRow_NEON(const uint8* src_bgra, int src_stride_bgra, "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient "vmov.u16 q15, #0x8080 \n" // 128.5 - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 BGRA pixels. @@ -1775,7 +1596,7 @@ void BGRAToUVRow_NEON(const uint8* src_bgra, int src_stride_bgra, "+r"(src_stride_bgra), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" @@ -1783,7 +1604,7 @@ void BGRAToUVRow_NEON(const uint8* src_bgra, int src_stride_bgra, } void ABGRToUVRow_NEON(const uint8* src_abgr, int src_stride_abgr, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "add %1, %0, %1 \n" // src_stride + src_abgr "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient @@ -1792,7 +1613,6 @@ void ABGRToUVRow_NEON(const uint8* src_abgr, int src_stride_abgr, "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient "vmov.u16 q15, #0x8080 \n" // 128.5 - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ABGR pixels. 
@@ -1824,7 +1644,7 @@ void ABGRToUVRow_NEON(const uint8* src_abgr, int src_stride_abgr, "+r"(src_stride_abgr), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" @@ -1832,7 +1652,7 @@ void ABGRToUVRow_NEON(const uint8* src_abgr, int src_stride_abgr, } void RGBAToUVRow_NEON(const uint8* src_rgba, int src_stride_rgba, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "add %1, %0, %1 \n" // src_stride + src_rgba "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient @@ -1841,7 +1661,6 @@ void RGBAToUVRow_NEON(const uint8* src_rgba, int src_stride_rgba, "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient "vmov.u16 q15, #0x8080 \n" // 128.5 - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 RGBA pixels. @@ -1873,7 +1692,7 @@ void RGBAToUVRow_NEON(const uint8* src_rgba, int src_stride_rgba, "+r"(src_stride_rgba), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" @@ -1881,7 +1700,7 @@ void RGBAToUVRow_NEON(const uint8* src_rgba, int src_stride_rgba, } void RGB24ToUVRow_NEON(const uint8* src_rgb24, int src_stride_rgb24, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "add %1, %0, %1 \n" // src_stride + src_rgb24 "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient @@ -1890,7 +1709,6 @@ void RGB24ToUVRow_NEON(const uint8* src_rgb24, int src_stride_rgb24, "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient "vmov.u16 q15, #0x8080 \n" // 128.5 - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld3.8 {d0, d2, d4}, [%0]! 
\n" // load 8 RGB24 pixels. @@ -1922,7 +1740,7 @@ void RGB24ToUVRow_NEON(const uint8* src_rgb24, int src_stride_rgb24, "+r"(src_stride_rgb24), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" @@ -1930,7 +1748,7 @@ void RGB24ToUVRow_NEON(const uint8* src_rgb24, int src_stride_rgb24, } void RAWToUVRow_NEON(const uint8* src_raw, int src_stride_raw, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "add %1, %0, %1 \n" // src_stride + src_raw "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient @@ -1939,7 +1757,6 @@ void RAWToUVRow_NEON(const uint8* src_raw, int src_stride_raw, "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient "vmov.u16 q15, #0x8080 \n" // 128.5 - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld3.8 {d0, d2, d4}, [%0]! \n" // load 8 RAW pixels. @@ -1971,16 +1788,16 @@ void RAWToUVRow_NEON(const uint8* src_raw, int src_stride_raw, "+r"(src_stride_raw), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } -// 16x2 pixels -> 8x1. pix is number of argb pixels. e.g. 16. +// 16x2 pixels -> 8x1. width is number of argb pixels. e.g. 16. 
void RGB565ToUVRow_NEON(const uint8* src_rgb565, int src_stride_rgb565, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "add %1, %0, %1 \n" // src_stride + src_argb "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient @@ -1989,7 +1806,6 @@ void RGB565ToUVRow_NEON(const uint8* src_rgb565, int src_stride_rgb565, "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient "vmov.u16 q15, #0x8080 \n" // 128.5 - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0}, [%0]! \n" // load 8 RGB565 pixels. @@ -2041,16 +1857,16 @@ void RGB565ToUVRow_NEON(const uint8* src_rgb565, int src_stride_rgb565, "+r"(src_stride_rgb565), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } -// 16x2 pixels -> 8x1. pix is number of argb pixels. e.g. 16. +// 16x2 pixels -> 8x1. width is number of argb pixels. e.g. 16. void ARGB1555ToUVRow_NEON(const uint8* src_argb1555, int src_stride_argb1555, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "add %1, %0, %1 \n" // src_stride + src_argb "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient @@ -2059,7 +1875,6 @@ void ARGB1555ToUVRow_NEON(const uint8* src_argb1555, int src_stride_argb1555, "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient "vmov.u16 q15, #0x8080 \n" // 128.5 - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0}, [%0]! \n" // load 8 ARGB1555 pixels. 
@@ -2111,16 +1926,16 @@ void ARGB1555ToUVRow_NEON(const uint8* src_argb1555, int src_stride_argb1555, "+r"(src_stride_argb1555), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } -// 16x2 pixels -> 8x1. pix is number of argb pixels. e.g. 16. +// 16x2 pixels -> 8x1. width is number of argb pixels. e.g. 16. void ARGB4444ToUVRow_NEON(const uint8* src_argb4444, int src_stride_argb4444, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { asm volatile ( "add %1, %0, %1 \n" // src_stride + src_argb "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient @@ -2129,7 +1944,6 @@ void ARGB4444ToUVRow_NEON(const uint8* src_argb4444, int src_stride_argb4444, "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient "vmov.u16 q15, #0x8080 \n" // 128.5 - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0}, [%0]! \n" // load 8 ARGB4444 pixels. @@ -2181,20 +1995,19 @@ void ARGB4444ToUVRow_NEON(const uint8* src_argb4444, int src_stride_argb4444, "+r"(src_stride_argb4444), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } -void RGB565ToYRow_NEON(const uint8* src_rgb565, uint8* dst_y, int pix) { +void RGB565ToYRow_NEON(const uint8* src_rgb565, uint8* dst_y, int width) { asm volatile ( "vmov.u8 d24, #13 \n" // B * 0.1016 coefficient "vmov.u8 d25, #65 \n" // G * 0.5078 coefficient "vmov.u8 d26, #33 \n" // R * 0.2578 coefficient "vmov.u8 d27, #16 \n" // Add 16 constant - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0}, [%0]! \n" // load 8 RGB565 pixels. 
@@ -2210,19 +2023,18 @@ void RGB565ToYRow_NEON(const uint8* src_rgb565, uint8* dst_y, int pix) { "bgt 1b \n" : "+r"(src_rgb565), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "q0", "q1", "q2", "q3", "q12", "q13" ); } -void ARGB1555ToYRow_NEON(const uint8* src_argb1555, uint8* dst_y, int pix) { +void ARGB1555ToYRow_NEON(const uint8* src_argb1555, uint8* dst_y, int width) { asm volatile ( "vmov.u8 d24, #13 \n" // B * 0.1016 coefficient "vmov.u8 d25, #65 \n" // G * 0.5078 coefficient "vmov.u8 d26, #33 \n" // R * 0.2578 coefficient "vmov.u8 d27, #16 \n" // Add 16 constant - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0}, [%0]! \n" // load 8 ARGB1555 pixels. @@ -2238,19 +2050,18 @@ void ARGB1555ToYRow_NEON(const uint8* src_argb1555, uint8* dst_y, int pix) { "bgt 1b \n" : "+r"(src_argb1555), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "q0", "q1", "q2", "q3", "q12", "q13" ); } -void ARGB4444ToYRow_NEON(const uint8* src_argb4444, uint8* dst_y, int pix) { +void ARGB4444ToYRow_NEON(const uint8* src_argb4444, uint8* dst_y, int width) { asm volatile ( "vmov.u8 d24, #13 \n" // B * 0.1016 coefficient "vmov.u8 d25, #65 \n" // G * 0.5078 coefficient "vmov.u8 d26, #33 \n" // R * 0.2578 coefficient "vmov.u8 d27, #16 \n" // Add 16 constant - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0}, [%0]! \n" // load 8 ARGB4444 pixels. 
@@ -2266,19 +2077,18 @@ void ARGB4444ToYRow_NEON(const uint8* src_argb4444, uint8* dst_y, int pix) { "bgt 1b \n" : "+r"(src_argb4444), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "q0", "q1", "q2", "q3", "q12", "q13" ); } -void BGRAToYRow_NEON(const uint8* src_bgra, uint8* dst_y, int pix) { +void BGRAToYRow_NEON(const uint8* src_bgra, uint8* dst_y, int width) { asm volatile ( "vmov.u8 d4, #33 \n" // R * 0.2578 coefficient "vmov.u8 d5, #65 \n" // G * 0.5078 coefficient "vmov.u8 d6, #13 \n" // B * 0.1016 coefficient "vmov.u8 d7, #16 \n" // Add 16 constant - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 pixels of BGRA. @@ -2293,19 +2103,18 @@ void BGRAToYRow_NEON(const uint8* src_bgra, uint8* dst_y, int pix) { "bgt 1b \n" : "+r"(src_bgra), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8" ); } -void ABGRToYRow_NEON(const uint8* src_abgr, uint8* dst_y, int pix) { +void ABGRToYRow_NEON(const uint8* src_abgr, uint8* dst_y, int width) { asm volatile ( "vmov.u8 d4, #33 \n" // R * 0.2578 coefficient "vmov.u8 d5, #65 \n" // G * 0.5078 coefficient "vmov.u8 d6, #13 \n" // B * 0.1016 coefficient "vmov.u8 d7, #16 \n" // Add 16 constant - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 pixels of ABGR. 
@@ -2320,19 +2129,18 @@ void ABGRToYRow_NEON(const uint8* src_abgr, uint8* dst_y, int pix) { "bgt 1b \n" : "+r"(src_abgr), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8" ); } -void RGBAToYRow_NEON(const uint8* src_rgba, uint8* dst_y, int pix) { +void RGBAToYRow_NEON(const uint8* src_rgba, uint8* dst_y, int width) { asm volatile ( "vmov.u8 d4, #13 \n" // B * 0.1016 coefficient "vmov.u8 d5, #65 \n" // G * 0.5078 coefficient "vmov.u8 d6, #33 \n" // R * 0.2578 coefficient "vmov.u8 d7, #16 \n" // Add 16 constant - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 pixels of RGBA. @@ -2347,19 +2155,18 @@ void RGBAToYRow_NEON(const uint8* src_rgba, uint8* dst_y, int pix) { "bgt 1b \n" : "+r"(src_rgba), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8" ); } -void RGB24ToYRow_NEON(const uint8* src_rgb24, uint8* dst_y, int pix) { +void RGB24ToYRow_NEON(const uint8* src_rgb24, uint8* dst_y, int width) { asm volatile ( "vmov.u8 d4, #13 \n" // B * 0.1016 coefficient "vmov.u8 d5, #65 \n" // G * 0.5078 coefficient "vmov.u8 d6, #33 \n" // R * 0.2578 coefficient "vmov.u8 d7, #16 \n" // Add 16 constant - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld3.8 {d0, d1, d2}, [%0]! \n" // load 8 pixels of RGB24. 
@@ -2374,19 +2181,18 @@ void RGB24ToYRow_NEON(const uint8* src_rgb24, uint8* dst_y, int pix) { "bgt 1b \n" : "+r"(src_rgb24), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8" ); } -void RAWToYRow_NEON(const uint8* src_raw, uint8* dst_y, int pix) { +void RAWToYRow_NEON(const uint8* src_raw, uint8* dst_y, int width) { asm volatile ( "vmov.u8 d4, #33 \n" // R * 0.2578 coefficient "vmov.u8 d5, #65 \n" // G * 0.5078 coefficient "vmov.u8 d6, #13 \n" // B * 0.1016 coefficient "vmov.u8 d7, #16 \n" // Add 16 constant - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld3.8 {d0, d1, d2}, [%0]! \n" // load 8 pixels of RAW. @@ -2401,7 +2207,7 @@ void RAWToYRow_NEON(const uint8* src_raw, uint8* dst_y, int pix) { "bgt 1b \n" : "+r"(src_raw), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8" ); @@ -2411,16 +2217,13 @@ void RAWToYRow_NEON(const uint8* src_raw, uint8* dst_y, int pix) { void InterpolateRow_NEON(uint8* dst_ptr, const uint8* src_ptr, ptrdiff_t src_stride, int dst_width, int source_y_fraction) { + int y1_fraction = source_y_fraction; asm volatile ( "cmp %4, #0 \n" "beq 100f \n" "add %2, %1 \n" - "cmp %4, #64 \n" - "beq 75f \n" "cmp %4, #128 \n" "beq 50f \n" - "cmp %4, #192 \n" - "beq 25f \n" "vdup.8 d5, %4 \n" "rsb %4, #256 \n" @@ -2443,20 +2246,6 @@ void InterpolateRow_NEON(uint8* dst_ptr, "bgt 1b \n" "b 99f \n" - // Blend 25 / 75. - "25: \n" - MEMACCESS(1) - "vld1.8 {q0}, [%1]! \n" - MEMACCESS(2) - "vld1.8 {q1}, [%2]! \n" - "subs %3, %3, #16 \n" - "vrhadd.u8 q0, q1 \n" - "vrhadd.u8 q0, q1 \n" - MEMACCESS(0) - "vst1.8 {q0}, [%0]! \n" - "bgt 25b \n" - "b 99f \n" - // Blend 50 / 50. "50: \n" MEMACCESS(1) @@ -2470,20 +2259,6 @@ void InterpolateRow_NEON(uint8* dst_ptr, "bgt 50b \n" "b 99f \n" - // Blend 75 / 25. - "75: \n" - MEMACCESS(1) - "vld1.8 {q1}, [%1]! \n" - MEMACCESS(2) - "vld1.8 {q0}, [%2]! 
\n" - "subs %3, %3, #16 \n" - "vrhadd.u8 q0, q1 \n" - "vrhadd.u8 q0, q1 \n" - MEMACCESS(0) - "vst1.8 {q0}, [%0]! \n" - "bgt 75b \n" - "b 99f \n" - // Blend 100 / 0 - Copy row unchanged. "100: \n" MEMACCESS(1) @@ -2498,7 +2273,7 @@ void InterpolateRow_NEON(uint8* dst_ptr, "+r"(src_ptr), // %1 "+r"(src_stride), // %2 "+r"(dst_width), // %3 - "+r"(source_y_fraction) // %4 + "+r"(y1_fraction) // %4 : : "cc", "memory", "q0", "q1", "d4", "d5", "q13", "q14" ); @@ -2605,7 +2380,6 @@ void ARGBQuantizeRow_NEON(uint8* dst_argb, int scale, int interval_size, "vdup.u16 q10, %4 \n" // interval add // 8 pixel loop. - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d2, d4, d6}, [%0] \n" // load 8 pixels of ARGB. @@ -2648,7 +2422,6 @@ void ARGBShadeRow_NEON(const uint8* src_argb, uint8* dst_argb, int width, "vshr.u16 q0, q0, #1 \n" // scale / 2. // 8 pixel loop. - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d20, d22, d24, d26}, [%0]! \n" // load 8 pixels of ARGB. @@ -2684,7 +2457,6 @@ void ARGBGrayRow_NEON(const uint8* src_argb, uint8* dst_argb, int width) { "vmov.u8 d24, #15 \n" // B * 0.11400 coefficient "vmov.u8 d25, #75 \n" // G * 0.58700 coefficient "vmov.u8 d26, #38 \n" // R * 0.29900 coefficient - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 ARGB pixels. @@ -2721,7 +2493,6 @@ void ARGBSepiaRow_NEON(uint8* dst_argb, int width) { "vmov.u8 d28, #24 \n" // BB coefficient "vmov.u8 d29, #98 \n" // BG coefficient "vmov.u8 d30, #50 \n" // BR coefficient - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0] \n" // load 8 ARGB pixels. @@ -2760,7 +2531,6 @@ void ARGBColorMatrixRow_NEON(const uint8* src_argb, uint8* dst_argb, "vmovl.s8 q0, d4 \n" // B,G coefficients s16. "vmovl.s8 q1, d5 \n" // R,A coefficients s16. - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d16, d18, d20, d22}, [%0]! \n" // load 8 ARGB pixels. 
@@ -2813,14 +2583,11 @@ void ARGBColorMatrixRow_NEON(const uint8* src_argb, uint8* dst_argb, ); } -// TODO(fbarchard): fix vqshrun in ARGBMultiplyRow_NEON and reenable. -#ifdef HAS_ARGBMULTIPLYROW_NEON // Multiply 2 rows of ARGB pixels together, 8 pixels at a time. void ARGBMultiplyRow_NEON(const uint8* src_argb0, const uint8* src_argb1, uint8* dst_argb, int width) { asm volatile ( // 8 pixel loop. - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels. @@ -2847,14 +2614,12 @@ void ARGBMultiplyRow_NEON(const uint8* src_argb0, const uint8* src_argb1, : "cc", "memory", "q0", "q1", "q2", "q3" ); } -#endif // HAS_ARGBMULTIPLYROW_NEON // Add 2 rows of ARGB pixels together, 8 pixels at a time. void ARGBAddRow_NEON(const uint8* src_argb0, const uint8* src_argb1, uint8* dst_argb, int width) { asm volatile ( // 8 pixel loop. - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 ARGB pixels. @@ -2881,7 +2646,6 @@ void ARGBSubtractRow_NEON(const uint8* src_argb0, const uint8* src_argb1, uint8* dst_argb, int width) { asm volatile ( // 8 pixel loop. - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 ARGB pixels. @@ -2913,7 +2677,6 @@ void SobelRow_NEON(const uint8* src_sobelx, const uint8* src_sobely, asm volatile ( "vmov.u8 d3, #255 \n" // alpha // 8 pixel loop. - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {d0}, [%0]! \n" // load 8 sobelx. @@ -2940,7 +2703,6 @@ void SobelToPlaneRow_NEON(const uint8* src_sobelx, const uint8* src_sobely, uint8* dst_y, int width) { asm volatile ( // 16 pixel loop. - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0}, [%0]! \n" // load 16 sobelx. @@ -2970,7 +2732,6 @@ void SobelXYRow_NEON(const uint8* src_sobelx, const uint8* src_sobely, asm volatile ( "vmov.u8 d3, #255 \n" // alpha // 8 pixel loop. - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {d2}, [%0]! \n" // load 8 sobelx. 
@@ -2997,7 +2758,6 @@ void SobelXYRow_NEON(const uint8* src_sobelx, const uint8* src_sobely, void SobelXRow_NEON(const uint8* src_y0, const uint8* src_y1, const uint8* src_y2, uint8* dst_sobelx, int width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {d0}, [%0],%5 \n" // top @@ -3041,7 +2801,6 @@ void SobelXRow_NEON(const uint8* src_y0, const uint8* src_y1, void SobelYRow_NEON(const uint8* src_y0, const uint8* src_y1, uint8* dst_sobely, int width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {d0}, [%0],%4 \n" // left diff --git a/libs/libvpx/third_party/libyuv/source/row_neon64.cc b/libs/libvpx/third_party/libyuv/source/row_neon64.cc index 5d015454b0..6375d4f55f 100644 --- a/libs/libvpx/third_party/libyuv/source/row_neon64.cc +++ b/libs/libvpx/third_party/libyuv/source/row_neon64.cc @@ -91,17 +91,15 @@ extern "C" { "uzp2 v3.8b, v2.8b, v2.8b \n" \ "ins v1.s[1], v3.s[0] \n" -#define YUV422TORGB_SETUP_REG \ +#define YUVTORGB_SETUP \ "ld1r {v24.8h}, [%[kUVBiasBGR]], #2 \n" \ "ld1r {v25.8h}, [%[kUVBiasBGR]], #2 \n" \ "ld1r {v26.8h}, [%[kUVBiasBGR]] \n" \ "ld1r {v31.4s}, [%[kYToRgb]] \n" \ - "movi v27.8h, #128 \n" \ - "movi v28.8h, #102 \n" \ - "movi v29.8h, #25 \n" \ - "movi v30.8h, #52 \n" + "ld2 {v27.8h, v28.8h}, [%[kUVToRB]] \n" \ + "ld2 {v29.8h, v30.8h}, [%[kUVToG]] \n" -#define YUV422TORGB(vR, vG, vB) \ +#define YUVTORGB(vR, vG, vB) \ "uxtl v0.8h, v0.8b \n" /* Extract Y */ \ "shll v2.8h, v1.8b, #8 \n" /* Replicate UV */ \ "ushll2 v3.4s, v0.8h, #0 \n" /* Y */ \ @@ -129,57 +127,19 @@ extern "C" { "sqshrun " #vG ".8b, " #vG ".8h, #6 \n" /* G */ \ "sqshrun " #vR ".8b, " #vR ".8h, #6 \n" /* R */ \ -// YUV to RGB conversion constants. -// Y contribution to R,G,B. Scale and bias. -#define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */ -#define YGB 1160 /* 1.164 * 64 * 16 - adjusted for even error distribution */ - -// U and V contributions to R,G,B. 
-#define UB -128 /* -min(128, round(2.018 * 64)) */ -#define UG 25 /* -round(-0.391 * 64) */ -#define VG 52 /* -round(-0.813 * 64) */ -#define VR -102 /* -round(1.596 * 64) */ - -// Bias values to subtract 16 from Y and 128 from U and V. -#define BB (UB * 128 - YGB) -#define BG (UG * 128 + VG * 128 - YGB) -#define BR (VR * 128 - YGB) - -static vec16 kUVBiasBGR = { BB, BG, BR, 0, 0, 0, 0, 0 }; -static vec32 kYToRgb = { 0x0101 * YG, 0, 0, 0 }; - -#undef YG -#undef YGB -#undef UB -#undef UG -#undef VG -#undef VR -#undef BB -#undef BG -#undef BR - -#define RGBTOUV_SETUP_REG \ - "movi v20.8h, #56, lsl #0 \n" /* UB/VR coefficient (0.875) / 2 */ \ - "movi v21.8h, #37, lsl #0 \n" /* UG coefficient (-0.5781) / 2 */ \ - "movi v22.8h, #19, lsl #0 \n" /* UR coefficient (-0.2969) / 2 */ \ - "movi v23.8h, #9, lsl #0 \n" /* VB coefficient (-0.1406) / 2 */ \ - "movi v24.8h, #47, lsl #0 \n" /* VG coefficient (-0.7344) / 2 */ \ - "movi v25.16b, #0x80 \n" /* 128.5 (0x8080 in 16-bit) */ - - -#ifdef HAS_I444TOARGBROW_NEON void I444ToARGBRow_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG + YUVTORGB_SETUP + "movi v23.8b, #255 \n" /* A */ "1: \n" READYUV444 - YUV422TORGB(v22, v21, v20) - "subs %w4, %w4, #8 \n" - "movi v23.8b, #255 \n" /* A */ + YUVTORGB(v22, v21, v20) + "subs %w4, %w4, #8 \n" MEMACCESS(3) "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%3], #32 \n" "b.gt 1b \n" @@ -188,27 +148,28 @@ void I444ToARGBRow_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_argb), // %3 "+r"(width) // %4 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); } -#endif 
// HAS_I444TOARGBROW_NEON -#ifdef HAS_I422TOARGBROW_NEON void I422ToARGBRow_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG + YUVTORGB_SETUP + "movi v23.8b, #255 \n" /* A */ "1: \n" READYUV422 - YUV422TORGB(v22, v21, v20) + YUVTORGB(v22, v21, v20) "subs %w4, %w4, #8 \n" - "movi v23.8b, #255 \n" /* A */ MEMACCESS(3) "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%3], #32 \n" "b.gt 1b \n" @@ -217,27 +178,61 @@ void I422ToARGBRow_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_argb), // %3 "+r"(width) // %4 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", + "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" + ); +} + +void I422AlphaToARGBRow_NEON(const uint8* src_y, + const uint8* src_u, + const uint8* src_v, + const uint8* src_a, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + asm volatile ( + YUVTORGB_SETUP + "1: \n" + READYUV422 + YUVTORGB(v22, v21, v20) + MEMACCESS(3) + "ld1 {v23.8b}, [%3], #8 \n" + "subs %w5, %w5, #8 \n" + MEMACCESS(4) + "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%4], #32 \n" + "b.gt 1b \n" + : "+r"(src_y), // %0 + "+r"(src_u), // %1 + "+r"(src_v), // %2 + "+r"(src_a), // %3 + "+r"(dst_argb), // %4 + "+r"(width) // %5 + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); } -#endif // HAS_I422TOARGBROW_NEON -#ifdef HAS_I411TOARGBROW_NEON void I411ToARGBRow_NEON(const uint8* src_y, const 
uint8* src_u, const uint8* src_v, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG + YUVTORGB_SETUP + "movi v23.8b, #255 \n" /* A */ "1: \n" READYUV411 - YUV422TORGB(v22, v21, v20) + YUVTORGB(v22, v21, v20) "subs %w4, %w4, #8 \n" - "movi v23.8b, #255 \n" /* A */ MEMACCESS(3) "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%3], #32 \n" "b.gt 1b \n" @@ -246,85 +241,28 @@ void I411ToARGBRow_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_argb), // %3 "+r"(width) // %4 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); } -#endif // HAS_I411TOARGBROW_NEON -#ifdef HAS_I422TOBGRAROW_NEON -void I422ToBGRARow_NEON(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_bgra, - int width) { - asm volatile ( - YUV422TORGB_SETUP_REG - "1: \n" - READYUV422 - YUV422TORGB(v21, v22, v23) - "subs %w4, %w4, #8 \n" - "movi v20.8b, #255 \n" /* A */ - MEMACCESS(3) - "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%3], #32 \n" - "b.gt 1b \n" - : "+r"(src_y), // %0 - "+r"(src_u), // %1 - "+r"(src_v), // %2 - "+r"(dst_bgra), // %3 - "+r"(width) // %4 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", - "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" - ); -} -#endif // HAS_I422TOBGRAROW_NEON - -#ifdef HAS_I422TOABGRROW_NEON -void I422ToABGRRow_NEON(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_abgr, - int width) { - asm volatile ( - YUV422TORGB_SETUP_REG - "1: \n" - READYUV422 - YUV422TORGB(v20, v21, v22) - "subs %w4, %w4, #8 \n" - "movi v23.8b, #255 \n" /* A */ - MEMACCESS(3) - 
"st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%3], #32 \n" - "b.gt 1b \n" - : "+r"(src_y), // %0 - "+r"(src_u), // %1 - "+r"(src_v), // %2 - "+r"(dst_abgr), // %3 - "+r"(width) // %4 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", - "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" - ); -} -#endif // HAS_I422TOABGRROW_NEON - -#ifdef HAS_I422TORGBAROW_NEON void I422ToRGBARow_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgba, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG + YUVTORGB_SETUP + "movi v20.8b, #255 \n" /* A */ "1: \n" READYUV422 - YUV422TORGB(v23, v22, v21) + YUVTORGB(v23, v22, v21) "subs %w4, %w4, #8 \n" - "movi v20.8b, #255 \n" /* A */ MEMACCESS(3) "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%3], #32 \n" "b.gt 1b \n" @@ -333,25 +271,26 @@ void I422ToRGBARow_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_rgba), // %3 "+r"(width) // %4 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); } -#endif // HAS_I422TORGBAROW_NEON -#ifdef HAS_I422TORGB24ROW_NEON void I422ToRGB24Row_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgb24, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG + YUVTORGB_SETUP "1: \n" READYUV422 - YUV422TORGB(v22, v21, v20) + YUVTORGB(v22, v21, v20) "subs %w4, %w4, #8 \n" MEMACCESS(3) "st3 {v20.8b,v21.8b,v22.8b}, [%3], #24 \n" @@ -361,60 +300,33 @@ void I422ToRGB24Row_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_rgb24), // %3 "+r"(width) // %4 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - 
[kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); } -#endif // HAS_I422TORGB24ROW_NEON - -#ifdef HAS_I422TORAWROW_NEON -void I422ToRAWRow_NEON(const uint8* src_y, - const uint8* src_u, - const uint8* src_v, - uint8* dst_raw, - int width) { - asm volatile ( - YUV422TORGB_SETUP_REG - "1: \n" - READYUV422 - YUV422TORGB(v20, v21, v22) - "subs %w4, %w4, #8 \n" - MEMACCESS(3) - "st3 {v20.8b,v21.8b,v22.8b}, [%3], #24 \n" - "b.gt 1b \n" - : "+r"(src_y), // %0 - "+r"(src_u), // %1 - "+r"(src_v), // %2 - "+r"(dst_raw), // %3 - "+r"(width) // %4 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", - "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" - ); -} -#endif // HAS_I422TORAWROW_NEON #define ARGBTORGB565 \ "shll v0.8h, v22.8b, #8 \n" /* R */ \ - "shll v20.8h, v20.8b, #8 \n" /* B */ \ "shll v21.8h, v21.8b, #8 \n" /* G */ \ + "shll v20.8h, v20.8b, #8 \n" /* B */ \ "sri v0.8h, v21.8h, #5 \n" /* RG */ \ "sri v0.8h, v20.8h, #11 \n" /* RGB */ -#ifdef HAS_I422TORGB565ROW_NEON void I422ToRGB565Row_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_rgb565, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG + YUVTORGB_SETUP "1: \n" READYUV422 - YUV422TORGB(v22, v21, v20) + YUVTORGB(v22, v21, v20) "subs %w4, %w4, #8 \n" ARGBTORGB565 MEMACCESS(3) @@ -425,36 +337,37 @@ void I422ToRGB565Row_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_rgb565), // %3 "+r"(width) // %4 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + 
[kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); } -#endif // HAS_I422TORGB565ROW_NEON #define ARGBTOARGB1555 \ "shll v0.8h, v23.8b, #8 \n" /* A */ \ "shll v22.8h, v22.8b, #8 \n" /* R */ \ - "shll v20.8h, v20.8b, #8 \n" /* B */ \ "shll v21.8h, v21.8b, #8 \n" /* G */ \ + "shll v20.8h, v20.8b, #8 \n" /* B */ \ "sri v0.8h, v22.8h, #1 \n" /* AR */ \ "sri v0.8h, v21.8h, #6 \n" /* ARG */ \ "sri v0.8h, v20.8h, #11 \n" /* ARGB */ -#ifdef HAS_I422TOARGB1555ROW_NEON void I422ToARGB1555Row_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb1555, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG + YUVTORGB_SETUP + "movi v23.8b, #255 \n" "1: \n" READYUV422 - YUV422TORGB(v22, v21, v20) + YUVTORGB(v22, v21, v20) "subs %w4, %w4, #8 \n" - "movi v23.8b, #255 \n" ARGBTOARGB1555 MEMACCESS(3) "st1 {v0.8h}, [%3], #16 \n" // store 8 pixels RGB565. 
@@ -464,13 +377,14 @@ void I422ToARGB1555Row_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_argb1555), // %3 "+r"(width) // %4 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); } -#endif // HAS_I422TOARGB1555ROW_NEON #define ARGBTOARGB4444 \ /* Input v20.8b<=B, v21.8b<=G, v22.8b<=R, v23.8b<=A, v4.8b<=0x0f */ \ @@ -482,18 +396,18 @@ void I422ToARGB1555Row_NEON(const uint8* src_y, "orr v1.8b, v22.8b, v23.8b \n" /* RA */ \ "zip1 v0.16b, v0.16b, v1.16b \n" /* BGRA */ -#ifdef HAS_I422TOARGB4444ROW_NEON void I422ToARGB4444Row_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, uint8* dst_argb4444, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG + YUVTORGB_SETUP "movi v4.16b, #0x0f \n" // bits to clear with vbic. 
"1: \n" READYUV422 - YUV422TORGB(v22, v21, v20) + YUVTORGB(v22, v21, v20) "subs %w4, %w4, #8 \n" "movi v23.8b, #255 \n" ARGBTOARGB4444 @@ -505,41 +419,40 @@ void I422ToARGB4444Row_NEON(const uint8* src_y, "+r"(src_v), // %2 "+r"(dst_argb4444), // %3 "+r"(width) // %4 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); } -#endif // HAS_I422TOARGB4444ROW_NEON -#ifdef HAS_I400TOARGBROW_NEON void I400ToARGBRow_NEON(const uint8* src_y, uint8* dst_argb, int width) { - int64 width64 = (int64)(width); asm volatile ( - YUV422TORGB_SETUP_REG + YUVTORGB_SETUP + "movi v23.8b, #255 \n" "1: \n" READYUV400 - YUV422TORGB(v22, v21, v20) + YUVTORGB(v22, v21, v20) "subs %w2, %w2, #8 \n" - "movi v23.8b, #255 \n" MEMACCESS(1) "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%1], #32 \n" "b.gt 1b \n" : "+r"(src_y), // %0 "+r"(dst_argb), // %1 - "+r"(width64) // %2 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + "+r"(width) // %2 + : [kUVToRB]"r"(&kYuvI601Constants.kUVToRB), + [kUVToG]"r"(&kYuvI601Constants.kUVToG), + [kUVBiasBGR]"r"(&kYuvI601Constants.kUVBiasBGR), + [kYToRgb]"r"(&kYuvI601Constants.kYToRgb) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); } -#endif // HAS_I400TOARGBROW_NEON -#ifdef HAS_J400TOARGBROW_NEON void J400ToARGBRow_NEON(const uint8* src_y, uint8* dst_argb, int width) { @@ -561,20 +474,19 @@ void J400ToARGBRow_NEON(const uint8* src_y, : "cc", "memory", "v20", "v21", "v22", "v23" ); } -#endif // HAS_J400TOARGBROW_NEON -#ifdef HAS_NV12TOARGBROW_NEON void NV12ToARGBRow_NEON(const uint8* src_y, const uint8* src_uv, uint8* dst_argb, + const struct YuvConstants* 
yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG + YUVTORGB_SETUP + "movi v23.8b, #255 \n" "1: \n" READNV12 - YUV422TORGB(v22, v21, v20) + YUVTORGB(v22, v21, v20) "subs %w3, %w3, #8 \n" - "movi v23.8b, #255 \n" MEMACCESS(2) "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%2], #32 \n" "b.gt 1b \n" @@ -582,51 +494,53 @@ void NV12ToARGBRow_NEON(const uint8* src_y, "+r"(src_uv), // %1 "+r"(dst_argb), // %2 "+r"(width) // %3 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); } -#endif // HAS_NV12TOARGBROW_NEON -#ifdef HAS_NV21TOARGBROW_NEON void NV21ToARGBRow_NEON(const uint8* src_y, - const uint8* src_uv, + const uint8* src_vu, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { asm volatile ( - YUV422TORGB_SETUP_REG + YUVTORGB_SETUP + "movi v23.8b, #255 \n" "1: \n" READNV21 - YUV422TORGB(v22, v21, v20) + YUVTORGB(v22, v21, v20) "subs %w3, %w3, #8 \n" - "movi v23.8b, #255 \n" MEMACCESS(2) "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%2], #32 \n" "b.gt 1b \n" : "+r"(src_y), // %0 - "+r"(src_uv), // %1 + "+r"(src_vu), // %1 "+r"(dst_argb), // %2 "+r"(width) // %3 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); } -#endif // HAS_NV21TOARGBROW_NEON -#ifdef HAS_NV12TORGB565ROW_NEON void NV12ToRGB565Row_NEON(const uint8* src_y, const uint8* src_uv, uint8* dst_rgb565, + const struct YuvConstants* yuvconstants, int width) { asm 
volatile ( - YUV422TORGB_SETUP_REG + YUVTORGB_SETUP "1: \n" READNV12 - YUV422TORGB(v22, v21, v20) + YUVTORGB(v22, v21, v20) "subs %w3, %w3, #8 \n" ARGBTORGB565 MEMACCESS(2) @@ -636,95 +550,68 @@ void NV12ToRGB565Row_NEON(const uint8* src_y, "+r"(src_uv), // %1 "+r"(dst_rgb565), // %2 "+r"(width) // %3 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); } -#endif // HAS_NV12TORGB565ROW_NEON -#ifdef HAS_NV21TORGB565ROW_NEON -void NV21ToRGB565Row_NEON(const uint8* src_y, - const uint8* src_uv, - uint8* dst_rgb565, - int width) { - asm volatile ( - YUV422TORGB_SETUP_REG - "1: \n" - READNV21 - YUV422TORGB(v22, v21, v20) - "subs %w3, %w3, #8 \n" - ARGBTORGB565 - MEMACCESS(2) - "st1 {v0.8h}, [%2], 16 \n" // store 8 pixels RGB565. 
- "b.gt 1b \n" - : "+r"(src_y), // %0 - "+r"(src_uv), // %1 - "+r"(dst_rgb565), // %2 - "+r"(width) // %3 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", - "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" - ); -} -#endif // HAS_NV21TORGB565ROW_NEON - -#ifdef HAS_YUY2TOARGBROW_NEON void YUY2ToARGBRow_NEON(const uint8* src_yuy2, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { - int64 width64 = (int64)(width); asm volatile ( - YUV422TORGB_SETUP_REG + YUVTORGB_SETUP + "movi v23.8b, #255 \n" "1: \n" READYUY2 - YUV422TORGB(v22, v21, v20) + YUVTORGB(v22, v21, v20) "subs %w2, %w2, #8 \n" - "movi v23.8b, #255 \n" MEMACCESS(1) "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%1], #32 \n" "b.gt 1b \n" : "+r"(src_yuy2), // %0 "+r"(dst_argb), // %1 - "+r"(width64) // %2 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + "+r"(width) // %2 + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), + [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); } -#endif // HAS_YUY2TOARGBROW_NEON -#ifdef HAS_UYVYTOARGBROW_NEON void UYVYToARGBRow_NEON(const uint8* src_uyvy, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { - int64 width64 = (int64)(width); asm volatile ( - YUV422TORGB_SETUP_REG + YUVTORGB_SETUP + "movi v23.8b, #255 \n" "1: \n" READUYVY - YUV422TORGB(v22, v21, v20) + YUVTORGB(v22, v21, v20) "subs %w2, %w2, #8 \n" - "movi v23.8b, #255 \n" MEMACCESS(1) "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%1], 32 \n" "b.gt 1b \n" : "+r"(src_uyvy), // %0 "+r"(dst_argb), // %1 - "+r"(width64) // %2 - : [kUVBiasBGR]"r"(&kUVBiasBGR), - [kYToRgb]"r"(&kYToRgb) + "+r"(width) // %2 + : [kUVToRB]"r"(&yuvconstants->kUVToRB), + [kUVToG]"r"(&yuvconstants->kUVToG), 
+ [kUVBiasBGR]"r"(&yuvconstants->kUVBiasBGR), + [kYToRgb]"r"(&yuvconstants->kYToRgb) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30" ); } -#endif // HAS_UYVYTOARGBROW_NEON // Reads 16 pairs of UV and write even values to dst_u and odd to dst_v. -#ifdef HAS_SPLITUVROW_NEON void SplitUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int width) { asm volatile ( @@ -745,10 +632,8 @@ void SplitUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v, : "cc", "memory", "v0", "v1" // Clobber List ); } -#endif // HAS_SPLITUVROW_NEON // Reads 16 U's and V's and writes out 16 pairs of UV. -#ifdef HAS_MERGEUVROW_NEON void MergeUVRow_NEON(const uint8* src_u, const uint8* src_v, uint8* dst_uv, int width) { asm volatile ( @@ -770,10 +655,8 @@ void MergeUVRow_NEON(const uint8* src_u, const uint8* src_v, uint8* dst_uv, : "cc", "memory", "v0", "v1" // Clobber List ); } -#endif // HAS_MERGEUVROW_NEON // Copy multiple of 32. vld4.8 allow unaligned and is fastest on a15. -#ifdef HAS_COPYROW_NEON void CopyRow_NEON(const uint8* src, uint8* dst, int count) { asm volatile ( "1: \n" @@ -790,17 +673,16 @@ void CopyRow_NEON(const uint8* src, uint8* dst, int count) { : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List ); } -#endif // HAS_COPYROW_NEON // SetRow writes 'count' bytes using an 8 bit value repeated. 
void SetRow_NEON(uint8* dst, uint8 v8, int count) { asm volatile ( "dup v0.16b, %w2 \n" // duplicate 16 bytes "1: \n" - "subs %w1, %w1, #16 \n" // 16 bytes per loop + "subs %w1, %w1, #16 \n" // 16 bytes per loop MEMACCESS(0) "st1 {v0.16b}, [%0], #16 \n" // store - "b.gt 1b \n" + "b.gt 1b \n" : "+r"(dst), // %0 "+r"(count) // %1 : "r"(v8) // %2 @@ -812,10 +694,10 @@ void ARGBSetRow_NEON(uint8* dst, uint32 v32, int count) { asm volatile ( "dup v0.4s, %w2 \n" // duplicate 4 ints "1: \n" - "subs %w1, %w1, #4 \n" // 4 ints per loop + "subs %w1, %w1, #4 \n" // 4 ints per loop MEMACCESS(0) "st1 {v0.16b}, [%0], #16 \n" // store - "b.gt 1b \n" + "b.gt 1b \n" : "+r"(dst), // %0 "+r"(count) // %1 : "r"(v32) // %2 @@ -823,18 +705,15 @@ void ARGBSetRow_NEON(uint8* dst, uint32 v32, int count) { ); } -#ifdef HAS_MIRRORROW_NEON void MirrorRow_NEON(const uint8* src, uint8* dst, int width) { - int64 width64 = (int64) width; asm volatile ( // Start at end of source row. - "add %0, %0, %2 \n" + "add %0, %0, %w2, sxtw \n" "sub %0, %0, #16 \n" - "1: \n" MEMACCESS(0) "ld1 {v0.16b}, [%0], %3 \n" // src -= 16 - "subs %2, %2, #16 \n" // 16 pixels per loop. + "subs %w2, %w2, #16 \n" // 16 pixels per loop. "rev64 v0.16b, v0.16b \n" MEMACCESS(1) "st1 {v0.D}[1], [%1], #8 \n" // dst += 16 @@ -843,26 +722,22 @@ void MirrorRow_NEON(const uint8* src, uint8* dst, int width) { "b.gt 1b \n" : "+r"(src), // %0 "+r"(dst), // %1 - "+r"(width64) // %2 + "+r"(width) // %2 : "r"((ptrdiff_t)-16) // %3 : "cc", "memory", "v0" ); } -#endif // HAS_MIRRORROW_NEON -#ifdef HAS_MIRRORUVROW_NEON void MirrorUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int width) { - int64 width64 = (int64) width; asm volatile ( // Start at end of source row. - "add %0, %0, %3, lsl #1 \n" + "add %0, %0, %w3, sxtw #1 \n" "sub %0, %0, #16 \n" - "1: \n" MEMACCESS(0) "ld2 {v0.8b, v1.8b}, [%0], %4 \n" // src -= 16 - "subs %3, %3, #8 \n" // 8 pixels per loop. + "subs %w3, %w3, #8 \n" // 8 pixels per loop. 
"rev64 v0.8b, v0.8b \n" "rev64 v1.8b, v1.8b \n" MEMACCESS(1) @@ -873,25 +748,21 @@ void MirrorUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v, : "+r"(src_uv), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(width64) // %3 + "+r"(width) // %3 : "r"((ptrdiff_t)-16) // %4 : "cc", "memory", "v0", "v1" ); } -#endif // HAS_MIRRORUVROW_NEON -#ifdef HAS_ARGBMIRRORROW_NEON void ARGBMirrorRow_NEON(const uint8* src, uint8* dst, int width) { - int64 width64 = (int64) width; asm volatile ( - // Start at end of source row. - "add %0, %0, %2, lsl #2 \n" + // Start at end of source row. + "add %0, %0, %w2, sxtw #2 \n" "sub %0, %0, #16 \n" - "1: \n" MEMACCESS(0) "ld1 {v0.16b}, [%0], %3 \n" // src -= 16 - "subs %2, %2, #4 \n" // 4 pixels per loop. + "subs %w2, %w2, #4 \n" // 4 pixels per loop. "rev64 v0.4s, v0.4s \n" MEMACCESS(1) "st1 {v0.D}[1], [%1], #8 \n" // dst += 16 @@ -900,15 +771,13 @@ void ARGBMirrorRow_NEON(const uint8* src, uint8* dst, int width) { "b.gt 1b \n" : "+r"(src), // %0 "+r"(dst), // %1 - "+r"(width64) // %2 + "+r"(width) // %2 : "r"((ptrdiff_t)-16) // %3 : "cc", "memory", "v0" ); } -#endif // HAS_ARGBMIRRORROW_NEON -#ifdef HAS_RGB24TOARGBROW_NEON -void RGB24ToARGBRow_NEON(const uint8* src_rgb24, uint8* dst_argb, int pix) { +void RGB24ToARGBRow_NEON(const uint8* src_rgb24, uint8* dst_argb, int width) { asm volatile ( "movi v4.8b, #255 \n" // Alpha "1: \n" @@ -920,15 +789,13 @@ void RGB24ToARGBRow_NEON(const uint8* src_rgb24, uint8* dst_argb, int pix) { "b.gt 1b \n" : "+r"(src_rgb24), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v1", "v2", "v3", "v4" // Clobber List ); } -#endif // HAS_RGB24TOARGBROW_NEON -#ifdef HAS_RAWTOARGBROW_NEON -void RAWToARGBRow_NEON(const uint8* src_raw, uint8* dst_argb, int pix) { +void RAWToARGBRow_NEON(const uint8* src_raw, uint8* dst_argb, int width) { asm volatile ( "movi v5.8b, #255 \n" // Alpha "1: \n" @@ -942,12 +809,30 @@ void RAWToARGBRow_NEON(const uint8* src_raw, uint8* 
dst_argb, int pix) { "b.gt 1b \n" : "+r"(src_raw), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5" // Clobber List ); } -#endif // HAS_RAWTOARGBROW_NEON + +void RAWToRGB24Row_NEON(const uint8* src_raw, uint8* dst_rgb24, int width) { + asm volatile ( + "1: \n" + MEMACCESS(0) + "ld3 {v0.8b,v1.8b,v2.8b}, [%0], #24 \n" // read r g b + "subs %w2, %w2, #8 \n" // 8 processed per loop. + "orr v3.8b, v1.8b, v1.8b \n" // move g + "orr v4.8b, v0.8b, v0.8b \n" // move r + MEMACCESS(1) + "st3 {v2.8b,v3.8b,v4.8b}, [%1], #24 \n" // store b g r + "b.gt 1b \n" + : "+r"(src_raw), // %0 + "+r"(dst_rgb24), // %1 + "+r"(width) // %2 + : + : "cc", "memory", "v0", "v1", "v2", "v3", "v4" // Clobber List + ); +} #define RGB565TOARGB \ "shrn v6.8b, v0.8h, #5 \n" /* G xxGGGGGG */ \ @@ -962,8 +847,7 @@ void RAWToARGBRow_NEON(const uint8* src_raw, uint8* dst_argb, int pix) { "orr v0.16b, v0.16b, v2.16b \n" /* R,B */ \ "dup v2.2D, v0.D[1] \n" /* R */ -#ifdef HAS_RGB565TOARGBROW_NEON -void RGB565ToARGBRow_NEON(const uint8* src_rgb565, uint8* dst_argb, int pix) { +void RGB565ToARGBRow_NEON(const uint8* src_rgb565, uint8* dst_argb, int width) { asm volatile ( "movi v3.8b, #255 \n" // Alpha "1: \n" @@ -976,12 +860,11 @@ void RGB565ToARGBRow_NEON(const uint8* src_rgb565, uint8* dst_argb, int pix) { "b.gt 1b \n" : "+r"(src_rgb565), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v6" // Clobber List ); } -#endif // HAS_RGB565TOARGBROW_NEON #define ARGB1555TOARGB \ "ushr v2.8h, v0.8h, #10 \n" /* R xxxRRRRR */ \ @@ -1020,9 +903,8 @@ void RGB565ToARGBRow_NEON(const uint8* src_rgb565, uint8* dst_argb, int pix) { "orr v2.16b, v1.16b, v3.16b \n" /* R */ \ "dup v1.2D, v0.D[1] \n" /* G */ \ -#ifdef HAS_ARGB1555TOARGBROW_NEON void ARGB1555ToARGBRow_NEON(const uint8* src_argb1555, uint8* dst_argb, - int pix) { + int width) { asm volatile ( "movi v3.8b, #255 \n" // Alpha 
"1: \n" @@ -1035,12 +917,11 @@ void ARGB1555ToARGBRow_NEON(const uint8* src_argb1555, uint8* dst_argb, "b.gt 1b \n" : "+r"(src_argb1555), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List ); } -#endif // HAS_ARGB1555TOARGBROW_NEON #define ARGB4444TOARGB \ "shrn v1.8b, v0.8h, #8 \n" /* v1(l) AR */ \ @@ -1054,9 +935,8 @@ void ARGB1555ToARGBRow_NEON(const uint8* src_argb1555, uint8* dst_argb, "dup v0.2D, v2.D[1] \n" \ "dup v1.2D, v3.D[1] \n" -#ifdef HAS_ARGB4444TOARGBROW_NEON void ARGB4444ToARGBRow_NEON(const uint8* src_argb4444, uint8* dst_argb, - int pix) { + int width) { asm volatile ( "1: \n" MEMACCESS(0) @@ -1068,15 +948,13 @@ void ARGB4444ToARGBRow_NEON(const uint8* src_argb4444, uint8* dst_argb, "b.gt 1b \n" : "+r"(src_argb4444), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4" // Clobber List ); } -#endif // HAS_ARGB4444TOARGBROW_NEON -#ifdef HAS_ARGBTORGB24ROW_NEON -void ARGBToRGB24Row_NEON(const uint8* src_argb, uint8* dst_rgb24, int pix) { +void ARGBToRGB24Row_NEON(const uint8* src_argb, uint8* dst_rgb24, int width) { asm volatile ( "1: \n" MEMACCESS(0) @@ -1087,15 +965,13 @@ void ARGBToRGB24Row_NEON(const uint8* src_argb, uint8* dst_rgb24, int pix) { "b.gt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_rgb24), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v1", "v2", "v3", "v4" // Clobber List ); } -#endif // HAS_ARGBTORGB24ROW_NEON -#ifdef HAS_ARGBTORAWROW_NEON -void ARGBToRAWRow_NEON(const uint8* src_argb, uint8* dst_raw, int pix) { +void ARGBToRAWRow_NEON(const uint8* src_argb, uint8* dst_raw, int width) { asm volatile ( "1: \n" MEMACCESS(0) @@ -1108,15 +984,13 @@ void ARGBToRAWRow_NEON(const uint8* src_argb, uint8* dst_raw, int pix) { "b.gt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_raw), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v1", "v2", "v3", "v4", "v5" // Clobber List ); } 
-#endif // HAS_ARGBTORAWROW_NEON -#ifdef HAS_YUY2TOYROW_NEON -void YUY2ToYRow_NEON(const uint8* src_yuy2, uint8* dst_y, int pix) { +void YUY2ToYRow_NEON(const uint8* src_yuy2, uint8* dst_y, int width) { asm volatile ( "1: \n" MEMACCESS(0) @@ -1127,15 +1001,13 @@ void YUY2ToYRow_NEON(const uint8* src_yuy2, uint8* dst_y, int pix) { "b.gt 1b \n" : "+r"(src_yuy2), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1" // Clobber List ); } -#endif // HAS_YUY2TOYROW_NEON -#ifdef HAS_UYVYTOYROW_NEON -void UYVYToYRow_NEON(const uint8* src_uyvy, uint8* dst_y, int pix) { +void UYVYToYRow_NEON(const uint8* src_uyvy, uint8* dst_y, int width) { asm volatile ( "1: \n" MEMACCESS(0) @@ -1146,16 +1018,14 @@ void UYVYToYRow_NEON(const uint8* src_uyvy, uint8* dst_y, int pix) { "b.gt 1b \n" : "+r"(src_uyvy), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1" // Clobber List ); } -#endif // HAS_UYVYTOYROW_NEON -#ifdef HAS_YUY2TOUV422ROW_NEON void YUY2ToUV422Row_NEON(const uint8* src_yuy2, uint8* dst_u, uint8* dst_v, - int pix) { + int width) { asm volatile ( "1: \n" MEMACCESS(0) @@ -1169,16 +1039,14 @@ void YUY2ToUV422Row_NEON(const uint8* src_yuy2, uint8* dst_u, uint8* dst_v, : "+r"(src_yuy2), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List ); } -#endif // HAS_YUY2TOUV422ROW_NEON -#ifdef HAS_UYVYTOUV422ROW_NEON void UYVYToUV422Row_NEON(const uint8* src_uyvy, uint8* dst_u, uint8* dst_v, - int pix) { + int width) { asm volatile ( "1: \n" MEMACCESS(0) @@ -1192,16 +1060,14 @@ void UYVYToUV422Row_NEON(const uint8* src_uyvy, uint8* dst_u, uint8* dst_v, : "+r"(src_uyvy), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List ); } -#endif // HAS_UYVYTOUV422ROW_NEON -#ifdef HAS_YUY2TOUVROW_NEON void YUY2ToUVRow_NEON(const uint8* 
src_yuy2, int stride_yuy2, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { const uint8* src_yuy2b = src_yuy2 + stride_yuy2; asm volatile ( "1: \n" @@ -1221,17 +1087,15 @@ void YUY2ToUVRow_NEON(const uint8* src_yuy2, int stride_yuy2, "+r"(src_yuy2b), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" // Clobber List ); } -#endif // HAS_YUY2TOUVROW_NEON -#ifdef HAS_UYVYTOUVROW_NEON void UYVYToUVRow_NEON(const uint8* src_uyvy, int stride_uyvy, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { const uint8* src_uyvyb = src_uyvy + stride_uyvy; asm volatile ( "1: \n" @@ -1251,18 +1115,16 @@ void UYVYToUVRow_NEON(const uint8* src_uyvy, int stride_uyvy, "+r"(src_uyvyb), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" // Clobber List ); } -#endif // HAS_UYVYTOUVROW_NEON // For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA. 
-#ifdef HAS_ARGBSHUFFLEROW_NEON void ARGBShuffleRow_NEON(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix) { + const uint8* shuffler, int width) { asm volatile ( MEMACCESS(3) "ld1 {v2.16b}, [%3] \n" // shuffler @@ -1276,14 +1138,12 @@ void ARGBShuffleRow_NEON(const uint8* src_argb, uint8* dst_argb, "b.gt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_argb), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : "r"(shuffler) // %3 : "cc", "memory", "v0", "v1", "v2" // Clobber List ); } -#endif // HAS_ARGBSHUFFLEROW_NEON -#ifdef HAS_I422TOYUY2ROW_NEON void I422ToYUY2Row_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, @@ -1310,9 +1170,7 @@ void I422ToYUY2Row_NEON(const uint8* src_y, : "cc", "memory", "v0", "v1", "v2", "v3" ); } -#endif // HAS_I422TOYUY2ROW_NEON -#ifdef HAS_I422TOUYVYROW_NEON void I422ToUYVYRow_NEON(const uint8* src_y, const uint8* src_u, const uint8* src_v, @@ -1339,10 +1197,8 @@ void I422ToUYVYRow_NEON(const uint8* src_y, : "cc", "memory", "v0", "v1", "v2", "v3" ); } -#endif // HAS_I422TOUYVYROW_NEON -#ifdef HAS_ARGBTORGB565ROW_NEON -void ARGBToRGB565Row_NEON(const uint8* src_argb, uint8* dst_rgb565, int pix) { +void ARGBToRGB565Row_NEON(const uint8* src_argb, uint8* dst_rgb565, int width) { asm volatile ( "1: \n" MEMACCESS(0) @@ -1354,14 +1210,12 @@ void ARGBToRGB565Row_NEON(const uint8* src_argb, uint8* dst_rgb565, int pix) { "b.gt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_rgb565), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v20", "v21", "v22", "v23" ); } -#endif // HAS_ARGBTORGB565ROW_NEON -#ifdef HAS_ARGBTORGB565DITHERROW_NEON void ARGBToRGB565DitherRow_NEON(const uint8* src_argb, uint8* dst_rgb, const uint32 dither4, int width) { asm volatile ( @@ -1384,11 +1238,9 @@ void ARGBToRGB565DitherRow_NEON(const uint8* src_argb, uint8* dst_rgb, : "cc", "memory", "v0", "v1", "v20", "v21", "v22", "v23" ); } -#endif // HAS_ARGBTORGB565ROW_NEON -#ifdef HAS_ARGBTOARGB1555ROW_NEON void 
ARGBToARGB1555Row_NEON(const uint8* src_argb, uint8* dst_argb1555, - int pix) { + int width) { asm volatile ( "1: \n" MEMACCESS(0) @@ -1400,16 +1252,14 @@ void ARGBToARGB1555Row_NEON(const uint8* src_argb, uint8* dst_argb1555, "b.gt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_argb1555), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v20", "v21", "v22", "v23" ); } -#endif // HAS_ARGBTOARGB1555ROW_NEON -#ifdef HAS_ARGBTOARGB4444ROW_NEON void ARGBToARGB4444Row_NEON(const uint8* src_argb, uint8* dst_argb4444, - int pix) { + int width) { asm volatile ( "movi v4.16b, #0x0f \n" // bits to clear with vbic. "1: \n" @@ -1422,15 +1272,13 @@ void ARGBToARGB4444Row_NEON(const uint8* src_argb, uint8* dst_argb4444, "b.gt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_argb4444), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1", "v4", "v20", "v21", "v22", "v23" ); } -#endif // HAS_ARGBTOARGB4444ROW_NEON -#ifdef HAS_ARGBTOYROW_NEON -void ARGBToYRow_NEON(const uint8* src_argb, uint8* dst_y, int pix) { +void ARGBToYRow_NEON(const uint8* src_argb, uint8* dst_y, int width) { asm volatile ( "movi v4.8b, #13 \n" // B * 0.1016 coefficient "movi v5.8b, #65 \n" // G * 0.5078 coefficient @@ -1450,15 +1298,30 @@ void ARGBToYRow_NEON(const uint8* src_argb, uint8* dst_y, int pix) { "b.gt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" ); } -#endif // HAS_ARGBTOYROW_NEON -#ifdef HAS_ARGBTOYJROW_NEON -void ARGBToYJRow_NEON(const uint8* src_argb, uint8* dst_y, int pix) { +void ARGBExtractAlphaRow_NEON(const uint8* src_argb, uint8* dst_a, int width) { + asm volatile ( + "1: \n" + MEMACCESS(0) + "ld4 {v0.16b,v1.16b,v2.16b,v3.16b}, [%0], #64 \n" // load row 16 pixels + "subs %w2, %w2, #16 \n" // 16 processed per loop + MEMACCESS(1) + "st1 {v3.16b}, [%1], #16 \n" // store 16 A's. 
+ "b.gt 1b \n" + : "+r"(src_argb), // %0 + "+r"(dst_a), // %1 + "+r"(width) // %2 + : + : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List + ); +} + +void ARGBToYJRow_NEON(const uint8* src_argb, uint8* dst_y, int width) { asm volatile ( "movi v4.8b, #15 \n" // B * 0.11400 coefficient "movi v5.8b, #75 \n" // G * 0.58700 coefficient @@ -1476,17 +1339,15 @@ void ARGBToYJRow_NEON(const uint8* src_argb, uint8* dst_y, int pix) { "b.gt 1b \n" : "+r"(src_argb), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6" ); } -#endif // HAS_ARGBTOYJROW_NEON // 8x1 pixels. -#ifdef HAS_ARGBTOUV444ROW_NEON void ARGBToUV444Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix) { + int width) { asm volatile ( "movi v24.8b, #112 \n" // UB / VR 0.875 coefficient "movi v25.8b, #74 \n" // UG -0.5781 coefficient @@ -1519,62 +1380,24 @@ void ARGBToUV444Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, : "+r"(src_argb), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v24", "v25", "v26", "v27", "v28", "v29" ); } -#endif // HAS_ARGBTOUV444ROW_NEON -// 16x1 pixels -> 8x1. pix is number of argb pixels. e.g. 16. -#ifdef HAS_ARGBTOUV422ROW_NEON -void ARGBToUV422Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix) { - asm volatile ( - RGBTOUV_SETUP_REG - "1: \n" - MEMACCESS(0) - "ld4 {v0.16b,v1.16b,v2.16b,v3.16b}, [%0], #64 \n" // load 16 pixels. 
+#define RGBTOUV_SETUP_REG \ + "movi v20.8h, #56, lsl #0 \n" /* UB/VR coefficient (0.875) / 2 */ \ + "movi v21.8h, #37, lsl #0 \n" /* UG coefficient (-0.5781) / 2 */ \ + "movi v22.8h, #19, lsl #0 \n" /* UR coefficient (-0.2969) / 2 */ \ + "movi v23.8h, #9, lsl #0 \n" /* VB coefficient (-0.1406) / 2 */ \ + "movi v24.8h, #47, lsl #0 \n" /* VG coefficient (-0.7344) / 2 */ \ + "movi v25.16b, #0x80 \n" /* 128.5 (0x8080 in 16-bit) */ - "uaddlp v0.8h, v0.16b \n" // B 16 bytes -> 8 shorts. - "uaddlp v1.8h, v1.16b \n" // G 16 bytes -> 8 shorts. - "uaddlp v2.8h, v2.16b \n" // R 16 bytes -> 8 shorts. - - "subs %w3, %w3, #16 \n" // 16 processed per loop. - "mul v3.8h, v0.8h, v20.8h \n" // B - "mls v3.8h, v1.8h, v21.8h \n" // G - "mls v3.8h, v2.8h, v22.8h \n" // R - "add v3.8h, v3.8h, v25.8h \n" // +128 -> unsigned - - "mul v4.8h, v2.8h, v20.8h \n" // R - "mls v4.8h, v1.8h, v24.8h \n" // G - "mls v4.8h, v0.8h, v23.8h \n" // B - "add v4.8h, v4.8h, v25.8h \n" // +128 -> unsigned - - "uqshrn v0.8b, v3.8h, #8 \n" // 16 bit to 8 bit U - "uqshrn v1.8b, v4.8h, #8 \n" // 16 bit to 8 bit V - - MEMACCESS(1) - "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels U. - MEMACCESS(2) - "st1 {v1.8b}, [%2], #8 \n" // store 8 pixels V. - "b.gt 1b \n" - : "+r"(src_argb), // %0 - "+r"(dst_u), // %1 - "+r"(dst_v), // %2 - "+r"(pix) // %3 - : - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", - "v20", "v21", "v22", "v23", "v24", "v25" - ); -} -#endif // HAS_ARGBTOUV422ROW_NEON - -// 32x1 pixels -> 8x1. pix is number of argb pixels. e.g. 32. -#ifdef HAS_ARGBTOUV411ROW_NEON +// 32x1 pixels -> 8x1. width is number of argb pixels. e.g. 32. 
void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, - int pix) { + int width) { asm volatile ( RGBTOUV_SETUP_REG "1: \n" @@ -1616,15 +1439,14 @@ void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, : "+r"(src_argb), // %0 "+r"(dst_u), // %1 "+r"(dst_v), // %2 - "+r"(pix) // %3 + "+r"(width) // %3 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25" ); } -#endif // HAS_ARGBTOUV411ROW_NEON -// 16x2 pixels -> 8x1. pix is number of argb pixels. e.g. 16. +// 16x2 pixels -> 8x1. width is number of argb pixels. e.g. 16. #define RGBTOUV(QB, QG, QR) \ "mul v3.8h, " #QB ",v20.8h \n" /* B */ \ "mul v4.8h, " #QR ",v20.8h \n" /* R */ \ @@ -1640,9 +1462,8 @@ void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v, // TODO(fbarchard): Consider vhadd vertical, then vpaddl horizontal, avoid shr. // TODO(fbarchard): consider ptrdiff_t for all strides. -#ifdef HAS_ARGBTOUVROW_NEON void ARGBToUVRow_NEON(const uint8* src_argb, int src_stride_argb, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { const uint8* src_argb_1 = src_argb + src_stride_argb; asm volatile ( RGBTOUV_SETUP_REG @@ -1674,18 +1495,16 @@ void ARGBToUVRow_NEON(const uint8* src_argb, int src_stride_argb, "+r"(src_argb_1), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25" ); } -#endif // HAS_ARGBTOUVROW_NEON // TODO(fbarchard): Subsample match C code. 
-#ifdef HAS_ARGBTOUVJROW_NEON void ARGBToUVJRow_NEON(const uint8* src_argb, int src_stride_argb, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { const uint8* src_argb_1 = src_argb + src_stride_argb; asm volatile ( "movi v20.8h, #63, lsl #0 \n" // UB/VR coeff (0.500) / 2 @@ -1721,17 +1540,15 @@ void ARGBToUVJRow_NEON(const uint8* src_argb, int src_stride_argb, "+r"(src_argb_1), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25" ); } -#endif // HAS_ARGBTOUVJROW_NEON -#ifdef HAS_BGRATOUVROW_NEON void BGRAToUVRow_NEON(const uint8* src_bgra, int src_stride_bgra, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { const uint8* src_bgra_1 = src_bgra + src_stride_bgra; asm volatile ( RGBTOUV_SETUP_REG @@ -1762,17 +1579,15 @@ void BGRAToUVRow_NEON(const uint8* src_bgra, int src_stride_bgra, "+r"(src_bgra_1), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25" ); } -#endif // HAS_BGRATOUVROW_NEON -#ifdef HAS_ABGRTOUVROW_NEON void ABGRToUVRow_NEON(const uint8* src_abgr, int src_stride_abgr, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { const uint8* src_abgr_1 = src_abgr + src_stride_abgr; asm volatile ( RGBTOUV_SETUP_REG @@ -1803,17 +1618,15 @@ void ABGRToUVRow_NEON(const uint8* src_abgr, int src_stride_abgr, "+r"(src_abgr_1), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25" ); } -#endif // HAS_ABGRTOUVROW_NEON -#ifdef HAS_RGBATOUVROW_NEON void RGBAToUVRow_NEON(const uint8* src_rgba, int src_stride_rgba, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* 
dst_u, uint8* dst_v, int width) { const uint8* src_rgba_1 = src_rgba + src_stride_rgba; asm volatile ( RGBTOUV_SETUP_REG @@ -1844,17 +1657,15 @@ void RGBAToUVRow_NEON(const uint8* src_rgba, int src_stride_rgba, "+r"(src_rgba_1), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25" ); } -#endif // HAS_RGBATOUVROW_NEON -#ifdef HAS_RGB24TOUVROW_NEON void RGB24ToUVRow_NEON(const uint8* src_rgb24, int src_stride_rgb24, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { const uint8* src_rgb24_1 = src_rgb24 + src_stride_rgb24; asm volatile ( RGBTOUV_SETUP_REG @@ -1885,17 +1696,15 @@ void RGB24ToUVRow_NEON(const uint8* src_rgb24, int src_stride_rgb24, "+r"(src_rgb24_1), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25" ); } -#endif // HAS_RGB24TOUVROW_NEON -#ifdef HAS_RAWTOUVROW_NEON void RAWToUVRow_NEON(const uint8* src_raw, int src_stride_raw, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { const uint8* src_raw_1 = src_raw + src_stride_raw; asm volatile ( RGBTOUV_SETUP_REG @@ -1926,18 +1735,16 @@ void RAWToUVRow_NEON(const uint8* src_raw, int src_stride_raw, "+r"(src_raw_1), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "v21", "v22", "v23", "v24", "v25" ); } -#endif // HAS_RAWTOUVROW_NEON -// 16x2 pixels -> 8x1. pix is number of argb pixels. e.g. 16. -#ifdef HAS_RGB565TOUVROW_NEON +// 16x2 pixels -> 8x1. width is number of argb pixels. e.g. 16. 
void RGB565ToUVRow_NEON(const uint8* src_rgb565, int src_stride_rgb565, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { const uint8* src_rgb565_1 = src_rgb565 + src_stride_rgb565; asm volatile ( "movi v22.8h, #56, lsl #0 \n" // UB / VR coeff (0.875) / 2 @@ -2001,19 +1808,17 @@ void RGB565ToUVRow_NEON(const uint8* src_rgb565, int src_stride_rgb565, "+r"(src_rgb565_1), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); } -#endif // HAS_RGB565TOUVROW_NEON -// 16x2 pixels -> 8x1. pix is number of argb pixels. e.g. 16. -#ifdef HAS_ARGB1555TOUVROW_NEON +// 16x2 pixels -> 8x1. width is number of argb pixels. e.g. 16. void ARGB1555ToUVRow_NEON(const uint8* src_argb1555, int src_stride_argb1555, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { const uint8* src_argb1555_1 = src_argb1555 + src_stride_argb1555; asm volatile ( RGBTOUV_SETUP_REG @@ -2072,19 +1877,17 @@ void ARGB1555ToUVRow_NEON(const uint8* src_argb1555, int src_stride_argb1555, "+r"(src_argb1555_1), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28" ); } -#endif // HAS_ARGB1555TOUVROW_NEON -// 16x2 pixels -> 8x1. pix is number of argb pixels. e.g. 16. -#ifdef HAS_ARGB4444TOUVROW_NEON +// 16x2 pixels -> 8x1. width is number of argb pixels. e.g. 16. 
void ARGB4444ToUVRow_NEON(const uint8* src_argb4444, int src_stride_argb4444, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { const uint8* src_argb4444_1 = src_argb4444 + src_stride_argb4444; asm volatile ( RGBTOUV_SETUP_REG @@ -2143,7 +1946,7 @@ void ARGB4444ToUVRow_NEON(const uint8* src_argb4444, int src_stride_argb4444, "+r"(src_argb4444_1), // %1 "+r"(dst_u), // %2 "+r"(dst_v), // %3 - "+r"(pix) // %4 + "+r"(width) // %4 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", @@ -2151,10 +1954,8 @@ void ARGB4444ToUVRow_NEON(const uint8* src_argb4444, int src_stride_argb4444, ); } -#endif // HAS_ARGB4444TOUVROW_NEON -#ifdef HAS_RGB565TOYROW_NEON -void RGB565ToYRow_NEON(const uint8* src_rgb565, uint8* dst_y, int pix) { +void RGB565ToYRow_NEON(const uint8* src_rgb565, uint8* dst_y, int width) { asm volatile ( "movi v24.8b, #13 \n" // B * 0.1016 coefficient "movi v25.8b, #65 \n" // G * 0.5078 coefficient @@ -2175,16 +1976,14 @@ void RGB565ToYRow_NEON(const uint8* src_rgb565, uint8* dst_y, int pix) { "b.gt 1b \n" : "+r"(src_rgb565), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v6", "v24", "v25", "v26", "v27" ); } -#endif // HAS_RGB565TOYROW_NEON -#ifdef HAS_ARGB1555TOYROW_NEON -void ARGB1555ToYRow_NEON(const uint8* src_argb1555, uint8* dst_y, int pix) { +void ARGB1555ToYRow_NEON(const uint8* src_argb1555, uint8* dst_y, int width) { asm volatile ( "movi v4.8b, #13 \n" // B * 0.1016 coefficient "movi v5.8b, #65 \n" // G * 0.5078 coefficient @@ -2205,15 +2004,13 @@ void ARGB1555ToYRow_NEON(const uint8* src_argb1555, uint8* dst_y, int pix) { "b.gt 1b \n" : "+r"(src_argb1555), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" ); } -#endif // HAS_ARGB1555TOYROW_NEON -#ifdef HAS_ARGB4444TOYROW_NEON -void 
ARGB4444ToYRow_NEON(const uint8* src_argb4444, uint8* dst_y, int pix) { +void ARGB4444ToYRow_NEON(const uint8* src_argb4444, uint8* dst_y, int width) { asm volatile ( "movi v24.8b, #13 \n" // B * 0.1016 coefficient "movi v25.8b, #65 \n" // G * 0.5078 coefficient @@ -2234,15 +2031,13 @@ void ARGB4444ToYRow_NEON(const uint8* src_argb4444, uint8* dst_y, int pix) { "b.gt 1b \n" : "+r"(src_argb4444), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27" ); } -#endif // HAS_ARGB4444TOYROW_NEON -#ifdef HAS_BGRATOYROW_NEON -void BGRAToYRow_NEON(const uint8* src_bgra, uint8* dst_y, int pix) { +void BGRAToYRow_NEON(const uint8* src_bgra, uint8* dst_y, int width) { asm volatile ( "movi v4.8b, #33 \n" // R * 0.2578 coefficient "movi v5.8b, #65 \n" // G * 0.5078 coefficient @@ -2262,15 +2057,13 @@ void BGRAToYRow_NEON(const uint8* src_bgra, uint8* dst_y, int pix) { "b.gt 1b \n" : "+r"(src_bgra), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16" ); } -#endif // HAS_BGRATOYROW_NEON -#ifdef HAS_ABGRTOYROW_NEON -void ABGRToYRow_NEON(const uint8* src_abgr, uint8* dst_y, int pix) { +void ABGRToYRow_NEON(const uint8* src_abgr, uint8* dst_y, int width) { asm volatile ( "movi v4.8b, #33 \n" // R * 0.2578 coefficient "movi v5.8b, #65 \n" // G * 0.5078 coefficient @@ -2290,15 +2083,13 @@ void ABGRToYRow_NEON(const uint8* src_abgr, uint8* dst_y, int pix) { "b.gt 1b \n" : "+r"(src_abgr), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16" ); } -#endif // HAS_ABGRTOYROW_NEON -#ifdef HAS_RGBATOYROW_NEON -void RGBAToYRow_NEON(const uint8* src_rgba, uint8* dst_y, int pix) { +void RGBAToYRow_NEON(const uint8* src_rgba, uint8* dst_y, int width) { asm volatile ( "movi v4.8b, #13 \n" // B * 0.1016 coefficient "movi v5.8b, #65 \n" // G * 0.5078 
coefficient @@ -2318,15 +2109,13 @@ void RGBAToYRow_NEON(const uint8* src_rgba, uint8* dst_y, int pix) { "b.gt 1b \n" : "+r"(src_rgba), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16" ); } -#endif // HAS_RGBATOYROW_NEON -#ifdef HAS_RGB24TOYROW_NEON -void RGB24ToYRow_NEON(const uint8* src_rgb24, uint8* dst_y, int pix) { +void RGB24ToYRow_NEON(const uint8* src_rgb24, uint8* dst_y, int width) { asm volatile ( "movi v4.8b, #13 \n" // B * 0.1016 coefficient "movi v5.8b, #65 \n" // G * 0.5078 coefficient @@ -2346,15 +2135,13 @@ void RGB24ToYRow_NEON(const uint8* src_rgb24, uint8* dst_y, int pix) { "b.gt 1b \n" : "+r"(src_rgb24), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16" ); } -#endif // HAS_RGB24TOYROW_NEON -#ifdef HAS_RAWTOYROW_NEON -void RAWToYRow_NEON(const uint8* src_raw, uint8* dst_y, int pix) { +void RAWToYRow_NEON(const uint8* src_raw, uint8* dst_y, int width) { asm volatile ( "movi v4.8b, #33 \n" // R * 0.2578 coefficient "movi v5.8b, #65 \n" // G * 0.5078 coefficient @@ -2374,15 +2161,13 @@ void RAWToYRow_NEON(const uint8* src_raw, uint8* dst_y, int pix) { "b.gt 1b \n" : "+r"(src_raw), // %0 "+r"(dst_y), // %1 - "+r"(pix) // %2 + "+r"(width) // %2 : : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16" ); } -#endif // HAS_RAWTOYROW_NEON // Bilinear filter 16x2 -> 16x1 -#ifdef HAS_INTERPOLATEROW_NEON void InterpolateRow_NEON(uint8* dst_ptr, const uint8* src_ptr, ptrdiff_t src_stride, int dst_width, int source_y_fraction) { @@ -2392,12 +2177,8 @@ void InterpolateRow_NEON(uint8* dst_ptr, asm volatile ( "cmp %w4, #0 \n" "b.eq 100f \n" - "cmp %w4, #64 \n" - "b.eq 75f \n" "cmp %w4, #128 \n" "b.eq 50f \n" - "cmp %w4, #192 \n" - "b.eq 25f \n" "dup v5.16b, %w4 \n" "dup v4.16b, %w5 \n" @@ -2419,20 +2200,6 @@ void InterpolateRow_NEON(uint8* dst_ptr, "b.gt 1b \n" "b 99f \n" - 
// Blend 25 / 75. - "25: \n" - MEMACCESS(1) - "ld1 {v0.16b}, [%1], #16 \n" - MEMACCESS(2) - "ld1 {v1.16b}, [%2], #16 \n" - "subs %w3, %w3, #16 \n" - "urhadd v0.16b, v0.16b, v1.16b \n" - "urhadd v0.16b, v0.16b, v1.16b \n" - MEMACCESS(0) - "st1 {v0.16b}, [%0], #16 \n" - "b.gt 25b \n" - "b 99f \n" - // Blend 50 / 50. "50: \n" MEMACCESS(1) @@ -2446,20 +2213,6 @@ void InterpolateRow_NEON(uint8* dst_ptr, "b.gt 50b \n" "b 99f \n" - // Blend 75 / 25. - "75: \n" - MEMACCESS(1) - "ld1 {v1.16b}, [%1], #16 \n" - MEMACCESS(2) - "ld1 {v0.16b}, [%2], #16 \n" - "subs %w3, %w3, #16 \n" - "urhadd v0.16b, v0.16b, v1.16b \n" - "urhadd v0.16b, v0.16b, v1.16b \n" - MEMACCESS(0) - "st1 {v0.16b}, [%0], #16 \n" - "b.gt 75b \n" - "b 99f \n" - // Blend 100 / 0 - Copy row unchanged. "100: \n" MEMACCESS(1) @@ -2480,10 +2233,8 @@ void InterpolateRow_NEON(uint8* dst_ptr, : "cc", "memory", "v0", "v1", "v3", "v4", "v5" ); } -#endif // HAS_INTERPOLATEROW_NEON // dr * (256 - sa) / 256 + sr = dr - dr * sa / 256 + sr -#ifdef HAS_ARGBBLENDROW_NEON void ARGBBlendRow_NEON(const uint8* src_argb0, const uint8* src_argb1, uint8* dst_argb, int width) { asm volatile ( @@ -2552,10 +2303,8 @@ void ARGBBlendRow_NEON(const uint8* src_argb0, const uint8* src_argb1, "v16", "v17", "v18" ); } -#endif // HAS_ARGBBLENDROW_NEON // Attenuate 8 pixels at a time. -#ifdef HAS_ARGBATTENUATEROW_NEON void ARGBAttenuateRow_NEON(const uint8* src_argb, uint8* dst_argb, int width) { asm volatile ( // Attenuate 8 pixels. @@ -2579,11 +2328,9 @@ void ARGBAttenuateRow_NEON(const uint8* src_argb, uint8* dst_argb, int width) { : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6" ); } -#endif // HAS_ARGBATTENUATEROW_NEON // Quantize 8 ARGB pixels (32 bytes). 
// dst = (dst * scale >> 16) * interval_size + interval_offset; -#ifdef HAS_ARGBQUANTIZEROW_NEON void ARGBQuantizeRow_NEON(uint8* dst_argb, int scale, int interval_size, int interval_offset, int width) { asm volatile ( @@ -2623,12 +2370,10 @@ void ARGBQuantizeRow_NEON(uint8* dst_argb, int scale, int interval_size, : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6" ); } -#endif // HAS_ARGBQUANTIZEROW_NEON // Shade 8 pixels at a time by specified value. // NOTE vqrdmulh.s16 q10, q10, d0[0] must use a scaler register from 0 to 8. // Rounding in vqrdmulh does +1 to high if high bit of low s16 is set. -#ifdef HAS_ARGBSHADEROW_NEON void ARGBShadeRow_NEON(const uint8* src_argb, uint8* dst_argb, int width, uint32 value) { asm volatile ( @@ -2663,12 +2408,10 @@ void ARGBShadeRow_NEON(const uint8* src_argb, uint8* dst_argb, int width, : "cc", "memory", "v0", "v4", "v5", "v6", "v7" ); } -#endif // HAS_ARGBSHADEROW_NEON // Convert 8 ARGB pixels (64 bytes) to 8 Gray ARGB pixels // Similar to ARGBToYJ but stores ARGB. // C code is (15 * b + 75 * g + 38 * r + 64) >> 7; -#ifdef HAS_ARGBGRAYROW_NEON void ARGBGrayRow_NEON(const uint8* src_argb, uint8* dst_argb, int width) { asm volatile ( "movi v24.8b, #15 \n" // B * 0.11400 coefficient @@ -2694,14 +2437,12 @@ void ARGBGrayRow_NEON(const uint8* src_argb, uint8* dst_argb, int width) { : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v24", "v25", "v26" ); } -#endif // HAS_ARGBGRAYROW_NEON // Convert 8 ARGB pixels (32 bytes) to 8 Sepia ARGB pixels. // b = (r * 35 + g * 68 + b * 17) >> 7 // g = (r * 45 + g * 88 + b * 22) >> 7 // r = (r * 50 + g * 98 + b * 24) >> 7 -#ifdef HAS_ARGBSEPIAROW_NEON void ARGBSepiaRow_NEON(uint8* dst_argb, int width) { asm volatile ( "movi v20.8b, #17 \n" // BB coefficient @@ -2739,12 +2480,10 @@ void ARGBSepiaRow_NEON(uint8* dst_argb, int width) { "v20", "v21", "v22", "v24", "v25", "v26", "v28", "v29", "v30" ); } -#endif // HAS_ARGBSEPIAROW_NEON // Tranform 8 ARGB pixels (32 bytes) with color matrix. 
// TODO(fbarchard): Was same as Sepia except matrix is provided. This function // needs to saturate. Consider doing a non-saturating version. -#ifdef HAS_ARGBCOLORMATRIXROW_NEON void ARGBColorMatrixRow_NEON(const uint8* src_argb, uint8* dst_argb, const int8* matrix_argb, int width) { asm volatile ( @@ -2804,11 +2543,9 @@ void ARGBColorMatrixRow_NEON(const uint8* src_argb, uint8* dst_argb, "v18", "v19", "v22", "v23", "v24", "v25" ); } -#endif // HAS_ARGBCOLORMATRIXROW_NEON // TODO(fbarchard): fix vqshrun in ARGBMultiplyRow_NEON and reenable. // Multiply 2 rows of ARGB pixels together, 8 pixels at a time. -#ifdef HAS_ARGBMULTIPLYROW_NEON void ARGBMultiplyRow_NEON(const uint8* src_argb0, const uint8* src_argb1, uint8* dst_argb, int width) { asm volatile ( @@ -2839,10 +2576,8 @@ void ARGBMultiplyRow_NEON(const uint8* src_argb0, const uint8* src_argb1, : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" ); } -#endif // HAS_ARGBMULTIPLYROW_NEON // Add 2 rows of ARGB pixels together, 8 pixels at a time. -#ifdef HAS_ARGBADDROW_NEON void ARGBAddRow_NEON(const uint8* src_argb0, const uint8* src_argb1, uint8* dst_argb, int width) { asm volatile ( @@ -2869,10 +2604,8 @@ void ARGBAddRow_NEON(const uint8* src_argb0, const uint8* src_argb1, : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" ); } -#endif // HAS_ARGBADDROW_NEON // Subtract 2 rows of ARGB pixels, 8 pixels at a time. -#ifdef HAS_ARGBSUBTRACTROW_NEON void ARGBSubtractRow_NEON(const uint8* src_argb0, const uint8* src_argb1, uint8* dst_argb, int width) { asm volatile ( @@ -2899,14 +2632,12 @@ void ARGBSubtractRow_NEON(const uint8* src_argb0, const uint8* src_argb1, : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" ); } -#endif // HAS_ARGBSUBTRACTROW_NEON // Adds Sobel X and Sobel Y and stores Sobel into ARGB. 
// A = 255 // R = Sobel // G = Sobel // B = Sobel -#ifdef HAS_SOBELROW_NEON void SobelRow_NEON(const uint8* src_sobelx, const uint8* src_sobely, uint8* dst_argb, int width) { asm volatile ( @@ -2932,10 +2663,8 @@ void SobelRow_NEON(const uint8* src_sobelx, const uint8* src_sobely, : "cc", "memory", "v0", "v1", "v2", "v3" ); } -#endif // HAS_SOBELROW_NEON // Adds Sobel X and Sobel Y and stores Sobel into plane. -#ifdef HAS_SOBELTOPLANEROW_NEON void SobelToPlaneRow_NEON(const uint8* src_sobelx, const uint8* src_sobely, uint8* dst_y, int width) { asm volatile ( @@ -2958,14 +2687,12 @@ void SobelToPlaneRow_NEON(const uint8* src_sobelx, const uint8* src_sobely, : "cc", "memory", "v0", "v1" ); } -#endif // HAS_SOBELTOPLANEROW_NEON // Mixes Sobel X, Sobel Y and Sobel into ARGB. // A = 255 // R = Sobel X // G = Sobel // B = Sobel Y -#ifdef HAS_SOBELXYROW_NEON void SobelXYRow_NEON(const uint8* src_sobelx, const uint8* src_sobely, uint8* dst_argb, int width) { asm volatile ( @@ -2989,13 +2716,11 @@ void SobelXYRow_NEON(const uint8* src_sobelx, const uint8* src_sobely, : "cc", "memory", "v0", "v1", "v2", "v3" ); } -#endif // HAS_SOBELXYROW_NEON // SobelX as a matrix is // -1 0 1 // -2 0 2 // -1 0 1 -#ifdef HAS_SOBELXROW_NEON void SobelXRow_NEON(const uint8* src_y0, const uint8* src_y1, const uint8* src_y2, uint8* dst_sobelx, int width) { asm volatile ( @@ -3034,13 +2759,11 @@ void SobelXRow_NEON(const uint8* src_y0, const uint8* src_y1, : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List ); } -#endif // HAS_SOBELXROW_NEON // SobelY as a matrix is // -1 -2 -1 // 0 0 0 // 1 2 1 -#ifdef HAS_SOBELYROW_NEON void SobelYRow_NEON(const uint8* src_y0, const uint8* src_y1, uint8* dst_sobely, int width) { asm volatile ( @@ -3078,7 +2801,6 @@ void SobelYRow_NEON(const uint8* src_y0, const uint8* src_y1, : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List ); } -#endif // HAS_SOBELYROW_NEON #endif // !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__) #ifdef __cplusplus diff 
--git a/libs/libvpx/third_party/libyuv/source/row_win.cc b/libs/libvpx/third_party/libyuv/source/row_win.cc index 71be268b47..2a3da8969f 100644 --- a/libs/libvpx/third_party/libyuv/source/row_win.cc +++ b/libs/libvpx/third_party/libyuv/source/row_win.cc @@ -10,8 +10,11 @@ #include "libyuv/row.h" -#if !defined(LIBYUV_DISABLE_X86) && defined(_M_X64) && \ - defined(_MSC_VER) && !defined(__clang__) +// This module is for Visual C 32/64 bit and clangcl 32 bit +#if !defined(LIBYUV_DISABLE_X86) && defined(_MSC_VER) && \ + (defined(_M_IX86) || (defined(_M_X64) && !defined(__clang__))) + +#if defined(_M_X64) #include #include // For _mm_maddubs_epi16 #endif @@ -21,183 +24,104 @@ namespace libyuv { extern "C" { #endif -// This module is for Visual C. -#if !defined(LIBYUV_DISABLE_X86) && (defined(_M_IX86) || defined(_M_X64)) && \ - defined(_MSC_VER) && !defined(__clang__) - -struct YuvConstants { - lvec8 kUVToB; // 0 - lvec8 kUVToG; // 32 - lvec8 kUVToR; // 64 - lvec16 kUVBiasB; // 96 - lvec16 kUVBiasG; // 128 - lvec16 kUVBiasR; // 160 - lvec16 kYToRgb; // 192 -}; - -// BT.601 YUV to RGB reference -// R = (Y - 16) * 1.164 - V * -1.596 -// G = (Y - 16) * 1.164 - U * 0.391 - V * 0.813 -// B = (Y - 16) * 1.164 - U * -2.018 - -// Y contribution to R,G,B. Scale and bias. -// TODO(fbarchard): Consider moving constants into a common header. -#define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */ -#define YGB -1160 /* 1.164 * 64 * -16 + 64 / 2 */ - -// U and V contributions to R,G,B. -#define UB -128 /* max(-128, round(-2.018 * 64)) */ -#define UG 25 /* round(0.391 * 64) */ -#define VG 52 /* round(0.813 * 64) */ -#define VR -102 /* round(-1.596 * 64) */ - -// Bias values to subtract 16 from Y and 128 from U and V. -#define BB (UB * 128 + YGB) -#define BG (UG * 128 + VG * 128 + YGB) -#define BR (VR * 128 + YGB) - -// BT601 constants for YUV to RGB. 
-static YuvConstants SIMD_ALIGNED(kYuvConstants) = { - { UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, - UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0 }, - { UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, - UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG }, - { 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, - 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR }, - { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, - { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, - { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, - { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } -}; - -// BT601 constants for NV21 where chroma plane is VU instead of UV. -static YuvConstants SIMD_ALIGNED(kYvuConstants) = { - { 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, - 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB }, - { VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, - VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG }, - { VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, - VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0 }, - { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB }, - { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG }, - { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR }, - { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG } -}; - -#undef YG -#undef YGB -#undef UB -#undef UG -#undef VG -#undef VR -#undef BB -#undef BG -#undef BR - -// JPEG YUV to RGB reference -// * R = Y - V * -1.40200 -// * G = Y - U * 0.34414 - V * 0.71414 -// * B = Y - U * -1.77200 - -// Y contribution to R,G,B. Scale and bias. -// TODO(fbarchard): Consider moving constants into a common header. -#define YGJ 16320 /* round(1.000 * 64 * 256 * 256 / 257) */ -#define YGBJ 32 /* 64 / 2 */ - -// U and V contributions to R,G,B. 
-#define UBJ -113 /* round(-1.77200 * 64) */ -#define UGJ 22 /* round(0.34414 * 64) */ -#define VGJ 46 /* round(0.71414 * 64) */ -#define VRJ -90 /* round(-1.40200 * 64) */ - -// Bias values to subtract 16 from Y and 128 from U and V. -#define BBJ (UBJ * 128 + YGBJ) -#define BGJ (UGJ * 128 + VGJ * 128 + YGBJ) -#define BRJ (VRJ * 128 + YGBJ) - -// JPEG constants for YUV to RGB. -static YuvConstants SIMD_ALIGNED(kYuvJConstants) = { - { UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, - UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0 }, - { UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, - UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, - UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, - UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ }, - { 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, - 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ }, - { BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, - BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ }, - { BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, - BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ }, - { BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, - BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ }, - { YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, - YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ } -}; - -#undef YGJ -#undef YGBJ -#undef UBJ -#undef UGJ -#undef VGJ -#undef VRJ -#undef BBJ -#undef BGJ -#undef BRJ - // 64 bit #if defined(_M_X64) + +// Read 4 UV from 422, upsample to 8 UV. +#define READYUV422 \ + xmm0 = _mm_cvtsi32_si128(*(uint32*)u_buf); \ + xmm1 = _mm_cvtsi32_si128(*(uint32*)(u_buf + offset)); \ + xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \ + xmm0 = _mm_unpacklo_epi16(xmm0, xmm0); \ + u_buf += 4; \ + xmm4 = _mm_loadl_epi64((__m128i*)y_buf); \ + xmm4 = _mm_unpacklo_epi8(xmm4, xmm4); \ + y_buf += 8; + +// Read 4 UV from 422, upsample to 8 UV. With 8 Alpha. 
+#define READYUVA422 \ + xmm0 = _mm_cvtsi32_si128(*(uint32*)u_buf); \ + xmm1 = _mm_cvtsi32_si128(*(uint32*)(u_buf + offset)); \ + xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \ + xmm0 = _mm_unpacklo_epi16(xmm0, xmm0); \ + u_buf += 4; \ + xmm4 = _mm_loadl_epi64((__m128i*)y_buf); \ + xmm4 = _mm_unpacklo_epi8(xmm4, xmm4); \ + y_buf += 8; \ + xmm5 = _mm_loadl_epi64((__m128i*)a_buf); \ + a_buf += 8; + +// Convert 8 pixels: 8 UV and 8 Y. +#define YUVTORGB(yuvconstants) \ + xmm1 = _mm_loadu_si128(&xmm0); \ + xmm2 = _mm_loadu_si128(&xmm0); \ + xmm0 = _mm_maddubs_epi16(xmm0, *(__m128i*)yuvconstants->kUVToB); \ + xmm1 = _mm_maddubs_epi16(xmm1, *(__m128i*)yuvconstants->kUVToG); \ + xmm2 = _mm_maddubs_epi16(xmm2, *(__m128i*)yuvconstants->kUVToR); \ + xmm0 = _mm_sub_epi16(*(__m128i*)yuvconstants->kUVBiasB, xmm0); \ + xmm1 = _mm_sub_epi16(*(__m128i*)yuvconstants->kUVBiasG, xmm1); \ + xmm2 = _mm_sub_epi16(*(__m128i*)yuvconstants->kUVBiasR, xmm2); \ + xmm4 = _mm_mulhi_epu16(xmm4, *(__m128i*)yuvconstants->kYToRgb); \ + xmm0 = _mm_adds_epi16(xmm0, xmm4); \ + xmm1 = _mm_adds_epi16(xmm1, xmm4); \ + xmm2 = _mm_adds_epi16(xmm2, xmm4); \ + xmm0 = _mm_srai_epi16(xmm0, 6); \ + xmm1 = _mm_srai_epi16(xmm1, 6); \ + xmm2 = _mm_srai_epi16(xmm2, 6); \ + xmm0 = _mm_packus_epi16(xmm0, xmm0); \ + xmm1 = _mm_packus_epi16(xmm1, xmm1); \ + xmm2 = _mm_packus_epi16(xmm2, xmm2); + +// Store 8 ARGB values. 
+#define STOREARGB \ + xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); \ + xmm2 = _mm_unpacklo_epi8(xmm2, xmm5); \ + xmm1 = _mm_loadu_si128(&xmm0); \ + xmm0 = _mm_unpacklo_epi16(xmm0, xmm2); \ + xmm1 = _mm_unpackhi_epi16(xmm1, xmm2); \ + _mm_storeu_si128((__m128i *)dst_argb, xmm0); \ + _mm_storeu_si128((__m128i *)(dst_argb + 16), xmm1); \ + dst_argb += 32; + + #if defined(HAS_I422TOARGBROW_SSSE3) void I422ToARGBRow_SSSE3(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { - __m128i xmm0, xmm1, xmm2, xmm3; + __m128i xmm0, xmm1, xmm2, xmm4; const __m128i xmm5 = _mm_set1_epi8(-1); const ptrdiff_t offset = (uint8*)v_buf - (uint8*)u_buf; - while (width > 0) { - xmm0 = _mm_cvtsi32_si128(*(uint32*)u_buf); - xmm1 = _mm_cvtsi32_si128(*(uint32*)(u_buf + offset)); - xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); - xmm0 = _mm_unpacklo_epi16(xmm0, xmm0); - xmm1 = _mm_loadu_si128(&xmm0); - xmm2 = _mm_loadu_si128(&xmm0); - xmm0 = _mm_maddubs_epi16(xmm0, *(__m128i*)kYuvConstants.kUVToB); - xmm1 = _mm_maddubs_epi16(xmm1, *(__m128i*)kYuvConstants.kUVToG); - xmm2 = _mm_maddubs_epi16(xmm2, *(__m128i*)kYuvConstants.kUVToR); - xmm0 = _mm_sub_epi16(*(__m128i*)kYuvConstants.kUVBiasB, xmm0); - xmm1 = _mm_sub_epi16(*(__m128i*)kYuvConstants.kUVBiasG, xmm1); - xmm2 = _mm_sub_epi16(*(__m128i*)kYuvConstants.kUVBiasR, xmm2); - xmm3 = _mm_loadl_epi64((__m128i*)y_buf); - xmm3 = _mm_unpacklo_epi8(xmm3, xmm3); - xmm3 = _mm_mulhi_epu16(xmm3, *(__m128i*)kYuvConstants.kYToRgb); - xmm0 = _mm_adds_epi16(xmm0, xmm3); - xmm1 = _mm_adds_epi16(xmm1, xmm3); - xmm2 = _mm_adds_epi16(xmm2, xmm3); - xmm0 = _mm_srai_epi16(xmm0, 6); - xmm1 = _mm_srai_epi16(xmm1, 6); - xmm2 = _mm_srai_epi16(xmm2, 6); - xmm0 = _mm_packus_epi16(xmm0, xmm0); - xmm1 = _mm_packus_epi16(xmm1, xmm1); - xmm2 = _mm_packus_epi16(xmm2, xmm2); - xmm0 = _mm_unpacklo_epi8(xmm0, xmm1); - xmm2 = _mm_unpacklo_epi8(xmm2, xmm5); - xmm1 = _mm_loadu_si128(&xmm0); - xmm0 = 
_mm_unpacklo_epi16(xmm0, xmm2); - xmm1 = _mm_unpackhi_epi16(xmm1, xmm2); - - _mm_storeu_si128((__m128i *)dst_argb, xmm0); - _mm_storeu_si128((__m128i *)(dst_argb + 16), xmm1); - - y_buf += 8; - u_buf += 4; - dst_argb += 32; + READYUV422 + YUVTORGB(yuvconstants) + STOREARGB width -= 8; } } #endif + +#if defined(HAS_I422ALPHATOARGBROW_SSSE3) +void I422AlphaToARGBRow_SSSE3(const uint8* y_buf, + const uint8* u_buf, + const uint8* v_buf, + const uint8* a_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + __m128i xmm0, xmm1, xmm2, xmm4, xmm5; + const ptrdiff_t offset = (uint8*)v_buf - (uint8*)u_buf; + while (width > 0) { + READYUVA422 + YUVTORGB(yuvconstants) + STOREARGB + width -= 8; + } +} +#endif + // 32 bit #else // defined(_M_X64) #ifdef HAS_ARGBTOYROW_SSSE3 @@ -301,6 +225,24 @@ static const uvec8 kShuffleMaskRAWToARGB = { 2u, 1u, 0u, 12u, 5u, 4u, 3u, 13u, 8u, 7u, 6u, 14u, 11u, 10u, 9u, 15u }; +// Shuffle table for converting RAW to RGB24. First 8. +static const uvec8 kShuffleMaskRAWToRGB24_0 = { + 2u, 1u, 0u, 5u, 4u, 3u, 8u, 7u, + 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u +}; + +// Shuffle table for converting RAW to RGB24. Middle 8. +static const uvec8 kShuffleMaskRAWToRGB24_1 = { + 2u, 7u, 6u, 5u, 10u, 9u, 8u, 13u, + 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u +}; + +// Shuffle table for converting RAW to RGB24. Last 8. +static const uvec8 kShuffleMaskRAWToRGB24_2 = { + 8u, 7u, 12u, 11u, 10u, 15u, 14u, 13u, + 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u +}; + // Shuffle table for converting ARGB to RGB24. static const uvec8 kShuffleMaskARGBToRGB24 = { 0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 10u, 12u, 13u, 14u, 128u, 128u, 128u, 128u @@ -316,18 +258,43 @@ static const uvec8 kShuffleMaskARGBToRGB24_0 = { 0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 128u, 128u, 128u, 128u, 10u, 12u, 13u, 14u }; -// Shuffle table for converting ARGB to RAW. 
-static const uvec8 kShuffleMaskARGBToRAW_0 = { - 2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 128u, 128u, 128u, 128u, 8u, 14u, 13u, 12u +// YUY2 shuf 16 Y to 32 Y. +static const lvec8 kShuffleYUY2Y = { + 0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14, + 0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14 +}; + +// YUY2 shuf 8 UV to 16 UV. +static const lvec8 kShuffleYUY2UV = { + 1, 3, 1, 3, 5, 7, 5, 7, 9, 11, 9, 11, 13, 15, 13, 15, + 1, 3, 1, 3, 5, 7, 5, 7, 9, 11, 9, 11, 13, 15, 13, 15 +}; + +// UYVY shuf 16 Y to 32 Y. +static const lvec8 kShuffleUYVYY = { + 1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15, + 1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15 +}; + +// UYVY shuf 8 UV to 16 UV. +static const lvec8 kShuffleUYVYUV = { + 0, 2, 0, 2, 4, 6, 4, 6, 8, 10, 8, 10, 12, 14, 12, 14, + 0, 2, 0, 2, 4, 6, 4, 6, 8, 10, 8, 10, 12, 14, 12, 14 +}; + +// NV21 shuf 8 VU to 16 UV. +static const lvec8 kShuffleNV21 = { + 1, 0, 1, 0, 3, 2, 3, 2, 5, 4, 5, 4, 7, 6, 7, 6, + 1, 0, 1, 0, 3, 2, 3, 2, 5, 4, 5, 4, 7, 6, 7, 6, }; // Duplicates gray value 3 times and fills in alpha opaque. __declspec(naked) -void J400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix) { +void J400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int width) { __asm { mov eax, [esp + 4] // src_y mov edx, [esp + 8] // dst_argb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width pcmpeqb xmm5, xmm5 // generate mask 0xff000000 pslld xmm5, 24 @@ -352,11 +319,11 @@ void J400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix) { #ifdef HAS_J400TOARGBROW_AVX2 // Duplicates gray value 3 times and fills in alpha opaque. 
__declspec(naked) -void J400ToARGBRow_AVX2(const uint8* src_y, uint8* dst_argb, int pix) { +void J400ToARGBRow_AVX2(const uint8* src_y, uint8* dst_argb, int width) { __asm { mov eax, [esp + 4] // src_y mov edx, [esp + 8] // dst_argb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0xff000000 vpslld ymm5, ymm5, 24 @@ -382,14 +349,14 @@ void J400ToARGBRow_AVX2(const uint8* src_y, uint8* dst_argb, int pix) { #endif // HAS_J400TOARGBROW_AVX2 __declspec(naked) -void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int pix) { +void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int width) { __asm { mov eax, [esp + 4] // src_rgb24 mov edx, [esp + 8] // dst_argb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width pcmpeqb xmm5, xmm5 // generate mask 0xff000000 pslld xmm5, 24 - movdqa xmm4, kShuffleMaskRGB24ToARGB + movdqa xmm4, xmmword ptr kShuffleMaskRGB24ToARGB convertloop: movdqu xmm0, [eax] @@ -421,14 +388,14 @@ void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int pix) { __declspec(naked) void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb, - int pix) { + int width) { __asm { mov eax, [esp + 4] // src_raw mov edx, [esp + 8] // dst_argb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width pcmpeqb xmm5, xmm5 // generate mask 0xff000000 pslld xmm5, 24 - movdqa xmm4, kShuffleMaskRAWToARGB + movdqa xmm4, xmmword ptr kShuffleMaskRAWToARGB convertloop: movdqu xmm0, [eax] @@ -458,6 +425,34 @@ void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb, } } +__declspec(naked) +void RAWToRGB24Row_SSSE3(const uint8* src_raw, uint8* dst_rgb24, int width) { + __asm { + mov eax, [esp + 4] // src_raw + mov edx, [esp + 8] // dst_rgb24 + mov ecx, [esp + 12] // width + movdqa xmm3, xmmword ptr kShuffleMaskRAWToRGB24_0 + movdqa xmm4, xmmword ptr kShuffleMaskRAWToRGB24_1 + movdqa xmm5, xmmword ptr kShuffleMaskRAWToRGB24_2 + + convertloop: + movdqu xmm0, [eax] + 
movdqu xmm1, [eax + 4] + movdqu xmm2, [eax + 8] + lea eax, [eax + 24] + pshufb xmm0, xmm3 + pshufb xmm1, xmm4 + pshufb xmm2, xmm5 + movq qword ptr [edx], xmm0 + movq qword ptr [edx + 8], xmm1 + movq qword ptr [edx + 16], xmm2 + lea edx, [edx + 24] + sub ecx, 8 + jg convertloop + ret + } +} + // pmul method to replicate bits. // Math to replicate bits: // (v << 8) | (v << 3) @@ -467,7 +462,7 @@ void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb, // 20 instructions. __declspec(naked) void RGB565ToARGBRow_SSE2(const uint8* src_rgb565, uint8* dst_argb, - int pix) { + int width) { __asm { mov eax, 0x01080108 // generate multiplier to repeat 5 bits movd xmm5, eax @@ -485,7 +480,7 @@ void RGB565ToARGBRow_SSE2(const uint8* src_rgb565, uint8* dst_argb, mov eax, [esp + 4] // src_rgb565 mov edx, [esp + 8] // dst_argb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width sub edx, eax sub edx, eax @@ -523,13 +518,13 @@ void RGB565ToARGBRow_SSE2(const uint8* src_rgb565, uint8* dst_argb, // G shift of 5 is incorporated, so shift is 5 + 8 and 5 + 3 __declspec(naked) void RGB565ToARGBRow_AVX2(const uint8* src_rgb565, uint8* dst_argb, - int pix) { + int width) { __asm { mov eax, 0x01080108 // generate multiplier to repeat 5 bits vmovd xmm5, eax vbroadcastss ymm5, xmm5 mov eax, 0x20802080 // multiplier shift by 5 and then repeat 6 bits - movd xmm6, eax + vmovd xmm6, eax vbroadcastss ymm6, xmm6 vpcmpeqb ymm3, ymm3, ymm3 // generate mask 0xf800f800 for Red vpsllw ymm3, ymm3, 11 @@ -541,7 +536,7 @@ void RGB565ToARGBRow_AVX2(const uint8* src_rgb565, uint8* dst_argb, mov eax, [esp + 4] // src_rgb565 mov edx, [esp + 8] // dst_argb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width sub edx, eax sub edx, eax @@ -574,13 +569,13 @@ void RGB565ToARGBRow_AVX2(const uint8* src_rgb565, uint8* dst_argb, #ifdef HAS_ARGB1555TOARGBROW_AVX2 __declspec(naked) void ARGB1555ToARGBRow_AVX2(const uint8* src_argb1555, uint8* dst_argb, - int pix) { + int width) { __asm { mov eax, 
0x01080108 // generate multiplier to repeat 5 bits vmovd xmm5, eax vbroadcastss ymm5, xmm5 mov eax, 0x42004200 // multiplier shift by 6 and then repeat 5 bits - movd xmm6, eax + vmovd xmm6, eax vbroadcastss ymm6, xmm6 vpcmpeqb ymm3, ymm3, ymm3 // generate mask 0xf800f800 for Red vpsllw ymm3, ymm3, 11 @@ -590,7 +585,7 @@ void ARGB1555ToARGBRow_AVX2(const uint8* src_argb1555, uint8* dst_argb, mov eax, [esp + 4] // src_argb1555 mov edx, [esp + 8] // dst_argb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width sub edx, eax sub edx, eax @@ -626,7 +621,7 @@ void ARGB1555ToARGBRow_AVX2(const uint8* src_argb1555, uint8* dst_argb, #ifdef HAS_ARGB4444TOARGBROW_AVX2 __declspec(naked) void ARGB4444ToARGBRow_AVX2(const uint8* src_argb4444, uint8* dst_argb, - int pix) { + int width) { __asm { mov eax, 0x0f0f0f0f // generate mask 0x0f0f0f0f vmovd xmm4, eax @@ -634,7 +629,7 @@ void ARGB4444ToARGBRow_AVX2(const uint8* src_argb4444, uint8* dst_argb, vpslld ymm5, ymm4, 4 // 0xf0f0f0f0 for high nibbles mov eax, [esp + 4] // src_argb4444 mov edx, [esp + 8] // dst_argb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width sub edx, eax sub edx, eax @@ -664,7 +659,7 @@ void ARGB4444ToARGBRow_AVX2(const uint8* src_argb4444, uint8* dst_argb, // 24 instructions __declspec(naked) void ARGB1555ToARGBRow_SSE2(const uint8* src_argb1555, uint8* dst_argb, - int pix) { + int width) { __asm { mov eax, 0x01080108 // generate multiplier to repeat 5 bits movd xmm5, eax @@ -681,7 +676,7 @@ void ARGB1555ToARGBRow_SSE2(const uint8* src_argb1555, uint8* dst_argb, mov eax, [esp + 4] // src_argb1555 mov edx, [esp + 8] // dst_argb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width sub edx, eax sub edx, eax @@ -717,7 +712,7 @@ void ARGB1555ToARGBRow_SSE2(const uint8* src_argb1555, uint8* dst_argb, // 18 instructions. 
__declspec(naked) void ARGB4444ToARGBRow_SSE2(const uint8* src_argb4444, uint8* dst_argb, - int pix) { + int width) { __asm { mov eax, 0x0f0f0f0f // generate mask 0x0f0f0f0f movd xmm4, eax @@ -726,7 +721,7 @@ void ARGB4444ToARGBRow_SSE2(const uint8* src_argb4444, uint8* dst_argb, pslld xmm5, 4 mov eax, [esp + 4] // src_argb4444 mov edx, [esp + 8] // dst_argb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width sub edx, eax sub edx, eax @@ -754,12 +749,12 @@ void ARGB4444ToARGBRow_SSE2(const uint8* src_argb4444, uint8* dst_argb, } __declspec(naked) -void ARGBToRGB24Row_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix) { +void ARGBToRGB24Row_SSSE3(const uint8* src_argb, uint8* dst_rgb, int width) { __asm { mov eax, [esp + 4] // src_argb mov edx, [esp + 8] // dst_rgb - mov ecx, [esp + 12] // pix - movdqa xmm6, kShuffleMaskARGBToRGB24 + mov ecx, [esp + 12] // width + movdqa xmm6, xmmword ptr kShuffleMaskARGBToRGB24 convertloop: movdqu xmm0, [eax] // fetch 16 pixels of argb @@ -792,12 +787,12 @@ void ARGBToRGB24Row_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix) { } __declspec(naked) -void ARGBToRAWRow_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix) { +void ARGBToRAWRow_SSSE3(const uint8* src_argb, uint8* dst_rgb, int width) { __asm { mov eax, [esp + 4] // src_argb mov edx, [esp + 8] // dst_rgb - mov ecx, [esp + 12] // pix - movdqa xmm6, kShuffleMaskARGBToRAW + mov ecx, [esp + 12] // width + movdqa xmm6, xmmword ptr kShuffleMaskARGBToRAW convertloop: movdqu xmm0, [eax] // fetch 16 pixels of argb @@ -829,13 +824,12 @@ void ARGBToRAWRow_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix) { } } -// 4 pixels __declspec(naked) -void ARGBToRGB565Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) { +void ARGBToRGB565Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int width) { __asm { mov eax, [esp + 4] // src_argb mov edx, [esp + 8] // dst_rgb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width pcmpeqb xmm3, xmm3 // generate mask 
0x0000001f psrld xmm3, 27 pcmpeqb xmm4, xmm4 // generate mask 0x000007e0 @@ -867,16 +861,15 @@ void ARGBToRGB565Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) { } } -// 8 pixels __declspec(naked) void ARGBToRGB565DitherRow_SSE2(const uint8* src_argb, uint8* dst_rgb, - const uint32 dither4, int pix) { + const uint32 dither4, int width) { __asm { mov eax, [esp + 4] // src_argb mov edx, [esp + 8] // dst_rgb movd xmm6, [esp + 12] // dither4 - mov ecx, [esp + 16] // pix + mov ecx, [esp + 16] // width punpcklbw xmm6, xmm6 // make dither 16 bytes movdqa xmm7, xmm6 punpcklwd xmm6, xmm6 @@ -916,12 +909,12 @@ void ARGBToRGB565DitherRow_SSE2(const uint8* src_argb, uint8* dst_rgb, #ifdef HAS_ARGBTORGB565DITHERROW_AVX2 __declspec(naked) void ARGBToRGB565DitherRow_AVX2(const uint8* src_argb, uint8* dst_rgb, - const uint32 dither4, int pix) { + const uint32 dither4, int width) { __asm { mov eax, [esp + 4] // src_argb mov edx, [esp + 8] // dst_rgb vbroadcastss xmm6, [esp + 12] // dither4 - mov ecx, [esp + 16] // pix + mov ecx, [esp + 16] // width vpunpcklbw xmm6, xmm6, xmm6 // make dither 32 bytes vpermq ymm6, ymm6, 0xd8 vpunpcklwd ymm6, ymm6, ymm6 @@ -958,11 +951,11 @@ void ARGBToRGB565DitherRow_AVX2(const uint8* src_argb, uint8* dst_rgb, // TODO(fbarchard): Improve sign extension/packing. 
__declspec(naked) -void ARGBToARGB1555Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) { +void ARGBToARGB1555Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int width) { __asm { mov eax, [esp + 4] // src_argb mov edx, [esp + 8] // dst_rgb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width pcmpeqb xmm4, xmm4 // generate mask 0x0000001f psrld xmm4, 27 movdqa xmm5, xmm4 // generate mask 0x000003e0 @@ -999,11 +992,11 @@ void ARGBToARGB1555Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) { } __declspec(naked) -void ARGBToARGB4444Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) { +void ARGBToARGB4444Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int width) { __asm { mov eax, [esp + 4] // src_argb mov edx, [esp + 8] // dst_rgb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width pcmpeqb xmm4, xmm4 // generate mask 0xf000f000 psllw xmm4, 12 movdqa xmm3, xmm4 // generate mask 0x00f000f0 @@ -1029,11 +1022,11 @@ void ARGBToARGB4444Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) { #ifdef HAS_ARGBTORGB565ROW_AVX2 __declspec(naked) -void ARGBToRGB565Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) { +void ARGBToRGB565Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int width) { __asm { mov eax, [esp + 4] // src_argb mov edx, [esp + 8] // dst_rgb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width vpcmpeqb ymm3, ymm3, ymm3 // generate mask 0x0000001f vpsrld ymm3, ymm3, 27 vpcmpeqb ymm4, ymm4, ymm4 // generate mask 0x000007e0 @@ -1066,11 +1059,11 @@ void ARGBToRGB565Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) { #ifdef HAS_ARGBTOARGB1555ROW_AVX2 __declspec(naked) -void ARGBToARGB1555Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) { +void ARGBToARGB1555Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int width) { __asm { mov eax, [esp + 4] // src_argb mov edx, [esp + 8] // dst_rgb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width vpcmpeqb ymm4, ymm4, ymm4 vpsrld ymm4, ymm4, 27 // generate 
mask 0x0000001f vpslld ymm5, ymm4, 5 // generate mask 0x000003e0 @@ -1106,11 +1099,11 @@ void ARGBToARGB1555Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) { #ifdef HAS_ARGBTOARGB4444ROW_AVX2 __declspec(naked) -void ARGBToARGB4444Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) { +void ARGBToARGB4444Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int width) { __asm { mov eax, [esp + 4] // src_argb mov edx, [esp + 8] // dst_rgb - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width vpcmpeqb ymm4, ymm4, ymm4 // generate mask 0xf000f000 vpsllw ymm4, ymm4, 12 vpsrlw ymm3, ymm4, 8 // generate mask 0x00f000f0 @@ -1137,13 +1130,13 @@ void ARGBToARGB4444Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) { // Convert 16 ARGB pixels (64 bytes) to 16 Y values. __declspec(naked) -void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) { +void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width) { __asm { mov eax, [esp + 4] /* src_argb */ mov edx, [esp + 8] /* dst_y */ - mov ecx, [esp + 12] /* pix */ - movdqa xmm4, kARGBToY - movdqa xmm5, kAddY16 + mov ecx, [esp + 12] /* width */ + movdqa xmm4, xmmword ptr kARGBToY + movdqa xmm5, xmmword ptr kAddY16 convertloop: movdqu xmm0, [eax] @@ -1172,13 +1165,13 @@ void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) { // Convert 16 ARGB pixels (64 bytes) to 16 YJ values. // Same as ARGBToYRow but different coefficients, no add 16, but do rounding. 
__declspec(naked) -void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) { +void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width) { __asm { mov eax, [esp + 4] /* src_argb */ mov edx, [esp + 8] /* dst_y */ - mov ecx, [esp + 12] /* pix */ - movdqa xmm4, kARGBToYJ - movdqa xmm5, kAddYJ64 + mov ecx, [esp + 12] /* width */ + movdqa xmm4, xmmword ptr kARGBToYJ + movdqa xmm5, xmmword ptr kAddYJ64 convertloop: movdqu xmm0, [eax] @@ -1213,14 +1206,14 @@ static const lvec32 kPermdARGBToY_AVX = { // Convert 32 ARGB pixels (128 bytes) to 32 Y values. __declspec(naked) -void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) { +void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int width) { __asm { mov eax, [esp + 4] /* src_argb */ mov edx, [esp + 8] /* dst_y */ - mov ecx, [esp + 12] /* pix */ - vbroadcastf128 ymm4, kARGBToY - vbroadcastf128 ymm5, kAddY16 - vmovdqu ymm6, kPermdARGBToY_AVX + mov ecx, [esp + 12] /* width */ + vbroadcastf128 ymm4, xmmword ptr kARGBToY + vbroadcastf128 ymm5, xmmword ptr kAddY16 + vmovdqu ymm6, ymmword ptr kPermdARGBToY_AVX convertloop: vmovdqu ymm0, [eax] @@ -1252,14 +1245,14 @@ void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) { #ifdef HAS_ARGBTOYJROW_AVX2 // Convert 32 ARGB pixels (128 bytes) to 32 Y values. 
__declspec(naked) -void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) { +void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int width) { __asm { mov eax, [esp + 4] /* src_argb */ mov edx, [esp + 8] /* dst_y */ - mov ecx, [esp + 12] /* pix */ - vbroadcastf128 ymm4, kARGBToYJ - vbroadcastf128 ymm5, kAddYJ64 - vmovdqu ymm6, kPermdARGBToY_AVX + mov ecx, [esp + 12] /* width */ + vbroadcastf128 ymm4, xmmword ptr kARGBToYJ + vbroadcastf128 ymm5, xmmword ptr kAddYJ64 + vmovdqu ymm6, ymmword ptr kPermdARGBToY_AVX convertloop: vmovdqu ymm0, [eax] @@ -1291,13 +1284,13 @@ void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) { #endif // HAS_ARGBTOYJROW_AVX2 __declspec(naked) -void BGRAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) { +void BGRAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width) { __asm { mov eax, [esp + 4] /* src_argb */ mov edx, [esp + 8] /* dst_y */ - mov ecx, [esp + 12] /* pix */ - movdqa xmm4, kBGRAToY - movdqa xmm5, kAddY16 + mov ecx, [esp + 12] /* width */ + movdqa xmm4, xmmword ptr kBGRAToY + movdqa xmm5, xmmword ptr kAddY16 convertloop: movdqu xmm0, [eax] @@ -1324,13 +1317,13 @@ void BGRAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) { } __declspec(naked) -void ABGRToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) { +void ABGRToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width) { __asm { mov eax, [esp + 4] /* src_argb */ mov edx, [esp + 8] /* dst_y */ - mov ecx, [esp + 12] /* pix */ - movdqa xmm4, kABGRToY - movdqa xmm5, kAddY16 + mov ecx, [esp + 12] /* width */ + movdqa xmm4, xmmword ptr kABGRToY + movdqa xmm5, xmmword ptr kAddY16 convertloop: movdqu xmm0, [eax] @@ -1357,13 +1350,13 @@ void ABGRToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) { } __declspec(naked) -void RGBAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) { +void RGBAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int width) { __asm { mov eax, [esp + 4] /* src_argb */ mov edx, 
[esp + 8] /* dst_y */ - mov ecx, [esp + 12] /* pix */ - movdqa xmm4, kRGBAToY - movdqa xmm5, kAddY16 + mov ecx, [esp + 12] /* width */ + movdqa xmm4, xmmword ptr kRGBAToY + movdqa xmm5, xmmword ptr kAddY16 convertloop: movdqu xmm0, [eax] @@ -1399,10 +1392,10 @@ void ARGBToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb, mov esi, [esp + 8 + 8] // src_stride_argb mov edx, [esp + 8 + 12] // dst_u mov edi, [esp + 8 + 16] // dst_v - mov ecx, [esp + 8 + 20] // pix - movdqa xmm5, kAddUV128 - movdqa xmm6, kARGBToV - movdqa xmm7, kARGBToU + mov ecx, [esp + 8 + 20] // width + movdqa xmm5, xmmword ptr kAddUV128 + movdqa xmm6, xmmword ptr kARGBToV + movdqa xmm7, xmmword ptr kARGBToU sub edi, edx // stride from u to v convertloop: @@ -1469,10 +1462,10 @@ void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb, mov esi, [esp + 8 + 8] // src_stride_argb mov edx, [esp + 8 + 12] // dst_u mov edi, [esp + 8 + 16] // dst_v - mov ecx, [esp + 8 + 20] // pix - movdqa xmm5, kAddUVJ128 - movdqa xmm6, kARGBToVJ - movdqa xmm7, kARGBToUJ + mov ecx, [esp + 8 + 20] // width + movdqa xmm5, xmmword ptr kAddUVJ128 + movdqa xmm6, xmmword ptr kARGBToVJ + movdqa xmm7, xmmword ptr kARGBToUJ sub edi, edx // stride from u to v convertloop: @@ -1511,7 +1504,7 @@ void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb, pmaddubsw xmm3, xmm6 phaddw xmm0, xmm2 phaddw xmm1, xmm3 - paddw xmm0, xmm5 // +.5 rounding -> unsigned + paddw xmm0, xmm5 // +.5 rounding -> unsigned paddw xmm1, xmm5 psraw xmm0, 8 psraw xmm1, 8 @@ -1541,10 +1534,10 @@ void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb, mov esi, [esp + 8 + 8] // src_stride_argb mov edx, [esp + 8 + 12] // dst_u mov edi, [esp + 8 + 16] // dst_v - mov ecx, [esp + 8 + 20] // pix - vbroadcastf128 ymm5, kAddUV128 - vbroadcastf128 ymm6, kARGBToV - vbroadcastf128 ymm7, kARGBToU + mov ecx, [esp + 8 + 20] // width + vbroadcastf128 ymm5, xmmword ptr kAddUV128 + vbroadcastf128 ymm6, xmmword ptr kARGBToV + vbroadcastf128 
ymm7, xmmword ptr kARGBToU sub edi, edx // stride from u to v convertloop: @@ -1578,7 +1571,7 @@ void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb, vpsraw ymm0, ymm0, 8 vpacksswb ymm0, ymm1, ymm0 // mutates vpermq ymm0, ymm0, 0xd8 // For vpacksswb - vpshufb ymm0, ymm0, kShufARGBToUV_AVX // For vshufps + vphaddw + vpshufb ymm0, ymm0, ymmword ptr kShufARGBToUV_AVX // for vshufps/vphaddw vpaddb ymm0, ymm0, ymm5 // -> unsigned // step 3 - store 16 U and 16 V values @@ -1596,6 +1589,73 @@ void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb, } #endif // HAS_ARGBTOUVROW_AVX2 +#ifdef HAS_ARGBTOUVJROW_AVX2 +__declspec(naked) +void ARGBToUVJRow_AVX2(const uint8* src_argb0, int src_stride_argb, + uint8* dst_u, uint8* dst_v, int width) { + __asm { + push esi + push edi + mov eax, [esp + 8 + 4] // src_argb + mov esi, [esp + 8 + 8] // src_stride_argb + mov edx, [esp + 8 + 12] // dst_u + mov edi, [esp + 8 + 16] // dst_v + mov ecx, [esp + 8 + 20] // width + vbroadcastf128 ymm5, xmmword ptr kAddUV128 + vbroadcastf128 ymm6, xmmword ptr kARGBToV + vbroadcastf128 ymm7, xmmword ptr kARGBToU + sub edi, edx // stride from u to v + + convertloop: + /* step 1 - subsample 32x2 argb pixels to 16x1 */ + vmovdqu ymm0, [eax] + vmovdqu ymm1, [eax + 32] + vmovdqu ymm2, [eax + 64] + vmovdqu ymm3, [eax + 96] + vpavgb ymm0, ymm0, [eax + esi] + vpavgb ymm1, ymm1, [eax + esi + 32] + vpavgb ymm2, ymm2, [eax + esi + 64] + vpavgb ymm3, ymm3, [eax + esi + 96] + lea eax, [eax + 128] + vshufps ymm4, ymm0, ymm1, 0x88 + vshufps ymm0, ymm0, ymm1, 0xdd + vpavgb ymm0, ymm0, ymm4 // mutated by vshufps + vshufps ymm4, ymm2, ymm3, 0x88 + vshufps ymm2, ymm2, ymm3, 0xdd + vpavgb ymm2, ymm2, ymm4 // mutated by vshufps + + // step 2 - convert to U and V + // from here down is very similar to Y code except + // instead of 32 different pixels, its 16 pixels of U and 16 of V + vpmaddubsw ymm1, ymm0, ymm7 // U + vpmaddubsw ymm3, ymm2, ymm7 + vpmaddubsw ymm0, ymm0, ymm6 // V + vpmaddubsw ymm2, 
ymm2, ymm6 + vphaddw ymm1, ymm1, ymm3 // mutates + vphaddw ymm0, ymm0, ymm2 + vpaddw ymm1, ymm1, ymm5 // +.5 rounding -> unsigned + vpaddw ymm0, ymm0, ymm5 + vpsraw ymm1, ymm1, 8 + vpsraw ymm0, ymm0, 8 + vpacksswb ymm0, ymm1, ymm0 // mutates + vpermq ymm0, ymm0, 0xd8 // For vpacksswb + vpshufb ymm0, ymm0, ymmword ptr kShufARGBToUV_AVX // for vshufps/vphaddw + + // step 3 - store 16 U and 16 V values + vextractf128 [edx], ymm0, 0 // U + vextractf128 [edx + edi], ymm0, 1 // V + lea edx, [edx + 16] + sub ecx, 32 + jg convertloop + + pop edi + pop esi + vzeroupper + ret + } +} +#endif // HAS_ARGBTOUVJROW_AVX2 + __declspec(naked) void ARGBToUV444Row_SSSE3(const uint8* src_argb0, uint8* dst_u, uint8* dst_v, int width) { @@ -1604,10 +1664,10 @@ void ARGBToUV444Row_SSSE3(const uint8* src_argb0, mov eax, [esp + 4 + 4] // src_argb mov edx, [esp + 4 + 8] // dst_u mov edi, [esp + 4 + 12] // dst_v - mov ecx, [esp + 4 + 16] // pix - movdqa xmm5, kAddUV128 - movdqa xmm6, kARGBToV - movdqa xmm7, kARGBToU + mov ecx, [esp + 4 + 16] // width + movdqa xmm5, xmmword ptr kAddUV128 + movdqa xmm6, xmmword ptr kARGBToV + movdqa xmm7, xmmword ptr kARGBToU sub edi, edx // stride from u to v convertloop: @@ -1653,64 +1713,6 @@ void ARGBToUV444Row_SSSE3(const uint8* src_argb0, } } -__declspec(naked) -void ARGBToUV422Row_SSSE3(const uint8* src_argb0, - uint8* dst_u, uint8* dst_v, int width) { - __asm { - push edi - mov eax, [esp + 4 + 4] // src_argb - mov edx, [esp + 4 + 8] // dst_u - mov edi, [esp + 4 + 12] // dst_v - mov ecx, [esp + 4 + 16] // pix - movdqa xmm5, kAddUV128 - movdqa xmm6, kARGBToV - movdqa xmm7, kARGBToU - sub edi, edx // stride from u to v - - convertloop: - /* step 1 - subsample 16x2 argb pixels to 8x1 */ - movdqu xmm0, [eax] - movdqu xmm1, [eax + 16] - movdqu xmm2, [eax + 32] - movdqu xmm3, [eax + 48] - lea eax, [eax + 64] - movdqa xmm4, xmm0 - shufps xmm0, xmm1, 0x88 - shufps xmm4, xmm1, 0xdd - pavgb xmm0, xmm4 - movdqa xmm4, xmm2 - shufps xmm2, xmm3, 0x88 - shufps xmm4, 
xmm3, 0xdd - pavgb xmm2, xmm4 - - // step 2 - convert to U and V - // from here down is very similar to Y code except - // instead of 16 different pixels, its 8 pixels of U and 8 of V - movdqa xmm1, xmm0 - movdqa xmm3, xmm2 - pmaddubsw xmm0, xmm7 // U - pmaddubsw xmm2, xmm7 - pmaddubsw xmm1, xmm6 // V - pmaddubsw xmm3, xmm6 - phaddw xmm0, xmm2 - phaddw xmm1, xmm3 - psraw xmm0, 8 - psraw xmm1, 8 - packsswb xmm0, xmm1 - paddb xmm0, xmm5 // -> unsigned - - // step 3 - store 8 U and 8 V values - movlps qword ptr [edx], xmm0 // U - movhps qword ptr [edx + edi], xmm0 // V - lea edx, [edx + 8] - sub ecx, 16 - jg convertloop - - pop edi - ret - } -} - __declspec(naked) void BGRAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb, uint8* dst_u, uint8* dst_v, int width) { @@ -1721,10 +1723,10 @@ void BGRAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb, mov esi, [esp + 8 + 8] // src_stride_argb mov edx, [esp + 8 + 12] // dst_u mov edi, [esp + 8 + 16] // dst_v - mov ecx, [esp + 8 + 20] // pix - movdqa xmm5, kAddUV128 - movdqa xmm6, kBGRAToV - movdqa xmm7, kBGRAToU + mov ecx, [esp + 8 + 20] // width + movdqa xmm5, xmmword ptr kAddUV128 + movdqa xmm6, xmmword ptr kBGRAToV + movdqa xmm7, xmmword ptr kBGRAToU sub edi, edx // stride from u to v convertloop: @@ -1791,10 +1793,10 @@ void ABGRToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb, mov esi, [esp + 8 + 8] // src_stride_argb mov edx, [esp + 8 + 12] // dst_u mov edi, [esp + 8 + 16] // dst_v - mov ecx, [esp + 8 + 20] // pix - movdqa xmm5, kAddUV128 - movdqa xmm6, kABGRToV - movdqa xmm7, kABGRToU + mov ecx, [esp + 8 + 20] // width + movdqa xmm5, xmmword ptr kAddUV128 + movdqa xmm6, xmmword ptr kABGRToV + movdqa xmm7, xmmword ptr kABGRToU sub edi, edx // stride from u to v convertloop: @@ -1861,10 +1863,10 @@ void RGBAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb, mov esi, [esp + 8 + 8] // src_stride_argb mov edx, [esp + 8 + 12] // dst_u mov edi, [esp + 8 + 16] // dst_v - mov ecx, [esp + 8 + 
20] // pix - movdqa xmm5, kAddUV128 - movdqa xmm6, kRGBAToV - movdqa xmm7, kRGBAToU + mov ecx, [esp + 8 + 20] // width + movdqa xmm5, xmmword ptr kAddUV128 + movdqa xmm6, xmmword ptr kRGBAToV + movdqa xmm7, xmmword ptr kRGBAToU sub edi, edx // stride from u to v convertloop: @@ -1924,33 +1926,62 @@ void RGBAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb, // Read 16 UV from 444 #define READYUV444_AVX2 __asm { \ - __asm vmovdqu xmm0, [esi] /* U */ /* NOLINT */ \ - __asm vmovdqu xmm1, [esi + edi] /* V */ /* NOLINT */ \ + __asm vmovdqu xmm0, [esi] /* U */ \ + __asm vmovdqu xmm1, [esi + edi] /* V */ \ __asm lea esi, [esi + 16] \ __asm vpermq ymm0, ymm0, 0xd8 \ __asm vpermq ymm1, ymm1, 0xd8 \ __asm vpunpcklbw ymm0, ymm0, ymm1 /* UV */ \ + __asm vmovdqu xmm4, [eax] /* Y */ \ + __asm vpermq ymm4, ymm4, 0xd8 \ + __asm vpunpcklbw ymm4, ymm4, ymm4 \ + __asm lea eax, [eax + 16] \ } // Read 8 UV from 422, upsample to 16 UV. #define READYUV422_AVX2 __asm { \ - __asm vmovq xmm0, qword ptr [esi] /* U */ /* NOLINT */ \ - __asm vmovq xmm1, qword ptr [esi + edi] /* V */ /* NOLINT */ \ + __asm vmovq xmm0, qword ptr [esi] /* U */ \ + __asm vmovq xmm1, qword ptr [esi + edi] /* V */ \ __asm lea esi, [esi + 8] \ __asm vpunpcklbw ymm0, ymm0, ymm1 /* UV */ \ __asm vpermq ymm0, ymm0, 0xd8 \ __asm vpunpcklwd ymm0, ymm0, ymm0 /* UVUV (upsample) */ \ + __asm vmovdqu xmm4, [eax] /* Y */ \ + __asm vpermq ymm4, ymm4, 0xd8 \ + __asm vpunpcklbw ymm4, ymm4, ymm4 \ + __asm lea eax, [eax + 16] \ + } + +// Read 8 UV from 422, upsample to 16 UV. With 16 Alpha. 
+#define READYUVA422_AVX2 __asm { \ + __asm vmovq xmm0, qword ptr [esi] /* U */ \ + __asm vmovq xmm1, qword ptr [esi + edi] /* V */ \ + __asm lea esi, [esi + 8] \ + __asm vpunpcklbw ymm0, ymm0, ymm1 /* UV */ \ + __asm vpermq ymm0, ymm0, 0xd8 \ + __asm vpunpcklwd ymm0, ymm0, ymm0 /* UVUV (upsample) */ \ + __asm vmovdqu xmm4, [eax] /* Y */ \ + __asm vpermq ymm4, ymm4, 0xd8 \ + __asm vpunpcklbw ymm4, ymm4, ymm4 \ + __asm lea eax, [eax + 16] \ + __asm vmovdqu xmm5, [ebp] /* A */ \ + __asm vpermq ymm5, ymm5, 0xd8 \ + __asm lea ebp, [ebp + 16] \ } // Read 4 UV from 411, upsample to 16 UV. #define READYUV411_AVX2 __asm { \ - __asm vmovd xmm0, dword ptr [esi] /* U */ /* NOLINT */ \ - __asm vmovd xmm1, dword ptr [esi + edi] /* V */ /* NOLINT */ \ + __asm vmovd xmm0, dword ptr [esi] /* U */ \ + __asm vmovd xmm1, dword ptr [esi + edi] /* V */ \ __asm lea esi, [esi + 4] \ __asm vpunpcklbw ymm0, ymm0, ymm1 /* UV */ \ __asm vpunpcklwd ymm0, ymm0, ymm0 /* UVUV (upsample) */ \ __asm vpermq ymm0, ymm0, 0xd8 \ __asm vpunpckldq ymm0, ymm0, ymm0 /* UVUVUVUV (upsample) */ \ + __asm vmovdqu xmm4, [eax] /* Y */ \ + __asm vpermq ymm4, ymm4, 0xd8 \ + __asm vpunpcklbw ymm4, ymm4, ymm4 \ + __asm lea eax, [eax + 16] \ } // Read 8 UV from NV12, upsample to 16 UV. @@ -1959,29 +1990,58 @@ void RGBAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb, __asm lea esi, [esi + 16] \ __asm vpermq ymm0, ymm0, 0xd8 \ __asm vpunpcklwd ymm0, ymm0, ymm0 /* UVUV (upsample) */ \ + __asm vmovdqu xmm4, [eax] /* Y */ \ + __asm vpermq ymm4, ymm4, 0xd8 \ + __asm vpunpcklbw ymm4, ymm4, ymm4 \ + __asm lea eax, [eax + 16] \ + } + +// Read 8 UV from NV21, upsample to 16 UV. 
+#define READNV21_AVX2 __asm { \ + __asm vmovdqu xmm0, [esi] /* UV */ \ + __asm lea esi, [esi + 16] \ + __asm vpermq ymm0, ymm0, 0xd8 \ + __asm vpshufb ymm0, ymm0, ymmword ptr kShuffleNV21 \ + __asm vmovdqu xmm4, [eax] /* Y */ \ + __asm vpermq ymm4, ymm4, 0xd8 \ + __asm vpunpcklbw ymm4, ymm4, ymm4 \ + __asm lea eax, [eax + 16] \ + } + +// Read 8 YUY2 with 16 Y and upsample 8 UV to 16 UV. +#define READYUY2_AVX2 __asm { \ + __asm vmovdqu ymm4, [eax] /* YUY2 */ \ + __asm vpshufb ymm4, ymm4, ymmword ptr kShuffleYUY2Y \ + __asm vmovdqu ymm0, [eax] /* UV */ \ + __asm vpshufb ymm0, ymm0, ymmword ptr kShuffleYUY2UV \ + __asm lea eax, [eax + 32] \ + } + +// Read 8 UYVY with 16 Y and upsample 8 UV to 16 UV. +#define READUYVY_AVX2 __asm { \ + __asm vmovdqu ymm4, [eax] /* UYVY */ \ + __asm vpshufb ymm4, ymm4, ymmword ptr kShuffleUYVYY \ + __asm vmovdqu ymm0, [eax] /* UV */ \ + __asm vpshufb ymm0, ymm0, ymmword ptr kShuffleUYVYUV \ + __asm lea eax, [eax + 32] \ } // Convert 16 pixels: 16 UV and 16 Y. 
#define YUVTORGB_AVX2(YuvConstants) __asm { \ - /* Step 1: Find 8 UV contributions to 16 R,G,B values */ \ - __asm vpmaddubsw ymm2, ymm0, YuvConstants.kUVToR /* scale R UV */ \ - __asm vpmaddubsw ymm1, ymm0, YuvConstants.kUVToG /* scale G UV */ \ - __asm vpmaddubsw ymm0, ymm0, YuvConstants.kUVToB /* scale B UV */ \ - __asm vmovdqu ymm3, YuvConstants.kUVBiasR \ + __asm vpmaddubsw ymm2, ymm0, ymmword ptr [YuvConstants + KUVTOR] /* R UV */\ + __asm vpmaddubsw ymm1, ymm0, ymmword ptr [YuvConstants + KUVTOG] /* G UV */\ + __asm vpmaddubsw ymm0, ymm0, ymmword ptr [YuvConstants + KUVTOB] /* B UV */\ + __asm vmovdqu ymm3, ymmword ptr [YuvConstants + KUVBIASR] \ __asm vpsubw ymm2, ymm3, ymm2 \ - __asm vmovdqu ymm3, YuvConstants.kUVBiasG \ + __asm vmovdqu ymm3, ymmword ptr [YuvConstants + KUVBIASG] \ __asm vpsubw ymm1, ymm3, ymm1 \ - __asm vmovdqu ymm3, YuvConstants.kUVBiasB \ + __asm vmovdqu ymm3, ymmword ptr [YuvConstants + KUVBIASB] \ __asm vpsubw ymm0, ymm3, ymm0 \ /* Step 2: Find Y contribution to 16 R,G,B values */ \ - __asm vmovdqu xmm3, [eax] /* NOLINT */ \ - __asm lea eax, [eax + 16] \ - __asm vpermq ymm3, ymm3, 0xd8 \ - __asm vpunpcklbw ymm3, ymm3, ymm3 \ - __asm vpmulhuw ymm3, ymm3, YuvConstants.kYToRgb \ - __asm vpaddsw ymm0, ymm0, ymm3 /* B += Y */ \ - __asm vpaddsw ymm1, ymm1, ymm3 /* G += Y */ \ - __asm vpaddsw ymm2, ymm2, ymm3 /* R += Y */ \ + __asm vpmulhuw ymm4, ymm4, ymmword ptr [YuvConstants + KYTORGB] \ + __asm vpaddsw ymm0, ymm0, ymm4 /* B += Y */ \ + __asm vpaddsw ymm1, ymm1, ymm4 /* G += Y */ \ + __asm vpaddsw ymm2, ymm2, ymm4 /* R += Y */ \ __asm vpsraw ymm0, ymm0, 6 \ __asm vpsraw ymm1, ymm1, 6 \ __asm vpsraw ymm2, ymm2, 6 \ @@ -1992,7 +2052,6 @@ void RGBAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb, // Store 16 ARGB values. 
#define STOREARGB_AVX2 __asm { \ - /* Step 3: Weave into ARGB */ \ __asm vpunpcklbw ymm0, ymm0, ymm1 /* BG */ \ __asm vpermq ymm0, ymm0, 0xd8 \ __asm vpunpcklbw ymm2, ymm2, ymm5 /* RA */ \ @@ -2004,6 +2063,19 @@ void RGBAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb, __asm lea edx, [edx + 64] \ } +// Store 16 RGBA values. +#define STORERGBA_AVX2 __asm { \ + __asm vpunpcklbw ymm1, ymm1, ymm2 /* GR */ \ + __asm vpermq ymm1, ymm1, 0xd8 \ + __asm vpunpcklbw ymm2, ymm5, ymm0 /* AB */ \ + __asm vpermq ymm2, ymm2, 0xd8 \ + __asm vpunpcklwd ymm0, ymm2, ymm1 /* ABGR first 8 pixels */ \ + __asm vpunpckhwd ymm1, ymm2, ymm1 /* ABGR next 8 pixels */ \ + __asm vmovdqu [edx], ymm0 \ + __asm vmovdqu [edx + 32], ymm1 \ + __asm lea edx, [edx + 64] \ + } + #ifdef HAS_I422TOARGBROW_AVX2 // 16 pixels // 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes). @@ -2012,26 +2084,30 @@ void I422ToARGBRow_AVX2(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { __asm { push esi push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // argb - mov ecx, [esp + 8 + 20] // width + push ebx + mov eax, [esp + 12 + 4] // Y + mov esi, [esp + 12 + 8] // U + mov edi, [esp + 12 + 12] // V + mov edx, [esp + 12 + 16] // argb + mov ebx, [esp + 12 + 20] // yuvconstants + mov ecx, [esp + 12 + 24] // width sub edi, esi vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha convertloop: READYUV422_AVX2 - YUVTORGB_AVX2(kYuvConstants) + YUVTORGB_AVX2(ebx) STOREARGB_AVX2 sub ecx, 16 jg convertloop + pop ebx pop edi pop esi vzeroupper @@ -2040,41 +2116,48 @@ void I422ToARGBRow_AVX2(const uint8* y_buf, } #endif // HAS_I422TOARGBROW_AVX2 -#ifdef HAS_J422TOARGBROW_AVX2 +#ifdef HAS_I422ALPHATOARGBROW_AVX2 // 16 pixels -// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes). 
+// 8 UV values upsampled to 16 UV, mixed with 16 Y and 16 A producing 16 ARGB. __declspec(naked) -void J422ToARGBRow_AVX2(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* dst_argb, - int width) { +void I422AlphaToARGBRow_AVX2(const uint8* y_buf, + const uint8* u_buf, + const uint8* v_buf, + const uint8* a_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { __asm { push esi push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // argb - mov ecx, [esp + 8 + 20] // width + push ebx + push ebp + mov eax, [esp + 16 + 4] // Y + mov esi, [esp + 16 + 8] // U + mov edi, [esp + 16 + 12] // V + mov ebp, [esp + 16 + 16] // A + mov edx, [esp + 16 + 20] // argb + mov ebx, [esp + 16 + 24] // yuvconstants + mov ecx, [esp + 16 + 28] // width sub edi, esi - vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha convertloop: - READYUV422_AVX2 - YUVTORGB_AVX2(kYuvJConstants) + READYUVA422_AVX2 + YUVTORGB_AVX2(ebx) STOREARGB_AVX2 sub ecx, 16 jg convertloop + pop ebp + pop ebx pop edi pop esi vzeroupper ret } } -#endif // HAS_J422TOARGBROW_AVX2 +#endif // HAS_I422ALPHATOARGBROW_AVX2 #ifdef HAS_I444TOARGBROW_AVX2 // 16 pixels @@ -2084,26 +2167,29 @@ void I444ToARGBRow_AVX2(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { __asm { push esi push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // argb - mov ecx, [esp + 8 + 20] // width + push ebx + mov eax, [esp + 12 + 4] // Y + mov esi, [esp + 12 + 8] // U + mov edi, [esp + 12 + 12] // V + mov edx, [esp + 12 + 16] // argb + mov ebx, [esp + 12 + 20] // yuvconstants + mov ecx, [esp + 12 + 24] // width sub edi, esi vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha - convertloop: READYUV444_AVX2 - YUVTORGB_AVX2(kYuvConstants) + 
YUVTORGB_AVX2(ebx) STOREARGB_AVX2 sub ecx, 16 jg convertloop + pop ebx pop edi pop esi vzeroupper @@ -2120,26 +2206,30 @@ void I411ToARGBRow_AVX2(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { __asm { push esi push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // argb - mov ecx, [esp + 8 + 20] // width + push ebx + mov eax, [esp + 12 + 4] // Y + mov esi, [esp + 12 + 8] // U + mov edi, [esp + 12 + 12] // V + mov edx, [esp + 12 + 16] // abgr + mov ebx, [esp + 12 + 20] // yuvconstants + mov ecx, [esp + 12 + 24] // width sub edi, esi vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha convertloop: READYUV411_AVX2 - YUVTORGB_AVX2(kYuvConstants) + YUVTORGB_AVX2(ebx) STOREARGB_AVX2 sub ecx, 16 jg convertloop + pop ebx pop edi pop esi vzeroupper @@ -2155,23 +2245,27 @@ __declspec(naked) void NV12ToARGBRow_AVX2(const uint8* y_buf, const uint8* uv_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { __asm { push esi - mov eax, [esp + 4 + 4] // Y - mov esi, [esp + 4 + 8] // UV - mov edx, [esp + 4 + 12] // argb - mov ecx, [esp + 4 + 16] // width + push ebx + mov eax, [esp + 8 + 4] // Y + mov esi, [esp + 8 + 8] // UV + mov edx, [esp + 8 + 12] // argb + mov ebx, [esp + 8 + 16] // yuvconstants + mov ecx, [esp + 8 + 20] // width vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha convertloop: READNV12_AVX2 - YUVTORGB_AVX2(kYuvConstants) + YUVTORGB_AVX2(ebx) STOREARGB_AVX2 sub ecx, 16 jg convertloop + pop ebx pop esi vzeroupper ret @@ -2181,28 +2275,32 @@ void NV12ToARGBRow_AVX2(const uint8* y_buf, #ifdef HAS_NV21TOARGBROW_AVX2 // 16 pixels. -// 8 VU values upsampled to 16 VU, mixed with 16 Y producing 16 ARGB (64 bytes). +// 8 VU values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes). 
__declspec(naked) void NV21ToARGBRow_AVX2(const uint8* y_buf, - const uint8* uv_buf, + const uint8* vu_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { __asm { push esi - mov eax, [esp + 4 + 4] // Y - mov esi, [esp + 4 + 8] // UV - mov edx, [esp + 4 + 12] // argb - mov ecx, [esp + 4 + 16] // width + push ebx + mov eax, [esp + 8 + 4] // Y + mov esi, [esp + 8 + 8] // VU + mov edx, [esp + 8 + 12] // argb + mov ebx, [esp + 8 + 16] // yuvconstants + mov ecx, [esp + 8 + 20] // width vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha convertloop: - READNV12_AVX2 - YUVTORGB_AVX2(kYvuConstants) + READNV21_AVX2 + YUVTORGB_AVX2(ebx) STOREARGB_AVX2 sub ecx, 16 jg convertloop + pop ebx pop esi vzeroupper ret @@ -2210,90 +2308,100 @@ void NV21ToARGBRow_AVX2(const uint8* y_buf, } #endif // HAS_NV21TOARGBROW_AVX2 -#ifdef HAS_I422TOBGRAROW_AVX2 -// 16 pixels -// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 BGRA (64 bytes). -// TODO(fbarchard): Use macros to reduce duplicate code. See SSSE3. +#ifdef HAS_YUY2TOARGBROW_AVX2 +// 16 pixels. +// 8 YUY2 values with 16 Y and 8 UV producing 16 ARGB (64 bytes). 
__declspec(naked) -void I422ToBGRARow_AVX2(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, +void YUY2ToARGBRow_AVX2(const uint8* src_yuy2, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { __asm { - push esi - push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // argb - mov ecx, [esp + 8 + 20] // width - sub edi, esi + push ebx + mov eax, [esp + 4 + 4] // yuy2 + mov edx, [esp + 4 + 8] // argb + mov ebx, [esp + 4 + 12] // yuvconstants + mov ecx, [esp + 4 + 16] // width vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha convertloop: - READYUV422_AVX2 - YUVTORGB_AVX2(kYuvConstants) + READYUY2_AVX2 + YUVTORGB_AVX2(ebx) + STOREARGB_AVX2 - // Step 3: Weave into BGRA - vpunpcklbw ymm1, ymm1, ymm0 // GB - vpermq ymm1, ymm1, 0xd8 - vpunpcklbw ymm2, ymm5, ymm2 // AR - vpermq ymm2, ymm2, 0xd8 - vpunpcklwd ymm0, ymm2, ymm1 // ARGB first 8 pixels - vpunpckhwd ymm2, ymm2, ymm1 // ARGB next 8 pixels - vmovdqu [edx], ymm0 - vmovdqu [edx + 32], ymm2 - lea edx, [edx + 64] sub ecx, 16 jg convertloop - pop edi - pop esi + pop ebx vzeroupper ret } } -#endif // HAS_I422TOBGRAROW_AVX2 +#endif // HAS_YUY2TOARGBROW_AVX2 + +#ifdef HAS_UYVYTOARGBROW_AVX2 +// 16 pixels. +// 8 UYVY values with 16 Y and 8 UV producing 16 ARGB (64 bytes). 
+__declspec(naked) +void UYVYToARGBRow_AVX2(const uint8* src_uyvy, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + __asm { + push ebx + mov eax, [esp + 4 + 4] // uyvy + mov edx, [esp + 4 + 8] // argb + mov ebx, [esp + 4 + 12] // yuvconstants + mov ecx, [esp + 4 + 16] // width + vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha + + convertloop: + READUYVY_AVX2 + YUVTORGB_AVX2(ebx) + STOREARGB_AVX2 + + sub ecx, 16 + jg convertloop + + pop ebx + vzeroupper + ret + } +} +#endif // HAS_UYVYTOARGBROW_AVX2 #ifdef HAS_I422TORGBAROW_AVX2 // 16 pixels // 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 RGBA (64 bytes). -// TODO(fbarchard): Use macros to reduce duplicate code. See SSSE3. __declspec(naked) void I422ToRGBARow_AVX2(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { __asm { push esi push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // argb - mov ecx, [esp + 8 + 20] // width + push ebx + mov eax, [esp + 12 + 4] // Y + mov esi, [esp + 12 + 8] // U + mov edi, [esp + 12 + 12] // V + mov edx, [esp + 12 + 16] // abgr + mov ebx, [esp + 12 + 20] // yuvconstants + mov ecx, [esp + 12 + 24] // width sub edi, esi vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha convertloop: READYUV422_AVX2 - YUVTORGB_AVX2(kYuvConstants) + YUVTORGB_AVX2(ebx) + STORERGBA_AVX2 - // Step 3: Weave into RGBA - vpunpcklbw ymm1, ymm1, ymm2 // GR - vpermq ymm1, ymm1, 0xd8 - vpunpcklbw ymm2, ymm5, ymm0 // AB - vpermq ymm2, ymm2, 0xd8 - vpunpcklwd ymm0, ymm2, ymm1 // ABGR first 8 pixels - vpunpckhwd ymm1, ymm2, ymm1 // ABGR next 8 pixels - vmovdqu [edx], ymm0 - vmovdqu [edx + 32], ymm1 - lea edx, [edx + 64] sub ecx, 16 jg convertloop + pop ebx pop edi pop esi vzeroupper @@ -2302,61 +2410,19 @@ void I422ToRGBARow_AVX2(const uint8* y_buf, } #endif // HAS_I422TORGBAROW_AVX2 
-#ifdef HAS_I422TOABGRROW_AVX2 -// 16 pixels -// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ABGR (64 bytes). -// TODO(fbarchard): Use macros to reduce duplicate code. See SSSE3. -__declspec(naked) -void I422ToABGRRow_AVX2(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* dst_argb, - int width) { - __asm { - push esi - push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // argb - mov ecx, [esp + 8 + 20] // width - sub edi, esi - vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha - - convertloop: - READYUV422_AVX2 - YUVTORGB_AVX2(kYuvConstants) - - // Step 3: Weave into ABGR - vpunpcklbw ymm1, ymm2, ymm1 // RG - vpermq ymm1, ymm1, 0xd8 - vpunpcklbw ymm2, ymm0, ymm5 // BA - vpermq ymm2, ymm2, 0xd8 - vpunpcklwd ymm0, ymm1, ymm2 // RGBA first 8 pixels - vpunpckhwd ymm1, ymm1, ymm2 // RGBA next 8 pixels - vmovdqu [edx], ymm0 - vmovdqu [edx + 32], ymm1 - lea edx, [edx + 64] - sub ecx, 16 - jg convertloop - - pop edi - pop esi - vzeroupper - ret - } -} -#endif // HAS_I422TOABGRROW_AVX2 - #if defined(HAS_I422TOARGBROW_SSSE3) // TODO(fbarchard): Read that does half size on Y and treats 420 as 444. +// Allows a conversion with half size scaling. // Read 8 UV from 444. #define READYUV444 __asm { \ - __asm movq xmm0, qword ptr [esi] /* U */ /* NOLINT */ \ - __asm movq xmm1, qword ptr [esi + edi] /* V */ /* NOLINT */ \ + __asm movq xmm0, qword ptr [esi] /* U */ \ + __asm movq xmm1, qword ptr [esi + edi] /* V */ \ __asm lea esi, [esi + 8] \ __asm punpcklbw xmm0, xmm1 /* UV */ \ + __asm movq xmm4, qword ptr [eax] \ + __asm punpcklbw xmm4, xmm4 \ + __asm lea eax, [eax + 8] \ } // Read 4 UV from 422, upsample to 8 UV. 
@@ -2366,50 +2432,99 @@ void I422ToABGRRow_AVX2(const uint8* y_buf, __asm lea esi, [esi + 4] \ __asm punpcklbw xmm0, xmm1 /* UV */ \ __asm punpcklwd xmm0, xmm0 /* UVUV (upsample) */ \ + __asm movq xmm4, qword ptr [eax] \ + __asm punpcklbw xmm4, xmm4 \ + __asm lea eax, [eax + 8] \ + } + +// Read 4 UV from 422, upsample to 8 UV. With 8 Alpha. +#define READYUVA422 __asm { \ + __asm movd xmm0, [esi] /* U */ \ + __asm movd xmm1, [esi + edi] /* V */ \ + __asm lea esi, [esi + 4] \ + __asm punpcklbw xmm0, xmm1 /* UV */ \ + __asm punpcklwd xmm0, xmm0 /* UVUV (upsample) */ \ + __asm movq xmm4, qword ptr [eax] /* Y */ \ + __asm punpcklbw xmm4, xmm4 \ + __asm lea eax, [eax + 8] \ + __asm movq xmm5, qword ptr [ebp] /* A */ \ + __asm lea ebp, [ebp + 8] \ } // Read 2 UV from 411, upsample to 8 UV. -#define READYUV411 __asm { \ - __asm movzx ebx, word ptr [esi] /* U */ /* NOLINT */ \ +// drmemory fails with memory fault if pinsrw used. libyuv bug: 525 +// __asm pinsrw xmm0, [esi], 0 /* U */ +// __asm pinsrw xmm1, [esi + edi], 0 /* V */ +#define READYUV411_EBX __asm { \ + __asm movzx ebx, word ptr [esi] /* U */ \ __asm movd xmm0, ebx \ - __asm movzx ebx, word ptr [esi + edi] /* V */ /* NOLINT */ \ + __asm movzx ebx, word ptr [esi + edi] /* V */ \ __asm movd xmm1, ebx \ __asm lea esi, [esi + 2] \ - __asm punpcklbw xmm0, xmm1 /* UV */ \ - __asm punpcklwd xmm0, xmm0 /* UVUV (upsample) */ \ - __asm punpckldq xmm0, xmm0 /* UVUVUVUV (upsample) */ \ + __asm punpcklbw xmm0, xmm1 /* UV */ \ + __asm punpcklwd xmm0, xmm0 /* UVUV (upsample) */ \ + __asm punpckldq xmm0, xmm0 /* UVUVUVUV (upsample) */ \ + __asm movq xmm4, qword ptr [eax] \ + __asm punpcklbw xmm4, xmm4 \ + __asm lea eax, [eax + 8] \ } // Read 4 UV from NV12, upsample to 8 UV. 
#define READNV12 __asm { \ - __asm movq xmm0, qword ptr [esi] /* UV */ /* NOLINT */ \ + __asm movq xmm0, qword ptr [esi] /* UV */ \ __asm lea esi, [esi + 8] \ __asm punpcklwd xmm0, xmm0 /* UVUV (upsample) */ \ + __asm movq xmm4, qword ptr [eax] \ + __asm punpcklbw xmm4, xmm4 \ + __asm lea eax, [eax + 8] \ + } + +// Read 4 VU from NV21, upsample to 8 UV. +#define READNV21 __asm { \ + __asm movq xmm0, qword ptr [esi] /* UV */ \ + __asm lea esi, [esi + 8] \ + __asm pshufb xmm0, xmmword ptr kShuffleNV21 \ + __asm movq xmm4, qword ptr [eax] \ + __asm punpcklbw xmm4, xmm4 \ + __asm lea eax, [eax + 8] \ + } + +// Read 4 YUY2 with 8 Y and upsample 4 UV to 8 UV. +#define READYUY2 __asm { \ + __asm movdqu xmm4, [eax] /* YUY2 */ \ + __asm pshufb xmm4, xmmword ptr kShuffleYUY2Y \ + __asm movdqu xmm0, [eax] /* UV */ \ + __asm pshufb xmm0, xmmword ptr kShuffleYUY2UV \ + __asm lea eax, [eax + 16] \ + } + +// Read 4 UYVY with 8 Y and upsample 4 UV to 8 UV. +#define READUYVY __asm { \ + __asm movdqu xmm4, [eax] /* UYVY */ \ + __asm pshufb xmm4, xmmword ptr kShuffleUYVYY \ + __asm movdqu xmm0, [eax] /* UV */ \ + __asm pshufb xmm0, xmmword ptr kShuffleUYVYUV \ + __asm lea eax, [eax + 16] \ } // Convert 8 pixels: 8 UV and 8 Y. 
#define YUVTORGB(YuvConstants) __asm { \ - /* Step 1: Find 4 UV contributions to 8 R,G,B values */ \ __asm movdqa xmm1, xmm0 \ __asm movdqa xmm2, xmm0 \ __asm movdqa xmm3, xmm0 \ - __asm movdqa xmm0, YuvConstants.kUVBiasB /* unbias back to signed */ \ - __asm pmaddubsw xmm1, YuvConstants.kUVToB /* scale B UV */ \ + __asm movdqa xmm0, xmmword ptr [YuvConstants + KUVBIASB] \ + __asm pmaddubsw xmm1, xmmword ptr [YuvConstants + KUVTOB] \ __asm psubw xmm0, xmm1 \ - __asm movdqa xmm1, YuvConstants.kUVBiasG \ - __asm pmaddubsw xmm2, YuvConstants.kUVToG /* scale G UV */ \ + __asm movdqa xmm1, xmmword ptr [YuvConstants + KUVBIASG] \ + __asm pmaddubsw xmm2, xmmword ptr [YuvConstants + KUVTOG] \ __asm psubw xmm1, xmm2 \ - __asm movdqa xmm2, YuvConstants.kUVBiasR \ - __asm pmaddubsw xmm3, YuvConstants.kUVToR /* scale R UV */ \ + __asm movdqa xmm2, xmmword ptr [YuvConstants + KUVBIASR] \ + __asm pmaddubsw xmm3, xmmword ptr [YuvConstants + KUVTOR] \ __asm psubw xmm2, xmm3 \ - /* Step 2: Find Y contribution to 8 R,G,B values */ \ - __asm movq xmm3, qword ptr [eax] /* NOLINT */ \ - __asm lea eax, [eax + 8] \ - __asm punpcklbw xmm3, xmm3 \ - __asm pmulhuw xmm3, YuvConstants.kYToRgb \ - __asm paddsw xmm0, xmm3 /* B += Y */ \ - __asm paddsw xmm1, xmm3 /* G += Y */ \ - __asm paddsw xmm2, xmm3 /* R += Y */ \ + __asm pmulhuw xmm4, xmmword ptr [YuvConstants + KYTORGB] \ + __asm paddsw xmm0, xmm4 /* B += Y */ \ + __asm paddsw xmm1, xmm4 /* G += Y */ \ + __asm paddsw xmm2, xmm4 /* R += Y */ \ __asm psraw xmm0, 6 \ __asm psraw xmm1, 6 \ __asm psraw xmm2, 6 \ @@ -2420,7 +2535,6 @@ void I422ToABGRRow_AVX2(const uint8* y_buf, // Store 8 ARGB values. #define STOREARGB __asm { \ - /* Step 3: Weave into ARGB */ \ __asm punpcklbw xmm0, xmm1 /* BG */ \ __asm punpcklbw xmm2, xmm5 /* RA */ \ __asm movdqa xmm1, xmm0 \ @@ -2433,7 +2547,6 @@ void I422ToABGRRow_AVX2(const uint8* y_buf, // Store 8 BGRA values. 
#define STOREBGRA __asm { \ - /* Step 3: Weave into BGRA */ \ __asm pcmpeqb xmm5, xmm5 /* generate 0xffffffff for alpha */ \ __asm punpcklbw xmm1, xmm0 /* GB */ \ __asm punpcklbw xmm5, xmm2 /* AR */ \ @@ -2445,22 +2558,8 @@ void I422ToABGRRow_AVX2(const uint8* y_buf, __asm lea edx, [edx + 32] \ } -// Store 8 ABGR values. -#define STOREABGR __asm { \ - /* Step 3: Weave into ABGR */ \ - __asm punpcklbw xmm2, xmm1 /* RG */ \ - __asm punpcklbw xmm0, xmm5 /* BA */ \ - __asm movdqa xmm1, xmm2 \ - __asm punpcklwd xmm2, xmm0 /* RGBA first 4 pixels */ \ - __asm punpckhwd xmm1, xmm0 /* RGBA next 4 pixels */ \ - __asm movdqu 0[edx], xmm2 \ - __asm movdqu 16[edx], xmm1 \ - __asm lea edx, [edx + 32] \ - } - // Store 8 RGBA values. #define STORERGBA __asm { \ - /* Step 3: Weave into RGBA */ \ __asm pcmpeqb xmm5, xmm5 /* generate 0xffffffff for alpha */ \ __asm punpcklbw xmm1, xmm2 /* GR */ \ __asm punpcklbw xmm5, xmm0 /* AB */ \ @@ -2474,30 +2573,13 @@ void I422ToABGRRow_AVX2(const uint8* y_buf, // Store 8 RGB24 values. #define STORERGB24 __asm { \ - /* Step 3: Weave into RRGB */ \ + /* Weave into RRGB */ \ __asm punpcklbw xmm0, xmm1 /* BG */ \ __asm punpcklbw xmm2, xmm2 /* RR */ \ __asm movdqa xmm1, xmm0 \ __asm punpcklwd xmm0, xmm2 /* BGRR first 4 pixels */ \ __asm punpckhwd xmm1, xmm2 /* BGRR next 4 pixels */ \ - /* Step 4: RRGB -> RGB24 */ \ - __asm pshufb xmm0, xmm5 /* Pack first 8 and last 4 bytes. */ \ - __asm pshufb xmm1, xmm6 /* Pack first 12 bytes. */ \ - __asm palignr xmm1, xmm0, 12 /* last 4 bytes of xmm0 + 12 xmm1 */ \ - __asm movq qword ptr 0[edx], xmm0 /* First 8 bytes */ \ - __asm movdqu 8[edx], xmm1 /* Last 16 bytes */ \ - __asm lea edx, [edx + 24] \ - } - -// Store 8 RAW values. 
-#define STORERAW __asm { \ - /* Step 3: Weave into RRGB */ \ - __asm punpcklbw xmm0, xmm1 /* BG */ \ - __asm punpcklbw xmm2, xmm2 /* RR */ \ - __asm movdqa xmm1, xmm0 \ - __asm punpcklwd xmm0, xmm2 /* BGRR first 4 pixels */ \ - __asm punpckhwd xmm1, xmm2 /* BGRR next 4 pixels */ \ - /* Step 4: RRGB -> RAW */ \ + /* RRGB -> RGB24 */ \ __asm pshufb xmm0, xmm5 /* Pack first 8 and last 4 bytes. */ \ __asm pshufb xmm1, xmm6 /* Pack first 12 bytes. */ \ __asm palignr xmm1, xmm0, 12 /* last 4 bytes of xmm0 + 12 xmm1 */ \ @@ -2508,13 +2590,13 @@ void I422ToABGRRow_AVX2(const uint8* y_buf, // Store 8 RGB565 values. #define STORERGB565 __asm { \ - /* Step 3: Weave into RRGB */ \ + /* Weave into RRGB */ \ __asm punpcklbw xmm0, xmm1 /* BG */ \ __asm punpcklbw xmm2, xmm2 /* RR */ \ __asm movdqa xmm1, xmm0 \ __asm punpcklwd xmm0, xmm2 /* BGRR first 4 pixels */ \ __asm punpckhwd xmm1, xmm2 /* BGRR next 4 pixels */ \ - /* Step 4: RRGB -> RGB565 */ \ + /* RRGB -> RGB565 */ \ __asm movdqa xmm3, xmm0 /* B first 4 pixels of argb */ \ __asm movdqa xmm2, xmm0 /* G */ \ __asm pslld xmm0, 8 /* R */ \ @@ -2549,26 +2631,30 @@ void I444ToARGBRow_SSSE3(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { __asm { push esi push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // argb - mov ecx, [esp + 8 + 20] // width + push ebx + mov eax, [esp + 12 + 4] // Y + mov esi, [esp + 12 + 8] // U + mov edi, [esp + 12 + 12] // V + mov edx, [esp + 12 + 16] // argb + mov ebx, [esp + 12 + 20] // yuvconstants + mov ecx, [esp + 12 + 24] // width sub edi, esi - pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha + pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha convertloop: READYUV444 - YUVTORGB(kYuvConstants) + YUVTORGB(ebx) STOREARGB sub ecx, 8 jg convertloop + pop ebx pop edi pop esi ret @@ -2582,61 +2668,31 @@ void I422ToRGB24Row_SSSE3(const uint8* 
y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_rgb24, + const struct YuvConstants* yuvconstants, int width) { __asm { push esi push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // rgb24 - mov ecx, [esp + 8 + 20] // width + push ebx + mov eax, [esp + 12 + 4] // Y + mov esi, [esp + 12 + 8] // U + mov edi, [esp + 12 + 12] // V + mov edx, [esp + 12 + 16] // argb + mov ebx, [esp + 12 + 20] // yuvconstants + mov ecx, [esp + 12 + 24] // width sub edi, esi - movdqa xmm5, kShuffleMaskARGBToRGB24_0 - movdqa xmm6, kShuffleMaskARGBToRGB24 + movdqa xmm5, xmmword ptr kShuffleMaskARGBToRGB24_0 + movdqa xmm6, xmmword ptr kShuffleMaskARGBToRGB24 convertloop: READYUV422 - YUVTORGB(kYuvConstants) + YUVTORGB(ebx) STORERGB24 sub ecx, 8 jg convertloop - pop edi - pop esi - ret - } -} - -// 8 pixels. -// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RAW (24 bytes). -__declspec(naked) -void I422ToRAWRow_SSSE3(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* dst_raw, - int width) { - __asm { - push esi - push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // raw - mov ecx, [esp + 8 + 20] // width - sub edi, esi - movdqa xmm5, kShuffleMaskARGBToRAW_0 - movdqa xmm6, kShuffleMaskARGBToRAW - - convertloop: - READYUV422 - YUVTORGB(kYuvConstants) - STORERAW - - sub ecx, 8 - jg convertloop - + pop ebx pop edi pop esi ret @@ -2650,15 +2706,18 @@ void I422ToRGB565Row_SSSE3(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* rgb565_buf, + const struct YuvConstants* yuvconstants, int width) { __asm { push esi push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // rgb565 - mov ecx, [esp + 8 + 20] // width + push ebx + mov eax, [esp + 12 + 4] // Y + mov esi, [esp + 12 + 8] // U + mov edi, [esp + 12 + 12] // V + mov edx, 
[esp + 12 + 16] // argb + mov ebx, [esp + 12 + 20] // yuvconstants + mov ecx, [esp + 12 + 24] // width sub edi, esi pcmpeqb xmm5, xmm5 // generate mask 0x0000001f psrld xmm5, 27 @@ -2670,12 +2729,13 @@ void I422ToRGB565Row_SSSE3(const uint8* y_buf, convertloop: READYUV422 - YUVTORGB(kYuvConstants) + YUVTORGB(ebx) STORERGB565 sub ecx, 8 jg convertloop + pop ebx pop edi pop esi ret @@ -2689,26 +2749,30 @@ void I422ToARGBRow_SSSE3(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { __asm { push esi push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // argb - mov ecx, [esp + 8 + 20] // width + push ebx + mov eax, [esp + 12 + 4] // Y + mov esi, [esp + 12 + 8] // U + mov edi, [esp + 12 + 12] // V + mov edx, [esp + 12 + 16] // argb + mov ebx, [esp + 12 + 20] // yuvconstants + mov ecx, [esp + 12 + 24] // width sub edi, esi pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha convertloop: READYUV422 - YUVTORGB(kYuvConstants) + YUVTORGB(ebx) STOREARGB sub ecx, 8 jg convertloop + pop ebx pop edi pop esi ret @@ -2716,33 +2780,39 @@ void I422ToARGBRow_SSSE3(const uint8* y_buf, } // 8 pixels. -// JPeg color space version of I422ToARGB -// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes). +// 4 UV values upsampled to 8 UV, mixed with 8 Y and 8 A producing 8 ARGB. 
__declspec(naked) -void J422ToARGBRow_SSSE3(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* dst_argb, - int width) { +void I422AlphaToARGBRow_SSSE3(const uint8* y_buf, + const uint8* u_buf, + const uint8* v_buf, + const uint8* a_buf, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { __asm { push esi push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // argb - mov ecx, [esp + 8 + 20] // width + push ebx + push ebp + mov eax, [esp + 16 + 4] // Y + mov esi, [esp + 16 + 8] // U + mov edi, [esp + 16 + 12] // V + mov ebp, [esp + 16 + 16] // A + mov edx, [esp + 16 + 20] // argb + mov ebx, [esp + 16 + 24] // yuvconstants + mov ecx, [esp + 16 + 28] // width sub edi, esi - pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha convertloop: - READYUV422 - YUVTORGB(kYuvJConstants) + READYUVA422 + YUVTORGB(ebx) STOREARGB sub ecx, 8 jg convertloop + pop ebp + pop ebx pop edi pop esi ret @@ -2757,30 +2827,34 @@ void I411ToARGBRow_SSSE3(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { __asm { - push ebx push esi push edi - mov eax, [esp + 12 + 4] // Y - mov esi, [esp + 12 + 8] // U - mov edi, [esp + 12 + 12] // V - mov edx, [esp + 12 + 16] // argb - mov ecx, [esp + 12 + 20] // width + push ebx + push ebp + mov eax, [esp + 16 + 4] // Y + mov esi, [esp + 16 + 8] // U + mov edi, [esp + 16 + 12] // V + mov edx, [esp + 16 + 16] // abgr + mov ebp, [esp + 16 + 20] // yuvconstants + mov ecx, [esp + 16 + 24] // width sub edi, esi pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha convertloop: - READYUV411 // modifies EBX - YUVTORGB(kYuvConstants) + READYUV411_EBX + YUVTORGB(ebp) STOREARGB sub ecx, 8 jg convertloop + pop ebp + pop ebx pop edi pop esi - pop ebx ret } } @@ -2791,113 +2865,116 @@ __declspec(naked) void NV12ToARGBRow_SSSE3(const uint8* y_buf, const uint8* uv_buf, uint8* 
dst_argb, + const struct YuvConstants* yuvconstants, int width) { __asm { push esi - mov eax, [esp + 4 + 4] // Y - mov esi, [esp + 4 + 8] // UV - mov edx, [esp + 4 + 12] // argb - mov ecx, [esp + 4 + 16] // width + push ebx + mov eax, [esp + 8 + 4] // Y + mov esi, [esp + 8 + 8] // UV + mov edx, [esp + 8 + 12] // argb + mov ebx, [esp + 8 + 16] // yuvconstants + mov ecx, [esp + 8 + 20] // width pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha convertloop: READNV12 - YUVTORGB(kYuvConstants) + YUVTORGB(ebx) STOREARGB sub ecx, 8 jg convertloop + pop ebx pop esi ret } } // 8 pixels. -// 4 VU values upsampled to 8 VU, mixed with 8 Y producing 8 ARGB (32 bytes). +// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes). __declspec(naked) void NV21ToARGBRow_SSSE3(const uint8* y_buf, - const uint8* uv_buf, + const uint8* vu_buf, uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { __asm { push esi - mov eax, [esp + 4 + 4] // Y - mov esi, [esp + 4 + 8] // UV - mov edx, [esp + 4 + 12] // argb - mov ecx, [esp + 4 + 16] // width + push ebx + mov eax, [esp + 8 + 4] // Y + mov esi, [esp + 8 + 8] // VU + mov edx, [esp + 8 + 12] // argb + mov ebx, [esp + 8 + 16] // yuvconstants + mov ecx, [esp + 8 + 20] // width pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha convertloop: - READNV12 - YUVTORGB(kYvuConstants) + READNV21 + YUVTORGB(ebx) STOREARGB sub ecx, 8 jg convertloop + pop ebx pop esi ret } } +// 8 pixels. +// 4 YUY2 values with 8 Y and 4 UV producing 8 ARGB (32 bytes). 
__declspec(naked) -void I422ToBGRARow_SSSE3(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* dst_bgra, +void YUY2ToARGBRow_SSSE3(const uint8* src_yuy2, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, int width) { __asm { - push esi - push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // bgra - mov ecx, [esp + 8 + 20] // width - sub edi, esi - - convertloop: - READYUV422 - YUVTORGB(kYuvConstants) - STOREBGRA - - sub ecx, 8 - jg convertloop - - pop edi - pop esi - ret - } -} - -__declspec(naked) -void I422ToABGRRow_SSSE3(const uint8* y_buf, - const uint8* u_buf, - const uint8* v_buf, - uint8* dst_abgr, - int width) { - __asm { - push esi - push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // abgr - mov ecx, [esp + 8 + 20] // width - sub edi, esi + push ebx + mov eax, [esp + 4 + 4] // yuy2 + mov edx, [esp + 4 + 8] // argb + mov ebx, [esp + 4 + 12] // yuvconstants + mov ecx, [esp + 4 + 16] // width pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha convertloop: - READYUV422 - YUVTORGB(kYuvConstants) - STOREABGR + READYUY2 + YUVTORGB(ebx) + STOREARGB sub ecx, 8 jg convertloop - pop edi - pop esi + pop ebx + ret + } +} + +// 8 pixels. +// 4 UYVY values with 8 Y and 4 UV producing 8 ARGB (32 bytes). 
+__declspec(naked) +void UYVYToARGBRow_SSSE3(const uint8* src_uyvy, + uint8* dst_argb, + const struct YuvConstants* yuvconstants, + int width) { + __asm { + push ebx + mov eax, [esp + 4 + 4] // uyvy + mov edx, [esp + 4 + 8] // argb + mov ebx, [esp + 4 + 12] // yuvconstants + mov ecx, [esp + 4 + 16] // width + pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha + + convertloop: + READUYVY + YUVTORGB(ebx) + STOREARGB + + sub ecx, 8 + jg convertloop + + pop ebx ret } } @@ -2907,31 +2984,34 @@ void I422ToRGBARow_SSSE3(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, uint8* dst_rgba, + const struct YuvConstants* yuvconstants, int width) { __asm { push esi push edi - mov eax, [esp + 8 + 4] // Y - mov esi, [esp + 8 + 8] // U - mov edi, [esp + 8 + 12] // V - mov edx, [esp + 8 + 16] // rgba - mov ecx, [esp + 8 + 20] // width + push ebx + mov eax, [esp + 12 + 4] // Y + mov esi, [esp + 12 + 8] // U + mov edi, [esp + 12 + 12] // V + mov edx, [esp + 12 + 16] // argb + mov ebx, [esp + 12 + 20] // yuvconstants + mov ecx, [esp + 12 + 24] // width sub edi, esi convertloop: READYUV422 - YUVTORGB(kYuvConstants) + YUVTORGB(ebx) STORERGBA sub ecx, 8 jg convertloop + pop ebx pop edi pop esi ret } } - #endif // HAS_I422TOARGBROW_SSSE3 #ifdef HAS_I400TOARGBROW_SSE2 @@ -3045,7 +3125,7 @@ void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width) { mov eax, [esp + 4] // src mov edx, [esp + 8] // dst mov ecx, [esp + 12] // width - movdqa xmm5, kShuffleMirror + movdqa xmm5, xmmword ptr kShuffleMirror convertloop: movdqu xmm0, [eax - 16 + ecx] @@ -3066,7 +3146,7 @@ void MirrorRow_AVX2(const uint8* src, uint8* dst, int width) { mov eax, [esp + 4] // src mov edx, [esp + 8] // dst mov ecx, [esp + 12] // width - vbroadcastf128 ymm5, kShuffleMirror + vbroadcastf128 ymm5, xmmword ptr kShuffleMirror convertloop: vmovdqu ymm0, [eax - 32 + ecx] @@ -3082,33 +3162,7 @@ void MirrorRow_AVX2(const uint8* src, uint8* dst, int width) { } #endif // HAS_MIRRORROW_AVX2 -#ifdef HAS_MIRRORROW_SSE2 
-__declspec(naked) -void MirrorRow_SSE2(const uint8* src, uint8* dst, int width) { - __asm { - mov eax, [esp + 4] // src - mov edx, [esp + 8] // dst - mov ecx, [esp + 12] // width - - convertloop: - movdqu xmm0, [eax - 16 + ecx] - movdqa xmm1, xmm0 // swap bytes - psllw xmm0, 8 - psrlw xmm1, 8 - por xmm0, xmm1 - pshuflw xmm0, xmm0, 0x1b // swap words - pshufhw xmm0, xmm0, 0x1b - pshufd xmm0, xmm0, 0x4e // swap qwords - movdqu [edx], xmm0 - lea edx, [edx + 16] - sub ecx, 16 - jg convertloop - ret - } -} -#endif // HAS_MIRRORROW_SSE2 - -#ifdef HAS_MIRRORROW_UV_SSSE3 +#ifdef HAS_MIRRORUVROW_SSSE3 // Shuffle table for reversing the bytes of UV channels. static const uvec8 kShuffleMirrorUV = { 14u, 12u, 10u, 8u, 6u, 4u, 2u, 0u, 15u, 13u, 11u, 9u, 7u, 5u, 3u, 1u @@ -3123,7 +3177,7 @@ void MirrorUVRow_SSSE3(const uint8* src, uint8* dst_u, uint8* dst_v, mov edx, [esp + 4 + 8] // dst_u mov edi, [esp + 4 + 12] // dst_v mov ecx, [esp + 4 + 16] // width - movdqa xmm1, kShuffleMirrorUV + movdqa xmm1, xmmword ptr kShuffleMirrorUV lea eax, [eax + ecx * 2 - 16] sub edi, edx @@ -3141,7 +3195,7 @@ void MirrorUVRow_SSSE3(const uint8* src, uint8* dst_u, uint8* dst_v, ret } } -#endif // HAS_MIRRORROW_UV_SSSE3 +#endif // HAS_MIRRORUVROW_SSSE3 #ifdef HAS_ARGBMIRRORROW_SSE2 __declspec(naked) @@ -3177,7 +3231,7 @@ void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width) { mov eax, [esp + 4] // src mov edx, [esp + 8] // dst mov ecx, [esp + 12] // width - vmovdqu ymm5, kARGBShuffleMirror_AVX2 + vmovdqu ymm5, ymmword ptr kARGBShuffleMirror_AVX2 convertloop: vpermd ymm0, ymm5, [eax - 32 + ecx * 4] // permute dword order @@ -3193,13 +3247,14 @@ void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width) { #ifdef HAS_SPLITUVROW_SSE2 __declspec(naked) -void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) { +void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, + int width) { __asm { push edi mov eax, [esp + 4 + 4] // src_uv mov edx, [esp + 4 
+ 8] // dst_u mov edi, [esp + 4 + 12] // dst_v - mov ecx, [esp + 4 + 16] // pix + mov ecx, [esp + 4 + 16] // width pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff psrlw xmm5, 8 sub edi, edx @@ -3231,13 +3286,14 @@ void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) { #ifdef HAS_SPLITUVROW_AVX2 __declspec(naked) -void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) { +void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, + int width) { __asm { push edi mov eax, [esp + 4 + 4] // src_uv mov edx, [esp + 4 + 8] // dst_u mov edi, [esp + 4 + 12] // dst_v - mov ecx, [esp + 4 + 16] // pix + mov ecx, [esp + 4 + 16] // width vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0x00ff00ff vpsrlw ymm5, ymm5, 8 sub edi, edx @@ -3339,8 +3395,23 @@ void CopyRow_SSE2(const uint8* src, uint8* dst, int count) { mov eax, [esp + 4] // src mov edx, [esp + 8] // dst mov ecx, [esp + 12] // count + test eax, 15 + jne convertloopu + test edx, 15 + jne convertloopu - convertloop: + convertloopa: + movdqa xmm0, [eax] + movdqa xmm1, [eax + 16] + lea eax, [eax + 32] + movdqa [edx], xmm0 + movdqa [edx + 16], xmm1 + lea edx, [edx + 32] + sub ecx, 32 + jg convertloopa + ret + + convertloopu: movdqu xmm0, [eax] movdqu xmm1, [eax + 16] lea eax, [eax + 32] @@ -3348,7 +3419,7 @@ void CopyRow_SSE2(const uint8* src, uint8* dst, int count) { movdqu [edx + 16], xmm1 lea edx, [edx + 32] sub ecx, 32 - jg convertloop + jg convertloopu ret } } @@ -3460,6 +3531,33 @@ void ARGBCopyAlphaRow_AVX2(const uint8* src, uint8* dst, int width) { } #endif // HAS_ARGBCOPYALPHAROW_AVX2 +#ifdef HAS_ARGBEXTRACTALPHAROW_SSE2 +// width in pixels +__declspec(naked) +void ARGBExtractAlphaRow_SSE2(const uint8* src_argb, uint8* dst_a, int width) { + __asm { + mov eax, [esp + 4] // src_argb + mov edx, [esp + 8] // dst_a + mov ecx, [esp + 12] // width + + extractloop: + movdqu xmm0, [eax] + movdqu xmm1, [eax + 16] + lea eax, [eax + 32] + psrld xmm0, 24 + psrld xmm1, 24 + 
packssdw xmm0, xmm1 + packuswb xmm0, xmm0 + movq qword ptr [edx], xmm0 + lea edx, [edx + 8] + sub ecx, 8 + jg extractloop + + ret + } +} +#endif // HAS_ARGBEXTRACTALPHAROW_SSE2 + #ifdef HAS_ARGBCOPYYTOALPHAROW_SSE2 // width in pixels __declspec(naked) @@ -3579,12 +3677,11 @@ void ARGBSetRow_X86(uint8* dst_argb, uint32 v32, int count) { #ifdef HAS_YUY2TOYROW_AVX2 __declspec(naked) -void YUY2ToYRow_AVX2(const uint8* src_yuy2, - uint8* dst_y, int pix) { +void YUY2ToYRow_AVX2(const uint8* src_yuy2, uint8* dst_y, int width) { __asm { mov eax, [esp + 4] // src_yuy2 mov edx, [esp + 8] // dst_y - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0x00ff00ff vpsrlw ymm5, ymm5, 8 @@ -3607,7 +3704,7 @@ void YUY2ToYRow_AVX2(const uint8* src_yuy2, __declspec(naked) void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { __asm { push esi push edi @@ -3615,7 +3712,7 @@ void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2, mov esi, [esp + 8 + 8] // stride_yuy2 mov edx, [esp + 8 + 12] // dst_u mov edi, [esp + 8 + 16] // dst_v - mov ecx, [esp + 8 + 20] // pix + mov ecx, [esp + 8 + 20] // width vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0x00ff00ff vpsrlw ymm5, ymm5, 8 sub edi, edx @@ -3651,13 +3748,13 @@ void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2, __declspec(naked) void YUY2ToUV422Row_AVX2(const uint8* src_yuy2, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { __asm { push edi mov eax, [esp + 4 + 4] // src_yuy2 mov edx, [esp + 4 + 8] // dst_u mov edi, [esp + 4 + 12] // dst_v - mov ecx, [esp + 4 + 16] // pix + mov ecx, [esp + 4 + 16] // width vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0x00ff00ff vpsrlw ymm5, ymm5, 8 sub edi, edx @@ -3690,11 +3787,11 @@ void YUY2ToUV422Row_AVX2(const uint8* src_yuy2, __declspec(naked) void UYVYToYRow_AVX2(const uint8* src_uyvy, - uint8* dst_y, int pix) { 
+ uint8* dst_y, int width) { __asm { mov eax, [esp + 4] // src_uyvy mov edx, [esp + 8] // dst_y - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width convertloop: vmovdqu ymm0, [eax] @@ -3715,7 +3812,7 @@ void UYVYToYRow_AVX2(const uint8* src_uyvy, __declspec(naked) void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { __asm { push esi push edi @@ -3723,7 +3820,7 @@ void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy, mov esi, [esp + 8 + 8] // stride_yuy2 mov edx, [esp + 8 + 12] // dst_u mov edi, [esp + 8 + 16] // dst_v - mov ecx, [esp + 8 + 20] // pix + mov ecx, [esp + 8 + 20] // width vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0x00ff00ff vpsrlw ymm5, ymm5, 8 sub edi, edx @@ -3759,13 +3856,13 @@ void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy, __declspec(naked) void UYVYToUV422Row_AVX2(const uint8* src_uyvy, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { __asm { push edi mov eax, [esp + 4 + 4] // src_yuy2 mov edx, [esp + 4 + 8] // dst_u mov edi, [esp + 4 + 12] // dst_v - mov ecx, [esp + 4 + 16] // pix + mov ecx, [esp + 4 + 16] // width vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0x00ff00ff vpsrlw ymm5, ymm5, 8 sub edi, edx @@ -3800,11 +3897,11 @@ void UYVYToUV422Row_AVX2(const uint8* src_uyvy, #ifdef HAS_YUY2TOYROW_SSE2 __declspec(naked) void YUY2ToYRow_SSE2(const uint8* src_yuy2, - uint8* dst_y, int pix) { + uint8* dst_y, int width) { __asm { mov eax, [esp + 4] // src_yuy2 mov edx, [esp + 8] // dst_y - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff psrlw xmm5, 8 @@ -3825,7 +3922,7 @@ void YUY2ToYRow_SSE2(const uint8* src_yuy2, __declspec(naked) void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { __asm { push esi push edi @@ -3833,7 +3930,7 @@ void 
YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2, mov esi, [esp + 8 + 8] // stride_yuy2 mov edx, [esp + 8 + 12] // dst_u mov edi, [esp + 8 + 16] // dst_v - mov ecx, [esp + 8 + 20] // pix + mov ecx, [esp + 8 + 20] // width pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff psrlw xmm5, 8 sub edi, edx @@ -3868,13 +3965,13 @@ void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2, __declspec(naked) void YUY2ToUV422Row_SSE2(const uint8* src_yuy2, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { __asm { push edi mov eax, [esp + 4 + 4] // src_yuy2 mov edx, [esp + 4 + 8] // dst_u mov edi, [esp + 4 + 12] // dst_v - mov ecx, [esp + 4 + 16] // pix + mov ecx, [esp + 4 + 16] // width pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff psrlw xmm5, 8 sub edi, edx @@ -3904,11 +4001,11 @@ void YUY2ToUV422Row_SSE2(const uint8* src_yuy2, __declspec(naked) void UYVYToYRow_SSE2(const uint8* src_uyvy, - uint8* dst_y, int pix) { + uint8* dst_y, int width) { __asm { mov eax, [esp + 4] // src_uyvy mov edx, [esp + 8] // dst_y - mov ecx, [esp + 12] // pix + mov ecx, [esp + 12] // width convertloop: movdqu xmm0, [eax] @@ -3927,7 +4024,7 @@ void UYVYToYRow_SSE2(const uint8* src_uyvy, __declspec(naked) void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { __asm { push esi push edi @@ -3935,7 +4032,7 @@ void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy, mov esi, [esp + 8 + 8] // stride_yuy2 mov edx, [esp + 8 + 12] // dst_u mov edi, [esp + 8 + 16] // dst_v - mov ecx, [esp + 8 + 20] // pix + mov ecx, [esp + 8 + 20] // width pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff psrlw xmm5, 8 sub edi, edx @@ -3970,13 +4067,13 @@ void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy, __declspec(naked) void UYVYToUV422Row_SSE2(const uint8* src_uyvy, - uint8* dst_u, uint8* dst_v, int pix) { + uint8* dst_u, uint8* dst_v, int width) { __asm { push edi mov eax, [esp 
+ 4 + 4] // src_yuy2 mov edx, [esp + 4 + 8] // dst_u mov edi, [esp + 4 + 12] // dst_v - mov ecx, [esp + 4 + 16] // pix + mov ecx, [esp + 4 + 16] // width pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff psrlw xmm5, 8 sub edi, edx @@ -4005,92 +4102,122 @@ void UYVYToUV422Row_SSE2(const uint8* src_uyvy, } #endif // HAS_YUY2TOYROW_SSE2 -#ifdef HAS_ARGBBLENDROW_SSE2 +#ifdef HAS_BLENDPLANEROW_SSSE3 // Blend 8 pixels at a time. +// unsigned version of math +// =((A2*C2)+(B2*(255-C2))+255)/256 +// signed version of math +// =(((A2-128)*C2)+((B2-128)*(255-C2))+32768+127)/256 __declspec(naked) -void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1, - uint8* dst_argb, int width) { +void BlendPlaneRow_SSSE3(const uint8* src0, const uint8* src1, + const uint8* alpha, uint8* dst, int width) { __asm { push esi - mov eax, [esp + 4 + 4] // src_argb0 - mov esi, [esp + 4 + 8] // src_argb1 - mov edx, [esp + 4 + 12] // dst_argb - mov ecx, [esp + 4 + 16] // width - pcmpeqb xmm7, xmm7 // generate constant 1 - psrlw xmm7, 15 - pcmpeqb xmm6, xmm6 // generate mask 0x00ff00ff - psrlw xmm6, 8 + push edi pcmpeqb xmm5, xmm5 // generate mask 0xff00ff00 psllw xmm5, 8 - pcmpeqb xmm4, xmm4 // generate mask 0xff000000 - pslld xmm4, 24 - sub ecx, 4 - jl convertloop4b // less than 4 pixels? + mov eax, 0x80808080 // 128 for biasing image to signed. + movd xmm6, eax + pshufd xmm6, xmm6, 0x00 - // 4 pixel loop. 
- convertloop4: - movdqu xmm3, [eax] // src argb - lea eax, [eax + 16] - movdqa xmm0, xmm3 // src argb - pxor xmm3, xmm4 // ~alpha - movdqu xmm2, [esi] // _r_b - psrlw xmm3, 8 // alpha - pshufhw xmm3, xmm3, 0F5h // 8 alpha words - pshuflw xmm3, xmm3, 0F5h - pand xmm2, xmm6 // _r_b - paddw xmm3, xmm7 // 256 - alpha - pmullw xmm2, xmm3 // _r_b * alpha - movdqu xmm1, [esi] // _a_g - lea esi, [esi + 16] - psrlw xmm1, 8 // _a_g - por xmm0, xmm4 // set alpha to 255 - pmullw xmm1, xmm3 // _a_g * alpha - psrlw xmm2, 8 // _r_b convert to 8 bits again - paddusb xmm0, xmm2 // + src argb - pand xmm1, xmm5 // a_g_ convert to 8 bits again - paddusb xmm0, xmm1 // + src argb - movdqu [edx], xmm0 - lea edx, [edx + 16] - sub ecx, 4 - jge convertloop4 + mov eax, 0x807f807f // 32768 + 127 for unbias and round. + movd xmm7, eax + pshufd xmm7, xmm7, 0x00 + mov eax, [esp + 8 + 4] // src0 + mov edx, [esp + 8 + 8] // src1 + mov esi, [esp + 8 + 12] // alpha + mov edi, [esp + 8 + 16] // dst + mov ecx, [esp + 8 + 20] // width + sub eax, esi + sub edx, esi + sub edi, esi - convertloop4b: - add ecx, 4 - 1 - jl convertloop1b + // 8 pixel loop. + convertloop8: + movq xmm0, qword ptr [esi] // alpha + punpcklbw xmm0, xmm0 + pxor xmm0, xmm5 // a, 255-a + movq xmm1, qword ptr [eax + esi] // src0 + movq xmm2, qword ptr [edx + esi] // src1 + punpcklbw xmm1, xmm2 + psubb xmm1, xmm6 // bias src0/1 - 128 + pmaddubsw xmm0, xmm1 + paddw xmm0, xmm7 // unbias result - 32768 and round. + psrlw xmm0, 8 + packuswb xmm0, xmm0 + movq qword ptr [edi + esi], xmm0 + lea esi, [esi + 8] + sub ecx, 8 + jg convertloop8 - // 1 pixel loop. 
- convertloop1: - movd xmm3, [eax] // src argb - lea eax, [eax + 4] - movdqa xmm0, xmm3 // src argb - pxor xmm3, xmm4 // ~alpha - movd xmm2, [esi] // _r_b - psrlw xmm3, 8 // alpha - pshufhw xmm3, xmm3, 0F5h // 8 alpha words - pshuflw xmm3, xmm3, 0F5h - pand xmm2, xmm6 // _r_b - paddw xmm3, xmm7 // 256 - alpha - pmullw xmm2, xmm3 // _r_b * alpha - movd xmm1, [esi] // _a_g - lea esi, [esi + 4] - psrlw xmm1, 8 // _a_g - por xmm0, xmm4 // set alpha to 255 - pmullw xmm1, xmm3 // _a_g * alpha - psrlw xmm2, 8 // _r_b convert to 8 bits again - paddusb xmm0, xmm2 // + src argb - pand xmm1, xmm5 // a_g_ convert to 8 bits again - paddusb xmm0, xmm1 // + src argb - movd [edx], xmm0 - lea edx, [edx + 4] - sub ecx, 1 - jge convertloop1 - - convertloop1b: + pop edi pop esi ret } } -#endif // HAS_ARGBBLENDROW_SSE2 +#endif // HAS_BLENDPLANEROW_SSSE3 + +#ifdef HAS_BLENDPLANEROW_AVX2 +// Blend 32 pixels at a time. +// unsigned version of math +// =((A2*C2)+(B2*(255-C2))+255)/256 +// signed version of math +// =(((A2-128)*C2)+((B2-128)*(255-C2))+32768+127)/256 +__declspec(naked) +void BlendPlaneRow_AVX2(const uint8* src0, const uint8* src1, + const uint8* alpha, uint8* dst, int width) { + __asm { + push esi + push edi + vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0xff00ff00 + vpsllw ymm5, ymm5, 8 + mov eax, 0x80808080 // 128 for biasing image to signed. + vmovd xmm6, eax + vbroadcastss ymm6, xmm6 + mov eax, 0x807f807f // 32768 + 127 for unbias and round. + vmovd xmm7, eax + vbroadcastss ymm7, xmm7 + mov eax, [esp + 8 + 4] // src0 + mov edx, [esp + 8 + 8] // src1 + mov esi, [esp + 8 + 12] // alpha + mov edi, [esp + 8 + 16] // dst + mov ecx, [esp + 8 + 20] // width + sub eax, esi + sub edx, esi + sub edi, esi + + // 32 pixel loop. 
+ convertloop32: + vmovdqu ymm0, [esi] // alpha + vpunpckhbw ymm3, ymm0, ymm0 // 8..15, 24..31 + vpunpcklbw ymm0, ymm0, ymm0 // 0..7, 16..23 + vpxor ymm3, ymm3, ymm5 // a, 255-a + vpxor ymm0, ymm0, ymm5 // a, 255-a + vmovdqu ymm1, [eax + esi] // src0 + vmovdqu ymm2, [edx + esi] // src1 + vpunpckhbw ymm4, ymm1, ymm2 + vpunpcklbw ymm1, ymm1, ymm2 + vpsubb ymm4, ymm4, ymm6 // bias src0/1 - 128 + vpsubb ymm1, ymm1, ymm6 // bias src0/1 - 128 + vpmaddubsw ymm3, ymm3, ymm4 + vpmaddubsw ymm0, ymm0, ymm1 + vpaddw ymm3, ymm3, ymm7 // unbias result - 32768 and round. + vpaddw ymm0, ymm0, ymm7 // unbias result - 32768 and round. + vpsrlw ymm3, ymm3, 8 + vpsrlw ymm0, ymm0, 8 + vpackuswb ymm0, ymm0, ymm3 + vmovdqu [edi + esi], ymm0 + lea esi, [esi + 32] + sub ecx, 32 + jg convertloop32 + + pop edi + pop esi + vzeroupper + ret + } +} +#endif // HAS_BLENDPLANEROW_AVX2 #ifdef HAS_ARGBBLENDROW_SSSE3 // Shuffle table for isolating alpha. @@ -4098,14 +4225,8 @@ static const uvec8 kShuffleAlpha = { 3u, 0x80, 3u, 0x80, 7u, 0x80, 7u, 0x80, 11u, 0x80, 11u, 0x80, 15u, 0x80, 15u, 0x80 }; -// Same as SSE2, but replaces: -// psrlw xmm3, 8 // alpha -// pshufhw xmm3, xmm3, 0F5h // 8 alpha words -// pshuflw xmm3, xmm3, 0F5h -// with.. -// pshufb xmm3, kShuffleAlpha // alpha -// Blend 8 pixels at a time. +// Blend 8 pixels at a time. 
__declspec(naked) void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1, uint8* dst_argb, int width) { @@ -4133,7 +4254,7 @@ void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1, movdqa xmm0, xmm3 // src argb pxor xmm3, xmm4 // ~alpha movdqu xmm2, [esi] // _r_b - pshufb xmm3, kShuffleAlpha // alpha + pshufb xmm3, xmmword ptr kShuffleAlpha // alpha pand xmm2, xmm6 // _r_b paddw xmm3, xmm7 // 256 - alpha pmullw xmm2, xmm3 // _r_b * alpha @@ -4162,7 +4283,7 @@ void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1, movdqa xmm0, xmm3 // src argb pxor xmm3, xmm4 // ~alpha movd xmm2, [esi] // _r_b - pshufb xmm3, kShuffleAlpha // alpha + pshufb xmm3, xmmword ptr kShuffleAlpha // alpha pand xmm2, xmm6 // _r_b paddw xmm3, xmm7 // 256 - alpha pmullw xmm2, xmm3 // _r_b * alpha @@ -4187,48 +4308,6 @@ void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1, } #endif // HAS_ARGBBLENDROW_SSSE3 -#ifdef HAS_ARGBATTENUATEROW_SSE2 -// Attenuate 4 pixels at a time. 
-__declspec(naked) -void ARGBAttenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width) { - __asm { - mov eax, [esp + 4] // src_argb0 - mov edx, [esp + 8] // dst_argb - mov ecx, [esp + 12] // width - pcmpeqb xmm4, xmm4 // generate mask 0xff000000 - pslld xmm4, 24 - pcmpeqb xmm5, xmm5 // generate mask 0x00ffffff - psrld xmm5, 8 - - convertloop: - movdqu xmm0, [eax] // read 4 pixels - punpcklbw xmm0, xmm0 // first 2 - pshufhw xmm2, xmm0, 0FFh // 8 alpha words - pshuflw xmm2, xmm2, 0FFh - pmulhuw xmm0, xmm2 // rgb * a - movdqu xmm1, [eax] // read 4 pixels - punpckhbw xmm1, xmm1 // next 2 pixels - pshufhw xmm2, xmm1, 0FFh // 8 alpha words - pshuflw xmm2, xmm2, 0FFh - pmulhuw xmm1, xmm2 // rgb * a - movdqu xmm2, [eax] // alphas - lea eax, [eax + 16] - psrlw xmm0, 8 - pand xmm2, xmm4 - psrlw xmm1, 8 - packuswb xmm0, xmm1 - pand xmm0, xmm5 // keep original alphas - por xmm0, xmm2 - movdqu [edx], xmm0 - lea edx, [edx + 16] - sub ecx, 4 - jg convertloop - - ret - } -} -#endif // HAS_ARGBATTENUATEROW_SSE2 - #ifdef HAS_ARGBATTENUATEROW_SSSE3 // Shuffle table duplicating alpha. 
static const uvec8 kShuffleAlpha0 = { @@ -4246,8 +4325,8 @@ void ARGBAttenuateRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) { mov ecx, [esp + 12] // width pcmpeqb xmm3, xmm3 // generate mask 0xff000000 pslld xmm3, 24 - movdqa xmm4, kShuffleAlpha0 - movdqa xmm5, kShuffleAlpha1 + movdqa xmm4, xmmword ptr kShuffleAlpha0 + movdqa xmm5, xmmword ptr kShuffleAlpha1 convertloop: movdqu xmm0, [eax] // read 4 pixels @@ -4289,7 +4368,7 @@ void ARGBAttenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width) { mov edx, [esp + 8] // dst_argb mov ecx, [esp + 12] // width sub edx, eax - vbroadcastf128 ymm4,kShuffleAlpha_AVX2 + vbroadcastf128 ymm4, xmmword ptr kShuffleAlpha_AVX2 vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0xff000000 vpslld ymm5, ymm5, 24 @@ -4323,19 +4402,21 @@ __declspec(naked) void ARGBUnattenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width) { __asm { + push ebx push esi push edi - mov eax, [esp + 8 + 4] // src_argb0 - mov edx, [esp + 8 + 8] // dst_argb - mov ecx, [esp + 8 + 12] // width + mov eax, [esp + 12 + 4] // src_argb + mov edx, [esp + 12 + 8] // dst_argb + mov ecx, [esp + 12 + 12] // width + lea ebx, fixed_invtbl8 convertloop: movdqu xmm0, [eax] // read 4 pixels movzx esi, byte ptr [eax + 3] // first alpha movzx edi, byte ptr [eax + 7] // second alpha punpcklbw xmm0, xmm0 // first 2 - movd xmm2, dword ptr fixed_invtbl8[esi * 4] - movd xmm3, dword ptr fixed_invtbl8[edi * 4] + movd xmm2, dword ptr [ebx + esi * 4] + movd xmm3, dword ptr [ebx + edi * 4] pshuflw xmm2, xmm2, 040h // first 4 inv_alpha words. 
1, a, a, a pshuflw xmm3, xmm3, 040h // next 4 inv_alpha words movlhps xmm2, xmm3 @@ -4345,21 +4426,22 @@ void ARGBUnattenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb, movzx esi, byte ptr [eax + 11] // third alpha movzx edi, byte ptr [eax + 15] // forth alpha punpckhbw xmm1, xmm1 // next 2 - movd xmm2, dword ptr fixed_invtbl8[esi * 4] - movd xmm3, dword ptr fixed_invtbl8[edi * 4] + movd xmm2, dword ptr [ebx + esi * 4] + movd xmm3, dword ptr [ebx + edi * 4] pshuflw xmm2, xmm2, 040h // first 4 inv_alpha words pshuflw xmm3, xmm3, 040h // next 4 inv_alpha words movlhps xmm2, xmm3 pmulhuw xmm1, xmm2 // rgb * a lea eax, [eax + 16] - packuswb xmm0, xmm1 movdqu [edx], xmm0 lea edx, [edx + 16] sub ecx, 4 jg convertloop + pop edi pop esi + pop ebx ret } } @@ -4381,7 +4463,7 @@ void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, mov edx, [esp + 8] // dst_argb mov ecx, [esp + 12] // width sub edx, eax - vbroadcastf128 ymm4, kUnattenShuffleAlpha_AVX2 + vbroadcastf128 ymm4, xmmword ptr kUnattenShuffleAlpha_AVX2 convertloop: vmovdqu ymm6, [eax] // read 8 pixels. 
@@ -4412,36 +4494,37 @@ void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width) { __asm { - mov eax, [esp + 4] // src_argb0 - mov edx, [esp + 8] // dst_argb - mov ecx, [esp + 12] // width - sub edx, eax - vbroadcastf128 ymm5, kUnattenShuffleAlpha_AVX2 - + push ebx push esi push edi + mov eax, [esp + 12 + 4] // src_argb + mov edx, [esp + 12 + 8] // dst_argb + mov ecx, [esp + 12 + 12] // width + sub edx, eax + lea ebx, fixed_invtbl8 + vbroadcastf128 ymm5, xmmword ptr kUnattenShuffleAlpha_AVX2 convertloop: // replace VPGATHER movzx esi, byte ptr [eax + 3] // alpha0 movzx edi, byte ptr [eax + 7] // alpha1 - vmovd xmm0, dword ptr fixed_invtbl8[esi * 4] // [1,a0] - vmovd xmm1, dword ptr fixed_invtbl8[edi * 4] // [1,a1] + vmovd xmm0, dword ptr [ebx + esi * 4] // [1,a0] + vmovd xmm1, dword ptr [ebx + edi * 4] // [1,a1] movzx esi, byte ptr [eax + 11] // alpha2 movzx edi, byte ptr [eax + 15] // alpha3 vpunpckldq xmm6, xmm0, xmm1 // [1,a1,1,a0] - vmovd xmm2, dword ptr fixed_invtbl8[esi * 4] // [1,a2] - vmovd xmm3, dword ptr fixed_invtbl8[edi * 4] // [1,a3] + vmovd xmm2, dword ptr [ebx + esi * 4] // [1,a2] + vmovd xmm3, dword ptr [ebx + edi * 4] // [1,a3] movzx esi, byte ptr [eax + 19] // alpha4 movzx edi, byte ptr [eax + 23] // alpha5 vpunpckldq xmm7, xmm2, xmm3 // [1,a3,1,a2] - vmovd xmm0, dword ptr fixed_invtbl8[esi * 4] // [1,a4] - vmovd xmm1, dword ptr fixed_invtbl8[edi * 4] // [1,a5] + vmovd xmm0, dword ptr [ebx + esi * 4] // [1,a4] + vmovd xmm1, dword ptr [ebx + edi * 4] // [1,a5] movzx esi, byte ptr [eax + 27] // alpha6 movzx edi, byte ptr [eax + 31] // alpha7 vpunpckldq xmm0, xmm0, xmm1 // [1,a5,1,a4] - vmovd xmm2, dword ptr fixed_invtbl8[esi * 4] // [1,a6] - vmovd xmm3, dword ptr fixed_invtbl8[edi * 4] // [1,a7] + vmovd xmm2, dword ptr [ebx + esi * 4] // [1,a6] + vmovd xmm3, dword ptr [ebx + edi * 4] // [1,a7] vpunpckldq xmm2, xmm2, xmm3 // [1,a7,1,a6] vpunpcklqdq xmm3, xmm6, xmm7 // [1,a3,1,a2,1,a1,1,a0] vpunpcklqdq xmm0, xmm0, xmm2 // 
[1,a7,1,a6,1,a5,1,a4] @@ -4465,6 +4548,7 @@ void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, pop edi pop esi + pop ebx vzeroupper ret } @@ -4480,8 +4564,8 @@ void ARGBGrayRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) { mov eax, [esp + 4] /* src_argb */ mov edx, [esp + 8] /* dst_argb */ mov ecx, [esp + 12] /* width */ - movdqa xmm4, kARGBToYJ - movdqa xmm5, kAddYJ64 + movdqa xmm4, xmmword ptr kARGBToYJ + movdqa xmm5, xmmword ptr kAddYJ64 convertloop: movdqu xmm0, [eax] // G @@ -4538,9 +4622,9 @@ void ARGBSepiaRow_SSSE3(uint8* dst_argb, int width) { __asm { mov eax, [esp + 4] /* dst_argb */ mov ecx, [esp + 8] /* width */ - movdqa xmm2, kARGBToSepiaB - movdqa xmm3, kARGBToSepiaG - movdqa xmm4, kARGBToSepiaR + movdqa xmm2, xmmword ptr kARGBToSepiaB + movdqa xmm3, xmmword ptr kARGBToSepiaG + movdqa xmm4, xmmword ptr kARGBToSepiaR convertloop: movdqu xmm0, [eax] // B @@ -5190,6 +5274,7 @@ void SobelXYRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely, // dst points to pixel to store result to. // count is number of averaged pixels to produce. // Does 4 pixels at a time. +// This function requires alignment on accumulation buffer pointers. void CumulativeSumToAverageRow_SSE2(const int32* topleft, const int32* botleft, int width, int area, uint8* dst, int count) { @@ -5517,36 +5602,38 @@ void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr, mov edx, [esp + 8 + 12] // src_stride mov ecx, [esp + 8 + 16] // dst_width mov eax, [esp + 8 + 20] // source_y_fraction (0..255) - shr eax, 1 // Dispatch to specialized filters if applicable. cmp eax, 0 - je xloop100 // 0 / 128. Blend 100 / 0. + je xloop100 // 0 / 256. Blend 100 / 0. sub edi, esi - cmp eax, 32 - je xloop75 // 32 / 128 is 0.25. Blend 75 / 25. - cmp eax, 64 - je xloop50 // 64 / 128 is 0.50. Blend 50 / 50. - cmp eax, 96 - je xloop25 // 96 / 128 is 0.75. Blend 25 / 75. + cmp eax, 128 + je xloop50 // 128 /256 is 0.50. Blend 50 / 50. 
- vmovd xmm0, eax // high fraction 0..127 + vmovd xmm0, eax // high fraction 0..255 neg eax - add eax, 128 - vmovd xmm5, eax // low fraction 128..1 + add eax, 256 + vmovd xmm5, eax // low fraction 256..1 vpunpcklbw xmm5, xmm5, xmm0 vpunpcklwd xmm5, xmm5, xmm5 - vpxor ymm0, ymm0, ymm0 - vpermd ymm5, ymm0, ymm5 + vbroadcastss ymm5, xmm5 + + mov eax, 0x80808080 // 128b for bias and rounding. + vmovd xmm4, eax + vbroadcastss ymm4, xmm4 xloop: vmovdqu ymm0, [esi] vmovdqu ymm2, [esi + edx] vpunpckhbw ymm1, ymm0, ymm2 // mutates - vpunpcklbw ymm0, ymm0, ymm2 // mutates - vpmaddubsw ymm0, ymm0, ymm5 - vpmaddubsw ymm1, ymm1, ymm5 - vpsrlw ymm0, ymm0, 7 - vpsrlw ymm1, ymm1, 7 + vpunpcklbw ymm0, ymm0, ymm2 + vpsubb ymm1, ymm1, ymm4 // bias to signed image + vpsubb ymm0, ymm0, ymm4 + vpmaddubsw ymm1, ymm5, ymm1 + vpmaddubsw ymm0, ymm5, ymm0 + vpaddw ymm1, ymm1, ymm4 // unbias and round + vpaddw ymm0, ymm0, ymm4 + vpsrlw ymm1, ymm1, 8 + vpsrlw ymm0, ymm0, 8 vpackuswb ymm0, ymm0, ymm1 // unmutates vmovdqu [esi + edi], ymm0 lea esi, [esi + 32] @@ -5554,18 +5641,6 @@ void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr, jg xloop jmp xloop99 - // Blend 25 / 75. - xloop25: - vmovdqu ymm0, [esi] - vmovdqu ymm1, [esi + edx] - vpavgb ymm0, ymm0, ymm1 - vpavgb ymm0, ymm0, ymm1 - vmovdqu [esi + edi], ymm0 - lea esi, [esi + 32] - sub ecx, 32 - jg xloop25 - jmp xloop99 - // Blend 50 / 50. xloop50: vmovdqu ymm0, [esi] @@ -5576,18 +5651,6 @@ void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr, jg xloop50 jmp xloop99 - // Blend 75 / 25. - xloop75: - vmovdqu ymm1, [esi] - vmovdqu ymm0, [esi + edx] - vpavgb ymm0, ymm0, ymm1 - vpavgb ymm0, ymm0, ymm1 - vmovdqu [esi + edi], ymm0 - lea esi, [esi + 32] - sub ecx, 32 - jg xloop75 - jmp xloop99 - // Blend 100 / 0 - Copy row unchanged. 
xloop100: rep movsb @@ -5602,6 +5665,7 @@ void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr, #endif // HAS_INTERPOLATEROW_AVX2 // Bilinear filter 16x2 -> 16x1 +// TODO(fbarchard): Consider allowing 256 using memcpy. __declspec(naked) void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr, ptrdiff_t src_stride, int dst_width, @@ -5609,30 +5673,29 @@ void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr, __asm { push esi push edi + mov edi, [esp + 8 + 4] // dst_ptr mov esi, [esp + 8 + 8] // src_ptr mov edx, [esp + 8 + 12] // src_stride mov ecx, [esp + 8 + 16] // dst_width mov eax, [esp + 8 + 20] // source_y_fraction (0..255) sub edi, esi - shr eax, 1 // Dispatch to specialized filters if applicable. cmp eax, 0 - je xloop100 // 0 / 128. Blend 100 / 0. - cmp eax, 32 - je xloop75 // 32 / 128 is 0.25. Blend 75 / 25. - cmp eax, 64 - je xloop50 // 64 / 128 is 0.50. Blend 50 / 50. - cmp eax, 96 - je xloop25 // 96 / 128 is 0.75. Blend 25 / 75. + je xloop100 // 0 /256. Blend 100 / 0. + cmp eax, 128 + je xloop50 // 128 / 256 is 0.50. Blend 50 / 50. - movd xmm0, eax // high fraction 0..127 + movd xmm0, eax // high fraction 0..255 neg eax - add eax, 128 - movd xmm5, eax // low fraction 128..1 + add eax, 256 + movd xmm5, eax // low fraction 255..1 punpcklbw xmm5, xmm0 punpcklwd xmm5, xmm5 pshufd xmm5, xmm5, 0 + mov eax, 0x80808080 // 128 for biasing image to signed. 
+ movd xmm4, eax + pshufd xmm4, xmm4, 0x00 xloop: movdqu xmm0, [esi] @@ -5640,29 +5703,23 @@ void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr, movdqu xmm1, xmm0 punpcklbw xmm0, xmm2 punpckhbw xmm1, xmm2 - pmaddubsw xmm0, xmm5 - pmaddubsw xmm1, xmm5 - psrlw xmm0, 7 - psrlw xmm1, 7 - packuswb xmm0, xmm1 - movdqu [esi + edi], xmm0 + psubb xmm0, xmm4 // bias image by -128 + psubb xmm1, xmm4 + movdqa xmm2, xmm5 + movdqa xmm3, xmm5 + pmaddubsw xmm2, xmm0 + pmaddubsw xmm3, xmm1 + paddw xmm2, xmm4 + paddw xmm3, xmm4 + psrlw xmm2, 8 + psrlw xmm3, 8 + packuswb xmm2, xmm3 + movdqu [esi + edi], xmm2 lea esi, [esi + 16] sub ecx, 16 jg xloop jmp xloop99 - // Blend 25 / 75. - xloop25: - movdqu xmm0, [esi] - movdqu xmm1, [esi + edx] - pavgb xmm0, xmm1 - pavgb xmm0, xmm1 - movdqu [esi + edi], xmm0 - lea esi, [esi + 16] - sub ecx, 16 - jg xloop25 - jmp xloop99 - // Blend 50 / 50. xloop50: movdqu xmm0, [esi] @@ -5674,18 +5731,6 @@ void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr, jg xloop50 jmp xloop99 - // Blend 75 / 25. - xloop75: - movdqu xmm1, [esi] - movdqu xmm0, [esi + edx] - pavgb xmm0, xmm1 - pavgb xmm0, xmm1 - movdqu [esi + edi], xmm0 - lea esi, [esi + 16] - sub ecx, 16 - jg xloop75 - jmp xloop99 - // Blend 100 / 0 - Copy row unchanged. xloop100: movdqu xmm0, [esi] @@ -5701,124 +5746,16 @@ void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr, } } -#ifdef HAS_INTERPOLATEROW_SSE2 -// Bilinear filter 16x2 -> 16x1 -__declspec(naked) -void InterpolateRow_SSE2(uint8* dst_ptr, const uint8* src_ptr, - ptrdiff_t src_stride, int dst_width, - int source_y_fraction) { - __asm { - push esi - push edi - mov edi, [esp + 8 + 4] // dst_ptr - mov esi, [esp + 8 + 8] // src_ptr - mov edx, [esp + 8 + 12] // src_stride - mov ecx, [esp + 8 + 16] // dst_width - mov eax, [esp + 8 + 20] // source_y_fraction (0..255) - sub edi, esi - // Dispatch to specialized filters if applicable. - cmp eax, 0 - je xloop100 // 0 / 256. Blend 100 / 0. 
- cmp eax, 64 - je xloop75 // 64 / 256 is 0.25. Blend 75 / 25. - cmp eax, 128 - je xloop50 // 128 / 256 is 0.50. Blend 50 / 50. - cmp eax, 192 - je xloop25 // 192 / 256 is 0.75. Blend 25 / 75. - - movd xmm5, eax // xmm5 = y fraction - punpcklbw xmm5, xmm5 - psrlw xmm5, 1 - punpcklwd xmm5, xmm5 - punpckldq xmm5, xmm5 - punpcklqdq xmm5, xmm5 - pxor xmm4, xmm4 - - xloop: - movdqu xmm0, [esi] // row0 - movdqu xmm2, [esi + edx] // row1 - movdqu xmm1, xmm0 - movdqu xmm3, xmm2 - punpcklbw xmm2, xmm4 - punpckhbw xmm3, xmm4 - punpcklbw xmm0, xmm4 - punpckhbw xmm1, xmm4 - psubw xmm2, xmm0 // row1 - row0 - psubw xmm3, xmm1 - paddw xmm2, xmm2 // 9 bits * 15 bits = 8.16 - paddw xmm3, xmm3 - pmulhw xmm2, xmm5 // scale diff - pmulhw xmm3, xmm5 - paddw xmm0, xmm2 // sum rows - paddw xmm1, xmm3 - packuswb xmm0, xmm1 - movdqu [esi + edi], xmm0 - lea esi, [esi + 16] - sub ecx, 16 - jg xloop - jmp xloop99 - - // Blend 25 / 75. - xloop25: - movdqu xmm0, [esi] - movdqu xmm1, [esi + edx] - pavgb xmm0, xmm1 - pavgb xmm0, xmm1 - movdqu [esi + edi], xmm0 - lea esi, [esi + 16] - sub ecx, 16 - jg xloop25 - jmp xloop99 - - // Blend 50 / 50. - xloop50: - movdqu xmm0, [esi] - movdqu xmm1, [esi + edx] - pavgb xmm0, xmm1 - movdqu [esi + edi], xmm0 - lea esi, [esi + 16] - sub ecx, 16 - jg xloop50 - jmp xloop99 - - // Blend 75 / 25. - xloop75: - movdqu xmm1, [esi] - movdqu xmm0, [esi + edx] - pavgb xmm0, xmm1 - pavgb xmm0, xmm1 - movdqu [esi + edi], xmm0 - lea esi, [esi + 16] - sub ecx, 16 - jg xloop75 - jmp xloop99 - - // Blend 100 / 0 - Copy row unchanged. - xloop100: - movdqu xmm0, [esi] - movdqu [esi + edi], xmm0 - lea esi, [esi + 16] - sub ecx, 16 - jg xloop100 - - xloop99: - pop edi - pop esi - ret - } -} -#endif // HAS_INTERPOLATEROW_SSE2 - // For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA. 
__declspec(naked) void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix) { + const uint8* shuffler, int width) { __asm { mov eax, [esp + 4] // src_argb mov edx, [esp + 8] // dst_argb mov ecx, [esp + 12] // shuffler movdqu xmm5, [ecx] - mov ecx, [esp + 16] // pix + mov ecx, [esp + 16] // width wloop: movdqu xmm0, [eax] @@ -5838,13 +5775,13 @@ void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb, #ifdef HAS_ARGBSHUFFLEROW_AVX2 __declspec(naked) void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix) { + const uint8* shuffler, int width) { __asm { mov eax, [esp + 4] // src_argb mov edx, [esp + 8] // dst_argb mov ecx, [esp + 12] // shuffler vbroadcastf128 ymm5, [ecx] // same shuffle in high as low. - mov ecx, [esp + 16] // pix + mov ecx, [esp + 16] // width wloop: vmovdqu ymm0, [eax] @@ -5866,14 +5803,14 @@ void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb, __declspec(naked) void ARGBShuffleRow_SSE2(const uint8* src_argb, uint8* dst_argb, - const uint8* shuffler, int pix) { + const uint8* shuffler, int width) { __asm { push ebx push esi mov eax, [esp + 8 + 4] // src_argb mov edx, [esp + 8 + 8] // dst_argb mov esi, [esp + 8 + 12] // shuffler - mov ecx, [esp + 8 + 16] // pix + mov ecx, [esp + 8 + 16] // width pxor xmm5, xmm5 mov ebx, [esi] // shuffler @@ -6245,7 +6182,7 @@ void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb, uint8* dst_argb, // 4 pixel loop. 
convertloop: - movdqu xmm0, qword ptr [eax] // generate luma ptr + movdqu xmm0, xmmword ptr [eax] // generate luma ptr pmaddubsw xmm0, xmm3 phaddw xmm0, xmm0 pand xmm0, xmm4 // mask out low bits @@ -6323,9 +6260,10 @@ void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb, uint8* dst_argb, #endif // HAS_ARGBLUMACOLORTABLEROW_SSSE3 #endif // defined(_M_X64) -#endif // !defined(LIBYUV_DISABLE_X86) && (defined(_M_IX86) || defined(_M_X64)) #ifdef __cplusplus } // extern "C" } // namespace libyuv #endif + +#endif // !defined(LIBYUV_DISABLE_X86) && (defined(_M_IX86) || defined(_M_X64)) diff --git a/libs/libvpx/third_party/libyuv/source/row_x86.asm b/libs/libvpx/third_party/libyuv/source/row_x86.asm deleted file mode 100644 index 0cb326f8e5..0000000000 --- a/libs/libvpx/third_party/libyuv/source/row_x86.asm +++ /dev/null @@ -1,146 +0,0 @@ -; -; Copyright 2012 The LibYuv Project Authors. All rights reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - -%ifdef __YASM_VERSION_ID__ -%if __YASM_VERSION_ID__ < 01020000h -%error AVX2 is supported only by yasm 1.2.0 or later. 
-%endif -%endif -%include "x86inc.asm" - -SECTION .text - -; cglobal numeric constants are parameters, gpr regs, mm regs - -; void YUY2ToYRow_SSE2(const uint8* src_yuy2, uint8* dst_y, int pix) - -%macro YUY2TOYROW 2-3 -cglobal %1ToYRow%3, 3, 3, 3, src_yuy2, dst_y, pix -%ifidn %1,YUY2 - pcmpeqb m2, m2, m2 ; generate mask 0x00ff00ff - psrlw m2, m2, 8 -%endif - - ALIGN 4 -.convertloop: - mov%2 m0, [src_yuy2q] - mov%2 m1, [src_yuy2q + mmsize] - lea src_yuy2q, [src_yuy2q + mmsize * 2] -%ifidn %1,YUY2 - pand m0, m0, m2 ; YUY2 even bytes are Y - pand m1, m1, m2 -%else - psrlw m0, m0, 8 ; UYVY odd bytes are Y - psrlw m1, m1, 8 -%endif - packuswb m0, m0, m1 -%if cpuflag(AVX2) - vpermq m0, m0, 0xd8 -%endif - sub pixd, mmsize - mov%2 [dst_yq], m0 - lea dst_yq, [dst_yq + mmsize] - jg .convertloop - REP_RET -%endmacro - -; TODO(fbarchard): Remove MMX. Add SSSE3 pshufb version. -INIT_MMX MMX -YUY2TOYROW YUY2,a, -YUY2TOYROW YUY2,u,_Unaligned -YUY2TOYROW UYVY,a, -YUY2TOYROW UYVY,u,_Unaligned -INIT_XMM SSE2 -YUY2TOYROW YUY2,a, -YUY2TOYROW YUY2,u,_Unaligned -YUY2TOYROW UYVY,a, -YUY2TOYROW UYVY,u,_Unaligned -INIT_YMM AVX2 -YUY2TOYROW YUY2,a, -YUY2TOYROW UYVY,a, - -; void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) - -%macro SplitUVRow 1-2 -cglobal SplitUVRow%2, 4, 4, 5, src_uv, dst_u, dst_v, pix - pcmpeqb m4, m4, m4 ; generate mask 0x00ff00ff - psrlw m4, m4, 8 - sub dst_vq, dst_uq - - ALIGN 4 -.convertloop: - mov%1 m0, [src_uvq] - mov%1 m1, [src_uvq + mmsize] - lea src_uvq, [src_uvq + mmsize * 2] - psrlw m2, m0, 8 ; odd bytes - psrlw m3, m1, 8 - pand m0, m0, m4 ; even bytes - pand m1, m1, m4 - packuswb m0, m0, m1 - packuswb m2, m2, m3 -%if cpuflag(AVX2) - vpermq m0, m0, 0xd8 - vpermq m2, m2, 0xd8 -%endif - mov%1 [dst_uq], m0 - mov%1 [dst_uq + dst_vq], m2 - lea dst_uq, [dst_uq + mmsize] - sub pixd, mmsize - jg .convertloop - REP_RET -%endmacro - -INIT_MMX MMX -SplitUVRow a, -SplitUVRow u,_Unaligned -INIT_XMM SSE2 -SplitUVRow a, -SplitUVRow u,_Unaligned 
-INIT_YMM AVX2 -SplitUVRow a, - -; void MergeUVRow_SSE2(const uint8* src_u, const uint8* src_v, uint8* dst_uv, -; int width); - -%macro MergeUVRow_ 1-2 -cglobal MergeUVRow_%2, 4, 4, 3, src_u, src_v, dst_uv, pix - sub src_vq, src_uq - - ALIGN 4 -.convertloop: - mov%1 m0, [src_uq] - mov%1 m1, [src_vq] - lea src_uq, [src_uq + mmsize] - punpcklbw m2, m0, m1 // first 8 UV pairs - punpckhbw m0, m0, m1 // next 8 UV pairs -%if cpuflag(AVX2) - vperm2i128 m1, m2, m0, 0x20 // low 128 of ymm2 and low 128 of ymm0 - vperm2i128 m2, m2, m0, 0x31 // high 128 of ymm2 and high 128 of ymm0 - mov%1 [dst_uvq], m1 - mov%1 [dst_uvq + mmsize], m2 -%else - mov%1 [dst_uvq], m2 - mov%1 [dst_uvq + mmsize], m0 -%endif - lea dst_uvq, [dst_uvq + mmsize * 2] - sub pixd, mmsize - jg .convertloop - REP_RET -%endmacro - -INIT_MMX MMX -MergeUVRow_ a, -MergeUVRow_ u,_Unaligned -INIT_XMM SSE2 -MergeUVRow_ a, -MergeUVRow_ u,_Unaligned -INIT_YMM AVX2 -MergeUVRow_ a, - diff --git a/libs/libvpx/third_party/libyuv/source/scale.cc b/libs/libvpx/third_party/libyuv/source/scale.cc index 0a01304c41..36e3fe5281 100644 --- a/libs/libvpx/third_party/libyuv/source/scale.cc +++ b/libs/libvpx/third_party/libyuv/source/scale.cc @@ -61,15 +61,15 @@ static void ScalePlaneDown2(int src_width, int src_height, } } #endif -#if defined(HAS_SCALEROWDOWN2_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - ScaleRowDown2 = filtering == kFilterNone ? ScaleRowDown2_Any_SSE2 : - (filtering == kFilterLinear ? ScaleRowDown2Linear_Any_SSE2 : - ScaleRowDown2Box_Any_SSE2); +#if defined(HAS_SCALEROWDOWN2_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { + ScaleRowDown2 = filtering == kFilterNone ? ScaleRowDown2_Any_SSSE3 : + (filtering == kFilterLinear ? ScaleRowDown2Linear_Any_SSSE3 : + ScaleRowDown2Box_Any_SSSE3); if (IS_ALIGNED(dst_width, 16)) { - ScaleRowDown2 = filtering == kFilterNone ? ScaleRowDown2_SSE2 : - (filtering == kFilterLinear ? ScaleRowDown2Linear_SSE2 : - ScaleRowDown2Box_SSE2); + ScaleRowDown2 = filtering == kFilterNone ? 
ScaleRowDown2_SSSE3 : + (filtering == kFilterLinear ? ScaleRowDown2Linear_SSSE3 : + ScaleRowDown2Box_SSSE3); } } #endif @@ -85,12 +85,12 @@ static void ScalePlaneDown2(int src_width, int src_height, } } #endif -#if defined(HAS_SCALEROWDOWN2_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(src_ptr, 4) && +#if defined(HAS_SCALEROWDOWN2_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) && IS_ALIGNED(row_stride, 4) && IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) { ScaleRowDown2 = filtering ? - ScaleRowDown2Box_MIPS_DSPR2 : ScaleRowDown2_MIPS_DSPR2; + ScaleRowDown2Box_DSPR2 : ScaleRowDown2_DSPR2; } #endif @@ -135,12 +135,12 @@ static void ScalePlaneDown2_16(int src_width, int src_height, ScaleRowDown2Box_16_SSE2); } #endif -#if defined(HAS_SCALEROWDOWN2_16_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(src_ptr, 4) && +#if defined(HAS_SCALEROWDOWN2_16_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) && IS_ALIGNED(row_stride, 4) && IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) { ScaleRowDown2 = filtering ? - ScaleRowDown2Box_16_MIPS_DSPR2 : ScaleRowDown2_16_MIPS_DSPR2; + ScaleRowDown2Box_16_DSPR2 : ScaleRowDown2_16_DSPR2; } #endif @@ -182,12 +182,12 @@ static void ScalePlaneDown4(int src_width, int src_height, } } #endif -#if defined(HAS_SCALEROWDOWN4_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { +#if defined(HAS_SCALEROWDOWN4_SSSE3) + if (TestCpuFlag(kCpuHasSSSE3)) { ScaleRowDown4 = filtering ? - ScaleRowDown4Box_Any_SSE2 : ScaleRowDown4_Any_SSE2; + ScaleRowDown4Box_Any_SSSE3 : ScaleRowDown4_Any_SSSE3; if (IS_ALIGNED(dst_width, 8)) { - ScaleRowDown4 = filtering ? ScaleRowDown4Box_SSE2 : ScaleRowDown4_SSE2; + ScaleRowDown4 = filtering ? 
ScaleRowDown4Box_SSSE3 : ScaleRowDown4_SSSE3; } } #endif @@ -200,12 +200,12 @@ static void ScalePlaneDown4(int src_width, int src_height, } } #endif -#if defined(HAS_SCALEROWDOWN4_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(row_stride, 4) && +#if defined(HAS_SCALEROWDOWN4_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(row_stride, 4) && IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) && IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) { ScaleRowDown4 = filtering ? - ScaleRowDown4Box_MIPS_DSPR2 : ScaleRowDown4_MIPS_DSPR2; + ScaleRowDown4Box_DSPR2 : ScaleRowDown4_DSPR2; } #endif @@ -245,12 +245,12 @@ static void ScalePlaneDown4_16(int src_width, int src_height, ScaleRowDown4_16_SSE2; } #endif -#if defined(HAS_SCALEROWDOWN4_16_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(row_stride, 4) && +#if defined(HAS_SCALEROWDOWN4_16_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(row_stride, 4) && IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) && IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) { ScaleRowDown4 = filtering ? 
- ScaleRowDown4Box_16_MIPS_DSPR2 : ScaleRowDown4_16_MIPS_DSPR2; + ScaleRowDown4Box_16_DSPR2 : ScaleRowDown4_16_DSPR2; } #endif @@ -325,16 +325,16 @@ static void ScalePlaneDown34(int src_width, int src_height, } } #endif -#if defined(HAS_SCALEROWDOWN34_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && (dst_width % 24 == 0) && +#if defined(HAS_SCALEROWDOWN34_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && (dst_width % 24 == 0) && IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) && IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) { if (!filtering) { - ScaleRowDown34_0 = ScaleRowDown34_MIPS_DSPR2; - ScaleRowDown34_1 = ScaleRowDown34_MIPS_DSPR2; + ScaleRowDown34_0 = ScaleRowDown34_DSPR2; + ScaleRowDown34_1 = ScaleRowDown34_DSPR2; } else { - ScaleRowDown34_0 = ScaleRowDown34_0_Box_MIPS_DSPR2; - ScaleRowDown34_1 = ScaleRowDown34_1_Box_MIPS_DSPR2; + ScaleRowDown34_0 = ScaleRowDown34_0_Box_DSPR2; + ScaleRowDown34_1 = ScaleRowDown34_1_Box_DSPR2; } } #endif @@ -404,16 +404,16 @@ static void ScalePlaneDown34_16(int src_width, int src_height, } } #endif -#if defined(HAS_SCALEROWDOWN34_16_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && (dst_width % 24 == 0) && +#if defined(HAS_SCALEROWDOWN34_16_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && (dst_width % 24 == 0) && IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) && IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) { if (!filtering) { - ScaleRowDown34_0 = ScaleRowDown34_16_MIPS_DSPR2; - ScaleRowDown34_1 = ScaleRowDown34_16_MIPS_DSPR2; + ScaleRowDown34_0 = ScaleRowDown34_16_DSPR2; + ScaleRowDown34_1 = ScaleRowDown34_16_DSPR2; } else { - ScaleRowDown34_0 = ScaleRowDown34_0_Box_16_MIPS_DSPR2; - ScaleRowDown34_1 = ScaleRowDown34_1_Box_16_MIPS_DSPR2; + ScaleRowDown34_0 = ScaleRowDown34_0_Box_16_DSPR2; + ScaleRowDown34_1 = ScaleRowDown34_1_Box_16_DSPR2; } } #endif @@ -517,16 +517,16 @@ static void ScalePlaneDown38(int src_width, int src_height, } } #endif -#if defined(HAS_SCALEROWDOWN38_MIPS_DSPR2) - if 
(TestCpuFlag(kCpuHasMIPS_DSPR2) && (dst_width % 12 == 0) && +#if defined(HAS_SCALEROWDOWN38_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && (dst_width % 12 == 0) && IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) && IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) { if (!filtering) { - ScaleRowDown38_3 = ScaleRowDown38_MIPS_DSPR2; - ScaleRowDown38_2 = ScaleRowDown38_MIPS_DSPR2; + ScaleRowDown38_3 = ScaleRowDown38_DSPR2; + ScaleRowDown38_2 = ScaleRowDown38_DSPR2; } else { - ScaleRowDown38_3 = ScaleRowDown38_3_Box_MIPS_DSPR2; - ScaleRowDown38_2 = ScaleRowDown38_2_Box_MIPS_DSPR2; + ScaleRowDown38_3 = ScaleRowDown38_3_Box_DSPR2; + ScaleRowDown38_2 = ScaleRowDown38_2_Box_DSPR2; } } #endif @@ -595,16 +595,16 @@ static void ScalePlaneDown38_16(int src_width, int src_height, } } #endif -#if defined(HAS_SCALEROWDOWN38_16_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && (dst_width % 12 == 0) && +#if defined(HAS_SCALEROWDOWN38_16_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && (dst_width % 12 == 0) && IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) && IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) { if (!filtering) { - ScaleRowDown38_3 = ScaleRowDown38_16_MIPS_DSPR2; - ScaleRowDown38_2 = ScaleRowDown38_16_MIPS_DSPR2; + ScaleRowDown38_3 = ScaleRowDown38_16_DSPR2; + ScaleRowDown38_2 = ScaleRowDown38_16_DSPR2; } else { - ScaleRowDown38_3 = ScaleRowDown38_3_Box_16_MIPS_DSPR2; - ScaleRowDown38_2 = ScaleRowDown38_2_Box_16_MIPS_DSPR2; + ScaleRowDown38_3 = ScaleRowDown38_3_Box_16_DSPR2; + ScaleRowDown38_2 = ScaleRowDown38_2_Box_16_DSPR2; } } #endif @@ -659,7 +659,6 @@ static void ScaleAddCols2_C(int dst_width, int boxheight, int x, int dx, int i; int scaletbl[2]; int minboxwidth = dx >> 16; - int* scaleptr = scaletbl - minboxwidth; int boxwidth; scaletbl[0] = 65536 / (MIN1(minboxwidth) * boxheight); scaletbl[1] = 65536 / (MIN1(minboxwidth + 1) * boxheight); @@ -667,7 +666,8 @@ static void ScaleAddCols2_C(int dst_width, int boxheight, int x, int dx, int ix = x >> 16; x += dx; 
boxwidth = MIN1((x >> 16) - ix); - *dst_ptr++ = SumPixels(boxwidth, src_ptr + ix) * scaleptr[boxwidth] >> 16; + *dst_ptr++ = SumPixels(boxwidth, src_ptr + ix) * + scaletbl[boxwidth - minboxwidth] >> 16; } } @@ -676,7 +676,6 @@ static void ScaleAddCols2_16_C(int dst_width, int boxheight, int x, int dx, int i; int scaletbl[2]; int minboxwidth = dx >> 16; - int* scaleptr = scaletbl - minboxwidth; int boxwidth; scaletbl[0] = 65536 / (MIN1(minboxwidth) * boxheight); scaletbl[1] = 65536 / (MIN1(minboxwidth + 1) * boxheight); @@ -684,8 +683,8 @@ static void ScaleAddCols2_16_C(int dst_width, int boxheight, int x, int dx, int ix = x >> 16; x += dx; boxwidth = MIN1((x >> 16) - ix); - *dst_ptr++ = - SumPixels_16(boxwidth, src_ptr + ix) * scaleptr[boxwidth] >> 16; + *dst_ptr++ = SumPixels_16(boxwidth, src_ptr + ix) * + scaletbl[boxwidth - minboxwidth] >> 16; } } @@ -875,14 +874,6 @@ void ScalePlaneBilinearDown(int src_width, int src_height, &x, &y, &dx, &dy); src_width = Abs(src_width); -#if defined(HAS_INTERPOLATEROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - InterpolateRow = InterpolateRow_Any_SSE2; - if (IS_ALIGNED(src_width, 16)) { - InterpolateRow = InterpolateRow_SSE2; - } - } -#endif #if defined(HAS_INTERPOLATEROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { InterpolateRow = InterpolateRow_Any_SSSE3; @@ -907,11 +898,11 @@ void ScalePlaneBilinearDown(int src_width, int src_height, } } #endif -#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2)) { - InterpolateRow = InterpolateRow_Any_MIPS_DSPR2; +#if defined(HAS_INTERPOLATEROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2)) { + InterpolateRow = InterpolateRow_Any_DSPR2; if (IS_ALIGNED(src_width, 4)) { - InterpolateRow = InterpolateRow_MIPS_DSPR2; + InterpolateRow = InterpolateRow_DSPR2; } } #endif @@ -1011,11 +1002,11 @@ void ScalePlaneBilinearDown_16(int src_width, int src_height, } } #endif -#if defined(HAS_INTERPOLATEROW_16_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2)) { - InterpolateRow = 
InterpolateRow_Any_16_MIPS_DSPR2; +#if defined(HAS_INTERPOLATEROW_16_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2)) { + InterpolateRow = InterpolateRow_Any_16_DSPR2; if (IS_ALIGNED(src_width, 4)) { - InterpolateRow = InterpolateRow_16_MIPS_DSPR2; + InterpolateRow = InterpolateRow_16_DSPR2; } } #endif @@ -1072,14 +1063,6 @@ void ScalePlaneBilinearUp(int src_width, int src_height, &x, &y, &dx, &dy); src_width = Abs(src_width); -#if defined(HAS_INTERPOLATEROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - InterpolateRow = InterpolateRow_Any_SSE2; - if (IS_ALIGNED(dst_width, 16)) { - InterpolateRow = InterpolateRow_SSE2; - } - } -#endif #if defined(HAS_INTERPOLATEROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { InterpolateRow = InterpolateRow_Any_SSSE3; @@ -1104,11 +1087,11 @@ void ScalePlaneBilinearUp(int src_width, int src_height, } } #endif -#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2)) { - InterpolateRow = InterpolateRow_Any_MIPS_DSPR2; +#if defined(HAS_INTERPOLATEROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2)) { + InterpolateRow = InterpolateRow_Any_DSPR2; if (IS_ALIGNED(dst_width, 4)) { - InterpolateRow = InterpolateRow_MIPS_DSPR2; + InterpolateRow = InterpolateRow_DSPR2; } } #endif @@ -1243,11 +1226,11 @@ void ScalePlaneBilinearUp_16(int src_width, int src_height, } } #endif -#if defined(HAS_INTERPOLATEROW_16_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2)) { - InterpolateRow = InterpolateRow_Any_16_MIPS_DSPR2; +#if defined(HAS_INTERPOLATEROW_16_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2)) { + InterpolateRow = InterpolateRow_Any_16_DSPR2; if (IS_ALIGNED(dst_width, 4)) { - InterpolateRow = InterpolateRow_16_MIPS_DSPR2; + InterpolateRow = InterpolateRow_16_DSPR2; } } #endif diff --git a/libs/libvpx/third_party/libyuv/source/scale_any.cc b/libs/libvpx/third_party/libyuv/source/scale_any.cc index 2f6a2c8baf..ed76a9e4c0 100644 --- a/libs/libvpx/third_party/libyuv/source/scale_any.cc +++ b/libs/libvpx/third_party/libyuv/source/scale_any.cc @@ -55,12 +55,29 
@@ CANY(ScaleARGBFilterCols_Any_NEON, ScaleARGBFilterCols_NEON, dst_ptr + n * BPP, r); \ } -#ifdef HAS_SCALEROWDOWN2_SSE2 -SDANY(ScaleRowDown2_Any_SSE2, ScaleRowDown2_SSE2, ScaleRowDown2_C, 2, 1, 15) -SDANY(ScaleRowDown2Linear_Any_SSE2, ScaleRowDown2Linear_SSE2, +// Fixed scale down for odd source width. Used by I420Blend subsampling. +// Since dst_width is (width + 1) / 2, this function scales one less pixel +// and copies the last pixel. +#define SDODD(NAMEANY, SCALEROWDOWN_SIMD, SCALEROWDOWN_C, FACTOR, BPP, MASK) \ + void NAMEANY(const uint8* src_ptr, ptrdiff_t src_stride, \ + uint8* dst_ptr, int dst_width) { \ + int r = (int)((unsigned int)(dst_width - 1) % (MASK + 1)); \ + int n = dst_width - r; \ + if (n > 0) { \ + SCALEROWDOWN_SIMD(src_ptr, src_stride, dst_ptr, n); \ + } \ + SCALEROWDOWN_C(src_ptr + (n * FACTOR) * BPP, src_stride, \ + dst_ptr + n * BPP, r); \ + } + +#ifdef HAS_SCALEROWDOWN2_SSSE3 +SDANY(ScaleRowDown2_Any_SSSE3, ScaleRowDown2_SSSE3, ScaleRowDown2_C, 2, 1, 15) +SDANY(ScaleRowDown2Linear_Any_SSSE3, ScaleRowDown2Linear_SSSE3, ScaleRowDown2Linear_C, 2, 1, 15) -SDANY(ScaleRowDown2Box_Any_SSE2, ScaleRowDown2Box_SSE2, ScaleRowDown2Box_C, +SDANY(ScaleRowDown2Box_Any_SSSE3, ScaleRowDown2Box_SSSE3, ScaleRowDown2Box_C, 2, 1, 15) +SDODD(ScaleRowDown2Box_Odd_SSSE3, ScaleRowDown2Box_SSSE3, + ScaleRowDown2Box_Odd_C, 2, 1, 15) #endif #ifdef HAS_SCALEROWDOWN2_AVX2 SDANY(ScaleRowDown2_Any_AVX2, ScaleRowDown2_AVX2, ScaleRowDown2_C, 2, 1, 31) @@ -68,6 +85,8 @@ SDANY(ScaleRowDown2Linear_Any_AVX2, ScaleRowDown2Linear_AVX2, ScaleRowDown2Linear_C, 2, 1, 31) SDANY(ScaleRowDown2Box_Any_AVX2, ScaleRowDown2Box_AVX2, ScaleRowDown2Box_C, 2, 1, 31) +SDODD(ScaleRowDown2Box_Odd_AVX2, ScaleRowDown2Box_AVX2, ScaleRowDown2Box_Odd_C, + 2, 1, 31) #endif #ifdef HAS_SCALEROWDOWN2_NEON SDANY(ScaleRowDown2_Any_NEON, ScaleRowDown2_NEON, ScaleRowDown2_C, 2, 1, 15) @@ -75,10 +94,12 @@ SDANY(ScaleRowDown2Linear_Any_NEON, ScaleRowDown2Linear_NEON, ScaleRowDown2Linear_C, 2, 1, 15) 
SDANY(ScaleRowDown2Box_Any_NEON, ScaleRowDown2Box_NEON, ScaleRowDown2Box_C, 2, 1, 15) +SDODD(ScaleRowDown2Box_Odd_NEON, ScaleRowDown2Box_NEON, + ScaleRowDown2Box_Odd_C, 2, 1, 15) #endif -#ifdef HAS_SCALEROWDOWN4_SSE2 -SDANY(ScaleRowDown4_Any_SSE2, ScaleRowDown4_SSE2, ScaleRowDown4_C, 4, 1, 7) -SDANY(ScaleRowDown4Box_Any_SSE2, ScaleRowDown4Box_SSE2, ScaleRowDown4Box_C, +#ifdef HAS_SCALEROWDOWN4_SSSE3 +SDANY(ScaleRowDown4_Any_SSSE3, ScaleRowDown4_SSSE3, ScaleRowDown4_C, 4, 1, 7) +SDANY(ScaleRowDown4Box_Any_SSSE3, ScaleRowDown4Box_SSSE3, ScaleRowDown4Box_C, 4, 1, 7) #endif #ifdef HAS_SCALEROWDOWN4_AVX2 diff --git a/libs/libvpx/third_party/libyuv/source/scale_argb.cc b/libs/libvpx/third_party/libyuv/source/scale_argb.cc index 40a2d1ab20..17f51ae9bf 100644 --- a/libs/libvpx/third_party/libyuv/source/scale_argb.cc +++ b/libs/libvpx/third_party/libyuv/source/scale_argb.cc @@ -210,14 +210,6 @@ static void ScaleARGBBilinearDown(int src_width, int src_height, clip_src_width = (int)(xr - xl) * 4; // Width aligned to 4. 
src_argb += xl * 4; x -= (int)(xl << 16); -#if defined(HAS_INTERPOLATEROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - InterpolateRow = InterpolateRow_Any_SSE2; - if (IS_ALIGNED(clip_src_width, 16)) { - InterpolateRow = InterpolateRow_SSE2; - } - } -#endif #if defined(HAS_INTERPOLATEROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { InterpolateRow = InterpolateRow_Any_SSSE3; @@ -242,12 +234,12 @@ static void ScaleARGBBilinearDown(int src_width, int src_height, } } #endif -#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && +#if defined(HAS_INTERPOLATEROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(src_argb, 4) && IS_ALIGNED(src_stride, 4)) { - InterpolateRow = InterpolateRow_Any_MIPS_DSPR2; + InterpolateRow = InterpolateRow_Any_DSPR2; if (IS_ALIGNED(clip_src_width, 4)) { - InterpolateRow = InterpolateRow_MIPS_DSPR2; + InterpolateRow = InterpolateRow_DSPR2; } } #endif @@ -308,14 +300,6 @@ static void ScaleARGBBilinearUp(int src_width, int src_height, int dst_width, int x, int dx) = filtering ? 
ScaleARGBFilterCols_C : ScaleARGBCols_C; const int max_y = (src_height - 1) << 16; -#if defined(HAS_INTERPOLATEROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - InterpolateRow = InterpolateRow_Any_SSE2; - if (IS_ALIGNED(dst_width, 4)) { - InterpolateRow = InterpolateRow_SSE2; - } - } -#endif #if defined(HAS_INTERPOLATEROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { InterpolateRow = InterpolateRow_Any_SSSE3; @@ -340,10 +324,10 @@ static void ScaleARGBBilinearUp(int src_width, int src_height, } } #endif -#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && +#if defined(HAS_INTERPOLATEROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride, 4)) { - InterpolateRow = InterpolateRow_MIPS_DSPR2; + InterpolateRow = InterpolateRow_DSPR2; } #endif if (src_width >= 32768) { @@ -481,27 +465,19 @@ static void ScaleYUVToARGBBilinearUp(int src_width, int src_height, } } #endif -#if defined(HAS_I422TOARGBROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(src_width, 4) && +#if defined(HAS_I422TOARGBROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(src_width, 4) && IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) && IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) && IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) && IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) { - I422ToARGBRow = I422ToARGBRow_MIPS_DSPR2; + I422ToARGBRow = I422ToARGBRow_DSPR2; } #endif void (*InterpolateRow)(uint8* dst_argb, const uint8* src_argb, ptrdiff_t src_stride, int dst_width, int source_y_fraction) = InterpolateRow_C; -#if defined(HAS_INTERPOLATEROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - InterpolateRow = InterpolateRow_Any_SSE2; - if (IS_ALIGNED(dst_width, 4)) { - InterpolateRow = InterpolateRow_SSE2; - } - } -#endif #if defined(HAS_INTERPOLATEROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { InterpolateRow = InterpolateRow_Any_SSSE3; @@ -526,10 +502,10 @@ static void ScaleYUVToARGBBilinearUp(int 
src_width, int src_height, } } #endif -#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && +#if defined(HAS_INTERPOLATEROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) { - InterpolateRow = InterpolateRow_MIPS_DSPR2; + InterpolateRow = InterpolateRow_DSPR2; } #endif @@ -847,6 +823,36 @@ int ARGBScale(const uint8* src_argb, int src_stride_argb, return 0; } +// Scale with YUV conversion to ARGB and clipping. +LIBYUV_API +int YUVToARGBScaleClip(const uint8* src_y, int src_stride_y, + const uint8* src_u, int src_stride_u, + const uint8* src_v, int src_stride_v, + uint32 src_fourcc, + int src_width, int src_height, + uint8* dst_argb, int dst_stride_argb, + uint32 dst_fourcc, + int dst_width, int dst_height, + int clip_x, int clip_y, int clip_width, int clip_height, + enum FilterMode filtering) { + uint8* argb_buffer = (uint8*)malloc(src_width * src_height * 4); + int r; + I420ToARGB(src_y, src_stride_y, + src_u, src_stride_u, + src_v, src_stride_v, + argb_buffer, src_width * 4, + src_width, src_height); + + r = ARGBScaleClip(argb_buffer, src_width * 4, + src_width, src_height, + dst_argb, dst_stride_argb, + dst_width, dst_height, + clip_x, clip_y, clip_width, clip_height, + filtering); + free(argb_buffer); + return r; +} + #ifdef __cplusplus } // extern "C" } // namespace libyuv diff --git a/libs/libvpx/third_party/libyuv/source/scale_common.cc b/libs/libvpx/third_party/libyuv/source/scale_common.cc index 1711f3d54c..3507aa4d9f 100644 --- a/libs/libvpx/third_party/libyuv/source/scale_common.cc +++ b/libs/libvpx/third_party/libyuv/source/scale_common.cc @@ -103,6 +103,28 @@ void ScaleRowDown2Box_C(const uint8* src_ptr, ptrdiff_t src_stride, } } +void ScaleRowDown2Box_Odd_C(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst, int dst_width) { + const uint8* s = src_ptr; + const uint8* t = src_ptr + src_stride; + int x; + dst_width -= 1; + for (x = 0; x < dst_width - 1; x += 2) { + 
dst[0] = (s[0] + s[1] + t[0] + t[1] + 2) >> 2; + dst[1] = (s[2] + s[3] + t[2] + t[3] + 2) >> 2; + dst += 2; + s += 4; + t += 4; + } + if (dst_width & 1) { + dst[0] = (s[0] + s[1] + t[0] + t[1] + 2) >> 2; + dst += 1; + s += 2; + t += 2; + } + dst[0] = (s[0] + t[0] + 1) >> 1; +} + void ScaleRowDown2Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride, uint16* dst, int dst_width) { const uint16* s = src_ptr; @@ -395,8 +417,14 @@ void ScaleColsUp2_16_C(uint16* dst_ptr, const uint16* src_ptr, } // (1-f)a + fb can be replaced with a + f(b-a) +#if defined(__arm__) || defined(__aarch64__) #define BLENDER(a, b, f) (uint8)((int)(a) + \ - ((int)(f) * ((int)(b) - (int)(a)) >> 16)) + ((((int)((f)) * ((int)(b) - (int)(a))) + 0x8000) >> 16)) +#else +// inteluses 7 bit math with rounding. +#define BLENDER(a, b, f) (uint8)((int)(a) + \ + (((int)((f) >> 9) * ((int)(b) - (int)(a)) + 0x40) >> 7)) +#endif void ScaleFilterCols_C(uint8* dst_ptr, const uint8* src_ptr, int dst_width, int x, int dx) { @@ -448,8 +476,9 @@ void ScaleFilterCols64_C(uint8* dst_ptr, const uint8* src_ptr, } #undef BLENDER +// Same as 8 bit arm blender but return is cast to uint16 #define BLENDER(a, b, f) (uint16)((int)(a) + \ - ((int)(f) * ((int)(b) - (int)(a)) >> 16)) + ((((int)((f)) * ((int)(b) - (int)(a))) + 0x8000) >> 16)) void ScaleFilterCols_16_C(uint16* dst_ptr, const uint16* src_ptr, int dst_width, int x, int dx) { @@ -787,6 +816,7 @@ void ScaleARGBColsUp2_C(uint8* dst_argb, const uint8* src_argb, } } +// TODO(fbarchard): Replace 0x7f ^ f with 128-f. bug=607. 
// Mimics SSSE3 blender #define BLENDER1(a, b, f) ((a) * (0x7f ^ f) + (b) * f) >> 7 #define BLENDERC(a, b, f, s) (uint32)( \ @@ -876,14 +906,6 @@ void ScalePlaneVertical(int src_height, assert(dst_width > 0); assert(dst_height > 0); src_argb += (x >> 16) * bpp; -#if defined(HAS_INTERPOLATEROW_SSE2) - if (TestCpuFlag(kCpuHasSSE2)) { - InterpolateRow = InterpolateRow_Any_SSE2; - if (IS_ALIGNED(dst_width_bytes, 16)) { - InterpolateRow = InterpolateRow_SSE2; - } - } -#endif #if defined(HAS_INTERPOLATEROW_SSSE3) if (TestCpuFlag(kCpuHasSSSE3)) { InterpolateRow = InterpolateRow_Any_SSSE3; @@ -908,13 +930,13 @@ void ScalePlaneVertical(int src_height, } } #endif -#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && +#if defined(HAS_INTERPOLATEROW_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(src_argb, 4) && IS_ALIGNED(src_stride, 4) && IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride, 4)) { - InterpolateRow = InterpolateRow_Any_MIPS_DSPR2; + InterpolateRow = InterpolateRow_Any_DSPR2; if (IS_ALIGNED(dst_width_bytes, 4)) { - InterpolateRow = InterpolateRow_MIPS_DSPR2; + InterpolateRow = InterpolateRow_DSPR2; } } #endif @@ -982,13 +1004,13 @@ void ScalePlaneVertical_16(int src_height, } } #endif -#if defined(HAS_INTERPOLATEROW_16_MIPS_DSPR2) - if (TestCpuFlag(kCpuHasMIPS_DSPR2) && +#if defined(HAS_INTERPOLATEROW_16_DSPR2) + if (TestCpuFlag(kCpuHasDSPR2) && IS_ALIGNED(src_argb, 4) && IS_ALIGNED(src_stride, 4) && IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride, 4)) { - InterpolateRow = InterpolateRow_Any_16_MIPS_DSPR2; + InterpolateRow = InterpolateRow_Any_16_DSPR2; if (IS_ALIGNED(dst_width_bytes, 4)) { - InterpolateRow = InterpolateRow_16_MIPS_DSPR2; + InterpolateRow = InterpolateRow_16_DSPR2; } } #endif diff --git a/libs/libvpx/third_party/libyuv/source/scale_gcc.cc b/libs/libvpx/third_party/libyuv/source/scale_gcc.cc index 8a6ac54592..e2f88544b7 100644 --- a/libs/libvpx/third_party/libyuv/source/scale_gcc.cc +++ 
b/libs/libvpx/third_party/libyuv/source/scale_gcc.cc @@ -9,6 +9,7 @@ */ #include "libyuv/row.h" +#include "libyuv/scale_row.h" #ifdef __cplusplus namespace libyuv { @@ -16,7 +17,8 @@ extern "C" { #endif // This module is for GCC x86 and x64. -#if !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__)) +#if !defined(LIBYUV_DISABLE_X86) && \ + (defined(__x86_64__) || (defined(__i386__) && !defined(_MSC_VER))) // Offsets for source bytes 0 to 9 static uvec8 kShuf0 = @@ -96,8 +98,8 @@ static uvec16 kScaleAb2 = // Generated using gcc disassembly on Visual C object file: // objdump -D yuvscaler.obj >yuvscaler.txt -void ScaleRowDown2_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width) { +void ScaleRowDown2_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width) { asm volatile ( LABELALIGN "1: \n" @@ -118,26 +120,24 @@ void ScaleRowDown2_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, ); } -void ScaleRowDown2Linear_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width) { +void ScaleRowDown2Linear_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width) { asm volatile ( - "pcmpeqb %%xmm5,%%xmm5 \n" - "psrlw $0x8,%%xmm5 \n" + "pcmpeqb %%xmm4,%%xmm4 \n" + "psrlw $0xf,%%xmm4 \n" + "packuswb %%xmm4,%%xmm4 \n" + "pxor %%xmm5,%%xmm5 \n" LABELALIGN "1: \n" "movdqu " MEMACCESS(0) ",%%xmm0 \n" "movdqu " MEMACCESS2(0x10, 0) ",%%xmm1 \n" "lea " MEMLEA(0x20,0) ",%0 \n" - "movdqa %%xmm0,%%xmm2 \n" - "psrlw $0x8,%%xmm0 \n" - "movdqa %%xmm1,%%xmm3 \n" - "psrlw $0x8,%%xmm1 \n" - "pand %%xmm5,%%xmm2 \n" - "pand %%xmm5,%%xmm3 \n" - "pavgw %%xmm2,%%xmm0 \n" - "pavgw %%xmm3,%%xmm1 \n" - "packuswb %%xmm1,%%xmm0 \n" + "pmaddubsw %%xmm4,%%xmm0 \n" + "pmaddubsw %%xmm4,%%xmm1 \n" + "pavgw %%xmm5,%%xmm0 \n" + "pavgw %%xmm5,%%xmm1 \n" + "packuswb %%xmm1,%%xmm0 \n" "movdqu %%xmm0," MEMACCESS(1) " \n" "lea " MEMLEA(0x10,1) ",%1 \n" "sub $0x10,%2 \n" @@ -145,15 +145,17 @@ 
void ScaleRowDown2Linear_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, : "+r"(src_ptr), // %0 "+r"(dst_ptr), // %1 "+r"(dst_width) // %2 - :: "memory", "cc", "xmm0", "xmm1", "xmm5" + :: "memory", "cc", "xmm0", "xmm1", "xmm4", "xmm5" ); } -void ScaleRowDown2Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width) { +void ScaleRowDown2Box_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width) { asm volatile ( - "pcmpeqb %%xmm5,%%xmm5 \n" - "psrlw $0x8,%%xmm5 \n" + "pcmpeqb %%xmm4,%%xmm4 \n" + "psrlw $0xf,%%xmm4 \n" + "packuswb %%xmm4,%%xmm4 \n" + "pxor %%xmm5,%%xmm5 \n" LABELALIGN "1: \n" @@ -162,17 +164,17 @@ void ScaleRowDown2Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, MEMOPREG(movdqu,0x00,0,3,1,xmm2) // movdqu (%0,%3,1),%%xmm2 MEMOPREG(movdqu,0x10,0,3,1,xmm3) // movdqu 0x10(%0,%3,1),%%xmm3 "lea " MEMLEA(0x20,0) ",%0 \n" - "pavgb %%xmm2,%%xmm0 \n" - "pavgb %%xmm3,%%xmm1 \n" - "movdqa %%xmm0,%%xmm2 \n" - "psrlw $0x8,%%xmm0 \n" - "movdqa %%xmm1,%%xmm3 \n" - "psrlw $0x8,%%xmm1 \n" - "pand %%xmm5,%%xmm2 \n" - "pand %%xmm5,%%xmm3 \n" - "pavgw %%xmm2,%%xmm0 \n" - "pavgw %%xmm3,%%xmm1 \n" - "packuswb %%xmm1,%%xmm0 \n" + "pmaddubsw %%xmm4,%%xmm0 \n" + "pmaddubsw %%xmm4,%%xmm1 \n" + "pmaddubsw %%xmm4,%%xmm2 \n" + "pmaddubsw %%xmm4,%%xmm3 \n" + "paddw %%xmm2,%%xmm0 \n" + "paddw %%xmm3,%%xmm1 \n" + "psrlw $0x1,%%xmm0 \n" + "psrlw $0x1,%%xmm1 \n" + "pavgw %%xmm5,%%xmm0 \n" + "pavgw %%xmm5,%%xmm1 \n" + "packuswb %%xmm1,%%xmm0 \n" "movdqu %%xmm0," MEMACCESS(1) " \n" "lea " MEMLEA(0x10,1) ",%1 \n" "sub $0x10,%2 \n" @@ -186,7 +188,105 @@ void ScaleRowDown2Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, ); } -void ScaleRowDown4_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, +#ifdef HAS_SCALEROWDOWN2_AVX2 +void ScaleRowDown2_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width) { + asm volatile ( + LABELALIGN + "1: \n" + "vmovdqu " MEMACCESS(0) ",%%ymm0 \n" + "vmovdqu " 
MEMACCESS2(0x20,0) ",%%ymm1 \n" + "lea " MEMLEA(0x40,0) ",%0 \n" + "vpsrlw $0x8,%%ymm0,%%ymm0 \n" + "vpsrlw $0x8,%%ymm1,%%ymm1 \n" + "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n" + "vpermq $0xd8,%%ymm0,%%ymm0 \n" + "vmovdqu %%ymm0," MEMACCESS(1) " \n" + "lea " MEMLEA(0x20,1) ",%1 \n" + "sub $0x20,%2 \n" + "jg 1b \n" + "vzeroupper \n" + : "+r"(src_ptr), // %0 + "+r"(dst_ptr), // %1 + "+r"(dst_width) // %2 + :: "memory", "cc", "xmm0", "xmm1" + ); +} + +void ScaleRowDown2Linear_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width) { + asm volatile ( + "vpcmpeqb %%ymm4,%%ymm4,%%ymm4 \n" + "vpsrlw $0xf,%%ymm4,%%ymm4 \n" + "vpackuswb %%ymm4,%%ymm4,%%ymm4 \n" + "vpxor %%ymm5,%%ymm5,%%ymm5 \n" + + LABELALIGN + "1: \n" + "vmovdqu " MEMACCESS(0) ",%%ymm0 \n" + "vmovdqu " MEMACCESS2(0x20, 0) ",%%ymm1 \n" + "lea " MEMLEA(0x40,0) ",%0 \n" + "vpmaddubsw %%ymm4,%%ymm0,%%ymm0 \n" + "vpmaddubsw %%ymm4,%%ymm1,%%ymm1 \n" + "vpavgw %%ymm5,%%ymm0,%%ymm0 \n" + "vpavgw %%ymm5,%%ymm1,%%ymm1 \n" + "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n" + "vpermq $0xd8,%%ymm0,%%ymm0 \n" + "vmovdqu %%ymm0," MEMACCESS(1) " \n" + "lea " MEMLEA(0x20,1) ",%1 \n" + "sub $0x20,%2 \n" + "jg 1b \n" + "vzeroupper \n" + : "+r"(src_ptr), // %0 + "+r"(dst_ptr), // %1 + "+r"(dst_width) // %2 + :: "memory", "cc", "xmm0", "xmm1", "xmm4", "xmm5" + ); +} + +void ScaleRowDown2Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width) { + asm volatile ( + "vpcmpeqb %%ymm4,%%ymm4,%%ymm4 \n" + "vpsrlw $0xf,%%ymm4,%%ymm4 \n" + "vpackuswb %%ymm4,%%ymm4,%%ymm4 \n" + "vpxor %%ymm5,%%ymm5,%%ymm5 \n" + + LABELALIGN + "1: \n" + "vmovdqu " MEMACCESS(0) ",%%ymm0 \n" + "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n" + MEMOPREG(vmovdqu,0x00,0,3,1,ymm2) // vmovdqu (%0,%3,1),%%ymm2 + MEMOPREG(vmovdqu,0x20,0,3,1,ymm3) // vmovdqu 0x20(%0,%3,1),%%ymm3 + "lea " MEMLEA(0x40,0) ",%0 \n" + "vpmaddubsw %%ymm4,%%ymm0,%%ymm0 \n" + "vpmaddubsw %%ymm4,%%ymm1,%%ymm1 \n" + "vpmaddubsw %%ymm4,%%ymm2,%%ymm2 \n" + 
"vpmaddubsw %%ymm4,%%ymm3,%%ymm3 \n" + "vpaddw %%ymm2,%%ymm0,%%ymm0 \n" + "vpaddw %%ymm3,%%ymm1,%%ymm1 \n" + "vpsrlw $0x1,%%ymm0,%%ymm0 \n" + "vpsrlw $0x1,%%ymm1,%%ymm1 \n" + "vpavgw %%ymm5,%%ymm0,%%ymm0 \n" + "vpavgw %%ymm5,%%ymm1,%%ymm1 \n" + "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n" + "vpermq $0xd8,%%ymm0,%%ymm0 \n" + "vmovdqu %%ymm0," MEMACCESS(1) " \n" + "lea " MEMLEA(0x20,1) ",%1 \n" + "sub $0x20,%2 \n" + "jg 1b \n" + "vzeroupper \n" + : "+r"(src_ptr), // %0 + "+r"(dst_ptr), // %1 + "+r"(dst_width) // %2 + : "r"((intptr_t)(src_stride)) // %3 + : "memory", "cc", NACL_R14 + "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" + ); +} +#endif // HAS_SCALEROWDOWN2_AVX2 + +void ScaleRowDown4_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width) { asm volatile ( "pcmpeqb %%xmm5,%%xmm5 \n" @@ -214,12 +314,15 @@ void ScaleRowDown4_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, ); } -void ScaleRowDown4Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, +void ScaleRowDown4Box_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width) { - intptr_t stridex3 = 0; + intptr_t stridex3; asm volatile ( - "pcmpeqb %%xmm7,%%xmm7 \n" - "psrlw $0x8,%%xmm7 \n" + "pcmpeqb %%xmm4,%%xmm4 \n" + "psrlw $0xf,%%xmm4 \n" + "movdqa %%xmm4,%%xmm5 \n" + "packuswb %%xmm4,%%xmm4 \n" + "psllw $0x3,%%xmm5 \n" "lea " MEMLEA4(0x00,4,4,2) ",%3 \n" LABELALIGN @@ -228,31 +331,29 @@ void ScaleRowDown4Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n" MEMOPREG(movdqu,0x00,0,4,1,xmm2) // movdqu (%0,%4,1),%%xmm2 MEMOPREG(movdqu,0x10,0,4,1,xmm3) // movdqu 0x10(%0,%4,1),%%xmm3 - "pavgb %%xmm2,%%xmm0 \n" - "pavgb %%xmm3,%%xmm1 \n" + "pmaddubsw %%xmm4,%%xmm0 \n" + "pmaddubsw %%xmm4,%%xmm1 \n" + "pmaddubsw %%xmm4,%%xmm2 \n" + "pmaddubsw %%xmm4,%%xmm3 \n" + "paddw %%xmm2,%%xmm0 \n" + "paddw %%xmm3,%%xmm1 \n" MEMOPREG(movdqu,0x00,0,4,2,xmm2) // movdqu (%0,%4,2),%%xmm2 MEMOPREG(movdqu,0x10,0,4,2,xmm3) // movdqu 0x10(%0,%4,2),%%xmm3 - 
MEMOPREG(movdqu,0x00,0,3,1,xmm4) // movdqu (%0,%3,1),%%xmm4 - MEMOPREG(movdqu,0x10,0,3,1,xmm5) // movdqu 0x10(%0,%3,1),%%xmm5 + "pmaddubsw %%xmm4,%%xmm2 \n" + "pmaddubsw %%xmm4,%%xmm3 \n" + "paddw %%xmm2,%%xmm0 \n" + "paddw %%xmm3,%%xmm1 \n" + MEMOPREG(movdqu,0x00,0,3,1,xmm2) // movdqu (%0,%3,1),%%xmm2 + MEMOPREG(movdqu,0x10,0,3,1,xmm3) // movdqu 0x10(%0,%3,1),%%xmm3 "lea " MEMLEA(0x20,0) ",%0 \n" - "pavgb %%xmm4,%%xmm2 \n" - "pavgb %%xmm2,%%xmm0 \n" - "pavgb %%xmm5,%%xmm3 \n" - "pavgb %%xmm3,%%xmm1 \n" - "movdqa %%xmm0,%%xmm2 \n" - "psrlw $0x8,%%xmm0 \n" - "movdqa %%xmm1,%%xmm3 \n" - "psrlw $0x8,%%xmm1 \n" - "pand %%xmm7,%%xmm2 \n" - "pand %%xmm7,%%xmm3 \n" - "pavgw %%xmm2,%%xmm0 \n" - "pavgw %%xmm3,%%xmm1 \n" - "packuswb %%xmm1,%%xmm0 \n" - "movdqa %%xmm0,%%xmm2 \n" - "psrlw $0x8,%%xmm0 \n" - "pand %%xmm7,%%xmm2 \n" - "pavgw %%xmm2,%%xmm0 \n" - "packuswb %%xmm0,%%xmm0 \n" + "pmaddubsw %%xmm4,%%xmm2 \n" + "pmaddubsw %%xmm4,%%xmm3 \n" + "paddw %%xmm2,%%xmm0 \n" + "paddw %%xmm3,%%xmm1 \n" + "phaddw %%xmm1,%%xmm0 \n" + "paddw %%xmm5,%%xmm0 \n" + "psrlw $0x4,%%xmm0 \n" + "packuswb %%xmm0,%%xmm0 \n" "movq %%xmm0," MEMACCESS(1) " \n" "lea " MEMLEA(0x8,1) ",%1 \n" "sub $0x8,%2 \n" @@ -260,13 +361,100 @@ void ScaleRowDown4Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, : "+r"(src_ptr), // %0 "+r"(dst_ptr), // %1 "+r"(dst_width), // %2 - "+r"(stridex3) // %3 + "=&r"(stridex3) // %3 : "r"((intptr_t)(src_stride)) // %4 : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm7" + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" ); } + +#ifdef HAS_SCALEROWDOWN4_AVX2 +void ScaleRowDown4_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width) { + asm volatile ( + "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n" + "vpsrld $0x18,%%ymm5,%%ymm5 \n" + "vpslld $0x10,%%ymm5,%%ymm5 \n" + LABELALIGN + "1: \n" + "vmovdqu " MEMACCESS(0) ",%%ymm0 \n" + "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n" + "lea " MEMLEA(0x40,0) ",%0 \n" + "vpand 
%%ymm5,%%ymm0,%%ymm0 \n" + "vpand %%ymm5,%%ymm1,%%ymm1 \n" + "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n" + "vpermq $0xd8,%%ymm0,%%ymm0 \n" + "vpsrlw $0x8,%%ymm0,%%ymm0 \n" + "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n" + "vpermq $0xd8,%%ymm0,%%ymm0 \n" + "vmovdqu %%xmm0," MEMACCESS(1) " \n" + "lea " MEMLEA(0x10,1) ",%1 \n" + "sub $0x10,%2 \n" + "jg 1b \n" + "vzeroupper \n" + : "+r"(src_ptr), // %0 + "+r"(dst_ptr), // %1 + "+r"(dst_width) // %2 + :: "memory", "cc", "xmm0", "xmm1", "xmm5" + ); +} + +void ScaleRowDown4Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width) { + asm volatile ( + "vpcmpeqb %%ymm4,%%ymm4,%%ymm4 \n" + "vpsrlw $0xf,%%ymm4,%%ymm4 \n" + "vpsllw $0x3,%%ymm4,%%ymm5 \n" + "vpackuswb %%ymm4,%%ymm4,%%ymm4 \n" + + LABELALIGN + "1: \n" + "vmovdqu " MEMACCESS(0) ",%%ymm0 \n" + "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n" + MEMOPREG(vmovdqu,0x00,0,3,1,ymm2) // vmovdqu (%0,%3,1),%%ymm2 + MEMOPREG(vmovdqu,0x20,0,3,1,ymm3) // vmovdqu 0x20(%0,%3,1),%%ymm3 + "vpmaddubsw %%ymm4,%%ymm0,%%ymm0 \n" + "vpmaddubsw %%ymm4,%%ymm1,%%ymm1 \n" + "vpmaddubsw %%ymm4,%%ymm2,%%ymm2 \n" + "vpmaddubsw %%ymm4,%%ymm3,%%ymm3 \n" + "vpaddw %%ymm2,%%ymm0,%%ymm0 \n" + "vpaddw %%ymm3,%%ymm1,%%ymm1 \n" + MEMOPREG(vmovdqu,0x00,0,3,2,ymm2) // vmovdqu (%0,%3,2),%%ymm2 + MEMOPREG(vmovdqu,0x20,0,3,2,ymm3) // vmovdqu 0x20(%0,%3,2),%%ymm3 + "vpmaddubsw %%ymm4,%%ymm2,%%ymm2 \n" + "vpmaddubsw %%ymm4,%%ymm3,%%ymm3 \n" + "vpaddw %%ymm2,%%ymm0,%%ymm0 \n" + "vpaddw %%ymm3,%%ymm1,%%ymm1 \n" + MEMOPREG(vmovdqu,0x00,0,4,1,ymm2) // vmovdqu (%0,%4,1),%%ymm2 + MEMOPREG(vmovdqu,0x20,0,4,1,ymm3) // vmovdqu 0x20(%0,%4,1),%%ymm3 + "lea " MEMLEA(0x40,0) ",%0 \n" + "vpmaddubsw %%ymm4,%%ymm2,%%ymm2 \n" + "vpmaddubsw %%ymm4,%%ymm3,%%ymm3 \n" + "vpaddw %%ymm2,%%ymm0,%%ymm0 \n" + "vpaddw %%ymm3,%%ymm1,%%ymm1 \n" + "vphaddw %%ymm1,%%ymm0,%%ymm0 \n" + "vpermq $0xd8,%%ymm0,%%ymm0 \n" + "vpaddw %%ymm5,%%ymm0,%%ymm0 \n" + "vpsrlw $0x4,%%ymm0,%%ymm0 \n" + "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n" + "vpermq 
$0xd8,%%ymm0,%%ymm0 \n" + "vmovdqu %%xmm0," MEMACCESS(1) " \n" + "lea " MEMLEA(0x10,1) ",%1 \n" + "sub $0x10,%2 \n" + "jg 1b \n" + "vzeroupper \n" + : "+r"(src_ptr), // %0 + "+r"(dst_ptr), // %1 + "+r"(dst_width) // %2 + : "r"((intptr_t)(src_stride)), // %3 + "r"((intptr_t)(src_stride * 3)) // %4 + : "memory", "cc", NACL_R14 + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" + ); +} +#endif // HAS_SCALEROWDOWN4_AVX2 + void ScaleRowDown34_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width) { asm volatile ( @@ -574,61 +762,89 @@ void ScaleRowDown38_3_Box_SSSE3(const uint8* src_ptr, } // Reads 16xN bytes and produces 16 shorts at a time. -void ScaleAddRows_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint16* dst_ptr, int src_width, int src_height) { - int tmp_height = 0; - intptr_t tmp_src = 0; +void ScaleAddRow_SSE2(const uint8* src_ptr, uint16* dst_ptr, int src_width) { asm volatile ( - "mov %0,%3 \n" // row pointer - "mov %5,%2 \n" // height - "pxor %%xmm0,%%xmm0 \n" // clear accumulators - "pxor %%xmm1,%%xmm1 \n" - "pxor %%xmm4,%%xmm4 \n" + "pxor %%xmm5,%%xmm5 \n" LABELALIGN "1: \n" - "movdqu " MEMACCESS(3) ",%%xmm2 \n" - "add %6,%3 \n" - "movdqa %%xmm2,%%xmm3 \n" - "punpcklbw %%xmm4,%%xmm2 \n" - "punpckhbw %%xmm4,%%xmm3 \n" + "movdqu " MEMACCESS(0) ",%%xmm3 \n" + "lea " MEMLEA(0x10,0) ",%0 \n" // src_ptr += 16 + "movdqu " MEMACCESS(1) ",%%xmm0 \n" + "movdqu " MEMACCESS2(0x10,1) ",%%xmm1 \n" + "movdqa %%xmm3,%%xmm2 \n" + "punpcklbw %%xmm5,%%xmm2 \n" + "punpckhbw %%xmm5,%%xmm3 \n" "paddusw %%xmm2,%%xmm0 \n" "paddusw %%xmm3,%%xmm1 \n" - "sub $0x1,%2 \n" - "jg 1b \n" - "movdqu %%xmm0," MEMACCESS(1) " \n" "movdqu %%xmm1," MEMACCESS2(0x10,1) " \n" "lea " MEMLEA(0x20,1) ",%1 \n" - "lea " MEMLEA(0x10,0) ",%0 \n" // src_ptr += 16 - "mov %0,%3 \n" // row pointer - "mov %5,%2 \n" // height - "pxor %%xmm0,%%xmm0 \n" // clear accumulators - "pxor %%xmm1,%%xmm1 \n" - "sub $0x10,%4 \n" + "sub $0x10,%2 \n" "jg 1b \n" : "+r"(src_ptr), // %0 
"+r"(dst_ptr), // %1 - "+r"(tmp_height), // %2 - "+r"(tmp_src), // %3 - "+r"(src_width), // %4 - "+rm"(src_height) // %5 - : "rm"((intptr_t)(src_stride)) // %6 - : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4" + "+r"(src_width) // %2 + : + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" ); } + +#ifdef HAS_SCALEADDROW_AVX2 +// Reads 32 bytes and accumulates to 32 shorts at a time. +void ScaleAddRow_AVX2(const uint8* src_ptr, uint16* dst_ptr, int src_width) { + asm volatile ( + "vpxor %%ymm5,%%ymm5,%%ymm5 \n" + + LABELALIGN + "1: \n" + "vmovdqu " MEMACCESS(0) ",%%ymm3 \n" + "lea " MEMLEA(0x20,0) ",%0 \n" // src_ptr += 32 + "vpermq $0xd8,%%ymm3,%%ymm3 \n" + "vpunpcklbw %%ymm5,%%ymm3,%%ymm2 \n" + "vpunpckhbw %%ymm5,%%ymm3,%%ymm3 \n" + "vpaddusw " MEMACCESS(1) ",%%ymm2,%%ymm0 \n" + "vpaddusw " MEMACCESS2(0x20,1) ",%%ymm3,%%ymm1 \n" + "vmovdqu %%ymm0," MEMACCESS(1) " \n" + "vmovdqu %%ymm1," MEMACCESS2(0x20,1) " \n" + "lea " MEMLEA(0x40,1) ",%1 \n" + "sub $0x20,%2 \n" + "jg 1b \n" + "vzeroupper \n" + : "+r"(src_ptr), // %0 + "+r"(dst_ptr), // %1 + "+r"(src_width) // %2 + : + : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm5" + ); +} +#endif // HAS_SCALEADDROW_AVX2 + +// Constant for making pixels signed to avoid pmaddubsw +// saturation. +static uvec8 kFsub80 = + { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 }; + +// Constant for making pixels unsigned and adding .5 for rounding. +static uvec16 kFadd40 = + { 0x4040, 0x4040, 0x4040, 0x4040, 0x4040, 0x4040, 0x4040, 0x4040 }; + // Bilinear column filtering. SSSE3 version. 
void ScaleFilterCols_SSSE3(uint8* dst_ptr, const uint8* src_ptr, int dst_width, int x, int dx) { - intptr_t x0 = 0, x1 = 0, temp_pixel = 0; + intptr_t x0, x1, temp_pixel; asm volatile ( "movd %6,%%xmm2 \n" "movd %7,%%xmm3 \n" "movl $0x04040000,%k2 \n" "movd %k2,%%xmm5 \n" "pcmpeqb %%xmm6,%%xmm6 \n" - "psrlw $0x9,%%xmm6 \n" + "psrlw $0x9,%%xmm6 \n" // 0x007f007f + "pcmpeqb %%xmm7,%%xmm7 \n" + "psrlw $15,%%xmm7 \n" // 0x00010001 + "pextrw $0x1,%%xmm2,%k3 \n" "subl $0x2,%5 \n" "jl 29f \n" @@ -650,16 +866,19 @@ void ScaleFilterCols_SSSE3(uint8* dst_ptr, const uint8* src_ptr, "movd %k2,%%xmm4 \n" "pshufb %%xmm5,%%xmm1 \n" "punpcklwd %%xmm4,%%xmm0 \n" - "pxor %%xmm6,%%xmm1 \n" - "pmaddubsw %%xmm1,%%xmm0 \n" + "psubb %8,%%xmm0 \n" // make pixels signed. + "pxor %%xmm6,%%xmm1 \n" // 128 -f = (f ^ 127 ) + 1 + "paddusb %%xmm7,%%xmm1 \n" + "pmaddubsw %%xmm0,%%xmm1 \n" "pextrw $0x1,%%xmm2,%k3 \n" "pextrw $0x3,%%xmm2,%k4 \n" - "psrlw $0x7,%%xmm0 \n" - "packuswb %%xmm0,%%xmm0 \n" - "movd %%xmm0,%k2 \n" + "paddw %9,%%xmm1 \n" // make pixels unsigned. + "psrlw $0x7,%%xmm1 \n" + "packuswb %%xmm1,%%xmm1 \n" + "movd %%xmm1,%k2 \n" "mov %w2," MEMACCESS(0) " \n" "lea " MEMLEA(0x2,0) ",%0 \n" - "sub $0x2,%5 \n" + "subl $0x2,%5 \n" "jge 2b \n" LABELALIGN @@ -670,23 +889,37 @@ void ScaleFilterCols_SSSE3(uint8* dst_ptr, const uint8* src_ptr, "movd %k2,%%xmm0 \n" "psrlw $0x9,%%xmm2 \n" "pshufb %%xmm5,%%xmm2 \n" + "psubb %8,%%xmm0 \n" // make pixels signed. "pxor %%xmm6,%%xmm2 \n" - "pmaddubsw %%xmm2,%%xmm0 \n" - "psrlw $0x7,%%xmm0 \n" - "packuswb %%xmm0,%%xmm0 \n" - "movd %%xmm0,%k2 \n" + "paddusb %%xmm7,%%xmm2 \n" + "pmaddubsw %%xmm0,%%xmm2 \n" + "paddw %9,%%xmm2 \n" // make pixels unsigned. 
+ "psrlw $0x7,%%xmm2 \n" + "packuswb %%xmm2,%%xmm2 \n" + "movd %%xmm2,%k2 \n" "mov %b2," MEMACCESS(0) " \n" "99: \n" - : "+r"(dst_ptr), // %0 - "+r"(src_ptr), // %1 - "+a"(temp_pixel), // %2 - "+r"(x0), // %3 - "+r"(x1), // %4 - "+rm"(dst_width) // %5 - : "rm"(x), // %6 - "rm"(dx) // %7 + : "+r"(dst_ptr), // %0 + "+r"(src_ptr), // %1 + "=&a"(temp_pixel), // %2 + "=&r"(x0), // %3 + "=&r"(x1), // %4 +#if defined(__x86_64__) + "+rm"(dst_width) // %5 +#else + "+m"(dst_width) // %5 +#endif + : "rm"(x), // %6 + "rm"(dx), // %7 +#if defined(__x86_64__) + "x"(kFsub80), // %8 + "x"(kFadd40) // %9 +#else + "m"(kFsub80), // %8 + "m"(kFadd40) // %9 +#endif : "memory", "cc", NACL_R14 - "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6" + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" ); } @@ -795,7 +1028,7 @@ void ScaleARGBRowDown2Box_SSE2(const uint8* src_argb, void ScaleARGBRowDownEven_SSE2(const uint8* src_argb, ptrdiff_t src_stride, int src_stepx, uint8* dst_argb, int dst_width) { intptr_t src_stepx_x4 = (intptr_t)(src_stepx); - intptr_t src_stepx_x12 = 0; + intptr_t src_stepx_x12; asm volatile ( "lea " MEMLEA3(0x00,1,4) ",%1 \n" "lea " MEMLEA4(0x00,1,1,2) ",%4 \n" @@ -813,11 +1046,11 @@ void ScaleARGBRowDownEven_SSE2(const uint8* src_argb, ptrdiff_t src_stride, "lea " MEMLEA(0x10,2) ",%2 \n" "sub $0x4,%3 \n" "jg 1b \n" - : "+r"(src_argb), // %0 - "+r"(src_stepx_x4), // %1 - "+r"(dst_argb), // %2 - "+r"(dst_width), // %3 - "+r"(src_stepx_x12) // %4 + : "+r"(src_argb), // %0 + "+r"(src_stepx_x4), // %1 + "+r"(dst_argb), // %2 + "+r"(dst_width), // %3 + "=&r"(src_stepx_x12) // %4 :: "memory", "cc", NACL_R14 "xmm0", "xmm1", "xmm2", "xmm3" ); @@ -829,7 +1062,7 @@ void ScaleARGBRowDownEvenBox_SSE2(const uint8* src_argb, ptrdiff_t src_stride, int src_stepx, uint8* dst_argb, int dst_width) { intptr_t src_stepx_x4 = (intptr_t)(src_stepx); - intptr_t src_stepx_x12 = 0; + intptr_t src_stepx_x12; intptr_t row1 = (intptr_t)(src_stride); asm volatile ( "lea " 
MEMLEA3(0x00,1,4) ",%1 \n" @@ -858,12 +1091,12 @@ void ScaleARGBRowDownEvenBox_SSE2(const uint8* src_argb, "lea " MEMLEA(0x10,2) ",%2 \n" "sub $0x4,%3 \n" "jg 1b \n" - : "+r"(src_argb), // %0 - "+r"(src_stepx_x4), // %1 - "+r"(dst_argb), // %2 - "+rm"(dst_width), // %3 - "+r"(src_stepx_x12), // %4 - "+r"(row1) // %5 + : "+r"(src_argb), // %0 + "+r"(src_stepx_x4), // %1 + "+r"(dst_argb), // %2 + "+rm"(dst_width), // %3 + "=&r"(src_stepx_x12), // %4 + "+r"(row1) // %5 :: "memory", "cc", NACL_R14 "xmm0", "xmm1", "xmm2", "xmm3" ); @@ -871,7 +1104,7 @@ void ScaleARGBRowDownEvenBox_SSE2(const uint8* src_argb, void ScaleARGBCols_SSE2(uint8* dst_argb, const uint8* src_argb, int dst_width, int x, int dx) { - intptr_t x0 = 0, x1 = 0; + intptr_t x0, x1; asm volatile ( "movd %5,%%xmm2 \n" "movd %6,%%xmm3 \n" @@ -924,8 +1157,8 @@ void ScaleARGBCols_SSE2(uint8* dst_argb, const uint8* src_argb, MEMOPREG(movd,0x00,3,0,4,xmm0) // movd (%3,%0,4),%%xmm0 "movd %%xmm0," MEMACCESS(2) " \n" "99: \n" - : "+a"(x0), // %0 - "+d"(x1), // %1 + : "=&a"(x0), // %0 + "=&d"(x1), // %1 "+r"(dst_argb), // %2 "+r"(src_argb), // %3 "+r"(dst_width) // %4 @@ -976,7 +1209,7 @@ static uvec8 kShuffleFractions = { // Bilinear row filtering combines 4x2 -> 4x1. 
SSSE3 version void ScaleARGBFilterCols_SSSE3(uint8* dst_argb, const uint8* src_argb, int dst_width, int x, int dx) { - intptr_t x0 = 0, x1 = 0; + intptr_t x0, x1; asm volatile ( "movdqa %0,%%xmm4 \n" "movdqa %1,%%xmm5 \n" @@ -1039,8 +1272,8 @@ void ScaleARGBFilterCols_SSSE3(uint8* dst_argb, const uint8* src_argb, : "+r"(dst_argb), // %0 "+r"(src_argb), // %1 "+rm"(dst_width), // %2 - "+r"(x0), // %3 - "+r"(x1) // %4 + "=&r"(x0), // %3 + "=&r"(x1) // %4 : "rm"(x), // %5 "rm"(dx) // %6 : "memory", "cc", NACL_R14 diff --git a/libs/libvpx/third_party/libyuv/source/scale_mips.cc b/libs/libvpx/third_party/libyuv/source/scale_mips.cc index 3eb4f27c45..ae953073fa 100644 --- a/libs/libvpx/third_party/libyuv/source/scale_mips.cc +++ b/libs/libvpx/third_party/libyuv/source/scale_mips.cc @@ -21,8 +21,8 @@ extern "C" { defined(__mips_dsp) && (__mips_dsp_rev >= 2) && \ (_MIPS_SIM == _MIPS_SIM_ABI32) -void ScaleRowDown2_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst, int dst_width) { +void ScaleRowDown2_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst, int dst_width) { __asm__ __volatile__( ".set push \n" ".set noreorder \n" @@ -31,7 +31,6 @@ void ScaleRowDown2_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, "beqz $t9, 2f \n" " nop \n" - ".p2align 2 \n" "1: \n" "lw $t0, 0(%[src_ptr]) \n" // |3|2|1|0| "lw $t1, 4(%[src_ptr]) \n" // |7|6|5|4| @@ -78,8 +77,8 @@ void ScaleRowDown2_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, ); } -void ScaleRowDown2Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst, int dst_width) { +void ScaleRowDown2Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst, int dst_width) { const uint8* t = src_ptr + src_stride; __asm__ __volatile__ ( @@ -90,7 +89,6 @@ void ScaleRowDown2Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, "bltz $t9, 2f \n" " nop \n" - ".p2align 2 \n" "1: \n" "lw $t0, 0(%[src_ptr]) \n" // |3|2|1|0| "lw $t1, 4(%[src_ptr]) \n" // |7|6|5|4| @@ -178,8 
+176,8 @@ void ScaleRowDown2Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, ); } -void ScaleRowDown4_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst, int dst_width) { +void ScaleRowDown4_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst, int dst_width) { __asm__ __volatile__ ( ".set push \n" ".set noreorder \n" @@ -188,7 +186,6 @@ void ScaleRowDown4_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, "beqz $t9, 2f \n" " nop \n" - ".p2align 2 \n" "1: \n" "lw $t1, 0(%[src_ptr]) \n" // |3|2|1|0| "lw $t2, 4(%[src_ptr]) \n" // |7|6|5|4| @@ -234,8 +231,8 @@ void ScaleRowDown4_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, ); } -void ScaleRowDown4Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst, int dst_width) { +void ScaleRowDown4Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst, int dst_width) { intptr_t stride = src_stride; const uint8* s1 = src_ptr + stride; const uint8* s2 = s1 + stride; @@ -248,7 +245,6 @@ void ScaleRowDown4Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, "srl $t9, %[dst_width], 1 \n" "andi $t8, %[dst_width], 1 \n" - ".p2align 2 \n" "1: \n" "lw $t0, 0(%[src_ptr]) \n" // |3|2|1|0| "lw $t1, 0(%[s1]) \n" // |7|6|5|4| @@ -314,12 +310,11 @@ void ScaleRowDown4Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, ); } -void ScaleRowDown34_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst, int dst_width) { +void ScaleRowDown34_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst, int dst_width) { __asm__ __volatile__ ( ".set push \n" ".set noreorder \n" - ".p2align 2 \n" "1: \n" "lw $t1, 0(%[src_ptr]) \n" // |3|2|1|0| "lw $t2, 4(%[src_ptr]) \n" // |7|6|5|4| @@ -361,14 +356,13 @@ void ScaleRowDown34_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, ); } -void ScaleRowDown34_0_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* d, int dst_width) { +void ScaleRowDown34_0_Box_DSPR2(const uint8* src_ptr, 
ptrdiff_t src_stride, + uint8* d, int dst_width) { __asm__ __volatile__ ( ".set push \n" ".set noreorder \n" "repl.ph $t3, 3 \n" // 0x00030003 - ".p2align 2 \n" "1: \n" "lw $t0, 0(%[src_ptr]) \n" // |S3|S2|S1|S0| "lwx $t1, %[src_stride](%[src_ptr]) \n" // |T3|T2|T1|T0| @@ -418,14 +412,13 @@ void ScaleRowDown34_0_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, ); } -void ScaleRowDown34_1_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* d, int dst_width) { +void ScaleRowDown34_1_Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* d, int dst_width) { __asm__ __volatile__ ( ".set push \n" ".set noreorder \n" "repl.ph $t2, 3 \n" // 0x00030003 - ".p2align 2 \n" "1: \n" "lw $t0, 0(%[src_ptr]) \n" // |S3|S2|S1|S0| "lwx $t1, %[src_stride](%[src_ptr]) \n" // |T3|T2|T1|T0| @@ -471,13 +464,12 @@ void ScaleRowDown34_1_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, ); } -void ScaleRowDown38_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst, int dst_width) { +void ScaleRowDown38_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst, int dst_width) { __asm__ __volatile__ ( ".set push \n" ".set noreorder \n" - ".p2align 2 \n" "1: \n" "lw $t0, 0(%[src_ptr]) \n" // |3|2|1|0| "lw $t1, 4(%[src_ptr]) \n" // |7|6|5|4| @@ -518,8 +510,8 @@ void ScaleRowDown38_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, ); } -void ScaleRowDown38_2_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width) { +void ScaleRowDown38_2_Box_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width) { intptr_t stride = src_stride; const uint8* t = src_ptr + stride; const int c = 0x2AAA; @@ -528,7 +520,6 @@ void ScaleRowDown38_2_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, ".set push \n" ".set noreorder \n" - ".p2align 2 \n" "1: \n" "lw $t0, 0(%[src_ptr]) \n" // |S3|S2|S1|S0| "lw $t1, 4(%[src_ptr]) \n" // |S7|S6|S5|S4| @@ -572,9 +563,9 @@ void 
ScaleRowDown38_2_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride, ); } -void ScaleRowDown38_3_Box_MIPS_DSPR2(const uint8* src_ptr, - ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width) { +void ScaleRowDown38_3_Box_DSPR2(const uint8* src_ptr, + ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width) { intptr_t stride = src_stride; const uint8* s1 = src_ptr + stride; stride += stride; @@ -586,7 +577,6 @@ void ScaleRowDown38_3_Box_MIPS_DSPR2(const uint8* src_ptr, ".set push \n" ".set noreorder \n" - ".p2align 2 \n" "1: \n" "lw $t0, 0(%[src_ptr]) \n" // |S3|S2|S1|S0| "lw $t1, 4(%[src_ptr]) \n" // |S7|S6|S5|S4| diff --git a/libs/libvpx/third_party/libyuv/source/scale_neon.cc b/libs/libvpx/third_party/libyuv/source/scale_neon.cc index 7825878e98..44b0c8080d 100644 --- a/libs/libvpx/third_party/libyuv/source/scale_neon.cc +++ b/libs/libvpx/third_party/libyuv/source/scale_neon.cc @@ -26,7 +26,6 @@ extern "C" { void ScaleRowDown2_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width) { asm volatile ( - ".p2align 2 \n" "1: \n" // load even pixels into q0, odd into q1 MEMACCESS(0) @@ -47,7 +46,6 @@ void ScaleRowDown2_NEON(const uint8* src_ptr, ptrdiff_t src_stride, void ScaleRowDown2Linear_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0, q1}, [%0]! \n" // load pixels and post inc @@ -73,7 +71,6 @@ void ScaleRowDown2Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride, asm volatile ( // change the stride to row 2 pointer "add %1, %0 \n" - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0, q1}, [%0]! \n" // load row 1 and post inc @@ -101,7 +98,6 @@ void ScaleRowDown2Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride, void ScaleRowDown4_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! 
\n" // src line 0 @@ -123,7 +119,6 @@ void ScaleRowDown4Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride, const uint8* src_ptr2 = src_ptr + src_stride * 2; const uint8* src_ptr3 = src_ptr + src_stride * 3; asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {q0}, [%0]! \n" // load up 16x4 @@ -162,7 +157,6 @@ void ScaleRowDown34_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // src line 0 @@ -185,7 +179,6 @@ void ScaleRowDown34_0_Box_NEON(const uint8* src_ptr, asm volatile ( "vmov.u8 d24, #3 \n" "add %3, %0 \n" - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // src line 0 @@ -245,7 +238,6 @@ void ScaleRowDown34_1_Box_NEON(const uint8* src_ptr, asm volatile ( "vmov.u8 d24, #3 \n" "add %3, %0 \n" - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // src line 0 @@ -300,7 +292,6 @@ void ScaleRowDown38_NEON(const uint8* src_ptr, asm volatile ( MEMACCESS(3) "vld1.8 {q3}, [%3] \n" - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {d0, d1, d2, d3}, [%0]! 
\n" @@ -334,7 +325,6 @@ void OMITFP ScaleRowDown38_3_Box_NEON(const uint8* src_ptr, MEMACCESS(7) "vld1.8 {q15}, [%7] \n" "add %3, %0 \n" - ".p2align 2 \n" "1: \n" // d0 = 00 40 01 41 02 42 03 43 @@ -450,7 +440,6 @@ void ScaleRowDown38_2_Box_NEON(const uint8* src_ptr, MEMACCESS(5) "vld1.8 {q14}, [%5] \n" "add %3, %0 \n" - ".p2align 2 \n" "1: \n" // d0 = 00 40 01 41 02 42 03 43 @@ -543,9 +532,8 @@ void ScaleRowDown38_2_Box_NEON(const uint8* src_ptr, void ScaleAddRows_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint16* dst_ptr, int src_width, int src_height) { - const uint8* src_tmp = NULL; + const uint8* src_tmp; asm volatile ( - ".p2align 2 \n" "1: \n" "mov %0, %1 \n" "mov r12, %5 \n" @@ -564,12 +552,12 @@ void ScaleAddRows_NEON(const uint8* src_ptr, ptrdiff_t src_stride, "add %1, %1, #16 \n" "subs %4, %4, #16 \n" // 16 processed per loop "bgt 1b \n" - : "+r"(src_tmp), // %0 - "+r"(src_ptr), // %1 - "+r"(dst_ptr), // %2 - "+r"(src_stride), // %3 - "+r"(src_width), // %4 - "+r"(src_height) // %5 + : "=&r"(src_tmp), // %0 + "+r"(src_ptr), // %1 + "+r"(dst_ptr), // %2 + "+r"(src_stride), // %3 + "+r"(src_width), // %4 + "+r"(src_height) // %5 : : "memory", "cc", "r12", "q0", "q1", "q2", "q3" // Clobber List ); @@ -584,13 +572,16 @@ void ScaleAddRows_NEON(const uint8* src_ptr, ptrdiff_t src_stride, MEMACCESS(6) \ "vld2.8 {d6["#n"], d7["#n"]}, [%6] \n" +// The NEON version mimics this formula: +// #define BLENDER(a, b, f) (uint8)((int)(a) + +// ((int)(f) * ((int)(b) - (int)(a)) >> 16)) + void ScaleFilterCols_NEON(uint8* dst_ptr, const uint8* src_ptr, int dst_width, int x, int dx) { int dx_offset[4] = {0, 1, 2, 3}; int* tmp = dx_offset; const uint8* src_tmp = src_ptr; asm volatile ( - ".p2align 2 \n" "vdup.32 q0, %3 \n" // x "vdup.32 q1, %4 \n" // dx "vld1.32 {q2}, [%5] \n" // 0 1 2 3 @@ -621,8 +612,8 @@ void ScaleFilterCols_NEON(uint8* dst_ptr, const uint8* src_ptr, "vmovl.u16 q10, d21 \n" "vmul.s32 q11, q11, q13 \n" "vmul.s32 q12, q12, q10 \n" - "vshrn.s32 d18, 
q11, #16 \n" - "vshrn.s32 d19, q12, #16 \n" + "vrshrn.s32 d18, q11, #16 \n" + "vrshrn.s32 d19, q12, #16 \n" "vadd.s16 q8, q8, q9 \n" "vmovn.s16 d6, q8 \n" @@ -749,7 +740,6 @@ void ScaleFilterRows_NEON(uint8* dst_ptr, void ScaleARGBRowDown2_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst, int dst_width) { asm volatile ( - ".p2align 2 \n" "1: \n" // load even pixels into q0, odd into q1 MEMACCESS(0) @@ -773,7 +763,6 @@ void ScaleARGBRowDown2_NEON(const uint8* src_ptr, ptrdiff_t src_stride, void ScaleARGBRowDown2Linear_NEON(const uint8* src_argb, ptrdiff_t src_stride, uint8* dst_argb, int dst_width) { asm volatile ( - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels. @@ -804,7 +793,6 @@ void ScaleARGBRowDown2Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride, asm volatile ( // change the stride to row 2 pointer "add %1, %1, %0 \n" - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels. @@ -845,7 +833,6 @@ void ScaleARGBRowDownEven_NEON(const uint8* src_argb, ptrdiff_t src_stride, int src_stepx, uint8* dst_argb, int dst_width) { asm volatile ( "mov r12, %3, lsl #2 \n" - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.32 {d0[0]}, [%0], r12 \n" @@ -875,7 +862,6 @@ void ScaleARGBRowDownEvenBox_NEON(const uint8* src_argb, ptrdiff_t src_stride, asm volatile ( "mov r12, %4, lsl #2 \n" "add %1, %1, %0 \n" - ".p2align 2 \n" "1: \n" MEMACCESS(0) "vld1.8 {d0}, [%0], r12 \n" // Read 4 2x2 blocks -> 2x1 @@ -927,10 +913,9 @@ void ScaleARGBRowDownEvenBox_NEON(const uint8* src_argb, ptrdiff_t src_stride, void ScaleARGBCols_NEON(uint8* dst_argb, const uint8* src_argb, int dst_width, int x, int dx) { - int tmp = 0; + int tmp; const uint8* src_tmp = src_argb; asm volatile ( - ".p2align 2 \n" "1: \n" LOAD1_DATA32_LANE(d0, 0) LOAD1_DATA32_LANE(d0, 1) @@ -945,13 +930,13 @@ void ScaleARGBCols_NEON(uint8* dst_argb, const uint8* src_argb, "vst1.32 {q0, q1}, [%0]! 
\n" // store pixels "subs %2, %2, #8 \n" // 8 processed per loop "bgt 1b \n" - : "+r"(dst_argb), // %0 - "+r"(src_argb), // %1 - "+r"(dst_width), // %2 - "+r"(x), // %3 - "+r"(dx), // %4 - "+r"(tmp), // %5 - "+r"(src_tmp) // %6 + : "+r"(dst_argb), // %0 + "+r"(src_argb), // %1 + "+r"(dst_width), // %2 + "+r"(x), // %3 + "+r"(dx), // %4 + "=&r"(tmp), // %5 + "+r"(src_tmp) // %6 : : "memory", "cc", "q0", "q1" ); @@ -974,7 +959,6 @@ void ScaleARGBFilterCols_NEON(uint8* dst_argb, const uint8* src_argb, int* tmp = dx_offset; const uint8* src_tmp = src_argb; asm volatile ( - ".p2align 2 \n" "vdup.32 q0, %3 \n" // x "vdup.32 q1, %4 \n" // dx "vld1.32 {q2}, [%5] \n" // 0 1 2 3 diff --git a/libs/libvpx/third_party/libyuv/source/scale_neon64.cc b/libs/libvpx/third_party/libyuv/source/scale_neon64.cc index 1d55193579..ff277f26ff 100644 --- a/libs/libvpx/third_party/libyuv/source/scale_neon64.cc +++ b/libs/libvpx/third_party/libyuv/source/scale_neon64.cc @@ -547,7 +547,7 @@ void ScaleRowDown38_2_Box_NEON(const uint8* src_ptr, void ScaleAddRows_NEON(const uint8* src_ptr, ptrdiff_t src_stride, uint16* dst_ptr, int src_width, int src_height) { - const uint8* src_tmp = NULL; + const uint8* src_tmp; asm volatile ( "1: \n" "mov %0, %1 \n" @@ -567,12 +567,12 @@ void ScaleAddRows_NEON(const uint8* src_ptr, ptrdiff_t src_stride, "add %1, %1, #16 \n" "subs %w4, %w4, #16 \n" // 16 processed per loop "b.gt 1b \n" - : "+r"(src_tmp), // %0 - "+r"(src_ptr), // %1 - "+r"(dst_ptr), // %2 - "+r"(src_stride), // %3 - "+r"(src_width), // %4 - "+r"(src_height) // %5 + : "=&r"(src_tmp), // %0 + "+r"(src_ptr), // %1 + "+r"(dst_ptr), // %2 + "+r"(src_stride), // %3 + "+r"(src_width), // %4 + "+r"(src_height) // %5 : : "memory", "cc", "w12", "v0", "v1", "v2", "v3" // Clobber List ); @@ -626,8 +626,8 @@ void ScaleFilterCols_NEON(uint8* dst_ptr, const uint8* src_ptr, "ushll2 v6.4s, v6.8h, #0 \n" "mul v16.4s, v16.4s, v7.4s \n" "mul v17.4s, v17.4s, v6.4s \n" - "shrn v6.4h, v16.4s, #16 \n" - "shrn2 v6.8h, 
v17.4s, #16 \n" + "rshrn v6.4h, v16.4s, #16 \n" + "rshrn2 v6.8h, v17.4s, #16 \n" "add v4.8h, v4.8h, v6.8h \n" "xtn v4.8b, v4.8h \n" @@ -931,7 +931,7 @@ void ScaleARGBCols_NEON(uint8* dst_argb, const uint8* src_argb, int64 dst_width64 = (int64) dst_width; // Work around ios 64 bit warning. int64 x64 = (int64) x; int64 dx64 = (int64) dx; - int64 tmp64 = 0; + int64 tmp64; asm volatile ( "1: \n" LOAD1_DATA32_LANE(v0, 0) @@ -947,13 +947,13 @@ void ScaleARGBCols_NEON(uint8* dst_argb, const uint8* src_argb, "st1 {v0.4s, v1.4s}, [%0], #32 \n" // store pixels "subs %w2, %w2, #8 \n" // 8 processed per loop "b.gt 1b \n" - : "+r"(dst_argb), // %0 - "+r"(src_argb), // %1 - "+r"(dst_width64), // %2 - "+r"(x64), // %3 - "+r"(dx64), // %4 - "+r"(tmp64), // %5 - "+r"(src_tmp) // %6 + : "+r"(dst_argb), // %0 + "+r"(src_argb), // %1 + "+r"(dst_width64), // %2 + "+r"(x64), // %3 + "+r"(dx64), // %4 + "=&r"(tmp64), // %5 + "+r"(src_tmp) // %6 : : "memory", "cc", "v0", "v1" ); diff --git a/libs/libvpx/third_party/libyuv/source/scale_win.cc b/libs/libvpx/third_party/libyuv/source/scale_win.cc index c3896ebad2..f17097365c 100644 --- a/libs/libvpx/third_party/libyuv/source/scale_win.cc +++ b/libs/libvpx/third_party/libyuv/source/scale_win.cc @@ -16,9 +16,8 @@ namespace libyuv { extern "C" { #endif -// This module is for Visual C x86. -#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && \ - defined(_MSC_VER) && !defined(__clang__) +// This module is for 32 bit Visual C x86 and clangcl +#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) // Offsets for source bytes 0 to 9 static uvec8 kShuf0 = @@ -96,8 +95,8 @@ static uvec16 kScaleAb2 = // Reads 32 pixels, throws half away and writes 16 pixels. 
__declspec(naked) -void ScaleRowDown2_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width) { +void ScaleRowDown2_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width) { __asm { mov eax, [esp + 4] // src_ptr // src_stride ignored @@ -122,31 +121,28 @@ void ScaleRowDown2_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, // Blends 32x1 rectangle to 16x1. __declspec(naked) -void ScaleRowDown2Linear_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width) { +void ScaleRowDown2Linear_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width) { __asm { mov eax, [esp + 4] // src_ptr // src_stride mov edx, [esp + 12] // dst_ptr mov ecx, [esp + 16] // dst_width - pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff - psrlw xmm5, 8 + + pcmpeqb xmm4, xmm4 // constant 0x0101 + psrlw xmm4, 15 + packuswb xmm4, xmm4 + pxor xmm5, xmm5 // constant 0 wloop: movdqu xmm0, [eax] movdqu xmm1, [eax + 16] lea eax, [eax + 32] - - movdqa xmm2, xmm0 // average columns (32 to 16 pixels) - psrlw xmm0, 8 - movdqa xmm3, xmm1 - psrlw xmm1, 8 - pand xmm2, xmm5 - pand xmm3, xmm5 - pavgw xmm0, xmm2 - pavgw xmm1, xmm3 + pmaddubsw xmm0, xmm4 // horizontal add + pmaddubsw xmm1, xmm4 + pavgw xmm0, xmm5 // (x + 1) / 2 + pavgw xmm1, xmm5 packuswb xmm0, xmm1 - movdqu [edx], xmm0 lea edx, [edx + 16] sub ecx, 16 @@ -158,16 +154,19 @@ void ScaleRowDown2Linear_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, // Blends 32x2 rectangle to 16x1. 
__declspec(naked) -void ScaleRowDown2Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, - uint8* dst_ptr, int dst_width) { +void ScaleRowDown2Box_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, + uint8* dst_ptr, int dst_width) { __asm { push esi mov eax, [esp + 4 + 4] // src_ptr mov esi, [esp + 4 + 8] // src_stride mov edx, [esp + 4 + 12] // dst_ptr mov ecx, [esp + 4 + 16] // dst_width - pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff - psrlw xmm5, 8 + + pcmpeqb xmm4, xmm4 // constant 0x0101 + psrlw xmm4, 15 + packuswb xmm4, xmm4 + pxor xmm5, xmm5 // constant 0 wloop: movdqu xmm0, [eax] @@ -175,19 +174,17 @@ void ScaleRowDown2Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, movdqu xmm2, [eax + esi] movdqu xmm3, [eax + esi + 16] lea eax, [eax + 32] - pavgb xmm0, xmm2 // average rows - pavgb xmm1, xmm3 - - movdqa xmm2, xmm0 // average columns (32 to 16 pixels) - psrlw xmm0, 8 - movdqa xmm3, xmm1 - psrlw xmm1, 8 - pand xmm2, xmm5 - pand xmm3, xmm5 - pavgw xmm0, xmm2 - pavgw xmm1, xmm3 + pmaddubsw xmm0, xmm4 // horizontal add + pmaddubsw xmm1, xmm4 + pmaddubsw xmm2, xmm4 + pmaddubsw xmm3, xmm4 + paddw xmm0, xmm2 // vertical add + paddw xmm1, xmm3 + psrlw xmm0, 1 + psrlw xmm1, 1 + pavgw xmm0, xmm5 // (x + 1) / 2 + pavgw xmm1, xmm5 packuswb xmm0, xmm1 - movdqu [edx], xmm0 lea edx, [edx + 16] sub ecx, 16 @@ -246,14 +243,12 @@ void ScaleRowDown2Linear_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, vmovdqu ymm0, [eax] vmovdqu ymm1, [eax + 32] lea eax, [eax + 64] - - vpmaddubsw ymm0, ymm0, ymm4 // average horizontally + vpmaddubsw ymm0, ymm0, ymm4 // horizontal add vpmaddubsw ymm1, ymm1, ymm4 vpavgw ymm0, ymm0, ymm5 // (x + 1) / 2 vpavgw ymm1, ymm1, ymm5 vpackuswb ymm0, ymm0, ymm1 vpermq ymm0, ymm0, 0xd8 // unmutate vpackuswb - vmovdqu [edx], ymm0 lea edx, [edx + 32] sub ecx, 32 @@ -264,6 +259,8 @@ void ScaleRowDown2Linear_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, } } +// For rounding, average = (sum + 2) / 4 +// becomes average((sum >> 1), 0) // Blends 64x2 
rectangle to 32x1. __declspec(naked) void ScaleRowDown2Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, @@ -281,19 +278,23 @@ void ScaleRowDown2Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, vpxor ymm5, ymm5, ymm5 // constant 0 wloop: - vmovdqu ymm0, [eax] // average rows + vmovdqu ymm0, [eax] vmovdqu ymm1, [eax + 32] - vpavgb ymm0, ymm0, [eax + esi] - vpavgb ymm1, ymm1, [eax + esi + 32] + vmovdqu ymm2, [eax + esi] + vmovdqu ymm3, [eax + esi + 32] lea eax, [eax + 64] - - vpmaddubsw ymm0, ymm0, ymm4 // average horizontally + vpmaddubsw ymm0, ymm0, ymm4 // horizontal add vpmaddubsw ymm1, ymm1, ymm4 + vpmaddubsw ymm2, ymm2, ymm4 + vpmaddubsw ymm3, ymm3, ymm4 + vpaddw ymm0, ymm0, ymm2 // vertical add + vpaddw ymm1, ymm1, ymm3 + vpsrlw ymm0, ymm0, 1 // (x + 2) / 4 = (x / 2 + 1) / 2 + vpsrlw ymm1, ymm1, 1 vpavgw ymm0, ymm0, ymm5 // (x + 1) / 2 vpavgw ymm1, ymm1, ymm5 vpackuswb ymm0, ymm0, ymm1 vpermq ymm0, ymm0, 0xd8 // unmutate vpackuswb - vmovdqu [edx], ymm0 lea edx, [edx + 32] sub ecx, 32 @@ -308,7 +309,7 @@ void ScaleRowDown2Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride, // Point samples 32 pixels to 8 pixels. __declspec(naked) -void ScaleRowDown4_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, +void ScaleRowDown4_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width) { __asm { mov eax, [esp + 4] // src_ptr @@ -339,7 +340,7 @@ void ScaleRowDown4_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, // Blends 32x4 rectangle to 8x1. 
__declspec(naked) -void ScaleRowDown4Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, +void ScaleRowDown4Box_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, uint8* dst_ptr, int dst_width) { __asm { push esi @@ -349,42 +350,40 @@ void ScaleRowDown4Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride, mov edx, [esp + 8 + 12] // dst_ptr mov ecx, [esp + 8 + 16] // dst_width lea edi, [esi + esi * 2] // src_stride * 3 - pcmpeqb xmm7, xmm7 // generate mask 0x00ff00ff - psrlw xmm7, 8 + pcmpeqb xmm4, xmm4 // constant 0x0101 + psrlw xmm4, 15 + movdqa xmm5, xmm4 + packuswb xmm4, xmm4 + psllw xmm5, 3 // constant 0x0008 wloop: movdqu xmm0, [eax] // average rows movdqu xmm1, [eax + 16] movdqu xmm2, [eax + esi] movdqu xmm3, [eax + esi + 16] - pavgb xmm0, xmm2 - pavgb xmm1, xmm3 + pmaddubsw xmm0, xmm4 // horizontal add + pmaddubsw xmm1, xmm4 + pmaddubsw xmm2, xmm4 + pmaddubsw xmm3, xmm4 + paddw xmm0, xmm2 // vertical add rows 0, 1 + paddw xmm1, xmm3 movdqu xmm2, [eax + esi * 2] movdqu xmm3, [eax + esi * 2 + 16] - movdqu xmm4, [eax + edi] - movdqu xmm5, [eax + edi + 16] + pmaddubsw xmm2, xmm4 + pmaddubsw xmm3, xmm4 + paddw xmm0, xmm2 // add row 2 + paddw xmm1, xmm3 + movdqu xmm2, [eax + edi] + movdqu xmm3, [eax + edi + 16] lea eax, [eax + 32] - pavgb xmm2, xmm4 - pavgb xmm3, xmm5 - pavgb xmm0, xmm2 - pavgb xmm1, xmm3 - - movdqa xmm2, xmm0 // average columns (32 to 16 pixels) - psrlw xmm0, 8 - movdqa xmm3, xmm1 - psrlw xmm1, 8 - pand xmm2, xmm7 - pand xmm3, xmm7 - pavgw xmm0, xmm2 - pavgw xmm1, xmm3 - packuswb xmm0, xmm1 - - movdqa xmm2, xmm0 // average columns (16 to 8 pixels) - psrlw xmm0, 8 - pand xmm2, xmm7 - pavgw xmm0, xmm2 + pmaddubsw xmm2, xmm4 + pmaddubsw xmm3, xmm4 + paddw xmm0, xmm2 // add row 3 + paddw xmm1, xmm3 + phaddw xmm0, xmm1 + paddw xmm0, xmm5 // + 8 for round + psrlw xmm0, 4 // /16 for average of 4 * 4 packuswb xmm0, xmm0 - movq qword ptr [edx], xmm0 lea edx, [edx + 8] sub ecx, 8 @@ -443,37 +442,41 @@ void ScaleRowDown4Box_AVX2(const uint8* src_ptr, 
ptrdiff_t src_stride, mov edx, [esp + 8 + 12] // dst_ptr mov ecx, [esp + 8 + 16] // dst_width lea edi, [esi + esi * 2] // src_stride * 3 - vpcmpeqb ymm7, ymm7, ymm7 // generate mask 0x00ff00ff - vpsrlw ymm7, ymm7, 8 + vpcmpeqb ymm4, ymm4, ymm4 // constant 0x0101 + vpsrlw ymm4, ymm4, 15 + vpsllw ymm5, ymm4, 3 // constant 0x0008 + vpackuswb ymm4, ymm4, ymm4 wloop: vmovdqu ymm0, [eax] // average rows vmovdqu ymm1, [eax + 32] - vpavgb ymm0, ymm0, [eax + esi] - vpavgb ymm1, ymm1, [eax + esi + 32] + vmovdqu ymm2, [eax + esi] + vmovdqu ymm3, [eax + esi + 32] + vpmaddubsw ymm0, ymm0, ymm4 // horizontal add + vpmaddubsw ymm1, ymm1, ymm4 + vpmaddubsw ymm2, ymm2, ymm4 + vpmaddubsw ymm3, ymm3, ymm4 + vpaddw ymm0, ymm0, ymm2 // vertical add rows 0, 1 + vpaddw ymm1, ymm1, ymm3 vmovdqu ymm2, [eax + esi * 2] vmovdqu ymm3, [eax + esi * 2 + 32] - vpavgb ymm2, ymm2, [eax + edi] - vpavgb ymm3, ymm3, [eax + edi + 32] - lea eax, [eax + 64] - vpavgb ymm0, ymm0, ymm2 - vpavgb ymm1, ymm1, ymm3 - - vpand ymm2, ymm0, ymm7 // average columns (64 to 32 pixels) - vpand ymm3, ymm1, ymm7 - vpsrlw ymm0, ymm0, 8 - vpsrlw ymm1, ymm1, 8 - vpavgw ymm0, ymm0, ymm2 - vpavgw ymm1, ymm1, ymm3 - vpackuswb ymm0, ymm0, ymm1 - vpermq ymm0, ymm0, 0xd8 // unmutate vpackuswb - - vpand ymm2, ymm0, ymm7 // average columns (32 to 16 pixels) - vpsrlw ymm0, ymm0, 8 - vpavgw ymm0, ymm0, ymm2 + vpmaddubsw ymm2, ymm2, ymm4 + vpmaddubsw ymm3, ymm3, ymm4 + vpaddw ymm0, ymm0, ymm2 // add row 2 + vpaddw ymm1, ymm1, ymm3 + vmovdqu ymm2, [eax + edi] + vmovdqu ymm3, [eax + edi + 32] + lea eax, [eax + 64] + vpmaddubsw ymm2, ymm2, ymm4 + vpmaddubsw ymm3, ymm3, ymm4 + vpaddw ymm0, ymm0, ymm2 // add row 3 + vpaddw ymm1, ymm1, ymm3 + vphaddw ymm0, ymm0, ymm1 // mutates + vpermq ymm0, ymm0, 0xd8 // unmutate vphaddw + vpaddw ymm0, ymm0, ymm5 // + 8 for round + vpsrlw ymm0, ymm0, 4 // /32 for average of 4 * 4 vpackuswb ymm0, ymm0, ymm0 vpermq ymm0, ymm0, 0xd8 // unmutate vpackuswb - vmovdqu [edx], xmm0 lea edx, [edx + 16] sub ecx, 16 
@@ -499,9 +502,9 @@ void ScaleRowDown34_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, // src_stride ignored mov edx, [esp + 12] // dst_ptr mov ecx, [esp + 16] // dst_width - movdqa xmm3, kShuf0 - movdqa xmm4, kShuf1 - movdqa xmm5, kShuf2 + movdqa xmm3, xmmword ptr kShuf0 + movdqa xmm4, xmmword ptr kShuf1 + movdqa xmm5, xmmword ptr kShuf2 wloop: movdqu xmm0, [eax] @@ -548,12 +551,12 @@ void ScaleRowDown34_1_Box_SSSE3(const uint8* src_ptr, mov esi, [esp + 4 + 8] // src_stride mov edx, [esp + 4 + 12] // dst_ptr mov ecx, [esp + 4 + 16] // dst_width - movdqa xmm2, kShuf01 - movdqa xmm3, kShuf11 - movdqa xmm4, kShuf21 - movdqa xmm5, kMadd01 - movdqa xmm6, kMadd11 - movdqa xmm7, kRound34 + movdqa xmm2, xmmword ptr kShuf01 + movdqa xmm3, xmmword ptr kShuf11 + movdqa xmm4, xmmword ptr kShuf21 + movdqa xmm5, xmmword ptr kMadd01 + movdqa xmm6, xmmword ptr kMadd11 + movdqa xmm7, xmmword ptr kRound34 wloop: movdqu xmm0, [eax] // pixels 0..7 @@ -579,7 +582,7 @@ void ScaleRowDown34_1_Box_SSSE3(const uint8* src_ptr, lea eax, [eax + 32] pavgb xmm0, xmm1 pshufb xmm0, xmm4 - movdqa xmm1, kMadd21 + movdqa xmm1, xmmword ptr kMadd21 pmaddubsw xmm0, xmm1 paddsw xmm0, xmm7 psrlw xmm0, 2 @@ -605,12 +608,12 @@ void ScaleRowDown34_0_Box_SSSE3(const uint8* src_ptr, mov esi, [esp + 4 + 8] // src_stride mov edx, [esp + 4 + 12] // dst_ptr mov ecx, [esp + 4 + 16] // dst_width - movdqa xmm2, kShuf01 - movdqa xmm3, kShuf11 - movdqa xmm4, kShuf21 - movdqa xmm5, kMadd01 - movdqa xmm6, kMadd11 - movdqa xmm7, kRound34 + movdqa xmm2, xmmword ptr kShuf01 + movdqa xmm3, xmmword ptr kShuf11 + movdqa xmm4, xmmword ptr kShuf21 + movdqa xmm5, xmmword ptr kMadd01 + movdqa xmm6, xmmword ptr kMadd11 + movdqa xmm7, xmmword ptr kRound34 wloop: movdqu xmm0, [eax] // pixels 0..7 @@ -639,7 +642,7 @@ void ScaleRowDown34_0_Box_SSSE3(const uint8* src_ptr, pavgb xmm1, xmm0 pavgb xmm0, xmm1 pshufb xmm0, xmm4 - movdqa xmm1, kMadd21 + movdqa xmm1, xmmword ptr kMadd21 pmaddubsw xmm0, xmm1 paddsw xmm0, xmm7 psrlw xmm0, 2 
@@ -665,8 +668,8 @@ void ScaleRowDown38_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride, // src_stride ignored mov edx, [esp + 12] // dst_ptr mov ecx, [esp + 16] // dst_width - movdqa xmm4, kShuf38a - movdqa xmm5, kShuf38b + movdqa xmm4, xmmword ptr kShuf38a + movdqa xmm5, xmmword ptr kShuf38b xloop: movdqu xmm0, [eax] // 16 pixels -> 0,1,2,3,4,5 @@ -698,9 +701,9 @@ void ScaleRowDown38_3_Box_SSSE3(const uint8* src_ptr, mov esi, [esp + 4 + 8] // src_stride mov edx, [esp + 4 + 12] // dst_ptr mov ecx, [esp + 4 + 16] // dst_width - movdqa xmm2, kShufAc - movdqa xmm3, kShufAc3 - movdqa xmm4, kScaleAc33 + movdqa xmm2, xmmword ptr kShufAc + movdqa xmm3, xmmword ptr kShufAc3 + movdqa xmm4, xmmword ptr kScaleAc33 pxor xmm5, xmm5 xloop: @@ -763,10 +766,10 @@ void ScaleRowDown38_2_Box_SSSE3(const uint8* src_ptr, mov esi, [esp + 4 + 8] // src_stride mov edx, [esp + 4 + 12] // dst_ptr mov ecx, [esp + 4 + 16] // dst_width - movdqa xmm2, kShufAb0 - movdqa xmm3, kShufAb1 - movdqa xmm4, kShufAb2 - movdqa xmm5, kScaleAb2 + movdqa xmm2, xmmword ptr kShufAb0 + movdqa xmm3, xmmword ptr kShufAb1 + movdqa xmm4, xmmword ptr kShufAb2 + movdqa xmm5, xmmword ptr kScaleAb2 xloop: movdqu xmm0, [eax] // average 2 rows into xmm0 @@ -857,6 +860,16 @@ void ScaleAddRow_AVX2(const uint8* src_ptr, uint16* dst_ptr, int src_width) { } #endif // HAS_SCALEADDROW_AVX2 +// Constant for making pixels signed to avoid pmaddubsw +// saturation. +static uvec8 kFsub80 = + { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 }; + +// Constant for making pixels unsigned and adding .5 for rounding. +static uvec16 kFadd40 = + { 0x4040, 0x4040, 0x4040, 0x4040, 0x4040, 0x4040, 0x4040, 0x4040 }; + // Bilinear column filtering. SSSE3 version. 
__declspec(naked) void ScaleFilterCols_SSSE3(uint8* dst_ptr, const uint8* src_ptr, @@ -874,6 +887,8 @@ void ScaleFilterCols_SSSE3(uint8* dst_ptr, const uint8* src_ptr, movd xmm5, eax pcmpeqb xmm6, xmm6 // generate 0x007f for inverting fraction. psrlw xmm6, 9 + pcmpeqb xmm7, xmm7 // generate 0x0001 + psrlw xmm7, 15 pextrw eax, xmm2, 1 // get x0 integer. preroll sub ecx, 2 jl xloop29 @@ -896,20 +911,22 @@ void ScaleFilterCols_SSSE3(uint8* dst_ptr, const uint8* src_ptr, movd xmm4, ebx pshufb xmm1, xmm5 // 0011 punpcklwd xmm0, xmm4 + psubb xmm0, xmmword ptr kFsub80 // make pixels signed. pxor xmm1, xmm6 // 0..7f and 7f..0 - pmaddubsw xmm0, xmm1 // 16 bit, 2 pixels. + paddusb xmm1, xmm7 // +1 so 0..7f and 80..1 + pmaddubsw xmm1, xmm0 // 16 bit, 2 pixels. pextrw eax, xmm2, 1 // get x0 integer. next iteration. pextrw edx, xmm2, 3 // get x1 integer. next iteration. - psrlw xmm0, 7 // 8.7 fixed point to low 8 bits. - packuswb xmm0, xmm0 // 8 bits, 2 pixels. - movd ebx, xmm0 + paddw xmm1, xmmword ptr kFadd40 // make pixels unsigned and round. + psrlw xmm1, 7 // 8.7 fixed point to low 8 bits. + packuswb xmm1, xmm1 // 8 bits, 2 pixels. + movd ebx, xmm1 mov [edi], bx lea edi, [edi + 2] sub ecx, 2 // 2 pixels jge xloop2 xloop29: - add ecx, 2 - 1 jl xloop99 @@ -918,11 +935,14 @@ void ScaleFilterCols_SSSE3(uint8* dst_ptr, const uint8* src_ptr, movd xmm0, ebx psrlw xmm2, 9 // 7 bit fractions. pshufb xmm2, xmm5 // 0011 + psubb xmm0, xmmword ptr kFsub80 // make pixels signed. pxor xmm2, xmm6 // 0..7f and 7f..0 - pmaddubsw xmm0, xmm2 // 16 bit - psrlw xmm0, 7 // 8.7 fixed point to low 8 bits. - packuswb xmm0, xmm0 // 8 bits - movd ebx, xmm0 + paddusb xmm2, xmm7 // +1 so 0..7f and 80..1 + pmaddubsw xmm2, xmm0 // 16 bit + paddw xmm2, xmmword ptr kFadd40 // make pixels unsigned and round. + psrlw xmm2, 7 // 8.7 fixed point to low 8 bits. 
+ packuswb xmm2, xmm2 // 8 bits + movd ebx, xmm2 mov [edi], bl xloop99: @@ -1233,8 +1253,8 @@ void ScaleARGBFilterCols_SSSE3(uint8* dst_argb, const uint8* src_argb, mov ecx, [esp + 8 + 12] // dst_width movd xmm2, [esp + 8 + 16] // x movd xmm3, [esp + 8 + 20] // dx - movdqa xmm4, kShuffleColARGB - movdqa xmm5, kShuffleFractions + movdqa xmm4, xmmword ptr kShuffleColARGB + movdqa xmm5, xmmword ptr kShuffleFractions pcmpeqb xmm6, xmm6 // generate 0x007f for inverting fraction. psrlw xmm6, 9 pextrw eax, xmm2, 1 // get x0 integer. preroll diff --git a/libs/libvpx/third_party/libyuv/source/video_common.cc b/libs/libvpx/third_party/libyuv/source/video_common.cc index 379a0669ae..00fb71e18b 100644 --- a/libs/libvpx/third_party/libyuv/source/video_common.cc +++ b/libs/libvpx/third_party/libyuv/source/video_common.cc @@ -25,6 +25,7 @@ struct FourCCAliasEntry { static const struct FourCCAliasEntry kFourCCAliases[] = { {FOURCC_IYUV, FOURCC_I420}, + {FOURCC_YU12, FOURCC_I420}, {FOURCC_YU16, FOURCC_I422}, {FOURCC_YU24, FOURCC_I444}, {FOURCC_YUYV, FOURCC_YUY2}, diff --git a/libs/libvpx/third_party/libyuv/source/x86inc.asm b/libs/libvpx/third_party/libyuv/source/x86inc.asm deleted file mode 100644 index cb5c32df3a..0000000000 --- a/libs/libvpx/third_party/libyuv/source/x86inc.asm +++ /dev/null @@ -1,1136 +0,0 @@ -;***************************************************************************** -;* x86inc.asm: x264asm abstraction layer -;***************************************************************************** -;* Copyright (C) 2005-2012 x264 project -;* -;* Authors: Loren Merritt -;* Anton Mitrofanov -;* Jason Garrett-Glaser -;* Henrik Gramner -;* -;* Permission to use, copy, modify, and/or distribute this software for any -;* purpose with or without fee is hereby granted, provided that the above -;* copyright notice and this permission notice appear in all copies. 
-;* -;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -;***************************************************************************** - -; This is a header file for the x264ASM assembly language, which uses -; NASM/YASM syntax combined with a large number of macros to provide easy -; abstraction between different calling conventions (x86_32, win64, linux64). -; It also has various other useful features to simplify writing the kind of -; DSP functions that are most often used in x264. - -; Unlike the rest of x264, this file is available under an ISC license, as it -; has significant usefulness outside of x264 and we want it to be available -; to the largest audience possible. Of course, if you modify it for your own -; purposes to add a new feature, we strongly encourage contributing a patch -; as this feature might be useful for others as well. Send patches or ideas -; to x264-devel@videolan.org . - -; Local changes for libyuv: -; remove %define program_name and references in labels -; rename cpus to uppercase - -%define WIN64 0 -%define UNIX64 0 -%if ARCH_X86_64 - %ifidn __OUTPUT_FORMAT__,win32 - %define WIN64 1 - %elifidn __OUTPUT_FORMAT__,win64 - %define WIN64 1 - %else - %define UNIX64 1 - %endif -%endif - -%ifdef PREFIX - %define mangle(x) _ %+ x -%else - %define mangle(x) x -%endif - -; Name of the .rodata section. -; Kludge: Something on OS X fails to align .rodata even given an align attribute, -; so use a different read-only section. 
-%macro SECTION_RODATA 0-1 16 - %ifidn __OUTPUT_FORMAT__,macho64 - SECTION .text align=%1 - %elifidn __OUTPUT_FORMAT__,macho - SECTION .text align=%1 - fakegot: - %elifidn __OUTPUT_FORMAT__,aout - section .text - %else - SECTION .rodata align=%1 - %endif -%endmacro - -; aout does not support align= -%macro SECTION_TEXT 0-1 16 - %ifidn __OUTPUT_FORMAT__,aout - SECTION .text - %else - SECTION .text align=%1 - %endif -%endmacro - -%if WIN64 - %define PIC -%elif ARCH_X86_64 == 0 -; x86_32 doesn't require PIC. -; Some distros prefer shared objects to be PIC, but nothing breaks if -; the code contains a few textrels, so we'll skip that complexity. - %undef PIC -%endif -%ifdef PIC - default rel -%endif - -; Always use long nops (reduces 0x90 spam in disassembly on x86_32) -CPU amdnop - -; Macros to eliminate most code duplication between x86_32 and x86_64: -; Currently this works only for leaf functions which load all their arguments -; into registers at the start, and make no other use of the stack. Luckily that -; covers most of x264's asm. - -; PROLOGUE: -; %1 = number of arguments. loads them from stack if needed. -; %2 = number of registers used. pushes callee-saved regs if needed. -; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed. -; %4 = list of names to define to registers -; PROLOGUE can also be invoked by adding the same options to cglobal - -; e.g. -; cglobal foo, 2,3,0, dst, src, tmp -; declares a function (foo), taking two args (dst and src) and one local variable (tmp) - -; TODO Some functions can use some args directly from the stack. If they're the -; last args then you can just not declare them, but if they're in the middle -; we need more flexible macro. - -; RET: -; Pops anything that was pushed by PROLOGUE, and returns. - -; REP_RET: -; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons -; which are slow when a normal ret follows a branch. 
- -; registers: -; rN and rNq are the native-size register holding function argument N -; rNd, rNw, rNb are dword, word, and byte size -; rNh is the high 8 bits of the word size -; rNm is the original location of arg N (a register or on the stack), dword -; rNmp is native size - -%macro DECLARE_REG 2-3 - %define r%1q %2 - %define r%1d %2d - %define r%1w %2w - %define r%1b %2b - %define r%1h %2h - %if %0 == 2 - %define r%1m %2d - %define r%1mp %2 - %elif ARCH_X86_64 ; memory - %define r%1m [rsp + stack_offset + %3] - %define r%1mp qword r %+ %1m - %else - %define r%1m [esp + stack_offset + %3] - %define r%1mp dword r %+ %1m - %endif - %define r%1 %2 -%endmacro - -%macro DECLARE_REG_SIZE 3 - %define r%1q r%1 - %define e%1q r%1 - %define r%1d e%1 - %define e%1d e%1 - %define r%1w %1 - %define e%1w %1 - %define r%1h %3 - %define e%1h %3 - %define r%1b %2 - %define e%1b %2 -%if ARCH_X86_64 == 0 - %define r%1 e%1 -%endif -%endmacro - -DECLARE_REG_SIZE ax, al, ah -DECLARE_REG_SIZE bx, bl, bh -DECLARE_REG_SIZE cx, cl, ch -DECLARE_REG_SIZE dx, dl, dh -DECLARE_REG_SIZE si, sil, null -DECLARE_REG_SIZE di, dil, null -DECLARE_REG_SIZE bp, bpl, null - -; t# defines for when per-arch register allocation is more complex than just function arguments - -%macro DECLARE_REG_TMP 1-* - %assign %%i 0 - %rep %0 - CAT_XDEFINE t, %%i, r%1 - %assign %%i %%i+1 - %rotate 1 - %endrep -%endmacro - -%macro DECLARE_REG_TMP_SIZE 0-* - %rep %0 - %define t%1q t%1 %+ q - %define t%1d t%1 %+ d - %define t%1w t%1 %+ w - %define t%1h t%1 %+ h - %define t%1b t%1 %+ b - %rotate 1 - %endrep -%endmacro - -DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14 - -%if ARCH_X86_64 - %define gprsize 8 -%else - %define gprsize 4 -%endif - -%macro PUSH 1 - push %1 - %assign stack_offset stack_offset+gprsize -%endmacro - -%macro POP 1 - pop %1 - %assign stack_offset stack_offset-gprsize -%endmacro - -%macro PUSH_IF_USED 1-* - %rep %0 - %if %1 < regs_used - PUSH r%1 - %endif - %rotate 1 - %endrep -%endmacro - 
-%macro POP_IF_USED 1-* - %rep %0 - %if %1 < regs_used - pop r%1 - %endif - %rotate 1 - %endrep -%endmacro - -%macro LOAD_IF_USED 1-* - %rep %0 - %if %1 < num_args - mov r%1, r %+ %1 %+ mp - %endif - %rotate 1 - %endrep -%endmacro - -%macro SUB 2 - sub %1, %2 - %ifidn %1, rsp - %assign stack_offset stack_offset+(%2) - %endif -%endmacro - -%macro ADD 2 - add %1, %2 - %ifidn %1, rsp - %assign stack_offset stack_offset-(%2) - %endif -%endmacro - -%macro movifnidn 2 - %ifnidn %1, %2 - mov %1, %2 - %endif -%endmacro - -%macro movsxdifnidn 2 - %ifnidn %1, %2 - movsxd %1, %2 - %endif -%endmacro - -%macro ASSERT 1 - %if (%1) == 0 - %error assert failed - %endif -%endmacro - -%macro DEFINE_ARGS 0-* - %ifdef n_arg_names - %assign %%i 0 - %rep n_arg_names - CAT_UNDEF arg_name %+ %%i, q - CAT_UNDEF arg_name %+ %%i, d - CAT_UNDEF arg_name %+ %%i, w - CAT_UNDEF arg_name %+ %%i, h - CAT_UNDEF arg_name %+ %%i, b - CAT_UNDEF arg_name %+ %%i, m - CAT_UNDEF arg_name %+ %%i, mp - CAT_UNDEF arg_name, %%i - %assign %%i %%i+1 - %endrep - %endif - - %xdefine %%stack_offset stack_offset - %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine - %assign %%i 0 - %rep %0 - %xdefine %1q r %+ %%i %+ q - %xdefine %1d r %+ %%i %+ d - %xdefine %1w r %+ %%i %+ w - %xdefine %1h r %+ %%i %+ h - %xdefine %1b r %+ %%i %+ b - %xdefine %1m r %+ %%i %+ m - %xdefine %1mp r %+ %%i %+ mp - CAT_XDEFINE arg_name, %%i, %1 - %assign %%i %%i+1 - %rotate 1 - %endrep - %xdefine stack_offset %%stack_offset - %assign n_arg_names %0 -%endmacro - -%if WIN64 ; Windows x64 ;================================================= - -DECLARE_REG 0, rcx -DECLARE_REG 1, rdx -DECLARE_REG 2, R8 -DECLARE_REG 3, R9 -DECLARE_REG 4, R10, 40 -DECLARE_REG 5, R11, 48 -DECLARE_REG 6, rax, 56 -DECLARE_REG 7, rdi, 64 -DECLARE_REG 8, rsi, 72 -DECLARE_REG 9, rbx, 80 -DECLARE_REG 10, rbp, 88 -DECLARE_REG 11, R12, 96 -DECLARE_REG 12, R13, 104 -DECLARE_REG 13, R14, 112 -DECLARE_REG 14, R15, 120 - -%macro 
PROLOGUE 2-4+ 0 ; #args, #regs, #xmm_regs, arg_names... - %assign num_args %1 - %assign regs_used %2 - ASSERT regs_used >= num_args - ASSERT regs_used <= 15 - PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14 - %if mmsize == 8 - %assign xmm_regs_used 0 - %else - WIN64_SPILL_XMM %3 - %endif - LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 - DEFINE_ARGS %4 -%endmacro - -%macro WIN64_SPILL_XMM 1 - %assign xmm_regs_used %1 - ASSERT xmm_regs_used <= 16 - %if xmm_regs_used > 6 - SUB rsp, (xmm_regs_used-6)*16+16 - %assign %%i xmm_regs_used - %rep (xmm_regs_used-6) - %assign %%i %%i-1 - movdqa [rsp + (%%i-6)*16+(~stack_offset&8)], xmm %+ %%i - %endrep - %endif -%endmacro - -%macro WIN64_RESTORE_XMM_INTERNAL 1 - %if xmm_regs_used > 6 - %assign %%i xmm_regs_used - %rep (xmm_regs_used-6) - %assign %%i %%i-1 - movdqa xmm %+ %%i, [%1 + (%%i-6)*16+(~stack_offset&8)] - %endrep - add %1, (xmm_regs_used-6)*16+16 - %endif -%endmacro - -%macro WIN64_RESTORE_XMM 1 - WIN64_RESTORE_XMM_INTERNAL %1 - %assign stack_offset stack_offset-(xmm_regs_used-6)*16+16 - %assign xmm_regs_used 0 -%endmacro - -%define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32 - -%macro RET 0 - WIN64_RESTORE_XMM_INTERNAL rsp - POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7 -%if mmsize == 32 - vzeroupper -%endif - ret -%endmacro - -%elif ARCH_X86_64 ; *nix x64 ;============================================= - -DECLARE_REG 0, rdi -DECLARE_REG 1, rsi -DECLARE_REG 2, rdx -DECLARE_REG 3, rcx -DECLARE_REG 4, R8 -DECLARE_REG 5, R9 -DECLARE_REG 6, rax, 8 -DECLARE_REG 7, R10, 16 -DECLARE_REG 8, R11, 24 -DECLARE_REG 9, rbx, 32 -DECLARE_REG 10, rbp, 40 -DECLARE_REG 11, R12, 48 -DECLARE_REG 12, R13, 56 -DECLARE_REG 13, R14, 64 -DECLARE_REG 14, R15, 72 - -%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names... 
- %assign num_args %1 - %assign regs_used %2 - ASSERT regs_used >= num_args - ASSERT regs_used <= 15 - PUSH_IF_USED 9, 10, 11, 12, 13, 14 - LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14 - DEFINE_ARGS %4 -%endmacro - -%define has_epilogue regs_used > 9 || mmsize == 32 - -%macro RET 0 - POP_IF_USED 14, 13, 12, 11, 10, 9 -%if mmsize == 32 - vzeroupper -%endif - ret -%endmacro - -%else ; X86_32 ;============================================================== - -DECLARE_REG 0, eax, 4 -DECLARE_REG 1, ecx, 8 -DECLARE_REG 2, edx, 12 -DECLARE_REG 3, ebx, 16 -DECLARE_REG 4, esi, 20 -DECLARE_REG 5, edi, 24 -DECLARE_REG 6, ebp, 28 -%define rsp esp - -%macro DECLARE_ARG 1-* - %rep %0 - %define r%1m [esp + stack_offset + 4*%1 + 4] - %define r%1mp dword r%1m - %rotate 1 - %endrep -%endmacro - -DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14 - -%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names... - %assign num_args %1 - %assign regs_used %2 - %if regs_used > 7 - %assign regs_used 7 - %endif - ASSERT regs_used >= num_args - PUSH_IF_USED 3, 4, 5, 6 - LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6 - DEFINE_ARGS %4 -%endmacro - -%define has_epilogue regs_used > 3 || mmsize == 32 - -%macro RET 0 - POP_IF_USED 6, 5, 4, 3 -%if mmsize == 32 - vzeroupper -%endif - ret -%endmacro - -%endif ;====================================================================== - -%if WIN64 == 0 -%macro WIN64_SPILL_XMM 1 -%endmacro -%macro WIN64_RESTORE_XMM 1 -%endmacro -%endif - -%macro REP_RET 0 - %if has_epilogue - RET - %else - rep ret - %endif -%endmacro - -%macro TAIL_CALL 2 ; callee, is_nonadjacent - %if has_epilogue - call %1 - RET - %elif %2 - jmp %1 - %endif -%endmacro - -;============================================================================= -; arch-independent part -;============================================================================= - -%assign function_align 16 - -; Begin a function. 
-; Applies any symbol mangling needed for C linkage, and sets up a define such that -; subsequent uses of the function name automatically refer to the mangled version. -; Appends cpuflags to the function name if cpuflags has been specified. -%macro cglobal 1-2+ ; name, [PROLOGUE args] -%if %0 == 1 - cglobal_internal %1 %+ SUFFIX -%else - cglobal_internal %1 %+ SUFFIX, %2 -%endif -%endmacro -%macro cglobal_internal 1-2+ - %ifndef cglobaled_%1 - %xdefine %1 mangle(%1) - %xdefine %1.skip_prologue %1 %+ .skip_prologue - CAT_XDEFINE cglobaled_, %1, 1 - %endif - %xdefine current_function %1 - %ifidn __OUTPUT_FORMAT__,elf - global %1:function hidden - %else - global %1 - %endif - align function_align - %1: - RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer - %assign stack_offset 0 - %if %0 > 1 - PROLOGUE %2 - %endif -%endmacro - -%macro cextern 1 - %xdefine %1 mangle(%1) - CAT_XDEFINE cglobaled_, %1, 1 - extern %1 -%endmacro - -; like cextern, but without the prefix -%macro cextern_naked 1 - %xdefine %1 mangle(%1) - CAT_XDEFINE cglobaled_, %1, 1 - extern %1 -%endmacro - -%macro const 2+ - %xdefine %1 mangle(%1) - global %1 - %1: %2 -%endmacro - -; This is needed for ELF, otherwise the GNU linker assumes the stack is -; executable by default. 
-%ifidn __OUTPUT_FORMAT__,elf -SECTION .note.GNU-stack noalloc noexec nowrite progbits -%endif -%ifidn __OUTPUT_FORMAT__,elf32 -section .note.GNU-stack noalloc noexec nowrite progbits -%endif -%ifidn __OUTPUT_FORMAT__,elf64 -section .note.GNU-stack noalloc noexec nowrite progbits -%endif - -; cpuflags - -%assign cpuflags_MMX (1<<0) -%assign cpuflags_MMX2 (1<<1) | cpuflags_MMX -%assign cpuflags_3dnow (1<<2) | cpuflags_MMX -%assign cpuflags_3dnow2 (1<<3) | cpuflags_3dnow -%assign cpuflags_SSE (1<<4) | cpuflags_MMX2 -%assign cpuflags_SSE2 (1<<5) | cpuflags_SSE -%assign cpuflags_SSE2slow (1<<6) | cpuflags_SSE2 -%assign cpuflags_SSE3 (1<<7) | cpuflags_SSE2 -%assign cpuflags_SSSE3 (1<<8) | cpuflags_SSE3 -%assign cpuflags_SSE4 (1<<9) | cpuflags_SSSE3 -%assign cpuflags_SSE42 (1<<10)| cpuflags_SSE4 -%assign cpuflags_AVX (1<<11)| cpuflags_SSE42 -%assign cpuflags_xop (1<<12)| cpuflags_AVX -%assign cpuflags_fma4 (1<<13)| cpuflags_AVX -%assign cpuflags_AVX2 (1<<14)| cpuflags_AVX -%assign cpuflags_fma3 (1<<15)| cpuflags_AVX - -%assign cpuflags_cache32 (1<<16) -%assign cpuflags_cache64 (1<<17) -%assign cpuflags_slowctz (1<<18) -%assign cpuflags_lzcnt (1<<19) -%assign cpuflags_misalign (1<<20) -%assign cpuflags_aligned (1<<21) ; not a cpu feature, but a function variant -%assign cpuflags_atom (1<<22) -%assign cpuflags_bmi1 (1<<23) -%assign cpuflags_bmi2 (1<<24)|cpuflags_bmi1 -%assign cpuflags_tbm (1<<25)|cpuflags_bmi1 - -%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x)) -%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x)) - -; Takes up to 2 cpuflags from the above list. -; All subsequent functions (up to the next INIT_CPUFLAGS) is built for the specified cpu. -; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co. 
-%macro INIT_CPUFLAGS 0-2 - %if %0 >= 1 - %xdefine cpuname %1 - %assign cpuflags cpuflags_%1 - %if %0 >= 2 - %xdefine cpuname %1_%2 - %assign cpuflags cpuflags | cpuflags_%2 - %endif - %xdefine SUFFIX _ %+ cpuname - %if cpuflag(AVX) - %assign AVX_enabled 1 - %endif - %if mmsize == 16 && notcpuflag(SSE2) - %define mova movaps - %define movu movups - %define movnta movntps - %endif - %if cpuflag(aligned) - %define movu mova - %elifidn %1, SSE3 - %define movu lddqu - %endif - %else - %xdefine SUFFIX - %undef cpuname - %undef cpuflags - %endif -%endmacro - -; merge MMX and SSE* - -%macro CAT_XDEFINE 3 - %xdefine %1%2 %3 -%endmacro - -%macro CAT_UNDEF 2 - %undef %1%2 -%endmacro - -%macro INIT_MMX 0-1+ - %assign AVX_enabled 0 - %define RESET_MM_PERMUTATION INIT_MMX %1 - %define mmsize 8 - %define num_mmregs 8 - %define mova movq - %define movu movq - %define movh movd - %define movnta movntq - %assign %%i 0 - %rep 8 - CAT_XDEFINE m, %%i, mm %+ %%i - CAT_XDEFINE nmm, %%i, %%i - %assign %%i %%i+1 - %endrep - %rep 8 - CAT_UNDEF m, %%i - CAT_UNDEF nmm, %%i - %assign %%i %%i+1 - %endrep - INIT_CPUFLAGS %1 -%endmacro - -%macro INIT_XMM 0-1+ - %assign AVX_enabled 0 - %define RESET_MM_PERMUTATION INIT_XMM %1 - %define mmsize 16 - %define num_mmregs 8 - %if ARCH_X86_64 - %define num_mmregs 16 - %endif - %define mova movdqa - %define movu movdqu - %define movh movq - %define movnta movntdq - %assign %%i 0 - %rep num_mmregs - CAT_XDEFINE m, %%i, xmm %+ %%i - CAT_XDEFINE nxmm, %%i, %%i - %assign %%i %%i+1 - %endrep - INIT_CPUFLAGS %1 -%endmacro - -%macro INIT_YMM 0-1+ - %assign AVX_enabled 1 - %define RESET_MM_PERMUTATION INIT_YMM %1 - %define mmsize 32 - %define num_mmregs 8 - %if ARCH_X86_64 - %define num_mmregs 16 - %endif - %define mova vmovaps - %define movu vmovups - %undef movh - %define movnta vmovntps - %assign %%i 0 - %rep num_mmregs - CAT_XDEFINE m, %%i, ymm %+ %%i - CAT_XDEFINE nymm, %%i, %%i - %assign %%i %%i+1 - %endrep - INIT_CPUFLAGS %1 -%endmacro - -INIT_XMM - -; I 
often want to use macros that permute their arguments. e.g. there's no -; efficient way to implement butterfly or transpose or dct without swapping some -; arguments. -; -; I would like to not have to manually keep track of the permutations: -; If I insert a permutation in the middle of a function, it should automatically -; change everything that follows. For more complex macros I may also have multiple -; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations. -; -; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that -; permutes its arguments. It's equivalent to exchanging the contents of the -; registers, except that this way you exchange the register names instead, so it -; doesn't cost any cycles. - -%macro PERMUTE 2-* ; takes a list of pairs to swap -%rep %0/2 - %xdefine tmp%2 m%2 - %xdefine ntmp%2 nm%2 - %rotate 2 -%endrep -%rep %0/2 - %xdefine m%1 tmp%2 - %xdefine nm%1 ntmp%2 - %undef tmp%2 - %undef ntmp%2 - %rotate 2 -%endrep -%endmacro - -%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs) -%rep %0-1 -%ifdef m%1 - %xdefine tmp m%1 - %xdefine m%1 m%2 - %xdefine m%2 tmp - CAT_XDEFINE n, m%1, %1 - CAT_XDEFINE n, m%2, %2 -%else - ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1" infer the original numbers here. - ; Be careful using this mode in nested macros though, as in some cases there may be - ; other copies of m# that have already been dereferenced and don't get updated correctly. - %xdefine %%n1 n %+ %1 - %xdefine %%n2 n %+ %2 - %xdefine tmp m %+ %%n1 - CAT_XDEFINE m, %%n1, m %+ %%n2 - CAT_XDEFINE m, %%n2, tmp - CAT_XDEFINE n, m %+ %%n1, %%n1 - CAT_XDEFINE n, m %+ %%n2, %%n2 -%endif - %undef tmp - %rotate 1 -%endrep -%endmacro - -; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later -; calls to that function will automatically load the permutation, so values can -; be returned in mmregs. 
-%macro SAVE_MM_PERMUTATION 0-1 - %if %0 - %xdefine %%f %1_m - %else - %xdefine %%f current_function %+ _m - %endif - %assign %%i 0 - %rep num_mmregs - CAT_XDEFINE %%f, %%i, m %+ %%i - %assign %%i %%i+1 - %endrep -%endmacro - -%macro LOAD_MM_PERMUTATION 1 ; name to load from - %ifdef %1_m0 - %assign %%i 0 - %rep num_mmregs - CAT_XDEFINE m, %%i, %1_m %+ %%i - CAT_XDEFINE n, m %+ %%i, %%i - %assign %%i %%i+1 - %endrep - %endif -%endmacro - -; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't -%macro call 1 - call_internal %1, %1 %+ SUFFIX -%endmacro -%macro call_internal 2 - %xdefine %%i %1 - %ifndef cglobaled_%1 - %ifdef cglobaled_%2 - %xdefine %%i %2 - %endif - %endif - call %%i - LOAD_MM_PERMUTATION %%i -%endmacro - -; Substitutions that reduce instruction size but are functionally equivalent -%macro add 2 - %ifnum %2 - %if %2==128 - sub %1, -128 - %else - add %1, %2 - %endif - %else - add %1, %2 - %endif -%endmacro - -%macro sub 2 - %ifnum %2 - %if %2==128 - add %1, -128 - %else - sub %1, %2 - %endif - %else - sub %1, %2 - %endif -%endmacro - -;============================================================================= -; AVX abstraction layer -;============================================================================= - -%assign i 0 -%rep 16 - %if i < 8 - CAT_XDEFINE sizeofmm, i, 8 - %endif - CAT_XDEFINE sizeofxmm, i, 16 - CAT_XDEFINE sizeofymm, i, 32 -%assign i i+1 -%endrep -%undef i - -%macro CHECK_AVX_INSTR_EMU 3-* - %xdefine %%opcode %1 - %xdefine %%dst %2 - %rep %0-2 - %ifidn %%dst, %3 - %error non-AVX emulation of ``%%opcode'' is not supported - %endif - %rotate 1 - %endrep -%endmacro - -;%1 == instruction -;%2 == 1 if float, 0 if int -;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm) -;%4 == number of operands given -;%5+: operands -%macro RUN_AVX_INSTR 6-7+ - %ifid %6 - %define %%sizeofreg sizeof%6 - %elifid %5 - %define %%sizeofreg sizeof%5 - %else - %define %%sizeofreg 
mmsize - %endif - %if %%sizeofreg==32 - %if %4>=3 - v%1 %5, %6, %7 - %else - v%1 %5, %6 - %endif - %else - %if %%sizeofreg==8 - %define %%regmov movq - %elif %2 - %define %%regmov movaps - %else - %define %%regmov movdqa - %endif - - %if %4>=3+%3 - %ifnidn %5, %6 - %if AVX_enabled && %%sizeofreg==16 - v%1 %5, %6, %7 - %else - CHECK_AVX_INSTR_EMU {%1 %5, %6, %7}, %5, %7 - %%regmov %5, %6 - %1 %5, %7 - %endif - %else - %1 %5, %7 - %endif - %elif %4>=3 - %1 %5, %6, %7 - %else - %1 %5, %6 - %endif - %endif -%endmacro - -; 3arg AVX ops with a memory arg can only have it in src2, -; whereas SSE emulation of 3arg prefers to have it in src1 (i.e. the mov). -; So, if the op is symmetric and the wrong one is memory, swap them. -%macro RUN_AVX_INSTR1 8 - %assign %%swap 0 - %if AVX_enabled - %ifnid %6 - %assign %%swap 1 - %endif - %elifnidn %5, %6 - %ifnid %7 - %assign %%swap 1 - %endif - %endif - %if %%swap && %3 == 0 && %8 == 1 - RUN_AVX_INSTR %1, %2, %3, %4, %5, %7, %6 - %else - RUN_AVX_INSTR %1, %2, %3, %4, %5, %6, %7 - %endif -%endmacro - -;%1 == instruction -;%2 == 1 if float, 0 if int -;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm) -;%4 == 1 if symmetric (i.e. 
doesn't matter which src arg is which), 0 if not -%macro AVX_INSTR 4 - %macro %1 2-9 fnord, fnord, fnord, %1, %2, %3, %4 - %ifidn %3, fnord - RUN_AVX_INSTR %6, %7, %8, 2, %1, %2 - %elifidn %4, fnord - RUN_AVX_INSTR1 %6, %7, %8, 3, %1, %2, %3, %9 - %elifidn %5, fnord - RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4 - %else - RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5 - %endif - %endmacro -%endmacro - -AVX_INSTR addpd, 1, 0, 1 -AVX_INSTR addps, 1, 0, 1 -AVX_INSTR addsd, 1, 0, 1 -AVX_INSTR addss, 1, 0, 1 -AVX_INSTR addsubpd, 1, 0, 0 -AVX_INSTR addsubps, 1, 0, 0 -AVX_INSTR andpd, 1, 0, 1 -AVX_INSTR andps, 1, 0, 1 -AVX_INSTR andnpd, 1, 0, 0 -AVX_INSTR andnps, 1, 0, 0 -AVX_INSTR blendpd, 1, 0, 0 -AVX_INSTR blendps, 1, 0, 0 -AVX_INSTR blendvpd, 1, 0, 0 -AVX_INSTR blendvps, 1, 0, 0 -AVX_INSTR cmppd, 1, 0, 0 -AVX_INSTR cmpps, 1, 0, 0 -AVX_INSTR cmpsd, 1, 0, 0 -AVX_INSTR cmpss, 1, 0, 0 -AVX_INSTR cvtdq2ps, 1, 0, 0 -AVX_INSTR cvtps2dq, 1, 0, 0 -AVX_INSTR divpd, 1, 0, 0 -AVX_INSTR divps, 1, 0, 0 -AVX_INSTR divsd, 1, 0, 0 -AVX_INSTR divss, 1, 0, 0 -AVX_INSTR dppd, 1, 1, 0 -AVX_INSTR dpps, 1, 1, 0 -AVX_INSTR haddpd, 1, 0, 0 -AVX_INSTR haddps, 1, 0, 0 -AVX_INSTR hsubpd, 1, 0, 0 -AVX_INSTR hsubps, 1, 0, 0 -AVX_INSTR maxpd, 1, 0, 1 -AVX_INSTR maxps, 1, 0, 1 -AVX_INSTR maxsd, 1, 0, 1 -AVX_INSTR maxss, 1, 0, 1 -AVX_INSTR minpd, 1, 0, 1 -AVX_INSTR minps, 1, 0, 1 -AVX_INSTR minsd, 1, 0, 1 -AVX_INSTR minss, 1, 0, 1 -AVX_INSTR movhlps, 1, 0, 0 -AVX_INSTR movlhps, 1, 0, 0 -AVX_INSTR movsd, 1, 0, 0 -AVX_INSTR movss, 1, 0, 0 -AVX_INSTR mpsadbw, 0, 1, 0 -AVX_INSTR mulpd, 1, 0, 1 -AVX_INSTR mulps, 1, 0, 1 -AVX_INSTR mulsd, 1, 0, 1 -AVX_INSTR mulss, 1, 0, 1 -AVX_INSTR orpd, 1, 0, 1 -AVX_INSTR orps, 1, 0, 1 -AVX_INSTR pabsb, 0, 0, 0 -AVX_INSTR pabsw, 0, 0, 0 -AVX_INSTR pabsd, 0, 0, 0 -AVX_INSTR packsswb, 0, 0, 0 -AVX_INSTR packssdw, 0, 0, 0 -AVX_INSTR packuswb, 0, 0, 0 -AVX_INSTR packusdw, 0, 0, 0 -AVX_INSTR paddb, 0, 0, 1 -AVX_INSTR paddw, 0, 0, 1 -AVX_INSTR paddd, 0, 0, 1 -AVX_INSTR paddq, 
0, 0, 1 -AVX_INSTR paddsb, 0, 0, 1 -AVX_INSTR paddsw, 0, 0, 1 -AVX_INSTR paddusb, 0, 0, 1 -AVX_INSTR paddusw, 0, 0, 1 -AVX_INSTR palignr, 0, 1, 0 -AVX_INSTR pand, 0, 0, 1 -AVX_INSTR pandn, 0, 0, 0 -AVX_INSTR pavgb, 0, 0, 1 -AVX_INSTR pavgw, 0, 0, 1 -AVX_INSTR pblendvb, 0, 0, 0 -AVX_INSTR pblendw, 0, 1, 0 -AVX_INSTR pcmpestri, 0, 0, 0 -AVX_INSTR pcmpestrm, 0, 0, 0 -AVX_INSTR pcmpistri, 0, 0, 0 -AVX_INSTR pcmpistrm, 0, 0, 0 -AVX_INSTR pcmpeqb, 0, 0, 1 -AVX_INSTR pcmpeqw, 0, 0, 1 -AVX_INSTR pcmpeqd, 0, 0, 1 -AVX_INSTR pcmpeqq, 0, 0, 1 -AVX_INSTR pcmpgtb, 0, 0, 0 -AVX_INSTR pcmpgtw, 0, 0, 0 -AVX_INSTR pcmpgtd, 0, 0, 0 -AVX_INSTR pcmpgtq, 0, 0, 0 -AVX_INSTR phaddw, 0, 0, 0 -AVX_INSTR phaddd, 0, 0, 0 -AVX_INSTR phaddsw, 0, 0, 0 -AVX_INSTR phsubw, 0, 0, 0 -AVX_INSTR phsubd, 0, 0, 0 -AVX_INSTR phsubsw, 0, 0, 0 -AVX_INSTR pmaddwd, 0, 0, 1 -AVX_INSTR pmaddubsw, 0, 0, 0 -AVX_INSTR pmaxsb, 0, 0, 1 -AVX_INSTR pmaxsw, 0, 0, 1 -AVX_INSTR pmaxsd, 0, 0, 1 -AVX_INSTR pmaxub, 0, 0, 1 -AVX_INSTR pmaxuw, 0, 0, 1 -AVX_INSTR pmaxud, 0, 0, 1 -AVX_INSTR pminsb, 0, 0, 1 -AVX_INSTR pminsw, 0, 0, 1 -AVX_INSTR pminsd, 0, 0, 1 -AVX_INSTR pminub, 0, 0, 1 -AVX_INSTR pminuw, 0, 0, 1 -AVX_INSTR pminud, 0, 0, 1 -AVX_INSTR pmovmskb, 0, 0, 0 -AVX_INSTR pmulhuw, 0, 0, 1 -AVX_INSTR pmulhrsw, 0, 0, 1 -AVX_INSTR pmulhw, 0, 0, 1 -AVX_INSTR pmullw, 0, 0, 1 -AVX_INSTR pmulld, 0, 0, 1 -AVX_INSTR pmuludq, 0, 0, 1 -AVX_INSTR pmuldq, 0, 0, 1 -AVX_INSTR por, 0, 0, 1 -AVX_INSTR psadbw, 0, 0, 1 -AVX_INSTR pshufb, 0, 0, 0 -AVX_INSTR pshufd, 0, 1, 0 -AVX_INSTR pshufhw, 0, 1, 0 -AVX_INSTR pshuflw, 0, 1, 0 -AVX_INSTR psignb, 0, 0, 0 -AVX_INSTR psignw, 0, 0, 0 -AVX_INSTR psignd, 0, 0, 0 -AVX_INSTR psllw, 0, 0, 0 -AVX_INSTR pslld, 0, 0, 0 -AVX_INSTR psllq, 0, 0, 0 -AVX_INSTR pslldq, 0, 0, 0 -AVX_INSTR psraw, 0, 0, 0 -AVX_INSTR psrad, 0, 0, 0 -AVX_INSTR psrlw, 0, 0, 0 -AVX_INSTR psrld, 0, 0, 0 -AVX_INSTR psrlq, 0, 0, 0 -AVX_INSTR psrldq, 0, 0, 0 -AVX_INSTR psubb, 0, 0, 0 -AVX_INSTR psubw, 0, 0, 0 -AVX_INSTR psubd, 0, 0, 0 
-AVX_INSTR psubq, 0, 0, 0 -AVX_INSTR psubsb, 0, 0, 0 -AVX_INSTR psubsw, 0, 0, 0 -AVX_INSTR psubusb, 0, 0, 0 -AVX_INSTR psubusw, 0, 0, 0 -AVX_INSTR ptest, 0, 0, 0 -AVX_INSTR punpckhbw, 0, 0, 0 -AVX_INSTR punpckhwd, 0, 0, 0 -AVX_INSTR punpckhdq, 0, 0, 0 -AVX_INSTR punpckhqdq, 0, 0, 0 -AVX_INSTR punpcklbw, 0, 0, 0 -AVX_INSTR punpcklwd, 0, 0, 0 -AVX_INSTR punpckldq, 0, 0, 0 -AVX_INSTR punpcklqdq, 0, 0, 0 -AVX_INSTR pxor, 0, 0, 1 -AVX_INSTR shufps, 1, 1, 0 -AVX_INSTR subpd, 1, 0, 0 -AVX_INSTR subps, 1, 0, 0 -AVX_INSTR subsd, 1, 0, 0 -AVX_INSTR subss, 1, 0, 0 -AVX_INSTR unpckhpd, 1, 0, 0 -AVX_INSTR unpckhps, 1, 0, 0 -AVX_INSTR unpcklpd, 1, 0, 0 -AVX_INSTR unpcklps, 1, 0, 0 -AVX_INSTR xorpd, 1, 0, 1 -AVX_INSTR xorps, 1, 0, 1 - -; 3DNow instructions, for sharing code between AVX, SSE and 3DN -AVX_INSTR pfadd, 1, 0, 1 -AVX_INSTR pfsub, 1, 0, 0 -AVX_INSTR pfmul, 1, 0, 1 - -; base-4 constants for shuffles -%assign i 0 -%rep 256 - %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3) - %if j < 10 - CAT_XDEFINE q000, j, i - %elif j < 100 - CAT_XDEFINE q00, j, i - %elif j < 1000 - CAT_XDEFINE q0, j, i - %else - CAT_XDEFINE q, j, i - %endif -%assign i i+1 -%endrep -%undef i -%undef j - -%macro FMA_INSTR 3 - %macro %1 4-7 %1, %2, %3 - %if cpuflag(xop) - v%5 %1, %2, %3, %4 - %else - %6 %1, %2, %3 - %7 %1, %4 - %endif - %endmacro -%endmacro - -FMA_INSTR pmacsdd, pmulld, paddd -FMA_INSTR pmacsww, pmullw, paddw -FMA_INSTR pmadcswd, pmaddwd, paddd - -; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf. -; This lets us use tzcnt without bumping the yasm version requirement yet. 
-%define tzcnt rep bsf diff --git a/libs/libvpx/third_party/x86inc/README.libvpx b/libs/libvpx/third_party/x86inc/README.libvpx index e91e305a28..8d3cd966da 100644 --- a/libs/libvpx/third_party/x86inc/README.libvpx +++ b/libs/libvpx/third_party/x86inc/README.libvpx @@ -1,5 +1,5 @@ -URL: http://git.videolan.org/?p=x264.git -Version: a95584945dd9ce3acc66c6cd8f6796bc4404d40d +URL: https://git.videolan.org/git/x264.git +Version: d23d18655249944c1ca894b451e2c82c7a584c62 License: ISC License File: LICENSE @@ -13,12 +13,8 @@ Prefix functions with vpx by default. Manage name mangling (prefixing with '_') manually because 'PREFIX' does not exist in libvpx. Expand PIC default to macho64 and respect CONFIG_PIC from libvpx -Catch all elf formats for 'hidden' status and SECTION notes. -Avoid 'amdnop' when building with nasm. Set 'private_extern' visibility for macho targets. Copy PIC 'GLOBAL' macros from x86_abi_support.asm Use .text instead of .rodata on macho to avoid broken tables in PIC mode. Use .text with no alignment for aout Only use 'hidden' visibility with Chromium -Move '%use smartalign' for nasm out of 'INIT_CPUFLAGS' and before - 'ALIGNMODE'. 
diff --git a/libs/libvpx/third_party/x86inc/x86inc.asm b/libs/libvpx/third_party/x86inc/x86inc.asm index e7d3fa5ebb..b647dff2f8 100644 --- a/libs/libvpx/third_party/x86inc/x86inc.asm +++ b/libs/libvpx/third_party/x86inc/x86inc.asm @@ -1,7 +1,7 @@ ;***************************************************************************** ;* x86inc.asm: x264asm abstraction layer ;***************************************************************************** -;* Copyright (C) 2005-2015 x264 project +;* Copyright (C) 2005-2016 x264 project ;* ;* Authors: Loren Merritt ;* Anton Mitrofanov @@ -66,16 +66,35 @@ %endif %endif -%ifidn __OUTPUT_FORMAT__,elf32 - %define mangle(x) x +%define FORMAT_ELF 0 +%ifidn __OUTPUT_FORMAT__,elf + %define FORMAT_ELF 1 +%elifidn __OUTPUT_FORMAT__,elf32 + %define FORMAT_ELF 1 %elifidn __OUTPUT_FORMAT__,elf64 - %define mangle(x) x -%elifidn __OUTPUT_FORMAT__,x64 - %define mangle(x) x -%elifidn __OUTPUT_FORMAT__,win64 - %define mangle(x) x + %define FORMAT_ELF 1 +%endif + +%define FORMAT_MACHO 0 +%ifidn __OUTPUT_FORMAT__,macho32 + %define FORMAT_MACHO 1 +%elifidn __OUTPUT_FORMAT__,macho64 + %define FORMAT_MACHO 1 +%endif + +; Set PREFIX for libvpx builds. +%if FORMAT_ELF + %undef PREFIX +%elif WIN64 + %undef PREFIX %else + %define PREFIX +%endif + +%ifdef PREFIX %define mangle(x) _ %+ x +%else + %define mangle(x) x %endif ; In some instances macho32 tables get misaligned when using .rodata. @@ -94,14 +113,6 @@ %endif %endmacro -%macro SECTION_TEXT 0-1 16 - %ifidn __OUTPUT_FORMAT__,aout - SECTION .text - %else - SECTION .text align=%1 - %endif -%endmacro - ; PIC macros are copied from vpx_ports/x86_abi_support.asm. The "define PIC" ; from original code is added in for 64bit. 
%ifidn __OUTPUT_FORMAT__,elf32 @@ -188,8 +199,16 @@ %ifdef PIC default rel %endif + +%ifndef GET_GOT_DEFINED + %define GET_GOT_DEFINED 0 +%endif ; Done with PIC macros +%ifdef __NASM_VER__ + %use smartalign +%endif + ; Macros to eliminate most code duplication between x86_32 and x86_64: ; Currently this works only for leaf functions which load all their arguments ; into registers at the start, and make no other use of the stack. Luckily that @@ -237,6 +256,7 @@ %define r%1w %2w %define r%1b %2b %define r%1h %2h + %define %2q %2 %if %0 == 2 %define r%1m %2d %define r%1mp %2 @@ -261,9 +281,9 @@ %define e%1h %3 %define r%1b %2 %define e%1b %2 -%if ARCH_X86_64 == 0 - %define r%1 e%1 -%endif + %if ARCH_X86_64 == 0 + %define r%1 e%1 + %endif %endmacro DECLARE_REG_SIZE ax, al, ah @@ -373,7 +393,7 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14 %macro ASSERT 1 %if (%1) == 0 - %error assert failed + %error assertion ``%1'' failed %endif %endmacro @@ -464,8 +484,10 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14 %if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT %if %1 > 0 %assign regs_used (regs_used + 1) - %elif ARCH_X86_64 && regs_used == num_args && num_args <= 4 + UNIX64 * 2 - %warning "Stack pointer will overwrite register argument" + %endif + %if ARCH_X86_64 && regs_used < 5 + UNIX64 * 3 + ; Ensure that we don't clobber any registers containing arguments + %assign regs_used 5 + UNIX64 * 3 %endif %endif %endif @@ -579,9 +601,9 @@ DECLARE_REG 14, R15, 120 %macro RET 0 WIN64_RESTORE_XMM_INTERNAL rsp POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7 -%if mmsize == 32 - vzeroupper -%endif + %if mmsize == 32 + vzeroupper + %endif AUTO_REP_RET %endmacro @@ -618,17 +640,17 @@ DECLARE_REG 14, R15, 72 %define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0 %macro RET 0 -%if stack_size_padded > 0 -%if required_stack_alignment > STACK_ALIGNMENT - mov rsp, rstkm -%else - add rsp, stack_size_padded -%endif -%endif + %if stack_size_padded > 0 + %if 
required_stack_alignment > STACK_ALIGNMENT + mov rsp, rstkm + %else + add rsp, stack_size_padded + %endif + %endif POP_IF_USED 14, 13, 12, 11, 10, 9 -%if mmsize == 32 - vzeroupper -%endif + %if mmsize == 32 + vzeroupper + %endif AUTO_REP_RET %endmacro @@ -674,29 +696,29 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14 %define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0 %macro RET 0 -%if stack_size_padded > 0 -%if required_stack_alignment > STACK_ALIGNMENT - mov rsp, rstkm -%else - add rsp, stack_size_padded -%endif -%endif + %if stack_size_padded > 0 + %if required_stack_alignment > STACK_ALIGNMENT + mov rsp, rstkm + %else + add rsp, stack_size_padded + %endif + %endif POP_IF_USED 6, 5, 4, 3 -%if mmsize == 32 - vzeroupper -%endif + %if mmsize == 32 + vzeroupper + %endif AUTO_REP_RET %endmacro %endif ;====================================================================== %if WIN64 == 0 -%macro WIN64_SPILL_XMM 1 -%endmacro -%macro WIN64_RESTORE_XMM 1 -%endmacro -%macro WIN64_PUSH_XMM 0 -%endmacro + %macro WIN64_SPILL_XMM 1 + %endmacro + %macro WIN64_RESTORE_XMM 1 + %endmacro + %macro WIN64_PUSH_XMM 0 + %endmacro %endif ; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either @@ -709,24 +731,26 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14 %else rep ret %endif + annotate_function_size %endmacro %define last_branch_adr $$ %macro AUTO_REP_RET 0 - %ifndef cpuflags - times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ != last_branch_adr. - %elif notcpuflag(ssse3) - times ((last_branch_adr-$)>>31)+1 rep + %if notcpuflag(ssse3) + times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ == last_branch_adr. 
%endif ret + annotate_function_size %endmacro %macro BRANCH_INSTR 0-* %rep %0 %macro %1 1-2 %1 %2 %1 - %%branch_instr: - %xdefine last_branch_adr %%branch_instr + %if notcpuflag(ssse3) + %%branch_instr equ $ + %xdefine last_branch_adr %%branch_instr + %endif %endmacro %rotate 1 %endrep @@ -741,6 +765,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, %elif %2 jmp %1 %endif + annotate_function_size %endmacro ;============================================================================= @@ -762,6 +787,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, cglobal_internal 0, %1 %+ SUFFIX, %2 %endmacro %macro cglobal_internal 2-3+ + annotate_function_size %if %1 %xdefine %%FUNCTION_PREFIX private_prefix ; libvpx explicitly sets visibility in shared object builds. Avoid @@ -782,17 +808,10 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, CAT_XDEFINE cglobaled_, %2, 1 %endif %xdefine current_function %2 - %ifidn __OUTPUT_FORMAT__,elf32 + %xdefine current_function_section __SECT__ + %if FORMAT_ELF global %2:function %%VISIBILITY - %elifidn __OUTPUT_FORMAT__,elf64 - global %2:function %%VISIBILITY - %elifidn __OUTPUT_FORMAT__,macho32 - %ifdef __NASM_VER__ - global %2 - %else - global %2:private_extern - %endif - %elifidn __OUTPUT_FORMAT__,macho64 + %elif FORMAT_MACHO %ifdef __NASM_VER__ global %2 %else @@ -822,16 +841,16 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, ; like cextern, but without the prefix %macro cextern_naked 1 - %xdefine %1 mangle(%1) + %ifdef PREFIX + %xdefine %1 mangle(%1) + %endif CAT_XDEFINE cglobaled_, %1, 1 extern %1 %endmacro %macro const 1-2+ %xdefine %1 mangle(private_prefix %+ _ %+ %1) - %ifidn __OUTPUT_FORMAT__,elf32 - global %1:data hidden - %elifidn __OUTPUT_FORMAT__,elf64 + %if FORMAT_ELF global %1:data hidden %else global %1 @@ -839,14 +858,29 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, 
jng, jnge, ja, jae, %1: %2 %endmacro -; This is needed for ELF, otherwise the GNU linker assumes the stack is -; executable by default. -%ifidn __OUTPUT_FORMAT__,elf32 -SECTION .note.GNU-stack noalloc noexec nowrite progbits -%elifidn __OUTPUT_FORMAT__,elf64 -SECTION .note.GNU-stack noalloc noexec nowrite progbits +; This is needed for ELF, otherwise the GNU linker assumes the stack is executable by default. +%if FORMAT_ELF + [SECTION .note.GNU-stack noalloc noexec nowrite progbits] %endif +; Tell debuggers how large the function was. +; This may be invoked multiple times per function; we rely on later instances overriding earlier ones. +; This is invoked by RET and similar macros, and also cglobal does it for the previous function, +; but if the last function in a source file doesn't use any of the standard macros for its epilogue, +; then its size might be unspecified. +%macro annotate_function_size 0 + %ifdef __YASM_VER__ + %ifdef current_function + %if FORMAT_ELF + current_function_section + %%ecf equ $ + size current_function %%ecf - current_function + __SECT__ + %endif + %endif + %endif +%endmacro + ; cpuflags %assign cpuflags_mmx (1<<0) @@ -875,12 +909,9 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits %assign cpuflags_bmi1 (1<<22)|cpuflags_lzcnt %assign cpuflags_bmi2 (1<<23)|cpuflags_bmi1 -%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x)) -%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x)) - -%ifdef __NASM_VER__ - %use smartalign -%endif +; Returns a boolean value expressing whether or not the specified cpuflag is enabled. +%define cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1) +%define notcpuflag(x) (cpuflag(x) ^ 1) ; Takes an arbitrary number of cpuflags from the above list. ; All subsequent functions (up to the next INIT_CPUFLAGS) is built for the specified cpu. 
@@ -917,12 +948,18 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits %endif %endif - %ifdef __NASM_VER__ - ALIGNMODE k7 - %elif ARCH_X86_64 || cpuflag(sse2) - CPU amdnop + %if ARCH_X86_64 || cpuflag(sse2) + %ifdef __NASM_VER__ + ALIGNMODE k8 + %else + CPU amdnop + %endif %else - CPU basicnop + %ifdef __NASM_VER__ + ALIGNMODE nop + %else + CPU basicnop + %endif %endif %endmacro @@ -951,14 +988,14 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits %define movnta movntq %assign %%i 0 %rep 8 - CAT_XDEFINE m, %%i, mm %+ %%i - CAT_XDEFINE nnmm, %%i, %%i - %assign %%i %%i+1 + CAT_XDEFINE m, %%i, mm %+ %%i + CAT_XDEFINE nnmm, %%i, %%i + %assign %%i %%i+1 %endrep %rep 8 - CAT_UNDEF m, %%i - CAT_UNDEF nnmm, %%i - %assign %%i %%i+1 + CAT_UNDEF m, %%i + CAT_UNDEF nnmm, %%i + %assign %%i %%i+1 %endrep INIT_CPUFLAGS %1 %endmacro @@ -969,7 +1006,7 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits %define mmsize 16 %define num_mmregs 8 %if ARCH_X86_64 - %define num_mmregs 16 + %define num_mmregs 16 %endif %define mova movdqa %define movu movdqu @@ -977,9 +1014,9 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits %define movnta movntdq %assign %%i 0 %rep num_mmregs - CAT_XDEFINE m, %%i, xmm %+ %%i - CAT_XDEFINE nnxmm, %%i, %%i - %assign %%i %%i+1 + CAT_XDEFINE m, %%i, xmm %+ %%i + CAT_XDEFINE nnxmm, %%i, %%i + %assign %%i %%i+1 %endrep INIT_CPUFLAGS %1 %endmacro @@ -990,7 +1027,7 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits %define mmsize 32 %define num_mmregs 8 %if ARCH_X86_64 - %define num_mmregs 16 + %define num_mmregs 16 %endif %define mova movdqa %define movu movdqu @@ -998,9 +1035,9 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits %define movnta movntdq %assign %%i 0 %rep num_mmregs - CAT_XDEFINE m, %%i, ymm %+ %%i - CAT_XDEFINE nnymm, %%i, %%i - %assign %%i %%i+1 + CAT_XDEFINE m, %%i, ymm %+ %%i + CAT_XDEFINE nnymm, %%i, %%i + %assign %%i %%i+1 %endrep INIT_CPUFLAGS %1 %endmacro @@ -1024,7 +1061,7 @@ INIT_XMM %assign 
i 0 %rep 16 DECLARE_MMCAST i -%assign i i+1 + %assign i i+1 %endrep ; I often want to use macros that permute their arguments. e.g. there's no @@ -1042,23 +1079,23 @@ INIT_XMM ; doesn't cost any cycles. %macro PERMUTE 2-* ; takes a list of pairs to swap -%rep %0/2 - %xdefine %%tmp%2 m%2 - %rotate 2 -%endrep -%rep %0/2 - %xdefine m%1 %%tmp%2 - CAT_XDEFINE nn, m%1, %1 - %rotate 2 -%endrep + %rep %0/2 + %xdefine %%tmp%2 m%2 + %rotate 2 + %endrep + %rep %0/2 + %xdefine m%1 %%tmp%2 + CAT_XDEFINE nn, m%1, %1 + %rotate 2 + %endrep %endmacro %macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs) -%ifnum %1 ; SWAP 0, 1, ... - SWAP_INTERNAL_NUM %1, %2 -%else ; SWAP m0, m1, ... - SWAP_INTERNAL_NAME %1, %2 -%endif + %ifnum %1 ; SWAP 0, 1, ... + SWAP_INTERNAL_NUM %1, %2 + %else ; SWAP m0, m1, ... + SWAP_INTERNAL_NAME %1, %2 + %endif %endmacro %macro SWAP_INTERNAL_NUM 2-* @@ -1068,7 +1105,7 @@ INIT_XMM %xdefine m%2 %%tmp CAT_XDEFINE nn, m%1, %1 CAT_XDEFINE nn, m%2, %2 - %rotate 1 + %rotate 1 %endrep %endmacro @@ -1076,7 +1113,7 @@ INIT_XMM %xdefine %%args nn %+ %1 %rep %0-1 %xdefine %%args %%args, nn %+ %2 - %rotate 1 + %rotate 1 %endrep SWAP_INTERNAL_NUM %%args %endmacro @@ -1093,7 +1130,7 @@ INIT_XMM %assign %%i 0 %rep num_mmregs CAT_XDEFINE %%f, %%i, m %+ %%i - %assign %%i %%i+1 + %assign %%i %%i+1 %endrep %endmacro @@ -1103,20 +1140,20 @@ INIT_XMM %rep num_mmregs CAT_XDEFINE m, %%i, %1_m %+ %%i CAT_XDEFINE nn, m %+ %%i, %%i - %assign %%i %%i+1 + %assign %%i %%i+1 %endrep %endif %endmacro ; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't %macro call 1 - call_internal %1, %1 %+ SUFFIX + call_internal %1 %+ SUFFIX, %1 %endmacro %macro call_internal 2 - %xdefine %%i %1 - %ifndef cglobaled_%1 - %ifdef cglobaled_%2 - %xdefine %%i %2 + %xdefine %%i %2 + %ifndef cglobaled_%2 + %ifdef cglobaled_%1 + %xdefine %%i %1 %endif %endif call %%i @@ -1159,7 +1196,7 @@ INIT_XMM %endif CAT_XDEFINE sizeofxmm, i, 16 CAT_XDEFINE 
sizeofymm, i, 32 -%assign i i+1 + %assign i i+1 %endrep %undef i @@ -1536,7 +1573,7 @@ AVX_INSTR pfmul, 3dnow, 1, 0, 1 %else CAT_XDEFINE q, j, i %endif -%assign i i+1 + %assign i i+1 %endrep %undef i %undef j @@ -1559,55 +1596,54 @@ FMA_INSTR pmacsdd, pmulld, paddd ; sse4 emulation FMA_INSTR pmacsdql, pmuldq, paddq ; sse4 emulation FMA_INSTR pmadcswd, pmaddwd, paddd -; convert FMA4 to FMA3 if possible -%macro FMA4_INSTR 4 - %macro %1 4-8 %1, %2, %3, %4 - %if cpuflag(fma4) - v%5 %1, %2, %3, %4 - %elifidn %1, %2 - v%6 %1, %4, %3 ; %1 = %1 * %3 + %4 - %elifidn %1, %3 - v%7 %1, %2, %4 ; %1 = %2 * %1 + %4 - %elifidn %1, %4 - v%8 %1, %2, %3 ; %1 = %2 * %3 + %1 - %else - %error fma3 emulation of ``%5 %1, %2, %3, %4'' is not supported - %endif - %endmacro +; Macros for consolidating FMA3 and FMA4 using 4-operand (dst, src1, src2, src3) syntax. +; FMA3 is only possible if dst is the same as one of the src registers. +; Either src2 or src3 can be a memory operand. +%macro FMA4_INSTR 2-* + %push fma4_instr + %xdefine %$prefix %1 + %rep %0 - 1 + %macro %$prefix%2 4-6 %$prefix, %2 + %if notcpuflag(fma3) && notcpuflag(fma4) + %error use of ``%5%6'' fma instruction in cpuname function: current_function + %elif cpuflag(fma4) + v%5%6 %1, %2, %3, %4 + %elifidn %1, %2 + ; If %3 or %4 is a memory operand it needs to be encoded as the last operand. 
+ %ifid %3 + v%{5}213%6 %2, %3, %4 + %else + v%{5}132%6 %2, %4, %3 + %endif + %elifidn %1, %3 + v%{5}213%6 %3, %2, %4 + %elifidn %1, %4 + v%{5}231%6 %4, %2, %3 + %else + %error fma3 emulation of ``%5%6 %1, %2, %3, %4'' is not supported + %endif + %endmacro + %rotate 1 + %endrep + %pop %endmacro -FMA4_INSTR fmaddpd, fmadd132pd, fmadd213pd, fmadd231pd -FMA4_INSTR fmaddps, fmadd132ps, fmadd213ps, fmadd231ps -FMA4_INSTR fmaddsd, fmadd132sd, fmadd213sd, fmadd231sd -FMA4_INSTR fmaddss, fmadd132ss, fmadd213ss, fmadd231ss +FMA4_INSTR fmadd, pd, ps, sd, ss +FMA4_INSTR fmaddsub, pd, ps +FMA4_INSTR fmsub, pd, ps, sd, ss +FMA4_INSTR fmsubadd, pd, ps +FMA4_INSTR fnmadd, pd, ps, sd, ss +FMA4_INSTR fnmsub, pd, ps, sd, ss -FMA4_INSTR fmaddsubpd, fmaddsub132pd, fmaddsub213pd, fmaddsub231pd -FMA4_INSTR fmaddsubps, fmaddsub132ps, fmaddsub213ps, fmaddsub231ps -FMA4_INSTR fmsubaddpd, fmsubadd132pd, fmsubadd213pd, fmsubadd231pd -FMA4_INSTR fmsubaddps, fmsubadd132ps, fmsubadd213ps, fmsubadd231ps - -FMA4_INSTR fmsubpd, fmsub132pd, fmsub213pd, fmsub231pd -FMA4_INSTR fmsubps, fmsub132ps, fmsub213ps, fmsub231ps -FMA4_INSTR fmsubsd, fmsub132sd, fmsub213sd, fmsub231sd -FMA4_INSTR fmsubss, fmsub132ss, fmsub213ss, fmsub231ss - -FMA4_INSTR fnmaddpd, fnmadd132pd, fnmadd213pd, fnmadd231pd -FMA4_INSTR fnmaddps, fnmadd132ps, fnmadd213ps, fnmadd231ps -FMA4_INSTR fnmaddsd, fnmadd132sd, fnmadd213sd, fnmadd231sd -FMA4_INSTR fnmaddss, fnmadd132ss, fnmadd213ss, fnmadd231ss - -FMA4_INSTR fnmsubpd, fnmsub132pd, fnmsub213pd, fnmsub231pd -FMA4_INSTR fnmsubps, fnmsub132ps, fnmsub213ps, fnmsub231ps -FMA4_INSTR fnmsubsd, fnmsub132sd, fnmsub213sd, fnmsub231sd -FMA4_INSTR fnmsubss, fnmsub132ss, fnmsub213ss, fnmsub231ss - -; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug -%if ARCH_X86_64 == 0 -%macro vpbroadcastq 2 -%if sizeof%1 == 16 - movddup %1, %2 -%else - vbroadcastsd %1, %2 -%endif -%endmacro +; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug (fixed in 1.3.0) +%ifdef __YASM_VER__ 
+ %if __YASM_VERSION_ID__ < 0x01030000 && ARCH_X86_64 == 0 + %macro vpbroadcastq 2 + %if sizeof%1 == 16 + movddup %1, %2 + %else + vbroadcastsd %1, %2 + %endif + %endmacro + %endif %endif diff --git a/libs/libvpx/tools/ftfy.sh b/libs/libvpx/tools/ftfy.sh index 29ae95e9ba..c005918fe7 100755 --- a/libs/libvpx/tools/ftfy.sh +++ b/libs/libvpx/tools/ftfy.sh @@ -32,7 +32,7 @@ vpx_style() { for f; do case "$f" in *.h|*.c|*.cc) - "${dirname_self}"/vpx-astyle.sh "$f" + clang-format -i --style=file "$f" ;; esac done @@ -102,9 +102,8 @@ CLEAN_FILES="${CLEAN_FILES} ${ORIG_COMMIT_MSG} ${NEW_COMMIT_MSG}" # Preconditions [ $# -lt 2 ] || usage -# Check that astyle supports pad-header and align-pointer=name -if ! astyle --pad-header --align-pointer=name < /dev/null; then - log "Install astyle v1.24 or newer" +if ! clang-format -version >/dev/null 2>&1; then + log "clang-format not found" exit 1 fi diff --git a/libs/libvpx/tools/vpx-astyle.sh b/libs/libvpx/tools/vpx-astyle.sh deleted file mode 100755 index 6340426bdc..0000000000 --- a/libs/libvpx/tools/vpx-astyle.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/sh -set -e -astyle --style=java --indent=spaces=2 --indent-switches\ - --min-conditional-indent=0 \ - --pad-oper --pad-header --unpad-paren \ - --align-pointer=name \ - --indent-preprocessor --convert-tabs --indent-labels \ - --suffix=none --quiet --max-instatement-indent=80 "$@" -# Disabled, too greedy? -#sed -i 's;[[:space:]]\{1,\}\[;[;g' "$@" - -sed_i() { - # Incompatible sed parameter parsing. 
- if sed -i 2>&1 | grep -q 'requires an argument'; then - sed -i '' "$@" - else - sed -i "$@" - fi -} - -sed_i -e 's/[[:space:]]\{1,\}\([,;]\)/\1/g' \ - -e 's/[[:space:]]\{1,\}\([+-]\{2\};\)/\1/g' \ - -e 's/,[[:space:]]*}/}/g' \ - -e 's;//\([^/[:space:]].*$\);// \1;g' \ - -e 's/^\(public\|private\|protected\):$/ \1:/g' \ - -e 's/[[:space:]]\{1,\}$//g' \ - "$@" diff --git a/libs/libvpx/tools_common.c b/libs/libvpx/tools_common.c index 20b259ca94..6f14c25561 100644 --- a/libs/libvpx/tools_common.c +++ b/libs/libvpx/tools_common.c @@ -16,11 +16,11 @@ #include "./tools_common.h" -#if CONFIG_VP8_ENCODER || CONFIG_VP9_ENCODER || CONFIG_VP10_ENCODER +#if CONFIG_VP8_ENCODER || CONFIG_VP9_ENCODER #include "vpx/vp8cx.h" #endif -#if CONFIG_VP8_DECODER || CONFIG_VP9_DECODER || CONFIG_VP10_DECODER +#if CONFIG_VP8_DECODER || CONFIG_VP9_DECODER #include "vpx/vp8dx.h" #endif @@ -29,23 +29,22 @@ #include #ifdef __OS2__ -#define _setmode setmode -#define _fileno fileno -#define _O_BINARY O_BINARY +#define _setmode setmode +#define _fileno fileno +#define _O_BINARY O_BINARY #endif #endif -#define LOG_ERROR(label) do {\ - const char *l = label;\ - va_list ap;\ - va_start(ap, fmt);\ - if (l)\ - fprintf(stderr, "%s: ", l);\ - vfprintf(stderr, fmt, ap);\ - fprintf(stderr, "\n");\ - va_end(ap);\ -} while (0) - +#define LOG_ERROR(label) \ + do { \ + const char *l = label; \ + va_list ap; \ + va_start(ap, fmt); \ + if (l) fprintf(stderr, "%s: ", l); \ + vfprintf(stderr, fmt, ap); \ + fprintf(stderr, "\n"); \ + va_end(ap); \ + } while (0) FILE *set_binary_mode(FILE *stream) { (void)stream; @@ -65,16 +64,13 @@ void fatal(const char *fmt, ...) { exit(EXIT_FAILURE); } -void warn(const char *fmt, ...) { - LOG_ERROR("Warning"); -} +void warn(const char *fmt, ...) 
{ LOG_ERROR("Warning"); } void die_codec(vpx_codec_ctx_t *ctx, const char *s) { const char *detail = vpx_codec_error_detail(ctx); printf("%s: %s\n", s, vpx_codec_error(ctx)); - if (detail) - printf(" %s\n", detail); + if (detail) printf(" %s\n", detail); exit(EXIT_FAILURE); } @@ -97,15 +93,16 @@ int read_yuv_frame(struct VpxInputContext *input_ctx, vpx_image_t *yuv_frame) { */ switch (plane) { case 1: - ptr = yuv_frame->planes[ - yuv_frame->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_V : VPX_PLANE_U]; + ptr = + yuv_frame->planes[yuv_frame->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_V + : VPX_PLANE_U]; break; case 2: - ptr = yuv_frame->planes[ - yuv_frame->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_U : VPX_PLANE_V]; + ptr = + yuv_frame->planes[yuv_frame->fmt == VPX_IMG_FMT_YV12 ? VPX_PLANE_U + : VPX_PLANE_V]; break; - default: - ptr = yuv_frame->planes[plane]; + default: ptr = yuv_frame->planes[plane]; } for (r = 0; r < h; ++r) { @@ -133,16 +130,12 @@ int read_yuv_frame(struct VpxInputContext *input_ctx, vpx_image_t *yuv_frame) { #if CONFIG_ENCODERS static const VpxInterface vpx_encoders[] = { -#if CONFIG_VP10_ENCODER - {"vp10", VP10_FOURCC, &vpx_codec_vp10_cx}, -#endif - #if CONFIG_VP8_ENCODER - {"vp8", VP8_FOURCC, &vpx_codec_vp8_cx}, + { "vp8", VP8_FOURCC, &vpx_codec_vp8_cx }, #endif #if CONFIG_VP9_ENCODER - {"vp9", VP9_FOURCC, &vpx_codec_vp9_cx}, + { "vp9", VP9_FOURCC, &vpx_codec_vp9_cx }, #endif }; @@ -150,17 +143,14 @@ int get_vpx_encoder_count(void) { return sizeof(vpx_encoders) / sizeof(vpx_encoders[0]); } -const VpxInterface *get_vpx_encoder_by_index(int i) { - return &vpx_encoders[i]; -} +const VpxInterface *get_vpx_encoder_by_index(int i) { return &vpx_encoders[i]; } const VpxInterface *get_vpx_encoder_by_name(const char *name) { int i; for (i = 0; i < get_vpx_encoder_count(); ++i) { const VpxInterface *encoder = get_vpx_encoder_by_index(i); - if (strcmp(encoder->name, name) == 0) - return encoder; + if (strcmp(encoder->name, name) == 0) return encoder; } return NULL; @@ 
-172,15 +162,11 @@ const VpxInterface *get_vpx_encoder_by_name(const char *name) { static const VpxInterface vpx_decoders[] = { #if CONFIG_VP8_DECODER - {"vp8", VP8_FOURCC, &vpx_codec_vp8_dx}, + { "vp8", VP8_FOURCC, &vpx_codec_vp8_dx }, #endif #if CONFIG_VP9_DECODER - {"vp9", VP9_FOURCC, &vpx_codec_vp9_dx}, -#endif - -#if CONFIG_VP10_DECODER - {"vp10", VP10_FOURCC, &vpx_codec_vp10_dx}, + { "vp9", VP9_FOURCC, &vpx_codec_vp9_dx }, #endif }; @@ -188,17 +174,14 @@ int get_vpx_decoder_count(void) { return sizeof(vpx_decoders) / sizeof(vpx_decoders[0]); } -const VpxInterface *get_vpx_decoder_by_index(int i) { - return &vpx_decoders[i]; -} +const VpxInterface *get_vpx_decoder_by_index(int i) { return &vpx_decoders[i]; } const VpxInterface *get_vpx_decoder_by_name(const char *name) { int i; for (i = 0; i < get_vpx_decoder_count(); ++i) { - const VpxInterface *const decoder = get_vpx_decoder_by_index(i); - if (strcmp(decoder->name, name) == 0) - return decoder; + const VpxInterface *const decoder = get_vpx_decoder_by_index(i); + if (strcmp(decoder->name, name) == 0) return decoder; } return NULL; @@ -209,8 +192,7 @@ const VpxInterface *get_vpx_decoder_by_fourcc(uint32_t fourcc) { for (i = 0; i < get_vpx_decoder_count(); ++i) { const VpxInterface *const decoder = get_vpx_decoder_by_index(i); - if (decoder->fourcc == fourcc) - return decoder; + if (decoder->fourcc == fourcc) return decoder; } return NULL; @@ -228,7 +210,7 @@ int vpx_img_plane_width(const vpx_image_t *img, int plane) { } int vpx_img_plane_height(const vpx_image_t *img, int plane) { - if (plane > 0 && img->y_chroma_shift > 0) + if (plane > 0 && img->y_chroma_shift > 0) return (img->d_h + 1) >> img->y_chroma_shift; else return img->d_h; @@ -241,7 +223,7 @@ void vpx_img_write(const vpx_image_t *img, FILE *file) { const unsigned char *buf = img->planes[plane]; const int stride = img->stride[plane]; const int w = vpx_img_plane_width(img, plane) * - ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 
2 : 1); + ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1); const int h = vpx_img_plane_height(img, plane); int y; @@ -259,13 +241,12 @@ int vpx_img_read(vpx_image_t *img, FILE *file) { unsigned char *buf = img->planes[plane]; const int stride = img->stride[plane]; const int w = vpx_img_plane_width(img, plane) * - ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1); + ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1); const int h = vpx_img_plane_height(img, plane); int y; for (y = 0; y < h; ++y) { - if (fread(buf, 1, w, file) != (size_t)w) - return 0; + if (fread(buf, 1, w, file) != (size_t)w) return 0; buf += stride; } } @@ -294,19 +275,16 @@ static void highbd_img_upshift(vpx_image_t *dst, vpx_image_t *src, int plane; if (dst->d_w != src->d_w || dst->d_h != src->d_h || dst->x_chroma_shift != src->x_chroma_shift || - dst->y_chroma_shift != src->y_chroma_shift || - dst->fmt != src->fmt || input_shift < 0) { + dst->y_chroma_shift != src->y_chroma_shift || dst->fmt != src->fmt || + input_shift < 0) { fatal("Unsupported image conversion"); } switch (src->fmt) { case VPX_IMG_FMT_I42016: case VPX_IMG_FMT_I42216: case VPX_IMG_FMT_I44416: - case VPX_IMG_FMT_I44016: - break; - default: - fatal("Unsupported image conversion"); - break; + case VPX_IMG_FMT_I44016: break; + default: fatal("Unsupported image conversion"); break; } for (plane = 0; plane < 3; plane++) { int w = src->d_w; @@ -321,8 +299,7 @@ static void highbd_img_upshift(vpx_image_t *dst, vpx_image_t *src, (uint16_t *)(src->planes[plane] + y * src->stride[plane]); uint16_t *p_dst = (uint16_t *)(dst->planes[plane] + y * dst->stride[plane]); - for (x = 0; x < w; x++) - *p_dst++ = (*p_src++ << input_shift) + offset; + for (x = 0; x < w; x++) *p_dst++ = (*p_src++ << input_shift) + offset; } } } @@ -335,19 +312,15 @@ static void lowbd_img_upshift(vpx_image_t *dst, vpx_image_t *src, if (dst->d_w != src->d_w || dst->d_h != src->d_h || dst->x_chroma_shift != src->x_chroma_shift || dst->y_chroma_shift != src->y_chroma_shift || - 
dst->fmt != src->fmt + VPX_IMG_FMT_HIGHBITDEPTH || - input_shift < 0) { + dst->fmt != src->fmt + VPX_IMG_FMT_HIGHBITDEPTH || input_shift < 0) { fatal("Unsupported image conversion"); } switch (src->fmt) { case VPX_IMG_FMT_I420: case VPX_IMG_FMT_I422: case VPX_IMG_FMT_I444: - case VPX_IMG_FMT_I440: - break; - default: - fatal("Unsupported image conversion"); - break; + case VPX_IMG_FMT_I440: break; + default: fatal("Unsupported image conversion"); break; } for (plane = 0; plane < 3; plane++) { int w = src->d_w; @@ -368,8 +341,7 @@ static void lowbd_img_upshift(vpx_image_t *dst, vpx_image_t *src, } } -void vpx_img_upshift(vpx_image_t *dst, vpx_image_t *src, - int input_shift) { +void vpx_img_upshift(vpx_image_t *dst, vpx_image_t *src, int input_shift) { if (src->fmt & VPX_IMG_FMT_HIGHBITDEPTH) { highbd_img_upshift(dst, src, input_shift); } else { @@ -379,9 +351,8 @@ void vpx_img_upshift(vpx_image_t *dst, vpx_image_t *src, void vpx_img_truncate_16_to_8(vpx_image_t *dst, vpx_image_t *src) { int plane; - if (dst->fmt + VPX_IMG_FMT_HIGHBITDEPTH != src->fmt || - dst->d_w != src->d_w || dst->d_h != src->d_h || - dst->x_chroma_shift != src->x_chroma_shift || + if (dst->fmt + VPX_IMG_FMT_HIGHBITDEPTH != src->fmt || dst->d_w != src->d_w || + dst->d_h != src->d_h || dst->x_chroma_shift != src->x_chroma_shift || dst->y_chroma_shift != src->y_chroma_shift) { fatal("Unsupported image conversion"); } @@ -389,11 +360,8 @@ void vpx_img_truncate_16_to_8(vpx_image_t *dst, vpx_image_t *src) { case VPX_IMG_FMT_I420: case VPX_IMG_FMT_I422: case VPX_IMG_FMT_I444: - case VPX_IMG_FMT_I440: - break; - default: - fatal("Unsupported image conversion"); - break; + case VPX_IMG_FMT_I440: break; + default: fatal("Unsupported image conversion"); break; } for (plane = 0; plane < 3; plane++) { int w = src->d_w; @@ -419,19 +387,16 @@ static void highbd_img_downshift(vpx_image_t *dst, vpx_image_t *src, int plane; if (dst->d_w != src->d_w || dst->d_h != src->d_h || dst->x_chroma_shift != 
src->x_chroma_shift || - dst->y_chroma_shift != src->y_chroma_shift || - dst->fmt != src->fmt || down_shift < 0) { + dst->y_chroma_shift != src->y_chroma_shift || dst->fmt != src->fmt || + down_shift < 0) { fatal("Unsupported image conversion"); } switch (src->fmt) { case VPX_IMG_FMT_I42016: case VPX_IMG_FMT_I42216: case VPX_IMG_FMT_I44416: - case VPX_IMG_FMT_I44016: - break; - default: - fatal("Unsupported image conversion"); - break; + case VPX_IMG_FMT_I44016: break; + default: fatal("Unsupported image conversion"); break; } for (plane = 0; plane < 3; plane++) { int w = src->d_w; @@ -446,8 +411,7 @@ static void highbd_img_downshift(vpx_image_t *dst, vpx_image_t *src, (uint16_t *)(src->planes[plane] + y * src->stride[plane]); uint16_t *p_dst = (uint16_t *)(dst->planes[plane] + y * dst->stride[plane]); - for (x = 0; x < w; x++) - *p_dst++ = *p_src++ >> down_shift; + for (x = 0; x < w; x++) *p_dst++ = *p_src++ >> down_shift; } } } @@ -458,19 +422,15 @@ static void lowbd_img_downshift(vpx_image_t *dst, vpx_image_t *src, if (dst->d_w != src->d_w || dst->d_h != src->d_h || dst->x_chroma_shift != src->x_chroma_shift || dst->y_chroma_shift != src->y_chroma_shift || - src->fmt != dst->fmt + VPX_IMG_FMT_HIGHBITDEPTH || - down_shift < 0) { + src->fmt != dst->fmt + VPX_IMG_FMT_HIGHBITDEPTH || down_shift < 0) { fatal("Unsupported image conversion"); } switch (dst->fmt) { case VPX_IMG_FMT_I420: case VPX_IMG_FMT_I422: case VPX_IMG_FMT_I444: - case VPX_IMG_FMT_I440: - break; - default: - fatal("Unsupported image conversion"); - break; + case VPX_IMG_FMT_I440: break; + default: fatal("Unsupported image conversion"); break; } for (plane = 0; plane < 3; plane++) { int w = src->d_w; @@ -491,8 +451,7 @@ static void lowbd_img_downshift(vpx_image_t *dst, vpx_image_t *src, } } -void vpx_img_downshift(vpx_image_t *dst, vpx_image_t *src, - int down_shift) { +void vpx_img_downshift(vpx_image_t *dst, vpx_image_t *src, int down_shift) { if (dst->fmt & VPX_IMG_FMT_HIGHBITDEPTH) { 
highbd_img_downshift(dst, src, down_shift); } else { diff --git a/libs/libvpx/tools_common.h b/libs/libvpx/tools_common.h index 98347b6f27..73ba1bc03b 100644 --- a/libs/libvpx/tools_common.h +++ b/libs/libvpx/tools_common.h @@ -30,24 +30,24 @@ /* MinGW uses f{seek,tell}o64 for large files. */ #define fseeko fseeko64 #define ftello ftello64 -#endif /* _WIN32 */ +#endif /* _WIN32 */ #if CONFIG_OS_SUPPORT #if defined(_MSC_VER) -#include /* NOLINT */ -#define isatty _isatty -#define fileno _fileno +#include /* NOLINT */ +#define isatty _isatty +#define fileno _fileno #else -#include /* NOLINT */ -#endif /* _MSC_VER */ -#endif /* CONFIG_OS_SUPPORT */ +#include /* NOLINT */ +#endif /* _MSC_VER */ +#endif /* CONFIG_OS_SUPPORT */ /* Use 32-bit file operations in WebM file format when building ARM * executables (.axf) with RVCT. */ #if !CONFIG_OS_SUPPORT #define fseeko fseek #define ftello ftell -#endif /* CONFIG_OS_SUPPORT */ +#endif /* CONFIG_OS_SUPPORT */ #define LITERALU64(hi, lo) ((((uint64_t)hi) << 32) | lo) @@ -55,14 +55,13 @@ #define PATH_MAX 512 #endif -#define IVF_FRAME_HDR_SZ (4 + 8) /* 4 byte size + 8 byte timestamp */ +#define IVF_FRAME_HDR_SZ (4 + 8) /* 4 byte size + 8 byte timestamp */ #define IVF_FILE_HDR_SZ 32 #define RAW_FRAME_HDR_SZ sizeof(uint32_t) #define VP8_FOURCC 0x30385056 #define VP9_FOURCC 0x30395056 -#define VP10_FOURCC 0x303a5056 enum VideoFileType { FILE_TYPE_RAW, @@ -158,7 +157,7 @@ void vpx_img_truncate_16_to_8(vpx_image_t *dst, vpx_image_t *src); #endif #ifdef __cplusplus -} /* extern "C" */ +} /* extern "C" */ #endif #endif // TOOLS_COMMON_H_ diff --git a/libs/libvpx/video_reader.c b/libs/libvpx/video_reader.c index 39c7edba1e..a0ba2521c6 100644 --- a/libs/libvpx/video_reader.c +++ b/libs/libvpx/video_reader.c @@ -30,21 +30,17 @@ VpxVideoReader *vpx_video_reader_open(const char *filename) { char header[32]; VpxVideoReader *reader = NULL; FILE *const file = fopen(filename, "rb"); - if (!file) - return NULL; // Can't open file + if (!file) 
return NULL; // Can't open file - if (fread(header, 1, 32, file) != 32) - return NULL; // Can't read file header + if (fread(header, 1, 32, file) != 32) return NULL; // Can't read file header if (memcmp(kIVFSignature, header, 4) != 0) return NULL; // Wrong IVF signature - if (mem_get_le16(header + 4) != 0) - return NULL; // Wrong IVF version + if (mem_get_le16(header + 4) != 0) return NULL; // Wrong IVF version reader = calloc(1, sizeof(*reader)); - if (!reader) - return NULL; // Can't allocate VpxVideoReader + if (!reader) return NULL; // Can't allocate VpxVideoReader reader->file = file; reader->info.codec_fourcc = mem_get_le32(header + 8); @@ -71,8 +67,7 @@ int vpx_video_reader_read_frame(VpxVideoReader *reader) { const uint8_t *vpx_video_reader_get_frame(VpxVideoReader *reader, size_t *size) { - if (size) - *size = reader->frame_size; + if (size) *size = reader->frame_size; return reader->buffer; } @@ -80,4 +75,3 @@ const uint8_t *vpx_video_reader_get_frame(VpxVideoReader *reader, const VpxVideoInfo *vpx_video_reader_get_info(VpxVideoReader *reader) { return &reader->info; } - diff --git a/libs/libvpx/video_reader.h b/libs/libvpx/video_reader.h index a62c6d7109..73c25b00a7 100644 --- a/libs/libvpx/video_reader.h +++ b/libs/libvpx/video_reader.h @@ -39,8 +39,7 @@ int vpx_video_reader_read_frame(VpxVideoReader *reader); // Returns the pointer to memory buffer with frame data read by last call to // vpx_video_reader_read_frame(). -const uint8_t *vpx_video_reader_get_frame(VpxVideoReader *reader, - size_t *size); +const uint8_t *vpx_video_reader_get_frame(VpxVideoReader *reader, size_t *size); // Fills VpxVideoInfo with information from opened video file. 
const VpxVideoInfo *vpx_video_reader_get_info(VpxVideoReader *reader); diff --git a/libs/libvpx/video_writer.c b/libs/libvpx/video_writer.c index 3695236bfa..56d428b072 100644 --- a/libs/libvpx/video_writer.c +++ b/libs/libvpx/video_writer.c @@ -37,12 +37,10 @@ VpxVideoWriter *vpx_video_writer_open(const char *filename, if (container == kContainerIVF) { VpxVideoWriter *writer = NULL; FILE *const file = fopen(filename, "wb"); - if (!file) - return NULL; + if (!file) return NULL; writer = malloc(sizeof(*writer)); - if (!writer) - return NULL; + if (!writer) return NULL; writer->frame_count = 0; writer->info = *info; @@ -67,12 +65,10 @@ void vpx_video_writer_close(VpxVideoWriter *writer) { } } -int vpx_video_writer_write_frame(VpxVideoWriter *writer, - const uint8_t *buffer, size_t size, - int64_t pts) { +int vpx_video_writer_write_frame(VpxVideoWriter *writer, const uint8_t *buffer, + size_t size, int64_t pts) { ivf_write_frame_header(writer->file, pts, size); - if (fwrite(buffer, 1, size, writer->file) != size) - return 0; + if (fwrite(buffer, 1, size, writer->file) != size) return 0; ++writer->frame_count; diff --git a/libs/libvpx/video_writer.h b/libs/libvpx/video_writer.h index 5dbfe52ea0..a769811c44 100644 --- a/libs/libvpx/video_writer.h +++ b/libs/libvpx/video_writer.h @@ -13,9 +13,7 @@ #include "./video_common.h" -typedef enum { - kContainerIVF -} VpxContainer; +typedef enum { kContainerIVF } VpxContainer; struct VpxVideoWriterStruct; typedef struct VpxVideoWriterStruct VpxVideoWriter; @@ -36,9 +34,8 @@ VpxVideoWriter *vpx_video_writer_open(const char *filename, void vpx_video_writer_close(VpxVideoWriter *writer); // Writes frame bytes to the file. 
-int vpx_video_writer_write_frame(VpxVideoWriter *writer, - const uint8_t *buffer, size_t size, - int64_t pts); +int vpx_video_writer_write_frame(VpxVideoWriter *writer, const uint8_t *buffer, + size_t size, int64_t pts); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp10/common/alloccommon.c b/libs/libvpx/vp10/common/alloccommon.c deleted file mode 100644 index 9ca86e5e58..0000000000 --- a/libs/libvpx/vp10/common/alloccommon.c +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "./vpx_config.h" -#include "vpx_mem/vpx_mem.h" - -#include "vp10/common/alloccommon.h" -#include "vp10/common/blockd.h" -#include "vp10/common/entropymode.h" -#include "vp10/common/entropymv.h" -#include "vp10/common/onyxc_int.h" - -void vp10_set_mb_mi(VP10_COMMON *cm, int width, int height) { - const int aligned_width = ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2); - const int aligned_height = ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2); - - cm->mi_cols = aligned_width >> MI_SIZE_LOG2; - cm->mi_rows = aligned_height >> MI_SIZE_LOG2; - cm->mi_stride = calc_mi_size(cm->mi_cols); - - cm->mb_cols = (cm->mi_cols + 1) >> 1; - cm->mb_rows = (cm->mi_rows + 1) >> 1; - cm->MBs = cm->mb_rows * cm->mb_cols; -} - -static int alloc_seg_map(VP10_COMMON *cm, int seg_map_size) { - int i; - - for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) { - cm->seg_map_array[i] = (uint8_t *)vpx_calloc(seg_map_size, 1); - if (cm->seg_map_array[i] == NULL) - return 1; - } - cm->seg_map_alloc_size = seg_map_size; - - // Init the index. 
- cm->seg_map_idx = 0; - cm->prev_seg_map_idx = 1; - - cm->current_frame_seg_map = cm->seg_map_array[cm->seg_map_idx]; - if (!cm->frame_parallel_decode) - cm->last_frame_seg_map = cm->seg_map_array[cm->prev_seg_map_idx]; - - return 0; -} - -static void free_seg_map(VP10_COMMON *cm) { - int i; - - for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) { - vpx_free(cm->seg_map_array[i]); - cm->seg_map_array[i] = NULL; - } - - cm->current_frame_seg_map = NULL; - - if (!cm->frame_parallel_decode) { - cm->last_frame_seg_map = NULL; - } -} - -void vp10_free_ref_frame_buffers(BufferPool *pool) { - int i; - - for (i = 0; i < FRAME_BUFFERS; ++i) { - if (pool->frame_bufs[i].ref_count > 0 && - pool->frame_bufs[i].raw_frame_buffer.data != NULL) { - pool->release_fb_cb(pool->cb_priv, &pool->frame_bufs[i].raw_frame_buffer); - pool->frame_bufs[i].ref_count = 0; - } - vpx_free(pool->frame_bufs[i].mvs); - pool->frame_bufs[i].mvs = NULL; - vpx_free_frame_buffer(&pool->frame_bufs[i].buf); - } -} - -void vp10_free_postproc_buffers(VP10_COMMON *cm) { -#if CONFIG_VP9_POSTPROC - vpx_free_frame_buffer(&cm->post_proc_buffer); - vpx_free_frame_buffer(&cm->post_proc_buffer_int); -#else - (void)cm; -#endif -} - -void vp10_free_context_buffers(VP10_COMMON *cm) { - cm->free_mi(cm); - free_seg_map(cm); - vpx_free(cm->above_context); - cm->above_context = NULL; - vpx_free(cm->above_seg_context); - cm->above_seg_context = NULL; -} - -int vp10_alloc_context_buffers(VP10_COMMON *cm, int width, int height) { - int new_mi_size; - - vp10_set_mb_mi(cm, width, height); - new_mi_size = cm->mi_stride * calc_mi_size(cm->mi_rows); - if (cm->mi_alloc_size < new_mi_size) { - cm->free_mi(cm); - if (cm->alloc_mi(cm, new_mi_size)) - goto fail; - } - - if (cm->seg_map_alloc_size < cm->mi_rows * cm->mi_cols) { - // Create the segmentation map structure and set to 0. 
- free_seg_map(cm); - if (alloc_seg_map(cm, cm->mi_rows * cm->mi_cols)) - goto fail; - } - - if (cm->above_context_alloc_cols < cm->mi_cols) { - vpx_free(cm->above_context); - cm->above_context = (ENTROPY_CONTEXT *)vpx_calloc( - 2 * mi_cols_aligned_to_sb(cm->mi_cols) * MAX_MB_PLANE, - sizeof(*cm->above_context)); - if (!cm->above_context) goto fail; - - vpx_free(cm->above_seg_context); - cm->above_seg_context = (PARTITION_CONTEXT *)vpx_calloc( - mi_cols_aligned_to_sb(cm->mi_cols), sizeof(*cm->above_seg_context)); - if (!cm->above_seg_context) goto fail; - cm->above_context_alloc_cols = cm->mi_cols; - } - - return 0; - - fail: - vp10_free_context_buffers(cm); - return 1; -} - -void vp10_remove_common(VP10_COMMON *cm) { - vp10_free_context_buffers(cm); - - vpx_free(cm->fc); - cm->fc = NULL; - vpx_free(cm->frame_contexts); - cm->frame_contexts = NULL; -} - -void vp10_init_context_buffers(VP10_COMMON *cm) { - cm->setup_mi(cm); - if (cm->last_frame_seg_map && !cm->frame_parallel_decode) - memset(cm->last_frame_seg_map, 0, cm->mi_rows * cm->mi_cols); -} - -void vp10_swap_current_and_last_seg_map(VP10_COMMON *cm) { - // Swap indices. - const int tmp = cm->seg_map_idx; - cm->seg_map_idx = cm->prev_seg_map_idx; - cm->prev_seg_map_idx = tmp; - - cm->current_frame_seg_map = cm->seg_map_array[cm->seg_map_idx]; - cm->last_frame_seg_map = cm->seg_map_array[cm->prev_seg_map_idx]; -} diff --git a/libs/libvpx/vp10/common/alloccommon.h b/libs/libvpx/vp10/common/alloccommon.h deleted file mode 100644 index 5cfe6602d3..0000000000 --- a/libs/libvpx/vp10/common/alloccommon.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. 
All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#ifndef VP10_COMMON_ALLOCCOMMON_H_ -#define VP10_COMMON_ALLOCCOMMON_H_ - -#define INVALID_IDX -1 // Invalid buffer index. - -#ifdef __cplusplus -extern "C" { -#endif - -struct VP10Common; -struct BufferPool; - -void vp10_remove_common(struct VP10Common *cm); - -int vp10_alloc_context_buffers(struct VP10Common *cm, int width, int height); -void vp10_init_context_buffers(struct VP10Common *cm); -void vp10_free_context_buffers(struct VP10Common *cm); - -void vp10_free_ref_frame_buffers(struct BufferPool *pool); -void vp10_free_postproc_buffers(struct VP10Common *cm); - -int vp10_alloc_state_buffers(struct VP10Common *cm, int width, int height); -void vp10_free_state_buffers(struct VP10Common *cm); - -void vp10_set_mb_mi(struct VP10Common *cm, int width, int height); - -void vp10_swap_current_and_last_seg_map(struct VP10Common *cm); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_ALLOCCOMMON_H_ diff --git a/libs/libvpx/vp10/common/arm/neon/iht4x4_add_neon.c b/libs/libvpx/vp10/common/arm/neon/iht4x4_add_neon.c deleted file mode 100644 index bd3e8b30f4..0000000000 --- a/libs/libvpx/vp10/common/arm/neon/iht4x4_add_neon.c +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include - -#include "./vp10_rtcd.h" -#include "./vpx_config.h" -#include "vp10/common/common.h" - -static int16_t sinpi_1_9 = 0x14a3; -static int16_t sinpi_2_9 = 0x26c9; -static int16_t sinpi_3_9 = 0x3441; -static int16_t sinpi_4_9 = 0x3b6c; -static int16_t cospi_8_64 = 0x3b21; -static int16_t cospi_16_64 = 0x2d41; -static int16_t cospi_24_64 = 0x187e; - -static INLINE void TRANSPOSE4X4( - int16x8_t *q8s16, - int16x8_t *q9s16) { - int32x4_t q8s32, q9s32; - int16x4x2_t d0x2s16, d1x2s16; - int32x4x2_t q0x2s32; - - d0x2s16 = vtrn_s16(vget_low_s16(*q8s16), vget_high_s16(*q8s16)); - d1x2s16 = vtrn_s16(vget_low_s16(*q9s16), vget_high_s16(*q9s16)); - - q8s32 = vreinterpretq_s32_s16(vcombine_s16(d0x2s16.val[0], d0x2s16.val[1])); - q9s32 = vreinterpretq_s32_s16(vcombine_s16(d1x2s16.val[0], d1x2s16.val[1])); - q0x2s32 = vtrnq_s32(q8s32, q9s32); - - *q8s16 = vreinterpretq_s16_s32(q0x2s32.val[0]); - *q9s16 = vreinterpretq_s16_s32(q0x2s32.val[1]); - return; -} - -static INLINE void GENERATE_COSINE_CONSTANTS( - int16x4_t *d0s16, - int16x4_t *d1s16, - int16x4_t *d2s16) { - *d0s16 = vdup_n_s16(cospi_8_64); - *d1s16 = vdup_n_s16(cospi_16_64); - *d2s16 = vdup_n_s16(cospi_24_64); - return; -} - -static INLINE void GENERATE_SINE_CONSTANTS( - int16x4_t *d3s16, - int16x4_t *d4s16, - int16x4_t *d5s16, - int16x8_t *q3s16) { - *d3s16 = vdup_n_s16(sinpi_1_9); - *d4s16 = vdup_n_s16(sinpi_2_9); - *q3s16 = vdupq_n_s16(sinpi_3_9); - *d5s16 = vdup_n_s16(sinpi_4_9); - return; -} - -static INLINE void IDCT4x4_1D( - int16x4_t *d0s16, - int16x4_t *d1s16, - int16x4_t *d2s16, - int16x8_t *q8s16, - int16x8_t *q9s16) { - int16x4_t d16s16, d17s16, d18s16, d19s16, d23s16, d24s16; - int16x4_t d26s16, d27s16, d28s16, d29s16; - int32x4_t q10s32, q13s32, q14s32, q15s32; - int16x8_t q13s16, q14s16; - - d16s16 = vget_low_s16(*q8s16); - d17s16 = vget_high_s16(*q8s16); - d18s16 = vget_low_s16(*q9s16); - d19s16 = vget_high_s16(*q9s16); - - d23s16 = vadd_s16(d16s16, d18s16); - d24s16 = 
vsub_s16(d16s16, d18s16); - - q15s32 = vmull_s16(d17s16, *d2s16); - q10s32 = vmull_s16(d17s16, *d0s16); - q13s32 = vmull_s16(d23s16, *d1s16); - q14s32 = vmull_s16(d24s16, *d1s16); - q15s32 = vmlsl_s16(q15s32, d19s16, *d0s16); - q10s32 = vmlal_s16(q10s32, d19s16, *d2s16); - - d26s16 = vqrshrn_n_s32(q13s32, 14); - d27s16 = vqrshrn_n_s32(q14s32, 14); - d29s16 = vqrshrn_n_s32(q15s32, 14); - d28s16 = vqrshrn_n_s32(q10s32, 14); - - q13s16 = vcombine_s16(d26s16, d27s16); - q14s16 = vcombine_s16(d28s16, d29s16); - *q8s16 = vaddq_s16(q13s16, q14s16); - *q9s16 = vsubq_s16(q13s16, q14s16); - *q9s16 = vcombine_s16(vget_high_s16(*q9s16), - vget_low_s16(*q9s16)); // vswp - return; -} - -static INLINE void IADST4x4_1D( - int16x4_t *d3s16, - int16x4_t *d4s16, - int16x4_t *d5s16, - int16x8_t *q3s16, - int16x8_t *q8s16, - int16x8_t *q9s16) { - int16x4_t d6s16, d16s16, d17s16, d18s16, d19s16; - int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32; - - d6s16 = vget_low_s16(*q3s16); - - d16s16 = vget_low_s16(*q8s16); - d17s16 = vget_high_s16(*q8s16); - d18s16 = vget_low_s16(*q9s16); - d19s16 = vget_high_s16(*q9s16); - - q10s32 = vmull_s16(*d3s16, d16s16); - q11s32 = vmull_s16(*d4s16, d16s16); - q12s32 = vmull_s16(d6s16, d17s16); - q13s32 = vmull_s16(*d5s16, d18s16); - q14s32 = vmull_s16(*d3s16, d18s16); - q15s32 = vmovl_s16(d16s16); - q15s32 = vaddw_s16(q15s32, d19s16); - q8s32 = vmull_s16(*d4s16, d19s16); - q15s32 = vsubw_s16(q15s32, d18s16); - q9s32 = vmull_s16(*d5s16, d19s16); - - q10s32 = vaddq_s32(q10s32, q13s32); - q10s32 = vaddq_s32(q10s32, q8s32); - q11s32 = vsubq_s32(q11s32, q14s32); - q8s32 = vdupq_n_s32(sinpi_3_9); - q11s32 = vsubq_s32(q11s32, q9s32); - q15s32 = vmulq_s32(q15s32, q8s32); - - q13s32 = vaddq_s32(q10s32, q12s32); - q10s32 = vaddq_s32(q10s32, q11s32); - q14s32 = vaddq_s32(q11s32, q12s32); - q10s32 = vsubq_s32(q10s32, q12s32); - - d16s16 = vqrshrn_n_s32(q13s32, 14); - d17s16 = vqrshrn_n_s32(q14s32, 14); - d18s16 = vqrshrn_n_s32(q15s32, 14); - 
d19s16 = vqrshrn_n_s32(q10s32, 14); - - *q8s16 = vcombine_s16(d16s16, d17s16); - *q9s16 = vcombine_s16(d18s16, d19s16); - return; -} - -void vp10_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest, - int dest_stride, int tx_type) { - uint8x8_t d26u8, d27u8; - int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16; - uint32x2_t d26u32, d27u32; - int16x8_t q3s16, q8s16, q9s16; - uint16x8_t q8u16, q9u16; - - d26u32 = d27u32 = vdup_n_u32(0); - - q8s16 = vld1q_s16(input); - q9s16 = vld1q_s16(input + 8); - - TRANSPOSE4X4(&q8s16, &q9s16); - - switch (tx_type) { - case 0: // idct_idct is not supported. Fall back to C - vp10_iht4x4_16_add_c(input, dest, dest_stride, tx_type); - return; - break; - case 1: // iadst_idct - // generate constants - GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16); - GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16); - - // first transform rows - IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16); - - // transpose the matrix - TRANSPOSE4X4(&q8s16, &q9s16); - - // then transform columns - IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16); - break; - case 2: // idct_iadst - // generate constantsyy - GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16); - GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16); - - // first transform rows - IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16); - - // transpose the matrix - TRANSPOSE4X4(&q8s16, &q9s16); - - // then transform columns - IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16); - break; - case 3: // iadst_iadst - // generate constants - GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16); - - // first transform rows - IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16); - - // transpose the matrix - TRANSPOSE4X4(&q8s16, &q9s16); - - // then transform columns - IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16); - break; - default: // iadst_idct - assert(0); - break; - } - - q8s16 = vrshrq_n_s16(q8s16, 4); - q9s16 = vrshrq_n_s16(q9s16, 4); - - 
d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 0); - dest += dest_stride; - d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 1); - dest += dest_stride; - d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 0); - dest += dest_stride; - d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 1); - - q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u32(d26u32)); - q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u32(d27u32)); - - d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); - d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); - - vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d27u8), 1); - dest -= dest_stride; - vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d27u8), 0); - dest -= dest_stride; - vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 1); - dest -= dest_stride; - vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 0); - return; -} diff --git a/libs/libvpx/vp10/common/arm/neon/iht8x8_add_neon.c b/libs/libvpx/vp10/common/arm/neon/iht8x8_add_neon.c deleted file mode 100644 index 82d7ccc612..0000000000 --- a/libs/libvpx/vp10/common/arm/neon/iht8x8_add_neon.c +++ /dev/null @@ -1,624 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include - -#include "./vp10_rtcd.h" -#include "./vpx_config.h" -#include "vp10/common/common.h" - -static int16_t cospi_2_64 = 16305; -static int16_t cospi_4_64 = 16069; -static int16_t cospi_6_64 = 15679; -static int16_t cospi_8_64 = 15137; -static int16_t cospi_10_64 = 14449; -static int16_t cospi_12_64 = 13623; -static int16_t cospi_14_64 = 12665; -static int16_t cospi_16_64 = 11585; -static int16_t cospi_18_64 = 10394; -static int16_t cospi_20_64 = 9102; -static int16_t cospi_22_64 = 7723; -static int16_t cospi_24_64 = 6270; -static int16_t cospi_26_64 = 4756; -static int16_t cospi_28_64 = 3196; -static int16_t cospi_30_64 = 1606; - -static INLINE void TRANSPOSE8X8( - int16x8_t *q8s16, - int16x8_t *q9s16, - int16x8_t *q10s16, - int16x8_t *q11s16, - int16x8_t *q12s16, - int16x8_t *q13s16, - int16x8_t *q14s16, - int16x8_t *q15s16) { - int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; - int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; - int32x4x2_t q0x2s32, q1x2s32, q2x2s32, q3x2s32; - int16x8x2_t q0x2s16, q1x2s16, q2x2s16, q3x2s16; - - d16s16 = vget_low_s16(*q8s16); - d17s16 = vget_high_s16(*q8s16); - d18s16 = vget_low_s16(*q9s16); - d19s16 = vget_high_s16(*q9s16); - d20s16 = vget_low_s16(*q10s16); - d21s16 = vget_high_s16(*q10s16); - d22s16 = vget_low_s16(*q11s16); - d23s16 = vget_high_s16(*q11s16); - d24s16 = vget_low_s16(*q12s16); - d25s16 = vget_high_s16(*q12s16); - d26s16 = vget_low_s16(*q13s16); - d27s16 = vget_high_s16(*q13s16); - d28s16 = vget_low_s16(*q14s16); - d29s16 = vget_high_s16(*q14s16); - d30s16 = vget_low_s16(*q15s16); - d31s16 = vget_high_s16(*q15s16); - - *q8s16 = vcombine_s16(d16s16, d24s16); // vswp d17, d24 - *q9s16 = vcombine_s16(d18s16, d26s16); // vswp d19, d26 - *q10s16 = vcombine_s16(d20s16, d28s16); // vswp d21, d28 - *q11s16 = vcombine_s16(d22s16, d30s16); // vswp d23, d30 - *q12s16 = vcombine_s16(d17s16, d25s16); - *q13s16 = vcombine_s16(d19s16, d27s16); - *q14s16 = 
vcombine_s16(d21s16, d29s16); - *q15s16 = vcombine_s16(d23s16, d31s16); - - q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q8s16), - vreinterpretq_s32_s16(*q10s16)); - q1x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q9s16), - vreinterpretq_s32_s16(*q11s16)); - q2x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q12s16), - vreinterpretq_s32_s16(*q14s16)); - q3x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q13s16), - vreinterpretq_s32_s16(*q15s16)); - - q0x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[0]), // q8 - vreinterpretq_s16_s32(q1x2s32.val[0])); // q9 - q1x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[1]), // q10 - vreinterpretq_s16_s32(q1x2s32.val[1])); // q11 - q2x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[0]), // q12 - vreinterpretq_s16_s32(q3x2s32.val[0])); // q13 - q3x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[1]), // q14 - vreinterpretq_s16_s32(q3x2s32.val[1])); // q15 - - *q8s16 = q0x2s16.val[0]; - *q9s16 = q0x2s16.val[1]; - *q10s16 = q1x2s16.val[0]; - *q11s16 = q1x2s16.val[1]; - *q12s16 = q2x2s16.val[0]; - *q13s16 = q2x2s16.val[1]; - *q14s16 = q3x2s16.val[0]; - *q15s16 = q3x2s16.val[1]; - return; -} - -static INLINE void IDCT8x8_1D( - int16x8_t *q8s16, - int16x8_t *q9s16, - int16x8_t *q10s16, - int16x8_t *q11s16, - int16x8_t *q12s16, - int16x8_t *q13s16, - int16x8_t *q14s16, - int16x8_t *q15s16) { - int16x4_t d0s16, d1s16, d2s16, d3s16; - int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; - int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; - int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; - int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; - int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32; - int32x4_t q10s32, q11s32, q12s32, q13s32, q15s32; - - d0s16 = vdup_n_s16(cospi_28_64); - d1s16 = vdup_n_s16(cospi_4_64); - d2s16 = vdup_n_s16(cospi_12_64); - d3s16 = vdup_n_s16(cospi_20_64); - - d16s16 = vget_low_s16(*q8s16); - d17s16 = vget_high_s16(*q8s16); - d18s16 = 
vget_low_s16(*q9s16); - d19s16 = vget_high_s16(*q9s16); - d20s16 = vget_low_s16(*q10s16); - d21s16 = vget_high_s16(*q10s16); - d22s16 = vget_low_s16(*q11s16); - d23s16 = vget_high_s16(*q11s16); - d24s16 = vget_low_s16(*q12s16); - d25s16 = vget_high_s16(*q12s16); - d26s16 = vget_low_s16(*q13s16); - d27s16 = vget_high_s16(*q13s16); - d28s16 = vget_low_s16(*q14s16); - d29s16 = vget_high_s16(*q14s16); - d30s16 = vget_low_s16(*q15s16); - d31s16 = vget_high_s16(*q15s16); - - q2s32 = vmull_s16(d18s16, d0s16); - q3s32 = vmull_s16(d19s16, d0s16); - q5s32 = vmull_s16(d26s16, d2s16); - q6s32 = vmull_s16(d27s16, d2s16); - - q2s32 = vmlsl_s16(q2s32, d30s16, d1s16); - q3s32 = vmlsl_s16(q3s32, d31s16, d1s16); - q5s32 = vmlsl_s16(q5s32, d22s16, d3s16); - q6s32 = vmlsl_s16(q6s32, d23s16, d3s16); - - d8s16 = vqrshrn_n_s32(q2s32, 14); - d9s16 = vqrshrn_n_s32(q3s32, 14); - d10s16 = vqrshrn_n_s32(q5s32, 14); - d11s16 = vqrshrn_n_s32(q6s32, 14); - q4s16 = vcombine_s16(d8s16, d9s16); - q5s16 = vcombine_s16(d10s16, d11s16); - - q2s32 = vmull_s16(d18s16, d1s16); - q3s32 = vmull_s16(d19s16, d1s16); - q9s32 = vmull_s16(d26s16, d3s16); - q13s32 = vmull_s16(d27s16, d3s16); - - q2s32 = vmlal_s16(q2s32, d30s16, d0s16); - q3s32 = vmlal_s16(q3s32, d31s16, d0s16); - q9s32 = vmlal_s16(q9s32, d22s16, d2s16); - q13s32 = vmlal_s16(q13s32, d23s16, d2s16); - - d14s16 = vqrshrn_n_s32(q2s32, 14); - d15s16 = vqrshrn_n_s32(q3s32, 14); - d12s16 = vqrshrn_n_s32(q9s32, 14); - d13s16 = vqrshrn_n_s32(q13s32, 14); - q6s16 = vcombine_s16(d12s16, d13s16); - q7s16 = vcombine_s16(d14s16, d15s16); - - d0s16 = vdup_n_s16(cospi_16_64); - - q2s32 = vmull_s16(d16s16, d0s16); - q3s32 = vmull_s16(d17s16, d0s16); - q13s32 = vmull_s16(d16s16, d0s16); - q15s32 = vmull_s16(d17s16, d0s16); - - q2s32 = vmlal_s16(q2s32, d24s16, d0s16); - q3s32 = vmlal_s16(q3s32, d25s16, d0s16); - q13s32 = vmlsl_s16(q13s32, d24s16, d0s16); - q15s32 = vmlsl_s16(q15s32, d25s16, d0s16); - - d0s16 = vdup_n_s16(cospi_24_64); - d1s16 = 
vdup_n_s16(cospi_8_64); - - d18s16 = vqrshrn_n_s32(q2s32, 14); - d19s16 = vqrshrn_n_s32(q3s32, 14); - d22s16 = vqrshrn_n_s32(q13s32, 14); - d23s16 = vqrshrn_n_s32(q15s32, 14); - *q9s16 = vcombine_s16(d18s16, d19s16); - *q11s16 = vcombine_s16(d22s16, d23s16); - - q2s32 = vmull_s16(d20s16, d0s16); - q3s32 = vmull_s16(d21s16, d0s16); - q8s32 = vmull_s16(d20s16, d1s16); - q12s32 = vmull_s16(d21s16, d1s16); - - q2s32 = vmlsl_s16(q2s32, d28s16, d1s16); - q3s32 = vmlsl_s16(q3s32, d29s16, d1s16); - q8s32 = vmlal_s16(q8s32, d28s16, d0s16); - q12s32 = vmlal_s16(q12s32, d29s16, d0s16); - - d26s16 = vqrshrn_n_s32(q2s32, 14); - d27s16 = vqrshrn_n_s32(q3s32, 14); - d30s16 = vqrshrn_n_s32(q8s32, 14); - d31s16 = vqrshrn_n_s32(q12s32, 14); - *q13s16 = vcombine_s16(d26s16, d27s16); - *q15s16 = vcombine_s16(d30s16, d31s16); - - q0s16 = vaddq_s16(*q9s16, *q15s16); - q1s16 = vaddq_s16(*q11s16, *q13s16); - q2s16 = vsubq_s16(*q11s16, *q13s16); - q3s16 = vsubq_s16(*q9s16, *q15s16); - - *q13s16 = vsubq_s16(q4s16, q5s16); - q4s16 = vaddq_s16(q4s16, q5s16); - *q14s16 = vsubq_s16(q7s16, q6s16); - q7s16 = vaddq_s16(q7s16, q6s16); - d26s16 = vget_low_s16(*q13s16); - d27s16 = vget_high_s16(*q13s16); - d28s16 = vget_low_s16(*q14s16); - d29s16 = vget_high_s16(*q14s16); - - d16s16 = vdup_n_s16(cospi_16_64); - - q9s32 = vmull_s16(d28s16, d16s16); - q10s32 = vmull_s16(d29s16, d16s16); - q11s32 = vmull_s16(d28s16, d16s16); - q12s32 = vmull_s16(d29s16, d16s16); - - q9s32 = vmlsl_s16(q9s32, d26s16, d16s16); - q10s32 = vmlsl_s16(q10s32, d27s16, d16s16); - q11s32 = vmlal_s16(q11s32, d26s16, d16s16); - q12s32 = vmlal_s16(q12s32, d27s16, d16s16); - - d10s16 = vqrshrn_n_s32(q9s32, 14); - d11s16 = vqrshrn_n_s32(q10s32, 14); - d12s16 = vqrshrn_n_s32(q11s32, 14); - d13s16 = vqrshrn_n_s32(q12s32, 14); - q5s16 = vcombine_s16(d10s16, d11s16); - q6s16 = vcombine_s16(d12s16, d13s16); - - *q8s16 = vaddq_s16(q0s16, q7s16); - *q9s16 = vaddq_s16(q1s16, q6s16); - *q10s16 = vaddq_s16(q2s16, q5s16); - *q11s16 = 
vaddq_s16(q3s16, q4s16); - *q12s16 = vsubq_s16(q3s16, q4s16); - *q13s16 = vsubq_s16(q2s16, q5s16); - *q14s16 = vsubq_s16(q1s16, q6s16); - *q15s16 = vsubq_s16(q0s16, q7s16); - return; -} - -static INLINE void IADST8X8_1D( - int16x8_t *q8s16, - int16x8_t *q9s16, - int16x8_t *q10s16, - int16x8_t *q11s16, - int16x8_t *q12s16, - int16x8_t *q13s16, - int16x8_t *q14s16, - int16x8_t *q15s16) { - int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16; - int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; - int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; - int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; - int16x8_t q2s16, q4s16, q5s16, q6s16; - int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q7s32, q8s32; - int32x4_t q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32; - - d16s16 = vget_low_s16(*q8s16); - d17s16 = vget_high_s16(*q8s16); - d18s16 = vget_low_s16(*q9s16); - d19s16 = vget_high_s16(*q9s16); - d20s16 = vget_low_s16(*q10s16); - d21s16 = vget_high_s16(*q10s16); - d22s16 = vget_low_s16(*q11s16); - d23s16 = vget_high_s16(*q11s16); - d24s16 = vget_low_s16(*q12s16); - d25s16 = vget_high_s16(*q12s16); - d26s16 = vget_low_s16(*q13s16); - d27s16 = vget_high_s16(*q13s16); - d28s16 = vget_low_s16(*q14s16); - d29s16 = vget_high_s16(*q14s16); - d30s16 = vget_low_s16(*q15s16); - d31s16 = vget_high_s16(*q15s16); - - d14s16 = vdup_n_s16(cospi_2_64); - d15s16 = vdup_n_s16(cospi_30_64); - - q1s32 = vmull_s16(d30s16, d14s16); - q2s32 = vmull_s16(d31s16, d14s16); - q3s32 = vmull_s16(d30s16, d15s16); - q4s32 = vmull_s16(d31s16, d15s16); - - d30s16 = vdup_n_s16(cospi_18_64); - d31s16 = vdup_n_s16(cospi_14_64); - - q1s32 = vmlal_s16(q1s32, d16s16, d15s16); - q2s32 = vmlal_s16(q2s32, d17s16, d15s16); - q3s32 = vmlsl_s16(q3s32, d16s16, d14s16); - q4s32 = vmlsl_s16(q4s32, d17s16, d14s16); - - q5s32 = vmull_s16(d22s16, d30s16); - q6s32 = vmull_s16(d23s16, d30s16); - q7s32 = vmull_s16(d22s16, 
d31s16); - q8s32 = vmull_s16(d23s16, d31s16); - - q5s32 = vmlal_s16(q5s32, d24s16, d31s16); - q6s32 = vmlal_s16(q6s32, d25s16, d31s16); - q7s32 = vmlsl_s16(q7s32, d24s16, d30s16); - q8s32 = vmlsl_s16(q8s32, d25s16, d30s16); - - q11s32 = vaddq_s32(q1s32, q5s32); - q12s32 = vaddq_s32(q2s32, q6s32); - q1s32 = vsubq_s32(q1s32, q5s32); - q2s32 = vsubq_s32(q2s32, q6s32); - - d22s16 = vqrshrn_n_s32(q11s32, 14); - d23s16 = vqrshrn_n_s32(q12s32, 14); - *q11s16 = vcombine_s16(d22s16, d23s16); - - q12s32 = vaddq_s32(q3s32, q7s32); - q15s32 = vaddq_s32(q4s32, q8s32); - q3s32 = vsubq_s32(q3s32, q7s32); - q4s32 = vsubq_s32(q4s32, q8s32); - - d2s16 = vqrshrn_n_s32(q1s32, 14); - d3s16 = vqrshrn_n_s32(q2s32, 14); - d24s16 = vqrshrn_n_s32(q12s32, 14); - d25s16 = vqrshrn_n_s32(q15s32, 14); - d6s16 = vqrshrn_n_s32(q3s32, 14); - d7s16 = vqrshrn_n_s32(q4s32, 14); - *q12s16 = vcombine_s16(d24s16, d25s16); - - d0s16 = vdup_n_s16(cospi_10_64); - d1s16 = vdup_n_s16(cospi_22_64); - q4s32 = vmull_s16(d26s16, d0s16); - q5s32 = vmull_s16(d27s16, d0s16); - q2s32 = vmull_s16(d26s16, d1s16); - q6s32 = vmull_s16(d27s16, d1s16); - - d30s16 = vdup_n_s16(cospi_26_64); - d31s16 = vdup_n_s16(cospi_6_64); - - q4s32 = vmlal_s16(q4s32, d20s16, d1s16); - q5s32 = vmlal_s16(q5s32, d21s16, d1s16); - q2s32 = vmlsl_s16(q2s32, d20s16, d0s16); - q6s32 = vmlsl_s16(q6s32, d21s16, d0s16); - - q0s32 = vmull_s16(d18s16, d30s16); - q13s32 = vmull_s16(d19s16, d30s16); - - q0s32 = vmlal_s16(q0s32, d28s16, d31s16); - q13s32 = vmlal_s16(q13s32, d29s16, d31s16); - - q10s32 = vmull_s16(d18s16, d31s16); - q9s32 = vmull_s16(d19s16, d31s16); - - q10s32 = vmlsl_s16(q10s32, d28s16, d30s16); - q9s32 = vmlsl_s16(q9s32, d29s16, d30s16); - - q14s32 = vaddq_s32(q2s32, q10s32); - q15s32 = vaddq_s32(q6s32, q9s32); - q2s32 = vsubq_s32(q2s32, q10s32); - q6s32 = vsubq_s32(q6s32, q9s32); - - d28s16 = vqrshrn_n_s32(q14s32, 14); - d29s16 = vqrshrn_n_s32(q15s32, 14); - d4s16 = vqrshrn_n_s32(q2s32, 14); - d5s16 = vqrshrn_n_s32(q6s32, 14); - 
*q14s16 = vcombine_s16(d28s16, d29s16); - - q9s32 = vaddq_s32(q4s32, q0s32); - q10s32 = vaddq_s32(q5s32, q13s32); - q4s32 = vsubq_s32(q4s32, q0s32); - q5s32 = vsubq_s32(q5s32, q13s32); - - d30s16 = vdup_n_s16(cospi_8_64); - d31s16 = vdup_n_s16(cospi_24_64); - - d18s16 = vqrshrn_n_s32(q9s32, 14); - d19s16 = vqrshrn_n_s32(q10s32, 14); - d8s16 = vqrshrn_n_s32(q4s32, 14); - d9s16 = vqrshrn_n_s32(q5s32, 14); - *q9s16 = vcombine_s16(d18s16, d19s16); - - q5s32 = vmull_s16(d2s16, d30s16); - q6s32 = vmull_s16(d3s16, d30s16); - q7s32 = vmull_s16(d2s16, d31s16); - q0s32 = vmull_s16(d3s16, d31s16); - - q5s32 = vmlal_s16(q5s32, d6s16, d31s16); - q6s32 = vmlal_s16(q6s32, d7s16, d31s16); - q7s32 = vmlsl_s16(q7s32, d6s16, d30s16); - q0s32 = vmlsl_s16(q0s32, d7s16, d30s16); - - q1s32 = vmull_s16(d4s16, d30s16); - q3s32 = vmull_s16(d5s16, d30s16); - q10s32 = vmull_s16(d4s16, d31s16); - q2s32 = vmull_s16(d5s16, d31s16); - - q1s32 = vmlsl_s16(q1s32, d8s16, d31s16); - q3s32 = vmlsl_s16(q3s32, d9s16, d31s16); - q10s32 = vmlal_s16(q10s32, d8s16, d30s16); - q2s32 = vmlal_s16(q2s32, d9s16, d30s16); - - *q8s16 = vaddq_s16(*q11s16, *q9s16); - *q11s16 = vsubq_s16(*q11s16, *q9s16); - q4s16 = vaddq_s16(*q12s16, *q14s16); - *q12s16 = vsubq_s16(*q12s16, *q14s16); - - q14s32 = vaddq_s32(q5s32, q1s32); - q15s32 = vaddq_s32(q6s32, q3s32); - q5s32 = vsubq_s32(q5s32, q1s32); - q6s32 = vsubq_s32(q6s32, q3s32); - - d18s16 = vqrshrn_n_s32(q14s32, 14); - d19s16 = vqrshrn_n_s32(q15s32, 14); - d10s16 = vqrshrn_n_s32(q5s32, 14); - d11s16 = vqrshrn_n_s32(q6s32, 14); - *q9s16 = vcombine_s16(d18s16, d19s16); - - q1s32 = vaddq_s32(q7s32, q10s32); - q3s32 = vaddq_s32(q0s32, q2s32); - q7s32 = vsubq_s32(q7s32, q10s32); - q0s32 = vsubq_s32(q0s32, q2s32); - - d28s16 = vqrshrn_n_s32(q1s32, 14); - d29s16 = vqrshrn_n_s32(q3s32, 14); - d14s16 = vqrshrn_n_s32(q7s32, 14); - d15s16 = vqrshrn_n_s32(q0s32, 14); - *q14s16 = vcombine_s16(d28s16, d29s16); - - d30s16 = vdup_n_s16(cospi_16_64); - - d22s16 = vget_low_s16(*q11s16); 
- d23s16 = vget_high_s16(*q11s16); - q2s32 = vmull_s16(d22s16, d30s16); - q3s32 = vmull_s16(d23s16, d30s16); - q13s32 = vmull_s16(d22s16, d30s16); - q1s32 = vmull_s16(d23s16, d30s16); - - d24s16 = vget_low_s16(*q12s16); - d25s16 = vget_high_s16(*q12s16); - q2s32 = vmlal_s16(q2s32, d24s16, d30s16); - q3s32 = vmlal_s16(q3s32, d25s16, d30s16); - q13s32 = vmlsl_s16(q13s32, d24s16, d30s16); - q1s32 = vmlsl_s16(q1s32, d25s16, d30s16); - - d4s16 = vqrshrn_n_s32(q2s32, 14); - d5s16 = vqrshrn_n_s32(q3s32, 14); - d24s16 = vqrshrn_n_s32(q13s32, 14); - d25s16 = vqrshrn_n_s32(q1s32, 14); - q2s16 = vcombine_s16(d4s16, d5s16); - *q12s16 = vcombine_s16(d24s16, d25s16); - - q13s32 = vmull_s16(d10s16, d30s16); - q1s32 = vmull_s16(d11s16, d30s16); - q11s32 = vmull_s16(d10s16, d30s16); - q0s32 = vmull_s16(d11s16, d30s16); - - q13s32 = vmlal_s16(q13s32, d14s16, d30s16); - q1s32 = vmlal_s16(q1s32, d15s16, d30s16); - q11s32 = vmlsl_s16(q11s32, d14s16, d30s16); - q0s32 = vmlsl_s16(q0s32, d15s16, d30s16); - - d20s16 = vqrshrn_n_s32(q13s32, 14); - d21s16 = vqrshrn_n_s32(q1s32, 14); - d12s16 = vqrshrn_n_s32(q11s32, 14); - d13s16 = vqrshrn_n_s32(q0s32, 14); - *q10s16 = vcombine_s16(d20s16, d21s16); - q6s16 = vcombine_s16(d12s16, d13s16); - - q5s16 = vdupq_n_s16(0); - - *q9s16 = vsubq_s16(q5s16, *q9s16); - *q11s16 = vsubq_s16(q5s16, q2s16); - *q13s16 = vsubq_s16(q5s16, q6s16); - *q15s16 = vsubq_s16(q5s16, q4s16); - return; -} - -void vp10_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest, - int dest_stride, int tx_type) { - int i; - uint8_t *d1, *d2; - uint8x8_t d0u8, d1u8, d2u8, d3u8; - uint64x1_t d0u64, d1u64, d2u64, d3u64; - int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; - uint16x8_t q8u16, q9u16, q10u16, q11u16; - - q8s16 = vld1q_s16(input); - q9s16 = vld1q_s16(input + 8); - q10s16 = vld1q_s16(input + 8 * 2); - q11s16 = vld1q_s16(input + 8 * 3); - q12s16 = vld1q_s16(input + 8 * 4); - q13s16 = vld1q_s16(input + 8 * 5); - q14s16 = vld1q_s16(input + 8 * 6); - 
q15s16 = vld1q_s16(input + 8 * 7); - - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - - switch (tx_type) { - case 0: // idct_idct is not supported. Fall back to C - vp10_iht8x8_64_add_c(input, dest, dest_stride, tx_type); - return; - break; - case 1: // iadst_idct - // generate IDCT constants - // GENERATE_IDCT_CONSTANTS - - // first transform rows - IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - - // transpose the matrix - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - - // generate IADST constants - // GENERATE_IADST_CONSTANTS - - // then transform columns - IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - break; - case 2: // idct_iadst - // generate IADST constants - // GENERATE_IADST_CONSTANTS - - // first transform rows - IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - - // transpose the matrix - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - - // generate IDCT constants - // GENERATE_IDCT_CONSTANTS - - // then transform columns - IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - break; - case 3: // iadst_iadst - // generate IADST constants - // GENERATE_IADST_CONSTANTS - - // first transform rows - IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - - // transpose the matrix - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - - // then transform columns - IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - break; - default: // iadst_idct - assert(0); - break; - } - - q8s16 = vrshrq_n_s16(q8s16, 5); - q9s16 = vrshrq_n_s16(q9s16, 5); - q10s16 = vrshrq_n_s16(q10s16, 5); - q11s16 = vrshrq_n_s16(q11s16, 5); - q12s16 = vrshrq_n_s16(q12s16, 5); - q13s16 = vrshrq_n_s16(q13s16, 5); - q14s16 = 
vrshrq_n_s16(q14s16, 5); - q15s16 = vrshrq_n_s16(q15s16, 5); - - for (d1 = d2 = dest, i = 0; i < 2; i++) { - if (i != 0) { - q8s16 = q12s16; - q9s16 = q13s16; - q10s16 = q14s16; - q11s16 = q15s16; - } - - d0u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d1u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d2u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d3u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - - q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), - vreinterpret_u8_u64(d0u64)); - q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), - vreinterpret_u8_u64(d1u64)); - q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), - vreinterpret_u8_u64(d2u64)); - q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), - vreinterpret_u8_u64(d3u64)); - - d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); - d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); - d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); - d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); - - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); - d2 += dest_stride; - } - return; -} diff --git a/libs/libvpx/vp10/common/blockd.c b/libs/libvpx/vp10/common/blockd.c deleted file mode 100644 index b6f910ff68..0000000000 --- a/libs/libvpx/vp10/common/blockd.c +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "vp10/common/blockd.h" - -PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi, - const MODE_INFO *left_mi, int b) { - if (b == 0 || b == 2) { - if (!left_mi || is_inter_block(&left_mi->mbmi)) - return DC_PRED; - - return get_y_mode(left_mi, b + 1); - } else { - assert(b == 1 || b == 3); - return cur_mi->bmi[b - 1].as_mode; - } -} - -PREDICTION_MODE vp10_above_block_mode(const MODE_INFO *cur_mi, - const MODE_INFO *above_mi, int b) { - if (b == 0 || b == 1) { - if (!above_mi || is_inter_block(&above_mi->mbmi)) - return DC_PRED; - - return get_y_mode(above_mi, b + 2); - } else { - assert(b == 2 || b == 3); - return cur_mi->bmi[b - 2].as_mode; - } -} - -void vp10_foreach_transformed_block_in_plane( - const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane, - foreach_transformed_block_visitor visit, void *arg) { - const struct macroblockd_plane *const pd = &xd->plane[plane]; - const MB_MODE_INFO* mbmi = &xd->mi[0]->mbmi; - // block and transform sizes, in number of 4x4 blocks log 2 ("*_b") - // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8 - // transform size varies per plane, look it up in a common way. - const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) - : mbmi->tx_size; - const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd); - const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; - const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; - const int step = 1 << (tx_size << 1); - int i = 0, r, c; - - // If mb_to_right_edge is < 0 we are in a situation in which - // the current block size extends into the UMV and we won't - // visit the sub blocks that are wholly within the UMV. - const int max_blocks_wide = num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 : - xd->mb_to_right_edge >> (5 + pd->subsampling_x)); - const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 
0 : - xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); - const int extra_step = ((num_4x4_w - max_blocks_wide) >> tx_size) * step; - - // Keep track of the row and column of the blocks we use so that we know - // if we are in the unrestricted motion border. - for (r = 0; r < max_blocks_high; r += (1 << tx_size)) { - // Skip visiting the sub blocks that are wholly within the UMV. - for (c = 0; c < max_blocks_wide; c += (1 << tx_size)) { - visit(plane, i, r, c, plane_bsize, tx_size, arg); - i += step; - } - i += extra_step; - } -} - -void vp10_foreach_transformed_block(const MACROBLOCKD* const xd, - BLOCK_SIZE bsize, - foreach_transformed_block_visitor visit, - void *arg) { - int plane; - - for (plane = 0; plane < MAX_MB_PLANE; ++plane) - vp10_foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg); -} - -void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd, - BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob, - int aoff, int loff) { - ENTROPY_CONTEXT *const a = pd->above_context + aoff; - ENTROPY_CONTEXT *const l = pd->left_context + loff; - const int tx_size_in_blocks = 1 << tx_size; - - // above - if (has_eob && xd->mb_to_right_edge < 0) { - int i; - const int blocks_wide = num_4x4_blocks_wide_lookup[plane_bsize] + - (xd->mb_to_right_edge >> (5 + pd->subsampling_x)); - int above_contexts = tx_size_in_blocks; - if (above_contexts + aoff > blocks_wide) - above_contexts = blocks_wide - aoff; - - for (i = 0; i < above_contexts; ++i) - a[i] = has_eob; - for (i = above_contexts; i < tx_size_in_blocks; ++i) - a[i] = 0; - } else { - memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); - } - - // left - if (has_eob && xd->mb_to_bottom_edge < 0) { - int i; - const int blocks_high = num_4x4_blocks_high_lookup[plane_bsize] + - (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); - int left_contexts = tx_size_in_blocks; - if (left_contexts + loff > blocks_high) - left_contexts = blocks_high - loff; - - for (i = 0; i < 
left_contexts; ++i) - l[i] = has_eob; - for (i = left_contexts; i < tx_size_in_blocks; ++i) - l[i] = 0; - } else { - memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); - } -} - -void vp10_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y) { - int i; - - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].plane_type = i ? PLANE_TYPE_UV : PLANE_TYPE_Y; - xd->plane[i].subsampling_x = i ? ss_x : 0; - xd->plane[i].subsampling_y = i ? ss_y : 0; - } -} diff --git a/libs/libvpx/vp10/common/blockd.h b/libs/libvpx/vp10/common/blockd.h deleted file mode 100644 index fce1767963..0000000000 --- a/libs/libvpx/vp10/common/blockd.h +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#ifndef VP10_COMMON_BLOCKD_H_ -#define VP10_COMMON_BLOCKD_H_ - -#include "./vpx_config.h" - -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_ports/mem.h" -#include "vpx_scale/yv12config.h" - -#include "vp10/common/common_data.h" -#include "vp10/common/entropy.h" -#include "vp10/common/entropymode.h" -#include "vp10/common/mv.h" -#include "vp10/common/scale.h" -#include "vp10/common/seg_common.h" -#include "vp10/common/tile_common.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define MAX_MB_PLANE 3 - -typedef enum { - KEY_FRAME = 0, - INTER_FRAME = 1, - FRAME_TYPES, -} FRAME_TYPE; - -static INLINE int is_inter_mode(PREDICTION_MODE mode) { - return mode >= NEARESTMV && mode <= NEWMV; -} - -/* For keyframes, intra block modes are predicted by the (already decoded) - modes for the Y blocks to the left and above us; for interframes, there - is a single probability table. 
*/ - -typedef struct { - PREDICTION_MODE as_mode; - int_mv as_mv[2]; // first, second inter predictor motion vectors -} b_mode_info; - -// Note that the rate-distortion optimization loop, bit-stream writer, and -// decoder implementation modules critically rely on the defined entry values -// specified herein. They should be refactored concurrently. - -#define NONE -1 -#define INTRA_FRAME 0 -#define LAST_FRAME 1 -#define GOLDEN_FRAME 2 -#define ALTREF_FRAME 3 -#define MAX_REF_FRAMES 4 -typedef int8_t MV_REFERENCE_FRAME; - -// This structure now relates to 8x8 block regions. -typedef struct { - // Common for both INTER and INTRA blocks - BLOCK_SIZE sb_type; - PREDICTION_MODE mode; - TX_SIZE tx_size; - int8_t skip; -#if CONFIG_MISC_FIXES - int8_t has_no_coeffs; -#endif - int8_t segment_id; - int8_t seg_id_predicted; // valid only when temporal_update is enabled - - // Only for INTRA blocks - PREDICTION_MODE uv_mode; - - // Only for INTER blocks - INTERP_FILTER interp_filter; - MV_REFERENCE_FRAME ref_frame[2]; - TX_TYPE tx_type; - - // TODO(slavarnway): Delete and use bmi[3].as_mv[] instead. - int_mv mv[2]; -} MB_MODE_INFO; - -typedef struct MODE_INFO { - MB_MODE_INFO mbmi; - b_mode_info bmi[4]; -} MODE_INFO; - -static INLINE PREDICTION_MODE get_y_mode(const MODE_INFO *mi, int block) { - return mi->mbmi.sb_type < BLOCK_8X8 ? 
mi->bmi[block].as_mode - : mi->mbmi.mode; -} - -static INLINE int is_inter_block(const MB_MODE_INFO *mbmi) { - return mbmi->ref_frame[0] > INTRA_FRAME; -} - -static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) { - return mbmi->ref_frame[1] > INTRA_FRAME; -} - -PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi, - const MODE_INFO *left_mi, int b); - -PREDICTION_MODE vp10_above_block_mode(const MODE_INFO *cur_mi, - const MODE_INFO *above_mi, int b); - -enum mv_precision { - MV_PRECISION_Q3, - MV_PRECISION_Q4 -}; - -struct buf_2d { - uint8_t *buf; - int stride; -}; - -struct macroblockd_plane { - tran_low_t *dqcoeff; - PLANE_TYPE plane_type; - int subsampling_x; - int subsampling_y; - struct buf_2d dst; - struct buf_2d pre[2]; - ENTROPY_CONTEXT *above_context; - ENTROPY_CONTEXT *left_context; - int16_t seg_dequant[MAX_SEGMENTS][2]; - uint8_t *color_index_map; - - // number of 4x4s in current block - uint16_t n4_w, n4_h; - // log2 of n4_w, n4_h - uint8_t n4_wl, n4_hl; - - // encoder - const int16_t *dequant; -}; - -#define BLOCK_OFFSET(x, i) ((x) + (i) * 16) - -typedef struct RefBuffer { - // TODO(dkovalev): idx is not really required and should be removed, now it - // is used in vp10_onyxd_if.c - int idx; - YV12_BUFFER_CONFIG *buf; - struct scale_factors sf; -} RefBuffer; - -typedef struct macroblockd { - struct macroblockd_plane plane[MAX_MB_PLANE]; - uint8_t bmode_blocks_wl; - uint8_t bmode_blocks_hl; - - FRAME_COUNTS *counts; - TileInfo tile; - - int mi_stride; - - MODE_INFO **mi; - MODE_INFO *left_mi; - MODE_INFO *above_mi; - MB_MODE_INFO *left_mbmi; - MB_MODE_INFO *above_mbmi; - - int up_available; - int left_available; - - /* Distance of MB away from frame edges */ - int mb_to_left_edge; - int mb_to_right_edge; - int mb_to_top_edge; - int mb_to_bottom_edge; - - FRAME_CONTEXT *fc; - - /* pointers to reference frames */ - RefBuffer *block_refs[2]; - - /* pointer to current frame */ - const YV12_BUFFER_CONFIG *cur_buf; - - ENTROPY_CONTEXT 
*above_context[MAX_MB_PLANE]; - ENTROPY_CONTEXT left_context[MAX_MB_PLANE][16]; - - PARTITION_CONTEXT *above_seg_context; - PARTITION_CONTEXT left_seg_context[8]; - -#if CONFIG_VP9_HIGHBITDEPTH - /* Bit depth: 8, 10, 12 */ - int bd; -#endif - - int lossless[MAX_SEGMENTS]; - int corrupted; - - struct vpx_internal_error_info *error_info; -} MACROBLOCKD; - -static INLINE BLOCK_SIZE get_subsize(BLOCK_SIZE bsize, - PARTITION_TYPE partition) { - return subsize_lookup[partition][bsize]; -} - -static const TX_TYPE intra_mode_to_tx_type_context[INTRA_MODES] = { - DCT_DCT, // DC - ADST_DCT, // V - DCT_ADST, // H - DCT_DCT, // D45 - ADST_ADST, // D135 - ADST_DCT, // D117 - DCT_ADST, // D153 - DCT_ADST, // D207 - ADST_DCT, // D63 - ADST_ADST, // TM -}; - -static INLINE TX_TYPE get_tx_type(PLANE_TYPE plane_type, const MACROBLOCKD *xd, - int block_idx) { - const MODE_INFO *const mi = xd->mi[0]; - const MB_MODE_INFO *const mbmi = &mi->mbmi; - - (void) block_idx; - if (plane_type != PLANE_TYPE_Y || xd->lossless[mbmi->segment_id] || - mbmi->tx_size >= TX_32X32) - return DCT_DCT; - - return mbmi->tx_type; -} - -void vp10_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y); - -static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize, - int xss, int yss) { - if (bsize < BLOCK_8X8) { - return TX_4X4; - } else { - const BLOCK_SIZE plane_bsize = ss_size_lookup[bsize][xss][yss]; - return VPXMIN(y_tx_size, max_txsize_lookup[plane_bsize]); - } -} - -static INLINE TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi, - const struct macroblockd_plane *pd) { - return get_uv_tx_size_impl(mbmi->tx_size, mbmi->sb_type, pd->subsampling_x, - pd->subsampling_y); -} - -static INLINE BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize, - const struct macroblockd_plane *pd) { - return ss_size_lookup[bsize][pd->subsampling_x][pd->subsampling_y]; -} - -static INLINE void reset_skip_context(MACROBLOCKD *xd, BLOCK_SIZE bsize) { - int i; - for (i = 0; i < MAX_MB_PLANE; i++) { - struct 
macroblockd_plane *const pd = &xd->plane[i]; - const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd); - memset(pd->above_context, 0, - sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide_lookup[plane_bsize]); - memset(pd->left_context, 0, - sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high_lookup[plane_bsize]); - } -} - -typedef void (*foreach_transformed_block_visitor)(int plane, int block, - int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, - void *arg); - -void vp10_foreach_transformed_block_in_plane( - const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane, - foreach_transformed_block_visitor visit, void *arg); - - -void vp10_foreach_transformed_block( - const MACROBLOCKD* const xd, BLOCK_SIZE bsize, - foreach_transformed_block_visitor visit, void *arg); - -void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd, - BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob, - int aoff, int loff); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_BLOCKD_H_ diff --git a/libs/libvpx/vp10/common/common.h b/libs/libvpx/vp10/common/common.h deleted file mode 100644 index 4abcbf6332..0000000000 --- a/libs/libvpx/vp10/common/common.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_COMMON_COMMON_H_ -#define VP10_COMMON_COMMON_H_ - -/* Interface header for common constant data structures and lookup tables */ - -#include - -#include "./vpx_config.h" -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx/vpx_integer.h" -#include "vpx_ports/bitops.h" - -#ifdef __cplusplus -extern "C" { -#endif - -// Only need this for fixed-size arrays, for structs just assign. -#define vp10_copy(dest, src) { \ - assert(sizeof(dest) == sizeof(src)); \ - memcpy(dest, src, sizeof(src)); \ - } - -// Use this for variably-sized arrays. -#define vp10_copy_array(dest, src, n) { \ - assert(sizeof(*dest) == sizeof(*src)); \ - memcpy(dest, src, n * sizeof(*src)); \ - } - -#define vp10_zero(dest) memset(&(dest), 0, sizeof(dest)) -#define vp10_zero_array(dest, n) memset(dest, 0, n * sizeof(*dest)) - -static INLINE int get_unsigned_bits(unsigned int num_values) { - return num_values > 0 ? get_msb(num_values) + 1 : 0; -} - -#if CONFIG_DEBUG -#define CHECK_MEM_ERROR(cm, lval, expr) do { \ - lval = (expr); \ - if (!lval) \ - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, \ - "Failed to allocate "#lval" at %s:%d", \ - __FILE__, __LINE__); \ - } while (0) -#else -#define CHECK_MEM_ERROR(cm, lval, expr) do { \ - lval = (expr); \ - if (!lval) \ - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, \ - "Failed to allocate "#lval); \ - } while (0) -#endif -// TODO(yaowu: validate the usage of these codes or develop new ones.) -#define VP10_SYNC_CODE_0 0x49 -#define VP10_SYNC_CODE_1 0x83 -#define VP10_SYNC_CODE_2 0x43 - -#define VP9_FRAME_MARKER 0x2 - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_COMMON_H_ diff --git a/libs/libvpx/vp10/common/common_data.h b/libs/libvpx/vp10/common/common_data.h deleted file mode 100644 index 334489c9d2..0000000000 --- a/libs/libvpx/vp10/common/common_data.h +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_COMMON_COMMON_DATA_H_ -#define VP10_COMMON_COMMON_DATA_H_ - -#include "vp10/common/enums.h" -#include "vpx/vpx_integer.h" -#include "vpx_dsp/vpx_dsp_common.h" - -#ifdef __cplusplus -extern "C" { -#endif - -// Log 2 conversion lookup tables for block width and height -static const uint8_t b_width_log2_lookup[BLOCK_SIZES] = - {0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4}; -static const uint8_t b_height_log2_lookup[BLOCK_SIZES] = - {0, 1, 0, 1, 2, 1, 2, 3, 2, 3, 4, 3, 4}; -static const uint8_t num_4x4_blocks_wide_lookup[BLOCK_SIZES] = - {1, 1, 2, 2, 2, 4, 4, 4, 8, 8, 8, 16, 16}; -static const uint8_t num_4x4_blocks_high_lookup[BLOCK_SIZES] = - {1, 2, 1, 2, 4, 2, 4, 8, 4, 8, 16, 8, 16}; -// Log 2 conversion lookup tables for modeinfo width and height -static const uint8_t mi_width_log2_lookup[BLOCK_SIZES] = - {0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3}; -static const uint8_t mi_height_log2_lookup[BLOCK_SIZES] = - {0, 0, 0, 0, 1, 0, 1, 2, 1, 2, 3, 2, 3}; -static const uint8_t num_8x8_blocks_wide_lookup[BLOCK_SIZES] = - {1, 1, 1, 1, 1, 2, 2, 2, 4, 4, 4, 8, 8}; -static const uint8_t num_8x8_blocks_high_lookup[BLOCK_SIZES] = - {1, 1, 1, 1, 2, 1, 2, 4, 2, 4, 8, 4, 8}; - -// VPXMIN(3, VPXMIN(b_width_log2(bsize), b_height_log2(bsize))) -static const uint8_t size_group_lookup[BLOCK_SIZES] = - {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3}; - -static const uint8_t num_pels_log2_lookup[BLOCK_SIZES] = - {4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12}; - -static const PARTITION_TYPE partition_lookup[][BLOCK_SIZES] = { - { // 4X4 - // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64 - PARTITION_NONE, PARTITION_INVALID, 
PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, - PARTITION_INVALID - }, { // 8X8 - // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64 - PARTITION_SPLIT, PARTITION_VERT, PARTITION_HORZ, PARTITION_NONE, - PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID - }, { // 16X16 - // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64 - PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, - PARTITION_VERT, PARTITION_HORZ, PARTITION_NONE, PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID - }, { // 32X32 - // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64 - PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, - PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_VERT, - PARTITION_HORZ, PARTITION_NONE, PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID - }, { // 64X64 - // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64 - PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, - PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, - PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_VERT, PARTITION_HORZ, - PARTITION_NONE - } -}; - -static const BLOCK_SIZE subsize_lookup[PARTITION_TYPES][BLOCK_SIZES] = { - { // PARTITION_NONE - BLOCK_4X4, BLOCK_4X8, BLOCK_8X4, - BLOCK_8X8, BLOCK_8X16, BLOCK_16X8, - BLOCK_16X16, BLOCK_16X32, BLOCK_32X16, - BLOCK_32X32, BLOCK_32X64, BLOCK_64X32, - BLOCK_64X64, - }, { // PARTITION_HORZ - BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_8X4, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_16X8, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_32X16, BLOCK_INVALID, BLOCK_INVALID, - 
BLOCK_64X32, - }, { // PARTITION_VERT - BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_4X8, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_8X16, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_16X32, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_32X64, - }, { // PARTITION_SPLIT - BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_4X4, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_8X8, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_16X16, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_32X32, - } -}; - -static const TX_SIZE max_txsize_lookup[BLOCK_SIZES] = { - TX_4X4, TX_4X4, TX_4X4, - TX_8X8, TX_8X8, TX_8X8, - TX_16X16, TX_16X16, TX_16X16, - TX_32X32, TX_32X32, TX_32X32, TX_32X32 -}; - -static const BLOCK_SIZE txsize_to_bsize[TX_SIZES] = { - BLOCK_4X4, // TX_4X4 - BLOCK_8X8, // TX_8X8 - BLOCK_16X16, // TX_16X16 - BLOCK_32X32, // TX_32X32 -}; - -static const TX_SIZE tx_mode_to_biggest_tx_size[TX_MODES] = { - TX_4X4, // ONLY_4X4 - TX_8X8, // ALLOW_8X8 - TX_16X16, // ALLOW_16X16 - TX_32X32, // ALLOW_32X32 - TX_32X32, // TX_MODE_SELECT -}; - -static const BLOCK_SIZE ss_size_lookup[BLOCK_SIZES][2][2] = { -// ss_x == 0 ss_x == 0 ss_x == 1 ss_x == 1 -// ss_y == 0 ss_y == 1 ss_y == 0 ss_y == 1 - {{BLOCK_4X4, BLOCK_INVALID}, {BLOCK_INVALID, BLOCK_INVALID}}, - {{BLOCK_4X8, BLOCK_4X4}, {BLOCK_INVALID, BLOCK_INVALID}}, - {{BLOCK_8X4, BLOCK_INVALID}, {BLOCK_4X4, BLOCK_INVALID}}, - {{BLOCK_8X8, BLOCK_8X4}, {BLOCK_4X8, BLOCK_4X4}}, - {{BLOCK_8X16, BLOCK_8X8}, {BLOCK_INVALID, BLOCK_4X8}}, - {{BLOCK_16X8, BLOCK_INVALID}, {BLOCK_8X8, BLOCK_8X4}}, - {{BLOCK_16X16, BLOCK_16X8}, {BLOCK_8X16, BLOCK_8X8}}, - {{BLOCK_16X32, BLOCK_16X16}, {BLOCK_INVALID, BLOCK_8X16}}, - {{BLOCK_32X16, BLOCK_INVALID}, {BLOCK_16X16, BLOCK_16X8}}, - {{BLOCK_32X32, BLOCK_32X16}, {BLOCK_16X32, BLOCK_16X16}}, - {{BLOCK_32X64, BLOCK_32X32}, {BLOCK_INVALID, BLOCK_16X32}}, - {{BLOCK_64X32, BLOCK_INVALID}, {BLOCK_32X32, BLOCK_32X16}}, - {{BLOCK_64X64, BLOCK_64X32}, {BLOCK_32X64, BLOCK_32X32}}, -}; - -// Generates 4 bit field in which each bit set to 1 
represents -// a blocksize partition 1111 means we split 64x64, 32x32, 16x16 -// and 8x8. 1000 means we just split the 64x64 to 32x32 -static const struct { - PARTITION_CONTEXT above; - PARTITION_CONTEXT left; -} partition_context_lookup[BLOCK_SIZES]= { - {15, 15}, // 4X4 - {0b1111, 0b1111} - {15, 14}, // 4X8 - {0b1111, 0b1110} - {14, 15}, // 8X4 - {0b1110, 0b1111} - {14, 14}, // 8X8 - {0b1110, 0b1110} - {14, 12}, // 8X16 - {0b1110, 0b1100} - {12, 14}, // 16X8 - {0b1100, 0b1110} - {12, 12}, // 16X16 - {0b1100, 0b1100} - {12, 8 }, // 16X32 - {0b1100, 0b1000} - {8, 12}, // 32X16 - {0b1000, 0b1100} - {8, 8 }, // 32X32 - {0b1000, 0b1000} - {8, 0 }, // 32X64 - {0b1000, 0b0000} - {0, 8 }, // 64X32 - {0b0000, 0b1000} - {0, 0 }, // 64X64 - {0b0000, 0b0000} -}; - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_COMMON_DATA_H_ diff --git a/libs/libvpx/vp10/common/debugmodes.c b/libs/libvpx/vp10/common/debugmodes.c deleted file mode 100644 index 10fc4d633d..0000000000 --- a/libs/libvpx/vp10/common/debugmodes.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - -#include "vp10/common/blockd.h" -#include "vp10/common/onyxc_int.h" - -static void log_frame_info(VP10_COMMON *cm, const char *str, FILE *f) { - fprintf(f, "%s", str); - fprintf(f, "(Frame %d, Show:%d, Q:%d): \n", cm->current_video_frame, - cm->show_frame, cm->base_qindex); -} -/* This function dereferences a pointer to the mbmi structure - * and uses the passed in member offset to print out the value of an integer - * for each mbmi member value in the mi structure. 
- */ -static void print_mi_data(VP10_COMMON *cm, FILE *file, const char *descriptor, - size_t member_offset) { - int mi_row, mi_col; - MODE_INFO **mi = cm->mi_grid_visible; - int rows = cm->mi_rows; - int cols = cm->mi_cols; - char prefix = descriptor[0]; - - log_frame_info(cm, descriptor, file); - for (mi_row = 0; mi_row < rows; mi_row++) { - fprintf(file, "%c ", prefix); - for (mi_col = 0; mi_col < cols; mi_col++) { - fprintf(file, "%2d ", - *((int*) ((char *) (&mi[0]->mbmi) + - member_offset))); - mi++; - } - fprintf(file, "\n"); - mi += 8; - } - fprintf(file, "\n"); -} - -void vp10_print_modes_and_motion_vectors(VP10_COMMON *cm, const char *file) { - int mi_row; - int mi_col; - FILE *mvs = fopen(file, "a"); - MODE_INFO **mi = cm->mi_grid_visible; - int rows = cm->mi_rows; - int cols = cm->mi_cols; - - print_mi_data(cm, mvs, "Partitions:", offsetof(MB_MODE_INFO, sb_type)); - print_mi_data(cm, mvs, "Modes:", offsetof(MB_MODE_INFO, mode)); - print_mi_data(cm, mvs, "Ref frame:", offsetof(MB_MODE_INFO, ref_frame[0])); - print_mi_data(cm, mvs, "Transform:", offsetof(MB_MODE_INFO, tx_size)); - print_mi_data(cm, mvs, "UV Modes:", offsetof(MB_MODE_INFO, uv_mode)); - - // output skip infomation. - log_frame_info(cm, "Skips:", mvs); - for (mi_row = 0; mi_row < rows; mi_row++) { - fprintf(mvs, "S "); - for (mi_col = 0; mi_col < cols; mi_col++) { - fprintf(mvs, "%2d ", mi[0]->mbmi.skip); - mi++; - } - fprintf(mvs, "\n"); - mi += 8; - } - fprintf(mvs, "\n"); - - // output motion vectors. 
- log_frame_info(cm, "Vectors ", mvs); - mi = cm->mi_grid_visible; - for (mi_row = 0; mi_row < rows; mi_row++) { - fprintf(mvs, "V "); - for (mi_col = 0; mi_col < cols; mi_col++) { - fprintf(mvs, "%4d:%4d ", mi[0]->mbmi.mv[0].as_mv.row, - mi[0]->mbmi.mv[0].as_mv.col); - mi++; - } - fprintf(mvs, "\n"); - mi += 8; - } - fprintf(mvs, "\n"); - - fclose(mvs); -} diff --git a/libs/libvpx/vp10/common/entropy.c b/libs/libvpx/vp10/common/entropy.c deleted file mode 100644 index 3da08a61b0..0000000000 --- a/libs/libvpx/vp10/common/entropy.c +++ /dev/null @@ -1,818 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "vp10/common/entropy.h" -#include "vp10/common/blockd.h" -#include "vp10/common/onyxc_int.h" -#include "vp10/common/entropymode.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx/vpx_integer.h" - -// Unconstrained Node Tree -const vpx_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = { - 2, 6, // 0 = LOW_VAL - -TWO_TOKEN, 4, // 1 = TWO - -THREE_TOKEN, -FOUR_TOKEN, // 2 = THREE - 8, 10, // 3 = HIGH_LOW - -CATEGORY1_TOKEN, -CATEGORY2_TOKEN, // 4 = CAT_ONE - 12, 14, // 5 = CAT_THREEFOUR - -CATEGORY3_TOKEN, -CATEGORY4_TOKEN, // 6 = CAT_THREE - -CATEGORY5_TOKEN, -CATEGORY6_TOKEN // 7 = CAT_FIVE -}; - -const vpx_prob vp10_cat1_prob[] = { 159 }; -const vpx_prob vp10_cat2_prob[] = { 165, 145 }; -const vpx_prob vp10_cat3_prob[] = { 173, 148, 140 }; -const vpx_prob vp10_cat4_prob[] = { 176, 155, 140, 135 }; -const vpx_prob vp10_cat5_prob[] = { 180, 157, 141, 134, 130 }; -const vpx_prob vp10_cat6_prob[] = { - 254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129 -}; -#if 
CONFIG_VP9_HIGHBITDEPTH -const vpx_prob vp10_cat1_prob_high10[] = { 159 }; -const vpx_prob vp10_cat2_prob_high10[] = { 165, 145 }; -const vpx_prob vp10_cat3_prob_high10[] = { 173, 148, 140 }; -const vpx_prob vp10_cat4_prob_high10[] = { 176, 155, 140, 135 }; -const vpx_prob vp10_cat5_prob_high10[] = { 180, 157, 141, 134, 130 }; -const vpx_prob vp10_cat6_prob_high10[] = { - 255, 255, 254, 254, 254, 252, 249, 243, - 230, 196, 177, 153, 140, 133, 130, 129 -}; -const vpx_prob vp10_cat1_prob_high12[] = { 159 }; -const vpx_prob vp10_cat2_prob_high12[] = { 165, 145 }; -const vpx_prob vp10_cat3_prob_high12[] = { 173, 148, 140 }; -const vpx_prob vp10_cat4_prob_high12[] = { 176, 155, 140, 135 }; -const vpx_prob vp10_cat5_prob_high12[] = { 180, 157, 141, 134, 130 }; -const vpx_prob vp10_cat6_prob_high12[] = { - 255, 255, 255, 255, 254, 254, 254, 252, 249, - 243, 230, 196, 177, 153, 140, 133, 130, 129 -}; -#endif - -const uint8_t vp10_coefband_trans_8x8plus[1024] = { - 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 5, - // beyond MAXBAND_INDEX+1 all values are filled as 5 - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, -}; - -const uint8_t vp10_coefband_trans_4x4[16] = { - 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, -}; - -const uint8_t vp10_pt_energy_class[ENTROPY_TOKENS] = { - 0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 5 -}; - -// Model obtained from a 2-sided zero-centerd distribuition derived -// from a Pareto distribution. The cdf of the distribution is: -// cdf(x) = 0.5 + 0.5 * sgn(x) * [1 - {alpha/(alpha + |x|)} ^ beta] -// -// For a given beta and a given probablity of the 1-node, the alpha -// is first solved, and then the {alpha, beta} pair is used to generate -// the probabilities for the rest of the nodes. - -// beta = 8 - -// Every odd line in this table can be generated from the even lines -// by averaging : -// vp10_pareto8_full[l][node] = (vp10_pareto8_full[l-1][node] + -// vp10_pareto8_full[l+1][node] ) >> 1; -const vpx_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES] = { - { 3, 86, 128, 6, 86, 23, 88, 29}, - { 6, 86, 128, 11, 87, 42, 91, 52}, - { 9, 86, 129, 17, 88, 61, 94, 76}, - { 12, 86, 129, 22, 88, 77, 97, 93}, - { 15, 87, 129, 28, 89, 93, 100, 110}, - { 17, 87, 129, 33, 90, 105, 103, 123}, - { 20, 88, 130, 38, 91, 118, 106, 136}, - { 23, 88, 130, 43, 91, 128, 108, 146}, - { 26, 89, 131, 48, 92, 139, 111, 156}, - { 28, 89, 131, 53, 93, 147, 114, 163}, - { 31, 90, 131, 58, 94, 156, 117, 171}, - { 34, 90, 131, 62, 94, 163, 119, 177}, - { 37, 90, 132, 66, 95, 171, 122, 184}, - { 39, 90, 132, 70, 96, 177, 124, 189}, - { 42, 91, 132, 75, 97, 183, 127, 194}, - { 44, 91, 132, 79, 97, 188, 129, 198}, - { 47, 92, 133, 83, 98, 193, 132, 202}, - { 49, 92, 133, 86, 99, 197, 134, 205}, - { 52, 93, 133, 90, 100, 201, 137, 208}, - { 54, 93, 133, 94, 100, 204, 139, 211}, - { 57, 94, 134, 98, 101, 208, 142, 214}, - { 59, 94, 134, 101, 102, 211, 144, 
216}, - { 62, 94, 135, 105, 103, 214, 146, 218}, - { 64, 94, 135, 108, 103, 216, 148, 220}, - { 66, 95, 135, 111, 104, 219, 151, 222}, - { 68, 95, 135, 114, 105, 221, 153, 223}, - { 71, 96, 136, 117, 106, 224, 155, 225}, - { 73, 96, 136, 120, 106, 225, 157, 226}, - { 76, 97, 136, 123, 107, 227, 159, 228}, - { 78, 97, 136, 126, 108, 229, 160, 229}, - { 80, 98, 137, 129, 109, 231, 162, 231}, - { 82, 98, 137, 131, 109, 232, 164, 232}, - { 84, 98, 138, 134, 110, 234, 166, 233}, - { 86, 98, 138, 137, 111, 235, 168, 234}, - { 89, 99, 138, 140, 112, 236, 170, 235}, - { 91, 99, 138, 142, 112, 237, 171, 235}, - { 93, 100, 139, 145, 113, 238, 173, 236}, - { 95, 100, 139, 147, 114, 239, 174, 237}, - { 97, 101, 140, 149, 115, 240, 176, 238}, - { 99, 101, 140, 151, 115, 241, 177, 238}, - {101, 102, 140, 154, 116, 242, 179, 239}, - {103, 102, 140, 156, 117, 242, 180, 239}, - {105, 103, 141, 158, 118, 243, 182, 240}, - {107, 103, 141, 160, 118, 243, 183, 240}, - {109, 104, 141, 162, 119, 244, 185, 241}, - {111, 104, 141, 164, 119, 244, 186, 241}, - {113, 104, 142, 166, 120, 245, 187, 242}, - {114, 104, 142, 168, 121, 245, 188, 242}, - {116, 105, 143, 170, 122, 246, 190, 243}, - {118, 105, 143, 171, 122, 246, 191, 243}, - {120, 106, 143, 173, 123, 247, 192, 244}, - {121, 106, 143, 175, 124, 247, 193, 244}, - {123, 107, 144, 177, 125, 248, 195, 244}, - {125, 107, 144, 178, 125, 248, 196, 244}, - {127, 108, 145, 180, 126, 249, 197, 245}, - {128, 108, 145, 181, 127, 249, 198, 245}, - {130, 109, 145, 183, 128, 249, 199, 245}, - {132, 109, 145, 184, 128, 249, 200, 245}, - {134, 110, 146, 186, 129, 250, 201, 246}, - {135, 110, 146, 187, 130, 250, 202, 246}, - {137, 111, 147, 189, 131, 251, 203, 246}, - {138, 111, 147, 190, 131, 251, 204, 246}, - {140, 112, 147, 192, 132, 251, 205, 247}, - {141, 112, 147, 193, 132, 251, 206, 247}, - {143, 113, 148, 194, 133, 251, 207, 247}, - {144, 113, 148, 195, 134, 251, 207, 247}, - {146, 114, 149, 197, 135, 252, 208, 248}, - {147, 114, 149, 198, 135, 
252, 209, 248}, - {149, 115, 149, 199, 136, 252, 210, 248}, - {150, 115, 149, 200, 137, 252, 210, 248}, - {152, 115, 150, 201, 138, 252, 211, 248}, - {153, 115, 150, 202, 138, 252, 212, 248}, - {155, 116, 151, 204, 139, 253, 213, 249}, - {156, 116, 151, 205, 139, 253, 213, 249}, - {158, 117, 151, 206, 140, 253, 214, 249}, - {159, 117, 151, 207, 141, 253, 215, 249}, - {161, 118, 152, 208, 142, 253, 216, 249}, - {162, 118, 152, 209, 142, 253, 216, 249}, - {163, 119, 153, 210, 143, 253, 217, 249}, - {164, 119, 153, 211, 143, 253, 217, 249}, - {166, 120, 153, 212, 144, 254, 218, 250}, - {167, 120, 153, 212, 145, 254, 219, 250}, - {168, 121, 154, 213, 146, 254, 220, 250}, - {169, 121, 154, 214, 146, 254, 220, 250}, - {171, 122, 155, 215, 147, 254, 221, 250}, - {172, 122, 155, 216, 147, 254, 221, 250}, - {173, 123, 155, 217, 148, 254, 222, 250}, - {174, 123, 155, 217, 149, 254, 222, 250}, - {176, 124, 156, 218, 150, 254, 223, 250}, - {177, 124, 156, 219, 150, 254, 223, 250}, - {178, 125, 157, 220, 151, 254, 224, 251}, - {179, 125, 157, 220, 151, 254, 224, 251}, - {180, 126, 157, 221, 152, 254, 225, 251}, - {181, 126, 157, 221, 152, 254, 225, 251}, - {183, 127, 158, 222, 153, 254, 226, 251}, - {184, 127, 158, 223, 154, 254, 226, 251}, - {185, 128, 159, 224, 155, 255, 227, 251}, - {186, 128, 159, 224, 155, 255, 227, 251}, - {187, 129, 160, 225, 156, 255, 228, 251}, - {188, 130, 160, 225, 156, 255, 228, 251}, - {189, 131, 160, 226, 157, 255, 228, 251}, - {190, 131, 160, 226, 158, 255, 228, 251}, - {191, 132, 161, 227, 159, 255, 229, 251}, - {192, 132, 161, 227, 159, 255, 229, 251}, - {193, 133, 162, 228, 160, 255, 230, 252}, - {194, 133, 162, 229, 160, 255, 230, 252}, - {195, 134, 163, 230, 161, 255, 231, 252}, - {196, 134, 163, 230, 161, 255, 231, 252}, - {197, 135, 163, 231, 162, 255, 231, 252}, - {198, 135, 163, 231, 162, 255, 231, 252}, - {199, 136, 164, 232, 163, 255, 232, 252}, - {200, 136, 164, 232, 164, 255, 232, 252}, - {201, 137, 165, 233, 165, 255, 233, 252}, - 
{201, 137, 165, 233, 165, 255, 233, 252}, - {202, 138, 166, 233, 166, 255, 233, 252}, - {203, 138, 166, 233, 166, 255, 233, 252}, - {204, 139, 166, 234, 167, 255, 234, 252}, - {205, 139, 166, 234, 167, 255, 234, 252}, - {206, 140, 167, 235, 168, 255, 235, 252}, - {206, 140, 167, 235, 168, 255, 235, 252}, - {207, 141, 168, 236, 169, 255, 235, 252}, - {208, 141, 168, 236, 170, 255, 235, 252}, - {209, 142, 169, 237, 171, 255, 236, 252}, - {209, 143, 169, 237, 171, 255, 236, 252}, - {210, 144, 169, 237, 172, 255, 236, 252}, - {211, 144, 169, 237, 172, 255, 236, 252}, - {212, 145, 170, 238, 173, 255, 237, 252}, - {213, 145, 170, 238, 173, 255, 237, 252}, - {214, 146, 171, 239, 174, 255, 237, 253}, - {214, 146, 171, 239, 174, 255, 237, 253}, - {215, 147, 172, 240, 175, 255, 238, 253}, - {215, 147, 172, 240, 175, 255, 238, 253}, - {216, 148, 173, 240, 176, 255, 238, 253}, - {217, 148, 173, 240, 176, 255, 238, 253}, - {218, 149, 173, 241, 177, 255, 239, 253}, - {218, 149, 173, 241, 178, 255, 239, 253}, - {219, 150, 174, 241, 179, 255, 239, 253}, - {219, 151, 174, 241, 179, 255, 239, 253}, - {220, 152, 175, 242, 180, 255, 240, 253}, - {221, 152, 175, 242, 180, 255, 240, 253}, - {222, 153, 176, 242, 181, 255, 240, 253}, - {222, 153, 176, 242, 181, 255, 240, 253}, - {223, 154, 177, 243, 182, 255, 240, 253}, - {223, 154, 177, 243, 182, 255, 240, 253}, - {224, 155, 178, 244, 183, 255, 241, 253}, - {224, 155, 178, 244, 183, 255, 241, 253}, - {225, 156, 178, 244, 184, 255, 241, 253}, - {225, 157, 178, 244, 184, 255, 241, 253}, - {226, 158, 179, 244, 185, 255, 242, 253}, - {227, 158, 179, 244, 185, 255, 242, 253}, - {228, 159, 180, 245, 186, 255, 242, 253}, - {228, 159, 180, 245, 186, 255, 242, 253}, - {229, 160, 181, 245, 187, 255, 242, 253}, - {229, 160, 181, 245, 187, 255, 242, 253}, - {230, 161, 182, 246, 188, 255, 243, 253}, - {230, 162, 182, 246, 188, 255, 243, 253}, - {231, 163, 183, 246, 189, 255, 243, 253}, - {231, 163, 183, 246, 189, 255, 243, 253}, - {232, 164, 184, 
247, 190, 255, 243, 253}, - {232, 164, 184, 247, 190, 255, 243, 253}, - {233, 165, 185, 247, 191, 255, 244, 253}, - {233, 165, 185, 247, 191, 255, 244, 253}, - {234, 166, 185, 247, 192, 255, 244, 253}, - {234, 167, 185, 247, 192, 255, 244, 253}, - {235, 168, 186, 248, 193, 255, 244, 253}, - {235, 168, 186, 248, 193, 255, 244, 253}, - {236, 169, 187, 248, 194, 255, 244, 253}, - {236, 169, 187, 248, 194, 255, 244, 253}, - {236, 170, 188, 248, 195, 255, 245, 253}, - {236, 170, 188, 248, 195, 255, 245, 253}, - {237, 171, 189, 249, 196, 255, 245, 254}, - {237, 172, 189, 249, 196, 255, 245, 254}, - {238, 173, 190, 249, 197, 255, 245, 254}, - {238, 173, 190, 249, 197, 255, 245, 254}, - {239, 174, 191, 249, 198, 255, 245, 254}, - {239, 174, 191, 249, 198, 255, 245, 254}, - {240, 175, 192, 249, 199, 255, 246, 254}, - {240, 176, 192, 249, 199, 255, 246, 254}, - {240, 177, 193, 250, 200, 255, 246, 254}, - {240, 177, 193, 250, 200, 255, 246, 254}, - {241, 178, 194, 250, 201, 255, 246, 254}, - {241, 178, 194, 250, 201, 255, 246, 254}, - {242, 179, 195, 250, 202, 255, 246, 254}, - {242, 180, 195, 250, 202, 255, 246, 254}, - {242, 181, 196, 250, 203, 255, 247, 254}, - {242, 181, 196, 250, 203, 255, 247, 254}, - {243, 182, 197, 251, 204, 255, 247, 254}, - {243, 183, 197, 251, 204, 255, 247, 254}, - {244, 184, 198, 251, 205, 255, 247, 254}, - {244, 184, 198, 251, 205, 255, 247, 254}, - {244, 185, 199, 251, 206, 255, 247, 254}, - {244, 185, 199, 251, 206, 255, 247, 254}, - {245, 186, 200, 251, 207, 255, 247, 254}, - {245, 187, 200, 251, 207, 255, 247, 254}, - {246, 188, 201, 252, 207, 255, 248, 254}, - {246, 188, 201, 252, 207, 255, 248, 254}, - {246, 189, 202, 252, 208, 255, 248, 254}, - {246, 190, 202, 252, 208, 255, 248, 254}, - {247, 191, 203, 252, 209, 255, 248, 254}, - {247, 191, 203, 252, 209, 255, 248, 254}, - {247, 192, 204, 252, 210, 255, 248, 254}, - {247, 193, 204, 252, 210, 255, 248, 254}, - {248, 194, 205, 252, 211, 255, 248, 254}, - {248, 194, 205, 252, 211, 255, 248, 
254}, - {248, 195, 206, 252, 212, 255, 249, 254}, - {248, 196, 206, 252, 212, 255, 249, 254}, - {249, 197, 207, 253, 213, 255, 249, 254}, - {249, 197, 207, 253, 213, 255, 249, 254}, - {249, 198, 208, 253, 214, 255, 249, 254}, - {249, 199, 209, 253, 214, 255, 249, 254}, - {250, 200, 210, 253, 215, 255, 249, 254}, - {250, 200, 210, 253, 215, 255, 249, 254}, - {250, 201, 211, 253, 215, 255, 249, 254}, - {250, 202, 211, 253, 215, 255, 249, 254}, - {250, 203, 212, 253, 216, 255, 249, 254}, - {250, 203, 212, 253, 216, 255, 249, 254}, - {251, 204, 213, 253, 217, 255, 250, 254}, - {251, 205, 213, 253, 217, 255, 250, 254}, - {251, 206, 214, 254, 218, 255, 250, 254}, - {251, 206, 215, 254, 218, 255, 250, 254}, - {252, 207, 216, 254, 219, 255, 250, 254}, - {252, 208, 216, 254, 219, 255, 250, 254}, - {252, 209, 217, 254, 220, 255, 250, 254}, - {252, 210, 217, 254, 220, 255, 250, 254}, - {252, 211, 218, 254, 221, 255, 250, 254}, - {252, 212, 218, 254, 221, 255, 250, 254}, - {253, 213, 219, 254, 222, 255, 250, 254}, - {253, 213, 220, 254, 222, 255, 250, 254}, - {253, 214, 221, 254, 223, 255, 250, 254}, - {253, 215, 221, 254, 223, 255, 250, 254}, - {253, 216, 222, 254, 224, 255, 251, 254}, - {253, 217, 223, 254, 224, 255, 251, 254}, - {253, 218, 224, 254, 225, 255, 251, 254}, - {253, 219, 224, 254, 225, 255, 251, 254}, - {254, 220, 225, 254, 225, 255, 251, 254}, - {254, 221, 226, 254, 225, 255, 251, 254}, - {254, 222, 227, 255, 226, 255, 251, 254}, - {254, 223, 227, 255, 226, 255, 251, 254}, - {254, 224, 228, 255, 227, 255, 251, 254}, - {254, 225, 229, 255, 227, 255, 251, 254}, - {254, 226, 230, 255, 228, 255, 251, 254}, - {254, 227, 230, 255, 229, 255, 251, 254}, - {255, 228, 231, 255, 230, 255, 251, 254}, - {255, 229, 232, 255, 230, 255, 251, 254}, - {255, 230, 233, 255, 231, 255, 252, 254}, - {255, 231, 234, 255, 231, 255, 252, 254}, - {255, 232, 235, 255, 232, 255, 252, 254}, - {255, 233, 236, 255, 232, 255, 252, 254}, - {255, 235, 237, 255, 233, 255, 252, 254}, - {255, 236, 
238, 255, 234, 255, 252, 254}, - {255, 238, 240, 255, 235, 255, 252, 255}, - {255, 239, 241, 255, 235, 255, 252, 254}, - {255, 241, 243, 255, 236, 255, 252, 254}, - {255, 243, 245, 255, 237, 255, 252, 254}, - {255, 246, 247, 255, 239, 255, 253, 255}, -}; - -static const vp10_coeff_probs_model default_coef_probs_4x4[PLANE_TYPES] = { - { // Y plane - { // Intra - { // Band 0 - { 195, 29, 183 }, { 84, 49, 136 }, { 8, 42, 71 } - }, { // Band 1 - { 31, 107, 169 }, { 35, 99, 159 }, { 17, 82, 140 }, - { 8, 66, 114 }, { 2, 44, 76 }, { 1, 19, 32 } - }, { // Band 2 - { 40, 132, 201 }, { 29, 114, 187 }, { 13, 91, 157 }, - { 7, 75, 127 }, { 3, 58, 95 }, { 1, 28, 47 } - }, { // Band 3 - { 69, 142, 221 }, { 42, 122, 201 }, { 15, 91, 159 }, - { 6, 67, 121 }, { 1, 42, 77 }, { 1, 17, 31 } - }, { // Band 4 - { 102, 148, 228 }, { 67, 117, 204 }, { 17, 82, 154 }, - { 6, 59, 114 }, { 2, 39, 75 }, { 1, 15, 29 } - }, { // Band 5 - { 156, 57, 233 }, { 119, 57, 212 }, { 58, 48, 163 }, - { 29, 40, 124 }, { 12, 30, 81 }, { 3, 12, 31 } - } - }, { // Inter - { // Band 0 - { 191, 107, 226 }, { 124, 117, 204 }, { 25, 99, 155 } - }, { // Band 1 - { 29, 148, 210 }, { 37, 126, 194 }, { 8, 93, 157 }, - { 2, 68, 118 }, { 1, 39, 69 }, { 1, 17, 33 } - }, { // Band 2 - { 41, 151, 213 }, { 27, 123, 193 }, { 3, 82, 144 }, - { 1, 58, 105 }, { 1, 32, 60 }, { 1, 13, 26 } - }, { // Band 3 - { 59, 159, 220 }, { 23, 126, 198 }, { 4, 88, 151 }, - { 1, 66, 114 }, { 1, 38, 71 }, { 1, 18, 34 } - }, { // Band 4 - { 114, 136, 232 }, { 51, 114, 207 }, { 11, 83, 155 }, - { 3, 56, 105 }, { 1, 33, 65 }, { 1, 17, 34 } - }, { // Band 5 - { 149, 65, 234 }, { 121, 57, 215 }, { 61, 49, 166 }, - { 28, 36, 114 }, { 12, 25, 76 }, { 3, 16, 42 } - } - } - }, { // UV plane - { // Intra - { // Band 0 - { 214, 49, 220 }, { 132, 63, 188 }, { 42, 65, 137 } - }, { // Band 1 - { 85, 137, 221 }, { 104, 131, 216 }, { 49, 111, 192 }, - { 21, 87, 155 }, { 2, 49, 87 }, { 1, 16, 28 } - }, { // Band 2 - { 89, 163, 230 }, { 90, 137, 220 }, { 29, 
100, 183 }, - { 10, 70, 135 }, { 2, 42, 81 }, { 1, 17, 33 } - }, { // Band 3 - { 108, 167, 237 }, { 55, 133, 222 }, { 15, 97, 179 }, - { 4, 72, 135 }, { 1, 45, 85 }, { 1, 19, 38 } - }, { // Band 4 - { 124, 146, 240 }, { 66, 124, 224 }, { 17, 88, 175 }, - { 4, 58, 122 }, { 1, 36, 75 }, { 1, 18, 37 } - }, { // Band 5 - { 141, 79, 241 }, { 126, 70, 227 }, { 66, 58, 182 }, - { 30, 44, 136 }, { 12, 34, 96 }, { 2, 20, 47 } - } - }, { // Inter - { // Band 0 - { 229, 99, 249 }, { 143, 111, 235 }, { 46, 109, 192 } - }, { // Band 1 - { 82, 158, 236 }, { 94, 146, 224 }, { 25, 117, 191 }, - { 9, 87, 149 }, { 3, 56, 99 }, { 1, 33, 57 } - }, { // Band 2 - { 83, 167, 237 }, { 68, 145, 222 }, { 10, 103, 177 }, - { 2, 72, 131 }, { 1, 41, 79 }, { 1, 20, 39 } - }, { // Band 3 - { 99, 167, 239 }, { 47, 141, 224 }, { 10, 104, 178 }, - { 2, 73, 133 }, { 1, 44, 85 }, { 1, 22, 47 } - }, { // Band 4 - { 127, 145, 243 }, { 71, 129, 228 }, { 17, 93, 177 }, - { 3, 61, 124 }, { 1, 41, 84 }, { 1, 21, 52 } - }, { // Band 5 - { 157, 78, 244 }, { 140, 72, 231 }, { 69, 58, 184 }, - { 31, 44, 137 }, { 14, 38, 105 }, { 8, 23, 61 } - } - } - } -}; - -static const vp10_coeff_probs_model default_coef_probs_8x8[PLANE_TYPES] = { - { // Y plane - { // Intra - { // Band 0 - { 125, 34, 187 }, { 52, 41, 133 }, { 6, 31, 56 } - }, { // Band 1 - { 37, 109, 153 }, { 51, 102, 147 }, { 23, 87, 128 }, - { 8, 67, 101 }, { 1, 41, 63 }, { 1, 19, 29 } - }, { // Band 2 - { 31, 154, 185 }, { 17, 127, 175 }, { 6, 96, 145 }, - { 2, 73, 114 }, { 1, 51, 82 }, { 1, 28, 45 } - }, { // Band 3 - { 23, 163, 200 }, { 10, 131, 185 }, { 2, 93, 148 }, - { 1, 67, 111 }, { 1, 41, 69 }, { 1, 14, 24 } - }, { // Band 4 - { 29, 176, 217 }, { 12, 145, 201 }, { 3, 101, 156 }, - { 1, 69, 111 }, { 1, 39, 63 }, { 1, 14, 23 } - }, { // Band 5 - { 57, 192, 233 }, { 25, 154, 215 }, { 6, 109, 167 }, - { 3, 78, 118 }, { 1, 48, 69 }, { 1, 21, 29 } - } - }, { // Inter - { // Band 0 - { 202, 105, 245 }, { 108, 106, 216 }, { 18, 90, 144 } - }, { // Band 
1 - { 33, 172, 219 }, { 64, 149, 206 }, { 14, 117, 177 }, - { 5, 90, 141 }, { 2, 61, 95 }, { 1, 37, 57 } - }, { // Band 2 - { 33, 179, 220 }, { 11, 140, 198 }, { 1, 89, 148 }, - { 1, 60, 104 }, { 1, 33, 57 }, { 1, 12, 21 } - }, { // Band 3 - { 30, 181, 221 }, { 8, 141, 198 }, { 1, 87, 145 }, - { 1, 58, 100 }, { 1, 31, 55 }, { 1, 12, 20 } - }, { // Band 4 - { 32, 186, 224 }, { 7, 142, 198 }, { 1, 86, 143 }, - { 1, 58, 100 }, { 1, 31, 55 }, { 1, 12, 22 } - }, { // Band 5 - { 57, 192, 227 }, { 20, 143, 204 }, { 3, 96, 154 }, - { 1, 68, 112 }, { 1, 42, 69 }, { 1, 19, 32 } - } - } - }, { // UV plane - { // Intra - { // Band 0 - { 212, 35, 215 }, { 113, 47, 169 }, { 29, 48, 105 } - }, { // Band 1 - { 74, 129, 203 }, { 106, 120, 203 }, { 49, 107, 178 }, - { 19, 84, 144 }, { 4, 50, 84 }, { 1, 15, 25 } - }, { // Band 2 - { 71, 172, 217 }, { 44, 141, 209 }, { 15, 102, 173 }, - { 6, 76, 133 }, { 2, 51, 89 }, { 1, 24, 42 } - }, { // Band 3 - { 64, 185, 231 }, { 31, 148, 216 }, { 8, 103, 175 }, - { 3, 74, 131 }, { 1, 46, 81 }, { 1, 18, 30 } - }, { // Band 4 - { 65, 196, 235 }, { 25, 157, 221 }, { 5, 105, 174 }, - { 1, 67, 120 }, { 1, 38, 69 }, { 1, 15, 30 } - }, { // Band 5 - { 65, 204, 238 }, { 30, 156, 224 }, { 7, 107, 177 }, - { 2, 70, 124 }, { 1, 42, 73 }, { 1, 18, 34 } - } - }, { // Inter - { // Band 0 - { 225, 86, 251 }, { 144, 104, 235 }, { 42, 99, 181 } - }, { // Band 1 - { 85, 175, 239 }, { 112, 165, 229 }, { 29, 136, 200 }, - { 12, 103, 162 }, { 6, 77, 123 }, { 2, 53, 84 } - }, { // Band 2 - { 75, 183, 239 }, { 30, 155, 221 }, { 3, 106, 171 }, - { 1, 74, 128 }, { 1, 44, 76 }, { 1, 17, 28 } - }, { // Band 3 - { 73, 185, 240 }, { 27, 159, 222 }, { 2, 107, 172 }, - { 1, 75, 127 }, { 1, 42, 73 }, { 1, 17, 29 } - }, { // Band 4 - { 62, 190, 238 }, { 21, 159, 222 }, { 2, 107, 172 }, - { 1, 72, 122 }, { 1, 40, 71 }, { 1, 18, 32 } - }, { // Band 5 - { 61, 199, 240 }, { 27, 161, 226 }, { 4, 113, 180 }, - { 1, 76, 129 }, { 1, 46, 80 }, { 1, 23, 41 } - } - } - } -}; - -static 
const vp10_coeff_probs_model default_coef_probs_16x16[PLANE_TYPES] = { - { // Y plane - { // Intra - { // Band 0 - { 7, 27, 153 }, { 5, 30, 95 }, { 1, 16, 30 } - }, { // Band 1 - { 50, 75, 127 }, { 57, 75, 124 }, { 27, 67, 108 }, - { 10, 54, 86 }, { 1, 33, 52 }, { 1, 12, 18 } - }, { // Band 2 - { 43, 125, 151 }, { 26, 108, 148 }, { 7, 83, 122 }, - { 2, 59, 89 }, { 1, 38, 60 }, { 1, 17, 27 } - }, { // Band 3 - { 23, 144, 163 }, { 13, 112, 154 }, { 2, 75, 117 }, - { 1, 50, 81 }, { 1, 31, 51 }, { 1, 14, 23 } - }, { // Band 4 - { 18, 162, 185 }, { 6, 123, 171 }, { 1, 78, 125 }, - { 1, 51, 86 }, { 1, 31, 54 }, { 1, 14, 23 } - }, { // Band 5 - { 15, 199, 227 }, { 3, 150, 204 }, { 1, 91, 146 }, - { 1, 55, 95 }, { 1, 30, 53 }, { 1, 11, 20 } - } - }, { // Inter - { // Band 0 - { 19, 55, 240 }, { 19, 59, 196 }, { 3, 52, 105 } - }, { // Band 1 - { 41, 166, 207 }, { 104, 153, 199 }, { 31, 123, 181 }, - { 14, 101, 152 }, { 5, 72, 106 }, { 1, 36, 52 } - }, { // Band 2 - { 35, 176, 211 }, { 12, 131, 190 }, { 2, 88, 144 }, - { 1, 60, 101 }, { 1, 36, 60 }, { 1, 16, 28 } - }, { // Band 3 - { 28, 183, 213 }, { 8, 134, 191 }, { 1, 86, 142 }, - { 1, 56, 96 }, { 1, 30, 53 }, { 1, 12, 20 } - }, { // Band 4 - { 20, 190, 215 }, { 4, 135, 192 }, { 1, 84, 139 }, - { 1, 53, 91 }, { 1, 28, 49 }, { 1, 11, 20 } - }, { // Band 5 - { 13, 196, 216 }, { 2, 137, 192 }, { 1, 86, 143 }, - { 1, 57, 99 }, { 1, 32, 56 }, { 1, 13, 24 } - } - } - }, { // UV plane - { // Intra - { // Band 0 - { 211, 29, 217 }, { 96, 47, 156 }, { 22, 43, 87 } - }, { // Band 1 - { 78, 120, 193 }, { 111, 116, 186 }, { 46, 102, 164 }, - { 15, 80, 128 }, { 2, 49, 76 }, { 1, 18, 28 } - }, { // Band 2 - { 71, 161, 203 }, { 42, 132, 192 }, { 10, 98, 150 }, - { 3, 69, 109 }, { 1, 44, 70 }, { 1, 18, 29 } - }, { // Band 3 - { 57, 186, 211 }, { 30, 140, 196 }, { 4, 93, 146 }, - { 1, 62, 102 }, { 1, 38, 65 }, { 1, 16, 27 } - }, { // Band 4 - { 47, 199, 217 }, { 14, 145, 196 }, { 1, 88, 142 }, - { 1, 57, 98 }, { 1, 36, 62 }, { 1, 15, 26 } 
- }, { // Band 5 - { 26, 219, 229 }, { 5, 155, 207 }, { 1, 94, 151 }, - { 1, 60, 104 }, { 1, 36, 62 }, { 1, 16, 28 } - } - }, { // Inter - { // Band 0 - { 233, 29, 248 }, { 146, 47, 220 }, { 43, 52, 140 } - }, { // Band 1 - { 100, 163, 232 }, { 179, 161, 222 }, { 63, 142, 204 }, - { 37, 113, 174 }, { 26, 89, 137 }, { 18, 68, 97 } - }, { // Band 2 - { 85, 181, 230 }, { 32, 146, 209 }, { 7, 100, 164 }, - { 3, 71, 121 }, { 1, 45, 77 }, { 1, 18, 30 } - }, { // Band 3 - { 65, 187, 230 }, { 20, 148, 207 }, { 2, 97, 159 }, - { 1, 68, 116 }, { 1, 40, 70 }, { 1, 14, 29 } - }, { // Band 4 - { 40, 194, 227 }, { 8, 147, 204 }, { 1, 94, 155 }, - { 1, 65, 112 }, { 1, 39, 66 }, { 1, 14, 26 } - }, { // Band 5 - { 16, 208, 228 }, { 3, 151, 207 }, { 1, 98, 160 }, - { 1, 67, 117 }, { 1, 41, 74 }, { 1, 17, 31 } - } - } - } -}; - -static const vp10_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = { - { // Y plane - { // Intra - { // Band 0 - { 17, 38, 140 }, { 7, 34, 80 }, { 1, 17, 29 } - }, { // Band 1 - { 37, 75, 128 }, { 41, 76, 128 }, { 26, 66, 116 }, - { 12, 52, 94 }, { 2, 32, 55 }, { 1, 10, 16 } - }, { // Band 2 - { 50, 127, 154 }, { 37, 109, 152 }, { 16, 82, 121 }, - { 5, 59, 85 }, { 1, 35, 54 }, { 1, 13, 20 } - }, { // Band 3 - { 40, 142, 167 }, { 17, 110, 157 }, { 2, 71, 112 }, - { 1, 44, 72 }, { 1, 27, 45 }, { 1, 11, 17 } - }, { // Band 4 - { 30, 175, 188 }, { 9, 124, 169 }, { 1, 74, 116 }, - { 1, 48, 78 }, { 1, 30, 49 }, { 1, 11, 18 } - }, { // Band 5 - { 10, 222, 223 }, { 2, 150, 194 }, { 1, 83, 128 }, - { 1, 48, 79 }, { 1, 27, 45 }, { 1, 11, 17 } - } - }, { // Inter - { // Band 0 - { 36, 41, 235 }, { 29, 36, 193 }, { 10, 27, 111 } - }, { // Band 1 - { 85, 165, 222 }, { 177, 162, 215 }, { 110, 135, 195 }, - { 57, 113, 168 }, { 23, 83, 120 }, { 10, 49, 61 } - }, { // Band 2 - { 85, 190, 223 }, { 36, 139, 200 }, { 5, 90, 146 }, - { 1, 60, 103 }, { 1, 38, 65 }, { 1, 18, 30 } - }, { // Band 3 - { 72, 202, 223 }, { 23, 141, 199 }, { 2, 86, 140 }, - { 1, 56, 97 }, { 1, 
36, 61 }, { 1, 16, 27 } - }, { // Band 4 - { 55, 218, 225 }, { 13, 145, 200 }, { 1, 86, 141 }, - { 1, 57, 99 }, { 1, 35, 61 }, { 1, 13, 22 } - }, { // Band 5 - { 15, 235, 212 }, { 1, 132, 184 }, { 1, 84, 139 }, - { 1, 57, 97 }, { 1, 34, 56 }, { 1, 14, 23 } - } - } - }, { // UV plane - { // Intra - { // Band 0 - { 181, 21, 201 }, { 61, 37, 123 }, { 10, 38, 71 } - }, { // Band 1 - { 47, 106, 172 }, { 95, 104, 173 }, { 42, 93, 159 }, - { 18, 77, 131 }, { 4, 50, 81 }, { 1, 17, 23 } - }, { // Band 2 - { 62, 147, 199 }, { 44, 130, 189 }, { 28, 102, 154 }, - { 18, 75, 115 }, { 2, 44, 65 }, { 1, 12, 19 } - }, { // Band 3 - { 55, 153, 210 }, { 24, 130, 194 }, { 3, 93, 146 }, - { 1, 61, 97 }, { 1, 31, 50 }, { 1, 10, 16 } - }, { // Band 4 - { 49, 186, 223 }, { 17, 148, 204 }, { 1, 96, 142 }, - { 1, 53, 83 }, { 1, 26, 44 }, { 1, 11, 17 } - }, { // Band 5 - { 13, 217, 212 }, { 2, 136, 180 }, { 1, 78, 124 }, - { 1, 50, 83 }, { 1, 29, 49 }, { 1, 14, 23 } - } - }, { // Inter - { // Band 0 - { 197, 13, 247 }, { 82, 17, 222 }, { 25, 17, 162 } - }, { // Band 1 - { 126, 186, 247 }, { 234, 191, 243 }, { 176, 177, 234 }, - { 104, 158, 220 }, { 66, 128, 186 }, { 55, 90, 137 } - }, { // Band 2 - { 111, 197, 242 }, { 46, 158, 219 }, { 9, 104, 171 }, - { 2, 65, 125 }, { 1, 44, 80 }, { 1, 17, 91 } - }, { // Band 3 - { 104, 208, 245 }, { 39, 168, 224 }, { 3, 109, 162 }, - { 1, 79, 124 }, { 1, 50, 102 }, { 1, 43, 102 } - }, { // Band 4 - { 84, 220, 246 }, { 31, 177, 231 }, { 2, 115, 180 }, - { 1, 79, 134 }, { 1, 55, 77 }, { 1, 60, 79 } - }, { // Band 5 - { 43, 243, 240 }, { 8, 180, 217 }, { 1, 115, 166 }, - { 1, 84, 121 }, { 1, 51, 67 }, { 1, 16, 6 } - } - } - } -}; - -static void extend_to_full_distribution(vpx_prob *probs, vpx_prob p) { - // TODO(aconverse): model[PIVOT_NODE] should never be zero. - // https://code.google.com/p/webm/issues/detail?id=1089 - memcpy(probs, vp10_pareto8_full[p == 0 ? 
254 : p - 1], - MODEL_NODES * sizeof(vpx_prob)); -} - -void vp10_model_to_full_probs(const vpx_prob *model, vpx_prob *full) { - if (full != model) - memcpy(full, model, sizeof(vpx_prob) * UNCONSTRAINED_NODES); - extend_to_full_distribution(&full[UNCONSTRAINED_NODES], model[PIVOT_NODE]); -} - -void vp10_default_coef_probs(VP10_COMMON *cm) { - vp10_copy(cm->fc->coef_probs[TX_4X4], default_coef_probs_4x4); - vp10_copy(cm->fc->coef_probs[TX_8X8], default_coef_probs_8x8); - vp10_copy(cm->fc->coef_probs[TX_16X16], default_coef_probs_16x16); - vp10_copy(cm->fc->coef_probs[TX_32X32], default_coef_probs_32x32); -} - -#define COEF_COUNT_SAT 24 -#define COEF_MAX_UPDATE_FACTOR 112 -#define COEF_COUNT_SAT_KEY 24 -#define COEF_MAX_UPDATE_FACTOR_KEY 112 -#define COEF_COUNT_SAT_AFTER_KEY 24 -#define COEF_MAX_UPDATE_FACTOR_AFTER_KEY 128 - -static void adapt_coef_probs(VP10_COMMON *cm, TX_SIZE tx_size, - unsigned int count_sat, - unsigned int update_factor) { - const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx]; - vp10_coeff_probs_model *const probs = cm->fc->coef_probs[tx_size]; - const vp10_coeff_probs_model *const pre_probs = pre_fc->coef_probs[tx_size]; - vp10_coeff_count_model *counts = cm->counts.coef[tx_size]; - unsigned int (*eob_counts)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] = - cm->counts.eob_branch[tx_size]; - int i, j, k, l, m; - - for (i = 0; i < PLANE_TYPES; ++i) - for (j = 0; j < REF_TYPES; ++j) - for (k = 0; k < COEF_BANDS; ++k) - for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { - const int n0 = counts[i][j][k][l][ZERO_TOKEN]; - const int n1 = counts[i][j][k][l][ONE_TOKEN]; - const int n2 = counts[i][j][k][l][TWO_TOKEN]; - const int neob = counts[i][j][k][l][EOB_MODEL_TOKEN]; - const unsigned int branch_ct[UNCONSTRAINED_NODES][2] = { - { neob, eob_counts[i][j][k][l] - neob }, - { n0, n1 + n2 }, - { n1, n2 } - }; - for (m = 0; m < UNCONSTRAINED_NODES; ++m) - probs[i][j][k][l][m] = merge_probs(pre_probs[i][j][k][l][m], - branch_ct[m], - count_sat, 
update_factor); - } -} - -void vp10_adapt_coef_probs(VP10_COMMON *cm) { - TX_SIZE t; - unsigned int count_sat, update_factor; - - if (frame_is_intra_only(cm)) { - update_factor = COEF_MAX_UPDATE_FACTOR_KEY; - count_sat = COEF_COUNT_SAT_KEY; - } else if (cm->last_frame_type == KEY_FRAME) { - update_factor = COEF_MAX_UPDATE_FACTOR_AFTER_KEY; /* adapt quickly */ - count_sat = COEF_COUNT_SAT_AFTER_KEY; - } else { - update_factor = COEF_MAX_UPDATE_FACTOR; - count_sat = COEF_COUNT_SAT; - } - for (t = TX_4X4; t <= TX_32X32; t++) - adapt_coef_probs(cm, t, count_sat, update_factor); -} diff --git a/libs/libvpx/vp10/common/entropy.h b/libs/libvpx/vp10/common/entropy.h deleted file mode 100644 index 9a471c8183..0000000000 --- a/libs/libvpx/vp10/common/entropy.h +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_COMMON_ENTROPY_H_ -#define VP10_COMMON_ENTROPY_H_ - -#include "vpx/vpx_integer.h" -#include "vpx_dsp/prob.h" - -#include "vp10/common/common.h" -#include "vp10/common/enums.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define DIFF_UPDATE_PROB 252 -#define GROUP_DIFF_UPDATE_PROB 252 - -// Coefficient token alphabet -#define ZERO_TOKEN 0 // 0 Extra Bits 0+0 -#define ONE_TOKEN 1 // 1 Extra Bits 0+1 -#define TWO_TOKEN 2 // 2 Extra Bits 0+1 -#define THREE_TOKEN 3 // 3 Extra Bits 0+1 -#define FOUR_TOKEN 4 // 4 Extra Bits 0+1 -#define CATEGORY1_TOKEN 5 // 5-6 Extra Bits 1+1 -#define CATEGORY2_TOKEN 6 // 7-10 Extra Bits 2+1 -#define CATEGORY3_TOKEN 7 // 11-18 Extra Bits 3+1 -#define CATEGORY4_TOKEN 8 // 19-34 Extra Bits 4+1 -#define CATEGORY5_TOKEN 9 // 35-66 Extra Bits 5+1 -#define CATEGORY6_TOKEN 10 // 67+ Extra Bits 14+1 -#define EOB_TOKEN 11 // EOB Extra Bits 0+0 - -#define ENTROPY_TOKENS 12 - -#define ENTROPY_NODES 11 - -DECLARE_ALIGNED(16, extern const uint8_t, vp10_pt_energy_class[ENTROPY_TOKENS]); - -#define CAT1_MIN_VAL 5 -#define CAT2_MIN_VAL 7 -#define CAT3_MIN_VAL 11 -#define CAT4_MIN_VAL 19 -#define CAT5_MIN_VAL 35 -#define CAT6_MIN_VAL 67 - -// Extra bit probabilities. 
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob[1]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob[2]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob[3]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob[4]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob[5]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob[14]); - -#if CONFIG_VP9_HIGHBITDEPTH -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob_high10[1]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob_high10[2]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob_high10[3]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob_high10[4]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob_high10[5]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob_high10[16]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob_high12[1]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob_high12[2]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob_high12[3]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob_high12[4]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob_high12[5]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob_high12[18]); -#endif // CONFIG_VP9_HIGHBITDEPTH - -#define EOB_MODEL_TOKEN 3 - -typedef struct { - const vpx_tree_index *tree; - const vpx_prob *prob; - int len; - int base_val; - const int16_t *cost; -} vp10_extra_bit; - -// indexed by token value -extern const vp10_extra_bit vp10_extra_bits[ENTROPY_TOKENS]; -#if CONFIG_VP9_HIGHBITDEPTH -extern const vp10_extra_bit vp10_extra_bits_high10[ENTROPY_TOKENS]; -extern const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS]; -#endif // CONFIG_VP9_HIGHBITDEPTH - -#define DCT_MAX_VALUE 16384 -#if CONFIG_VP9_HIGHBITDEPTH -#define DCT_MAX_VALUE_HIGH10 65536 -#define DCT_MAX_VALUE_HIGH12 262144 -#endif // CONFIG_VP9_HIGHBITDEPTH - -/* Coefficients are predicted via a 3-dimensional probability table. 
*/ - -#define REF_TYPES 2 // intra=0, inter=1 - -/* Middle dimension reflects the coefficient position within the transform. */ -#define COEF_BANDS 6 - -/* Inside dimension is measure of nearby complexity, that reflects the energy - of nearby coefficients are nonzero. For the first coefficient (DC, unless - block type is 0), we look at the (already encoded) blocks above and to the - left of the current block. The context index is then the number (0,1,or 2) - of these blocks having nonzero coefficients. - After decoding a coefficient, the measure is determined by the size of the - most recently decoded coefficient. - Note that the intuitive meaning of this measure changes as coefficients - are decoded, e.g., prior to the first token, a zero means that my neighbors - are empty while, after the first token, because of the use of end-of-block, - a zero means we just decoded a zero and hence guarantees that a non-zero - coefficient will appear later in this block. However, this shift - in meaning is perfectly OK because our context depends also on the - coefficient band (and since zigzag positions 0, 1, and 2 are in - distinct bands). */ - -#define COEFF_CONTEXTS 6 -#define BAND_COEFF_CONTEXTS(band) ((band) == 0 ? 3 : COEFF_CONTEXTS) - -// #define ENTROPY_STATS - -typedef unsigned int vp10_coeff_count[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] - [ENTROPY_TOKENS]; -typedef unsigned int vp10_coeff_stats[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] - [ENTROPY_NODES][2]; - -#define SUBEXP_PARAM 4 /* Subexponential code parameter */ -#define MODULUS_PARAM 13 /* Modulus parameter */ - -struct VP10Common; -void vp10_default_coef_probs(struct VP10Common *cm); -void vp10_adapt_coef_probs(struct VP10Common *cm); - -// This is the index in the scan order beyond which all coefficients for -// 8x8 transform and above are in the top band. 
-// This macro is currently unused but may be used by certain implementations -#define MAXBAND_INDEX 21 - -DECLARE_ALIGNED(16, extern const uint8_t, vp10_coefband_trans_8x8plus[1024]); -DECLARE_ALIGNED(16, extern const uint8_t, vp10_coefband_trans_4x4[16]); - -static INLINE const uint8_t *get_band_translate(TX_SIZE tx_size) { - return tx_size == TX_4X4 ? vp10_coefband_trans_4x4 - : vp10_coefband_trans_8x8plus; -} - -// 128 lists of probabilities are stored for the following ONE node probs: -// 1, 3, 5, 7, ..., 253, 255 -// In between probabilities are interpolated linearly - -#define COEFF_PROB_MODELS 255 - -#define UNCONSTRAINED_NODES 3 - -#define PIVOT_NODE 2 // which node is pivot - -#define MODEL_NODES (ENTROPY_NODES - UNCONSTRAINED_NODES) -extern const vpx_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)]; -extern const vpx_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES]; - -typedef vpx_prob vp10_coeff_probs_model[REF_TYPES][COEF_BANDS] - [COEFF_CONTEXTS][UNCONSTRAINED_NODES]; - -typedef unsigned int vp10_coeff_count_model[REF_TYPES][COEF_BANDS] - [COEFF_CONTEXTS] - [UNCONSTRAINED_NODES + 1]; - -void vp10_model_to_full_probs(const vpx_prob *model, vpx_prob *full); - -typedef char ENTROPY_CONTEXT; - -static INLINE int combine_entropy_contexts(ENTROPY_CONTEXT a, - ENTROPY_CONTEXT b) { - return (a != 0) + (b != 0); -} - -static INLINE int get_entropy_context(TX_SIZE tx_size, const ENTROPY_CONTEXT *a, - const ENTROPY_CONTEXT *l) { - ENTROPY_CONTEXT above_ec = 0, left_ec = 0; - - switch (tx_size) { - case TX_4X4: - above_ec = a[0] != 0; - left_ec = l[0] != 0; - break; - case TX_8X8: - above_ec = !!*(const uint16_t *)a; - left_ec = !!*(const uint16_t *)l; - break; - case TX_16X16: - above_ec = !!*(const uint32_t *)a; - left_ec = !!*(const uint32_t *)l; - break; - case TX_32X32: - above_ec = !!*(const uint64_t *)a; - left_ec = !!*(const uint64_t *)l; - break; - default: - assert(0 && "Invalid transform size."); - break; - } - - return 
combine_entropy_contexts(above_ec, left_ec); -} - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_ENTROPY_H_ diff --git a/libs/libvpx/vp10/common/entropymode.c b/libs/libvpx/vp10/common/entropymode.c deleted file mode 100644 index 78f3650f8b..0000000000 --- a/libs/libvpx/vp10/common/entropymode.c +++ /dev/null @@ -1,551 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "vpx_mem/vpx_mem.h" - -#include "vp10/common/onyxc_int.h" -#include "vp10/common/seg_common.h" - -const vpx_prob vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] = { - { // above = dc - { 137, 30, 42, 148, 151, 207, 70, 52, 91 }, // left = dc - { 92, 45, 102, 136, 116, 180, 74, 90, 100 }, // left = v - { 73, 32, 19, 187, 222, 215, 46, 34, 100 }, // left = h - { 91, 30, 32, 116, 121, 186, 93, 86, 94 }, // left = d45 - { 72, 35, 36, 149, 68, 206, 68, 63, 105 }, // left = d135 - { 73, 31, 28, 138, 57, 124, 55, 122, 151 }, // left = d117 - { 67, 23, 21, 140, 126, 197, 40, 37, 171 }, // left = d153 - { 86, 27, 28, 128, 154, 212, 45, 43, 53 }, // left = d207 - { 74, 32, 27, 107, 86, 160, 63, 134, 102 }, // left = d63 - { 59, 67, 44, 140, 161, 202, 78, 67, 119 } // left = tm - }, { // above = v - { 63, 36, 126, 146, 123, 158, 60, 90, 96 }, // left = dc - { 43, 46, 168, 134, 107, 128, 69, 142, 92 }, // left = v - { 44, 29, 68, 159, 201, 177, 50, 57, 77 }, // left = h - { 58, 38, 76, 114, 97, 172, 78, 133, 92 }, // left = d45 - { 46, 41, 76, 140, 63, 184, 69, 112, 57 }, // left = d135 - { 38, 32, 85, 140, 46, 112, 54, 151, 133 }, // left = d117 - { 39, 27, 61, 131, 110, 175, 44, 75, 136 }, // left 
= d153 - { 52, 30, 74, 113, 130, 175, 51, 64, 58 }, // left = d207 - { 47, 35, 80, 100, 74, 143, 64, 163, 74 }, // left = d63 - { 36, 61, 116, 114, 128, 162, 80, 125, 82 } // left = tm - }, { // above = h - { 82, 26, 26, 171, 208, 204, 44, 32, 105 }, // left = dc - { 55, 44, 68, 166, 179, 192, 57, 57, 108 }, // left = v - { 42, 26, 11, 199, 241, 228, 23, 15, 85 }, // left = h - { 68, 42, 19, 131, 160, 199, 55, 52, 83 }, // left = d45 - { 58, 50, 25, 139, 115, 232, 39, 52, 118 }, // left = d135 - { 50, 35, 33, 153, 104, 162, 64, 59, 131 }, // left = d117 - { 44, 24, 16, 150, 177, 202, 33, 19, 156 }, // left = d153 - { 55, 27, 12, 153, 203, 218, 26, 27, 49 }, // left = d207 - { 53, 49, 21, 110, 116, 168, 59, 80, 76 }, // left = d63 - { 38, 72, 19, 168, 203, 212, 50, 50, 107 } // left = tm - }, { // above = d45 - { 103, 26, 36, 129, 132, 201, 83, 80, 93 }, // left = dc - { 59, 38, 83, 112, 103, 162, 98, 136, 90 }, // left = v - { 62, 30, 23, 158, 200, 207, 59, 57, 50 }, // left = h - { 67, 30, 29, 84, 86, 191, 102, 91, 59 }, // left = d45 - { 60, 32, 33, 112, 71, 220, 64, 89, 104 }, // left = d135 - { 53, 26, 34, 130, 56, 149, 84, 120, 103 }, // left = d117 - { 53, 21, 23, 133, 109, 210, 56, 77, 172 }, // left = d153 - { 77, 19, 29, 112, 142, 228, 55, 66, 36 }, // left = d207 - { 61, 29, 29, 93, 97, 165, 83, 175, 162 }, // left = d63 - { 47, 47, 43, 114, 137, 181, 100, 99, 95 } // left = tm - }, { // above = d135 - { 69, 23, 29, 128, 83, 199, 46, 44, 101 }, // left = dc - { 53, 40, 55, 139, 69, 183, 61, 80, 110 }, // left = v - { 40, 29, 19, 161, 180, 207, 43, 24, 91 }, // left = h - { 60, 34, 19, 105, 61, 198, 53, 64, 89 }, // left = d45 - { 52, 31, 22, 158, 40, 209, 58, 62, 89 }, // left = d135 - { 44, 31, 29, 147, 46, 158, 56, 102, 198 }, // left = d117 - { 35, 19, 12, 135, 87, 209, 41, 45, 167 }, // left = d153 - { 55, 25, 21, 118, 95, 215, 38, 39, 66 }, // left = d207 - { 51, 38, 25, 113, 58, 164, 70, 93, 97 }, // left = d63 - { 47, 54, 34, 146, 108, 203, 72, 
103, 151 } // left = tm - }, { // above = d117 - { 64, 19, 37, 156, 66, 138, 49, 95, 133 }, // left = dc - { 46, 27, 80, 150, 55, 124, 55, 121, 135 }, // left = v - { 36, 23, 27, 165, 149, 166, 54, 64, 118 }, // left = h - { 53, 21, 36, 131, 63, 163, 60, 109, 81 }, // left = d45 - { 40, 26, 35, 154, 40, 185, 51, 97, 123 }, // left = d135 - { 35, 19, 34, 179, 19, 97, 48, 129, 124 }, // left = d117 - { 36, 20, 26, 136, 62, 164, 33, 77, 154 }, // left = d153 - { 45, 18, 32, 130, 90, 157, 40, 79, 91 }, // left = d207 - { 45, 26, 28, 129, 45, 129, 49, 147, 123 }, // left = d63 - { 38, 44, 51, 136, 74, 162, 57, 97, 121 } // left = tm - }, { // above = d153 - { 75, 17, 22, 136, 138, 185, 32, 34, 166 }, // left = dc - { 56, 39, 58, 133, 117, 173, 48, 53, 187 }, // left = v - { 35, 21, 12, 161, 212, 207, 20, 23, 145 }, // left = h - { 56, 29, 19, 117, 109, 181, 55, 68, 112 }, // left = d45 - { 47, 29, 17, 153, 64, 220, 59, 51, 114 }, // left = d135 - { 46, 16, 24, 136, 76, 147, 41, 64, 172 }, // left = d117 - { 34, 17, 11, 108, 152, 187, 13, 15, 209 }, // left = d153 - { 51, 24, 14, 115, 133, 209, 32, 26, 104 }, // left = d207 - { 55, 30, 18, 122, 79, 179, 44, 88, 116 }, // left = d63 - { 37, 49, 25, 129, 168, 164, 41, 54, 148 } // left = tm - }, { // above = d207 - { 82, 22, 32, 127, 143, 213, 39, 41, 70 }, // left = dc - { 62, 44, 61, 123, 105, 189, 48, 57, 64 }, // left = v - { 47, 25, 17, 175, 222, 220, 24, 30, 86 }, // left = h - { 68, 36, 17, 106, 102, 206, 59, 74, 74 }, // left = d45 - { 57, 39, 23, 151, 68, 216, 55, 63, 58 }, // left = d135 - { 49, 30, 35, 141, 70, 168, 82, 40, 115 }, // left = d117 - { 51, 25, 15, 136, 129, 202, 38, 35, 139 }, // left = d153 - { 68, 26, 16, 111, 141, 215, 29, 28, 28 }, // left = d207 - { 59, 39, 19, 114, 75, 180, 77, 104, 42 }, // left = d63 - { 40, 61, 26, 126, 152, 206, 61, 59, 93 } // left = tm - }, { // above = d63 - { 78, 23, 39, 111, 117, 170, 74, 124, 94 }, // left = dc - { 48, 34, 86, 101, 92, 146, 78, 179, 134 }, // left = 
v - { 47, 22, 24, 138, 187, 178, 68, 69, 59 }, // left = h - { 56, 25, 33, 105, 112, 187, 95, 177, 129 }, // left = d45 - { 48, 31, 27, 114, 63, 183, 82, 116, 56 }, // left = d135 - { 43, 28, 37, 121, 63, 123, 61, 192, 169 }, // left = d117 - { 42, 17, 24, 109, 97, 177, 56, 76, 122 }, // left = d153 - { 58, 18, 28, 105, 139, 182, 70, 92, 63 }, // left = d207 - { 46, 23, 32, 74, 86, 150, 67, 183, 88 }, // left = d63 - { 36, 38, 48, 92, 122, 165, 88, 137, 91 } // left = tm - }, { // above = tm - { 65, 70, 60, 155, 159, 199, 61, 60, 81 }, // left = dc - { 44, 78, 115, 132, 119, 173, 71, 112, 93 }, // left = v - { 39, 38, 21, 184, 227, 206, 42, 32, 64 }, // left = h - { 58, 47, 36, 124, 137, 193, 80, 82, 78 }, // left = d45 - { 49, 50, 35, 144, 95, 205, 63, 78, 59 }, // left = d135 - { 41, 53, 52, 148, 71, 142, 65, 128, 51 }, // left = d117 - { 40, 36, 28, 143, 143, 202, 40, 55, 137 }, // left = d153 - { 52, 34, 29, 129, 183, 227, 42, 35, 43 }, // left = d207 - { 42, 44, 44, 104, 105, 164, 64, 130, 80 }, // left = d63 - { 43, 81, 53, 140, 169, 204, 68, 84, 72 } // left = tm - } -}; - -#if !CONFIG_MISC_FIXES -const vpx_prob vp10_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1] = { - { 144, 11, 54, 157, 195, 130, 46, 58, 108 }, // y = dc - { 118, 15, 123, 148, 131, 101, 44, 93, 131 }, // y = v - { 113, 12, 23, 188, 226, 142, 26, 32, 125 }, // y = h - { 120, 11, 50, 123, 163, 135, 64, 77, 103 }, // y = d45 - { 113, 9, 36, 155, 111, 157, 32, 44, 161 }, // y = d135 - { 116, 9, 55, 176, 76, 96, 37, 61, 149 }, // y = d117 - { 115, 9, 28, 141, 161, 167, 21, 25, 193 }, // y = d153 - { 120, 12, 32, 145, 195, 142, 32, 38, 86 }, // y = d207 - { 116, 12, 64, 120, 140, 125, 49, 115, 121 }, // y = d63 - { 102, 19, 66, 162, 182, 122, 35, 59, 128 } // y = tm -}; -#endif - -static const vpx_prob default_if_y_probs[BLOCK_SIZE_GROUPS][INTRA_MODES - 1] = { - { 65, 32, 18, 144, 162, 194, 41, 51, 98 }, // block_size < 8x8 - { 132, 68, 18, 165, 217, 196, 45, 40, 78 }, // block_size < 16x16 - { 
173, 80, 19, 176, 240, 193, 64, 35, 46 }, // block_size < 32x32 - { 221, 135, 38, 194, 248, 121, 96, 85, 29 } // block_size >= 32x32 -}; - -static const vpx_prob default_uv_probs[INTRA_MODES][INTRA_MODES - 1] = { - { 120, 7, 76, 176, 208, 126, 28, 54, 103 }, // y = dc - { 48, 12, 154, 155, 139, 90, 34, 117, 119 }, // y = v - { 67, 6, 25, 204, 243, 158, 13, 21, 96 }, // y = h - { 97, 5, 44, 131, 176, 139, 48, 68, 97 }, // y = d45 - { 83, 5, 42, 156, 111, 152, 26, 49, 152 }, // y = d135 - { 80, 5, 58, 178, 74, 83, 33, 62, 145 }, // y = d117 - { 86, 5, 32, 154, 192, 168, 14, 22, 163 }, // y = d153 - { 85, 5, 32, 156, 216, 148, 19, 29, 73 }, // y = d207 - { 77, 7, 64, 116, 132, 122, 37, 126, 120 }, // y = d63 - { 101, 21, 107, 181, 192, 103, 19, 67, 125 } // y = tm -}; - -#if !CONFIG_MISC_FIXES -const vpx_prob vp10_kf_partition_probs[PARTITION_CONTEXTS] - [PARTITION_TYPES - 1] = { - // 8x8 -> 4x4 - { 158, 97, 94 }, // a/l both not split - { 93, 24, 99 }, // a split, l not split - { 85, 119, 44 }, // l split, a not split - { 62, 59, 67 }, // a/l both split - // 16x16 -> 8x8 - { 149, 53, 53 }, // a/l both not split - { 94, 20, 48 }, // a split, l not split - { 83, 53, 24 }, // l split, a not split - { 52, 18, 18 }, // a/l both split - // 32x32 -> 16x16 - { 150, 40, 39 }, // a/l both not split - { 78, 12, 26 }, // a split, l not split - { 67, 33, 11 }, // l split, a not split - { 24, 7, 5 }, // a/l both split - // 64x64 -> 32x32 - { 174, 35, 49 }, // a/l both not split - { 68, 11, 27 }, // a split, l not split - { 57, 15, 9 }, // l split, a not split - { 12, 3, 3 }, // a/l both split -}; -#endif - -static const vpx_prob default_partition_probs[PARTITION_CONTEXTS] - [PARTITION_TYPES - 1] = { - // 8x8 -> 4x4 - { 199, 122, 141 }, // a/l both not split - { 147, 63, 159 }, // a split, l not split - { 148, 133, 118 }, // l split, a not split - { 121, 104, 114 }, // a/l both split - // 16x16 -> 8x8 - { 174, 73, 87 }, // a/l both not split - { 92, 41, 83 }, // a split, l not 
split - { 82, 99, 50 }, // l split, a not split - { 53, 39, 39 }, // a/l both split - // 32x32 -> 16x16 - { 177, 58, 59 }, // a/l both not split - { 68, 26, 63 }, // a split, l not split - { 52, 79, 25 }, // l split, a not split - { 17, 14, 12 }, // a/l both split - // 64x64 -> 32x32 - { 222, 34, 30 }, // a/l both not split - { 72, 16, 44 }, // a split, l not split - { 58, 32, 12 }, // l split, a not split - { 10, 7, 6 }, // a/l both split -}; - -static const vpx_prob default_inter_mode_probs[INTER_MODE_CONTEXTS] - [INTER_MODES - 1] = { - {2, 173, 34}, // 0 = both zero mv - {7, 145, 85}, // 1 = one zero mv + one a predicted mv - {7, 166, 63}, // 2 = two predicted mvs - {7, 94, 66}, // 3 = one predicted/zero and one new mv - {8, 64, 46}, // 4 = two new mvs - {17, 81, 31}, // 5 = one intra neighbour + x - {25, 29, 30}, // 6 = two intra neighbours -}; - -/* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */ -const vpx_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = { - -DC_PRED, 2, /* 0 = DC_NODE */ - -TM_PRED, 4, /* 1 = TM_NODE */ - -V_PRED, 6, /* 2 = V_NODE */ - 8, 12, /* 3 = COM_NODE */ - -H_PRED, 10, /* 4 = H_NODE */ - -D135_PRED, -D117_PRED, /* 5 = D135_NODE */ - -D45_PRED, 14, /* 6 = D45_NODE */ - -D63_PRED, 16, /* 7 = D63_NODE */ - -D153_PRED, -D207_PRED /* 8 = D153_NODE */ -}; - -const vpx_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)] = { - -INTER_OFFSET(ZEROMV), 2, - -INTER_OFFSET(NEARESTMV), 4, - -INTER_OFFSET(NEARMV), -INTER_OFFSET(NEWMV) -}; - -const vpx_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)] = { - -PARTITION_NONE, 2, - -PARTITION_HORZ, 4, - -PARTITION_VERT, -PARTITION_SPLIT -}; - -static const vpx_prob default_intra_inter_p[INTRA_INTER_CONTEXTS] = { - 9, 102, 187, 225 -}; - -static const vpx_prob default_comp_inter_p[COMP_INTER_CONTEXTS] = { - 239, 183, 119, 96, 41 -}; - -static const vpx_prob default_comp_ref_p[REF_CONTEXTS] = { - 50, 126, 123, 221, 226 -}; - -static const vpx_prob 
default_single_ref_p[REF_CONTEXTS][2] = { - { 33, 16 }, - { 77, 74 }, - { 142, 142 }, - { 172, 170 }, - { 238, 247 } -}; - -static const struct tx_probs default_tx_probs = { - { { 3, 136, 37 }, - { 5, 52, 13 } }, - - { { 20, 152 }, - { 15, 101 } }, - - { { 100 }, - { 66 } } -}; - -void vp10_tx_counts_to_branch_counts_32x32(const unsigned int *tx_count_32x32p, - unsigned int (*ct_32x32p)[2]) { - ct_32x32p[0][0] = tx_count_32x32p[TX_4X4]; - ct_32x32p[0][1] = tx_count_32x32p[TX_8X8] + - tx_count_32x32p[TX_16X16] + - tx_count_32x32p[TX_32X32]; - ct_32x32p[1][0] = tx_count_32x32p[TX_8X8]; - ct_32x32p[1][1] = tx_count_32x32p[TX_16X16] + - tx_count_32x32p[TX_32X32]; - ct_32x32p[2][0] = tx_count_32x32p[TX_16X16]; - ct_32x32p[2][1] = tx_count_32x32p[TX_32X32]; -} - -void vp10_tx_counts_to_branch_counts_16x16(const unsigned int *tx_count_16x16p, - unsigned int (*ct_16x16p)[2]) { - ct_16x16p[0][0] = tx_count_16x16p[TX_4X4]; - ct_16x16p[0][1] = tx_count_16x16p[TX_8X8] + tx_count_16x16p[TX_16X16]; - ct_16x16p[1][0] = tx_count_16x16p[TX_8X8]; - ct_16x16p[1][1] = tx_count_16x16p[TX_16X16]; -} - -void vp10_tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p, - unsigned int (*ct_8x8p)[2]) { - ct_8x8p[0][0] = tx_count_8x8p[TX_4X4]; - ct_8x8p[0][1] = tx_count_8x8p[TX_8X8]; -} - -static const vpx_prob default_skip_probs[SKIP_CONTEXTS] = { - 192, 128, 64 -}; - -static const vpx_prob default_switchable_interp_prob[SWITCHABLE_FILTER_CONTEXTS] - [SWITCHABLE_FILTERS - 1] = { - { 235, 162, }, - { 36, 255, }, - { 34, 3, }, - { 149, 144, }, -}; - -#if CONFIG_MISC_FIXES -// FIXME(someone) need real defaults here -static const struct segmentation_probs default_seg_probs = { - { 128, 128, 128, 128, 128, 128, 128 }, - { 128, 128, 128 }, -}; -#endif - -const vpx_tree_index vp10_ext_tx_tree[TREE_SIZE(TX_TYPES)] = { - -DCT_DCT, 2, - -ADST_ADST, 4, - -ADST_DCT, -DCT_ADST -}; - -static const vpx_prob default_intra_ext_tx_prob[EXT_TX_SIZES] - [TX_TYPES][TX_TYPES - 1] = { - {{240, 85, 128}, 
{4, 1, 248}, {4, 1, 8}, {4, 248, 128}}, - {{244, 85, 128}, {8, 2, 248}, {8, 2, 8}, {8, 248, 128}}, - {{248, 85, 128}, {16, 4, 248}, {16, 4, 8}, {16, 248, 128}}, -}; - -static const vpx_prob default_inter_ext_tx_prob[EXT_TX_SIZES] - [TX_TYPES - 1] = { - {160, 85, 128}, - {176, 85, 128}, - {192, 85, 128}, -}; - -static void init_mode_probs(FRAME_CONTEXT *fc) { - vp10_copy(fc->uv_mode_prob, default_uv_probs); - vp10_copy(fc->y_mode_prob, default_if_y_probs); - vp10_copy(fc->switchable_interp_prob, default_switchable_interp_prob); - vp10_copy(fc->partition_prob, default_partition_probs); - vp10_copy(fc->intra_inter_prob, default_intra_inter_p); - vp10_copy(fc->comp_inter_prob, default_comp_inter_p); - vp10_copy(fc->comp_ref_prob, default_comp_ref_p); - vp10_copy(fc->single_ref_prob, default_single_ref_p); - fc->tx_probs = default_tx_probs; - vp10_copy(fc->skip_probs, default_skip_probs); - vp10_copy(fc->inter_mode_probs, default_inter_mode_probs); -#if CONFIG_MISC_FIXES - vp10_copy(fc->seg.tree_probs, default_seg_probs.tree_probs); - vp10_copy(fc->seg.pred_probs, default_seg_probs.pred_probs); -#endif - vp10_copy(fc->intra_ext_tx_prob, default_intra_ext_tx_prob); - vp10_copy(fc->inter_ext_tx_prob, default_inter_ext_tx_prob); -} - -const vpx_tree_index vp10_switchable_interp_tree - [TREE_SIZE(SWITCHABLE_FILTERS)] = { - -EIGHTTAP, 2, - -EIGHTTAP_SMOOTH, -EIGHTTAP_SHARP -}; - -void vp10_adapt_inter_frame_probs(VP10_COMMON *cm) { - int i, j; - FRAME_CONTEXT *fc = cm->fc; - const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx]; - const FRAME_COUNTS *counts = &cm->counts; - - for (i = 0; i < INTRA_INTER_CONTEXTS; i++) - fc->intra_inter_prob[i] = mode_mv_merge_probs(pre_fc->intra_inter_prob[i], - counts->intra_inter[i]); - for (i = 0; i < COMP_INTER_CONTEXTS; i++) - fc->comp_inter_prob[i] = mode_mv_merge_probs(pre_fc->comp_inter_prob[i], - counts->comp_inter[i]); - for (i = 0; i < REF_CONTEXTS; i++) - fc->comp_ref_prob[i] = 
mode_mv_merge_probs(pre_fc->comp_ref_prob[i], - counts->comp_ref[i]); - for (i = 0; i < REF_CONTEXTS; i++) - for (j = 0; j < 2; j++) - fc->single_ref_prob[i][j] = mode_mv_merge_probs( - pre_fc->single_ref_prob[i][j], counts->single_ref[i][j]); - - for (i = 0; i < INTER_MODE_CONTEXTS; i++) - vpx_tree_merge_probs(vp10_inter_mode_tree, pre_fc->inter_mode_probs[i], - counts->inter_mode[i], fc->inter_mode_probs[i]); - - for (i = 0; i < BLOCK_SIZE_GROUPS; i++) - vpx_tree_merge_probs(vp10_intra_mode_tree, pre_fc->y_mode_prob[i], - counts->y_mode[i], fc->y_mode_prob[i]); - -#if !CONFIG_MISC_FIXES - for (i = 0; i < INTRA_MODES; ++i) - vpx_tree_merge_probs(vp10_intra_mode_tree, pre_fc->uv_mode_prob[i], - counts->uv_mode[i], fc->uv_mode_prob[i]); - - for (i = 0; i < PARTITION_CONTEXTS; i++) - vpx_tree_merge_probs(vp10_partition_tree, pre_fc->partition_prob[i], - counts->partition[i], fc->partition_prob[i]); -#endif - - if (cm->interp_filter == SWITCHABLE) { - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) - vpx_tree_merge_probs(vp10_switchable_interp_tree, - pre_fc->switchable_interp_prob[i], - counts->switchable_interp[i], - fc->switchable_interp_prob[i]); - } -} - -void vp10_adapt_intra_frame_probs(VP10_COMMON *cm) { - int i; - FRAME_CONTEXT *fc = cm->fc; - const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx]; - const FRAME_COUNTS *counts = &cm->counts; - - if (cm->tx_mode == TX_MODE_SELECT) { - int j; - unsigned int branch_ct_8x8p[TX_SIZES - 3][2]; - unsigned int branch_ct_16x16p[TX_SIZES - 2][2]; - unsigned int branch_ct_32x32p[TX_SIZES - 1][2]; - - for (i = 0; i < TX_SIZE_CONTEXTS; ++i) { - vp10_tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], branch_ct_8x8p); - for (j = 0; j < TX_SIZES - 3; ++j) - fc->tx_probs.p8x8[i][j] = mode_mv_merge_probs( - pre_fc->tx_probs.p8x8[i][j], branch_ct_8x8p[j]); - - vp10_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], branch_ct_16x16p); - for (j = 0; j < TX_SIZES - 2; ++j) - fc->tx_probs.p16x16[i][j] = 
mode_mv_merge_probs( - pre_fc->tx_probs.p16x16[i][j], branch_ct_16x16p[j]); - - vp10_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], branch_ct_32x32p); - for (j = 0; j < TX_SIZES - 1; ++j) - fc->tx_probs.p32x32[i][j] = mode_mv_merge_probs( - pre_fc->tx_probs.p32x32[i][j], branch_ct_32x32p[j]); - } - } - - for (i = 0; i < SKIP_CONTEXTS; ++i) - fc->skip_probs[i] = mode_mv_merge_probs( - pre_fc->skip_probs[i], counts->skip[i]); - - for (i = TX_4X4; i < EXT_TX_SIZES; ++i) { - int j; - for (j = 0; j < TX_TYPES; ++j) - vpx_tree_merge_probs(vp10_ext_tx_tree, - pre_fc->intra_ext_tx_prob[i][j], - counts->intra_ext_tx[i][j], - fc->intra_ext_tx_prob[i][j]); - } - for (i = TX_4X4; i < EXT_TX_SIZES; ++i) { - vpx_tree_merge_probs(vp10_ext_tx_tree, - pre_fc->inter_ext_tx_prob[i], - counts->inter_ext_tx[i], - fc->inter_ext_tx_prob[i]); - } - -#if CONFIG_MISC_FIXES - if (cm->seg.temporal_update) { - for (i = 0; i < PREDICTION_PROBS; i++) - fc->seg.pred_probs[i] = mode_mv_merge_probs(pre_fc->seg.pred_probs[i], - counts->seg.pred[i]); - - vpx_tree_merge_probs(vp10_segment_tree, pre_fc->seg.tree_probs, - counts->seg.tree_mispred, fc->seg.tree_probs); - } else { - vpx_tree_merge_probs(vp10_segment_tree, pre_fc->seg.tree_probs, - counts->seg.tree_total, fc->seg.tree_probs); - } - - for (i = 0; i < INTRA_MODES; ++i) - vpx_tree_merge_probs(vp10_intra_mode_tree, pre_fc->uv_mode_prob[i], - counts->uv_mode[i], fc->uv_mode_prob[i]); - - for (i = 0; i < PARTITION_CONTEXTS; i++) - vpx_tree_merge_probs(vp10_partition_tree, pre_fc->partition_prob[i], - counts->partition[i], fc->partition_prob[i]); -#endif -} - -static void set_default_lf_deltas(struct loopfilter *lf) { - lf->mode_ref_delta_enabled = 1; - lf->mode_ref_delta_update = 1; - - lf->ref_deltas[INTRA_FRAME] = 1; - lf->ref_deltas[LAST_FRAME] = 0; - lf->ref_deltas[GOLDEN_FRAME] = -1; - lf->ref_deltas[ALTREF_FRAME] = -1; - - lf->mode_deltas[0] = 0; - lf->mode_deltas[1] = 0; -} - -void vp10_setup_past_independence(VP10_COMMON *cm) { - 
// Reset the segment feature data to the default stats: - // Features disabled, 0, with delta coding (Default state). - struct loopfilter *const lf = &cm->lf; - - int i; - vp10_clearall_segfeatures(&cm->seg); - cm->seg.abs_delta = SEGMENT_DELTADATA; - - if (cm->last_frame_seg_map && !cm->frame_parallel_decode) - memset(cm->last_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols)); - - if (cm->current_frame_seg_map) - memset(cm->current_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols)); - - // Reset the mode ref deltas for loop filter - vp10_zero(lf->last_ref_deltas); - vp10_zero(lf->last_mode_deltas); - set_default_lf_deltas(lf); - - // To force update of the sharpness - lf->last_sharpness_level = -1; - - vp10_default_coef_probs(cm); - init_mode_probs(cm->fc); - vp10_init_mv_probs(cm); - cm->fc->initialized = 1; - - if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode || - cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL) { - // Reset all frame contexts. - for (i = 0; i < FRAME_CONTEXTS; ++i) - cm->frame_contexts[i] = *cm->fc; - } else if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT) { - // Reset only the frame context specified in the frame header. - cm->frame_contexts[cm->frame_context_idx] = *cm->fc; - } - - // prev_mip will only be allocated in encoder. - if (frame_is_intra_only(cm) && cm->prev_mip && !cm->frame_parallel_decode) - memset(cm->prev_mip, 0, - cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->prev_mip)); - - cm->frame_context_idx = 0; -} diff --git a/libs/libvpx/vp10/common/entropymode.h b/libs/libvpx/vp10/common/entropymode.h deleted file mode 100644 index 611d3ad132..0000000000 --- a/libs/libvpx/vp10/common/entropymode.h +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_COMMON_ENTROPYMODE_H_ -#define VP10_COMMON_ENTROPYMODE_H_ - -#include "vp10/common/entropy.h" -#include "vp10/common/entropymv.h" -#include "vp10/common/filter.h" -#include "vp10/common/seg_common.h" -#include "vpx_dsp/vpx_filter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define BLOCK_SIZE_GROUPS 4 - -#define TX_SIZE_CONTEXTS 2 - -#define INTER_OFFSET(mode) ((mode) - NEARESTMV) - -struct VP10Common; - -struct tx_probs { - vpx_prob p32x32[TX_SIZE_CONTEXTS][TX_SIZES - 1]; - vpx_prob p16x16[TX_SIZE_CONTEXTS][TX_SIZES - 2]; - vpx_prob p8x8[TX_SIZE_CONTEXTS][TX_SIZES - 3]; -}; - -struct tx_counts { - unsigned int p32x32[TX_SIZE_CONTEXTS][TX_SIZES]; - unsigned int p16x16[TX_SIZE_CONTEXTS][TX_SIZES - 1]; - unsigned int p8x8[TX_SIZE_CONTEXTS][TX_SIZES - 2]; - unsigned int tx_totals[TX_SIZES]; -}; - -struct seg_counts { - unsigned int tree_total[MAX_SEGMENTS]; - unsigned int tree_mispred[MAX_SEGMENTS]; - unsigned int pred[PREDICTION_PROBS][2]; -}; - -typedef struct frame_contexts { - vpx_prob y_mode_prob[BLOCK_SIZE_GROUPS][INTRA_MODES - 1]; - vpx_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1]; - vpx_prob partition_prob[PARTITION_CONTEXTS][PARTITION_TYPES - 1]; - vp10_coeff_probs_model coef_probs[TX_SIZES][PLANE_TYPES]; - vpx_prob switchable_interp_prob[SWITCHABLE_FILTER_CONTEXTS] - [SWITCHABLE_FILTERS - 1]; - vpx_prob inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1]; - vpx_prob intra_inter_prob[INTRA_INTER_CONTEXTS]; - vpx_prob comp_inter_prob[COMP_INTER_CONTEXTS]; - vpx_prob single_ref_prob[REF_CONTEXTS][2]; - vpx_prob comp_ref_prob[REF_CONTEXTS]; - struct tx_probs tx_probs; - vpx_prob skip_probs[SKIP_CONTEXTS]; - nmv_context nmvc; -#if CONFIG_MISC_FIXES - struct segmentation_probs seg; -#endif - vpx_prob 
intra_ext_tx_prob[EXT_TX_SIZES][TX_TYPES][TX_TYPES - 1]; - vpx_prob inter_ext_tx_prob[EXT_TX_SIZES][TX_TYPES - 1]; - int initialized; -} FRAME_CONTEXT; - -typedef struct FRAME_COUNTS { - unsigned int kf_y_mode[INTRA_MODES][INTRA_MODES][INTRA_MODES]; - unsigned int y_mode[BLOCK_SIZE_GROUPS][INTRA_MODES]; - unsigned int uv_mode[INTRA_MODES][INTRA_MODES]; - unsigned int partition[PARTITION_CONTEXTS][PARTITION_TYPES]; - vp10_coeff_count_model coef[TX_SIZES][PLANE_TYPES]; - unsigned int eob_branch[TX_SIZES][PLANE_TYPES][REF_TYPES] - [COEF_BANDS][COEFF_CONTEXTS]; - unsigned int switchable_interp[SWITCHABLE_FILTER_CONTEXTS] - [SWITCHABLE_FILTERS]; - unsigned int inter_mode[INTER_MODE_CONTEXTS][INTER_MODES]; - unsigned int intra_inter[INTRA_INTER_CONTEXTS][2]; - unsigned int comp_inter[COMP_INTER_CONTEXTS][2]; - unsigned int single_ref[REF_CONTEXTS][2][2]; - unsigned int comp_ref[REF_CONTEXTS][2]; - struct tx_counts tx; - unsigned int skip[SKIP_CONTEXTS][2]; - nmv_context_counts mv; -#if CONFIG_MISC_FIXES - struct seg_counts seg; -#endif - unsigned int intra_ext_tx[EXT_TX_SIZES][TX_TYPES][TX_TYPES]; - unsigned int inter_ext_tx[EXT_TX_SIZES][TX_TYPES]; -} FRAME_COUNTS; - -extern const vpx_prob vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES] - [INTRA_MODES - 1]; -#if !CONFIG_MISC_FIXES -extern const vpx_prob vp10_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1]; -extern const vpx_prob vp10_kf_partition_probs[PARTITION_CONTEXTS] - [PARTITION_TYPES - 1]; -#endif - -extern const vpx_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)]; -extern const vpx_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)]; -extern const vpx_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)]; -extern const vpx_tree_index vp10_switchable_interp_tree - [TREE_SIZE(SWITCHABLE_FILTERS)]; - - -void vp10_setup_past_independence(struct VP10Common *cm); - -void vp10_adapt_intra_frame_probs(struct VP10Common *cm); -void vp10_adapt_inter_frame_probs(struct VP10Common *cm); - -void 
vp10_tx_counts_to_branch_counts_32x32(const unsigned int *tx_count_32x32p, - unsigned int (*ct_32x32p)[2]); -void vp10_tx_counts_to_branch_counts_16x16(const unsigned int *tx_count_16x16p, - unsigned int (*ct_16x16p)[2]); -void vp10_tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p, - unsigned int (*ct_8x8p)[2]); - -extern const vpx_tree_index - vp10_ext_tx_tree[TREE_SIZE(TX_TYPES)]; - -static INLINE int vp10_ceil_log2(int n) { - int i = 1, p = 2; - while (p < n) { - i++; - p = p << 1; - } - return i; -} - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_ENTROPYMODE_H_ diff --git a/libs/libvpx/vp10/common/entropymv.c b/libs/libvpx/vp10/common/entropymv.c deleted file mode 100644 index a9946ee152..0000000000 --- a/libs/libvpx/vp10/common/entropymv.c +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "vp10/common/onyxc_int.h" -#include "vp10/common/entropymv.h" - -// Integer pel reference mv threshold for use of high-precision 1/8 mv -#define COMPANDED_MVREF_THRESH 8 - -const vpx_tree_index vp10_mv_joint_tree[TREE_SIZE(MV_JOINTS)] = { - -MV_JOINT_ZERO, 2, - -MV_JOINT_HNZVZ, 4, - -MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ -}; - -const vpx_tree_index vp10_mv_class_tree[TREE_SIZE(MV_CLASSES)] = { - -MV_CLASS_0, 2, - -MV_CLASS_1, 4, - 6, 8, - -MV_CLASS_2, -MV_CLASS_3, - 10, 12, - -MV_CLASS_4, -MV_CLASS_5, - -MV_CLASS_6, 14, - 16, 18, - -MV_CLASS_7, -MV_CLASS_8, - -MV_CLASS_9, -MV_CLASS_10, -}; - -const vpx_tree_index vp10_mv_class0_tree[TREE_SIZE(CLASS0_SIZE)] = { - -0, -1, -}; - -const vpx_tree_index vp10_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = { - -0, 2, - -1, 4, - -2, -3 -}; - -static const nmv_context default_nmv_context = { - {32, 64, 96}, - { - { // Vertical component - 128, // sign - {224, 144, 192, 168, 192, 176, 192, 198, 198, 245}, // class - {216}, // class0 - {136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, // bits - {{128, 128, 64}, {96, 112, 64}}, // class0_fp - {64, 96, 64}, // fp - 160, // class0_hp bit - 128, // hp - }, - { // Horizontal component - 128, // sign - {216, 128, 176, 160, 176, 176, 192, 198, 198, 208}, // class - {208}, // class0 - {136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, // bits - {{128, 128, 64}, {96, 112, 64}}, // class0_fp - {64, 96, 64}, // fp - 160, // class0_hp bit - 128, // hp - } - }, -}; - -static const uint8_t log_in_base_2[] = { - 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 
9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10 -}; - -static INLINE int mv_class_base(MV_CLASS_TYPE c) { - return c ? CLASS0_SIZE << (c + 2) : 0; -} - -MV_CLASS_TYPE vp10_get_mv_class(int z, int *offset) { - const MV_CLASS_TYPE c = (z >= CLASS0_SIZE * 4096) ? - MV_CLASS_10 : (MV_CLASS_TYPE)log_in_base_2[z >> 3]; - if (offset) - *offset = z - mv_class_base(c); - return c; -} - -int vp10_use_mv_hp(const MV *ref) { -#if CONFIG_MISC_FIXES - (void) ref; - return 1; -#else - return (abs(ref->row) >> 3) < COMPANDED_MVREF_THRESH && - (abs(ref->col) >> 3) < COMPANDED_MVREF_THRESH; -#endif -} - -static void inc_mv_component(int v, nmv_component_counts *comp_counts, - int incr, int usehp) { - int s, z, c, o, d, e, f; - assert(v != 0); /* should not be zero */ - s = v < 0; - comp_counts->sign[s] += incr; - z = (s ? 
-v : v) - 1; /* magnitude - 1 */ - - c = vp10_get_mv_class(z, &o); - comp_counts->classes[c] += incr; - - d = (o >> 3); /* int mv data */ - f = (o >> 1) & 3; /* fractional pel mv data */ - e = (o & 1); /* high precision mv data */ - - if (c == MV_CLASS_0) { - comp_counts->class0[d] += incr; - comp_counts->class0_fp[d][f] += incr; - comp_counts->class0_hp[e] += usehp * incr; - } else { - int i; - int b = c + CLASS0_BITS - 1; // number of bits - for (i = 0; i < b; ++i) - comp_counts->bits[i][((d >> i) & 1)] += incr; - comp_counts->fp[f] += incr; - comp_counts->hp[e] += usehp * incr; - } -} - -void vp10_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) { - if (counts != NULL) { - const MV_JOINT_TYPE j = vp10_get_mv_joint(mv); - ++counts->joints[j]; - - if (mv_joint_vertical(j)) { - inc_mv_component(mv->row, &counts->comps[0], 1, - !CONFIG_MISC_FIXES || usehp); - } - - if (mv_joint_horizontal(j)) { - inc_mv_component(mv->col, &counts->comps[1], 1, - !CONFIG_MISC_FIXES || usehp); - } - } -} - -void vp10_adapt_mv_probs(VP10_COMMON *cm, int allow_hp) { - int i, j; - - nmv_context *fc = &cm->fc->nmvc; - const nmv_context *pre_fc = &cm->frame_contexts[cm->frame_context_idx].nmvc; - const nmv_context_counts *counts = &cm->counts.mv; - - vpx_tree_merge_probs(vp10_mv_joint_tree, pre_fc->joints, counts->joints, - fc->joints); - - for (i = 0; i < 2; ++i) { - nmv_component *comp = &fc->comps[i]; - const nmv_component *pre_comp = &pre_fc->comps[i]; - const nmv_component_counts *c = &counts->comps[i]; - - comp->sign = mode_mv_merge_probs(pre_comp->sign, c->sign); - vpx_tree_merge_probs(vp10_mv_class_tree, pre_comp->classes, c->classes, - comp->classes); - vpx_tree_merge_probs(vp10_mv_class0_tree, pre_comp->class0, c->class0, - comp->class0); - - for (j = 0; j < MV_OFFSET_BITS; ++j) - comp->bits[j] = mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]); - - for (j = 0; j < CLASS0_SIZE; ++j) - vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->class0_fp[j], - 
c->class0_fp[j], comp->class0_fp[j]); - - vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->fp, c->fp, comp->fp); - - if (allow_hp) { - comp->class0_hp = mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp); - comp->hp = mode_mv_merge_probs(pre_comp->hp, c->hp); - } - } -} - -void vp10_init_mv_probs(VP10_COMMON *cm) { - cm->fc->nmvc = default_nmv_context; -} diff --git a/libs/libvpx/vp10/common/entropymv.h b/libs/libvpx/vp10/common/entropymv.h deleted file mode 100644 index d1eb95c579..0000000000 --- a/libs/libvpx/vp10/common/entropymv.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#ifndef VP10_COMMON_ENTROPYMV_H_ -#define VP10_COMMON_ENTROPYMV_H_ - -#include "./vpx_config.h" - -#include "vpx_dsp/prob.h" - -#include "vp10/common/mv.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct VP10Common; - -void vp10_init_mv_probs(struct VP10Common *cm); - -void vp10_adapt_mv_probs(struct VP10Common *cm, int usehp); -int vp10_use_mv_hp(const MV *ref); - -#define MV_UPDATE_PROB 252 - -/* Symbols for coding which components are zero jointly */ -#define MV_JOINTS 4 -typedef enum { - MV_JOINT_ZERO = 0, /* Zero vector */ - MV_JOINT_HNZVZ = 1, /* Vert zero, hor nonzero */ - MV_JOINT_HZVNZ = 2, /* Hor zero, vert nonzero */ - MV_JOINT_HNZVNZ = 3, /* Both components nonzero */ -} MV_JOINT_TYPE; - -static INLINE int mv_joint_vertical(MV_JOINT_TYPE type) { - return type == MV_JOINT_HZVNZ || type == MV_JOINT_HNZVNZ; -} - -static INLINE int mv_joint_horizontal(MV_JOINT_TYPE type) { - return type == MV_JOINT_HNZVZ || type == MV_JOINT_HNZVNZ; -} - -/* Symbols for coding magnitude class 
of nonzero components */ -#define MV_CLASSES 11 -typedef enum { - MV_CLASS_0 = 0, /* (0, 2] integer pel */ - MV_CLASS_1 = 1, /* (2, 4] integer pel */ - MV_CLASS_2 = 2, /* (4, 8] integer pel */ - MV_CLASS_3 = 3, /* (8, 16] integer pel */ - MV_CLASS_4 = 4, /* (16, 32] integer pel */ - MV_CLASS_5 = 5, /* (32, 64] integer pel */ - MV_CLASS_6 = 6, /* (64, 128] integer pel */ - MV_CLASS_7 = 7, /* (128, 256] integer pel */ - MV_CLASS_8 = 8, /* (256, 512] integer pel */ - MV_CLASS_9 = 9, /* (512, 1024] integer pel */ - MV_CLASS_10 = 10, /* (1024,2048] integer pel */ -} MV_CLASS_TYPE; - -#define CLASS0_BITS 1 /* bits at integer precision for class 0 */ -#define CLASS0_SIZE (1 << CLASS0_BITS) -#define MV_OFFSET_BITS (MV_CLASSES + CLASS0_BITS - 2) -#define MV_FP_SIZE 4 - -#define MV_MAX_BITS (MV_CLASSES + CLASS0_BITS + 2) -#define MV_MAX ((1 << MV_MAX_BITS) - 1) -#define MV_VALS ((MV_MAX << 1) + 1) - -#define MV_IN_USE_BITS 14 -#define MV_UPP ((1 << MV_IN_USE_BITS) - 1) -#define MV_LOW (-(1 << MV_IN_USE_BITS)) - -extern const vpx_tree_index vp10_mv_joint_tree[]; -extern const vpx_tree_index vp10_mv_class_tree[]; -extern const vpx_tree_index vp10_mv_class0_tree[]; -extern const vpx_tree_index vp10_mv_fp_tree[]; - -typedef struct { - vpx_prob sign; - vpx_prob classes[MV_CLASSES - 1]; - vpx_prob class0[CLASS0_SIZE - 1]; - vpx_prob bits[MV_OFFSET_BITS]; - vpx_prob class0_fp[CLASS0_SIZE][MV_FP_SIZE - 1]; - vpx_prob fp[MV_FP_SIZE - 1]; - vpx_prob class0_hp; - vpx_prob hp; -} nmv_component; - -typedef struct { - vpx_prob joints[MV_JOINTS - 1]; - nmv_component comps[2]; -} nmv_context; - -static INLINE MV_JOINT_TYPE vp10_get_mv_joint(const MV *mv) { - if (mv->row == 0) { - return mv->col == 0 ? MV_JOINT_ZERO : MV_JOINT_HNZVZ; - } else { - return mv->col == 0 ? 
MV_JOINT_HZVNZ : MV_JOINT_HNZVNZ; - } -} - -MV_CLASS_TYPE vp10_get_mv_class(int z, int *offset); - -typedef struct { - unsigned int sign[2]; - unsigned int classes[MV_CLASSES]; - unsigned int class0[CLASS0_SIZE]; - unsigned int bits[MV_OFFSET_BITS][2]; - unsigned int class0_fp[CLASS0_SIZE][MV_FP_SIZE]; - unsigned int fp[MV_FP_SIZE]; - unsigned int class0_hp[2]; - unsigned int hp[2]; -} nmv_component_counts; - -typedef struct { - unsigned int joints[MV_JOINTS]; - nmv_component_counts comps[2]; -} nmv_context_counts; - -void vp10_inc_mv(const MV *mv, nmv_context_counts *mvctx, const int usehp); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_ENTROPYMV_H_ diff --git a/libs/libvpx/vp10/common/enums.h b/libs/libvpx/vp10/common/enums.h deleted file mode 100644 index 18c7d1629e..0000000000 --- a/libs/libvpx/vp10/common/enums.h +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_COMMON_ENUMS_H_ -#define VP10_COMMON_ENUMS_H_ - -#include "./vpx_config.h" -#include "vpx/vpx_integer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define MI_SIZE_LOG2 3 -#define MI_BLOCK_SIZE_LOG2 (6 - MI_SIZE_LOG2) // 64 = 2^6 - -#define MI_SIZE (1 << MI_SIZE_LOG2) // pixels per mi-unit -#define MI_BLOCK_SIZE (1 << MI_BLOCK_SIZE_LOG2) // mi-units per max block - -#define MI_MASK (MI_BLOCK_SIZE - 1) - -// Bitstream profiles indicated by 2-3 bits in the uncompressed header. -// 00: Profile 0. 8-bit 4:2:0 only. -// 10: Profile 1. 8-bit 4:4:4, 4:2:2, and 4:4:0. -// 01: Profile 2. 10-bit and 12-bit color only, with 4:2:0 sampling. -// 110: Profile 3. 
10-bit and 12-bit color only, with 4:2:2/4:4:4/4:4:0 -// sampling. -// 111: Undefined profile. -typedef enum BITSTREAM_PROFILE { - PROFILE_0, - PROFILE_1, - PROFILE_2, - PROFILE_3, - MAX_PROFILES -} BITSTREAM_PROFILE; - -#define BLOCK_4X4 0 -#define BLOCK_4X8 1 -#define BLOCK_8X4 2 -#define BLOCK_8X8 3 -#define BLOCK_8X16 4 -#define BLOCK_16X8 5 -#define BLOCK_16X16 6 -#define BLOCK_16X32 7 -#define BLOCK_32X16 8 -#define BLOCK_32X32 9 -#define BLOCK_32X64 10 -#define BLOCK_64X32 11 -#define BLOCK_64X64 12 -#define BLOCK_SIZES 13 -#define BLOCK_INVALID BLOCK_SIZES -typedef uint8_t BLOCK_SIZE; - -typedef enum PARTITION_TYPE { - PARTITION_NONE, - PARTITION_HORZ, - PARTITION_VERT, - PARTITION_SPLIT, - PARTITION_TYPES, - PARTITION_INVALID = PARTITION_TYPES -} PARTITION_TYPE; - -typedef char PARTITION_CONTEXT; -#define PARTITION_PLOFFSET 4 // number of probability models per block size -#define PARTITION_CONTEXTS (4 * PARTITION_PLOFFSET) - -// block transform size -typedef uint8_t TX_SIZE; -#define TX_4X4 ((TX_SIZE)0) // 4x4 transform -#define TX_8X8 ((TX_SIZE)1) // 8x8 transform -#define TX_16X16 ((TX_SIZE)2) // 16x16 transform -#define TX_32X32 ((TX_SIZE)3) // 32x32 transform -#define TX_SIZES ((TX_SIZE)4) - -// frame transform mode -typedef enum { - ONLY_4X4 = 0, // only 4x4 transform used - ALLOW_8X8 = 1, // allow block transform size up to 8x8 - ALLOW_16X16 = 2, // allow block transform size up to 16x16 - ALLOW_32X32 = 3, // allow block transform size up to 32x32 - TX_MODE_SELECT = 4, // transform specified for each block - TX_MODES = 5, -} TX_MODE; - -typedef enum { - DCT_DCT = 0, // DCT in both horizontal and vertical - ADST_DCT = 1, // ADST in vertical, DCT in horizontal - DCT_ADST = 2, // DCT in vertical, ADST in horizontal - ADST_ADST = 3, // ADST in both directions - TX_TYPES = 4 -} TX_TYPE; - -#define EXT_TX_SIZES 3 // number of sizes that use extended transforms - -typedef enum { - VP9_LAST_FLAG = 1 << 0, - VP9_GOLD_FLAG = 1 << 1, - VP9_ALT_FLAG = 1 << 2, 
-} VP9_REFFRAME; - -typedef enum { - PLANE_TYPE_Y = 0, - PLANE_TYPE_UV = 1, - PLANE_TYPES -} PLANE_TYPE; - -#define DC_PRED 0 // Average of above and left pixels -#define V_PRED 1 // Vertical -#define H_PRED 2 // Horizontal -#define D45_PRED 3 // Directional 45 deg = round(arctan(1/1) * 180/pi) -#define D135_PRED 4 // Directional 135 deg = 180 - 45 -#define D117_PRED 5 // Directional 117 deg = 180 - 63 -#define D153_PRED 6 // Directional 153 deg = 180 - 27 -#define D207_PRED 7 // Directional 207 deg = 180 + 27 -#define D63_PRED 8 // Directional 63 deg = round(arctan(2/1) * 180/pi) -#define TM_PRED 9 // True-motion -#define NEARESTMV 10 -#define NEARMV 11 -#define ZEROMV 12 -#define NEWMV 13 -#define MB_MODE_COUNT 14 -typedef uint8_t PREDICTION_MODE; - -#define INTRA_MODES (TM_PRED + 1) - -#define INTER_MODES (1 + NEWMV - NEARESTMV) - -#define SKIP_CONTEXTS 3 -#define INTER_MODE_CONTEXTS 7 - -/* Segment Feature Masks */ -#define MAX_MV_REF_CANDIDATES 2 - -#define INTRA_INTER_CONTEXTS 4 -#define COMP_INTER_CONTEXTS 5 -#define REF_CONTEXTS 5 - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_ENUMS_H_ diff --git a/libs/libvpx/vp10/common/filter.c b/libs/libvpx/vp10/common/filter.c deleted file mode 100644 index dda279f132..0000000000 --- a/libs/libvpx/vp10/common/filter.c +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include - -#include "vp10/common/filter.h" - -DECLARE_ALIGNED(256, static const InterpKernel, - bilinear_filters[SUBPEL_SHIFTS]) = { - { 0, 0, 0, 128, 0, 0, 0, 0 }, - { 0, 0, 0, 120, 8, 0, 0, 0 }, - { 0, 0, 0, 112, 16, 0, 0, 0 }, - { 0, 0, 0, 104, 24, 0, 0, 0 }, - { 0, 0, 0, 96, 32, 0, 0, 0 }, - { 0, 0, 0, 88, 40, 0, 0, 0 }, - { 0, 0, 0, 80, 48, 0, 0, 0 }, - { 0, 0, 0, 72, 56, 0, 0, 0 }, - { 0, 0, 0, 64, 64, 0, 0, 0 }, - { 0, 0, 0, 56, 72, 0, 0, 0 }, - { 0, 0, 0, 48, 80, 0, 0, 0 }, - { 0, 0, 0, 40, 88, 0, 0, 0 }, - { 0, 0, 0, 32, 96, 0, 0, 0 }, - { 0, 0, 0, 24, 104, 0, 0, 0 }, - { 0, 0, 0, 16, 112, 0, 0, 0 }, - { 0, 0, 0, 8, 120, 0, 0, 0 } -}; - -// Lagrangian interpolation filter -DECLARE_ALIGNED(256, static const InterpKernel, - sub_pel_filters_8[SUBPEL_SHIFTS]) = { - { 0, 0, 0, 128, 0, 0, 0, 0}, - { 0, 1, -5, 126, 8, -3, 1, 0}, - { -1, 3, -10, 122, 18, -6, 2, 0}, - { -1, 4, -13, 118, 27, -9, 3, -1}, - { -1, 4, -16, 112, 37, -11, 4, -1}, - { -1, 5, -18, 105, 48, -14, 4, -1}, - { -1, 5, -19, 97, 58, -16, 5, -1}, - { -1, 6, -19, 88, 68, -18, 5, -1}, - { -1, 6, -19, 78, 78, -19, 6, -1}, - { -1, 5, -18, 68, 88, -19, 6, -1}, - { -1, 5, -16, 58, 97, -19, 5, -1}, - { -1, 4, -14, 48, 105, -18, 5, -1}, - { -1, 4, -11, 37, 112, -16, 4, -1}, - { -1, 3, -9, 27, 118, -13, 4, -1}, - { 0, 2, -6, 18, 122, -10, 3, -1}, - { 0, 1, -3, 8, 126, -5, 1, 0} -}; - -// DCT based filter -DECLARE_ALIGNED(256, static const InterpKernel, - sub_pel_filters_8s[SUBPEL_SHIFTS]) = { - {0, 0, 0, 128, 0, 0, 0, 0}, - {-1, 3, -7, 127, 8, -3, 1, 0}, - {-2, 5, -13, 125, 17, -6, 3, -1}, - {-3, 7, -17, 121, 27, -10, 5, -2}, - {-4, 9, -20, 115, 37, -13, 6, -2}, - {-4, 10, -23, 108, 48, -16, 8, -3}, - {-4, 10, -24, 100, 59, -19, 9, -3}, - {-4, 11, -24, 90, 70, -21, 10, -4}, - {-4, 11, -23, 80, 80, -23, 11, -4}, - {-4, 10, -21, 70, 90, -24, 11, -4}, - {-3, 9, -19, 59, 100, -24, 10, -4}, - {-3, 8, -16, 48, 108, -23, 10, -4}, - {-2, 6, -13, 37, 115, -20, 9, -4}, - {-2, 5, -10, 27, 121, -17, 7, -3}, - 
{-1, 3, -6, 17, 125, -13, 5, -2}, - {0, 1, -3, 8, 127, -7, 3, -1} -}; - -// freqmultiplier = 0.5 -DECLARE_ALIGNED(256, static const InterpKernel, - sub_pel_filters_8lp[SUBPEL_SHIFTS]) = { - { 0, 0, 0, 128, 0, 0, 0, 0}, - {-3, -1, 32, 64, 38, 1, -3, 0}, - {-2, -2, 29, 63, 41, 2, -3, 0}, - {-2, -2, 26, 63, 43, 4, -4, 0}, - {-2, -3, 24, 62, 46, 5, -4, 0}, - {-2, -3, 21, 60, 49, 7, -4, 0}, - {-1, -4, 18, 59, 51, 9, -4, 0}, - {-1, -4, 16, 57, 53, 12, -4, -1}, - {-1, -4, 14, 55, 55, 14, -4, -1}, - {-1, -4, 12, 53, 57, 16, -4, -1}, - { 0, -4, 9, 51, 59, 18, -4, -1}, - { 0, -4, 7, 49, 60, 21, -3, -2}, - { 0, -4, 5, 46, 62, 24, -3, -2}, - { 0, -4, 4, 43, 63, 26, -2, -2}, - { 0, -3, 2, 41, 63, 29, -2, -2}, - { 0, -3, 1, 38, 64, 32, -1, -3} -}; - - -const InterpKernel *vp10_filter_kernels[4] = { - sub_pel_filters_8, - sub_pel_filters_8lp, - sub_pel_filters_8s, - bilinear_filters -}; diff --git a/libs/libvpx/vp10/common/filter.h b/libs/libvpx/vp10/common/filter.h deleted file mode 100644 index 826cd0386e..0000000000 --- a/libs/libvpx/vp10/common/filter.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2011 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_COMMON_FILTER_H_ -#define VP10_COMMON_FILTER_H_ - -#include "./vpx_config.h" -#include "vpx/vpx_integer.h" -#include "vpx_dsp/vpx_filter.h" -#include "vpx_ports/mem.h" - - -#ifdef __cplusplus -extern "C" { -#endif - -#define EIGHTTAP 0 -#define EIGHTTAP_SMOOTH 1 -#define EIGHTTAP_SHARP 2 -#define SWITCHABLE_FILTERS 3 /* Number of switchable filters */ -#define BILINEAR 3 -// The codec can operate in four possible inter prediction filter mode: -// 8-tap, 8-tap-smooth, 8-tap-sharp, and switching between the three. -#define SWITCHABLE_FILTER_CONTEXTS (SWITCHABLE_FILTERS + 1) -#define SWITCHABLE 4 /* should be the last one */ - -typedef uint8_t INTERP_FILTER; - -extern const InterpKernel *vp10_filter_kernels[4]; - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_FILTER_H_ diff --git a/libs/libvpx/vp10/common/frame_buffers.c b/libs/libvpx/vp10/common/frame_buffers.c deleted file mode 100644 index 794c80fde3..0000000000 --- a/libs/libvpx/vp10/common/frame_buffers.c +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include - -#include "vp10/common/frame_buffers.h" -#include "vpx_mem/vpx_mem.h" - -int vp10_alloc_internal_frame_buffers(InternalFrameBufferList *list) { - assert(list != NULL); - vp10_free_internal_frame_buffers(list); - - list->num_internal_frame_buffers = - VP9_MAXIMUM_REF_BUFFERS + VPX_MAXIMUM_WORK_BUFFERS; - list->int_fb = - (InternalFrameBuffer *)vpx_calloc(list->num_internal_frame_buffers, - sizeof(*list->int_fb)); - return (list->int_fb == NULL); -} - -void vp10_free_internal_frame_buffers(InternalFrameBufferList *list) { - int i; - - assert(list != NULL); - - for (i = 0; i < list->num_internal_frame_buffers; ++i) { - vpx_free(list->int_fb[i].data); - list->int_fb[i].data = NULL; - } - vpx_free(list->int_fb); - list->int_fb = NULL; -} - -int vp10_get_frame_buffer(void *cb_priv, size_t min_size, - vpx_codec_frame_buffer_t *fb) { - int i; - InternalFrameBufferList *const int_fb_list = - (InternalFrameBufferList *)cb_priv; - if (int_fb_list == NULL) - return -1; - - // Find a free frame buffer. - for (i = 0; i < int_fb_list->num_internal_frame_buffers; ++i) { - if (!int_fb_list->int_fb[i].in_use) - break; - } - - if (i == int_fb_list->num_internal_frame_buffers) - return -1; - - if (int_fb_list->int_fb[i].size < min_size) { - int_fb_list->int_fb[i].data = - (uint8_t *)vpx_realloc(int_fb_list->int_fb[i].data, min_size); - if (!int_fb_list->int_fb[i].data) - return -1; - - // This memset is needed for fixing valgrind error from C loop filter - // due to access uninitialized memory in frame border. It could be - // removed if border is totally removed. - memset(int_fb_list->int_fb[i].data, 0, min_size); - int_fb_list->int_fb[i].size = min_size; - } - - fb->data = int_fb_list->int_fb[i].data; - fb->size = int_fb_list->int_fb[i].size; - int_fb_list->int_fb[i].in_use = 1; - - // Set the frame buffer's private data to point at the internal frame buffer. 
- fb->priv = &int_fb_list->int_fb[i]; - return 0; -} - -int vp10_release_frame_buffer(void *cb_priv, vpx_codec_frame_buffer_t *fb) { - InternalFrameBuffer *const int_fb = (InternalFrameBuffer *)fb->priv; - (void)cb_priv; - if (int_fb) - int_fb->in_use = 0; - return 0; -} diff --git a/libs/libvpx/vp10/common/frame_buffers.h b/libs/libvpx/vp10/common/frame_buffers.h deleted file mode 100644 index 729ebafb02..0000000000 --- a/libs/libvpx/vp10/common/frame_buffers.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_COMMON_FRAME_BUFFERS_H_ -#define VP10_COMMON_FRAME_BUFFERS_H_ - -#include "vpx/vpx_frame_buffer.h" -#include "vpx/vpx_integer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct InternalFrameBuffer { - uint8_t *data; - size_t size; - int in_use; -} InternalFrameBuffer; - -typedef struct InternalFrameBufferList { - int num_internal_frame_buffers; - InternalFrameBuffer *int_fb; -} InternalFrameBufferList; - -// Initializes |list|. Returns 0 on success. -int vp10_alloc_internal_frame_buffers(InternalFrameBufferList *list); - -// Free any data allocated to the frame buffers. -void vp10_free_internal_frame_buffers(InternalFrameBufferList *list); - -// Callback used by libvpx to request an external frame buffer. |cb_priv| -// Callback private data, which points to an InternalFrameBufferList. -// |min_size| is the minimum size in bytes needed to decode the next frame. -// |fb| pointer to the frame buffer. 
-int vp10_get_frame_buffer(void *cb_priv, size_t min_size, - vpx_codec_frame_buffer_t *fb); - -// Callback used by libvpx when there are no references to the frame buffer. -// |cb_priv| is not used. |fb| pointer to the frame buffer. -int vp10_release_frame_buffer(void *cb_priv, vpx_codec_frame_buffer_t *fb); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_FRAME_BUFFERS_H_ diff --git a/libs/libvpx/vp10/common/idct.c b/libs/libvpx/vp10/common/idct.c deleted file mode 100644 index 5ee15c862d..0000000000 --- a/libs/libvpx/vp10/common/idct.c +++ /dev/null @@ -1,498 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include - -#include "./vp10_rtcd.h" -#include "./vpx_dsp_rtcd.h" -#include "vp10/common/blockd.h" -#include "vp10/common/idct.h" -#include "vpx_dsp/inv_txfm.h" -#include "vpx_ports/mem.h" - -void vp10_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride, - int tx_type) { - const transform_2d IHT_4[] = { - { idct4_c, idct4_c }, // DCT_DCT = 0 - { iadst4_c, idct4_c }, // ADST_DCT = 1 - { idct4_c, iadst4_c }, // DCT_ADST = 2 - { iadst4_c, iadst4_c } // ADST_ADST = 3 - }; - - int i, j; - tran_low_t out[4 * 4]; - tran_low_t *outptr = out; - tran_low_t temp_in[4], temp_out[4]; - - // inverse transform row vectors - for (i = 0; i < 4; ++i) { - IHT_4[tx_type].rows(input, outptr); - input += 4; - outptr += 4; - } - - // inverse transform column vectors - for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = out[j * 4 + i]; - IHT_4[tx_type].cols(temp_in, temp_out); - for (j = 0; j < 4; ++j) { - dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], - ROUND_POWER_OF_TWO(temp_out[j], 4)); - } - } -} - -static const transform_2d IHT_8[] = { - { idct8_c, idct8_c }, // DCT_DCT = 0 - { iadst8_c, idct8_c }, // ADST_DCT = 1 - { idct8_c, iadst8_c }, // DCT_ADST = 2 - { iadst8_c, iadst8_c } // ADST_ADST = 3 -}; - -void vp10_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride, - int tx_type) { - int i, j; - tran_low_t out[8 * 8]; - tran_low_t *outptr = out; - tran_low_t temp_in[8], temp_out[8]; - const transform_2d ht = IHT_8[tx_type]; - - // inverse transform row vectors - for (i = 0; i < 8; ++i) { - ht.rows(input, outptr); - input += 8; - outptr += 8; - } - - // inverse transform column vectors - for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; - ht.cols(temp_in, temp_out); - for (j = 0; j < 8; ++j) { - dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], - ROUND_POWER_OF_TWO(temp_out[j], 5)); - } - } -} - -static const transform_2d IHT_16[] = { - { idct16_c, idct16_c }, // DCT_DCT = 0 - 
{ iadst16_c, idct16_c }, // ADST_DCT = 1 - { idct16_c, iadst16_c }, // DCT_ADST = 2 - { iadst16_c, iadst16_c } // ADST_ADST = 3 -}; - -void vp10_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride, - int tx_type) { - int i, j; - tran_low_t out[16 * 16]; - tran_low_t *outptr = out; - tran_low_t temp_in[16], temp_out[16]; - const transform_2d ht = IHT_16[tx_type]; - - // Rows - for (i = 0; i < 16; ++i) { - ht.rows(input, outptr); - input += 16; - outptr += 16; - } - - // Columns - for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j * 16 + i]; - ht.cols(temp_in, temp_out); - for (j = 0; j < 16; ++j) { - dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], - ROUND_POWER_OF_TWO(temp_out[j], 6)); - } - } -} - -// idct -void vp10_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride, - int eob) { - if (eob > 1) - vpx_idct4x4_16_add(input, dest, stride); - else - vpx_idct4x4_1_add(input, dest, stride); -} - - -void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride, - int eob) { - if (eob > 1) - vpx_iwht4x4_16_add(input, dest, stride); - else - vpx_iwht4x4_1_add(input, dest, stride); -} - -void vp10_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride, - int eob) { - // If dc is 1, then input[0] is the reconstructed value, do not need - // dequantization. Also, when dc is 1, dc is counted in eobs, namely eobs >=1. - - // The calculation can be simplified if there are not many non-zero dct - // coefficients. Use eobs to decide what to do. - // TODO(yunqingwang): "eobs = 1" case is also handled in vp10_short_idct8x8_c. - // Combine that with code here. 
- if (eob == 1) - // DC only DCT coefficient - vpx_idct8x8_1_add(input, dest, stride); - else if (eob <= 12) - vpx_idct8x8_12_add(input, dest, stride); - else - vpx_idct8x8_64_add(input, dest, stride); -} - -void vp10_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride, - int eob) { - /* The calculation can be simplified if there are not many non-zero dct - * coefficients. Use eobs to separate different cases. */ - if (eob == 1) - /* DC only DCT coefficient. */ - vpx_idct16x16_1_add(input, dest, stride); - else if (eob <= 10) - vpx_idct16x16_10_add(input, dest, stride); - else - vpx_idct16x16_256_add(input, dest, stride); -} - -void vp10_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride, - int eob) { - if (eob == 1) - vpx_idct32x32_1_add(input, dest, stride); - else if (eob <= 34) - // non-zero coeff only in upper-left 8x8 - vpx_idct32x32_34_add(input, dest, stride); - else - vpx_idct32x32_1024_add(input, dest, stride); -} - -void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, - int stride, int eob, TX_TYPE tx_type, int lossless) { - if (lossless) { - assert(tx_type == DCT_DCT); - vp10_iwht4x4_add(input, dest, stride, eob); - } else { - switch (tx_type) { - case DCT_DCT: - vp10_idct4x4_add(input, dest, stride, eob); - break; - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - vp10_iht4x4_16_add(input, dest, stride, tx_type); - break; - default: - assert(0); - break; - } - } -} - -void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, - int stride, int eob, TX_TYPE tx_type) { - switch (tx_type) { - case DCT_DCT: - vp10_idct8x8_add(input, dest, stride, eob); - break; - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - vp10_iht8x8_64_add(input, dest, stride, tx_type); - break; - default: - assert(0); - break; - } -} - -void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, - int stride, int eob, TX_TYPE tx_type) { - switch (tx_type) { - case DCT_DCT: - vp10_idct16x16_add(input, dest, stride, 
eob); - break; - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - vp10_iht16x16_256_add(input, dest, stride, tx_type); - break; - default: - assert(0); - break; - } -} - -void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, - int stride, int eob, TX_TYPE tx_type) { - switch (tx_type) { - case DCT_DCT: - vp10_idct32x32_add(input, dest, stride, eob); - break; - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - assert(0); - break; - default: - assert(0); - break; - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, - int stride, int tx_type, int bd) { - const highbd_transform_2d IHT_4[] = { - { vpx_highbd_idct4_c, vpx_highbd_idct4_c }, // DCT_DCT = 0 - { vpx_highbd_iadst4_c, vpx_highbd_idct4_c }, // ADST_DCT = 1 - { vpx_highbd_idct4_c, vpx_highbd_iadst4_c }, // DCT_ADST = 2 - { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c } // ADST_ADST = 3 - }; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - - int i, j; - tran_low_t out[4 * 4]; - tran_low_t *outptr = out; - tran_low_t temp_in[4], temp_out[4]; - - // Inverse transform row vectors. - for (i = 0; i < 4; ++i) { - IHT_4[tx_type].rows(input, outptr, bd); - input += 4; - outptr += 4; - } - - // Inverse transform column vectors. 
- for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = out[j * 4 + i]; - IHT_4[tx_type].cols(temp_in, temp_out, bd); - for (j = 0; j < 4; ++j) { - dest[j * stride + i] = highbd_clip_pixel_add( - dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd); - } - } -} - -static const highbd_transform_2d HIGH_IHT_8[] = { - { vpx_highbd_idct8_c, vpx_highbd_idct8_c }, // DCT_DCT = 0 - { vpx_highbd_iadst8_c, vpx_highbd_idct8_c }, // ADST_DCT = 1 - { vpx_highbd_idct8_c, vpx_highbd_iadst8_c }, // DCT_ADST = 2 - { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c } // ADST_ADST = 3 -}; - -void vp10_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8, - int stride, int tx_type, int bd) { - int i, j; - tran_low_t out[8 * 8]; - tran_low_t *outptr = out; - tran_low_t temp_in[8], temp_out[8]; - const highbd_transform_2d ht = HIGH_IHT_8[tx_type]; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - - // Inverse transform row vectors. - for (i = 0; i < 8; ++i) { - ht.rows(input, outptr, bd); - input += 8; - outptr += 8; - } - - // Inverse transform column vectors. 
- for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; - ht.cols(temp_in, temp_out, bd); - for (j = 0; j < 8; ++j) { - dest[j * stride + i] = highbd_clip_pixel_add( - dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd); - } - } -} - -static const highbd_transform_2d HIGH_IHT_16[] = { - { vpx_highbd_idct16_c, vpx_highbd_idct16_c }, // DCT_DCT = 0 - { vpx_highbd_iadst16_c, vpx_highbd_idct16_c }, // ADST_DCT = 1 - { vpx_highbd_idct16_c, vpx_highbd_iadst16_c }, // DCT_ADST = 2 - { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c } // ADST_ADST = 3 -}; - -void vp10_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8, - int stride, int tx_type, int bd) { - int i, j; - tran_low_t out[16 * 16]; - tran_low_t *outptr = out; - tran_low_t temp_in[16], temp_out[16]; - const highbd_transform_2d ht = HIGH_IHT_16[tx_type]; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - - // Rows - for (i = 0; i < 16; ++i) { - ht.rows(input, outptr, bd); - input += 16; - outptr += 16; - } - - // Columns - for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j * 16 + i]; - ht.cols(temp_in, temp_out, bd); - for (j = 0; j < 16; ++j) { - dest[j * stride + i] = highbd_clip_pixel_add( - dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd); - } - } -} - -// idct -void vp10_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride, - int eob, int bd) { - if (eob > 1) - vpx_highbd_idct4x4_16_add(input, dest, stride, bd); - else - vpx_highbd_idct4x4_1_add(input, dest, stride, bd); -} - - -void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride, - int eob, int bd) { - if (eob > 1) - vpx_highbd_iwht4x4_16_add(input, dest, stride, bd); - else - vpx_highbd_iwht4x4_1_add(input, dest, stride, bd); -} - -void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride, - int eob, int bd) { - // If dc is 1, then input[0] is the reconstructed value, do not need - // dequantization. 
Also, when dc is 1, dc is counted in eobs, namely eobs >=1. - - // The calculation can be simplified if there are not many non-zero dct - // coefficients. Use eobs to decide what to do. - // TODO(yunqingwang): "eobs = 1" case is also handled in vp10_short_idct8x8_c. - // Combine that with code here. - // DC only DCT coefficient - if (eob == 1) { - vpx_highbd_idct8x8_1_add(input, dest, stride, bd); - } else if (eob <= 10) { - vpx_highbd_idct8x8_10_add(input, dest, stride, bd); - } else { - vpx_highbd_idct8x8_64_add(input, dest, stride, bd); - } -} - -void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest, - int stride, int eob, int bd) { - // The calculation can be simplified if there are not many non-zero dct - // coefficients. Use eobs to separate different cases. - // DC only DCT coefficient. - if (eob == 1) { - vpx_highbd_idct16x16_1_add(input, dest, stride, bd); - } else if (eob <= 10) { - vpx_highbd_idct16x16_10_add(input, dest, stride, bd); - } else { - vpx_highbd_idct16x16_256_add(input, dest, stride, bd); - } -} - -void vp10_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest, - int stride, int eob, int bd) { - // Non-zero coeff only in upper-left 8x8 - if (eob == 1) { - vpx_highbd_idct32x32_1_add(input, dest, stride, bd); - } else if (eob <= 34) { - vpx_highbd_idct32x32_34_add(input, dest, stride, bd); - } else { - vpx_highbd_idct32x32_1024_add(input, dest, stride, bd); - } -} - -void vp10_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, - int stride, int eob, int bd, TX_TYPE tx_type, - int lossless) { - if (lossless) { - assert(tx_type == DCT_DCT); - vp10_highbd_iwht4x4_add(input, dest, stride, eob, bd); - } else { - switch (tx_type) { - case DCT_DCT: - vp10_highbd_idct4x4_add(input, dest, stride, eob, bd); - break; - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - vp10_highbd_iht4x4_16_add(input, dest, stride, tx_type, bd); - break; - default: - assert(0); - break; - } - } -} - -void 
vp10_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, - int stride, int eob, int bd, - TX_TYPE tx_type) { - switch (tx_type) { - case DCT_DCT: - vp10_highbd_idct8x8_add(input, dest, stride, eob, bd); - break; - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - vp10_highbd_iht8x8_64_add(input, dest, stride, tx_type, bd); - break; - default: - assert(0); - break; - } -} - -void vp10_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, - int stride, int eob, int bd, - TX_TYPE tx_type) { - switch (tx_type) { - case DCT_DCT: - vp10_highbd_idct16x16_add(input, dest, stride, eob, bd); - break; - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - vp10_highbd_iht16x16_256_add(input, dest, stride, tx_type, bd); - break; - default: - assert(0); - break; - } -} - -void vp10_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, - int stride, int eob, int bd, - TX_TYPE tx_type) { - switch (tx_type) { - case DCT_DCT: - vp10_highbd_idct32x32_add(input, dest, stride, eob, bd); - break; - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - assert(0); - break; - default: - assert(0); - break; - } -} -#endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vp10/common/idct.h b/libs/libvpx/vp10/common/idct.h deleted file mode 100644 index 088339804d..0000000000 --- a/libs/libvpx/vp10/common/idct.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_COMMON_IDCT_H_ -#define VP10_COMMON_IDCT_H_ - -#include - -#include "./vpx_config.h" -#include "vp10/common/common.h" -#include "vp10/common/enums.h" -#include "vpx_dsp/inv_txfm.h" -#include "vpx_dsp/txfm_common.h" -#include "vpx_ports/mem.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef void (*transform_1d)(const tran_low_t*, tran_low_t*); - -typedef struct { - transform_1d cols, rows; // vertical and horizontal -} transform_2d; - -#if CONFIG_VP9_HIGHBITDEPTH -typedef void (*highbd_transform_1d)(const tran_low_t*, tran_low_t*, int bd); - -typedef struct { - highbd_transform_1d cols, rows; // vertical and horizontal -} highbd_transform_2d; -#endif // CONFIG_VP9_HIGHBITDEPTH - -void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride, - int eob); -void vp10_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride, - int eob); - -void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, - int stride, int eob, TX_TYPE tx_type, int lossless); -void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, - int stride, int eob, TX_TYPE tx_type); -void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, - int stride, int eob, TX_TYPE tx_type); -void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, - int stride, int eob, TX_TYPE tx_type); - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride, - int eob, int bd); -void vp10_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride, - int eob, int bd); -void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride, - int eob, int bd); -void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest, - int stride, int eob, int bd); -void vp10_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest, - int stride, int eob, int bd); -void vp10_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, - int stride, int eob, int bd, 
TX_TYPE tx_type, - int lossless); -void vp10_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, - int stride, int eob, int bd, TX_TYPE tx_type); -void vp10_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, - int stride, int eob, int bd, - TX_TYPE tx_type); -void vp10_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, - int stride, int eob, int bd, - TX_TYPE tx_type); -#endif // CONFIG_VP9_HIGHBITDEPTH -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_IDCT_H_ diff --git a/libs/libvpx/vp10/common/loopfilter.c b/libs/libvpx/vp10/common/loopfilter.c deleted file mode 100644 index a1925de55d..0000000000 --- a/libs/libvpx/vp10/common/loopfilter.c +++ /dev/null @@ -1,1656 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" -#include "vp10/common/loopfilter.h" -#include "vp10/common/onyxc_int.h" -#include "vp10/common/reconinter.h" -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/mem.h" - -#include "vp10/common/seg_common.h" - -// 64 bit masks for left transform size. Each 1 represents a position where -// we should apply a loop filter across the left border of an 8x8 block -// boundary. -// -// In the case of TX_16X16-> ( in low order byte first we end up with -// a mask that looks like this -// -// 10101010 -// 10101010 -// 10101010 -// 10101010 -// 10101010 -// 10101010 -// 10101010 -// 10101010 -// -// A loopfilter should be applied to every other 8x8 horizontally. 
-static const uint64_t left_64x64_txform_mask[TX_SIZES]= { - 0xffffffffffffffffULL, // TX_4X4 - 0xffffffffffffffffULL, // TX_8x8 - 0x5555555555555555ULL, // TX_16x16 - 0x1111111111111111ULL, // TX_32x32 -}; - -// 64 bit masks for above transform size. Each 1 represents a position where -// we should apply a loop filter across the top border of an 8x8 block -// boundary. -// -// In the case of TX_32x32 -> ( in low order byte first we end up with -// a mask that looks like this -// -// 11111111 -// 00000000 -// 00000000 -// 00000000 -// 11111111 -// 00000000 -// 00000000 -// 00000000 -// -// A loopfilter should be applied to every other 4 the row vertically. -static const uint64_t above_64x64_txform_mask[TX_SIZES]= { - 0xffffffffffffffffULL, // TX_4X4 - 0xffffffffffffffffULL, // TX_8x8 - 0x00ff00ff00ff00ffULL, // TX_16x16 - 0x000000ff000000ffULL, // TX_32x32 -}; - -// 64 bit masks for prediction sizes (left). Each 1 represents a position -// where left border of an 8x8 block. These are aligned to the right most -// appropriate bit, and then shifted into place. -// -// In the case of TX_16x32 -> ( low order byte first ) we end up with -// a mask that looks like this : -// -// 10000000 -// 10000000 -// 10000000 -// 10000000 -// 00000000 -// 00000000 -// 00000000 -// 00000000 -static const uint64_t left_prediction_mask[BLOCK_SIZES] = { - 0x0000000000000001ULL, // BLOCK_4X4, - 0x0000000000000001ULL, // BLOCK_4X8, - 0x0000000000000001ULL, // BLOCK_8X4, - 0x0000000000000001ULL, // BLOCK_8X8, - 0x0000000000000101ULL, // BLOCK_8X16, - 0x0000000000000001ULL, // BLOCK_16X8, - 0x0000000000000101ULL, // BLOCK_16X16, - 0x0000000001010101ULL, // BLOCK_16X32, - 0x0000000000000101ULL, // BLOCK_32X16, - 0x0000000001010101ULL, // BLOCK_32X32, - 0x0101010101010101ULL, // BLOCK_32X64, - 0x0000000001010101ULL, // BLOCK_64X32, - 0x0101010101010101ULL, // BLOCK_64X64 -}; - -// 64 bit mask to shift and set for each prediction size. 
-static const uint64_t above_prediction_mask[BLOCK_SIZES] = { - 0x0000000000000001ULL, // BLOCK_4X4 - 0x0000000000000001ULL, // BLOCK_4X8 - 0x0000000000000001ULL, // BLOCK_8X4 - 0x0000000000000001ULL, // BLOCK_8X8 - 0x0000000000000001ULL, // BLOCK_8X16, - 0x0000000000000003ULL, // BLOCK_16X8 - 0x0000000000000003ULL, // BLOCK_16X16 - 0x0000000000000003ULL, // BLOCK_16X32, - 0x000000000000000fULL, // BLOCK_32X16, - 0x000000000000000fULL, // BLOCK_32X32, - 0x000000000000000fULL, // BLOCK_32X64, - 0x00000000000000ffULL, // BLOCK_64X32, - 0x00000000000000ffULL, // BLOCK_64X64 -}; -// 64 bit mask to shift and set for each prediction size. A bit is set for -// each 8x8 block that would be in the left most block of the given block -// size in the 64x64 block. -static const uint64_t size_mask[BLOCK_SIZES] = { - 0x0000000000000001ULL, // BLOCK_4X4 - 0x0000000000000001ULL, // BLOCK_4X8 - 0x0000000000000001ULL, // BLOCK_8X4 - 0x0000000000000001ULL, // BLOCK_8X8 - 0x0000000000000101ULL, // BLOCK_8X16, - 0x0000000000000003ULL, // BLOCK_16X8 - 0x0000000000000303ULL, // BLOCK_16X16 - 0x0000000003030303ULL, // BLOCK_16X32, - 0x0000000000000f0fULL, // BLOCK_32X16, - 0x000000000f0f0f0fULL, // BLOCK_32X32, - 0x0f0f0f0f0f0f0f0fULL, // BLOCK_32X64, - 0x00000000ffffffffULL, // BLOCK_64X32, - 0xffffffffffffffffULL, // BLOCK_64X64 -}; - -// These are used for masking the left and above borders. -static const uint64_t left_border = 0x1111111111111111ULL; -static const uint64_t above_border = 0x000000ff000000ffULL; - -// 16 bit masks for uv transform sizes. -static const uint16_t left_64x64_txform_mask_uv[TX_SIZES]= { - 0xffff, // TX_4X4 - 0xffff, // TX_8x8 - 0x5555, // TX_16x16 - 0x1111, // TX_32x32 -}; - -static const uint16_t above_64x64_txform_mask_uv[TX_SIZES]= { - 0xffff, // TX_4X4 - 0xffff, // TX_8x8 - 0x0f0f, // TX_16x16 - 0x000f, // TX_32x32 -}; - -// 16 bit left mask to shift and set for each uv prediction size. 
-static const uint16_t left_prediction_mask_uv[BLOCK_SIZES] = { - 0x0001, // BLOCK_4X4, - 0x0001, // BLOCK_4X8, - 0x0001, // BLOCK_8X4, - 0x0001, // BLOCK_8X8, - 0x0001, // BLOCK_8X16, - 0x0001, // BLOCK_16X8, - 0x0001, // BLOCK_16X16, - 0x0011, // BLOCK_16X32, - 0x0001, // BLOCK_32X16, - 0x0011, // BLOCK_32X32, - 0x1111, // BLOCK_32X64 - 0x0011, // BLOCK_64X32, - 0x1111, // BLOCK_64X64 -}; -// 16 bit above mask to shift and set for uv each prediction size. -static const uint16_t above_prediction_mask_uv[BLOCK_SIZES] = { - 0x0001, // BLOCK_4X4 - 0x0001, // BLOCK_4X8 - 0x0001, // BLOCK_8X4 - 0x0001, // BLOCK_8X8 - 0x0001, // BLOCK_8X16, - 0x0001, // BLOCK_16X8 - 0x0001, // BLOCK_16X16 - 0x0001, // BLOCK_16X32, - 0x0003, // BLOCK_32X16, - 0x0003, // BLOCK_32X32, - 0x0003, // BLOCK_32X64, - 0x000f, // BLOCK_64X32, - 0x000f, // BLOCK_64X64 -}; - -// 64 bit mask to shift and set for each uv prediction size -static const uint16_t size_mask_uv[BLOCK_SIZES] = { - 0x0001, // BLOCK_4X4 - 0x0001, // BLOCK_4X8 - 0x0001, // BLOCK_8X4 - 0x0001, // BLOCK_8X8 - 0x0001, // BLOCK_8X16, - 0x0001, // BLOCK_16X8 - 0x0001, // BLOCK_16X16 - 0x0011, // BLOCK_16X32, - 0x0003, // BLOCK_32X16, - 0x0033, // BLOCK_32X32, - 0x3333, // BLOCK_32X64, - 0x00ff, // BLOCK_64X32, - 0xffff, // BLOCK_64X64 -}; -static const uint16_t left_border_uv = 0x1111; -static const uint16_t above_border_uv = 0x000f; - -static const int mode_lf_lut[MB_MODE_COUNT] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // INTRA_MODES - 1, 1, 0, 1 // INTER_MODES (ZEROMV == 0) -}; - -static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) { - int lvl; - - // For each possible value for the loop filter fill out limits - for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) { - // Set loop filter parameters that control sharpness. 
- int block_inside_limit = lvl >> ((sharpness_lvl > 0) + (sharpness_lvl > 4)); - - if (sharpness_lvl > 0) { - if (block_inside_limit > (9 - sharpness_lvl)) - block_inside_limit = (9 - sharpness_lvl); - } - - if (block_inside_limit < 1) - block_inside_limit = 1; - - memset(lfi->lfthr[lvl].lim, block_inside_limit, SIMD_WIDTH); - memset(lfi->lfthr[lvl].mblim, (2 * (lvl + 2) + block_inside_limit), - SIMD_WIDTH); - } -} - -static uint8_t get_filter_level(const loop_filter_info_n *lfi_n, - const MB_MODE_INFO *mbmi) { - return lfi_n->lvl[mbmi->segment_id][mbmi->ref_frame[0]] - [mode_lf_lut[mbmi->mode]]; -} - -void vp10_loop_filter_init(VP10_COMMON *cm) { - loop_filter_info_n *lfi = &cm->lf_info; - struct loopfilter *lf = &cm->lf; - int lvl; - - // init limits for given sharpness - update_sharpness(lfi, lf->sharpness_level); - lf->last_sharpness_level = lf->sharpness_level; - - // init hev threshold const vectors - for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) - memset(lfi->lfthr[lvl].hev_thr, (lvl >> 4), SIMD_WIDTH); -} - -void vp10_loop_filter_frame_init(VP10_COMMON *cm, int default_filt_lvl) { - int seg_id; - // n_shift is the multiplier for lf_deltas - // the multiplier is 1 for when filter_lvl is between 0 and 31; - // 2 when filter_lvl is between 32 and 63 - const int scale = 1 << (default_filt_lvl >> 5); - loop_filter_info_n *const lfi = &cm->lf_info; - struct loopfilter *const lf = &cm->lf; - const struct segmentation *const seg = &cm->seg; - - // update limits if sharpness has changed - if (lf->last_sharpness_level != lf->sharpness_level) { - update_sharpness(lfi, lf->sharpness_level); - lf->last_sharpness_level = lf->sharpness_level; - } - - for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) { - int lvl_seg = default_filt_lvl; - if (segfeature_active(seg, seg_id, SEG_LVL_ALT_LF)) { - const int data = get_segdata(seg, seg_id, SEG_LVL_ALT_LF); - lvl_seg = clamp(seg->abs_delta == SEGMENT_ABSDATA ? 
- data : default_filt_lvl + data, - 0, MAX_LOOP_FILTER); - } - - if (!lf->mode_ref_delta_enabled) { - // we could get rid of this if we assume that deltas are set to - // zero when not in use; encoder always uses deltas - memset(lfi->lvl[seg_id], lvl_seg, sizeof(lfi->lvl[seg_id])); - } else { - int ref, mode; - const int intra_lvl = lvl_seg + lf->ref_deltas[INTRA_FRAME] * scale; - lfi->lvl[seg_id][INTRA_FRAME][0] = clamp(intra_lvl, 0, MAX_LOOP_FILTER); - - for (ref = LAST_FRAME; ref < MAX_REF_FRAMES; ++ref) { - for (mode = 0; mode < MAX_MODE_LF_DELTAS; ++mode) { - const int inter_lvl = lvl_seg + lf->ref_deltas[ref] * scale - + lf->mode_deltas[mode] * scale; - lfi->lvl[seg_id][ref][mode] = clamp(inter_lvl, 0, MAX_LOOP_FILTER); - } - } - } - } -} - -static void filter_selectively_vert_row2(int subsampling_factor, - uint8_t *s, int pitch, - unsigned int mask_16x16_l, - unsigned int mask_8x8_l, - unsigned int mask_4x4_l, - unsigned int mask_4x4_int_l, - const loop_filter_info_n *lfi_n, - const uint8_t *lfl) { - const int mask_shift = subsampling_factor ? 4 : 8; - const int mask_cutoff = subsampling_factor ? 0xf : 0xff; - const int lfl_forward = subsampling_factor ? 
4 : 8; - - unsigned int mask_16x16_0 = mask_16x16_l & mask_cutoff; - unsigned int mask_8x8_0 = mask_8x8_l & mask_cutoff; - unsigned int mask_4x4_0 = mask_4x4_l & mask_cutoff; - unsigned int mask_4x4_int_0 = mask_4x4_int_l & mask_cutoff; - unsigned int mask_16x16_1 = (mask_16x16_l >> mask_shift) & mask_cutoff; - unsigned int mask_8x8_1 = (mask_8x8_l >> mask_shift) & mask_cutoff; - unsigned int mask_4x4_1 = (mask_4x4_l >> mask_shift) & mask_cutoff; - unsigned int mask_4x4_int_1 = (mask_4x4_int_l >> mask_shift) & mask_cutoff; - unsigned int mask; - - for (mask = mask_16x16_0 | mask_8x8_0 | mask_4x4_0 | mask_4x4_int_0 | - mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1; - mask; mask >>= 1) { - const loop_filter_thresh *lfi0 = lfi_n->lfthr + *lfl; - const loop_filter_thresh *lfi1 = lfi_n->lfthr + *(lfl + lfl_forward); - - // TODO(yunqingwang): count in loopfilter functions should be removed. - if (mask & 1) { - if ((mask_16x16_0 | mask_16x16_1) & 1) { - if ((mask_16x16_0 & mask_16x16_1) & 1) { - vpx_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr); - } else if (mask_16x16_0 & 1) { - vpx_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr); - } else { - vpx_lpf_vertical_16(s + 8 *pitch, pitch, lfi1->mblim, - lfi1->lim, lfi1->hev_thr); - } - } - - if ((mask_8x8_0 | mask_8x8_1) & 1) { - if ((mask_8x8_0 & mask_8x8_1) & 1) { - vpx_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, lfi1->mblim, lfi1->lim, - lfi1->hev_thr); - } else if (mask_8x8_0 & 1) { - vpx_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr, - 1); - } else { - vpx_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim, - lfi1->hev_thr, 1); - } - } - - if ((mask_4x4_0 | mask_4x4_1) & 1) { - if ((mask_4x4_0 & mask_4x4_1) & 1) { - vpx_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, lfi1->mblim, lfi1->lim, - lfi1->hev_thr); - } else if (mask_4x4_0 & 1) { - vpx_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, 
lfi0->hev_thr, - 1); - } else { - vpx_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim, - lfi1->hev_thr, 1); - } - } - - if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) { - if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) { - vpx_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, lfi1->mblim, lfi1->lim, - lfi1->hev_thr); - } else if (mask_4x4_int_0 & 1) { - vpx_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, 1); - } else { - vpx_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, lfi1->lim, - lfi1->hev_thr, 1); - } - } - } - - s += 8; - lfl += 1; - mask_16x16_0 >>= 1; - mask_8x8_0 >>= 1; - mask_4x4_0 >>= 1; - mask_4x4_int_0 >>= 1; - mask_16x16_1 >>= 1; - mask_8x8_1 >>= 1; - mask_4x4_1 >>= 1; - mask_4x4_int_1 >>= 1; - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -static void highbd_filter_selectively_vert_row2(int subsampling_factor, - uint16_t *s, int pitch, - unsigned int mask_16x16_l, - unsigned int mask_8x8_l, - unsigned int mask_4x4_l, - unsigned int mask_4x4_int_l, - const loop_filter_info_n *lfi_n, - const uint8_t *lfl, int bd) { - const int mask_shift = subsampling_factor ? 4 : 8; - const int mask_cutoff = subsampling_factor ? 0xf : 0xff; - const int lfl_forward = subsampling_factor ? 
4 : 8; - - unsigned int mask_16x16_0 = mask_16x16_l & mask_cutoff; - unsigned int mask_8x8_0 = mask_8x8_l & mask_cutoff; - unsigned int mask_4x4_0 = mask_4x4_l & mask_cutoff; - unsigned int mask_4x4_int_0 = mask_4x4_int_l & mask_cutoff; - unsigned int mask_16x16_1 = (mask_16x16_l >> mask_shift) & mask_cutoff; - unsigned int mask_8x8_1 = (mask_8x8_l >> mask_shift) & mask_cutoff; - unsigned int mask_4x4_1 = (mask_4x4_l >> mask_shift) & mask_cutoff; - unsigned int mask_4x4_int_1 = (mask_4x4_int_l >> mask_shift) & mask_cutoff; - unsigned int mask; - - for (mask = mask_16x16_0 | mask_8x8_0 | mask_4x4_0 | mask_4x4_int_0 | - mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1; - mask; mask >>= 1) { - const loop_filter_thresh *lfi0 = lfi_n->lfthr + *lfl; - const loop_filter_thresh *lfi1 = lfi_n->lfthr + *(lfl + lfl_forward); - - // TODO(yunqingwang): count in loopfilter functions should be removed. - if (mask & 1) { - if ((mask_16x16_0 | mask_16x16_1) & 1) { - if ((mask_16x16_0 & mask_16x16_1) & 1) { - vpx_highbd_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, bd); - } else if (mask_16x16_0 & 1) { - vpx_highbd_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, bd); - } else { - vpx_highbd_lpf_vertical_16(s + 8 *pitch, pitch, lfi1->mblim, - lfi1->lim, lfi1->hev_thr, bd); - } - } - - if ((mask_8x8_0 | mask_8x8_1) & 1) { - if ((mask_8x8_0 & mask_8x8_1) & 1) { - vpx_highbd_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, lfi1->mblim, lfi1->lim, - lfi1->hev_thr, bd); - } else if (mask_8x8_0 & 1) { - vpx_highbd_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, 1, bd); - } else { - vpx_highbd_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, - lfi1->lim, lfi1->hev_thr, 1, bd); - } - } - - if ((mask_4x4_0 | mask_4x4_1) & 1) { - if ((mask_4x4_0 & mask_4x4_1) & 1) { - vpx_highbd_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, lfi1->mblim, lfi1->lim, - lfi1->hev_thr, bd); - } else 
if (mask_4x4_0 & 1) { - vpx_highbd_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, 1, bd); - } else { - vpx_highbd_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, - lfi1->lim, lfi1->hev_thr, 1, bd); - } - } - - if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) { - if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) { - vpx_highbd_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, lfi1->mblim, lfi1->lim, - lfi1->hev_thr, bd); - } else if (mask_4x4_int_0 & 1) { - vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, 1, bd); - } else { - vpx_highbd_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, - lfi1->lim, lfi1->hev_thr, 1, bd); - } - } - } - - s += 8; - lfl += 1; - mask_16x16_0 >>= 1; - mask_8x8_0 >>= 1; - mask_4x4_0 >>= 1; - mask_4x4_int_0 >>= 1; - mask_16x16_1 >>= 1; - mask_8x8_1 >>= 1; - mask_4x4_1 >>= 1; - mask_4x4_int_1 >>= 1; - } -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -static void filter_selectively_horiz(uint8_t *s, int pitch, - unsigned int mask_16x16, - unsigned int mask_8x8, - unsigned int mask_4x4, - unsigned int mask_4x4_int, - const loop_filter_info_n *lfi_n, - const uint8_t *lfl) { - unsigned int mask; - int count; - - for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; - mask; mask >>= count) { - const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl; - - count = 1; - if (mask & 1) { - if (mask_16x16 & 1) { - if ((mask_16x16 & 3) == 3) { - vpx_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 2); - count = 2; - } else { - vpx_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); - } - } else if (mask_8x8 & 1) { - if ((mask_8x8 & 3) == 3) { - // Next block's thresholds. 
- const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1); - - vpx_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, lfin->mblim, lfin->lim, - lfin->hev_thr); - - if ((mask_4x4_int & 3) == 3) { - vpx_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, lfin->mblim, - lfin->lim, lfin->hev_thr); - } else { - if (mask_4x4_int & 1) - vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); - else if (mask_4x4_int & 2) - vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim, - lfin->lim, lfin->hev_thr, 1); - } - count = 2; - } else { - vpx_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1); - - if (mask_4x4_int & 1) - vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); - } - } else if (mask_4x4 & 1) { - if ((mask_4x4 & 3) == 3) { - // Next block's thresholds. - const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1); - - vpx_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, lfin->mblim, lfin->lim, - lfin->hev_thr); - if ((mask_4x4_int & 3) == 3) { - vpx_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, lfin->mblim, - lfin->lim, lfin->hev_thr); - } else { - if (mask_4x4_int & 1) - vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); - else if (mask_4x4_int & 2) - vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim, - lfin->lim, lfin->hev_thr, 1); - } - count = 2; - } else { - vpx_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1); - - if (mask_4x4_int & 1) - vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); - } - } else if (mask_4x4_int & 1) { - vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); - } - } - s += 8 * count; - lfl += count; - mask_16x16 >>= count; - mask_8x8 >>= count; - mask_4x4 >>= count; - mask_4x4_int >>= count; - } -} - -#if CONFIG_VP9_HIGHBITDEPTH 
-static void highbd_filter_selectively_horiz(uint16_t *s, int pitch, - unsigned int mask_16x16, - unsigned int mask_8x8, - unsigned int mask_4x4, - unsigned int mask_4x4_int, - const loop_filter_info_n *lfi_n, - const uint8_t *lfl, int bd) { - unsigned int mask; - int count; - - for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; - mask; mask >>= count) { - const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl; - - count = 1; - if (mask & 1) { - if (mask_16x16 & 1) { - if ((mask_16x16 & 3) == 3) { - vpx_highbd_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 2, bd); - count = 2; - } else { - vpx_highbd_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1, bd); - } - } else if (mask_8x8 & 1) { - if ((mask_8x8 & 3) == 3) { - // Next block's thresholds. - const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1); - - vpx_highbd_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, lfin->mblim, lfin->lim, - lfin->hev_thr, bd); - - if ((mask_4x4_int & 3) == 3) { - vpx_highbd_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, - lfin->mblim, lfin->lim, - lfin->hev_thr, bd); - } else { - if (mask_4x4_int & 1) { - vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, 1, bd); - } else if (mask_4x4_int & 2) { - vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim, - lfin->lim, lfin->hev_thr, 1, bd); - } - } - count = 2; - } else { - vpx_highbd_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1, bd); - - if (mask_4x4_int & 1) { - vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, 1, bd); - } - } - } else if (mask_4x4 & 1) { - if ((mask_4x4 & 3) == 3) { - // Next block's thresholds. 
- const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1); - - vpx_highbd_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, lfin->mblim, lfin->lim, - lfin->hev_thr, bd); - if ((mask_4x4_int & 3) == 3) { - vpx_highbd_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, - lfin->mblim, lfin->lim, - lfin->hev_thr, bd); - } else { - if (mask_4x4_int & 1) { - vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, 1, bd); - } else if (mask_4x4_int & 2) { - vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim, - lfin->lim, lfin->hev_thr, 1, bd); - } - } - count = 2; - } else { - vpx_highbd_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1, bd); - - if (mask_4x4_int & 1) { - vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, 1, bd); - } - } - } else if (mask_4x4_int & 1) { - vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1, bd); - } - } - s += 8 * count; - lfl += count; - mask_16x16 >>= count; - mask_8x8 >>= count; - mask_4x4 >>= count; - mask_4x4_int >>= count; - } -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -// This function ors into the current lfm structure, where to do loop -// filters for the specific mi we are looking at. It uses information -// including the block_size_type (32x16, 32x32, etc.), the transform size, -// whether there were any coefficients encoded, and the loop filter strength -// block we are currently looking at. Shift is used to position the -// 1's we produce. -// TODO(JBB) Need another function for different resolution color.. 
-static void build_masks(const loop_filter_info_n *const lfi_n, - const MODE_INFO *mi, const int shift_y, - const int shift_uv, - LOOP_FILTER_MASK *lfm) { - const MB_MODE_INFO *mbmi = &mi->mbmi; - const BLOCK_SIZE block_size = mbmi->sb_type; - const TX_SIZE tx_size_y = mbmi->tx_size; - const TX_SIZE tx_size_uv = get_uv_tx_size_impl(tx_size_y, block_size, 1, 1); - const int filter_level = get_filter_level(lfi_n, mbmi); - uint64_t *const left_y = &lfm->left_y[tx_size_y]; - uint64_t *const above_y = &lfm->above_y[tx_size_y]; - uint64_t *const int_4x4_y = &lfm->int_4x4_y; - uint16_t *const left_uv = &lfm->left_uv[tx_size_uv]; - uint16_t *const above_uv = &lfm->above_uv[tx_size_uv]; -#if CONFIG_MISC_FIXES - uint16_t *const int_4x4_uv = &lfm->left_int_4x4_uv; -#else - uint16_t *const int_4x4_uv = &lfm->int_4x4_uv; -#endif - int i; - - // If filter level is 0 we don't loop filter. - if (!filter_level) { - return; - } else { - const int w = num_8x8_blocks_wide_lookup[block_size]; - const int h = num_8x8_blocks_high_lookup[block_size]; - int index = shift_y; - for (i = 0; i < h; i++) { - memset(&lfm->lfl_y[index], filter_level, w); - index += 8; - } - } - - // These set 1 in the current block size for the block size edges. - // For instance if the block size is 32x16, we'll set: - // above = 1111 - // 0000 - // and - // left = 1000 - // = 1000 - // NOTE : In this example the low bit is left most ( 1000 ) is stored as - // 1, not 8... - // - // U and V set things on a 16 bit scale. - // - *above_y |= above_prediction_mask[block_size] << shift_y; - *above_uv |= above_prediction_mask_uv[block_size] << shift_uv; - *left_y |= left_prediction_mask[block_size] << shift_y; - *left_uv |= left_prediction_mask_uv[block_size] << shift_uv; - - // If the block has no coefficients and is not intra we skip applying - // the loop filter on block edges. 
-#if CONFIG_MISC_FIXES - if ((mbmi->skip || mbmi->has_no_coeffs) && is_inter_block(mbmi)) - return; -#else - if (mbmi->skip && is_inter_block(mbmi)) - return; -#endif - - // Here we are adding a mask for the transform size. The transform - // size mask is set to be correct for a 64x64 prediction block size. We - // mask to match the size of the block we are working on and then shift it - // into place.. - *above_y |= (size_mask[block_size] & - above_64x64_txform_mask[tx_size_y]) << shift_y; - *above_uv |= (size_mask_uv[block_size] & - above_64x64_txform_mask_uv[tx_size_uv]) << shift_uv; - - *left_y |= (size_mask[block_size] & - left_64x64_txform_mask[tx_size_y]) << shift_y; - *left_uv |= (size_mask_uv[block_size] & - left_64x64_txform_mask_uv[tx_size_uv]) << shift_uv; - - // Here we are trying to determine what to do with the internal 4x4 block - // boundaries. These differ from the 4x4 boundaries on the outside edge of - // an 8x8 in that the internal ones can be skipped and don't depend on - // the prediction block size. - if (tx_size_y == TX_4X4) - *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffffULL) << shift_y; - - if (tx_size_uv == TX_4X4) - *int_4x4_uv |= (size_mask_uv[block_size] & 0xffff) << shift_uv; -} - -// This function does the same thing as the one above with the exception that -// it only affects the y masks. It exists because for blocks < 16x16 in size, -// we only update u and v masks on the first block. 
-static void build_y_mask(const loop_filter_info_n *const lfi_n, - const MODE_INFO *mi, const int shift_y, - LOOP_FILTER_MASK *lfm) { - const MB_MODE_INFO *mbmi = &mi->mbmi; - const BLOCK_SIZE block_size = mbmi->sb_type; - const TX_SIZE tx_size_y = mbmi->tx_size; - const int filter_level = get_filter_level(lfi_n, mbmi); - uint64_t *const left_y = &lfm->left_y[tx_size_y]; - uint64_t *const above_y = &lfm->above_y[tx_size_y]; - uint64_t *const int_4x4_y = &lfm->int_4x4_y; - int i; - - if (!filter_level) { - return; - } else { - const int w = num_8x8_blocks_wide_lookup[block_size]; - const int h = num_8x8_blocks_high_lookup[block_size]; - int index = shift_y; - for (i = 0; i < h; i++) { - memset(&lfm->lfl_y[index], filter_level, w); - index += 8; - } - } - - *above_y |= above_prediction_mask[block_size] << shift_y; - *left_y |= left_prediction_mask[block_size] << shift_y; - -#if CONFIG_MISC_FIXES - if ((mbmi->skip || mbmi->has_no_coeffs) && is_inter_block(mbmi)) - return; -#else - if (mbmi->skip && is_inter_block(mbmi)) - return; -#endif - - *above_y |= (size_mask[block_size] & - above_64x64_txform_mask[tx_size_y]) << shift_y; - - *left_y |= (size_mask[block_size] & - left_64x64_txform_mask[tx_size_y]) << shift_y; - - if (tx_size_y == TX_4X4) - *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffffULL) << shift_y; -} - -// This function sets up the bit masks for the entire 64x64 region represented -// by mi_row, mi_col. -// TODO(JBB): This function only works for yv12. -void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col, - MODE_INFO **mi, const int mode_info_stride, - LOOP_FILTER_MASK *lfm) { - int idx_32, idx_16, idx_8; - const loop_filter_info_n *const lfi_n = &cm->lf_info; - MODE_INFO **mip = mi; - MODE_INFO **mip2 = mi; - - // These are offsets to the next mi in the 64x64 block. It is what gets - // added to the mi ptr as we go through each loop. It helps us to avoid - // setting up special row and column counters for each index. 
The last step - // brings us out back to the starting position. - const int offset_32[] = {4, (mode_info_stride << 2) - 4, 4, - -(mode_info_stride << 2) - 4}; - const int offset_16[] = {2, (mode_info_stride << 1) - 2, 2, - -(mode_info_stride << 1) - 2}; - const int offset[] = {1, mode_info_stride - 1, 1, -mode_info_stride - 1}; - - // Following variables represent shifts to position the current block - // mask over the appropriate block. A shift of 36 to the left will move - // the bits for the final 32 by 32 block in the 64x64 up 4 rows and left - // 4 rows to the appropriate spot. - const int shift_32_y[] = {0, 4, 32, 36}; - const int shift_16_y[] = {0, 2, 16, 18}; - const int shift_8_y[] = {0, 1, 8, 9}; - const int shift_32_uv[] = {0, 2, 8, 10}; - const int shift_16_uv[] = {0, 1, 4, 5}; - int i; - const int max_rows = (mi_row + MI_BLOCK_SIZE > cm->mi_rows ? - cm->mi_rows - mi_row : MI_BLOCK_SIZE); - const int max_cols = (mi_col + MI_BLOCK_SIZE > cm->mi_cols ? - cm->mi_cols - mi_col : MI_BLOCK_SIZE); - - vp10_zero(*lfm); - assert(mip[0] != NULL); - - // TODO(jimbankoski): Try moving most of the following code into decode - // loop and storing lfm in the mbmi structure so that we don't have to go - // through the recursive loop structure multiple times. 
- switch (mip[0]->mbmi.sb_type) { - case BLOCK_64X64: - build_masks(lfi_n, mip[0] , 0, 0, lfm); - break; - case BLOCK_64X32: - build_masks(lfi_n, mip[0], 0, 0, lfm); - mip2 = mip + mode_info_stride * 4; - if (4 >= max_rows) - break; - build_masks(lfi_n, mip2[0], 32, 8, lfm); - break; - case BLOCK_32X64: - build_masks(lfi_n, mip[0], 0, 0, lfm); - mip2 = mip + 4; - if (4 >= max_cols) - break; - build_masks(lfi_n, mip2[0], 4, 2, lfm); - break; - default: - for (idx_32 = 0; idx_32 < 4; mip += offset_32[idx_32], ++idx_32) { - const int shift_y = shift_32_y[idx_32]; - const int shift_uv = shift_32_uv[idx_32]; - const int mi_32_col_offset = ((idx_32 & 1) << 2); - const int mi_32_row_offset = ((idx_32 >> 1) << 2); - if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows) - continue; - switch (mip[0]->mbmi.sb_type) { - case BLOCK_32X32: - build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); - break; - case BLOCK_32X16: - build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); - if (mi_32_row_offset + 2 >= max_rows) - continue; - mip2 = mip + mode_info_stride * 2; - build_masks(lfi_n, mip2[0], shift_y + 16, shift_uv + 4, lfm); - break; - case BLOCK_16X32: - build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); - if (mi_32_col_offset + 2 >= max_cols) - continue; - mip2 = mip + 2; - build_masks(lfi_n, mip2[0], shift_y + 2, shift_uv + 1, lfm); - break; - default: - for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) { - const int shift_y = shift_32_y[idx_32] + shift_16_y[idx_16]; - const int shift_uv = shift_32_uv[idx_32] + shift_16_uv[idx_16]; - const int mi_16_col_offset = mi_32_col_offset + - ((idx_16 & 1) << 1); - const int mi_16_row_offset = mi_32_row_offset + - ((idx_16 >> 1) << 1); - - if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows) - continue; - - switch (mip[0]->mbmi.sb_type) { - case BLOCK_16X16: - build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); - break; - case BLOCK_16X8: - build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); - if 
(mi_16_row_offset + 1 >= max_rows) - continue; - mip2 = mip + mode_info_stride; - build_y_mask(lfi_n, mip2[0], shift_y+8, lfm); - break; - case BLOCK_8X16: - build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); - if (mi_16_col_offset +1 >= max_cols) - continue; - mip2 = mip + 1; - build_y_mask(lfi_n, mip2[0], shift_y+1, lfm); - break; - default: { - const int shift_y = shift_32_y[idx_32] + - shift_16_y[idx_16] + - shift_8_y[0]; - build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); - mip += offset[0]; - for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) { - const int shift_y = shift_32_y[idx_32] + - shift_16_y[idx_16] + - shift_8_y[idx_8]; - const int mi_8_col_offset = mi_16_col_offset + - ((idx_8 & 1)); - const int mi_8_row_offset = mi_16_row_offset + - ((idx_8 >> 1)); - - if (mi_8_col_offset >= max_cols || - mi_8_row_offset >= max_rows) - continue; - build_y_mask(lfi_n, mip[0], shift_y, lfm); - } - break; - } - } - } - break; - } - } - break; - } - // The largest loopfilter we have is 16x16 so we use the 16x16 mask - // for 32x32 transforms also. - lfm->left_y[TX_16X16] |= lfm->left_y[TX_32X32]; - lfm->above_y[TX_16X16] |= lfm->above_y[TX_32X32]; - lfm->left_uv[TX_16X16] |= lfm->left_uv[TX_32X32]; - lfm->above_uv[TX_16X16] |= lfm->above_uv[TX_32X32]; - - // We do at least 8 tap filter on every 32x32 even if the transform size - // is 4x4. So if the 4x4 is set on a border pixel add it to the 8x8 and - // remove it from the 4x4. - lfm->left_y[TX_8X8] |= lfm->left_y[TX_4X4] & left_border; - lfm->left_y[TX_4X4] &= ~left_border; - lfm->above_y[TX_8X8] |= lfm->above_y[TX_4X4] & above_border; - lfm->above_y[TX_4X4] &= ~above_border; - lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_4X4] & left_border_uv; - lfm->left_uv[TX_4X4] &= ~left_border_uv; - lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_4X4] & above_border_uv; - lfm->above_uv[TX_4X4] &= ~above_border_uv; - - // We do some special edge handling. 
- if (mi_row + MI_BLOCK_SIZE > cm->mi_rows) { - const uint64_t rows = cm->mi_rows - mi_row; - - // Each pixel inside the border gets a 1, - const uint64_t mask_y = (((uint64_t) 1 << (rows << 3)) - 1); - const uint16_t mask_uv = (((uint16_t) 1 << (((rows + 1) >> 1) << 2)) - 1); - - // Remove values completely outside our border. - for (i = 0; i < TX_32X32; i++) { - lfm->left_y[i] &= mask_y; - lfm->above_y[i] &= mask_y; - lfm->left_uv[i] &= mask_uv; - lfm->above_uv[i] &= mask_uv; - } - lfm->int_4x4_y &= mask_y; -#if CONFIG_MISC_FIXES - lfm->above_int_4x4_uv = lfm->left_int_4x4_uv & mask_uv; -#else - lfm->int_4x4_uv &= mask_uv; -#endif - - // We don't apply a wide loop filter on the last uv block row. If set - // apply the shorter one instead. - if (rows == 1) { - lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16]; - lfm->above_uv[TX_16X16] = 0; - } - if (rows == 5) { - lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16] & 0xff00; - lfm->above_uv[TX_16X16] &= ~(lfm->above_uv[TX_16X16] & 0xff00); - } - } - - if (mi_col + MI_BLOCK_SIZE > cm->mi_cols) { - const uint64_t columns = cm->mi_cols - mi_col; - - // Each pixel inside the border gets a 1, the multiply copies the border - // to where we need it. - const uint64_t mask_y = (((1 << columns) - 1)) * 0x0101010101010101ULL; - const uint16_t mask_uv = ((1 << ((columns + 1) >> 1)) - 1) * 0x1111; - - // Internal edges are not applied on the last column of the image so - // we mask 1 more for the internal edges - const uint16_t mask_uv_int = ((1 << (columns >> 1)) - 1) * 0x1111; - - // Remove the bits outside the image edge. - for (i = 0; i < TX_32X32; i++) { - lfm->left_y[i] &= mask_y; - lfm->above_y[i] &= mask_y; - lfm->left_uv[i] &= mask_uv; - lfm->above_uv[i] &= mask_uv; - } - lfm->int_4x4_y &= mask_y; -#if CONFIG_MISC_FIXES - lfm->left_int_4x4_uv &= mask_uv_int; -#else - lfm->int_4x4_uv &= mask_uv_int; -#endif - - // We don't apply a wide loop filter on the last uv column. If set - // apply the shorter one instead. 
- if (columns == 1) { - lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_16X16]; - lfm->left_uv[TX_16X16] = 0; - } - if (columns == 5) { - lfm->left_uv[TX_8X8] |= (lfm->left_uv[TX_16X16] & 0xcccc); - lfm->left_uv[TX_16X16] &= ~(lfm->left_uv[TX_16X16] & 0xcccc); - } - } - // We don't apply a loop filter on the first column in the image, mask that - // out. - if (mi_col == 0) { - for (i = 0; i < TX_32X32; i++) { - lfm->left_y[i] &= 0xfefefefefefefefeULL; - lfm->left_uv[i] &= 0xeeee; - } - } - - // Assert if we try to apply 2 different loop filters at the same position. - assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_8X8])); - assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_4X4])); - assert(!(lfm->left_y[TX_8X8] & lfm->left_y[TX_4X4])); - assert(!(lfm->int_4x4_y & lfm->left_y[TX_16X16])); - assert(!(lfm->left_uv[TX_16X16]&lfm->left_uv[TX_8X8])); - assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_4X4])); - assert(!(lfm->left_uv[TX_8X8] & lfm->left_uv[TX_4X4])); -#if CONFIG_MISC_FIXES - assert(!(lfm->left_int_4x4_uv & lfm->left_uv[TX_16X16])); -#else - assert(!(lfm->int_4x4_uv & lfm->left_uv[TX_16X16])); -#endif - assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_8X8])); - assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_4X4])); - assert(!(lfm->above_y[TX_8X8] & lfm->above_y[TX_4X4])); - assert(!(lfm->int_4x4_y & lfm->above_y[TX_16X16])); - assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_8X8])); - assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_4X4])); - assert(!(lfm->above_uv[TX_8X8] & lfm->above_uv[TX_4X4])); -#if CONFIG_MISC_FIXES - assert(!(lfm->above_int_4x4_uv & lfm->above_uv[TX_16X16])); -#else - assert(!(lfm->int_4x4_uv & lfm->above_uv[TX_16X16])); -#endif -} - -static void filter_selectively_vert(uint8_t *s, int pitch, - unsigned int mask_16x16, - unsigned int mask_8x8, - unsigned int mask_4x4, - unsigned int mask_4x4_int, - const loop_filter_info_n *lfi_n, - const uint8_t *lfl) { - unsigned int mask; - - for (mask = mask_16x16 | mask_8x8 | mask_4x4 | 
mask_4x4_int; - mask; mask >>= 1) { - const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl; - - if (mask & 1) { - if (mask_16x16 & 1) { - vpx_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr); - } else if (mask_8x8 & 1) { - vpx_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1); - } else if (mask_4x4 & 1) { - vpx_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1); - } - } - if (mask_4x4_int & 1) - vpx_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1); - s += 8; - lfl += 1; - mask_16x16 >>= 1; - mask_8x8 >>= 1; - mask_4x4 >>= 1; - mask_4x4_int >>= 1; - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -static void highbd_filter_selectively_vert(uint16_t *s, int pitch, - unsigned int mask_16x16, - unsigned int mask_8x8, - unsigned int mask_4x4, - unsigned int mask_4x4_int, - const loop_filter_info_n *lfi_n, - const uint8_t *lfl, int bd) { - unsigned int mask; - - for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; - mask; mask >>= 1) { - const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl; - - if (mask & 1) { - if (mask_16x16 & 1) { - vpx_highbd_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, bd); - } else if (mask_8x8 & 1) { - vpx_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1, bd); - } else if (mask_4x4 & 1) { - vpx_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1, bd); - } - } - if (mask_4x4_int & 1) - vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1, bd); - s += 8; - lfl += 1; - mask_16x16 >>= 1; - mask_8x8 >>= 1; - mask_4x4 >>= 1; - mask_4x4_int >>= 1; - } -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -void vp10_filter_block_plane_non420(VP10_COMMON *cm, - struct macroblockd_plane *plane, - MODE_INFO **mi_8x8, - int mi_row, int mi_col) { - const int ss_x = plane->subsampling_x; - const int ss_y = plane->subsampling_y; - const int row_step = 1 << ss_y; - const int col_step = 1 << ss_x; - const int row_step_stride = 
cm->mi_stride * row_step; - struct buf_2d *const dst = &plane->dst; - uint8_t* const dst0 = dst->buf; - unsigned int mask_16x16[MI_BLOCK_SIZE] = {0}; - unsigned int mask_8x8[MI_BLOCK_SIZE] = {0}; - unsigned int mask_4x4[MI_BLOCK_SIZE] = {0}; - unsigned int mask_4x4_int[MI_BLOCK_SIZE] = {0}; - uint8_t lfl[MI_BLOCK_SIZE * MI_BLOCK_SIZE]; - int r, c; - - for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += row_step) { - unsigned int mask_16x16_c = 0; - unsigned int mask_8x8_c = 0; - unsigned int mask_4x4_c = 0; - unsigned int border_mask; - - // Determine the vertical edges that need filtering - for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) { - const MODE_INFO *mi = mi_8x8[c]; - const BLOCK_SIZE sb_type = mi[0].mbmi.sb_type; - const int skip_this = mi[0].mbmi.skip && is_inter_block(&mi[0].mbmi); - // left edge of current unit is block/partition edge -> no skip - const int block_edge_left = (num_4x4_blocks_wide_lookup[sb_type] > 1) ? - !(c & (num_8x8_blocks_wide_lookup[sb_type] - 1)) : 1; - const int skip_this_c = skip_this && !block_edge_left; - // top edge of current unit is block/partition edge -> no skip - const int block_edge_above = (num_4x4_blocks_high_lookup[sb_type] > 1) ? - !(r & (num_8x8_blocks_high_lookup[sb_type] - 1)) : 1; - const int skip_this_r = skip_this && !block_edge_above; - const TX_SIZE tx_size = (plane->plane_type == PLANE_TYPE_UV) - ? 
get_uv_tx_size(&mi[0].mbmi, plane) - : mi[0].mbmi.tx_size; - const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1; - const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1; - - // Filter level can vary per MI - if (!(lfl[(r << 3) + (c >> ss_x)] = - get_filter_level(&cm->lf_info, &mi[0].mbmi))) - continue; - - // Build masks based on the transform size of each block - if (tx_size == TX_32X32) { - if (!skip_this_c && ((c >> ss_x) & 3) == 0) { - if (!skip_border_4x4_c) - mask_16x16_c |= 1 << (c >> ss_x); - else - mask_8x8_c |= 1 << (c >> ss_x); - } - if (!skip_this_r && ((r >> ss_y) & 3) == 0) { - if (!skip_border_4x4_r) - mask_16x16[r] |= 1 << (c >> ss_x); - else - mask_8x8[r] |= 1 << (c >> ss_x); - } - } else if (tx_size == TX_16X16) { - if (!skip_this_c && ((c >> ss_x) & 1) == 0) { - if (!skip_border_4x4_c) - mask_16x16_c |= 1 << (c >> ss_x); - else - mask_8x8_c |= 1 << (c >> ss_x); - } - if (!skip_this_r && ((r >> ss_y) & 1) == 0) { - if (!skip_border_4x4_r) - mask_16x16[r] |= 1 << (c >> ss_x); - else - mask_8x8[r] |= 1 << (c >> ss_x); - } - } else { - // force 8x8 filtering on 32x32 boundaries - if (!skip_this_c) { - if (tx_size == TX_8X8 || ((c >> ss_x) & 3) == 0) - mask_8x8_c |= 1 << (c >> ss_x); - else - mask_4x4_c |= 1 << (c >> ss_x); - } - - if (!skip_this_r) { - if (tx_size == TX_8X8 || ((r >> ss_y) & 3) == 0) - mask_8x8[r] |= 1 << (c >> ss_x); - else - mask_4x4[r] |= 1 << (c >> ss_x); - } - - if (!skip_this && tx_size < TX_8X8 && !skip_border_4x4_c) - mask_4x4_int[r] |= 1 << (c >> ss_x); - } - } - - // Disable filtering on the leftmost column - border_mask = ~(mi_col == 0); -#if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) { - highbd_filter_selectively_vert(CONVERT_TO_SHORTPTR(dst->buf), - dst->stride, - mask_16x16_c & border_mask, - mask_8x8_c & border_mask, - mask_4x4_c & border_mask, - mask_4x4_int[r], - &cm->lf_info, &lfl[r << 3], - (int)cm->bit_depth); - } else { - filter_selectively_vert(dst->buf, dst->stride, - 
mask_16x16_c & border_mask, - mask_8x8_c & border_mask, - mask_4x4_c & border_mask, - mask_4x4_int[r], - &cm->lf_info, &lfl[r << 3]); - } -#else - filter_selectively_vert(dst->buf, dst->stride, - mask_16x16_c & border_mask, - mask_8x8_c & border_mask, - mask_4x4_c & border_mask, - mask_4x4_int[r], - &cm->lf_info, &lfl[r << 3]); -#endif // CONFIG_VP9_HIGHBITDEPTH - dst->buf += 8 * dst->stride; - mi_8x8 += row_step_stride; - } - - // Now do horizontal pass - dst->buf = dst0; - for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += row_step) { - const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1; - const unsigned int mask_4x4_int_r = skip_border_4x4_r ? 0 : mask_4x4_int[r]; - - unsigned int mask_16x16_r; - unsigned int mask_8x8_r; - unsigned int mask_4x4_r; - - if (mi_row + r == 0) { - mask_16x16_r = 0; - mask_8x8_r = 0; - mask_4x4_r = 0; - } else { - mask_16x16_r = mask_16x16[r]; - mask_8x8_r = mask_8x8[r]; - mask_4x4_r = mask_4x4[r]; - } -#if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) { - highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf), - dst->stride, - mask_16x16_r, - mask_8x8_r, - mask_4x4_r, - mask_4x4_int_r, - &cm->lf_info, &lfl[r << 3], - (int)cm->bit_depth); - } else { - filter_selectively_horiz(dst->buf, dst->stride, - mask_16x16_r, - mask_8x8_r, - mask_4x4_r, - mask_4x4_int_r, - &cm->lf_info, &lfl[r << 3]); - } -#else - filter_selectively_horiz(dst->buf, dst->stride, - mask_16x16_r, - mask_8x8_r, - mask_4x4_r, - mask_4x4_int_r, - &cm->lf_info, &lfl[r << 3]); -#endif // CONFIG_VP9_HIGHBITDEPTH - dst->buf += 8 * dst->stride; - } -} - -void vp10_filter_block_plane_ss00(VP10_COMMON *const cm, - struct macroblockd_plane *const plane, - int mi_row, - LOOP_FILTER_MASK *lfm) { - struct buf_2d *const dst = &plane->dst; - uint8_t *const dst0 = dst->buf; - int r; - uint64_t mask_16x16 = lfm->left_y[TX_16X16]; - uint64_t mask_8x8 = lfm->left_y[TX_8X8]; - uint64_t mask_4x4 = lfm->left_y[TX_4X4]; - uint64_t mask_4x4_int = 
lfm->int_4x4_y; - - assert(plane->subsampling_x == 0 && plane->subsampling_y == 0); - - // Vertical pass: do 2 rows at one time - for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 2) { - unsigned int mask_16x16_l = mask_16x16 & 0xffff; - unsigned int mask_8x8_l = mask_8x8 & 0xffff; - unsigned int mask_4x4_l = mask_4x4 & 0xffff; - unsigned int mask_4x4_int_l = mask_4x4_int & 0xffff; - -// Disable filtering on the leftmost column. -#if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) { - highbd_filter_selectively_vert_row2( - plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride, - mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, &cm->lf_info, - &lfm->lfl_y[r << 3], (int)cm->bit_depth); - } else { - filter_selectively_vert_row2( - plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l, - mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_y[r << 3]); - } -#else - filter_selectively_vert_row2( - plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l, - mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_y[r << 3]); -#endif // CONFIG_VP9_HIGHBITDEPTH - dst->buf += 16 * dst->stride; - mask_16x16 >>= 16; - mask_8x8 >>= 16; - mask_4x4 >>= 16; - mask_4x4_int >>= 16; - } - - // Horizontal pass - dst->buf = dst0; - mask_16x16 = lfm->above_y[TX_16X16]; - mask_8x8 = lfm->above_y[TX_8X8]; - mask_4x4 = lfm->above_y[TX_4X4]; - mask_4x4_int = lfm->int_4x4_y; - - for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r++) { - unsigned int mask_16x16_r; - unsigned int mask_8x8_r; - unsigned int mask_4x4_r; - - if (mi_row + r == 0) { - mask_16x16_r = 0; - mask_8x8_r = 0; - mask_4x4_r = 0; - } else { - mask_16x16_r = mask_16x16 & 0xff; - mask_8x8_r = mask_8x8 & 0xff; - mask_4x4_r = mask_4x4 & 0xff; - } - -#if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) { - highbd_filter_selectively_horiz( - CONVERT_TO_SHORTPTR(dst->buf), dst->stride, mask_16x16_r, mask_8x8_r, - mask_4x4_r, mask_4x4_int & 0xff, &cm->lf_info, 
&lfm->lfl_y[r << 3], - (int)cm->bit_depth); - } else { - filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r, - mask_4x4_r, mask_4x4_int & 0xff, &cm->lf_info, - &lfm->lfl_y[r << 3]); - } -#else - filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r, - mask_4x4_r, mask_4x4_int & 0xff, &cm->lf_info, - &lfm->lfl_y[r << 3]); -#endif // CONFIG_VP9_HIGHBITDEPTH - - dst->buf += 8 * dst->stride; - mask_16x16 >>= 8; - mask_8x8 >>= 8; - mask_4x4 >>= 8; - mask_4x4_int >>= 8; - } -} - -void vp10_filter_block_plane_ss11(VP10_COMMON *const cm, - struct macroblockd_plane *const plane, - int mi_row, - LOOP_FILTER_MASK *lfm) { - struct buf_2d *const dst = &plane->dst; - uint8_t *const dst0 = dst->buf; - int r, c; - - uint16_t mask_16x16 = lfm->left_uv[TX_16X16]; - uint16_t mask_8x8 = lfm->left_uv[TX_8X8]; - uint16_t mask_4x4 = lfm->left_uv[TX_4X4]; -#if CONFIG_MISC_FIXES - uint16_t mask_4x4_int = lfm->left_int_4x4_uv; -#else - uint16_t mask_4x4_int = lfm->int_4x4_uv; -#endif - - assert(plane->subsampling_x == 1 && plane->subsampling_y == 1); - - // Vertical pass: do 2 rows at one time - for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 4) { - if (plane->plane_type == 1) { - for (c = 0; c < (MI_BLOCK_SIZE >> 1); c++) { - lfm->lfl_uv[(r << 1) + c] = lfm->lfl_y[(r << 3) + (c << 1)]; - lfm->lfl_uv[((r + 2) << 1) + c] = lfm->lfl_y[((r + 2) << 3) + (c << 1)]; - } - } - - { - unsigned int mask_16x16_l = mask_16x16 & 0xff; - unsigned int mask_8x8_l = mask_8x8 & 0xff; - unsigned int mask_4x4_l = mask_4x4 & 0xff; - unsigned int mask_4x4_int_l = mask_4x4_int & 0xff; - -// Disable filtering on the leftmost column. 
-#if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) { - highbd_filter_selectively_vert_row2( - plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride, - mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, &cm->lf_info, - &lfm->lfl_uv[r << 1], (int)cm->bit_depth); - } else { - filter_selectively_vert_row2( - plane->subsampling_x, dst->buf, dst->stride, - mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, &cm->lf_info, - &lfm->lfl_uv[r << 1]); - } -#else - filter_selectively_vert_row2( - plane->subsampling_x, dst->buf, dst->stride, - mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, &cm->lf_info, - &lfm->lfl_uv[r << 1]); -#endif // CONFIG_VP9_HIGHBITDEPTH - - dst->buf += 16 * dst->stride; - mask_16x16 >>= 8; - mask_8x8 >>= 8; - mask_4x4 >>= 8; - mask_4x4_int >>= 8; - } - } - - // Horizontal pass - dst->buf = dst0; - mask_16x16 = lfm->above_uv[TX_16X16]; - mask_8x8 = lfm->above_uv[TX_8X8]; - mask_4x4 = lfm->above_uv[TX_4X4]; -#if CONFIG_MISC_FIXES - mask_4x4_int = lfm->above_int_4x4_uv; -#else - mask_4x4_int = lfm->int_4x4_uv; -#endif - - for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 2) { - const int skip_border_4x4_r = mi_row + r == cm->mi_rows - 1; - const unsigned int mask_4x4_int_r = - skip_border_4x4_r ? 
0 : (mask_4x4_int & 0xf); - unsigned int mask_16x16_r; - unsigned int mask_8x8_r; - unsigned int mask_4x4_r; - - if (mi_row + r == 0) { - mask_16x16_r = 0; - mask_8x8_r = 0; - mask_4x4_r = 0; - } else { - mask_16x16_r = mask_16x16 & 0xf; - mask_8x8_r = mask_8x8 & 0xf; - mask_4x4_r = mask_4x4 & 0xf; - } - -#if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) { - highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf), - dst->stride, mask_16x16_r, mask_8x8_r, - mask_4x4_r, mask_4x4_int_r, &cm->lf_info, - &lfm->lfl_uv[r << 1], (int)cm->bit_depth); - } else { - filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r, - mask_4x4_r, mask_4x4_int_r, &cm->lf_info, - &lfm->lfl_uv[r << 1]); - } -#else - filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r, - mask_4x4_r, mask_4x4_int_r, &cm->lf_info, - &lfm->lfl_uv[r << 1]); -#endif // CONFIG_VP9_HIGHBITDEPTH - - dst->buf += 8 * dst->stride; - mask_16x16 >>= 4; - mask_8x8 >>= 4; - mask_4x4 >>= 4; - mask_4x4_int >>= 4; - } -} - -void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, - VP10_COMMON *cm, - struct macroblockd_plane planes[MAX_MB_PLANE], - int start, int stop, int y_only) { - const int num_planes = y_only ? 1 : MAX_MB_PLANE; - enum lf_path path; - LOOP_FILTER_MASK lfm; - int mi_row, mi_col; - - if (y_only) - path = LF_PATH_444; - else if (planes[1].subsampling_y == 1 && planes[1].subsampling_x == 1) - path = LF_PATH_420; - else if (planes[1].subsampling_y == 0 && planes[1].subsampling_x == 0) - path = LF_PATH_444; - else - path = LF_PATH_SLOW; - - for (mi_row = start; mi_row < stop; mi_row += MI_BLOCK_SIZE) { - MODE_INFO **mi = cm->mi_grid_visible + mi_row * cm->mi_stride; - - for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) { - int plane; - - vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col); - - // TODO(JBB): Make setup_mask work for non 420. 
- vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, - &lfm); - - vp10_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm); - for (plane = 1; plane < num_planes; ++plane) { - switch (path) { - case LF_PATH_420: - vp10_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm); - break; - case LF_PATH_444: - vp10_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm); - break; - case LF_PATH_SLOW: - vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, - mi_row, mi_col); - break; - } - } - } - } -} - -void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, - VP10_COMMON *cm, MACROBLOCKD *xd, - int frame_filter_level, - int y_only, int partial_frame) { - int start_mi_row, end_mi_row, mi_rows_to_filter; - if (!frame_filter_level) return; - start_mi_row = 0; - mi_rows_to_filter = cm->mi_rows; - if (partial_frame && cm->mi_rows > 8) { - start_mi_row = cm->mi_rows >> 1; - start_mi_row &= 0xfffffff8; - mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8); - } - end_mi_row = start_mi_row + mi_rows_to_filter; - vp10_loop_filter_frame_init(cm, frame_filter_level); - vp10_loop_filter_rows(frame, cm, xd->plane, - start_mi_row, end_mi_row, - y_only); -} - -void vp10_loop_filter_data_reset( - LFWorkerData *lf_data, YV12_BUFFER_CONFIG *frame_buffer, - struct VP10Common *cm, - const struct macroblockd_plane planes[MAX_MB_PLANE]) { - lf_data->frame_buffer = frame_buffer; - lf_data->cm = cm; - lf_data->start = 0; - lf_data->stop = 0; - lf_data->y_only = 0; - memcpy(lf_data->planes, planes, sizeof(lf_data->planes)); -} - -int vp10_loop_filter_worker(LFWorkerData *const lf_data, void *unused) { - (void)unused; - vp10_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes, - lf_data->start, lf_data->stop, lf_data->y_only); - return 1; -} diff --git a/libs/libvpx/vp10/common/loopfilter.h b/libs/libvpx/vp10/common/loopfilter.h deleted file mode 100644 index 8db705aa03..0000000000 --- a/libs/libvpx/vp10/common/loopfilter.h +++ /dev/null @@ -1,159 +0,0 @@ 
-/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_COMMON_LOOPFILTER_H_ -#define VP10_COMMON_LOOPFILTER_H_ - -#include "vpx_ports/mem.h" -#include "./vpx_config.h" - -#include "vp10/common/blockd.h" -#include "vp10/common/seg_common.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define MAX_LOOP_FILTER 63 -#define MAX_SHARPNESS 7 - -#define SIMD_WIDTH 16 - -#define MAX_MODE_LF_DELTAS 2 - -enum lf_path { - LF_PATH_420, - LF_PATH_444, - LF_PATH_SLOW, -}; - -struct loopfilter { - int filter_level; - - int sharpness_level; - int last_sharpness_level; - - uint8_t mode_ref_delta_enabled; - uint8_t mode_ref_delta_update; - - // 0 = Intra, Last, GF, ARF - signed char ref_deltas[MAX_REF_FRAMES]; - signed char last_ref_deltas[MAX_REF_FRAMES]; - - // 0 = ZERO_MV, MV - signed char mode_deltas[MAX_MODE_LF_DELTAS]; - signed char last_mode_deltas[MAX_MODE_LF_DELTAS]; -}; - -// Need to align this structure so when it is declared and -// passed it can be loaded into vector registers. -typedef struct { - DECLARE_ALIGNED(SIMD_WIDTH, uint8_t, mblim[SIMD_WIDTH]); - DECLARE_ALIGNED(SIMD_WIDTH, uint8_t, lim[SIMD_WIDTH]); - DECLARE_ALIGNED(SIMD_WIDTH, uint8_t, hev_thr[SIMD_WIDTH]); -} loop_filter_thresh; - -typedef struct { - loop_filter_thresh lfthr[MAX_LOOP_FILTER + 1]; - uint8_t lvl[MAX_SEGMENTS][MAX_REF_FRAMES][MAX_MODE_LF_DELTAS]; -} loop_filter_info_n; - -// This structure holds bit masks for all 8x8 blocks in a 64x64 region. -// Each 1 bit represents a position in which we want to apply the loop filter. -// Left_ entries refer to whether we apply a filter on the border to the -// left of the block. 
Above_ entries refer to whether or not to apply a -// filter on the above border. Int_ entries refer to whether or not to -// apply borders on the 4x4 edges within the 8x8 block that each bit -// represents. -// Since each transform is accompanied by a potentially different type of -// loop filter there is a different entry in the array for each transform size. -typedef struct { - uint64_t left_y[TX_SIZES]; - uint64_t above_y[TX_SIZES]; - uint64_t int_4x4_y; - uint16_t left_uv[TX_SIZES]; - uint16_t above_uv[TX_SIZES]; -#if CONFIG_MISC_FIXES - uint16_t left_int_4x4_uv; - uint16_t above_int_4x4_uv; -#else - uint16_t int_4x4_uv; -#endif - uint8_t lfl_y[64]; - uint8_t lfl_uv[16]; -} LOOP_FILTER_MASK; - -/* assorted loopfilter functions which get used elsewhere */ -struct VP10Common; -struct macroblockd; -struct VP9LfSyncData; - -// This function sets up the bit masks for the entire 64x64 region represented -// by mi_row, mi_col. -void vp10_setup_mask(struct VP10Common *const cm, - const int mi_row, const int mi_col, - MODE_INFO **mi_8x8, const int mode_info_stride, - LOOP_FILTER_MASK *lfm); - -void vp10_filter_block_plane_ss00(struct VP10Common *const cm, - struct macroblockd_plane *const plane, - int mi_row, - LOOP_FILTER_MASK *lfm); - -void vp10_filter_block_plane_ss11(struct VP10Common *const cm, - struct macroblockd_plane *const plane, - int mi_row, - LOOP_FILTER_MASK *lfm); - -void vp10_filter_block_plane_non420(struct VP10Common *cm, - struct macroblockd_plane *plane, - MODE_INFO **mi_8x8, - int mi_row, int mi_col); - -void vp10_loop_filter_init(struct VP10Common *cm); - -// Update the loop filter for the current frame. -// This should be called before vp10_loop_filter_rows(), vp10_loop_filter_frame() -// calls this function directly. 
-void vp10_loop_filter_frame_init(struct VP10Common *cm, int default_filt_lvl); - -void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, - struct VP10Common *cm, - struct macroblockd *mbd, - int filter_level, - int y_only, int partial_frame); - -// Apply the loop filter to [start, stop) macro block rows in frame_buffer. -void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, - struct VP10Common *cm, - struct macroblockd_plane planes[MAX_MB_PLANE], - int start, int stop, int y_only); - -typedef struct LoopFilterWorkerData { - YV12_BUFFER_CONFIG *frame_buffer; - struct VP10Common *cm; - struct macroblockd_plane planes[MAX_MB_PLANE]; - - int start; - int stop; - int y_only; -} LFWorkerData; - -void vp10_loop_filter_data_reset( - LFWorkerData *lf_data, YV12_BUFFER_CONFIG *frame_buffer, - struct VP10Common *cm, const struct macroblockd_plane planes[MAX_MB_PLANE]); - -// Operates on the rows described by 'lf_data'. -int vp10_loop_filter_worker(LFWorkerData *const lf_data, void *unused); -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_LOOPFILTER_H_ diff --git a/libs/libvpx/vp10/common/mfqe.c b/libs/libvpx/vp10/common/mfqe.c deleted file mode 100644 index c715ef73eb..0000000000 --- a/libs/libvpx/vp10/common/mfqe.c +++ /dev/null @@ -1,394 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "./vpx_config.h" -#include "./vp10_rtcd.h" -#include "./vpx_dsp_rtcd.h" -#include "./vpx_scale_rtcd.h" - -#include "vp10/common/onyxc_int.h" -#include "vp10/common/postproc.h" - -// TODO(jackychen): Replace this function with SSE2 code. 
There is -// one SSE2 implementation in vp8, so will consider how to share it -// between vp8 and vp9. -static void filter_by_weight(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - int block_size, int src_weight) { - const int dst_weight = (1 << MFQE_PRECISION) - src_weight; - const int rounding_bit = 1 << (MFQE_PRECISION - 1); - int r, c; - - for (r = 0; r < block_size; r++) { - for (c = 0; c < block_size; c++) { - dst[c] = (src[c] * src_weight + dst[c] * dst_weight + rounding_bit) - >> MFQE_PRECISION; - } - src += src_stride; - dst += dst_stride; - } -} - -void vp10_filter_by_weight8x8_c(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, int src_weight) { - filter_by_weight(src, src_stride, dst, dst_stride, 8, src_weight); -} - -void vp10_filter_by_weight16x16_c(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - int src_weight) { - filter_by_weight(src, src_stride, dst, dst_stride, 16, src_weight); -} - -static void filter_by_weight32x32(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, int weight) { - vp10_filter_by_weight16x16(src, src_stride, dst, dst_stride, weight); - vp10_filter_by_weight16x16(src + 16, src_stride, dst + 16, dst_stride, - weight); - vp10_filter_by_weight16x16(src + src_stride * 16, src_stride, - dst + dst_stride * 16, dst_stride, weight); - vp10_filter_by_weight16x16(src + src_stride * 16 + 16, src_stride, - dst + dst_stride * 16 + 16, dst_stride, weight); -} - -static void filter_by_weight64x64(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, int weight) { - filter_by_weight32x32(src, src_stride, dst, dst_stride, weight); - filter_by_weight32x32(src + 32, src_stride, dst + 32, - dst_stride, weight); - filter_by_weight32x32(src + src_stride * 32, src_stride, - dst + dst_stride * 32, dst_stride, weight); - filter_by_weight32x32(src + src_stride * 32 + 32, src_stride, - dst + dst_stride * 32 + 32, dst_stride, weight); -} - -static void 
apply_ifactor(const uint8_t *y, int y_stride, uint8_t *yd, - int yd_stride, const uint8_t *u, const uint8_t *v, - int uv_stride, uint8_t *ud, uint8_t *vd, - int uvd_stride, BLOCK_SIZE block_size, - int weight) { - if (block_size == BLOCK_16X16) { - vp10_filter_by_weight16x16(y, y_stride, yd, yd_stride, weight); - vp10_filter_by_weight8x8(u, uv_stride, ud, uvd_stride, weight); - vp10_filter_by_weight8x8(v, uv_stride, vd, uvd_stride, weight); - } else if (block_size == BLOCK_32X32) { - filter_by_weight32x32(y, y_stride, yd, yd_stride, weight); - vp10_filter_by_weight16x16(u, uv_stride, ud, uvd_stride, weight); - vp10_filter_by_weight16x16(v, uv_stride, vd, uvd_stride, weight); - } else if (block_size == BLOCK_64X64) { - filter_by_weight64x64(y, y_stride, yd, yd_stride, weight); - filter_by_weight32x32(u, uv_stride, ud, uvd_stride, weight); - filter_by_weight32x32(v, uv_stride, vd, uvd_stride, weight); - } -} - -// TODO(jackychen): Determine whether replace it with assembly code. -static void copy_mem8x8(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride) { - int r; - for (r = 0; r < 8; r++) { - memcpy(dst, src, 8); - src += src_stride; - dst += dst_stride; - } -} - -static void copy_mem16x16(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride) { - int r; - for (r = 0; r < 16; r++) { - memcpy(dst, src, 16); - src += src_stride; - dst += dst_stride; - } -} - -static void copy_mem32x32(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride) { - copy_mem16x16(src, src_stride, dst, dst_stride); - copy_mem16x16(src + 16, src_stride, dst + 16, dst_stride); - copy_mem16x16(src + src_stride * 16, src_stride, - dst + dst_stride * 16, dst_stride); - copy_mem16x16(src + src_stride * 16 + 16, src_stride, - dst + dst_stride * 16 + 16, dst_stride); -} - -void copy_mem64x64(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride) { - copy_mem32x32(src, src_stride, dst, dst_stride); - copy_mem32x32(src + 32, src_stride, dst + 32, 
dst_stride); - copy_mem32x32(src + src_stride * 32, src_stride, - dst + src_stride * 32, dst_stride); - copy_mem32x32(src + src_stride * 32 + 32, src_stride, - dst + src_stride * 32 + 32, dst_stride); -} - -static void copy_block(const uint8_t *y, const uint8_t *u, const uint8_t *v, - int y_stride, int uv_stride, uint8_t *yd, uint8_t *ud, - uint8_t *vd, int yd_stride, int uvd_stride, - BLOCK_SIZE bs) { - if (bs == BLOCK_16X16) { - copy_mem16x16(y, y_stride, yd, yd_stride); - copy_mem8x8(u, uv_stride, ud, uvd_stride); - copy_mem8x8(v, uv_stride, vd, uvd_stride); - } else if (bs == BLOCK_32X32) { - copy_mem32x32(y, y_stride, yd, yd_stride); - copy_mem16x16(u, uv_stride, ud, uvd_stride); - copy_mem16x16(v, uv_stride, vd, uvd_stride); - } else { - copy_mem64x64(y, y_stride, yd, yd_stride); - copy_mem32x32(u, uv_stride, ud, uvd_stride); - copy_mem32x32(v, uv_stride, vd, uvd_stride); - } -} - -static void get_thr(BLOCK_SIZE bs, int qdiff, int *sad_thr, int *vdiff_thr) { - const int adj = qdiff >> MFQE_PRECISION; - if (bs == BLOCK_16X16) { - *sad_thr = 7 + adj; - } else if (bs == BLOCK_32X32) { - *sad_thr = 6 + adj; - } else { // BLOCK_64X64 - *sad_thr = 5 + adj; - } - *vdiff_thr = 125 + qdiff; -} - -static void mfqe_block(BLOCK_SIZE bs, const uint8_t *y, const uint8_t *u, - const uint8_t *v, int y_stride, int uv_stride, - uint8_t *yd, uint8_t *ud, uint8_t *vd, int yd_stride, - int uvd_stride, int qdiff) { - int sad, sad_thr, vdiff, vdiff_thr; - uint32_t sse; - - get_thr(bs, qdiff, &sad_thr, &vdiff_thr); - - if (bs == BLOCK_16X16) { - vdiff = (vpx_variance16x16(y, y_stride, yd, yd_stride, &sse) + 128) >> 8; - sad = (vpx_sad16x16(y, y_stride, yd, yd_stride) + 128) >> 8; - } else if (bs == BLOCK_32X32) { - vdiff = (vpx_variance32x32(y, y_stride, yd, yd_stride, &sse) + 512) >> 10; - sad = (vpx_sad32x32(y, y_stride, yd, yd_stride) + 512) >> 10; - } else /* if (bs == BLOCK_64X64) */ { - vdiff = (vpx_variance64x64(y, y_stride, yd, yd_stride, &sse) + 2048) >> 12; - sad = 
(vpx_sad64x64(y, y_stride, yd, yd_stride) + 2048) >> 12; - } - - // vdiff > sad * 3 means vdiff should not be too small, otherwise, - // it might be a lighting change in smooth area. When there is a - // lighting change in smooth area, it is dangerous to do MFQE. - if (sad > 1 && vdiff > sad * 3) { - const int weight = 1 << MFQE_PRECISION; - int ifactor = weight * sad * vdiff / (sad_thr * vdiff_thr); - // When ifactor equals weight, no MFQE is done. - if (ifactor > weight) { - ifactor = weight; - } - apply_ifactor(y, y_stride, yd, yd_stride, u, v, uv_stride, ud, vd, - uvd_stride, bs, ifactor); - } else { - // Copy the block from current frame (i.e., no mfqe is done). - copy_block(y, u, v, y_stride, uv_stride, yd, ud, vd, - yd_stride, uvd_stride, bs); - } -} - -static int mfqe_decision(MODE_INFO *mi, BLOCK_SIZE cur_bs) { - // Check the motion in current block(for inter frame), - // or check the motion in the correlated block in last frame (for keyframe). - const int mv_len_square = mi->mbmi.mv[0].as_mv.row * - mi->mbmi.mv[0].as_mv.row + - mi->mbmi.mv[0].as_mv.col * - mi->mbmi.mv[0].as_mv.col; - const int mv_threshold = 100; - return mi->mbmi.mode >= NEARESTMV && // Not an intra block - cur_bs >= BLOCK_16X16 && - mv_len_square <= mv_threshold; -} - -// Process each partiton in a super block, recursively. -static void mfqe_partition(VP10_COMMON *cm, MODE_INFO *mi, BLOCK_SIZE bs, - const uint8_t *y, const uint8_t *u, - const uint8_t *v, int y_stride, int uv_stride, - uint8_t *yd, uint8_t *ud, uint8_t *vd, - int yd_stride, int uvd_stride) { - int mi_offset, y_offset, uv_offset; - const BLOCK_SIZE cur_bs = mi->mbmi.sb_type; - const int qdiff = cm->base_qindex - cm->postproc_state.last_base_qindex; - const int bsl = b_width_log2_lookup[bs]; - PARTITION_TYPE partition = partition_lookup[bsl][cur_bs]; - const BLOCK_SIZE subsize = get_subsize(bs, partition); - - if (cur_bs < BLOCK_8X8) { - // If there are blocks smaller than 8x8, it must be on the boundary. 
- return; - } - // No MFQE on blocks smaller than 16x16 - if (bs == BLOCK_16X16) { - partition = PARTITION_NONE; - } - if (bs == BLOCK_64X64) { - mi_offset = 4; - y_offset = 32; - uv_offset = 16; - } else { - mi_offset = 2; - y_offset = 16; - uv_offset = 8; - } - switch (partition) { - BLOCK_SIZE mfqe_bs, bs_tmp; - case PARTITION_HORZ: - if (bs == BLOCK_64X64) { - mfqe_bs = BLOCK_64X32; - bs_tmp = BLOCK_32X32; - } else { - mfqe_bs = BLOCK_32X16; - bs_tmp = BLOCK_16X16; - } - if (mfqe_decision(mi, mfqe_bs)) { - // Do mfqe on the first square partition. - mfqe_block(bs_tmp, y, u, v, y_stride, uv_stride, - yd, ud, vd, yd_stride, uvd_stride, qdiff); - // Do mfqe on the second square partition. - mfqe_block(bs_tmp, y + y_offset, u + uv_offset, v + uv_offset, - y_stride, uv_stride, yd + y_offset, ud + uv_offset, - vd + uv_offset, yd_stride, uvd_stride, qdiff); - } - if (mfqe_decision(mi + mi_offset * cm->mi_stride, mfqe_bs)) { - // Do mfqe on the first square partition. - mfqe_block(bs_tmp, y + y_offset * y_stride, u + uv_offset * uv_stride, - v + uv_offset * uv_stride, y_stride, uv_stride, - yd + y_offset * yd_stride, ud + uv_offset * uvd_stride, - vd + uv_offset * uvd_stride, yd_stride, uvd_stride, qdiff); - // Do mfqe on the second square partition. - mfqe_block(bs_tmp, y + y_offset * y_stride + y_offset, - u + uv_offset * uv_stride + uv_offset, - v + uv_offset * uv_stride + uv_offset, y_stride, - uv_stride, yd + y_offset * yd_stride + y_offset, - ud + uv_offset * uvd_stride + uv_offset, - vd + uv_offset * uvd_stride + uv_offset, - yd_stride, uvd_stride, qdiff); - } - break; - case PARTITION_VERT: - if (bs == BLOCK_64X64) { - mfqe_bs = BLOCK_32X64; - bs_tmp = BLOCK_32X32; - } else { - mfqe_bs = BLOCK_16X32; - bs_tmp = BLOCK_16X16; - } - if (mfqe_decision(mi, mfqe_bs)) { - // Do mfqe on the first square partition. - mfqe_block(bs_tmp, y, u, v, y_stride, uv_stride, - yd, ud, vd, yd_stride, uvd_stride, qdiff); - // Do mfqe on the second square partition. 
- mfqe_block(bs_tmp, y + y_offset * y_stride, u + uv_offset * uv_stride, - v + uv_offset * uv_stride, y_stride, uv_stride, - yd + y_offset * yd_stride, ud + uv_offset * uvd_stride, - vd + uv_offset * uvd_stride, yd_stride, uvd_stride, qdiff); - } - if (mfqe_decision(mi + mi_offset, mfqe_bs)) { - // Do mfqe on the first square partition. - mfqe_block(bs_tmp, y + y_offset, u + uv_offset, v + uv_offset, - y_stride, uv_stride, yd + y_offset, ud + uv_offset, - vd + uv_offset, yd_stride, uvd_stride, qdiff); - // Do mfqe on the second square partition. - mfqe_block(bs_tmp, y + y_offset * y_stride + y_offset, - u + uv_offset * uv_stride + uv_offset, - v + uv_offset * uv_stride + uv_offset, y_stride, - uv_stride, yd + y_offset * yd_stride + y_offset, - ud + uv_offset * uvd_stride + uv_offset, - vd + uv_offset * uvd_stride + uv_offset, - yd_stride, uvd_stride, qdiff); - } - break; - case PARTITION_NONE: - if (mfqe_decision(mi, cur_bs)) { - // Do mfqe on this partition. - mfqe_block(cur_bs, y, u, v, y_stride, uv_stride, - yd, ud, vd, yd_stride, uvd_stride, qdiff); - } else { - // Copy the block from current frame(i.e., no mfqe is done). - copy_block(y, u, v, y_stride, uv_stride, yd, ud, vd, - yd_stride, uvd_stride, bs); - } - break; - case PARTITION_SPLIT: - // Recursion on four square partitions, e.g. if bs is 64X64, - // then look into four 32X32 blocks in it. 
- mfqe_partition(cm, mi, subsize, y, u, v, y_stride, uv_stride, yd, ud, vd, - yd_stride, uvd_stride); - mfqe_partition(cm, mi + mi_offset, subsize, y + y_offset, u + uv_offset, - v + uv_offset, y_stride, uv_stride, yd + y_offset, - ud + uv_offset, vd + uv_offset, yd_stride, uvd_stride); - mfqe_partition(cm, mi + mi_offset * cm->mi_stride, subsize, - y + y_offset * y_stride, u + uv_offset * uv_stride, - v + uv_offset * uv_stride, y_stride, uv_stride, - yd + y_offset * yd_stride, ud + uv_offset * uvd_stride, - vd + uv_offset * uvd_stride, yd_stride, uvd_stride); - mfqe_partition(cm, mi + mi_offset * cm->mi_stride + mi_offset, - subsize, y + y_offset * y_stride + y_offset, - u + uv_offset * uv_stride + uv_offset, - v + uv_offset * uv_stride + uv_offset, y_stride, - uv_stride, yd + y_offset * yd_stride + y_offset, - ud + uv_offset * uvd_stride + uv_offset, - vd + uv_offset * uvd_stride + uv_offset, - yd_stride, uvd_stride); - break; - default: - assert(0); - } -} - -void vp10_mfqe(VP10_COMMON *cm) { - int mi_row, mi_col; - // Current decoded frame. - const YV12_BUFFER_CONFIG *show = cm->frame_to_show; - // Last decoded frame and will store the MFQE result. - YV12_BUFFER_CONFIG *dest = &cm->post_proc_buffer; - // Loop through each super block. - for (mi_row = 0; mi_row < cm->mi_rows; mi_row += MI_BLOCK_SIZE) { - for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) { - MODE_INFO *mi; - MODE_INFO *mi_local = cm->mi + (mi_row * cm->mi_stride + mi_col); - // Motion Info in last frame. 
- MODE_INFO *mi_prev = cm->postproc_state.prev_mi + - (mi_row * cm->mi_stride + mi_col); - const uint32_t y_stride = show->y_stride; - const uint32_t uv_stride = show->uv_stride; - const uint32_t yd_stride = dest->y_stride; - const uint32_t uvd_stride = dest->uv_stride; - const uint32_t row_offset_y = mi_row << 3; - const uint32_t row_offset_uv = mi_row << 2; - const uint32_t col_offset_y = mi_col << 3; - const uint32_t col_offset_uv = mi_col << 2; - const uint8_t *y = show->y_buffer + row_offset_y * y_stride + - col_offset_y; - const uint8_t *u = show->u_buffer + row_offset_uv * uv_stride + - col_offset_uv; - const uint8_t *v = show->v_buffer + row_offset_uv * uv_stride + - col_offset_uv; - uint8_t *yd = dest->y_buffer + row_offset_y * yd_stride + col_offset_y; - uint8_t *ud = dest->u_buffer + row_offset_uv * uvd_stride + - col_offset_uv; - uint8_t *vd = dest->v_buffer + row_offset_uv * uvd_stride + - col_offset_uv; - if (frame_is_intra_only(cm)) { - mi = mi_prev; - } else { - mi = mi_local; - } - mfqe_partition(cm, mi, BLOCK_64X64, y, u, v, y_stride, uv_stride, yd, ud, - vd, yd_stride, uvd_stride); - } - } -} diff --git a/libs/libvpx/vp10/common/mfqe.h b/libs/libvpx/vp10/common/mfqe.h deleted file mode 100644 index 7bedd119f1..0000000000 --- a/libs/libvpx/vp10/common/mfqe.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_COMMON_MFQE_H_ -#define VP10_COMMON_MFQE_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -// Multiframe Quality Enhancement. 
-// The aim for MFQE is to replace pixel blocks in the current frame with -// the correlated pixel blocks (with higher quality) in the last frame. -// The replacement can only be taken in stationary blocks by checking -// the motion of the blocks and other conditions such as the SAD of -// the current block and correlated block, the variance of the block -// difference, etc. -void vp10_mfqe(struct VP10Common *cm); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_MFQE_H_ diff --git a/libs/libvpx/vp10/common/mips/dspr2/itrans16_dspr2.c b/libs/libvpx/vp10/common/mips/dspr2/itrans16_dspr2.c deleted file mode 100644 index 3d1bd3d906..0000000000 --- a/libs/libvpx/vp10/common/mips/dspr2/itrans16_dspr2.c +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright (c) 2013 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include - -#include "./vpx_config.h" -#include "./vp10_rtcd.h" -#include "vp10/common/common.h" -#include "vp10/common/blockd.h" -#include "vp10/common/idct.h" -#include "vpx_dsp/mips/inv_txfm_dspr2.h" -#include "vpx_dsp/txfm_common.h" -#include "vpx_ports/mem.h" - -#if HAVE_DSPR2 -void vp10_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, - int pitch, int tx_type) { - int i, j; - DECLARE_ALIGNED(32, int16_t, out[16 * 16]); - int16_t *outptr = out; - int16_t temp_out[16]; - uint32_t pos = 45; - - /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); - - switch (tx_type) { - case DCT_DCT: // DCT in both horizontal and vertical - idct16_rows_dspr2(input, outptr, 16); - idct16_cols_add_blk_dspr2(out, dest, pitch); - break; - case ADST_DCT: // ADST in vertical, DCT in horizontal - idct16_rows_dspr2(input, outptr, 16); - - outptr = out; - - for (i = 0; i < 16; ++i) { - iadst16_dspr2(outptr, temp_out); - - for (j = 0; j < 16; ++j) - dest[j * pitch + i] = - clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6) - + dest[j * pitch + i]); - outptr += 16; - } - break; - case DCT_ADST: // DCT in vertical, ADST in horizontal - { - int16_t temp_in[16 * 16]; - - for (i = 0; i < 16; ++i) { - /* prefetch row */ - prefetch_load((const uint8_t *)(input + 16)); - - iadst16_dspr2(input, outptr); - input += 16; - outptr += 16; - } - - for (i = 0; i < 16; ++i) - for (j = 0; j < 16; ++j) - temp_in[j * 16 + i] = out[i * 16 + j]; - - idct16_cols_add_blk_dspr2(temp_in, dest, pitch); - } - break; - case ADST_ADST: // ADST in both directions - { - int16_t temp_in[16]; - - for (i = 0; i < 16; ++i) { - /* prefetch row */ - prefetch_load((const uint8_t *)(input + 16)); - - iadst16_dspr2(input, outptr); - input += 16; - outptr += 16; - } - - for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j * 16 + i]; - iadst16_dspr2(temp_in, temp_out); - for (j = 0; j < 16; ++j) - dest[j * pitch + i] = - 
clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6) - + dest[j * pitch + i]); - } - } - break; - default: - printf("vp10_short_iht16x16_add_dspr2 : Invalid tx_type\n"); - break; - } -} -#endif // #if HAVE_DSPR2 diff --git a/libs/libvpx/vp10/common/mips/dspr2/itrans4_dspr2.c b/libs/libvpx/vp10/common/mips/dspr2/itrans4_dspr2.c deleted file mode 100644 index 5249287b85..0000000000 --- a/libs/libvpx/vp10/common/mips/dspr2/itrans4_dspr2.c +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (c) 2013 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include - -#include "./vpx_config.h" -#include "./vp10_rtcd.h" -#include "vp10/common/common.h" -#include "vp10/common/blockd.h" -#include "vp10/common/idct.h" -#include "vpx_dsp/mips/inv_txfm_dspr2.h" -#include "vpx_dsp/txfm_common.h" -#include "vpx_ports/mem.h" - -#if HAVE_DSPR2 -void vp10_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest, - int dest_stride, int tx_type) { - int i, j; - DECLARE_ALIGNED(32, int16_t, out[4 * 4]); - int16_t *outptr = out; - int16_t temp_in[4 * 4], temp_out[4]; - uint32_t pos = 45; - - /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); - - switch (tx_type) { - case DCT_DCT: // DCT in both horizontal and vertical - vpx_idct4_rows_dspr2(input, outptr); - vpx_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride); - break; - case ADST_DCT: // ADST in vertical, DCT in horizontal - vpx_idct4_rows_dspr2(input, outptr); - - outptr = out; - - for (i = 0; i < 4; ++i) { - iadst4_dspr2(outptr, temp_out); - - for (j = 0; j < 4; ++j) - dest[j * dest_stride + i] = - 
clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 4) - + dest[j * dest_stride + i]); - - outptr += 4; - } - break; - case DCT_ADST: // DCT in vertical, ADST in horizontal - for (i = 0; i < 4; ++i) { - iadst4_dspr2(input, outptr); - input += 4; - outptr += 4; - } - - for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) { - temp_in[i * 4 + j] = out[j * 4 + i]; - } - } - vpx_idct4_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride); - break; - case ADST_ADST: // ADST in both directions - for (i = 0; i < 4; ++i) { - iadst4_dspr2(input, outptr); - input += 4; - outptr += 4; - } - - for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = out[j * 4 + i]; - iadst4_dspr2(temp_in, temp_out); - - for (j = 0; j < 4; ++j) - dest[j * dest_stride + i] = - clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 4) - + dest[j * dest_stride + i]); - } - break; - default: - printf("vp10_short_iht4x4_add_dspr2 : Invalid tx_type\n"); - break; - } -} -#endif // #if HAVE_DSPR2 diff --git a/libs/libvpx/vp10/common/mips/dspr2/itrans8_dspr2.c b/libs/libvpx/vp10/common/mips/dspr2/itrans8_dspr2.c deleted file mode 100644 index b25b93aee0..0000000000 --- a/libs/libvpx/vp10/common/mips/dspr2/itrans8_dspr2.c +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright (c) 2013 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include - -#include "./vpx_config.h" -#include "./vp10_rtcd.h" -#include "vp10/common/common.h" -#include "vp10/common/blockd.h" -#include "vpx_dsp/mips/inv_txfm_dspr2.h" -#include "vpx_dsp/txfm_common.h" -#include "vpx_ports/mem.h" - -#if HAVE_DSPR2 -void vp10_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest, - int dest_stride, int tx_type) { - int i, j; - DECLARE_ALIGNED(32, int16_t, out[8 * 8]); - int16_t *outptr = out; - int16_t temp_in[8 * 8], temp_out[8]; - uint32_t pos = 45; - - /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); - - switch (tx_type) { - case DCT_DCT: // DCT in both horizontal and vertical - idct8_rows_dspr2(input, outptr, 8); - idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride); - break; - case ADST_DCT: // ADST in vertical, DCT in horizontal - idct8_rows_dspr2(input, outptr, 8); - - for (i = 0; i < 8; ++i) { - iadst8_dspr2(&out[i * 8], temp_out); - - for (j = 0; j < 8; ++j) - dest[j * dest_stride + i] = - clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 5) - + dest[j * dest_stride + i]); - } - break; - case DCT_ADST: // DCT in vertical, ADST in horizontal - for (i = 0; i < 8; ++i) { - iadst8_dspr2(input, outptr); - input += 8; - outptr += 8; - } - - for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) { - temp_in[i * 8 + j] = out[j * 8 + i]; - } - } - idct8_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride); - break; - case ADST_ADST: // ADST in both directions - for (i = 0; i < 8; ++i) { - iadst8_dspr2(input, outptr); - input += 8; - outptr += 8; - } - - for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; - - iadst8_dspr2(temp_in, temp_out); - - for (j = 0; j < 8; ++j) - dest[j * dest_stride + i] = - clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 5) - + dest[j * dest_stride + i]); - } - break; - default: - printf("vp10_short_iht8x8_add_dspr2 : Invalid tx_type\n"); - break; - } -} -#endif // #if HAVE_DSPR2 diff --git 
a/libs/libvpx/vp10/common/mips/msa/idct16x16_msa.c b/libs/libvpx/vp10/common/mips/msa/idct16x16_msa.c deleted file mode 100644 index a89e41b3dd..0000000000 --- a/libs/libvpx/vp10/common/mips/msa/idct16x16_msa.c +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - -#include "vp10/common/enums.h" -#include "vpx_dsp/mips/inv_txfm_msa.h" - -void vp10_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst, - int32_t dst_stride, int32_t tx_type) { - int32_t i; - DECLARE_ALIGNED(32, int16_t, out[16 * 16]); - int16_t *out_ptr = &out[0]; - - switch (tx_type) { - case DCT_DCT: - /* transform rows */ - for (i = 0; i < 2; ++i) { - /* process 16 * 8 block */ - vpx_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7))); - } - - /* transform columns */ - for (i = 0; i < 2; ++i) { - /* process 8 * 16 block */ - vpx_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)), - dst_stride); - } - break; - case ADST_DCT: - /* transform rows */ - for (i = 0; i < 2; ++i) { - /* process 16 * 8 block */ - vpx_idct16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7))); - } - - /* transform columns */ - for (i = 0; i < 2; ++i) { - vpx_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)), - (dst + (i << 3)), dst_stride); - } - break; - case DCT_ADST: - /* transform rows */ - for (i = 0; i < 2; ++i) { - /* process 16 * 8 block */ - vpx_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7))); - } - - /* transform columns */ - for (i = 0; i < 2; ++i) { - /* process 8 * 16 block */ - vpx_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), (dst + (i << 3)), - dst_stride); - } - 
break; - case ADST_ADST: - /* transform rows */ - for (i = 0; i < 2; ++i) { - /* process 16 * 8 block */ - vpx_iadst16_1d_rows_msa((input + (i << 7)), (out_ptr + (i << 7))); - } - - /* transform columns */ - for (i = 0; i < 2; ++i) { - vpx_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)), - (dst + (i << 3)), dst_stride); - } - break; - default: - assert(0); - break; - } -} diff --git a/libs/libvpx/vp10/common/mips/msa/idct4x4_msa.c b/libs/libvpx/vp10/common/mips/msa/idct4x4_msa.c deleted file mode 100644 index 866f321ab7..0000000000 --- a/libs/libvpx/vp10/common/mips/msa/idct4x4_msa.c +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include - -#include "vp10/common/enums.h" -#include "vpx_dsp/mips/inv_txfm_msa.h" - -void vp10_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst, - int32_t dst_stride, int32_t tx_type) { - v8i16 in0, in1, in2, in3; - - /* load vector elements of 4x4 block */ - LD4x4_SH(input, in0, in1, in2, in3); - TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); - - switch (tx_type) { - case DCT_DCT: - /* DCT in horizontal */ - VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); - /* DCT in vertical */ - TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); - VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); - break; - case ADST_DCT: - /* DCT in horizontal */ - VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); - /* ADST in vertical */ - TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); - VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3); - break; - case DCT_ADST: - /* ADST in horizontal */ - VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3); - /* DCT in vertical */ - TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); - VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); - break; - case ADST_ADST: - /* ADST in horizontal */ - VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3); - /* ADST in vertical */ - TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); - VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3); - break; - default: - assert(0); - break; - } - - /* final rounding (add 2^3, divide by 2^4) and shift */ - SRARI_H4_SH(in0, in1, in2, in3, 4); - /* add block and store 4x4 */ - ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride); -} diff --git a/libs/libvpx/vp10/common/mips/msa/idct8x8_msa.c b/libs/libvpx/vp10/common/mips/msa/idct8x8_msa.c deleted file mode 100644 index 726af4e9ec..0000000000 --- a/libs/libvpx/vp10/common/mips/msa/idct8x8_msa.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - -#include "vp10/common/enums.h" -#include "vpx_dsp/mips/inv_txfm_msa.h" - -void vp10_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst, - int32_t dst_stride, int32_t tx_type) { - v8i16 in0, in1, in2, in3, in4, in5, in6, in7; - - /* load vector elements of 8x8 block */ - LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7); - - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - - switch (tx_type) { - case DCT_DCT: - /* DCT in horizontal */ - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - /* DCT in vertical */ - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - break; - case ADST_DCT: - /* DCT in horizontal */ - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - /* ADST in vertical */ - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - break; - case DCT_ADST: - /* ADST in horizontal */ - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - /* DCT in vertical */ - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - break; - case ADST_ADST: - /* ADST in horizontal */ - 
VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - /* ADST in vertical */ - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - break; - default: - assert(0); - break; - } - - /* final rounding (add 2^4, divide by 2^5) and shift */ - SRARI_H4_SH(in0, in1, in2, in3, 5); - SRARI_H4_SH(in4, in5, in6, in7, 5); - - /* add block and store 8x8 */ - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3); - dst += (4 * dst_stride); - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7); -} diff --git a/libs/libvpx/vp10/common/mips/msa/mfqe_msa.c b/libs/libvpx/vp10/common/mips/msa/mfqe_msa.c deleted file mode 100644 index 3a593a1a1c..0000000000 --- a/libs/libvpx/vp10/common/mips/msa/mfqe_msa.c +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "./vp10_rtcd.h" -#include "vp10/common/onyxc_int.h" -#include "vpx_dsp/mips/macros_msa.h" - -static void filter_by_weight8x8_msa(const uint8_t *src_ptr, int32_t src_stride, - uint8_t *dst_ptr, int32_t dst_stride, - int32_t src_weight) { - int32_t dst_weight = (1 << MFQE_PRECISION) - src_weight; - int32_t row; - uint64_t src0_d, src1_d, dst0_d, dst1_d; - v16i8 src0 = { 0 }; - v16i8 src1 = { 0 }; - v16i8 dst0 = { 0 }; - v16i8 dst1 = { 0 }; - v8i16 src_wt, dst_wt, res_h_r, res_h_l, src_r, src_l, dst_r, dst_l; - - src_wt = __msa_fill_h(src_weight); - dst_wt = __msa_fill_h(dst_weight); - - for (row = 2; row--;) { - LD2(src_ptr, src_stride, src0_d, src1_d); - src_ptr += (2 * src_stride); - LD2(dst_ptr, dst_stride, dst0_d, dst1_d); - INSERT_D2_SB(src0_d, src1_d, src0); - INSERT_D2_SB(dst0_d, dst1_d, dst0); - - LD2(src_ptr, src_stride, src0_d, src1_d); - src_ptr += (2 * src_stride); - LD2((dst_ptr + 2 * dst_stride), dst_stride, dst0_d, dst1_d); - INSERT_D2_SB(src0_d, src1_d, src1); - INSERT_D2_SB(dst0_d, dst1_d, dst1); - - UNPCK_UB_SH(src0, src_r, src_l); - UNPCK_UB_SH(dst0, dst_r, dst_l); - res_h_r = (src_r * src_wt); - res_h_r += (dst_r * dst_wt); - res_h_l = (src_l * src_wt); - res_h_l += (dst_l * dst_wt); - SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); - dst0 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r); - ST8x2_UB(dst0, dst_ptr, dst_stride); - dst_ptr += (2 * dst_stride); - - UNPCK_UB_SH(src1, src_r, src_l); - UNPCK_UB_SH(dst1, dst_r, dst_l); - res_h_r = (src_r * src_wt); - res_h_r += (dst_r * dst_wt); - res_h_l = (src_l * src_wt); - res_h_l += (dst_l * dst_wt); - SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); - dst1 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r); - ST8x2_UB(dst1, dst_ptr, dst_stride); - dst_ptr += (2 * dst_stride); - } -} - -static void filter_by_weight16x16_msa(const uint8_t *src_ptr, - int32_t src_stride, - uint8_t *dst_ptr, - int32_t dst_stride, - int32_t src_weight) { - int32_t dst_weight = (1 << 
MFQE_PRECISION) - src_weight; - int32_t row; - v16i8 src0, src1, src2, src3, dst0, dst1, dst2, dst3; - v8i16 src_wt, dst_wt, res_h_r, res_h_l, src_r, src_l, dst_r, dst_l; - - src_wt = __msa_fill_h(src_weight); - dst_wt = __msa_fill_h(dst_weight); - - for (row = 4; row--;) { - LD_SB4(src_ptr, src_stride, src0, src1, src2, src3); - src_ptr += (4 * src_stride); - LD_SB4(dst_ptr, dst_stride, dst0, dst1, dst2, dst3); - - UNPCK_UB_SH(src0, src_r, src_l); - UNPCK_UB_SH(dst0, dst_r, dst_l); - res_h_r = (src_r * src_wt); - res_h_r += (dst_r * dst_wt); - res_h_l = (src_l * src_wt); - res_h_l += (dst_l * dst_wt); - SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); - PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr); - dst_ptr += dst_stride; - - UNPCK_UB_SH(src1, src_r, src_l); - UNPCK_UB_SH(dst1, dst_r, dst_l); - res_h_r = (src_r * src_wt); - res_h_r += (dst_r * dst_wt); - res_h_l = (src_l * src_wt); - res_h_l += (dst_l * dst_wt); - SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); - PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr); - dst_ptr += dst_stride; - - UNPCK_UB_SH(src2, src_r, src_l); - UNPCK_UB_SH(dst2, dst_r, dst_l); - res_h_r = (src_r * src_wt); - res_h_r += (dst_r * dst_wt); - res_h_l = (src_l * src_wt); - res_h_l += (dst_l * dst_wt); - SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); - PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr); - dst_ptr += dst_stride; - - UNPCK_UB_SH(src3, src_r, src_l); - UNPCK_UB_SH(dst3, dst_r, dst_l); - res_h_r = (src_r * src_wt); - res_h_r += (dst_r * dst_wt); - res_h_l = (src_l * src_wt); - res_h_l += (dst_l * dst_wt); - SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); - PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr); - dst_ptr += dst_stride; - } -} - -void vp10_filter_by_weight8x8_msa(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - int src_weight) { - filter_by_weight8x8_msa(src, src_stride, dst, dst_stride, src_weight); -} - -void vp10_filter_by_weight16x16_msa(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - int src_weight) { - 
filter_by_weight16x16_msa(src, src_stride, dst, dst_stride, src_weight); -} diff --git a/libs/libvpx/vp10/common/mv.h b/libs/libvpx/vp10/common/mv.h deleted file mode 100644 index b4971a567e..0000000000 --- a/libs/libvpx/vp10/common/mv.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_COMMON_MV_H_ -#define VP10_COMMON_MV_H_ - -#include "vpx/vpx_integer.h" - -#include "vp10/common/common.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct mv { - int16_t row; - int16_t col; -} MV; - -typedef union int_mv { - uint32_t as_int; - MV as_mv; -} int_mv; /* facilitates faster equality tests and copies */ - -typedef struct mv32 { - int32_t row; - int32_t col; -} MV32; - -static INLINE int is_zero_mv(const MV *mv) { - return *((const uint32_t *)mv) == 0; -} - -static INLINE int is_equal_mv(const MV *a, const MV *b) { - return *((const uint32_t *)a) == *((const uint32_t *)b); -} - -static INLINE void clamp_mv(MV *mv, int min_col, int max_col, - int min_row, int max_row) { - mv->col = clamp(mv->col, min_col, max_col); - mv->row = clamp(mv->row, min_row, max_row); -} - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_MV_H_ diff --git a/libs/libvpx/vp10/common/mvref_common.c b/libs/libvpx/vp10/common/mvref_common.c deleted file mode 100644 index 1ef80c21aa..0000000000 --- a/libs/libvpx/vp10/common/mvref_common.c +++ /dev/null @@ -1,243 +0,0 @@ - -/* - * Copyright (c) 2012 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "vp10/common/mvref_common.h" - -// This function searches the neighbourhood of a given MB/SB -// to try and find candidate reference vectors. -static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd, - MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame, - int_mv *mv_ref_list, - int block, int mi_row, int mi_col, - find_mv_refs_sync sync, void *const data, - uint8_t *mode_context) { - const int *ref_sign_bias = cm->ref_frame_sign_bias; - int i, refmv_count = 0; - const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type]; - int different_ref_found = 0; - int context_counter = 0; - const MV_REF *const prev_frame_mvs = cm->use_prev_frame_mvs ? - cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col : NULL; - const TileInfo *const tile = &xd->tile; - const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type] << 3; - const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type] << 3; - -#if !CONFIG_MISC_FIXES - // Blank the reference vector list - memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES); -#endif - - // The nearest 2 blocks are treated differently - // if the size < 8x8 we get the mv from the bmi substructure, - // and we also need to keep a mode count. - for (i = 0; i < 2; ++i) { - const POSITION *const mv_ref = &mv_ref_search[i]; - if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) { - const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row * - xd->mi_stride]; - const MB_MODE_INFO *const candidate = &candidate_mi->mbmi; - // Keep counts for entropy encoding. 
- context_counter += mode_2_counter[candidate->mode]; - different_ref_found = 1; - - if (candidate->ref_frame[0] == ref_frame) - ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 0, mv_ref->col, block), - refmv_count, mv_ref_list, bw, bh, xd, Done); - else if (candidate->ref_frame[1] == ref_frame) - ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 1, mv_ref->col, block), - refmv_count, mv_ref_list, bw, bh, xd, Done); - } - } - - // Check the rest of the neighbors in much the same way - // as before except we don't need to keep track of sub blocks or - // mode counts. - for (; i < MVREF_NEIGHBOURS; ++i) { - const POSITION *const mv_ref = &mv_ref_search[i]; - if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) { - const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row * - xd->mi_stride]->mbmi; - different_ref_found = 1; - - if (candidate->ref_frame[0] == ref_frame) - ADD_MV_REF_LIST(candidate->mv[0], refmv_count, mv_ref_list, - bw, bh, xd, Done); - else if (candidate->ref_frame[1] == ref_frame) - ADD_MV_REF_LIST(candidate->mv[1], refmv_count, mv_ref_list, - bw, bh, xd, Done); - } - } - - // TODO(hkuang): Remove this sync after fixing pthread_cond_broadcast - // on windows platform. The sync here is unncessary if use_perv_frame_mvs - // is 0. But after removing it, there will be hang in the unit test on windows - // due to several threads waiting for a thread's signal. -#if defined(_WIN32) && !HAVE_PTHREAD_H - if (cm->frame_parallel_decode && sync != NULL) { - sync(data, mi_row); - } -#endif - - // Check the last frame's mode and mv info. - if (cm->use_prev_frame_mvs) { - // Synchronize here for frame parallel decode if sync function is provided. 
- if (cm->frame_parallel_decode && sync != NULL) { - sync(data, mi_row); - } - - if (prev_frame_mvs->ref_frame[0] == ref_frame) { - ADD_MV_REF_LIST(prev_frame_mvs->mv[0], refmv_count, mv_ref_list, - bw, bh, xd, Done); - } else if (prev_frame_mvs->ref_frame[1] == ref_frame) { - ADD_MV_REF_LIST(prev_frame_mvs->mv[1], refmv_count, mv_ref_list, - bw, bh, xd, Done); - } - } - - // Since we couldn't find 2 mvs from the same reference frame - // go back through the neighbors and find motion vectors from - // different reference frames. - if (different_ref_found) { - for (i = 0; i < MVREF_NEIGHBOURS; ++i) { - const POSITION *mv_ref = &mv_ref_search[i]; - if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) { - const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row - * xd->mi_stride]->mbmi; - - // If the candidate is INTRA we don't want to consider its mv. - IF_DIFF_REF_FRAME_ADD_MV(candidate, ref_frame, ref_sign_bias, - refmv_count, mv_ref_list, bw, bh, xd, Done); - } - } - } - - // Since we still don't have a candidate we'll try the last frame. 
- if (cm->use_prev_frame_mvs) { - if (prev_frame_mvs->ref_frame[0] != ref_frame && - prev_frame_mvs->ref_frame[0] > INTRA_FRAME) { - int_mv mv = prev_frame_mvs->mv[0]; - if (ref_sign_bias[prev_frame_mvs->ref_frame[0]] != - ref_sign_bias[ref_frame]) { - mv.as_mv.row *= -1; - mv.as_mv.col *= -1; - } - ADD_MV_REF_LIST(mv, refmv_count, mv_ref_list, bw, bh, xd, Done); - } - - if (prev_frame_mvs->ref_frame[1] > INTRA_FRAME && -#if !CONFIG_MISC_FIXES - prev_frame_mvs->mv[1].as_int != prev_frame_mvs->mv[0].as_int && -#endif - prev_frame_mvs->ref_frame[1] != ref_frame) { - int_mv mv = prev_frame_mvs->mv[1]; - if (ref_sign_bias[prev_frame_mvs->ref_frame[1]] != - ref_sign_bias[ref_frame]) { - mv.as_mv.row *= -1; - mv.as_mv.col *= -1; - } - ADD_MV_REF_LIST(mv, refmv_count, mv_ref_list, bw, bh, xd, Done); - } - } - - Done: - - mode_context[ref_frame] = counter_to_context[context_counter]; - -#if CONFIG_MISC_FIXES - for (i = refmv_count; i < MAX_MV_REF_CANDIDATES; ++i) - mv_ref_list[i].as_int = 0; -#else - // Clamp vectors - for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) - clamp_mv_ref(&mv_ref_list[i].as_mv, bw, bh, xd); -#endif -} - -void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd, - MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame, - int_mv *mv_ref_list, - int mi_row, int mi_col, - find_mv_refs_sync sync, void *const data, - uint8_t *mode_context) { - find_mv_refs_idx(cm, xd, mi, ref_frame, mv_ref_list, -1, - mi_row, mi_col, sync, data, mode_context); -} - -static void lower_mv_precision(MV *mv, int allow_hp) { - const int use_hp = allow_hp && vp10_use_mv_hp(mv); - if (!use_hp) { - if (mv->row & 1) - mv->row += (mv->row > 0 ? -1 : 1); - if (mv->col & 1) - mv->col += (mv->col > 0 ? 
-1 : 1); - } -} - -void vp10_find_best_ref_mvs(int allow_hp, - int_mv *mvlist, int_mv *nearest_mv, - int_mv *near_mv) { - int i; - // Make sure all the candidates are properly clamped etc - for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) { - lower_mv_precision(&mvlist[i].as_mv, allow_hp); - } - *nearest_mv = mvlist[0]; - *near_mv = mvlist[1]; -} - -void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd, - int block, int ref, int mi_row, int mi_col, - int_mv *nearest_mv, int_mv *near_mv, - uint8_t *mode_context) { - int_mv mv_list[MAX_MV_REF_CANDIDATES]; - MODE_INFO *const mi = xd->mi[0]; - b_mode_info *bmi = mi->bmi; - int n; - - assert(MAX_MV_REF_CANDIDATES == 2); - - find_mv_refs_idx(cm, xd, mi, mi->mbmi.ref_frame[ref], mv_list, block, - mi_row, mi_col, NULL, NULL, mode_context); - - near_mv->as_int = 0; - switch (block) { - case 0: - nearest_mv->as_int = mv_list[0].as_int; - near_mv->as_int = mv_list[1].as_int; - break; - case 1: - case 2: - nearest_mv->as_int = bmi[0].as_mv[ref].as_int; - for (n = 0; n < MAX_MV_REF_CANDIDATES; ++n) - if (nearest_mv->as_int != mv_list[n].as_int) { - near_mv->as_int = mv_list[n].as_int; - break; - } - break; - case 3: { - int_mv candidates[2 + MAX_MV_REF_CANDIDATES]; - candidates[0] = bmi[1].as_mv[ref]; - candidates[1] = bmi[0].as_mv[ref]; - candidates[2] = mv_list[0]; - candidates[3] = mv_list[1]; - - nearest_mv->as_int = bmi[2].as_mv[ref].as_int; - for (n = 0; n < 2 + MAX_MV_REF_CANDIDATES; ++n) - if (nearest_mv->as_int != candidates[n].as_int) { - near_mv->as_int = candidates[n].as_int; - break; - } - break; - } - default: - assert(0 && "Invalid block index."); - } -} diff --git a/libs/libvpx/vp10/common/mvref_common.h b/libs/libvpx/vp10/common/mvref_common.h deleted file mode 100644 index 0a98866149..0000000000 --- a/libs/libvpx/vp10/common/mvref_common.h +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Copyright (c) 2012 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ -#ifndef VP10_COMMON_MVREF_COMMON_H_ -#define VP10_COMMON_MVREF_COMMON_H_ - -#include "vp10/common/onyxc_int.h" -#include "vp10/common/blockd.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define MVREF_NEIGHBOURS 8 - -typedef struct position { - int row; - int col; -} POSITION; - -typedef enum { - BOTH_ZERO = 0, - ZERO_PLUS_PREDICTED = 1, - BOTH_PREDICTED = 2, - NEW_PLUS_NON_INTRA = 3, - BOTH_NEW = 4, - INTRA_PLUS_NON_INTRA = 5, - BOTH_INTRA = 6, - INVALID_CASE = 9 -} motion_vector_context; - -// This is used to figure out a context for the ref blocks. The code flattens -// an array that would have 3 possible counts (0, 1 & 2) for 3 choices by -// adding 9 for each intra block, 3 for each zero mv and 1 for each new -// motion vector. This single number is then converted into a context -// with a single lookup ( counter_to_context ). -static const int mode_2_counter[MB_MODE_COUNT] = { - 9, // DC_PRED - 9, // V_PRED - 9, // H_PRED - 9, // D45_PRED - 9, // D135_PRED - 9, // D117_PRED - 9, // D153_PRED - 9, // D207_PRED - 9, // D63_PRED - 9, // TM_PRED - 0, // NEARESTMV - 0, // NEARMV - 3, // ZEROMV - 1, // NEWMV -}; - -// There are 3^3 different combinations of 3 counts that can be either 0,1 or -// 2. However the actual count can never be greater than 2 so the highest -// counter we need is 18. 9 is an invalid counter that's never used. 
-static const int counter_to_context[19] = { - BOTH_PREDICTED, // 0 - NEW_PLUS_NON_INTRA, // 1 - BOTH_NEW, // 2 - ZERO_PLUS_PREDICTED, // 3 - NEW_PLUS_NON_INTRA, // 4 - INVALID_CASE, // 5 - BOTH_ZERO, // 6 - INVALID_CASE, // 7 - INVALID_CASE, // 8 - INTRA_PLUS_NON_INTRA, // 9 - INTRA_PLUS_NON_INTRA, // 10 - INVALID_CASE, // 11 - INTRA_PLUS_NON_INTRA, // 12 - INVALID_CASE, // 13 - INVALID_CASE, // 14 - INVALID_CASE, // 15 - INVALID_CASE, // 16 - INVALID_CASE, // 17 - BOTH_INTRA // 18 -}; - -static const POSITION mv_ref_blocks[BLOCK_SIZES][MVREF_NEIGHBOURS] = { - // 4X4 - {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}}, - // 4X8 - {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}}, - // 8X4 - {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}}, - // 8X8 - {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}}, - // 8X16 - {{0, -1}, {-1, 0}, {1, -1}, {-1, -1}, {0, -2}, {-2, 0}, {-2, -1}, {-1, -2}}, - // 16X8 - {{-1, 0}, {0, -1}, {-1, 1}, {-1, -1}, {-2, 0}, {0, -2}, {-1, -2}, {-2, -1}}, - // 16X16 - {{-1, 0}, {0, -1}, {-1, 1}, {1, -1}, {-1, -1}, {-3, 0}, {0, -3}, {-3, -3}}, - // 16X32 - {{0, -1}, {-1, 0}, {2, -1}, {-1, -1}, {-1, 1}, {0, -3}, {-3, 0}, {-3, -3}}, - // 32X16 - {{-1, 0}, {0, -1}, {-1, 2}, {-1, -1}, {1, -1}, {-3, 0}, {0, -3}, {-3, -3}}, - // 32X32 - {{-1, 1}, {1, -1}, {-1, 2}, {2, -1}, {-1, -1}, {-3, 0}, {0, -3}, {-3, -3}}, - // 32X64 - {{0, -1}, {-1, 0}, {4, -1}, {-1, 2}, {-1, -1}, {0, -3}, {-3, 0}, {2, -1}}, - // 64X32 - {{-1, 0}, {0, -1}, {-1, 4}, {2, -1}, {-1, -1}, {-3, 0}, {0, -3}, {-1, 2}}, - // 64X64 - {{-1, 3}, {3, -1}, {-1, 4}, {4, -1}, {-1, -1}, {-1, 0}, {0, -1}, {-1, 6}} -}; - -static const int idx_n_column_to_subblock[4][2] = { - {1, 2}, - {1, 3}, - {3, 2}, - {3, 3} -}; - -// clamp_mv_ref -#if CONFIG_MISC_FIXES -#define MV_BORDER (8 << 3) // Allow 8 pels in 1/8th pel units -#else -#define MV_BORDER (16 << 3) // Allow 16 pels in 1/8th pel units 
-#endif - -static INLINE void clamp_mv_ref(MV *mv, int bw, int bh, const MACROBLOCKD *xd) { -#if CONFIG_MISC_FIXES - clamp_mv(mv, xd->mb_to_left_edge - bw * 8 - MV_BORDER, - xd->mb_to_right_edge + bw * 8 + MV_BORDER, - xd->mb_to_top_edge - bh * 8 - MV_BORDER, - xd->mb_to_bottom_edge + bh * 8 + MV_BORDER); -#else - (void) bw; - (void) bh; - clamp_mv(mv, xd->mb_to_left_edge - MV_BORDER, - xd->mb_to_right_edge + MV_BORDER, - xd->mb_to_top_edge - MV_BORDER, - xd->mb_to_bottom_edge + MV_BORDER); -#endif -} - -// This function returns either the appropriate sub block or block's mv -// on whether the block_size < 8x8 and we have check_sub_blocks set. -static INLINE int_mv get_sub_block_mv(const MODE_INFO *candidate, int which_mv, - int search_col, int block_idx) { - return block_idx >= 0 && candidate->mbmi.sb_type < BLOCK_8X8 - ? candidate->bmi[idx_n_column_to_subblock[block_idx][search_col == 0]] - .as_mv[which_mv] - : candidate->mbmi.mv[which_mv]; -} - - -// Performs mv sign inversion if indicated by the reference frame combination. -static INLINE int_mv scale_mv(const MB_MODE_INFO *mbmi, int ref, - const MV_REFERENCE_FRAME this_ref_frame, - const int *ref_sign_bias) { - int_mv mv = mbmi->mv[ref]; - if (ref_sign_bias[mbmi->ref_frame[ref]] != ref_sign_bias[this_ref_frame]) { - mv.as_mv.row *= -1; - mv.as_mv.col *= -1; - } - return mv; -} - -#if CONFIG_MISC_FIXES -#define CLIP_IN_ADD(mv, bw, bh, xd) clamp_mv_ref(mv, bw, bh, xd) -#else -#define CLIP_IN_ADD(mv, bw, bh, xd) do {} while (0) -#endif - -// This macro is used to add a motion vector mv_ref list if it isn't -// already in the list. If it's the second motion vector it will also -// skip all additional processing and jump to done! 
-#define ADD_MV_REF_LIST(mv, refmv_count, mv_ref_list, bw, bh, xd, Done) \ - do { \ - (mv_ref_list)[(refmv_count)] = (mv); \ - CLIP_IN_ADD(&(mv_ref_list)[(refmv_count)].as_mv, (bw), (bh), (xd)); \ - if (refmv_count && (mv_ref_list)[1].as_int != (mv_ref_list)[0].as_int) { \ - (refmv_count) = 2; \ - goto Done; \ - } \ - (refmv_count) = 1; \ - } while (0) - -// If either reference frame is different, not INTRA, and they -// are different from each other scale and add the mv to our list. -#define IF_DIFF_REF_FRAME_ADD_MV(mbmi, ref_frame, ref_sign_bias, refmv_count, \ - mv_ref_list, bw, bh, xd, Done) \ - do { \ - if (is_inter_block(mbmi)) { \ - if ((mbmi)->ref_frame[0] != ref_frame) \ - ADD_MV_REF_LIST(scale_mv((mbmi), 0, ref_frame, ref_sign_bias), \ - refmv_count, mv_ref_list, bw, bh, xd, Done); \ - if (has_second_ref(mbmi) && \ - (CONFIG_MISC_FIXES || \ - (mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) && \ - (mbmi)->ref_frame[1] != ref_frame) \ - ADD_MV_REF_LIST(scale_mv((mbmi), 1, ref_frame, ref_sign_bias), \ - refmv_count, mv_ref_list, bw, bh, xd, Done); \ - } \ - } while (0) - - -// Checks that the given mi_row, mi_col and search point -// are inside the borders of the tile. 
-static INLINE int is_inside(const TileInfo *const tile, - int mi_col, int mi_row, int mi_rows, - const POSITION *mi_pos) { - return !(mi_row + mi_pos->row < 0 || - mi_col + mi_pos->col < tile->mi_col_start || - mi_row + mi_pos->row >= mi_rows || - mi_col + mi_pos->col >= tile->mi_col_end); -} - -typedef void (*find_mv_refs_sync)(void *const data, int mi_row); -void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd, - MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame, - int_mv *mv_ref_list, int mi_row, int mi_col, - find_mv_refs_sync sync, void *const data, - uint8_t *mode_context); - -// check a list of motion vectors by sad score using a number rows of pixels -// above and a number cols of pixels in the left to select the one with best -// score to use as ref motion vector -void vp10_find_best_ref_mvs(int allow_hp, - int_mv *mvlist, int_mv *nearest_mv, int_mv *near_mv); - -void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd, - int block, int ref, int mi_row, int mi_col, - int_mv *nearest_mv, int_mv *near_mv, - uint8_t *mode_context); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_MVREF_COMMON_H_ diff --git a/libs/libvpx/vp10/common/onyxc_int.h b/libs/libvpx/vp10/common/onyxc_int.h deleted file mode 100644 index ffef73312a..0000000000 --- a/libs/libvpx/vp10/common/onyxc_int.h +++ /dev/null @@ -1,494 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_COMMON_ONYXC_INT_H_ -#define VP10_COMMON_ONYXC_INT_H_ - -#include "./vpx_config.h" -#include "vpx/internal/vpx_codec_internal.h" -#include "vpx_util/vpx_thread.h" -#include "./vp10_rtcd.h" -#include "vp10/common/alloccommon.h" -#include "vp10/common/loopfilter.h" -#include "vp10/common/entropymv.h" -#include "vp10/common/entropy.h" -#include "vp10/common/entropymode.h" -#include "vp10/common/frame_buffers.h" -#include "vp10/common/quant_common.h" -#include "vp10/common/tile_common.h" - -#if CONFIG_VP9_POSTPROC -#include "vp10/common/postproc.h" -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -#define REFS_PER_FRAME (ALTREF_FRAME - LAST_FRAME + 1) - -#define REF_FRAMES_LOG2 3 -#define REF_FRAMES (1 << REF_FRAMES_LOG2) - -// 4 scratch frames for the new frames to support a maximum of 4 cores decoding -// in parallel, 3 for scaled references on the encoder. -// TODO(hkuang): Add ondemand frame buffers instead of hardcoding the number -// of framebuffers. -// TODO(jkoleszar): These 3 extra references could probably come from the -// normal reference pool. 
-#define FRAME_BUFFERS (REF_FRAMES + 7) - -#define FRAME_CONTEXTS_LOG2 2 -#define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2) - -#define NUM_PING_PONG_BUFFERS 2 - -typedef enum { - SINGLE_REFERENCE = 0, - COMPOUND_REFERENCE = 1, - REFERENCE_MODE_SELECT = 2, - REFERENCE_MODES = 3, -} REFERENCE_MODE; - -typedef enum { - RESET_FRAME_CONTEXT_NONE = 0, - RESET_FRAME_CONTEXT_CURRENT = 1, - RESET_FRAME_CONTEXT_ALL = 2, -} RESET_FRAME_CONTEXT_MODE; - -typedef enum { - /** - * Don't update frame context - */ - REFRESH_FRAME_CONTEXT_OFF, - /** - * Update frame context to values resulting from forward probability - * updates signaled in the frame header - */ - REFRESH_FRAME_CONTEXT_FORWARD, - /** - * Update frame context to values resulting from backward probability - * updates based on entropy/counts in the decoded frame - */ - REFRESH_FRAME_CONTEXT_BACKWARD, -} REFRESH_FRAME_CONTEXT_MODE; - -typedef struct { - int_mv mv[2]; - MV_REFERENCE_FRAME ref_frame[2]; -} MV_REF; - -typedef struct { - int ref_count; - MV_REF *mvs; - int mi_rows; - int mi_cols; - vpx_codec_frame_buffer_t raw_frame_buffer; - YV12_BUFFER_CONFIG buf; - - // The Following variables will only be used in frame parallel decode. - - // frame_worker_owner indicates which FrameWorker owns this buffer. NULL means - // that no FrameWorker owns, or is decoding, this buffer. - VPxWorker *frame_worker_owner; - - // row and col indicate which position frame has been decoded to in real - // pixel unit. They are reset to -1 when decoding begins and set to INT_MAX - // when the frame is fully decoded. - int row; - int col; -} RefCntBuffer; - -typedef struct BufferPool { - // Protect BufferPool from being accessed by several FrameWorkers at - // the same time during frame parallel decode. - // TODO(hkuang): Try to use atomic variable instead of locking the whole pool. -#if CONFIG_MULTITHREAD - pthread_mutex_t pool_mutex; -#endif - - // Private data associated with the frame buffer callbacks. 
- void *cb_priv; - - vpx_get_frame_buffer_cb_fn_t get_fb_cb; - vpx_release_frame_buffer_cb_fn_t release_fb_cb; - - RefCntBuffer frame_bufs[FRAME_BUFFERS]; - - // Frame buffers allocated internally by the codec. - InternalFrameBufferList int_frame_buffers; -} BufferPool; - -typedef struct VP10Common { - struct vpx_internal_error_info error; - vpx_color_space_t color_space; - int color_range; - int width; - int height; - int render_width; - int render_height; - int last_width; - int last_height; - - // TODO(jkoleszar): this implies chroma ss right now, but could vary per - // plane. Revisit as part of the future change to YV12_BUFFER_CONFIG to - // support additional planes. - int subsampling_x; - int subsampling_y; - -#if CONFIG_VP9_HIGHBITDEPTH - int use_highbitdepth; // Marks if we need to use 16bit frame buffers. -#endif - - YV12_BUFFER_CONFIG *frame_to_show; - RefCntBuffer *prev_frame; - - // TODO(hkuang): Combine this with cur_buf in macroblockd. - RefCntBuffer *cur_frame; - - int ref_frame_map[REF_FRAMES]; /* maps fb_idx to reference slot */ - - // Prepare ref_frame_map for the next frame. - // Only used in frame parallel decode. - int next_ref_frame_map[REF_FRAMES]; - - // TODO(jkoleszar): could expand active_ref_idx to 4, with 0 as intra, and - // roll new_fb_idx into it. - - // Each frame can reference REFS_PER_FRAME buffers - RefBuffer frame_refs[REFS_PER_FRAME]; - - int new_fb_idx; - -#if CONFIG_VP9_POSTPROC - YV12_BUFFER_CONFIG post_proc_buffer; - YV12_BUFFER_CONFIG post_proc_buffer_int; -#endif - - FRAME_TYPE last_frame_type; /* last frame's frame type for motion search.*/ - FRAME_TYPE frame_type; - - int show_frame; - int last_show_frame; - int show_existing_frame; - - // Flag signaling that the frame is encoded using only INTRA modes. - uint8_t intra_only; - uint8_t last_intra_only; - - int allow_high_precision_mv; - - // Flag signaling which frame contexts should be reset to default values. 
- RESET_FRAME_CONTEXT_MODE reset_frame_context; - - // MBs, mb_rows/cols is in 16-pixel units; mi_rows/cols is in - // MODE_INFO (8-pixel) units. - int MBs; - int mb_rows, mi_rows; - int mb_cols, mi_cols; - int mi_stride; - - /* profile settings */ - TX_MODE tx_mode; - - int base_qindex; - int y_dc_delta_q; - int uv_dc_delta_q; - int uv_ac_delta_q; - int16_t y_dequant[MAX_SEGMENTS][2]; - int16_t uv_dequant[MAX_SEGMENTS][2]; - - /* We allocate a MODE_INFO struct for each macroblock, together with - an extra row on top and column on the left to simplify prediction. */ - int mi_alloc_size; - MODE_INFO *mip; /* Base of allocated array */ - MODE_INFO *mi; /* Corresponds to upper left visible macroblock */ - - // TODO(agrange): Move prev_mi into encoder structure. - // prev_mip and prev_mi will only be allocated in VP9 encoder. - MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */ - MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */ - - // Separate mi functions between encoder and decoder. - int (*alloc_mi)(struct VP10Common *cm, int mi_size); - void (*free_mi)(struct VP10Common *cm); - void (*setup_mi)(struct VP10Common *cm); - - // Grid of pointers to 8x8 MODE_INFO structs. Any 8x8 not in the visible - // area will be NULL. - MODE_INFO **mi_grid_base; - MODE_INFO **mi_grid_visible; - MODE_INFO **prev_mi_grid_base; - MODE_INFO **prev_mi_grid_visible; - - // Whether to use previous frame's motion vectors for prediction. - int use_prev_frame_mvs; - - // Persistent mb segment id map used in prediction. 
- int seg_map_idx; - int prev_seg_map_idx; - - uint8_t *seg_map_array[NUM_PING_PONG_BUFFERS]; - uint8_t *last_frame_seg_map; - uint8_t *current_frame_seg_map; - int seg_map_alloc_size; - - INTERP_FILTER interp_filter; - - loop_filter_info_n lf_info; - - // Flag signaling how frame contexts should be updated at the end of - // a frame decode - REFRESH_FRAME_CONTEXT_MODE refresh_frame_context; - - int ref_frame_sign_bias[MAX_REF_FRAMES]; /* Two state 0, 1 */ - - struct loopfilter lf; - struct segmentation seg; -#if !CONFIG_MISC_FIXES - struct segmentation_probs segp; -#endif - - int frame_parallel_decode; // frame-based threading. - - // Context probabilities for reference frame prediction - MV_REFERENCE_FRAME comp_fixed_ref; - MV_REFERENCE_FRAME comp_var_ref[2]; - REFERENCE_MODE reference_mode; - - FRAME_CONTEXT *fc; /* this frame entropy */ - FRAME_CONTEXT *frame_contexts; // FRAME_CONTEXTS - unsigned int frame_context_idx; /* Context to use/update */ - FRAME_COUNTS counts; - - unsigned int current_video_frame; - BITSTREAM_PROFILE profile; - - // VPX_BITS_8 in profile 0 or 1, VPX_BITS_10 or VPX_BITS_12 in profile 2 or 3. - vpx_bit_depth_t bit_depth; - vpx_bit_depth_t dequant_bit_depth; // bit_depth of current dequantizer - -#if CONFIG_VP9_POSTPROC - struct postproc_state postproc_state; -#endif - - int error_resilient_mode; - - int log2_tile_cols, log2_tile_rows; - int tile_sz_mag; - int byte_alignment; - int skip_loop_filter; - - // Private data associated with the frame buffer callbacks. - void *cb_priv; - vpx_get_frame_buffer_cb_fn_t get_fb_cb; - vpx_release_frame_buffer_cb_fn_t release_fb_cb; - - // Handles memory for the codec. - InternalFrameBufferList int_frame_buffers; - - // External BufferPool passed from outside. 
- BufferPool *buffer_pool; - - PARTITION_CONTEXT *above_seg_context; - ENTROPY_CONTEXT *above_context; - int above_context_alloc_cols; - - // scratch memory for intraonly/keyframe forward updates from default tables - // - this is intentionally not placed in FRAME_CONTEXT since it's reset upon - // each keyframe and not used afterwards - vpx_prob kf_y_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1]; -} VP10_COMMON; - -// TODO(hkuang): Don't need to lock the whole pool after implementing atomic -// frame reference count. -static void lock_buffer_pool(BufferPool *const pool) { -#if CONFIG_MULTITHREAD - pthread_mutex_lock(&pool->pool_mutex); -#else - (void)pool; -#endif -} - -static void unlock_buffer_pool(BufferPool *const pool) { -#if CONFIG_MULTITHREAD - pthread_mutex_unlock(&pool->pool_mutex); -#else - (void)pool; -#endif -} - -static INLINE YV12_BUFFER_CONFIG *get_ref_frame(VP10_COMMON *cm, int index) { - if (index < 0 || index >= REF_FRAMES) - return NULL; - if (cm->ref_frame_map[index] < 0) - return NULL; - assert(cm->ref_frame_map[index] < FRAME_BUFFERS); - return &cm->buffer_pool->frame_bufs[cm->ref_frame_map[index]].buf; -} - -static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(VP10_COMMON *cm) { - return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf; -} - -static INLINE int get_free_fb(VP10_COMMON *cm) { - RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; - int i; - - lock_buffer_pool(cm->buffer_pool); - for (i = 0; i < FRAME_BUFFERS; ++i) - if (frame_bufs[i].ref_count == 0) - break; - - if (i != FRAME_BUFFERS) { - frame_bufs[i].ref_count = 1; - } else { - // Reset i to be INVALID_IDX to indicate no free buffer found. 
- i = INVALID_IDX; - } - - unlock_buffer_pool(cm->buffer_pool); - return i; -} - -static INLINE void ref_cnt_fb(RefCntBuffer *bufs, int *idx, int new_idx) { - const int ref_index = *idx; - - if (ref_index >= 0 && bufs[ref_index].ref_count > 0) - bufs[ref_index].ref_count--; - - *idx = new_idx; - - bufs[new_idx].ref_count++; -} - -static INLINE int mi_cols_aligned_to_sb(int n_mis) { - return ALIGN_POWER_OF_TWO(n_mis, MI_BLOCK_SIZE_LOG2); -} - -static INLINE int frame_is_intra_only(const VP10_COMMON *const cm) { - return cm->frame_type == KEY_FRAME || cm->intra_only; -} - -static INLINE void vp10_init_macroblockd(VP10_COMMON *cm, MACROBLOCKD *xd, - tran_low_t *dqcoeff) { - int i; - - for (i = 0; i < MAX_MB_PLANE; ++i) { - xd->plane[i].dqcoeff = dqcoeff; - xd->above_context[i] = cm->above_context + - i * sizeof(*cm->above_context) * 2 * mi_cols_aligned_to_sb(cm->mi_cols); - - if (xd->plane[i].plane_type == PLANE_TYPE_Y) { - memcpy(xd->plane[i].seg_dequant, cm->y_dequant, sizeof(cm->y_dequant)); - } else { - memcpy(xd->plane[i].seg_dequant, cm->uv_dequant, sizeof(cm->uv_dequant)); - } - xd->fc = cm->fc; - } - - xd->above_seg_context = cm->above_seg_context; - xd->mi_stride = cm->mi_stride; - xd->error_info = &cm->error; -} - -static INLINE void set_skip_context(MACROBLOCKD *xd, int mi_row, int mi_col) { - const int above_idx = mi_col * 2; - const int left_idx = (mi_row * 2) & 15; - int i; - for (i = 0; i < MAX_MB_PLANE; ++i) { - struct macroblockd_plane *const pd = &xd->plane[i]; - pd->above_context = &xd->above_context[i][above_idx >> pd->subsampling_x]; - pd->left_context = &xd->left_context[i][left_idx >> pd->subsampling_y]; - } -} - -static INLINE int calc_mi_size(int len) { - // len is in mi units. 
- return len + MI_BLOCK_SIZE; -} - -static INLINE void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile, - int mi_row, int bh, - int mi_col, int bw, - int mi_rows, int mi_cols) { - xd->mb_to_top_edge = -((mi_row * MI_SIZE) * 8); - xd->mb_to_bottom_edge = ((mi_rows - bh - mi_row) * MI_SIZE) * 8; - xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8); - xd->mb_to_right_edge = ((mi_cols - bw - mi_col) * MI_SIZE) * 8; - - // Are edges available for intra prediction? - xd->up_available = (mi_row != 0); - xd->left_available = (mi_col > tile->mi_col_start); - if (xd->up_available) { - xd->above_mi = xd->mi[-xd->mi_stride]; - // above_mi may be NULL in VP9 encoder's first pass. - xd->above_mbmi = xd->above_mi ? &xd->above_mi->mbmi : NULL; - } else { - xd->above_mi = NULL; - xd->above_mbmi = NULL; - } - - if (xd->left_available) { - xd->left_mi = xd->mi[-1]; - // left_mi may be NULL in VP9 encoder's first pass. - xd->left_mbmi = xd->left_mi ? &xd->left_mi->mbmi : NULL; - } else { - xd->left_mi = NULL; - xd->left_mbmi = NULL; - } -} - -static INLINE const vpx_prob *get_y_mode_probs(const VP10_COMMON *cm, - const MODE_INFO *mi, - const MODE_INFO *above_mi, - const MODE_INFO *left_mi, - int block) { - const PREDICTION_MODE above = vp10_above_block_mode(mi, above_mi, block); - const PREDICTION_MODE left = vp10_left_block_mode(mi, left_mi, block); - return cm->kf_y_prob[above][left]; -} - -static INLINE void update_partition_context(MACROBLOCKD *xd, - int mi_row, int mi_col, - BLOCK_SIZE subsize, - BLOCK_SIZE bsize) { - PARTITION_CONTEXT *const above_ctx = xd->above_seg_context + mi_col; - PARTITION_CONTEXT *const left_ctx = xd->left_seg_context + (mi_row & MI_MASK); - - // num_4x4_blocks_wide_lookup[bsize] / 2 - const int bs = num_8x8_blocks_wide_lookup[bsize]; - - // update the partition context at the end notes. set partition bits - // of block sizes larger than the current one to be one, and partition - // bits of smaller block sizes to be zero. 
- memset(above_ctx, partition_context_lookup[subsize].above, bs); - memset(left_ctx, partition_context_lookup[subsize].left, bs); -} - -static INLINE int partition_plane_context(const MACROBLOCKD *xd, - int mi_row, int mi_col, - BLOCK_SIZE bsize) { - const PARTITION_CONTEXT *above_ctx = xd->above_seg_context + mi_col; - const PARTITION_CONTEXT *left_ctx = xd->left_seg_context + (mi_row & MI_MASK); - const int bsl = mi_width_log2_lookup[bsize]; - int above = (*above_ctx >> bsl) & 1 , left = (*left_ctx >> bsl) & 1; - - assert(b_width_log2_lookup[bsize] == b_height_log2_lookup[bsize]); - assert(bsl >= 0); - - return (left * 2 + above) + bsl * PARTITION_PLOFFSET; -} - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_ONYXC_INT_H_ diff --git a/libs/libvpx/vp10/common/postproc.c b/libs/libvpx/vp10/common/postproc.c deleted file mode 100644 index a6ea9c0eff..0000000000 --- a/libs/libvpx/vp10/common/postproc.c +++ /dev/null @@ -1,746 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include -#include - -#include "./vpx_config.h" -#include "./vpx_scale_rtcd.h" -#include "./vp10_rtcd.h" - -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_ports/mem.h" -#include "vpx_ports/system_state.h" -#include "vpx_scale/vpx_scale.h" -#include "vpx_scale/yv12config.h" - -#include "vp10/common/onyxc_int.h" -#include "vp10/common/postproc.h" -#include "vp10/common/textblit.h" - -#if CONFIG_VP9_POSTPROC -static const short kernel5[] = { - 1, 1, 4, 1, 1 -}; - -const short vp10_rv[] = { - 8, 5, 2, 2, 8, 12, 4, 9, 8, 3, - 0, 3, 9, 0, 0, 0, 8, 3, 14, 4, - 10, 1, 11, 14, 1, 14, 9, 6, 12, 11, - 8, 6, 10, 0, 0, 8, 9, 0, 3, 14, - 8, 11, 13, 4, 2, 9, 0, 3, 9, 6, - 1, 2, 3, 14, 13, 1, 8, 2, 9, 7, - 3, 3, 1, 13, 13, 6, 6, 5, 2, 7, - 11, 9, 11, 8, 7, 3, 2, 0, 13, 13, - 14, 4, 12, 5, 12, 10, 8, 10, 13, 10, - 4, 14, 4, 10, 0, 8, 11, 1, 13, 7, - 7, 14, 6, 14, 13, 2, 13, 5, 4, 4, - 0, 10, 0, 5, 13, 2, 12, 7, 11, 13, - 8, 0, 4, 10, 7, 2, 7, 2, 2, 5, - 3, 4, 7, 3, 3, 14, 14, 5, 9, 13, - 3, 14, 3, 6, 3, 0, 11, 8, 13, 1, - 13, 1, 12, 0, 10, 9, 7, 6, 2, 8, - 5, 2, 13, 7, 1, 13, 14, 7, 6, 7, - 9, 6, 10, 11, 7, 8, 7, 5, 14, 8, - 4, 4, 0, 8, 7, 10, 0, 8, 14, 11, - 3, 12, 5, 7, 14, 3, 14, 5, 2, 6, - 11, 12, 12, 8, 0, 11, 13, 1, 2, 0, - 5, 10, 14, 7, 8, 0, 4, 11, 0, 8, - 0, 3, 10, 5, 8, 0, 11, 6, 7, 8, - 10, 7, 13, 9, 2, 5, 1, 5, 10, 2, - 4, 3, 5, 6, 10, 8, 9, 4, 11, 14, - 0, 10, 0, 5, 13, 2, 12, 7, 11, 13, - 8, 0, 4, 10, 7, 2, 7, 2, 2, 5, - 3, 4, 7, 3, 3, 14, 14, 5, 9, 13, - 3, 14, 3, 6, 3, 0, 11, 8, 13, 1, - 13, 1, 12, 0, 10, 9, 7, 6, 2, 8, - 5, 2, 13, 7, 1, 13, 14, 7, 6, 7, - 9, 6, 10, 11, 7, 8, 7, 5, 14, 8, - 4, 4, 0, 8, 7, 10, 0, 8, 14, 11, - 3, 12, 5, 7, 14, 3, 14, 5, 2, 6, - 11, 12, 12, 8, 0, 11, 13, 1, 2, 0, - 5, 10, 14, 7, 8, 0, 4, 11, 0, 8, - 0, 3, 10, 5, 8, 0, 11, 6, 7, 8, - 10, 7, 13, 9, 2, 5, 1, 5, 10, 2, - 4, 3, 5, 6, 10, 8, 9, 4, 11, 14, - 3, 8, 3, 7, 8, 5, 11, 4, 12, 3, - 11, 9, 14, 8, 14, 13, 4, 3, 1, 2, - 14, 6, 5, 4, 4, 11, 4, 6, 2, 1, - 5, 8, 8, 
12, 13, 5, 14, 10, 12, 13, - 0, 9, 5, 5, 11, 10, 13, 9, 10, 13, -}; - -static const uint8_t q_diff_thresh = 20; -static const uint8_t last_q_thresh = 170; - -void vp10_post_proc_down_and_across_c(const uint8_t *src_ptr, - uint8_t *dst_ptr, - int src_pixels_per_line, - int dst_pixels_per_line, - int rows, - int cols, - int flimit) { - uint8_t const *p_src; - uint8_t *p_dst; - int row, col, i, v, kernel; - int pitch = src_pixels_per_line; - uint8_t d[8]; - (void)dst_pixels_per_line; - - for (row = 0; row < rows; row++) { - /* post_proc_down for one row */ - p_src = src_ptr; - p_dst = dst_ptr; - - for (col = 0; col < cols; col++) { - kernel = 4; - v = p_src[col]; - - for (i = -2; i <= 2; i++) { - if (abs(v - p_src[col + i * pitch]) > flimit) - goto down_skip_convolve; - - kernel += kernel5[2 + i] * p_src[col + i * pitch]; - } - - v = (kernel >> 3); - down_skip_convolve: - p_dst[col] = v; - } - - /* now post_proc_across */ - p_src = dst_ptr; - p_dst = dst_ptr; - - for (i = 0; i < 8; i++) - d[i] = p_src[i]; - - for (col = 0; col < cols; col++) { - kernel = 4; - v = p_src[col]; - - d[col & 7] = v; - - for (i = -2; i <= 2; i++) { - if (abs(v - p_src[col + i]) > flimit) - goto across_skip_convolve; - - kernel += kernel5[2 + i] * p_src[col + i]; - } - - d[col & 7] = (kernel >> 3); - across_skip_convolve: - - if (col >= 2) - p_dst[col - 2] = d[(col - 2) & 7]; - } - - /* handle the last two pixels */ - p_dst[col - 2] = d[(col - 2) & 7]; - p_dst[col - 1] = d[(col - 1) & 7]; - - - /* next row */ - src_ptr += pitch; - dst_ptr += pitch; - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_post_proc_down_and_across_c(const uint16_t *src_ptr, - uint16_t *dst_ptr, - int src_pixels_per_line, - int dst_pixels_per_line, - int rows, - int cols, - int flimit) { - uint16_t const *p_src; - uint16_t *p_dst; - int row, col, i, v, kernel; - int pitch = src_pixels_per_line; - uint16_t d[8]; - - for (row = 0; row < rows; row++) { - // post_proc_down for one row. 
- p_src = src_ptr; - p_dst = dst_ptr; - - for (col = 0; col < cols; col++) { - kernel = 4; - v = p_src[col]; - - for (i = -2; i <= 2; i++) { - if (abs(v - p_src[col + i * pitch]) > flimit) - goto down_skip_convolve; - - kernel += kernel5[2 + i] * p_src[col + i * pitch]; - } - - v = (kernel >> 3); - - down_skip_convolve: - p_dst[col] = v; - } - - /* now post_proc_across */ - p_src = dst_ptr; - p_dst = dst_ptr; - - for (i = 0; i < 8; i++) - d[i] = p_src[i]; - - for (col = 0; col < cols; col++) { - kernel = 4; - v = p_src[col]; - - d[col & 7] = v; - - for (i = -2; i <= 2; i++) { - if (abs(v - p_src[col + i]) > flimit) - goto across_skip_convolve; - - kernel += kernel5[2 + i] * p_src[col + i]; - } - - d[col & 7] = (kernel >> 3); - - across_skip_convolve: - if (col >= 2) - p_dst[col - 2] = d[(col - 2) & 7]; - } - - /* handle the last two pixels */ - p_dst[col - 2] = d[(col - 2) & 7]; - p_dst[col - 1] = d[(col - 1) & 7]; - - - /* next row */ - src_ptr += pitch; - dst_ptr += dst_pixels_per_line; - } -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -static int q2mbl(int x) { - if (x < 20) x = 20; - - x = 50 + (x - 50) * 10 / 8; - return x * x / 3; -} - -void vp10_mbpost_proc_across_ip_c(uint8_t *src, int pitch, - int rows, int cols, int flimit) { - int r, c, i; - uint8_t *s = src; - uint8_t d[16]; - - for (r = 0; r < rows; r++) { - int sumsq = 0; - int sum = 0; - - for (i = -8; i <= 6; i++) { - sumsq += s[i] * s[i]; - sum += s[i]; - d[i + 8] = 0; - } - - for (c = 0; c < cols + 8; c++) { - int x = s[c + 7] - s[c - 8]; - int y = s[c + 7] + s[c - 8]; - - sum += x; - sumsq += x * y; - - d[c & 15] = s[c]; - - if (sumsq * 15 - sum * sum < flimit) { - d[c & 15] = (8 + sum + s[c]) >> 4; - } - - s[c - 8] = d[(c - 8) & 15]; - } - s += pitch; - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_mbpost_proc_across_ip_c(uint16_t *src, int pitch, - int rows, int cols, int flimit) { - int r, c, i; - - uint16_t *s = src; - uint16_t d[16]; - - - for (r = 0; r < rows; r++) { - int sumsq = 0; - 
int sum = 0; - - for (i = -8; i <= 6; i++) { - sumsq += s[i] * s[i]; - sum += s[i]; - d[i + 8] = 0; - } - - for (c = 0; c < cols + 8; c++) { - int x = s[c + 7] - s[c - 8]; - int y = s[c + 7] + s[c - 8]; - - sum += x; - sumsq += x * y; - - d[c & 15] = s[c]; - - if (sumsq * 15 - sum * sum < flimit) { - d[c & 15] = (8 + sum + s[c]) >> 4; - } - - s[c - 8] = d[(c - 8) & 15]; - } - - s += pitch; - } -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -void vp10_mbpost_proc_down_c(uint8_t *dst, int pitch, - int rows, int cols, int flimit) { - int r, c, i; - const short *rv3 = &vp10_rv[63 & rand()]; // NOLINT - - for (c = 0; c < cols; c++) { - uint8_t *s = &dst[c]; - int sumsq = 0; - int sum = 0; - uint8_t d[16]; - const short *rv2 = rv3 + ((c * 17) & 127); - - for (i = -8; i <= 6; i++) { - sumsq += s[i * pitch] * s[i * pitch]; - sum += s[i * pitch]; - } - - for (r = 0; r < rows + 8; r++) { - sumsq += s[7 * pitch] * s[ 7 * pitch] - s[-8 * pitch] * s[-8 * pitch]; - sum += s[7 * pitch] - s[-8 * pitch]; - d[r & 15] = s[0]; - - if (sumsq * 15 - sum * sum < flimit) { - d[r & 15] = (rv2[r & 127] + sum + s[0]) >> 4; - } - - s[-8 * pitch] = d[(r - 8) & 15]; - s += pitch; - } - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_mbpost_proc_down_c(uint16_t *dst, int pitch, - int rows, int cols, int flimit) { - int r, c, i; - const int16_t *rv3 = &vp10_rv[63 & rand()]; // NOLINT - - for (c = 0; c < cols; c++) { - uint16_t *s = &dst[c]; - int sumsq = 0; - int sum = 0; - uint16_t d[16]; - const int16_t *rv2 = rv3 + ((c * 17) & 127); - - for (i = -8; i <= 6; i++) { - sumsq += s[i * pitch] * s[i * pitch]; - sum += s[i * pitch]; - } - - for (r = 0; r < rows + 8; r++) { - sumsq += s[7 * pitch] * s[ 7 * pitch] - s[-8 * pitch] * s[-8 * pitch]; - sum += s[7 * pitch] - s[-8 * pitch]; - d[r & 15] = s[0]; - - if (sumsq * 15 - sum * sum < flimit) { - d[r & 15] = (rv2[r & 127] + sum + s[0]) >> 4; - } - - s[-8 * pitch] = d[(r - 8) & 15]; - s += pitch; - } - } -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -static 
void deblock_and_de_macro_block(YV12_BUFFER_CONFIG *source, - YV12_BUFFER_CONFIG *post, - int q, - int low_var_thresh, - int flag) { - double level = 6.0e-05 * q * q * q - .0067 * q * q + .306 * q + .0065; - int ppl = (int)(level + .5); - (void) low_var_thresh; - (void) flag; - -#if CONFIG_VP9_HIGHBITDEPTH - if (source->flags & YV12_FLAG_HIGHBITDEPTH) { - vp10_highbd_post_proc_down_and_across(CONVERT_TO_SHORTPTR(source->y_buffer), - CONVERT_TO_SHORTPTR(post->y_buffer), - source->y_stride, post->y_stride, - source->y_height, source->y_width, - ppl); - - vp10_highbd_mbpost_proc_across_ip(CONVERT_TO_SHORTPTR(post->y_buffer), - post->y_stride, post->y_height, - post->y_width, q2mbl(q)); - - vp10_highbd_mbpost_proc_down(CONVERT_TO_SHORTPTR(post->y_buffer), - post->y_stride, post->y_height, - post->y_width, q2mbl(q)); - - vp10_highbd_post_proc_down_and_across(CONVERT_TO_SHORTPTR(source->u_buffer), - CONVERT_TO_SHORTPTR(post->u_buffer), - source->uv_stride, post->uv_stride, - source->uv_height, source->uv_width, - ppl); - vp10_highbd_post_proc_down_and_across(CONVERT_TO_SHORTPTR(source->v_buffer), - CONVERT_TO_SHORTPTR(post->v_buffer), - source->uv_stride, post->uv_stride, - source->uv_height, source->uv_width, - ppl); - } else { - vp10_post_proc_down_and_across(source->y_buffer, post->y_buffer, - source->y_stride, post->y_stride, - source->y_height, source->y_width, ppl); - - vp10_mbpost_proc_across_ip(post->y_buffer, post->y_stride, post->y_height, - post->y_width, q2mbl(q)); - - vp10_mbpost_proc_down(post->y_buffer, post->y_stride, post->y_height, - post->y_width, q2mbl(q)); - - vp10_post_proc_down_and_across(source->u_buffer, post->u_buffer, - source->uv_stride, post->uv_stride, - source->uv_height, source->uv_width, ppl); - vp10_post_proc_down_and_across(source->v_buffer, post->v_buffer, - source->uv_stride, post->uv_stride, - source->uv_height, source->uv_width, ppl); - } -#else - vp10_post_proc_down_and_across(source->y_buffer, post->y_buffer, - source->y_stride, 
post->y_stride, - source->y_height, source->y_width, ppl); - - vp10_mbpost_proc_across_ip(post->y_buffer, post->y_stride, post->y_height, - post->y_width, q2mbl(q)); - - vp10_mbpost_proc_down(post->y_buffer, post->y_stride, post->y_height, - post->y_width, q2mbl(q)); - - vp10_post_proc_down_and_across(source->u_buffer, post->u_buffer, - source->uv_stride, post->uv_stride, - source->uv_height, source->uv_width, ppl); - vp10_post_proc_down_and_across(source->v_buffer, post->v_buffer, - source->uv_stride, post->uv_stride, - source->uv_height, source->uv_width, ppl); -#endif // CONFIG_VP9_HIGHBITDEPTH -} - -void vp10_deblock(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, - int q) { - const int ppl = (int)(6.0e-05 * q * q * q - 0.0067 * q * q + 0.306 * q - + 0.0065 + 0.5); - int i; - - const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer}; - const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride}; - const int src_widths[3] = {src->y_width, src->uv_width, src->uv_width}; - const int src_heights[3] = {src->y_height, src->uv_height, src->uv_height}; - - uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer}; - const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride}; - - for (i = 0; i < MAX_MB_PLANE; ++i) { -#if CONFIG_VP9_HIGHBITDEPTH - assert((src->flags & YV12_FLAG_HIGHBITDEPTH) == - (dst->flags & YV12_FLAG_HIGHBITDEPTH)); - if (src->flags & YV12_FLAG_HIGHBITDEPTH) { - vp10_highbd_post_proc_down_and_across(CONVERT_TO_SHORTPTR(srcs[i]), - CONVERT_TO_SHORTPTR(dsts[i]), - src_strides[i], dst_strides[i], - src_heights[i], src_widths[i], ppl); - } else { - vp10_post_proc_down_and_across(srcs[i], dsts[i], - src_strides[i], dst_strides[i], - src_heights[i], src_widths[i], ppl); - } -#else - vp10_post_proc_down_and_across(srcs[i], dsts[i], - src_strides[i], dst_strides[i], - src_heights[i], src_widths[i], ppl); -#endif // CONFIG_VP9_HIGHBITDEPTH - } -} - -void vp10_denoise(const 
YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, - int q) { - const int ppl = (int)(6.0e-05 * q * q * q - 0.0067 * q * q + 0.306 * q - + 0.0065 + 0.5); - int i; - - const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer}; - const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride}; - const int src_widths[3] = {src->y_width, src->uv_width, src->uv_width}; - const int src_heights[3] = {src->y_height, src->uv_height, src->uv_height}; - - uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer}; - const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride}; - - for (i = 0; i < MAX_MB_PLANE; ++i) { - const int src_stride = src_strides[i]; - const int src_width = src_widths[i] - 4; - const int src_height = src_heights[i] - 4; - const int dst_stride = dst_strides[i]; - -#if CONFIG_VP9_HIGHBITDEPTH - assert((src->flags & YV12_FLAG_HIGHBITDEPTH) == - (dst->flags & YV12_FLAG_HIGHBITDEPTH)); - if (src->flags & YV12_FLAG_HIGHBITDEPTH) { - const uint16_t *const src_plane = CONVERT_TO_SHORTPTR( - srcs[i] + 2 * src_stride + 2); - uint16_t *const dst_plane = CONVERT_TO_SHORTPTR( - dsts[i] + 2 * dst_stride + 2); - vp10_highbd_post_proc_down_and_across(src_plane, dst_plane, src_stride, - dst_stride, src_height, src_width, - ppl); - } else { - const uint8_t *const src_plane = srcs[i] + 2 * src_stride + 2; - uint8_t *const dst_plane = dsts[i] + 2 * dst_stride + 2; - - vp10_post_proc_down_and_across(src_plane, dst_plane, src_stride, - dst_stride, src_height, src_width, ppl); - } -#else - const uint8_t *const src_plane = srcs[i] + 2 * src_stride + 2; - uint8_t *const dst_plane = dsts[i] + 2 * dst_stride + 2; - vp10_post_proc_down_and_across(src_plane, dst_plane, src_stride, dst_stride, - src_height, src_width, ppl); -#endif - } -} - -static double gaussian(double sigma, double mu, double x) { - return 1 / (sigma * sqrt(2.0 * 3.14159265)) * - (exp(-(x - mu) * (x - mu) / (2 * sigma * sigma))); -} - -static void 
fillrd(struct postproc_state *state, int q, int a) { - char char_dist[300]; - - double sigma; - int ai = a, qi = q, i; - - vpx_clear_system_state(); - - sigma = ai + .5 + .6 * (63 - qi) / 63.0; - - /* set up a lookup table of 256 entries that matches - * a gaussian distribution with sigma determined by q. - */ - { - int next, j; - - next = 0; - - for (i = -32; i < 32; i++) { - int a_i = (int)(0.5 + 256 * gaussian(sigma, 0, i)); - - if (a_i) { - for (j = 0; j < a_i; j++) { - char_dist[next + j] = (char) i; - } - - next = next + j; - } - } - - for (; next < 256; next++) - char_dist[next] = 0; - } - - for (i = 0; i < 3072; i++) { - state->noise[i] = char_dist[rand() & 0xff]; // NOLINT - } - - for (i = 0; i < 16; i++) { - state->blackclamp[i] = -char_dist[0]; - state->whiteclamp[i] = -char_dist[0]; - state->bothclamp[i] = -2 * char_dist[0]; - } - - state->last_q = q; - state->last_noise = a; -} - -void vp10_plane_add_noise_c(uint8_t *start, char *noise, - char blackclamp[16], - char whiteclamp[16], - char bothclamp[16], - unsigned int width, unsigned int height, int pitch) { - unsigned int i, j; - - // TODO(jbb): why does simd code use both but c doesn't, normalize and - // fix.. - (void) bothclamp; - for (i = 0; i < height; i++) { - uint8_t *pos = start + i * pitch; - char *ref = (char *)(noise + (rand() & 0xff)); // NOLINT - - for (j = 0; j < width; j++) { - if (pos[j] < blackclamp[0]) - pos[j] = blackclamp[0]; - - if (pos[j] > 255 + whiteclamp[0]) - pos[j] = 255 + whiteclamp[0]; - - pos[j] += ref[j]; - } - } -} - -static void swap_mi_and_prev_mi(VP10_COMMON *cm) { - // Current mip will be the prev_mip for the next frame. - MODE_INFO *temp = cm->postproc_state.prev_mip; - cm->postproc_state.prev_mip = cm->mip; - cm->mip = temp; - - // Update the upper left visible macroblock ptrs. 
- cm->mi = cm->mip + cm->mi_stride + 1; - cm->postproc_state.prev_mi = cm->postproc_state.prev_mip + cm->mi_stride + 1; -} - -int vp10_post_proc_frame(struct VP10Common *cm, - YV12_BUFFER_CONFIG *dest, vp10_ppflags_t *ppflags) { - const int q = VPXMIN(105, cm->lf.filter_level * 2); - const int flags = ppflags->post_proc_flag; - YV12_BUFFER_CONFIG *const ppbuf = &cm->post_proc_buffer; - struct postproc_state *const ppstate = &cm->postproc_state; - - if (!cm->frame_to_show) - return -1; - - if (!flags) { - *dest = *cm->frame_to_show; - return 0; - } - - vpx_clear_system_state(); - - // Alloc memory for prev_mip in the first frame. - if (cm->current_video_frame == 1) { - cm->postproc_state.last_base_qindex = cm->base_qindex; - cm->postproc_state.last_frame_valid = 1; - ppstate->prev_mip = vpx_calloc(cm->mi_alloc_size, sizeof(*cm->mip)); - if (!ppstate->prev_mip) { - return 1; - } - ppstate->prev_mi = ppstate->prev_mip + cm->mi_stride + 1; - memset(ppstate->prev_mip, 0, - cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip)); - } - - // Allocate post_proc_buffer_int if needed. - if ((flags & VP9D_MFQE) && !cm->post_proc_buffer_int.buffer_alloc) { - if ((flags & VP9D_DEMACROBLOCK) || (flags & VP9D_DEBLOCK)) { - const int width = ALIGN_POWER_OF_TWO(cm->width, 4); - const int height = ALIGN_POWER_OF_TWO(cm->height, 4); - - if (vpx_alloc_frame_buffer(&cm->post_proc_buffer_int, width, height, - cm->subsampling_x, cm->subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, -#endif // CONFIG_VP9_HIGHBITDEPTH - VP9_ENC_BORDER_IN_PIXELS, - cm->byte_alignment) < 0) { - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate MFQE framebuffer"); - } - - // Ensure that postproc is set to all 0s so that post proc - // doesn't pull random data in from edge. 
- memset(cm->post_proc_buffer_int.buffer_alloc, 128, - cm->post_proc_buffer.frame_size); - } - } - - if (vpx_realloc_frame_buffer(&cm->post_proc_buffer, cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, -#endif - VP9_DEC_BORDER_IN_PIXELS, cm->byte_alignment, - NULL, NULL, NULL) < 0) - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate post-processing buffer"); - - if ((flags & VP9D_MFQE) && cm->current_video_frame >= 2 && - cm->postproc_state.last_frame_valid && cm->bit_depth == 8 && - cm->postproc_state.last_base_qindex <= last_q_thresh && - cm->base_qindex - cm->postproc_state.last_base_qindex >= q_diff_thresh) { - vp10_mfqe(cm); - // TODO(jackychen): Consider whether enable deblocking by default - // if mfqe is enabled. Need to take both the quality and the speed - // into consideration. - if ((flags & VP9D_DEMACROBLOCK) || (flags & VP9D_DEBLOCK)) { - vp8_yv12_copy_frame(ppbuf, &cm->post_proc_buffer_int); - } - if ((flags & VP9D_DEMACROBLOCK) && cm->post_proc_buffer_int.buffer_alloc) { - deblock_and_de_macro_block(&cm->post_proc_buffer_int, ppbuf, - q + (ppflags->deblocking_level - 5) * 10, - 1, 0); - } else if (flags & VP9D_DEBLOCK) { - vp10_deblock(&cm->post_proc_buffer_int, ppbuf, q); - } else { - vp8_yv12_copy_frame(&cm->post_proc_buffer_int, ppbuf); - } - } else if (flags & VP9D_DEMACROBLOCK) { - deblock_and_de_macro_block(cm->frame_to_show, ppbuf, - q + (ppflags->deblocking_level - 5) * 10, 1, 0); - } else if (flags & VP9D_DEBLOCK) { - vp10_deblock(cm->frame_to_show, ppbuf, q); - } else { - vp8_yv12_copy_frame(cm->frame_to_show, ppbuf); - } - - cm->postproc_state.last_base_qindex = cm->base_qindex; - cm->postproc_state.last_frame_valid = 1; - - if (flags & VP9D_ADDNOISE) { - const int noise_level = ppflags->noise_level; - if (ppstate->last_q != q || - ppstate->last_noise != noise_level) { - fillrd(ppstate, 63 - q, noise_level); - } - - 
vp10_plane_add_noise(ppbuf->y_buffer, ppstate->noise, ppstate->blackclamp, - ppstate->whiteclamp, ppstate->bothclamp, - ppbuf->y_width, ppbuf->y_height, ppbuf->y_stride); - } - - *dest = *ppbuf; - - /* handle problem with extending borders */ - dest->y_width = cm->width; - dest->y_height = cm->height; - dest->uv_width = dest->y_width >> cm->subsampling_x; - dest->uv_height = dest->y_height >> cm->subsampling_y; - - swap_mi_and_prev_mi(cm); - return 0; -} -#endif // CONFIG_VP9_POSTPROC diff --git a/libs/libvpx/vp10/common/postproc.h b/libs/libvpx/vp10/common/postproc.h deleted file mode 100644 index e2ce0dcc87..0000000000 --- a/libs/libvpx/vp10/common/postproc.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - - -#ifndef VP10_COMMON_POSTPROC_H_ -#define VP10_COMMON_POSTPROC_H_ - -#include "vpx_ports/mem.h" -#include "vpx_scale/yv12config.h" -#include "vp10/common/blockd.h" -#include "vp10/common/mfqe.h" -#include "vp10/common/ppflags.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct postproc_state { - int last_q; - int last_noise; - char noise[3072]; - int last_base_qindex; - int last_frame_valid; - MODE_INFO *prev_mip; - MODE_INFO *prev_mi; - DECLARE_ALIGNED(16, char, blackclamp[16]); - DECLARE_ALIGNED(16, char, whiteclamp[16]); - DECLARE_ALIGNED(16, char, bothclamp[16]); -}; - -struct VP10Common; - -#define MFQE_PRECISION 4 - -int vp10_post_proc_frame(struct VP10Common *cm, - YV12_BUFFER_CONFIG *dest, vp10_ppflags_t *flags); - -void vp10_denoise(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int q); - -void vp10_deblock(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int q); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_POSTPROC_H_ diff --git a/libs/libvpx/vp10/common/ppflags.h b/libs/libvpx/vp10/common/ppflags.h deleted file mode 100644 index 8592fe906a..0000000000 --- a/libs/libvpx/vp10/common/ppflags.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_COMMON_PPFLAGS_H_ -#define VP10_COMMON_PPFLAGS_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -enum { - VP9D_NOFILTERING = 0, - VP9D_DEBLOCK = 1 << 0, - VP9D_DEMACROBLOCK = 1 << 1, - VP9D_ADDNOISE = 1 << 2, - VP9D_DEBUG_TXT_FRAME_INFO = 1 << 3, - VP9D_DEBUG_TXT_MBLK_MODES = 1 << 4, - VP9D_DEBUG_TXT_DC_DIFF = 1 << 5, - VP9D_DEBUG_TXT_RATE_INFO = 1 << 6, - VP9D_DEBUG_DRAW_MV = 1 << 7, - VP9D_DEBUG_CLR_BLK_MODES = 1 << 8, - VP9D_DEBUG_CLR_FRM_REF_BLKS = 1 << 9, - VP9D_MFQE = 1 << 10 -}; - -typedef struct { - int post_proc_flag; - int deblocking_level; - int noise_level; -} vp10_ppflags_t; - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_PPFLAGS_H_ diff --git a/libs/libvpx/vp10/common/pred_common.c b/libs/libvpx/vp10/common/pred_common.c deleted file mode 100644 index 236ae54661..0000000000 --- a/libs/libvpx/vp10/common/pred_common.c +++ /dev/null @@ -1,339 +0,0 @@ - -/* - * Copyright (c) 2012 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "vp10/common/common.h" -#include "vp10/common/pred_common.h" -#include "vp10/common/seg_common.h" - -// Returns a context number for the given MB prediction signal -int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd) { - // Note: - // The mode info data structure has a one element border above and to the - // left of the entries corresponding to real macroblocks. - // The prediction flags in these dummy entries are initialized to 0. - const MB_MODE_INFO *const left_mbmi = xd->left_mbmi; - const int left_type = xd->left_available && is_inter_block(left_mbmi) ? 
- left_mbmi->interp_filter : SWITCHABLE_FILTERS; - const MB_MODE_INFO *const above_mbmi = xd->above_mbmi; - const int above_type = xd->up_available && is_inter_block(above_mbmi) ? - above_mbmi->interp_filter : SWITCHABLE_FILTERS; - - if (left_type == above_type) - return left_type; - else if (left_type == SWITCHABLE_FILTERS && above_type != SWITCHABLE_FILTERS) - return above_type; - else if (left_type != SWITCHABLE_FILTERS && above_type == SWITCHABLE_FILTERS) - return left_type; - else - return SWITCHABLE_FILTERS; -} - -// The mode info data structure has a one element border above and to the -// left of the entries corresponding to real macroblocks. -// The prediction flags in these dummy entries are initialized to 0. -// 0 - inter/inter, inter/--, --/inter, --/-- -// 1 - intra/inter, inter/intra -// 2 - intra/--, --/intra -// 3 - intra/intra -int vp10_get_intra_inter_context(const MACROBLOCKD *xd) { - const MB_MODE_INFO *const above_mbmi = xd->above_mbmi; - const MB_MODE_INFO *const left_mbmi = xd->left_mbmi; - const int has_above = xd->up_available; - const int has_left = xd->left_available; - - if (has_above && has_left) { // both edges available - const int above_intra = !is_inter_block(above_mbmi); - const int left_intra = !is_inter_block(left_mbmi); - return left_intra && above_intra ? 3 - : left_intra || above_intra; - } else if (has_above || has_left) { // one edge available - return 2 * !is_inter_block(has_above ? above_mbmi : left_mbmi); - } else { - return 0; - } -} - -int vp10_get_reference_mode_context(const VP10_COMMON *cm, - const MACROBLOCKD *xd) { - int ctx; - const MB_MODE_INFO *const above_mbmi = xd->above_mbmi; - const MB_MODE_INFO *const left_mbmi = xd->left_mbmi; - const int has_above = xd->up_available; - const int has_left = xd->left_available; - // Note: - // The mode info data structure has a one element border above and to the - // left of the entries corresponding to real macroblocks. 
- // The prediction flags in these dummy entries are initialized to 0. - if (has_above && has_left) { // both edges available - if (!has_second_ref(above_mbmi) && !has_second_ref(left_mbmi)) - // neither edge uses comp pred (0/1) - ctx = (above_mbmi->ref_frame[0] == cm->comp_fixed_ref) ^ - (left_mbmi->ref_frame[0] == cm->comp_fixed_ref); - else if (!has_second_ref(above_mbmi)) - // one of two edges uses comp pred (2/3) - ctx = 2 + (above_mbmi->ref_frame[0] == cm->comp_fixed_ref || - !is_inter_block(above_mbmi)); - else if (!has_second_ref(left_mbmi)) - // one of two edges uses comp pred (2/3) - ctx = 2 + (left_mbmi->ref_frame[0] == cm->comp_fixed_ref || - !is_inter_block(left_mbmi)); - else // both edges use comp pred (4) - ctx = 4; - } else if (has_above || has_left) { // one edge available - const MB_MODE_INFO *edge_mbmi = has_above ? above_mbmi : left_mbmi; - - if (!has_second_ref(edge_mbmi)) - // edge does not use comp pred (0/1) - ctx = edge_mbmi->ref_frame[0] == cm->comp_fixed_ref; - else - // edge uses comp pred (3) - ctx = 3; - } else { // no edges available (1) - ctx = 1; - } - assert(ctx >= 0 && ctx < COMP_INTER_CONTEXTS); - return ctx; -} - -// Returns a context number for the given MB prediction signal -int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm, - const MACROBLOCKD *xd) { - int pred_context; - const MB_MODE_INFO *const above_mbmi = xd->above_mbmi; - const MB_MODE_INFO *const left_mbmi = xd->left_mbmi; - const int above_in_image = xd->up_available; - const int left_in_image = xd->left_available; - - // Note: - // The mode info data structure has a one element border above and to the - // left of the entries corresponding to real macroblocks. - // The prediction flags in these dummy entries are initialized to 0. 
- const int fix_ref_idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref]; - const int var_ref_idx = !fix_ref_idx; - - if (above_in_image && left_in_image) { // both edges available - const int above_intra = !is_inter_block(above_mbmi); - const int left_intra = !is_inter_block(left_mbmi); - - if (above_intra && left_intra) { // intra/intra (2) - pred_context = 2; - } else if (above_intra || left_intra) { // intra/inter - const MB_MODE_INFO *edge_mbmi = above_intra ? left_mbmi : above_mbmi; - - if (!has_second_ref(edge_mbmi)) // single pred (1/3) - pred_context = 1 + 2 * (edge_mbmi->ref_frame[0] != cm->comp_var_ref[1]); - else // comp pred (1/3) - pred_context = 1 + 2 * (edge_mbmi->ref_frame[var_ref_idx] - != cm->comp_var_ref[1]); - } else { // inter/inter - const int l_sg = !has_second_ref(left_mbmi); - const int a_sg = !has_second_ref(above_mbmi); - const MV_REFERENCE_FRAME vrfa = a_sg ? above_mbmi->ref_frame[0] - : above_mbmi->ref_frame[var_ref_idx]; - const MV_REFERENCE_FRAME vrfl = l_sg ? left_mbmi->ref_frame[0] - : left_mbmi->ref_frame[var_ref_idx]; - - if (vrfa == vrfl && cm->comp_var_ref[1] == vrfa) { - pred_context = 0; - } else if (l_sg && a_sg) { // single/single - if ((vrfa == cm->comp_fixed_ref && vrfl == cm->comp_var_ref[0]) || - (vrfl == cm->comp_fixed_ref && vrfa == cm->comp_var_ref[0])) - pred_context = 4; - else if (vrfa == vrfl) - pred_context = 3; - else - pred_context = 1; - } else if (l_sg || a_sg) { // single/comp - const MV_REFERENCE_FRAME vrfc = l_sg ? vrfa : vrfl; - const MV_REFERENCE_FRAME rfs = a_sg ? vrfa : vrfl; - if (vrfc == cm->comp_var_ref[1] && rfs != cm->comp_var_ref[1]) - pred_context = 1; - else if (rfs == cm->comp_var_ref[1] && vrfc != cm->comp_var_ref[1]) - pred_context = 2; - else - pred_context = 4; - } else if (vrfa == vrfl) { // comp/comp - pred_context = 4; - } else { - pred_context = 2; - } - } - } else if (above_in_image || left_in_image) { // one edge available - const MB_MODE_INFO *edge_mbmi = above_in_image ? 
above_mbmi : left_mbmi; - - if (!is_inter_block(edge_mbmi)) { - pred_context = 2; - } else { - if (has_second_ref(edge_mbmi)) - pred_context = 4 * (edge_mbmi->ref_frame[var_ref_idx] - != cm->comp_var_ref[1]); - else - pred_context = 3 * (edge_mbmi->ref_frame[0] != cm->comp_var_ref[1]); - } - } else { // no edges available (2) - pred_context = 2; - } - assert(pred_context >= 0 && pred_context < REF_CONTEXTS); - - return pred_context; -} - -int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) { - int pred_context; - const MB_MODE_INFO *const above_mbmi = xd->above_mbmi; - const MB_MODE_INFO *const left_mbmi = xd->left_mbmi; - const int has_above = xd->up_available; - const int has_left = xd->left_available; - // Note: - // The mode info data structure has a one element border above and to the - // left of the entries corresponding to real macroblocks. - // The prediction flags in these dummy entries are initialized to 0. - if (has_above && has_left) { // both edges available - const int above_intra = !is_inter_block(above_mbmi); - const int left_intra = !is_inter_block(left_mbmi); - - if (above_intra && left_intra) { // intra/intra - pred_context = 2; - } else if (above_intra || left_intra) { // intra/inter or inter/intra - const MB_MODE_INFO *edge_mbmi = above_intra ? 
left_mbmi : above_mbmi; - if (!has_second_ref(edge_mbmi)) - pred_context = 4 * (edge_mbmi->ref_frame[0] == LAST_FRAME); - else - pred_context = 1 + (edge_mbmi->ref_frame[0] == LAST_FRAME || - edge_mbmi->ref_frame[1] == LAST_FRAME); - } else { // inter/inter - const int above_has_second = has_second_ref(above_mbmi); - const int left_has_second = has_second_ref(left_mbmi); - const MV_REFERENCE_FRAME above0 = above_mbmi->ref_frame[0]; - const MV_REFERENCE_FRAME above1 = above_mbmi->ref_frame[1]; - const MV_REFERENCE_FRAME left0 = left_mbmi->ref_frame[0]; - const MV_REFERENCE_FRAME left1 = left_mbmi->ref_frame[1]; - - if (above_has_second && left_has_second) { - pred_context = 1 + (above0 == LAST_FRAME || above1 == LAST_FRAME || - left0 == LAST_FRAME || left1 == LAST_FRAME); - } else if (above_has_second || left_has_second) { - const MV_REFERENCE_FRAME rfs = !above_has_second ? above0 : left0; - const MV_REFERENCE_FRAME crf1 = above_has_second ? above0 : left0; - const MV_REFERENCE_FRAME crf2 = above_has_second ? above1 : left1; - - if (rfs == LAST_FRAME) - pred_context = 3 + (crf1 == LAST_FRAME || crf2 == LAST_FRAME); - else - pred_context = (crf1 == LAST_FRAME || crf2 == LAST_FRAME); - } else { - pred_context = 2 * (above0 == LAST_FRAME) + 2 * (left0 == LAST_FRAME); - } - } - } else if (has_above || has_left) { // one edge available - const MB_MODE_INFO *edge_mbmi = has_above ? 
above_mbmi : left_mbmi; - if (!is_inter_block(edge_mbmi)) { // intra - pred_context = 2; - } else { // inter - if (!has_second_ref(edge_mbmi)) - pred_context = 4 * (edge_mbmi->ref_frame[0] == LAST_FRAME); - else - pred_context = 1 + (edge_mbmi->ref_frame[0] == LAST_FRAME || - edge_mbmi->ref_frame[1] == LAST_FRAME); - } - } else { // no edges available - pred_context = 2; - } - - assert(pred_context >= 0 && pred_context < REF_CONTEXTS); - return pred_context; -} - -int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) { - int pred_context; - const MB_MODE_INFO *const above_mbmi = xd->above_mbmi; - const MB_MODE_INFO *const left_mbmi = xd->left_mbmi; - const int has_above = xd->up_available; - const int has_left = xd->left_available; - - // Note: - // The mode info data structure has a one element border above and to the - // left of the entries corresponding to real macroblocks. - // The prediction flags in these dummy entries are initialized to 0. - if (has_above && has_left) { // both edges available - const int above_intra = !is_inter_block(above_mbmi); - const int left_intra = !is_inter_block(left_mbmi); - - if (above_intra && left_intra) { // intra/intra - pred_context = 2; - } else if (above_intra || left_intra) { // intra/inter or inter/intra - const MB_MODE_INFO *edge_mbmi = above_intra ? 
left_mbmi : above_mbmi; - if (!has_second_ref(edge_mbmi)) { - if (edge_mbmi->ref_frame[0] == LAST_FRAME) - pred_context = 3; - else - pred_context = 4 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME); - } else { - pred_context = 1 + 2 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME || - edge_mbmi->ref_frame[1] == GOLDEN_FRAME); - } - } else { // inter/inter - const int above_has_second = has_second_ref(above_mbmi); - const int left_has_second = has_second_ref(left_mbmi); - const MV_REFERENCE_FRAME above0 = above_mbmi->ref_frame[0]; - const MV_REFERENCE_FRAME above1 = above_mbmi->ref_frame[1]; - const MV_REFERENCE_FRAME left0 = left_mbmi->ref_frame[0]; - const MV_REFERENCE_FRAME left1 = left_mbmi->ref_frame[1]; - - if (above_has_second && left_has_second) { - if (above0 == left0 && above1 == left1) - pred_context = 3 * (above0 == GOLDEN_FRAME || - above1 == GOLDEN_FRAME || - left0 == GOLDEN_FRAME || - left1 == GOLDEN_FRAME); - else - pred_context = 2; - } else if (above_has_second || left_has_second) { - const MV_REFERENCE_FRAME rfs = !above_has_second ? above0 : left0; - const MV_REFERENCE_FRAME crf1 = above_has_second ? above0 : left0; - const MV_REFERENCE_FRAME crf2 = above_has_second ? above1 : left1; - - if (rfs == GOLDEN_FRAME) - pred_context = 3 + (crf1 == GOLDEN_FRAME || crf2 == GOLDEN_FRAME); - else if (rfs == ALTREF_FRAME) - pred_context = crf1 == GOLDEN_FRAME || crf2 == GOLDEN_FRAME; - else - pred_context = 1 + 2 * (crf1 == GOLDEN_FRAME || crf2 == GOLDEN_FRAME); - } else { - if (above0 == LAST_FRAME && left0 == LAST_FRAME) { - pred_context = 3; - } else if (above0 == LAST_FRAME || left0 == LAST_FRAME) { - const MV_REFERENCE_FRAME edge0 = (above0 == LAST_FRAME) ? left0 - : above0; - pred_context = 4 * (edge0 == GOLDEN_FRAME); - } else { - pred_context = 2 * (above0 == GOLDEN_FRAME) + - 2 * (left0 == GOLDEN_FRAME); - } - } - } - } else if (has_above || has_left) { // one edge available - const MB_MODE_INFO *edge_mbmi = has_above ? 
above_mbmi : left_mbmi; - - if (!is_inter_block(edge_mbmi) || - (edge_mbmi->ref_frame[0] == LAST_FRAME && !has_second_ref(edge_mbmi))) - pred_context = 2; - else if (!has_second_ref(edge_mbmi)) - pred_context = 4 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME); - else - pred_context = 3 * (edge_mbmi->ref_frame[0] == GOLDEN_FRAME || - edge_mbmi->ref_frame[1] == GOLDEN_FRAME); - } else { // no edges available (2) - pred_context = 2; - } - assert(pred_context >= 0 && pred_context < REF_CONTEXTS); - return pred_context; -} diff --git a/libs/libvpx/vp10/common/pred_common.h b/libs/libvpx/vp10/common/pred_common.h deleted file mode 100644 index d6d7146d7a..0000000000 --- a/libs/libvpx/vp10/common/pred_common.h +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright (c) 2012 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_COMMON_PRED_COMMON_H_ -#define VP10_COMMON_PRED_COMMON_H_ - -#include "vp10/common/blockd.h" -#include "vp10/common/onyxc_int.h" -#include "vpx_dsp/vpx_dsp_common.h" - -#ifdef __cplusplus -extern "C" { -#endif - -static INLINE int get_segment_id(const VP10_COMMON *cm, - const uint8_t *segment_ids, - BLOCK_SIZE bsize, int mi_row, int mi_col) { - const int mi_offset = mi_row * cm->mi_cols + mi_col; - const int bw = num_8x8_blocks_wide_lookup[bsize]; - const int bh = num_8x8_blocks_high_lookup[bsize]; - const int xmis = VPXMIN(cm->mi_cols - mi_col, bw); - const int ymis = VPXMIN(cm->mi_rows - mi_row, bh); - int x, y, segment_id = MAX_SEGMENTS; - - for (y = 0; y < ymis; ++y) - for (x = 0; x < xmis; ++x) - segment_id = - VPXMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]); - - assert(segment_id >= 0 && segment_id < MAX_SEGMENTS); - return segment_id; -} - -static INLINE int vp10_get_pred_context_seg_id(const MACROBLOCKD *xd) { - const MODE_INFO *const above_mi = xd->above_mi; - const MODE_INFO *const left_mi = xd->left_mi; - const int above_sip = (above_mi != NULL) ? - above_mi->mbmi.seg_id_predicted : 0; - const int left_sip = (left_mi != NULL) ? left_mi->mbmi.seg_id_predicted : 0; - - return above_sip + left_sip; -} - -static INLINE vpx_prob vp10_get_pred_prob_seg_id( - const struct segmentation_probs *segp, const MACROBLOCKD *xd) { - return segp->pred_probs[vp10_get_pred_context_seg_id(xd)]; -} - -static INLINE int vp10_get_skip_context(const MACROBLOCKD *xd) { - const MODE_INFO *const above_mi = xd->above_mi; - const MODE_INFO *const left_mi = xd->left_mi; - const int above_skip = (above_mi != NULL) ? above_mi->mbmi.skip : 0; - const int left_skip = (left_mi != NULL) ? 
left_mi->mbmi.skip : 0; - return above_skip + left_skip; -} - -static INLINE vpx_prob vp10_get_skip_prob(const VP10_COMMON *cm, - const MACROBLOCKD *xd) { - return cm->fc->skip_probs[vp10_get_skip_context(xd)]; -} - -int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd); - -int vp10_get_intra_inter_context(const MACROBLOCKD *xd); - -static INLINE vpx_prob vp10_get_intra_inter_prob(const VP10_COMMON *cm, - const MACROBLOCKD *xd) { - return cm->fc->intra_inter_prob[vp10_get_intra_inter_context(xd)]; -} - -int vp10_get_reference_mode_context(const VP10_COMMON *cm, - const MACROBLOCKD *xd); - -static INLINE vpx_prob vp10_get_reference_mode_prob(const VP10_COMMON *cm, - const MACROBLOCKD *xd) { - return cm->fc->comp_inter_prob[vp10_get_reference_mode_context(cm, xd)]; -} - -int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm, - const MACROBLOCKD *xd); - -static INLINE vpx_prob vp10_get_pred_prob_comp_ref_p(const VP10_COMMON *cm, - const MACROBLOCKD *xd) { - const int pred_context = vp10_get_pred_context_comp_ref_p(cm, xd); - return cm->fc->comp_ref_prob[pred_context]; -} - -int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd); - -static INLINE vpx_prob vp10_get_pred_prob_single_ref_p1(const VP10_COMMON *cm, - const MACROBLOCKD *xd) { - return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p1(xd)][0]; -} - -int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd); - -static INLINE vpx_prob vp10_get_pred_prob_single_ref_p2(const VP10_COMMON *cm, - const MACROBLOCKD *xd) { - return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p2(xd)][1]; -} - -// Returns a context number for the given MB prediction signal -// The mode info data structure has a one element border above and to the -// left of the entries corresponding to real blocks. -// The prediction flags in these dummy entries are initialized to 0. 
-static INLINE int get_tx_size_context(const MACROBLOCKD *xd) { - const int max_tx_size = max_txsize_lookup[xd->mi[0]->mbmi.sb_type]; - const MB_MODE_INFO *const above_mbmi = xd->above_mbmi; - const MB_MODE_INFO *const left_mbmi = xd->left_mbmi; - const int has_above = xd->up_available; - const int has_left = xd->left_available; - int above_ctx = (has_above && !above_mbmi->skip) ? (int)above_mbmi->tx_size - : max_tx_size; - int left_ctx = (has_left && !left_mbmi->skip) ? (int)left_mbmi->tx_size - : max_tx_size; - if (!has_left) - left_ctx = above_ctx; - - if (!has_above) - above_ctx = left_ctx; - - return (above_ctx + left_ctx) > max_tx_size; -} - -static INLINE const vpx_prob *get_tx_probs(TX_SIZE max_tx_size, int ctx, - const struct tx_probs *tx_probs) { - switch (max_tx_size) { - case TX_8X8: - return tx_probs->p8x8[ctx]; - case TX_16X16: - return tx_probs->p16x16[ctx]; - case TX_32X32: - return tx_probs->p32x32[ctx]; - default: - assert(0 && "Invalid max_tx_size."); - return NULL; - } -} - -static INLINE const vpx_prob *get_tx_probs2(TX_SIZE max_tx_size, - const MACROBLOCKD *xd, - const struct tx_probs *tx_probs) { - return get_tx_probs(max_tx_size, get_tx_size_context(xd), tx_probs); -} - -static INLINE unsigned int *get_tx_counts(TX_SIZE max_tx_size, int ctx, - struct tx_counts *tx_counts) { - switch (max_tx_size) { - case TX_8X8: - return tx_counts->p8x8[ctx]; - case TX_16X16: - return tx_counts->p16x16[ctx]; - case TX_32X32: - return tx_counts->p32x32[ctx]; - default: - assert(0 && "Invalid max_tx_size."); - return NULL; - } -} - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_PRED_COMMON_H_ diff --git a/libs/libvpx/vp10/common/quant_common.c b/libs/libvpx/vp10/common/quant_common.c deleted file mode 100644 index edf7394011..0000000000 --- a/libs/libvpx/vp10/common/quant_common.c +++ /dev/null @@ -1,278 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "vp10/common/common.h" -#include "vp10/common/quant_common.h" -#include "vp10/common/seg_common.h" - -static const int16_t dc_qlookup[QINDEX_RANGE] = { - 4, 8, 8, 9, 10, 11, 12, 12, - 13, 14, 15, 16, 17, 18, 19, 19, - 20, 21, 22, 23, 24, 25, 26, 26, - 27, 28, 29, 30, 31, 32, 32, 33, - 34, 35, 36, 37, 38, 38, 39, 40, - 41, 42, 43, 43, 44, 45, 46, 47, - 48, 48, 49, 50, 51, 52, 53, 53, - 54, 55, 56, 57, 57, 58, 59, 60, - 61, 62, 62, 63, 64, 65, 66, 66, - 67, 68, 69, 70, 70, 71, 72, 73, - 74, 74, 75, 76, 77, 78, 78, 79, - 80, 81, 81, 82, 83, 84, 85, 85, - 87, 88, 90, 92, 93, 95, 96, 98, - 99, 101, 102, 104, 105, 107, 108, 110, - 111, 113, 114, 116, 117, 118, 120, 121, - 123, 125, 127, 129, 131, 134, 136, 138, - 140, 142, 144, 146, 148, 150, 152, 154, - 156, 158, 161, 164, 166, 169, 172, 174, - 177, 180, 182, 185, 187, 190, 192, 195, - 199, 202, 205, 208, 211, 214, 217, 220, - 223, 226, 230, 233, 237, 240, 243, 247, - 250, 253, 257, 261, 265, 269, 272, 276, - 280, 284, 288, 292, 296, 300, 304, 309, - 313, 317, 322, 326, 330, 335, 340, 344, - 349, 354, 359, 364, 369, 374, 379, 384, - 389, 395, 400, 406, 411, 417, 423, 429, - 435, 441, 447, 454, 461, 467, 475, 482, - 489, 497, 505, 513, 522, 530, 539, 549, - 559, 569, 579, 590, 602, 614, 626, 640, - 654, 668, 684, 700, 717, 736, 755, 775, - 796, 819, 843, 869, 896, 925, 955, 988, - 1022, 1058, 1098, 1139, 1184, 1232, 1282, 1336, -}; - -#if CONFIG_VP9_HIGHBITDEPTH -static const int16_t dc_qlookup_10[QINDEX_RANGE] = { - 4, 9, 10, 13, 15, 17, 20, 22, - 25, 28, 31, 34, 37, 40, 43, 47, - 50, 53, 57, 60, 64, 68, 71, 75, - 78, 82, 86, 90, 93, 97, 101, 105, - 109, 113, 116, 120, 124, 
128, 132, 136, - 140, 143, 147, 151, 155, 159, 163, 166, - 170, 174, 178, 182, 185, 189, 193, 197, - 200, 204, 208, 212, 215, 219, 223, 226, - 230, 233, 237, 241, 244, 248, 251, 255, - 259, 262, 266, 269, 273, 276, 280, 283, - 287, 290, 293, 297, 300, 304, 307, 310, - 314, 317, 321, 324, 327, 331, 334, 337, - 343, 350, 356, 362, 369, 375, 381, 387, - 394, 400, 406, 412, 418, 424, 430, 436, - 442, 448, 454, 460, 466, 472, 478, 484, - 490, 499, 507, 516, 525, 533, 542, 550, - 559, 567, 576, 584, 592, 601, 609, 617, - 625, 634, 644, 655, 666, 676, 687, 698, - 708, 718, 729, 739, 749, 759, 770, 782, - 795, 807, 819, 831, 844, 856, 868, 880, - 891, 906, 920, 933, 947, 961, 975, 988, - 1001, 1015, 1030, 1045, 1061, 1076, 1090, 1105, - 1120, 1137, 1153, 1170, 1186, 1202, 1218, 1236, - 1253, 1271, 1288, 1306, 1323, 1342, 1361, 1379, - 1398, 1416, 1436, 1456, 1476, 1496, 1516, 1537, - 1559, 1580, 1601, 1624, 1647, 1670, 1692, 1717, - 1741, 1766, 1791, 1817, 1844, 1871, 1900, 1929, - 1958, 1990, 2021, 2054, 2088, 2123, 2159, 2197, - 2236, 2276, 2319, 2363, 2410, 2458, 2508, 2561, - 2616, 2675, 2737, 2802, 2871, 2944, 3020, 3102, - 3188, 3280, 3375, 3478, 3586, 3702, 3823, 3953, - 4089, 4236, 4394, 4559, 4737, 4929, 5130, 5347, -}; - -static const int16_t dc_qlookup_12[QINDEX_RANGE] = { - 4, 12, 18, 25, 33, 41, 50, 60, - 70, 80, 91, 103, 115, 127, 140, 153, - 166, 180, 194, 208, 222, 237, 251, 266, - 281, 296, 312, 327, 343, 358, 374, 390, - 405, 421, 437, 453, 469, 484, 500, 516, - 532, 548, 564, 580, 596, 611, 627, 643, - 659, 674, 690, 706, 721, 737, 752, 768, - 783, 798, 814, 829, 844, 859, 874, 889, - 904, 919, 934, 949, 964, 978, 993, 1008, - 1022, 1037, 1051, 1065, 1080, 1094, 1108, 1122, - 1136, 1151, 1165, 1179, 1192, 1206, 1220, 1234, - 1248, 1261, 1275, 1288, 1302, 1315, 1329, 1342, - 1368, 1393, 1419, 1444, 1469, 1494, 1519, 1544, - 1569, 1594, 1618, 1643, 1668, 1692, 1717, 1741, - 1765, 1789, 1814, 1838, 1862, 1885, 1909, 1933, - 1957, 1992, 2027, 2061, 2096, 
2130, 2165, 2199, - 2233, 2267, 2300, 2334, 2367, 2400, 2434, 2467, - 2499, 2532, 2575, 2618, 2661, 2704, 2746, 2788, - 2830, 2872, 2913, 2954, 2995, 3036, 3076, 3127, - 3177, 3226, 3275, 3324, 3373, 3421, 3469, 3517, - 3565, 3621, 3677, 3733, 3788, 3843, 3897, 3951, - 4005, 4058, 4119, 4181, 4241, 4301, 4361, 4420, - 4479, 4546, 4612, 4677, 4742, 4807, 4871, 4942, - 5013, 5083, 5153, 5222, 5291, 5367, 5442, 5517, - 5591, 5665, 5745, 5825, 5905, 5984, 6063, 6149, - 6234, 6319, 6404, 6495, 6587, 6678, 6769, 6867, - 6966, 7064, 7163, 7269, 7376, 7483, 7599, 7715, - 7832, 7958, 8085, 8214, 8352, 8492, 8635, 8788, - 8945, 9104, 9275, 9450, 9639, 9832, 10031, 10245, - 10465, 10702, 10946, 11210, 11482, 11776, 12081, 12409, - 12750, 13118, 13501, 13913, 14343, 14807, 15290, 15812, - 16356, 16943, 17575, 18237, 18949, 19718, 20521, 21387, -}; -#endif - -static const int16_t ac_qlookup[QINDEX_RANGE] = { - 4, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, - 23, 24, 25, 26, 27, 28, 29, 30, - 31, 32, 33, 34, 35, 36, 37, 38, - 39, 40, 41, 42, 43, 44, 45, 46, - 47, 48, 49, 50, 51, 52, 53, 54, - 55, 56, 57, 58, 59, 60, 61, 62, - 63, 64, 65, 66, 67, 68, 69, 70, - 71, 72, 73, 74, 75, 76, 77, 78, - 79, 80, 81, 82, 83, 84, 85, 86, - 87, 88, 89, 90, 91, 92, 93, 94, - 95, 96, 97, 98, 99, 100, 101, 102, - 104, 106, 108, 110, 112, 114, 116, 118, - 120, 122, 124, 126, 128, 130, 132, 134, - 136, 138, 140, 142, 144, 146, 148, 150, - 152, 155, 158, 161, 164, 167, 170, 173, - 176, 179, 182, 185, 188, 191, 194, 197, - 200, 203, 207, 211, 215, 219, 223, 227, - 231, 235, 239, 243, 247, 251, 255, 260, - 265, 270, 275, 280, 285, 290, 295, 300, - 305, 311, 317, 323, 329, 335, 341, 347, - 353, 359, 366, 373, 380, 387, 394, 401, - 408, 416, 424, 432, 440, 448, 456, 465, - 474, 483, 492, 501, 510, 520, 530, 540, - 550, 560, 571, 582, 593, 604, 615, 627, - 639, 651, 663, 676, 689, 702, 715, 729, - 743, 757, 771, 786, 801, 816, 832, 848, - 864, 881, 898, 915, 933, 951, 969, 988, - 1007, 
1026, 1046, 1066, 1087, 1108, 1129, 1151, - 1173, 1196, 1219, 1243, 1267, 1292, 1317, 1343, - 1369, 1396, 1423, 1451, 1479, 1508, 1537, 1567, - 1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828, -}; - -#if CONFIG_VP9_HIGHBITDEPTH -static const int16_t ac_qlookup_10[QINDEX_RANGE] = { - 4, 9, 11, 13, 16, 18, 21, 24, - 27, 30, 33, 37, 40, 44, 48, 51, - 55, 59, 63, 67, 71, 75, 79, 83, - 88, 92, 96, 100, 105, 109, 114, 118, - 122, 127, 131, 136, 140, 145, 149, 154, - 158, 163, 168, 172, 177, 181, 186, 190, - 195, 199, 204, 208, 213, 217, 222, 226, - 231, 235, 240, 244, 249, 253, 258, 262, - 267, 271, 275, 280, 284, 289, 293, 297, - 302, 306, 311, 315, 319, 324, 328, 332, - 337, 341, 345, 349, 354, 358, 362, 367, - 371, 375, 379, 384, 388, 392, 396, 401, - 409, 417, 425, 433, 441, 449, 458, 466, - 474, 482, 490, 498, 506, 514, 523, 531, - 539, 547, 555, 563, 571, 579, 588, 596, - 604, 616, 628, 640, 652, 664, 676, 688, - 700, 713, 725, 737, 749, 761, 773, 785, - 797, 809, 825, 841, 857, 873, 889, 905, - 922, 938, 954, 970, 986, 1002, 1018, 1038, - 1058, 1078, 1098, 1118, 1138, 1158, 1178, 1198, - 1218, 1242, 1266, 1290, 1314, 1338, 1362, 1386, - 1411, 1435, 1463, 1491, 1519, 1547, 1575, 1603, - 1631, 1663, 1695, 1727, 1759, 1791, 1823, 1859, - 1895, 1931, 1967, 2003, 2039, 2079, 2119, 2159, - 2199, 2239, 2283, 2327, 2371, 2415, 2459, 2507, - 2555, 2603, 2651, 2703, 2755, 2807, 2859, 2915, - 2971, 3027, 3083, 3143, 3203, 3263, 3327, 3391, - 3455, 3523, 3591, 3659, 3731, 3803, 3876, 3952, - 4028, 4104, 4184, 4264, 4348, 4432, 4516, 4604, - 4692, 4784, 4876, 4972, 5068, 5168, 5268, 5372, - 5476, 5584, 5692, 5804, 5916, 6032, 6148, 6268, - 6388, 6512, 6640, 6768, 6900, 7036, 7172, 7312, -}; - -static const int16_t ac_qlookup_12[QINDEX_RANGE] = { - 4, 13, 19, 27, 35, 44, 54, 64, - 75, 87, 99, 112, 126, 139, 154, 168, - 183, 199, 214, 230, 247, 263, 280, 297, - 314, 331, 349, 366, 384, 402, 420, 438, - 456, 475, 493, 511, 530, 548, 567, 586, - 604, 623, 642, 660, 679, 698, 716, 
735, - 753, 772, 791, 809, 828, 846, 865, 884, - 902, 920, 939, 957, 976, 994, 1012, 1030, - 1049, 1067, 1085, 1103, 1121, 1139, 1157, 1175, - 1193, 1211, 1229, 1246, 1264, 1282, 1299, 1317, - 1335, 1352, 1370, 1387, 1405, 1422, 1440, 1457, - 1474, 1491, 1509, 1526, 1543, 1560, 1577, 1595, - 1627, 1660, 1693, 1725, 1758, 1791, 1824, 1856, - 1889, 1922, 1954, 1987, 2020, 2052, 2085, 2118, - 2150, 2183, 2216, 2248, 2281, 2313, 2346, 2378, - 2411, 2459, 2508, 2556, 2605, 2653, 2701, 2750, - 2798, 2847, 2895, 2943, 2992, 3040, 3088, 3137, - 3185, 3234, 3298, 3362, 3426, 3491, 3555, 3619, - 3684, 3748, 3812, 3876, 3941, 4005, 4069, 4149, - 4230, 4310, 4390, 4470, 4550, 4631, 4711, 4791, - 4871, 4967, 5064, 5160, 5256, 5352, 5448, 5544, - 5641, 5737, 5849, 5961, 6073, 6185, 6297, 6410, - 6522, 6650, 6778, 6906, 7034, 7162, 7290, 7435, - 7579, 7723, 7867, 8011, 8155, 8315, 8475, 8635, - 8795, 8956, 9132, 9308, 9484, 9660, 9836, 10028, - 10220, 10412, 10604, 10812, 11020, 11228, 11437, 11661, - 11885, 12109, 12333, 12573, 12813, 13053, 13309, 13565, - 13821, 14093, 14365, 14637, 14925, 15213, 15502, 15806, - 16110, 16414, 16734, 17054, 17390, 17726, 18062, 18414, - 18766, 19134, 19502, 19886, 20270, 20670, 21070, 21486, - 21902, 22334, 22766, 23214, 23662, 24126, 24590, 25070, - 25551, 26047, 26559, 27071, 27599, 28143, 28687, 29247, -}; -#endif - -int16_t vp10_dc_quant(int qindex, int delta, vpx_bit_depth_t bit_depth) { -#if CONFIG_VP9_HIGHBITDEPTH - switch (bit_depth) { - case VPX_BITS_8: - return dc_qlookup[clamp(qindex + delta, 0, MAXQ)]; - case VPX_BITS_10: - return dc_qlookup_10[clamp(qindex + delta, 0, MAXQ)]; - case VPX_BITS_12: - return dc_qlookup_12[clamp(qindex + delta, 0, MAXQ)]; - default: - assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); - return -1; - } -#else - (void) bit_depth; - return dc_qlookup[clamp(qindex + delta, 0, MAXQ)]; -#endif -} - -int16_t vp10_ac_quant(int qindex, int delta, vpx_bit_depth_t bit_depth) { -#if 
CONFIG_VP9_HIGHBITDEPTH - switch (bit_depth) { - case VPX_BITS_8: - return ac_qlookup[clamp(qindex + delta, 0, MAXQ)]; - case VPX_BITS_10: - return ac_qlookup_10[clamp(qindex + delta, 0, MAXQ)]; - case VPX_BITS_12: - return ac_qlookup_12[clamp(qindex + delta, 0, MAXQ)]; - default: - assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); - return -1; - } -#else - (void) bit_depth; - return ac_qlookup[clamp(qindex + delta, 0, MAXQ)]; -#endif -} - -int vp10_get_qindex(const struct segmentation *seg, int segment_id, - int base_qindex) { - if (segfeature_active(seg, segment_id, SEG_LVL_ALT_Q)) { - const int data = get_segdata(seg, segment_id, SEG_LVL_ALT_Q); - const int seg_qindex = seg->abs_delta == SEGMENT_ABSDATA ? - data : base_qindex + data; - return clamp(seg_qindex, 0, MAXQ); - } else { - return base_qindex; - } -} - diff --git a/libs/libvpx/vp10/common/quant_common.h b/libs/libvpx/vp10/common/quant_common.h deleted file mode 100644 index 6813e1734c..0000000000 --- a/libs/libvpx/vp10/common/quant_common.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_COMMON_QUANT_COMMON_H_ -#define VP10_COMMON_QUANT_COMMON_H_ - -#include "vpx/vpx_codec.h" -#include "vp10/common/seg_common.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define MINQ 0 -#define MAXQ 255 -#define QINDEX_RANGE (MAXQ - MINQ + 1) -#define QINDEX_BITS 8 - -int16_t vp10_dc_quant(int qindex, int delta, vpx_bit_depth_t bit_depth); -int16_t vp10_ac_quant(int qindex, int delta, vpx_bit_depth_t bit_depth); - -int vp10_get_qindex(const struct segmentation *seg, int segment_id, - int base_qindex); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_QUANT_COMMON_H_ diff --git a/libs/libvpx/vp10/common/reconinter.c b/libs/libvpx/vp10/common/reconinter.c deleted file mode 100644 index fdcb9673cf..0000000000 --- a/libs/libvpx/vp10/common/reconinter.c +++ /dev/null @@ -1,266 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - -#include "./vpx_scale_rtcd.h" -#include "./vpx_config.h" - -#include "vpx/vpx_integer.h" - -#include "vp10/common/blockd.h" -#include "vp10/common/reconinter.h" -#include "vp10/common/reconintra.h" - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_build_inter_predictor(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - const MV *src_mv, - const struct scale_factors *sf, - int w, int h, int ref, - const InterpKernel *kernel, - enum mv_precision precision, - int x, int y, int bd) { - const int is_q4 = precision == MV_PRECISION_Q4; - const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2, - is_q4 ? 
src_mv->col : src_mv->col * 2 }; - MV32 mv = vp10_scale_mv(&mv_q4, x, y, sf); - const int subpel_x = mv.col & SUBPEL_MASK; - const int subpel_y = mv.row & SUBPEL_MASK; - - src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS); - - high_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, - sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4, bd); -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -void vp10_build_inter_predictor(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - const MV *src_mv, - const struct scale_factors *sf, - int w, int h, int ref, - const InterpKernel *kernel, - enum mv_precision precision, - int x, int y) { - const int is_q4 = precision == MV_PRECISION_Q4; - const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2, - is_q4 ? src_mv->col : src_mv->col * 2 }; - MV32 mv = vp10_scale_mv(&mv_q4, x, y, sf); - const int subpel_x = mv.col & SUBPEL_MASK; - const int subpel_y = mv.row & SUBPEL_MASK; - - src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS); - - inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, - sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4); -} - -void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, - int bw, int bh, - int x, int y, int w, int h, - int mi_x, int mi_y) { - struct macroblockd_plane *const pd = &xd->plane[plane]; - const MODE_INFO *mi = xd->mi[0]; - const int is_compound = has_second_ref(&mi->mbmi); - const InterpKernel *kernel = vp10_filter_kernels[mi->mbmi.interp_filter]; - int ref; - - for (ref = 0; ref < 1 + is_compound; ++ref) { - const struct scale_factors *const sf = &xd->block_refs[ref]->sf; - struct buf_2d *const pre_buf = &pd->pre[ref]; - struct buf_2d *const dst_buf = &pd->dst; - uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; - const MV mv = mi->mbmi.sb_type < BLOCK_8X8 - ? 
average_split_mvs(pd, mi, ref, block) - : mi->mbmi.mv[ref].as_mv; - - // TODO(jkoleszar): This clamping is done in the incorrect place for the - // scaling case. It needs to be done on the scaled MV, not the pre-scaling - // MV. Note however that it performs the subsampling aware scaling so - // that the result is always q4. - // mv_precision precision is MV_PRECISION_Q4. - const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh, - pd->subsampling_x, - pd->subsampling_y); - - uint8_t *pre; - MV32 scaled_mv; - int xs, ys, subpel_x, subpel_y; - const int is_scaled = vp10_is_scaled(sf); - - if (is_scaled) { - pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf); - scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf); - xs = sf->x_step_q4; - ys = sf->y_step_q4; - } else { - pre = pre_buf->buf + (y * pre_buf->stride + x); - scaled_mv.row = mv_q4.row; - scaled_mv.col = mv_q4.col; - xs = ys = 16; - } - subpel_x = scaled_mv.col & SUBPEL_MASK; - subpel_y = scaled_mv.row & SUBPEL_MASK; - pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride - + (scaled_mv.col >> SUBPEL_BITS); - -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - high_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, - subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys, - xd->bd); - } else { - inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, - subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys); - } -#else - inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, - subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys); -#endif // CONFIG_VP9_HIGHBITDEPTH - } -} - -void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, - int i, int ir, int ic, - int mi_row, int mi_col) { - struct macroblockd_plane *const pd = &xd->plane[plane]; - MODE_INFO *const mi = xd->mi[0]; - const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->mbmi.sb_type, pd); - const int width = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; - const int height = 
4 * num_4x4_blocks_high_lookup[plane_bsize]; - - uint8_t *const dst = &pd->dst.buf[(ir * pd->dst.stride + ic) << 2]; - int ref; - const int is_compound = has_second_ref(&mi->mbmi); - const InterpKernel *kernel = vp10_filter_kernels[mi->mbmi.interp_filter]; - - for (ref = 0; ref < 1 + is_compound; ++ref) { - const uint8_t *pre = - &pd->pre[ref].buf[(ir * pd->pre[ref].stride + ic) << 2]; -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - vp10_highbd_build_inter_predictor(pre, pd->pre[ref].stride, - dst, pd->dst.stride, - &mi->bmi[i].as_mv[ref].as_mv, - &xd->block_refs[ref]->sf, width, height, - ref, kernel, MV_PRECISION_Q3, - mi_col * MI_SIZE + 4 * ic, - mi_row * MI_SIZE + 4 * ir, xd->bd); - } else { - vp10_build_inter_predictor(pre, pd->pre[ref].stride, - dst, pd->dst.stride, - &mi->bmi[i].as_mv[ref].as_mv, - &xd->block_refs[ref]->sf, width, height, ref, - kernel, MV_PRECISION_Q3, - mi_col * MI_SIZE + 4 * ic, - mi_row * MI_SIZE + 4 * ir); - } -#else - vp10_build_inter_predictor(pre, pd->pre[ref].stride, - dst, pd->dst.stride, - &mi->bmi[i].as_mv[ref].as_mv, - &xd->block_refs[ref]->sf, width, height, ref, - kernel, MV_PRECISION_Q3, - mi_col * MI_SIZE + 4 * ic, - mi_row * MI_SIZE + 4 * ir); -#endif // CONFIG_VP9_HIGHBITDEPTH - } -} - -static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize, - int mi_row, int mi_col, - int plane_from, int plane_to) { - int plane; - const int mi_x = mi_col * MI_SIZE; - const int mi_y = mi_row * MI_SIZE; - for (plane = plane_from; plane <= plane_to; ++plane) { - const struct macroblockd_plane *pd = &xd->plane[plane]; - const int bw = 4 * num_4x4_blocks_wide_lookup[bsize] >> pd->subsampling_x; - const int bh = 4 * num_4x4_blocks_high_lookup[bsize] >> pd->subsampling_y; - - if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) { - const PARTITION_TYPE bp = bsize - xd->mi[0]->mbmi.sb_type; - const int have_vsplit = bp != PARTITION_HORZ; - const int have_hsplit = bp != PARTITION_VERT; - const int 
num_4x4_w = 2 >> ((!have_vsplit) | pd->subsampling_x); - const int num_4x4_h = 2 >> ((!have_hsplit) | pd->subsampling_y); - const int pw = 8 >> (have_vsplit | pd->subsampling_x); - const int ph = 8 >> (have_hsplit | pd->subsampling_y); - int x, y; - assert(bp != PARTITION_NONE && bp < PARTITION_TYPES); - assert(bsize == BLOCK_8X8); - assert(pw * num_4x4_w == bw && ph * num_4x4_h == bh); - for (y = 0; y < num_4x4_h; ++y) - for (x = 0; x < num_4x4_w; ++x) - build_inter_predictors(xd, plane, y * 2 + x, bw, bh, - 4 * x, 4 * y, pw, ph, mi_x, mi_y); - } else { - build_inter_predictors(xd, plane, 0, bw, bh, - 0, 0, bw, bh, mi_x, mi_y); - } - } -} - -void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col, - BLOCK_SIZE bsize) { - build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0); -} - -void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col, - BLOCK_SIZE bsize, int plane) { - build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, plane, plane); -} - -void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col, - BLOCK_SIZE bsize) { - build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1, - MAX_MB_PLANE - 1); -} - -void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, - BLOCK_SIZE bsize) { - build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, - MAX_MB_PLANE - 1); -} - -void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE], - const YV12_BUFFER_CONFIG *src, - int mi_row, int mi_col) { - uint8_t *const buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer, - src->v_buffer}; - const int strides[MAX_MB_PLANE] = { src->y_stride, src->uv_stride, - src->uv_stride}; - int i; - - for (i = 0; i < MAX_MB_PLANE; ++i) { - struct macroblockd_plane *const pd = &planes[i]; - setup_pred_plane(&pd->dst, buffers[i], strides[i], mi_row, mi_col, NULL, - pd->subsampling_x, pd->subsampling_y); - } -} - -void vp10_setup_pre_planes(MACROBLOCKD *xd, int 
idx, - const YV12_BUFFER_CONFIG *src, - int mi_row, int mi_col, - const struct scale_factors *sf) { - if (src != NULL) { - int i; - uint8_t *const buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer, - src->v_buffer}; - const int strides[MAX_MB_PLANE] = { src->y_stride, src->uv_stride, - src->uv_stride}; - for (i = 0; i < MAX_MB_PLANE; ++i) { - struct macroblockd_plane *const pd = &xd->plane[i]; - setup_pred_plane(&pd->pre[idx], buffers[i], strides[i], mi_row, mi_col, - sf, pd->subsampling_x, pd->subsampling_y); - } - } -} diff --git a/libs/libvpx/vp10/common/reconinter.h b/libs/libvpx/vp10/common/reconinter.h deleted file mode 100644 index 5678f473f6..0000000000 --- a/libs/libvpx/vp10/common/reconinter.h +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_COMMON_RECONINTER_H_ -#define VP10_COMMON_RECONINTER_H_ - -#include "vp10/common/filter.h" -#include "vp10/common/onyxc_int.h" -#include "vpx/vpx_integer.h" -#include "vpx_dsp/vpx_filter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -static INLINE void inter_predictor(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - const int subpel_x, - const int subpel_y, - const struct scale_factors *sf, - int w, int h, int ref, - const InterpKernel *kernel, - int xs, int ys) { - sf->predict[subpel_x != 0][subpel_y != 0][ref]( - src, src_stride, dst, dst_stride, - kernel[subpel_x], xs, kernel[subpel_y], ys, w, h); -} - -#if CONFIG_VP9_HIGHBITDEPTH -static INLINE void high_inter_predictor(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - const int subpel_x, - const int subpel_y, - const struct scale_factors *sf, - int w, int h, int ref, - const InterpKernel *kernel, - int xs, int ys, int bd) { - sf->highbd_predict[subpel_x != 0][subpel_y != 0][ref]( - src, src_stride, dst, dst_stride, - kernel[subpel_x], xs, kernel[subpel_y], ys, w, h, bd); -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -static INLINE int round_mv_comp_q4(int value) { - return (value < 0 ? value - 2 : value + 2) / 4; -} - -static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) { - MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row + - mi->bmi[1].as_mv[idx].as_mv.row + - mi->bmi[2].as_mv[idx].as_mv.row + - mi->bmi[3].as_mv[idx].as_mv.row), - round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col + - mi->bmi[1].as_mv[idx].as_mv.col + - mi->bmi[2].as_mv[idx].as_mv.col + - mi->bmi[3].as_mv[idx].as_mv.col) }; - return res; -} - -static INLINE int round_mv_comp_q2(int value) { - return (value < 0 ? 
value - 1 : value + 1) / 2; -} - -static MV mi_mv_pred_q2(const MODE_INFO *mi, int idx, int block0, int block1) { - MV res = { round_mv_comp_q2(mi->bmi[block0].as_mv[idx].as_mv.row + - mi->bmi[block1].as_mv[idx].as_mv.row), - round_mv_comp_q2(mi->bmi[block0].as_mv[idx].as_mv.col + - mi->bmi[block1].as_mv[idx].as_mv.col) }; - return res; -} - -// TODO(jkoleszar): yet another mv clamping function :-( -static INLINE MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, - const MV *src_mv, - int bw, int bh, int ss_x, int ss_y) { - // If the MV points so far into the UMV border that no visible pixels - // are used for reconstruction, the subpel part of the MV can be - // discarded and the MV limited to 16 pixels with equivalent results. - const int spel_left = (VP9_INTERP_EXTEND + bw) << SUBPEL_BITS; - const int spel_right = spel_left - SUBPEL_SHIFTS; - const int spel_top = (VP9_INTERP_EXTEND + bh) << SUBPEL_BITS; - const int spel_bottom = spel_top - SUBPEL_SHIFTS; - MV clamped_mv = { - src_mv->row * (1 << (1 - ss_y)), - src_mv->col * (1 << (1 - ss_x)) - }; - assert(ss_x <= 1); - assert(ss_y <= 1); - - clamp_mv(&clamped_mv, - xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left, - xd->mb_to_right_edge * (1 << (1 - ss_x)) + spel_right, - xd->mb_to_top_edge * (1 << (1 - ss_y)) - spel_top, - xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom); - - return clamped_mv; -} - -static INLINE MV average_split_mvs(const struct macroblockd_plane *pd, - const MODE_INFO *mi, int ref, int block) { - const int ss_idx = ((pd->subsampling_x > 0) << 1) | (pd->subsampling_y > 0); - MV res = {0, 0}; - switch (ss_idx) { - case 0: - res = mi->bmi[block].as_mv[ref].as_mv; - break; - case 1: - res = mi_mv_pred_q2(mi, ref, block, block + 2); - break; - case 2: - res = mi_mv_pred_q2(mi, ref, block, block + 1); - break; - case 3: - res = mi_mv_pred_q4(mi, ref); - break; - default: - assert(ss_idx <= 3 && ss_idx >= 0); - } - return res; -} - -void build_inter_predictors(MACROBLOCKD *xd, int 
plane, int block, - int bw, int bh, - int x, int y, int w, int h, - int mi_x, int mi_y); - -void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, - int i, int ir, int ic, - int mi_row, int mi_col); - -void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col, - BLOCK_SIZE bsize); - -void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col, - BLOCK_SIZE bsize, int plane); - -void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col, - BLOCK_SIZE bsize); - -void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, - BLOCK_SIZE bsize); - -void vp10_build_inter_predictor(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - const MV *mv_q3, - const struct scale_factors *sf, - int w, int h, int do_avg, - const InterpKernel *kernel, - enum mv_precision precision, - int x, int y); - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_build_inter_predictor(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - const MV *mv_q3, - const struct scale_factors *sf, - int w, int h, int do_avg, - const InterpKernel *kernel, - enum mv_precision precision, - int x, int y, int bd); -#endif - -static INLINE int scaled_buffer_offset(int x_offset, int y_offset, int stride, - const struct scale_factors *sf) { - const int x = sf ? sf->scale_value_x(x_offset, sf) : x_offset; - const int y = sf ? 
sf->scale_value_y(y_offset, sf) : y_offset; - return y * stride + x; -} - -static INLINE void setup_pred_plane(struct buf_2d *dst, - uint8_t *src, int stride, - int mi_row, int mi_col, - const struct scale_factors *scale, - int subsampling_x, int subsampling_y) { - const int x = (MI_SIZE * mi_col) >> subsampling_x; - const int y = (MI_SIZE * mi_row) >> subsampling_y; - dst->buf = src + scaled_buffer_offset(x, y, stride, scale); - dst->stride = stride; -} - -void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE], - const YV12_BUFFER_CONFIG *src, - int mi_row, int mi_col); - -void vp10_setup_pre_planes(MACROBLOCKD *xd, int idx, - const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col, - const struct scale_factors *sf); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_RECONINTER_H_ diff --git a/libs/libvpx/vp10/common/reconintra.c b/libs/libvpx/vp10/common/reconintra.c deleted file mode 100644 index e9e3949ad7..0000000000 --- a/libs/libvpx/vp10/common/reconintra.c +++ /dev/null @@ -1,793 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" - -#if CONFIG_VP9_HIGHBITDEPTH -#include "vpx_dsp/vpx_dsp_common.h" -#endif // CONFIG_VP9_HIGHBITDEPTH -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/mem.h" -#include "vpx_ports/vpx_once.h" - -#include "vp10/common/reconintra.h" -#include "vp10/common/onyxc_int.h" - -#if CONFIG_MISC_FIXES -enum { - NEED_LEFT = 1 << 1, - NEED_ABOVE = 1 << 2, - NEED_ABOVERIGHT = 1 << 3, - NEED_ABOVELEFT = 1 << 4, - NEED_BOTTOMLEFT = 1 << 5, -}; - -static const uint8_t extend_modes[INTRA_MODES] = { - NEED_ABOVE | NEED_LEFT, // DC - NEED_ABOVE, // V - NEED_LEFT, // H - NEED_ABOVE | NEED_ABOVERIGHT, // D45 - NEED_LEFT | NEED_ABOVE | NEED_ABOVELEFT, // D135 - NEED_LEFT | NEED_ABOVE | NEED_ABOVELEFT, // D117 - NEED_LEFT | NEED_ABOVE | NEED_ABOVELEFT, // D153 - NEED_LEFT | NEED_BOTTOMLEFT, // D207 - NEED_ABOVE | NEED_ABOVERIGHT, // D63 - NEED_LEFT | NEED_ABOVE | NEED_ABOVELEFT, // TM -}; -#else -enum { - NEED_LEFT = 1 << 1, - NEED_ABOVE = 1 << 2, - NEED_ABOVERIGHT = 1 << 3, -}; - -static const uint8_t extend_modes[INTRA_MODES] = { - NEED_ABOVE | NEED_LEFT, // DC - NEED_ABOVE, // V - NEED_LEFT, // H - NEED_ABOVERIGHT, // D45 - NEED_LEFT | NEED_ABOVE, // D135 - NEED_LEFT | NEED_ABOVE, // D117 - NEED_LEFT | NEED_ABOVE, // D153 - NEED_LEFT, // D207 - NEED_ABOVERIGHT, // D63 - NEED_LEFT | NEED_ABOVE, // TM -}; -#endif - -#if CONFIG_MISC_FIXES -static const uint8_t orders_64x64[1] = { 0 }; -static const uint8_t orders_64x32[2] = { 0, 1 }; -static const uint8_t orders_32x64[2] = { 0, 1 }; -static const uint8_t orders_32x32[4] = { - 0, 1, - 2, 3, -}; -static const uint8_t orders_32x16[8] = { - 0, 2, - 1, 3, - 4, 6, - 5, 7, -}; -static const uint8_t orders_16x32[8] = { - 0, 1, 2, 3, - 4, 5, 6, 7, -}; -static const uint8_t orders_16x16[16] = { - 0, 1, 4, 5, - 2, 3, 6, 7, - 8, 9, 12, 13, - 10, 11, 14, 15, -}; -static const uint8_t orders_16x8[32] = { - 0, 2, 8, 10, - 1, 3, 9, 11, - 4, 6, 12, 14, - 5, 7, 13, 15, - 16, 18, 24, 26, - 17, 
19, 25, 27, - 20, 22, 28, 30, - 21, 23, 29, 31, -}; -static const uint8_t orders_8x16[32] = { - 0, 1, 2, 3, 8, 9, 10, 11, - 4, 5, 6, 7, 12, 13, 14, 15, - 16, 17, 18, 19, 24, 25, 26, 27, - 20, 21, 22, 23, 28, 29, 30, 31, -}; -static const uint8_t orders_8x8[64] = { - 0, 1, 4, 5, 16, 17, 20, 21, - 2, 3, 6, 7, 18, 19, 22, 23, - 8, 9, 12, 13, 24, 25, 28, 29, - 10, 11, 14, 15, 26, 27, 30, 31, - 32, 33, 36, 37, 48, 49, 52, 53, - 34, 35, 38, 39, 50, 51, 54, 55, - 40, 41, 44, 45, 56, 57, 60, 61, - 42, 43, 46, 47, 58, 59, 62, 63, -}; -static const uint8_t *const orders[BLOCK_SIZES] = { - orders_8x8, orders_8x8, orders_8x8, orders_8x8, - orders_8x16, orders_16x8, orders_16x16, - orders_16x32, orders_32x16, orders_32x32, - orders_32x64, orders_64x32, orders_64x64, -}; - -static int vp10_has_right(BLOCK_SIZE bsize, int mi_row, int mi_col, - int right_available, - TX_SIZE txsz, int y, int x, int ss_x) { - if (y == 0) { - int wl = mi_width_log2_lookup[bsize]; - int hl = mi_height_log2_lookup[bsize]; - int w = 1 << (wl + 1 - ss_x); - int step = 1 << txsz; - const uint8_t *order = orders[bsize]; - int my_order, tr_order; - - if (x + step < w) - return 1; - - mi_row = (mi_row & 7) >> hl; - mi_col = (mi_col & 7) >> wl; - - if (mi_row == 0) - return right_available; - - if (((mi_col + 1) << wl) >= 8) - return 0; - - my_order = order[((mi_row + 0) << (3 - wl)) + mi_col + 0]; - tr_order = order[((mi_row - 1) << (3 - wl)) + mi_col + 1]; - - return my_order > tr_order && right_available; - } else { - int wl = mi_width_log2_lookup[bsize]; - int w = 1 << (wl + 1 - ss_x); - int step = 1 << txsz; - - return x + step < w; - } -} - -static int vp10_has_bottom(BLOCK_SIZE bsize, int mi_row, int mi_col, - int bottom_available, TX_SIZE txsz, - int y, int x, int ss_y) { - if (x == 0) { - int wl = mi_width_log2_lookup[bsize]; - int hl = mi_height_log2_lookup[bsize]; - int h = 1 << (hl + 1 - ss_y); - int step = 1 << txsz; - const uint8_t *order = orders[bsize]; - int my_order, bl_order; - - mi_row = 
(mi_row & 7) >> hl; - mi_col = (mi_col & 7) >> wl; - - if (mi_col == 0) - return bottom_available && - (mi_row << (hl + !ss_y)) + y + step < (8 << !ss_y); - - if (((mi_row + 1) << hl) >= 8) - return 0; - - if (y + step < h) - return 1; - - my_order = order[((mi_row + 0) << (3 - wl)) + mi_col + 0]; - bl_order = order[((mi_row + 1) << (3 - wl)) + mi_col - 1]; - - return bl_order < my_order && bottom_available; - } else { - return 0; - } -} -#endif - -typedef void (*intra_pred_fn)(uint8_t *dst, ptrdiff_t stride, - const uint8_t *above, const uint8_t *left); - -static intra_pred_fn pred[INTRA_MODES][TX_SIZES]; -static intra_pred_fn dc_pred[2][2][TX_SIZES]; - -#if CONFIG_VP9_HIGHBITDEPTH -typedef void (*intra_high_pred_fn)(uint16_t *dst, ptrdiff_t stride, - const uint16_t *above, const uint16_t *left, - int bd); -static intra_high_pred_fn pred_high[INTRA_MODES][4]; -static intra_high_pred_fn dc_pred_high[2][2][4]; -#endif // CONFIG_VP9_HIGHBITDEPTH - -static void vp10_init_intra_predictors_internal(void) { -#define INIT_NO_4X4(p, type) \ - p[TX_8X8] = vpx_##type##_predictor_8x8; \ - p[TX_16X16] = vpx_##type##_predictor_16x16; \ - p[TX_32X32] = vpx_##type##_predictor_32x32 - -#define INIT_ALL_SIZES(p, type) \ - p[TX_4X4] = vpx_##type##_predictor_4x4; \ - INIT_NO_4X4(p, type) - - INIT_ALL_SIZES(pred[V_PRED], v); - INIT_ALL_SIZES(pred[H_PRED], h); -#if CONFIG_MISC_FIXES - INIT_ALL_SIZES(pred[D207_PRED], d207e); - INIT_ALL_SIZES(pred[D45_PRED], d45e); - INIT_ALL_SIZES(pred[D63_PRED], d63e); -#else - INIT_ALL_SIZES(pred[D207_PRED], d207); - INIT_ALL_SIZES(pred[D45_PRED], d45); - INIT_ALL_SIZES(pred[D63_PRED], d63); -#endif - INIT_ALL_SIZES(pred[D117_PRED], d117); - INIT_ALL_SIZES(pred[D135_PRED], d135); - INIT_ALL_SIZES(pred[D153_PRED], d153); - INIT_ALL_SIZES(pred[TM_PRED], tm); - - INIT_ALL_SIZES(dc_pred[0][0], dc_128); - INIT_ALL_SIZES(dc_pred[0][1], dc_top); - INIT_ALL_SIZES(dc_pred[1][0], dc_left); - INIT_ALL_SIZES(dc_pred[1][1], dc); - -#if CONFIG_VP9_HIGHBITDEPTH - 
INIT_ALL_SIZES(pred_high[V_PRED], highbd_v); - INIT_ALL_SIZES(pred_high[H_PRED], highbd_h); -#if CONFIG_MISC_FIXES - INIT_ALL_SIZES(pred_high[D207_PRED], highbd_d207e); - INIT_ALL_SIZES(pred_high[D45_PRED], highbd_d45e); - INIT_ALL_SIZES(pred_high[D63_PRED], highbd_d63); -#else - INIT_ALL_SIZES(pred_high[D207_PRED], highbd_d207); - INIT_ALL_SIZES(pred_high[D45_PRED], highbd_d45); - INIT_ALL_SIZES(pred_high[D63_PRED], highbd_d63); -#endif - INIT_ALL_SIZES(pred_high[D117_PRED], highbd_d117); - INIT_ALL_SIZES(pred_high[D135_PRED], highbd_d135); - INIT_ALL_SIZES(pred_high[D153_PRED], highbd_d153); - INIT_ALL_SIZES(pred_high[TM_PRED], highbd_tm); - - INIT_ALL_SIZES(dc_pred_high[0][0], highbd_dc_128); - INIT_ALL_SIZES(dc_pred_high[0][1], highbd_dc_top); - INIT_ALL_SIZES(dc_pred_high[1][0], highbd_dc_left); - INIT_ALL_SIZES(dc_pred_high[1][1], highbd_dc); -#endif // CONFIG_VP9_HIGHBITDEPTH - -#undef intra_pred_allsizes -} - -#if CONFIG_MISC_FIXES -static INLINE void memset16(uint16_t *dst, int val, int n) { - while (n--) - *dst++ = val; -} -#endif - -#if CONFIG_VP9_HIGHBITDEPTH -static void build_intra_predictors_high(const MACROBLOCKD *xd, - const uint8_t *ref8, - int ref_stride, - uint8_t *dst8, - int dst_stride, - PREDICTION_MODE mode, - TX_SIZE tx_size, -#if CONFIG_MISC_FIXES - int n_top_px, int n_topright_px, - int n_left_px, int n_bottomleft_px, -#else - int up_available, - int left_available, - int right_available, -#endif - int x, int y, - int plane, int bd) { - int i; - uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); - uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); -#if CONFIG_MISC_FIXES - DECLARE_ALIGNED(16, uint16_t, left_col[32]); -#else - DECLARE_ALIGNED(16, uint16_t, left_col[64]); -#endif - DECLARE_ALIGNED(16, uint16_t, above_data[64 + 16]); - uint16_t *above_row = above_data + 16; - const uint16_t *const_above_row = above_row; - const int bs = 4 << tx_size; -#if CONFIG_MISC_FIXES - const uint16_t *above_ref = ref - ref_stride; -#else - int frame_width, 
frame_height; - int x0, y0; - const struct macroblockd_plane *const pd = &xd->plane[plane]; -#endif - const int need_left = extend_modes[mode] & NEED_LEFT; - const int need_above = extend_modes[mode] & NEED_ABOVE; - const int need_aboveright = extend_modes[mode] & NEED_ABOVERIGHT; - int base = 128 << (bd - 8); - // 127 127 127 .. 127 127 127 127 127 127 - // 129 A B .. Y Z - // 129 C D .. W X - // 129 E F .. U V - // 129 G H .. S T T T T T - -#if CONFIG_MISC_FIXES - (void) x; - (void) y; - (void) plane; - (void) need_left; - (void) need_above; - (void) need_aboveright; - - // NEED_LEFT - if (extend_modes[mode] & NEED_LEFT) { - const int need_bottom = !!(extend_modes[mode] & NEED_BOTTOMLEFT); - i = 0; - if (n_left_px > 0) { - for (; i < n_left_px; i++) - left_col[i] = ref[i * ref_stride - 1]; - if (need_bottom && n_bottomleft_px > 0) { - assert(i == bs); - for (; i < bs + n_bottomleft_px; i++) - left_col[i] = ref[i * ref_stride - 1]; - } - if (i < (bs << need_bottom)) - memset16(&left_col[i], left_col[i - 1], (bs << need_bottom) - i); - } else { - memset16(left_col, base + 1, bs << need_bottom); - } - } - - // NEED_ABOVE - if (extend_modes[mode] & NEED_ABOVE) { - const int need_right = !!(extend_modes[mode] & NEED_ABOVERIGHT); - if (n_top_px > 0) { - memcpy(above_row, above_ref, n_top_px * 2); - i = n_top_px; - if (need_right && n_topright_px > 0) { - assert(n_top_px == bs); - memcpy(above_row + bs, above_ref + bs, n_topright_px * 2); - i += n_topright_px; - } - if (i < (bs << need_right)) - memset16(&above_row[i], above_row[i - 1], (bs << need_right) - i); - } else { - memset16(above_row, base - 1, bs << need_right); - } - } - - if (extend_modes[mode] & NEED_ABOVELEFT) { - above_row[-1] = n_top_px > 0 ? - (n_left_px > 0 ? above_ref[-1] : base + 1) : base - 1; - } -#else - // Get current frame pointer, width and height. 
- if (plane == 0) { - frame_width = xd->cur_buf->y_width; - frame_height = xd->cur_buf->y_height; - } else { - frame_width = xd->cur_buf->uv_width; - frame_height = xd->cur_buf->uv_height; - } - - // Get block position in current frame. - x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x; - y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y; - - // NEED_LEFT - if (need_left) { - if (left_available) { - if (xd->mb_to_bottom_edge < 0) { - /* slower path if the block needs border extension */ - if (y0 + bs <= frame_height) { - for (i = 0; i < bs; ++i) - left_col[i] = ref[i * ref_stride - 1]; - } else { - const int extend_bottom = frame_height - y0; - for (i = 0; i < extend_bottom; ++i) - left_col[i] = ref[i * ref_stride - 1]; - for (; i < bs; ++i) - left_col[i] = ref[(extend_bottom - 1) * ref_stride - 1]; - } - } else { - /* faster path if the block does not need extension */ - for (i = 0; i < bs; ++i) - left_col[i] = ref[i * ref_stride - 1]; - } - } else { - // TODO(Peter): this value should probably change for high bitdepth - vpx_memset16(left_col, base + 1, bs); - } - } - - // NEED_ABOVE - if (need_above) { - if (up_available) { - const uint16_t *above_ref = ref - ref_stride; - if (xd->mb_to_right_edge < 0) { - /* slower path if the block needs border extension */ - if (x0 + bs <= frame_width) { - memcpy(above_row, above_ref, bs * sizeof(above_row[0])); - } else if (x0 <= frame_width) { - const int r = frame_width - x0; - memcpy(above_row, above_ref, r * sizeof(above_row[0])); - vpx_memset16(above_row + r, above_row[r - 1], x0 + bs - frame_width); - } - } else { - /* faster path if the block does not need extension */ - if (bs == 4 && right_available && left_available) { - const_above_row = above_ref; - } else { - memcpy(above_row, above_ref, bs * sizeof(above_row[0])); - } - } - above_row[-1] = left_available ? 
above_ref[-1] : (base + 1); - } else { - vpx_memset16(above_row, base - 1, bs); - above_row[-1] = base - 1; - } - } - - // NEED_ABOVERIGHT - if (need_aboveright) { - if (up_available) { - const uint16_t *above_ref = ref - ref_stride; - if (xd->mb_to_right_edge < 0) { - /* slower path if the block needs border extension */ - if (x0 + 2 * bs <= frame_width) { - if (right_available && bs == 4) { - memcpy(above_row, above_ref, 2 * bs * sizeof(above_row[0])); - } else { - memcpy(above_row, above_ref, bs * sizeof(above_row[0])); - vpx_memset16(above_row + bs, above_row[bs - 1], bs); - } - } else if (x0 + bs <= frame_width) { - const int r = frame_width - x0; - if (right_available && bs == 4) { - memcpy(above_row, above_ref, r * sizeof(above_row[0])); - vpx_memset16(above_row + r, above_row[r - 1], - x0 + 2 * bs - frame_width); - } else { - memcpy(above_row, above_ref, bs * sizeof(above_row[0])); - vpx_memset16(above_row + bs, above_row[bs - 1], bs); - } - } else if (x0 <= frame_width) { - const int r = frame_width - x0; - memcpy(above_row, above_ref, r * sizeof(above_row[0])); - vpx_memset16(above_row + r, above_row[r - 1], - x0 + 2 * bs - frame_width); - } - // TODO(Peter) this value should probably change for high bitdepth - above_row[-1] = left_available ? above_ref[-1] : (base + 1); - } else { - /* faster path if the block does not need extension */ - if (bs == 4 && right_available && left_available) { - const_above_row = above_ref; - } else { - memcpy(above_row, above_ref, bs * sizeof(above_row[0])); - if (bs == 4 && right_available) - memcpy(above_row + bs, above_ref + bs, bs * sizeof(above_row[0])); - else - vpx_memset16(above_row + bs, above_row[bs - 1], bs); - // TODO(Peter): this value should probably change for high bitdepth - above_row[-1] = left_available ? 
above_ref[-1] : (base + 1); - } - } - } else { - vpx_memset16(above_row, base - 1, bs * 2); - // TODO(Peter): this value should probably change for high bitdepth - above_row[-1] = base - 1; - } - } -#endif - - // predict - if (mode == DC_PRED) { -#if CONFIG_MISC_FIXES - dc_pred_high[n_left_px > 0][n_top_px > 0][tx_size](dst, dst_stride, - const_above_row, - left_col, xd->bd); -#else - dc_pred_high[left_available][up_available][tx_size](dst, dst_stride, - const_above_row, - left_col, xd->bd); -#endif - } else { - pred_high[mode][tx_size](dst, dst_stride, const_above_row, left_col, - xd->bd); - } -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref, - int ref_stride, uint8_t *dst, int dst_stride, - PREDICTION_MODE mode, TX_SIZE tx_size, -#if CONFIG_MISC_FIXES - int n_top_px, int n_topright_px, - int n_left_px, int n_bottomleft_px, -#else - int up_available, int left_available, - int right_available, -#endif - int x, int y, int plane) { - int i; -#if CONFIG_MISC_FIXES - DECLARE_ALIGNED(16, uint8_t, left_col[64]); - const uint8_t *above_ref = ref - ref_stride; -#else - DECLARE_ALIGNED(16, uint8_t, left_col[32]); - int frame_width, frame_height; - int x0, y0; - const struct macroblockd_plane *const pd = &xd->plane[plane]; -#endif - DECLARE_ALIGNED(16, uint8_t, above_data[64 + 16]); - uint8_t *above_row = above_data + 16; - const uint8_t *const_above_row = above_row; - const int bs = 4 << tx_size; - - // 127 127 127 .. 127 127 127 127 127 127 - // 129 A B .. Y Z - // 129 C D .. W X - // 129 E F .. U V - // 129 G H .. S T T T T T - // .. - -#if CONFIG_MISC_FIXES - (void) xd; - (void) x; - (void) y; - (void) plane; - assert(n_top_px >= 0); - assert(n_topright_px >= 0); - assert(n_left_px >= 0); - assert(n_bottomleft_px >= 0); -#else - // Get current frame pointer, width and height. 
- if (plane == 0) { - frame_width = xd->cur_buf->y_width; - frame_height = xd->cur_buf->y_height; - } else { - frame_width = xd->cur_buf->uv_width; - frame_height = xd->cur_buf->uv_height; - } - - // Get block position in current frame. - x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x; - y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y; -#endif - - // NEED_LEFT - if (extend_modes[mode] & NEED_LEFT) { -#if CONFIG_MISC_FIXES - const int need_bottom = !!(extend_modes[mode] & NEED_BOTTOMLEFT); - i = 0; - if (n_left_px > 0) { - for (; i < n_left_px; i++) - left_col[i] = ref[i * ref_stride - 1]; - if (need_bottom && n_bottomleft_px > 0) { - assert(i == bs); - for (; i < bs + n_bottomleft_px; i++) - left_col[i] = ref[i * ref_stride - 1]; - } - if (i < (bs << need_bottom)) - memset(&left_col[i], left_col[i - 1], (bs << need_bottom) - i); - } else { - memset(left_col, 129, bs << need_bottom); - } -#else - if (left_available) { - if (xd->mb_to_bottom_edge < 0) { - /* slower path if the block needs border extension */ - if (y0 + bs <= frame_height) { - for (i = 0; i < bs; ++i) - left_col[i] = ref[i * ref_stride - 1]; - } else { - const int extend_bottom = frame_height - y0; - for (i = 0; i < extend_bottom; ++i) - left_col[i] = ref[i * ref_stride - 1]; - for (; i < bs; ++i) - left_col[i] = ref[(extend_bottom - 1) * ref_stride - 1]; - } - } else { - /* faster path if the block does not need extension */ - for (i = 0; i < bs; ++i) - left_col[i] = ref[i * ref_stride - 1]; - } - } else { - memset(left_col, 129, bs); - } -#endif - } - - // NEED_ABOVE - if (extend_modes[mode] & NEED_ABOVE) { -#if CONFIG_MISC_FIXES - const int need_right = !!(extend_modes[mode] & NEED_ABOVERIGHT); - if (n_top_px > 0) { - memcpy(above_row, above_ref, n_top_px); - i = n_top_px; - if (need_right && n_topright_px > 0) { - assert(n_top_px == bs); - memcpy(above_row + bs, above_ref + bs, n_topright_px); - i += n_topright_px; - } - if (i < (bs << need_right)) - memset(&above_row[i], 
above_row[i - 1], (bs << need_right) - i); - } else { - memset(above_row, 127, bs << need_right); - } -#else - if (up_available) { - const uint8_t *above_ref = ref - ref_stride; - if (xd->mb_to_right_edge < 0) { - /* slower path if the block needs border extension */ - if (x0 + bs <= frame_width) { - memcpy(above_row, above_ref, bs); - } else if (x0 <= frame_width) { - const int r = frame_width - x0; - memcpy(above_row, above_ref, r); - memset(above_row + r, above_row[r - 1], x0 + bs - frame_width); - } - } else { - /* faster path if the block does not need extension */ - if (bs == 4 && right_available && left_available) { - const_above_row = above_ref; - } else { - memcpy(above_row, above_ref, bs); - } - } - above_row[-1] = left_available ? above_ref[-1] : 129; - } else { - memset(above_row, 127, bs); - above_row[-1] = 127; - } -#endif - } - -#if CONFIG_MISC_FIXES - if (extend_modes[mode] & NEED_ABOVELEFT) { - above_row[-1] = n_top_px > 0 ? (n_left_px > 0 ? above_ref[-1] : 129) : 127; - } -#else - // NEED_ABOVERIGHT - if (extend_modes[mode] & NEED_ABOVERIGHT) { - if (up_available) { - const uint8_t *above_ref = ref - ref_stride; - if (xd->mb_to_right_edge < 0) { - /* slower path if the block needs border extension */ - if (x0 + 2 * bs <= frame_width) { - if (right_available && bs == 4) { - memcpy(above_row, above_ref, 2 * bs); - } else { - memcpy(above_row, above_ref, bs); - memset(above_row + bs, above_row[bs - 1], bs); - } - } else if (x0 + bs <= frame_width) { - const int r = frame_width - x0; - if (right_available && bs == 4) { - memcpy(above_row, above_ref, r); - memset(above_row + r, above_row[r - 1], x0 + 2 * bs - frame_width); - } else { - memcpy(above_row, above_ref, bs); - memset(above_row + bs, above_row[bs - 1], bs); - } - } else if (x0 <= frame_width) { - const int r = frame_width - x0; - memcpy(above_row, above_ref, r); - memset(above_row + r, above_row[r - 1], x0 + 2 * bs - frame_width); - } - } else { - /* faster path if the block does not need 
extension */ - if (bs == 4 && right_available && left_available) { - const_above_row = above_ref; - } else { - memcpy(above_row, above_ref, bs); - if (bs == 4 && right_available) - memcpy(above_row + bs, above_ref + bs, bs); - else - memset(above_row + bs, above_row[bs - 1], bs); - } - } - above_row[-1] = left_available ? above_ref[-1] : 129; - } else { - memset(above_row, 127, bs * 2); - above_row[-1] = 127; - } - } -#endif - - // predict - if (mode == DC_PRED) { -#if CONFIG_MISC_FIXES - dc_pred[n_left_px > 0][n_top_px > 0][tx_size](dst, dst_stride, - const_above_row, left_col); -#else - dc_pred[left_available][up_available][tx_size](dst, dst_stride, - const_above_row, left_col); -#endif - } else { - pred[mode][tx_size](dst, dst_stride, const_above_row, left_col); - } -} - -void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in, - TX_SIZE tx_size, PREDICTION_MODE mode, - const uint8_t *ref, int ref_stride, - uint8_t *dst, int dst_stride, - int aoff, int loff, int plane) { - const int txw = (1 << tx_size); - const int have_top = loff || xd->up_available; - const int have_left = aoff || xd->left_available; - const int x = aoff * 4; - const int y = loff * 4; -#if CONFIG_MISC_FIXES - const int bw = VPXMAX(2, 1 << bwl_in); - const int bh = VPXMAX(2, 1 << bhl_in); - const int mi_row = -xd->mb_to_top_edge >> 6; - const int mi_col = -xd->mb_to_left_edge >> 6; - const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type; - const struct macroblockd_plane *const pd = &xd->plane[plane]; - const int right_available = - mi_col + (bw >> !pd->subsampling_x) < xd->tile.mi_col_end; - const int have_right = vp10_has_right(bsize, mi_row, mi_col, - right_available, - tx_size, loff, aoff, - pd->subsampling_x); - const int have_bottom = vp10_has_bottom(bsize, mi_row, mi_col, - xd->mb_to_bottom_edge > 0, - tx_size, loff, aoff, - pd->subsampling_y); - const int wpx = 4 * bw; - const int hpx = 4 * bh; - const int txpx = 4 * txw; - - int xr = (xd->mb_to_right_edge >> (3 + 
pd->subsampling_x)) + (wpx - x - txpx); - int yd = - (xd->mb_to_bottom_edge >> (3 + pd->subsampling_y)) + (hpx - y - txpx); -#else - const int bw = (1 << bwl_in); - const int have_right = (aoff + txw) < bw; -#endif // CONFIG_MISC_FIXES - -#if CONFIG_MISC_FIXES -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - build_intra_predictors_high(xd, ref, ref_stride, dst, dst_stride, mode, - tx_size, - have_top ? VPXMIN(txpx, xr + txpx) : 0, - have_top && have_right ? VPXMIN(txpx, xr) : 0, - have_left ? VPXMIN(txpx, yd + txpx) : 0, - have_bottom && have_left ? VPXMIN(txpx, yd) : 0, - x, y, plane, xd->bd); - return; - } -#endif - build_intra_predictors(xd, ref, ref_stride, dst, dst_stride, mode, - tx_size, - have_top ? VPXMIN(txpx, xr + txpx) : 0, - have_top && have_right ? VPXMIN(txpx, xr) : 0, - have_left ? VPXMIN(txpx, yd + txpx) : 0, - have_bottom && have_left ? VPXMIN(txpx, yd) : 0, - x, y, plane); -#else // CONFIG_MISC_FIXES - (void) bhl_in; -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - build_intra_predictors_high(xd, ref, ref_stride, dst, dst_stride, mode, - tx_size, have_top, have_left, have_right, - x, y, plane, xd->bd); - return; - } -#endif - build_intra_predictors(xd, ref, ref_stride, dst, dst_stride, mode, tx_size, - have_top, have_left, have_right, x, y, plane); -#endif // CONFIG_MISC_FIXES -} - -void vp10_init_intra_predictors(void) { - once(vp10_init_intra_predictors_internal); -} diff --git a/libs/libvpx/vp10/common/reconintra.h b/libs/libvpx/vp10/common/reconintra.h deleted file mode 100644 index f451fb8f70..0000000000 --- a/libs/libvpx/vp10/common/reconintra.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_COMMON_RECONINTRA_H_ -#define VP10_COMMON_RECONINTRA_H_ - -#include "vpx/vpx_integer.h" -#include "vp10/common/blockd.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void vp10_init_intra_predictors(void); - -void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in, - TX_SIZE tx_size, PREDICTION_MODE mode, - const uint8_t *ref, int ref_stride, - uint8_t *dst, int dst_stride, - int aoff, int loff, int plane); -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_RECONINTRA_H_ diff --git a/libs/libvpx/vp10/common/scale.c b/libs/libvpx/vp10/common/scale.c deleted file mode 100644 index ce6062c195..0000000000 --- a/libs/libvpx/vp10/common/scale.c +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright (c) 2013 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "./vpx_dsp_rtcd.h" -#include "vp10/common/filter.h" -#include "vp10/common/scale.h" -#include "vpx_dsp/vpx_filter.h" - -static INLINE int scaled_x(int val, const struct scale_factors *sf) { - return (int)((int64_t)val * sf->x_scale_fp >> REF_SCALE_SHIFT); -} - -static INLINE int scaled_y(int val, const struct scale_factors *sf) { - return (int)((int64_t)val * sf->y_scale_fp >> REF_SCALE_SHIFT); -} - -static int unscaled_value(int val, const struct scale_factors *sf) { - (void) sf; - return val; -} - -static int get_fixed_point_scale_factor(int other_size, int this_size) { - // Calculate scaling factor once for each reference frame - // and use fixed point scaling factors in decoding and encoding routines. - // Hardware implementations can calculate scale factor in device driver - // and use multiplication and shifting on hardware instead of division. - return (other_size << REF_SCALE_SHIFT) / this_size; -} - -MV32 vp10_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) { - const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf) & SUBPEL_MASK; - const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf) & SUBPEL_MASK; - const MV32 res = { - scaled_y(mv->row, sf) + y_off_q4, - scaled_x(mv->col, sf) + x_off_q4 - }; - return res; -} - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, - int other_w, int other_h, - int this_w, int this_h, - int use_highbd) { -#else -void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, - int other_w, int other_h, - int this_w, int this_h) { -#endif - if (!valid_ref_frame_size(other_w, other_h, this_w, this_h)) { - sf->x_scale_fp = REF_INVALID_SCALE; - sf->y_scale_fp = REF_INVALID_SCALE; - return; - } - - sf->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w); - sf->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h); - sf->x_step_q4 = scaled_x(16, sf); - sf->y_step_q4 = scaled_y(16, sf); - - if (vp10_is_scaled(sf)) { - sf->scale_value_x = 
scaled_x; - sf->scale_value_y = scaled_y; - } else { - sf->scale_value_x = unscaled_value; - sf->scale_value_y = unscaled_value; - } - - // TODO(agrange): Investigate the best choice of functions to use here - // for EIGHTTAP_SMOOTH. Since it is not interpolating, need to choose what - // to do at full-pel offsets. The current selection, where the filter is - // applied in one direction only, and not at all for 0,0, seems to give the - // best quality, but it may be worth trying an additional mode that does - // do the filtering on full-pel. - if (sf->x_step_q4 == 16) { - if (sf->y_step_q4 == 16) { - // No scaling in either direction. - sf->predict[0][0][0] = vpx_convolve_copy; - sf->predict[0][0][1] = vpx_convolve_avg; - sf->predict[0][1][0] = vpx_convolve8_vert; - sf->predict[0][1][1] = vpx_convolve8_avg_vert; - sf->predict[1][0][0] = vpx_convolve8_horiz; - sf->predict[1][0][1] = vpx_convolve8_avg_horiz; - } else { - // No scaling in x direction. Must always scale in the y direction. - sf->predict[0][0][0] = vpx_convolve8_vert; - sf->predict[0][0][1] = vpx_convolve8_avg_vert; - sf->predict[0][1][0] = vpx_convolve8_vert; - sf->predict[0][1][1] = vpx_convolve8_avg_vert; - sf->predict[1][0][0] = vpx_convolve8; - sf->predict[1][0][1] = vpx_convolve8_avg; - } - } else { - if (sf->y_step_q4 == 16) { - // No scaling in the y direction. Must always scale in the x direction. - sf->predict[0][0][0] = vpx_convolve8_horiz; - sf->predict[0][0][1] = vpx_convolve8_avg_horiz; - sf->predict[0][1][0] = vpx_convolve8; - sf->predict[0][1][1] = vpx_convolve8_avg; - sf->predict[1][0][0] = vpx_convolve8_horiz; - sf->predict[1][0][1] = vpx_convolve8_avg_horiz; - } else { - // Must always scale in both directions. 
- sf->predict[0][0][0] = vpx_convolve8; - sf->predict[0][0][1] = vpx_convolve8_avg; - sf->predict[0][1][0] = vpx_convolve8; - sf->predict[0][1][1] = vpx_convolve8_avg; - sf->predict[1][0][0] = vpx_convolve8; - sf->predict[1][0][1] = vpx_convolve8_avg; - } - } - // 2D subpel motion always gets filtered in both directions - sf->predict[1][1][0] = vpx_convolve8; - sf->predict[1][1][1] = vpx_convolve8_avg; -#if CONFIG_VP9_HIGHBITDEPTH - if (use_highbd) { - if (sf->x_step_q4 == 16) { - if (sf->y_step_q4 == 16) { - // No scaling in either direction. - sf->highbd_predict[0][0][0] = vpx_highbd_convolve_copy; - sf->highbd_predict[0][0][1] = vpx_highbd_convolve_avg; - sf->highbd_predict[0][1][0] = vpx_highbd_convolve8_vert; - sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg_vert; - sf->highbd_predict[1][0][0] = vpx_highbd_convolve8_horiz; - sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg_horiz; - } else { - // No scaling in x direction. Must always scale in the y direction. - sf->highbd_predict[0][0][0] = vpx_highbd_convolve8_vert; - sf->highbd_predict[0][0][1] = vpx_highbd_convolve8_avg_vert; - sf->highbd_predict[0][1][0] = vpx_highbd_convolve8_vert; - sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg_vert; - sf->highbd_predict[1][0][0] = vpx_highbd_convolve8; - sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg; - } - } else { - if (sf->y_step_q4 == 16) { - // No scaling in the y direction. Must always scale in the x direction. - sf->highbd_predict[0][0][0] = vpx_highbd_convolve8_horiz; - sf->highbd_predict[0][0][1] = vpx_highbd_convolve8_avg_horiz; - sf->highbd_predict[0][1][0] = vpx_highbd_convolve8; - sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg; - sf->highbd_predict[1][0][0] = vpx_highbd_convolve8_horiz; - sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg_horiz; - } else { - // Must always scale in both directions. 
- sf->highbd_predict[0][0][0] = vpx_highbd_convolve8; - sf->highbd_predict[0][0][1] = vpx_highbd_convolve8_avg; - sf->highbd_predict[0][1][0] = vpx_highbd_convolve8; - sf->highbd_predict[0][1][1] = vpx_highbd_convolve8_avg; - sf->highbd_predict[1][0][0] = vpx_highbd_convolve8; - sf->highbd_predict[1][0][1] = vpx_highbd_convolve8_avg; - } - } - // 2D subpel motion always gets filtered in both directions. - sf->highbd_predict[1][1][0] = vpx_highbd_convolve8; - sf->highbd_predict[1][1][1] = vpx_highbd_convolve8_avg; - } -#endif -} diff --git a/libs/libvpx/vp10/common/scale.h b/libs/libvpx/vp10/common/scale.h deleted file mode 100644 index 833f6c4119..0000000000 --- a/libs/libvpx/vp10/common/scale.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2013 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_COMMON_SCALE_H_ -#define VP10_COMMON_SCALE_H_ - -#include "vp10/common/mv.h" -#include "vpx_dsp/vpx_convolve.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define REF_SCALE_SHIFT 14 -#define REF_NO_SCALE (1 << REF_SCALE_SHIFT) -#define REF_INVALID_SCALE -1 - -struct scale_factors { - int x_scale_fp; // horizontal fixed point scale factor - int y_scale_fp; // vertical fixed point scale factor - int x_step_q4; - int y_step_q4; - - int (*scale_value_x)(int val, const struct scale_factors *sf); - int (*scale_value_y)(int val, const struct scale_factors *sf); - - convolve_fn_t predict[2][2][2]; // horiz, vert, avg -#if CONFIG_VP9_HIGHBITDEPTH - highbd_convolve_fn_t highbd_predict[2][2][2]; // horiz, vert, avg -#endif -}; - -MV32 vp10_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf); - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, - int other_w, int other_h, - int this_w, int this_h, - int use_high); -#else -void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, - int other_w, int other_h, - int this_w, int this_h); -#endif - -static INLINE int vp10_is_valid_scale(const struct scale_factors *sf) { - return sf->x_scale_fp != REF_INVALID_SCALE && - sf->y_scale_fp != REF_INVALID_SCALE; -} - -static INLINE int vp10_is_scaled(const struct scale_factors *sf) { - return vp10_is_valid_scale(sf) && - (sf->x_scale_fp != REF_NO_SCALE || sf->y_scale_fp != REF_NO_SCALE); -} - -static INLINE int valid_ref_frame_size(int ref_width, int ref_height, - int this_width, int this_height) { - return 2 * this_width >= ref_width && - 2 * this_height >= ref_height && - this_width <= 16 * ref_width && - this_height <= 16 * ref_height; -} - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_SCALE_H_ diff --git a/libs/libvpx/vp10/common/scan.c b/libs/libvpx/vp10/common/scan.c deleted file mode 100644 index 7217f6d045..0000000000 --- a/libs/libvpx/vp10/common/scan.c +++ 
/dev/null @@ -1,727 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - -#include "vp10/common/scan.h" - -DECLARE_ALIGNED(16, static const int16_t, default_scan_4x4[16]) = { - 0, 4, 1, 5, - 8, 2, 12, 9, - 3, 6, 13, 10, - 7, 14, 11, 15, -}; - -DECLARE_ALIGNED(16, static const int16_t, col_scan_4x4[16]) = { - 0, 4, 8, 1, - 12, 5, 9, 2, - 13, 6, 10, 3, - 7, 14, 11, 15, -}; - -DECLARE_ALIGNED(16, static const int16_t, row_scan_4x4[16]) = { - 0, 1, 4, 2, - 5, 3, 6, 8, - 9, 7, 12, 10, - 13, 11, 14, 15, -}; - -DECLARE_ALIGNED(16, static const int16_t, default_scan_8x8[64]) = { - 0, 8, 1, 16, 9, 2, 17, 24, - 10, 3, 18, 25, 32, 11, 4, 26, - 33, 19, 40, 12, 34, 27, 5, 41, - 20, 48, 13, 35, 42, 28, 21, 6, - 49, 56, 36, 43, 29, 7, 14, 50, - 57, 44, 22, 37, 15, 51, 58, 30, - 45, 23, 52, 59, 38, 31, 60, 53, - 46, 39, 61, 54, 47, 62, 55, 63, -}; - -DECLARE_ALIGNED(16, static const int16_t, col_scan_8x8[64]) = { - 0, 8, 16, 1, 24, 9, 32, 17, - 2, 40, 25, 10, 33, 18, 48, 3, - 26, 41, 11, 56, 19, 34, 4, 49, - 27, 42, 12, 35, 20, 57, 50, 28, - 5, 43, 13, 36, 58, 51, 21, 44, - 6, 29, 59, 37, 14, 52, 22, 7, - 45, 60, 30, 15, 38, 53, 23, 46, - 31, 61, 39, 54, 47, 62, 55, 63, -}; - -DECLARE_ALIGNED(16, static const int16_t, row_scan_8x8[64]) = { - 0, 1, 2, 8, 9, 3, 16, 10, - 4, 17, 11, 24, 5, 18, 25, 12, - 19, 26, 32, 6, 13, 20, 33, 27, - 7, 34, 40, 21, 28, 41, 14, 35, - 48, 42, 29, 36, 49, 22, 43, 15, - 56, 37, 50, 44, 30, 57, 23, 51, - 58, 45, 38, 52, 31, 59, 53, 46, - 60, 39, 61, 47, 54, 55, 62, 63, -}; - -DECLARE_ALIGNED(16, static const int16_t, default_scan_16x16[256]) = { - 0, 16, 1, 32, 17, 2, 48, 33, 
18, 3, 64, 34, 49, 19, 65, 80, - 50, 4, 35, 66, 20, 81, 96, 51, 5, 36, 82, 97, 67, 112, 21, 52, - 98, 37, 83, 113, 6, 68, 128, 53, 22, 99, 114, 84, 7, 129, 38, 69, - 100, 115, 144, 130, 85, 54, 23, 8, 145, 39, 70, 116, 101, 131, 160, 146, - 55, 86, 24, 71, 132, 117, 161, 40, 9, 102, 147, 176, 162, 87, 56, 25, - 133, 118, 177, 148, 72, 103, 41, 163, 10, 192, 178, 88, 57, 134, 149, 119, - 26, 164, 73, 104, 193, 42, 179, 208, 11, 135, 89, 165, 120, 150, 58, 194, - 180, 27, 74, 209, 105, 151, 136, 43, 90, 224, 166, 195, 181, 121, 210, 59, - 12, 152, 106, 167, 196, 75, 137, 225, 211, 240, 182, 122, 91, 28, 197, 13, - 226, 168, 183, 153, 44, 212, 138, 107, 241, 60, 29, 123, 198, 184, 227, 169, - 242, 76, 213, 154, 45, 92, 14, 199, 139, 61, 228, 214, 170, 185, 243, 108, - 77, 155, 30, 15, 200, 229, 124, 215, 244, 93, 46, 186, 171, 201, 109, 140, - 230, 62, 216, 245, 31, 125, 78, 156, 231, 47, 187, 202, 217, 94, 246, 141, - 63, 232, 172, 110, 247, 157, 79, 218, 203, 126, 233, 188, 248, 95, 173, 142, - 219, 111, 249, 234, 158, 127, 189, 204, 250, 235, 143, 174, 220, 205, 159, - 251, - 190, 221, 175, 236, 237, 191, 206, 252, 222, 253, 207, 238, 223, 254, 239, - 255, -}; - -DECLARE_ALIGNED(16, static const int16_t, col_scan_16x16[256]) = { - 0, 16, 32, 48, 1, 64, 17, 80, 33, 96, 49, 2, 65, 112, 18, 81, - 34, 128, 50, 97, 3, 66, 144, 19, 113, 35, 82, 160, 98, 51, 129, 4, - 67, 176, 20, 114, 145, 83, 36, 99, 130, 52, 192, 5, 161, 68, 115, 21, - 146, 84, 208, 177, 37, 131, 100, 53, 162, 224, 69, 6, 116, 193, 147, 85, - 22, 240, 132, 38, 178, 101, 163, 54, 209, 117, 70, 7, 148, 194, 86, 179, - 225, 23, 133, 39, 164, 8, 102, 210, 241, 55, 195, 118, 149, 71, 180, 24, - 87, 226, 134, 165, 211, 40, 103, 56, 72, 150, 196, 242, 119, 9, 181, 227, - 88, 166, 25, 135, 41, 104, 212, 57, 151, 197, 120, 73, 243, 182, 136, 167, - 213, 89, 10, 228, 105, 152, 198, 26, 42, 121, 183, 244, 168, 58, 137, 229, - 74, 214, 90, 153, 199, 184, 11, 106, 245, 27, 122, 230, 169, 43, 215, 59, - 200, 138, 
185, 246, 75, 12, 91, 154, 216, 231, 107, 28, 44, 201, 123, 170, - 60, 247, 232, 76, 139, 13, 92, 217, 186, 248, 155, 108, 29, 124, 45, 202, - 233, 171, 61, 14, 77, 140, 15, 249, 93, 30, 187, 156, 218, 46, 109, 125, - 62, 172, 78, 203, 31, 141, 234, 94, 47, 188, 63, 157, 110, 250, 219, 79, - 126, 204, 173, 142, 95, 189, 111, 235, 158, 220, 251, 127, 174, 143, 205, - 236, - 159, 190, 221, 252, 175, 206, 237, 191, 253, 222, 238, 207, 254, 223, 239, - 255, -}; - -DECLARE_ALIGNED(16, static const int16_t, row_scan_16x16[256]) = { - 0, 1, 2, 16, 3, 17, 4, 18, 32, 5, 33, 19, 6, 34, 48, 20, - 49, 7, 35, 21, 50, 64, 8, 36, 65, 22, 51, 37, 80, 9, 66, 52, - 23, 38, 81, 67, 10, 53, 24, 82, 68, 96, 39, 11, 54, 83, 97, 69, - 25, 98, 84, 40, 112, 55, 12, 70, 99, 113, 85, 26, 41, 56, 114, 100, - 13, 71, 128, 86, 27, 115, 101, 129, 42, 57, 72, 116, 14, 87, 130, 102, - 144, 73, 131, 117, 28, 58, 15, 88, 43, 145, 103, 132, 146, 118, 74, 160, - 89, 133, 104, 29, 59, 147, 119, 44, 161, 148, 90, 105, 134, 162, 120, 176, - 75, 135, 149, 30, 60, 163, 177, 45, 121, 91, 106, 164, 178, 150, 192, 136, - 165, 179, 31, 151, 193, 76, 122, 61, 137, 194, 107, 152, 180, 208, 46, 166, - 167, 195, 92, 181, 138, 209, 123, 153, 224, 196, 77, 168, 210, 182, 240, 108, - 197, 62, 154, 225, 183, 169, 211, 47, 139, 93, 184, 226, 212, 241, 198, 170, - 124, 155, 199, 78, 213, 185, 109, 227, 200, 63, 228, 242, 140, 214, 171, 186, - 156, 229, 243, 125, 94, 201, 244, 215, 216, 230, 141, 187, 202, 79, 172, 110, - 157, 245, 217, 231, 95, 246, 232, 126, 203, 247, 233, 173, 218, 142, 111, - 158, - 188, 248, 127, 234, 219, 249, 189, 204, 143, 174, 159, 250, 235, 205, 220, - 175, - 190, 251, 221, 191, 206, 236, 207, 237, 252, 222, 253, 223, 238, 239, 254, - 255, -}; - -DECLARE_ALIGNED(16, static const int16_t, default_scan_32x32[1024]) = { - 0, 32, 1, 64, 33, 2, 96, 65, 34, 128, 3, 97, 66, 160, - 129, 35, 98, 4, 67, 130, 161, 192, 36, 99, 224, 5, 162, 193, - 68, 131, 37, 100, - 225, 194, 256, 163, 69, 132, 6, 226, 
257, 288, 195, 101, 164, 38, - 258, 7, 227, 289, 133, 320, 70, 196, 165, 290, 259, 228, 39, 321, - 102, 352, 8, 197, - 71, 134, 322, 291, 260, 353, 384, 229, 166, 103, 40, 354, 323, 292, - 135, 385, 198, 261, 72, 9, 416, 167, 386, 355, 230, 324, 104, 293, - 41, 417, 199, 136, - 262, 387, 448, 325, 356, 10, 73, 418, 231, 168, 449, 294, 388, 105, - 419, 263, 42, 200, 357, 450, 137, 480, 74, 326, 232, 11, 389, 169, - 295, 420, 106, 451, - 481, 358, 264, 327, 201, 43, 138, 512, 482, 390, 296, 233, 170, 421, - 75, 452, 359, 12, 513, 265, 483, 328, 107, 202, 514, 544, 422, 391, - 453, 139, 44, 234, - 484, 297, 360, 171, 76, 515, 545, 266, 329, 454, 13, 423, 203, 108, - 546, 485, 576, 298, 235, 140, 361, 330, 172, 547, 45, 455, 267, 577, - 486, 77, 204, 362, - 608, 14, 299, 578, 109, 236, 487, 609, 331, 141, 579, 46, 15, 173, - 610, 363, 78, 205, 16, 110, 237, 611, 142, 47, 174, 79, 206, 17, - 111, 238, 48, 143, - 80, 175, 112, 207, 49, 18, 239, 81, 113, 19, 50, 82, 114, 51, - 83, 115, 640, 516, 392, 268, 144, 20, 672, 641, 548, 517, 424, - 393, 300, 269, 176, 145, - 52, 21, 704, 673, 642, 580, 549, 518, 456, 425, 394, 332, 301, - 270, 208, 177, 146, 84, 53, 22, 736, 705, 674, 643, 612, 581, - 550, 519, 488, 457, 426, 395, - 364, 333, 302, 271, 240, 209, 178, 147, 116, 85, 54, 23, 737, - 706, 675, 613, 582, 551, 489, 458, 427, 365, 334, 303, 241, - 210, 179, 117, 86, 55, 738, 707, - 614, 583, 490, 459, 366, 335, 242, 211, 118, 87, 739, 615, 491, - 367, 243, 119, 768, 644, 520, 396, 272, 148, 24, 800, 769, 676, - 645, 552, 521, 428, 397, 304, - 273, 180, 149, 56, 25, 832, 801, 770, 708, 677, 646, 584, 553, - 522, 460, 429, 398, 336, 305, 274, 212, 181, 150, 88, 57, 26, - 864, 833, 802, 771, 740, 709, - 678, 647, 616, 585, 554, 523, 492, 461, 430, 399, 368, 337, 306, - 275, 244, 213, 182, 151, 120, 89, 58, 27, 865, 834, 803, 741, - 710, 679, 617, 586, 555, 493, - 462, 431, 369, 338, 307, 245, 214, 183, 121, 90, 59, 866, 835, - 742, 711, 618, 587, 494, 463, 370, 339, 246, 
215, 122, 91, 867, - 743, 619, 495, 371, 247, 123, - 896, 772, 648, 524, 400, 276, 152, 28, 928, 897, 804, 773, 680, - 649, 556, 525, 432, 401, 308, 277, 184, 153, 60, 29, 960, 929, - 898, 836, 805, 774, 712, 681, - 650, 588, 557, 526, 464, 433, 402, 340, 309, 278, 216, 185, 154, - 92, 61, 30, 992, 961, 930, 899, 868, 837, 806, 775, 744, 713, 682, - 651, 620, 589, 558, 527, - 496, 465, 434, 403, 372, 341, 310, 279, 248, 217, 186, 155, 124, - 93, 62, 31, 993, 962, 931, 869, 838, 807, 745, 714, 683, 621, 590, - 559, 497, 466, 435, 373, - 342, 311, 249, 218, 187, 125, 94, 63, 994, 963, 870, 839, 746, 715, - 622, 591, 498, 467, 374, 343, 250, 219, 126, 95, 995, 871, 747, 623, - 499, 375, 251, 127, - 900, 776, 652, 528, 404, 280, 156, 932, 901, 808, 777, 684, 653, 560, - 529, 436, 405, 312, 281, 188, 157, 964, 933, 902, 840, 809, 778, 716, - 685, 654, 592, 561, - 530, 468, 437, 406, 344, 313, 282, 220, 189, 158, 996, 965, 934, 903, - 872, 841, 810, 779, 748, 717, 686, 655, 624, 593, 562, 531, 500, 469, - 438, 407, 376, 345, - 314, 283, 252, 221, 190, 159, 997, 966, 935, 873, 842, 811, 749, 718, - 687, 625, 594, 563, 501, 470, 439, 377, 346, 315, 253, 222, 191, 998, - 967, 874, 843, 750, - 719, 626, 595, 502, 471, 378, 347, 254, 223, 999, 875, 751, 627, 503, - 379, 255, 904, 780, 656, 532, 408, 284, 936, 905, 812, 781, 688, 657, - 564, 533, 440, 409, - 316, 285, 968, 937, 906, 844, 813, 782, 720, 689, 658, 596, 565, 534, - 472, 441, 410, 348, 317, 286, 1000, 969, 938, 907, 876, 845, 814, 783, - 752, 721, 690, 659, - 628, 597, 566, 535, 504, 473, 442, 411, 380, 349, 318, 287, 1001, 970, - 939, 877, 846, 815, 753, 722, 691, 629, 598, 567, 505, 474, 443, 381, - 350, 319, 1002, 971, - 878, 847, 754, 723, 630, 599, 506, 475, 382, 351, 1003, 879, 755, 631, - 507, 383, 908, 784, 660, 536, 412, 940, 909, 816, 785, 692, 661, 568, - 537, 444, 413, 972, - 941, 910, 848, 817, 786, 724, 693, 662, 600, 569, 538, 476, 445, 414, - 1004, 973, 942, 911, 880, 849, 818, 787, 756, 725, 694, 
663, 632, 601, - 570, 539, 508, 477, - 446, 415, 1005, 974, 943, 881, 850, 819, 757, 726, 695, 633, 602, 571, - 509, 478, 447, 1006, 975, 882, 851, 758, 727, 634, 603, 510, 479, - 1007, 883, 759, 635, 511, - 912, 788, 664, 540, 944, 913, 820, 789, 696, 665, 572, 541, 976, 945, - 914, 852, 821, 790, 728, 697, 666, 604, 573, 542, 1008, 977, 946, 915, - 884, 853, 822, 791, - 760, 729, 698, 667, 636, 605, 574, 543, 1009, 978, 947, 885, 854, 823, - 761, 730, 699, 637, 606, 575, 1010, 979, 886, 855, 762, 731, 638, 607, - 1011, 887, 763, 639, - 916, 792, 668, 948, 917, 824, 793, 700, 669, 980, 949, 918, 856, 825, - 794, 732, 701, 670, 1012, 981, 950, 919, 888, 857, 826, 795, 764, 733, - 702, 671, 1013, 982, - 951, 889, 858, 827, 765, 734, 703, 1014, 983, 890, 859, 766, 735, 1015, - 891, 767, 920, 796, 952, 921, 828, 797, 984, 953, 922, 860, 829, 798, - 1016, 985, 954, 923, - 892, 861, 830, 799, 1017, 986, 955, 893, 862, 831, 1018, 987, 894, 863, - 1019, 895, 924, 956, 925, 988, 957, 926, 1020, 989, 958, 927, 1021, - 990, 959, 1022, 991, 1023, -}; - -// Neighborhood 5-tuples for various scans and blocksizes, -// in {top, left, topleft, topright, bottomleft} order -// for each position in raster scan order. -// -1 indicates the neighbor does not exist. 
-DECLARE_ALIGNED(16, static const int16_t, - default_scan_4x4_neighbors[17 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 0, 0, 1, 4, 4, 4, 1, 1, 8, 8, 5, 8, 2, 2, 2, 5, 9, 12, 6, 9, - 3, 6, 10, 13, 7, 10, 11, 14, 0, 0, -}; - -DECLARE_ALIGNED(16, static const int16_t, - col_scan_4x4_neighbors[17 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 4, 4, 0, 0, 8, 8, 1, 1, 5, 5, 1, 1, 9, 9, 2, 2, 6, 6, 2, 2, 3, - 3, 10, 10, 7, 7, 11, 11, 0, 0, -}; - -DECLARE_ALIGNED(16, static const int16_t, - row_scan_4x4_neighbors[17 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 0, 0, 1, 1, 4, 4, 2, 2, 5, 5, 4, 4, 8, 8, 6, 6, 8, 8, 9, 9, 12, - 12, 10, 10, 13, 13, 14, 14, 0, 0, -}; - -DECLARE_ALIGNED(16, static const int16_t, - col_scan_8x8_neighbors[65 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 8, 8, 0, 0, 16, 16, 1, 1, 24, 24, 9, 9, 1, 1, 32, 32, 17, 17, 2, - 2, 25, 25, 10, 10, 40, 40, 2, 2, 18, 18, 33, 33, 3, 3, 48, 48, 11, 11, 26, - 26, 3, 3, 41, 41, 19, 19, 34, 34, 4, 4, 27, 27, 12, 12, 49, 49, 42, 42, 20, - 20, 4, 4, 35, 35, 5, 5, 28, 28, 50, 50, 43, 43, 13, 13, 36, 36, 5, 5, 21, 21, - 51, 51, 29, 29, 6, 6, 44, 44, 14, 14, 6, 6, 37, 37, 52, 52, 22, 22, 7, 7, 30, - 30, 45, 45, 15, 15, 38, 38, 23, 23, 53, 53, 31, 31, 46, 46, 39, 39, 54, 54, - 47, 47, 55, 55, 0, 0, -}; - -DECLARE_ALIGNED(16, static const int16_t, - row_scan_8x8_neighbors[65 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 1, 1, 0, 0, 8, 8, 2, 2, 8, 8, 9, 9, 3, 3, 16, 16, 10, 10, 16, 16, - 4, 4, 17, 17, 24, 24, 11, 11, 18, 18, 25, 25, 24, 24, 5, 5, 12, 12, 19, 19, - 32, 32, 26, 26, 6, 6, 33, 33, 32, 32, 20, 20, 27, 27, 40, 40, 13, 13, 34, 34, - 40, 40, 41, 41, 28, 28, 35, 35, 48, 48, 21, 21, 42, 42, 14, 14, 48, 48, 36, - 36, 49, 49, 43, 43, 29, 29, 56, 56, 22, 22, 50, 50, 57, 57, 44, 44, 37, 37, - 51, 51, 30, 30, 58, 58, 52, 52, 45, 45, 59, 59, 38, 38, 60, 60, 46, 46, 53, - 53, 54, 54, 61, 61, 62, 62, 0, 0, -}; - -DECLARE_ALIGNED(16, static const int16_t, - default_scan_8x8_neighbors[65 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 0, 0, 8, 8, 1, 8, 1, 1, 9, 16, 16, 16, 2, 9, 2, 
2, 10, 17, 17, - 24, 24, 24, 3, 10, 3, 3, 18, 25, 25, 32, 11, 18, 32, 32, 4, 11, 26, 33, 19, - 26, 4, 4, 33, 40, 12, 19, 40, 40, 5, 12, 27, 34, 34, 41, 20, 27, 13, 20, 5, - 5, 41, 48, 48, 48, 28, 35, 35, 42, 21, 28, 6, 6, 6, 13, 42, 49, 49, 56, 36, - 43, 14, 21, 29, 36, 7, 14, 43, 50, 50, 57, 22, 29, 37, 44, 15, 22, 44, 51, - 51, 58, 30, 37, 23, 30, 52, 59, 45, 52, 38, 45, 31, 38, 53, 60, 46, 53, 39, - 46, 54, 61, 47, 54, 55, 62, 0, 0, -}; - -DECLARE_ALIGNED(16, static const int16_t, - col_scan_16x16_neighbors[257 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 16, 16, 32, 32, 0, 0, 48, 48, 1, 1, 64, 64, - 17, 17, 80, 80, 33, 33, 1, 1, 49, 49, 96, 96, 2, 2, 65, 65, - 18, 18, 112, 112, 34, 34, 81, 81, 2, 2, 50, 50, 128, 128, 3, 3, - 97, 97, 19, 19, 66, 66, 144, 144, 82, 82, 35, 35, 113, 113, 3, 3, - 51, 51, 160, 160, 4, 4, 98, 98, 129, 129, 67, 67, 20, 20, 83, 83, - 114, 114, 36, 36, 176, 176, 4, 4, 145, 145, 52, 52, 99, 99, 5, 5, - 130, 130, 68, 68, 192, 192, 161, 161, 21, 21, 115, 115, 84, 84, 37, 37, - 146, 146, 208, 208, 53, 53, 5, 5, 100, 100, 177, 177, 131, 131, 69, 69, - 6, 6, 224, 224, 116, 116, 22, 22, 162, 162, 85, 85, 147, 147, 38, 38, - 193, 193, 101, 101, 54, 54, 6, 6, 132, 132, 178, 178, 70, 70, 163, 163, - 209, 209, 7, 7, 117, 117, 23, 23, 148, 148, 7, 7, 86, 86, 194, 194, - 225, 225, 39, 39, 179, 179, 102, 102, 133, 133, 55, 55, 164, 164, 8, 8, - 71, 71, 210, 210, 118, 118, 149, 149, 195, 195, 24, 24, 87, 87, 40, 40, - 56, 56, 134, 134, 180, 180, 226, 226, 103, 103, 8, 8, 165, 165, 211, 211, - 72, 72, 150, 150, 9, 9, 119, 119, 25, 25, 88, 88, 196, 196, 41, 41, - 135, 135, 181, 181, 104, 104, 57, 57, 227, 227, 166, 166, 120, 120, 151, 151, - 197, 197, 73, 73, 9, 9, 212, 212, 89, 89, 136, 136, 182, 182, 10, 10, - 26, 26, 105, 105, 167, 167, 228, 228, 152, 152, 42, 42, 121, 121, 213, 213, - 58, 58, 198, 198, 74, 74, 137, 137, 183, 183, 168, 168, 10, 10, 90, 90, - 229, 229, 11, 11, 106, 106, 214, 214, 153, 153, 27, 27, 199, 199, 43, 43, - 184, 184, 122, 122, 169, 
169, 230, 230, 59, 59, 11, 11, 75, 75, 138, 138, - 200, 200, 215, 215, 91, 91, 12, 12, 28, 28, 185, 185, 107, 107, 154, 154, - 44, 44, 231, 231, 216, 216, 60, 60, 123, 123, 12, 12, 76, 76, 201, 201, - 170, 170, 232, 232, 139, 139, 92, 92, 13, 13, 108, 108, 29, 29, 186, 186, - 217, 217, 155, 155, 45, 45, 13, 13, 61, 61, 124, 124, 14, 14, 233, 233, - 77, 77, 14, 14, 171, 171, 140, 140, 202, 202, 30, 30, 93, 93, 109, 109, - 46, 46, 156, 156, 62, 62, 187, 187, 15, 15, 125, 125, 218, 218, 78, 78, - 31, 31, 172, 172, 47, 47, 141, 141, 94, 94, 234, 234, 203, 203, 63, 63, - 110, 110, 188, 188, 157, 157, 126, 126, 79, 79, 173, 173, 95, 95, 219, 219, - 142, 142, 204, 204, 235, 235, 111, 111, 158, 158, 127, 127, 189, 189, 220, - 220, 143, 143, 174, 174, 205, 205, 236, 236, 159, 159, 190, 190, 221, 221, - 175, 175, 237, 237, 206, 206, 222, 222, 191, 191, 238, 238, 207, 207, 223, - 223, 239, 239, 0, 0, -}; - -DECLARE_ALIGNED(16, static const int16_t, - row_scan_16x16_neighbors[257 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 1, 1, 0, 0, 2, 2, 16, 16, 3, 3, 17, 17, - 16, 16, 4, 4, 32, 32, 18, 18, 5, 5, 33, 33, 32, 32, 19, 19, - 48, 48, 6, 6, 34, 34, 20, 20, 49, 49, 48, 48, 7, 7, 35, 35, - 64, 64, 21, 21, 50, 50, 36, 36, 64, 64, 8, 8, 65, 65, 51, 51, - 22, 22, 37, 37, 80, 80, 66, 66, 9, 9, 52, 52, 23, 23, 81, 81, - 67, 67, 80, 80, 38, 38, 10, 10, 53, 53, 82, 82, 96, 96, 68, 68, - 24, 24, 97, 97, 83, 83, 39, 39, 96, 96, 54, 54, 11, 11, 69, 69, - 98, 98, 112, 112, 84, 84, 25, 25, 40, 40, 55, 55, 113, 113, 99, 99, - 12, 12, 70, 70, 112, 112, 85, 85, 26, 26, 114, 114, 100, 100, 128, 128, - 41, 41, 56, 56, 71, 71, 115, 115, 13, 13, 86, 86, 129, 129, 101, 101, - 128, 128, 72, 72, 130, 130, 116, 116, 27, 27, 57, 57, 14, 14, 87, 87, - 42, 42, 144, 144, 102, 102, 131, 131, 145, 145, 117, 117, 73, 73, 144, 144, - 88, 88, 132, 132, 103, 103, 28, 28, 58, 58, 146, 146, 118, 118, 43, 43, - 160, 160, 147, 147, 89, 89, 104, 104, 133, 133, 161, 161, 119, 119, 160, 160, - 74, 74, 134, 134, 148, 148, 29, 
29, 59, 59, 162, 162, 176, 176, 44, 44, - 120, 120, 90, 90, 105, 105, 163, 163, 177, 177, 149, 149, 176, 176, 135, 135, - 164, 164, 178, 178, 30, 30, 150, 150, 192, 192, 75, 75, 121, 121, 60, 60, - 136, 136, 193, 193, 106, 106, 151, 151, 179, 179, 192, 192, 45, 45, 165, 165, - 166, 166, 194, 194, 91, 91, 180, 180, 137, 137, 208, 208, 122, 122, 152, 152, - 208, 208, 195, 195, 76, 76, 167, 167, 209, 209, 181, 181, 224, 224, 107, 107, - 196, 196, 61, 61, 153, 153, 224, 224, 182, 182, 168, 168, 210, 210, 46, 46, - 138, 138, 92, 92, 183, 183, 225, 225, 211, 211, 240, 240, 197, 197, 169, 169, - 123, 123, 154, 154, 198, 198, 77, 77, 212, 212, 184, 184, 108, 108, 226, 226, - 199, 199, 62, 62, 227, 227, 241, 241, 139, 139, 213, 213, 170, 170, 185, 185, - 155, 155, 228, 228, 242, 242, 124, 124, 93, 93, 200, 200, 243, 243, 214, 214, - 215, 215, 229, 229, 140, 140, 186, 186, 201, 201, 78, 78, 171, 171, 109, 109, - 156, 156, 244, 244, 216, 216, 230, 230, 94, 94, 245, 245, 231, 231, 125, 125, - 202, 202, 246, 246, 232, 232, 172, 172, 217, 217, 141, 141, 110, 110, 157, - 157, 187, 187, 247, 247, 126, 126, 233, 233, 218, 218, 248, 248, 188, 188, - 203, 203, 142, 142, 173, 173, 158, 158, 249, 249, 234, 234, 204, 204, 219, - 219, 174, 174, 189, 189, 250, 250, 220, 220, 190, 190, 205, 205, 235, 235, - 206, 206, 236, 236, 251, 251, 221, 221, 252, 252, 222, 222, 237, 237, 238, - 238, 253, 253, 254, 254, 0, 0, -}; - -DECLARE_ALIGNED(16, static const int16_t, - default_scan_16x16_neighbors[257 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 0, 0, 16, 16, 1, 16, 1, 1, 32, 32, 17, 32, - 2, 17, 2, 2, 48, 48, 18, 33, 33, 48, 3, 18, 49, 64, 64, 64, - 34, 49, 3, 3, 19, 34, 50, 65, 4, 19, 65, 80, 80, 80, 35, 50, - 4, 4, 20, 35, 66, 81, 81, 96, 51, 66, 96, 96, 5, 20, 36, 51, - 82, 97, 21, 36, 67, 82, 97, 112, 5, 5, 52, 67, 112, 112, 37, 52, - 6, 21, 83, 98, 98, 113, 68, 83, 6, 6, 113, 128, 22, 37, 53, 68, - 84, 99, 99, 114, 128, 128, 114, 129, 69, 84, 38, 53, 7, 22, 7, 7, - 129, 144, 23, 38, 54, 69, 100, 
115, 85, 100, 115, 130, 144, 144, 130, 145, - 39, 54, 70, 85, 8, 23, 55, 70, 116, 131, 101, 116, 145, 160, 24, 39, - 8, 8, 86, 101, 131, 146, 160, 160, 146, 161, 71, 86, 40, 55, 9, 24, - 117, 132, 102, 117, 161, 176, 132, 147, 56, 71, 87, 102, 25, 40, 147, 162, - 9, 9, 176, 176, 162, 177, 72, 87, 41, 56, 118, 133, 133, 148, 103, 118, - 10, 25, 148, 163, 57, 72, 88, 103, 177, 192, 26, 41, 163, 178, 192, 192, - 10, 10, 119, 134, 73, 88, 149, 164, 104, 119, 134, 149, 42, 57, 178, 193, - 164, 179, 11, 26, 58, 73, 193, 208, 89, 104, 135, 150, 120, 135, 27, 42, - 74, 89, 208, 208, 150, 165, 179, 194, 165, 180, 105, 120, 194, 209, 43, 58, - 11, 11, 136, 151, 90, 105, 151, 166, 180, 195, 59, 74, 121, 136, 209, 224, - 195, 210, 224, 224, 166, 181, 106, 121, 75, 90, 12, 27, 181, 196, 12, 12, - 210, 225, 152, 167, 167, 182, 137, 152, 28, 43, 196, 211, 122, 137, 91, 106, - 225, 240, 44, 59, 13, 28, 107, 122, 182, 197, 168, 183, 211, 226, 153, 168, - 226, 241, 60, 75, 197, 212, 138, 153, 29, 44, 76, 91, 13, 13, 183, 198, - 123, 138, 45, 60, 212, 227, 198, 213, 154, 169, 169, 184, 227, 242, 92, 107, - 61, 76, 139, 154, 14, 29, 14, 14, 184, 199, 213, 228, 108, 123, 199, 214, - 228, 243, 77, 92, 30, 45, 170, 185, 155, 170, 185, 200, 93, 108, 124, 139, - 214, 229, 46, 61, 200, 215, 229, 244, 15, 30, 109, 124, 62, 77, 140, 155, - 215, 230, 31, 46, 171, 186, 186, 201, 201, 216, 78, 93, 230, 245, 125, 140, - 47, 62, 216, 231, 156, 171, 94, 109, 231, 246, 141, 156, 63, 78, 202, 217, - 187, 202, 110, 125, 217, 232, 172, 187, 232, 247, 79, 94, 157, 172, 126, 141, - 203, 218, 95, 110, 233, 248, 218, 233, 142, 157, 111, 126, 173, 188, 188, 203, - 234, 249, 219, 234, 127, 142, 158, 173, 204, 219, 189, 204, 143, 158, 235, - 250, 174, 189, 205, 220, 159, 174, 220, 235, 221, 236, 175, 190, 190, 205, - 236, 251, 206, 221, 237, 252, 191, 206, 222, 237, 207, 222, 238, 253, 223, - 238, 239, 254, 0, 0, -}; - -DECLARE_ALIGNED(16, static const int16_t, - default_scan_32x32_neighbors[1025 * 
MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 0, 0, 32, 32, 1, 32, 1, 1, 64, 64, 33, 64, - 2, 33, 96, 96, 2, 2, 65, 96, 34, 65, 128, 128, 97, 128, 3, 34, - 66, 97, 3, 3, 35, 66, 98, 129, 129, 160, 160, 160, 4, 35, 67, 98, - 192, 192, 4, 4, 130, 161, 161, 192, 36, 67, 99, 130, 5, 36, 68, 99, - 193, 224, 162, 193, 224, 224, 131, 162, 37, 68, 100, 131, 5, 5, 194, 225, - 225, 256, 256, 256, 163, 194, 69, 100, 132, 163, 6, 37, 226, 257, 6, 6, - 195, 226, 257, 288, 101, 132, 288, 288, 38, 69, 164, 195, 133, 164, 258, 289, - 227, 258, 196, 227, 7, 38, 289, 320, 70, 101, 320, 320, 7, 7, 165, 196, - 39, 70, 102, 133, 290, 321, 259, 290, 228, 259, 321, 352, 352, 352, 197, 228, - 134, 165, 71, 102, 8, 39, 322, 353, 291, 322, 260, 291, 103, 134, 353, 384, - 166, 197, 229, 260, 40, 71, 8, 8, 384, 384, 135, 166, 354, 385, 323, 354, - 198, 229, 292, 323, 72, 103, 261, 292, 9, 40, 385, 416, 167, 198, 104, 135, - 230, 261, 355, 386, 416, 416, 293, 324, 324, 355, 9, 9, 41, 72, 386, 417, - 199, 230, 136, 167, 417, 448, 262, 293, 356, 387, 73, 104, 387, 418, 231, 262, - 10, 41, 168, 199, 325, 356, 418, 449, 105, 136, 448, 448, 42, 73, 294, 325, - 200, 231, 10, 10, 357, 388, 137, 168, 263, 294, 388, 419, 74, 105, 419, 450, - 449, 480, 326, 357, 232, 263, 295, 326, 169, 200, 11, 42, 106, 137, 480, 480, - 450, 481, 358, 389, 264, 295, 201, 232, 138, 169, 389, 420, 43, 74, 420, 451, - 327, 358, 11, 11, 481, 512, 233, 264, 451, 482, 296, 327, 75, 106, 170, 201, - 482, 513, 512, 512, 390, 421, 359, 390, 421, 452, 107, 138, 12, 43, 202, 233, - 452, 483, 265, 296, 328, 359, 139, 170, 44, 75, 483, 514, 513, 544, 234, 265, - 297, 328, 422, 453, 12, 12, 391, 422, 171, 202, 76, 107, 514, 545, 453, 484, - 544, 544, 266, 297, 203, 234, 108, 139, 329, 360, 298, 329, 140, 171, 515, - 546, 13, 44, 423, 454, 235, 266, 545, 576, 454, 485, 45, 76, 172, 203, 330, - 361, 576, 576, 13, 13, 267, 298, 546, 577, 77, 108, 204, 235, 455, 486, 577, - 608, 299, 330, 109, 140, 547, 578, 14, 45, 14, 14, 141, 172, 578, 609, 331, 
- 362, 46, 77, 173, 204, 15, 15, 78, 109, 205, 236, 579, 610, 110, 141, 15, 46, - 142, 173, 47, 78, 174, 205, 16, 16, 79, 110, 206, 237, 16, 47, 111, 142, - 48, 79, 143, 174, 80, 111, 175, 206, 17, 48, 17, 17, 207, 238, 49, 80, - 81, 112, 18, 18, 18, 49, 50, 81, 82, 113, 19, 50, 51, 82, 83, 114, 608, 608, - 484, 515, 360, 391, 236, 267, 112, 143, 19, 19, 640, 640, 609, 640, 516, 547, - 485, 516, 392, 423, 361, 392, 268, 299, 237, 268, 144, 175, 113, 144, 20, 51, - 20, 20, 672, 672, 641, 672, 610, 641, 548, 579, 517, 548, 486, 517, 424, 455, - 393, 424, 362, 393, 300, 331, 269, 300, 238, 269, 176, 207, 145, 176, 114, - 145, 52, 83, 21, 52, 21, 21, 704, 704, 673, 704, 642, 673, 611, 642, 580, - 611, 549, 580, 518, 549, 487, 518, 456, 487, 425, 456, 394, 425, 363, 394, - 332, 363, 301, 332, 270, 301, 239, 270, 208, 239, 177, 208, 146, 177, 115, - 146, 84, 115, 53, 84, 22, 53, 22, 22, 705, 736, 674, 705, 643, 674, 581, 612, - 550, 581, 519, 550, 457, 488, 426, 457, 395, 426, 333, 364, 302, 333, 271, - 302, 209, 240, 178, 209, 147, 178, 85, 116, 54, 85, 23, 54, 706, 737, 675, - 706, 582, 613, 551, 582, 458, 489, 427, 458, 334, 365, 303, 334, 210, 241, - 179, 210, 86, 117, 55, 86, 707, 738, 583, 614, 459, 490, 335, 366, 211, 242, - 87, 118, 736, 736, 612, 643, 488, 519, 364, 395, 240, 271, 116, 147, 23, 23, - 768, 768, 737, 768, 644, 675, 613, 644, 520, 551, 489, 520, 396, 427, 365, - 396, 272, 303, 241, 272, 148, 179, 117, 148, 24, 55, 24, 24, 800, 800, 769, - 800, 738, 769, 676, 707, 645, 676, 614, 645, 552, 583, 521, 552, 490, 521, - 428, 459, 397, 428, 366, 397, 304, 335, 273, 304, 242, 273, 180, 211, 149, - 180, 118, 149, 56, 87, 25, 56, 25, 25, 832, 832, 801, 832, 770, 801, 739, - 770, 708, 739, 677, 708, 646, 677, 615, 646, 584, 615, 553, 584, 522, 553, - 491, 522, 460, 491, 429, 460, 398, 429, 367, 398, 336, 367, 305, 336, 274, - 305, 243, 274, 212, 243, 181, 212, 150, 181, 119, 150, 88, 119, 57, 88, 26, - 57, 26, 26, 833, 864, 802, 833, 771, 802, 709, 740, 678, 
709, 647, 678, 585, - 616, 554, 585, 523, 554, 461, 492, 430, 461, 399, 430, 337, 368, 306, 337, - 275, 306, 213, 244, 182, 213, 151, 182, 89, 120, 58, 89, 27, 58, 834, 865, - 803, 834, 710, 741, 679, 710, 586, 617, 555, 586, 462, 493, 431, 462, 338, - 369, 307, 338, 214, 245, 183, 214, 90, 121, 59, 90, 835, 866, 711, 742, 587, - 618, 463, 494, 339, 370, 215, 246, 91, 122, 864, 864, 740, 771, 616, 647, - 492, 523, 368, 399, 244, 275, 120, 151, 27, 27, 896, 896, 865, 896, 772, 803, - 741, 772, 648, 679, 617, 648, 524, 555, 493, 524, 400, 431, 369, 400, 276, - 307, 245, 276, 152, 183, 121, 152, 28, 59, 28, 28, 928, 928, 897, 928, 866, - 897, 804, 835, 773, 804, 742, 773, 680, 711, 649, 680, 618, 649, 556, 587, - 525, 556, 494, 525, 432, 463, 401, 432, 370, 401, 308, 339, 277, 308, 246, - 277, 184, 215, 153, 184, 122, 153, 60, 91, 29, 60, 29, 29, 960, 960, 929, - 960, 898, 929, 867, 898, 836, 867, 805, 836, 774, 805, 743, 774, 712, 743, - 681, 712, 650, 681, 619, 650, 588, 619, 557, 588, 526, 557, 495, 526, 464, - 495, 433, 464, 402, 433, 371, 402, 340, 371, 309, 340, 278, 309, 247, 278, - 216, 247, 185, 216, 154, 185, 123, 154, 92, 123, 61, 92, 30, 61, 30, 30, - 961, 992, 930, 961, 899, 930, 837, 868, 806, 837, 775, 806, 713, 744, 682, - 713, 651, 682, 589, 620, 558, 589, 527, 558, 465, 496, 434, 465, 403, 434, - 341, 372, 310, 341, 279, 310, 217, 248, 186, 217, 155, 186, 93, 124, 62, 93, - 31, 62, 962, 993, 931, 962, 838, 869, 807, 838, 714, 745, 683, 714, 590, 621, - 559, 590, 466, 497, 435, 466, 342, 373, 311, 342, 218, 249, 187, 218, 94, - 125, 63, 94, 963, 994, 839, 870, 715, 746, 591, 622, 467, 498, 343, 374, 219, - 250, 95, 126, 868, 899, 744, 775, 620, 651, 496, 527, 372, 403, 248, 279, - 124, 155, 900, 931, 869, 900, 776, 807, 745, 776, 652, 683, 621, 652, 528, - 559, 497, 528, 404, 435, 373, 404, 280, 311, 249, 280, 156, 187, 125, 156, - 932, 963, 901, 932, 870, 901, 808, 839, 777, 808, 746, 777, 684, 715, 653, - 684, 622, 653, 560, 591, 529, 560, 498, 529, 
436, 467, 405, 436, 374, 405, - 312, 343, 281, 312, 250, 281, 188, 219, 157, 188, 126, 157, 964, 995, 933, - 964, 902, 933, 871, 902, 840, 871, 809, 840, 778, 809, 747, 778, 716, 747, - 685, 716, 654, 685, 623, 654, 592, 623, 561, 592, 530, 561, 499, 530, 468, - 499, 437, 468, 406, 437, 375, 406, 344, 375, 313, 344, 282, 313, 251, 282, - 220, 251, 189, 220, 158, 189, 127, 158, 965, 996, 934, 965, 903, 934, 841, - 872, 810, 841, 779, 810, 717, 748, 686, 717, 655, 686, 593, 624, 562, 593, - 531, 562, 469, 500, 438, 469, 407, 438, 345, 376, 314, 345, 283, 314, 221, - 252, 190, 221, 159, 190, 966, 997, 935, 966, 842, 873, 811, 842, 718, 749, - 687, 718, 594, 625, 563, 594, 470, 501, 439, 470, 346, 377, 315, 346, 222, - 253, 191, 222, 967, 998, 843, 874, 719, 750, 595, 626, 471, 502, 347, 378, - 223, 254, 872, 903, 748, 779, 624, 655, 500, 531, 376, 407, 252, 283, 904, - 935, 873, 904, 780, 811, 749, 780, 656, 687, 625, 656, 532, 563, 501, 532, - 408, 439, 377, 408, 284, 315, 253, 284, 936, 967, 905, 936, 874, 905, 812, - 843, 781, 812, 750, 781, 688, 719, 657, 688, 626, 657, 564, 595, 533, 564, - 502, 533, 440, 471, 409, 440, 378, 409, 316, 347, 285, 316, 254, 285, 968, - 999, 937, 968, 906, 937, 875, 906, 844, 875, 813, 844, 782, 813, 751, 782, - 720, 751, 689, 720, 658, 689, 627, 658, 596, 627, 565, 596, 534, 565, 503, - 534, 472, 503, 441, 472, 410, 441, 379, 410, 348, 379, 317, 348, 286, 317, - 255, 286, 969, 1000, 938, 969, 907, 938, 845, 876, 814, 845, 783, 814, 721, - 752, 690, 721, 659, 690, 597, 628, 566, 597, 535, 566, 473, 504, 442, 473, - 411, 442, 349, 380, 318, 349, 287, 318, 970, 1001, 939, 970, 846, 877, 815, - 846, 722, 753, 691, 722, 598, 629, 567, 598, 474, 505, 443, 474, 350, 381, - 319, 350, 971, 1002, 847, 878, 723, 754, 599, 630, 475, 506, 351, 382, 876, - 907, 752, 783, 628, 659, 504, 535, 380, 411, 908, 939, 877, 908, 784, 815, - 753, 784, 660, 691, 629, 660, 536, 567, 505, 536, 412, 443, 381, 412, 940, - 971, 909, 940, 878, 909, 816, 847, 785, 
816, 754, 785, 692, 723, 661, 692, - 630, 661, 568, 599, 537, 568, 506, 537, 444, 475, 413, 444, 382, 413, 972, - 1003, 941, 972, 910, 941, 879, 910, 848, 879, 817, 848, 786, 817, 755, 786, - 724, 755, 693, 724, 662, 693, 631, 662, 600, 631, 569, 600, 538, 569, 507, - 538, 476, 507, 445, 476, 414, 445, 383, 414, 973, 1004, 942, 973, 911, 942, - 849, 880, 818, 849, 787, 818, 725, 756, 694, 725, 663, 694, 601, 632, 570, - 601, 539, 570, 477, 508, 446, 477, 415, 446, 974, 1005, 943, 974, 850, 881, - 819, 850, 726, 757, 695, 726, 602, 633, 571, 602, 478, 509, 447, 478, 975, - 1006, 851, 882, 727, 758, 603, 634, 479, 510, 880, 911, 756, 787, 632, 663, - 508, 539, 912, 943, 881, 912, 788, 819, 757, 788, 664, 695, 633, 664, 540, - 571, 509, 540, 944, 975, 913, 944, 882, 913, 820, 851, 789, 820, 758, 789, - 696, 727, 665, 696, 634, 665, 572, 603, 541, 572, 510, 541, 976, 1007, 945, - 976, 914, 945, 883, 914, 852, 883, 821, 852, 790, 821, 759, 790, 728, 759, - 697, 728, 666, 697, 635, 666, 604, 635, 573, 604, 542, 573, 511, 542, 977, - 1008, 946, 977, 915, 946, 853, 884, 822, 853, 791, 822, 729, 760, 698, 729, - 667, 698, 605, 636, 574, 605, 543, 574, 978, 1009, 947, 978, 854, 885, 823, - 854, 730, 761, 699, 730, 606, 637, 575, 606, 979, 1010, 855, 886, 731, 762, - 607, 638, 884, 915, 760, 791, 636, 667, 916, 947, 885, 916, 792, 823, 761, - 792, 668, 699, 637, 668, 948, 979, 917, 948, 886, 917, 824, 855, 793, 824, - 762, 793, 700, 731, 669, 700, 638, 669, 980, 1011, 949, 980, 918, 949, 887, - 918, 856, 887, 825, 856, 794, 825, 763, 794, 732, 763, 701, 732, 670, 701, - 639, 670, 981, 1012, 950, 981, 919, 950, 857, 888, 826, 857, 795, 826, 733, - 764, 702, 733, 671, 702, 982, 1013, 951, 982, 858, 889, 827, 858, 734, 765, - 703, 734, 983, 1014, 859, 890, 735, 766, 888, 919, 764, 795, 920, 951, 889, - 920, 796, 827, 765, 796, 952, 983, 921, 952, 890, 921, 828, 859, 797, 828, - 766, 797, 984, 1015, 953, 984, 922, 953, 891, 922, 860, 891, 829, 860, 798, - 829, 767, 798, 985, 
1016, 954, 985, 923, 954, 861, 892, 830, 861, 799, 830, - 986, 1017, 955, 986, 862, 893, 831, 862, 987, 1018, 863, 894, 892, 923, 924, - 955, 893, 924, 956, 987, 925, 956, 894, 925, 988, 1019, 957, 988, 926, 957, - 895, 926, 989, 1020, 958, 989, 927, 958, 990, 1021, 959, 990, 991, 1022, 0, 0, -}; - -DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_4x4[16]) = { - 0, 2, 5, 8, 1, 3, 9, 12, 4, 7, 11, 14, 6, 10, 13, 15, -}; - -DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_4x4[16]) = { - 0, 3, 7, 11, 1, 5, 9, 12, 2, 6, 10, 14, 4, 8, 13, 15, -}; - -DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_4x4[16]) = { - 0, 1, 3, 5, 2, 4, 6, 9, 7, 8, 11, 13, 10, 12, 14, 15, -}; - -DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_8x8[64]) = { - 0, 3, 8, 15, 22, 32, 40, 47, 1, 5, 11, 18, 26, 34, 44, 51, - 2, 7, 13, 20, 28, 38, 46, 54, 4, 10, 16, 24, 31, 41, 50, 56, - 6, 12, 21, 27, 35, 43, 52, 58, 9, 17, 25, 33, 39, 48, 55, 60, - 14, 23, 30, 37, 45, 53, 59, 62, 19, 29, 36, 42, 49, 57, 61, 63, -}; - -DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_8x8[64]) = { - 0, 1, 2, 5, 8, 12, 19, 24, 3, 4, 7, 10, 15, 20, 30, 39, - 6, 9, 13, 16, 21, 27, 37, 46, 11, 14, 17, 23, 28, 34, 44, 52, - 18, 22, 25, 31, 35, 41, 50, 57, 26, 29, 33, 38, 43, 49, 55, 59, - 32, 36, 42, 47, 51, 54, 60, 61, 40, 45, 48, 53, 56, 58, 62, 63, -}; - -DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_8x8[64]) = { - 0, 2, 5, 9, 14, 22, 31, 37, 1, 4, 8, 13, 19, 26, 38, 44, - 3, 6, 10, 17, 24, 30, 42, 49, 7, 11, 15, 21, 29, 36, 47, 53, - 12, 16, 20, 27, 34, 43, 52, 57, 18, 23, 28, 35, 41, 48, 56, 60, - 25, 32, 39, 45, 50, 55, 59, 62, 33, 40, 46, 51, 54, 58, 61, 63, -}; - -DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_16x16[256]) = { - 0, 4, 11, 20, 31, 43, 59, 75, 85, 109, 130, 150, 165, 181, 195, 198, - 1, 6, 14, 23, 34, 47, 64, 81, 95, 114, 135, 153, 171, 188, 201, 212, - 2, 8, 16, 25, 38, 52, 67, 83, 101, 116, 136, 157, 172, 190, 205, 216, - 3, 10, 
18, 29, 41, 55, 71, 89, 103, 119, 141, 159, 176, 194, 208, 218, - 5, 12, 21, 32, 45, 58, 74, 93, 104, 123, 144, 164, 179, 196, 210, 223, - 7, 15, 26, 37, 49, 63, 78, 96, 112, 129, 146, 166, 182, 200, 215, 228, - 9, 19, 28, 39, 54, 69, 86, 102, 117, 132, 151, 170, 187, 206, 220, 230, - 13, 24, 35, 46, 60, 73, 91, 108, 122, 137, 154, 174, 189, 207, 224, 235, - 17, 30, 40, 53, 66, 82, 98, 115, 126, 142, 161, 180, 197, 213, 227, 237, - 22, 36, 48, 62, 76, 92, 105, 120, 133, 147, 167, 186, 203, 219, 232, 240, - 27, 44, 56, 70, 84, 99, 113, 127, 140, 156, 175, 193, 209, 226, 236, 244, - 33, 51, 68, 79, 94, 110, 125, 138, 149, 162, 184, 202, 217, 229, 241, 247, - 42, 61, 77, 90, 106, 121, 134, 148, 160, 173, 191, 211, 225, 238, 245, 251, - 50, 72, 87, 100, 118, 128, 145, 158, 168, 183, 204, 222, 233, 242, 249, 253, - 57, 80, 97, 111, 131, 143, 155, 169, 178, 192, 214, 231, 239, 246, 250, 254, - 65, 88, 107, 124, 139, 152, 163, 177, 185, 199, 221, 234, 243, 248, 252, 255, -}; - -DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_16x16[256]) = { - 0, 1, 2, 4, 6, 9, 12, 17, 22, 29, 36, 43, 54, 64, 76, 86, - 3, 5, 7, 11, 15, 19, 25, 32, 38, 48, 59, 68, 84, 99, 115, 130, - 8, 10, 13, 18, 23, 27, 33, 42, 51, 60, 72, 88, 103, 119, 142, 167, - 14, 16, 20, 26, 31, 37, 44, 53, 61, 73, 85, 100, 116, 135, 161, 185, - 21, 24, 30, 35, 40, 47, 55, 65, 74, 81, 94, 112, 133, 154, 179, 205, - 28, 34, 39, 45, 50, 58, 67, 77, 87, 96, 106, 121, 146, 169, 196, 212, - 41, 46, 49, 56, 63, 70, 79, 90, 98, 107, 122, 138, 159, 182, 207, 222, - 52, 57, 62, 69, 75, 83, 93, 102, 110, 120, 134, 150, 176, 195, 215, 226, - 66, 71, 78, 82, 91, 97, 108, 113, 127, 136, 148, 168, 188, 202, 221, 232, - 80, 89, 92, 101, 105, 114, 125, 131, 139, 151, 162, 177, 192, 208, 223, 234, - 95, 104, 109, 117, 123, 128, 143, 144, 155, 165, 175, 190, 206, 219, 233, 239, - 111, 118, 124, 129, 140, 147, 157, 164, 170, 181, 191, 203, 224, 230, 240, - 243, 126, 132, 137, 145, 153, 160, 174, 178, 184, 197, 204, 216, 231, 
237, - 244, 246, 141, 149, 156, 166, 172, 180, 189, 199, 200, 210, 220, 228, 238, - 242, 249, 251, 152, 163, 171, 183, 186, 193, 201, 211, 214, 218, 227, 236, - 245, 247, 252, 253, 158, 173, 187, 194, 198, 209, 213, 217, 225, 229, 235, - 241, 248, 250, 254, 255, -}; - -DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_16x16[256]) = { - 0, 2, 5, 9, 17, 24, 36, 44, 55, 72, 88, 104, 128, 143, 166, 179, - 1, 4, 8, 13, 20, 30, 40, 54, 66, 79, 96, 113, 141, 154, 178, 196, - 3, 7, 11, 18, 25, 33, 46, 57, 71, 86, 101, 119, 148, 164, 186, 201, - 6, 12, 16, 23, 31, 39, 53, 64, 78, 92, 110, 127, 153, 169, 193, 208, - 10, 14, 19, 28, 37, 47, 58, 67, 84, 98, 114, 133, 161, 176, 198, 214, - 15, 21, 26, 34, 43, 52, 65, 77, 91, 106, 120, 140, 165, 185, 205, 221, - 22, 27, 32, 41, 48, 60, 73, 85, 99, 116, 130, 151, 175, 190, 211, 225, - 29, 35, 42, 49, 59, 69, 81, 95, 108, 125, 139, 155, 182, 197, 217, 229, - 38, 45, 51, 61, 68, 80, 93, 105, 118, 134, 150, 168, 191, 207, 223, 234, - 50, 56, 63, 74, 83, 94, 109, 117, 129, 147, 163, 177, 199, 213, 228, 238, - 62, 70, 76, 87, 97, 107, 122, 131, 145, 159, 172, 188, 210, 222, 235, 242, - 75, 82, 90, 102, 112, 124, 138, 146, 157, 173, 187, 202, 219, 230, 240, 245, - 89, 100, 111, 123, 132, 142, 156, 167, 180, 189, 203, 216, 231, 237, 246, 250, - 103, 115, 126, 136, 149, 162, 171, 183, 194, 204, 215, 224, 236, 241, 248, - 252, 121, 135, 144, 158, 170, 181, 192, 200, 209, 218, 227, 233, 243, 244, - 251, 254, 137, 152, 160, 174, 184, 195, 206, 212, 220, 226, 232, 239, 247, - 249, 253, 255, -}; - -DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_32x32[1024]) = { - 0, 2, 5, 10, 17, 25, 38, 47, 62, 83, 101, 121, 145, 170, 193, 204, - 210, 219, 229, 233, 245, 257, 275, 299, 342, 356, 377, 405, 455, 471, 495, - 527, 1, 4, 8, 15, 22, 30, 45, 58, 74, 92, 112, 133, 158, 184, 203, 215, 222, - 228, 234, 237, 256, 274, 298, 317, 355, 376, 404, 426, 470, 494, 526, 551, - 3, 7, 12, 18, 28, 36, 52, 64, 82, 102, 118, 142, 164, 189, 
208, 217, 224, - 231, 235, 238, 273, 297, 316, 329, 375, 403, 425, 440, 493, 525, 550, 567, - 6, 11, 16, 23, 31, 43, 60, 73, 90, 109, 126, 150, 173, 196, 211, 220, 226, - 232, 236, 239, 296, 315, 328, 335, 402, 424, 439, 447, 524, 549, 566, 575, - 9, 14, 19, 29, 37, 50, 65, 78, 95, 116, 134, 157, 179, 201, 214, 223, 244, - 255, 272, 295, 341, 354, 374, 401, 454, 469, 492, 523, 582, 596, 617, 645, - 13, 20, 26, 35, 44, 54, 72, 85, 105, 123, 140, 163, 182, 205, 216, 225, - 254, 271, 294, 314, 353, 373, 400, 423, 468, 491, 522, 548, 595, 616, 644, - 666, 21, 27, 33, 42, 53, 63, 80, 94, 113, 132, 151, 172, 190, 209, 218, 227, - 270, 293, 313, 327, 372, 399, 422, 438, 490, 521, 547, 565, 615, 643, 665, - 680, 24, 32, 39, 48, 57, 71, 88, 104, 120, 139, 159, 178, 197, 212, 221, 230, - 292, 312, 326, 334, 398, 421, 437, 446, 520, 546, 564, 574, 642, 664, 679, - 687, 34, 40, 46, 56, 68, 81, 96, 111, 130, 147, 167, 186, 243, 253, 269, 291, - 340, 352, 371, 397, 453, 467, 489, 519, 581, 594, 614, 641, 693, 705, 723, - 747, 41, 49, 55, 67, 77, 91, 107, 124, 138, 161, 177, 194, 252, 268, 290, - 311, 351, 370, 396, 420, 466, 488, 518, 545, 593, 613, 640, 663, 704, 722, - 746, 765, 51, 59, 66, 76, 89, 99, 119, 131, 149, 168, 181, 200, 267, 289, - 310, 325, 369, 395, 419, 436, 487, 517, 544, 563, 612, 639, 662, 678, 721, - 745, 764, 777, 61, 69, 75, 87, 100, 114, 129, 144, 162, 180, 191, 207, 288, - 309, 324, 333, 394, 418, 435, 445, 516, 543, 562, 573, 638, 661, 677, 686, - 744, 763, 776, 783, 70, 79, 86, 97, 108, 122, 137, 155, 242, 251, 266, 287, - 339, 350, 368, 393, 452, 465, 486, 515, 580, 592, 611, 637, 692, 703, 720, - 743, 788, 798, 813, 833, 84, 93, 103, 110, 125, 141, 154, 171, 250, 265, 286, - 308, 349, 367, 392, 417, 464, 485, 514, 542, 591, 610, 636, 660, 702, 719, - 742, 762, 797, 812, 832, 848, 98, 106, 115, 127, 143, 156, 169, 185, 264, - 285, 307, 323, 366, 391, 416, 434, 484, 513, 541, 561, 609, 635, 659, 676, - 718, 741, 761, 775, 811, 831, 847, 858, 117, 128, 
136, 148, 160, 175, 188, - 198, 284, 306, 322, 332, 390, 415, 433, 444, 512, 540, 560, 572, 634, 658, - 675, 685, 740, 760, 774, 782, 830, 846, 857, 863, 135, 146, 152, 165, 241, - 249, 263, 283, 338, 348, 365, 389, 451, 463, 483, 511, 579, 590, 608, 633, - 691, 701, 717, 739, 787, 796, 810, 829, 867, 875, 887, 903, 153, 166, 174, - 183, 248, 262, 282, 305, 347, 364, 388, 414, 462, 482, 510, 539, 589, 607, - 632, 657, 700, 716, 738, 759, 795, 809, 828, 845, 874, 886, 902, 915, 176, - 187, 195, 202, 261, 281, 304, 321, 363, 387, 413, 432, 481, 509, 538, 559, - 606, 631, 656, 674, 715, 737, 758, 773, 808, 827, 844, 856, 885, 901, 914, - 923, 192, 199, 206, 213, 280, 303, 320, 331, 386, 412, 431, 443, 508, 537, - 558, 571, 630, 655, 673, 684, 736, 757, 772, 781, 826, 843, 855, 862, 900, - 913, 922, 927, 240, 247, 260, 279, 337, 346, 362, 385, 450, 461, 480, 507, - 578, 588, 605, 629, 690, 699, 714, 735, 786, 794, 807, 825, 866, 873, 884, - 899, 930, 936, 945, 957, 246, 259, 278, 302, 345, 361, 384, 411, 460, 479, - 506, 536, 587, 604, 628, 654, 698, 713, 734, 756, 793, 806, 824, 842, 872, - 883, 898, 912, 935, 944, 956, 966, 258, 277, 301, 319, 360, 383, 410, 430, - 478, 505, 535, 557, 603, 627, 653, 672, 712, 733, 755, 771, 805, 823, 841, - 854, 882, 897, 911, 921, 943, 955, 965, 972, 276, 300, 318, 330, 382, 409, - 429, 442, 504, 534, 556, 570, 626, 652, 671, 683, 732, 754, 770, 780, 822, - 840, 853, 861, 896, 910, 920, 926, 954, 964, 971, 975, 336, 344, 359, 381, - 449, 459, 477, 503, 577, 586, 602, 625, 689, 697, 711, 731, 785, 792, 804, - 821, 865, 871, 881, 895, 929, 934, 942, 953, 977, 981, 987, 995, 343, 358, - 380, 408, 458, 476, 502, 533, 585, 601, 624, 651, 696, 710, 730, 753, 791, - 803, 820, 839, 870, 880, 894, 909, 933, 941, 952, 963, 980, 986, 994, 1001, - 357, 379, 407, 428, 475, 501, 532, 555, 600, 623, 650, 670, 709, 729, 752, - 769, 802, 819, 838, 852, 879, 893, 908, 919, 940, 951, 962, 970, 985, 993, - 1000, 1005, 378, 406, 427, 441, 500, 531, 554, 
569, 622, 649, 669, 682, 728, - 751, 768, 779, 818, 837, 851, 860, 892, 907, 918, 925, 950, 961, 969, 974, - 992, 999, 1004, 1007, 448, 457, 474, 499, 576, 584, 599, 621, 688, 695, 708, - 727, 784, 790, 801, 817, 864, 869, 878, 891, 928, 932, 939, 949, 976, 979, - 984, 991, 1008, 1010, 1013, 1017, 456, 473, 498, 530, 583, 598, 620, 648, - 694, 707, 726, 750, 789, 800, 816, 836, 868, 877, 890, 906, 931, 938, 948, - 960, 978, 983, 990, 998, 1009, 1012, 1016, 1020, 472, 497, 529, 553, 597, - 619, 647, 668, 706, 725, 749, 767, 799, 815, 835, 850, 876, 889, 905, 917, - 937, 947, 959, 968, 982, 989, 997, 1003, 1011, 1015, 1019, 1022, 496, 528, - 552, 568, 618, 646, 667, 681, 724, 748, 766, 778, 814, 834, 849, 859, 888, - 904, 916, 924, 946, 958, 967, 973, 988, 996, 1002, 1006, 1014, 1018, 1021, - 1023, -}; - -const scan_order vp10_default_scan_orders[TX_SIZES] = { - {default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors}, - {default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors}, - {default_scan_16x16, vp10_default_iscan_16x16, default_scan_16x16_neighbors}, - {default_scan_32x32, vp10_default_iscan_32x32, default_scan_32x32_neighbors}, -}; - -const scan_order vp10_scan_orders[TX_SIZES][TX_TYPES] = { - { // TX_4X4 - {default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors}, - {row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors}, - {col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors}, - {default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors} - }, { // TX_8X8 - {default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors}, - {row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors}, - {col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors}, - {default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors} - }, { // TX_16X16 - {default_scan_16x16, vp10_default_iscan_16x16, default_scan_16x16_neighbors}, - {row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors}, - 
{col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors}, - {default_scan_16x16, vp10_default_iscan_16x16, default_scan_16x16_neighbors} - }, { // TX_32X32 - {default_scan_32x32, vp10_default_iscan_32x32, default_scan_32x32_neighbors}, - {default_scan_32x32, vp10_default_iscan_32x32, default_scan_32x32_neighbors}, - {default_scan_32x32, vp10_default_iscan_32x32, default_scan_32x32_neighbors}, - {default_scan_32x32, vp10_default_iscan_32x32, default_scan_32x32_neighbors}, - } -}; diff --git a/libs/libvpx/vp10/common/scan.h b/libs/libvpx/vp10/common/scan.h deleted file mode 100644 index f5a020f1e7..0000000000 --- a/libs/libvpx/vp10/common/scan.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2013 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_COMMON_SCAN_H_ -#define VP10_COMMON_SCAN_H_ - -#include "vpx/vpx_integer.h" -#include "vpx_ports/mem.h" - -#include "vp10/common/enums.h" -#include "vp10/common/blockd.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define MAX_NEIGHBORS 2 - -typedef struct { - const int16_t *scan; - const int16_t *iscan; - const int16_t *neighbors; -} scan_order; - -extern const scan_order vp10_default_scan_orders[TX_SIZES]; -extern const scan_order vp10_scan_orders[TX_SIZES][TX_TYPES]; - -static INLINE int get_coef_context(const int16_t *neighbors, - const uint8_t *token_cache, int c) { - return (1 + token_cache[neighbors[MAX_NEIGHBORS * c + 0]] + - token_cache[neighbors[MAX_NEIGHBORS * c + 1]]) >> 1; -} - -static INLINE const scan_order *get_scan(TX_SIZE tx_size, TX_TYPE tx_type) { - return &vp10_scan_orders[tx_size][tx_type]; -} - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_SCAN_H_ diff --git a/libs/libvpx/vp10/common/seg_common.c b/libs/libvpx/vp10/common/seg_common.c deleted file mode 100644 index 1bf09b9a0f..0000000000 --- a/libs/libvpx/vp10/common/seg_common.c +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - -#include "vp10/common/blockd.h" -#include "vp10/common/loopfilter.h" -#include "vp10/common/seg_common.h" -#include "vp10/common/quant_common.h" - -static const int seg_feature_data_signed[SEG_LVL_MAX] = { 1, 1, 0, 0 }; - -static const int seg_feature_data_max[SEG_LVL_MAX] = { - MAXQ, MAX_LOOP_FILTER, 3, 0 }; - -// These functions provide access to new segment level features. 
-// Eventually these function may be "optimized out" but for the moment, -// the coding mechanism is still subject to change so these provide a -// convenient single point of change. - -void vp10_clearall_segfeatures(struct segmentation *seg) { - vp10_zero(seg->feature_data); - vp10_zero(seg->feature_mask); -} - -void vp10_enable_segfeature(struct segmentation *seg, int segment_id, - SEG_LVL_FEATURES feature_id) { - seg->feature_mask[segment_id] |= 1 << feature_id; -} - -int vp10_seg_feature_data_max(SEG_LVL_FEATURES feature_id) { - return seg_feature_data_max[feature_id]; -} - -int vp10_is_segfeature_signed(SEG_LVL_FEATURES feature_id) { - return seg_feature_data_signed[feature_id]; -} - -void vp10_set_segdata(struct segmentation *seg, int segment_id, - SEG_LVL_FEATURES feature_id, int seg_data) { - assert(seg_data <= seg_feature_data_max[feature_id]); - if (seg_data < 0) { - assert(seg_feature_data_signed[feature_id]); - assert(-seg_data <= seg_feature_data_max[feature_id]); - } - - seg->feature_data[segment_id][feature_id] = seg_data; -} - -const vpx_tree_index vp10_segment_tree[TREE_SIZE(MAX_SEGMENTS)] = { - 2, 4, 6, 8, 10, 12, - 0, -1, -2, -3, -4, -5, -6, -7 -}; - - -// TBD? Functions to read and write segment data with range / validity checking diff --git a/libs/libvpx/vp10/common/seg_common.h b/libs/libvpx/vp10/common/seg_common.h deleted file mode 100644 index cd38e8ee0d..0000000000 --- a/libs/libvpx/vp10/common/seg_common.h +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2012 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_COMMON_SEG_COMMON_H_ -#define VP10_COMMON_SEG_COMMON_H_ - -#include "vpx_dsp/prob.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define SEGMENT_DELTADATA 0 -#define SEGMENT_ABSDATA 1 - -#define MAX_SEGMENTS 8 -#define SEG_TREE_PROBS (MAX_SEGMENTS-1) - -#define PREDICTION_PROBS 3 - -// Segment level features. -typedef enum { - SEG_LVL_ALT_Q = 0, // Use alternate Quantizer .... - SEG_LVL_ALT_LF = 1, // Use alternate loop filter value... - SEG_LVL_REF_FRAME = 2, // Optional Segment reference frame - SEG_LVL_SKIP = 3, // Optional Segment (0,0) + skip mode - SEG_LVL_MAX = 4 // Number of features supported -} SEG_LVL_FEATURES; - - -struct segmentation { - uint8_t enabled; - uint8_t update_map; - uint8_t update_data; - uint8_t abs_delta; - uint8_t temporal_update; - - int16_t feature_data[MAX_SEGMENTS][SEG_LVL_MAX]; - unsigned int feature_mask[MAX_SEGMENTS]; -}; - -struct segmentation_probs { - vpx_prob tree_probs[SEG_TREE_PROBS]; - vpx_prob pred_probs[PREDICTION_PROBS]; -}; - -static INLINE int segfeature_active(const struct segmentation *seg, - int segment_id, - SEG_LVL_FEATURES feature_id) { - return seg->enabled && - (seg->feature_mask[segment_id] & (1 << feature_id)); -} - -void vp10_clearall_segfeatures(struct segmentation *seg); - -void vp10_enable_segfeature(struct segmentation *seg, - int segment_id, - SEG_LVL_FEATURES feature_id); - -int vp10_seg_feature_data_max(SEG_LVL_FEATURES feature_id); - -int vp10_is_segfeature_signed(SEG_LVL_FEATURES feature_id); - -void vp10_set_segdata(struct segmentation *seg, - int segment_id, - SEG_LVL_FEATURES feature_id, - int seg_data); - -static INLINE int get_segdata(const struct segmentation *seg, int segment_id, - SEG_LVL_FEATURES feature_id) { - return seg->feature_data[segment_id][feature_id]; -} - -extern const vpx_tree_index vp10_segment_tree[TREE_SIZE(MAX_SEGMENTS)]; - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_SEG_COMMON_H_ - diff --git 
a/libs/libvpx/vp10/common/textblit.c b/libs/libvpx/vp10/common/textblit.c deleted file mode 100644 index 2e8811ea50..0000000000 --- a/libs/libvpx/vp10/common/textblit.c +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - -#include "vp10/common/textblit.h" - -static const int font[] = { - 0x0, 0x5C00, 0x8020, 0xAFABEA, 0xD7EC0, 0x1111111, 0x1855740, 0x18000, - 0x45C0, 0x74400, 0x51140, 0x23880, 0xC4000, 0x21080, 0x80000, 0x111110, - 0xE9D72E, 0x87E40, 0x12AD732, 0xAAD62A, 0x4F94C4, 0x4D6B7, 0x456AA, - 0x3E8423, 0xAAD6AA, 0xAAD6A2, 0x2800, 0x2A00, 0x8A880, 0x52940, 0x22A20, - 0x15422, 0x6AD62E, 0x1E4A53E, 0xAAD6BF, 0x8C62E, 0xE8C63F, 0x118D6BF, - 0x1094BF, 0xCAC62E, 0x1F2109F, 0x118FE31, 0xF8C628, 0x8A89F, 0x108421F, - 0x1F1105F, 0x1F4105F, 0xE8C62E, 0x2294BF, 0x164C62E, 0x12694BF, 0x8AD6A2, - 0x10FC21, 0x1F8421F, 0x744107, 0xF8220F, 0x1151151, 0x117041, 0x119D731, - 0x47E0, 0x1041041, 0xFC400, 0x10440, 0x1084210, 0x820 -}; - -static void plot(int x, int y, unsigned char *image, int pitch) { - image[x + y * pitch] ^= 255; -} - -void vp10_blit_text(const char *msg, unsigned char *address, const int pitch) { - int letter_bitmap; - unsigned char *output_pos = address; - int colpos = 0; - - while (msg[colpos] != 0) { - char letter = msg[colpos]; - int fontcol, fontrow; - - if (letter <= 'Z' && letter >= ' ') - letter_bitmap = font[letter - ' ']; - else if (letter <= 'z' && letter >= 'a') - letter_bitmap = font[letter - 'a' + 'A' - ' ']; - else - letter_bitmap = font[0]; - - for (fontcol = 6; fontcol >= 0; fontcol--) - for (fontrow = 0; fontrow < 5; fontrow++) - 
output_pos[fontrow * pitch + fontcol] = - ((letter_bitmap >> (fontcol * 5)) & (1 << fontrow) ? 255 : 0); - - output_pos += 7; - colpos++; - } -} - - - -/* Bresenham line algorithm */ -void vp10_blit_line(int x0, int x1, int y0, int y1, unsigned char *image, - int pitch) { - int steep = abs(y1 - y0) > abs(x1 - x0); - int deltax, deltay; - int error, ystep, y, x; - - if (steep) { - int t; - t = x0; - x0 = y0; - y0 = t; - - t = x1; - x1 = y1; - y1 = t; - } - - if (x0 > x1) { - int t; - t = x0; - x0 = x1; - x1 = t; - - t = y0; - y0 = y1; - y1 = t; - } - - deltax = x1 - x0; - deltay = abs(y1 - y0); - error = deltax / 2; - - y = y0; - - if (y0 < y1) - ystep = 1; - else - ystep = -1; - - if (steep) { - for (x = x0; x <= x1; x++) { - plot(y, x, image, pitch); - - error = error - deltay; - if (error < 0) { - y = y + ystep; - error = error + deltax; - } - } - } else { - for (x = x0; x <= x1; x++) { - plot(x, y, image, pitch); - - error = error - deltay; - if (error < 0) { - y = y + ystep; - error = error + deltax; - } - } - } -} diff --git a/libs/libvpx/vp10/common/textblit.h b/libs/libvpx/vp10/common/textblit.h deleted file mode 100644 index c37140d0f1..0000000000 --- a/libs/libvpx/vp10/common/textblit.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_COMMON_TEXTBLIT_H_ -#define VP10_COMMON_TEXTBLIT_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -void vp10_blit_text(const char *msg, unsigned char *address, int pitch); - -void vp10_blit_line(int x0, int x1, int y0, int y1, unsigned char *image, - int pitch); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_TEXTBLIT_H_ diff --git a/libs/libvpx/vp10/common/thread_common.c b/libs/libvpx/vp10/common/thread_common.c deleted file mode 100644 index 0c7a1c22a8..0000000000 --- a/libs/libvpx/vp10/common/thread_common.c +++ /dev/null @@ -1,459 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "./vpx_config.h" -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_mem/vpx_mem.h" -#include "vp10/common/entropymode.h" -#include "vp10/common/thread_common.h" -#include "vp10/common/reconinter.h" -#include "vp10/common/loopfilter.h" - -#if CONFIG_MULTITHREAD -static INLINE void mutex_lock(pthread_mutex_t *const mutex) { - const int kMaxTryLocks = 4000; - int locked = 0; - int i; - - for (i = 0; i < kMaxTryLocks; ++i) { - if (!pthread_mutex_trylock(mutex)) { - locked = 1; - break; - } - } - - if (!locked) - pthread_mutex_lock(mutex); -} -#endif // CONFIG_MULTITHREAD - -static INLINE void sync_read(VP9LfSync *const lf_sync, int r, int c) { -#if CONFIG_MULTITHREAD - const int nsync = lf_sync->sync_range; - - if (r && !(c & (nsync - 1))) { - pthread_mutex_t *const mutex = &lf_sync->mutex_[r - 1]; - mutex_lock(mutex); - - while (c > lf_sync->cur_sb_col[r - 1] - nsync) { - pthread_cond_wait(&lf_sync->cond_[r - 1], mutex); - } - pthread_mutex_unlock(mutex); - } -#else - (void)lf_sync; - (void)r; - (void)c; -#endif // CONFIG_MULTITHREAD -} - -static INLINE void sync_write(VP9LfSync *const lf_sync, int r, int c, - const int sb_cols) { -#if CONFIG_MULTITHREAD - const int nsync = lf_sync->sync_range; - int cur; - // Only signal when there are enough filtered SB for next row to run. - int sig = 1; - - if (c < sb_cols - 1) { - cur = c; - if (c % nsync) - sig = 0; - } else { - cur = sb_cols + nsync; - } - - if (sig) { - mutex_lock(&lf_sync->mutex_[r]); - - lf_sync->cur_sb_col[r] = cur; - - pthread_cond_signal(&lf_sync->cond_[r]); - pthread_mutex_unlock(&lf_sync->mutex_[r]); - } -#else - (void)lf_sync; - (void)r; - (void)c; - (void)sb_cols; -#endif // CONFIG_MULTITHREAD -} - -// Implement row loopfiltering for each thread. 
-static INLINE -void thread_loop_filter_rows(const YV12_BUFFER_CONFIG *const frame_buffer, - VP10_COMMON *const cm, - struct macroblockd_plane planes[MAX_MB_PLANE], - int start, int stop, int y_only, - VP9LfSync *const lf_sync) { - const int num_planes = y_only ? 1 : MAX_MB_PLANE; - const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2; - int mi_row, mi_col; - enum lf_path path; - if (y_only) - path = LF_PATH_444; - else if (planes[1].subsampling_y == 1 && planes[1].subsampling_x == 1) - path = LF_PATH_420; - else if (planes[1].subsampling_y == 0 && planes[1].subsampling_x == 0) - path = LF_PATH_444; - else - path = LF_PATH_SLOW; - - for (mi_row = start; mi_row < stop; - mi_row += lf_sync->num_workers * MI_BLOCK_SIZE) { - MODE_INFO **const mi = cm->mi_grid_visible + mi_row * cm->mi_stride; - - for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) { - const int r = mi_row >> MI_BLOCK_SIZE_LOG2; - const int c = mi_col >> MI_BLOCK_SIZE_LOG2; - LOOP_FILTER_MASK lfm; - int plane; - - sync_read(lf_sync, r, c); - - vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col); - - // TODO(JBB): Make setup_mask work for non 420. 
- vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, - &lfm); - - vp10_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm); - for (plane = 1; plane < num_planes; ++plane) { - switch (path) { - case LF_PATH_420: - vp10_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm); - break; - case LF_PATH_444: - vp10_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm); - break; - case LF_PATH_SLOW: - vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, - mi_row, mi_col); - break; - } - } - - sync_write(lf_sync, r, c, sb_cols); - } - } -} - -// Row-based multi-threaded loopfilter hook -static int loop_filter_row_worker(VP9LfSync *const lf_sync, - LFWorkerData *const lf_data) { - thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes, - lf_data->start, lf_data->stop, lf_data->y_only, - lf_sync); - return 1; -} - -static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, - VP10_COMMON *cm, - struct macroblockd_plane planes[MAX_MB_PLANE], - int start, int stop, int y_only, - VPxWorker *workers, int nworkers, - VP9LfSync *lf_sync) { - const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); - // Number of superblock rows and cols - const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2; - // Decoder may allocate more threads than number of tiles based on user's - // input. - const int tile_cols = 1 << cm->log2_tile_cols; - const int num_workers = VPXMIN(nworkers, tile_cols); - int i; - - if (!lf_sync->sync_range || sb_rows != lf_sync->rows || - num_workers > lf_sync->num_workers) { - vp10_loop_filter_dealloc(lf_sync); - vp10_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers); - } - - // Initialize cur_sb_col to -1 for all SB rows. - memset(lf_sync->cur_sb_col, -1, sizeof(*lf_sync->cur_sb_col) * sb_rows); - - // Set up loopfilter thread data. 
- // The decoder is capping num_workers because it has been observed that using - // more threads on the loopfilter than there are cores will hurt performance - // on Android. This is because the system will only schedule the tile decode - // workers on cores equal to the number of tile columns. Then if the decoder - // tries to use more threads for the loopfilter, it will hurt performance - // because of contention. If the multithreading code changes in the future - // then the number of workers used by the loopfilter should be revisited. - for (i = 0; i < num_workers; ++i) { - VPxWorker *const worker = &workers[i]; - LFWorkerData *const lf_data = &lf_sync->lfdata[i]; - - worker->hook = (VPxWorkerHook)loop_filter_row_worker; - worker->data1 = lf_sync; - worker->data2 = lf_data; - - // Loopfilter data - vp10_loop_filter_data_reset(lf_data, frame, cm, planes); - lf_data->start = start + i * MI_BLOCK_SIZE; - lf_data->stop = stop; - lf_data->y_only = y_only; - - // Start loopfiltering - if (i == num_workers - 1) { - winterface->execute(worker); - } else { - winterface->launch(worker); - } - } - - // Wait till all rows are finished - for (i = 0; i < num_workers; ++i) { - winterface->sync(&workers[i]); - } -} - -void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, - VP10_COMMON *cm, - struct macroblockd_plane planes[MAX_MB_PLANE], - int frame_filter_level, - int y_only, int partial_frame, - VPxWorker *workers, int num_workers, - VP9LfSync *lf_sync) { - int start_mi_row, end_mi_row, mi_rows_to_filter; - - if (!frame_filter_level) return; - - start_mi_row = 0; - mi_rows_to_filter = cm->mi_rows; - if (partial_frame && cm->mi_rows > 8) { - start_mi_row = cm->mi_rows >> 1; - start_mi_row &= 0xfffffff8; - mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8); - } - end_mi_row = start_mi_row + mi_rows_to_filter; - vp10_loop_filter_frame_init(cm, frame_filter_level); - - loop_filter_rows_mt(frame, cm, planes, start_mi_row, end_mi_row, - y_only, workers, num_workers, lf_sync); -} 
- -// Set up nsync by width. -static INLINE int get_sync_range(int width) { - // nsync numbers are picked by testing. For example, for 4k - // video, using 4 gives best performance. - if (width < 640) - return 1; - else if (width <= 1280) - return 2; - else if (width <= 4096) - return 4; - else - return 8; -} - -// Allocate memory for lf row synchronization -void vp10_loop_filter_alloc(VP9LfSync *lf_sync, VP10_COMMON *cm, int rows, - int width, int num_workers) { - lf_sync->rows = rows; -#if CONFIG_MULTITHREAD - { - int i; - - CHECK_MEM_ERROR(cm, lf_sync->mutex_, - vpx_malloc(sizeof(*lf_sync->mutex_) * rows)); - if (lf_sync->mutex_) { - for (i = 0; i < rows; ++i) { - pthread_mutex_init(&lf_sync->mutex_[i], NULL); - } - } - - CHECK_MEM_ERROR(cm, lf_sync->cond_, - vpx_malloc(sizeof(*lf_sync->cond_) * rows)); - if (lf_sync->cond_) { - for (i = 0; i < rows; ++i) { - pthread_cond_init(&lf_sync->cond_[i], NULL); - } - } - } -#endif // CONFIG_MULTITHREAD - - CHECK_MEM_ERROR(cm, lf_sync->lfdata, - vpx_malloc(num_workers * sizeof(*lf_sync->lfdata))); - lf_sync->num_workers = num_workers; - - CHECK_MEM_ERROR(cm, lf_sync->cur_sb_col, - vpx_malloc(sizeof(*lf_sync->cur_sb_col) * rows)); - - // Set up nsync. - lf_sync->sync_range = get_sync_range(width); -} - -// Deallocate lf synchronization related mutex and data -void vp10_loop_filter_dealloc(VP9LfSync *lf_sync) { - if (lf_sync != NULL) { -#if CONFIG_MULTITHREAD - int i; - - if (lf_sync->mutex_ != NULL) { - for (i = 0; i < lf_sync->rows; ++i) { - pthread_mutex_destroy(&lf_sync->mutex_[i]); - } - vpx_free(lf_sync->mutex_); - } - if (lf_sync->cond_ != NULL) { - for (i = 0; i < lf_sync->rows; ++i) { - pthread_cond_destroy(&lf_sync->cond_[i]); - } - vpx_free(lf_sync->cond_); - } -#endif // CONFIG_MULTITHREAD - vpx_free(lf_sync->lfdata); - vpx_free(lf_sync->cur_sb_col); - // clear the structure as the source of this call may be a resize in which - // case this call will be followed by an _alloc() which may fail. 
- vp10_zero(*lf_sync); - } -} - -// Accumulate frame counts. -void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts, - int is_dec) { - int i, j, k, l, m; - - for (i = 0; i < BLOCK_SIZE_GROUPS; i++) - for (j = 0; j < INTRA_MODES; j++) - cm->counts.y_mode[i][j] += counts->y_mode[i][j]; - - for (i = 0; i < INTRA_MODES; i++) - for (j = 0; j < INTRA_MODES; j++) - cm->counts.uv_mode[i][j] += counts->uv_mode[i][j]; - - for (i = 0; i < PARTITION_CONTEXTS; i++) - for (j = 0; j < PARTITION_TYPES; j++) - cm->counts.partition[i][j] += counts->partition[i][j]; - - if (is_dec) { - int n; - for (i = 0; i < TX_SIZES; i++) - for (j = 0; j < PLANE_TYPES; j++) - for (k = 0; k < REF_TYPES; k++) - for (l = 0; l < COEF_BANDS; l++) - for (m = 0; m < COEFF_CONTEXTS; m++) { - cm->counts.eob_branch[i][j][k][l][m] += - counts->eob_branch[i][j][k][l][m]; - for (n = 0; n < UNCONSTRAINED_NODES + 1; n++) - cm->counts.coef[i][j][k][l][m][n] += - counts->coef[i][j][k][l][m][n]; - } - } else { - for (i = 0; i < TX_SIZES; i++) - for (j = 0; j < PLANE_TYPES; j++) - for (k = 0; k < REF_TYPES; k++) - for (l = 0; l < COEF_BANDS; l++) - for (m = 0; m < COEFF_CONTEXTS; m++) - cm->counts.eob_branch[i][j][k][l][m] += - counts->eob_branch[i][j][k][l][m]; - // In the encoder, cm->counts.coef is only updated at frame - // level, so not need to accumulate it here. 
- // for (n = 0; n < UNCONSTRAINED_NODES + 1; n++) - // cm->counts.coef[i][j][k][l][m][n] += - // counts->coef[i][j][k][l][m][n]; - } - - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) - for (j = 0; j < SWITCHABLE_FILTERS; j++) - cm->counts.switchable_interp[i][j] += counts->switchable_interp[i][j]; - - for (i = 0; i < INTER_MODE_CONTEXTS; i++) - for (j = 0; j < INTER_MODES; j++) - cm->counts.inter_mode[i][j] += counts->inter_mode[i][j]; - - for (i = 0; i < INTRA_INTER_CONTEXTS; i++) - for (j = 0; j < 2; j++) - cm->counts.intra_inter[i][j] += counts->intra_inter[i][j]; - - for (i = 0; i < COMP_INTER_CONTEXTS; i++) - for (j = 0; j < 2; j++) - cm->counts.comp_inter[i][j] += counts->comp_inter[i][j]; - - for (i = 0; i < REF_CONTEXTS; i++) - for (j = 0; j < 2; j++) - for (k = 0; k < 2; k++) - cm->counts.single_ref[i][j][k] += counts->single_ref[i][j][k]; - - for (i = 0; i < REF_CONTEXTS; i++) - for (j = 0; j < 2; j++) - cm->counts.comp_ref[i][j] += counts->comp_ref[i][j]; - - for (i = 0; i < TX_SIZE_CONTEXTS; i++) { - for (j = 0; j < TX_SIZES; j++) - cm->counts.tx.p32x32[i][j] += counts->tx.p32x32[i][j]; - - for (j = 0; j < TX_SIZES - 1; j++) - cm->counts.tx.p16x16[i][j] += counts->tx.p16x16[i][j]; - - for (j = 0; j < TX_SIZES - 2; j++) - cm->counts.tx.p8x8[i][j] += counts->tx.p8x8[i][j]; - } - - for (i = 0; i < TX_SIZES; i++) - cm->counts.tx.tx_totals[i] += counts->tx.tx_totals[i]; - - for (i = 0; i < SKIP_CONTEXTS; i++) - for (j = 0; j < 2; j++) - cm->counts.skip[i][j] += counts->skip[i][j]; - - for (i = 0; i < MV_JOINTS; i++) - cm->counts.mv.joints[i] += counts->mv.joints[i]; - - for (k = 0; k < 2; k++) { - nmv_component_counts *comps = &cm->counts.mv.comps[k]; - nmv_component_counts *comps_t = &counts->mv.comps[k]; - - for (i = 0; i < 2; i++) { - comps->sign[i] += comps_t->sign[i]; - comps->class0_hp[i] += comps_t->class0_hp[i]; - comps->hp[i] += comps_t->hp[i]; - } - - for (i = 0; i < MV_CLASSES; i++) - comps->classes[i] += comps_t->classes[i]; - - for (i = 0; i 
< CLASS0_SIZE; i++) { - comps->class0[i] += comps_t->class0[i]; - for (j = 0; j < MV_FP_SIZE; j++) - comps->class0_fp[i][j] += comps_t->class0_fp[i][j]; - } - - for (i = 0; i < MV_OFFSET_BITS; i++) - for (j = 0; j < 2; j++) - comps->bits[i][j] += comps_t->bits[i][j]; - - for (i = 0; i < MV_FP_SIZE; i++) - comps->fp[i] += comps_t->fp[i]; - } - - for (i = 0; i < EXT_TX_SIZES; i++) { - int j; - for (j = 0; j < TX_TYPES; ++j) - for (k = 0; k < TX_TYPES; k++) - cm->counts.intra_ext_tx[i][j][k] += counts->intra_ext_tx[i][j][k]; - } - for (i = 0; i < EXT_TX_SIZES; i++) { - for (k = 0; k < TX_TYPES; k++) - cm->counts.inter_ext_tx[i][k] += counts->inter_ext_tx[i][k]; - } - -#if CONFIG_MISC_FIXES - for (i = 0; i < PREDICTION_PROBS; i++) - for (j = 0; j < 2; j++) - cm->counts.seg.pred[i][j] += counts->seg.pred[i][j]; - - for (i = 0; i < MAX_SEGMENTS; i++) { - cm->counts.seg.tree_total[i] += counts->seg.tree_total[i]; - cm->counts.seg.tree_mispred[i] += counts->seg.tree_mispred[i]; - } -#endif -} diff --git a/libs/libvpx/vp10/common/thread_common.h b/libs/libvpx/vp10/common/thread_common.h deleted file mode 100644 index a401ddcb2e..0000000000 --- a/libs/libvpx/vp10/common/thread_common.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_COMMON_LOOPFILTER_THREAD_H_ -#define VP10_COMMON_LOOPFILTER_THREAD_H_ -#include "./vpx_config.h" -#include "vp10/common/loopfilter.h" -#include "vpx_util/vpx_thread.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct VP10Common; -struct FRAME_COUNTS; - -// Loopfilter row synchronization -typedef struct VP9LfSyncData { -#if CONFIG_MULTITHREAD - pthread_mutex_t *mutex_; - pthread_cond_t *cond_; -#endif - // Allocate memory to store the loop-filtered superblock index in each row. - int *cur_sb_col; - // The optimal sync_range for different resolution and platform should be - // determined by testing. Currently, it is chosen to be a power-of-2 number. - int sync_range; - int rows; - - // Row-based parallel loopfilter data - LFWorkerData *lfdata; - int num_workers; -} VP9LfSync; - -// Allocate memory for loopfilter row synchronization. -void vp10_loop_filter_alloc(VP9LfSync *lf_sync, struct VP10Common *cm, int rows, - int width, int num_workers); - -// Deallocate loopfilter synchronization related mutex and data. -void vp10_loop_filter_dealloc(VP9LfSync *lf_sync); - -// Multi-threaded loopfilter that uses the tile threads. -void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, - struct VP10Common *cm, - struct macroblockd_plane planes[MAX_MB_PLANE], - int frame_filter_level, - int y_only, int partial_frame, - VPxWorker *workers, int num_workers, - VP9LfSync *lf_sync); - -void vp10_accumulate_frame_counts(struct VP10Common *cm, - struct FRAME_COUNTS *counts, int is_dec); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_LOOPFILTER_THREAD_H_ diff --git a/libs/libvpx/vp10/common/tile_common.c b/libs/libvpx/vp10/common/tile_common.c deleted file mode 100644 index 4d92b4c6bf..0000000000 --- a/libs/libvpx/vp10/common/tile_common.c +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "vp10/common/tile_common.h" -#include "vp10/common/onyxc_int.h" -#include "vpx_dsp/vpx_dsp_common.h" - -#define MIN_TILE_WIDTH_B64 4 -#define MAX_TILE_WIDTH_B64 64 - -static int get_tile_offset(int idx, int mis, int log2) { - const int sb_cols = mi_cols_aligned_to_sb(mis) >> MI_BLOCK_SIZE_LOG2; - const int offset = ((idx * sb_cols) >> log2) << MI_BLOCK_SIZE_LOG2; - return VPXMIN(offset, mis); -} - -void vp10_tile_set_row(TileInfo *tile, const VP10_COMMON *cm, int row) { - tile->mi_row_start = get_tile_offset(row, cm->mi_rows, cm->log2_tile_rows); - tile->mi_row_end = get_tile_offset(row + 1, cm->mi_rows, cm->log2_tile_rows); -} - -void vp10_tile_set_col(TileInfo *tile, const VP10_COMMON *cm, int col) { - tile->mi_col_start = get_tile_offset(col, cm->mi_cols, cm->log2_tile_cols); - tile->mi_col_end = get_tile_offset(col + 1, cm->mi_cols, cm->log2_tile_cols); -} - -void vp10_tile_init(TileInfo *tile, const VP10_COMMON *cm, int row, int col) { - vp10_tile_set_row(tile, cm, row); - vp10_tile_set_col(tile, cm, col); -} - -static int get_min_log2_tile_cols(const int sb64_cols) { - int min_log2 = 0; - while ((MAX_TILE_WIDTH_B64 << min_log2) < sb64_cols) - ++min_log2; - return min_log2; -} - -static int get_max_log2_tile_cols(const int sb64_cols) { - int max_log2 = 1; - while ((sb64_cols >> max_log2) >= MIN_TILE_WIDTH_B64) - ++max_log2; - return max_log2 - 1; -} - -void vp10_get_tile_n_bits(int mi_cols, - int *min_log2_tile_cols, int *max_log2_tile_cols) { - const int sb64_cols = mi_cols_aligned_to_sb(mi_cols) >> MI_BLOCK_SIZE_LOG2; - *min_log2_tile_cols = get_min_log2_tile_cols(sb64_cols); - *max_log2_tile_cols = 
get_max_log2_tile_cols(sb64_cols); - assert(*min_log2_tile_cols <= *max_log2_tile_cols); -} diff --git a/libs/libvpx/vp10/common/tile_common.h b/libs/libvpx/vp10/common/tile_common.h deleted file mode 100644 index 09cf060d8a..0000000000 --- a/libs/libvpx/vp10/common/tile_common.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_COMMON_TILE_COMMON_H_ -#define VP10_COMMON_TILE_COMMON_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -struct VP10Common; - -typedef struct TileInfo { - int mi_row_start, mi_row_end; - int mi_col_start, mi_col_end; -} TileInfo; - -// initializes 'tile->mi_(row|col)_(start|end)' for (row, col) based on -// 'cm->log2_tile_(rows|cols)' & 'cm->mi_(rows|cols)' -void vp10_tile_init(TileInfo *tile, const struct VP10Common *cm, - int row, int col); - -void vp10_tile_set_row(TileInfo *tile, const struct VP10Common *cm, int row); -void vp10_tile_set_col(TileInfo *tile, const struct VP10Common *cm, int col); - -void vp10_get_tile_n_bits(int mi_cols, - int *min_log2_tile_cols, int *max_log2_tile_cols); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_COMMON_TILE_COMMON_H_ diff --git a/libs/libvpx/vp10/common/vp10_fwd_txfm.c b/libs/libvpx/vp10/common/vp10_fwd_txfm.c deleted file mode 100644 index 3211cd0828..0000000000 --- a/libs/libvpx/vp10/common/vp10_fwd_txfm.c +++ /dev/null @@ -1,824 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "vp10/common/vp10_fwd_txfm.h" - -void vp10_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) { - // The 2D transform is done with two passes which are actually pretty - // similar. In the first one, we transform the columns and transpose - // the results. In the second one, we transform the rows. To achieve that, - // as the first pass results are transposed, we transpose the columns (that - // is the transposed rows) and transpose the results (so that it goes back - // in normal/row positions). - int pass; - // We need an intermediate buffer between passes. - tran_low_t intermediate[4 * 4]; - const int16_t *in_pass0 = input; - const tran_low_t *in = NULL; - tran_low_t *out = intermediate; - // Do the two transform/transpose passes - for (pass = 0; pass < 2; ++pass) { - tran_high_t input[4]; // canbe16 - tran_high_t step[4]; // canbe16 - tran_high_t temp1, temp2; // needs32 - int i; - for (i = 0; i < 4; ++i) { - // Load inputs. - if (0 == pass) { - input[0] = in_pass0[0 * stride] * 16; - input[1] = in_pass0[1 * stride] * 16; - input[2] = in_pass0[2 * stride] * 16; - input[3] = in_pass0[3 * stride] * 16; - if (i == 0 && input[0]) { - input[0] += 1; - } - } else { - input[0] = in[0 * 4]; - input[1] = in[1 * 4]; - input[2] = in[2 * 4]; - input[3] = in[3 * 4]; - } - // Transform. 
- step[0] = input[0] + input[3]; - step[1] = input[1] + input[2]; - step[2] = input[1] - input[2]; - step[3] = input[0] - input[3]; - temp1 = (step[0] + step[1]) * cospi_16_64; - temp2 = (step[0] - step[1]) * cospi_16_64; - out[0] = (tran_low_t)fdct_round_shift(temp1); - out[2] = (tran_low_t)fdct_round_shift(temp2); - temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64; - temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64; - out[1] = (tran_low_t)fdct_round_shift(temp1); - out[3] = (tran_low_t)fdct_round_shift(temp2); - // Do next column (which is a transposed row in second/horizontal pass) - in_pass0++; - in++; - out += 4; - } - // Setup in/out for next pass. - in = intermediate; - out = output; - } - - { - int i, j; - for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - output[j + i * 4] = (output[j + i * 4] + 1) >> 2; - } - } -} - -void vp10_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) { - int r, c; - tran_low_t sum = 0; - for (r = 0; r < 4; ++r) - for (c = 0; c < 4; ++c) - sum += input[r * stride + c]; - - output[0] = sum << 1; - output[1] = 0; -} - -void vp10_fdct8x8_c(const int16_t *input, - tran_low_t *final_output, int stride) { - int i, j; - tran_low_t intermediate[64]; - int pass; - tran_low_t *output = intermediate; - const tran_low_t *in = NULL; - - // Transform columns - for (pass = 0; pass < 2; ++pass) { - tran_high_t s0, s1, s2, s3, s4, s5, s6, s7; // canbe16 - tran_high_t t0, t1, t2, t3; // needs32 - tran_high_t x0, x1, x2, x3; // canbe16 - - int i; - for (i = 0; i < 8; i++) { - // stage 1 - if (pass == 0) { - s0 = (input[0 * stride] + input[7 * stride]) * 4; - s1 = (input[1 * stride] + input[6 * stride]) * 4; - s2 = (input[2 * stride] + input[5 * stride]) * 4; - s3 = (input[3 * stride] + input[4 * stride]) * 4; - s4 = (input[3 * stride] - input[4 * stride]) * 4; - s5 = (input[2 * stride] - input[5 * stride]) * 4; - s6 = (input[1 * stride] - input[6 * stride]) * 4; - s7 = (input[0 * stride] - input[7 * stride]) * 4; - 
++input; - } else { - s0 = in[0 * 8] + in[7 * 8]; - s1 = in[1 * 8] + in[6 * 8]; - s2 = in[2 * 8] + in[5 * 8]; - s3 = in[3 * 8] + in[4 * 8]; - s4 = in[3 * 8] - in[4 * 8]; - s5 = in[2 * 8] - in[5 * 8]; - s6 = in[1 * 8] - in[6 * 8]; - s7 = in[0 * 8] - in[7 * 8]; - ++in; - } - - // fdct4(step, step); - x0 = s0 + s3; - x1 = s1 + s2; - x2 = s1 - s2; - x3 = s0 - s3; - t0 = (x0 + x1) * cospi_16_64; - t1 = (x0 - x1) * cospi_16_64; - t2 = x2 * cospi_24_64 + x3 * cospi_8_64; - t3 = -x2 * cospi_8_64 + x3 * cospi_24_64; - output[0] = (tran_low_t)fdct_round_shift(t0); - output[2] = (tran_low_t)fdct_round_shift(t2); - output[4] = (tran_low_t)fdct_round_shift(t1); - output[6] = (tran_low_t)fdct_round_shift(t3); - - // Stage 2 - t0 = (s6 - s5) * cospi_16_64; - t1 = (s6 + s5) * cospi_16_64; - t2 = fdct_round_shift(t0); - t3 = fdct_round_shift(t1); - - // Stage 3 - x0 = s4 + t2; - x1 = s4 - t2; - x2 = s7 - t3; - x3 = s7 + t3; - - // Stage 4 - t0 = x0 * cospi_28_64 + x3 * cospi_4_64; - t1 = x1 * cospi_12_64 + x2 * cospi_20_64; - t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; - t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; - output[1] = (tran_low_t)fdct_round_shift(t0); - output[3] = (tran_low_t)fdct_round_shift(t2); - output[5] = (tran_low_t)fdct_round_shift(t1); - output[7] = (tran_low_t)fdct_round_shift(t3); - output += 8; - } - in = intermediate; - output = final_output; - } - - // Rows - for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - final_output[j + i * 8] /= 2; - } -} - -void vp10_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) { - int r, c; - tran_low_t sum = 0; - for (r = 0; r < 8; ++r) - for (c = 0; c < 8; ++c) - sum += input[r * stride + c]; - - output[0] = sum; - output[1] = 0; -} - -void vp10_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) { - // The 2D transform is done with two passes which are actually pretty - // similar. In the first one, we transform the columns and transpose - // the results. 
In the second one, we transform the rows. To achieve that, - // as the first pass results are transposed, we transpose the columns (that - // is the transposed rows) and transpose the results (so that it goes back - // in normal/row positions). - int pass; - // We need an intermediate buffer between passes. - tran_low_t intermediate[256]; - const int16_t *in_pass0 = input; - const tran_low_t *in = NULL; - tran_low_t *out = intermediate; - // Do the two transform/transpose passes - for (pass = 0; pass < 2; ++pass) { - tran_high_t step1[8]; // canbe16 - tran_high_t step2[8]; // canbe16 - tran_high_t step3[8]; // canbe16 - tran_high_t input[8]; // canbe16 - tran_high_t temp1, temp2; // needs32 - int i; - for (i = 0; i < 16; i++) { - if (0 == pass) { - // Calculate input for the first 8 results. - input[0] = (in_pass0[0 * stride] + in_pass0[15 * stride]) * 4; - input[1] = (in_pass0[1 * stride] + in_pass0[14 * stride]) * 4; - input[2] = (in_pass0[2 * stride] + in_pass0[13 * stride]) * 4; - input[3] = (in_pass0[3 * stride] + in_pass0[12 * stride]) * 4; - input[4] = (in_pass0[4 * stride] + in_pass0[11 * stride]) * 4; - input[5] = (in_pass0[5 * stride] + in_pass0[10 * stride]) * 4; - input[6] = (in_pass0[6 * stride] + in_pass0[ 9 * stride]) * 4; - input[7] = (in_pass0[7 * stride] + in_pass0[ 8 * stride]) * 4; - // Calculate input for the next 8 results. - step1[0] = (in_pass0[7 * stride] - in_pass0[ 8 * stride]) * 4; - step1[1] = (in_pass0[6 * stride] - in_pass0[ 9 * stride]) * 4; - step1[2] = (in_pass0[5 * stride] - in_pass0[10 * stride]) * 4; - step1[3] = (in_pass0[4 * stride] - in_pass0[11 * stride]) * 4; - step1[4] = (in_pass0[3 * stride] - in_pass0[12 * stride]) * 4; - step1[5] = (in_pass0[2 * stride] - in_pass0[13 * stride]) * 4; - step1[6] = (in_pass0[1 * stride] - in_pass0[14 * stride]) * 4; - step1[7] = (in_pass0[0 * stride] - in_pass0[15 * stride]) * 4; - } else { - // Calculate input for the first 8 results. 
- input[0] = ((in[0 * 16] + 1) >> 2) + ((in[15 * 16] + 1) >> 2); - input[1] = ((in[1 * 16] + 1) >> 2) + ((in[14 * 16] + 1) >> 2); - input[2] = ((in[2 * 16] + 1) >> 2) + ((in[13 * 16] + 1) >> 2); - input[3] = ((in[3 * 16] + 1) >> 2) + ((in[12 * 16] + 1) >> 2); - input[4] = ((in[4 * 16] + 1) >> 2) + ((in[11 * 16] + 1) >> 2); - input[5] = ((in[5 * 16] + 1) >> 2) + ((in[10 * 16] + 1) >> 2); - input[6] = ((in[6 * 16] + 1) >> 2) + ((in[ 9 * 16] + 1) >> 2); - input[7] = ((in[7 * 16] + 1) >> 2) + ((in[ 8 * 16] + 1) >> 2); - // Calculate input for the next 8 results. - step1[0] = ((in[7 * 16] + 1) >> 2) - ((in[ 8 * 16] + 1) >> 2); - step1[1] = ((in[6 * 16] + 1) >> 2) - ((in[ 9 * 16] + 1) >> 2); - step1[2] = ((in[5 * 16] + 1) >> 2) - ((in[10 * 16] + 1) >> 2); - step1[3] = ((in[4 * 16] + 1) >> 2) - ((in[11 * 16] + 1) >> 2); - step1[4] = ((in[3 * 16] + 1) >> 2) - ((in[12 * 16] + 1) >> 2); - step1[5] = ((in[2 * 16] + 1) >> 2) - ((in[13 * 16] + 1) >> 2); - step1[6] = ((in[1 * 16] + 1) >> 2) - ((in[14 * 16] + 1) >> 2); - step1[7] = ((in[0 * 16] + 1) >> 2) - ((in[15 * 16] + 1) >> 2); - } - // Work on the first eight values; fdct8(input, even_results); - { - tran_high_t s0, s1, s2, s3, s4, s5, s6, s7; // canbe16 - tran_high_t t0, t1, t2, t3; // needs32 - tran_high_t x0, x1, x2, x3; // canbe16 - - // stage 1 - s0 = input[0] + input[7]; - s1 = input[1] + input[6]; - s2 = input[2] + input[5]; - s3 = input[3] + input[4]; - s4 = input[3] - input[4]; - s5 = input[2] - input[5]; - s6 = input[1] - input[6]; - s7 = input[0] - input[7]; - - // fdct4(step, step); - x0 = s0 + s3; - x1 = s1 + s2; - x2 = s1 - s2; - x3 = s0 - s3; - t0 = (x0 + x1) * cospi_16_64; - t1 = (x0 - x1) * cospi_16_64; - t2 = x3 * cospi_8_64 + x2 * cospi_24_64; - t3 = x3 * cospi_24_64 - x2 * cospi_8_64; - out[0] = (tran_low_t)fdct_round_shift(t0); - out[4] = (tran_low_t)fdct_round_shift(t2); - out[8] = (tran_low_t)fdct_round_shift(t1); - out[12] = (tran_low_t)fdct_round_shift(t3); - - // Stage 2 - t0 = (s6 - s5) * 
cospi_16_64; - t1 = (s6 + s5) * cospi_16_64; - t2 = fdct_round_shift(t0); - t3 = fdct_round_shift(t1); - - // Stage 3 - x0 = s4 + t2; - x1 = s4 - t2; - x2 = s7 - t3; - x3 = s7 + t3; - - // Stage 4 - t0 = x0 * cospi_28_64 + x3 * cospi_4_64; - t1 = x1 * cospi_12_64 + x2 * cospi_20_64; - t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; - t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; - out[2] = (tran_low_t)fdct_round_shift(t0); - out[6] = (tran_low_t)fdct_round_shift(t2); - out[10] = (tran_low_t)fdct_round_shift(t1); - out[14] = (tran_low_t)fdct_round_shift(t3); - } - // Work on the next eight values; step1 -> odd_results - { - // step 2 - temp1 = (step1[5] - step1[2]) * cospi_16_64; - temp2 = (step1[4] - step1[3]) * cospi_16_64; - step2[2] = fdct_round_shift(temp1); - step2[3] = fdct_round_shift(temp2); - temp1 = (step1[4] + step1[3]) * cospi_16_64; - temp2 = (step1[5] + step1[2]) * cospi_16_64; - step2[4] = fdct_round_shift(temp1); - step2[5] = fdct_round_shift(temp2); - // step 3 - step3[0] = step1[0] + step2[3]; - step3[1] = step1[1] + step2[2]; - step3[2] = step1[1] - step2[2]; - step3[3] = step1[0] - step2[3]; - step3[4] = step1[7] - step2[4]; - step3[5] = step1[6] - step2[5]; - step3[6] = step1[6] + step2[5]; - step3[7] = step1[7] + step2[4]; - // step 4 - temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64; - temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64; - step2[1] = fdct_round_shift(temp1); - step2[2] = fdct_round_shift(temp2); - temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64; - temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64; - step2[5] = fdct_round_shift(temp1); - step2[6] = fdct_round_shift(temp2); - // step 5 - step1[0] = step3[0] + step2[1]; - step1[1] = step3[0] - step2[1]; - step1[2] = step3[3] + step2[2]; - step1[3] = step3[3] - step2[2]; - step1[4] = step3[4] - step2[5]; - step1[5] = step3[4] + step2[5]; - step1[6] = step3[7] - step2[6]; - step1[7] = step3[7] + step2[6]; - // step 6 - temp1 = step1[0] * cospi_30_64 + step1[7] * 
cospi_2_64; - temp2 = step1[1] * cospi_14_64 + step1[6] * cospi_18_64; - out[1] = (tran_low_t)fdct_round_shift(temp1); - out[9] = (tran_low_t)fdct_round_shift(temp2); - temp1 = step1[2] * cospi_22_64 + step1[5] * cospi_10_64; - temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64; - out[5] = (tran_low_t)fdct_round_shift(temp1); - out[13] = (tran_low_t)fdct_round_shift(temp2); - temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64; - temp2 = step1[2] * -cospi_10_64 + step1[5] * cospi_22_64; - out[3] = (tran_low_t)fdct_round_shift(temp1); - out[11] = (tran_low_t)fdct_round_shift(temp2); - temp1 = step1[1] * -cospi_18_64 + step1[6] * cospi_14_64; - temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64; - out[7] = (tran_low_t)fdct_round_shift(temp1); - out[15] = (tran_low_t)fdct_round_shift(temp2); - } - // Do next column (which is a transposed row in second/horizontal pass) - in++; - in_pass0++; - out += 16; - } - // Setup in/out for next pass. - in = intermediate; - out = output; - } -} - -void vp10_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) { - int r, c; - tran_low_t sum = 0; - for (r = 0; r < 16; ++r) - for (c = 0; c < 16; ++c) - sum += input[r * stride + c]; - - output[0] = sum >> 1; - output[1] = 0; -} - -static INLINE tran_high_t dct_32_round(tran_high_t input) { - tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS); - // TODO(debargha, peter.derivaz): Find new bounds for this assert, - // and make the bounds consts. 
- // assert(-131072 <= rv && rv <= 131071); - return rv; -} - -static INLINE tran_high_t half_round_shift(tran_high_t input) { - tran_high_t rv = (input + 1 + (input < 0)) >> 2; - return rv; -} - -void vp10_fdct32(const tran_high_t *input, tran_high_t *output, int round) { - tran_high_t step[32]; - // Stage 1 - step[0] = input[0] + input[(32 - 1)]; - step[1] = input[1] + input[(32 - 2)]; - step[2] = input[2] + input[(32 - 3)]; - step[3] = input[3] + input[(32 - 4)]; - step[4] = input[4] + input[(32 - 5)]; - step[5] = input[5] + input[(32 - 6)]; - step[6] = input[6] + input[(32 - 7)]; - step[7] = input[7] + input[(32 - 8)]; - step[8] = input[8] + input[(32 - 9)]; - step[9] = input[9] + input[(32 - 10)]; - step[10] = input[10] + input[(32 - 11)]; - step[11] = input[11] + input[(32 - 12)]; - step[12] = input[12] + input[(32 - 13)]; - step[13] = input[13] + input[(32 - 14)]; - step[14] = input[14] + input[(32 - 15)]; - step[15] = input[15] + input[(32 - 16)]; - step[16] = -input[16] + input[(32 - 17)]; - step[17] = -input[17] + input[(32 - 18)]; - step[18] = -input[18] + input[(32 - 19)]; - step[19] = -input[19] + input[(32 - 20)]; - step[20] = -input[20] + input[(32 - 21)]; - step[21] = -input[21] + input[(32 - 22)]; - step[22] = -input[22] + input[(32 - 23)]; - step[23] = -input[23] + input[(32 - 24)]; - step[24] = -input[24] + input[(32 - 25)]; - step[25] = -input[25] + input[(32 - 26)]; - step[26] = -input[26] + input[(32 - 27)]; - step[27] = -input[27] + input[(32 - 28)]; - step[28] = -input[28] + input[(32 - 29)]; - step[29] = -input[29] + input[(32 - 30)]; - step[30] = -input[30] + input[(32 - 31)]; - step[31] = -input[31] + input[(32 - 32)]; - - // Stage 2 - output[0] = step[0] + step[16 - 1]; - output[1] = step[1] + step[16 - 2]; - output[2] = step[2] + step[16 - 3]; - output[3] = step[3] + step[16 - 4]; - output[4] = step[4] + step[16 - 5]; - output[5] = step[5] + step[16 - 6]; - output[6] = step[6] + step[16 - 7]; - output[7] = step[7] + step[16 - 8]; - 
output[8] = -step[8] + step[16 - 9]; - output[9] = -step[9] + step[16 - 10]; - output[10] = -step[10] + step[16 - 11]; - output[11] = -step[11] + step[16 - 12]; - output[12] = -step[12] + step[16 - 13]; - output[13] = -step[13] + step[16 - 14]; - output[14] = -step[14] + step[16 - 15]; - output[15] = -step[15] + step[16 - 16]; - - output[16] = step[16]; - output[17] = step[17]; - output[18] = step[18]; - output[19] = step[19]; - - output[20] = dct_32_round((-step[20] + step[27]) * cospi_16_64); - output[21] = dct_32_round((-step[21] + step[26]) * cospi_16_64); - output[22] = dct_32_round((-step[22] + step[25]) * cospi_16_64); - output[23] = dct_32_round((-step[23] + step[24]) * cospi_16_64); - - output[24] = dct_32_round((step[24] + step[23]) * cospi_16_64); - output[25] = dct_32_round((step[25] + step[22]) * cospi_16_64); - output[26] = dct_32_round((step[26] + step[21]) * cospi_16_64); - output[27] = dct_32_round((step[27] + step[20]) * cospi_16_64); - - output[28] = step[28]; - output[29] = step[29]; - output[30] = step[30]; - output[31] = step[31]; - - // dump the magnitude by 4, hence the intermediate values are within - // the range of 16 bits. 
- if (round) { - output[0] = half_round_shift(output[0]); - output[1] = half_round_shift(output[1]); - output[2] = half_round_shift(output[2]); - output[3] = half_round_shift(output[3]); - output[4] = half_round_shift(output[4]); - output[5] = half_round_shift(output[5]); - output[6] = half_round_shift(output[6]); - output[7] = half_round_shift(output[7]); - output[8] = half_round_shift(output[8]); - output[9] = half_round_shift(output[9]); - output[10] = half_round_shift(output[10]); - output[11] = half_round_shift(output[11]); - output[12] = half_round_shift(output[12]); - output[13] = half_round_shift(output[13]); - output[14] = half_round_shift(output[14]); - output[15] = half_round_shift(output[15]); - - output[16] = half_round_shift(output[16]); - output[17] = half_round_shift(output[17]); - output[18] = half_round_shift(output[18]); - output[19] = half_round_shift(output[19]); - output[20] = half_round_shift(output[20]); - output[21] = half_round_shift(output[21]); - output[22] = half_round_shift(output[22]); - output[23] = half_round_shift(output[23]); - output[24] = half_round_shift(output[24]); - output[25] = half_round_shift(output[25]); - output[26] = half_round_shift(output[26]); - output[27] = half_round_shift(output[27]); - output[28] = half_round_shift(output[28]); - output[29] = half_round_shift(output[29]); - output[30] = half_round_shift(output[30]); - output[31] = half_round_shift(output[31]); - } - - // Stage 3 - step[0] = output[0] + output[(8 - 1)]; - step[1] = output[1] + output[(8 - 2)]; - step[2] = output[2] + output[(8 - 3)]; - step[3] = output[3] + output[(8 - 4)]; - step[4] = -output[4] + output[(8 - 5)]; - step[5] = -output[5] + output[(8 - 6)]; - step[6] = -output[6] + output[(8 - 7)]; - step[7] = -output[7] + output[(8 - 8)]; - step[8] = output[8]; - step[9] = output[9]; - step[10] = dct_32_round((-output[10] + output[13]) * cospi_16_64); - step[11] = dct_32_round((-output[11] + output[12]) * cospi_16_64); - step[12] = 
dct_32_round((output[12] + output[11]) * cospi_16_64); - step[13] = dct_32_round((output[13] + output[10]) * cospi_16_64); - step[14] = output[14]; - step[15] = output[15]; - - step[16] = output[16] + output[23]; - step[17] = output[17] + output[22]; - step[18] = output[18] + output[21]; - step[19] = output[19] + output[20]; - step[20] = -output[20] + output[19]; - step[21] = -output[21] + output[18]; - step[22] = -output[22] + output[17]; - step[23] = -output[23] + output[16]; - step[24] = -output[24] + output[31]; - step[25] = -output[25] + output[30]; - step[26] = -output[26] + output[29]; - step[27] = -output[27] + output[28]; - step[28] = output[28] + output[27]; - step[29] = output[29] + output[26]; - step[30] = output[30] + output[25]; - step[31] = output[31] + output[24]; - - // Stage 4 - output[0] = step[0] + step[3]; - output[1] = step[1] + step[2]; - output[2] = -step[2] + step[1]; - output[3] = -step[3] + step[0]; - output[4] = step[4]; - output[5] = dct_32_round((-step[5] + step[6]) * cospi_16_64); - output[6] = dct_32_round((step[6] + step[5]) * cospi_16_64); - output[7] = step[7]; - output[8] = step[8] + step[11]; - output[9] = step[9] + step[10]; - output[10] = -step[10] + step[9]; - output[11] = -step[11] + step[8]; - output[12] = -step[12] + step[15]; - output[13] = -step[13] + step[14]; - output[14] = step[14] + step[13]; - output[15] = step[15] + step[12]; - - output[16] = step[16]; - output[17] = step[17]; - output[18] = dct_32_round(step[18] * -cospi_8_64 + step[29] * cospi_24_64); - output[19] = dct_32_round(step[19] * -cospi_8_64 + step[28] * cospi_24_64); - output[20] = dct_32_round(step[20] * -cospi_24_64 + step[27] * -cospi_8_64); - output[21] = dct_32_round(step[21] * -cospi_24_64 + step[26] * -cospi_8_64); - output[22] = step[22]; - output[23] = step[23]; - output[24] = step[24]; - output[25] = step[25]; - output[26] = dct_32_round(step[26] * cospi_24_64 + step[21] * -cospi_8_64); - output[27] = dct_32_round(step[27] * cospi_24_64 + 
step[20] * -cospi_8_64); - output[28] = dct_32_round(step[28] * cospi_8_64 + step[19] * cospi_24_64); - output[29] = dct_32_round(step[29] * cospi_8_64 + step[18] * cospi_24_64); - output[30] = step[30]; - output[31] = step[31]; - - // Stage 5 - step[0] = dct_32_round((output[0] + output[1]) * cospi_16_64); - step[1] = dct_32_round((-output[1] + output[0]) * cospi_16_64); - step[2] = dct_32_round(output[2] * cospi_24_64 + output[3] * cospi_8_64); - step[3] = dct_32_round(output[3] * cospi_24_64 - output[2] * cospi_8_64); - step[4] = output[4] + output[5]; - step[5] = -output[5] + output[4]; - step[6] = -output[6] + output[7]; - step[7] = output[7] + output[6]; - step[8] = output[8]; - step[9] = dct_32_round(output[9] * -cospi_8_64 + output[14] * cospi_24_64); - step[10] = dct_32_round(output[10] * -cospi_24_64 + output[13] * -cospi_8_64); - step[11] = output[11]; - step[12] = output[12]; - step[13] = dct_32_round(output[13] * cospi_24_64 + output[10] * -cospi_8_64); - step[14] = dct_32_round(output[14] * cospi_8_64 + output[9] * cospi_24_64); - step[15] = output[15]; - - step[16] = output[16] + output[19]; - step[17] = output[17] + output[18]; - step[18] = -output[18] + output[17]; - step[19] = -output[19] + output[16]; - step[20] = -output[20] + output[23]; - step[21] = -output[21] + output[22]; - step[22] = output[22] + output[21]; - step[23] = output[23] + output[20]; - step[24] = output[24] + output[27]; - step[25] = output[25] + output[26]; - step[26] = -output[26] + output[25]; - step[27] = -output[27] + output[24]; - step[28] = -output[28] + output[31]; - step[29] = -output[29] + output[30]; - step[30] = output[30] + output[29]; - step[31] = output[31] + output[28]; - - // Stage 6 - output[0] = step[0]; - output[1] = step[1]; - output[2] = step[2]; - output[3] = step[3]; - output[4] = dct_32_round(step[4] * cospi_28_64 + step[7] * cospi_4_64); - output[5] = dct_32_round(step[5] * cospi_12_64 + step[6] * cospi_20_64); - output[6] = dct_32_round(step[6] * 
cospi_12_64 + step[5] * -cospi_20_64); - output[7] = dct_32_round(step[7] * cospi_28_64 + step[4] * -cospi_4_64); - output[8] = step[8] + step[9]; - output[9] = -step[9] + step[8]; - output[10] = -step[10] + step[11]; - output[11] = step[11] + step[10]; - output[12] = step[12] + step[13]; - output[13] = -step[13] + step[12]; - output[14] = -step[14] + step[15]; - output[15] = step[15] + step[14]; - - output[16] = step[16]; - output[17] = dct_32_round(step[17] * -cospi_4_64 + step[30] * cospi_28_64); - output[18] = dct_32_round(step[18] * -cospi_28_64 + step[29] * -cospi_4_64); - output[19] = step[19]; - output[20] = step[20]; - output[21] = dct_32_round(step[21] * -cospi_20_64 + step[26] * cospi_12_64); - output[22] = dct_32_round(step[22] * -cospi_12_64 + step[25] * -cospi_20_64); - output[23] = step[23]; - output[24] = step[24]; - output[25] = dct_32_round(step[25] * cospi_12_64 + step[22] * -cospi_20_64); - output[26] = dct_32_round(step[26] * cospi_20_64 + step[21] * cospi_12_64); - output[27] = step[27]; - output[28] = step[28]; - output[29] = dct_32_round(step[29] * cospi_28_64 + step[18] * -cospi_4_64); - output[30] = dct_32_round(step[30] * cospi_4_64 + step[17] * cospi_28_64); - output[31] = step[31]; - - // Stage 7 - step[0] = output[0]; - step[1] = output[1]; - step[2] = output[2]; - step[3] = output[3]; - step[4] = output[4]; - step[5] = output[5]; - step[6] = output[6]; - step[7] = output[7]; - step[8] = dct_32_round(output[8] * cospi_30_64 + output[15] * cospi_2_64); - step[9] = dct_32_round(output[9] * cospi_14_64 + output[14] * cospi_18_64); - step[10] = dct_32_round(output[10] * cospi_22_64 + output[13] * cospi_10_64); - step[11] = dct_32_round(output[11] * cospi_6_64 + output[12] * cospi_26_64); - step[12] = dct_32_round(output[12] * cospi_6_64 + output[11] * -cospi_26_64); - step[13] = dct_32_round(output[13] * cospi_22_64 + output[10] * -cospi_10_64); - step[14] = dct_32_round(output[14] * cospi_14_64 + output[9] * -cospi_18_64); - step[15] = 
dct_32_round(output[15] * cospi_30_64 + output[8] * -cospi_2_64); - - step[16] = output[16] + output[17]; - step[17] = -output[17] + output[16]; - step[18] = -output[18] + output[19]; - step[19] = output[19] + output[18]; - step[20] = output[20] + output[21]; - step[21] = -output[21] + output[20]; - step[22] = -output[22] + output[23]; - step[23] = output[23] + output[22]; - step[24] = output[24] + output[25]; - step[25] = -output[25] + output[24]; - step[26] = -output[26] + output[27]; - step[27] = output[27] + output[26]; - step[28] = output[28] + output[29]; - step[29] = -output[29] + output[28]; - step[30] = -output[30] + output[31]; - step[31] = output[31] + output[30]; - - // Final stage --- outputs indices are bit-reversed. - output[0] = step[0]; - output[16] = step[1]; - output[8] = step[2]; - output[24] = step[3]; - output[4] = step[4]; - output[20] = step[5]; - output[12] = step[6]; - output[28] = step[7]; - output[2] = step[8]; - output[18] = step[9]; - output[10] = step[10]; - output[26] = step[11]; - output[6] = step[12]; - output[22] = step[13]; - output[14] = step[14]; - output[30] = step[15]; - - output[1] = dct_32_round(step[16] * cospi_31_64 + step[31] * cospi_1_64); - output[17] = dct_32_round(step[17] * cospi_15_64 + step[30] * cospi_17_64); - output[9] = dct_32_round(step[18] * cospi_23_64 + step[29] * cospi_9_64); - output[25] = dct_32_round(step[19] * cospi_7_64 + step[28] * cospi_25_64); - output[5] = dct_32_round(step[20] * cospi_27_64 + step[27] * cospi_5_64); - output[21] = dct_32_round(step[21] * cospi_11_64 + step[26] * cospi_21_64); - output[13] = dct_32_round(step[22] * cospi_19_64 + step[25] * cospi_13_64); - output[29] = dct_32_round(step[23] * cospi_3_64 + step[24] * cospi_29_64); - output[3] = dct_32_round(step[24] * cospi_3_64 + step[23] * -cospi_29_64); - output[19] = dct_32_round(step[25] * cospi_19_64 + step[22] * -cospi_13_64); - output[11] = dct_32_round(step[26] * cospi_11_64 + step[21] * -cospi_21_64); - output[27] = 
dct_32_round(step[27] * cospi_27_64 + step[20] * -cospi_5_64); - output[7] = dct_32_round(step[28] * cospi_7_64 + step[19] * -cospi_25_64); - output[23] = dct_32_round(step[29] * cospi_23_64 + step[18] * -cospi_9_64); - output[15] = dct_32_round(step[30] * cospi_15_64 + step[17] * -cospi_17_64); - output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64); -} - -void vp10_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) { - int i, j; - tran_high_t output[32 * 32]; - - // Columns - for (i = 0; i < 32; ++i) { - tran_high_t temp_in[32], temp_out[32]; - for (j = 0; j < 32; ++j) - temp_in[j] = input[j * stride + i] * 4; - vp10_fdct32(temp_in, temp_out, 0); - for (j = 0; j < 32; ++j) - output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2; - } - - // Rows - for (i = 0; i < 32; ++i) { - tran_high_t temp_in[32], temp_out[32]; - for (j = 0; j < 32; ++j) - temp_in[j] = output[j + i * 32]; - vp10_fdct32(temp_in, temp_out, 0); - for (j = 0; j < 32; ++j) - out[j + i * 32] = - (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2); - } -} - -// Note that although we use dct_32_round in dct32 computation flow, -// this 2d fdct32x32 for rate-distortion optimization loop is operating -// within 16 bits precision. 
-void vp10_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) { - int i, j; - tran_high_t output[32 * 32]; - - // Columns - for (i = 0; i < 32; ++i) { - tran_high_t temp_in[32], temp_out[32]; - for (j = 0; j < 32; ++j) - temp_in[j] = input[j * stride + i] * 4; - vp10_fdct32(temp_in, temp_out, 0); - for (j = 0; j < 32; ++j) - // TODO(cd): see quality impact of only doing - // output[j * 32 + i] = (temp_out[j] + 1) >> 2; - // PS: also change code in vp10_dsp/x86/vp10_dct_sse2.c - output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2; - } - - // Rows - for (i = 0; i < 32; ++i) { - tran_high_t temp_in[32], temp_out[32]; - for (j = 0; j < 32; ++j) - temp_in[j] = output[j + i * 32]; - vp10_fdct32(temp_in, temp_out, 1); - for (j = 0; j < 32; ++j) - out[j + i * 32] = (tran_low_t)temp_out[j]; - } -} - -void vp10_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) { - int r, c; - tran_low_t sum = 0; - for (r = 0; r < 32; ++r) - for (c = 0; c < 32; ++c) - sum += input[r * stride + c]; - - output[0] = sum >> 3; - output[1] = 0; -} - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output, - int stride) { - vp10_fdct4x4_c(input, output, stride); -} - -void vp10_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output, - int stride) { - vp10_fdct8x8_c(input, final_output, stride); -} - -void vp10_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output, - int stride) { - vp10_fdct8x8_1_c(input, final_output, stride); -} - -void vp10_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output, - int stride) { - vp10_fdct16x16_c(input, output, stride); -} - -void vp10_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output, - int stride) { - vp10_fdct16x16_1_c(input, output, stride); -} - -void vp10_highbd_fdct32x32_c(const int16_t *input, - tran_low_t *out, int stride) { - vp10_fdct32x32_c(input, out, stride); -} - -void vp10_highbd_fdct32x32_rd_c(const int16_t *input, 
tran_low_t *out, - int stride) { - vp10_fdct32x32_rd_c(input, out, stride); -} - -void vp10_highbd_fdct32x32_1_c(const int16_t *input, - tran_low_t *out, int stride) { - vp10_fdct32x32_1_c(input, out, stride); -} -#endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vp10/common/vp10_fwd_txfm.h b/libs/libvpx/vp10/common/vp10_fwd_txfm.h deleted file mode 100644 index 46dbf3dd04..0000000000 --- a/libs/libvpx/vp10/common/vp10_fwd_txfm.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_COMMON_VP10_FWD_TXFM_H_ -#define VP10_COMMON_VP10_FWD_TXFM_H_ - -#include "vpx_dsp/txfm_common.h" -#include "vpx_dsp/fwd_txfm.h" - -void vp10_fdct32(const tran_high_t *input, tran_high_t *output, int round); -#endif // VP10_COMMON_VP10_FWD_TXFM_H_ diff --git a/libs/libvpx/vp10/common/vp10_inv_txfm.c b/libs/libvpx/vp10/common/vp10_inv_txfm.c deleted file mode 100644 index 403b209a20..0000000000 --- a/libs/libvpx/vp10/common/vp10_inv_txfm.c +++ /dev/null @@ -1,2499 +0,0 @@ -/* - * - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include - -#include "vp10/common/vp10_inv_txfm.h" - -void vp10_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) { -/* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds, - 0.5 shifts per pixel. */ - int i; - tran_low_t output[16]; - tran_high_t a1, b1, c1, d1, e1; - const tran_low_t *ip = input; - tran_low_t *op = output; - - for (i = 0; i < 4; i++) { - a1 = ip[0] >> UNIT_QUANT_SHIFT; - c1 = ip[1] >> UNIT_QUANT_SHIFT; - d1 = ip[2] >> UNIT_QUANT_SHIFT; - b1 = ip[3] >> UNIT_QUANT_SHIFT; - a1 += c1; - d1 -= b1; - e1 = (a1 - d1) >> 1; - b1 = e1 - b1; - c1 = e1 - c1; - a1 -= b1; - d1 += c1; - op[0] = WRAPLOW(a1, 8); - op[1] = WRAPLOW(b1, 8); - op[2] = WRAPLOW(c1, 8); - op[3] = WRAPLOW(d1, 8); - ip += 4; - op += 4; - } - - ip = output; - for (i = 0; i < 4; i++) { - a1 = ip[4 * 0]; - c1 = ip[4 * 1]; - d1 = ip[4 * 2]; - b1 = ip[4 * 3]; - a1 += c1; - d1 -= b1; - e1 = (a1 - d1) >> 1; - b1 = e1 - b1; - c1 = e1 - c1; - a1 -= b1; - d1 += c1; - dest[stride * 0] = clip_pixel_add(dest[stride * 0], a1); - dest[stride * 1] = clip_pixel_add(dest[stride * 1], b1); - dest[stride * 2] = clip_pixel_add(dest[stride * 2], c1); - dest[stride * 3] = clip_pixel_add(dest[stride * 3], d1); - - ip++; - dest++; - } -} - -void vp10_iwht4x4_1_add_c(const tran_low_t *in, - uint8_t *dest, - int dest_stride) { - int i; - tran_high_t a1, e1; - tran_low_t tmp[4]; - const tran_low_t *ip = in; - tran_low_t *op = tmp; - - a1 = ip[0] >> UNIT_QUANT_SHIFT; - e1 = a1 >> 1; - a1 -= e1; - op[0] = WRAPLOW(a1, 8); - op[1] = op[2] = op[3] = WRAPLOW(e1, 8); - - ip = tmp; - for (i = 0; i < 4; i++) { - e1 = ip[0] >> 1; - a1 = ip[0] - e1; - dest[dest_stride * 0] = clip_pixel_add(dest[dest_stride * 0], a1); - dest[dest_stride * 1] = clip_pixel_add(dest[dest_stride * 1], e1); - dest[dest_stride * 2] = clip_pixel_add(dest[dest_stride * 2], e1); - dest[dest_stride * 3] = clip_pixel_add(dest[dest_stride * 3], e1); - ip++; - dest++; - } -} - -void vp10_idct4_c(const 
tran_low_t *input, tran_low_t *output) { - tran_low_t step[4]; - tran_high_t temp1, temp2; - // stage 1 - temp1 = (input[0] + input[2]) * cospi_16_64; - temp2 = (input[0] - input[2]) * cospi_16_64; - step[0] = WRAPLOW(dct_const_round_shift(temp1), 8); - step[1] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64; - temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64; - step[2] = WRAPLOW(dct_const_round_shift(temp1), 8); - step[3] = WRAPLOW(dct_const_round_shift(temp2), 8); - - // stage 2 - output[0] = WRAPLOW(step[0] + step[3], 8); - output[1] = WRAPLOW(step[1] + step[2], 8); - output[2] = WRAPLOW(step[1] - step[2], 8); - output[3] = WRAPLOW(step[0] - step[3], 8); -} - -void vp10_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) { - tran_low_t out[4 * 4]; - tran_low_t *outptr = out; - int i, j; - tran_low_t temp_in[4], temp_out[4]; - - // Rows - for (i = 0; i < 4; ++i) { - vp10_idct4_c(input, outptr); - input += 4; - outptr += 4; - } - - // Columns - for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = out[j * 4 + i]; - vp10_idct4_c(temp_in, temp_out); - for (j = 0; j < 4; ++j) { - dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], - ROUND_POWER_OF_TWO(temp_out[j], 4)); - } - } -} - -void vp10_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest, - int dest_stride) { - int i; - tran_high_t a1; - tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), 8); - out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), 8); - a1 = ROUND_POWER_OF_TWO(out, 4); - - for (i = 0; i < 4; i++) { - dest[0] = clip_pixel_add(dest[0], a1); - dest[1] = clip_pixel_add(dest[1], a1); - dest[2] = clip_pixel_add(dest[2], a1); - dest[3] = clip_pixel_add(dest[3], a1); - dest += dest_stride; - } -} - -void vp10_idct8_c(const tran_low_t *input, tran_low_t *output) { - tran_low_t step1[8], step2[8]; - tran_high_t temp1, temp2; - // stage 1 - step1[0] = input[0]; - step1[2] = input[4]; - 
step1[1] = input[2]; - step1[3] = input[6]; - temp1 = input[1] * cospi_28_64 - input[7] * cospi_4_64; - temp2 = input[1] * cospi_4_64 + input[7] * cospi_28_64; - step1[4] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[7] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64; - temp2 = input[5] * cospi_20_64 + input[3] * cospi_12_64; - step1[5] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[6] = WRAPLOW(dct_const_round_shift(temp2), 8); - - // stage 2 - temp1 = (step1[0] + step1[2]) * cospi_16_64; - temp2 = (step1[0] - step1[2]) * cospi_16_64; - step2[0] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[1] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = step1[1] * cospi_24_64 - step1[3] * cospi_8_64; - temp2 = step1[1] * cospi_8_64 + step1[3] * cospi_24_64; - step2[2] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[3] = WRAPLOW(dct_const_round_shift(temp2), 8); - step2[4] = WRAPLOW(step1[4] + step1[5], 8); - step2[5] = WRAPLOW(step1[4] - step1[5], 8); - step2[6] = WRAPLOW(-step1[6] + step1[7], 8); - step2[7] = WRAPLOW(step1[6] + step1[7], 8); - - // stage 3 - step1[0] = WRAPLOW(step2[0] + step2[3], 8); - step1[1] = WRAPLOW(step2[1] + step2[2], 8); - step1[2] = WRAPLOW(step2[1] - step2[2], 8); - step1[3] = WRAPLOW(step2[0] - step2[3], 8); - step1[4] = step2[4]; - temp1 = (step2[6] - step2[5]) * cospi_16_64; - temp2 = (step2[5] + step2[6]) * cospi_16_64; - step1[5] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[6] = WRAPLOW(dct_const_round_shift(temp2), 8); - step1[7] = step2[7]; - - // stage 4 - output[0] = WRAPLOW(step1[0] + step1[7], 8); - output[1] = WRAPLOW(step1[1] + step1[6], 8); - output[2] = WRAPLOW(step1[2] + step1[5], 8); - output[3] = WRAPLOW(step1[3] + step1[4], 8); - output[4] = WRAPLOW(step1[3] - step1[4], 8); - output[5] = WRAPLOW(step1[2] - step1[5], 8); - output[6] = WRAPLOW(step1[1] - step1[6], 8); - output[7] = WRAPLOW(step1[0] - step1[7], 8); -} - -void 
vp10_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) { - tran_low_t out[8 * 8]; - tran_low_t *outptr = out; - int i, j; - tran_low_t temp_in[8], temp_out[8]; - - // First transform rows - for (i = 0; i < 8; ++i) { - vp10_idct8_c(input, outptr); - input += 8; - outptr += 8; - } - - // Then transform columns - for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; - vp10_idct8_c(temp_in, temp_out); - for (j = 0; j < 8; ++j) { - dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], - ROUND_POWER_OF_TWO(temp_out[j], 5)); - } - } -} - -void vp10_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) { - int i, j; - tran_high_t a1; - tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), 8); - out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), 8); - a1 = ROUND_POWER_OF_TWO(out, 5); - for (j = 0; j < 8; ++j) { - for (i = 0; i < 8; ++i) - dest[i] = clip_pixel_add(dest[i], a1); - dest += stride; - } -} - -void vp10_iadst4_c(const tran_low_t *input, tran_low_t *output) { - tran_high_t s0, s1, s2, s3, s4, s5, s6, s7; - - tran_low_t x0 = input[0]; - tran_low_t x1 = input[1]; - tran_low_t x2 = input[2]; - tran_low_t x3 = input[3]; - - if (!(x0 | x1 | x2 | x3)) { - output[0] = output[1] = output[2] = output[3] = 0; - return; - } - - s0 = sinpi_1_9 * x0; - s1 = sinpi_2_9 * x0; - s2 = sinpi_3_9 * x1; - s3 = sinpi_4_9 * x2; - s4 = sinpi_1_9 * x2; - s5 = sinpi_2_9 * x3; - s6 = sinpi_4_9 * x3; - s7 = x0 - x2 + x3; - - s0 = s0 + s3 + s5; - s1 = s1 - s4 - s6; - s3 = s2; - s2 = sinpi_3_9 * s7; - - // 1-D transform scaling factor is sqrt(2). - // The overall dynamic range is 14b (input) + 14b (multiplication scaling) - // + 1b (addition) = 29b. - // Hence the output bit depth is 15b. 
- output[0] = WRAPLOW(dct_const_round_shift(s0 + s3), 8); - output[1] = WRAPLOW(dct_const_round_shift(s1 + s3), 8); - output[2] = WRAPLOW(dct_const_round_shift(s2), 8); - output[3] = WRAPLOW(dct_const_round_shift(s0 + s1 - s3), 8); -} - -void vp10_iadst8_c(const tran_low_t *input, tran_low_t *output) { - int s0, s1, s2, s3, s4, s5, s6, s7; - - tran_high_t x0 = input[7]; - tran_high_t x1 = input[0]; - tran_high_t x2 = input[5]; - tran_high_t x3 = input[2]; - tran_high_t x4 = input[3]; - tran_high_t x5 = input[4]; - tran_high_t x6 = input[1]; - tran_high_t x7 = input[6]; - - if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) { - output[0] = output[1] = output[2] = output[3] = output[4] - = output[5] = output[6] = output[7] = 0; - return; - } - - // stage 1 - s0 = (int)(cospi_2_64 * x0 + cospi_30_64 * x1); - s1 = (int)(cospi_30_64 * x0 - cospi_2_64 * x1); - s2 = (int)(cospi_10_64 * x2 + cospi_22_64 * x3); - s3 = (int)(cospi_22_64 * x2 - cospi_10_64 * x3); - s4 = (int)(cospi_18_64 * x4 + cospi_14_64 * x5); - s5 = (int)(cospi_14_64 * x4 - cospi_18_64 * x5); - s6 = (int)(cospi_26_64 * x6 + cospi_6_64 * x7); - s7 = (int)(cospi_6_64 * x6 - cospi_26_64 * x7); - - x0 = WRAPLOW(dct_const_round_shift(s0 + s4), 8); - x1 = WRAPLOW(dct_const_round_shift(s1 + s5), 8); - x2 = WRAPLOW(dct_const_round_shift(s2 + s6), 8); - x3 = WRAPLOW(dct_const_round_shift(s3 + s7), 8); - x4 = WRAPLOW(dct_const_round_shift(s0 - s4), 8); - x5 = WRAPLOW(dct_const_round_shift(s1 - s5), 8); - x6 = WRAPLOW(dct_const_round_shift(s2 - s6), 8); - x7 = WRAPLOW(dct_const_round_shift(s3 - s7), 8); - - // stage 2 - s0 = (int)x0; - s1 = (int)x1; - s2 = (int)x2; - s3 = (int)x3; - s4 = (int)(cospi_8_64 * x4 + cospi_24_64 * x5); - s5 = (int)(cospi_24_64 * x4 - cospi_8_64 * x5); - s6 = (int)(-cospi_24_64 * x6 + cospi_8_64 * x7); - s7 = (int)(cospi_8_64 * x6 + cospi_24_64 * x7); - - x0 = WRAPLOW(s0 + s2, 8); - x1 = WRAPLOW(s1 + s3, 8); - x2 = WRAPLOW(s0 - s2, 8); - x3 = WRAPLOW(s1 - s3, 8); - x4 = 
WRAPLOW(dct_const_round_shift(s4 + s6), 8); - x5 = WRAPLOW(dct_const_round_shift(s5 + s7), 8); - x6 = WRAPLOW(dct_const_round_shift(s4 - s6), 8); - x7 = WRAPLOW(dct_const_round_shift(s5 - s7), 8); - - // stage 3 - s2 = (int)(cospi_16_64 * (x2 + x3)); - s3 = (int)(cospi_16_64 * (x2 - x3)); - s6 = (int)(cospi_16_64 * (x6 + x7)); - s7 = (int)(cospi_16_64 * (x6 - x7)); - - x2 = WRAPLOW(dct_const_round_shift(s2), 8); - x3 = WRAPLOW(dct_const_round_shift(s3), 8); - x6 = WRAPLOW(dct_const_round_shift(s6), 8); - x7 = WRAPLOW(dct_const_round_shift(s7), 8); - - output[0] = WRAPLOW(x0, 8); - output[1] = WRAPLOW(-x4, 8); - output[2] = WRAPLOW(x6, 8); - output[3] = WRAPLOW(-x2, 8); - output[4] = WRAPLOW(x3, 8); - output[5] = WRAPLOW(-x7, 8); - output[6] = WRAPLOW(x5, 8); - output[7] = WRAPLOW(-x1, 8); -} - -void vp10_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) { - tran_low_t out[8 * 8] = { 0 }; - tran_low_t *outptr = out; - int i, j; - tran_low_t temp_in[8], temp_out[8]; - - // First transform rows - // only first 4 row has non-zero coefs - for (i = 0; i < 4; ++i) { - vp10_idct8_c(input, outptr); - input += 8; - outptr += 8; - } - - // Then transform columns - for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; - vp10_idct8_c(temp_in, temp_out); - for (j = 0; j < 8; ++j) { - dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], - ROUND_POWER_OF_TWO(temp_out[j], 5)); - } - } -} - -void vp10_idct16_c(const tran_low_t *input, tran_low_t *output) { - tran_low_t step1[16], step2[16]; - tran_high_t temp1, temp2; - - // stage 1 - step1[0] = input[0/2]; - step1[1] = input[16/2]; - step1[2] = input[8/2]; - step1[3] = input[24/2]; - step1[4] = input[4/2]; - step1[5] = input[20/2]; - step1[6] = input[12/2]; - step1[7] = input[28/2]; - step1[8] = input[2/2]; - step1[9] = input[18/2]; - step1[10] = input[10/2]; - step1[11] = input[26/2]; - step1[12] = input[6/2]; - step1[13] = input[22/2]; - step1[14] = input[14/2]; - step1[15] = 
input[30/2]; - - // stage 2 - step2[0] = step1[0]; - step2[1] = step1[1]; - step2[2] = step1[2]; - step2[3] = step1[3]; - step2[4] = step1[4]; - step2[5] = step1[5]; - step2[6] = step1[6]; - step2[7] = step1[7]; - - temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64; - temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64; - step2[8] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[15] = WRAPLOW(dct_const_round_shift(temp2), 8); - - temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64; - temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64; - step2[9] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[14] = WRAPLOW(dct_const_round_shift(temp2), 8); - - temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64; - temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64; - step2[10] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[13] = WRAPLOW(dct_const_round_shift(temp2), 8); - - temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64; - temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64; - step2[11] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[12] = WRAPLOW(dct_const_round_shift(temp2), 8); - - // stage 3 - step1[0] = step2[0]; - step1[1] = step2[1]; - step1[2] = step2[2]; - step1[3] = step2[3]; - - temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64; - temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64; - step1[4] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[7] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64; - temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64; - step1[5] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[6] = WRAPLOW(dct_const_round_shift(temp2), 8); - - step1[8] = WRAPLOW(step2[8] + step2[9], 8); - step1[9] = WRAPLOW(step2[8] - step2[9], 8); - step1[10] = WRAPLOW(-step2[10] + step2[11], 8); - step1[11] = WRAPLOW(step2[10] + step2[11], 8); - step1[12] = WRAPLOW(step2[12] + step2[13], 8); - step1[13] = WRAPLOW(step2[12] - step2[13], 8); 
- step1[14] = WRAPLOW(-step2[14] + step2[15], 8); - step1[15] = WRAPLOW(step2[14] + step2[15], 8); - - // stage 4 - temp1 = (step1[0] + step1[1]) * cospi_16_64; - temp2 = (step1[0] - step1[1]) * cospi_16_64; - step2[0] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[1] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64; - temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64; - step2[2] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[3] = WRAPLOW(dct_const_round_shift(temp2), 8); - step2[4] = WRAPLOW(step1[4] + step1[5], 8); - step2[5] = WRAPLOW(step1[4] - step1[5], 8); - step2[6] = WRAPLOW(-step1[6] + step1[7], 8); - step2[7] = WRAPLOW(step1[6] + step1[7], 8); - - step2[8] = step1[8]; - step2[15] = step1[15]; - temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64; - temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64; - step2[9] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[14] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64; - temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64; - step2[10] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[13] = WRAPLOW(dct_const_round_shift(temp2), 8); - step2[11] = step1[11]; - step2[12] = step1[12]; - - // stage 5 - step1[0] = WRAPLOW(step2[0] + step2[3], 8); - step1[1] = WRAPLOW(step2[1] + step2[2], 8); - step1[2] = WRAPLOW(step2[1] - step2[2], 8); - step1[3] = WRAPLOW(step2[0] - step2[3], 8); - step1[4] = step2[4]; - temp1 = (step2[6] - step2[5]) * cospi_16_64; - temp2 = (step2[5] + step2[6]) * cospi_16_64; - step1[5] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[6] = WRAPLOW(dct_const_round_shift(temp2), 8); - step1[7] = step2[7]; - - step1[8] = WRAPLOW(step2[8] + step2[11], 8); - step1[9] = WRAPLOW(step2[9] + step2[10], 8); - step1[10] = WRAPLOW(step2[9] - step2[10], 8); - step1[11] = WRAPLOW(step2[8] - step2[11], 8); - step1[12] = WRAPLOW(-step2[12] + step2[15], 8); - step1[13] = 
WRAPLOW(-step2[13] + step2[14], 8); - step1[14] = WRAPLOW(step2[13] + step2[14], 8); - step1[15] = WRAPLOW(step2[12] + step2[15], 8); - - // stage 6 - step2[0] = WRAPLOW(step1[0] + step1[7], 8); - step2[1] = WRAPLOW(step1[1] + step1[6], 8); - step2[2] = WRAPLOW(step1[2] + step1[5], 8); - step2[3] = WRAPLOW(step1[3] + step1[4], 8); - step2[4] = WRAPLOW(step1[3] - step1[4], 8); - step2[5] = WRAPLOW(step1[2] - step1[5], 8); - step2[6] = WRAPLOW(step1[1] - step1[6], 8); - step2[7] = WRAPLOW(step1[0] - step1[7], 8); - step2[8] = step1[8]; - step2[9] = step1[9]; - temp1 = (-step1[10] + step1[13]) * cospi_16_64; - temp2 = (step1[10] + step1[13]) * cospi_16_64; - step2[10] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[13] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = (-step1[11] + step1[12]) * cospi_16_64; - temp2 = (step1[11] + step1[12]) * cospi_16_64; - step2[11] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[12] = WRAPLOW(dct_const_round_shift(temp2), 8); - step2[14] = step1[14]; - step2[15] = step1[15]; - - // stage 7 - output[0] = WRAPLOW(step2[0] + step2[15], 8); - output[1] = WRAPLOW(step2[1] + step2[14], 8); - output[2] = WRAPLOW(step2[2] + step2[13], 8); - output[3] = WRAPLOW(step2[3] + step2[12], 8); - output[4] = WRAPLOW(step2[4] + step2[11], 8); - output[5] = WRAPLOW(step2[5] + step2[10], 8); - output[6] = WRAPLOW(step2[6] + step2[9], 8); - output[7] = WRAPLOW(step2[7] + step2[8], 8); - output[8] = WRAPLOW(step2[7] - step2[8], 8); - output[9] = WRAPLOW(step2[6] - step2[9], 8); - output[10] = WRAPLOW(step2[5] - step2[10], 8); - output[11] = WRAPLOW(step2[4] - step2[11], 8); - output[12] = WRAPLOW(step2[3] - step2[12], 8); - output[13] = WRAPLOW(step2[2] - step2[13], 8); - output[14] = WRAPLOW(step2[1] - step2[14], 8); - output[15] = WRAPLOW(step2[0] - step2[15], 8); -} - -void vp10_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest, - int stride) { - tran_low_t out[16 * 16]; - tran_low_t *outptr = out; - int i, j; - tran_low_t 
temp_in[16], temp_out[16]; - - // First transform rows - for (i = 0; i < 16; ++i) { - vp10_idct16_c(input, outptr); - input += 16; - outptr += 16; - } - - // Then transform columns - for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j * 16 + i]; - vp10_idct16_c(temp_in, temp_out); - for (j = 0; j < 16; ++j) { - dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], - ROUND_POWER_OF_TWO(temp_out[j], 6)); - } - } -} - -void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output) { - tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8; - tran_high_t s9, s10, s11, s12, s13, s14, s15; - - tran_high_t x0 = input[15]; - tran_high_t x1 = input[0]; - tran_high_t x2 = input[13]; - tran_high_t x3 = input[2]; - tran_high_t x4 = input[11]; - tran_high_t x5 = input[4]; - tran_high_t x6 = input[9]; - tran_high_t x7 = input[6]; - tran_high_t x8 = input[7]; - tran_high_t x9 = input[8]; - tran_high_t x10 = input[5]; - tran_high_t x11 = input[10]; - tran_high_t x12 = input[3]; - tran_high_t x13 = input[12]; - tran_high_t x14 = input[1]; - tran_high_t x15 = input[14]; - - if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 - | x9 | x10 | x11 | x12 | x13 | x14 | x15)) { - output[0] = output[1] = output[2] = output[3] = output[4] - = output[5] = output[6] = output[7] = output[8] - = output[9] = output[10] = output[11] = output[12] - = output[13] = output[14] = output[15] = 0; - return; - } - - // stage 1 - s0 = x0 * cospi_1_64 + x1 * cospi_31_64; - s1 = x0 * cospi_31_64 - x1 * cospi_1_64; - s2 = x2 * cospi_5_64 + x3 * cospi_27_64; - s3 = x2 * cospi_27_64 - x3 * cospi_5_64; - s4 = x4 * cospi_9_64 + x5 * cospi_23_64; - s5 = x4 * cospi_23_64 - x5 * cospi_9_64; - s6 = x6 * cospi_13_64 + x7 * cospi_19_64; - s7 = x6 * cospi_19_64 - x7 * cospi_13_64; - s8 = x8 * cospi_17_64 + x9 * cospi_15_64; - s9 = x8 * cospi_15_64 - x9 * cospi_17_64; - s10 = x10 * cospi_21_64 + x11 * cospi_11_64; - s11 = x10 * cospi_11_64 - x11 * cospi_21_64; - s12 = x12 * cospi_25_64 + x13 * 
cospi_7_64; - s13 = x12 * cospi_7_64 - x13 * cospi_25_64; - s14 = x14 * cospi_29_64 + x15 * cospi_3_64; - s15 = x14 * cospi_3_64 - x15 * cospi_29_64; - - x0 = WRAPLOW(dct_const_round_shift(s0 + s8), 8); - x1 = WRAPLOW(dct_const_round_shift(s1 + s9), 8); - x2 = WRAPLOW(dct_const_round_shift(s2 + s10), 8); - x3 = WRAPLOW(dct_const_round_shift(s3 + s11), 8); - x4 = WRAPLOW(dct_const_round_shift(s4 + s12), 8); - x5 = WRAPLOW(dct_const_round_shift(s5 + s13), 8); - x6 = WRAPLOW(dct_const_round_shift(s6 + s14), 8); - x7 = WRAPLOW(dct_const_round_shift(s7 + s15), 8); - x8 = WRAPLOW(dct_const_round_shift(s0 - s8), 8); - x9 = WRAPLOW(dct_const_round_shift(s1 - s9), 8); - x10 = WRAPLOW(dct_const_round_shift(s2 - s10), 8); - x11 = WRAPLOW(dct_const_round_shift(s3 - s11), 8); - x12 = WRAPLOW(dct_const_round_shift(s4 - s12), 8); - x13 = WRAPLOW(dct_const_round_shift(s5 - s13), 8); - x14 = WRAPLOW(dct_const_round_shift(s6 - s14), 8); - x15 = WRAPLOW(dct_const_round_shift(s7 - s15), 8); - - // stage 2 - s0 = x0; - s1 = x1; - s2 = x2; - s3 = x3; - s4 = x4; - s5 = x5; - s6 = x6; - s7 = x7; - s8 = x8 * cospi_4_64 + x9 * cospi_28_64; - s9 = x8 * cospi_28_64 - x9 * cospi_4_64; - s10 = x10 * cospi_20_64 + x11 * cospi_12_64; - s11 = x10 * cospi_12_64 - x11 * cospi_20_64; - s12 = - x12 * cospi_28_64 + x13 * cospi_4_64; - s13 = x12 * cospi_4_64 + x13 * cospi_28_64; - s14 = - x14 * cospi_12_64 + x15 * cospi_20_64; - s15 = x14 * cospi_20_64 + x15 * cospi_12_64; - - x0 = WRAPLOW(s0 + s4, 8); - x1 = WRAPLOW(s1 + s5, 8); - x2 = WRAPLOW(s2 + s6, 8); - x3 = WRAPLOW(s3 + s7, 8); - x4 = WRAPLOW(s0 - s4, 8); - x5 = WRAPLOW(s1 - s5, 8); - x6 = WRAPLOW(s2 - s6, 8); - x7 = WRAPLOW(s3 - s7, 8); - x8 = WRAPLOW(dct_const_round_shift(s8 + s12), 8); - x9 = WRAPLOW(dct_const_round_shift(s9 + s13), 8); - x10 = WRAPLOW(dct_const_round_shift(s10 + s14), 8); - x11 = WRAPLOW(dct_const_round_shift(s11 + s15), 8); - x12 = WRAPLOW(dct_const_round_shift(s8 - s12), 8); - x13 = WRAPLOW(dct_const_round_shift(s9 - s13), 
8); - x14 = WRAPLOW(dct_const_round_shift(s10 - s14), 8); - x15 = WRAPLOW(dct_const_round_shift(s11 - s15), 8); - - // stage 3 - s0 = x0; - s1 = x1; - s2 = x2; - s3 = x3; - s4 = x4 * cospi_8_64 + x5 * cospi_24_64; - s5 = x4 * cospi_24_64 - x5 * cospi_8_64; - s6 = - x6 * cospi_24_64 + x7 * cospi_8_64; - s7 = x6 * cospi_8_64 + x7 * cospi_24_64; - s8 = x8; - s9 = x9; - s10 = x10; - s11 = x11; - s12 = x12 * cospi_8_64 + x13 * cospi_24_64; - s13 = x12 * cospi_24_64 - x13 * cospi_8_64; - s14 = - x14 * cospi_24_64 + x15 * cospi_8_64; - s15 = x14 * cospi_8_64 + x15 * cospi_24_64; - - x0 = WRAPLOW(check_range(s0 + s2), 8); - x1 = WRAPLOW(check_range(s1 + s3), 8); - x2 = WRAPLOW(check_range(s0 - s2), 8); - x3 = WRAPLOW(check_range(s1 - s3), 8); - x4 = WRAPLOW(dct_const_round_shift(s4 + s6), 8); - x5 = WRAPLOW(dct_const_round_shift(s5 + s7), 8); - x6 = WRAPLOW(dct_const_round_shift(s4 - s6), 8); - x7 = WRAPLOW(dct_const_round_shift(s5 - s7), 8); - x8 = WRAPLOW(check_range(s8 + s10), 8); - x9 = WRAPLOW(check_range(s9 + s11), 8); - x10 = WRAPLOW(check_range(s8 - s10), 8); - x11 = WRAPLOW(check_range(s9 - s11), 8); - x12 = WRAPLOW(dct_const_round_shift(s12 + s14), 8); - x13 = WRAPLOW(dct_const_round_shift(s13 + s15), 8); - x14 = WRAPLOW(dct_const_round_shift(s12 - s14), 8); - x15 = WRAPLOW(dct_const_round_shift(s13 - s15), 8); - - // stage 4 - s2 = (- cospi_16_64) * (x2 + x3); - s3 = cospi_16_64 * (x2 - x3); - s6 = cospi_16_64 * (x6 + x7); - s7 = cospi_16_64 * (- x6 + x7); - s10 = cospi_16_64 * (x10 + x11); - s11 = cospi_16_64 * (- x10 + x11); - s14 = (- cospi_16_64) * (x14 + x15); - s15 = cospi_16_64 * (x14 - x15); - - x2 = WRAPLOW(dct_const_round_shift(s2), 8); - x3 = WRAPLOW(dct_const_round_shift(s3), 8); - x6 = WRAPLOW(dct_const_round_shift(s6), 8); - x7 = WRAPLOW(dct_const_round_shift(s7), 8); - x10 = WRAPLOW(dct_const_round_shift(s10), 8); - x11 = WRAPLOW(dct_const_round_shift(s11), 8); - x14 = WRAPLOW(dct_const_round_shift(s14), 8); - x15 = 
WRAPLOW(dct_const_round_shift(s15), 8); - - output[0] = WRAPLOW(x0, 8); - output[1] = WRAPLOW(-x8, 8); - output[2] = WRAPLOW(x12, 8); - output[3] = WRAPLOW(-x4, 8); - output[4] = WRAPLOW(x6, 8); - output[5] = WRAPLOW(x14, 8); - output[6] = WRAPLOW(x10, 8); - output[7] = WRAPLOW(x2, 8); - output[8] = WRAPLOW(x3, 8); - output[9] = WRAPLOW(x11, 8); - output[10] = WRAPLOW(x15, 8); - output[11] = WRAPLOW(x7, 8); - output[12] = WRAPLOW(x5, 8); - output[13] = WRAPLOW(-x13, 8); - output[14] = WRAPLOW(x9, 8); - output[15] = WRAPLOW(-x1, 8); -} - -void vp10_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest, - int stride) { - tran_low_t out[16 * 16] = { 0 }; - tran_low_t *outptr = out; - int i, j; - tran_low_t temp_in[16], temp_out[16]; - - // First transform rows. Since all non-zero dct coefficients are in - // upper-left 4x4 area, we only need to calculate first 4 rows here. - for (i = 0; i < 4; ++i) { - vp10_idct16_c(input, outptr); - input += 16; - outptr += 16; - } - - // Then transform columns - for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j*16 + i]; - vp10_idct16_c(temp_in, temp_out); - for (j = 0; j < 16; ++j) { - dest[j * stride + i] = clip_pixel_add( - dest[j * stride + i], - ROUND_POWER_OF_TWO(temp_out[j], 6)); - } - } -} - -void vp10_idct16x16_1_add_c(const tran_low_t *input, - uint8_t *dest, - int stride) { - int i, j; - tran_high_t a1; - tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), 8); - out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), 8); - a1 = ROUND_POWER_OF_TWO(out, 6); - for (j = 0; j < 16; ++j) { - for (i = 0; i < 16; ++i) - dest[i] = clip_pixel_add(dest[i], a1); - dest += stride; - } -} - -void vp10_idct32_c(const tran_low_t *input, tran_low_t *output) { - tran_low_t step1[32], step2[32]; - tran_high_t temp1, temp2; - - // stage 1 - step1[0] = input[0]; - step1[1] = input[16]; - step1[2] = input[8]; - step1[3] = input[24]; - step1[4] = input[4]; - step1[5] = input[20]; - step1[6] = 
input[12]; - step1[7] = input[28]; - step1[8] = input[2]; - step1[9] = input[18]; - step1[10] = input[10]; - step1[11] = input[26]; - step1[12] = input[6]; - step1[13] = input[22]; - step1[14] = input[14]; - step1[15] = input[30]; - - temp1 = input[1] * cospi_31_64 - input[31] * cospi_1_64; - temp2 = input[1] * cospi_1_64 + input[31] * cospi_31_64; - step1[16] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[31] = WRAPLOW(dct_const_round_shift(temp2), 8); - - temp1 = input[17] * cospi_15_64 - input[15] * cospi_17_64; - temp2 = input[17] * cospi_17_64 + input[15] * cospi_15_64; - step1[17] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[30] = WRAPLOW(dct_const_round_shift(temp2), 8); - - temp1 = input[9] * cospi_23_64 - input[23] * cospi_9_64; - temp2 = input[9] * cospi_9_64 + input[23] * cospi_23_64; - step1[18] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[29] = WRAPLOW(dct_const_round_shift(temp2), 8); - - temp1 = input[25] * cospi_7_64 - input[7] * cospi_25_64; - temp2 = input[25] * cospi_25_64 + input[7] * cospi_7_64; - step1[19] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[28] = WRAPLOW(dct_const_round_shift(temp2), 8); - - temp1 = input[5] * cospi_27_64 - input[27] * cospi_5_64; - temp2 = input[5] * cospi_5_64 + input[27] * cospi_27_64; - step1[20] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[27] = WRAPLOW(dct_const_round_shift(temp2), 8); - - temp1 = input[21] * cospi_11_64 - input[11] * cospi_21_64; - temp2 = input[21] * cospi_21_64 + input[11] * cospi_11_64; - step1[21] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[26] = WRAPLOW(dct_const_round_shift(temp2), 8); - - temp1 = input[13] * cospi_19_64 - input[19] * cospi_13_64; - temp2 = input[13] * cospi_13_64 + input[19] * cospi_19_64; - step1[22] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[25] = WRAPLOW(dct_const_round_shift(temp2), 8); - - temp1 = input[29] * cospi_3_64 - input[3] * cospi_29_64; - temp2 = input[29] * cospi_29_64 + input[3] * cospi_3_64; - step1[23] 
= WRAPLOW(dct_const_round_shift(temp1), 8); - step1[24] = WRAPLOW(dct_const_round_shift(temp2), 8); - - // stage 2 - step2[0] = step1[0]; - step2[1] = step1[1]; - step2[2] = step1[2]; - step2[3] = step1[3]; - step2[4] = step1[4]; - step2[5] = step1[5]; - step2[6] = step1[6]; - step2[7] = step1[7]; - - temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64; - temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64; - step2[8] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[15] = WRAPLOW(dct_const_round_shift(temp2), 8); - - temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64; - temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64; - step2[9] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[14] = WRAPLOW(dct_const_round_shift(temp2), 8); - - temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64; - temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64; - step2[10] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[13] = WRAPLOW(dct_const_round_shift(temp2), 8); - - temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64; - temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64; - step2[11] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[12] = WRAPLOW(dct_const_round_shift(temp2), 8); - - step2[16] = WRAPLOW(step1[16] + step1[17], 8); - step2[17] = WRAPLOW(step1[16] - step1[17], 8); - step2[18] = WRAPLOW(-step1[18] + step1[19], 8); - step2[19] = WRAPLOW(step1[18] + step1[19], 8); - step2[20] = WRAPLOW(step1[20] + step1[21], 8); - step2[21] = WRAPLOW(step1[20] - step1[21], 8); - step2[22] = WRAPLOW(-step1[22] + step1[23], 8); - step2[23] = WRAPLOW(step1[22] + step1[23], 8); - step2[24] = WRAPLOW(step1[24] + step1[25], 8); - step2[25] = WRAPLOW(step1[24] - step1[25], 8); - step2[26] = WRAPLOW(-step1[26] + step1[27], 8); - step2[27] = WRAPLOW(step1[26] + step1[27], 8); - step2[28] = WRAPLOW(step1[28] + step1[29], 8); - step2[29] = WRAPLOW(step1[28] - step1[29], 8); - step2[30] = WRAPLOW(-step1[30] + step1[31], 8); - step2[31] = 
WRAPLOW(step1[30] + step1[31], 8); - - // stage 3 - step1[0] = step2[0]; - step1[1] = step2[1]; - step1[2] = step2[2]; - step1[3] = step2[3]; - - temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64; - temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64; - step1[4] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[7] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64; - temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64; - step1[5] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[6] = WRAPLOW(dct_const_round_shift(temp2), 8); - - step1[8] = WRAPLOW(step2[8] + step2[9], 8); - step1[9] = WRAPLOW(step2[8] - step2[9], 8); - step1[10] = WRAPLOW(-step2[10] + step2[11], 8); - step1[11] = WRAPLOW(step2[10] + step2[11], 8); - step1[12] = WRAPLOW(step2[12] + step2[13], 8); - step1[13] = WRAPLOW(step2[12] - step2[13], 8); - step1[14] = WRAPLOW(-step2[14] + step2[15], 8); - step1[15] = WRAPLOW(step2[14] + step2[15], 8); - - step1[16] = step2[16]; - step1[31] = step2[31]; - temp1 = -step2[17] * cospi_4_64 + step2[30] * cospi_28_64; - temp2 = step2[17] * cospi_28_64 + step2[30] * cospi_4_64; - step1[17] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[30] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = -step2[18] * cospi_28_64 - step2[29] * cospi_4_64; - temp2 = -step2[18] * cospi_4_64 + step2[29] * cospi_28_64; - step1[18] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[29] = WRAPLOW(dct_const_round_shift(temp2), 8); - step1[19] = step2[19]; - step1[20] = step2[20]; - temp1 = -step2[21] * cospi_20_64 + step2[26] * cospi_12_64; - temp2 = step2[21] * cospi_12_64 + step2[26] * cospi_20_64; - step1[21] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[26] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = -step2[22] * cospi_12_64 - step2[25] * cospi_20_64; - temp2 = -step2[22] * cospi_20_64 + step2[25] * cospi_12_64; - step1[22] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[25] = 
WRAPLOW(dct_const_round_shift(temp2), 8); - step1[23] = step2[23]; - step1[24] = step2[24]; - step1[27] = step2[27]; - step1[28] = step2[28]; - - // stage 4 - temp1 = (step1[0] + step1[1]) * cospi_16_64; - temp2 = (step1[0] - step1[1]) * cospi_16_64; - step2[0] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[1] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64; - temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64; - step2[2] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[3] = WRAPLOW(dct_const_round_shift(temp2), 8); - step2[4] = WRAPLOW(step1[4] + step1[5], 8); - step2[5] = WRAPLOW(step1[4] - step1[5], 8); - step2[6] = WRAPLOW(-step1[6] + step1[7], 8); - step2[7] = WRAPLOW(step1[6] + step1[7], 8); - - step2[8] = step1[8]; - step2[15] = step1[15]; - temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64; - temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64; - step2[9] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[14] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64; - temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64; - step2[10] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[13] = WRAPLOW(dct_const_round_shift(temp2), 8); - step2[11] = step1[11]; - step2[12] = step1[12]; - - step2[16] = WRAPLOW(step1[16] + step1[19], 8); - step2[17] = WRAPLOW(step1[17] + step1[18], 8); - step2[18] = WRAPLOW(step1[17] - step1[18], 8); - step2[19] = WRAPLOW(step1[16] - step1[19], 8); - step2[20] = WRAPLOW(-step1[20] + step1[23], 8); - step2[21] = WRAPLOW(-step1[21] + step1[22], 8); - step2[22] = WRAPLOW(step1[21] + step1[22], 8); - step2[23] = WRAPLOW(step1[20] + step1[23], 8); - - step2[24] = WRAPLOW(step1[24] + step1[27], 8); - step2[25] = WRAPLOW(step1[25] + step1[26], 8); - step2[26] = WRAPLOW(step1[25] - step1[26], 8); - step2[27] = WRAPLOW(step1[24] - step1[27], 8); - step2[28] = WRAPLOW(-step1[28] + step1[31], 8); - step2[29] = 
WRAPLOW(-step1[29] + step1[30], 8); - step2[30] = WRAPLOW(step1[29] + step1[30], 8); - step2[31] = WRAPLOW(step1[28] + step1[31], 8); - - // stage 5 - step1[0] = WRAPLOW(step2[0] + step2[3], 8); - step1[1] = WRAPLOW(step2[1] + step2[2], 8); - step1[2] = WRAPLOW(step2[1] - step2[2], 8); - step1[3] = WRAPLOW(step2[0] - step2[3], 8); - step1[4] = step2[4]; - temp1 = (step2[6] - step2[5]) * cospi_16_64; - temp2 = (step2[5] + step2[6]) * cospi_16_64; - step1[5] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[6] = WRAPLOW(dct_const_round_shift(temp2), 8); - step1[7] = step2[7]; - - step1[8] = WRAPLOW(step2[8] + step2[11], 8); - step1[9] = WRAPLOW(step2[9] + step2[10], 8); - step1[10] = WRAPLOW(step2[9] - step2[10], 8); - step1[11] = WRAPLOW(step2[8] - step2[11], 8); - step1[12] = WRAPLOW(-step2[12] + step2[15], 8); - step1[13] = WRAPLOW(-step2[13] + step2[14], 8); - step1[14] = WRAPLOW(step2[13] + step2[14], 8); - step1[15] = WRAPLOW(step2[12] + step2[15], 8); - - step1[16] = step2[16]; - step1[17] = step2[17]; - temp1 = -step2[18] * cospi_8_64 + step2[29] * cospi_24_64; - temp2 = step2[18] * cospi_24_64 + step2[29] * cospi_8_64; - step1[18] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[29] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = -step2[19] * cospi_8_64 + step2[28] * cospi_24_64; - temp2 = step2[19] * cospi_24_64 + step2[28] * cospi_8_64; - step1[19] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[28] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = -step2[20] * cospi_24_64 - step2[27] * cospi_8_64; - temp2 = -step2[20] * cospi_8_64 + step2[27] * cospi_24_64; - step1[20] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[27] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = -step2[21] * cospi_24_64 - step2[26] * cospi_8_64; - temp2 = -step2[21] * cospi_8_64 + step2[26] * cospi_24_64; - step1[21] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[26] = WRAPLOW(dct_const_round_shift(temp2), 8); - step1[22] = step2[22]; - step1[23] = 
step2[23]; - step1[24] = step2[24]; - step1[25] = step2[25]; - step1[30] = step2[30]; - step1[31] = step2[31]; - - // stage 6 - step2[0] = WRAPLOW(step1[0] + step1[7], 8); - step2[1] = WRAPLOW(step1[1] + step1[6], 8); - step2[2] = WRAPLOW(step1[2] + step1[5], 8); - step2[3] = WRAPLOW(step1[3] + step1[4], 8); - step2[4] = WRAPLOW(step1[3] - step1[4], 8); - step2[5] = WRAPLOW(step1[2] - step1[5], 8); - step2[6] = WRAPLOW(step1[1] - step1[6], 8); - step2[7] = WRAPLOW(step1[0] - step1[7], 8); - step2[8] = step1[8]; - step2[9] = step1[9]; - temp1 = (-step1[10] + step1[13]) * cospi_16_64; - temp2 = (step1[10] + step1[13]) * cospi_16_64; - step2[10] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[13] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = (-step1[11] + step1[12]) * cospi_16_64; - temp2 = (step1[11] + step1[12]) * cospi_16_64; - step2[11] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[12] = WRAPLOW(dct_const_round_shift(temp2), 8); - step2[14] = step1[14]; - step2[15] = step1[15]; - - step2[16] = WRAPLOW(step1[16] + step1[23], 8); - step2[17] = WRAPLOW(step1[17] + step1[22], 8); - step2[18] = WRAPLOW(step1[18] + step1[21], 8); - step2[19] = WRAPLOW(step1[19] + step1[20], 8); - step2[20] = WRAPLOW(step1[19] - step1[20], 8); - step2[21] = WRAPLOW(step1[18] - step1[21], 8); - step2[22] = WRAPLOW(step1[17] - step1[22], 8); - step2[23] = WRAPLOW(step1[16] - step1[23], 8); - - step2[24] = WRAPLOW(-step1[24] + step1[31], 8); - step2[25] = WRAPLOW(-step1[25] + step1[30], 8); - step2[26] = WRAPLOW(-step1[26] + step1[29], 8); - step2[27] = WRAPLOW(-step1[27] + step1[28], 8); - step2[28] = WRAPLOW(step1[27] + step1[28], 8); - step2[29] = WRAPLOW(step1[26] + step1[29], 8); - step2[30] = WRAPLOW(step1[25] + step1[30], 8); - step2[31] = WRAPLOW(step1[24] + step1[31], 8); - - // stage 7 - step1[0] = WRAPLOW(step2[0] + step2[15], 8); - step1[1] = WRAPLOW(step2[1] + step2[14], 8); - step1[2] = WRAPLOW(step2[2] + step2[13], 8); - step1[3] = WRAPLOW(step2[3] + 
step2[12], 8); - step1[4] = WRAPLOW(step2[4] + step2[11], 8); - step1[5] = WRAPLOW(step2[5] + step2[10], 8); - step1[6] = WRAPLOW(step2[6] + step2[9], 8); - step1[7] = WRAPLOW(step2[7] + step2[8], 8); - step1[8] = WRAPLOW(step2[7] - step2[8], 8); - step1[9] = WRAPLOW(step2[6] - step2[9], 8); - step1[10] = WRAPLOW(step2[5] - step2[10], 8); - step1[11] = WRAPLOW(step2[4] - step2[11], 8); - step1[12] = WRAPLOW(step2[3] - step2[12], 8); - step1[13] = WRAPLOW(step2[2] - step2[13], 8); - step1[14] = WRAPLOW(step2[1] - step2[14], 8); - step1[15] = WRAPLOW(step2[0] - step2[15], 8); - - step1[16] = step2[16]; - step1[17] = step2[17]; - step1[18] = step2[18]; - step1[19] = step2[19]; - temp1 = (-step2[20] + step2[27]) * cospi_16_64; - temp2 = (step2[20] + step2[27]) * cospi_16_64; - step1[20] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[27] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = (-step2[21] + step2[26]) * cospi_16_64; - temp2 = (step2[21] + step2[26]) * cospi_16_64; - step1[21] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[26] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = (-step2[22] + step2[25]) * cospi_16_64; - temp2 = (step2[22] + step2[25]) * cospi_16_64; - step1[22] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[25] = WRAPLOW(dct_const_round_shift(temp2), 8); - temp1 = (-step2[23] + step2[24]) * cospi_16_64; - temp2 = (step2[23] + step2[24]) * cospi_16_64; - step1[23] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[24] = WRAPLOW(dct_const_round_shift(temp2), 8); - step1[28] = step2[28]; - step1[29] = step2[29]; - step1[30] = step2[30]; - step1[31] = step2[31]; - - // final stage - output[0] = WRAPLOW(step1[0] + step1[31], 8); - output[1] = WRAPLOW(step1[1] + step1[30], 8); - output[2] = WRAPLOW(step1[2] + step1[29], 8); - output[3] = WRAPLOW(step1[3] + step1[28], 8); - output[4] = WRAPLOW(step1[4] + step1[27], 8); - output[5] = WRAPLOW(step1[5] + step1[26], 8); - output[6] = WRAPLOW(step1[6] + step1[25], 8); - output[7] = 
WRAPLOW(step1[7] + step1[24], 8); - output[8] = WRAPLOW(step1[8] + step1[23], 8); - output[9] = WRAPLOW(step1[9] + step1[22], 8); - output[10] = WRAPLOW(step1[10] + step1[21], 8); - output[11] = WRAPLOW(step1[11] + step1[20], 8); - output[12] = WRAPLOW(step1[12] + step1[19], 8); - output[13] = WRAPLOW(step1[13] + step1[18], 8); - output[14] = WRAPLOW(step1[14] + step1[17], 8); - output[15] = WRAPLOW(step1[15] + step1[16], 8); - output[16] = WRAPLOW(step1[15] - step1[16], 8); - output[17] = WRAPLOW(step1[14] - step1[17], 8); - output[18] = WRAPLOW(step1[13] - step1[18], 8); - output[19] = WRAPLOW(step1[12] - step1[19], 8); - output[20] = WRAPLOW(step1[11] - step1[20], 8); - output[21] = WRAPLOW(step1[10] - step1[21], 8); - output[22] = WRAPLOW(step1[9] - step1[22], 8); - output[23] = WRAPLOW(step1[8] - step1[23], 8); - output[24] = WRAPLOW(step1[7] - step1[24], 8); - output[25] = WRAPLOW(step1[6] - step1[25], 8); - output[26] = WRAPLOW(step1[5] - step1[26], 8); - output[27] = WRAPLOW(step1[4] - step1[27], 8); - output[28] = WRAPLOW(step1[3] - step1[28], 8); - output[29] = WRAPLOW(step1[2] - step1[29], 8); - output[30] = WRAPLOW(step1[1] - step1[30], 8); - output[31] = WRAPLOW(step1[0] - step1[31], 8); -} - -void vp10_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, - int stride) { - tran_low_t out[32 * 32]; - tran_low_t *outptr = out; - int i, j; - tran_low_t temp_in[32], temp_out[32]; - - // Rows - for (i = 0; i < 32; ++i) { - int16_t zero_coeff[16]; - for (j = 0; j < 16; ++j) - zero_coeff[j] = input[2 * j] | input[2 * j + 1]; - for (j = 0; j < 8; ++j) - zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1]; - for (j = 0; j < 4; ++j) - zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1]; - for (j = 0; j < 2; ++j) - zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1]; - - if (zero_coeff[0] | zero_coeff[1]) - vp10_idct32_c(input, outptr); - else - memset(outptr, 0, sizeof(tran_low_t) * 32); - input += 32; - outptr += 32; - } - - // Columns 
- for (i = 0; i < 32; ++i) { - for (j = 0; j < 32; ++j) - temp_in[j] = out[j * 32 + i]; - vp10_idct32_c(temp_in, temp_out); - for (j = 0; j < 32; ++j) { - dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], - ROUND_POWER_OF_TWO(temp_out[j], 6)); - } - } -} - -void vp10_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest, - int stride) { - tran_low_t out[32 * 32] = {0}; - tran_low_t *outptr = out; - int i, j; - tran_low_t temp_in[32], temp_out[32]; - - // Rows - // only upper-left 8x8 has non-zero coeff - for (i = 0; i < 8; ++i) { - vp10_idct32_c(input, outptr); - input += 32; - outptr += 32; - } - - // Columns - for (i = 0; i < 32; ++i) { - for (j = 0; j < 32; ++j) - temp_in[j] = out[j * 32 + i]; - vp10_idct32_c(temp_in, temp_out); - for (j = 0; j < 32; ++j) { - dest[j * stride + i] = clip_pixel_add( - dest[j * stride + i], - ROUND_POWER_OF_TWO(temp_out[j], 6)); - } - } -} - -void vp10_idct32x32_1_add_c(const tran_low_t *input, - uint8_t *dest, - int stride) { - int i, j; - tran_high_t a1; - - tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), 8); - out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), 8); - a1 = ROUND_POWER_OF_TWO(out, 6); - - for (j = 0; j < 32; ++j) { - for (i = 0; i < 32; ++i) - dest[i] = clip_pixel_add(dest[i], a1); - dest += stride; - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, - int stride, int bd) { - /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds, - 0.5 shifts per pixel. 
*/ - int i; - tran_low_t output[16]; - tran_high_t a1, b1, c1, d1, e1; - const tran_low_t *ip = input; - tran_low_t *op = output; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - - for (i = 0; i < 4; i++) { - a1 = ip[0] >> UNIT_QUANT_SHIFT; - c1 = ip[1] >> UNIT_QUANT_SHIFT; - d1 = ip[2] >> UNIT_QUANT_SHIFT; - b1 = ip[3] >> UNIT_QUANT_SHIFT; - a1 += c1; - d1 -= b1; - e1 = (a1 - d1) >> 1; - b1 = e1 - b1; - c1 = e1 - c1; - a1 -= b1; - d1 += c1; - op[0] = WRAPLOW(a1, bd); - op[1] = WRAPLOW(b1, bd); - op[2] = WRAPLOW(c1, bd); - op[3] = WRAPLOW(d1, bd); - ip += 4; - op += 4; - } - - ip = output; - for (i = 0; i < 4; i++) { - a1 = ip[4 * 0]; - c1 = ip[4 * 1]; - d1 = ip[4 * 2]; - b1 = ip[4 * 3]; - a1 += c1; - d1 -= b1; - e1 = (a1 - d1) >> 1; - b1 = e1 - b1; - c1 = e1 - c1; - a1 -= b1; - d1 += c1; - dest[stride * 0] = highbd_clip_pixel_add(dest[stride * 0], a1, bd); - dest[stride * 1] = highbd_clip_pixel_add(dest[stride * 1], b1, bd); - dest[stride * 2] = highbd_clip_pixel_add(dest[stride * 2], c1, bd); - dest[stride * 3] = highbd_clip_pixel_add(dest[stride * 3], d1, bd); - - ip++; - dest++; - } -} - -void vp10_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8, - int dest_stride, int bd) { - int i; - tran_high_t a1, e1; - tran_low_t tmp[4]; - const tran_low_t *ip = in; - tran_low_t *op = tmp; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - (void) bd; - - a1 = ip[0] >> UNIT_QUANT_SHIFT; - e1 = a1 >> 1; - a1 -= e1; - op[0] = WRAPLOW(a1, bd); - op[1] = op[2] = op[3] = WRAPLOW(e1, bd); - - ip = tmp; - for (i = 0; i < 4; i++) { - e1 = ip[0] >> 1; - a1 = ip[0] - e1; - dest[dest_stride * 0] = highbd_clip_pixel_add( - dest[dest_stride * 0], a1, bd); - dest[dest_stride * 1] = highbd_clip_pixel_add( - dest[dest_stride * 1], e1, bd); - dest[dest_stride * 2] = highbd_clip_pixel_add( - dest[dest_stride * 2], e1, bd); - dest[dest_stride * 3] = highbd_clip_pixel_add( - dest[dest_stride * 3], e1, bd); - ip++; - dest++; - } -} - -void vp10_highbd_idct4_c(const tran_low_t *input, 
tran_low_t *output, int bd) { - tran_low_t step[4]; - tran_high_t temp1, temp2; - (void) bd; - // stage 1 - temp1 = (input[0] + input[2]) * cospi_16_64; - temp2 = (input[0] - input[2]) * cospi_16_64; - step[0] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step[1] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64; - temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64; - step[2] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step[3] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - // stage 2 - output[0] = WRAPLOW(step[0] + step[3], bd); - output[1] = WRAPLOW(step[1] + step[2], bd); - output[2] = WRAPLOW(step[1] - step[2], bd); - output[3] = WRAPLOW(step[0] - step[3], bd); -} - -void vp10_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, - int stride, int bd) { - tran_low_t out[4 * 4]; - tran_low_t *outptr = out; - int i, j; - tran_low_t temp_in[4], temp_out[4]; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - - // Rows - for (i = 0; i < 4; ++i) { - vp10_highbd_idct4_c(input, outptr, bd); - input += 4; - outptr += 4; - } - - // Columns - for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = out[j * 4 + i]; - vp10_highbd_idct4_c(temp_in, temp_out, bd); - for (j = 0; j < 4; ++j) { - dest[j * stride + i] = highbd_clip_pixel_add( - dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd); - } - } -} - -void vp10_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8, - int dest_stride, int bd) { - int i; - tran_high_t a1; - tran_low_t out = WRAPLOW( - highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd); - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - - out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd); - a1 = ROUND_POWER_OF_TWO(out, 4); - - for (i = 0; i < 4; i++) { - dest[0] = highbd_clip_pixel_add(dest[0], a1, bd); - dest[1] = highbd_clip_pixel_add(dest[1], a1, bd); - dest[2] = 
highbd_clip_pixel_add(dest[2], a1, bd); - dest[3] = highbd_clip_pixel_add(dest[3], a1, bd); - dest += dest_stride; - } -} - -void vp10_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) { - tran_low_t step1[8], step2[8]; - tran_high_t temp1, temp2; - // stage 1 - step1[0] = input[0]; - step1[2] = input[4]; - step1[1] = input[2]; - step1[3] = input[6]; - temp1 = input[1] * cospi_28_64 - input[7] * cospi_4_64; - temp2 = input[1] * cospi_4_64 + input[7] * cospi_28_64; - step1[4] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[7] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64; - temp2 = input[5] * cospi_20_64 + input[3] * cospi_12_64; - step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - // stage 2 & stage 3 - even half - vp10_highbd_idct4_c(step1, step1, bd); - - // stage 2 - odd half - step2[4] = WRAPLOW(step1[4] + step1[5], bd); - step2[5] = WRAPLOW(step1[4] - step1[5], bd); - step2[6] = WRAPLOW(-step1[6] + step1[7], bd); - step2[7] = WRAPLOW(step1[6] + step1[7], bd); - - // stage 3 - odd half - step1[4] = step2[4]; - temp1 = (step2[6] - step2[5]) * cospi_16_64; - temp2 = (step2[5] + step2[6]) * cospi_16_64; - step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - step1[7] = step2[7]; - - // stage 4 - output[0] = WRAPLOW(step1[0] + step1[7], bd); - output[1] = WRAPLOW(step1[1] + step1[6], bd); - output[2] = WRAPLOW(step1[2] + step1[5], bd); - output[3] = WRAPLOW(step1[3] + step1[4], bd); - output[4] = WRAPLOW(step1[3] - step1[4], bd); - output[5] = WRAPLOW(step1[2] - step1[5], bd); - output[6] = WRAPLOW(step1[1] - step1[6], bd); - output[7] = WRAPLOW(step1[0] - step1[7], bd); -} - -void vp10_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8, - int stride, int bd) { - tran_low_t out[8 * 8]; - 
tran_low_t *outptr = out; - int i, j; - tran_low_t temp_in[8], temp_out[8]; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - - // First transform rows. - for (i = 0; i < 8; ++i) { - vp10_highbd_idct8_c(input, outptr, bd); - input += 8; - outptr += 8; - } - - // Then transform columns. - for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; - vp10_highbd_idct8_c(temp_in, temp_out, bd); - for (j = 0; j < 8; ++j) { - dest[j * stride + i] = highbd_clip_pixel_add( - dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd); - } - } -} - -void vp10_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8, - int stride, int bd) { - int i, j; - tran_high_t a1; - tran_low_t out = WRAPLOW( - highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd); - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd); - a1 = ROUND_POWER_OF_TWO(out, 5); - for (j = 0; j < 8; ++j) { - for (i = 0; i < 8; ++i) - dest[i] = highbd_clip_pixel_add(dest[i], a1, bd); - dest += stride; - } -} - -void vp10_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) { - tran_high_t s0, s1, s2, s3, s4, s5, s6, s7; - - tran_low_t x0 = input[0]; - tran_low_t x1 = input[1]; - tran_low_t x2 = input[2]; - tran_low_t x3 = input[3]; - (void) bd; - - if (!(x0 | x1 | x2 | x3)) { - memset(output, 0, 4 * sizeof(*output)); - return; - } - - s0 = sinpi_1_9 * x0; - s1 = sinpi_2_9 * x0; - s2 = sinpi_3_9 * x1; - s3 = sinpi_4_9 * x2; - s4 = sinpi_1_9 * x2; - s5 = sinpi_2_9 * x3; - s6 = sinpi_4_9 * x3; - s7 = (tran_high_t)(x0 - x2 + x3); - - s0 = s0 + s3 + s5; - s1 = s1 - s4 - s6; - s3 = s2; - s2 = sinpi_3_9 * s7; - - // 1-D transform scaling factor is sqrt(2). - // The overall dynamic range is 14b (input) + 14b (multiplication scaling) - // + 1b (addition) = 29b. - // Hence the output bit depth is 15b. 
- output[0] = WRAPLOW(highbd_dct_const_round_shift(s0 + s3, bd), bd); - output[1] = WRAPLOW(highbd_dct_const_round_shift(s1 + s3, bd), bd); - output[2] = WRAPLOW(highbd_dct_const_round_shift(s2, bd), bd); - output[3] = WRAPLOW(highbd_dct_const_round_shift(s0 + s1 - s3, bd), bd); -} - -void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) { - tran_high_t s0, s1, s2, s3, s4, s5, s6, s7; - - tran_low_t x0 = input[7]; - tran_low_t x1 = input[0]; - tran_low_t x2 = input[5]; - tran_low_t x3 = input[2]; - tran_low_t x4 = input[3]; - tran_low_t x5 = input[4]; - tran_low_t x6 = input[1]; - tran_low_t x7 = input[6]; - (void) bd; - - if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) { - memset(output, 0, 8 * sizeof(*output)); - return; - } - - // stage 1 - s0 = cospi_2_64 * x0 + cospi_30_64 * x1; - s1 = cospi_30_64 * x0 - cospi_2_64 * x1; - s2 = cospi_10_64 * x2 + cospi_22_64 * x3; - s3 = cospi_22_64 * x2 - cospi_10_64 * x3; - s4 = cospi_18_64 * x4 + cospi_14_64 * x5; - s5 = cospi_14_64 * x4 - cospi_18_64 * x5; - s6 = cospi_26_64 * x6 + cospi_6_64 * x7; - s7 = cospi_6_64 * x6 - cospi_26_64 * x7; - - x0 = WRAPLOW(highbd_dct_const_round_shift(s0 + s4, bd), bd); - x1 = WRAPLOW(highbd_dct_const_round_shift(s1 + s5, bd), bd); - x2 = WRAPLOW(highbd_dct_const_round_shift(s2 + s6, bd), bd); - x3 = WRAPLOW(highbd_dct_const_round_shift(s3 + s7, bd), bd); - x4 = WRAPLOW(highbd_dct_const_round_shift(s0 - s4, bd), bd); - x5 = WRAPLOW(highbd_dct_const_round_shift(s1 - s5, bd), bd); - x6 = WRAPLOW(highbd_dct_const_round_shift(s2 - s6, bd), bd); - x7 = WRAPLOW(highbd_dct_const_round_shift(s3 - s7, bd), bd); - - // stage 2 - s0 = x0; - s1 = x1; - s2 = x2; - s3 = x3; - s4 = cospi_8_64 * x4 + cospi_24_64 * x5; - s5 = cospi_24_64 * x4 - cospi_8_64 * x5; - s6 = -cospi_24_64 * x6 + cospi_8_64 * x7; - s7 = cospi_8_64 * x6 + cospi_24_64 * x7; - - x0 = WRAPLOW(s0 + s2, bd); - x1 = WRAPLOW(s1 + s3, bd); - x2 = WRAPLOW(s0 - s2, bd); - x3 = WRAPLOW(s1 - s3, bd); - x4 = 
WRAPLOW(highbd_dct_const_round_shift(s4 + s6, bd), bd); - x5 = WRAPLOW(highbd_dct_const_round_shift(s5 + s7, bd), bd); - x6 = WRAPLOW(highbd_dct_const_round_shift(s4 - s6, bd), bd); - x7 = WRAPLOW(highbd_dct_const_round_shift(s5 - s7, bd), bd); - - // stage 3 - s2 = cospi_16_64 * (x2 + x3); - s3 = cospi_16_64 * (x2 - x3); - s6 = cospi_16_64 * (x6 + x7); - s7 = cospi_16_64 * (x6 - x7); - - x2 = WRAPLOW(highbd_dct_const_round_shift(s2, bd), bd); - x3 = WRAPLOW(highbd_dct_const_round_shift(s3, bd), bd); - x6 = WRAPLOW(highbd_dct_const_round_shift(s6, bd), bd); - x7 = WRAPLOW(highbd_dct_const_round_shift(s7, bd), bd); - - output[0] = WRAPLOW(x0, bd); - output[1] = WRAPLOW(-x4, bd); - output[2] = WRAPLOW(x6, bd); - output[3] = WRAPLOW(-x2, bd); - output[4] = WRAPLOW(x3, bd); - output[5] = WRAPLOW(-x7, bd); - output[6] = WRAPLOW(x5, bd); - output[7] = WRAPLOW(-x1, bd); -} - -void vp10_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8, - int stride, int bd) { - tran_low_t out[8 * 8] = { 0 }; - tran_low_t *outptr = out; - int i, j; - tran_low_t temp_in[8], temp_out[8]; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - - // First transform rows. - // Only first 4 row has non-zero coefs. - for (i = 0; i < 4; ++i) { - vp10_highbd_idct8_c(input, outptr, bd); - input += 8; - outptr += 8; - } - // Then transform columns. 
- for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; - vp10_highbd_idct8_c(temp_in, temp_out, bd); - for (j = 0; j < 8; ++j) { - dest[j * stride + i] = highbd_clip_pixel_add( - dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd); - } - } -} - -void vp10_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) { - tran_low_t step1[16], step2[16]; - tran_high_t temp1, temp2; - (void) bd; - - // stage 1 - step1[0] = input[0/2]; - step1[1] = input[16/2]; - step1[2] = input[8/2]; - step1[3] = input[24/2]; - step1[4] = input[4/2]; - step1[5] = input[20/2]; - step1[6] = input[12/2]; - step1[7] = input[28/2]; - step1[8] = input[2/2]; - step1[9] = input[18/2]; - step1[10] = input[10/2]; - step1[11] = input[26/2]; - step1[12] = input[6/2]; - step1[13] = input[22/2]; - step1[14] = input[14/2]; - step1[15] = input[30/2]; - - // stage 2 - step2[0] = step1[0]; - step2[1] = step1[1]; - step2[2] = step1[2]; - step2[3] = step1[3]; - step2[4] = step1[4]; - step2[5] = step1[5]; - step2[6] = step1[6]; - step2[7] = step1[7]; - - temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64; - temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64; - step2[8] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[15] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64; - temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64; - step2[9] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[14] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64; - temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64; - step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64; - temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64; - step2[11] = 
WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[12] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - // stage 3 - step1[0] = step2[0]; - step1[1] = step2[1]; - step1[2] = step2[2]; - step1[3] = step2[3]; - - temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64; - temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64; - step1[4] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[7] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64; - temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64; - step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - step1[8] = WRAPLOW(step2[8] + step2[9], bd); - step1[9] = WRAPLOW(step2[8] - step2[9], bd); - step1[10] = WRAPLOW(-step2[10] + step2[11], bd); - step1[11] = WRAPLOW(step2[10] + step2[11], bd); - step1[12] = WRAPLOW(step2[12] + step2[13], bd); - step1[13] = WRAPLOW(step2[12] - step2[13], bd); - step1[14] = WRAPLOW(-step2[14] + step2[15], bd); - step1[15] = WRAPLOW(step2[14] + step2[15], bd); - - // stage 4 - temp1 = (step1[0] + step1[1]) * cospi_16_64; - temp2 = (step1[0] - step1[1]) * cospi_16_64; - step2[0] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[1] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64; - temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64; - step2[2] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[3] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - step2[4] = WRAPLOW(step1[4] + step1[5], bd); - step2[5] = WRAPLOW(step1[4] - step1[5], bd); - step2[6] = WRAPLOW(-step1[6] + step1[7], bd); - step2[7] = WRAPLOW(step1[6] + step1[7], bd); - - step2[8] = step1[8]; - step2[15] = step1[15]; - temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64; - temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64; - step2[9] = 
WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[14] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64; - temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64; - step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - step2[11] = step1[11]; - step2[12] = step1[12]; - - // stage 5 - step1[0] = WRAPLOW(step2[0] + step2[3], bd); - step1[1] = WRAPLOW(step2[1] + step2[2], bd); - step1[2] = WRAPLOW(step2[1] - step2[2], bd); - step1[3] = WRAPLOW(step2[0] - step2[3], bd); - step1[4] = step2[4]; - temp1 = (step2[6] - step2[5]) * cospi_16_64; - temp2 = (step2[5] + step2[6]) * cospi_16_64; - step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - step1[7] = step2[7]; - - step1[8] = WRAPLOW(step2[8] + step2[11], bd); - step1[9] = WRAPLOW(step2[9] + step2[10], bd); - step1[10] = WRAPLOW(step2[9] - step2[10], bd); - step1[11] = WRAPLOW(step2[8] - step2[11], bd); - step1[12] = WRAPLOW(-step2[12] + step2[15], bd); - step1[13] = WRAPLOW(-step2[13] + step2[14], bd); - step1[14] = WRAPLOW(step2[13] + step2[14], bd); - step1[15] = WRAPLOW(step2[12] + step2[15], bd); - - // stage 6 - step2[0] = WRAPLOW(step1[0] + step1[7], bd); - step2[1] = WRAPLOW(step1[1] + step1[6], bd); - step2[2] = WRAPLOW(step1[2] + step1[5], bd); - step2[3] = WRAPLOW(step1[3] + step1[4], bd); - step2[4] = WRAPLOW(step1[3] - step1[4], bd); - step2[5] = WRAPLOW(step1[2] - step1[5], bd); - step2[6] = WRAPLOW(step1[1] - step1[6], bd); - step2[7] = WRAPLOW(step1[0] - step1[7], bd); - step2[8] = step1[8]; - step2[9] = step1[9]; - temp1 = (-step1[10] + step1[13]) * cospi_16_64; - temp2 = (step1[10] + step1[13]) * cospi_16_64; - step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = (-step1[11] 
+ step1[12]) * cospi_16_64; - temp2 = (step1[11] + step1[12]) * cospi_16_64; - step2[11] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[12] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - step2[14] = step1[14]; - step2[15] = step1[15]; - - // stage 7 - output[0] = WRAPLOW(step2[0] + step2[15], bd); - output[1] = WRAPLOW(step2[1] + step2[14], bd); - output[2] = WRAPLOW(step2[2] + step2[13], bd); - output[3] = WRAPLOW(step2[3] + step2[12], bd); - output[4] = WRAPLOW(step2[4] + step2[11], bd); - output[5] = WRAPLOW(step2[5] + step2[10], bd); - output[6] = WRAPLOW(step2[6] + step2[9], bd); - output[7] = WRAPLOW(step2[7] + step2[8], bd); - output[8] = WRAPLOW(step2[7] - step2[8], bd); - output[9] = WRAPLOW(step2[6] - step2[9], bd); - output[10] = WRAPLOW(step2[5] - step2[10], bd); - output[11] = WRAPLOW(step2[4] - step2[11], bd); - output[12] = WRAPLOW(step2[3] - step2[12], bd); - output[13] = WRAPLOW(step2[2] - step2[13], bd); - output[14] = WRAPLOW(step2[1] - step2[14], bd); - output[15] = WRAPLOW(step2[0] - step2[15], bd); -} - -void vp10_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8, - int stride, int bd) { - tran_low_t out[16 * 16]; - tran_low_t *outptr = out; - int i, j; - tran_low_t temp_in[16], temp_out[16]; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - - // First transform rows. - for (i = 0; i < 16; ++i) { - vp10_highbd_idct16_c(input, outptr, bd); - input += 16; - outptr += 16; - } - - // Then transform columns. 
- for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j * 16 + i]; - vp10_highbd_idct16_c(temp_in, temp_out, bd); - for (j = 0; j < 16; ++j) { - dest[j * stride + i] = highbd_clip_pixel_add( - dest[j * stride + i], - ROUND_POWER_OF_TWO(temp_out[j], 6), - bd); - } - } -} - -void vp10_highbd_iadst16_c(const tran_low_t *input, - tran_low_t *output, - int bd) { - tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8; - tran_high_t s9, s10, s11, s12, s13, s14, s15; - - tran_low_t x0 = input[15]; - tran_low_t x1 = input[0]; - tran_low_t x2 = input[13]; - tran_low_t x3 = input[2]; - tran_low_t x4 = input[11]; - tran_low_t x5 = input[4]; - tran_low_t x6 = input[9]; - tran_low_t x7 = input[6]; - tran_low_t x8 = input[7]; - tran_low_t x9 = input[8]; - tran_low_t x10 = input[5]; - tran_low_t x11 = input[10]; - tran_low_t x12 = input[3]; - tran_low_t x13 = input[12]; - tran_low_t x14 = input[1]; - tran_low_t x15 = input[14]; - (void) bd; - - if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 - | x9 | x10 | x11 | x12 | x13 | x14 | x15)) { - memset(output, 0, 16 * sizeof(*output)); - return; - } - - // stage 1 - s0 = x0 * cospi_1_64 + x1 * cospi_31_64; - s1 = x0 * cospi_31_64 - x1 * cospi_1_64; - s2 = x2 * cospi_5_64 + x3 * cospi_27_64; - s3 = x2 * cospi_27_64 - x3 * cospi_5_64; - s4 = x4 * cospi_9_64 + x5 * cospi_23_64; - s5 = x4 * cospi_23_64 - x5 * cospi_9_64; - s6 = x6 * cospi_13_64 + x7 * cospi_19_64; - s7 = x6 * cospi_19_64 - x7 * cospi_13_64; - s8 = x8 * cospi_17_64 + x9 * cospi_15_64; - s9 = x8 * cospi_15_64 - x9 * cospi_17_64; - s10 = x10 * cospi_21_64 + x11 * cospi_11_64; - s11 = x10 * cospi_11_64 - x11 * cospi_21_64; - s12 = x12 * cospi_25_64 + x13 * cospi_7_64; - s13 = x12 * cospi_7_64 - x13 * cospi_25_64; - s14 = x14 * cospi_29_64 + x15 * cospi_3_64; - s15 = x14 * cospi_3_64 - x15 * cospi_29_64; - - x0 = WRAPLOW(highbd_dct_const_round_shift(s0 + s8, bd), bd); - x1 = WRAPLOW(highbd_dct_const_round_shift(s1 + s9, bd), bd); - x2 = 
WRAPLOW(highbd_dct_const_round_shift(s2 + s10, bd), bd); - x3 = WRAPLOW(highbd_dct_const_round_shift(s3 + s11, bd), bd); - x4 = WRAPLOW(highbd_dct_const_round_shift(s4 + s12, bd), bd); - x5 = WRAPLOW(highbd_dct_const_round_shift(s5 + s13, bd), bd); - x6 = WRAPLOW(highbd_dct_const_round_shift(s6 + s14, bd), bd); - x7 = WRAPLOW(highbd_dct_const_round_shift(s7 + s15, bd), bd); - x8 = WRAPLOW(highbd_dct_const_round_shift(s0 - s8, bd), bd); - x9 = WRAPLOW(highbd_dct_const_round_shift(s1 - s9, bd), bd); - x10 = WRAPLOW(highbd_dct_const_round_shift(s2 - s10, bd), bd); - x11 = WRAPLOW(highbd_dct_const_round_shift(s3 - s11, bd), bd); - x12 = WRAPLOW(highbd_dct_const_round_shift(s4 - s12, bd), bd); - x13 = WRAPLOW(highbd_dct_const_round_shift(s5 - s13, bd), bd); - x14 = WRAPLOW(highbd_dct_const_round_shift(s6 - s14, bd), bd); - x15 = WRAPLOW(highbd_dct_const_round_shift(s7 - s15, bd), bd); - - // stage 2 - s0 = x0; - s1 = x1; - s2 = x2; - s3 = x3; - s4 = x4; - s5 = x5; - s6 = x6; - s7 = x7; - s8 = x8 * cospi_4_64 + x9 * cospi_28_64; - s9 = x8 * cospi_28_64 - x9 * cospi_4_64; - s10 = x10 * cospi_20_64 + x11 * cospi_12_64; - s11 = x10 * cospi_12_64 - x11 * cospi_20_64; - s12 = -x12 * cospi_28_64 + x13 * cospi_4_64; - s13 = x12 * cospi_4_64 + x13 * cospi_28_64; - s14 = -x14 * cospi_12_64 + x15 * cospi_20_64; - s15 = x14 * cospi_20_64 + x15 * cospi_12_64; - - x0 = WRAPLOW(s0 + s4, bd); - x1 = WRAPLOW(s1 + s5, bd); - x2 = WRAPLOW(s2 + s6, bd); - x3 = WRAPLOW(s3 + s7, bd); - x4 = WRAPLOW(s0 - s4, bd); - x5 = WRAPLOW(s1 - s5, bd); - x6 = WRAPLOW(s2 - s6, bd); - x7 = WRAPLOW(s3 - s7, bd); - x8 = WRAPLOW(highbd_dct_const_round_shift(s8 + s12, bd), bd); - x9 = WRAPLOW(highbd_dct_const_round_shift(s9 + s13, bd), bd); - x10 = WRAPLOW(highbd_dct_const_round_shift(s10 + s14, bd), bd); - x11 = WRAPLOW(highbd_dct_const_round_shift(s11 + s15, bd), bd); - x12 = WRAPLOW(highbd_dct_const_round_shift(s8 - s12, bd), bd); - x13 = WRAPLOW(highbd_dct_const_round_shift(s9 - s13, bd), bd); - x14 = 
WRAPLOW(highbd_dct_const_round_shift(s10 - s14, bd), bd); - x15 = WRAPLOW(highbd_dct_const_round_shift(s11 - s15, bd), bd); - - // stage 3 - s0 = x0; - s1 = x1; - s2 = x2; - s3 = x3; - s4 = x4 * cospi_8_64 + x5 * cospi_24_64; - s5 = x4 * cospi_24_64 - x5 * cospi_8_64; - s6 = -x6 * cospi_24_64 + x7 * cospi_8_64; - s7 = x6 * cospi_8_64 + x7 * cospi_24_64; - s8 = x8; - s9 = x9; - s10 = x10; - s11 = x11; - s12 = x12 * cospi_8_64 + x13 * cospi_24_64; - s13 = x12 * cospi_24_64 - x13 * cospi_8_64; - s14 = -x14 * cospi_24_64 + x15 * cospi_8_64; - s15 = x14 * cospi_8_64 + x15 * cospi_24_64; - - x0 = WRAPLOW(s0 + s2, bd); - x1 = WRAPLOW(s1 + s3, bd); - x2 = WRAPLOW(s0 - s2, bd); - x3 = WRAPLOW(s1 - s3, bd); - x4 = WRAPLOW(highbd_dct_const_round_shift(s4 + s6, bd), bd); - x5 = WRAPLOW(highbd_dct_const_round_shift(s5 + s7, bd), bd); - x6 = WRAPLOW(highbd_dct_const_round_shift(s4 - s6, bd), bd); - x7 = WRAPLOW(highbd_dct_const_round_shift(s5 - s7, bd), bd); - x8 = WRAPLOW(s8 + s10, bd); - x9 = WRAPLOW(s9 + s11, bd); - x10 = WRAPLOW(s8 - s10, bd); - x11 = WRAPLOW(s9 - s11, bd); - x12 = WRAPLOW(highbd_dct_const_round_shift(s12 + s14, bd), bd); - x13 = WRAPLOW(highbd_dct_const_round_shift(s13 + s15, bd), bd); - x14 = WRAPLOW(highbd_dct_const_round_shift(s12 - s14, bd), bd); - x15 = WRAPLOW(highbd_dct_const_round_shift(s13 - s15, bd), bd); - - // stage 4 - s2 = (- cospi_16_64) * (x2 + x3); - s3 = cospi_16_64 * (x2 - x3); - s6 = cospi_16_64 * (x6 + x7); - s7 = cospi_16_64 * (-x6 + x7); - s10 = cospi_16_64 * (x10 + x11); - s11 = cospi_16_64 * (-x10 + x11); - s14 = (- cospi_16_64) * (x14 + x15); - s15 = cospi_16_64 * (x14 - x15); - - x2 = WRAPLOW(highbd_dct_const_round_shift(s2, bd), bd); - x3 = WRAPLOW(highbd_dct_const_round_shift(s3, bd), bd); - x6 = WRAPLOW(highbd_dct_const_round_shift(s6, bd), bd); - x7 = WRAPLOW(highbd_dct_const_round_shift(s7, bd), bd); - x10 = WRAPLOW(highbd_dct_const_round_shift(s10, bd), bd); - x11 = WRAPLOW(highbd_dct_const_round_shift(s11, bd), bd); - x14 = 
WRAPLOW(highbd_dct_const_round_shift(s14, bd), bd); - x15 = WRAPLOW(highbd_dct_const_round_shift(s15, bd), bd); - - output[0] = WRAPLOW(x0, bd); - output[1] = WRAPLOW(-x8, bd); - output[2] = WRAPLOW(x12, bd); - output[3] = WRAPLOW(-x4, bd); - output[4] = WRAPLOW(x6, bd); - output[5] = WRAPLOW(x14, bd); - output[6] = WRAPLOW(x10, bd); - output[7] = WRAPLOW(x2, bd); - output[8] = WRAPLOW(x3, bd); - output[9] = WRAPLOW(x11, bd); - output[10] = WRAPLOW(x15, bd); - output[11] = WRAPLOW(x7, bd); - output[12] = WRAPLOW(x5, bd); - output[13] = WRAPLOW(-x13, bd); - output[14] = WRAPLOW(x9, bd); - output[15] = WRAPLOW(-x1, bd); -} - -void vp10_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8, - int stride, int bd) { - tran_low_t out[16 * 16] = { 0 }; - tran_low_t *outptr = out; - int i, j; - tran_low_t temp_in[16], temp_out[16]; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - - // First transform rows. Since all non-zero dct coefficients are in - // upper-left 4x4 area, we only need to calculate first 4 rows here. - for (i = 0; i < 4; ++i) { - vp10_highbd_idct16_c(input, outptr, bd); - input += 16; - outptr += 16; - } - - // Then transform columns. 
- for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j*16 + i]; - vp10_highbd_idct16_c(temp_in, temp_out, bd); - for (j = 0; j < 16; ++j) { - dest[j * stride + i] = highbd_clip_pixel_add( - dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd); - } - } -} - -void vp10_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8, - int stride, int bd) { - int i, j; - tran_high_t a1; - tran_low_t out = WRAPLOW( - highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd); - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - - out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd); - a1 = ROUND_POWER_OF_TWO(out, 6); - for (j = 0; j < 16; ++j) { - for (i = 0; i < 16; ++i) - dest[i] = highbd_clip_pixel_add(dest[i], a1, bd); - dest += stride; - } -} - -static void highbd_idct32_c(const tran_low_t *input, - tran_low_t *output, int bd) { - tran_low_t step1[32], step2[32]; - tran_high_t temp1, temp2; - (void) bd; - - // stage 1 - step1[0] = input[0]; - step1[1] = input[16]; - step1[2] = input[8]; - step1[3] = input[24]; - step1[4] = input[4]; - step1[5] = input[20]; - step1[6] = input[12]; - step1[7] = input[28]; - step1[8] = input[2]; - step1[9] = input[18]; - step1[10] = input[10]; - step1[11] = input[26]; - step1[12] = input[6]; - step1[13] = input[22]; - step1[14] = input[14]; - step1[15] = input[30]; - - temp1 = input[1] * cospi_31_64 - input[31] * cospi_1_64; - temp2 = input[1] * cospi_1_64 + input[31] * cospi_31_64; - step1[16] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[31] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - temp1 = input[17] * cospi_15_64 - input[15] * cospi_17_64; - temp2 = input[17] * cospi_17_64 + input[15] * cospi_15_64; - step1[17] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[30] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - temp1 = input[9] * cospi_23_64 - input[23] * cospi_9_64; - temp2 = input[9] * cospi_9_64 + input[23] * 
cospi_23_64; - step1[18] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[29] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - temp1 = input[25] * cospi_7_64 - input[7] * cospi_25_64; - temp2 = input[25] * cospi_25_64 + input[7] * cospi_7_64; - step1[19] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[28] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - temp1 = input[5] * cospi_27_64 - input[27] * cospi_5_64; - temp2 = input[5] * cospi_5_64 + input[27] * cospi_27_64; - step1[20] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[27] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - temp1 = input[21] * cospi_11_64 - input[11] * cospi_21_64; - temp2 = input[21] * cospi_21_64 + input[11] * cospi_11_64; - step1[21] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[26] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - temp1 = input[13] * cospi_19_64 - input[19] * cospi_13_64; - temp2 = input[13] * cospi_13_64 + input[19] * cospi_19_64; - step1[22] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[25] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - temp1 = input[29] * cospi_3_64 - input[3] * cospi_29_64; - temp2 = input[29] * cospi_29_64 + input[3] * cospi_3_64; - step1[23] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[24] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - // stage 2 - step2[0] = step1[0]; - step2[1] = step1[1]; - step2[2] = step1[2]; - step2[3] = step1[3]; - step2[4] = step1[4]; - step2[5] = step1[5]; - step2[6] = step1[6]; - step2[7] = step1[7]; - - temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64; - temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64; - step2[8] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[15] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64; - temp2 = step1[9] * cospi_18_64 + step1[14] * 
cospi_14_64; - step2[9] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[14] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64; - temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64; - step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64; - temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64; - step2[11] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[12] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - step2[16] = WRAPLOW(step1[16] + step1[17], bd); - step2[17] = WRAPLOW(step1[16] - step1[17], bd); - step2[18] = WRAPLOW(-step1[18] + step1[19], bd); - step2[19] = WRAPLOW(step1[18] + step1[19], bd); - step2[20] = WRAPLOW(step1[20] + step1[21], bd); - step2[21] = WRAPLOW(step1[20] - step1[21], bd); - step2[22] = WRAPLOW(-step1[22] + step1[23], bd); - step2[23] = WRAPLOW(step1[22] + step1[23], bd); - step2[24] = WRAPLOW(step1[24] + step1[25], bd); - step2[25] = WRAPLOW(step1[24] - step1[25], bd); - step2[26] = WRAPLOW(-step1[26] + step1[27], bd); - step2[27] = WRAPLOW(step1[26] + step1[27], bd); - step2[28] = WRAPLOW(step1[28] + step1[29], bd); - step2[29] = WRAPLOW(step1[28] - step1[29], bd); - step2[30] = WRAPLOW(-step1[30] + step1[31], bd); - step2[31] = WRAPLOW(step1[30] + step1[31], bd); - - // stage 3 - step1[0] = step2[0]; - step1[1] = step2[1]; - step1[2] = step2[2]; - step1[3] = step2[3]; - - temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64; - temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64; - step1[4] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[7] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64; - temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64; - step1[5] = 
WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - - step1[8] = WRAPLOW(step2[8] + step2[9], bd); - step1[9] = WRAPLOW(step2[8] - step2[9], bd); - step1[10] = WRAPLOW(-step2[10] + step2[11], bd); - step1[11] = WRAPLOW(step2[10] + step2[11], bd); - step1[12] = WRAPLOW(step2[12] + step2[13], bd); - step1[13] = WRAPLOW(step2[12] - step2[13], bd); - step1[14] = WRAPLOW(-step2[14] + step2[15], bd); - step1[15] = WRAPLOW(step2[14] + step2[15], bd); - - step1[16] = step2[16]; - step1[31] = step2[31]; - temp1 = -step2[17] * cospi_4_64 + step2[30] * cospi_28_64; - temp2 = step2[17] * cospi_28_64 + step2[30] * cospi_4_64; - step1[17] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[30] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = -step2[18] * cospi_28_64 - step2[29] * cospi_4_64; - temp2 = -step2[18] * cospi_4_64 + step2[29] * cospi_28_64; - step1[18] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[29] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - step1[19] = step2[19]; - step1[20] = step2[20]; - temp1 = -step2[21] * cospi_20_64 + step2[26] * cospi_12_64; - temp2 = step2[21] * cospi_12_64 + step2[26] * cospi_20_64; - step1[21] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[26] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = -step2[22] * cospi_12_64 - step2[25] * cospi_20_64; - temp2 = -step2[22] * cospi_20_64 + step2[25] * cospi_12_64; - step1[22] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[25] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - step1[23] = step2[23]; - step1[24] = step2[24]; - step1[27] = step2[27]; - step1[28] = step2[28]; - - // stage 4 - temp1 = (step1[0] + step1[1]) * cospi_16_64; - temp2 = (step1[0] - step1[1]) * cospi_16_64; - step2[0] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[1] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - 
temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64; - temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64; - step2[2] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[3] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - step2[4] = WRAPLOW(step1[4] + step1[5], bd); - step2[5] = WRAPLOW(step1[4] - step1[5], bd); - step2[6] = WRAPLOW(-step1[6] + step1[7], bd); - step2[7] = WRAPLOW(step1[6] + step1[7], bd); - - step2[8] = step1[8]; - step2[15] = step1[15]; - temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64; - temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64; - step2[9] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[14] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64; - temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64; - step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - step2[11] = step1[11]; - step2[12] = step1[12]; - - step2[16] = WRAPLOW(step1[16] + step1[19], bd); - step2[17] = WRAPLOW(step1[17] + step1[18], bd); - step2[18] = WRAPLOW(step1[17] - step1[18], bd); - step2[19] = WRAPLOW(step1[16] - step1[19], bd); - step2[20] = WRAPLOW(-step1[20] + step1[23], bd); - step2[21] = WRAPLOW(-step1[21] + step1[22], bd); - step2[22] = WRAPLOW(step1[21] + step1[22], bd); - step2[23] = WRAPLOW(step1[20] + step1[23], bd); - - step2[24] = WRAPLOW(step1[24] + step1[27], bd); - step2[25] = WRAPLOW(step1[25] + step1[26], bd); - step2[26] = WRAPLOW(step1[25] - step1[26], bd); - step2[27] = WRAPLOW(step1[24] - step1[27], bd); - step2[28] = WRAPLOW(-step1[28] + step1[31], bd); - step2[29] = WRAPLOW(-step1[29] + step1[30], bd); - step2[30] = WRAPLOW(step1[29] + step1[30], bd); - step2[31] = WRAPLOW(step1[28] + step1[31], bd); - - // stage 5 - step1[0] = WRAPLOW(step2[0] + step2[3], bd); - step1[1] = WRAPLOW(step2[1] + step2[2], bd); - step1[2] = WRAPLOW(step2[1] - 
step2[2], bd); - step1[3] = WRAPLOW(step2[0] - step2[3], bd); - step1[4] = step2[4]; - temp1 = (step2[6] - step2[5]) * cospi_16_64; - temp2 = (step2[5] + step2[6]) * cospi_16_64; - step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - step1[7] = step2[7]; - - step1[8] = WRAPLOW(step2[8] + step2[11], bd); - step1[9] = WRAPLOW(step2[9] + step2[10], bd); - step1[10] = WRAPLOW(step2[9] - step2[10], bd); - step1[11] = WRAPLOW(step2[8] - step2[11], bd); - step1[12] = WRAPLOW(-step2[12] + step2[15], bd); - step1[13] = WRAPLOW(-step2[13] + step2[14], bd); - step1[14] = WRAPLOW(step2[13] + step2[14], bd); - step1[15] = WRAPLOW(step2[12] + step2[15], bd); - - step1[16] = step2[16]; - step1[17] = step2[17]; - temp1 = -step2[18] * cospi_8_64 + step2[29] * cospi_24_64; - temp2 = step2[18] * cospi_24_64 + step2[29] * cospi_8_64; - step1[18] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[29] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = -step2[19] * cospi_8_64 + step2[28] * cospi_24_64; - temp2 = step2[19] * cospi_24_64 + step2[28] * cospi_8_64; - step1[19] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[28] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = -step2[20] * cospi_24_64 - step2[27] * cospi_8_64; - temp2 = -step2[20] * cospi_8_64 + step2[27] * cospi_24_64; - step1[20] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[27] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = -step2[21] * cospi_24_64 - step2[26] * cospi_8_64; - temp2 = -step2[21] * cospi_8_64 + step2[26] * cospi_24_64; - step1[21] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[26] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - step1[22] = step2[22]; - step1[23] = step2[23]; - step1[24] = step2[24]; - step1[25] = step2[25]; - step1[30] = step2[30]; - step1[31] = step2[31]; - - // stage 6 - step2[0] = 
WRAPLOW(step1[0] + step1[7], bd); - step2[1] = WRAPLOW(step1[1] + step1[6], bd); - step2[2] = WRAPLOW(step1[2] + step1[5], bd); - step2[3] = WRAPLOW(step1[3] + step1[4], bd); - step2[4] = WRAPLOW(step1[3] - step1[4], bd); - step2[5] = WRAPLOW(step1[2] - step1[5], bd); - step2[6] = WRAPLOW(step1[1] - step1[6], bd); - step2[7] = WRAPLOW(step1[0] - step1[7], bd); - step2[8] = step1[8]; - step2[9] = step1[9]; - temp1 = (-step1[10] + step1[13]) * cospi_16_64; - temp2 = (step1[10] + step1[13]) * cospi_16_64; - step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = (-step1[11] + step1[12]) * cospi_16_64; - temp2 = (step1[11] + step1[12]) * cospi_16_64; - step2[11] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[12] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - step2[14] = step1[14]; - step2[15] = step1[15]; - - step2[16] = WRAPLOW(step1[16] + step1[23], bd); - step2[17] = WRAPLOW(step1[17] + step1[22], bd); - step2[18] = WRAPLOW(step1[18] + step1[21], bd); - step2[19] = WRAPLOW(step1[19] + step1[20], bd); - step2[20] = WRAPLOW(step1[19] - step1[20], bd); - step2[21] = WRAPLOW(step1[18] - step1[21], bd); - step2[22] = WRAPLOW(step1[17] - step1[22], bd); - step2[23] = WRAPLOW(step1[16] - step1[23], bd); - - step2[24] = WRAPLOW(-step1[24] + step1[31], bd); - step2[25] = WRAPLOW(-step1[25] + step1[30], bd); - step2[26] = WRAPLOW(-step1[26] + step1[29], bd); - step2[27] = WRAPLOW(-step1[27] + step1[28], bd); - step2[28] = WRAPLOW(step1[27] + step1[28], bd); - step2[29] = WRAPLOW(step1[26] + step1[29], bd); - step2[30] = WRAPLOW(step1[25] + step1[30], bd); - step2[31] = WRAPLOW(step1[24] + step1[31], bd); - - // stage 7 - step1[0] = WRAPLOW(step2[0] + step2[15], bd); - step1[1] = WRAPLOW(step2[1] + step2[14], bd); - step1[2] = WRAPLOW(step2[2] + step2[13], bd); - step1[3] = WRAPLOW(step2[3] + step2[12], bd); - step1[4] = WRAPLOW(step2[4] + step2[11], bd); - 
step1[5] = WRAPLOW(step2[5] + step2[10], bd); - step1[6] = WRAPLOW(step2[6] + step2[9], bd); - step1[7] = WRAPLOW(step2[7] + step2[8], bd); - step1[8] = WRAPLOW(step2[7] - step2[8], bd); - step1[9] = WRAPLOW(step2[6] - step2[9], bd); - step1[10] = WRAPLOW(step2[5] - step2[10], bd); - step1[11] = WRAPLOW(step2[4] - step2[11], bd); - step1[12] = WRAPLOW(step2[3] - step2[12], bd); - step1[13] = WRAPLOW(step2[2] - step2[13], bd); - step1[14] = WRAPLOW(step2[1] - step2[14], bd); - step1[15] = WRAPLOW(step2[0] - step2[15], bd); - - step1[16] = step2[16]; - step1[17] = step2[17]; - step1[18] = step2[18]; - step1[19] = step2[19]; - temp1 = (-step2[20] + step2[27]) * cospi_16_64; - temp2 = (step2[20] + step2[27]) * cospi_16_64; - step1[20] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[27] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = (-step2[21] + step2[26]) * cospi_16_64; - temp2 = (step2[21] + step2[26]) * cospi_16_64; - step1[21] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[26] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = (-step2[22] + step2[25]) * cospi_16_64; - temp2 = (step2[22] + step2[25]) * cospi_16_64; - step1[22] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[25] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - temp1 = (-step2[23] + step2[24]) * cospi_16_64; - temp2 = (step2[23] + step2[24]) * cospi_16_64; - step1[23] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[24] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - step1[28] = step2[28]; - step1[29] = step2[29]; - step1[30] = step2[30]; - step1[31] = step2[31]; - - // final stage - output[0] = WRAPLOW(step1[0] + step1[31], bd); - output[1] = WRAPLOW(step1[1] + step1[30], bd); - output[2] = WRAPLOW(step1[2] + step1[29], bd); - output[3] = WRAPLOW(step1[3] + step1[28], bd); - output[4] = WRAPLOW(step1[4] + step1[27], bd); - output[5] = WRAPLOW(step1[5] + step1[26], bd); - output[6] = 
WRAPLOW(step1[6] + step1[25], bd); - output[7] = WRAPLOW(step1[7] + step1[24], bd); - output[8] = WRAPLOW(step1[8] + step1[23], bd); - output[9] = WRAPLOW(step1[9] + step1[22], bd); - output[10] = WRAPLOW(step1[10] + step1[21], bd); - output[11] = WRAPLOW(step1[11] + step1[20], bd); - output[12] = WRAPLOW(step1[12] + step1[19], bd); - output[13] = WRAPLOW(step1[13] + step1[18], bd); - output[14] = WRAPLOW(step1[14] + step1[17], bd); - output[15] = WRAPLOW(step1[15] + step1[16], bd); - output[16] = WRAPLOW(step1[15] - step1[16], bd); - output[17] = WRAPLOW(step1[14] - step1[17], bd); - output[18] = WRAPLOW(step1[13] - step1[18], bd); - output[19] = WRAPLOW(step1[12] - step1[19], bd); - output[20] = WRAPLOW(step1[11] - step1[20], bd); - output[21] = WRAPLOW(step1[10] - step1[21], bd); - output[22] = WRAPLOW(step1[9] - step1[22], bd); - output[23] = WRAPLOW(step1[8] - step1[23], bd); - output[24] = WRAPLOW(step1[7] - step1[24], bd); - output[25] = WRAPLOW(step1[6] - step1[25], bd); - output[26] = WRAPLOW(step1[5] - step1[26], bd); - output[27] = WRAPLOW(step1[4] - step1[27], bd); - output[28] = WRAPLOW(step1[3] - step1[28], bd); - output[29] = WRAPLOW(step1[2] - step1[29], bd); - output[30] = WRAPLOW(step1[1] - step1[30], bd); - output[31] = WRAPLOW(step1[0] - step1[31], bd); -} - -void vp10_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8, - int stride, int bd) { - tran_low_t out[32 * 32]; - tran_low_t *outptr = out; - int i, j; - tran_low_t temp_in[32], temp_out[32]; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - - // Rows - for (i = 0; i < 32; ++i) { - tran_low_t zero_coeff[16]; - for (j = 0; j < 16; ++j) - zero_coeff[j] = input[2 * j] | input[2 * j + 1]; - for (j = 0; j < 8; ++j) - zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1]; - for (j = 0; j < 4; ++j) - zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1]; - for (j = 0; j < 2; ++j) - zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1]; - - if (zero_coeff[0] | 
zero_coeff[1]) - highbd_idct32_c(input, outptr, bd); - else - memset(outptr, 0, sizeof(tran_low_t) * 32); - input += 32; - outptr += 32; - } - - // Columns - for (i = 0; i < 32; ++i) { - for (j = 0; j < 32; ++j) - temp_in[j] = out[j * 32 + i]; - highbd_idct32_c(temp_in, temp_out, bd); - for (j = 0; j < 32; ++j) { - dest[j * stride + i] = highbd_clip_pixel_add( - dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd); - } - } -} - -void vp10_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8, - int stride, int bd) { - tran_low_t out[32 * 32] = {0}; - tran_low_t *outptr = out; - int i, j; - tran_low_t temp_in[32], temp_out[32]; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - - // Rows - // Only upper-left 8x8 has non-zero coeff. - for (i = 0; i < 8; ++i) { - highbd_idct32_c(input, outptr, bd); - input += 32; - outptr += 32; - } - // Columns - for (i = 0; i < 32; ++i) { - for (j = 0; j < 32; ++j) - temp_in[j] = out[j * 32 + i]; - highbd_idct32_c(temp_in, temp_out, bd); - for (j = 0; j < 32; ++j) { - dest[j * stride + i] = highbd_clip_pixel_add( - dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd); - } - } -} - -void vp10_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8, - int stride, int bd) { - int i, j; - int a1; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - - tran_low_t out = WRAPLOW( - highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd); - out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd); - a1 = ROUND_POWER_OF_TWO(out, 6); - - for (j = 0; j < 32; ++j) { - for (i = 0; i < 32; ++i) - dest[i] = highbd_clip_pixel_add(dest[i], a1, bd); - dest += stride; - } -} -#endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vp10/common/vp10_inv_txfm.h b/libs/libvpx/vp10/common/vp10_inv_txfm.h deleted file mode 100644 index 52611acbd2..0000000000 --- a/libs/libvpx/vp10/common/vp10_inv_txfm.h +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. 
All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VPX_DSP_INV_TXFM_H_ -#define VPX_DSP_INV_TXFM_H_ - -#include - -#include "./vpx_config.h" -#include "vpx_dsp/txfm_common.h" -#include "vpx_ports/mem.h" - -#ifdef __cplusplus -extern "C" { -#endif - -static INLINE tran_low_t check_range(tran_high_t input) { -#if CONFIG_COEFFICIENT_RANGE_CHECKING - // For valid VP9 input streams, intermediate stage coefficients should always - // stay within the range of a signed 16 bit integer. Coefficients can go out - // of this range for invalid/corrupt VP9 streams. However, strictly checking - // this range for every intermediate coefficient can burdensome for a decoder, - // therefore the following assertion is only enabled when configured with - // --enable-coefficient-range-checking. 
- assert(INT16_MIN <= input); - assert(input <= INT16_MAX); -#endif // CONFIG_COEFFICIENT_RANGE_CHECKING - return (tran_low_t)input; -} - -static INLINE tran_low_t dct_const_round_shift(tran_high_t input) { - tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS); - return check_range(rv); -} - -#if CONFIG_VP9_HIGHBITDEPTH -static INLINE tran_low_t highbd_check_range(tran_high_t input, - int bd) { -#if CONFIG_COEFFICIENT_RANGE_CHECKING - // For valid highbitdepth VP9 streams, intermediate stage coefficients will - // stay within the ranges: - // - 8 bit: signed 16 bit integer - // - 10 bit: signed 18 bit integer - // - 12 bit: signed 20 bit integer - const int32_t int_max = (1 << (7 + bd)) - 1; - const int32_t int_min = -int_max - 1; - assert(int_min <= input); - assert(input <= int_max); - (void) int_min; -#endif // CONFIG_COEFFICIENT_RANGE_CHECKING - (void) bd; - return (tran_low_t)input; -} - -static INLINE tran_low_t highbd_dct_const_round_shift(tran_high_t input, - int bd) { - tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS); - return highbd_check_range(rv, bd); -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -#if CONFIG_EMULATE_HARDWARE -// When CONFIG_EMULATE_HARDWARE is 1 the transform performs a -// non-normative method to handle overflows. A stream that causes -// overflows in the inverse transform is considered invalid in VP9, -// and a hardware implementer is free to choose any reasonable -// method to handle overflows. However to aid in hardware -// verification they can use a specific implementation of the -// WRAPLOW() macro below that is identical to their intended -// hardware implementation (and also use configure options to trigger -// the C-implementation of the transform). -// -// The particular WRAPLOW implementation below performs strict -// overflow wrapping to match common hardware implementations. 
-// bd of 8 uses trans_low with 16bits, need to remove 16bits -// bd of 10 uses trans_low with 18bits, need to remove 14bits -// bd of 12 uses trans_low with 20bits, need to remove 12bits -// bd of x uses trans_low with 8+x bits, need to remove 24-x bits -#define WRAPLOW(x, bd) ((((int32_t)(x)) << (24 - bd)) >> (24 - bd)) -#else -#define WRAPLOW(x, bd) ((int32_t)(x)) -#endif // CONFIG_EMULATE_HARDWARE - -void vp10_idct4_c(const tran_low_t *input, tran_low_t *output); -void vp10_idct8_c(const tran_low_t *input, tran_low_t *output); -void vp10_idct16_c(const tran_low_t *input, tran_low_t *output); -void vp10_idct32_c(const tran_low_t *input, tran_low_t *output); -void vp10_iadst4_c(const tran_low_t *input, tran_low_t *output); -void vp10_iadst8_c(const tran_low_t *input, tran_low_t *output); -void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output); - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd); -void vp10_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd); -void vp10_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd); - -void vp10_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd); -void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd); -void vp10_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd); - -static INLINE uint16_t highbd_clip_pixel_add(uint16_t dest, tran_high_t trans, - int bd) { - trans = WRAPLOW(trans, bd); - return clip_pixel_highbd(WRAPLOW(dest + trans, bd), bd); -} -#endif - -static INLINE uint8_t clip_pixel_add(uint8_t dest, tran_high_t trans) { - trans = WRAPLOW(trans, 8); - return clip_pixel(WRAPLOW(dest + trans, 8)); -} -#ifdef __cplusplus -} // extern "C" -#endif -#endif // VPX_DSP_INV_TXFM_H_ diff --git a/libs/libvpx/vp10/common/vp10_rtcd.c b/libs/libvpx/vp10/common/vp10_rtcd.c deleted file mode 100644 index 36b294ae8a..0000000000 --- a/libs/libvpx/vp10/common/vp10_rtcd.c +++ 
/dev/null @@ -1,19 +0,0 @@ -/* - * Copyright (c) 2011 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ -#include "./vpx_config.h" -#define RTCD_C -#include "./vp10_rtcd.h" -#include "vpx_ports/vpx_once.h" - -void vp10_rtcd() { - // TODO(JBB): Remove this once, by insuring that both the encoder and - // decoder setup functions are protected by once(); - once(setup_rtcd_internal); -} diff --git a/libs/libvpx/vp10/common/vp10_rtcd_defs.pl b/libs/libvpx/vp10/common/vp10_rtcd_defs.pl deleted file mode 100644 index 9860baedfe..0000000000 --- a/libs/libvpx/vp10/common/vp10_rtcd_defs.pl +++ /dev/null @@ -1,656 +0,0 @@ -sub vp10_common_forward_decls() { -print <> 8 - paddd xmm1, [GLOBAL(t128)] - psrld xmm1, 8 - mov rax, arg(4) - - movd [rax], xmm1 - - - ; begin epilog - pop rdi - pop rsi - RESTORE_GOT - UNSHADOW_ARGS - pop rbp - ret - -SECTION_RODATA -align 16 -t128: -%ifndef __NASM_VER__ - ddq 128 -%elif CONFIG_BIG_ENDIAN - dq 0, 128 -%else - dq 128, 0 -%endif -align 16 -tMFQE: ; 1 << MFQE_PRECISION - times 8 dw 0x10 -align 16 -tMFQE_round: ; 1 << (MFQE_PRECISION - 1) - times 8 dw 0x08 diff --git a/libs/libvpx/vp10/common/x86/postproc_sse2.asm b/libs/libvpx/vp10/common/x86/postproc_sse2.asm deleted file mode 100644 index d5f8e927b1..0000000000 --- a/libs/libvpx/vp10/common/x86/postproc_sse2.asm +++ /dev/null @@ -1,694 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. 
All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - - -%include "vpx_ports/x86_abi_support.asm" - -;void vp10_post_proc_down_and_across_xmm -;( -; unsigned char *src_ptr, -; unsigned char *dst_ptr, -; int src_pixels_per_line, -; int dst_pixels_per_line, -; int rows, -; int cols, -; int flimit -;) -global sym(vp10_post_proc_down_and_across_xmm) PRIVATE -sym(vp10_post_proc_down_and_across_xmm): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 7 - SAVE_XMM 7 - GET_GOT rbx - push rsi - push rdi - ; end prolog - -%if ABI_IS_32BIT=1 && CONFIG_PIC=1 - ALIGN_STACK 16, rax - ; move the global rd onto the stack, since we don't have enough registers - ; to do PIC addressing - movdqa xmm0, [GLOBAL(rd42)] - sub rsp, 16 - movdqa [rsp], xmm0 -%define RD42 [rsp] -%else -%define RD42 [GLOBAL(rd42)] -%endif - - - movd xmm2, dword ptr arg(6) ;flimit - punpcklwd xmm2, xmm2 - punpckldq xmm2, xmm2 - punpcklqdq xmm2, xmm2 - - mov rsi, arg(0) ;src_ptr - mov rdi, arg(1) ;dst_ptr - - movsxd rcx, DWORD PTR arg(4) ;rows - movsxd rax, DWORD PTR arg(2) ;src_pixels_per_line ; destination pitch? 
- pxor xmm0, xmm0 ; mm0 = 00000000 - -.nextrow: - - xor rdx, rdx ; clear out rdx for use as loop counter -.nextcol: - movq xmm3, QWORD PTR [rsi] ; mm4 = r0 p0..p7 - punpcklbw xmm3, xmm0 ; mm3 = p0..p3 - movdqa xmm1, xmm3 ; mm1 = p0..p3 - psllw xmm3, 2 ; - - movq xmm5, QWORD PTR [rsi + rax] ; mm4 = r1 p0..p7 - punpcklbw xmm5, xmm0 ; mm5 = r1 p0..p3 - paddusw xmm3, xmm5 ; mm3 += mm6 - - ; thresholding - movdqa xmm7, xmm1 ; mm7 = r0 p0..p3 - psubusw xmm7, xmm5 ; mm7 = r0 p0..p3 - r1 p0..p3 - psubusw xmm5, xmm1 ; mm5 = r1 p0..p3 - r0 p0..p3 - paddusw xmm7, xmm5 ; mm7 = abs(r0 p0..p3 - r1 p0..p3) - pcmpgtw xmm7, xmm2 - - movq xmm5, QWORD PTR [rsi + 2*rax] ; mm4 = r2 p0..p7 - punpcklbw xmm5, xmm0 ; mm5 = r2 p0..p3 - paddusw xmm3, xmm5 ; mm3 += mm5 - - ; thresholding - movdqa xmm6, xmm1 ; mm6 = r0 p0..p3 - psubusw xmm6, xmm5 ; mm6 = r0 p0..p3 - r2 p0..p3 - psubusw xmm5, xmm1 ; mm5 = r2 p0..p3 - r2 p0..p3 - paddusw xmm6, xmm5 ; mm6 = abs(r0 p0..p3 - r2 p0..p3) - pcmpgtw xmm6, xmm2 - por xmm7, xmm6 ; accumulate thresholds - - - neg rax - movq xmm5, QWORD PTR [rsi+2*rax] ; mm4 = r-2 p0..p7 - punpcklbw xmm5, xmm0 ; mm5 = r-2 p0..p3 - paddusw xmm3, xmm5 ; mm3 += mm5 - - ; thresholding - movdqa xmm6, xmm1 ; mm6 = r0 p0..p3 - psubusw xmm6, xmm5 ; mm6 = p0..p3 - r-2 p0..p3 - psubusw xmm5, xmm1 ; mm5 = r-2 p0..p3 - p0..p3 - paddusw xmm6, xmm5 ; mm6 = abs(r0 p0..p3 - r-2 p0..p3) - pcmpgtw xmm6, xmm2 - por xmm7, xmm6 ; accumulate thresholds - - movq xmm4, QWORD PTR [rsi+rax] ; mm4 = r-1 p0..p7 - punpcklbw xmm4, xmm0 ; mm4 = r-1 p0..p3 - paddusw xmm3, xmm4 ; mm3 += mm5 - - ; thresholding - movdqa xmm6, xmm1 ; mm6 = r0 p0..p3 - psubusw xmm6, xmm4 ; mm6 = p0..p3 - r-2 p0..p3 - psubusw xmm4, xmm1 ; mm5 = r-1 p0..p3 - p0..p3 - paddusw xmm6, xmm4 ; mm6 = abs(r0 p0..p3 - r-1 p0..p3) - pcmpgtw xmm6, xmm2 - por xmm7, xmm6 ; accumulate thresholds - - - paddusw xmm3, RD42 ; mm3 += round value - psraw xmm3, 3 ; mm3 /= 8 - - pand xmm1, xmm7 ; mm1 select vals > thresh from source - pandn xmm7, 
xmm3 ; mm7 select vals < thresh from blurred result - paddusw xmm1, xmm7 ; combination - - packuswb xmm1, xmm0 ; pack to bytes - movq QWORD PTR [rdi], xmm1 ; - - neg rax ; pitch is positive - add rsi, 8 - add rdi, 8 - - add rdx, 8 - cmp edx, dword arg(5) ;cols - - jl .nextcol - - ; done with the all cols, start the across filtering in place - sub rsi, rdx - sub rdi, rdx - - xor rdx, rdx - movq mm0, QWORD PTR [rdi-8]; - -.acrossnextcol: - movq xmm7, QWORD PTR [rdi +rdx -2] - movd xmm4, DWORD PTR [rdi +rdx +6] - - pslldq xmm4, 8 - por xmm4, xmm7 - - movdqa xmm3, xmm4 - psrldq xmm3, 2 - punpcklbw xmm3, xmm0 ; mm3 = p0..p3 - movdqa xmm1, xmm3 ; mm1 = p0..p3 - psllw xmm3, 2 - - - movdqa xmm5, xmm4 - psrldq xmm5, 3 - punpcklbw xmm5, xmm0 ; mm5 = p1..p4 - paddusw xmm3, xmm5 ; mm3 += mm6 - - ; thresholding - movdqa xmm7, xmm1 ; mm7 = p0..p3 - psubusw xmm7, xmm5 ; mm7 = p0..p3 - p1..p4 - psubusw xmm5, xmm1 ; mm5 = p1..p4 - p0..p3 - paddusw xmm7, xmm5 ; mm7 = abs(p0..p3 - p1..p4) - pcmpgtw xmm7, xmm2 - - movdqa xmm5, xmm4 - psrldq xmm5, 4 - punpcklbw xmm5, xmm0 ; mm5 = p2..p5 - paddusw xmm3, xmm5 ; mm3 += mm5 - - ; thresholding - movdqa xmm6, xmm1 ; mm6 = p0..p3 - psubusw xmm6, xmm5 ; mm6 = p0..p3 - p1..p4 - psubusw xmm5, xmm1 ; mm5 = p1..p4 - p0..p3 - paddusw xmm6, xmm5 ; mm6 = abs(p0..p3 - p1..p4) - pcmpgtw xmm6, xmm2 - por xmm7, xmm6 ; accumulate thresholds - - - movdqa xmm5, xmm4 ; mm5 = p-2..p5 - punpcklbw xmm5, xmm0 ; mm5 = p-2..p1 - paddusw xmm3, xmm5 ; mm3 += mm5 - - ; thresholding - movdqa xmm6, xmm1 ; mm6 = p0..p3 - psubusw xmm6, xmm5 ; mm6 = p0..p3 - p1..p4 - psubusw xmm5, xmm1 ; mm5 = p1..p4 - p0..p3 - paddusw xmm6, xmm5 ; mm6 = abs(p0..p3 - p1..p4) - pcmpgtw xmm6, xmm2 - por xmm7, xmm6 ; accumulate thresholds - - psrldq xmm4, 1 ; mm4 = p-1..p5 - punpcklbw xmm4, xmm0 ; mm4 = p-1..p2 - paddusw xmm3, xmm4 ; mm3 += mm5 - - ; thresholding - movdqa xmm6, xmm1 ; mm6 = p0..p3 - psubusw xmm6, xmm4 ; mm6 = p0..p3 - p1..p4 - psubusw xmm4, xmm1 ; mm5 = p1..p4 - p0..p3 - 
paddusw xmm6, xmm4 ; mm6 = abs(p0..p3 - p1..p4) - pcmpgtw xmm6, xmm2 - por xmm7, xmm6 ; accumulate thresholds - - paddusw xmm3, RD42 ; mm3 += round value - psraw xmm3, 3 ; mm3 /= 8 - - pand xmm1, xmm7 ; mm1 select vals > thresh from source - pandn xmm7, xmm3 ; mm7 select vals < thresh from blurred result - paddusw xmm1, xmm7 ; combination - - packuswb xmm1, xmm0 ; pack to bytes - movq QWORD PTR [rdi+rdx-8], mm0 ; store previous four bytes - movdq2q mm0, xmm1 - - add rdx, 8 - cmp edx, dword arg(5) ;cols - jl .acrossnextcol; - - ; last 8 pixels - movq QWORD PTR [rdi+rdx-8], mm0 - - ; done with this rwo - add rsi,rax ; next line - mov eax, dword arg(3) ;dst_pixels_per_line ; destination pitch? - add rdi,rax ; next destination - mov eax, dword arg(2) ;src_pixels_per_line ; destination pitch? - - dec rcx ; decrement count - jnz .nextrow ; next row - -%if ABI_IS_32BIT=1 && CONFIG_PIC=1 - add rsp,16 - pop rsp -%endif - ; begin epilog - pop rdi - pop rsi - RESTORE_GOT - RESTORE_XMM - UNSHADOW_ARGS - pop rbp - ret -%undef RD42 - - -;void vp10_mbpost_proc_down_xmm(unsigned char *dst, -; int pitch, int rows, int cols,int flimit) -extern sym(vp10_rv) -global sym(vp10_mbpost_proc_down_xmm) PRIVATE -sym(vp10_mbpost_proc_down_xmm): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 5 - SAVE_XMM 7 - GET_GOT rbx - push rsi - push rdi - ; end prolog - - ALIGN_STACK 16, rax - sub rsp, 128+16 - - ; unsigned char d[16][8] at [rsp] - ; create flimit2 at [rsp+128] - mov eax, dword ptr arg(4) ;flimit - mov [rsp+128], eax - mov [rsp+128+4], eax - mov [rsp+128+8], eax - mov [rsp+128+12], eax -%define flimit4 [rsp+128] - -%if ABI_IS_32BIT=0 - lea r8, [GLOBAL(sym(vp10_rv))] -%endif - - ;rows +=8; - add dword arg(2), 8 - - ;for(c=0; c // SSE2 - -#include "./vp10_rtcd.h" -#include "vp10/common/vp10_fwd_txfm.h" -#include "vpx_dsp/txfm_common.h" -#include "vpx_dsp/x86/txfm_common_sse2.h" - -// TODO(jingning) The high bit-depth version needs re-work for performance. 
-// The current SSE2 implementation also causes cross reference to the static -// functions in the C implementation file. -#if DCT_HIGH_BIT_DEPTH -#define ADD_EPI16 _mm_adds_epi16 -#define SUB_EPI16 _mm_subs_epi16 -#if FDCT32x32_HIGH_PRECISION -void vp10_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) { - int i, j; - for (i = 0; i < 32; ++i) { - tran_high_t temp_in[32], temp_out[32]; - for (j = 0; j < 32; ++j) - temp_in[j] = intermediate[j * 32 + i]; - vp10_fdct32(temp_in, temp_out, 0); - for (j = 0; j < 32; ++j) - out[j + i * 32] = - (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2); - } -} - #define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_c - #define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rows_c -#else -void vp10_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) { - int i, j; - for (i = 0; i < 32; ++i) { - tran_high_t temp_in[32], temp_out[32]; - for (j = 0; j < 32; ++j) - temp_in[j] = intermediate[j * 32 + i]; - vp10_fdct32(temp_in, temp_out, 1); - for (j = 0; j < 32; ++j) - out[j + i * 32] = (tran_low_t)temp_out[j]; - } -} - #define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_rd_c - #define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rd_rows_c -#endif // FDCT32x32_HIGH_PRECISION -#else -#define ADD_EPI16 _mm_add_epi16 -#define SUB_EPI16 _mm_sub_epi16 -#endif // DCT_HIGH_BIT_DEPTH - - -void FDCT32x32_2D(const int16_t *input, - tran_low_t *output_org, int stride) { - // Calculate pre-multiplied strides - const int str1 = stride; - const int str2 = 2 * stride; - const int str3 = 2 * stride + str1; - // We need an intermediate buffer between passes. - DECLARE_ALIGNED(16, int16_t, intermediate[32 * 32]); - // Constants - // When we use them, in one case, they are all the same. In all others - // it's a pair of them that we need to repeat four times. This is done - // by constructing the 32 bit constant corresponding to that pair. 
- const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64); - const __m128i k__cospi_p16_m16 = pair_set_epi16(+cospi_16_64, -cospi_16_64); - const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); - const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64); - const __m128i k__cospi_p24_p08 = pair_set_epi16(+cospi_24_64, cospi_8_64); - const __m128i k__cospi_p12_p20 = pair_set_epi16(+cospi_12_64, cospi_20_64); - const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); - const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); - const __m128i k__cospi_p28_p04 = pair_set_epi16(+cospi_28_64, cospi_4_64); - const __m128i k__cospi_m28_m04 = pair_set_epi16(-cospi_28_64, -cospi_4_64); - const __m128i k__cospi_m12_m20 = pair_set_epi16(-cospi_12_64, -cospi_20_64); - const __m128i k__cospi_p30_p02 = pair_set_epi16(+cospi_30_64, cospi_2_64); - const __m128i k__cospi_p14_p18 = pair_set_epi16(+cospi_14_64, cospi_18_64); - const __m128i k__cospi_p22_p10 = pair_set_epi16(+cospi_22_64, cospi_10_64); - const __m128i k__cospi_p06_p26 = pair_set_epi16(+cospi_6_64, cospi_26_64); - const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64); - const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64); - const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64); - const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64); - const __m128i k__cospi_p31_p01 = pair_set_epi16(+cospi_31_64, cospi_1_64); - const __m128i k__cospi_p15_p17 = pair_set_epi16(+cospi_15_64, cospi_17_64); - const __m128i k__cospi_p23_p09 = pair_set_epi16(+cospi_23_64, cospi_9_64); - const __m128i k__cospi_p07_p25 = pair_set_epi16(+cospi_7_64, cospi_25_64); - const __m128i k__cospi_m25_p07 = pair_set_epi16(-cospi_25_64, cospi_7_64); - const __m128i k__cospi_m09_p23 = pair_set_epi16(-cospi_9_64, cospi_23_64); - const __m128i k__cospi_m17_p15 = pair_set_epi16(-cospi_17_64, 
cospi_15_64); - const __m128i k__cospi_m01_p31 = pair_set_epi16(-cospi_1_64, cospi_31_64); - const __m128i k__cospi_p27_p05 = pair_set_epi16(+cospi_27_64, cospi_5_64); - const __m128i k__cospi_p11_p21 = pair_set_epi16(+cospi_11_64, cospi_21_64); - const __m128i k__cospi_p19_p13 = pair_set_epi16(+cospi_19_64, cospi_13_64); - const __m128i k__cospi_p03_p29 = pair_set_epi16(+cospi_3_64, cospi_29_64); - const __m128i k__cospi_m29_p03 = pair_set_epi16(-cospi_29_64, cospi_3_64); - const __m128i k__cospi_m13_p19 = pair_set_epi16(-cospi_13_64, cospi_19_64); - const __m128i k__cospi_m21_p11 = pair_set_epi16(-cospi_21_64, cospi_11_64); - const __m128i k__cospi_m05_p27 = pair_set_epi16(-cospi_5_64, cospi_27_64); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - const __m128i kZero = _mm_set1_epi16(0); - const __m128i kOne = _mm_set1_epi16(1); - // Do the two transform/transpose passes - int pass; -#if DCT_HIGH_BIT_DEPTH - int overflow; -#endif - for (pass = 0; pass < 2; ++pass) { - // We process eight columns (transposed rows in second pass) at a time. - int column_start; - for (column_start = 0; column_start < 32; column_start += 8) { - __m128i step1[32]; - __m128i step2[32]; - __m128i step3[32]; - __m128i out[32]; - // Stage 1 - // Note: even though all the loads below are aligned, using the aligned - // intrinsic make the code slightly slower. - if (0 == pass) { - const int16_t *in = &input[column_start]; - // step1[i] = (in[ 0 * stride] + in[(32 - 1) * stride]) << 2; - // Note: the next four blocks could be in a loop. That would help the - // instruction cache but is actually slower. 
- { - const int16_t *ina = in + 0 * str1; - const int16_t *inb = in + 31 * str1; - __m128i *step1a = &step1[ 0]; - __m128i *step1b = &step1[31]; - const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); - const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); - const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); - const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3)); - const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); - const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); - const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1)); - const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); - step1a[ 0] = _mm_add_epi16(ina0, inb0); - step1a[ 1] = _mm_add_epi16(ina1, inb1); - step1a[ 2] = _mm_add_epi16(ina2, inb2); - step1a[ 3] = _mm_add_epi16(ina3, inb3); - step1b[-3] = _mm_sub_epi16(ina3, inb3); - step1b[-2] = _mm_sub_epi16(ina2, inb2); - step1b[-1] = _mm_sub_epi16(ina1, inb1); - step1b[-0] = _mm_sub_epi16(ina0, inb0); - step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2); - step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2); - step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2); - step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2); - step1b[-3] = _mm_slli_epi16(step1b[-3], 2); - step1b[-2] = _mm_slli_epi16(step1b[-2], 2); - step1b[-1] = _mm_slli_epi16(step1b[-1], 2); - step1b[-0] = _mm_slli_epi16(step1b[-0], 2); - } - { - const int16_t *ina = in + 4 * str1; - const int16_t *inb = in + 27 * str1; - __m128i *step1a = &step1[ 4]; - __m128i *step1b = &step1[27]; - const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); - const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); - const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); - const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3)); - const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); - const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); - const __m128i inb1 = 
_mm_loadu_si128((const __m128i *)(inb - str1)); - const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); - step1a[ 0] = _mm_add_epi16(ina0, inb0); - step1a[ 1] = _mm_add_epi16(ina1, inb1); - step1a[ 2] = _mm_add_epi16(ina2, inb2); - step1a[ 3] = _mm_add_epi16(ina3, inb3); - step1b[-3] = _mm_sub_epi16(ina3, inb3); - step1b[-2] = _mm_sub_epi16(ina2, inb2); - step1b[-1] = _mm_sub_epi16(ina1, inb1); - step1b[-0] = _mm_sub_epi16(ina0, inb0); - step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2); - step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2); - step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2); - step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2); - step1b[-3] = _mm_slli_epi16(step1b[-3], 2); - step1b[-2] = _mm_slli_epi16(step1b[-2], 2); - step1b[-1] = _mm_slli_epi16(step1b[-1], 2); - step1b[-0] = _mm_slli_epi16(step1b[-0], 2); - } - { - const int16_t *ina = in + 8 * str1; - const int16_t *inb = in + 23 * str1; - __m128i *step1a = &step1[ 8]; - __m128i *step1b = &step1[23]; - const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); - const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); - const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); - const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3)); - const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); - const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); - const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1)); - const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); - step1a[ 0] = _mm_add_epi16(ina0, inb0); - step1a[ 1] = _mm_add_epi16(ina1, inb1); - step1a[ 2] = _mm_add_epi16(ina2, inb2); - step1a[ 3] = _mm_add_epi16(ina3, inb3); - step1b[-3] = _mm_sub_epi16(ina3, inb3); - step1b[-2] = _mm_sub_epi16(ina2, inb2); - step1b[-1] = _mm_sub_epi16(ina1, inb1); - step1b[-0] = _mm_sub_epi16(ina0, inb0); - step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2); - step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2); - step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2); - 
step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2); - step1b[-3] = _mm_slli_epi16(step1b[-3], 2); - step1b[-2] = _mm_slli_epi16(step1b[-2], 2); - step1b[-1] = _mm_slli_epi16(step1b[-1], 2); - step1b[-0] = _mm_slli_epi16(step1b[-0], 2); - } - { - const int16_t *ina = in + 12 * str1; - const int16_t *inb = in + 19 * str1; - __m128i *step1a = &step1[12]; - __m128i *step1b = &step1[19]; - const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); - const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); - const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); - const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3)); - const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); - const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); - const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1)); - const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); - step1a[ 0] = _mm_add_epi16(ina0, inb0); - step1a[ 1] = _mm_add_epi16(ina1, inb1); - step1a[ 2] = _mm_add_epi16(ina2, inb2); - step1a[ 3] = _mm_add_epi16(ina3, inb3); - step1b[-3] = _mm_sub_epi16(ina3, inb3); - step1b[-2] = _mm_sub_epi16(ina2, inb2); - step1b[-1] = _mm_sub_epi16(ina1, inb1); - step1b[-0] = _mm_sub_epi16(ina0, inb0); - step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2); - step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2); - step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2); - step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2); - step1b[-3] = _mm_slli_epi16(step1b[-3], 2); - step1b[-2] = _mm_slli_epi16(step1b[-2], 2); - step1b[-1] = _mm_slli_epi16(step1b[-1], 2); - step1b[-0] = _mm_slli_epi16(step1b[-0], 2); - } - } else { - int16_t *in = &intermediate[column_start]; - // step1[i] = in[ 0 * 32] + in[(32 - 1) * 32]; - // Note: using the same approach as above to have common offset is - // counter-productive as all offsets can be calculated at compile - // time. - // Note: the next four blocks could be in a loop. 
That would help the - // instruction cache but is actually slower. - { - __m128i in00 = _mm_loadu_si128((const __m128i *)(in + 0 * 32)); - __m128i in01 = _mm_loadu_si128((const __m128i *)(in + 1 * 32)); - __m128i in02 = _mm_loadu_si128((const __m128i *)(in + 2 * 32)); - __m128i in03 = _mm_loadu_si128((const __m128i *)(in + 3 * 32)); - __m128i in28 = _mm_loadu_si128((const __m128i *)(in + 28 * 32)); - __m128i in29 = _mm_loadu_si128((const __m128i *)(in + 29 * 32)); - __m128i in30 = _mm_loadu_si128((const __m128i *)(in + 30 * 32)); - __m128i in31 = _mm_loadu_si128((const __m128i *)(in + 31 * 32)); - step1[0] = ADD_EPI16(in00, in31); - step1[1] = ADD_EPI16(in01, in30); - step1[2] = ADD_EPI16(in02, in29); - step1[3] = ADD_EPI16(in03, in28); - step1[28] = SUB_EPI16(in03, in28); - step1[29] = SUB_EPI16(in02, in29); - step1[30] = SUB_EPI16(in01, in30); - step1[31] = SUB_EPI16(in00, in31); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step1[0], &step1[1], &step1[2], - &step1[3], &step1[28], &step1[29], - &step1[30], &step1[31]); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - __m128i in04 = _mm_loadu_si128((const __m128i *)(in + 4 * 32)); - __m128i in05 = _mm_loadu_si128((const __m128i *)(in + 5 * 32)); - __m128i in06 = _mm_loadu_si128((const __m128i *)(in + 6 * 32)); - __m128i in07 = _mm_loadu_si128((const __m128i *)(in + 7 * 32)); - __m128i in24 = _mm_loadu_si128((const __m128i *)(in + 24 * 32)); - __m128i in25 = _mm_loadu_si128((const __m128i *)(in + 25 * 32)); - __m128i in26 = _mm_loadu_si128((const __m128i *)(in + 26 * 32)); - __m128i in27 = _mm_loadu_si128((const __m128i *)(in + 27 * 32)); - step1[4] = ADD_EPI16(in04, in27); - step1[5] = ADD_EPI16(in05, in26); - step1[6] = ADD_EPI16(in06, in25); - step1[7] = ADD_EPI16(in07, in24); - step1[24] = SUB_EPI16(in07, in24); - step1[25] = SUB_EPI16(in06, in25); - step1[26] = SUB_EPI16(in05, in26); - step1[27] = SUB_EPI16(in04, 
in27); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step1[4], &step1[5], &step1[6], - &step1[7], &step1[24], &step1[25], - &step1[26], &step1[27]); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - __m128i in08 = _mm_loadu_si128((const __m128i *)(in + 8 * 32)); - __m128i in09 = _mm_loadu_si128((const __m128i *)(in + 9 * 32)); - __m128i in10 = _mm_loadu_si128((const __m128i *)(in + 10 * 32)); - __m128i in11 = _mm_loadu_si128((const __m128i *)(in + 11 * 32)); - __m128i in20 = _mm_loadu_si128((const __m128i *)(in + 20 * 32)); - __m128i in21 = _mm_loadu_si128((const __m128i *)(in + 21 * 32)); - __m128i in22 = _mm_loadu_si128((const __m128i *)(in + 22 * 32)); - __m128i in23 = _mm_loadu_si128((const __m128i *)(in + 23 * 32)); - step1[8] = ADD_EPI16(in08, in23); - step1[9] = ADD_EPI16(in09, in22); - step1[10] = ADD_EPI16(in10, in21); - step1[11] = ADD_EPI16(in11, in20); - step1[20] = SUB_EPI16(in11, in20); - step1[21] = SUB_EPI16(in10, in21); - step1[22] = SUB_EPI16(in09, in22); - step1[23] = SUB_EPI16(in08, in23); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step1[8], &step1[9], &step1[10], - &step1[11], &step1[20], &step1[21], - &step1[22], &step1[23]); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - __m128i in12 = _mm_loadu_si128((const __m128i *)(in + 12 * 32)); - __m128i in13 = _mm_loadu_si128((const __m128i *)(in + 13 * 32)); - __m128i in14 = _mm_loadu_si128((const __m128i *)(in + 14 * 32)); - __m128i in15 = _mm_loadu_si128((const __m128i *)(in + 15 * 32)); - __m128i in16 = _mm_loadu_si128((const __m128i *)(in + 16 * 32)); - __m128i in17 = _mm_loadu_si128((const __m128i *)(in + 17 * 32)); - __m128i in18 = _mm_loadu_si128((const __m128i *)(in + 18 * 32)); - __m128i in19 = _mm_loadu_si128((const __m128i *)(in + 19 * 32)); - step1[12] = ADD_EPI16(in12, in19); - step1[13] = 
ADD_EPI16(in13, in18); - step1[14] = ADD_EPI16(in14, in17); - step1[15] = ADD_EPI16(in15, in16); - step1[16] = SUB_EPI16(in15, in16); - step1[17] = SUB_EPI16(in14, in17); - step1[18] = SUB_EPI16(in13, in18); - step1[19] = SUB_EPI16(in12, in19); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step1[12], &step1[13], &step1[14], - &step1[15], &step1[16], &step1[17], - &step1[18], &step1[19]); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - } - // Stage 2 - { - step2[0] = ADD_EPI16(step1[0], step1[15]); - step2[1] = ADD_EPI16(step1[1], step1[14]); - step2[2] = ADD_EPI16(step1[2], step1[13]); - step2[3] = ADD_EPI16(step1[3], step1[12]); - step2[4] = ADD_EPI16(step1[4], step1[11]); - step2[5] = ADD_EPI16(step1[5], step1[10]); - step2[6] = ADD_EPI16(step1[6], step1[ 9]); - step2[7] = ADD_EPI16(step1[7], step1[ 8]); - step2[8] = SUB_EPI16(step1[7], step1[ 8]); - step2[9] = SUB_EPI16(step1[6], step1[ 9]); - step2[10] = SUB_EPI16(step1[5], step1[10]); - step2[11] = SUB_EPI16(step1[4], step1[11]); - step2[12] = SUB_EPI16(step1[3], step1[12]); - step2[13] = SUB_EPI16(step1[2], step1[13]); - step2[14] = SUB_EPI16(step1[1], step1[14]); - step2[15] = SUB_EPI16(step1[0], step1[15]); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x16( - &step2[0], &step2[1], &step2[2], &step2[3], - &step2[4], &step2[5], &step2[6], &step2[7], - &step2[8], &step2[9], &step2[10], &step2[11], - &step2[12], &step2[13], &step2[14], &step2[15]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i s2_20_0 = _mm_unpacklo_epi16(step1[27], step1[20]); - const __m128i s2_20_1 = _mm_unpackhi_epi16(step1[27], step1[20]); - const __m128i s2_21_0 = _mm_unpacklo_epi16(step1[26], step1[21]); - const __m128i s2_21_1 = _mm_unpackhi_epi16(step1[26], step1[21]); - 
const __m128i s2_22_0 = _mm_unpacklo_epi16(step1[25], step1[22]); - const __m128i s2_22_1 = _mm_unpackhi_epi16(step1[25], step1[22]); - const __m128i s2_23_0 = _mm_unpacklo_epi16(step1[24], step1[23]); - const __m128i s2_23_1 = _mm_unpackhi_epi16(step1[24], step1[23]); - const __m128i s2_20_2 = _mm_madd_epi16(s2_20_0, k__cospi_p16_m16); - const __m128i s2_20_3 = _mm_madd_epi16(s2_20_1, k__cospi_p16_m16); - const __m128i s2_21_2 = _mm_madd_epi16(s2_21_0, k__cospi_p16_m16); - const __m128i s2_21_3 = _mm_madd_epi16(s2_21_1, k__cospi_p16_m16); - const __m128i s2_22_2 = _mm_madd_epi16(s2_22_0, k__cospi_p16_m16); - const __m128i s2_22_3 = _mm_madd_epi16(s2_22_1, k__cospi_p16_m16); - const __m128i s2_23_2 = _mm_madd_epi16(s2_23_0, k__cospi_p16_m16); - const __m128i s2_23_3 = _mm_madd_epi16(s2_23_1, k__cospi_p16_m16); - const __m128i s2_24_2 = _mm_madd_epi16(s2_23_0, k__cospi_p16_p16); - const __m128i s2_24_3 = _mm_madd_epi16(s2_23_1, k__cospi_p16_p16); - const __m128i s2_25_2 = _mm_madd_epi16(s2_22_0, k__cospi_p16_p16); - const __m128i s2_25_3 = _mm_madd_epi16(s2_22_1, k__cospi_p16_p16); - const __m128i s2_26_2 = _mm_madd_epi16(s2_21_0, k__cospi_p16_p16); - const __m128i s2_26_3 = _mm_madd_epi16(s2_21_1, k__cospi_p16_p16); - const __m128i s2_27_2 = _mm_madd_epi16(s2_20_0, k__cospi_p16_p16); - const __m128i s2_27_3 = _mm_madd_epi16(s2_20_1, k__cospi_p16_p16); - // dct_const_round_shift - const __m128i s2_20_4 = _mm_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING); - const __m128i s2_20_5 = _mm_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING); - const __m128i s2_21_4 = _mm_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING); - const __m128i s2_21_5 = _mm_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING); - const __m128i s2_22_4 = _mm_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING); - const __m128i s2_22_5 = _mm_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING); - const __m128i s2_23_4 = _mm_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING); - const __m128i s2_23_5 = _mm_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING); - const 
__m128i s2_24_4 = _mm_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING); - const __m128i s2_24_5 = _mm_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING); - const __m128i s2_25_4 = _mm_add_epi32(s2_25_2, k__DCT_CONST_ROUNDING); - const __m128i s2_25_5 = _mm_add_epi32(s2_25_3, k__DCT_CONST_ROUNDING); - const __m128i s2_26_4 = _mm_add_epi32(s2_26_2, k__DCT_CONST_ROUNDING); - const __m128i s2_26_5 = _mm_add_epi32(s2_26_3, k__DCT_CONST_ROUNDING); - const __m128i s2_27_4 = _mm_add_epi32(s2_27_2, k__DCT_CONST_ROUNDING); - const __m128i s2_27_5 = _mm_add_epi32(s2_27_3, k__DCT_CONST_ROUNDING); - const __m128i s2_20_6 = _mm_srai_epi32(s2_20_4, DCT_CONST_BITS); - const __m128i s2_20_7 = _mm_srai_epi32(s2_20_5, DCT_CONST_BITS); - const __m128i s2_21_6 = _mm_srai_epi32(s2_21_4, DCT_CONST_BITS); - const __m128i s2_21_7 = _mm_srai_epi32(s2_21_5, DCT_CONST_BITS); - const __m128i s2_22_6 = _mm_srai_epi32(s2_22_4, DCT_CONST_BITS); - const __m128i s2_22_7 = _mm_srai_epi32(s2_22_5, DCT_CONST_BITS); - const __m128i s2_23_6 = _mm_srai_epi32(s2_23_4, DCT_CONST_BITS); - const __m128i s2_23_7 = _mm_srai_epi32(s2_23_5, DCT_CONST_BITS); - const __m128i s2_24_6 = _mm_srai_epi32(s2_24_4, DCT_CONST_BITS); - const __m128i s2_24_7 = _mm_srai_epi32(s2_24_5, DCT_CONST_BITS); - const __m128i s2_25_6 = _mm_srai_epi32(s2_25_4, DCT_CONST_BITS); - const __m128i s2_25_7 = _mm_srai_epi32(s2_25_5, DCT_CONST_BITS); - const __m128i s2_26_6 = _mm_srai_epi32(s2_26_4, DCT_CONST_BITS); - const __m128i s2_26_7 = _mm_srai_epi32(s2_26_5, DCT_CONST_BITS); - const __m128i s2_27_6 = _mm_srai_epi32(s2_27_4, DCT_CONST_BITS); - const __m128i s2_27_7 = _mm_srai_epi32(s2_27_5, DCT_CONST_BITS); - // Combine - step2[20] = _mm_packs_epi32(s2_20_6, s2_20_7); - step2[21] = _mm_packs_epi32(s2_21_6, s2_21_7); - step2[22] = _mm_packs_epi32(s2_22_6, s2_22_7); - step2[23] = _mm_packs_epi32(s2_23_6, s2_23_7); - step2[24] = _mm_packs_epi32(s2_24_6, s2_24_7); - step2[25] = _mm_packs_epi32(s2_25_6, s2_25_7); - step2[26] = _mm_packs_epi32(s2_26_6, 
s2_26_7); - step2[27] = _mm_packs_epi32(s2_27_6, s2_27_7); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step2[20], &step2[21], &step2[22], - &step2[23], &step2[24], &step2[25], - &step2[26], &step2[27]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - -#if !FDCT32x32_HIGH_PRECISION - // dump the magnitude by half, hence the intermediate values are within - // the range of 16 bits. - if (1 == pass) { - __m128i s3_00_0 = _mm_cmplt_epi16(step2[ 0], kZero); - __m128i s3_01_0 = _mm_cmplt_epi16(step2[ 1], kZero); - __m128i s3_02_0 = _mm_cmplt_epi16(step2[ 2], kZero); - __m128i s3_03_0 = _mm_cmplt_epi16(step2[ 3], kZero); - __m128i s3_04_0 = _mm_cmplt_epi16(step2[ 4], kZero); - __m128i s3_05_0 = _mm_cmplt_epi16(step2[ 5], kZero); - __m128i s3_06_0 = _mm_cmplt_epi16(step2[ 6], kZero); - __m128i s3_07_0 = _mm_cmplt_epi16(step2[ 7], kZero); - __m128i s2_08_0 = _mm_cmplt_epi16(step2[ 8], kZero); - __m128i s2_09_0 = _mm_cmplt_epi16(step2[ 9], kZero); - __m128i s3_10_0 = _mm_cmplt_epi16(step2[10], kZero); - __m128i s3_11_0 = _mm_cmplt_epi16(step2[11], kZero); - __m128i s3_12_0 = _mm_cmplt_epi16(step2[12], kZero); - __m128i s3_13_0 = _mm_cmplt_epi16(step2[13], kZero); - __m128i s2_14_0 = _mm_cmplt_epi16(step2[14], kZero); - __m128i s2_15_0 = _mm_cmplt_epi16(step2[15], kZero); - __m128i s3_16_0 = _mm_cmplt_epi16(step1[16], kZero); - __m128i s3_17_0 = _mm_cmplt_epi16(step1[17], kZero); - __m128i s3_18_0 = _mm_cmplt_epi16(step1[18], kZero); - __m128i s3_19_0 = _mm_cmplt_epi16(step1[19], kZero); - __m128i s3_20_0 = _mm_cmplt_epi16(step2[20], kZero); - __m128i s3_21_0 = _mm_cmplt_epi16(step2[21], kZero); - __m128i s3_22_0 = _mm_cmplt_epi16(step2[22], kZero); - __m128i s3_23_0 = _mm_cmplt_epi16(step2[23], kZero); - __m128i s3_24_0 = _mm_cmplt_epi16(step2[24], kZero); - __m128i s3_25_0 = _mm_cmplt_epi16(step2[25], kZero); - 
__m128i s3_26_0 = _mm_cmplt_epi16(step2[26], kZero); - __m128i s3_27_0 = _mm_cmplt_epi16(step2[27], kZero); - __m128i s3_28_0 = _mm_cmplt_epi16(step1[28], kZero); - __m128i s3_29_0 = _mm_cmplt_epi16(step1[29], kZero); - __m128i s3_30_0 = _mm_cmplt_epi16(step1[30], kZero); - __m128i s3_31_0 = _mm_cmplt_epi16(step1[31], kZero); - - step2[0] = SUB_EPI16(step2[ 0], s3_00_0); - step2[1] = SUB_EPI16(step2[ 1], s3_01_0); - step2[2] = SUB_EPI16(step2[ 2], s3_02_0); - step2[3] = SUB_EPI16(step2[ 3], s3_03_0); - step2[4] = SUB_EPI16(step2[ 4], s3_04_0); - step2[5] = SUB_EPI16(step2[ 5], s3_05_0); - step2[6] = SUB_EPI16(step2[ 6], s3_06_0); - step2[7] = SUB_EPI16(step2[ 7], s3_07_0); - step2[8] = SUB_EPI16(step2[ 8], s2_08_0); - step2[9] = SUB_EPI16(step2[ 9], s2_09_0); - step2[10] = SUB_EPI16(step2[10], s3_10_0); - step2[11] = SUB_EPI16(step2[11], s3_11_0); - step2[12] = SUB_EPI16(step2[12], s3_12_0); - step2[13] = SUB_EPI16(step2[13], s3_13_0); - step2[14] = SUB_EPI16(step2[14], s2_14_0); - step2[15] = SUB_EPI16(step2[15], s2_15_0); - step1[16] = SUB_EPI16(step1[16], s3_16_0); - step1[17] = SUB_EPI16(step1[17], s3_17_0); - step1[18] = SUB_EPI16(step1[18], s3_18_0); - step1[19] = SUB_EPI16(step1[19], s3_19_0); - step2[20] = SUB_EPI16(step2[20], s3_20_0); - step2[21] = SUB_EPI16(step2[21], s3_21_0); - step2[22] = SUB_EPI16(step2[22], s3_22_0); - step2[23] = SUB_EPI16(step2[23], s3_23_0); - step2[24] = SUB_EPI16(step2[24], s3_24_0); - step2[25] = SUB_EPI16(step2[25], s3_25_0); - step2[26] = SUB_EPI16(step2[26], s3_26_0); - step2[27] = SUB_EPI16(step2[27], s3_27_0); - step1[28] = SUB_EPI16(step1[28], s3_28_0); - step1[29] = SUB_EPI16(step1[29], s3_29_0); - step1[30] = SUB_EPI16(step1[30], s3_30_0); - step1[31] = SUB_EPI16(step1[31], s3_31_0); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x32( - &step2[0], &step2[1], &step2[2], &step2[3], - &step2[4], &step2[5], &step2[6], &step2[7], - &step2[8], &step2[9], &step2[10], &step2[11], - &step2[12], &step2[13], 
&step2[14], &step2[15], - &step1[16], &step1[17], &step1[18], &step1[19], - &step2[20], &step2[21], &step2[22], &step2[23], - &step2[24], &step2[25], &step2[26], &step2[27], - &step1[28], &step1[29], &step1[30], &step1[31]); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - step2[0] = _mm_add_epi16(step2[ 0], kOne); - step2[1] = _mm_add_epi16(step2[ 1], kOne); - step2[2] = _mm_add_epi16(step2[ 2], kOne); - step2[3] = _mm_add_epi16(step2[ 3], kOne); - step2[4] = _mm_add_epi16(step2[ 4], kOne); - step2[5] = _mm_add_epi16(step2[ 5], kOne); - step2[6] = _mm_add_epi16(step2[ 6], kOne); - step2[7] = _mm_add_epi16(step2[ 7], kOne); - step2[8] = _mm_add_epi16(step2[ 8], kOne); - step2[9] = _mm_add_epi16(step2[ 9], kOne); - step2[10] = _mm_add_epi16(step2[10], kOne); - step2[11] = _mm_add_epi16(step2[11], kOne); - step2[12] = _mm_add_epi16(step2[12], kOne); - step2[13] = _mm_add_epi16(step2[13], kOne); - step2[14] = _mm_add_epi16(step2[14], kOne); - step2[15] = _mm_add_epi16(step2[15], kOne); - step1[16] = _mm_add_epi16(step1[16], kOne); - step1[17] = _mm_add_epi16(step1[17], kOne); - step1[18] = _mm_add_epi16(step1[18], kOne); - step1[19] = _mm_add_epi16(step1[19], kOne); - step2[20] = _mm_add_epi16(step2[20], kOne); - step2[21] = _mm_add_epi16(step2[21], kOne); - step2[22] = _mm_add_epi16(step2[22], kOne); - step2[23] = _mm_add_epi16(step2[23], kOne); - step2[24] = _mm_add_epi16(step2[24], kOne); - step2[25] = _mm_add_epi16(step2[25], kOne); - step2[26] = _mm_add_epi16(step2[26], kOne); - step2[27] = _mm_add_epi16(step2[27], kOne); - step1[28] = _mm_add_epi16(step1[28], kOne); - step1[29] = _mm_add_epi16(step1[29], kOne); - step1[30] = _mm_add_epi16(step1[30], kOne); - step1[31] = _mm_add_epi16(step1[31], kOne); - - step2[0] = _mm_srai_epi16(step2[ 0], 2); - step2[1] = _mm_srai_epi16(step2[ 1], 2); - step2[2] = _mm_srai_epi16(step2[ 2], 2); - step2[3] = _mm_srai_epi16(step2[ 3], 2); - step2[4] = 
_mm_srai_epi16(step2[ 4], 2); - step2[5] = _mm_srai_epi16(step2[ 5], 2); - step2[6] = _mm_srai_epi16(step2[ 6], 2); - step2[7] = _mm_srai_epi16(step2[ 7], 2); - step2[8] = _mm_srai_epi16(step2[ 8], 2); - step2[9] = _mm_srai_epi16(step2[ 9], 2); - step2[10] = _mm_srai_epi16(step2[10], 2); - step2[11] = _mm_srai_epi16(step2[11], 2); - step2[12] = _mm_srai_epi16(step2[12], 2); - step2[13] = _mm_srai_epi16(step2[13], 2); - step2[14] = _mm_srai_epi16(step2[14], 2); - step2[15] = _mm_srai_epi16(step2[15], 2); - step1[16] = _mm_srai_epi16(step1[16], 2); - step1[17] = _mm_srai_epi16(step1[17], 2); - step1[18] = _mm_srai_epi16(step1[18], 2); - step1[19] = _mm_srai_epi16(step1[19], 2); - step2[20] = _mm_srai_epi16(step2[20], 2); - step2[21] = _mm_srai_epi16(step2[21], 2); - step2[22] = _mm_srai_epi16(step2[22], 2); - step2[23] = _mm_srai_epi16(step2[23], 2); - step2[24] = _mm_srai_epi16(step2[24], 2); - step2[25] = _mm_srai_epi16(step2[25], 2); - step2[26] = _mm_srai_epi16(step2[26], 2); - step2[27] = _mm_srai_epi16(step2[27], 2); - step1[28] = _mm_srai_epi16(step1[28], 2); - step1[29] = _mm_srai_epi16(step1[29], 2); - step1[30] = _mm_srai_epi16(step1[30], 2); - step1[31] = _mm_srai_epi16(step1[31], 2); - } -#endif // !FDCT32x32_HIGH_PRECISION - -#if FDCT32x32_HIGH_PRECISION - if (pass == 0) { -#endif - // Stage 3 - { - step3[0] = ADD_EPI16(step2[(8 - 1)], step2[0]); - step3[1] = ADD_EPI16(step2[(8 - 2)], step2[1]); - step3[2] = ADD_EPI16(step2[(8 - 3)], step2[2]); - step3[3] = ADD_EPI16(step2[(8 - 4)], step2[3]); - step3[4] = SUB_EPI16(step2[(8 - 5)], step2[4]); - step3[5] = SUB_EPI16(step2[(8 - 6)], step2[5]); - step3[6] = SUB_EPI16(step2[(8 - 7)], step2[6]); - step3[7] = SUB_EPI16(step2[(8 - 8)], step2[7]); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step3[0], &step3[1], &step3[2], - &step3[3], &step3[4], &step3[5], - &step3[6], &step3[7]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - 
HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]); - const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]); - const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]); - const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]); - const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16); - const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16); - const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16); - const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16); - const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16); - const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16); - const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16); - const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16); - // dct_const_round_shift - const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING); - const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING); - const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING); - const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING); - const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING); - const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING); - const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING); - const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING); - const __m128i s3_10_6 = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS); - const __m128i s3_10_7 = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS); - const __m128i s3_11_6 = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS); - const __m128i s3_11_7 = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS); - const __m128i s3_12_6 = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS); - const __m128i s3_12_7 = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS); - const __m128i s3_13_6 = _mm_srai_epi32(s3_13_4, 
DCT_CONST_BITS); - const __m128i s3_13_7 = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS); - // Combine - step3[10] = _mm_packs_epi32(s3_10_6, s3_10_7); - step3[11] = _mm_packs_epi32(s3_11_6, s3_11_7); - step3[12] = _mm_packs_epi32(s3_12_6, s3_12_7); - step3[13] = _mm_packs_epi32(s3_13_6, s3_13_7); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&step3[10], &step3[11], - &step3[12], &step3[13]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - step3[16] = ADD_EPI16(step2[23], step1[16]); - step3[17] = ADD_EPI16(step2[22], step1[17]); - step3[18] = ADD_EPI16(step2[21], step1[18]); - step3[19] = ADD_EPI16(step2[20], step1[19]); - step3[20] = SUB_EPI16(step1[19], step2[20]); - step3[21] = SUB_EPI16(step1[18], step2[21]); - step3[22] = SUB_EPI16(step1[17], step2[22]); - step3[23] = SUB_EPI16(step1[16], step2[23]); - step3[24] = SUB_EPI16(step1[31], step2[24]); - step3[25] = SUB_EPI16(step1[30], step2[25]); - step3[26] = SUB_EPI16(step1[29], step2[26]); - step3[27] = SUB_EPI16(step1[28], step2[27]); - step3[28] = ADD_EPI16(step2[27], step1[28]); - step3[29] = ADD_EPI16(step2[26], step1[29]); - step3[30] = ADD_EPI16(step2[25], step1[30]); - step3[31] = ADD_EPI16(step2[24], step1[31]); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x16( - &step3[16], &step3[17], &step3[18], &step3[19], - &step3[20], &step3[21], &step3[22], &step3[23], - &step3[24], &step3[25], &step3[26], &step3[27], - &step3[28], &step3[29], &step3[30], &step3[31]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - - // Stage 4 - { - step1[0] = ADD_EPI16(step3[ 3], step3[ 0]); - step1[1] = ADD_EPI16(step3[ 2], step3[ 1]); - step1[2] = SUB_EPI16(step3[ 1], step3[ 2]); - step1[3] = SUB_EPI16(step3[ 
0], step3[ 3]); - step1[8] = ADD_EPI16(step3[11], step2[ 8]); - step1[9] = ADD_EPI16(step3[10], step2[ 9]); - step1[10] = SUB_EPI16(step2[ 9], step3[10]); - step1[11] = SUB_EPI16(step2[ 8], step3[11]); - step1[12] = SUB_EPI16(step2[15], step3[12]); - step1[13] = SUB_EPI16(step2[14], step3[13]); - step1[14] = ADD_EPI16(step3[13], step2[14]); - step1[15] = ADD_EPI16(step3[12], step2[15]); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x16( - &step1[0], &step1[1], &step1[2], &step1[3], - &step1[4], &step1[5], &step1[6], &step1[7], - &step1[8], &step1[9], &step1[10], &step1[11], - &step1[12], &step1[13], &step1[14], &step1[15]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i s1_05_0 = _mm_unpacklo_epi16(step3[6], step3[5]); - const __m128i s1_05_1 = _mm_unpackhi_epi16(step3[6], step3[5]); - const __m128i s1_05_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_m16); - const __m128i s1_05_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_m16); - const __m128i s1_06_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_p16); - const __m128i s1_06_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_p16); - // dct_const_round_shift - const __m128i s1_05_4 = _mm_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING); - const __m128i s1_05_5 = _mm_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING); - const __m128i s1_06_4 = _mm_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING); - const __m128i s1_06_5 = _mm_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING); - const __m128i s1_05_6 = _mm_srai_epi32(s1_05_4, DCT_CONST_BITS); - const __m128i s1_05_7 = _mm_srai_epi32(s1_05_5, DCT_CONST_BITS); - const __m128i s1_06_6 = _mm_srai_epi32(s1_06_4, DCT_CONST_BITS); - const __m128i s1_06_7 = _mm_srai_epi32(s1_06_5, DCT_CONST_BITS); - // Combine - step1[5] = _mm_packs_epi32(s1_05_6, s1_05_7); - step1[6] = _mm_packs_epi32(s1_06_6, s1_06_7); -#if DCT_HIGH_BIT_DEPTH - overflow = 
check_epi16_overflow_x2(&step1[5], &step1[6]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i s1_18_0 = _mm_unpacklo_epi16(step3[18], step3[29]); - const __m128i s1_18_1 = _mm_unpackhi_epi16(step3[18], step3[29]); - const __m128i s1_19_0 = _mm_unpacklo_epi16(step3[19], step3[28]); - const __m128i s1_19_1 = _mm_unpackhi_epi16(step3[19], step3[28]); - const __m128i s1_20_0 = _mm_unpacklo_epi16(step3[20], step3[27]); - const __m128i s1_20_1 = _mm_unpackhi_epi16(step3[20], step3[27]); - const __m128i s1_21_0 = _mm_unpacklo_epi16(step3[21], step3[26]); - const __m128i s1_21_1 = _mm_unpackhi_epi16(step3[21], step3[26]); - const __m128i s1_18_2 = _mm_madd_epi16(s1_18_0, k__cospi_m08_p24); - const __m128i s1_18_3 = _mm_madd_epi16(s1_18_1, k__cospi_m08_p24); - const __m128i s1_19_2 = _mm_madd_epi16(s1_19_0, k__cospi_m08_p24); - const __m128i s1_19_3 = _mm_madd_epi16(s1_19_1, k__cospi_m08_p24); - const __m128i s1_20_2 = _mm_madd_epi16(s1_20_0, k__cospi_m24_m08); - const __m128i s1_20_3 = _mm_madd_epi16(s1_20_1, k__cospi_m24_m08); - const __m128i s1_21_2 = _mm_madd_epi16(s1_21_0, k__cospi_m24_m08); - const __m128i s1_21_3 = _mm_madd_epi16(s1_21_1, k__cospi_m24_m08); - const __m128i s1_26_2 = _mm_madd_epi16(s1_21_0, k__cospi_m08_p24); - const __m128i s1_26_3 = _mm_madd_epi16(s1_21_1, k__cospi_m08_p24); - const __m128i s1_27_2 = _mm_madd_epi16(s1_20_0, k__cospi_m08_p24); - const __m128i s1_27_3 = _mm_madd_epi16(s1_20_1, k__cospi_m08_p24); - const __m128i s1_28_2 = _mm_madd_epi16(s1_19_0, k__cospi_p24_p08); - const __m128i s1_28_3 = _mm_madd_epi16(s1_19_1, k__cospi_p24_p08); - const __m128i s1_29_2 = _mm_madd_epi16(s1_18_0, k__cospi_p24_p08); - const __m128i s1_29_3 = _mm_madd_epi16(s1_18_1, k__cospi_p24_p08); - // dct_const_round_shift - const __m128i s1_18_4 = _mm_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING); - 
const __m128i s1_18_5 = _mm_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING); - const __m128i s1_19_4 = _mm_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING); - const __m128i s1_19_5 = _mm_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING); - const __m128i s1_20_4 = _mm_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING); - const __m128i s1_20_5 = _mm_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING); - const __m128i s1_21_4 = _mm_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING); - const __m128i s1_21_5 = _mm_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING); - const __m128i s1_26_4 = _mm_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING); - const __m128i s1_26_5 = _mm_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING); - const __m128i s1_27_4 = _mm_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING); - const __m128i s1_27_5 = _mm_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING); - const __m128i s1_28_4 = _mm_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING); - const __m128i s1_28_5 = _mm_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING); - const __m128i s1_29_4 = _mm_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING); - const __m128i s1_29_5 = _mm_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING); - const __m128i s1_18_6 = _mm_srai_epi32(s1_18_4, DCT_CONST_BITS); - const __m128i s1_18_7 = _mm_srai_epi32(s1_18_5, DCT_CONST_BITS); - const __m128i s1_19_6 = _mm_srai_epi32(s1_19_4, DCT_CONST_BITS); - const __m128i s1_19_7 = _mm_srai_epi32(s1_19_5, DCT_CONST_BITS); - const __m128i s1_20_6 = _mm_srai_epi32(s1_20_4, DCT_CONST_BITS); - const __m128i s1_20_7 = _mm_srai_epi32(s1_20_5, DCT_CONST_BITS); - const __m128i s1_21_6 = _mm_srai_epi32(s1_21_4, DCT_CONST_BITS); - const __m128i s1_21_7 = _mm_srai_epi32(s1_21_5, DCT_CONST_BITS); - const __m128i s1_26_6 = _mm_srai_epi32(s1_26_4, DCT_CONST_BITS); - const __m128i s1_26_7 = _mm_srai_epi32(s1_26_5, DCT_CONST_BITS); - const __m128i s1_27_6 = _mm_srai_epi32(s1_27_4, DCT_CONST_BITS); - const __m128i s1_27_7 = _mm_srai_epi32(s1_27_5, DCT_CONST_BITS); - const __m128i s1_28_6 = _mm_srai_epi32(s1_28_4, DCT_CONST_BITS); - const __m128i s1_28_7 = 
_mm_srai_epi32(s1_28_5, DCT_CONST_BITS); - const __m128i s1_29_6 = _mm_srai_epi32(s1_29_4, DCT_CONST_BITS); - const __m128i s1_29_7 = _mm_srai_epi32(s1_29_5, DCT_CONST_BITS); - // Combine - step1[18] = _mm_packs_epi32(s1_18_6, s1_18_7); - step1[19] = _mm_packs_epi32(s1_19_6, s1_19_7); - step1[20] = _mm_packs_epi32(s1_20_6, s1_20_7); - step1[21] = _mm_packs_epi32(s1_21_6, s1_21_7); - step1[26] = _mm_packs_epi32(s1_26_6, s1_26_7); - step1[27] = _mm_packs_epi32(s1_27_6, s1_27_7); - step1[28] = _mm_packs_epi32(s1_28_6, s1_28_7); - step1[29] = _mm_packs_epi32(s1_29_6, s1_29_7); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step1[18], &step1[19], &step1[20], - &step1[21], &step1[26], &step1[27], - &step1[28], &step1[29]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - // Stage 5 - { - step2[4] = ADD_EPI16(step1[5], step3[4]); - step2[5] = SUB_EPI16(step3[4], step1[5]); - step2[6] = SUB_EPI16(step3[7], step1[6]); - step2[7] = ADD_EPI16(step1[6], step3[7]); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&step2[4], &step2[5], - &step2[6], &step2[7]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i out_00_0 = _mm_unpacklo_epi16(step1[0], step1[1]); - const __m128i out_00_1 = _mm_unpackhi_epi16(step1[0], step1[1]); - const __m128i out_08_0 = _mm_unpacklo_epi16(step1[2], step1[3]); - const __m128i out_08_1 = _mm_unpackhi_epi16(step1[2], step1[3]); - const __m128i out_00_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_p16); - const __m128i out_00_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_p16); - const __m128i out_16_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_m16); - const __m128i out_16_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_m16); - const 
__m128i out_08_2 = _mm_madd_epi16(out_08_0, k__cospi_p24_p08); - const __m128i out_08_3 = _mm_madd_epi16(out_08_1, k__cospi_p24_p08); - const __m128i out_24_2 = _mm_madd_epi16(out_08_0, k__cospi_m08_p24); - const __m128i out_24_3 = _mm_madd_epi16(out_08_1, k__cospi_m08_p24); - // dct_const_round_shift - const __m128i out_00_4 = _mm_add_epi32(out_00_2, k__DCT_CONST_ROUNDING); - const __m128i out_00_5 = _mm_add_epi32(out_00_3, k__DCT_CONST_ROUNDING); - const __m128i out_16_4 = _mm_add_epi32(out_16_2, k__DCT_CONST_ROUNDING); - const __m128i out_16_5 = _mm_add_epi32(out_16_3, k__DCT_CONST_ROUNDING); - const __m128i out_08_4 = _mm_add_epi32(out_08_2, k__DCT_CONST_ROUNDING); - const __m128i out_08_5 = _mm_add_epi32(out_08_3, k__DCT_CONST_ROUNDING); - const __m128i out_24_4 = _mm_add_epi32(out_24_2, k__DCT_CONST_ROUNDING); - const __m128i out_24_5 = _mm_add_epi32(out_24_3, k__DCT_CONST_ROUNDING); - const __m128i out_00_6 = _mm_srai_epi32(out_00_4, DCT_CONST_BITS); - const __m128i out_00_7 = _mm_srai_epi32(out_00_5, DCT_CONST_BITS); - const __m128i out_16_6 = _mm_srai_epi32(out_16_4, DCT_CONST_BITS); - const __m128i out_16_7 = _mm_srai_epi32(out_16_5, DCT_CONST_BITS); - const __m128i out_08_6 = _mm_srai_epi32(out_08_4, DCT_CONST_BITS); - const __m128i out_08_7 = _mm_srai_epi32(out_08_5, DCT_CONST_BITS); - const __m128i out_24_6 = _mm_srai_epi32(out_24_4, DCT_CONST_BITS); - const __m128i out_24_7 = _mm_srai_epi32(out_24_5, DCT_CONST_BITS); - // Combine - out[ 0] = _mm_packs_epi32(out_00_6, out_00_7); - out[16] = _mm_packs_epi32(out_16_6, out_16_7); - out[ 8] = _mm_packs_epi32(out_08_6, out_08_7); - out[24] = _mm_packs_epi32(out_24_6, out_24_7); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&out[0], &out[16], - &out[8], &out[24]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i s2_09_0 = 
_mm_unpacklo_epi16(step1[ 9], step1[14]); - const __m128i s2_09_1 = _mm_unpackhi_epi16(step1[ 9], step1[14]); - const __m128i s2_10_0 = _mm_unpacklo_epi16(step1[10], step1[13]); - const __m128i s2_10_1 = _mm_unpackhi_epi16(step1[10], step1[13]); - const __m128i s2_09_2 = _mm_madd_epi16(s2_09_0, k__cospi_m08_p24); - const __m128i s2_09_3 = _mm_madd_epi16(s2_09_1, k__cospi_m08_p24); - const __m128i s2_10_2 = _mm_madd_epi16(s2_10_0, k__cospi_m24_m08); - const __m128i s2_10_3 = _mm_madd_epi16(s2_10_1, k__cospi_m24_m08); - const __m128i s2_13_2 = _mm_madd_epi16(s2_10_0, k__cospi_m08_p24); - const __m128i s2_13_3 = _mm_madd_epi16(s2_10_1, k__cospi_m08_p24); - const __m128i s2_14_2 = _mm_madd_epi16(s2_09_0, k__cospi_p24_p08); - const __m128i s2_14_3 = _mm_madd_epi16(s2_09_1, k__cospi_p24_p08); - // dct_const_round_shift - const __m128i s2_09_4 = _mm_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING); - const __m128i s2_09_5 = _mm_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING); - const __m128i s2_10_4 = _mm_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING); - const __m128i s2_10_5 = _mm_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING); - const __m128i s2_13_4 = _mm_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING); - const __m128i s2_13_5 = _mm_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING); - const __m128i s2_14_4 = _mm_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING); - const __m128i s2_14_5 = _mm_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING); - const __m128i s2_09_6 = _mm_srai_epi32(s2_09_4, DCT_CONST_BITS); - const __m128i s2_09_7 = _mm_srai_epi32(s2_09_5, DCT_CONST_BITS); - const __m128i s2_10_6 = _mm_srai_epi32(s2_10_4, DCT_CONST_BITS); - const __m128i s2_10_7 = _mm_srai_epi32(s2_10_5, DCT_CONST_BITS); - const __m128i s2_13_6 = _mm_srai_epi32(s2_13_4, DCT_CONST_BITS); - const __m128i s2_13_7 = _mm_srai_epi32(s2_13_5, DCT_CONST_BITS); - const __m128i s2_14_6 = _mm_srai_epi32(s2_14_4, DCT_CONST_BITS); - const __m128i s2_14_7 = _mm_srai_epi32(s2_14_5, DCT_CONST_BITS); - // Combine - step2[ 9] = _mm_packs_epi32(s2_09_6, 
s2_09_7); - step2[10] = _mm_packs_epi32(s2_10_6, s2_10_7); - step2[13] = _mm_packs_epi32(s2_13_6, s2_13_7); - step2[14] = _mm_packs_epi32(s2_14_6, s2_14_7); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&step2[9], &step2[10], - &step2[13], &step2[14]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - step2[16] = ADD_EPI16(step1[19], step3[16]); - step2[17] = ADD_EPI16(step1[18], step3[17]); - step2[18] = SUB_EPI16(step3[17], step1[18]); - step2[19] = SUB_EPI16(step3[16], step1[19]); - step2[20] = SUB_EPI16(step3[23], step1[20]); - step2[21] = SUB_EPI16(step3[22], step1[21]); - step2[22] = ADD_EPI16(step1[21], step3[22]); - step2[23] = ADD_EPI16(step1[20], step3[23]); - step2[24] = ADD_EPI16(step1[27], step3[24]); - step2[25] = ADD_EPI16(step1[26], step3[25]); - step2[26] = SUB_EPI16(step3[25], step1[26]); - step2[27] = SUB_EPI16(step3[24], step1[27]); - step2[28] = SUB_EPI16(step3[31], step1[28]); - step2[29] = SUB_EPI16(step3[30], step1[29]); - step2[30] = ADD_EPI16(step1[29], step3[30]); - step2[31] = ADD_EPI16(step1[28], step3[31]); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x16( - &step2[16], &step2[17], &step2[18], &step2[19], - &step2[20], &step2[21], &step2[22], &step2[23], - &step2[24], &step2[25], &step2[26], &step2[27], - &step2[28], &step2[29], &step2[30], &step2[31]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - // Stage 6 - { - const __m128i out_04_0 = _mm_unpacklo_epi16(step2[4], step2[7]); - const __m128i out_04_1 = _mm_unpackhi_epi16(step2[4], step2[7]); - const __m128i out_20_0 = _mm_unpacklo_epi16(step2[5], step2[6]); - const __m128i out_20_1 = _mm_unpackhi_epi16(step2[5], step2[6]); - const __m128i out_12_0 = 
_mm_unpacklo_epi16(step2[5], step2[6]); - const __m128i out_12_1 = _mm_unpackhi_epi16(step2[5], step2[6]); - const __m128i out_28_0 = _mm_unpacklo_epi16(step2[4], step2[7]); - const __m128i out_28_1 = _mm_unpackhi_epi16(step2[4], step2[7]); - const __m128i out_04_2 = _mm_madd_epi16(out_04_0, k__cospi_p28_p04); - const __m128i out_04_3 = _mm_madd_epi16(out_04_1, k__cospi_p28_p04); - const __m128i out_20_2 = _mm_madd_epi16(out_20_0, k__cospi_p12_p20); - const __m128i out_20_3 = _mm_madd_epi16(out_20_1, k__cospi_p12_p20); - const __m128i out_12_2 = _mm_madd_epi16(out_12_0, k__cospi_m20_p12); - const __m128i out_12_3 = _mm_madd_epi16(out_12_1, k__cospi_m20_p12); - const __m128i out_28_2 = _mm_madd_epi16(out_28_0, k__cospi_m04_p28); - const __m128i out_28_3 = _mm_madd_epi16(out_28_1, k__cospi_m04_p28); - // dct_const_round_shift - const __m128i out_04_4 = _mm_add_epi32(out_04_2, k__DCT_CONST_ROUNDING); - const __m128i out_04_5 = _mm_add_epi32(out_04_3, k__DCT_CONST_ROUNDING); - const __m128i out_20_4 = _mm_add_epi32(out_20_2, k__DCT_CONST_ROUNDING); - const __m128i out_20_5 = _mm_add_epi32(out_20_3, k__DCT_CONST_ROUNDING); - const __m128i out_12_4 = _mm_add_epi32(out_12_2, k__DCT_CONST_ROUNDING); - const __m128i out_12_5 = _mm_add_epi32(out_12_3, k__DCT_CONST_ROUNDING); - const __m128i out_28_4 = _mm_add_epi32(out_28_2, k__DCT_CONST_ROUNDING); - const __m128i out_28_5 = _mm_add_epi32(out_28_3, k__DCT_CONST_ROUNDING); - const __m128i out_04_6 = _mm_srai_epi32(out_04_4, DCT_CONST_BITS); - const __m128i out_04_7 = _mm_srai_epi32(out_04_5, DCT_CONST_BITS); - const __m128i out_20_6 = _mm_srai_epi32(out_20_4, DCT_CONST_BITS); - const __m128i out_20_7 = _mm_srai_epi32(out_20_5, DCT_CONST_BITS); - const __m128i out_12_6 = _mm_srai_epi32(out_12_4, DCT_CONST_BITS); - const __m128i out_12_7 = _mm_srai_epi32(out_12_5, DCT_CONST_BITS); - const __m128i out_28_6 = _mm_srai_epi32(out_28_4, DCT_CONST_BITS); - const __m128i out_28_7 = _mm_srai_epi32(out_28_5, DCT_CONST_BITS); - // 
Combine - out[4] = _mm_packs_epi32(out_04_6, out_04_7); - out[20] = _mm_packs_epi32(out_20_6, out_20_7); - out[12] = _mm_packs_epi32(out_12_6, out_12_7); - out[28] = _mm_packs_epi32(out_28_6, out_28_7); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&out[4], &out[20], - &out[12], &out[28]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - step3[8] = ADD_EPI16(step2[ 9], step1[ 8]); - step3[9] = SUB_EPI16(step1[ 8], step2[ 9]); - step3[10] = SUB_EPI16(step1[11], step2[10]); - step3[11] = ADD_EPI16(step2[10], step1[11]); - step3[12] = ADD_EPI16(step2[13], step1[12]); - step3[13] = SUB_EPI16(step1[12], step2[13]); - step3[14] = SUB_EPI16(step1[15], step2[14]); - step3[15] = ADD_EPI16(step2[14], step1[15]); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step3[8], &step3[9], &step3[10], - &step3[11], &step3[12], &step3[13], - &step3[14], &step3[15]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i s3_17_0 = _mm_unpacklo_epi16(step2[17], step2[30]); - const __m128i s3_17_1 = _mm_unpackhi_epi16(step2[17], step2[30]); - const __m128i s3_18_0 = _mm_unpacklo_epi16(step2[18], step2[29]); - const __m128i s3_18_1 = _mm_unpackhi_epi16(step2[18], step2[29]); - const __m128i s3_21_0 = _mm_unpacklo_epi16(step2[21], step2[26]); - const __m128i s3_21_1 = _mm_unpackhi_epi16(step2[21], step2[26]); - const __m128i s3_22_0 = _mm_unpacklo_epi16(step2[22], step2[25]); - const __m128i s3_22_1 = _mm_unpackhi_epi16(step2[22], step2[25]); - const __m128i s3_17_2 = _mm_madd_epi16(s3_17_0, k__cospi_m04_p28); - const __m128i s3_17_3 = _mm_madd_epi16(s3_17_1, k__cospi_m04_p28); - const __m128i s3_18_2 = _mm_madd_epi16(s3_18_0, k__cospi_m28_m04); - const 
__m128i s3_18_3 = _mm_madd_epi16(s3_18_1, k__cospi_m28_m04); - const __m128i s3_21_2 = _mm_madd_epi16(s3_21_0, k__cospi_m20_p12); - const __m128i s3_21_3 = _mm_madd_epi16(s3_21_1, k__cospi_m20_p12); - const __m128i s3_22_2 = _mm_madd_epi16(s3_22_0, k__cospi_m12_m20); - const __m128i s3_22_3 = _mm_madd_epi16(s3_22_1, k__cospi_m12_m20); - const __m128i s3_25_2 = _mm_madd_epi16(s3_22_0, k__cospi_m20_p12); - const __m128i s3_25_3 = _mm_madd_epi16(s3_22_1, k__cospi_m20_p12); - const __m128i s3_26_2 = _mm_madd_epi16(s3_21_0, k__cospi_p12_p20); - const __m128i s3_26_3 = _mm_madd_epi16(s3_21_1, k__cospi_p12_p20); - const __m128i s3_29_2 = _mm_madd_epi16(s3_18_0, k__cospi_m04_p28); - const __m128i s3_29_3 = _mm_madd_epi16(s3_18_1, k__cospi_m04_p28); - const __m128i s3_30_2 = _mm_madd_epi16(s3_17_0, k__cospi_p28_p04); - const __m128i s3_30_3 = _mm_madd_epi16(s3_17_1, k__cospi_p28_p04); - // dct_const_round_shift - const __m128i s3_17_4 = _mm_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING); - const __m128i s3_17_5 = _mm_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING); - const __m128i s3_18_4 = _mm_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING); - const __m128i s3_18_5 = _mm_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING); - const __m128i s3_21_4 = _mm_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING); - const __m128i s3_21_5 = _mm_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING); - const __m128i s3_22_4 = _mm_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING); - const __m128i s3_22_5 = _mm_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING); - const __m128i s3_17_6 = _mm_srai_epi32(s3_17_4, DCT_CONST_BITS); - const __m128i s3_17_7 = _mm_srai_epi32(s3_17_5, DCT_CONST_BITS); - const __m128i s3_18_6 = _mm_srai_epi32(s3_18_4, DCT_CONST_BITS); - const __m128i s3_18_7 = _mm_srai_epi32(s3_18_5, DCT_CONST_BITS); - const __m128i s3_21_6 = _mm_srai_epi32(s3_21_4, DCT_CONST_BITS); - const __m128i s3_21_7 = _mm_srai_epi32(s3_21_5, DCT_CONST_BITS); - const __m128i s3_22_6 = _mm_srai_epi32(s3_22_4, DCT_CONST_BITS); - const __m128i s3_22_7 = 
_mm_srai_epi32(s3_22_5, DCT_CONST_BITS); - const __m128i s3_25_4 = _mm_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING); - const __m128i s3_25_5 = _mm_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING); - const __m128i s3_26_4 = _mm_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING); - const __m128i s3_26_5 = _mm_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING); - const __m128i s3_29_4 = _mm_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING); - const __m128i s3_29_5 = _mm_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING); - const __m128i s3_30_4 = _mm_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING); - const __m128i s3_30_5 = _mm_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING); - const __m128i s3_25_6 = _mm_srai_epi32(s3_25_4, DCT_CONST_BITS); - const __m128i s3_25_7 = _mm_srai_epi32(s3_25_5, DCT_CONST_BITS); - const __m128i s3_26_6 = _mm_srai_epi32(s3_26_4, DCT_CONST_BITS); - const __m128i s3_26_7 = _mm_srai_epi32(s3_26_5, DCT_CONST_BITS); - const __m128i s3_29_6 = _mm_srai_epi32(s3_29_4, DCT_CONST_BITS); - const __m128i s3_29_7 = _mm_srai_epi32(s3_29_5, DCT_CONST_BITS); - const __m128i s3_30_6 = _mm_srai_epi32(s3_30_4, DCT_CONST_BITS); - const __m128i s3_30_7 = _mm_srai_epi32(s3_30_5, DCT_CONST_BITS); - // Combine - step3[17] = _mm_packs_epi32(s3_17_6, s3_17_7); - step3[18] = _mm_packs_epi32(s3_18_6, s3_18_7); - step3[21] = _mm_packs_epi32(s3_21_6, s3_21_7); - step3[22] = _mm_packs_epi32(s3_22_6, s3_22_7); - // Combine - step3[25] = _mm_packs_epi32(s3_25_6, s3_25_7); - step3[26] = _mm_packs_epi32(s3_26_6, s3_26_7); - step3[29] = _mm_packs_epi32(s3_29_6, s3_29_7); - step3[30] = _mm_packs_epi32(s3_30_6, s3_30_7); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step3[17], &step3[18], &step3[21], - &step3[22], &step3[25], &step3[26], - &step3[29], &step3[30]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - // Stage 7 - { - const __m128i out_02_0 = 
_mm_unpacklo_epi16(step3[ 8], step3[15]); - const __m128i out_02_1 = _mm_unpackhi_epi16(step3[ 8], step3[15]); - const __m128i out_18_0 = _mm_unpacklo_epi16(step3[ 9], step3[14]); - const __m128i out_18_1 = _mm_unpackhi_epi16(step3[ 9], step3[14]); - const __m128i out_10_0 = _mm_unpacklo_epi16(step3[10], step3[13]); - const __m128i out_10_1 = _mm_unpackhi_epi16(step3[10], step3[13]); - const __m128i out_26_0 = _mm_unpacklo_epi16(step3[11], step3[12]); - const __m128i out_26_1 = _mm_unpackhi_epi16(step3[11], step3[12]); - const __m128i out_02_2 = _mm_madd_epi16(out_02_0, k__cospi_p30_p02); - const __m128i out_02_3 = _mm_madd_epi16(out_02_1, k__cospi_p30_p02); - const __m128i out_18_2 = _mm_madd_epi16(out_18_0, k__cospi_p14_p18); - const __m128i out_18_3 = _mm_madd_epi16(out_18_1, k__cospi_p14_p18); - const __m128i out_10_2 = _mm_madd_epi16(out_10_0, k__cospi_p22_p10); - const __m128i out_10_3 = _mm_madd_epi16(out_10_1, k__cospi_p22_p10); - const __m128i out_26_2 = _mm_madd_epi16(out_26_0, k__cospi_p06_p26); - const __m128i out_26_3 = _mm_madd_epi16(out_26_1, k__cospi_p06_p26); - const __m128i out_06_2 = _mm_madd_epi16(out_26_0, k__cospi_m26_p06); - const __m128i out_06_3 = _mm_madd_epi16(out_26_1, k__cospi_m26_p06); - const __m128i out_22_2 = _mm_madd_epi16(out_10_0, k__cospi_m10_p22); - const __m128i out_22_3 = _mm_madd_epi16(out_10_1, k__cospi_m10_p22); - const __m128i out_14_2 = _mm_madd_epi16(out_18_0, k__cospi_m18_p14); - const __m128i out_14_3 = _mm_madd_epi16(out_18_1, k__cospi_m18_p14); - const __m128i out_30_2 = _mm_madd_epi16(out_02_0, k__cospi_m02_p30); - const __m128i out_30_3 = _mm_madd_epi16(out_02_1, k__cospi_m02_p30); - // dct_const_round_shift - const __m128i out_02_4 = _mm_add_epi32(out_02_2, k__DCT_CONST_ROUNDING); - const __m128i out_02_5 = _mm_add_epi32(out_02_3, k__DCT_CONST_ROUNDING); - const __m128i out_18_4 = _mm_add_epi32(out_18_2, k__DCT_CONST_ROUNDING); - const __m128i out_18_5 = _mm_add_epi32(out_18_3, k__DCT_CONST_ROUNDING); - const 
__m128i out_10_4 = _mm_add_epi32(out_10_2, k__DCT_CONST_ROUNDING); - const __m128i out_10_5 = _mm_add_epi32(out_10_3, k__DCT_CONST_ROUNDING); - const __m128i out_26_4 = _mm_add_epi32(out_26_2, k__DCT_CONST_ROUNDING); - const __m128i out_26_5 = _mm_add_epi32(out_26_3, k__DCT_CONST_ROUNDING); - const __m128i out_06_4 = _mm_add_epi32(out_06_2, k__DCT_CONST_ROUNDING); - const __m128i out_06_5 = _mm_add_epi32(out_06_3, k__DCT_CONST_ROUNDING); - const __m128i out_22_4 = _mm_add_epi32(out_22_2, k__DCT_CONST_ROUNDING); - const __m128i out_22_5 = _mm_add_epi32(out_22_3, k__DCT_CONST_ROUNDING); - const __m128i out_14_4 = _mm_add_epi32(out_14_2, k__DCT_CONST_ROUNDING); - const __m128i out_14_5 = _mm_add_epi32(out_14_3, k__DCT_CONST_ROUNDING); - const __m128i out_30_4 = _mm_add_epi32(out_30_2, k__DCT_CONST_ROUNDING); - const __m128i out_30_5 = _mm_add_epi32(out_30_3, k__DCT_CONST_ROUNDING); - const __m128i out_02_6 = _mm_srai_epi32(out_02_4, DCT_CONST_BITS); - const __m128i out_02_7 = _mm_srai_epi32(out_02_5, DCT_CONST_BITS); - const __m128i out_18_6 = _mm_srai_epi32(out_18_4, DCT_CONST_BITS); - const __m128i out_18_7 = _mm_srai_epi32(out_18_5, DCT_CONST_BITS); - const __m128i out_10_6 = _mm_srai_epi32(out_10_4, DCT_CONST_BITS); - const __m128i out_10_7 = _mm_srai_epi32(out_10_5, DCT_CONST_BITS); - const __m128i out_26_6 = _mm_srai_epi32(out_26_4, DCT_CONST_BITS); - const __m128i out_26_7 = _mm_srai_epi32(out_26_5, DCT_CONST_BITS); - const __m128i out_06_6 = _mm_srai_epi32(out_06_4, DCT_CONST_BITS); - const __m128i out_06_7 = _mm_srai_epi32(out_06_5, DCT_CONST_BITS); - const __m128i out_22_6 = _mm_srai_epi32(out_22_4, DCT_CONST_BITS); - const __m128i out_22_7 = _mm_srai_epi32(out_22_5, DCT_CONST_BITS); - const __m128i out_14_6 = _mm_srai_epi32(out_14_4, DCT_CONST_BITS); - const __m128i out_14_7 = _mm_srai_epi32(out_14_5, DCT_CONST_BITS); - const __m128i out_30_6 = _mm_srai_epi32(out_30_4, DCT_CONST_BITS); - const __m128i out_30_7 = _mm_srai_epi32(out_30_5, DCT_CONST_BITS); - 
// Combine - out[ 2] = _mm_packs_epi32(out_02_6, out_02_7); - out[18] = _mm_packs_epi32(out_18_6, out_18_7); - out[10] = _mm_packs_epi32(out_10_6, out_10_7); - out[26] = _mm_packs_epi32(out_26_6, out_26_7); - out[ 6] = _mm_packs_epi32(out_06_6, out_06_7); - out[22] = _mm_packs_epi32(out_22_6, out_22_7); - out[14] = _mm_packs_epi32(out_14_6, out_14_7); - out[30] = _mm_packs_epi32(out_30_6, out_30_7); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10], - &out[26], &out[6], &out[22], - &out[14], &out[30]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - step1[16] = ADD_EPI16(step3[17], step2[16]); - step1[17] = SUB_EPI16(step2[16], step3[17]); - step1[18] = SUB_EPI16(step2[19], step3[18]); - step1[19] = ADD_EPI16(step3[18], step2[19]); - step1[20] = ADD_EPI16(step3[21], step2[20]); - step1[21] = SUB_EPI16(step2[20], step3[21]); - step1[22] = SUB_EPI16(step2[23], step3[22]); - step1[23] = ADD_EPI16(step3[22], step2[23]); - step1[24] = ADD_EPI16(step3[25], step2[24]); - step1[25] = SUB_EPI16(step2[24], step3[25]); - step1[26] = SUB_EPI16(step2[27], step3[26]); - step1[27] = ADD_EPI16(step3[26], step2[27]); - step1[28] = ADD_EPI16(step3[29], step2[28]); - step1[29] = SUB_EPI16(step2[28], step3[29]); - step1[30] = SUB_EPI16(step2[31], step3[30]); - step1[31] = ADD_EPI16(step3[30], step2[31]); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x16( - &step1[16], &step1[17], &step1[18], &step1[19], - &step1[20], &step1[21], &step1[22], &step1[23], - &step1[24], &step1[25], &step1[26], &step1[27], - &step1[28], &step1[29], &step1[30], &step1[31]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - // Final stage --- outputs indices are 
bit-reversed. - { - const __m128i out_01_0 = _mm_unpacklo_epi16(step1[16], step1[31]); - const __m128i out_01_1 = _mm_unpackhi_epi16(step1[16], step1[31]); - const __m128i out_17_0 = _mm_unpacklo_epi16(step1[17], step1[30]); - const __m128i out_17_1 = _mm_unpackhi_epi16(step1[17], step1[30]); - const __m128i out_09_0 = _mm_unpacklo_epi16(step1[18], step1[29]); - const __m128i out_09_1 = _mm_unpackhi_epi16(step1[18], step1[29]); - const __m128i out_25_0 = _mm_unpacklo_epi16(step1[19], step1[28]); - const __m128i out_25_1 = _mm_unpackhi_epi16(step1[19], step1[28]); - const __m128i out_01_2 = _mm_madd_epi16(out_01_0, k__cospi_p31_p01); - const __m128i out_01_3 = _mm_madd_epi16(out_01_1, k__cospi_p31_p01); - const __m128i out_17_2 = _mm_madd_epi16(out_17_0, k__cospi_p15_p17); - const __m128i out_17_3 = _mm_madd_epi16(out_17_1, k__cospi_p15_p17); - const __m128i out_09_2 = _mm_madd_epi16(out_09_0, k__cospi_p23_p09); - const __m128i out_09_3 = _mm_madd_epi16(out_09_1, k__cospi_p23_p09); - const __m128i out_25_2 = _mm_madd_epi16(out_25_0, k__cospi_p07_p25); - const __m128i out_25_3 = _mm_madd_epi16(out_25_1, k__cospi_p07_p25); - const __m128i out_07_2 = _mm_madd_epi16(out_25_0, k__cospi_m25_p07); - const __m128i out_07_3 = _mm_madd_epi16(out_25_1, k__cospi_m25_p07); - const __m128i out_23_2 = _mm_madd_epi16(out_09_0, k__cospi_m09_p23); - const __m128i out_23_3 = _mm_madd_epi16(out_09_1, k__cospi_m09_p23); - const __m128i out_15_2 = _mm_madd_epi16(out_17_0, k__cospi_m17_p15); - const __m128i out_15_3 = _mm_madd_epi16(out_17_1, k__cospi_m17_p15); - const __m128i out_31_2 = _mm_madd_epi16(out_01_0, k__cospi_m01_p31); - const __m128i out_31_3 = _mm_madd_epi16(out_01_1, k__cospi_m01_p31); - // dct_const_round_shift - const __m128i out_01_4 = _mm_add_epi32(out_01_2, k__DCT_CONST_ROUNDING); - const __m128i out_01_5 = _mm_add_epi32(out_01_3, k__DCT_CONST_ROUNDING); - const __m128i out_17_4 = _mm_add_epi32(out_17_2, k__DCT_CONST_ROUNDING); - const __m128i out_17_5 = 
_mm_add_epi32(out_17_3, k__DCT_CONST_ROUNDING); - const __m128i out_09_4 = _mm_add_epi32(out_09_2, k__DCT_CONST_ROUNDING); - const __m128i out_09_5 = _mm_add_epi32(out_09_3, k__DCT_CONST_ROUNDING); - const __m128i out_25_4 = _mm_add_epi32(out_25_2, k__DCT_CONST_ROUNDING); - const __m128i out_25_5 = _mm_add_epi32(out_25_3, k__DCT_CONST_ROUNDING); - const __m128i out_07_4 = _mm_add_epi32(out_07_2, k__DCT_CONST_ROUNDING); - const __m128i out_07_5 = _mm_add_epi32(out_07_3, k__DCT_CONST_ROUNDING); - const __m128i out_23_4 = _mm_add_epi32(out_23_2, k__DCT_CONST_ROUNDING); - const __m128i out_23_5 = _mm_add_epi32(out_23_3, k__DCT_CONST_ROUNDING); - const __m128i out_15_4 = _mm_add_epi32(out_15_2, k__DCT_CONST_ROUNDING); - const __m128i out_15_5 = _mm_add_epi32(out_15_3, k__DCT_CONST_ROUNDING); - const __m128i out_31_4 = _mm_add_epi32(out_31_2, k__DCT_CONST_ROUNDING); - const __m128i out_31_5 = _mm_add_epi32(out_31_3, k__DCT_CONST_ROUNDING); - const __m128i out_01_6 = _mm_srai_epi32(out_01_4, DCT_CONST_BITS); - const __m128i out_01_7 = _mm_srai_epi32(out_01_5, DCT_CONST_BITS); - const __m128i out_17_6 = _mm_srai_epi32(out_17_4, DCT_CONST_BITS); - const __m128i out_17_7 = _mm_srai_epi32(out_17_5, DCT_CONST_BITS); - const __m128i out_09_6 = _mm_srai_epi32(out_09_4, DCT_CONST_BITS); - const __m128i out_09_7 = _mm_srai_epi32(out_09_5, DCT_CONST_BITS); - const __m128i out_25_6 = _mm_srai_epi32(out_25_4, DCT_CONST_BITS); - const __m128i out_25_7 = _mm_srai_epi32(out_25_5, DCT_CONST_BITS); - const __m128i out_07_6 = _mm_srai_epi32(out_07_4, DCT_CONST_BITS); - const __m128i out_07_7 = _mm_srai_epi32(out_07_5, DCT_CONST_BITS); - const __m128i out_23_6 = _mm_srai_epi32(out_23_4, DCT_CONST_BITS); - const __m128i out_23_7 = _mm_srai_epi32(out_23_5, DCT_CONST_BITS); - const __m128i out_15_6 = _mm_srai_epi32(out_15_4, DCT_CONST_BITS); - const __m128i out_15_7 = _mm_srai_epi32(out_15_5, DCT_CONST_BITS); - const __m128i out_31_6 = _mm_srai_epi32(out_31_4, DCT_CONST_BITS); - const __m128i 
out_31_7 = _mm_srai_epi32(out_31_5, DCT_CONST_BITS); - // Combine - out[ 1] = _mm_packs_epi32(out_01_6, out_01_7); - out[17] = _mm_packs_epi32(out_17_6, out_17_7); - out[ 9] = _mm_packs_epi32(out_09_6, out_09_7); - out[25] = _mm_packs_epi32(out_25_6, out_25_7); - out[ 7] = _mm_packs_epi32(out_07_6, out_07_7); - out[23] = _mm_packs_epi32(out_23_6, out_23_7); - out[15] = _mm_packs_epi32(out_15_6, out_15_7); - out[31] = _mm_packs_epi32(out_31_6, out_31_7); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9], - &out[25], &out[7], &out[23], - &out[15], &out[31]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i out_05_0 = _mm_unpacklo_epi16(step1[20], step1[27]); - const __m128i out_05_1 = _mm_unpackhi_epi16(step1[20], step1[27]); - const __m128i out_21_0 = _mm_unpacklo_epi16(step1[21], step1[26]); - const __m128i out_21_1 = _mm_unpackhi_epi16(step1[21], step1[26]); - const __m128i out_13_0 = _mm_unpacklo_epi16(step1[22], step1[25]); - const __m128i out_13_1 = _mm_unpackhi_epi16(step1[22], step1[25]); - const __m128i out_29_0 = _mm_unpacklo_epi16(step1[23], step1[24]); - const __m128i out_29_1 = _mm_unpackhi_epi16(step1[23], step1[24]); - const __m128i out_05_2 = _mm_madd_epi16(out_05_0, k__cospi_p27_p05); - const __m128i out_05_3 = _mm_madd_epi16(out_05_1, k__cospi_p27_p05); - const __m128i out_21_2 = _mm_madd_epi16(out_21_0, k__cospi_p11_p21); - const __m128i out_21_3 = _mm_madd_epi16(out_21_1, k__cospi_p11_p21); - const __m128i out_13_2 = _mm_madd_epi16(out_13_0, k__cospi_p19_p13); - const __m128i out_13_3 = _mm_madd_epi16(out_13_1, k__cospi_p19_p13); - const __m128i out_29_2 = _mm_madd_epi16(out_29_0, k__cospi_p03_p29); - const __m128i out_29_3 = _mm_madd_epi16(out_29_1, k__cospi_p03_p29); - const __m128i out_03_2 = _mm_madd_epi16(out_29_0, k__cospi_m29_p03); - const 
__m128i out_03_3 = _mm_madd_epi16(out_29_1, k__cospi_m29_p03); - const __m128i out_19_2 = _mm_madd_epi16(out_13_0, k__cospi_m13_p19); - const __m128i out_19_3 = _mm_madd_epi16(out_13_1, k__cospi_m13_p19); - const __m128i out_11_2 = _mm_madd_epi16(out_21_0, k__cospi_m21_p11); - const __m128i out_11_3 = _mm_madd_epi16(out_21_1, k__cospi_m21_p11); - const __m128i out_27_2 = _mm_madd_epi16(out_05_0, k__cospi_m05_p27); - const __m128i out_27_3 = _mm_madd_epi16(out_05_1, k__cospi_m05_p27); - // dct_const_round_shift - const __m128i out_05_4 = _mm_add_epi32(out_05_2, k__DCT_CONST_ROUNDING); - const __m128i out_05_5 = _mm_add_epi32(out_05_3, k__DCT_CONST_ROUNDING); - const __m128i out_21_4 = _mm_add_epi32(out_21_2, k__DCT_CONST_ROUNDING); - const __m128i out_21_5 = _mm_add_epi32(out_21_3, k__DCT_CONST_ROUNDING); - const __m128i out_13_4 = _mm_add_epi32(out_13_2, k__DCT_CONST_ROUNDING); - const __m128i out_13_5 = _mm_add_epi32(out_13_3, k__DCT_CONST_ROUNDING); - const __m128i out_29_4 = _mm_add_epi32(out_29_2, k__DCT_CONST_ROUNDING); - const __m128i out_29_5 = _mm_add_epi32(out_29_3, k__DCT_CONST_ROUNDING); - const __m128i out_03_4 = _mm_add_epi32(out_03_2, k__DCT_CONST_ROUNDING); - const __m128i out_03_5 = _mm_add_epi32(out_03_3, k__DCT_CONST_ROUNDING); - const __m128i out_19_4 = _mm_add_epi32(out_19_2, k__DCT_CONST_ROUNDING); - const __m128i out_19_5 = _mm_add_epi32(out_19_3, k__DCT_CONST_ROUNDING); - const __m128i out_11_4 = _mm_add_epi32(out_11_2, k__DCT_CONST_ROUNDING); - const __m128i out_11_5 = _mm_add_epi32(out_11_3, k__DCT_CONST_ROUNDING); - const __m128i out_27_4 = _mm_add_epi32(out_27_2, k__DCT_CONST_ROUNDING); - const __m128i out_27_5 = _mm_add_epi32(out_27_3, k__DCT_CONST_ROUNDING); - const __m128i out_05_6 = _mm_srai_epi32(out_05_4, DCT_CONST_BITS); - const __m128i out_05_7 = _mm_srai_epi32(out_05_5, DCT_CONST_BITS); - const __m128i out_21_6 = _mm_srai_epi32(out_21_4, DCT_CONST_BITS); - const __m128i out_21_7 = _mm_srai_epi32(out_21_5, DCT_CONST_BITS); - const 
__m128i out_13_6 = _mm_srai_epi32(out_13_4, DCT_CONST_BITS); - const __m128i out_13_7 = _mm_srai_epi32(out_13_5, DCT_CONST_BITS); - const __m128i out_29_6 = _mm_srai_epi32(out_29_4, DCT_CONST_BITS); - const __m128i out_29_7 = _mm_srai_epi32(out_29_5, DCT_CONST_BITS); - const __m128i out_03_6 = _mm_srai_epi32(out_03_4, DCT_CONST_BITS); - const __m128i out_03_7 = _mm_srai_epi32(out_03_5, DCT_CONST_BITS); - const __m128i out_19_6 = _mm_srai_epi32(out_19_4, DCT_CONST_BITS); - const __m128i out_19_7 = _mm_srai_epi32(out_19_5, DCT_CONST_BITS); - const __m128i out_11_6 = _mm_srai_epi32(out_11_4, DCT_CONST_BITS); - const __m128i out_11_7 = _mm_srai_epi32(out_11_5, DCT_CONST_BITS); - const __m128i out_27_6 = _mm_srai_epi32(out_27_4, DCT_CONST_BITS); - const __m128i out_27_7 = _mm_srai_epi32(out_27_5, DCT_CONST_BITS); - // Combine - out[ 5] = _mm_packs_epi32(out_05_6, out_05_7); - out[21] = _mm_packs_epi32(out_21_6, out_21_7); - out[13] = _mm_packs_epi32(out_13_6, out_13_7); - out[29] = _mm_packs_epi32(out_29_6, out_29_7); - out[ 3] = _mm_packs_epi32(out_03_6, out_03_7); - out[19] = _mm_packs_epi32(out_19_6, out_19_7); - out[11] = _mm_packs_epi32(out_11_6, out_11_7); - out[27] = _mm_packs_epi32(out_27_6, out_27_7); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13], - &out[29], &out[3], &out[19], - &out[11], &out[27]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } -#if FDCT32x32_HIGH_PRECISION - } else { - __m128i lstep1[64], lstep2[64], lstep3[64]; - __m128i u[32], v[32], sign[16]; - const __m128i K32One = _mm_set_epi32(1, 1, 1, 1); - // start using 32-bit operations - // stage 3 - { - // expanding to 32-bit length priori to addition operations - lstep2[ 0] = _mm_unpacklo_epi16(step2[ 0], kZero); - lstep2[ 1] = _mm_unpackhi_epi16(step2[ 0], kZero); - lstep2[ 2] = _mm_unpacklo_epi16(step2[ 
1], kZero); - lstep2[ 3] = _mm_unpackhi_epi16(step2[ 1], kZero); - lstep2[ 4] = _mm_unpacklo_epi16(step2[ 2], kZero); - lstep2[ 5] = _mm_unpackhi_epi16(step2[ 2], kZero); - lstep2[ 6] = _mm_unpacklo_epi16(step2[ 3], kZero); - lstep2[ 7] = _mm_unpackhi_epi16(step2[ 3], kZero); - lstep2[ 8] = _mm_unpacklo_epi16(step2[ 4], kZero); - lstep2[ 9] = _mm_unpackhi_epi16(step2[ 4], kZero); - lstep2[10] = _mm_unpacklo_epi16(step2[ 5], kZero); - lstep2[11] = _mm_unpackhi_epi16(step2[ 5], kZero); - lstep2[12] = _mm_unpacklo_epi16(step2[ 6], kZero); - lstep2[13] = _mm_unpackhi_epi16(step2[ 6], kZero); - lstep2[14] = _mm_unpacklo_epi16(step2[ 7], kZero); - lstep2[15] = _mm_unpackhi_epi16(step2[ 7], kZero); - lstep2[ 0] = _mm_madd_epi16(lstep2[ 0], kOne); - lstep2[ 1] = _mm_madd_epi16(lstep2[ 1], kOne); - lstep2[ 2] = _mm_madd_epi16(lstep2[ 2], kOne); - lstep2[ 3] = _mm_madd_epi16(lstep2[ 3], kOne); - lstep2[ 4] = _mm_madd_epi16(lstep2[ 4], kOne); - lstep2[ 5] = _mm_madd_epi16(lstep2[ 5], kOne); - lstep2[ 6] = _mm_madd_epi16(lstep2[ 6], kOne); - lstep2[ 7] = _mm_madd_epi16(lstep2[ 7], kOne); - lstep2[ 8] = _mm_madd_epi16(lstep2[ 8], kOne); - lstep2[ 9] = _mm_madd_epi16(lstep2[ 9], kOne); - lstep2[10] = _mm_madd_epi16(lstep2[10], kOne); - lstep2[11] = _mm_madd_epi16(lstep2[11], kOne); - lstep2[12] = _mm_madd_epi16(lstep2[12], kOne); - lstep2[13] = _mm_madd_epi16(lstep2[13], kOne); - lstep2[14] = _mm_madd_epi16(lstep2[14], kOne); - lstep2[15] = _mm_madd_epi16(lstep2[15], kOne); - - lstep3[ 0] = _mm_add_epi32(lstep2[14], lstep2[ 0]); - lstep3[ 1] = _mm_add_epi32(lstep2[15], lstep2[ 1]); - lstep3[ 2] = _mm_add_epi32(lstep2[12], lstep2[ 2]); - lstep3[ 3] = _mm_add_epi32(lstep2[13], lstep2[ 3]); - lstep3[ 4] = _mm_add_epi32(lstep2[10], lstep2[ 4]); - lstep3[ 5] = _mm_add_epi32(lstep2[11], lstep2[ 5]); - lstep3[ 6] = _mm_add_epi32(lstep2[ 8], lstep2[ 6]); - lstep3[ 7] = _mm_add_epi32(lstep2[ 9], lstep2[ 7]); - lstep3[ 8] = _mm_sub_epi32(lstep2[ 6], lstep2[ 8]); - lstep3[ 9] = 
_mm_sub_epi32(lstep2[ 7], lstep2[ 9]); - lstep3[10] = _mm_sub_epi32(lstep2[ 4], lstep2[10]); - lstep3[11] = _mm_sub_epi32(lstep2[ 5], lstep2[11]); - lstep3[12] = _mm_sub_epi32(lstep2[ 2], lstep2[12]); - lstep3[13] = _mm_sub_epi32(lstep2[ 3], lstep2[13]); - lstep3[14] = _mm_sub_epi32(lstep2[ 0], lstep2[14]); - lstep3[15] = _mm_sub_epi32(lstep2[ 1], lstep2[15]); - } - { - const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]); - const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]); - const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]); - const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]); - const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16); - const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16); - const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16); - const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16); - const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16); - const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16); - const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16); - const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16); - // dct_const_round_shift - const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING); - const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING); - const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING); - const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING); - const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING); - const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING); - const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING); - const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING); - lstep3[20] = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS); - lstep3[21] = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS); - lstep3[22] = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS); - lstep3[23] = 
_mm_srai_epi32(s3_11_5, DCT_CONST_BITS); - lstep3[24] = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS); - lstep3[25] = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS); - lstep3[26] = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS); - lstep3[27] = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS); - } - { - lstep2[40] = _mm_unpacklo_epi16(step2[20], kZero); - lstep2[41] = _mm_unpackhi_epi16(step2[20], kZero); - lstep2[42] = _mm_unpacklo_epi16(step2[21], kZero); - lstep2[43] = _mm_unpackhi_epi16(step2[21], kZero); - lstep2[44] = _mm_unpacklo_epi16(step2[22], kZero); - lstep2[45] = _mm_unpackhi_epi16(step2[22], kZero); - lstep2[46] = _mm_unpacklo_epi16(step2[23], kZero); - lstep2[47] = _mm_unpackhi_epi16(step2[23], kZero); - lstep2[48] = _mm_unpacklo_epi16(step2[24], kZero); - lstep2[49] = _mm_unpackhi_epi16(step2[24], kZero); - lstep2[50] = _mm_unpacklo_epi16(step2[25], kZero); - lstep2[51] = _mm_unpackhi_epi16(step2[25], kZero); - lstep2[52] = _mm_unpacklo_epi16(step2[26], kZero); - lstep2[53] = _mm_unpackhi_epi16(step2[26], kZero); - lstep2[54] = _mm_unpacklo_epi16(step2[27], kZero); - lstep2[55] = _mm_unpackhi_epi16(step2[27], kZero); - lstep2[40] = _mm_madd_epi16(lstep2[40], kOne); - lstep2[41] = _mm_madd_epi16(lstep2[41], kOne); - lstep2[42] = _mm_madd_epi16(lstep2[42], kOne); - lstep2[43] = _mm_madd_epi16(lstep2[43], kOne); - lstep2[44] = _mm_madd_epi16(lstep2[44], kOne); - lstep2[45] = _mm_madd_epi16(lstep2[45], kOne); - lstep2[46] = _mm_madd_epi16(lstep2[46], kOne); - lstep2[47] = _mm_madd_epi16(lstep2[47], kOne); - lstep2[48] = _mm_madd_epi16(lstep2[48], kOne); - lstep2[49] = _mm_madd_epi16(lstep2[49], kOne); - lstep2[50] = _mm_madd_epi16(lstep2[50], kOne); - lstep2[51] = _mm_madd_epi16(lstep2[51], kOne); - lstep2[52] = _mm_madd_epi16(lstep2[52], kOne); - lstep2[53] = _mm_madd_epi16(lstep2[53], kOne); - lstep2[54] = _mm_madd_epi16(lstep2[54], kOne); - lstep2[55] = _mm_madd_epi16(lstep2[55], kOne); - - lstep1[32] = _mm_unpacklo_epi16(step1[16], kZero); - lstep1[33] = 
_mm_unpackhi_epi16(step1[16], kZero); - lstep1[34] = _mm_unpacklo_epi16(step1[17], kZero); - lstep1[35] = _mm_unpackhi_epi16(step1[17], kZero); - lstep1[36] = _mm_unpacklo_epi16(step1[18], kZero); - lstep1[37] = _mm_unpackhi_epi16(step1[18], kZero); - lstep1[38] = _mm_unpacklo_epi16(step1[19], kZero); - lstep1[39] = _mm_unpackhi_epi16(step1[19], kZero); - lstep1[56] = _mm_unpacklo_epi16(step1[28], kZero); - lstep1[57] = _mm_unpackhi_epi16(step1[28], kZero); - lstep1[58] = _mm_unpacklo_epi16(step1[29], kZero); - lstep1[59] = _mm_unpackhi_epi16(step1[29], kZero); - lstep1[60] = _mm_unpacklo_epi16(step1[30], kZero); - lstep1[61] = _mm_unpackhi_epi16(step1[30], kZero); - lstep1[62] = _mm_unpacklo_epi16(step1[31], kZero); - lstep1[63] = _mm_unpackhi_epi16(step1[31], kZero); - lstep1[32] = _mm_madd_epi16(lstep1[32], kOne); - lstep1[33] = _mm_madd_epi16(lstep1[33], kOne); - lstep1[34] = _mm_madd_epi16(lstep1[34], kOne); - lstep1[35] = _mm_madd_epi16(lstep1[35], kOne); - lstep1[36] = _mm_madd_epi16(lstep1[36], kOne); - lstep1[37] = _mm_madd_epi16(lstep1[37], kOne); - lstep1[38] = _mm_madd_epi16(lstep1[38], kOne); - lstep1[39] = _mm_madd_epi16(lstep1[39], kOne); - lstep1[56] = _mm_madd_epi16(lstep1[56], kOne); - lstep1[57] = _mm_madd_epi16(lstep1[57], kOne); - lstep1[58] = _mm_madd_epi16(lstep1[58], kOne); - lstep1[59] = _mm_madd_epi16(lstep1[59], kOne); - lstep1[60] = _mm_madd_epi16(lstep1[60], kOne); - lstep1[61] = _mm_madd_epi16(lstep1[61], kOne); - lstep1[62] = _mm_madd_epi16(lstep1[62], kOne); - lstep1[63] = _mm_madd_epi16(lstep1[63], kOne); - - lstep3[32] = _mm_add_epi32(lstep2[46], lstep1[32]); - lstep3[33] = _mm_add_epi32(lstep2[47], lstep1[33]); - - lstep3[34] = _mm_add_epi32(lstep2[44], lstep1[34]); - lstep3[35] = _mm_add_epi32(lstep2[45], lstep1[35]); - lstep3[36] = _mm_add_epi32(lstep2[42], lstep1[36]); - lstep3[37] = _mm_add_epi32(lstep2[43], lstep1[37]); - lstep3[38] = _mm_add_epi32(lstep2[40], lstep1[38]); - lstep3[39] = _mm_add_epi32(lstep2[41], lstep1[39]); 
- lstep3[40] = _mm_sub_epi32(lstep1[38], lstep2[40]); - lstep3[41] = _mm_sub_epi32(lstep1[39], lstep2[41]); - lstep3[42] = _mm_sub_epi32(lstep1[36], lstep2[42]); - lstep3[43] = _mm_sub_epi32(lstep1[37], lstep2[43]); - lstep3[44] = _mm_sub_epi32(lstep1[34], lstep2[44]); - lstep3[45] = _mm_sub_epi32(lstep1[35], lstep2[45]); - lstep3[46] = _mm_sub_epi32(lstep1[32], lstep2[46]); - lstep3[47] = _mm_sub_epi32(lstep1[33], lstep2[47]); - lstep3[48] = _mm_sub_epi32(lstep1[62], lstep2[48]); - lstep3[49] = _mm_sub_epi32(lstep1[63], lstep2[49]); - lstep3[50] = _mm_sub_epi32(lstep1[60], lstep2[50]); - lstep3[51] = _mm_sub_epi32(lstep1[61], lstep2[51]); - lstep3[52] = _mm_sub_epi32(lstep1[58], lstep2[52]); - lstep3[53] = _mm_sub_epi32(lstep1[59], lstep2[53]); - lstep3[54] = _mm_sub_epi32(lstep1[56], lstep2[54]); - lstep3[55] = _mm_sub_epi32(lstep1[57], lstep2[55]); - lstep3[56] = _mm_add_epi32(lstep2[54], lstep1[56]); - lstep3[57] = _mm_add_epi32(lstep2[55], lstep1[57]); - lstep3[58] = _mm_add_epi32(lstep2[52], lstep1[58]); - lstep3[59] = _mm_add_epi32(lstep2[53], lstep1[59]); - lstep3[60] = _mm_add_epi32(lstep2[50], lstep1[60]); - lstep3[61] = _mm_add_epi32(lstep2[51], lstep1[61]); - lstep3[62] = _mm_add_epi32(lstep2[48], lstep1[62]); - lstep3[63] = _mm_add_epi32(lstep2[49], lstep1[63]); - } - - // stage 4 - { - // expanding to 32-bit length priori to addition operations - lstep2[16] = _mm_unpacklo_epi16(step2[ 8], kZero); - lstep2[17] = _mm_unpackhi_epi16(step2[ 8], kZero); - lstep2[18] = _mm_unpacklo_epi16(step2[ 9], kZero); - lstep2[19] = _mm_unpackhi_epi16(step2[ 9], kZero); - lstep2[28] = _mm_unpacklo_epi16(step2[14], kZero); - lstep2[29] = _mm_unpackhi_epi16(step2[14], kZero); - lstep2[30] = _mm_unpacklo_epi16(step2[15], kZero); - lstep2[31] = _mm_unpackhi_epi16(step2[15], kZero); - lstep2[16] = _mm_madd_epi16(lstep2[16], kOne); - lstep2[17] = _mm_madd_epi16(lstep2[17], kOne); - lstep2[18] = _mm_madd_epi16(lstep2[18], kOne); - lstep2[19] = _mm_madd_epi16(lstep2[19], 
kOne); - lstep2[28] = _mm_madd_epi16(lstep2[28], kOne); - lstep2[29] = _mm_madd_epi16(lstep2[29], kOne); - lstep2[30] = _mm_madd_epi16(lstep2[30], kOne); - lstep2[31] = _mm_madd_epi16(lstep2[31], kOne); - - lstep1[ 0] = _mm_add_epi32(lstep3[ 6], lstep3[ 0]); - lstep1[ 1] = _mm_add_epi32(lstep3[ 7], lstep3[ 1]); - lstep1[ 2] = _mm_add_epi32(lstep3[ 4], lstep3[ 2]); - lstep1[ 3] = _mm_add_epi32(lstep3[ 5], lstep3[ 3]); - lstep1[ 4] = _mm_sub_epi32(lstep3[ 2], lstep3[ 4]); - lstep1[ 5] = _mm_sub_epi32(lstep3[ 3], lstep3[ 5]); - lstep1[ 6] = _mm_sub_epi32(lstep3[ 0], lstep3[ 6]); - lstep1[ 7] = _mm_sub_epi32(lstep3[ 1], lstep3[ 7]); - lstep1[16] = _mm_add_epi32(lstep3[22], lstep2[16]); - lstep1[17] = _mm_add_epi32(lstep3[23], lstep2[17]); - lstep1[18] = _mm_add_epi32(lstep3[20], lstep2[18]); - lstep1[19] = _mm_add_epi32(lstep3[21], lstep2[19]); - lstep1[20] = _mm_sub_epi32(lstep2[18], lstep3[20]); - lstep1[21] = _mm_sub_epi32(lstep2[19], lstep3[21]); - lstep1[22] = _mm_sub_epi32(lstep2[16], lstep3[22]); - lstep1[23] = _mm_sub_epi32(lstep2[17], lstep3[23]); - lstep1[24] = _mm_sub_epi32(lstep2[30], lstep3[24]); - lstep1[25] = _mm_sub_epi32(lstep2[31], lstep3[25]); - lstep1[26] = _mm_sub_epi32(lstep2[28], lstep3[26]); - lstep1[27] = _mm_sub_epi32(lstep2[29], lstep3[27]); - lstep1[28] = _mm_add_epi32(lstep3[26], lstep2[28]); - lstep1[29] = _mm_add_epi32(lstep3[27], lstep2[29]); - lstep1[30] = _mm_add_epi32(lstep3[24], lstep2[30]); - lstep1[31] = _mm_add_epi32(lstep3[25], lstep2[31]); - } - { - // to be continued... - // - const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64); - const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64); - - u[0] = _mm_unpacklo_epi32(lstep3[12], lstep3[10]); - u[1] = _mm_unpackhi_epi32(lstep3[12], lstep3[10]); - u[2] = _mm_unpacklo_epi32(lstep3[13], lstep3[11]); - u[3] = _mm_unpackhi_epi32(lstep3[13], lstep3[11]); - - // TODO(jingning): manually inline k_madd_epi32_ to further hide - // instruction latency. 
- v[0] = k_madd_epi32(u[0], k32_p16_m16); - v[1] = k_madd_epi32(u[1], k32_p16_m16); - v[2] = k_madd_epi32(u[2], k32_p16_m16); - v[3] = k_madd_epi32(u[3], k32_p16_m16); - v[4] = k_madd_epi32(u[0], k32_p16_p16); - v[5] = k_madd_epi32(u[1], k32_p16_p16); - v[6] = k_madd_epi32(u[2], k32_p16_p16); - v[7] = k_madd_epi32(u[3], k32_p16_p16); -#if DCT_HIGH_BIT_DEPTH - overflow = k_check_epi32_overflow_8(&v[0], &v[1], &v[2], &v[3], - &v[4], &v[5], &v[6], &v[7], &kZero); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - u[0] = k_packs_epi64(v[0], v[1]); - u[1] = k_packs_epi64(v[2], v[3]); - u[2] = k_packs_epi64(v[4], v[5]); - u[3] = k_packs_epi64(v[6], v[7]); - - v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); - v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); - v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); - v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); - - lstep1[10] = _mm_srai_epi32(v[0], DCT_CONST_BITS); - lstep1[11] = _mm_srai_epi32(v[1], DCT_CONST_BITS); - lstep1[12] = _mm_srai_epi32(v[2], DCT_CONST_BITS); - lstep1[13] = _mm_srai_epi32(v[3], DCT_CONST_BITS); - } - { - const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64); - const __m128i k32_m24_m08 = pair_set_epi32(-cospi_24_64, -cospi_8_64); - const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64); - - u[ 0] = _mm_unpacklo_epi32(lstep3[36], lstep3[58]); - u[ 1] = _mm_unpackhi_epi32(lstep3[36], lstep3[58]); - u[ 2] = _mm_unpacklo_epi32(lstep3[37], lstep3[59]); - u[ 3] = _mm_unpackhi_epi32(lstep3[37], lstep3[59]); - u[ 4] = _mm_unpacklo_epi32(lstep3[38], lstep3[56]); - u[ 5] = _mm_unpackhi_epi32(lstep3[38], lstep3[56]); - u[ 6] = _mm_unpacklo_epi32(lstep3[39], lstep3[57]); - u[ 7] = _mm_unpackhi_epi32(lstep3[39], lstep3[57]); - u[ 8] = _mm_unpacklo_epi32(lstep3[40], lstep3[54]); - u[ 9] = _mm_unpackhi_epi32(lstep3[40], lstep3[54]); - u[10] = _mm_unpacklo_epi32(lstep3[41], lstep3[55]); - u[11] = 
_mm_unpackhi_epi32(lstep3[41], lstep3[55]); - u[12] = _mm_unpacklo_epi32(lstep3[42], lstep3[52]); - u[13] = _mm_unpackhi_epi32(lstep3[42], lstep3[52]); - u[14] = _mm_unpacklo_epi32(lstep3[43], lstep3[53]); - u[15] = _mm_unpackhi_epi32(lstep3[43], lstep3[53]); - - v[ 0] = k_madd_epi32(u[ 0], k32_m08_p24); - v[ 1] = k_madd_epi32(u[ 1], k32_m08_p24); - v[ 2] = k_madd_epi32(u[ 2], k32_m08_p24); - v[ 3] = k_madd_epi32(u[ 3], k32_m08_p24); - v[ 4] = k_madd_epi32(u[ 4], k32_m08_p24); - v[ 5] = k_madd_epi32(u[ 5], k32_m08_p24); - v[ 6] = k_madd_epi32(u[ 6], k32_m08_p24); - v[ 7] = k_madd_epi32(u[ 7], k32_m08_p24); - v[ 8] = k_madd_epi32(u[ 8], k32_m24_m08); - v[ 9] = k_madd_epi32(u[ 9], k32_m24_m08); - v[10] = k_madd_epi32(u[10], k32_m24_m08); - v[11] = k_madd_epi32(u[11], k32_m24_m08); - v[12] = k_madd_epi32(u[12], k32_m24_m08); - v[13] = k_madd_epi32(u[13], k32_m24_m08); - v[14] = k_madd_epi32(u[14], k32_m24_m08); - v[15] = k_madd_epi32(u[15], k32_m24_m08); - v[16] = k_madd_epi32(u[12], k32_m08_p24); - v[17] = k_madd_epi32(u[13], k32_m08_p24); - v[18] = k_madd_epi32(u[14], k32_m08_p24); - v[19] = k_madd_epi32(u[15], k32_m08_p24); - v[20] = k_madd_epi32(u[ 8], k32_m08_p24); - v[21] = k_madd_epi32(u[ 9], k32_m08_p24); - v[22] = k_madd_epi32(u[10], k32_m08_p24); - v[23] = k_madd_epi32(u[11], k32_m08_p24); - v[24] = k_madd_epi32(u[ 4], k32_p24_p08); - v[25] = k_madd_epi32(u[ 5], k32_p24_p08); - v[26] = k_madd_epi32(u[ 6], k32_p24_p08); - v[27] = k_madd_epi32(u[ 7], k32_p24_p08); - v[28] = k_madd_epi32(u[ 0], k32_p24_p08); - v[29] = k_madd_epi32(u[ 1], k32_p24_p08); - v[30] = k_madd_epi32(u[ 2], k32_p24_p08); - v[31] = k_madd_epi32(u[ 3], k32_p24_p08); - -#if DCT_HIGH_BIT_DEPTH - overflow = k_check_epi32_overflow_32( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], - &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], - 
&kZero); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - u[ 0] = k_packs_epi64(v[ 0], v[ 1]); - u[ 1] = k_packs_epi64(v[ 2], v[ 3]); - u[ 2] = k_packs_epi64(v[ 4], v[ 5]); - u[ 3] = k_packs_epi64(v[ 6], v[ 7]); - u[ 4] = k_packs_epi64(v[ 8], v[ 9]); - u[ 5] = k_packs_epi64(v[10], v[11]); - u[ 6] = k_packs_epi64(v[12], v[13]); - u[ 7] = k_packs_epi64(v[14], v[15]); - u[ 8] = k_packs_epi64(v[16], v[17]); - u[ 9] = k_packs_epi64(v[18], v[19]); - u[10] = k_packs_epi64(v[20], v[21]); - u[11] = k_packs_epi64(v[22], v[23]); - u[12] = k_packs_epi64(v[24], v[25]); - u[13] = k_packs_epi64(v[26], v[27]); - u[14] = k_packs_epi64(v[28], v[29]); - u[15] = k_packs_epi64(v[30], v[31]); - - v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); - v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); - v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); - v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); - v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); - v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); - v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); - v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); - v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); - v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); - v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); - v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); - v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); - v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); - v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); - v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - - lstep1[36] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS); - lstep1[37] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS); - lstep1[38] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS); - lstep1[39] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS); - lstep1[40] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS); - lstep1[41] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS); - lstep1[42] = _mm_srai_epi32(v[ 6], 
DCT_CONST_BITS); - lstep1[43] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS); - lstep1[52] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS); - lstep1[53] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS); - lstep1[54] = _mm_srai_epi32(v[10], DCT_CONST_BITS); - lstep1[55] = _mm_srai_epi32(v[11], DCT_CONST_BITS); - lstep1[56] = _mm_srai_epi32(v[12], DCT_CONST_BITS); - lstep1[57] = _mm_srai_epi32(v[13], DCT_CONST_BITS); - lstep1[58] = _mm_srai_epi32(v[14], DCT_CONST_BITS); - lstep1[59] = _mm_srai_epi32(v[15], DCT_CONST_BITS); - } - // stage 5 - { - lstep2[ 8] = _mm_add_epi32(lstep1[10], lstep3[ 8]); - lstep2[ 9] = _mm_add_epi32(lstep1[11], lstep3[ 9]); - lstep2[10] = _mm_sub_epi32(lstep3[ 8], lstep1[10]); - lstep2[11] = _mm_sub_epi32(lstep3[ 9], lstep1[11]); - lstep2[12] = _mm_sub_epi32(lstep3[14], lstep1[12]); - lstep2[13] = _mm_sub_epi32(lstep3[15], lstep1[13]); - lstep2[14] = _mm_add_epi32(lstep1[12], lstep3[14]); - lstep2[15] = _mm_add_epi32(lstep1[13], lstep3[15]); - } - { - const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64); - const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64); - const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64); - const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64); - - u[0] = _mm_unpacklo_epi32(lstep1[0], lstep1[2]); - u[1] = _mm_unpackhi_epi32(lstep1[0], lstep1[2]); - u[2] = _mm_unpacklo_epi32(lstep1[1], lstep1[3]); - u[3] = _mm_unpackhi_epi32(lstep1[1], lstep1[3]); - u[4] = _mm_unpacklo_epi32(lstep1[4], lstep1[6]); - u[5] = _mm_unpackhi_epi32(lstep1[4], lstep1[6]); - u[6] = _mm_unpacklo_epi32(lstep1[5], lstep1[7]); - u[7] = _mm_unpackhi_epi32(lstep1[5], lstep1[7]); - - // TODO(jingning): manually inline k_madd_epi32_ to further hide - // instruction latency. 
- v[ 0] = k_madd_epi32(u[0], k32_p16_p16); - v[ 1] = k_madd_epi32(u[1], k32_p16_p16); - v[ 2] = k_madd_epi32(u[2], k32_p16_p16); - v[ 3] = k_madd_epi32(u[3], k32_p16_p16); - v[ 4] = k_madd_epi32(u[0], k32_p16_m16); - v[ 5] = k_madd_epi32(u[1], k32_p16_m16); - v[ 6] = k_madd_epi32(u[2], k32_p16_m16); - v[ 7] = k_madd_epi32(u[3], k32_p16_m16); - v[ 8] = k_madd_epi32(u[4], k32_p24_p08); - v[ 9] = k_madd_epi32(u[5], k32_p24_p08); - v[10] = k_madd_epi32(u[6], k32_p24_p08); - v[11] = k_madd_epi32(u[7], k32_p24_p08); - v[12] = k_madd_epi32(u[4], k32_m08_p24); - v[13] = k_madd_epi32(u[5], k32_m08_p24); - v[14] = k_madd_epi32(u[6], k32_m08_p24); - v[15] = k_madd_epi32(u[7], k32_m08_p24); - -#if DCT_HIGH_BIT_DEPTH - overflow = k_check_epi32_overflow_16( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &kZero); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - u[0] = k_packs_epi64(v[0], v[1]); - u[1] = k_packs_epi64(v[2], v[3]); - u[2] = k_packs_epi64(v[4], v[5]); - u[3] = k_packs_epi64(v[6], v[7]); - u[4] = k_packs_epi64(v[8], v[9]); - u[5] = k_packs_epi64(v[10], v[11]); - u[6] = k_packs_epi64(v[12], v[13]); - u[7] = k_packs_epi64(v[14], v[15]); - - v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); - v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); - v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); - v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); - v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); - v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); - v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); - v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); - - u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); - u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); - u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); - u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); - u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); - u[5] = _mm_srai_epi32(v[5], 
DCT_CONST_BITS); - u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); - u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); - - sign[0] = _mm_cmplt_epi32(u[0], kZero); - sign[1] = _mm_cmplt_epi32(u[1], kZero); - sign[2] = _mm_cmplt_epi32(u[2], kZero); - sign[3] = _mm_cmplt_epi32(u[3], kZero); - sign[4] = _mm_cmplt_epi32(u[4], kZero); - sign[5] = _mm_cmplt_epi32(u[5], kZero); - sign[6] = _mm_cmplt_epi32(u[6], kZero); - sign[7] = _mm_cmplt_epi32(u[7], kZero); - - u[0] = _mm_sub_epi32(u[0], sign[0]); - u[1] = _mm_sub_epi32(u[1], sign[1]); - u[2] = _mm_sub_epi32(u[2], sign[2]); - u[3] = _mm_sub_epi32(u[3], sign[3]); - u[4] = _mm_sub_epi32(u[4], sign[4]); - u[5] = _mm_sub_epi32(u[5], sign[5]); - u[6] = _mm_sub_epi32(u[6], sign[6]); - u[7] = _mm_sub_epi32(u[7], sign[7]); - - u[0] = _mm_add_epi32(u[0], K32One); - u[1] = _mm_add_epi32(u[1], K32One); - u[2] = _mm_add_epi32(u[2], K32One); - u[3] = _mm_add_epi32(u[3], K32One); - u[4] = _mm_add_epi32(u[4], K32One); - u[5] = _mm_add_epi32(u[5], K32One); - u[6] = _mm_add_epi32(u[6], K32One); - u[7] = _mm_add_epi32(u[7], K32One); - - u[0] = _mm_srai_epi32(u[0], 2); - u[1] = _mm_srai_epi32(u[1], 2); - u[2] = _mm_srai_epi32(u[2], 2); - u[3] = _mm_srai_epi32(u[3], 2); - u[4] = _mm_srai_epi32(u[4], 2); - u[5] = _mm_srai_epi32(u[5], 2); - u[6] = _mm_srai_epi32(u[6], 2); - u[7] = _mm_srai_epi32(u[7], 2); - - // Combine - out[ 0] = _mm_packs_epi32(u[0], u[1]); - out[16] = _mm_packs_epi32(u[2], u[3]); - out[ 8] = _mm_packs_epi32(u[4], u[5]); - out[24] = _mm_packs_epi32(u[6], u[7]); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&out[0], &out[16], - &out[8], &out[24]); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64); - const __m128i k32_m24_m08 = pair_set_epi32(-cospi_24_64, -cospi_8_64); - const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64); - - u[0] = 
_mm_unpacklo_epi32(lstep1[18], lstep1[28]); - u[1] = _mm_unpackhi_epi32(lstep1[18], lstep1[28]); - u[2] = _mm_unpacklo_epi32(lstep1[19], lstep1[29]); - u[3] = _mm_unpackhi_epi32(lstep1[19], lstep1[29]); - u[4] = _mm_unpacklo_epi32(lstep1[20], lstep1[26]); - u[5] = _mm_unpackhi_epi32(lstep1[20], lstep1[26]); - u[6] = _mm_unpacklo_epi32(lstep1[21], lstep1[27]); - u[7] = _mm_unpackhi_epi32(lstep1[21], lstep1[27]); - - v[0] = k_madd_epi32(u[0], k32_m08_p24); - v[1] = k_madd_epi32(u[1], k32_m08_p24); - v[2] = k_madd_epi32(u[2], k32_m08_p24); - v[3] = k_madd_epi32(u[3], k32_m08_p24); - v[4] = k_madd_epi32(u[4], k32_m24_m08); - v[5] = k_madd_epi32(u[5], k32_m24_m08); - v[6] = k_madd_epi32(u[6], k32_m24_m08); - v[7] = k_madd_epi32(u[7], k32_m24_m08); - v[ 8] = k_madd_epi32(u[4], k32_m08_p24); - v[ 9] = k_madd_epi32(u[5], k32_m08_p24); - v[10] = k_madd_epi32(u[6], k32_m08_p24); - v[11] = k_madd_epi32(u[7], k32_m08_p24); - v[12] = k_madd_epi32(u[0], k32_p24_p08); - v[13] = k_madd_epi32(u[1], k32_p24_p08); - v[14] = k_madd_epi32(u[2], k32_p24_p08); - v[15] = k_madd_epi32(u[3], k32_p24_p08); - -#if DCT_HIGH_BIT_DEPTH - overflow = k_check_epi32_overflow_16( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &kZero); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - u[0] = k_packs_epi64(v[0], v[1]); - u[1] = k_packs_epi64(v[2], v[3]); - u[2] = k_packs_epi64(v[4], v[5]); - u[3] = k_packs_epi64(v[6], v[7]); - u[4] = k_packs_epi64(v[8], v[9]); - u[5] = k_packs_epi64(v[10], v[11]); - u[6] = k_packs_epi64(v[12], v[13]); - u[7] = k_packs_epi64(v[14], v[15]); - - u[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); - u[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); - u[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); - u[5] = 
_mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); - u[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); - u[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); - - lstep2[18] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - lstep2[19] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - lstep2[20] = _mm_srai_epi32(u[2], DCT_CONST_BITS); - lstep2[21] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - lstep2[26] = _mm_srai_epi32(u[4], DCT_CONST_BITS); - lstep2[27] = _mm_srai_epi32(u[5], DCT_CONST_BITS); - lstep2[28] = _mm_srai_epi32(u[6], DCT_CONST_BITS); - lstep2[29] = _mm_srai_epi32(u[7], DCT_CONST_BITS); - } - { - lstep2[32] = _mm_add_epi32(lstep1[38], lstep3[32]); - lstep2[33] = _mm_add_epi32(lstep1[39], lstep3[33]); - lstep2[34] = _mm_add_epi32(lstep1[36], lstep3[34]); - lstep2[35] = _mm_add_epi32(lstep1[37], lstep3[35]); - lstep2[36] = _mm_sub_epi32(lstep3[34], lstep1[36]); - lstep2[37] = _mm_sub_epi32(lstep3[35], lstep1[37]); - lstep2[38] = _mm_sub_epi32(lstep3[32], lstep1[38]); - lstep2[39] = _mm_sub_epi32(lstep3[33], lstep1[39]); - lstep2[40] = _mm_sub_epi32(lstep3[46], lstep1[40]); - lstep2[41] = _mm_sub_epi32(lstep3[47], lstep1[41]); - lstep2[42] = _mm_sub_epi32(lstep3[44], lstep1[42]); - lstep2[43] = _mm_sub_epi32(lstep3[45], lstep1[43]); - lstep2[44] = _mm_add_epi32(lstep1[42], lstep3[44]); - lstep2[45] = _mm_add_epi32(lstep1[43], lstep3[45]); - lstep2[46] = _mm_add_epi32(lstep1[40], lstep3[46]); - lstep2[47] = _mm_add_epi32(lstep1[41], lstep3[47]); - lstep2[48] = _mm_add_epi32(lstep1[54], lstep3[48]); - lstep2[49] = _mm_add_epi32(lstep1[55], lstep3[49]); - lstep2[50] = _mm_add_epi32(lstep1[52], lstep3[50]); - lstep2[51] = _mm_add_epi32(lstep1[53], lstep3[51]); - lstep2[52] = _mm_sub_epi32(lstep3[50], lstep1[52]); - lstep2[53] = _mm_sub_epi32(lstep3[51], lstep1[53]); - lstep2[54] = _mm_sub_epi32(lstep3[48], lstep1[54]); - lstep2[55] = _mm_sub_epi32(lstep3[49], lstep1[55]); - lstep2[56] = _mm_sub_epi32(lstep3[62], lstep1[56]); - lstep2[57] = _mm_sub_epi32(lstep3[63], lstep1[57]); - 
lstep2[58] = _mm_sub_epi32(lstep3[60], lstep1[58]); - lstep2[59] = _mm_sub_epi32(lstep3[61], lstep1[59]); - lstep2[60] = _mm_add_epi32(lstep1[58], lstep3[60]); - lstep2[61] = _mm_add_epi32(lstep1[59], lstep3[61]); - lstep2[62] = _mm_add_epi32(lstep1[56], lstep3[62]); - lstep2[63] = _mm_add_epi32(lstep1[57], lstep3[63]); - } - // stage 6 - { - const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64); - const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64); - const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64); - const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64); - - u[0] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]); - u[1] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]); - u[2] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]); - u[3] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]); - u[4] = _mm_unpacklo_epi32(lstep2[10], lstep2[12]); - u[5] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]); - u[6] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]); - u[7] = _mm_unpackhi_epi32(lstep2[11], lstep2[13]); - u[8] = _mm_unpacklo_epi32(lstep2[10], lstep2[12]); - u[9] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]); - u[10] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]); - u[11] = _mm_unpackhi_epi32(lstep2[11], lstep2[13]); - u[12] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]); - u[13] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]); - u[14] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]); - u[15] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]); - - v[0] = k_madd_epi32(u[0], k32_p28_p04); - v[1] = k_madd_epi32(u[1], k32_p28_p04); - v[2] = k_madd_epi32(u[2], k32_p28_p04); - v[3] = k_madd_epi32(u[3], k32_p28_p04); - v[4] = k_madd_epi32(u[4], k32_p12_p20); - v[5] = k_madd_epi32(u[5], k32_p12_p20); - v[6] = k_madd_epi32(u[6], k32_p12_p20); - v[7] = k_madd_epi32(u[7], k32_p12_p20); - v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12); - v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12); - v[10] = k_madd_epi32(u[10], k32_m20_p12); - v[11] = k_madd_epi32(u[11], 
k32_m20_p12); - v[12] = k_madd_epi32(u[12], k32_m04_p28); - v[13] = k_madd_epi32(u[13], k32_m04_p28); - v[14] = k_madd_epi32(u[14], k32_m04_p28); - v[15] = k_madd_epi32(u[15], k32_m04_p28); - -#if DCT_HIGH_BIT_DEPTH - overflow = k_check_epi32_overflow_16( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &kZero); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - u[0] = k_packs_epi64(v[0], v[1]); - u[1] = k_packs_epi64(v[2], v[3]); - u[2] = k_packs_epi64(v[4], v[5]); - u[3] = k_packs_epi64(v[6], v[7]); - u[4] = k_packs_epi64(v[8], v[9]); - u[5] = k_packs_epi64(v[10], v[11]); - u[6] = k_packs_epi64(v[12], v[13]); - u[7] = k_packs_epi64(v[14], v[15]); - - v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); - v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); - v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); - v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); - v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); - v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); - v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); - v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); - - u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); - u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); - u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); - u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); - u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); - u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); - u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); - u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); - - sign[0] = _mm_cmplt_epi32(u[0], kZero); - sign[1] = _mm_cmplt_epi32(u[1], kZero); - sign[2] = _mm_cmplt_epi32(u[2], kZero); - sign[3] = _mm_cmplt_epi32(u[3], kZero); - sign[4] = _mm_cmplt_epi32(u[4], kZero); - sign[5] = _mm_cmplt_epi32(u[5], kZero); - sign[6] = _mm_cmplt_epi32(u[6], kZero); - sign[7] = _mm_cmplt_epi32(u[7], kZero); - - u[0] = _mm_sub_epi32(u[0], sign[0]); - u[1] = 
_mm_sub_epi32(u[1], sign[1]); - u[2] = _mm_sub_epi32(u[2], sign[2]); - u[3] = _mm_sub_epi32(u[3], sign[3]); - u[4] = _mm_sub_epi32(u[4], sign[4]); - u[5] = _mm_sub_epi32(u[5], sign[5]); - u[6] = _mm_sub_epi32(u[6], sign[6]); - u[7] = _mm_sub_epi32(u[7], sign[7]); - - u[0] = _mm_add_epi32(u[0], K32One); - u[1] = _mm_add_epi32(u[1], K32One); - u[2] = _mm_add_epi32(u[2], K32One); - u[3] = _mm_add_epi32(u[3], K32One); - u[4] = _mm_add_epi32(u[4], K32One); - u[5] = _mm_add_epi32(u[5], K32One); - u[6] = _mm_add_epi32(u[6], K32One); - u[7] = _mm_add_epi32(u[7], K32One); - - u[0] = _mm_srai_epi32(u[0], 2); - u[1] = _mm_srai_epi32(u[1], 2); - u[2] = _mm_srai_epi32(u[2], 2); - u[3] = _mm_srai_epi32(u[3], 2); - u[4] = _mm_srai_epi32(u[4], 2); - u[5] = _mm_srai_epi32(u[5], 2); - u[6] = _mm_srai_epi32(u[6], 2); - u[7] = _mm_srai_epi32(u[7], 2); - - out[ 4] = _mm_packs_epi32(u[0], u[1]); - out[20] = _mm_packs_epi32(u[2], u[3]); - out[12] = _mm_packs_epi32(u[4], u[5]); - out[28] = _mm_packs_epi32(u[6], u[7]); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&out[4], &out[20], - &out[12], &out[28]); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - lstep3[16] = _mm_add_epi32(lstep2[18], lstep1[16]); - lstep3[17] = _mm_add_epi32(lstep2[19], lstep1[17]); - lstep3[18] = _mm_sub_epi32(lstep1[16], lstep2[18]); - lstep3[19] = _mm_sub_epi32(lstep1[17], lstep2[19]); - lstep3[20] = _mm_sub_epi32(lstep1[22], lstep2[20]); - lstep3[21] = _mm_sub_epi32(lstep1[23], lstep2[21]); - lstep3[22] = _mm_add_epi32(lstep2[20], lstep1[22]); - lstep3[23] = _mm_add_epi32(lstep2[21], lstep1[23]); - lstep3[24] = _mm_add_epi32(lstep2[26], lstep1[24]); - lstep3[25] = _mm_add_epi32(lstep2[27], lstep1[25]); - lstep3[26] = _mm_sub_epi32(lstep1[24], lstep2[26]); - lstep3[27] = _mm_sub_epi32(lstep1[25], lstep2[27]); - lstep3[28] = _mm_sub_epi32(lstep1[30], lstep2[28]); - lstep3[29] = _mm_sub_epi32(lstep1[31], lstep2[29]); - 
lstep3[30] = _mm_add_epi32(lstep2[28], lstep1[30]); - lstep3[31] = _mm_add_epi32(lstep2[29], lstep1[31]); - } - { - const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64); - const __m128i k32_m28_m04 = pair_set_epi32(-cospi_28_64, -cospi_4_64); - const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64); - const __m128i k32_m12_m20 = pair_set_epi32(-cospi_12_64, - -cospi_20_64); - const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64); - const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64); - - u[ 0] = _mm_unpacklo_epi32(lstep2[34], lstep2[60]); - u[ 1] = _mm_unpackhi_epi32(lstep2[34], lstep2[60]); - u[ 2] = _mm_unpacklo_epi32(lstep2[35], lstep2[61]); - u[ 3] = _mm_unpackhi_epi32(lstep2[35], lstep2[61]); - u[ 4] = _mm_unpacklo_epi32(lstep2[36], lstep2[58]); - u[ 5] = _mm_unpackhi_epi32(lstep2[36], lstep2[58]); - u[ 6] = _mm_unpacklo_epi32(lstep2[37], lstep2[59]); - u[ 7] = _mm_unpackhi_epi32(lstep2[37], lstep2[59]); - u[ 8] = _mm_unpacklo_epi32(lstep2[42], lstep2[52]); - u[ 9] = _mm_unpackhi_epi32(lstep2[42], lstep2[52]); - u[10] = _mm_unpacklo_epi32(lstep2[43], lstep2[53]); - u[11] = _mm_unpackhi_epi32(lstep2[43], lstep2[53]); - u[12] = _mm_unpacklo_epi32(lstep2[44], lstep2[50]); - u[13] = _mm_unpackhi_epi32(lstep2[44], lstep2[50]); - u[14] = _mm_unpacklo_epi32(lstep2[45], lstep2[51]); - u[15] = _mm_unpackhi_epi32(lstep2[45], lstep2[51]); - - v[ 0] = k_madd_epi32(u[ 0], k32_m04_p28); - v[ 1] = k_madd_epi32(u[ 1], k32_m04_p28); - v[ 2] = k_madd_epi32(u[ 2], k32_m04_p28); - v[ 3] = k_madd_epi32(u[ 3], k32_m04_p28); - v[ 4] = k_madd_epi32(u[ 4], k32_m28_m04); - v[ 5] = k_madd_epi32(u[ 5], k32_m28_m04); - v[ 6] = k_madd_epi32(u[ 6], k32_m28_m04); - v[ 7] = k_madd_epi32(u[ 7], k32_m28_m04); - v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12); - v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12); - v[10] = k_madd_epi32(u[10], k32_m20_p12); - v[11] = k_madd_epi32(u[11], k32_m20_p12); - v[12] = k_madd_epi32(u[12], k32_m12_m20); - v[13] 
= k_madd_epi32(u[13], k32_m12_m20); - v[14] = k_madd_epi32(u[14], k32_m12_m20); - v[15] = k_madd_epi32(u[15], k32_m12_m20); - v[16] = k_madd_epi32(u[12], k32_m20_p12); - v[17] = k_madd_epi32(u[13], k32_m20_p12); - v[18] = k_madd_epi32(u[14], k32_m20_p12); - v[19] = k_madd_epi32(u[15], k32_m20_p12); - v[20] = k_madd_epi32(u[ 8], k32_p12_p20); - v[21] = k_madd_epi32(u[ 9], k32_p12_p20); - v[22] = k_madd_epi32(u[10], k32_p12_p20); - v[23] = k_madd_epi32(u[11], k32_p12_p20); - v[24] = k_madd_epi32(u[ 4], k32_m04_p28); - v[25] = k_madd_epi32(u[ 5], k32_m04_p28); - v[26] = k_madd_epi32(u[ 6], k32_m04_p28); - v[27] = k_madd_epi32(u[ 7], k32_m04_p28); - v[28] = k_madd_epi32(u[ 0], k32_p28_p04); - v[29] = k_madd_epi32(u[ 1], k32_p28_p04); - v[30] = k_madd_epi32(u[ 2], k32_p28_p04); - v[31] = k_madd_epi32(u[ 3], k32_p28_p04); - -#if DCT_HIGH_BIT_DEPTH - overflow = k_check_epi32_overflow_32( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], - &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], - &kZero); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - u[ 0] = k_packs_epi64(v[ 0], v[ 1]); - u[ 1] = k_packs_epi64(v[ 2], v[ 3]); - u[ 2] = k_packs_epi64(v[ 4], v[ 5]); - u[ 3] = k_packs_epi64(v[ 6], v[ 7]); - u[ 4] = k_packs_epi64(v[ 8], v[ 9]); - u[ 5] = k_packs_epi64(v[10], v[11]); - u[ 6] = k_packs_epi64(v[12], v[13]); - u[ 7] = k_packs_epi64(v[14], v[15]); - u[ 8] = k_packs_epi64(v[16], v[17]); - u[ 9] = k_packs_epi64(v[18], v[19]); - u[10] = k_packs_epi64(v[20], v[21]); - u[11] = k_packs_epi64(v[22], v[23]); - u[12] = k_packs_epi64(v[24], v[25]); - u[13] = k_packs_epi64(v[26], v[27]); - u[14] = k_packs_epi64(v[28], v[29]); - u[15] = k_packs_epi64(v[30], v[31]); - - v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); - v[ 1] = _mm_add_epi32(u[ 1], 
k__DCT_CONST_ROUNDING); - v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); - v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); - v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); - v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); - v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); - v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); - v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); - v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); - v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); - v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); - v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); - v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); - v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); - v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - - lstep3[34] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS); - lstep3[35] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS); - lstep3[36] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS); - lstep3[37] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS); - lstep3[42] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS); - lstep3[43] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS); - lstep3[44] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS); - lstep3[45] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS); - lstep3[50] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS); - lstep3[51] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS); - lstep3[52] = _mm_srai_epi32(v[10], DCT_CONST_BITS); - lstep3[53] = _mm_srai_epi32(v[11], DCT_CONST_BITS); - lstep3[58] = _mm_srai_epi32(v[12], DCT_CONST_BITS); - lstep3[59] = _mm_srai_epi32(v[13], DCT_CONST_BITS); - lstep3[60] = _mm_srai_epi32(v[14], DCT_CONST_BITS); - lstep3[61] = _mm_srai_epi32(v[15], DCT_CONST_BITS); - } - // stage 7 - { - const __m128i k32_p30_p02 = pair_set_epi32(cospi_30_64, cospi_2_64); - const __m128i k32_p14_p18 = pair_set_epi32(cospi_14_64, cospi_18_64); - const __m128i k32_p22_p10 = pair_set_epi32(cospi_22_64, cospi_10_64); - const __m128i k32_p06_p26 = pair_set_epi32(cospi_6_64, cospi_26_64); - const __m128i k32_m26_p06 = 
pair_set_epi32(-cospi_26_64, cospi_6_64); - const __m128i k32_m10_p22 = pair_set_epi32(-cospi_10_64, cospi_22_64); - const __m128i k32_m18_p14 = pair_set_epi32(-cospi_18_64, cospi_14_64); - const __m128i k32_m02_p30 = pair_set_epi32(-cospi_2_64, cospi_30_64); - - u[ 0] = _mm_unpacklo_epi32(lstep3[16], lstep3[30]); - u[ 1] = _mm_unpackhi_epi32(lstep3[16], lstep3[30]); - u[ 2] = _mm_unpacklo_epi32(lstep3[17], lstep3[31]); - u[ 3] = _mm_unpackhi_epi32(lstep3[17], lstep3[31]); - u[ 4] = _mm_unpacklo_epi32(lstep3[18], lstep3[28]); - u[ 5] = _mm_unpackhi_epi32(lstep3[18], lstep3[28]); - u[ 6] = _mm_unpacklo_epi32(lstep3[19], lstep3[29]); - u[ 7] = _mm_unpackhi_epi32(lstep3[19], lstep3[29]); - u[ 8] = _mm_unpacklo_epi32(lstep3[20], lstep3[26]); - u[ 9] = _mm_unpackhi_epi32(lstep3[20], lstep3[26]); - u[10] = _mm_unpacklo_epi32(lstep3[21], lstep3[27]); - u[11] = _mm_unpackhi_epi32(lstep3[21], lstep3[27]); - u[12] = _mm_unpacklo_epi32(lstep3[22], lstep3[24]); - u[13] = _mm_unpackhi_epi32(lstep3[22], lstep3[24]); - u[14] = _mm_unpacklo_epi32(lstep3[23], lstep3[25]); - u[15] = _mm_unpackhi_epi32(lstep3[23], lstep3[25]); - - v[ 0] = k_madd_epi32(u[ 0], k32_p30_p02); - v[ 1] = k_madd_epi32(u[ 1], k32_p30_p02); - v[ 2] = k_madd_epi32(u[ 2], k32_p30_p02); - v[ 3] = k_madd_epi32(u[ 3], k32_p30_p02); - v[ 4] = k_madd_epi32(u[ 4], k32_p14_p18); - v[ 5] = k_madd_epi32(u[ 5], k32_p14_p18); - v[ 6] = k_madd_epi32(u[ 6], k32_p14_p18); - v[ 7] = k_madd_epi32(u[ 7], k32_p14_p18); - v[ 8] = k_madd_epi32(u[ 8], k32_p22_p10); - v[ 9] = k_madd_epi32(u[ 9], k32_p22_p10); - v[10] = k_madd_epi32(u[10], k32_p22_p10); - v[11] = k_madd_epi32(u[11], k32_p22_p10); - v[12] = k_madd_epi32(u[12], k32_p06_p26); - v[13] = k_madd_epi32(u[13], k32_p06_p26); - v[14] = k_madd_epi32(u[14], k32_p06_p26); - v[15] = k_madd_epi32(u[15], k32_p06_p26); - v[16] = k_madd_epi32(u[12], k32_m26_p06); - v[17] = k_madd_epi32(u[13], k32_m26_p06); - v[18] = k_madd_epi32(u[14], k32_m26_p06); - v[19] = k_madd_epi32(u[15], 
k32_m26_p06); - v[20] = k_madd_epi32(u[ 8], k32_m10_p22); - v[21] = k_madd_epi32(u[ 9], k32_m10_p22); - v[22] = k_madd_epi32(u[10], k32_m10_p22); - v[23] = k_madd_epi32(u[11], k32_m10_p22); - v[24] = k_madd_epi32(u[ 4], k32_m18_p14); - v[25] = k_madd_epi32(u[ 5], k32_m18_p14); - v[26] = k_madd_epi32(u[ 6], k32_m18_p14); - v[27] = k_madd_epi32(u[ 7], k32_m18_p14); - v[28] = k_madd_epi32(u[ 0], k32_m02_p30); - v[29] = k_madd_epi32(u[ 1], k32_m02_p30); - v[30] = k_madd_epi32(u[ 2], k32_m02_p30); - v[31] = k_madd_epi32(u[ 3], k32_m02_p30); - -#if DCT_HIGH_BIT_DEPTH - overflow = k_check_epi32_overflow_32( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], - &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], - &kZero); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - u[ 0] = k_packs_epi64(v[ 0], v[ 1]); - u[ 1] = k_packs_epi64(v[ 2], v[ 3]); - u[ 2] = k_packs_epi64(v[ 4], v[ 5]); - u[ 3] = k_packs_epi64(v[ 6], v[ 7]); - u[ 4] = k_packs_epi64(v[ 8], v[ 9]); - u[ 5] = k_packs_epi64(v[10], v[11]); - u[ 6] = k_packs_epi64(v[12], v[13]); - u[ 7] = k_packs_epi64(v[14], v[15]); - u[ 8] = k_packs_epi64(v[16], v[17]); - u[ 9] = k_packs_epi64(v[18], v[19]); - u[10] = k_packs_epi64(v[20], v[21]); - u[11] = k_packs_epi64(v[22], v[23]); - u[12] = k_packs_epi64(v[24], v[25]); - u[13] = k_packs_epi64(v[26], v[27]); - u[14] = k_packs_epi64(v[28], v[29]); - u[15] = k_packs_epi64(v[30], v[31]); - - v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); - v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); - v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); - v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); - v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); - v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); - v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); 
- v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); - v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); - v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); - v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); - v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); - v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); - v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); - v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); - v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - - u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS); - u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS); - u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS); - u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS); - u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS); - u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS); - u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS); - u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS); - u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS); - u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS); - u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); - u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); - u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); - u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS); - u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); - u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); - - v[ 0] = _mm_cmplt_epi32(u[ 0], kZero); - v[ 1] = _mm_cmplt_epi32(u[ 1], kZero); - v[ 2] = _mm_cmplt_epi32(u[ 2], kZero); - v[ 3] = _mm_cmplt_epi32(u[ 3], kZero); - v[ 4] = _mm_cmplt_epi32(u[ 4], kZero); - v[ 5] = _mm_cmplt_epi32(u[ 5], kZero); - v[ 6] = _mm_cmplt_epi32(u[ 6], kZero); - v[ 7] = _mm_cmplt_epi32(u[ 7], kZero); - v[ 8] = _mm_cmplt_epi32(u[ 8], kZero); - v[ 9] = _mm_cmplt_epi32(u[ 9], kZero); - v[10] = _mm_cmplt_epi32(u[10], kZero); - v[11] = _mm_cmplt_epi32(u[11], kZero); - v[12] = _mm_cmplt_epi32(u[12], kZero); - v[13] = _mm_cmplt_epi32(u[13], kZero); - v[14] = _mm_cmplt_epi32(u[14], kZero); - v[15] = _mm_cmplt_epi32(u[15], kZero); - - u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]); - u[ 1] = 
_mm_sub_epi32(u[ 1], v[ 1]); - u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]); - u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]); - u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]); - u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]); - u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]); - u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]); - u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]); - u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]); - u[10] = _mm_sub_epi32(u[10], v[10]); - u[11] = _mm_sub_epi32(u[11], v[11]); - u[12] = _mm_sub_epi32(u[12], v[12]); - u[13] = _mm_sub_epi32(u[13], v[13]); - u[14] = _mm_sub_epi32(u[14], v[14]); - u[15] = _mm_sub_epi32(u[15], v[15]); - - v[ 0] = _mm_add_epi32(u[ 0], K32One); - v[ 1] = _mm_add_epi32(u[ 1], K32One); - v[ 2] = _mm_add_epi32(u[ 2], K32One); - v[ 3] = _mm_add_epi32(u[ 3], K32One); - v[ 4] = _mm_add_epi32(u[ 4], K32One); - v[ 5] = _mm_add_epi32(u[ 5], K32One); - v[ 6] = _mm_add_epi32(u[ 6], K32One); - v[ 7] = _mm_add_epi32(u[ 7], K32One); - v[ 8] = _mm_add_epi32(u[ 8], K32One); - v[ 9] = _mm_add_epi32(u[ 9], K32One); - v[10] = _mm_add_epi32(u[10], K32One); - v[11] = _mm_add_epi32(u[11], K32One); - v[12] = _mm_add_epi32(u[12], K32One); - v[13] = _mm_add_epi32(u[13], K32One); - v[14] = _mm_add_epi32(u[14], K32One); - v[15] = _mm_add_epi32(u[15], K32One); - - u[ 0] = _mm_srai_epi32(v[ 0], 2); - u[ 1] = _mm_srai_epi32(v[ 1], 2); - u[ 2] = _mm_srai_epi32(v[ 2], 2); - u[ 3] = _mm_srai_epi32(v[ 3], 2); - u[ 4] = _mm_srai_epi32(v[ 4], 2); - u[ 5] = _mm_srai_epi32(v[ 5], 2); - u[ 6] = _mm_srai_epi32(v[ 6], 2); - u[ 7] = _mm_srai_epi32(v[ 7], 2); - u[ 8] = _mm_srai_epi32(v[ 8], 2); - u[ 9] = _mm_srai_epi32(v[ 9], 2); - u[10] = _mm_srai_epi32(v[10], 2); - u[11] = _mm_srai_epi32(v[11], 2); - u[12] = _mm_srai_epi32(v[12], 2); - u[13] = _mm_srai_epi32(v[13], 2); - u[14] = _mm_srai_epi32(v[14], 2); - u[15] = _mm_srai_epi32(v[15], 2); - - out[ 2] = _mm_packs_epi32(u[0], u[1]); - out[18] = _mm_packs_epi32(u[2], u[3]); - out[10] = _mm_packs_epi32(u[4], u[5]); - out[26] = _mm_packs_epi32(u[6], u[7]); - out[ 6] = _mm_packs_epi32(u[8], 
u[9]); - out[22] = _mm_packs_epi32(u[10], u[11]); - out[14] = _mm_packs_epi32(u[12], u[13]); - out[30] = _mm_packs_epi32(u[14], u[15]); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10], - &out[26], &out[6], &out[22], - &out[14], &out[30]); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - lstep1[32] = _mm_add_epi32(lstep3[34], lstep2[32]); - lstep1[33] = _mm_add_epi32(lstep3[35], lstep2[33]); - lstep1[34] = _mm_sub_epi32(lstep2[32], lstep3[34]); - lstep1[35] = _mm_sub_epi32(lstep2[33], lstep3[35]); - lstep1[36] = _mm_sub_epi32(lstep2[38], lstep3[36]); - lstep1[37] = _mm_sub_epi32(lstep2[39], lstep3[37]); - lstep1[38] = _mm_add_epi32(lstep3[36], lstep2[38]); - lstep1[39] = _mm_add_epi32(lstep3[37], lstep2[39]); - lstep1[40] = _mm_add_epi32(lstep3[42], lstep2[40]); - lstep1[41] = _mm_add_epi32(lstep3[43], lstep2[41]); - lstep1[42] = _mm_sub_epi32(lstep2[40], lstep3[42]); - lstep1[43] = _mm_sub_epi32(lstep2[41], lstep3[43]); - lstep1[44] = _mm_sub_epi32(lstep2[46], lstep3[44]); - lstep1[45] = _mm_sub_epi32(lstep2[47], lstep3[45]); - lstep1[46] = _mm_add_epi32(lstep3[44], lstep2[46]); - lstep1[47] = _mm_add_epi32(lstep3[45], lstep2[47]); - lstep1[48] = _mm_add_epi32(lstep3[50], lstep2[48]); - lstep1[49] = _mm_add_epi32(lstep3[51], lstep2[49]); - lstep1[50] = _mm_sub_epi32(lstep2[48], lstep3[50]); - lstep1[51] = _mm_sub_epi32(lstep2[49], lstep3[51]); - lstep1[52] = _mm_sub_epi32(lstep2[54], lstep3[52]); - lstep1[53] = _mm_sub_epi32(lstep2[55], lstep3[53]); - lstep1[54] = _mm_add_epi32(lstep3[52], lstep2[54]); - lstep1[55] = _mm_add_epi32(lstep3[53], lstep2[55]); - lstep1[56] = _mm_add_epi32(lstep3[58], lstep2[56]); - lstep1[57] = _mm_add_epi32(lstep3[59], lstep2[57]); - lstep1[58] = _mm_sub_epi32(lstep2[56], lstep3[58]); - lstep1[59] = _mm_sub_epi32(lstep2[57], lstep3[59]); - lstep1[60] = _mm_sub_epi32(lstep2[62], lstep3[60]); - lstep1[61] = 
_mm_sub_epi32(lstep2[63], lstep3[61]); - lstep1[62] = _mm_add_epi32(lstep3[60], lstep2[62]); - lstep1[63] = _mm_add_epi32(lstep3[61], lstep2[63]); - } - // stage 8 - { - const __m128i k32_p31_p01 = pair_set_epi32(cospi_31_64, cospi_1_64); - const __m128i k32_p15_p17 = pair_set_epi32(cospi_15_64, cospi_17_64); - const __m128i k32_p23_p09 = pair_set_epi32(cospi_23_64, cospi_9_64); - const __m128i k32_p07_p25 = pair_set_epi32(cospi_7_64, cospi_25_64); - const __m128i k32_m25_p07 = pair_set_epi32(-cospi_25_64, cospi_7_64); - const __m128i k32_m09_p23 = pair_set_epi32(-cospi_9_64, cospi_23_64); - const __m128i k32_m17_p15 = pair_set_epi32(-cospi_17_64, cospi_15_64); - const __m128i k32_m01_p31 = pair_set_epi32(-cospi_1_64, cospi_31_64); - - u[ 0] = _mm_unpacklo_epi32(lstep1[32], lstep1[62]); - u[ 1] = _mm_unpackhi_epi32(lstep1[32], lstep1[62]); - u[ 2] = _mm_unpacklo_epi32(lstep1[33], lstep1[63]); - u[ 3] = _mm_unpackhi_epi32(lstep1[33], lstep1[63]); - u[ 4] = _mm_unpacklo_epi32(lstep1[34], lstep1[60]); - u[ 5] = _mm_unpackhi_epi32(lstep1[34], lstep1[60]); - u[ 6] = _mm_unpacklo_epi32(lstep1[35], lstep1[61]); - u[ 7] = _mm_unpackhi_epi32(lstep1[35], lstep1[61]); - u[ 8] = _mm_unpacklo_epi32(lstep1[36], lstep1[58]); - u[ 9] = _mm_unpackhi_epi32(lstep1[36], lstep1[58]); - u[10] = _mm_unpacklo_epi32(lstep1[37], lstep1[59]); - u[11] = _mm_unpackhi_epi32(lstep1[37], lstep1[59]); - u[12] = _mm_unpacklo_epi32(lstep1[38], lstep1[56]); - u[13] = _mm_unpackhi_epi32(lstep1[38], lstep1[56]); - u[14] = _mm_unpacklo_epi32(lstep1[39], lstep1[57]); - u[15] = _mm_unpackhi_epi32(lstep1[39], lstep1[57]); - - v[ 0] = k_madd_epi32(u[ 0], k32_p31_p01); - v[ 1] = k_madd_epi32(u[ 1], k32_p31_p01); - v[ 2] = k_madd_epi32(u[ 2], k32_p31_p01); - v[ 3] = k_madd_epi32(u[ 3], k32_p31_p01); - v[ 4] = k_madd_epi32(u[ 4], k32_p15_p17); - v[ 5] = k_madd_epi32(u[ 5], k32_p15_p17); - v[ 6] = k_madd_epi32(u[ 6], k32_p15_p17); - v[ 7] = k_madd_epi32(u[ 7], k32_p15_p17); - v[ 8] = k_madd_epi32(u[ 8], 
k32_p23_p09); - v[ 9] = k_madd_epi32(u[ 9], k32_p23_p09); - v[10] = k_madd_epi32(u[10], k32_p23_p09); - v[11] = k_madd_epi32(u[11], k32_p23_p09); - v[12] = k_madd_epi32(u[12], k32_p07_p25); - v[13] = k_madd_epi32(u[13], k32_p07_p25); - v[14] = k_madd_epi32(u[14], k32_p07_p25); - v[15] = k_madd_epi32(u[15], k32_p07_p25); - v[16] = k_madd_epi32(u[12], k32_m25_p07); - v[17] = k_madd_epi32(u[13], k32_m25_p07); - v[18] = k_madd_epi32(u[14], k32_m25_p07); - v[19] = k_madd_epi32(u[15], k32_m25_p07); - v[20] = k_madd_epi32(u[ 8], k32_m09_p23); - v[21] = k_madd_epi32(u[ 9], k32_m09_p23); - v[22] = k_madd_epi32(u[10], k32_m09_p23); - v[23] = k_madd_epi32(u[11], k32_m09_p23); - v[24] = k_madd_epi32(u[ 4], k32_m17_p15); - v[25] = k_madd_epi32(u[ 5], k32_m17_p15); - v[26] = k_madd_epi32(u[ 6], k32_m17_p15); - v[27] = k_madd_epi32(u[ 7], k32_m17_p15); - v[28] = k_madd_epi32(u[ 0], k32_m01_p31); - v[29] = k_madd_epi32(u[ 1], k32_m01_p31); - v[30] = k_madd_epi32(u[ 2], k32_m01_p31); - v[31] = k_madd_epi32(u[ 3], k32_m01_p31); - -#if DCT_HIGH_BIT_DEPTH - overflow = k_check_epi32_overflow_32( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], - &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], - &kZero); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - u[ 0] = k_packs_epi64(v[ 0], v[ 1]); - u[ 1] = k_packs_epi64(v[ 2], v[ 3]); - u[ 2] = k_packs_epi64(v[ 4], v[ 5]); - u[ 3] = k_packs_epi64(v[ 6], v[ 7]); - u[ 4] = k_packs_epi64(v[ 8], v[ 9]); - u[ 5] = k_packs_epi64(v[10], v[11]); - u[ 6] = k_packs_epi64(v[12], v[13]); - u[ 7] = k_packs_epi64(v[14], v[15]); - u[ 8] = k_packs_epi64(v[16], v[17]); - u[ 9] = k_packs_epi64(v[18], v[19]); - u[10] = k_packs_epi64(v[20], v[21]); - u[11] = k_packs_epi64(v[22], v[23]); - u[12] = k_packs_epi64(v[24], v[25]); - u[13] = 
k_packs_epi64(v[26], v[27]); - u[14] = k_packs_epi64(v[28], v[29]); - u[15] = k_packs_epi64(v[30], v[31]); - - v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); - v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); - v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); - v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); - v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); - v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); - v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); - v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); - v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); - v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); - v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); - v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); - v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); - v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); - v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); - v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - - u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS); - u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS); - u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS); - u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS); - u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS); - u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS); - u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS); - u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS); - u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS); - u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS); - u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); - u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); - u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); - u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS); - u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); - u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); - - v[ 0] = _mm_cmplt_epi32(u[ 0], kZero); - v[ 1] = _mm_cmplt_epi32(u[ 1], kZero); - v[ 2] = _mm_cmplt_epi32(u[ 2], kZero); - v[ 3] = _mm_cmplt_epi32(u[ 3], kZero); - v[ 4] = _mm_cmplt_epi32(u[ 4], kZero); - v[ 5] = 
_mm_cmplt_epi32(u[ 5], kZero); - v[ 6] = _mm_cmplt_epi32(u[ 6], kZero); - v[ 7] = _mm_cmplt_epi32(u[ 7], kZero); - v[ 8] = _mm_cmplt_epi32(u[ 8], kZero); - v[ 9] = _mm_cmplt_epi32(u[ 9], kZero); - v[10] = _mm_cmplt_epi32(u[10], kZero); - v[11] = _mm_cmplt_epi32(u[11], kZero); - v[12] = _mm_cmplt_epi32(u[12], kZero); - v[13] = _mm_cmplt_epi32(u[13], kZero); - v[14] = _mm_cmplt_epi32(u[14], kZero); - v[15] = _mm_cmplt_epi32(u[15], kZero); - - u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]); - u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]); - u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]); - u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]); - u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]); - u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]); - u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]); - u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]); - u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]); - u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]); - u[10] = _mm_sub_epi32(u[10], v[10]); - u[11] = _mm_sub_epi32(u[11], v[11]); - u[12] = _mm_sub_epi32(u[12], v[12]); - u[13] = _mm_sub_epi32(u[13], v[13]); - u[14] = _mm_sub_epi32(u[14], v[14]); - u[15] = _mm_sub_epi32(u[15], v[15]); - - v[0] = _mm_add_epi32(u[0], K32One); - v[1] = _mm_add_epi32(u[1], K32One); - v[2] = _mm_add_epi32(u[2], K32One); - v[3] = _mm_add_epi32(u[3], K32One); - v[4] = _mm_add_epi32(u[4], K32One); - v[5] = _mm_add_epi32(u[5], K32One); - v[6] = _mm_add_epi32(u[6], K32One); - v[7] = _mm_add_epi32(u[7], K32One); - v[8] = _mm_add_epi32(u[8], K32One); - v[9] = _mm_add_epi32(u[9], K32One); - v[10] = _mm_add_epi32(u[10], K32One); - v[11] = _mm_add_epi32(u[11], K32One); - v[12] = _mm_add_epi32(u[12], K32One); - v[13] = _mm_add_epi32(u[13], K32One); - v[14] = _mm_add_epi32(u[14], K32One); - v[15] = _mm_add_epi32(u[15], K32One); - - u[0] = _mm_srai_epi32(v[0], 2); - u[1] = _mm_srai_epi32(v[1], 2); - u[2] = _mm_srai_epi32(v[2], 2); - u[3] = _mm_srai_epi32(v[3], 2); - u[4] = _mm_srai_epi32(v[4], 2); - u[5] = _mm_srai_epi32(v[5], 2); - u[6] = _mm_srai_epi32(v[6], 2); - u[7] = _mm_srai_epi32(v[7], 2); - u[8] = _mm_srai_epi32(v[8], 2); - 
u[9] = _mm_srai_epi32(v[9], 2); - u[10] = _mm_srai_epi32(v[10], 2); - u[11] = _mm_srai_epi32(v[11], 2); - u[12] = _mm_srai_epi32(v[12], 2); - u[13] = _mm_srai_epi32(v[13], 2); - u[14] = _mm_srai_epi32(v[14], 2); - u[15] = _mm_srai_epi32(v[15], 2); - - out[ 1] = _mm_packs_epi32(u[0], u[1]); - out[17] = _mm_packs_epi32(u[2], u[3]); - out[ 9] = _mm_packs_epi32(u[4], u[5]); - out[25] = _mm_packs_epi32(u[6], u[7]); - out[ 7] = _mm_packs_epi32(u[8], u[9]); - out[23] = _mm_packs_epi32(u[10], u[11]); - out[15] = _mm_packs_epi32(u[12], u[13]); - out[31] = _mm_packs_epi32(u[14], u[15]); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9], - &out[25], &out[7], &out[23], - &out[15], &out[31]); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i k32_p27_p05 = pair_set_epi32(cospi_27_64, cospi_5_64); - const __m128i k32_p11_p21 = pair_set_epi32(cospi_11_64, cospi_21_64); - const __m128i k32_p19_p13 = pair_set_epi32(cospi_19_64, cospi_13_64); - const __m128i k32_p03_p29 = pair_set_epi32(cospi_3_64, cospi_29_64); - const __m128i k32_m29_p03 = pair_set_epi32(-cospi_29_64, cospi_3_64); - const __m128i k32_m13_p19 = pair_set_epi32(-cospi_13_64, cospi_19_64); - const __m128i k32_m21_p11 = pair_set_epi32(-cospi_21_64, cospi_11_64); - const __m128i k32_m05_p27 = pair_set_epi32(-cospi_5_64, cospi_27_64); - - u[ 0] = _mm_unpacklo_epi32(lstep1[40], lstep1[54]); - u[ 1] = _mm_unpackhi_epi32(lstep1[40], lstep1[54]); - u[ 2] = _mm_unpacklo_epi32(lstep1[41], lstep1[55]); - u[ 3] = _mm_unpackhi_epi32(lstep1[41], lstep1[55]); - u[ 4] = _mm_unpacklo_epi32(lstep1[42], lstep1[52]); - u[ 5] = _mm_unpackhi_epi32(lstep1[42], lstep1[52]); - u[ 6] = _mm_unpacklo_epi32(lstep1[43], lstep1[53]); - u[ 7] = _mm_unpackhi_epi32(lstep1[43], lstep1[53]); - u[ 8] = _mm_unpacklo_epi32(lstep1[44], lstep1[50]); - u[ 9] = _mm_unpackhi_epi32(lstep1[44], lstep1[50]); - u[10] = 
_mm_unpacklo_epi32(lstep1[45], lstep1[51]); - u[11] = _mm_unpackhi_epi32(lstep1[45], lstep1[51]); - u[12] = _mm_unpacklo_epi32(lstep1[46], lstep1[48]); - u[13] = _mm_unpackhi_epi32(lstep1[46], lstep1[48]); - u[14] = _mm_unpacklo_epi32(lstep1[47], lstep1[49]); - u[15] = _mm_unpackhi_epi32(lstep1[47], lstep1[49]); - - v[ 0] = k_madd_epi32(u[ 0], k32_p27_p05); - v[ 1] = k_madd_epi32(u[ 1], k32_p27_p05); - v[ 2] = k_madd_epi32(u[ 2], k32_p27_p05); - v[ 3] = k_madd_epi32(u[ 3], k32_p27_p05); - v[ 4] = k_madd_epi32(u[ 4], k32_p11_p21); - v[ 5] = k_madd_epi32(u[ 5], k32_p11_p21); - v[ 6] = k_madd_epi32(u[ 6], k32_p11_p21); - v[ 7] = k_madd_epi32(u[ 7], k32_p11_p21); - v[ 8] = k_madd_epi32(u[ 8], k32_p19_p13); - v[ 9] = k_madd_epi32(u[ 9], k32_p19_p13); - v[10] = k_madd_epi32(u[10], k32_p19_p13); - v[11] = k_madd_epi32(u[11], k32_p19_p13); - v[12] = k_madd_epi32(u[12], k32_p03_p29); - v[13] = k_madd_epi32(u[13], k32_p03_p29); - v[14] = k_madd_epi32(u[14], k32_p03_p29); - v[15] = k_madd_epi32(u[15], k32_p03_p29); - v[16] = k_madd_epi32(u[12], k32_m29_p03); - v[17] = k_madd_epi32(u[13], k32_m29_p03); - v[18] = k_madd_epi32(u[14], k32_m29_p03); - v[19] = k_madd_epi32(u[15], k32_m29_p03); - v[20] = k_madd_epi32(u[ 8], k32_m13_p19); - v[21] = k_madd_epi32(u[ 9], k32_m13_p19); - v[22] = k_madd_epi32(u[10], k32_m13_p19); - v[23] = k_madd_epi32(u[11], k32_m13_p19); - v[24] = k_madd_epi32(u[ 4], k32_m21_p11); - v[25] = k_madd_epi32(u[ 5], k32_m21_p11); - v[26] = k_madd_epi32(u[ 6], k32_m21_p11); - v[27] = k_madd_epi32(u[ 7], k32_m21_p11); - v[28] = k_madd_epi32(u[ 0], k32_m05_p27); - v[29] = k_madd_epi32(u[ 1], k32_m05_p27); - v[30] = k_madd_epi32(u[ 2], k32_m05_p27); - v[31] = k_madd_epi32(u[ 3], k32_m05_p27); - -#if DCT_HIGH_BIT_DEPTH - overflow = k_check_epi32_overflow_32( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], - &v[24], &v[25], 
&v[26], &v[27], &v[28], &v[29], &v[30], &v[31], - &kZero); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - u[ 0] = k_packs_epi64(v[ 0], v[ 1]); - u[ 1] = k_packs_epi64(v[ 2], v[ 3]); - u[ 2] = k_packs_epi64(v[ 4], v[ 5]); - u[ 3] = k_packs_epi64(v[ 6], v[ 7]); - u[ 4] = k_packs_epi64(v[ 8], v[ 9]); - u[ 5] = k_packs_epi64(v[10], v[11]); - u[ 6] = k_packs_epi64(v[12], v[13]); - u[ 7] = k_packs_epi64(v[14], v[15]); - u[ 8] = k_packs_epi64(v[16], v[17]); - u[ 9] = k_packs_epi64(v[18], v[19]); - u[10] = k_packs_epi64(v[20], v[21]); - u[11] = k_packs_epi64(v[22], v[23]); - u[12] = k_packs_epi64(v[24], v[25]); - u[13] = k_packs_epi64(v[26], v[27]); - u[14] = k_packs_epi64(v[28], v[29]); - u[15] = k_packs_epi64(v[30], v[31]); - - v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); - v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); - v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); - v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); - v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); - v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); - v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); - v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); - v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); - v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); - v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); - v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); - v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); - v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); - v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); - v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - - u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS); - u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS); - u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS); - u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS); - u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS); - u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS); - u[ 6] = 
_mm_srai_epi32(v[ 6], DCT_CONST_BITS); - u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS); - u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS); - u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS); - u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); - u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); - u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); - u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS); - u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); - u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); - - v[ 0] = _mm_cmplt_epi32(u[ 0], kZero); - v[ 1] = _mm_cmplt_epi32(u[ 1], kZero); - v[ 2] = _mm_cmplt_epi32(u[ 2], kZero); - v[ 3] = _mm_cmplt_epi32(u[ 3], kZero); - v[ 4] = _mm_cmplt_epi32(u[ 4], kZero); - v[ 5] = _mm_cmplt_epi32(u[ 5], kZero); - v[ 6] = _mm_cmplt_epi32(u[ 6], kZero); - v[ 7] = _mm_cmplt_epi32(u[ 7], kZero); - v[ 8] = _mm_cmplt_epi32(u[ 8], kZero); - v[ 9] = _mm_cmplt_epi32(u[ 9], kZero); - v[10] = _mm_cmplt_epi32(u[10], kZero); - v[11] = _mm_cmplt_epi32(u[11], kZero); - v[12] = _mm_cmplt_epi32(u[12], kZero); - v[13] = _mm_cmplt_epi32(u[13], kZero); - v[14] = _mm_cmplt_epi32(u[14], kZero); - v[15] = _mm_cmplt_epi32(u[15], kZero); - - u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]); - u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]); - u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]); - u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]); - u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]); - u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]); - u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]); - u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]); - u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]); - u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]); - u[10] = _mm_sub_epi32(u[10], v[10]); - u[11] = _mm_sub_epi32(u[11], v[11]); - u[12] = _mm_sub_epi32(u[12], v[12]); - u[13] = _mm_sub_epi32(u[13], v[13]); - u[14] = _mm_sub_epi32(u[14], v[14]); - u[15] = _mm_sub_epi32(u[15], v[15]); - - v[0] = _mm_add_epi32(u[0], K32One); - v[1] = _mm_add_epi32(u[1], K32One); - v[2] = _mm_add_epi32(u[2], K32One); - v[3] = _mm_add_epi32(u[3], K32One); - v[4] = _mm_add_epi32(u[4], K32One); - v[5] = _mm_add_epi32(u[5], K32One); - 
v[6] = _mm_add_epi32(u[6], K32One); - v[7] = _mm_add_epi32(u[7], K32One); - v[8] = _mm_add_epi32(u[8], K32One); - v[9] = _mm_add_epi32(u[9], K32One); - v[10] = _mm_add_epi32(u[10], K32One); - v[11] = _mm_add_epi32(u[11], K32One); - v[12] = _mm_add_epi32(u[12], K32One); - v[13] = _mm_add_epi32(u[13], K32One); - v[14] = _mm_add_epi32(u[14], K32One); - v[15] = _mm_add_epi32(u[15], K32One); - - u[0] = _mm_srai_epi32(v[0], 2); - u[1] = _mm_srai_epi32(v[1], 2); - u[2] = _mm_srai_epi32(v[2], 2); - u[3] = _mm_srai_epi32(v[3], 2); - u[4] = _mm_srai_epi32(v[4], 2); - u[5] = _mm_srai_epi32(v[5], 2); - u[6] = _mm_srai_epi32(v[6], 2); - u[7] = _mm_srai_epi32(v[7], 2); - u[8] = _mm_srai_epi32(v[8], 2); - u[9] = _mm_srai_epi32(v[9], 2); - u[10] = _mm_srai_epi32(v[10], 2); - u[11] = _mm_srai_epi32(v[11], 2); - u[12] = _mm_srai_epi32(v[12], 2); - u[13] = _mm_srai_epi32(v[13], 2); - u[14] = _mm_srai_epi32(v[14], 2); - u[15] = _mm_srai_epi32(v[15], 2); - - out[ 5] = _mm_packs_epi32(u[0], u[1]); - out[21] = _mm_packs_epi32(u[2], u[3]); - out[13] = _mm_packs_epi32(u[4], u[5]); - out[29] = _mm_packs_epi32(u[6], u[7]); - out[ 3] = _mm_packs_epi32(u[8], u[9]); - out[19] = _mm_packs_epi32(u[10], u[11]); - out[11] = _mm_packs_epi32(u[12], u[13]); - out[27] = _mm_packs_epi32(u[14], u[15]); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13], - &out[29], &out[3], &out[19], - &out[11], &out[27]); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - } -#endif // FDCT32x32_HIGH_PRECISION - // Transpose the results, do it as four 8x8 transposes. 
- { - int transpose_block; - int16_t *output0 = &intermediate[column_start * 32]; - tran_low_t *output1 = &output_org[column_start * 32]; - for (transpose_block = 0; transpose_block < 4; ++transpose_block) { - __m128i *this_out = &out[8 * transpose_block]; - // 00 01 02 03 04 05 06 07 - // 10 11 12 13 14 15 16 17 - // 20 21 22 23 24 25 26 27 - // 30 31 32 33 34 35 36 37 - // 40 41 42 43 44 45 46 47 - // 50 51 52 53 54 55 56 57 - // 60 61 62 63 64 65 66 67 - // 70 71 72 73 74 75 76 77 - const __m128i tr0_0 = _mm_unpacklo_epi16(this_out[0], this_out[1]); - const __m128i tr0_1 = _mm_unpacklo_epi16(this_out[2], this_out[3]); - const __m128i tr0_2 = _mm_unpackhi_epi16(this_out[0], this_out[1]); - const __m128i tr0_3 = _mm_unpackhi_epi16(this_out[2], this_out[3]); - const __m128i tr0_4 = _mm_unpacklo_epi16(this_out[4], this_out[5]); - const __m128i tr0_5 = _mm_unpacklo_epi16(this_out[6], this_out[7]); - const __m128i tr0_6 = _mm_unpackhi_epi16(this_out[4], this_out[5]); - const __m128i tr0_7 = _mm_unpackhi_epi16(this_out[6], this_out[7]); - // 00 10 01 11 02 12 03 13 - // 20 30 21 31 22 32 23 33 - // 04 14 05 15 06 16 07 17 - // 24 34 25 35 26 36 27 37 - // 40 50 41 51 42 52 43 53 - // 60 70 61 71 62 72 63 73 - // 54 54 55 55 56 56 57 57 - // 64 74 65 75 66 76 67 77 - const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); - const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); - const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); - const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); - const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); - const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); - const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); - const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); - // 00 10 20 30 01 11 21 31 - // 40 50 60 70 41 51 61 71 - // 02 12 22 32 03 13 23 33 - // 42 52 62 72 43 53 63 73 - // 04 14 24 34 05 15 21 36 - // 44 54 64 74 45 55 61 76 - // 06 16 26 36 07 17 27 37 - // 46 56 66 76 47 57 67 77 - __m128i tr2_0 = 
_mm_unpacklo_epi64(tr1_0, tr1_4); - __m128i tr2_1 = _mm_unpackhi_epi64(tr1_0, tr1_4); - __m128i tr2_2 = _mm_unpacklo_epi64(tr1_2, tr1_6); - __m128i tr2_3 = _mm_unpackhi_epi64(tr1_2, tr1_6); - __m128i tr2_4 = _mm_unpacklo_epi64(tr1_1, tr1_5); - __m128i tr2_5 = _mm_unpackhi_epi64(tr1_1, tr1_5); - __m128i tr2_6 = _mm_unpacklo_epi64(tr1_3, tr1_7); - __m128i tr2_7 = _mm_unpackhi_epi64(tr1_3, tr1_7); - // 00 10 20 30 40 50 60 70 - // 01 11 21 31 41 51 61 71 - // 02 12 22 32 42 52 62 72 - // 03 13 23 33 43 53 63 73 - // 04 14 24 34 44 54 64 74 - // 05 15 25 35 45 55 65 75 - // 06 16 26 36 46 56 66 76 - // 07 17 27 37 47 57 67 77 - if (0 == pass) { - // output[j] = (output[j] + 1 + (output[j] > 0)) >> 2; - // TODO(cd): see quality impact of only doing - // output[j] = (output[j] + 1) >> 2; - // which would remove the code between here ... - __m128i tr2_0_0 = _mm_cmpgt_epi16(tr2_0, kZero); - __m128i tr2_1_0 = _mm_cmpgt_epi16(tr2_1, kZero); - __m128i tr2_2_0 = _mm_cmpgt_epi16(tr2_2, kZero); - __m128i tr2_3_0 = _mm_cmpgt_epi16(tr2_3, kZero); - __m128i tr2_4_0 = _mm_cmpgt_epi16(tr2_4, kZero); - __m128i tr2_5_0 = _mm_cmpgt_epi16(tr2_5, kZero); - __m128i tr2_6_0 = _mm_cmpgt_epi16(tr2_6, kZero); - __m128i tr2_7_0 = _mm_cmpgt_epi16(tr2_7, kZero); - tr2_0 = _mm_sub_epi16(tr2_0, tr2_0_0); - tr2_1 = _mm_sub_epi16(tr2_1, tr2_1_0); - tr2_2 = _mm_sub_epi16(tr2_2, tr2_2_0); - tr2_3 = _mm_sub_epi16(tr2_3, tr2_3_0); - tr2_4 = _mm_sub_epi16(tr2_4, tr2_4_0); - tr2_5 = _mm_sub_epi16(tr2_5, tr2_5_0); - tr2_6 = _mm_sub_epi16(tr2_6, tr2_6_0); - tr2_7 = _mm_sub_epi16(tr2_7, tr2_7_0); - // ... and here. 
- // PS: also change code in vp9/encoder/vp9_dct.c - tr2_0 = _mm_add_epi16(tr2_0, kOne); - tr2_1 = _mm_add_epi16(tr2_1, kOne); - tr2_2 = _mm_add_epi16(tr2_2, kOne); - tr2_3 = _mm_add_epi16(tr2_3, kOne); - tr2_4 = _mm_add_epi16(tr2_4, kOne); - tr2_5 = _mm_add_epi16(tr2_5, kOne); - tr2_6 = _mm_add_epi16(tr2_6, kOne); - tr2_7 = _mm_add_epi16(tr2_7, kOne); - tr2_0 = _mm_srai_epi16(tr2_0, 2); - tr2_1 = _mm_srai_epi16(tr2_1, 2); - tr2_2 = _mm_srai_epi16(tr2_2, 2); - tr2_3 = _mm_srai_epi16(tr2_3, 2); - tr2_4 = _mm_srai_epi16(tr2_4, 2); - tr2_5 = _mm_srai_epi16(tr2_5, 2); - tr2_6 = _mm_srai_epi16(tr2_6, 2); - tr2_7 = _mm_srai_epi16(tr2_7, 2); - } - // Note: even though all these stores are aligned, using the aligned - // intrinsic make the code slightly slower. - if (pass == 0) { - _mm_storeu_si128((__m128i *)(output0 + 0 * 32), tr2_0); - _mm_storeu_si128((__m128i *)(output0 + 1 * 32), tr2_1); - _mm_storeu_si128((__m128i *)(output0 + 2 * 32), tr2_2); - _mm_storeu_si128((__m128i *)(output0 + 3 * 32), tr2_3); - _mm_storeu_si128((__m128i *)(output0 + 4 * 32), tr2_4); - _mm_storeu_si128((__m128i *)(output0 + 5 * 32), tr2_5); - _mm_storeu_si128((__m128i *)(output0 + 6 * 32), tr2_6); - _mm_storeu_si128((__m128i *)(output0 + 7 * 32), tr2_7); - // Process next 8x8 - output0 += 8; - } else { - storeu_output(&tr2_0, (output1 + 0 * 32)); - storeu_output(&tr2_1, (output1 + 1 * 32)); - storeu_output(&tr2_2, (output1 + 2 * 32)); - storeu_output(&tr2_3, (output1 + 3 * 32)); - storeu_output(&tr2_4, (output1 + 4 * 32)); - storeu_output(&tr2_5, (output1 + 5 * 32)); - storeu_output(&tr2_6, (output1 + 6 * 32)); - storeu_output(&tr2_7, (output1 + 7 * 32)); - // Process next 8x8 - output1 += 8; - } - } - } - } - } -} // NOLINT - -#undef ADD_EPI16 -#undef SUB_EPI16 -#undef HIGH_FDCT32x32_2D_C -#undef HIGH_FDCT32x32_2D_ROWS_C diff --git a/libs/libvpx/vp10/common/x86/vp10_fwd_txfm_impl_sse2.h b/libs/libvpx/vp10/common/x86/vp10_fwd_txfm_impl_sse2.h deleted file mode 100644 index 
69889e2e98..0000000000 --- a/libs/libvpx/vp10/common/x86/vp10_fwd_txfm_impl_sse2.h +++ /dev/null @@ -1,1027 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include // SSE2 - -#include "./vpx_dsp_rtcd.h" -#include "vpx_dsp/txfm_common.h" -#include "vpx_dsp/x86/fwd_txfm_sse2.h" -#include "vpx_dsp/x86/txfm_common_sse2.h" -#include "vpx_ports/mem.h" - -// TODO(jingning) The high bit-depth functions need rework for performance. -// After we properly fix the high bit-depth function implementations, this -// file's dependency should be substantially simplified. -#if DCT_HIGH_BIT_DEPTH -#define ADD_EPI16 _mm_adds_epi16 -#define SUB_EPI16 _mm_subs_epi16 - -#else -#define ADD_EPI16 _mm_add_epi16 -#define SUB_EPI16 _mm_sub_epi16 -#endif - -void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) { - // This 2D transform implements 4 vertical 1D transforms followed - // by 4 horizontal 1D transforms. The multiplies and adds are as given - // by Chen, Smith and Fralick ('77). The commands for moving the data - // around have been minimized by hand. - // For the purposes of the comments, the 16 inputs are referred to at i0 - // through iF (in raster order), intermediate variables are a0, b0, c0 - // through f, and correspond to the in-place computations mapped to input - // locations. The outputs, o0 through oF are labeled according to the - // output locations. - - // Constants - // These are the coefficients used for the multiplies. 
- // In the comments, pN means cos(N pi /64) and mN is -cos(N pi /64), - // where cospi_N_64 = cos(N pi /64) - const __m128i k__cospi_A = octa_set_epi16(cospi_16_64, cospi_16_64, - cospi_16_64, cospi_16_64, - cospi_16_64, -cospi_16_64, - cospi_16_64, -cospi_16_64); - const __m128i k__cospi_B = octa_set_epi16(cospi_16_64, -cospi_16_64, - cospi_16_64, -cospi_16_64, - cospi_16_64, cospi_16_64, - cospi_16_64, cospi_16_64); - const __m128i k__cospi_C = octa_set_epi16(cospi_8_64, cospi_24_64, - cospi_8_64, cospi_24_64, - cospi_24_64, -cospi_8_64, - cospi_24_64, -cospi_8_64); - const __m128i k__cospi_D = octa_set_epi16(cospi_24_64, -cospi_8_64, - cospi_24_64, -cospi_8_64, - cospi_8_64, cospi_24_64, - cospi_8_64, cospi_24_64); - const __m128i k__cospi_E = octa_set_epi16(cospi_16_64, cospi_16_64, - cospi_16_64, cospi_16_64, - cospi_16_64, cospi_16_64, - cospi_16_64, cospi_16_64); - const __m128i k__cospi_F = octa_set_epi16(cospi_16_64, -cospi_16_64, - cospi_16_64, -cospi_16_64, - cospi_16_64, -cospi_16_64, - cospi_16_64, -cospi_16_64); - const __m128i k__cospi_G = octa_set_epi16(cospi_8_64, cospi_24_64, - cospi_8_64, cospi_24_64, - -cospi_8_64, -cospi_24_64, - -cospi_8_64, -cospi_24_64); - const __m128i k__cospi_H = octa_set_epi16(cospi_24_64, -cospi_8_64, - cospi_24_64, -cospi_8_64, - -cospi_24_64, cospi_8_64, - -cospi_24_64, cospi_8_64); - - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - // This second rounding constant saves doing some extra adds at the end - const __m128i k__DCT_CONST_ROUNDING2 = _mm_set1_epi32(DCT_CONST_ROUNDING - +(DCT_CONST_ROUNDING << 1)); - const int DCT_CONST_BITS2 = DCT_CONST_BITS + 2; - const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1); - const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0); - __m128i in0, in1; -#if DCT_HIGH_BIT_DEPTH - __m128i cmp0, cmp1; - int test, overflow; -#endif - - // Load inputs. 
- in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride)); - in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride)); - in1 = _mm_unpacklo_epi64(in1, _mm_loadl_epi64((const __m128i *) - (input + 2 * stride))); - in0 = _mm_unpacklo_epi64(in0, _mm_loadl_epi64((const __m128i *) - (input + 3 * stride))); - // in0 = [i0 i1 i2 i3 iC iD iE iF] - // in1 = [i4 i5 i6 i7 i8 i9 iA iB] -#if DCT_HIGH_BIT_DEPTH - // Check inputs small enough to use optimised code - cmp0 = _mm_xor_si128(_mm_cmpgt_epi16(in0, _mm_set1_epi16(0x3ff)), - _mm_cmplt_epi16(in0, _mm_set1_epi16(0xfc00))); - cmp1 = _mm_xor_si128(_mm_cmpgt_epi16(in1, _mm_set1_epi16(0x3ff)), - _mm_cmplt_epi16(in1, _mm_set1_epi16(0xfc00))); - test = _mm_movemask_epi8(_mm_or_si128(cmp0, cmp1)); - if (test) { - vpx_highbd_fdct4x4_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - - // multiply by 16 to give some extra precision - in0 = _mm_slli_epi16(in0, 4); - in1 = _mm_slli_epi16(in1, 4); - // if (i == 0 && input[0]) input[0] += 1; - // add 1 to the upper left pixel if it is non-zero, which helps reduce - // the round-trip error - { - // The mask will only contain whether the first value is zero, all - // other comparison will fail as something shifted by 4 (above << 4) - // can never be equal to one. To increment in the non-zero case, we - // add the mask and one for the first element: - // - if zero, mask = -1, v = v - 1 + 1 = v - // - if non-zero, mask = 0, v = v + 0 + 1 = v + 1 - __m128i mask = _mm_cmpeq_epi16(in0, k__nonzero_bias_a); - in0 = _mm_add_epi16(in0, mask); - in0 = _mm_add_epi16(in0, k__nonzero_bias_b); - } - // There are 4 total stages, alternating between an add/subtract stage - // followed by an multiply-and-add stage. 
- { - // Stage 1: Add/subtract - - // in0 = [i0 i1 i2 i3 iC iD iE iF] - // in1 = [i4 i5 i6 i7 i8 i9 iA iB] - const __m128i r0 = _mm_unpacklo_epi16(in0, in1); - const __m128i r1 = _mm_unpackhi_epi16(in0, in1); - // r0 = [i0 i4 i1 i5 i2 i6 i3 i7] - // r1 = [iC i8 iD i9 iE iA iF iB] - const __m128i r2 = _mm_shuffle_epi32(r0, 0xB4); - const __m128i r3 = _mm_shuffle_epi32(r1, 0xB4); - // r2 = [i0 i4 i1 i5 i3 i7 i2 i6] - // r3 = [iC i8 iD i9 iF iB iE iA] - - const __m128i t0 = _mm_add_epi16(r2, r3); - const __m128i t1 = _mm_sub_epi16(r2, r3); - // t0 = [a0 a4 a1 a5 a3 a7 a2 a6] - // t1 = [aC a8 aD a9 aF aB aE aA] - - // Stage 2: multiply by constants (which gets us into 32 bits). - // The constants needed here are: - // k__cospi_A = [p16 p16 p16 p16 p16 m16 p16 m16] - // k__cospi_B = [p16 m16 p16 m16 p16 p16 p16 p16] - // k__cospi_C = [p08 p24 p08 p24 p24 m08 p24 m08] - // k__cospi_D = [p24 m08 p24 m08 p08 p24 p08 p24] - const __m128i u0 = _mm_madd_epi16(t0, k__cospi_A); - const __m128i u2 = _mm_madd_epi16(t0, k__cospi_B); - const __m128i u1 = _mm_madd_epi16(t1, k__cospi_C); - const __m128i u3 = _mm_madd_epi16(t1, k__cospi_D); - // Then add and right-shift to get back to 16-bit range - const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); - const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); - const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); - const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); - const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); - const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); - const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); - const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); - // w0 = [b0 b1 b7 b6] - // w1 = [b8 b9 bF bE] - // w2 = [b4 b5 b3 b2] - // w3 = [bC bD bB bA] - const __m128i x0 = _mm_packs_epi32(w0, w1); - const __m128i x1 = _mm_packs_epi32(w2, w3); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x2(&x0, &x1); - if (overflow) { - vpx_highbd_fdct4x4_c(input, output, 
stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - // x0 = [b0 b1 b7 b6 b8 b9 bF bE] - // x1 = [b4 b5 b3 b2 bC bD bB bA] - in0 = _mm_shuffle_epi32(x0, 0xD8); - in1 = _mm_shuffle_epi32(x1, 0x8D); - // in0 = [b0 b1 b8 b9 b7 b6 bF bE] - // in1 = [b3 b2 bB bA b4 b5 bC bD] - } - { - // vertical DCTs finished. Now we do the horizontal DCTs. - // Stage 3: Add/subtract - - const __m128i t0 = ADD_EPI16(in0, in1); - const __m128i t1 = SUB_EPI16(in0, in1); - // t0 = [c0 c1 c8 c9 c4 c5 cC cD] - // t1 = [c3 c2 cB cA -c7 -c6 -cF -cE] -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x2(&t0, &t1); - if (overflow) { - vpx_highbd_fdct4x4_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - - // Stage 4: multiply by constants (which gets us into 32 bits). - { - // The constants needed here are: - // k__cospi_E = [p16 p16 p16 p16 p16 p16 p16 p16] - // k__cospi_F = [p16 m16 p16 m16 p16 m16 p16 m16] - // k__cospi_G = [p08 p24 p08 p24 m08 m24 m08 m24] - // k__cospi_H = [p24 m08 p24 m08 m24 p08 m24 p08] - const __m128i u0 = _mm_madd_epi16(t0, k__cospi_E); - const __m128i u1 = _mm_madd_epi16(t0, k__cospi_F); - const __m128i u2 = _mm_madd_epi16(t1, k__cospi_G); - const __m128i u3 = _mm_madd_epi16(t1, k__cospi_H); - // Then add and right-shift to get back to 16-bit range - // but this combines the final right-shift as well to save operations - // This unusual rounding operations is to maintain bit-accurate - // compatibility with the c version of this function which has two - // rounding steps in a row. 
- const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING2); - const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING2); - const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING2); - const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING2); - const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS2); - const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS2); - const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS2); - const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS2); - // w0 = [o0 o4 o8 oC] - // w1 = [o2 o6 oA oE] - // w2 = [o1 o5 o9 oD] - // w3 = [o3 o7 oB oF] - // remember the o's are numbered according to the correct output location - const __m128i x0 = _mm_packs_epi32(w0, w1); - const __m128i x1 = _mm_packs_epi32(w2, w3); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x2(&x0, &x1); - if (overflow) { - vpx_highbd_fdct4x4_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - { - // x0 = [o0 o4 o8 oC o2 o6 oA oE] - // x1 = [o1 o5 o9 oD o3 o7 oB oF] - const __m128i y0 = _mm_unpacklo_epi16(x0, x1); - const __m128i y1 = _mm_unpackhi_epi16(x0, x1); - // y0 = [o0 o1 o4 o5 o8 o9 oC oD] - // y1 = [o2 o3 o6 o7 oA oB oE oF] - in0 = _mm_unpacklo_epi32(y0, y1); - // in0 = [o0 o1 o2 o3 o4 o5 o6 o7] - in1 = _mm_unpackhi_epi32(y0, y1); - // in1 = [o8 o9 oA oB oC oD oE oF] - } - } - } - // Post-condition (v + 1) >> 2 is now incorporated into previous - // add and right-shift commands. Only 2 store instructions needed - // because we are using the fact that 1/3 are stored just after 0/2. - storeu_output(&in0, output + 0 * 4); - storeu_output(&in1, output + 2 * 4); -} - - -void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) { - int pass; - // Constants - // When we use them, in one case, they are all the same. In all others - // it's a pair of them that we need to repeat four times. This is done - // by constructing the 32 bit constant corresponding to that pair. 
- const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64); - const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); - const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); - const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); - const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); - const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); - const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); -#if DCT_HIGH_BIT_DEPTH - int overflow; -#endif - // Load input - __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); - __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); - __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); - __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); - __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride)); - __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride)); - __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride)); - __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride)); - // Pre-condition input (shift by two) - in0 = _mm_slli_epi16(in0, 2); - in1 = _mm_slli_epi16(in1, 2); - in2 = _mm_slli_epi16(in2, 2); - in3 = _mm_slli_epi16(in3, 2); - in4 = _mm_slli_epi16(in4, 2); - in5 = _mm_slli_epi16(in5, 2); - in6 = _mm_slli_epi16(in6, 2); - in7 = _mm_slli_epi16(in7, 2); - - // We do two passes, first the columns, then the rows. The results of the - // first pass are transposed so that the same column code can be reused. The - // results of the second pass are also transposed so that the rows (processed - // as columns) are put back in row positions. - for (pass = 0; pass < 2; pass++) { - // To store results of each pass before the transpose. 
- __m128i res0, res1, res2, res3, res4, res5, res6, res7; - // Add/subtract - const __m128i q0 = ADD_EPI16(in0, in7); - const __m128i q1 = ADD_EPI16(in1, in6); - const __m128i q2 = ADD_EPI16(in2, in5); - const __m128i q3 = ADD_EPI16(in3, in4); - const __m128i q4 = SUB_EPI16(in3, in4); - const __m128i q5 = SUB_EPI16(in2, in5); - const __m128i q6 = SUB_EPI16(in1, in6); - const __m128i q7 = SUB_EPI16(in0, in7); -#if DCT_HIGH_BIT_DEPTH - if (pass == 1) { - overflow = check_epi16_overflow_x8(&q0, &q1, &q2, &q3, - &q4, &q5, &q6, &q7); - if (overflow) { - vpx_highbd_fdct8x8_c(input, output, stride); - return; - } - } -#endif // DCT_HIGH_BIT_DEPTH - // Work on first four results - { - // Add/subtract - const __m128i r0 = ADD_EPI16(q0, q3); - const __m128i r1 = ADD_EPI16(q1, q2); - const __m128i r2 = SUB_EPI16(q1, q2); - const __m128i r3 = SUB_EPI16(q0, q3); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3); - if (overflow) { - vpx_highbd_fdct8x8_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - // Interleave to do the multiply by constants which gets us into 32bits - { - const __m128i t0 = _mm_unpacklo_epi16(r0, r1); - const __m128i t1 = _mm_unpackhi_epi16(r0, r1); - const __m128i t2 = _mm_unpacklo_epi16(r2, r3); - const __m128i t3 = _mm_unpackhi_epi16(r2, r3); - const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16); - const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16); - const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16); - const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16); - const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08); - const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08); - const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24); - const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24); - // dct_const_round_shift - const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); - const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); - const __m128i v2 = _mm_add_epi32(u2, 
k__DCT_CONST_ROUNDING); - const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); - const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); - const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); - const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); - const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); - const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); - const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); - const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); - const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); - const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); - const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); - const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); - const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); - // Combine - res0 = _mm_packs_epi32(w0, w1); - res4 = _mm_packs_epi32(w2, w3); - res2 = _mm_packs_epi32(w4, w5); - res6 = _mm_packs_epi32(w6, w7); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&res0, &res4, &res2, &res6); - if (overflow) { - vpx_highbd_fdct8x8_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - } - // Work on next four results - { - // Interleave to do the multiply by constants which gets us into 32bits - const __m128i d0 = _mm_unpacklo_epi16(q6, q5); - const __m128i d1 = _mm_unpackhi_epi16(q6, q5); - const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16); - const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16); - const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16); - const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16); - // dct_const_round_shift - const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING); - const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING); - const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING); - const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING); - const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS); - const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS); - const 
__m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS); - const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS); - // Combine - const __m128i r0 = _mm_packs_epi32(s0, s1); - const __m128i r1 = _mm_packs_epi32(s2, s3); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x2(&r0, &r1); - if (overflow) { - vpx_highbd_fdct8x8_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - { - // Add/subtract - const __m128i x0 = ADD_EPI16(q4, r0); - const __m128i x1 = SUB_EPI16(q4, r0); - const __m128i x2 = SUB_EPI16(q7, r1); - const __m128i x3 = ADD_EPI16(q7, r1); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3); - if (overflow) { - vpx_highbd_fdct8x8_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - // Interleave to do the multiply by constants which gets us into 32bits - { - const __m128i t0 = _mm_unpacklo_epi16(x0, x3); - const __m128i t1 = _mm_unpackhi_epi16(x0, x3); - const __m128i t2 = _mm_unpacklo_epi16(x1, x2); - const __m128i t3 = _mm_unpackhi_epi16(x1, x2); - const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04); - const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04); - const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28); - const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28); - const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20); - const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20); - const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12); - const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12); - // dct_const_round_shift - const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); - const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); - const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); - const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); - const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); - const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); - const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); - const __m128i v7 = 
_mm_add_epi32(u7, k__DCT_CONST_ROUNDING); - const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); - const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); - const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); - const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); - const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); - const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); - const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); - const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); - // Combine - res1 = _mm_packs_epi32(w0, w1); - res7 = _mm_packs_epi32(w2, w3); - res5 = _mm_packs_epi32(w4, w5); - res3 = _mm_packs_epi32(w6, w7); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&res1, &res7, &res5, &res3); - if (overflow) { - vpx_highbd_fdct8x8_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - } - } - // Transpose the 8x8. - { - // 00 01 02 03 04 05 06 07 - // 10 11 12 13 14 15 16 17 - // 20 21 22 23 24 25 26 27 - // 30 31 32 33 34 35 36 37 - // 40 41 42 43 44 45 46 47 - // 50 51 52 53 54 55 56 57 - // 60 61 62 63 64 65 66 67 - // 70 71 72 73 74 75 76 77 - const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1); - const __m128i tr0_1 = _mm_unpacklo_epi16(res2, res3); - const __m128i tr0_2 = _mm_unpackhi_epi16(res0, res1); - const __m128i tr0_3 = _mm_unpackhi_epi16(res2, res3); - const __m128i tr0_4 = _mm_unpacklo_epi16(res4, res5); - const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7); - const __m128i tr0_6 = _mm_unpackhi_epi16(res4, res5); - const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7); - // 00 10 01 11 02 12 03 13 - // 20 30 21 31 22 32 23 33 - // 04 14 05 15 06 16 07 17 - // 24 34 25 35 26 36 27 37 - // 40 50 41 51 42 52 43 53 - // 60 70 61 71 62 72 63 73 - // 54 54 55 55 56 56 57 57 - // 64 74 65 75 66 76 67 77 - const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); - const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); - const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); - const __m128i tr1_3 = 
_mm_unpackhi_epi32(tr0_2, tr0_3); - const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); - const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); - const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); - const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); - // 00 10 20 30 01 11 21 31 - // 40 50 60 70 41 51 61 71 - // 02 12 22 32 03 13 23 33 - // 42 52 62 72 43 53 63 73 - // 04 14 24 34 05 15 21 36 - // 44 54 64 74 45 55 61 76 - // 06 16 26 36 07 17 27 37 - // 46 56 66 76 47 57 67 77 - in0 = _mm_unpacklo_epi64(tr1_0, tr1_4); - in1 = _mm_unpackhi_epi64(tr1_0, tr1_4); - in2 = _mm_unpacklo_epi64(tr1_2, tr1_6); - in3 = _mm_unpackhi_epi64(tr1_2, tr1_6); - in4 = _mm_unpacklo_epi64(tr1_1, tr1_5); - in5 = _mm_unpackhi_epi64(tr1_1, tr1_5); - in6 = _mm_unpacklo_epi64(tr1_3, tr1_7); - in7 = _mm_unpackhi_epi64(tr1_3, tr1_7); - // 00 10 20 30 40 50 60 70 - // 01 11 21 31 41 51 61 71 - // 02 12 22 32 42 52 62 72 - // 03 13 23 33 43 53 63 73 - // 04 14 24 34 44 54 64 74 - // 05 15 25 35 45 55 65 75 - // 06 16 26 36 46 56 66 76 - // 07 17 27 37 47 57 67 77 - } - } - // Post-condition output and store it - { - // Post-condition (division by two) - // division of two 16 bits signed numbers using shifts - // n / 2 = (n - (n >> 15)) >> 1 - const __m128i sign_in0 = _mm_srai_epi16(in0, 15); - const __m128i sign_in1 = _mm_srai_epi16(in1, 15); - const __m128i sign_in2 = _mm_srai_epi16(in2, 15); - const __m128i sign_in3 = _mm_srai_epi16(in3, 15); - const __m128i sign_in4 = _mm_srai_epi16(in4, 15); - const __m128i sign_in5 = _mm_srai_epi16(in5, 15); - const __m128i sign_in6 = _mm_srai_epi16(in6, 15); - const __m128i sign_in7 = _mm_srai_epi16(in7, 15); - in0 = _mm_sub_epi16(in0, sign_in0); - in1 = _mm_sub_epi16(in1, sign_in1); - in2 = _mm_sub_epi16(in2, sign_in2); - in3 = _mm_sub_epi16(in3, sign_in3); - in4 = _mm_sub_epi16(in4, sign_in4); - in5 = _mm_sub_epi16(in5, sign_in5); - in6 = _mm_sub_epi16(in6, sign_in6); - in7 = _mm_sub_epi16(in7, sign_in7); - in0 = _mm_srai_epi16(in0, 1); 
- in1 = _mm_srai_epi16(in1, 1); - in2 = _mm_srai_epi16(in2, 1); - in3 = _mm_srai_epi16(in3, 1); - in4 = _mm_srai_epi16(in4, 1); - in5 = _mm_srai_epi16(in5, 1); - in6 = _mm_srai_epi16(in6, 1); - in7 = _mm_srai_epi16(in7, 1); - // store results - store_output(&in0, (output + 0 * 8)); - store_output(&in1, (output + 1 * 8)); - store_output(&in2, (output + 2 * 8)); - store_output(&in3, (output + 3 * 8)); - store_output(&in4, (output + 4 * 8)); - store_output(&in5, (output + 5 * 8)); - store_output(&in6, (output + 6 * 8)); - store_output(&in7, (output + 7 * 8)); - } -} - -void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { - // The 2D transform is done with two passes which are actually pretty - // similar. In the first one, we transform the columns and transpose - // the results. In the second one, we transform the rows. To achieve that, - // as the first pass results are transposed, we transpose the columns (that - // is the transposed rows) and transpose the results (so that it goes back - // in normal/row positions). - int pass; - // We need an intermediate buffer between passes. - DECLARE_ALIGNED(16, int16_t, intermediate[256]); - const int16_t *in = input; - int16_t *out0 = intermediate; - tran_low_t *out1 = output; - // Constants - // When we use them, in one case, they are all the same. In all others - // it's a pair of them that we need to repeat four times. This is done - // by constructing the 32 bit constant corresponding to that pair. 
- const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64); - const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); - const __m128i k__cospi_p08_m24 = pair_set_epi16(cospi_8_64, -cospi_24_64); - const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); - const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); - const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); - const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); - const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); - const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64); - const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64); - const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64); - const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64); - const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64); - const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64); - const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64); - const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - const __m128i kOne = _mm_set1_epi16(1); - // Do the two transform/transpose passes - for (pass = 0; pass < 2; ++pass) { - // We process eight columns (transposed rows in second pass) at a time. 
- int column_start; -#if DCT_HIGH_BIT_DEPTH - int overflow; -#endif - for (column_start = 0; column_start < 16; column_start += 8) { - __m128i in00, in01, in02, in03, in04, in05, in06, in07; - __m128i in08, in09, in10, in11, in12, in13, in14, in15; - __m128i input0, input1, input2, input3, input4, input5, input6, input7; - __m128i step1_0, step1_1, step1_2, step1_3; - __m128i step1_4, step1_5, step1_6, step1_7; - __m128i step2_1, step2_2, step2_3, step2_4, step2_5, step2_6; - __m128i step3_0, step3_1, step3_2, step3_3; - __m128i step3_4, step3_5, step3_6, step3_7; - __m128i res00, res01, res02, res03, res04, res05, res06, res07; - __m128i res08, res09, res10, res11, res12, res13, res14, res15; - // Load and pre-condition input. - if (0 == pass) { - in00 = _mm_load_si128((const __m128i *)(in + 0 * stride)); - in01 = _mm_load_si128((const __m128i *)(in + 1 * stride)); - in02 = _mm_load_si128((const __m128i *)(in + 2 * stride)); - in03 = _mm_load_si128((const __m128i *)(in + 3 * stride)); - in04 = _mm_load_si128((const __m128i *)(in + 4 * stride)); - in05 = _mm_load_si128((const __m128i *)(in + 5 * stride)); - in06 = _mm_load_si128((const __m128i *)(in + 6 * stride)); - in07 = _mm_load_si128((const __m128i *)(in + 7 * stride)); - in08 = _mm_load_si128((const __m128i *)(in + 8 * stride)); - in09 = _mm_load_si128((const __m128i *)(in + 9 * stride)); - in10 = _mm_load_si128((const __m128i *)(in + 10 * stride)); - in11 = _mm_load_si128((const __m128i *)(in + 11 * stride)); - in12 = _mm_load_si128((const __m128i *)(in + 12 * stride)); - in13 = _mm_load_si128((const __m128i *)(in + 13 * stride)); - in14 = _mm_load_si128((const __m128i *)(in + 14 * stride)); - in15 = _mm_load_si128((const __m128i *)(in + 15 * stride)); - // x = x << 2 - in00 = _mm_slli_epi16(in00, 2); - in01 = _mm_slli_epi16(in01, 2); - in02 = _mm_slli_epi16(in02, 2); - in03 = _mm_slli_epi16(in03, 2); - in04 = _mm_slli_epi16(in04, 2); - in05 = _mm_slli_epi16(in05, 2); - in06 = _mm_slli_epi16(in06, 2); - in07 
= _mm_slli_epi16(in07, 2); - in08 = _mm_slli_epi16(in08, 2); - in09 = _mm_slli_epi16(in09, 2); - in10 = _mm_slli_epi16(in10, 2); - in11 = _mm_slli_epi16(in11, 2); - in12 = _mm_slli_epi16(in12, 2); - in13 = _mm_slli_epi16(in13, 2); - in14 = _mm_slli_epi16(in14, 2); - in15 = _mm_slli_epi16(in15, 2); - } else { - in00 = _mm_load_si128((const __m128i *)(in + 0 * 16)); - in01 = _mm_load_si128((const __m128i *)(in + 1 * 16)); - in02 = _mm_load_si128((const __m128i *)(in + 2 * 16)); - in03 = _mm_load_si128((const __m128i *)(in + 3 * 16)); - in04 = _mm_load_si128((const __m128i *)(in + 4 * 16)); - in05 = _mm_load_si128((const __m128i *)(in + 5 * 16)); - in06 = _mm_load_si128((const __m128i *)(in + 6 * 16)); - in07 = _mm_load_si128((const __m128i *)(in + 7 * 16)); - in08 = _mm_load_si128((const __m128i *)(in + 8 * 16)); - in09 = _mm_load_si128((const __m128i *)(in + 9 * 16)); - in10 = _mm_load_si128((const __m128i *)(in + 10 * 16)); - in11 = _mm_load_si128((const __m128i *)(in + 11 * 16)); - in12 = _mm_load_si128((const __m128i *)(in + 12 * 16)); - in13 = _mm_load_si128((const __m128i *)(in + 13 * 16)); - in14 = _mm_load_si128((const __m128i *)(in + 14 * 16)); - in15 = _mm_load_si128((const __m128i *)(in + 15 * 16)); - // x = (x + 1) >> 2 - in00 = _mm_add_epi16(in00, kOne); - in01 = _mm_add_epi16(in01, kOne); - in02 = _mm_add_epi16(in02, kOne); - in03 = _mm_add_epi16(in03, kOne); - in04 = _mm_add_epi16(in04, kOne); - in05 = _mm_add_epi16(in05, kOne); - in06 = _mm_add_epi16(in06, kOne); - in07 = _mm_add_epi16(in07, kOne); - in08 = _mm_add_epi16(in08, kOne); - in09 = _mm_add_epi16(in09, kOne); - in10 = _mm_add_epi16(in10, kOne); - in11 = _mm_add_epi16(in11, kOne); - in12 = _mm_add_epi16(in12, kOne); - in13 = _mm_add_epi16(in13, kOne); - in14 = _mm_add_epi16(in14, kOne); - in15 = _mm_add_epi16(in15, kOne); - in00 = _mm_srai_epi16(in00, 2); - in01 = _mm_srai_epi16(in01, 2); - in02 = _mm_srai_epi16(in02, 2); - in03 = _mm_srai_epi16(in03, 2); - in04 = _mm_srai_epi16(in04, 2); - 
in05 = _mm_srai_epi16(in05, 2); - in06 = _mm_srai_epi16(in06, 2); - in07 = _mm_srai_epi16(in07, 2); - in08 = _mm_srai_epi16(in08, 2); - in09 = _mm_srai_epi16(in09, 2); - in10 = _mm_srai_epi16(in10, 2); - in11 = _mm_srai_epi16(in11, 2); - in12 = _mm_srai_epi16(in12, 2); - in13 = _mm_srai_epi16(in13, 2); - in14 = _mm_srai_epi16(in14, 2); - in15 = _mm_srai_epi16(in15, 2); - } - in += 8; - // Calculate input for the first 8 results. - { - input0 = ADD_EPI16(in00, in15); - input1 = ADD_EPI16(in01, in14); - input2 = ADD_EPI16(in02, in13); - input3 = ADD_EPI16(in03, in12); - input4 = ADD_EPI16(in04, in11); - input5 = ADD_EPI16(in05, in10); - input6 = ADD_EPI16(in06, in09); - input7 = ADD_EPI16(in07, in08); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&input0, &input1, &input2, &input3, - &input4, &input5, &input6, &input7); - if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - // Calculate input for the next 8 results. 
- { - step1_0 = SUB_EPI16(in07, in08); - step1_1 = SUB_EPI16(in06, in09); - step1_2 = SUB_EPI16(in05, in10); - step1_3 = SUB_EPI16(in04, in11); - step1_4 = SUB_EPI16(in03, in12); - step1_5 = SUB_EPI16(in02, in13); - step1_6 = SUB_EPI16(in01, in14); - step1_7 = SUB_EPI16(in00, in15); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step1_0, &step1_1, - &step1_2, &step1_3, - &step1_4, &step1_5, - &step1_6, &step1_7); - if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - // Work on the first eight values; fdct8(input, even_results); - { - // Add/subtract - const __m128i q0 = ADD_EPI16(input0, input7); - const __m128i q1 = ADD_EPI16(input1, input6); - const __m128i q2 = ADD_EPI16(input2, input5); - const __m128i q3 = ADD_EPI16(input3, input4); - const __m128i q4 = SUB_EPI16(input3, input4); - const __m128i q5 = SUB_EPI16(input2, input5); - const __m128i q6 = SUB_EPI16(input1, input6); - const __m128i q7 = SUB_EPI16(input0, input7); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&q0, &q1, &q2, &q3, - &q4, &q5, &q6, &q7); - if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - // Work on first four results - { - // Add/subtract - const __m128i r0 = ADD_EPI16(q0, q3); - const __m128i r1 = ADD_EPI16(q1, q2); - const __m128i r2 = SUB_EPI16(q1, q2); - const __m128i r3 = SUB_EPI16(q0, q3); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3); - if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - // Interleave to do the multiply by constants which gets us - // into 32 bits. 
- { - const __m128i t0 = _mm_unpacklo_epi16(r0, r1); - const __m128i t1 = _mm_unpackhi_epi16(r0, r1); - const __m128i t2 = _mm_unpacklo_epi16(r2, r3); - const __m128i t3 = _mm_unpackhi_epi16(r2, r3); - res00 = mult_round_shift(&t0, &t1, &k__cospi_p16_p16, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - res08 = mult_round_shift(&t0, &t1, &k__cospi_p16_m16, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - res04 = mult_round_shift(&t2, &t3, &k__cospi_p24_p08, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - res12 = mult_round_shift(&t2, &t3, &k__cospi_m08_p24, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&res00, &res08, &res04, &res12); - if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - } - // Work on next four results - { - // Interleave to do the multiply by constants which gets us - // into 32 bits. - const __m128i d0 = _mm_unpacklo_epi16(q6, q5); - const __m128i d1 = _mm_unpackhi_epi16(q6, q5); - const __m128i r0 = mult_round_shift(&d0, &d1, &k__cospi_p16_m16, - &k__DCT_CONST_ROUNDING, - DCT_CONST_BITS); - const __m128i r1 = mult_round_shift(&d0, &d1, &k__cospi_p16_p16, - &k__DCT_CONST_ROUNDING, - DCT_CONST_BITS); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x2(&r0, &r1); - if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - { - // Add/subtract - const __m128i x0 = ADD_EPI16(q4, r0); - const __m128i x1 = SUB_EPI16(q4, r0); - const __m128i x2 = SUB_EPI16(q7, r1); - const __m128i x3 = ADD_EPI16(q7, r1); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3); - if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - // Interleave to do the multiply by constants which gets us - // into 32 bits. 
- { - const __m128i t0 = _mm_unpacklo_epi16(x0, x3); - const __m128i t1 = _mm_unpackhi_epi16(x0, x3); - const __m128i t2 = _mm_unpacklo_epi16(x1, x2); - const __m128i t3 = _mm_unpackhi_epi16(x1, x2); - res02 = mult_round_shift(&t0, &t1, &k__cospi_p28_p04, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - res14 = mult_round_shift(&t0, &t1, &k__cospi_m04_p28, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - res10 = mult_round_shift(&t2, &t3, &k__cospi_p12_p20, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - res06 = mult_round_shift(&t2, &t3, &k__cospi_m20_p12, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&res02, &res14, - &res10, &res06); - if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - } - } - } - // Work on the next eight values; step1 -> odd_results - { - // step 2 - { - const __m128i t0 = _mm_unpacklo_epi16(step1_5, step1_2); - const __m128i t1 = _mm_unpackhi_epi16(step1_5, step1_2); - const __m128i t2 = _mm_unpacklo_epi16(step1_4, step1_3); - const __m128i t3 = _mm_unpackhi_epi16(step1_4, step1_3); - step2_2 = mult_round_shift(&t0, &t1, &k__cospi_p16_m16, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - step2_3 = mult_round_shift(&t2, &t3, &k__cospi_p16_m16, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - step2_5 = mult_round_shift(&t0, &t1, &k__cospi_p16_p16, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - step2_4 = mult_round_shift(&t2, &t3, &k__cospi_p16_p16, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5, - &step2_4); - if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - // step 3 - { - step3_0 = ADD_EPI16(step1_0, step2_3); - step3_1 = ADD_EPI16(step1_1, step2_2); - step3_2 = SUB_EPI16(step1_1, step2_2); - step3_3 = SUB_EPI16(step1_0, step2_3); - step3_4 = SUB_EPI16(step1_7, step2_4); - step3_5 = 
SUB_EPI16(step1_6, step2_5); - step3_6 = ADD_EPI16(step1_6, step2_5); - step3_7 = ADD_EPI16(step1_7, step2_4); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step3_0, &step3_1, - &step3_2, &step3_3, - &step3_4, &step3_5, - &step3_6, &step3_7); - if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - // step 4 - { - const __m128i t0 = _mm_unpacklo_epi16(step3_1, step3_6); - const __m128i t1 = _mm_unpackhi_epi16(step3_1, step3_6); - const __m128i t2 = _mm_unpacklo_epi16(step3_2, step3_5); - const __m128i t3 = _mm_unpackhi_epi16(step3_2, step3_5); - step2_1 = mult_round_shift(&t0, &t1, &k__cospi_m08_p24, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - step2_2 = mult_round_shift(&t2, &t3, &k__cospi_p24_p08, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - step2_6 = mult_round_shift(&t0, &t1, &k__cospi_p24_p08, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - step2_5 = mult_round_shift(&t2, &t3, &k__cospi_p08_m24, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6, - &step2_5); - if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - // step 5 - { - step1_0 = ADD_EPI16(step3_0, step2_1); - step1_1 = SUB_EPI16(step3_0, step2_1); - step1_2 = ADD_EPI16(step3_3, step2_2); - step1_3 = SUB_EPI16(step3_3, step2_2); - step1_4 = SUB_EPI16(step3_4, step2_5); - step1_5 = ADD_EPI16(step3_4, step2_5); - step1_6 = SUB_EPI16(step3_7, step2_6); - step1_7 = ADD_EPI16(step3_7, step2_6); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step1_0, &step1_1, - &step1_2, &step1_3, - &step1_4, &step1_5, - &step1_6, &step1_7); - if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - // step 6 - { - const __m128i t0 = _mm_unpacklo_epi16(step1_0, step1_7); - const __m128i t1 = _mm_unpackhi_epi16(step1_0, step1_7); - const 
__m128i t2 = _mm_unpacklo_epi16(step1_1, step1_6); - const __m128i t3 = _mm_unpackhi_epi16(step1_1, step1_6); - res01 = mult_round_shift(&t0, &t1, &k__cospi_p30_p02, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - res09 = mult_round_shift(&t2, &t3, &k__cospi_p14_p18, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - res15 = mult_round_shift(&t0, &t1, &k__cospi_m02_p30, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - res07 = mult_round_shift(&t2, &t3, &k__cospi_m18_p14, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&res01, &res09, &res15, &res07); - if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i t0 = _mm_unpacklo_epi16(step1_2, step1_5); - const __m128i t1 = _mm_unpackhi_epi16(step1_2, step1_5); - const __m128i t2 = _mm_unpacklo_epi16(step1_3, step1_4); - const __m128i t3 = _mm_unpackhi_epi16(step1_3, step1_4); - res05 = mult_round_shift(&t0, &t1, &k__cospi_p22_p10, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - res13 = mult_round_shift(&t2, &t3, &k__cospi_p06_p26, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - res11 = mult_round_shift(&t0, &t1, &k__cospi_m10_p22, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); - res03 = mult_round_shift(&t2, &t3, &k__cospi_m26_p06, - &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); -#if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&res05, &res13, &res11, &res03); - if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); - return; - } -#endif // DCT_HIGH_BIT_DEPTH - } - } - // Transpose the results, do it as two 8x8 transposes. - transpose_and_output8x8(&res00, &res01, &res02, &res03, - &res04, &res05, &res06, &res07, - pass, out0, out1); - transpose_and_output8x8(&res08, &res09, &res10, &res11, - &res12, &res13, &res14, &res15, - pass, out0 + 8, out1 + 8); - if (pass == 0) { - out0 += 8*16; - } else { - out1 += 8*16; - } - } - // Setup in/out for next pass. 
- in = intermediate; - } -} - -#undef ADD_EPI16 -#undef SUB_EPI16 diff --git a/libs/libvpx/vp10/common/x86/vp10_fwd_txfm_sse2.c b/libs/libvpx/vp10/common/x86/vp10_fwd_txfm_sse2.c deleted file mode 100644 index 032c3ccd1d..0000000000 --- a/libs/libvpx/vp10/common/x86/vp10_fwd_txfm_sse2.c +++ /dev/null @@ -1,271 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include // SSE2 - -#include "./vpx_config.h" -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_dsp/x86/fwd_txfm_sse2.h" - -void vp10_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) { - __m128i in0, in1; - __m128i tmp; - const __m128i zero = _mm_setzero_si128(); - in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride)); - in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride)); - in1 = _mm_unpacklo_epi64(in1, _mm_loadl_epi64((const __m128i *) - (input + 2 * stride))); - in0 = _mm_unpacklo_epi64(in0, _mm_loadl_epi64((const __m128i *) - (input + 3 * stride))); - - tmp = _mm_add_epi16(in0, in1); - in0 = _mm_unpacklo_epi16(zero, tmp); - in1 = _mm_unpackhi_epi16(zero, tmp); - in0 = _mm_srai_epi32(in0, 16); - in1 = _mm_srai_epi32(in1, 16); - - tmp = _mm_add_epi32(in0, in1); - in0 = _mm_unpacklo_epi32(tmp, zero); - in1 = _mm_unpackhi_epi32(tmp, zero); - - tmp = _mm_add_epi32(in0, in1); - in0 = _mm_srli_si128(tmp, 8); - - in1 = _mm_add_epi32(tmp, in0); - in0 = _mm_slli_epi32(in1, 1); - store_output(&in0, output); -} - -void vp10_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) { - __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); - __m128i in1 = _mm_load_si128((const __m128i 
*)(input + 1 * stride)); - __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); - __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); - __m128i u0, u1, sum; - - u0 = _mm_add_epi16(in0, in1); - u1 = _mm_add_epi16(in2, in3); - - in0 = _mm_load_si128((const __m128i *)(input + 4 * stride)); - in1 = _mm_load_si128((const __m128i *)(input + 5 * stride)); - in2 = _mm_load_si128((const __m128i *)(input + 6 * stride)); - in3 = _mm_load_si128((const __m128i *)(input + 7 * stride)); - - sum = _mm_add_epi16(u0, u1); - - in0 = _mm_add_epi16(in0, in1); - in2 = _mm_add_epi16(in2, in3); - sum = _mm_add_epi16(sum, in0); - - u0 = _mm_setzero_si128(); - sum = _mm_add_epi16(sum, in2); - - in0 = _mm_unpacklo_epi16(u0, sum); - in1 = _mm_unpackhi_epi16(u0, sum); - in0 = _mm_srai_epi32(in0, 16); - in1 = _mm_srai_epi32(in1, 16); - - sum = _mm_add_epi32(in0, in1); - in0 = _mm_unpacklo_epi32(sum, u0); - in1 = _mm_unpackhi_epi32(sum, u0); - - sum = _mm_add_epi32(in0, in1); - in0 = _mm_srli_si128(sum, 8); - - in1 = _mm_add_epi32(sum, in0); - store_output(&in1, output); -} - -void vp10_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output, - int stride) { - __m128i in0, in1, in2, in3; - __m128i u0, u1; - __m128i sum = _mm_setzero_si128(); - int i; - - for (i = 0; i < 2; ++i) { - input += 8 * i; - in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); - in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); - in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); - in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); - - u0 = _mm_add_epi16(in0, in1); - u1 = _mm_add_epi16(in2, in3); - sum = _mm_add_epi16(sum, u0); - - in0 = _mm_load_si128((const __m128i *)(input + 4 * stride)); - in1 = _mm_load_si128((const __m128i *)(input + 5 * stride)); - in2 = _mm_load_si128((const __m128i *)(input + 6 * stride)); - in3 = _mm_load_si128((const __m128i *)(input + 7 * stride)); - - sum = _mm_add_epi16(sum, u1); - u0 = _mm_add_epi16(in0, in1); - u1 = 
_mm_add_epi16(in2, in3); - sum = _mm_add_epi16(sum, u0); - - in0 = _mm_load_si128((const __m128i *)(input + 8 * stride)); - in1 = _mm_load_si128((const __m128i *)(input + 9 * stride)); - in2 = _mm_load_si128((const __m128i *)(input + 10 * stride)); - in3 = _mm_load_si128((const __m128i *)(input + 11 * stride)); - - sum = _mm_add_epi16(sum, u1); - u0 = _mm_add_epi16(in0, in1); - u1 = _mm_add_epi16(in2, in3); - sum = _mm_add_epi16(sum, u0); - - in0 = _mm_load_si128((const __m128i *)(input + 12 * stride)); - in1 = _mm_load_si128((const __m128i *)(input + 13 * stride)); - in2 = _mm_load_si128((const __m128i *)(input + 14 * stride)); - in3 = _mm_load_si128((const __m128i *)(input + 15 * stride)); - - sum = _mm_add_epi16(sum, u1); - u0 = _mm_add_epi16(in0, in1); - u1 = _mm_add_epi16(in2, in3); - sum = _mm_add_epi16(sum, u0); - - sum = _mm_add_epi16(sum, u1); - } - - u0 = _mm_setzero_si128(); - in0 = _mm_unpacklo_epi16(u0, sum); - in1 = _mm_unpackhi_epi16(u0, sum); - in0 = _mm_srai_epi32(in0, 16); - in1 = _mm_srai_epi32(in1, 16); - - sum = _mm_add_epi32(in0, in1); - in0 = _mm_unpacklo_epi32(sum, u0); - in1 = _mm_unpackhi_epi32(sum, u0); - - sum = _mm_add_epi32(in0, in1); - in0 = _mm_srli_si128(sum, 8); - - in1 = _mm_add_epi32(sum, in0); - in1 = _mm_srai_epi32(in1, 1); - store_output(&in1, output); -} - -void vp10_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output, - int stride) { - __m128i in0, in1, in2, in3; - __m128i u0, u1; - __m128i sum = _mm_setzero_si128(); - int i; - - for (i = 0; i < 8; ++i) { - in0 = _mm_load_si128((const __m128i *)(input + 0)); - in1 = _mm_load_si128((const __m128i *)(input + 8)); - in2 = _mm_load_si128((const __m128i *)(input + 16)); - in3 = _mm_load_si128((const __m128i *)(input + 24)); - - input += stride; - u0 = _mm_add_epi16(in0, in1); - u1 = _mm_add_epi16(in2, in3); - sum = _mm_add_epi16(sum, u0); - - in0 = _mm_load_si128((const __m128i *)(input + 0)); - in1 = _mm_load_si128((const __m128i *)(input + 8)); - in2 = 
_mm_load_si128((const __m128i *)(input + 16)); - in3 = _mm_load_si128((const __m128i *)(input + 24)); - - input += stride; - sum = _mm_add_epi16(sum, u1); - u0 = _mm_add_epi16(in0, in1); - u1 = _mm_add_epi16(in2, in3); - sum = _mm_add_epi16(sum, u0); - - in0 = _mm_load_si128((const __m128i *)(input + 0)); - in1 = _mm_load_si128((const __m128i *)(input + 8)); - in2 = _mm_load_si128((const __m128i *)(input + 16)); - in3 = _mm_load_si128((const __m128i *)(input + 24)); - - input += stride; - sum = _mm_add_epi16(sum, u1); - u0 = _mm_add_epi16(in0, in1); - u1 = _mm_add_epi16(in2, in3); - sum = _mm_add_epi16(sum, u0); - - in0 = _mm_load_si128((const __m128i *)(input + 0)); - in1 = _mm_load_si128((const __m128i *)(input + 8)); - in2 = _mm_load_si128((const __m128i *)(input + 16)); - in3 = _mm_load_si128((const __m128i *)(input + 24)); - - input += stride; - sum = _mm_add_epi16(sum, u1); - u0 = _mm_add_epi16(in0, in1); - u1 = _mm_add_epi16(in2, in3); - sum = _mm_add_epi16(sum, u0); - - sum = _mm_add_epi16(sum, u1); - } - - u0 = _mm_setzero_si128(); - in0 = _mm_unpacklo_epi16(u0, sum); - in1 = _mm_unpackhi_epi16(u0, sum); - in0 = _mm_srai_epi32(in0, 16); - in1 = _mm_srai_epi32(in1, 16); - - sum = _mm_add_epi32(in0, in1); - in0 = _mm_unpacklo_epi32(sum, u0); - in1 = _mm_unpackhi_epi32(sum, u0); - - sum = _mm_add_epi32(in0, in1); - in0 = _mm_srli_si128(sum, 8); - - in1 = _mm_add_epi32(sum, in0); - in1 = _mm_srai_epi32(in1, 3); - store_output(&in1, output); -} - -#define DCT_HIGH_BIT_DEPTH 0 -#define FDCT4x4_2D vp10_fdct4x4_sse2 -#define FDCT8x8_2D vp10_fdct8x8_sse2 -#define FDCT16x16_2D vp10_fdct16x16_sse2 -#include "vp10/common/x86/vp10_fwd_txfm_impl_sse2.h" -#undef FDCT4x4_2D -#undef FDCT8x8_2D -#undef FDCT16x16_2D - -#define FDCT32x32_2D vp10_fdct32x32_rd_sse2 -#define FDCT32x32_HIGH_PRECISION 0 -#include "vp10/common/x86/vp10_fwd_dct32x32_impl_sse2.h" -#undef FDCT32x32_2D -#undef FDCT32x32_HIGH_PRECISION - -#define FDCT32x32_2D vp10_fdct32x32_sse2 -#define 
FDCT32x32_HIGH_PRECISION 1 -#include "vp10/common/x86/vp10_fwd_dct32x32_impl_sse2.h" // NOLINT -#undef FDCT32x32_2D -#undef FDCT32x32_HIGH_PRECISION -#undef DCT_HIGH_BIT_DEPTH - -#if CONFIG_VP9_HIGHBITDEPTH -#define DCT_HIGH_BIT_DEPTH 1 -#define FDCT4x4_2D vp10_highbd_fdct4x4_sse2 -#define FDCT8x8_2D vp10_highbd_fdct8x8_sse2 -#define FDCT16x16_2D vp10_highbd_fdct16x16_sse2 -#include "vp10/common/x86/vp10_fwd_txfm_impl_sse2.h" // NOLINT -#undef FDCT4x4_2D -#undef FDCT8x8_2D -#undef FDCT16x16_2D - -#define FDCT32x32_2D vp10_highbd_fdct32x32_rd_sse2 -#define FDCT32x32_HIGH_PRECISION 0 -#include "vp10/common/x86/vp10_fwd_dct32x32_impl_sse2.h" // NOLINT -#undef FDCT32x32_2D -#undef FDCT32x32_HIGH_PRECISION - -#define FDCT32x32_2D vp10_highbd_fdct32x32_sse2 -#define FDCT32x32_HIGH_PRECISION 1 -#include "vp10/common/x86/vp10_fwd_dct32x32_impl_sse2.h" // NOLINT -#undef FDCT32x32_2D -#undef FDCT32x32_HIGH_PRECISION -#undef DCT_HIGH_BIT_DEPTH -#endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vp10/common/x86/vp10_inv_txfm_sse2.c b/libs/libvpx/vp10/common/x86/vp10_inv_txfm_sse2.c deleted file mode 100644 index b25e22e0ee..0000000000 --- a/libs/libvpx/vp10/common/x86/vp10_inv_txfm_sse2.c +++ /dev/null @@ -1,4058 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "./vp10_rtcd.h" -#include "vp10/common/x86/vp10_inv_txfm_sse2.h" -#include "vpx_dsp/x86/txfm_common_sse2.h" - -#define RECON_AND_STORE4X4(dest, in_x) \ -{ \ - __m128i d0 = _mm_cvtsi32_si128(*(const int *)(dest)); \ - d0 = _mm_unpacklo_epi8(d0, zero); \ - d0 = _mm_add_epi16(in_x, d0); \ - d0 = _mm_packus_epi16(d0, d0); \ - *(int *)(dest) = _mm_cvtsi128_si32(d0); \ -} - -void vp10_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) { - const __m128i zero = _mm_setzero_si128(); - const __m128i eight = _mm_set1_epi16(8); - const __m128i cst = _mm_setr_epi16( - (int16_t)cospi_16_64, (int16_t)cospi_16_64, (int16_t)cospi_16_64, - (int16_t)-cospi_16_64, (int16_t)cospi_24_64, (int16_t)-cospi_8_64, - (int16_t)cospi_8_64, (int16_t)cospi_24_64); - const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); - __m128i input0, input1, input2, input3; - - // Rows - input0 = _mm_load_si128((const __m128i *)input); - input2 = _mm_load_si128((const __m128i *)(input + 8)); - - // Construct i3, i1, i3, i1, i2, i0, i2, i0 - input0 = _mm_shufflelo_epi16(input0, 0xd8); - input0 = _mm_shufflehi_epi16(input0, 0xd8); - input2 = _mm_shufflelo_epi16(input2, 0xd8); - input2 = _mm_shufflehi_epi16(input2, 0xd8); - - input1 = _mm_unpackhi_epi32(input0, input0); - input0 = _mm_unpacklo_epi32(input0, input0); - input3 = _mm_unpackhi_epi32(input2, input2); - input2 = _mm_unpacklo_epi32(input2, input2); - - // Stage 1 - input0 = _mm_madd_epi16(input0, cst); - input1 = _mm_madd_epi16(input1, cst); - input2 = _mm_madd_epi16(input2, cst); - input3 = _mm_madd_epi16(input3, cst); - - input0 = _mm_add_epi32(input0, rounding); - input1 = _mm_add_epi32(input1, rounding); - input2 = _mm_add_epi32(input2, rounding); - input3 = _mm_add_epi32(input3, rounding); - - input0 = _mm_srai_epi32(input0, DCT_CONST_BITS); - input1 = _mm_srai_epi32(input1, DCT_CONST_BITS); - input2 = _mm_srai_epi32(input2, DCT_CONST_BITS); - input3 = _mm_srai_epi32(input3, DCT_CONST_BITS); - - // Stage 2 
- input0 = _mm_packs_epi32(input0, input1); - input1 = _mm_packs_epi32(input2, input3); - - // Transpose - input2 = _mm_unpacklo_epi16(input0, input1); - input3 = _mm_unpackhi_epi16(input0, input1); - input0 = _mm_unpacklo_epi32(input2, input3); - input1 = _mm_unpackhi_epi32(input2, input3); - - // Switch column2, column 3, and then, we got: - // input2: column1, column 0; input3: column2, column 3. - input1 = _mm_shuffle_epi32(input1, 0x4e); - input2 = _mm_add_epi16(input0, input1); - input3 = _mm_sub_epi16(input0, input1); - - // Columns - // Construct i3, i1, i3, i1, i2, i0, i2, i0 - input0 = _mm_unpacklo_epi32(input2, input2); - input1 = _mm_unpackhi_epi32(input2, input2); - input2 = _mm_unpackhi_epi32(input3, input3); - input3 = _mm_unpacklo_epi32(input3, input3); - - // Stage 1 - input0 = _mm_madd_epi16(input0, cst); - input1 = _mm_madd_epi16(input1, cst); - input2 = _mm_madd_epi16(input2, cst); - input3 = _mm_madd_epi16(input3, cst); - - input0 = _mm_add_epi32(input0, rounding); - input1 = _mm_add_epi32(input1, rounding); - input2 = _mm_add_epi32(input2, rounding); - input3 = _mm_add_epi32(input3, rounding); - - input0 = _mm_srai_epi32(input0, DCT_CONST_BITS); - input1 = _mm_srai_epi32(input1, DCT_CONST_BITS); - input2 = _mm_srai_epi32(input2, DCT_CONST_BITS); - input3 = _mm_srai_epi32(input3, DCT_CONST_BITS); - - // Stage 2 - input0 = _mm_packs_epi32(input0, input2); - input1 = _mm_packs_epi32(input1, input3); - - // Transpose - input2 = _mm_unpacklo_epi16(input0, input1); - input3 = _mm_unpackhi_epi16(input0, input1); - input0 = _mm_unpacklo_epi32(input2, input3); - input1 = _mm_unpackhi_epi32(input2, input3); - - // Switch column2, column 3, and then, we got: - // input2: column1, column 0; input3: column2, column 3. 
- input1 = _mm_shuffle_epi32(input1, 0x4e); - input2 = _mm_add_epi16(input0, input1); - input3 = _mm_sub_epi16(input0, input1); - - // Final round and shift - input2 = _mm_add_epi16(input2, eight); - input3 = _mm_add_epi16(input3, eight); - - input2 = _mm_srai_epi16(input2, 4); - input3 = _mm_srai_epi16(input3, 4); - - // Reconstruction and Store - { - __m128i d0 = _mm_cvtsi32_si128(*(const int *)(dest)); - __m128i d2 = _mm_cvtsi32_si128(*(const int *)(dest + stride * 2)); - d0 = _mm_unpacklo_epi32(d0, - _mm_cvtsi32_si128(*(const int *)(dest + stride))); - d2 = _mm_unpacklo_epi32( - _mm_cvtsi32_si128(*(const int *)(dest + stride * 3)), d2); - d0 = _mm_unpacklo_epi8(d0, zero); - d2 = _mm_unpacklo_epi8(d2, zero); - d0 = _mm_add_epi16(d0, input2); - d2 = _mm_add_epi16(d2, input3); - d0 = _mm_packus_epi16(d0, d2); - // store input0 - *(int *)dest = _mm_cvtsi128_si32(d0); - // store input1 - d0 = _mm_srli_si128(d0, 4); - *(int *)(dest + stride) = _mm_cvtsi128_si32(d0); - // store input2 - d0 = _mm_srli_si128(d0, 4); - *(int *)(dest + stride * 3) = _mm_cvtsi128_si32(d0); - // store input3 - d0 = _mm_srli_si128(d0, 4); - *(int *)(dest + stride * 2) = _mm_cvtsi128_si32(d0); - } -} - -void vp10_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) { - __m128i dc_value; - const __m128i zero = _mm_setzero_si128(); - int a; - - a = dct_const_round_shift(input[0] * cospi_16_64); - a = dct_const_round_shift(a * cospi_16_64); - a = ROUND_POWER_OF_TWO(a, 4); - - dc_value = _mm_set1_epi16(a); - - RECON_AND_STORE4X4(dest + 0 * stride, dc_value); - RECON_AND_STORE4X4(dest + 1 * stride, dc_value); - RECON_AND_STORE4X4(dest + 2 * stride, dc_value); - RECON_AND_STORE4X4(dest + 3 * stride, dc_value); -} - -static INLINE void transpose_4x4(__m128i *res) { - const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]); - const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]); - - res[0] = _mm_unpacklo_epi16(tr0_0, tr0_1); - res[1] = _mm_unpackhi_epi16(tr0_0, tr0_1); -} - -void 
vp10_idct4_sse2(__m128i *in) { - const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64); - const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); - const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - __m128i u[8], v[8]; - - transpose_4x4(in); - // stage 1 - u[0] = _mm_unpacklo_epi16(in[0], in[1]); - u[1] = _mm_unpackhi_epi16(in[0], in[1]); - v[0] = _mm_madd_epi16(u[0], k__cospi_p16_p16); - v[1] = _mm_madd_epi16(u[0], k__cospi_p16_m16); - v[2] = _mm_madd_epi16(u[1], k__cospi_p24_m08); - v[3] = _mm_madd_epi16(u[1], k__cospi_p08_p24); - - u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); - u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); - - v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); - v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - - u[0] = _mm_packs_epi32(v[0], v[1]); - u[1] = _mm_packs_epi32(v[3], v[2]); - - // stage 2 - in[0] = _mm_add_epi16(u[0], u[1]); - in[1] = _mm_sub_epi16(u[0], u[1]); - in[1] = _mm_shuffle_epi32(in[1], 0x4E); -} - -void vp10_iadst4_sse2(__m128i *in) { - const __m128i k__sinpi_p01_p04 = pair_set_epi16(sinpi_1_9, sinpi_4_9); - const __m128i k__sinpi_p03_p02 = pair_set_epi16(sinpi_3_9, sinpi_2_9); - const __m128i k__sinpi_p02_m01 = pair_set_epi16(sinpi_2_9, -sinpi_1_9); - const __m128i k__sinpi_p03_m04 = pair_set_epi16(sinpi_3_9, -sinpi_4_9); - const __m128i k__sinpi_p03_p03 = _mm_set1_epi16((int16_t)sinpi_3_9); - const __m128i kZero = _mm_set1_epi16(0); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - __m128i u[8], v[8], in7; - - transpose_4x4(in); - in7 = _mm_srli_si128(in[1], 8); - in7 = 
_mm_add_epi16(in7, in[0]); - in7 = _mm_sub_epi16(in7, in[1]); - - u[0] = _mm_unpacklo_epi16(in[0], in[1]); - u[1] = _mm_unpackhi_epi16(in[0], in[1]); - u[2] = _mm_unpacklo_epi16(in7, kZero); - u[3] = _mm_unpackhi_epi16(in[0], kZero); - - v[0] = _mm_madd_epi16(u[0], k__sinpi_p01_p04); // s0 + s3 - v[1] = _mm_madd_epi16(u[1], k__sinpi_p03_p02); // s2 + s5 - v[2] = _mm_madd_epi16(u[2], k__sinpi_p03_p03); // x2 - v[3] = _mm_madd_epi16(u[0], k__sinpi_p02_m01); // s1 - s4 - v[4] = _mm_madd_epi16(u[1], k__sinpi_p03_m04); // s2 - s6 - v[5] = _mm_madd_epi16(u[3], k__sinpi_p03_p03); // s2 - - u[0] = _mm_add_epi32(v[0], v[1]); - u[1] = _mm_add_epi32(v[3], v[4]); - u[2] = v[2]; - u[3] = _mm_add_epi32(u[0], u[1]); - u[4] = _mm_slli_epi32(v[5], 2); - u[5] = _mm_add_epi32(u[3], v[5]); - u[6] = _mm_sub_epi32(u[5], u[4]); - - v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); - v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); - v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); - v[3] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); - - u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); - u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); - u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); - u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); - - in[0] = _mm_packs_epi32(u[0], u[1]); - in[1] = _mm_packs_epi32(u[2], u[3]); -} - -#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) \ - { \ - const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \ - const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \ - const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1); \ - const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3); \ - const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5); \ - const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7); \ - const __m128i tr0_6 = _mm_unpackhi_epi16(in4, in5); \ - const __m128i tr0_7 = _mm_unpackhi_epi16(in6, in7); \ - \ - const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \ - const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); 
\ - const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \ - const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); \ - const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \ - const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); \ - const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \ - const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); \ - \ - out0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \ - out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \ - out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \ - out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \ - out4 = _mm_unpacklo_epi64(tr1_1, tr1_5); \ - out5 = _mm_unpackhi_epi64(tr1_1, tr1_5); \ - out6 = _mm_unpacklo_epi64(tr1_3, tr1_7); \ - out7 = _mm_unpackhi_epi64(tr1_3, tr1_7); \ - } - -#define TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, \ - out0, out1, out2, out3) \ - { \ - const __m128i tr0_0 = _mm_unpackhi_epi16(tmp0, tmp1); \ - const __m128i tr0_1 = _mm_unpacklo_epi16(tmp1, tmp0); \ - const __m128i tr0_4 = _mm_unpacklo_epi16(tmp2, tmp3); \ - const __m128i tr0_5 = _mm_unpackhi_epi16(tmp3, tmp2); \ - \ - const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \ - const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \ - const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \ - const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \ - \ - out0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \ - out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \ - out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \ - out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \ - } - -#define TRANSPOSE_8X8_10(in0, in1, in2, in3, out0, out1) \ - { \ - const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \ - const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \ - out0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \ - out1 = _mm_unpackhi_epi32(tr0_0, tr0_1); \ - } - -// Define Macro for multiplying elements by constants and adding them together. 
-#define MULTIPLICATION_AND_ADD(lo_0, hi_0, lo_1, hi_1, \ - cst0, cst1, cst2, cst3, res0, res1, res2, res3) \ - { \ - tmp0 = _mm_madd_epi16(lo_0, cst0); \ - tmp1 = _mm_madd_epi16(hi_0, cst0); \ - tmp2 = _mm_madd_epi16(lo_0, cst1); \ - tmp3 = _mm_madd_epi16(hi_0, cst1); \ - tmp4 = _mm_madd_epi16(lo_1, cst2); \ - tmp5 = _mm_madd_epi16(hi_1, cst2); \ - tmp6 = _mm_madd_epi16(lo_1, cst3); \ - tmp7 = _mm_madd_epi16(hi_1, cst3); \ - \ - tmp0 = _mm_add_epi32(tmp0, rounding); \ - tmp1 = _mm_add_epi32(tmp1, rounding); \ - tmp2 = _mm_add_epi32(tmp2, rounding); \ - tmp3 = _mm_add_epi32(tmp3, rounding); \ - tmp4 = _mm_add_epi32(tmp4, rounding); \ - tmp5 = _mm_add_epi32(tmp5, rounding); \ - tmp6 = _mm_add_epi32(tmp6, rounding); \ - tmp7 = _mm_add_epi32(tmp7, rounding); \ - \ - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ - tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \ - tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \ - tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \ - tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \ - \ - res0 = _mm_packs_epi32(tmp0, tmp1); \ - res1 = _mm_packs_epi32(tmp2, tmp3); \ - res2 = _mm_packs_epi32(tmp4, tmp5); \ - res3 = _mm_packs_epi32(tmp6, tmp7); \ - } - -#define MULTIPLICATION_AND_ADD_2(lo_0, hi_0, cst0, cst1, res0, res1) \ - { \ - tmp0 = _mm_madd_epi16(lo_0, cst0); \ - tmp1 = _mm_madd_epi16(hi_0, cst0); \ - tmp2 = _mm_madd_epi16(lo_0, cst1); \ - tmp3 = _mm_madd_epi16(hi_0, cst1); \ - \ - tmp0 = _mm_add_epi32(tmp0, rounding); \ - tmp1 = _mm_add_epi32(tmp1, rounding); \ - tmp2 = _mm_add_epi32(tmp2, rounding); \ - tmp3 = _mm_add_epi32(tmp3, rounding); \ - \ - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ - \ - res0 = _mm_packs_epi32(tmp0, tmp1); \ - res1 = 
_mm_packs_epi32(tmp2, tmp3); \ - } - -#define IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) \ - { \ - /* Stage1 */ \ - { \ - const __m128i lo_17 = _mm_unpacklo_epi16(in1, in7); \ - const __m128i hi_17 = _mm_unpackhi_epi16(in1, in7); \ - const __m128i lo_35 = _mm_unpacklo_epi16(in3, in5); \ - const __m128i hi_35 = _mm_unpackhi_epi16(in3, in5); \ - \ - MULTIPLICATION_AND_ADD(lo_17, hi_17, lo_35, hi_35, stg1_0, \ - stg1_1, stg1_2, stg1_3, stp1_4, \ - stp1_7, stp1_5, stp1_6) \ - } \ - \ - /* Stage2 */ \ - { \ - const __m128i lo_04 = _mm_unpacklo_epi16(in0, in4); \ - const __m128i hi_04 = _mm_unpackhi_epi16(in0, in4); \ - const __m128i lo_26 = _mm_unpacklo_epi16(in2, in6); \ - const __m128i hi_26 = _mm_unpackhi_epi16(in2, in6); \ - \ - MULTIPLICATION_AND_ADD(lo_04, hi_04, lo_26, hi_26, stg2_0, \ - stg2_1, stg2_2, stg2_3, stp2_0, \ - stp2_1, stp2_2, stp2_3) \ - \ - stp2_4 = _mm_adds_epi16(stp1_4, stp1_5); \ - stp2_5 = _mm_subs_epi16(stp1_4, stp1_5); \ - stp2_6 = _mm_subs_epi16(stp1_7, stp1_6); \ - stp2_7 = _mm_adds_epi16(stp1_7, stp1_6); \ - } \ - \ - /* Stage3 */ \ - { \ - const __m128i lo_56 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ - const __m128i hi_56 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ - \ - stp1_0 = _mm_adds_epi16(stp2_0, stp2_3); \ - stp1_1 = _mm_adds_epi16(stp2_1, stp2_2); \ - stp1_2 = _mm_subs_epi16(stp2_1, stp2_2); \ - stp1_3 = _mm_subs_epi16(stp2_0, stp2_3); \ - \ - tmp0 = _mm_madd_epi16(lo_56, stg2_1); \ - tmp1 = _mm_madd_epi16(hi_56, stg2_1); \ - tmp2 = _mm_madd_epi16(lo_56, stg2_0); \ - tmp3 = _mm_madd_epi16(hi_56, stg2_0); \ - \ - tmp0 = _mm_add_epi32(tmp0, rounding); \ - tmp1 = _mm_add_epi32(tmp1, rounding); \ - tmp2 = _mm_add_epi32(tmp2, rounding); \ - tmp3 = _mm_add_epi32(tmp3, rounding); \ - \ - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ - \ - stp1_5 
= _mm_packs_epi32(tmp0, tmp1); \ - stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ - } \ - \ - /* Stage4 */ \ - out0 = _mm_adds_epi16(stp1_0, stp2_7); \ - out1 = _mm_adds_epi16(stp1_1, stp1_6); \ - out2 = _mm_adds_epi16(stp1_2, stp1_5); \ - out3 = _mm_adds_epi16(stp1_3, stp2_4); \ - out4 = _mm_subs_epi16(stp1_3, stp2_4); \ - out5 = _mm_subs_epi16(stp1_2, stp1_5); \ - out6 = _mm_subs_epi16(stp1_1, stp1_6); \ - out7 = _mm_subs_epi16(stp1_0, stp2_7); \ - } - -void vp10_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) { - const __m128i zero = _mm_setzero_si128(); - const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); - const __m128i final_rounding = _mm_set1_epi16(1 << 4); - const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64); - const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64); - const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64); - const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64); - const __m128i stg2_0 = pair_set_epi16(cospi_16_64, cospi_16_64); - const __m128i stg2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64); - const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64); - - __m128i in0, in1, in2, in3, in4, in5, in6, in7; - __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7; - __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7; - __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - int i; - - // Load input data. 
- in0 = _mm_load_si128((const __m128i *)input); - in1 = _mm_load_si128((const __m128i *)(input + 8 * 1)); - in2 = _mm_load_si128((const __m128i *)(input + 8 * 2)); - in3 = _mm_load_si128((const __m128i *)(input + 8 * 3)); - in4 = _mm_load_si128((const __m128i *)(input + 8 * 4)); - in5 = _mm_load_si128((const __m128i *)(input + 8 * 5)); - in6 = _mm_load_si128((const __m128i *)(input + 8 * 6)); - in7 = _mm_load_si128((const __m128i *)(input + 8 * 7)); - - // 2-D - for (i = 0; i < 2; i++) { - // 8x8 Transpose is copied from vp10_fdct8x8_sse2() - TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - - // 4-stage 1D vp10_idct8x8 - IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - } - - // Final rounding and shift - in0 = _mm_adds_epi16(in0, final_rounding); - in1 = _mm_adds_epi16(in1, final_rounding); - in2 = _mm_adds_epi16(in2, final_rounding); - in3 = _mm_adds_epi16(in3, final_rounding); - in4 = _mm_adds_epi16(in4, final_rounding); - in5 = _mm_adds_epi16(in5, final_rounding); - in6 = _mm_adds_epi16(in6, final_rounding); - in7 = _mm_adds_epi16(in7, final_rounding); - - in0 = _mm_srai_epi16(in0, 5); - in1 = _mm_srai_epi16(in1, 5); - in2 = _mm_srai_epi16(in2, 5); - in3 = _mm_srai_epi16(in3, 5); - in4 = _mm_srai_epi16(in4, 5); - in5 = _mm_srai_epi16(in5, 5); - in6 = _mm_srai_epi16(in6, 5); - in7 = _mm_srai_epi16(in7, 5); - - RECON_AND_STORE(dest + 0 * stride, in0); - RECON_AND_STORE(dest + 1 * stride, in1); - RECON_AND_STORE(dest + 2 * stride, in2); - RECON_AND_STORE(dest + 3 * stride, in3); - RECON_AND_STORE(dest + 4 * stride, in4); - RECON_AND_STORE(dest + 5 * stride, in5); - RECON_AND_STORE(dest + 6 * stride, in6); - RECON_AND_STORE(dest + 7 * stride, in7); -} - -void vp10_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) { - __m128i dc_value; - const __m128i zero = _mm_setzero_si128(); - int a; - - a = dct_const_round_shift(input[0] * cospi_16_64); - a = 
dct_const_round_shift(a * cospi_16_64); - a = ROUND_POWER_OF_TWO(a, 5); - - dc_value = _mm_set1_epi16(a); - - RECON_AND_STORE(dest + 0 * stride, dc_value); - RECON_AND_STORE(dest + 1 * stride, dc_value); - RECON_AND_STORE(dest + 2 * stride, dc_value); - RECON_AND_STORE(dest + 3 * stride, dc_value); - RECON_AND_STORE(dest + 4 * stride, dc_value); - RECON_AND_STORE(dest + 5 * stride, dc_value); - RECON_AND_STORE(dest + 6 * stride, dc_value); - RECON_AND_STORE(dest + 7 * stride, dc_value); -} - -void vp10_idct8_sse2(__m128i *in) { - const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); - const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64); - const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64); - const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64); - const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64); - const __m128i stg2_0 = pair_set_epi16(cospi_16_64, cospi_16_64); - const __m128i stg2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64); - const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64); - - __m128i in0, in1, in2, in3, in4, in5, in6, in7; - __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7; - __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7; - __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - - // 8x8 Transpose is copied from vp10_fdct8x8_sse2() - TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], - in0, in1, in2, in3, in4, in5, in6, in7); - - // 4-stage 1D vp10_idct8x8 - IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, - in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7]); -} - -void vp10_iadst8_sse2(__m128i *in) { - const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64); - const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64); - const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64); - const __m128i 
k__cospi_p22_m10 = pair_set_epi16(cospi_22_64, -cospi_10_64); - const __m128i k__cospi_p18_p14 = pair_set_epi16(cospi_18_64, cospi_14_64); - const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64); - const __m128i k__cospi_p26_p06 = pair_set_epi16(cospi_26_64, cospi_6_64); - const __m128i k__cospi_p06_m26 = pair_set_epi16(cospi_6_64, -cospi_26_64); - const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); - const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); - const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64); - const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64); - const __m128i k__const_0 = _mm_set1_epi16(0); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - - __m128i u0, u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11, u12, u13, u14, u15; - __m128i v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15; - __m128i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15; - __m128i s0, s1, s2, s3, s4, s5, s6, s7; - __m128i in0, in1, in2, in3, in4, in5, in6, in7; - - // transpose - array_transpose_8x8(in, in); - - // properly aligned for butterfly input - in0 = in[7]; - in1 = in[0]; - in2 = in[5]; - in3 = in[2]; - in4 = in[3]; - in5 = in[4]; - in6 = in[1]; - in7 = in[6]; - - // column transformation - // stage 1 - // interleave and multiply/add into 32-bit integer - s0 = _mm_unpacklo_epi16(in0, in1); - s1 = _mm_unpackhi_epi16(in0, in1); - s2 = _mm_unpacklo_epi16(in2, in3); - s3 = _mm_unpackhi_epi16(in2, in3); - s4 = _mm_unpacklo_epi16(in4, in5); - s5 = _mm_unpackhi_epi16(in4, in5); - s6 = _mm_unpacklo_epi16(in6, in7); - s7 = _mm_unpackhi_epi16(in6, in7); - - u0 = _mm_madd_epi16(s0, k__cospi_p02_p30); - u1 = _mm_madd_epi16(s1, k__cospi_p02_p30); - u2 = _mm_madd_epi16(s0, k__cospi_p30_m02); - u3 = _mm_madd_epi16(s1, k__cospi_p30_m02); - u4 
= _mm_madd_epi16(s2, k__cospi_p10_p22); - u5 = _mm_madd_epi16(s3, k__cospi_p10_p22); - u6 = _mm_madd_epi16(s2, k__cospi_p22_m10); - u7 = _mm_madd_epi16(s3, k__cospi_p22_m10); - u8 = _mm_madd_epi16(s4, k__cospi_p18_p14); - u9 = _mm_madd_epi16(s5, k__cospi_p18_p14); - u10 = _mm_madd_epi16(s4, k__cospi_p14_m18); - u11 = _mm_madd_epi16(s5, k__cospi_p14_m18); - u12 = _mm_madd_epi16(s6, k__cospi_p26_p06); - u13 = _mm_madd_epi16(s7, k__cospi_p26_p06); - u14 = _mm_madd_epi16(s6, k__cospi_p06_m26); - u15 = _mm_madd_epi16(s7, k__cospi_p06_m26); - - // addition - w0 = _mm_add_epi32(u0, u8); - w1 = _mm_add_epi32(u1, u9); - w2 = _mm_add_epi32(u2, u10); - w3 = _mm_add_epi32(u3, u11); - w4 = _mm_add_epi32(u4, u12); - w5 = _mm_add_epi32(u5, u13); - w6 = _mm_add_epi32(u6, u14); - w7 = _mm_add_epi32(u7, u15); - w8 = _mm_sub_epi32(u0, u8); - w9 = _mm_sub_epi32(u1, u9); - w10 = _mm_sub_epi32(u2, u10); - w11 = _mm_sub_epi32(u3, u11); - w12 = _mm_sub_epi32(u4, u12); - w13 = _mm_sub_epi32(u5, u13); - w14 = _mm_sub_epi32(u6, u14); - w15 = _mm_sub_epi32(u7, u15); - - // shift and rounding - v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING); - v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING); - v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING); - v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING); - v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING); - v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING); - v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING); - v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING); - v8 = _mm_add_epi32(w8, k__DCT_CONST_ROUNDING); - v9 = _mm_add_epi32(w9, k__DCT_CONST_ROUNDING); - v10 = _mm_add_epi32(w10, k__DCT_CONST_ROUNDING); - v11 = _mm_add_epi32(w11, k__DCT_CONST_ROUNDING); - v12 = _mm_add_epi32(w12, k__DCT_CONST_ROUNDING); - v13 = _mm_add_epi32(w13, k__DCT_CONST_ROUNDING); - v14 = _mm_add_epi32(w14, k__DCT_CONST_ROUNDING); - v15 = _mm_add_epi32(w15, k__DCT_CONST_ROUNDING); - - u0 = _mm_srai_epi32(v0, DCT_CONST_BITS); - u1 = _mm_srai_epi32(v1, DCT_CONST_BITS); - u2 = _mm_srai_epi32(v2, 
DCT_CONST_BITS); - u3 = _mm_srai_epi32(v3, DCT_CONST_BITS); - u4 = _mm_srai_epi32(v4, DCT_CONST_BITS); - u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); - u6 = _mm_srai_epi32(v6, DCT_CONST_BITS); - u7 = _mm_srai_epi32(v7, DCT_CONST_BITS); - u8 = _mm_srai_epi32(v8, DCT_CONST_BITS); - u9 = _mm_srai_epi32(v9, DCT_CONST_BITS); - u10 = _mm_srai_epi32(v10, DCT_CONST_BITS); - u11 = _mm_srai_epi32(v11, DCT_CONST_BITS); - u12 = _mm_srai_epi32(v12, DCT_CONST_BITS); - u13 = _mm_srai_epi32(v13, DCT_CONST_BITS); - u14 = _mm_srai_epi32(v14, DCT_CONST_BITS); - u15 = _mm_srai_epi32(v15, DCT_CONST_BITS); - - // back to 16-bit and pack 8 integers into __m128i - in[0] = _mm_packs_epi32(u0, u1); - in[1] = _mm_packs_epi32(u2, u3); - in[2] = _mm_packs_epi32(u4, u5); - in[3] = _mm_packs_epi32(u6, u7); - in[4] = _mm_packs_epi32(u8, u9); - in[5] = _mm_packs_epi32(u10, u11); - in[6] = _mm_packs_epi32(u12, u13); - in[7] = _mm_packs_epi32(u14, u15); - - // stage 2 - s0 = _mm_add_epi16(in[0], in[2]); - s1 = _mm_add_epi16(in[1], in[3]); - s2 = _mm_sub_epi16(in[0], in[2]); - s3 = _mm_sub_epi16(in[1], in[3]); - u0 = _mm_unpacklo_epi16(in[4], in[5]); - u1 = _mm_unpackhi_epi16(in[4], in[5]); - u2 = _mm_unpacklo_epi16(in[6], in[7]); - u3 = _mm_unpackhi_epi16(in[6], in[7]); - - v0 = _mm_madd_epi16(u0, k__cospi_p08_p24); - v1 = _mm_madd_epi16(u1, k__cospi_p08_p24); - v2 = _mm_madd_epi16(u0, k__cospi_p24_m08); - v3 = _mm_madd_epi16(u1, k__cospi_p24_m08); - v4 = _mm_madd_epi16(u2, k__cospi_m24_p08); - v5 = _mm_madd_epi16(u3, k__cospi_m24_p08); - v6 = _mm_madd_epi16(u2, k__cospi_p08_p24); - v7 = _mm_madd_epi16(u3, k__cospi_p08_p24); - - w0 = _mm_add_epi32(v0, v4); - w1 = _mm_add_epi32(v1, v5); - w2 = _mm_add_epi32(v2, v6); - w3 = _mm_add_epi32(v3, v7); - w4 = _mm_sub_epi32(v0, v4); - w5 = _mm_sub_epi32(v1, v5); - w6 = _mm_sub_epi32(v2, v6); - w7 = _mm_sub_epi32(v3, v7); - - v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING); - v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING); - v2 = _mm_add_epi32(w2, 
k__DCT_CONST_ROUNDING); - v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING); - v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING); - v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING); - v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING); - v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING); - - u0 = _mm_srai_epi32(v0, DCT_CONST_BITS); - u1 = _mm_srai_epi32(v1, DCT_CONST_BITS); - u2 = _mm_srai_epi32(v2, DCT_CONST_BITS); - u3 = _mm_srai_epi32(v3, DCT_CONST_BITS); - u4 = _mm_srai_epi32(v4, DCT_CONST_BITS); - u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); - u6 = _mm_srai_epi32(v6, DCT_CONST_BITS); - u7 = _mm_srai_epi32(v7, DCT_CONST_BITS); - - // back to 16-bit intergers - s4 = _mm_packs_epi32(u0, u1); - s5 = _mm_packs_epi32(u2, u3); - s6 = _mm_packs_epi32(u4, u5); - s7 = _mm_packs_epi32(u6, u7); - - // stage 3 - u0 = _mm_unpacklo_epi16(s2, s3); - u1 = _mm_unpackhi_epi16(s2, s3); - u2 = _mm_unpacklo_epi16(s6, s7); - u3 = _mm_unpackhi_epi16(s6, s7); - - v0 = _mm_madd_epi16(u0, k__cospi_p16_p16); - v1 = _mm_madd_epi16(u1, k__cospi_p16_p16); - v2 = _mm_madd_epi16(u0, k__cospi_p16_m16); - v3 = _mm_madd_epi16(u1, k__cospi_p16_m16); - v4 = _mm_madd_epi16(u2, k__cospi_p16_p16); - v5 = _mm_madd_epi16(u3, k__cospi_p16_p16); - v6 = _mm_madd_epi16(u2, k__cospi_p16_m16); - v7 = _mm_madd_epi16(u3, k__cospi_p16_m16); - - u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING); - u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING); - u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING); - u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING); - u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING); - u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING); - u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING); - u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING); - - v0 = _mm_srai_epi32(u0, DCT_CONST_BITS); - v1 = _mm_srai_epi32(u1, DCT_CONST_BITS); - v2 = _mm_srai_epi32(u2, DCT_CONST_BITS); - v3 = _mm_srai_epi32(u3, DCT_CONST_BITS); - v4 = _mm_srai_epi32(u4, DCT_CONST_BITS); - v5 = _mm_srai_epi32(u5, DCT_CONST_BITS); - v6 = _mm_srai_epi32(u6, DCT_CONST_BITS); - v7 = 
_mm_srai_epi32(u7, DCT_CONST_BITS); - - s2 = _mm_packs_epi32(v0, v1); - s3 = _mm_packs_epi32(v2, v3); - s6 = _mm_packs_epi32(v4, v5); - s7 = _mm_packs_epi32(v6, v7); - - in[0] = s0; - in[1] = _mm_sub_epi16(k__const_0, s4); - in[2] = s6; - in[3] = _mm_sub_epi16(k__const_0, s2); - in[4] = s3; - in[5] = _mm_sub_epi16(k__const_0, s7); - in[6] = s5; - in[7] = _mm_sub_epi16(k__const_0, s1); -} - -void vp10_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) { - const __m128i zero = _mm_setzero_si128(); - const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); - const __m128i final_rounding = _mm_set1_epi16(1 << 4); - const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64); - const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64); - const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64); - const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64); - const __m128i stg2_0 = pair_set_epi16(cospi_16_64, cospi_16_64); - const __m128i stg2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64); - const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64); - const __m128i stg3_0 = pair_set_epi16(-cospi_16_64, cospi_16_64); - - __m128i in0, in1, in2, in3, in4, in5, in6, in7; - __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7; - __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7; - __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - - // Rows. Load 4-row input data. 
- in0 = _mm_load_si128((const __m128i *)input); - in1 = _mm_load_si128((const __m128i *)(input + 8 * 1)); - in2 = _mm_load_si128((const __m128i *)(input + 8 * 2)); - in3 = _mm_load_si128((const __m128i *)(input + 8 * 3)); - - // 8x4 Transpose - TRANSPOSE_8X8_10(in0, in1, in2, in3, in0, in1); - // Stage1 - { - const __m128i lo_17 = _mm_unpackhi_epi16(in0, zero); - const __m128i lo_35 = _mm_unpackhi_epi16(in1, zero); - - tmp0 = _mm_madd_epi16(lo_17, stg1_0); - tmp2 = _mm_madd_epi16(lo_17, stg1_1); - tmp4 = _mm_madd_epi16(lo_35, stg1_2); - tmp6 = _mm_madd_epi16(lo_35, stg1_3); - - tmp0 = _mm_add_epi32(tmp0, rounding); - tmp2 = _mm_add_epi32(tmp2, rounding); - tmp4 = _mm_add_epi32(tmp4, rounding); - tmp6 = _mm_add_epi32(tmp6, rounding); - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); - tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); - tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); - - stp1_4 = _mm_packs_epi32(tmp0, tmp2); - stp1_5 = _mm_packs_epi32(tmp4, tmp6); - } - - // Stage2 - { - const __m128i lo_04 = _mm_unpacklo_epi16(in0, zero); - const __m128i lo_26 = _mm_unpacklo_epi16(in1, zero); - - tmp0 = _mm_madd_epi16(lo_04, stg2_0); - tmp2 = _mm_madd_epi16(lo_04, stg2_1); - tmp4 = _mm_madd_epi16(lo_26, stg2_2); - tmp6 = _mm_madd_epi16(lo_26, stg2_3); - - tmp0 = _mm_add_epi32(tmp0, rounding); - tmp2 = _mm_add_epi32(tmp2, rounding); - tmp4 = _mm_add_epi32(tmp4, rounding); - tmp6 = _mm_add_epi32(tmp6, rounding); - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); - tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); - tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); - - stp2_0 = _mm_packs_epi32(tmp0, tmp2); - stp2_2 = _mm_packs_epi32(tmp6, tmp4); - - tmp0 = _mm_adds_epi16(stp1_4, stp1_5); - tmp1 = _mm_subs_epi16(stp1_4, stp1_5); - - stp2_4 = tmp0; - stp2_5 = _mm_unpacklo_epi64(tmp1, zero); - stp2_6 = _mm_unpackhi_epi64(tmp1, zero); - } - - // Stage3 - { - const __m128i lo_56 = 
_mm_unpacklo_epi16(stp2_5, stp2_6); - - tmp4 = _mm_adds_epi16(stp2_0, stp2_2); - tmp6 = _mm_subs_epi16(stp2_0, stp2_2); - - stp1_2 = _mm_unpackhi_epi64(tmp6, tmp4); - stp1_3 = _mm_unpacklo_epi64(tmp6, tmp4); - - tmp0 = _mm_madd_epi16(lo_56, stg3_0); - tmp2 = _mm_madd_epi16(lo_56, stg2_0); // stg3_1 = stg2_0 - - tmp0 = _mm_add_epi32(tmp0, rounding); - tmp2 = _mm_add_epi32(tmp2, rounding); - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); - - stp1_5 = _mm_packs_epi32(tmp0, tmp2); - } - - // Stage4 - tmp0 = _mm_adds_epi16(stp1_3, stp2_4); - tmp1 = _mm_adds_epi16(stp1_2, stp1_5); - tmp2 = _mm_subs_epi16(stp1_3, stp2_4); - tmp3 = _mm_subs_epi16(stp1_2, stp1_5); - - TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, in0, in1, in2, in3) - - IDCT8(in0, in1, in2, in3, zero, zero, zero, zero, - in0, in1, in2, in3, in4, in5, in6, in7); - // Final rounding and shift - in0 = _mm_adds_epi16(in0, final_rounding); - in1 = _mm_adds_epi16(in1, final_rounding); - in2 = _mm_adds_epi16(in2, final_rounding); - in3 = _mm_adds_epi16(in3, final_rounding); - in4 = _mm_adds_epi16(in4, final_rounding); - in5 = _mm_adds_epi16(in5, final_rounding); - in6 = _mm_adds_epi16(in6, final_rounding); - in7 = _mm_adds_epi16(in7, final_rounding); - - in0 = _mm_srai_epi16(in0, 5); - in1 = _mm_srai_epi16(in1, 5); - in2 = _mm_srai_epi16(in2, 5); - in3 = _mm_srai_epi16(in3, 5); - in4 = _mm_srai_epi16(in4, 5); - in5 = _mm_srai_epi16(in5, 5); - in6 = _mm_srai_epi16(in6, 5); - in7 = _mm_srai_epi16(in7, 5); - - RECON_AND_STORE(dest + 0 * stride, in0); - RECON_AND_STORE(dest + 1 * stride, in1); - RECON_AND_STORE(dest + 2 * stride, in2); - RECON_AND_STORE(dest + 3 * stride, in3); - RECON_AND_STORE(dest + 4 * stride, in4); - RECON_AND_STORE(dest + 5 * stride, in5); - RECON_AND_STORE(dest + 6 * stride, in6); - RECON_AND_STORE(dest + 7 * stride, in7); -} - -#define IDCT16 \ - /* Stage2 */ \ - { \ - const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], in[15]); \ - const __m128i hi_1_15 = 
_mm_unpackhi_epi16(in[1], in[15]); \ - const __m128i lo_9_7 = _mm_unpacklo_epi16(in[9], in[7]); \ - const __m128i hi_9_7 = _mm_unpackhi_epi16(in[9], in[7]); \ - const __m128i lo_5_11 = _mm_unpacklo_epi16(in[5], in[11]); \ - const __m128i hi_5_11 = _mm_unpackhi_epi16(in[5], in[11]); \ - const __m128i lo_13_3 = _mm_unpacklo_epi16(in[13], in[3]); \ - const __m128i hi_13_3 = _mm_unpackhi_epi16(in[13], in[3]); \ - \ - MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_9_7, hi_9_7, \ - stg2_0, stg2_1, stg2_2, stg2_3, \ - stp2_8, stp2_15, stp2_9, stp2_14) \ - \ - MULTIPLICATION_AND_ADD(lo_5_11, hi_5_11, lo_13_3, hi_13_3, \ - stg2_4, stg2_5, stg2_6, stg2_7, \ - stp2_10, stp2_13, stp2_11, stp2_12) \ - } \ - \ - /* Stage3 */ \ - { \ - const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], in[14]); \ - const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], in[14]); \ - const __m128i lo_10_6 = _mm_unpacklo_epi16(in[10], in[6]); \ - const __m128i hi_10_6 = _mm_unpackhi_epi16(in[10], in[6]); \ - \ - MULTIPLICATION_AND_ADD(lo_2_14, hi_2_14, lo_10_6, hi_10_6, \ - stg3_0, stg3_1, stg3_2, stg3_3, \ - stp1_4, stp1_7, stp1_5, stp1_6) \ - \ - stp1_8_0 = _mm_add_epi16(stp2_8, stp2_9); \ - stp1_9 = _mm_sub_epi16(stp2_8, stp2_9); \ - stp1_10 = _mm_sub_epi16(stp2_11, stp2_10); \ - stp1_11 = _mm_add_epi16(stp2_11, stp2_10); \ - \ - stp1_12_0 = _mm_add_epi16(stp2_12, stp2_13); \ - stp1_13 = _mm_sub_epi16(stp2_12, stp2_13); \ - stp1_14 = _mm_sub_epi16(stp2_15, stp2_14); \ - stp1_15 = _mm_add_epi16(stp2_15, stp2_14); \ - } \ - \ - /* Stage4 */ \ - { \ - const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], in[8]); \ - const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], in[8]); \ - const __m128i lo_4_12 = _mm_unpacklo_epi16(in[4], in[12]); \ - const __m128i hi_4_12 = _mm_unpackhi_epi16(in[4], in[12]); \ - \ - const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \ - const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ - const __m128i 
hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ - \ - MULTIPLICATION_AND_ADD(lo_0_8, hi_0_8, lo_4_12, hi_4_12, \ - stg4_0, stg4_1, stg4_2, stg4_3, \ - stp2_0, stp2_1, stp2_2, stp2_3) \ - \ - stp2_4 = _mm_add_epi16(stp1_4, stp1_5); \ - stp2_5 = _mm_sub_epi16(stp1_4, stp1_5); \ - stp2_6 = _mm_sub_epi16(stp1_7, stp1_6); \ - stp2_7 = _mm_add_epi16(stp1_7, stp1_6); \ - \ - MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, \ - stg4_4, stg4_5, stg4_6, stg4_7, \ - stp2_9, stp2_14, stp2_10, stp2_13) \ - } \ - \ - /* Stage5 */ \ - { \ - const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ - const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ - \ - stp1_0 = _mm_add_epi16(stp2_0, stp2_3); \ - stp1_1 = _mm_add_epi16(stp2_1, stp2_2); \ - stp1_2 = _mm_sub_epi16(stp2_1, stp2_2); \ - stp1_3 = _mm_sub_epi16(stp2_0, stp2_3); \ - \ - tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \ - tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \ - tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \ - tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \ - \ - tmp0 = _mm_add_epi32(tmp0, rounding); \ - tmp1 = _mm_add_epi32(tmp1, rounding); \ - tmp2 = _mm_add_epi32(tmp2, rounding); \ - tmp3 = _mm_add_epi32(tmp3, rounding); \ - \ - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ - \ - stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ - stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ - \ - stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11); \ - stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \ - stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \ - stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11); \ - \ - stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0); \ - stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \ - stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \ - stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0); \ - } \ - \ - /* Stage6 */ \ - { \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ - const __m128i hi_10_13 = 
_mm_unpackhi_epi16(stp1_10, stp1_13); \ - const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \ - const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \ - \ - stp2_0 = _mm_add_epi16(stp1_0, stp2_7); \ - stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \ - stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \ - stp2_3 = _mm_add_epi16(stp1_3, stp2_4); \ - stp2_4 = _mm_sub_epi16(stp1_3, stp2_4); \ - stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \ - stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \ - stp2_7 = _mm_sub_epi16(stp1_0, stp2_7); \ - \ - MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \ - stg6_0, stg4_0, stg6_0, stg4_0, \ - stp2_10, stp2_13, stp2_11, stp2_12) \ - } - -#define IDCT16_10 \ - /* Stage2 */ \ - { \ - const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], zero); \ - const __m128i hi_1_15 = _mm_unpackhi_epi16(in[1], zero); \ - const __m128i lo_13_3 = _mm_unpacklo_epi16(zero, in[3]); \ - const __m128i hi_13_3 = _mm_unpackhi_epi16(zero, in[3]); \ - \ - MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_13_3, hi_13_3, \ - stg2_0, stg2_1, stg2_6, stg2_7, \ - stp1_8_0, stp1_15, stp1_11, stp1_12_0) \ - } \ - \ - /* Stage3 */ \ - { \ - const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], zero); \ - const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], zero); \ - \ - MULTIPLICATION_AND_ADD_2(lo_2_14, hi_2_14, \ - stg3_0, stg3_1, \ - stp2_4, stp2_7) \ - \ - stp1_9 = stp1_8_0; \ - stp1_10 = stp1_11; \ - \ - stp1_13 = stp1_12_0; \ - stp1_14 = stp1_15; \ - } \ - \ - /* Stage4 */ \ - { \ - const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], zero); \ - const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], zero); \ - \ - const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \ - const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ - const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ - \ - MULTIPLICATION_AND_ADD_2(lo_0_8, hi_0_8, \ - stg4_0, stg4_1, \ - stp1_0, stp1_1) \ - stp2_5 = stp2_4; \ - 
stp2_6 = stp2_7; \ - \ - MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, \ - stg4_4, stg4_5, stg4_6, stg4_7, \ - stp2_9, stp2_14, stp2_10, stp2_13) \ - } \ - \ - /* Stage5 */ \ - { \ - const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ - const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ - \ - stp1_2 = stp1_1; \ - stp1_3 = stp1_0; \ - \ - tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \ - tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \ - tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \ - tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \ - \ - tmp0 = _mm_add_epi32(tmp0, rounding); \ - tmp1 = _mm_add_epi32(tmp1, rounding); \ - tmp2 = _mm_add_epi32(tmp2, rounding); \ - tmp3 = _mm_add_epi32(tmp3, rounding); \ - \ - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ - \ - stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ - stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ - \ - stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11); \ - stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \ - stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \ - stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11); \ - \ - stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0); \ - stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \ - stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \ - stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0); \ - } \ - \ - /* Stage6 */ \ - { \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ - const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ - const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \ - const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \ - \ - stp2_0 = _mm_add_epi16(stp1_0, stp2_7); \ - stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \ - stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \ - stp2_3 = _mm_add_epi16(stp1_3, stp2_4); \ - stp2_4 = _mm_sub_epi16(stp1_3, stp2_4); \ - stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \ - stp2_6 = _mm_sub_epi16(stp1_1, 
stp1_6); \ - stp2_7 = _mm_sub_epi16(stp1_0, stp2_7); \ - \ - MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \ - stg6_0, stg4_0, stg6_0, stg4_0, \ - stp2_10, stp2_13, stp2_11, stp2_12) \ - } - -void vp10_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest, - int stride) { - const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); - const __m128i final_rounding = _mm_set1_epi16(1 << 5); - const __m128i zero = _mm_setzero_si128(); - - const __m128i stg2_0 = pair_set_epi16(cospi_30_64, -cospi_2_64); - const __m128i stg2_1 = pair_set_epi16(cospi_2_64, cospi_30_64); - const __m128i stg2_2 = pair_set_epi16(cospi_14_64, -cospi_18_64); - const __m128i stg2_3 = pair_set_epi16(cospi_18_64, cospi_14_64); - const __m128i stg2_4 = pair_set_epi16(cospi_22_64, -cospi_10_64); - const __m128i stg2_5 = pair_set_epi16(cospi_10_64, cospi_22_64); - const __m128i stg2_6 = pair_set_epi16(cospi_6_64, -cospi_26_64); - const __m128i stg2_7 = pair_set_epi16(cospi_26_64, cospi_6_64); - - const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64); - const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64); - const __m128i stg3_2 = pair_set_epi16(cospi_12_64, -cospi_20_64); - const __m128i stg3_3 = pair_set_epi16(cospi_20_64, cospi_12_64); - - const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64); - const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i stg4_2 = pair_set_epi16(cospi_24_64, -cospi_8_64); - const __m128i stg4_3 = pair_set_epi16(cospi_8_64, cospi_24_64); - const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64); - const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64); - const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64); - const __m128i stg4_7 = pair_set_epi16(-cospi_8_64, cospi_24_64); - - const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64); - - __m128i in[16], l[16], r[16], *curr1; - __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7, - 
stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, - stp1_8_0, stp1_12_0; - __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7, - stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15; - __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - int i; - - curr1 = l; - for (i = 0; i < 2; i++) { - // 1-D vp10_idct - - // Load input data. - in[0] = _mm_load_si128((const __m128i *)input); - in[8] = _mm_load_si128((const __m128i *)(input + 8 * 1)); - in[1] = _mm_load_si128((const __m128i *)(input + 8 * 2)); - in[9] = _mm_load_si128((const __m128i *)(input + 8 * 3)); - in[2] = _mm_load_si128((const __m128i *)(input + 8 * 4)); - in[10] = _mm_load_si128((const __m128i *)(input + 8 * 5)); - in[3] = _mm_load_si128((const __m128i *)(input + 8 * 6)); - in[11] = _mm_load_si128((const __m128i *)(input + 8 * 7)); - in[4] = _mm_load_si128((const __m128i *)(input + 8 * 8)); - in[12] = _mm_load_si128((const __m128i *)(input + 8 * 9)); - in[5] = _mm_load_si128((const __m128i *)(input + 8 * 10)); - in[13] = _mm_load_si128((const __m128i *)(input + 8 * 11)); - in[6] = _mm_load_si128((const __m128i *)(input + 8 * 12)); - in[14] = _mm_load_si128((const __m128i *)(input + 8 * 13)); - in[7] = _mm_load_si128((const __m128i *)(input + 8 * 14)); - in[15] = _mm_load_si128((const __m128i *)(input + 8 * 15)); - - array_transpose_8x8(in, in); - array_transpose_8x8(in + 8, in + 8); - - IDCT16 - - // Stage7 - curr1[0] = _mm_add_epi16(stp2_0, stp1_15); - curr1[1] = _mm_add_epi16(stp2_1, stp1_14); - curr1[2] = _mm_add_epi16(stp2_2, stp2_13); - curr1[3] = _mm_add_epi16(stp2_3, stp2_12); - curr1[4] = _mm_add_epi16(stp2_4, stp2_11); - curr1[5] = _mm_add_epi16(stp2_5, stp2_10); - curr1[6] = _mm_add_epi16(stp2_6, stp1_9); - curr1[7] = _mm_add_epi16(stp2_7, stp1_8); - curr1[8] = _mm_sub_epi16(stp2_7, stp1_8); - curr1[9] = _mm_sub_epi16(stp2_6, stp1_9); - curr1[10] = _mm_sub_epi16(stp2_5, stp2_10); - curr1[11] = _mm_sub_epi16(stp2_4, stp2_11); - 
curr1[12] = _mm_sub_epi16(stp2_3, stp2_12); - curr1[13] = _mm_sub_epi16(stp2_2, stp2_13); - curr1[14] = _mm_sub_epi16(stp2_1, stp1_14); - curr1[15] = _mm_sub_epi16(stp2_0, stp1_15); - - curr1 = r; - input += 128; - } - for (i = 0; i < 2; i++) { - int j; - // 1-D vp10_idct - array_transpose_8x8(l + i * 8, in); - array_transpose_8x8(r + i * 8, in + 8); - - IDCT16 - - // 2-D - in[0] = _mm_add_epi16(stp2_0, stp1_15); - in[1] = _mm_add_epi16(stp2_1, stp1_14); - in[2] = _mm_add_epi16(stp2_2, stp2_13); - in[3] = _mm_add_epi16(stp2_3, stp2_12); - in[4] = _mm_add_epi16(stp2_4, stp2_11); - in[5] = _mm_add_epi16(stp2_5, stp2_10); - in[6] = _mm_add_epi16(stp2_6, stp1_9); - in[7] = _mm_add_epi16(stp2_7, stp1_8); - in[8] = _mm_sub_epi16(stp2_7, stp1_8); - in[9] = _mm_sub_epi16(stp2_6, stp1_9); - in[10] = _mm_sub_epi16(stp2_5, stp2_10); - in[11] = _mm_sub_epi16(stp2_4, stp2_11); - in[12] = _mm_sub_epi16(stp2_3, stp2_12); - in[13] = _mm_sub_epi16(stp2_2, stp2_13); - in[14] = _mm_sub_epi16(stp2_1, stp1_14); - in[15] = _mm_sub_epi16(stp2_0, stp1_15); - - for (j = 0; j < 16; ++j) { - // Final rounding and shift - in[j] = _mm_adds_epi16(in[j], final_rounding); - in[j] = _mm_srai_epi16(in[j], 6); - RECON_AND_STORE(dest + j * stride, in[j]); - } - - dest += 8; - } -} - -void vp10_idct16x16_1_add_sse2(const int16_t *input, - uint8_t *dest, - int stride) { - __m128i dc_value; - const __m128i zero = _mm_setzero_si128(); - int a, i; - - a = dct_const_round_shift(input[0] * cospi_16_64); - a = dct_const_round_shift(a * cospi_16_64); - a = ROUND_POWER_OF_TWO(a, 6); - - dc_value = _mm_set1_epi16(a); - - for (i = 0; i < 2; ++i) { - RECON_AND_STORE(dest + 0 * stride, dc_value); - RECON_AND_STORE(dest + 1 * stride, dc_value); - RECON_AND_STORE(dest + 2 * stride, dc_value); - RECON_AND_STORE(dest + 3 * stride, dc_value); - RECON_AND_STORE(dest + 4 * stride, dc_value); - RECON_AND_STORE(dest + 5 * stride, dc_value); - RECON_AND_STORE(dest + 6 * stride, dc_value); - RECON_AND_STORE(dest + 7 * 
stride, dc_value); - RECON_AND_STORE(dest + 8 * stride, dc_value); - RECON_AND_STORE(dest + 9 * stride, dc_value); - RECON_AND_STORE(dest + 10 * stride, dc_value); - RECON_AND_STORE(dest + 11 * stride, dc_value); - RECON_AND_STORE(dest + 12 * stride, dc_value); - RECON_AND_STORE(dest + 13 * stride, dc_value); - RECON_AND_STORE(dest + 14 * stride, dc_value); - RECON_AND_STORE(dest + 15 * stride, dc_value); - dest += 8; - } -} - -static void vp10_iadst16_8col(__m128i *in) { - // perform 16x16 1-D ADST for 8 columns - __m128i s[16], x[16], u[32], v[32]; - const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64); - const __m128i k__cospi_p31_m01 = pair_set_epi16(cospi_31_64, -cospi_1_64); - const __m128i k__cospi_p05_p27 = pair_set_epi16(cospi_5_64, cospi_27_64); - const __m128i k__cospi_p27_m05 = pair_set_epi16(cospi_27_64, -cospi_5_64); - const __m128i k__cospi_p09_p23 = pair_set_epi16(cospi_9_64, cospi_23_64); - const __m128i k__cospi_p23_m09 = pair_set_epi16(cospi_23_64, -cospi_9_64); - const __m128i k__cospi_p13_p19 = pair_set_epi16(cospi_13_64, cospi_19_64); - const __m128i k__cospi_p19_m13 = pair_set_epi16(cospi_19_64, -cospi_13_64); - const __m128i k__cospi_p17_p15 = pair_set_epi16(cospi_17_64, cospi_15_64); - const __m128i k__cospi_p15_m17 = pair_set_epi16(cospi_15_64, -cospi_17_64); - const __m128i k__cospi_p21_p11 = pair_set_epi16(cospi_21_64, cospi_11_64); - const __m128i k__cospi_p11_m21 = pair_set_epi16(cospi_11_64, -cospi_21_64); - const __m128i k__cospi_p25_p07 = pair_set_epi16(cospi_25_64, cospi_7_64); - const __m128i k__cospi_p07_m25 = pair_set_epi16(cospi_7_64, -cospi_25_64); - const __m128i k__cospi_p29_p03 = pair_set_epi16(cospi_29_64, cospi_3_64); - const __m128i k__cospi_p03_m29 = pair_set_epi16(cospi_3_64, -cospi_29_64); - const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64); - const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64); - const __m128i k__cospi_p20_p12 = 
pair_set_epi16(cospi_20_64, cospi_12_64); - const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64); - const __m128i k__cospi_m28_p04 = pair_set_epi16(-cospi_28_64, cospi_4_64); - const __m128i k__cospi_m12_p20 = pair_set_epi16(-cospi_12_64, cospi_20_64); - const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); - const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); - const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64); - const __m128i k__cospi_m16_m16 = _mm_set1_epi16((int16_t)-cospi_16_64); - const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64); - const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - const __m128i kZero = _mm_set1_epi16(0); - - u[0] = _mm_unpacklo_epi16(in[15], in[0]); - u[1] = _mm_unpackhi_epi16(in[15], in[0]); - u[2] = _mm_unpacklo_epi16(in[13], in[2]); - u[3] = _mm_unpackhi_epi16(in[13], in[2]); - u[4] = _mm_unpacklo_epi16(in[11], in[4]); - u[5] = _mm_unpackhi_epi16(in[11], in[4]); - u[6] = _mm_unpacklo_epi16(in[9], in[6]); - u[7] = _mm_unpackhi_epi16(in[9], in[6]); - u[8] = _mm_unpacklo_epi16(in[7], in[8]); - u[9] = _mm_unpackhi_epi16(in[7], in[8]); - u[10] = _mm_unpacklo_epi16(in[5], in[10]); - u[11] = _mm_unpackhi_epi16(in[5], in[10]); - u[12] = _mm_unpacklo_epi16(in[3], in[12]); - u[13] = _mm_unpackhi_epi16(in[3], in[12]); - u[14] = _mm_unpacklo_epi16(in[1], in[14]); - u[15] = _mm_unpackhi_epi16(in[1], in[14]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_p01_p31); - v[1] = _mm_madd_epi16(u[1], k__cospi_p01_p31); - v[2] = _mm_madd_epi16(u[0], k__cospi_p31_m01); - v[3] = _mm_madd_epi16(u[1], k__cospi_p31_m01); - v[4] = _mm_madd_epi16(u[2], k__cospi_p05_p27); - v[5] = _mm_madd_epi16(u[3], k__cospi_p05_p27); - v[6] = _mm_madd_epi16(u[2], k__cospi_p27_m05); - v[7] = 
_mm_madd_epi16(u[3], k__cospi_p27_m05); - v[8] = _mm_madd_epi16(u[4], k__cospi_p09_p23); - v[9] = _mm_madd_epi16(u[5], k__cospi_p09_p23); - v[10] = _mm_madd_epi16(u[4], k__cospi_p23_m09); - v[11] = _mm_madd_epi16(u[5], k__cospi_p23_m09); - v[12] = _mm_madd_epi16(u[6], k__cospi_p13_p19); - v[13] = _mm_madd_epi16(u[7], k__cospi_p13_p19); - v[14] = _mm_madd_epi16(u[6], k__cospi_p19_m13); - v[15] = _mm_madd_epi16(u[7], k__cospi_p19_m13); - v[16] = _mm_madd_epi16(u[8], k__cospi_p17_p15); - v[17] = _mm_madd_epi16(u[9], k__cospi_p17_p15); - v[18] = _mm_madd_epi16(u[8], k__cospi_p15_m17); - v[19] = _mm_madd_epi16(u[9], k__cospi_p15_m17); - v[20] = _mm_madd_epi16(u[10], k__cospi_p21_p11); - v[21] = _mm_madd_epi16(u[11], k__cospi_p21_p11); - v[22] = _mm_madd_epi16(u[10], k__cospi_p11_m21); - v[23] = _mm_madd_epi16(u[11], k__cospi_p11_m21); - v[24] = _mm_madd_epi16(u[12], k__cospi_p25_p07); - v[25] = _mm_madd_epi16(u[13], k__cospi_p25_p07); - v[26] = _mm_madd_epi16(u[12], k__cospi_p07_m25); - v[27] = _mm_madd_epi16(u[13], k__cospi_p07_m25); - v[28] = _mm_madd_epi16(u[14], k__cospi_p29_p03); - v[29] = _mm_madd_epi16(u[15], k__cospi_p29_p03); - v[30] = _mm_madd_epi16(u[14], k__cospi_p03_m29); - v[31] = _mm_madd_epi16(u[15], k__cospi_p03_m29); - - u[0] = _mm_add_epi32(v[0], v[16]); - u[1] = _mm_add_epi32(v[1], v[17]); - u[2] = _mm_add_epi32(v[2], v[18]); - u[3] = _mm_add_epi32(v[3], v[19]); - u[4] = _mm_add_epi32(v[4], v[20]); - u[5] = _mm_add_epi32(v[5], v[21]); - u[6] = _mm_add_epi32(v[6], v[22]); - u[7] = _mm_add_epi32(v[7], v[23]); - u[8] = _mm_add_epi32(v[8], v[24]); - u[9] = _mm_add_epi32(v[9], v[25]); - u[10] = _mm_add_epi32(v[10], v[26]); - u[11] = _mm_add_epi32(v[11], v[27]); - u[12] = _mm_add_epi32(v[12], v[28]); - u[13] = _mm_add_epi32(v[13], v[29]); - u[14] = _mm_add_epi32(v[14], v[30]); - u[15] = _mm_add_epi32(v[15], v[31]); - u[16] = _mm_sub_epi32(v[0], v[16]); - u[17] = _mm_sub_epi32(v[1], v[17]); - u[18] = _mm_sub_epi32(v[2], v[18]); - u[19] = _mm_sub_epi32(v[3], 
v[19]); - u[20] = _mm_sub_epi32(v[4], v[20]); - u[21] = _mm_sub_epi32(v[5], v[21]); - u[22] = _mm_sub_epi32(v[6], v[22]); - u[23] = _mm_sub_epi32(v[7], v[23]); - u[24] = _mm_sub_epi32(v[8], v[24]); - u[25] = _mm_sub_epi32(v[9], v[25]); - u[26] = _mm_sub_epi32(v[10], v[26]); - u[27] = _mm_sub_epi32(v[11], v[27]); - u[28] = _mm_sub_epi32(v[12], v[28]); - u[29] = _mm_sub_epi32(v[13], v[29]); - u[30] = _mm_sub_epi32(v[14], v[30]); - u[31] = _mm_sub_epi32(v[15], v[31]); - - v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); - v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); - v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); - v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); - v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); - v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); - v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); - v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); - v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); - v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); - v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); - v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); - v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); - v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); - v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); - v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - v[16] = _mm_add_epi32(u[16], k__DCT_CONST_ROUNDING); - v[17] = _mm_add_epi32(u[17], k__DCT_CONST_ROUNDING); - v[18] = _mm_add_epi32(u[18], k__DCT_CONST_ROUNDING); - v[19] = _mm_add_epi32(u[19], k__DCT_CONST_ROUNDING); - v[20] = _mm_add_epi32(u[20], k__DCT_CONST_ROUNDING); - v[21] = _mm_add_epi32(u[21], k__DCT_CONST_ROUNDING); - v[22] = _mm_add_epi32(u[22], k__DCT_CONST_ROUNDING); - v[23] = _mm_add_epi32(u[23], k__DCT_CONST_ROUNDING); - v[24] = _mm_add_epi32(u[24], k__DCT_CONST_ROUNDING); - v[25] = _mm_add_epi32(u[25], k__DCT_CONST_ROUNDING); - v[26] = _mm_add_epi32(u[26], k__DCT_CONST_ROUNDING); - v[27] = _mm_add_epi32(u[27], k__DCT_CONST_ROUNDING); - v[28] 
= _mm_add_epi32(u[28], k__DCT_CONST_ROUNDING); - v[29] = _mm_add_epi32(u[29], k__DCT_CONST_ROUNDING); - v[30] = _mm_add_epi32(u[30], k__DCT_CONST_ROUNDING); - v[31] = _mm_add_epi32(u[31], k__DCT_CONST_ROUNDING); - - u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); - u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); - u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); - u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); - u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); - u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); - u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); - u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); - u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS); - u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS); - u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); - u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); - u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); - u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS); - u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); - u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); - u[16] = _mm_srai_epi32(v[16], DCT_CONST_BITS); - u[17] = _mm_srai_epi32(v[17], DCT_CONST_BITS); - u[18] = _mm_srai_epi32(v[18], DCT_CONST_BITS); - u[19] = _mm_srai_epi32(v[19], DCT_CONST_BITS); - u[20] = _mm_srai_epi32(v[20], DCT_CONST_BITS); - u[21] = _mm_srai_epi32(v[21], DCT_CONST_BITS); - u[22] = _mm_srai_epi32(v[22], DCT_CONST_BITS); - u[23] = _mm_srai_epi32(v[23], DCT_CONST_BITS); - u[24] = _mm_srai_epi32(v[24], DCT_CONST_BITS); - u[25] = _mm_srai_epi32(v[25], DCT_CONST_BITS); - u[26] = _mm_srai_epi32(v[26], DCT_CONST_BITS); - u[27] = _mm_srai_epi32(v[27], DCT_CONST_BITS); - u[28] = _mm_srai_epi32(v[28], DCT_CONST_BITS); - u[29] = _mm_srai_epi32(v[29], DCT_CONST_BITS); - u[30] = _mm_srai_epi32(v[30], DCT_CONST_BITS); - u[31] = _mm_srai_epi32(v[31], DCT_CONST_BITS); - - s[0] = _mm_packs_epi32(u[0], u[1]); - s[1] = _mm_packs_epi32(u[2], u[3]); - s[2] = _mm_packs_epi32(u[4], u[5]); - s[3] = _mm_packs_epi32(u[6], u[7]); - s[4] = _mm_packs_epi32(u[8], u[9]); - s[5] = _mm_packs_epi32(u[10], u[11]); - 
s[6] = _mm_packs_epi32(u[12], u[13]); - s[7] = _mm_packs_epi32(u[14], u[15]); - s[8] = _mm_packs_epi32(u[16], u[17]); - s[9] = _mm_packs_epi32(u[18], u[19]); - s[10] = _mm_packs_epi32(u[20], u[21]); - s[11] = _mm_packs_epi32(u[22], u[23]); - s[12] = _mm_packs_epi32(u[24], u[25]); - s[13] = _mm_packs_epi32(u[26], u[27]); - s[14] = _mm_packs_epi32(u[28], u[29]); - s[15] = _mm_packs_epi32(u[30], u[31]); - - // stage 2 - u[0] = _mm_unpacklo_epi16(s[8], s[9]); - u[1] = _mm_unpackhi_epi16(s[8], s[9]); - u[2] = _mm_unpacklo_epi16(s[10], s[11]); - u[3] = _mm_unpackhi_epi16(s[10], s[11]); - u[4] = _mm_unpacklo_epi16(s[12], s[13]); - u[5] = _mm_unpackhi_epi16(s[12], s[13]); - u[6] = _mm_unpacklo_epi16(s[14], s[15]); - u[7] = _mm_unpackhi_epi16(s[14], s[15]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_p04_p28); - v[1] = _mm_madd_epi16(u[1], k__cospi_p04_p28); - v[2] = _mm_madd_epi16(u[0], k__cospi_p28_m04); - v[3] = _mm_madd_epi16(u[1], k__cospi_p28_m04); - v[4] = _mm_madd_epi16(u[2], k__cospi_p20_p12); - v[5] = _mm_madd_epi16(u[3], k__cospi_p20_p12); - v[6] = _mm_madd_epi16(u[2], k__cospi_p12_m20); - v[7] = _mm_madd_epi16(u[3], k__cospi_p12_m20); - v[8] = _mm_madd_epi16(u[4], k__cospi_m28_p04); - v[9] = _mm_madd_epi16(u[5], k__cospi_m28_p04); - v[10] = _mm_madd_epi16(u[4], k__cospi_p04_p28); - v[11] = _mm_madd_epi16(u[5], k__cospi_p04_p28); - v[12] = _mm_madd_epi16(u[6], k__cospi_m12_p20); - v[13] = _mm_madd_epi16(u[7], k__cospi_m12_p20); - v[14] = _mm_madd_epi16(u[6], k__cospi_p20_p12); - v[15] = _mm_madd_epi16(u[7], k__cospi_p20_p12); - - u[0] = _mm_add_epi32(v[0], v[8]); - u[1] = _mm_add_epi32(v[1], v[9]); - u[2] = _mm_add_epi32(v[2], v[10]); - u[3] = _mm_add_epi32(v[3], v[11]); - u[4] = _mm_add_epi32(v[4], v[12]); - u[5] = _mm_add_epi32(v[5], v[13]); - u[6] = _mm_add_epi32(v[6], v[14]); - u[7] = _mm_add_epi32(v[7], v[15]); - u[8] = _mm_sub_epi32(v[0], v[8]); - u[9] = _mm_sub_epi32(v[1], v[9]); - u[10] = _mm_sub_epi32(v[2], v[10]); - u[11] = _mm_sub_epi32(v[3], v[11]); - 
u[12] = _mm_sub_epi32(v[4], v[12]); - u[13] = _mm_sub_epi32(v[5], v[13]); - u[14] = _mm_sub_epi32(v[6], v[14]); - u[15] = _mm_sub_epi32(v[7], v[15]); - - v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); - v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); - v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); - v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); - v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); - v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); - v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); - v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); - v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); - v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); - v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); - v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); - v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); - v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); - v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); - v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - - u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); - u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); - u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); - u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); - u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); - u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); - u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); - u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); - u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS); - u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS); - u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); - u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); - u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); - u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS); - u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); - u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); - - x[0] = _mm_add_epi16(s[0], s[4]); - x[1] = _mm_add_epi16(s[1], s[5]); - x[2] = _mm_add_epi16(s[2], s[6]); - x[3] = _mm_add_epi16(s[3], s[7]); - x[4] = _mm_sub_epi16(s[0], s[4]); - x[5] = _mm_sub_epi16(s[1], s[5]); - 
x[6] = _mm_sub_epi16(s[2], s[6]); - x[7] = _mm_sub_epi16(s[3], s[7]); - x[8] = _mm_packs_epi32(u[0], u[1]); - x[9] = _mm_packs_epi32(u[2], u[3]); - x[10] = _mm_packs_epi32(u[4], u[5]); - x[11] = _mm_packs_epi32(u[6], u[7]); - x[12] = _mm_packs_epi32(u[8], u[9]); - x[13] = _mm_packs_epi32(u[10], u[11]); - x[14] = _mm_packs_epi32(u[12], u[13]); - x[15] = _mm_packs_epi32(u[14], u[15]); - - // stage 3 - u[0] = _mm_unpacklo_epi16(x[4], x[5]); - u[1] = _mm_unpackhi_epi16(x[4], x[5]); - u[2] = _mm_unpacklo_epi16(x[6], x[7]); - u[3] = _mm_unpackhi_epi16(x[6], x[7]); - u[4] = _mm_unpacklo_epi16(x[12], x[13]); - u[5] = _mm_unpackhi_epi16(x[12], x[13]); - u[6] = _mm_unpacklo_epi16(x[14], x[15]); - u[7] = _mm_unpackhi_epi16(x[14], x[15]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_p08_p24); - v[1] = _mm_madd_epi16(u[1], k__cospi_p08_p24); - v[2] = _mm_madd_epi16(u[0], k__cospi_p24_m08); - v[3] = _mm_madd_epi16(u[1], k__cospi_p24_m08); - v[4] = _mm_madd_epi16(u[2], k__cospi_m24_p08); - v[5] = _mm_madd_epi16(u[3], k__cospi_m24_p08); - v[6] = _mm_madd_epi16(u[2], k__cospi_p08_p24); - v[7] = _mm_madd_epi16(u[3], k__cospi_p08_p24); - v[8] = _mm_madd_epi16(u[4], k__cospi_p08_p24); - v[9] = _mm_madd_epi16(u[5], k__cospi_p08_p24); - v[10] = _mm_madd_epi16(u[4], k__cospi_p24_m08); - v[11] = _mm_madd_epi16(u[5], k__cospi_p24_m08); - v[12] = _mm_madd_epi16(u[6], k__cospi_m24_p08); - v[13] = _mm_madd_epi16(u[7], k__cospi_m24_p08); - v[14] = _mm_madd_epi16(u[6], k__cospi_p08_p24); - v[15] = _mm_madd_epi16(u[7], k__cospi_p08_p24); - - u[0] = _mm_add_epi32(v[0], v[4]); - u[1] = _mm_add_epi32(v[1], v[5]); - u[2] = _mm_add_epi32(v[2], v[6]); - u[3] = _mm_add_epi32(v[3], v[7]); - u[4] = _mm_sub_epi32(v[0], v[4]); - u[5] = _mm_sub_epi32(v[1], v[5]); - u[6] = _mm_sub_epi32(v[2], v[6]); - u[7] = _mm_sub_epi32(v[3], v[7]); - u[8] = _mm_add_epi32(v[8], v[12]); - u[9] = _mm_add_epi32(v[9], v[13]); - u[10] = _mm_add_epi32(v[10], v[14]); - u[11] = _mm_add_epi32(v[11], v[15]); - u[12] = 
_mm_sub_epi32(v[8], v[12]); - u[13] = _mm_sub_epi32(v[9], v[13]); - u[14] = _mm_sub_epi32(v[10], v[14]); - u[15] = _mm_sub_epi32(v[11], v[15]); - - u[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); - u[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); - u[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); - u[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); - u[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); - u[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); - u[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); - u[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); - u[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); - u[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); - u[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); - u[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); - u[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); - u[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - - v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); - v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); - v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); - v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); - v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); - v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS); - v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS); - v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS); - v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS); - v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS); - v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS); - v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS); - v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS); - - s[0] = _mm_add_epi16(x[0], x[2]); - s[1] = _mm_add_epi16(x[1], x[3]); - s[2] = _mm_sub_epi16(x[0], x[2]); - s[3] = _mm_sub_epi16(x[1], x[3]); - s[4] = _mm_packs_epi32(v[0], v[1]); - s[5] = _mm_packs_epi32(v[2], v[3]); - 
s[6] = _mm_packs_epi32(v[4], v[5]); - s[7] = _mm_packs_epi32(v[6], v[7]); - s[8] = _mm_add_epi16(x[8], x[10]); - s[9] = _mm_add_epi16(x[9], x[11]); - s[10] = _mm_sub_epi16(x[8], x[10]); - s[11] = _mm_sub_epi16(x[9], x[11]); - s[12] = _mm_packs_epi32(v[8], v[9]); - s[13] = _mm_packs_epi32(v[10], v[11]); - s[14] = _mm_packs_epi32(v[12], v[13]); - s[15] = _mm_packs_epi32(v[14], v[15]); - - // stage 4 - u[0] = _mm_unpacklo_epi16(s[2], s[3]); - u[1] = _mm_unpackhi_epi16(s[2], s[3]); - u[2] = _mm_unpacklo_epi16(s[6], s[7]); - u[3] = _mm_unpackhi_epi16(s[6], s[7]); - u[4] = _mm_unpacklo_epi16(s[10], s[11]); - u[5] = _mm_unpackhi_epi16(s[10], s[11]); - u[6] = _mm_unpacklo_epi16(s[14], s[15]); - u[7] = _mm_unpackhi_epi16(s[14], s[15]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_m16_m16); - v[1] = _mm_madd_epi16(u[1], k__cospi_m16_m16); - v[2] = _mm_madd_epi16(u[0], k__cospi_p16_m16); - v[3] = _mm_madd_epi16(u[1], k__cospi_p16_m16); - v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16); - v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16); - v[6] = _mm_madd_epi16(u[2], k__cospi_m16_p16); - v[7] = _mm_madd_epi16(u[3], k__cospi_m16_p16); - v[8] = _mm_madd_epi16(u[4], k__cospi_p16_p16); - v[9] = _mm_madd_epi16(u[5], k__cospi_p16_p16); - v[10] = _mm_madd_epi16(u[4], k__cospi_m16_p16); - v[11] = _mm_madd_epi16(u[5], k__cospi_m16_p16); - v[12] = _mm_madd_epi16(u[6], k__cospi_m16_m16); - v[13] = _mm_madd_epi16(u[7], k__cospi_m16_m16); - v[14] = _mm_madd_epi16(u[6], k__cospi_p16_m16); - v[15] = _mm_madd_epi16(u[7], k__cospi_p16_m16); - - u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); - u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); - u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); - u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); - u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); - u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); - u[8] = _mm_add_epi32(v[8], 
k__DCT_CONST_ROUNDING); - u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING); - u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING); - u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING); - u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING); - u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING); - u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING); - u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING); - - v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); - v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); - v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); - v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); - v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); - v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS); - v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS); - v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS); - v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS); - v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS); - v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS); - v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS); - v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS); - - in[0] = s[0]; - in[1] = _mm_sub_epi16(kZero, s[8]); - in[2] = s[12]; - in[3] = _mm_sub_epi16(kZero, s[4]); - in[4] = _mm_packs_epi32(v[4], v[5]); - in[5] = _mm_packs_epi32(v[12], v[13]); - in[6] = _mm_packs_epi32(v[8], v[9]); - in[7] = _mm_packs_epi32(v[0], v[1]); - in[8] = _mm_packs_epi32(v[2], v[3]); - in[9] = _mm_packs_epi32(v[10], v[11]); - in[10] = _mm_packs_epi32(v[14], v[15]); - in[11] = _mm_packs_epi32(v[6], v[7]); - in[12] = s[5]; - in[13] = _mm_sub_epi16(kZero, s[13]); - in[14] = s[9]; - in[15] = _mm_sub_epi16(kZero, s[1]); -} - -static void vp10_idct16_8col(__m128i *in) { - const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64); - const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64); - const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64); - 
const __m128i k__cospi_p18_p14 = pair_set_epi16(cospi_18_64, cospi_14_64); - const __m128i k__cospi_p22_m10 = pair_set_epi16(cospi_22_64, -cospi_10_64); - const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64); - const __m128i k__cospi_p06_m26 = pair_set_epi16(cospi_6_64, -cospi_26_64); - const __m128i k__cospi_p26_p06 = pair_set_epi16(cospi_26_64, cospi_6_64); - const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64); - const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64); - const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64); - const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64); - const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64); - const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); - const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); - const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); - const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); - const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64); - const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - __m128i v[16], u[16], s[16], t[16]; - - // stage 1 - s[0] = in[0]; - s[1] = in[8]; - s[2] = in[4]; - s[3] = in[12]; - s[4] = in[2]; - s[5] = in[10]; - s[6] = in[6]; - s[7] = in[14]; - s[8] = in[1]; - s[9] = in[9]; - s[10] = in[5]; - s[11] = in[13]; - s[12] = in[3]; - s[13] = in[11]; - s[14] = in[7]; - s[15] = in[15]; - - // stage 2 - u[0] = _mm_unpacklo_epi16(s[8], s[15]); - u[1] = _mm_unpackhi_epi16(s[8], s[15]); - u[2] = _mm_unpacklo_epi16(s[9], s[14]); - u[3] = _mm_unpackhi_epi16(s[9], s[14]); - u[4] = _mm_unpacklo_epi16(s[10], s[13]); - u[5] = _mm_unpackhi_epi16(s[10], s[13]); - u[6] = 
_mm_unpacklo_epi16(s[11], s[12]); - u[7] = _mm_unpackhi_epi16(s[11], s[12]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_p30_m02); - v[1] = _mm_madd_epi16(u[1], k__cospi_p30_m02); - v[2] = _mm_madd_epi16(u[0], k__cospi_p02_p30); - v[3] = _mm_madd_epi16(u[1], k__cospi_p02_p30); - v[4] = _mm_madd_epi16(u[2], k__cospi_p14_m18); - v[5] = _mm_madd_epi16(u[3], k__cospi_p14_m18); - v[6] = _mm_madd_epi16(u[2], k__cospi_p18_p14); - v[7] = _mm_madd_epi16(u[3], k__cospi_p18_p14); - v[8] = _mm_madd_epi16(u[4], k__cospi_p22_m10); - v[9] = _mm_madd_epi16(u[5], k__cospi_p22_m10); - v[10] = _mm_madd_epi16(u[4], k__cospi_p10_p22); - v[11] = _mm_madd_epi16(u[5], k__cospi_p10_p22); - v[12] = _mm_madd_epi16(u[6], k__cospi_p06_m26); - v[13] = _mm_madd_epi16(u[7], k__cospi_p06_m26); - v[14] = _mm_madd_epi16(u[6], k__cospi_p26_p06); - v[15] = _mm_madd_epi16(u[7], k__cospi_p26_p06); - - u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); - u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); - u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); - u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); - u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); - u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); - u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING); - u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING); - u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING); - u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING); - u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING); - u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING); - u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING); - u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING); - - u[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - u[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - u[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); - u[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - u[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); - u[5] = _mm_srai_epi32(u[5], 
DCT_CONST_BITS); - u[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); - u[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); - u[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS); - u[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS); - u[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS); - u[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS); - u[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS); - u[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS); - u[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS); - u[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS); - - s[8] = _mm_packs_epi32(u[0], u[1]); - s[15] = _mm_packs_epi32(u[2], u[3]); - s[9] = _mm_packs_epi32(u[4], u[5]); - s[14] = _mm_packs_epi32(u[6], u[7]); - s[10] = _mm_packs_epi32(u[8], u[9]); - s[13] = _mm_packs_epi32(u[10], u[11]); - s[11] = _mm_packs_epi32(u[12], u[13]); - s[12] = _mm_packs_epi32(u[14], u[15]); - - // stage 3 - t[0] = s[0]; - t[1] = s[1]; - t[2] = s[2]; - t[3] = s[3]; - u[0] = _mm_unpacklo_epi16(s[4], s[7]); - u[1] = _mm_unpackhi_epi16(s[4], s[7]); - u[2] = _mm_unpacklo_epi16(s[5], s[6]); - u[3] = _mm_unpackhi_epi16(s[5], s[6]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_p28_m04); - v[1] = _mm_madd_epi16(u[1], k__cospi_p28_m04); - v[2] = _mm_madd_epi16(u[0], k__cospi_p04_p28); - v[3] = _mm_madd_epi16(u[1], k__cospi_p04_p28); - v[4] = _mm_madd_epi16(u[2], k__cospi_p12_m20); - v[5] = _mm_madd_epi16(u[3], k__cospi_p12_m20); - v[6] = _mm_madd_epi16(u[2], k__cospi_p20_p12); - v[7] = _mm_madd_epi16(u[3], k__cospi_p20_p12); - - u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); - u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); - u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); - u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); - u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); - u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); - - u[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - u[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - u[2] = 
_mm_srai_epi32(u[2], DCT_CONST_BITS); - u[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - u[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); - u[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); - u[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); - u[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); - - t[4] = _mm_packs_epi32(u[0], u[1]); - t[7] = _mm_packs_epi32(u[2], u[3]); - t[5] = _mm_packs_epi32(u[4], u[5]); - t[6] = _mm_packs_epi32(u[6], u[7]); - t[8] = _mm_add_epi16(s[8], s[9]); - t[9] = _mm_sub_epi16(s[8], s[9]); - t[10] = _mm_sub_epi16(s[11], s[10]); - t[11] = _mm_add_epi16(s[10], s[11]); - t[12] = _mm_add_epi16(s[12], s[13]); - t[13] = _mm_sub_epi16(s[12], s[13]); - t[14] = _mm_sub_epi16(s[15], s[14]); - t[15] = _mm_add_epi16(s[14], s[15]); - - // stage 4 - u[0] = _mm_unpacklo_epi16(t[0], t[1]); - u[1] = _mm_unpackhi_epi16(t[0], t[1]); - u[2] = _mm_unpacklo_epi16(t[2], t[3]); - u[3] = _mm_unpackhi_epi16(t[2], t[3]); - u[4] = _mm_unpacklo_epi16(t[9], t[14]); - u[5] = _mm_unpackhi_epi16(t[9], t[14]); - u[6] = _mm_unpacklo_epi16(t[10], t[13]); - u[7] = _mm_unpackhi_epi16(t[10], t[13]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_p16_p16); - v[1] = _mm_madd_epi16(u[1], k__cospi_p16_p16); - v[2] = _mm_madd_epi16(u[0], k__cospi_p16_m16); - v[3] = _mm_madd_epi16(u[1], k__cospi_p16_m16); - v[4] = _mm_madd_epi16(u[2], k__cospi_p24_m08); - v[5] = _mm_madd_epi16(u[3], k__cospi_p24_m08); - v[6] = _mm_madd_epi16(u[2], k__cospi_p08_p24); - v[7] = _mm_madd_epi16(u[3], k__cospi_p08_p24); - v[8] = _mm_madd_epi16(u[4], k__cospi_m08_p24); - v[9] = _mm_madd_epi16(u[5], k__cospi_m08_p24); - v[10] = _mm_madd_epi16(u[4], k__cospi_p24_p08); - v[11] = _mm_madd_epi16(u[5], k__cospi_p24_p08); - v[12] = _mm_madd_epi16(u[6], k__cospi_m24_m08); - v[13] = _mm_madd_epi16(u[7], k__cospi_m24_m08); - v[14] = _mm_madd_epi16(u[6], k__cospi_m08_p24); - v[15] = _mm_madd_epi16(u[7], k__cospi_m08_p24); - - u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); - u[2] = 
_mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); - u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); - u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); - u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); - u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); - u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING); - u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING); - u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING); - u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING); - u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING); - u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING); - u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING); - u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING); - - u[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - u[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - u[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); - u[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - u[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); - u[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); - u[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); - u[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); - u[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS); - u[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS); - u[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS); - u[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS); - u[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS); - u[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS); - u[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS); - u[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS); - - s[0] = _mm_packs_epi32(u[0], u[1]); - s[1] = _mm_packs_epi32(u[2], u[3]); - s[2] = _mm_packs_epi32(u[4], u[5]); - s[3] = _mm_packs_epi32(u[6], u[7]); - s[4] = _mm_add_epi16(t[4], t[5]); - s[5] = _mm_sub_epi16(t[4], t[5]); - s[6] = _mm_sub_epi16(t[7], t[6]); - s[7] = _mm_add_epi16(t[6], t[7]); - s[8] = t[8]; - s[15] = t[15]; - s[9] = _mm_packs_epi32(u[8], u[9]); - s[14] = _mm_packs_epi32(u[10], u[11]); - s[10] = _mm_packs_epi32(u[12], u[13]); - s[13] = _mm_packs_epi32(u[14], 
u[15]); - s[11] = t[11]; - s[12] = t[12]; - - // stage 5 - t[0] = _mm_add_epi16(s[0], s[3]); - t[1] = _mm_add_epi16(s[1], s[2]); - t[2] = _mm_sub_epi16(s[1], s[2]); - t[3] = _mm_sub_epi16(s[0], s[3]); - t[4] = s[4]; - t[7] = s[7]; - - u[0] = _mm_unpacklo_epi16(s[5], s[6]); - u[1] = _mm_unpackhi_epi16(s[5], s[6]); - v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16); - v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16); - v[2] = _mm_madd_epi16(u[0], k__cospi_p16_p16); - v[3] = _mm_madd_epi16(u[1], k__cospi_p16_p16); - u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); - u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); - u[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - u[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - u[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); - u[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - t[5] = _mm_packs_epi32(u[0], u[1]); - t[6] = _mm_packs_epi32(u[2], u[3]); - - t[8] = _mm_add_epi16(s[8], s[11]); - t[9] = _mm_add_epi16(s[9], s[10]); - t[10] = _mm_sub_epi16(s[9], s[10]); - t[11] = _mm_sub_epi16(s[8], s[11]); - t[12] = _mm_sub_epi16(s[15], s[12]); - t[13] = _mm_sub_epi16(s[14], s[13]); - t[14] = _mm_add_epi16(s[13], s[14]); - t[15] = _mm_add_epi16(s[12], s[15]); - - // stage 6 - s[0] = _mm_add_epi16(t[0], t[7]); - s[1] = _mm_add_epi16(t[1], t[6]); - s[2] = _mm_add_epi16(t[2], t[5]); - s[3] = _mm_add_epi16(t[3], t[4]); - s[4] = _mm_sub_epi16(t[3], t[4]); - s[5] = _mm_sub_epi16(t[2], t[5]); - s[6] = _mm_sub_epi16(t[1], t[6]); - s[7] = _mm_sub_epi16(t[0], t[7]); - s[8] = t[8]; - s[9] = t[9]; - - u[0] = _mm_unpacklo_epi16(t[10], t[13]); - u[1] = _mm_unpackhi_epi16(t[10], t[13]); - u[2] = _mm_unpacklo_epi16(t[11], t[12]); - u[3] = _mm_unpackhi_epi16(t[11], t[12]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16); - v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16); - v[2] = _mm_madd_epi16(u[0], k__cospi_p16_p16); - v[3] = _mm_madd_epi16(u[1], k__cospi_p16_p16); 
- v[4] = _mm_madd_epi16(u[2], k__cospi_m16_p16); - v[5] = _mm_madd_epi16(u[3], k__cospi_m16_p16); - v[6] = _mm_madd_epi16(u[2], k__cospi_p16_p16); - v[7] = _mm_madd_epi16(u[3], k__cospi_p16_p16); - - u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); - u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); - u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); - u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); - u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); - u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); - - u[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - u[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - u[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); - u[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - u[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); - u[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); - u[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); - u[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); - - s[10] = _mm_packs_epi32(u[0], u[1]); - s[13] = _mm_packs_epi32(u[2], u[3]); - s[11] = _mm_packs_epi32(u[4], u[5]); - s[12] = _mm_packs_epi32(u[6], u[7]); - s[14] = t[14]; - s[15] = t[15]; - - // stage 7 - in[0] = _mm_add_epi16(s[0], s[15]); - in[1] = _mm_add_epi16(s[1], s[14]); - in[2] = _mm_add_epi16(s[2], s[13]); - in[3] = _mm_add_epi16(s[3], s[12]); - in[4] = _mm_add_epi16(s[4], s[11]); - in[5] = _mm_add_epi16(s[5], s[10]); - in[6] = _mm_add_epi16(s[6], s[9]); - in[7] = _mm_add_epi16(s[7], s[8]); - in[8] = _mm_sub_epi16(s[7], s[8]); - in[9] = _mm_sub_epi16(s[6], s[9]); - in[10] = _mm_sub_epi16(s[5], s[10]); - in[11] = _mm_sub_epi16(s[4], s[11]); - in[12] = _mm_sub_epi16(s[3], s[12]); - in[13] = _mm_sub_epi16(s[2], s[13]); - in[14] = _mm_sub_epi16(s[1], s[14]); - in[15] = _mm_sub_epi16(s[0], s[15]); -} - -void vp10_idct16_sse2(__m128i *in0, __m128i *in1) { - array_transpose_16x16(in0, in1); - vp10_idct16_8col(in0); - vp10_idct16_8col(in1); -} - -void vp10_iadst16_sse2(__m128i 
*in0, __m128i *in1) { - array_transpose_16x16(in0, in1); - vp10_iadst16_8col(in0); - vp10_iadst16_8col(in1); -} - -void vp10_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest, - int stride) { - const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); - const __m128i final_rounding = _mm_set1_epi16(1 << 5); - const __m128i zero = _mm_setzero_si128(); - - const __m128i stg2_0 = pair_set_epi16(cospi_30_64, -cospi_2_64); - const __m128i stg2_1 = pair_set_epi16(cospi_2_64, cospi_30_64); - const __m128i stg2_6 = pair_set_epi16(cospi_6_64, -cospi_26_64); - const __m128i stg2_7 = pair_set_epi16(cospi_26_64, cospi_6_64); - - const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64); - const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64); - - const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64); - const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64); - const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64); - const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64); - const __m128i stg4_7 = pair_set_epi16(-cospi_8_64, cospi_24_64); - - const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64); - __m128i in[16], l[16]; - __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, - stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, - stp1_8_0, stp1_12_0; - __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7, - stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14; - __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - int i; - // First 1-D inverse DCT - // Load input data. 
- in[0] = _mm_load_si128((const __m128i *)input); - in[1] = _mm_load_si128((const __m128i *)(input + 8 * 2)); - in[2] = _mm_load_si128((const __m128i *)(input + 8 * 4)); - in[3] = _mm_load_si128((const __m128i *)(input + 8 * 6)); - - TRANSPOSE_8X4(in[0], in[1], in[2], in[3], in[0], in[1]); - - // Stage2 - { - const __m128i lo_1_15 = _mm_unpackhi_epi16(in[0], zero); - const __m128i lo_13_3 = _mm_unpackhi_epi16(zero, in[1]); - - tmp0 = _mm_madd_epi16(lo_1_15, stg2_0); - tmp2 = _mm_madd_epi16(lo_1_15, stg2_1); - tmp5 = _mm_madd_epi16(lo_13_3, stg2_6); - tmp7 = _mm_madd_epi16(lo_13_3, stg2_7); - - tmp0 = _mm_add_epi32(tmp0, rounding); - tmp2 = _mm_add_epi32(tmp2, rounding); - tmp5 = _mm_add_epi32(tmp5, rounding); - tmp7 = _mm_add_epi32(tmp7, rounding); - - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); - tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); - tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); - - stp2_8 = _mm_packs_epi32(tmp0, tmp2); - stp2_11 = _mm_packs_epi32(tmp5, tmp7); - } - - // Stage3 - { - const __m128i lo_2_14 = _mm_unpacklo_epi16(in[1], zero); - - tmp0 = _mm_madd_epi16(lo_2_14, stg3_0); - tmp2 = _mm_madd_epi16(lo_2_14, stg3_1); - - tmp0 = _mm_add_epi32(tmp0, rounding); - tmp2 = _mm_add_epi32(tmp2, rounding); - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); - - stp1_13 = _mm_unpackhi_epi64(stp2_11, zero); - stp1_14 = _mm_unpackhi_epi64(stp2_8, zero); - - stp1_4 = _mm_packs_epi32(tmp0, tmp2); - } - - // Stage4 - { - const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], zero); - const __m128i lo_9_14 = _mm_unpacklo_epi16(stp2_8, stp1_14); - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp2_11, stp1_13); - - tmp0 = _mm_madd_epi16(lo_0_8, stg4_0); - tmp2 = _mm_madd_epi16(lo_0_8, stg4_1); - tmp1 = _mm_madd_epi16(lo_9_14, stg4_4); - tmp3 = _mm_madd_epi16(lo_9_14, stg4_5); - tmp5 = _mm_madd_epi16(lo_10_13, stg4_6); - tmp7 = _mm_madd_epi16(lo_10_13, stg4_7); - - tmp0 = 
_mm_add_epi32(tmp0, rounding); - tmp2 = _mm_add_epi32(tmp2, rounding); - tmp1 = _mm_add_epi32(tmp1, rounding); - tmp3 = _mm_add_epi32(tmp3, rounding); - tmp5 = _mm_add_epi32(tmp5, rounding); - tmp7 = _mm_add_epi32(tmp7, rounding); - - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); - tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); - tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); - - stp1_0 = _mm_packs_epi32(tmp0, tmp0); - stp1_1 = _mm_packs_epi32(tmp2, tmp2); - stp2_9 = _mm_packs_epi32(tmp1, tmp3); - stp2_10 = _mm_packs_epi32(tmp5, tmp7); - - stp2_6 = _mm_unpackhi_epi64(stp1_4, zero); - } - - // Stage5 and Stage6 - { - tmp0 = _mm_add_epi16(stp2_8, stp2_11); - tmp1 = _mm_sub_epi16(stp2_8, stp2_11); - tmp2 = _mm_add_epi16(stp2_9, stp2_10); - tmp3 = _mm_sub_epi16(stp2_9, stp2_10); - - stp1_9 = _mm_unpacklo_epi64(tmp2, zero); - stp1_10 = _mm_unpacklo_epi64(tmp3, zero); - stp1_8 = _mm_unpacklo_epi64(tmp0, zero); - stp1_11 = _mm_unpacklo_epi64(tmp1, zero); - - stp1_13 = _mm_unpackhi_epi64(tmp3, zero); - stp1_14 = _mm_unpackhi_epi64(tmp2, zero); - stp1_12 = _mm_unpackhi_epi64(tmp1, zero); - stp1_15 = _mm_unpackhi_epi64(tmp0, zero); - } - - // Stage6 - { - const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp1_4); - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); - const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); - - tmp1 = _mm_madd_epi16(lo_6_5, stg4_1); - tmp3 = _mm_madd_epi16(lo_6_5, stg4_0); - tmp0 = _mm_madd_epi16(lo_10_13, stg6_0); - tmp2 = _mm_madd_epi16(lo_10_13, stg4_0); - tmp4 = _mm_madd_epi16(lo_11_12, stg6_0); - tmp6 = _mm_madd_epi16(lo_11_12, stg4_0); - - tmp1 = _mm_add_epi32(tmp1, rounding); - tmp3 = _mm_add_epi32(tmp3, rounding); - tmp0 = _mm_add_epi32(tmp0, rounding); - tmp2 = _mm_add_epi32(tmp2, rounding); - tmp4 = _mm_add_epi32(tmp4, rounding); - tmp6 = _mm_add_epi32(tmp6, rounding); - - tmp1 = 
_mm_srai_epi32(tmp1, DCT_CONST_BITS); - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); - tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); - tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); - - stp1_6 = _mm_packs_epi32(tmp3, tmp1); - - stp2_10 = _mm_packs_epi32(tmp0, zero); - stp2_13 = _mm_packs_epi32(tmp2, zero); - stp2_11 = _mm_packs_epi32(tmp4, zero); - stp2_12 = _mm_packs_epi32(tmp6, zero); - - tmp0 = _mm_add_epi16(stp1_0, stp1_4); - tmp1 = _mm_sub_epi16(stp1_0, stp1_4); - tmp2 = _mm_add_epi16(stp1_1, stp1_6); - tmp3 = _mm_sub_epi16(stp1_1, stp1_6); - - stp2_0 = _mm_unpackhi_epi64(tmp0, zero); - stp2_1 = _mm_unpacklo_epi64(tmp2, zero); - stp2_2 = _mm_unpackhi_epi64(tmp2, zero); - stp2_3 = _mm_unpacklo_epi64(tmp0, zero); - stp2_4 = _mm_unpacklo_epi64(tmp1, zero); - stp2_5 = _mm_unpackhi_epi64(tmp3, zero); - stp2_6 = _mm_unpacklo_epi64(tmp3, zero); - stp2_7 = _mm_unpackhi_epi64(tmp1, zero); - } - - // Stage7. Left 8x16 only. 
- l[0] = _mm_add_epi16(stp2_0, stp1_15); - l[1] = _mm_add_epi16(stp2_1, stp1_14); - l[2] = _mm_add_epi16(stp2_2, stp2_13); - l[3] = _mm_add_epi16(stp2_3, stp2_12); - l[4] = _mm_add_epi16(stp2_4, stp2_11); - l[5] = _mm_add_epi16(stp2_5, stp2_10); - l[6] = _mm_add_epi16(stp2_6, stp1_9); - l[7] = _mm_add_epi16(stp2_7, stp1_8); - l[8] = _mm_sub_epi16(stp2_7, stp1_8); - l[9] = _mm_sub_epi16(stp2_6, stp1_9); - l[10] = _mm_sub_epi16(stp2_5, stp2_10); - l[11] = _mm_sub_epi16(stp2_4, stp2_11); - l[12] = _mm_sub_epi16(stp2_3, stp2_12); - l[13] = _mm_sub_epi16(stp2_2, stp2_13); - l[14] = _mm_sub_epi16(stp2_1, stp1_14); - l[15] = _mm_sub_epi16(stp2_0, stp1_15); - - // Second 1-D inverse transform, performed per 8x16 block - for (i = 0; i < 2; i++) { - int j; - array_transpose_4X8(l + 8 * i, in); - - IDCT16_10 - - // Stage7 - in[0] = _mm_add_epi16(stp2_0, stp1_15); - in[1] = _mm_add_epi16(stp2_1, stp1_14); - in[2] = _mm_add_epi16(stp2_2, stp2_13); - in[3] = _mm_add_epi16(stp2_3, stp2_12); - in[4] = _mm_add_epi16(stp2_4, stp2_11); - in[5] = _mm_add_epi16(stp2_5, stp2_10); - in[6] = _mm_add_epi16(stp2_6, stp1_9); - in[7] = _mm_add_epi16(stp2_7, stp1_8); - in[8] = _mm_sub_epi16(stp2_7, stp1_8); - in[9] = _mm_sub_epi16(stp2_6, stp1_9); - in[10] = _mm_sub_epi16(stp2_5, stp2_10); - in[11] = _mm_sub_epi16(stp2_4, stp2_11); - in[12] = _mm_sub_epi16(stp2_3, stp2_12); - in[13] = _mm_sub_epi16(stp2_2, stp2_13); - in[14] = _mm_sub_epi16(stp2_1, stp1_14); - in[15] = _mm_sub_epi16(stp2_0, stp1_15); - - for (j = 0; j < 16; ++j) { - // Final rounding and shift - in[j] = _mm_adds_epi16(in[j], final_rounding); - in[j] = _mm_srai_epi16(in[j], 6); - RECON_AND_STORE(dest + j * stride, in[j]); - } - - dest += 8; - } -} - -#define LOAD_DQCOEFF(reg, input) \ - { \ - reg = _mm_load_si128((const __m128i *) input); \ - input += 8; \ - } \ - -#define IDCT32_34 \ -/* Stage1 */ \ -{ \ - const __m128i zero = _mm_setzero_si128();\ - const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], zero); \ - const __m128i 
hi_1_31 = _mm_unpackhi_epi16(in[1], zero); \ - \ - const __m128i lo_25_7= _mm_unpacklo_epi16(zero, in[7]); \ - const __m128i hi_25_7 = _mm_unpackhi_epi16(zero, in[7]); \ - \ - const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], zero); \ - const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], zero); \ - \ - const __m128i lo_29_3 = _mm_unpacklo_epi16(zero, in[3]); \ - const __m128i hi_29_3 = _mm_unpackhi_epi16(zero, in[3]); \ - \ - MULTIPLICATION_AND_ADD_2(lo_1_31, hi_1_31, stg1_0, \ - stg1_1, stp1_16, stp1_31); \ - MULTIPLICATION_AND_ADD_2(lo_25_7, hi_25_7, stg1_6, \ - stg1_7, stp1_19, stp1_28); \ - MULTIPLICATION_AND_ADD_2(lo_5_27, hi_5_27, stg1_8, \ - stg1_9, stp1_20, stp1_27); \ - MULTIPLICATION_AND_ADD_2(lo_29_3, hi_29_3, stg1_14, \ - stg1_15, stp1_23, stp1_24); \ -} \ -\ -/* Stage2 */ \ -{ \ - const __m128i zero = _mm_setzero_si128();\ - const __m128i lo_2_30 = _mm_unpacklo_epi16(in[2], zero); \ - const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], zero); \ - \ - const __m128i lo_26_6 = _mm_unpacklo_epi16(zero, in[6]); \ - const __m128i hi_26_6 = _mm_unpackhi_epi16(zero, in[6]); \ - \ - MULTIPLICATION_AND_ADD_2(lo_2_30, hi_2_30, stg2_0, \ - stg2_1, stp2_8, stp2_15); \ - MULTIPLICATION_AND_ADD_2(lo_26_6, hi_26_6, stg2_6, \ - stg2_7, stp2_11, stp2_12); \ - \ - stp2_16 = stp1_16; \ - stp2_19 = stp1_19; \ - \ - stp2_20 = stp1_20; \ - stp2_23 = stp1_23; \ - \ - stp2_24 = stp1_24; \ - stp2_27 = stp1_27; \ - \ - stp2_28 = stp1_28; \ - stp2_31 = stp1_31; \ -} \ -\ -/* Stage3 */ \ -{ \ - const __m128i zero = _mm_setzero_si128();\ - const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], zero); \ - const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], zero); \ - \ - const __m128i lo_17_30 = _mm_unpacklo_epi16(stp1_16, stp1_31); \ - const __m128i hi_17_30 = _mm_unpackhi_epi16(stp1_16, stp1_31); \ - const __m128i lo_18_29 = _mm_unpacklo_epi16(stp1_19, stp1_28); \ - const __m128i hi_18_29 = _mm_unpackhi_epi16(stp1_19, stp1_28); \ - \ - const __m128i lo_21_26 = _mm_unpacklo_epi16(stp1_20, 
stp1_27); \ - const __m128i hi_21_26 = _mm_unpackhi_epi16(stp1_20, stp1_27); \ - const __m128i lo_22_25 = _mm_unpacklo_epi16(stp1_23, stp1_24); \ - const __m128i hi_22_25 = _mm_unpackhi_epi16(stp1_23, stp2_24); \ - \ - MULTIPLICATION_AND_ADD_2(lo_4_28, hi_4_28, stg3_0, \ - stg3_1, stp1_4, stp1_7); \ - \ - stp1_8 = stp2_8; \ - stp1_11 = stp2_11; \ - stp1_12 = stp2_12; \ - stp1_15 = stp2_15; \ - \ - MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4, \ - stg3_5, stg3_6, stg3_4, stp1_17, stp1_30, \ - stp1_18, stp1_29) \ - MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8, \ - stg3_9, stg3_10, stg3_8, stp1_21, stp1_26, \ - stp1_22, stp1_25) \ - \ - stp1_16 = stp2_16; \ - stp1_31 = stp2_31; \ - stp1_19 = stp2_19; \ - stp1_20 = stp2_20; \ - stp1_23 = stp2_23; \ - stp1_24 = stp2_24; \ - stp1_27 = stp2_27; \ - stp1_28 = stp2_28; \ -} \ -\ -/* Stage4 */ \ -{ \ - const __m128i zero = _mm_setzero_si128();\ - const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], zero); \ - const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], zero); \ - \ - const __m128i lo_9_14 = _mm_unpacklo_epi16(stp2_8, stp2_15); \ - const __m128i hi_9_14 = _mm_unpackhi_epi16(stp2_8, stp2_15); \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp2_11, stp2_12); \ - const __m128i hi_10_13 = _mm_unpackhi_epi16(stp2_11, stp2_12); \ - \ - MULTIPLICATION_AND_ADD_2(lo_0_16, hi_0_16, stg4_0, \ - stg4_1, stp2_0, stp2_1); \ - \ - stp2_4 = stp1_4; \ - stp2_5 = stp1_4; \ - stp2_6 = stp1_7; \ - stp2_7 = stp1_7; \ - \ - MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4, \ - stg4_5, stg4_6, stg4_4, stp2_9, stp2_14, \ - stp2_10, stp2_13) \ - \ - stp2_8 = stp1_8; \ - stp2_15 = stp1_15; \ - stp2_11 = stp1_11; \ - stp2_12 = stp1_12; \ - \ - stp2_16 = _mm_add_epi16(stp1_16, stp1_19); \ - stp2_17 = _mm_add_epi16(stp1_17, stp1_18); \ - stp2_18 = _mm_sub_epi16(stp1_17, stp1_18); \ - stp2_19 = _mm_sub_epi16(stp1_16, stp1_19); \ - stp2_20 = _mm_sub_epi16(stp1_23, stp1_20); \ - 
stp2_21 = _mm_sub_epi16(stp1_22, stp1_21); \ - stp2_22 = _mm_add_epi16(stp1_22, stp1_21); \ - stp2_23 = _mm_add_epi16(stp1_23, stp1_20); \ - \ - stp2_24 = _mm_add_epi16(stp1_24, stp1_27); \ - stp2_25 = _mm_add_epi16(stp1_25, stp1_26); \ - stp2_26 = _mm_sub_epi16(stp1_25, stp1_26); \ - stp2_27 = _mm_sub_epi16(stp1_24, stp1_27); \ - stp2_28 = _mm_sub_epi16(stp1_31, stp1_28); \ - stp2_29 = _mm_sub_epi16(stp1_30, stp1_29); \ - stp2_30 = _mm_add_epi16(stp1_29, stp1_30); \ - stp2_31 = _mm_add_epi16(stp1_28, stp1_31); \ -} \ -\ -/* Stage5 */ \ -{ \ - const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ - const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ - const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29); \ - const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \ - \ - const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28); \ - const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28); \ - const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \ - const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \ - \ - const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ - const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ - \ - stp1_0 = stp2_0; \ - stp1_1 = stp2_1; \ - stp1_2 = stp2_1; \ - stp1_3 = stp2_0; \ - \ - tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \ - tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \ - tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \ - tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \ - \ - tmp0 = _mm_add_epi32(tmp0, rounding); \ - tmp1 = _mm_add_epi32(tmp1, rounding); \ - tmp2 = _mm_add_epi32(tmp2, rounding); \ - tmp3 = _mm_add_epi32(tmp3, rounding); \ - \ - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ - \ - stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ - stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ - \ - stp1_4 = stp2_4; \ - stp1_7 = stp2_7; \ - \ - 
stp1_8 = _mm_add_epi16(stp2_8, stp2_11); \ - stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \ - stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \ - stp1_11 = _mm_sub_epi16(stp2_8, stp2_11); \ - stp1_12 = _mm_sub_epi16(stp2_15, stp2_12); \ - stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \ - stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \ - stp1_15 = _mm_add_epi16(stp2_15, stp2_12); \ - \ - stp1_16 = stp2_16; \ - stp1_17 = stp2_17; \ - \ - MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4, \ - stg4_5, stg4_4, stg4_5, stp1_18, stp1_29, \ - stp1_19, stp1_28) \ - MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6, \ - stg4_4, stg4_6, stg4_4, stp1_20, stp1_27, \ - stp1_21, stp1_26) \ - \ - stp1_22 = stp2_22; \ - stp1_23 = stp2_23; \ - stp1_24 = stp2_24; \ - stp1_25 = stp2_25; \ - stp1_30 = stp2_30; \ - stp1_31 = stp2_31; \ -} \ -\ -/* Stage6 */ \ -{ \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ - const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ - const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \ - const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \ - \ - stp2_0 = _mm_add_epi16(stp1_0, stp1_7); \ - stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \ - stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \ - stp2_3 = _mm_add_epi16(stp1_3, stp1_4); \ - stp2_4 = _mm_sub_epi16(stp1_3, stp1_4); \ - stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \ - stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \ - stp2_7 = _mm_sub_epi16(stp1_0, stp1_7); \ - \ - stp2_8 = stp1_8; \ - stp2_9 = stp1_9; \ - stp2_14 = stp1_14; \ - stp2_15 = stp1_15; \ - \ - MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \ - stg6_0, stg4_0, stg6_0, stg4_0, stp2_10, \ - stp2_13, stp2_11, stp2_12) \ - \ - stp2_16 = _mm_add_epi16(stp1_16, stp1_23); \ - stp2_17 = _mm_add_epi16(stp1_17, stp1_22); \ - stp2_18 = _mm_add_epi16(stp1_18, stp1_21); \ - stp2_19 = _mm_add_epi16(stp1_19, stp1_20); \ - stp2_20 = _mm_sub_epi16(stp1_19, stp1_20); \ - stp2_21 = 
_mm_sub_epi16(stp1_18, stp1_21); \ - stp2_22 = _mm_sub_epi16(stp1_17, stp1_22); \ - stp2_23 = _mm_sub_epi16(stp1_16, stp1_23); \ - \ - stp2_24 = _mm_sub_epi16(stp1_31, stp1_24); \ - stp2_25 = _mm_sub_epi16(stp1_30, stp1_25); \ - stp2_26 = _mm_sub_epi16(stp1_29, stp1_26); \ - stp2_27 = _mm_sub_epi16(stp1_28, stp1_27); \ - stp2_28 = _mm_add_epi16(stp1_27, stp1_28); \ - stp2_29 = _mm_add_epi16(stp1_26, stp1_29); \ - stp2_30 = _mm_add_epi16(stp1_25, stp1_30); \ - stp2_31 = _mm_add_epi16(stp1_24, stp1_31); \ -} \ -\ -/* Stage7 */ \ -{ \ - const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \ - const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \ - const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ - const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ - \ - const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25); \ - const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \ - const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24); \ - const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24); \ - \ - stp1_0 = _mm_add_epi16(stp2_0, stp2_15); \ - stp1_1 = _mm_add_epi16(stp2_1, stp2_14); \ - stp1_2 = _mm_add_epi16(stp2_2, stp2_13); \ - stp1_3 = _mm_add_epi16(stp2_3, stp2_12); \ - stp1_4 = _mm_add_epi16(stp2_4, stp2_11); \ - stp1_5 = _mm_add_epi16(stp2_5, stp2_10); \ - stp1_6 = _mm_add_epi16(stp2_6, stp2_9); \ - stp1_7 = _mm_add_epi16(stp2_7, stp2_8); \ - stp1_8 = _mm_sub_epi16(stp2_7, stp2_8); \ - stp1_9 = _mm_sub_epi16(stp2_6, stp2_9); \ - stp1_10 = _mm_sub_epi16(stp2_5, stp2_10); \ - stp1_11 = _mm_sub_epi16(stp2_4, stp2_11); \ - stp1_12 = _mm_sub_epi16(stp2_3, stp2_12); \ - stp1_13 = _mm_sub_epi16(stp2_2, stp2_13); \ - stp1_14 = _mm_sub_epi16(stp2_1, stp2_14); \ - stp1_15 = _mm_sub_epi16(stp2_0, stp2_15); \ - \ - stp1_16 = stp2_16; \ - stp1_17 = stp2_17; \ - stp1_18 = stp2_18; \ - stp1_19 = stp2_19; \ - \ - MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0, \ - stg4_0, stg6_0, 
stg4_0, stp1_20, stp1_27, \ - stp1_21, stp1_26) \ - MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0, \ - stg4_0, stg6_0, stg4_0, stp1_22, stp1_25, \ - stp1_23, stp1_24) \ - \ - stp1_28 = stp2_28; \ - stp1_29 = stp2_29; \ - stp1_30 = stp2_30; \ - stp1_31 = stp2_31; \ -} - - -#define IDCT32 \ -/* Stage1 */ \ -{ \ - const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], in[31]); \ - const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], in[31]); \ - const __m128i lo_17_15 = _mm_unpacklo_epi16(in[17], in[15]); \ - const __m128i hi_17_15 = _mm_unpackhi_epi16(in[17], in[15]); \ - \ - const __m128i lo_9_23 = _mm_unpacklo_epi16(in[9], in[23]); \ - const __m128i hi_9_23 = _mm_unpackhi_epi16(in[9], in[23]); \ - const __m128i lo_25_7= _mm_unpacklo_epi16(in[25], in[7]); \ - const __m128i hi_25_7 = _mm_unpackhi_epi16(in[25], in[7]); \ - \ - const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], in[27]); \ - const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], in[27]); \ - const __m128i lo_21_11 = _mm_unpacklo_epi16(in[21], in[11]); \ - const __m128i hi_21_11 = _mm_unpackhi_epi16(in[21], in[11]); \ - \ - const __m128i lo_13_19 = _mm_unpacklo_epi16(in[13], in[19]); \ - const __m128i hi_13_19 = _mm_unpackhi_epi16(in[13], in[19]); \ - const __m128i lo_29_3 = _mm_unpacklo_epi16(in[29], in[3]); \ - const __m128i hi_29_3 = _mm_unpackhi_epi16(in[29], in[3]); \ - \ - MULTIPLICATION_AND_ADD(lo_1_31, hi_1_31, lo_17_15, hi_17_15, stg1_0, \ - stg1_1, stg1_2, stg1_3, stp1_16, stp1_31, \ - stp1_17, stp1_30) \ - MULTIPLICATION_AND_ADD(lo_9_23, hi_9_23, lo_25_7, hi_25_7, stg1_4, \ - stg1_5, stg1_6, stg1_7, stp1_18, stp1_29, \ - stp1_19, stp1_28) \ - MULTIPLICATION_AND_ADD(lo_5_27, hi_5_27, lo_21_11, hi_21_11, stg1_8, \ - stg1_9, stg1_10, stg1_11, stp1_20, stp1_27, \ - stp1_21, stp1_26) \ - MULTIPLICATION_AND_ADD(lo_13_19, hi_13_19, lo_29_3, hi_29_3, stg1_12, \ - stg1_13, stg1_14, stg1_15, stp1_22, stp1_25, \ - stp1_23, stp1_24) \ -} \ -\ -/* Stage2 */ \ -{ \ - const __m128i lo_2_30 = 
_mm_unpacklo_epi16(in[2], in[30]); \ - const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], in[30]); \ - const __m128i lo_18_14 = _mm_unpacklo_epi16(in[18], in[14]); \ - const __m128i hi_18_14 = _mm_unpackhi_epi16(in[18], in[14]); \ - \ - const __m128i lo_10_22 = _mm_unpacklo_epi16(in[10], in[22]); \ - const __m128i hi_10_22 = _mm_unpackhi_epi16(in[10], in[22]); \ - const __m128i lo_26_6 = _mm_unpacklo_epi16(in[26], in[6]); \ - const __m128i hi_26_6 = _mm_unpackhi_epi16(in[26], in[6]); \ - \ - MULTIPLICATION_AND_ADD(lo_2_30, hi_2_30, lo_18_14, hi_18_14, stg2_0, \ - stg2_1, stg2_2, stg2_3, stp2_8, stp2_15, stp2_9, \ - stp2_14) \ - MULTIPLICATION_AND_ADD(lo_10_22, hi_10_22, lo_26_6, hi_26_6, stg2_4, \ - stg2_5, stg2_6, stg2_7, stp2_10, stp2_13, \ - stp2_11, stp2_12) \ - \ - stp2_16 = _mm_add_epi16(stp1_16, stp1_17); \ - stp2_17 = _mm_sub_epi16(stp1_16, stp1_17); \ - stp2_18 = _mm_sub_epi16(stp1_19, stp1_18); \ - stp2_19 = _mm_add_epi16(stp1_19, stp1_18); \ - \ - stp2_20 = _mm_add_epi16(stp1_20, stp1_21); \ - stp2_21 = _mm_sub_epi16(stp1_20, stp1_21); \ - stp2_22 = _mm_sub_epi16(stp1_23, stp1_22); \ - stp2_23 = _mm_add_epi16(stp1_23, stp1_22); \ - \ - stp2_24 = _mm_add_epi16(stp1_24, stp1_25); \ - stp2_25 = _mm_sub_epi16(stp1_24, stp1_25); \ - stp2_26 = _mm_sub_epi16(stp1_27, stp1_26); \ - stp2_27 = _mm_add_epi16(stp1_27, stp1_26); \ - \ - stp2_28 = _mm_add_epi16(stp1_28, stp1_29); \ - stp2_29 = _mm_sub_epi16(stp1_28, stp1_29); \ - stp2_30 = _mm_sub_epi16(stp1_31, stp1_30); \ - stp2_31 = _mm_add_epi16(stp1_31, stp1_30); \ -} \ -\ -/* Stage3 */ \ -{ \ - const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], in[28]); \ - const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], in[28]); \ - const __m128i lo_20_12 = _mm_unpacklo_epi16(in[20], in[12]); \ - const __m128i hi_20_12 = _mm_unpackhi_epi16(in[20], in[12]); \ - \ - const __m128i lo_17_30 = _mm_unpacklo_epi16(stp2_17, stp2_30); \ - const __m128i hi_17_30 = _mm_unpackhi_epi16(stp2_17, stp2_30); \ - const __m128i lo_18_29 = 
_mm_unpacklo_epi16(stp2_18, stp2_29); \ - const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \ - \ - const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ - const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ - const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25); \ - const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \ - \ - MULTIPLICATION_AND_ADD(lo_4_28, hi_4_28, lo_20_12, hi_20_12, stg3_0, \ - stg3_1, stg3_2, stg3_3, stp1_4, stp1_7, stp1_5, \ - stp1_6) \ - \ - stp1_8 = _mm_add_epi16(stp2_8, stp2_9); \ - stp1_9 = _mm_sub_epi16(stp2_8, stp2_9); \ - stp1_10 = _mm_sub_epi16(stp2_11, stp2_10); \ - stp1_11 = _mm_add_epi16(stp2_11, stp2_10); \ - stp1_12 = _mm_add_epi16(stp2_12, stp2_13); \ - stp1_13 = _mm_sub_epi16(stp2_12, stp2_13); \ - stp1_14 = _mm_sub_epi16(stp2_15, stp2_14); \ - stp1_15 = _mm_add_epi16(stp2_15, stp2_14); \ - \ - MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4, \ - stg3_5, stg3_6, stg3_4, stp1_17, stp1_30, \ - stp1_18, stp1_29) \ - MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8, \ - stg3_9, stg3_10, stg3_8, stp1_21, stp1_26, \ - stp1_22, stp1_25) \ - \ - stp1_16 = stp2_16; \ - stp1_31 = stp2_31; \ - stp1_19 = stp2_19; \ - stp1_20 = stp2_20; \ - stp1_23 = stp2_23; \ - stp1_24 = stp2_24; \ - stp1_27 = stp2_27; \ - stp1_28 = stp2_28; \ -} \ -\ -/* Stage4 */ \ -{ \ - const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], in[16]); \ - const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], in[16]); \ - const __m128i lo_8_24 = _mm_unpacklo_epi16(in[8], in[24]); \ - const __m128i hi_8_24 = _mm_unpackhi_epi16(in[8], in[24]); \ - \ - const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \ - const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ - const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ - \ - MULTIPLICATION_AND_ADD(lo_0_16, hi_0_16, lo_8_24, hi_8_24, stg4_0, \ - 
stg4_1, stg4_2, stg4_3, stp2_0, stp2_1, \ - stp2_2, stp2_3) \ - \ - stp2_4 = _mm_add_epi16(stp1_4, stp1_5); \ - stp2_5 = _mm_sub_epi16(stp1_4, stp1_5); \ - stp2_6 = _mm_sub_epi16(stp1_7, stp1_6); \ - stp2_7 = _mm_add_epi16(stp1_7, stp1_6); \ - \ - MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4, \ - stg4_5, stg4_6, stg4_4, stp2_9, stp2_14, \ - stp2_10, stp2_13) \ - \ - stp2_8 = stp1_8; \ - stp2_15 = stp1_15; \ - stp2_11 = stp1_11; \ - stp2_12 = stp1_12; \ - \ - stp2_16 = _mm_add_epi16(stp1_16, stp1_19); \ - stp2_17 = _mm_add_epi16(stp1_17, stp1_18); \ - stp2_18 = _mm_sub_epi16(stp1_17, stp1_18); \ - stp2_19 = _mm_sub_epi16(stp1_16, stp1_19); \ - stp2_20 = _mm_sub_epi16(stp1_23, stp1_20); \ - stp2_21 = _mm_sub_epi16(stp1_22, stp1_21); \ - stp2_22 = _mm_add_epi16(stp1_22, stp1_21); \ - stp2_23 = _mm_add_epi16(stp1_23, stp1_20); \ - \ - stp2_24 = _mm_add_epi16(stp1_24, stp1_27); \ - stp2_25 = _mm_add_epi16(stp1_25, stp1_26); \ - stp2_26 = _mm_sub_epi16(stp1_25, stp1_26); \ - stp2_27 = _mm_sub_epi16(stp1_24, stp1_27); \ - stp2_28 = _mm_sub_epi16(stp1_31, stp1_28); \ - stp2_29 = _mm_sub_epi16(stp1_30, stp1_29); \ - stp2_30 = _mm_add_epi16(stp1_29, stp1_30); \ - stp2_31 = _mm_add_epi16(stp1_28, stp1_31); \ -} \ -\ -/* Stage5 */ \ -{ \ - const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ - const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ - const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29); \ - const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \ - \ - const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28); \ - const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28); \ - const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \ - const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \ - \ - const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ - const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ - \ - stp1_0 = _mm_add_epi16(stp2_0, stp2_3); \ - stp1_1 = 
_mm_add_epi16(stp2_1, stp2_2); \ - stp1_2 = _mm_sub_epi16(stp2_1, stp2_2); \ - stp1_3 = _mm_sub_epi16(stp2_0, stp2_3); \ - \ - tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \ - tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \ - tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \ - tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \ - \ - tmp0 = _mm_add_epi32(tmp0, rounding); \ - tmp1 = _mm_add_epi32(tmp1, rounding); \ - tmp2 = _mm_add_epi32(tmp2, rounding); \ - tmp3 = _mm_add_epi32(tmp3, rounding); \ - \ - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ - \ - stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ - stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ - \ - stp1_4 = stp2_4; \ - stp1_7 = stp2_7; \ - \ - stp1_8 = _mm_add_epi16(stp2_8, stp2_11); \ - stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \ - stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \ - stp1_11 = _mm_sub_epi16(stp2_8, stp2_11); \ - stp1_12 = _mm_sub_epi16(stp2_15, stp2_12); \ - stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \ - stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \ - stp1_15 = _mm_add_epi16(stp2_15, stp2_12); \ - \ - stp1_16 = stp2_16; \ - stp1_17 = stp2_17; \ - \ - MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4, \ - stg4_5, stg4_4, stg4_5, stp1_18, stp1_29, \ - stp1_19, stp1_28) \ - MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6, \ - stg4_4, stg4_6, stg4_4, stp1_20, stp1_27, \ - stp1_21, stp1_26) \ - \ - stp1_22 = stp2_22; \ - stp1_23 = stp2_23; \ - stp1_24 = stp2_24; \ - stp1_25 = stp2_25; \ - stp1_30 = stp2_30; \ - stp1_31 = stp2_31; \ -} \ -\ -/* Stage6 */ \ -{ \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ - const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ - const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \ - const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \ - \ - stp2_0 = _mm_add_epi16(stp1_0, 
stp1_7); \ - stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \ - stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \ - stp2_3 = _mm_add_epi16(stp1_3, stp1_4); \ - stp2_4 = _mm_sub_epi16(stp1_3, stp1_4); \ - stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \ - stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \ - stp2_7 = _mm_sub_epi16(stp1_0, stp1_7); \ - \ - stp2_8 = stp1_8; \ - stp2_9 = stp1_9; \ - stp2_14 = stp1_14; \ - stp2_15 = stp1_15; \ - \ - MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \ - stg6_0, stg4_0, stg6_0, stg4_0, stp2_10, \ - stp2_13, stp2_11, stp2_12) \ - \ - stp2_16 = _mm_add_epi16(stp1_16, stp1_23); \ - stp2_17 = _mm_add_epi16(stp1_17, stp1_22); \ - stp2_18 = _mm_add_epi16(stp1_18, stp1_21); \ - stp2_19 = _mm_add_epi16(stp1_19, stp1_20); \ - stp2_20 = _mm_sub_epi16(stp1_19, stp1_20); \ - stp2_21 = _mm_sub_epi16(stp1_18, stp1_21); \ - stp2_22 = _mm_sub_epi16(stp1_17, stp1_22); \ - stp2_23 = _mm_sub_epi16(stp1_16, stp1_23); \ - \ - stp2_24 = _mm_sub_epi16(stp1_31, stp1_24); \ - stp2_25 = _mm_sub_epi16(stp1_30, stp1_25); \ - stp2_26 = _mm_sub_epi16(stp1_29, stp1_26); \ - stp2_27 = _mm_sub_epi16(stp1_28, stp1_27); \ - stp2_28 = _mm_add_epi16(stp1_27, stp1_28); \ - stp2_29 = _mm_add_epi16(stp1_26, stp1_29); \ - stp2_30 = _mm_add_epi16(stp1_25, stp1_30); \ - stp2_31 = _mm_add_epi16(stp1_24, stp1_31); \ -} \ -\ -/* Stage7 */ \ -{ \ - const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \ - const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \ - const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ - const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ - \ - const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25); \ - const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \ - const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24); \ - const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24); \ - \ - stp1_0 = _mm_add_epi16(stp2_0, stp2_15); \ - stp1_1 = _mm_add_epi16(stp2_1, stp2_14); \ - stp1_2 = 
_mm_add_epi16(stp2_2, stp2_13); \ - stp1_3 = _mm_add_epi16(stp2_3, stp2_12); \ - stp1_4 = _mm_add_epi16(stp2_4, stp2_11); \ - stp1_5 = _mm_add_epi16(stp2_5, stp2_10); \ - stp1_6 = _mm_add_epi16(stp2_6, stp2_9); \ - stp1_7 = _mm_add_epi16(stp2_7, stp2_8); \ - stp1_8 = _mm_sub_epi16(stp2_7, stp2_8); \ - stp1_9 = _mm_sub_epi16(stp2_6, stp2_9); \ - stp1_10 = _mm_sub_epi16(stp2_5, stp2_10); \ - stp1_11 = _mm_sub_epi16(stp2_4, stp2_11); \ - stp1_12 = _mm_sub_epi16(stp2_3, stp2_12); \ - stp1_13 = _mm_sub_epi16(stp2_2, stp2_13); \ - stp1_14 = _mm_sub_epi16(stp2_1, stp2_14); \ - stp1_15 = _mm_sub_epi16(stp2_0, stp2_15); \ - \ - stp1_16 = stp2_16; \ - stp1_17 = stp2_17; \ - stp1_18 = stp2_18; \ - stp1_19 = stp2_19; \ - \ - MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0, \ - stg4_0, stg6_0, stg4_0, stp1_20, stp1_27, \ - stp1_21, stp1_26) \ - MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0, \ - stg4_0, stg6_0, stg4_0, stp1_22, stp1_25, \ - stp1_23, stp1_24) \ - \ - stp1_28 = stp2_28; \ - stp1_29 = stp2_29; \ - stp1_30 = stp2_30; \ - stp1_31 = stp2_31; \ -} - -// Only upper-left 8x8 has non-zero coeff -void vp10_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest, - int stride) { - const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); - const __m128i final_rounding = _mm_set1_epi16(1<<5); - - // vp10_idct constants for each stage - const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64); - const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64); - const __m128i stg1_6 = pair_set_epi16(cospi_7_64, -cospi_25_64); - const __m128i stg1_7 = pair_set_epi16(cospi_25_64, cospi_7_64); - const __m128i stg1_8 = pair_set_epi16(cospi_27_64, -cospi_5_64); - const __m128i stg1_9 = pair_set_epi16(cospi_5_64, cospi_27_64); - const __m128i stg1_14 = pair_set_epi16(cospi_3_64, -cospi_29_64); - const __m128i stg1_15 = pair_set_epi16(cospi_29_64, cospi_3_64); - - const __m128i stg2_0 = pair_set_epi16(cospi_30_64, -cospi_2_64); 
- const __m128i stg2_1 = pair_set_epi16(cospi_2_64, cospi_30_64); - const __m128i stg2_6 = pair_set_epi16(cospi_6_64, -cospi_26_64); - const __m128i stg2_7 = pair_set_epi16(cospi_26_64, cospi_6_64); - - const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64); - const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64); - const __m128i stg3_4 = pair_set_epi16(-cospi_4_64, cospi_28_64); - const __m128i stg3_5 = pair_set_epi16(cospi_28_64, cospi_4_64); - const __m128i stg3_6 = pair_set_epi16(-cospi_28_64, -cospi_4_64); - const __m128i stg3_8 = pair_set_epi16(-cospi_20_64, cospi_12_64); - const __m128i stg3_9 = pair_set_epi16(cospi_12_64, cospi_20_64); - const __m128i stg3_10 = pair_set_epi16(-cospi_12_64, -cospi_20_64); - - const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64); - const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64); - const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64); - const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64); - - const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64); - - __m128i in[32], col[32]; - __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7, - stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, - stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22, - stp1_23, stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29, - stp1_30, stp1_31; - __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7, - stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15, - stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22, - stp2_23, stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29, - stp2_30, stp2_31; - __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - int i; - - // Load input data. Only need to load the top left 8x8 block. 
- in[0] = _mm_load_si128((const __m128i *)input); - in[1] = _mm_load_si128((const __m128i *)(input + 32)); - in[2] = _mm_load_si128((const __m128i *)(input + 64)); - in[3] = _mm_load_si128((const __m128i *)(input + 96)); - in[4] = _mm_load_si128((const __m128i *)(input + 128)); - in[5] = _mm_load_si128((const __m128i *)(input + 160)); - in[6] = _mm_load_si128((const __m128i *)(input + 192)); - in[7] = _mm_load_si128((const __m128i *)(input + 224)); - - for (i = 8; i < 32; ++i) { - in[i] = _mm_setzero_si128(); - } - - array_transpose_8x8(in, in); - // TODO(hkuang): Following transposes are unnecessary. But remove them will - // lead to performance drop on some devices. - array_transpose_8x8(in + 8, in + 8); - array_transpose_8x8(in + 16, in + 16); - array_transpose_8x8(in + 24, in + 24); - - IDCT32_34 - - // 1_D: Store 32 intermediate results for each 8x32 block. - col[0] = _mm_add_epi16(stp1_0, stp1_31); - col[1] = _mm_add_epi16(stp1_1, stp1_30); - col[2] = _mm_add_epi16(stp1_2, stp1_29); - col[3] = _mm_add_epi16(stp1_3, stp1_28); - col[4] = _mm_add_epi16(stp1_4, stp1_27); - col[5] = _mm_add_epi16(stp1_5, stp1_26); - col[6] = _mm_add_epi16(stp1_6, stp1_25); - col[7] = _mm_add_epi16(stp1_7, stp1_24); - col[8] = _mm_add_epi16(stp1_8, stp1_23); - col[9] = _mm_add_epi16(stp1_9, stp1_22); - col[10] = _mm_add_epi16(stp1_10, stp1_21); - col[11] = _mm_add_epi16(stp1_11, stp1_20); - col[12] = _mm_add_epi16(stp1_12, stp1_19); - col[13] = _mm_add_epi16(stp1_13, stp1_18); - col[14] = _mm_add_epi16(stp1_14, stp1_17); - col[15] = _mm_add_epi16(stp1_15, stp1_16); - col[16] = _mm_sub_epi16(stp1_15, stp1_16); - col[17] = _mm_sub_epi16(stp1_14, stp1_17); - col[18] = _mm_sub_epi16(stp1_13, stp1_18); - col[19] = _mm_sub_epi16(stp1_12, stp1_19); - col[20] = _mm_sub_epi16(stp1_11, stp1_20); - col[21] = _mm_sub_epi16(stp1_10, stp1_21); - col[22] = _mm_sub_epi16(stp1_9, stp1_22); - col[23] = _mm_sub_epi16(stp1_8, stp1_23); - col[24] = _mm_sub_epi16(stp1_7, stp1_24); - col[25] = 
_mm_sub_epi16(stp1_6, stp1_25); - col[26] = _mm_sub_epi16(stp1_5, stp1_26); - col[27] = _mm_sub_epi16(stp1_4, stp1_27); - col[28] = _mm_sub_epi16(stp1_3, stp1_28); - col[29] = _mm_sub_epi16(stp1_2, stp1_29); - col[30] = _mm_sub_epi16(stp1_1, stp1_30); - col[31] = _mm_sub_epi16(stp1_0, stp1_31); - for (i = 0; i < 4; i++) { - int j; - const __m128i zero = _mm_setzero_si128(); - // Transpose 32x8 block to 8x32 block - array_transpose_8x8(col + i * 8, in); - IDCT32_34 - - // 2_D: Calculate the results and store them to destination. - in[0] = _mm_add_epi16(stp1_0, stp1_31); - in[1] = _mm_add_epi16(stp1_1, stp1_30); - in[2] = _mm_add_epi16(stp1_2, stp1_29); - in[3] = _mm_add_epi16(stp1_3, stp1_28); - in[4] = _mm_add_epi16(stp1_4, stp1_27); - in[5] = _mm_add_epi16(stp1_5, stp1_26); - in[6] = _mm_add_epi16(stp1_6, stp1_25); - in[7] = _mm_add_epi16(stp1_7, stp1_24); - in[8] = _mm_add_epi16(stp1_8, stp1_23); - in[9] = _mm_add_epi16(stp1_9, stp1_22); - in[10] = _mm_add_epi16(stp1_10, stp1_21); - in[11] = _mm_add_epi16(stp1_11, stp1_20); - in[12] = _mm_add_epi16(stp1_12, stp1_19); - in[13] = _mm_add_epi16(stp1_13, stp1_18); - in[14] = _mm_add_epi16(stp1_14, stp1_17); - in[15] = _mm_add_epi16(stp1_15, stp1_16); - in[16] = _mm_sub_epi16(stp1_15, stp1_16); - in[17] = _mm_sub_epi16(stp1_14, stp1_17); - in[18] = _mm_sub_epi16(stp1_13, stp1_18); - in[19] = _mm_sub_epi16(stp1_12, stp1_19); - in[20] = _mm_sub_epi16(stp1_11, stp1_20); - in[21] = _mm_sub_epi16(stp1_10, stp1_21); - in[22] = _mm_sub_epi16(stp1_9, stp1_22); - in[23] = _mm_sub_epi16(stp1_8, stp1_23); - in[24] = _mm_sub_epi16(stp1_7, stp1_24); - in[25] = _mm_sub_epi16(stp1_6, stp1_25); - in[26] = _mm_sub_epi16(stp1_5, stp1_26); - in[27] = _mm_sub_epi16(stp1_4, stp1_27); - in[28] = _mm_sub_epi16(stp1_3, stp1_28); - in[29] = _mm_sub_epi16(stp1_2, stp1_29); - in[30] = _mm_sub_epi16(stp1_1, stp1_30); - in[31] = _mm_sub_epi16(stp1_0, stp1_31); - - for (j = 0; j < 32; ++j) { - // Final rounding and shift - in[j] = 
_mm_adds_epi16(in[j], final_rounding); - in[j] = _mm_srai_epi16(in[j], 6); - RECON_AND_STORE(dest + j * stride, in[j]); - } - - dest += 8; - } -} - -void vp10_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest, - int stride) { - const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); - const __m128i final_rounding = _mm_set1_epi16(1 << 5); - const __m128i zero = _mm_setzero_si128(); - - // vp10_idct constants for each stage - const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64); - const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64); - const __m128i stg1_2 = pair_set_epi16(cospi_15_64, -cospi_17_64); - const __m128i stg1_3 = pair_set_epi16(cospi_17_64, cospi_15_64); - const __m128i stg1_4 = pair_set_epi16(cospi_23_64, -cospi_9_64); - const __m128i stg1_5 = pair_set_epi16(cospi_9_64, cospi_23_64); - const __m128i stg1_6 = pair_set_epi16(cospi_7_64, -cospi_25_64); - const __m128i stg1_7 = pair_set_epi16(cospi_25_64, cospi_7_64); - const __m128i stg1_8 = pair_set_epi16(cospi_27_64, -cospi_5_64); - const __m128i stg1_9 = pair_set_epi16(cospi_5_64, cospi_27_64); - const __m128i stg1_10 = pair_set_epi16(cospi_11_64, -cospi_21_64); - const __m128i stg1_11 = pair_set_epi16(cospi_21_64, cospi_11_64); - const __m128i stg1_12 = pair_set_epi16(cospi_19_64, -cospi_13_64); - const __m128i stg1_13 = pair_set_epi16(cospi_13_64, cospi_19_64); - const __m128i stg1_14 = pair_set_epi16(cospi_3_64, -cospi_29_64); - const __m128i stg1_15 = pair_set_epi16(cospi_29_64, cospi_3_64); - - const __m128i stg2_0 = pair_set_epi16(cospi_30_64, -cospi_2_64); - const __m128i stg2_1 = pair_set_epi16(cospi_2_64, cospi_30_64); - const __m128i stg2_2 = pair_set_epi16(cospi_14_64, -cospi_18_64); - const __m128i stg2_3 = pair_set_epi16(cospi_18_64, cospi_14_64); - const __m128i stg2_4 = pair_set_epi16(cospi_22_64, -cospi_10_64); - const __m128i stg2_5 = pair_set_epi16(cospi_10_64, cospi_22_64); - const __m128i stg2_6 = pair_set_epi16(cospi_6_64, -cospi_26_64); - const 
__m128i stg2_7 = pair_set_epi16(cospi_26_64, cospi_6_64); - - const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64); - const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64); - const __m128i stg3_2 = pair_set_epi16(cospi_12_64, -cospi_20_64); - const __m128i stg3_3 = pair_set_epi16(cospi_20_64, cospi_12_64); - const __m128i stg3_4 = pair_set_epi16(-cospi_4_64, cospi_28_64); - const __m128i stg3_5 = pair_set_epi16(cospi_28_64, cospi_4_64); - const __m128i stg3_6 = pair_set_epi16(-cospi_28_64, -cospi_4_64); - const __m128i stg3_8 = pair_set_epi16(-cospi_20_64, cospi_12_64); - const __m128i stg3_9 = pair_set_epi16(cospi_12_64, cospi_20_64); - const __m128i stg3_10 = pair_set_epi16(-cospi_12_64, -cospi_20_64); - - const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64); - const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i stg4_2 = pair_set_epi16(cospi_24_64, -cospi_8_64); - const __m128i stg4_3 = pair_set_epi16(cospi_8_64, cospi_24_64); - const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64); - const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64); - const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64); - - const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64); - - __m128i in[32], col[128], zero_idx[16]; - __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7, - stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, - stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22, - stp1_23, stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29, - stp1_30, stp1_31; - __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7, - stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15, - stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22, - stp2_23, stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29, - stp2_30, stp2_31; - __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - int i, j, i32; - - for (i = 
0; i < 4; i++) { - i32 = (i << 5); - // First 1-D vp10_idct - // Load input data. - LOAD_DQCOEFF(in[0], input); - LOAD_DQCOEFF(in[8], input); - LOAD_DQCOEFF(in[16], input); - LOAD_DQCOEFF(in[24], input); - LOAD_DQCOEFF(in[1], input); - LOAD_DQCOEFF(in[9], input); - LOAD_DQCOEFF(in[17], input); - LOAD_DQCOEFF(in[25], input); - LOAD_DQCOEFF(in[2], input); - LOAD_DQCOEFF(in[10], input); - LOAD_DQCOEFF(in[18], input); - LOAD_DQCOEFF(in[26], input); - LOAD_DQCOEFF(in[3], input); - LOAD_DQCOEFF(in[11], input); - LOAD_DQCOEFF(in[19], input); - LOAD_DQCOEFF(in[27], input); - - LOAD_DQCOEFF(in[4], input); - LOAD_DQCOEFF(in[12], input); - LOAD_DQCOEFF(in[20], input); - LOAD_DQCOEFF(in[28], input); - LOAD_DQCOEFF(in[5], input); - LOAD_DQCOEFF(in[13], input); - LOAD_DQCOEFF(in[21], input); - LOAD_DQCOEFF(in[29], input); - LOAD_DQCOEFF(in[6], input); - LOAD_DQCOEFF(in[14], input); - LOAD_DQCOEFF(in[22], input); - LOAD_DQCOEFF(in[30], input); - LOAD_DQCOEFF(in[7], input); - LOAD_DQCOEFF(in[15], input); - LOAD_DQCOEFF(in[23], input); - LOAD_DQCOEFF(in[31], input); - - // checking if all entries are zero - zero_idx[0] = _mm_or_si128(in[0], in[1]); - zero_idx[1] = _mm_or_si128(in[2], in[3]); - zero_idx[2] = _mm_or_si128(in[4], in[5]); - zero_idx[3] = _mm_or_si128(in[6], in[7]); - zero_idx[4] = _mm_or_si128(in[8], in[9]); - zero_idx[5] = _mm_or_si128(in[10], in[11]); - zero_idx[6] = _mm_or_si128(in[12], in[13]); - zero_idx[7] = _mm_or_si128(in[14], in[15]); - zero_idx[8] = _mm_or_si128(in[16], in[17]); - zero_idx[9] = _mm_or_si128(in[18], in[19]); - zero_idx[10] = _mm_or_si128(in[20], in[21]); - zero_idx[11] = _mm_or_si128(in[22], in[23]); - zero_idx[12] = _mm_or_si128(in[24], in[25]); - zero_idx[13] = _mm_or_si128(in[26], in[27]); - zero_idx[14] = _mm_or_si128(in[28], in[29]); - zero_idx[15] = _mm_or_si128(in[30], in[31]); - - zero_idx[0] = _mm_or_si128(zero_idx[0], zero_idx[1]); - zero_idx[1] = _mm_or_si128(zero_idx[2], zero_idx[3]); - zero_idx[2] = _mm_or_si128(zero_idx[4], 
zero_idx[5]); - zero_idx[3] = _mm_or_si128(zero_idx[6], zero_idx[7]); - zero_idx[4] = _mm_or_si128(zero_idx[8], zero_idx[9]); - zero_idx[5] = _mm_or_si128(zero_idx[10], zero_idx[11]); - zero_idx[6] = _mm_or_si128(zero_idx[12], zero_idx[13]); - zero_idx[7] = _mm_or_si128(zero_idx[14], zero_idx[15]); - - zero_idx[8] = _mm_or_si128(zero_idx[0], zero_idx[1]); - zero_idx[9] = _mm_or_si128(zero_idx[2], zero_idx[3]); - zero_idx[10] = _mm_or_si128(zero_idx[4], zero_idx[5]); - zero_idx[11] = _mm_or_si128(zero_idx[6], zero_idx[7]); - zero_idx[12] = _mm_or_si128(zero_idx[8], zero_idx[9]); - zero_idx[13] = _mm_or_si128(zero_idx[10], zero_idx[11]); - zero_idx[14] = _mm_or_si128(zero_idx[12], zero_idx[13]); - - if (_mm_movemask_epi8(_mm_cmpeq_epi32(zero_idx[14], zero)) == 0xFFFF) { - col[i32 + 0] = _mm_setzero_si128(); - col[i32 + 1] = _mm_setzero_si128(); - col[i32 + 2] = _mm_setzero_si128(); - col[i32 + 3] = _mm_setzero_si128(); - col[i32 + 4] = _mm_setzero_si128(); - col[i32 + 5] = _mm_setzero_si128(); - col[i32 + 6] = _mm_setzero_si128(); - col[i32 + 7] = _mm_setzero_si128(); - col[i32 + 8] = _mm_setzero_si128(); - col[i32 + 9] = _mm_setzero_si128(); - col[i32 + 10] = _mm_setzero_si128(); - col[i32 + 11] = _mm_setzero_si128(); - col[i32 + 12] = _mm_setzero_si128(); - col[i32 + 13] = _mm_setzero_si128(); - col[i32 + 14] = _mm_setzero_si128(); - col[i32 + 15] = _mm_setzero_si128(); - col[i32 + 16] = _mm_setzero_si128(); - col[i32 + 17] = _mm_setzero_si128(); - col[i32 + 18] = _mm_setzero_si128(); - col[i32 + 19] = _mm_setzero_si128(); - col[i32 + 20] = _mm_setzero_si128(); - col[i32 + 21] = _mm_setzero_si128(); - col[i32 + 22] = _mm_setzero_si128(); - col[i32 + 23] = _mm_setzero_si128(); - col[i32 + 24] = _mm_setzero_si128(); - col[i32 + 25] = _mm_setzero_si128(); - col[i32 + 26] = _mm_setzero_si128(); - col[i32 + 27] = _mm_setzero_si128(); - col[i32 + 28] = _mm_setzero_si128(); - col[i32 + 29] = _mm_setzero_si128(); - col[i32 + 30] = _mm_setzero_si128(); - col[i32 + 31] = 
_mm_setzero_si128(); - continue; - } - - // Transpose 32x8 block to 8x32 block - array_transpose_8x8(in, in); - array_transpose_8x8(in + 8, in + 8); - array_transpose_8x8(in + 16, in + 16); - array_transpose_8x8(in + 24, in + 24); - - IDCT32 - - // 1_D: Store 32 intermediate results for each 8x32 block. - col[i32 + 0] = _mm_add_epi16(stp1_0, stp1_31); - col[i32 + 1] = _mm_add_epi16(stp1_1, stp1_30); - col[i32 + 2] = _mm_add_epi16(stp1_2, stp1_29); - col[i32 + 3] = _mm_add_epi16(stp1_3, stp1_28); - col[i32 + 4] = _mm_add_epi16(stp1_4, stp1_27); - col[i32 + 5] = _mm_add_epi16(stp1_5, stp1_26); - col[i32 + 6] = _mm_add_epi16(stp1_6, stp1_25); - col[i32 + 7] = _mm_add_epi16(stp1_7, stp1_24); - col[i32 + 8] = _mm_add_epi16(stp1_8, stp1_23); - col[i32 + 9] = _mm_add_epi16(stp1_9, stp1_22); - col[i32 + 10] = _mm_add_epi16(stp1_10, stp1_21); - col[i32 + 11] = _mm_add_epi16(stp1_11, stp1_20); - col[i32 + 12] = _mm_add_epi16(stp1_12, stp1_19); - col[i32 + 13] = _mm_add_epi16(stp1_13, stp1_18); - col[i32 + 14] = _mm_add_epi16(stp1_14, stp1_17); - col[i32 + 15] = _mm_add_epi16(stp1_15, stp1_16); - col[i32 + 16] = _mm_sub_epi16(stp1_15, stp1_16); - col[i32 + 17] = _mm_sub_epi16(stp1_14, stp1_17); - col[i32 + 18] = _mm_sub_epi16(stp1_13, stp1_18); - col[i32 + 19] = _mm_sub_epi16(stp1_12, stp1_19); - col[i32 + 20] = _mm_sub_epi16(stp1_11, stp1_20); - col[i32 + 21] = _mm_sub_epi16(stp1_10, stp1_21); - col[i32 + 22] = _mm_sub_epi16(stp1_9, stp1_22); - col[i32 + 23] = _mm_sub_epi16(stp1_8, stp1_23); - col[i32 + 24] = _mm_sub_epi16(stp1_7, stp1_24); - col[i32 + 25] = _mm_sub_epi16(stp1_6, stp1_25); - col[i32 + 26] = _mm_sub_epi16(stp1_5, stp1_26); - col[i32 + 27] = _mm_sub_epi16(stp1_4, stp1_27); - col[i32 + 28] = _mm_sub_epi16(stp1_3, stp1_28); - col[i32 + 29] = _mm_sub_epi16(stp1_2, stp1_29); - col[i32 + 30] = _mm_sub_epi16(stp1_1, stp1_30); - col[i32 + 31] = _mm_sub_epi16(stp1_0, stp1_31); - } - for (i = 0; i < 4; i++) { - // Second 1-D vp10_idct - j = i << 3; - - // Transpose 
32x8 block to 8x32 block - array_transpose_8x8(col + j, in); - array_transpose_8x8(col + j + 32, in + 8); - array_transpose_8x8(col + j + 64, in + 16); - array_transpose_8x8(col + j + 96, in + 24); - - IDCT32 - - // 2_D: Calculate the results and store them to destination. - in[0] = _mm_add_epi16(stp1_0, stp1_31); - in[1] = _mm_add_epi16(stp1_1, stp1_30); - in[2] = _mm_add_epi16(stp1_2, stp1_29); - in[3] = _mm_add_epi16(stp1_3, stp1_28); - in[4] = _mm_add_epi16(stp1_4, stp1_27); - in[5] = _mm_add_epi16(stp1_5, stp1_26); - in[6] = _mm_add_epi16(stp1_6, stp1_25); - in[7] = _mm_add_epi16(stp1_7, stp1_24); - in[8] = _mm_add_epi16(stp1_8, stp1_23); - in[9] = _mm_add_epi16(stp1_9, stp1_22); - in[10] = _mm_add_epi16(stp1_10, stp1_21); - in[11] = _mm_add_epi16(stp1_11, stp1_20); - in[12] = _mm_add_epi16(stp1_12, stp1_19); - in[13] = _mm_add_epi16(stp1_13, stp1_18); - in[14] = _mm_add_epi16(stp1_14, stp1_17); - in[15] = _mm_add_epi16(stp1_15, stp1_16); - in[16] = _mm_sub_epi16(stp1_15, stp1_16); - in[17] = _mm_sub_epi16(stp1_14, stp1_17); - in[18] = _mm_sub_epi16(stp1_13, stp1_18); - in[19] = _mm_sub_epi16(stp1_12, stp1_19); - in[20] = _mm_sub_epi16(stp1_11, stp1_20); - in[21] = _mm_sub_epi16(stp1_10, stp1_21); - in[22] = _mm_sub_epi16(stp1_9, stp1_22); - in[23] = _mm_sub_epi16(stp1_8, stp1_23); - in[24] = _mm_sub_epi16(stp1_7, stp1_24); - in[25] = _mm_sub_epi16(stp1_6, stp1_25); - in[26] = _mm_sub_epi16(stp1_5, stp1_26); - in[27] = _mm_sub_epi16(stp1_4, stp1_27); - in[28] = _mm_sub_epi16(stp1_3, stp1_28); - in[29] = _mm_sub_epi16(stp1_2, stp1_29); - in[30] = _mm_sub_epi16(stp1_1, stp1_30); - in[31] = _mm_sub_epi16(stp1_0, stp1_31); - - for (j = 0; j < 32; ++j) { - // Final rounding and shift - in[j] = _mm_adds_epi16(in[j], final_rounding); - in[j] = _mm_srai_epi16(in[j], 6); - RECON_AND_STORE(dest + j * stride, in[j]); - } - - dest += 8; - } -} - -void vp10_idct32x32_1_add_sse2(const int16_t *input, - uint8_t *dest, - int stride) { - __m128i dc_value; - const __m128i zero 
= _mm_setzero_si128(); - int a, i; - - a = dct_const_round_shift(input[0] * cospi_16_64); - a = dct_const_round_shift(a * cospi_16_64); - a = ROUND_POWER_OF_TWO(a, 6); - - dc_value = _mm_set1_epi16(a); - - for (i = 0; i < 4; ++i) { - int j; - for (j = 0; j < 32; ++j) { - RECON_AND_STORE(dest + j * stride, dc_value); - } - dest += 8; - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -static INLINE __m128i clamp_high_sse2(__m128i value, int bd) { - __m128i ubounded, retval; - const __m128i zero = _mm_set1_epi16(0); - const __m128i one = _mm_set1_epi16(1); - const __m128i max = _mm_subs_epi16(_mm_slli_epi16(one, bd), one); - ubounded = _mm_cmpgt_epi16(value, max); - retval = _mm_andnot_si128(ubounded, value); - ubounded = _mm_and_si128(ubounded, max); - retval = _mm_or_si128(retval, ubounded); - retval = _mm_and_si128(retval, _mm_cmpgt_epi16(retval, zero)); - return retval; -} - -void vp10_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8, - int stride, int bd) { - tran_low_t out[4 * 4]; - tran_low_t *outptr = out; - int i, j; - __m128i inptr[4]; - __m128i sign_bits[2]; - __m128i temp_mm, min_input, max_input; - int test; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - int optimised_cols = 0; - const __m128i zero = _mm_set1_epi16(0); - const __m128i eight = _mm_set1_epi16(8); - const __m128i max = _mm_set1_epi16(12043); - const __m128i min = _mm_set1_epi16(-12043); - // Load input into __m128i - inptr[0] = _mm_loadu_si128((const __m128i *)input); - inptr[1] = _mm_loadu_si128((const __m128i *)(input + 4)); - inptr[2] = _mm_loadu_si128((const __m128i *)(input + 8)); - inptr[3] = _mm_loadu_si128((const __m128i *)(input + 12)); - - // Pack to 16 bits - inptr[0] = _mm_packs_epi32(inptr[0], inptr[1]); - inptr[1] = _mm_packs_epi32(inptr[2], inptr[3]); - - max_input = _mm_max_epi16(inptr[0], inptr[1]); - min_input = _mm_min_epi16(inptr[0], inptr[1]); - max_input = _mm_cmpgt_epi16(max_input, max); - min_input = _mm_cmplt_epi16(min_input, min); - temp_mm = 
_mm_or_si128(max_input, min_input); - test = _mm_movemask_epi8(temp_mm); - - if (!test) { - // Do the row transform - vp10_idct4_sse2(inptr); - - // Check the min & max values - max_input = _mm_max_epi16(inptr[0], inptr[1]); - min_input = _mm_min_epi16(inptr[0], inptr[1]); - max_input = _mm_cmpgt_epi16(max_input, max); - min_input = _mm_cmplt_epi16(min_input, min); - temp_mm = _mm_or_si128(max_input, min_input); - test = _mm_movemask_epi8(temp_mm); - - if (test) { - transpose_4x4(inptr); - sign_bits[0] = _mm_cmplt_epi16(inptr[0], zero); - sign_bits[1] = _mm_cmplt_epi16(inptr[1], zero); - inptr[3] = _mm_unpackhi_epi16(inptr[1], sign_bits[1]); - inptr[2] = _mm_unpacklo_epi16(inptr[1], sign_bits[1]); - inptr[1] = _mm_unpackhi_epi16(inptr[0], sign_bits[0]); - inptr[0] = _mm_unpacklo_epi16(inptr[0], sign_bits[0]); - _mm_storeu_si128((__m128i *)outptr, inptr[0]); - _mm_storeu_si128((__m128i *)(outptr + 4), inptr[1]); - _mm_storeu_si128((__m128i *)(outptr + 8), inptr[2]); - _mm_storeu_si128((__m128i *)(outptr + 12), inptr[3]); - } else { - // Set to use the optimised transform for the column - optimised_cols = 1; - } - } else { - // Run the un-optimised row transform - for (i = 0; i < 4; ++i) { - vp10_highbd_idct4_c(input, outptr, bd); - input += 4; - outptr += 4; - } - } - - if (optimised_cols) { - vp10_idct4_sse2(inptr); - - // Final round and shift - inptr[0] = _mm_add_epi16(inptr[0], eight); - inptr[1] = _mm_add_epi16(inptr[1], eight); - - inptr[0] = _mm_srai_epi16(inptr[0], 4); - inptr[1] = _mm_srai_epi16(inptr[1], 4); - - // Reconstruction and Store - { - __m128i d0 = _mm_loadl_epi64((const __m128i *)dest); - __m128i d2 = _mm_loadl_epi64((const __m128i *)(dest + stride * 2)); - d0 = _mm_unpacklo_epi64( - d0, _mm_loadl_epi64((const __m128i *)(dest + stride))); - d2 = _mm_unpacklo_epi64( - d2, _mm_loadl_epi64((const __m128i *)(dest + stride * 3))); - d0 = clamp_high_sse2(_mm_adds_epi16(d0, inptr[0]), bd); - d2 = clamp_high_sse2(_mm_adds_epi16(d2, inptr[1]), bd); - // 
store input0 - _mm_storel_epi64((__m128i *)dest, d0); - // store input1 - d0 = _mm_srli_si128(d0, 8); - _mm_storel_epi64((__m128i *)(dest + stride), d0); - // store input2 - _mm_storel_epi64((__m128i *)(dest + stride * 2), d2); - // store input3 - d2 = _mm_srli_si128(d2, 8); - _mm_storel_epi64((__m128i *)(dest + stride * 3), d2); - } - } else { - // Run the un-optimised column transform - tran_low_t temp_in[4], temp_out[4]; - // Columns - for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = out[j * 4 + i]; - vp10_highbd_idct4_c(temp_in, temp_out, bd); - for (j = 0; j < 4; ++j) { - dest[j * stride + i] = highbd_clip_pixel_add( - dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd); - } - } - } -} - -void vp10_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8, - int stride, int bd) { - tran_low_t out[8 * 8]; - tran_low_t *outptr = out; - int i, j, test; - __m128i inptr[8]; - __m128i min_input, max_input, temp1, temp2, sign_bits; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - const __m128i zero = _mm_set1_epi16(0); - const __m128i sixteen = _mm_set1_epi16(16); - const __m128i max = _mm_set1_epi16(6201); - const __m128i min = _mm_set1_epi16(-6201); - int optimised_cols = 0; - - // Load input into __m128i & pack to 16 bits - for (i = 0; i < 8; i++) { - temp1 = _mm_loadu_si128((const __m128i *)(input + 8 * i)); - temp2 = _mm_loadu_si128((const __m128i *)(input + 8 * i + 4)); - inptr[i] = _mm_packs_epi32(temp1, temp2); - } - - // Find the min & max for the row transform - max_input = _mm_max_epi16(inptr[0], inptr[1]); - min_input = _mm_min_epi16(inptr[0], inptr[1]); - for (i = 2; i < 8; i++) { - max_input = _mm_max_epi16(max_input, inptr[i]); - min_input = _mm_min_epi16(min_input, inptr[i]); - } - max_input = _mm_cmpgt_epi16(max_input, max); - min_input = _mm_cmplt_epi16(min_input, min); - temp1 = _mm_or_si128(max_input, min_input); - test = _mm_movemask_epi8(temp1); - - if (!test) { - // Do the row transform - 
vp10_idct8_sse2(inptr); - - // Find the min & max for the column transform - max_input = _mm_max_epi16(inptr[0], inptr[1]); - min_input = _mm_min_epi16(inptr[0], inptr[1]); - for (i = 2; i < 8; i++) { - max_input = _mm_max_epi16(max_input, inptr[i]); - min_input = _mm_min_epi16(min_input, inptr[i]); - } - max_input = _mm_cmpgt_epi16(max_input, max); - min_input = _mm_cmplt_epi16(min_input, min); - temp1 = _mm_or_si128(max_input, min_input); - test = _mm_movemask_epi8(temp1); - - if (test) { - array_transpose_8x8(inptr, inptr); - for (i = 0; i < 8; i++) { - sign_bits = _mm_cmplt_epi16(inptr[i], zero); - temp1 = _mm_unpackhi_epi16(inptr[i], sign_bits); - temp2 = _mm_unpacklo_epi16(inptr[i], sign_bits); - _mm_storeu_si128((__m128i *)(outptr + 4 * (2 * i + 1)), temp1); - _mm_storeu_si128((__m128i *)(outptr + 4 * (2 * i)), temp2); - } - } else { - // Set to use the optimised transform for the column - optimised_cols = 1; - } - } else { - // Run the un-optimised row transform - for (i = 0; i < 8; ++i) { - vp10_highbd_idct8_c(input, outptr, bd); - input += 8; - outptr += 8; - } - } - - if (optimised_cols) { - vp10_idct8_sse2(inptr); - - // Final round & shift and Reconstruction and Store - { - __m128i d[8]; - for (i = 0; i < 8; i++) { - inptr[i] = _mm_add_epi16(inptr[i], sixteen); - d[i] = _mm_loadu_si128((const __m128i *)(dest + stride*i)); - inptr[i] = _mm_srai_epi16(inptr[i], 5); - d[i] = clamp_high_sse2(_mm_adds_epi16(d[i], inptr[i]), bd); - // Store - _mm_storeu_si128((__m128i *)(dest + stride*i), d[i]); - } - } - } else { - // Run the un-optimised column transform - tran_low_t temp_in[8], temp_out[8]; - for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; - vp10_highbd_idct8_c(temp_in, temp_out, bd); - for (j = 0; j < 8; ++j) { - dest[j * stride + i] = highbd_clip_pixel_add( - dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd); - } - } - } -} - -void vp10_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8, - 
int stride, int bd) { - tran_low_t out[8 * 8] = { 0 }; - tran_low_t *outptr = out; - int i, j, test; - __m128i inptr[8]; - __m128i min_input, max_input, temp1, temp2, sign_bits; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - const __m128i zero = _mm_set1_epi16(0); - const __m128i sixteen = _mm_set1_epi16(16); - const __m128i max = _mm_set1_epi16(6201); - const __m128i min = _mm_set1_epi16(-6201); - int optimised_cols = 0; - - // Load input into __m128i & pack to 16 bits - for (i = 0; i < 8; i++) { - temp1 = _mm_loadu_si128((const __m128i *)(input + 8 * i)); - temp2 = _mm_loadu_si128((const __m128i *)(input + 8 * i + 4)); - inptr[i] = _mm_packs_epi32(temp1, temp2); - } - - // Find the min & max for the row transform - // only first 4 row has non-zero coefs - max_input = _mm_max_epi16(inptr[0], inptr[1]); - min_input = _mm_min_epi16(inptr[0], inptr[1]); - for (i = 2; i < 4; i++) { - max_input = _mm_max_epi16(max_input, inptr[i]); - min_input = _mm_min_epi16(min_input, inptr[i]); - } - max_input = _mm_cmpgt_epi16(max_input, max); - min_input = _mm_cmplt_epi16(min_input, min); - temp1 = _mm_or_si128(max_input, min_input); - test = _mm_movemask_epi8(temp1); - - if (!test) { - // Do the row transform - vp10_idct8_sse2(inptr); - - // Find the min & max for the column transform - // N.B. 
Only first 4 cols contain non-zero coeffs - max_input = _mm_max_epi16(inptr[0], inptr[1]); - min_input = _mm_min_epi16(inptr[0], inptr[1]); - for (i = 2; i < 8; i++) { - max_input = _mm_max_epi16(max_input, inptr[i]); - min_input = _mm_min_epi16(min_input, inptr[i]); - } - max_input = _mm_cmpgt_epi16(max_input, max); - min_input = _mm_cmplt_epi16(min_input, min); - temp1 = _mm_or_si128(max_input, min_input); - test = _mm_movemask_epi8(temp1); - - if (test) { - // Use fact only first 4 rows contain non-zero coeffs - array_transpose_4X8(inptr, inptr); - for (i = 0; i < 4; i++) { - sign_bits = _mm_cmplt_epi16(inptr[i], zero); - temp1 = _mm_unpackhi_epi16(inptr[i], sign_bits); - temp2 = _mm_unpacklo_epi16(inptr[i], sign_bits); - _mm_storeu_si128((__m128i *)(outptr + 4 * (2 * i + 1)), temp1); - _mm_storeu_si128((__m128i *)(outptr + 4 * (2 * i)), temp2); - } - } else { - // Set to use the optimised transform for the column - optimised_cols = 1; - } - } else { - // Run the un-optimised row transform - for (i = 0; i < 4; ++i) { - vp10_highbd_idct8_c(input, outptr, bd); - input += 8; - outptr += 8; - } - } - - if (optimised_cols) { - vp10_idct8_sse2(inptr); - - // Final round & shift and Reconstruction and Store - { - __m128i d[8]; - for (i = 0; i < 8; i++) { - inptr[i] = _mm_add_epi16(inptr[i], sixteen); - d[i] = _mm_loadu_si128((const __m128i *)(dest + stride*i)); - inptr[i] = _mm_srai_epi16(inptr[i], 5); - d[i] = clamp_high_sse2(_mm_adds_epi16(d[i], inptr[i]), bd); - // Store - _mm_storeu_si128((__m128i *)(dest + stride*i), d[i]); - } - } - } else { - // Run the un-optimised column transform - tran_low_t temp_in[8], temp_out[8]; - for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; - vp10_highbd_idct8_c(temp_in, temp_out, bd); - for (j = 0; j < 8; ++j) { - dest[j * stride + i] = highbd_clip_pixel_add( - dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd); - } - } - } -} - -void vp10_highbd_idct16x16_256_add_sse2(const tran_low_t 
*input, uint8_t *dest8, - int stride, int bd) { - tran_low_t out[16 * 16]; - tran_low_t *outptr = out; - int i, j, test; - __m128i inptr[32]; - __m128i min_input, max_input, temp1, temp2, sign_bits; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - const __m128i zero = _mm_set1_epi16(0); - const __m128i rounding = _mm_set1_epi16(32); - const __m128i max = _mm_set1_epi16(3155); - const __m128i min = _mm_set1_epi16(-3155); - int optimised_cols = 0; - - // Load input into __m128i & pack to 16 bits - for (i = 0; i < 16; i++) { - temp1 = _mm_loadu_si128((const __m128i *)(input + 16 * i)); - temp2 = _mm_loadu_si128((const __m128i *)(input + 16 * i + 4)); - inptr[i] = _mm_packs_epi32(temp1, temp2); - temp1 = _mm_loadu_si128((const __m128i *)(input + 16 * i + 8)); - temp2 = _mm_loadu_si128((const __m128i *)(input + 16 * i + 12)); - inptr[i + 16] = _mm_packs_epi32(temp1, temp2); - } - - // Find the min & max for the row transform - max_input = _mm_max_epi16(inptr[0], inptr[1]); - min_input = _mm_min_epi16(inptr[0], inptr[1]); - for (i = 2; i < 32; i++) { - max_input = _mm_max_epi16(max_input, inptr[i]); - min_input = _mm_min_epi16(min_input, inptr[i]); - } - max_input = _mm_cmpgt_epi16(max_input, max); - min_input = _mm_cmplt_epi16(min_input, min); - temp1 = _mm_or_si128(max_input, min_input); - test = _mm_movemask_epi8(temp1); - - if (!test) { - // Do the row transform - vp10_idct16_sse2(inptr, inptr + 16); - - // Find the min & max for the column transform - max_input = _mm_max_epi16(inptr[0], inptr[1]); - min_input = _mm_min_epi16(inptr[0], inptr[1]); - for (i = 2; i < 32; i++) { - max_input = _mm_max_epi16(max_input, inptr[i]); - min_input = _mm_min_epi16(min_input, inptr[i]); - } - max_input = _mm_cmpgt_epi16(max_input, max); - min_input = _mm_cmplt_epi16(min_input, min); - temp1 = _mm_or_si128(max_input, min_input); - test = _mm_movemask_epi8(temp1); - - if (test) { - array_transpose_16x16(inptr, inptr + 16); - for (i = 0; i < 16; i++) { - sign_bits = 
_mm_cmplt_epi16(inptr[i], zero); - temp1 = _mm_unpacklo_epi16(inptr[i], sign_bits); - temp2 = _mm_unpackhi_epi16(inptr[i], sign_bits); - _mm_storeu_si128((__m128i *)(outptr + 4 * (i * 4)), temp1); - _mm_storeu_si128((__m128i *)(outptr + 4 * (i * 4 + 1)), temp2); - sign_bits = _mm_cmplt_epi16(inptr[i + 16], zero); - temp1 = _mm_unpacklo_epi16(inptr[i + 16], sign_bits); - temp2 = _mm_unpackhi_epi16(inptr[i + 16], sign_bits); - _mm_storeu_si128((__m128i *)(outptr + 4 * (i * 4 + 2)), temp1); - _mm_storeu_si128((__m128i *)(outptr + 4 * (i * 4 + 3)), temp2); - } - } else { - // Set to use the optimised transform for the column - optimised_cols = 1; - } - } else { - // Run the un-optimised row transform - for (i = 0; i < 16; ++i) { - vp10_highbd_idct16_c(input, outptr, bd); - input += 16; - outptr += 16; - } - } - - if (optimised_cols) { - vp10_idct16_sse2(inptr, inptr + 16); - - // Final round & shift and Reconstruction and Store - { - __m128i d[2]; - for (i = 0; i < 16; i++) { - inptr[i ] = _mm_add_epi16(inptr[i ], rounding); - inptr[i+16] = _mm_add_epi16(inptr[i+16], rounding); - d[0] = _mm_loadu_si128((const __m128i *)(dest + stride*i)); - d[1] = _mm_loadu_si128((const __m128i *)(dest + stride*i + 8)); - inptr[i ] = _mm_srai_epi16(inptr[i ], 6); - inptr[i+16] = _mm_srai_epi16(inptr[i+16], 6); - d[0] = clamp_high_sse2(_mm_add_epi16(d[0], inptr[i ]), bd); - d[1] = clamp_high_sse2(_mm_add_epi16(d[1], inptr[i+16]), bd); - // Store - _mm_storeu_si128((__m128i *)(dest + stride*i), d[0]); - _mm_storeu_si128((__m128i *)(dest + stride*i + 8), d[1]); - } - } - } else { - // Run the un-optimised column transform - tran_low_t temp_in[16], temp_out[16]; - for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j * 16 + i]; - vp10_highbd_idct16_c(temp_in, temp_out, bd); - for (j = 0; j < 16; ++j) { - dest[j * stride + i] = highbd_clip_pixel_add( - dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd); - } - } - } -} - -void 
vp10_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8, - int stride, int bd) { - tran_low_t out[16 * 16] = { 0 }; - tran_low_t *outptr = out; - int i, j, test; - __m128i inptr[32]; - __m128i min_input, max_input, temp1, temp2, sign_bits; - uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - const __m128i zero = _mm_set1_epi16(0); - const __m128i rounding = _mm_set1_epi16(32); - const __m128i max = _mm_set1_epi16(3155); - const __m128i min = _mm_set1_epi16(-3155); - int optimised_cols = 0; - - // Load input into __m128i & pack to 16 bits - for (i = 0; i < 16; i++) { - temp1 = _mm_loadu_si128((const __m128i *)(input + 16 * i)); - temp2 = _mm_loadu_si128((const __m128i *)(input + 16 * i + 4)); - inptr[i] = _mm_packs_epi32(temp1, temp2); - temp1 = _mm_loadu_si128((const __m128i *)(input + 16 * i + 8)); - temp2 = _mm_loadu_si128((const __m128i *)(input + 16 * i + 12)); - inptr[i + 16] = _mm_packs_epi32(temp1, temp2); - } - - // Find the min & max for the row transform - // Since all non-zero dct coefficients are in upper-left 4x4 area, - // we only need to consider first 4 rows here. - max_input = _mm_max_epi16(inptr[0], inptr[1]); - min_input = _mm_min_epi16(inptr[0], inptr[1]); - for (i = 2; i < 4; i++) { - max_input = _mm_max_epi16(max_input, inptr[i]); - min_input = _mm_min_epi16(min_input, inptr[i]); - } - max_input = _mm_cmpgt_epi16(max_input, max); - min_input = _mm_cmplt_epi16(min_input, min); - temp1 = _mm_or_si128(max_input, min_input); - test = _mm_movemask_epi8(temp1); - - if (!test) { - // Do the row transform (N.B. This transposes inptr) - vp10_idct16_sse2(inptr, inptr + 16); - - // Find the min & max for the column transform - // N.B. 
Only first 4 cols contain non-zero coeffs - max_input = _mm_max_epi16(inptr[0], inptr[1]); - min_input = _mm_min_epi16(inptr[0], inptr[1]); - for (i = 2; i < 16; i++) { - max_input = _mm_max_epi16(max_input, inptr[i]); - min_input = _mm_min_epi16(min_input, inptr[i]); - } - max_input = _mm_cmpgt_epi16(max_input, max); - min_input = _mm_cmplt_epi16(min_input, min); - temp1 = _mm_or_si128(max_input, min_input); - test = _mm_movemask_epi8(temp1); - - if (test) { - // Use fact only first 4 rows contain non-zero coeffs - array_transpose_8x8(inptr, inptr); - array_transpose_8x8(inptr + 8, inptr + 16); - for (i = 0; i < 4; i++) { - sign_bits = _mm_cmplt_epi16(inptr[i], zero); - temp1 = _mm_unpacklo_epi16(inptr[i], sign_bits); - temp2 = _mm_unpackhi_epi16(inptr[i], sign_bits); - _mm_storeu_si128((__m128i *)(outptr + 4 * (i * 4)), temp1); - _mm_storeu_si128((__m128i *)(outptr + 4 * (i * 4 + 1)), temp2); - sign_bits = _mm_cmplt_epi16(inptr[i + 16], zero); - temp1 = _mm_unpacklo_epi16(inptr[i + 16], sign_bits); - temp2 = _mm_unpackhi_epi16(inptr[i + 16], sign_bits); - _mm_storeu_si128((__m128i *)(outptr + 4 * (i * 4 + 2)), temp1); - _mm_storeu_si128((__m128i *)(outptr + 4 * (i * 4 + 3)), temp2); - } - } else { - // Set to use the optimised transform for the column - optimised_cols = 1; - } - } else { - // Run the un-optimised row transform - for (i = 0; i < 4; ++i) { - vp10_highbd_idct16_c(input, outptr, bd); - input += 16; - outptr += 16; - } - } - - if (optimised_cols) { - vp10_idct16_sse2(inptr, inptr + 16); - - // Final round & shift and Reconstruction and Store - { - __m128i d[2]; - for (i = 0; i < 16; i++) { - inptr[i ] = _mm_add_epi16(inptr[i ], rounding); - inptr[i+16] = _mm_add_epi16(inptr[i+16], rounding); - d[0] = _mm_loadu_si128((const __m128i *)(dest + stride*i)); - d[1] = _mm_loadu_si128((const __m128i *)(dest + stride*i + 8)); - inptr[i ] = _mm_srai_epi16(inptr[i ], 6); - inptr[i+16] = _mm_srai_epi16(inptr[i+16], 6); - d[0] = clamp_high_sse2(_mm_add_epi16(d[0], 
inptr[i ]), bd); - d[1] = clamp_high_sse2(_mm_add_epi16(d[1], inptr[i+16]), bd); - // Store - _mm_storeu_si128((__m128i *)(dest + stride*i), d[0]); - _mm_storeu_si128((__m128i *)(dest + stride*i + 8), d[1]); - } - } - } else { - // Run the un-optimised column transform - tran_low_t temp_in[16], temp_out[16]; - for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j * 16 + i]; - vp10_highbd_idct16_c(temp_in, temp_out, bd); - for (j = 0; j < 16; ++j) { - dest[j * stride + i] = highbd_clip_pixel_add( - dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd); - } - } - } -} -#endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vp10/common/x86/vp10_inv_txfm_sse2.h b/libs/libvpx/vp10/common/x86/vp10_inv_txfm_sse2.h deleted file mode 100644 index b79781aeeb..0000000000 --- a/libs/libvpx/vp10/common/x86/vp10_inv_txfm_sse2.h +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VPX_DSP_X86_INV_TXFM_SSE2_H_ -#define VPX_DSP_X86_INV_TXFM_SSE2_H_ - -#include // SSE2 -#include "./vpx_config.h" -#include "vpx/vpx_integer.h" -#include "vp10/common/vp10_inv_txfm.h" - -// perform 8x8 transpose -static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) { - const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]); - const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]); - const __m128i tr0_2 = _mm_unpackhi_epi16(in[0], in[1]); - const __m128i tr0_3 = _mm_unpackhi_epi16(in[2], in[3]); - const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]); - const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]); - const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]); - const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]); - - const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); - const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_4, tr0_5); - const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); - const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_4, tr0_5); - const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_2, tr0_3); - const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); - const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_2, tr0_3); - const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); - - res[0] = _mm_unpacklo_epi64(tr1_0, tr1_1); - res[1] = _mm_unpackhi_epi64(tr1_0, tr1_1); - res[2] = _mm_unpacklo_epi64(tr1_2, tr1_3); - res[3] = _mm_unpackhi_epi64(tr1_2, tr1_3); - res[4] = _mm_unpacklo_epi64(tr1_4, tr1_5); - res[5] = _mm_unpackhi_epi64(tr1_4, tr1_5); - res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7); - res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7); -} - -#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1) \ - { \ - const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \ - const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \ - \ - in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); /* i1 i0 */ \ - in1 = _mm_unpackhi_epi32(tr0_0, tr0_1); /* i3 i2 */ \ - } - -static INLINE void array_transpose_4X8(__m128i *in, __m128i * out) { - const __m128i tr0_0 = 
_mm_unpacklo_epi16(in[0], in[1]); - const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]); - const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]); - const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]); - - const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); - const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); - const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); - const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); - - out[0] = _mm_unpacklo_epi64(tr1_0, tr1_4); - out[1] = _mm_unpackhi_epi64(tr1_0, tr1_4); - out[2] = _mm_unpacklo_epi64(tr1_2, tr1_6); - out[3] = _mm_unpackhi_epi64(tr1_2, tr1_6); -} - -static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) { - __m128i tbuf[8]; - array_transpose_8x8(res0, res0); - array_transpose_8x8(res1, tbuf); - array_transpose_8x8(res0 + 8, res1); - array_transpose_8x8(res1 + 8, res1 + 8); - - res0[8] = tbuf[0]; - res0[9] = tbuf[1]; - res0[10] = tbuf[2]; - res0[11] = tbuf[3]; - res0[12] = tbuf[4]; - res0[13] = tbuf[5]; - res0[14] = tbuf[6]; - res0[15] = tbuf[7]; -} - -static INLINE void load_buffer_8x16(const int16_t *input, __m128i *in) { - in[0] = _mm_load_si128((const __m128i *)(input + 0 * 16)); - in[1] = _mm_load_si128((const __m128i *)(input + 1 * 16)); - in[2] = _mm_load_si128((const __m128i *)(input + 2 * 16)); - in[3] = _mm_load_si128((const __m128i *)(input + 3 * 16)); - in[4] = _mm_load_si128((const __m128i *)(input + 4 * 16)); - in[5] = _mm_load_si128((const __m128i *)(input + 5 * 16)); - in[6] = _mm_load_si128((const __m128i *)(input + 6 * 16)); - in[7] = _mm_load_si128((const __m128i *)(input + 7 * 16)); - - in[8] = _mm_load_si128((const __m128i *)(input + 8 * 16)); - in[9] = _mm_load_si128((const __m128i *)(input + 9 * 16)); - in[10] = _mm_load_si128((const __m128i *)(input + 10 * 16)); - in[11] = _mm_load_si128((const __m128i *)(input + 11 * 16)); - in[12] = _mm_load_si128((const __m128i *)(input + 12 * 16)); - in[13] = _mm_load_si128((const __m128i *)(input + 13 * 16)); - 
in[14] = _mm_load_si128((const __m128i *)(input + 14 * 16)); - in[15] = _mm_load_si128((const __m128i *)(input + 15 * 16)); -} - -#define RECON_AND_STORE(dest, in_x) \ - { \ - __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \ - d0 = _mm_unpacklo_epi8(d0, zero); \ - d0 = _mm_add_epi16(in_x, d0); \ - d0 = _mm_packus_epi16(d0, d0); \ - _mm_storel_epi64((__m128i *)(dest), d0); \ - } - -static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) { - const __m128i final_rounding = _mm_set1_epi16(1<<5); - const __m128i zero = _mm_setzero_si128(); - // Final rounding and shift - in[0] = _mm_adds_epi16(in[0], final_rounding); - in[1] = _mm_adds_epi16(in[1], final_rounding); - in[2] = _mm_adds_epi16(in[2], final_rounding); - in[3] = _mm_adds_epi16(in[3], final_rounding); - in[4] = _mm_adds_epi16(in[4], final_rounding); - in[5] = _mm_adds_epi16(in[5], final_rounding); - in[6] = _mm_adds_epi16(in[6], final_rounding); - in[7] = _mm_adds_epi16(in[7], final_rounding); - in[8] = _mm_adds_epi16(in[8], final_rounding); - in[9] = _mm_adds_epi16(in[9], final_rounding); - in[10] = _mm_adds_epi16(in[10], final_rounding); - in[11] = _mm_adds_epi16(in[11], final_rounding); - in[12] = _mm_adds_epi16(in[12], final_rounding); - in[13] = _mm_adds_epi16(in[13], final_rounding); - in[14] = _mm_adds_epi16(in[14], final_rounding); - in[15] = _mm_adds_epi16(in[15], final_rounding); - - in[0] = _mm_srai_epi16(in[0], 6); - in[1] = _mm_srai_epi16(in[1], 6); - in[2] = _mm_srai_epi16(in[2], 6); - in[3] = _mm_srai_epi16(in[3], 6); - in[4] = _mm_srai_epi16(in[4], 6); - in[5] = _mm_srai_epi16(in[5], 6); - in[6] = _mm_srai_epi16(in[6], 6); - in[7] = _mm_srai_epi16(in[7], 6); - in[8] = _mm_srai_epi16(in[8], 6); - in[9] = _mm_srai_epi16(in[9], 6); - in[10] = _mm_srai_epi16(in[10], 6); - in[11] = _mm_srai_epi16(in[11], 6); - in[12] = _mm_srai_epi16(in[12], 6); - in[13] = _mm_srai_epi16(in[13], 6); - in[14] = _mm_srai_epi16(in[14], 6); - in[15] = _mm_srai_epi16(in[15], 6); - - 
RECON_AND_STORE(dest + 0 * stride, in[0]); - RECON_AND_STORE(dest + 1 * stride, in[1]); - RECON_AND_STORE(dest + 2 * stride, in[2]); - RECON_AND_STORE(dest + 3 * stride, in[3]); - RECON_AND_STORE(dest + 4 * stride, in[4]); - RECON_AND_STORE(dest + 5 * stride, in[5]); - RECON_AND_STORE(dest + 6 * stride, in[6]); - RECON_AND_STORE(dest + 7 * stride, in[7]); - RECON_AND_STORE(dest + 8 * stride, in[8]); - RECON_AND_STORE(dest + 9 * stride, in[9]); - RECON_AND_STORE(dest + 10 * stride, in[10]); - RECON_AND_STORE(dest + 11 * stride, in[11]); - RECON_AND_STORE(dest + 12 * stride, in[12]); - RECON_AND_STORE(dest + 13 * stride, in[13]); - RECON_AND_STORE(dest + 14 * stride, in[14]); - RECON_AND_STORE(dest + 15 * stride, in[15]); -} - -void idct4_sse2(__m128i *in); -void idct8_sse2(__m128i *in); -void idct16_sse2(__m128i *in0, __m128i *in1); -void iadst4_sse2(__m128i *in); -void iadst8_sse2(__m128i *in); -void iadst16_sse2(__m128i *in0, __m128i *in1); - -#endif // VPX_DSP_X86_INV_TXFM_SSE2_H_ diff --git a/libs/libvpx/vp10/decoder/decodeframe.c b/libs/libvpx/vp10/decoder/decodeframe.c deleted file mode 100644 index 1c3f182390..0000000000 --- a/libs/libvpx/vp10/decoder/decodeframe.c +++ /dev/null @@ -1,2433 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include // qsort() - -#include "./vp10_rtcd.h" -#include "./vpx_dsp_rtcd.h" -#include "./vpx_scale_rtcd.h" - -#include "vpx_dsp/bitreader_buffer.h" -#include "vpx_dsp/bitreader.h" -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/mem.h" -#include "vpx_ports/mem_ops.h" -#include "vpx_scale/vpx_scale.h" -#include "vpx_util/vpx_thread.h" - -#include "vp10/common/alloccommon.h" -#include "vp10/common/common.h" -#include "vp10/common/entropy.h" -#include "vp10/common/entropymode.h" -#include "vp10/common/idct.h" -#include "vp10/common/thread_common.h" -#include "vp10/common/pred_common.h" -#include "vp10/common/quant_common.h" -#include "vp10/common/reconintra.h" -#include "vp10/common/reconinter.h" -#include "vp10/common/seg_common.h" -#include "vp10/common/tile_common.h" - -#include "vp10/decoder/decodeframe.h" -#include "vp10/decoder/detokenize.h" -#include "vp10/decoder/decodemv.h" -#include "vp10/decoder/decoder.h" -#include "vp10/decoder/dsubexp.h" - -#define MAX_VP9_HEADER_SIZE 80 - -static int is_compound_reference_allowed(const VP10_COMMON *cm) { - int i; - if (frame_is_intra_only(cm)) - return 0; - for (i = 1; i < REFS_PER_FRAME; ++i) - if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1]) - return 1; - - return 0; -} - -static void setup_compound_reference_mode(VP10_COMMON *cm) { - if (cm->ref_frame_sign_bias[LAST_FRAME] == - cm->ref_frame_sign_bias[GOLDEN_FRAME]) { - cm->comp_fixed_ref = ALTREF_FRAME; - cm->comp_var_ref[0] = LAST_FRAME; - cm->comp_var_ref[1] = GOLDEN_FRAME; - } else if (cm->ref_frame_sign_bias[LAST_FRAME] == - cm->ref_frame_sign_bias[ALTREF_FRAME]) { - cm->comp_fixed_ref = GOLDEN_FRAME; - cm->comp_var_ref[0] = LAST_FRAME; - cm->comp_var_ref[1] = ALTREF_FRAME; - } else { - cm->comp_fixed_ref = LAST_FRAME; - cm->comp_var_ref[0] = GOLDEN_FRAME; - cm->comp_var_ref[1] = ALTREF_FRAME; - } -} - -static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) { - 
return len != 0 && len <= (size_t)(end - start); -} - -static int decode_unsigned_max(struct vpx_read_bit_buffer *rb, int max) { - const int data = vpx_rb_read_literal(rb, get_unsigned_bits(max)); - return data > max ? max : data; -} - -#if CONFIG_MISC_FIXES -static TX_MODE read_tx_mode(struct vpx_read_bit_buffer *rb) { - return vpx_rb_read_bit(rb) ? TX_MODE_SELECT : vpx_rb_read_literal(rb, 2); -} -#else -static TX_MODE read_tx_mode(vpx_reader *r) { - TX_MODE tx_mode = vpx_read_literal(r, 2); - if (tx_mode == ALLOW_32X32) - tx_mode += vpx_read_bit(r); - return tx_mode; -} -#endif - -static void read_tx_mode_probs(struct tx_probs *tx_probs, vpx_reader *r) { - int i, j; - - for (i = 0; i < TX_SIZE_CONTEXTS; ++i) - for (j = 0; j < TX_SIZES - 3; ++j) - vp10_diff_update_prob(r, &tx_probs->p8x8[i][j]); - - for (i = 0; i < TX_SIZE_CONTEXTS; ++i) - for (j = 0; j < TX_SIZES - 2; ++j) - vp10_diff_update_prob(r, &tx_probs->p16x16[i][j]); - - for (i = 0; i < TX_SIZE_CONTEXTS; ++i) - for (j = 0; j < TX_SIZES - 1; ++j) - vp10_diff_update_prob(r, &tx_probs->p32x32[i][j]); -} - -static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vpx_reader *r) { - int i, j; - for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) - for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i) - vp10_diff_update_prob(r, &fc->switchable_interp_prob[j][i]); -} - -static void read_inter_mode_probs(FRAME_CONTEXT *fc, vpx_reader *r) { - int i, j; - for (i = 0; i < INTER_MODE_CONTEXTS; ++i) - for (j = 0; j < INTER_MODES - 1; ++j) - vp10_diff_update_prob(r, &fc->inter_mode_probs[i][j]); -} - -#if CONFIG_MISC_FIXES -static REFERENCE_MODE read_frame_reference_mode(const VP10_COMMON *cm, - struct vpx_read_bit_buffer *rb) { - if (is_compound_reference_allowed(cm)) { - return vpx_rb_read_bit(rb) ? REFERENCE_MODE_SELECT - : (vpx_rb_read_bit(rb) ? 
COMPOUND_REFERENCE - : SINGLE_REFERENCE); - } else { - return SINGLE_REFERENCE; - } -} -#else -static REFERENCE_MODE read_frame_reference_mode(const VP10_COMMON *cm, - vpx_reader *r) { - if (is_compound_reference_allowed(cm)) { - return vpx_read_bit(r) ? (vpx_read_bit(r) ? REFERENCE_MODE_SELECT - : COMPOUND_REFERENCE) - : SINGLE_REFERENCE; - } else { - return SINGLE_REFERENCE; - } -} -#endif - -static void read_frame_reference_mode_probs(VP10_COMMON *cm, vpx_reader *r) { - FRAME_CONTEXT *const fc = cm->fc; - int i; - - if (cm->reference_mode == REFERENCE_MODE_SELECT) - for (i = 0; i < COMP_INTER_CONTEXTS; ++i) - vp10_diff_update_prob(r, &fc->comp_inter_prob[i]); - - if (cm->reference_mode != COMPOUND_REFERENCE) - for (i = 0; i < REF_CONTEXTS; ++i) { - vp10_diff_update_prob(r, &fc->single_ref_prob[i][0]); - vp10_diff_update_prob(r, &fc->single_ref_prob[i][1]); - } - - if (cm->reference_mode != SINGLE_REFERENCE) - for (i = 0; i < REF_CONTEXTS; ++i) - vp10_diff_update_prob(r, &fc->comp_ref_prob[i]); -} - -static void update_mv_probs(vpx_prob *p, int n, vpx_reader *r) { - int i; - for (i = 0; i < n; ++i) -#if CONFIG_MISC_FIXES - vp10_diff_update_prob(r, &p[i]); -#else - if (vpx_read(r, MV_UPDATE_PROB)) - p[i] = (vpx_read_literal(r, 7) << 1) | 1; -#endif -} - -static void read_mv_probs(nmv_context *ctx, int allow_hp, vpx_reader *r) { - int i, j; - - update_mv_probs(ctx->joints, MV_JOINTS - 1, r); - - for (i = 0; i < 2; ++i) { - nmv_component *const comp_ctx = &ctx->comps[i]; - update_mv_probs(&comp_ctx->sign, 1, r); - update_mv_probs(comp_ctx->classes, MV_CLASSES - 1, r); - update_mv_probs(comp_ctx->class0, CLASS0_SIZE - 1, r); - update_mv_probs(comp_ctx->bits, MV_OFFSET_BITS, r); - } - - for (i = 0; i < 2; ++i) { - nmv_component *const comp_ctx = &ctx->comps[i]; - for (j = 0; j < CLASS0_SIZE; ++j) - update_mv_probs(comp_ctx->class0_fp[j], MV_FP_SIZE - 1, r); - update_mv_probs(comp_ctx->fp, 3, r); - } - - if (allow_hp) { - for (i = 0; i < 2; ++i) { - nmv_component 
*const comp_ctx = &ctx->comps[i]; - update_mv_probs(&comp_ctx->class0_hp, 1, r); - update_mv_probs(&comp_ctx->hp, 1, r); - } - } -} - -static void inverse_transform_block_inter(MACROBLOCKD* xd, int plane, - const TX_SIZE tx_size, - uint8_t *dst, int stride, - int eob, int block) { - struct macroblockd_plane *const pd = &xd->plane[plane]; - TX_TYPE tx_type = get_tx_type(pd->plane_type, xd, block); - const int seg_id = xd->mi[0]->mbmi.segment_id; - if (eob > 0) { - tran_low_t *const dqcoeff = pd->dqcoeff; -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - switch (tx_size) { - case TX_4X4: - vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, xd->bd, - tx_type, xd->lossless[seg_id]); - break; - case TX_8X8: - vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, xd->bd, - tx_type); - break; - case TX_16X16: - vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, xd->bd, - tx_type); - break; - case TX_32X32: - vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd, - tx_type); - break; - default: - assert(0 && "Invalid transform size"); - return; - } - } else { -#endif // CONFIG_VP9_HIGHBITDEPTH - switch (tx_size) { - case TX_4X4: - vp10_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, tx_type, - xd->lossless[seg_id]); - break; - case TX_8X8: - vp10_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, tx_type); - break; - case TX_16X16: - vp10_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, tx_type); - break; - case TX_32X32: - vp10_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type); - break; - default: - assert(0 && "Invalid transform size"); - return; - } -#if CONFIG_VP9_HIGHBITDEPTH - } -#endif // CONFIG_VP9_HIGHBITDEPTH - - if (eob == 1) { - dqcoeff[0] = 0; - } else { - if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10) - memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0])); - else if (tx_size == TX_32X32 && eob <= 34) - memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0])); - else - memset(dqcoeff, 0, (16 << (tx_size 
<< 1)) * sizeof(dqcoeff[0])); - } - } -} - -static void inverse_transform_block_intra(MACROBLOCKD* xd, int plane, - const TX_TYPE tx_type, - const TX_SIZE tx_size, - uint8_t *dst, int stride, - int eob) { - struct macroblockd_plane *const pd = &xd->plane[plane]; - const int seg_id = xd->mi[0]->mbmi.segment_id; - if (eob > 0) { - tran_low_t *const dqcoeff = pd->dqcoeff; -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - switch (tx_size) { - case TX_4X4: - vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, xd->bd, - tx_type, xd->lossless[seg_id]); - break; - case TX_8X8: - vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, xd->bd, - tx_type); - break; - case TX_16X16: - vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, xd->bd, - tx_type); - break; - case TX_32X32: - vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd, - tx_type); - break; - default: - assert(0 && "Invalid transform size"); - return; - } - } else { -#endif // CONFIG_VP9_HIGHBITDEPTH - switch (tx_size) { - case TX_4X4: - vp10_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, tx_type, - xd->lossless[seg_id]); - break; - case TX_8X8: - vp10_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, tx_type); - break; - case TX_16X16: - vp10_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, tx_type); - break; - case TX_32X32: - vp10_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type); - break; - default: - assert(0 && "Invalid transform size"); - return; - } -#if CONFIG_VP9_HIGHBITDEPTH - } -#endif // CONFIG_VP9_HIGHBITDEPTH - - if (eob == 1) { - dqcoeff[0] = 0; - } else { - if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10) - memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0])); - else if (tx_size == TX_32X32 && eob <= 34) - memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0])); - else - memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0])); - } - } -} - -static void predict_and_reconstruct_intra_block(MACROBLOCKD *const xd, - vpx_reader *r, - 
MB_MODE_INFO *const mbmi, - int plane, - int row, int col, - TX_SIZE tx_size) { - struct macroblockd_plane *const pd = &xd->plane[plane]; - PREDICTION_MODE mode = (plane == 0) ? mbmi->mode : mbmi->uv_mode; - PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV; - uint8_t *dst; - int block_idx = (row << 1) + col; - dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col]; - - if (mbmi->sb_type < BLOCK_8X8) - if (plane == 0) - mode = xd->mi[0]->bmi[(row << 1) + col].as_mode; - - vp10_predict_intra_block(xd, pd->n4_wl, pd->n4_hl, tx_size, mode, - dst, pd->dst.stride, dst, pd->dst.stride, - col, row, plane); - - if (!mbmi->skip) { - TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx); - const scan_order *sc = get_scan(tx_size, tx_type); - const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size, - r, mbmi->segment_id); - inverse_transform_block_intra(xd, plane, tx_type, tx_size, - dst, pd->dst.stride, eob); - } -} - -static int reconstruct_inter_block(MACROBLOCKD *const xd, vpx_reader *r, - MB_MODE_INFO *const mbmi, int plane, - int row, int col, TX_SIZE tx_size) { - struct macroblockd_plane *const pd = &xd->plane[plane]; - PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV; - int block_idx = (row << 1) + col; - TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx); - const scan_order *sc = get_scan(tx_size, tx_type); - const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size, r, - mbmi->segment_id); - - inverse_transform_block_inter(xd, plane, tx_size, - &pd->dst.buf[4 * row * pd->dst.stride + 4 * col], - pd->dst.stride, eob, block_idx); - return eob; -} - -static void build_mc_border(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - int x, int y, int b_w, int b_h, int w, int h) { - // Get a pointer to the start of the real data for this row. 
- const uint8_t *ref_row = src - x - y * src_stride; - - if (y >= h) - ref_row += (h - 1) * src_stride; - else if (y > 0) - ref_row += y * src_stride; - - do { - int right = 0, copy; - int left = x < 0 ? -x : 0; - - if (left > b_w) - left = b_w; - - if (x + b_w > w) - right = x + b_w - w; - - if (right > b_w) - right = b_w; - - copy = b_w - left - right; - - if (left) - memset(dst, ref_row[0], left); - - if (copy) - memcpy(dst + left, ref_row + x + left, copy); - - if (right) - memset(dst + left + copy, ref_row[w - 1], right); - - dst += dst_stride; - ++y; - - if (y > 0 && y < h) - ref_row += src_stride; - } while (--b_h); -} - -#if CONFIG_VP9_HIGHBITDEPTH -static void high_build_mc_border(const uint8_t *src8, int src_stride, - uint16_t *dst, int dst_stride, - int x, int y, int b_w, int b_h, - int w, int h) { - // Get a pointer to the start of the real data for this row. - const uint16_t *src = CONVERT_TO_SHORTPTR(src8); - const uint16_t *ref_row = src - x - y * src_stride; - - if (y >= h) - ref_row += (h - 1) * src_stride; - else if (y > 0) - ref_row += y * src_stride; - - do { - int right = 0, copy; - int left = x < 0 ? 
-x : 0; - - if (left > b_w) - left = b_w; - - if (x + b_w > w) - right = x + b_w - w; - - if (right > b_w) - right = b_w; - - copy = b_w - left - right; - - if (left) - vpx_memset16(dst, ref_row[0], left); - - if (copy) - memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t)); - - if (right) - vpx_memset16(dst + left + copy, ref_row[w - 1], right); - - dst += dst_stride; - ++y; - - if (y > 0 && y < h) - ref_row += src_stride; - } while (--b_h); -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -#if CONFIG_VP9_HIGHBITDEPTH -static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride, - int x0, int y0, int b_w, int b_h, - int frame_width, int frame_height, - int border_offset, - uint8_t *const dst, int dst_buf_stride, - int subpel_x, int subpel_y, - const InterpKernel *kernel, - const struct scale_factors *sf, - MACROBLOCKD *xd, - int w, int h, int ref, int xs, int ys) { - DECLARE_ALIGNED(16, uint16_t, mc_buf_high[80 * 2 * 80 * 2]); - const uint8_t *buf_ptr; - - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - high_build_mc_border(buf_ptr1, pre_buf_stride, mc_buf_high, b_w, - x0, y0, b_w, b_h, frame_width, frame_height); - buf_ptr = CONVERT_TO_BYTEPTR(mc_buf_high) + border_offset; - } else { - build_mc_border(buf_ptr1, pre_buf_stride, (uint8_t *)mc_buf_high, b_w, - x0, y0, b_w, b_h, frame_width, frame_height); - buf_ptr = ((uint8_t *)mc_buf_high) + border_offset; - } - - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - high_inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, - subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd); - } else { - inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, - subpel_y, sf, w, h, ref, kernel, xs, ys); - } -} -#else -static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride, - int x0, int y0, int b_w, int b_h, - int frame_width, int frame_height, - int border_offset, - uint8_t *const dst, int dst_buf_stride, - int subpel_x, int subpel_y, - const InterpKernel *kernel, - const 
struct scale_factors *sf, - int w, int h, int ref, int xs, int ys) { - DECLARE_ALIGNED(16, uint8_t, mc_buf[80 * 2 * 80 * 2]); - const uint8_t *buf_ptr; - - build_mc_border(buf_ptr1, pre_buf_stride, mc_buf, b_w, - x0, y0, b_w, b_h, frame_width, frame_height); - buf_ptr = mc_buf + border_offset; - - inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, - subpel_y, sf, w, h, ref, kernel, xs, ys); -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -static void dec_build_inter_predictors(VP10Decoder *const pbi, MACROBLOCKD *xd, - int plane, int bw, int bh, int x, - int y, int w, int h, int mi_x, int mi_y, - const InterpKernel *kernel, - const struct scale_factors *sf, - struct buf_2d *pre_buf, - struct buf_2d *dst_buf, const MV* mv, - RefCntBuffer *ref_frame_buf, - int is_scaled, int ref) { - VP10_COMMON *const cm = &pbi->common; - struct macroblockd_plane *const pd = &xd->plane[plane]; - uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; - MV32 scaled_mv; - int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, - buf_stride, subpel_x, subpel_y; - uint8_t *ref_frame, *buf_ptr; - - // Get reference frame pointer, width and height. - if (plane == 0) { - frame_width = ref_frame_buf->buf.y_crop_width; - frame_height = ref_frame_buf->buf.y_crop_height; - ref_frame = ref_frame_buf->buf.y_buffer; - } else { - frame_width = ref_frame_buf->buf.uv_crop_width; - frame_height = ref_frame_buf->buf.uv_crop_height; - ref_frame = plane == 1 ? ref_frame_buf->buf.u_buffer - : ref_frame_buf->buf.v_buffer; - } - - if (is_scaled) { - const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, mv, bw, bh, - pd->subsampling_x, - pd->subsampling_y); - // Co-ordinate of containing block to pixel precision. - int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)); - int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)); - - // Co-ordinate of the block to 1/16th pixel precision. 
- x0_16 = (x_start + x) << SUBPEL_BITS; - y0_16 = (y_start + y) << SUBPEL_BITS; - - // Co-ordinate of current block in reference frame - // to 1/16th pixel precision. - x0_16 = sf->scale_value_x(x0_16, sf); - y0_16 = sf->scale_value_y(y0_16, sf); - - // Map the top left corner of the block into the reference frame. - x0 = sf->scale_value_x(x_start + x, sf); - y0 = sf->scale_value_y(y_start + y, sf); - - // Scale the MV and incorporate the sub-pixel offset of the block - // in the reference frame. - scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf); - xs = sf->x_step_q4; - ys = sf->y_step_q4; - } else { - // Co-ordinate of containing block to pixel precision. - x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x; - y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y; - - // Co-ordinate of the block to 1/16th pixel precision. - x0_16 = x0 << SUBPEL_BITS; - y0_16 = y0 << SUBPEL_BITS; - - scaled_mv.row = mv->row * (1 << (1 - pd->subsampling_y)); - scaled_mv.col = mv->col * (1 << (1 - pd->subsampling_x)); - xs = ys = 16; - } - subpel_x = scaled_mv.col & SUBPEL_MASK; - subpel_y = scaled_mv.row & SUBPEL_MASK; - - // Calculate the top left corner of the best matching block in the - // reference frame. - x0 += scaled_mv.col >> SUBPEL_BITS; - y0 += scaled_mv.row >> SUBPEL_BITS; - x0_16 += scaled_mv.col; - y0_16 += scaled_mv.row; - - // Get reference block pointer. - buf_ptr = ref_frame + y0 * pre_buf->stride + x0; - buf_stride = pre_buf->stride; - - // Do border extension if there is motion or the - // width/height is not a multiple of 8 pixels. - if (is_scaled || scaled_mv.col || scaled_mv.row || - (frame_width & 0x7) || (frame_height & 0x7)) { - int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1; - - // Get reference block bottom right horizontal coordinate. 
- int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1; - int x_pad = 0, y_pad = 0; - - if (subpel_x || (sf->x_step_q4 != SUBPEL_SHIFTS)) { - x0 -= VP9_INTERP_EXTEND - 1; - x1 += VP9_INTERP_EXTEND; - x_pad = 1; - } - - if (subpel_y || (sf->y_step_q4 != SUBPEL_SHIFTS)) { - y0 -= VP9_INTERP_EXTEND - 1; - y1 += VP9_INTERP_EXTEND; - y_pad = 1; - } - - // Wait until reference block is ready. Pad 7 more pixels as last 7 - // pixels of each superblock row can be changed by next superblock row. - if (cm->frame_parallel_decode) - vp10_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf, - VPXMAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1)); - - // Skip border extension if block is inside the frame. - if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 || - y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) { - // Extend the border. - const uint8_t *const buf_ptr1 = ref_frame + y0 * buf_stride + x0; - const int b_w = x1 - x0 + 1; - const int b_h = y1 - y0 + 1; - const int border_offset = y_pad * 3 * b_w + x_pad * 3; - - extend_and_predict(buf_ptr1, buf_stride, x0, y0, b_w, b_h, - frame_width, frame_height, border_offset, - dst, dst_buf->stride, - subpel_x, subpel_y, - kernel, sf, -#if CONFIG_VP9_HIGHBITDEPTH - xd, -#endif - w, h, ref, xs, ys); - return; - } - } else { - // Wait until reference block is ready. Pad 7 more pixels as last 7 - // pixels of each superblock row can be changed by next superblock row. - if (cm->frame_parallel_decode) { - const int y1 = (y0_16 + (h - 1) * ys) >> SUBPEL_BITS; - vp10_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf, - VPXMAX(0, (y1 + 7)) << (plane == 0 ? 
0 : 1)); - } - } -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, - subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd); - } else { - inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, - subpel_y, sf, w, h, ref, kernel, xs, ys); - } -#else - inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, - subpel_y, sf, w, h, ref, kernel, xs, ys); -#endif // CONFIG_VP9_HIGHBITDEPTH -} - -static void dec_build_inter_predictors_sb(VP10Decoder *const pbi, - MACROBLOCKD *xd, - int mi_row, int mi_col) { - int plane; - const int mi_x = mi_col * MI_SIZE; - const int mi_y = mi_row * MI_SIZE; - const MODE_INFO *mi = xd->mi[0]; - const InterpKernel *kernel = vp10_filter_kernels[mi->mbmi.interp_filter]; - const BLOCK_SIZE sb_type = mi->mbmi.sb_type; - const int is_compound = has_second_ref(&mi->mbmi); - - for (plane = 0; plane < MAX_MB_PLANE; ++plane) { - struct macroblockd_plane *const pd = &xd->plane[plane]; - struct buf_2d *const dst_buf = &pd->dst; - const int num_4x4_w = pd->n4_w; - const int num_4x4_h = pd->n4_h; - - const int n4w_x4 = 4 * num_4x4_w; - const int n4h_x4 = 4 * num_4x4_h; - int ref; - - for (ref = 0; ref < 1 + is_compound; ++ref) { - const struct scale_factors *const sf = &xd->block_refs[ref]->sf; - struct buf_2d *const pre_buf = &pd->pre[ref]; - const int idx = xd->block_refs[ref]->idx; - BufferPool *const pool = pbi->common.buffer_pool; - RefCntBuffer *const ref_frame_buf = &pool->frame_bufs[idx]; - const int is_scaled = vp10_is_scaled(sf); - - if (sb_type < BLOCK_8X8) { - const PARTITION_TYPE bp = BLOCK_8X8 - sb_type; - const int have_vsplit = bp != PARTITION_HORZ; - const int have_hsplit = bp != PARTITION_VERT; - const int num_4x4_w = 2 >> ((!have_vsplit) | pd->subsampling_x); - const int num_4x4_h = 2 >> ((!have_hsplit) | pd->subsampling_y); - const int pw = 8 >> (have_vsplit | pd->subsampling_x); - const int ph = 8 >> 
(have_hsplit | pd->subsampling_y); - int x, y; - for (y = 0; y < num_4x4_h; ++y) { - for (x = 0; x < num_4x4_w; ++x) { - const MV mv = average_split_mvs(pd, mi, ref, y * 2 + x); - dec_build_inter_predictors(pbi, xd, plane, n4w_x4, n4h_x4, - 4 * x, 4 * y, pw, ph, mi_x, mi_y, kernel, - sf, pre_buf, dst_buf, &mv, - ref_frame_buf, is_scaled, ref); - } - } - } else { - const MV mv = mi->mbmi.mv[ref].as_mv; - dec_build_inter_predictors(pbi, xd, plane, n4w_x4, n4h_x4, - 0, 0, n4w_x4, n4h_x4, mi_x, mi_y, kernel, - sf, pre_buf, dst_buf, &mv, ref_frame_buf, - is_scaled, ref); - } - } - } -} - -static INLINE TX_SIZE dec_get_uv_tx_size(const MB_MODE_INFO *mbmi, - int n4_wl, int n4_hl) { - // get minimum log2 num4x4s dimension - const int x = VPXMIN(n4_wl, n4_hl); - return VPXMIN(mbmi->tx_size, x); -} - -static INLINE void dec_reset_skip_context(MACROBLOCKD *xd) { - int i; - for (i = 0; i < MAX_MB_PLANE; i++) { - struct macroblockd_plane *const pd = &xd->plane[i]; - memset(pd->above_context, 0, sizeof(ENTROPY_CONTEXT) * pd->n4_w); - memset(pd->left_context, 0, sizeof(ENTROPY_CONTEXT) * pd->n4_h); - } -} - -static void set_plane_n4(MACROBLOCKD *const xd, int bw, int bh, int bwl, - int bhl) { - int i; - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].n4_w = (bw << 1) >> xd->plane[i].subsampling_x; - xd->plane[i].n4_h = (bh << 1) >> xd->plane[i].subsampling_y; - xd->plane[i].n4_wl = bwl - xd->plane[i].subsampling_x; - xd->plane[i].n4_hl = bhl - xd->plane[i].subsampling_y; - } -} - -static MB_MODE_INFO *set_offsets(VP10_COMMON *const cm, MACROBLOCKD *const xd, - BLOCK_SIZE bsize, int mi_row, int mi_col, - int bw, int bh, int x_mis, int y_mis, - int bwl, int bhl) { - const int offset = mi_row * cm->mi_stride + mi_col; - int x, y; - const TileInfo *const tile = &xd->tile; - - xd->mi = cm->mi_grid_visible + offset; - xd->mi[0] = &cm->mi[offset]; - // TODO(slavarnway): Generate sb_type based on bwl and bhl, instead of - // passing bsize from decode_partition(). 
- xd->mi[0]->mbmi.sb_type = bsize; - for (y = 0; y < y_mis; ++y) - for (x = !y; x < x_mis; ++x) { - xd->mi[y * cm->mi_stride + x] = xd->mi[0]; - } - - set_plane_n4(xd, bw, bh, bwl, bhl); - - set_skip_context(xd, mi_row, mi_col); - - // Distance of Mb to the various image edges. These are specified to 8th pel - // as they are always compared to values that are in 1/8th pel units - set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols); - - vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col); - return &xd->mi[0]->mbmi; -} - -static void decode_block(VP10Decoder *const pbi, MACROBLOCKD *const xd, - int mi_row, int mi_col, - vpx_reader *r, BLOCK_SIZE bsize, - int bwl, int bhl) { - VP10_COMMON *const cm = &pbi->common; - const int less8x8 = bsize < BLOCK_8X8; - const int bw = 1 << (bwl - 1); - const int bh = 1 << (bhl - 1); - const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col); - const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row); - - MB_MODE_INFO *mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col, - bw, bh, x_mis, y_mis, bwl, bhl); - - if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) { - const BLOCK_SIZE uv_subsize = - ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y]; - if (uv_subsize == BLOCK_INVALID) - vpx_internal_error(xd->error_info, - VPX_CODEC_CORRUPT_FRAME, "Invalid block size."); - } - - vp10_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis); - - if (mbmi->skip) { - dec_reset_skip_context(xd); - } - - if (!is_inter_block(mbmi)) { - int plane; - for (plane = 0; plane < MAX_MB_PLANE; ++plane) { - const struct macroblockd_plane *const pd = &xd->plane[plane]; - const TX_SIZE tx_size = - plane ? dec_get_uv_tx_size(mbmi, pd->n4_wl, pd->n4_hl) - : mbmi->tx_size; - const int num_4x4_w = pd->n4_w; - const int num_4x4_h = pd->n4_h; - const int step = (1 << tx_size); - int row, col; - const int max_blocks_wide = num_4x4_w + (xd->mb_to_right_edge >= 0 ? 
- 0 : xd->mb_to_right_edge >> (5 + pd->subsampling_x)); - const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? - 0 : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); - - for (row = 0; row < max_blocks_high; row += step) - for (col = 0; col < max_blocks_wide; col += step) - predict_and_reconstruct_intra_block(xd, r, mbmi, plane, - row, col, tx_size); - } - } else { - // Prediction - dec_build_inter_predictors_sb(pbi, xd, mi_row, mi_col); - - // Reconstruction - if (!mbmi->skip) { - int eobtotal = 0; - int plane; - - for (plane = 0; plane < MAX_MB_PLANE; ++plane) { - const struct macroblockd_plane *const pd = &xd->plane[plane]; - const TX_SIZE tx_size = - plane ? dec_get_uv_tx_size(mbmi, pd->n4_wl, pd->n4_hl) - : mbmi->tx_size; - const int num_4x4_w = pd->n4_w; - const int num_4x4_h = pd->n4_h; - const int step = (1 << tx_size); - int row, col; - const int max_blocks_wide = num_4x4_w + (xd->mb_to_right_edge >= 0 ? - 0 : xd->mb_to_right_edge >> (5 + pd->subsampling_x)); - const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 
- 0 : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); - - for (row = 0; row < max_blocks_high; row += step) - for (col = 0; col < max_blocks_wide; col += step) - eobtotal += reconstruct_inter_block(xd, r, mbmi, plane, row, col, - tx_size); - } - - if (!less8x8 && eobtotal == 0) -#if CONFIG_MISC_FIXES - mbmi->has_no_coeffs = 1; // skip loopfilter -#else - mbmi->skip = 1; // skip loopfilter -#endif - } - } - - xd->corrupted |= vpx_reader_has_error(r); -} - -static INLINE int dec_partition_plane_context(const MACROBLOCKD *xd, - int mi_row, int mi_col, - int bsl) { - const PARTITION_CONTEXT *above_ctx = xd->above_seg_context + mi_col; - const PARTITION_CONTEXT *left_ctx = xd->left_seg_context + (mi_row & MI_MASK); - int above = (*above_ctx >> bsl) & 1 , left = (*left_ctx >> bsl) & 1; - -// assert(bsl >= 0); - - return (left * 2 + above) + bsl * PARTITION_PLOFFSET; -} - -static INLINE void dec_update_partition_context(MACROBLOCKD *xd, - int mi_row, int mi_col, - BLOCK_SIZE subsize, - int bw) { - PARTITION_CONTEXT *const above_ctx = xd->above_seg_context + mi_col; - PARTITION_CONTEXT *const left_ctx = xd->left_seg_context + (mi_row & MI_MASK); - - // update the partition context at the end notes. set partition bits - // of block sizes larger than the current one to be one, and partition - // bits of smaller block sizes to be zero. - memset(above_ctx, partition_context_lookup[subsize].above, bw); - memset(left_ctx, partition_context_lookup[subsize].left, bw); -} - -static PARTITION_TYPE read_partition(VP10_COMMON *cm, MACROBLOCKD *xd, - int mi_row, int mi_col, vpx_reader *r, - int has_rows, int has_cols, int bsl) { - const int ctx = dec_partition_plane_context(xd, mi_row, mi_col, bsl); - const vpx_prob *const probs = cm->fc->partition_prob[ctx]; - FRAME_COUNTS *counts = xd->counts; - PARTITION_TYPE p; - - if (has_rows && has_cols) - p = (PARTITION_TYPE)vpx_read_tree(r, vp10_partition_tree, probs); - else if (!has_rows && has_cols) - p = vpx_read(r, probs[1]) ? 
PARTITION_SPLIT : PARTITION_HORZ; - else if (has_rows && !has_cols) - p = vpx_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT; - else - p = PARTITION_SPLIT; - - if (counts) - ++counts->partition[ctx][p]; - - return p; -} - -// TODO(slavarnway): eliminate bsize and subsize in future commits -static void decode_partition(VP10Decoder *const pbi, MACROBLOCKD *const xd, - int mi_row, int mi_col, - vpx_reader* r, BLOCK_SIZE bsize, int n4x4_l2) { - VP10_COMMON *const cm = &pbi->common; - const int n8x8_l2 = n4x4_l2 - 1; - const int num_8x8_wh = 1 << n8x8_l2; - const int hbs = num_8x8_wh >> 1; - PARTITION_TYPE partition; - BLOCK_SIZE subsize; - const int has_rows = (mi_row + hbs) < cm->mi_rows; - const int has_cols = (mi_col + hbs) < cm->mi_cols; - - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; - - partition = read_partition(cm, xd, mi_row, mi_col, r, has_rows, has_cols, - n8x8_l2); - subsize = subsize_lookup[partition][bsize]; // get_subsize(bsize, partition); - if (!hbs) { - // calculate bmode block dimensions (log 2) - xd->bmode_blocks_wl = 1 >> !!(partition & PARTITION_VERT); - xd->bmode_blocks_hl = 1 >> !!(partition & PARTITION_HORZ); - decode_block(pbi, xd, mi_row, mi_col, r, subsize, 1, 1); - } else { - switch (partition) { - case PARTITION_NONE: - decode_block(pbi, xd, mi_row, mi_col, r, subsize, n4x4_l2, n4x4_l2); - break; - case PARTITION_HORZ: - decode_block(pbi, xd, mi_row, mi_col, r, subsize, n4x4_l2, n8x8_l2); - if (has_rows) - decode_block(pbi, xd, mi_row + hbs, mi_col, r, subsize, n4x4_l2, - n8x8_l2); - break; - case PARTITION_VERT: - decode_block(pbi, xd, mi_row, mi_col, r, subsize, n8x8_l2, n4x4_l2); - if (has_cols) - decode_block(pbi, xd, mi_row, mi_col + hbs, r, subsize, n8x8_l2, - n4x4_l2); - break; - case PARTITION_SPLIT: - decode_partition(pbi, xd, mi_row, mi_col, r, subsize, n8x8_l2); - decode_partition(pbi, xd, mi_row, mi_col + hbs, r, subsize, n8x8_l2); - decode_partition(pbi, xd, mi_row + hbs, mi_col, r, subsize, n8x8_l2); - 
decode_partition(pbi, xd, mi_row + hbs, mi_col + hbs, r, subsize, - n8x8_l2); - break; - default: - assert(0 && "Invalid partition type"); - } - } - - // update partition context - if (bsize >= BLOCK_8X8 && - (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) - dec_update_partition_context(xd, mi_row, mi_col, subsize, num_8x8_wh); -} - -static void setup_token_decoder(const uint8_t *data, - const uint8_t *data_end, - size_t read_size, - struct vpx_internal_error_info *error_info, - vpx_reader *r, - vpx_decrypt_cb decrypt_cb, - void *decrypt_state) { - // Validate the calculated partition length. If the buffer - // described by the partition can't be fully read, then restrict - // it to the portion that can be (for EC mode) or throw an error. - if (!read_is_valid(data, read_size, data_end)) - vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME, - "Truncated packet or corrupt tile length"); - - if (vpx_reader_init(r, data, read_size, decrypt_cb, decrypt_state)) - vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR, - "Failed to allocate bool decoder %d", 1); -} - -static void read_coef_probs_common(vp10_coeff_probs_model *coef_probs, - vpx_reader *r) { - int i, j, k, l, m; - - if (vpx_read_bit(r)) - for (i = 0; i < PLANE_TYPES; ++i) - for (j = 0; j < REF_TYPES; ++j) - for (k = 0; k < COEF_BANDS; ++k) - for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) - for (m = 0; m < UNCONSTRAINED_NODES; ++m) - vp10_diff_update_prob(r, &coef_probs[i][j][k][l][m]); -} - -static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, - vpx_reader *r) { - const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode]; - TX_SIZE tx_size; - for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) - read_coef_probs_common(fc->coef_probs[tx_size], r); -} - -static void setup_segmentation(VP10_COMMON *const cm, - struct vpx_read_bit_buffer *rb) { - struct segmentation *const seg = &cm->seg; -#if !CONFIG_MISC_FIXES - struct segmentation_probs *const segp = &cm->segp; -#endif - int i, j; 
- - seg->update_map = 0; - seg->update_data = 0; - - seg->enabled = vpx_rb_read_bit(rb); - if (!seg->enabled) - return; - - // Segmentation map update - if (frame_is_intra_only(cm) || cm->error_resilient_mode) { - seg->update_map = 1; - } else { - seg->update_map = vpx_rb_read_bit(rb); - } - if (seg->update_map) { -#if !CONFIG_MISC_FIXES - for (i = 0; i < SEG_TREE_PROBS; i++) - segp->tree_probs[i] = vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) - : MAX_PROB; -#endif - if (frame_is_intra_only(cm) || cm->error_resilient_mode) { - seg->temporal_update = 0; - } else { - seg->temporal_update = vpx_rb_read_bit(rb); - } -#if !CONFIG_MISC_FIXES - if (seg->temporal_update) { - for (i = 0; i < PREDICTION_PROBS; i++) - segp->pred_probs[i] = vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) - : MAX_PROB; - } else { - for (i = 0; i < PREDICTION_PROBS; i++) - segp->pred_probs[i] = MAX_PROB; - } -#endif - } - - // Segmentation data update - seg->update_data = vpx_rb_read_bit(rb); - if (seg->update_data) { - seg->abs_delta = vpx_rb_read_bit(rb); - - vp10_clearall_segfeatures(seg); - - for (i = 0; i < MAX_SEGMENTS; i++) { - for (j = 0; j < SEG_LVL_MAX; j++) { - int data = 0; - const int feature_enabled = vpx_rb_read_bit(rb); - if (feature_enabled) { - vp10_enable_segfeature(seg, i, j); - data = decode_unsigned_max(rb, vp10_seg_feature_data_max(j)); - if (vp10_is_segfeature_signed(j)) - data = vpx_rb_read_bit(rb) ? -data : data; - } - vp10_set_segdata(seg, i, j, data); - } - } - } -} - -static void setup_loopfilter(struct loopfilter *lf, - struct vpx_read_bit_buffer *rb) { - lf->filter_level = vpx_rb_read_literal(rb, 6); - lf->sharpness_level = vpx_rb_read_literal(rb, 3); - - // Read in loop filter deltas applied at the MB level based on mode or ref - // frame. 
- lf->mode_ref_delta_update = 0; - - lf->mode_ref_delta_enabled = vpx_rb_read_bit(rb); - if (lf->mode_ref_delta_enabled) { - lf->mode_ref_delta_update = vpx_rb_read_bit(rb); - if (lf->mode_ref_delta_update) { - int i; - - for (i = 0; i < MAX_REF_FRAMES; i++) - if (vpx_rb_read_bit(rb)) - lf->ref_deltas[i] = vpx_rb_read_inv_signed_literal(rb, 6); - - for (i = 0; i < MAX_MODE_LF_DELTAS; i++) - if (vpx_rb_read_bit(rb)) - lf->mode_deltas[i] = vpx_rb_read_inv_signed_literal(rb, 6); - } - } -} - -static INLINE int read_delta_q(struct vpx_read_bit_buffer *rb) { - return vpx_rb_read_bit(rb) ? - vpx_rb_read_inv_signed_literal(rb, CONFIG_MISC_FIXES ? 6 : 4) : 0; -} - -static void setup_quantization(VP10_COMMON *const cm, - struct vpx_read_bit_buffer *rb) { - cm->base_qindex = vpx_rb_read_literal(rb, QINDEX_BITS); - cm->y_dc_delta_q = read_delta_q(rb); - cm->uv_dc_delta_q = read_delta_q(rb); - cm->uv_ac_delta_q = read_delta_q(rb); - cm->dequant_bit_depth = cm->bit_depth; -} - -static void setup_segmentation_dequant(VP10_COMMON *const cm) { - // Build y/uv dequant values based on segmentation. - if (cm->seg.enabled) { - int i; - for (i = 0; i < MAX_SEGMENTS; ++i) { - const int qindex = vp10_get_qindex(&cm->seg, i, cm->base_qindex); - cm->y_dequant[i][0] = vp10_dc_quant(qindex, cm->y_dc_delta_q, - cm->bit_depth); - cm->y_dequant[i][1] = vp10_ac_quant(qindex, 0, cm->bit_depth); - cm->uv_dequant[i][0] = vp10_dc_quant(qindex, cm->uv_dc_delta_q, - cm->bit_depth); - cm->uv_dequant[i][1] = vp10_ac_quant(qindex, cm->uv_ac_delta_q, - cm->bit_depth); - } - } else { - const int qindex = cm->base_qindex; - // When segmentation is disabled, only the first value is used. The - // remaining are don't cares. 
- cm->y_dequant[0][0] = vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth); - cm->y_dequant[0][1] = vp10_ac_quant(qindex, 0, cm->bit_depth); - cm->uv_dequant[0][0] = vp10_dc_quant(qindex, cm->uv_dc_delta_q, - cm->bit_depth); - cm->uv_dequant[0][1] = vp10_ac_quant(qindex, cm->uv_ac_delta_q, - cm->bit_depth); - } -} - -static INTERP_FILTER read_interp_filter(struct vpx_read_bit_buffer *rb) { - return vpx_rb_read_bit(rb) ? SWITCHABLE : vpx_rb_read_literal(rb, 2); -} - -static void setup_render_size(VP10_COMMON *cm, - struct vpx_read_bit_buffer *rb) { - cm->render_width = cm->width; - cm->render_height = cm->height; - if (vpx_rb_read_bit(rb)) - vp10_read_frame_size(rb, &cm->render_width, &cm->render_height); -} - -static void resize_mv_buffer(VP10_COMMON *cm) { - vpx_free(cm->cur_frame->mvs); - cm->cur_frame->mi_rows = cm->mi_rows; - cm->cur_frame->mi_cols = cm->mi_cols; - cm->cur_frame->mvs = (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols, - sizeof(*cm->cur_frame->mvs)); -} - -static void resize_context_buffers(VP10_COMMON *cm, int width, int height) { -#if CONFIG_SIZE_LIMIT - if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT) - vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, - "Dimensions of %dx%d beyond allowed size of %dx%d.", - width, height, DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT); -#endif - if (cm->width != width || cm->height != height) { - const int new_mi_rows = - ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2; - const int new_mi_cols = - ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2; - - // Allocations in vp10_alloc_context_buffers() depend on individual - // dimensions as well as the overall size. 
- if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) { - if (vp10_alloc_context_buffers(cm, width, height)) - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate context buffers"); - } else { - vp10_set_mb_mi(cm, width, height); - } - vp10_init_context_buffers(cm); - cm->width = width; - cm->height = height; - } - if (cm->cur_frame->mvs == NULL || cm->mi_rows > cm->cur_frame->mi_rows || - cm->mi_cols > cm->cur_frame->mi_cols) { - resize_mv_buffer(cm); - } -} - -static void setup_frame_size(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) { - int width, height; - BufferPool *const pool = cm->buffer_pool; - vp10_read_frame_size(rb, &width, &height); - resize_context_buffers(cm, width, height); - setup_render_size(cm, rb); - - lock_buffer_pool(pool); - if (vpx_realloc_frame_buffer( - get_frame_new_buffer(cm), cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, -#endif - VP9_DEC_BORDER_IN_PIXELS, - cm->byte_alignment, - &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb, - pool->cb_priv)) { - unlock_buffer_pool(pool); - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate frame buffer"); - } - unlock_buffer_pool(pool); - - pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x; - pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y; - pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth; - pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space; - pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range; - pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width; - pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height; -} - -static INLINE int valid_ref_frame_img_fmt(vpx_bit_depth_t ref_bit_depth, - int ref_xss, int ref_yss, - vpx_bit_depth_t this_bit_depth, - int this_xss, int this_yss) { - return ref_bit_depth == this_bit_depth && 
ref_xss == this_xss && - ref_yss == this_yss; -} - -static void setup_frame_size_with_refs(VP10_COMMON *cm, - struct vpx_read_bit_buffer *rb) { - int width, height; - int found = 0, i; - int has_valid_ref_frame = 0; - BufferPool *const pool = cm->buffer_pool; - for (i = 0; i < REFS_PER_FRAME; ++i) { - if (vpx_rb_read_bit(rb)) { - YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf; - width = buf->y_crop_width; - height = buf->y_crop_height; -#if CONFIG_MISC_FIXES - cm->render_width = buf->render_width; - cm->render_height = buf->render_height; -#endif - found = 1; - break; - } - } - - if (!found) { - vp10_read_frame_size(rb, &width, &height); -#if CONFIG_MISC_FIXES - setup_render_size(cm, rb); -#endif - } - - if (width <= 0 || height <= 0) - vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, - "Invalid frame size"); - - // Check to make sure at least one of frames that this frame references - // has valid dimensions. - for (i = 0; i < REFS_PER_FRAME; ++i) { - RefBuffer *const ref_frame = &cm->frame_refs[i]; - has_valid_ref_frame |= valid_ref_frame_size(ref_frame->buf->y_crop_width, - ref_frame->buf->y_crop_height, - width, height); - } - if (!has_valid_ref_frame) - vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, - "Referenced frame has invalid size"); - for (i = 0; i < REFS_PER_FRAME; ++i) { - RefBuffer *const ref_frame = &cm->frame_refs[i]; - if (!valid_ref_frame_img_fmt( - ref_frame->buf->bit_depth, - ref_frame->buf->subsampling_x, - ref_frame->buf->subsampling_y, - cm->bit_depth, - cm->subsampling_x, - cm->subsampling_y)) - vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, - "Referenced frame has incompatible color format"); - } - - resize_context_buffers(cm, width, height); -#if !CONFIG_MISC_FIXES - setup_render_size(cm, rb); -#endif - - lock_buffer_pool(pool); - if (vpx_realloc_frame_buffer( - get_frame_new_buffer(cm), cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, 
-#endif - VP9_DEC_BORDER_IN_PIXELS, - cm->byte_alignment, - &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb, - pool->cb_priv)) { - unlock_buffer_pool(pool); - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate frame buffer"); - } - unlock_buffer_pool(pool); - - pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x; - pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y; - pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth; - pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space; - pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range; - pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width; - pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height; -} - -static void setup_tile_info(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) { - int min_log2_tile_cols, max_log2_tile_cols, max_ones; - vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols); - - // columns - max_ones = max_log2_tile_cols - min_log2_tile_cols; - cm->log2_tile_cols = min_log2_tile_cols; - while (max_ones-- && vpx_rb_read_bit(rb)) - cm->log2_tile_cols++; - - if (cm->log2_tile_cols > 6) - vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, - "Invalid number of tile columns"); - - // rows - cm->log2_tile_rows = vpx_rb_read_bit(rb); - if (cm->log2_tile_rows) - cm->log2_tile_rows += vpx_rb_read_bit(rb); - -#if CONFIG_MISC_FIXES - // tile size magnitude - if (cm->log2_tile_rows > 0 || cm->log2_tile_cols > 0) { - cm->tile_sz_mag = vpx_rb_read_literal(rb, 2); - } -#else - cm->tile_sz_mag = 3; -#endif -} - -typedef struct TileBuffer { - const uint8_t *data; - size_t size; - int col; // only used with multi-threaded decoding -} TileBuffer; - -static int mem_get_varsize(const uint8_t *data, const int mag) { - switch (mag) { - case 0: - return data[0]; - case 1: - return mem_get_le16(data); - case 2: - return mem_get_le24(data); 
- case 3: - return mem_get_le32(data); - } - - assert("Invalid tile size marker value" && 0); - - return -1; -} - -// Reads the next tile returning its size and adjusting '*data' accordingly -// based on 'is_last'. -static void get_tile_buffer(const uint8_t *const data_end, - const int tile_sz_mag, int is_last, - struct vpx_internal_error_info *error_info, - const uint8_t **data, - vpx_decrypt_cb decrypt_cb, void *decrypt_state, - TileBuffer *buf) { - size_t size; - - if (!is_last) { - if (!read_is_valid(*data, 4, data_end)) - vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME, - "Truncated packet or corrupt tile length"); - - if (decrypt_cb) { - uint8_t be_data[4]; - decrypt_cb(decrypt_state, *data, be_data, tile_sz_mag + 1); - size = mem_get_varsize(be_data, tile_sz_mag) + CONFIG_MISC_FIXES; - } else { - size = mem_get_varsize(*data, tile_sz_mag) + CONFIG_MISC_FIXES; - } - *data += tile_sz_mag + 1; - - if (size > (size_t)(data_end - *data)) - vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME, - "Truncated packet or corrupt tile size"); - } else { - size = data_end - *data; - } - - buf->data = *data; - buf->size = size; - - *data += size; -} - -static void get_tile_buffers(VP10Decoder *pbi, - const uint8_t *data, const uint8_t *data_end, - int tile_cols, int tile_rows, - TileBuffer (*tile_buffers)[1 << 6]) { - int r, c; - - for (r = 0; r < tile_rows; ++r) { - for (c = 0; c < tile_cols; ++c) { - const int is_last = (r == tile_rows - 1) && (c == tile_cols - 1); - TileBuffer *const buf = &tile_buffers[r][c]; - buf->col = c; - get_tile_buffer(data_end, pbi->common.tile_sz_mag, - is_last, &pbi->common.error, &data, - pbi->decrypt_cb, pbi->decrypt_state, buf); - } - } -} - -static const uint8_t *decode_tiles(VP10Decoder *pbi, - const uint8_t *data, - const uint8_t *data_end) { - VP10_COMMON *const cm = &pbi->common; - const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); - const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols); - const 
int tile_cols = 1 << cm->log2_tile_cols; - const int tile_rows = 1 << cm->log2_tile_rows; - TileBuffer tile_buffers[4][1 << 6]; - int tile_row, tile_col; - int mi_row, mi_col; - TileData *tile_data = NULL; - - if (cm->lf.filter_level && !cm->skip_loop_filter && - pbi->lf_worker.data1 == NULL) { - CHECK_MEM_ERROR(cm, pbi->lf_worker.data1, - vpx_memalign(32, sizeof(LFWorkerData))); - pbi->lf_worker.hook = (VPxWorkerHook)vp10_loop_filter_worker; - if (pbi->max_threads > 1 && !winterface->reset(&pbi->lf_worker)) { - vpx_internal_error(&cm->error, VPX_CODEC_ERROR, - "Loop filter thread creation failed"); - } - } - - if (cm->lf.filter_level && !cm->skip_loop_filter) { - LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; - // Be sure to sync as we might be resuming after a failed frame decode. - winterface->sync(&pbi->lf_worker); - vp10_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm, - pbi->mb.plane); - } - - assert(tile_rows <= 4); - assert(tile_cols <= (1 << 6)); - - // Note: this memset assumes above_context[0], [1] and [2] - // are allocated as part of the same buffer. - memset(cm->above_context, 0, - sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_cols); - - memset(cm->above_seg_context, 0, - sizeof(*cm->above_seg_context) * aligned_cols); - - get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers); - - if (pbi->tile_data == NULL || - (tile_cols * tile_rows) != pbi->total_tiles) { - vpx_free(pbi->tile_data); - CHECK_MEM_ERROR( - cm, - pbi->tile_data, - vpx_memalign(32, tile_cols * tile_rows * (sizeof(*pbi->tile_data)))); - pbi->total_tiles = tile_rows * tile_cols; - } - - // Load all tile information into tile_data. 
- for (tile_row = 0; tile_row < tile_rows; ++tile_row) { - for (tile_col = 0; tile_col < tile_cols; ++tile_col) { - const TileBuffer *const buf = &tile_buffers[tile_row][tile_col]; - tile_data = pbi->tile_data + tile_cols * tile_row + tile_col; - tile_data->cm = cm; - tile_data->xd = pbi->mb; - tile_data->xd.corrupted = 0; - tile_data->xd.counts = - cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD ? - &cm->counts : NULL; - vp10_zero(tile_data->dqcoeff); - vp10_tile_init(&tile_data->xd.tile, tile_data->cm, tile_row, tile_col); - setup_token_decoder(buf->data, data_end, buf->size, &cm->error, - &tile_data->bit_reader, pbi->decrypt_cb, - pbi->decrypt_state); - vp10_init_macroblockd(cm, &tile_data->xd, tile_data->dqcoeff); - tile_data->xd.plane[0].color_index_map = tile_data->color_index_map[0]; - tile_data->xd.plane[1].color_index_map = tile_data->color_index_map[1]; - } - } - - for (tile_row = 0; tile_row < tile_rows; ++tile_row) { - TileInfo tile; - vp10_tile_set_row(&tile, cm, tile_row); - for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end; - mi_row += MI_BLOCK_SIZE) { - for (tile_col = 0; tile_col < tile_cols; ++tile_col) { - const int col = pbi->inv_tile_order ? - tile_cols - tile_col - 1 : tile_col; - tile_data = pbi->tile_data + tile_cols * tile_row + col; - vp10_tile_set_col(&tile, tile_data->cm, col); - vp10_zero(tile_data->xd.left_context); - vp10_zero(tile_data->xd.left_seg_context); - for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end; - mi_col += MI_BLOCK_SIZE) { - decode_partition(pbi, &tile_data->xd, mi_row, - mi_col, &tile_data->bit_reader, BLOCK_64X64, 4); - } - pbi->mb.corrupted |= tile_data->xd.corrupted; - if (pbi->mb.corrupted) - vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, - "Failed to decode tile data"); - } - // Loopfilter one row. 
- if (cm->lf.filter_level && !cm->skip_loop_filter) { - const int lf_start = mi_row - MI_BLOCK_SIZE; - LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; - - // delay the loopfilter by 1 macroblock row. - if (lf_start < 0) continue; - - // decoding has completed: finish up the loop filter in this thread. - if (mi_row + MI_BLOCK_SIZE >= cm->mi_rows) continue; - - winterface->sync(&pbi->lf_worker); - lf_data->start = lf_start; - lf_data->stop = mi_row; - if (pbi->max_threads > 1) { - winterface->launch(&pbi->lf_worker); - } else { - winterface->execute(&pbi->lf_worker); - } - } - // After loopfiltering, the last 7 row pixels in each superblock row may - // still be changed by the longest loopfilter of the next superblock - // row. - if (cm->frame_parallel_decode) - vp10_frameworker_broadcast(pbi->cur_buf, - mi_row << MI_BLOCK_SIZE_LOG2); - } - } - - // Loopfilter remaining rows in the frame. - if (cm->lf.filter_level && !cm->skip_loop_filter) { - LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; - winterface->sync(&pbi->lf_worker); - lf_data->start = lf_data->stop; - lf_data->stop = cm->mi_rows; - winterface->execute(&pbi->lf_worker); - } - - // Get last tile data. 
- tile_data = pbi->tile_data + tile_cols * tile_rows - 1; - - if (cm->frame_parallel_decode) - vp10_frameworker_broadcast(pbi->cur_buf, INT_MAX); - return vpx_reader_find_end(&tile_data->bit_reader); -} - -static int tile_worker_hook(TileWorkerData *const tile_data, - const TileInfo *const tile) { - int mi_row, mi_col; - - if (setjmp(tile_data->error_info.jmp)) { - tile_data->error_info.setjmp = 0; - tile_data->xd.corrupted = 1; - return 0; - } - - tile_data->error_info.setjmp = 1; - tile_data->xd.error_info = &tile_data->error_info; - - for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; - mi_row += MI_BLOCK_SIZE) { - vp10_zero(tile_data->xd.left_context); - vp10_zero(tile_data->xd.left_seg_context); - for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; - mi_col += MI_BLOCK_SIZE) { - decode_partition(tile_data->pbi, &tile_data->xd, - mi_row, mi_col, &tile_data->bit_reader, - BLOCK_64X64, 4); - } - } - return !tile_data->xd.corrupted; -} - -// sorts in descending order -static int compare_tile_buffers(const void *a, const void *b) { - const TileBuffer *const buf1 = (const TileBuffer*)a; - const TileBuffer *const buf2 = (const TileBuffer*)b; - return (int)(buf2->size - buf1->size); -} - -static const uint8_t *decode_tiles_mt(VP10Decoder *pbi, - const uint8_t *data, - const uint8_t *data_end) { - VP10_COMMON *const cm = &pbi->common; - const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); - const uint8_t *bit_reader_end = NULL; - const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols); - const int tile_cols = 1 << cm->log2_tile_cols; - const int tile_rows = 1 << cm->log2_tile_rows; - const int num_workers = VPXMIN(pbi->max_threads & ~1, tile_cols); - TileBuffer tile_buffers[1][1 << 6]; - int n; - int final_worker = -1; - - assert(tile_cols <= (1 << 6)); - assert(tile_rows == 1); - (void)tile_rows; - - // TODO(jzern): See if we can remove the restriction of passing in max - // threads to the decoder. 
- if (pbi->num_tile_workers == 0) { - const int num_threads = pbi->max_threads & ~1; - int i; - CHECK_MEM_ERROR(cm, pbi->tile_workers, - vpx_malloc(num_threads * sizeof(*pbi->tile_workers))); - // Ensure tile data offsets will be properly aligned. This may fail on - // platforms without DECLARE_ALIGNED(). - assert((sizeof(*pbi->tile_worker_data) % 16) == 0); - CHECK_MEM_ERROR(cm, pbi->tile_worker_data, - vpx_memalign(32, num_threads * - sizeof(*pbi->tile_worker_data))); - CHECK_MEM_ERROR(cm, pbi->tile_worker_info, - vpx_malloc(num_threads * sizeof(*pbi->tile_worker_info))); - for (i = 0; i < num_threads; ++i) { - VPxWorker *const worker = &pbi->tile_workers[i]; - ++pbi->num_tile_workers; - - winterface->init(worker); - if (i < num_threads - 1 && !winterface->reset(worker)) { - vpx_internal_error(&cm->error, VPX_CODEC_ERROR, - "Tile decoder thread creation failed"); - } - } - } - - // Reset tile decoding hook - for (n = 0; n < num_workers; ++n) { - VPxWorker *const worker = &pbi->tile_workers[n]; - winterface->sync(worker); - worker->hook = (VPxWorkerHook)tile_worker_hook; - worker->data1 = &pbi->tile_worker_data[n]; - worker->data2 = &pbi->tile_worker_info[n]; - } - - // Note: this memset assumes above_context[0], [1] and [2] - // are allocated as part of the same buffer. - memset(cm->above_context, 0, - sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_mi_cols); - memset(cm->above_seg_context, 0, - sizeof(*cm->above_seg_context) * aligned_mi_cols); - - // Load tile data into tile_buffers - get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers); - - // Sort the buffers based on size in descending order. - qsort(tile_buffers[0], tile_cols, sizeof(tile_buffers[0][0]), - compare_tile_buffers); - - // Rearrange the tile buffers such that per-tile group the largest, and - // presumably the most difficult, tile will be decoded in the main thread. 
- // This should help minimize the number of instances where the main thread is - // waiting for a worker to complete. - { - int group_start = 0; - while (group_start < tile_cols) { - const TileBuffer largest = tile_buffers[0][group_start]; - const int group_end = VPXMIN(group_start + num_workers, tile_cols) - 1; - memmove(tile_buffers[0] + group_start, tile_buffers[0] + group_start + 1, - (group_end - group_start) * sizeof(tile_buffers[0][0])); - tile_buffers[0][group_end] = largest; - group_start = group_end + 1; - } - } - - // Initialize thread frame counts. - if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) { - int i; - - for (i = 0; i < num_workers; ++i) { - TileWorkerData *const tile_data = - (TileWorkerData*)pbi->tile_workers[i].data1; - vp10_zero(tile_data->counts); - } - } - - n = 0; - while (n < tile_cols) { - int i; - for (i = 0; i < num_workers && n < tile_cols; ++i) { - VPxWorker *const worker = &pbi->tile_workers[i]; - TileWorkerData *const tile_data = (TileWorkerData*)worker->data1; - TileInfo *const tile = (TileInfo*)worker->data2; - TileBuffer *const buf = &tile_buffers[0][n]; - - tile_data->pbi = pbi; - tile_data->xd = pbi->mb; - tile_data->xd.corrupted = 0; - tile_data->xd.counts = - cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD ? 
- &tile_data->counts : NULL; - vp10_zero(tile_data->dqcoeff); - vp10_tile_init(tile, cm, 0, buf->col); - vp10_tile_init(&tile_data->xd.tile, cm, 0, buf->col); - setup_token_decoder(buf->data, data_end, buf->size, &cm->error, - &tile_data->bit_reader, pbi->decrypt_cb, - pbi->decrypt_state); - vp10_init_macroblockd(cm, &tile_data->xd, tile_data->dqcoeff); - tile_data->xd.plane[0].color_index_map = tile_data->color_index_map[0]; - tile_data->xd.plane[1].color_index_map = tile_data->color_index_map[1]; - - worker->had_error = 0; - if (i == num_workers - 1 || n == tile_cols - 1) { - winterface->execute(worker); - } else { - winterface->launch(worker); - } - - if (buf->col == tile_cols - 1) { - final_worker = i; - } - - ++n; - } - - for (; i > 0; --i) { - VPxWorker *const worker = &pbi->tile_workers[i - 1]; - // TODO(jzern): The tile may have specific error data associated with - // its vpx_internal_error_info which could be propagated to the main info - // in cm. Additionally once the threads have been synced and an error is - // detected, there's no point in continuing to decode tiles. - pbi->mb.corrupted |= !winterface->sync(worker); - } - if (final_worker > -1) { - TileWorkerData *const tile_data = - (TileWorkerData*)pbi->tile_workers[final_worker].data1; - bit_reader_end = vpx_reader_find_end(&tile_data->bit_reader); - final_worker = -1; - } - - // Accumulate thread frame counts. 
- if (n >= tile_cols && - cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) { - for (i = 0; i < num_workers; ++i) { - TileWorkerData *const tile_data = - (TileWorkerData*)pbi->tile_workers[i].data1; - vp10_accumulate_frame_counts(cm, &tile_data->counts, 1); - } - } - } - - return bit_reader_end; -} - -static void error_handler(void *data) { - VP10_COMMON *const cm = (VP10_COMMON *)data; - vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet"); -} - -static void read_bitdepth_colorspace_sampling( - VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) { - if (cm->profile >= PROFILE_2) { - cm->bit_depth = vpx_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10; -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth = 1; -#endif - } else { - cm->bit_depth = VPX_BITS_8; -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth = 0; -#endif - } - cm->color_space = vpx_rb_read_literal(rb, 3); - if (cm->color_space != VPX_CS_SRGB) { - // [16,235] (including xvycc) vs [0,255] range - cm->color_range = vpx_rb_read_bit(rb); - if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) { - cm->subsampling_x = vpx_rb_read_bit(rb); - cm->subsampling_y = vpx_rb_read_bit(rb); - if (cm->subsampling_x == 1 && cm->subsampling_y == 1) - vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, - "4:2:0 color not supported in profile 1 or 3"); - if (vpx_rb_read_bit(rb)) - vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, - "Reserved bit set"); - } else { - cm->subsampling_y = cm->subsampling_x = 1; - } - } else { - if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) { - // Note if colorspace is SRGB then 4:4:4 chroma sampling is assumed. - // 4:2:2 or 4:4:0 chroma sampling is not allowed. 
- cm->subsampling_y = cm->subsampling_x = 0; - if (vpx_rb_read_bit(rb)) - vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, - "Reserved bit set"); - } else { - vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, - "4:4:4 color not supported in profile 0 or 2"); - } - } -} - -static size_t read_uncompressed_header(VP10Decoder *pbi, - struct vpx_read_bit_buffer *rb) { - VP10_COMMON *const cm = &pbi->common; - MACROBLOCKD *const xd = &pbi->mb; - BufferPool *const pool = cm->buffer_pool; - RefCntBuffer *const frame_bufs = pool->frame_bufs; - int i, mask, ref_index = 0; - size_t sz; - - cm->last_frame_type = cm->frame_type; - cm->last_intra_only = cm->intra_only; - - if (vpx_rb_read_literal(rb, 2) != VP9_FRAME_MARKER) - vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, - "Invalid frame marker"); - - cm->profile = vp10_read_profile(rb); -#if CONFIG_VP9_HIGHBITDEPTH - if (cm->profile >= MAX_PROFILES) - vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, - "Unsupported bitstream profile"); -#else - if (cm->profile >= PROFILE_2) - vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, - "Unsupported bitstream profile"); -#endif - - cm->show_existing_frame = vpx_rb_read_bit(rb); - if (cm->show_existing_frame) { - // Show an existing frame directly. 
- const int frame_to_show = cm->ref_frame_map[vpx_rb_read_literal(rb, 3)]; - lock_buffer_pool(pool); - if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) { - unlock_buffer_pool(pool); - vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, - "Buffer %d does not contain a decoded frame", - frame_to_show); - } - - ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show); - unlock_buffer_pool(pool); - pbi->refresh_frame_flags = 0; - cm->lf.filter_level = 0; - cm->show_frame = 1; - - if (cm->frame_parallel_decode) { - for (i = 0; i < REF_FRAMES; ++i) - cm->next_ref_frame_map[i] = cm->ref_frame_map[i]; - } - return 0; - } - - cm->frame_type = (FRAME_TYPE) vpx_rb_read_bit(rb); - cm->show_frame = vpx_rb_read_bit(rb); - cm->error_resilient_mode = vpx_rb_read_bit(rb); - - if (cm->frame_type == KEY_FRAME) { - if (!vp10_read_sync_code(rb)) - vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, - "Invalid frame sync code"); - - read_bitdepth_colorspace_sampling(cm, rb); - pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1; - - for (i = 0; i < REFS_PER_FRAME; ++i) { - cm->frame_refs[i].idx = INVALID_IDX; - cm->frame_refs[i].buf = NULL; - } - - setup_frame_size(cm, rb); - if (pbi->need_resync) { - memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map)); - pbi->need_resync = 0; - } - } else { - cm->intra_only = cm->show_frame ? 0 : vpx_rb_read_bit(rb); - - if (cm->error_resilient_mode) { - cm->reset_frame_context = RESET_FRAME_CONTEXT_ALL; - } else { -#if CONFIG_MISC_FIXES - if (cm->intra_only) { - cm->reset_frame_context = - vpx_rb_read_bit(rb) ? RESET_FRAME_CONTEXT_ALL - : RESET_FRAME_CONTEXT_CURRENT; - } else { - cm->reset_frame_context = - vpx_rb_read_bit(rb) ? RESET_FRAME_CONTEXT_CURRENT - : RESET_FRAME_CONTEXT_NONE; - if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT) - cm->reset_frame_context = - vpx_rb_read_bit(rb) ? 
RESET_FRAME_CONTEXT_ALL - : RESET_FRAME_CONTEXT_CURRENT; - } -#else - static const RESET_FRAME_CONTEXT_MODE reset_frame_context_conv_tbl[4] = { - RESET_FRAME_CONTEXT_NONE, RESET_FRAME_CONTEXT_NONE, - RESET_FRAME_CONTEXT_CURRENT, RESET_FRAME_CONTEXT_ALL - }; - - cm->reset_frame_context = - reset_frame_context_conv_tbl[vpx_rb_read_literal(rb, 2)]; -#endif - } - - if (cm->intra_only) { - if (!vp10_read_sync_code(rb)) - vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, - "Invalid frame sync code"); -#if CONFIG_MISC_FIXES - read_bitdepth_colorspace_sampling(cm, rb); -#else - if (cm->profile > PROFILE_0) { - read_bitdepth_colorspace_sampling(cm, rb); - } else { - // NOTE: The intra-only frame header does not include the specification - // of either the color format or color sub-sampling in profile 0. VP9 - // specifies that the default color format should be YUV 4:2:0 in this - // case (normative). - cm->color_space = VPX_CS_BT_601; - cm->color_range = 0; - cm->subsampling_y = cm->subsampling_x = 1; - cm->bit_depth = VPX_BITS_8; -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth = 0; -#endif - } -#endif - - pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES); - setup_frame_size(cm, rb); - if (pbi->need_resync) { - memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map)); - pbi->need_resync = 0; - } - } else if (pbi->need_resync != 1) { /* Skip if need resync */ - pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES); - for (i = 0; i < REFS_PER_FRAME; ++i) { - const int ref = vpx_rb_read_literal(rb, REF_FRAMES_LOG2); - const int idx = cm->ref_frame_map[ref]; - RefBuffer *const ref_frame = &cm->frame_refs[i]; - ref_frame->idx = idx; - ref_frame->buf = &frame_bufs[idx].buf; - cm->ref_frame_sign_bias[LAST_FRAME + i] = vpx_rb_read_bit(rb); - } - - setup_frame_size_with_refs(cm, rb); - - cm->allow_high_precision_mv = vpx_rb_read_bit(rb); - cm->interp_filter = read_interp_filter(rb); - - for (i = 0; i < REFS_PER_FRAME; ++i) { - RefBuffer *const 
ref_buf = &cm->frame_refs[i]; -#if CONFIG_VP9_HIGHBITDEPTH - vp10_setup_scale_factors_for_frame(&ref_buf->sf, - ref_buf->buf->y_crop_width, - ref_buf->buf->y_crop_height, - cm->width, cm->height, - cm->use_highbitdepth); -#else - vp10_setup_scale_factors_for_frame(&ref_buf->sf, - ref_buf->buf->y_crop_width, - ref_buf->buf->y_crop_height, - cm->width, cm->height); -#endif - } - } - } -#if CONFIG_VP9_HIGHBITDEPTH - get_frame_new_buffer(cm)->bit_depth = cm->bit_depth; -#endif - get_frame_new_buffer(cm)->color_space = cm->color_space; - get_frame_new_buffer(cm)->color_range = cm->color_range; - get_frame_new_buffer(cm)->render_width = cm->render_width; - get_frame_new_buffer(cm)->render_height = cm->render_height; - - if (pbi->need_resync) { - vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, - "Keyframe / intra-only frame required to reset decoder" - " state"); - } - - if (!cm->error_resilient_mode) { - cm->refresh_frame_context = - vpx_rb_read_bit(rb) ? REFRESH_FRAME_CONTEXT_FORWARD - : REFRESH_FRAME_CONTEXT_OFF; - if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD) { - cm->refresh_frame_context = - vpx_rb_read_bit(rb) ? REFRESH_FRAME_CONTEXT_FORWARD - : REFRESH_FRAME_CONTEXT_BACKWARD; -#if !CONFIG_MISC_FIXES - } else { - vpx_rb_read_bit(rb); // parallel decoding mode flag -#endif - } - } else { - cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_OFF; - } - - // This flag will be overridden by the call to vp10_setup_past_independence - // below, forcing the use of context 0 for those frame types. - cm->frame_context_idx = vpx_rb_read_literal(rb, FRAME_CONTEXTS_LOG2); - - // Generate next_ref_frame_map. - lock_buffer_pool(pool); - for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) { - if (mask & 1) { - cm->next_ref_frame_map[ref_index] = cm->new_fb_idx; - ++frame_bufs[cm->new_fb_idx].ref_count; - } else { - cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index]; - } - // Current thread holds the reference frame. 
- if (cm->ref_frame_map[ref_index] >= 0) - ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count; - ++ref_index; - } - - for (; ref_index < REF_FRAMES; ++ref_index) { - cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index]; - // Current thread holds the reference frame. - if (cm->ref_frame_map[ref_index] >= 0) - ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count; - } - unlock_buffer_pool(pool); - pbi->hold_ref_buf = 1; - - if (frame_is_intra_only(cm) || cm->error_resilient_mode) - vp10_setup_past_independence(cm); - - setup_loopfilter(&cm->lf, rb); - setup_quantization(cm, rb); -#if CONFIG_VP9_HIGHBITDEPTH - xd->bd = (int)cm->bit_depth; -#endif - - setup_segmentation(cm, rb); - - { - int i; - for (i = 0; i < MAX_SEGMENTS; ++i) { - const int qindex = CONFIG_MISC_FIXES && cm->seg.enabled ? - vp10_get_qindex(&cm->seg, i, cm->base_qindex) : - cm->base_qindex; - xd->lossless[i] = qindex == 0 && - cm->y_dc_delta_q == 0 && - cm->uv_dc_delta_q == 0 && - cm->uv_ac_delta_q == 0; - } - } - - setup_segmentation_dequant(cm); -#if CONFIG_MISC_FIXES - cm->tx_mode = (!cm->seg.enabled && xd->lossless[0]) ? 
ONLY_4X4 - : read_tx_mode(rb); - cm->reference_mode = read_frame_reference_mode(cm, rb); -#endif - - setup_tile_info(cm, rb); - sz = vpx_rb_read_literal(rb, 16); - - if (sz == 0) - vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, - "Invalid header size"); - - return sz; -} - -static void read_ext_tx_probs(FRAME_CONTEXT *fc, vpx_reader *r) { - int i, j, k; - if (vpx_read(r, GROUP_DIFF_UPDATE_PROB)) { - for (i = TX_4X4; i < EXT_TX_SIZES; ++i) { - for (j = 0; j < TX_TYPES; ++j) - for (k = 0; k < TX_TYPES - 1; ++k) - vp10_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k]); - } - } - if (vpx_read(r, GROUP_DIFF_UPDATE_PROB)) { - for (i = TX_4X4; i < EXT_TX_SIZES; ++i) { - for (k = 0; k < TX_TYPES - 1; ++k) - vp10_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k]); - } - } -} - -static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data, - size_t partition_size) { - VP10_COMMON *const cm = &pbi->common; -#if !CONFIG_MISC_FIXES - MACROBLOCKD *const xd = &pbi->mb; -#endif - FRAME_CONTEXT *const fc = cm->fc; - vpx_reader r; - int k, i, j; - - if (vpx_reader_init(&r, data, partition_size, pbi->decrypt_cb, - pbi->decrypt_state)) - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate bool decoder 0"); - -#if !CONFIG_MISC_FIXES - cm->tx_mode = xd->lossless[0] ? 
ONLY_4X4 : read_tx_mode(&r); -#endif - if (cm->tx_mode == TX_MODE_SELECT) - read_tx_mode_probs(&fc->tx_probs, &r); - read_coef_probs(fc, cm->tx_mode, &r); - - for (k = 0; k < SKIP_CONTEXTS; ++k) - vp10_diff_update_prob(&r, &fc->skip_probs[k]); - -#if CONFIG_MISC_FIXES - if (cm->seg.enabled) { - if (cm->seg.temporal_update) { - for (k = 0; k < PREDICTION_PROBS; k++) - vp10_diff_update_prob(&r, &cm->fc->seg.pred_probs[k]); - } - for (k = 0; k < MAX_SEGMENTS - 1; k++) - vp10_diff_update_prob(&r, &cm->fc->seg.tree_probs[k]); - } - - for (j = 0; j < INTRA_MODES; j++) - for (i = 0; i < INTRA_MODES - 1; ++i) - vp10_diff_update_prob(&r, &fc->uv_mode_prob[j][i]); - - for (j = 0; j < PARTITION_CONTEXTS; ++j) - for (i = 0; i < PARTITION_TYPES - 1; ++i) - vp10_diff_update_prob(&r, &fc->partition_prob[j][i]); -#endif - - if (frame_is_intra_only(cm)) { - vp10_copy(cm->kf_y_prob, vp10_kf_y_mode_prob); -#if CONFIG_MISC_FIXES - for (k = 0; k < INTRA_MODES; k++) - for (j = 0; j < INTRA_MODES; j++) - for (i = 0; i < INTRA_MODES - 1; ++i) - vp10_diff_update_prob(&r, &cm->kf_y_prob[k][j][i]); -#endif - } else { - nmv_context *const nmvc = &fc->nmvc; - - read_inter_mode_probs(fc, &r); - - if (cm->interp_filter == SWITCHABLE) - read_switchable_interp_probs(fc, &r); - - for (i = 0; i < INTRA_INTER_CONTEXTS; i++) - vp10_diff_update_prob(&r, &fc->intra_inter_prob[i]); - -#if !CONFIG_MISC_FIXES - cm->reference_mode = read_frame_reference_mode(cm, &r); -#endif - if (cm->reference_mode != SINGLE_REFERENCE) - setup_compound_reference_mode(cm); - read_frame_reference_mode_probs(cm, &r); - - for (j = 0; j < BLOCK_SIZE_GROUPS; j++) - for (i = 0; i < INTRA_MODES - 1; ++i) - vp10_diff_update_prob(&r, &fc->y_mode_prob[j][i]); - -#if !CONFIG_MISC_FIXES - for (j = 0; j < PARTITION_CONTEXTS; ++j) - for (i = 0; i < PARTITION_TYPES - 1; ++i) - vp10_diff_update_prob(&r, &fc->partition_prob[j][i]); -#endif - - read_mv_probs(nmvc, cm->allow_high_precision_mv, &r); - read_ext_tx_probs(fc, &r); - } - - return 
vpx_reader_has_error(&r); -} - -#ifdef NDEBUG -#define debug_check_frame_counts(cm) (void)0 -#else // !NDEBUG -// Counts should only be incremented when frame_parallel_decoding_mode and -// error_resilient_mode are disabled. -static void debug_check_frame_counts(const VP10_COMMON *const cm) { - FRAME_COUNTS zero_counts; - vp10_zero(zero_counts); - assert(cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD || - cm->error_resilient_mode); - assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode, - sizeof(cm->counts.y_mode))); - assert(!memcmp(cm->counts.uv_mode, zero_counts.uv_mode, - sizeof(cm->counts.uv_mode))); - assert(!memcmp(cm->counts.partition, zero_counts.partition, - sizeof(cm->counts.partition))); - assert(!memcmp(cm->counts.coef, zero_counts.coef, - sizeof(cm->counts.coef))); - assert(!memcmp(cm->counts.eob_branch, zero_counts.eob_branch, - sizeof(cm->counts.eob_branch))); - assert(!memcmp(cm->counts.switchable_interp, zero_counts.switchable_interp, - sizeof(cm->counts.switchable_interp))); - assert(!memcmp(cm->counts.inter_mode, zero_counts.inter_mode, - sizeof(cm->counts.inter_mode))); - assert(!memcmp(cm->counts.intra_inter, zero_counts.intra_inter, - sizeof(cm->counts.intra_inter))); - assert(!memcmp(cm->counts.comp_inter, zero_counts.comp_inter, - sizeof(cm->counts.comp_inter))); - assert(!memcmp(cm->counts.single_ref, zero_counts.single_ref, - sizeof(cm->counts.single_ref))); - assert(!memcmp(cm->counts.comp_ref, zero_counts.comp_ref, - sizeof(cm->counts.comp_ref))); - assert(!memcmp(&cm->counts.tx, &zero_counts.tx, sizeof(cm->counts.tx))); - assert(!memcmp(cm->counts.skip, zero_counts.skip, sizeof(cm->counts.skip))); - assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv))); - assert(!memcmp(cm->counts.intra_ext_tx, zero_counts.intra_ext_tx, - sizeof(cm->counts.intra_ext_tx))); - assert(!memcmp(cm->counts.inter_ext_tx, zero_counts.inter_ext_tx, - sizeof(cm->counts.inter_ext_tx))); -} -#endif // NDEBUG - -static struct 
vpx_read_bit_buffer *init_read_bit_buffer( - VP10Decoder *pbi, - struct vpx_read_bit_buffer *rb, - const uint8_t *data, - const uint8_t *data_end, - uint8_t clear_data[MAX_VP9_HEADER_SIZE]) { - rb->bit_offset = 0; - rb->error_handler = error_handler; - rb->error_handler_data = &pbi->common; - if (pbi->decrypt_cb) { - const int n = (int)VPXMIN(MAX_VP9_HEADER_SIZE, data_end - data); - pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n); - rb->bit_buffer = clear_data; - rb->bit_buffer_end = clear_data + n; - } else { - rb->bit_buffer = data; - rb->bit_buffer_end = data_end; - } - return rb; -} - -//------------------------------------------------------------------------------ - -int vp10_read_sync_code(struct vpx_read_bit_buffer *const rb) { - return vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_0 && - vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_1 && - vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_2; -} - -void vp10_read_frame_size(struct vpx_read_bit_buffer *rb, - int *width, int *height) { - *width = vpx_rb_read_literal(rb, 16) + 1; - *height = vpx_rb_read_literal(rb, 16) + 1; -} - -BITSTREAM_PROFILE vp10_read_profile(struct vpx_read_bit_buffer *rb) { - int profile = vpx_rb_read_bit(rb); - profile |= vpx_rb_read_bit(rb) << 1; - if (profile > 2) - profile += vpx_rb_read_bit(rb); - return (BITSTREAM_PROFILE) profile; -} - -void vp10_decode_frame(VP10Decoder *pbi, - const uint8_t *data, const uint8_t *data_end, - const uint8_t **p_data_end) { - VP10_COMMON *const cm = &pbi->common; - MACROBLOCKD *const xd = &pbi->mb; - struct vpx_read_bit_buffer rb; - int context_updated = 0; - uint8_t clear_data[MAX_VP9_HEADER_SIZE]; - const size_t first_partition_size = read_uncompressed_header(pbi, - init_read_bit_buffer(pbi, &rb, data, data_end, clear_data)); - const int tile_rows = 1 << cm->log2_tile_rows; - const int tile_cols = 1 << cm->log2_tile_cols; - YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm); - xd->cur_buf = new_fb; - - if (!first_partition_size) { - // 
showing a frame directly - *p_data_end = data + (cm->profile <= PROFILE_2 ? 1 : 2); - return; - } - - data += vpx_rb_bytes_read(&rb); - if (!read_is_valid(data, first_partition_size, data_end)) - vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, - "Truncated packet or corrupt header length"); - - cm->use_prev_frame_mvs = !cm->error_resilient_mode && - cm->width == cm->last_width && - cm->height == cm->last_height && - !cm->last_intra_only && - cm->last_show_frame && - (cm->last_frame_type != KEY_FRAME); - - vp10_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y); - - *cm->fc = cm->frame_contexts[cm->frame_context_idx]; - if (!cm->fc->initialized) - vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, - "Uninitialized entropy context."); - - vp10_zero(cm->counts); - - xd->corrupted = 0; - new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size); - if (new_fb->corrupted) - vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, - "Decode failed. Frame data header is corrupted."); - - if (cm->lf.filter_level && !cm->skip_loop_filter) { - vp10_loop_filter_frame_init(cm, cm->lf.filter_level); - } - - // If encoded in frame parallel mode, frame context is ready after decoding - // the frame header. - if (cm->frame_parallel_decode && - cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD) { - VPxWorker *const worker = pbi->frame_worker_owner; - FrameWorkerData *const frame_worker_data = worker->data1; - if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD) { - context_updated = 1; - cm->frame_contexts[cm->frame_context_idx] = *cm->fc; - } - vp10_frameworker_lock_stats(worker); - pbi->cur_buf->row = -1; - pbi->cur_buf->col = -1; - frame_worker_data->frame_context_ready = 1; - // Signal the main thread that context is ready. 
- vp10_frameworker_signal_stats(worker); - vp10_frameworker_unlock_stats(worker); - } - - if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1) { - // Multi-threaded tile decoder - *p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end); - if (!xd->corrupted) { - if (!cm->skip_loop_filter) { - // If multiple threads are used to decode tiles, then we use those - // threads to do parallel loopfiltering. - vp10_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane, - cm->lf.filter_level, 0, 0, pbi->tile_workers, - pbi->num_tile_workers, &pbi->lf_row_sync); - } - } else { - vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, - "Decode failed. Frame data is corrupted."); - - } - } else { - *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end); - } - - if (!xd->corrupted) { - if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) { - vp10_adapt_coef_probs(cm); -#if CONFIG_MISC_FIXES - vp10_adapt_intra_frame_probs(cm); -#endif - - if (!frame_is_intra_only(cm)) { -#if !CONFIG_MISC_FIXES - vp10_adapt_intra_frame_probs(cm); -#endif - vp10_adapt_inter_frame_probs(cm); - vp10_adapt_mv_probs(cm, cm->allow_high_precision_mv); - } - } else { - debug_check_frame_counts(cm); - } - } else { - vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, - "Decode failed. Frame data is corrupted."); - } - - // Non frame parallel update frame context here. - if (cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF && - !context_updated) - cm->frame_contexts[cm->frame_context_idx] = *cm->fc; -} diff --git a/libs/libvpx/vp10/decoder/decodeframe.h b/libs/libvpx/vp10/decoder/decodeframe.h deleted file mode 100644 index 770ae154e5..0000000000 --- a/libs/libvpx/vp10/decoder/decodeframe.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#ifndef VP10_DECODER_DECODEFRAME_H_ -#define VP10_DECODER_DECODEFRAME_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -struct VP10Decoder; -struct vpx_read_bit_buffer; - -int vp10_read_sync_code(struct vpx_read_bit_buffer *const rb); -void vp10_read_frame_size(struct vpx_read_bit_buffer *rb, - int *width, int *height); -BITSTREAM_PROFILE vp10_read_profile(struct vpx_read_bit_buffer *rb); - -void vp10_decode_frame(struct VP10Decoder *pbi, - const uint8_t *data, const uint8_t *data_end, - const uint8_t **p_data_end); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_DECODER_DECODEFRAME_H_ diff --git a/libs/libvpx/vp10/decoder/decodemv.c b/libs/libvpx/vp10/decoder/decodemv.c deleted file mode 100644 index 01b796c106..0000000000 --- a/libs/libvpx/vp10/decoder/decodemv.c +++ /dev/null @@ -1,704 +0,0 @@ -/* - Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include - -#include "vp10/common/common.h" -#include "vp10/common/entropy.h" -#include "vp10/common/entropymode.h" -#include "vp10/common/entropymv.h" -#include "vp10/common/mvref_common.h" -#include "vp10/common/pred_common.h" -#include "vp10/common/reconinter.h" -#include "vp10/common/seg_common.h" - -#include "vp10/decoder/decodemv.h" -#include "vp10/decoder/decodeframe.h" - -#include "vpx_dsp/vpx_dsp_common.h" - -static PREDICTION_MODE read_intra_mode(vpx_reader *r, const vpx_prob *p) { - return (PREDICTION_MODE)vpx_read_tree(r, vp10_intra_mode_tree, p); -} - -static PREDICTION_MODE read_intra_mode_y(VP10_COMMON *cm, MACROBLOCKD *xd, - vpx_reader *r, int size_group) { - const PREDICTION_MODE y_mode = - read_intra_mode(r, cm->fc->y_mode_prob[size_group]); - FRAME_COUNTS *counts = xd->counts; - if (counts) - ++counts->y_mode[size_group][y_mode]; - return y_mode; -} - -static PREDICTION_MODE read_intra_mode_uv(VP10_COMMON *cm, MACROBLOCKD *xd, - vpx_reader *r, - PREDICTION_MODE y_mode) { - const PREDICTION_MODE uv_mode = read_intra_mode(r, - cm->fc->uv_mode_prob[y_mode]); - FRAME_COUNTS *counts = xd->counts; - if (counts) - ++counts->uv_mode[y_mode][uv_mode]; - return uv_mode; -} - -static PREDICTION_MODE read_inter_mode(VP10_COMMON *cm, MACROBLOCKD *xd, - vpx_reader *r, int ctx) { - const int mode = vpx_read_tree(r, vp10_inter_mode_tree, - cm->fc->inter_mode_probs[ctx]); - FRAME_COUNTS *counts = xd->counts; - if (counts) - ++counts->inter_mode[ctx][mode]; - - return NEARESTMV + mode; -} - -static int read_segment_id(vpx_reader *r, - const struct segmentation_probs *segp) { - return vpx_read_tree(r, vp10_segment_tree, segp->tree_probs); -} - -static TX_SIZE read_selected_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd, - TX_SIZE max_tx_size, vpx_reader *r) { - FRAME_COUNTS *counts = xd->counts; - const int ctx = get_tx_size_context(xd); - const vpx_prob *tx_probs = get_tx_probs(max_tx_size, ctx, &cm->fc->tx_probs); - int tx_size = vpx_read(r, tx_probs[0]); - if 
(tx_size != TX_4X4 && max_tx_size >= TX_16X16) { - tx_size += vpx_read(r, tx_probs[1]); - if (tx_size != TX_8X8 && max_tx_size >= TX_32X32) - tx_size += vpx_read(r, tx_probs[2]); - } - - if (counts) - ++get_tx_counts(max_tx_size, ctx, &counts->tx)[tx_size]; - return (TX_SIZE)tx_size; -} - -static TX_SIZE read_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd, - int allow_select, vpx_reader *r) { - TX_MODE tx_mode = cm->tx_mode; - BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type; - const TX_SIZE max_tx_size = max_txsize_lookup[bsize]; - if (xd->lossless[xd->mi[0]->mbmi.segment_id]) - return TX_4X4; - if (allow_select && tx_mode == TX_MODE_SELECT && bsize >= BLOCK_8X8) - return read_selected_tx_size(cm, xd, max_tx_size, r); - else - return VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[tx_mode]); -} - -static int dec_get_segment_id(const VP10_COMMON *cm, const uint8_t *segment_ids, - int mi_offset, int x_mis, int y_mis) { - int x, y, segment_id = INT_MAX; - - for (y = 0; y < y_mis; y++) - for (x = 0; x < x_mis; x++) - segment_id = - VPXMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]); - - assert(segment_id >= 0 && segment_id < MAX_SEGMENTS); - return segment_id; -} - -static void set_segment_id(VP10_COMMON *cm, int mi_offset, - int x_mis, int y_mis, int segment_id) { - int x, y; - - assert(segment_id >= 0 && segment_id < MAX_SEGMENTS); - - for (y = 0; y < y_mis; y++) - for (x = 0; x < x_mis; x++) - cm->current_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id; -} - -static int read_intra_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd, - int mi_offset, int x_mis, int y_mis, - vpx_reader *r) { - struct segmentation *const seg = &cm->seg; -#if CONFIG_MISC_FIXES - FRAME_COUNTS *counts = xd->counts; - struct segmentation_probs *const segp = &cm->fc->seg; -#else - struct segmentation_probs *const segp = &cm->segp; -#endif - int segment_id; - -#if !CONFIG_MISC_FIXES - (void) xd; -#endif - - if (!seg->enabled) - return 0; // Default for disabled segmentation 
- - assert(seg->update_map && !seg->temporal_update); - - segment_id = read_segment_id(r, segp); -#if CONFIG_MISC_FIXES - if (counts) - ++counts->seg.tree_total[segment_id]; -#endif - set_segment_id(cm, mi_offset, x_mis, y_mis, segment_id); - return segment_id; -} - -static void copy_segment_id(const VP10_COMMON *cm, - const uint8_t *last_segment_ids, - uint8_t *current_segment_ids, - int mi_offset, int x_mis, int y_mis) { - int x, y; - - for (y = 0; y < y_mis; y++) - for (x = 0; x < x_mis; x++) - current_segment_ids[mi_offset + y * cm->mi_cols + x] = last_segment_ids ? - last_segment_ids[mi_offset + y * cm->mi_cols + x] : 0; -} - -static int read_inter_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd, - int mi_row, int mi_col, vpx_reader *r) { - struct segmentation *const seg = &cm->seg; -#if CONFIG_MISC_FIXES - FRAME_COUNTS *counts = xd->counts; - struct segmentation_probs *const segp = &cm->fc->seg; -#else - struct segmentation_probs *const segp = &cm->segp; -#endif - MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - int predicted_segment_id, segment_id; - const int mi_offset = mi_row * cm->mi_cols + mi_col; - const int bw = xd->plane[0].n4_w >> 1; - const int bh = xd->plane[0].n4_h >> 1; - - // TODO(slavarnway): move x_mis, y_mis into xd ????? - const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw); - const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh); - - if (!seg->enabled) - return 0; // Default for disabled segmentation - - predicted_segment_id = cm->last_frame_seg_map ? 
- dec_get_segment_id(cm, cm->last_frame_seg_map, mi_offset, x_mis, y_mis) : - 0; - - if (!seg->update_map) { - copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map, - mi_offset, x_mis, y_mis); - return predicted_segment_id; - } - - if (seg->temporal_update) { - const int ctx = vp10_get_pred_context_seg_id(xd); - const vpx_prob pred_prob = segp->pred_probs[ctx]; - mbmi->seg_id_predicted = vpx_read(r, pred_prob); -#if CONFIG_MISC_FIXES - if (counts) - ++counts->seg.pred[ctx][mbmi->seg_id_predicted]; -#endif - if (mbmi->seg_id_predicted) { - segment_id = predicted_segment_id; - } else { - segment_id = read_segment_id(r, segp); -#if CONFIG_MISC_FIXES - if (counts) - ++counts->seg.tree_mispred[segment_id]; -#endif - } - } else { - segment_id = read_segment_id(r, segp); -#if CONFIG_MISC_FIXES - if (counts) - ++counts->seg.tree_total[segment_id]; -#endif - } - set_segment_id(cm, mi_offset, x_mis, y_mis, segment_id); - return segment_id; -} - -static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd, - int segment_id, vpx_reader *r) { - if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) { - return 1; - } else { - const int ctx = vp10_get_skip_context(xd); - const int skip = vpx_read(r, cm->fc->skip_probs[ctx]); - FRAME_COUNTS *counts = xd->counts; - if (counts) - ++counts->skip[ctx][skip]; - return skip; - } -} - -static void read_intra_frame_mode_info(VP10_COMMON *const cm, - MACROBLOCKD *const xd, - int mi_row, int mi_col, vpx_reader *r) { - MODE_INFO *const mi = xd->mi[0]; - MB_MODE_INFO *const mbmi = &mi->mbmi; - const MODE_INFO *above_mi = xd->above_mi; - const MODE_INFO *left_mi = xd->left_mi; - const BLOCK_SIZE bsize = mbmi->sb_type; - int i; - const int mi_offset = mi_row * cm->mi_cols + mi_col; - const int bw = xd->plane[0].n4_w >> 1; - const int bh = xd->plane[0].n4_h >> 1; - - // TODO(slavarnway): move x_mis, y_mis into xd ????? 
- const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw); - const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh); - - mbmi->segment_id = read_intra_segment_id(cm, xd, mi_offset, x_mis, y_mis, r); - mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r); - mbmi->tx_size = read_tx_size(cm, xd, 1, r); - mbmi->ref_frame[0] = INTRA_FRAME; - mbmi->ref_frame[1] = NONE; - - switch (bsize) { - case BLOCK_4X4: - for (i = 0; i < 4; ++i) - mi->bmi[i].as_mode = - read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, i)); - mbmi->mode = mi->bmi[3].as_mode; - break; - case BLOCK_4X8: - mi->bmi[0].as_mode = mi->bmi[2].as_mode = - read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 0)); - mi->bmi[1].as_mode = mi->bmi[3].as_mode = mbmi->mode = - read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 1)); - break; - case BLOCK_8X4: - mi->bmi[0].as_mode = mi->bmi[1].as_mode = - read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 0)); - mi->bmi[2].as_mode = mi->bmi[3].as_mode = mbmi->mode = - read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 2)); - break; - default: - mbmi->mode = read_intra_mode(r, - get_y_mode_probs(cm, mi, above_mi, left_mi, 0)); - } - - mbmi->uv_mode = read_intra_mode_uv(cm, xd, r, mbmi->mode); - - if (mbmi->tx_size < TX_32X32 && - cm->base_qindex > 0 && !mbmi->skip && - !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { - FRAME_COUNTS *counts = xd->counts; - TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode]; - mbmi->tx_type = vpx_read_tree( - r, vp10_ext_tx_tree, - cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]); - if (counts) - ++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type]; - } else { - mbmi->tx_type = DCT_DCT; - } -} - -static int read_mv_component(vpx_reader *r, - const nmv_component *mvcomp, int usehp) { - int mag, d, fr, hp; - const int sign = vpx_read(r, mvcomp->sign); - const int mv_class = vpx_read_tree(r, vp10_mv_class_tree, mvcomp->classes); - const int 
class0 = mv_class == MV_CLASS_0; - - // Integer part - if (class0) { - d = vpx_read_tree(r, vp10_mv_class0_tree, mvcomp->class0); - mag = 0; - } else { - int i; - const int n = mv_class + CLASS0_BITS - 1; // number of bits - - d = 0; - for (i = 0; i < n; ++i) - d |= vpx_read(r, mvcomp->bits[i]) << i; - mag = CLASS0_SIZE << (mv_class + 2); - } - - // Fractional part - fr = vpx_read_tree(r, vp10_mv_fp_tree, class0 ? mvcomp->class0_fp[d] - : mvcomp->fp); - - // High precision part (if hp is not used, the default value of the hp is 1) - hp = usehp ? vpx_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) - : 1; - - // Result - mag += ((d << 3) | (fr << 1) | hp) + 1; - return sign ? -mag : mag; -} - -static INLINE void read_mv(vpx_reader *r, MV *mv, const MV *ref, - const nmv_context *ctx, - nmv_context_counts *counts, int allow_hp) { - const MV_JOINT_TYPE joint_type = - (MV_JOINT_TYPE)vpx_read_tree(r, vp10_mv_joint_tree, ctx->joints); - const int use_hp = allow_hp && vp10_use_mv_hp(ref); - MV diff = {0, 0}; - - if (mv_joint_vertical(joint_type)) - diff.row = read_mv_component(r, &ctx->comps[0], use_hp); - - if (mv_joint_horizontal(joint_type)) - diff.col = read_mv_component(r, &ctx->comps[1], use_hp); - - vp10_inc_mv(&diff, counts, use_hp); - - mv->row = ref->row + diff.row; - mv->col = ref->col + diff.col; -} - -static REFERENCE_MODE read_block_reference_mode(VP10_COMMON *cm, - const MACROBLOCKD *xd, - vpx_reader *r) { - if (cm->reference_mode == REFERENCE_MODE_SELECT) { - const int ctx = vp10_get_reference_mode_context(cm, xd); - const REFERENCE_MODE mode = - (REFERENCE_MODE)vpx_read(r, cm->fc->comp_inter_prob[ctx]); - FRAME_COUNTS *counts = xd->counts; - if (counts) - ++counts->comp_inter[ctx][mode]; - return mode; // SINGLE_REFERENCE or COMPOUND_REFERENCE - } else { - return cm->reference_mode; - } -} - -// Read the referncence frame -static void read_ref_frames(VP10_COMMON *const cm, MACROBLOCKD *const xd, - vpx_reader *r, - int segment_id, MV_REFERENCE_FRAME 
ref_frame[2]) { - FRAME_CONTEXT *const fc = cm->fc; - FRAME_COUNTS *counts = xd->counts; - - if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) { - ref_frame[0] = (MV_REFERENCE_FRAME)get_segdata(&cm->seg, segment_id, - SEG_LVL_REF_FRAME); - ref_frame[1] = NONE; - } else { - const REFERENCE_MODE mode = read_block_reference_mode(cm, xd, r); - // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding - if (mode == COMPOUND_REFERENCE) { - const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref]; - const int ctx = vp10_get_pred_context_comp_ref_p(cm, xd); - const int bit = vpx_read(r, fc->comp_ref_prob[ctx]); - if (counts) - ++counts->comp_ref[ctx][bit]; - ref_frame[idx] = cm->comp_fixed_ref; - ref_frame[!idx] = cm->comp_var_ref[bit]; - } else if (mode == SINGLE_REFERENCE) { - const int ctx0 = vp10_get_pred_context_single_ref_p1(xd); - const int bit0 = vpx_read(r, fc->single_ref_prob[ctx0][0]); - if (counts) - ++counts->single_ref[ctx0][0][bit0]; - if (bit0) { - const int ctx1 = vp10_get_pred_context_single_ref_p2(xd); - const int bit1 = vpx_read(r, fc->single_ref_prob[ctx1][1]); - if (counts) - ++counts->single_ref[ctx1][1][bit1]; - ref_frame[0] = bit1 ? 
ALTREF_FRAME : GOLDEN_FRAME; - } else { - ref_frame[0] = LAST_FRAME; - } - - ref_frame[1] = NONE; - } else { - assert(0 && "Invalid prediction mode."); - } - } -} - - -static INLINE INTERP_FILTER read_switchable_interp_filter( - VP10_COMMON *const cm, MACROBLOCKD *const xd, - vpx_reader *r) { - const int ctx = vp10_get_pred_context_switchable_interp(xd); - const INTERP_FILTER type = - (INTERP_FILTER)vpx_read_tree(r, vp10_switchable_interp_tree, - cm->fc->switchable_interp_prob[ctx]); - FRAME_COUNTS *counts = xd->counts; - if (counts) - ++counts->switchable_interp[ctx][type]; - return type; -} - -static void read_intra_block_mode_info(VP10_COMMON *const cm, - MACROBLOCKD *const xd, MODE_INFO *mi, - vpx_reader *r) { - MB_MODE_INFO *const mbmi = &mi->mbmi; - const BLOCK_SIZE bsize = mi->mbmi.sb_type; - int i; - - mbmi->ref_frame[0] = INTRA_FRAME; - mbmi->ref_frame[1] = NONE; - - switch (bsize) { - case BLOCK_4X4: - for (i = 0; i < 4; ++i) - mi->bmi[i].as_mode = read_intra_mode_y(cm, xd, r, 0); - mbmi->mode = mi->bmi[3].as_mode; - break; - case BLOCK_4X8: - mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode_y(cm, xd, - r, 0); - mi->bmi[1].as_mode = mi->bmi[3].as_mode = mbmi->mode = - read_intra_mode_y(cm, xd, r, 0); - break; - case BLOCK_8X4: - mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode_y(cm, xd, - r, 0); - mi->bmi[2].as_mode = mi->bmi[3].as_mode = mbmi->mode = - read_intra_mode_y(cm, xd, r, 0); - break; - default: - mbmi->mode = read_intra_mode_y(cm, xd, r, size_group_lookup[bsize]); - } - - mbmi->uv_mode = read_intra_mode_uv(cm, xd, r, mbmi->mode); -} - -static INLINE int is_mv_valid(const MV *mv) { - return mv->row > MV_LOW && mv->row < MV_UPP && - mv->col > MV_LOW && mv->col < MV_UPP; -} - -static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd, - PREDICTION_MODE mode, - int_mv mv[2], int_mv ref_mv[2], - int_mv nearest_mv[2], int_mv near_mv[2], - int is_compound, int allow_hp, vpx_reader *r) { - int i; - int ret = 1; - - switch (mode) { 
- case NEWMV: { - FRAME_COUNTS *counts = xd->counts; - nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL; - for (i = 0; i < 1 + is_compound; ++i) { - read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc->nmvc, mv_counts, - allow_hp); - ret = ret && is_mv_valid(&mv[i].as_mv); - } - break; - } - case NEARESTMV: { - mv[0].as_int = nearest_mv[0].as_int; - if (is_compound) - mv[1].as_int = nearest_mv[1].as_int; - break; - } - case NEARMV: { - mv[0].as_int = near_mv[0].as_int; - if (is_compound) - mv[1].as_int = near_mv[1].as_int; - break; - } - case ZEROMV: { - mv[0].as_int = 0; - if (is_compound) - mv[1].as_int = 0; - break; - } - default: { - return 0; - } - } - return ret; -} - -static int read_is_inter_block(VP10_COMMON *const cm, MACROBLOCKD *const xd, - int segment_id, vpx_reader *r) { - if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) { - return get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) != INTRA_FRAME; - } else { - const int ctx = vp10_get_intra_inter_context(xd); - const int is_inter = vpx_read(r, cm->fc->intra_inter_prob[ctx]); - FRAME_COUNTS *counts = xd->counts; - if (counts) - ++counts->intra_inter[ctx][is_inter]; - return is_inter; - } -} - -static void fpm_sync(void *const data, int mi_row) { - VP10Decoder *const pbi = (VP10Decoder *)data; - vp10_frameworker_wait(pbi->frame_worker_owner, pbi->common.prev_frame, - mi_row << MI_BLOCK_SIZE_LOG2); -} - -static void read_inter_block_mode_info(VP10Decoder *const pbi, - MACROBLOCKD *const xd, - MODE_INFO *const mi, - int mi_row, int mi_col, vpx_reader *r) { - VP10_COMMON *const cm = &pbi->common; - MB_MODE_INFO *const mbmi = &mi->mbmi; - const BLOCK_SIZE bsize = mbmi->sb_type; - const int allow_hp = cm->allow_high_precision_mv; - int_mv nearestmv[2], nearmv[2]; - int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES]; - int ref, is_compound; - uint8_t inter_mode_ctx[MAX_REF_FRAMES]; - - read_ref_frames(cm, xd, r, mbmi->segment_id, mbmi->ref_frame); - is_compound = 
has_second_ref(mbmi); - - for (ref = 0; ref < 1 + is_compound; ++ref) { - const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref]; - RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME]; - - xd->block_refs[ref] = ref_buf; - if ((!vp10_is_valid_scale(&ref_buf->sf))) - vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM, - "Reference frame has invalid dimensions"); - vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col, - &ref_buf->sf); - vp10_find_mv_refs(cm, xd, mi, frame, ref_mvs[frame], - mi_row, mi_col, fpm_sync, (void *)pbi, inter_mode_ctx); - } - - if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { - mbmi->mode = ZEROMV; - if (bsize < BLOCK_8X8) { - vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM, - "Invalid usage of segement feature on small blocks"); - return; - } - } else { - if (bsize >= BLOCK_8X8) - mbmi->mode = read_inter_mode(cm, xd, r, - inter_mode_ctx[mbmi->ref_frame[0]]); - } - - if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) { - for (ref = 0; ref < 1 + is_compound; ++ref) { - vp10_find_best_ref_mvs(allow_hp, ref_mvs[mbmi->ref_frame[ref]], - &nearestmv[ref], &nearmv[ref]); - } - } - - mbmi->interp_filter = (cm->interp_filter == SWITCHABLE) - ? 
read_switchable_interp_filter(cm, xd, r) - : cm->interp_filter; - - if (bsize < BLOCK_8X8) { - const int num_4x4_w = 1 << xd->bmode_blocks_wl; - const int num_4x4_h = 1 << xd->bmode_blocks_hl; - int idx, idy; - PREDICTION_MODE b_mode; - int_mv nearest_sub8x8[2], near_sub8x8[2]; - for (idy = 0; idy < 2; idy += num_4x4_h) { - for (idx = 0; idx < 2; idx += num_4x4_w) { - int_mv block[2]; - const int j = idy * 2 + idx; - b_mode = read_inter_mode(cm, xd, r, inter_mode_ctx[mbmi->ref_frame[0]]); - - if (b_mode == NEARESTMV || b_mode == NEARMV) { - uint8_t dummy_mode_ctx[MAX_REF_FRAMES]; - for (ref = 0; ref < 1 + is_compound; ++ref) - vp10_append_sub8x8_mvs_for_idx(cm, xd, j, ref, mi_row, mi_col, - &nearest_sub8x8[ref], - &near_sub8x8[ref], - dummy_mode_ctx); - } - - if (!assign_mv(cm, xd, b_mode, block, nearestmv, - nearest_sub8x8, near_sub8x8, - is_compound, allow_hp, r)) { - xd->corrupted |= 1; - break; - }; - - mi->bmi[j].as_mv[0].as_int = block[0].as_int; - if (is_compound) - mi->bmi[j].as_mv[1].as_int = block[1].as_int; - - if (num_4x4_h == 2) - mi->bmi[j + 2] = mi->bmi[j]; - if (num_4x4_w == 2) - mi->bmi[j + 1] = mi->bmi[j]; - } - } - - mi->mbmi.mode = b_mode; - - mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int; - mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int; - } else { - xd->corrupted |= !assign_mv(cm, xd, mbmi->mode, mbmi->mv, nearestmv, - nearestmv, nearmv, is_compound, allow_hp, r); - } -} - -static void read_inter_frame_mode_info(VP10Decoder *const pbi, - MACROBLOCKD *const xd, - int mi_row, int mi_col, vpx_reader *r) { - VP10_COMMON *const cm = &pbi->common; - MODE_INFO *const mi = xd->mi[0]; - MB_MODE_INFO *const mbmi = &mi->mbmi; - int inter_block; - - mbmi->mv[0].as_int = 0; - mbmi->mv[1].as_int = 0; - mbmi->segment_id = read_inter_segment_id(cm, xd, mi_row, mi_col, r); - mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r); - inter_block = read_is_inter_block(cm, xd, mbmi->segment_id, r); - mbmi->tx_size = read_tx_size(cm, xd, !mbmi->skip || 
!inter_block, r); - - if (inter_block) - read_inter_block_mode_info(pbi, xd, mi, mi_row, mi_col, r); - else - read_intra_block_mode_info(cm, xd, mi, r); - - if (mbmi->tx_size < TX_32X32 && - cm->base_qindex > 0 && !mbmi->skip && - !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { - FRAME_COUNTS *counts = xd->counts; - if (inter_block) { - mbmi->tx_type = vpx_read_tree( - r, vp10_ext_tx_tree, - cm->fc->inter_ext_tx_prob[mbmi->tx_size]); - if (counts) - ++counts->inter_ext_tx[mbmi->tx_size][mbmi->tx_type]; - } else { - const TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode]; - mbmi->tx_type = vpx_read_tree( - r, vp10_ext_tx_tree, - cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]); - if (counts) - ++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type]; - } - } else { - mbmi->tx_type = DCT_DCT; - } -} - -void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd, - int mi_row, int mi_col, vpx_reader *r, - int x_mis, int y_mis) { - VP10_COMMON *const cm = &pbi->common; - MODE_INFO *const mi = xd->mi[0]; - MV_REF* frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col; - int w, h; - - if (frame_is_intra_only(cm)) { - read_intra_frame_mode_info(cm, xd, mi_row, mi_col, r); - } else { - read_inter_frame_mode_info(pbi, xd, mi_row, mi_col, r); - - for (h = 0; h < y_mis; ++h) { - MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols; - for (w = 0; w < x_mis; ++w) { - MV_REF *const mv = frame_mv + w; - mv->ref_frame[0] = mi->mbmi.ref_frame[0]; - mv->ref_frame[1] = mi->mbmi.ref_frame[1]; - mv->mv[0].as_int = mi->mbmi.mv[0].as_int; - mv->mv[1].as_int = mi->mbmi.mv[1].as_int; - } - } - } -} diff --git a/libs/libvpx/vp10/decoder/decodemv.h b/libs/libvpx/vp10/decoder/decodemv.h deleted file mode 100644 index 6653be5f69..0000000000 --- a/libs/libvpx/vp10/decoder/decodemv.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_DECODER_DECODEMV_H_ -#define VP10_DECODER_DECODEMV_H_ - -#include "vpx_dsp/bitreader.h" - -#include "vp10/decoder/decoder.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd, - int mi_row, int mi_col, vpx_reader *r, - int x_mis, int y_mis); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_DECODER_DECODEMV_H_ diff --git a/libs/libvpx/vp10/decoder/decoder.c b/libs/libvpx/vp10/decoder/decoder.c deleted file mode 100644 index d8864d22e6..0000000000 --- a/libs/libvpx/vp10/decoder/decoder.c +++ /dev/null @@ -1,522 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include -#include - -#include "./vp10_rtcd.h" -#include "./vpx_dsp_rtcd.h" -#include "./vpx_scale_rtcd.h" - -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/system_state.h" -#include "vpx_ports/vpx_once.h" -#include "vpx_ports/vpx_timer.h" -#include "vpx_scale/vpx_scale.h" -#include "vpx_util/vpx_thread.h" - -#include "vp10/common/alloccommon.h" -#include "vp10/common/loopfilter.h" -#include "vp10/common/onyxc_int.h" -#if CONFIG_VP9_POSTPROC -#include "vp10/common/postproc.h" -#endif -#include "vp10/common/quant_common.h" -#include "vp10/common/reconintra.h" - -#include "vp10/decoder/decodeframe.h" -#include "vp10/decoder/decoder.h" -#include "vp10/decoder/detokenize.h" - -static void initialize_dec(void) { - static volatile int init_done = 0; - - if (!init_done) { - vp10_rtcd(); - vpx_dsp_rtcd(); - vpx_scale_rtcd(); - vp10_init_intra_predictors(); - init_done = 1; - } -} - -static void vp10_dec_setup_mi(VP10_COMMON *cm) { - cm->mi = cm->mip + cm->mi_stride + 1; - cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1; - memset(cm->mi_grid_base, 0, - cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base)); -} - -static int vp10_dec_alloc_mi(VP10_COMMON *cm, int mi_size) { - cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip)); - if (!cm->mip) - return 1; - cm->mi_alloc_size = mi_size; - cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*)); - if (!cm->mi_grid_base) - return 1; - return 0; -} - -static void vp10_dec_free_mi(VP10_COMMON *cm) { - vpx_free(cm->mip); - cm->mip = NULL; - vpx_free(cm->mi_grid_base); - cm->mi_grid_base = NULL; -} - -VP10Decoder *vp10_decoder_create(BufferPool *const pool) { - VP10Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi)); - VP10_COMMON *volatile const cm = pbi ? 
&pbi->common : NULL; - - if (!cm) - return NULL; - - vp10_zero(*pbi); - - if (setjmp(cm->error.jmp)) { - cm->error.setjmp = 0; - vp10_decoder_remove(pbi); - return NULL; - } - - cm->error.setjmp = 1; - - CHECK_MEM_ERROR(cm, cm->fc, - (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc))); - CHECK_MEM_ERROR(cm, cm->frame_contexts, - (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, - sizeof(*cm->frame_contexts))); - - pbi->need_resync = 1; - once(initialize_dec); - - // Initialize the references to not point to any frame buffers. - memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map)); - memset(&cm->next_ref_frame_map, -1, sizeof(cm->next_ref_frame_map)); - - cm->current_video_frame = 0; - pbi->ready_for_new_data = 1; - pbi->common.buffer_pool = pool; - - cm->bit_depth = VPX_BITS_8; - cm->dequant_bit_depth = VPX_BITS_8; - - cm->alloc_mi = vp10_dec_alloc_mi; - cm->free_mi = vp10_dec_free_mi; - cm->setup_mi = vp10_dec_setup_mi; - - vp10_loop_filter_init(cm); - - cm->error.setjmp = 0; - - vpx_get_worker_interface()->init(&pbi->lf_worker); - - return pbi; -} - -void vp10_decoder_remove(VP10Decoder *pbi) { - int i; - - if (!pbi) - return; - - vpx_get_worker_interface()->end(&pbi->lf_worker); - vpx_free(pbi->lf_worker.data1); - vpx_free(pbi->tile_data); - for (i = 0; i < pbi->num_tile_workers; ++i) { - VPxWorker *const worker = &pbi->tile_workers[i]; - vpx_get_worker_interface()->end(worker); - } - vpx_free(pbi->tile_worker_data); - vpx_free(pbi->tile_worker_info); - vpx_free(pbi->tile_workers); - - if (pbi->num_tile_workers > 0) { - vp10_loop_filter_dealloc(&pbi->lf_row_sync); - } - - vpx_free(pbi); -} - -static int equal_dimensions(const YV12_BUFFER_CONFIG *a, - const YV12_BUFFER_CONFIG *b) { - return a->y_height == b->y_height && a->y_width == b->y_width && - a->uv_height == b->uv_height && a->uv_width == b->uv_width; -} - -vpx_codec_err_t vp10_copy_reference_dec(VP10Decoder *pbi, - VP9_REFFRAME ref_frame_flag, - YV12_BUFFER_CONFIG *sd) { - VP10_COMMON *cm = &pbi->common; - - 
/* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the - * encoder is using the frame buffers for. This is just a stub to keep the - * vpxenc --test-decode functionality working, and will be replaced in a - * later commit that adds VP9-specific controls for this functionality. - */ - if (ref_frame_flag == VP9_LAST_FLAG) { - const YV12_BUFFER_CONFIG *const cfg = get_ref_frame(cm, 0); - if (cfg == NULL) { - vpx_internal_error(&cm->error, VPX_CODEC_ERROR, - "No 'last' reference frame"); - return VPX_CODEC_ERROR; - } - if (!equal_dimensions(cfg, sd)) - vpx_internal_error(&cm->error, VPX_CODEC_ERROR, - "Incorrect buffer dimensions"); - else - vp8_yv12_copy_frame(cfg, sd); - } else { - vpx_internal_error(&cm->error, VPX_CODEC_ERROR, - "Invalid reference frame"); - } - - return cm->error.error_code; -} - - -vpx_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm, - VP9_REFFRAME ref_frame_flag, - YV12_BUFFER_CONFIG *sd) { - RefBuffer *ref_buf = NULL; - RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; - - // TODO(jkoleszar): The decoder doesn't have any real knowledge of what the - // encoder is using the frame buffers for. This is just a stub to keep the - // vpxenc --test-decode functionality working, and will be replaced in a - // later commit that adds VP9-specific controls for this functionality. - if (ref_frame_flag == VP9_LAST_FLAG) { - ref_buf = &cm->frame_refs[0]; - } else if (ref_frame_flag == VP9_GOLD_FLAG) { - ref_buf = &cm->frame_refs[1]; - } else if (ref_frame_flag == VP9_ALT_FLAG) { - ref_buf = &cm->frame_refs[2]; - } else { - vpx_internal_error(&cm->error, VPX_CODEC_ERROR, - "Invalid reference frame"); - return cm->error.error_code; - } - - if (!equal_dimensions(ref_buf->buf, sd)) { - vpx_internal_error(&cm->error, VPX_CODEC_ERROR, - "Incorrect buffer dimensions"); - } else { - int *ref_fb_ptr = &ref_buf->idx; - - // Find an empty frame buffer. 
- const int free_fb = get_free_fb(cm); - if (cm->new_fb_idx == INVALID_IDX) - return VPX_CODEC_MEM_ERROR; - - // Decrease ref_count since it will be increased again in - // ref_cnt_fb() below. - --frame_bufs[free_fb].ref_count; - - // Manage the reference counters and copy image. - ref_cnt_fb(frame_bufs, ref_fb_ptr, free_fb); - ref_buf->buf = &frame_bufs[*ref_fb_ptr].buf; - vp8_yv12_copy_frame(sd, ref_buf->buf); - } - - return cm->error.error_code; -} - -/* If any buffer updating is signaled it should be done here. */ -static void swap_frame_buffers(VP10Decoder *pbi) { - int ref_index = 0, mask; - VP10_COMMON *const cm = &pbi->common; - BufferPool *const pool = cm->buffer_pool; - RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; - - lock_buffer_pool(pool); - for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) { - const int old_idx = cm->ref_frame_map[ref_index]; - // Current thread releases the holding of reference frame. - decrease_ref_count(old_idx, frame_bufs, pool); - - // Release the reference frame in reference map. - if ((mask & 1) && old_idx >= 0) { - decrease_ref_count(old_idx, frame_bufs, pool); - } - cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index]; - ++ref_index; - } - - // Current thread releases the holding of reference frame. - for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) { - const int old_idx = cm->ref_frame_map[ref_index]; - decrease_ref_count(old_idx, frame_bufs, pool); - cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index]; - } - unlock_buffer_pool(pool); - pbi->hold_ref_buf = 0; - cm->frame_to_show = get_frame_new_buffer(cm); - - if (!cm->frame_parallel_decode || !cm->show_frame) { - lock_buffer_pool(pool); - --frame_bufs[cm->new_fb_idx].ref_count; - unlock_buffer_pool(pool); - } - - // Invalidate these references until the next frame starts. 
- for (ref_index = 0; ref_index < 3; ref_index++) - cm->frame_refs[ref_index].idx = -1; -} - -int vp10_receive_compressed_data(VP10Decoder *pbi, - size_t size, const uint8_t **psource) { - VP10_COMMON *volatile const cm = &pbi->common; - BufferPool *volatile const pool = cm->buffer_pool; - RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs; - const uint8_t *source = *psource; - int retcode = 0; - cm->error.error_code = VPX_CODEC_OK; - - if (size == 0) { - // This is used to signal that we are missing frames. - // We do not know if the missing frame(s) was supposed to update - // any of the reference buffers, but we act conservative and - // mark only the last buffer as corrupted. - // - // TODO(jkoleszar): Error concealment is undefined and non-normative - // at this point, but if it becomes so, [0] may not always be the correct - // thing to do here. - if (cm->frame_refs[0].idx > 0) { - assert(cm->frame_refs[0].buf != NULL); - cm->frame_refs[0].buf->corrupted = 1; - } - } - - pbi->ready_for_new_data = 0; - - // Check if the previous frame was a frame without any references to it. - // Release frame buffer if not decoding in frame parallel mode. - if (!cm->frame_parallel_decode && cm->new_fb_idx >= 0 - && frame_bufs[cm->new_fb_idx].ref_count == 0) - pool->release_fb_cb(pool->cb_priv, - &frame_bufs[cm->new_fb_idx].raw_frame_buffer); - // Find a free frame buffer. Return error if can not find any. - cm->new_fb_idx = get_free_fb(cm); - if (cm->new_fb_idx == INVALID_IDX) - return VPX_CODEC_MEM_ERROR; - - // Assign a MV array to the frame buffer. - cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx]; - - pbi->hold_ref_buf = 0; - if (cm->frame_parallel_decode) { - VPxWorker *const worker = pbi->frame_worker_owner; - vp10_frameworker_lock_stats(worker); - frame_bufs[cm->new_fb_idx].frame_worker_owner = worker; - // Reset decoding progress. 
- pbi->cur_buf = &frame_bufs[cm->new_fb_idx]; - pbi->cur_buf->row = -1; - pbi->cur_buf->col = -1; - vp10_frameworker_unlock_stats(worker); - } else { - pbi->cur_buf = &frame_bufs[cm->new_fb_idx]; - } - - - if (setjmp(cm->error.jmp)) { - const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); - int i; - - cm->error.setjmp = 0; - pbi->ready_for_new_data = 1; - - // Synchronize all threads immediately as a subsequent decode call may - // cause a resize invalidating some allocations. - winterface->sync(&pbi->lf_worker); - for (i = 0; i < pbi->num_tile_workers; ++i) { - winterface->sync(&pbi->tile_workers[i]); - } - - lock_buffer_pool(pool); - // Release all the reference buffers if worker thread is holding them. - if (pbi->hold_ref_buf == 1) { - int ref_index = 0, mask; - for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) { - const int old_idx = cm->ref_frame_map[ref_index]; - // Current thread releases the holding of reference frame. - decrease_ref_count(old_idx, frame_bufs, pool); - - // Release the reference frame in reference map. - if ((mask & 1) && old_idx >= 0) { - decrease_ref_count(old_idx, frame_bufs, pool); - } - ++ref_index; - } - - // Current thread releases the holding of reference frame. - for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) { - const int old_idx = cm->ref_frame_map[ref_index]; - decrease_ref_count(old_idx, frame_bufs, pool); - } - pbi->hold_ref_buf = 0; - } - // Release current frame. 
- decrease_ref_count(cm->new_fb_idx, frame_bufs, pool); - unlock_buffer_pool(pool); - - vpx_clear_system_state(); - return -1; - } - - cm->error.setjmp = 1; - vp10_decode_frame(pbi, source, source + size, psource); - - swap_frame_buffers(pbi); - - vpx_clear_system_state(); - - if (!cm->show_existing_frame) { - cm->last_show_frame = cm->show_frame; - cm->prev_frame = cm->cur_frame; - if (cm->seg.enabled && !cm->frame_parallel_decode) - vp10_swap_current_and_last_seg_map(cm); - } - - // Update progress in frame parallel decode. - if (cm->frame_parallel_decode) { - // Need to lock the mutex here as another thread may - // be accessing this buffer. - VPxWorker *const worker = pbi->frame_worker_owner; - FrameWorkerData *const frame_worker_data = worker->data1; - vp10_frameworker_lock_stats(worker); - - if (cm->show_frame) { - cm->current_video_frame++; - } - frame_worker_data->frame_decoded = 1; - frame_worker_data->frame_context_ready = 1; - vp10_frameworker_signal_stats(worker); - vp10_frameworker_unlock_stats(worker); - } else { - cm->last_width = cm->width; - cm->last_height = cm->height; - if (cm->show_frame) { - cm->current_video_frame++; - } - } - - cm->error.setjmp = 0; - return retcode; -} - -int vp10_get_raw_frame(VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd, - vp10_ppflags_t *flags) { - VP10_COMMON *const cm = &pbi->common; - int ret = -1; -#if !CONFIG_VP9_POSTPROC - (void)*flags; -#endif - - if (pbi->ready_for_new_data == 1) - return ret; - - pbi->ready_for_new_data = 1; - - /* no raw frame to show!!! 
*/ - if (!cm->show_frame) - return ret; - - pbi->ready_for_new_data = 1; - -#if CONFIG_VP9_POSTPROC - if (!cm->show_existing_frame) { - ret = vp10_post_proc_frame(cm, sd, flags); - } else { - *sd = *cm->frame_to_show; - ret = 0; - } -#else - *sd = *cm->frame_to_show; - ret = 0; -#endif /*!CONFIG_POSTPROC*/ - vpx_clear_system_state(); - return ret; -} - -vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data, - size_t data_sz, - uint32_t sizes[8], int *count, - vpx_decrypt_cb decrypt_cb, - void *decrypt_state) { - // A chunk ending with a byte matching 0xc0 is an invalid chunk unless - // it is a super frame index. If the last byte of real video compression - // data is 0xc0 the encoder must add a 0 byte. If we have the marker but - // not the associated matching marker byte at the front of the index we have - // an invalid bitstream and need to return an error. - - uint8_t marker; -#if CONFIG_MISC_FIXES - size_t frame_sz_sum = 0; -#endif - - assert(data_sz); - marker = read_marker(decrypt_cb, decrypt_state, data + data_sz - 1); - *count = 0; - - if ((marker & 0xe0) == 0xc0) { - const uint32_t frames = (marker & 0x7) + 1; - const uint32_t mag = ((marker >> 3) & 0x3) + 1; - const size_t index_sz = 2 + mag * (frames - CONFIG_MISC_FIXES); - - // This chunk is marked as having a superframe index but doesn't have - // enough data for it, thus it's an invalid superframe index. - if (data_sz < index_sz) - return VPX_CODEC_CORRUPT_FRAME; - - { - const uint8_t marker2 = read_marker(decrypt_cb, decrypt_state, - data + data_sz - index_sz); - - // This chunk is marked as having a superframe index but doesn't have - // the matching marker byte at the front of the index therefore it's an - // invalid chunk. - if (marker != marker2) - return VPX_CODEC_CORRUPT_FRAME; - } - - { - // Found a valid superframe index. - uint32_t i, j; - const uint8_t *x = &data[data_sz - index_sz + 1]; - - // Frames has a maximum of 8 and mag has a maximum of 4. 
- uint8_t clear_buffer[32]; - assert(sizeof(clear_buffer) >= frames * mag); - if (decrypt_cb) { - decrypt_cb(decrypt_state, x, clear_buffer, frames * mag); - x = clear_buffer; - } - - for (i = 0; i < frames - CONFIG_MISC_FIXES; ++i) { - uint32_t this_sz = 0; - - for (j = 0; j < mag; ++j) - this_sz |= (*x++) << (j * 8); - this_sz += CONFIG_MISC_FIXES; - sizes[i] = this_sz; -#if CONFIG_MISC_FIXES - frame_sz_sum += this_sz; -#endif - } -#if CONFIG_MISC_FIXES - sizes[i] = data_sz - index_sz - frame_sz_sum; -#endif - *count = frames; - } - } - return VPX_CODEC_OK; -} diff --git a/libs/libvpx/vp10/decoder/decoder.h b/libs/libvpx/vp10/decoder/decoder.h deleted file mode 100644 index 72a6310202..0000000000 --- a/libs/libvpx/vp10/decoder/decoder.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_DECODER_DECODER_H_ -#define VP10_DECODER_DECODER_H_ - -#include "./vpx_config.h" - -#include "vpx/vpx_codec.h" -#include "vpx_dsp/bitreader.h" -#include "vpx_scale/yv12config.h" -#include "vpx_util/vpx_thread.h" - -#include "vp10/common/thread_common.h" -#include "vp10/common/onyxc_int.h" -#include "vp10/common/ppflags.h" -#include "vp10/decoder/dthread.h" - -#ifdef __cplusplus -extern "C" { -#endif - -// TODO(hkuang): combine this with TileWorkerData. -typedef struct TileData { - VP10_COMMON *cm; - vpx_reader bit_reader; - DECLARE_ALIGNED(16, MACROBLOCKD, xd); - /* dqcoeff are shared by all the planes. 
So planes must be decoded serially */ - DECLARE_ALIGNED(16, tran_low_t, dqcoeff[32 * 32]); - DECLARE_ALIGNED(16, uint8_t, color_index_map[2][64 * 64]); -} TileData; - -typedef struct TileWorkerData { - struct VP10Decoder *pbi; - vpx_reader bit_reader; - FRAME_COUNTS counts; - DECLARE_ALIGNED(16, MACROBLOCKD, xd); - /* dqcoeff are shared by all the planes. So planes must be decoded serially */ - DECLARE_ALIGNED(16, tran_low_t, dqcoeff[32 * 32]); - DECLARE_ALIGNED(16, uint8_t, color_index_map[2][64 * 64]); - struct vpx_internal_error_info error_info; -} TileWorkerData; - -typedef struct VP10Decoder { - DECLARE_ALIGNED(16, MACROBLOCKD, mb); - - DECLARE_ALIGNED(16, VP10_COMMON, common); - - int ready_for_new_data; - - int refresh_frame_flags; - - // TODO(hkuang): Combine this with cur_buf in macroblockd as they are - // the same. - RefCntBuffer *cur_buf; // Current decoding frame buffer. - - VPxWorker *frame_worker_owner; // frame_worker that owns this pbi. - VPxWorker lf_worker; - VPxWorker *tile_workers; - TileWorkerData *tile_worker_data; - TileInfo *tile_worker_info; - int num_tile_workers; - - TileData *tile_data; - int total_tiles; - - VP9LfSync lf_row_sync; - - vpx_decrypt_cb decrypt_cb; - void *decrypt_state; - - int max_threads; - int inv_tile_order; - int need_resync; // wait for key/intra-only frame. - int hold_ref_buf; // hold the reference buffer. 
-} VP10Decoder; - -int vp10_receive_compressed_data(struct VP10Decoder *pbi, - size_t size, const uint8_t **dest); - -int vp10_get_raw_frame(struct VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd, - vp10_ppflags_t *flags); - -vpx_codec_err_t vp10_copy_reference_dec(struct VP10Decoder *pbi, - VP9_REFFRAME ref_frame_flag, - YV12_BUFFER_CONFIG *sd); - -vpx_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm, - VP9_REFFRAME ref_frame_flag, - YV12_BUFFER_CONFIG *sd); - -static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb, - void *decrypt_state, - const uint8_t *data) { - if (decrypt_cb) { - uint8_t marker; - decrypt_cb(decrypt_state, data, &marker, 1); - return marker; - } - return *data; -} - -// This function is exposed for use in tests, as well as the inlined function -// "read_marker". -vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data, - size_t data_sz, - uint32_t sizes[8], int *count, - vpx_decrypt_cb decrypt_cb, - void *decrypt_state); - -struct VP10Decoder *vp10_decoder_create(BufferPool *const pool); - -void vp10_decoder_remove(struct VP10Decoder *pbi); - -static INLINE void decrease_ref_count(int idx, RefCntBuffer *const frame_bufs, - BufferPool *const pool) { - if (idx >= 0) { - --frame_bufs[idx].ref_count; - // A worker may only get a free framebuffer index when calling get_free_fb. - // But the private buffer is not set up until finish decoding header. - // So any error happens during decoding header, the frame_bufs will not - // have valid priv buffer. 
- if (frame_bufs[idx].ref_count == 0 && - frame_bufs[idx].raw_frame_buffer.priv) { - pool->release_fb_cb(pool->cb_priv, &frame_bufs[idx].raw_frame_buffer); - } - } -} - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_DECODER_DECODER_H_ diff --git a/libs/libvpx/vp10/decoder/detokenize.c b/libs/libvpx/vp10/decoder/detokenize.c deleted file mode 100644 index d39e3dc06c..0000000000 --- a/libs/libvpx/vp10/decoder/detokenize.c +++ /dev/null @@ -1,276 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/mem.h" - -#include "vp10/common/blockd.h" -#include "vp10/common/common.h" -#include "vp10/common/entropy.h" -#if CONFIG_COEFFICIENT_RANGE_CHECKING -#include "vp10/common/idct.h" -#endif - -#include "vp10/decoder/detokenize.h" - -#define EOB_CONTEXT_NODE 0 -#define ZERO_CONTEXT_NODE 1 -#define ONE_CONTEXT_NODE 2 -#define LOW_VAL_CONTEXT_NODE 0 -#define TWO_CONTEXT_NODE 1 -#define THREE_CONTEXT_NODE 2 -#define HIGH_LOW_CONTEXT_NODE 3 -#define CAT_ONE_CONTEXT_NODE 4 -#define CAT_THREEFOUR_CONTEXT_NODE 5 -#define CAT_THREE_CONTEXT_NODE 6 -#define CAT_FIVE_CONTEXT_NODE 7 - -#define INCREMENT_COUNT(token) \ - do { \ - if (counts) \ - ++coef_counts[band][ctx][token]; \ - } while (0) - -static INLINE int read_coeff(const vpx_prob *probs, int n, vpx_reader *r) { - int i, val = 0; - for (i = 0; i < n; ++i) - val = (val << 1) | vpx_read(r, probs[i]); - return val; -} - -static int decode_coefs(const MACROBLOCKD *xd, - PLANE_TYPE type, - tran_low_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq, - int ctx, const int16_t *scan, const int16_t *nb, - vpx_reader 
*r) { - FRAME_COUNTS *counts = xd->counts; - const int max_eob = 16 << (tx_size << 1); - const FRAME_CONTEXT *const fc = xd->fc; - const int ref = is_inter_block(&xd->mi[0]->mbmi); - int band, c = 0; - const vpx_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] = - fc->coef_probs[tx_size][type][ref]; - const vpx_prob *prob; - unsigned int (*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1]; - unsigned int (*eob_branch_count)[COEFF_CONTEXTS]; - uint8_t token_cache[32 * 32]; - const uint8_t *band_translate = get_band_translate(tx_size); - const int dq_shift = (tx_size == TX_32X32); - int v, token; - int16_t dqv = dq[0]; - const uint8_t *cat1_prob; - const uint8_t *cat2_prob; - const uint8_t *cat3_prob; - const uint8_t *cat4_prob; - const uint8_t *cat5_prob; - const uint8_t *cat6_prob; - - if (counts) { - coef_counts = counts->coef[tx_size][type][ref]; - eob_branch_count = counts->eob_branch[tx_size][type][ref]; - } - -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->bd > VPX_BITS_8) { - if (xd->bd == VPX_BITS_10) { - cat1_prob = vp10_cat1_prob_high10; - cat2_prob = vp10_cat2_prob_high10; - cat3_prob = vp10_cat3_prob_high10; - cat4_prob = vp10_cat4_prob_high10; - cat5_prob = vp10_cat5_prob_high10; - cat6_prob = vp10_cat6_prob_high10; - } else { - cat1_prob = vp10_cat1_prob_high12; - cat2_prob = vp10_cat2_prob_high12; - cat3_prob = vp10_cat3_prob_high12; - cat4_prob = vp10_cat4_prob_high12; - cat5_prob = vp10_cat5_prob_high12; - cat6_prob = vp10_cat6_prob_high12; - } - } else { - cat1_prob = vp10_cat1_prob; - cat2_prob = vp10_cat2_prob; - cat3_prob = vp10_cat3_prob; - cat4_prob = vp10_cat4_prob; - cat5_prob = vp10_cat5_prob; - cat6_prob = vp10_cat6_prob; - } -#else - cat1_prob = vp10_cat1_prob; - cat2_prob = vp10_cat2_prob; - cat3_prob = vp10_cat3_prob; - cat4_prob = vp10_cat4_prob; - cat5_prob = vp10_cat5_prob; - cat6_prob = vp10_cat6_prob; -#endif - - while (c < max_eob) { - int val = -1; - band = *band_translate++; - prob = coef_probs[band][ctx]; - if (counts) - 
++eob_branch_count[band][ctx]; - if (!vpx_read(r, prob[EOB_CONTEXT_NODE])) { - INCREMENT_COUNT(EOB_MODEL_TOKEN); - break; - } - - while (!vpx_read(r, prob[ZERO_CONTEXT_NODE])) { - INCREMENT_COUNT(ZERO_TOKEN); - dqv = dq[1]; - token_cache[scan[c]] = 0; - ++c; - if (c >= max_eob) - return c; // zero tokens at the end (no eob token) - ctx = get_coef_context(nb, token_cache, c); - band = *band_translate++; - prob = coef_probs[band][ctx]; - } - - if (!vpx_read(r, prob[ONE_CONTEXT_NODE])) { - INCREMENT_COUNT(ONE_TOKEN); - token = ONE_TOKEN; - val = 1; - } else { - INCREMENT_COUNT(TWO_TOKEN); - token = vpx_read_tree(r, vp10_coef_con_tree, - vp10_pareto8_full[prob[PIVOT_NODE] - 1]); - switch (token) { - case TWO_TOKEN: - case THREE_TOKEN: - case FOUR_TOKEN: - val = token; - break; - case CATEGORY1_TOKEN: - val = CAT1_MIN_VAL + read_coeff(cat1_prob, 1, r); - break; - case CATEGORY2_TOKEN: - val = CAT2_MIN_VAL + read_coeff(cat2_prob, 2, r); - break; - case CATEGORY3_TOKEN: - val = CAT3_MIN_VAL + read_coeff(cat3_prob, 3, r); - break; - case CATEGORY4_TOKEN: - val = CAT4_MIN_VAL + read_coeff(cat4_prob, 4, r); - break; - case CATEGORY5_TOKEN: - val = CAT5_MIN_VAL + read_coeff(cat5_prob, 5, r); - break; - case CATEGORY6_TOKEN: { -#if CONFIG_MISC_FIXES - const int skip_bits = TX_SIZES - 1 - tx_size; -#else - const int skip_bits = 0; -#endif - const uint8_t *cat6p = cat6_prob + skip_bits; -#if CONFIG_VP9_HIGHBITDEPTH - switch (xd->bd) { - case VPX_BITS_8: - val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, r); - break; - case VPX_BITS_10: - val = CAT6_MIN_VAL + read_coeff(cat6p, 16 - skip_bits, r); - break; - case VPX_BITS_12: - val = CAT6_MIN_VAL + read_coeff(cat6p, 18 - skip_bits, r); - break; - default: - assert(0); - return -1; - } -#else - val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, r); -#endif - break; - } - } - } - v = (val * dqv) >> dq_shift; -#if CONFIG_COEFFICIENT_RANGE_CHECKING -#if CONFIG_VP9_HIGHBITDEPTH - dqcoeff[scan[c]] = 
highbd_check_range((vpx_read_bit(r) ? -v : v), - xd->bd); -#else - dqcoeff[scan[c]] = check_range(vpx_read_bit(r) ? -v : v); -#endif // CONFIG_VP9_HIGHBITDEPTH -#else - dqcoeff[scan[c]] = vpx_read_bit(r) ? -v : v; -#endif // CONFIG_COEFFICIENT_RANGE_CHECKING - token_cache[scan[c]] = vp10_pt_energy_class[token]; - ++c; - ctx = get_coef_context(nb, token_cache, c); - dqv = dq[1]; - } - - return c; -} - -// TODO(slavarnway): Decode version of vp10_set_context. Modify vp10_set_context -// after testing is complete, then delete this version. -static -void dec_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd, - TX_SIZE tx_size, int has_eob, - int aoff, int loff) { - ENTROPY_CONTEXT *const a = pd->above_context + aoff; - ENTROPY_CONTEXT *const l = pd->left_context + loff; - const int tx_size_in_blocks = 1 << tx_size; - - // above - if (has_eob && xd->mb_to_right_edge < 0) { - int i; - const int blocks_wide = pd->n4_w + - (xd->mb_to_right_edge >> (5 + pd->subsampling_x)); - int above_contexts = tx_size_in_blocks; - if (above_contexts + aoff > blocks_wide) - above_contexts = blocks_wide - aoff; - - for (i = 0; i < above_contexts; ++i) - a[i] = has_eob; - for (i = above_contexts; i < tx_size_in_blocks; ++i) - a[i] = 0; - } else { - memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); - } - - // left - if (has_eob && xd->mb_to_bottom_edge < 0) { - int i; - const int blocks_high = pd->n4_h + - (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); - int left_contexts = tx_size_in_blocks; - if (left_contexts + loff > blocks_high) - left_contexts = blocks_high - loff; - - for (i = 0; i < left_contexts; ++i) - l[i] = has_eob; - for (i = left_contexts; i < tx_size_in_blocks; ++i) - l[i] = 0; - } else { - memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); - } -} - -int vp10_decode_block_tokens(MACROBLOCKD *xd, - int plane, const scan_order *sc, - int x, int y, - TX_SIZE tx_size, vpx_reader *r, - int seg_id) { - struct macroblockd_plane *const 
pd = &xd->plane[plane]; - const int16_t *const dequant = pd->seg_dequant[seg_id]; - const int ctx = get_entropy_context(tx_size, pd->above_context + x, - pd->left_context + y); - const int eob = decode_coefs(xd, pd->plane_type, - pd->dqcoeff, tx_size, - dequant, ctx, sc->scan, sc->neighbors, r); - dec_set_contexts(xd, pd, tx_size, eob > 0, x, y); - return eob; -} - - diff --git a/libs/libvpx/vp10/decoder/detokenize.h b/libs/libvpx/vp10/decoder/detokenize.h deleted file mode 100644 index c3fd90a728..0000000000 --- a/libs/libvpx/vp10/decoder/detokenize.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#ifndef VP10_DECODER_DETOKENIZE_H_ -#define VP10_DECODER_DETOKENIZE_H_ - -#include "vpx_dsp/bitreader.h" -#include "vp10/decoder/decoder.h" -#include "vp10/common/scan.h" - -#ifdef __cplusplus -extern "C" { -#endif - -int vp10_decode_block_tokens(MACROBLOCKD *xd, - int plane, const scan_order *sc, - int x, int y, - TX_SIZE tx_size, vpx_reader *r, - int seg_id); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_DECODER_DETOKENIZE_H_ diff --git a/libs/libvpx/vp10/decoder/dsubexp.c b/libs/libvpx/vp10/decoder/dsubexp.c deleted file mode 100644 index 36c1917bc2..0000000000 --- a/libs/libvpx/vp10/decoder/dsubexp.c +++ /dev/null @@ -1,79 +0,0 @@ -/* - Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. 
All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - -#include "vp10/common/entropy.h" - -#include "vp10/decoder/dsubexp.h" - -static int inv_recenter_nonneg(int v, int m) { - if (v > 2 * m) - return v; - - return (v & 1) ? m - ((v + 1) >> 1) : m + (v >> 1); -} - -static int decode_uniform(vpx_reader *r) { - const int l = 8; - const int m = (1 << l) - 191 + CONFIG_MISC_FIXES; - const int v = vpx_read_literal(r, l - 1); - return v < m ? v : (v << 1) - m + vpx_read_bit(r); -} - -static int inv_remap_prob(int v, int m) { - static uint8_t inv_map_table[MAX_PROB - CONFIG_MISC_FIXES] = { - 7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176, 189, - 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 60, - 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, - 77, 78, 79, 80, 81, 82, 83, 84, 86, 87, 88, 89, 90, 91, 92, - 93, 94, 95, 96, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, - 109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 125, - 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 138, 139, 140, 141, - 142, 143, 144, 145, 146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, - 158, 159, 160, 161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, - 174, 175, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, - 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206, - 207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221, 222, - 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, - 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, -#if !CONFIG_MISC_FIXES - 253 -#endif - }; - assert(v < (int)(sizeof(inv_map_table) / sizeof(inv_map_table[0]))); - v = inv_map_table[v]; - m--; - 
if ((m << 1) <= MAX_PROB) { - return 1 + inv_recenter_nonneg(v, m); - } else { - return MAX_PROB - inv_recenter_nonneg(v, MAX_PROB - 1 - m); - } -} - -static int decode_term_subexp(vpx_reader *r) { - if (!vpx_read_bit(r)) - return vpx_read_literal(r, 4); - if (!vpx_read_bit(r)) - return vpx_read_literal(r, 4) + 16; - if (!vpx_read_bit(r)) - return vpx_read_literal(r, 5) + 32; - return decode_uniform(r) + 64; -} - -void vp10_diff_update_prob(vpx_reader *r, vpx_prob* p) { - if (vpx_read(r, DIFF_UPDATE_PROB)) { - const int delp = decode_term_subexp(r); - *p = (vpx_prob)inv_remap_prob(delp, *p); - } -} diff --git a/libs/libvpx/vp10/decoder/dthread.c b/libs/libvpx/vp10/decoder/dthread.c deleted file mode 100644 index 4206adcb61..0000000000 --- a/libs/libvpx/vp10/decoder/dthread.c +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "./vpx_config.h" -#include "vpx_mem/vpx_mem.h" -#include "vp10/common/reconinter.h" -#include "vp10/decoder/dthread.h" -#include "vp10/decoder/decoder.h" - -// #define DEBUG_THREAD - -// TODO(hkuang): Clean up all the #ifdef in this file. 
-void vp10_frameworker_lock_stats(VPxWorker *const worker) { -#if CONFIG_MULTITHREAD - FrameWorkerData *const worker_data = worker->data1; - pthread_mutex_lock(&worker_data->stats_mutex); -#else - (void)worker; -#endif -} - -void vp10_frameworker_unlock_stats(VPxWorker *const worker) { -#if CONFIG_MULTITHREAD - FrameWorkerData *const worker_data = worker->data1; - pthread_mutex_unlock(&worker_data->stats_mutex); -#else - (void)worker; -#endif -} - -void vp10_frameworker_signal_stats(VPxWorker *const worker) { -#if CONFIG_MULTITHREAD - FrameWorkerData *const worker_data = worker->data1; - -// TODO(hkuang): Fix the pthread_cond_broadcast in windows wrapper. -#if defined(_WIN32) && !HAVE_PTHREAD_H - pthread_cond_signal(&worker_data->stats_cond); -#else - pthread_cond_broadcast(&worker_data->stats_cond); -#endif - -#else - (void)worker; -#endif -} - -// This macro prevents thread_sanitizer from reporting known concurrent writes. -#if defined(__has_feature) -#if __has_feature(thread_sanitizer) -#define BUILDING_WITH_TSAN -#endif -#endif - -// TODO(hkuang): Remove worker parameter as it is only used in debug code. -void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf, - int row) { -#if CONFIG_MULTITHREAD - if (!ref_buf) - return; - -#ifndef BUILDING_WITH_TSAN - // The following line of code will get harmless tsan error but it is the key - // to get best performance. - if (ref_buf->row >= row && ref_buf->buf.corrupted != 1) return; -#endif - - { - // Find the worker thread that owns the reference frame. If the reference - // frame has been fully decoded, it may not have owner. 
- VPxWorker *const ref_worker = ref_buf->frame_worker_owner; - FrameWorkerData *const ref_worker_data = - (FrameWorkerData *)ref_worker->data1; - const VP10Decoder *const pbi = ref_worker_data->pbi; - -#ifdef DEBUG_THREAD - { - FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1; - printf("%d %p worker is waiting for %d %p worker (%d) ref %d \r\n", - worker_data->worker_id, worker, ref_worker_data->worker_id, - ref_buf->frame_worker_owner, row, ref_buf->row); - } -#endif - - vp10_frameworker_lock_stats(ref_worker); - while (ref_buf->row < row && pbi->cur_buf == ref_buf && - ref_buf->buf.corrupted != 1) { - pthread_cond_wait(&ref_worker_data->stats_cond, - &ref_worker_data->stats_mutex); - } - - if (ref_buf->buf.corrupted == 1) { - FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1; - vp10_frameworker_unlock_stats(ref_worker); - vpx_internal_error(&worker_data->pbi->common.error, - VPX_CODEC_CORRUPT_FRAME, - "Worker %p failed to decode frame", worker); - } - vp10_frameworker_unlock_stats(ref_worker); - } -#else - (void)worker; - (void)ref_buf; - (void)row; - (void)ref_buf; -#endif // CONFIG_MULTITHREAD -} - -void vp10_frameworker_broadcast(RefCntBuffer *const buf, int row) { -#if CONFIG_MULTITHREAD - VPxWorker *worker = buf->frame_worker_owner; - -#ifdef DEBUG_THREAD - { - FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1; - printf("%d %p worker decode to (%d) \r\n", worker_data->worker_id, - buf->frame_worker_owner, row); - } -#endif - - vp10_frameworker_lock_stats(worker); - buf->row = row; - vp10_frameworker_signal_stats(worker); - vp10_frameworker_unlock_stats(worker); -#else - (void)buf; - (void)row; -#endif // CONFIG_MULTITHREAD -} - -void vp10_frameworker_copy_context(VPxWorker *const dst_worker, - VPxWorker *const src_worker) { -#if CONFIG_MULTITHREAD - FrameWorkerData *const src_worker_data = (FrameWorkerData *)src_worker->data1; - FrameWorkerData *const dst_worker_data = (FrameWorkerData 
*)dst_worker->data1; - VP10_COMMON *const src_cm = &src_worker_data->pbi->common; - VP10_COMMON *const dst_cm = &dst_worker_data->pbi->common; - int i; - - // Wait until source frame's context is ready. - vp10_frameworker_lock_stats(src_worker); - while (!src_worker_data->frame_context_ready) { - pthread_cond_wait(&src_worker_data->stats_cond, - &src_worker_data->stats_mutex); - } - - dst_cm->last_frame_seg_map = src_cm->seg.enabled ? - src_cm->current_frame_seg_map : src_cm->last_frame_seg_map; - dst_worker_data->pbi->need_resync = src_worker_data->pbi->need_resync; - vp10_frameworker_unlock_stats(src_worker); - - dst_cm->bit_depth = src_cm->bit_depth; -#if CONFIG_VP9_HIGHBITDEPTH - dst_cm->use_highbitdepth = src_cm->use_highbitdepth; -#endif - dst_cm->prev_frame = src_cm->show_existing_frame ? - src_cm->prev_frame : src_cm->cur_frame; - dst_cm->last_width = !src_cm->show_existing_frame ? - src_cm->width : src_cm->last_width; - dst_cm->last_height = !src_cm->show_existing_frame ? - src_cm->height : src_cm->last_height; - dst_cm->subsampling_x = src_cm->subsampling_x; - dst_cm->subsampling_y = src_cm->subsampling_y; - dst_cm->frame_type = src_cm->frame_type; - dst_cm->last_show_frame = !src_cm->show_existing_frame ? 
- src_cm->show_frame : src_cm->last_show_frame; - for (i = 0; i < REF_FRAMES; ++i) - dst_cm->ref_frame_map[i] = src_cm->next_ref_frame_map[i]; - - memcpy(dst_cm->lf_info.lfthr, src_cm->lf_info.lfthr, - (MAX_LOOP_FILTER + 1) * sizeof(loop_filter_thresh)); - dst_cm->lf.last_sharpness_level = src_cm->lf.sharpness_level; - dst_cm->lf.filter_level = src_cm->lf.filter_level; - memcpy(dst_cm->lf.ref_deltas, src_cm->lf.ref_deltas, MAX_REF_FRAMES); - memcpy(dst_cm->lf.mode_deltas, src_cm->lf.mode_deltas, MAX_MODE_LF_DELTAS); - dst_cm->seg = src_cm->seg; - memcpy(dst_cm->frame_contexts, src_cm->frame_contexts, - FRAME_CONTEXTS * sizeof(dst_cm->frame_contexts[0])); -#else - (void) dst_worker; - (void) src_worker; -#endif // CONFIG_MULTITHREAD -} diff --git a/libs/libvpx/vp10/decoder/dthread.h b/libs/libvpx/vp10/decoder/dthread.h deleted file mode 100644 index 1b0dc0191d..0000000000 --- a/libs/libvpx/vp10/decoder/dthread.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_DECODER_DTHREAD_H_ -#define VP10_DECODER_DTHREAD_H_ - -#include "./vpx_config.h" -#include "vpx_util/vpx_thread.h" -#include "vpx/internal/vpx_codec_internal.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct VP10Common; -struct VP10Decoder; - -// WorkerData for the FrameWorker thread. It contains all the information of -// the worker and decode structures for decoding a frame. 
-typedef struct FrameWorkerData { - struct VP10Decoder *pbi; - const uint8_t *data; - const uint8_t *data_end; - size_t data_size; - void *user_priv; - int result; - int worker_id; - int received_frame; - - // scratch_buffer is used in frame parallel mode only. - // It is used to make a copy of the compressed data. - uint8_t *scratch_buffer; - size_t scratch_buffer_size; - -#if CONFIG_MULTITHREAD - pthread_mutex_t stats_mutex; - pthread_cond_t stats_cond; -#endif - - int frame_context_ready; // Current frame's context is ready to read. - int frame_decoded; // Finished decoding current frame. -} FrameWorkerData; - -void vp10_frameworker_lock_stats(VPxWorker *const worker); -void vp10_frameworker_unlock_stats(VPxWorker *const worker); -void vp10_frameworker_signal_stats(VPxWorker *const worker); - -// Wait until ref_buf has been decoded to row in real pixel unit. -// Note: worker may already finish decoding ref_buf and release it in order to -// start decoding next frame. So need to check whether worker is still decoding -// ref_buf. -void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf, - int row); - -// FrameWorker broadcasts its decoding progress so other workers that are -// waiting on it can resume decoding. -void vp10_frameworker_broadcast(RefCntBuffer *const buf, int row); - -// Copy necessary decoding context from src worker to dst worker. -void vp10_frameworker_copy_context(VPxWorker *const dst_worker, - VPxWorker *const src_worker); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_DECODER_DTHREAD_H_ diff --git a/libs/libvpx/vp10/encoder/aq_complexity.c b/libs/libvpx/vp10/encoder/aq_complexity.c deleted file mode 100644 index 2506a4e552..0000000000 --- a/libs/libvpx/vp10/encoder/aq_complexity.c +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include <limits.h> -#include <math.h> - -#include "vp10/encoder/aq_complexity.h" -#include "vp10/encoder/aq_variance.h" -#include "vp10/encoder/encodeframe.h" -#include "vp10/common/seg_common.h" -#include "vp10/encoder/segmentation.h" -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_ports/system_state.h" - -#define AQ_C_SEGMENTS 5 -#define DEFAULT_AQ2_SEG 3 // Neutral Q segment -#define AQ_C_STRENGTHS 3 -static const double aq_c_q_adj_factor[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = - { {1.75, 1.25, 1.05, 1.00, 0.90}, - {2.00, 1.50, 1.15, 1.00, 0.85}, - {2.50, 1.75, 1.25, 1.00, 0.80} }; -static const double aq_c_transitions[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = - { {0.15, 0.30, 0.55, 2.00, 100.0}, - {0.20, 0.40, 0.65, 2.00, 100.0}, - {0.25, 0.50, 0.75, 2.00, 100.0} }; -static const double aq_c_var_thresholds[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = - { {-4.0, -3.0, -2.0, 100.00, 100.0}, - {-3.5, -2.5, -1.5, 100.00, 100.0}, - {-3.0, -2.0, -1.0, 100.00, 100.0} }; - -#define DEFAULT_COMPLEXITY 64 - - -static int get_aq_c_strength(int q_index, vpx_bit_depth_t bit_depth) { - // Approximate base quatizer (truncated to int) - const int base_quant = vp10_ac_quant(q_index, 0, bit_depth) / 4; - return (base_quant > 10) + (base_quant > 25); -} - -void vp10_setup_in_frame_q_adj(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - struct segmentation *const seg = &cm->seg; - - // Make SURE use of floating point in this function is safe.
- vpx_clear_system_state(); - - if (frame_is_intra_only(cm) || cm->error_resilient_mode || - cpi->refresh_alt_ref_frame || - (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) { - int segment; - const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth); - - // Clear down the segment map. - memset(cpi->segmentation_map, DEFAULT_AQ2_SEG, cm->mi_rows * cm->mi_cols); - - vp10_clearall_segfeatures(seg); - - // Segmentation only makes sense if the target bits per SB is above a - // threshold. Below this the overheads will usually outweigh any benefit. - if (cpi->rc.sb64_target_rate < 256) { - vp10_disable_segmentation(seg); - return; - } - - vp10_enable_segmentation(seg); - - // Select delta coding method. - seg->abs_delta = SEGMENT_DELTADATA; - - // Default segment "Q" feature is disabled so it defaults to the baseline Q. - vp10_disable_segfeature(seg, DEFAULT_AQ2_SEG, SEG_LVL_ALT_Q); - - // Use some of the segments for in frame Q adjustment. - for (segment = 0; segment < AQ_C_SEGMENTS; ++segment) { - int qindex_delta; - - if (segment == DEFAULT_AQ2_SEG) - continue; - - qindex_delta = - vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex, - aq_c_q_adj_factor[aq_strength][segment], - cm->bit_depth); - - - // For AQ complexity mode, we dont allow Q0 in a segment if the base - // Q is not 0. Q0 (lossless) implies 4x4 only and in AQ mode 2 a segment - // Q delta is sometimes applied without going back around the rd loop. - // This could lead to an illegal combination of partition size and q. - if ((cm->base_qindex != 0) && ((cm->base_qindex + qindex_delta) == 0)) { - qindex_delta = -cm->base_qindex + 1; - } - if ((cm->base_qindex + qindex_delta) > 0) { - vp10_enable_segfeature(seg, segment, SEG_LVL_ALT_Q); - vp10_set_segdata(seg, segment, SEG_LVL_ALT_Q, qindex_delta); - } - } - } -} - -#define DEFAULT_LV_THRESH 10.0 -#define MIN_DEFAULT_LV_THRESH 8.0 -#define VAR_STRENGTH_STEP 0.25 -// Select a segment for the current block. 
-// The choice of segment for a block depends on the ratio of the projected -// bits for the block vs a target average and its spatial complexity. -void vp10_caq_select_segment(VP10_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs, - int mi_row, int mi_col, int projected_rate) { - VP10_COMMON *const cm = &cpi->common; - - const int mi_offset = mi_row * cm->mi_cols + mi_col; - const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64]; - const int bh = num_8x8_blocks_high_lookup[BLOCK_64X64]; - const int xmis = VPXMIN(cm->mi_cols - mi_col, num_8x8_blocks_wide_lookup[bs]); - const int ymis = VPXMIN(cm->mi_rows - mi_row, num_8x8_blocks_high_lookup[bs]); - int x, y; - int i; - unsigned char segment; - - if (0) { - segment = DEFAULT_AQ2_SEG; - } else { - // Rate depends on fraction of a SB64 in frame (xmis * ymis / bw * bh). - // It is converted to bits * 256 units. - const int target_rate = (cpi->rc.sb64_target_rate * xmis * ymis * 256) / - (bw * bh); - double logvar; - double low_var_thresh; - const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth); - - vpx_clear_system_state(); - low_var_thresh = (cpi->oxcf.pass == 2) - ? VPXMAX(cpi->twopass.mb_av_energy, MIN_DEFAULT_LV_THRESH) - : DEFAULT_LV_THRESH; - - vp10_setup_src_planes(mb, cpi->Source, mi_row, mi_col); - logvar = vp10_log_block_var(cpi, mb, bs); - - segment = AQ_C_SEGMENTS - 1; // Just in case no break out below. - for (i = 0; i < AQ_C_SEGMENTS; ++i) { - // Test rate against a threshold value and variance against a threshold. - // Increasing segment number (higher variance and complexity) = higher Q. - if ((projected_rate < - target_rate * aq_c_transitions[aq_strength][i]) && - (logvar < (low_var_thresh + aq_c_var_thresholds[aq_strength][i]))) { - segment = i; - break; - } - } - } - - // Fill in the entires in the segment map corresponding to this SB64. 
- for (y = 0; y < ymis; y++) { - for (x = 0; x < xmis; x++) { - cpi->segmentation_map[mi_offset + y * cm->mi_cols + x] = segment; - } - } -} diff --git a/libs/libvpx/vp10/encoder/aq_complexity.h b/libs/libvpx/vp10/encoder/aq_complexity.h deleted file mode 100644 index f9de2ada3e..0000000000 --- a/libs/libvpx/vp10/encoder/aq_complexity.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#ifndef VP10_ENCODER_AQ_COMPLEXITY_H_ -#define VP10_ENCODER_AQ_COMPLEXITY_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include "vp10/common/enums.h" - -struct VP10_COMP; -struct macroblock; - -// Select a segment for the current Block. -void vp10_caq_select_segment(struct VP10_COMP *cpi, struct macroblock *, - BLOCK_SIZE bs, - int mi_row, int mi_col, int projected_rate); - -// This function sets up a set of segments with delta Q values around -// the baseline frame quantizer. -void vp10_setup_in_frame_q_adj(struct VP10_COMP *cpi); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_AQ_COMPLEXITY_H_ diff --git a/libs/libvpx/vp10/encoder/aq_cyclicrefresh.c b/libs/libvpx/vp10/encoder/aq_cyclicrefresh.c deleted file mode 100644 index 660670ccea..0000000000 --- a/libs/libvpx/vp10/encoder/aq_cyclicrefresh.c +++ /dev/null @@ -1,567 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. 
All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include <limits.h> -#include <math.h> - -#include "vp10/common/seg_common.h" -#include "vp10/encoder/aq_cyclicrefresh.h" -#include "vp10/encoder/ratectrl.h" -#include "vp10/encoder/segmentation.h" -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_ports/system_state.h" - -struct CYCLIC_REFRESH { - // Percentage of blocks per frame that are targeted as candidates - // for cyclic refresh. - int percent_refresh; - // Maximum q-delta as percentage of base q. - int max_qdelta_perc; - // Superblock starting index for cycling through the frame. - int sb_index; - // Controls how long block will need to wait to be refreshed again, in - // excess of the cycle time, i.e., in the case of all zero motion, block - // will be refreshed every (100/percent_refresh + time_for_refresh) frames. - int time_for_refresh; - // Target number of (8x8) blocks that are set for delta-q. - int target_num_seg_blocks; - // Actual number of (8x8) blocks that were applied delta-q. - int actual_num_seg1_blocks; - int actual_num_seg2_blocks; - // RD mult. parameters for segment 1. - int rdmult; - // Cyclic refresh map. - signed char *map; - // Map of the last q a block was coded at. - uint8_t *last_coded_q_map; - // Thresholds applied to the projected rate/distortion of the coding block, - // when deciding whether block should be refreshed. - int64_t thresh_rate_sb; - int64_t thresh_dist_sb; - // Threshold applied to the motion vector (in units of 1/8 pel) of the - // coding block, when deciding whether block should be refreshed. - int16_t motion_thresh; - // Rate target ratio to set q delta. - double rate_ratio_qdelta; - // Boost factor for rate target ratio, for segment CR_SEGMENT_ID_BOOST2.
- int rate_boost_fac; - double low_content_avg; - int qindex_delta[3]; -}; - -CYCLIC_REFRESH *vp10_cyclic_refresh_alloc(int mi_rows, int mi_cols) { - size_t last_coded_q_map_size; - CYCLIC_REFRESH *const cr = vpx_calloc(1, sizeof(*cr)); - if (cr == NULL) - return NULL; - - cr->map = vpx_calloc(mi_rows * mi_cols, sizeof(*cr->map)); - if (cr->map == NULL) { - vpx_free(cr); - return NULL; - } - last_coded_q_map_size = mi_rows * mi_cols * sizeof(*cr->last_coded_q_map); - cr->last_coded_q_map = vpx_malloc(last_coded_q_map_size); - if (cr->last_coded_q_map == NULL) { - vpx_free(cr); - return NULL; - } - assert(MAXQ <= 255); - memset(cr->last_coded_q_map, MAXQ, last_coded_q_map_size); - - return cr; -} - -void vp10_cyclic_refresh_free(CYCLIC_REFRESH *cr) { - vpx_free(cr->map); - vpx_free(cr->last_coded_q_map); - vpx_free(cr); -} - -// Check if we should turn off cyclic refresh based on bitrate condition. -static int apply_cyclic_refresh_bitrate(const VP10_COMMON *cm, - const RATE_CONTROL *rc) { - // Turn off cyclic refresh if bits available per frame is not sufficiently - // larger than bit cost of segmentation. Segment map bit cost should scale - // with number of seg blocks, so compare available bits to number of blocks. - // Average bits available per frame = avg_frame_bandwidth - // Number of (8x8) blocks in frame = mi_rows * mi_cols; - const float factor = 0.25; - const int number_blocks = cm->mi_rows * cm->mi_cols; - // The condition below corresponds to turning off at target bitrates: - // (at 30fps), ~12kbps for CIF, 36kbps for VGA, 100kps for HD/720p. - // Also turn off at very small frame sizes, to avoid too large fraction of - // superblocks to be refreshed per frame. Threshold below is less than QCIF. - if (rc->avg_frame_bandwidth < factor * number_blocks || - number_blocks / 64 < 5) - return 0; - else - return 1; -} - -// Check if this coding block, of size bsize, should be considered for refresh -// (lower-qp coding). 
Decision can be based on various factors, such as -// size of the coding block (i.e., below min_block size rejected), coding -// mode, and rate/distortion. -static int candidate_refresh_aq(const CYCLIC_REFRESH *cr, - const MB_MODE_INFO *mbmi, - int64_t rate, - int64_t dist, - int bsize) { - MV mv = mbmi->mv[0].as_mv; - // Reject the block for lower-qp coding if projected distortion - // is above the threshold, and any of the following is true: - // 1) mode uses large mv - // 2) mode is an intra-mode - // Otherwise accept for refresh. - if (dist > cr->thresh_dist_sb && - (mv.row > cr->motion_thresh || mv.row < -cr->motion_thresh || - mv.col > cr->motion_thresh || mv.col < -cr->motion_thresh || - !is_inter_block(mbmi))) - return CR_SEGMENT_ID_BASE; - else if (bsize >= BLOCK_16X16 && - rate < cr->thresh_rate_sb && - is_inter_block(mbmi) && - mbmi->mv[0].as_int == 0 && - cr->rate_boost_fac > 10) - // More aggressive delta-q for bigger blocks with zero motion. - return CR_SEGMENT_ID_BOOST2; - else - return CR_SEGMENT_ID_BOOST1; -} - -// Compute delta-q for the segment. -static int compute_deltaq(const VP10_COMP *cpi, int q, double rate_factor) { - const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; - const RATE_CONTROL *const rc = &cpi->rc; - int deltaq = vp10_compute_qdelta_by_rate(rc, cpi->common.frame_type, - q, rate_factor, - cpi->common.bit_depth); - if ((-deltaq) > cr->max_qdelta_perc * q / 100) { - deltaq = -cr->max_qdelta_perc * q / 100; - } - return deltaq; -} - -// For the just encoded frame, estimate the bits, incorporating the delta-q -// from non-base segment. For now ignore effect of multiple segments -// (with different delta-q). Note this function is called in the postencode -// (called from rc_update_rate_correction_factors()). 
-int vp10_cyclic_refresh_estimate_bits_at_q(const VP10_COMP *cpi, - double correction_factor) { - const VP10_COMMON *const cm = &cpi->common; - const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; - int estimated_bits; - int mbs = cm->MBs; - int num8x8bl = mbs << 2; - // Weight for non-base segments: use actual number of blocks refreshed in - // previous/just encoded frame. Note number of blocks here is in 8x8 units. - double weight_segment1 = (double)cr->actual_num_seg1_blocks / num8x8bl; - double weight_segment2 = (double)cr->actual_num_seg2_blocks / num8x8bl; - // Take segment weighted average for estimated bits. - estimated_bits = (int)((1.0 - weight_segment1 - weight_segment2) * - vp10_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs, - correction_factor, cm->bit_depth) + - weight_segment1 * - vp10_estimate_bits_at_q(cm->frame_type, - cm->base_qindex + cr->qindex_delta[1], mbs, - correction_factor, cm->bit_depth) + - weight_segment2 * - vp10_estimate_bits_at_q(cm->frame_type, - cm->base_qindex + cr->qindex_delta[2], mbs, - correction_factor, cm->bit_depth)); - return estimated_bits; -} - -// Prior to encoding the frame, estimate the bits per mb, for a given q = i and -// a corresponding delta-q (for segment 1). This function is called in the -// rc_regulate_q() to set the base qp index. -// Note: the segment map is set to either 0/CR_SEGMENT_ID_BASE (no refresh) or -// to 1/CR_SEGMENT_ID_BOOST1 (refresh) for each superblock, prior to encoding. -int vp10_cyclic_refresh_rc_bits_per_mb(const VP10_COMP *cpi, int i, - double correction_factor) { - const VP10_COMMON *const cm = &cpi->common; - CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; - int bits_per_mb; - int num8x8bl = cm->MBs << 2; - // Weight for segment prior to encoding: take the average of the target - // number for the frame to be encoded and the actual from the previous frame. 
- double weight_segment = (double)((cr->target_num_seg_blocks + - cr->actual_num_seg1_blocks + cr->actual_num_seg2_blocks) >> 1) / - num8x8bl; - // Compute delta-q corresponding to qindex i. - int deltaq = compute_deltaq(cpi, i, cr->rate_ratio_qdelta); - // Take segment weighted average for bits per mb. - bits_per_mb = (int)((1.0 - weight_segment) * - vp10_rc_bits_per_mb(cm->frame_type, i, correction_factor, cm->bit_depth) + - weight_segment * - vp10_rc_bits_per_mb(cm->frame_type, i + deltaq, correction_factor, - cm->bit_depth)); - return bits_per_mb; -} - -// Prior to coding a given prediction block, of size bsize at (mi_row, mi_col), -// check if we should reset the segment_id, and update the cyclic_refresh map -// and segmentation map. -void vp10_cyclic_refresh_update_segment(VP10_COMP *const cpi, - MB_MODE_INFO *const mbmi, - int mi_row, int mi_col, - BLOCK_SIZE bsize, - int64_t rate, - int64_t dist, - int skip) { - const VP10_COMMON *const cm = &cpi->common; - CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; - const int bw = num_8x8_blocks_wide_lookup[bsize]; - const int bh = num_8x8_blocks_high_lookup[bsize]; - const int xmis = VPXMIN(cm->mi_cols - mi_col, bw); - const int ymis = VPXMIN(cm->mi_rows - mi_row, bh); - const int block_index = mi_row * cm->mi_cols + mi_col; - const int refresh_this_block = candidate_refresh_aq(cr, mbmi, rate, dist, - bsize); - // Default is to not update the refresh map. - int new_map_value = cr->map[block_index]; - int x = 0; int y = 0; - - // If this block is labeled for refresh, check if we should reset the - // segment_id. - if (cyclic_refresh_segment_id_boosted(mbmi->segment_id)) { - mbmi->segment_id = refresh_this_block; - // Reset segment_id if will be skipped. - if (skip) - mbmi->segment_id = CR_SEGMENT_ID_BASE; - } - - // Update the cyclic refresh map, to be used for setting segmentation map - // for the next frame. If the block will be refreshed this frame, mark it - // as clean. 
The magnitude of the -ve influences how long before we consider - // it for refresh again. - if (cyclic_refresh_segment_id_boosted(mbmi->segment_id)) { - new_map_value = -cr->time_for_refresh; - } else if (refresh_this_block) { - // Else if it is accepted as candidate for refresh, and has not already - // been refreshed (marked as 1) then mark it as a candidate for cleanup - // for future time (marked as 0), otherwise don't update it. - if (cr->map[block_index] == 1) - new_map_value = 0; - } else { - // Leave it marked as block that is not candidate for refresh. - new_map_value = 1; - } - - // Update entries in the cyclic refresh map with new_map_value, and - // copy mbmi->segment_id into global segmentation map. - for (y = 0; y < ymis; y++) - for (x = 0; x < xmis; x++) { - int map_offset = block_index + y * cm->mi_cols + x; - cr->map[map_offset] = new_map_value; - cpi->segmentation_map[map_offset] = mbmi->segment_id; - // Inter skip blocks were clearly not coded at the current qindex, so - // don't update the map for them. For cases where motion is non-zero or - // the reference frame isn't the previous frame, the previous value in - // the map for this spatial location is not entirely correct. - if (!is_inter_block(mbmi) || !skip) - cr->last_coded_q_map[map_offset] = clamp( - cm->base_qindex + cr->qindex_delta[mbmi->segment_id], 0, MAXQ); - } -} - -// Update the actual number of blocks that were applied the segment delta q. 
-void vp10_cyclic_refresh_postencode(VP10_COMP *const cpi) { - VP10_COMMON *const cm = &cpi->common; - CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; - unsigned char *const seg_map = cpi->segmentation_map; - int mi_row, mi_col; - cr->actual_num_seg1_blocks = 0; - cr->actual_num_seg2_blocks = 0; - for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) - for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) { - if (cyclic_refresh_segment_id( - seg_map[mi_row * cm->mi_cols + mi_col]) == CR_SEGMENT_ID_BOOST1) - cr->actual_num_seg1_blocks++; - else if (cyclic_refresh_segment_id( - seg_map[mi_row * cm->mi_cols + mi_col]) == CR_SEGMENT_ID_BOOST2) - cr->actual_num_seg2_blocks++; - } -} - -// Set golden frame update interval, for 1 pass CBR mode. -void vp10_cyclic_refresh_set_golden_update(VP10_COMP *const cpi) { - RATE_CONTROL *const rc = &cpi->rc; - CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; - // Set minimum gf_interval for GF update to a multiple (== 2) of refresh - // period. Depending on past encoding stats, GF flag may be reset and update - // may not occur until next baseline_gf_interval. - if (cr->percent_refresh > 0) - rc->baseline_gf_interval = 4 * (100 / cr->percent_refresh); - else - rc->baseline_gf_interval = 40; -} - -// Update some encoding stats (from the just encoded frame). If this frame's -// background has high motion, refresh the golden frame. Otherwise, if the -// golden reference is to be updated check if we should NOT update the golden -// ref. 
-void vp10_cyclic_refresh_check_golden_update(VP10_COMP *const cpi) { - VP10_COMMON *const cm = &cpi->common; - CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; - int mi_row, mi_col; - double fraction_low = 0.0; - int low_content_frame = 0; - - MODE_INFO **mi = cm->mi_grid_visible; - RATE_CONTROL *const rc = &cpi->rc; - const int rows = cm->mi_rows, cols = cm->mi_cols; - int cnt1 = 0, cnt2 = 0; - int force_gf_refresh = 0; - - for (mi_row = 0; mi_row < rows; mi_row++) { - for (mi_col = 0; mi_col < cols; mi_col++) { - int16_t abs_mvr = mi[0]->mbmi.mv[0].as_mv.row >= 0 ? - mi[0]->mbmi.mv[0].as_mv.row : -1 * mi[0]->mbmi.mv[0].as_mv.row; - int16_t abs_mvc = mi[0]->mbmi.mv[0].as_mv.col >= 0 ? - mi[0]->mbmi.mv[0].as_mv.col : -1 * mi[0]->mbmi.mv[0].as_mv.col; - - // Calculate the motion of the background. - if (abs_mvr <= 16 && abs_mvc <= 16) { - cnt1++; - if (abs_mvr == 0 && abs_mvc == 0) - cnt2++; - } - mi++; - - // Accumulate low_content_frame. - if (cr->map[mi_row * cols + mi_col] < 1) - low_content_frame++; - } - mi += 8; - } - - // For video conference clips, if the background has high motion in current - // frame because of the camera movement, set this frame as the golden frame. - // Use 70% and 5% as the thresholds for golden frame refreshing. - // Also, force this frame as a golden update frame if this frame will change - // the resolution (resize_pending != 0). - if (cpi->resize_pending != 0 || - (cnt1 * 10 > (70 * rows * cols) && cnt2 * 20 < cnt1)) { - vp10_cyclic_refresh_set_golden_update(cpi); - rc->frames_till_gf_update_due = rc->baseline_gf_interval; - - if (rc->frames_till_gf_update_due > rc->frames_to_key) - rc->frames_till_gf_update_due = rc->frames_to_key; - cpi->refresh_golden_frame = 1; - force_gf_refresh = 1; - } - - fraction_low = - (double)low_content_frame / (rows * cols); - // Update average. 
- cr->low_content_avg = (fraction_low + 3 * cr->low_content_avg) / 4; - if (!force_gf_refresh && cpi->refresh_golden_frame == 1) { - // Don't update golden reference if the amount of low_content for the - // current encoded frame is small, or if the recursive average of the - // low_content over the update interval window falls below threshold. - if (fraction_low < 0.8 || cr->low_content_avg < 0.7) - cpi->refresh_golden_frame = 0; - // Reset for next internal. - cr->low_content_avg = fraction_low; - } -} - -// Update the segmentation map, and related quantities: cyclic refresh map, -// refresh sb_index, and target number of blocks to be refreshed. -// The map is set to either 0/CR_SEGMENT_ID_BASE (no refresh) or to -// 1/CR_SEGMENT_ID_BOOST1 (refresh) for each superblock. -// Blocks labeled as BOOST1 may later get set to BOOST2 (during the -// encoding of the superblock). -static void cyclic_refresh_update_map(VP10_COMP *const cpi) { - VP10_COMMON *const cm = &cpi->common; - CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; - unsigned char *const seg_map = cpi->segmentation_map; - int i, block_count, bl_index, sb_rows, sb_cols, sbs_in_frame; - int xmis, ymis, x, y; - memset(seg_map, CR_SEGMENT_ID_BASE, cm->mi_rows * cm->mi_cols); - sb_cols = (cm->mi_cols + MI_BLOCK_SIZE - 1) / MI_BLOCK_SIZE; - sb_rows = (cm->mi_rows + MI_BLOCK_SIZE - 1) / MI_BLOCK_SIZE; - sbs_in_frame = sb_cols * sb_rows; - // Number of target blocks to get the q delta (segment 1). - block_count = cr->percent_refresh * cm->mi_rows * cm->mi_cols / 100; - // Set the segmentation map: cycle through the superblocks, starting at - // cr->mb_index, and stopping when either block_count blocks have been found - // to be refreshed, or we have passed through whole frame. - assert(cr->sb_index < sbs_in_frame); - i = cr->sb_index; - cr->target_num_seg_blocks = 0; - do { - int sum_map = 0; - // Get the mi_row/mi_col corresponding to superblock index i. 
- int sb_row_index = (i / sb_cols); - int sb_col_index = i - sb_row_index * sb_cols; - int mi_row = sb_row_index * MI_BLOCK_SIZE; - int mi_col = sb_col_index * MI_BLOCK_SIZE; - int qindex_thresh = - cpi->oxcf.content == VP9E_CONTENT_SCREEN - ? vp10_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST2, cm->base_qindex) - : 0; - assert(mi_row >= 0 && mi_row < cm->mi_rows); - assert(mi_col >= 0 && mi_col < cm->mi_cols); - bl_index = mi_row * cm->mi_cols + mi_col; - // Loop through all 8x8 blocks in superblock and update map. - xmis = - VPXMIN(cm->mi_cols - mi_col, num_8x8_blocks_wide_lookup[BLOCK_64X64]); - ymis = - VPXMIN(cm->mi_rows - mi_row, num_8x8_blocks_high_lookup[BLOCK_64X64]); - for (y = 0; y < ymis; y++) { - for (x = 0; x < xmis; x++) { - const int bl_index2 = bl_index + y * cm->mi_cols + x; - // If the block is as a candidate for clean up then mark it - // for possible boost/refresh (segment 1). The segment id may get - // reset to 0 later if block gets coded anything other than ZEROMV. - if (cr->map[bl_index2] == 0) { - if (cr->last_coded_q_map[bl_index2] > qindex_thresh) - sum_map++; - } else if (cr->map[bl_index2] < 0) { - cr->map[bl_index2]++; - } - } - } - // Enforce constant segment over superblock. - // If segment is at least half of superblock, set to 1. - if (sum_map >= xmis * ymis / 2) { - for (y = 0; y < ymis; y++) - for (x = 0; x < xmis; x++) { - seg_map[bl_index + y * cm->mi_cols + x] = CR_SEGMENT_ID_BOOST1; - } - cr->target_num_seg_blocks += xmis * ymis; - } - i++; - if (i == sbs_in_frame) { - i = 0; - } - } while (cr->target_num_seg_blocks < block_count && i != cr->sb_index); - cr->sb_index = i; -} - -// Set cyclic refresh parameters. 
-void vp10_cyclic_refresh_update_parameters(VP10_COMP *const cpi) { - const RATE_CONTROL *const rc = &cpi->rc; - const VP10_COMMON *const cm = &cpi->common; - CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; - cr->percent_refresh = 10; - cr->max_qdelta_perc = 50; - cr->time_for_refresh = 0; - // Use larger delta-qp (increase rate_ratio_qdelta) for first few (~4) - // periods of the refresh cycle, after a key frame. - if (rc->frames_since_key < 4 * cr->percent_refresh) - cr->rate_ratio_qdelta = 3.0; - else - cr->rate_ratio_qdelta = 2.0; - // Adjust some parameters for low resolutions at low bitrates. - if (cm->width <= 352 && - cm->height <= 288 && - rc->avg_frame_bandwidth < 3400) { - cr->motion_thresh = 4; - cr->rate_boost_fac = 10; - } else { - cr->motion_thresh = 32; - cr->rate_boost_fac = 17; - } -} - -// Setup cyclic background refresh: set delta q and segmentation map. -void vp10_cyclic_refresh_setup(VP10_COMP *const cpi) { - VP10_COMMON *const cm = &cpi->common; - const RATE_CONTROL *const rc = &cpi->rc; - CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; - struct segmentation *const seg = &cm->seg; - const int apply_cyclic_refresh = apply_cyclic_refresh_bitrate(cm, rc); - if (cm->current_video_frame == 0) - cr->low_content_avg = 0.0; - // Don't apply refresh on key frame or enhancement layer frames. - if (!apply_cyclic_refresh || cm->frame_type == KEY_FRAME) { - // Set segmentation map to 0 and disable. 
- unsigned char *const seg_map = cpi->segmentation_map; - memset(seg_map, 0, cm->mi_rows * cm->mi_cols); - vp10_disable_segmentation(&cm->seg); - if (cm->frame_type == KEY_FRAME) { - memset(cr->last_coded_q_map, MAXQ, - cm->mi_rows * cm->mi_cols * sizeof(*cr->last_coded_q_map)); - cr->sb_index = 0; - } - return; - } else { - int qindex_delta = 0; - int qindex2; - const double q = vp10_convert_qindex_to_q(cm->base_qindex, cm->bit_depth); - vpx_clear_system_state(); - // Set rate threshold to some multiple (set to 2 for now) of the target - // rate (target is given by sb64_target_rate and scaled by 256). - cr->thresh_rate_sb = ((int64_t)(rc->sb64_target_rate) << 8) << 2; - // Distortion threshold, quadratic in Q, scale factor to be adjusted. - // q will not exceed 457, so (q * q) is within 32bit; see: - // vp10_convert_qindex_to_q(), vp10_ac_quant(), ac_qlookup*[]. - cr->thresh_dist_sb = ((int64_t)(q * q)) << 2; - - // Set up segmentation. - // Clear down the segment map. - vp10_enable_segmentation(&cm->seg); - vp10_clearall_segfeatures(seg); - // Select delta coding method. - seg->abs_delta = SEGMENT_DELTADATA; - - // Note: setting temporal_update has no effect, as the seg-map coding method - // (temporal or spatial) is determined in vp10_choose_segmap_coding_method(), - // based on the coding cost of each method. For error_resilient mode on the - // last_frame_seg_map is set to 0, so if temporal coding is used, it is - // relative to 0 previous map. - // seg->temporal_update = 0; - - // Segment BASE "Q" feature is disabled so it defaults to the baseline Q. - vp10_disable_segfeature(seg, CR_SEGMENT_ID_BASE, SEG_LVL_ALT_Q); - // Use segment BOOST1 for in-frame Q adjustment. - vp10_enable_segfeature(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q); - // Use segment BOOST2 for more aggressive in-frame Q adjustment. - vp10_enable_segfeature(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q); - - // Set the q delta for segment BOOST1. 
- qindex_delta = compute_deltaq(cpi, cm->base_qindex, cr->rate_ratio_qdelta); - cr->qindex_delta[1] = qindex_delta; - - // Compute rd-mult for segment BOOST1. - qindex2 = clamp(cm->base_qindex + cm->y_dc_delta_q + qindex_delta, 0, MAXQ); - - cr->rdmult = vp10_compute_rd_mult(cpi, qindex2); - - vp10_set_segdata(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q, qindex_delta); - - // Set a more aggressive (higher) q delta for segment BOOST2. - qindex_delta = compute_deltaq( - cpi, cm->base_qindex, - VPXMIN(CR_MAX_RATE_TARGET_RATIO, - 0.1 * cr->rate_boost_fac * cr->rate_ratio_qdelta)); - cr->qindex_delta[2] = qindex_delta; - vp10_set_segdata(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q, qindex_delta); - - // Update the segmentation and refresh map. - cyclic_refresh_update_map(cpi); - } -} - -int vp10_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr) { - return cr->rdmult; -} - -void vp10_cyclic_refresh_reset_resize(VP10_COMP *const cpi) { - const VP10_COMMON *const cm = &cpi->common; - CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; - memset(cr->map, 0, cm->mi_rows * cm->mi_cols); - cr->sb_index = 0; - cpi->refresh_golden_frame = 1; -} diff --git a/libs/libvpx/vp10/encoder/aq_cyclicrefresh.h b/libs/libvpx/vp10/encoder/aq_cyclicrefresh.h deleted file mode 100644 index f6714c5c8d..0000000000 --- a/libs/libvpx/vp10/encoder/aq_cyclicrefresh.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - - -#ifndef VP10_ENCODER_AQ_CYCLICREFRESH_H_ -#define VP10_ENCODER_AQ_CYCLICREFRESH_H_ - -#include "vp10/common/blockd.h" - -#ifdef __cplusplus -extern "C" { -#endif - -// The segment ids used in cyclic refresh: from base (no boost) to increasing -// boost (higher delta-qp). -#define CR_SEGMENT_ID_BASE 0 -#define CR_SEGMENT_ID_BOOST1 1 -#define CR_SEGMENT_ID_BOOST2 2 - -// Maximum rate target ratio for setting segment delta-qp. -#define CR_MAX_RATE_TARGET_RATIO 4.0 - -struct VP10_COMP; - -struct CYCLIC_REFRESH; -typedef struct CYCLIC_REFRESH CYCLIC_REFRESH; - -CYCLIC_REFRESH *vp10_cyclic_refresh_alloc(int mi_rows, int mi_cols); - -void vp10_cyclic_refresh_free(CYCLIC_REFRESH *cr); - -// Estimate the bits, incorporating the delta-q from segment 1, after encoding -// the frame. -int vp10_cyclic_refresh_estimate_bits_at_q(const struct VP10_COMP *cpi, - double correction_factor); - -// Estimate the bits per mb, for a given q = i and a corresponding delta-q -// (for segment 1), prior to encoding the frame. -int vp10_cyclic_refresh_rc_bits_per_mb(const struct VP10_COMP *cpi, int i, - double correction_factor); - -// Prior to coding a given prediction block, of size bsize at (mi_row, mi_col), -// check if we should reset the segment_id, and update the cyclic_refresh map -// and segmentation map. -void vp10_cyclic_refresh_update_segment(struct VP10_COMP *const cpi, - MB_MODE_INFO *const mbmi, - int mi_row, int mi_col, BLOCK_SIZE bsize, - int64_t rate, int64_t dist, int skip); - -// Update the segmentation map, and related quantities: cyclic refresh map, -// refresh sb_index, and target number of blocks to be refreshed. -void vp10_cyclic_refresh_update__map(struct VP10_COMP *const cpi); - -// Update the actual number of blocks that were applied the segment delta q. -void vp10_cyclic_refresh_postencode(struct VP10_COMP *const cpi); - -// Set golden frame update interval, for 1 pass CBR mode. 
-void vp10_cyclic_refresh_set_golden_update(struct VP10_COMP *const cpi); - -// Check if we should not update golden reference, based on past refresh stats. -void vp10_cyclic_refresh_check_golden_update(struct VP10_COMP *const cpi); - -// Set/update global/frame level refresh parameters. -void vp10_cyclic_refresh_update_parameters(struct VP10_COMP *const cpi); - -// Setup cyclic background refresh: set delta q and segmentation map. -void vp10_cyclic_refresh_setup(struct VP10_COMP *const cpi); - -int vp10_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr); - -void vp10_cyclic_refresh_reset_resize(struct VP10_COMP *const cpi); - -static INLINE int cyclic_refresh_segment_id_boosted(int segment_id) { - return segment_id == CR_SEGMENT_ID_BOOST1 || - segment_id == CR_SEGMENT_ID_BOOST2; -} - -static INLINE int cyclic_refresh_segment_id(int segment_id) { - if (segment_id == CR_SEGMENT_ID_BOOST1) - return CR_SEGMENT_ID_BOOST1; - else if (segment_id == CR_SEGMENT_ID_BOOST2) - return CR_SEGMENT_ID_BOOST2; - else - return CR_SEGMENT_ID_BASE; -} - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_AQ_CYCLICREFRESH_H_ diff --git a/libs/libvpx/vp10/encoder/aq_variance.c b/libs/libvpx/vp10/encoder/aq_variance.c deleted file mode 100644 index bed5162fb2..0000000000 --- a/libs/libvpx/vp10/encoder/aq_variance.c +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Copyright (c) 2013 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include - -#include "vpx_ports/mem.h" - -#include "vp10/encoder/aq_variance.h" - -#include "vp10/common/seg_common.h" -#include "vp10/encoder/ratectrl.h" -#include "vp10/encoder/rd.h" -#include "vp10/encoder/segmentation.h" -#include "vpx_ports/system_state.h" - -#define ENERGY_MIN (-4) -#define ENERGY_MAX (1) -#define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN + 1) -#define ENERGY_IN_BOUNDS(energy)\ - assert((energy) >= ENERGY_MIN && (energy) <= ENERGY_MAX) - -static const double rate_ratio[MAX_SEGMENTS] = - {2.5, 2.0, 1.5, 1.0, 0.75, 1.0, 1.0, 1.0}; -static const int segment_id[ENERGY_SPAN] = {0, 1, 1, 2, 3, 4}; - -#define SEGMENT_ID(i) segment_id[(i) - ENERGY_MIN] - -DECLARE_ALIGNED(16, static const uint8_t, vp10_64_zeros[64]) = {0}; -#if CONFIG_VP9_HIGHBITDEPTH -DECLARE_ALIGNED(16, static const uint16_t, vp10_highbd_64_zeros[64]) = {0}; -#endif - -unsigned int vp10_vaq_segment_id(int energy) { - ENERGY_IN_BOUNDS(energy); - return SEGMENT_ID(energy); -} - -void vp10_vaq_frame_setup(VP10_COMP *cpi) { - VP10_COMMON *cm = &cpi->common; - struct segmentation *seg = &cm->seg; - int i; - - if (frame_is_intra_only(cm) || cm->error_resilient_mode || - cpi->refresh_alt_ref_frame || - (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) { - vp10_enable_segmentation(seg); - vp10_clearall_segfeatures(seg); - - seg->abs_delta = SEGMENT_DELTADATA; - - vpx_clear_system_state(); - - for (i = 0; i < MAX_SEGMENTS; ++i) { - int qindex_delta = - vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex, - rate_ratio[i], cm->bit_depth); - - // We don't allow qindex 0 in a segment if the base value is not 0. - // Q index 0 (lossless) implies 4x4 encoding only and in AQ mode a segment - // Q delta is sometimes applied without going back around the rd loop. - // This could lead to an illegal combination of partition size and q. 
- if ((cm->base_qindex != 0) && ((cm->base_qindex + qindex_delta) == 0)) { - qindex_delta = -cm->base_qindex + 1; - } - - // No need to enable SEG_LVL_ALT_Q for this segment. - if (rate_ratio[i] == 1.0) { - continue; - } - - vp10_set_segdata(seg, i, SEG_LVL_ALT_Q, qindex_delta); - vp10_enable_segfeature(seg, i, SEG_LVL_ALT_Q); - } - } -} - -/* TODO(agrange, paulwilkins): The block_variance calls the unoptimized versions - * of variance() and highbd_8_variance(). It should not. - */ -static void aq_variance(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - int w, int h, unsigned int *sse, int *sum) { - int i, j; - - *sum = 0; - *sse = 0; - - for (i = 0; i < h; i++) { - for (j = 0; j < w; j++) { - const int diff = a[j] - b[j]; - *sum += diff; - *sse += diff * diff; - } - - a += a_stride; - b += b_stride; - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -static void aq_highbd_variance64(const uint8_t *a8, int a_stride, - const uint8_t *b8, int b_stride, - int w, int h, uint64_t *sse, uint64_t *sum) { - int i, j; - - uint16_t *a = CONVERT_TO_SHORTPTR(a8); - uint16_t *b = CONVERT_TO_SHORTPTR(b8); - *sum = 0; - *sse = 0; - - for (i = 0; i < h; i++) { - for (j = 0; j < w; j++) { - const int diff = a[j] - b[j]; - *sum += diff; - *sse += diff * diff; - } - a += a_stride; - b += b_stride; - } -} - -static void aq_highbd_8_variance(const uint8_t *a8, int a_stride, - const uint8_t *b8, int b_stride, - int w, int h, unsigned int *sse, int *sum) { - uint64_t sse_long = 0; - uint64_t sum_long = 0; - aq_highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long); - *sse = (unsigned int)sse_long; - *sum = (int)sum_long; -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -static unsigned int block_variance(VP10_COMP *cpi, MACROBLOCK *x, - BLOCK_SIZE bs) { - MACROBLOCKD *xd = &x->e_mbd; - unsigned int var, sse; - int right_overflow = (xd->mb_to_right_edge < 0) ? - ((-xd->mb_to_right_edge) >> 3) : 0; - int bottom_overflow = (xd->mb_to_bottom_edge < 0) ? 
- ((-xd->mb_to_bottom_edge) >> 3) : 0; - - if (right_overflow || bottom_overflow) { - const int bw = 8 * num_8x8_blocks_wide_lookup[bs] - right_overflow; - const int bh = 8 * num_8x8_blocks_high_lookup[bs] - bottom_overflow; - int avg; -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - aq_highbd_8_variance(x->plane[0].src.buf, x->plane[0].src.stride, - CONVERT_TO_BYTEPTR(vp10_highbd_64_zeros), 0, bw, bh, - &sse, &avg); - sse >>= 2 * (xd->bd - 8); - avg >>= (xd->bd - 8); - } else { - aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, - vp10_64_zeros, 0, bw, bh, &sse, &avg); - } -#else - aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, - vp10_64_zeros, 0, bw, bh, &sse, &avg); -#endif // CONFIG_VP9_HIGHBITDEPTH - var = sse - (((int64_t)avg * avg) / (bw * bh)); - return (256 * var) / (bw * bh); - } else { -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, - x->plane[0].src.stride, - CONVERT_TO_BYTEPTR(vp10_highbd_64_zeros), - 0, &sse); - } else { - var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, - x->plane[0].src.stride, - vp10_64_zeros, 0, &sse); - } -#else - var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, - x->plane[0].src.stride, - vp10_64_zeros, 0, &sse); -#endif // CONFIG_VP9_HIGHBITDEPTH - return (256 * var) >> num_pels_log2_lookup[bs]; - } -} - -double vp10_log_block_var(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) { - unsigned int var = block_variance(cpi, x, bs); - vpx_clear_system_state(); - return log(var + 1.0); -} - -#define DEFAULT_E_MIDPOINT 10.0 -int vp10_block_energy(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) { - double energy; - double energy_midpoint; - vpx_clear_system_state(); - energy_midpoint = - (cpi->oxcf.pass == 2) ? 
cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT; - energy = vp10_log_block_var(cpi, x, bs) - energy_midpoint; - return clamp((int)round(energy), ENERGY_MIN, ENERGY_MAX); -} diff --git a/libs/libvpx/vp10/encoder/aq_variance.h b/libs/libvpx/vp10/encoder/aq_variance.h deleted file mode 100644 index 318f5f27f1..0000000000 --- a/libs/libvpx/vp10/encoder/aq_variance.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2013 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#ifndef VP10_ENCODER_AQ_VARIANCE_H_ -#define VP10_ENCODER_AQ_VARIANCE_H_ - -#include "vp10/encoder/encoder.h" - -#ifdef __cplusplus -extern "C" { -#endif - -unsigned int vp10_vaq_segment_id(int energy); -void vp10_vaq_frame_setup(VP10_COMP *cpi); - -int vp10_block_energy(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs); -double vp10_log_block_var(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_AQ_VARIANCE_H_ diff --git a/libs/libvpx/vp10/encoder/arm/neon/dct_neon.c b/libs/libvpx/vp10/encoder/arm/neon/dct_neon.c deleted file mode 100644 index b37a2ff3a9..0000000000 --- a/libs/libvpx/vp10/encoder/arm/neon/dct_neon.c +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include - -#include "./vp10_rtcd.h" -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" - -#include "vp10/common/blockd.h" -#include "vpx_dsp/txfm_common.h" - -void vp10_fdct8x8_quant_neon(const int16_t *input, int stride, - int16_t* coeff_ptr, intptr_t n_coeffs, - int skip_block, const int16_t* zbin_ptr, - const int16_t* round_ptr, const int16_t* quant_ptr, - const int16_t* quant_shift_ptr, - int16_t* qcoeff_ptr, int16_t* dqcoeff_ptr, - const int16_t* dequant_ptr, uint16_t* eob_ptr, - const int16_t* scan_ptr, - const int16_t* iscan_ptr) { - int16_t temp_buffer[64]; - (void)coeff_ptr; - - vpx_fdct8x8_neon(input, temp_buffer, stride); - vp10_quantize_fp_neon(temp_buffer, n_coeffs, skip_block, zbin_ptr, round_ptr, - quant_ptr, quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr, - dequant_ptr, eob_ptr, scan_ptr, iscan_ptr); -} diff --git a/libs/libvpx/vp10/encoder/arm/neon/error_neon.c b/libs/libvpx/vp10/encoder/arm/neon/error_neon.c deleted file mode 100644 index 009520aeed..0000000000 --- a/libs/libvpx/vp10/encoder/arm/neon/error_neon.c +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include - -#include "./vp10_rtcd.h" - -int64_t vp10_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff, - int block_size) { - int64x2_t error = vdupq_n_s64(0); - - assert(block_size >= 8); - assert((block_size % 8) == 0); - - do { - const int16x8_t c = vld1q_s16(coeff); - const int16x8_t d = vld1q_s16(dqcoeff); - const int16x8_t diff = vsubq_s16(c, d); - const int16x4_t diff_lo = vget_low_s16(diff); - const int16x4_t diff_hi = vget_high_s16(diff); - // diff is 15-bits, the squares 30, so we can store 2 in 31-bits before - // accumulating them in 64-bits. - const int32x4_t err0 = vmull_s16(diff_lo, diff_lo); - const int32x4_t err1 = vmlal_s16(err0, diff_hi, diff_hi); - const int64x2_t err2 = vaddl_s32(vget_low_s32(err1), vget_high_s32(err1)); - error = vaddq_s64(error, err2); - coeff += 8; - dqcoeff += 8; - block_size -= 8; - } while (block_size != 0); - - return vgetq_lane_s64(error, 0) + vgetq_lane_s64(error, 1); -} diff --git a/libs/libvpx/vp10/encoder/arm/neon/quantize_neon.c b/libs/libvpx/vp10/encoder/arm/neon/quantize_neon.c deleted file mode 100644 index 9354ced699..0000000000 --- a/libs/libvpx/vp10/encoder/arm/neon/quantize_neon.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include - -#include - -#include "vpx_mem/vpx_mem.h" - -#include "vp10/common/quant_common.h" -#include "vp10/common/seg_common.h" - -#include "vp10/encoder/encoder.h" -#include "vp10/encoder/quantize.h" -#include "vp10/encoder/rd.h" - -void vp10_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count, - int skip_block, const int16_t *zbin_ptr, - const int16_t *round_ptr, const int16_t *quant_ptr, - const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr, - int16_t *dqcoeff_ptr, const int16_t *dequant_ptr, - uint16_t *eob_ptr, - const int16_t *scan, const int16_t *iscan) { - // TODO(jingning) Decide the need of these arguments after the - // quantization process is completed. - (void)zbin_ptr; - (void)quant_shift_ptr; - (void)scan; - - if (!skip_block) { - // Quantization pass: All coefficients with index >= zero_flag are - // skippable. Note: zero_flag can be zero. - int i; - const int16x8_t v_zero = vdupq_n_s16(0); - const int16x8_t v_one = vdupq_n_s16(1); - int16x8_t v_eobmax_76543210 = vdupq_n_s16(-1); - int16x8_t v_round = vmovq_n_s16(round_ptr[1]); - int16x8_t v_quant = vmovq_n_s16(quant_ptr[1]); - int16x8_t v_dequant = vmovq_n_s16(dequant_ptr[1]); - // adjust for dc - v_round = vsetq_lane_s16(round_ptr[0], v_round, 0); - v_quant = vsetq_lane_s16(quant_ptr[0], v_quant, 0); - v_dequant = vsetq_lane_s16(dequant_ptr[0], v_dequant, 0); - // process dc and the first seven ac coeffs - { - const int16x8_t v_iscan = vld1q_s16(&iscan[0]); - const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[0]); - const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15); - const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero); - const int32x4_t v_tmp_lo = vmull_s16(vget_low_s16(v_tmp), - vget_low_s16(v_quant)); - const int32x4_t v_tmp_hi = vmull_s16(vget_high_s16(v_tmp), - vget_high_s16(v_quant)); - const int16x8_t v_tmp2 = vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), - vshrn_n_s32(v_tmp_hi, 16)); - const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero); - const int16x8_t 
v_iscan_plus1 = vaddq_s16(v_iscan, v_one); - const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1); - const int16x8_t v_qcoeff_a = veorq_s16(v_tmp2, v_coeff_sign); - const int16x8_t v_qcoeff = vsubq_s16(v_qcoeff_a, v_coeff_sign); - const int16x8_t v_dqcoeff = vmulq_s16(v_qcoeff, v_dequant); - v_eobmax_76543210 = vmaxq_s16(v_eobmax_76543210, v_nz_iscan); - vst1q_s16(&qcoeff_ptr[0], v_qcoeff); - vst1q_s16(&dqcoeff_ptr[0], v_dqcoeff); - v_round = vmovq_n_s16(round_ptr[1]); - v_quant = vmovq_n_s16(quant_ptr[1]); - v_dequant = vmovq_n_s16(dequant_ptr[1]); - } - // now process the rest of the ac coeffs - for (i = 8; i < count; i += 8) { - const int16x8_t v_iscan = vld1q_s16(&iscan[i]); - const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[i]); - const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15); - const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero); - const int32x4_t v_tmp_lo = vmull_s16(vget_low_s16(v_tmp), - vget_low_s16(v_quant)); - const int32x4_t v_tmp_hi = vmull_s16(vget_high_s16(v_tmp), - vget_high_s16(v_quant)); - const int16x8_t v_tmp2 = vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), - vshrn_n_s32(v_tmp_hi, 16)); - const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero); - const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one); - const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1); - const int16x8_t v_qcoeff_a = veorq_s16(v_tmp2, v_coeff_sign); - const int16x8_t v_qcoeff = vsubq_s16(v_qcoeff_a, v_coeff_sign); - const int16x8_t v_dqcoeff = vmulq_s16(v_qcoeff, v_dequant); - v_eobmax_76543210 = vmaxq_s16(v_eobmax_76543210, v_nz_iscan); - vst1q_s16(&qcoeff_ptr[i], v_qcoeff); - vst1q_s16(&dqcoeff_ptr[i], v_dqcoeff); - } - { - const int16x4_t v_eobmax_3210 = - vmax_s16(vget_low_s16(v_eobmax_76543210), - vget_high_s16(v_eobmax_76543210)); - const int64x1_t v_eobmax_xx32 = - vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32); - const int16x4_t v_eobmax_tmp = - vmax_s16(v_eobmax_3210, vreinterpret_s16_s64(v_eobmax_xx32)); - 
const int64x1_t v_eobmax_xxx3 = - vshr_n_s64(vreinterpret_s64_s16(v_eobmax_tmp), 16); - const int16x4_t v_eobmax_final = - vmax_s16(v_eobmax_tmp, vreinterpret_s16_s64(v_eobmax_xxx3)); - - *eob_ptr = (uint16_t)vget_lane_s16(v_eobmax_final, 0); - } - } else { - memset(qcoeff_ptr, 0, count * sizeof(int16_t)); - memset(dqcoeff_ptr, 0, count * sizeof(int16_t)); - *eob_ptr = 0; - } -} diff --git a/libs/libvpx/vp10/encoder/bitstream.c b/libs/libvpx/vp10/encoder/bitstream.c deleted file mode 100644 index 04ce61d55a..0000000000 --- a/libs/libvpx/vp10/encoder/bitstream.c +++ /dev/null @@ -1,1573 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include -#include - -#include "vpx/vpx_encoder.h" -#include "vpx_dsp/bitwriter_buffer.h" -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/mem_ops.h" -#include "vpx_ports/system_state.h" - -#include "vp10/common/entropy.h" -#include "vp10/common/entropymode.h" -#include "vp10/common/entropymv.h" -#include "vp10/common/mvref_common.h" -#include "vp10/common/pred_common.h" -#include "vp10/common/seg_common.h" -#include "vp10/common/tile_common.h" - -#include "vp10/encoder/cost.h" -#include "vp10/encoder/bitstream.h" -#include "vp10/encoder/encodemv.h" -#include "vp10/encoder/mcomp.h" -#include "vp10/encoder/segmentation.h" -#include "vp10/encoder/subexp.h" -#include "vp10/encoder/tokenize.h" - -static const struct vp10_token intra_mode_encodings[INTRA_MODES] = { - {0, 1}, {6, 3}, {28, 5}, {30, 5}, {58, 6}, {59, 6}, {126, 7}, {127, 7}, - {62, 6}, {2, 2}}; -static const struct vp10_token switchable_interp_encodings[SWITCHABLE_FILTERS] = - {{0, 1}, {2, 2}, {3, 2}}; -static const struct vp10_token partition_encodings[PARTITION_TYPES] = - {{0, 1}, {2, 2}, {6, 3}, {7, 3}}; -static const struct vp10_token inter_mode_encodings[INTER_MODES] = - {{2, 2}, {6, 3}, {0, 1}, {7, 3}}; - -static struct vp10_token ext_tx_encodings[TX_TYPES]; - -void vp10_encode_token_init() { - vp10_tokens_from_tree(ext_tx_encodings, vp10_ext_tx_tree); -} - -static void write_intra_mode(vpx_writer *w, PREDICTION_MODE mode, - const vpx_prob *probs) { - vp10_write_token(w, vp10_intra_mode_tree, probs, &intra_mode_encodings[mode]); -} - -static void write_inter_mode(vpx_writer *w, PREDICTION_MODE mode, - const vpx_prob *probs) { - assert(is_inter_mode(mode)); - vp10_write_token(w, vp10_inter_mode_tree, probs, - &inter_mode_encodings[INTER_OFFSET(mode)]); -} - -static void encode_unsigned_max(struct vpx_write_bit_buffer *wb, - int data, int max) { - vpx_wb_write_literal(wb, data, get_unsigned_bits(max)); -} - -static void prob_diff_update(const 
vpx_tree_index *tree, - vpx_prob probs[/*n - 1*/], - const unsigned int counts[/*n - 1*/], - int n, vpx_writer *w) { - int i; - unsigned int branch_ct[32][2]; - - // Assuming max number of probabilities <= 32 - assert(n <= 32); - - vp10_tree_probs_from_distribution(tree, branch_ct, counts); - for (i = 0; i < n - 1; ++i) - vp10_cond_prob_diff_update(w, &probs[i], branch_ct[i]); -} - -static int prob_diff_update_savings(const vpx_tree_index *tree, - vpx_prob probs[/*n - 1*/], - const unsigned int counts[/*n - 1*/], - int n) { - int i; - unsigned int branch_ct[32][2]; - int savings = 0; - - // Assuming max number of probabilities <= 32 - assert(n <= 32); - vp10_tree_probs_from_distribution(tree, branch_ct, counts); - for (i = 0; i < n - 1; ++i) { - savings += vp10_cond_prob_diff_update_savings(&probs[i], - branch_ct[i]); - } - return savings; -} - -static void write_selected_tx_size(const VP10_COMMON *cm, - const MACROBLOCKD *xd, vpx_writer *w) { - TX_SIZE tx_size = xd->mi[0]->mbmi.tx_size; - BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type; - const TX_SIZE max_tx_size = max_txsize_lookup[bsize]; - const vpx_prob *const tx_probs = get_tx_probs2(max_tx_size, xd, - &cm->fc->tx_probs); - vpx_write(w, tx_size != TX_4X4, tx_probs[0]); - if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) { - vpx_write(w, tx_size != TX_8X8, tx_probs[1]); - if (tx_size != TX_8X8 && max_tx_size >= TX_32X32) - vpx_write(w, tx_size != TX_16X16, tx_probs[2]); - } -} - -static int write_skip(const VP10_COMMON *cm, const MACROBLOCKD *xd, - int segment_id, const MODE_INFO *mi, vpx_writer *w) { - if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) { - return 1; - } else { - const int skip = mi->mbmi.skip; - vpx_write(w, skip, vp10_get_skip_prob(cm, xd)); - return skip; - } -} - -static void update_skip_probs(VP10_COMMON *cm, vpx_writer *w, - FRAME_COUNTS *counts) { - int k; - - for (k = 0; k < SKIP_CONTEXTS; ++k) - vp10_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]); -} - -static 
void update_switchable_interp_probs(VP10_COMMON *cm, vpx_writer *w, - FRAME_COUNTS *counts) { - int j; - for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) - prob_diff_update(vp10_switchable_interp_tree, - cm->fc->switchable_interp_prob[j], - counts->switchable_interp[j], SWITCHABLE_FILTERS, w); -} - -static void update_ext_tx_probs(VP10_COMMON *cm, vpx_writer *w) { - const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) - - vp10_cost_zero(GROUP_DIFF_UPDATE_PROB); - int i, j; - - int savings = 0; - int do_update = 0; - for (i = TX_4X4; i < EXT_TX_SIZES; ++i) { - for (j = 0; j < TX_TYPES; ++j) - savings += prob_diff_update_savings( - vp10_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j], - cm->counts.intra_ext_tx[i][j], TX_TYPES); - } - do_update = savings > savings_thresh; - vpx_write(w, do_update, GROUP_DIFF_UPDATE_PROB); - if (do_update) { - for (i = TX_4X4; i < EXT_TX_SIZES; ++i) { - for (j = 0; j < TX_TYPES; ++j) - prob_diff_update(vp10_ext_tx_tree, - cm->fc->intra_ext_tx_prob[i][j], - cm->counts.intra_ext_tx[i][j], - TX_TYPES, w); - } - } - savings = 0; - do_update = 0; - for (i = TX_4X4; i < EXT_TX_SIZES; ++i) { - savings += prob_diff_update_savings( - vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i], - cm->counts.inter_ext_tx[i], TX_TYPES); - } - do_update = savings > savings_thresh; - vpx_write(w, do_update, GROUP_DIFF_UPDATE_PROB); - if (do_update) { - for (i = TX_4X4; i < EXT_TX_SIZES; ++i) { - prob_diff_update(vp10_ext_tx_tree, - cm->fc->inter_ext_tx_prob[i], - cm->counts.inter_ext_tx[i], - TX_TYPES, w); - } - } -} - -static void pack_mb_tokens(vpx_writer *w, - TOKENEXTRA **tp, const TOKENEXTRA *const stop, - vpx_bit_depth_t bit_depth, const TX_SIZE tx) { - TOKENEXTRA *p = *tp; -#if !CONFIG_MISC_FIXES - (void) tx; -#endif - - while (p < stop && p->token != EOSB_TOKEN) { - const int t = p->token; - const struct vp10_token *const a = &vp10_coef_encodings[t]; - int i = 0; - int v = a->value; - int n = a->len; -#if CONFIG_VP9_HIGHBITDEPTH - const 
vp10_extra_bit *b; - if (bit_depth == VPX_BITS_12) - b = &vp10_extra_bits_high12[t]; - else if (bit_depth == VPX_BITS_10) - b = &vp10_extra_bits_high10[t]; - else - b = &vp10_extra_bits[t]; -#else - const vp10_extra_bit *const b = &vp10_extra_bits[t]; - (void) bit_depth; -#endif // CONFIG_VP9_HIGHBITDEPTH - - /* skip one or two nodes */ - if (p->skip_eob_node) { - n -= p->skip_eob_node; - i = 2 * p->skip_eob_node; - } - - // TODO(jbb): expanding this can lead to big gains. It allows - // much better branch prediction and would enable us to avoid numerous - // lookups and compares. - - // If we have a token that's in the constrained set, the coefficient tree - // is split into two treed writes. The first treed write takes care of the - // unconstrained nodes. The second treed write takes care of the - // constrained nodes. - if (t >= TWO_TOKEN && t < EOB_TOKEN) { - int len = UNCONSTRAINED_NODES - p->skip_eob_node; - int bits = v >> (n - len); - vp10_write_tree(w, vp10_coef_tree, p->context_tree, bits, len, i); - vp10_write_tree(w, vp10_coef_con_tree, - vp10_pareto8_full[p->context_tree[PIVOT_NODE] - 1], - v, n - len, 0); - } else { - vp10_write_tree(w, vp10_coef_tree, p->context_tree, v, n, i); - } - - if (b->base_val) { - const int e = p->extra, l = b->len; -#if CONFIG_MISC_FIXES - int skip_bits = - (b->base_val == CAT6_MIN_VAL) ? 
TX_SIZES - 1 - tx : 0; -#else - int skip_bits = 0; -#endif - - if (l) { - const unsigned char *pb = b->prob; - int v = e >> 1; - int n = l; /* number of bits in v, assumed nonzero */ - int i = 0; - - do { - const int bb = (v >> --n) & 1; - if (skip_bits) { - skip_bits--; - assert(!bb); - } else { - vpx_write(w, bb, pb[i >> 1]); - } - i = b->tree[i + bb]; - } while (n); - } - - vpx_write_bit(w, e & 1); - } - ++p; - } - - *tp = p; -} - -static void write_segment_id(vpx_writer *w, const struct segmentation *seg, - const struct segmentation_probs *segp, - int segment_id) { - if (seg->enabled && seg->update_map) - vp10_write_tree(w, vp10_segment_tree, segp->tree_probs, segment_id, 3, 0); -} - -// This function encodes the reference frame -static void write_ref_frames(const VP10_COMMON *cm, const MACROBLOCKD *xd, - vpx_writer *w) { - const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - const int is_compound = has_second_ref(mbmi); - const int segment_id = mbmi->segment_id; - - // If segment level coding of this signal is disabled... 
- // or the segment allows multiple reference frame options - if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) { - assert(!is_compound); - assert(mbmi->ref_frame[0] == - get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME)); - } else { - // does the feature use compound prediction or not - // (if not specified at the frame/segment level) - if (cm->reference_mode == REFERENCE_MODE_SELECT) { - vpx_write(w, is_compound, vp10_get_reference_mode_prob(cm, xd)); - } else { - assert(!is_compound == (cm->reference_mode == SINGLE_REFERENCE)); - } - - if (is_compound) { - vpx_write(w, mbmi->ref_frame[0] == GOLDEN_FRAME, - vp10_get_pred_prob_comp_ref_p(cm, xd)); - } else { - const int bit0 = mbmi->ref_frame[0] != LAST_FRAME; - vpx_write(w, bit0, vp10_get_pred_prob_single_ref_p1(cm, xd)); - if (bit0) { - const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME; - vpx_write(w, bit1, vp10_get_pred_prob_single_ref_p2(cm, xd)); - } - } - } -} - -static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi, - vpx_writer *w) { - VP10_COMMON *const cm = &cpi->common; - const nmv_context *nmvc = &cm->fc->nmvc; - const MACROBLOCK *const x = &cpi->td.mb; - const MACROBLOCKD *const xd = &x->e_mbd; - const struct segmentation *const seg = &cm->seg; -#if CONFIG_MISC_FIXES - const struct segmentation_probs *const segp = &cm->fc->seg; -#else - const struct segmentation_probs *const segp = &cm->segp; -#endif - const MB_MODE_INFO *const mbmi = &mi->mbmi; - const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext; - const PREDICTION_MODE mode = mbmi->mode; - const int segment_id = mbmi->segment_id; - const BLOCK_SIZE bsize = mbmi->sb_type; - const int allow_hp = cm->allow_high_precision_mv; - const int is_inter = is_inter_block(mbmi); - const int is_compound = has_second_ref(mbmi); - int skip, ref; - - if (seg->update_map) { - if (seg->temporal_update) { - const int pred_flag = mbmi->seg_id_predicted; - vpx_prob pred_prob = vp10_get_pred_prob_seg_id(segp, xd); - vpx_write(w, 
pred_flag, pred_prob); - if (!pred_flag) - write_segment_id(w, seg, segp, segment_id); - } else { - write_segment_id(w, seg, segp, segment_id); - } - } - - skip = write_skip(cm, xd, segment_id, mi, w); - - if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) - vpx_write(w, is_inter, vp10_get_intra_inter_prob(cm, xd)); - - if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT && - !(is_inter && skip) && !xd->lossless[segment_id]) { - write_selected_tx_size(cm, xd, w); - } - - if (!is_inter) { - if (bsize >= BLOCK_8X8) { - write_intra_mode(w, mode, cm->fc->y_mode_prob[size_group_lookup[bsize]]); - } else { - int idx, idy; - const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; - const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; - for (idy = 0; idy < 2; idy += num_4x4_h) { - for (idx = 0; idx < 2; idx += num_4x4_w) { - const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode; - write_intra_mode(w, b_mode, cm->fc->y_mode_prob[0]); - } - } - } - write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mode]); - } else { - const int mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]]; - const vpx_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx]; - write_ref_frames(cm, xd, w); - - // If segment skip is not enabled code the mode. 
- if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) { - if (bsize >= BLOCK_8X8) { - write_inter_mode(w, mode, inter_probs); - } - } - - if (cm->interp_filter == SWITCHABLE) { - const int ctx = vp10_get_pred_context_switchable_interp(xd); - vp10_write_token(w, vp10_switchable_interp_tree, - cm->fc->switchable_interp_prob[ctx], - &switchable_interp_encodings[mbmi->interp_filter]); - ++cpi->interp_filter_selected[0][mbmi->interp_filter]; - } else { - assert(mbmi->interp_filter == cm->interp_filter); - } - - if (bsize < BLOCK_8X8) { - const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; - const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; - int idx, idy; - for (idy = 0; idy < 2; idy += num_4x4_h) { - for (idx = 0; idx < 2; idx += num_4x4_w) { - const int j = idy * 2 + idx; - const PREDICTION_MODE b_mode = mi->bmi[j].as_mode; - write_inter_mode(w, b_mode, inter_probs); - if (b_mode == NEWMV) { - for (ref = 0; ref < 1 + is_compound; ++ref) - vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv, - &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, - nmvc, allow_hp); - } - } - } - } else { - if (mode == NEWMV) { - for (ref = 0; ref < 1 + is_compound; ++ref) - vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv, - &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc, - allow_hp); - } - } - } - if (mbmi->tx_size < TX_32X32 && - cm->base_qindex > 0 && !mbmi->skip && - !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { - if (is_inter) { - vp10_write_token( - w, vp10_ext_tx_tree, - cm->fc->inter_ext_tx_prob[mbmi->tx_size], - &ext_tx_encodings[mbmi->tx_type]); - } else { - vp10_write_token( - w, vp10_ext_tx_tree, - cm->fc->intra_ext_tx_prob[mbmi->tx_size] - [intra_mode_to_tx_type_context[mbmi->mode]], - &ext_tx_encodings[mbmi->tx_type]); - } - } else { - if (!mbmi->skip) - assert(mbmi->tx_type == DCT_DCT); - } -} - -static void write_mb_modes_kf(const VP10_COMMON *cm, const MACROBLOCKD *xd, - MODE_INFO **mi_8x8, vpx_writer *w) { - const struct segmentation 
*const seg = &cm->seg; -#if CONFIG_MISC_FIXES - const struct segmentation_probs *const segp = &cm->fc->seg; -#else - const struct segmentation_probs *const segp = &cm->segp; -#endif - const MODE_INFO *const mi = mi_8x8[0]; - const MODE_INFO *const above_mi = xd->above_mi; - const MODE_INFO *const left_mi = xd->left_mi; - const MB_MODE_INFO *const mbmi = &mi->mbmi; - const BLOCK_SIZE bsize = mbmi->sb_type; - - if (seg->update_map) - write_segment_id(w, seg, segp, mbmi->segment_id); - - write_skip(cm, xd, mbmi->segment_id, mi, w); - - if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT && - !xd->lossless[mbmi->segment_id]) - write_selected_tx_size(cm, xd, w); - - if (bsize >= BLOCK_8X8) { - write_intra_mode(w, mbmi->mode, - get_y_mode_probs(cm, mi, above_mi, left_mi, 0)); - } else { - const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; - const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; - int idx, idy; - - for (idy = 0; idy < 2; idy += num_4x4_h) { - for (idx = 0; idx < 2; idx += num_4x4_w) { - const int block = idy * 2 + idx; - write_intra_mode(w, mi->bmi[block].as_mode, - get_y_mode_probs(cm, mi, above_mi, left_mi, block)); - } - } - } - - write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mbmi->mode]); - - if (mbmi->tx_size < TX_32X32 && - cm->base_qindex > 0 && !mbmi->skip && - !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { - vp10_write_token( - w, vp10_ext_tx_tree, - cm->fc->intra_ext_tx_prob[mbmi->tx_size] - [intra_mode_to_tx_type_context[mbmi->mode]], - &ext_tx_encodings[mbmi->tx_type]); - } -} - -static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile, - vpx_writer *w, TOKENEXTRA **tok, - const TOKENEXTRA *const tok_end, - int mi_row, int mi_col) { - const VP10_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &cpi->td.mb.e_mbd; - MODE_INFO *m; - int plane; - - xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col); - m = xd->mi[0]; - - cpi->td.mb.mbmi_ext = cpi->mbmi_ext_base + (mi_row * 
cm->mi_cols + mi_col); - - set_mi_row_col(xd, tile, - mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type], - mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type], - cm->mi_rows, cm->mi_cols); - if (frame_is_intra_only(cm)) { - write_mb_modes_kf(cm, xd, xd->mi, w); - } else { - pack_inter_mode_mvs(cpi, m, w); - } - - if (!m->mbmi.skip) { - assert(*tok < tok_end); - for (plane = 0; plane < MAX_MB_PLANE; ++plane) { - TX_SIZE tx = plane ? get_uv_tx_size(&m->mbmi, &xd->plane[plane]) - : m->mbmi.tx_size; - pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx); - assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN); - (*tok)++; - } - } -} - -static void write_partition(const VP10_COMMON *const cm, - const MACROBLOCKD *const xd, - int hbs, int mi_row, int mi_col, - PARTITION_TYPE p, BLOCK_SIZE bsize, vpx_writer *w) { - const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize); - const vpx_prob *const probs = cm->fc->partition_prob[ctx]; - const int has_rows = (mi_row + hbs) < cm->mi_rows; - const int has_cols = (mi_col + hbs) < cm->mi_cols; - - if (has_rows && has_cols) { - vp10_write_token(w, vp10_partition_tree, probs, &partition_encodings[p]); - } else if (!has_rows && has_cols) { - assert(p == PARTITION_SPLIT || p == PARTITION_HORZ); - vpx_write(w, p == PARTITION_SPLIT, probs[1]); - } else if (has_rows && !has_cols) { - assert(p == PARTITION_SPLIT || p == PARTITION_VERT); - vpx_write(w, p == PARTITION_SPLIT, probs[2]); - } else { - assert(p == PARTITION_SPLIT); - } -} - -static void write_modes_sb(VP10_COMP *cpi, - const TileInfo *const tile, vpx_writer *w, - TOKENEXTRA **tok, const TOKENEXTRA *const tok_end, - int mi_row, int mi_col, BLOCK_SIZE bsize) { - const VP10_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &cpi->td.mb.e_mbd; - - const int bsl = b_width_log2_lookup[bsize]; - const int bs = (1 << bsl) / 4; - PARTITION_TYPE partition; - BLOCK_SIZE subsize; - const MODE_INFO *m = NULL; - - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - 
return; - - m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]; - - partition = partition_lookup[bsl][m->mbmi.sb_type]; - write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w); - subsize = get_subsize(bsize, partition); - if (subsize < BLOCK_8X8) { - write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); - } else { - switch (partition) { - case PARTITION_NONE: - write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); - break; - case PARTITION_HORZ: - write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); - if (mi_row + bs < cm->mi_rows) - write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col); - break; - case PARTITION_VERT: - write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); - if (mi_col + bs < cm->mi_cols) - write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs); - break; - case PARTITION_SPLIT: - write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize); - write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs, - subsize); - write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col, - subsize); - write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs, - subsize); - break; - default: - assert(0); - } - } - - // update partition context - if (bsize >= BLOCK_8X8 && - (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) - update_partition_context(xd, mi_row, mi_col, subsize, bsize); -} - -static void write_modes(VP10_COMP *cpi, - const TileInfo *const tile, vpx_writer *w, - TOKENEXTRA **tok, const TOKENEXTRA *const tok_end) { - MACROBLOCKD *const xd = &cpi->td.mb.e_mbd; - int mi_row, mi_col; - - for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end; - mi_row += MI_BLOCK_SIZE) { - vp10_zero(xd->left_seg_context); - for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; - mi_col += MI_BLOCK_SIZE) - write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, - BLOCK_64X64); - } -} - -static void build_tree_distribution(VP10_COMP *cpi, TX_SIZE tx_size, - vp10_coeff_stats 
*coef_branch_ct, - vp10_coeff_probs_model *coef_probs) { - vp10_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size]; - unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] = - cpi->common.counts.eob_branch[tx_size]; - int i, j, k, l, m; - - for (i = 0; i < PLANE_TYPES; ++i) { - for (j = 0; j < REF_TYPES; ++j) { - for (k = 0; k < COEF_BANDS; ++k) { - for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { - vp10_tree_probs_from_distribution(vp10_coef_tree, - coef_branch_ct[i][j][k][l], - coef_counts[i][j][k][l]); - coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] - - coef_branch_ct[i][j][k][l][0][0]; - for (m = 0; m < UNCONSTRAINED_NODES; ++m) - coef_probs[i][j][k][l][m] = get_binary_prob( - coef_branch_ct[i][j][k][l][m][0], - coef_branch_ct[i][j][k][l][m][1]); - } - } - } - } -} - -static void update_coef_probs_common(vpx_writer* const bc, VP10_COMP *cpi, - TX_SIZE tx_size, - vp10_coeff_stats *frame_branch_ct, - vp10_coeff_probs_model *new_coef_probs) { - vp10_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size]; - const vpx_prob upd = DIFF_UPDATE_PROB; - const int entropy_nodes_update = UNCONSTRAINED_NODES; - int i, j, k, l, t; - int stepsize = cpi->sf.coeff_prob_appx_step; - - switch (cpi->sf.use_fast_coef_updates) { - case TWO_LOOP: { - /* dry run to see if there is any update at all needed */ - int savings = 0; - int update[2] = {0, 0}; - for (i = 0; i < PLANE_TYPES; ++i) { - for (j = 0; j < REF_TYPES; ++j) { - for (k = 0; k < COEF_BANDS; ++k) { - for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { - for (t = 0; t < entropy_nodes_update; ++t) { - vpx_prob newp = new_coef_probs[i][j][k][l][t]; - const vpx_prob oldp = old_coef_probs[i][j][k][l][t]; - int s; - int u = 0; - if (t == PIVOT_NODE) - s = vp10_prob_diff_update_savings_search_model( - frame_branch_ct[i][j][k][l][0], - old_coef_probs[i][j][k][l], &newp, upd, stepsize); - else - s = vp10_prob_diff_update_savings_search( - frame_branch_ct[i][j][k][l][t], oldp, 
&newp, upd); - if (s > 0 && newp != oldp) - u = 1; - if (u) - savings += s - (int)(vp10_cost_zero(upd)); - else - savings -= (int)(vp10_cost_zero(upd)); - update[u]++; - } - } - } - } - } - - // printf("Update %d %d, savings %d\n", update[0], update[1], savings); - /* Is coef updated at all */ - if (update[1] == 0 || savings < 0) { - vpx_write_bit(bc, 0); - return; - } - vpx_write_bit(bc, 1); - for (i = 0; i < PLANE_TYPES; ++i) { - for (j = 0; j < REF_TYPES; ++j) { - for (k = 0; k < COEF_BANDS; ++k) { - for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { - // calc probs and branch cts for this frame only - for (t = 0; t < entropy_nodes_update; ++t) { - vpx_prob newp = new_coef_probs[i][j][k][l][t]; - vpx_prob *oldp = old_coef_probs[i][j][k][l] + t; - const vpx_prob upd = DIFF_UPDATE_PROB; - int s; - int u = 0; - if (t == PIVOT_NODE) - s = vp10_prob_diff_update_savings_search_model( - frame_branch_ct[i][j][k][l][0], - old_coef_probs[i][j][k][l], &newp, upd, stepsize); - else - s = vp10_prob_diff_update_savings_search( - frame_branch_ct[i][j][k][l][t], - *oldp, &newp, upd); - if (s > 0 && newp != *oldp) - u = 1; - vpx_write(bc, u, upd); - if (u) { - /* send/use new probability */ - vp10_write_prob_diff_update(bc, newp, *oldp); - *oldp = newp; - } - } - } - } - } - } - return; - } - - case ONE_LOOP_REDUCED: { - int updates = 0; - int noupdates_before_first = 0; - for (i = 0; i < PLANE_TYPES; ++i) { - for (j = 0; j < REF_TYPES; ++j) { - for (k = 0; k < COEF_BANDS; ++k) { - for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { - // calc probs and branch cts for this frame only - for (t = 0; t < entropy_nodes_update; ++t) { - vpx_prob newp = new_coef_probs[i][j][k][l][t]; - vpx_prob *oldp = old_coef_probs[i][j][k][l] + t; - int s; - int u = 0; - - if (t == PIVOT_NODE) { - s = vp10_prob_diff_update_savings_search_model( - frame_branch_ct[i][j][k][l][0], - old_coef_probs[i][j][k][l], &newp, upd, stepsize); - } else { - s = vp10_prob_diff_update_savings_search( - 
frame_branch_ct[i][j][k][l][t], - *oldp, &newp, upd); - } - - if (s > 0 && newp != *oldp) - u = 1; - updates += u; - if (u == 0 && updates == 0) { - noupdates_before_first++; - continue; - } - if (u == 1 && updates == 1) { - int v; - // first update - vpx_write_bit(bc, 1); - for (v = 0; v < noupdates_before_first; ++v) - vpx_write(bc, 0, upd); - } - vpx_write(bc, u, upd); - if (u) { - /* send/use new probability */ - vp10_write_prob_diff_update(bc, newp, *oldp); - *oldp = newp; - } - } - } - } - } - } - if (updates == 0) { - vpx_write_bit(bc, 0); // no updates - } - return; - } - default: - assert(0); - } -} - -static void update_coef_probs(VP10_COMP *cpi, vpx_writer* w) { - const TX_MODE tx_mode = cpi->common.tx_mode; - const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode]; - TX_SIZE tx_size; - for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) { - vp10_coeff_stats frame_branch_ct[PLANE_TYPES]; - vp10_coeff_probs_model frame_coef_probs[PLANE_TYPES]; - if (cpi->td.counts->tx.tx_totals[tx_size] <= 20 || - (tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) { - vpx_write_bit(w, 0); - } else { - build_tree_distribution(cpi, tx_size, frame_branch_ct, - frame_coef_probs); - update_coef_probs_common(w, cpi, tx_size, frame_branch_ct, - frame_coef_probs); - } - } -} - -static void encode_loopfilter(struct loopfilter *lf, - struct vpx_write_bit_buffer *wb) { - int i; - - // Encode the loop filter level and type - vpx_wb_write_literal(wb, lf->filter_level, 6); - vpx_wb_write_literal(wb, lf->sharpness_level, 3); - - // Write out loop filter deltas applied at the MB level based on mode or - // ref frame (if they are enabled). 
- vpx_wb_write_bit(wb, lf->mode_ref_delta_enabled); - - if (lf->mode_ref_delta_enabled) { - vpx_wb_write_bit(wb, lf->mode_ref_delta_update); - if (lf->mode_ref_delta_update) { - for (i = 0; i < MAX_REF_FRAMES; i++) { - const int delta = lf->ref_deltas[i]; - const int changed = delta != lf->last_ref_deltas[i]; - vpx_wb_write_bit(wb, changed); - if (changed) { - lf->last_ref_deltas[i] = delta; - vpx_wb_write_inv_signed_literal(wb, delta, 6); - } - } - - for (i = 0; i < MAX_MODE_LF_DELTAS; i++) { - const int delta = lf->mode_deltas[i]; - const int changed = delta != lf->last_mode_deltas[i]; - vpx_wb_write_bit(wb, changed); - if (changed) { - lf->last_mode_deltas[i] = delta; - vpx_wb_write_inv_signed_literal(wb, delta, 6); - } - } - } - } -} - -static void write_delta_q(struct vpx_write_bit_buffer *wb, int delta_q) { - if (delta_q != 0) { - vpx_wb_write_bit(wb, 1); - vpx_wb_write_inv_signed_literal(wb, delta_q, CONFIG_MISC_FIXES ? 6 : 4); - } else { - vpx_wb_write_bit(wb, 0); - } -} - -static void encode_quantization(const VP10_COMMON *const cm, - struct vpx_write_bit_buffer *wb) { - vpx_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS); - write_delta_q(wb, cm->y_dc_delta_q); - write_delta_q(wb, cm->uv_dc_delta_q); - write_delta_q(wb, cm->uv_ac_delta_q); -} - -static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd, - struct vpx_write_bit_buffer *wb) { - int i, j; - - const struct segmentation *seg = &cm->seg; -#if !CONFIG_MISC_FIXES - const struct segmentation_probs *segp = &cm->segp; -#endif - - vpx_wb_write_bit(wb, seg->enabled); - if (!seg->enabled) - return; - - // Segmentation map - if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) { - vpx_wb_write_bit(wb, seg->update_map); - } else { - assert(seg->update_map == 1); - } - if (seg->update_map) { - // Select the coding strategy (temporal or spatial) - vp10_choose_segmap_coding_method(cm, xd); -#if !CONFIG_MISC_FIXES - // Write out probabilities used to decode unpredicted macro-block segments - 
for (i = 0; i < SEG_TREE_PROBS; i++) { - const int prob = segp->tree_probs[i]; - const int update = prob != MAX_PROB; - vpx_wb_write_bit(wb, update); - if (update) - vpx_wb_write_literal(wb, prob, 8); - } -#endif - - // Write out the chosen coding method. - if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) { - vpx_wb_write_bit(wb, seg->temporal_update); - } else { - assert(seg->temporal_update == 0); - } - -#if !CONFIG_MISC_FIXES - if (seg->temporal_update) { - for (i = 0; i < PREDICTION_PROBS; i++) { - const int prob = segp->pred_probs[i]; - const int update = prob != MAX_PROB; - vpx_wb_write_bit(wb, update); - if (update) - vpx_wb_write_literal(wb, prob, 8); - } - } -#endif - } - - // Segmentation data - vpx_wb_write_bit(wb, seg->update_data); - if (seg->update_data) { - vpx_wb_write_bit(wb, seg->abs_delta); - - for (i = 0; i < MAX_SEGMENTS; i++) { - for (j = 0; j < SEG_LVL_MAX; j++) { - const int active = segfeature_active(seg, i, j); - vpx_wb_write_bit(wb, active); - if (active) { - const int data = get_segdata(seg, i, j); - const int data_max = vp10_seg_feature_data_max(j); - - if (vp10_is_segfeature_signed(j)) { - encode_unsigned_max(wb, abs(data), data_max); - vpx_wb_write_bit(wb, data < 0); - } else { - encode_unsigned_max(wb, data, data_max); - } - } - } - } - } -} - -#if CONFIG_MISC_FIXES -static void update_seg_probs(VP10_COMP *cpi, vpx_writer *w) { - VP10_COMMON *cm = &cpi->common; - - if (!cpi->common.seg.enabled) - return; - - if (cpi->common.seg.temporal_update) { - int i; - - for (i = 0; i < PREDICTION_PROBS; i++) - vp10_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i], - cm->counts.seg.pred[i]); - - prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs, - cm->counts.seg.tree_mispred, MAX_SEGMENTS, w); - } else { - prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs, - cm->counts.seg.tree_total, MAX_SEGMENTS, w); - } -} - -static void write_txfm_mode(TX_MODE mode, struct vpx_write_bit_buffer *wb) { - vpx_wb_write_bit(wb, mode 
== TX_MODE_SELECT); - if (mode != TX_MODE_SELECT) - vpx_wb_write_literal(wb, mode, 2); -} -#else -static void write_txfm_mode(TX_MODE mode, struct vpx_writer *wb) { - vpx_write_literal(wb, VPXMIN(mode, ALLOW_32X32), 2); - if (mode >= ALLOW_32X32) - vpx_write_bit(wb, mode == TX_MODE_SELECT); -} -#endif - - -static void update_txfm_probs(VP10_COMMON *cm, vpx_writer *w, - FRAME_COUNTS *counts) { - - if (cm->tx_mode == TX_MODE_SELECT) { - int i, j; - unsigned int ct_8x8p[TX_SIZES - 3][2]; - unsigned int ct_16x16p[TX_SIZES - 2][2]; - unsigned int ct_32x32p[TX_SIZES - 1][2]; - - - for (i = 0; i < TX_SIZE_CONTEXTS; i++) { - vp10_tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p); - for (j = 0; j < TX_SIZES - 3; j++) - vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p8x8[i][j], ct_8x8p[j]); - } - - for (i = 0; i < TX_SIZE_CONTEXTS; i++) { - vp10_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], ct_16x16p); - for (j = 0; j < TX_SIZES - 2; j++) - vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j], - ct_16x16p[j]); - } - - for (i = 0; i < TX_SIZE_CONTEXTS; i++) { - vp10_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], ct_32x32p); - for (j = 0; j < TX_SIZES - 1; j++) - vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j], - ct_32x32p[j]); - } - } -} - -static void write_interp_filter(INTERP_FILTER filter, - struct vpx_write_bit_buffer *wb) { - vpx_wb_write_bit(wb, filter == SWITCHABLE); - if (filter != SWITCHABLE) - vpx_wb_write_literal(wb, filter, 2); -} - -static void fix_interp_filter(VP10_COMMON *cm, FRAME_COUNTS *counts) { - if (cm->interp_filter == SWITCHABLE) { - // Check to see if only one of the filters is actually used - int count[SWITCHABLE_FILTERS]; - int i, j, c = 0; - for (i = 0; i < SWITCHABLE_FILTERS; ++i) { - count[i] = 0; - for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j) - count[i] += counts->switchable_interp[j][i]; - c += (count[i] > 0); - } - if (c == 1) { - // Only one filter is used. 
So set the filter at frame level - for (i = 0; i < SWITCHABLE_FILTERS; ++i) { - if (count[i]) { - cm->interp_filter = i; - break; - } - } - } - } -} - -static void write_tile_info(const VP10_COMMON *const cm, - struct vpx_write_bit_buffer *wb) { - int min_log2_tile_cols, max_log2_tile_cols, ones; - vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols); - - // columns - ones = cm->log2_tile_cols - min_log2_tile_cols; - while (ones--) - vpx_wb_write_bit(wb, 1); - - if (cm->log2_tile_cols < max_log2_tile_cols) - vpx_wb_write_bit(wb, 0); - - // rows - vpx_wb_write_bit(wb, cm->log2_tile_rows != 0); - if (cm->log2_tile_rows != 0) - vpx_wb_write_bit(wb, cm->log2_tile_rows != 1); -} - -static int get_refresh_mask(VP10_COMP *cpi) { - if (vp10_preserve_existing_gf(cpi)) { - // We have decided to preserve the previously existing golden frame as our - // new ARF frame. However, in the short term we leave it in the GF slot and, - // if we're updating the GF with the current decoded frame, we save it - // instead to the ARF slot. - // Later, in the function vp10_encoder.c:vp10_update_reference_frames() we - // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it - // there so that it can be done outside of the recode loop. - // Note: This is highly specific to the use of ARF as a forward reference, - // and this needs to be generalized as other uses are implemented - // (like RTC/temporal scalability). 
- return (cpi->refresh_last_frame << cpi->lst_fb_idx) | - (cpi->refresh_golden_frame << cpi->alt_fb_idx); - } else { - int arf_idx = cpi->alt_fb_idx; - if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) { - const GF_GROUP *const gf_group = &cpi->twopass.gf_group; - arf_idx = gf_group->arf_update_idx[gf_group->index]; - } - return (cpi->refresh_last_frame << cpi->lst_fb_idx) | - (cpi->refresh_golden_frame << cpi->gld_fb_idx) | - (cpi->refresh_alt_ref_frame << arf_idx); - } -} - -static size_t encode_tiles(VP10_COMP *cpi, uint8_t *data_ptr, - unsigned int *max_tile_sz) { - VP10_COMMON *const cm = &cpi->common; - vpx_writer residual_bc; - int tile_row, tile_col; - TOKENEXTRA *tok_end; - size_t total_size = 0; - const int tile_cols = 1 << cm->log2_tile_cols; - const int tile_rows = 1 << cm->log2_tile_rows; - unsigned int max_tile = 0; - - memset(cm->above_seg_context, 0, - sizeof(*cm->above_seg_context) * mi_cols_aligned_to_sb(cm->mi_cols)); - - for (tile_row = 0; tile_row < tile_rows; tile_row++) { - for (tile_col = 0; tile_col < tile_cols; tile_col++) { - int tile_idx = tile_row * tile_cols + tile_col; - TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col]; - - tok_end = cpi->tile_tok[tile_row][tile_col] + - cpi->tok_count[tile_row][tile_col]; - - if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) - vpx_start_encode(&residual_bc, data_ptr + total_size + 4); - else - vpx_start_encode(&residual_bc, data_ptr + total_size); - - write_modes(cpi, &cpi->tile_data[tile_idx].tile_info, - &residual_bc, &tok, tok_end); - assert(tok == tok_end); - vpx_stop_encode(&residual_bc); - if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) { - unsigned int tile_sz; - - // size of this tile - assert(residual_bc.pos > 0); - tile_sz = residual_bc.pos - CONFIG_MISC_FIXES; - mem_put_le32(data_ptr + total_size, tile_sz); - max_tile = max_tile > tile_sz ? 
max_tile : tile_sz; - total_size += 4; - } - - total_size += residual_bc.pos; - } - } - *max_tile_sz = max_tile; - - return total_size; -} - -static void write_render_size(const VP10_COMMON *cm, - struct vpx_write_bit_buffer *wb) { - const int scaling_active = cm->width != cm->render_width || - cm->height != cm->render_height; - vpx_wb_write_bit(wb, scaling_active); - if (scaling_active) { - vpx_wb_write_literal(wb, cm->render_width - 1, 16); - vpx_wb_write_literal(wb, cm->render_height - 1, 16); - } -} - -static void write_frame_size(const VP10_COMMON *cm, - struct vpx_write_bit_buffer *wb) { - vpx_wb_write_literal(wb, cm->width - 1, 16); - vpx_wb_write_literal(wb, cm->height - 1, 16); - - write_render_size(cm, wb); -} - -static void write_frame_size_with_refs(VP10_COMP *cpi, - struct vpx_write_bit_buffer *wb) { - VP10_COMMON *const cm = &cpi->common; - int found = 0; - - MV_REFERENCE_FRAME ref_frame; - for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { - YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame); - - if (cfg != NULL) { - found = cm->width == cfg->y_crop_width && - cm->height == cfg->y_crop_height; -#if CONFIG_MISC_FIXES - found &= cm->render_width == cfg->render_width && - cm->render_height == cfg->render_height; -#endif - } - vpx_wb_write_bit(wb, found); - if (found) { - break; - } - } - - if (!found) { - vpx_wb_write_literal(wb, cm->width - 1, 16); - vpx_wb_write_literal(wb, cm->height - 1, 16); - -#if CONFIG_MISC_FIXES - write_render_size(cm, wb); -#endif - } - -#if !CONFIG_MISC_FIXES - write_render_size(cm, wb); -#endif -} - -static void write_sync_code(struct vpx_write_bit_buffer *wb) { - vpx_wb_write_literal(wb, VP10_SYNC_CODE_0, 8); - vpx_wb_write_literal(wb, VP10_SYNC_CODE_1, 8); - vpx_wb_write_literal(wb, VP10_SYNC_CODE_2, 8); -} - -static void write_profile(BITSTREAM_PROFILE profile, - struct vpx_write_bit_buffer *wb) { - switch (profile) { - case PROFILE_0: - vpx_wb_write_literal(wb, 0, 2); - break; - case 
PROFILE_1: - vpx_wb_write_literal(wb, 2, 2); - break; - case PROFILE_2: - vpx_wb_write_literal(wb, 1, 2); - break; - case PROFILE_3: - vpx_wb_write_literal(wb, 6, 3); - break; - default: - assert(0); - } -} - -static void write_bitdepth_colorspace_sampling( - VP10_COMMON *const cm, struct vpx_write_bit_buffer *wb) { - if (cm->profile >= PROFILE_2) { - assert(cm->bit_depth > VPX_BITS_8); - vpx_wb_write_bit(wb, cm->bit_depth == VPX_BITS_10 ? 0 : 1); - } - vpx_wb_write_literal(wb, cm->color_space, 3); - if (cm->color_space != VPX_CS_SRGB) { - // 0: [16, 235] (i.e. xvYCC), 1: [0, 255] - vpx_wb_write_bit(wb, cm->color_range); - if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) { - assert(cm->subsampling_x != 1 || cm->subsampling_y != 1); - vpx_wb_write_bit(wb, cm->subsampling_x); - vpx_wb_write_bit(wb, cm->subsampling_y); - vpx_wb_write_bit(wb, 0); // unused - } else { - assert(cm->subsampling_x == 1 && cm->subsampling_y == 1); - } - } else { - assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3); - vpx_wb_write_bit(wb, 0); // unused - } -} - -static void write_uncompressed_header(VP10_COMP *cpi, - struct vpx_write_bit_buffer *wb) { - VP10_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &cpi->td.mb.e_mbd; - - vpx_wb_write_literal(wb, VP9_FRAME_MARKER, 2); - - write_profile(cm->profile, wb); - - vpx_wb_write_bit(wb, 0); // show_existing_frame - vpx_wb_write_bit(wb, cm->frame_type); - vpx_wb_write_bit(wb, cm->show_frame); - vpx_wb_write_bit(wb, cm->error_resilient_mode); - - if (cm->frame_type == KEY_FRAME) { - write_sync_code(wb); - write_bitdepth_colorspace_sampling(cm, wb); - write_frame_size(cm, wb); - } else { - if (!cm->show_frame) - vpx_wb_write_bit(wb, cm->intra_only); - - if (!cm->error_resilient_mode) { -#if CONFIG_MISC_FIXES - if (cm->intra_only) { - vpx_wb_write_bit(wb, - cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL); - } else { - vpx_wb_write_bit(wb, - cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE); - if 
(cm->reset_frame_context != RESET_FRAME_CONTEXT_NONE) - vpx_wb_write_bit(wb, - cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL); - } -#else - static const int reset_frame_context_conv_tbl[3] = { 0, 2, 3 }; - - vpx_wb_write_literal(wb, - reset_frame_context_conv_tbl[cm->reset_frame_context], 2); -#endif - } - - if (cm->intra_only) { - write_sync_code(wb); - -#if CONFIG_MISC_FIXES - write_bitdepth_colorspace_sampling(cm, wb); -#else - // Note for profile 0, 420 8bpp is assumed. - if (cm->profile > PROFILE_0) { - write_bitdepth_colorspace_sampling(cm, wb); - } -#endif - - vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES); - write_frame_size(cm, wb); - } else { - MV_REFERENCE_FRAME ref_frame; - vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES); - for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { - assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX); - vpx_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame), - REF_FRAMES_LOG2); - vpx_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]); - } - - write_frame_size_with_refs(cpi, wb); - - vpx_wb_write_bit(wb, cm->allow_high_precision_mv); - - fix_interp_filter(cm, cpi->td.counts); - write_interp_filter(cm->interp_filter, wb); - } - } - - if (!cm->error_resilient_mode) { - vpx_wb_write_bit(wb, - cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF); -#if CONFIG_MISC_FIXES - if (cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF) -#endif - vpx_wb_write_bit(wb, cm->refresh_frame_context != - REFRESH_FRAME_CONTEXT_BACKWARD); - } - - vpx_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2); - - encode_loopfilter(&cm->lf, wb); - encode_quantization(cm, wb); - encode_segmentation(cm, xd, wb); -#if CONFIG_MISC_FIXES - if (!cm->seg.enabled && xd->lossless[0]) - cm->tx_mode = TX_4X4; - else - write_txfm_mode(cm->tx_mode, wb); - if (cpi->allow_comp_inter_inter) { - const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT; - const int 
use_compound_pred = cm->reference_mode != SINGLE_REFERENCE; - - vpx_wb_write_bit(wb, use_hybrid_pred); - if (!use_hybrid_pred) - vpx_wb_write_bit(wb, use_compound_pred); - } -#endif - - write_tile_info(cm, wb); -} - -static size_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) { - VP10_COMMON *const cm = &cpi->common; - FRAME_CONTEXT *const fc = cm->fc; - FRAME_COUNTS *counts = cpi->td.counts; - vpx_writer header_bc; - int i; -#if CONFIG_MISC_FIXES - int j; -#endif - - vpx_start_encode(&header_bc, data); - -#if !CONFIG_MISC_FIXES - if (cpi->td.mb.e_mbd.lossless[0]) { - cm->tx_mode = TX_4X4; - } else { - write_txfm_mode(cm->tx_mode, &header_bc); - update_txfm_probs(cm, &header_bc, counts); - } -#else - update_txfm_probs(cm, &header_bc, counts); -#endif - update_coef_probs(cpi, &header_bc); - update_skip_probs(cm, &header_bc, counts); -#if CONFIG_MISC_FIXES - update_seg_probs(cpi, &header_bc); - - for (i = 0; i < INTRA_MODES; ++i) - prob_diff_update(vp10_intra_mode_tree, fc->uv_mode_prob[i], - counts->uv_mode[i], INTRA_MODES, &header_bc); - - for (i = 0; i < PARTITION_CONTEXTS; ++i) - prob_diff_update(vp10_partition_tree, fc->partition_prob[i], - counts->partition[i], PARTITION_TYPES, &header_bc); -#endif - - if (frame_is_intra_only(cm)) { - vp10_copy(cm->kf_y_prob, vp10_kf_y_mode_prob); -#if CONFIG_MISC_FIXES - for (i = 0; i < INTRA_MODES; ++i) - for (j = 0; j < INTRA_MODES; ++j) - prob_diff_update(vp10_intra_mode_tree, cm->kf_y_prob[i][j], - counts->kf_y_mode[i][j], INTRA_MODES, &header_bc); -#endif - } else { - for (i = 0; i < INTER_MODE_CONTEXTS; ++i) - prob_diff_update(vp10_inter_mode_tree, cm->fc->inter_mode_probs[i], - counts->inter_mode[i], INTER_MODES, &header_bc); - - if (cm->interp_filter == SWITCHABLE) - update_switchable_interp_probs(cm, &header_bc, counts); - - for (i = 0; i < INTRA_INTER_CONTEXTS; i++) - vp10_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i], - counts->intra_inter[i]); - - if (cpi->allow_comp_inter_inter) { - const int 
use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT; -#if !CONFIG_MISC_FIXES - const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE; - - vpx_write_bit(&header_bc, use_compound_pred); - if (use_compound_pred) { - vpx_write_bit(&header_bc, use_hybrid_pred); - if (use_hybrid_pred) - for (i = 0; i < COMP_INTER_CONTEXTS; i++) - vp10_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i], - counts->comp_inter[i]); - } -#else - if (use_hybrid_pred) - for (i = 0; i < COMP_INTER_CONTEXTS; i++) - vp10_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i], - counts->comp_inter[i]); -#endif - } - - if (cm->reference_mode != COMPOUND_REFERENCE) { - for (i = 0; i < REF_CONTEXTS; i++) { - vp10_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0], - counts->single_ref[i][0]); - vp10_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1], - counts->single_ref[i][1]); - } - } - - if (cm->reference_mode != SINGLE_REFERENCE) - for (i = 0; i < REF_CONTEXTS; i++) - vp10_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i], - counts->comp_ref[i]); - - for (i = 0; i < BLOCK_SIZE_GROUPS; ++i) - prob_diff_update(vp10_intra_mode_tree, cm->fc->y_mode_prob[i], - counts->y_mode[i], INTRA_MODES, &header_bc); - -#if !CONFIG_MISC_FIXES - for (i = 0; i < PARTITION_CONTEXTS; ++i) - prob_diff_update(vp10_partition_tree, fc->partition_prob[i], - counts->partition[i], PARTITION_TYPES, &header_bc); -#endif - - vp10_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc, - &counts->mv); - update_ext_tx_probs(cm, &header_bc); - } - - vpx_stop_encode(&header_bc); - assert(header_bc.pos <= 0xffff); - - return header_bc.pos; -} - -#if CONFIG_MISC_FIXES -static int remux_tiles(uint8_t *dest, const int sz, - const int n_tiles, const int mag) { - int rpos = 0, wpos = 0, n; - - for (n = 0; n < n_tiles; n++) { - int tile_sz; - - if (n == n_tiles - 1) { - tile_sz = sz - rpos; - } else { - tile_sz = mem_get_le32(&dest[rpos]) + 1; - rpos += 4; - switch (mag) { - 
case 0: - dest[wpos] = tile_sz - 1; - break; - case 1: - mem_put_le16(&dest[wpos], tile_sz - 1); - break; - case 2: - mem_put_le24(&dest[wpos], tile_sz - 1); - break; - case 3: // remuxing should only happen if mag < 3 - default: - assert("Invalid value for tile size magnitude" && 0); - } - wpos += mag + 1; - } - - memmove(&dest[wpos], &dest[rpos], tile_sz); - wpos += tile_sz; - rpos += tile_sz; - } - - assert(rpos > wpos); - assert(rpos == sz); - - return wpos; -} -#endif - -void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dest, size_t *size) { - uint8_t *data = dest; - size_t first_part_size, uncompressed_hdr_size, data_sz; - struct vpx_write_bit_buffer wb = {data, 0}; - struct vpx_write_bit_buffer saved_wb; - unsigned int max_tile; -#if CONFIG_MISC_FIXES - VP10_COMMON *const cm = &cpi->common; - const int n_log2_tiles = cm->log2_tile_rows + cm->log2_tile_cols; - const int have_tiles = n_log2_tiles > 0; -#else - const int have_tiles = 0; // we have tiles, but we don't want to write a - // tile size marker in the header -#endif - - write_uncompressed_header(cpi, &wb); - saved_wb = wb; - // don't know in advance first part. size - vpx_wb_write_literal(&wb, 0, 16 + have_tiles * 2); - - uncompressed_hdr_size = vpx_wb_bytes_written(&wb); - data += uncompressed_hdr_size; - - vpx_clear_system_state(); - - first_part_size = write_compressed_header(cpi, data); - data += first_part_size; - - data_sz = encode_tiles(cpi, data, &max_tile); -#if CONFIG_MISC_FIXES - if (max_tile > 0) { - int mag; - unsigned int mask; - - // Choose the (tile size) magnitude - for (mag = 0, mask = 0xff; mag < 4; mag++) { - if (max_tile <= mask) - break; - mask <<= 8; - mask |= 0xff; - } - assert(n_log2_tiles > 0); - vpx_wb_write_literal(&saved_wb, mag, 2); - if (mag < 3) - data_sz = remux_tiles(data, (int)data_sz, 1 << n_log2_tiles, mag); - } else { - assert(n_log2_tiles == 0); - } -#endif - data += data_sz; - - // TODO(jbb): Figure out what to do if first_part_size > 16 bits. 
- vpx_wb_write_literal(&saved_wb, (int)first_part_size, 16); - - *size = data - dest; -} diff --git a/libs/libvpx/vp10/encoder/bitstream.h b/libs/libvpx/vp10/encoder/bitstream.h deleted file mode 100644 index b1da89f1d7..0000000000 --- a/libs/libvpx/vp10/encoder/bitstream.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#ifndef VP10_ENCODER_BITSTREAM_H_ -#define VP10_ENCODER_BITSTREAM_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include "vp10/encoder/encoder.h" - -void vp10_encode_token_init(); -void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dest, size_t *size); - -static INLINE int vp10_preserve_existing_gf(VP10_COMP *cpi) { - return !cpi->multi_arf_allowed && cpi->refresh_golden_frame && - cpi->rc.is_src_frame_alt_ref; -} - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_BITSTREAM_H_ diff --git a/libs/libvpx/vp10/encoder/block.h b/libs/libvpx/vp10/encoder/block.h deleted file mode 100644 index ab0252baae..0000000000 --- a/libs/libvpx/vp10/encoder/block.h +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_ENCODER_BLOCK_H_ -#define VP10_ENCODER_BLOCK_H_ - -#include "vp10/common/entropymv.h" -#include "vp10/common/entropy.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct { - unsigned int sse; - int sum; - unsigned int var; -} diff; - -struct macroblock_plane { - DECLARE_ALIGNED(16, int16_t, src_diff[64 * 64]); - tran_low_t *qcoeff; - tran_low_t *coeff; - uint16_t *eobs; - struct buf_2d src; - - // Quantizer setings - int16_t *quant_fp; - int16_t *round_fp; - int16_t *quant; - int16_t *quant_shift; - int16_t *zbin; - int16_t *round; - - int64_t quant_thred[2]; -}; - -/* The [2] dimension is for whether we skip the EOB node (i.e. if previous - * coefficient in this block was zero) or not. */ -typedef unsigned int vp10_coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2] - [COEFF_CONTEXTS][ENTROPY_TOKENS]; - -typedef struct { - int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES]; - uint8_t mode_context[MAX_REF_FRAMES]; -} MB_MODE_INFO_EXT; - -typedef struct macroblock MACROBLOCK; -struct macroblock { - struct macroblock_plane plane[MAX_MB_PLANE]; - - MACROBLOCKD e_mbd; - MB_MODE_INFO_EXT *mbmi_ext; - int skip_block; - int select_tx_size; - int skip_recode; - int skip_optimize; - int q_index; - - int errorperbit; - int sadperbit16; - int sadperbit4; - int rddiv; - int rdmult; - int mb_energy; - int * m_search_count_ptr; - int * ex_search_count_ptr; - - // These are set to their default values at the beginning, and then adjusted - // further in the encoding process. 
- BLOCK_SIZE min_partition_size; - BLOCK_SIZE max_partition_size; - - int mv_best_ref_index[MAX_REF_FRAMES]; - unsigned int max_mv_context[MAX_REF_FRAMES]; - unsigned int source_variance; - unsigned int pred_sse[MAX_REF_FRAMES]; - int pred_mv_sad[MAX_REF_FRAMES]; - - int nmvjointcost[MV_JOINTS]; - int *nmvcost[2]; - int *nmvcost_hp[2]; - int **mvcost; - - int nmvjointsadcost[MV_JOINTS]; - int *nmvsadcost[2]; - int *nmvsadcost_hp[2]; - int **mvsadcost; - - // These define limits to motion vector components to prevent them - // from extending outside the UMV borders - int mv_col_min; - int mv_col_max; - int mv_row_min; - int mv_row_max; - - // Notes transform blocks where no coefficents are coded. - // Set during mode selection. Read during block encoding. - uint8_t zcoeff_blk[TX_SIZES][256]; - - int skip; - - int encode_breakout; - - // note that token_costs is the cost when eob node is skipped - vp10_coeff_cost token_costs[TX_SIZES]; - - int optimize; - - // indicate if it is in the rd search loop or encoding process - int use_lp32x32fdct; - - // use fast quantization process - int quant_fp; - - // skip forward transform and quantization - uint8_t skip_txfm[MAX_MB_PLANE << 2]; - #define SKIP_TXFM_NONE 0 - #define SKIP_TXFM_AC_DC 1 - #define SKIP_TXFM_AC_ONLY 2 - - int64_t bsse[MAX_MB_PLANE << 2]; - - // Used to store sub partition's choices. - MV pred_mv[MAX_REF_FRAMES]; - - // Strong color activity detection. Used in RTC coding mode to enhance - // the visual quality at the boundary of moving color objects. - uint8_t color_sensitivity[2]; -}; - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_BLOCK_H_ diff --git a/libs/libvpx/vp10/encoder/blockiness.c b/libs/libvpx/vp10/encoder/blockiness.c deleted file mode 100644 index ede13e0e59..0000000000 --- a/libs/libvpx/vp10/encoder/blockiness.c +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "./vp10_rtcd.h" -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" -#include "vp10/common/common.h" -#include "vp10/common/filter.h" -#include "vpx/vpx_integer.h" -#include "vpx_dsp/vpx_convolve.h" -#include "vpx_dsp/vpx_filter.h" -#include "vpx_ports/mem.h" -#include "vpx_ports/system_state.h" - -static int horizontal_filter(const uint8_t *s) { - return (s[1] - s[-2]) * 2 + (s[-1] - s[0]) * 6; -} - -static int vertical_filter(const uint8_t *s, int p) { - return (s[p] - s[-2 * p]) * 2 + (s[-p] - s[0]) * 6; -} - -static int variance(int sum, int sum_squared, int size) { - return sum_squared / size - (sum / size) * (sum / size); -} -// Calculate a blockiness level for a vertical block edge. -// This function returns a new blockiness metric that's defined as - -// p0 p1 p2 p3 -// q0 q1 q2 q3 -// block edge -> -// r0 r1 r2 r3 -// s0 s1 s2 s3 - -// blockiness = p0*-2+q0*6+r0*-6+s0*2 + -// p1*-2+q1*6+r1*-6+s1*2 + -// p2*-2+q2*6+r2*-6+s2*2 + -// p3*-2+q3*6+r3*-6+s3*2 ; - -// reconstructed_blockiness = abs(blockiness from reconstructed buffer - -// blockiness from source buffer,0) -// -// I make the assumption that flat blocks are much more visible than high -// contrast blocks. 
As such, I scale the result of the blockiness calc -// by dividing the blockiness by the variance of the pixels on either side -// of the edge as follows: -// var_0 = (q0^2+q1^2+q2^2+q3^2) - ((q0 + q1 + q2 + q3) / 4 )^2 -// var_1 = (r0^2+r1^2+r2^2+r3^2) - ((r0 + r1 + r2 + r3) / 4 )^2 -// The returned blockiness is the scaled value -// Reconstructed blockiness / ( 1 + var_0 + var_1 ) ; -static int blockiness_vertical(const uint8_t *s, int sp, const uint8_t *r, - int rp, int size) { - int s_blockiness = 0; - int r_blockiness = 0; - int sum_0 = 0; - int sum_sq_0 = 0; - int sum_1 = 0; - int sum_sq_1 = 0; - int i; - int var_0; - int var_1; - for (i = 0; i < size; ++i, s += sp, r += rp) { - s_blockiness += horizontal_filter(s); - r_blockiness += horizontal_filter(r); - sum_0 += s[0]; - sum_sq_0 += s[0]*s[0]; - sum_1 += s[-1]; - sum_sq_1 += s[-1]*s[-1]; - } - var_0 = variance(sum_0, sum_sq_0, size); - var_1 = variance(sum_1, sum_sq_1, size); - r_blockiness = abs(r_blockiness); - s_blockiness = abs(s_blockiness); - - if (r_blockiness > s_blockiness) - return (r_blockiness - s_blockiness) / (1 + var_0 + var_1); - else - return 0; -} - -// Calculate a blockiness level for a horizontal block edge -// same as above. 
-static int blockiness_horizontal(const uint8_t *s, int sp, const uint8_t *r, - int rp, int size) { - int s_blockiness = 0; - int r_blockiness = 0; - int sum_0 = 0; - int sum_sq_0 = 0; - int sum_1 = 0; - int sum_sq_1 = 0; - int i; - int var_0; - int var_1; - for (i = 0; i < size; ++i, ++s, ++r) { - s_blockiness += vertical_filter(s, sp); - r_blockiness += vertical_filter(r, rp); - sum_0 += s[0]; - sum_sq_0 += s[0] * s[0]; - sum_1 += s[-sp]; - sum_sq_1 += s[-sp] * s[-sp]; - } - var_0 = variance(sum_0, sum_sq_0, size); - var_1 = variance(sum_1, sum_sq_1, size); - r_blockiness = abs(r_blockiness); - s_blockiness = abs(s_blockiness); - - if (r_blockiness > s_blockiness) - return (r_blockiness - s_blockiness) / (1 + var_0 + var_1); - else - return 0; -} - -// This function returns the blockiness for the entire frame currently by -// looking at all borders in steps of 4. -double vp10_get_blockiness(const unsigned char *img1, int img1_pitch, - const unsigned char *img2, int img2_pitch, - int width, int height ) { - double blockiness = 0; - int i, j; - vpx_clear_system_state(); - for (i = 0; i < height; i += 4, img1 += img1_pitch * 4, - img2 += img2_pitch * 4) { - for (j = 0; j < width; j += 4) { - if (i > 0 && i < height && j > 0 && j < width) { - blockiness += blockiness_vertical(img1 + j, img1_pitch, - img2 + j, img2_pitch, 4); - blockiness += blockiness_horizontal(img1 + j, img1_pitch, - img2 + j, img2_pitch, 4); - } - } - } - blockiness /= width * height / 16; - return blockiness; -} diff --git a/libs/libvpx/vp10/encoder/context_tree.c b/libs/libvpx/vp10/encoder/context_tree.c deleted file mode 100644 index 6c056d28e1..0000000000 --- a/libs/libvpx/vp10/encoder/context_tree.c +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "vp10/encoder/context_tree.h" -#include "vp10/encoder/encoder.h" - -static const BLOCK_SIZE square[] = { - BLOCK_8X8, - BLOCK_16X16, - BLOCK_32X32, - BLOCK_64X64, -}; - -static void alloc_mode_context(VP10_COMMON *cm, int num_4x4_blk, - PICK_MODE_CONTEXT *ctx) { - const int num_blk = (num_4x4_blk < 4 ? 4 : num_4x4_blk); - const int num_pix = num_blk << 4; - int i, k; - ctx->num_4x4_blk = num_blk; - - CHECK_MEM_ERROR(cm, ctx->zcoeff_blk, - vpx_calloc(num_blk, sizeof(uint8_t))); - for (i = 0; i < MAX_MB_PLANE; ++i) { - for (k = 0; k < 3; ++k) { - CHECK_MEM_ERROR(cm, ctx->coeff[i][k], - vpx_memalign(32, num_pix * sizeof(*ctx->coeff[i][k]))); - CHECK_MEM_ERROR(cm, ctx->qcoeff[i][k], - vpx_memalign(32, num_pix * sizeof(*ctx->qcoeff[i][k]))); - CHECK_MEM_ERROR(cm, ctx->dqcoeff[i][k], - vpx_memalign(32, num_pix * sizeof(*ctx->dqcoeff[i][k]))); - CHECK_MEM_ERROR(cm, ctx->eobs[i][k], - vpx_memalign(32, num_blk * sizeof(*ctx->eobs[i][k]))); - ctx->coeff_pbuf[i][k] = ctx->coeff[i][k]; - ctx->qcoeff_pbuf[i][k] = ctx->qcoeff[i][k]; - ctx->dqcoeff_pbuf[i][k] = ctx->dqcoeff[i][k]; - ctx->eobs_pbuf[i][k] = ctx->eobs[i][k]; - } - } -} - -static void free_mode_context(PICK_MODE_CONTEXT *ctx) { - int i, k; - vpx_free(ctx->zcoeff_blk); - ctx->zcoeff_blk = 0; - for (i = 0; i < MAX_MB_PLANE; ++i) { - for (k = 0; k < 3; ++k) { - vpx_free(ctx->coeff[i][k]); - ctx->coeff[i][k] = 0; - vpx_free(ctx->qcoeff[i][k]); - ctx->qcoeff[i][k] = 0; - vpx_free(ctx->dqcoeff[i][k]); - ctx->dqcoeff[i][k] = 0; - vpx_free(ctx->eobs[i][k]); - ctx->eobs[i][k] = 0; - } - } - - for (i = 0; i < 2; ++i) { - vpx_free(ctx->color_index_map[i]); - ctx->color_index_map[i] = 0; - } -} - -static void alloc_tree_contexts(VP10_COMMON *cm, PC_TREE *tree, - int num_4x4_blk) { - alloc_mode_context(cm, num_4x4_blk, 
&tree->none); - alloc_mode_context(cm, num_4x4_blk/2, &tree->horizontal[0]); - alloc_mode_context(cm, num_4x4_blk/2, &tree->vertical[0]); - - if (num_4x4_blk > 4) { - alloc_mode_context(cm, num_4x4_blk/2, &tree->horizontal[1]); - alloc_mode_context(cm, num_4x4_blk/2, &tree->vertical[1]); - } else { - memset(&tree->horizontal[1], 0, sizeof(tree->horizontal[1])); - memset(&tree->vertical[1], 0, sizeof(tree->vertical[1])); - } -} - -static void free_tree_contexts(PC_TREE *tree) { - free_mode_context(&tree->none); - free_mode_context(&tree->horizontal[0]); - free_mode_context(&tree->horizontal[1]); - free_mode_context(&tree->vertical[0]); - free_mode_context(&tree->vertical[1]); -} - -// This function sets up a tree of contexts such that at each square -// partition level. There are contexts for none, horizontal, vertical, and -// split. Along with a block_size value and a selected block_size which -// represents the state of our search. -void vp10_setup_pc_tree(VP10_COMMON *cm, ThreadData *td) { - int i, j; - const int leaf_nodes = 64; - const int tree_nodes = 64 + 16 + 4 + 1; - int pc_tree_index = 0; - PC_TREE *this_pc; - PICK_MODE_CONTEXT *this_leaf; - int square_index = 1; - int nodes; - - vpx_free(td->leaf_tree); - CHECK_MEM_ERROR(cm, td->leaf_tree, vpx_calloc(leaf_nodes, - sizeof(*td->leaf_tree))); - vpx_free(td->pc_tree); - CHECK_MEM_ERROR(cm, td->pc_tree, vpx_calloc(tree_nodes, - sizeof(*td->pc_tree))); - - this_pc = &td->pc_tree[0]; - this_leaf = &td->leaf_tree[0]; - - // 4x4 blocks smaller than 8x8 but in the same 8x8 block share the same - // context so we only need to allocate 1 for each 8x8 block. - for (i = 0; i < leaf_nodes; ++i) - alloc_mode_context(cm, 1, &td->leaf_tree[i]); - - // Sets up all the leaf nodes in the tree. 
- for (pc_tree_index = 0; pc_tree_index < leaf_nodes; ++pc_tree_index) { - PC_TREE *const tree = &td->pc_tree[pc_tree_index]; - tree->block_size = square[0]; - alloc_tree_contexts(cm, tree, 4); - tree->leaf_split[0] = this_leaf++; - for (j = 1; j < 4; j++) - tree->leaf_split[j] = tree->leaf_split[0]; - } - - // Each node has 4 leaf nodes, fill each block_size level of the tree - // from leafs to the root. - for (nodes = 16; nodes > 0; nodes >>= 2) { - for (i = 0; i < nodes; ++i) { - PC_TREE *const tree = &td->pc_tree[pc_tree_index]; - alloc_tree_contexts(cm, tree, 4 << (2 * square_index)); - tree->block_size = square[square_index]; - for (j = 0; j < 4; j++) - tree->split[j] = this_pc++; - ++pc_tree_index; - } - ++square_index; - } - td->pc_root = &td->pc_tree[tree_nodes - 1]; - td->pc_root[0].none.best_mode_index = 2; -} - -void vp10_free_pc_tree(ThreadData *td) { - const int tree_nodes = 64 + 16 + 4 + 1; - int i; - - // Set up all 4x4 mode contexts - for (i = 0; i < 64; ++i) - free_mode_context(&td->leaf_tree[i]); - - // Sets up all the leaf nodes in the tree. - for (i = 0; i < tree_nodes; ++i) - free_tree_contexts(&td->pc_tree[i]); - - vpx_free(td->pc_tree); - td->pc_tree = NULL; - vpx_free(td->leaf_tree); - td->leaf_tree = NULL; -} diff --git a/libs/libvpx/vp10/encoder/context_tree.h b/libs/libvpx/vp10/encoder/context_tree.h deleted file mode 100644 index 2a0fffbfb2..0000000000 --- a/libs/libvpx/vp10/encoder/context_tree.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_ENCODER_CONTEXT_TREE_H_ -#define VP10_ENCODER_CONTEXT_TREE_H_ - -#include "vp10/common/blockd.h" -#include "vp10/encoder/block.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct VP10_COMP; -struct VP10Common; -struct ThreadData; - -// Structure to hold snapshot of coding context during the mode picking process -typedef struct { - MODE_INFO mic; - MB_MODE_INFO_EXT mbmi_ext; - uint8_t *zcoeff_blk; - uint8_t *color_index_map[2]; - tran_low_t *coeff[MAX_MB_PLANE][3]; - tran_low_t *qcoeff[MAX_MB_PLANE][3]; - tran_low_t *dqcoeff[MAX_MB_PLANE][3]; - uint16_t *eobs[MAX_MB_PLANE][3]; - - // dual buffer pointers, 0: in use, 1: best in store - tran_low_t *coeff_pbuf[MAX_MB_PLANE][3]; - tran_low_t *qcoeff_pbuf[MAX_MB_PLANE][3]; - tran_low_t *dqcoeff_pbuf[MAX_MB_PLANE][3]; - uint16_t *eobs_pbuf[MAX_MB_PLANE][3]; - - int is_coded; - int num_4x4_blk; - int skip; - int pred_pixel_ready; - // For current partition, only if all Y, U, and V transform blocks' - // coefficients are quantized to 0, skippable is set to 0. - int skippable; - uint8_t skip_txfm[MAX_MB_PLANE << 2]; - int best_mode_index; - int hybrid_pred_diff; - int comp_pred_diff; - int single_pred_diff; - int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS]; - - // TODO(jingning) Use RD_COST struct here instead. This involves a boarder - // scope of refactoring. 
- int rate; - int64_t dist; - -#if CONFIG_VP9_TEMPORAL_DENOISING - unsigned int newmv_sse; - unsigned int zeromv_sse; - PREDICTION_MODE best_sse_inter_mode; - int_mv best_sse_mv; - MV_REFERENCE_FRAME best_reference_frame; - MV_REFERENCE_FRAME best_zeromv_reference_frame; -#endif - - // motion vector cache for adaptive motion search control in partition - // search loop - MV pred_mv[MAX_REF_FRAMES]; - INTERP_FILTER pred_interp_filter; -} PICK_MODE_CONTEXT; - -typedef struct PC_TREE { - int index; - PARTITION_TYPE partitioning; - BLOCK_SIZE block_size; - PICK_MODE_CONTEXT none; - PICK_MODE_CONTEXT horizontal[2]; - PICK_MODE_CONTEXT vertical[2]; - union { - struct PC_TREE *split[4]; - PICK_MODE_CONTEXT *leaf_split[4]; - }; -} PC_TREE; - -void vp10_setup_pc_tree(struct VP10Common *cm, struct ThreadData *td); -void vp10_free_pc_tree(struct ThreadData *td); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif /* VP10_ENCODER_CONTEXT_TREE_H_ */ diff --git a/libs/libvpx/vp10/encoder/cost.c b/libs/libvpx/vp10/encoder/cost.c deleted file mode 100644 index aab826322b..0000000000 --- a/libs/libvpx/vp10/encoder/cost.c +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ -#include - -#include "vp10/encoder/cost.h" - -const unsigned int vp10_prob_cost[256] = { - 2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, - 1129, 1099, 1072, 1046, 1023, 1000, 979, 959, 940, 922, 905, 889, - 873, 858, 843, 829, 816, 803, 790, 778, 767, 755, 744, 733, - 723, 713, 703, 693, 684, 675, 666, 657, 649, 641, 633, 625, - 617, 609, 602, 594, 587, 580, 573, 567, 560, 553, 547, 541, - 534, 528, 522, 516, 511, 505, 499, 494, 488, 483, 477, 472, - 467, 462, 457, 452, 447, 442, 437, 433, 428, 424, 419, 415, - 410, 406, 401, 397, 393, 389, 385, 381, 377, 373, 369, 365, - 361, 357, 353, 349, 346, 342, 338, 335, 331, 328, 324, 321, - 317, 314, 311, 307, 304, 301, 297, 294, 291, 288, 285, 281, - 278, 275, 272, 269, 266, 263, 260, 257, 255, 252, 249, 246, - 243, 240, 238, 235, 232, 229, 227, 224, 221, 219, 216, 214, - 211, 208, 206, 203, 201, 198, 196, 194, 191, 189, 186, 184, - 181, 179, 177, 174, 172, 170, 168, 165, 163, 161, 159, 156, - 154, 152, 150, 148, 145, 143, 141, 139, 137, 135, 133, 131, - 129, 127, 125, 123, 121, 119, 117, 115, 113, 111, 109, 107, - 105, 103, 101, 99, 97, 95, 93, 92, 90, 88, 86, 84, - 82, 81, 79, 77, 75, 73, 72, 70, 68, 66, 65, 63, - 61, 60, 58, 56, 55, 53, 51, 50, 48, 46, 45, 43, - 41, 40, 38, 37, 35, 33, 32, 30, 29, 27, 25, 24, - 22, 21, 19, 18, 16, 15, 13, 12, 10, 9, 7, 6, - 4, 3, 1, 1}; - -static void cost(int *costs, vpx_tree tree, const vpx_prob *probs, - int i, int c) { - const vpx_prob prob = probs[i / 2]; - int b; - - for (b = 0; b <= 1; ++b) { - const int cc = c + vp10_cost_bit(prob, b); - const vpx_tree_index ii = tree[i + b]; - - if (ii <= 0) - costs[-ii] = cc; - else - cost(costs, tree, probs, ii, cc); - } -} - -void vp10_cost_tokens(int *costs, const vpx_prob *probs, vpx_tree tree) { - cost(costs, tree, probs, 0, 0); -} - -void vp10_cost_tokens_skip(int *costs, const vpx_prob *probs, vpx_tree tree) { - assert(tree[0] <= 0 && tree[1] > 0); - - costs[-tree[0]] = vp10_cost_bit(probs[0], 0); - 
cost(costs, tree, probs, 2, 0); -} diff --git a/libs/libvpx/vp10/encoder/cost.h b/libs/libvpx/vp10/encoder/cost.h deleted file mode 100644 index b9619c6b1b..0000000000 --- a/libs/libvpx/vp10/encoder/cost.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_ENCODER_COST_H_ -#define VP10_ENCODER_COST_H_ - -#include "vpx_dsp/prob.h" - -#ifdef __cplusplus -extern "C" { -#endif - -extern const unsigned int vp10_prob_cost[256]; - -#define vp10_cost_zero(prob) (vp10_prob_cost[prob]) - -#define vp10_cost_one(prob) vp10_cost_zero(vpx_complement(prob)) - -#define vp10_cost_bit(prob, bit) vp10_cost_zero((bit) ? vpx_complement(prob) \ - : (prob)) - -static INLINE unsigned int cost_branch256(const unsigned int ct[2], - vpx_prob p) { - return ct[0] * vp10_cost_zero(p) + ct[1] * vp10_cost_one(p); -} - -static INLINE int treed_cost(vpx_tree tree, const vpx_prob *probs, - int bits, int len) { - int cost = 0; - vpx_tree_index i = 0; - - do { - const int bit = (bits >> --len) & 1; - cost += vp10_cost_bit(probs[i >> 1], bit); - i = tree[i + bit]; - } while (len); - - return cost; -} - -void vp10_cost_tokens(int *costs, const vpx_prob *probs, vpx_tree tree); -void vp10_cost_tokens_skip(int *costs, const vpx_prob *probs, vpx_tree tree); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_COST_H_ diff --git a/libs/libvpx/vp10/encoder/dct.c b/libs/libvpx/vp10/encoder/dct.c deleted file mode 100644 index 132a141741..0000000000 --- a/libs/libvpx/vp10/encoder/dct.c +++ /dev/null @@ -1,1303 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include - -#include "./vp10_rtcd.h" -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" - -#include "vp10/common/blockd.h" -#include "vp10/common/idct.h" -#include "vpx_dsp/fwd_txfm.h" -#include "vpx_ports/mem.h" - -static INLINE void range_check(const tran_low_t *input, const int size, - const int bit) { -#if 0 // CONFIG_COEFFICIENT_RANGE_CHECKING -// TODO(angiebird): the range_check is not used because the bit range -// in fdct# is not correct. Since we are going to merge in a new version -// of fdct# from nextgenv2, we won't fix the incorrect bit range now. - int i; - for (i = 0; i < size; ++i) { - assert(abs(input[i]) < (1 << bit)); - } -#else - (void)input; - (void)size; - (void)bit; -#endif -} - -static void fdct4(const tran_low_t *input, tran_low_t *output) { - tran_high_t temp; - tran_low_t step[4]; - - // stage 0 - range_check(input, 4, 14); - - // stage 1 - output[0] = input[0] + input[3]; - output[1] = input[1] + input[2]; - output[2] = input[1] - input[2]; - output[3] = input[0] - input[3]; - - range_check(output, 4, 15); - - // stage 2 - temp = output[0] * cospi_16_64 + output[1] * cospi_16_64; - step[0] = (tran_low_t)fdct_round_shift(temp); - temp = output[1] * -cospi_16_64 + output[0] * cospi_16_64; - step[1] = (tran_low_t)fdct_round_shift(temp); - temp = output[2] * cospi_24_64 + output[3] * cospi_8_64; - step[2] = (tran_low_t)fdct_round_shift(temp); - temp = output[3] * cospi_24_64 + output[2] * -cospi_8_64; - step[3] = (tran_low_t)fdct_round_shift(temp); - - range_check(step, 4, 16); - - // stage 3 - output[0] = step[0]; - output[1] = step[2]; - output[2] = step[1]; - output[3] = step[3]; - - 
range_check(output, 4, 16); -} - -static void fdct8(const tran_low_t *input, tran_low_t *output) { - tran_high_t temp; - tran_low_t step[8]; - - // stage 0 - range_check(input, 8, 13); - - // stage 1 - output[0] = input[0] + input[7]; - output[1] = input[1] + input[6]; - output[2] = input[2] + input[5]; - output[3] = input[3] + input[4]; - output[4] = input[3] - input[4]; - output[5] = input[2] - input[5]; - output[6] = input[1] - input[6]; - output[7] = input[0] - input[7]; - - range_check(output, 8, 14); - - // stage 2 - step[0] = output[0] + output[3]; - step[1] = output[1] + output[2]; - step[2] = output[1] - output[2]; - step[3] = output[0] - output[3]; - step[4] = output[4]; - temp = output[5] * -cospi_16_64 + output[6] * cospi_16_64; - step[5] = (tran_low_t)fdct_round_shift(temp); - temp = output[6] * cospi_16_64 + output[5] * cospi_16_64; - step[6] = (tran_low_t)fdct_round_shift(temp); - step[7] = output[7]; - - range_check(step, 8, 15); - - // stage 3 - temp = step[0] * cospi_16_64 + step[1] * cospi_16_64; - output[0] = (tran_low_t)fdct_round_shift(temp); - temp = step[1] * -cospi_16_64 + step[0] * cospi_16_64; - output[1] = (tran_low_t)fdct_round_shift(temp); - temp = step[2] * cospi_24_64 + step[3] * cospi_8_64; - output[2] = (tran_low_t)fdct_round_shift(temp); - temp = step[3] * cospi_24_64 + step[2] * -cospi_8_64; - output[3] = (tran_low_t)fdct_round_shift(temp); - output[4] = step[4] + step[5]; - output[5] = step[4] - step[5]; - output[6] = step[7] - step[6]; - output[7] = step[7] + step[6]; - - range_check(output, 8, 16); - - // stage 4 - step[0] = output[0]; - step[1] = output[1]; - step[2] = output[2]; - step[3] = output[3]; - temp = output[4] * cospi_28_64 + output[7] * cospi_4_64; - step[4] = (tran_low_t)fdct_round_shift(temp); - temp = output[5] * cospi_12_64 + output[6] * cospi_20_64; - step[5] = (tran_low_t)fdct_round_shift(temp); - temp = output[6] * cospi_12_64 + output[5] * -cospi_20_64; - step[6] = (tran_low_t)fdct_round_shift(temp); - 
temp = output[7] * cospi_28_64 + output[4] * -cospi_4_64; - step[7] = (tran_low_t)fdct_round_shift(temp); - - range_check(step, 8, 16); - - // stage 5 - output[0] = step[0]; - output[1] = step[4]; - output[2] = step[2]; - output[3] = step[6]; - output[4] = step[1]; - output[5] = step[5]; - output[6] = step[3]; - output[7] = step[7]; - - range_check(output, 8, 16); -} - -static void fdct16(const tran_low_t *input, tran_low_t *output) { - tran_high_t temp; - tran_low_t step[16]; - - // stage 0 - range_check(input, 16, 13); - - // stage 1 - output[0] = input[0] + input[15]; - output[1] = input[1] + input[14]; - output[2] = input[2] + input[13]; - output[3] = input[3] + input[12]; - output[4] = input[4] + input[11]; - output[5] = input[5] + input[10]; - output[6] = input[6] + input[9]; - output[7] = input[7] + input[8]; - output[8] = input[7] - input[8]; - output[9] = input[6] - input[9]; - output[10] = input[5] - input[10]; - output[11] = input[4] - input[11]; - output[12] = input[3] - input[12]; - output[13] = input[2] - input[13]; - output[14] = input[1] - input[14]; - output[15] = input[0] - input[15]; - - range_check(output, 16, 14); - - // stage 2 - step[0] = output[0] + output[7]; - step[1] = output[1] + output[6]; - step[2] = output[2] + output[5]; - step[3] = output[3] + output[4]; - step[4] = output[3] - output[4]; - step[5] = output[2] - output[5]; - step[6] = output[1] - output[6]; - step[7] = output[0] - output[7]; - step[8] = output[8]; - step[9] = output[9]; - temp = output[10] * -cospi_16_64 + output[13] * cospi_16_64; - step[10] = (tran_low_t)fdct_round_shift(temp); - temp = output[11] * -cospi_16_64 + output[12] * cospi_16_64; - step[11] = (tran_low_t)fdct_round_shift(temp); - temp = output[12] * cospi_16_64 + output[11] * cospi_16_64; - step[12] = (tran_low_t)fdct_round_shift(temp); - temp = output[13] * cospi_16_64 + output[10] * cospi_16_64; - step[13] = (tran_low_t)fdct_round_shift(temp); - step[14] = output[14]; - step[15] = output[15]; - - 
range_check(step, 16, 15); - - // stage 3 - output[0] = step[0] + step[3]; - output[1] = step[1] + step[2]; - output[2] = step[1] - step[2]; - output[3] = step[0] - step[3]; - output[4] = step[4]; - temp = step[5] * -cospi_16_64 + step[6] * cospi_16_64; - output[5] = (tran_low_t)fdct_round_shift(temp); - temp = step[6] * cospi_16_64 + step[5] * cospi_16_64; - output[6] = (tran_low_t)fdct_round_shift(temp); - output[7] = step[7]; - output[8] = step[8] + step[11]; - output[9] = step[9] + step[10]; - output[10] = step[9] - step[10]; - output[11] = step[8] - step[11]; - output[12] = step[15] - step[12]; - output[13] = step[14] - step[13]; - output[14] = step[14] + step[13]; - output[15] = step[15] + step[12]; - - range_check(output, 16, 16); - - // stage 4 - temp = output[0] * cospi_16_64 + output[1] * cospi_16_64; - step[0] = (tran_low_t)fdct_round_shift(temp); - temp = output[1] * -cospi_16_64 + output[0] * cospi_16_64; - step[1] = (tran_low_t)fdct_round_shift(temp); - temp = output[2] * cospi_24_64 + output[3] * cospi_8_64; - step[2] = (tran_low_t)fdct_round_shift(temp); - temp = output[3] * cospi_24_64 + output[2] * -cospi_8_64; - step[3] = (tran_low_t)fdct_round_shift(temp); - step[4] = output[4] + output[5]; - step[5] = output[4] - output[5]; - step[6] = output[7] - output[6]; - step[7] = output[7] + output[6]; - step[8] = output[8]; - temp = output[9] * -cospi_8_64 + output[14] * cospi_24_64; - step[9] = (tran_low_t)fdct_round_shift(temp); - temp = output[10] * -cospi_24_64 + output[13] * -cospi_8_64; - step[10] = (tran_low_t)fdct_round_shift(temp); - step[11] = output[11]; - step[12] = output[12]; - temp = output[13] * cospi_24_64 + output[10] * -cospi_8_64; - step[13] = (tran_low_t)fdct_round_shift(temp); - temp = output[14] * cospi_8_64 + output[9] * cospi_24_64; - step[14] = (tran_low_t)fdct_round_shift(temp); - step[15] = output[15]; - - range_check(step, 16, 16); - - // stage 5 - output[0] = step[0]; - output[1] = step[1]; - output[2] = step[2]; - 
output[3] = step[3]; - temp = step[4] * cospi_28_64 + step[7] * cospi_4_64; - output[4] = (tran_low_t)fdct_round_shift(temp); - temp = step[5] * cospi_12_64 + step[6] * cospi_20_64; - output[5] = (tran_low_t)fdct_round_shift(temp); - temp = step[6] * cospi_12_64 + step[5] * -cospi_20_64; - output[6] = (tran_low_t)fdct_round_shift(temp); - temp = step[7] * cospi_28_64 + step[4] * -cospi_4_64; - output[7] = (tran_low_t)fdct_round_shift(temp); - output[8] = step[8] + step[9]; - output[9] = step[8] - step[9]; - output[10] = step[11] - step[10]; - output[11] = step[11] + step[10]; - output[12] = step[12] + step[13]; - output[13] = step[12] - step[13]; - output[14] = step[15] - step[14]; - output[15] = step[15] + step[14]; - - range_check(output, 16, 16); - - // stage 6 - step[0] = output[0]; - step[1] = output[1]; - step[2] = output[2]; - step[3] = output[3]; - step[4] = output[4]; - step[5] = output[5]; - step[6] = output[6]; - step[7] = output[7]; - temp = output[8] * cospi_30_64 + output[15] * cospi_2_64; - step[8] = (tran_low_t)fdct_round_shift(temp); - temp = output[9] * cospi_14_64 + output[14] * cospi_18_64; - step[9] = (tran_low_t)fdct_round_shift(temp); - temp = output[10] * cospi_22_64 + output[13] * cospi_10_64; - step[10] = (tran_low_t)fdct_round_shift(temp); - temp = output[11] * cospi_6_64 + output[12] * cospi_26_64; - step[11] = (tran_low_t)fdct_round_shift(temp); - temp = output[12] * cospi_6_64 + output[11] * -cospi_26_64; - step[12] = (tran_low_t)fdct_round_shift(temp); - temp = output[13] * cospi_22_64 + output[10] * -cospi_10_64; - step[13] = (tran_low_t)fdct_round_shift(temp); - temp = output[14] * cospi_14_64 + output[9] * -cospi_18_64; - step[14] = (tran_low_t)fdct_round_shift(temp); - temp = output[15] * cospi_30_64 + output[8] * -cospi_2_64; - step[15] = (tran_low_t)fdct_round_shift(temp); - - range_check(step, 16, 16); - - // stage 7 - output[0] = step[0]; - output[1] = step[8]; - output[2] = step[4]; - output[3] = step[12]; - output[4] = 
step[2]; - output[5] = step[10]; - output[6] = step[6]; - output[7] = step[14]; - output[8] = step[1]; - output[9] = step[9]; - output[10] = step[5]; - output[11] = step[13]; - output[12] = step[3]; - output[13] = step[11]; - output[14] = step[7]; - output[15] = step[15]; - - range_check(output, 16, 16); -} - -/* TODO(angiebird): Unify this with vp10_fwd_txfm.c: vp10_fdct32 -static void fdct32(const tran_low_t *input, tran_low_t *output) { - tran_high_t temp; - tran_low_t step[32]; - - // stage 0 - range_check(input, 32, 14); - - // stage 1 - output[0] = input[0] + input[31]; - output[1] = input[1] + input[30]; - output[2] = input[2] + input[29]; - output[3] = input[3] + input[28]; - output[4] = input[4] + input[27]; - output[5] = input[5] + input[26]; - output[6] = input[6] + input[25]; - output[7] = input[7] + input[24]; - output[8] = input[8] + input[23]; - output[9] = input[9] + input[22]; - output[10] = input[10] + input[21]; - output[11] = input[11] + input[20]; - output[12] = input[12] + input[19]; - output[13] = input[13] + input[18]; - output[14] = input[14] + input[17]; - output[15] = input[15] + input[16]; - output[16] = input[15] - input[16]; - output[17] = input[14] - input[17]; - output[18] = input[13] - input[18]; - output[19] = input[12] - input[19]; - output[20] = input[11] - input[20]; - output[21] = input[10] - input[21]; - output[22] = input[9] - input[22]; - output[23] = input[8] - input[23]; - output[24] = input[7] - input[24]; - output[25] = input[6] - input[25]; - output[26] = input[5] - input[26]; - output[27] = input[4] - input[27]; - output[28] = input[3] - input[28]; - output[29] = input[2] - input[29]; - output[30] = input[1] - input[30]; - output[31] = input[0] - input[31]; - - range_check(output, 32, 15); - - // stage 2 - step[0] = output[0] + output[15]; - step[1] = output[1] + output[14]; - step[2] = output[2] + output[13]; - step[3] = output[3] + output[12]; - step[4] = output[4] + output[11]; - step[5] = output[5] + output[10]; - 
step[6] = output[6] + output[9]; - step[7] = output[7] + output[8]; - step[8] = output[7] - output[8]; - step[9] = output[6] - output[9]; - step[10] = output[5] - output[10]; - step[11] = output[4] - output[11]; - step[12] = output[3] - output[12]; - step[13] = output[2] - output[13]; - step[14] = output[1] - output[14]; - step[15] = output[0] - output[15]; - step[16] = output[16]; - step[17] = output[17]; - step[18] = output[18]; - step[19] = output[19]; - temp = output[20] * -cospi_16_64 + output[27] * cospi_16_64; - step[20] = (tran_low_t)fdct_round_shift(temp); - temp = output[21] * -cospi_16_64 + output[26] * cospi_16_64; - step[21] = (tran_low_t)fdct_round_shift(temp); - temp = output[22] * -cospi_16_64 + output[25] * cospi_16_64; - step[22] = (tran_low_t)fdct_round_shift(temp); - temp = output[23] * -cospi_16_64 + output[24] * cospi_16_64; - step[23] = (tran_low_t)fdct_round_shift(temp); - temp = output[24] * cospi_16_64 + output[23] * cospi_16_64; - step[24] = (tran_low_t)fdct_round_shift(temp); - temp = output[25] * cospi_16_64 + output[22] * cospi_16_64; - step[25] = (tran_low_t)fdct_round_shift(temp); - temp = output[26] * cospi_16_64 + output[21] * cospi_16_64; - step[26] = (tran_low_t)fdct_round_shift(temp); - temp = output[27] * cospi_16_64 + output[20] * cospi_16_64; - step[27] = (tran_low_t)fdct_round_shift(temp); - step[28] = output[28]; - step[29] = output[29]; - step[30] = output[30]; - step[31] = output[31]; - - range_check(step, 32, 16); - - // stage 3 - output[0] = step[0] + step[7]; - output[1] = step[1] + step[6]; - output[2] = step[2] + step[5]; - output[3] = step[3] + step[4]; - output[4] = step[3] - step[4]; - output[5] = step[2] - step[5]; - output[6] = step[1] - step[6]; - output[7] = step[0] - step[7]; - output[8] = step[8]; - output[9] = step[9]; - temp = step[10] * -cospi_16_64 + step[13] * cospi_16_64; - output[10] = (tran_low_t)fdct_round_shift(temp); - temp = step[11] * -cospi_16_64 + step[12] * cospi_16_64; - output[11] = 
(tran_low_t)fdct_round_shift(temp); - temp = step[12] * cospi_16_64 + step[11] * cospi_16_64; - output[12] = (tran_low_t)fdct_round_shift(temp); - temp = step[13] * cospi_16_64 + step[10] * cospi_16_64; - output[13] = (tran_low_t)fdct_round_shift(temp); - output[14] = step[14]; - output[15] = step[15]; - output[16] = step[16] + step[23]; - output[17] = step[17] + step[22]; - output[18] = step[18] + step[21]; - output[19] = step[19] + step[20]; - output[20] = step[19] - step[20]; - output[21] = step[18] - step[21]; - output[22] = step[17] - step[22]; - output[23] = step[16] - step[23]; - output[24] = step[31] - step[24]; - output[25] = step[30] - step[25]; - output[26] = step[29] - step[26]; - output[27] = step[28] - step[27]; - output[28] = step[28] + step[27]; - output[29] = step[29] + step[26]; - output[30] = step[30] + step[25]; - output[31] = step[31] + step[24]; - - range_check(output, 32, 17); - - // stage 4 - step[0] = output[0] + output[3]; - step[1] = output[1] + output[2]; - step[2] = output[1] - output[2]; - step[3] = output[0] - output[3]; - step[4] = output[4]; - temp = output[5] * -cospi_16_64 + output[6] * cospi_16_64; - step[5] = (tran_low_t)fdct_round_shift(temp); - temp = output[6] * cospi_16_64 + output[5] * cospi_16_64; - step[6] = (tran_low_t)fdct_round_shift(temp); - step[7] = output[7]; - step[8] = output[8] + output[11]; - step[9] = output[9] + output[10]; - step[10] = output[9] - output[10]; - step[11] = output[8] - output[11]; - step[12] = output[15] - output[12]; - step[13] = output[14] - output[13]; - step[14] = output[14] + output[13]; - step[15] = output[15] + output[12]; - step[16] = output[16]; - step[17] = output[17]; - temp = output[18] * -cospi_8_64 + output[29] * cospi_24_64; - step[18] = (tran_low_t)fdct_round_shift(temp); - temp = output[19] * -cospi_8_64 + output[28] * cospi_24_64; - step[19] = (tran_low_t)fdct_round_shift(temp); - temp = output[20] * -cospi_24_64 + output[27] * -cospi_8_64; - step[20] = 
(tran_low_t)fdct_round_shift(temp); - temp = output[21] * -cospi_24_64 + output[26] * -cospi_8_64; - step[21] = (tran_low_t)fdct_round_shift(temp); - step[22] = output[22]; - step[23] = output[23]; - step[24] = output[24]; - step[25] = output[25]; - temp = output[26] * cospi_24_64 + output[21] * -cospi_8_64; - step[26] = (tran_low_t)fdct_round_shift(temp); - temp = output[27] * cospi_24_64 + output[20] * -cospi_8_64; - step[27] = (tran_low_t)fdct_round_shift(temp); - temp = output[28] * cospi_8_64 + output[19] * cospi_24_64; - step[28] = (tran_low_t)fdct_round_shift(temp); - temp = output[29] * cospi_8_64 + output[18] * cospi_24_64; - step[29] = (tran_low_t)fdct_round_shift(temp); - step[30] = output[30]; - step[31] = output[31]; - - range_check(step, 32, 18); - - // stage 5 - temp = step[0] * cospi_16_64 + step[1] * cospi_16_64; - output[0] = (tran_low_t)fdct_round_shift(temp); - temp = step[1] * -cospi_16_64 + step[0] * cospi_16_64; - output[1] = (tran_low_t)fdct_round_shift(temp); - temp = step[2] * cospi_24_64 + step[3] * cospi_8_64; - output[2] = (tran_low_t)fdct_round_shift(temp); - temp = step[3] * cospi_24_64 + step[2] * -cospi_8_64; - output[3] = (tran_low_t)fdct_round_shift(temp); - output[4] = step[4] + step[5]; - output[5] = step[4] - step[5]; - output[6] = step[7] - step[6]; - output[7] = step[7] + step[6]; - output[8] = step[8]; - temp = step[9] * -cospi_8_64 + step[14] * cospi_24_64; - output[9] = (tran_low_t)fdct_round_shift(temp); - temp = step[10] * -cospi_24_64 + step[13] * -cospi_8_64; - output[10] = (tran_low_t)fdct_round_shift(temp); - output[11] = step[11]; - output[12] = step[12]; - temp = step[13] * cospi_24_64 + step[10] * -cospi_8_64; - output[13] = (tran_low_t)fdct_round_shift(temp); - temp = step[14] * cospi_8_64 + step[9] * cospi_24_64; - output[14] = (tran_low_t)fdct_round_shift(temp); - output[15] = step[15]; - output[16] = step[16] + step[19]; - output[17] = step[17] + step[18]; - output[18] = step[17] - step[18]; - output[19] = 
step[16] - step[19]; - output[20] = step[23] - step[20]; - output[21] = step[22] - step[21]; - output[22] = step[22] + step[21]; - output[23] = step[23] + step[20]; - output[24] = step[24] + step[27]; - output[25] = step[25] + step[26]; - output[26] = step[25] - step[26]; - output[27] = step[24] - step[27]; - output[28] = step[31] - step[28]; - output[29] = step[30] - step[29]; - output[30] = step[30] + step[29]; - output[31] = step[31] + step[28]; - - range_check(output, 32, 18); - - // stage 6 - step[0] = output[0]; - step[1] = output[1]; - step[2] = output[2]; - step[3] = output[3]; - temp = output[4] * cospi_28_64 + output[7] * cospi_4_64; - step[4] = (tran_low_t)fdct_round_shift(temp); - temp = output[5] * cospi_12_64 + output[6] * cospi_20_64; - step[5] = (tran_low_t)fdct_round_shift(temp); - temp = output[6] * cospi_12_64 + output[5] * -cospi_20_64; - step[6] = (tran_low_t)fdct_round_shift(temp); - temp = output[7] * cospi_28_64 + output[4] * -cospi_4_64; - step[7] = (tran_low_t)fdct_round_shift(temp); - step[8] = output[8] + output[9]; - step[9] = output[8] - output[9]; - step[10] = output[11] - output[10]; - step[11] = output[11] + output[10]; - step[12] = output[12] + output[13]; - step[13] = output[12] - output[13]; - step[14] = output[15] - output[14]; - step[15] = output[15] + output[14]; - step[16] = output[16]; - temp = output[17] * -cospi_4_64 + output[30] * cospi_28_64; - step[17] = (tran_low_t)fdct_round_shift(temp); - temp = output[18] * -cospi_28_64 + output[29] * -cospi_4_64; - step[18] = (tran_low_t)fdct_round_shift(temp); - step[19] = output[19]; - step[20] = output[20]; - temp = output[21] * -cospi_20_64 + output[26] * cospi_12_64; - step[21] = (tran_low_t)fdct_round_shift(temp); - temp = output[22] * -cospi_12_64 + output[25] * -cospi_20_64; - step[22] = (tran_low_t)fdct_round_shift(temp); - step[23] = output[23]; - step[24] = output[24]; - temp = output[25] * cospi_12_64 + output[22] * -cospi_20_64; - step[25] = 
(tran_low_t)fdct_round_shift(temp); - temp = output[26] * cospi_20_64 + output[21] * cospi_12_64; - step[26] = (tran_low_t)fdct_round_shift(temp); - step[27] = output[27]; - step[28] = output[28]; - temp = output[29] * cospi_28_64 + output[18] * -cospi_4_64; - step[29] = (tran_low_t)fdct_round_shift(temp); - temp = output[30] * cospi_4_64 + output[17] * cospi_28_64; - step[30] = (tran_low_t)fdct_round_shift(temp); - step[31] = output[31]; - - range_check(step, 32, 18); - - // stage 7 - output[0] = step[0]; - output[1] = step[1]; - output[2] = step[2]; - output[3] = step[3]; - output[4] = step[4]; - output[5] = step[5]; - output[6] = step[6]; - output[7] = step[7]; - temp = step[8] * cospi_30_64 + step[15] * cospi_2_64; - output[8] = (tran_low_t)fdct_round_shift(temp); - temp = step[9] * cospi_14_64 + step[14] * cospi_18_64; - output[9] = (tran_low_t)fdct_round_shift(temp); - temp = step[10] * cospi_22_64 + step[13] * cospi_10_64; - output[10] = (tran_low_t)fdct_round_shift(temp); - temp = step[11] * cospi_6_64 + step[12] * cospi_26_64; - output[11] = (tran_low_t)fdct_round_shift(temp); - temp = step[12] * cospi_6_64 + step[11] * -cospi_26_64; - output[12] = (tran_low_t)fdct_round_shift(temp); - temp = step[13] * cospi_22_64 + step[10] * -cospi_10_64; - output[13] = (tran_low_t)fdct_round_shift(temp); - temp = step[14] * cospi_14_64 + step[9] * -cospi_18_64; - output[14] = (tran_low_t)fdct_round_shift(temp); - temp = step[15] * cospi_30_64 + step[8] * -cospi_2_64; - output[15] = (tran_low_t)fdct_round_shift(temp); - output[16] = step[16] + step[17]; - output[17] = step[16] - step[17]; - output[18] = step[19] - step[18]; - output[19] = step[19] + step[18]; - output[20] = step[20] + step[21]; - output[21] = step[20] - step[21]; - output[22] = step[23] - step[22]; - output[23] = step[23] + step[22]; - output[24] = step[24] + step[25]; - output[25] = step[24] - step[25]; - output[26] = step[27] - step[26]; - output[27] = step[27] + step[26]; - output[28] = step[28] + 
step[29]; - output[29] = step[28] - step[29]; - output[30] = step[31] - step[30]; - output[31] = step[31] + step[30]; - - range_check(output, 32, 18); - - // stage 8 - step[0] = output[0]; - step[1] = output[1]; - step[2] = output[2]; - step[3] = output[3]; - step[4] = output[4]; - step[5] = output[5]; - step[6] = output[6]; - step[7] = output[7]; - step[8] = output[8]; - step[9] = output[9]; - step[10] = output[10]; - step[11] = output[11]; - step[12] = output[12]; - step[13] = output[13]; - step[14] = output[14]; - step[15] = output[15]; - temp = output[16] * cospi_31_64 + output[31] * cospi_1_64; - step[16] = (tran_low_t)fdct_round_shift(temp); - temp = output[17] * cospi_15_64 + output[30] * cospi_17_64; - step[17] = (tran_low_t)fdct_round_shift(temp); - temp = output[18] * cospi_23_64 + output[29] * cospi_9_64; - step[18] = (tran_low_t)fdct_round_shift(temp); - temp = output[19] * cospi_7_64 + output[28] * cospi_25_64; - step[19] = (tran_low_t)fdct_round_shift(temp); - temp = output[20] * cospi_27_64 + output[27] * cospi_5_64; - step[20] = (tran_low_t)fdct_round_shift(temp); - temp = output[21] * cospi_11_64 + output[26] * cospi_21_64; - step[21] = (tran_low_t)fdct_round_shift(temp); - temp = output[22] * cospi_19_64 + output[25] * cospi_13_64; - step[22] = (tran_low_t)fdct_round_shift(temp); - temp = output[23] * cospi_3_64 + output[24] * cospi_29_64; - step[23] = (tran_low_t)fdct_round_shift(temp); - temp = output[24] * cospi_3_64 + output[23] * -cospi_29_64; - step[24] = (tran_low_t)fdct_round_shift(temp); - temp = output[25] * cospi_19_64 + output[22] * -cospi_13_64; - step[25] = (tran_low_t)fdct_round_shift(temp); - temp = output[26] * cospi_11_64 + output[21] * -cospi_21_64; - step[26] = (tran_low_t)fdct_round_shift(temp); - temp = output[27] * cospi_27_64 + output[20] * -cospi_5_64; - step[27] = (tran_low_t)fdct_round_shift(temp); - temp = output[28] * cospi_7_64 + output[19] * -cospi_25_64; - step[28] = (tran_low_t)fdct_round_shift(temp); - temp = 
output[29] * cospi_23_64 + output[18] * -cospi_9_64; - step[29] = (tran_low_t)fdct_round_shift(temp); - temp = output[30] * cospi_15_64 + output[17] * -cospi_17_64; - step[30] = (tran_low_t)fdct_round_shift(temp); - temp = output[31] * cospi_31_64 + output[16] * -cospi_1_64; - step[31] = (tran_low_t)fdct_round_shift(temp); - - range_check(step, 32, 18); - - // stage 9 - output[0] = step[0]; - output[1] = step[16]; - output[2] = step[8]; - output[3] = step[24]; - output[4] = step[4]; - output[5] = step[20]; - output[6] = step[12]; - output[7] = step[28]; - output[8] = step[2]; - output[9] = step[18]; - output[10] = step[10]; - output[11] = step[26]; - output[12] = step[6]; - output[13] = step[22]; - output[14] = step[14]; - output[15] = step[30]; - output[16] = step[1]; - output[17] = step[17]; - output[18] = step[9]; - output[19] = step[25]; - output[20] = step[5]; - output[21] = step[21]; - output[22] = step[13]; - output[23] = step[29]; - output[24] = step[3]; - output[25] = step[19]; - output[26] = step[11]; - output[27] = step[27]; - output[28] = step[7]; - output[29] = step[23]; - output[30] = step[15]; - output[31] = step[31]; - - range_check(output, 32, 18); -} -*/ - -static void fadst4(const tran_low_t *input, tran_low_t *output) { - tran_high_t x0, x1, x2, x3; - tran_high_t s0, s1, s2, s3, s4, s5, s6, s7; - - x0 = input[0]; - x1 = input[1]; - x2 = input[2]; - x3 = input[3]; - - if (!(x0 | x1 | x2 | x3)) { - output[0] = output[1] = output[2] = output[3] = 0; - return; - } - - s0 = sinpi_1_9 * x0; - s1 = sinpi_4_9 * x0; - s2 = sinpi_2_9 * x1; - s3 = sinpi_1_9 * x1; - s4 = sinpi_3_9 * x2; - s5 = sinpi_4_9 * x3; - s6 = sinpi_2_9 * x3; - s7 = x0 + x1 - x3; - - x0 = s0 + s2 + s5; - x1 = sinpi_3_9 * s7; - x2 = s1 - s3 + s6; - x3 = s4; - - s0 = x0 + x3; - s1 = x1; - s2 = x2 - x3; - s3 = x2 - x0 + x3; - - // 1-D transform scaling factor is sqrt(2). 
- output[0] = (tran_low_t)fdct_round_shift(s0); - output[1] = (tran_low_t)fdct_round_shift(s1); - output[2] = (tran_low_t)fdct_round_shift(s2); - output[3] = (tran_low_t)fdct_round_shift(s3); -} - -static void fadst8(const tran_low_t *input, tran_low_t *output) { - tran_high_t s0, s1, s2, s3, s4, s5, s6, s7; - - tran_high_t x0 = input[7]; - tran_high_t x1 = input[0]; - tran_high_t x2 = input[5]; - tran_high_t x3 = input[2]; - tran_high_t x4 = input[3]; - tran_high_t x5 = input[4]; - tran_high_t x6 = input[1]; - tran_high_t x7 = input[6]; - - // stage 1 - s0 = cospi_2_64 * x0 + cospi_30_64 * x1; - s1 = cospi_30_64 * x0 - cospi_2_64 * x1; - s2 = cospi_10_64 * x2 + cospi_22_64 * x3; - s3 = cospi_22_64 * x2 - cospi_10_64 * x3; - s4 = cospi_18_64 * x4 + cospi_14_64 * x5; - s5 = cospi_14_64 * x4 - cospi_18_64 * x5; - s6 = cospi_26_64 * x6 + cospi_6_64 * x7; - s7 = cospi_6_64 * x6 - cospi_26_64 * x7; - - x0 = fdct_round_shift(s0 + s4); - x1 = fdct_round_shift(s1 + s5); - x2 = fdct_round_shift(s2 + s6); - x3 = fdct_round_shift(s3 + s7); - x4 = fdct_round_shift(s0 - s4); - x5 = fdct_round_shift(s1 - s5); - x6 = fdct_round_shift(s2 - s6); - x7 = fdct_round_shift(s3 - s7); - - // stage 2 - s0 = x0; - s1 = x1; - s2 = x2; - s3 = x3; - s4 = cospi_8_64 * x4 + cospi_24_64 * x5; - s5 = cospi_24_64 * x4 - cospi_8_64 * x5; - s6 = - cospi_24_64 * x6 + cospi_8_64 * x7; - s7 = cospi_8_64 * x6 + cospi_24_64 * x7; - - x0 = s0 + s2; - x1 = s1 + s3; - x2 = s0 - s2; - x3 = s1 - s3; - x4 = fdct_round_shift(s4 + s6); - x5 = fdct_round_shift(s5 + s7); - x6 = fdct_round_shift(s4 - s6); - x7 = fdct_round_shift(s5 - s7); - - // stage 3 - s2 = cospi_16_64 * (x2 + x3); - s3 = cospi_16_64 * (x2 - x3); - s6 = cospi_16_64 * (x6 + x7); - s7 = cospi_16_64 * (x6 - x7); - - x2 = fdct_round_shift(s2); - x3 = fdct_round_shift(s3); - x6 = fdct_round_shift(s6); - x7 = fdct_round_shift(s7); - - output[0] = (tran_low_t)x0; - output[1] = (tran_low_t)-x4; - output[2] = (tran_low_t)x6; - output[3] = 
(tran_low_t)-x2; - output[4] = (tran_low_t)x3; - output[5] = (tran_low_t)-x7; - output[6] = (tran_low_t)x5; - output[7] = (tran_low_t)-x1; -} - -static void fadst16(const tran_low_t *input, tran_low_t *output) { - tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8; - tran_high_t s9, s10, s11, s12, s13, s14, s15; - - tran_high_t x0 = input[15]; - tran_high_t x1 = input[0]; - tran_high_t x2 = input[13]; - tran_high_t x3 = input[2]; - tran_high_t x4 = input[11]; - tran_high_t x5 = input[4]; - tran_high_t x6 = input[9]; - tran_high_t x7 = input[6]; - tran_high_t x8 = input[7]; - tran_high_t x9 = input[8]; - tran_high_t x10 = input[5]; - tran_high_t x11 = input[10]; - tran_high_t x12 = input[3]; - tran_high_t x13 = input[12]; - tran_high_t x14 = input[1]; - tran_high_t x15 = input[14]; - - // stage 1 - s0 = x0 * cospi_1_64 + x1 * cospi_31_64; - s1 = x0 * cospi_31_64 - x1 * cospi_1_64; - s2 = x2 * cospi_5_64 + x3 * cospi_27_64; - s3 = x2 * cospi_27_64 - x3 * cospi_5_64; - s4 = x4 * cospi_9_64 + x5 * cospi_23_64; - s5 = x4 * cospi_23_64 - x5 * cospi_9_64; - s6 = x6 * cospi_13_64 + x7 * cospi_19_64; - s7 = x6 * cospi_19_64 - x7 * cospi_13_64; - s8 = x8 * cospi_17_64 + x9 * cospi_15_64; - s9 = x8 * cospi_15_64 - x9 * cospi_17_64; - s10 = x10 * cospi_21_64 + x11 * cospi_11_64; - s11 = x10 * cospi_11_64 - x11 * cospi_21_64; - s12 = x12 * cospi_25_64 + x13 * cospi_7_64; - s13 = x12 * cospi_7_64 - x13 * cospi_25_64; - s14 = x14 * cospi_29_64 + x15 * cospi_3_64; - s15 = x14 * cospi_3_64 - x15 * cospi_29_64; - - x0 = fdct_round_shift(s0 + s8); - x1 = fdct_round_shift(s1 + s9); - x2 = fdct_round_shift(s2 + s10); - x3 = fdct_round_shift(s3 + s11); - x4 = fdct_round_shift(s4 + s12); - x5 = fdct_round_shift(s5 + s13); - x6 = fdct_round_shift(s6 + s14); - x7 = fdct_round_shift(s7 + s15); - x8 = fdct_round_shift(s0 - s8); - x9 = fdct_round_shift(s1 - s9); - x10 = fdct_round_shift(s2 - s10); - x11 = fdct_round_shift(s3 - s11); - x12 = fdct_round_shift(s4 - s12); - x13 = fdct_round_shift(s5 
- s13); - x14 = fdct_round_shift(s6 - s14); - x15 = fdct_round_shift(s7 - s15); - - // stage 2 - s0 = x0; - s1 = x1; - s2 = x2; - s3 = x3; - s4 = x4; - s5 = x5; - s6 = x6; - s7 = x7; - s8 = x8 * cospi_4_64 + x9 * cospi_28_64; - s9 = x8 * cospi_28_64 - x9 * cospi_4_64; - s10 = x10 * cospi_20_64 + x11 * cospi_12_64; - s11 = x10 * cospi_12_64 - x11 * cospi_20_64; - s12 = - x12 * cospi_28_64 + x13 * cospi_4_64; - s13 = x12 * cospi_4_64 + x13 * cospi_28_64; - s14 = - x14 * cospi_12_64 + x15 * cospi_20_64; - s15 = x14 * cospi_20_64 + x15 * cospi_12_64; - - x0 = s0 + s4; - x1 = s1 + s5; - x2 = s2 + s6; - x3 = s3 + s7; - x4 = s0 - s4; - x5 = s1 - s5; - x6 = s2 - s6; - x7 = s3 - s7; - x8 = fdct_round_shift(s8 + s12); - x9 = fdct_round_shift(s9 + s13); - x10 = fdct_round_shift(s10 + s14); - x11 = fdct_round_shift(s11 + s15); - x12 = fdct_round_shift(s8 - s12); - x13 = fdct_round_shift(s9 - s13); - x14 = fdct_round_shift(s10 - s14); - x15 = fdct_round_shift(s11 - s15); - - // stage 3 - s0 = x0; - s1 = x1; - s2 = x2; - s3 = x3; - s4 = x4 * cospi_8_64 + x5 * cospi_24_64; - s5 = x4 * cospi_24_64 - x5 * cospi_8_64; - s6 = - x6 * cospi_24_64 + x7 * cospi_8_64; - s7 = x6 * cospi_8_64 + x7 * cospi_24_64; - s8 = x8; - s9 = x9; - s10 = x10; - s11 = x11; - s12 = x12 * cospi_8_64 + x13 * cospi_24_64; - s13 = x12 * cospi_24_64 - x13 * cospi_8_64; - s14 = - x14 * cospi_24_64 + x15 * cospi_8_64; - s15 = x14 * cospi_8_64 + x15 * cospi_24_64; - - x0 = s0 + s2; - x1 = s1 + s3; - x2 = s0 - s2; - x3 = s1 - s3; - x4 = fdct_round_shift(s4 + s6); - x5 = fdct_round_shift(s5 + s7); - x6 = fdct_round_shift(s4 - s6); - x7 = fdct_round_shift(s5 - s7); - x8 = s8 + s10; - x9 = s9 + s11; - x10 = s8 - s10; - x11 = s9 - s11; - x12 = fdct_round_shift(s12 + s14); - x13 = fdct_round_shift(s13 + s15); - x14 = fdct_round_shift(s12 - s14); - x15 = fdct_round_shift(s13 - s15); - - // stage 4 - s2 = (- cospi_16_64) * (x2 + x3); - s3 = cospi_16_64 * (x2 - x3); - s6 = cospi_16_64 * (x6 + x7); - s7 = cospi_16_64 * (- 
x6 + x7); - s10 = cospi_16_64 * (x10 + x11); - s11 = cospi_16_64 * (- x10 + x11); - s14 = (- cospi_16_64) * (x14 + x15); - s15 = cospi_16_64 * (x14 - x15); - - x2 = fdct_round_shift(s2); - x3 = fdct_round_shift(s3); - x6 = fdct_round_shift(s6); - x7 = fdct_round_shift(s7); - x10 = fdct_round_shift(s10); - x11 = fdct_round_shift(s11); - x14 = fdct_round_shift(s14); - x15 = fdct_round_shift(s15); - - output[0] = (tran_low_t)x0; - output[1] = (tran_low_t)-x8; - output[2] = (tran_low_t)x12; - output[3] = (tran_low_t)-x4; - output[4] = (tran_low_t)x6; - output[5] = (tran_low_t)x14; - output[6] = (tran_low_t)x10; - output[7] = (tran_low_t)x2; - output[8] = (tran_low_t)x3; - output[9] = (tran_low_t)x11; - output[10] = (tran_low_t)x15; - output[11] = (tran_low_t)x7; - output[12] = (tran_low_t)x5; - output[13] = (tran_low_t)-x13; - output[14] = (tran_low_t)x9; - output[15] = (tran_low_t)-x1; -} - -static const transform_2d FHT_4[] = { - { fdct4, fdct4 }, // DCT_DCT = 0 - { fadst4, fdct4 }, // ADST_DCT = 1 - { fdct4, fadst4 }, // DCT_ADST = 2 - { fadst4, fadst4 } // ADST_ADST = 3 -}; - -static const transform_2d FHT_8[] = { - { fdct8, fdct8 }, // DCT_DCT = 0 - { fadst8, fdct8 }, // ADST_DCT = 1 - { fdct8, fadst8 }, // DCT_ADST = 2 - { fadst8, fadst8 } // ADST_ADST = 3 -}; - -static const transform_2d FHT_16[] = { - { fdct16, fdct16 }, // DCT_DCT = 0 - { fadst16, fdct16 }, // ADST_DCT = 1 - { fdct16, fadst16 }, // DCT_ADST = 2 - { fadst16, fadst16 } // ADST_ADST = 3 -}; - -void vp10_fht4x4_c(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { - if (tx_type == DCT_DCT) { - vpx_fdct4x4_c(input, output, stride); - } else { - tran_low_t out[4 * 4]; - int i, j; - tran_low_t temp_in[4], temp_out[4]; - const transform_2d ht = FHT_4[tx_type]; - - // Columns - for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = input[j * stride + i] * 16; - if (i == 0 && temp_in[0]) - temp_in[0] += 1; - ht.cols(temp_in, temp_out); - for (j = 0; j < 4; ++j) - out[j * 4 
+ i] = temp_out[j]; - } - - // Rows - for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = out[j + i * 4]; - ht.rows(temp_in, temp_out); - for (j = 0; j < 4; ++j) - output[j + i * 4] = (temp_out[j] + 1) >> 2; - } - } -} - -void vp10_fdct8x8_quant_c(const int16_t *input, int stride, - tran_low_t *coeff_ptr, intptr_t n_coeffs, - int skip_block, - const int16_t *zbin_ptr, const int16_t *round_ptr, - const int16_t *quant_ptr, - const int16_t *quant_shift_ptr, - tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, - const int16_t *scan, const int16_t *iscan) { - int eob = -1; - - int i, j; - tran_low_t intermediate[64]; - - // Transform columns - { - tran_low_t *output = intermediate; - tran_high_t s0, s1, s2, s3, s4, s5, s6, s7; // canbe16 - tran_high_t t0, t1, t2, t3; // needs32 - tran_high_t x0, x1, x2, x3; // canbe16 - - int i; - for (i = 0; i < 8; i++) { - // stage 1 - s0 = (input[0 * stride] + input[7 * stride]) * 4; - s1 = (input[1 * stride] + input[6 * stride]) * 4; - s2 = (input[2 * stride] + input[5 * stride]) * 4; - s3 = (input[3 * stride] + input[4 * stride]) * 4; - s4 = (input[3 * stride] - input[4 * stride]) * 4; - s5 = (input[2 * stride] - input[5 * stride]) * 4; - s6 = (input[1 * stride] - input[6 * stride]) * 4; - s7 = (input[0 * stride] - input[7 * stride]) * 4; - - // fdct4(step, step); - x0 = s0 + s3; - x1 = s1 + s2; - x2 = s1 - s2; - x3 = s0 - s3; - t0 = (x0 + x1) * cospi_16_64; - t1 = (x0 - x1) * cospi_16_64; - t2 = x2 * cospi_24_64 + x3 * cospi_8_64; - t3 = -x2 * cospi_8_64 + x3 * cospi_24_64; - output[0 * 8] = (tran_low_t)fdct_round_shift(t0); - output[2 * 8] = (tran_low_t)fdct_round_shift(t2); - output[4 * 8] = (tran_low_t)fdct_round_shift(t1); - output[6 * 8] = (tran_low_t)fdct_round_shift(t3); - - // stage 2 - t0 = (s6 - s5) * cospi_16_64; - t1 = (s6 + s5) * cospi_16_64; - t2 = fdct_round_shift(t0); - t3 = fdct_round_shift(t1); - - // stage 3 - x0 = s4 + t2; - x1 = s4 - t2; - x2 = s7 
- t3; - x3 = s7 + t3; - - // stage 4 - t0 = x0 * cospi_28_64 + x3 * cospi_4_64; - t1 = x1 * cospi_12_64 + x2 * cospi_20_64; - t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; - t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; - output[1 * 8] = (tran_low_t)fdct_round_shift(t0); - output[3 * 8] = (tran_low_t)fdct_round_shift(t2); - output[5 * 8] = (tran_low_t)fdct_round_shift(t1); - output[7 * 8] = (tran_low_t)fdct_round_shift(t3); - input++; - output++; - } - } - - // Rows - for (i = 0; i < 8; ++i) { - fdct8(&intermediate[i * 8], &coeff_ptr[i * 8]); - for (j = 0; j < 8; ++j) - coeff_ptr[j + i * 8] /= 2; - } - - // TODO(jingning) Decide the need of these arguments after the - // quantization process is completed. - (void)zbin_ptr; - (void)quant_shift_ptr; - (void)iscan; - - memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr)); - memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr)); - - if (!skip_block) { - // Quantization pass: All coefficients with index >= zero_flag are - // skippable. Note: zero_flag can be zero. 
- for (i = 0; i < n_coeffs; i++) { - const int rc = scan[i]; - const int coeff = coeff_ptr[rc]; - const int coeff_sign = (coeff >> 31); - const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign; - - int tmp = clamp(abs_coeff + round_ptr[rc != 0], INT16_MIN, INT16_MAX); - tmp = (tmp * quant_ptr[rc != 0]) >> 16; - - qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign; - dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0]; - - if (tmp) - eob = i; - } - } - *eob_ptr = eob + 1; -} - -void vp10_fht8x8_c(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { - if (tx_type == DCT_DCT) { - vpx_fdct8x8_c(input, output, stride); - } else { - tran_low_t out[64]; - int i, j; - tran_low_t temp_in[8], temp_out[8]; - const transform_2d ht = FHT_8[tx_type]; - - // Columns - for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = input[j * stride + i] * 4; - ht.cols(temp_in, temp_out); - for (j = 0; j < 8; ++j) - out[j * 8 + i] = temp_out[j]; - } - - // Rows - for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j + i * 8]; - ht.rows(temp_in, temp_out); - for (j = 0; j < 8; ++j) - output[j + i * 8] = (temp_out[j] + (temp_out[j] < 0)) >> 1; - } - } -} - -/* 4-point reversible, orthonormal Walsh-Hadamard in 3.5 adds, 0.5 shifts per - pixel. 
*/ -void vp10_fwht4x4_c(const int16_t *input, tran_low_t *output, int stride) { - int i; - tran_high_t a1, b1, c1, d1, e1; - const int16_t *ip_pass0 = input; - const tran_low_t *ip = NULL; - tran_low_t *op = output; - - for (i = 0; i < 4; i++) { - a1 = ip_pass0[0 * stride]; - b1 = ip_pass0[1 * stride]; - c1 = ip_pass0[2 * stride]; - d1 = ip_pass0[3 * stride]; - - a1 += b1; - d1 = d1 - c1; - e1 = (a1 - d1) >> 1; - b1 = e1 - b1; - c1 = e1 - c1; - a1 -= c1; - d1 += b1; - op[0] = (tran_low_t)a1; - op[4] = (tran_low_t)c1; - op[8] = (tran_low_t)d1; - op[12] = (tran_low_t)b1; - - ip_pass0++; - op++; - } - ip = output; - op = output; - - for (i = 0; i < 4; i++) { - a1 = ip[0]; - b1 = ip[1]; - c1 = ip[2]; - d1 = ip[3]; - - a1 += b1; - d1 -= c1; - e1 = (a1 - d1) >> 1; - b1 = e1 - b1; - c1 = e1 - c1; - a1 -= c1; - d1 += b1; - op[0] = (tran_low_t)(a1 * UNIT_QUANT_FACTOR); - op[1] = (tran_low_t)(c1 * UNIT_QUANT_FACTOR); - op[2] = (tran_low_t)(d1 * UNIT_QUANT_FACTOR); - op[3] = (tran_low_t)(b1 * UNIT_QUANT_FACTOR); - - ip += 4; - op += 4; - } -} - -void vp10_fht16x16_c(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { - if (tx_type == DCT_DCT) { - vpx_fdct16x16_c(input, output, stride); - } else { - tran_low_t out[256]; - int i, j; - tran_low_t temp_in[16], temp_out[16]; - const transform_2d ht = FHT_16[tx_type]; - - // Columns - for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = input[j * stride + i] * 4; - ht.cols(temp_in, temp_out); - for (j = 0; j < 16; ++j) - out[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2; - } - - // Rows - for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j + i * 16]; - ht.rows(temp_in, temp_out); - for (j = 0; j < 16; ++j) - output[j + i * 16] = temp_out[j]; - } - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_fht4x4_c(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { - vp10_fht4x4_c(input, output, stride, tx_type); -} - -void vp10_highbd_fht8x8_c(const 
int16_t *input, tran_low_t *output, - int stride, int tx_type) { - vp10_fht8x8_c(input, output, stride, tx_type); -} - -void vp10_highbd_fwht4x4_c(const int16_t *input, tran_low_t *output, - int stride) { - vp10_fwht4x4_c(input, output, stride); -} - -void vp10_highbd_fht16x16_c(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { - vp10_fht16x16_c(input, output, stride, tx_type); -} -#endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vp10/encoder/denoiser.c b/libs/libvpx/vp10/encoder/denoiser.c deleted file mode 100644 index e5d8157a4a..0000000000 --- a/libs/libvpx/vp10/encoder/denoiser.c +++ /dev/null @@ -1,500 +0,0 @@ -/* - * Copyright (c) 2012 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include -#include "./vpx_dsp_rtcd.h" -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_scale/yv12config.h" -#include "vpx/vpx_integer.h" -#include "vp10/common/reconinter.h" -#include "vp10/encoder/context_tree.h" -#include "vp10/encoder/denoiser.h" - -/* The VP9 denoiser is a work-in-progress. It currently is only designed to work - * with speed 6, though it (inexplicably) seems to also work with speed 5 (one - * would need to modify the source code in vp10_pickmode.c and vp10_encoder.c to - * make the calls to the vp10_denoiser_* functions when in speed 5). - * - * The implementation is very similar to that of the VP8 denoiser. While - * choosing the motion vectors / reference frames, the denoiser is run, and if - * it did not modify the signal to much, the denoised block is copied to the - * signal. 
- */ - -#ifdef OUTPUT_YUV_DENOISED -static void make_grayscale(YV12_BUFFER_CONFIG *yuv); -#endif - -static int absdiff_thresh(BLOCK_SIZE bs, int increase_denoising) { - (void)bs; - return 3 + (increase_denoising ? 1 : 0); -} - -static int delta_thresh(BLOCK_SIZE bs, int increase_denoising) { - (void)bs; - (void)increase_denoising; - return 4; -} - -static int noise_motion_thresh(BLOCK_SIZE bs, int increase_denoising) { - (void)bs; - (void)increase_denoising; - return 625; -} - -static unsigned int sse_thresh(BLOCK_SIZE bs, int increase_denoising) { - return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 60 : 40); -} - -static int sse_diff_thresh(BLOCK_SIZE bs, int increase_denoising, - int motion_magnitude) { - if (motion_magnitude > - noise_motion_thresh(bs, increase_denoising)) { - return 0; - } else { - return (1 << num_pels_log2_lookup[bs]) * 20; - } -} - -int total_adj_strong_thresh(BLOCK_SIZE bs, int increase_denoising) { - return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 3 : 2); -} - -static int total_adj_weak_thresh(BLOCK_SIZE bs, int increase_denoising) { - return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 3 : 2); -} - -// TODO(jackychen): If increase_denoising is enabled in the future, -// we might need to update the code for calculating 'total_adj' in -// case the C code is not bit-exact with corresponding sse2 code. -int vp10_denoiser_filter_c(const uint8_t *sig, int sig_stride, - const uint8_t *mc_avg, - int mc_avg_stride, - uint8_t *avg, int avg_stride, - int increase_denoising, - BLOCK_SIZE bs, - int motion_magnitude) { - int r, c; - const uint8_t *sig_start = sig; - const uint8_t *mc_avg_start = mc_avg; - uint8_t *avg_start = avg; - int diff, adj, absdiff, delta; - int adj_val[] = {3, 4, 6}; - int total_adj = 0; - int shift_inc = 1; - - // If motion_magnitude is small, making the denoiser more aggressive by - // increasing the adjustment for each level. 
Add another increment for - // blocks that are labeled for increase denoising. - if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) { - if (increase_denoising) { - shift_inc = 2; - } - adj_val[0] += shift_inc; - adj_val[1] += shift_inc; - adj_val[2] += shift_inc; - } - - // First attempt to apply a strong temporal denoising filter. - for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) { - for (c = 0; c < (4 << b_width_log2_lookup[bs]); ++c) { - diff = mc_avg[c] - sig[c]; - absdiff = abs(diff); - - if (absdiff <= absdiff_thresh(bs, increase_denoising)) { - avg[c] = mc_avg[c]; - total_adj += diff; - } else { - switch (absdiff) { - case 4: case 5: case 6: case 7: - adj = adj_val[0]; - break; - case 8: case 9: case 10: case 11: - case 12: case 13: case 14: case 15: - adj = adj_val[1]; - break; - default: - adj = adj_val[2]; - } - if (diff > 0) { - avg[c] = VPXMIN(UINT8_MAX, sig[c] + adj); - total_adj += adj; - } else { - avg[c] = VPXMAX(0, sig[c] - adj); - total_adj -= adj; - } - } - } - sig += sig_stride; - avg += avg_stride; - mc_avg += mc_avg_stride; - } - - // If the strong filter did not modify the signal too much, we're all set. - if (abs(total_adj) <= total_adj_strong_thresh(bs, increase_denoising)) { - return FILTER_BLOCK; - } - - // Otherwise, we try to dampen the filter if the delta is not too high. - delta = ((abs(total_adj) - total_adj_strong_thresh(bs, increase_denoising)) - >> num_pels_log2_lookup[bs]) + 1; - - if (delta >= delta_thresh(bs, increase_denoising)) { - return COPY_BLOCK; - } - - mc_avg = mc_avg_start; - avg = avg_start; - sig = sig_start; - for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) { - for (c = 0; c < (4 << b_width_log2_lookup[bs]); ++c) { - diff = mc_avg[c] - sig[c]; - adj = abs(diff); - if (adj > delta) { - adj = delta; - } - if (diff > 0) { - // Diff positive means we made positive adjustment above - // (in first try/attempt), so now make negative adjustment to bring - // denoised signal down. 
- avg[c] = VPXMAX(0, avg[c] - adj); - total_adj -= adj; - } else { - // Diff negative means we made negative adjustment above - // (in first try/attempt), so now make positive adjustment to bring - // denoised signal up. - avg[c] = VPXMIN(UINT8_MAX, avg[c] + adj); - total_adj += adj; - } - } - sig += sig_stride; - avg += avg_stride; - mc_avg += mc_avg_stride; - } - - // We can use the filter if it has been sufficiently dampened - if (abs(total_adj) <= total_adj_weak_thresh(bs, increase_denoising)) { - return FILTER_BLOCK; - } - return COPY_BLOCK; -} - -static uint8_t *block_start(uint8_t *framebuf, int stride, - int mi_row, int mi_col) { - return framebuf + (stride * mi_row * 8) + (mi_col * 8); -} - -static VP9_DENOISER_DECISION perform_motion_compensation(VP9_DENOISER *denoiser, - MACROBLOCK *mb, - BLOCK_SIZE bs, - int increase_denoising, - int mi_row, - int mi_col, - PICK_MODE_CONTEXT *ctx, - int *motion_magnitude - ) { - int mv_col, mv_row; - int sse_diff = ctx->zeromv_sse - ctx->newmv_sse; - MV_REFERENCE_FRAME frame; - MACROBLOCKD *filter_mbd = &mb->e_mbd; - MB_MODE_INFO *mbmi = &filter_mbd->mi[0]->mbmi; - MB_MODE_INFO saved_mbmi; - int i, j; - struct buf_2d saved_dst[MAX_MB_PLANE]; - struct buf_2d saved_pre[MAX_MB_PLANE][2]; // 2 pre buffers - - mv_col = ctx->best_sse_mv.as_mv.col; - mv_row = ctx->best_sse_mv.as_mv.row; - *motion_magnitude = mv_row * mv_row + mv_col * mv_col; - frame = ctx->best_reference_frame; - - saved_mbmi = *mbmi; - - // If the best reference frame uses inter-prediction and there is enough of a - // difference in sum-squared-error, use it. - if (frame != INTRA_FRAME && - sse_diff > sse_diff_thresh(bs, increase_denoising, *motion_magnitude)) { - mbmi->ref_frame[0] = ctx->best_reference_frame; - mbmi->mode = ctx->best_sse_inter_mode; - mbmi->mv[0] = ctx->best_sse_mv; - } else { - // Otherwise, use the zero reference frame. 
- frame = ctx->best_zeromv_reference_frame; - - mbmi->ref_frame[0] = ctx->best_zeromv_reference_frame; - mbmi->mode = ZEROMV; - mbmi->mv[0].as_int = 0; - - ctx->best_sse_inter_mode = ZEROMV; - ctx->best_sse_mv.as_int = 0; - ctx->newmv_sse = ctx->zeromv_sse; - } - - if (ctx->newmv_sse > sse_thresh(bs, increase_denoising)) { - // Restore everything to its original state - *mbmi = saved_mbmi; - return COPY_BLOCK; - } - if (*motion_magnitude > - (noise_motion_thresh(bs, increase_denoising) << 3)) { - // Restore everything to its original state - *mbmi = saved_mbmi; - return COPY_BLOCK; - } - - // We will restore these after motion compensation. - for (i = 0; i < MAX_MB_PLANE; ++i) { - for (j = 0; j < 2; ++j) { - saved_pre[i][j] = filter_mbd->plane[i].pre[j]; - } - saved_dst[i] = filter_mbd->plane[i].dst; - } - - // Set the pointers in the MACROBLOCKD to point to the buffers in the denoiser - // struct. - for (j = 0; j < 2; ++j) { - filter_mbd->plane[0].pre[j].buf = - block_start(denoiser->running_avg_y[frame].y_buffer, - denoiser->running_avg_y[frame].y_stride, - mi_row, mi_col); - filter_mbd->plane[0].pre[j].stride = - denoiser->running_avg_y[frame].y_stride; - filter_mbd->plane[1].pre[j].buf = - block_start(denoiser->running_avg_y[frame].u_buffer, - denoiser->running_avg_y[frame].uv_stride, - mi_row, mi_col); - filter_mbd->plane[1].pre[j].stride = - denoiser->running_avg_y[frame].uv_stride; - filter_mbd->plane[2].pre[j].buf = - block_start(denoiser->running_avg_y[frame].v_buffer, - denoiser->running_avg_y[frame].uv_stride, - mi_row, mi_col); - filter_mbd->plane[2].pre[j].stride = - denoiser->running_avg_y[frame].uv_stride; - } - filter_mbd->plane[0].dst.buf = - block_start(denoiser->mc_running_avg_y.y_buffer, - denoiser->mc_running_avg_y.y_stride, - mi_row, mi_col); - filter_mbd->plane[0].dst.stride = denoiser->mc_running_avg_y.y_stride; - filter_mbd->plane[1].dst.buf = - block_start(denoiser->mc_running_avg_y.u_buffer, - denoiser->mc_running_avg_y.uv_stride, - 
mi_row, mi_col); - filter_mbd->plane[1].dst.stride = denoiser->mc_running_avg_y.uv_stride; - filter_mbd->plane[2].dst.buf = - block_start(denoiser->mc_running_avg_y.v_buffer, - denoiser->mc_running_avg_y.uv_stride, - mi_row, mi_col); - filter_mbd->plane[2].dst.stride = denoiser->mc_running_avg_y.uv_stride; - - vp10_build_inter_predictors_sby(filter_mbd, mv_row, mv_col, bs); - - // Restore everything to its original state - *mbmi = saved_mbmi; - for (i = 0; i < MAX_MB_PLANE; ++i) { - for (j = 0; j < 2; ++j) { - filter_mbd->plane[i].pre[j] = saved_pre[i][j]; - } - filter_mbd->plane[i].dst = saved_dst[i]; - } - - mv_row = ctx->best_sse_mv.as_mv.row; - mv_col = ctx->best_sse_mv.as_mv.col; - - return FILTER_BLOCK; -} - -void vp10_denoiser_denoise(VP9_DENOISER *denoiser, MACROBLOCK *mb, - int mi_row, int mi_col, BLOCK_SIZE bs, - PICK_MODE_CONTEXT *ctx) { - int motion_magnitude = 0; - VP9_DENOISER_DECISION decision = FILTER_BLOCK; - YV12_BUFFER_CONFIG avg = denoiser->running_avg_y[INTRA_FRAME]; - YV12_BUFFER_CONFIG mc_avg = denoiser->mc_running_avg_y; - uint8_t *avg_start = block_start(avg.y_buffer, avg.y_stride, mi_row, mi_col); - uint8_t *mc_avg_start = block_start(mc_avg.y_buffer, mc_avg.y_stride, - mi_row, mi_col); - struct buf_2d src = mb->plane[0].src; - - decision = perform_motion_compensation(denoiser, mb, bs, - denoiser->increase_denoising, - mi_row, mi_col, ctx, - &motion_magnitude); - - if (decision == FILTER_BLOCK) { - decision = vp10_denoiser_filter(src.buf, src.stride, - mc_avg_start, mc_avg.y_stride, - avg_start, avg.y_stride, - 0, bs, motion_magnitude); - } - - if (decision == FILTER_BLOCK) { - vpx_convolve_copy(avg_start, avg.y_stride, src.buf, src.stride, - NULL, 0, NULL, 0, - num_4x4_blocks_wide_lookup[bs] << 2, - num_4x4_blocks_high_lookup[bs] << 2); - } else { // COPY_BLOCK - vpx_convolve_copy(src.buf, src.stride, avg_start, avg.y_stride, - NULL, 0, NULL, 0, - num_4x4_blocks_wide_lookup[bs] << 2, - num_4x4_blocks_high_lookup[bs] << 2); - } -} - 
-static void copy_frame(YV12_BUFFER_CONFIG dest, const YV12_BUFFER_CONFIG src) { - int r; - const uint8_t *srcbuf = src.y_buffer; - uint8_t *destbuf = dest.y_buffer; - - assert(dest.y_width == src.y_width); - assert(dest.y_height == src.y_height); - - for (r = 0; r < dest.y_height; ++r) { - memcpy(destbuf, srcbuf, dest.y_width); - destbuf += dest.y_stride; - srcbuf += src.y_stride; - } -} - -static void swap_frame_buffer(YV12_BUFFER_CONFIG *dest, - YV12_BUFFER_CONFIG *src) { - uint8_t *tmp_buf = dest->y_buffer; - assert(dest->y_width == src->y_width); - assert(dest->y_height == src->y_height); - dest->y_buffer = src->y_buffer; - src->y_buffer = tmp_buf; -} - -void vp10_denoiser_update_frame_info(VP9_DENOISER *denoiser, - YV12_BUFFER_CONFIG src, - FRAME_TYPE frame_type, - int refresh_alt_ref_frame, - int refresh_golden_frame, - int refresh_last_frame) { - if (frame_type == KEY_FRAME) { - int i; - // Start at 1 so as not to overwrite the INTRA_FRAME - for (i = 1; i < MAX_REF_FRAMES; ++i) - copy_frame(denoiser->running_avg_y[i], src); - return; - } - - /* For non key frames */ - if (refresh_alt_ref_frame) { - swap_frame_buffer(&denoiser->running_avg_y[ALTREF_FRAME], - &denoiser->running_avg_y[INTRA_FRAME]); - } - if (refresh_golden_frame) { - swap_frame_buffer(&denoiser->running_avg_y[GOLDEN_FRAME], - &denoiser->running_avg_y[INTRA_FRAME]); - } - if (refresh_last_frame) { - swap_frame_buffer(&denoiser->running_avg_y[LAST_FRAME], - &denoiser->running_avg_y[INTRA_FRAME]); - } -} - -void vp10_denoiser_reset_frame_stats(PICK_MODE_CONTEXT *ctx) { - ctx->zeromv_sse = UINT_MAX; - ctx->newmv_sse = UINT_MAX; -} - -void vp10_denoiser_update_frame_stats(MB_MODE_INFO *mbmi, unsigned int sse, - PREDICTION_MODE mode, - PICK_MODE_CONTEXT *ctx) { - // TODO(tkopp): Use both MVs if possible - if (mbmi->mv[0].as_int == 0 && sse < ctx->zeromv_sse) { - ctx->zeromv_sse = sse; - ctx->best_zeromv_reference_frame = mbmi->ref_frame[0]; - } - - if (mbmi->mv[0].as_int != 0 && sse < 
ctx->newmv_sse) { - ctx->newmv_sse = sse; - ctx->best_sse_inter_mode = mode; - ctx->best_sse_mv = mbmi->mv[0]; - ctx->best_reference_frame = mbmi->ref_frame[0]; - } -} - -int vp10_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height, - int ssx, int ssy, -#if CONFIG_VP9_HIGHBITDEPTH - int use_highbitdepth, -#endif - int border) { - int i, fail; - const int legacy_byte_alignment = 0; - assert(denoiser != NULL); - - for (i = 0; i < MAX_REF_FRAMES; ++i) { - fail = vpx_alloc_frame_buffer(&denoiser->running_avg_y[i], width, height, - ssx, ssy, -#if CONFIG_VP9_HIGHBITDEPTH - use_highbitdepth, -#endif - border, legacy_byte_alignment); - if (fail) { - vp10_denoiser_free(denoiser); - return 1; - } -#ifdef OUTPUT_YUV_DENOISED - make_grayscale(&denoiser->running_avg_y[i]); -#endif - } - - fail = vpx_alloc_frame_buffer(&denoiser->mc_running_avg_y, width, height, - ssx, ssy, -#if CONFIG_VP9_HIGHBITDEPTH - use_highbitdepth, -#endif - border, legacy_byte_alignment); - if (fail) { - vp10_denoiser_free(denoiser); - return 1; - } -#ifdef OUTPUT_YUV_DENOISED - make_grayscale(&denoiser->running_avg_y[i]); -#endif - denoiser->increase_denoising = 0; - denoiser->frame_buffer_initialized = 1; - - return 0; -} - -void vp10_denoiser_free(VP9_DENOISER *denoiser) { - int i; - denoiser->frame_buffer_initialized = 0; - if (denoiser == NULL) { - return; - } - for (i = 0; i < MAX_REF_FRAMES; ++i) { - vpx_free_frame_buffer(&denoiser->running_avg_y[i]); - } - vpx_free_frame_buffer(&denoiser->mc_running_avg_y); -} - -#ifdef OUTPUT_YUV_DENOISED -static void make_grayscale(YV12_BUFFER_CONFIG *yuv) { - int r, c; - uint8_t *u = yuv->u_buffer; - uint8_t *v = yuv->v_buffer; - - for (r = 0; r < yuv->uv_height; ++r) { - for (c = 0; c < yuv->uv_width; ++c) { - u[c] = UINT8_MAX / 2; - v[c] = UINT8_MAX / 2; - } - u += yuv->uv_stride; - v += yuv->uv_stride; - } -} -#endif diff --git a/libs/libvpx/vp10/encoder/denoiser.h b/libs/libvpx/vp10/encoder/denoiser.h deleted file mode 100644 index 
e543fb05fa..0000000000 --- a/libs/libvpx/vp10/encoder/denoiser.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (c) 2012 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP9_ENCODER_DENOISER_H_ -#define VP9_ENCODER_DENOISER_H_ - -#include "vp10/encoder/block.h" -#include "vpx_scale/yv12config.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define MOTION_MAGNITUDE_THRESHOLD (8 * 3) - -typedef enum vp10_denoiser_decision { - COPY_BLOCK, - FILTER_BLOCK -} VP9_DENOISER_DECISION; - -typedef struct vp10_denoiser { - YV12_BUFFER_CONFIG running_avg_y[MAX_REF_FRAMES]; - YV12_BUFFER_CONFIG mc_running_avg_y; - int increase_denoising; - int frame_buffer_initialized; -} VP9_DENOISER; - -void vp10_denoiser_update_frame_info(VP9_DENOISER *denoiser, - YV12_BUFFER_CONFIG src, - FRAME_TYPE frame_type, - int refresh_alt_ref_frame, - int refresh_golden_frame, - int refresh_last_frame); - -void vp10_denoiser_denoise(VP9_DENOISER *denoiser, MACROBLOCK *mb, - int mi_row, int mi_col, BLOCK_SIZE bs, - PICK_MODE_CONTEXT *ctx); - -void vp10_denoiser_reset_frame_stats(PICK_MODE_CONTEXT *ctx); - -void vp10_denoiser_update_frame_stats(MB_MODE_INFO *mbmi, - unsigned int sse, PREDICTION_MODE mode, - PICK_MODE_CONTEXT *ctx); - -int vp10_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height, - int ssx, int ssy, -#if CONFIG_VP9_HIGHBITDEPTH - int use_highbitdepth, -#endif - int border); - -#if CONFIG_VP9_TEMPORAL_DENOISING -int total_adj_strong_thresh(BLOCK_SIZE bs, int increase_denoising); -#endif - -void vp10_denoiser_free(VP9_DENOISER *denoiser); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP9_ENCODER_DENOISER_H_ diff --git 
a/libs/libvpx/vp10/encoder/encodeframe.c b/libs/libvpx/vp10/encoder/encodeframe.c deleted file mode 100644 index 26ce5a1ebe..0000000000 --- a/libs/libvpx/vp10/encoder/encodeframe.c +++ /dev/null @@ -1,3039 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include -#include - -#include "./vp10_rtcd.h" -#include "./vpx_dsp_rtcd.h" -#include "./vpx_config.h" - -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_ports/mem.h" -#include "vpx_ports/vpx_timer.h" -#include "vpx_ports/system_state.h" - -#include "vp10/common/common.h" -#include "vp10/common/entropy.h" -#include "vp10/common/entropymode.h" -#include "vp10/common/idct.h" -#include "vp10/common/mvref_common.h" -#include "vp10/common/pred_common.h" -#include "vp10/common/quant_common.h" -#include "vp10/common/reconintra.h" -#include "vp10/common/reconinter.h" -#include "vp10/common/seg_common.h" -#include "vp10/common/tile_common.h" - -#include "vp10/encoder/aq_complexity.h" -#include "vp10/encoder/aq_cyclicrefresh.h" -#include "vp10/encoder/aq_variance.h" -#include "vp10/encoder/encodeframe.h" -#include "vp10/encoder/encodemb.h" -#include "vp10/encoder/encodemv.h" -#include "vp10/encoder/ethread.h" -#include "vp10/encoder/extend.h" -#include "vp10/encoder/rd.h" -#include "vp10/encoder/rdopt.h" -#include "vp10/encoder/segmentation.h" -#include "vp10/encoder/tokenize.h" - -static void encode_superblock(VP10_COMP *cpi, ThreadData * td, - TOKENEXTRA **t, int output_enabled, - int mi_row, int mi_col, BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx); - -// This is used as a reference when computing the source variance for the -// purposes of 
activity masking. -// Eventually this should be replaced by custom no-reference routines, -// which will be faster. -static const uint8_t VP9_VAR_OFFS[64] = { - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128 -}; - -#if CONFIG_VP9_HIGHBITDEPTH -static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = { - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128 -}; - -static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = { - 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, - 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, - 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, - 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, - 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, - 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, - 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, - 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4 -}; - -static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = { - 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, - 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, - 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, - 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, - 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, - 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, - 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, - 128*16, 128*16, 128*16, 128*16, 128*16, 
128*16, 128*16, 128*16 -}; -#endif // CONFIG_VP9_HIGHBITDEPTH - -unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi, - const struct buf_2d *ref, - BLOCK_SIZE bs) { - unsigned int sse; - const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, - VP9_VAR_OFFS, 0, &sse); - return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]); -} - -#if CONFIG_VP9_HIGHBITDEPTH -unsigned int vp10_high_get_sby_perpixel_variance( - VP10_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) { - unsigned int var, sse; - switch (bd) { - case 10: - var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, - CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10), - 0, &sse); - break; - case 12: - var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, - CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12), - 0, &sse); - break; - case 8: - default: - var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, - CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8), - 0, &sse); - break; - } - return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]); -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -static unsigned int get_sby_perpixel_diff_variance(VP10_COMP *cpi, - const struct buf_2d *ref, - int mi_row, int mi_col, - BLOCK_SIZE bs) { - unsigned int sse, var; - uint8_t *last_y; - const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME); - - assert(last != NULL); - last_y = - &last->y_buffer[mi_row * MI_SIZE * last->y_stride + mi_col * MI_SIZE]; - var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, last_y, last->y_stride, &sse); - return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]); -} - -static BLOCK_SIZE get_rd_var_based_fixed_partition(VP10_COMP *cpi, - MACROBLOCK *x, - int mi_row, - int mi_col) { - unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src, - mi_row, mi_col, - BLOCK_64X64); - if (var < 8) - return BLOCK_64X64; - else if (var < 128) - return BLOCK_32X32; - else if (var < 2048) - return BLOCK_16X16; - else - return BLOCK_8X8; -} - -// Lighter version of set_offsets that only sets the mode info -// 
pointers. -static INLINE void set_mode_info_offsets(VP10_COMP *const cpi, - MACROBLOCK *const x, - MACROBLOCKD *const xd, - int mi_row, - int mi_col) { - VP10_COMMON *const cm = &cpi->common; - const int idx_str = xd->mi_stride * mi_row + mi_col; - xd->mi = cm->mi_grid_visible + idx_str; - xd->mi[0] = cm->mi + idx_str; - x->mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col); -} - -static void set_offsets(VP10_COMP *cpi, const TileInfo *const tile, - MACROBLOCK *const x, int mi_row, int mi_col, - BLOCK_SIZE bsize) { - VP10_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *mbmi; - const int mi_width = num_8x8_blocks_wide_lookup[bsize]; - const int mi_height = num_8x8_blocks_high_lookup[bsize]; - const struct segmentation *const seg = &cm->seg; - - set_skip_context(xd, mi_row, mi_col); - - set_mode_info_offsets(cpi, x, xd, mi_row, mi_col); - - mbmi = &xd->mi[0]->mbmi; - - // Set up destination pointers. - vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col); - - // Set up limit values for MV components. - // Mv beyond the range do not produce new/different prediction block. - x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND); - x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND); - x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND; - x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND; - - // Set up distance of MB to edge of frame in 1/8th pel units. - assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1))); - set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width, - cm->mi_rows, cm->mi_cols); - - // Set up source buffers. - vp10_setup_src_planes(x, cpi->Source, mi_row, mi_col); - - // R/D setup. - x->rddiv = cpi->rd.RDDIV; - x->rdmult = cpi->rd.RDMULT; - - // Setup segment ID. - if (seg->enabled) { - if (cpi->oxcf.aq_mode != VARIANCE_AQ) { - const uint8_t *const map = seg->update_map ? 
cpi->segmentation_map - : cm->last_frame_seg_map; - mbmi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col); - } - vp10_init_plane_quantizers(cpi, x); - - x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id]; - } else { - mbmi->segment_id = 0; - x->encode_breakout = cpi->encode_breakout; - } - - // required by vp10_append_sub8x8_mvs_for_idx() and vp10_find_best_ref_mvs() - xd->tile = *tile; -} - -static void set_block_size(VP10_COMP * const cpi, - MACROBLOCK *const x, - MACROBLOCKD *const xd, - int mi_row, int mi_col, - BLOCK_SIZE bsize) { - if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) { - set_mode_info_offsets(cpi, x, xd, mi_row, mi_col); - xd->mi[0]->mbmi.sb_type = bsize; - } -} - -typedef struct { - int64_t sum_square_error; - int64_t sum_error; - int log2_count; - int variance; -} var; - -typedef struct { - var none; - var horz[2]; - var vert[2]; -} partition_variance; - -typedef struct { - partition_variance part_variances; - var split[4]; -} v4x4; - -typedef struct { - partition_variance part_variances; - v4x4 split[4]; -} v8x8; - -typedef struct { - partition_variance part_variances; - v8x8 split[4]; -} v16x16; - -typedef struct { - partition_variance part_variances; - v16x16 split[4]; -} v32x32; - -typedef struct { - partition_variance part_variances; - v32x32 split[4]; -} v64x64; - -typedef struct { - partition_variance *part_variances; - var *split[4]; -} variance_node; - -typedef enum { - V16X16, - V32X32, - V64X64, -} TREE_LEVEL; - -static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) { - int i; - node->part_variances = NULL; - switch (bsize) { - case BLOCK_64X64: { - v64x64 *vt = (v64x64 *) data; - node->part_variances = &vt->part_variances; - for (i = 0; i < 4; i++) - node->split[i] = &vt->split[i].part_variances.none; - break; - } - case BLOCK_32X32: { - v32x32 *vt = (v32x32 *) data; - node->part_variances = &vt->part_variances; - for (i = 0; i < 4; i++) - node->split[i] = 
&vt->split[i].part_variances.none; - break; - } - case BLOCK_16X16: { - v16x16 *vt = (v16x16 *) data; - node->part_variances = &vt->part_variances; - for (i = 0; i < 4; i++) - node->split[i] = &vt->split[i].part_variances.none; - break; - } - case BLOCK_8X8: { - v8x8 *vt = (v8x8 *) data; - node->part_variances = &vt->part_variances; - for (i = 0; i < 4; i++) - node->split[i] = &vt->split[i].part_variances.none; - break; - } - case BLOCK_4X4: { - v4x4 *vt = (v4x4 *) data; - node->part_variances = &vt->part_variances; - for (i = 0; i < 4; i++) - node->split[i] = &vt->split[i]; - break; - } - default: { - assert(0); - break; - } - } -} - -// Set variance values given sum square error, sum error, count. -static void fill_variance(int64_t s2, int64_t s, int c, var *v) { - v->sum_square_error = s2; - v->sum_error = s; - v->log2_count = c; -} - -static void get_variance(var *v) { - v->variance = (int)(256 * (v->sum_square_error - - ((v->sum_error * v->sum_error) >> v->log2_count)) >> v->log2_count); -} - -static void sum_2_variances(const var *a, const var *b, var *r) { - assert(a->log2_count == b->log2_count); - fill_variance(a->sum_square_error + b->sum_square_error, - a->sum_error + b->sum_error, a->log2_count + 1, r); -} - -static void fill_variance_tree(void *data, BLOCK_SIZE bsize) { - variance_node node; - memset(&node, 0, sizeof(node)); - tree_to_node(data, bsize, &node); - sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]); - sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]); - sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]); - sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]); - sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1], - &node.part_variances->none); -} - -static int set_vt_partitioning(VP10_COMP *cpi, - MACROBLOCK *const x, - MACROBLOCKD *const xd, - void *data, - BLOCK_SIZE bsize, - int mi_row, - int mi_col, - int64_t 
threshold, - BLOCK_SIZE bsize_min, - int force_split) { - VP10_COMMON * const cm = &cpi->common; - variance_node vt; - const int block_width = num_8x8_blocks_wide_lookup[bsize]; - const int block_height = num_8x8_blocks_high_lookup[bsize]; - const int low_res = (cm->width <= 352 && cm->height <= 288); - - assert(block_height == block_width); - tree_to_node(data, bsize, &vt); - - if (force_split == 1) - return 0; - - // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if - // variance is below threshold, otherwise split will be selected. - // No check for vert/horiz split as too few samples for variance. - if (bsize == bsize_min) { - // Variance already computed to set the force_split. - if (low_res || cm->frame_type == KEY_FRAME) - get_variance(&vt.part_variances->none); - if (mi_col + block_width / 2 < cm->mi_cols && - mi_row + block_height / 2 < cm->mi_rows && - vt.part_variances->none.variance < threshold) { - set_block_size(cpi, x, xd, mi_row, mi_col, bsize); - return 1; - } - return 0; - } else if (bsize > bsize_min) { - // Variance already computed to set the force_split. - if (low_res || cm->frame_type == KEY_FRAME) - get_variance(&vt.part_variances->none); - // For key frame: take split for bsize above 32X32 or very high variance. - if (cm->frame_type == KEY_FRAME && - (bsize > BLOCK_32X32 || - vt.part_variances->none.variance > (threshold << 4))) { - return 0; - } - // If variance is low, take the bsize (no split). - if (mi_col + block_width / 2 < cm->mi_cols && - mi_row + block_height / 2 < cm->mi_rows && - vt.part_variances->none.variance < threshold) { - set_block_size(cpi, x, xd, mi_row, mi_col, bsize); - return 1; - } - - // Check vertical split. 
- if (mi_row + block_height / 2 < cm->mi_rows) { - BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT); - get_variance(&vt.part_variances->vert[0]); - get_variance(&vt.part_variances->vert[1]); - if (vt.part_variances->vert[0].variance < threshold && - vt.part_variances->vert[1].variance < threshold && - get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) { - set_block_size(cpi, x, xd, mi_row, mi_col, subsize); - set_block_size(cpi, x, xd, mi_row, mi_col + block_width / 2, subsize); - return 1; - } - } - // Check horizontal split. - if (mi_col + block_width / 2 < cm->mi_cols) { - BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ); - get_variance(&vt.part_variances->horz[0]); - get_variance(&vt.part_variances->horz[1]); - if (vt.part_variances->horz[0].variance < threshold && - vt.part_variances->horz[1].variance < threshold && - get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) { - set_block_size(cpi, x, xd, mi_row, mi_col, subsize); - set_block_size(cpi, x, xd, mi_row + block_height / 2, mi_col, subsize); - return 1; - } - } - - return 0; - } - return 0; -} - -// Set the variance split thresholds for following the block sizes: -// 0 - threshold_64x64, 1 - threshold_32x32, 2 - threshold_16x16, -// 3 - vbp_threshold_8x8. vbp_threshold_8x8 (to split to 4x4 partition) is -// currently only used on key frame. -static void set_vbp_thresholds(VP10_COMP *cpi, int64_t thresholds[], int q) { - VP10_COMMON *const cm = &cpi->common; - const int is_key_frame = (cm->frame_type == KEY_FRAME); - const int threshold_multiplier = is_key_frame ? 
20 : 1; - const int64_t threshold_base = (int64_t)(threshold_multiplier * - cpi->y_dequant[q][1]); - if (is_key_frame) { - thresholds[0] = threshold_base; - thresholds[1] = threshold_base >> 2; - thresholds[2] = threshold_base >> 2; - thresholds[3] = threshold_base << 2; - } else { - thresholds[1] = threshold_base; - if (cm->width <= 352 && cm->height <= 288) { - thresholds[0] = threshold_base >> 2; - thresholds[2] = threshold_base << 3; - } else { - thresholds[0] = threshold_base; - thresholds[1] = (5 * threshold_base) >> 2; - if (cm->width >= 1920 && cm->height >= 1080) - thresholds[1] = (7 * threshold_base) >> 2; - thresholds[2] = threshold_base << cpi->oxcf.speed; - } - } -} - -void vp10_set_variance_partition_thresholds(VP10_COMP *cpi, int q) { - VP10_COMMON *const cm = &cpi->common; - SPEED_FEATURES *const sf = &cpi->sf; - const int is_key_frame = (cm->frame_type == KEY_FRAME); - if (sf->partition_search_type != VAR_BASED_PARTITION && - sf->partition_search_type != REFERENCE_PARTITION) { - return; - } else { - set_vbp_thresholds(cpi, cpi->vbp_thresholds, q); - // The thresholds below are not changed locally. - if (is_key_frame) { - cpi->vbp_threshold_sad = 0; - cpi->vbp_bsize_min = BLOCK_8X8; - } else { - if (cm->width <= 352 && cm->height <= 288) - cpi->vbp_threshold_sad = 100; - else - cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000 ? - (cpi->y_dequant[q][1] << 1) : 1000; - cpi->vbp_bsize_min = BLOCK_16X16; - } - cpi->vbp_threshold_minmax = 15 + (q >> 3); - } -} - -// Compute the minmax over the 8x8 subblocks. -static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d, - int dp, int x16_idx, int y16_idx, -#if CONFIG_VP9_HIGHBITDEPTH - int highbd_flag, -#endif - int pixels_wide, - int pixels_high) { - int k; - int minmax_max = 0; - int minmax_min = 255; - // Loop over the 4 8x8 subblocks. 
- for (k = 0; k < 4; k++) { - int x8_idx = x16_idx + ((k & 1) << 3); - int y8_idx = y16_idx + ((k >> 1) << 3); - int min = 0; - int max = 0; - if (x8_idx < pixels_wide && y8_idx < pixels_high) { -#if CONFIG_VP9_HIGHBITDEPTH - if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) { - vpx_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp, - d + y8_idx * dp + x8_idx, dp, - &min, &max); - } else { - vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, - d + y8_idx * dp + x8_idx, dp, - &min, &max); - } -#else - vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, - d + y8_idx * dp + x8_idx, dp, - &min, &max); -#endif - if ((max - min) > minmax_max) - minmax_max = (max - min); - if ((max - min) < minmax_min) - minmax_min = (max - min); - } - } - return (minmax_max - minmax_min); -} - -static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d, - int dp, int x8_idx, int y8_idx, v8x8 *vst, -#if CONFIG_VP9_HIGHBITDEPTH - int highbd_flag, -#endif - int pixels_wide, - int pixels_high, - int is_key_frame) { - int k; - for (k = 0; k < 4; k++) { - int x4_idx = x8_idx + ((k & 1) << 2); - int y4_idx = y8_idx + ((k >> 1) << 2); - unsigned int sse = 0; - int sum = 0; - if (x4_idx < pixels_wide && y4_idx < pixels_high) { - int s_avg; - int d_avg = 128; -#if CONFIG_VP9_HIGHBITDEPTH - if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) { - s_avg = vpx_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp); - if (!is_key_frame) - d_avg = vpx_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp); - } else { - s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp); - if (!is_key_frame) - d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp); - } -#else - s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp); - if (!is_key_frame) - d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp); -#endif - sum = s_avg - d_avg; - sse = sum * sum; - } - fill_variance(sse, sum, 0, &vst->split[k].part_variances.none); - } -} - -static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d, - int dp, int x16_idx, int y16_idx, v16x16 *vst, -#if 
CONFIG_VP9_HIGHBITDEPTH - int highbd_flag, -#endif - int pixels_wide, - int pixels_high, - int is_key_frame) { - int k; - for (k = 0; k < 4; k++) { - int x8_idx = x16_idx + ((k & 1) << 3); - int y8_idx = y16_idx + ((k >> 1) << 3); - unsigned int sse = 0; - int sum = 0; - if (x8_idx < pixels_wide && y8_idx < pixels_high) { - int s_avg; - int d_avg = 128; -#if CONFIG_VP9_HIGHBITDEPTH - if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) { - s_avg = vpx_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp); - if (!is_key_frame) - d_avg = vpx_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp); - } else { - s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp); - if (!is_key_frame) - d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp); - } -#else - s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp); - if (!is_key_frame) - d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp); -#endif - sum = s_avg - d_avg; - sse = sum * sum; - } - fill_variance(sse, sum, 0, &vst->split[k].part_variances.none); - } -} - -// This function chooses partitioning based on the variance between source and -// reconstructed last, where variance is computed for down-sampled inputs. -static int choose_partitioning(VP10_COMP *cpi, - const TileInfo *const tile, - MACROBLOCK *x, - int mi_row, int mi_col) { - VP10_COMMON * const cm = &cpi->common; - MACROBLOCKD *xd = &x->e_mbd; - int i, j, k, m; - v64x64 vt; - v16x16 vt2[16]; - int force_split[21]; - uint8_t *s; - const uint8_t *d; - int sp; - int dp; - int pixels_wide = 64, pixels_high = 64; - int64_t thresholds[4] = {cpi->vbp_thresholds[0], cpi->vbp_thresholds[1], - cpi->vbp_thresholds[2], cpi->vbp_thresholds[3]}; - - // Always use 4x4 partition for key frame. 
- const int is_key_frame = (cm->frame_type == KEY_FRAME); - const int use_4x4_partition = is_key_frame; - const int low_res = (cm->width <= 352 && cm->height <= 288); - int variance4x4downsample[16]; - - int segment_id = CR_SEGMENT_ID_BASE; - if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) { - const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map : - cm->last_frame_seg_map; - segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col); - - if (cyclic_refresh_segment_id_boosted(segment_id)) { - int q = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex); - set_vbp_thresholds(cpi, thresholds, q); - } - } - - set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64); - - if (xd->mb_to_right_edge < 0) - pixels_wide += (xd->mb_to_right_edge >> 3); - if (xd->mb_to_bottom_edge < 0) - pixels_high += (xd->mb_to_bottom_edge >> 3); - - s = x->plane[0].src.buf; - sp = x->plane[0].src.stride; - - if (!is_key_frame) { - MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi; - unsigned int uv_sad; - const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME); - - const YV12_BUFFER_CONFIG *yv12_g = NULL; - unsigned int y_sad, y_sad_g; - const BLOCK_SIZE bsize = BLOCK_32X32 - + (mi_col + 4 < cm->mi_cols) * 2 + (mi_row + 4 < cm->mi_rows); - - assert(yv12 != NULL); - yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME); - - if (yv12_g && yv12_g != yv12) { - vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col, - &cm->frame_refs[GOLDEN_FRAME - 1].sf); - y_sad_g = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf, - x->plane[0].src.stride, - xd->plane[0].pre[0].buf, - xd->plane[0].pre[0].stride); - } else { - y_sad_g = UINT_MAX; - } - - vp10_setup_pre_planes(xd, 0, yv12, mi_row, mi_col, - &cm->frame_refs[LAST_FRAME - 1].sf); - mbmi->ref_frame[0] = LAST_FRAME; - mbmi->ref_frame[1] = NONE; - mbmi->sb_type = BLOCK_64X64; - mbmi->mv[0].as_int = 0; - mbmi->interp_filter = BILINEAR; - - y_sad = vp10_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col); - if (y_sad_g < 
y_sad) { - vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col, - &cm->frame_refs[GOLDEN_FRAME - 1].sf); - mbmi->ref_frame[0] = GOLDEN_FRAME; - mbmi->mv[0].as_int = 0; - y_sad = y_sad_g; - } else { - x->pred_mv[LAST_FRAME] = mbmi->mv[0].as_mv; - } - - vp10_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64); - - for (i = 1; i <= 2; ++i) { - struct macroblock_plane *p = &x->plane[i]; - struct macroblockd_plane *pd = &xd->plane[i]; - const BLOCK_SIZE bs = get_plane_block_size(bsize, pd); - - if (bs == BLOCK_INVALID) - uv_sad = UINT_MAX; - else - uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride, - pd->dst.buf, pd->dst.stride); - - x->color_sensitivity[i - 1] = uv_sad > (y_sad >> 2); - } - - d = xd->plane[0].dst.buf; - dp = xd->plane[0].dst.stride; - - // If the y_sad is very small, take 64x64 as partition and exit. - // Don't check on boosted segment for now, as 64x64 is suppressed there. - if (segment_id == CR_SEGMENT_ID_BASE && - y_sad < cpi->vbp_threshold_sad) { - const int block_width = num_8x8_blocks_wide_lookup[BLOCK_64X64]; - const int block_height = num_8x8_blocks_high_lookup[BLOCK_64X64]; - if (mi_col + block_width / 2 < cm->mi_cols && - mi_row + block_height / 2 < cm->mi_rows) { - set_block_size(cpi, x, xd, mi_row, mi_col, BLOCK_64X64); - return 0; - } - } - } else { - d = VP9_VAR_OFFS; - dp = 0; -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - switch (xd->bd) { - case 10: - d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); - break; - case 12: - d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12); - break; - case 8: - default: - d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); - break; - } - } -#endif // CONFIG_VP9_HIGHBITDEPTH - } - - // Index for force_split: 0 for 64x64, 1-4 for 32x32 blocks, - // 5-20 for the 16x16 blocks. - force_split[0] = 0; - // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances - // for splits. 
- for (i = 0; i < 4; i++) { - const int x32_idx = ((i & 1) << 5); - const int y32_idx = ((i >> 1) << 5); - const int i2 = i << 2; - force_split[i + 1] = 0; - for (j = 0; j < 4; j++) { - const int x16_idx = x32_idx + ((j & 1) << 4); - const int y16_idx = y32_idx + ((j >> 1) << 4); - const int split_index = 5 + i2 + j; - v16x16 *vst = &vt.split[i].split[j]; - force_split[split_index] = 0; - variance4x4downsample[i2 + j] = 0; - if (!is_key_frame) { - fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst, -#if CONFIG_VP9_HIGHBITDEPTH - xd->cur_buf->flags, -#endif - pixels_wide, - pixels_high, - is_key_frame); - fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16); - get_variance(&vt.split[i].split[j].part_variances.none); - if (vt.split[i].split[j].part_variances.none.variance > - thresholds[2]) { - // 16X16 variance is above threshold for split, so force split to 8x8 - // for this 16x16 block (this also forces splits for upper levels). - force_split[split_index] = 1; - force_split[i + 1] = 1; - force_split[0] = 1; - } else if (vt.split[i].split[j].part_variances.none.variance > - thresholds[1] && - !cyclic_refresh_segment_id_boosted(segment_id)) { - // We have some nominal amount of 16x16 variance (based on average), - // compute the minmax over the 8x8 sub-blocks, and if above threshold, - // force split to 8x8 block for this 16x16 block. - int minmax = compute_minmax_8x8(s, sp, d, dp, x16_idx, y16_idx, -#if CONFIG_VP9_HIGHBITDEPTH - xd->cur_buf->flags, -#endif - pixels_wide, pixels_high); - if (minmax > cpi->vbp_threshold_minmax) { - force_split[split_index] = 1; - force_split[i + 1] = 1; - force_split[0] = 1; - } - } - } - if (is_key_frame || (low_res && - vt.split[i].split[j].part_variances.none.variance > - (thresholds[1] << 1))) { - force_split[split_index] = 0; - // Go down to 4x4 down-sampling for variance. 
- variance4x4downsample[i2 + j] = 1; - for (k = 0; k < 4; k++) { - int x8_idx = x16_idx + ((k & 1) << 3); - int y8_idx = y16_idx + ((k >> 1) << 3); - v8x8 *vst2 = is_key_frame ? &vst->split[k] : - &vt2[i2 + j].split[k]; - fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2, -#if CONFIG_VP9_HIGHBITDEPTH - xd->cur_buf->flags, -#endif - pixels_wide, - pixels_high, - is_key_frame); - } - } - } - } - - // Fill the rest of the variance tree by summing split partition values. - for (i = 0; i < 4; i++) { - const int i2 = i << 2; - for (j = 0; j < 4; j++) { - if (variance4x4downsample[i2 + j] == 1) { - v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] : - &vt.split[i].split[j]; - for (m = 0; m < 4; m++) - fill_variance_tree(&vtemp->split[m], BLOCK_8X8); - fill_variance_tree(vtemp, BLOCK_16X16); - } - } - fill_variance_tree(&vt.split[i], BLOCK_32X32); - // If variance of this 32x32 block is above the threshold, force the block - // to split. This also forces a split on the upper (64x64) level. - if (!force_split[i + 1]) { - get_variance(&vt.split[i].part_variances.none); - if (vt.split[i].part_variances.none.variance > thresholds[1]) { - force_split[i + 1] = 1; - force_split[0] = 1; - } - } - } - if (!force_split[0]) { - fill_variance_tree(&vt, BLOCK_64X64); - get_variance(&vt.part_variances.none); - } - - // Now go through the entire structure, splitting every block size until - // we get to one that's got a variance lower than our threshold. 
- if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows || - !set_vt_partitioning(cpi, x, xd, &vt, BLOCK_64X64, mi_row, mi_col, - thresholds[0], BLOCK_16X16, force_split[0])) { - for (i = 0; i < 4; ++i) { - const int x32_idx = ((i & 1) << 2); - const int y32_idx = ((i >> 1) << 2); - const int i2 = i << 2; - if (!set_vt_partitioning(cpi, x, xd, &vt.split[i], BLOCK_32X32, - (mi_row + y32_idx), (mi_col + x32_idx), - thresholds[1], BLOCK_16X16, - force_split[i + 1])) { - for (j = 0; j < 4; ++j) { - const int x16_idx = ((j & 1) << 1); - const int y16_idx = ((j >> 1) << 1); - // For inter frames: if variance4x4downsample[] == 1 for this 16x16 - // block, then the variance is based on 4x4 down-sampling, so use vt2 - // in set_vt_partioning(), otherwise use vt. - v16x16 *vtemp = (!is_key_frame && - variance4x4downsample[i2 + j] == 1) ? - &vt2[i2 + j] : &vt.split[i].split[j]; - if (!set_vt_partitioning(cpi, x, xd, vtemp, BLOCK_16X16, - mi_row + y32_idx + y16_idx, - mi_col + x32_idx + x16_idx, - thresholds[2], - cpi->vbp_bsize_min, - force_split[5 + i2 + j])) { - for (k = 0; k < 4; ++k) { - const int x8_idx = (k & 1); - const int y8_idx = (k >> 1); - if (use_4x4_partition) { - if (!set_vt_partitioning(cpi, x, xd, &vtemp->split[k], - BLOCK_8X8, - mi_row + y32_idx + y16_idx + y8_idx, - mi_col + x32_idx + x16_idx + x8_idx, - thresholds[3], BLOCK_8X8, 0)) { - set_block_size(cpi, x, xd, - (mi_row + y32_idx + y16_idx + y8_idx), - (mi_col + x32_idx + x16_idx + x8_idx), - BLOCK_4X4); - } - } else { - set_block_size(cpi, x, xd, - (mi_row + y32_idx + y16_idx + y8_idx), - (mi_col + x32_idx + x16_idx + x8_idx), - BLOCK_8X8); - } - } - } - } - } - } - } - return 0; -} - -static void update_state(VP10_COMP *cpi, ThreadData *td, - PICK_MODE_CONTEXT *ctx, - int mi_row, int mi_col, BLOCK_SIZE bsize, - int output_enabled) { - int i, x_idx, y; - VP10_COMMON *const cm = &cpi->common; - RD_COUNTS *const rdc = &td->rd_counts; - MACROBLOCK *const x = &td->mb; - MACROBLOCKD *const xd = 
&x->e_mbd; - struct macroblock_plane *const p = x->plane; - struct macroblockd_plane *const pd = xd->plane; - MODE_INFO *mi = &ctx->mic; - MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - MODE_INFO *mi_addr = xd->mi[0]; - const struct segmentation *const seg = &cm->seg; - const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type]; - const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type]; - const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col); - const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row); - MV_REF *const frame_mvs = - cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col; - int w, h; - - const int mis = cm->mi_stride; - const int mi_width = num_8x8_blocks_wide_lookup[bsize]; - const int mi_height = num_8x8_blocks_high_lookup[bsize]; - int max_plane; - - assert(mi->mbmi.sb_type == bsize); - - *mi_addr = *mi; - *x->mbmi_ext = ctx->mbmi_ext; - - // If segmentation in use - if (seg->enabled) { - // For in frame complexity AQ copy the segment id from the segment map. - if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) { - const uint8_t *const map = seg->update_map ? cpi->segmentation_map - : cm->last_frame_seg_map; - mi_addr->mbmi.segment_id = - get_segment_id(cm, map, bsize, mi_row, mi_col); - } - // Else for cyclic refresh mode update the segment map, set the segment id - // and then update the quantizer. - if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) { - vp10_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, - mi_col, bsize, ctx->rate, ctx->dist, - x->skip); - } - } - - max_plane = is_inter_block(mbmi) ? 
MAX_MB_PLANE : 1; - for (i = 0; i < max_plane; ++i) { - p[i].coeff = ctx->coeff_pbuf[i][1]; - p[i].qcoeff = ctx->qcoeff_pbuf[i][1]; - pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1]; - p[i].eobs = ctx->eobs_pbuf[i][1]; - } - - for (i = max_plane; i < MAX_MB_PLANE; ++i) { - p[i].coeff = ctx->coeff_pbuf[i][2]; - p[i].qcoeff = ctx->qcoeff_pbuf[i][2]; - pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2]; - p[i].eobs = ctx->eobs_pbuf[i][2]; - } - - for (i = 0; i < 2; ++i) - pd[i].color_index_map = ctx->color_index_map[i]; - - // Restore the coding context of the MB to that that was in place - // when the mode was picked for it - for (y = 0; y < mi_height; y++) - for (x_idx = 0; x_idx < mi_width; x_idx++) - if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx - && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) { - xd->mi[x_idx + y * mis] = mi_addr; - } - - if (cpi->oxcf.aq_mode) - vp10_init_plane_quantizers(cpi, x); - - if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) { - mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int; - mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int; - } - - x->skip = ctx->skip; - memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk, - sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk); - - if (!output_enabled) - return; - -#if CONFIG_INTERNAL_STATS - if (frame_is_intra_only(cm)) { - static const int kf_mode_index[] = { - THR_DC /*DC_PRED*/, - THR_V_PRED /*V_PRED*/, - THR_H_PRED /*H_PRED*/, - THR_D45_PRED /*D45_PRED*/, - THR_D135_PRED /*D135_PRED*/, - THR_D117_PRED /*D117_PRED*/, - THR_D153_PRED /*D153_PRED*/, - THR_D207_PRED /*D207_PRED*/, - THR_D63_PRED /*D63_PRED*/, - THR_TM /*TM_PRED*/, - }; - ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]]; - } else { - // Note how often each mode chosen as best - ++cpi->mode_chosen_counts[ctx->best_mode_index]; - } -#endif - if (!frame_is_intra_only(cm)) { - if (is_inter_block(mbmi)) { - vp10_update_mv_count(td); - - if (cm->interp_filter == SWITCHABLE) { - const int ctx = 
vp10_get_pred_context_switchable_interp(xd); - ++td->counts->switchable_interp[ctx][mbmi->interp_filter]; - } - } - - rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff; - rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff; - rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff; - - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) - rdc->filter_diff[i] += ctx->best_filter_diff[i]; - } - - for (h = 0; h < y_mis; ++h) { - MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols; - for (w = 0; w < x_mis; ++w) { - MV_REF *const mv = frame_mv + w; - mv->ref_frame[0] = mi->mbmi.ref_frame[0]; - mv->ref_frame[1] = mi->mbmi.ref_frame[1]; - mv->mv[0].as_int = mi->mbmi.mv[0].as_int; - mv->mv[1].as_int = mi->mbmi.mv[1].as_int; - } - } -} - -void vp10_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src, - int mi_row, int mi_col) { - uint8_t *const buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer }; - const int strides[3] = {src->y_stride, src->uv_stride, src->uv_stride }; - int i; - - // Set current frame pointer. 
- x->e_mbd.cur_buf = src; - - for (i = 0; i < MAX_MB_PLANE; i++) - setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col, - NULL, x->e_mbd.plane[i].subsampling_x, - x->e_mbd.plane[i].subsampling_y); -} - -static int set_segment_rdmult(VP10_COMP *const cpi, - MACROBLOCK *const x, - int8_t segment_id) { - int segment_qindex; - VP10_COMMON *const cm = &cpi->common; - vp10_init_plane_quantizers(cpi, x); - vpx_clear_system_state(); - segment_qindex = vp10_get_qindex(&cm->seg, segment_id, - cm->base_qindex); - return vp10_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q); -} - -static void rd_pick_sb_modes(VP10_COMP *cpi, - TileDataEnc *tile_data, - MACROBLOCK *const x, - int mi_row, int mi_col, RD_COST *rd_cost, - BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, - int64_t best_rd) { - VP10_COMMON *const cm = &cpi->common; - TileInfo *const tile_info = &tile_data->tile_info; - MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *mbmi; - struct macroblock_plane *const p = x->plane; - struct macroblockd_plane *const pd = xd->plane; - const AQ_MODE aq_mode = cpi->oxcf.aq_mode; - int i, orig_rdmult; - - vpx_clear_system_state(); - - // Use the lower precision, but faster, 32x32 fdct for mode selection. 
- x->use_lp32x32fdct = 1; - - set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize); - mbmi = &xd->mi[0]->mbmi; - mbmi->sb_type = bsize; - - for (i = 0; i < MAX_MB_PLANE; ++i) { - p[i].coeff = ctx->coeff_pbuf[i][0]; - p[i].qcoeff = ctx->qcoeff_pbuf[i][0]; - pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0]; - p[i].eobs = ctx->eobs_pbuf[i][0]; - } - - for (i = 0; i < 2; ++i) - pd[i].color_index_map = ctx->color_index_map[i]; - - ctx->is_coded = 0; - ctx->skippable = 0; - ctx->pred_pixel_ready = 0; - x->skip_recode = 0; - - // Set to zero to make sure we do not use the previous encoded frame stats - mbmi->skip = 0; - -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - x->source_variance = - vp10_high_get_sby_perpixel_variance(cpi, &x->plane[0].src, - bsize, xd->bd); - } else { - x->source_variance = - vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize); - } -#else - x->source_variance = - vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize); -#endif // CONFIG_VP9_HIGHBITDEPTH - - // Save rdmult before it might be changed, so it can be restored later. - orig_rdmult = x->rdmult; - - if (aq_mode == VARIANCE_AQ) { - const int energy = bsize <= BLOCK_16X16 ? x->mb_energy - : vp10_block_energy(cpi, x, bsize); - if (cm->frame_type == KEY_FRAME || - cpi->refresh_alt_ref_frame || - (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) { - mbmi->segment_id = vp10_vaq_segment_id(energy); - } else { - const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map - : cm->last_frame_seg_map; - mbmi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col); - } - x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id); - } else if (aq_mode == COMPLEXITY_AQ) { - x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id); - } else if (aq_mode == CYCLIC_REFRESH_AQ) { - const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map - : cm->last_frame_seg_map; - // If segment is boosted, use rdmult for that segment. 
- if (cyclic_refresh_segment_id_boosted( - get_segment_id(cm, map, bsize, mi_row, mi_col))) - x->rdmult = vp10_cyclic_refresh_get_rdmult(cpi->cyclic_refresh); - } - - // Find best coding mode & reconstruct the MB so it is available - // as a predictor for MBs that follow in the SB - if (frame_is_intra_only(cm)) { - vp10_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd); - } else { - if (bsize >= BLOCK_8X8) { - if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) - vp10_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize, - ctx, best_rd); - else - vp10_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, - rd_cost, bsize, ctx, best_rd); - } else { - vp10_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col, - rd_cost, bsize, ctx, best_rd); - } - } - - - // Examine the resulting rate and for AQ mode 2 make a segment choice. - if ((rd_cost->rate != INT_MAX) && - (aq_mode == COMPLEXITY_AQ) && (bsize >= BLOCK_16X16) && - (cm->frame_type == KEY_FRAME || - cpi->refresh_alt_ref_frame || - (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) { - vp10_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate); - } - - x->rdmult = orig_rdmult; - - // TODO(jingning) The rate-distortion optimization flow needs to be - // refactored to provide proper exit/return handle. 
- if (rd_cost->rate == INT_MAX) - rd_cost->rdcost = INT64_MAX; - - ctx->rate = rd_cost->rate; - ctx->dist = rd_cost->dist; -} - -static void update_stats(VP10_COMMON *cm, ThreadData *td) { - const MACROBLOCK *x = &td->mb; - const MACROBLOCKD *const xd = &x->e_mbd; - const MODE_INFO *const mi = xd->mi[0]; - const MB_MODE_INFO *const mbmi = &mi->mbmi; - const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext; - const BLOCK_SIZE bsize = mbmi->sb_type; - - if (!frame_is_intra_only(cm)) { - FRAME_COUNTS *const counts = td->counts; - const int inter_block = is_inter_block(mbmi); - const int seg_ref_active = segfeature_active(&cm->seg, mbmi->segment_id, - SEG_LVL_REF_FRAME); - if (!seg_ref_active) { - counts->intra_inter[vp10_get_intra_inter_context(xd)][inter_block]++; - // If the segment reference feature is enabled we have only a single - // reference frame allowed for the segment so exclude it from - // the reference frame counts used to work out probabilities. - if (inter_block) { - const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0]; - if (cm->reference_mode == REFERENCE_MODE_SELECT) - counts->comp_inter[vp10_get_reference_mode_context(cm, xd)] - [has_second_ref(mbmi)]++; - - if (has_second_ref(mbmi)) { - counts->comp_ref[vp10_get_pred_context_comp_ref_p(cm, xd)] - [ref0 == GOLDEN_FRAME]++; - } else { - counts->single_ref[vp10_get_pred_context_single_ref_p1(xd)][0] - [ref0 != LAST_FRAME]++; - if (ref0 != LAST_FRAME) - counts->single_ref[vp10_get_pred_context_single_ref_p2(xd)][1] - [ref0 != GOLDEN_FRAME]++; - } - } - } - if (inter_block && - !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { - const int mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]]; - if (bsize >= BLOCK_8X8) { - const PREDICTION_MODE mode = mbmi->mode; - ++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)]; - } else { - const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; - const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; - int idx, idy; - for (idy = 0; idy < 2; idy += 
num_4x4_h) { - for (idx = 0; idx < 2; idx += num_4x4_w) { - const int j = idy * 2 + idx; - const PREDICTION_MODE b_mode = mi->bmi[j].as_mode; - ++counts->inter_mode[mode_ctx][INTER_OFFSET(b_mode)]; - } - } - } - } - } -} - -static void restore_context(MACROBLOCK *const x, int mi_row, int mi_col, - ENTROPY_CONTEXT a[16 * MAX_MB_PLANE], - ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], - PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8], - BLOCK_SIZE bsize) { - MACROBLOCKD *const xd = &x->e_mbd; - int p; - const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; - const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; - int mi_width = num_8x8_blocks_wide_lookup[bsize]; - int mi_height = num_8x8_blocks_high_lookup[bsize]; - for (p = 0; p < MAX_MB_PLANE; p++) { - memcpy( - xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x), - a + num_4x4_blocks_wide * p, - (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >> - xd->plane[p].subsampling_x); - memcpy( - xd->left_context[p] - + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y), - l + num_4x4_blocks_high * p, - (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >> - xd->plane[p].subsampling_y); - } - memcpy(xd->above_seg_context + mi_col, sa, - sizeof(*xd->above_seg_context) * mi_width); - memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl, - sizeof(xd->left_seg_context[0]) * mi_height); -} - -static void save_context(MACROBLOCK *const x, int mi_row, int mi_col, - ENTROPY_CONTEXT a[16 * MAX_MB_PLANE], - ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], - PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8], - BLOCK_SIZE bsize) { - const MACROBLOCKD *const xd = &x->e_mbd; - int p; - const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; - const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; - int mi_width = num_8x8_blocks_wide_lookup[bsize]; - int mi_height = num_8x8_blocks_high_lookup[bsize]; - - // buffer the above/left context information of the block in search. 
- for (p = 0; p < MAX_MB_PLANE; ++p) { - memcpy( - a + num_4x4_blocks_wide * p, - xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x), - (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >> - xd->plane[p].subsampling_x); - memcpy( - l + num_4x4_blocks_high * p, - xd->left_context[p] - + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y), - (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >> - xd->plane[p].subsampling_y); - } - memcpy(sa, xd->above_seg_context + mi_col, - sizeof(*xd->above_seg_context) * mi_width); - memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK), - sizeof(xd->left_seg_context[0]) * mi_height); -} - -static void encode_b(VP10_COMP *cpi, const TileInfo *const tile, - ThreadData *td, - TOKENEXTRA **tp, int mi_row, int mi_col, - int output_enabled, BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx) { - MACROBLOCK *const x = &td->mb; - set_offsets(cpi, tile, x, mi_row, mi_col, bsize); - update_state(cpi, td, ctx, mi_row, mi_col, bsize, output_enabled); - encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx); - - if (output_enabled) { - update_stats(&cpi->common, td); - } -} - -static void encode_sb(VP10_COMP *cpi, ThreadData *td, - const TileInfo *const tile, - TOKENEXTRA **tp, int mi_row, int mi_col, - int output_enabled, BLOCK_SIZE bsize, - PC_TREE *pc_tree) { - VP10_COMMON *const cm = &cpi->common; - MACROBLOCK *const x = &td->mb; - MACROBLOCKD *const xd = &x->e_mbd; - - const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4; - int ctx; - PARTITION_TYPE partition; - BLOCK_SIZE subsize = bsize; - - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; - - if (bsize >= BLOCK_8X8) { - ctx = partition_plane_context(xd, mi_row, mi_col, bsize); - subsize = get_subsize(bsize, pc_tree->partitioning); - } else { - ctx = 0; - subsize = BLOCK_4X4; - } - - partition = partition_lookup[bsl][subsize]; - if (output_enabled && bsize != BLOCK_4X4) - td->counts->partition[ctx][partition]++; - - switch (partition) { 
- case PARTITION_NONE: - encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize, - &pc_tree->none); - break; - case PARTITION_VERT: - encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize, - &pc_tree->vertical[0]); - if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) { - encode_b(cpi, tile, td, tp, mi_row, mi_col + hbs, output_enabled, - subsize, &pc_tree->vertical[1]); - } - break; - case PARTITION_HORZ: - encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize, - &pc_tree->horizontal[0]); - if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) { - encode_b(cpi, tile, td, tp, mi_row + hbs, mi_col, output_enabled, - subsize, &pc_tree->horizontal[1]); - } - break; - case PARTITION_SPLIT: - if (bsize == BLOCK_8X8) { - encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize, - pc_tree->leaf_split[0]); - } else { - encode_sb(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize, - pc_tree->split[0]); - encode_sb(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled, - subsize, pc_tree->split[1]); - encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled, - subsize, pc_tree->split[2]); - encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled, - subsize, pc_tree->split[3]); - } - break; - default: - assert(0 && "Invalid partition type."); - break; - } - - if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) - update_partition_context(xd, mi_row, mi_col, subsize, bsize); -} - -// Check to see if the given partition size is allowed for a specified number -// of 8x8 block rows and columns remaining in the image. 
-// If not then return the largest allowed partition size -static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, - int rows_left, int cols_left, - int *bh, int *bw) { - if (rows_left <= 0 || cols_left <= 0) { - return VPXMIN(bsize, BLOCK_8X8); - } else { - for (; bsize > 0; bsize -= 3) { - *bh = num_8x8_blocks_high_lookup[bsize]; - *bw = num_8x8_blocks_wide_lookup[bsize]; - if ((*bh <= rows_left) && (*bw <= cols_left)) { - break; - } - } - } - return bsize; -} - -static void set_partial_b64x64_partition(MODE_INFO *mi, int mis, - int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining, - BLOCK_SIZE bsize, MODE_INFO **mi_8x8) { - int bh = bh_in; - int r, c; - for (r = 0; r < MI_BLOCK_SIZE; r += bh) { - int bw = bw_in; - for (c = 0; c < MI_BLOCK_SIZE; c += bw) { - const int index = r * mis + c; - mi_8x8[index] = mi + index; - mi_8x8[index]->mbmi.sb_type = find_partition_size(bsize, - row8x8_remaining - r, col8x8_remaining - c, &bh, &bw); - } - } -} - -// This function attempts to set all mode info entries in a given SB64 -// to the same block partition size. -// However, at the bottom and right borders of the image the requested size -// may not be allowed in which case this code attempts to choose the largest -// allowable partition. 
-static void set_fixed_partitioning(VP10_COMP *cpi, const TileInfo *const tile, - MODE_INFO **mi_8x8, int mi_row, int mi_col, - BLOCK_SIZE bsize) { - VP10_COMMON *const cm = &cpi->common; - const int mis = cm->mi_stride; - const int row8x8_remaining = tile->mi_row_end - mi_row; - const int col8x8_remaining = tile->mi_col_end - mi_col; - int block_row, block_col; - MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col; - int bh = num_8x8_blocks_high_lookup[bsize]; - int bw = num_8x8_blocks_wide_lookup[bsize]; - - assert((row8x8_remaining > 0) && (col8x8_remaining > 0)); - - // Apply the requested partition size to the SB64 if it is all "in image" - if ((col8x8_remaining >= MI_BLOCK_SIZE) && - (row8x8_remaining >= MI_BLOCK_SIZE)) { - for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) { - for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) { - int index = block_row * mis + block_col; - mi_8x8[index] = mi_upper_left + index; - mi_8x8[index]->mbmi.sb_type = bsize; - } - } - } else { - // Else this is a partial SB64. 
- set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining, - col8x8_remaining, bsize, mi_8x8); - } -} - -static void rd_use_partition(VP10_COMP *cpi, - ThreadData *td, - TileDataEnc *tile_data, - MODE_INFO **mi_8x8, TOKENEXTRA **tp, - int mi_row, int mi_col, - BLOCK_SIZE bsize, - int *rate, int64_t *dist, - int do_recon, PC_TREE *pc_tree) { - VP10_COMMON *const cm = &cpi->common; - TileInfo *const tile_info = &tile_data->tile_info; - MACROBLOCK *const x = &td->mb; - MACROBLOCKD *const xd = &x->e_mbd; - const int mis = cm->mi_stride; - const int bsl = b_width_log2_lookup[bsize]; - const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2; - const int bss = (1 << bsl) / 4; - int i, pl; - PARTITION_TYPE partition = PARTITION_NONE; - BLOCK_SIZE subsize; - ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; - PARTITION_CONTEXT sl[8], sa[8]; - RD_COST last_part_rdc, none_rdc, chosen_rdc; - BLOCK_SIZE sub_subsize = BLOCK_4X4; - int splits_below = 0; - BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type; - int do_partition_search = 1; - PICK_MODE_CONTEXT *ctx = &pc_tree->none; - - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; - - assert(num_4x4_blocks_wide_lookup[bsize] == - num_4x4_blocks_high_lookup[bsize]); - - vp10_rd_cost_reset(&last_part_rdc); - vp10_rd_cost_reset(&none_rdc); - vp10_rd_cost_reset(&chosen_rdc); - - partition = partition_lookup[bsl][bs_type]; - subsize = get_subsize(bsize, partition); - - pc_tree->partitioning = partition; - save_context(x, mi_row, mi_col, a, l, sa, sl, bsize); - - if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) { - set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize); - x->mb_energy = vp10_block_energy(cpi, x, bsize); - } - - if (do_partition_search && - cpi->sf.partition_search_type == SEARCH_PARTITION && - cpi->sf.adjust_partitioning_from_last_frame) { - // Check if any of the sub blocks are further split. 
- if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) { - sub_subsize = get_subsize(subsize, PARTITION_SPLIT); - splits_below = 1; - for (i = 0; i < 4; i++) { - int jj = i >> 1, ii = i & 0x01; - MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss]; - if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) { - splits_below = 0; - } - } - } - - // If partition is not none try none unless each of the 4 splits are split - // even further.. - if (partition != PARTITION_NONE && !splits_below && - mi_row + (mi_step >> 1) < cm->mi_rows && - mi_col + (mi_step >> 1) < cm->mi_cols) { - pc_tree->partitioning = PARTITION_NONE; - rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize, - ctx, INT64_MAX); - - pl = partition_plane_context(xd, mi_row, mi_col, bsize); - - if (none_rdc.rate < INT_MAX) { - none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE]; - none_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, none_rdc.rate, - none_rdc.dist); - } - - restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize); - mi_8x8[0]->mbmi.sb_type = bs_type; - pc_tree->partitioning = partition; - } - } - - switch (partition) { - case PARTITION_NONE: - rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, - bsize, ctx, INT64_MAX); - break; - case PARTITION_HORZ: - rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, - subsize, &pc_tree->horizontal[0], - INT64_MAX); - if (last_part_rdc.rate != INT_MAX && - bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) { - RD_COST tmp_rdc; - PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0]; - vp10_rd_cost_init(&tmp_rdc); - update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0); - encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx); - rd_pick_sb_modes(cpi, tile_data, x, - mi_row + (mi_step >> 1), mi_col, &tmp_rdc, - subsize, &pc_tree->horizontal[1], INT64_MAX); - if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) { - vp10_rd_cost_reset(&last_part_rdc); - break; - } - last_part_rdc.rate += 
tmp_rdc.rate; - last_part_rdc.dist += tmp_rdc.dist; - last_part_rdc.rdcost += tmp_rdc.rdcost; - } - break; - case PARTITION_VERT: - rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, - subsize, &pc_tree->vertical[0], INT64_MAX); - if (last_part_rdc.rate != INT_MAX && - bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) { - RD_COST tmp_rdc; - PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0]; - vp10_rd_cost_init(&tmp_rdc); - update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0); - encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx); - rd_pick_sb_modes(cpi, tile_data, x, - mi_row, mi_col + (mi_step >> 1), &tmp_rdc, - subsize, &pc_tree->vertical[bsize > BLOCK_8X8], - INT64_MAX); - if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) { - vp10_rd_cost_reset(&last_part_rdc); - break; - } - last_part_rdc.rate += tmp_rdc.rate; - last_part_rdc.dist += tmp_rdc.dist; - last_part_rdc.rdcost += tmp_rdc.rdcost; - } - break; - case PARTITION_SPLIT: - if (bsize == BLOCK_8X8) { - rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, - subsize, pc_tree->leaf_split[0], INT64_MAX); - break; - } - last_part_rdc.rate = 0; - last_part_rdc.dist = 0; - last_part_rdc.rdcost = 0; - for (i = 0; i < 4; i++) { - int x_idx = (i & 1) * (mi_step >> 1); - int y_idx = (i >> 1) * (mi_step >> 1); - int jj = i >> 1, ii = i & 0x01; - RD_COST tmp_rdc; - if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols)) - continue; - - vp10_rd_cost_init(&tmp_rdc); - rd_use_partition(cpi, td, tile_data, - mi_8x8 + jj * bss * mis + ii * bss, tp, - mi_row + y_idx, mi_col + x_idx, subsize, - &tmp_rdc.rate, &tmp_rdc.dist, - i != 3, pc_tree->split[i]); - if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) { - vp10_rd_cost_reset(&last_part_rdc); - break; - } - last_part_rdc.rate += tmp_rdc.rate; - last_part_rdc.dist += tmp_rdc.dist; - } - break; - default: - assert(0); - break; - } - - pl = partition_plane_context(xd, mi_row, mi_col, bsize); - if 
(last_part_rdc.rate < INT_MAX) { - last_part_rdc.rate += cpi->partition_cost[pl][partition]; - last_part_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - last_part_rdc.rate, last_part_rdc.dist); - } - - if (do_partition_search - && cpi->sf.adjust_partitioning_from_last_frame - && cpi->sf.partition_search_type == SEARCH_PARTITION - && partition != PARTITION_SPLIT && bsize > BLOCK_8X8 - && (mi_row + mi_step < cm->mi_rows || - mi_row + (mi_step >> 1) == cm->mi_rows) - && (mi_col + mi_step < cm->mi_cols || - mi_col + (mi_step >> 1) == cm->mi_cols)) { - BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT); - chosen_rdc.rate = 0; - chosen_rdc.dist = 0; - restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize); - pc_tree->partitioning = PARTITION_SPLIT; - - // Split partition. - for (i = 0; i < 4; i++) { - int x_idx = (i & 1) * (mi_step >> 1); - int y_idx = (i >> 1) * (mi_step >> 1); - RD_COST tmp_rdc; - ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; - PARTITION_CONTEXT sl[8], sa[8]; - - if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols)) - continue; - - save_context(x, mi_row, mi_col, a, l, sa, sl, bsize); - pc_tree->split[i]->partitioning = PARTITION_NONE; - rd_pick_sb_modes(cpi, tile_data, x, - mi_row + y_idx, mi_col + x_idx, &tmp_rdc, - split_subsize, &pc_tree->split[i]->none, INT64_MAX); - - restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize); - - if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) { - vp10_rd_cost_reset(&chosen_rdc); - break; - } - - chosen_rdc.rate += tmp_rdc.rate; - chosen_rdc.dist += tmp_rdc.dist; - - if (i != 3) - encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0, - split_subsize, pc_tree->split[i]); - - pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx, - split_subsize); - chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE]; - } - pl = partition_plane_context(xd, mi_row, mi_col, bsize); - if (chosen_rdc.rate < INT_MAX) { - chosen_rdc.rate += 
cpi->partition_cost[pl][PARTITION_SPLIT]; - chosen_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - chosen_rdc.rate, chosen_rdc.dist); - } - } - - // If last_part is better set the partitioning to that. - if (last_part_rdc.rdcost < chosen_rdc.rdcost) { - mi_8x8[0]->mbmi.sb_type = bsize; - if (bsize >= BLOCK_8X8) - pc_tree->partitioning = partition; - chosen_rdc = last_part_rdc; - } - // If none was better set the partitioning to that. - if (none_rdc.rdcost < chosen_rdc.rdcost) { - if (bsize >= BLOCK_8X8) - pc_tree->partitioning = PARTITION_NONE; - chosen_rdc = none_rdc; - } - - restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize); - - // We must have chosen a partitioning and encoding or we'll fail later on. - // No other opportunities for success. - if (bsize == BLOCK_64X64) - assert(chosen_rdc.rate < INT_MAX && chosen_rdc.dist < INT64_MAX); - - if (do_recon) { - int output_enabled = (bsize == BLOCK_64X64); - encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize, - pc_tree); - } - - *rate = chosen_rdc.rate; - *dist = chosen_rdc.dist; -} - -static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = { - BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, - BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, - BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, - BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, - BLOCK_16X16 -}; - -static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = { - BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, - BLOCK_16X16, BLOCK_32X32, BLOCK_32X32, - BLOCK_32X32, BLOCK_64X64, BLOCK_64X64, - BLOCK_64X64, BLOCK_64X64, BLOCK_64X64, - BLOCK_64X64 -}; - - -// Look at all the mode_info entries for blocks that are part of this -// partition and find the min and max values for sb_type. -// At the moment this is designed to work on a 64x64 SB but could be -// adjusted to use a size parameter. -// -// The min and max are assumed to have been initialized prior to calling this -// function so repeat calls can accumulate a min and max of more than one sb64. 
-static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO **mi_8x8, - BLOCK_SIZE *min_block_size, - BLOCK_SIZE *max_block_size, - int bs_hist[BLOCK_SIZES]) { - int sb_width_in_blocks = MI_BLOCK_SIZE; - int sb_height_in_blocks = MI_BLOCK_SIZE; - int i, j; - int index = 0; - - // Check the sb_type for each block that belongs to this region. - for (i = 0; i < sb_height_in_blocks; ++i) { - for (j = 0; j < sb_width_in_blocks; ++j) { - MODE_INFO *mi = mi_8x8[index+j]; - BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0; - bs_hist[sb_type]++; - *min_block_size = VPXMIN(*min_block_size, sb_type); - *max_block_size = VPXMAX(*max_block_size, sb_type); - } - index += xd->mi_stride; - } -} - -// Next square block size less or equal than current block size. -static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = { - BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, - BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, - BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, - BLOCK_32X32, BLOCK_32X32, BLOCK_32X32, - BLOCK_64X64 -}; - -// Look at neighboring blocks and set a min and max partition size based on -// what they chose. -static void rd_auto_partition_range(VP10_COMP *cpi, const TileInfo *const tile, - MACROBLOCKD *const xd, - int mi_row, int mi_col, - BLOCK_SIZE *min_block_size, - BLOCK_SIZE *max_block_size) { - VP10_COMMON *const cm = &cpi->common; - MODE_INFO **mi = xd->mi; - const int left_in_image = xd->left_available && mi[-1]; - const int above_in_image = xd->up_available && mi[-xd->mi_stride]; - const int row8x8_remaining = tile->mi_row_end - mi_row; - const int col8x8_remaining = tile->mi_col_end - mi_col; - int bh, bw; - BLOCK_SIZE min_size = BLOCK_4X4; - BLOCK_SIZE max_size = BLOCK_64X64; - int bs_hist[BLOCK_SIZES] = {0}; - - // Trap case where we do not have a prediction. 
- if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) { - // Default "min to max" and "max to min" - min_size = BLOCK_64X64; - max_size = BLOCK_4X4; - - // NOTE: each call to get_sb_partition_size_range() uses the previous - // passed in values for min and max as a starting point. - // Find the min and max partition used in previous frame at this location - if (cm->frame_type != KEY_FRAME) { - MODE_INFO **prev_mi = - &cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col]; - get_sb_partition_size_range(xd, prev_mi, &min_size, &max_size, bs_hist); - } - // Find the min and max partition sizes used in the left SB64 - if (left_in_image) { - MODE_INFO **left_sb64_mi = &mi[-MI_BLOCK_SIZE]; - get_sb_partition_size_range(xd, left_sb64_mi, &min_size, &max_size, - bs_hist); - } - // Find the min and max partition sizes used in the above SB64. - if (above_in_image) { - MODE_INFO **above_sb64_mi = &mi[-xd->mi_stride * MI_BLOCK_SIZE]; - get_sb_partition_size_range(xd, above_sb64_mi, &min_size, &max_size, - bs_hist); - } - - // Adjust observed min and max for "relaxed" auto partition case. - if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) { - min_size = min_partition_size[min_size]; - max_size = max_partition_size[max_size]; - } - } - - // Check border cases where max and min from neighbors may not be legal. - max_size = find_partition_size(max_size, - row8x8_remaining, col8x8_remaining, - &bh, &bw); - // Test for blocks at the edge of the active image. - // This may be the actual edge of the image or where there are formatting - // bars. - if (vp10_active_edge_sb(cpi, mi_row, mi_col)) { - min_size = BLOCK_4X4; - } else { - min_size = - VPXMIN(cpi->sf.rd_auto_partition_min_limit, VPXMIN(min_size, max_size)); - } - - // When use_square_partition_only is true, make sure at least one square - // partition is allowed by selecting the next smaller square size as - // *min_block_size. 
- if (cpi->sf.use_square_partition_only && - next_square_size[max_size] < min_size) { - min_size = next_square_size[max_size]; - } - - *min_block_size = min_size; - *max_block_size = max_size; -} - -// TODO(jingning) refactor functions setting partition search range -static void set_partition_range(VP10_COMMON *cm, MACROBLOCKD *xd, - int mi_row, int mi_col, BLOCK_SIZE bsize, - BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) { - int mi_width = num_8x8_blocks_wide_lookup[bsize]; - int mi_height = num_8x8_blocks_high_lookup[bsize]; - int idx, idy; - - MODE_INFO *mi; - const int idx_str = cm->mi_stride * mi_row + mi_col; - MODE_INFO **prev_mi = &cm->prev_mi_grid_visible[idx_str]; - BLOCK_SIZE bs, min_size, max_size; - - min_size = BLOCK_64X64; - max_size = BLOCK_4X4; - - if (prev_mi) { - for (idy = 0; idy < mi_height; ++idy) { - for (idx = 0; idx < mi_width; ++idx) { - mi = prev_mi[idy * cm->mi_stride + idx]; - bs = mi ? mi->mbmi.sb_type : bsize; - min_size = VPXMIN(min_size, bs); - max_size = VPXMAX(max_size, bs); - } - } - } - - if (xd->left_available) { - for (idy = 0; idy < mi_height; ++idy) { - mi = xd->mi[idy * cm->mi_stride - 1]; - bs = mi ? mi->mbmi.sb_type : bsize; - min_size = VPXMIN(min_size, bs); - max_size = VPXMAX(max_size, bs); - } - } - - if (xd->up_available) { - for (idx = 0; idx < mi_width; ++idx) { - mi = xd->mi[idx - cm->mi_stride]; - bs = mi ? 
mi->mbmi.sb_type : bsize; - min_size = VPXMIN(min_size, bs); - max_size = VPXMAX(max_size, bs); - } - } - - if (min_size == max_size) { - min_size = min_partition_size[min_size]; - max_size = max_partition_size[max_size]; - } - - *min_bs = min_size; - *max_bs = max_size; -} - -static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) { - memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv)); -} - -static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) { - memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv)); -} - -#if CONFIG_FP_MB_STATS -const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] = - {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 4, 4}; -const int num_16x16_blocks_high_lookup[BLOCK_SIZES] = - {1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 4, 2, 4}; -const int qindex_skip_threshold_lookup[BLOCK_SIZES] = - {0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120}; -const int qindex_split_threshold_lookup[BLOCK_SIZES] = - {0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120}; -const int complexity_16x16_blocks_threshold[BLOCK_SIZES] = - {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6}; - -typedef enum { - MV_ZERO = 0, - MV_LEFT = 1, - MV_UP = 2, - MV_RIGHT = 3, - MV_DOWN = 4, - MV_INVALID -} MOTION_DIRECTION; - -static INLINE MOTION_DIRECTION get_motion_direction_fp(uint8_t fp_byte) { - if (fp_byte & FPMB_MOTION_ZERO_MASK) { - return MV_ZERO; - } else if (fp_byte & FPMB_MOTION_LEFT_MASK) { - return MV_LEFT; - } else if (fp_byte & FPMB_MOTION_RIGHT_MASK) { - return MV_RIGHT; - } else if (fp_byte & FPMB_MOTION_UP_MASK) { - return MV_UP; - } else { - return MV_DOWN; - } -} - -static INLINE int get_motion_inconsistency(MOTION_DIRECTION this_mv, - MOTION_DIRECTION that_mv) { - if (this_mv == that_mv) { - return 0; - } else { - return abs(this_mv - that_mv) == 2 ? 2 : 1; - } -} -#endif - -// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are -// unlikely to be selected depending on previous rate-distortion optimization -// results, for encoding speed-up. 
-static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td, - TileDataEnc *tile_data, - TOKENEXTRA **tp, int mi_row, int mi_col, - BLOCK_SIZE bsize, RD_COST *rd_cost, - int64_t best_rd, PC_TREE *pc_tree) { - VP10_COMMON *const cm = &cpi->common; - TileInfo *const tile_info = &tile_data->tile_info; - MACROBLOCK *const x = &td->mb; - MACROBLOCKD *const xd = &x->e_mbd; - const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2; - ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE]; - PARTITION_CONTEXT sl[8], sa[8]; - TOKENEXTRA *tp_orig = *tp; - PICK_MODE_CONTEXT *ctx = &pc_tree->none; - int i, pl; - BLOCK_SIZE subsize; - RD_COST this_rdc, sum_rdc, best_rdc; - int do_split = bsize >= BLOCK_8X8; - int do_rect = 1; - - // Override skipping rectangular partition operations for edge blocks - const int force_horz_split = (mi_row + mi_step >= cm->mi_rows); - const int force_vert_split = (mi_col + mi_step >= cm->mi_cols); - const int xss = x->e_mbd.plane[1].subsampling_x; - const int yss = x->e_mbd.plane[1].subsampling_y; - - BLOCK_SIZE min_size = x->min_partition_size; - BLOCK_SIZE max_size = x->max_partition_size; - -#if CONFIG_FP_MB_STATS - unsigned int src_diff_var = UINT_MAX; - int none_complexity = 0; -#endif - - int partition_none_allowed = !force_horz_split && !force_vert_split; - int partition_horz_allowed = !force_vert_split && yss <= xss && - bsize >= BLOCK_8X8; - int partition_vert_allowed = !force_horz_split && xss <= yss && - bsize >= BLOCK_8X8; - (void) *tp_orig; - - assert(num_8x8_blocks_wide_lookup[bsize] == - num_8x8_blocks_high_lookup[bsize]); - - vp10_rd_cost_init(&this_rdc); - vp10_rd_cost_init(&sum_rdc); - vp10_rd_cost_reset(&best_rdc); - best_rdc.rdcost = best_rd; - - set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize); - - if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) - x->mb_energy = vp10_block_energy(cpi, x, bsize); - - if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) { - int cb_partition_search_ctrl = ((pc_tree->index == 0 || 
pc_tree->index == 3) - + get_chessboard_index(cm->current_video_frame)) & 0x1; - - if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size) - set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size); - } - - // Determine partition types in search according to the speed features. - // The threshold set here has to be of square block size. - if (cpi->sf.auto_min_max_partition_size) { - partition_none_allowed &= (bsize <= max_size && bsize >= min_size); - partition_horz_allowed &= ((bsize <= max_size && bsize > min_size) || - force_horz_split); - partition_vert_allowed &= ((bsize <= max_size && bsize > min_size) || - force_vert_split); - do_split &= bsize > min_size; - } - if (cpi->sf.use_square_partition_only) { - partition_horz_allowed &= force_horz_split; - partition_vert_allowed &= force_vert_split; - } - - save_context(x, mi_row, mi_col, a, l, sa, sl, bsize); - -#if CONFIG_FP_MB_STATS - if (cpi->use_fp_mb_stats) { - set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize); - src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src, - mi_row, mi_col, bsize); - } -#endif - -#if CONFIG_FP_MB_STATS - // Decide whether we shall split directly and skip searching NONE by using - // the first pass block statistics - if (cpi->use_fp_mb_stats && bsize >= BLOCK_32X32 && do_split && - partition_none_allowed && src_diff_var > 4 && - cm->base_qindex < qindex_split_threshold_lookup[bsize]) { - int mb_row = mi_row >> 1; - int mb_col = mi_col >> 1; - int mb_row_end = - VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows); - int mb_col_end = - VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols); - int r, c; - - // compute a complexity measure, basically measure inconsistency of motion - // vectors obtained from the first pass in the current block - for (r = mb_row; r < mb_row_end ; r++) { - for (c = mb_col; c < mb_col_end; c++) { - const int mb_index = r * cm->mb_cols + c; - - MOTION_DIRECTION this_mv; - 
MOTION_DIRECTION right_mv; - MOTION_DIRECTION bottom_mv; - - this_mv = - get_motion_direction_fp(cpi->twopass.this_frame_mb_stats[mb_index]); - - // to its right - if (c != mb_col_end - 1) { - right_mv = get_motion_direction_fp( - cpi->twopass.this_frame_mb_stats[mb_index + 1]); - none_complexity += get_motion_inconsistency(this_mv, right_mv); - } - - // to its bottom - if (r != mb_row_end - 1) { - bottom_mv = get_motion_direction_fp( - cpi->twopass.this_frame_mb_stats[mb_index + cm->mb_cols]); - none_complexity += get_motion_inconsistency(this_mv, bottom_mv); - } - - // do not count its left and top neighbors to avoid double counting - } - } - - if (none_complexity > complexity_16x16_blocks_threshold[bsize]) { - partition_none_allowed = 0; - } - } -#endif - - // PARTITION_NONE - if (partition_none_allowed) { - rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, - &this_rdc, bsize, ctx, best_rdc.rdcost); - if (this_rdc.rate != INT_MAX) { - if (bsize >= BLOCK_8X8) { - pl = partition_plane_context(xd, mi_row, mi_col, bsize); - this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE]; - this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - this_rdc.rate, this_rdc.dist); - } - - if (this_rdc.rdcost < best_rdc.rdcost) { - int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_dist_thr; - int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr; - - best_rdc = this_rdc; - if (bsize >= BLOCK_8X8) - pc_tree->partitioning = PARTITION_NONE; - - // Adjust dist breakout threshold according to the partition size. - dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] + - b_height_log2_lookup[bsize]); - - rate_breakout_thr *= num_pels_log2_lookup[bsize]; - - // If all y, u, v transform blocks in this partition are skippable, and - // the dist & rate are within the thresholds, the partition search is - // terminated for current branch of the partition search tree. 
- // The dist & rate thresholds are set to 0 at speed 0 to disable the - // early termination at that speed. - if (!x->e_mbd.lossless[xd->mi[0]->mbmi.segment_id] && - (ctx->skippable && best_rdc.dist < dist_breakout_thr && - best_rdc.rate < rate_breakout_thr)) { - do_split = 0; - do_rect = 0; - } - -#if CONFIG_FP_MB_STATS - // Check if every 16x16 first pass block statistics has zero - // motion and the corresponding first pass residue is small enough. - // If that is the case, check the difference variance between the - // current frame and the last frame. If the variance is small enough, - // stop further splitting in RD optimization - if (cpi->use_fp_mb_stats && do_split != 0 && - cm->base_qindex > qindex_skip_threshold_lookup[bsize]) { - int mb_row = mi_row >> 1; - int mb_col = mi_col >> 1; - int mb_row_end = - VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows); - int mb_col_end = - VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols); - int r, c; - - int skip = 1; - for (r = mb_row; r < mb_row_end; r++) { - for (c = mb_col; c < mb_col_end; c++) { - const int mb_index = r * cm->mb_cols + c; - if (!(cpi->twopass.this_frame_mb_stats[mb_index] & - FPMB_MOTION_ZERO_MASK) || - !(cpi->twopass.this_frame_mb_stats[mb_index] & - FPMB_ERROR_SMALL_MASK)) { - skip = 0; - break; - } - } - if (skip == 0) { - break; - } - } - if (skip) { - if (src_diff_var == UINT_MAX) { - set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize); - src_diff_var = get_sby_perpixel_diff_variance( - cpi, &x->plane[0].src, mi_row, mi_col, bsize); - } - if (src_diff_var < 8) { - do_split = 0; - do_rect = 0; - } - } - } -#endif - } - } - restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize); - } - - // store estimated motion vector - if (cpi->sf.adaptive_motion_search) - store_pred_mv(x, ctx); - - // PARTITION_SPLIT - // TODO(jingning): use the motion vectors given by the above search as - // the starting point of motion search in the following partition type check. 
- if (do_split) { - subsize = get_subsize(bsize, PARTITION_SPLIT); - if (bsize == BLOCK_8X8) { - i = 4; - if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed) - pc_tree->leaf_split[0]->pred_interp_filter = - ctx->mic.mbmi.interp_filter; - rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize, - pc_tree->leaf_split[0], best_rdc.rdcost); - if (sum_rdc.rate == INT_MAX) - sum_rdc.rdcost = INT64_MAX; - } else { - for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) { - const int x_idx = (i & 1) * mi_step; - const int y_idx = (i >> 1) * mi_step; - - if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols) - continue; - - if (cpi->sf.adaptive_motion_search) - load_pred_mv(x, ctx); - - pc_tree->split[i]->index = i; - rd_pick_partition(cpi, td, tile_data, tp, - mi_row + y_idx, mi_col + x_idx, - subsize, &this_rdc, - best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]); - - if (this_rdc.rate == INT_MAX) { - sum_rdc.rdcost = INT64_MAX; - break; - } else { - sum_rdc.rate += this_rdc.rate; - sum_rdc.dist += this_rdc.dist; - sum_rdc.rdcost += this_rdc.rdcost; - } - } - } - - if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) { - pl = partition_plane_context(xd, mi_row, mi_col, bsize); - sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT]; - sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - sum_rdc.rate, sum_rdc.dist); - - if (sum_rdc.rdcost < best_rdc.rdcost) { - best_rdc = sum_rdc; - pc_tree->partitioning = PARTITION_SPLIT; - } - } else { - // skip rectangular partition test when larger block size - // gives better rd cost - if (cpi->sf.less_rectangular_check) - do_rect &= !partition_none_allowed; - } - restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize); - } - - // PARTITION_HORZ - if (partition_horz_allowed && - (do_rect || vp10_active_h_edge(cpi, mi_row, mi_step))) { - subsize = get_subsize(bsize, PARTITION_HORZ); - if (cpi->sf.adaptive_motion_search) - load_pred_mv(x, ctx); - if (cpi->sf.adaptive_pred_interp_filter && bsize 
== BLOCK_8X8 && - partition_none_allowed) - pc_tree->horizontal[0].pred_interp_filter = - ctx->mic.mbmi.interp_filter; - rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize, - &pc_tree->horizontal[0], best_rdc.rdcost); - - if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + mi_step < cm->mi_rows && - bsize > BLOCK_8X8) { - PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0]; - update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0); - encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx); - - if (cpi->sf.adaptive_motion_search) - load_pred_mv(x, ctx); - if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && - partition_none_allowed) - pc_tree->horizontal[1].pred_interp_filter = - ctx->mic.mbmi.interp_filter; - rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col, - &this_rdc, subsize, &pc_tree->horizontal[1], - best_rdc.rdcost - sum_rdc.rdcost); - if (this_rdc.rate == INT_MAX) { - sum_rdc.rdcost = INT64_MAX; - } else { - sum_rdc.rate += this_rdc.rate; - sum_rdc.dist += this_rdc.dist; - sum_rdc.rdcost += this_rdc.rdcost; - } - } - - if (sum_rdc.rdcost < best_rdc.rdcost) { - pl = partition_plane_context(xd, mi_row, mi_col, bsize); - sum_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ]; - sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist); - if (sum_rdc.rdcost < best_rdc.rdcost) { - best_rdc = sum_rdc; - pc_tree->partitioning = PARTITION_HORZ; - } - } - restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize); - } - // PARTITION_VERT - if (partition_vert_allowed && - (do_rect || vp10_active_v_edge(cpi, mi_col, mi_step))) { - subsize = get_subsize(bsize, PARTITION_VERT); - - if (cpi->sf.adaptive_motion_search) - load_pred_mv(x, ctx); - if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && - partition_none_allowed) - pc_tree->vertical[0].pred_interp_filter = - ctx->mic.mbmi.interp_filter; - rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize, - &pc_tree->vertical[0], 
best_rdc.rdcost); - if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + mi_step < cm->mi_cols && - bsize > BLOCK_8X8) { - update_state(cpi, td, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0); - encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, - &pc_tree->vertical[0]); - - if (cpi->sf.adaptive_motion_search) - load_pred_mv(x, ctx); - if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && - partition_none_allowed) - pc_tree->vertical[1].pred_interp_filter = - ctx->mic.mbmi.interp_filter; - rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step, - &this_rdc, subsize, - &pc_tree->vertical[1], best_rdc.rdcost - sum_rdc.rdcost); - if (this_rdc.rate == INT_MAX) { - sum_rdc.rdcost = INT64_MAX; - } else { - sum_rdc.rate += this_rdc.rate; - sum_rdc.dist += this_rdc.dist; - sum_rdc.rdcost += this_rdc.rdcost; - } - } - - if (sum_rdc.rdcost < best_rdc.rdcost) { - pl = partition_plane_context(xd, mi_row, mi_col, bsize); - sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT]; - sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - sum_rdc.rate, sum_rdc.dist); - if (sum_rdc.rdcost < best_rdc.rdcost) { - best_rdc = sum_rdc; - pc_tree->partitioning = PARTITION_VERT; - } - } - restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize); - } - - // TODO(jbb): This code added so that we avoid static analysis - // warning related to the fact that best_rd isn't used after this - // point. This code should be refactored so that the duplicate - // checks occur in some sub function and thus are used... 
- (void) best_rd; - *rd_cost = best_rdc; - - - if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX && - pc_tree->index != 3) { - int output_enabled = (bsize == BLOCK_64X64); - encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, - bsize, pc_tree); - } - - if (bsize == BLOCK_64X64) { - assert(tp_orig < *tp || (tp_orig == *tp && xd->mi[0]->mbmi.skip)); - assert(best_rdc.rate < INT_MAX); - assert(best_rdc.dist < INT64_MAX); - } else { - assert(tp_orig == *tp); - } -} - -static void encode_rd_sb_row(VP10_COMP *cpi, - ThreadData *td, - TileDataEnc *tile_data, - int mi_row, - TOKENEXTRA **tp) { - VP10_COMMON *const cm = &cpi->common; - TileInfo *const tile_info = &tile_data->tile_info; - MACROBLOCK *const x = &td->mb; - MACROBLOCKD *const xd = &x->e_mbd; - SPEED_FEATURES *const sf = &cpi->sf; - int mi_col; - - // Initialize the left context for the new SB row - memset(&xd->left_context, 0, sizeof(xd->left_context)); - memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context)); - - // Code each SB in the row - for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end; - mi_col += MI_BLOCK_SIZE) { - const struct segmentation *const seg = &cm->seg; - int dummy_rate; - int64_t dummy_dist; - RD_COST dummy_rdc; - int i; - int seg_skip = 0; - - const int idx_str = cm->mi_stride * mi_row + mi_col; - MODE_INFO **mi = cm->mi_grid_visible + idx_str; - - if (sf->adaptive_pred_interp_filter) { - for (i = 0; i < 64; ++i) - td->leaf_tree[i].pred_interp_filter = SWITCHABLE; - - for (i = 0; i < 64; ++i) { - td->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE; - td->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE; - td->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE; - td->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE; - } - } - - vp10_zero(x->pred_mv); - td->pc_root->index = 0; - - if (seg->enabled) { - const uint8_t *const map = seg->update_map ? 
cpi->segmentation_map - : cm->last_frame_seg_map; - int segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col); - seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP); - } - - x->source_variance = UINT_MAX; - if (sf->partition_search_type == FIXED_PARTITION || seg_skip) { - const BLOCK_SIZE bsize = - seg_skip ? BLOCK_64X64 : sf->always_this_block_size; - set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64); - set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize); - rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, - BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root); - } else if (cpi->partition_search_skippable_frame) { - BLOCK_SIZE bsize; - set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64); - bsize = get_rd_var_based_fixed_partition(cpi, x, mi_row, mi_col); - set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize); - rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, - BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root); - } else if (sf->partition_search_type == VAR_BASED_PARTITION && - cm->frame_type != KEY_FRAME) { - choose_partitioning(cpi, tile_info, x, mi_row, mi_col); - rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, - BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root); - } else { - // If required set upper and lower partition size limits - if (sf->auto_min_max_partition_size) { - set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64); - rd_auto_partition_range(cpi, tile_info, xd, mi_row, mi_col, - &x->min_partition_size, - &x->max_partition_size); - } - rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, BLOCK_64X64, - &dummy_rdc, INT64_MAX, td->pc_root); - } - } -} - -static void init_encode_frame_mb_context(VP10_COMP *cpi) { - MACROBLOCK *const x = &cpi->td.mb; - VP10_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &x->e_mbd; - const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols); - - // Copy data over into macro block data 
structures. - vp10_setup_src_planes(x, cpi->Source, 0, 0); - - vp10_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y); - - // Note: this memset assumes above_context[0], [1] and [2] - // are allocated as part of the same buffer. - memset(xd->above_context[0], 0, - sizeof(*xd->above_context[0]) * - 2 * aligned_mi_cols * MAX_MB_PLANE); - memset(xd->above_seg_context, 0, - sizeof(*xd->above_seg_context) * aligned_mi_cols); -} - -static int check_dual_ref_flags(VP10_COMP *cpi) { - const int ref_flags = cpi->ref_frame_flags; - - if (segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) { - return 0; - } else { - return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG) - + !!(ref_flags & VP9_ALT_FLAG)) >= 2; - } -} - -static void reset_skip_tx_size(VP10_COMMON *cm, TX_SIZE max_tx_size) { - int mi_row, mi_col; - const int mis = cm->mi_stride; - MODE_INFO **mi_ptr = cm->mi_grid_visible; - - for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) { - for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) { - if (mi_ptr[mi_col]->mbmi.tx_size > max_tx_size) - mi_ptr[mi_col]->mbmi.tx_size = max_tx_size; - } - } -} - -static MV_REFERENCE_FRAME get_frame_type(const VP10_COMP *cpi) { - if (frame_is_intra_only(&cpi->common)) - return INTRA_FRAME; - else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame) - return ALTREF_FRAME; - else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame) - return GOLDEN_FRAME; - else - return LAST_FRAME; -} - -static TX_MODE select_tx_mode(const VP10_COMP *cpi, MACROBLOCKD *const xd) { - if (xd->lossless[0]) - return ONLY_4X4; - if (cpi->sf.tx_size_search_method == USE_LARGESTALL) - return ALLOW_32X32; - else if (cpi->sf.tx_size_search_method == USE_FULL_RD|| - cpi->sf.tx_size_search_method == USE_TX_8X8) - return TX_MODE_SELECT; - else - return cpi->common.tx_mode; -} - -void vp10_init_tile_data(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - const int tile_cols = 1 << 
cm->log2_tile_cols; - const int tile_rows = 1 << cm->log2_tile_rows; - int tile_col, tile_row; - TOKENEXTRA *pre_tok = cpi->tile_tok[0][0]; - int tile_tok = 0; - - if (cpi->tile_data == NULL || cpi->allocated_tiles < tile_cols * tile_rows) { - if (cpi->tile_data != NULL) - vpx_free(cpi->tile_data); - CHECK_MEM_ERROR(cm, cpi->tile_data, - vpx_malloc(tile_cols * tile_rows * sizeof(*cpi->tile_data))); - cpi->allocated_tiles = tile_cols * tile_rows; - - for (tile_row = 0; tile_row < tile_rows; ++tile_row) - for (tile_col = 0; tile_col < tile_cols; ++tile_col) { - TileDataEnc *tile_data = - &cpi->tile_data[tile_row * tile_cols + tile_col]; - int i, j; - for (i = 0; i < BLOCK_SIZES; ++i) { - for (j = 0; j < MAX_MODES; ++j) { - tile_data->thresh_freq_fact[i][j] = 32; - tile_data->mode_map[i][j] = j; - } - } - } - } - - for (tile_row = 0; tile_row < tile_rows; ++tile_row) { - for (tile_col = 0; tile_col < tile_cols; ++tile_col) { - TileInfo *tile_info = - &cpi->tile_data[tile_row * tile_cols + tile_col].tile_info; - vp10_tile_init(tile_info, cm, tile_row, tile_col); - - cpi->tile_tok[tile_row][tile_col] = pre_tok + tile_tok; - pre_tok = cpi->tile_tok[tile_row][tile_col]; - tile_tok = allocated_tokens(*tile_info); - } - } -} - -void vp10_encode_tile(VP10_COMP *cpi, ThreadData *td, - int tile_row, int tile_col) { - VP10_COMMON *const cm = &cpi->common; - const int tile_cols = 1 << cm->log2_tile_cols; - TileDataEnc *this_tile = - &cpi->tile_data[tile_row * tile_cols + tile_col]; - const TileInfo * const tile_info = &this_tile->tile_info; - TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col]; - int mi_row; - - // Set up pointers to per thread motion search counters. 
- td->mb.m_search_count_ptr = &td->rd_counts.m_search_count; - td->mb.ex_search_count_ptr = &td->rd_counts.ex_search_count; - - for (mi_row = tile_info->mi_row_start; mi_row < tile_info->mi_row_end; - mi_row += MI_BLOCK_SIZE) { - encode_rd_sb_row(cpi, td, this_tile, mi_row, &tok); - } - cpi->tok_count[tile_row][tile_col] = - (unsigned int)(tok - cpi->tile_tok[tile_row][tile_col]); - assert(tok - cpi->tile_tok[tile_row][tile_col] <= - allocated_tokens(*tile_info)); -} - -static void encode_tiles(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - const int tile_cols = 1 << cm->log2_tile_cols; - const int tile_rows = 1 << cm->log2_tile_rows; - int tile_col, tile_row; - - vp10_init_tile_data(cpi); - - for (tile_row = 0; tile_row < tile_rows; ++tile_row) - for (tile_col = 0; tile_col < tile_cols; ++tile_col) - vp10_encode_tile(cpi, &cpi->td, tile_row, tile_col); -} - -#if CONFIG_FP_MB_STATS -static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats, - VP10_COMMON *cm, uint8_t **this_frame_mb_stats) { - uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start + - cm->current_video_frame * cm->MBs * sizeof(uint8_t); - - if (mb_stats_in > firstpass_mb_stats->mb_stats_end) - return EOF; - - *this_frame_mb_stats = mb_stats_in; - - return 1; -} -#endif - -static void encode_frame_internal(VP10_COMP *cpi) { - ThreadData *const td = &cpi->td; - MACROBLOCK *const x = &td->mb; - VP10_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &x->e_mbd; - RD_COUNTS *const rdc = &cpi->td.rd_counts; - int i; - - xd->mi = cm->mi_grid_visible; - xd->mi[0] = cm->mi; - - vp10_zero(*td->counts); - vp10_zero(rdc->coef_counts); - vp10_zero(rdc->comp_pred_diff); - vp10_zero(rdc->filter_diff); - rdc->m_search_count = 0; // Count of motion search hits. - rdc->ex_search_count = 0; // Exhaustive mesh search hits. - - for (i = 0; i < MAX_SEGMENTS; ++i) { - const int qindex = CONFIG_MISC_FIXES && cm->seg.enabled ? 
- vp10_get_qindex(&cm->seg, i, cm->base_qindex) : - cm->base_qindex; - xd->lossless[i] = qindex == 0 && - cm->y_dc_delta_q == 0 && - cm->uv_dc_delta_q == 0 && - cm->uv_ac_delta_q == 0; - } - - if (!cm->seg.enabled && xd->lossless[0]) - x->optimize = 0; - - cm->tx_mode = select_tx_mode(cpi, xd); - - vp10_frame_init_quantizer(cpi); - - vp10_initialize_rd_consts(cpi); - vp10_initialize_me_consts(cpi, x, cm->base_qindex); - init_encode_frame_mb_context(cpi); - cm->use_prev_frame_mvs = !cm->error_resilient_mode && - cm->width == cm->last_width && - cm->height == cm->last_height && - !cm->intra_only && - cm->last_show_frame; - // Special case: set prev_mi to NULL when the previous mode info - // context cannot be used. - cm->prev_mi = cm->use_prev_frame_mvs ? - cm->prev_mip + cm->mi_stride + 1 : NULL; - - x->quant_fp = cpi->sf.use_quant_fp; - vp10_zero(x->skip_txfm); - - { - struct vpx_usec_timer emr_timer; - vpx_usec_timer_start(&emr_timer); - -#if CONFIG_FP_MB_STATS - if (cpi->use_fp_mb_stats) { - input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm, - &cpi->twopass.this_frame_mb_stats); - } -#endif - - // If allowed, encoding tiles in parallel with one thread handling one tile. 
- if (VPXMIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1) - vp10_encode_tiles_mt(cpi); - else - encode_tiles(cpi); - - vpx_usec_timer_mark(&emr_timer); - cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer); - } - -#if 0 - // Keep record of the total distortion this time around for future use - cpi->last_frame_distortion = cpi->frame_distortion; -#endif -} - -static INTERP_FILTER get_interp_filter( - const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) { - if (!is_alt_ref && - threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] && - threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] && - threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) { - return EIGHTTAP_SMOOTH; - } else if (threshes[EIGHTTAP_SHARP] > threshes[EIGHTTAP] && - threshes[EIGHTTAP_SHARP] > threshes[SWITCHABLE - 1]) { - return EIGHTTAP_SHARP; - } else if (threshes[EIGHTTAP] > threshes[SWITCHABLE - 1]) { - return EIGHTTAP; - } else { - return SWITCHABLE; - } -} - -void vp10_encode_frame(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - - // In the longer term the encoder should be generalized to match the - // decoder such that we allow compound where one of the 3 buffers has a - // different sign bias and that buffer is then the fixed ref. However, this - // requires further work in the rd loop. For now the only supported encoder - // side behavior is where the ALT ref buffer has opposite sign bias to - // the other two. 
- if (!frame_is_intra_only(cm)) { - if ((cm->ref_frame_sign_bias[ALTREF_FRAME] == - cm->ref_frame_sign_bias[GOLDEN_FRAME]) || - (cm->ref_frame_sign_bias[ALTREF_FRAME] == - cm->ref_frame_sign_bias[LAST_FRAME])) { - cpi->allow_comp_inter_inter = 0; - } else { - cpi->allow_comp_inter_inter = 1; - cm->comp_fixed_ref = ALTREF_FRAME; - cm->comp_var_ref[0] = LAST_FRAME; - cm->comp_var_ref[1] = GOLDEN_FRAME; - } - } else { - cpi->allow_comp_inter_inter = 0; - } - - if (cpi->sf.frame_parameter_update) { - int i; - RD_OPT *const rd_opt = &cpi->rd; - FRAME_COUNTS *counts = cpi->td.counts; - RD_COUNTS *const rdc = &cpi->td.rd_counts; - - // This code does a single RD pass over the whole frame assuming - // either compound, single or hybrid prediction as per whatever has - // worked best for that type of frame in the past. - // It also predicts whether another coding mode would have worked - // better that this coding mode. If that is the case, it remembers - // that for subsequent frames. - // It does the same analysis for transform size selection also. 
- const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi); - int64_t *const mode_thrs = rd_opt->prediction_type_threshes[frame_type]; - int64_t *const filter_thrs = rd_opt->filter_threshes[frame_type]; - const int is_alt_ref = frame_type == ALTREF_FRAME; - - /* prediction (compound, single or hybrid) mode selection */ - if (is_alt_ref || !cpi->allow_comp_inter_inter) - cm->reference_mode = SINGLE_REFERENCE; - else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] && - mode_thrs[COMPOUND_REFERENCE] > - mode_thrs[REFERENCE_MODE_SELECT] && - check_dual_ref_flags(cpi) && - cpi->static_mb_pct == 100) - cm->reference_mode = COMPOUND_REFERENCE; - else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT]) - cm->reference_mode = SINGLE_REFERENCE; - else - cm->reference_mode = REFERENCE_MODE_SELECT; - - if (cm->interp_filter == SWITCHABLE) - cm->interp_filter = get_interp_filter(filter_thrs, is_alt_ref); - - encode_frame_internal(cpi); - - for (i = 0; i < REFERENCE_MODES; ++i) - mode_thrs[i] = (mode_thrs[i] + rdc->comp_pred_diff[i] / cm->MBs) / 2; - - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) - filter_thrs[i] = (filter_thrs[i] + rdc->filter_diff[i] / cm->MBs) / 2; - - if (cm->reference_mode == REFERENCE_MODE_SELECT) { - int single_count_zero = 0; - int comp_count_zero = 0; - - for (i = 0; i < COMP_INTER_CONTEXTS; i++) { - single_count_zero += counts->comp_inter[i][0]; - comp_count_zero += counts->comp_inter[i][1]; - } - - if (comp_count_zero == 0) { - cm->reference_mode = SINGLE_REFERENCE; - vp10_zero(counts->comp_inter); - } else if (single_count_zero == 0) { - cm->reference_mode = COMPOUND_REFERENCE; - vp10_zero(counts->comp_inter); - } - } - - if (cm->tx_mode == TX_MODE_SELECT) { - int count4x4 = 0; - int count8x8_lp = 0, count8x8_8x8p = 0; - int count16x16_16x16p = 0, count16x16_lp = 0; - int count32x32 = 0; - - for (i = 0; i < TX_SIZE_CONTEXTS; ++i) { - count4x4 += counts->tx.p32x32[i][TX_4X4]; - count4x4 += 
counts->tx.p16x16[i][TX_4X4]; - count4x4 += counts->tx.p8x8[i][TX_4X4]; - - count8x8_lp += counts->tx.p32x32[i][TX_8X8]; - count8x8_lp += counts->tx.p16x16[i][TX_8X8]; - count8x8_8x8p += counts->tx.p8x8[i][TX_8X8]; - - count16x16_16x16p += counts->tx.p16x16[i][TX_16X16]; - count16x16_lp += counts->tx.p32x32[i][TX_16X16]; - count32x32 += counts->tx.p32x32[i][TX_32X32]; - } - if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 && - count32x32 == 0) { - cm->tx_mode = ALLOW_8X8; - reset_skip_tx_size(cm, TX_8X8); - } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 && - count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) { - cm->tx_mode = ONLY_4X4; - reset_skip_tx_size(cm, TX_4X4); - } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) { - cm->tx_mode = ALLOW_32X32; - } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) { - cm->tx_mode = ALLOW_16X16; - reset_skip_tx_size(cm, TX_16X16); - } - } - } else { - cm->reference_mode = SINGLE_REFERENCE; - encode_frame_internal(cpi); - } -} - -static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi, - const MODE_INFO *above_mi, const MODE_INFO *left_mi, - const int intraonly) { - const PREDICTION_MODE y_mode = mi->mbmi.mode; - const PREDICTION_MODE uv_mode = mi->mbmi.uv_mode; - const BLOCK_SIZE bsize = mi->mbmi.sb_type; - - if (bsize < BLOCK_8X8) { - int idx, idy; - const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; - const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; - for (idy = 0; idy < 2; idy += num_4x4_h) - for (idx = 0; idx < 2; idx += num_4x4_w) { - const int bidx = idy * 2 + idx; - const PREDICTION_MODE bmode = mi->bmi[bidx].as_mode; - if (intraonly) { - const PREDICTION_MODE a = vp10_above_block_mode(mi, above_mi, bidx); - const PREDICTION_MODE l = vp10_left_block_mode(mi, left_mi, bidx); - ++counts->kf_y_mode[a][l][bmode]; - } else { - ++counts->y_mode[0][bmode]; - } - } - } else { - if (intraonly) { - const PREDICTION_MODE above = 
vp10_above_block_mode(mi, above_mi, 0); - const PREDICTION_MODE left = vp10_left_block_mode(mi, left_mi, 0); - ++counts->kf_y_mode[above][left][y_mode]; - } else { - ++counts->y_mode[size_group_lookup[bsize]][y_mode]; - } - } - - ++counts->uv_mode[y_mode][uv_mode]; -} - -static void encode_superblock(VP10_COMP *cpi, ThreadData *td, - TOKENEXTRA **t, int output_enabled, - int mi_row, int mi_col, BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx) { - VP10_COMMON *const cm = &cpi->common; - MACROBLOCK *const x = &td->mb; - MACROBLOCKD *const xd = &x->e_mbd; - MODE_INFO **mi_8x8 = xd->mi; - MODE_INFO *mi = mi_8x8[0]; - MB_MODE_INFO *mbmi = &mi->mbmi; - const int seg_skip = segfeature_active(&cm->seg, mbmi->segment_id, - SEG_LVL_SKIP); - const int mis = cm->mi_stride; - const int mi_width = num_8x8_blocks_wide_lookup[bsize]; - const int mi_height = num_8x8_blocks_high_lookup[bsize]; - - x->skip_recode = !x->select_tx_size && mbmi->sb_type >= BLOCK_8X8 && - cpi->oxcf.aq_mode != COMPLEXITY_AQ && - cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ && - cpi->sf.allow_skip_recode; - - if (!x->skip_recode) - memset(x->skip_txfm, 0, sizeof(x->skip_txfm)); - - x->skip_optimize = ctx->is_coded; - ctx->is_coded = 1; - x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct; - - if (!is_inter_block(mbmi)) { - int plane; - mbmi->skip = 1; - for (plane = 0; plane < MAX_MB_PLANE; ++plane) - vp10_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane); - if (output_enabled) - sum_intra_stats(td->counts, mi, xd->above_mi, xd->left_mi, - frame_is_intra_only(cm)); - vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8)); - } else { - int ref; - const int is_compound = has_second_ref(mbmi); - set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]); - for (ref = 0; ref < 1 + is_compound; ++ref) { - YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, - mbmi->ref_frame[ref]); - assert(cfg != NULL); - vp10_setup_pre_planes(xd, ref, cfg, mi_row, mi_col, - &xd->block_refs[ref]->sf); - } - if 
(!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip) - vp10_build_inter_predictors_sby(xd, mi_row, mi_col, - VPXMAX(bsize, BLOCK_8X8)); - - vp10_build_inter_predictors_sbuv(xd, mi_row, mi_col, - VPXMAX(bsize, BLOCK_8X8)); - - vp10_encode_sb(x, VPXMAX(bsize, BLOCK_8X8)); - vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8)); - } - - if (output_enabled) { - if (cm->tx_mode == TX_MODE_SELECT && - mbmi->sb_type >= BLOCK_8X8 && - !(is_inter_block(mbmi) && (mbmi->skip || seg_skip))) { - ++get_tx_counts(max_txsize_lookup[bsize], get_tx_size_context(xd), - &td->counts->tx)[mbmi->tx_size]; - } else { - int x, y; - TX_SIZE tx_size; - // The new intra coding scheme requires no change of transform size - if (is_inter_block(&mi->mbmi)) { - tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode], - max_txsize_lookup[bsize]); - } else { - tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4; - } - - for (y = 0; y < mi_height; y++) - for (x = 0; x < mi_width; x++) - if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) - mi_8x8[mis * y + x]->mbmi.tx_size = tx_size; - } - ++td->counts->tx.tx_totals[mbmi->tx_size]; - ++td->counts->tx.tx_totals[get_uv_tx_size(mbmi, &xd->plane[1])]; - if (mbmi->tx_size < TX_32X32 && - cm->base_qindex > 0 && !mbmi->skip && - !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { - if (is_inter_block(mbmi)) { - ++td->counts->inter_ext_tx[mbmi->tx_size][mbmi->tx_type]; - } else { - ++td->counts->intra_ext_tx[mbmi->tx_size] - [intra_mode_to_tx_type_context[mbmi->mode]] - [mbmi->tx_type]; - } - } - } -} diff --git a/libs/libvpx/vp10/encoder/encodeframe.h b/libs/libvpx/vp10/encoder/encodeframe.h deleted file mode 100644 index fbb81f8b17..0000000000 --- a/libs/libvpx/vp10/encoder/encodeframe.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#ifndef VP10_ENCODER_ENCODEFRAME_H_ -#define VP10_ENCODER_ENCODEFRAME_H_ - -#include "vpx/vpx_integer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct macroblock; -struct yv12_buffer_config; -struct VP10_COMP; -struct ThreadData; - -// Constants used in SOURCE_VAR_BASED_PARTITION -#define VAR_HIST_MAX_BG_VAR 1000 -#define VAR_HIST_FACTOR 10 -#define VAR_HIST_BINS (VAR_HIST_MAX_BG_VAR / VAR_HIST_FACTOR + 1) -#define VAR_HIST_LARGE_CUT_OFF 75 -#define VAR_HIST_SMALL_CUT_OFF 45 - -void vp10_setup_src_planes(struct macroblock *x, - const struct yv12_buffer_config *src, - int mi_row, int mi_col); - -void vp10_encode_frame(struct VP10_COMP *cpi); - -void vp10_init_tile_data(struct VP10_COMP *cpi); -void vp10_encode_tile(struct VP10_COMP *cpi, struct ThreadData *td, - int tile_row, int tile_col); - -void vp10_set_variance_partition_thresholds(struct VP10_COMP *cpi, int q); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_ENCODEFRAME_H_ diff --git a/libs/libvpx/vp10/encoder/encodemb.c b/libs/libvpx/vp10/encoder/encodemb.c deleted file mode 100644 index 92ba4ddb44..0000000000 --- a/libs/libvpx/vp10/encoder/encodemb.c +++ /dev/null @@ -1,1133 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - - -#include "./vp10_rtcd.h" -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" - -#include "vpx_dsp/quantize.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/mem.h" - -#include "vp10/common/idct.h" -#include "vp10/common/reconinter.h" -#include "vp10/common/reconintra.h" -#include "vp10/common/scan.h" - -#include "vp10/encoder/encodemb.h" -#include "vp10/encoder/rd.h" -#include "vp10/encoder/tokenize.h" - -struct optimize_ctx { - ENTROPY_CONTEXT ta[MAX_MB_PLANE][16]; - ENTROPY_CONTEXT tl[MAX_MB_PLANE][16]; -}; - -void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) { - struct macroblock_plane *const p = &x->plane[plane]; - const struct macroblockd_plane *const pd = &x->e_mbd.plane[plane]; - const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd); - const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; - const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize]; - -#if CONFIG_VP9_HIGHBITDEPTH - if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - vpx_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, - p->src.stride, pd->dst.buf, pd->dst.stride, - x->e_mbd.bd); - return; - } -#endif // CONFIG_VP9_HIGHBITDEPTH - vpx_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride, - pd->dst.buf, pd->dst.stride); -} - -#define RDTRUNC(RM, DM, R, D) ((128 + (R) * (RM)) & 0xFF) - -typedef struct vp10_token_state { - int rate; - int error; - int next; - int16_t token; - short qc; -} vp10_token_state; - -// TODO(jimbankoski): experiment to find optimal RD numbers. 
-static const int plane_rd_mult[PLANE_TYPES] = { 4, 2 }; - -#define UPDATE_RD_COST()\ -{\ - rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);\ - rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);\ - if (rd_cost0 == rd_cost1) {\ - rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);\ - rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);\ - }\ -} - -// This function is a place holder for now but may ultimately need -// to scan previous tokens to work out the correct context. -static int trellis_get_coeff_context(const int16_t *scan, - const int16_t *nb, - int idx, int token, - uint8_t *token_cache) { - int bak = token_cache[scan[idx]], pt; - token_cache[scan[idx]] = vp10_pt_energy_class[token]; - pt = get_coef_context(nb, token_cache, idx + 1); - token_cache[scan[idx]] = bak; - return pt; -} - -static int optimize_b(MACROBLOCK *mb, int plane, int block, - TX_SIZE tx_size, int ctx) { - MACROBLOCKD *const xd = &mb->e_mbd; - struct macroblock_plane *const p = &mb->plane[plane]; - struct macroblockd_plane *const pd = &xd->plane[plane]; - const int ref = is_inter_block(&xd->mi[0]->mbmi); - vp10_token_state tokens[1025][2]; - unsigned best_index[1025][2]; - uint8_t token_cache[1024]; - const tran_low_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block); - tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block); - tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); - const int eob = p->eobs[block]; - const PLANE_TYPE type = pd->plane_type; - const int default_eob = 16 << (tx_size << 1); - const int mul = 1 + (tx_size == TX_32X32); - const int16_t *dequant_ptr = pd->dequant; - const uint8_t *const band_translate = get_band_translate(tx_size); - TX_TYPE tx_type = get_tx_type(type, xd, block); - const scan_order *const so = get_scan(tx_size, tx_type); - const int16_t *const scan = so->scan; - const int16_t *const nb = so->neighbors; - int next = eob, sz = 0; - int64_t rdmult = mb->rdmult * plane_rd_mult[type], rddiv = mb->rddiv; - int64_t rd_cost0, rd_cost1; - int 
rate0, rate1, error0, error1; - int16_t t0, t1; - EXTRABIT e0; - int best, band, pt, i, final_eob; -#if CONFIG_VP9_HIGHBITDEPTH - const int16_t *cat6_high_cost = vp10_get_high_cost_table(xd->bd); -#else - const int16_t *cat6_high_cost = vp10_get_high_cost_table(8); -#endif - - assert((!type && !plane) || (type && plane)); - assert(eob <= default_eob); - - /* Now set up a Viterbi trellis to evaluate alternative roundings. */ - if (!ref) - rdmult = (rdmult * 9) >> 4; - - /* Initialize the sentinel node of the trellis. */ - tokens[eob][0].rate = 0; - tokens[eob][0].error = 0; - tokens[eob][0].next = default_eob; - tokens[eob][0].token = EOB_TOKEN; - tokens[eob][0].qc = 0; - tokens[eob][1] = tokens[eob][0]; - - for (i = 0; i < eob; i++) - token_cache[scan[i]] = - vp10_pt_energy_class[vp10_get_token(qcoeff[scan[i]])]; - - for (i = eob; i-- > 0;) { - int base_bits, d2, dx; - const int rc = scan[i]; - int x = qcoeff[rc]; - /* Only add a trellis state for non-zero coefficients. */ - if (x) { - int shortcut = 0; - error0 = tokens[next][0].error; - error1 = tokens[next][1].error; - /* Evaluate the first possibility for this state. */ - rate0 = tokens[next][0].rate; - rate1 = tokens[next][1].rate; - vp10_get_token_extra(x, &t0, &e0); - /* Consider both possible successor states. */ - if (next < default_eob) { - band = band_translate[i + 1]; - pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache); - rate0 += mb->token_costs[tx_size][type][ref][band][0][pt] - [tokens[next][0].token]; - rate1 += mb->token_costs[tx_size][type][ref][band][0][pt] - [tokens[next][1].token]; - } - UPDATE_RD_COST(); - /* And pick the best. */ - best = rd_cost1 < rd_cost0; - base_bits = vp10_get_cost(t0, e0, cat6_high_cost); - dx = mul * (dqcoeff[rc] - coeff[rc]); -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - dx >>= xd->bd - 8; - } -#endif // CONFIG_VP9_HIGHBITDEPTH - d2 = dx * dx; - tokens[i][0].rate = base_bits + (best ? 
rate1 : rate0); - tokens[i][0].error = d2 + (best ? error1 : error0); - tokens[i][0].next = next; - tokens[i][0].token = t0; - tokens[i][0].qc = x; - best_index[i][0] = best; - - /* Evaluate the second possibility for this state. */ - rate0 = tokens[next][0].rate; - rate1 = tokens[next][1].rate; - - if ((abs(x) * dequant_ptr[rc != 0] > abs(coeff[rc]) * mul) && - (abs(x) * dequant_ptr[rc != 0] < abs(coeff[rc]) * mul + - dequant_ptr[rc != 0])) - shortcut = 1; - else - shortcut = 0; - - if (shortcut) { - sz = -(x < 0); - x -= 2 * sz + 1; - } - - /* Consider both possible successor states. */ - if (!x) { - /* If we reduced this coefficient to zero, check to see if - * we need to move the EOB back here. - */ - t0 = tokens[next][0].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN; - t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN; - e0 = 0; - } else { - vp10_get_token_extra(x, &t0, &e0); - t1 = t0; - } - if (next < default_eob) { - band = band_translate[i + 1]; - if (t0 != EOB_TOKEN) { - pt = trellis_get_coeff_context(scan, nb, i, t0, token_cache); - rate0 += mb->token_costs[tx_size][type][ref][band][!x][pt] - [tokens[next][0].token]; - } - if (t1 != EOB_TOKEN) { - pt = trellis_get_coeff_context(scan, nb, i, t1, token_cache); - rate1 += mb->token_costs[tx_size][type][ref][band][!x][pt] - [tokens[next][1].token]; - } - } - - UPDATE_RD_COST(); - /* And pick the best. */ - best = rd_cost1 < rd_cost0; - base_bits = vp10_get_cost(t0, e0, cat6_high_cost); - - if (shortcut) { -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - dx -= ((dequant_ptr[rc != 0] >> (xd->bd - 8)) + sz) ^ sz; - } else { - dx -= (dequant_ptr[rc != 0] + sz) ^ sz; - } -#else - dx -= (dequant_ptr[rc != 0] + sz) ^ sz; -#endif // CONFIG_VP9_HIGHBITDEPTH - d2 = dx * dx; - } - tokens[i][1].rate = base_bits + (best ? rate1 : rate0); - tokens[i][1].error = d2 + (best ? error1 : error0); - tokens[i][1].next = next; - tokens[i][1].token = best ? 
t1 : t0; - tokens[i][1].qc = x; - best_index[i][1] = best; - /* Finally, make this the new head of the trellis. */ - next = i; - } else { - /* There's no choice to make for a zero coefficient, so we don't - * add a new trellis node, but we do need to update the costs. - */ - band = band_translate[i + 1]; - t0 = tokens[next][0].token; - t1 = tokens[next][1].token; - /* Update the cost of each path if we're past the EOB token. */ - if (t0 != EOB_TOKEN) { - tokens[next][0].rate += - mb->token_costs[tx_size][type][ref][band][1][0][t0]; - tokens[next][0].token = ZERO_TOKEN; - } - if (t1 != EOB_TOKEN) { - tokens[next][1].rate += - mb->token_costs[tx_size][type][ref][band][1][0][t1]; - tokens[next][1].token = ZERO_TOKEN; - } - best_index[i][0] = best_index[i][1] = 0; - /* Don't update next, because we didn't add a new node. */ - } - } - - /* Now pick the best path through the whole trellis. */ - band = band_translate[i + 1]; - rate0 = tokens[next][0].rate; - rate1 = tokens[next][1].rate; - error0 = tokens[next][0].error; - error1 = tokens[next][1].error; - t0 = tokens[next][0].token; - t1 = tokens[next][1].token; - rate0 += mb->token_costs[tx_size][type][ref][band][0][ctx][t0]; - rate1 += mb->token_costs[tx_size][type][ref][band][0][ctx][t1]; - UPDATE_RD_COST(); - best = rd_cost1 < rd_cost0; - final_eob = -1; - memset(qcoeff, 0, sizeof(*qcoeff) * (16 << (tx_size * 2))); - memset(dqcoeff, 0, sizeof(*dqcoeff) * (16 << (tx_size * 2))); - for (i = next; i < eob; i = next) { - const int x = tokens[i][best].qc; - const int rc = scan[i]; - if (x) { - final_eob = i; - } - - qcoeff[rc] = x; - dqcoeff[rc] = (x * dequant_ptr[rc != 0]) / mul; - - next = tokens[i][best].next; - best = best_index[i][best]; - } - final_eob++; - - mb->plane[plane].eobs[block] = final_eob; - return final_eob; -} - -static INLINE void fdct32x32(int rd_transform, - const int16_t *src, tran_low_t *dst, - int src_stride) { - if (rd_transform) - vpx_fdct32x32_rd(src, dst, src_stride); - else - 
vpx_fdct32x32(src, dst, src_stride); -} - -#if CONFIG_VP9_HIGHBITDEPTH -static INLINE void highbd_fdct32x32(int rd_transform, const int16_t *src, - tran_low_t *dst, int src_stride) { - if (rd_transform) - vpx_highbd_fdct32x32_rd(src, dst, src_stride); - else - vpx_highbd_fdct32x32(src, dst, src_stride); -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -void vp10_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff, - int diff_stride, TX_TYPE tx_type, int lossless) { - if (lossless) { - vp10_fwht4x4(src_diff, coeff, diff_stride); - } else { - switch (tx_type) { - case DCT_DCT: - vpx_fdct4x4(src_diff, coeff, diff_stride); - break; - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - vp10_fht4x4(src_diff, coeff, diff_stride, tx_type); - break; - default: - assert(0); - break; - } - } -} - -static void fwd_txfm_8x8(const int16_t *src_diff, tran_low_t *coeff, - int diff_stride, TX_TYPE tx_type) { - switch (tx_type) { - case DCT_DCT: - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - vp10_fht8x8(src_diff, coeff, diff_stride, tx_type); - break; - default: - assert(0); - break; - } -} - -static void fwd_txfm_16x16(const int16_t *src_diff, tran_low_t *coeff, - int diff_stride, TX_TYPE tx_type) { - switch (tx_type) { - case DCT_DCT: - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - vp10_fht16x16(src_diff, coeff, diff_stride, tx_type); - break; - default: - assert(0); - break; - } -} - -static void fwd_txfm_32x32(int rd_transform, const int16_t *src_diff, - tran_low_t *coeff, int diff_stride, - TX_TYPE tx_type) { - switch (tx_type) { - case DCT_DCT: - fdct32x32(rd_transform, src_diff, coeff, diff_stride); - break; - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - assert(0); - break; - default: - assert(0); - break; - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff, - int diff_stride, TX_TYPE tx_type, int lossless) { - if (lossless) { - assert(tx_type == DCT_DCT); - vp10_highbd_fwht4x4(src_diff, coeff, 
diff_stride); - } else { - switch (tx_type) { - case DCT_DCT: - vpx_highbd_fdct4x4(src_diff, coeff, diff_stride); - break; - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - vp10_highbd_fht4x4(src_diff, coeff, diff_stride, tx_type); - break; - default: - assert(0); - break; - } - } -} - -static void highbd_fwd_txfm_8x8(const int16_t *src_diff, tran_low_t *coeff, - int diff_stride, TX_TYPE tx_type) { - switch (tx_type) { - case DCT_DCT: - vpx_highbd_fdct8x8(src_diff, coeff, diff_stride); - break; - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - vp10_highbd_fht8x8(src_diff, coeff, diff_stride, tx_type); - break; - default: - assert(0); - break; - } -} - -static void highbd_fwd_txfm_16x16(const int16_t *src_diff, tran_low_t *coeff, - int diff_stride, TX_TYPE tx_type) { - switch (tx_type) { - case DCT_DCT: - vpx_highbd_fdct16x16(src_diff, coeff, diff_stride); - break; - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - vp10_highbd_fht16x16(src_diff, coeff, diff_stride, tx_type); - break; - default: - assert(0); - break; - } -} - -static void highbd_fwd_txfm_32x32(int rd_transform, const int16_t *src_diff, - tran_low_t *coeff, int diff_stride, - TX_TYPE tx_type) { - switch (tx_type) { - case DCT_DCT: - highbd_fdct32x32(rd_transform, src_diff, coeff, diff_stride); - break; - case ADST_DCT: - case DCT_ADST: - case ADST_ADST: - assert(0); - break; - default: - assert(0); - break; - } -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, - int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, TX_SIZE tx_size) { - MACROBLOCKD *const xd = &x->e_mbd; - const struct macroblock_plane *const p = &x->plane[plane]; - const struct macroblockd_plane *const pd = &xd->plane[plane]; - PLANE_TYPE plane_type = (plane == 0) ? 
PLANE_TYPE_Y : PLANE_TYPE_UV; - TX_TYPE tx_type = get_tx_type(plane_type, xd, block); - const scan_order *const scan_order = get_scan(tx_size, tx_type); - tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block); - tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block); - tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); - uint16_t *const eob = &p->eobs[block]; - const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; - const int16_t *src_diff; - src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)]; - -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - switch (tx_size) { - case TX_32X32: - highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride); - vp10_highbd_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin, - p->round_fp, p->quant_fp, p->quant_shift, - qcoeff, dqcoeff, pd->dequant, - eob, scan_order->scan, - scan_order->iscan); - break; - case TX_16X16: - vpx_highbd_fdct16x16(src_diff, coeff, diff_stride); - vp10_highbd_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp, - p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; - case TX_8X8: - vpx_highbd_fdct8x8(src_diff, coeff, diff_stride); - vp10_highbd_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp, - p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; - case TX_4X4: - if (xd->lossless[xd->mi[0]->mbmi.segment_id]) { - vp10_highbd_fwht4x4(src_diff, coeff, diff_stride); - } else { - vpx_highbd_fdct4x4(src_diff, coeff, diff_stride); - } - vp10_highbd_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp, - p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; - default: - assert(0); - } - return; - } -#endif // CONFIG_VP9_HIGHBITDEPTH - - switch (tx_size) { - case TX_32X32: - fdct32x32(x->use_lp32x32fdct, src_diff, coeff, 
diff_stride); - vp10_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin, p->round_fp, - p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, scan_order->scan, - scan_order->iscan); - break; - case TX_16X16: - vpx_fdct16x16(src_diff, coeff, diff_stride); - vp10_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp, - p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; - case TX_8X8: - vp10_fdct8x8_quant(src_diff, diff_stride, coeff, 64, - x->skip_block, p->zbin, p->round_fp, - p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; - case TX_4X4: - if (xd->lossless[xd->mi[0]->mbmi.segment_id]) { - vp10_fwht4x4(src_diff, coeff, diff_stride); - } else { - vpx_fdct4x4(src_diff, coeff, diff_stride); - } - vp10_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp, - p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; - default: - assert(0); - break; - } -} - -void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block, - int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, TX_SIZE tx_size) { - MACROBLOCKD *const xd = &x->e_mbd; - const struct macroblock_plane *const p = &x->plane[plane]; - const struct macroblockd_plane *const pd = &xd->plane[plane]; - tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block); - tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block); - tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); - uint16_t *const eob = &p->eobs[block]; - const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; - const int16_t *src_diff; - src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)]; - -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - switch (tx_size) { - case TX_32X32: - vpx_highbd_fdct32x32_1(src_diff, coeff, diff_stride); - vpx_highbd_quantize_dc_32x32(coeff, x->skip_block, 
p->round, - p->quant_fp[0], qcoeff, dqcoeff, - pd->dequant[0], eob); - break; - case TX_16X16: - vpx_highbd_fdct16x16_1(src_diff, coeff, diff_stride); - vpx_highbd_quantize_dc(coeff, 256, x->skip_block, p->round, - p->quant_fp[0], qcoeff, dqcoeff, - pd->dequant[0], eob); - break; - case TX_8X8: - vpx_highbd_fdct8x8_1(src_diff, coeff, diff_stride); - vpx_highbd_quantize_dc(coeff, 64, x->skip_block, p->round, - p->quant_fp[0], qcoeff, dqcoeff, - pd->dequant[0], eob); - break; - case TX_4X4: - if (xd->lossless[xd->mi[0]->mbmi.segment_id]) { - vp10_highbd_fwht4x4(src_diff, coeff, diff_stride); - } else { - vpx_highbd_fdct4x4(src_diff, coeff, diff_stride); - } - vpx_highbd_quantize_dc(coeff, 16, x->skip_block, p->round, - p->quant_fp[0], qcoeff, dqcoeff, - pd->dequant[0], eob); - break; - default: - assert(0); - } - return; - } -#endif // CONFIG_VP9_HIGHBITDEPTH - - switch (tx_size) { - case TX_32X32: - vpx_fdct32x32_1(src_diff, coeff, diff_stride); - vpx_quantize_dc_32x32(coeff, x->skip_block, p->round, - p->quant_fp[0], qcoeff, dqcoeff, - pd->dequant[0], eob); - break; - case TX_16X16: - vpx_fdct16x16_1(src_diff, coeff, diff_stride); - vpx_quantize_dc(coeff, 256, x->skip_block, p->round, - p->quant_fp[0], qcoeff, dqcoeff, - pd->dequant[0], eob); - break; - case TX_8X8: - vpx_fdct8x8_1(src_diff, coeff, diff_stride); - vpx_quantize_dc(coeff, 64, x->skip_block, p->round, - p->quant_fp[0], qcoeff, dqcoeff, - pd->dequant[0], eob); - break; - case TX_4X4: - if (xd->lossless[xd->mi[0]->mbmi.segment_id]) { - vp10_fwht4x4(src_diff, coeff, diff_stride); - } else { - vpx_fdct4x4(src_diff, coeff, diff_stride); - } - vpx_quantize_dc(coeff, 16, x->skip_block, p->round, - p->quant_fp[0], qcoeff, dqcoeff, - pd->dequant[0], eob); - break; - default: - assert(0); - break; - } -} - - - -void vp10_xform_quant(MACROBLOCK *x, int plane, int block, - int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, TX_SIZE tx_size) { - MACROBLOCKD *const xd = &x->e_mbd; - const struct macroblock_plane 
*const p = &x->plane[plane]; - const struct macroblockd_plane *const pd = &xd->plane[plane]; - PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV; - TX_TYPE tx_type = get_tx_type(plane_type, xd, block); - const scan_order *const scan_order = get_scan(tx_size, tx_type); - tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block); - tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block); - tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); - uint16_t *const eob = &p->eobs[block]; - const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; - const int16_t *src_diff; - src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)]; - -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - switch (tx_size) { - case TX_32X32: - highbd_fwd_txfm_32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride, - tx_type); - vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, - p->round, p->quant, p->quant_shift, qcoeff, - dqcoeff, pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; - case TX_16X16: - highbd_fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type); - vpx_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; - case TX_8X8: - highbd_fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type); - vpx_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; - case TX_4X4: - vp10_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type, - xd->lossless[xd->mi[0]->mbmi.segment_id]); - vpx_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; - default: - assert(0); - } - return; - } -#endif // CONFIG_VP9_HIGHBITDEPTH - - switch 
(tx_size) { - case TX_32X32: - fwd_txfm_32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride, tx_type); - vpx_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, scan_order->scan, - scan_order->iscan); - break; - case TX_16X16: - fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type); - vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; - case TX_8X8: - fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type); - vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; - case TX_4X4: - vp10_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type, - xd->lossless[xd->mi[0]->mbmi.segment_id]); - vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; - default: - assert(0); - break; - } -} - -static void encode_block(int plane, int block, int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { - struct encode_b_args *const args = arg; - MACROBLOCK *const x = args->x; - MACROBLOCKD *const xd = &x->e_mbd; - struct optimize_ctx *const ctx = args->ctx; - struct macroblock_plane *const p = &x->plane[plane]; - struct macroblockd_plane *const pd = &xd->plane[plane]; - tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); - uint8_t *dst; - ENTROPY_CONTEXT *a, *l; - TX_TYPE tx_type = get_tx_type(pd->plane_type, xd, block); - dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col]; - a = &ctx->ta[plane][blk_col]; - l = &ctx->tl[plane][blk_row]; - - // TODO(jingning): per transformed block zero forcing only enabled for - // luma component. will integrate chroma components as well. 
- if (x->zcoeff_blk[tx_size][block] && plane == 0) { - p->eobs[block] = 0; - *a = *l = 0; - return; - } - - if (!x->skip_recode) { - if (x->quant_fp) { - // Encoding process for rtc mode - if (x->skip_txfm[0] == SKIP_TXFM_AC_DC && plane == 0) { - // skip forward transform - p->eobs[block] = 0; - *a = *l = 0; - return; - } else { - vp10_xform_quant_fp(x, plane, block, blk_row, blk_col, - plane_bsize, tx_size); - } - } else { - if (max_txsize_lookup[plane_bsize] == tx_size) { - int txfm_blk_index = (plane << 2) + (block >> (tx_size << 1)); - if (x->skip_txfm[txfm_blk_index] == SKIP_TXFM_NONE) { - // full forward transform and quantization - vp10_xform_quant(x, plane, block, blk_row, blk_col, - plane_bsize, tx_size); - } else if (x->skip_txfm[txfm_blk_index] == SKIP_TXFM_AC_ONLY) { - // fast path forward transform and quantization - vp10_xform_quant_dc(x, plane, block, blk_row, blk_col, - plane_bsize, tx_size); - } else { - // skip forward transform - p->eobs[block] = 0; - *a = *l = 0; - return; - } - } else { - vp10_xform_quant(x, plane, block, blk_row, blk_col, - plane_bsize, tx_size); - } - } - } - - if (x->optimize && (!x->skip_recode || !x->skip_optimize)) { - const int ctx = combine_entropy_contexts(*a, *l); - *a = *l = optimize_b(x, plane, block, tx_size, ctx) > 0; - } else { - *a = *l = p->eobs[block] > 0; - } - - if (p->eobs[block]) - *(args->skip) = 0; - - if (p->eobs[block] == 0) - return; -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - switch (tx_size) { - case TX_32X32: - vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, pd->dst.stride, - p->eobs[block], xd->bd, tx_type); - break; - case TX_16X16: - vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, pd->dst.stride, - p->eobs[block], xd->bd, tx_type); - break; - case TX_8X8: - vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, pd->dst.stride, - p->eobs[block], xd->bd, tx_type); - break; - case TX_4X4: - // this is like vp10_short_idct4x4 but has a special case around eob<=1 - // which is 
significant (not just an optimization) for the lossless - // case. - vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, pd->dst.stride, - p->eobs[block], xd->bd, tx_type, - xd->lossless[xd->mi[0]->mbmi.segment_id]); - break; - default: - assert(0 && "Invalid transform size"); - break; - } - - return; - } -#endif // CONFIG_VP9_HIGHBITDEPTH - - switch (tx_size) { - case TX_32X32: - vp10_inv_txfm_add_32x32(dqcoeff, dst, pd->dst.stride, p->eobs[block], - tx_type); - break; - case TX_16X16: - vp10_inv_txfm_add_16x16(dqcoeff, dst, pd->dst.stride, p->eobs[block], - tx_type); - break; - case TX_8X8: - vp10_inv_txfm_add_8x8(dqcoeff, dst, pd->dst.stride, p->eobs[block], - tx_type); - break; - case TX_4X4: - // this is like vp10_short_idct4x4 but has a special case around eob<=1 - // which is significant (not just an optimization) for the lossless - // case. - vp10_inv_txfm_add_4x4(dqcoeff, dst, pd->dst.stride, p->eobs[block], - tx_type, xd->lossless[xd->mi[0]->mbmi.segment_id]); - break; - default: - assert(0 && "Invalid transform size"); - break; - } -} - -static void encode_block_pass1(int plane, int block, int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { - MACROBLOCK *const x = (MACROBLOCK *)arg; - MACROBLOCKD *const xd = &x->e_mbd; - struct macroblock_plane *const p = &x->plane[plane]; - struct macroblockd_plane *const pd = &xd->plane[plane]; - tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); - uint8_t *dst; - dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col]; - - vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size); - - if (p->eobs[block] > 0) { -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - if (xd->lossless[0]) { - vp10_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, - p->eobs[block], xd->bd); - } else { - vp10_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, - p->eobs[block], xd->bd); - } - return; - } -#endif // CONFIG_VP9_HIGHBITDEPTH - if 
(xd->lossless[0]) { - vp10_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); - } else { - vp10_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); - } - } -} - -void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) { - vp10_subtract_plane(x, bsize, 0); - vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0, - encode_block_pass1, x); -} - -void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) { - MACROBLOCKD *const xd = &x->e_mbd; - struct optimize_ctx ctx; - MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi; - struct encode_b_args arg = {x, &ctx, &mbmi->skip}; - int plane; - - mbmi->skip = 1; - - if (x->skip) - return; - - for (plane = 0; plane < MAX_MB_PLANE; ++plane) { - if (!x->skip_recode) - vp10_subtract_plane(x, bsize, plane); - - if (x->optimize && (!x->skip_recode || !x->skip_optimize)) { - const struct macroblockd_plane* const pd = &xd->plane[plane]; - const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size; - vp10_get_entropy_contexts(bsize, tx_size, pd, - ctx.ta[plane], ctx.tl[plane]); - } - - vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block, - &arg); - } -} - -void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { - struct encode_b_args* const args = arg; - MACROBLOCK *const x = args->x; - MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi; - struct macroblock_plane *const p = &x->plane[plane]; - struct macroblockd_plane *const pd = &xd->plane[plane]; - tran_low_t *coeff = BLOCK_OFFSET(p->coeff, block); - tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block); - tran_low_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); - PLANE_TYPE plane_type = (plane == 0) ? 
PLANE_TYPE_Y : PLANE_TYPE_UV; - TX_TYPE tx_type = get_tx_type(plane_type, xd, block); - const scan_order *const scan_order = get_scan(tx_size, tx_type); - PREDICTION_MODE mode; - const int bwl = b_width_log2_lookup[plane_bsize]; - const int bhl = b_height_log2_lookup[plane_bsize]; - const int diff_stride = 4 * (1 << bwl); - uint8_t *src, *dst; - int16_t *src_diff; - uint16_t *eob = &p->eobs[block]; - const int src_stride = p->src.stride; - const int dst_stride = pd->dst.stride; - dst = &pd->dst.buf[4 * (blk_row * dst_stride + blk_col)]; - src = &p->src.buf[4 * (blk_row * src_stride + blk_col)]; - src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)]; - - mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode; - vp10_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride, - dst, dst_stride, blk_col, blk_row, plane); - -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - switch (tx_size) { - case TX_32X32: - if (!x->skip_recode) { - vpx_highbd_subtract_block(32, 32, src_diff, diff_stride, - src, src_stride, dst, dst_stride, xd->bd); - highbd_fwd_txfm_32x32(x->use_lp32x32fdct, src_diff, coeff, - diff_stride, tx_type); - vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, - p->round, p->quant, p->quant_shift, - qcoeff, dqcoeff, pd->dequant, eob, - scan_order->scan, scan_order->iscan); - } - if (*eob) - vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, dst_stride, *eob, xd->bd, - tx_type); - break; - case TX_16X16: - if (!x->skip_recode) { - vpx_highbd_subtract_block(16, 16, src_diff, diff_stride, - src, src_stride, dst, dst_stride, xd->bd); - highbd_fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type); - vpx_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - } - if (*eob) - vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, xd->bd, - tx_type); - break; - case TX_8X8: - if 
(!x->skip_recode) { - vpx_highbd_subtract_block(8, 8, src_diff, diff_stride, - src, src_stride, dst, dst_stride, xd->bd); - highbd_fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type); - vpx_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - } - if (*eob) - vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, xd->bd, - tx_type); - break; - case TX_4X4: - if (!x->skip_recode) { - vpx_highbd_subtract_block(4, 4, src_diff, diff_stride, - src, src_stride, dst, dst_stride, xd->bd); - vp10_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type, - xd->lossless[mbmi->segment_id]); - vpx_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - } - - if (*eob) - // this is like vp10_short_idct4x4 but has a special case around - // eob<=1 which is significant (not just an optimization) for the - // lossless case. 
- vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, dst_stride, *eob, xd->bd, - tx_type, xd->lossless[mbmi->segment_id]); - break; - default: - assert(0); - return; - } - if (*eob) - *(args->skip) = 0; - return; - } -#endif // CONFIG_VP9_HIGHBITDEPTH - - switch (tx_size) { - case TX_32X32: - if (!x->skip_recode) { - vpx_subtract_block(32, 32, src_diff, diff_stride, - src, src_stride, dst, dst_stride); - fwd_txfm_32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride, - tx_type); - vpx_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, scan_order->scan, - scan_order->iscan); - } - if (*eob) - vp10_inv_txfm_add_32x32(dqcoeff, dst, dst_stride, *eob, tx_type); - break; - case TX_16X16: - if (!x->skip_recode) { - vpx_subtract_block(16, 16, src_diff, diff_stride, - src, src_stride, dst, dst_stride); - fwd_txfm_16x16(src_diff, coeff, diff_stride, tx_type); - vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, scan_order->scan, - scan_order->iscan); - } - if (*eob) - vp10_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, tx_type); - break; - case TX_8X8: - if (!x->skip_recode) { - vpx_subtract_block(8, 8, src_diff, diff_stride, - src, src_stride, dst, dst_stride); - fwd_txfm_8x8(src_diff, coeff, diff_stride, tx_type); - vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant, - p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, scan_order->scan, - scan_order->iscan); - } - if (*eob) - vp10_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, tx_type); - break; - case TX_4X4: - if (!x->skip_recode) { - vpx_subtract_block(4, 4, src_diff, diff_stride, - src, src_stride, dst, dst_stride); - vp10_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type, - xd->lossless[mbmi->segment_id]); - vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant, - p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, scan_order->scan, 
- scan_order->iscan); - } - - if (*eob) { - // this is like vp10_short_idct4x4 but has a special case around eob<=1 - // which is significant (not just an optimization) for the lossless - // case. - vp10_inv_txfm_add_4x4(dqcoeff, dst, dst_stride, *eob, tx_type, - xd->lossless[mbmi->segment_id]); - } - break; - default: - assert(0); - break; - } - if (*eob) - *(args->skip) = 0; -} - -void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) { - const MACROBLOCKD *const xd = &x->e_mbd; - struct encode_b_args arg = {x, NULL, &xd->mi[0]->mbmi.skip}; - - vp10_foreach_transformed_block_in_plane(xd, bsize, plane, - vp10_encode_block_intra, &arg); -} diff --git a/libs/libvpx/vp10/encoder/encodemb.h b/libs/libvpx/vp10/encoder/encodemb.h deleted file mode 100644 index 2e6516e0b0..0000000000 --- a/libs/libvpx/vp10/encoder/encodemb.h +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_ENCODER_ENCODEMB_H_ -#define VP10_ENCODER_ENCODEMB_H_ - -#include "./vpx_config.h" -#include "vp10/encoder/block.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct encode_b_args { - MACROBLOCK *x; - struct optimize_ctx *ctx; - int8_t *skip; -}; -void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize); -void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize); -void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, - int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, TX_SIZE tx_size); -void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block, - int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, TX_SIZE tx_size); -void vp10_xform_quant(MACROBLOCK *x, int plane, int block, - int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, TX_SIZE tx_size); - -void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane); - -void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg); - -void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane); - -void vp10_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff, - int diff_stride, TX_TYPE tx_type, int lossless); - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff, - int diff_stride, TX_TYPE tx_type, int lossless); -#endif // CONFIG_VP9_HIGHBITDEPTH - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_ENCODEMB_H_ diff --git a/libs/libvpx/vp10/encoder/encodemv.c b/libs/libvpx/vp10/encoder/encodemv.c deleted file mode 100644 index 0736c65b3f..0000000000 --- a/libs/libvpx/vp10/encoder/encodemv.c +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - -#include "vp10/common/common.h" -#include "vp10/common/entropymode.h" - -#include "vp10/encoder/cost.h" -#include "vp10/encoder/encodemv.h" -#include "vp10/encoder/subexp.h" - -#include "vpx_dsp/vpx_dsp_common.h" - -static struct vp10_token mv_joint_encodings[MV_JOINTS]; -static struct vp10_token mv_class_encodings[MV_CLASSES]; -static struct vp10_token mv_fp_encodings[MV_FP_SIZE]; -static struct vp10_token mv_class0_encodings[CLASS0_SIZE]; - -void vp10_entropy_mv_init(void) { - vp10_tokens_from_tree(mv_joint_encodings, vp10_mv_joint_tree); - vp10_tokens_from_tree(mv_class_encodings, vp10_mv_class_tree); - vp10_tokens_from_tree(mv_class0_encodings, vp10_mv_class0_tree); - vp10_tokens_from_tree(mv_fp_encodings, vp10_mv_fp_tree); -} - -static void encode_mv_component(vpx_writer* w, int comp, - const nmv_component* mvcomp, int usehp) { - int offset; - const int sign = comp < 0; - const int mag = sign ? -comp : comp; - const int mv_class = vp10_get_mv_class(mag - 1, &offset); - const int d = offset >> 3; // int mv data - const int fr = (offset >> 1) & 3; // fractional mv data - const int hp = offset & 1; // high precision mv data - - assert(comp != 0); - - // Sign - vpx_write(w, sign, mvcomp->sign); - - // Class - vp10_write_token(w, vp10_mv_class_tree, mvcomp->classes, - &mv_class_encodings[mv_class]); - - // Integer bits - if (mv_class == MV_CLASS_0) { - vp10_write_token(w, vp10_mv_class0_tree, mvcomp->class0, - &mv_class0_encodings[d]); - } else { - int i; - const int n = mv_class + CLASS0_BITS - 1; // number of bits - for (i = 0; i < n; ++i) - vpx_write(w, (d >> i) & 1, mvcomp->bits[i]); - } - - // Fractional bits - vp10_write_token(w, vp10_mv_fp_tree, - mv_class == MV_CLASS_0 ? 
mvcomp->class0_fp[d] : mvcomp->fp, - &mv_fp_encodings[fr]); - - // High precision bit - if (usehp) - vpx_write(w, hp, - mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp); -} - - -static void build_nmv_component_cost_table(int *mvcost, - const nmv_component* const mvcomp, - int usehp) { - int i, v; - int sign_cost[2], class_cost[MV_CLASSES], class0_cost[CLASS0_SIZE]; - int bits_cost[MV_OFFSET_BITS][2]; - int class0_fp_cost[CLASS0_SIZE][MV_FP_SIZE], fp_cost[MV_FP_SIZE]; - int class0_hp_cost[2], hp_cost[2]; - - sign_cost[0] = vp10_cost_zero(mvcomp->sign); - sign_cost[1] = vp10_cost_one(mvcomp->sign); - vp10_cost_tokens(class_cost, mvcomp->classes, vp10_mv_class_tree); - vp10_cost_tokens(class0_cost, mvcomp->class0, vp10_mv_class0_tree); - for (i = 0; i < MV_OFFSET_BITS; ++i) { - bits_cost[i][0] = vp10_cost_zero(mvcomp->bits[i]); - bits_cost[i][1] = vp10_cost_one(mvcomp->bits[i]); - } - - for (i = 0; i < CLASS0_SIZE; ++i) - vp10_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp10_mv_fp_tree); - vp10_cost_tokens(fp_cost, mvcomp->fp, vp10_mv_fp_tree); - - if (usehp) { - class0_hp_cost[0] = vp10_cost_zero(mvcomp->class0_hp); - class0_hp_cost[1] = vp10_cost_one(mvcomp->class0_hp); - hp_cost[0] = vp10_cost_zero(mvcomp->hp); - hp_cost[1] = vp10_cost_one(mvcomp->hp); - } - mvcost[0] = 0; - for (v = 1; v <= MV_MAX; ++v) { - int z, c, o, d, e, f, cost = 0; - z = v - 1; - c = vp10_get_mv_class(z, &o); - cost += class_cost[c]; - d = (o >> 3); /* int mv data */ - f = (o >> 1) & 3; /* fractional pel mv data */ - e = (o & 1); /* high precision mv data */ - if (c == MV_CLASS_0) { - cost += class0_cost[d]; - } else { - int i, b; - b = c + CLASS0_BITS - 1; /* number of bits */ - for (i = 0; i < b; ++i) - cost += bits_cost[i][((d >> i) & 1)]; - } - if (c == MV_CLASS_0) { - cost += class0_fp_cost[d][f]; - } else { - cost += fp_cost[f]; - } - if (usehp) { - if (c == MV_CLASS_0) { - cost += class0_hp_cost[e]; - } else { - cost += hp_cost[e]; - } - } - mvcost[v] = cost + 
sign_cost[0]; - mvcost[-v] = cost + sign_cost[1]; - } -} - -static void update_mv(vpx_writer *w, const unsigned int ct[2], vpx_prob *cur_p, - vpx_prob upd_p) { -#if CONFIG_MISC_FIXES - (void) upd_p; - vp10_cond_prob_diff_update(w, cur_p, ct); -#else - const vpx_prob new_p = get_binary_prob(ct[0], ct[1]) | 1; - const int update = cost_branch256(ct, *cur_p) + vp10_cost_zero(upd_p) > - cost_branch256(ct, new_p) + vp10_cost_one(upd_p) + 7 * 256; - vpx_write(w, update, upd_p); - if (update) { - *cur_p = new_p; - vpx_write_literal(w, new_p >> 1, 7); - } -#endif -} - -static void write_mv_update(const vpx_tree_index *tree, - vpx_prob probs[/*n - 1*/], - const unsigned int counts[/*n - 1*/], - int n, vpx_writer *w) { - int i; - unsigned int branch_ct[32][2]; - - // Assuming max number of probabilities <= 32 - assert(n <= 32); - - vp10_tree_probs_from_distribution(tree, branch_ct, counts); - for (i = 0; i < n - 1; ++i) - update_mv(w, branch_ct[i], &probs[i], MV_UPDATE_PROB); -} - -void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vpx_writer *w, - nmv_context_counts *const counts) { - int i, j; - nmv_context *const mvc = &cm->fc->nmvc; - - write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS, w); - - for (i = 0; i < 2; ++i) { - nmv_component *comp = &mvc->comps[i]; - nmv_component_counts *comp_counts = &counts->comps[i]; - - update_mv(w, comp_counts->sign, &comp->sign, MV_UPDATE_PROB); - write_mv_update(vp10_mv_class_tree, comp->classes, comp_counts->classes, - MV_CLASSES, w); - write_mv_update(vp10_mv_class0_tree, comp->class0, comp_counts->class0, - CLASS0_SIZE, w); - for (j = 0; j < MV_OFFSET_BITS; ++j) - update_mv(w, comp_counts->bits[j], &comp->bits[j], MV_UPDATE_PROB); - } - - for (i = 0; i < 2; ++i) { - for (j = 0; j < CLASS0_SIZE; ++j) - write_mv_update(vp10_mv_fp_tree, mvc->comps[i].class0_fp[j], - counts->comps[i].class0_fp[j], MV_FP_SIZE, w); - - write_mv_update(vp10_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp, - MV_FP_SIZE, w); - } 
- - if (usehp) { - for (i = 0; i < 2; ++i) { - update_mv(w, counts->comps[i].class0_hp, &mvc->comps[i].class0_hp, - MV_UPDATE_PROB); - update_mv(w, counts->comps[i].hp, &mvc->comps[i].hp, MV_UPDATE_PROB); - } - } -} - -void vp10_encode_mv(VP10_COMP* cpi, vpx_writer* w, - const MV* mv, const MV* ref, - const nmv_context* mvctx, int usehp) { - const MV diff = {mv->row - ref->row, - mv->col - ref->col}; - const MV_JOINT_TYPE j = vp10_get_mv_joint(&diff); - usehp = usehp && vp10_use_mv_hp(ref); - - vp10_write_token(w, vp10_mv_joint_tree, mvctx->joints, &mv_joint_encodings[j]); - if (mv_joint_vertical(j)) - encode_mv_component(w, diff.row, &mvctx->comps[0], usehp); - - if (mv_joint_horizontal(j)) - encode_mv_component(w, diff.col, &mvctx->comps[1], usehp); - - // If auto_mv_step_size is enabled then keep track of the largest - // motion vector component used. - if (cpi->sf.mv.auto_mv_step_size) { - unsigned int maxv = VPXMAX(abs(mv->row), abs(mv->col)) >> 3; - cpi->max_mv_magnitude = VPXMAX(maxv, cpi->max_mv_magnitude); - } -} - -void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2], - const nmv_context* ctx, int usehp) { - vp10_cost_tokens(mvjoint, ctx->joints, vp10_mv_joint_tree); - build_nmv_component_cost_table(mvcost[0], &ctx->comps[0], usehp); - build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usehp); -} - -static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext, - const int_mv mvs[2], - nmv_context_counts *counts) { - int i; - - for (i = 0; i < 1 + has_second_ref(mbmi); ++i) { - const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0].as_mv; - const MV diff = {mvs[i].as_mv.row - ref->row, - mvs[i].as_mv.col - ref->col}; - vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref)); - } -} - -void vp10_update_mv_count(ThreadData *td) { - const MACROBLOCKD *xd = &td->mb.e_mbd; - const MODE_INFO *mi = xd->mi[0]; - const MB_MODE_INFO *const mbmi = &mi->mbmi; - const MB_MODE_INFO_EXT *mbmi_ext = td->mb.mbmi_ext; - - if (mbmi->sb_type < 
BLOCK_8X8) { - const int num_4x4_w = num_4x4_blocks_wide_lookup[mbmi->sb_type]; - const int num_4x4_h = num_4x4_blocks_high_lookup[mbmi->sb_type]; - int idx, idy; - - for (idy = 0; idy < 2; idy += num_4x4_h) { - for (idx = 0; idx < 2; idx += num_4x4_w) { - const int i = idy * 2 + idx; - if (mi->bmi[i].as_mode == NEWMV) - inc_mvs(mbmi, mbmi_ext, mi->bmi[i].as_mv, &td->counts->mv); - } - } - } else { - if (mbmi->mode == NEWMV) - inc_mvs(mbmi, mbmi_ext, mbmi->mv, &td->counts->mv); - } -} - diff --git a/libs/libvpx/vp10/encoder/encodemv.h b/libs/libvpx/vp10/encoder/encodemv.h deleted file mode 100644 index 006f6d7c71..0000000000 --- a/libs/libvpx/vp10/encoder/encodemv.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - - -#ifndef VP10_ENCODER_ENCODEMV_H_ -#define VP10_ENCODER_ENCODEMV_H_ - -#include "vp10/encoder/encoder.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void vp10_entropy_mv_init(void); - -void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vpx_writer *w, - nmv_context_counts *const counts); - -void vp10_encode_mv(VP10_COMP *cpi, vpx_writer* w, const MV* mv, const MV* ref, - const nmv_context* mvctx, int usehp); - -void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2], - const nmv_context* mvctx, int usehp); - -void vp10_update_mv_count(ThreadData *td); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_ENCODEMV_H_ diff --git a/libs/libvpx/vp10/encoder/encoder.c b/libs/libvpx/vp10/encoder/encoder.c deleted file mode 100644 index d3a4dc12e4..0000000000 --- a/libs/libvpx/vp10/encoder/encoder.c +++ /dev/null @@ -1,4479 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include -#include - -#include "./vpx_config.h" - -#include "vp10/common/alloccommon.h" -#include "vp10/common/filter.h" -#include "vp10/common/idct.h" -#if CONFIG_VP9_POSTPROC -#include "vp10/common/postproc.h" -#endif -#include "vp10/common/reconinter.h" -#include "vp10/common/reconintra.h" -#include "vp10/common/tile_common.h" - -#include "vp10/encoder/aq_complexity.h" -#include "vp10/encoder/aq_cyclicrefresh.h" -#include "vp10/encoder/aq_variance.h" -#include "vp10/encoder/bitstream.h" -#include "vp10/encoder/context_tree.h" -#include "vp10/encoder/encodeframe.h" -#include "vp10/encoder/encodemv.h" -#include "vp10/encoder/encoder.h" -#include "vp10/encoder/ethread.h" -#include "vp10/encoder/firstpass.h" -#include "vp10/encoder/mbgraph.h" -#include "vp10/encoder/picklpf.h" -#include "vp10/encoder/ratectrl.h" -#include "vp10/encoder/rd.h" -#include "vp10/encoder/resize.h" -#include "vp10/encoder/segmentation.h" -#include "vp10/encoder/skin_detection.h" -#include "vp10/encoder/speed_features.h" -#include "vp10/encoder/temporal_filter.h" - -#include "./vp10_rtcd.h" -#include "./vpx_dsp_rtcd.h" -#include "./vpx_scale_rtcd.h" -#include "vpx/internal/vpx_psnr.h" -#if CONFIG_INTERNAL_STATS -#include "vpx_dsp/ssim.h" -#endif -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_dsp/vpx_filter.h" -#include "vpx_ports/mem.h" -#include "vpx_ports/system_state.h" -#include "vpx_ports/vpx_timer.h" -#include "vpx_scale/vpx_scale.h" - -#define AM_SEGMENT_ID_INACTIVE 7 -#define AM_SEGMENT_ID_ACTIVE 0 - -#define SHARP_FILTER_QTHRESH 0 /* Q threshold for 8-tap sharp filter */ - -#define ALTREF_HIGH_PRECISION_MV 1 // Whether to use high precision mv - // for altref computation. -#define HIGH_PRECISION_MV_QTHRESH 200 // Q threshold for high precision - // mv. Choose a very high value for - // now so that HIGH_PRECISION is always - // chosen. 
-// #define OUTPUT_YUV_REC - -#ifdef OUTPUT_YUV_DENOISED -FILE *yuv_denoised_file = NULL; -#endif -#ifdef OUTPUT_YUV_SKINMAP -FILE *yuv_skinmap_file = NULL; -#endif -#ifdef OUTPUT_YUV_REC -FILE *yuv_rec_file; -#endif - -#if 0 -FILE *framepsnr; -FILE *kf_list; -FILE *keyfile; -#endif - -static INLINE void Scale2Ratio(VPX_SCALING mode, int *hr, int *hs) { - switch (mode) { - case NORMAL: - *hr = 1; - *hs = 1; - break; - case FOURFIVE: - *hr = 4; - *hs = 5; - break; - case THREEFIVE: - *hr = 3; - *hs = 5; - break; - case ONETWO: - *hr = 1; - *hs = 2; - break; - default: - *hr = 1; - *hs = 1; - assert(0); - break; - } -} - -// Mark all inactive blocks as active. Other segmentation features may be set -// so memset cannot be used, instead only inactive blocks should be reset. -static void suppress_active_map(VP10_COMP *cpi) { - unsigned char *const seg_map = cpi->segmentation_map; - int i; - if (cpi->active_map.enabled || cpi->active_map.update) - for (i = 0; i < cpi->common.mi_rows * cpi->common.mi_cols; ++i) - if (seg_map[i] == AM_SEGMENT_ID_INACTIVE) - seg_map[i] = AM_SEGMENT_ID_ACTIVE; -} - -static void apply_active_map(VP10_COMP *cpi) { - struct segmentation *const seg = &cpi->common.seg; - unsigned char *const seg_map = cpi->segmentation_map; - const unsigned char *const active_map = cpi->active_map.map; - int i; - - assert(AM_SEGMENT_ID_ACTIVE == CR_SEGMENT_ID_BASE); - - if (frame_is_intra_only(&cpi->common)) { - cpi->active_map.enabled = 0; - cpi->active_map.update = 1; - } - - if (cpi->active_map.update) { - if (cpi->active_map.enabled) { - for (i = 0; i < cpi->common.mi_rows * cpi->common.mi_cols; ++i) - if (seg_map[i] == AM_SEGMENT_ID_ACTIVE) seg_map[i] = active_map[i]; - vp10_enable_segmentation(seg); - vp10_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP); - vp10_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF); - // Setting the data to -MAX_LOOP_FILTER will result in the computed loop - // filter level being zero regardless of the 
value of seg->abs_delta. - vp10_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, - SEG_LVL_ALT_LF, -MAX_LOOP_FILTER); - } else { - vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP); - vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF); - if (seg->enabled) { - seg->update_data = 1; - seg->update_map = 1; - } - } - cpi->active_map.update = 0; - } -} - -int vp10_set_active_map(VP10_COMP* cpi, - unsigned char* new_map_16x16, - int rows, - int cols) { - if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) { - unsigned char *const active_map_8x8 = cpi->active_map.map; - const int mi_rows = cpi->common.mi_rows; - const int mi_cols = cpi->common.mi_cols; - cpi->active_map.update = 1; - if (new_map_16x16) { - int r, c; - for (r = 0; r < mi_rows; ++r) { - for (c = 0; c < mi_cols; ++c) { - active_map_8x8[r * mi_cols + c] = - new_map_16x16[(r >> 1) * cols + (c >> 1)] - ? AM_SEGMENT_ID_ACTIVE - : AM_SEGMENT_ID_INACTIVE; - } - } - cpi->active_map.enabled = 1; - } else { - cpi->active_map.enabled = 0; - } - return 0; - } else { - return -1; - } -} - -int vp10_get_active_map(VP10_COMP* cpi, - unsigned char* new_map_16x16, - int rows, - int cols) { - if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols && - new_map_16x16) { - unsigned char* const seg_map_8x8 = cpi->segmentation_map; - const int mi_rows = cpi->common.mi_rows; - const int mi_cols = cpi->common.mi_cols; - memset(new_map_16x16, !cpi->active_map.enabled, rows * cols); - if (cpi->active_map.enabled) { - int r, c; - for (r = 0; r < mi_rows; ++r) { - for (c = 0; c < mi_cols; ++c) { - // Cyclic refresh segments are considered active despite not having - // AM_SEGMENT_ID_ACTIVE - new_map_16x16[(r >> 1) * cols + (c >> 1)] |= - seg_map_8x8[r * mi_cols + c] != AM_SEGMENT_ID_INACTIVE; - } - } - } - return 0; - } else { - return -1; - } -} - -void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv) { - MACROBLOCK *const mb = &cpi->td.mb; - 
cpi->common.allow_high_precision_mv = allow_high_precision_mv; - if (cpi->common.allow_high_precision_mv) { - mb->mvcost = mb->nmvcost_hp; - mb->mvsadcost = mb->nmvsadcost_hp; - } else { - mb->mvcost = mb->nmvcost; - mb->mvsadcost = mb->nmvsadcost; - } -} - -static void setup_frame(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - // Set up entropy context depending on frame type. The decoder mandates - // the use of the default context, index 0, for keyframes and inter - // frames where the error_resilient_mode or intra_only flag is set. For - // other inter-frames the encoder currently uses only two contexts; - // context 1 for ALTREF frames and context 0 for the others. - if (frame_is_intra_only(cm) || cm->error_resilient_mode) { - vp10_setup_past_independence(cm); - } else { - cm->frame_context_idx = cpi->refresh_alt_ref_frame; - } - - if (cm->frame_type == KEY_FRAME) { - cpi->refresh_golden_frame = 1; - cpi->refresh_alt_ref_frame = 1; - vp10_zero(cpi->interp_filter_selected); - } else { - *cm->fc = cm->frame_contexts[cm->frame_context_idx]; - vp10_zero(cpi->interp_filter_selected[0]); - } -} - -static void vp10_enc_setup_mi(VP10_COMMON *cm) { - int i; - cm->mi = cm->mip + cm->mi_stride + 1; - memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip)); - cm->prev_mi = cm->prev_mip + cm->mi_stride + 1; - // Clear top border row - memset(cm->prev_mip, 0, sizeof(*cm->prev_mip) * cm->mi_stride); - // Clear left border column - for (i = 1; i < cm->mi_rows + 1; ++i) - memset(&cm->prev_mip[i * cm->mi_stride], 0, sizeof(*cm->prev_mip)); - - cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1; - cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1; - - memset(cm->mi_grid_base, 0, - cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base)); -} - -static int vp10_enc_alloc_mi(VP10_COMMON *cm, int mi_size) { - cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip)); - if (!cm->mip) - return 1; - cm->prev_mip = vpx_calloc(mi_size, 
sizeof(*cm->prev_mip)); - if (!cm->prev_mip) - return 1; - cm->mi_alloc_size = mi_size; - - cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*)); - if (!cm->mi_grid_base) - return 1; - cm->prev_mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*)); - if (!cm->prev_mi_grid_base) - return 1; - - return 0; -} - -static void vp10_enc_free_mi(VP10_COMMON *cm) { - vpx_free(cm->mip); - cm->mip = NULL; - vpx_free(cm->prev_mip); - cm->prev_mip = NULL; - vpx_free(cm->mi_grid_base); - cm->mi_grid_base = NULL; - vpx_free(cm->prev_mi_grid_base); - cm->prev_mi_grid_base = NULL; -} - -static void vp10_swap_mi_and_prev_mi(VP10_COMMON *cm) { - // Current mip will be the prev_mip for the next frame. - MODE_INFO **temp_base = cm->prev_mi_grid_base; - MODE_INFO *temp = cm->prev_mip; - cm->prev_mip = cm->mip; - cm->mip = temp; - - // Update the upper left visible macroblock ptrs. - cm->mi = cm->mip + cm->mi_stride + 1; - cm->prev_mi = cm->prev_mip + cm->mi_stride + 1; - - cm->prev_mi_grid_base = cm->mi_grid_base; - cm->mi_grid_base = temp_base; - cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1; - cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1; -} - -void vp10_initialize_enc(void) { - static volatile int init_done = 0; - - if (!init_done) { - vp10_rtcd(); - vpx_dsp_rtcd(); - vpx_scale_rtcd(); - vp10_init_intra_predictors(); - vp10_init_me_luts(); - vp10_rc_init_minq_luts(); - vp10_entropy_mv_init(); - vp10_temporal_filter_init(); - vp10_encode_token_init(); - init_done = 1; - } -} - -static void dealloc_compressor_data(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - - vpx_free(cpi->mbmi_ext_base); - cpi->mbmi_ext_base = NULL; - - vpx_free(cpi->tile_data); - cpi->tile_data = NULL; - - // Delete sementation map - vpx_free(cpi->segmentation_map); - cpi->segmentation_map = NULL; - vpx_free(cpi->coding_context.last_frame_seg_map_copy); - cpi->coding_context.last_frame_seg_map_copy = NULL; - - 
vpx_free(cpi->nmvcosts[0]); - vpx_free(cpi->nmvcosts[1]); - cpi->nmvcosts[0] = NULL; - cpi->nmvcosts[1] = NULL; - - vpx_free(cpi->nmvcosts_hp[0]); - vpx_free(cpi->nmvcosts_hp[1]); - cpi->nmvcosts_hp[0] = NULL; - cpi->nmvcosts_hp[1] = NULL; - - vpx_free(cpi->nmvsadcosts[0]); - vpx_free(cpi->nmvsadcosts[1]); - cpi->nmvsadcosts[0] = NULL; - cpi->nmvsadcosts[1] = NULL; - - vpx_free(cpi->nmvsadcosts_hp[0]); - vpx_free(cpi->nmvsadcosts_hp[1]); - cpi->nmvsadcosts_hp[0] = NULL; - cpi->nmvsadcosts_hp[1] = NULL; - - vp10_cyclic_refresh_free(cpi->cyclic_refresh); - cpi->cyclic_refresh = NULL; - - vpx_free(cpi->active_map.map); - cpi->active_map.map = NULL; - - vp10_free_ref_frame_buffers(cm->buffer_pool); -#if CONFIG_VP9_POSTPROC - vp10_free_postproc_buffers(cm); -#endif - vp10_free_context_buffers(cm); - - vpx_free_frame_buffer(&cpi->last_frame_uf); - vpx_free_frame_buffer(&cpi->scaled_source); - vpx_free_frame_buffer(&cpi->scaled_last_source); - vpx_free_frame_buffer(&cpi->alt_ref_buffer); - vp10_lookahead_destroy(cpi->lookahead); - - vpx_free(cpi->tile_tok[0][0]); - cpi->tile_tok[0][0] = 0; - - vp10_free_pc_tree(&cpi->td); - - if (cpi->source_diff_var != NULL) { - vpx_free(cpi->source_diff_var); - cpi->source_diff_var = NULL; - } -} - -static void save_coding_context(VP10_COMP *cpi) { - CODING_CONTEXT *const cc = &cpi->coding_context; - VP10_COMMON *cm = &cpi->common; - - // Stores a snapshot of key state variables which can subsequently be - // restored with a call to vp10_restore_coding_context. These functions are - // intended for use in a re-code loop in vp10_compress_frame where the - // quantizer value is adjusted between loop iterations. 
- vp10_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost); - - memcpy(cc->nmvcosts[0], cpi->nmvcosts[0], - MV_VALS * sizeof(*cpi->nmvcosts[0])); - memcpy(cc->nmvcosts[1], cpi->nmvcosts[1], - MV_VALS * sizeof(*cpi->nmvcosts[1])); - memcpy(cc->nmvcosts_hp[0], cpi->nmvcosts_hp[0], - MV_VALS * sizeof(*cpi->nmvcosts_hp[0])); - memcpy(cc->nmvcosts_hp[1], cpi->nmvcosts_hp[1], - MV_VALS * sizeof(*cpi->nmvcosts_hp[1])); - -#if !CONFIG_MISC_FIXES - vp10_copy(cc->segment_pred_probs, cm->segp.pred_probs); -#endif - - memcpy(cpi->coding_context.last_frame_seg_map_copy, - cm->last_frame_seg_map, (cm->mi_rows * cm->mi_cols)); - - vp10_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas); - vp10_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas); - - cc->fc = *cm->fc; -} - -static void restore_coding_context(VP10_COMP *cpi) { - CODING_CONTEXT *const cc = &cpi->coding_context; - VP10_COMMON *cm = &cpi->common; - - // Restore key state variables to the snapshot state stored in the - // previous call to vp10_save_coding_context. 
- vp10_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost); - - memcpy(cpi->nmvcosts[0], cc->nmvcosts[0], MV_VALS * sizeof(*cc->nmvcosts[0])); - memcpy(cpi->nmvcosts[1], cc->nmvcosts[1], MV_VALS * sizeof(*cc->nmvcosts[1])); - memcpy(cpi->nmvcosts_hp[0], cc->nmvcosts_hp[0], - MV_VALS * sizeof(*cc->nmvcosts_hp[0])); - memcpy(cpi->nmvcosts_hp[1], cc->nmvcosts_hp[1], - MV_VALS * sizeof(*cc->nmvcosts_hp[1])); - -#if !CONFIG_MISC_FIXES - vp10_copy(cm->segp.pred_probs, cc->segment_pred_probs); -#endif - - memcpy(cm->last_frame_seg_map, - cpi->coding_context.last_frame_seg_map_copy, - (cm->mi_rows * cm->mi_cols)); - - vp10_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas); - vp10_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas); - - *cm->fc = cc->fc; -} - -static void configure_static_seg_features(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - const RATE_CONTROL *const rc = &cpi->rc; - struct segmentation *const seg = &cm->seg; - - int high_q = (int)(rc->avg_q > 48.0); - int qi_delta; - - // Disable and clear down for KF - if (cm->frame_type == KEY_FRAME) { - // Clear down the global segmentation map - memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols); - seg->update_map = 0; - seg->update_data = 0; - cpi->static_mb_pct = 0; - - // Disable segmentation - vp10_disable_segmentation(seg); - - // Clear down the segment features. - vp10_clearall_segfeatures(seg); - } else if (cpi->refresh_alt_ref_frame) { - // If this is an alt ref frame - // Clear down the global segmentation map - memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols); - seg->update_map = 0; - seg->update_data = 0; - cpi->static_mb_pct = 0; - - // Disable segmentation and individual segment features by default - vp10_disable_segmentation(seg); - vp10_clearall_segfeatures(seg); - - // Scan frames from current to arf frame. - // This function re-enables segmentation if appropriate. 
- vp10_update_mbgraph_stats(cpi); - - // If segmentation was enabled set those features needed for the - // arf itself. - if (seg->enabled) { - seg->update_map = 1; - seg->update_data = 1; - - qi_delta = vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, - cm->bit_depth); - vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2); - vp10_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2); - - vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_Q); - vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_LF); - - // Where relevant assume segment data is delta data - seg->abs_delta = SEGMENT_DELTADATA; - } - } else if (seg->enabled) { - // All other frames if segmentation has been enabled - - // First normal frame in a valid gf or alt ref group - if (rc->frames_since_golden == 0) { - // Set up segment features for normal frames in an arf group - if (rc->source_alt_ref_active) { - seg->update_map = 0; - seg->update_data = 1; - seg->abs_delta = SEGMENT_DELTADATA; - - qi_delta = vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125, - cm->bit_depth); - vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2); - vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_Q); - - vp10_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2); - vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_LF); - - // Segment coding disabled for compred testing - if (high_q || (cpi->static_mb_pct == 100)) { - vp10_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME); - vp10_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME); - vp10_enable_segfeature(seg, 1, SEG_LVL_SKIP); - } - } else { - // Disable segmentation and clear down features if alt ref - // is not active for this group - - vp10_disable_segmentation(seg); - - memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols); - - seg->update_map = 0; - seg->update_data = 0; - - vp10_clearall_segfeatures(seg); - } - } else if (rc->is_src_frame_alt_ref) { - // Special case where we are coding over the top of a previous - // alt ref frame. 
- // Segment coding disabled for compred testing - - // Enable ref frame features for segment 0 as well - vp10_enable_segfeature(seg, 0, SEG_LVL_REF_FRAME); - vp10_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME); - - // All mbs should use ALTREF_FRAME - vp10_clear_segdata(seg, 0, SEG_LVL_REF_FRAME); - vp10_set_segdata(seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME); - vp10_clear_segdata(seg, 1, SEG_LVL_REF_FRAME); - vp10_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME); - - // Skip all MBs if high Q (0,0 mv and skip coeffs) - if (high_q) { - vp10_enable_segfeature(seg, 0, SEG_LVL_SKIP); - vp10_enable_segfeature(seg, 1, SEG_LVL_SKIP); - } - // Enable data update - seg->update_data = 1; - } else { - // All other frames. - - // No updates.. leave things as they are. - seg->update_map = 0; - seg->update_data = 0; - } - } -} - -static void update_reference_segmentation_map(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible; - uint8_t *cache_ptr = cm->last_frame_seg_map; - int row, col; - - for (row = 0; row < cm->mi_rows; row++) { - MODE_INFO **mi_8x8 = mi_8x8_ptr; - uint8_t *cache = cache_ptr; - for (col = 0; col < cm->mi_cols; col++, mi_8x8++, cache++) - cache[0] = mi_8x8[0]->mbmi.segment_id; - mi_8x8_ptr += cm->mi_stride; - cache_ptr += cm->mi_cols; - } -} - -static void alloc_raw_frame_buffers(VP10_COMP *cpi) { - VP10_COMMON *cm = &cpi->common; - const VP10EncoderConfig *oxcf = &cpi->oxcf; - - if (!cpi->lookahead) - cpi->lookahead = vp10_lookahead_init(oxcf->width, oxcf->height, - cm->subsampling_x, cm->subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, -#endif - oxcf->lag_in_frames); - if (!cpi->lookahead) - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate lag buffers"); - - // TODO(agrange) Check if ARF is enabled and skip allocation if not. 
- if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, - oxcf->width, oxcf->height, - cm->subsampling_x, cm->subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, -#endif - VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, - NULL, NULL, NULL)) - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate altref buffer"); -} - -static void alloc_util_frame_buffers(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - if (vpx_realloc_frame_buffer(&cpi->last_frame_uf, - cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, -#endif - VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, - NULL, NULL, NULL)) - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate last frame buffer"); - - if (vpx_realloc_frame_buffer(&cpi->scaled_source, - cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, -#endif - VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, - NULL, NULL, NULL)) - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate scaled source buffer"); - - if (vpx_realloc_frame_buffer(&cpi->scaled_last_source, - cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, -#endif - VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, - NULL, NULL, NULL)) - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate scaled last source buffer"); -} - - -static int alloc_context_buffers_ext(VP10_COMP *cpi) { - VP10_COMMON *cm = &cpi->common; - int mi_size = cm->mi_cols * cm->mi_rows; - - cpi->mbmi_ext_base = vpx_calloc(mi_size, sizeof(*cpi->mbmi_ext_base)); - if (!cpi->mbmi_ext_base) - return 1; - - return 0; -} - -void vp10_alloc_compressor_data(VP10_COMP *cpi) { - VP10_COMMON *cm = &cpi->common; - - vp10_alloc_context_buffers(cm, cm->width, cm->height); - - alloc_context_buffers_ext(cpi); - - vpx_free(cpi->tile_tok[0][0]); - - { - 
unsigned int tokens = get_token_alloc(cm->mb_rows, cm->mb_cols); - CHECK_MEM_ERROR(cm, cpi->tile_tok[0][0], - vpx_calloc(tokens, sizeof(*cpi->tile_tok[0][0]))); - } - - vp10_setup_pc_tree(&cpi->common, &cpi->td); -} - -void vp10_new_framerate(VP10_COMP *cpi, double framerate) { - cpi->framerate = framerate < 0.1 ? 30 : framerate; - vp10_rc_update_framerate(cpi); -} - -static void set_tile_limits(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - - int min_log2_tile_cols, max_log2_tile_cols; - vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols); - - cm->log2_tile_cols = clamp(cpi->oxcf.tile_columns, - min_log2_tile_cols, max_log2_tile_cols); - cm->log2_tile_rows = cpi->oxcf.tile_rows; -} - -static void update_frame_size(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &cpi->td.mb.e_mbd; - - vp10_set_mb_mi(cm, cm->width, cm->height); - vp10_init_context_buffers(cm); - vp10_init_macroblockd(cm, xd, NULL); - memset(cpi->mbmi_ext_base, 0, - cm->mi_rows * cm->mi_cols * sizeof(*cpi->mbmi_ext_base)); - - set_tile_limits(cpi); -} - -static void init_buffer_indices(VP10_COMP *cpi) { - cpi->lst_fb_idx = 0; - cpi->gld_fb_idx = 1; - cpi->alt_fb_idx = 2; -} - -static void init_config(struct VP10_COMP *cpi, VP10EncoderConfig *oxcf) { - VP10_COMMON *const cm = &cpi->common; - - cpi->oxcf = *oxcf; - cpi->framerate = oxcf->init_framerate; - - cm->profile = oxcf->profile; - cm->bit_depth = oxcf->bit_depth; -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth = oxcf->use_highbitdepth; -#endif - cm->color_space = oxcf->color_space; - cm->color_range = oxcf->color_range; - - cm->width = oxcf->width; - cm->height = oxcf->height; - vp10_alloc_compressor_data(cpi); - - // Single thread case: use counts in common. 
- cpi->td.counts = &cm->counts; - - // change includes all joint functionality - vp10_change_config(cpi, oxcf); - - cpi->static_mb_pct = 0; - cpi->ref_frame_flags = 0; - - init_buffer_indices(cpi); -} - -static void set_rc_buffer_sizes(RATE_CONTROL *rc, - const VP10EncoderConfig *oxcf) { - const int64_t bandwidth = oxcf->target_bandwidth; - const int64_t starting = oxcf->starting_buffer_level_ms; - const int64_t optimal = oxcf->optimal_buffer_level_ms; - const int64_t maximum = oxcf->maximum_buffer_size_ms; - - rc->starting_buffer_level = starting * bandwidth / 1000; - rc->optimal_buffer_level = (optimal == 0) ? bandwidth / 8 - : optimal * bandwidth / 1000; - rc->maximum_buffer_size = (maximum == 0) ? bandwidth / 8 - : maximum * bandwidth / 1000; -} - -#if CONFIG_VP9_HIGHBITDEPTH -#define HIGHBD_BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \ - cpi->fn_ptr[BT].sdf = SDF; \ - cpi->fn_ptr[BT].sdaf = SDAF; \ - cpi->fn_ptr[BT].vf = VF; \ - cpi->fn_ptr[BT].svf = SVF; \ - cpi->fn_ptr[BT].svaf = SVAF; \ - cpi->fn_ptr[BT].sdx3f = SDX3F; \ - cpi->fn_ptr[BT].sdx8f = SDX8F; \ - cpi->fn_ptr[BT].sdx4df = SDX4DF; - -#define MAKE_BFP_SAD_WRAPPER(fnname) \ -static unsigned int fnname##_bits8(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride) { \ - return fnname(src_ptr, source_stride, ref_ptr, ref_stride); \ -} \ -static unsigned int fnname##_bits10(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride) { \ - return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 2; \ -} \ -static unsigned int fnname##_bits12(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride) { \ - return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 4; \ -} - -#define MAKE_BFP_SADAVG_WRAPPER(fnname) static unsigned int \ -fnname##_bits8(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - const uint8_t *second_pred) 
{ \ - return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred); \ -} \ -static unsigned int fnname##_bits10(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - const uint8_t *second_pred) { \ - return fnname(src_ptr, source_stride, ref_ptr, ref_stride, \ - second_pred) >> 2; \ -} \ -static unsigned int fnname##_bits12(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - const uint8_t *second_pred) { \ - return fnname(src_ptr, source_stride, ref_ptr, ref_stride, \ - second_pred) >> 4; \ -} - -#define MAKE_BFP_SAD3_WRAPPER(fnname) \ -static void fnname##_bits8(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - unsigned int *sad_array) { \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ -} \ -static void fnname##_bits10(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - unsigned int *sad_array) { \ - int i; \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ - for (i = 0; i < 3; i++) \ - sad_array[i] >>= 2; \ -} \ -static void fnname##_bits12(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - unsigned int *sad_array) { \ - int i; \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ - for (i = 0; i < 3; i++) \ - sad_array[i] >>= 4; \ -} - -#define MAKE_BFP_SAD8_WRAPPER(fnname) \ -static void fnname##_bits8(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - unsigned int *sad_array) { \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ -} \ -static void fnname##_bits10(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - unsigned int *sad_array) { \ - int i; \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ - for (i = 0; i < 8; i++) \ - 
sad_array[i] >>= 2; \ -} \ -static void fnname##_bits12(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - unsigned int *sad_array) { \ - int i; \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ - for (i = 0; i < 8; i++) \ - sad_array[i] >>= 4; \ -} -#define MAKE_BFP_SAD4D_WRAPPER(fnname) \ -static void fnname##_bits8(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t* const ref_ptr[], \ - int ref_stride, \ - unsigned int *sad_array) { \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ -} \ -static void fnname##_bits10(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t* const ref_ptr[], \ - int ref_stride, \ - unsigned int *sad_array) { \ - int i; \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ - for (i = 0; i < 4; i++) \ - sad_array[i] >>= 2; \ -} \ -static void fnname##_bits12(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t* const ref_ptr[], \ - int ref_stride, \ - unsigned int *sad_array) { \ - int i; \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ - for (i = 0; i < 4; i++) \ - sad_array[i] >>= 4; \ -} - -MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x16) -MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x16_avg) -MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x16x4d) -MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x32) -MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x32_avg) -MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x32x4d) -MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x32) -MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x32_avg) -MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x32x4d) -MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x64) -MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x64_avg) -MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x64x4d) -MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x32) -MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x32_avg) -MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad32x32x3) -MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad32x32x8) -MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad32x32x4d) 
-MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad64x64) -MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad64x64_avg) -MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad64x64x3) -MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad64x64x8) -MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad64x64x4d) -MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x16) -MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x16_avg) -MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad16x16x3) -MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad16x16x8) -MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x16x4d) -MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad16x8) -MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad16x8_avg) -MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad16x8x3) -MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad16x8x8) -MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad16x8x4d) -MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x16) -MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x16_avg) -MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad8x16x3) -MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad8x16x8) -MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x16x4d) -MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x8) -MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x8_avg) -MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad8x8x3) -MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad8x8x8) -MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x8x4d) -MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad8x4) -MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad8x4_avg) -MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad8x4x8) -MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad8x4x4d) -MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad4x8) -MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad4x8_avg) -MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad4x8x8) -MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x8x4d) -MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad4x4) -MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad4x4_avg) -MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad4x4x3) -MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad4x4x8) -MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x4x4d) - -static void highbd_set_var_fns(VP10_COMP *const cpi) { - VP10_COMMON *const cm = &cpi->common; - if (cm->use_highbitdepth) { - switch (cm->bit_depth) { - case VPX_BITS_8: - HIGHBD_BFP(BLOCK_32X16, - vpx_highbd_sad32x16_bits8, - vpx_highbd_sad32x16_avg_bits8, - 
vpx_highbd_8_variance32x16, - vpx_highbd_8_sub_pixel_variance32x16, - vpx_highbd_8_sub_pixel_avg_variance32x16, - NULL, - NULL, - vpx_highbd_sad32x16x4d_bits8) - - HIGHBD_BFP(BLOCK_16X32, - vpx_highbd_sad16x32_bits8, - vpx_highbd_sad16x32_avg_bits8, - vpx_highbd_8_variance16x32, - vpx_highbd_8_sub_pixel_variance16x32, - vpx_highbd_8_sub_pixel_avg_variance16x32, - NULL, - NULL, - vpx_highbd_sad16x32x4d_bits8) - - HIGHBD_BFP(BLOCK_64X32, - vpx_highbd_sad64x32_bits8, - vpx_highbd_sad64x32_avg_bits8, - vpx_highbd_8_variance64x32, - vpx_highbd_8_sub_pixel_variance64x32, - vpx_highbd_8_sub_pixel_avg_variance64x32, - NULL, - NULL, - vpx_highbd_sad64x32x4d_bits8) - - HIGHBD_BFP(BLOCK_32X64, - vpx_highbd_sad32x64_bits8, - vpx_highbd_sad32x64_avg_bits8, - vpx_highbd_8_variance32x64, - vpx_highbd_8_sub_pixel_variance32x64, - vpx_highbd_8_sub_pixel_avg_variance32x64, - NULL, - NULL, - vpx_highbd_sad32x64x4d_bits8) - - HIGHBD_BFP(BLOCK_32X32, - vpx_highbd_sad32x32_bits8, - vpx_highbd_sad32x32_avg_bits8, - vpx_highbd_8_variance32x32, - vpx_highbd_8_sub_pixel_variance32x32, - vpx_highbd_8_sub_pixel_avg_variance32x32, - vpx_highbd_sad32x32x3_bits8, - vpx_highbd_sad32x32x8_bits8, - vpx_highbd_sad32x32x4d_bits8) - - HIGHBD_BFP(BLOCK_64X64, - vpx_highbd_sad64x64_bits8, - vpx_highbd_sad64x64_avg_bits8, - vpx_highbd_8_variance64x64, - vpx_highbd_8_sub_pixel_variance64x64, - vpx_highbd_8_sub_pixel_avg_variance64x64, - vpx_highbd_sad64x64x3_bits8, - vpx_highbd_sad64x64x8_bits8, - vpx_highbd_sad64x64x4d_bits8) - - HIGHBD_BFP(BLOCK_16X16, - vpx_highbd_sad16x16_bits8, - vpx_highbd_sad16x16_avg_bits8, - vpx_highbd_8_variance16x16, - vpx_highbd_8_sub_pixel_variance16x16, - vpx_highbd_8_sub_pixel_avg_variance16x16, - vpx_highbd_sad16x16x3_bits8, - vpx_highbd_sad16x16x8_bits8, - vpx_highbd_sad16x16x4d_bits8) - - HIGHBD_BFP(BLOCK_16X8, - vpx_highbd_sad16x8_bits8, - vpx_highbd_sad16x8_avg_bits8, - vpx_highbd_8_variance16x8, - vpx_highbd_8_sub_pixel_variance16x8, - 
vpx_highbd_8_sub_pixel_avg_variance16x8, - vpx_highbd_sad16x8x3_bits8, - vpx_highbd_sad16x8x8_bits8, - vpx_highbd_sad16x8x4d_bits8) - - HIGHBD_BFP(BLOCK_8X16, - vpx_highbd_sad8x16_bits8, - vpx_highbd_sad8x16_avg_bits8, - vpx_highbd_8_variance8x16, - vpx_highbd_8_sub_pixel_variance8x16, - vpx_highbd_8_sub_pixel_avg_variance8x16, - vpx_highbd_sad8x16x3_bits8, - vpx_highbd_sad8x16x8_bits8, - vpx_highbd_sad8x16x4d_bits8) - - HIGHBD_BFP(BLOCK_8X8, - vpx_highbd_sad8x8_bits8, - vpx_highbd_sad8x8_avg_bits8, - vpx_highbd_8_variance8x8, - vpx_highbd_8_sub_pixel_variance8x8, - vpx_highbd_8_sub_pixel_avg_variance8x8, - vpx_highbd_sad8x8x3_bits8, - vpx_highbd_sad8x8x8_bits8, - vpx_highbd_sad8x8x4d_bits8) - - HIGHBD_BFP(BLOCK_8X4, - vpx_highbd_sad8x4_bits8, - vpx_highbd_sad8x4_avg_bits8, - vpx_highbd_8_variance8x4, - vpx_highbd_8_sub_pixel_variance8x4, - vpx_highbd_8_sub_pixel_avg_variance8x4, - NULL, - vpx_highbd_sad8x4x8_bits8, - vpx_highbd_sad8x4x4d_bits8) - - HIGHBD_BFP(BLOCK_4X8, - vpx_highbd_sad4x8_bits8, - vpx_highbd_sad4x8_avg_bits8, - vpx_highbd_8_variance4x8, - vpx_highbd_8_sub_pixel_variance4x8, - vpx_highbd_8_sub_pixel_avg_variance4x8, - NULL, - vpx_highbd_sad4x8x8_bits8, - vpx_highbd_sad4x8x4d_bits8) - - HIGHBD_BFP(BLOCK_4X4, - vpx_highbd_sad4x4_bits8, - vpx_highbd_sad4x4_avg_bits8, - vpx_highbd_8_variance4x4, - vpx_highbd_8_sub_pixel_variance4x4, - vpx_highbd_8_sub_pixel_avg_variance4x4, - vpx_highbd_sad4x4x3_bits8, - vpx_highbd_sad4x4x8_bits8, - vpx_highbd_sad4x4x4d_bits8) - break; - - case VPX_BITS_10: - HIGHBD_BFP(BLOCK_32X16, - vpx_highbd_sad32x16_bits10, - vpx_highbd_sad32x16_avg_bits10, - vpx_highbd_10_variance32x16, - vpx_highbd_10_sub_pixel_variance32x16, - vpx_highbd_10_sub_pixel_avg_variance32x16, - NULL, - NULL, - vpx_highbd_sad32x16x4d_bits10) - - HIGHBD_BFP(BLOCK_16X32, - vpx_highbd_sad16x32_bits10, - vpx_highbd_sad16x32_avg_bits10, - vpx_highbd_10_variance16x32, - vpx_highbd_10_sub_pixel_variance16x32, - vpx_highbd_10_sub_pixel_avg_variance16x32, - 
NULL, - NULL, - vpx_highbd_sad16x32x4d_bits10) - - HIGHBD_BFP(BLOCK_64X32, - vpx_highbd_sad64x32_bits10, - vpx_highbd_sad64x32_avg_bits10, - vpx_highbd_10_variance64x32, - vpx_highbd_10_sub_pixel_variance64x32, - vpx_highbd_10_sub_pixel_avg_variance64x32, - NULL, - NULL, - vpx_highbd_sad64x32x4d_bits10) - - HIGHBD_BFP(BLOCK_32X64, - vpx_highbd_sad32x64_bits10, - vpx_highbd_sad32x64_avg_bits10, - vpx_highbd_10_variance32x64, - vpx_highbd_10_sub_pixel_variance32x64, - vpx_highbd_10_sub_pixel_avg_variance32x64, - NULL, - NULL, - vpx_highbd_sad32x64x4d_bits10) - - HIGHBD_BFP(BLOCK_32X32, - vpx_highbd_sad32x32_bits10, - vpx_highbd_sad32x32_avg_bits10, - vpx_highbd_10_variance32x32, - vpx_highbd_10_sub_pixel_variance32x32, - vpx_highbd_10_sub_pixel_avg_variance32x32, - vpx_highbd_sad32x32x3_bits10, - vpx_highbd_sad32x32x8_bits10, - vpx_highbd_sad32x32x4d_bits10) - - HIGHBD_BFP(BLOCK_64X64, - vpx_highbd_sad64x64_bits10, - vpx_highbd_sad64x64_avg_bits10, - vpx_highbd_10_variance64x64, - vpx_highbd_10_sub_pixel_variance64x64, - vpx_highbd_10_sub_pixel_avg_variance64x64, - vpx_highbd_sad64x64x3_bits10, - vpx_highbd_sad64x64x8_bits10, - vpx_highbd_sad64x64x4d_bits10) - - HIGHBD_BFP(BLOCK_16X16, - vpx_highbd_sad16x16_bits10, - vpx_highbd_sad16x16_avg_bits10, - vpx_highbd_10_variance16x16, - vpx_highbd_10_sub_pixel_variance16x16, - vpx_highbd_10_sub_pixel_avg_variance16x16, - vpx_highbd_sad16x16x3_bits10, - vpx_highbd_sad16x16x8_bits10, - vpx_highbd_sad16x16x4d_bits10) - - HIGHBD_BFP(BLOCK_16X8, - vpx_highbd_sad16x8_bits10, - vpx_highbd_sad16x8_avg_bits10, - vpx_highbd_10_variance16x8, - vpx_highbd_10_sub_pixel_variance16x8, - vpx_highbd_10_sub_pixel_avg_variance16x8, - vpx_highbd_sad16x8x3_bits10, - vpx_highbd_sad16x8x8_bits10, - vpx_highbd_sad16x8x4d_bits10) - - HIGHBD_BFP(BLOCK_8X16, - vpx_highbd_sad8x16_bits10, - vpx_highbd_sad8x16_avg_bits10, - vpx_highbd_10_variance8x16, - vpx_highbd_10_sub_pixel_variance8x16, - vpx_highbd_10_sub_pixel_avg_variance8x16, - 
vpx_highbd_sad8x16x3_bits10, - vpx_highbd_sad8x16x8_bits10, - vpx_highbd_sad8x16x4d_bits10) - - HIGHBD_BFP(BLOCK_8X8, - vpx_highbd_sad8x8_bits10, - vpx_highbd_sad8x8_avg_bits10, - vpx_highbd_10_variance8x8, - vpx_highbd_10_sub_pixel_variance8x8, - vpx_highbd_10_sub_pixel_avg_variance8x8, - vpx_highbd_sad8x8x3_bits10, - vpx_highbd_sad8x8x8_bits10, - vpx_highbd_sad8x8x4d_bits10) - - HIGHBD_BFP(BLOCK_8X4, - vpx_highbd_sad8x4_bits10, - vpx_highbd_sad8x4_avg_bits10, - vpx_highbd_10_variance8x4, - vpx_highbd_10_sub_pixel_variance8x4, - vpx_highbd_10_sub_pixel_avg_variance8x4, - NULL, - vpx_highbd_sad8x4x8_bits10, - vpx_highbd_sad8x4x4d_bits10) - - HIGHBD_BFP(BLOCK_4X8, - vpx_highbd_sad4x8_bits10, - vpx_highbd_sad4x8_avg_bits10, - vpx_highbd_10_variance4x8, - vpx_highbd_10_sub_pixel_variance4x8, - vpx_highbd_10_sub_pixel_avg_variance4x8, - NULL, - vpx_highbd_sad4x8x8_bits10, - vpx_highbd_sad4x8x4d_bits10) - - HIGHBD_BFP(BLOCK_4X4, - vpx_highbd_sad4x4_bits10, - vpx_highbd_sad4x4_avg_bits10, - vpx_highbd_10_variance4x4, - vpx_highbd_10_sub_pixel_variance4x4, - vpx_highbd_10_sub_pixel_avg_variance4x4, - vpx_highbd_sad4x4x3_bits10, - vpx_highbd_sad4x4x8_bits10, - vpx_highbd_sad4x4x4d_bits10) - break; - - case VPX_BITS_12: - HIGHBD_BFP(BLOCK_32X16, - vpx_highbd_sad32x16_bits12, - vpx_highbd_sad32x16_avg_bits12, - vpx_highbd_12_variance32x16, - vpx_highbd_12_sub_pixel_variance32x16, - vpx_highbd_12_sub_pixel_avg_variance32x16, - NULL, - NULL, - vpx_highbd_sad32x16x4d_bits12) - - HIGHBD_BFP(BLOCK_16X32, - vpx_highbd_sad16x32_bits12, - vpx_highbd_sad16x32_avg_bits12, - vpx_highbd_12_variance16x32, - vpx_highbd_12_sub_pixel_variance16x32, - vpx_highbd_12_sub_pixel_avg_variance16x32, - NULL, - NULL, - vpx_highbd_sad16x32x4d_bits12) - - HIGHBD_BFP(BLOCK_64X32, - vpx_highbd_sad64x32_bits12, - vpx_highbd_sad64x32_avg_bits12, - vpx_highbd_12_variance64x32, - vpx_highbd_12_sub_pixel_variance64x32, - vpx_highbd_12_sub_pixel_avg_variance64x32, - NULL, - NULL, - 
vpx_highbd_sad64x32x4d_bits12) - - HIGHBD_BFP(BLOCK_32X64, - vpx_highbd_sad32x64_bits12, - vpx_highbd_sad32x64_avg_bits12, - vpx_highbd_12_variance32x64, - vpx_highbd_12_sub_pixel_variance32x64, - vpx_highbd_12_sub_pixel_avg_variance32x64, - NULL, - NULL, - vpx_highbd_sad32x64x4d_bits12) - - HIGHBD_BFP(BLOCK_32X32, - vpx_highbd_sad32x32_bits12, - vpx_highbd_sad32x32_avg_bits12, - vpx_highbd_12_variance32x32, - vpx_highbd_12_sub_pixel_variance32x32, - vpx_highbd_12_sub_pixel_avg_variance32x32, - vpx_highbd_sad32x32x3_bits12, - vpx_highbd_sad32x32x8_bits12, - vpx_highbd_sad32x32x4d_bits12) - - HIGHBD_BFP(BLOCK_64X64, - vpx_highbd_sad64x64_bits12, - vpx_highbd_sad64x64_avg_bits12, - vpx_highbd_12_variance64x64, - vpx_highbd_12_sub_pixel_variance64x64, - vpx_highbd_12_sub_pixel_avg_variance64x64, - vpx_highbd_sad64x64x3_bits12, - vpx_highbd_sad64x64x8_bits12, - vpx_highbd_sad64x64x4d_bits12) - - HIGHBD_BFP(BLOCK_16X16, - vpx_highbd_sad16x16_bits12, - vpx_highbd_sad16x16_avg_bits12, - vpx_highbd_12_variance16x16, - vpx_highbd_12_sub_pixel_variance16x16, - vpx_highbd_12_sub_pixel_avg_variance16x16, - vpx_highbd_sad16x16x3_bits12, - vpx_highbd_sad16x16x8_bits12, - vpx_highbd_sad16x16x4d_bits12) - - HIGHBD_BFP(BLOCK_16X8, - vpx_highbd_sad16x8_bits12, - vpx_highbd_sad16x8_avg_bits12, - vpx_highbd_12_variance16x8, - vpx_highbd_12_sub_pixel_variance16x8, - vpx_highbd_12_sub_pixel_avg_variance16x8, - vpx_highbd_sad16x8x3_bits12, - vpx_highbd_sad16x8x8_bits12, - vpx_highbd_sad16x8x4d_bits12) - - HIGHBD_BFP(BLOCK_8X16, - vpx_highbd_sad8x16_bits12, - vpx_highbd_sad8x16_avg_bits12, - vpx_highbd_12_variance8x16, - vpx_highbd_12_sub_pixel_variance8x16, - vpx_highbd_12_sub_pixel_avg_variance8x16, - vpx_highbd_sad8x16x3_bits12, - vpx_highbd_sad8x16x8_bits12, - vpx_highbd_sad8x16x4d_bits12) - - HIGHBD_BFP(BLOCK_8X8, - vpx_highbd_sad8x8_bits12, - vpx_highbd_sad8x8_avg_bits12, - vpx_highbd_12_variance8x8, - vpx_highbd_12_sub_pixel_variance8x8, - vpx_highbd_12_sub_pixel_avg_variance8x8, - 
vpx_highbd_sad8x8x3_bits12, - vpx_highbd_sad8x8x8_bits12, - vpx_highbd_sad8x8x4d_bits12) - - HIGHBD_BFP(BLOCK_8X4, - vpx_highbd_sad8x4_bits12, - vpx_highbd_sad8x4_avg_bits12, - vpx_highbd_12_variance8x4, - vpx_highbd_12_sub_pixel_variance8x4, - vpx_highbd_12_sub_pixel_avg_variance8x4, - NULL, - vpx_highbd_sad8x4x8_bits12, - vpx_highbd_sad8x4x4d_bits12) - - HIGHBD_BFP(BLOCK_4X8, - vpx_highbd_sad4x8_bits12, - vpx_highbd_sad4x8_avg_bits12, - vpx_highbd_12_variance4x8, - vpx_highbd_12_sub_pixel_variance4x8, - vpx_highbd_12_sub_pixel_avg_variance4x8, - NULL, - vpx_highbd_sad4x8x8_bits12, - vpx_highbd_sad4x8x4d_bits12) - - HIGHBD_BFP(BLOCK_4X4, - vpx_highbd_sad4x4_bits12, - vpx_highbd_sad4x4_avg_bits12, - vpx_highbd_12_variance4x4, - vpx_highbd_12_sub_pixel_variance4x4, - vpx_highbd_12_sub_pixel_avg_variance4x4, - vpx_highbd_sad4x4x3_bits12, - vpx_highbd_sad4x4x8_bits12, - vpx_highbd_sad4x4x4d_bits12) - break; - - default: - assert(0 && "cm->bit_depth should be VPX_BITS_8, " - "VPX_BITS_10 or VPX_BITS_12"); - } - } -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -static void realloc_segmentation_maps(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - - // Create the encoder segmentation map and set all entries to 0 - vpx_free(cpi->segmentation_map); - CHECK_MEM_ERROR(cm, cpi->segmentation_map, - vpx_calloc(cm->mi_rows * cm->mi_cols, 1)); - - // Create a map used for cyclic background refresh. - if (cpi->cyclic_refresh) - vp10_cyclic_refresh_free(cpi->cyclic_refresh); - CHECK_MEM_ERROR(cm, cpi->cyclic_refresh, - vp10_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols)); - - // Create a map used to mark inactive areas. 
- vpx_free(cpi->active_map.map); - CHECK_MEM_ERROR(cm, cpi->active_map.map, - vpx_calloc(cm->mi_rows * cm->mi_cols, 1)); - - // And a place holder structure is the coding context - // for use if we want to save and restore it - vpx_free(cpi->coding_context.last_frame_seg_map_copy); - CHECK_MEM_ERROR(cm, cpi->coding_context.last_frame_seg_map_copy, - vpx_calloc(cm->mi_rows * cm->mi_cols, 1)); -} - -void vp10_change_config(struct VP10_COMP *cpi, const VP10EncoderConfig *oxcf) { - VP10_COMMON *const cm = &cpi->common; - RATE_CONTROL *const rc = &cpi->rc; - - if (cm->profile != oxcf->profile) - cm->profile = oxcf->profile; - cm->bit_depth = oxcf->bit_depth; - cm->color_space = oxcf->color_space; - cm->color_range = oxcf->color_range; - - if (cm->profile <= PROFILE_1) - assert(cm->bit_depth == VPX_BITS_8); - else - assert(cm->bit_depth > VPX_BITS_8); - - cpi->oxcf = *oxcf; -#if CONFIG_VP9_HIGHBITDEPTH - cpi->td.mb.e_mbd.bd = (int)cm->bit_depth; -#endif // CONFIG_VP9_HIGHBITDEPTH - - if ((oxcf->pass == 0) && (oxcf->rc_mode == VPX_Q)) { - rc->baseline_gf_interval = FIXED_GF_INTERVAL; - } else { - rc->baseline_gf_interval = (MIN_GF_INTERVAL + MAX_GF_INTERVAL) / 2; - } - - cpi->refresh_golden_frame = 0; - cpi->refresh_last_frame = 1; - cm->refresh_frame_context = - oxcf->error_resilient_mode ? REFRESH_FRAME_CONTEXT_OFF : - oxcf->frame_parallel_decoding_mode ? REFRESH_FRAME_CONTEXT_FORWARD - : REFRESH_FRAME_CONTEXT_BACKWARD; - cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE; - - vp10_reset_segment_features(cm); - vp10_set_high_precision_mv(cpi, 0); - - { - int i; - - for (i = 0; i < MAX_SEGMENTS; i++) - cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout; - } - cpi->encode_breakout = cpi->oxcf.encode_breakout; - - set_rc_buffer_sizes(rc, &cpi->oxcf); - - // Under a configuration change, where maximum_buffer_size may change, - // keep buffer level clipped to the maximum allowed buffer size. 
- rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size); - rc->buffer_level = VPXMIN(rc->buffer_level, rc->maximum_buffer_size); - - // Set up frame rate and related parameters rate control values. - vp10_new_framerate(cpi, cpi->framerate); - - // Set absolute upper and lower quality limits - rc->worst_quality = cpi->oxcf.worst_allowed_q; - rc->best_quality = cpi->oxcf.best_allowed_q; - - cm->interp_filter = cpi->sf.default_interp_filter; - - if (cpi->oxcf.render_width > 0 && cpi->oxcf.render_height > 0) { - cm->render_width = cpi->oxcf.render_width; - cm->render_height = cpi->oxcf.render_height; - } else { - cm->render_width = cpi->oxcf.width; - cm->render_height = cpi->oxcf.height; - } - cm->width = cpi->oxcf.width; - cm->height = cpi->oxcf.height; - - if (cpi->initial_width) { - if (cm->width > cpi->initial_width || cm->height > cpi->initial_height) { - vp10_free_context_buffers(cm); - vp10_alloc_compressor_data(cpi); - realloc_segmentation_maps(cpi); - cpi->initial_width = cpi->initial_height = 0; - } - } - update_frame_size(cpi); - - cpi->alt_ref_source = NULL; - rc->is_src_frame_alt_ref = 0; - -#if 0 - // Experimental RD Code - cpi->frame_distortion = 0; - cpi->last_frame_distortion = 0; -#endif - - set_tile_limits(cpi); - - cpi->ext_refresh_frame_flags_pending = 0; - cpi->ext_refresh_frame_context_pending = 0; - -#if CONFIG_VP9_HIGHBITDEPTH - highbd_set_var_fns(cpi); -#endif -} - -#ifndef M_LOG2_E -#define M_LOG2_E 0.693147180559945309417 -#endif -#define log2f(x) (log (x) / (float) M_LOG2_E) - -static void cal_nmvjointsadcost(int *mvjointsadcost) { - mvjointsadcost[0] = 600; - mvjointsadcost[1] = 300; - mvjointsadcost[2] = 300; - mvjointsadcost[3] = 300; -} - -static void cal_nmvsadcosts(int *mvsadcost[2]) { - int i = 1; - - mvsadcost[0][0] = 0; - mvsadcost[1][0] = 0; - - do { - double z = 256 * (2 * (log2f(8 * i) + .6)); - mvsadcost[0][i] = (int)z; - mvsadcost[1][i] = (int)z; - mvsadcost[0][-i] = (int)z; - mvsadcost[1][-i] = (int)z; - 
} while (++i <= MV_MAX); -} - -static void cal_nmvsadcosts_hp(int *mvsadcost[2]) { - int i = 1; - - mvsadcost[0][0] = 0; - mvsadcost[1][0] = 0; - - do { - double z = 256 * (2 * (log2f(8 * i) + .6)); - mvsadcost[0][i] = (int)z; - mvsadcost[1][i] = (int)z; - mvsadcost[0][-i] = (int)z; - mvsadcost[1][-i] = (int)z; - } while (++i <= MV_MAX); -} - - -VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf, - BufferPool *const pool) { - unsigned int i; - VP10_COMP *volatile const cpi = vpx_memalign(32, sizeof(VP10_COMP)); - VP10_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL; - - if (!cm) - return NULL; - - vp10_zero(*cpi); - - if (setjmp(cm->error.jmp)) { - cm->error.setjmp = 0; - vp10_remove_compressor(cpi); - return 0; - } - - cm->error.setjmp = 1; - cm->alloc_mi = vp10_enc_alloc_mi; - cm->free_mi = vp10_enc_free_mi; - cm->setup_mi = vp10_enc_setup_mi; - - CHECK_MEM_ERROR(cm, cm->fc, - (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc))); - CHECK_MEM_ERROR(cm, cm->frame_contexts, - (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, - sizeof(*cm->frame_contexts))); - - cpi->resize_state = 0; - cpi->resize_avg_qp = 0; - cpi->resize_buffer_underflow = 0; - cpi->common.buffer_pool = pool; - - init_config(cpi, oxcf); - vp10_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc); - - cm->current_video_frame = 0; - cpi->partition_search_skippable_frame = 0; - cpi->tile_data = NULL; - - realloc_segmentation_maps(cpi); - - CHECK_MEM_ERROR(cm, cpi->nmvcosts[0], - vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[0]))); - CHECK_MEM_ERROR(cm, cpi->nmvcosts[1], - vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[1]))); - CHECK_MEM_ERROR(cm, cpi->nmvcosts_hp[0], - vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[0]))); - CHECK_MEM_ERROR(cm, cpi->nmvcosts_hp[1], - vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts_hp[1]))); - CHECK_MEM_ERROR(cm, cpi->nmvsadcosts[0], - vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[0]))); - CHECK_MEM_ERROR(cm, cpi->nmvsadcosts[1], - vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts[1]))); - 
CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[0], - vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[0]))); - CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[1], - vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[1]))); - - for (i = 0; i < (sizeof(cpi->mbgraph_stats) / - sizeof(cpi->mbgraph_stats[0])); i++) { - CHECK_MEM_ERROR(cm, cpi->mbgraph_stats[i].mb_stats, - vpx_calloc(cm->MBs * - sizeof(*cpi->mbgraph_stats[i].mb_stats), 1)); - } - -#if CONFIG_FP_MB_STATS - cpi->use_fp_mb_stats = 0; - if (cpi->use_fp_mb_stats) { - // a place holder used to store the first pass mb stats in the first pass - CHECK_MEM_ERROR(cm, cpi->twopass.frame_mb_stats_buf, - vpx_calloc(cm->MBs * sizeof(uint8_t), 1)); - } else { - cpi->twopass.frame_mb_stats_buf = NULL; - } -#endif - - cpi->refresh_alt_ref_frame = 0; - cpi->multi_arf_last_grp_enabled = 0; - - cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS; -#if CONFIG_INTERNAL_STATS - cpi->b_calculate_ssimg = 0; - cpi->b_calculate_blockiness = 1; - cpi->b_calculate_consistency = 1; - cpi->total_inconsistency = 0; - cpi->psnr.worst = 100.0; - cpi->worst_ssim = 100.0; - - cpi->count = 0; - cpi->bytes = 0; - - if (cpi->b_calculate_psnr) { - cpi->total_sq_error = 0; - cpi->total_samples = 0; - - cpi->totalp_sq_error = 0; - cpi->totalp_samples = 0; - - cpi->tot_recode_hits = 0; - cpi->summed_quality = 0; - cpi->summed_weights = 0; - cpi->summedp_quality = 0; - cpi->summedp_weights = 0; - } - - if (cpi->b_calculate_ssimg) { - cpi->ssimg.worst= 100.0; - } - cpi->fastssim.worst = 100.0; - - cpi->psnrhvs.worst = 100.0; - - if (cpi->b_calculate_blockiness) { - cpi->total_blockiness = 0; - cpi->worst_blockiness = 0.0; - } - - if (cpi->b_calculate_consistency) { - cpi->ssim_vars = vpx_malloc(sizeof(*cpi->ssim_vars) * - 4 * cpi->common.mi_rows * cpi->common.mi_cols); - cpi->worst_consistency = 100.0; - } - -#endif - - cpi->first_time_stamp_ever = INT64_MAX; - - cal_nmvjointsadcost(cpi->td.mb.nmvjointsadcost); - cpi->td.mb.nmvcost[0] = &cpi->nmvcosts[0][MV_MAX]; - 
cpi->td.mb.nmvcost[1] = &cpi->nmvcosts[1][MV_MAX]; - cpi->td.mb.nmvsadcost[0] = &cpi->nmvsadcosts[0][MV_MAX]; - cpi->td.mb.nmvsadcost[1] = &cpi->nmvsadcosts[1][MV_MAX]; - cal_nmvsadcosts(cpi->td.mb.nmvsadcost); - - cpi->td.mb.nmvcost_hp[0] = &cpi->nmvcosts_hp[0][MV_MAX]; - cpi->td.mb.nmvcost_hp[1] = &cpi->nmvcosts_hp[1][MV_MAX]; - cpi->td.mb.nmvsadcost_hp[0] = &cpi->nmvsadcosts_hp[0][MV_MAX]; - cpi->td.mb.nmvsadcost_hp[1] = &cpi->nmvsadcosts_hp[1][MV_MAX]; - cal_nmvsadcosts_hp(cpi->td.mb.nmvsadcost_hp); - -#if CONFIG_VP9_TEMPORAL_DENOISING -#ifdef OUTPUT_YUV_DENOISED - yuv_denoised_file = fopen("denoised.yuv", "ab"); -#endif -#endif -#ifdef OUTPUT_YUV_SKINMAP - yuv_skinmap_file = fopen("skinmap.yuv", "ab"); -#endif -#ifdef OUTPUT_YUV_REC - yuv_rec_file = fopen("rec.yuv", "wb"); -#endif - -#if 0 - framepsnr = fopen("framepsnr.stt", "a"); - kf_list = fopen("kf_list.stt", "w"); -#endif - - cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED; - - if (oxcf->pass == 1) { - vp10_init_first_pass(cpi); - } else if (oxcf->pass == 2) { - const size_t packet_sz = sizeof(FIRSTPASS_STATS); - const int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz); - -#if CONFIG_FP_MB_STATS - if (cpi->use_fp_mb_stats) { - const size_t psz = cpi->common.MBs * sizeof(uint8_t); - const int ps = (int)(oxcf->firstpass_mb_stats_in.sz / psz); - - cpi->twopass.firstpass_mb_stats.mb_stats_start = - oxcf->firstpass_mb_stats_in.buf; - cpi->twopass.firstpass_mb_stats.mb_stats_end = - cpi->twopass.firstpass_mb_stats.mb_stats_start + - (ps - 1) * cpi->common.MBs * sizeof(uint8_t); - } -#endif - - cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf; - cpi->twopass.stats_in = cpi->twopass.stats_in_start; - cpi->twopass.stats_in_end = &cpi->twopass.stats_in[packets - 1]; - - vp10_init_second_pass(cpi); - } - - vp10_set_speed_features_framesize_independent(cpi); - vp10_set_speed_features_framesize_dependent(cpi); - - // Allocate memory to store variances for a frame. 
- CHECK_MEM_ERROR(cm, cpi->source_diff_var, - vpx_calloc(cm->MBs, sizeof(diff))); - cpi->source_var_thresh = 0; - cpi->frames_till_next_var_check = 0; - -#define BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF)\ - cpi->fn_ptr[BT].sdf = SDF; \ - cpi->fn_ptr[BT].sdaf = SDAF; \ - cpi->fn_ptr[BT].vf = VF; \ - cpi->fn_ptr[BT].svf = SVF; \ - cpi->fn_ptr[BT].svaf = SVAF; \ - cpi->fn_ptr[BT].sdx3f = SDX3F; \ - cpi->fn_ptr[BT].sdx8f = SDX8F; \ - cpi->fn_ptr[BT].sdx4df = SDX4DF; - - BFP(BLOCK_32X16, vpx_sad32x16, vpx_sad32x16_avg, - vpx_variance32x16, vpx_sub_pixel_variance32x16, - vpx_sub_pixel_avg_variance32x16, NULL, NULL, vpx_sad32x16x4d) - - BFP(BLOCK_16X32, vpx_sad16x32, vpx_sad16x32_avg, - vpx_variance16x32, vpx_sub_pixel_variance16x32, - vpx_sub_pixel_avg_variance16x32, NULL, NULL, vpx_sad16x32x4d) - - BFP(BLOCK_64X32, vpx_sad64x32, vpx_sad64x32_avg, - vpx_variance64x32, vpx_sub_pixel_variance64x32, - vpx_sub_pixel_avg_variance64x32, NULL, NULL, vpx_sad64x32x4d) - - BFP(BLOCK_32X64, vpx_sad32x64, vpx_sad32x64_avg, - vpx_variance32x64, vpx_sub_pixel_variance32x64, - vpx_sub_pixel_avg_variance32x64, NULL, NULL, vpx_sad32x64x4d) - - BFP(BLOCK_32X32, vpx_sad32x32, vpx_sad32x32_avg, - vpx_variance32x32, vpx_sub_pixel_variance32x32, - vpx_sub_pixel_avg_variance32x32, vpx_sad32x32x3, vpx_sad32x32x8, - vpx_sad32x32x4d) - - BFP(BLOCK_64X64, vpx_sad64x64, vpx_sad64x64_avg, - vpx_variance64x64, vpx_sub_pixel_variance64x64, - vpx_sub_pixel_avg_variance64x64, vpx_sad64x64x3, vpx_sad64x64x8, - vpx_sad64x64x4d) - - BFP(BLOCK_16X16, vpx_sad16x16, vpx_sad16x16_avg, - vpx_variance16x16, vpx_sub_pixel_variance16x16, - vpx_sub_pixel_avg_variance16x16, vpx_sad16x16x3, vpx_sad16x16x8, - vpx_sad16x16x4d) - - BFP(BLOCK_16X8, vpx_sad16x8, vpx_sad16x8_avg, - vpx_variance16x8, vpx_sub_pixel_variance16x8, - vpx_sub_pixel_avg_variance16x8, - vpx_sad16x8x3, vpx_sad16x8x8, vpx_sad16x8x4d) - - BFP(BLOCK_8X16, vpx_sad8x16, vpx_sad8x16_avg, - vpx_variance8x16, vpx_sub_pixel_variance8x16, - 
vpx_sub_pixel_avg_variance8x16, - vpx_sad8x16x3, vpx_sad8x16x8, vpx_sad8x16x4d) - - BFP(BLOCK_8X8, vpx_sad8x8, vpx_sad8x8_avg, - vpx_variance8x8, vpx_sub_pixel_variance8x8, - vpx_sub_pixel_avg_variance8x8, - vpx_sad8x8x3, vpx_sad8x8x8, vpx_sad8x8x4d) - - BFP(BLOCK_8X4, vpx_sad8x4, vpx_sad8x4_avg, - vpx_variance8x4, vpx_sub_pixel_variance8x4, - vpx_sub_pixel_avg_variance8x4, NULL, vpx_sad8x4x8, vpx_sad8x4x4d) - - BFP(BLOCK_4X8, vpx_sad4x8, vpx_sad4x8_avg, - vpx_variance4x8, vpx_sub_pixel_variance4x8, - vpx_sub_pixel_avg_variance4x8, NULL, vpx_sad4x8x8, vpx_sad4x8x4d) - - BFP(BLOCK_4X4, vpx_sad4x4, vpx_sad4x4_avg, - vpx_variance4x4, vpx_sub_pixel_variance4x4, - vpx_sub_pixel_avg_variance4x4, - vpx_sad4x4x3, vpx_sad4x4x8, vpx_sad4x4x4d) - -#if CONFIG_VP9_HIGHBITDEPTH - highbd_set_var_fns(cpi); -#endif - - /* vp10_init_quantizer() is first called here. Add check in - * vp10_frame_init_quantizer() so that vp10_init_quantizer is only - * called later when needed. This will avoid unnecessary calls of - * vp10_init_quantizer() for every frame. 
- */ - vp10_init_quantizer(cpi); - - vp10_loop_filter_init(cm); - - cm->error.setjmp = 0; - - return cpi; -} -#define SNPRINT(H, T) \ - snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T)) - -#define SNPRINT2(H, T, V) \ - snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T), (V)) - -void vp10_remove_compressor(VP10_COMP *cpi) { - VP10_COMMON *cm; - unsigned int i; - int t; - - if (!cpi) - return; - - cm = &cpi->common; - if (cm->current_video_frame > 0) { -#if CONFIG_INTERNAL_STATS - vpx_clear_system_state(); - - if (cpi->oxcf.pass != 1) { - char headings[512] = {0}; - char results[512] = {0}; - FILE *f = fopen("opsnr.stt", "a"); - double time_encoded = (cpi->last_end_time_stamp_seen - - cpi->first_time_stamp_ever) / 10000000.000; - double total_encode_time = (cpi->time_receive_data + - cpi->time_compress_data) / 1000.000; - const double dr = - (double)cpi->bytes * (double) 8 / (double)1000 / time_encoded; - const double peak = (double)((1 << cpi->oxcf.input_bit_depth) - 1); - - if (cpi->b_calculate_psnr) { - const double total_psnr = - vpx_sse_to_psnr((double)cpi->total_samples, peak, - (double)cpi->total_sq_error); - const double totalp_psnr = - vpx_sse_to_psnr((double)cpi->totalp_samples, peak, - (double)cpi->totalp_sq_error); - const double total_ssim = 100 * pow(cpi->summed_quality / - cpi->summed_weights, 8.0); - const double totalp_ssim = 100 * pow(cpi->summedp_quality / - cpi->summedp_weights, 8.0); - - snprintf(headings, sizeof(headings), - "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\t" - "VPXSSIM\tVPSSIMP\tFASTSIM\tPSNRHVS\t" - "WstPsnr\tWstSsim\tWstFast\tWstHVS"); - snprintf(results, sizeof(results), - "%7.2f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t" - "%7.3f\t%7.3f\t%7.3f\t%7.3f\t" - "%7.3f\t%7.3f\t%7.3f\t%7.3f", - dr, cpi->psnr.stat[ALL] / cpi->count, total_psnr, - cpi->psnrp.stat[ALL] / cpi->count, totalp_psnr, - total_ssim, totalp_ssim, - cpi->fastssim.stat[ALL] / cpi->count, - cpi->psnrhvs.stat[ALL] / cpi->count, - cpi->psnr.worst, cpi->worst_ssim, 
cpi->fastssim.worst, - cpi->psnrhvs.worst); - - if (cpi->b_calculate_blockiness) { - SNPRINT(headings, "\t Block\tWstBlck"); - SNPRINT2(results, "\t%7.3f", cpi->total_blockiness / cpi->count); - SNPRINT2(results, "\t%7.3f", cpi->worst_blockiness); - } - - if (cpi->b_calculate_consistency) { - double consistency = - vpx_sse_to_psnr((double)cpi->totalp_samples, peak, - (double)cpi->total_inconsistency); - - SNPRINT(headings, "\tConsist\tWstCons"); - SNPRINT2(results, "\t%7.3f", consistency); - SNPRINT2(results, "\t%7.3f", cpi->worst_consistency); - } - - if (cpi->b_calculate_ssimg) { - SNPRINT(headings, "\t SSIMG\tWtSSIMG"); - SNPRINT2(results, "\t%7.3f", cpi->ssimg.stat[ALL] / cpi->count); - SNPRINT2(results, "\t%7.3f", cpi->ssimg.worst); - } - - fprintf(f, "%s\t Time\n", headings); - fprintf(f, "%s\t%8.0f\n", results, total_encode_time); - } - - fclose(f); - } - -#endif - -#if 0 - { - printf("\n_pick_loop_filter_level:%d\n", cpi->time_pick_lpf / 1000); - printf("\n_frames recive_data encod_mb_row compress_frame Total\n"); - printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, - cpi->time_receive_data / 1000, cpi->time_encode_sb_row / 1000, - cpi->time_compress_data / 1000, - (cpi->time_receive_data + cpi->time_compress_data) / 1000); - } -#endif - } - -#if CONFIG_VP9_TEMPORAL_DENOISING - vp10_denoiser_free(&(cpi->denoiser)); -#endif - - for (t = 0; t < cpi->num_workers; ++t) { - VPxWorker *const worker = &cpi->workers[t]; - EncWorkerData *const thread_data = &cpi->tile_thr_data[t]; - - // Deallocate allocated threads. - vpx_get_worker_interface()->end(worker); - - // Deallocate allocated thread data. 
- if (t < cpi->num_workers - 1) { - vpx_free(thread_data->td->counts); - vp10_free_pc_tree(thread_data->td); - vpx_free(thread_data->td); - } - } - vpx_free(cpi->tile_thr_data); - vpx_free(cpi->workers); - - if (cpi->num_workers > 1) - vp10_loop_filter_dealloc(&cpi->lf_row_sync); - - dealloc_compressor_data(cpi); - - for (i = 0; i < sizeof(cpi->mbgraph_stats) / - sizeof(cpi->mbgraph_stats[0]); ++i) { - vpx_free(cpi->mbgraph_stats[i].mb_stats); - } - -#if CONFIG_FP_MB_STATS - if (cpi->use_fp_mb_stats) { - vpx_free(cpi->twopass.frame_mb_stats_buf); - cpi->twopass.frame_mb_stats_buf = NULL; - } -#endif - - vp10_remove_common(cm); - vp10_free_ref_frame_buffers(cm->buffer_pool); -#if CONFIG_VP9_POSTPROC - vp10_free_postproc_buffers(cm); -#endif - vpx_free(cpi); - -#if CONFIG_VP9_TEMPORAL_DENOISING -#ifdef OUTPUT_YUV_DENOISED - fclose(yuv_denoised_file); -#endif -#endif -#ifdef OUTPUT_YUV_SKINMAP - fclose(yuv_skinmap_file); -#endif -#ifdef OUTPUT_YUV_REC - fclose(yuv_rec_file); -#endif - -#if 0 - - if (keyfile) - fclose(keyfile); - - if (framepsnr) - fclose(framepsnr); - - if (kf_list) - fclose(kf_list); - -#endif -} - -/* TODO(yaowu): The block_variance calls the unoptimized versions of variance() - * and highbd_8_variance(). It should not. 
- */ -static void encoder_variance(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - int w, int h, unsigned int *sse, int *sum) { - int i, j; - - *sum = 0; - *sse = 0; - - for (i = 0; i < h; i++) { - for (j = 0; j < w; j++) { - const int diff = a[j] - b[j]; - *sum += diff; - *sse += diff * diff; - } - - a += a_stride; - b += b_stride; - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -static void encoder_highbd_variance64(const uint8_t *a8, int a_stride, - const uint8_t *b8, int b_stride, - int w, int h, uint64_t *sse, - uint64_t *sum) { - int i, j; - - uint16_t *a = CONVERT_TO_SHORTPTR(a8); - uint16_t *b = CONVERT_TO_SHORTPTR(b8); - *sum = 0; - *sse = 0; - - for (i = 0; i < h; i++) { - for (j = 0; j < w; j++) { - const int diff = a[j] - b[j]; - *sum += diff; - *sse += diff * diff; - } - a += a_stride; - b += b_stride; - } -} - -static void encoder_highbd_8_variance(const uint8_t *a8, int a_stride, - const uint8_t *b8, int b_stride, - int w, int h, - unsigned int *sse, int *sum) { - uint64_t sse_long = 0; - uint64_t sum_long = 0; - encoder_highbd_variance64(a8, a_stride, b8, b_stride, w, h, - &sse_long, &sum_long); - *sse = (unsigned int)sse_long; - *sum = (int)sum_long; -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -static int64_t get_sse(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - int width, int height) { - const int dw = width % 16; - const int dh = height % 16; - int64_t total_sse = 0; - unsigned int sse = 0; - int sum = 0; - int x, y; - - if (dw > 0) { - encoder_variance(&a[width - dw], a_stride, &b[width - dw], b_stride, - dw, height, &sse, &sum); - total_sse += sse; - } - - if (dh > 0) { - encoder_variance(&a[(height - dh) * a_stride], a_stride, - &b[(height - dh) * b_stride], b_stride, - width - dw, dh, &sse, &sum); - total_sse += sse; - } - - for (y = 0; y < height / 16; ++y) { - const uint8_t *pa = a; - const uint8_t *pb = b; - for (x = 0; x < width / 16; ++x) { - vpx_mse16x16(pa, a_stride, pb, b_stride, &sse); - total_sse += sse; 
- - pa += 16; - pb += 16; - } - - a += 16 * a_stride; - b += 16 * b_stride; - } - - return total_sse; -} - -#if CONFIG_VP9_HIGHBITDEPTH -static int64_t highbd_get_sse_shift(const uint8_t *a8, int a_stride, - const uint8_t *b8, int b_stride, - int width, int height, - unsigned int input_shift) { - const uint16_t *a = CONVERT_TO_SHORTPTR(a8); - const uint16_t *b = CONVERT_TO_SHORTPTR(b8); - int64_t total_sse = 0; - int x, y; - for (y = 0; y < height; ++y) { - for (x = 0; x < width; ++x) { - int64_t diff; - diff = (a[x] >> input_shift) - (b[x] >> input_shift); - total_sse += diff * diff; - } - a += a_stride; - b += b_stride; - } - return total_sse; -} - -static int64_t highbd_get_sse(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - int width, int height) { - int64_t total_sse = 0; - int x, y; - const int dw = width % 16; - const int dh = height % 16; - unsigned int sse = 0; - int sum = 0; - if (dw > 0) { - encoder_highbd_8_variance(&a[width - dw], a_stride, - &b[width - dw], b_stride, - dw, height, &sse, &sum); - total_sse += sse; - } - if (dh > 0) { - encoder_highbd_8_variance(&a[(height - dh) * a_stride], a_stride, - &b[(height - dh) * b_stride], b_stride, - width - dw, dh, &sse, &sum); - total_sse += sse; - } - for (y = 0; y < height / 16; ++y) { - const uint8_t *pa = a; - const uint8_t *pb = b; - for (x = 0; x < width / 16; ++x) { - vpx_highbd_8_mse16x16(pa, a_stride, pb, b_stride, &sse); - total_sse += sse; - pa += 16; - pb += 16; - } - a += 16 * a_stride; - b += 16 * b_stride; - } - return total_sse; -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -typedef struct { - double psnr[4]; // total/y/u/v - uint64_t sse[4]; // total/y/u/v - uint32_t samples[4]; // total/y/u/v -} PSNR_STATS; - -#if CONFIG_VP9_HIGHBITDEPTH -static void calc_highbd_psnr(const YV12_BUFFER_CONFIG *a, - const YV12_BUFFER_CONFIG *b, - PSNR_STATS *psnr, - unsigned int bit_depth, - unsigned int in_bit_depth) { - const int widths[3] = - {a->y_crop_width, a->uv_crop_width, 
a->uv_crop_width }; - const int heights[3] = - {a->y_crop_height, a->uv_crop_height, a->uv_crop_height}; - const uint8_t *a_planes[3] = {a->y_buffer, a->u_buffer, a->v_buffer }; - const int a_strides[3] = {a->y_stride, a->uv_stride, a->uv_stride}; - const uint8_t *b_planes[3] = {b->y_buffer, b->u_buffer, b->v_buffer }; - const int b_strides[3] = {b->y_stride, b->uv_stride, b->uv_stride}; - int i; - uint64_t total_sse = 0; - uint32_t total_samples = 0; - const double peak = (double)((1 << in_bit_depth) - 1); - const unsigned int input_shift = bit_depth - in_bit_depth; - - for (i = 0; i < 3; ++i) { - const int w = widths[i]; - const int h = heights[i]; - const uint32_t samples = w * h; - uint64_t sse; - if (a->flags & YV12_FLAG_HIGHBITDEPTH) { - if (input_shift) { - sse = highbd_get_sse_shift(a_planes[i], a_strides[i], - b_planes[i], b_strides[i], w, h, - input_shift); - } else { - sse = highbd_get_sse(a_planes[i], a_strides[i], - b_planes[i], b_strides[i], w, h); - } - } else { - sse = get_sse(a_planes[i], a_strides[i], - b_planes[i], b_strides[i], - w, h); - } - psnr->sse[1 + i] = sse; - psnr->samples[1 + i] = samples; - psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse); - - total_sse += sse; - total_samples += samples; - } - - psnr->sse[0] = total_sse; - psnr->samples[0] = total_samples; - psnr->psnr[0] = vpx_sse_to_psnr((double)total_samples, peak, - (double)total_sse); -} - -#else // !CONFIG_VP9_HIGHBITDEPTH - -static void calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b, - PSNR_STATS *psnr) { - static const double peak = 255.0; - const int widths[3] = { - a->y_crop_width, a->uv_crop_width, a->uv_crop_width}; - const int heights[3] = { - a->y_crop_height, a->uv_crop_height, a->uv_crop_height}; - const uint8_t *a_planes[3] = {a->y_buffer, a->u_buffer, a->v_buffer}; - const int a_strides[3] = {a->y_stride, a->uv_stride, a->uv_stride}; - const uint8_t *b_planes[3] = {b->y_buffer, b->u_buffer, b->v_buffer}; - const int b_strides[3] = 
{b->y_stride, b->uv_stride, b->uv_stride}; - int i; - uint64_t total_sse = 0; - uint32_t total_samples = 0; - - for (i = 0; i < 3; ++i) { - const int w = widths[i]; - const int h = heights[i]; - const uint32_t samples = w * h; - const uint64_t sse = get_sse(a_planes[i], a_strides[i], - b_planes[i], b_strides[i], - w, h); - psnr->sse[1 + i] = sse; - psnr->samples[1 + i] = samples; - psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse); - - total_sse += sse; - total_samples += samples; - } - - psnr->sse[0] = total_sse; - psnr->samples[0] = total_samples; - psnr->psnr[0] = vpx_sse_to_psnr((double)total_samples, peak, - (double)total_sse); -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -static void generate_psnr_packet(VP10_COMP *cpi) { - struct vpx_codec_cx_pkt pkt; - int i; - PSNR_STATS psnr; -#if CONFIG_VP9_HIGHBITDEPTH - calc_highbd_psnr(cpi->Source, cpi->common.frame_to_show, &psnr, - cpi->td.mb.e_mbd.bd, cpi->oxcf.input_bit_depth); -#else - calc_psnr(cpi->Source, cpi->common.frame_to_show, &psnr); -#endif - - for (i = 0; i < 4; ++i) { - pkt.data.psnr.samples[i] = psnr.samples[i]; - pkt.data.psnr.sse[i] = psnr.sse[i]; - pkt.data.psnr.psnr[i] = psnr.psnr[i]; - } - pkt.kind = VPX_CODEC_PSNR_PKT; - vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt); -} - -int vp10_use_as_reference(VP10_COMP *cpi, int ref_frame_flags) { - if (ref_frame_flags > 7) - return -1; - - cpi->ref_frame_flags = ref_frame_flags; - return 0; -} - -void vp10_update_reference(VP10_COMP *cpi, int ref_frame_flags) { - cpi->ext_refresh_golden_frame = (ref_frame_flags & VP9_GOLD_FLAG) != 0; - cpi->ext_refresh_alt_ref_frame = (ref_frame_flags & VP9_ALT_FLAG) != 0; - cpi->ext_refresh_last_frame = (ref_frame_flags & VP9_LAST_FLAG) != 0; - cpi->ext_refresh_frame_flags_pending = 1; -} - -static YV12_BUFFER_CONFIG *get_vp10_ref_frame_buffer(VP10_COMP *cpi, - VP9_REFFRAME ref_frame_flag) { - MV_REFERENCE_FRAME ref_frame = NONE; - if (ref_frame_flag == VP9_LAST_FLAG) - ref_frame = LAST_FRAME; - else if 
(ref_frame_flag == VP9_GOLD_FLAG) - ref_frame = GOLDEN_FRAME; - else if (ref_frame_flag == VP9_ALT_FLAG) - ref_frame = ALTREF_FRAME; - - return ref_frame == NONE ? NULL : get_ref_frame_buffer(cpi, ref_frame); -} - -int vp10_copy_reference_enc(VP10_COMP *cpi, VP9_REFFRAME ref_frame_flag, - YV12_BUFFER_CONFIG *sd) { - YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag); - if (cfg) { - vp8_yv12_copy_frame(cfg, sd); - return 0; - } else { - return -1; - } -} - -int vp10_set_reference_enc(VP10_COMP *cpi, VP9_REFFRAME ref_frame_flag, - YV12_BUFFER_CONFIG *sd) { - YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag); - if (cfg) { - vp8_yv12_copy_frame(sd, cfg); - return 0; - } else { - return -1; - } -} - -int vp10_update_entropy(VP10_COMP * cpi, int update) { - cpi->ext_refresh_frame_context = update; - cpi->ext_refresh_frame_context_pending = 1; - return 0; -} - -#if defined(OUTPUT_YUV_DENOISED) || defined(OUTPUT_YUV_SKINMAP) -// The denoiser buffer is allocated as a YUV 440 buffer. This function writes it -// as YUV 420. We simply use the top-left pixels of the UV buffers, since we do -// not denoise the UV channels at this time. If ever we implement UV channel -// denoising we will have to modify this. 
-void vp10_write_yuv_frame_420(YV12_BUFFER_CONFIG *s, FILE *f) { - uint8_t *src = s->y_buffer; - int h = s->y_height; - - do { - fwrite(src, s->y_width, 1, f); - src += s->y_stride; - } while (--h); - - src = s->u_buffer; - h = s->uv_height; - - do { - fwrite(src, s->uv_width, 1, f); - src += s->uv_stride; - } while (--h); - - src = s->v_buffer; - h = s->uv_height; - - do { - fwrite(src, s->uv_width, 1, f); - src += s->uv_stride; - } while (--h); -} -#endif - -#ifdef OUTPUT_YUV_REC -void vp10_write_yuv_rec_frame(VP10_COMMON *cm) { - YV12_BUFFER_CONFIG *s = cm->frame_to_show; - uint8_t *src = s->y_buffer; - int h = cm->height; - -#if CONFIG_VP9_HIGHBITDEPTH - if (s->flags & YV12_FLAG_HIGHBITDEPTH) { - uint16_t *src16 = CONVERT_TO_SHORTPTR(s->y_buffer); - - do { - fwrite(src16, s->y_width, 2, yuv_rec_file); - src16 += s->y_stride; - } while (--h); - - src16 = CONVERT_TO_SHORTPTR(s->u_buffer); - h = s->uv_height; - - do { - fwrite(src16, s->uv_width, 2, yuv_rec_file); - src16 += s->uv_stride; - } while (--h); - - src16 = CONVERT_TO_SHORTPTR(s->v_buffer); - h = s->uv_height; - - do { - fwrite(src16, s->uv_width, 2, yuv_rec_file); - src16 += s->uv_stride; - } while (--h); - - fflush(yuv_rec_file); - return; - } -#endif // CONFIG_VP9_HIGHBITDEPTH - - do { - fwrite(src, s->y_width, 1, yuv_rec_file); - src += s->y_stride; - } while (--h); - - src = s->u_buffer; - h = s->uv_height; - - do { - fwrite(src, s->uv_width, 1, yuv_rec_file); - src += s->uv_stride; - } while (--h); - - src = s->v_buffer; - h = s->uv_height; - - do { - fwrite(src, s->uv_width, 1, yuv_rec_file); - src += s->uv_stride; - } while (--h); - - fflush(yuv_rec_file); -} -#endif - -#if CONFIG_VP9_HIGHBITDEPTH -static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src, - YV12_BUFFER_CONFIG *dst, - int bd) { -#else -static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src, - YV12_BUFFER_CONFIG *dst) { -#endif // CONFIG_VP9_HIGHBITDEPTH - // TODO(dkovalev): replace 
YV12_BUFFER_CONFIG with vpx_image_t - int i; - const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer}; - const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride}; - const int src_widths[3] = {src->y_crop_width, src->uv_crop_width, - src->uv_crop_width }; - const int src_heights[3] = {src->y_crop_height, src->uv_crop_height, - src->uv_crop_height}; - uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer}; - const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride}; - const int dst_widths[3] = {dst->y_crop_width, dst->uv_crop_width, - dst->uv_crop_width}; - const int dst_heights[3] = {dst->y_crop_height, dst->uv_crop_height, - dst->uv_crop_height}; - - for (i = 0; i < MAX_MB_PLANE; ++i) { -#if CONFIG_VP9_HIGHBITDEPTH - if (src->flags & YV12_FLAG_HIGHBITDEPTH) { - vp10_highbd_resize_plane(srcs[i], src_heights[i], src_widths[i], - src_strides[i], dsts[i], dst_heights[i], - dst_widths[i], dst_strides[i], bd); - } else { - vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i], - dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]); - } -#else - vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i], - dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]); -#endif // CONFIG_VP9_HIGHBITDEPTH - } - vpx_extend_frame_borders(dst); -} - -#if CONFIG_VP9_HIGHBITDEPTH -static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src, - YV12_BUFFER_CONFIG *dst, int bd) { -#else -static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src, - YV12_BUFFER_CONFIG *dst) { -#endif // CONFIG_VP9_HIGHBITDEPTH - const int src_w = src->y_crop_width; - const int src_h = src->y_crop_height; - const int dst_w = dst->y_crop_width; - const int dst_h = dst->y_crop_height; - const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer}; - const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride}; - uint8_t *const dsts[3] = {dst->y_buffer, 
dst->u_buffer, dst->v_buffer}; - const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride}; - const InterpKernel *const kernel = vp10_filter_kernels[EIGHTTAP]; - int x, y, i; - - for (y = 0; y < dst_h; y += 16) { - for (x = 0; x < dst_w; x += 16) { - for (i = 0; i < MAX_MB_PLANE; ++i) { - const int factor = (i == 0 || i == 3 ? 1 : 2); - const int x_q4 = x * (16 / factor) * src_w / dst_w; - const int y_q4 = y * (16 / factor) * src_h / dst_h; - const int src_stride = src_strides[i]; - const int dst_stride = dst_strides[i]; - const uint8_t *src_ptr = srcs[i] + (y / factor) * src_h / dst_h * - src_stride + (x / factor) * src_w / dst_w; - uint8_t *dst_ptr = dsts[i] + (y / factor) * dst_stride + (x / factor); - -#if CONFIG_VP9_HIGHBITDEPTH - if (src->flags & YV12_FLAG_HIGHBITDEPTH) { - vpx_highbd_convolve8(src_ptr, src_stride, dst_ptr, dst_stride, - kernel[x_q4 & 0xf], 16 * src_w / dst_w, - kernel[y_q4 & 0xf], 16 * src_h / dst_h, - 16 / factor, 16 / factor, bd); - } else { - vpx_convolve8(src_ptr, src_stride, dst_ptr, dst_stride, - kernel[x_q4 & 0xf], 16 * src_w / dst_w, - kernel[y_q4 & 0xf], 16 * src_h / dst_h, - 16 / factor, 16 / factor); - } -#else - vpx_convolve8(src_ptr, src_stride, dst_ptr, dst_stride, - kernel[x_q4 & 0xf], 16 * src_w / dst_w, - kernel[y_q4 & 0xf], 16 * src_h / dst_h, - 16 / factor, 16 / factor); -#endif // CONFIG_VP9_HIGHBITDEPTH - } - } - } - - vpx_extend_frame_borders(dst); -} - -static int scale_down(VP10_COMP *cpi, int q) { - RATE_CONTROL *const rc = &cpi->rc; - GF_GROUP *const gf_group = &cpi->twopass.gf_group; - int scale = 0; - assert(frame_is_kf_gf_arf(cpi)); - - if (rc->frame_size_selector == UNSCALED && - q >= rc->rf_level_maxq[gf_group->rf_level[gf_group->index]]) { - const int max_size_thresh = (int)(rate_thresh_mult[SCALE_STEP1] - * VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth)); - scale = rc->projected_frame_size > max_size_thresh ? 
1 : 0; - } - return scale; -} - -// Function to test for conditions that indicate we should loop -// back and recode a frame. -static int recode_loop_test(VP10_COMP *cpi, - int high_limit, int low_limit, - int q, int maxq, int minq) { - const RATE_CONTROL *const rc = &cpi->rc; - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - const int frame_is_kfgfarf = frame_is_kf_gf_arf(cpi); - int force_recode = 0; - - if ((rc->projected_frame_size >= rc->max_frame_bandwidth) || - (cpi->sf.recode_loop == ALLOW_RECODE) || - (frame_is_kfgfarf && - (cpi->sf.recode_loop == ALLOW_RECODE_KFARFGF))) { - if (frame_is_kfgfarf && - (oxcf->resize_mode == RESIZE_DYNAMIC) && - scale_down(cpi, q)) { - // Code this group at a lower resolution. - cpi->resize_pending = 1; - return 1; - } - - // TODO(agrange) high_limit could be greater than the scale-down threshold. - if ((rc->projected_frame_size > high_limit && q < maxq) || - (rc->projected_frame_size < low_limit && q > minq)) { - force_recode = 1; - } else if (cpi->oxcf.rc_mode == VPX_CQ) { - // Deal with frame undershoot and whether or not we are - // below the automatically set cq level. - if (q > oxcf->cq_level && - rc->projected_frame_size < ((rc->this_frame_target * 7) >> 3)) { - force_recode = 1; - } - } - } - return force_recode; -} - -void vp10_update_reference_frames(VP10_COMP *cpi) { - VP10_COMMON * const cm = &cpi->common; - BufferPool *const pool = cm->buffer_pool; - - // At this point the new frame has been encoded. - // If any buffer copy / swapping is signaled it should be done here. - if (cm->frame_type == KEY_FRAME) { - ref_cnt_fb(pool->frame_bufs, - &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx); - ref_cnt_fb(pool->frame_bufs, - &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx); - } else if (vp10_preserve_existing_gf(cpi)) { - // We have decided to preserve the previously existing golden frame as our - // new ARF frame. 
However, in the short term in function - // vp10_bitstream.c::get_refresh_mask() we left it in the GF slot and, if - // we're updating the GF with the current decoded frame, we save it to the - // ARF slot instead. - // We now have to update the ARF with the current frame and swap gld_fb_idx - // and alt_fb_idx so that, overall, we've stored the old GF in the new ARF - // slot and, if we're updating the GF, the current frame becomes the new GF. - int tmp; - - ref_cnt_fb(pool->frame_bufs, - &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx); - - tmp = cpi->alt_fb_idx; - cpi->alt_fb_idx = cpi->gld_fb_idx; - cpi->gld_fb_idx = tmp; - } else { /* For non key/golden frames */ - if (cpi->refresh_alt_ref_frame) { - int arf_idx = cpi->alt_fb_idx; - if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) { - const GF_GROUP *const gf_group = &cpi->twopass.gf_group; - arf_idx = gf_group->arf_update_idx[gf_group->index]; - } - - ref_cnt_fb(pool->frame_bufs, - &cm->ref_frame_map[arf_idx], cm->new_fb_idx); - memcpy(cpi->interp_filter_selected[ALTREF_FRAME], - cpi->interp_filter_selected[0], - sizeof(cpi->interp_filter_selected[0])); - } - - if (cpi->refresh_golden_frame) { - ref_cnt_fb(pool->frame_bufs, - &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx); - if (!cpi->rc.is_src_frame_alt_ref) - memcpy(cpi->interp_filter_selected[GOLDEN_FRAME], - cpi->interp_filter_selected[0], - sizeof(cpi->interp_filter_selected[0])); - else - memcpy(cpi->interp_filter_selected[GOLDEN_FRAME], - cpi->interp_filter_selected[ALTREF_FRAME], - sizeof(cpi->interp_filter_selected[ALTREF_FRAME])); - } - } - - if (cpi->refresh_last_frame) { - ref_cnt_fb(pool->frame_bufs, - &cm->ref_frame_map[cpi->lst_fb_idx], cm->new_fb_idx); - if (!cpi->rc.is_src_frame_alt_ref) - memcpy(cpi->interp_filter_selected[LAST_FRAME], - cpi->interp_filter_selected[0], - sizeof(cpi->interp_filter_selected[0])); - } -#if CONFIG_VP9_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity > 0) { - 
vp10_denoiser_update_frame_info(&cpi->denoiser, - *cpi->Source, - cpi->common.frame_type, - cpi->refresh_alt_ref_frame, - cpi->refresh_golden_frame, - cpi->refresh_last_frame); - } -#endif -} - -static void loopfilter_frame(VP10_COMP *cpi, VP10_COMMON *cm) { - MACROBLOCKD *xd = &cpi->td.mb.e_mbd; - struct loopfilter *lf = &cm->lf; - if (is_lossless_requested(&cpi->oxcf)) { - lf->filter_level = 0; - } else { - struct vpx_usec_timer timer; - - vpx_clear_system_state(); - - vpx_usec_timer_start(&timer); - - vp10_pick_filter_level(cpi->Source, cpi, cpi->sf.lpf_pick); - - vpx_usec_timer_mark(&timer); - cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer); - } - - if (lf->filter_level > 0) { - if (cpi->num_workers > 1) - vp10_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane, - lf->filter_level, 0, 0, - cpi->workers, cpi->num_workers, - &cpi->lf_row_sync); - else - vp10_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0); - } - - vpx_extend_frame_inner_borders(cm->frame_to_show); -} - -static INLINE void alloc_frame_mvs(const VP10_COMMON *cm, - int buffer_idx) { - RefCntBuffer *const new_fb_ptr = &cm->buffer_pool->frame_bufs[buffer_idx]; - if (new_fb_ptr->mvs == NULL || - new_fb_ptr->mi_rows < cm->mi_rows || - new_fb_ptr->mi_cols < cm->mi_cols) { - vpx_free(new_fb_ptr->mvs); - new_fb_ptr->mvs = - (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols, - sizeof(*new_fb_ptr->mvs)); - new_fb_ptr->mi_rows = cm->mi_rows; - new_fb_ptr->mi_cols = cm->mi_cols; - } -} - -void vp10_scale_references(VP10_COMP *cpi) { - VP10_COMMON *cm = &cpi->common; - MV_REFERENCE_FRAME ref_frame; - const VP9_REFFRAME ref_mask[3] = {VP9_LAST_FLAG, VP9_GOLD_FLAG, VP9_ALT_FLAG}; - - for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { - // Need to convert from VP9_REFFRAME to index into ref_mask (subtract 1). 
- if (cpi->ref_frame_flags & ref_mask[ref_frame - 1]) { - BufferPool *const pool = cm->buffer_pool; - const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, - ref_frame); - - if (ref == NULL) { - cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX; - continue; - } - -#if CONFIG_VP9_HIGHBITDEPTH - if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) { - RefCntBuffer *new_fb_ptr = NULL; - int force_scaling = 0; - int new_fb = cpi->scaled_ref_idx[ref_frame - 1]; - if (new_fb == INVALID_IDX) { - new_fb = get_free_fb(cm); - force_scaling = 1; - } - if (new_fb == INVALID_IDX) - return; - new_fb_ptr = &pool->frame_bufs[new_fb]; - if (force_scaling || - new_fb_ptr->buf.y_crop_width != cm->width || - new_fb_ptr->buf.y_crop_height != cm->height) { - vpx_realloc_frame_buffer(&new_fb_ptr->buf, - cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, - cm->use_highbitdepth, - VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, - NULL, NULL, NULL); - scale_and_extend_frame(ref, &new_fb_ptr->buf, (int)cm->bit_depth); - cpi->scaled_ref_idx[ref_frame - 1] = new_fb; - alloc_frame_mvs(cm, new_fb); - } -#else - if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) { - RefCntBuffer *new_fb_ptr = NULL; - int force_scaling = 0; - int new_fb = cpi->scaled_ref_idx[ref_frame - 1]; - if (new_fb == INVALID_IDX) { - new_fb = get_free_fb(cm); - force_scaling = 1; - } - if (new_fb == INVALID_IDX) - return; - new_fb_ptr = &pool->frame_bufs[new_fb]; - if (force_scaling || - new_fb_ptr->buf.y_crop_width != cm->width || - new_fb_ptr->buf.y_crop_height != cm->height) { - vpx_realloc_frame_buffer(&new_fb_ptr->buf, - cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, - VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, - NULL, NULL, NULL); - scale_and_extend_frame(ref, &new_fb_ptr->buf); - cpi->scaled_ref_idx[ref_frame - 1] = new_fb; - alloc_frame_mvs(cm, new_fb); - } -#endif // CONFIG_VP9_HIGHBITDEPTH - } else { - const int buf_idx = 
get_ref_frame_buf_idx(cpi, ref_frame); - RefCntBuffer *const buf = &pool->frame_bufs[buf_idx]; - buf->buf.y_crop_width = ref->y_crop_width; - buf->buf.y_crop_height = ref->y_crop_height; - cpi->scaled_ref_idx[ref_frame - 1] = buf_idx; - ++buf->ref_count; - } - } else { - if (cpi->oxcf.pass != 0) - cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX; - } - } -} - -static void release_scaled_references(VP10_COMP *cpi) { - VP10_COMMON *cm = &cpi->common; - int i; - if (cpi->oxcf.pass == 0) { - // Only release scaled references under certain conditions: - // if reference will be updated, or if scaled reference has same resolution. - int refresh[3]; - refresh[0] = (cpi->refresh_last_frame) ? 1 : 0; - refresh[1] = (cpi->refresh_golden_frame) ? 1 : 0; - refresh[2] = (cpi->refresh_alt_ref_frame) ? 1 : 0; - for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) { - const int idx = cpi->scaled_ref_idx[i - 1]; - RefCntBuffer *const buf = idx != INVALID_IDX ? - &cm->buffer_pool->frame_bufs[idx] : NULL; - const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, i); - if (buf != NULL && - (refresh[i - 1] || - (buf->buf.y_crop_width == ref->y_crop_width && - buf->buf.y_crop_height == ref->y_crop_height))) { - --buf->ref_count; - cpi->scaled_ref_idx[i -1] = INVALID_IDX; - } - } - } else { - for (i = 0; i < MAX_REF_FRAMES; ++i) { - const int idx = cpi->scaled_ref_idx[i]; - RefCntBuffer *const buf = idx != INVALID_IDX ? 
- &cm->buffer_pool->frame_bufs[idx] : NULL; - if (buf != NULL) { - --buf->ref_count; - cpi->scaled_ref_idx[i] = INVALID_IDX; - } - } - } -} - -static void full_to_model_count(unsigned int *model_count, - unsigned int *full_count) { - int n; - model_count[ZERO_TOKEN] = full_count[ZERO_TOKEN]; - model_count[ONE_TOKEN] = full_count[ONE_TOKEN]; - model_count[TWO_TOKEN] = full_count[TWO_TOKEN]; - for (n = THREE_TOKEN; n < EOB_TOKEN; ++n) - model_count[TWO_TOKEN] += full_count[n]; - model_count[EOB_MODEL_TOKEN] = full_count[EOB_TOKEN]; -} - -static void full_to_model_counts(vp10_coeff_count_model *model_count, - vp10_coeff_count *full_count) { - int i, j, k, l; - - for (i = 0; i < PLANE_TYPES; ++i) - for (j = 0; j < REF_TYPES; ++j) - for (k = 0; k < COEF_BANDS; ++k) - for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) - full_to_model_count(model_count[i][j][k][l], full_count[i][j][k][l]); -} - -#if 0 && CONFIG_INTERNAL_STATS -static void output_frame_level_debug_stats(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - FILE *const f = fopen("tmp.stt", cm->current_video_frame ? 
"a" : "w"); - int64_t recon_err; - - vpx_clear_system_state(); - - recon_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); - - if (cpi->twopass.total_left_stats.coded_error != 0.0) - fprintf(f, "%10u %dx%d %10d %10d %d %d %10d %10d %10d %10d" - "%10"PRId64" %10"PRId64" %5d %5d %10"PRId64" " - "%10"PRId64" %10"PRId64" %10d " - "%7.2lf %7.2lf %7.2lf %7.2lf %7.2lf" - "%6d %6d %5d %5d %5d " - "%10"PRId64" %10.3lf" - "%10lf %8u %10"PRId64" %10d %10d %10d\n", - cpi->common.current_video_frame, - cm->width, cm->height, - cpi->td.rd_counts.m_search_count, - cpi->td.rd_counts.ex_search_count, - cpi->rc.source_alt_ref_pending, - cpi->rc.source_alt_ref_active, - cpi->rc.this_frame_target, - cpi->rc.projected_frame_size, - cpi->rc.projected_frame_size / cpi->common.MBs, - (cpi->rc.projected_frame_size - cpi->rc.this_frame_target), - cpi->rc.vbr_bits_off_target, - cpi->rc.vbr_bits_off_target_fast, - cpi->twopass.extend_minq, - cpi->twopass.extend_minq_fast, - cpi->rc.total_target_vs_actual, - (cpi->rc.starting_buffer_level - cpi->rc.bits_off_target), - cpi->rc.total_actual_bits, cm->base_qindex, - vp10_convert_qindex_to_q(cm->base_qindex, cm->bit_depth), - (double)vp10_dc_quant(cm->base_qindex, 0, cm->bit_depth) / 4.0, - vp10_convert_qindex_to_q(cpi->twopass.active_worst_quality, - cm->bit_depth), - cpi->rc.avg_q, - vp10_convert_qindex_to_q(cpi->oxcf.cq_level, cm->bit_depth), - cpi->refresh_last_frame, cpi->refresh_golden_frame, - cpi->refresh_alt_ref_frame, cm->frame_type, cpi->rc.gfu_boost, - cpi->twopass.bits_left, - cpi->twopass.total_left_stats.coded_error, - cpi->twopass.bits_left / - (1 + cpi->twopass.total_left_stats.coded_error), - cpi->tot_recode_hits, recon_err, cpi->rc.kf_boost, - cpi->twopass.kf_zeromotion_pct, - cpi->twopass.fr_content_type); - - fclose(f); - - if (0) { - FILE *const fmodes = fopen("Modes.stt", "a"); - int i; - - fprintf(fmodes, "%6d:%1d:%1d:%1d ", cpi->common.current_video_frame, - cm->frame_type, cpi->refresh_golden_frame, - 
cpi->refresh_alt_ref_frame); - - for (i = 0; i < MAX_MODES; ++i) - fprintf(fmodes, "%5d ", cpi->mode_chosen_counts[i]); - - fprintf(fmodes, "\n"); - - fclose(fmodes); - } -} -#endif - -static void set_mv_search_params(VP10_COMP *cpi) { - const VP10_COMMON *const cm = &cpi->common; - const unsigned int max_mv_def = VPXMIN(cm->width, cm->height); - - // Default based on max resolution. - cpi->mv_step_param = vp10_init_search_range(max_mv_def); - - if (cpi->sf.mv.auto_mv_step_size) { - if (frame_is_intra_only(cm)) { - // Initialize max_mv_magnitude for use in the first INTER frame - // after a key/intra-only frame. - cpi->max_mv_magnitude = max_mv_def; - } else { - if (cm->show_frame) { - // Allow mv_steps to correspond to twice the max mv magnitude found - // in the previous frame, capped by the default max_mv_magnitude based - // on resolution. - cpi->mv_step_param = vp10_init_search_range( - VPXMIN(max_mv_def, 2 * cpi->max_mv_magnitude)); - } - cpi->max_mv_magnitude = 0; - } - } -} - -static void set_size_independent_vars(VP10_COMP *cpi) { - vp10_set_speed_features_framesize_independent(cpi); - vp10_set_rd_speed_thresholds(cpi); - vp10_set_rd_speed_thresholds_sub8x8(cpi); - cpi->common.interp_filter = cpi->sf.default_interp_filter; -} - -static void set_size_dependent_vars(VP10_COMP *cpi, int *q, - int *bottom_index, int *top_index) { - VP10_COMMON *const cm = &cpi->common; - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - - // Setup variables that depend on the dimensions of the frame. - vp10_set_speed_features_framesize_dependent(cpi); - - // Decide q and q bounds. - *q = vp10_rc_pick_q_and_bounds(cpi, bottom_index, top_index); - - if (!frame_is_intra_only(cm)) { - vp10_set_high_precision_mv(cpi, (*q) < HIGH_PRECISION_MV_QTHRESH); - } - - // Configure experimental use of segmentation for enhanced coding of - // static regions if indicated. 
- // Only allowed in the second pass of a two pass encode, as it requires - // lagged coding, and if the relevant speed feature flag is set. - if (oxcf->pass == 2 && cpi->sf.static_segmentation) - configure_static_seg_features(cpi); - -#if CONFIG_VP9_POSTPROC - if (oxcf->noise_sensitivity > 0) { - int l = 0; - switch (oxcf->noise_sensitivity) { - case 1: - l = 20; - break; - case 2: - l = 40; - break; - case 3: - l = 60; - break; - case 4: - case 5: - l = 100; - break; - case 6: - l = 150; - break; - } - vp10_denoise(cpi->Source, cpi->Source, l); - } -#endif // CONFIG_VP9_POSTPROC -} - -static void init_motion_estimation(VP10_COMP *cpi) { - int y_stride = cpi->scaled_source.y_stride; - - if (cpi->sf.mv.search_method == NSTEP) { - vp10_init3smotion_compensation(&cpi->ss_cfg, y_stride); - } else if (cpi->sf.mv.search_method == DIAMOND) { - vp10_init_dsmotion_compensation(&cpi->ss_cfg, y_stride); - } -} - -static void set_frame_size(VP10_COMP *cpi) { - int ref_frame; - VP10_COMMON *const cm = &cpi->common; - VP10EncoderConfig *const oxcf = &cpi->oxcf; - MACROBLOCKD *const xd = &cpi->td.mb.e_mbd; - - if (oxcf->pass == 2 && - oxcf->rc_mode == VPX_VBR && - ((oxcf->resize_mode == RESIZE_FIXED && cm->current_video_frame == 0) || - (oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) { - vp10_calculate_coded_size( - cpi, &oxcf->scaled_frame_width, &oxcf->scaled_frame_height); - - // There has been a change in frame size. - vp10_set_size_literal(cpi, oxcf->scaled_frame_width, - oxcf->scaled_frame_height); - } - - if (oxcf->pass == 0 && - oxcf->rc_mode == VPX_CBR && - oxcf->resize_mode == RESIZE_DYNAMIC) { - if (cpi->resize_pending == 1) { - oxcf->scaled_frame_width = - (cm->width * cpi->resize_scale_num) / cpi->resize_scale_den; - oxcf->scaled_frame_height = - (cm->height * cpi->resize_scale_num) /cpi->resize_scale_den; - } else if (cpi->resize_pending == -1) { - // Go back up to original size. 
- oxcf->scaled_frame_width = oxcf->width; - oxcf->scaled_frame_height = oxcf->height; - } - if (cpi->resize_pending != 0) { - // There has been a change in frame size. - vp10_set_size_literal(cpi, - oxcf->scaled_frame_width, - oxcf->scaled_frame_height); - - // TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed. - set_mv_search_params(cpi); - } - } - - if (oxcf->pass == 2) { - vp10_set_target_rate(cpi); - } - - alloc_frame_mvs(cm, cm->new_fb_idx); - - // Reset the frame pointers to the current frame size. - vpx_realloc_frame_buffer(get_frame_new_buffer(cm), - cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, -#endif - VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, - NULL, NULL, NULL); - - alloc_util_frame_buffers(cpi); - init_motion_estimation(cpi); - - for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { - RefBuffer *const ref_buf = &cm->frame_refs[ref_frame - 1]; - const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame); - - ref_buf->idx = buf_idx; - - if (buf_idx != INVALID_IDX) { - YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[buf_idx].buf; - ref_buf->buf = buf; -#if CONFIG_VP9_HIGHBITDEPTH - vp10_setup_scale_factors_for_frame(&ref_buf->sf, - buf->y_crop_width, buf->y_crop_height, - cm->width, cm->height, - (buf->flags & YV12_FLAG_HIGHBITDEPTH) ? - 1 : 0); -#else - vp10_setup_scale_factors_for_frame(&ref_buf->sf, - buf->y_crop_width, buf->y_crop_height, - cm->width, cm->height); -#endif // CONFIG_VP9_HIGHBITDEPTH - if (vp10_is_scaled(&ref_buf->sf)) - vpx_extend_frame_borders(buf); - } else { - ref_buf->buf = NULL; - } - } - - set_ref_ptrs(cm, xd, LAST_FRAME, LAST_FRAME); -} - -static void encode_without_recode_loop(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - int q = 0, bottom_index = 0, top_index = 0; // Dummy variables. 
- - vpx_clear_system_state(); - - set_frame_size(cpi); - - // For 1 pass CBR under dynamic resize mode: use faster scaling for source. - // Only for 2x2 scaling for now. - if (cpi->oxcf.pass == 0 && - cpi->oxcf.rc_mode == VPX_CBR && - cpi->oxcf.resize_mode == RESIZE_DYNAMIC && - cpi->un_scaled_source->y_width == (cm->width << 1) && - cpi->un_scaled_source->y_height == (cm->height << 1)) { - cpi->Source = vp10_scale_if_required_fast(cm, - cpi->un_scaled_source, - &cpi->scaled_source); - if (cpi->unscaled_last_source != NULL) - cpi->Last_Source = vp10_scale_if_required_fast(cm, - cpi->unscaled_last_source, - &cpi->scaled_last_source); - } else { - cpi->Source = vp10_scale_if_required(cm, cpi->un_scaled_source, - &cpi->scaled_source); - if (cpi->unscaled_last_source != NULL) - cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source, - &cpi->scaled_last_source); - } - - if (frame_is_intra_only(cm) == 0) { - vp10_scale_references(cpi); - } - - set_size_independent_vars(cpi); - set_size_dependent_vars(cpi, &q, &bottom_index, &top_index); - - vp10_set_quantizer(cm, q); - vp10_set_variance_partition_thresholds(cpi, q); - - setup_frame(cpi); - - suppress_active_map(cpi); - // Variance adaptive and in frame q adjustment experiments are mutually - // exclusive. - if (cpi->oxcf.aq_mode == VARIANCE_AQ) { - vp10_vaq_frame_setup(cpi); - } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) { - vp10_setup_in_frame_q_adj(cpi); - } else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) { - vp10_cyclic_refresh_setup(cpi); - } - apply_active_map(cpi); - - // transform / motion compensation build reconstruction frame - vp10_encode_frame(cpi); - - // Update some stats from cyclic refresh, and check if we should not update - // golden reference, for 1 pass CBR. 
- if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && - cm->frame_type != KEY_FRAME && - (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR)) - vp10_cyclic_refresh_check_golden_update(cpi); - - // Update the skip mb flag probabilities based on the distribution - // seen in the last encoder iteration. - // update_base_skip_probs(cpi); - vpx_clear_system_state(); -} - -static void encode_with_recode_loop(VP10_COMP *cpi, - size_t *size, - uint8_t *dest) { - VP10_COMMON *const cm = &cpi->common; - RATE_CONTROL *const rc = &cpi->rc; - int bottom_index, top_index; - int loop_count = 0; - int loop_at_this_size = 0; - int loop = 0; - int overshoot_seen = 0; - int undershoot_seen = 0; - int frame_over_shoot_limit; - int frame_under_shoot_limit; - int q = 0, q_low = 0, q_high = 0; - - set_size_independent_vars(cpi); - - do { - vpx_clear_system_state(); - - set_frame_size(cpi); - - if (loop_count == 0 || cpi->resize_pending != 0) { - set_size_dependent_vars(cpi, &q, &bottom_index, &top_index); - - // TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed. - set_mv_search_params(cpi); - - // Reset the loop state for new frame size. - overshoot_seen = 0; - undershoot_seen = 0; - - // Reconfiguration for change in frame size has concluded. - cpi->resize_pending = 0; - - q_low = bottom_index; - q_high = top_index; - - loop_at_this_size = 0; - } - - // Decide frame size bounds first time through. 
- if (loop_count == 0) { - vp10_rc_compute_frame_size_bounds(cpi, rc->this_frame_target, - &frame_under_shoot_limit, - &frame_over_shoot_limit); - } - - cpi->Source = vp10_scale_if_required(cm, cpi->un_scaled_source, - &cpi->scaled_source); - - if (cpi->unscaled_last_source != NULL) - cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source, - &cpi->scaled_last_source); - - if (frame_is_intra_only(cm) == 0) { - if (loop_count > 0) { - release_scaled_references(cpi); - } - vp10_scale_references(cpi); - } - - vp10_set_quantizer(cm, q); - - if (loop_count == 0) - setup_frame(cpi); - - // Variance adaptive and in frame q adjustment experiments are mutually - // exclusive. - if (cpi->oxcf.aq_mode == VARIANCE_AQ) { - vp10_vaq_frame_setup(cpi); - } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) { - vp10_setup_in_frame_q_adj(cpi); - } - - // transform / motion compensation build reconstruction frame - vp10_encode_frame(cpi); - - // Update the skip mb flag probabilities based on the distribution - // seen in the last encoder iteration. - // update_base_skip_probs(cpi); - - vpx_clear_system_state(); - - // Dummy pack of the bitstream using up to date stats to get an - // accurate estimate of output frame size to determine if we need - // to recode. 
- if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF) { - save_coding_context(cpi); - vp10_pack_bitstream(cpi, dest, size); - - rc->projected_frame_size = (int)(*size) << 3; - restore_coding_context(cpi); - - if (frame_over_shoot_limit == 0) - frame_over_shoot_limit = 1; - } - - if (cpi->oxcf.rc_mode == VPX_Q) { - loop = 0; - } else { - if ((cm->frame_type == KEY_FRAME) && - rc->this_key_frame_forced && - (rc->projected_frame_size < rc->max_frame_bandwidth)) { - int last_q = q; - int64_t kf_err; - - int64_t high_err_target = cpi->ambient_err; - int64_t low_err_target = cpi->ambient_err >> 1; - -#if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) { - kf_err = vp10_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); - } else { - kf_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); - } -#else - kf_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); -#endif // CONFIG_VP9_HIGHBITDEPTH - - // Prevent possible divide by zero error below for perfect KF - kf_err += !kf_err; - - // The key frame is not good enough or we can afford - // to make it better without undue risk of popping. - if ((kf_err > high_err_target && - rc->projected_frame_size <= frame_over_shoot_limit) || - (kf_err > low_err_target && - rc->projected_frame_size <= frame_under_shoot_limit)) { - // Lower q_high - q_high = q > q_low ? q - 1 : q_low; - - // Adjust Q - q = (int)((q * high_err_target) / kf_err); - q = VPXMIN(q, (q_high + q_low) >> 1); - } else if (kf_err < low_err_target && - rc->projected_frame_size >= frame_under_shoot_limit) { - // The key frame is much better than the previous frame - // Raise q_low - q_low = q < q_high ? 
q + 1 : q_high; - - // Adjust Q - q = (int)((q * low_err_target) / kf_err); - q = VPXMIN(q, (q_high + q_low + 1) >> 1); - } - - // Clamp Q to upper and lower limits: - q = clamp(q, q_low, q_high); - - loop = q != last_q; - } else if (recode_loop_test( - cpi, frame_over_shoot_limit, frame_under_shoot_limit, - q, VPXMAX(q_high, top_index), bottom_index)) { - // Is the projected frame size out of range and are we allowed - // to attempt to recode. - int last_q = q; - int retries = 0; - - if (cpi->resize_pending == 1) { - // Change in frame size so go back around the recode loop. - cpi->rc.frame_size_selector = - SCALE_STEP1 - cpi->rc.frame_size_selector; - cpi->rc.next_frame_size_selector = cpi->rc.frame_size_selector; - -#if CONFIG_INTERNAL_STATS - ++cpi->tot_recode_hits; -#endif - ++loop_count; - loop = 1; - continue; - } - - // Frame size out of permitted range: - // Update correction factor & compute new Q to try... - - // Frame is too large - if (rc->projected_frame_size > rc->this_frame_target) { - // Special case if the projected size is > the max allowed. - if (rc->projected_frame_size >= rc->max_frame_bandwidth) - q_high = rc->worst_quality; - - // Raise Qlow as to at least the current value - q_low = q < q_high ? q + 1 : q_high; - - if (undershoot_seen || loop_at_this_size > 1) { - // Update rate_correction_factor unless - vp10_rc_update_rate_correction_factors(cpi); - - q = (q_high + q_low + 1) / 2; - } else { - // Update rate_correction_factor unless - vp10_rc_update_rate_correction_factors(cpi); - - q = vp10_rc_regulate_q(cpi, rc->this_frame_target, - bottom_index, VPXMAX(q_high, top_index)); - - while (q < q_low && retries < 10) { - vp10_rc_update_rate_correction_factors(cpi); - q = vp10_rc_regulate_q(cpi, rc->this_frame_target, - bottom_index, VPXMAX(q_high, top_index)); - retries++; - } - } - - overshoot_seen = 1; - } else { - // Frame is too small - q_high = q > q_low ? 
q - 1 : q_low; - - if (overshoot_seen || loop_at_this_size > 1) { - vp10_rc_update_rate_correction_factors(cpi); - q = (q_high + q_low) / 2; - } else { - vp10_rc_update_rate_correction_factors(cpi); - q = vp10_rc_regulate_q(cpi, rc->this_frame_target, - bottom_index, top_index); - // Special case reset for qlow for constrained quality. - // This should only trigger where there is very substantial - // undershoot on a frame and the auto cq level is above - // the user passsed in value. - if (cpi->oxcf.rc_mode == VPX_CQ && - q < q_low) { - q_low = q; - } - - while (q > q_high && retries < 10) { - vp10_rc_update_rate_correction_factors(cpi); - q = vp10_rc_regulate_q(cpi, rc->this_frame_target, - bottom_index, top_index); - retries++; - } - } - - undershoot_seen = 1; - } - - // Clamp Q to upper and lower limits: - q = clamp(q, q_low, q_high); - - loop = (q != last_q); - } else { - loop = 0; - } - } - - // Special case for overlay frame. - if (rc->is_src_frame_alt_ref && - rc->projected_frame_size < rc->max_frame_bandwidth) - loop = 0; - - if (loop) { - ++loop_count; - ++loop_at_this_size; - -#if CONFIG_INTERNAL_STATS - ++cpi->tot_recode_hits; -#endif - } - } while (loop); -} - -static int get_ref_frame_flags(const VP10_COMP *cpi) { - const int *const map = cpi->common.ref_frame_map; - const int gold_is_last = map[cpi->gld_fb_idx] == map[cpi->lst_fb_idx]; - const int alt_is_last = map[cpi->alt_fb_idx] == map[cpi->lst_fb_idx]; - const int gold_is_alt = map[cpi->gld_fb_idx] == map[cpi->alt_fb_idx]; - int flags = VP9_ALT_FLAG | VP9_GOLD_FLAG | VP9_LAST_FLAG; - - if (gold_is_last) - flags &= ~VP9_GOLD_FLAG; - - if (cpi->rc.frames_till_gf_update_due == INT_MAX) - flags &= ~VP9_GOLD_FLAG; - - if (alt_is_last) - flags &= ~VP9_ALT_FLAG; - - if (gold_is_alt) - flags &= ~VP9_ALT_FLAG; - - return flags; -} - -static void set_ext_overrides(VP10_COMP *cpi) { - // Overrides the defaults with the externally supplied values with - // vp10_update_reference() and vp10_update_entropy() 
calls - // Note: The overrides are valid only for the next frame passed - // to encode_frame_to_data_rate() function - if (cpi->ext_refresh_frame_context_pending) { - cpi->common.refresh_frame_context = cpi->ext_refresh_frame_context; - cpi->ext_refresh_frame_context_pending = 0; - } - if (cpi->ext_refresh_frame_flags_pending) { - cpi->refresh_last_frame = cpi->ext_refresh_last_frame; - cpi->refresh_golden_frame = cpi->ext_refresh_golden_frame; - cpi->refresh_alt_ref_frame = cpi->ext_refresh_alt_ref_frame; - cpi->ext_refresh_frame_flags_pending = 0; - } -} - -YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm, - YV12_BUFFER_CONFIG *unscaled, - YV12_BUFFER_CONFIG *scaled) { - if (cm->mi_cols * MI_SIZE != unscaled->y_width || - cm->mi_rows * MI_SIZE != unscaled->y_height) { - // For 2x2 scaling down. - vpx_scale_frame(unscaled, scaled, unscaled->y_buffer, 9, 2, 1, - 2, 1, 0); - vpx_extend_frame_borders(scaled); - return scaled; - } else { - return unscaled; - } -} - -YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm, - YV12_BUFFER_CONFIG *unscaled, - YV12_BUFFER_CONFIG *scaled) { - if (cm->mi_cols * MI_SIZE != unscaled->y_width || - cm->mi_rows * MI_SIZE != unscaled->y_height) { -#if CONFIG_VP9_HIGHBITDEPTH - scale_and_extend_frame_nonnormative(unscaled, scaled, (int)cm->bit_depth); -#else - scale_and_extend_frame_nonnormative(unscaled, scaled); -#endif // CONFIG_VP9_HIGHBITDEPTH - return scaled; - } else { - return unscaled; - } -} - -static void set_arf_sign_bias(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - int arf_sign_bias; - - if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) { - const GF_GROUP *const gf_group = &cpi->twopass.gf_group; - arf_sign_bias = cpi->rc.source_alt_ref_active && - (!cpi->refresh_alt_ref_frame || - (gf_group->rf_level[gf_group->index] == GF_ARF_LOW)); - } else { - arf_sign_bias = - (cpi->rc.source_alt_ref_active && !cpi->refresh_alt_ref_frame); - } - cm->ref_frame_sign_bias[ALTREF_FRAME] = 
arf_sign_bias; -} - -static int setup_interp_filter_search_mask(VP10_COMP *cpi) { - INTERP_FILTER ifilter; - int ref_total[MAX_REF_FRAMES] = {0}; - MV_REFERENCE_FRAME ref; - int mask = 0; - if (cpi->common.last_frame_type == KEY_FRAME || - cpi->refresh_alt_ref_frame) - return mask; - for (ref = LAST_FRAME; ref <= ALTREF_FRAME; ++ref) - for (ifilter = EIGHTTAP; ifilter <= EIGHTTAP_SHARP; ++ifilter) - ref_total[ref] += cpi->interp_filter_selected[ref][ifilter]; - - for (ifilter = EIGHTTAP; ifilter <= EIGHTTAP_SHARP; ++ifilter) { - if ((ref_total[LAST_FRAME] && - cpi->interp_filter_selected[LAST_FRAME][ifilter] == 0) && - (ref_total[GOLDEN_FRAME] == 0 || - cpi->interp_filter_selected[GOLDEN_FRAME][ifilter] * 50 - < ref_total[GOLDEN_FRAME]) && - (ref_total[ALTREF_FRAME] == 0 || - cpi->interp_filter_selected[ALTREF_FRAME][ifilter] * 50 - < ref_total[ALTREF_FRAME])) - mask |= 1 << ifilter; - } - return mask; -} - -static void encode_frame_to_data_rate(VP10_COMP *cpi, - size_t *size, - uint8_t *dest, - unsigned int *frame_flags) { - VP10_COMMON *const cm = &cpi->common; - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - struct segmentation *const seg = &cm->seg; - TX_SIZE t; - - set_ext_overrides(cpi); - vpx_clear_system_state(); - - // Set the arf sign bias for this frame. - set_arf_sign_bias(cpi); - - // Set default state for segment based loop filter update flags. - cm->lf.mode_ref_delta_update = 0; - - if (cpi->oxcf.pass == 2 && - cpi->sf.adaptive_interp_filter_search) - cpi->sf.interp_filter_search_mask = - setup_interp_filter_search_mask(cpi); - - // Set various flags etc to special state if it is a key frame. - if (frame_is_intra_only(cm)) { - // Reset the loop filter deltas and segmentation map. - vp10_reset_segment_features(cm); - - // If segmentation is enabled force a map update for key frames. - if (seg->enabled) { - seg->update_map = 1; - seg->update_data = 1; - } - - // The alternate reference frame cannot be active for a key frame. 
- cpi->rc.source_alt_ref_active = 0; - - cm->error_resilient_mode = oxcf->error_resilient_mode; - - // By default, encoder assumes decoder can use prev_mi. - if (cm->error_resilient_mode) { - cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE; - cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_OFF; - } else if (cm->intra_only) { - // Only reset the current context. - cm->reset_frame_context = RESET_FRAME_CONTEXT_CURRENT; - } - } - - // For 1 pass CBR, check if we are dropping this frame. - // Never drop on key frame. - if (oxcf->pass == 0 && - oxcf->rc_mode == VPX_CBR && - cm->frame_type != KEY_FRAME) { - if (vp10_rc_drop_frame(cpi)) { - vp10_rc_postencode_update_drop_frame(cpi); - ++cm->current_video_frame; - return; - } - } - - vpx_clear_system_state(); - -#if CONFIG_INTERNAL_STATS - memset(cpi->mode_chosen_counts, 0, - MAX_MODES * sizeof(*cpi->mode_chosen_counts)); -#endif - - if (cpi->sf.recode_loop == DISALLOW_RECODE) { - encode_without_recode_loop(cpi); - } else { - encode_with_recode_loop(cpi, size, dest); - } - -#if CONFIG_VP9_TEMPORAL_DENOISING -#ifdef OUTPUT_YUV_DENOISED - if (oxcf->noise_sensitivity > 0) { - vp10_write_yuv_frame_420(&cpi->denoiser.running_avg_y[INTRA_FRAME], - yuv_denoised_file); - } -#endif -#endif -#ifdef OUTPUT_YUV_SKINMAP - if (cpi->common.current_video_frame > 1) { - vp10_compute_skin_map(cpi, yuv_skinmap_file); - } -#endif - - // Special case code to reduce pulsing when key frames are forced at a - // fixed interval. 
Note the reconstruction error if it is the frame before - // the force key frame - if (cpi->rc.next_key_frame_forced && cpi->rc.frames_to_key == 1) { -#if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) { - cpi->ambient_err = vp10_highbd_get_y_sse(cpi->Source, - get_frame_new_buffer(cm)); - } else { - cpi->ambient_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); - } -#else - cpi->ambient_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); -#endif // CONFIG_VP9_HIGHBITDEPTH - } - - // If the encoder forced a KEY_FRAME decision - if (cm->frame_type == KEY_FRAME) - cpi->refresh_last_frame = 1; - - cm->frame_to_show = get_frame_new_buffer(cm); - cm->frame_to_show->color_space = cm->color_space; - cm->frame_to_show->color_range = cm->color_range; - cm->frame_to_show->render_width = cm->render_width; - cm->frame_to_show->render_height = cm->render_height; - - // Pick the loop filter level for the frame. - loopfilter_frame(cpi, cm); - - // build the bitstream - vp10_pack_bitstream(cpi, dest, size); - - if (cm->seg.update_map) - update_reference_segmentation_map(cpi); - - if (frame_is_intra_only(cm) == 0) { - release_scaled_references(cpi); - } - vp10_update_reference_frames(cpi); - - for (t = TX_4X4; t <= TX_32X32; t++) - full_to_model_counts(cpi->td.counts->coef[t], - cpi->td.rd_counts.coef_counts[t]); - - if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) { - vp10_adapt_coef_probs(cm); -#if CONFIG_MISC_FIXES - vp10_adapt_intra_frame_probs(cm); -#else - if (!frame_is_intra_only(cm)) - vp10_adapt_intra_frame_probs(cm); -#endif - } - - if (!frame_is_intra_only(cm)) { - if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) { - vp10_adapt_inter_frame_probs(cm); - vp10_adapt_mv_probs(cm, cm->allow_high_precision_mv); - } - } - - if (cpi->refresh_golden_frame == 1) - cpi->frame_flags |= FRAMEFLAGS_GOLDEN; - else - cpi->frame_flags &= ~FRAMEFLAGS_GOLDEN; - - if (cpi->refresh_alt_ref_frame == 1) - cpi->frame_flags |= 
FRAMEFLAGS_ALTREF; - else - cpi->frame_flags &= ~FRAMEFLAGS_ALTREF; - - cpi->ref_frame_flags = get_ref_frame_flags(cpi); - - cm->last_frame_type = cm->frame_type; - - vp10_rc_postencode_update(cpi, *size); - -#if 0 - output_frame_level_debug_stats(cpi); -#endif - - if (cm->frame_type == KEY_FRAME) { - // Tell the caller that the frame was coded as a key frame - *frame_flags = cpi->frame_flags | FRAMEFLAGS_KEY; - } else { - *frame_flags = cpi->frame_flags & ~FRAMEFLAGS_KEY; - } - - // Clear the one shot update flags for segmentation map and mode/ref loop - // filter deltas. - cm->seg.update_map = 0; - cm->seg.update_data = 0; - cm->lf.mode_ref_delta_update = 0; - - // keep track of the last coded dimensions - cm->last_width = cm->width; - cm->last_height = cm->height; - - // reset to normal state now that we are done. - if (!cm->show_existing_frame) - cm->last_show_frame = cm->show_frame; - - if (cm->show_frame) { - vp10_swap_mi_and_prev_mi(cm); - // Don't increment frame counters if this was an altref buffer - // update not a real frame - ++cm->current_video_frame; - } - cm->prev_frame = cm->cur_frame; -} - -static void Pass0Encode(VP10_COMP *cpi, size_t *size, uint8_t *dest, - unsigned int *frame_flags) { - if (cpi->oxcf.rc_mode == VPX_CBR) { - vp10_rc_get_one_pass_cbr_params(cpi); - } else { - vp10_rc_get_one_pass_vbr_params(cpi); - } - encode_frame_to_data_rate(cpi, size, dest, frame_flags); -} - -static void Pass2Encode(VP10_COMP *cpi, size_t *size, - uint8_t *dest, unsigned int *frame_flags) { - cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED; - encode_frame_to_data_rate(cpi, size, dest, frame_flags); - - vp10_twopass_postencode_update(cpi); -} - -static void init_ref_frame_bufs(VP10_COMMON *cm) { - int i; - BufferPool *const pool = cm->buffer_pool; - cm->new_fb_idx = INVALID_IDX; - for (i = 0; i < REF_FRAMES; ++i) { - cm->ref_frame_map[i] = INVALID_IDX; - pool->frame_bufs[i].ref_count = 0; - } -} - -static void check_initial_width(VP10_COMP *cpi, -#if 
CONFIG_VP9_HIGHBITDEPTH - int use_highbitdepth, -#endif - int subsampling_x, int subsampling_y) { - VP10_COMMON *const cm = &cpi->common; - - if (!cpi->initial_width || -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth != use_highbitdepth || -#endif - cm->subsampling_x != subsampling_x || - cm->subsampling_y != subsampling_y) { - cm->subsampling_x = subsampling_x; - cm->subsampling_y = subsampling_y; -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth = use_highbitdepth; -#endif - - alloc_raw_frame_buffers(cpi); - init_ref_frame_bufs(cm); - alloc_util_frame_buffers(cpi); - - init_motion_estimation(cpi); // TODO(agrange) This can be removed. - - cpi->initial_width = cm->width; - cpi->initial_height = cm->height; - cpi->initial_mbs = cm->MBs; - } -} - -#if CONFIG_VP9_TEMPORAL_DENOISING -static void setup_denoiser_buffer(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - if (cpi->oxcf.noise_sensitivity > 0 && - !cpi->denoiser.frame_buffer_initialized) { - vp10_denoiser_alloc(&(cpi->denoiser), cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, -#endif - VP9_ENC_BORDER_IN_PIXELS); - } -} -#endif - -int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags, - YV12_BUFFER_CONFIG *sd, int64_t time_stamp, - int64_t end_time) { - VP10_COMMON *volatile const cm = &cpi->common; - struct vpx_usec_timer timer; - volatile int res = 0; - const int subsampling_x = sd->subsampling_x; - const int subsampling_y = sd->subsampling_y; -#if CONFIG_VP9_HIGHBITDEPTH - const int use_highbitdepth = (sd->flags & YV12_FLAG_HIGHBITDEPTH) != 0; -#endif - - if (setjmp(cm->error.jmp)) { - cm->error.setjmp = 0; - return -1; - } - cm->error.setjmp = 1; - -#if CONFIG_VP9_HIGHBITDEPTH - check_initial_width(cpi, use_highbitdepth, subsampling_x, subsampling_y); -#else - check_initial_width(cpi, subsampling_x, subsampling_y); -#endif // CONFIG_VP9_HIGHBITDEPTH - -#if CONFIG_VP9_TEMPORAL_DENOISING - 
setup_denoiser_buffer(cpi); -#endif - vpx_usec_timer_start(&timer); - - if (vp10_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, -#if CONFIG_VP9_HIGHBITDEPTH - use_highbitdepth, -#endif // CONFIG_VP9_HIGHBITDEPTH - frame_flags)) - res = -1; - vpx_usec_timer_mark(&timer); - cpi->time_receive_data += vpx_usec_timer_elapsed(&timer); - - if ((cm->profile == PROFILE_0 || cm->profile == PROFILE_2) && - (subsampling_x != 1 || subsampling_y != 1)) { - vpx_internal_error(&cm->error, VPX_CODEC_INVALID_PARAM, - "Non-4:2:0 color format requires profile 1 or 3"); - res = -1; - } - if ((cm->profile == PROFILE_1 || cm->profile == PROFILE_3) && - (subsampling_x == 1 && subsampling_y == 1)) { - vpx_internal_error(&cm->error, VPX_CODEC_INVALID_PARAM, - "4:2:0 color format requires profile 0 or 2"); - res = -1; - } - - cm->error.setjmp = 0; - return res; -} - - -static int frame_is_reference(const VP10_COMP *cpi) { - const VP10_COMMON *cm = &cpi->common; - - return cm->frame_type == KEY_FRAME || - cpi->refresh_last_frame || - cpi->refresh_golden_frame || - cpi->refresh_alt_ref_frame || - cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF || - cm->lf.mode_ref_delta_update || - cm->seg.update_map || - cm->seg.update_data; -} - -static void adjust_frame_rate(VP10_COMP *cpi, - const struct lookahead_entry *source) { - int64_t this_duration; - int step = 0; - - if (source->ts_start == cpi->first_time_stamp_ever) { - this_duration = source->ts_end - source->ts_start; - step = 1; - } else { - int64_t last_duration = cpi->last_end_time_stamp_seen - - cpi->last_time_stamp_seen; - - this_duration = source->ts_end - cpi->last_end_time_stamp_seen; - - // do a step update if the duration changes by 10% - if (last_duration) - step = (int)((this_duration - last_duration) * 10 / last_duration); - } - - if (this_duration) { - if (step) { - vp10_new_framerate(cpi, 10000000.0 / this_duration); - } else { - // Average this frame's rate into the last second's average - // frame rate. 
If we haven't seen 1 second yet, then average - // over the whole interval seen. - const double interval = VPXMIN( - (double)(source->ts_end - cpi->first_time_stamp_ever), 10000000.0); - double avg_duration = 10000000.0 / cpi->framerate; - avg_duration *= (interval - avg_duration + this_duration); - avg_duration /= interval; - - vp10_new_framerate(cpi, 10000000.0 / avg_duration); - } - } - cpi->last_time_stamp_seen = source->ts_start; - cpi->last_end_time_stamp_seen = source->ts_end; -} - -// Returns 0 if this is not an alt ref else the offset of the source frame -// used as the arf midpoint. -static int get_arf_src_index(VP10_COMP *cpi) { - RATE_CONTROL *const rc = &cpi->rc; - int arf_src_index = 0; - if (is_altref_enabled(cpi)) { - if (cpi->oxcf.pass == 2) { - const GF_GROUP *const gf_group = &cpi->twopass.gf_group; - if (gf_group->update_type[gf_group->index] == ARF_UPDATE) { - arf_src_index = gf_group->arf_src_offset[gf_group->index]; - } - } else if (rc->source_alt_ref_pending) { - arf_src_index = rc->frames_till_gf_update_due; - } - } - return arf_src_index; -} - -static void check_src_altref(VP10_COMP *cpi, - const struct lookahead_entry *source) { - RATE_CONTROL *const rc = &cpi->rc; - - if (cpi->oxcf.pass == 2) { - const GF_GROUP *const gf_group = &cpi->twopass.gf_group; - rc->is_src_frame_alt_ref = - (gf_group->update_type[gf_group->index] == OVERLAY_UPDATE); - } else { - rc->is_src_frame_alt_ref = cpi->alt_ref_source && - (source == cpi->alt_ref_source); - } - - if (rc->is_src_frame_alt_ref) { - // Current frame is an ARF overlay frame. - cpi->alt_ref_source = NULL; - - // Don't refresh the last buffer for an ARF overlay frame. It will - // become the GF so preserve last as an alternative prediction option. 
- cpi->refresh_last_frame = 0; - } -} - -#if CONFIG_INTERNAL_STATS -extern double vp10_get_blockiness(const unsigned char *img1, int img1_pitch, - const unsigned char *img2, int img2_pitch, - int width, int height); - -static void adjust_image_stat(double y, double u, double v, double all, - ImageStat *s) { - s->stat[Y] += y; - s->stat[U] += u; - s->stat[V] += v; - s->stat[ALL] += all; - s->worst = VPXMIN(s->worst, all); -} -#endif // CONFIG_INTERNAL_STATS - -int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags, - size_t *size, uint8_t *dest, - int64_t *time_stamp, int64_t *time_end, int flush) { - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - VP10_COMMON *const cm = &cpi->common; - BufferPool *const pool = cm->buffer_pool; - RATE_CONTROL *const rc = &cpi->rc; - struct vpx_usec_timer cmptimer; - YV12_BUFFER_CONFIG *force_src_buffer = NULL; - struct lookahead_entry *last_source = NULL; - struct lookahead_entry *source = NULL; - int arf_src_index; - int i; - - vpx_usec_timer_start(&cmptimer); - - vp10_set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV); - - // Is multi-arf enabled. - // Note that at the moment multi_arf is only configured for 2 pass VBR - if ((oxcf->pass == 2) && (cpi->oxcf.enable_auto_arf > 1)) - cpi->multi_arf_allowed = 1; - else - cpi->multi_arf_allowed = 0; - - // Normal defaults - cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE; - cm->refresh_frame_context = - oxcf->error_resilient_mode ? REFRESH_FRAME_CONTEXT_OFF : - oxcf->frame_parallel_decoding_mode ? REFRESH_FRAME_CONTEXT_FORWARD - : REFRESH_FRAME_CONTEXT_BACKWARD; - - cpi->refresh_last_frame = 1; - cpi->refresh_golden_frame = 0; - cpi->refresh_alt_ref_frame = 0; - - // Should we encode an arf frame. 
- arf_src_index = get_arf_src_index(cpi); - - if (arf_src_index) { - assert(arf_src_index <= rc->frames_to_key); - - if ((source = vp10_lookahead_peek(cpi->lookahead, arf_src_index)) != NULL) { - cpi->alt_ref_source = source; - - if (oxcf->arnr_max_frames > 0) { - // Produce the filtered ARF frame. - vp10_temporal_filter(cpi, arf_src_index); - vpx_extend_frame_borders(&cpi->alt_ref_buffer); - force_src_buffer = &cpi->alt_ref_buffer; - } - - cm->show_frame = 0; - cm->intra_only = 0; - cpi->refresh_alt_ref_frame = 1; - cpi->refresh_golden_frame = 0; - cpi->refresh_last_frame = 0; - rc->is_src_frame_alt_ref = 0; - rc->source_alt_ref_pending = 0; - } else { - rc->source_alt_ref_pending = 0; - } - } - - if (!source) { - // Get last frame source. - if (cm->current_video_frame > 0) { - if ((last_source = vp10_lookahead_peek(cpi->lookahead, -1)) == NULL) - return -1; - } - - // Read in the source frame. - source = vp10_lookahead_pop(cpi->lookahead, flush); - - if (source != NULL) { - cm->show_frame = 1; - cm->intra_only = 0; - - // Check to see if the frame should be encoded as an arf overlay. - check_src_altref(cpi, source); - } - } - - if (source) { - cpi->un_scaled_source = cpi->Source = force_src_buffer ? force_src_buffer - : &source->img; - - cpi->unscaled_last_source = last_source != NULL ? &last_source->img : NULL; - - *time_stamp = source->ts_start; - *time_end = source->ts_end; - *frame_flags = (source->flags & VPX_EFLAG_FORCE_KF) ? 
FRAMEFLAGS_KEY : 0; - - } else { - *size = 0; - if (flush && oxcf->pass == 1 && !cpi->twopass.first_pass_done) { - vp10_end_first_pass(cpi); /* get last stats packet */ - cpi->twopass.first_pass_done = 1; - } - return -1; - } - - if (source->ts_start < cpi->first_time_stamp_ever) { - cpi->first_time_stamp_ever = source->ts_start; - cpi->last_end_time_stamp_seen = source->ts_start; - } - - // Clear down mmx registers - vpx_clear_system_state(); - - // adjust frame rates based on timestamps given - if (cm->show_frame) { - adjust_frame_rate(cpi, source); - } - - // Find a free buffer for the new frame, releasing the reference previously - // held. - if (cm->new_fb_idx != INVALID_IDX) { - --pool->frame_bufs[cm->new_fb_idx].ref_count; - } - cm->new_fb_idx = get_free_fb(cm); - - if (cm->new_fb_idx == INVALID_IDX) - return -1; - - cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx]; - - if (cpi->multi_arf_allowed) { - if (cm->frame_type == KEY_FRAME) { - init_buffer_indices(cpi); - } else if (oxcf->pass == 2) { - const GF_GROUP *const gf_group = &cpi->twopass.gf_group; - cpi->alt_fb_idx = gf_group->arf_ref_idx[gf_group->index]; - } - } - - // Start with a 0 size frame. - *size = 0; - - cpi->frame_flags = *frame_flags; - - if (oxcf->pass == 2) { - vp10_rc_get_second_pass_params(cpi); - } else if (oxcf->pass == 1) { - set_frame_size(cpi); - } - - if (cpi->oxcf.pass != 0 || frame_is_intra_only(cm) == 1) { - for (i = 0; i < MAX_REF_FRAMES; ++i) - cpi->scaled_ref_idx[i] = INVALID_IDX; - } - - if (oxcf->pass == 1) { - cpi->td.mb.e_mbd.lossless[0] = is_lossless_requested(oxcf); - vp10_first_pass(cpi, source); - } else if (oxcf->pass == 2) { - Pass2Encode(cpi, size, dest, frame_flags); - } else { - // One pass encode - Pass0Encode(cpi, size, dest, frame_flags); - } - - if (cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF) - cm->frame_contexts[cm->frame_context_idx] = *cm->fc; - - // No frame encoded, or frame was dropped, release scaled references. 
- if ((*size == 0) && (frame_is_intra_only(cm) == 0)) { - release_scaled_references(cpi); - } - - if (*size > 0) { - cpi->droppable = !frame_is_reference(cpi); - } - - vpx_usec_timer_mark(&cmptimer); - cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer); - - if (cpi->b_calculate_psnr && oxcf->pass != 1 && cm->show_frame) - generate_psnr_packet(cpi); - -#if CONFIG_INTERNAL_STATS - - if (oxcf->pass != 1) { - double samples = 0.0; - cpi->bytes += (int)(*size); - - if (cm->show_frame) { - cpi->count++; - - if (cpi->b_calculate_psnr) { - YV12_BUFFER_CONFIG *orig = cpi->Source; - YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show; - YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer; - PSNR_STATS psnr; -#if CONFIG_VP9_HIGHBITDEPTH - calc_highbd_psnr(orig, recon, &psnr, cpi->td.mb.e_mbd.bd, - cpi->oxcf.input_bit_depth); -#else - calc_psnr(orig, recon, &psnr); -#endif // CONFIG_VP9_HIGHBITDEPTH - - adjust_image_stat(psnr.psnr[1], psnr.psnr[2], psnr.psnr[3], - psnr.psnr[0], &cpi->psnr); - cpi->total_sq_error += psnr.sse[0]; - cpi->total_samples += psnr.samples[0]; - samples = psnr.samples[0]; - - { - PSNR_STATS psnr2; - double frame_ssim2 = 0, weight = 0; -#if CONFIG_VP9_POSTPROC - if (vpx_alloc_frame_buffer(&cm->post_proc_buffer, - recon->y_crop_width, recon->y_crop_height, - cm->subsampling_x, cm->subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, -#endif - VP9_ENC_BORDER_IN_PIXELS, - cm->byte_alignment) < 0) { - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate post processing buffer"); - } - - vp10_deblock(cm->frame_to_show, &cm->post_proc_buffer, - cm->lf.filter_level * 10 / 6); -#endif - vpx_clear_system_state(); - -#if CONFIG_VP9_HIGHBITDEPTH - calc_highbd_psnr(orig, pp, &psnr2, cpi->td.mb.e_mbd.bd, - cpi->oxcf.input_bit_depth); -#else - calc_psnr(orig, pp, &psnr2); -#endif // CONFIG_VP9_HIGHBITDEPTH - - cpi->totalp_sq_error += psnr2.sse[0]; - cpi->totalp_samples += psnr2.samples[0]; - adjust_image_stat(psnr2.psnr[1], 
psnr2.psnr[2], psnr2.psnr[3], - psnr2.psnr[0], &cpi->psnrp); - -#if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) { - frame_ssim2 = vpx_highbd_calc_ssim(orig, recon, &weight, - (int)cm->bit_depth); - } else { - frame_ssim2 = vpx_calc_ssim(orig, recon, &weight); - } -#else - frame_ssim2 = vpx_calc_ssim(orig, recon, &weight); -#endif // CONFIG_VP9_HIGHBITDEPTH - - cpi->worst_ssim= VPXMIN(cpi->worst_ssim, frame_ssim2); - cpi->summed_quality += frame_ssim2 * weight; - cpi->summed_weights += weight; - -#if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) { - frame_ssim2 = vpx_highbd_calc_ssim( - orig, &cm->post_proc_buffer, &weight, (int)cm->bit_depth); - } else { - frame_ssim2 = vpx_calc_ssim(orig, &cm->post_proc_buffer, &weight); - } -#else - frame_ssim2 = vpx_calc_ssim(orig, &cm->post_proc_buffer, &weight); -#endif // CONFIG_VP9_HIGHBITDEPTH - - cpi->summedp_quality += frame_ssim2 * weight; - cpi->summedp_weights += weight; -#if 0 - { - FILE *f = fopen("q_used.stt", "a"); - fprintf(f, "%5d : Y%f7.3:U%f7.3:V%f7.3:F%f7.3:S%7.3f\n", - cpi->common.current_video_frame, y2, u2, v2, - frame_psnr2, frame_ssim2); - fclose(f); - } -#endif - } - } - if (cpi->b_calculate_blockiness) { -#if CONFIG_VP9_HIGHBITDEPTH - if (!cm->use_highbitdepth) -#endif - { - double frame_blockiness = vp10_get_blockiness( - cpi->Source->y_buffer, cpi->Source->y_stride, - cm->frame_to_show->y_buffer, cm->frame_to_show->y_stride, - cpi->Source->y_width, cpi->Source->y_height); - cpi->worst_blockiness = - VPXMAX(cpi->worst_blockiness, frame_blockiness); - cpi->total_blockiness += frame_blockiness; - } - } - - if (cpi->b_calculate_consistency) { -#if CONFIG_VP9_HIGHBITDEPTH - if (!cm->use_highbitdepth) -#endif - { - double this_inconsistency = vpx_get_ssim_metrics( - cpi->Source->y_buffer, cpi->Source->y_stride, - cm->frame_to_show->y_buffer, cm->frame_to_show->y_stride, - cpi->Source->y_width, cpi->Source->y_height, cpi->ssim_vars, - &cpi->metrics, 1); - - const double peak = (double)((1 << 
cpi->oxcf.input_bit_depth) - 1); - double consistency = vpx_sse_to_psnr(samples, peak, - (double)cpi->total_inconsistency); - if (consistency > 0.0) - cpi->worst_consistency = - VPXMIN(cpi->worst_consistency, consistency); - cpi->total_inconsistency += this_inconsistency; - } - } - - if (cpi->b_calculate_ssimg) { - double y, u, v, frame_all; -#if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) { - frame_all = vpx_highbd_calc_ssimg(cpi->Source, cm->frame_to_show, &y, - &u, &v, (int)cm->bit_depth); - } else { - frame_all = vpx_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u, - &v); - } -#else - frame_all = vpx_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u, &v); -#endif // CONFIG_VP9_HIGHBITDEPTH - adjust_image_stat(y, u, v, frame_all, &cpi->ssimg); - } -#if CONFIG_VP9_HIGHBITDEPTH - if (!cm->use_highbitdepth) -#endif - { - double y, u, v, frame_all; - frame_all = vpx_calc_fastssim(cpi->Source, cm->frame_to_show, &y, &u, - &v); - adjust_image_stat(y, u, v, frame_all, &cpi->fastssim); - /* TODO(JBB): add 10/12 bit support */ - } -#if CONFIG_VP9_HIGHBITDEPTH - if (!cm->use_highbitdepth) -#endif - { - double y, u, v, frame_all; - frame_all = vpx_psnrhvs(cpi->Source, cm->frame_to_show, &y, &u, &v); - adjust_image_stat(y, u, v, frame_all, &cpi->psnrhvs); - } - } - } -#endif - - vpx_clear_system_state(); - return 0; -} - -int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest, - vp10_ppflags_t *flags) { - VP10_COMMON *cm = &cpi->common; -#if !CONFIG_VP9_POSTPROC - (void)flags; -#endif - - if (!cm->show_frame) { - return -1; - } else { - int ret; -#if CONFIG_VP9_POSTPROC - ret = vp10_post_proc_frame(cm, dest, flags); -#else - if (cm->frame_to_show) { - *dest = *cm->frame_to_show; - dest->y_width = cm->width; - dest->y_height = cm->height; - dest->uv_width = cm->width >> cm->subsampling_x; - dest->uv_height = cm->height >> cm->subsampling_y; - ret = 0; - } else { - ret = -1; - } -#endif // !CONFIG_VP9_POSTPROC - vpx_clear_system_state(); - return 
ret; - } -} - -int vp10_set_internal_size(VP10_COMP *cpi, - VPX_SCALING horiz_mode, VPX_SCALING vert_mode) { - VP10_COMMON *cm = &cpi->common; - int hr = 0, hs = 0, vr = 0, vs = 0; - - if (horiz_mode > ONETWO || vert_mode > ONETWO) - return -1; - - Scale2Ratio(horiz_mode, &hr, &hs); - Scale2Ratio(vert_mode, &vr, &vs); - - // always go to the next whole number - cm->width = (hs - 1 + cpi->oxcf.width * hr) / hs; - cm->height = (vs - 1 + cpi->oxcf.height * vr) / vs; - assert(cm->width <= cpi->initial_width); - assert(cm->height <= cpi->initial_height); - - update_frame_size(cpi); - - return 0; -} - -int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width, - unsigned int height) { - VP10_COMMON *cm = &cpi->common; -#if CONFIG_VP9_HIGHBITDEPTH - check_initial_width(cpi, cm->use_highbitdepth, 1, 1); -#else - check_initial_width(cpi, 1, 1); -#endif // CONFIG_VP9_HIGHBITDEPTH - -#if CONFIG_VP9_TEMPORAL_DENOISING - setup_denoiser_buffer(cpi); -#endif - - if (width) { - cm->width = width; - if (cm->width > cpi->initial_width) { - cm->width = cpi->initial_width; - printf("Warning: Desired width too large, changed to %d\n", cm->width); - } - } - - if (height) { - cm->height = height; - if (cm->height > cpi->initial_height) { - cm->height = cpi->initial_height; - printf("Warning: Desired height too large, changed to %d\n", cm->height); - } - } - assert(cm->width <= cpi->initial_width); - assert(cm->height <= cpi->initial_height); - - update_frame_size(cpi); - - return 0; -} - -int64_t vp10_get_y_sse(const YV12_BUFFER_CONFIG *a, - const YV12_BUFFER_CONFIG *b) { - assert(a->y_crop_width == b->y_crop_width); - assert(a->y_crop_height == b->y_crop_height); - - return get_sse(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride, - a->y_crop_width, a->y_crop_height); -} - -#if CONFIG_VP9_HIGHBITDEPTH -int64_t vp10_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a, - const YV12_BUFFER_CONFIG *b) { - assert(a->y_crop_width == b->y_crop_width); - assert(a->y_crop_height == 
b->y_crop_height); - assert((a->flags & YV12_FLAG_HIGHBITDEPTH) != 0); - assert((b->flags & YV12_FLAG_HIGHBITDEPTH) != 0); - - return highbd_get_sse(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride, - a->y_crop_width, a->y_crop_height); -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -int vp10_get_quantizer(VP10_COMP *cpi) { - return cpi->common.base_qindex; -} - -void vp10_apply_encoding_flags(VP10_COMP *cpi, vpx_enc_frame_flags_t flags) { - if (flags & (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_REF_ARF)) { - int ref = 7; - - if (flags & VP8_EFLAG_NO_REF_LAST) - ref ^= VP9_LAST_FLAG; - - if (flags & VP8_EFLAG_NO_REF_GF) - ref ^= VP9_GOLD_FLAG; - - if (flags & VP8_EFLAG_NO_REF_ARF) - ref ^= VP9_ALT_FLAG; - - vp10_use_as_reference(cpi, ref); - } - - if (flags & (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_FORCE_GF | - VP8_EFLAG_FORCE_ARF)) { - int upd = 7; - - if (flags & VP8_EFLAG_NO_UPD_LAST) - upd ^= VP9_LAST_FLAG; - - if (flags & VP8_EFLAG_NO_UPD_GF) - upd ^= VP9_GOLD_FLAG; - - if (flags & VP8_EFLAG_NO_UPD_ARF) - upd ^= VP9_ALT_FLAG; - - vp10_update_reference(cpi, upd); - } - - if (flags & VP8_EFLAG_NO_UPD_ENTROPY) { - vp10_update_entropy(cpi, 0); - } -} diff --git a/libs/libvpx/vp10/encoder/encoder.h b/libs/libvpx/vp10/encoder/encoder.h deleted file mode 100644 index bd6a00932f..0000000000 --- a/libs/libvpx/vp10/encoder/encoder.h +++ /dev/null @@ -1,648 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_ENCODER_ENCODER_H_ -#define VP10_ENCODER_ENCODER_H_ - -#include - -#include "./vpx_config.h" -#include "vpx/vp8cx.h" - -#include "vp10/common/alloccommon.h" -#include "vp10/common/ppflags.h" -#include "vp10/common/entropymode.h" -#include "vp10/common/thread_common.h" -#include "vp10/common/onyxc_int.h" - -#include "vp10/encoder/aq_cyclicrefresh.h" -#include "vp10/encoder/context_tree.h" -#include "vp10/encoder/encodemb.h" -#include "vp10/encoder/firstpass.h" -#include "vp10/encoder/lookahead.h" -#include "vp10/encoder/mbgraph.h" -#include "vp10/encoder/mcomp.h" -#include "vp10/encoder/quantize.h" -#include "vp10/encoder/ratectrl.h" -#include "vp10/encoder/rd.h" -#include "vp10/encoder/speed_features.h" -#include "vp10/encoder/tokenize.h" - -#if CONFIG_VP9_TEMPORAL_DENOISING -#include "vp10/encoder/denoiser.h" -#endif - -#if CONFIG_INTERNAL_STATS -#include "vpx_dsp/ssim.h" -#endif -#include "vpx_dsp/variance.h" -#include "vpx/internal/vpx_codec_internal.h" -#include "vpx_util/vpx_thread.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct { - int nmvjointcost[MV_JOINTS]; - int nmvcosts[2][MV_VALS]; - int nmvcosts_hp[2][MV_VALS]; - -#if !CONFIG_MISC_FIXES - vpx_prob segment_pred_probs[PREDICTION_PROBS]; -#endif - - unsigned char *last_frame_seg_map_copy; - - // 0 = Intra, Last, GF, ARF - signed char last_ref_lf_deltas[MAX_REF_FRAMES]; - // 0 = ZERO_MV, MV - signed char last_mode_lf_deltas[MAX_MODE_LF_DELTAS]; - - FRAME_CONTEXT fc; -} CODING_CONTEXT; - - -typedef enum { - // encode_breakout is disabled. - ENCODE_BREAKOUT_DISABLED = 0, - // encode_breakout is enabled. - ENCODE_BREAKOUT_ENABLED = 1, - // encode_breakout is enabled with small max_thresh limit. - ENCODE_BREAKOUT_LIMITED = 2 -} ENCODE_BREAKOUT_TYPE; - -typedef enum { - NORMAL = 0, - FOURFIVE = 1, - THREEFIVE = 2, - ONETWO = 3 -} VPX_SCALING; - -typedef enum { - // Good Quality Fast Encoding. 
The encoder balances quality with the amount of - // time it takes to encode the output. Speed setting controls how fast. - GOOD, - - // The encoder places priority on the quality of the output over encoding - // speed. The output is compressed at the highest possible quality. This - // option takes the longest amount of time to encode. Speed setting ignored. - BEST, - - // Realtime/Live Encoding. This mode is optimized for realtime encoding (for - // example, capturing a television signal or feed from a live camera). Speed - // setting controls how fast. - REALTIME -} MODE; - -typedef enum { - FRAMEFLAGS_KEY = 1 << 0, - FRAMEFLAGS_GOLDEN = 1 << 1, - FRAMEFLAGS_ALTREF = 1 << 2, -} FRAMETYPE_FLAGS; - -typedef enum { - NO_AQ = 0, - VARIANCE_AQ = 1, - COMPLEXITY_AQ = 2, - CYCLIC_REFRESH_AQ = 3, - AQ_MODE_COUNT // This should always be the last member of the enum -} AQ_MODE; - -typedef enum { - RESIZE_NONE = 0, // No frame resizing allowed. - RESIZE_FIXED = 1, // All frames are coded at the specified dimension. - RESIZE_DYNAMIC = 2 // Coded size of each frame is determined by the codec. -} RESIZE_TYPE; - -typedef struct VP10EncoderConfig { - BITSTREAM_PROFILE profile; - vpx_bit_depth_t bit_depth; // Codec bit-depth. - int width; // width of data passed to the compressor - int height; // height of data passed to the compressor - unsigned int input_bit_depth; // Input bit depth. - double init_framerate; // set to passed in framerate - int64_t target_bandwidth; // bandwidth to be used in kilobits per second - - int noise_sensitivity; // pre processing blur: recommendation 0 - int sharpness; // sharpening output: recommendation 0: - int speed; - // maximum allowed bitrate for any intra frame in % of bitrate target. - unsigned int rc_max_intra_bitrate_pct; - // maximum allowed bitrate for any inter frame in % of bitrate target. - unsigned int rc_max_inter_bitrate_pct; - // percent of rate boost for golden frame in CBR mode. 
- unsigned int gf_cbr_boost_pct; - - MODE mode; - int pass; - - // Key Framing Operations - int auto_key; // autodetect cut scenes and set the keyframes - int key_freq; // maximum distance to key frame. - - int lag_in_frames; // how many frames lag before we start encoding - - // ---------------------------------------------------------------- - // DATARATE CONTROL OPTIONS - - // vbr, cbr, constrained quality or constant quality - enum vpx_rc_mode rc_mode; - - // buffer targeting aggressiveness - int under_shoot_pct; - int over_shoot_pct; - - // buffering parameters - int64_t starting_buffer_level_ms; - int64_t optimal_buffer_level_ms; - int64_t maximum_buffer_size_ms; - - // Frame drop threshold. - int drop_frames_water_mark; - - // controlling quality - int fixed_q; - int worst_allowed_q; - int best_allowed_q; - int cq_level; - AQ_MODE aq_mode; // Adaptive Quantization mode - - // Internal frame size scaling. - RESIZE_TYPE resize_mode; - int scaled_frame_width; - int scaled_frame_height; - - // Enable feature to reduce the frame quantization every x frames. - int frame_periodic_boost; - - // two pass datarate control - int two_pass_vbrbias; // two pass datarate control tweaks - int two_pass_vbrmin_section; - int two_pass_vbrmax_section; - // END DATARATE CONTROL OPTIONS - // ---------------------------------------------------------------- - - int enable_auto_arf; - - int encode_breakout; // early breakout : for video conf recommend 800 - - /* Bitfield defining the error resiliency features to enable. - * Can provide decodable frames after losses in previous - * frames and decodable partitions after losses in the same frame. - */ - unsigned int error_resilient_mode; - - /* Bitfield defining the parallel decoding mode where the - * decoding in successive frames may be conducted in parallel - * just by decoding the frame headers. 
- */ - unsigned int frame_parallel_decoding_mode; - - int arnr_max_frames; - int arnr_strength; - - int min_gf_interval; - int max_gf_interval; - - int tile_columns; - int tile_rows; - - int max_threads; - - vpx_fixed_buf_t two_pass_stats_in; - struct vpx_codec_pkt_list *output_pkt_list; - -#if CONFIG_FP_MB_STATS - vpx_fixed_buf_t firstpass_mb_stats_in; -#endif - - vp8e_tuning tuning; - vp9e_tune_content content; -#if CONFIG_VP9_HIGHBITDEPTH - int use_highbitdepth; -#endif - vpx_color_space_t color_space; - int color_range; - int render_width; - int render_height; -} VP10EncoderConfig; - -static INLINE int is_lossless_requested(const VP10EncoderConfig *cfg) { - return cfg->best_allowed_q == 0 && cfg->worst_allowed_q == 0; -} - -// TODO(jingning) All spatially adaptive variables should go to TileDataEnc. -typedef struct TileDataEnc { - TileInfo tile_info; - int thresh_freq_fact[BLOCK_SIZES][MAX_MODES]; - int mode_map[BLOCK_SIZES][MAX_MODES]; -} TileDataEnc; - -typedef struct RD_COUNTS { - vp10_coeff_count coef_counts[TX_SIZES][PLANE_TYPES]; - int64_t comp_pred_diff[REFERENCE_MODES]; - int64_t filter_diff[SWITCHABLE_FILTER_CONTEXTS]; - int m_search_count; - int ex_search_count; -} RD_COUNTS; - -typedef struct ThreadData { - MACROBLOCK mb; - RD_COUNTS rd_counts; - FRAME_COUNTS *counts; - - PICK_MODE_CONTEXT *leaf_tree; - PC_TREE *pc_tree; - PC_TREE *pc_root; -} ThreadData; - -struct EncWorkerData; - -typedef struct ActiveMap { - int enabled; - int update; - unsigned char *map; -} ActiveMap; - -typedef enum { - Y, - U, - V, - ALL -} STAT_TYPE; - -typedef struct IMAGE_STAT { - double stat[ALL+1]; - double worst; -} ImageStat; - -typedef struct VP10_COMP { - QUANTS quants; - ThreadData td; - MB_MODE_INFO_EXT *mbmi_ext_base; - DECLARE_ALIGNED(16, int16_t, y_dequant[QINDEX_RANGE][8]); - DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][8]); - VP10_COMMON common; - VP10EncoderConfig oxcf; - struct lookahead_ctx *lookahead; - struct lookahead_entry *alt_ref_source; - - 
YV12_BUFFER_CONFIG *Source; - YV12_BUFFER_CONFIG *Last_Source; // NULL for first frame and alt_ref frames - YV12_BUFFER_CONFIG *un_scaled_source; - YV12_BUFFER_CONFIG scaled_source; - YV12_BUFFER_CONFIG *unscaled_last_source; - YV12_BUFFER_CONFIG scaled_last_source; - - TileDataEnc *tile_data; - int allocated_tiles; // Keep track of memory allocated for tiles. - - // For a still frame, this flag is set to 1 to skip partition search. - int partition_search_skippable_frame; - - int scaled_ref_idx[MAX_REF_FRAMES]; - int lst_fb_idx; - int gld_fb_idx; - int alt_fb_idx; - - int refresh_last_frame; - int refresh_golden_frame; - int refresh_alt_ref_frame; - - int ext_refresh_frame_flags_pending; - int ext_refresh_last_frame; - int ext_refresh_golden_frame; - int ext_refresh_alt_ref_frame; - - int ext_refresh_frame_context_pending; - int ext_refresh_frame_context; - - YV12_BUFFER_CONFIG last_frame_uf; - - TOKENEXTRA *tile_tok[4][1 << 6]; - unsigned int tok_count[4][1 << 6]; - - // Ambient reconstruction err target for force key frames - int64_t ambient_err; - - RD_OPT rd; - - CODING_CONTEXT coding_context; - - int *nmvcosts[2]; - int *nmvcosts_hp[2]; - int *nmvsadcosts[2]; - int *nmvsadcosts_hp[2]; - - int64_t last_time_stamp_seen; - int64_t last_end_time_stamp_seen; - int64_t first_time_stamp_ever; - - RATE_CONTROL rc; - double framerate; - - int interp_filter_selected[MAX_REF_FRAMES][SWITCHABLE]; - - struct vpx_codec_pkt_list *output_pkt_list; - - MBGRAPH_FRAME_STATS mbgraph_stats[MAX_LAG_BUFFERS]; - int mbgraph_n_frames; // number of frames filled in the above - int static_mb_pct; // % forced skip mbs by segmentation - int ref_frame_flags; - - SPEED_FEATURES sf; - - unsigned int max_mv_magnitude; - int mv_step_param; - - int allow_comp_inter_inter; - - // Default value is 1. From first pass stats, encode_breakout may be disabled. - ENCODE_BREAKOUT_TYPE allow_encode_breakout; - - // Get threshold from external input. 
A suggested threshold is 800 for HD - // clips, and 300 for < HD clips. - int encode_breakout; - - unsigned char *segmentation_map; - - // segment threashold for encode breakout - int segment_encode_breakout[MAX_SEGMENTS]; - - CYCLIC_REFRESH *cyclic_refresh; - ActiveMap active_map; - - fractional_mv_step_fp *find_fractional_mv_step; - vp10_full_search_fn_t full_search_sad; - vp10_diamond_search_fn_t diamond_search_sad; - vp9_variance_fn_ptr_t fn_ptr[BLOCK_SIZES]; - uint64_t time_receive_data; - uint64_t time_compress_data; - uint64_t time_pick_lpf; - uint64_t time_encode_sb_row; - -#if CONFIG_FP_MB_STATS - int use_fp_mb_stats; -#endif - - TWO_PASS twopass; - - YV12_BUFFER_CONFIG alt_ref_buffer; - - -#if CONFIG_INTERNAL_STATS - unsigned int mode_chosen_counts[MAX_MODES]; - - int count; - uint64_t total_sq_error; - uint64_t total_samples; - ImageStat psnr; - - uint64_t totalp_sq_error; - uint64_t totalp_samples; - ImageStat psnrp; - - double total_blockiness; - double worst_blockiness; - - int bytes; - double summed_quality; - double summed_weights; - double summedp_quality; - double summedp_weights; - unsigned int tot_recode_hits; - double worst_ssim; - - ImageStat ssimg; - ImageStat fastssim; - ImageStat psnrhvs; - - int b_calculate_ssimg; - int b_calculate_blockiness; - - int b_calculate_consistency; - - double total_inconsistency; - double worst_consistency; - Ssimv *ssim_vars; - Metrics metrics; -#endif - int b_calculate_psnr; - - int droppable; - - int initial_width; - int initial_height; - int initial_mbs; // Number of MBs in the full-size frame; to be used to - // normalize the firstpass stats. This will differ from the - // number of MBs in the current frame when the frame is - // scaled. - - // Store frame variance info in SOURCE_VAR_BASED_PARTITION search type. - diff *source_diff_var; - // The threshold used in SOURCE_VAR_BASED_PARTITION search type. 
- unsigned int source_var_thresh; - int frames_till_next_var_check; - - int frame_flags; - - search_site_config ss_cfg; - - int mbmode_cost[INTRA_MODES]; - unsigned int inter_mode_cost[INTER_MODE_CONTEXTS][INTER_MODES]; - int intra_uv_mode_cost[INTRA_MODES][INTRA_MODES]; - int y_mode_costs[INTRA_MODES][INTRA_MODES][INTRA_MODES]; - int switchable_interp_costs[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS]; - int partition_cost[PARTITION_CONTEXTS][PARTITION_TYPES]; - - int multi_arf_allowed; - int multi_arf_enabled; - int multi_arf_last_grp_enabled; - - int intra_tx_type_costs[EXT_TX_SIZES][TX_TYPES][TX_TYPES]; - int inter_tx_type_costs[EXT_TX_SIZES][TX_TYPES]; -#if CONFIG_VP9_TEMPORAL_DENOISING - VP9_DENOISER denoiser; -#endif - - int resize_pending; - int resize_state; - int resize_scale_num; - int resize_scale_den; - int resize_avg_qp; - int resize_buffer_underflow; - int resize_count; - - // VAR_BASED_PARTITION thresholds - // 0 - threshold_64x64; 1 - threshold_32x32; - // 2 - threshold_16x16; 3 - vbp_threshold_8x8; - int64_t vbp_thresholds[4]; - int64_t vbp_threshold_minmax; - int64_t vbp_threshold_sad; - BLOCK_SIZE vbp_bsize_min; - - // Multi-threading - int num_workers; - VPxWorker *workers; - struct EncWorkerData *tile_thr_data; - VP9LfSync lf_row_sync; -} VP10_COMP; - -void vp10_initialize_enc(void); - -struct VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf, - BufferPool *const pool); -void vp10_remove_compressor(VP10_COMP *cpi); - -void vp10_change_config(VP10_COMP *cpi, const VP10EncoderConfig *oxcf); - - // receive a frames worth of data. caller can assume that a copy of this - // frame is made and not just a copy of the pointer.. 
-int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags, - YV12_BUFFER_CONFIG *sd, int64_t time_stamp, - int64_t end_time_stamp); - -int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags, - size_t *size, uint8_t *dest, - int64_t *time_stamp, int64_t *time_end, int flush); - -int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest, - vp10_ppflags_t *flags); - -int vp10_use_as_reference(VP10_COMP *cpi, int ref_frame_flags); - -void vp10_update_reference(VP10_COMP *cpi, int ref_frame_flags); - -int vp10_copy_reference_enc(VP10_COMP *cpi, VP9_REFFRAME ref_frame_flag, - YV12_BUFFER_CONFIG *sd); - -int vp10_set_reference_enc(VP10_COMP *cpi, VP9_REFFRAME ref_frame_flag, - YV12_BUFFER_CONFIG *sd); - -int vp10_update_entropy(VP10_COMP *cpi, int update); - -int vp10_set_active_map(VP10_COMP *cpi, unsigned char *map, int rows, int cols); - -int vp10_get_active_map(VP10_COMP *cpi, unsigned char *map, int rows, int cols); - -int vp10_set_internal_size(VP10_COMP *cpi, - VPX_SCALING horiz_mode, VPX_SCALING vert_mode); - -int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width, - unsigned int height); - -int vp10_get_quantizer(struct VP10_COMP *cpi); - -static INLINE int frame_is_kf_gf_arf(const VP10_COMP *cpi) { - return frame_is_intra_only(&cpi->common) || - cpi->refresh_alt_ref_frame || - (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref); -} - -static INLINE int get_ref_frame_map_idx(const VP10_COMP *cpi, - MV_REFERENCE_FRAME ref_frame) { - if (ref_frame == LAST_FRAME) { - return cpi->lst_fb_idx; - } else if (ref_frame == GOLDEN_FRAME) { - return cpi->gld_fb_idx; - } else { - return cpi->alt_fb_idx; - } -} - -static INLINE int get_ref_frame_buf_idx(const VP10_COMP *const cpi, - int ref_frame) { - const VP10_COMMON *const cm = &cpi->common; - const int map_idx = get_ref_frame_map_idx(cpi, ref_frame); - return (map_idx != INVALID_IDX) ? 
cm->ref_frame_map[map_idx] : INVALID_IDX; -} - -static INLINE YV12_BUFFER_CONFIG *get_ref_frame_buffer( - VP10_COMP *cpi, MV_REFERENCE_FRAME ref_frame) { - VP10_COMMON *const cm = &cpi->common; - const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame); - return - buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf : NULL; -} - -static INLINE int get_token_alloc(int mb_rows, int mb_cols) { - // TODO(JBB): double check we can't exceed this token count if we have a - // 32x32 transform crossing a boundary at a multiple of 16. - // mb_rows, cols are in units of 16 pixels. We assume 3 planes all at full - // resolution. We assume up to 1 token per pixel, and then allow - // a head room of 1 EOSB token per 8x8 block per plane. - return mb_rows * mb_cols * (16 * 16 + 4) * 3; -} - -// Get the allocated token size for a tile. It does the same calculation as in -// the frame token allocation. -static INLINE int allocated_tokens(TileInfo tile) { - int tile_mb_rows = (tile.mi_row_end - tile.mi_row_start + 1) >> 1; - int tile_mb_cols = (tile.mi_col_end - tile.mi_col_start + 1) >> 1; - - return get_token_alloc(tile_mb_rows, tile_mb_cols); -} - -int64_t vp10_get_y_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b); -#if CONFIG_VP9_HIGHBITDEPTH -int64_t vp10_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a, - const YV12_BUFFER_CONFIG *b); -#endif // CONFIG_VP9_HIGHBITDEPTH - -void vp10_alloc_compressor_data(VP10_COMP *cpi); - -void vp10_scale_references(VP10_COMP *cpi); - -void vp10_update_reference_frames(VP10_COMP *cpi); - -void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv); - -YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm, - YV12_BUFFER_CONFIG *unscaled, - YV12_BUFFER_CONFIG *scaled); - -YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm, - YV12_BUFFER_CONFIG *unscaled, - YV12_BUFFER_CONFIG *scaled); - -void vp10_apply_encoding_flags(VP10_COMP *cpi, vpx_enc_frame_flags_t flags); - -static INLINE int 
is_altref_enabled(const VP10_COMP *const cpi) { - return cpi->oxcf.mode != REALTIME && cpi->oxcf.lag_in_frames > 0 && - cpi->oxcf.enable_auto_arf; -} - -static INLINE void set_ref_ptrs(VP10_COMMON *cm, MACROBLOCKD *xd, - MV_REFERENCE_FRAME ref0, - MV_REFERENCE_FRAME ref1) { - xd->block_refs[0] = &cm->frame_refs[ref0 >= LAST_FRAME ? ref0 - LAST_FRAME - : 0]; - xd->block_refs[1] = &cm->frame_refs[ref1 >= LAST_FRAME ? ref1 - LAST_FRAME - : 0]; -} - -static INLINE int get_chessboard_index(const int frame_index) { - return frame_index & 0x1; -} - -static INLINE int *cond_cost_list(const struct VP10_COMP *cpi, int *cost_list) { - return cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL; -} - -void vp10_new_framerate(VP10_COMP *cpi, double framerate); - -#define LAYER_IDS_TO_IDX(sl, tl, num_tl) ((sl) * (num_tl) + (tl)) - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_ENCODER_H_ diff --git a/libs/libvpx/vp10/encoder/ethread.c b/libs/libvpx/vp10/encoder/ethread.c deleted file mode 100644 index ad47ccf043..0000000000 --- a/libs/libvpx/vp10/encoder/ethread.c +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "vp10/encoder/encodeframe.h" -#include "vp10/encoder/encoder.h" -#include "vp10/encoder/ethread.h" -#include "vpx_dsp/vpx_dsp_common.h" - -static void accumulate_rd_opt(ThreadData *td, ThreadData *td_t) { - int i, j, k, l, m, n; - - for (i = 0; i < REFERENCE_MODES; i++) - td->rd_counts.comp_pred_diff[i] += td_t->rd_counts.comp_pred_diff[i]; - - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) - td->rd_counts.filter_diff[i] += td_t->rd_counts.filter_diff[i]; - - for (i = 0; i < TX_SIZES; i++) - for (j = 0; j < PLANE_TYPES; j++) - for (k = 0; k < REF_TYPES; k++) - for (l = 0; l < COEF_BANDS; l++) - for (m = 0; m < COEFF_CONTEXTS; m++) - for (n = 0; n < ENTROPY_TOKENS; n++) - td->rd_counts.coef_counts[i][j][k][l][m][n] += - td_t->rd_counts.coef_counts[i][j][k][l][m][n]; - - - // Counts of all motion searches and exhuastive mesh searches. - td->rd_counts.m_search_count += td_t->rd_counts.m_search_count; - td->rd_counts.ex_search_count += td_t->rd_counts.ex_search_count; -} - -static int enc_worker_hook(EncWorkerData *const thread_data, void *unused) { - VP10_COMP *const cpi = thread_data->cpi; - const VP10_COMMON *const cm = &cpi->common; - const int tile_cols = 1 << cm->log2_tile_cols; - const int tile_rows = 1 << cm->log2_tile_rows; - int t; - - (void) unused; - - for (t = thread_data->start; t < tile_rows * tile_cols; - t += cpi->num_workers) { - int tile_row = t / tile_cols; - int tile_col = t % tile_cols; - - vp10_encode_tile(cpi, thread_data->td, tile_row, tile_col); - } - - return 0; -} - -void vp10_encode_tiles_mt(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - const int tile_cols = 1 << cm->log2_tile_cols; - const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); - const int num_workers = VPXMIN(cpi->oxcf.max_threads, tile_cols); - int i; - - vp10_init_tile_data(cpi); - - // Only run once to create threads and allocate thread data. 
- if (cpi->num_workers == 0) { - int allocated_workers = num_workers; - - CHECK_MEM_ERROR(cm, cpi->workers, - vpx_malloc(allocated_workers * sizeof(*cpi->workers))); - - CHECK_MEM_ERROR(cm, cpi->tile_thr_data, - vpx_calloc(allocated_workers, - sizeof(*cpi->tile_thr_data))); - - for (i = 0; i < allocated_workers; i++) { - VPxWorker *const worker = &cpi->workers[i]; - EncWorkerData *thread_data = &cpi->tile_thr_data[i]; - - ++cpi->num_workers; - winterface->init(worker); - - if (i < allocated_workers - 1) { - thread_data->cpi = cpi; - - // Allocate thread data. - CHECK_MEM_ERROR(cm, thread_data->td, - vpx_memalign(32, sizeof(*thread_data->td))); - vp10_zero(*thread_data->td); - - // Set up pc_tree. - thread_data->td->leaf_tree = NULL; - thread_data->td->pc_tree = NULL; - vp10_setup_pc_tree(cm, thread_data->td); - - // Allocate frame counters in thread data. - CHECK_MEM_ERROR(cm, thread_data->td->counts, - vpx_calloc(1, sizeof(*thread_data->td->counts))); - - // Create threads - if (!winterface->reset(worker)) - vpx_internal_error(&cm->error, VPX_CODEC_ERROR, - "Tile encoder thread creation failed"); - } else { - // Main thread acts as a worker and uses the thread data in cpi. - thread_data->cpi = cpi; - thread_data->td = &cpi->td; - } - - winterface->sync(worker); - } - } - - for (i = 0; i < num_workers; i++) { - VPxWorker *const worker = &cpi->workers[i]; - EncWorkerData *thread_data; - - worker->hook = (VPxWorkerHook)enc_worker_hook; - worker->data1 = &cpi->tile_thr_data[i]; - worker->data2 = NULL; - thread_data = (EncWorkerData*)worker->data1; - - // Before encoding a frame, copy the thread data from cpi. 
- if (thread_data->td != &cpi->td) { - thread_data->td->mb = cpi->td.mb; - thread_data->td->rd_counts = cpi->td.rd_counts; - } - if (thread_data->td->counts != &cpi->common.counts) { - memcpy(thread_data->td->counts, &cpi->common.counts, - sizeof(cpi->common.counts)); - } - } - - // Encode a frame - for (i = 0; i < num_workers; i++) { - VPxWorker *const worker = &cpi->workers[i]; - EncWorkerData *const thread_data = (EncWorkerData*)worker->data1; - - // Set the starting tile for each thread. - thread_data->start = i; - - if (i == cpi->num_workers - 1) - winterface->execute(worker); - else - winterface->launch(worker); - } - - // Encoding ends. - for (i = 0; i < num_workers; i++) { - VPxWorker *const worker = &cpi->workers[i]; - winterface->sync(worker); - } - - for (i = 0; i < num_workers; i++) { - VPxWorker *const worker = &cpi->workers[i]; - EncWorkerData *const thread_data = (EncWorkerData*)worker->data1; - - // Accumulate counters. - if (i < cpi->num_workers - 1) { - vp10_accumulate_frame_counts(cm, thread_data->td->counts, 0); - accumulate_rd_opt(&cpi->td, thread_data->td); - } - } -} diff --git a/libs/libvpx/vp10/encoder/ethread.h b/libs/libvpx/vp10/encoder/ethread.h deleted file mode 100644 index d72816cd5c..0000000000 --- a/libs/libvpx/vp10/encoder/ethread.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_ENCODER_ETHREAD_H_ -#define VP10_ENCODER_ETHREAD_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -struct VP10_COMP; -struct ThreadData; - -typedef struct EncWorkerData { - struct VP10_COMP *cpi; - struct ThreadData *td; - int start; -} EncWorkerData; - -void vp10_encode_tiles_mt(struct VP10_COMP *cpi); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_ETHREAD_H_ diff --git a/libs/libvpx/vp10/encoder/extend.c b/libs/libvpx/vp10/encoder/extend.c deleted file mode 100644 index 4c8ce3b572..0000000000 --- a/libs/libvpx/vp10/encoder/extend.c +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/mem.h" - -#include "vp10/common/common.h" -#include "vp10/encoder/extend.h" - -static void copy_and_extend_plane(const uint8_t *src, int src_pitch, - uint8_t *dst, int dst_pitch, - int w, int h, - int extend_top, int extend_left, - int extend_bottom, int extend_right) { - int i, linesize; - - // copy the left and right most columns out - const uint8_t *src_ptr1 = src; - const uint8_t *src_ptr2 = src + w - 1; - uint8_t *dst_ptr1 = dst - extend_left; - uint8_t *dst_ptr2 = dst + w; - - for (i = 0; i < h; i++) { - memset(dst_ptr1, src_ptr1[0], extend_left); - memcpy(dst_ptr1 + extend_left, src_ptr1, w); - memset(dst_ptr2, src_ptr2[0], extend_right); - src_ptr1 += src_pitch; - src_ptr2 += src_pitch; - dst_ptr1 += dst_pitch; - dst_ptr2 += dst_pitch; - } - - // Now copy the top and bottom lines into each line of the respective - // borders - src_ptr1 = dst - extend_left; - src_ptr2 = dst + dst_pitch * (h - 1) - extend_left; - dst_ptr1 = dst + dst_pitch * (-extend_top) - extend_left; - dst_ptr2 = dst + dst_pitch * (h) - extend_left; - linesize = extend_left + extend_right + w; - - for (i = 0; i < extend_top; i++) { - memcpy(dst_ptr1, src_ptr1, linesize); - dst_ptr1 += dst_pitch; - } - - for (i = 0; i < extend_bottom; i++) { - memcpy(dst_ptr2, src_ptr2, linesize); - dst_ptr2 += dst_pitch; - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch, - uint8_t *dst8, int dst_pitch, - int w, int h, - int extend_top, int extend_left, - int extend_bottom, int extend_right) { - int i, linesize; - uint16_t *src = CONVERT_TO_SHORTPTR(src8); - uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); - - // copy the left and right most columns out - const uint16_t *src_ptr1 = src; - const uint16_t *src_ptr2 = src + w - 1; - uint16_t *dst_ptr1 = dst - extend_left; - uint16_t *dst_ptr2 = dst + w; - - for (i = 0; i < h; i++) { - vpx_memset16(dst_ptr1, 
src_ptr1[0], extend_left); - memcpy(dst_ptr1 + extend_left, src_ptr1, w * sizeof(src_ptr1[0])); - vpx_memset16(dst_ptr2, src_ptr2[0], extend_right); - src_ptr1 += src_pitch; - src_ptr2 += src_pitch; - dst_ptr1 += dst_pitch; - dst_ptr2 += dst_pitch; - } - - // Now copy the top and bottom lines into each line of the respective - // borders - src_ptr1 = dst - extend_left; - src_ptr2 = dst + dst_pitch * (h - 1) - extend_left; - dst_ptr1 = dst + dst_pitch * (-extend_top) - extend_left; - dst_ptr2 = dst + dst_pitch * (h) - extend_left; - linesize = extend_left + extend_right + w; - - for (i = 0; i < extend_top; i++) { - memcpy(dst_ptr1, src_ptr1, linesize * sizeof(src_ptr1[0])); - dst_ptr1 += dst_pitch; - } - - for (i = 0; i < extend_bottom; i++) { - memcpy(dst_ptr2, src_ptr2, linesize * sizeof(src_ptr2[0])); - dst_ptr2 += dst_pitch; - } -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src, - YV12_BUFFER_CONFIG *dst) { - // Extend src frame in buffer - // Altref filtering assumes 16 pixel extension - const int et_y = 16; - const int el_y = 16; - // Motion estimation may use src block variance with the block size up - // to 64x64, so the right and bottom need to be extended to 64 multiple - // or up to 16, whichever is greater. 
- const int er_y = - VPXMAX(src->y_width + 16, ALIGN_POWER_OF_TWO(src->y_width, 6)) - - src->y_crop_width; - const int eb_y = - VPXMAX(src->y_height + 16, ALIGN_POWER_OF_TWO(src->y_height, 6)) - - src->y_crop_height; - const int uv_width_subsampling = (src->uv_width != src->y_width); - const int uv_height_subsampling = (src->uv_height != src->y_height); - const int et_uv = et_y >> uv_height_subsampling; - const int el_uv = el_y >> uv_width_subsampling; - const int eb_uv = eb_y >> uv_height_subsampling; - const int er_uv = er_y >> uv_width_subsampling; - -#if CONFIG_VP9_HIGHBITDEPTH - if (src->flags & YV12_FLAG_HIGHBITDEPTH) { - highbd_copy_and_extend_plane(src->y_buffer, src->y_stride, - dst->y_buffer, dst->y_stride, - src->y_crop_width, src->y_crop_height, - et_y, el_y, eb_y, er_y); - - highbd_copy_and_extend_plane(src->u_buffer, src->uv_stride, - dst->u_buffer, dst->uv_stride, - src->uv_crop_width, src->uv_crop_height, - et_uv, el_uv, eb_uv, er_uv); - - highbd_copy_and_extend_plane(src->v_buffer, src->uv_stride, - dst->v_buffer, dst->uv_stride, - src->uv_crop_width, src->uv_crop_height, - et_uv, el_uv, eb_uv, er_uv); - return; - } -#endif // CONFIG_VP9_HIGHBITDEPTH - - copy_and_extend_plane(src->y_buffer, src->y_stride, - dst->y_buffer, dst->y_stride, - src->y_crop_width, src->y_crop_height, - et_y, el_y, eb_y, er_y); - - copy_and_extend_plane(src->u_buffer, src->uv_stride, - dst->u_buffer, dst->uv_stride, - src->uv_crop_width, src->uv_crop_height, - et_uv, el_uv, eb_uv, er_uv); - - copy_and_extend_plane(src->v_buffer, src->uv_stride, - dst->v_buffer, dst->uv_stride, - src->uv_crop_width, src->uv_crop_height, - et_uv, el_uv, eb_uv, er_uv); -} - -void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src, - YV12_BUFFER_CONFIG *dst, - int srcy, int srcx, - int srch, int srcw) { - // If the side is not touching the bounder then don't extend. - const int et_y = srcy ? 0 : dst->border; - const int el_y = srcx ? 
0 : dst->border; - const int eb_y = srcy + srch != src->y_height ? 0 : - dst->border + dst->y_height - src->y_height; - const int er_y = srcx + srcw != src->y_width ? 0 : - dst->border + dst->y_width - src->y_width; - const int src_y_offset = srcy * src->y_stride + srcx; - const int dst_y_offset = srcy * dst->y_stride + srcx; - - const int et_uv = ROUND_POWER_OF_TWO(et_y, 1); - const int el_uv = ROUND_POWER_OF_TWO(el_y, 1); - const int eb_uv = ROUND_POWER_OF_TWO(eb_y, 1); - const int er_uv = ROUND_POWER_OF_TWO(er_y, 1); - const int src_uv_offset = ((srcy * src->uv_stride) >> 1) + (srcx >> 1); - const int dst_uv_offset = ((srcy * dst->uv_stride) >> 1) + (srcx >> 1); - const int srch_uv = ROUND_POWER_OF_TWO(srch, 1); - const int srcw_uv = ROUND_POWER_OF_TWO(srcw, 1); - - copy_and_extend_plane(src->y_buffer + src_y_offset, src->y_stride, - dst->y_buffer + dst_y_offset, dst->y_stride, - srcw, srch, - et_y, el_y, eb_y, er_y); - - copy_and_extend_plane(src->u_buffer + src_uv_offset, src->uv_stride, - dst->u_buffer + dst_uv_offset, dst->uv_stride, - srcw_uv, srch_uv, - et_uv, el_uv, eb_uv, er_uv); - - copy_and_extend_plane(src->v_buffer + src_uv_offset, src->uv_stride, - dst->v_buffer + dst_uv_offset, dst->uv_stride, - srcw_uv, srch_uv, - et_uv, el_uv, eb_uv, er_uv); -} diff --git a/libs/libvpx/vp10/encoder/extend.h b/libs/libvpx/vp10/encoder/extend.h deleted file mode 100644 index 6f502ef6a0..0000000000 --- a/libs/libvpx/vp10/encoder/extend.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_ENCODER_EXTEND_H_ -#define VP10_ENCODER_EXTEND_H_ - -#include "vpx_scale/yv12config.h" -#include "vpx/vpx_integer.h" - -#ifdef __cplusplus -extern "C" { -#endif - - -void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src, - YV12_BUFFER_CONFIG *dst); - -void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src, - YV12_BUFFER_CONFIG *dst, - int srcy, int srcx, - int srch, int srcw); -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_EXTEND_H_ diff --git a/libs/libvpx/vp10/encoder/firstpass.c b/libs/libvpx/vp10/encoder/firstpass.c deleted file mode 100644 index bc1ce001bb..0000000000 --- a/libs/libvpx/vp10/encoder/firstpass.c +++ /dev/null @@ -1,2667 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include -#include - -#include "./vpx_dsp_rtcd.h" -#include "./vpx_scale_rtcd.h" - -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/mem.h" -#include "vpx_ports/system_state.h" -#include "vpx_scale/vpx_scale.h" -#include "vpx_scale/yv12config.h" - -#include "vp10/common/entropymv.h" -#include "vp10/common/quant_common.h" -#include "vp10/common/reconinter.h" // vp10_setup_dst_planes() -#include "vp10/encoder/aq_variance.h" -#include "vp10/encoder/block.h" -#include "vp10/encoder/encodeframe.h" -#include "vp10/encoder/encodemb.h" -#include "vp10/encoder/encodemv.h" -#include "vp10/encoder/encoder.h" -#include "vp10/encoder/extend.h" -#include "vp10/encoder/firstpass.h" -#include "vp10/encoder/mcomp.h" -#include "vp10/encoder/quantize.h" -#include "vp10/encoder/rd.h" -#include "vpx_dsp/variance.h" - -#define OUTPUT_FPF 0 -#define ARF_STATS_OUTPUT 0 - -#define GROUP_ADAPTIVE_MAXQ 1 - -#define BOOST_BREAKOUT 12.5 -#define BOOST_FACTOR 12.5 -#define ERR_DIVISOR 128.0 -#define FACTOR_PT_LOW 0.70 -#define FACTOR_PT_HIGH 0.90 -#define FIRST_PASS_Q 10.0 -#define GF_MAX_BOOST 96.0 -#define INTRA_MODE_PENALTY 1024 -#define KF_MAX_BOOST 128.0 -#define MIN_ARF_GF_BOOST 240 -#define MIN_DECAY_FACTOR 0.01 -#define MIN_KF_BOOST 300 -#define NEW_MV_MODE_PENALTY 32 -#define DARK_THRESH 64 -#define DEFAULT_GRP_WEIGHT 1.0 -#define RC_FACTOR_MIN 0.75 -#define RC_FACTOR_MAX 1.75 - - -#define NCOUNT_INTRA_THRESH 8192 -#define NCOUNT_INTRA_FACTOR 3 -#define NCOUNT_FRAME_II_THRESH 5.0 - -#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x) - 0.000001 : (x) + 0.000001) - -#if ARF_STATS_OUTPUT -unsigned int arf_count = 0; -#endif - -// Resets the first pass file to the given position using a relative seek from -// the current position. -static void reset_fpf_position(TWO_PASS *p, - const FIRSTPASS_STATS *position) { - p->stats_in = position; -} - -// Read frame stats at an offset from the current position. 
-static const FIRSTPASS_STATS *read_frame_stats(const TWO_PASS *p, int offset) { - if ((offset >= 0 && p->stats_in + offset >= p->stats_in_end) || - (offset < 0 && p->stats_in + offset < p->stats_in_start)) { - return NULL; - } - - return &p->stats_in[offset]; -} - -static int input_stats(TWO_PASS *p, FIRSTPASS_STATS *fps) { - if (p->stats_in >= p->stats_in_end) - return EOF; - - *fps = *p->stats_in; - ++p->stats_in; - return 1; -} - -static void output_stats(FIRSTPASS_STATS *stats, - struct vpx_codec_pkt_list *pktlist) { - struct vpx_codec_cx_pkt pkt; - pkt.kind = VPX_CODEC_STATS_PKT; - pkt.data.twopass_stats.buf = stats; - pkt.data.twopass_stats.sz = sizeof(FIRSTPASS_STATS); - vpx_codec_pkt_list_add(pktlist, &pkt); - -// TEMP debug code -#if OUTPUT_FPF - { - FILE *fpfile; - fpfile = fopen("firstpass.stt", "a"); - - fprintf(fpfile, "%12.0lf %12.4lf %12.0lf %12.0lf %12.0lf %12.4lf %12.4lf" - "%12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf" - "%12.4lf %12.4lf %12.0lf %12.0lf %12.0lf %12.4lf\n", - stats->frame, - stats->weight, - stats->intra_error, - stats->coded_error, - stats->sr_coded_error, - stats->pcnt_inter, - stats->pcnt_motion, - stats->pcnt_second_ref, - stats->pcnt_neutral, - stats->intra_skip_pct, - stats->inactive_zone_rows, - stats->inactive_zone_cols, - stats->MVr, - stats->mvr_abs, - stats->MVc, - stats->mvc_abs, - stats->MVrv, - stats->MVcv, - stats->mv_in_out_count, - stats->new_mv_count, - stats->count, - stats->duration); - fclose(fpfile); - } -#endif -} - -#if CONFIG_FP_MB_STATS -static void output_fpmb_stats(uint8_t *this_frame_mb_stats, - VP10_COMMON *cm, - struct vpx_codec_pkt_list *pktlist) { - struct vpx_codec_cx_pkt pkt; - pkt.kind = VPX_CODEC_FPMB_STATS_PKT; - pkt.data.firstpass_mb_stats.buf = this_frame_mb_stats; - pkt.data.firstpass_mb_stats.sz = cm->initial_mbs * sizeof(uint8_t); - vpx_codec_pkt_list_add(pktlist, &pkt); -} -#endif - -static void zero_stats(FIRSTPASS_STATS *section) { - section->frame = 0.0; - 
section->weight = 0.0; - section->intra_error = 0.0; - section->coded_error = 0.0; - section->sr_coded_error = 0.0; - section->pcnt_inter = 0.0; - section->pcnt_motion = 0.0; - section->pcnt_second_ref = 0.0; - section->pcnt_neutral = 0.0; - section->intra_skip_pct = 0.0; - section->inactive_zone_rows = 0.0; - section->inactive_zone_cols = 0.0; - section->MVr = 0.0; - section->mvr_abs = 0.0; - section->MVc = 0.0; - section->mvc_abs = 0.0; - section->MVrv = 0.0; - section->MVcv = 0.0; - section->mv_in_out_count = 0.0; - section->new_mv_count = 0.0; - section->count = 0.0; - section->duration = 1.0; -} - -static void accumulate_stats(FIRSTPASS_STATS *section, - const FIRSTPASS_STATS *frame) { - section->frame += frame->frame; - section->weight += frame->weight; - section->intra_error += frame->intra_error; - section->coded_error += frame->coded_error; - section->sr_coded_error += frame->sr_coded_error; - section->pcnt_inter += frame->pcnt_inter; - section->pcnt_motion += frame->pcnt_motion; - section->pcnt_second_ref += frame->pcnt_second_ref; - section->pcnt_neutral += frame->pcnt_neutral; - section->intra_skip_pct += frame->intra_skip_pct; - section->inactive_zone_rows += frame->inactive_zone_rows; - section->inactive_zone_cols += frame->inactive_zone_cols; - section->MVr += frame->MVr; - section->mvr_abs += frame->mvr_abs; - section->MVc += frame->MVc; - section->mvc_abs += frame->mvc_abs; - section->MVrv += frame->MVrv; - section->MVcv += frame->MVcv; - section->mv_in_out_count += frame->mv_in_out_count; - section->new_mv_count += frame->new_mv_count; - section->count += frame->count; - section->duration += frame->duration; -} - -static void subtract_stats(FIRSTPASS_STATS *section, - const FIRSTPASS_STATS *frame) { - section->frame -= frame->frame; - section->weight -= frame->weight; - section->intra_error -= frame->intra_error; - section->coded_error -= frame->coded_error; - section->sr_coded_error -= frame->sr_coded_error; - section->pcnt_inter -= 
frame->pcnt_inter; - section->pcnt_motion -= frame->pcnt_motion; - section->pcnt_second_ref -= frame->pcnt_second_ref; - section->pcnt_neutral -= frame->pcnt_neutral; - section->intra_skip_pct -= frame->intra_skip_pct; - section->inactive_zone_rows -= frame->inactive_zone_rows; - section->inactive_zone_cols -= frame->inactive_zone_cols; - section->MVr -= frame->MVr; - section->mvr_abs -= frame->mvr_abs; - section->MVc -= frame->MVc; - section->mvc_abs -= frame->mvc_abs; - section->MVrv -= frame->MVrv; - section->MVcv -= frame->MVcv; - section->mv_in_out_count -= frame->mv_in_out_count; - section->new_mv_count -= frame->new_mv_count; - section->count -= frame->count; - section->duration -= frame->duration; -} - -// Calculate an active area of the image that discounts formatting -// bars and partially discounts other 0 energy areas. -#define MIN_ACTIVE_AREA 0.5 -#define MAX_ACTIVE_AREA 1.0 -static double calculate_active_area(const VP10_COMP *cpi, - const FIRSTPASS_STATS *this_frame) -{ - double active_pct; - - active_pct = 1.0 - - ((this_frame->intra_skip_pct / 2) + - ((this_frame->inactive_zone_rows * 2) / (double)cpi->common.mb_rows)); - return fclamp(active_pct, MIN_ACTIVE_AREA, MAX_ACTIVE_AREA); -} - -// Calculate a modified Error used in distributing bits between easier and -// harder frames. -#define ACT_AREA_CORRECTION 0.5 -static double calculate_modified_err(const VP10_COMP *cpi, - const TWO_PASS *twopass, - const VP10EncoderConfig *oxcf, - const FIRSTPASS_STATS *this_frame) { - const FIRSTPASS_STATS *const stats = &twopass->total_stats; - const double av_weight = stats->weight / stats->count; - const double av_err = (stats->coded_error * av_weight) / stats->count; - double modified_error = - av_err * pow(this_frame->coded_error * this_frame->weight / - DOUBLE_DIVIDE_CHECK(av_err), oxcf->two_pass_vbrbias / 100.0); - - // Correction for active area. 
Frames with a reduced active area - // (eg due to formatting bars) have a higher error per mb for the - // remaining active MBs. The correction here assumes that coding - // 0.5N blocks of complexity 2X is a little easier than coding N - // blocks of complexity X. - modified_error *= - pow(calculate_active_area(cpi, this_frame), ACT_AREA_CORRECTION); - - return fclamp(modified_error, - twopass->modified_error_min, twopass->modified_error_max); -} - -// This function returns the maximum target rate per frame. -static int frame_max_bits(const RATE_CONTROL *rc, - const VP10EncoderConfig *oxcf) { - int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth * - (int64_t)oxcf->two_pass_vbrmax_section) / 100; - if (max_bits < 0) - max_bits = 0; - else if (max_bits > rc->max_frame_bandwidth) - max_bits = rc->max_frame_bandwidth; - - return (int)max_bits; -} - -void vp10_init_first_pass(VP10_COMP *cpi) { - zero_stats(&cpi->twopass.total_stats); -} - -void vp10_end_first_pass(VP10_COMP *cpi) { - output_stats(&cpi->twopass.total_stats, cpi->output_pkt_list); -} - -static vpx_variance_fn_t get_block_variance_fn(BLOCK_SIZE bsize) { - switch (bsize) { - case BLOCK_8X8: - return vpx_mse8x8; - case BLOCK_16X8: - return vpx_mse16x8; - case BLOCK_8X16: - return vpx_mse8x16; - default: - return vpx_mse16x16; - } -} - -static unsigned int get_prediction_error(BLOCK_SIZE bsize, - const struct buf_2d *src, - const struct buf_2d *ref) { - unsigned int sse; - const vpx_variance_fn_t fn = get_block_variance_fn(bsize); - fn(src->buf, src->stride, ref->buf, ref->stride, &sse); - return sse; -} - -#if CONFIG_VP9_HIGHBITDEPTH -static vpx_variance_fn_t highbd_get_block_variance_fn(BLOCK_SIZE bsize, - int bd) { - switch (bd) { - default: - switch (bsize) { - case BLOCK_8X8: - return vpx_highbd_8_mse8x8; - case BLOCK_16X8: - return vpx_highbd_8_mse16x8; - case BLOCK_8X16: - return vpx_highbd_8_mse8x16; - default: - return vpx_highbd_8_mse16x16; - } - break; - case 10: - switch (bsize) { - case 
BLOCK_8X8: - return vpx_highbd_10_mse8x8; - case BLOCK_16X8: - return vpx_highbd_10_mse16x8; - case BLOCK_8X16: - return vpx_highbd_10_mse8x16; - default: - return vpx_highbd_10_mse16x16; - } - break; - case 12: - switch (bsize) { - case BLOCK_8X8: - return vpx_highbd_12_mse8x8; - case BLOCK_16X8: - return vpx_highbd_12_mse16x8; - case BLOCK_8X16: - return vpx_highbd_12_mse8x16; - default: - return vpx_highbd_12_mse16x16; - } - break; - } -} - -static unsigned int highbd_get_prediction_error(BLOCK_SIZE bsize, - const struct buf_2d *src, - const struct buf_2d *ref, - int bd) { - unsigned int sse; - const vpx_variance_fn_t fn = highbd_get_block_variance_fn(bsize, bd); - fn(src->buf, src->stride, ref->buf, ref->stride, &sse); - return sse; -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -// Refine the motion search range according to the frame dimension -// for first pass test. -static int get_search_range(const VP10_COMP *cpi) { - int sr = 0; - const int dim = VPXMIN(cpi->initial_width, cpi->initial_height); - - while ((dim << sr) < MAX_FULL_PEL_VAL) - ++sr; - return sr; -} - -static void first_pass_motion_search(VP10_COMP *cpi, MACROBLOCK *x, - const MV *ref_mv, MV *best_mv, - int *best_motion_err) { - MACROBLOCKD *const xd = &x->e_mbd; - MV tmp_mv = {0, 0}; - MV ref_mv_full = {ref_mv->row >> 3, ref_mv->col >> 3}; - int num00, tmp_err, n; - const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type; - vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[bsize]; - const int new_mv_mode_penalty = NEW_MV_MODE_PENALTY; - - int step_param = 3; - int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param; - const int sr = get_search_range(cpi); - step_param += sr; - further_steps -= sr; - - // Override the default variance function to use MSE. 
- v_fn_ptr.vf = get_block_variance_fn(bsize); -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - v_fn_ptr.vf = highbd_get_block_variance_fn(bsize, xd->bd); - } -#endif // CONFIG_VP9_HIGHBITDEPTH - - // Center the initial step/diamond search on best mv. - tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv, - step_param, - x->sadperbit16, &num00, &v_fn_ptr, ref_mv); - if (tmp_err < INT_MAX) - tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1); - if (tmp_err < INT_MAX - new_mv_mode_penalty) - tmp_err += new_mv_mode_penalty; - - if (tmp_err < *best_motion_err) { - *best_motion_err = tmp_err; - *best_mv = tmp_mv; - } - - // Carry out further step/diamond searches as necessary. - n = num00; - num00 = 0; - - while (n < further_steps) { - ++n; - - if (num00) { - --num00; - } else { - tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv, - step_param + n, x->sadperbit16, - &num00, &v_fn_ptr, ref_mv); - if (tmp_err < INT_MAX) - tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1); - if (tmp_err < INT_MAX - new_mv_mode_penalty) - tmp_err += new_mv_mode_penalty; - - if (tmp_err < *best_motion_err) { - *best_motion_err = tmp_err; - *best_mv = tmp_mv; - } - } - } -} - -static BLOCK_SIZE get_bsize(const VP10_COMMON *cm, int mb_row, int mb_col) { - if (2 * mb_col + 1 < cm->mi_cols) { - return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_16X16 - : BLOCK_16X8; - } else { - return 2 * mb_row + 1 < cm->mi_rows ? 
BLOCK_8X16 - : BLOCK_8X8; - } -} - -static int find_fp_qindex(vpx_bit_depth_t bit_depth) { - int i; - - for (i = 0; i < QINDEX_RANGE; ++i) - if (vp10_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q) - break; - - if (i == QINDEX_RANGE) - i--; - - return i; -} - -static void set_first_pass_params(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - if (!cpi->refresh_alt_ref_frame && - (cm->current_video_frame == 0 || - (cpi->frame_flags & FRAMEFLAGS_KEY))) { - cm->frame_type = KEY_FRAME; - } else { - cm->frame_type = INTER_FRAME; - } - // Do not use periodic key frames. - cpi->rc.frames_to_key = INT_MAX; -} - -#define UL_INTRA_THRESH 50 -#define INVALID_ROW -1 -void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) { - int mb_row, mb_col; - MACROBLOCK *const x = &cpi->td.mb; - VP10_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &x->e_mbd; - TileInfo tile; - struct macroblock_plane *const p = x->plane; - struct macroblockd_plane *const pd = xd->plane; - const PICK_MODE_CONTEXT *ctx = &cpi->td.pc_root->none; - int i; - - int recon_yoffset, recon_uvoffset; - int64_t intra_error = 0; - int64_t coded_error = 0; - int64_t sr_coded_error = 0; - - int sum_mvr = 0, sum_mvc = 0; - int sum_mvr_abs = 0, sum_mvc_abs = 0; - int64_t sum_mvrs = 0, sum_mvcs = 0; - int mvcount = 0; - int intercount = 0; - int second_ref_count = 0; - const int intrapenalty = INTRA_MODE_PENALTY; - double neutral_count; - int intra_skip_count = 0; - int image_data_start_row = INVALID_ROW; - int new_mv_count = 0; - int sum_in_vectors = 0; - MV lastmv = {0, 0}; - TWO_PASS *twopass = &cpi->twopass; - const MV zero_mv = {0, 0}; - int recon_y_stride, recon_uv_stride, uv_mb_height; - - YV12_BUFFER_CONFIG *const lst_yv12 = get_ref_frame_buffer(cpi, LAST_FRAME); - YV12_BUFFER_CONFIG *gld_yv12 = get_ref_frame_buffer(cpi, GOLDEN_FRAME); - YV12_BUFFER_CONFIG *const new_yv12 = get_frame_new_buffer(cm); - const YV12_BUFFER_CONFIG *first_ref_buf = lst_yv12; - double 
intra_factor; - double brightness_factor; - BufferPool *const pool = cm->buffer_pool; - - // First pass code requires valid last and new frame buffers. - assert(new_yv12 != NULL); - assert(frame_is_intra_only(cm) || (lst_yv12 != NULL)); - -#if CONFIG_FP_MB_STATS - if (cpi->use_fp_mb_stats) { - vp10_zero_array(cpi->twopass.frame_mb_stats_buf, cm->initial_mbs); - } -#endif - - vpx_clear_system_state(); - - intra_factor = 0.0; - brightness_factor = 0.0; - neutral_count = 0.0; - - set_first_pass_params(cpi); - vp10_set_quantizer(cm, find_fp_qindex(cm->bit_depth)); - - vp10_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y); - - vp10_setup_src_planes(x, cpi->Source, 0, 0); - vp10_setup_dst_planes(xd->plane, new_yv12, 0, 0); - - if (!frame_is_intra_only(cm)) { - vp10_setup_pre_planes(xd, 0, first_ref_buf, 0, 0, NULL); - } - - xd->mi = cm->mi_grid_visible; - xd->mi[0] = cm->mi; - - vp10_frame_init_quantizer(cpi); - - for (i = 0; i < MAX_MB_PLANE; ++i) { - p[i].coeff = ctx->coeff_pbuf[i][1]; - p[i].qcoeff = ctx->qcoeff_pbuf[i][1]; - pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1]; - p[i].eobs = ctx->eobs_pbuf[i][1]; - } - x->skip_recode = 0; - - vp10_init_mv_probs(cm); - vp10_initialize_rd_consts(cpi); - - // Tiling is ignored in the first pass. - vp10_tile_init(&tile, cm, 0, 0); - - recon_y_stride = new_yv12->y_stride; - recon_uv_stride = new_yv12->uv_stride; - uv_mb_height = 16 >> (new_yv12->y_height > new_yv12->uv_height); - - for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { - MV best_ref_mv = {0, 0}; - - // Reset above block coeffs. - xd->up_available = (mb_row != 0); - recon_yoffset = (mb_row * recon_y_stride * 16); - recon_uvoffset = (mb_row * recon_uv_stride * uv_mb_height); - - // Set up limit values for motion vectors to prevent them extending - // outside the UMV borders. 
- x->mv_row_min = -((mb_row * 16) + BORDER_MV_PIXELS_B16); - x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) - + BORDER_MV_PIXELS_B16; - - for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { - int this_error; - const int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row); - const BLOCK_SIZE bsize = get_bsize(cm, mb_row, mb_col); - double log_intra; - int level_sample; - -#if CONFIG_FP_MB_STATS - const int mb_index = mb_row * cm->mb_cols + mb_col; -#endif - - vpx_clear_system_state(); - - xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset; - xd->plane[1].dst.buf = new_yv12->u_buffer + recon_uvoffset; - xd->plane[2].dst.buf = new_yv12->v_buffer + recon_uvoffset; - xd->left_available = (mb_col != 0); - xd->mi[0]->mbmi.sb_type = bsize; - xd->mi[0]->mbmi.ref_frame[0] = INTRA_FRAME; - set_mi_row_col(xd, &tile, - mb_row << 1, num_8x8_blocks_high_lookup[bsize], - mb_col << 1, num_8x8_blocks_wide_lookup[bsize], - cm->mi_rows, cm->mi_cols); - - // Do intra 16x16 prediction. - xd->mi[0]->mbmi.segment_id = 0; - xd->mi[0]->mbmi.mode = DC_PRED; - xd->mi[0]->mbmi.tx_size = use_dc_pred ? - (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4; - vp10_encode_intra_block_plane(x, bsize, 0); - this_error = vpx_get_mb_ss(x->plane[0].src_diff); - - // Keep a record of blocks that have almost no intra error residual - // (i.e. are in effect completely flat and untextured in the intra - // domain). In natural videos this is uncommon, but it is much more - // common in animations, graphics and screen content, so may be used - // as a signal to detect these types of content. 
- if (this_error < UL_INTRA_THRESH) { - ++intra_skip_count; - } else if ((mb_col > 0) && (image_data_start_row == INVALID_ROW)) { - image_data_start_row = mb_row; - } - -#if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) { - switch (cm->bit_depth) { - case VPX_BITS_8: - break; - case VPX_BITS_10: - this_error >>= 4; - break; - case VPX_BITS_12: - this_error >>= 8; - break; - default: - assert(0 && "cm->bit_depth should be VPX_BITS_8, " - "VPX_BITS_10 or VPX_BITS_12"); - return; - } - } -#endif // CONFIG_VP9_HIGHBITDEPTH - - vpx_clear_system_state(); - log_intra = log(this_error + 1.0); - if (log_intra < 10.0) - intra_factor += 1.0 + ((10.0 - log_intra) * 0.05); - else - intra_factor += 1.0; - -#if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) - level_sample = CONVERT_TO_SHORTPTR(x->plane[0].src.buf)[0]; - else - level_sample = x->plane[0].src.buf[0]; -#else - level_sample = x->plane[0].src.buf[0]; -#endif - if ((level_sample < DARK_THRESH) && (log_intra < 9.0)) - brightness_factor += 1.0 + (0.01 * (DARK_THRESH - level_sample)); - else - brightness_factor += 1.0; - - // Intrapenalty below deals with situations where the intra and inter - // error scores are very low (e.g. a plain black frame). - // We do not have special cases in first pass for 0,0 and nearest etc so - // all inter modes carry an overhead cost estimate for the mv. - // When the error score is very low this causes us to pick all or lots of - // INTRA modes and throw lots of key frames. - // This penalty adds a cost matching that of a 0,0 mv to the intra case. - this_error += intrapenalty; - - // Accumulate the intra error. - intra_error += (int64_t)this_error; - -#if CONFIG_FP_MB_STATS - if (cpi->use_fp_mb_stats) { - // initialization - cpi->twopass.frame_mb_stats_buf[mb_index] = 0; - } -#endif - - // Set up limit values for motion vectors to prevent them extending - // outside the UMV borders. 
- x->mv_col_min = -((mb_col * 16) + BORDER_MV_PIXELS_B16); - x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + BORDER_MV_PIXELS_B16; - - // Other than for the first frame do a motion search. - if (cm->current_video_frame > 0) { - int tmp_err, motion_error, raw_motion_error; - // Assume 0,0 motion with no mv overhead. - MV mv = {0, 0} , tmp_mv = {0, 0}; - struct buf_2d unscaled_last_source_buf_2d; - - xd->plane[0].pre[0].buf = first_ref_buf->y_buffer + recon_yoffset; -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - motion_error = highbd_get_prediction_error( - bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd); - } else { - motion_error = get_prediction_error( - bsize, &x->plane[0].src, &xd->plane[0].pre[0]); - } -#else - motion_error = get_prediction_error( - bsize, &x->plane[0].src, &xd->plane[0].pre[0]); -#endif // CONFIG_VP9_HIGHBITDEPTH - - // Compute the motion error of the 0,0 motion using the last source - // frame as the reference. Skip the further motion search on - // reconstructed frame if this error is small. - unscaled_last_source_buf_2d.buf = - cpi->unscaled_last_source->y_buffer + recon_yoffset; - unscaled_last_source_buf_2d.stride = - cpi->unscaled_last_source->y_stride; -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - raw_motion_error = highbd_get_prediction_error( - bsize, &x->plane[0].src, &unscaled_last_source_buf_2d, xd->bd); - } else { - raw_motion_error = get_prediction_error( - bsize, &x->plane[0].src, &unscaled_last_source_buf_2d); - } -#else - raw_motion_error = get_prediction_error( - bsize, &x->plane[0].src, &unscaled_last_source_buf_2d); -#endif // CONFIG_VP9_HIGHBITDEPTH - - // TODO(pengchong): Replace the hard-coded threshold - if (raw_motion_error > 25) { - // Test last reference frame using the previous best mv as the - // starting point (best reference) for the search. 
- first_pass_motion_search(cpi, x, &best_ref_mv, &mv, &motion_error); - - // If the current best reference mv is not centered on 0,0 then do a - // 0,0 based search as well. - if (!is_zero_mv(&best_ref_mv)) { - tmp_err = INT_MAX; - first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv, &tmp_err); - - if (tmp_err < motion_error) { - motion_error = tmp_err; - mv = tmp_mv; - } - } - - // Search in an older reference frame. - if ((cm->current_video_frame > 1) && gld_yv12 != NULL) { - // Assume 0,0 motion with no mv overhead. - int gf_motion_error; - - xd->plane[0].pre[0].buf = gld_yv12->y_buffer + recon_yoffset; -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - gf_motion_error = highbd_get_prediction_error( - bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd); - } else { - gf_motion_error = get_prediction_error( - bsize, &x->plane[0].src, &xd->plane[0].pre[0]); - } -#else - gf_motion_error = get_prediction_error( - bsize, &x->plane[0].src, &xd->plane[0].pre[0]); -#endif // CONFIG_VP9_HIGHBITDEPTH - - first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv, - &gf_motion_error); - - if (gf_motion_error < motion_error && gf_motion_error < this_error) - ++second_ref_count; - - // Reset to last frame as reference buffer. - xd->plane[0].pre[0].buf = first_ref_buf->y_buffer + recon_yoffset; - xd->plane[1].pre[0].buf = first_ref_buf->u_buffer + recon_uvoffset; - xd->plane[2].pre[0].buf = first_ref_buf->v_buffer + recon_uvoffset; - - // In accumulating a score for the older reference frame take the - // best of the motion predicted score and the intra coded error - // (just as will be done for) accumulation of "coded_error" for - // the last frame. - if (gf_motion_error < this_error) - sr_coded_error += gf_motion_error; - else - sr_coded_error += this_error; - } else { - sr_coded_error += motion_error; - } - } else { - sr_coded_error += motion_error; - } - - // Start by assuming that intra mode is best. 
- best_ref_mv.row = 0; - best_ref_mv.col = 0; - -#if CONFIG_FP_MB_STATS - if (cpi->use_fp_mb_stats) { - // intra predication statistics - cpi->twopass.frame_mb_stats_buf[mb_index] = 0; - cpi->twopass.frame_mb_stats_buf[mb_index] |= FPMB_DCINTRA_MASK; - cpi->twopass.frame_mb_stats_buf[mb_index] |= FPMB_MOTION_ZERO_MASK; - if (this_error > FPMB_ERROR_LARGE_TH) { - cpi->twopass.frame_mb_stats_buf[mb_index] |= FPMB_ERROR_LARGE_MASK; - } else if (this_error < FPMB_ERROR_SMALL_TH) { - cpi->twopass.frame_mb_stats_buf[mb_index] |= FPMB_ERROR_SMALL_MASK; - } - } -#endif - - if (motion_error <= this_error) { - vpx_clear_system_state(); - - // Keep a count of cases where the inter and intra were very close - // and very low. This helps with scene cut detection for example in - // cropped clips with black bars at the sides or top and bottom. - if (((this_error - intrapenalty) * 9 <= motion_error * 10) && - (this_error < (2 * intrapenalty))) { - neutral_count += 1.0; - // Also track cases where the intra is not much worse than the inter - // and use this in limiting the GF/arf group length. 
- } else if ((this_error > NCOUNT_INTRA_THRESH) && - (this_error < (NCOUNT_INTRA_FACTOR * motion_error))) { - neutral_count += (double)motion_error / - DOUBLE_DIVIDE_CHECK((double)this_error); - } - - mv.row *= 8; - mv.col *= 8; - this_error = motion_error; - xd->mi[0]->mbmi.mode = NEWMV; - xd->mi[0]->mbmi.mv[0].as_mv = mv; - xd->mi[0]->mbmi.tx_size = TX_4X4; - xd->mi[0]->mbmi.ref_frame[0] = LAST_FRAME; - xd->mi[0]->mbmi.ref_frame[1] = NONE; - vp10_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1, bsize); - vp10_encode_sby_pass1(x, bsize); - sum_mvr += mv.row; - sum_mvr_abs += abs(mv.row); - sum_mvc += mv.col; - sum_mvc_abs += abs(mv.col); - sum_mvrs += mv.row * mv.row; - sum_mvcs += mv.col * mv.col; - ++intercount; - - best_ref_mv = mv; - -#if CONFIG_FP_MB_STATS - if (cpi->use_fp_mb_stats) { - // inter predication statistics - cpi->twopass.frame_mb_stats_buf[mb_index] = 0; - cpi->twopass.frame_mb_stats_buf[mb_index] &= ~FPMB_DCINTRA_MASK; - cpi->twopass.frame_mb_stats_buf[mb_index] |= FPMB_MOTION_ZERO_MASK; - if (this_error > FPMB_ERROR_LARGE_TH) { - cpi->twopass.frame_mb_stats_buf[mb_index] |= - FPMB_ERROR_LARGE_MASK; - } else if (this_error < FPMB_ERROR_SMALL_TH) { - cpi->twopass.frame_mb_stats_buf[mb_index] |= - FPMB_ERROR_SMALL_MASK; - } - } -#endif - - if (!is_zero_mv(&mv)) { - ++mvcount; - -#if CONFIG_FP_MB_STATS - if (cpi->use_fp_mb_stats) { - cpi->twopass.frame_mb_stats_buf[mb_index] &= - ~FPMB_MOTION_ZERO_MASK; - // check estimated motion direction - if (mv.as_mv.col > 0 && mv.as_mv.col >= abs(mv.as_mv.row)) { - // right direction - cpi->twopass.frame_mb_stats_buf[mb_index] |= - FPMB_MOTION_RIGHT_MASK; - } else if (mv.as_mv.row < 0 && - abs(mv.as_mv.row) >= abs(mv.as_mv.col)) { - // up direction - cpi->twopass.frame_mb_stats_buf[mb_index] |= - FPMB_MOTION_UP_MASK; - } else if (mv.as_mv.col < 0 && - abs(mv.as_mv.col) >= abs(mv.as_mv.row)) { - // left direction - cpi->twopass.frame_mb_stats_buf[mb_index] |= - FPMB_MOTION_LEFT_MASK; - } else { - // 
down direction - cpi->twopass.frame_mb_stats_buf[mb_index] |= - FPMB_MOTION_DOWN_MASK; - } - } -#endif - - // Non-zero vector, was it different from the last non zero vector? - if (!is_equal_mv(&mv, &lastmv)) - ++new_mv_count; - lastmv = mv; - - // Does the row vector point inwards or outwards? - if (mb_row < cm->mb_rows / 2) { - if (mv.row > 0) - --sum_in_vectors; - else if (mv.row < 0) - ++sum_in_vectors; - } else if (mb_row > cm->mb_rows / 2) { - if (mv.row > 0) - ++sum_in_vectors; - else if (mv.row < 0) - --sum_in_vectors; - } - - // Does the col vector point inwards or outwards? - if (mb_col < cm->mb_cols / 2) { - if (mv.col > 0) - --sum_in_vectors; - else if (mv.col < 0) - ++sum_in_vectors; - } else if (mb_col > cm->mb_cols / 2) { - if (mv.col > 0) - ++sum_in_vectors; - else if (mv.col < 0) - --sum_in_vectors; - } - } - } - } else { - sr_coded_error += (int64_t)this_error; - } - coded_error += (int64_t)this_error; - - // Adjust to the next column of MBs. - x->plane[0].src.buf += 16; - x->plane[1].src.buf += uv_mb_height; - x->plane[2].src.buf += uv_mb_height; - - recon_yoffset += 16; - recon_uvoffset += uv_mb_height; - } - - // Adjust to the next row of MBs. - x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols; - x->plane[1].src.buf += uv_mb_height * x->plane[1].src.stride - - uv_mb_height * cm->mb_cols; - x->plane[2].src.buf += uv_mb_height * x->plane[1].src.stride - - uv_mb_height * cm->mb_cols; - - vpx_clear_system_state(); - } - - // Clamp the image start to rows/2. This number of rows is discarded top - // and bottom as dead data so rows / 2 means the frame is blank. 
- if ((image_data_start_row > cm->mb_rows / 2) || - (image_data_start_row == INVALID_ROW)) { - image_data_start_row = cm->mb_rows / 2; - } - // Exclude any image dead zone - if (image_data_start_row > 0) { - intra_skip_count = - VPXMAX(0, intra_skip_count - (image_data_start_row * cm->mb_cols * 2)); - } - - { - FIRSTPASS_STATS fps; - // The minimum error here insures some bit allocation to frames even - // in static regions. The allocation per MB declines for larger formats - // where the typical "real" energy per MB also falls. - // Initial estimate here uses sqrt(mbs) to define the min_err, where the - // number of mbs is proportional to the image area. - const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) - ? cpi->initial_mbs : cpi->common.MBs; - const double min_err = 200 * sqrt(num_mbs); - - intra_factor = intra_factor / (double)num_mbs; - brightness_factor = brightness_factor / (double)num_mbs; - fps.weight = intra_factor * brightness_factor; - - fps.frame = cm->current_video_frame; - fps.coded_error = (double)(coded_error >> 8) + min_err; - fps.sr_coded_error = (double)(sr_coded_error >> 8) + min_err; - fps.intra_error = (double)(intra_error >> 8) + min_err; - fps.count = 1.0; - fps.pcnt_inter = (double)intercount / num_mbs; - fps.pcnt_second_ref = (double)second_ref_count / num_mbs; - fps.pcnt_neutral = (double)neutral_count / num_mbs; - fps.intra_skip_pct = (double)intra_skip_count / num_mbs; - fps.inactive_zone_rows = (double)image_data_start_row; - fps.inactive_zone_cols = (double)0; // TODO(paulwilkins): fix - - if (mvcount > 0) { - fps.MVr = (double)sum_mvr / mvcount; - fps.mvr_abs = (double)sum_mvr_abs / mvcount; - fps.MVc = (double)sum_mvc / mvcount; - fps.mvc_abs = (double)sum_mvc_abs / mvcount; - fps.MVrv = ((double)sum_mvrs - - ((double)sum_mvr * sum_mvr / mvcount)) / mvcount; - fps.MVcv = ((double)sum_mvcs - - ((double)sum_mvc * sum_mvc / mvcount)) / mvcount; - fps.mv_in_out_count = (double)sum_in_vectors / (mvcount * 2); - 
fps.new_mv_count = new_mv_count; - fps.pcnt_motion = (double)mvcount / num_mbs; - } else { - fps.MVr = 0.0; - fps.mvr_abs = 0.0; - fps.MVc = 0.0; - fps.mvc_abs = 0.0; - fps.MVrv = 0.0; - fps.MVcv = 0.0; - fps.mv_in_out_count = 0.0; - fps.new_mv_count = 0.0; - fps.pcnt_motion = 0.0; - } - - // TODO(paulwilkins): Handle the case when duration is set to 0, or - // something less than the full time between subsequent values of - // cpi->source_time_stamp. - fps.duration = (double)(source->ts_end - source->ts_start); - - // Don't want to do output stats with a stack variable! - twopass->this_frame_stats = fps; - output_stats(&twopass->this_frame_stats, cpi->output_pkt_list); - accumulate_stats(&twopass->total_stats, &fps); - -#if CONFIG_FP_MB_STATS - if (cpi->use_fp_mb_stats) { - output_fpmb_stats(twopass->frame_mb_stats_buf, cm, cpi->output_pkt_list); - } -#endif - } - - // Copy the previous Last Frame back into gf and and arf buffers if - // the prediction is good enough... but also don't allow it to lag too far. - if ((twopass->sr_update_lag > 3) || - ((cm->current_video_frame > 0) && - (twopass->this_frame_stats.pcnt_inter > 0.20) && - ((twopass->this_frame_stats.intra_error / - DOUBLE_DIVIDE_CHECK(twopass->this_frame_stats.coded_error)) > 2.0))) { - if (gld_yv12 != NULL) { - ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx], - cm->ref_frame_map[cpi->lst_fb_idx]); - } - twopass->sr_update_lag = 1; - } else { - ++twopass->sr_update_lag; - } - - vpx_extend_frame_borders(new_yv12); - - // The frame we just compressed now becomes the last frame. - ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->lst_fb_idx], - cm->new_fb_idx); - - // Special case for the first frame. Copy into the GF buffer as a second - // reference. 
- if (cm->current_video_frame == 0 && cpi->gld_fb_idx != INVALID_IDX) { - ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx], - cm->ref_frame_map[cpi->lst_fb_idx]); - } - - // Use this to see what the first pass reconstruction looks like. - if (0) { - char filename[512]; - FILE *recon_file; - snprintf(filename, sizeof(filename), "enc%04d.yuv", - (int)cm->current_video_frame); - - if (cm->current_video_frame == 0) - recon_file = fopen(filename, "wb"); - else - recon_file = fopen(filename, "ab"); - - (void)fwrite(lst_yv12->buffer_alloc, lst_yv12->frame_size, 1, recon_file); - fclose(recon_file); - } - - ++cm->current_video_frame; -} - -static double calc_correction_factor(double err_per_mb, - double err_divisor, - double pt_low, - double pt_high, - int q, - vpx_bit_depth_t bit_depth) { - const double error_term = err_per_mb / err_divisor; - - // Adjustment based on actual quantizer to power term. - const double power_term = - VPXMIN(vp10_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high); - - // Calculate correction factor. - if (power_term < 1.0) - assert(error_term >= 0.0); - - return fclamp(pow(error_term, power_term), 0.05, 5.0); -} - -// Larger image formats are expected to be a little harder to code relatively -// given the same prediction error score. This in part at least relates to the -// increased size and hence coding cost of motion vectors. -#define EDIV_SIZE_FACTOR 800 - -static int get_twopass_worst_quality(const VP10_COMP *cpi, - const double section_err, - double inactive_zone, - int section_target_bandwidth, - double group_weight_factor) { - const RATE_CONTROL *const rc = &cpi->rc; - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - - inactive_zone = fclamp(inactive_zone, 0.0, 1.0); - - if (section_target_bandwidth <= 0) { - return rc->worst_quality; // Highest value allowed - } else { - const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) - ? 
cpi->initial_mbs : cpi->common.MBs; - const int active_mbs = VPXMAX(1, num_mbs - (int)(num_mbs * inactive_zone)); - const double av_err_per_mb = section_err / active_mbs; - const double speed_term = 1.0 + 0.04 * oxcf->speed; - const double ediv_size_correction = (double)num_mbs / EDIV_SIZE_FACTOR; - const int target_norm_bits_per_mb = ((uint64_t)section_target_bandwidth << - BPER_MB_NORMBITS) / active_mbs; - - int q; - - // Try and pick a max Q that will be high enough to encode the - // content at the given rate. - for (q = rc->best_quality; q < rc->worst_quality; ++q) { - const double factor = - calc_correction_factor(av_err_per_mb, - ERR_DIVISOR - ediv_size_correction, - FACTOR_PT_LOW, FACTOR_PT_HIGH, q, - cpi->common.bit_depth); - const int bits_per_mb = - vp10_rc_bits_per_mb(INTER_FRAME, q, - factor * speed_term * group_weight_factor, - cpi->common.bit_depth); - if (bits_per_mb <= target_norm_bits_per_mb) - break; - } - - // Restriction on active max q for constrained quality mode. - if (cpi->oxcf.rc_mode == VPX_CQ) - q = VPXMAX(q, oxcf->cq_level); - return q; - } -} - -static void setup_rf_level_maxq(VP10_COMP *cpi) { - int i; - RATE_CONTROL *const rc = &cpi->rc; - for (i = INTER_NORMAL; i < RATE_FACTOR_LEVELS; ++i) { - int qdelta = vp10_frame_type_qdelta(cpi, i, rc->worst_quality); - rc->rf_level_maxq[i] = VPXMAX(rc->worst_quality + qdelta, rc->best_quality); - } -} - -void vp10_init_subsampling(VP10_COMP *cpi) { - const VP10_COMMON *const cm = &cpi->common; - RATE_CONTROL *const rc = &cpi->rc; - const int w = cm->width; - const int h = cm->height; - int i; - - for (i = 0; i < FRAME_SCALE_STEPS; ++i) { - // Note: Frames with odd-sized dimensions may result from this scaling. 
- rc->frame_width[i] = (w * 16) / frame_scale_factor[i]; - rc->frame_height[i] = (h * 16) / frame_scale_factor[i]; - } - - setup_rf_level_maxq(cpi); -} - -void vp10_calculate_coded_size(VP10_COMP *cpi, - int *scaled_frame_width, - int *scaled_frame_height) { - RATE_CONTROL *const rc = &cpi->rc; - *scaled_frame_width = rc->frame_width[rc->frame_size_selector]; - *scaled_frame_height = rc->frame_height[rc->frame_size_selector]; -} - -void vp10_init_second_pass(VP10_COMP *cpi) { - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - TWO_PASS *const twopass = &cpi->twopass; - double frame_rate; - FIRSTPASS_STATS *stats; - - zero_stats(&twopass->total_stats); - zero_stats(&twopass->total_left_stats); - - if (!twopass->stats_in_end) - return; - - stats = &twopass->total_stats; - - *stats = *twopass->stats_in_end; - twopass->total_left_stats = *stats; - - frame_rate = 10000000.0 * stats->count / stats->duration; - // Each frame can have a different duration, as the frame rate in the source - // isn't guaranteed to be constant. The frame rate prior to the first frame - // encoded in the second pass is a guess. However, the sum duration is not. - // It is calculated based on the actual durations of all frames from the - // first pass. - vp10_new_framerate(cpi, frame_rate); - twopass->bits_left = (int64_t)(stats->duration * oxcf->target_bandwidth / - 10000000.0); - - // This variable monitors how far behind the second ref update is lagging. - twopass->sr_update_lag = 1; - - // Scan the first pass file and calculate a modified total error based upon - // the bias/power function used to allocate bits. 
- { - const double avg_error = stats->coded_error / - DOUBLE_DIVIDE_CHECK(stats->count); - const FIRSTPASS_STATS *s = twopass->stats_in; - double modified_error_total = 0.0; - twopass->modified_error_min = (avg_error * - oxcf->two_pass_vbrmin_section) / 100; - twopass->modified_error_max = (avg_error * - oxcf->two_pass_vbrmax_section) / 100; - while (s < twopass->stats_in_end) { - modified_error_total += calculate_modified_err(cpi, twopass, oxcf, s); - ++s; - } - twopass->modified_error_left = modified_error_total; - } - - // Reset the vbr bits off target counters - cpi->rc.vbr_bits_off_target = 0; - cpi->rc.vbr_bits_off_target_fast = 0; - - cpi->rc.rate_error_estimate = 0; - - // Static sequence monitor variables. - twopass->kf_zeromotion_pct = 100; - twopass->last_kfgroup_zeromotion_pct = 100; - - if (oxcf->resize_mode != RESIZE_NONE) { - vp10_init_subsampling(cpi); - } -} - -#define SR_DIFF_PART 0.0015 -#define MOTION_AMP_PART 0.003 -#define INTRA_PART 0.005 -#define DEFAULT_DECAY_LIMIT 0.75 -#define LOW_SR_DIFF_TRHESH 0.1 -#define SR_DIFF_MAX 128.0 - -static double get_sr_decay_rate(const VP10_COMP *cpi, - const FIRSTPASS_STATS *frame) { - const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) - ? 
cpi->initial_mbs : cpi->common.MBs; - double sr_diff = - (frame->sr_coded_error - frame->coded_error) / num_mbs; - double sr_decay = 1.0; - double modified_pct_inter; - double modified_pcnt_intra; - const double motion_amplitude_factor = - frame->pcnt_motion * ((frame->mvc_abs + frame->mvr_abs) / 2); - - modified_pct_inter = frame->pcnt_inter; - if ((frame->intra_error / DOUBLE_DIVIDE_CHECK(frame->coded_error)) < - (double)NCOUNT_FRAME_II_THRESH) { - modified_pct_inter = frame->pcnt_inter - frame->pcnt_neutral; - } - modified_pcnt_intra = 100 * (1.0 - modified_pct_inter); - - - if ((sr_diff > LOW_SR_DIFF_TRHESH)) { - sr_diff = VPXMIN(sr_diff, SR_DIFF_MAX); - sr_decay = 1.0 - (SR_DIFF_PART * sr_diff) - - (MOTION_AMP_PART * motion_amplitude_factor) - - (INTRA_PART * modified_pcnt_intra); - } - return VPXMAX(sr_decay, VPXMIN(DEFAULT_DECAY_LIMIT, modified_pct_inter)); -} - -// This function gives an estimate of how badly we believe the prediction -// quality is decaying from frame to frame. -static double get_zero_motion_factor(const VP10_COMP *cpi, - const FIRSTPASS_STATS *frame) { - const double zero_motion_pct = frame->pcnt_inter - - frame->pcnt_motion; - double sr_decay = get_sr_decay_rate(cpi, frame); - return VPXMIN(sr_decay, zero_motion_pct); -} - -#define ZM_POWER_FACTOR 0.75 - -static double get_prediction_decay_rate(const VP10_COMP *cpi, - const FIRSTPASS_STATS *next_frame) { - const double sr_decay_rate = get_sr_decay_rate(cpi, next_frame); - const double zero_motion_factor = - (0.95 * pow((next_frame->pcnt_inter - next_frame->pcnt_motion), - ZM_POWER_FACTOR)); - - return VPXMAX(zero_motion_factor, - (sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor))); -} - -// Function to test for a condition where a complex transition is followed -// by a static section. For example in slide shows where there is a fade -// between slides. This is to help with more optimal kf and gf positioning. 
-static int detect_transition_to_still(VP10_COMP *cpi, - int frame_interval, int still_interval, - double loop_decay_rate, - double last_decay_rate) { - TWO_PASS *const twopass = &cpi->twopass; - RATE_CONTROL *const rc = &cpi->rc; - - // Break clause to detect very still sections after motion - // For example a static image after a fade or other transition - // instead of a clean scene cut. - if (frame_interval > rc->min_gf_interval && - loop_decay_rate >= 0.999 && - last_decay_rate < 0.9) { - int j; - - // Look ahead a few frames to see if static condition persists... - for (j = 0; j < still_interval; ++j) { - const FIRSTPASS_STATS *stats = &twopass->stats_in[j]; - if (stats >= twopass->stats_in_end) - break; - - if (stats->pcnt_inter - stats->pcnt_motion < 0.999) - break; - } - - // Only if it does do we signal a transition to still. - return j == still_interval; - } - - return 0; -} - -// This function detects a flash through the high relative pcnt_second_ref -// score in the frame following a flash frame. The offset passed in should -// reflect this. -static int detect_flash(const TWO_PASS *twopass, int offset) { - const FIRSTPASS_STATS *const next_frame = read_frame_stats(twopass, offset); - - // What we are looking for here is a situation where there is a - // brief break in prediction (such as a flash) but subsequent frames - // are reasonably well predicted by an earlier (pre flash) frame. - // The recovery after a flash is indicated by a high pcnt_second_ref - // compared to pcnt_inter. - return next_frame != NULL && - next_frame->pcnt_second_ref > next_frame->pcnt_inter && - next_frame->pcnt_second_ref >= 0.5; -} - -// Update the motion related elements to the GF arf boost calculation. 
-static void accumulate_frame_motion_stats(const FIRSTPASS_STATS *stats, - double *mv_in_out, - double *mv_in_out_accumulator, - double *abs_mv_in_out_accumulator, - double *mv_ratio_accumulator) { - const double pct = stats->pcnt_motion; - - // Accumulate Motion In/Out of frame stats. - *mv_in_out = stats->mv_in_out_count * pct; - *mv_in_out_accumulator += *mv_in_out; - *abs_mv_in_out_accumulator += fabs(*mv_in_out); - - // Accumulate a measure of how uniform (or conversely how random) the motion - // field is (a ratio of abs(mv) / mv). - if (pct > 0.05) { - const double mvr_ratio = fabs(stats->mvr_abs) / - DOUBLE_DIVIDE_CHECK(fabs(stats->MVr)); - const double mvc_ratio = fabs(stats->mvc_abs) / - DOUBLE_DIVIDE_CHECK(fabs(stats->MVc)); - - *mv_ratio_accumulator += pct * (mvr_ratio < stats->mvr_abs ? - mvr_ratio : stats->mvr_abs); - *mv_ratio_accumulator += pct * (mvc_ratio < stats->mvc_abs ? - mvc_ratio : stats->mvc_abs); - } -} - -#define BASELINE_ERR_PER_MB 1000.0 -static double calc_frame_boost(VP10_COMP *cpi, - const FIRSTPASS_STATS *this_frame, - double this_frame_mv_in_out, - double max_boost) { - double frame_boost; - const double lq = - vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME], - cpi->common.bit_depth); - const double boost_q_correction = VPXMIN((0.5 + (lq * 0.015)), 1.5); - int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) - ? cpi->initial_mbs : cpi->common.MBs; - - // Correct for any inactive region in the image - num_mbs = (int)VPXMAX(1, num_mbs * calculate_active_area(cpi, this_frame)); - - // Underlying boost factor is based on inter error ratio. - frame_boost = (BASELINE_ERR_PER_MB * num_mbs) / - DOUBLE_DIVIDE_CHECK(this_frame->coded_error); - frame_boost = frame_boost * BOOST_FACTOR * boost_q_correction; - - // Increase boost for frames where new data coming into frame (e.g. zoom out). - // Slightly reduce boost if there is a net balance of motion out of the frame - // (zoom in). 
The range for this_frame_mv_in_out is -1.0 to +1.0. - if (this_frame_mv_in_out > 0.0) - frame_boost += frame_boost * (this_frame_mv_in_out * 2.0); - // In the extreme case the boost is halved. - else - frame_boost += frame_boost * (this_frame_mv_in_out / 2.0); - - return VPXMIN(frame_boost, max_boost * boost_q_correction); -} - -static int calc_arf_boost(VP10_COMP *cpi, int offset, - int f_frames, int b_frames, - int *f_boost, int *b_boost) { - TWO_PASS *const twopass = &cpi->twopass; - int i; - double boost_score = 0.0; - double mv_ratio_accumulator = 0.0; - double decay_accumulator = 1.0; - double this_frame_mv_in_out = 0.0; - double mv_in_out_accumulator = 0.0; - double abs_mv_in_out_accumulator = 0.0; - int arf_boost; - int flash_detected = 0; - - // Search forward from the proposed arf/next gf position. - for (i = 0; i < f_frames; ++i) { - const FIRSTPASS_STATS *this_frame = read_frame_stats(twopass, i + offset); - if (this_frame == NULL) - break; - - // Update the motion related elements to the boost calculation. - accumulate_frame_motion_stats(this_frame, - &this_frame_mv_in_out, &mv_in_out_accumulator, - &abs_mv_in_out_accumulator, - &mv_ratio_accumulator); - - // We want to discount the flash frame itself and the recovery - // frame that follows as both will have poor scores. - flash_detected = detect_flash(twopass, i + offset) || - detect_flash(twopass, i + offset + 1); - - // Accumulate the effect of prediction quality decay. - if (!flash_detected) { - decay_accumulator *= get_prediction_decay_rate(cpi, this_frame); - decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR - ? MIN_DECAY_FACTOR : decay_accumulator; - } - - boost_score += decay_accumulator * calc_frame_boost(cpi, this_frame, - this_frame_mv_in_out, - GF_MAX_BOOST); - } - - *f_boost = (int)boost_score; - - // Reset for backward looking loop. 
- boost_score = 0.0; - mv_ratio_accumulator = 0.0; - decay_accumulator = 1.0; - this_frame_mv_in_out = 0.0; - mv_in_out_accumulator = 0.0; - abs_mv_in_out_accumulator = 0.0; - - // Search backward towards last gf position. - for (i = -1; i >= -b_frames; --i) { - const FIRSTPASS_STATS *this_frame = read_frame_stats(twopass, i + offset); - if (this_frame == NULL) - break; - - // Update the motion related elements to the boost calculation. - accumulate_frame_motion_stats(this_frame, - &this_frame_mv_in_out, &mv_in_out_accumulator, - &abs_mv_in_out_accumulator, - &mv_ratio_accumulator); - - // We want to discount the the flash frame itself and the recovery - // frame that follows as both will have poor scores. - flash_detected = detect_flash(twopass, i + offset) || - detect_flash(twopass, i + offset + 1); - - // Cumulative effect of prediction quality decay. - if (!flash_detected) { - decay_accumulator *= get_prediction_decay_rate(cpi, this_frame); - decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR - ? MIN_DECAY_FACTOR : decay_accumulator; - } - - boost_score += decay_accumulator * calc_frame_boost(cpi, this_frame, - this_frame_mv_in_out, - GF_MAX_BOOST); - } - *b_boost = (int)boost_score; - - arf_boost = (*f_boost + *b_boost); - if (arf_boost < ((b_frames + f_frames) * 20)) - arf_boost = ((b_frames + f_frames) * 20); - arf_boost = VPXMAX(arf_boost, MIN_ARF_GF_BOOST); - - return arf_boost; -} - -// Calculate a section intra ratio used in setting max loop filter. -static int calculate_section_intra_ratio(const FIRSTPASS_STATS *begin, - const FIRSTPASS_STATS *end, - int section_length) { - const FIRSTPASS_STATS *s = begin; - double intra_error = 0.0; - double coded_error = 0.0; - int i = 0; - - while (s < end && i < section_length) { - intra_error += s->intra_error; - coded_error += s->coded_error; - ++s; - ++i; - } - - return (int)(intra_error / DOUBLE_DIVIDE_CHECK(coded_error)); -} - -// Calculate the total bits to allocate in this GF/ARF group. 
-static int64_t calculate_total_gf_group_bits(VP10_COMP *cpi, - double gf_group_err) { - const RATE_CONTROL *const rc = &cpi->rc; - const TWO_PASS *const twopass = &cpi->twopass; - const int max_bits = frame_max_bits(rc, &cpi->oxcf); - int64_t total_group_bits; - - // Calculate the bits to be allocated to the group as a whole. - if ((twopass->kf_group_bits > 0) && (twopass->kf_group_error_left > 0)) { - total_group_bits = (int64_t)(twopass->kf_group_bits * - (gf_group_err / twopass->kf_group_error_left)); - } else { - total_group_bits = 0; - } - - // Clamp odd edge cases. - total_group_bits = (total_group_bits < 0) ? - 0 : (total_group_bits > twopass->kf_group_bits) ? - twopass->kf_group_bits : total_group_bits; - - // Clip based on user supplied data rate variability limit. - if (total_group_bits > (int64_t)max_bits * rc->baseline_gf_interval) - total_group_bits = (int64_t)max_bits * rc->baseline_gf_interval; - - return total_group_bits; -} - -// Calculate the number bits extra to assign to boosted frames in a group. -static int calculate_boost_bits(int frame_count, - int boost, int64_t total_group_bits) { - int allocation_chunks; - - // return 0 for invalid inputs (could arise e.g. through rounding errors) - if (!boost || (total_group_bits <= 0) || (frame_count <= 0) ) - return 0; - - allocation_chunks = (frame_count * 100) + boost; - - // Prevent overflow. - if (boost > 1023) { - int divisor = boost >> 10; - boost /= divisor; - allocation_chunks /= divisor; - } - - // Calculate the number of extra bits for use in the boosted frame or frames. - return VPXMAX((int)(((int64_t)boost * total_group_bits) / allocation_chunks), - 0); -} - -// Current limit on maximum number of active arfs in a GF/ARF group. -#define MAX_ACTIVE_ARFS 2 -#define ARF_SLOT1 2 -#define ARF_SLOT2 3 -// This function indirects the choice of buffers for arfs. 
-// At the moment the values are fixed but this may change as part of -// the integration process with other codec features that swap buffers around. -static void get_arf_buffer_indices(unsigned char *arf_buffer_indices) { - arf_buffer_indices[0] = ARF_SLOT1; - arf_buffer_indices[1] = ARF_SLOT2; -} - -static void allocate_gf_group_bits(VP10_COMP *cpi, int64_t gf_group_bits, - double group_error, int gf_arf_bits) { - RATE_CONTROL *const rc = &cpi->rc; - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - TWO_PASS *const twopass = &cpi->twopass; - GF_GROUP *const gf_group = &twopass->gf_group; - FIRSTPASS_STATS frame_stats; - int i; - int frame_index = 1; - int target_frame_size; - int key_frame; - const int max_bits = frame_max_bits(&cpi->rc, &cpi->oxcf); - int64_t total_group_bits = gf_group_bits; - double modified_err = 0.0; - double err_fraction; - int mid_boost_bits = 0; - int mid_frame_idx; - unsigned char arf_buffer_indices[MAX_ACTIVE_ARFS]; - - key_frame = cpi->common.frame_type == KEY_FRAME; - - get_arf_buffer_indices(arf_buffer_indices); - - // For key frames the frame target rate is already set and it - // is also the golden frame. - if (!key_frame) { - if (rc->source_alt_ref_active) { - gf_group->update_type[0] = OVERLAY_UPDATE; - gf_group->rf_level[0] = INTER_NORMAL; - gf_group->bit_allocation[0] = 0; - } else { - gf_group->update_type[0] = GF_UPDATE; - gf_group->rf_level[0] = GF_ARF_STD; - gf_group->bit_allocation[0] = gf_arf_bits; - } - gf_group->arf_update_idx[0] = arf_buffer_indices[0]; - gf_group->arf_ref_idx[0] = arf_buffer_indices[0]; - - // Step over the golden frame / overlay frame - if (EOF == input_stats(twopass, &frame_stats)) - return; - } - - // Deduct the boost bits for arf (or gf if it is not a key frame) - // from the group total. - if (rc->source_alt_ref_pending || !key_frame) - total_group_bits -= gf_arf_bits; - - // Store the bits to spend on the ARF if there is one. 
- if (rc->source_alt_ref_pending) { - gf_group->update_type[frame_index] = ARF_UPDATE; - gf_group->rf_level[frame_index] = GF_ARF_STD; - gf_group->bit_allocation[frame_index] = gf_arf_bits; - - gf_group->arf_src_offset[frame_index] = - (unsigned char)(rc->baseline_gf_interval - 1); - - gf_group->arf_update_idx[frame_index] = arf_buffer_indices[0]; - gf_group->arf_ref_idx[frame_index] = - arf_buffer_indices[cpi->multi_arf_last_grp_enabled && - rc->source_alt_ref_active]; - ++frame_index; - - if (cpi->multi_arf_enabled) { - // Set aside a slot for a level 1 arf. - gf_group->update_type[frame_index] = ARF_UPDATE; - gf_group->rf_level[frame_index] = GF_ARF_LOW; - gf_group->arf_src_offset[frame_index] = - (unsigned char)((rc->baseline_gf_interval >> 1) - 1); - gf_group->arf_update_idx[frame_index] = arf_buffer_indices[1]; - gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[0]; - ++frame_index; - } - } - - // Define middle frame - mid_frame_idx = frame_index + (rc->baseline_gf_interval >> 1) - 1; - - // Allocate bits to the other frames in the group. 
- for (i = 0; i < rc->baseline_gf_interval - rc->source_alt_ref_pending; ++i) { - int arf_idx = 0; - if (EOF == input_stats(twopass, &frame_stats)) - break; - - modified_err = calculate_modified_err(cpi, twopass, oxcf, &frame_stats); - - if (group_error > 0) - err_fraction = modified_err / DOUBLE_DIVIDE_CHECK(group_error); - else - err_fraction = 0.0; - - target_frame_size = (int)((double)total_group_bits * err_fraction); - - if (rc->source_alt_ref_pending && cpi->multi_arf_enabled) { - mid_boost_bits += (target_frame_size >> 4); - target_frame_size -= (target_frame_size >> 4); - - if (frame_index <= mid_frame_idx) - arf_idx = 1; - } - gf_group->arf_update_idx[frame_index] = arf_buffer_indices[arf_idx]; - gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[arf_idx]; - - target_frame_size = clamp(target_frame_size, 0, - VPXMIN(max_bits, (int)total_group_bits)); - - gf_group->update_type[frame_index] = LF_UPDATE; - gf_group->rf_level[frame_index] = INTER_NORMAL; - - gf_group->bit_allocation[frame_index] = target_frame_size; - ++frame_index; - } - - // Note: - // We need to configure the frame at the end of the sequence + 1 that will be - // the start frame for the next group. Otherwise prior to the call to - // vp10_rc_get_second_pass_params() the data will be undefined. - gf_group->arf_update_idx[frame_index] = arf_buffer_indices[0]; - gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[0]; - - if (rc->source_alt_ref_pending) { - gf_group->update_type[frame_index] = OVERLAY_UPDATE; - gf_group->rf_level[frame_index] = INTER_NORMAL; - - // Final setup for second arf and its overlay. 
- if (cpi->multi_arf_enabled) { - gf_group->bit_allocation[2] = - gf_group->bit_allocation[mid_frame_idx] + mid_boost_bits; - gf_group->update_type[mid_frame_idx] = OVERLAY_UPDATE; - gf_group->bit_allocation[mid_frame_idx] = 0; - } - } else { - gf_group->update_type[frame_index] = GF_UPDATE; - gf_group->rf_level[frame_index] = GF_ARF_STD; - } - - // Note whether multi-arf was enabled this group for next time. - cpi->multi_arf_last_grp_enabled = cpi->multi_arf_enabled; -} - -// Analyse and define a gf/arf group. -static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) { - VP10_COMMON *const cm = &cpi->common; - RATE_CONTROL *const rc = &cpi->rc; - VP10EncoderConfig *const oxcf = &cpi->oxcf; - TWO_PASS *const twopass = &cpi->twopass; - FIRSTPASS_STATS next_frame; - const FIRSTPASS_STATS *const start_pos = twopass->stats_in; - int i; - - double boost_score = 0.0; - double old_boost_score = 0.0; - double gf_group_err = 0.0; -#if GROUP_ADAPTIVE_MAXQ - double gf_group_raw_error = 0.0; -#endif - double gf_group_skip_pct = 0.0; - double gf_group_inactive_zone_rows = 0.0; - double gf_first_frame_err = 0.0; - double mod_frame_err = 0.0; - - double mv_ratio_accumulator = 0.0; - double decay_accumulator = 1.0; - double zero_motion_accumulator = 1.0; - - double loop_decay_rate = 1.00; - double last_loop_decay_rate = 1.00; - - double this_frame_mv_in_out = 0.0; - double mv_in_out_accumulator = 0.0; - double abs_mv_in_out_accumulator = 0.0; - double mv_ratio_accumulator_thresh; - unsigned int allow_alt_ref = is_altref_enabled(cpi); - - int f_boost = 0; - int b_boost = 0; - int flash_detected; - int active_max_gf_interval; - int active_min_gf_interval; - int64_t gf_group_bits; - double gf_group_error_left; - int gf_arf_bits; - const int is_key_frame = frame_is_intra_only(cm); - const int arf_active_or_kf = is_key_frame || rc->source_alt_ref_active; - - // Reset the GF group data structures unless this is a key - // frame in which case it will already have been 
done. - if (is_key_frame == 0) { - vp10_zero(twopass->gf_group); - } - - vpx_clear_system_state(); - vp10_zero(next_frame); - - // Load stats for the current frame. - mod_frame_err = calculate_modified_err(cpi, twopass, oxcf, this_frame); - - // Note the error of the frame at the start of the group. This will be - // the GF frame error if we code a normal gf. - gf_first_frame_err = mod_frame_err; - - // If this is a key frame or the overlay from a previous arf then - // the error score / cost of this frame has already been accounted for. - if (arf_active_or_kf) { - gf_group_err -= gf_first_frame_err; -#if GROUP_ADAPTIVE_MAXQ - gf_group_raw_error -= this_frame->coded_error; -#endif - gf_group_skip_pct -= this_frame->intra_skip_pct; - gf_group_inactive_zone_rows -= this_frame->inactive_zone_rows; - } - - // Motion breakout threshold for loop below depends on image size. - mv_ratio_accumulator_thresh = - (cpi->initial_height + cpi->initial_width) / 4.0; - - // Set a maximum and minimum interval for the GF group. - // If the image appears almost completely static we can extend beyond this. - { - int int_max_q = - (int)(vp10_convert_qindex_to_q(twopass->active_worst_quality, - cpi->common.bit_depth)); - int int_lbq = - (int)(vp10_convert_qindex_to_q(rc->last_boosted_qindex, - cpi->common.bit_depth)); - active_min_gf_interval = rc->min_gf_interval + VPXMIN(2, int_max_q / 200); - if (active_min_gf_interval > rc->max_gf_interval) - active_min_gf_interval = rc->max_gf_interval; - - if (cpi->multi_arf_allowed) { - active_max_gf_interval = rc->max_gf_interval; - } else { - // The value chosen depends on the active Q range. At low Q we have - // bits to spare and are better with a smaller interval and smaller boost. - // At high Q when there are few bits to spare we are better with a longer - // interval to spread the cost of the GF. 
- active_max_gf_interval = 12 + VPXMIN(4, (int_lbq / 6)); - - // We have: active_min_gf_interval <= rc->max_gf_interval - if (active_max_gf_interval < active_min_gf_interval) - active_max_gf_interval = active_min_gf_interval; - else if (active_max_gf_interval > rc->max_gf_interval) - active_max_gf_interval = rc->max_gf_interval; - } - } - - i = 0; - while (i < rc->static_scene_max_gf_interval && i < rc->frames_to_key) { - ++i; - - // Accumulate error score of frames in this gf group. - mod_frame_err = calculate_modified_err(cpi, twopass, oxcf, this_frame); - gf_group_err += mod_frame_err; -#if GROUP_ADAPTIVE_MAXQ - gf_group_raw_error += this_frame->coded_error; -#endif - gf_group_skip_pct += this_frame->intra_skip_pct; - gf_group_inactive_zone_rows += this_frame->inactive_zone_rows; - - if (EOF == input_stats(twopass, &next_frame)) - break; - - // Test for the case where there is a brief flash but the prediction - // quality back to an earlier frame is then restored. - flash_detected = detect_flash(twopass, 0); - - // Update the motion related elements to the boost calculation. - accumulate_frame_motion_stats(&next_frame, - &this_frame_mv_in_out, &mv_in_out_accumulator, - &abs_mv_in_out_accumulator, - &mv_ratio_accumulator); - - // Accumulate the effect of prediction quality decay. - if (!flash_detected) { - last_loop_decay_rate = loop_decay_rate; - loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame); - - decay_accumulator = decay_accumulator * loop_decay_rate; - - // Monitor for static sections. - zero_motion_accumulator = VPXMIN( - zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame)); - - // Break clause to detect very still sections after motion. For example, - // a static image after a fade or other transition. - if (detect_transition_to_still(cpi, i, 5, loop_decay_rate, - last_loop_decay_rate)) { - allow_alt_ref = 0; - break; - } - } - - // Calculate a boost number for this frame. 
- boost_score += decay_accumulator * calc_frame_boost(cpi, &next_frame, - this_frame_mv_in_out, - GF_MAX_BOOST); - - // Break out conditions. - if ( - // Break at active_max_gf_interval unless almost totally static. - (i >= (active_max_gf_interval + arf_active_or_kf) && - zero_motion_accumulator < 0.995) || - ( - // Don't break out with a very short interval. - (i >= active_min_gf_interval + arf_active_or_kf) && - (!flash_detected) && - ((mv_ratio_accumulator > mv_ratio_accumulator_thresh) || - (abs_mv_in_out_accumulator > 3.0) || - (mv_in_out_accumulator < -2.0) || - ((boost_score - old_boost_score) < BOOST_BREAKOUT)))) { - boost_score = old_boost_score; - break; - } - - *this_frame = next_frame; - old_boost_score = boost_score; - } - - twopass->gf_zeromotion_pct = (int)(zero_motion_accumulator * 1000.0); - - // Was the group length constrained by the requirement for a new KF? - rc->constrained_gf_group = (i >= rc->frames_to_key) ? 1 : 0; - - // Should we use the alternate reference frame. - if (allow_alt_ref && - (i < cpi->oxcf.lag_in_frames) && - (i >= rc->min_gf_interval)) { - // Calculate the boost for alt ref. - rc->gfu_boost = calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, - &b_boost); - rc->source_alt_ref_pending = 1; - - // Test to see if multi arf is appropriate. - cpi->multi_arf_enabled = - (cpi->multi_arf_allowed && (rc->baseline_gf_interval >= 6) && - (zero_motion_accumulator < 0.995)) ? 1 : 0; - } else { - rc->gfu_boost = VPXMAX((int)boost_score, MIN_ARF_GF_BOOST); - rc->source_alt_ref_pending = 0; - } - - // Set the interval until the next gf. - rc->baseline_gf_interval = i - (is_key_frame || rc->source_alt_ref_pending); - - rc->frames_till_gf_update_due = rc->baseline_gf_interval; - - // Reset the file position. 
- reset_fpf_position(twopass, start_pos); - - // Calculate the bits to be allocated to the gf/arf group as a whole - gf_group_bits = calculate_total_gf_group_bits(cpi, gf_group_err); - -#if GROUP_ADAPTIVE_MAXQ - // Calculate an estimate of the maxq needed for the group. - // We are more agressive about correcting for sections - // where there could be significant overshoot than for easier - // sections where we do not wish to risk creating an overshoot - // of the allocated bit budget. - if ((cpi->oxcf.rc_mode != VPX_Q) && (rc->baseline_gf_interval > 1)) { - const int vbr_group_bits_per_frame = - (int)(gf_group_bits / rc->baseline_gf_interval); - const double group_av_err = gf_group_raw_error / rc->baseline_gf_interval; - const double group_av_skip_pct = - gf_group_skip_pct / rc->baseline_gf_interval; - const double group_av_inactive_zone = - ((gf_group_inactive_zone_rows * 2) / - (rc->baseline_gf_interval * (double)cm->mb_rows)); - - int tmp_q; - // rc factor is a weight factor that corrects for local rate control drift. - double rc_factor = 1.0; - if (rc->rate_error_estimate > 0) { - rc_factor = VPXMAX(RC_FACTOR_MIN, - (double)(100 - rc->rate_error_estimate) / 100.0); - } else { - rc_factor = VPXMIN(RC_FACTOR_MAX, - (double)(100 - rc->rate_error_estimate) / 100.0); - } - tmp_q = - get_twopass_worst_quality(cpi, group_av_err, - (group_av_skip_pct + group_av_inactive_zone), - vbr_group_bits_per_frame, - twopass->kfgroup_inter_fraction * rc_factor); - twopass->active_worst_quality = - VPXMAX(tmp_q, twopass->active_worst_quality >> 1); - } -#endif - - // Calculate the extra bits to be used for boosted frame(s) - gf_arf_bits = calculate_boost_bits(rc->baseline_gf_interval, - rc->gfu_boost, gf_group_bits); - - // Adjust KF group bits and error remaining. - twopass->kf_group_error_left -= (int64_t)gf_group_err; - - // If this is an arf update we want to remove the score for the overlay - // frame at the end which will usually be very cheap to code. 
- // The overlay frame has already, in effect, been coded so we want to spread - // the remaining bits among the other frames. - // For normal GFs remove the score for the GF itself unless this is - // also a key frame in which case it has already been accounted for. - if (rc->source_alt_ref_pending) { - gf_group_error_left = gf_group_err - mod_frame_err; - } else if (is_key_frame == 0) { - gf_group_error_left = gf_group_err - gf_first_frame_err; - } else { - gf_group_error_left = gf_group_err; - } - - // Allocate bits to each of the frames in the GF group. - allocate_gf_group_bits(cpi, gf_group_bits, gf_group_error_left, gf_arf_bits); - - // Reset the file position. - reset_fpf_position(twopass, start_pos); - - // Calculate a section intra ratio used in setting max loop filter. - if (cpi->common.frame_type != KEY_FRAME) { - twopass->section_intra_rating = - calculate_section_intra_ratio(start_pos, twopass->stats_in_end, - rc->baseline_gf_interval); - } - - if (oxcf->resize_mode == RESIZE_DYNAMIC) { - // Default to starting GF groups at normal frame size. - cpi->rc.next_frame_size_selector = UNSCALED; - } -} - -// Threshold for use of the lagging second reference frame. High second ref -// usage may point to a transient event like a flash or occlusion rather than -// a real scene cut. -#define SECOND_REF_USEAGE_THRESH 0.1 -// Minimum % intra coding observed in first pass (1.0 = 100%) -#define MIN_INTRA_LEVEL 0.25 -// Minimum ratio between the % of intra coding and inter coding in the first -// pass after discounting neutral blocks (discounting neutral blocks in this -// way helps catch scene cuts in clips with very flat areas or letter box -// format clips with image padding. -#define INTRA_VS_INTER_THRESH 2.0 -// Hard threshold where the first pass chooses intra for almost all blocks. -// In such a case even if the frame is not a scene cut coding a key frame -// may be a good option. 
-#define VERY_LOW_INTER_THRESH 0.05 -// Maximum threshold for the relative ratio of intra error score vs best -// inter error score. -#define KF_II_ERR_THRESHOLD 2.5 -// In real scene cuts there is almost always a sharp change in the intra -// or inter error score. -#define ERR_CHANGE_THRESHOLD 0.4 -// For real scene cuts we expect an improvment in the intra inter error -// ratio in the next frame. -#define II_IMPROVEMENT_THRESHOLD 3.5 -#define KF_II_MAX 128.0 - -static int test_candidate_kf(TWO_PASS *twopass, - const FIRSTPASS_STATS *last_frame, - const FIRSTPASS_STATS *this_frame, - const FIRSTPASS_STATS *next_frame) { - int is_viable_kf = 0; - double pcnt_intra = 1.0 - this_frame->pcnt_inter; - double modified_pcnt_inter = - this_frame->pcnt_inter - this_frame->pcnt_neutral; - - // Does the frame satisfy the primary criteria of a key frame? - // See above for an explanation of the test criteria. - // If so, then examine how well it predicts subsequent frames. - if ((this_frame->pcnt_second_ref < SECOND_REF_USEAGE_THRESH) && - (next_frame->pcnt_second_ref < SECOND_REF_USEAGE_THRESH) && - ((this_frame->pcnt_inter < VERY_LOW_INTER_THRESH) || - ((pcnt_intra > MIN_INTRA_LEVEL) && - (pcnt_intra > (INTRA_VS_INTER_THRESH * modified_pcnt_inter)) && - ((this_frame->intra_error / - DOUBLE_DIVIDE_CHECK(this_frame->coded_error)) < - KF_II_ERR_THRESHOLD) && - ((fabs(last_frame->coded_error - this_frame->coded_error) / - DOUBLE_DIVIDE_CHECK(this_frame->coded_error) > - ERR_CHANGE_THRESHOLD) || - (fabs(last_frame->intra_error - this_frame->intra_error) / - DOUBLE_DIVIDE_CHECK(this_frame->intra_error) > - ERR_CHANGE_THRESHOLD) || - ((next_frame->intra_error / - DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) > - II_IMPROVEMENT_THRESHOLD))))) { - int i; - const FIRSTPASS_STATS *start_pos = twopass->stats_in; - FIRSTPASS_STATS local_next_frame = *next_frame; - double boost_score = 0.0; - double old_boost_score = 0.0; - double decay_accumulator = 1.0; - - // Examine how well the key 
frame predicts subsequent frames. - for (i = 0; i < 16; ++i) { - double next_iiratio = (BOOST_FACTOR * local_next_frame.intra_error / - DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error)); - - if (next_iiratio > KF_II_MAX) - next_iiratio = KF_II_MAX; - - // Cumulative effect of decay in prediction quality. - if (local_next_frame.pcnt_inter > 0.85) - decay_accumulator *= local_next_frame.pcnt_inter; - else - decay_accumulator *= (0.85 + local_next_frame.pcnt_inter) / 2.0; - - // Keep a running total. - boost_score += (decay_accumulator * next_iiratio); - - // Test various breakout clauses. - if ((local_next_frame.pcnt_inter < 0.05) || - (next_iiratio < 1.5) || - (((local_next_frame.pcnt_inter - - local_next_frame.pcnt_neutral) < 0.20) && - (next_iiratio < 3.0)) || - ((boost_score - old_boost_score) < 3.0) || - (local_next_frame.intra_error < 200)) { - break; - } - - old_boost_score = boost_score; - - // Get the next frame details - if (EOF == input_stats(twopass, &local_next_frame)) - break; - } - - // If there is tolerable prediction for at least the next 3 frames then - // break out else discard this potential key frame and move on - if (boost_score > 30.0 && (i > 3)) { - is_viable_kf = 1; - } else { - // Reset the file position - reset_fpf_position(twopass, start_pos); - - is_viable_kf = 0; - } - } - - return is_viable_kf; -} - -#define FRAMES_TO_CHECK_DECAY 8 - -static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) { - int i, j; - RATE_CONTROL *const rc = &cpi->rc; - TWO_PASS *const twopass = &cpi->twopass; - GF_GROUP *const gf_group = &twopass->gf_group; - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - const FIRSTPASS_STATS first_frame = *this_frame; - const FIRSTPASS_STATS *const start_position = twopass->stats_in; - FIRSTPASS_STATS next_frame; - FIRSTPASS_STATS last_frame; - int kf_bits = 0; - int loop_decay_counter = 0; - double decay_accumulator = 1.0; - double av_decay_accumulator = 0.0; - double zero_motion_accumulator = 1.0; - 
double boost_score = 0.0; - double kf_mod_err = 0.0; - double kf_group_err = 0.0; - double recent_loop_decay[FRAMES_TO_CHECK_DECAY]; - - vp10_zero(next_frame); - - cpi->common.frame_type = KEY_FRAME; - - // Reset the GF group data structures. - vp10_zero(*gf_group); - - // Is this a forced key frame by interval. - rc->this_key_frame_forced = rc->next_key_frame_forced; - - // Clear the alt ref active flag and last group multi arf flags as they - // can never be set for a key frame. - rc->source_alt_ref_active = 0; - cpi->multi_arf_last_grp_enabled = 0; - - // KF is always a GF so clear frames till next gf counter. - rc->frames_till_gf_update_due = 0; - - rc->frames_to_key = 1; - - twopass->kf_group_bits = 0; // Total bits available to kf group - twopass->kf_group_error_left = 0; // Group modified error score. - - kf_mod_err = calculate_modified_err(cpi, twopass, oxcf, this_frame); - - // Initialize the decay rates for the recent frames to check - for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j) - recent_loop_decay[j] = 1.0; - - // Find the next keyframe. - i = 0; - while (twopass->stats_in < twopass->stats_in_end && - rc->frames_to_key < cpi->oxcf.key_freq) { - // Accumulate kf group error. - kf_group_err += calculate_modified_err(cpi, twopass, oxcf, this_frame); - - // Load the next frame's stats. - last_frame = *this_frame; - input_stats(twopass, this_frame); - - // Provided that we are not at the end of the file... - if (cpi->oxcf.auto_key && twopass->stats_in < twopass->stats_in_end) { - double loop_decay_rate; - - // Check for a scene cut. - if (test_candidate_kf(twopass, &last_frame, this_frame, - twopass->stats_in)) - break; - - // How fast is the prediction quality decaying? - loop_decay_rate = get_prediction_decay_rate(cpi, twopass->stats_in); - - // We want to know something about the recent past... rather than - // as used elsewhere where we are concerned with decay in prediction - // quality since the last GF or KF. 
- recent_loop_decay[i % FRAMES_TO_CHECK_DECAY] = loop_decay_rate; - decay_accumulator = 1.0; - for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j) - decay_accumulator *= recent_loop_decay[j]; - - // Special check for transition or high motion followed by a - // static scene. - if (detect_transition_to_still(cpi, i, cpi->oxcf.key_freq - i, - loop_decay_rate, decay_accumulator)) - break; - - // Step on to the next frame. - ++rc->frames_to_key; - - // If we don't have a real key frame within the next two - // key_freq intervals then break out of the loop. - if (rc->frames_to_key >= 2 * cpi->oxcf.key_freq) - break; - } else { - ++rc->frames_to_key; - } - ++i; - } - - // If there is a max kf interval set by the user we must obey it. - // We already breakout of the loop above at 2x max. - // This code centers the extra kf if the actual natural interval - // is between 1x and 2x. - if (cpi->oxcf.auto_key && - rc->frames_to_key > cpi->oxcf.key_freq) { - FIRSTPASS_STATS tmp_frame = first_frame; - - rc->frames_to_key /= 2; - - // Reset to the start of the group. - reset_fpf_position(twopass, start_position); - - kf_group_err = 0.0; - - // Rescan to get the correct error data for the forced kf group. - for (i = 0; i < rc->frames_to_key; ++i) { - kf_group_err += calculate_modified_err(cpi, twopass, oxcf, &tmp_frame); - input_stats(twopass, &tmp_frame); - } - rc->next_key_frame_forced = 1; - } else if (twopass->stats_in == twopass->stats_in_end || - rc->frames_to_key >= cpi->oxcf.key_freq) { - rc->next_key_frame_forced = 1; - } else { - rc->next_key_frame_forced = 0; - } - - // Special case for the last key frame of the file. - if (twopass->stats_in >= twopass->stats_in_end) { - // Accumulate kf group error. - kf_group_err += calculate_modified_err(cpi, twopass, oxcf, this_frame); - } - - // Calculate the number of bits that should be assigned to the kf group. 
- if (twopass->bits_left > 0 && twopass->modified_error_left > 0.0) { - // Maximum number of bits for a single normal frame (not key frame). - const int max_bits = frame_max_bits(rc, &cpi->oxcf); - - // Maximum number of bits allocated to the key frame group. - int64_t max_grp_bits; - - // Default allocation based on bits left and relative - // complexity of the section. - twopass->kf_group_bits = (int64_t)(twopass->bits_left * - (kf_group_err / twopass->modified_error_left)); - - // Clip based on maximum per frame rate defined by the user. - max_grp_bits = (int64_t)max_bits * (int64_t)rc->frames_to_key; - if (twopass->kf_group_bits > max_grp_bits) - twopass->kf_group_bits = max_grp_bits; - } else { - twopass->kf_group_bits = 0; - } - twopass->kf_group_bits = VPXMAX(0, twopass->kf_group_bits); - - // Reset the first pass file position. - reset_fpf_position(twopass, start_position); - - // Scan through the kf group collating various stats used to determine - // how many bits to spend on it. - decay_accumulator = 1.0; - boost_score = 0.0; - for (i = 0; i < (rc->frames_to_key - 1); ++i) { - if (EOF == input_stats(twopass, &next_frame)) - break; - - // Monitor for static sections. - zero_motion_accumulator = VPXMIN( - zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame)); - - // Not all frames in the group are necessarily used in calculating boost. - if ((i <= rc->max_gf_interval) || - ((i <= (rc->max_gf_interval * 4)) && (decay_accumulator > 0.5))) { - const double frame_boost = - calc_frame_boost(cpi, this_frame, 0, KF_MAX_BOOST); - - // How fast is prediction quality decaying. 
- if (!detect_flash(twopass, 0)) { - const double loop_decay_rate = - get_prediction_decay_rate(cpi, &next_frame); - decay_accumulator *= loop_decay_rate; - decay_accumulator = VPXMAX(decay_accumulator, MIN_DECAY_FACTOR); - av_decay_accumulator += decay_accumulator; - ++loop_decay_counter; - } - boost_score += (decay_accumulator * frame_boost); - } - } - av_decay_accumulator /= (double)loop_decay_counter; - - reset_fpf_position(twopass, start_position); - - // Store the zero motion percentage - twopass->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0); - - // Calculate a section intra ratio used in setting max loop filter. - twopass->section_intra_rating = - calculate_section_intra_ratio(start_position, twopass->stats_in_end, - rc->frames_to_key); - - // Apply various clamps for min and max boost - rc->kf_boost = (int)(av_decay_accumulator * boost_score); - rc->kf_boost = VPXMAX(rc->kf_boost, (rc->frames_to_key * 3)); - rc->kf_boost = VPXMAX(rc->kf_boost, MIN_KF_BOOST); - - // Work out how many bits to allocate for the key frame itself. - kf_bits = calculate_boost_bits((rc->frames_to_key - 1), - rc->kf_boost, twopass->kf_group_bits); - - // Work out the fraction of the kf group bits reserved for the inter frames - // within the group after discounting the bits for the kf itself. - if (twopass->kf_group_bits) { - twopass->kfgroup_inter_fraction = - (double)(twopass->kf_group_bits - kf_bits) / - (double)twopass->kf_group_bits; - } else { - twopass->kfgroup_inter_fraction = 1.0; - } - - twopass->kf_group_bits -= kf_bits; - - // Save the bits to spend on the key frame. - gf_group->bit_allocation[0] = kf_bits; - gf_group->update_type[0] = KF_UPDATE; - gf_group->rf_level[0] = KF_STD; - - // Note the total error score of the kf group minus the key frame itself. - twopass->kf_group_error_left = (int)(kf_group_err - kf_mod_err); - - // Adjust the count of total modified error left. 
- // The count of bits left is adjusted elsewhere based on real coded frame - // sizes. - twopass->modified_error_left -= kf_group_err; - - if (oxcf->resize_mode == RESIZE_DYNAMIC) { - // Default to normal-sized frame on keyframes. - cpi->rc.next_frame_size_selector = UNSCALED; - } -} - -// Define the reference buffers that will be updated post encode. -static void configure_buffer_updates(VP10_COMP *cpi) { - TWO_PASS *const twopass = &cpi->twopass; - - cpi->rc.is_src_frame_alt_ref = 0; - switch (twopass->gf_group.update_type[twopass->gf_group.index]) { - case KF_UPDATE: - cpi->refresh_last_frame = 1; - cpi->refresh_golden_frame = 1; - cpi->refresh_alt_ref_frame = 1; - break; - case LF_UPDATE: - cpi->refresh_last_frame = 1; - cpi->refresh_golden_frame = 0; - cpi->refresh_alt_ref_frame = 0; - break; - case GF_UPDATE: - cpi->refresh_last_frame = 1; - cpi->refresh_golden_frame = 1; - cpi->refresh_alt_ref_frame = 0; - break; - case OVERLAY_UPDATE: - cpi->refresh_last_frame = 0; - cpi->refresh_golden_frame = 1; - cpi->refresh_alt_ref_frame = 0; - cpi->rc.is_src_frame_alt_ref = 1; - break; - case ARF_UPDATE: - cpi->refresh_last_frame = 0; - cpi->refresh_golden_frame = 0; - cpi->refresh_alt_ref_frame = 1; - break; - default: - assert(0); - break; - } -} - -static int is_skippable_frame(const VP10_COMP *cpi) { - // If the current frame does not have non-zero motion vector detected in the - // first pass, and so do its previous and forward frames, then this frame - // can be skipped for partition check, and the partition size is assigned - // according to the variance - const TWO_PASS *const twopass = &cpi->twopass; - - return (!frame_is_intra_only(&cpi->common) && - twopass->stats_in - 2 > twopass->stats_in_start && - twopass->stats_in < twopass->stats_in_end && - (twopass->stats_in - 1)->pcnt_inter - (twopass->stats_in - 1)->pcnt_motion - == 1 && - (twopass->stats_in - 2)->pcnt_inter - (twopass->stats_in - 2)->pcnt_motion - == 1 && - twopass->stats_in->pcnt_inter - 
twopass->stats_in->pcnt_motion == 1); -} - -void vp10_rc_get_second_pass_params(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - RATE_CONTROL *const rc = &cpi->rc; - TWO_PASS *const twopass = &cpi->twopass; - GF_GROUP *const gf_group = &twopass->gf_group; - int frames_left; - FIRSTPASS_STATS this_frame; - - int target_rate; - - frames_left = (int)(twopass->total_stats.count - - cm->current_video_frame); - - if (!twopass->stats_in) - return; - - // If this is an arf frame then we dont want to read the stats file or - // advance the input pointer as we already have what we need. - if (gf_group->update_type[gf_group->index] == ARF_UPDATE) { - int target_rate; - configure_buffer_updates(cpi); - target_rate = gf_group->bit_allocation[gf_group->index]; - target_rate = vp10_rc_clamp_pframe_target_size(cpi, target_rate); - rc->base_frame_target = target_rate; - - cm->frame_type = INTER_FRAME; - - // Do the firstpass stats indicate that this frame is skippable for the - // partition search? - if (cpi->sf.allow_partition_search_skip && cpi->oxcf.pass == 2) { - cpi->partition_search_skippable_frame = is_skippable_frame(cpi); - } - - return; - } - - vpx_clear_system_state(); - - if (cpi->oxcf.rc_mode == VPX_Q) { - twopass->active_worst_quality = cpi->oxcf.cq_level; - } else if (cm->current_video_frame == 0) { - // Special case code for first frame. 
- const int section_target_bandwidth = (int)(twopass->bits_left / - frames_left); - const double section_length = twopass->total_left_stats.count; - const double section_error = - twopass->total_left_stats.coded_error / section_length; - const double section_intra_skip = - twopass->total_left_stats.intra_skip_pct / section_length; - const double section_inactive_zone = - (twopass->total_left_stats.inactive_zone_rows * 2) / - ((double)cm->mb_rows * section_length); - const int tmp_q = - get_twopass_worst_quality(cpi, section_error, - section_intra_skip + section_inactive_zone, - section_target_bandwidth, DEFAULT_GRP_WEIGHT); - - twopass->active_worst_quality = tmp_q; - twopass->baseline_active_worst_quality = tmp_q; - rc->ni_av_qi = tmp_q; - rc->last_q[INTER_FRAME] = tmp_q; - rc->avg_q = vp10_convert_qindex_to_q(tmp_q, cm->bit_depth); - rc->avg_frame_qindex[INTER_FRAME] = tmp_q; - rc->last_q[KEY_FRAME] = (tmp_q + cpi->oxcf.best_allowed_q) / 2; - rc->avg_frame_qindex[KEY_FRAME] = rc->last_q[KEY_FRAME]; - } - vp10_zero(this_frame); - if (EOF == input_stats(twopass, &this_frame)) - return; - - // Set the frame content type flag. - if (this_frame.intra_skip_pct >= FC_ANIMATION_THRESH) - twopass->fr_content_type = FC_GRAPHICS_ANIMATION; - else - twopass->fr_content_type = FC_NORMAL; - - // Keyframe and section processing. - if (rc->frames_to_key == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY)) { - FIRSTPASS_STATS this_frame_copy; - this_frame_copy = this_frame; - // Define next KF group and assign bits to it. - find_next_key_frame(cpi, &this_frame); - this_frame = this_frame_copy; - } else { - cm->frame_type = INTER_FRAME; - } - - // Define a new GF/ARF group. (Should always enter here for key frames). 
- if (rc->frames_till_gf_update_due == 0) { - define_gf_group(cpi, &this_frame); - - rc->frames_till_gf_update_due = rc->baseline_gf_interval; - -#if ARF_STATS_OUTPUT - { - FILE *fpfile; - fpfile = fopen("arf.stt", "a"); - ++arf_count; - fprintf(fpfile, "%10d %10ld %10d %10d %10ld\n", - cm->current_video_frame, rc->frames_till_gf_update_due, - rc->kf_boost, arf_count, rc->gfu_boost); - - fclose(fpfile); - } -#endif - } - - configure_buffer_updates(cpi); - - // Do the firstpass stats indicate that this frame is skippable for the - // partition search? - if (cpi->sf.allow_partition_search_skip && cpi->oxcf.pass == 2) { - cpi->partition_search_skippable_frame = is_skippable_frame(cpi); - } - - target_rate = gf_group->bit_allocation[gf_group->index]; - if (cpi->common.frame_type == KEY_FRAME) - target_rate = vp10_rc_clamp_iframe_target_size(cpi, target_rate); - else - target_rate = vp10_rc_clamp_pframe_target_size(cpi, target_rate); - - rc->base_frame_target = target_rate; - - { - const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) - ? cpi->initial_mbs : cpi->common.MBs; - // The multiplication by 256 reverses a scaling factor of (>> 8) - // applied when combining MB error values for the frame. - twopass->mb_av_energy = - log(((this_frame.intra_error * 256.0) / num_mbs) + 1.0); - } - - // Update the total stats remaining structure. - subtract_stats(&twopass->total_left_stats, &this_frame); -} - -#define MINQ_ADJ_LIMIT 48 -#define MINQ_ADJ_LIMIT_CQ 20 -#define HIGH_UNDERSHOOT_RATIO 2 -void vp10_twopass_postencode_update(VP10_COMP *cpi) { - TWO_PASS *const twopass = &cpi->twopass; - RATE_CONTROL *const rc = &cpi->rc; - const int bits_used = rc->base_frame_target; - - // VBR correction is done through rc->vbr_bits_off_target. Based on the - // sign of this value, a limited % adjustment is made to the target rate - // of subsequent frames, to try and push it back towards 0. 
This method - // is designed to prevent extreme behaviour at the end of a clip - // or group of frames. - rc->vbr_bits_off_target += rc->base_frame_target - rc->projected_frame_size; - twopass->bits_left = VPXMAX(twopass->bits_left - bits_used, 0); - - // Calculate the pct rc error. - if (rc->total_actual_bits) { - rc->rate_error_estimate = - (int)((rc->vbr_bits_off_target * 100) / rc->total_actual_bits); - rc->rate_error_estimate = clamp(rc->rate_error_estimate, -100, 100); - } else { - rc->rate_error_estimate = 0; - } - - if (cpi->common.frame_type != KEY_FRAME) { - twopass->kf_group_bits -= bits_used; - twopass->last_kfgroup_zeromotion_pct = twopass->kf_zeromotion_pct; - } - twopass->kf_group_bits = VPXMAX(twopass->kf_group_bits, 0); - - // Increment the gf group index ready for the next frame. - ++twopass->gf_group.index; - - // If the rate control is drifting consider adjustment to min or maxq. - if ((cpi->oxcf.rc_mode != VPX_Q) && - (cpi->twopass.gf_zeromotion_pct < VLOW_MOTION_THRESHOLD) && - !cpi->rc.is_src_frame_alt_ref) { - const int maxq_adj_limit = - rc->worst_quality - twopass->active_worst_quality; - const int minq_adj_limit = - (cpi->oxcf.rc_mode == VPX_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT); - - // Undershoot. - if (rc->rate_error_estimate > cpi->oxcf.under_shoot_pct) { - --twopass->extend_maxq; - if (rc->rolling_target_bits >= rc->rolling_actual_bits) - ++twopass->extend_minq; - // Overshoot. - } else if (rc->rate_error_estimate < -cpi->oxcf.over_shoot_pct) { - --twopass->extend_minq; - if (rc->rolling_target_bits < rc->rolling_actual_bits) - ++twopass->extend_maxq; - } else { - // Adjustment for extreme local overshoot. - if (rc->projected_frame_size > (2 * rc->base_frame_target) && - rc->projected_frame_size > (2 * rc->avg_frame_bandwidth)) - ++twopass->extend_maxq; - - // Unwind undershoot or overshoot adjustment. 
- if (rc->rolling_target_bits < rc->rolling_actual_bits) - --twopass->extend_minq; - else if (rc->rolling_target_bits > rc->rolling_actual_bits) - --twopass->extend_maxq; - } - - twopass->extend_minq = clamp(twopass->extend_minq, 0, minq_adj_limit); - twopass->extend_maxq = clamp(twopass->extend_maxq, 0, maxq_adj_limit); - - // If there is a big and undexpected undershoot then feed the extra - // bits back in quickly. One situation where this may happen is if a - // frame is unexpectedly almost perfectly predicted by the ARF or GF - // but not very well predcited by the previous frame. - if (!frame_is_kf_gf_arf(cpi) && !cpi->rc.is_src_frame_alt_ref) { - int fast_extra_thresh = rc->base_frame_target / HIGH_UNDERSHOOT_RATIO; - if (rc->projected_frame_size < fast_extra_thresh) { - rc->vbr_bits_off_target_fast += - fast_extra_thresh - rc->projected_frame_size; - rc->vbr_bits_off_target_fast = - VPXMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth)); - - // Fast adaptation of minQ if necessary to use up the extra bits. - if (rc->avg_frame_bandwidth) { - twopass->extend_minq_fast = - (int)(rc->vbr_bits_off_target_fast * 8 / rc->avg_frame_bandwidth); - } - twopass->extend_minq_fast = VPXMIN( - twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq); - } else if (rc->vbr_bits_off_target_fast) { - twopass->extend_minq_fast = VPXMIN( - twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq); - } else { - twopass->extend_minq_fast = 0; - } - } - } -} diff --git a/libs/libvpx/vp10/encoder/firstpass.h b/libs/libvpx/vp10/encoder/firstpass.h deleted file mode 100644 index 68a88879c4..0000000000 --- a/libs/libvpx/vp10/encoder/firstpass.h +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_ENCODER_FIRSTPASS_H_ -#define VP10_ENCODER_FIRSTPASS_H_ - -#include "vp10/encoder/lookahead.h" -#include "vp10/encoder/ratectrl.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#if CONFIG_FP_MB_STATS - -#define FPMB_DCINTRA_MASK 0x01 - -#define FPMB_MOTION_ZERO_MASK 0x02 -#define FPMB_MOTION_LEFT_MASK 0x04 -#define FPMB_MOTION_RIGHT_MASK 0x08 -#define FPMB_MOTION_UP_MASK 0x10 -#define FPMB_MOTION_DOWN_MASK 0x20 - -#define FPMB_ERROR_SMALL_MASK 0x40 -#define FPMB_ERROR_LARGE_MASK 0x80 -#define FPMB_ERROR_SMALL_TH 2000 -#define FPMB_ERROR_LARGE_TH 48000 - -typedef struct { - uint8_t *mb_stats_start; - uint8_t *mb_stats_end; -} FIRSTPASS_MB_STATS; -#endif - -#define VLOW_MOTION_THRESHOLD 950 - -typedef struct { - double frame; - double weight; - double intra_error; - double coded_error; - double sr_coded_error; - double pcnt_inter; - double pcnt_motion; - double pcnt_second_ref; - double pcnt_neutral; - double intra_skip_pct; - double inactive_zone_rows; // Image mask rows top and bottom. - double inactive_zone_cols; // Image mask columns at left and right edges. 
- double MVr; - double mvr_abs; - double MVc; - double mvc_abs; - double MVrv; - double MVcv; - double mv_in_out_count; - double new_mv_count; - double duration; - double count; -} FIRSTPASS_STATS; - -typedef enum { - KF_UPDATE = 0, - LF_UPDATE = 1, - GF_UPDATE = 2, - ARF_UPDATE = 3, - OVERLAY_UPDATE = 4, - FRAME_UPDATE_TYPES = 5 -} FRAME_UPDATE_TYPE; - -#define FC_ANIMATION_THRESH 0.15 -typedef enum { - FC_NORMAL = 0, - FC_GRAPHICS_ANIMATION = 1, - FRAME_CONTENT_TYPES = 2 -} FRAME_CONTENT_TYPE; - -typedef struct { - unsigned char index; - RATE_FACTOR_LEVEL rf_level[(MAX_LAG_BUFFERS * 2) + 1]; - FRAME_UPDATE_TYPE update_type[(MAX_LAG_BUFFERS * 2) + 1]; - unsigned char arf_src_offset[(MAX_LAG_BUFFERS * 2) + 1]; - unsigned char arf_update_idx[(MAX_LAG_BUFFERS * 2) + 1]; - unsigned char arf_ref_idx[(MAX_LAG_BUFFERS * 2) + 1]; - int bit_allocation[(MAX_LAG_BUFFERS * 2) + 1]; -} GF_GROUP; - -typedef struct { - unsigned int section_intra_rating; - FIRSTPASS_STATS total_stats; - FIRSTPASS_STATS this_frame_stats; - const FIRSTPASS_STATS *stats_in; - const FIRSTPASS_STATS *stats_in_start; - const FIRSTPASS_STATS *stats_in_end; - FIRSTPASS_STATS total_left_stats; - int first_pass_done; - int64_t bits_left; - double modified_error_min; - double modified_error_max; - double modified_error_left; - double mb_av_energy; - -#if CONFIG_FP_MB_STATS - uint8_t *frame_mb_stats_buf; - uint8_t *this_frame_mb_stats; - FIRSTPASS_MB_STATS firstpass_mb_stats; -#endif - // An indication of the content type of the current frame - FRAME_CONTENT_TYPE fr_content_type; - - // Projected total bits available for a key frame group of frames - int64_t kf_group_bits; - - // Error score of frames still to be coded in kf group - int64_t kf_group_error_left; - - // The fraction for a kf groups total bits allocated to the inter frames - double kfgroup_inter_fraction; - - int sr_update_lag; - - int kf_zeromotion_pct; - int last_kfgroup_zeromotion_pct; - int gf_zeromotion_pct; - int active_worst_quality; - 
int baseline_active_worst_quality; - int extend_minq; - int extend_maxq; - int extend_minq_fast; - - GF_GROUP gf_group; -} TWO_PASS; - -struct VP10_COMP; - -void vp10_init_first_pass(struct VP10_COMP *cpi); -void vp10_rc_get_first_pass_params(struct VP10_COMP *cpi); -void vp10_first_pass(struct VP10_COMP *cpi, - const struct lookahead_entry *source); -void vp10_end_first_pass(struct VP10_COMP *cpi); - -void vp10_init_second_pass(struct VP10_COMP *cpi); -void vp10_rc_get_second_pass_params(struct VP10_COMP *cpi); -void vp10_twopass_postencode_update(struct VP10_COMP *cpi); - -// Post encode update of the rate control parameters for 2-pass -void vp10_twopass_postencode_update(struct VP10_COMP *cpi); - -void vp10_init_subsampling(struct VP10_COMP *cpi); - -void vp10_calculate_coded_size(struct VP10_COMP *cpi, - int *scaled_frame_width, - int *scaled_frame_height); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_FIRSTPASS_H_ diff --git a/libs/libvpx/vp10/encoder/lookahead.c b/libs/libvpx/vp10/encoder/lookahead.c deleted file mode 100644 index dce0139038..0000000000 --- a/libs/libvpx/vp10/encoder/lookahead.c +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Copyright (c) 2011 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ -#include -#include - -#include "./vpx_config.h" - -#include "vp10/common/common.h" - -#include "vp10/encoder/encoder.h" -#include "vp10/encoder/extend.h" -#include "vp10/encoder/lookahead.h" - -/* Return the buffer at the given absolute index and increment the index */ -static struct lookahead_entry *pop(struct lookahead_ctx *ctx, - unsigned int *idx) { - unsigned int index = *idx; - struct lookahead_entry *buf = ctx->buf + index; - - assert(index < ctx->max_sz); - if (++index >= ctx->max_sz) - index -= ctx->max_sz; - *idx = index; - return buf; -} - - -void vp10_lookahead_destroy(struct lookahead_ctx *ctx) { - if (ctx) { - if (ctx->buf) { - unsigned int i; - - for (i = 0; i < ctx->max_sz; i++) - vpx_free_frame_buffer(&ctx->buf[i].img); - free(ctx->buf); - } - free(ctx); - } -} - - -struct lookahead_ctx *vp10_lookahead_init(unsigned int width, - unsigned int height, - unsigned int subsampling_x, - unsigned int subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - int use_highbitdepth, -#endif - unsigned int depth) { - struct lookahead_ctx *ctx = NULL; - - // Clamp the lookahead queue depth - depth = clamp(depth, 1, MAX_LAG_BUFFERS); - - // Allocate memory to keep previous source frames available. 
- depth += MAX_PRE_FRAMES; - - // Allocate the lookahead structures - ctx = calloc(1, sizeof(*ctx)); - if (ctx) { - const int legacy_byte_alignment = 0; - unsigned int i; - ctx->max_sz = depth; - ctx->buf = calloc(depth, sizeof(*ctx->buf)); - if (!ctx->buf) - goto bail; - for (i = 0; i < depth; i++) - if (vpx_alloc_frame_buffer(&ctx->buf[i].img, - width, height, subsampling_x, subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - use_highbitdepth, -#endif - VP9_ENC_BORDER_IN_PIXELS, - legacy_byte_alignment)) - goto bail; - } - return ctx; - bail: - vp10_lookahead_destroy(ctx); - return NULL; -} - -#define USE_PARTIAL_COPY 0 - -int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src, - int64_t ts_start, int64_t ts_end, -#if CONFIG_VP9_HIGHBITDEPTH - int use_highbitdepth, -#endif - unsigned int flags) { - struct lookahead_entry *buf; -#if USE_PARTIAL_COPY - int row, col, active_end; - int mb_rows = (src->y_height + 15) >> 4; - int mb_cols = (src->y_width + 15) >> 4; -#endif - int width = src->y_crop_width; - int height = src->y_crop_height; - int uv_width = src->uv_crop_width; - int uv_height = src->uv_crop_height; - int subsampling_x = src->subsampling_x; - int subsampling_y = src->subsampling_y; - int larger_dimensions, new_dimensions; - - if (ctx->sz + 1 + MAX_PRE_FRAMES > ctx->max_sz) - return 1; - ctx->sz++; - buf = pop(ctx, &ctx->write_idx); - - new_dimensions = width != buf->img.y_crop_width || - height != buf->img.y_crop_height || - uv_width != buf->img.uv_crop_width || - uv_height != buf->img.uv_crop_height; - larger_dimensions = width > buf->img.y_width || - height > buf->img.y_height || - uv_width > buf->img.uv_width || - uv_height > buf->img.uv_height; - assert(!larger_dimensions || new_dimensions); - -#if USE_PARTIAL_COPY - // TODO(jkoleszar): This is disabled for now, as - // vp10_copy_and_extend_frame_with_rect is not subsampling/alpha aware. - - // Only do this partial copy if the following conditions are all met: - // 1. 
Lookahead queue has has size of 1. - // 2. Active map is provided. - // 3. This is not a key frame, golden nor altref frame. - if (!new_dimensions && ctx->max_sz == 1 && active_map && !flags) { - for (row = 0; row < mb_rows; ++row) { - col = 0; - - while (1) { - // Find the first active macroblock in this row. - for (; col < mb_cols; ++col) { - if (active_map[col]) - break; - } - - // No more active macroblock in this row. - if (col == mb_cols) - break; - - // Find the end of active region in this row. - active_end = col; - - for (; active_end < mb_cols; ++active_end) { - if (!active_map[active_end]) - break; - } - - // Only copy this active region. - vp10_copy_and_extend_frame_with_rect(src, &buf->img, - row << 4, - col << 4, 16, - (active_end - col) << 4); - - // Start again from the end of this active region. - col = active_end; - } - - active_map += mb_cols; - } - } else { -#endif - if (larger_dimensions) { - YV12_BUFFER_CONFIG new_img; - memset(&new_img, 0, sizeof(new_img)); - if (vpx_alloc_frame_buffer(&new_img, - width, height, subsampling_x, subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - use_highbitdepth, -#endif - VP9_ENC_BORDER_IN_PIXELS, - 0)) - return 1; - vpx_free_frame_buffer(&buf->img); - buf->img = new_img; - } else if (new_dimensions) { - buf->img.y_crop_width = src->y_crop_width; - buf->img.y_crop_height = src->y_crop_height; - buf->img.uv_crop_width = src->uv_crop_width; - buf->img.uv_crop_height = src->uv_crop_height; - buf->img.subsampling_x = src->subsampling_x; - buf->img.subsampling_y = src->subsampling_y; - } - // Partial copy not implemented yet - vp10_copy_and_extend_frame(src, &buf->img); -#if USE_PARTIAL_COPY - } -#endif - - buf->ts_start = ts_start; - buf->ts_end = ts_end; - buf->flags = flags; - return 0; -} - - -struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx, - int drain) { - struct lookahead_entry *buf = NULL; - - if (ctx && ctx->sz && (drain || ctx->sz == ctx->max_sz - MAX_PRE_FRAMES)) { - buf = pop(ctx, 
&ctx->read_idx); - ctx->sz--; - } - return buf; -} - - -struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx, - int index) { - struct lookahead_entry *buf = NULL; - - if (index >= 0) { - // Forward peek - if (index < (int)ctx->sz) { - index += ctx->read_idx; - if (index >= (int)ctx->max_sz) - index -= ctx->max_sz; - buf = ctx->buf + index; - } - } else if (index < 0) { - // Backward peek - if (-index <= MAX_PRE_FRAMES) { - index += ctx->read_idx; - if (index < 0) - index += ctx->max_sz; - buf = ctx->buf + index; - } - } - - return buf; -} - -unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx) { - return ctx->sz; -} diff --git a/libs/libvpx/vp10/encoder/lookahead.h b/libs/libvpx/vp10/encoder/lookahead.h deleted file mode 100644 index 22429aeeb0..0000000000 --- a/libs/libvpx/vp10/encoder/lookahead.h +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright (c) 2011 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_ENCODER_LOOKAHEAD_H_ -#define VP10_ENCODER_LOOKAHEAD_H_ - -#include "vpx_scale/yv12config.h" -#include "vpx/vpx_integer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define MAX_LAG_BUFFERS 25 - -struct lookahead_entry { - YV12_BUFFER_CONFIG img; - int64_t ts_start; - int64_t ts_end; - unsigned int flags; -}; - -// The max of past frames we want to keep in the queue. 
-#define MAX_PRE_FRAMES 1 - -struct lookahead_ctx { - unsigned int max_sz; /* Absolute size of the queue */ - unsigned int sz; /* Number of buffers currently in the queue */ - unsigned int read_idx; /* Read index */ - unsigned int write_idx; /* Write index */ - struct lookahead_entry *buf; /* Buffer list */ -}; - -/**\brief Initializes the lookahead stage - * - * The lookahead stage is a queue of frame buffers on which some analysis - * may be done when buffers are enqueued. - */ -struct lookahead_ctx *vp10_lookahead_init(unsigned int width, - unsigned int height, - unsigned int subsampling_x, - unsigned int subsampling_y, -#if CONFIG_VP9_HIGHBITDEPTH - int use_highbitdepth, -#endif - unsigned int depth); - - -/**\brief Destroys the lookahead stage - */ -void vp10_lookahead_destroy(struct lookahead_ctx *ctx); - - -/**\brief Enqueue a source buffer - * - * This function will copy the source image into a new framebuffer with - * the expected stride/border. - * - * If active_map is non-NULL and there is only one frame in the queue, then copy - * only active macroblocks. 
- * - * \param[in] ctx Pointer to the lookahead context - * \param[in] src Pointer to the image to enqueue - * \param[in] ts_start Timestamp for the start of this frame - * \param[in] ts_end Timestamp for the end of this frame - * \param[in] flags Flags set on this frame - * \param[in] active_map Map that specifies which macroblock is active - */ -int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src, - int64_t ts_start, int64_t ts_end, -#if CONFIG_VP9_HIGHBITDEPTH - int use_highbitdepth, -#endif - unsigned int flags); - - -/**\brief Get the next source buffer to encode - * - * - * \param[in] ctx Pointer to the lookahead context - * \param[in] drain Flag indicating the buffer should be drained - * (return a buffer regardless of the current queue depth) - * - * \retval NULL, if drain set and queue is empty - * \retval NULL, if drain not set and queue not of the configured depth - */ -struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx, - int drain); - - -/**\brief Get a future source buffer to encode - * - * \param[in] ctx Pointer to the lookahead context - * \param[in] index Index of the frame to be returned, 0 == next frame - * - * \retval NULL, if no buffer exists at the specified index - */ -struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx, - int index); - - -/**\brief Get the number of frames currently in the lookahead queue - * - * \param[in] ctx Pointer to the lookahead context - */ -unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_LOOKAHEAD_H_ diff --git a/libs/libvpx/vp10/encoder/mbgraph.c b/libs/libvpx/vp10/encoder/mbgraph.c deleted file mode 100644 index ed0f53909f..0000000000 --- a/libs/libvpx/vp10/encoder/mbgraph.c +++ /dev/null @@ -1,417 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - -#include "./vp10_rtcd.h" -#include "./vpx_dsp_rtcd.h" - -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/system_state.h" -#include "vp10/encoder/segmentation.h" -#include "vp10/encoder/mcomp.h" -#include "vp10/common/blockd.h" -#include "vp10/common/reconinter.h" -#include "vp10/common/reconintra.h" - - -static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi, - const MV *ref_mv, - MV *dst_mv, - int mb_row, - int mb_col) { - MACROBLOCK *const x = &cpi->td.mb; - MACROBLOCKD *const xd = &x->e_mbd; - const MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv; - const vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16]; - - const int tmp_col_min = x->mv_col_min; - const int tmp_col_max = x->mv_col_max; - const int tmp_row_min = x->mv_row_min; - const int tmp_row_max = x->mv_row_max; - MV ref_full; - int cost_list[5]; - - // Further step/diamond searches as necessary - int step_param = mv_sf->reduce_first_step_size; - step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2); - - vp10_set_mv_search_range(x, ref_mv); - - ref_full.col = ref_mv->col >> 3; - ref_full.row = ref_mv->row >> 3; - - /*cpi->sf.search_method == HEX*/ - vp10_hex_search(x, &ref_full, step_param, x->errorperbit, 0, - cond_cost_list(cpi, cost_list), - &v_fn_ptr, 0, ref_mv, dst_mv); - - // Try sub-pixel MC - // if (bestsme > error_thresh && bestsme < INT_MAX) - { - int distortion; - unsigned int sse; - cpi->find_fractional_mv_step( - x, dst_mv, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit, - &v_fn_ptr, 0, mv_sf->subpel_iters_per_step, - cond_cost_list(cpi, cost_list), - NULL, NULL, - &distortion, 
&sse, NULL, 0, 0); - } - - xd->mi[0]->mbmi.mode = NEWMV; - xd->mi[0]->mbmi.mv[0].as_mv = *dst_mv; - - vp10_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16); - - /* restore UMV window */ - x->mv_col_min = tmp_col_min; - x->mv_col_max = tmp_col_max; - x->mv_row_min = tmp_row_min; - x->mv_row_max = tmp_row_max; - - return vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, - xd->plane[0].dst.buf, xd->plane[0].dst.stride); -} - -static int do_16x16_motion_search(VP10_COMP *cpi, const MV *ref_mv, - int_mv *dst_mv, int mb_row, int mb_col) { - MACROBLOCK *const x = &cpi->td.mb; - MACROBLOCKD *const xd = &x->e_mbd; - unsigned int err, tmp_err; - MV tmp_mv; - - // Try zero MV first - // FIXME should really use something like near/nearest MV and/or MV prediction - err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, - xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride); - dst_mv->as_int = 0; - - // Test last reference frame using the previous best mv as the - // starting point (best reference) for the search - tmp_err = do_16x16_motion_iteration(cpi, ref_mv, &tmp_mv, mb_row, mb_col); - if (tmp_err < err) { - err = tmp_err; - dst_mv->as_mv = tmp_mv; - } - - // If the current best reference mv is not centered on 0,0 then do a 0,0 - // based search as well. 
- if (ref_mv->row != 0 || ref_mv->col != 0) { - unsigned int tmp_err; - MV zero_ref_mv = {0, 0}, tmp_mv; - - tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv, - mb_row, mb_col); - if (tmp_err < err) { - dst_mv->as_mv = tmp_mv; - err = tmp_err; - } - } - - return err; -} - -static int do_16x16_zerozero_search(VP10_COMP *cpi, int_mv *dst_mv) { - MACROBLOCK *const x = &cpi->td.mb; - MACROBLOCKD *const xd = &x->e_mbd; - unsigned int err; - - // Try zero MV first - // FIXME should really use something like near/nearest MV and/or MV prediction - err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, - xd->plane[0].pre[0].buf, xd->plane[0].pre[0].stride); - - dst_mv->as_int = 0; - - return err; -} -static int find_best_16x16_intra(VP10_COMP *cpi, PREDICTION_MODE *pbest_mode) { - MACROBLOCK *const x = &cpi->td.mb; - MACROBLOCKD *const xd = &x->e_mbd; - PREDICTION_MODE best_mode = -1, mode; - unsigned int best_err = INT_MAX; - - // calculate SATD for each intra prediction mode; - // we're intentionally not doing 4x4, we just want a rough estimate - for (mode = DC_PRED; mode <= TM_PRED; mode++) { - unsigned int err; - - xd->mi[0]->mbmi.mode = mode; - vp10_predict_intra_block(xd, 2, 2, TX_16X16, mode, - x->plane[0].src.buf, x->plane[0].src.stride, - xd->plane[0].dst.buf, xd->plane[0].dst.stride, - 0, 0, 0); - err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, - xd->plane[0].dst.buf, xd->plane[0].dst.stride); - - // find best - if (err < best_err) { - best_err = err; - best_mode = mode; - } - } - - if (pbest_mode) - *pbest_mode = best_mode; - - return best_err; -} - -static void update_mbgraph_mb_stats -( - VP10_COMP *cpi, - MBGRAPH_MB_STATS *stats, - YV12_BUFFER_CONFIG *buf, - int mb_y_offset, - YV12_BUFFER_CONFIG *golden_ref, - const MV *prev_golden_ref_mv, - YV12_BUFFER_CONFIG *alt_ref, - int mb_row, - int mb_col -) { - MACROBLOCK *const x = &cpi->td.mb; - MACROBLOCKD *const xd = &x->e_mbd; - int intra_error; - VP10_COMMON *cm = 
&cpi->common; - - // FIXME in practice we're completely ignoring chroma here - x->plane[0].src.buf = buf->y_buffer + mb_y_offset; - x->plane[0].src.stride = buf->y_stride; - - xd->plane[0].dst.buf = get_frame_new_buffer(cm)->y_buffer + mb_y_offset; - xd->plane[0].dst.stride = get_frame_new_buffer(cm)->y_stride; - - // do intra 16x16 prediction - intra_error = find_best_16x16_intra(cpi, - &stats->ref[INTRA_FRAME].m.mode); - if (intra_error <= 0) - intra_error = 1; - stats->ref[INTRA_FRAME].err = intra_error; - - // Golden frame MV search, if it exists and is different than last frame - if (golden_ref) { - int g_motion_error; - xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset; - xd->plane[0].pre[0].stride = golden_ref->y_stride; - g_motion_error = do_16x16_motion_search(cpi, - prev_golden_ref_mv, - &stats->ref[GOLDEN_FRAME].m.mv, - mb_row, mb_col); - stats->ref[GOLDEN_FRAME].err = g_motion_error; - } else { - stats->ref[GOLDEN_FRAME].err = INT_MAX; - stats->ref[GOLDEN_FRAME].m.mv.as_int = 0; - } - - // Do an Alt-ref frame MV search, if it exists and is different than - // last/golden frame. 
- if (alt_ref) { - int a_motion_error; - xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset; - xd->plane[0].pre[0].stride = alt_ref->y_stride; - a_motion_error = do_16x16_zerozero_search(cpi, - &stats->ref[ALTREF_FRAME].m.mv); - - stats->ref[ALTREF_FRAME].err = a_motion_error; - } else { - stats->ref[ALTREF_FRAME].err = INT_MAX; - stats->ref[ALTREF_FRAME].m.mv.as_int = 0; - } -} - -static void update_mbgraph_frame_stats(VP10_COMP *cpi, - MBGRAPH_FRAME_STATS *stats, - YV12_BUFFER_CONFIG *buf, - YV12_BUFFER_CONFIG *golden_ref, - YV12_BUFFER_CONFIG *alt_ref) { - MACROBLOCK *const x = &cpi->td.mb; - MACROBLOCKD *const xd = &x->e_mbd; - VP10_COMMON *const cm = &cpi->common; - - int mb_col, mb_row, offset = 0; - int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0; - MV gld_top_mv = {0, 0}; - MODE_INFO mi_local; - - vp10_zero(mi_local); - // Set up limit values for motion vectors to prevent them extending outside - // the UMV borders. - x->mv_row_min = -BORDER_MV_PIXELS_B16; - x->mv_row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16; - xd->up_available = 0; - xd->plane[0].dst.stride = buf->y_stride; - xd->plane[0].pre[0].stride = buf->y_stride; - xd->plane[1].dst.stride = buf->uv_stride; - xd->mi[0] = &mi_local; - mi_local.mbmi.sb_type = BLOCK_16X16; - mi_local.mbmi.ref_frame[0] = LAST_FRAME; - mi_local.mbmi.ref_frame[1] = NONE; - - for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) { - MV gld_left_mv = gld_top_mv; - int mb_y_in_offset = mb_y_offset; - int arf_y_in_offset = arf_y_offset; - int gld_y_in_offset = gld_y_offset; - - // Set up limit values for motion vectors to prevent them extending outside - // the UMV borders. 
- x->mv_col_min = -BORDER_MV_PIXELS_B16; - x->mv_col_max = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16; - xd->left_available = 0; - - for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) { - MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col]; - - update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset, - golden_ref, &gld_left_mv, alt_ref, - mb_row, mb_col); - gld_left_mv = mb_stats->ref[GOLDEN_FRAME].m.mv.as_mv; - if (mb_col == 0) { - gld_top_mv = gld_left_mv; - } - xd->left_available = 1; - mb_y_in_offset += 16; - gld_y_in_offset += 16; - arf_y_in_offset += 16; - x->mv_col_min -= 16; - x->mv_col_max -= 16; - } - xd->up_available = 1; - mb_y_offset += buf->y_stride * 16; - gld_y_offset += golden_ref->y_stride * 16; - if (alt_ref) - arf_y_offset += alt_ref->y_stride * 16; - x->mv_row_min -= 16; - x->mv_row_max -= 16; - offset += cm->mb_cols; - } -} - -// void separate_arf_mbs_byzz -static void separate_arf_mbs(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - int mb_col, mb_row, offset, i; - int mi_row, mi_col; - int ncnt[4] = { 0 }; - int n_frames = cpi->mbgraph_n_frames; - - int *arf_not_zz; - - CHECK_MEM_ERROR(cm, arf_not_zz, - vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), - 1)); - - // We are not interested in results beyond the alt ref itself. - if (n_frames > cpi->rc.frames_till_gf_update_due) - n_frames = cpi->rc.frames_till_gf_update_due; - - // defer cost to reference frames - for (i = n_frames - 1; i >= 0; i--) { - MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i]; - - for (offset = 0, mb_row = 0; mb_row < cm->mb_rows; - offset += cm->mb_cols, mb_row++) { - for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) { - MBGRAPH_MB_STATS *mb_stats = &frame_stats->mb_stats[offset + mb_col]; - - int altref_err = mb_stats->ref[ALTREF_FRAME].err; - int intra_err = mb_stats->ref[INTRA_FRAME ].err; - int golden_err = mb_stats->ref[GOLDEN_FRAME].err; - - // Test for altref vs intra and gf and that its mv was 0,0. 
- if (altref_err > 1000 || - altref_err > intra_err || - altref_err > golden_err) { - arf_not_zz[offset + mb_col]++; - } - } - } - } - - // arf_not_zz is indexed by MB, but this loop is indexed by MI to avoid out - // of bound access in segmentation_map - for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) { - for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) { - // If any of the blocks in the sequence failed then the MB - // goes in segment 0 - if (arf_not_zz[mi_row / 2 * cm->mb_cols + mi_col / 2]) { - ncnt[0]++; - cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 0; - } else { - cpi->segmentation_map[mi_row * cm->mi_cols + mi_col] = 1; - ncnt[1]++; - } - } - } - - // Only bother with segmentation if over 10% of the MBs in static segment - // if ( ncnt[1] && (ncnt[0] / ncnt[1] < 10) ) - if (1) { - // Note % of blocks that are marked as static - if (cm->MBs) - cpi->static_mb_pct = (ncnt[1] * 100) / (cm->mi_rows * cm->mi_cols); - - // This error case should not be reachable as this function should - // never be called with the common data structure uninitialized. 
- else - cpi->static_mb_pct = 0; - - vp10_enable_segmentation(&cm->seg); - } else { - cpi->static_mb_pct = 0; - vp10_disable_segmentation(&cm->seg); - } - - // Free localy allocated storage - vpx_free(arf_not_zz); -} - -void vp10_update_mbgraph_stats(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - int i, n_frames = vp10_lookahead_depth(cpi->lookahead); - YV12_BUFFER_CONFIG *golden_ref = get_ref_frame_buffer(cpi, GOLDEN_FRAME); - - assert(golden_ref != NULL); - - // we need to look ahead beyond where the ARF transitions into - // being a GF - so exit if we don't look ahead beyond that - if (n_frames <= cpi->rc.frames_till_gf_update_due) - return; - - if (n_frames > MAX_LAG_BUFFERS) - n_frames = MAX_LAG_BUFFERS; - - cpi->mbgraph_n_frames = n_frames; - for (i = 0; i < n_frames; i++) { - MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i]; - memset(frame_stats->mb_stats, 0, - cm->mb_rows * cm->mb_cols * sizeof(*cpi->mbgraph_stats[i].mb_stats)); - } - - // do motion search to find contribution of each reference to data - // later on in this GF group - // FIXME really, the GF/last MC search should be done forward, and - // the ARF MC search backwards, to get optimal results for MV caching - for (i = 0; i < n_frames; i++) { - MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i]; - struct lookahead_entry *q_cur = vp10_lookahead_peek(cpi->lookahead, i); - - assert(q_cur != NULL); - - update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img, - golden_ref, cpi->Source); - } - - vpx_clear_system_state(); - - separate_arf_mbs(cpi); -} diff --git a/libs/libvpx/vp10/encoder/mbgraph.h b/libs/libvpx/vp10/encoder/mbgraph.h deleted file mode 100644 index 3408464c55..0000000000 --- a/libs/libvpx/vp10/encoder/mbgraph.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_ENCODER_MBGRAPH_H_ -#define VP10_ENCODER_MBGRAPH_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct { - struct { - int err; - union { - int_mv mv; - PREDICTION_MODE mode; - } m; - } ref[MAX_REF_FRAMES]; -} MBGRAPH_MB_STATS; - -typedef struct { - MBGRAPH_MB_STATS *mb_stats; -} MBGRAPH_FRAME_STATS; - -struct VP10_COMP; - -void vp10_update_mbgraph_stats(struct VP10_COMP *cpi); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_MBGRAPH_H_ diff --git a/libs/libvpx/vp10/encoder/mcomp.c b/libs/libvpx/vp10/encoder/mcomp.c deleted file mode 100644 index 2c1c591c50..0000000000 --- a/libs/libvpx/vp10/encoder/mcomp.c +++ /dev/null @@ -1,2498 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include -#include - -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" - -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/mem.h" - -#include "vp10/common/common.h" -#include "vp10/common/reconinter.h" - -#include "vp10/encoder/encoder.h" -#include "vp10/encoder/mcomp.h" - -// #define NEW_DIAMOND_SEARCH - -static INLINE const uint8_t *get_buf_from_mv(const struct buf_2d *buf, - const MV *mv) { - return &buf->buf[mv->row * buf->stride + mv->col]; -} - -void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv) { - int col_min = (mv->col >> 3) - MAX_FULL_PEL_VAL + (mv->col & 7 ? 1 : 0); - int row_min = (mv->row >> 3) - MAX_FULL_PEL_VAL + (mv->row & 7 ? 1 : 0); - int col_max = (mv->col >> 3) + MAX_FULL_PEL_VAL; - int row_max = (mv->row >> 3) + MAX_FULL_PEL_VAL; - - col_min = VPXMAX(col_min, (MV_LOW >> 3) + 1); - row_min = VPXMAX(row_min, (MV_LOW >> 3) + 1); - col_max = VPXMIN(col_max, (MV_UPP >> 3) - 1); - row_max = VPXMIN(row_max, (MV_UPP >> 3) - 1); - - // Get intersection of UMV window and valid MV window to reduce # of checks - // in diamond search. - if (x->mv_col_min < col_min) - x->mv_col_min = col_min; - if (x->mv_col_max > col_max) - x->mv_col_max = col_max; - if (x->mv_row_min < row_min) - x->mv_row_min = row_min; - if (x->mv_row_max > row_max) - x->mv_row_max = row_max; -} - -int vp10_init_search_range(int size) { - int sr = 0; - // Minimum search size no matter what the passed in value. 
- size = VPXMAX(16, size); - - while ((size << sr) < MAX_FULL_PEL_VAL) - sr++; - - sr = VPXMIN(sr, MAX_MVSEARCH_STEPS - 2); - return sr; -} - -static INLINE int mv_cost(const MV *mv, - const int *joint_cost, int *const comp_cost[2]) { - return joint_cost[vp10_get_mv_joint(mv)] + - comp_cost[0][mv->row] + comp_cost[1][mv->col]; -} - -int vp10_mv_bit_cost(const MV *mv, const MV *ref, - const int *mvjcost, int *mvcost[2], int weight) { - const MV diff = { mv->row - ref->row, - mv->col - ref->col }; - return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * weight, 7); -} - -static int mv_err_cost(const MV *mv, const MV *ref, - const int *mvjcost, int *mvcost[2], - int error_per_bit) { - if (mvcost) { - const MV diff = { mv->row - ref->row, - mv->col - ref->col }; - return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * - error_per_bit, 13); - } - return 0; -} - -static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref, - int error_per_bit) { - const MV diff = { mv->row - ref->row, - mv->col - ref->col }; - return ROUND_POWER_OF_TWO(mv_cost(&diff, x->nmvjointsadcost, - x->nmvsadcost) * error_per_bit, 8); -} - -void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride) { - int len, ss_count = 1; - - cfg->ss[0].mv.col = cfg->ss[0].mv.row = 0; - cfg->ss[0].offset = 0; - - for (len = MAX_FIRST_STEP; len > 0; len /= 2) { - // Generate offsets for 4 search sites per step. - const MV ss_mvs[] = {{-len, 0}, {len, 0}, {0, -len}, {0, len}}; - int i; - for (i = 0; i < 4; ++i) { - search_site *const ss = &cfg->ss[ss_count++]; - ss->mv = ss_mvs[i]; - ss->offset = ss->mv.row * stride + ss->mv.col; - } - } - - cfg->ss_count = ss_count; - cfg->searches_per_step = 4; -} - -void vp10_init3smotion_compensation(search_site_config *cfg, int stride) { - int len, ss_count = 1; - - cfg->ss[0].mv.col = cfg->ss[0].mv.row = 0; - cfg->ss[0].offset = 0; - - for (len = MAX_FIRST_STEP; len > 0; len /= 2) { - // Generate offsets for 8 search sites per step. 
- const MV ss_mvs[8] = { - {-len, 0 }, {len, 0 }, { 0, -len}, {0, len}, - {-len, -len}, {-len, len}, {len, -len}, {len, len} - }; - int i; - for (i = 0; i < 8; ++i) { - search_site *const ss = &cfg->ss[ss_count++]; - ss->mv = ss_mvs[i]; - ss->offset = ss->mv.row * stride + ss->mv.col; - } - } - - cfg->ss_count = ss_count; - cfg->searches_per_step = 8; -} - -/* - * To avoid the penalty for crossing cache-line read, preload the reference - * area in a small buffer, which is aligned to make sure there won't be crossing - * cache-line read while reading from this buffer. This reduced the cpu - * cycles spent on reading ref data in sub-pixel filter functions. - * TODO: Currently, since sub-pixel search range here is -3 ~ 3, copy 22 rows x - * 32 cols area that is enough for 16x16 macroblock. Later, for SPLITMV, we - * could reduce the area. - */ - -/* estimated cost of a motion vector (r,c) */ -#define MVC(r, c) \ - (mvcost ? \ - ((mvjcost[((r) != rr) * 2 + ((c) != rc)] + \ - mvcost[0][((r) - rr)] + mvcost[1][((c) - rc)]) * \ - error_per_bit + 4096) >> 13 : 0) - - -// convert motion vector component to offset for sv[a]f calc -static INLINE int sp(int x) { - return x & 7; -} - -static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) { - return &buf[(r >> 3) * stride + (c >> 3)]; -} - -/* checks if (r, c) has better score than previous best */ -#define CHECK_BETTER(v, r, c) \ - if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \ - if (second_pred == NULL) \ - thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \ - src_stride, &sse); \ - else \ - thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \ - z, src_stride, &sse, second_pred); \ - if ((v = MVC(r, c) + thismse) < besterr) { \ - besterr = v; \ - br = r; \ - bc = c; \ - *distortion = thismse; \ - *sse1 = sse; \ - } \ - } else { \ - v = INT_MAX; \ - } - -#define FIRST_LEVEL_CHECKS \ - { \ - unsigned int left, right, up, down, diag; \ - CHECK_BETTER(left, 
tr, tc - hstep); \ - CHECK_BETTER(right, tr, tc + hstep); \ - CHECK_BETTER(up, tr - hstep, tc); \ - CHECK_BETTER(down, tr + hstep, tc); \ - whichdir = (left < right ? 0 : 1) + \ - (up < down ? 0 : 2); \ - switch (whichdir) { \ - case 0: \ - CHECK_BETTER(diag, tr - hstep, tc - hstep); \ - break; \ - case 1: \ - CHECK_BETTER(diag, tr - hstep, tc + hstep); \ - break; \ - case 2: \ - CHECK_BETTER(diag, tr + hstep, tc - hstep); \ - break; \ - case 3: \ - CHECK_BETTER(diag, tr + hstep, tc + hstep); \ - break; \ - } \ - } - -#define SECOND_LEVEL_CHECKS \ - { \ - int kr, kc; \ - unsigned int second; \ - if (tr != br && tc != bc) { \ - kr = br - tr; \ - kc = bc - tc; \ - CHECK_BETTER(second, tr + kr, tc + 2 * kc); \ - CHECK_BETTER(second, tr + 2 * kr, tc + kc); \ - } else if (tr == br && tc != bc) { \ - kc = bc - tc; \ - CHECK_BETTER(second, tr + hstep, tc + 2 * kc); \ - CHECK_BETTER(second, tr - hstep, tc + 2 * kc); \ - switch (whichdir) { \ - case 0: \ - case 1: \ - CHECK_BETTER(second, tr + hstep, tc + kc); \ - break; \ - case 2: \ - case 3: \ - CHECK_BETTER(second, tr - hstep, tc + kc); \ - break; \ - } \ - } else if (tr != br && tc == bc) { \ - kr = br - tr; \ - CHECK_BETTER(second, tr + 2 * kr, tc + hstep); \ - CHECK_BETTER(second, tr + 2 * kr, tc - hstep); \ - switch (whichdir) { \ - case 0: \ - case 2: \ - CHECK_BETTER(second, tr + kr, tc + hstep); \ - break; \ - case 1: \ - case 3: \ - CHECK_BETTER(second, tr + kr, tc - hstep); \ - break; \ - } \ - } \ - } - -// TODO(yunqingwang): SECOND_LEVEL_CHECKS_BEST was a rewrote of -// SECOND_LEVEL_CHECKS, and SECOND_LEVEL_CHECKS should be rewritten -// later in the same way. 
-#define SECOND_LEVEL_CHECKS_BEST \ - { \ - unsigned int second; \ - int br0 = br; \ - int bc0 = bc; \ - assert(tr == br || tc == bc); \ - if (tr == br && tc != bc) { \ - kc = bc - tc; \ - } else if (tr != br && tc == bc) { \ - kr = br - tr; \ - } \ - CHECK_BETTER(second, br0 + kr, bc0); \ - CHECK_BETTER(second, br0, bc0 + kc); \ - if (br0 != br || bc0 != bc) { \ - CHECK_BETTER(second, br0 + kr, bc0 + kc); \ - } \ - } - -#define SETUP_SUBPEL_SEARCH \ - const uint8_t *const z = x->plane[0].src.buf; \ - const int src_stride = x->plane[0].src.stride; \ - const MACROBLOCKD *xd = &x->e_mbd; \ - unsigned int besterr = INT_MAX; \ - unsigned int sse; \ - unsigned int whichdir; \ - int thismse; \ - const unsigned int halfiters = iters_per_step; \ - const unsigned int quarteriters = iters_per_step; \ - const unsigned int eighthiters = iters_per_step; \ - const int y_stride = xd->plane[0].pre[0].stride; \ - const int offset = bestmv->row * y_stride + bestmv->col; \ - const uint8_t *const y = xd->plane[0].pre[0].buf; \ - \ - int rr = ref_mv->row; \ - int rc = ref_mv->col; \ - int br = bestmv->row * 8; \ - int bc = bestmv->col * 8; \ - int hstep = 4; \ - const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \ - const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \ - const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \ - const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \ - int tr = br; \ - int tc = bc; \ - \ - bestmv->row *= 8; \ - bestmv->col *= 8; - -static unsigned int setup_center_error(const MACROBLOCKD *xd, - const MV *bestmv, - const MV *ref_mv, - int error_per_bit, - const vp9_variance_fn_ptr_t *vfp, - const uint8_t *const src, - const int src_stride, - const uint8_t *const y, - int y_stride, - const uint8_t *second_pred, - int w, int h, int offset, - int *mvjcost, int *mvcost[2], - unsigned int *sse1, - int *distortion) { - unsigned int besterr; -#if CONFIG_VP9_HIGHBITDEPTH - if (second_pred != NULL) { - if 
(xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - DECLARE_ALIGNED(16, uint16_t, comp_pred16[64 * 64]); - vpx_highbd_comp_avg_pred(comp_pred16, second_pred, w, h, y + offset, - y_stride); - besterr = vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride, - sse1); - } else { - DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]); - vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride); - besterr = vfp->vf(comp_pred, w, src, src_stride, sse1); - } - } else { - besterr = vfp->vf(y + offset, y_stride, src, src_stride, sse1); - } - *distortion = besterr; - besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit); -#else - (void) xd; - if (second_pred != NULL) { - DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]); - vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride); - besterr = vfp->vf(comp_pred, w, src, src_stride, sse1); - } else { - besterr = vfp->vf(y + offset, y_stride, src, src_stride, sse1); - } - *distortion = besterr; - besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit); -#endif // CONFIG_VP9_HIGHBITDEPTH - return besterr; -} - -static INLINE int divide_and_round(const int n, const int d) { - return ((n < 0) ^ (d < 0)) ? ((n - d / 2) / d) : ((n + d / 2) / d); -} - -static INLINE int is_cost_list_wellbehaved(int *cost_list) { - return cost_list[0] < cost_list[1] && - cost_list[0] < cost_list[2] && - cost_list[0] < cost_list[3] && - cost_list[0] < cost_list[4]; -} - -// Returns surface minima estimate at given precision in 1/2^n bits. -// Assume a model for the cost surface: S = A(x - x0)^2 + B(y - y0)^2 + C -// For a given set of costs S0, S1, S2, S3, S4 at points -// (y, x) = (0, 0), (0, -1), (1, 0), (0, 1) and (-1, 0) respectively, -// the solution for the location of the minima (x0, y0) is given by: -// x0 = 1/2 (S1 - S3)/(S1 + S3 - 2*S0), -// y0 = 1/2 (S4 - S2)/(S4 + S2 - 2*S0). -// The code below is an integerized version of that. 
-static void get_cost_surf_min(int *cost_list, int *ir, int *ic, - int bits) { - *ic = divide_and_round((cost_list[1] - cost_list[3]) * (1 << (bits - 1)), - (cost_list[1] - 2 * cost_list[0] + cost_list[3])); - *ir = divide_and_round((cost_list[4] - cost_list[2]) * (1 << (bits - 1)), - (cost_list[4] - 2 * cost_list[0] + cost_list[2])); -} - -int vp10_find_best_sub_pixel_tree_pruned_evenmore( - const MACROBLOCK *x, - MV *bestmv, const MV *ref_mv, - int allow_hp, - int error_per_bit, - const vp9_variance_fn_ptr_t *vfp, - int forced_stop, - int iters_per_step, - int *cost_list, - int *mvjcost, int *mvcost[2], - int *distortion, - unsigned int *sse1, - const uint8_t *second_pred, - int w, int h) { - SETUP_SUBPEL_SEARCH; - besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, - z, src_stride, y, y_stride, second_pred, - w, h, offset, mvjcost, mvcost, - sse1, distortion); - (void) halfiters; - (void) quarteriters; - (void) eighthiters; - (void) whichdir; - (void) allow_hp; - (void) forced_stop; - (void) hstep; - - if (cost_list && - cost_list[0] != INT_MAX && cost_list[1] != INT_MAX && - cost_list[2] != INT_MAX && cost_list[3] != INT_MAX && - cost_list[4] != INT_MAX && - is_cost_list_wellbehaved(cost_list)) { - int ir, ic; - unsigned int minpt; - get_cost_surf_min(cost_list, &ir, &ic, 2); - if (ir != 0 || ic != 0) { - CHECK_BETTER(minpt, tr + 2 * ir, tc + 2 * ic); - } - } else { - FIRST_LEVEL_CHECKS; - if (halfiters > 1) { - SECOND_LEVEL_CHECKS; - } - - tr = br; - tc = bc; - - // Each subsequent iteration checks at least one point in common with - // the last iteration could be 2 ( if diag selected) 1/4 pel - // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only - if (forced_stop != 2) { - hstep >>= 1; - FIRST_LEVEL_CHECKS; - if (quarteriters > 1) { - SECOND_LEVEL_CHECKS; - } - } - } - - tr = br; - tc = bc; - - if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) { - hstep >>= 1; - FIRST_LEVEL_CHECKS; - if (eighthiters > 1) { - 
SECOND_LEVEL_CHECKS; - } - } - - bestmv->row = br; - bestmv->col = bc; - - if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) || - (abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3))) - return INT_MAX; - - return besterr; -} - -int vp10_find_best_sub_pixel_tree_pruned_more(const MACROBLOCK *x, - MV *bestmv, const MV *ref_mv, - int allow_hp, - int error_per_bit, - const vp9_variance_fn_ptr_t *vfp, - int forced_stop, - int iters_per_step, - int *cost_list, - int *mvjcost, int *mvcost[2], - int *distortion, - unsigned int *sse1, - const uint8_t *second_pred, - int w, int h) { - SETUP_SUBPEL_SEARCH; - besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, - z, src_stride, y, y_stride, second_pred, - w, h, offset, mvjcost, mvcost, - sse1, distortion); - if (cost_list && - cost_list[0] != INT_MAX && cost_list[1] != INT_MAX && - cost_list[2] != INT_MAX && cost_list[3] != INT_MAX && - cost_list[4] != INT_MAX && - is_cost_list_wellbehaved(cost_list)) { - unsigned int minpt; - int ir, ic; - get_cost_surf_min(cost_list, &ir, &ic, 1); - if (ir != 0 || ic != 0) { - CHECK_BETTER(minpt, tr + ir * hstep, tc + ic * hstep); - } - } else { - FIRST_LEVEL_CHECKS; - if (halfiters > 1) { - SECOND_LEVEL_CHECKS; - } - } - - // Each subsequent iteration checks at least one point in common with - // the last iteration could be 2 ( if diag selected) 1/4 pel - - // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only - if (forced_stop != 2) { - tr = br; - tc = bc; - hstep >>= 1; - FIRST_LEVEL_CHECKS; - if (quarteriters > 1) { - SECOND_LEVEL_CHECKS; - } - } - - if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) { - tr = br; - tc = bc; - hstep >>= 1; - FIRST_LEVEL_CHECKS; - if (eighthiters > 1) { - SECOND_LEVEL_CHECKS; - } - } - // These lines insure static analysis doesn't warn that - // tr and tc aren't used after the above point. 
- (void) tr; - (void) tc; - - bestmv->row = br; - bestmv->col = bc; - - if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) || - (abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3))) - return INT_MAX; - - return besterr; -} - -int vp10_find_best_sub_pixel_tree_pruned(const MACROBLOCK *x, - MV *bestmv, const MV *ref_mv, - int allow_hp, - int error_per_bit, - const vp9_variance_fn_ptr_t *vfp, - int forced_stop, - int iters_per_step, - int *cost_list, - int *mvjcost, int *mvcost[2], - int *distortion, - unsigned int *sse1, - const uint8_t *second_pred, - int w, int h) { - SETUP_SUBPEL_SEARCH; - besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, - z, src_stride, y, y_stride, second_pred, - w, h, offset, mvjcost, mvcost, - sse1, distortion); - if (cost_list && - cost_list[0] != INT_MAX && cost_list[1] != INT_MAX && - cost_list[2] != INT_MAX && cost_list[3] != INT_MAX && - cost_list[4] != INT_MAX) { - unsigned int left, right, up, down, diag; - whichdir = (cost_list[1] < cost_list[3] ? 0 : 1) + - (cost_list[2] < cost_list[4] ? 
0 : 2); - switch (whichdir) { - case 0: - CHECK_BETTER(left, tr, tc - hstep); - CHECK_BETTER(down, tr + hstep, tc); - CHECK_BETTER(diag, tr + hstep, tc - hstep); - break; - case 1: - CHECK_BETTER(right, tr, tc + hstep); - CHECK_BETTER(down, tr + hstep, tc); - CHECK_BETTER(diag, tr + hstep, tc + hstep); - break; - case 2: - CHECK_BETTER(left, tr, tc - hstep); - CHECK_BETTER(up, tr - hstep, tc); - CHECK_BETTER(diag, tr - hstep, tc - hstep); - break; - case 3: - CHECK_BETTER(right, tr, tc + hstep); - CHECK_BETTER(up, tr - hstep, tc); - CHECK_BETTER(diag, tr - hstep, tc + hstep); - break; - } - } else { - FIRST_LEVEL_CHECKS; - if (halfiters > 1) { - SECOND_LEVEL_CHECKS; - } - } - - tr = br; - tc = bc; - - // Each subsequent iteration checks at least one point in common with - // the last iteration could be 2 ( if diag selected) 1/4 pel - - // Note forced_stop: 0 - full, 1 - qtr only, 2 - half only - if (forced_stop != 2) { - hstep >>= 1; - FIRST_LEVEL_CHECKS; - if (quarteriters > 1) { - SECOND_LEVEL_CHECKS; - } - tr = br; - tc = bc; - } - - if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) { - hstep >>= 1; - FIRST_LEVEL_CHECKS; - if (eighthiters > 1) { - SECOND_LEVEL_CHECKS; - } - tr = br; - tc = bc; - } - // These lines insure static analysis doesn't warn that - // tr and tc aren't used after the above point. 
- (void) tr; - (void) tc; - - bestmv->row = br; - bestmv->col = bc; - - if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) || - (abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3))) - return INT_MAX; - - return besterr; -} - -static const MV search_step_table[12] = { - // left, right, up, down - {0, -4}, {0, 4}, {-4, 0}, {4, 0}, - {0, -2}, {0, 2}, {-2, 0}, {2, 0}, - {0, -1}, {0, 1}, {-1, 0}, {1, 0} -}; - -int vp10_find_best_sub_pixel_tree(const MACROBLOCK *x, - MV *bestmv, const MV *ref_mv, - int allow_hp, - int error_per_bit, - const vp9_variance_fn_ptr_t *vfp, - int forced_stop, - int iters_per_step, - int *cost_list, - int *mvjcost, int *mvcost[2], - int *distortion, - unsigned int *sse1, - const uint8_t *second_pred, - int w, int h) { - const uint8_t *const z = x->plane[0].src.buf; - const uint8_t *const src_address = z; - const int src_stride = x->plane[0].src.stride; - const MACROBLOCKD *xd = &x->e_mbd; - unsigned int besterr = INT_MAX; - unsigned int sse; - int thismse; - const int y_stride = xd->plane[0].pre[0].stride; - const int offset = bestmv->row * y_stride + bestmv->col; - const uint8_t *const y = xd->plane[0].pre[0].buf; - - int rr = ref_mv->row; - int rc = ref_mv->col; - int br = bestmv->row * 8; - int bc = bestmv->col * 8; - int hstep = 4; - int iter, round = 3 - forced_stop; - const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); - const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); - const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); - const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); - int tr = br; - int tc = bc; - const MV *search_step = search_step_table; - int idx, best_idx = -1; - unsigned int cost_array[5]; - int kr, kc; - - if (!(allow_hp && vp10_use_mv_hp(ref_mv))) - if (round == 3) - round = 2; - - bestmv->row *= 8; - bestmv->col *= 8; - - besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, - z, src_stride, y, y_stride, second_pred, - w, h, 
offset, mvjcost, mvcost, - sse1, distortion); - - (void) cost_list; // to silence compiler warning - - for (iter = 0; iter < round; ++iter) { - // Check vertical and horizontal sub-pixel positions. - for (idx = 0; idx < 4; ++idx) { - tr = br + search_step[idx].row; - tc = bc + search_step[idx].col; - if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) { - const uint8_t *const pre_address = y + (tr >> 3) * y_stride + (tc >> 3); - MV this_mv; - this_mv.row = tr; - this_mv.col = tc; - if (second_pred == NULL) - thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), - src_address, src_stride, &sse); - else - thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr), - src_address, src_stride, &sse, second_pred); - cost_array[idx] = thismse + - mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit); - - if (cost_array[idx] < besterr) { - best_idx = idx; - besterr = cost_array[idx]; - *distortion = thismse; - *sse1 = sse; - } - } else { - cost_array[idx] = INT_MAX; - } - } - - // Check diagonal sub-pixel position - kc = (cost_array[0] <= cost_array[1] ? -hstep : hstep); - kr = (cost_array[2] <= cost_array[3] ? 
-hstep : hstep); - - tc = bc + kc; - tr = br + kr; - if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) { - const uint8_t *const pre_address = y + (tr >> 3) * y_stride + (tc >> 3); - MV this_mv = {tr, tc}; - if (second_pred == NULL) - thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), - src_address, src_stride, &sse); - else - thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr), - src_address, src_stride, &sse, second_pred); - cost_array[4] = thismse + - mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit); - - if (cost_array[4] < besterr) { - best_idx = 4; - besterr = cost_array[4]; - *distortion = thismse; - *sse1 = sse; - } - } else { - cost_array[idx] = INT_MAX; - } - - if (best_idx < 4 && best_idx >= 0) { - br += search_step[best_idx].row; - bc += search_step[best_idx].col; - } else if (best_idx == 4) { - br = tr; - bc = tc; - } - - if (iters_per_step > 1 && best_idx != -1) - SECOND_LEVEL_CHECKS_BEST; - - tr = br; - tc = bc; - - search_step += 4; - hstep >>= 1; - best_idx = -1; - } - - // Each subsequent iteration checks at least one point in common with - // the last iteration could be 2 ( if diag selected) 1/4 pel - - // These lines insure static analysis doesn't warn that - // tr and tc aren't used after the above point. 
- (void) tr; - (void) tc; - - bestmv->row = br; - bestmv->col = bc; - - if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) || - (abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3))) - return INT_MAX; - - return besterr; -} - -#undef MVC -#undef PRE -#undef CHECK_BETTER - -static INLINE int check_bounds(const MACROBLOCK *x, int row, int col, - int range) { - return ((row - range) >= x->mv_row_min) & - ((row + range) <= x->mv_row_max) & - ((col - range) >= x->mv_col_min) & - ((col + range) <= x->mv_col_max); -} - -static INLINE int is_mv_in(const MACROBLOCK *x, const MV *mv) { - return (mv->col >= x->mv_col_min) && (mv->col <= x->mv_col_max) && - (mv->row >= x->mv_row_min) && (mv->row <= x->mv_row_max); -} - -#define CHECK_BETTER \ - {\ - if (thissad < bestsad) {\ - if (use_mvcost) \ - thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);\ - if (thissad < bestsad) {\ - bestsad = thissad;\ - best_site = i;\ - }\ - }\ - } - -#define MAX_PATTERN_SCALES 11 -#define MAX_PATTERN_CANDIDATES 8 // max number of canddiates per scale -#define PATTERN_CANDIDATES_REF 3 // number of refinement candidates - -// Calculate and return a sad+mvcost list around an integer best pel. 
-static INLINE void calc_int_cost_list(const MACROBLOCK *x, - const MV *ref_mv, - int sadpb, - const vp9_variance_fn_ptr_t *fn_ptr, - const MV *best_mv, - int *cost_list) { - static const MV neighbors[4] = {{0, -1}, {1, 0}, {0, 1}, {-1, 0}}; - const struct buf_2d *const what = &x->plane[0].src; - const struct buf_2d *const in_what = &x->e_mbd.plane[0].pre[0]; - const MV fcenter_mv = {ref_mv->row >> 3, ref_mv->col >> 3}; - int br = best_mv->row; - int bc = best_mv->col; - MV this_mv; - int i; - unsigned int sse; - - this_mv.row = br; - this_mv.col = bc; - cost_list[0] = fn_ptr->vf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride, &sse) + - mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb); - if (check_bounds(x, br, bc, 1)) { - for (i = 0; i < 4; i++) { - const MV this_mv = {br + neighbors[i].row, - bc + neighbors[i].col}; - cost_list[i + 1] = fn_ptr->vf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride, &sse) + - // mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb); - mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost, x->mvcost, - x->errorperbit); - } - } else { - for (i = 0; i < 4; i++) { - const MV this_mv = {br + neighbors[i].row, - bc + neighbors[i].col}; - if (!is_mv_in(x, &this_mv)) - cost_list[i + 1] = INT_MAX; - else - cost_list[i + 1] = fn_ptr->vf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride, &sse) + - // mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb); - mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost, x->mvcost, - x->errorperbit); - } - } -} - -// Generic pattern search function that searches over multiple scales. 
-// Each scale can have a different number of candidates and shape of -// candidates as indicated in the num_candidates and candidates arrays -// passed into this function -// -static int vp10_pattern_search(const MACROBLOCK *x, - MV *ref_mv, - int search_param, - int sad_per_bit, - int do_init_search, - int *cost_list, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost, - const MV *center_mv, - MV *best_mv, - const int num_candidates[MAX_PATTERN_SCALES], - const MV candidates[MAX_PATTERN_SCALES] - [MAX_PATTERN_CANDIDATES]) { - const MACROBLOCKD *const xd = &x->e_mbd; - static const int search_param_to_steps[MAX_MVSEARCH_STEPS] = { - 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, - }; - int i, s, t; - const struct buf_2d *const what = &x->plane[0].src; - const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - int br, bc; - int bestsad = INT_MAX; - int thissad; - int k = -1; - const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - int best_init_s = search_param_to_steps[search_param]; - // adjust ref_mv to make sure it is within MV range - clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); - br = ref_mv->row; - bc = ref_mv->col; - - // Work out the start point for the search - bestsad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, ref_mv), in_what->stride) + - mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit); - - // Search all possible scales upto the search param around the center point - // pick the scale of the point that is best as the starting scale of - // further steps around it. 
- if (do_init_search) { - s = best_init_s; - best_init_s = -1; - for (t = 0; t <= s; ++t) { - int best_site = -1; - if (check_bounds(x, br, bc, 1 << t)) { - for (i = 0; i < num_candidates[t]; i++) { - const MV this_mv = {br + candidates[t][i].row, - bc + candidates[t][i].col}; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } else { - for (i = 0; i < num_candidates[t]; i++) { - const MV this_mv = {br + candidates[t][i].row, - bc + candidates[t][i].col}; - if (!is_mv_in(x, &this_mv)) - continue; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } - if (best_site == -1) { - continue; - } else { - best_init_s = t; - k = best_site; - } - } - if (best_init_s != -1) { - br += candidates[best_init_s][k].row; - bc += candidates[best_init_s][k].col; - } - } - - // If the center point is still the best, just skip this and move to - // the refinement step. 
- if (best_init_s != -1) { - int best_site = -1; - s = best_init_s; - - do { - // No need to search all 6 points the 1st time if initial search was used - if (!do_init_search || s != best_init_s) { - if (check_bounds(x, br, bc, 1 << s)) { - for (i = 0; i < num_candidates[s]; i++) { - const MV this_mv = {br + candidates[s][i].row, - bc + candidates[s][i].col}; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } else { - for (i = 0; i < num_candidates[s]; i++) { - const MV this_mv = {br + candidates[s][i].row, - bc + candidates[s][i].col}; - if (!is_mv_in(x, &this_mv)) - continue; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } - - if (best_site == -1) { - continue; - } else { - br += candidates[s][best_site].row; - bc += candidates[s][best_site].col; - k = best_site; - } - } - - do { - int next_chkpts_indices[PATTERN_CANDIDATES_REF]; - best_site = -1; - next_chkpts_indices[0] = (k == 0) ? num_candidates[s] - 1 : k - 1; - next_chkpts_indices[1] = k; - next_chkpts_indices[2] = (k == num_candidates[s] - 1) ? 
0 : k + 1; - - if (check_bounds(x, br, bc, 1 << s)) { - for (i = 0; i < PATTERN_CANDIDATES_REF; i++) { - const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row, - bc + candidates[s][next_chkpts_indices[i]].col}; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } else { - for (i = 0; i < PATTERN_CANDIDATES_REF; i++) { - const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row, - bc + candidates[s][next_chkpts_indices[i]].col}; - if (!is_mv_in(x, &this_mv)) - continue; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } - - if (best_site != -1) { - k = next_chkpts_indices[best_site]; - br += candidates[s][k].row; - bc += candidates[s][k].col; - } - } while (best_site != -1); - } while (s--); - } - - // Returns the one-away integer pel sad values around the best as follows: - // cost_list[0]: cost at the best integer pel - // cost_list[1]: cost at delta {0, -1} (left) from the best integer pel - // cost_list[2]: cost at delta { 1, 0} (bottom) from the best integer pel - // cost_list[3]: cost at delta { 0, 1} (right) from the best integer pel - // cost_list[4]: cost at delta {-1, 0} (top) from the best integer pel - if (cost_list) { - const MV best_mv = { br, bc }; - calc_int_cost_list(x, &fcenter_mv, sad_per_bit, vfp, &best_mv, cost_list); - } - best_mv->row = br; - best_mv->col = bc; - return bestsad; -} - -// A specialized function where the smallest scale search candidates -// are 4 1-away neighbors, and cost_list is non-null -// TODO(debargha): Merge this function with the one above. Also remove -// use_mvcost option since it is always 1, to save unnecessary branches. 
-static int vp10_pattern_search_sad(const MACROBLOCK *x, - MV *ref_mv, - int search_param, - int sad_per_bit, - int do_init_search, - int *cost_list, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost, - const MV *center_mv, - MV *best_mv, - const int num_candidates[MAX_PATTERN_SCALES], - const MV candidates[MAX_PATTERN_SCALES] - [MAX_PATTERN_CANDIDATES]) { - const MACROBLOCKD *const xd = &x->e_mbd; - static const int search_param_to_steps[MAX_MVSEARCH_STEPS] = { - 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, - }; - int i, s, t; - const struct buf_2d *const what = &x->plane[0].src; - const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - int br, bc; - int bestsad = INT_MAX; - int thissad; - int k = -1; - const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - int best_init_s = search_param_to_steps[search_param]; - // adjust ref_mv to make sure it is within MV range - clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); - br = ref_mv->row; - bc = ref_mv->col; - if (cost_list != NULL) { - cost_list[0] = cost_list[1] = cost_list[2] = cost_list[3] = cost_list[4] = - INT_MAX; - } - - // Work out the start point for the search - bestsad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, ref_mv), in_what->stride) + - mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit); - - // Search all possible scales upto the search param around the center point - // pick the scale of the point that is best as the starting scale of - // further steps around it. 
- if (do_init_search) { - s = best_init_s; - best_init_s = -1; - for (t = 0; t <= s; ++t) { - int best_site = -1; - if (check_bounds(x, br, bc, 1 << t)) { - for (i = 0; i < num_candidates[t]; i++) { - const MV this_mv = {br + candidates[t][i].row, - bc + candidates[t][i].col}; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } else { - for (i = 0; i < num_candidates[t]; i++) { - const MV this_mv = {br + candidates[t][i].row, - bc + candidates[t][i].col}; - if (!is_mv_in(x, &this_mv)) - continue; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } - if (best_site == -1) { - continue; - } else { - best_init_s = t; - k = best_site; - } - } - if (best_init_s != -1) { - br += candidates[best_init_s][k].row; - bc += candidates[best_init_s][k].col; - } - } - - // If the center point is still the best, just skip this and move to - // the refinement step. 
- if (best_init_s != -1) { - int do_sad = (num_candidates[0] == 4 && cost_list != NULL); - int best_site = -1; - s = best_init_s; - - for (; s >= do_sad; s--) { - if (!do_init_search || s != best_init_s) { - if (check_bounds(x, br, bc, 1 << s)) { - for (i = 0; i < num_candidates[s]; i++) { - const MV this_mv = {br + candidates[s][i].row, - bc + candidates[s][i].col}; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } else { - for (i = 0; i < num_candidates[s]; i++) { - const MV this_mv = {br + candidates[s][i].row, - bc + candidates[s][i].col}; - if (!is_mv_in(x, &this_mv)) - continue; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } - - if (best_site == -1) { - continue; - } else { - br += candidates[s][best_site].row; - bc += candidates[s][best_site].col; - k = best_site; - } - } - - do { - int next_chkpts_indices[PATTERN_CANDIDATES_REF]; - best_site = -1; - next_chkpts_indices[0] = (k == 0) ? num_candidates[s] - 1 : k - 1; - next_chkpts_indices[1] = k; - next_chkpts_indices[2] = (k == num_candidates[s] - 1) ? 
0 : k + 1; - - if (check_bounds(x, br, bc, 1 << s)) { - for (i = 0; i < PATTERN_CANDIDATES_REF; i++) { - const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row, - bc + candidates[s][next_chkpts_indices[i]].col}; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } else { - for (i = 0; i < PATTERN_CANDIDATES_REF; i++) { - const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row, - bc + candidates[s][next_chkpts_indices[i]].col}; - if (!is_mv_in(x, &this_mv)) - continue; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } - - if (best_site != -1) { - k = next_chkpts_indices[best_site]; - br += candidates[s][k].row; - bc += candidates[s][k].col; - } - } while (best_site != -1); - } - - // Note: If we enter the if below, then cost_list must be non-NULL. - if (s == 0) { - cost_list[0] = bestsad; - if (!do_init_search || s != best_init_s) { - if (check_bounds(x, br, bc, 1 << s)) { - for (i = 0; i < num_candidates[s]; i++) { - const MV this_mv = {br + candidates[s][i].row, - bc + candidates[s][i].col}; - cost_list[i + 1] = - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } else { - for (i = 0; i < num_candidates[s]; i++) { - const MV this_mv = {br + candidates[s][i].row, - bc + candidates[s][i].col}; - if (!is_mv_in(x, &this_mv)) - continue; - cost_list[i + 1] = - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } - - if (best_site != -1) { - br += candidates[s][best_site].row; - bc += candidates[s][best_site].col; - k = best_site; - } - } - while (best_site != -1) { - int next_chkpts_indices[PATTERN_CANDIDATES_REF]; - best_site = -1; - next_chkpts_indices[0] = (k == 0) ? 
num_candidates[s] - 1 : k - 1; - next_chkpts_indices[1] = k; - next_chkpts_indices[2] = (k == num_candidates[s] - 1) ? 0 : k + 1; - cost_list[1] = cost_list[2] = cost_list[3] = cost_list[4] = INT_MAX; - cost_list[((k + 2) % 4) + 1] = cost_list[0]; - cost_list[0] = bestsad; - - if (check_bounds(x, br, bc, 1 << s)) { - for (i = 0; i < PATTERN_CANDIDATES_REF; i++) { - const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row, - bc + candidates[s][next_chkpts_indices[i]].col}; - cost_list[next_chkpts_indices[i] + 1] = - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } else { - for (i = 0; i < PATTERN_CANDIDATES_REF; i++) { - const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row, - bc + candidates[s][next_chkpts_indices[i]].col}; - if (!is_mv_in(x, &this_mv)) { - cost_list[next_chkpts_indices[i] + 1] = INT_MAX; - continue; - } - cost_list[next_chkpts_indices[i] + 1] = - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - CHECK_BETTER - } - } - - if (best_site != -1) { - k = next_chkpts_indices[best_site]; - br += candidates[s][k].row; - bc += candidates[s][k].col; - } - } - } - } - - // Returns the one-away integer pel sad values around the best as follows: - // cost_list[0]: sad at the best integer pel - // cost_list[1]: sad at delta {0, -1} (left) from the best integer pel - // cost_list[2]: sad at delta { 1, 0} (bottom) from the best integer pel - // cost_list[3]: sad at delta { 0, 1} (right) from the best integer pel - // cost_list[4]: sad at delta {-1, 0} (top) from the best integer pel - if (cost_list) { - static const MV neighbors[4] = {{0, -1}, {1, 0}, {0, 1}, {-1, 0}}; - if (cost_list[0] == INT_MAX) { - cost_list[0] = bestsad; - if (check_bounds(x, br, bc, 1)) { - for (i = 0; i < 4; i++) { - const MV this_mv = { br + neighbors[i].row, - bc + neighbors[i].col }; - cost_list[i + 1] = vfp->sdf(what->buf, 
what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - } - } else { - for (i = 0; i < 4; i++) { - const MV this_mv = {br + neighbors[i].row, - bc + neighbors[i].col}; - if (!is_mv_in(x, &this_mv)) - cost_list[i + 1] = INT_MAX; - else - cost_list[i + 1] = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); - } - } - } else { - if (use_mvcost) { - for (i = 0; i < 4; i++) { - const MV this_mv = {br + neighbors[i].row, - bc + neighbors[i].col}; - if (cost_list[i + 1] != INT_MAX) { - cost_list[i + 1] += - mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit); - } - } - } - } - } - best_mv->row = br; - best_mv->col = bc; - return bestsad; -} - -int vp10_get_mvpred_var(const MACROBLOCK *x, - const MV *best_mv, const MV *center_mv, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost) { - const MACROBLOCKD *const xd = &x->e_mbd; - const struct buf_2d *const what = &x->plane[0].src; - const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const MV mv = {best_mv->row * 8, best_mv->col * 8}; - unsigned int unused; - - return vfp->vf(what->buf, what->stride, - get_buf_from_mv(in_what, best_mv), in_what->stride, &unused) + - (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, - x->mvcost, x->errorperbit) : 0); -} - -int vp10_get_mvpred_av_var(const MACROBLOCK *x, - const MV *best_mv, const MV *center_mv, - const uint8_t *second_pred, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost) { - const MACROBLOCKD *const xd = &x->e_mbd; - const struct buf_2d *const what = &x->plane[0].src; - const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const MV mv = {best_mv->row * 8, best_mv->col * 8}; - unsigned int unused; - - return vfp->svaf(get_buf_from_mv(in_what, best_mv), in_what->stride, 0, 0, - what->buf, what->stride, &unused, second_pred) + - (use_mvcost ? 
mv_err_cost(&mv, center_mv, x->nmvjointcost, - x->mvcost, x->errorperbit) : 0); -} - -int vp10_hex_search(const MACROBLOCK *x, - MV *ref_mv, - int search_param, - int sad_per_bit, - int do_init_search, - int *cost_list, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost, - const MV *center_mv, MV *best_mv) { - // First scale has 8-closest points, the rest have 6 points in hex shape - // at increasing scales - static const int hex_num_candidates[MAX_PATTERN_SCALES] = { - 8, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 - }; - // Note that the largest candidate step at each scale is 2^scale - static const MV hex_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = { - {{-1, -1}, {0, -1}, {1, -1}, {1, 0}, {1, 1}, { 0, 1}, { -1, 1}, {-1, 0}}, - {{-1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0}}, - {{-2, -4}, {2, -4}, {4, 0}, {2, 4}, { -2, 4}, { -4, 0}}, - {{-4, -8}, {4, -8}, {8, 0}, {4, 8}, { -4, 8}, { -8, 0}}, - {{-8, -16}, {8, -16}, {16, 0}, {8, 16}, { -8, 16}, { -16, 0}}, - {{-16, -32}, {16, -32}, {32, 0}, {16, 32}, { -16, 32}, { -32, 0}}, - {{-32, -64}, {32, -64}, {64, 0}, {32, 64}, { -32, 64}, { -64, 0}}, - {{-64, -128}, {64, -128}, {128, 0}, {64, 128}, { -64, 128}, { -128, 0}}, - {{-128, -256}, {128, -256}, {256, 0}, {128, 256}, { -128, 256}, { -256, 0}}, - {{-256, -512}, {256, -512}, {512, 0}, {256, 512}, { -256, 512}, { -512, 0}}, - {{-512, -1024}, {512, -1024}, {1024, 0}, {512, 1024}, { -512, 1024}, - { -1024, 0}}, - }; - return vp10_pattern_search(x, ref_mv, search_param, sad_per_bit, - do_init_search, cost_list, vfp, use_mvcost, - center_mv, best_mv, - hex_num_candidates, hex_candidates); -} - -int vp10_bigdia_search(const MACROBLOCK *x, - MV *ref_mv, - int search_param, - int sad_per_bit, - int do_init_search, - int *cost_list, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost, - const MV *center_mv, - MV *best_mv) { - // First scale has 4-closest points, the rest have 8 points in diamond - // shape at increasing scales - static const int 
bigdia_num_candidates[MAX_PATTERN_SCALES] = { - 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - }; - // Note that the largest candidate step at each scale is 2^scale - static const MV bigdia_candidates[MAX_PATTERN_SCALES] - [MAX_PATTERN_CANDIDATES] = { - {{0, -1}, {1, 0}, { 0, 1}, {-1, 0}}, - {{-1, -1}, {0, -2}, {1, -1}, {2, 0}, {1, 1}, {0, 2}, {-1, 1}, {-2, 0}}, - {{-2, -2}, {0, -4}, {2, -2}, {4, 0}, {2, 2}, {0, 4}, {-2, 2}, {-4, 0}}, - {{-4, -4}, {0, -8}, {4, -4}, {8, 0}, {4, 4}, {0, 8}, {-4, 4}, {-8, 0}}, - {{-8, -8}, {0, -16}, {8, -8}, {16, 0}, {8, 8}, {0, 16}, {-8, 8}, {-16, 0}}, - {{-16, -16}, {0, -32}, {16, -16}, {32, 0}, {16, 16}, {0, 32}, - {-16, 16}, {-32, 0}}, - {{-32, -32}, {0, -64}, {32, -32}, {64, 0}, {32, 32}, {0, 64}, - {-32, 32}, {-64, 0}}, - {{-64, -64}, {0, -128}, {64, -64}, {128, 0}, {64, 64}, {0, 128}, - {-64, 64}, {-128, 0}}, - {{-128, -128}, {0, -256}, {128, -128}, {256, 0}, {128, 128}, {0, 256}, - {-128, 128}, {-256, 0}}, - {{-256, -256}, {0, -512}, {256, -256}, {512, 0}, {256, 256}, {0, 512}, - {-256, 256}, {-512, 0}}, - {{-512, -512}, {0, -1024}, {512, -512}, {1024, 0}, {512, 512}, {0, 1024}, - {-512, 512}, {-1024, 0}}, - }; - return vp10_pattern_search_sad(x, ref_mv, search_param, sad_per_bit, - do_init_search, cost_list, vfp, use_mvcost, - center_mv, best_mv, - bigdia_num_candidates, bigdia_candidates); -} - -int vp10_square_search(const MACROBLOCK *x, - MV *ref_mv, - int search_param, - int sad_per_bit, - int do_init_search, - int *cost_list, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost, - const MV *center_mv, - MV *best_mv) { - // All scales have 8 closest points in square shape - static const int square_num_candidates[MAX_PATTERN_SCALES] = { - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - }; - // Note that the largest candidate step at each scale is 2^scale - static const MV square_candidates[MAX_PATTERN_SCALES] - [MAX_PATTERN_CANDIDATES] = { - {{-1, -1}, {0, -1}, {1, -1}, {1, 0}, {1, 1}, {0, 1}, {-1, 1}, {-1, 0}}, - {{-2, -2}, {0, -2}, {2, -2}, {2, 
0}, {2, 2}, {0, 2}, {-2, 2}, {-2, 0}}, - {{-4, -4}, {0, -4}, {4, -4}, {4, 0}, {4, 4}, {0, 4}, {-4, 4}, {-4, 0}}, - {{-8, -8}, {0, -8}, {8, -8}, {8, 0}, {8, 8}, {0, 8}, {-8, 8}, {-8, 0}}, - {{-16, -16}, {0, -16}, {16, -16}, {16, 0}, {16, 16}, {0, 16}, - {-16, 16}, {-16, 0}}, - {{-32, -32}, {0, -32}, {32, -32}, {32, 0}, {32, 32}, {0, 32}, - {-32, 32}, {-32, 0}}, - {{-64, -64}, {0, -64}, {64, -64}, {64, 0}, {64, 64}, {0, 64}, - {-64, 64}, {-64, 0}}, - {{-128, -128}, {0, -128}, {128, -128}, {128, 0}, {128, 128}, {0, 128}, - {-128, 128}, {-128, 0}}, - {{-256, -256}, {0, -256}, {256, -256}, {256, 0}, {256, 256}, {0, 256}, - {-256, 256}, {-256, 0}}, - {{-512, -512}, {0, -512}, {512, -512}, {512, 0}, {512, 512}, {0, 512}, - {-512, 512}, {-512, 0}}, - {{-1024, -1024}, {0, -1024}, {1024, -1024}, {1024, 0}, {1024, 1024}, - {0, 1024}, {-1024, 1024}, {-1024, 0}}, - }; - return vp10_pattern_search(x, ref_mv, search_param, sad_per_bit, - do_init_search, cost_list, vfp, use_mvcost, - center_mv, best_mv, - square_num_candidates, square_candidates); -} - -int vp10_fast_hex_search(const MACROBLOCK *x, - MV *ref_mv, - int search_param, - int sad_per_bit, - int do_init_search, // must be zero for fast_hex - int *cost_list, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost, - const MV *center_mv, - MV *best_mv) { - return vp10_hex_search( - x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit, - do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv); -} - -int vp10_fast_dia_search(const MACROBLOCK *x, - MV *ref_mv, - int search_param, - int sad_per_bit, - int do_init_search, - int *cost_list, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost, - const MV *center_mv, - MV *best_mv) { - return vp10_bigdia_search( - x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit, - do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv); -} - -#undef CHECK_BETTER - -// Exhuastive motion search around a given centre position with a given -// step 
size. -static int exhuastive_mesh_search(const MACROBLOCK *x, - MV *ref_mv, MV *best_mv, - int range, int step, int sad_per_bit, - const vp9_variance_fn_ptr_t *fn_ptr, - const MV *center_mv) { - const MACROBLOCKD *const xd = &x->e_mbd; - const struct buf_2d *const what = &x->plane[0].src; - const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - MV fcenter_mv = {center_mv->row, center_mv->col}; - unsigned int best_sad = INT_MAX; - int r, c, i; - int start_col, end_col, start_row, end_row; - int col_step = (step > 1) ? step : 4; - - assert(step >= 1); - - clamp_mv(&fcenter_mv, x->mv_col_min, x->mv_col_max, - x->mv_row_min, x->mv_row_max); - *best_mv = fcenter_mv; - best_sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &fcenter_mv), in_what->stride) + - mvsad_err_cost(x, &fcenter_mv, ref_mv, sad_per_bit); - start_row = VPXMAX(-range, x->mv_row_min - fcenter_mv.row); - start_col = VPXMAX(-range, x->mv_col_min - fcenter_mv.col); - end_row = VPXMIN(range, x->mv_row_max - fcenter_mv.row); - end_col = VPXMIN(range, x->mv_col_max - fcenter_mv.col); - - for (r = start_row; r <= end_row; r += step) { - for (c = start_col; c <= end_col; c += col_step) { - // Step > 1 means we are not checking every location in this pass. 
- if (step > 1) { - const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c}; - unsigned int sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &mv), in_what->stride); - if (sad < best_sad) { - sad += mvsad_err_cost(x, &mv, ref_mv, sad_per_bit); - if (sad < best_sad) { - best_sad = sad; - *best_mv = mv; - } - } - } else { - // 4 sads in a single call if we are checking every location - if (c + 3 <= end_col) { - unsigned int sads[4]; - const uint8_t *addrs[4]; - for (i = 0; i < 4; ++i) { - const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c + i}; - addrs[i] = get_buf_from_mv(in_what, &mv); - } - fn_ptr->sdx4df(what->buf, what->stride, addrs, - in_what->stride, sads); - - for (i = 0; i < 4; ++i) { - if (sads[i] < best_sad) { - const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c + i}; - const unsigned int sad = sads[i] + - mvsad_err_cost(x, &mv, ref_mv, sad_per_bit); - if (sad < best_sad) { - best_sad = sad; - *best_mv = mv; - } - } - } - } else { - for (i = 0; i < end_col - c; ++i) { - const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c + i}; - unsigned int sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &mv), in_what->stride); - if (sad < best_sad) { - sad += mvsad_err_cost(x, &mv, ref_mv, sad_per_bit); - if (sad < best_sad) { - best_sad = sad; - *best_mv = mv; - } - } - } - } - } - } - } - - return best_sad; -} - -int vp10_diamond_search_sad_c(const MACROBLOCK *x, - const search_site_config *cfg, - MV *ref_mv, MV *best_mv, int search_param, - int sad_per_bit, int *num00, - const vp9_variance_fn_ptr_t *fn_ptr, - const MV *center_mv) { - int i, j, step; - - const MACROBLOCKD *const xd = &x->e_mbd; - uint8_t *what = x->plane[0].src.buf; - const int what_stride = x->plane[0].src.stride; - const uint8_t *in_what; - const int in_what_stride = xd->plane[0].pre[0].stride; - const uint8_t *best_address; - - unsigned int bestsad = INT_MAX; - int best_site = 0; - int last_site = 0; - - int ref_row; - int ref_col; - - // 
search_param determines the length of the initial step and hence the number - // of iterations. - // 0 = initial step (MAX_FIRST_STEP) pel - // 1 = (MAX_FIRST_STEP/2) pel, - // 2 = (MAX_FIRST_STEP/4) pel... - const search_site *ss = &cfg->ss[search_param * cfg->searches_per_step]; - const int tot_steps = (cfg->ss_count / cfg->searches_per_step) - search_param; - - const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); - ref_row = ref_mv->row; - ref_col = ref_mv->col; - *num00 = 0; - best_mv->row = ref_row; - best_mv->col = ref_col; - - // Work out the start point for the search - in_what = xd->plane[0].pre[0].buf + ref_row * in_what_stride + ref_col; - best_address = in_what; - - // Check the starting position - bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) - + mvsad_err_cost(x, best_mv, &fcenter_mv, sad_per_bit); - - i = 1; - - for (step = 0; step < tot_steps; step++) { - int all_in = 1, t; - - // All_in is true if every one of the points we are checking are within - // the bounds of the image. - all_in &= ((best_mv->row + ss[i].mv.row) > x->mv_row_min); - all_in &= ((best_mv->row + ss[i + 1].mv.row) < x->mv_row_max); - all_in &= ((best_mv->col + ss[i + 2].mv.col) > x->mv_col_min); - all_in &= ((best_mv->col + ss[i + 3].mv.col) < x->mv_col_max); - - // If all the pixels are within the bounds we don't check whether the - // search point is valid in this loop, otherwise we check each point - // for validity.. 
- if (all_in) { - unsigned int sad_array[4]; - - for (j = 0; j < cfg->searches_per_step; j += 4) { - unsigned char const *block_offset[4]; - - for (t = 0; t < 4; t++) - block_offset[t] = ss[i + t].offset + best_address; - - fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, - sad_array); - - for (t = 0; t < 4; t++, i++) { - if (sad_array[t] < bestsad) { - const MV this_mv = {best_mv->row + ss[i].mv.row, - best_mv->col + ss[i].mv.col}; - sad_array[t] += mvsad_err_cost(x, &this_mv, &fcenter_mv, - sad_per_bit); - if (sad_array[t] < bestsad) { - bestsad = sad_array[t]; - best_site = i; - } - } - } - } - } else { - for (j = 0; j < cfg->searches_per_step; j++) { - // Trap illegal vectors - const MV this_mv = {best_mv->row + ss[i].mv.row, - best_mv->col + ss[i].mv.col}; - - if (is_mv_in(x, &this_mv)) { - const uint8_t *const check_here = ss[i].offset + best_address; - unsigned int thissad = fn_ptr->sdf(what, what_stride, check_here, - in_what_stride); - - if (thissad < bestsad) { - thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit); - if (thissad < bestsad) { - bestsad = thissad; - best_site = i; - } - } - } - i++; - } - } - if (best_site != last_site) { - best_mv->row += ss[best_site].mv.row; - best_mv->col += ss[best_site].mv.col; - best_address += ss[best_site].offset; - last_site = best_site; -#if defined(NEW_DIAMOND_SEARCH) - while (1) { - const MV this_mv = {best_mv->row + ss[best_site].mv.row, - best_mv->col + ss[best_site].mv.col}; - if (is_mv_in(x, &this_mv)) { - const uint8_t *const check_here = ss[best_site].offset + best_address; - unsigned int thissad = fn_ptr->sdf(what, what_stride, check_here, - in_what_stride); - if (thissad < bestsad) { - thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit); - if (thissad < bestsad) { - bestsad = thissad; - best_mv->row += ss[best_site].mv.row; - best_mv->col += ss[best_site].mv.col; - best_address += ss[best_site].offset; - continue; - } - } - } - break; - } -#endif - } else if 
(best_address == in_what) { - (*num00)++; - } - } - return bestsad; -} - -static int vector_match(int16_t *ref, int16_t *src, int bwl) { - int best_sad = INT_MAX; - int this_sad; - int d; - int center, offset = 0; - int bw = 4 << bwl; // redundant variable, to be changed in the experiments. - for (d = 0; d <= bw; d += 16) { - this_sad = vpx_vector_var(&ref[d], src, bwl); - if (this_sad < best_sad) { - best_sad = this_sad; - offset = d; - } - } - center = offset; - - for (d = -8; d <= 8; d += 16) { - int this_pos = offset + d; - // check limit - if (this_pos < 0 || this_pos > bw) - continue; - this_sad = vpx_vector_var(&ref[this_pos], src, bwl); - if (this_sad < best_sad) { - best_sad = this_sad; - center = this_pos; - } - } - offset = center; - - for (d = -4; d <= 4; d += 8) { - int this_pos = offset + d; - // check limit - if (this_pos < 0 || this_pos > bw) - continue; - this_sad = vpx_vector_var(&ref[this_pos], src, bwl); - if (this_sad < best_sad) { - best_sad = this_sad; - center = this_pos; - } - } - offset = center; - - for (d = -2; d <= 2; d += 4) { - int this_pos = offset + d; - // check limit - if (this_pos < 0 || this_pos > bw) - continue; - this_sad = vpx_vector_var(&ref[this_pos], src, bwl); - if (this_sad < best_sad) { - best_sad = this_sad; - center = this_pos; - } - } - offset = center; - - for (d = -1; d <= 1; d += 2) { - int this_pos = offset + d; - // check limit - if (this_pos < 0 || this_pos > bw) - continue; - this_sad = vpx_vector_var(&ref[this_pos], src, bwl); - if (this_sad < best_sad) { - best_sad = this_sad; - center = this_pos; - } - } - - return (center - (bw >> 1)); -} - -static const MV search_pos[4] = { - {-1, 0}, {0, -1}, {0, 1}, {1, 0}, -}; - -unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x, - BLOCK_SIZE bsize, - int mi_row, int mi_col) { - MACROBLOCKD *xd = &x->e_mbd; - MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi; - struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}}; - DECLARE_ALIGNED(16, int16_t, 
hbuf[128]); - DECLARE_ALIGNED(16, int16_t, vbuf[128]); - DECLARE_ALIGNED(16, int16_t, src_hbuf[64]); - DECLARE_ALIGNED(16, int16_t, src_vbuf[64]); - int idx; - const int bw = 4 << b_width_log2_lookup[bsize]; - const int bh = 4 << b_height_log2_lookup[bsize]; - const int search_width = bw << 1; - const int search_height = bh << 1; - const int src_stride = x->plane[0].src.stride; - const int ref_stride = xd->plane[0].pre[0].stride; - uint8_t const *ref_buf, *src_buf; - MV *tmp_mv = &xd->mi[0]->mbmi.mv[0].as_mv; - unsigned int best_sad, tmp_sad, this_sad[4]; - MV this_mv; - const int norm_factor = 3 + (bw >> 5); - const YV12_BUFFER_CONFIG *scaled_ref_frame = - vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]); - - if (scaled_ref_frame) { - int i; - // Swap out the reference frame for a version that's been scaled to - // match the resolution of the current frame, allowing the existing - // motion search code to be used without additional modifications. - for (i = 0; i < MAX_MB_PLANE; i++) - backup_yv12[i] = xd->plane[i].pre[0]; - vp10_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL); - } - -#if CONFIG_VP9_HIGHBITDEPTH - { - unsigned int this_sad; - tmp_mv->row = 0; - tmp_mv->col = 0; - this_sad = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf, src_stride, - xd->plane[0].pre[0].buf, ref_stride); - - if (scaled_ref_frame) { - int i; - for (i = 0; i < MAX_MB_PLANE; i++) - xd->plane[i].pre[0] = backup_yv12[i]; - } - return this_sad; - } -#endif - - // Set up prediction 1-D reference set - ref_buf = xd->plane[0].pre[0].buf - (bw >> 1); - for (idx = 0; idx < search_width; idx += 16) { - vpx_int_pro_row(&hbuf[idx], ref_buf, ref_stride, bh); - ref_buf += 16; - } - - ref_buf = xd->plane[0].pre[0].buf - (bh >> 1) * ref_stride; - for (idx = 0; idx < search_height; ++idx) { - vbuf[idx] = vpx_int_pro_col(ref_buf, bw) >> norm_factor; - ref_buf += ref_stride; - } - - // Set up src 1-D reference set - for (idx = 0; idx < bw; idx += 16) { - src_buf = x->plane[0].src.buf + 
idx; - vpx_int_pro_row(&src_hbuf[idx], src_buf, src_stride, bh); - } - - src_buf = x->plane[0].src.buf; - for (idx = 0; idx < bh; ++idx) { - src_vbuf[idx] = vpx_int_pro_col(src_buf, bw) >> norm_factor; - src_buf += src_stride; - } - - // Find the best match per 1-D search - tmp_mv->col = vector_match(hbuf, src_hbuf, b_width_log2_lookup[bsize]); - tmp_mv->row = vector_match(vbuf, src_vbuf, b_height_log2_lookup[bsize]); - - this_mv = *tmp_mv; - src_buf = x->plane[0].src.buf; - ref_buf = xd->plane[0].pre[0].buf + this_mv.row * ref_stride + this_mv.col; - best_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride, ref_buf, ref_stride); - - { - const uint8_t * const pos[4] = { - ref_buf - ref_stride, - ref_buf - 1, - ref_buf + 1, - ref_buf + ref_stride, - }; - - cpi->fn_ptr[bsize].sdx4df(src_buf, src_stride, pos, ref_stride, this_sad); - } - - for (idx = 0; idx < 4; ++idx) { - if (this_sad[idx] < best_sad) { - best_sad = this_sad[idx]; - tmp_mv->row = search_pos[idx].row + this_mv.row; - tmp_mv->col = search_pos[idx].col + this_mv.col; - } - } - - if (this_sad[0] < this_sad[3]) - this_mv.row -= 1; - else - this_mv.row += 1; - - if (this_sad[1] < this_sad[2]) - this_mv.col -= 1; - else - this_mv.col += 1; - - ref_buf = xd->plane[0].pre[0].buf + this_mv.row * ref_stride + this_mv.col; - - tmp_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride, - ref_buf, ref_stride); - if (best_sad > tmp_sad) { - *tmp_mv = this_mv; - best_sad = tmp_sad; - } - - tmp_mv->row *= 8; - tmp_mv->col *= 8; - - if (scaled_ref_frame) { - int i; - for (i = 0; i < MAX_MB_PLANE; i++) - xd->plane[i].pre[0] = backup_yv12[i]; - } - - return best_sad; -} - -/* do_refine: If last step (1-away) of n-step search doesn't pick the center - point as the best match, we will do a final 1-away diamond - refining search */ -int vp10_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x, - MV *mvp_full, int step_param, - int sadpb, int further_steps, int do_refine, - int *cost_list, - const vp9_variance_fn_ptr_t *fn_ptr, 
- const MV *ref_mv, MV *dst_mv) { - MV temp_mv; - int thissme, n, num00 = 0; - int bestsme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv, - step_param, sadpb, &n, - fn_ptr, ref_mv); - if (bestsme < INT_MAX) - bestsme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1); - *dst_mv = temp_mv; - - // If there won't be more n-step search, check to see if refining search is - // needed. - if (n > further_steps) - do_refine = 0; - - while (n < further_steps) { - ++n; - - if (num00) { - num00--; - } else { - thissme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv, - step_param + n, sadpb, &num00, - fn_ptr, ref_mv); - if (thissme < INT_MAX) - thissme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1); - - // check to see if refining search is needed. - if (num00 > further_steps - n) - do_refine = 0; - - if (thissme < bestsme) { - bestsme = thissme; - *dst_mv = temp_mv; - } - } - } - - // final 1-away diamond refining search - if (do_refine) { - const int search_range = 8; - MV best_mv = *dst_mv; - thissme = vp10_refining_search_sad(x, &best_mv, sadpb, search_range, - fn_ptr, ref_mv); - if (thissme < INT_MAX) - thissme = vp10_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1); - if (thissme < bestsme) { - bestsme = thissme; - *dst_mv = best_mv; - } - } - - // Return cost list. - if (cost_list) { - calc_int_cost_list(x, ref_mv, sadpb, fn_ptr, dst_mv, cost_list); - } - return bestsme; -} - -#define MIN_RANGE 7 -#define MAX_RANGE 256 -#define MIN_INTERVAL 1 -// Runs an limited range exhaustive mesh search using a pattern set -// according to the encode speed profile. 
-static int full_pixel_exhaustive(VP10_COMP *cpi, MACROBLOCK *x, - MV *centre_mv_full, int sadpb, int *cost_list, - const vp9_variance_fn_ptr_t *fn_ptr, - const MV *ref_mv, MV *dst_mv) { - const SPEED_FEATURES *const sf = &cpi->sf; - MV temp_mv = {centre_mv_full->row, centre_mv_full->col}; - MV f_ref_mv = {ref_mv->row >> 3, ref_mv->col >> 3}; - int bestsme; - int i; - int interval = sf->mesh_patterns[0].interval; - int range = sf->mesh_patterns[0].range; - int baseline_interval_divisor; - - // Keep track of number of exhaustive calls (this frame in this thread). - ++(*x->ex_search_count_ptr); - - // Trap illegal values for interval and range for this function. - if ((range < MIN_RANGE) || (range > MAX_RANGE) || - (interval < MIN_INTERVAL) || (interval > range)) - return INT_MAX; - - baseline_interval_divisor = range / interval; - - // Check size of proposed first range against magnitude of the centre - // value used as a starting point. - range = VPXMAX(range, (5 * VPXMAX(abs(temp_mv.row), abs(temp_mv.col))) / 4); - range = VPXMIN(range, MAX_RANGE); - interval = VPXMAX(interval, range / baseline_interval_divisor); - - // initial search - bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv, range, - interval, sadpb, fn_ptr, &temp_mv); - - if ((interval > MIN_INTERVAL) && (range > MIN_RANGE)) { - // Progressive searches with range and step size decreasing each time - // till we reach a step size of 1. Then break out. - for (i = 1; i < MAX_MESH_STEP; ++i) { - // First pass with coarser step and longer range - bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv, - sf->mesh_patterns[i].range, - sf->mesh_patterns[i].interval, - sadpb, fn_ptr, &temp_mv); - - if (sf->mesh_patterns[i].interval == 1) - break; - } - } - - if (bestsme < INT_MAX) - bestsme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1); - *dst_mv = temp_mv; - - // Return cost list. 
- if (cost_list) { - calc_int_cost_list(x, ref_mv, sadpb, fn_ptr, dst_mv, cost_list); - } - return bestsme; -} - -int vp10_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv, - int sad_per_bit, int distance, - const vp9_variance_fn_ptr_t *fn_ptr, - const MV *center_mv, MV *best_mv) { - int r, c; - const MACROBLOCKD *const xd = &x->e_mbd; - const struct buf_2d *const what = &x->plane[0].src; - const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min); - const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max); - const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min); - const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max); - const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - int best_sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, ref_mv), in_what->stride) + - mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit); - *best_mv = *ref_mv; - - for (r = row_min; r < row_max; ++r) { - for (c = col_min; c < col_max; ++c) { - const MV mv = {r, c}; - const int sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &mv), in_what->stride) + - mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit); - if (sad < best_sad) { - best_sad = sad; - *best_mv = mv; - } - } - } - return best_sad; -} - -int vp10_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv, - int sad_per_bit, int distance, - const vp9_variance_fn_ptr_t *fn_ptr, - const MV *center_mv, MV *best_mv) { - int r; - const MACROBLOCKD *const xd = &x->e_mbd; - const struct buf_2d *const what = &x->plane[0].src; - const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min); - const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max); - const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min); - const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max); - const MV fcenter_mv = 
{center_mv->row >> 3, center_mv->col >> 3}; - unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, ref_mv), in_what->stride) + - mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit); - *best_mv = *ref_mv; - - for (r = row_min; r < row_max; ++r) { - int c = col_min; - const uint8_t *check_here = &in_what->buf[r * in_what->stride + c]; - - if (fn_ptr->sdx3f != NULL) { - while ((c + 2) < col_max) { - int i; - DECLARE_ALIGNED(16, uint32_t, sads[3]); - - fn_ptr->sdx3f(what->buf, what->stride, check_here, in_what->stride, - sads); - - for (i = 0; i < 3; ++i) { - unsigned int sad = sads[i]; - if (sad < best_sad) { - const MV mv = {r, c}; - sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit); - if (sad < best_sad) { - best_sad = sad; - *best_mv = mv; - } - } - ++check_here; - ++c; - } - } - } - - while (c < col_max) { - unsigned int sad = fn_ptr->sdf(what->buf, what->stride, - check_here, in_what->stride); - if (sad < best_sad) { - const MV mv = {r, c}; - sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit); - if (sad < best_sad) { - best_sad = sad; - *best_mv = mv; - } - } - ++check_here; - ++c; - } - } - - return best_sad; -} - -int vp10_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv, - int sad_per_bit, int distance, - const vp9_variance_fn_ptr_t *fn_ptr, - const MV *center_mv, MV *best_mv) { - int r; - const MACROBLOCKD *const xd = &x->e_mbd; - const struct buf_2d *const what = &x->plane[0].src; - const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min); - const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max); - const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min); - const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max); - const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, ref_mv), in_what->stride) + - 
mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit); - *best_mv = *ref_mv; - - for (r = row_min; r < row_max; ++r) { - int c = col_min; - const uint8_t *check_here = &in_what->buf[r * in_what->stride + c]; - - if (fn_ptr->sdx8f != NULL) { - while ((c + 7) < col_max) { - int i; - DECLARE_ALIGNED(16, uint32_t, sads[8]); - - fn_ptr->sdx8f(what->buf, what->stride, check_here, in_what->stride, - sads); - - for (i = 0; i < 8; ++i) { - unsigned int sad = sads[i]; - if (sad < best_sad) { - const MV mv = {r, c}; - sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit); - if (sad < best_sad) { - best_sad = sad; - *best_mv = mv; - } - } - ++check_here; - ++c; - } - } - } - - if (fn_ptr->sdx3f != NULL) { - while ((c + 2) < col_max) { - int i; - DECLARE_ALIGNED(16, uint32_t, sads[3]); - - fn_ptr->sdx3f(what->buf, what->stride, check_here, in_what->stride, - sads); - - for (i = 0; i < 3; ++i) { - unsigned int sad = sads[i]; - if (sad < best_sad) { - const MV mv = {r, c}; - sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit); - if (sad < best_sad) { - best_sad = sad; - *best_mv = mv; - } - } - ++check_here; - ++c; - } - } - } - - while (c < col_max) { - unsigned int sad = fn_ptr->sdf(what->buf, what->stride, - check_here, in_what->stride); - if (sad < best_sad) { - const MV mv = {r, c}; - sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit); - if (sad < best_sad) { - best_sad = sad; - *best_mv = mv; - } - } - ++check_here; - ++c; - } - } - - return best_sad; -} - -int vp10_refining_search_sad(const MACROBLOCK *x, - MV *ref_mv, int error_per_bit, - int search_range, - const vp9_variance_fn_ptr_t *fn_ptr, - const MV *center_mv) { - const MACROBLOCKD *const xd = &x->e_mbd; - const MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}}; - const struct buf_2d *const what = &x->plane[0].src; - const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - const uint8_t *best_address = get_buf_from_mv(in_what, 
ref_mv); - unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride, best_address, - in_what->stride) + - mvsad_err_cost(x, ref_mv, &fcenter_mv, error_per_bit); - int i, j; - - for (i = 0; i < search_range; i++) { - int best_site = -1; - const int all_in = ((ref_mv->row - 1) > x->mv_row_min) & - ((ref_mv->row + 1) < x->mv_row_max) & - ((ref_mv->col - 1) > x->mv_col_min) & - ((ref_mv->col + 1) < x->mv_col_max); - - if (all_in) { - unsigned int sads[4]; - const uint8_t *const positions[4] = { - best_address - in_what->stride, - best_address - 1, - best_address + 1, - best_address + in_what->stride - }; - - fn_ptr->sdx4df(what->buf, what->stride, positions, in_what->stride, sads); - - for (j = 0; j < 4; ++j) { - if (sads[j] < best_sad) { - const MV mv = {ref_mv->row + neighbors[j].row, - ref_mv->col + neighbors[j].col}; - sads[j] += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit); - if (sads[j] < best_sad) { - best_sad = sads[j]; - best_site = j; - } - } - } - } else { - for (j = 0; j < 4; ++j) { - const MV mv = {ref_mv->row + neighbors[j].row, - ref_mv->col + neighbors[j].col}; - - if (is_mv_in(x, &mv)) { - unsigned int sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &mv), - in_what->stride); - if (sad < best_sad) { - sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit); - if (sad < best_sad) { - best_sad = sad; - best_site = j; - } - } - } - } - } - - if (best_site == -1) { - break; - } else { - ref_mv->row += neighbors[best_site].row; - ref_mv->col += neighbors[best_site].col; - best_address = get_buf_from_mv(in_what, ref_mv); - } - } - - return best_sad; -} - -// This function is called when we do joint motion search in comp_inter_inter -// mode. 
-int vp10_refining_search_8p_c(const MACROBLOCK *x, - MV *ref_mv, int error_per_bit, - int search_range, - const vp9_variance_fn_ptr_t *fn_ptr, - const MV *center_mv, - const uint8_t *second_pred) { - const MV neighbors[8] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}, - {-1, -1}, {1, -1}, {-1, 1}, {1, 1}}; - const MACROBLOCKD *const xd = &x->e_mbd; - const struct buf_2d *const what = &x->plane[0].src; - const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - unsigned int best_sad = fn_ptr->sdaf(what->buf, what->stride, - get_buf_from_mv(in_what, ref_mv), in_what->stride, second_pred) + - mvsad_err_cost(x, ref_mv, &fcenter_mv, error_per_bit); - int i, j; - - for (i = 0; i < search_range; ++i) { - int best_site = -1; - - for (j = 0; j < 8; ++j) { - const MV mv = {ref_mv->row + neighbors[j].row, - ref_mv->col + neighbors[j].col}; - - if (is_mv_in(x, &mv)) { - unsigned int sad = fn_ptr->sdaf(what->buf, what->stride, - get_buf_from_mv(in_what, &mv), in_what->stride, second_pred); - if (sad < best_sad) { - sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit); - if (sad < best_sad) { - best_sad = sad; - best_site = j; - } - } - } - } - - if (best_site == -1) { - break; - } else { - ref_mv->row += neighbors[best_site].row; - ref_mv->col += neighbors[best_site].col; - } - } - return best_sad; -} - -#define MIN_EX_SEARCH_LIMIT 128 -static int is_exhaustive_allowed(VP10_COMP *cpi, MACROBLOCK *x) { - const SPEED_FEATURES *const sf = &cpi->sf; - const int max_ex = VPXMAX(MIN_EX_SEARCH_LIMIT, - (*x->m_search_count_ptr * sf->max_exaustive_pct) / 100); - - return sf->allow_exhaustive_searches && - (sf->exhaustive_searches_thresh < INT_MAX) && - (*x->ex_search_count_ptr <= max_ex) && - !cpi->rc.is_src_frame_alt_ref; -} - -int vp10_full_pixel_search(VP10_COMP *cpi, MACROBLOCK *x, - BLOCK_SIZE bsize, MV *mvp_full, - int step_param, int error_per_bit, - int *cost_list, - const MV *ref_mv, MV *tmp_mv, - int var_max, 
int rd) { - const SPEED_FEATURES *const sf = &cpi->sf; - const SEARCH_METHODS method = sf->mv.search_method; - vp9_variance_fn_ptr_t *fn_ptr = &cpi->fn_ptr[bsize]; - int var = 0; - if (cost_list) { - cost_list[0] = INT_MAX; - cost_list[1] = INT_MAX; - cost_list[2] = INT_MAX; - cost_list[3] = INT_MAX; - cost_list[4] = INT_MAX; - } - - // Keep track of number of searches (this frame in this thread). - ++(*x->m_search_count_ptr); - - switch (method) { - case FAST_DIAMOND: - var = vp10_fast_dia_search(x, mvp_full, step_param, error_per_bit, 0, - cost_list, fn_ptr, 1, ref_mv, tmp_mv); - break; - case FAST_HEX: - var = vp10_fast_hex_search(x, mvp_full, step_param, error_per_bit, 0, - cost_list, fn_ptr, 1, ref_mv, tmp_mv); - break; - case HEX: - var = vp10_hex_search(x, mvp_full, step_param, error_per_bit, 1, - cost_list, fn_ptr, 1, ref_mv, tmp_mv); - break; - case SQUARE: - var = vp10_square_search(x, mvp_full, step_param, error_per_bit, 1, - cost_list, fn_ptr, 1, ref_mv, tmp_mv); - break; - case BIGDIA: - var = vp10_bigdia_search(x, mvp_full, step_param, error_per_bit, 1, - cost_list, fn_ptr, 1, ref_mv, tmp_mv); - break; - case NSTEP: - var = vp10_full_pixel_diamond(cpi, x, mvp_full, step_param, error_per_bit, - MAX_MVSEARCH_STEPS - 1 - step_param, - 1, cost_list, fn_ptr, ref_mv, tmp_mv); - - // Should we allow a follow on exhaustive search? - if (is_exhaustive_allowed(cpi, x)) { - int64_t exhuastive_thr = sf->exhaustive_searches_thresh; - exhuastive_thr >>= 8 - (b_width_log2_lookup[bsize] + - b_height_log2_lookup[bsize]); - - // Threshold variance for an exhaustive full search. 
- if (var > exhuastive_thr) { - int var_ex; - MV tmp_mv_ex; - var_ex = full_pixel_exhaustive(cpi, x, tmp_mv, - error_per_bit, cost_list, fn_ptr, - ref_mv, &tmp_mv_ex); - - if (var_ex < var) { - var = var_ex; - *tmp_mv = tmp_mv_ex; - } - } - } - break; - - break; - default: - assert(0 && "Invalid search method."); - } - - if (method != NSTEP && rd && var < var_max) - var = vp10_get_mvpred_var(x, tmp_mv, ref_mv, fn_ptr, 1); - - return var; -} diff --git a/libs/libvpx/vp10/encoder/mcomp.h b/libs/libvpx/vp10/encoder/mcomp.h deleted file mode 100644 index 9d1ab2aabe..0000000000 --- a/libs/libvpx/vp10/encoder/mcomp.h +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#ifndef VP10_ENCODER_MCOMP_H_ -#define VP10_ENCODER_MCOMP_H_ - -#include "vp10/encoder/block.h" -#include "vpx_dsp/variance.h" - -#ifdef __cplusplus -extern "C" { -#endif - -// The maximum number of steps in a step search given the largest -// allowed initial step -#define MAX_MVSEARCH_STEPS 11 -// Max full pel mv specified in the unit of full pixel -// Enable the use of motion vector in range [-1023, 1023]. 
-#define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS - 1)) - 1) -// Maximum size of the first step in full pel units -#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1)) -// Allowed motion vector pixel distance outside image border -// for Block_16x16 -#define BORDER_MV_PIXELS_B16 (16 + VP9_INTERP_EXTEND) - -// motion search site -typedef struct search_site { - MV mv; - int offset; -} search_site; - -typedef struct search_site_config { - search_site ss[8 * MAX_MVSEARCH_STEPS + 1]; - int ss_count; - int searches_per_step; -} search_site_config; - -void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride); -void vp10_init3smotion_compensation(search_site_config *cfg, int stride); - -void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv); -int vp10_mv_bit_cost(const MV *mv, const MV *ref, - const int *mvjcost, int *mvcost[2], int weight); - -// Utility to compute variance + MV rate cost for a given MV -int vp10_get_mvpred_var(const MACROBLOCK *x, - const MV *best_mv, const MV *center_mv, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost); -int vp10_get_mvpred_av_var(const MACROBLOCK *x, - const MV *best_mv, const MV *center_mv, - const uint8_t *second_pred, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost); - -struct VP10_COMP; -struct SPEED_FEATURES; - -int vp10_init_search_range(int size); - -int vp10_refining_search_sad(const struct macroblock *x, - struct mv *ref_mv, - int sad_per_bit, int distance, - const struct vp9_variance_vtable *fn_ptr, - const struct mv *center_mv); - -// Runs sequence of diamond searches in smaller steps for RD. -int vp10_full_pixel_diamond(const struct VP10_COMP *cpi, MACROBLOCK *x, - MV *mvp_full, int step_param, - int sadpb, int further_steps, int do_refine, - int *cost_list, - const vp9_variance_fn_ptr_t *fn_ptr, - const MV *ref_mv, MV *dst_mv); - -// Perform integral projection based motion estimation. 
-unsigned int vp10_int_pro_motion_estimation(const struct VP10_COMP *cpi, - MACROBLOCK *x, - BLOCK_SIZE bsize, - int mi_row, int mi_col); - -typedef int (integer_mv_pattern_search_fn) ( - const MACROBLOCK *x, - MV *ref_mv, - int search_param, - int error_per_bit, - int do_init_search, - int *cost_list, - const vp9_variance_fn_ptr_t *vf, - int use_mvcost, - const MV *center_mv, - MV *best_mv); - -integer_mv_pattern_search_fn vp10_hex_search; -integer_mv_pattern_search_fn vp10_bigdia_search; -integer_mv_pattern_search_fn vp10_square_search; -integer_mv_pattern_search_fn vp10_fast_hex_search; -integer_mv_pattern_search_fn vp10_fast_dia_search; - -typedef int (fractional_mv_step_fp) ( - const MACROBLOCK *x, - MV *bestmv, const MV *ref_mv, - int allow_hp, - int error_per_bit, - const vp9_variance_fn_ptr_t *vfp, - int forced_stop, // 0 - full, 1 - qtr only, 2 - half only - int iters_per_step, - int *cost_list, - int *mvjcost, int *mvcost[2], - int *distortion, unsigned int *sse1, - const uint8_t *second_pred, - int w, int h); - -extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree; -extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned; -extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_more; -extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_evenmore; - -typedef int (*vp10_full_search_fn_t)(const MACROBLOCK *x, - const MV *ref_mv, int sad_per_bit, - int distance, - const vp9_variance_fn_ptr_t *fn_ptr, - const MV *center_mv, MV *best_mv); - -typedef int (*vp10_refining_search_fn_t)(const MACROBLOCK *x, - MV *ref_mv, int sad_per_bit, - int distance, - const vp9_variance_fn_ptr_t *fn_ptr, - const MV *center_mv); - -typedef int (*vp10_diamond_search_fn_t)(const MACROBLOCK *x, - const search_site_config *cfg, - MV *ref_mv, MV *best_mv, - int search_param, int sad_per_bit, - int *num00, - const vp9_variance_fn_ptr_t *fn_ptr, - const MV *center_mv); - -int vp10_refining_search_8p_c(const MACROBLOCK *x, - MV *ref_mv, int 
error_per_bit, - int search_range, - const vp9_variance_fn_ptr_t *fn_ptr, - const MV *center_mv, const uint8_t *second_pred); - -struct VP10_COMP; - -int vp10_full_pixel_search(struct VP10_COMP *cpi, MACROBLOCK *x, - BLOCK_SIZE bsize, MV *mvp_full, - int step_param, int error_per_bit, - int *cost_list, - const MV *ref_mv, MV *tmp_mv, - int var_max, int rd); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_MCOMP_H_ diff --git a/libs/libvpx/vp10/encoder/mips/msa/error_msa.c b/libs/libvpx/vp10/encoder/mips/msa/error_msa.c deleted file mode 100644 index dacca32c05..0000000000 --- a/libs/libvpx/vp10/encoder/mips/msa/error_msa.c +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "./vp10_rtcd.h" -#include "vpx_dsp/mips/macros_msa.h" - -#define BLOCK_ERROR_BLOCKSIZE_MSA(BSize) \ -static int64_t block_error_##BSize##size_msa(const int16_t *coeff_ptr, \ - const int16_t *dq_coeff_ptr, \ - int64_t *ssz) { \ - int64_t err = 0; \ - uint32_t loop_cnt; \ - v8i16 coeff, dq_coeff, coeff_r_h, coeff_l_h; \ - v4i32 diff_r, diff_l, coeff_r_w, coeff_l_w; \ - v2i64 sq_coeff_r, sq_coeff_l; \ - v2i64 err0, err_dup0, err1, err_dup1; \ - \ - coeff = LD_SH(coeff_ptr); \ - dq_coeff = LD_SH(dq_coeff_ptr); \ - UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \ - ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \ - HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \ - DOTP_SW2_SD(coeff_r_w, coeff_l_w, coeff_r_w, coeff_l_w, \ - sq_coeff_r, sq_coeff_l); \ - DOTP_SW2_SD(diff_r, diff_l, diff_r, diff_l, err0, err1); \ - \ - coeff = LD_SH(coeff_ptr + 8); \ - dq_coeff = LD_SH(dq_coeff_ptr + 8); \ - UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \ - ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \ - HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \ - DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \ - DPADD_SD2_SD(diff_r, diff_l, err0, err1); \ - \ - coeff_ptr += 16; \ - dq_coeff_ptr += 16; \ - \ - for (loop_cnt = ((BSize >> 4) - 1); loop_cnt--;) { \ - coeff = LD_SH(coeff_ptr); \ - dq_coeff = LD_SH(dq_coeff_ptr); \ - UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \ - ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \ - HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \ - DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \ - DPADD_SD2_SD(diff_r, diff_l, err0, err1); \ - \ - coeff = LD_SH(coeff_ptr + 8); \ - dq_coeff = LD_SH(dq_coeff_ptr + 8); \ - UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \ - ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \ - HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \ - DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \ - DPADD_SD2_SD(diff_r, diff_l, err0, err1); \ - \ - coeff_ptr += 16; \ - 
dq_coeff_ptr += 16; \ - } \ - \ - err_dup0 = __msa_splati_d(sq_coeff_r, 1); \ - err_dup1 = __msa_splati_d(sq_coeff_l, 1); \ - sq_coeff_r += err_dup0; \ - sq_coeff_l += err_dup1; \ - *ssz = __msa_copy_s_d(sq_coeff_r, 0); \ - *ssz += __msa_copy_s_d(sq_coeff_l, 0); \ - \ - err_dup0 = __msa_splati_d(err0, 1); \ - err_dup1 = __msa_splati_d(err1, 1); \ - err0 += err_dup0; \ - err1 += err_dup1; \ - err = __msa_copy_s_d(err0, 0); \ - err += __msa_copy_s_d(err1, 0); \ - \ - return err; \ -} - -BLOCK_ERROR_BLOCKSIZE_MSA(16); -BLOCK_ERROR_BLOCKSIZE_MSA(64); -BLOCK_ERROR_BLOCKSIZE_MSA(256); -BLOCK_ERROR_BLOCKSIZE_MSA(1024); - -int64_t vp10_block_error_msa(const tran_low_t *coeff_ptr, - const tran_low_t *dq_coeff_ptr, - intptr_t blk_size, int64_t *ssz) { - int64_t err; - const int16_t *coeff = (const int16_t *)coeff_ptr; - const int16_t *dq_coeff = (const int16_t *)dq_coeff_ptr; - - switch (blk_size) { - case 16: - err = block_error_16size_msa(coeff, dq_coeff, ssz); - break; - case 64: - err = block_error_64size_msa(coeff, dq_coeff, ssz); - break; - case 256: - err = block_error_256size_msa(coeff, dq_coeff, ssz); - break; - case 1024: - err = block_error_1024size_msa(coeff, dq_coeff, ssz); - break; - default: - err = vp10_block_error_c(coeff_ptr, dq_coeff_ptr, blk_size, ssz); - break; - } - - return err; -} diff --git a/libs/libvpx/vp10/encoder/mips/msa/fdct16x16_msa.c b/libs/libvpx/vp10/encoder/mips/msa/fdct16x16_msa.c deleted file mode 100644 index d78fc6473e..0000000000 --- a/libs/libvpx/vp10/encoder/mips/msa/fdct16x16_msa.c +++ /dev/null @@ -1,507 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include - -#include "vp10/common/enums.h" -#include "vp10/encoder/mips/msa/fdct_msa.h" -#include "vpx_dsp/mips/fwd_txfm_msa.h" - -static void fadst16_cols_step1_msa(const int16_t *input, int32_t stride, - const int32_t *const0, int16_t *int_buf) { - v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15; - v8i16 tp0, tp1, tp2, tp3, g0, g1, g2, g3, g8, g9, g10, g11, h0, h1, h2, h3; - v4i32 k0, k1, k2, k3; - - /* load input data */ - r0 = LD_SH(input); - r15 = LD_SH(input + 15 * stride); - r7 = LD_SH(input + 7 * stride); - r8 = LD_SH(input + 8 * stride); - SLLI_4V(r0, r15, r7, r8, 2); - - /* stage 1 */ - LD_SW2(const0, 4, k0, k1); - LD_SW2(const0 + 8, 4, k2, k3); - MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3); - - r3 = LD_SH(input + 3 * stride); - r4 = LD_SH(input + 4 * stride); - r11 = LD_SH(input + 11 * stride); - r12 = LD_SH(input + 12 * stride); - SLLI_4V(r3, r4, r11, r12, 2); - - LD_SW2(const0 + 4 * 4, 4, k0, k1); - LD_SW2(const0 + 4 * 6, 4, k2, k3); - MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11); - - /* stage 2 */ - BUTTERFLY_4(g0, g2, g10, g8, tp0, tp2, tp3, tp1); - ST_SH2(tp0, tp2, int_buf, 8); - ST_SH2(tp1, tp3, int_buf + 4 * 8, 8); - - LD_SW2(const0 + 4 * 8, 4, k0, k1); - k2 = LD_SW(const0 + 4 * 10); - MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3); - - ST_SH2(h0, h1, int_buf + 8 * 8, 8); - ST_SH2(h3, h2, int_buf + 12 * 8, 8); - - r9 = LD_SH(input + 9 * stride); - r6 = LD_SH(input + 6 * stride); - r1 = LD_SH(input + stride); - r14 = LD_SH(input + 14 * stride); - SLLI_4V(r9, r6, r1, r14, 2); - - LD_SW2(const0 + 4 * 11, 4, k0, k1); - LD_SW2(const0 + 4 * 13, 4, k2, k3); - MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g0, g1, g2, g3); - - ST_SH2(g1, g3, int_buf + 3 * 8, 4 * 8); - - r13 = LD_SH(input + 13 * stride); - r2 = LD_SH(input + 2 * stride); - r5 = LD_SH(input + 5 * stride); - r10 = LD_SH(input + 10 * stride); - SLLI_4V(r13, r2, r5, r10, 2); - - LD_SW2(const0 + 4 * 15, 4, k0, k1); - 
LD_SW2(const0 + 4 * 17, 4, k2, k3); - MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, h0, h1, h2, h3); - - ST_SH2(h1, h3, int_buf + 11 * 8, 4 * 8); - - BUTTERFLY_4(h0, h2, g2, g0, tp0, tp1, tp2, tp3); - ST_SH4(tp0, tp1, tp2, tp3, int_buf + 2 * 8, 4 * 8); -} - -static void fadst16_cols_step2_msa(int16_t *int_buf, const int32_t *const0, - int16_t *out) { - int16_t *out_ptr = out + 128; - v8i16 tp0, tp1, tp2, tp3, g5, g7, g13, g15; - v8i16 h0, h1, h2, h3, h4, h5, h6, h7, h10, h11; - v8i16 out0, out1, out2, out3, out4, out5, out6, out7; - v8i16 out8, out9, out10, out11, out12, out13, out14, out15; - v4i32 k0, k1, k2, k3; - - LD_SH2(int_buf + 3 * 8, 4 * 8, g13, g15); - LD_SH2(int_buf + 11 * 8, 4 * 8, g5, g7); - LD_SW2(const0 + 4 * 19, 4, k0, k1); - k2 = LD_SW(const0 + 4 * 21); - MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7); - - tp0 = LD_SH(int_buf + 4 * 8); - tp1 = LD_SH(int_buf + 5 * 8); - tp3 = LD_SH(int_buf + 10 * 8); - tp2 = LD_SH(int_buf + 14 * 8); - LD_SW2(const0 + 4 * 22, 4, k0, k1); - k2 = LD_SW(const0 + 4 * 24); - MADD_BF(tp0, tp1, tp2, tp3, k0, k1, k2, k0, out4, out6, out5, out7); - out4 = -out4; - ST_SH(out4, (out + 3 * 16)); - ST_SH(out5, (out_ptr + 4 * 16)); - - h1 = LD_SH(int_buf + 9 * 8); - h3 = LD_SH(int_buf + 12 * 8); - MADD_BF(h1, h3, h5, h7, k0, k1, k2, k0, out12, out14, out13, out15); - out13 = -out13; - ST_SH(out12, (out + 2 * 16)); - ST_SH(out13, (out_ptr + 5 * 16)); - - tp0 = LD_SH(int_buf); - tp1 = LD_SH(int_buf + 8); - tp2 = LD_SH(int_buf + 2 * 8); - tp3 = LD_SH(int_buf + 6 * 8); - - BUTTERFLY_4(tp0, tp1, tp3, tp2, out0, out1, h11, h10); - out1 = -out1; - ST_SH(out0, (out)); - ST_SH(out1, (out_ptr + 7 * 16)); - - h0 = LD_SH(int_buf + 8 * 8); - h2 = LD_SH(int_buf + 13 * 8); - - BUTTERFLY_4(h0, h2, h6, h4, out8, out9, out11, out10); - out8 = -out8; - ST_SH(out8, (out + 16)); - ST_SH(out9, (out_ptr + 6 * 16)); - - /* stage 4 */ - LD_SW2(const0 + 4 * 25, 4, k0, k1); - LD_SW2(const0 + 4 * 27, 4, k2, k3); - MADD_SHORT(h10, h11, k1, k2, out2, 
out3); - ST_SH(out2, (out + 7 * 16)); - ST_SH(out3, (out_ptr)); - - MADD_SHORT(out6, out7, k0, k3, out6, out7); - ST_SH(out6, (out + 4 * 16)); - ST_SH(out7, (out_ptr + 3 * 16)); - - MADD_SHORT(out10, out11, k0, k3, out10, out11); - ST_SH(out10, (out + 6 * 16)); - ST_SH(out11, (out_ptr + 16)); - - MADD_SHORT(out14, out15, k1, k2, out14, out15); - ST_SH(out14, (out + 5 * 16)); - ST_SH(out15, (out_ptr + 2 * 16)); -} - -static void fadst16_transpose_postproc_msa(int16_t *input, int16_t *out) { - v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15; - v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15; - - /* load input data */ - LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7); - TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, - r0, r1, r2, r3, r4, r5, r6, r7); - FDCT_POSTPROC_2V_NEG_H(r0, r1); - FDCT_POSTPROC_2V_NEG_H(r2, r3); - FDCT_POSTPROC_2V_NEG_H(r4, r5); - FDCT_POSTPROC_2V_NEG_H(r6, r7); - ST_SH8(r0, r1, r2, r3, r4, r5, r6, r7, out, 8); - out += 64; - - LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15); - TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, - r8, r9, r10, r11, r12, r13, r14, r15); - FDCT_POSTPROC_2V_NEG_H(r8, r9); - FDCT_POSTPROC_2V_NEG_H(r10, r11); - FDCT_POSTPROC_2V_NEG_H(r12, r13); - FDCT_POSTPROC_2V_NEG_H(r14, r15); - ST_SH8(r8, r9, r10, r11, r12, r13, r14, r15, out, 8); - out += 64; - - /* load input data */ - input += 128; - LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7); - TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, - r0, r1, r2, r3, r4, r5, r6, r7); - FDCT_POSTPROC_2V_NEG_H(r0, r1); - FDCT_POSTPROC_2V_NEG_H(r2, r3); - FDCT_POSTPROC_2V_NEG_H(r4, r5); - FDCT_POSTPROC_2V_NEG_H(r6, r7); - ST_SH8(r0, r1, r2, r3, r4, r5, r6, r7, out, 8); - out += 64; - - LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15); - TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, - r8, r9, r10, r11, r12, r13, r14, r15); - FDCT_POSTPROC_2V_NEG_H(r8, r9); - 
FDCT_POSTPROC_2V_NEG_H(r10, r11); - FDCT_POSTPROC_2V_NEG_H(r12, r13); - FDCT_POSTPROC_2V_NEG_H(r14, r15); - ST_SH8(r8, r9, r10, r11, r12, r13, r14, r15, out, 8); -} - -static void fadst16_rows_step1_msa(int16_t *input, const int32_t *const0, - int16_t *int_buf) { - v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15; - v8i16 tp0, tp1, tp2, tp3, g0, g1, g2, g3, g8, g9, g10, g11, h0, h1, h2, h3; - v4i32 k0, k1, k2, k3; - - /* load input data */ - r0 = LD_SH(input); - r7 = LD_SH(input + 7 * 8); - r8 = LD_SH(input + 8 * 8); - r15 = LD_SH(input + 15 * 8); - - /* stage 1 */ - LD_SW2(const0, 4, k0, k1); - LD_SW2(const0 + 4 * 2, 4, k2, k3); - MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3); - - r3 = LD_SH(input + 3 * 8); - r4 = LD_SH(input + 4 * 8); - r11 = LD_SH(input + 11 * 8); - r12 = LD_SH(input + 12 * 8); - - LD_SW2(const0 + 4 * 4, 4, k0, k1); - LD_SW2(const0 + 4 * 6, 4, k2, k3); - MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11); - - /* stage 2 */ - BUTTERFLY_4(g0, g2, g10, g8, tp0, tp2, tp3, tp1); - ST_SH2(tp0, tp1, int_buf, 4 * 8); - ST_SH2(tp2, tp3, int_buf + 8, 4 * 8); - - LD_SW2(const0 + 4 * 8, 4, k0, k1); - k2 = LD_SW(const0 + 4 * 10); - MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3); - ST_SH2(h0, h3, int_buf + 8 * 8, 4 * 8); - ST_SH2(h1, h2, int_buf + 9 * 8, 4 * 8); - - r1 = LD_SH(input + 8); - r6 = LD_SH(input + 6 * 8); - r9 = LD_SH(input + 9 * 8); - r14 = LD_SH(input + 14 * 8); - - LD_SW2(const0 + 4 * 11, 4, k0, k1); - LD_SW2(const0 + 4 * 13, 4, k2, k3); - MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g0, g1, g2, g3); - ST_SH2(g1, g3, int_buf + 3 * 8, 4 * 8); - - r2 = LD_SH(input + 2 * 8); - r5 = LD_SH(input + 5 * 8); - r10 = LD_SH(input + 10 * 8); - r13 = LD_SH(input + 13 * 8); - - LD_SW2(const0 + 4 * 15, 4, k0, k1); - LD_SW2(const0 + 4 * 17, 4, k2, k3); - MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, h0, h1, h2, h3); - ST_SH2(h1, h3, int_buf + 11 * 8, 4 * 8); - BUTTERFLY_4(h0, h2, g2, g0, tp0, tp1, tp2, tp3); - 
ST_SH4(tp0, tp1, tp2, tp3, int_buf + 2 * 8, 4 * 8); -} - -static void fadst16_rows_step2_msa(int16_t *int_buf, const int32_t *const0, - int16_t *out) { - int16_t *out_ptr = out + 8; - v8i16 tp0, tp1, tp2, tp3, g5, g7, g13, g15; - v8i16 h0, h1, h2, h3, h4, h5, h6, h7, h10, h11; - v8i16 out0, out1, out2, out3, out4, out5, out6, out7; - v8i16 out8, out9, out10, out11, out12, out13, out14, out15; - v4i32 k0, k1, k2, k3; - - g13 = LD_SH(int_buf + 3 * 8); - g15 = LD_SH(int_buf + 7 * 8); - g5 = LD_SH(int_buf + 11 * 8); - g7 = LD_SH(int_buf + 15 * 8); - - LD_SW2(const0 + 4 * 19, 4, k0, k1); - k2 = LD_SW(const0 + 4 * 21); - MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7); - - tp0 = LD_SH(int_buf + 4 * 8); - tp1 = LD_SH(int_buf + 5 * 8); - tp3 = LD_SH(int_buf + 10 * 8); - tp2 = LD_SH(int_buf + 14 * 8); - - LD_SW2(const0 + 4 * 22, 4, k0, k1); - k2 = LD_SW(const0 + 4 * 24); - MADD_BF(tp0, tp1, tp2, tp3, k0, k1, k2, k0, out4, out6, out5, out7); - out4 = -out4; - ST_SH(out4, (out + 3 * 16)); - ST_SH(out5, (out_ptr + 4 * 16)); - - h1 = LD_SH(int_buf + 9 * 8); - h3 = LD_SH(int_buf + 12 * 8); - MADD_BF(h1, h3, h5, h7, k0, k1, k2, k0, out12, out14, out13, out15); - out13 = -out13; - ST_SH(out12, (out + 2 * 16)); - ST_SH(out13, (out_ptr + 5 * 16)); - - tp0 = LD_SH(int_buf); - tp1 = LD_SH(int_buf + 8); - tp2 = LD_SH(int_buf + 2 * 8); - tp3 = LD_SH(int_buf + 6 * 8); - - BUTTERFLY_4(tp0, tp1, tp3, tp2, out0, out1, h11, h10); - out1 = -out1; - ST_SH(out0, (out)); - ST_SH(out1, (out_ptr + 7 * 16)); - - h0 = LD_SH(int_buf + 8 * 8); - h2 = LD_SH(int_buf + 13 * 8); - BUTTERFLY_4(h0, h2, h6, h4, out8, out9, out11, out10); - out8 = -out8; - ST_SH(out8, (out + 16)); - ST_SH(out9, (out_ptr + 6 * 16)); - - /* stage 4 */ - LD_SW2(const0 + 4 * 25, 4, k0, k1); - LD_SW2(const0 + 4 * 27, 4, k2, k3); - MADD_SHORT(h10, h11, k1, k2, out2, out3); - ST_SH(out2, (out + 7 * 16)); - ST_SH(out3, (out_ptr)); - - MADD_SHORT(out6, out7, k0, k3, out6, out7); - ST_SH(out6, (out + 4 * 16)); - ST_SH(out7, 
(out_ptr + 3 * 16)); - - MADD_SHORT(out10, out11, k0, k3, out10, out11); - ST_SH(out10, (out + 6 * 16)); - ST_SH(out11, (out_ptr + 16)); - - MADD_SHORT(out14, out15, k1, k2, out14, out15); - ST_SH(out14, (out + 5 * 16)); - ST_SH(out15, (out_ptr + 2 * 16)); -} - -static void fadst16_transpose_msa(int16_t *input, int16_t *out) { - v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15; - v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15; - - /* load input data */ - LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, - l4, l12, l5, l13, l6, l14, l7, l15); - TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, - r0, r1, r2, r3, r4, r5, r6, r7); - TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, - r8, r9, r10, r11, r12, r13, r14, r15); - ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8); - ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8); - out += 16 * 8; - - /* load input data */ - input += 128; - LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, - l4, l12, l5, l13, l6, l14, l7, l15); - TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, - r0, r1, r2, r3, r4, r5, r6, r7); - TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, - r8, r9, r10, r11, r12, r13, r14, r15); - ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8); - ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8); -} - -static void postproc_fdct16x8_1d_row(int16_t *intermediate, int16_t *output) { - int16_t *temp = intermediate; - int16_t *out = output; - v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - v8i16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11; - v8i16 in12, in13, in14, in15; - - LD_SH8(temp, 16, in0, in1, in2, in3, in4, in5, in6, in7); - temp = intermediate + 8; - LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, - in8, in9, 
in10, in11, in12, in13, in14, in15); - FDCT_POSTPROC_2V_NEG_H(in0, in1); - FDCT_POSTPROC_2V_NEG_H(in2, in3); - FDCT_POSTPROC_2V_NEG_H(in4, in5); - FDCT_POSTPROC_2V_NEG_H(in6, in7); - FDCT_POSTPROC_2V_NEG_H(in8, in9); - FDCT_POSTPROC_2V_NEG_H(in10, in11); - FDCT_POSTPROC_2V_NEG_H(in12, in13); - FDCT_POSTPROC_2V_NEG_H(in14, in15); - BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, - in8, in9, in10, in11, in12, in13, in14, in15, - tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, - in8, in9, in10, in11, in12, in13, in14, in15); - temp = intermediate; - ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, temp, 16); - FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, - tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7); - temp = intermediate; - LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15); - FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, - tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3); - ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, out, 16); - TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, - tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7); - out = output + 8; - ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, out, 16); -} - -void vp10_fht16x16_msa(const int16_t *input, int16_t *output, - int32_t stride, int32_t tx_type) { - DECLARE_ALIGNED(32, int16_t, tmp[256]); - DECLARE_ALIGNED(32, int16_t, trans_buf[256]); - DECLARE_ALIGNED(32, int16_t, tmp_buf[128]); - int32_t i; - int16_t *ptmpbuf = &tmp_buf[0]; - int16_t *trans = &trans_buf[0]; - const int32_t const_arr[29 * 4] = { - 52707308, 52707308, 52707308, 52707308, - -1072430300, -1072430300, -1072430300, -1072430300, - 795618043, 795618043, 795618043, 795618043, - -721080468, -721080468, -721080468, -721080468, - 459094491, 459094491, 459094491, 459094491, - -970646691, -970646691, -970646691, -970646691, - 1010963856, 1010963856, 1010963856, 1010963856, - 
-361743294, -361743294, -361743294, -361743294, - 209469125, 209469125, 209469125, 209469125, - -1053094788, -1053094788, -1053094788, -1053094788, - 1053160324, 1053160324, 1053160324, 1053160324, - 639644520, 639644520, 639644520, 639644520, - -862444000, -862444000, -862444000, -862444000, - 1062144356, 1062144356, 1062144356, 1062144356, - -157532337, -157532337, -157532337, -157532337, - 260914709, 260914709, 260914709, 260914709, - -1041559667, -1041559667, -1041559667, -1041559667, - 920985831, 920985831, 920985831, 920985831, - -551995675, -551995675, -551995675, -551995675, - 596522295, 596522295, 596522295, 596522295, - 892853362, 892853362, 892853362, 892853362, - -892787826, -892787826, -892787826, -892787826, - 410925857, 410925857, 410925857, 410925857, - -992012162, -992012162, -992012162, -992012162, - 992077698, 992077698, 992077698, 992077698, - 759246145, 759246145, 759246145, 759246145, - -759180609, -759180609, -759180609, -759180609, - -759222975, -759222975, -759222975, -759222975, - 759288511, 759288511, 759288511, 759288511 }; - - switch (tx_type) { - case DCT_DCT: - /* column transform */ - for (i = 0; i < 2; ++i) { - fdct8x16_1d_column(input + 8 * i, tmp + 8 * i, stride); - } - - /* row transform */ - for (i = 0; i < 2; ++i) { - fdct16x8_1d_row(tmp + (128 * i), output + (128 * i)); - } - break; - case ADST_DCT: - /* column transform */ - for (i = 0; i < 2; ++i) { - fadst16_cols_step1_msa(input + (i << 3), stride, const_arr, ptmpbuf); - fadst16_cols_step2_msa(ptmpbuf, const_arr, tmp + (i << 3)); - } - - /* row transform */ - for (i = 0; i < 2; ++i) { - postproc_fdct16x8_1d_row(tmp + (128 * i), output + (128 * i)); - } - break; - case DCT_ADST: - /* column transform */ - for (i = 0; i < 2; ++i) { - fdct8x16_1d_column(input + 8 * i, tmp + 8 * i, stride); - } - - fadst16_transpose_postproc_msa(tmp, trans); - - /* row transform */ - for (i = 0; i < 2; ++i) { - fadst16_rows_step1_msa(trans + (i << 7), const_arr, ptmpbuf); - 
fadst16_rows_step2_msa(ptmpbuf, const_arr, tmp + (i << 7)); - } - - fadst16_transpose_msa(tmp, output); - break; - case ADST_ADST: - /* column transform */ - for (i = 0; i < 2; ++i) { - fadst16_cols_step1_msa(input + (i << 3), stride, const_arr, ptmpbuf); - fadst16_cols_step2_msa(ptmpbuf, const_arr, tmp + (i << 3)); - } - - fadst16_transpose_postproc_msa(tmp, trans); - - /* row transform */ - for (i = 0; i < 2; ++i) { - fadst16_rows_step1_msa(trans + (i << 7), const_arr, ptmpbuf); - fadst16_rows_step2_msa(ptmpbuf, const_arr, tmp + (i << 7)); - } - - fadst16_transpose_msa(tmp, output); - break; - default: - assert(0); - break; - } -} diff --git a/libs/libvpx/vp10/encoder/mips/msa/fdct4x4_msa.c b/libs/libvpx/vp10/encoder/mips/msa/fdct4x4_msa.c deleted file mode 100644 index 37269f0a43..0000000000 --- a/libs/libvpx/vp10/encoder/mips/msa/fdct4x4_msa.c +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include - -#include "vp10/common/enums.h" -#include "vp10/encoder/mips/msa/fdct_msa.h" - -void vp10_fwht4x4_msa(const int16_t *input, int16_t *output, - int32_t src_stride) { - v8i16 in0, in1, in2, in3, in4; - - LD_SH4(input, src_stride, in0, in1, in2, in3); - - in0 += in1; - in3 -= in2; - in4 = (in0 - in3) >> 1; - SUB2(in4, in1, in4, in2, in1, in2); - in0 -= in2; - in3 += in1; - - TRANSPOSE4x4_SH_SH(in0, in2, in3, in1, in0, in2, in3, in1); - - in0 += in2; - in1 -= in3; - in4 = (in0 - in1) >> 1; - SUB2(in4, in2, in4, in3, in2, in3); - in0 -= in3; - in1 += in2; - - SLLI_4V(in0, in1, in2, in3, 2); - - TRANSPOSE4x4_SH_SH(in0, in3, in1, in2, in0, in3, in1, in2); - - ST4x2_UB(in0, output, 4); - ST4x2_UB(in3, output + 4, 4); - ST4x2_UB(in1, output + 8, 4); - ST4x2_UB(in2, output + 12, 4); -} - -void vp10_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride, - int32_t tx_type) { - v8i16 in0, in1, in2, in3; - - LD_SH4(input, stride, in0, in1, in2, in3); - - /* fdct4 pre-process */ - { - v8i16 temp, mask; - v16i8 zero = { 0 }; - v16i8 one = __msa_ldi_b(1); - - mask = (v8i16)__msa_sldi_b(zero, one, 15); - SLLI_4V(in0, in1, in2, in3, 4); - temp = __msa_ceqi_h(in0, 0); - temp = (v8i16)__msa_xori_b((v16u8)temp, 255); - temp = mask & temp; - in0 += temp; - } - - switch (tx_type) { - case DCT_DCT: - VP9_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3); - TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); - VP9_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3); - break; - case ADST_DCT: - VP9_FADST4(in0, in1, in2, in3, in0, in1, in2, in3); - TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); - VP9_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3); - break; - case DCT_ADST: - VP9_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3); - TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); - VP9_FADST4(in0, in1, in2, in3, in0, in1, in2, in3); - break; - case ADST_ADST: - VP9_FADST4(in0, in1, in2, in3, in0, in1, in2, in3); - TRANSPOSE4x4_SH_SH(in0, in1, in2, 
in3, in0, in1, in2, in3); - VP9_FADST4(in0, in1, in2, in3, in0, in1, in2, in3); - break; - default: - assert(0); - break; - } - - TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); - ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3); - SRA_4V(in0, in1, in2, in3, 2); - PCKEV_D2_SH(in1, in0, in3, in2, in0, in2); - ST_SH2(in0, in2, output, 8); -} diff --git a/libs/libvpx/vp10/encoder/mips/msa/fdct8x8_msa.c b/libs/libvpx/vp10/encoder/mips/msa/fdct8x8_msa.c deleted file mode 100644 index 4283eb946c..0000000000 --- a/libs/libvpx/vp10/encoder/mips/msa/fdct8x8_msa.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include - -#include "vp10/common/enums.h" -#include "vp10/encoder/mips/msa/fdct_msa.h" - -void vp10_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride, - int32_t tx_type) { - v8i16 in0, in1, in2, in3, in4, in5, in6, in7; - - LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7); - SLLI_4V(in0, in1, in2, in3, 2); - SLLI_4V(in4, in5, in6, in7, 2); - - switch (tx_type) { - case DCT_DCT: - VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - break; - case ADST_DCT: - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - break; - case DCT_ADST: - VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - break; - case ADST_ADST: - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - break; - default: - assert(0); - break; - } - - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7); - ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8); -} diff --git 
a/libs/libvpx/vp10/encoder/mips/msa/fdct_msa.h b/libs/libvpx/vp10/encoder/mips/msa/fdct_msa.h deleted file mode 100644 index d7d40cb72c..0000000000 --- a/libs/libvpx/vp10/encoder/mips/msa/fdct_msa.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP9_ENCODER_MIPS_MSA_VP9_FDCT_MSA_H_ -#define VP9_ENCODER_MIPS_MSA_VP9_FDCT_MSA_H_ - -#include "vpx_dsp/mips/fwd_txfm_msa.h" -#include "vpx_dsp/mips/txfm_macros_msa.h" -#include "vpx_ports/mem.h" - -#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \ - v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \ - v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \ - cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \ - v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \ - cospi_24_64, -cospi_24_64, 0, 0 }; \ - \ - SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m); \ - cnst2_m = -cnst0_m; \ - ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \ - SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m); \ - cnst4_m = -cnst2_m; \ - ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \ - \ - ILVRL_H2_SH(in0, in7, vec1_m, vec0_m); \ - ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \ - DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \ - cnst1_m, cnst2_m, cnst3_m, in7, in0, \ - in4, in3); \ - \ - SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m); \ - cnst2_m = -cnst0_m; \ - ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \ - 
SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m); \ - cnst4_m = -cnst2_m; \ - ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \ - \ - ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \ - ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \ - \ - DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \ - cnst1_m, cnst2_m, cnst3_m, in5, in2, \ - in6, in1); \ - BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \ - out7 = -s0_m; \ - out0 = s1_m; \ - \ - SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, cnst0_m, cnst1_m, cnst2_m, cnst3_m); \ - \ - ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m); \ - cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ - cnst1_m = cnst0_m; \ - \ - ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \ - ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \ - DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \ - cnst2_m, cnst3_m, cnst1_m, out1, out6, \ - s0_m, s1_m); \ - \ - SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \ - cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ - \ - ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \ - ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m); \ - out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ - out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \ - out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \ - out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \ - \ - out1 = -out1; \ - out3 = -out3; \ - out5 = -out5; \ -} - -#define VP9_FADST4(in0, in1, in2, in3, out0, out1, out2, out3) { \ - v4i32 s0_m, s1_m, s2_m, s3_m, constant_m; \ - v4i32 in0_r_m, in1_r_m, in2_r_m, in3_r_m; \ - \ - UNPCK_R_SH_SW(in0, in0_r_m); \ - UNPCK_R_SH_SW(in1, in1_r_m); \ - UNPCK_R_SH_SW(in2, in2_r_m); \ - UNPCK_R_SH_SW(in3, in3_r_m); \ - \ - constant_m = __msa_fill_w(sinpi_4_9); \ - MUL2(in0_r_m, constant_m, in3_r_m, constant_m, s1_m, s0_m); \ - \ - constant_m = __msa_fill_w(sinpi_1_9); \ - s0_m += in0_r_m * constant_m; \ - s1_m -= in1_r_m * constant_m; \ - \ - constant_m = __msa_fill_w(sinpi_2_9); \ - s0_m += in1_r_m * constant_m; \ - s1_m 
+= in3_r_m * constant_m; \ - \ - s2_m = in0_r_m + in1_r_m - in3_r_m; \ - \ - constant_m = __msa_fill_w(sinpi_3_9); \ - MUL2(in2_r_m, constant_m, s2_m, constant_m, s3_m, in1_r_m); \ - \ - in0_r_m = s0_m + s3_m; \ - s2_m = s1_m - s3_m; \ - s3_m = s1_m - s0_m + s3_m; \ - \ - SRARI_W4_SW(in0_r_m, in1_r_m, s2_m, s3_m, DCT_CONST_BITS); \ - PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m, \ - s3_m, s3_m, out0, out1, out2, out3); \ -} -#endif /* VP9_ENCODER_MIPS_MSA_VP9_FDCT_MSA_H_ */ diff --git a/libs/libvpx/vp10/encoder/mips/msa/temporal_filter_msa.c b/libs/libvpx/vp10/encoder/mips/msa/temporal_filter_msa.c deleted file mode 100644 index 5d4558b94c..0000000000 --- a/libs/libvpx/vp10/encoder/mips/msa/temporal_filter_msa.c +++ /dev/null @@ -1,289 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "./vp10_rtcd.h" -#include "vpx_dsp/mips/macros_msa.h" - -static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, - uint32_t stride, - uint8_t *frm2_ptr, - int32_t filt_sth, - int32_t filt_wgt, - uint32_t *acc, - uint16_t *cnt) { - uint32_t row; - uint64_t f0, f1, f2, f3; - v16i8 frm2, frm1 = { 0 }; - v16i8 frm4, frm3 = { 0 }; - v16u8 frm_r, frm_l; - v8i16 frm2_r, frm2_l; - v8i16 diff0, diff1, mod0_h, mod1_h; - v4i32 cnst3, cnst16, filt_wt, strength; - v4i32 mod0_w, mod1_w, mod2_w, mod3_w; - v4i32 diff0_r, diff0_l, diff1_r, diff1_l; - v4i32 frm2_rr, frm2_rl, frm2_lr, frm2_ll; - v4i32 acc0, acc1, acc2, acc3; - v8i16 cnt0, cnt1; - - filt_wt = __msa_fill_w(filt_wgt); - strength = __msa_fill_w(filt_sth); - cnst3 = __msa_ldi_w(3); - cnst16 = __msa_ldi_w(16); - - for (row = 2; row--;) { - LD4(frm1_ptr, stride, f0, f1, f2, f3); - frm1_ptr += (4 * stride); - - LD_SB2(frm2_ptr, 16, frm2, frm4); - frm2_ptr += 32; - - LD_SW2(acc, 4, acc0, acc1); - LD_SW2(acc + 8, 4, acc2, acc3); - LD_SH2(cnt, 8, cnt0, cnt1); - - INSERT_D2_SB(f0, f1, frm1); - INSERT_D2_SB(f2, f3, frm3); - ILVRL_B2_UB(frm1, frm2, frm_r, frm_l); - HSUB_UB2_SH(frm_r, frm_l, diff0, diff1); - UNPCK_SH_SW(diff0, diff0_r, diff0_l); - UNPCK_SH_SW(diff1, diff1_r, diff1_l); - MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, - diff1_l, mod0_w, mod1_w, mod2_w, mod3_w); - MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, - mod0_w, mod1_w, mod2_w, mod3_w); - SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); - - diff0_r = (mod0_w < cnst16); - diff0_l = (mod1_w < cnst16); - diff1_r = (mod2_w < cnst16); - diff1_l = (mod3_w < cnst16); - - SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, - mod0_w, mod1_w, mod2_w, mod3_w); - - mod0_w = diff0_r & mod0_w; - mod1_w = diff0_l & mod1_w; - mod2_w = diff1_r & mod2_w; - mod3_w = diff1_l & mod3_w; - - MUL4(mod0_w, filt_wt, mod1_w, filt_wt, mod2_w, filt_wt, mod3_w, filt_wt, - mod0_w, mod1_w, mod2_w, mod3_w); - 
PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h); - ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h); - ST_SH2(mod0_h, mod1_h, cnt, 8); - cnt += 16; - - UNPCK_UB_SH(frm2, frm2_r, frm2_l); - UNPCK_SH_SW(frm2_r, frm2_rr, frm2_rl); - UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll); - MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll, - mod0_w, mod1_w, mod2_w, mod3_w); - ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, - mod0_w, mod1_w, mod2_w, mod3_w); - - ST_SW2(mod0_w, mod1_w, acc, 4); - acc += 8; - ST_SW2(mod2_w, mod3_w, acc, 4); - acc += 8; - - LD_SW2(acc, 4, acc0, acc1); - LD_SW2(acc + 8, 4, acc2, acc3); - LD_SH2(cnt, 8, cnt0, cnt1); - - ILVRL_B2_UB(frm3, frm4, frm_r, frm_l); - HSUB_UB2_SH(frm_r, frm_l, diff0, diff1); - UNPCK_SH_SW(diff0, diff0_r, diff0_l); - UNPCK_SH_SW(diff1, diff1_r, diff1_l); - MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, - diff1_l, mod0_w, mod1_w, mod2_w, mod3_w); - MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, - mod0_w, mod1_w, mod2_w, mod3_w); - SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); - - diff0_r = (mod0_w < cnst16); - diff0_l = (mod1_w < cnst16); - diff1_r = (mod2_w < cnst16); - diff1_l = (mod3_w < cnst16); - - SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, - mod0_w, mod1_w, mod2_w, mod3_w); - - mod0_w = diff0_r & mod0_w; - mod1_w = diff0_l & mod1_w; - mod2_w = diff1_r & mod2_w; - mod3_w = diff1_l & mod3_w; - - MUL4(mod0_w, filt_wt, mod1_w, filt_wt, mod2_w, filt_wt, mod3_w, filt_wt, - mod0_w, mod1_w, mod2_w, mod3_w); - PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h); - ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h); - ST_SH2(mod0_h, mod1_h, cnt, 8); - cnt += 16; - UNPCK_UB_SH(frm4, frm2_r, frm2_l); - UNPCK_SH_SW(frm2_r, frm2_rr, frm2_rl); - UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll); - MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll, - mod0_w, mod1_w, mod2_w, mod3_w); - ADD4(mod0_w, acc0, 
mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, - mod0_w, mod1_w, mod2_w, mod3_w); - - ST_SW2(mod0_w, mod1_w, acc, 4); - acc += 8; - ST_SW2(mod2_w, mod3_w, acc, 4); - acc += 8; - } -} - -static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr, - uint32_t stride, - uint8_t *frm2_ptr, - int32_t filt_sth, - int32_t filt_wgt, - uint32_t *acc, - uint16_t *cnt) { - uint32_t row; - v16i8 frm1, frm2, frm3, frm4; - v16u8 frm_r, frm_l; - v16i8 zero = { 0 }; - v8u16 frm2_r, frm2_l; - v8i16 diff0, diff1, mod0_h, mod1_h; - v4i32 cnst3, cnst16, filt_wt, strength; - v4i32 mod0_w, mod1_w, mod2_w, mod3_w; - v4i32 diff0_r, diff0_l, diff1_r, diff1_l; - v4i32 frm2_rr, frm2_rl, frm2_lr, frm2_ll; - v4i32 acc0, acc1, acc2, acc3; - v8i16 cnt0, cnt1; - - filt_wt = __msa_fill_w(filt_wgt); - strength = __msa_fill_w(filt_sth); - cnst3 = __msa_ldi_w(3); - cnst16 = __msa_ldi_w(16); - - for (row = 8; row--;) { - LD_SB2(frm1_ptr, stride, frm1, frm3); - frm1_ptr += stride; - - LD_SB2(frm2_ptr, 16, frm2, frm4); - frm2_ptr += 16; - - LD_SW2(acc, 4, acc0, acc1); - LD_SW2(acc, 4, acc2, acc3); - LD_SH2(cnt, 8, cnt0, cnt1); - - ILVRL_B2_UB(frm1, frm2, frm_r, frm_l); - HSUB_UB2_SH(frm_r, frm_l, diff0, diff1); - UNPCK_SH_SW(diff0, diff0_r, diff0_l); - UNPCK_SH_SW(diff1, diff1_r, diff1_l); - MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l, - mod0_w, mod1_w, mod2_w, mod3_w); - MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, - mod0_w, mod1_w, mod2_w, mod3_w); - SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); - - diff0_r = (mod0_w < cnst16); - diff0_l = (mod1_w < cnst16); - diff1_r = (mod2_w < cnst16); - diff1_l = (mod3_w < cnst16); - - SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, - mod0_w, mod1_w, mod2_w, mod3_w); - - mod0_w = diff0_r & mod0_w; - mod1_w = diff0_l & mod1_w; - mod2_w = diff1_r & mod2_w; - mod3_w = diff1_l & mod3_w; - - MUL4(mod0_w, filt_wt, mod1_w, filt_wt, mod2_w, filt_wt, mod3_w, filt_wt, - mod0_w, mod1_w, mod2_w, 
mod3_w); - PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h); - ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h); - ST_SH2(mod0_h, mod1_h, cnt, 8); - cnt += 16; - - ILVRL_B2_UH(zero, frm2, frm2_r, frm2_l); - UNPCK_SH_SW(frm2_r, frm2_rr, frm2_rl); - UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll); - MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll, - mod0_w, mod1_w, mod2_w, mod3_w); - ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, - mod0_w, mod1_w, mod2_w, mod3_w); - - ST_SW2(mod0_w, mod1_w, acc, 4); - acc += 8; - ST_SW2(mod2_w, mod3_w, acc, 4); - acc += 8; - - LD_SW2(acc, 4, acc0, acc1); - LD_SW2(acc + 8, 4, acc2, acc3); - LD_SH2(cnt, 8, cnt0, cnt1); - - ILVRL_B2_UB(frm3, frm4, frm_r, frm_l); - HSUB_UB2_SH(frm_r, frm_l, diff0, diff1); - UNPCK_SH_SW(diff0, diff0_r, diff0_l); - UNPCK_SH_SW(diff1, diff1_r, diff1_l); - MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l, - mod0_w, mod1_w, mod2_w, mod3_w); - MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, - mod0_w, mod1_w, mod2_w, mod3_w); - SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); - - diff0_r = (mod0_w < cnst16); - diff0_l = (mod1_w < cnst16); - diff1_r = (mod2_w < cnst16); - diff1_l = (mod3_w < cnst16); - - SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, - mod0_w, mod1_w, mod2_w, mod3_w); - - mod0_w = diff0_r & mod0_w; - mod1_w = diff0_l & mod1_w; - mod2_w = diff1_r & mod2_w; - mod3_w = diff1_l & mod3_w; - - MUL4(mod0_w, filt_wt, mod1_w, filt_wt, mod2_w, filt_wt, mod3_w, filt_wt, - mod0_w, mod1_w, mod2_w, mod3_w); - PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h); - ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h); - ST_SH2(mod0_h, mod1_h, cnt, 8); - cnt += 16; - - ILVRL_B2_UH(zero, frm4, frm2_r, frm2_l); - UNPCK_SH_SW(frm2_r, frm2_rr, frm2_rl); - UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll); - MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll, - mod0_w, mod1_w, mod2_w, mod3_w); - 
ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, - mod0_w, mod1_w, mod2_w, mod3_w); - ST_SW2(mod0_w, mod1_w, acc, 4); - acc += 8; - ST_SW2(mod2_w, mod3_w, acc, 4); - acc += 8; - - frm1_ptr += stride; - frm2_ptr += 16; - } -} - -void vp10_temporal_filter_apply_msa(uint8_t *frame1_ptr, uint32_t stride, - uint8_t *frame2_ptr, uint32_t blk_w, - uint32_t blk_h, int32_t strength, - int32_t filt_wgt, uint32_t *accu, - uint16_t *cnt) { - if (8 == (blk_w * blk_h)) { - temporal_filter_apply_8size_msa(frame1_ptr, stride, frame2_ptr, - strength, filt_wgt, accu, cnt); - } else if (16 == (blk_w * blk_h)) { - temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr, - strength, filt_wgt, accu, cnt); - } else { - vp10_temporal_filter_apply_c(frame1_ptr, stride, frame2_ptr, blk_w, blk_h, - strength, filt_wgt, accu, cnt); - } -} diff --git a/libs/libvpx/vp10/encoder/picklpf.c b/libs/libvpx/vp10/encoder/picklpf.c deleted file mode 100644 index 045e03d1d0..0000000000 --- a/libs/libvpx/vp10/encoder/picklpf.c +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include - -#include "./vpx_scale_rtcd.h" - -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/mem.h" - -#include "vp10/common/loopfilter.h" -#include "vp10/common/onyxc_int.h" -#include "vp10/common/quant_common.h" - -#include "vp10/encoder/encoder.h" -#include "vp10/encoder/picklpf.h" -#include "vp10/encoder/quantize.h" - -static int get_max_filter_level(const VP10_COMP *cpi) { - if (cpi->oxcf.pass == 2) { - return cpi->twopass.section_intra_rating > 8 ? 
MAX_LOOP_FILTER * 3 / 4 - : MAX_LOOP_FILTER; - } else { - return MAX_LOOP_FILTER; - } -} - - -static int64_t try_filter_frame(const YV12_BUFFER_CONFIG *sd, - VP10_COMP *const cpi, - int filt_level, int partial_frame) { - VP10_COMMON *const cm = &cpi->common; - int64_t filt_err; - - if (cpi->num_workers > 1) - vp10_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane, - filt_level, 1, partial_frame, - cpi->workers, cpi->num_workers, &cpi->lf_row_sync); - else - vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level, - 1, partial_frame); - -#if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) { - filt_err = vp10_highbd_get_y_sse(sd, cm->frame_to_show); - } else { - filt_err = vp10_get_y_sse(sd, cm->frame_to_show); - } -#else - filt_err = vp10_get_y_sse(sd, cm->frame_to_show); -#endif // CONFIG_VP9_HIGHBITDEPTH - - // Re-instate the unfiltered frame - vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show); - - return filt_err; -} - -static int search_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi, - int partial_frame) { - const VP10_COMMON *const cm = &cpi->common; - const struct loopfilter *const lf = &cm->lf; - const int min_filter_level = 0; - const int max_filter_level = get_max_filter_level(cpi); - int filt_direction = 0; - int64_t best_err; - int filt_best; - - // Start the search at the previous frame filter level unless it is now out of - // range. - int filt_mid = clamp(lf->filter_level, min_filter_level, max_filter_level); - int filter_step = filt_mid < 16 ? 
4 : filt_mid / 4; - // Sum squared error at each filter level - int64_t ss_err[MAX_LOOP_FILTER + 1]; - - // Set each entry to -1 - memset(ss_err, 0xFF, sizeof(ss_err)); - - // Make a copy of the unfiltered / processed recon buffer - vpx_yv12_copy_y(cm->frame_to_show, &cpi->last_frame_uf); - - best_err = try_filter_frame(sd, cpi, filt_mid, partial_frame); - filt_best = filt_mid; - ss_err[filt_mid] = best_err; - - while (filter_step > 0) { - const int filt_high = VPXMIN(filt_mid + filter_step, max_filter_level); - const int filt_low = VPXMAX(filt_mid - filter_step, min_filter_level); - - // Bias against raising loop filter in favor of lowering it. - int64_t bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; - - if ((cpi->oxcf.pass == 2) && (cpi->twopass.section_intra_rating < 20)) - bias = (bias * cpi->twopass.section_intra_rating) / 20; - - // yx, bias less for large block size - if (cm->tx_mode != ONLY_4X4) - bias >>= 1; - - if (filt_direction <= 0 && filt_low != filt_mid) { - // Get Low filter error score - if (ss_err[filt_low] < 0) { - ss_err[filt_low] = try_filter_frame(sd, cpi, filt_low, partial_frame); - } - // If value is close to the best so far then bias towards a lower loop - // filter value. - if ((ss_err[filt_low] - bias) < best_err) { - // Was it actually better than the previous best? - if (ss_err[filt_low] < best_err) - best_err = ss_err[filt_low]; - - filt_best = filt_low; - } - } - - // Now look at filt_high - if (filt_direction >= 0 && filt_high != filt_mid) { - if (ss_err[filt_high] < 0) { - ss_err[filt_high] = try_filter_frame(sd, cpi, filt_high, partial_frame); - } - // Was it better than the previous best? - if (ss_err[filt_high] < (best_err - bias)) { - best_err = ss_err[filt_high]; - filt_best = filt_high; - } - } - - // Half the step distance if the best filter value was the same as last time - if (filt_best == filt_mid) { - filter_step /= 2; - filt_direction = 0; - } else { - filt_direction = (filt_best < filt_mid) ? 
-1 : 1; - filt_mid = filt_best; - } - } - - return filt_best; -} - -void vp10_pick_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi, - LPF_PICK_METHOD method) { - VP10_COMMON *const cm = &cpi->common; - struct loopfilter *const lf = &cm->lf; - - lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0 - : cpi->oxcf.sharpness; - - if (method == LPF_PICK_MINIMAL_LPF && lf->filter_level) { - lf->filter_level = 0; - } else if (method >= LPF_PICK_FROM_Q) { - const int min_filter_level = 0; - const int max_filter_level = get_max_filter_level(cpi); - const int q = vp10_ac_quant(cm->base_qindex, 0, cm->bit_depth); - // These values were determined by linear fitting the result of the - // searched level, filt_guess = q * 0.316206 + 3.87252 -#if CONFIG_VP9_HIGHBITDEPTH - int filt_guess; - switch (cm->bit_depth) { - case VPX_BITS_8: - filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18); - break; - case VPX_BITS_10: - filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 4060632, 20); - break; - case VPX_BITS_12: - filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 16242526, 22); - break; - default: - assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 " - "or VPX_BITS_12"); - return; - } -#else - int filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18); -#endif // CONFIG_VP9_HIGHBITDEPTH - if (cm->frame_type == KEY_FRAME) - filt_guess -= 4; - lf->filter_level = clamp(filt_guess, min_filter_level, max_filter_level); - } else { - lf->filter_level = search_filter_level(sd, cpi, - method == LPF_PICK_FROM_SUBIMAGE); - } -} diff --git a/libs/libvpx/vp10/encoder/picklpf.h b/libs/libvpx/vp10/encoder/picklpf.h deleted file mode 100644 index 21a8758ef4..0000000000 --- a/libs/libvpx/vp10/encoder/picklpf.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#ifndef VP10_ENCODER_PICKLPF_H_ -#define VP10_ENCODER_PICKLPF_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include "vp10/encoder/encoder.h" - -struct yv12_buffer_config; -struct VP10_COMP; - -void vp10_pick_filter_level(const struct yv12_buffer_config *sd, - struct VP10_COMP *cpi, LPF_PICK_METHOD method); -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_PICKLPF_H_ diff --git a/libs/libvpx/vp10/encoder/quantize.c b/libs/libvpx/vp10/encoder/quantize.c deleted file mode 100644 index 86b324f1a1..0000000000 --- a/libs/libvpx/vp10/encoder/quantize.c +++ /dev/null @@ -1,389 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include "./vpx_dsp_rtcd.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/mem.h" - -#include "vp10/common/quant_common.h" -#include "vp10/common/seg_common.h" - -#include "vp10/encoder/encoder.h" -#include "vp10/encoder/quantize.h" -#include "vp10/encoder/rd.h" - -void vp10_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, - int skip_block, - const int16_t *zbin_ptr, const int16_t *round_ptr, - const int16_t *quant_ptr, const int16_t *quant_shift_ptr, - tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, - const int16_t *scan, const int16_t *iscan) { - int i, eob = -1; - // TODO(jingning) Decide the need of these arguments after the - // quantization process is completed. 
- (void)zbin_ptr; - (void)quant_shift_ptr; - (void)iscan; - - memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr)); - memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr)); - - if (!skip_block) { - // Quantization pass: All coefficients with index >= zero_flag are - // skippable. Note: zero_flag can be zero. - for (i = 0; i < n_coeffs; i++) { - const int rc = scan[i]; - const int coeff = coeff_ptr[rc]; - const int coeff_sign = (coeff >> 31); - const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign; - - int tmp = clamp(abs_coeff + round_ptr[rc != 0], INT16_MIN, INT16_MAX); - tmp = (tmp * quant_ptr[rc != 0]) >> 16; - - qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign; - dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0]; - - if (tmp) - eob = i; - } - } - *eob_ptr = eob + 1; -} - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, - intptr_t count, - int skip_block, - const int16_t *zbin_ptr, - const int16_t *round_ptr, - const int16_t *quant_ptr, - const int16_t *quant_shift_ptr, - tran_low_t *qcoeff_ptr, - tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, - const int16_t *scan, - const int16_t *iscan) { - int i; - int eob = -1; - // TODO(jingning) Decide the need of these arguments after the - // quantization process is completed. - (void)zbin_ptr; - (void)quant_shift_ptr; - (void)iscan; - - memset(qcoeff_ptr, 0, count * sizeof(*qcoeff_ptr)); - memset(dqcoeff_ptr, 0, count * sizeof(*dqcoeff_ptr)); - - if (!skip_block) { - // Quantization pass: All coefficients with index >= zero_flag are - // skippable. Note: zero_flag can be zero. 
- for (i = 0; i < count; i++) { - const int rc = scan[i]; - const int coeff = coeff_ptr[rc]; - const int coeff_sign = (coeff >> 31); - const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign; - const int64_t tmp = abs_coeff + round_ptr[rc != 0]; - const uint32_t abs_qcoeff = (uint32_t)((tmp * quant_ptr[rc != 0]) >> 16); - qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign); - dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0]; - if (abs_qcoeff) - eob = i; - } - } - *eob_ptr = eob + 1; -} -#endif - -// TODO(jingning) Refactor this file and combine functions with similar -// operations. -void vp10_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, - int skip_block, - const int16_t *zbin_ptr, const int16_t *round_ptr, - const int16_t *quant_ptr, - const int16_t *quant_shift_ptr, - tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, - const int16_t *scan, const int16_t *iscan) { - int i, eob = -1; - (void)zbin_ptr; - (void)quant_shift_ptr; - (void)iscan; - - memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr)); - memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr)); - - if (!skip_block) { - for (i = 0; i < n_coeffs; i++) { - const int rc = scan[i]; - const int coeff = coeff_ptr[rc]; - const int coeff_sign = (coeff >> 31); - int tmp = 0; - int abs_coeff = (coeff ^ coeff_sign) - coeff_sign; - - if (abs_coeff >= (dequant_ptr[rc != 0] >> 2)) { - abs_coeff += ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1); - abs_coeff = clamp(abs_coeff, INT16_MIN, INT16_MAX); - tmp = (abs_coeff * quant_ptr[rc != 0]) >> 15; - qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign; - dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2; - } - - if (tmp) - eob = i; - } - } - *eob_ptr = eob + 1; -} - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, - intptr_t n_coeffs, int skip_block, - const int16_t *zbin_ptr, - const int16_t *round_ptr, - const int16_t 
*quant_ptr, - const int16_t *quant_shift_ptr, - tran_low_t *qcoeff_ptr, - tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, - const int16_t *scan, const int16_t *iscan) { - int i, eob = -1; - (void)zbin_ptr; - (void)quant_shift_ptr; - (void)iscan; - - memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr)); - memset(dqcoeff_ptr, 0, n_coeffs * sizeof(*dqcoeff_ptr)); - - if (!skip_block) { - for (i = 0; i < n_coeffs; i++) { - uint32_t abs_qcoeff = 0; - const int rc = scan[i]; - const int coeff = coeff_ptr[rc]; - const int coeff_sign = (coeff >> 31); - const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign; - - if (abs_coeff >= (dequant_ptr[rc != 0] >> 2)) { - const int64_t tmp = abs_coeff - + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1); - abs_qcoeff = (uint32_t) ((tmp * quant_ptr[rc != 0]) >> 15); - qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign); - dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2; - } - - if (abs_qcoeff) - eob = i; - } - } - *eob_ptr = eob + 1; -} -#endif - -void vp10_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block, - const int16_t *scan, const int16_t *iscan) { - MACROBLOCKD *const xd = &x->e_mbd; - struct macroblock_plane *p = &x->plane[plane]; - struct macroblockd_plane *pd = &xd->plane[plane]; - -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - vpx_highbd_quantize_b(BLOCK_OFFSET(p->coeff, block), - 16, x->skip_block, - p->zbin, p->round, p->quant, p->quant_shift, - BLOCK_OFFSET(p->qcoeff, block), - BLOCK_OFFSET(pd->dqcoeff, block), - pd->dequant, &p->eobs[block], - scan, iscan); - return; - } -#endif - vpx_quantize_b(BLOCK_OFFSET(p->coeff, block), - 16, x->skip_block, - p->zbin, p->round, p->quant, p->quant_shift, - BLOCK_OFFSET(p->qcoeff, block), - BLOCK_OFFSET(pd->dqcoeff, block), - pd->dequant, &p->eobs[block], scan, iscan); -} - -static void invert_quant(int16_t *quant, int16_t *shift, int d) { - unsigned t; - int l; - t = d; - for (l = 0; t 
> 1; l++) - t >>= 1; - t = 1 + (1 << (16 + l)) / d; - *quant = (int16_t)(t - (1 << 16)); - *shift = 1 << (16 - l); -} - -static int get_qzbin_factor(int q, vpx_bit_depth_t bit_depth) { - const int quant = vp10_dc_quant(q, 0, bit_depth); -#if CONFIG_VP9_HIGHBITDEPTH - switch (bit_depth) { - case VPX_BITS_8: - return q == 0 ? 64 : (quant < 148 ? 84 : 80); - case VPX_BITS_10: - return q == 0 ? 64 : (quant < 592 ? 84 : 80); - case VPX_BITS_12: - return q == 0 ? 64 : (quant < 2368 ? 84 : 80); - default: - assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); - return -1; - } -#else - (void) bit_depth; - return q == 0 ? 64 : (quant < 148 ? 84 : 80); -#endif -} - -void vp10_init_quantizer(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - QUANTS *const quants = &cpi->quants; - int i, q, quant; - - for (q = 0; q < QINDEX_RANGE; q++) { - const int qzbin_factor = get_qzbin_factor(q, cm->bit_depth); - const int qrounding_factor = q == 0 ? 64 : 48; - - for (i = 0; i < 2; ++i) { - int qrounding_factor_fp = i == 0 ? 48 : 42; - if (q == 0) - qrounding_factor_fp = 64; - - // y - quant = i == 0 ? vp10_dc_quant(q, cm->y_dc_delta_q, cm->bit_depth) - : vp10_ac_quant(q, 0, cm->bit_depth); - invert_quant(&quants->y_quant[q][i], &quants->y_quant_shift[q][i], quant); - quants->y_quant_fp[q][i] = (1 << 16) / quant; - quants->y_round_fp[q][i] = (qrounding_factor_fp * quant) >> 7; - quants->y_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7); - quants->y_round[q][i] = (qrounding_factor * quant) >> 7; - cpi->y_dequant[q][i] = quant; - - // uv - quant = i == 0 ? 
vp10_dc_quant(q, cm->uv_dc_delta_q, cm->bit_depth) - : vp10_ac_quant(q, cm->uv_ac_delta_q, cm->bit_depth); - invert_quant(&quants->uv_quant[q][i], - &quants->uv_quant_shift[q][i], quant); - quants->uv_quant_fp[q][i] = (1 << 16) / quant; - quants->uv_round_fp[q][i] = (qrounding_factor_fp * quant) >> 7; - quants->uv_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7); - quants->uv_round[q][i] = (qrounding_factor * quant) >> 7; - cpi->uv_dequant[q][i] = quant; - } - - for (i = 2; i < 8; i++) { - quants->y_quant[q][i] = quants->y_quant[q][1]; - quants->y_quant_fp[q][i] = quants->y_quant_fp[q][1]; - quants->y_round_fp[q][i] = quants->y_round_fp[q][1]; - quants->y_quant_shift[q][i] = quants->y_quant_shift[q][1]; - quants->y_zbin[q][i] = quants->y_zbin[q][1]; - quants->y_round[q][i] = quants->y_round[q][1]; - cpi->y_dequant[q][i] = cpi->y_dequant[q][1]; - - quants->uv_quant[q][i] = quants->uv_quant[q][1]; - quants->uv_quant_fp[q][i] = quants->uv_quant_fp[q][1]; - quants->uv_round_fp[q][i] = quants->uv_round_fp[q][1]; - quants->uv_quant_shift[q][i] = quants->uv_quant_shift[q][1]; - quants->uv_zbin[q][i] = quants->uv_zbin[q][1]; - quants->uv_round[q][i] = quants->uv_round[q][1]; - cpi->uv_dequant[q][i] = cpi->uv_dequant[q][1]; - } - } -} - -void vp10_init_plane_quantizers(VP10_COMP *cpi, MACROBLOCK *x) { - const VP10_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &x->e_mbd; - QUANTS *const quants = &cpi->quants; - const int segment_id = xd->mi[0]->mbmi.segment_id; - const int qindex = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex); - const int rdmult = vp10_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q); - int i; - - // Y - x->plane[0].quant = quants->y_quant[qindex]; - x->plane[0].quant_fp = quants->y_quant_fp[qindex]; - x->plane[0].round_fp = quants->y_round_fp[qindex]; - x->plane[0].quant_shift = quants->y_quant_shift[qindex]; - x->plane[0].zbin = quants->y_zbin[qindex]; - x->plane[0].round = quants->y_round[qindex]; - xd->plane[0].dequant = 
cpi->y_dequant[qindex]; - - x->plane[0].quant_thred[0] = x->plane[0].zbin[0] * x->plane[0].zbin[0]; - x->plane[0].quant_thred[1] = x->plane[0].zbin[1] * x->plane[0].zbin[1]; - - // UV - for (i = 1; i < 3; i++) { - x->plane[i].quant = quants->uv_quant[qindex]; - x->plane[i].quant_fp = quants->uv_quant_fp[qindex]; - x->plane[i].round_fp = quants->uv_round_fp[qindex]; - x->plane[i].quant_shift = quants->uv_quant_shift[qindex]; - x->plane[i].zbin = quants->uv_zbin[qindex]; - x->plane[i].round = quants->uv_round[qindex]; - xd->plane[i].dequant = cpi->uv_dequant[qindex]; - - x->plane[i].quant_thred[0] = x->plane[i].zbin[0] * x->plane[i].zbin[0]; - x->plane[i].quant_thred[1] = x->plane[i].zbin[1] * x->plane[i].zbin[1]; - } - - x->skip_block = segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP); - x->q_index = qindex; - - x->errorperbit = rdmult >> 6; - x->errorperbit += (x->errorperbit == 0); - - vp10_initialize_me_consts(cpi, x, x->q_index); -} - -void vp10_frame_init_quantizer(VP10_COMP *cpi) { - vp10_init_plane_quantizers(cpi, &cpi->td.mb); -} - -void vp10_set_quantizer(VP10_COMMON *cm, int q) { - // quantizer has to be reinitialized with vp10_init_quantizer() if any - // delta_q changes. - cm->base_qindex = q; - cm->y_dc_delta_q = 0; - cm->uv_dc_delta_q = 0; - cm->uv_ac_delta_q = 0; -} - -// Table that converts 0-63 Q-range values passed in outside to the Qindex -// range used internally. 
-static const int quantizer_to_qindex[] = { - 0, 4, 8, 12, 16, 20, 24, 28, - 32, 36, 40, 44, 48, 52, 56, 60, - 64, 68, 72, 76, 80, 84, 88, 92, - 96, 100, 104, 108, 112, 116, 120, 124, - 128, 132, 136, 140, 144, 148, 152, 156, - 160, 164, 168, 172, 176, 180, 184, 188, - 192, 196, 200, 204, 208, 212, 216, 220, - 224, 228, 232, 236, 240, 244, 249, 255, -}; - -int vp10_quantizer_to_qindex(int quantizer) { - return quantizer_to_qindex[quantizer]; -} - -int vp10_qindex_to_quantizer(int qindex) { - int quantizer; - - for (quantizer = 0; quantizer < 64; ++quantizer) - if (quantizer_to_qindex[quantizer] >= qindex) - return quantizer; - - return 63; -} diff --git a/libs/libvpx/vp10/encoder/quantize.h b/libs/libvpx/vp10/encoder/quantize.h deleted file mode 100644 index b44088ecc6..0000000000 --- a/libs/libvpx/vp10/encoder/quantize.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_ENCODER_QUANTIZE_H_ -#define VP10_ENCODER_QUANTIZE_H_ - -#include "./vpx_config.h" -#include "vp10/encoder/block.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct { - DECLARE_ALIGNED(16, int16_t, y_quant[QINDEX_RANGE][8]); - DECLARE_ALIGNED(16, int16_t, y_quant_shift[QINDEX_RANGE][8]); - DECLARE_ALIGNED(16, int16_t, y_zbin[QINDEX_RANGE][8]); - DECLARE_ALIGNED(16, int16_t, y_round[QINDEX_RANGE][8]); - - // TODO(jingning): in progress of re-working the quantization. will decide - // if we want to deprecate the current use of y_quant. 
- DECLARE_ALIGNED(16, int16_t, y_quant_fp[QINDEX_RANGE][8]); - DECLARE_ALIGNED(16, int16_t, uv_quant_fp[QINDEX_RANGE][8]); - DECLARE_ALIGNED(16, int16_t, y_round_fp[QINDEX_RANGE][8]); - DECLARE_ALIGNED(16, int16_t, uv_round_fp[QINDEX_RANGE][8]); - - DECLARE_ALIGNED(16, int16_t, uv_quant[QINDEX_RANGE][8]); - DECLARE_ALIGNED(16, int16_t, uv_quant_shift[QINDEX_RANGE][8]); - DECLARE_ALIGNED(16, int16_t, uv_zbin[QINDEX_RANGE][8]); - DECLARE_ALIGNED(16, int16_t, uv_round[QINDEX_RANGE][8]); -} QUANTS; - -void vp10_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block, - const int16_t *scan, const int16_t *iscan); - -struct VP10_COMP; -struct VP10Common; - -void vp10_frame_init_quantizer(struct VP10_COMP *cpi); - -void vp10_init_plane_quantizers(struct VP10_COMP *cpi, MACROBLOCK *x); - -void vp10_init_quantizer(struct VP10_COMP *cpi); - -void vp10_set_quantizer(struct VP10Common *cm, int q); - -int vp10_quantizer_to_qindex(int quantizer); - -int vp10_qindex_to_quantizer(int qindex); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_QUANTIZE_H_ diff --git a/libs/libvpx/vp10/encoder/ratectrl.c b/libs/libvpx/vp10/encoder/ratectrl.c deleted file mode 100644 index 6068775942..0000000000 --- a/libs/libvpx/vp10/encoder/ratectrl.c +++ /dev/null @@ -1,1781 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include -#include -#include -#include -#include - -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/mem.h" -#include "vpx_ports/system_state.h" - -#include "vp10/common/alloccommon.h" -#include "vp10/encoder/aq_cyclicrefresh.h" -#include "vp10/common/common.h" -#include "vp10/common/entropymode.h" -#include "vp10/common/quant_common.h" -#include "vp10/common/seg_common.h" - -#include "vp10/encoder/encodemv.h" -#include "vp10/encoder/ratectrl.h" - -// Max rate target for 1080P and below encodes under normal circumstances -// (1920 * 1080 / (16 * 16)) * MAX_MB_RATE bits per MB -#define MAX_MB_RATE 250 -#define MAXRATE_1080P 2025000 - -#define DEFAULT_KF_BOOST 2000 -#define DEFAULT_GF_BOOST 2000 - -#define LIMIT_QRANGE_FOR_ALTREF_AND_KEY 1 - -#define MIN_BPB_FACTOR 0.005 -#define MAX_BPB_FACTOR 50 - -#define FRAME_OVERHEAD_BITS 200 - -#if CONFIG_VP9_HIGHBITDEPTH -#define ASSIGN_MINQ_TABLE(bit_depth, name) \ - do { \ - switch (bit_depth) { \ - case VPX_BITS_8: \ - name = name##_8; \ - break; \ - case VPX_BITS_10: \ - name = name##_10; \ - break; \ - case VPX_BITS_12: \ - name = name##_12; \ - break; \ - default: \ - assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10" \ - " or VPX_BITS_12"); \ - name = NULL; \ - } \ - } while (0) -#else -#define ASSIGN_MINQ_TABLE(bit_depth, name) \ - do { \ - (void) bit_depth; \ - name = name##_8; \ - } while (0) -#endif - -// Tables relating active max Q to active min Q -static int kf_low_motion_minq_8[QINDEX_RANGE]; -static int kf_high_motion_minq_8[QINDEX_RANGE]; -static int arfgf_low_motion_minq_8[QINDEX_RANGE]; -static int arfgf_high_motion_minq_8[QINDEX_RANGE]; -static int inter_minq_8[QINDEX_RANGE]; -static int rtc_minq_8[QINDEX_RANGE]; - -#if CONFIG_VP9_HIGHBITDEPTH -static int kf_low_motion_minq_10[QINDEX_RANGE]; -static int kf_high_motion_minq_10[QINDEX_RANGE]; -static int arfgf_low_motion_minq_10[QINDEX_RANGE]; -static int arfgf_high_motion_minq_10[QINDEX_RANGE]; 
-static int inter_minq_10[QINDEX_RANGE]; -static int rtc_minq_10[QINDEX_RANGE]; -static int kf_low_motion_minq_12[QINDEX_RANGE]; -static int kf_high_motion_minq_12[QINDEX_RANGE]; -static int arfgf_low_motion_minq_12[QINDEX_RANGE]; -static int arfgf_high_motion_minq_12[QINDEX_RANGE]; -static int inter_minq_12[QINDEX_RANGE]; -static int rtc_minq_12[QINDEX_RANGE]; -#endif - -static int gf_high = 2000; -static int gf_low = 400; -static int kf_high = 5000; -static int kf_low = 400; - -// Functions to compute the active minq lookup table entries based on a -// formulaic approach to facilitate easier adjustment of the Q tables. -// The formulae were derived from computing a 3rd order polynomial best -// fit to the original data (after plotting real maxq vs minq (not q index)) -static int get_minq_index(double maxq, double x3, double x2, double x1, - vpx_bit_depth_t bit_depth) { - int i; - const double minqtarget = VPXMIN(((x3 * maxq + x2) * maxq + x1) * maxq, maxq); - - // Special case handling to deal with the step from q2.0 - // down to lossless mode represented by q 1.0. 
- if (minqtarget <= 2.0) - return 0; - - for (i = 0; i < QINDEX_RANGE; i++) { - if (minqtarget <= vp10_convert_qindex_to_q(i, bit_depth)) - return i; - } - - return QINDEX_RANGE - 1; -} - -static void init_minq_luts(int *kf_low_m, int *kf_high_m, - int *arfgf_low, int *arfgf_high, - int *inter, int *rtc, vpx_bit_depth_t bit_depth) { - int i; - for (i = 0; i < QINDEX_RANGE; i++) { - const double maxq = vp10_convert_qindex_to_q(i, bit_depth); - kf_low_m[i] = get_minq_index(maxq, 0.000001, -0.0004, 0.150, bit_depth); - kf_high_m[i] = get_minq_index(maxq, 0.0000021, -0.00125, 0.55, bit_depth); - arfgf_low[i] = get_minq_index(maxq, 0.0000015, -0.0009, 0.30, bit_depth); - arfgf_high[i] = get_minq_index(maxq, 0.0000021, -0.00125, 0.55, bit_depth); - inter[i] = get_minq_index(maxq, 0.00000271, -0.00113, 0.90, bit_depth); - rtc[i] = get_minq_index(maxq, 0.00000271, -0.00113, 0.70, bit_depth); - } -} - -void vp10_rc_init_minq_luts(void) { - init_minq_luts(kf_low_motion_minq_8, kf_high_motion_minq_8, - arfgf_low_motion_minq_8, arfgf_high_motion_minq_8, - inter_minq_8, rtc_minq_8, VPX_BITS_8); -#if CONFIG_VP9_HIGHBITDEPTH - init_minq_luts(kf_low_motion_minq_10, kf_high_motion_minq_10, - arfgf_low_motion_minq_10, arfgf_high_motion_minq_10, - inter_minq_10, rtc_minq_10, VPX_BITS_10); - init_minq_luts(kf_low_motion_minq_12, kf_high_motion_minq_12, - arfgf_low_motion_minq_12, arfgf_high_motion_minq_12, - inter_minq_12, rtc_minq_12, VPX_BITS_12); -#endif -} - -// These functions use formulaic calculations to make playing with the -// quantizer tables easier. 
If necessary they can be replaced by lookup -// tables if and when things settle down in the experimental bitstream -double vp10_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth) { - // Convert the index to a real Q value (scaled down to match old Q values) -#if CONFIG_VP9_HIGHBITDEPTH - switch (bit_depth) { - case VPX_BITS_8: - return vp10_ac_quant(qindex, 0, bit_depth) / 4.0; - case VPX_BITS_10: - return vp10_ac_quant(qindex, 0, bit_depth) / 16.0; - case VPX_BITS_12: - return vp10_ac_quant(qindex, 0, bit_depth) / 64.0; - default: - assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); - return -1.0; - } -#else - return vp10_ac_quant(qindex, 0, bit_depth) / 4.0; -#endif -} - -int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex, - double correction_factor, - vpx_bit_depth_t bit_depth) { - const double q = vp10_convert_qindex_to_q(qindex, bit_depth); - int enumerator = frame_type == KEY_FRAME ? 2700000 : 1800000; - - assert(correction_factor <= MAX_BPB_FACTOR && - correction_factor >= MIN_BPB_FACTOR); - - // q based adjustment to baseline enumerator - enumerator += (int)(enumerator * q) >> 12; - return (int)(enumerator * correction_factor / q); -} - -int vp10_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs, - double correction_factor, - vpx_bit_depth_t bit_depth) { - const int bpm = (int)(vp10_rc_bits_per_mb(frame_type, q, correction_factor, - bit_depth)); - return VPXMAX(FRAME_OVERHEAD_BITS, - (int)((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS); -} - -int vp10_rc_clamp_pframe_target_size(const VP10_COMP *const cpi, int target) { - const RATE_CONTROL *rc = &cpi->rc; - const VP10EncoderConfig *oxcf = &cpi->oxcf; - const int min_frame_target = VPXMAX(rc->min_frame_bandwidth, - rc->avg_frame_bandwidth >> 5); - if (target < min_frame_target) - target = min_frame_target; - if (cpi->refresh_golden_frame && rc->is_src_frame_alt_ref) { - // If there is an active ARF at this location use the minimum - // bits on this frame even if it 
is a constructed arf. - // The active maximum quantizer insures that an appropriate - // number of bits will be spent if needed for constructed ARFs. - target = min_frame_target; - } - // Clip the frame target to the maximum allowed value. - if (target > rc->max_frame_bandwidth) - target = rc->max_frame_bandwidth; - if (oxcf->rc_max_inter_bitrate_pct) { - const int max_rate = rc->avg_frame_bandwidth * - oxcf->rc_max_inter_bitrate_pct / 100; - target = VPXMIN(target, max_rate); - } - return target; -} - -int vp10_rc_clamp_iframe_target_size(const VP10_COMP *const cpi, int target) { - const RATE_CONTROL *rc = &cpi->rc; - const VP10EncoderConfig *oxcf = &cpi->oxcf; - if (oxcf->rc_max_intra_bitrate_pct) { - const int max_rate = rc->avg_frame_bandwidth * - oxcf->rc_max_intra_bitrate_pct / 100; - target = VPXMIN(target, max_rate); - } - if (target > rc->max_frame_bandwidth) - target = rc->max_frame_bandwidth; - return target; -} - -// Update the buffer level: leaky bucket model. -static void update_buffer_level(VP10_COMP *cpi, int encoded_frame_size) { - const VP10_COMMON *const cm = &cpi->common; - RATE_CONTROL *const rc = &cpi->rc; - - // Non-viewable frames are a special case and are treated as pure overhead. - if (!cm->show_frame) { - rc->bits_off_target -= encoded_frame_size; - } else { - rc->bits_off_target += rc->avg_frame_bandwidth - encoded_frame_size; - } - - // Clip the buffer level to the maximum specified buffer size. 
- rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size); - rc->buffer_level = rc->bits_off_target; -} - -int vp10_rc_get_default_min_gf_interval( - int width, int height, double framerate) { - // Assume we do not need any constraint lower than 4K 20 fps - static const double factor_safe = 3840 * 2160 * 20.0; - const double factor = width * height * framerate; - const int default_interval = - clamp((int)(framerate * 0.125), MIN_GF_INTERVAL, MAX_GF_INTERVAL); - - if (factor <= factor_safe) - return default_interval; - else - return VPXMAX(default_interval, - (int)(MIN_GF_INTERVAL * factor / factor_safe + 0.5)); - // Note this logic makes: - // 4K24: 5 - // 4K30: 6 - // 4K60: 12 -} - -int vp10_rc_get_default_max_gf_interval(double framerate, int min_gf_interval) { - int interval = VPXMIN(MAX_GF_INTERVAL, (int)(framerate * 0.75)); - interval += (interval & 0x01); // Round to even value - return VPXMAX(interval, min_gf_interval); -} - -void vp10_rc_init(const VP10EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) { - int i; - - if (pass == 0 && oxcf->rc_mode == VPX_CBR) { - rc->avg_frame_qindex[KEY_FRAME] = oxcf->worst_allowed_q; - rc->avg_frame_qindex[INTER_FRAME] = oxcf->worst_allowed_q; - } else { - rc->avg_frame_qindex[KEY_FRAME] = (oxcf->worst_allowed_q + - oxcf->best_allowed_q) / 2; - rc->avg_frame_qindex[INTER_FRAME] = (oxcf->worst_allowed_q + - oxcf->best_allowed_q) / 2; - } - - rc->last_q[KEY_FRAME] = oxcf->best_allowed_q; - rc->last_q[INTER_FRAME] = oxcf->worst_allowed_q; - - rc->buffer_level = rc->starting_buffer_level; - rc->bits_off_target = rc->starting_buffer_level; - - rc->rolling_target_bits = rc->avg_frame_bandwidth; - rc->rolling_actual_bits = rc->avg_frame_bandwidth; - rc->long_rolling_target_bits = rc->avg_frame_bandwidth; - rc->long_rolling_actual_bits = rc->avg_frame_bandwidth; - - rc->total_actual_bits = 0; - rc->total_target_bits = 0; - rc->total_target_vs_actual = 0; - - rc->frames_since_key = 8; // Sensible default for first 
frame. - rc->this_key_frame_forced = 0; - rc->next_key_frame_forced = 0; - rc->source_alt_ref_pending = 0; - rc->source_alt_ref_active = 0; - - rc->frames_till_gf_update_due = 0; - rc->ni_av_qi = oxcf->worst_allowed_q; - rc->ni_tot_qi = 0; - rc->ni_frames = 0; - - rc->tot_q = 0.0; - rc->avg_q = vp10_convert_qindex_to_q(oxcf->worst_allowed_q, oxcf->bit_depth); - - for (i = 0; i < RATE_FACTOR_LEVELS; ++i) { - rc->rate_correction_factors[i] = 1.0; - } - - rc->min_gf_interval = oxcf->min_gf_interval; - rc->max_gf_interval = oxcf->max_gf_interval; - if (rc->min_gf_interval == 0) - rc->min_gf_interval = vp10_rc_get_default_min_gf_interval( - oxcf->width, oxcf->height, oxcf->init_framerate); - if (rc->max_gf_interval == 0) - rc->max_gf_interval = vp10_rc_get_default_max_gf_interval( - oxcf->init_framerate, rc->min_gf_interval); - rc->baseline_gf_interval = (rc->min_gf_interval + rc->max_gf_interval) / 2; -} - -int vp10_rc_drop_frame(VP10_COMP *cpi) { - const VP10EncoderConfig *oxcf = &cpi->oxcf; - RATE_CONTROL *const rc = &cpi->rc; - - if (!oxcf->drop_frames_water_mark) { - return 0; - } else { - if (rc->buffer_level < 0) { - // Always drop if buffer is below 0. - return 1; - } else { - // If buffer is below drop_mark, for now just drop every other frame - // (starting with the next frame) until it increases back over drop_mark. 
- int drop_mark = (int)(oxcf->drop_frames_water_mark * - rc->optimal_buffer_level / 100); - if ((rc->buffer_level > drop_mark) && - (rc->decimation_factor > 0)) { - --rc->decimation_factor; - } else if (rc->buffer_level <= drop_mark && - rc->decimation_factor == 0) { - rc->decimation_factor = 1; - } - if (rc->decimation_factor > 0) { - if (rc->decimation_count > 0) { - --rc->decimation_count; - return 1; - } else { - rc->decimation_count = rc->decimation_factor; - return 0; - } - } else { - rc->decimation_count = 0; - return 0; - } - } - } -} - -static double get_rate_correction_factor(const VP10_COMP *cpi) { - const RATE_CONTROL *const rc = &cpi->rc; - double rcf; - - if (cpi->common.frame_type == KEY_FRAME) { - rcf = rc->rate_correction_factors[KF_STD]; - } else if (cpi->oxcf.pass == 2) { - RATE_FACTOR_LEVEL rf_lvl = - cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index]; - rcf = rc->rate_correction_factors[rf_lvl]; - } else { - if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) && - !rc->is_src_frame_alt_ref && - (cpi->oxcf.rc_mode != VPX_CBR || cpi->oxcf.gf_cbr_boost_pct > 20)) - rcf = rc->rate_correction_factors[GF_ARF_STD]; - else - rcf = rc->rate_correction_factors[INTER_NORMAL]; - } - rcf *= rcf_mult[rc->frame_size_selector]; - return fclamp(rcf, MIN_BPB_FACTOR, MAX_BPB_FACTOR); -} - -static void set_rate_correction_factor(VP10_COMP *cpi, double factor) { - RATE_CONTROL *const rc = &cpi->rc; - - // Normalize RCF to account for the size-dependent scaling factor. 
- factor /= rcf_mult[cpi->rc.frame_size_selector]; - - factor = fclamp(factor, MIN_BPB_FACTOR, MAX_BPB_FACTOR); - - if (cpi->common.frame_type == KEY_FRAME) { - rc->rate_correction_factors[KF_STD] = factor; - } else if (cpi->oxcf.pass == 2) { - RATE_FACTOR_LEVEL rf_lvl = - cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index]; - rc->rate_correction_factors[rf_lvl] = factor; - } else { - if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) && - !rc->is_src_frame_alt_ref && - (cpi->oxcf.rc_mode != VPX_CBR || cpi->oxcf.gf_cbr_boost_pct > 20)) - rc->rate_correction_factors[GF_ARF_STD] = factor; - else - rc->rate_correction_factors[INTER_NORMAL] = factor; - } -} - -void vp10_rc_update_rate_correction_factors(VP10_COMP *cpi) { - const VP10_COMMON *const cm = &cpi->common; - int correction_factor = 100; - double rate_correction_factor = get_rate_correction_factor(cpi); - double adjustment_limit; - - int projected_size_based_on_q = 0; - - // Do not update the rate factors for arf overlay frames. - if (cpi->rc.is_src_frame_alt_ref) - return; - - // Clear down mmx registers to allow floating point in what follows - vpx_clear_system_state(); - - // Work out how big we would have expected the frame to be at this Q given - // the current correction factor. - // Stay in double to avoid int overflow when values are large - if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cpi->common.seg.enabled) { - projected_size_based_on_q = - vp10_cyclic_refresh_estimate_bits_at_q(cpi, rate_correction_factor); - } else { - projected_size_based_on_q = vp10_estimate_bits_at_q(cpi->common.frame_type, - cm->base_qindex, - cm->MBs, - rate_correction_factor, - cm->bit_depth); - } - // Work out a size correction factor. - if (projected_size_based_on_q > FRAME_OVERHEAD_BITS) - correction_factor = (int)((100 * (int64_t)cpi->rc.projected_frame_size) / - projected_size_based_on_q); - - // More heavily damped adjustment used if we have been oscillating either side - // of target. 
- adjustment_limit = 0.25 + - 0.5 * VPXMIN(1, fabs(log10(0.01 * correction_factor))); - - cpi->rc.q_2_frame = cpi->rc.q_1_frame; - cpi->rc.q_1_frame = cm->base_qindex; - cpi->rc.rc_2_frame = cpi->rc.rc_1_frame; - if (correction_factor > 110) - cpi->rc.rc_1_frame = -1; - else if (correction_factor < 90) - cpi->rc.rc_1_frame = 1; - else - cpi->rc.rc_1_frame = 0; - - if (correction_factor > 102) { - // We are not already at the worst allowable quality - correction_factor = (int)(100 + ((correction_factor - 100) * - adjustment_limit)); - rate_correction_factor = (rate_correction_factor * correction_factor) / 100; - // Keep rate_correction_factor within limits - if (rate_correction_factor > MAX_BPB_FACTOR) - rate_correction_factor = MAX_BPB_FACTOR; - } else if (correction_factor < 99) { - // We are not already at the best allowable quality - correction_factor = (int)(100 - ((100 - correction_factor) * - adjustment_limit)); - rate_correction_factor = (rate_correction_factor * correction_factor) / 100; - - // Keep rate_correction_factor within limits - if (rate_correction_factor < MIN_BPB_FACTOR) - rate_correction_factor = MIN_BPB_FACTOR; - } - - set_rate_correction_factor(cpi, rate_correction_factor); -} - - -int vp10_rc_regulate_q(const VP10_COMP *cpi, int target_bits_per_frame, - int active_best_quality, int active_worst_quality) { - const VP10_COMMON *const cm = &cpi->common; - int q = active_worst_quality; - int last_error = INT_MAX; - int i, target_bits_per_mb, bits_per_mb_at_this_q; - const double correction_factor = get_rate_correction_factor(cpi); - - // Calculate required scaling factor based on target frame size and size of - // frame produced using previous Q. 
- target_bits_per_mb = - ((uint64_t)target_bits_per_frame << BPER_MB_NORMBITS) / cm->MBs; - - i = active_best_quality; - - do { - if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) { - bits_per_mb_at_this_q = - (int)vp10_cyclic_refresh_rc_bits_per_mb(cpi, i, correction_factor); - } else { - bits_per_mb_at_this_q = (int)vp10_rc_bits_per_mb(cm->frame_type, i, - correction_factor, - cm->bit_depth); - } - - if (bits_per_mb_at_this_q <= target_bits_per_mb) { - if ((target_bits_per_mb - bits_per_mb_at_this_q) <= last_error) - q = i; - else - q = i - 1; - - break; - } else { - last_error = bits_per_mb_at_this_q - target_bits_per_mb; - } - } while (++i <= active_worst_quality); - - // In CBR mode, this makes sure q is between oscillating Qs to prevent - // resonance. - if (cpi->oxcf.rc_mode == VPX_CBR && - (cpi->rc.rc_1_frame * cpi->rc.rc_2_frame == -1) && - cpi->rc.q_1_frame != cpi->rc.q_2_frame) { - q = clamp(q, VPXMIN(cpi->rc.q_1_frame, cpi->rc.q_2_frame), - VPXMAX(cpi->rc.q_1_frame, cpi->rc.q_2_frame)); - } - return q; -} - -static int get_active_quality(int q, int gfu_boost, int low, int high, - int *low_motion_minq, int *high_motion_minq) { - if (gfu_boost > high) { - return low_motion_minq[q]; - } else if (gfu_boost < low) { - return high_motion_minq[q]; - } else { - const int gap = high - low; - const int offset = high - gfu_boost; - const int qdiff = high_motion_minq[q] - low_motion_minq[q]; - const int adjustment = ((offset * qdiff) + (gap >> 1)) / gap; - return low_motion_minq[q] + adjustment; - } -} - -static int get_kf_active_quality(const RATE_CONTROL *const rc, int q, - vpx_bit_depth_t bit_depth) { - int *kf_low_motion_minq; - int *kf_high_motion_minq; - ASSIGN_MINQ_TABLE(bit_depth, kf_low_motion_minq); - ASSIGN_MINQ_TABLE(bit_depth, kf_high_motion_minq); - return get_active_quality(q, rc->kf_boost, kf_low, kf_high, - kf_low_motion_minq, kf_high_motion_minq); -} - -static int get_gf_active_quality(const RATE_CONTROL *const rc, int q, - 
vpx_bit_depth_t bit_depth) { - int *arfgf_low_motion_minq; - int *arfgf_high_motion_minq; - ASSIGN_MINQ_TABLE(bit_depth, arfgf_low_motion_minq); - ASSIGN_MINQ_TABLE(bit_depth, arfgf_high_motion_minq); - return get_active_quality(q, rc->gfu_boost, gf_low, gf_high, - arfgf_low_motion_minq, arfgf_high_motion_minq); -} - -static int calc_active_worst_quality_one_pass_vbr(const VP10_COMP *cpi) { - const RATE_CONTROL *const rc = &cpi->rc; - const unsigned int curr_frame = cpi->common.current_video_frame; - int active_worst_quality; - - if (cpi->common.frame_type == KEY_FRAME) { - active_worst_quality = curr_frame == 0 ? rc->worst_quality - : rc->last_q[KEY_FRAME] * 2; - } else { - if (!rc->is_src_frame_alt_ref && - (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) { - active_worst_quality = curr_frame == 1 ? rc->last_q[KEY_FRAME] * 5 / 4 - : rc->last_q[INTER_FRAME]; - } else { - active_worst_quality = curr_frame == 1 ? rc->last_q[KEY_FRAME] * 2 - : rc->last_q[INTER_FRAME] * 2; - } - } - return VPXMIN(active_worst_quality, rc->worst_quality); -} - -// Adjust active_worst_quality level based on buffer level. -static int calc_active_worst_quality_one_pass_cbr(const VP10_COMP *cpi) { - // Adjust active_worst_quality: If buffer is above the optimal/target level, - // bring active_worst_quality down depending on fullness of buffer. - // If buffer is below the optimal level, let the active_worst_quality go from - // ambient Q (at buffer = optimal level) to worst_quality level - // (at buffer = critical level). - const VP10_COMMON *const cm = &cpi->common; - const RATE_CONTROL *rc = &cpi->rc; - // Buffer level below which we push active_worst to worst_quality. 
- int64_t critical_level = rc->optimal_buffer_level >> 3; - int64_t buff_lvl_step = 0; - int adjustment = 0; - int active_worst_quality; - int ambient_qp; - if (cm->frame_type == KEY_FRAME) - return rc->worst_quality; - // For ambient_qp we use minimum of avg_frame_qindex[KEY_FRAME/INTER_FRAME] - // for the first few frames following key frame. These are both initialized - // to worst_quality and updated with (3/4, 1/4) average in postencode_update. - // So for first few frames following key, the qp of that key frame is weighted - // into the active_worst_quality setting. - ambient_qp = (cm->current_video_frame < 5) ? - VPXMIN(rc->avg_frame_qindex[INTER_FRAME], - rc->avg_frame_qindex[KEY_FRAME]) : - rc->avg_frame_qindex[INTER_FRAME]; - active_worst_quality = VPXMIN(rc->worst_quality, ambient_qp * 5 / 4); - if (rc->buffer_level > rc->optimal_buffer_level) { - // Adjust down. - // Maximum limit for down adjustment, ~30%. - int max_adjustment_down = active_worst_quality / 3; - if (max_adjustment_down) { - buff_lvl_step = ((rc->maximum_buffer_size - - rc->optimal_buffer_level) / max_adjustment_down); - if (buff_lvl_step) - adjustment = (int)((rc->buffer_level - rc->optimal_buffer_level) / - buff_lvl_step); - active_worst_quality -= adjustment; - } - } else if (rc->buffer_level > critical_level) { - // Adjust up from ambient Q. - if (critical_level) { - buff_lvl_step = (rc->optimal_buffer_level - critical_level); - if (buff_lvl_step) { - adjustment = (int)((rc->worst_quality - ambient_qp) * - (rc->optimal_buffer_level - rc->buffer_level) / - buff_lvl_step); - } - active_worst_quality = ambient_qp + adjustment; - } - } else { - // Set to worst_quality if buffer is below critical level. 
- active_worst_quality = rc->worst_quality; - } - return active_worst_quality; -} - -static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi, - int *bottom_index, - int *top_index) { - const VP10_COMMON *const cm = &cpi->common; - const RATE_CONTROL *const rc = &cpi->rc; - int active_best_quality; - int active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi); - int q; - int *rtc_minq; - ASSIGN_MINQ_TABLE(cm->bit_depth, rtc_minq); - - if (frame_is_intra_only(cm)) { - active_best_quality = rc->best_quality; - // Handle the special case for key frames forced when we have reached - // the maximum key frame interval. Here force the Q to a range - // based on the ambient Q to reduce the risk of popping. - if (rc->this_key_frame_forced) { - int qindex = rc->last_boosted_qindex; - double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth); - int delta_qindex = vp10_compute_qdelta(rc, last_boosted_q, - (last_boosted_q * 0.75), - cm->bit_depth); - active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality); - } else if (cm->current_video_frame > 0) { - // not first frame of one pass and kf_boost is set - double q_adj_factor = 1.0; - double q_val; - - active_best_quality = - get_kf_active_quality(rc, rc->avg_frame_qindex[KEY_FRAME], - cm->bit_depth); - - // Allow somewhat lower kf minq with small image formats. - if ((cm->width * cm->height) <= (352 * 288)) { - q_adj_factor -= 0.25; - } - - // Convert the adjustment factor to a qindex delta - // on active_best_quality. - q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth); - active_best_quality += vp10_compute_qdelta(rc, q_val, - q_val * q_adj_factor, - cm->bit_depth); - } - } else if (!rc->is_src_frame_alt_ref && - (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) { - // Use the lower of active_worst_quality and recent - // average Q as basis for GF/ARF best Q limit unless last frame was - // a key frame. 
- if (rc->frames_since_key > 1 && - rc->avg_frame_qindex[INTER_FRAME] < active_worst_quality) { - q = rc->avg_frame_qindex[INTER_FRAME]; - } else { - q = active_worst_quality; - } - active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth); - } else { - // Use the lower of active_worst_quality and recent/average Q. - if (cm->current_video_frame > 1) { - if (rc->avg_frame_qindex[INTER_FRAME] < active_worst_quality) - active_best_quality = rtc_minq[rc->avg_frame_qindex[INTER_FRAME]]; - else - active_best_quality = rtc_minq[active_worst_quality]; - } else { - if (rc->avg_frame_qindex[KEY_FRAME] < active_worst_quality) - active_best_quality = rtc_minq[rc->avg_frame_qindex[KEY_FRAME]]; - else - active_best_quality = rtc_minq[active_worst_quality]; - } - } - - // Clip the active best and worst quality values to limits - active_best_quality = clamp(active_best_quality, - rc->best_quality, rc->worst_quality); - active_worst_quality = clamp(active_worst_quality, - active_best_quality, rc->worst_quality); - - *top_index = active_worst_quality; - *bottom_index = active_best_quality; - -#if LIMIT_QRANGE_FOR_ALTREF_AND_KEY - // Limit Q range for the adaptive loop. - if (cm->frame_type == KEY_FRAME && - !rc->this_key_frame_forced && - !(cm->current_video_frame == 0)) { - int qdelta = 0; - vpx_clear_system_state(); - qdelta = vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, - active_worst_quality, 2.0, - cm->bit_depth); - *top_index = active_worst_quality + qdelta; - *top_index = (*top_index > *bottom_index) ? 
*top_index : *bottom_index; - } -#endif - - // Special case code to try and match quality with forced key frames - if (cm->frame_type == KEY_FRAME && rc->this_key_frame_forced) { - q = rc->last_boosted_qindex; - } else { - q = vp10_rc_regulate_q(cpi, rc->this_frame_target, - active_best_quality, active_worst_quality); - if (q > *top_index) { - // Special case when we are targeting the max allowed rate - if (rc->this_frame_target >= rc->max_frame_bandwidth) - *top_index = q; - else - q = *top_index; - } - } - assert(*top_index <= rc->worst_quality && - *top_index >= rc->best_quality); - assert(*bottom_index <= rc->worst_quality && - *bottom_index >= rc->best_quality); - assert(q <= rc->worst_quality && q >= rc->best_quality); - return q; -} - -static int get_active_cq_level(const RATE_CONTROL *rc, - const VP10EncoderConfig *const oxcf) { - static const double cq_adjust_threshold = 0.1; - int active_cq_level = oxcf->cq_level; - if (oxcf->rc_mode == VPX_CQ && - rc->total_target_bits > 0) { - const double x = (double)rc->total_actual_bits / rc->total_target_bits; - if (x < cq_adjust_threshold) { - active_cq_level = (int)(active_cq_level * x / cq_adjust_threshold); - } - } - return active_cq_level; -} - -static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi, - int *bottom_index, - int *top_index) { - const VP10_COMMON *const cm = &cpi->common; - const RATE_CONTROL *const rc = &cpi->rc; - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - const int cq_level = get_active_cq_level(rc, oxcf); - int active_best_quality; - int active_worst_quality = calc_active_worst_quality_one_pass_vbr(cpi); - int q; - int *inter_minq; - ASSIGN_MINQ_TABLE(cm->bit_depth, inter_minq); - - if (frame_is_intra_only(cm)) { - if (oxcf->rc_mode == VPX_Q) { - int qindex = cq_level; - double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth); - int delta_qindex = vp10_compute_qdelta(rc, q, q * 0.25, - cm->bit_depth); - active_best_quality = VPXMAX(qindex + delta_qindex, 
rc->best_quality); - } else if (rc->this_key_frame_forced) { - int qindex = rc->last_boosted_qindex; - double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth); - int delta_qindex = vp10_compute_qdelta(rc, last_boosted_q, - last_boosted_q * 0.75, - cm->bit_depth); - active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality); - } else { - // not first frame of one pass and kf_boost is set - double q_adj_factor = 1.0; - double q_val; - - active_best_quality = - get_kf_active_quality(rc, rc->avg_frame_qindex[KEY_FRAME], - cm->bit_depth); - - // Allow somewhat lower kf minq with small image formats. - if ((cm->width * cm->height) <= (352 * 288)) { - q_adj_factor -= 0.25; - } - - // Convert the adjustment factor to a qindex delta - // on active_best_quality. - q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth); - active_best_quality += vp10_compute_qdelta(rc, q_val, - q_val * q_adj_factor, - cm->bit_depth); - } - } else if (!rc->is_src_frame_alt_ref && - (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) { - // Use the lower of active_worst_quality and recent - // average Q as basis for GF/ARF best Q limit unless last frame was - // a key frame. - if (rc->frames_since_key > 1 && - rc->avg_frame_qindex[INTER_FRAME] < active_worst_quality) { - q = rc->avg_frame_qindex[INTER_FRAME]; - } else { - q = rc->avg_frame_qindex[KEY_FRAME]; - } - // For constrained quality dont allow Q less than the cq level - if (oxcf->rc_mode == VPX_CQ) { - if (q < cq_level) - q = cq_level; - - active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth); - - // Constrained quality use slightly lower active best. 
- active_best_quality = active_best_quality * 15 / 16; - - } else if (oxcf->rc_mode == VPX_Q) { - int qindex = cq_level; - double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth); - int delta_qindex; - if (cpi->refresh_alt_ref_frame) - delta_qindex = vp10_compute_qdelta(rc, q, q * 0.40, cm->bit_depth); - else - delta_qindex = vp10_compute_qdelta(rc, q, q * 0.50, cm->bit_depth); - active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality); - } else { - active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth); - } - } else { - if (oxcf->rc_mode == VPX_Q) { - int qindex = cq_level; - double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth); - double delta_rate[FIXED_GF_INTERVAL] = - {0.50, 1.0, 0.85, 1.0, 0.70, 1.0, 0.85, 1.0}; - int delta_qindex = - vp10_compute_qdelta(rc, q, - q * delta_rate[cm->current_video_frame % - FIXED_GF_INTERVAL], cm->bit_depth); - active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality); - } else { - // Use the lower of active_worst_quality and recent/average Q. - if (cm->current_video_frame > 1) - active_best_quality = inter_minq[rc->avg_frame_qindex[INTER_FRAME]]; - else - active_best_quality = inter_minq[rc->avg_frame_qindex[KEY_FRAME]]; - // For the constrained quality mode we don't want - // q to fall below the cq level. - if ((oxcf->rc_mode == VPX_CQ) && - (active_best_quality < cq_level)) { - active_best_quality = cq_level; - } - } - } - - // Clip the active best and worst quality values to limits - active_best_quality = clamp(active_best_quality, - rc->best_quality, rc->worst_quality); - active_worst_quality = clamp(active_worst_quality, - active_best_quality, rc->worst_quality); - - *top_index = active_worst_quality; - *bottom_index = active_best_quality; - -#if LIMIT_QRANGE_FOR_ALTREF_AND_KEY - { - int qdelta = 0; - vpx_clear_system_state(); - - // Limit Q range for the adaptive loop. 
- if (cm->frame_type == KEY_FRAME && - !rc->this_key_frame_forced && - !(cm->current_video_frame == 0)) { - qdelta = vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, - active_worst_quality, 2.0, - cm->bit_depth); - } else if (!rc->is_src_frame_alt_ref && - (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) { - qdelta = vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, - active_worst_quality, 1.75, - cm->bit_depth); - } - *top_index = active_worst_quality + qdelta; - *top_index = (*top_index > *bottom_index) ? *top_index : *bottom_index; - } -#endif - - if (oxcf->rc_mode == VPX_Q) { - q = active_best_quality; - // Special case code to try and match quality with forced key frames - } else if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced) { - q = rc->last_boosted_qindex; - } else { - q = vp10_rc_regulate_q(cpi, rc->this_frame_target, - active_best_quality, active_worst_quality); - if (q > *top_index) { - // Special case when we are targeting the max allowed rate - if (rc->this_frame_target >= rc->max_frame_bandwidth) - *top_index = q; - else - q = *top_index; - } - } - - assert(*top_index <= rc->worst_quality && - *top_index >= rc->best_quality); - assert(*bottom_index <= rc->worst_quality && - *bottom_index >= rc->best_quality); - assert(q <= rc->worst_quality && q >= rc->best_quality); - return q; -} - -int vp10_frame_type_qdelta(const VP10_COMP *cpi, int rf_level, int q) { - static const double rate_factor_deltas[RATE_FACTOR_LEVELS] = { - 1.00, // INTER_NORMAL - 1.00, // INTER_HIGH - 1.50, // GF_ARF_LOW - 1.75, // GF_ARF_STD - 2.00, // KF_STD - }; - static const FRAME_TYPE frame_type[RATE_FACTOR_LEVELS] = - {INTER_FRAME, INTER_FRAME, INTER_FRAME, INTER_FRAME, KEY_FRAME}; - const VP10_COMMON *const cm = &cpi->common; - int qdelta = vp10_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level], - q, rate_factor_deltas[rf_level], - cm->bit_depth); - return qdelta; -} - -#define STATIC_MOTION_THRESH 95 -static int 
rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi, - int *bottom_index, - int *top_index) { - const VP10_COMMON *const cm = &cpi->common; - const RATE_CONTROL *const rc = &cpi->rc; - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - const GF_GROUP *gf_group = &cpi->twopass.gf_group; - const int cq_level = get_active_cq_level(rc, oxcf); - int active_best_quality; - int active_worst_quality = cpi->twopass.active_worst_quality; - int q; - int *inter_minq; - ASSIGN_MINQ_TABLE(cm->bit_depth, inter_minq); - - if (frame_is_intra_only(cm)) { - // Handle the special case for key frames forced when we have reached - // the maximum key frame interval. Here force the Q to a range - // based on the ambient Q to reduce the risk of popping. - if (rc->this_key_frame_forced) { - double last_boosted_q; - int delta_qindex; - int qindex; - - if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) { - qindex = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex); - active_best_quality = qindex; - last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth); - delta_qindex = vp10_compute_qdelta(rc, last_boosted_q, - last_boosted_q * 1.25, - cm->bit_depth); - active_worst_quality = - VPXMIN(qindex + delta_qindex, active_worst_quality); - } else { - qindex = rc->last_boosted_qindex; - last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth); - delta_qindex = vp10_compute_qdelta(rc, last_boosted_q, - last_boosted_q * 0.75, - cm->bit_depth); - active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality); - } - } else { - // Not forced keyframe. - double q_adj_factor = 1.0; - double q_val; - // Baseline value derived from cpi->active_worst_quality and kf boost. - active_best_quality = get_kf_active_quality(rc, active_worst_quality, - cm->bit_depth); - - // Allow somewhat lower kf minq with small image formats. - if ((cm->width * cm->height) <= (352 * 288)) { - q_adj_factor -= 0.25; - } - - // Make a further adjustment based on the kf zero motion measure. 
- q_adj_factor += 0.05 - (0.001 * (double)cpi->twopass.kf_zeromotion_pct); - - // Convert the adjustment factor to a qindex delta - // on active_best_quality. - q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth); - active_best_quality += vp10_compute_qdelta(rc, q_val, - q_val * q_adj_factor, - cm->bit_depth); - } - } else if (!rc->is_src_frame_alt_ref && - (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) { - // Use the lower of active_worst_quality and recent - // average Q as basis for GF/ARF best Q limit unless last frame was - // a key frame. - if (rc->frames_since_key > 1 && - rc->avg_frame_qindex[INTER_FRAME] < active_worst_quality) { - q = rc->avg_frame_qindex[INTER_FRAME]; - } else { - q = active_worst_quality; - } - // For constrained quality dont allow Q less than the cq level - if (oxcf->rc_mode == VPX_CQ) { - if (q < cq_level) - q = cq_level; - - active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth); - - // Constrained quality use slightly lower active best. - active_best_quality = active_best_quality * 15 / 16; - - } else if (oxcf->rc_mode == VPX_Q) { - if (!cpi->refresh_alt_ref_frame) { - active_best_quality = cq_level; - } else { - active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth); - - // Modify best quality for second level arfs. For mode VPX_Q this - // becomes the baseline frame q. - if (gf_group->rf_level[gf_group->index] == GF_ARF_LOW) - active_best_quality = (active_best_quality + cq_level + 1) / 2; - } - } else { - active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth); - } - } else { - if (oxcf->rc_mode == VPX_Q) { - active_best_quality = cq_level; - } else { - active_best_quality = inter_minq[active_worst_quality]; - - // For the constrained quality mode we don't want - // q to fall below the cq level. 
- if ((oxcf->rc_mode == VPX_CQ) && - (active_best_quality < cq_level)) { - active_best_quality = cq_level; - } - } - } - - // Extension to max or min Q if undershoot or overshoot is outside - // the permitted range. - if ((cpi->oxcf.rc_mode != VPX_Q) && - (cpi->twopass.gf_zeromotion_pct < VLOW_MOTION_THRESHOLD)) { - if (frame_is_intra_only(cm) || - (!rc->is_src_frame_alt_ref && - (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))) { - active_best_quality -= - (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast); - active_worst_quality += (cpi->twopass.extend_maxq / 2); - } else { - active_best_quality -= - (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast) / 2; - active_worst_quality += cpi->twopass.extend_maxq; - } - } - -#if LIMIT_QRANGE_FOR_ALTREF_AND_KEY - vpx_clear_system_state(); - // Static forced key frames Q restrictions dealt with elsewhere. - if (!(frame_is_intra_only(cm)) || - !rc->this_key_frame_forced || - (cpi->twopass.last_kfgroup_zeromotion_pct < STATIC_MOTION_THRESH)) { - int qdelta = vp10_frame_type_qdelta(cpi, gf_group->rf_level[gf_group->index], - active_worst_quality); - active_worst_quality = VPXMAX(active_worst_quality + qdelta, - active_best_quality); - } -#endif - - // Modify active_best_quality for downscaled normal frames. - if (rc->frame_size_selector != UNSCALED && !frame_is_kf_gf_arf(cpi)) { - int qdelta = vp10_compute_qdelta_by_rate(rc, cm->frame_type, - active_best_quality, 2.0, - cm->bit_depth); - active_best_quality = - VPXMAX(active_best_quality + qdelta, rc->best_quality); - } - - active_best_quality = clamp(active_best_quality, - rc->best_quality, rc->worst_quality); - active_worst_quality = clamp(active_worst_quality, - active_best_quality, rc->worst_quality); - - if (oxcf->rc_mode == VPX_Q) { - q = active_best_quality; - // Special case code to try and match quality with forced key frames. 
- } else if (frame_is_intra_only(cm) && rc->this_key_frame_forced) { - // If static since last kf use better of last boosted and last kf q. - if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) { - q = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex); - } else { - q = rc->last_boosted_qindex; - } - } else { - q = vp10_rc_regulate_q(cpi, rc->this_frame_target, - active_best_quality, active_worst_quality); - if (q > active_worst_quality) { - // Special case when we are targeting the max allowed rate. - if (rc->this_frame_target >= rc->max_frame_bandwidth) - active_worst_quality = q; - else - q = active_worst_quality; - } - } - clamp(q, active_best_quality, active_worst_quality); - - *top_index = active_worst_quality; - *bottom_index = active_best_quality; - - assert(*top_index <= rc->worst_quality && - *top_index >= rc->best_quality); - assert(*bottom_index <= rc->worst_quality && - *bottom_index >= rc->best_quality); - assert(q <= rc->worst_quality && q >= rc->best_quality); - return q; -} - -int vp10_rc_pick_q_and_bounds(const VP10_COMP *cpi, - int *bottom_index, int *top_index) { - int q; - if (cpi->oxcf.pass == 0) { - if (cpi->oxcf.rc_mode == VPX_CBR) - q = rc_pick_q_and_bounds_one_pass_cbr(cpi, bottom_index, top_index); - else - q = rc_pick_q_and_bounds_one_pass_vbr(cpi, bottom_index, top_index); - } else { - q = rc_pick_q_and_bounds_two_pass(cpi, bottom_index, top_index); - } - - return q; -} - -void vp10_rc_compute_frame_size_bounds(const VP10_COMP *cpi, - int frame_target, - int *frame_under_shoot_limit, - int *frame_over_shoot_limit) { - if (cpi->oxcf.rc_mode == VPX_Q) { - *frame_under_shoot_limit = 0; - *frame_over_shoot_limit = INT_MAX; - } else { - // For very small rate targets where the fractional adjustment - // may be tiny make sure there is at least a minimum range. 
- const int tolerance = (cpi->sf.recode_tolerance * frame_target) / 100; - *frame_under_shoot_limit = VPXMAX(frame_target - tolerance - 200, 0); - *frame_over_shoot_limit = VPXMIN(frame_target + tolerance + 200, - cpi->rc.max_frame_bandwidth); - } -} - -void vp10_rc_set_frame_target(VP10_COMP *cpi, int target) { - const VP10_COMMON *const cm = &cpi->common; - RATE_CONTROL *const rc = &cpi->rc; - - rc->this_frame_target = target; - - // Modify frame size target when down-scaling. - if (cpi->oxcf.resize_mode == RESIZE_DYNAMIC && - rc->frame_size_selector != UNSCALED) - rc->this_frame_target = (int)(rc->this_frame_target - * rate_thresh_mult[rc->frame_size_selector]); - - // Target rate per SB64 (including partial SB64s. - rc->sb64_target_rate = ((int64_t)rc->this_frame_target * 64 * 64) / - (cm->width * cm->height); -} - -static void update_alt_ref_frame_stats(VP10_COMP *cpi) { - // this frame refreshes means next frames don't unless specified by user - RATE_CONTROL *const rc = &cpi->rc; - rc->frames_since_golden = 0; - - // Mark the alt ref as done (setting to 0 means no further alt refs pending). - rc->source_alt_ref_pending = 0; - - // Set the alternate reference frame active flag - rc->source_alt_ref_active = 1; -} - -static void update_golden_frame_stats(VP10_COMP *cpi) { - RATE_CONTROL *const rc = &cpi->rc; - - // Update the Golden frame usage counts. - if (cpi->refresh_golden_frame) { - // this frame refreshes means next frames don't unless specified by user - rc->frames_since_golden = 0; - - // If we are not using alt ref in the up and coming group clear the arf - // active flag. In multi arf group case, if the index is not 0 then - // we are overlaying a mid group arf so should not reset the flag. 
- if (cpi->oxcf.pass == 2) { - if (!rc->source_alt_ref_pending && (cpi->twopass.gf_group.index == 0)) - rc->source_alt_ref_active = 0; - } else if (!rc->source_alt_ref_pending) { - rc->source_alt_ref_active = 0; - } - - // Decrement count down till next gf - if (rc->frames_till_gf_update_due > 0) - rc->frames_till_gf_update_due--; - - } else if (!cpi->refresh_alt_ref_frame) { - // Decrement count down till next gf - if (rc->frames_till_gf_update_due > 0) - rc->frames_till_gf_update_due--; - - rc->frames_since_golden++; - } -} - -void vp10_rc_postencode_update(VP10_COMP *cpi, uint64_t bytes_used) { - const VP10_COMMON *const cm = &cpi->common; - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - RATE_CONTROL *const rc = &cpi->rc; - const int qindex = cm->base_qindex; - - if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) { - vp10_cyclic_refresh_postencode(cpi); - } - - // Update rate control heuristics - rc->projected_frame_size = (int)(bytes_used << 3); - - // Post encode loop adjustment of Q prediction. - vp10_rc_update_rate_correction_factors(cpi); - - // Keep a record of last Q and ambient average Q. - if (cm->frame_type == KEY_FRAME) { - rc->last_q[KEY_FRAME] = qindex; - rc->avg_frame_qindex[KEY_FRAME] = - ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[KEY_FRAME] + qindex, 2); - } else { - if (!rc->is_src_frame_alt_ref && - !(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) { - rc->last_q[INTER_FRAME] = qindex; - rc->avg_frame_qindex[INTER_FRAME] = - ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2); - rc->ni_frames++; - rc->tot_q += vp10_convert_qindex_to_q(qindex, cm->bit_depth); - rc->avg_q = rc->tot_q / rc->ni_frames; - // Calculate the average Q for normal inter frames (not key or GFU - // frames). - rc->ni_tot_qi += qindex; - rc->ni_av_qi = rc->ni_tot_qi / rc->ni_frames; - } - } - - // Keep record of last boosted (KF/KF/ARF) Q value. - // If the current frame is coded at a lower Q then we also update it. 
- // If all mbs in this group are skipped only update if the Q value is - // better than that already stored. - // This is used to help set quality in forced key frames to reduce popping - if ((qindex < rc->last_boosted_qindex) || - (cm->frame_type == KEY_FRAME) || - (!rc->constrained_gf_group && - (cpi->refresh_alt_ref_frame || - (cpi->refresh_golden_frame && !rc->is_src_frame_alt_ref)))) { - rc->last_boosted_qindex = qindex; - } - if (cm->frame_type == KEY_FRAME) - rc->last_kf_qindex = qindex; - - update_buffer_level(cpi, rc->projected_frame_size); - - // Rolling monitors of whether we are over or underspending used to help - // regulate min and Max Q in two pass. - if (cm->frame_type != KEY_FRAME) { - rc->rolling_target_bits = ROUND_POWER_OF_TWO( - rc->rolling_target_bits * 3 + rc->this_frame_target, 2); - rc->rolling_actual_bits = ROUND_POWER_OF_TWO( - rc->rolling_actual_bits * 3 + rc->projected_frame_size, 2); - rc->long_rolling_target_bits = ROUND_POWER_OF_TWO( - rc->long_rolling_target_bits * 31 + rc->this_frame_target, 5); - rc->long_rolling_actual_bits = ROUND_POWER_OF_TWO( - rc->long_rolling_actual_bits * 31 + rc->projected_frame_size, 5); - } - - // Actual bits spent - rc->total_actual_bits += rc->projected_frame_size; - rc->total_target_bits += cm->show_frame ? rc->avg_frame_bandwidth : 0; - - rc->total_target_vs_actual = rc->total_actual_bits - rc->total_target_bits; - - if (is_altref_enabled(cpi) && cpi->refresh_alt_ref_frame && - (cm->frame_type != KEY_FRAME)) - // Update the alternate reference frame stats as appropriate. - update_alt_ref_frame_stats(cpi); - else - // Update the Golden frame stats as appropriate. - update_golden_frame_stats(cpi); - - if (cm->frame_type == KEY_FRAME) - rc->frames_since_key = 0; - if (cm->show_frame) { - rc->frames_since_key++; - rc->frames_to_key--; - } - - // Trigger the resizing of the next frame if it is scaled. 
- if (oxcf->pass != 0) { - cpi->resize_pending = - rc->next_frame_size_selector != rc->frame_size_selector; - rc->frame_size_selector = rc->next_frame_size_selector; - } -} - -void vp10_rc_postencode_update_drop_frame(VP10_COMP *cpi) { - // Update buffer level with zero size, update frame counters, and return. - update_buffer_level(cpi, 0); - cpi->rc.frames_since_key++; - cpi->rc.frames_to_key--; - cpi->rc.rc_2_frame = 0; - cpi->rc.rc_1_frame = 0; -} - -// Use this macro to turn on/off use of alt-refs in one-pass mode. -#define USE_ALTREF_FOR_ONE_PASS 1 - -static int calc_pframe_target_size_one_pass_vbr(const VP10_COMP *const cpi) { - static const int af_ratio = 10; - const RATE_CONTROL *const rc = &cpi->rc; - int target; -#if USE_ALTREF_FOR_ONE_PASS - target = (!rc->is_src_frame_alt_ref && - (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) ? - (rc->avg_frame_bandwidth * rc->baseline_gf_interval * af_ratio) / - (rc->baseline_gf_interval + af_ratio - 1) : - (rc->avg_frame_bandwidth * rc->baseline_gf_interval) / - (rc->baseline_gf_interval + af_ratio - 1); -#else - target = rc->avg_frame_bandwidth; -#endif - return vp10_rc_clamp_pframe_target_size(cpi, target); -} - -static int calc_iframe_target_size_one_pass_vbr(const VP10_COMP *const cpi) { - static const int kf_ratio = 25; - const RATE_CONTROL *rc = &cpi->rc; - const int target = rc->avg_frame_bandwidth * kf_ratio; - return vp10_rc_clamp_iframe_target_size(cpi, target); -} - -void vp10_rc_get_one_pass_vbr_params(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - RATE_CONTROL *const rc = &cpi->rc; - int target; - // TODO(yaowu): replace the "auto_key && 0" below with proper decision logic. 
- if (!cpi->refresh_alt_ref_frame && - (cm->current_video_frame == 0 || - (cpi->frame_flags & FRAMEFLAGS_KEY) || - rc->frames_to_key == 0 || - (cpi->oxcf.auto_key && 0))) { - cm->frame_type = KEY_FRAME; - rc->this_key_frame_forced = cm->current_video_frame != 0 && - rc->frames_to_key == 0; - rc->frames_to_key = cpi->oxcf.key_freq; - rc->kf_boost = DEFAULT_KF_BOOST; - rc->source_alt_ref_active = 0; - } else { - cm->frame_type = INTER_FRAME; - } - if (rc->frames_till_gf_update_due == 0) { - rc->baseline_gf_interval = (rc->min_gf_interval + rc->max_gf_interval) / 2; - rc->frames_till_gf_update_due = rc->baseline_gf_interval; - // NOTE: frames_till_gf_update_due must be <= frames_to_key. - if (rc->frames_till_gf_update_due > rc->frames_to_key) { - rc->frames_till_gf_update_due = rc->frames_to_key; - rc->constrained_gf_group = 1; - } else { - rc->constrained_gf_group = 0; - } - cpi->refresh_golden_frame = 1; - rc->source_alt_ref_pending = USE_ALTREF_FOR_ONE_PASS; - rc->gfu_boost = DEFAULT_GF_BOOST; - } - if (cm->frame_type == KEY_FRAME) - target = calc_iframe_target_size_one_pass_vbr(cpi); - else - target = calc_pframe_target_size_one_pass_vbr(cpi); - vp10_rc_set_frame_target(cpi, target); -} - -static int calc_pframe_target_size_one_pass_cbr(const VP10_COMP *cpi) { - const VP10EncoderConfig *oxcf = &cpi->oxcf; - const RATE_CONTROL *rc = &cpi->rc; - const int64_t diff = rc->optimal_buffer_level - rc->buffer_level; - const int64_t one_pct_bits = 1 + rc->optimal_buffer_level / 100; - int min_frame_target = - VPXMAX(rc->avg_frame_bandwidth >> 4, FRAME_OVERHEAD_BITS); - int target; - - if (oxcf->gf_cbr_boost_pct) { - const int af_ratio_pct = oxcf->gf_cbr_boost_pct + 100; - target = cpi->refresh_golden_frame ? 
- (rc->avg_frame_bandwidth * rc->baseline_gf_interval * af_ratio_pct) / - (rc->baseline_gf_interval * 100 + af_ratio_pct - 100) : - (rc->avg_frame_bandwidth * rc->baseline_gf_interval * 100) / - (rc->baseline_gf_interval * 100 + af_ratio_pct - 100); - } else { - target = rc->avg_frame_bandwidth; - } - - if (diff > 0) { - // Lower the target bandwidth for this frame. - const int pct_low = (int)VPXMIN(diff / one_pct_bits, oxcf->under_shoot_pct); - target -= (target * pct_low) / 200; - } else if (diff < 0) { - // Increase the target bandwidth for this frame. - const int pct_high = - (int)VPXMIN(-diff / one_pct_bits, oxcf->over_shoot_pct); - target += (target * pct_high) / 200; - } - if (oxcf->rc_max_inter_bitrate_pct) { - const int max_rate = rc->avg_frame_bandwidth * - oxcf->rc_max_inter_bitrate_pct / 100; - target = VPXMIN(target, max_rate); - } - return VPXMAX(min_frame_target, target); -} - -static int calc_iframe_target_size_one_pass_cbr(const VP10_COMP *cpi) { - const RATE_CONTROL *rc = &cpi->rc; - int target; - if (cpi->common.current_video_frame == 0) { - target = ((rc->starting_buffer_level / 2) > INT_MAX) - ? INT_MAX : (int)(rc->starting_buffer_level / 2); - } else { - int kf_boost = 32; - double framerate = cpi->framerate; - - kf_boost = VPXMAX(kf_boost, (int)(2 * framerate - 16)); - if (rc->frames_since_key < framerate / 2) { - kf_boost = (int)(kf_boost * rc->frames_since_key / - (framerate / 2)); - } - target = ((16 + kf_boost) * rc->avg_frame_bandwidth) >> 4; - } - return vp10_rc_clamp_iframe_target_size(cpi, target); -} - -void vp10_rc_get_one_pass_cbr_params(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - RATE_CONTROL *const rc = &cpi->rc; - int target; - // TODO(yaowu): replace the "auto_key && 0" below with proper decision logic. 
- if ((cm->current_video_frame == 0 || - (cpi->frame_flags & FRAMEFLAGS_KEY) || - rc->frames_to_key == 0 || - (cpi->oxcf.auto_key && 0))) { - cm->frame_type = KEY_FRAME; - rc->this_key_frame_forced = cm->current_video_frame != 0 && - rc->frames_to_key == 0; - rc->frames_to_key = cpi->oxcf.key_freq; - rc->kf_boost = DEFAULT_KF_BOOST; - rc->source_alt_ref_active = 0; - } else { - cm->frame_type = INTER_FRAME; - } - if (rc->frames_till_gf_update_due == 0) { - if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) - vp10_cyclic_refresh_set_golden_update(cpi); - else - rc->baseline_gf_interval = - (rc->min_gf_interval + rc->max_gf_interval) / 2; - rc->frames_till_gf_update_due = rc->baseline_gf_interval; - // NOTE: frames_till_gf_update_due must be <= frames_to_key. - if (rc->frames_till_gf_update_due > rc->frames_to_key) - rc->frames_till_gf_update_due = rc->frames_to_key; - cpi->refresh_golden_frame = 1; - rc->gfu_boost = DEFAULT_GF_BOOST; - } - - // Any update/change of global cyclic refresh parameters (amount/delta-qp) - // should be done here, before the frame qp is selected. - if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) - vp10_cyclic_refresh_update_parameters(cpi); - - if (cm->frame_type == KEY_FRAME) - target = calc_iframe_target_size_one_pass_cbr(cpi); - else - target = calc_pframe_target_size_one_pass_cbr(cpi); - - vp10_rc_set_frame_target(cpi, target); - if (cpi->oxcf.resize_mode == RESIZE_DYNAMIC) - cpi->resize_pending = vp10_resize_one_pass_cbr(cpi); - else - cpi->resize_pending = 0; -} - -int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget, - vpx_bit_depth_t bit_depth) { - int start_index = rc->worst_quality; - int target_index = rc->worst_quality; - int i; - - // Convert the average q value to an index. 
- for (i = rc->best_quality; i < rc->worst_quality; ++i) { - start_index = i; - if (vp10_convert_qindex_to_q(i, bit_depth) >= qstart) - break; - } - - // Convert the q target to an index - for (i = rc->best_quality; i < rc->worst_quality; ++i) { - target_index = i; - if (vp10_convert_qindex_to_q(i, bit_depth) >= qtarget) - break; - } - - return target_index - start_index; -} - -int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type, - int qindex, double rate_target_ratio, - vpx_bit_depth_t bit_depth) { - int target_index = rc->worst_quality; - int i; - - // Look up the current projected bits per block for the base index - const int base_bits_per_mb = vp10_rc_bits_per_mb(frame_type, qindex, 1.0, - bit_depth); - - // Find the target bits per mb based on the base value and given ratio. - const int target_bits_per_mb = (int)(rate_target_ratio * base_bits_per_mb); - - // Convert the q target to an index - for (i = rc->best_quality; i < rc->worst_quality; ++i) { - if (vp10_rc_bits_per_mb(frame_type, i, 1.0, bit_depth) <= - target_bits_per_mb) { - target_index = i; - break; - } - } - return target_index - qindex; -} - -void vp10_rc_set_gf_interval_range(const VP10_COMP *const cpi, - RATE_CONTROL *const rc) { - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - - // Special case code for 1 pass fixed Q mode tests - if ((oxcf->pass == 0) && (oxcf->rc_mode == VPX_Q)) { - rc->max_gf_interval = FIXED_GF_INTERVAL; - rc->min_gf_interval = FIXED_GF_INTERVAL; - rc->static_scene_max_gf_interval = FIXED_GF_INTERVAL; - } else { - // Set Maximum gf/arf interval - rc->max_gf_interval = oxcf->max_gf_interval; - rc->min_gf_interval = oxcf->min_gf_interval; - if (rc->min_gf_interval == 0) - rc->min_gf_interval = vp10_rc_get_default_min_gf_interval( - oxcf->width, oxcf->height, cpi->framerate); - if (rc->max_gf_interval == 0) - rc->max_gf_interval = vp10_rc_get_default_max_gf_interval( - cpi->framerate, rc->min_gf_interval); - - // Extended interval for genuinely 
static scenes - rc->static_scene_max_gf_interval = MAX_LAG_BUFFERS * 2; - - if (is_altref_enabled(cpi)) { - if (rc->static_scene_max_gf_interval > oxcf->lag_in_frames - 1) - rc->static_scene_max_gf_interval = oxcf->lag_in_frames - 1; - } - - if (rc->max_gf_interval > rc->static_scene_max_gf_interval) - rc->max_gf_interval = rc->static_scene_max_gf_interval; - - // Clamp min to max - rc->min_gf_interval = VPXMIN(rc->min_gf_interval, rc->max_gf_interval); - } -} - -void vp10_rc_update_framerate(VP10_COMP *cpi) { - const VP10_COMMON *const cm = &cpi->common; - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - RATE_CONTROL *const rc = &cpi->rc; - int vbr_max_bits; - - rc->avg_frame_bandwidth = (int)(oxcf->target_bandwidth / cpi->framerate); - rc->min_frame_bandwidth = (int)(rc->avg_frame_bandwidth * - oxcf->two_pass_vbrmin_section / 100); - - rc->min_frame_bandwidth = - VPXMAX(rc->min_frame_bandwidth, FRAME_OVERHEAD_BITS); - - // A maximum bitrate for a frame is defined. - // The baseline for this aligns with HW implementations that - // can support decode of 1080P content up to a bitrate of MAX_MB_RATE bits - // per 16x16 MB (averaged over a frame). However this limit is extended if - // a very high rate is given on the command line or the the rate cannnot - // be acheived because of a user specificed max q (e.g. when the user - // specifies lossless encode. 
- vbr_max_bits = (int)(((int64_t)rc->avg_frame_bandwidth * - oxcf->two_pass_vbrmax_section) / 100); - rc->max_frame_bandwidth = - VPXMAX(VPXMAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), vbr_max_bits); - - vp10_rc_set_gf_interval_range(cpi, rc); -} - -#define VBR_PCT_ADJUSTMENT_LIMIT 50 -// For VBR...adjustment to the frame target based on error from previous frames -static void vbr_rate_correction(VP10_COMP *cpi, int *this_frame_target) { - RATE_CONTROL *const rc = &cpi->rc; - int64_t vbr_bits_off_target = rc->vbr_bits_off_target; - int max_delta; - double position_factor = 1.0; - - // How far through the clip are we. - // This number is used to damp the per frame rate correction. - // Range 0 - 1.0 - if (cpi->twopass.total_stats.count) { - position_factor = sqrt((double)cpi->common.current_video_frame / - cpi->twopass.total_stats.count); - } - max_delta = (int)(position_factor * - ((*this_frame_target * VBR_PCT_ADJUSTMENT_LIMIT) / 100)); - - // vbr_bits_off_target > 0 means we have extra bits to spend - if (vbr_bits_off_target > 0) { - *this_frame_target += - (vbr_bits_off_target > max_delta) ? max_delta - : (int)vbr_bits_off_target; - } else { - *this_frame_target -= - (vbr_bits_off_target < -max_delta) ? max_delta - : (int)-vbr_bits_off_target; - } - - // Fast redistribution of bits arising from massive local undershoot. - // Dont do it for kf,arf,gf or overlay frames. 
- if (!frame_is_kf_gf_arf(cpi) && !rc->is_src_frame_alt_ref && - rc->vbr_bits_off_target_fast) { - int one_frame_bits = VPXMAX(rc->avg_frame_bandwidth, *this_frame_target); - int fast_extra_bits; - fast_extra_bits = (int)VPXMIN(rc->vbr_bits_off_target_fast, one_frame_bits); - fast_extra_bits = (int)VPXMIN( - fast_extra_bits, - VPXMAX(one_frame_bits / 8, rc->vbr_bits_off_target_fast / 8)); - *this_frame_target += (int)fast_extra_bits; - rc->vbr_bits_off_target_fast -= fast_extra_bits; - } -} - -void vp10_set_target_rate(VP10_COMP *cpi) { - RATE_CONTROL *const rc = &cpi->rc; - int target_rate = rc->base_frame_target; - - // Correction to rate target based on prior over or under shoot. - if (cpi->oxcf.rc_mode == VPX_VBR || cpi->oxcf.rc_mode == VPX_CQ) - vbr_rate_correction(cpi, &target_rate); - vp10_rc_set_frame_target(cpi, target_rate); -} - -// Check if we should resize, based on average QP from past x frames. -// Only allow for resize at most one scale down for now, scaling factor is 2. -int vp10_resize_one_pass_cbr(VP10_COMP *cpi) { - const VP10_COMMON *const cm = &cpi->common; - RATE_CONTROL *const rc = &cpi->rc; - int resize_now = 0; - cpi->resize_scale_num = 1; - cpi->resize_scale_den = 1; - // Don't resize on key frame; reset the counters on key frame. - if (cm->frame_type == KEY_FRAME) { - cpi->resize_avg_qp = 0; - cpi->resize_count = 0; - return 0; - } - // Resize based on average buffer underflow and QP over some window. - // Ignore samples close to key frame, since QP is usually high after key. - if (cpi->rc.frames_since_key > 2 * cpi->framerate) { - const int window = (int)(5 * cpi->framerate); - cpi->resize_avg_qp += cm->base_qindex; - if (cpi->rc.buffer_level < (int)(30 * rc->optimal_buffer_level / 100)) - ++cpi->resize_buffer_underflow; - ++cpi->resize_count; - // Check for resize action every "window" frames. 
- if (cpi->resize_count >= window) { - int avg_qp = cpi->resize_avg_qp / cpi->resize_count; - // Resize down if buffer level has underflowed sufficent amount in past - // window, and we are at original resolution. - // Resize back up if average QP is low, and we are currently in a resized - // down state. - if (cpi->resize_state == 0 && - cpi->resize_buffer_underflow > (cpi->resize_count >> 2)) { - resize_now = 1; - cpi->resize_state = 1; - } else if (cpi->resize_state == 1 && - avg_qp < 40 * cpi->rc.worst_quality / 100) { - resize_now = -1; - cpi->resize_state = 0; - } - // Reset for next window measurement. - cpi->resize_avg_qp = 0; - cpi->resize_count = 0; - cpi->resize_buffer_underflow = 0; - } - } - // If decision is to resize, reset some quantities, and check is we should - // reduce rate correction factor, - if (resize_now != 0) { - int target_bits_per_frame; - int active_worst_quality; - int qindex; - int tot_scale_change; - // For now, resize is by 1/2 x 1/2. - cpi->resize_scale_num = 1; - cpi->resize_scale_den = 2; - tot_scale_change = (cpi->resize_scale_den * cpi->resize_scale_den) / - (cpi->resize_scale_num * cpi->resize_scale_num); - // Reset buffer level to optimal, update target size. - rc->buffer_level = rc->optimal_buffer_level; - rc->bits_off_target = rc->optimal_buffer_level; - rc->this_frame_target = calc_pframe_target_size_one_pass_cbr(cpi); - // Reset cyclic refresh parameters. - if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) - vp10_cyclic_refresh_reset_resize(cpi); - // Get the projected qindex, based on the scaled target frame size (scaled - // so target_bits_per_mb in vp10_rc_regulate_q will be correct target). - target_bits_per_frame = (resize_now == 1) ? 
- rc->this_frame_target * tot_scale_change : - rc->this_frame_target / tot_scale_change; - active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi); - qindex = vp10_rc_regulate_q(cpi, - target_bits_per_frame, - rc->best_quality, - active_worst_quality); - // If resize is down, check if projected q index is close to worst_quality, - // and if so, reduce the rate correction factor (since likely can afford - // lower q for resized frame). - if (resize_now == 1 && - qindex > 90 * cpi->rc.worst_quality / 100) { - rc->rate_correction_factors[INTER_NORMAL] *= 0.85; - } - // If resize is back up, check if projected q index is too much above the - // current base_qindex, and if so, reduce the rate correction factor - // (since prefer to keep q for resized frame at least close to previous q). - if (resize_now == -1 && - qindex > 130 * cm->base_qindex / 100) { - rc->rate_correction_factors[INTER_NORMAL] *= 0.9; - } - } - return resize_now; -} diff --git a/libs/libvpx/vp10/encoder/ratectrl.h b/libs/libvpx/vp10/encoder/ratectrl.h deleted file mode 100644 index 0b9fd456df..0000000000 --- a/libs/libvpx/vp10/encoder/ratectrl.h +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - - -#ifndef VP10_ENCODER_RATECTRL_H_ -#define VP10_ENCODER_RATECTRL_H_ - -#include "vpx/vpx_codec.h" -#include "vpx/vpx_integer.h" - -#include "vp10/common/blockd.h" - -#ifdef __cplusplus -extern "C" { -#endif - -// Bits Per MB at different Q (Multiplied by 512) -#define BPER_MB_NORMBITS 9 - -#define MIN_GF_INTERVAL 4 -#define MAX_GF_INTERVAL 16 -#define FIXED_GF_INTERVAL 8 // Used in some testing modes only - -typedef enum { - INTER_NORMAL = 0, - INTER_HIGH = 1, - GF_ARF_LOW = 2, - GF_ARF_STD = 3, - KF_STD = 4, - RATE_FACTOR_LEVELS = 5 -} RATE_FACTOR_LEVEL; - -// Internal frame scaling level. -typedef enum { - UNSCALED = 0, // Frame is unscaled. - SCALE_STEP1 = 1, // First-level down-scaling. - FRAME_SCALE_STEPS -} FRAME_SCALE_LEVEL; - -// Frame dimensions multiplier wrt the native frame size, in 1/16ths, -// specified for the scale-up case. -// e.g. 24 => 16/24 = 2/3 of native size. The restriction to 1/16th is -// intended to match the capabilities of the normative scaling filters, -// giving precedence to the up-scaling accuracy. -static const int frame_scale_factor[FRAME_SCALE_STEPS] = {16, 24}; - -// Multiplier of the target rate to be used as threshold for triggering scaling. -static const double rate_thresh_mult[FRAME_SCALE_STEPS] = {1.0, 2.0}; - -// Scale dependent Rate Correction Factor multipliers. Compensates for the -// greater number of bits per pixel generated in down-scaled frames. -static const double rcf_mult[FRAME_SCALE_STEPS] = {1.0, 2.0}; - -typedef struct { - // Rate targetting variables - int base_frame_target; // A baseline frame target before adjustment - // for previous under or over shoot. - int this_frame_target; // Actual frame target after rc adjustment. - int projected_frame_size; - int sb64_target_rate; - int last_q[FRAME_TYPES]; // Separate values for Intra/Inter - int last_boosted_qindex; // Last boosted GF/KF/ARF q - int last_kf_qindex; // Q index of the last key frame coded. 
- - int gfu_boost; - int last_boost; - int kf_boost; - - double rate_correction_factors[RATE_FACTOR_LEVELS]; - - int frames_since_golden; - int frames_till_gf_update_due; - int min_gf_interval; - int max_gf_interval; - int static_scene_max_gf_interval; - int baseline_gf_interval; - int constrained_gf_group; - int frames_to_key; - int frames_since_key; - int this_key_frame_forced; - int next_key_frame_forced; - int source_alt_ref_pending; - int source_alt_ref_active; - int is_src_frame_alt_ref; - - int avg_frame_bandwidth; // Average frame size target for clip - int min_frame_bandwidth; // Minimum allocation used for any frame - int max_frame_bandwidth; // Maximum burst rate allowed for a frame. - - int ni_av_qi; - int ni_tot_qi; - int ni_frames; - int avg_frame_qindex[FRAME_TYPES]; - double tot_q; - double avg_q; - - int64_t buffer_level; - int64_t bits_off_target; - int64_t vbr_bits_off_target; - int64_t vbr_bits_off_target_fast; - - int decimation_factor; - int decimation_count; - - int rolling_target_bits; - int rolling_actual_bits; - - int long_rolling_target_bits; - int long_rolling_actual_bits; - - int rate_error_estimate; - - int64_t total_actual_bits; - int64_t total_target_bits; - int64_t total_target_vs_actual; - - int worst_quality; - int best_quality; - - int64_t starting_buffer_level; - int64_t optimal_buffer_level; - int64_t maximum_buffer_size; - - // rate control history for last frame(1) and the frame before(2). - // -1: undershot - // 1: overshoot - // 0: not initialized. - int rc_1_frame; - int rc_2_frame; - int q_1_frame; - int q_2_frame; - - // Auto frame-scaling variables. 
- FRAME_SCALE_LEVEL frame_size_selector; - FRAME_SCALE_LEVEL next_frame_size_selector; - int frame_width[FRAME_SCALE_STEPS]; - int frame_height[FRAME_SCALE_STEPS]; - int rf_level_maxq[RATE_FACTOR_LEVELS]; -} RATE_CONTROL; - -struct VP10_COMP; -struct VP10EncoderConfig; - -void vp10_rc_init(const struct VP10EncoderConfig *oxcf, int pass, - RATE_CONTROL *rc); - -int vp10_estimate_bits_at_q(FRAME_TYPE frame_kind, int q, int mbs, - double correction_factor, - vpx_bit_depth_t bit_depth); - -double vp10_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth); - -void vp10_rc_init_minq_luts(void); - -int vp10_rc_get_default_min_gf_interval(int width, int height, double framerate); -// Note vp10_rc_get_default_max_gf_interval() requires the min_gf_interval to -// be passed in to ensure that the max_gf_interval returned is at least as bis -// as that. -int vp10_rc_get_default_max_gf_interval(double framerate, int min_frame_rate); - -// Generally at the high level, the following flow is expected -// to be enforced for rate control: -// First call per frame, one of: -// vp10_rc_get_one_pass_vbr_params() -// vp10_rc_get_one_pass_cbr_params() -// vp10_rc_get_first_pass_params() -// vp10_rc_get_second_pass_params() -// depending on the usage to set the rate control encode parameters desired. -// -// Then, call encode_frame_to_data_rate() to perform the -// actual encode. This function will in turn call encode_frame() -// one or more times, followed by one of: -// vp10_rc_postencode_update() -// vp10_rc_postencode_update_drop_frame() -// -// The majority of rate control parameters are only expected -// to be set in the vp10_rc_get_..._params() functions and -// updated during the vp10_rc_postencode_update...() functions. -// The only exceptions are vp10_rc_drop_frame() and -// vp10_rc_update_rate_correction_factors() functions. - -// Functions to set parameters for encoding before the actual -// encode_frame_to_data_rate() function. 
-void vp10_rc_get_one_pass_vbr_params(struct VP10_COMP *cpi); -void vp10_rc_get_one_pass_cbr_params(struct VP10_COMP *cpi); - -// Post encode update of the rate control parameters based -// on bytes used -void vp10_rc_postencode_update(struct VP10_COMP *cpi, uint64_t bytes_used); -// Post encode update of the rate control parameters for dropped frames -void vp10_rc_postencode_update_drop_frame(struct VP10_COMP *cpi); - -// Updates rate correction factors -// Changes only the rate correction factors in the rate control structure. -void vp10_rc_update_rate_correction_factors(struct VP10_COMP *cpi); - -// Decide if we should drop this frame: For 1-pass CBR. -// Changes only the decimation count in the rate control structure -int vp10_rc_drop_frame(struct VP10_COMP *cpi); - -// Computes frame size bounds. -void vp10_rc_compute_frame_size_bounds(const struct VP10_COMP *cpi, - int this_frame_target, - int *frame_under_shoot_limit, - int *frame_over_shoot_limit); - -// Picks q and q bounds given the target for bits -int vp10_rc_pick_q_and_bounds(const struct VP10_COMP *cpi, - int *bottom_index, - int *top_index); - -// Estimates q to achieve a target bits per frame -int vp10_rc_regulate_q(const struct VP10_COMP *cpi, int target_bits_per_frame, - int active_best_quality, int active_worst_quality); - -// Estimates bits per mb for a given qindex and correction factor. -int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex, - double correction_factor, vpx_bit_depth_t bit_depth); - -// Clamping utilities for bitrate targets for iframes and pframes. -int vp10_rc_clamp_iframe_target_size(const struct VP10_COMP *const cpi, - int target); -int vp10_rc_clamp_pframe_target_size(const struct VP10_COMP *const cpi, - int target); -// Utility to set frame_target into the RATE_CONTROL structure -// This function is called only from the vp10_rc_get_..._params() functions. 
-void vp10_rc_set_frame_target(struct VP10_COMP *cpi, int target); - -// Computes a q delta (in "q index" terms) to get from a starting q value -// to a target q value -int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget, - vpx_bit_depth_t bit_depth); - -// Computes a q delta (in "q index" terms) to get from a starting q value -// to a value that should equate to the given rate ratio. -int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type, - int qindex, double rate_target_ratio, - vpx_bit_depth_t bit_depth); - -int vp10_frame_type_qdelta(const struct VP10_COMP *cpi, int rf_level, int q); - -void vp10_rc_update_framerate(struct VP10_COMP *cpi); - -void vp10_rc_set_gf_interval_range(const struct VP10_COMP *const cpi, - RATE_CONTROL *const rc); - -void vp10_set_target_rate(struct VP10_COMP *cpi); - -int vp10_resize_one_pass_cbr(struct VP10_COMP *cpi); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_RATECTRL_H_ diff --git a/libs/libvpx/vp10/encoder/rd.c b/libs/libvpx/vp10/encoder/rd.c deleted file mode 100644 index f4fdb2417c..0000000000 --- a/libs/libvpx/vp10/encoder/rd.c +++ /dev/null @@ -1,673 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include -#include - -#include "./vp10_rtcd.h" - -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/bitops.h" -#include "vpx_ports/mem.h" -#include "vpx_ports/system_state.h" - -#include "vp10/common/common.h" -#include "vp10/common/entropy.h" -#include "vp10/common/entropymode.h" -#include "vp10/common/mvref_common.h" -#include "vp10/common/pred_common.h" -#include "vp10/common/quant_common.h" -#include "vp10/common/reconinter.h" -#include "vp10/common/reconintra.h" -#include "vp10/common/seg_common.h" - -#include "vp10/encoder/cost.h" -#include "vp10/encoder/encodemb.h" -#include "vp10/encoder/encodemv.h" -#include "vp10/encoder/encoder.h" -#include "vp10/encoder/mcomp.h" -#include "vp10/encoder/quantize.h" -#include "vp10/encoder/ratectrl.h" -#include "vp10/encoder/rd.h" -#include "vp10/encoder/tokenize.h" - -#define RD_THRESH_POW 1.25 -#define RD_MULT_EPB_RATIO 64 - -// Factor to weigh the rate for switchable interp filters. -#define SWITCHABLE_INTERP_RATE_FACTOR 1 - -void vp10_rd_cost_reset(RD_COST *rd_cost) { - rd_cost->rate = INT_MAX; - rd_cost->dist = INT64_MAX; - rd_cost->rdcost = INT64_MAX; -} - -void vp10_rd_cost_init(RD_COST *rd_cost) { - rd_cost->rate = 0; - rd_cost->dist = 0; - rd_cost->rdcost = 0; -} - -// The baseline rd thresholds for breaking out of the rd loop for -// certain modes are assumed to be based on 8x8 blocks. -// This table is used to correct for block size. -// The factors here are << 2 (2 = x0.5, 32 = x8 etc). 
-static const uint8_t rd_thresh_block_size_factor[BLOCK_SIZES] = { - 2, 3, 3, 4, 6, 6, 8, 12, 12, 16, 24, 24, 32 -}; - -static void fill_mode_costs(VP10_COMP *cpi) { - const FRAME_CONTEXT *const fc = cpi->common.fc; - int i, j; - - for (i = 0; i < INTRA_MODES; ++i) - for (j = 0; j < INTRA_MODES; ++j) - vp10_cost_tokens(cpi->y_mode_costs[i][j], vp10_kf_y_mode_prob[i][j], - vp10_intra_mode_tree); - - vp10_cost_tokens(cpi->mbmode_cost, fc->y_mode_prob[1], vp10_intra_mode_tree); - for (i = 0; i < INTRA_MODES; ++i) - vp10_cost_tokens(cpi->intra_uv_mode_cost[i], - fc->uv_mode_prob[i], vp10_intra_mode_tree); - - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) - vp10_cost_tokens(cpi->switchable_interp_costs[i], - fc->switchable_interp_prob[i], vp10_switchable_interp_tree); - - for (i = TX_4X4; i < EXT_TX_SIZES; ++i) { - for (j = 0; j < TX_TYPES; ++j) - vp10_cost_tokens(cpi->intra_tx_type_costs[i][j], - fc->intra_ext_tx_prob[i][j], - vp10_ext_tx_tree); - } - for (i = TX_4X4; i < EXT_TX_SIZES; ++i) { - vp10_cost_tokens(cpi->inter_tx_type_costs[i], - fc->inter_ext_tx_prob[i], - vp10_ext_tx_tree); - } -} - -static void fill_token_costs(vp10_coeff_cost *c, - vp10_coeff_probs_model (*p)[PLANE_TYPES]) { - int i, j, k, l; - TX_SIZE t; - for (t = TX_4X4; t <= TX_32X32; ++t) - for (i = 0; i < PLANE_TYPES; ++i) - for (j = 0; j < REF_TYPES; ++j) - for (k = 0; k < COEF_BANDS; ++k) - for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { - vpx_prob probs[ENTROPY_NODES]; - vp10_model_to_full_probs(p[t][i][j][k][l], probs); - vp10_cost_tokens((int *)c[t][i][j][k][0][l], probs, - vp10_coef_tree); - vp10_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs, - vp10_coef_tree); - assert(c[t][i][j][k][0][l][EOB_TOKEN] == - c[t][i][j][k][1][l][EOB_TOKEN]); - } -} - -// Values are now correlated to quantizer. 
-static int sad_per_bit16lut_8[QINDEX_RANGE]; -static int sad_per_bit4lut_8[QINDEX_RANGE]; - -#if CONFIG_VP9_HIGHBITDEPTH -static int sad_per_bit16lut_10[QINDEX_RANGE]; -static int sad_per_bit4lut_10[QINDEX_RANGE]; -static int sad_per_bit16lut_12[QINDEX_RANGE]; -static int sad_per_bit4lut_12[QINDEX_RANGE]; -#endif - -static void init_me_luts_bd(int *bit16lut, int *bit4lut, int range, - vpx_bit_depth_t bit_depth) { - int i; - // Initialize the sad lut tables using a formulaic calculation for now. - // This is to make it easier to resolve the impact of experimental changes - // to the quantizer tables. - for (i = 0; i < range; i++) { - const double q = vp10_convert_qindex_to_q(i, bit_depth); - bit16lut[i] = (int)(0.0418 * q + 2.4107); - bit4lut[i] = (int)(0.063 * q + 2.742); - } -} - -void vp10_init_me_luts(void) { - init_me_luts_bd(sad_per_bit16lut_8, sad_per_bit4lut_8, QINDEX_RANGE, - VPX_BITS_8); -#if CONFIG_VP9_HIGHBITDEPTH - init_me_luts_bd(sad_per_bit16lut_10, sad_per_bit4lut_10, QINDEX_RANGE, - VPX_BITS_10); - init_me_luts_bd(sad_per_bit16lut_12, sad_per_bit4lut_12, QINDEX_RANGE, - VPX_BITS_12); -#endif -} - -static const int rd_boost_factor[16] = { - 64, 32, 32, 32, 24, 16, 12, 12, - 8, 8, 4, 4, 2, 2, 1, 0 -}; -static const int rd_frame_type_factor[FRAME_UPDATE_TYPES] = { - 128, 144, 128, 128, 144 -}; - -int vp10_compute_rd_mult(const VP10_COMP *cpi, int qindex) { - const int64_t q = vp10_dc_quant(qindex, 0, cpi->common.bit_depth); -#if CONFIG_VP9_HIGHBITDEPTH - int64_t rdmult = 0; - switch (cpi->common.bit_depth) { - case VPX_BITS_8: - rdmult = 88 * q * q / 24; - break; - case VPX_BITS_10: - rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4); - break; - case VPX_BITS_12: - rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8); - break; - default: - assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); - return -1; - } -#else - int64_t rdmult = 88 * q * q / 24; -#endif // CONFIG_VP9_HIGHBITDEPTH - if (cpi->oxcf.pass == 2 && (cpi->common.frame_type 
!= KEY_FRAME)) { - const GF_GROUP *const gf_group = &cpi->twopass.gf_group; - const FRAME_UPDATE_TYPE frame_type = gf_group->update_type[gf_group->index]; - const int boost_index = VPXMIN(15, (cpi->rc.gfu_boost / 100)); - - rdmult = (rdmult * rd_frame_type_factor[frame_type]) >> 7; - rdmult += ((rdmult * rd_boost_factor[boost_index]) >> 7); - } - if (rdmult < 1) - rdmult = 1; - return (int)rdmult; -} - -static int compute_rd_thresh_factor(int qindex, vpx_bit_depth_t bit_depth) { - double q; -#if CONFIG_VP9_HIGHBITDEPTH - switch (bit_depth) { - case VPX_BITS_8: - q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; - break; - case VPX_BITS_10: - q = vp10_dc_quant(qindex, 0, VPX_BITS_10) / 16.0; - break; - case VPX_BITS_12: - q = vp10_dc_quant(qindex, 0, VPX_BITS_12) / 64.0; - break; - default: - assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); - return -1; - } -#else - (void) bit_depth; - q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; -#endif // CONFIG_VP9_HIGHBITDEPTH - // TODO(debargha): Adjust the function below. 
- return VPXMAX((int)(pow(q, RD_THRESH_POW) * 5.12), 8); -} - -void vp10_initialize_me_consts(VP10_COMP *cpi, MACROBLOCK *x, int qindex) { -#if CONFIG_VP9_HIGHBITDEPTH - switch (cpi->common.bit_depth) { - case VPX_BITS_8: - x->sadperbit16 = sad_per_bit16lut_8[qindex]; - x->sadperbit4 = sad_per_bit4lut_8[qindex]; - break; - case VPX_BITS_10: - x->sadperbit16 = sad_per_bit16lut_10[qindex]; - x->sadperbit4 = sad_per_bit4lut_10[qindex]; - break; - case VPX_BITS_12: - x->sadperbit16 = sad_per_bit16lut_12[qindex]; - x->sadperbit4 = sad_per_bit4lut_12[qindex]; - break; - default: - assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); - } -#else - (void)cpi; - x->sadperbit16 = sad_per_bit16lut_8[qindex]; - x->sadperbit4 = sad_per_bit4lut_8[qindex]; -#endif // CONFIG_VP9_HIGHBITDEPTH -} - -static void set_block_thresholds(const VP10_COMMON *cm, RD_OPT *rd) { - int i, bsize, segment_id; - - for (segment_id = 0; segment_id < MAX_SEGMENTS; ++segment_id) { - const int qindex = - clamp(vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex) + - cm->y_dc_delta_q, 0, MAXQ); - const int q = compute_rd_thresh_factor(qindex, cm->bit_depth); - - for (bsize = 0; bsize < BLOCK_SIZES; ++bsize) { - // Threshold here seems unnecessarily harsh but fine given actual - // range of values used for cpi->sf.thresh_mult[]. - const int t = q * rd_thresh_block_size_factor[bsize]; - const int thresh_max = INT_MAX / t; - - if (bsize >= BLOCK_8X8) { - for (i = 0; i < MAX_MODES; ++i) - rd->threshes[segment_id][bsize][i] = - rd->thresh_mult[i] < thresh_max - ? rd->thresh_mult[i] * t / 4 - : INT_MAX; - } else { - for (i = 0; i < MAX_REFS; ++i) - rd->threshes[segment_id][bsize][i] = - rd->thresh_mult_sub8x8[i] < thresh_max - ? 
rd->thresh_mult_sub8x8[i] * t / 4 - : INT_MAX; - } - } - } -} - -void vp10_initialize_rd_consts(VP10_COMP *cpi) { - VP10_COMMON *const cm = &cpi->common; - MACROBLOCK *const x = &cpi->td.mb; - RD_OPT *const rd = &cpi->rd; - int i; - - vpx_clear_system_state(); - - rd->RDDIV = RDDIV_BITS; // In bits (to multiply D by 128). - rd->RDMULT = vp10_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q); - - x->errorperbit = rd->RDMULT / RD_MULT_EPB_RATIO; - x->errorperbit += (x->errorperbit == 0); - - x->select_tx_size = (cpi->sf.tx_size_search_method == USE_LARGESTALL && - cm->frame_type != KEY_FRAME) ? 0 : 1; - - set_block_thresholds(cm, rd); - - fill_token_costs(x->token_costs, cm->fc->coef_probs); - - if (cpi->sf.partition_search_type != VAR_BASED_PARTITION || - cm->frame_type == KEY_FRAME) { - for (i = 0; i < PARTITION_CONTEXTS; ++i) - vp10_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i], - vp10_partition_tree); - } - - fill_mode_costs(cpi); - - if (!frame_is_intra_only(cm)) { - vp10_build_nmv_cost_table(x->nmvjointcost, - cm->allow_high_precision_mv ? x->nmvcost_hp - : x->nmvcost, - &cm->fc->nmvc, cm->allow_high_precision_mv); - - for (i = 0; i < INTER_MODE_CONTEXTS; ++i) - vp10_cost_tokens((int *)cpi->inter_mode_cost[i], - cm->fc->inter_mode_probs[i], vp10_inter_mode_tree); - } -} - -static void model_rd_norm(int xsq_q10, int *r_q10, int *d_q10) { - // NOTE: The tables below must be of the same size. - - // The functions described below are sampled at the four most significant - // bits of x^2 + 8 / 256. - - // Normalized rate: - // This table models the rate for a Laplacian source with given variance - // when quantized with a uniform quantizer with given stepsize. The - // closed form expression is: - // Rn(x) = H(sqrt(r)) + sqrt(r)*[1 + H(r)/(1 - r)], - // where r = exp(-sqrt(2) * x) and x = qpstep / sqrt(variance), - // and H(x) is the binary entropy function. 
- static const int rate_tab_q10[] = { - 65536, 6086, 5574, 5275, 5063, 4899, 4764, 4651, - 4553, 4389, 4255, 4142, 4044, 3958, 3881, 3811, - 3748, 3635, 3538, 3453, 3376, 3307, 3244, 3186, - 3133, 3037, 2952, 2877, 2809, 2747, 2690, 2638, - 2589, 2501, 2423, 2353, 2290, 2232, 2179, 2130, - 2084, 2001, 1928, 1862, 1802, 1748, 1698, 1651, - 1608, 1530, 1460, 1398, 1342, 1290, 1243, 1199, - 1159, 1086, 1021, 963, 911, 864, 821, 781, - 745, 680, 623, 574, 530, 490, 455, 424, - 395, 345, 304, 269, 239, 213, 190, 171, - 154, 126, 104, 87, 73, 61, 52, 44, - 38, 28, 21, 16, 12, 10, 8, 6, - 5, 3, 2, 1, 1, 1, 0, 0, - }; - // Normalized distortion: - // This table models the normalized distortion for a Laplacian source - // with given variance when quantized with a uniform quantizer - // with given stepsize. The closed form expression is: - // Dn(x) = 1 - 1/sqrt(2) * x / sinh(x/sqrt(2)) - // where x = qpstep / sqrt(variance). - // Note the actual distortion is Dn * variance. - static const int dist_tab_q10[] = { - 0, 0, 1, 1, 1, 2, 2, 2, - 3, 3, 4, 5, 5, 6, 7, 7, - 8, 9, 11, 12, 13, 15, 16, 17, - 18, 21, 24, 26, 29, 31, 34, 36, - 39, 44, 49, 54, 59, 64, 69, 73, - 78, 88, 97, 106, 115, 124, 133, 142, - 151, 167, 184, 200, 215, 231, 245, 260, - 274, 301, 327, 351, 375, 397, 418, 439, - 458, 495, 528, 559, 587, 613, 637, 659, - 680, 717, 749, 777, 801, 823, 842, 859, - 874, 899, 919, 936, 949, 960, 969, 977, - 983, 994, 1001, 1006, 1010, 1013, 1015, 1017, - 1018, 1020, 1022, 1022, 1023, 1023, 1023, 1024, - }; - static const int xsq_iq_q10[] = { - 0, 4, 8, 12, 16, 20, 24, 28, - 32, 40, 48, 56, 64, 72, 80, 88, - 96, 112, 128, 144, 160, 176, 192, 208, - 224, 256, 288, 320, 352, 384, 416, 448, - 480, 544, 608, 672, 736, 800, 864, 928, - 992, 1120, 1248, 1376, 1504, 1632, 1760, 1888, - 2016, 2272, 2528, 2784, 3040, 3296, 3552, 3808, - 4064, 4576, 5088, 5600, 6112, 6624, 7136, 7648, - 8160, 9184, 10208, 11232, 12256, 13280, 14304, 15328, - 16352, 18400, 20448, 22496, 24544, 26592, 
28640, 30688, - 32736, 36832, 40928, 45024, 49120, 53216, 57312, 61408, - 65504, 73696, 81888, 90080, 98272, 106464, 114656, 122848, - 131040, 147424, 163808, 180192, 196576, 212960, 229344, 245728, - }; - const int tmp = (xsq_q10 >> 2) + 8; - const int k = get_msb(tmp) - 3; - const int xq = (k << 3) + ((tmp >> k) & 0x7); - const int one_q10 = 1 << 10; - const int a_q10 = ((xsq_q10 - xsq_iq_q10[xq]) << 10) >> (2 + k); - const int b_q10 = one_q10 - a_q10; - *r_q10 = (rate_tab_q10[xq] * b_q10 + rate_tab_q10[xq + 1] * a_q10) >> 10; - *d_q10 = (dist_tab_q10[xq] * b_q10 + dist_tab_q10[xq + 1] * a_q10) >> 10; -} - -void vp10_model_rd_from_var_lapndz(unsigned int var, unsigned int n_log2, - unsigned int qstep, int *rate, - int64_t *dist) { - // This function models the rate and distortion for a Laplacian - // source with given variance when quantized with a uniform quantizer - // with given stepsize. The closed form expressions are in: - // Hang and Chen, "Source Model for transform video coder and its - // application - Part I: Fundamental Theory", IEEE Trans. Circ. - // Sys. for Video Tech., April 1997. 
- if (var == 0) { - *rate = 0; - *dist = 0; - } else { - int d_q10, r_q10; - static const uint32_t MAX_XSQ_Q10 = 245727; - const uint64_t xsq_q10_64 = - (((uint64_t)qstep * qstep << (n_log2 + 10)) + (var >> 1)) / var; - const int xsq_q10 = (int)VPXMIN(xsq_q10_64, MAX_XSQ_Q10); - model_rd_norm(xsq_q10, &r_q10, &d_q10); - *rate = ((r_q10 << n_log2) + 2) >> 2; - *dist = (var * (int64_t)d_q10 + 512) >> 10; - } -} - -void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size, - const struct macroblockd_plane *pd, - ENTROPY_CONTEXT t_above[16], - ENTROPY_CONTEXT t_left[16]) { - const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd); - const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; - const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; - const ENTROPY_CONTEXT *const above = pd->above_context; - const ENTROPY_CONTEXT *const left = pd->left_context; - - int i; - switch (tx_size) { - case TX_4X4: - memcpy(t_above, above, sizeof(ENTROPY_CONTEXT) * num_4x4_w); - memcpy(t_left, left, sizeof(ENTROPY_CONTEXT) * num_4x4_h); - break; - case TX_8X8: - for (i = 0; i < num_4x4_w; i += 2) - t_above[i] = !!*(const uint16_t *)&above[i]; - for (i = 0; i < num_4x4_h; i += 2) - t_left[i] = !!*(const uint16_t *)&left[i]; - break; - case TX_16X16: - for (i = 0; i < num_4x4_w; i += 4) - t_above[i] = !!*(const uint32_t *)&above[i]; - for (i = 0; i < num_4x4_h; i += 4) - t_left[i] = !!*(const uint32_t *)&left[i]; - break; - case TX_32X32: - for (i = 0; i < num_4x4_w; i += 8) - t_above[i] = !!*(const uint64_t *)&above[i]; - for (i = 0; i < num_4x4_h; i += 8) - t_left[i] = !!*(const uint64_t *)&left[i]; - break; - default: - assert(0 && "Invalid transform size."); - break; - } -} - -void vp10_mv_pred(VP10_COMP *cpi, MACROBLOCK *x, - uint8_t *ref_y_buffer, int ref_y_stride, - int ref_frame, BLOCK_SIZE block_size) { - int i; - int zero_seen = 0; - int best_index = 0; - int best_sad = INT_MAX; - int this_sad = INT_MAX; - int max_mv = 0; - int near_same_nearest; 
- uint8_t *src_y_ptr = x->plane[0].src.buf; - uint8_t *ref_y_ptr; - const int num_mv_refs = MAX_MV_REF_CANDIDATES + - (cpi->sf.adaptive_motion_search && - block_size < x->max_partition_size); - - MV pred_mv[3]; - pred_mv[0] = x->mbmi_ext->ref_mvs[ref_frame][0].as_mv; - pred_mv[1] = x->mbmi_ext->ref_mvs[ref_frame][1].as_mv; - pred_mv[2] = x->pred_mv[ref_frame]; - assert(num_mv_refs <= (int)(sizeof(pred_mv) / sizeof(pred_mv[0]))); - - near_same_nearest = - x->mbmi_ext->ref_mvs[ref_frame][0].as_int == - x->mbmi_ext->ref_mvs[ref_frame][1].as_int; - // Get the sad for each candidate reference mv. - for (i = 0; i < num_mv_refs; ++i) { - const MV *this_mv = &pred_mv[i]; - int fp_row, fp_col; - - if (i == 1 && near_same_nearest) - continue; - fp_row = (this_mv->row + 3 + (this_mv->row >= 0)) >> 3; - fp_col = (this_mv->col + 3 + (this_mv->col >= 0)) >> 3; - max_mv = VPXMAX(max_mv, VPXMAX(abs(this_mv->row), abs(this_mv->col)) >> 3); - - if (fp_row ==0 && fp_col == 0 && zero_seen) - continue; - zero_seen |= (fp_row ==0 && fp_col == 0); - - ref_y_ptr =&ref_y_buffer[ref_y_stride * fp_row + fp_col]; - // Find sad for current vector. - this_sad = cpi->fn_ptr[block_size].sdf(src_y_ptr, x->plane[0].src.stride, - ref_y_ptr, ref_y_stride); - // Note if it is the best so far. - if (this_sad < best_sad) { - best_sad = this_sad; - best_index = i; - } - } - - // Note the index of the mv that worked best in the reference list. 
- x->mv_best_ref_index[ref_frame] = best_index; - x->max_mv_context[ref_frame] = max_mv; - x->pred_mv_sad[ref_frame] = best_sad; -} - -void vp10_setup_pred_block(const MACROBLOCKD *xd, - struct buf_2d dst[MAX_MB_PLANE], - const YV12_BUFFER_CONFIG *src, - int mi_row, int mi_col, - const struct scale_factors *scale, - const struct scale_factors *scale_uv) { - int i; - - dst[0].buf = src->y_buffer; - dst[0].stride = src->y_stride; - dst[1].buf = src->u_buffer; - dst[2].buf = src->v_buffer; - dst[1].stride = dst[2].stride = src->uv_stride; - - for (i = 0; i < MAX_MB_PLANE; ++i) { - setup_pred_plane(dst + i, dst[i].buf, dst[i].stride, mi_row, mi_col, - i ? scale_uv : scale, - xd->plane[i].subsampling_x, xd->plane[i].subsampling_y); - } -} - -int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, - int raster_block, int stride) { - const int bw = b_width_log2_lookup[plane_bsize]; - const int y = 4 * (raster_block >> bw); - const int x = 4 * (raster_block & ((1 << bw) - 1)); - return y * stride + x; -} - -int16_t* vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize, - int raster_block, int16_t *base) { - const int stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; - return base + vp10_raster_block_offset(plane_bsize, raster_block, stride); -} - -YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const VP10_COMP *cpi, - int ref_frame) { - const VP10_COMMON *const cm = &cpi->common; - const int scaled_idx = cpi->scaled_ref_idx[ref_frame - 1]; - const int ref_idx = get_ref_frame_buf_idx(cpi, ref_frame); - return - (scaled_idx != ref_idx && scaled_idx != INVALID_IDX) ? 
- &cm->buffer_pool->frame_bufs[scaled_idx].buf : NULL; -} - -int vp10_get_switchable_rate(const VP10_COMP *cpi, - const MACROBLOCKD *const xd) { - const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - const int ctx = vp10_get_pred_context_switchable_interp(xd); - return SWITCHABLE_INTERP_RATE_FACTOR * - cpi->switchable_interp_costs[ctx][mbmi->interp_filter]; -} - -void vp10_set_rd_speed_thresholds(VP10_COMP *cpi) { - int i; - RD_OPT *const rd = &cpi->rd; - SPEED_FEATURES *const sf = &cpi->sf; - - // Set baseline threshold values. - for (i = 0; i < MAX_MODES; ++i) - rd->thresh_mult[i] = cpi->oxcf.mode == BEST ? -500 : 0; - - if (sf->adaptive_rd_thresh) { - rd->thresh_mult[THR_NEARESTMV] = 300; - rd->thresh_mult[THR_NEARESTG] = 300; - rd->thresh_mult[THR_NEARESTA] = 300; - } else { - rd->thresh_mult[THR_NEARESTMV] = 0; - rd->thresh_mult[THR_NEARESTG] = 0; - rd->thresh_mult[THR_NEARESTA] = 0; - } - - rd->thresh_mult[THR_DC] += 1000; - - rd->thresh_mult[THR_NEWMV] += 1000; - rd->thresh_mult[THR_NEWA] += 1000; - rd->thresh_mult[THR_NEWG] += 1000; - - rd->thresh_mult[THR_NEARMV] += 1000; - rd->thresh_mult[THR_NEARA] += 1000; - rd->thresh_mult[THR_COMP_NEARESTLA] += 1000; - rd->thresh_mult[THR_COMP_NEARESTGA] += 1000; - - rd->thresh_mult[THR_TM] += 1000; - - rd->thresh_mult[THR_COMP_NEARLA] += 1500; - rd->thresh_mult[THR_COMP_NEWLA] += 2000; - rd->thresh_mult[THR_NEARG] += 1000; - rd->thresh_mult[THR_COMP_NEARGA] += 1500; - rd->thresh_mult[THR_COMP_NEWGA] += 2000; - - rd->thresh_mult[THR_ZEROMV] += 2000; - rd->thresh_mult[THR_ZEROG] += 2000; - rd->thresh_mult[THR_ZEROA] += 2000; - rd->thresh_mult[THR_COMP_ZEROLA] += 2500; - rd->thresh_mult[THR_COMP_ZEROGA] += 2500; - - rd->thresh_mult[THR_H_PRED] += 2000; - rd->thresh_mult[THR_V_PRED] += 2000; - rd->thresh_mult[THR_D45_PRED ] += 2500; - rd->thresh_mult[THR_D135_PRED] += 2500; - rd->thresh_mult[THR_D117_PRED] += 2500; - rd->thresh_mult[THR_D153_PRED] += 2500; - rd->thresh_mult[THR_D207_PRED] += 2500; - 
rd->thresh_mult[THR_D63_PRED] += 2500; -} - -void vp10_set_rd_speed_thresholds_sub8x8(VP10_COMP *cpi) { - static const int thresh_mult[2][MAX_REFS] = - {{2500, 2500, 2500, 4500, 4500, 2500}, - {2000, 2000, 2000, 4000, 4000, 2000}}; - RD_OPT *const rd = &cpi->rd; - const int idx = cpi->oxcf.mode == BEST; - memcpy(rd->thresh_mult_sub8x8, thresh_mult[idx], sizeof(thresh_mult[idx])); -} - -void vp10_update_rd_thresh_fact(int (*factor_buf)[MAX_MODES], int rd_thresh, - int bsize, int best_mode_index) { - if (rd_thresh > 0) { - const int top_mode = bsize < BLOCK_8X8 ? MAX_REFS : MAX_MODES; - int mode; - for (mode = 0; mode < top_mode; ++mode) { - const BLOCK_SIZE min_size = VPXMAX(bsize - 1, BLOCK_4X4); - const BLOCK_SIZE max_size = VPXMIN(bsize + 2, BLOCK_64X64); - BLOCK_SIZE bs; - for (bs = min_size; bs <= max_size; ++bs) { - int *const fact = &factor_buf[bs][mode]; - if (mode == best_mode_index) { - *fact -= (*fact >> 4); - } else { - *fact = VPXMIN(*fact + RD_THRESH_INC, rd_thresh * RD_THRESH_MAX_FACT); - } - } - } - } -} - -int vp10_get_intra_cost_penalty(int qindex, int qdelta, - vpx_bit_depth_t bit_depth) { - const int q = vp10_dc_quant(qindex, qdelta, bit_depth); -#if CONFIG_VP9_HIGHBITDEPTH - switch (bit_depth) { - case VPX_BITS_8: - return 20 * q; - case VPX_BITS_10: - return 5 * q; - case VPX_BITS_12: - return ROUND_POWER_OF_TWO(5 * q, 2); - default: - assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); - return -1; - } -#else - return 20 * q; -#endif // CONFIG_VP9_HIGHBITDEPTH -} - diff --git a/libs/libvpx/vp10/encoder/rd.h b/libs/libvpx/vp10/encoder/rd.h deleted file mode 100644 index cd58bf84f2..0000000000 --- a/libs/libvpx/vp10/encoder/rd.h +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_ENCODER_RD_H_ -#define VP10_ENCODER_RD_H_ - -#include - -#include "vp10/common/blockd.h" - -#include "vp10/encoder/block.h" -#include "vp10/encoder/context_tree.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define RDDIV_BITS 7 - -#define RDCOST(RM, DM, R, D) \ - (((128 + ((int64_t)R) * (RM)) >> 8) + (D << DM)) -#define QIDX_SKIP_THRESH 115 - -#define MV_COST_WEIGHT 108 -#define MV_COST_WEIGHT_SUB 120 - -#define INVALID_MV 0x80008000 - -#define MAX_MODES 30 -#define MAX_REFS 6 - -#define RD_THRESH_MAX_FACT 64 -#define RD_THRESH_INC 1 - -// This enumerator type needs to be kept aligned with the mode order in -// const MODE_DEFINITION vp10_mode_order[MAX_MODES] used in the rd code. -typedef enum { - THR_NEARESTMV, - THR_NEARESTA, - THR_NEARESTG, - - THR_DC, - - THR_NEWMV, - THR_NEWA, - THR_NEWG, - - THR_NEARMV, - THR_NEARA, - THR_NEARG, - - THR_ZEROMV, - THR_ZEROG, - THR_ZEROA, - - THR_COMP_NEARESTLA, - THR_COMP_NEARESTGA, - - THR_TM, - - THR_COMP_NEARLA, - THR_COMP_NEWLA, - THR_COMP_NEARGA, - THR_COMP_NEWGA, - - THR_COMP_ZEROLA, - THR_COMP_ZEROGA, - - THR_H_PRED, - THR_V_PRED, - THR_D135_PRED, - THR_D207_PRED, - THR_D153_PRED, - THR_D63_PRED, - THR_D117_PRED, - THR_D45_PRED, -} THR_MODES; - -typedef enum { - THR_LAST, - THR_GOLD, - THR_ALTR, - THR_COMP_LA, - THR_COMP_GA, - THR_INTRA, -} THR_MODES_SUB8X8; - -typedef struct RD_OPT { - // Thresh_mult is used to set a threshold for the rd score. A higher value - // means that we will accept the best mode so far more often. This number - // is used in combination with the current block size, and thresh_freq_fact - // to pick a threshold. 
- int thresh_mult[MAX_MODES]; - int thresh_mult_sub8x8[MAX_REFS]; - - int threshes[MAX_SEGMENTS][BLOCK_SIZES][MAX_MODES]; - - int64_t prediction_type_threshes[MAX_REF_FRAMES][REFERENCE_MODES]; - - int64_t filter_threshes[MAX_REF_FRAMES][SWITCHABLE_FILTER_CONTEXTS]; - - int RDMULT; - int RDDIV; -} RD_OPT; - -typedef struct RD_COST { - int rate; - int64_t dist; - int64_t rdcost; -} RD_COST; - -// Reset the rate distortion cost values to maximum (invalid) value. -void vp10_rd_cost_reset(RD_COST *rd_cost); -// Initialize the rate distortion cost values to zero. -void vp10_rd_cost_init(RD_COST *rd_cost); - -struct TileInfo; -struct TileDataEnc; -struct VP10_COMP; -struct macroblock; - -int vp10_compute_rd_mult(const struct VP10_COMP *cpi, int qindex); - -void vp10_initialize_rd_consts(struct VP10_COMP *cpi); - -void vp10_initialize_me_consts(struct VP10_COMP *cpi, - MACROBLOCK *x, int qindex); - -void vp10_model_rd_from_var_lapndz(unsigned int var, unsigned int n, - unsigned int qstep, int *rate, - int64_t *dist); - -int vp10_get_switchable_rate(const struct VP10_COMP *cpi, - const MACROBLOCKD *const xd); - -int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, - int raster_block, int stride); - -int16_t* vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize, - int raster_block, int16_t *base); - -YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const struct VP10_COMP *cpi, - int ref_frame); - -void vp10_init_me_luts(void); - -void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size, - const struct macroblockd_plane *pd, - ENTROPY_CONTEXT t_above[16], - ENTROPY_CONTEXT t_left[16]); - -void vp10_set_rd_speed_thresholds(struct VP10_COMP *cpi); - -void vp10_set_rd_speed_thresholds_sub8x8(struct VP10_COMP *cpi); - -void vp10_update_rd_thresh_fact(int (*fact)[MAX_MODES], int rd_thresh, - int bsize, int best_mode_index); - -static INLINE int rd_less_than_thresh(int64_t best_rd, int thresh, - int thresh_fact) { - return best_rd < ((int64_t)thresh * thresh_fact >> 5) || 
thresh == INT_MAX; -} - -void vp10_mv_pred(struct VP10_COMP *cpi, MACROBLOCK *x, - uint8_t *ref_y_buffer, int ref_y_stride, - int ref_frame, BLOCK_SIZE block_size); - -void vp10_setup_pred_block(const MACROBLOCKD *xd, - struct buf_2d dst[MAX_MB_PLANE], - const YV12_BUFFER_CONFIG *src, - int mi_row, int mi_col, - const struct scale_factors *scale, - const struct scale_factors *scale_uv); - -int vp10_get_intra_cost_penalty(int qindex, int qdelta, - vpx_bit_depth_t bit_depth); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_RD_H_ diff --git a/libs/libvpx/vp10/encoder/rdopt.c b/libs/libvpx/vp10/encoder/rdopt.c deleted file mode 100644 index c62da964aa..0000000000 --- a/libs/libvpx/vp10/encoder/rdopt.c +++ /dev/null @@ -1,4310 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include - -#include "./vp10_rtcd.h" -#include "./vpx_dsp_rtcd.h" - -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/mem.h" -#include "vpx_ports/system_state.h" - -#include "vp10/common/common.h" -#include "vp10/common/entropy.h" -#include "vp10/common/entropymode.h" -#include "vp10/common/idct.h" -#include "vp10/common/mvref_common.h" -#include "vp10/common/pred_common.h" -#include "vp10/common/quant_common.h" -#include "vp10/common/reconinter.h" -#include "vp10/common/reconintra.h" -#include "vp10/common/scan.h" -#include "vp10/common/seg_common.h" - -#include "vp10/encoder/cost.h" -#include "vp10/encoder/encodemb.h" -#include "vp10/encoder/encodemv.h" -#include "vp10/encoder/encoder.h" -#include "vp10/encoder/mcomp.h" -#include "vp10/encoder/quantize.h" -#include "vp10/encoder/ratectrl.h" -#include "vp10/encoder/rd.h" -#include "vp10/encoder/rdopt.h" -#include "vp10/encoder/aq_variance.h" - -#define LAST_FRAME_MODE_MASK ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | \ - (1 << INTRA_FRAME)) -#define GOLDEN_FRAME_MODE_MASK ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | \ - (1 << INTRA_FRAME)) -#define ALT_REF_MODE_MASK ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | \ - (1 << INTRA_FRAME)) - -#define SECOND_REF_FRAME_MASK ((1 << ALTREF_FRAME) | 0x01) - -#define MIN_EARLY_TERM_INDEX 3 -#define NEW_MV_DISCOUNT_FACTOR 8 - -const double ext_tx_th = 0.99; - -typedef struct { - PREDICTION_MODE mode; - MV_REFERENCE_FRAME ref_frame[2]; -} MODE_DEFINITION; - -typedef struct { - MV_REFERENCE_FRAME ref_frame[2]; -} REF_DEFINITION; - -struct rdcost_block_args { - MACROBLOCK *x; - ENTROPY_CONTEXT t_above[16]; - ENTROPY_CONTEXT t_left[16]; - int this_rate; - int64_t this_dist; - int64_t this_sse; - int64_t this_rd; - int64_t best_rd; - int exit_early; - int use_fast_coef_costing; - const scan_order *so; - uint8_t skippable; -}; - -#define LAST_NEW_MV_INDEX 6 -static const MODE_DEFINITION vp10_mode_order[MAX_MODES] = { - {NEARESTMV, 
{LAST_FRAME, NONE}}, - {NEARESTMV, {ALTREF_FRAME, NONE}}, - {NEARESTMV, {GOLDEN_FRAME, NONE}}, - - {DC_PRED, {INTRA_FRAME, NONE}}, - - {NEWMV, {LAST_FRAME, NONE}}, - {NEWMV, {ALTREF_FRAME, NONE}}, - {NEWMV, {GOLDEN_FRAME, NONE}}, - - {NEARMV, {LAST_FRAME, NONE}}, - {NEARMV, {ALTREF_FRAME, NONE}}, - {NEARMV, {GOLDEN_FRAME, NONE}}, - - {ZEROMV, {LAST_FRAME, NONE}}, - {ZEROMV, {GOLDEN_FRAME, NONE}}, - {ZEROMV, {ALTREF_FRAME, NONE}}, - - {NEARESTMV, {LAST_FRAME, ALTREF_FRAME}}, - {NEARESTMV, {GOLDEN_FRAME, ALTREF_FRAME}}, - - {TM_PRED, {INTRA_FRAME, NONE}}, - - {NEARMV, {LAST_FRAME, ALTREF_FRAME}}, - {NEWMV, {LAST_FRAME, ALTREF_FRAME}}, - {NEARMV, {GOLDEN_FRAME, ALTREF_FRAME}}, - {NEWMV, {GOLDEN_FRAME, ALTREF_FRAME}}, - - {ZEROMV, {LAST_FRAME, ALTREF_FRAME}}, - {ZEROMV, {GOLDEN_FRAME, ALTREF_FRAME}}, - - {H_PRED, {INTRA_FRAME, NONE}}, - {V_PRED, {INTRA_FRAME, NONE}}, - {D135_PRED, {INTRA_FRAME, NONE}}, - {D207_PRED, {INTRA_FRAME, NONE}}, - {D153_PRED, {INTRA_FRAME, NONE}}, - {D63_PRED, {INTRA_FRAME, NONE}}, - {D117_PRED, {INTRA_FRAME, NONE}}, - {D45_PRED, {INTRA_FRAME, NONE}}, -}; - -static const REF_DEFINITION vp10_ref_order[MAX_REFS] = { - {{LAST_FRAME, NONE}}, - {{GOLDEN_FRAME, NONE}}, - {{ALTREF_FRAME, NONE}}, - {{LAST_FRAME, ALTREF_FRAME}}, - {{GOLDEN_FRAME, ALTREF_FRAME}}, - {{INTRA_FRAME, NONE}}, -}; - -static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, - int m, int n, int min_plane, int max_plane) { - int i; - - for (i = min_plane; i < max_plane; ++i) { - struct macroblock_plane *const p = &x->plane[i]; - struct macroblockd_plane *const pd = &x->e_mbd.plane[i]; - - p->coeff = ctx->coeff_pbuf[i][m]; - p->qcoeff = ctx->qcoeff_pbuf[i][m]; - pd->dqcoeff = ctx->dqcoeff_pbuf[i][m]; - p->eobs = ctx->eobs_pbuf[i][m]; - - ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n]; - ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n]; - ctx->dqcoeff_pbuf[i][m] = ctx->dqcoeff_pbuf[i][n]; - ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n]; - - ctx->coeff_pbuf[i][n] = p->coeff; - 
ctx->qcoeff_pbuf[i][n] = p->qcoeff; - ctx->dqcoeff_pbuf[i][n] = pd->dqcoeff; - ctx->eobs_pbuf[i][n] = p->eobs; - } -} - -static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize, - MACROBLOCK *x, MACROBLOCKD *xd, - int *out_rate_sum, int64_t *out_dist_sum, - int *skip_txfm_sb, int64_t *skip_sse_sb) { - // Note our transform coeffs are 8 times an orthogonal transform. - // Hence quantizer step is also 8 times. To get effective quantizer - // we need to divide by 8 before sending to modeling function. - int i; - int64_t rate_sum = 0; - int64_t dist_sum = 0; - const int ref = xd->mi[0]->mbmi.ref_frame[0]; - unsigned int sse; - unsigned int var = 0; - unsigned int sum_sse = 0; - int64_t total_sse = 0; - int skip_flag = 1; - const int shift = 6; - int rate; - int64_t dist; - const int dequant_shift = -#if CONFIG_VP9_HIGHBITDEPTH - (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? - xd->bd - 5 : -#endif // CONFIG_VP9_HIGHBITDEPTH - 3; - - x->pred_sse[ref] = 0; - - for (i = 0; i < MAX_MB_PLANE; ++i) { - struct macroblock_plane *const p = &x->plane[i]; - struct macroblockd_plane *const pd = &xd->plane[i]; - const BLOCK_SIZE bs = get_plane_block_size(bsize, pd); - const TX_SIZE max_tx_size = max_txsize_lookup[bs]; - const BLOCK_SIZE unit_size = txsize_to_bsize[max_tx_size]; - const int64_t dc_thr = p->quant_thred[0] >> shift; - const int64_t ac_thr = p->quant_thred[1] >> shift; - // The low thresholds are used to measure if the prediction errors are - // low enough so that we can skip the mode search. 
- const int64_t low_dc_thr = VPXMIN(50, dc_thr >> 2); - const int64_t low_ac_thr = VPXMIN(80, ac_thr >> 2); - int bw = 1 << (b_width_log2_lookup[bs] - b_width_log2_lookup[unit_size]); - int bh = 1 << (b_height_log2_lookup[bs] - b_width_log2_lookup[unit_size]); - int idx, idy; - int lw = b_width_log2_lookup[unit_size] + 2; - int lh = b_height_log2_lookup[unit_size] + 2; - - sum_sse = 0; - - for (idy = 0; idy < bh; ++idy) { - for (idx = 0; idx < bw; ++idx) { - uint8_t *src = p->src.buf + (idy * p->src.stride << lh) + (idx << lw); - uint8_t *dst = pd->dst.buf + (idy * pd->dst.stride << lh) + (idx << lh); - int block_idx = (idy << 1) + idx; - int low_err_skip = 0; - - var = cpi->fn_ptr[unit_size].vf(src, p->src.stride, - dst, pd->dst.stride, &sse); - x->bsse[(i << 2) + block_idx] = sse; - sum_sse += sse; - - x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_NONE; - if (!x->select_tx_size) { - // Check if all ac coefficients can be quantized to zero. - if (var < ac_thr || var == 0) { - x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_ONLY; - - // Check if dc coefficient can be quantized to zero. - if (sse - var < dc_thr || sse == var) { - x->skip_txfm[(i << 2) + block_idx] = SKIP_TXFM_AC_DC; - - if (!sse || (var < low_ac_thr && sse - var < low_dc_thr)) - low_err_skip = 1; - } - } - } - - if (skip_flag && !low_err_skip) - skip_flag = 0; - - if (i == 0) - x->pred_sse[ref] += sse; - } - } - - total_sse += sum_sse; - - // Fast approximate the modelling function. 
- if (cpi->sf.simple_model_rd_from_var) { - int64_t rate; - const int64_t square_error = sum_sse; - int quantizer = (pd->dequant[1] >> dequant_shift); - - if (quantizer < 120) - rate = (square_error * (280 - quantizer)) >> 8; - else - rate = 0; - dist = (square_error * quantizer) >> 8; - rate_sum += rate; - dist_sum += dist; - } else { - vp10_model_rd_from_var_lapndz(sum_sse, num_pels_log2_lookup[bs], - pd->dequant[1] >> dequant_shift, - &rate, &dist); - rate_sum += rate; - dist_sum += dist; - } - } - - *skip_txfm_sb = skip_flag; - *skip_sse_sb = total_sse << 4; - *out_rate_sum = (int)rate_sum; - *out_dist_sum = dist_sum << 4; -} - -int64_t vp10_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff, - intptr_t block_size, int64_t *ssz) { - int i; - int64_t error = 0, sqcoeff = 0; - - for (i = 0; i < block_size; i++) { - const int diff = coeff[i] - dqcoeff[i]; - error += diff * diff; - sqcoeff += coeff[i] * coeff[i]; - } - - *ssz = sqcoeff; - return error; -} - -int64_t vp10_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff, - int block_size) { - int i; - int64_t error = 0; - - for (i = 0; i < block_size; i++) { - const int diff = coeff[i] - dqcoeff[i]; - error += diff * diff; - } - - return error; -} - -#if CONFIG_VP9_HIGHBITDEPTH -int64_t vp10_highbd_block_error_c(const tran_low_t *coeff, - const tran_low_t *dqcoeff, - intptr_t block_size, - int64_t *ssz, int bd) { - int i; - int64_t error = 0, sqcoeff = 0; - int shift = 2 * (bd - 8); - int rounding = shift > 0 ? 
1 << (shift - 1) : 0; - - for (i = 0; i < block_size; i++) { - const int64_t diff = coeff[i] - dqcoeff[i]; - error += diff * diff; - sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i]; - } - assert(error >= 0 && sqcoeff >= 0); - error = (error + rounding) >> shift; - sqcoeff = (sqcoeff + rounding) >> shift; - - *ssz = sqcoeff; - return error; -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -/* The trailing '0' is a terminator which is used inside cost_coeffs() to - * decide whether to include cost of a trailing EOB node or not (i.e. we - * can skip this if the last coefficient in this transform block, e.g. the - * 16th coefficient in a 4x4 block or the 64th coefficient in a 8x8 block, - * were non-zero). */ -static const int16_t band_counts[TX_SIZES][8] = { - { 1, 2, 3, 4, 3, 16 - 13, 0 }, - { 1, 2, 3, 4, 11, 64 - 21, 0 }, - { 1, 2, 3, 4, 11, 256 - 21, 0 }, - { 1, 2, 3, 4, 11, 1024 - 21, 0 }, -}; -static int cost_coeffs(MACROBLOCK *x, - int plane, int block, - ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L, - TX_SIZE tx_size, - const int16_t *scan, const int16_t *nb, - int use_fast_coef_costing) { - MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi; - const struct macroblock_plane *p = &x->plane[plane]; - const struct macroblockd_plane *pd = &xd->plane[plane]; - const PLANE_TYPE type = pd->plane_type; - const int16_t *band_count = &band_counts[tx_size][1]; - const int eob = p->eobs[block]; - const tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block); - unsigned int (*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] = - x->token_costs[tx_size][type][is_inter_block(mbmi)]; - uint8_t token_cache[32 * 32]; - int pt = combine_entropy_contexts(*A, *L); - int c, cost; -#if CONFIG_VP9_HIGHBITDEPTH - const int16_t *cat6_high_cost = vp10_get_high_cost_table(xd->bd); -#else - const int16_t *cat6_high_cost = vp10_get_high_cost_table(8); -#endif - - // Check for consistency of tx_size with mode info - assert(type == PLANE_TYPE_Y ? 
mbmi->tx_size == tx_size - : get_uv_tx_size(mbmi, pd) == tx_size); - - if (eob == 0) { - // single eob token - cost = token_costs[0][0][pt][EOB_TOKEN]; - c = 0; - } else { - int band_left = *band_count++; - - // dc token - int v = qcoeff[0]; - int16_t prev_t; - EXTRABIT e; - vp10_get_token_extra(v, &prev_t, &e); - cost = (*token_costs)[0][pt][prev_t] + - vp10_get_cost(prev_t, e, cat6_high_cost); - - token_cache[0] = vp10_pt_energy_class[prev_t]; - ++token_costs; - - // ac tokens - for (c = 1; c < eob; c++) { - const int rc = scan[c]; - int16_t t; - - v = qcoeff[rc]; - vp10_get_token_extra(v, &t, &e); - if (use_fast_coef_costing) { - cost += (*token_costs)[!prev_t][!prev_t][t] + - vp10_get_cost(t, e, cat6_high_cost); - } else { - pt = get_coef_context(nb, token_cache, c); - cost += (*token_costs)[!prev_t][pt][t] + - vp10_get_cost(t, e, cat6_high_cost); - token_cache[rc] = vp10_pt_energy_class[t]; - } - prev_t = t; - if (!--band_left) { - band_left = *band_count++; - ++token_costs; - } - } - - // eob token - if (band_left) { - if (use_fast_coef_costing) { - cost += (*token_costs)[0][!prev_t][EOB_TOKEN]; - } else { - pt = get_coef_context(nb, token_cache, c); - cost += (*token_costs)[0][pt][EOB_TOKEN]; - } - } - } - - // is eob first coefficient; - *A = *L = (c > 0); - - return cost; -} - -static void dist_block(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size, - int64_t *out_dist, int64_t *out_sse) { - const int ss_txfrm_size = tx_size << 1; - MACROBLOCKD* const xd = &x->e_mbd; - const struct macroblock_plane *const p = &x->plane[plane]; - const struct macroblockd_plane *const pd = &xd->plane[plane]; - int64_t this_sse; - int shift = tx_size == TX_32X32 ? 0 : 2; - tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block); - tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); -#if CONFIG_VP9_HIGHBITDEPTH - const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 
xd->bd : 8; - *out_dist = vp10_highbd_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, - &this_sse, bd) >> shift; -#else - *out_dist = vp10_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, - &this_sse) >> shift; -#endif // CONFIG_VP9_HIGHBITDEPTH - *out_sse = this_sse >> shift; -} - -static int rate_block(int plane, int block, int blk_row, int blk_col, - TX_SIZE tx_size, struct rdcost_block_args* args) { - return cost_coeffs(args->x, plane, block, args->t_above + blk_col, - args->t_left + blk_row, tx_size, - args->so->scan, args->so->neighbors, - args->use_fast_coef_costing); -} - -static void block_rd_txfm(int plane, int block, int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { - struct rdcost_block_args *args = arg; - MACROBLOCK *const x = args->x; - MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - int64_t rd1, rd2, rd; - int rate; - int64_t dist; - int64_t sse; - - if (args->exit_early) - return; - - if (!is_inter_block(mbmi)) { - struct encode_b_args arg = {x, NULL, &mbmi->skip}; - vp10_encode_block_intra(plane, block, blk_row, blk_col, - plane_bsize, tx_size, &arg); - dist_block(x, plane, block, tx_size, &dist, &sse); - } else if (max_txsize_lookup[plane_bsize] == tx_size) { - if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] == - SKIP_TXFM_NONE) { - // full forward transform and quantization - vp10_xform_quant(x, plane, block, blk_row, blk_col, - plane_bsize, tx_size); - dist_block(x, plane, block, tx_size, &dist, &sse); - } else if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] == - SKIP_TXFM_AC_ONLY) { - // compute DC coefficient - tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block); - tran_low_t *const dqcoeff = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block); - vp10_xform_quant_dc(x, plane, block, blk_row, blk_col, - plane_bsize, tx_size); - sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4; - dist = sse; - if (x->plane[plane].eobs[block]) { - 
const int64_t orig_sse = (int64_t)coeff[0] * coeff[0]; - const int64_t resd_sse = coeff[0] - dqcoeff[0]; - int64_t dc_correct = orig_sse - resd_sse * resd_sse; -#if CONFIG_VP9_HIGHBITDEPTH - dc_correct >>= ((xd->bd - 8) * 2); -#endif - if (tx_size != TX_32X32) - dc_correct >>= 2; - - dist = VPXMAX(0, sse - dc_correct); - } - } else { - // SKIP_TXFM_AC_DC - // skip forward transform - x->plane[plane].eobs[block] = 0; - sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4; - dist = sse; - } - } else { - // full forward transform and quantization - vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size); - dist_block(x, plane, block, tx_size, &dist, &sse); - } - - rd = RDCOST(x->rdmult, x->rddiv, 0, dist); - if (args->this_rd + rd > args->best_rd) { - args->exit_early = 1; - return; - } - - rate = rate_block(plane, block, blk_row, blk_col, tx_size, args); - rd1 = RDCOST(x->rdmult, x->rddiv, rate, dist); - rd2 = RDCOST(x->rdmult, x->rddiv, 0, sse); - - // TODO(jingning): temporarily enabled only for luma component - rd = VPXMIN(rd1, rd2); - if (plane == 0) - x->zcoeff_blk[tx_size][block] = !x->plane[plane].eobs[block] || - (rd1 > rd2 && !xd->lossless[mbmi->segment_id]); - - args->this_rate += rate; - args->this_dist += dist; - args->this_sse += sse; - args->this_rd += rd; - - if (args->this_rd > args->best_rd) { - args->exit_early = 1; - return; - } - - args->skippable &= !x->plane[plane].eobs[block]; -} - -static void txfm_rd_in_plane(MACROBLOCK *x, - int *rate, int64_t *distortion, - int *skippable, int64_t *sse, - int64_t ref_best_rd, int plane, - BLOCK_SIZE bsize, TX_SIZE tx_size, - int use_fast_coef_casting) { - MACROBLOCKD *const xd = &x->e_mbd; - const struct macroblockd_plane *const pd = &xd->plane[plane]; - TX_TYPE tx_type; - struct rdcost_block_args args; - vp10_zero(args); - args.x = x; - args.best_rd = ref_best_rd; - args.use_fast_coef_costing = use_fast_coef_casting; - args.skippable = 1; - - if (plane == 0) - 
xd->mi[0]->mbmi.tx_size = tx_size; - - vp10_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left); - - tx_type = get_tx_type(pd->plane_type, xd, 0); - args.so = get_scan(tx_size, tx_type); - - vp10_foreach_transformed_block_in_plane(xd, bsize, plane, - block_rd_txfm, &args); - if (args.exit_early) { - *rate = INT_MAX; - *distortion = INT64_MAX; - *sse = INT64_MAX; - *skippable = 0; - } else { - *distortion = args.this_dist; - *rate = args.this_rate; - *sse = args.this_sse; - *skippable = args.skippable; - } -} - -static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, - int *rate, int64_t *distortion, - int *skip, int64_t *sse, - int64_t ref_best_rd, - BLOCK_SIZE bs) { - const TX_SIZE max_tx_size = max_txsize_lookup[bs]; - VP10_COMMON *const cm = &cpi->common; - const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode]; - MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - - TX_TYPE tx_type, best_tx_type = DCT_DCT; - int r, s; - int64_t d, psse, this_rd, best_rd = INT64_MAX; - vpx_prob skip_prob = vp10_get_skip_prob(cm, xd); - int s0 = vp10_cost_bit(skip_prob, 0); - int s1 = vp10_cost_bit(skip_prob, 1); - const int is_inter = is_inter_block(mbmi); - - mbmi->tx_size = VPXMIN(max_tx_size, largest_tx_size); - if (mbmi->tx_size < TX_32X32 && - !xd->lossless[mbmi->segment_id]) { - for (tx_type = 0; tx_type < TX_TYPES; ++tx_type) { - mbmi->tx_type = tx_type; - txfm_rd_in_plane(x, &r, &d, &s, - &psse, ref_best_rd, 0, bs, mbmi->tx_size, - cpi->sf.use_fast_coef_costing); - if (r == INT_MAX) - continue; - if (is_inter) - r += cpi->inter_tx_type_costs[mbmi->tx_size][mbmi->tx_type]; - else - r += cpi->intra_tx_type_costs[mbmi->tx_size] - [intra_mode_to_tx_type_context[mbmi->mode]] - [mbmi->tx_type]; - if (s) - this_rd = RDCOST(x->rdmult, x->rddiv, s1, psse); - else - this_rd = RDCOST(x->rdmult, x->rddiv, r + s0, d); - if (is_inter && !xd->lossless[mbmi->segment_id] && !s) - this_rd = VPXMIN(this_rd, 
RDCOST(x->rdmult, x->rddiv, s1, psse)); - - if (this_rd < ((best_tx_type == DCT_DCT) ? ext_tx_th : 1) * best_rd) { - best_rd = this_rd; - best_tx_type = mbmi->tx_type; - } - } - } - mbmi->tx_type = best_tx_type; - txfm_rd_in_plane(x, rate, distortion, skip, - sse, ref_best_rd, 0, bs, - mbmi->tx_size, cpi->sf.use_fast_coef_costing); - if (mbmi->tx_size < TX_32X32 && !xd->lossless[mbmi->segment_id] && - *rate != INT_MAX) { - if (is_inter) - *rate += cpi->inter_tx_type_costs[mbmi->tx_size][mbmi->tx_type]; - else - *rate += cpi->intra_tx_type_costs[mbmi->tx_size] - [intra_mode_to_tx_type_context[mbmi->mode]] - [mbmi->tx_type]; - } -} - -static void choose_smallest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, - int *rate, int64_t *distortion, - int *skip, int64_t *sse, - int64_t ref_best_rd, - BLOCK_SIZE bs) { - MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - - mbmi->tx_size = TX_4X4; - - txfm_rd_in_plane(x, rate, distortion, skip, - sse, ref_best_rd, 0, bs, - mbmi->tx_size, cpi->sf.use_fast_coef_costing); -} - -static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x, - int *rate, - int64_t *distortion, - int *skip, - int64_t *psse, - int64_t ref_best_rd, - BLOCK_SIZE bs) { - const TX_SIZE max_tx_size = max_txsize_lookup[bs]; - VP10_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - vpx_prob skip_prob = vp10_get_skip_prob(cm, xd); - int r, s; - int64_t d, sse; - int64_t rd = INT64_MAX; - int n, m; - int s0, s1; - int64_t best_rd = INT64_MAX, last_rd = INT64_MAX; - TX_SIZE best_tx = max_tx_size; - int start_tx, end_tx; - const int tx_select = cm->tx_mode == TX_MODE_SELECT; - TX_TYPE tx_type, best_tx_type = DCT_DCT; - const int is_inter = is_inter_block(mbmi); - - const vpx_prob *tx_probs = get_tx_probs2(max_tx_size, xd, &cm->fc->tx_probs); - assert(skip_prob > 0); - s0 = vp10_cost_bit(skip_prob, 0); - s1 = vp10_cost_bit(skip_prob, 1); - - if (tx_select) { - start_tx 
= max_tx_size; - end_tx = 0; - } else { - const TX_SIZE chosen_tx_size = - VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[cm->tx_mode]); - start_tx = chosen_tx_size; - end_tx = chosen_tx_size; - } - - *distortion = INT64_MAX; - *rate = INT_MAX; - *skip = 0; - *psse = INT64_MAX; - - for (tx_type = DCT_DCT; tx_type < TX_TYPES; ++tx_type) { - last_rd = INT64_MAX; - for (n = start_tx; n >= end_tx; --n) { - int r_tx_size = 0; - for (m = 0; m <= n - (n == (int) max_tx_size); ++m) { - if (m == n) - r_tx_size += vp10_cost_zero(tx_probs[m]); - else - r_tx_size += vp10_cost_one(tx_probs[m]); - } - - if (n >= TX_32X32 && tx_type != DCT_DCT) { - continue; - } - mbmi->tx_type = tx_type; - txfm_rd_in_plane(x, &r, &d, &s, - &sse, ref_best_rd, 0, bs, n, - cpi->sf.use_fast_coef_costing); - if (n < TX_32X32 && - !xd->lossless[xd->mi[0]->mbmi.segment_id] && - r != INT_MAX) { - if (is_inter) - r += cpi->inter_tx_type_costs[mbmi->tx_size][mbmi->tx_type]; - else - r += cpi->intra_tx_type_costs[mbmi->tx_size] - [intra_mode_to_tx_type_context[mbmi->mode]] - [mbmi->tx_type]; - } - - if (r == INT_MAX) - continue; - - if (s) { - if (is_inter) { - rd = RDCOST(x->rdmult, x->rddiv, s1, sse); - } else { - rd = RDCOST(x->rdmult, x->rddiv, s1 + r_tx_size * tx_select, sse); - } - } else { - rd = RDCOST(x->rdmult, x->rddiv, r + s0 + r_tx_size * tx_select, d); - } - - if (tx_select && !(s && is_inter)) - r += r_tx_size; - - if (is_inter && !xd->lossless[xd->mi[0]->mbmi.segment_id] && !s) - rd = VPXMIN(rd, RDCOST(x->rdmult, x->rddiv, s1, sse)); - - // Early termination in transform size search. - if (cpi->sf.tx_size_search_breakout && - (rd == INT64_MAX || - (s == 1 && tx_type != DCT_DCT && n < start_tx) || - (n < (int) max_tx_size && rd > last_rd))) - break; - - last_rd = rd; - if (rd < - (is_inter && best_tx_type == DCT_DCT ? 
ext_tx_th : 1) * - best_rd) { - best_tx = n; - best_rd = rd; - *distortion = d; - *rate = r; - *skip = s; - *psse = sse; - best_tx_type = mbmi->tx_type; - } - } - } - - mbmi->tx_size = best_tx; - mbmi->tx_type = best_tx_type; - if (mbmi->tx_size >= TX_32X32) - assert(mbmi->tx_type == DCT_DCT); - txfm_rd_in_plane(x, &r, &d, &s, - &sse, ref_best_rd, 0, bs, best_tx, - cpi->sf.use_fast_coef_costing); -} - -static void super_block_yrd(VP10_COMP *cpi, MACROBLOCK *x, int *rate, - int64_t *distortion, int *skip, - int64_t *psse, BLOCK_SIZE bs, - int64_t ref_best_rd) { - MACROBLOCKD *xd = &x->e_mbd; - int64_t sse; - int64_t *ret_sse = psse ? psse : &sse; - - assert(bs == xd->mi[0]->mbmi.sb_type); - - if (CONFIG_MISC_FIXES && xd->lossless[0]) { - choose_smallest_tx_size(cpi, x, rate, distortion, skip, ret_sse, - ref_best_rd, bs); - } else if (cpi->sf.tx_size_search_method == USE_LARGESTALL || - xd->lossless[xd->mi[0]->mbmi.segment_id]) { - choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd, - bs); - } else { - choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse, - ref_best_rd, bs); - } -} - -static int conditional_skipintra(PREDICTION_MODE mode, - PREDICTION_MODE best_intra_mode) { - if (mode == D117_PRED && - best_intra_mode != V_PRED && - best_intra_mode != D135_PRED) - return 1; - if (mode == D63_PRED && - best_intra_mode != V_PRED && - best_intra_mode != D45_PRED) - return 1; - if (mode == D207_PRED && - best_intra_mode != H_PRED && - best_intra_mode != D45_PRED) - return 1; - if (mode == D153_PRED && - best_intra_mode != H_PRED && - best_intra_mode != D135_PRED) - return 1; - return 0; -} - -static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x, - int row, int col, - PREDICTION_MODE *best_mode, - const int *bmode_costs, - ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l, - int *bestrate, int *bestratey, - int64_t *bestdistortion, - BLOCK_SIZE bsize, int64_t rd_thresh) { - PREDICTION_MODE mode; - MACROBLOCKD *const xd = &x->e_mbd; - 
int64_t best_rd = rd_thresh; - struct macroblock_plane *p = &x->plane[0]; - struct macroblockd_plane *pd = &xd->plane[0]; - const int src_stride = p->src.stride; - const int dst_stride = pd->dst.stride; - const uint8_t *src_init = &p->src.buf[row * 4 * src_stride + col * 4]; - uint8_t *dst_init = &pd->dst.buf[row * 4 * src_stride + col * 4]; - ENTROPY_CONTEXT ta[2], tempa[2]; - ENTROPY_CONTEXT tl[2], templ[2]; - const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; - const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; - int idx, idy; - uint8_t best_dst[8 * 8]; -#if CONFIG_VP9_HIGHBITDEPTH - uint16_t best_dst16[8 * 8]; -#endif - - memcpy(ta, a, sizeof(ta)); - memcpy(tl, l, sizeof(tl)); - xd->mi[0]->mbmi.tx_size = TX_4X4; - -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - for (mode = DC_PRED; mode <= TM_PRED; ++mode) { - int64_t this_rd; - int ratey = 0; - int64_t distortion = 0; - int rate = bmode_costs[mode]; - - if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) - continue; - - // Only do the oblique modes if the best so far is - // one of the neighboring directional modes - if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) { - if (conditional_skipintra(mode, *best_mode)) - continue; - } - - memcpy(tempa, ta, sizeof(ta)); - memcpy(templ, tl, sizeof(tl)); - - for (idy = 0; idy < num_4x4_blocks_high; ++idy) { - for (idx = 0; idx < num_4x4_blocks_wide; ++idx) { - const int block = (row + idy) * 2 + (col + idx); - const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride]; - uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride]; - int16_t *const src_diff = vp10_raster_block_offset_int16(BLOCK_8X8, - block, - p->src_diff); - tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block); - xd->mi[0]->bmi[block].as_mode = mode; - vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, - dst, dst_stride, - col + idx, row + idy, 0); - 
vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, - dst, dst_stride, xd->bd); - if (xd->lossless[xd->mi[0]->mbmi.segment_id]) { - TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block); - const scan_order *so = get_scan(TX_4X4, tx_type); - vp10_highbd_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT, 1); - vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan); - ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4, - so->scan, so->neighbors, - cpi->sf.use_fast_coef_costing); - if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd) - goto next_highbd; - vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), - dst, dst_stride, p->eobs[block], - xd->bd, DCT_DCT, 1); - } else { - int64_t unused; - TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block); - const scan_order *so = get_scan(TX_4X4, tx_type); - vp10_highbd_fwd_txfm_4x4(src_diff, coeff, 8, tx_type, 0); - vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan); - ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4, - so->scan, so->neighbors, - cpi->sf.use_fast_coef_costing); - distortion += vp10_highbd_block_error( - coeff, BLOCK_OFFSET(pd->dqcoeff, block), - 16, &unused, xd->bd) >> 2; - if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd) - goto next_highbd; - vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), - dst, dst_stride, p->eobs[block], - xd->bd, tx_type, 0); - } - } - } - - rate += ratey; - this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion); - - if (this_rd < best_rd) { - *bestrate = rate; - *bestratey = ratey; - *bestdistortion = distortion; - best_rd = this_rd; - *best_mode = mode; - memcpy(a, tempa, sizeof(tempa)); - memcpy(l, templ, sizeof(templ)); - for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) { - memcpy(best_dst16 + idy * 8, - CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride), - num_4x4_blocks_wide * 4 * sizeof(uint16_t)); - } - } - next_highbd: - {} - } - if (best_rd >= rd_thresh) - return best_rd; 
- - for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) { - memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride), - best_dst16 + idy * 8, - num_4x4_blocks_wide * 4 * sizeof(uint16_t)); - } - - return best_rd; - } -#endif // CONFIG_VP9_HIGHBITDEPTH - - for (mode = DC_PRED; mode <= TM_PRED; ++mode) { - int64_t this_rd; - int ratey = 0; - int64_t distortion = 0; - int rate = bmode_costs[mode]; - - if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) - continue; - - // Only do the oblique modes if the best so far is - // one of the neighboring directional modes - if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) { - if (conditional_skipintra(mode, *best_mode)) - continue; - } - - memcpy(tempa, ta, sizeof(ta)); - memcpy(templ, tl, sizeof(tl)); - - for (idy = 0; idy < num_4x4_blocks_high; ++idy) { - for (idx = 0; idx < num_4x4_blocks_wide; ++idx) { - const int block = (row + idy) * 2 + (col + idx); - const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride]; - uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride]; - int16_t *const src_diff = - vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff); - tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block); - xd->mi[0]->bmi[block].as_mode = mode; - vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, - dst, dst_stride, col + idx, row + idy, 0); - vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride); - - if (xd->lossless[xd->mi[0]->mbmi.segment_id]) { - TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block); - const scan_order *so = get_scan(TX_4X4, tx_type); - vp10_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT, 1); - vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan); - ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4, - so->scan, so->neighbors, - cpi->sf.use_fast_coef_costing); - if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd) - goto next; - vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, 
block), - dst, dst_stride, p->eobs[block], DCT_DCT, 1); - } else { - int64_t unused; - TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block); - const scan_order *so = get_scan(TX_4X4, tx_type); - vp10_fwd_txfm_4x4(src_diff, coeff, 8, tx_type, 0); - vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan); - ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4, - so->scan, so->neighbors, - cpi->sf.use_fast_coef_costing); - distortion += vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block), - 16, &unused) >> 2; - if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd) - goto next; - vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), - dst, dst_stride, p->eobs[block], tx_type, 0); - } - } - } - - rate += ratey; - this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion); - - if (this_rd < best_rd) { - *bestrate = rate; - *bestratey = ratey; - *bestdistortion = distortion; - best_rd = this_rd; - *best_mode = mode; - memcpy(a, tempa, sizeof(tempa)); - memcpy(l, templ, sizeof(templ)); - for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) - memcpy(best_dst + idy * 8, dst_init + idy * dst_stride, - num_4x4_blocks_wide * 4); - } - next: - {} - } - - if (best_rd >= rd_thresh) - return best_rd; - - for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) - memcpy(dst_init + idy * dst_stride, best_dst + idy * 8, - num_4x4_blocks_wide * 4); - - return best_rd; -} - -static int64_t rd_pick_intra_sub_8x8_y_mode(VP10_COMP *cpi, MACROBLOCK *mb, - int *rate, int *rate_y, - int64_t *distortion, - int64_t best_rd) { - int i, j; - const MACROBLOCKD *const xd = &mb->e_mbd; - MODE_INFO *const mic = xd->mi[0]; - const MODE_INFO *above_mi = xd->above_mi; - const MODE_INFO *left_mi = xd->left_mi; - const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type; - const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; - const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; - int idx, idy; - int cost = 0; - int64_t total_distortion = 0; - int tot_rate_y 
= 0; - int64_t total_rd = 0; - ENTROPY_CONTEXT t_above[4], t_left[4]; - const int *bmode_costs = cpi->mbmode_cost; - - memcpy(t_above, xd->plane[0].above_context, sizeof(t_above)); - memcpy(t_left, xd->plane[0].left_context, sizeof(t_left)); - - // TODO(any): Add search of the tx_type to improve rd performance at the - // expense of speed. - mic->mbmi.tx_type = DCT_DCT; - - // Later we can add search of the tx_type to improve results. - // For now just set it to DCT_DCT - // Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block. - for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { - for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { - PREDICTION_MODE best_mode = DC_PRED; - int r = INT_MAX, ry = INT_MAX; - int64_t d = INT64_MAX, this_rd = INT64_MAX; - i = idy * 2 + idx; - if (cpi->common.frame_type == KEY_FRAME) { - const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, i); - const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, i); - - bmode_costs = cpi->y_mode_costs[A][L]; - } - - this_rd = rd_pick_intra4x4block(cpi, mb, idy, idx, &best_mode, - bmode_costs, t_above + idx, t_left + idy, - &r, &ry, &d, bsize, best_rd - total_rd); - if (this_rd >= best_rd - total_rd) - return INT64_MAX; - - total_rd += this_rd; - cost += r; - total_distortion += d; - tot_rate_y += ry; - - mic->bmi[i].as_mode = best_mode; - for (j = 1; j < num_4x4_blocks_high; ++j) - mic->bmi[i + j * 2].as_mode = best_mode; - for (j = 1; j < num_4x4_blocks_wide; ++j) - mic->bmi[i + j].as_mode = best_mode; - - if (total_rd >= best_rd) - return INT64_MAX; - } - } - - *rate = cost; - *rate_y = tot_rate_y; - *distortion = total_distortion; - mic->mbmi.mode = mic->bmi[3].as_mode; - - return RDCOST(mb->rdmult, mb->rddiv, cost, total_distortion); -} - -// This function is used only for intra_only frames -static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x, - int *rate, int *rate_tokenonly, - int64_t *distortion, int *skippable, - BLOCK_SIZE bsize, - 
int64_t best_rd) { - PREDICTION_MODE mode; - PREDICTION_MODE mode_selected = DC_PRED; - MACROBLOCKD *const xd = &x->e_mbd; - MODE_INFO *const mic = xd->mi[0]; - int this_rate, this_rate_tokenonly, s; - int64_t this_distortion, this_rd; - TX_SIZE best_tx = TX_4X4; - TX_TYPE best_tx_type = DCT_DCT; - int *bmode_costs; - const MODE_INFO *above_mi = xd->above_mi; - const MODE_INFO *left_mi = xd->left_mi; - const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, 0); - const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, 0); - bmode_costs = cpi->y_mode_costs[A][L]; - - memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm)); - - /* Y Search for intra prediction mode */ - for (mode = DC_PRED; mode <= TM_PRED; mode++) { - mic->mbmi.mode = mode; - - super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, - &s, NULL, bsize, best_rd); - - if (this_rate_tokenonly == INT_MAX) - continue; - - this_rate = this_rate_tokenonly + bmode_costs[mode]; - this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion); - - if (this_rd < best_rd) { - mode_selected = mode; - best_rd = this_rd; - best_tx = mic->mbmi.tx_size; - best_tx_type = mic->mbmi.tx_type; - *rate = this_rate; - *rate_tokenonly = this_rate_tokenonly; - *distortion = this_distortion; - *skippable = s; - } - } - - mic->mbmi.mode = mode_selected; - mic->mbmi.tx_size = best_tx; - mic->mbmi.tx_type = best_tx_type; - - return best_rd; -} - -// Return value 0: early termination triggered, no valid rd cost available; -// 1: rd cost values are valid. 
-static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x, - int *rate, int64_t *distortion, int *skippable, - int64_t *sse, BLOCK_SIZE bsize, - int64_t ref_best_rd) { - MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - const TX_SIZE uv_tx_size = get_uv_tx_size(mbmi, &xd->plane[1]); - int plane; - int pnrate = 0, pnskip = 1; - int64_t pndist = 0, pnsse = 0; - int is_cost_valid = 1; - - if (ref_best_rd < 0) - is_cost_valid = 0; - - if (is_inter_block(mbmi) && is_cost_valid) { - int plane; - for (plane = 1; plane < MAX_MB_PLANE; ++plane) - vp10_subtract_plane(x, bsize, plane); - } - - *rate = 0; - *distortion = 0; - *sse = 0; - *skippable = 1; - - for (plane = 1; plane < MAX_MB_PLANE; ++plane) { - txfm_rd_in_plane(x, &pnrate, &pndist, &pnskip, &pnsse, - ref_best_rd, plane, bsize, uv_tx_size, - cpi->sf.use_fast_coef_costing); - if (pnrate == INT_MAX) { - is_cost_valid = 0; - break; - } - *rate += pnrate; - *distortion += pndist; - *sse += pnsse; - *skippable &= pnskip; - } - - if (!is_cost_valid) { - // reset cost value - *rate = INT_MAX; - *distortion = INT64_MAX; - *sse = INT64_MAX; - *skippable = 0; - } - - return is_cost_valid; -} - -static int64_t rd_pick_intra_sbuv_mode(VP10_COMP *cpi, MACROBLOCK *x, - PICK_MODE_CONTEXT *ctx, - int *rate, int *rate_tokenonly, - int64_t *distortion, int *skippable, - BLOCK_SIZE bsize, TX_SIZE max_tx_size) { - MACROBLOCKD *xd = &x->e_mbd; - PREDICTION_MODE mode; - PREDICTION_MODE mode_selected = DC_PRED; - int64_t best_rd = INT64_MAX, this_rd; - int this_rate_tokenonly, this_rate, s; - int64_t this_distortion, this_sse; - - memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm)); - for (mode = DC_PRED; mode <= TM_PRED; ++mode) { - if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode))) - continue; - - xd->mi[0]->mbmi.uv_mode = mode; - - if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, - &this_distortion, &s, &this_sse, bsize, best_rd)) - continue; - this_rate = 
this_rate_tokenonly + - cpi->intra_uv_mode_cost[xd->mi[0]->mbmi.mode][mode]; - this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion); - - if (this_rd < best_rd) { - mode_selected = mode; - best_rd = this_rd; - *rate = this_rate; - *rate_tokenonly = this_rate_tokenonly; - *distortion = this_distortion; - *skippable = s; - if (!x->select_tx_size) - swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE); - } - } - - xd->mi[0]->mbmi.uv_mode = mode_selected; - return best_rd; -} - -static int64_t rd_sbuv_dcpred(const VP10_COMP *cpi, MACROBLOCK *x, - int *rate, int *rate_tokenonly, - int64_t *distortion, int *skippable, - BLOCK_SIZE bsize) { - int64_t unused; - - x->e_mbd.mi[0]->mbmi.uv_mode = DC_PRED; - memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm)); - super_block_uvrd(cpi, x, rate_tokenonly, distortion, - skippable, &unused, bsize, INT64_MAX); - *rate = *rate_tokenonly + - cpi->intra_uv_mode_cost[x->e_mbd.mi[0]->mbmi.mode][DC_PRED]; - return RDCOST(x->rdmult, x->rddiv, *rate, *distortion); -} - -static void choose_intra_uv_mode(VP10_COMP *cpi, MACROBLOCK *const x, - PICK_MODE_CONTEXT *ctx, - BLOCK_SIZE bsize, TX_SIZE max_tx_size, - int *rate_uv, int *rate_uv_tokenonly, - int64_t *dist_uv, int *skip_uv, - PREDICTION_MODE *mode_uv) { - // Use an estimated rd for uv_intra based on DC_PRED if the - // appropriate speed flag is set. - if (cpi->sf.use_uv_intra_rd_estimate) { - rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, - skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize); - // Else do a proper rd search for each possible transform size that may - // be considered in the main rd loop. - } else { - rd_pick_intra_sbuv_mode(cpi, x, ctx, - rate_uv, rate_uv_tokenonly, dist_uv, skip_uv, - bsize < BLOCK_8X8 ? 
BLOCK_8X8 : bsize, max_tx_size); - } - *mode_uv = x->e_mbd.mi[0]->mbmi.uv_mode; -} - -static int cost_mv_ref(const VP10_COMP *cpi, PREDICTION_MODE mode, - int mode_context) { - assert(is_inter_mode(mode)); - return cpi->inter_mode_cost[mode_context][INTER_OFFSET(mode)]; -} - -static int set_and_cost_bmi_mvs(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd, - int i, - PREDICTION_MODE mode, int_mv this_mv[2], - int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], - int_mv seg_mvs[MAX_REF_FRAMES], - int_mv *best_ref_mv[2], const int *mvjcost, - int *mvcost[2]) { - MODE_INFO *const mic = xd->mi[0]; - const MB_MODE_INFO *const mbmi = &mic->mbmi; - const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext; - int thismvcost = 0; - int idx, idy; - const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type]; - const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type]; - const int is_compound = has_second_ref(mbmi); - - switch (mode) { - case NEWMV: - this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int; - thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv, - mvjcost, mvcost, MV_COST_WEIGHT_SUB); - if (is_compound) { - this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int; - thismvcost += vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv, - mvjcost, mvcost, MV_COST_WEIGHT_SUB); - } - break; - case NEARMV: - case NEARESTMV: - this_mv[0].as_int = frame_mv[mode][mbmi->ref_frame[0]].as_int; - if (is_compound) - this_mv[1].as_int = frame_mv[mode][mbmi->ref_frame[1]].as_int; - break; - case ZEROMV: - this_mv[0].as_int = 0; - if (is_compound) - this_mv[1].as_int = 0; - break; - default: - break; - } - - mic->bmi[i].as_mv[0].as_int = this_mv[0].as_int; - if (is_compound) - mic->bmi[i].as_mv[1].as_int = this_mv[1].as_int; - - mic->bmi[i].as_mode = mode; - - for (idy = 0; idy < num_4x4_blocks_high; ++idy) - for (idx = 0; idx < num_4x4_blocks_wide; ++idx) - memmove(&mic->bmi[i + idy * 2 + idx], &mic->bmi[i], sizeof(mic->bmi[i])); - - 
return cost_mv_ref(cpi, mode, mbmi_ext->mode_context[mbmi->ref_frame[0]]) + - thismvcost; -} - -static int64_t encode_inter_mb_segment(VP10_COMP *cpi, - MACROBLOCK *x, - int64_t best_yrd, - int i, - int *labelyrate, - int64_t *distortion, int64_t *sse, - ENTROPY_CONTEXT *ta, - ENTROPY_CONTEXT *tl, - int ir, int ic, - int mi_row, int mi_col) { - int k; - MACROBLOCKD *xd = &x->e_mbd; - struct macroblockd_plane *const pd = &xd->plane[0]; - struct macroblock_plane *const p = &x->plane[0]; - MODE_INFO *const mi = xd->mi[0]; - const BLOCK_SIZE plane_bsize = get_plane_block_size(mi->mbmi.sb_type, pd); - const int width = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; - const int height = 4 * num_4x4_blocks_high_lookup[plane_bsize]; - int idx, idy; - void (*fwd_txm4x4)(const int16_t *input, tran_low_t *output, int stride); - - const uint8_t *const src = - &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)]; - uint8_t *const dst = &pd->dst.buf[vp10_raster_block_offset(BLOCK_8X8, i, - pd->dst.stride)]; - int64_t thisdistortion = 0, thissse = 0; - int thisrate = 0; - TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, i); - const scan_order *so = get_scan(TX_4X4, tx_type); - - vp10_build_inter_predictor_sub8x8(xd, 0, i, ir, ic, mi_row, mi_col); - -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - fwd_txm4x4 = xd->lossless[mi->mbmi.segment_id] ? vp10_highbd_fwht4x4 - : vpx_highbd_fdct4x4; - } else { - fwd_txm4x4 = xd->lossless[mi->mbmi.segment_id] ? vp10_fwht4x4 : vpx_fdct4x4; - } -#else - fwd_txm4x4 = xd->lossless[mi->mbmi.segment_id] ? 
vp10_fwht4x4 : vpx_fdct4x4; -#endif // CONFIG_VP9_HIGHBITDEPTH - -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - vpx_highbd_subtract_block( - height, width, vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), - 8, src, p->src.stride, dst, pd->dst.stride, xd->bd); - } else { - vpx_subtract_block( - height, width, vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), - 8, src, p->src.stride, dst, pd->dst.stride); - } -#else - vpx_subtract_block(height, width, - vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), - 8, src, p->src.stride, dst, pd->dst.stride); -#endif // CONFIG_VP9_HIGHBITDEPTH - - k = i; - for (idy = 0; idy < height / 4; ++idy) { - for (idx = 0; idx < width / 4; ++idx) { - int64_t ssz, rd, rd1, rd2; - tran_low_t* coeff; - - k += (idy * 2 + idx); - coeff = BLOCK_OFFSET(p->coeff, k); - fwd_txm4x4(vp10_raster_block_offset_int16(BLOCK_8X8, k, p->src_diff), - coeff, 8); - vp10_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan); -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - thisdistortion += vp10_highbd_block_error(coeff, - BLOCK_OFFSET(pd->dqcoeff, k), - 16, &ssz, xd->bd); - } else { - thisdistortion += vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), - 16, &ssz); - } -#else - thisdistortion += vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), - 16, &ssz); -#endif // CONFIG_VP9_HIGHBITDEPTH - thissse += ssz; - thisrate += cost_coeffs(x, 0, k, ta + (k & 1), tl + (k >> 1), TX_4X4, - so->scan, so->neighbors, - cpi->sf.use_fast_coef_costing); - rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2); - rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2); - rd = VPXMIN(rd1, rd2); - if (rd >= best_yrd) - return INT64_MAX; - } - } - - *distortion = thisdistortion >> 2; - *labelyrate = thisrate; - *sse = thissse >> 2; - - return RDCOST(x->rdmult, x->rddiv, *labelyrate, *distortion); -} - -typedef struct { - int eobs; - int brate; - int byrate; - int64_t 
bdist; - int64_t bsse; - int64_t brdcost; - int_mv mvs[2]; - ENTROPY_CONTEXT ta[2]; - ENTROPY_CONTEXT tl[2]; -} SEG_RDSTAT; - -typedef struct { - int_mv *ref_mv[2]; - int_mv mvp; - - int64_t segment_rd; - int r; - int64_t d; - int64_t sse; - int segment_yrate; - PREDICTION_MODE modes[4]; - SEG_RDSTAT rdstat[4][INTER_MODES]; - int mvthresh; -} BEST_SEG_INFO; - -static INLINE int mv_check_bounds(const MACROBLOCK *x, const MV *mv) { - return (mv->row >> 3) < x->mv_row_min || - (mv->row >> 3) > x->mv_row_max || - (mv->col >> 3) < x->mv_col_min || - (mv->col >> 3) > x->mv_col_max; -} - -static INLINE void mi_buf_shift(MACROBLOCK *x, int i) { - MB_MODE_INFO *const mbmi = &x->e_mbd.mi[0]->mbmi; - struct macroblock_plane *const p = &x->plane[0]; - struct macroblockd_plane *const pd = &x->e_mbd.plane[0]; - - p->src.buf = &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, - p->src.stride)]; - assert(((intptr_t)pd->pre[0].buf & 0x7) == 0); - pd->pre[0].buf = &pd->pre[0].buf[vp10_raster_block_offset(BLOCK_8X8, i, - pd->pre[0].stride)]; - if (has_second_ref(mbmi)) - pd->pre[1].buf = &pd->pre[1].buf[vp10_raster_block_offset(BLOCK_8X8, i, - pd->pre[1].stride)]; -} - -static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src, - struct buf_2d orig_pre[2]) { - MB_MODE_INFO *mbmi = &x->e_mbd.mi[0]->mbmi; - x->plane[0].src = orig_src; - x->e_mbd.plane[0].pre[0] = orig_pre[0]; - if (has_second_ref(mbmi)) - x->e_mbd.plane[0].pre[1] = orig_pre[1]; -} - -static INLINE int mv_has_subpel(const MV *mv) { - return (mv->row & 0x0F) || (mv->col & 0x0F); -} - -// Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way encode zero motion. 
-// TODO(aconverse): Find out if this is still productive then clean up or remove -static int check_best_zero_mv( - const VP10_COMP *cpi, const uint8_t mode_context[MAX_REF_FRAMES], - int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int this_mode, - const MV_REFERENCE_FRAME ref_frames[2]) { - if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) && - frame_mv[this_mode][ref_frames[0]].as_int == 0 && - (ref_frames[1] == NONE || - frame_mv[this_mode][ref_frames[1]].as_int == 0)) { - int rfc = mode_context[ref_frames[0]]; - int c1 = cost_mv_ref(cpi, NEARMV, rfc); - int c2 = cost_mv_ref(cpi, NEARESTMV, rfc); - int c3 = cost_mv_ref(cpi, ZEROMV, rfc); - - if (this_mode == NEARMV) { - if (c1 > c3) return 0; - } else if (this_mode == NEARESTMV) { - if (c2 > c3) return 0; - } else { - assert(this_mode == ZEROMV); - if (ref_frames[1] == NONE) { - if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0) || - (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0)) - return 0; - } else { - if ((c3 >= c2 && frame_mv[NEARESTMV][ref_frames[0]].as_int == 0 && - frame_mv[NEARESTMV][ref_frames[1]].as_int == 0) || - (c3 >= c1 && frame_mv[NEARMV][ref_frames[0]].as_int == 0 && - frame_mv[NEARMV][ref_frames[1]].as_int == 0)) - return 0; - } - } - } - return 1; -} - -static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x, - BLOCK_SIZE bsize, - int_mv *frame_mv, - int mi_row, int mi_col, - int_mv single_newmv[MAX_REF_FRAMES], - int *rate_mv) { - const VP10_COMMON *const cm = &cpi->common; - const int pw = 4 * num_4x4_blocks_wide_lookup[bsize]; - const int ph = 4 * num_4x4_blocks_high_lookup[bsize]; - MACROBLOCKD *xd = &x->e_mbd; - MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi; - const int refs[2] = {mbmi->ref_frame[0], - mbmi->ref_frame[1] < 0 ? 
0 : mbmi->ref_frame[1]}; - int_mv ref_mv[2]; - int ite, ref; - const InterpKernel *kernel = vp10_filter_kernels[mbmi->interp_filter]; - struct scale_factors sf; - - // Do joint motion search in compound mode to get more accurate mv. - struct buf_2d backup_yv12[2][MAX_MB_PLANE]; - int last_besterr[2] = {INT_MAX, INT_MAX}; - const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = { - vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]), - vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[1]) - }; - - // Prediction buffer from second frame. -#if CONFIG_VP9_HIGHBITDEPTH - DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[64 * 64]); - uint8_t *second_pred; -#else - DECLARE_ALIGNED(16, uint8_t, second_pred[64 * 64]); -#endif // CONFIG_VP9_HIGHBITDEPTH - - for (ref = 0; ref < 2; ++ref) { - ref_mv[ref] = x->mbmi_ext->ref_mvs[refs[ref]][0]; - - if (scaled_ref_frame[ref]) { - int i; - // Swap out the reference frame for a version that's been scaled to - // match the resolution of the current frame, allowing the existing - // motion search code to be used without additional modifications. - for (i = 0; i < MAX_MB_PLANE; i++) - backup_yv12[ref][i] = xd->plane[i].pre[ref]; - vp10_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col, - NULL); - } - - frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int; - } - - // Since we have scaled the reference frames to match the size of the current - // frame we must use a unit scaling factor during mode selection. -#if CONFIG_VP9_HIGHBITDEPTH - vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, - cm->width, cm->height, - cm->use_highbitdepth); -#else - vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, - cm->width, cm->height); -#endif // CONFIG_VP9_HIGHBITDEPTH - - // Allow joint search multiple times iteratively for each reference frame - // and break out of the search loop if it couldn't find a better mv. 
- for (ite = 0; ite < 4; ite++) { - struct buf_2d ref_yv12[2]; - int bestsme = INT_MAX; - int sadpb = x->sadperbit16; - MV tmp_mv; - int search_range = 3; - - int tmp_col_min = x->mv_col_min; - int tmp_col_max = x->mv_col_max; - int tmp_row_min = x->mv_row_min; - int tmp_row_max = x->mv_row_max; - int id = ite % 2; // Even iterations search in the first reference frame, - // odd iterations search in the second. The predictor - // found for the 'other' reference frame is factored in. - - // Initialized here because of compiler problem in Visual Studio. - ref_yv12[0] = xd->plane[0].pre[0]; - ref_yv12[1] = xd->plane[0].pre[1]; - - // Get the prediction block from the 'other' reference frame. -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16); - vp10_highbd_build_inter_predictor(ref_yv12[!id].buf, - ref_yv12[!id].stride, - second_pred, pw, - &frame_mv[refs[!id]].as_mv, - &sf, pw, ph, 0, - kernel, MV_PRECISION_Q3, - mi_col * MI_SIZE, mi_row * MI_SIZE, - xd->bd); - } else { - second_pred = (uint8_t *)second_pred_alloc_16; - vp10_build_inter_predictor(ref_yv12[!id].buf, - ref_yv12[!id].stride, - second_pred, pw, - &frame_mv[refs[!id]].as_mv, - &sf, pw, ph, 0, - kernel, MV_PRECISION_Q3, - mi_col * MI_SIZE, mi_row * MI_SIZE); - } -#else - vp10_build_inter_predictor(ref_yv12[!id].buf, - ref_yv12[!id].stride, - second_pred, pw, - &frame_mv[refs[!id]].as_mv, - &sf, pw, ph, 0, - kernel, MV_PRECISION_Q3, - mi_col * MI_SIZE, mi_row * MI_SIZE); -#endif // CONFIG_VP9_HIGHBITDEPTH - - // Do compound motion search on the current reference frame. - if (id) - xd->plane[0].pre[0] = ref_yv12[id]; - vp10_set_mv_search_range(x, &ref_mv[id].as_mv); - - // Use the mv result from the single mode as mv predictor. - tmp_mv = frame_mv[refs[id]].as_mv; - - tmp_mv.col >>= 3; - tmp_mv.row >>= 3; - - // Small-range full-pixel motion search. 
- bestsme = vp10_refining_search_8p_c(x, &tmp_mv, sadpb, - search_range, - &cpi->fn_ptr[bsize], - &ref_mv[id].as_mv, second_pred); - if (bestsme < INT_MAX) - bestsme = vp10_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv, - second_pred, &cpi->fn_ptr[bsize], 1); - - x->mv_col_min = tmp_col_min; - x->mv_col_max = tmp_col_max; - x->mv_row_min = tmp_row_min; - x->mv_row_max = tmp_row_max; - - if (bestsme < INT_MAX) { - int dis; /* TODO: use dis in distortion calculation later. */ - unsigned int sse; - bestsme = cpi->find_fractional_mv_step( - x, &tmp_mv, - &ref_mv[id].as_mv, - cpi->common.allow_high_precision_mv, - x->errorperbit, - &cpi->fn_ptr[bsize], - 0, cpi->sf.mv.subpel_iters_per_step, - NULL, - x->nmvjointcost, x->mvcost, - &dis, &sse, second_pred, - pw, ph); - } - - // Restore the pointer to the first (possibly scaled) prediction buffer. - if (id) - xd->plane[0].pre[0] = ref_yv12[0]; - - if (bestsme < last_besterr[id]) { - frame_mv[refs[id]].as_mv = tmp_mv; - last_besterr[id] = bestsme; - } else { - break; - } - } - - *rate_mv = 0; - - for (ref = 0; ref < 2; ++ref) { - if (scaled_ref_frame[ref]) { - // Restore the prediction frame pointers to their unscaled versions. 
- int i; - for (i = 0; i < MAX_MB_PLANE; i++) - xd->plane[i].pre[ref] = backup_yv12[ref][i]; - } - - *rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv, - &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv, - x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); - } -} - -static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x, - int_mv *best_ref_mv, - int_mv *second_best_ref_mv, - int64_t best_rd, int *returntotrate, - int *returnyrate, - int64_t *returndistortion, - int *skippable, int64_t *psse, - int mvthresh, - int_mv seg_mvs[4][MAX_REF_FRAMES], - BEST_SEG_INFO *bsi_buf, int filter_idx, - int mi_row, int mi_col) { - int i; - BEST_SEG_INFO *bsi = bsi_buf + filter_idx; - MACROBLOCKD *xd = &x->e_mbd; - MODE_INFO *mi = xd->mi[0]; - MB_MODE_INFO *mbmi = &mi->mbmi; - int mode_idx; - int k, br = 0, idx, idy; - int64_t bd = 0, block_sse = 0; - PREDICTION_MODE this_mode; - VP10_COMMON *cm = &cpi->common; - struct macroblock_plane *const p = &x->plane[0]; - struct macroblockd_plane *const pd = &xd->plane[0]; - const int label_count = 4; - int64_t this_segment_rd = 0; - int label_mv_thresh; - int segmentyrate = 0; - const BLOCK_SIZE bsize = mbmi->sb_type; - const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize]; - const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize]; - ENTROPY_CONTEXT t_above[2], t_left[2]; - int subpelmv = 1, have_ref = 0; - const int has_second_rf = has_second_ref(mbmi); - const int inter_mode_mask = cpi->sf.inter_mode_mask[bsize]; - MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext; - - vp10_zero(*bsi); - - bsi->segment_rd = best_rd; - bsi->ref_mv[0] = best_ref_mv; - bsi->ref_mv[1] = second_best_ref_mv; - bsi->mvp.as_int = best_ref_mv->as_int; - bsi->mvthresh = mvthresh; - - for (i = 0; i < 4; i++) - bsi->modes[i] = ZEROMV; - - memcpy(t_above, pd->above_context, sizeof(t_above)); - memcpy(t_left, pd->left_context, sizeof(t_left)); - - // 64 makes this threshold really big effectively - // making it so that we very rarely check 
mvs on - // segments. setting this to 1 would make mv thresh - // roughly equal to what it is for macroblocks - label_mv_thresh = 1 * bsi->mvthresh / label_count; - - // Segmentation method overheads - for (idy = 0; idy < 2; idy += num_4x4_blocks_high) { - for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) { - // TODO(jingning,rbultje): rewrite the rate-distortion optimization - // loop for 4x4/4x8/8x4 block coding. to be replaced with new rd loop - int_mv mode_mv[MB_MODE_COUNT][2]; - int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES]; - PREDICTION_MODE mode_selected = ZEROMV; - int64_t best_rd = INT64_MAX; - const int i = idy * 2 + idx; - int ref; - - for (ref = 0; ref < 1 + has_second_rf; ++ref) { - const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref]; - frame_mv[ZEROMV][frame].as_int = 0; - vp10_append_sub8x8_mvs_for_idx(cm, xd, i, ref, mi_row, mi_col, - &frame_mv[NEARESTMV][frame], - &frame_mv[NEARMV][frame], - mbmi_ext->mode_context); - } - - // search for the best motion vector on this segment - for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) { - const struct buf_2d orig_src = x->plane[0].src; - struct buf_2d orig_pre[2]; - - mode_idx = INTER_OFFSET(this_mode); - bsi->rdstat[i][mode_idx].brdcost = INT64_MAX; - if (!(inter_mode_mask & (1 << this_mode))) - continue; - - if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv, - this_mode, mbmi->ref_frame)) - continue; - - memcpy(orig_pre, pd->pre, sizeof(orig_pre)); - memcpy(bsi->rdstat[i][mode_idx].ta, t_above, - sizeof(bsi->rdstat[i][mode_idx].ta)); - memcpy(bsi->rdstat[i][mode_idx].tl, t_left, - sizeof(bsi->rdstat[i][mode_idx].tl)); - - // motion search for newmv (single predictor case only) - if (!has_second_rf && this_mode == NEWMV && - seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV) { - MV *const new_mv = &mode_mv[NEWMV][0].as_mv; - int step_param = 0; - int bestsme = INT_MAX; - int sadpb = x->sadperbit4; - MV mvp_full; - int max_mv; - int cost_list[5]; - - /* Is the best so far 
sufficiently good that we cant justify doing - * and new motion search. */ - if (best_rd < label_mv_thresh) - break; - - if (cpi->oxcf.mode != BEST) { - // use previous block's result as next block's MV predictor. - if (i > 0) { - bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int; - if (i == 2) - bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int; - } - } - if (i == 0) - max_mv = x->max_mv_context[mbmi->ref_frame[0]]; - else - max_mv = - VPXMAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3; - - if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) { - // Take wtd average of the step_params based on the last frame's - // max mv magnitude and the best ref mvs of the current block for - // the given reference. - step_param = (vp10_init_search_range(max_mv) + - cpi->mv_step_param) / 2; - } else { - step_param = cpi->mv_step_param; - } - - mvp_full.row = bsi->mvp.as_mv.row >> 3; - mvp_full.col = bsi->mvp.as_mv.col >> 3; - - if (cpi->sf.adaptive_motion_search) { - mvp_full.row = x->pred_mv[mbmi->ref_frame[0]].row >> 3; - mvp_full.col = x->pred_mv[mbmi->ref_frame[0]].col >> 3; - step_param = VPXMAX(step_param, 8); - } - - // adjust src pointer for this block - mi_buf_shift(x, i); - - vp10_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv); - - bestsme = vp10_full_pixel_search( - cpi, x, bsize, &mvp_full, step_param, sadpb, - cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? 
cost_list : NULL, - &bsi->ref_mv[0]->as_mv, new_mv, - INT_MAX, 1); - - if (bestsme < INT_MAX) { - int distortion; - cpi->find_fractional_mv_step( - x, - new_mv, - &bsi->ref_mv[0]->as_mv, - cm->allow_high_precision_mv, - x->errorperbit, &cpi->fn_ptr[bsize], - cpi->sf.mv.subpel_force_stop, - cpi->sf.mv.subpel_iters_per_step, - cond_cost_list(cpi, cost_list), - x->nmvjointcost, x->mvcost, - &distortion, - &x->pred_sse[mbmi->ref_frame[0]], - NULL, 0, 0); - - // save motion search result for use in compound prediction - seg_mvs[i][mbmi->ref_frame[0]].as_mv = *new_mv; - } - - if (cpi->sf.adaptive_motion_search) - x->pred_mv[mbmi->ref_frame[0]] = *new_mv; - - // restore src pointers - mi_buf_restore(x, orig_src, orig_pre); - } - - if (has_second_rf) { - if (seg_mvs[i][mbmi->ref_frame[1]].as_int == INVALID_MV || - seg_mvs[i][mbmi->ref_frame[0]].as_int == INVALID_MV) - continue; - } - - if (has_second_rf && this_mode == NEWMV && - mbmi->interp_filter == EIGHTTAP) { - // adjust src pointers - mi_buf_shift(x, i); - if (cpi->sf.comp_inter_joint_search_thresh <= bsize) { - int rate_mv; - joint_motion_search(cpi, x, bsize, frame_mv[this_mode], - mi_row, mi_col, seg_mvs[i], - &rate_mv); - seg_mvs[i][mbmi->ref_frame[0]].as_int = - frame_mv[this_mode][mbmi->ref_frame[0]].as_int; - seg_mvs[i][mbmi->ref_frame[1]].as_int = - frame_mv[this_mode][mbmi->ref_frame[1]].as_int; - } - // restore src pointers - mi_buf_restore(x, orig_src, orig_pre); - } - - bsi->rdstat[i][mode_idx].brate = - set_and_cost_bmi_mvs(cpi, x, xd, i, this_mode, mode_mv[this_mode], - frame_mv, seg_mvs[i], bsi->ref_mv, - x->nmvjointcost, x->mvcost); - - for (ref = 0; ref < 1 + has_second_rf; ++ref) { - bsi->rdstat[i][mode_idx].mvs[ref].as_int = - mode_mv[this_mode][ref].as_int; - if (num_4x4_blocks_wide > 1) - bsi->rdstat[i + 1][mode_idx].mvs[ref].as_int = - mode_mv[this_mode][ref].as_int; - if (num_4x4_blocks_high > 1) - bsi->rdstat[i + 2][mode_idx].mvs[ref].as_int = - mode_mv[this_mode][ref].as_int; - } - - // Trap 
vectors that reach beyond the UMV borders - if (mv_check_bounds(x, &mode_mv[this_mode][0].as_mv) || - (has_second_rf && - mv_check_bounds(x, &mode_mv[this_mode][1].as_mv))) - continue; - - if (filter_idx > 0) { - BEST_SEG_INFO *ref_bsi = bsi_buf; - subpelmv = 0; - have_ref = 1; - - for (ref = 0; ref < 1 + has_second_rf; ++ref) { - subpelmv |= mv_has_subpel(&mode_mv[this_mode][ref].as_mv); - have_ref &= mode_mv[this_mode][ref].as_int == - ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int; - } - - if (filter_idx > 1 && !subpelmv && !have_ref) { - ref_bsi = bsi_buf + 1; - have_ref = 1; - for (ref = 0; ref < 1 + has_second_rf; ++ref) - have_ref &= mode_mv[this_mode][ref].as_int == - ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int; - } - - if (!subpelmv && have_ref && - ref_bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) { - memcpy(&bsi->rdstat[i][mode_idx], &ref_bsi->rdstat[i][mode_idx], - sizeof(SEG_RDSTAT)); - if (num_4x4_blocks_wide > 1) - bsi->rdstat[i + 1][mode_idx].eobs = - ref_bsi->rdstat[i + 1][mode_idx].eobs; - if (num_4x4_blocks_high > 1) - bsi->rdstat[i + 2][mode_idx].eobs = - ref_bsi->rdstat[i + 2][mode_idx].eobs; - - if (bsi->rdstat[i][mode_idx].brdcost < best_rd) { - mode_selected = this_mode; - best_rd = bsi->rdstat[i][mode_idx].brdcost; - } - continue; - } - } - - bsi->rdstat[i][mode_idx].brdcost = - encode_inter_mb_segment(cpi, x, - bsi->segment_rd - this_segment_rd, i, - &bsi->rdstat[i][mode_idx].byrate, - &bsi->rdstat[i][mode_idx].bdist, - &bsi->rdstat[i][mode_idx].bsse, - bsi->rdstat[i][mode_idx].ta, - bsi->rdstat[i][mode_idx].tl, - idy, idx, - mi_row, mi_col); - if (bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) { - bsi->rdstat[i][mode_idx].brdcost += RDCOST(x->rdmult, x->rddiv, - bsi->rdstat[i][mode_idx].brate, 0); - bsi->rdstat[i][mode_idx].brate += bsi->rdstat[i][mode_idx].byrate; - bsi->rdstat[i][mode_idx].eobs = p->eobs[i]; - if (num_4x4_blocks_wide > 1) - bsi->rdstat[i + 1][mode_idx].eobs = p->eobs[i + 1]; - if (num_4x4_blocks_high > 1) - bsi->rdstat[i 
+ 2][mode_idx].eobs = p->eobs[i + 2]; - } - - if (bsi->rdstat[i][mode_idx].brdcost < best_rd) { - mode_selected = this_mode; - best_rd = bsi->rdstat[i][mode_idx].brdcost; - } - } /*for each 4x4 mode*/ - - if (best_rd == INT64_MAX) { - int iy, midx; - for (iy = i + 1; iy < 4; ++iy) - for (midx = 0; midx < INTER_MODES; ++midx) - bsi->rdstat[iy][midx].brdcost = INT64_MAX; - bsi->segment_rd = INT64_MAX; - return INT64_MAX; - } - - mode_idx = INTER_OFFSET(mode_selected); - memcpy(t_above, bsi->rdstat[i][mode_idx].ta, sizeof(t_above)); - memcpy(t_left, bsi->rdstat[i][mode_idx].tl, sizeof(t_left)); - - set_and_cost_bmi_mvs(cpi, x, xd, i, mode_selected, mode_mv[mode_selected], - frame_mv, seg_mvs[i], bsi->ref_mv, x->nmvjointcost, - x->mvcost); - - br += bsi->rdstat[i][mode_idx].brate; - bd += bsi->rdstat[i][mode_idx].bdist; - block_sse += bsi->rdstat[i][mode_idx].bsse; - segmentyrate += bsi->rdstat[i][mode_idx].byrate; - this_segment_rd += bsi->rdstat[i][mode_idx].brdcost; - - if (this_segment_rd > bsi->segment_rd) { - int iy, midx; - for (iy = i + 1; iy < 4; ++iy) - for (midx = 0; midx < INTER_MODES; ++midx) - bsi->rdstat[iy][midx].brdcost = INT64_MAX; - bsi->segment_rd = INT64_MAX; - return INT64_MAX; - } - } - } /* for each label */ - - bsi->r = br; - bsi->d = bd; - bsi->segment_yrate = segmentyrate; - bsi->segment_rd = this_segment_rd; - bsi->sse = block_sse; - - // update the coding decisions - for (k = 0; k < 4; ++k) - bsi->modes[k] = mi->bmi[k].as_mode; - - if (bsi->segment_rd > best_rd) - return INT64_MAX; - /* set it to the best */ - for (i = 0; i < 4; i++) { - mode_idx = INTER_OFFSET(bsi->modes[i]); - mi->bmi[i].as_mv[0].as_int = bsi->rdstat[i][mode_idx].mvs[0].as_int; - if (has_second_ref(mbmi)) - mi->bmi[i].as_mv[1].as_int = bsi->rdstat[i][mode_idx].mvs[1].as_int; - x->plane[0].eobs[i] = bsi->rdstat[i][mode_idx].eobs; - mi->bmi[i].as_mode = bsi->modes[i]; - } - - /* - * used to set mbmi->mv.as_int - */ - *returntotrate = bsi->r; - *returndistortion = bsi->d; - 
*returnyrate = bsi->segment_yrate; - *skippable = vp10_is_skippable_in_plane(x, BLOCK_8X8, 0); - *psse = bsi->sse; - mbmi->mode = bsi->modes[3]; - - return bsi->segment_rd; -} - -static void estimate_ref_frame_costs(const VP10_COMMON *cm, - const MACROBLOCKD *xd, - int segment_id, - unsigned int *ref_costs_single, - unsigned int *ref_costs_comp, - vpx_prob *comp_mode_p) { - int seg_ref_active = segfeature_active(&cm->seg, segment_id, - SEG_LVL_REF_FRAME); - if (seg_ref_active) { - memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single)); - memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp)); - *comp_mode_p = 128; - } else { - vpx_prob intra_inter_p = vp10_get_intra_inter_prob(cm, xd); - vpx_prob comp_inter_p = 128; - - if (cm->reference_mode == REFERENCE_MODE_SELECT) { - comp_inter_p = vp10_get_reference_mode_prob(cm, xd); - *comp_mode_p = comp_inter_p; - } else { - *comp_mode_p = 128; - } - - ref_costs_single[INTRA_FRAME] = vp10_cost_bit(intra_inter_p, 0); - - if (cm->reference_mode != COMPOUND_REFERENCE) { - vpx_prob ref_single_p1 = vp10_get_pred_prob_single_ref_p1(cm, xd); - vpx_prob ref_single_p2 = vp10_get_pred_prob_single_ref_p2(cm, xd); - unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1); - - if (cm->reference_mode == REFERENCE_MODE_SELECT) - base_cost += vp10_cost_bit(comp_inter_p, 0); - - ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] = - ref_costs_single[ALTREF_FRAME] = base_cost; - ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p1, 0); - ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p1, 1); - ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p1, 1); - ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p2, 0); - ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p2, 1); - } else { - ref_costs_single[LAST_FRAME] = 512; - ref_costs_single[GOLDEN_FRAME] = 512; - ref_costs_single[ALTREF_FRAME] = 512; - } - if (cm->reference_mode != SINGLE_REFERENCE) { - 
vpx_prob ref_comp_p = vp10_get_pred_prob_comp_ref_p(cm, xd); - unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1); - - if (cm->reference_mode == REFERENCE_MODE_SELECT) - base_cost += vp10_cost_bit(comp_inter_p, 1); - - ref_costs_comp[LAST_FRAME] = base_cost + vp10_cost_bit(ref_comp_p, 0); - ref_costs_comp[GOLDEN_FRAME] = base_cost + vp10_cost_bit(ref_comp_p, 1); - } else { - ref_costs_comp[LAST_FRAME] = 512; - ref_costs_comp[GOLDEN_FRAME] = 512; - } - } -} - -static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, - int mode_index, - int64_t comp_pred_diff[REFERENCE_MODES], - int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS], - int skippable) { - MACROBLOCKD *const xd = &x->e_mbd; - - // Take a snapshot of the coding context so it can be - // restored if we decide to encode this way - ctx->skip = x->skip; - ctx->skippable = skippable; - ctx->best_mode_index = mode_index; - ctx->mic = *xd->mi[0]; - ctx->mbmi_ext = *x->mbmi_ext; - ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE]; - ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE]; - ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT]; - - memcpy(ctx->best_filter_diff, best_filter_diff, - sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS); -} - -static void setup_buffer_inter(VP10_COMP *cpi, MACROBLOCK *x, - MV_REFERENCE_FRAME ref_frame, - BLOCK_SIZE block_size, - int mi_row, int mi_col, - int_mv frame_nearest_mv[MAX_REF_FRAMES], - int_mv frame_near_mv[MAX_REF_FRAMES], - struct buf_2d yv12_mb[4][MAX_MB_PLANE]) { - const VP10_COMMON *cm = &cpi->common; - const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame); - MACROBLOCKD *const xd = &x->e_mbd; - MODE_INFO *const mi = xd->mi[0]; - int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame]; - const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf; - MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext; - - assert(yv12 != NULL); - - // TODO(jkoleszar): Is the UV buffer ever 
used here? If so, need to make this - // use the UV scaling factors. - vp10_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf); - - // Gets an initial list of candidate vectors from neighbours and orders them - vp10_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col, - NULL, NULL, mbmi_ext->mode_context); - - // Candidate refinement carried out at encoder and decoder - vp10_find_best_ref_mvs(cm->allow_high_precision_mv, candidates, - &frame_nearest_mv[ref_frame], - &frame_near_mv[ref_frame]); - - // Further refinement that is encode side only to test the top few candidates - // in full and choose the best as the centre point for subsequent searches. - // The current implementation doesn't support scaling. - if (!vp10_is_scaled(sf) && block_size >= BLOCK_8X8) - vp10_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, - ref_frame, block_size); -} - -static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x, - BLOCK_SIZE bsize, - int mi_row, int mi_col, - int_mv *tmp_mv, int *rate_mv) { - MACROBLOCKD *xd = &x->e_mbd; - const VP10_COMMON *cm = &cpi->common; - MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi; - struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}}; - int bestsme = INT_MAX; - int step_param; - int sadpb = x->sadperbit16; - MV mvp_full; - int ref = mbmi->ref_frame[0]; - MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv; - - int tmp_col_min = x->mv_col_min; - int tmp_col_max = x->mv_col_max; - int tmp_row_min = x->mv_row_min; - int tmp_row_max = x->mv_row_max; - int cost_list[5]; - - const YV12_BUFFER_CONFIG *scaled_ref_frame = vp10_get_scaled_ref_frame(cpi, - ref); - - MV pred_mv[3]; - pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv; - pred_mv[1] = x->mbmi_ext->ref_mvs[ref][1].as_mv; - pred_mv[2] = x->pred_mv[ref]; - - if (scaled_ref_frame) { - int i; - // Swap out the reference frame for a version that's been scaled to - // match the resolution of the current frame, allowing the existing - // motion search code to be used 
without additional modifications. - for (i = 0; i < MAX_MB_PLANE; i++) - backup_yv12[i] = xd->plane[i].pre[0]; - - vp10_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL); - } - - vp10_set_mv_search_range(x, &ref_mv); - - // Work out the size of the first step in the mv step search. - // 0 here is maximum length first step. 1 is VPXMAX >> 1 etc. - if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) { - // Take wtd average of the step_params based on the last frame's - // max mv magnitude and that based on the best ref mvs of the current - // block for the given reference. - step_param = (vp10_init_search_range(x->max_mv_context[ref]) + - cpi->mv_step_param) / 2; - } else { - step_param = cpi->mv_step_param; - } - - if (cpi->sf.adaptive_motion_search && bsize < BLOCK_64X64) { - int boffset = - 2 * (b_width_log2_lookup[BLOCK_64X64] - - VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize])); - step_param = VPXMAX(step_param, boffset); - } - - if (cpi->sf.adaptive_motion_search) { - int bwl = b_width_log2_lookup[bsize]; - int bhl = b_height_log2_lookup[bsize]; - int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4); - - if (tlevel < 5) - step_param += 2; - - // prev_mv_sad is not setup for dynamically scaled frames. 
- if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) { - int i; - for (i = LAST_FRAME; i <= ALTREF_FRAME && cm->show_frame; ++i) { - if ((x->pred_mv_sad[ref] >> 3) > x->pred_mv_sad[i]) { - x->pred_mv[ref].row = 0; - x->pred_mv[ref].col = 0; - tmp_mv->as_int = INVALID_MV; - - if (scaled_ref_frame) { - int i; - for (i = 0; i < MAX_MB_PLANE; ++i) - xd->plane[i].pre[0] = backup_yv12[i]; - } - return; - } - } - } - } - - mvp_full = pred_mv[x->mv_best_ref_index[ref]]; - - mvp_full.col >>= 3; - mvp_full.row >>= 3; - - bestsme = vp10_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb, - cond_cost_list(cpi, cost_list), - &ref_mv, &tmp_mv->as_mv, INT_MAX, 1); - - x->mv_col_min = tmp_col_min; - x->mv_col_max = tmp_col_max; - x->mv_row_min = tmp_row_min; - x->mv_row_max = tmp_row_max; - - if (bestsme < INT_MAX) { - int dis; /* TODO: use dis in distortion calculation later. */ - cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv, - cm->allow_high_precision_mv, - x->errorperbit, - &cpi->fn_ptr[bsize], - cpi->sf.mv.subpel_force_stop, - cpi->sf.mv.subpel_iters_per_step, - cond_cost_list(cpi, cost_list), - x->nmvjointcost, x->mvcost, - &dis, &x->pred_sse[ref], NULL, 0, 0); - } - *rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, - x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); - - if (cpi->sf.adaptive_motion_search) - x->pred_mv[ref] = tmp_mv->as_mv; - - if (scaled_ref_frame) { - int i; - for (i = 0; i < MAX_MB_PLANE; i++) - xd->plane[i].pre[0] = backup_yv12[i]; - } -} - - - -static INLINE void restore_dst_buf(MACROBLOCKD *xd, - uint8_t *orig_dst[MAX_MB_PLANE], - int orig_dst_stride[MAX_MB_PLANE]) { - int i; - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = orig_dst[i]; - xd->plane[i].dst.stride = orig_dst_stride[i]; - } -} - -// In some situations we want to discount tha pparent cost of a new motion -// vector. 
Where there is a subtle motion field and especially where there is -// low spatial complexity then it can be hard to cover the cost of a new motion -// vector in a single block, even if that motion vector reduces distortion. -// However, once established that vector may be usable through the nearest and -// near mv modes to reduce distortion in subsequent blocks and also improve -// visual quality. -static int discount_newmv_test(const VP10_COMP *cpi, - int this_mode, - int_mv this_mv, - int_mv (*mode_mv)[MAX_REF_FRAMES], - int ref_frame) { - return (!cpi->rc.is_src_frame_alt_ref && - (this_mode == NEWMV) && - (this_mv.as_int != 0) && - ((mode_mv[NEARESTMV][ref_frame].as_int == 0) || - (mode_mv[NEARESTMV][ref_frame].as_int == INVALID_MV)) && - ((mode_mv[NEARMV][ref_frame].as_int == 0) || - (mode_mv[NEARMV][ref_frame].as_int == INVALID_MV))); -} - -#define LEFT_TOP_MARGIN ((VP9_ENC_BORDER_IN_PIXELS - VP9_INTERP_EXTEND) << 3) -#define RIGHT_BOTTOM_MARGIN ((VP9_ENC_BORDER_IN_PIXELS -\ - VP9_INTERP_EXTEND) << 3) - -// TODO(jingning): this mv clamping function should be block size dependent. 
-static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) { - clamp_mv(mv, xd->mb_to_left_edge - LEFT_TOP_MARGIN, - xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN, - xd->mb_to_top_edge - LEFT_TOP_MARGIN, - xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN); -} - -static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x, - BLOCK_SIZE bsize, - int *rate2, int64_t *distortion, - int *skippable, - int *rate_y, int *rate_uv, - int *disable_skip, - int_mv (*mode_mv)[MAX_REF_FRAMES], - int mi_row, int mi_col, - int_mv single_newmv[MAX_REF_FRAMES], - INTERP_FILTER (*single_filter)[MAX_REF_FRAMES], - int (*single_skippable)[MAX_REF_FRAMES], - int64_t *psse, - const int64_t ref_best_rd, - int64_t *mask_filter, - int64_t filter_cache[]) { - VP10_COMMON *cm = &cpi->common; - MACROBLOCKD *xd = &x->e_mbd; - MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi; - MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext; - const int is_comp_pred = has_second_ref(mbmi); - const int this_mode = mbmi->mode; - int_mv *frame_mv = mode_mv[this_mode]; - int i; - int refs[2] = { mbmi->ref_frame[0], - (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) }; - int_mv cur_mv[2]; -#if CONFIG_VP9_HIGHBITDEPTH - DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]); - uint8_t *tmp_buf; -#else - DECLARE_ALIGNED(16, uint8_t, tmp_buf[MAX_MB_PLANE * 64 * 64]); -#endif // CONFIG_VP9_HIGHBITDEPTH - int pred_exists = 0; - int intpel_mv; - int64_t rd, tmp_rd, best_rd = INT64_MAX; - int best_needs_copy = 0; - uint8_t *orig_dst[MAX_MB_PLANE]; - int orig_dst_stride[MAX_MB_PLANE]; - int rs = 0; - INTERP_FILTER best_filter = SWITCHABLE; - uint8_t skip_txfm[MAX_MB_PLANE << 2] = {0}; - int64_t bsse[MAX_MB_PLANE << 2] = {0}; - - int bsl = mi_width_log2_lookup[bsize]; - int pred_filter_search = cpi->sf.cb_pred_filter_search ? 
- (((mi_row + mi_col) >> bsl) + - get_chessboard_index(cm->current_video_frame)) & 0x1 : 0; - - int skip_txfm_sb = 0; - int64_t skip_sse_sb = INT64_MAX; - int64_t distortion_y = 0, distortion_uv = 0; - -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - tmp_buf = CONVERT_TO_BYTEPTR(tmp_buf16); - } else { - tmp_buf = (uint8_t *)tmp_buf16; - } -#endif // CONFIG_VP9_HIGHBITDEPTH - - if (pred_filter_search) { - INTERP_FILTER af = SWITCHABLE, lf = SWITCHABLE; - if (xd->up_available) - af = xd->mi[-xd->mi_stride]->mbmi.interp_filter; - if (xd->left_available) - lf = xd->mi[-1]->mbmi.interp_filter; - - if ((this_mode != NEWMV) || (af == lf)) - best_filter = af; - } - - if (is_comp_pred) { - if (frame_mv[refs[0]].as_int == INVALID_MV || - frame_mv[refs[1]].as_int == INVALID_MV) - return INT64_MAX; - - if (cpi->sf.adaptive_mode_search) { - if (single_filter[this_mode][refs[0]] == - single_filter[this_mode][refs[1]]) - best_filter = single_filter[this_mode][refs[0]]; - } - } - - if (this_mode == NEWMV) { - int rate_mv; - if (is_comp_pred) { - // Initialize mv using single prediction mode result. 
- frame_mv[refs[0]].as_int = single_newmv[refs[0]].as_int; - frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int; - - if (cpi->sf.comp_inter_joint_search_thresh <= bsize) { - joint_motion_search(cpi, x, bsize, frame_mv, - mi_row, mi_col, single_newmv, &rate_mv); - } else { - rate_mv = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv, - &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv, - x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); - rate_mv += vp10_mv_bit_cost(&frame_mv[refs[1]].as_mv, - &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv, - x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); - } - *rate2 += rate_mv; - } else { - int_mv tmp_mv; - single_motion_search(cpi, x, bsize, mi_row, mi_col, - &tmp_mv, &rate_mv); - if (tmp_mv.as_int == INVALID_MV) - return INT64_MAX; - - frame_mv[refs[0]].as_int = - xd->mi[0]->bmi[0].as_mv[0].as_int = tmp_mv.as_int; - single_newmv[refs[0]].as_int = tmp_mv.as_int; - - // Estimate the rate implications of a new mv but discount this - // under certain circumstances where we want to help initiate a weak - // motion field, where the distortion gain for a single block may not - // be enough to overcome the cost of a new mv. - if (discount_newmv_test(cpi, this_mode, tmp_mv, mode_mv, refs[0])) { - *rate2 += VPXMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1); - } else { - *rate2 += rate_mv; - } - } - } - - for (i = 0; i < is_comp_pred + 1; ++i) { - cur_mv[i] = frame_mv[refs[i]]; - // Clip "next_nearest" so that it does not extend to far out of image - if (this_mode != NEWMV) - clamp_mv2(&cur_mv[i].as_mv, xd); - - if (mv_check_bounds(x, &cur_mv[i].as_mv)) - return INT64_MAX; - mbmi->mv[i].as_int = cur_mv[i].as_int; - } - - // do first prediction into the destination buffer. Do the next - // prediction into a temporary buffer. Then keep track of which one - // of these currently holds the best predictor, and use the other - // one for future predictions. In the end, copy from tmp_buf to - // dst if necessary. 
- for (i = 0; i < MAX_MB_PLANE; i++) { - orig_dst[i] = xd->plane[i].dst.buf; - orig_dst_stride[i] = xd->plane[i].dst.stride; - } - - // We don't include the cost of the second reference here, because there - // are only three options: Last/Golden, ARF/Last or Golden/ARF, or in other - // words if you present them in that order, the second one is always known - // if the first is known. - // - // Under some circumstances we discount the cost of new mv mode to encourage - // initiation of a motion field. - if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], - mode_mv, refs[0])) { - *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode, - mbmi_ext->mode_context[refs[0]]), - cost_mv_ref(cpi, NEARESTMV, - mbmi_ext->mode_context[refs[0]])); - } else { - *rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]); - } - - if (RDCOST(x->rdmult, x->rddiv, *rate2, 0) > ref_best_rd && - mbmi->mode != NEARESTMV) - return INT64_MAX; - - pred_exists = 0; - // Are all MVs integer pel for Y and UV - intpel_mv = !mv_has_subpel(&mbmi->mv[0].as_mv); - if (is_comp_pred) - intpel_mv &= !mv_has_subpel(&mbmi->mv[1].as_mv); - - // Search for best switchable filter by checking the variance of - // pred error irrespective of whether the filter will be used - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) - filter_cache[i] = INT64_MAX; - - if (cm->interp_filter != BILINEAR) { - if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) { - best_filter = EIGHTTAP; - } else if (best_filter == SWITCHABLE) { - int newbest; - int tmp_rate_sum = 0; - int64_t tmp_dist_sum = 0; - - for (i = 0; i < SWITCHABLE_FILTERS; ++i) { - int j; - int64_t rs_rd; - int tmp_skip_sb = 0; - int64_t tmp_skip_sse = INT64_MAX; - - mbmi->interp_filter = i; - rs = vp10_get_switchable_rate(cpi, xd); - rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0); - - if (i > 0 && intpel_mv) { - rd = RDCOST(x->rdmult, x->rddiv, tmp_rate_sum, tmp_dist_sum); - filter_cache[i] = rd; - filter_cache[SWITCHABLE_FILTERS] = - 
VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd); - if (cm->interp_filter == SWITCHABLE) - rd += rs_rd; - *mask_filter = VPXMAX(*mask_filter, rd); - } else { - int rate_sum = 0; - int64_t dist_sum = 0; - if (i > 0 && cpi->sf.adaptive_interp_filter_search && - (cpi->sf.interp_filter_search_mask & (1 << i))) { - rate_sum = INT_MAX; - dist_sum = INT64_MAX; - continue; - } - - if ((cm->interp_filter == SWITCHABLE && - (!i || best_needs_copy)) || - (cm->interp_filter != SWITCHABLE && - (cm->interp_filter == mbmi->interp_filter || - (i == 0 && intpel_mv)))) { - restore_dst_buf(xd, orig_dst, orig_dst_stride); - } else { - for (j = 0; j < MAX_MB_PLANE; j++) { - xd->plane[j].dst.buf = tmp_buf + j * 64 * 64; - xd->plane[j].dst.stride = 64; - } - } - vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize); - model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, - &tmp_skip_sb, &tmp_skip_sse); - - rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum); - filter_cache[i] = rd; - filter_cache[SWITCHABLE_FILTERS] = - VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd); - if (cm->interp_filter == SWITCHABLE) - rd += rs_rd; - *mask_filter = VPXMAX(*mask_filter, rd); - - if (i == 0 && intpel_mv) { - tmp_rate_sum = rate_sum; - tmp_dist_sum = dist_sum; - } - } - - if (i == 0 && cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) { - if (rd / 2 > ref_best_rd) { - restore_dst_buf(xd, orig_dst, orig_dst_stride); - return INT64_MAX; - } - } - newbest = i == 0 || rd < best_rd; - - if (newbest) { - best_rd = rd; - best_filter = mbmi->interp_filter; - if (cm->interp_filter == SWITCHABLE && i && !intpel_mv) - best_needs_copy = !best_needs_copy; - } - - if ((cm->interp_filter == SWITCHABLE && newbest) || - (cm->interp_filter != SWITCHABLE && - cm->interp_filter == mbmi->interp_filter)) { - pred_exists = 1; - tmp_rd = best_rd; - - skip_txfm_sb = tmp_skip_sb; - skip_sse_sb = tmp_skip_sse; - memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm)); - memcpy(bsse, x->bsse, sizeof(bsse)); - } - } 
- restore_dst_buf(xd, orig_dst, orig_dst_stride); - } - } - // Set the appropriate filter - mbmi->interp_filter = cm->interp_filter != SWITCHABLE ? - cm->interp_filter : best_filter; - rs = cm->interp_filter == SWITCHABLE ? vp10_get_switchable_rate(cpi, xd) : 0; - - if (pred_exists) { - if (best_needs_copy) { - // again temporarily set the buffers to local memory to prevent a memcpy - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = tmp_buf + i * 64 * 64; - xd->plane[i].dst.stride = 64; - } - } - rd = tmp_rd + RDCOST(x->rdmult, x->rddiv, rs, 0); - } else { - int tmp_rate; - int64_t tmp_dist; - // Handles the special case when a filter that is not in the - // switchable list (ex. bilinear) is indicated at the frame level, or - // skip condition holds. - vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize); - model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist, - &skip_txfm_sb, &skip_sse_sb); - rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist); - memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm)); - memcpy(bsse, x->bsse, sizeof(bsse)); - } - - if (!is_comp_pred) - single_filter[this_mode][refs[0]] = mbmi->interp_filter; - - if (cpi->sf.adaptive_mode_search) - if (is_comp_pred) - if (single_skippable[this_mode][refs[0]] && - single_skippable[this_mode][refs[1]]) - memset(skip_txfm, SKIP_TXFM_AC_DC, sizeof(skip_txfm)); - - if (cpi->sf.use_rd_breakout && ref_best_rd < INT64_MAX) { - // if current pred_error modeled rd is substantially more than the best - // so far, do not bother doing full rd - if (rd / 2 > ref_best_rd) { - restore_dst_buf(xd, orig_dst, orig_dst_stride); - return INT64_MAX; - } - } - - if (cm->interp_filter == SWITCHABLE) - *rate2 += rs; - - memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm)); - memcpy(x->bsse, bsse, sizeof(bsse)); - - if (!skip_txfm_sb) { - int skippable_y, skippable_uv; - int64_t sseuv = INT64_MAX; - int64_t rdcosty = INT64_MAX; - - // Y cost and distortion - vp10_subtract_plane(x, bsize, 0); - 
super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse, - bsize, ref_best_rd); - - if (*rate_y == INT_MAX) { - *rate2 = INT_MAX; - *distortion = INT64_MAX; - restore_dst_buf(xd, orig_dst, orig_dst_stride); - return INT64_MAX; - } - - *rate2 += *rate_y; - *distortion += distortion_y; - - rdcosty = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion); - rdcosty = VPXMIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse)); - - if (!super_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv, - &sseuv, bsize, ref_best_rd - rdcosty)) { - *rate2 = INT_MAX; - *distortion = INT64_MAX; - restore_dst_buf(xd, orig_dst, orig_dst_stride); - return INT64_MAX; - } - - *psse += sseuv; - *rate2 += *rate_uv; - *distortion += distortion_uv; - *skippable = skippable_y && skippable_uv; - } else { - x->skip = 1; - *disable_skip = 1; - - // The cost of skip bit needs to be added. - *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1); - - *distortion = skip_sse_sb; - } - - if (!is_comp_pred) - single_skippable[this_mode][refs[0]] = *skippable; - - restore_dst_buf(xd, orig_dst, orig_dst_stride); - return 0; // The rate-distortion cost will be re-calculated by caller. 
-} - -void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x, - RD_COST *rd_cost, BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx, int64_t best_rd) { - VP10_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &x->e_mbd; - struct macroblockd_plane *const pd = xd->plane; - int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0; - int y_skip = 0, uv_skip = 0; - int64_t dist_y = 0, dist_uv = 0; - TX_SIZE max_uv_tx_size; - ctx->skip = 0; - xd->mi[0]->mbmi.ref_frame[0] = INTRA_FRAME; - xd->mi[0]->mbmi.ref_frame[1] = NONE; - - if (bsize >= BLOCK_8X8) { - if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly, - &dist_y, &y_skip, bsize, - best_rd) >= best_rd) { - rd_cost->rate = INT_MAX; - return; - } - } else { - y_skip = 0; - if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate_y, &rate_y_tokenonly, - &dist_y, best_rd) >= best_rd) { - rd_cost->rate = INT_MAX; - return; - } - } - max_uv_tx_size = get_uv_tx_size_impl(xd->mi[0]->mbmi.tx_size, bsize, - pd[1].subsampling_x, - pd[1].subsampling_y); - rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly, - &dist_uv, &uv_skip, VPXMAX(BLOCK_8X8, bsize), - max_uv_tx_size); - - if (y_skip && uv_skip) { - rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly + - vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1); - rd_cost->dist = dist_y + dist_uv; - } else { - rd_cost->rate = rate_y + rate_uv + - vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0); - rd_cost->dist = dist_y + dist_uv; - } - - ctx->mic = *xd->mi[0]; - ctx->mbmi_ext = *x->mbmi_ext; - rd_cost->rdcost = RDCOST(x->rdmult, x->rddiv, rd_cost->rate, rd_cost->dist); -} - -// This function is designed to apply a bias or adjustment to an rd value based -// on the relative variance of the source and reconstruction. 
-#define LOW_VAR_THRESH 16 -#define VLOW_ADJ_MAX 25 -#define VHIGH_ADJ_MAX 8 -static void rd_variance_adjustment(VP10_COMP *cpi, - MACROBLOCK *x, - BLOCK_SIZE bsize, - int64_t *this_rd, - MV_REFERENCE_FRAME ref_frame, - unsigned int source_variance) { - MACROBLOCKD *const xd = &x->e_mbd; - unsigned int recon_variance; - unsigned int absvar_diff = 0; - int64_t var_error = 0; - int64_t var_factor = 0; - - if (*this_rd == INT64_MAX) - return; - -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - recon_variance = - vp10_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize, xd->bd); - } else { - recon_variance = - vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize); - } -#else - recon_variance = - vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize); -#endif // CONFIG_VP9_HIGHBITDEPTH - - if ((source_variance + recon_variance) > LOW_VAR_THRESH) { - absvar_diff = (source_variance > recon_variance) - ? (source_variance - recon_variance) - : (recon_variance - source_variance); - - var_error = ((int64_t)200 * source_variance * recon_variance) / - (((int64_t)source_variance * source_variance) + - ((int64_t)recon_variance * recon_variance)); - var_error = 100 - var_error; - } - - // Source variance above a threshold and ref frame is intra. - // This case is targeted mainly at discouraging intra modes that give rise - // to a predictor with a low spatial complexity compared to the source. - if ((source_variance > LOW_VAR_THRESH) && (ref_frame == INTRA_FRAME) && - (source_variance > recon_variance)) { - var_factor = VPXMIN(absvar_diff, VPXMIN(VLOW_ADJ_MAX, var_error)); - // A second possible case of interest is where the source variance - // is very low and we wish to discourage false texture or motion trails. 
- } else if ((source_variance < (LOW_VAR_THRESH >> 1)) && - (recon_variance > source_variance)) { - var_factor = VPXMIN(absvar_diff, VPXMIN(VHIGH_ADJ_MAX, var_error)); - } - *this_rd += (*this_rd * var_factor) / 100; -} - - -// Do we have an internal image edge (e.g. formatting bars). -int vp10_internal_image_edge(VP10_COMP *cpi) { - return (cpi->oxcf.pass == 2) && - ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) || - (cpi->twopass.this_frame_stats.inactive_zone_cols > 0)); -} - -// Checks to see if a super block is on a horizontal image edge. -// In most cases this is the "real" edge unless there are formatting -// bars embedded in the stream. -int vp10_active_h_edge(VP10_COMP *cpi, int mi_row, int mi_step) { - int top_edge = 0; - int bottom_edge = cpi->common.mi_rows; - int is_active_h_edge = 0; - - // For two pass account for any formatting bars detected. - if (cpi->oxcf.pass == 2) { - TWO_PASS *twopass = &cpi->twopass; - - // The inactive region is specified in MBs not mi units. - // The image edge is in the following MB row. - top_edge += (int)(twopass->this_frame_stats.inactive_zone_rows * 2); - - bottom_edge -= (int)(twopass->this_frame_stats.inactive_zone_rows * 2); - bottom_edge = VPXMAX(top_edge, bottom_edge); - } - - if (((top_edge >= mi_row) && (top_edge < (mi_row + mi_step))) || - ((bottom_edge >= mi_row) && (bottom_edge < (mi_row + mi_step)))) { - is_active_h_edge = 1; - } - return is_active_h_edge; -} - -// Checks to see if a super block is on a vertical image edge. -// In most cases this is the "real" edge unless there are formatting -// bars embedded in the stream. -int vp10_active_v_edge(VP10_COMP *cpi, int mi_col, int mi_step) { - int left_edge = 0; - int right_edge = cpi->common.mi_cols; - int is_active_v_edge = 0; - - // For two pass account for any formatting bars detected. - if (cpi->oxcf.pass == 2) { - TWO_PASS *twopass = &cpi->twopass; - - // The inactive region is specified in MBs not mi units. 
- // The image edge is in the following MB row. - left_edge += (int)(twopass->this_frame_stats.inactive_zone_cols * 2); - - right_edge -= (int)(twopass->this_frame_stats.inactive_zone_cols * 2); - right_edge = VPXMAX(left_edge, right_edge); - } - - if (((left_edge >= mi_col) && (left_edge < (mi_col + mi_step))) || - ((right_edge >= mi_col) && (right_edge < (mi_col + mi_step)))) { - is_active_v_edge = 1; - } - return is_active_v_edge; -} - -// Checks to see if a super block is at the edge of the active image. -// In most cases this is the "real" edge unless there are formatting -// bars embedded in the stream. -int vp10_active_edge_sb(VP10_COMP *cpi, - int mi_row, int mi_col) { - return vp10_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) || - vp10_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE); -} - -void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, - TileDataEnc *tile_data, - MACROBLOCK *x, - int mi_row, int mi_col, - RD_COST *rd_cost, BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx, - int64_t best_rd_so_far) { - VP10_COMMON *const cm = &cpi->common; - RD_OPT *const rd_opt = &cpi->rd; - SPEED_FEATURES *const sf = &cpi->sf; - MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext; - const struct segmentation *const seg = &cm->seg; - PREDICTION_MODE this_mode; - MV_REFERENCE_FRAME ref_frame, second_ref_frame; - unsigned char segment_id = mbmi->segment_id; - int comp_pred, i, k; - int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES]; - struct buf_2d yv12_mb[4][MAX_MB_PLANE]; - int_mv single_newmv[MAX_REF_FRAMES] = { { 0 } }; - INTERP_FILTER single_inter_filter[MB_MODE_COUNT][MAX_REF_FRAMES]; - int single_skippable[MB_MODE_COUNT][MAX_REF_FRAMES]; - static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG, - VP9_ALT_FLAG }; - int64_t best_rd = best_rd_so_far; - int64_t best_pred_diff[REFERENCE_MODES]; - int64_t best_pred_rd[REFERENCE_MODES]; - int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS]; - int64_t 
best_filter_diff[SWITCHABLE_FILTER_CONTEXTS]; - MB_MODE_INFO best_mbmode; - int best_mode_skippable = 0; - int midx, best_mode_index = -1; - unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES]; - vpx_prob comp_mode_p; - int64_t best_intra_rd = INT64_MAX; - unsigned int best_pred_sse = UINT_MAX; - PREDICTION_MODE best_intra_mode = DC_PRED; - int rate_uv_intra[TX_SIZES], rate_uv_tokenonly[TX_SIZES]; - int64_t dist_uv[TX_SIZES]; - int skip_uv[TX_SIZES]; - PREDICTION_MODE mode_uv[TX_SIZES]; - const int intra_cost_penalty = vp10_get_intra_cost_penalty( - cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth); - int best_skip2 = 0; - uint8_t ref_frame_skip_mask[2] = { 0 }; - uint16_t mode_skip_mask[MAX_REF_FRAMES] = { 0 }; - int mode_skip_start = sf->mode_skip_start + 1; - const int *const rd_threshes = rd_opt->threshes[segment_id][bsize]; - const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize]; - int64_t mode_threshold[MAX_MODES]; - int *mode_map = tile_data->mode_map[bsize]; - const int mode_search_skip_flags = sf->mode_search_skip_flags; - int64_t mask_filter = 0; - int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS]; - - vp10_zero(best_mbmode); - - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) - filter_cache[i] = INT64_MAX; - - estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp, - &comp_mode_p); - - for (i = 0; i < REFERENCE_MODES; ++i) - best_pred_rd[i] = INT64_MAX; - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) - best_filter_rd[i] = INT64_MAX; - for (i = 0; i < TX_SIZES; i++) - rate_uv_intra[i] = INT_MAX; - for (i = 0; i < MAX_REF_FRAMES; ++i) - x->pred_sse[i] = INT_MAX; - for (i = 0; i < MB_MODE_COUNT; ++i) { - for (k = 0; k < MAX_REF_FRAMES; ++k) { - single_inter_filter[i][k] = SWITCHABLE; - single_skippable[i][k] = 0; - } - } - - rd_cost->rate = INT_MAX; - - for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { - x->pred_mv_sad[ref_frame] = INT_MAX; - if 
(cpi->ref_frame_flags & flag_list[ref_frame]) { - assert(get_ref_frame_buffer(cpi, ref_frame) != NULL); - setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col, - frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb); - } - frame_mv[NEWMV][ref_frame].as_int = INVALID_MV; - frame_mv[ZEROMV][ref_frame].as_int = 0; - } - - for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { - if (!(cpi->ref_frame_flags & flag_list[ref_frame])) { - // Skip checking missing references in both single and compound reference - // modes. Note that a mode will be skipped iff both reference frames - // are masked out. - ref_frame_skip_mask[0] |= (1 << ref_frame); - ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK; - } else { - for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) { - // Skip fixed mv modes for poor references - if ((x->pred_mv_sad[ref_frame] >> 2) > x->pred_mv_sad[i]) { - mode_skip_mask[ref_frame] |= INTER_NEAREST_NEAR_ZERO; - break; - } - } - } - // If the segment reference frame feature is enabled.... - // then do nothing if the current ref frame is not allowed.. - if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) && - get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) { - ref_frame_skip_mask[0] |= (1 << ref_frame); - ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK; - } - } - - // Disable this drop out case if the ref frame - // segment level feature is enabled for this segment. This is to - // prevent the possibility that we end up unable to pick any mode. - if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) { - // Only consider ZEROMV/ALTREF_FRAME for alt ref frame, - // unless ARNR filtering is enabled in which case we want - // an unfiltered alternative. We allow near/nearest as well - // because they may result in zero-zero MVs but be cheaper. 
- if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) { - ref_frame_skip_mask[0] = (1 << LAST_FRAME) | (1 << GOLDEN_FRAME); - ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK; - mode_skip_mask[ALTREF_FRAME] = ~INTER_NEAREST_NEAR_ZERO; - if (frame_mv[NEARMV][ALTREF_FRAME].as_int != 0) - mode_skip_mask[ALTREF_FRAME] |= (1 << NEARMV); - if (frame_mv[NEARESTMV][ALTREF_FRAME].as_int != 0) - mode_skip_mask[ALTREF_FRAME] |= (1 << NEARESTMV); - } - } - - if (cpi->rc.is_src_frame_alt_ref) { - if (sf->alt_ref_search_fp) { - mode_skip_mask[ALTREF_FRAME] = 0; - ref_frame_skip_mask[0] = ~(1 << ALTREF_FRAME); - ref_frame_skip_mask[1] = SECOND_REF_FRAME_MASK; - } - } - - if (sf->alt_ref_search_fp) - if (!cm->show_frame && x->pred_mv_sad[GOLDEN_FRAME] < INT_MAX) - if (x->pred_mv_sad[ALTREF_FRAME] > (x->pred_mv_sad[GOLDEN_FRAME] << 1)) - mode_skip_mask[ALTREF_FRAME] |= INTER_ALL; - - if (sf->adaptive_mode_search) { - if (cm->show_frame && !cpi->rc.is_src_frame_alt_ref && - cpi->rc.frames_since_golden >= 3) - if (x->pred_mv_sad[GOLDEN_FRAME] > (x->pred_mv_sad[LAST_FRAME] << 1)) - mode_skip_mask[GOLDEN_FRAME] |= INTER_ALL; - } - - if (bsize > sf->max_intra_bsize) { - ref_frame_skip_mask[0] |= (1 << INTRA_FRAME); - ref_frame_skip_mask[1] |= (1 << INTRA_FRAME); - } - - mode_skip_mask[INTRA_FRAME] |= - ~(sf->intra_y_mode_mask[max_txsize_lookup[bsize]]); - - for (i = 0; i <= LAST_NEW_MV_INDEX; ++i) - mode_threshold[i] = 0; - for (i = LAST_NEW_MV_INDEX + 1; i < MAX_MODES; ++i) - mode_threshold[i] = ((int64_t)rd_threshes[i] * rd_thresh_freq_fact[i]) >> 5; - - midx = sf->schedule_mode_search ? 
mode_skip_start : 0; - while (midx > 4) { - uint8_t end_pos = 0; - for (i = 5; i < midx; ++i) { - if (mode_threshold[mode_map[i - 1]] > mode_threshold[mode_map[i]]) { - uint8_t tmp = mode_map[i]; - mode_map[i] = mode_map[i - 1]; - mode_map[i - 1] = tmp; - end_pos = i; - } - } - midx = end_pos; - } - - for (midx = 0; midx < MAX_MODES; ++midx) { - int mode_index = mode_map[midx]; - int mode_excluded = 0; - int64_t this_rd = INT64_MAX; - int disable_skip = 0; - int compmode_cost = 0; - int rate2 = 0, rate_y = 0, rate_uv = 0; - int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0; - int skippable = 0; - int this_skip2 = 0; - int64_t total_sse = INT64_MAX; - int early_term = 0; - - this_mode = vp10_mode_order[mode_index].mode; - ref_frame = vp10_mode_order[mode_index].ref_frame[0]; - second_ref_frame = vp10_mode_order[mode_index].ref_frame[1]; - - // Look at the reference frame of the best mode so far and set the - // skip mask to look at a subset of the remaining modes. - if (midx == mode_skip_start && best_mode_index >= 0) { - switch (best_mbmode.ref_frame[0]) { - case INTRA_FRAME: - break; - case LAST_FRAME: - ref_frame_skip_mask[0] |= LAST_FRAME_MODE_MASK; - ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK; - break; - case GOLDEN_FRAME: - ref_frame_skip_mask[0] |= GOLDEN_FRAME_MODE_MASK; - ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK; - break; - case ALTREF_FRAME: - ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK; - break; - case NONE: - case MAX_REF_FRAMES: - assert(0 && "Invalid Reference frame"); - break; - } - } - - if ((ref_frame_skip_mask[0] & (1 << ref_frame)) && - (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame)))) - continue; - - if (mode_skip_mask[ref_frame] & (1 << this_mode)) - continue; - - // Test best rd so far against threshold for trying this mode. 
- if (best_mode_skippable && sf->schedule_mode_search) - mode_threshold[mode_index] <<= 1; - - if (best_rd < mode_threshold[mode_index]) - continue; - - comp_pred = second_ref_frame > INTRA_FRAME; - if (comp_pred) { - if (!cpi->allow_comp_inter_inter) - continue; - - // Skip compound inter modes if ARF is not available. - if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) - continue; - - // Do not allow compound prediction if the segment level reference frame - // feature is in use as in this case there can only be one reference. - if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) - continue; - - if ((mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) && - best_mode_index >= 0 && best_mbmode.ref_frame[0] == INTRA_FRAME) - continue; - - mode_excluded = cm->reference_mode == SINGLE_REFERENCE; - } else { - if (ref_frame != INTRA_FRAME) - mode_excluded = cm->reference_mode == COMPOUND_REFERENCE; - } - - if (ref_frame == INTRA_FRAME) { - if (sf->adaptive_mode_search) - if ((x->source_variance << num_pels_log2_lookup[bsize]) > best_pred_sse) - continue; - - if (this_mode != DC_PRED) { - // Disable intra modes other than DC_PRED for blocks with low variance - // Threshold for intra skipping based on source variance - // TODO(debargha): Specialize the threshold for super block sizes - const unsigned int skip_intra_var_thresh = 64; - if ((mode_search_skip_flags & FLAG_SKIP_INTRA_LOWVAR) && - x->source_variance < skip_intra_var_thresh) - continue; - // Only search the oblique modes if the best so far is - // one of the neighboring directional modes - if ((mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) && - (this_mode >= D45_PRED && this_mode <= TM_PRED)) { - if (best_mode_index >= 0 && - best_mbmode.ref_frame[0] > INTRA_FRAME) - continue; - } - if (mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) { - if (conditional_skipintra(this_mode, best_intra_mode)) - continue; - } - } - } else { - const MV_REFERENCE_FRAME ref_frames[2] = {ref_frame, 
second_ref_frame}; - if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv, - this_mode, ref_frames)) - continue; - } - - mbmi->mode = this_mode; - mbmi->uv_mode = DC_PRED; - mbmi->ref_frame[0] = ref_frame; - mbmi->ref_frame[1] = second_ref_frame; - // Evaluate all sub-pel filters irrespective of whether we can use - // them for this frame. - mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP - : cm->interp_filter; - mbmi->mv[0].as_int = mbmi->mv[1].as_int = 0; - - x->skip = 0; - set_ref_ptrs(cm, xd, ref_frame, second_ref_frame); - - // Select prediction reference frames. - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].pre[0] = yv12_mb[ref_frame][i]; - if (comp_pred) - xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i]; - } - - if (ref_frame == INTRA_FRAME) { - TX_SIZE uv_tx; - struct macroblockd_plane *const pd = &xd->plane[1]; - memset(x->skip_txfm, 0, sizeof(x->skip_txfm)); - super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, - NULL, bsize, best_rd); - if (rate_y == INT_MAX) - continue; - - uv_tx = get_uv_tx_size_impl(mbmi->tx_size, bsize, pd->subsampling_x, - pd->subsampling_y); - if (rate_uv_intra[uv_tx] == INT_MAX) { - choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, - &rate_uv_intra[uv_tx], &rate_uv_tokenonly[uv_tx], - &dist_uv[uv_tx], &skip_uv[uv_tx], &mode_uv[uv_tx]); - } - - rate_uv = rate_uv_tokenonly[uv_tx]; - distortion_uv = dist_uv[uv_tx]; - skippable = skippable && skip_uv[uv_tx]; - mbmi->uv_mode = mode_uv[uv_tx]; - - rate2 = rate_y + cpi->mbmode_cost[mbmi->mode] + rate_uv_intra[uv_tx]; - if (this_mode != DC_PRED && this_mode != TM_PRED) - rate2 += intra_cost_penalty; - distortion2 = distortion_y + distortion_uv; - } else { - this_rd = handle_inter_mode(cpi, x, bsize, - &rate2, &distortion2, &skippable, - &rate_y, &rate_uv, - &disable_skip, frame_mv, - mi_row, mi_col, - single_newmv, single_inter_filter, - single_skippable, &total_sse, best_rd, - &mask_filter, filter_cache); - if (this_rd == INT64_MAX) - continue; 
- - compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred); - - if (cm->reference_mode == REFERENCE_MODE_SELECT) - rate2 += compmode_cost; - } - - // Estimate the reference frame signaling cost and add it - // to the rolling cost variable. - if (comp_pred) { - rate2 += ref_costs_comp[ref_frame]; - } else { - rate2 += ref_costs_single[ref_frame]; - } - - if (!disable_skip) { - if (skippable) { - // Back out the coefficient coding costs - rate2 -= (rate_y + rate_uv); - - // Cost the skip mb case - rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1); - } else if (ref_frame != INTRA_FRAME && !xd->lossless[mbmi->segment_id]) { - if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) < - RDCOST(x->rdmult, x->rddiv, 0, total_sse)) { - // Add in the cost of the no skip flag. - rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0); - } else { - // FIXME(rbultje) make this work for splitmv also - rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1); - distortion2 = total_sse; - assert(total_sse >= 0); - rate2 -= (rate_y + rate_uv); - this_skip2 = 1; - } - } else { - // Add in the cost of the no skip flag. - rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0); - } - - // Calculate the final RD estimate for this mode. - this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2); - } - - // Apply an adjustment to the rd value based on the similarity of the - // source variance and reconstructed variance. - rd_variance_adjustment(cpi, x, bsize, &this_rd, - ref_frame, x->source_variance); - - if (ref_frame == INTRA_FRAME) { - // Keep record of best intra rd - if (this_rd < best_intra_rd) { - best_intra_rd = this_rd; - best_intra_mode = mbmi->mode; - } - } - - if (!disable_skip && ref_frame == INTRA_FRAME) { - for (i = 0; i < REFERENCE_MODES; ++i) - best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd); - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) - best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd); - } - - // Did this mode help.. i.e. 
is it the new best mode - if (this_rd < best_rd || x->skip) { - int max_plane = MAX_MB_PLANE; - if (!mode_excluded) { - // Note index of best mode so far - best_mode_index = mode_index; - - if (ref_frame == INTRA_FRAME) { - /* required for left and above block mv */ - mbmi->mv[0].as_int = 0; - max_plane = 1; - } else { - best_pred_sse = x->pred_sse[ref_frame]; - } - - rd_cost->rate = rate2; - rd_cost->dist = distortion2; - rd_cost->rdcost = this_rd; - best_rd = this_rd; - best_mbmode = *mbmi; - best_skip2 = this_skip2; - best_mode_skippable = skippable; - - if (!x->select_tx_size) - swap_block_ptr(x, ctx, 1, 0, 0, max_plane); - memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mbmi->tx_size], - sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk); - - // TODO(debargha): enhance this test with a better distortion prediction - // based on qp, activity mask and history - if ((mode_search_skip_flags & FLAG_EARLY_TERMINATE) && - (mode_index > MIN_EARLY_TERM_INDEX)) { - int qstep = xd->plane[0].dequant[1]; - // TODO(debargha): Enhance this by specializing for each mode_index - int scale = 4; -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - qstep >>= (xd->bd - 8); - } -#endif // CONFIG_VP9_HIGHBITDEPTH - if (x->source_variance < UINT_MAX) { - const int var_adjust = (x->source_variance < 16); - scale -= var_adjust; - } - if (ref_frame > INTRA_FRAME && - distortion2 * scale < qstep * qstep) { - early_term = 1; - } - } - } - } - - /* keep record of best compound/single-only prediction */ - if (!disable_skip && ref_frame != INTRA_FRAME) { - int64_t single_rd, hybrid_rd, single_rate, hybrid_rate; - - if (cm->reference_mode == REFERENCE_MODE_SELECT) { - single_rate = rate2 - compmode_cost; - hybrid_rate = rate2; - } else { - single_rate = rate2; - hybrid_rate = rate2 + compmode_cost; - } - - single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2); - hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2); - - if (!comp_pred) { - if 
(single_rd < best_pred_rd[SINGLE_REFERENCE]) - best_pred_rd[SINGLE_REFERENCE] = single_rd; - } else { - if (single_rd < best_pred_rd[COMPOUND_REFERENCE]) - best_pred_rd[COMPOUND_REFERENCE] = single_rd; - } - if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT]) - best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd; - - /* keep record of best filter type */ - if (!mode_excluded && cm->interp_filter != BILINEAR) { - int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ? - SWITCHABLE_FILTERS : cm->interp_filter]; - - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { - int64_t adj_rd; - if (ref == INT64_MAX) - adj_rd = 0; - else if (filter_cache[i] == INT64_MAX) - // when early termination is triggered, the encoder does not have - // access to the rate-distortion cost. it only knows that the cost - // should be above the maximum valid value. hence it takes the known - // maximum plus an arbitrary constant as the rate-distortion cost. - adj_rd = mask_filter - ref + 10; - else - adj_rd = filter_cache[i] - ref; - - adj_rd += this_rd; - best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd); - } - } - } - - if (early_term) - break; - - if (x->skip && !comp_pred) - break; - } - - // The inter modes' rate costs are not calculated precisely in some cases. - // Therefore, sometimes, NEWMV is chosen instead of NEARESTMV, NEARMV, and - // ZEROMV. Here, checks are added for those cases, and the mode decisions - // are corrected. 
- if (best_mbmode.mode == NEWMV) { - const MV_REFERENCE_FRAME refs[2] = {best_mbmode.ref_frame[0], - best_mbmode.ref_frame[1]}; - int comp_pred_mode = refs[1] > INTRA_FRAME; - - if (frame_mv[NEARESTMV][refs[0]].as_int == best_mbmode.mv[0].as_int && - ((comp_pred_mode && frame_mv[NEARESTMV][refs[1]].as_int == - best_mbmode.mv[1].as_int) || !comp_pred_mode)) - best_mbmode.mode = NEARESTMV; - else if (frame_mv[NEARMV][refs[0]].as_int == best_mbmode.mv[0].as_int && - ((comp_pred_mode && frame_mv[NEARMV][refs[1]].as_int == - best_mbmode.mv[1].as_int) || !comp_pred_mode)) - best_mbmode.mode = NEARMV; - else if (best_mbmode.mv[0].as_int == 0 && - ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) || !comp_pred_mode)) - best_mbmode.mode = ZEROMV; - } - - if (best_mode_index < 0 || best_rd >= best_rd_so_far) { - rd_cost->rate = INT_MAX; - rd_cost->rdcost = INT64_MAX; - return; - } - - // If we used an estimate for the uv intra rd in the loop above... - if (sf->use_uv_intra_rd_estimate) { - // Do Intra UV best rd mode selection if best mode choice above was intra. - if (best_mbmode.ref_frame[0] == INTRA_FRAME) { - TX_SIZE uv_tx_size; - *mbmi = best_mbmode; - uv_tx_size = get_uv_tx_size(mbmi, &xd->plane[1]); - rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra[uv_tx_size], - &rate_uv_tokenonly[uv_tx_size], - &dist_uv[uv_tx_size], - &skip_uv[uv_tx_size], - bsize < BLOCK_8X8 ? 
BLOCK_8X8 : bsize, - uv_tx_size); - } - } - - assert((cm->interp_filter == SWITCHABLE) || - (cm->interp_filter == best_mbmode.interp_filter) || - !is_inter_block(&best_mbmode)); - - if (!cpi->rc.is_src_frame_alt_ref) - vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact, - sf->adaptive_rd_thresh, bsize, best_mode_index); - - // macroblock modes - *mbmi = best_mbmode; - x->skip |= best_skip2; - - for (i = 0; i < REFERENCE_MODES; ++i) { - if (best_pred_rd[i] == INT64_MAX) - best_pred_diff[i] = INT_MIN; - else - best_pred_diff[i] = best_rd - best_pred_rd[i]; - } - - if (!x->skip) { - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { - if (best_filter_rd[i] == INT64_MAX) - best_filter_diff[i] = 0; - else - best_filter_diff[i] = best_rd - best_filter_rd[i]; - } - if (cm->interp_filter == SWITCHABLE) - assert(best_filter_diff[SWITCHABLE_FILTERS] == 0); - } else { - vp10_zero(best_filter_diff); - } - - // TODO(yunqingwang): Moving this line in front of the above best_filter_diff - // updating code causes PSNR loss. Need to figure out the confliction. - x->skip |= best_mode_skippable; - - if (!x->skip && !x->select_tx_size) { - int has_high_freq_coeff = 0; - int plane; - int max_plane = is_inter_block(&xd->mi[0]->mbmi) - ? 
MAX_MB_PLANE : 1; - for (plane = 0; plane < max_plane; ++plane) { - x->plane[plane].eobs = ctx->eobs_pbuf[plane][1]; - has_high_freq_coeff |= vp10_has_high_freq_in_plane(x, bsize, plane); - } - - for (plane = max_plane; plane < MAX_MB_PLANE; ++plane) { - x->plane[plane].eobs = ctx->eobs_pbuf[plane][2]; - has_high_freq_coeff |= vp10_has_high_freq_in_plane(x, bsize, plane); - } - - best_mode_skippable |= !has_high_freq_coeff; - } - - assert(best_mode_index >= 0); - - store_coding_context(x, ctx, best_mode_index, best_pred_diff, - best_filter_diff, best_mode_skippable); -} - -void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi, - TileDataEnc *tile_data, - MACROBLOCK *x, - RD_COST *rd_cost, - BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx, - int64_t best_rd_so_far) { - VP10_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - unsigned char segment_id = mbmi->segment_id; - const int comp_pred = 0; - int i; - int64_t best_pred_diff[REFERENCE_MODES]; - int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS]; - unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES]; - vpx_prob comp_mode_p; - INTERP_FILTER best_filter = SWITCHABLE; - int64_t this_rd = INT64_MAX; - int rate2 = 0; - const int64_t distortion2 = 0; - - estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp, - &comp_mode_p); - - for (i = 0; i < MAX_REF_FRAMES; ++i) - x->pred_sse[i] = INT_MAX; - for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i) - x->pred_mv_sad[i] = INT_MAX; - - rd_cost->rate = INT_MAX; - - assert(segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)); - - mbmi->mode = ZEROMV; - mbmi->uv_mode = DC_PRED; - mbmi->ref_frame[0] = LAST_FRAME; - mbmi->ref_frame[1] = NONE; - mbmi->mv[0].as_int = 0; - x->skip = 1; - - if (cm->interp_filter != BILINEAR) { - best_filter = EIGHTTAP; - if (cm->interp_filter == SWITCHABLE && - x->source_variance >= cpi->sf.disable_filter_search_var_thresh) { - int rs; - 
int best_rs = INT_MAX; - for (i = 0; i < SWITCHABLE_FILTERS; ++i) { - mbmi->interp_filter = i; - rs = vp10_get_switchable_rate(cpi, xd); - if (rs < best_rs) { - best_rs = rs; - best_filter = mbmi->interp_filter; - } - } - } - } - // Set the appropriate filter - if (cm->interp_filter == SWITCHABLE) { - mbmi->interp_filter = best_filter; - rate2 += vp10_get_switchable_rate(cpi, xd); - } else { - mbmi->interp_filter = cm->interp_filter; - } - - if (cm->reference_mode == REFERENCE_MODE_SELECT) - rate2 += vp10_cost_bit(comp_mode_p, comp_pred); - - // Estimate the reference frame signaling cost and add it - // to the rolling cost variable. - rate2 += ref_costs_single[LAST_FRAME]; - this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2); - - rd_cost->rate = rate2; - rd_cost->dist = distortion2; - rd_cost->rdcost = this_rd; - - if (this_rd >= best_rd_so_far) { - rd_cost->rate = INT_MAX; - rd_cost->rdcost = INT64_MAX; - return; - } - - assert((cm->interp_filter == SWITCHABLE) || - (cm->interp_filter == mbmi->interp_filter)); - - vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact, - cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV); - - vp10_zero(best_pred_diff); - vp10_zero(best_filter_diff); - - if (!x->select_tx_size) - swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE); - store_coding_context(x, ctx, THR_ZEROMV, - best_pred_diff, best_filter_diff, 0); -} - -void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, - TileDataEnc *tile_data, - MACROBLOCK *x, - int mi_row, int mi_col, - RD_COST *rd_cost, - BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx, - int64_t best_rd_so_far) { - VP10_COMMON *const cm = &cpi->common; - RD_OPT *const rd_opt = &cpi->rd; - SPEED_FEATURES *const sf = &cpi->sf; - MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - const struct segmentation *const seg = &cm->seg; - MV_REFERENCE_FRAME ref_frame, second_ref_frame; - unsigned char segment_id = mbmi->segment_id; - int comp_pred, i; - int_mv 
frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES]; - struct buf_2d yv12_mb[4][MAX_MB_PLANE]; - static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG, - VP9_ALT_FLAG }; - int64_t best_rd = best_rd_so_far; - int64_t best_yrd = best_rd_so_far; // FIXME(rbultje) more precise - int64_t best_pred_diff[REFERENCE_MODES]; - int64_t best_pred_rd[REFERENCE_MODES]; - int64_t best_filter_rd[SWITCHABLE_FILTER_CONTEXTS]; - int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS]; - MB_MODE_INFO best_mbmode; - int ref_index, best_ref_index = 0; - unsigned int ref_costs_single[MAX_REF_FRAMES], ref_costs_comp[MAX_REF_FRAMES]; - vpx_prob comp_mode_p; - INTERP_FILTER tmp_best_filter = SWITCHABLE; - int rate_uv_intra, rate_uv_tokenonly; - int64_t dist_uv; - int skip_uv; - PREDICTION_MODE mode_uv = DC_PRED; - const int intra_cost_penalty = vp10_get_intra_cost_penalty( - cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth); - int_mv seg_mvs[4][MAX_REF_FRAMES]; - b_mode_info best_bmodes[4]; - int best_skip2 = 0; - int ref_frame_skip_mask[2] = { 0 }; - int64_t mask_filter = 0; - int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS]; - int internal_active_edge = - vp10_active_edge_sb(cpi, mi_row, mi_col) && vp10_internal_image_edge(cpi); - - memset(x->zcoeff_blk[TX_4X4], 0, 4); - vp10_zero(best_mbmode); - - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) - filter_cache[i] = INT64_MAX; - - for (i = 0; i < 4; i++) { - int j; - for (j = 0; j < MAX_REF_FRAMES; j++) - seg_mvs[i][j].as_int = INVALID_MV; - } - - estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp, - &comp_mode_p); - - for (i = 0; i < REFERENCE_MODES; ++i) - best_pred_rd[i] = INT64_MAX; - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) - best_filter_rd[i] = INT64_MAX; - rate_uv_intra = INT_MAX; - - rd_cost->rate = INT_MAX; - - for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) { - if (cpi->ref_frame_flags & flag_list[ref_frame]) { - setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, mi_col, - 
frame_mv[NEARESTMV], frame_mv[NEARMV], - yv12_mb); - } else { - ref_frame_skip_mask[0] |= (1 << ref_frame); - ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK; - } - frame_mv[NEWMV][ref_frame].as_int = INVALID_MV; - frame_mv[ZEROMV][ref_frame].as_int = 0; - } - - for (ref_index = 0; ref_index < MAX_REFS; ++ref_index) { - int mode_excluded = 0; - int64_t this_rd = INT64_MAX; - int disable_skip = 0; - int compmode_cost = 0; - int rate2 = 0, rate_y = 0, rate_uv = 0; - int64_t distortion2 = 0, distortion_y = 0, distortion_uv = 0; - int skippable = 0; - int i; - int this_skip2 = 0; - int64_t total_sse = INT_MAX; - int early_term = 0; - - ref_frame = vp10_ref_order[ref_index].ref_frame[0]; - second_ref_frame = vp10_ref_order[ref_index].ref_frame[1]; - - // Look at the reference frame of the best mode so far and set the - // skip mask to look at a subset of the remaining modes. - if (ref_index > 2 && sf->mode_skip_start < MAX_MODES) { - if (ref_index == 3) { - switch (best_mbmode.ref_frame[0]) { - case INTRA_FRAME: - break; - case LAST_FRAME: - ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME); - ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK; - break; - case GOLDEN_FRAME: - ref_frame_skip_mask[0] |= (1 << LAST_FRAME) | (1 << ALTREF_FRAME); - ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK; - break; - case ALTREF_FRAME: - ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << LAST_FRAME); - break; - case NONE: - case MAX_REF_FRAMES: - assert(0 && "Invalid Reference frame"); - break; - } - } - } - - if ((ref_frame_skip_mask[0] & (1 << ref_frame)) && - (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame)))) - continue; - - // Test best rd so far against threshold for trying this mode. 
- if (!internal_active_edge && - rd_less_than_thresh(best_rd, - rd_opt->threshes[segment_id][bsize][ref_index], - tile_data->thresh_freq_fact[bsize][ref_index])) - continue; - - comp_pred = second_ref_frame > INTRA_FRAME; - if (comp_pred) { - if (!cpi->allow_comp_inter_inter) - continue; - if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) - continue; - // Do not allow compound prediction if the segment level reference frame - // feature is in use as in this case there can only be one reference. - if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) - continue; - - if ((sf->mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) && - best_mbmode.ref_frame[0] == INTRA_FRAME) - continue; - } - - // TODO(jingning, jkoleszar): scaling reference frame not supported for - // sub8x8 blocks. - if (ref_frame > INTRA_FRAME && - vp10_is_scaled(&cm->frame_refs[ref_frame - 1].sf)) - continue; - - if (second_ref_frame > INTRA_FRAME && - vp10_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf)) - continue; - - if (comp_pred) - mode_excluded = cm->reference_mode == SINGLE_REFERENCE; - else if (ref_frame != INTRA_FRAME) - mode_excluded = cm->reference_mode == COMPOUND_REFERENCE; - - // If the segment reference frame feature is enabled.... - // then do nothing if the current ref frame is not allowed.. - if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) && - get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) { - continue; - // Disable this drop out case if the ref frame - // segment level feature is enabled for this segment. This is to - // prevent the possibility that we end up unable to pick any mode. - } else if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) { - // Only consider ZEROMV/ALTREF_FRAME for alt ref frame, - // unless ARNR filtering is enabled in which case we want - // an unfiltered alternative. We allow near/nearest as well - // because they may result in zero-zero MVs but be cheaper. 
- if (cpi->rc.is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) - continue; - } - - mbmi->tx_size = TX_4X4; - mbmi->uv_mode = DC_PRED; - mbmi->ref_frame[0] = ref_frame; - mbmi->ref_frame[1] = second_ref_frame; - // Evaluate all sub-pel filters irrespective of whether we can use - // them for this frame. - mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP - : cm->interp_filter; - x->skip = 0; - set_ref_ptrs(cm, xd, ref_frame, second_ref_frame); - - // Select prediction reference frames. - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].pre[0] = yv12_mb[ref_frame][i]; - if (comp_pred) - xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i]; - } - - if (ref_frame == INTRA_FRAME) { - int rate; - if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y, - &distortion_y, best_rd) >= best_rd) - continue; - rate2 += rate; - rate2 += intra_cost_penalty; - distortion2 += distortion_y; - - if (rate_uv_intra == INT_MAX) { - choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4, - &rate_uv_intra, - &rate_uv_tokenonly, - &dist_uv, &skip_uv, - &mode_uv); - } - rate2 += rate_uv_intra; - rate_uv = rate_uv_tokenonly; - distortion2 += dist_uv; - distortion_uv = dist_uv; - mbmi->uv_mode = mode_uv; - } else { - int rate; - int64_t distortion; - int64_t this_rd_thresh; - int64_t tmp_rd, tmp_best_rd = INT64_MAX, tmp_best_rdu = INT64_MAX; - int tmp_best_rate = INT_MAX, tmp_best_ratey = INT_MAX; - int64_t tmp_best_distortion = INT_MAX, tmp_best_sse, uv_sse; - int tmp_best_skippable = 0; - int switchable_filter_index; - int_mv *second_ref = comp_pred ? - &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL; - b_mode_info tmp_best_bmodes[16]; - MB_MODE_INFO tmp_best_mbmode; - BEST_SEG_INFO bsi[SWITCHABLE_FILTERS]; - int pred_exists = 0; - int uv_skippable; - - this_rd_thresh = (ref_frame == LAST_FRAME) ? - rd_opt->threshes[segment_id][bsize][THR_LAST] : - rd_opt->threshes[segment_id][bsize][THR_ALTR]; - this_rd_thresh = (ref_frame == GOLDEN_FRAME) ? 
- rd_opt->threshes[segment_id][bsize][THR_GOLD] : this_rd_thresh; - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) - filter_cache[i] = INT64_MAX; - - // TODO(any): Add search of the tx_type to improve rd performance at the - // expense of speed. - mbmi->tx_type = DCT_DCT; - - if (cm->interp_filter != BILINEAR) { - tmp_best_filter = EIGHTTAP; - if (x->source_variance < sf->disable_filter_search_var_thresh) { - tmp_best_filter = EIGHTTAP; - } else if (sf->adaptive_pred_interp_filter == 1 && - ctx->pred_interp_filter < SWITCHABLE) { - tmp_best_filter = ctx->pred_interp_filter; - } else if (sf->adaptive_pred_interp_filter == 2) { - tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE ? - ctx->pred_interp_filter : 0; - } else { - for (switchable_filter_index = 0; - switchable_filter_index < SWITCHABLE_FILTERS; - ++switchable_filter_index) { - int newbest, rs; - int64_t rs_rd; - MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext; - mbmi->interp_filter = switchable_filter_index; - tmp_rd = rd_pick_best_sub8x8_mode(cpi, x, - &mbmi_ext->ref_mvs[ref_frame][0], - second_ref, best_yrd, &rate, - &rate_y, &distortion, - &skippable, &total_sse, - (int) this_rd_thresh, seg_mvs, - bsi, switchable_filter_index, - mi_row, mi_col); - - if (tmp_rd == INT64_MAX) - continue; - rs = vp10_get_switchable_rate(cpi, xd); - rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0); - filter_cache[switchable_filter_index] = tmp_rd; - filter_cache[SWITCHABLE_FILTERS] = - VPXMIN(filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd); - if (cm->interp_filter == SWITCHABLE) - tmp_rd += rs_rd; - - mask_filter = VPXMAX(mask_filter, tmp_rd); - - newbest = (tmp_rd < tmp_best_rd); - if (newbest) { - tmp_best_filter = mbmi->interp_filter; - tmp_best_rd = tmp_rd; - } - if ((newbest && cm->interp_filter == SWITCHABLE) || - (mbmi->interp_filter == cm->interp_filter && - cm->interp_filter != SWITCHABLE)) { - tmp_best_rdu = tmp_rd; - tmp_best_rate = rate; - tmp_best_ratey = rate_y; - tmp_best_distortion = distortion; - tmp_best_sse = 
total_sse; - tmp_best_skippable = skippable; - tmp_best_mbmode = *mbmi; - for (i = 0; i < 4; i++) { - tmp_best_bmodes[i] = xd->mi[0]->bmi[i]; - x->zcoeff_blk[TX_4X4][i] = !x->plane[0].eobs[i]; - } - pred_exists = 1; - if (switchable_filter_index == 0 && - sf->use_rd_breakout && - best_rd < INT64_MAX) { - if (tmp_best_rdu / 2 > best_rd) { - // skip searching the other filters if the first is - // already substantially larger than the best so far - tmp_best_filter = mbmi->interp_filter; - tmp_best_rdu = INT64_MAX; - break; - } - } - } - } // switchable_filter_index loop - } - } - - if (tmp_best_rdu == INT64_MAX && pred_exists) - continue; - - mbmi->interp_filter = (cm->interp_filter == SWITCHABLE ? - tmp_best_filter : cm->interp_filter); - if (!pred_exists) { - // Handles the special case when a filter that is not in the - // switchable list (bilinear, 6-tap) is indicated at the frame level - tmp_rd = rd_pick_best_sub8x8_mode(cpi, x, - &x->mbmi_ext->ref_mvs[ref_frame][0], - second_ref, best_yrd, &rate, &rate_y, - &distortion, &skippable, &total_sse, - (int) this_rd_thresh, seg_mvs, bsi, 0, - mi_row, mi_col); - if (tmp_rd == INT64_MAX) - continue; - } else { - total_sse = tmp_best_sse; - rate = tmp_best_rate; - rate_y = tmp_best_ratey; - distortion = tmp_best_distortion; - skippable = tmp_best_skippable; - *mbmi = tmp_best_mbmode; - for (i = 0; i < 4; i++) - xd->mi[0]->bmi[i] = tmp_best_bmodes[i]; - } - - rate2 += rate; - distortion2 += distortion; - - if (cm->interp_filter == SWITCHABLE) - rate2 += vp10_get_switchable_rate(cpi, xd); - - if (!mode_excluded) - mode_excluded = comp_pred ? 
cm->reference_mode == SINGLE_REFERENCE - : cm->reference_mode == COMPOUND_REFERENCE; - - compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred); - - tmp_best_rdu = best_rd - - VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2), - RDCOST(x->rdmult, x->rddiv, 0, total_sse)); - - if (tmp_best_rdu > 0) { - // If even the 'Y' rd value of split is higher than best so far - // then dont bother looking at UV - vp10_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, - BLOCK_8X8); - memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm)); - if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable, - &uv_sse, BLOCK_8X8, tmp_best_rdu)) - continue; - - rate2 += rate_uv; - distortion2 += distortion_uv; - skippable = skippable && uv_skippable; - total_sse += uv_sse; - } - } - - if (cm->reference_mode == REFERENCE_MODE_SELECT) - rate2 += compmode_cost; - - // Estimate the reference frame signaling cost and add it - // to the rolling cost variable. - if (second_ref_frame > INTRA_FRAME) { - rate2 += ref_costs_comp[ref_frame]; - } else { - rate2 += ref_costs_single[ref_frame]; - } - - if (!disable_skip) { - // Skip is never coded at the segment level for sub8x8 blocks and instead - // always coded in the bitstream at the mode info level. - - if (ref_frame != INTRA_FRAME && !xd->lossless[mbmi->segment_id]) { - if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) < - RDCOST(x->rdmult, x->rddiv, 0, total_sse)) { - // Add in the cost of the no skip flag. - rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0); - } else { - // FIXME(rbultje) make this work for splitmv also - rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1); - distortion2 = total_sse; - assert(total_sse >= 0); - rate2 -= (rate_y + rate_uv); - rate_y = 0; - rate_uv = 0; - this_skip2 = 1; - } - } else { - // Add in the cost of the no skip flag. - rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0); - } - - // Calculate the final RD estimate for this mode. 
- this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2); - } - - if (!disable_skip && ref_frame == INTRA_FRAME) { - for (i = 0; i < REFERENCE_MODES; ++i) - best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd); - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) - best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd); - } - - // Did this mode help.. i.e. is it the new best mode - if (this_rd < best_rd || x->skip) { - if (!mode_excluded) { - int max_plane = MAX_MB_PLANE; - // Note index of best mode so far - best_ref_index = ref_index; - - if (ref_frame == INTRA_FRAME) { - /* required for left and above block mv */ - mbmi->mv[0].as_int = 0; - max_plane = 1; - } - - rd_cost->rate = rate2; - rd_cost->dist = distortion2; - rd_cost->rdcost = this_rd; - best_rd = this_rd; - best_yrd = best_rd - - RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv); - best_mbmode = *mbmi; - best_skip2 = this_skip2; - if (!x->select_tx_size) - swap_block_ptr(x, ctx, 1, 0, 0, max_plane); - memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4], - sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk); - - for (i = 0; i < 4; i++) - best_bmodes[i] = xd->mi[0]->bmi[i]; - - // TODO(debargha): enhance this test with a better distortion prediction - // based on qp, activity mask and history - if ((sf->mode_search_skip_flags & FLAG_EARLY_TERMINATE) && - (ref_index > MIN_EARLY_TERM_INDEX)) { - int qstep = xd->plane[0].dequant[1]; - // TODO(debargha): Enhance this by specializing for each mode_index - int scale = 4; -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - qstep >>= (xd->bd - 8); - } -#endif // CONFIG_VP9_HIGHBITDEPTH - if (x->source_variance < UINT_MAX) { - const int var_adjust = (x->source_variance < 16); - scale -= var_adjust; - } - if (ref_frame > INTRA_FRAME && - distortion2 * scale < qstep * qstep) { - early_term = 1; - } - } - } - } - - /* keep record of best compound/single-only prediction */ - if (!disable_skip && ref_frame != INTRA_FRAME) { - int64_t single_rd, 
hybrid_rd, single_rate, hybrid_rate; - - if (cm->reference_mode == REFERENCE_MODE_SELECT) { - single_rate = rate2 - compmode_cost; - hybrid_rate = rate2; - } else { - single_rate = rate2; - hybrid_rate = rate2 + compmode_cost; - } - - single_rd = RDCOST(x->rdmult, x->rddiv, single_rate, distortion2); - hybrid_rd = RDCOST(x->rdmult, x->rddiv, hybrid_rate, distortion2); - - if (!comp_pred && single_rd < best_pred_rd[SINGLE_REFERENCE]) - best_pred_rd[SINGLE_REFERENCE] = single_rd; - else if (comp_pred && single_rd < best_pred_rd[COMPOUND_REFERENCE]) - best_pred_rd[COMPOUND_REFERENCE] = single_rd; - - if (hybrid_rd < best_pred_rd[REFERENCE_MODE_SELECT]) - best_pred_rd[REFERENCE_MODE_SELECT] = hybrid_rd; - } - - /* keep record of best filter type */ - if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME && - cm->interp_filter != BILINEAR) { - int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ? - SWITCHABLE_FILTERS : cm->interp_filter]; - int64_t adj_rd; - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { - if (ref == INT64_MAX) - adj_rd = 0; - else if (filter_cache[i] == INT64_MAX) - // when early termination is triggered, the encoder does not have - // access to the rate-distortion cost. it only knows that the cost - // should be above the maximum valid value. hence it takes the known - // maximum plus an arbitrary constant as the rate-distortion cost. - adj_rd = mask_filter - ref + 10; - else - adj_rd = filter_cache[i] - ref; - - adj_rd += this_rd; - best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd); - } - } - - if (early_term) - break; - - if (x->skip && !comp_pred) - break; - } - - if (best_rd >= best_rd_so_far) { - rd_cost->rate = INT_MAX; - rd_cost->rdcost = INT64_MAX; - return; - } - - // If we used an estimate for the uv intra rd in the loop above... - if (sf->use_uv_intra_rd_estimate) { - // Do Intra UV best rd mode selection if best mode choice above was intra. 
- if (best_mbmode.ref_frame[0] == INTRA_FRAME) { - *mbmi = best_mbmode; - rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra, - &rate_uv_tokenonly, - &dist_uv, - &skip_uv, - BLOCK_8X8, TX_4X4); - } - } - - if (best_rd == INT64_MAX) { - rd_cost->rate = INT_MAX; - rd_cost->dist = INT64_MAX; - rd_cost->rdcost = INT64_MAX; - return; - } - - assert((cm->interp_filter == SWITCHABLE) || - (cm->interp_filter == best_mbmode.interp_filter) || - !is_inter_block(&best_mbmode)); - - vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact, - sf->adaptive_rd_thresh, bsize, best_ref_index); - - // macroblock modes - *mbmi = best_mbmode; - x->skip |= best_skip2; - if (!is_inter_block(&best_mbmode)) { - for (i = 0; i < 4; i++) - xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode; - } else { - for (i = 0; i < 4; ++i) - memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i], sizeof(b_mode_info)); - - mbmi->mv[0].as_int = xd->mi[0]->bmi[3].as_mv[0].as_int; - mbmi->mv[1].as_int = xd->mi[0]->bmi[3].as_mv[1].as_int; - } - - for (i = 0; i < REFERENCE_MODES; ++i) { - if (best_pred_rd[i] == INT64_MAX) - best_pred_diff[i] = INT_MIN; - else - best_pred_diff[i] = best_rd - best_pred_rd[i]; - } - - if (!x->skip) { - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { - if (best_filter_rd[i] == INT64_MAX) - best_filter_diff[i] = 0; - else - best_filter_diff[i] = best_rd - best_filter_rd[i]; - } - if (cm->interp_filter == SWITCHABLE) - assert(best_filter_diff[SWITCHABLE_FILTERS] == 0); - } else { - vp10_zero(best_filter_diff); - } - - store_coding_context(x, ctx, best_ref_index, - best_pred_diff, best_filter_diff, 0); -} diff --git a/libs/libvpx/vp10/encoder/rdopt.h b/libs/libvpx/vp10/encoder/rdopt.h deleted file mode 100644 index b1a8036279..0000000000 --- a/libs/libvpx/vp10/encoder/rdopt.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_ENCODER_RDOPT_H_ -#define VP10_ENCODER_RDOPT_H_ - -#include "vp10/common/blockd.h" - -#include "vp10/encoder/block.h" -#include "vp10/encoder/context_tree.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct TileInfo; -struct VP10_COMP; -struct macroblock; -struct RD_COST; - -void vp10_rd_pick_intra_mode_sb(struct VP10_COMP *cpi, struct macroblock *x, - struct RD_COST *rd_cost, BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx, int64_t best_rd); - -unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi, - const struct buf_2d *ref, - BLOCK_SIZE bs); -#if CONFIG_VP9_HIGHBITDEPTH -unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi, - const struct buf_2d *ref, - BLOCK_SIZE bs, int bd); -#endif - -void vp10_rd_pick_inter_mode_sb(struct VP10_COMP *cpi, - struct TileDataEnc *tile_data, - struct macroblock *x, - int mi_row, int mi_col, - struct RD_COST *rd_cost, - BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, - int64_t best_rd_so_far); - -void vp10_rd_pick_inter_mode_sb_seg_skip(struct VP10_COMP *cpi, - struct TileDataEnc *tile_data, - struct macroblock *x, - struct RD_COST *rd_cost, - BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx, - int64_t best_rd_so_far); - -int vp10_internal_image_edge(struct VP10_COMP *cpi); -int vp10_active_h_edge(struct VP10_COMP *cpi, int mi_row, int mi_step); -int vp10_active_v_edge(struct VP10_COMP *cpi, int mi_col, int mi_step); -int vp10_active_edge_sb(struct VP10_COMP *cpi, int mi_row, int mi_col); - -void vp10_rd_pick_inter_mode_sub8x8(struct VP10_COMP *cpi, - struct TileDataEnc *tile_data, - struct macroblock *x, - int mi_row, int mi_col, - struct RD_COST *rd_cost, - BLOCK_SIZE bsize, 
PICK_MODE_CONTEXT *ctx, - int64_t best_rd_so_far); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_RDOPT_H_ diff --git a/libs/libvpx/vp10/encoder/resize.c b/libs/libvpx/vp10/encoder/resize.c deleted file mode 100644 index 5572c17ad7..0000000000 --- a/libs/libvpx/vp10/encoder/resize.c +++ /dev/null @@ -1,928 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include -#include -#include -#include -#include - -#if CONFIG_VP9_HIGHBITDEPTH -#include "vpx_dsp/vpx_dsp_common.h" -#endif // CONFIG_VP9_HIGHBITDEPTH -#include "vpx_ports/mem.h" -#include "vp10/common/common.h" -#include "vp10/encoder/resize.h" - -#define FILTER_BITS 7 - -#define INTERP_TAPS 8 -#define SUBPEL_BITS 5 -#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1) -#define INTERP_PRECISION_BITS 32 - -typedef int16_t interp_kernel[INTERP_TAPS]; - -// Filters for interpolation (0.5-band) - note this also filters integer pels. 
-static const interp_kernel filteredinterp_filters500[(1 << SUBPEL_BITS)] = { - {-3, 0, 35, 64, 35, 0, -3, 0}, - {-3, -1, 34, 64, 36, 1, -3, 0}, - {-3, -1, 32, 64, 38, 1, -3, 0}, - {-2, -2, 31, 63, 39, 2, -3, 0}, - {-2, -2, 29, 63, 41, 2, -3, 0}, - {-2, -2, 28, 63, 42, 3, -4, 0}, - {-2, -3, 27, 63, 43, 4, -4, 0}, - {-2, -3, 25, 62, 45, 5, -4, 0}, - {-2, -3, 24, 62, 46, 5, -4, 0}, - {-2, -3, 23, 61, 47, 6, -4, 0}, - {-2, -3, 21, 60, 49, 7, -4, 0}, - {-1, -4, 20, 60, 50, 8, -4, -1}, - {-1, -4, 19, 59, 51, 9, -4, -1}, - {-1, -4, 17, 58, 52, 10, -4, 0}, - {-1, -4, 16, 57, 53, 12, -4, -1}, - {-1, -4, 15, 56, 54, 13, -4, -1}, - {-1, -4, 14, 55, 55, 14, -4, -1}, - {-1, -4, 13, 54, 56, 15, -4, -1}, - {-1, -4, 12, 53, 57, 16, -4, -1}, - {0, -4, 10, 52, 58, 17, -4, -1}, - {-1, -4, 9, 51, 59, 19, -4, -1}, - {-1, -4, 8, 50, 60, 20, -4, -1}, - {0, -4, 7, 49, 60, 21, -3, -2}, - {0, -4, 6, 47, 61, 23, -3, -2}, - {0, -4, 5, 46, 62, 24, -3, -2}, - {0, -4, 5, 45, 62, 25, -3, -2}, - {0, -4, 4, 43, 63, 27, -3, -2}, - {0, -4, 3, 42, 63, 28, -2, -2}, - {0, -3, 2, 41, 63, 29, -2, -2}, - {0, -3, 2, 39, 63, 31, -2, -2}, - {0, -3, 1, 38, 64, 32, -1, -3}, - {0, -3, 1, 36, 64, 34, -1, -3} -}; - -// Filters for interpolation (0.625-band) - note this also filters integer pels. 
-static const interp_kernel filteredinterp_filters625[(1 << SUBPEL_BITS)] = { - {-1, -8, 33, 80, 33, -8, -1, 0}, - {-1, -8, 30, 80, 35, -8, -1, 1}, - {-1, -8, 28, 80, 37, -7, -2, 1}, - {0, -8, 26, 79, 39, -7, -2, 1}, - {0, -8, 24, 79, 41, -7, -2, 1}, - {0, -8, 22, 78, 43, -6, -2, 1}, - {0, -8, 20, 78, 45, -5, -3, 1}, - {0, -8, 18, 77, 48, -5, -3, 1}, - {0, -8, 16, 76, 50, -4, -3, 1}, - {0, -8, 15, 75, 52, -3, -4, 1}, - {0, -7, 13, 74, 54, -3, -4, 1}, - {0, -7, 11, 73, 56, -2, -4, 1}, - {0, -7, 10, 71, 58, -1, -4, 1}, - {1, -7, 8, 70, 60, 0, -5, 1}, - {1, -6, 6, 68, 62, 1, -5, 1}, - {1, -6, 5, 67, 63, 2, -5, 1}, - {1, -6, 4, 65, 65, 4, -6, 1}, - {1, -5, 2, 63, 67, 5, -6, 1}, - {1, -5, 1, 62, 68, 6, -6, 1}, - {1, -5, 0, 60, 70, 8, -7, 1}, - {1, -4, -1, 58, 71, 10, -7, 0}, - {1, -4, -2, 56, 73, 11, -7, 0}, - {1, -4, -3, 54, 74, 13, -7, 0}, - {1, -4, -3, 52, 75, 15, -8, 0}, - {1, -3, -4, 50, 76, 16, -8, 0}, - {1, -3, -5, 48, 77, 18, -8, 0}, - {1, -3, -5, 45, 78, 20, -8, 0}, - {1, -2, -6, 43, 78, 22, -8, 0}, - {1, -2, -7, 41, 79, 24, -8, 0}, - {1, -2, -7, 39, 79, 26, -8, 0}, - {1, -2, -7, 37, 80, 28, -8, -1}, - {1, -1, -8, 35, 80, 30, -8, -1}, -}; - -// Filters for interpolation (0.75-band) - note this also filters integer pels. 
-static const interp_kernel filteredinterp_filters750[(1 << SUBPEL_BITS)] = { - {2, -11, 25, 96, 25, -11, 2, 0}, - {2, -11, 22, 96, 28, -11, 2, 0}, - {2, -10, 19, 95, 31, -11, 2, 0}, - {2, -10, 17, 95, 34, -12, 2, 0}, - {2, -9, 14, 94, 37, -12, 2, 0}, - {2, -8, 12, 93, 40, -12, 1, 0}, - {2, -8, 9, 92, 43, -12, 1, 1}, - {2, -7, 7, 91, 46, -12, 1, 0}, - {2, -7, 5, 90, 49, -12, 1, 0}, - {2, -6, 3, 88, 52, -12, 0, 1}, - {2, -5, 1, 86, 55, -12, 0, 1}, - {2, -5, -1, 84, 58, -11, 0, 1}, - {2, -4, -2, 82, 61, -11, -1, 1}, - {2, -4, -4, 80, 64, -10, -1, 1}, - {1, -3, -5, 77, 67, -9, -1, 1}, - {1, -3, -6, 75, 70, -8, -2, 1}, - {1, -2, -7, 72, 72, -7, -2, 1}, - {1, -2, -8, 70, 75, -6, -3, 1}, - {1, -1, -9, 67, 77, -5, -3, 1}, - {1, -1, -10, 64, 80, -4, -4, 2}, - {1, -1, -11, 61, 82, -2, -4, 2}, - {1, 0, -11, 58, 84, -1, -5, 2}, - {1, 0, -12, 55, 86, 1, -5, 2}, - {1, 0, -12, 52, 88, 3, -6, 2}, - {0, 1, -12, 49, 90, 5, -7, 2}, - {0, 1, -12, 46, 91, 7, -7, 2}, - {1, 1, -12, 43, 92, 9, -8, 2}, - {0, 1, -12, 40, 93, 12, -8, 2}, - {0, 2, -12, 37, 94, 14, -9, 2}, - {0, 2, -12, 34, 95, 17, -10, 2}, - {0, 2, -11, 31, 95, 19, -10, 2}, - {0, 2, -11, 28, 96, 22, -11, 2} -}; - -// Filters for interpolation (0.875-band) - note this also filters integer pels. 
-static const interp_kernel filteredinterp_filters875[(1 << SUBPEL_BITS)] = { - {3, -8, 13, 112, 13, -8, 3, 0}, - {3, -7, 10, 112, 17, -9, 3, -1}, - {2, -6, 7, 111, 21, -9, 3, -1}, - {2, -5, 4, 111, 24, -10, 3, -1}, - {2, -4, 1, 110, 28, -11, 3, -1}, - {1, -3, -1, 108, 32, -12, 4, -1}, - {1, -2, -3, 106, 36, -13, 4, -1}, - {1, -1, -6, 105, 40, -14, 4, -1}, - {1, -1, -7, 102, 44, -14, 4, -1}, - {1, 0, -9, 100, 48, -15, 4, -1}, - {1, 1, -11, 97, 53, -16, 4, -1}, - {0, 1, -12, 95, 57, -16, 4, -1}, - {0, 2, -13, 91, 61, -16, 4, -1}, - {0, 2, -14, 88, 65, -16, 4, -1}, - {0, 3, -15, 84, 69, -17, 4, 0}, - {0, 3, -16, 81, 73, -16, 3, 0}, - {0, 3, -16, 77, 77, -16, 3, 0}, - {0, 3, -16, 73, 81, -16, 3, 0}, - {0, 4, -17, 69, 84, -15, 3, 0}, - {-1, 4, -16, 65, 88, -14, 2, 0}, - {-1, 4, -16, 61, 91, -13, 2, 0}, - {-1, 4, -16, 57, 95, -12, 1, 0}, - {-1, 4, -16, 53, 97, -11, 1, 1}, - {-1, 4, -15, 48, 100, -9, 0, 1}, - {-1, 4, -14, 44, 102, -7, -1, 1}, - {-1, 4, -14, 40, 105, -6, -1, 1}, - {-1, 4, -13, 36, 106, -3, -2, 1}, - {-1, 4, -12, 32, 108, -1, -3, 1}, - {-1, 3, -11, 28, 110, 1, -4, 2}, - {-1, 3, -10, 24, 111, 4, -5, 2}, - {-1, 3, -9, 21, 111, 7, -6, 2}, - {-1, 3, -9, 17, 112, 10, -7, 3} -}; - -// Filters for interpolation (full-band) - no filtering for integer pixels -static const interp_kernel filteredinterp_filters1000[(1 << SUBPEL_BITS)] = { - {0, 0, 0, 128, 0, 0, 0, 0}, - {0, 1, -3, 128, 3, -1, 0, 0}, - {-1, 2, -6, 127, 7, -2, 1, 0}, - {-1, 3, -9, 126, 12, -4, 1, 0}, - {-1, 4, -12, 125, 16, -5, 1, 0}, - {-1, 4, -14, 123, 20, -6, 2, 0}, - {-1, 5, -15, 120, 25, -8, 2, 0}, - {-1, 5, -17, 118, 30, -9, 3, -1}, - {-1, 6, -18, 114, 35, -10, 3, -1}, - {-1, 6, -19, 111, 41, -12, 3, -1}, - {-1, 6, -20, 107, 46, -13, 4, -1}, - {-1, 6, -21, 103, 52, -14, 4, -1}, - {-1, 6, -21, 99, 57, -16, 5, -1}, - {-1, 6, -21, 94, 63, -17, 5, -1}, - {-1, 6, -20, 89, 68, -18, 5, -1}, - {-1, 6, -20, 84, 73, -19, 6, -1}, - {-1, 6, -20, 79, 79, -20, 6, -1}, - {-1, 6, -19, 73, 84, -20, 6, -1}, - {-1, 
5, -18, 68, 89, -20, 6, -1}, - {-1, 5, -17, 63, 94, -21, 6, -1}, - {-1, 5, -16, 57, 99, -21, 6, -1}, - {-1, 4, -14, 52, 103, -21, 6, -1}, - {-1, 4, -13, 46, 107, -20, 6, -1}, - {-1, 3, -12, 41, 111, -19, 6, -1}, - {-1, 3, -10, 35, 114, -18, 6, -1}, - {-1, 3, -9, 30, 118, -17, 5, -1}, - {0, 2, -8, 25, 120, -15, 5, -1}, - {0, 2, -6, 20, 123, -14, 4, -1}, - {0, 1, -5, 16, 125, -12, 4, -1}, - {0, 1, -4, 12, 126, -9, 3, -1}, - {0, 1, -2, 7, 127, -6, 2, -1}, - {0, 0, -1, 3, 128, -3, 1, 0} -}; - -// Filters for factor of 2 downsampling. -static const int16_t vp10_down2_symeven_half_filter[] = {56, 12, -3, -1}; -static const int16_t vp10_down2_symodd_half_filter[] = {64, 35, 0, -3}; - -static const interp_kernel *choose_interp_filter(int inlength, int outlength) { - int outlength16 = outlength * 16; - if (outlength16 >= inlength * 16) - return filteredinterp_filters1000; - else if (outlength16 >= inlength * 13) - return filteredinterp_filters875; - else if (outlength16 >= inlength * 11) - return filteredinterp_filters750; - else if (outlength16 >= inlength * 9) - return filteredinterp_filters625; - else - return filteredinterp_filters500; -} - -static void interpolate(const uint8_t *const input, int inlength, - uint8_t *output, int outlength) { - const int64_t delta = (((uint64_t)inlength << 32) + outlength / 2) / - outlength; - const int64_t offset = inlength > outlength ? 
- (((int64_t)(inlength - outlength) << 31) + outlength / 2) / outlength : - -(((int64_t)(outlength - inlength) << 31) + outlength / 2) / outlength; - uint8_t *optr = output; - int x, x1, x2, sum, k, int_pel, sub_pel; - int64_t y; - - const interp_kernel *interp_filters = - choose_interp_filter(inlength, outlength); - - x = 0; - y = offset; - while ((y >> INTERP_PRECISION_BITS) < (INTERP_TAPS / 2 - 1)) { - x++; - y += delta; - } - x1 = x; - x = outlength - 1; - y = delta * x + offset; - while ((y >> INTERP_PRECISION_BITS) + - (int64_t)(INTERP_TAPS / 2) >= inlength) { - x--; - y -= delta; - } - x2 = x; - if (x1 > x2) { - for (x = 0, y = offset; x < outlength; ++x, y += delta) { - const int16_t *filter; - int_pel = y >> INTERP_PRECISION_BITS; - sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK; - filter = interp_filters[sub_pel]; - sum = 0; - for (k = 0; k < INTERP_TAPS; ++k) { - const int pk = int_pel - INTERP_TAPS / 2 + 1 + k; - sum += filter[k] * input[(pk < 0 ? 0 : - (pk >= inlength ? inlength - 1 : pk))]; - } - *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); - } - } else { - // Initial part. - for (x = 0, y = offset; x < x1; ++x, y += delta) { - const int16_t *filter; - int_pel = y >> INTERP_PRECISION_BITS; - sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK; - filter = interp_filters[sub_pel]; - sum = 0; - for (k = 0; k < INTERP_TAPS; ++k) - sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0 ? - 0 : - int_pel - INTERP_TAPS / 2 + 1 + k)]; - *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); - } - // Middle part. 
- for (; x <= x2; ++x, y += delta) { - const int16_t *filter; - int_pel = y >> INTERP_PRECISION_BITS; - sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK; - filter = interp_filters[sub_pel]; - sum = 0; - for (k = 0; k < INTERP_TAPS; ++k) - sum += filter[k] * input[int_pel - INTERP_TAPS / 2 + 1 + k]; - *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); - } - // End part. - for (; x < outlength; ++x, y += delta) { - const int16_t *filter; - int_pel = y >> INTERP_PRECISION_BITS; - sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK; - filter = interp_filters[sub_pel]; - sum = 0; - for (k = 0; k < INTERP_TAPS; ++k) - sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >= - inlength ? inlength - 1 : - int_pel - INTERP_TAPS / 2 + 1 + k)]; - *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); - } - } -} - -static void down2_symeven(const uint8_t *const input, int length, - uint8_t *output) { - // Actual filter len = 2 * filter_len_half. - const int16_t *filter = vp10_down2_symeven_half_filter; - const int filter_len_half = sizeof(vp10_down2_symeven_half_filter) / 2; - int i, j; - uint8_t *optr = output; - int l1 = filter_len_half; - int l2 = (length - filter_len_half); - l1 += (l1 & 1); - l2 += (l2 & 1); - if (l1 > l2) { - // Short input length. - for (i = 0; i < length; i += 2) { - int sum = (1 << (FILTER_BITS - 1)); - for (j = 0; j < filter_len_half; ++j) { - sum += (input[(i - j < 0 ? 0 : i - j)] + - input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) * - filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel(sum); - } - } else { - // Initial part. - for (i = 0; i < l1; i += 2) { - int sum = (1 << (FILTER_BITS - 1)); - for (j = 0; j < filter_len_half; ++j) { - sum += (input[(i - j < 0 ? 0 : i - j)] + input[i + 1 + j]) * filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel(sum); - } - // Middle part. 
- for (; i < l2; i += 2) { - int sum = (1 << (FILTER_BITS - 1)); - for (j = 0; j < filter_len_half; ++j) { - sum += (input[i - j] + input[i + 1 + j]) * filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel(sum); - } - // End part. - for (; i < length; i += 2) { - int sum = (1 << (FILTER_BITS - 1)); - for (j = 0; j < filter_len_half; ++j) { - sum += (input[i - j] + - input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) * - filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel(sum); - } - } -} - -static void down2_symodd(const uint8_t *const input, int length, - uint8_t *output) { - // Actual filter len = 2 * filter_len_half - 1. - const int16_t *filter = vp10_down2_symodd_half_filter; - const int filter_len_half = sizeof(vp10_down2_symodd_half_filter) / 2; - int i, j; - uint8_t *optr = output; - int l1 = filter_len_half - 1; - int l2 = (length - filter_len_half + 1); - l1 += (l1 & 1); - l2 += (l2 & 1); - if (l1 > l2) { - // Short input length. - for (i = 0; i < length; i += 2) { - int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0]; - for (j = 1; j < filter_len_half; ++j) { - sum += (input[(i - j < 0 ? 0 : i - j)] + - input[(i + j >= length ? length - 1 : i + j)]) * - filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel(sum); - } - } else { - // Initial part. - for (i = 0; i < l1; i += 2) { - int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0]; - for (j = 1; j < filter_len_half; ++j) { - sum += (input[(i - j < 0 ? 0 : i - j)] + input[i + j]) * filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel(sum); - } - // Middle part. - for (; i < l2; i += 2) { - int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0]; - for (j = 1; j < filter_len_half; ++j) { - sum += (input[i - j] + input[i + j]) * filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel(sum); - } - // End part. 
- for (; i < length; i += 2) { - int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0]; - for (j = 1; j < filter_len_half; ++j) { - sum += (input[i - j] + input[(i + j >= length ? length - 1 : i + j)]) * - filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel(sum); - } - } -} - -static int get_down2_length(int length, int steps) { - int s; - for (s = 0; s < steps; ++s) - length = (length + 1) >> 1; - return length; -} - -static int get_down2_steps(int in_length, int out_length) { - int steps = 0; - int proj_in_length; - while ((proj_in_length = get_down2_length(in_length, 1)) >= out_length) { - ++steps; - in_length = proj_in_length; - } - return steps; -} - -static void resize_multistep(const uint8_t *const input, - int length, - uint8_t *output, - int olength, - uint8_t *buf) { - int steps; - if (length == olength) { - memcpy(output, input, sizeof(output[0]) * length); - return; - } - steps = get_down2_steps(length, olength); - - if (steps > 0) { - int s; - uint8_t *out = NULL; - uint8_t *tmpbuf = NULL; - uint8_t *otmp, *otmp2; - int filteredlength = length; - if (!tmpbuf) { - tmpbuf = (uint8_t *)malloc(sizeof(uint8_t) * length); - otmp = tmpbuf; - } else { - otmp = buf; - } - otmp2 = otmp + get_down2_length(length, 1); - for (s = 0; s < steps; ++s) { - const int proj_filteredlength = get_down2_length(filteredlength, 1); - const uint8_t *const in = (s == 0 ? input : out); - if (s == steps - 1 && proj_filteredlength == olength) - out = output; - else - out = (s & 1 ? 
otmp2 : otmp); - if (filteredlength & 1) - down2_symodd(in, filteredlength, out); - else - down2_symeven(in, filteredlength, out); - filteredlength = proj_filteredlength; - } - if (filteredlength != olength) { - interpolate(out, filteredlength, output, olength); - } - if (tmpbuf) - free(tmpbuf); - } else { - interpolate(input, length, output, olength); - } -} - -static void fill_col_to_arr(uint8_t *img, int stride, int len, uint8_t *arr) { - int i; - uint8_t *iptr = img; - uint8_t *aptr = arr; - for (i = 0; i < len; ++i, iptr += stride) { - *aptr++ = *iptr; - } -} - -static void fill_arr_to_col(uint8_t *img, int stride, int len, uint8_t *arr) { - int i; - uint8_t *iptr = img; - uint8_t *aptr = arr; - for (i = 0; i < len; ++i, iptr += stride) { - *iptr = *aptr++; - } -} - -void vp10_resize_plane(const uint8_t *const input, - int height, - int width, - int in_stride, - uint8_t *output, - int height2, - int width2, - int out_stride) { - int i; - uint8_t *intbuf = (uint8_t *)malloc(sizeof(uint8_t) * width2 * height); - uint8_t *tmpbuf = (uint8_t *)malloc(sizeof(uint8_t) * - (width < height ? height : width)); - uint8_t *arrbuf = (uint8_t *)malloc(sizeof(uint8_t) * (height + height2)); - assert(width > 0); - assert(height > 0); - assert(width2 > 0); - assert(height2 > 0); - for (i = 0; i < height; ++i) - resize_multistep(input + in_stride * i, width, - intbuf + width2 * i, width2, tmpbuf); - for (i = 0; i < width2; ++i) { - fill_col_to_arr(intbuf + i, width2, height, arrbuf); - resize_multistep(arrbuf, height, arrbuf + height, height2, tmpbuf); - fill_arr_to_col(output + i, out_stride, height2, arrbuf + height); - } - free(intbuf); - free(tmpbuf); - free(arrbuf); -} - -#if CONFIG_VP9_HIGHBITDEPTH -static void highbd_interpolate(const uint16_t *const input, int inlength, - uint16_t *output, int outlength, int bd) { - const int64_t delta = - (((uint64_t)inlength << 32) + outlength / 2) / outlength; - const int64_t offset = inlength > outlength ? 
- (((int64_t)(inlength - outlength) << 31) + outlength / 2) / outlength : - -(((int64_t)(outlength - inlength) << 31) + outlength / 2) / outlength; - uint16_t *optr = output; - int x, x1, x2, sum, k, int_pel, sub_pel; - int64_t y; - - const interp_kernel *interp_filters = - choose_interp_filter(inlength, outlength); - - x = 0; - y = offset; - while ((y >> INTERP_PRECISION_BITS) < (INTERP_TAPS / 2 - 1)) { - x++; - y += delta; - } - x1 = x; - x = outlength - 1; - y = delta * x + offset; - while ((y >> INTERP_PRECISION_BITS) + - (int64_t)(INTERP_TAPS / 2) >= inlength) { - x--; - y -= delta; - } - x2 = x; - if (x1 > x2) { - for (x = 0, y = offset; x < outlength; ++x, y += delta) { - const int16_t *filter; - int_pel = y >> INTERP_PRECISION_BITS; - sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK; - filter = interp_filters[sub_pel]; - sum = 0; - for (k = 0; k < INTERP_TAPS; ++k) { - const int pk = int_pel - INTERP_TAPS / 2 + 1 + k; - sum += filter[k] * - input[(pk < 0 ? 0 : (pk >= inlength ? inlength - 1 : pk))]; - } - *optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd); - } - } else { - // Initial part. - for (x = 0, y = offset; x < x1; ++x, y += delta) { - const int16_t *filter; - int_pel = y >> INTERP_PRECISION_BITS; - sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK; - filter = interp_filters[sub_pel]; - sum = 0; - for (k = 0; k < INTERP_TAPS; ++k) - sum += filter[k] * - input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0 ? - 0 : int_pel - INTERP_TAPS / 2 + 1 + k)]; - *optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd); - } - // Middle part. 
- for (; x <= x2; ++x, y += delta) { - const int16_t *filter; - int_pel = y >> INTERP_PRECISION_BITS; - sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK; - filter = interp_filters[sub_pel]; - sum = 0; - for (k = 0; k < INTERP_TAPS; ++k) - sum += filter[k] * input[int_pel - INTERP_TAPS / 2 + 1 + k]; - *optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd); - } - // End part. - for (; x < outlength; ++x, y += delta) { - const int16_t *filter; - int_pel = y >> INTERP_PRECISION_BITS; - sub_pel = (y >> (INTERP_PRECISION_BITS - SUBPEL_BITS)) & SUBPEL_MASK; - filter = interp_filters[sub_pel]; - sum = 0; - for (k = 0; k < INTERP_TAPS; ++k) - sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >= - inlength ? inlength - 1 : - int_pel - INTERP_TAPS / 2 + 1 + k)]; - *optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd); - } - } -} - -static void highbd_down2_symeven(const uint16_t *const input, int length, - uint16_t *output, int bd) { - // Actual filter len = 2 * filter_len_half. - static const int16_t *filter = vp10_down2_symeven_half_filter; - const int filter_len_half = sizeof(vp10_down2_symeven_half_filter) / 2; - int i, j; - uint16_t *optr = output; - int l1 = filter_len_half; - int l2 = (length - filter_len_half); - l1 += (l1 & 1); - l2 += (l2 & 1); - if (l1 > l2) { - // Short input length. - for (i = 0; i < length; i += 2) { - int sum = (1 << (FILTER_BITS - 1)); - for (j = 0; j < filter_len_half; ++j) { - sum += (input[(i - j < 0 ? 0 : i - j)] + - input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) * - filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel_highbd(sum, bd); - } - } else { - // Initial part. - for (i = 0; i < l1; i += 2) { - int sum = (1 << (FILTER_BITS - 1)); - for (j = 0; j < filter_len_half; ++j) { - sum += (input[(i - j < 0 ? 0 : i - j)] + input[i + 1 + j]) * filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel_highbd(sum, bd); - } - // Middle part. 
- for (; i < l2; i += 2) { - int sum = (1 << (FILTER_BITS - 1)); - for (j = 0; j < filter_len_half; ++j) { - sum += (input[i - j] + input[i + 1 + j]) * filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel_highbd(sum, bd); - } - // End part. - for (; i < length; i += 2) { - int sum = (1 << (FILTER_BITS - 1)); - for (j = 0; j < filter_len_half; ++j) { - sum += (input[i - j] + - input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) * - filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel_highbd(sum, bd); - } - } -} - -static void highbd_down2_symodd(const uint16_t *const input, int length, - uint16_t *output, int bd) { - // Actual filter len = 2 * filter_len_half - 1. - static const int16_t *filter = vp10_down2_symodd_half_filter; - const int filter_len_half = sizeof(vp10_down2_symodd_half_filter) / 2; - int i, j; - uint16_t *optr = output; - int l1 = filter_len_half - 1; - int l2 = (length - filter_len_half + 1); - l1 += (l1 & 1); - l2 += (l2 & 1); - if (l1 > l2) { - // Short input length. - for (i = 0; i < length; i += 2) { - int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0]; - for (j = 1; j < filter_len_half; ++j) { - sum += (input[(i - j < 0 ? 0 : i - j)] + - input[(i + j >= length ? length - 1 : i + j)]) * - filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel_highbd(sum, bd); - } - } else { - // Initial part. - for (i = 0; i < l1; i += 2) { - int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0]; - for (j = 1; j < filter_len_half; ++j) { - sum += (input[(i - j < 0 ? 0 : i - j)] + input[i + j]) * filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel_highbd(sum, bd); - } - // Middle part. - for (; i < l2; i += 2) { - int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0]; - for (j = 1; j < filter_len_half; ++j) { - sum += (input[i - j] + input[i + j]) * filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel_highbd(sum, bd); - } - // End part. 
- for (; i < length; i += 2) { - int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0]; - for (j = 1; j < filter_len_half; ++j) { - sum += (input[i - j] + input[(i + j >= length ? length - 1 : i + j)]) * - filter[j]; - } - sum >>= FILTER_BITS; - *optr++ = clip_pixel_highbd(sum, bd); - } - } -} - -static void highbd_resize_multistep(const uint16_t *const input, - int length, - uint16_t *output, - int olength, - uint16_t *buf, - int bd) { - int steps; - if (length == olength) { - memcpy(output, input, sizeof(output[0]) * length); - return; - } - steps = get_down2_steps(length, olength); - - if (steps > 0) { - int s; - uint16_t *out = NULL; - uint16_t *tmpbuf = NULL; - uint16_t *otmp, *otmp2; - int filteredlength = length; - if (!tmpbuf) { - tmpbuf = (uint16_t *)malloc(sizeof(uint16_t) * length); - otmp = tmpbuf; - } else { - otmp = buf; - } - otmp2 = otmp + get_down2_length(length, 1); - for (s = 0; s < steps; ++s) { - const int proj_filteredlength = get_down2_length(filteredlength, 1); - const uint16_t *const in = (s == 0 ? input : out); - if (s == steps - 1 && proj_filteredlength == olength) - out = output; - else - out = (s & 1 ? 
otmp2 : otmp); - if (filteredlength & 1) - highbd_down2_symodd(in, filteredlength, out, bd); - else - highbd_down2_symeven(in, filteredlength, out, bd); - filteredlength = proj_filteredlength; - } - if (filteredlength != olength) { - highbd_interpolate(out, filteredlength, output, olength, bd); - } - if (tmpbuf) - free(tmpbuf); - } else { - highbd_interpolate(input, length, output, olength, bd); - } -} - -static void highbd_fill_col_to_arr(uint16_t *img, int stride, int len, - uint16_t *arr) { - int i; - uint16_t *iptr = img; - uint16_t *aptr = arr; - for (i = 0; i < len; ++i, iptr += stride) { - *aptr++ = *iptr; - } -} - -static void highbd_fill_arr_to_col(uint16_t *img, int stride, int len, - uint16_t *arr) { - int i; - uint16_t *iptr = img; - uint16_t *aptr = arr; - for (i = 0; i < len; ++i, iptr += stride) { - *iptr = *aptr++; - } -} - -void vp10_highbd_resize_plane(const uint8_t *const input, - int height, - int width, - int in_stride, - uint8_t *output, - int height2, - int width2, - int out_stride, - int bd) { - int i; - uint16_t *intbuf = (uint16_t *)malloc(sizeof(uint16_t) * width2 * height); - uint16_t *tmpbuf = (uint16_t *)malloc(sizeof(uint16_t) * - (width < height ? 
height : width)); - uint16_t *arrbuf = (uint16_t *)malloc(sizeof(uint16_t) * (height + height2)); - for (i = 0; i < height; ++i) { - highbd_resize_multistep(CONVERT_TO_SHORTPTR(input + in_stride * i), width, - intbuf + width2 * i, width2, tmpbuf, bd); - } - for (i = 0; i < width2; ++i) { - highbd_fill_col_to_arr(intbuf + i, width2, height, arrbuf); - highbd_resize_multistep(arrbuf, height, arrbuf + height, height2, tmpbuf, - bd); - highbd_fill_arr_to_col(CONVERT_TO_SHORTPTR(output + i), out_stride, height2, - arrbuf + height); - } - free(intbuf); - free(tmpbuf); - free(arrbuf); -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -void vp10_resize_frame420(const uint8_t *const y, - int y_stride, - const uint8_t *const u, const uint8_t *const v, - int uv_stride, - int height, int width, - uint8_t *oy, int oy_stride, - uint8_t *ou, uint8_t *ov, int ouv_stride, - int oheight, int owidth) { - vp10_resize_plane(y, height, width, y_stride, - oy, oheight, owidth, oy_stride); - vp10_resize_plane(u, height / 2, width / 2, uv_stride, - ou, oheight / 2, owidth / 2, ouv_stride); - vp10_resize_plane(v, height / 2, width / 2, uv_stride, - ov, oheight / 2, owidth / 2, ouv_stride); -} - -void vp10_resize_frame422(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, - int uv_stride, - int height, int width, - uint8_t *oy, int oy_stride, - uint8_t *ou, uint8_t *ov, int ouv_stride, - int oheight, int owidth) { - vp10_resize_plane(y, height, width, y_stride, - oy, oheight, owidth, oy_stride); - vp10_resize_plane(u, height, width / 2, uv_stride, - ou, oheight, owidth / 2, ouv_stride); - vp10_resize_plane(v, height, width / 2, uv_stride, - ov, oheight, owidth / 2, ouv_stride); -} - -void vp10_resize_frame444(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, - int uv_stride, - int height, int width, - uint8_t *oy, int oy_stride, - uint8_t *ou, uint8_t *ov, int ouv_stride, - int oheight, int owidth) { - vp10_resize_plane(y, 
height, width, y_stride, - oy, oheight, owidth, oy_stride); - vp10_resize_plane(u, height, width, uv_stride, - ou, oheight, owidth, ouv_stride); - vp10_resize_plane(v, height, width, uv_stride, - ov, oheight, owidth, ouv_stride); -} - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_resize_frame420(const uint8_t *const y, - int y_stride, - const uint8_t *const u, const uint8_t *const v, - int uv_stride, - int height, int width, - uint8_t *oy, int oy_stride, - uint8_t *ou, uint8_t *ov, int ouv_stride, - int oheight, int owidth, int bd) { - vp10_highbd_resize_plane(y, height, width, y_stride, - oy, oheight, owidth, oy_stride, bd); - vp10_highbd_resize_plane(u, height / 2, width / 2, uv_stride, - ou, oheight / 2, owidth / 2, ouv_stride, bd); - vp10_highbd_resize_plane(v, height / 2, width / 2, uv_stride, - ov, oheight / 2, owidth / 2, ouv_stride, bd); -} - -void vp10_highbd_resize_frame422(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, - int uv_stride, - int height, int width, - uint8_t *oy, int oy_stride, - uint8_t *ou, uint8_t *ov, int ouv_stride, - int oheight, int owidth, int bd) { - vp10_highbd_resize_plane(y, height, width, y_stride, - oy, oheight, owidth, oy_stride, bd); - vp10_highbd_resize_plane(u, height, width / 2, uv_stride, - ou, oheight, owidth / 2, ouv_stride, bd); - vp10_highbd_resize_plane(v, height, width / 2, uv_stride, - ov, oheight, owidth / 2, ouv_stride, bd); -} - -void vp10_highbd_resize_frame444(const uint8_t *const y, int y_stride, - const uint8_t *const u, const uint8_t *const v, - int uv_stride, - int height, int width, - uint8_t *oy, int oy_stride, - uint8_t *ou, uint8_t *ov, int ouv_stride, - int oheight, int owidth, int bd) { - vp10_highbd_resize_plane(y, height, width, y_stride, - oy, oheight, owidth, oy_stride, bd); - vp10_highbd_resize_plane(u, height, width, uv_stride, - ou, oheight, owidth, ouv_stride, bd); - vp10_highbd_resize_plane(v, height, width, uv_stride, - ov, oheight, owidth, 
ouv_stride, bd); -} -#endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vp10/encoder/resize.h b/libs/libvpx/vp10/encoder/resize.h deleted file mode 100644 index bf6377097e..0000000000 --- a/libs/libvpx/vp10/encoder/resize.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_ENCODER_RESIZE_H_ -#define VP10_ENCODER_RESIZE_H_ - -#include -#include "vpx/vpx_integer.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void vp10_resize_plane(const uint8_t *const input, - int height, - int width, - int in_stride, - uint8_t *output, - int height2, - int width2, - int out_stride); -void vp10_resize_frame420(const uint8_t *const y, - int y_stride, - const uint8_t *const u, - const uint8_t *const v, - int uv_stride, - int height, - int width, - uint8_t *oy, - int oy_stride, - uint8_t *ou, - uint8_t *ov, - int ouv_stride, - int oheight, - int owidth); -void vp10_resize_frame422(const uint8_t *const y, - int y_stride, - const uint8_t *const u, - const uint8_t *const v, - int uv_stride, - int height, - int width, - uint8_t *oy, - int oy_stride, - uint8_t *ou, - uint8_t *ov, - int ouv_stride, - int oheight, - int owidth); -void vp10_resize_frame444(const uint8_t *const y, - int y_stride, - const uint8_t *const u, - const uint8_t *const v, - int uv_stride, - int height, - int width, - uint8_t *oy, - int oy_stride, - uint8_t *ou, - uint8_t *ov, - int ouv_stride, - int oheight, - int owidth); - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_resize_plane(const uint8_t *const input, - int height, - int width, - int in_stride, - uint8_t *output, - int height2, - int width2, - 
int out_stride, - int bd); -void vp10_highbd_resize_frame420(const uint8_t *const y, - int y_stride, - const uint8_t *const u, - const uint8_t *const v, - int uv_stride, - int height, - int width, - uint8_t *oy, - int oy_stride, - uint8_t *ou, - uint8_t *ov, - int ouv_stride, - int oheight, - int owidth, - int bd); -void vp10_highbd_resize_frame422(const uint8_t *const y, - int y_stride, - const uint8_t *const u, - const uint8_t *const v, - int uv_stride, - int height, - int width, - uint8_t *oy, - int oy_stride, - uint8_t *ou, - uint8_t *ov, - int ouv_stride, - int oheight, - int owidth, - int bd); -void vp10_highbd_resize_frame444(const uint8_t *const y, - int y_stride, - const uint8_t *const u, - const uint8_t *const v, - int uv_stride, - int height, - int width, - uint8_t *oy, - int oy_stride, - uint8_t *ou, - uint8_t *ov, - int ouv_stride, - int oheight, - int owidth, - int bd); -#endif // CONFIG_VP9_HIGHBITDEPTH - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_RESIZE_H_ diff --git a/libs/libvpx/vp10/encoder/segmentation.c b/libs/libvpx/vp10/encoder/segmentation.c deleted file mode 100644 index 677910fa37..0000000000 --- a/libs/libvpx/vp10/encoder/segmentation.c +++ /dev/null @@ -1,331 +0,0 @@ -/* - * Copyright (c) 2012 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - - -#include - -#include "vpx_mem/vpx_mem.h" - -#include "vp10/common/pred_common.h" -#include "vp10/common/tile_common.h" - -#include "vp10/encoder/cost.h" -#include "vp10/encoder/segmentation.h" -#include "vp10/encoder/subexp.h" - -void vp10_enable_segmentation(struct segmentation *seg) { - seg->enabled = 1; - seg->update_map = 1; - seg->update_data = 1; -} - -void vp10_disable_segmentation(struct segmentation *seg) { - seg->enabled = 0; - seg->update_map = 0; - seg->update_data = 0; -} - -void vp10_set_segment_data(struct segmentation *seg, - signed char *feature_data, - unsigned char abs_delta) { - seg->abs_delta = abs_delta; - - memcpy(seg->feature_data, feature_data, sizeof(seg->feature_data)); -} -void vp10_disable_segfeature(struct segmentation *seg, int segment_id, - SEG_LVL_FEATURES feature_id) { - seg->feature_mask[segment_id] &= ~(1 << feature_id); -} - -void vp10_clear_segdata(struct segmentation *seg, int segment_id, - SEG_LVL_FEATURES feature_id) { - seg->feature_data[segment_id][feature_id] = 0; -} - -// Based on set of segment counts calculate a probability tree -static void calc_segtree_probs(unsigned *segcounts, - vpx_prob *segment_tree_probs, const vpx_prob *cur_tree_probs) { - // Work out probabilities of each segment - const unsigned cc[4] = { - segcounts[0] + segcounts[1], segcounts[2] + segcounts[3], - segcounts[4] + segcounts[5], segcounts[6] + segcounts[7] - }; - const unsigned ccc[2] = { cc[0] + cc[1], cc[2] + cc[3] }; -#if CONFIG_MISC_FIXES - int i; -#endif - - segment_tree_probs[0] = get_binary_prob(ccc[0], ccc[1]); - segment_tree_probs[1] = get_binary_prob(cc[0], cc[1]); - segment_tree_probs[2] = get_binary_prob(cc[2], cc[3]); - segment_tree_probs[3] = get_binary_prob(segcounts[0], segcounts[1]); - segment_tree_probs[4] = get_binary_prob(segcounts[2], segcounts[3]); - segment_tree_probs[5] = get_binary_prob(segcounts[4], segcounts[5]); - segment_tree_probs[6] = get_binary_prob(segcounts[6], segcounts[7]); - -#if CONFIG_MISC_FIXES 
- for (i = 0; i < 7; i++) { - const unsigned *ct = i == 0 ? ccc : i < 3 ? cc + (i & 2) - : segcounts + (i - 3) * 2; - vp10_prob_diff_update_savings_search(ct, - cur_tree_probs[i], &segment_tree_probs[i], DIFF_UPDATE_PROB); - } -#else - (void) cur_tree_probs; -#endif -} - -// Based on set of segment counts and probabilities calculate a cost estimate -static int cost_segmap(unsigned *segcounts, vpx_prob *probs) { - const int c01 = segcounts[0] + segcounts[1]; - const int c23 = segcounts[2] + segcounts[3]; - const int c45 = segcounts[4] + segcounts[5]; - const int c67 = segcounts[6] + segcounts[7]; - const int c0123 = c01 + c23; - const int c4567 = c45 + c67; - - // Cost the top node of the tree - int cost = c0123 * vp10_cost_zero(probs[0]) + - c4567 * vp10_cost_one(probs[0]); - - // Cost subsequent levels - if (c0123 > 0) { - cost += c01 * vp10_cost_zero(probs[1]) + - c23 * vp10_cost_one(probs[1]); - - if (c01 > 0) - cost += segcounts[0] * vp10_cost_zero(probs[3]) + - segcounts[1] * vp10_cost_one(probs[3]); - if (c23 > 0) - cost += segcounts[2] * vp10_cost_zero(probs[4]) + - segcounts[3] * vp10_cost_one(probs[4]); - } - - if (c4567 > 0) { - cost += c45 * vp10_cost_zero(probs[2]) + - c67 * vp10_cost_one(probs[2]); - - if (c45 > 0) - cost += segcounts[4] * vp10_cost_zero(probs[5]) + - segcounts[5] * vp10_cost_one(probs[5]); - if (c67 > 0) - cost += segcounts[6] * vp10_cost_zero(probs[6]) + - segcounts[7] * vp10_cost_one(probs[6]); - } - - return cost; -} - -static void count_segs(const VP10_COMMON *cm, MACROBLOCKD *xd, - const TileInfo *tile, MODE_INFO **mi, - unsigned *no_pred_segcounts, - unsigned (*temporal_predictor_count)[2], - unsigned *t_unpred_seg_counts, - int bw, int bh, int mi_row, int mi_col) { - int segment_id; - - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; - - xd->mi = mi; - segment_id = xd->mi[0]->mbmi.segment_id; - - set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols); - - // Count the number of hits on each 
segment with no prediction - no_pred_segcounts[segment_id]++; - - // Temporal prediction not allowed on key frames - if (cm->frame_type != KEY_FRAME) { - const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type; - // Test to see if the segment id matches the predicted value. - const int pred_segment_id = get_segment_id(cm, cm->last_frame_seg_map, - bsize, mi_row, mi_col); - const int pred_flag = pred_segment_id == segment_id; - const int pred_context = vp10_get_pred_context_seg_id(xd); - - // Store the prediction status for this mb and update counts - // as appropriate - xd->mi[0]->mbmi.seg_id_predicted = pred_flag; - temporal_predictor_count[pred_context][pred_flag]++; - - // Update the "unpredicted" segment count - if (!pred_flag) - t_unpred_seg_counts[segment_id]++; - } -} - -static void count_segs_sb(const VP10_COMMON *cm, MACROBLOCKD *xd, - const TileInfo *tile, MODE_INFO **mi, - unsigned *no_pred_segcounts, - unsigned (*temporal_predictor_count)[2], - unsigned *t_unpred_seg_counts, - int mi_row, int mi_col, - BLOCK_SIZE bsize) { - const int mis = cm->mi_stride; - int bw, bh; - const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2; - - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; - - bw = num_8x8_blocks_wide_lookup[mi[0]->mbmi.sb_type]; - bh = num_8x8_blocks_high_lookup[mi[0]->mbmi.sb_type]; - - if (bw == bs && bh == bs) { - count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count, - t_unpred_seg_counts, bs, bs, mi_row, mi_col); - } else if (bw == bs && bh < bs) { - count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count, - t_unpred_seg_counts, bs, hbs, mi_row, mi_col); - count_segs(cm, xd, tile, mi + hbs * mis, no_pred_segcounts, - temporal_predictor_count, t_unpred_seg_counts, bs, hbs, - mi_row + hbs, mi_col); - } else if (bw < bs && bh == bs) { - count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count, - t_unpred_seg_counts, hbs, bs, mi_row, mi_col); - count_segs(cm, xd, tile, mi + hbs, - 
no_pred_segcounts, temporal_predictor_count, t_unpred_seg_counts, - hbs, bs, mi_row, mi_col + hbs); - } else { - const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize]; - int n; - - assert(bw < bs && bh < bs); - - for (n = 0; n < 4; n++) { - const int mi_dc = hbs * (n & 1); - const int mi_dr = hbs * (n >> 1); - - count_segs_sb(cm, xd, tile, &mi[mi_dr * mis + mi_dc], - no_pred_segcounts, temporal_predictor_count, - t_unpred_seg_counts, - mi_row + mi_dr, mi_col + mi_dc, subsize); - } - } -} - -void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd) { - struct segmentation *seg = &cm->seg; -#if CONFIG_MISC_FIXES - struct segmentation_probs *segp = &cm->fc->seg; -#else - struct segmentation_probs *segp = &cm->segp; -#endif - - int no_pred_cost; - int t_pred_cost = INT_MAX; - - int i, tile_col, mi_row, mi_col; - -#if CONFIG_MISC_FIXES - unsigned (*temporal_predictor_count)[2] = cm->counts.seg.pred; - unsigned *no_pred_segcounts = cm->counts.seg.tree_total; - unsigned *t_unpred_seg_counts = cm->counts.seg.tree_mispred; -#else - unsigned temporal_predictor_count[PREDICTION_PROBS][2] = { { 0 } }; - unsigned no_pred_segcounts[MAX_SEGMENTS] = { 0 }; - unsigned t_unpred_seg_counts[MAX_SEGMENTS] = { 0 }; -#endif - - vpx_prob no_pred_tree[SEG_TREE_PROBS]; - vpx_prob t_pred_tree[SEG_TREE_PROBS]; - vpx_prob t_nopred_prob[PREDICTION_PROBS]; - -#if CONFIG_MISC_FIXES - (void) xd; -#else - // Set default state for the segment tree probabilities and the - // temporal coding probabilities - memset(segp->tree_probs, 255, sizeof(segp->tree_probs)); - memset(segp->pred_probs, 255, sizeof(segp->pred_probs)); -#endif - - // First of all generate stats regarding how well the last segment map - // predicts this one - for (tile_col = 0; tile_col < 1 << cm->log2_tile_cols; tile_col++) { - TileInfo tile; - MODE_INFO **mi_ptr; - vp10_tile_init(&tile, cm, 0, tile_col); - - mi_ptr = cm->mi_grid_visible + tile.mi_col_start; - for (mi_row = 0; mi_row < cm->mi_rows; - mi_row 
+= 8, mi_ptr += 8 * cm->mi_stride) { - MODE_INFO **mi = mi_ptr; - for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end; - mi_col += 8, mi += 8) - count_segs_sb(cm, xd, &tile, mi, no_pred_segcounts, - temporal_predictor_count, t_unpred_seg_counts, - mi_row, mi_col, BLOCK_64X64); - } - } - - // Work out probability tree for coding segments without prediction - // and the cost. - calc_segtree_probs(no_pred_segcounts, no_pred_tree, segp->tree_probs); - no_pred_cost = cost_segmap(no_pred_segcounts, no_pred_tree); - - // Key frames cannot use temporal prediction - if (!frame_is_intra_only(cm) && !cm->error_resilient_mode) { - // Work out probability tree for coding those segments not - // predicted using the temporal method and the cost. - calc_segtree_probs(t_unpred_seg_counts, t_pred_tree, segp->tree_probs); - t_pred_cost = cost_segmap(t_unpred_seg_counts, t_pred_tree); - - // Add in the cost of the signaling for each prediction context. - for (i = 0; i < PREDICTION_PROBS; i++) { - const int count0 = temporal_predictor_count[i][0]; - const int count1 = temporal_predictor_count[i][1]; - -#if CONFIG_MISC_FIXES - vp10_prob_diff_update_savings_search(temporal_predictor_count[i], - segp->pred_probs[i], - &t_nopred_prob[i], DIFF_UPDATE_PROB); -#else - t_nopred_prob[i] = get_binary_prob(count0, count1); -#endif - - // Add in the predictor signaling cost - t_pred_cost += count0 * vp10_cost_zero(t_nopred_prob[i]) + - count1 * vp10_cost_one(t_nopred_prob[i]); - } - } - - // Now choose which coding method to use. 
- if (t_pred_cost < no_pred_cost) { - assert(!cm->error_resilient_mode); - seg->temporal_update = 1; -#if !CONFIG_MISC_FIXES - memcpy(segp->tree_probs, t_pred_tree, sizeof(t_pred_tree)); - memcpy(segp->pred_probs, t_nopred_prob, sizeof(t_nopred_prob)); -#endif - } else { - seg->temporal_update = 0; -#if !CONFIG_MISC_FIXES - memcpy(segp->tree_probs, no_pred_tree, sizeof(no_pred_tree)); -#endif - } -} - -void vp10_reset_segment_features(VP10_COMMON *cm) { - struct segmentation *seg = &cm->seg; -#if !CONFIG_MISC_FIXES - struct segmentation_probs *segp = &cm->segp; -#endif - - // Set up default state for MB feature flags - seg->enabled = 0; - seg->update_map = 0; - seg->update_data = 0; -#if !CONFIG_MISC_FIXES - memset(segp->tree_probs, 255, sizeof(segp->tree_probs)); -#endif - vp10_clearall_segfeatures(seg); -} diff --git a/libs/libvpx/vp10/encoder/segmentation.h b/libs/libvpx/vp10/encoder/segmentation.h deleted file mode 100644 index b8e6c06c69..0000000000 --- a/libs/libvpx/vp10/encoder/segmentation.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - - -#ifndef VP10_ENCODER_SEGMENTATION_H_ -#define VP10_ENCODER_SEGMENTATION_H_ - -#include "vp10/common/blockd.h" -#include "vp10/encoder/encoder.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void vp10_enable_segmentation(struct segmentation *seg); -void vp10_disable_segmentation(struct segmentation *seg); - -void vp10_disable_segfeature(struct segmentation *seg, - int segment_id, - SEG_LVL_FEATURES feature_id); -void vp10_clear_segdata(struct segmentation *seg, - int segment_id, - SEG_LVL_FEATURES feature_id); - -// The values given for each segment can be either deltas (from the default -// value chosen for the frame) or absolute values. -// -// Valid range for abs values is (0-127 for MB_LVL_ALT_Q), (0-63 for -// SEGMENT_ALT_LF) -// Valid range for delta values are (+/-127 for MB_LVL_ALT_Q), (+/-63 for -// SEGMENT_ALT_LF) -// -// abs_delta = SEGMENT_DELTADATA (deltas) abs_delta = SEGMENT_ABSDATA (use -// the absolute values given). -void vp10_set_segment_data(struct segmentation *seg, signed char *feature_data, - unsigned char abs_delta); - -void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd); - -void vp10_reset_segment_features(VP10_COMMON *cm); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_SEGMENTATION_H_ diff --git a/libs/libvpx/vp10/encoder/skin_detection.c b/libs/libvpx/vp10/encoder/skin_detection.c deleted file mode 100644 index 9aac477a8a..0000000000 --- a/libs/libvpx/vp10/encoder/skin_detection.c +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include - -#include "vp10/common/blockd.h" -#include "vp10/encoder/encoder.h" -#include "vp10/encoder/skin_detection.h" - -// Fixed-point skin color model parameters. -static const int skin_mean[2] = {7463, 9614}; // q6 -static const int skin_inv_cov[4] = {4107, 1663, 1663, 2157}; // q16 -static const int skin_threshold = 1570636; // q18 - -// Thresholds on luminance. -static const int y_low = 20; -static const int y_high = 220; - -// Evaluates the Mahalanobis distance measure for the input CbCr values. -static int evaluate_skin_color_difference(int cb, int cr) { - const int cb_q6 = cb << 6; - const int cr_q6 = cr << 6; - const int cb_diff_q12 = (cb_q6 - skin_mean[0]) * (cb_q6 - skin_mean[0]); - const int cbcr_diff_q12 = (cb_q6 - skin_mean[0]) * (cr_q6 - skin_mean[1]); - const int cr_diff_q12 = (cr_q6 - skin_mean[1]) * (cr_q6 - skin_mean[1]); - const int cb_diff_q2 = (cb_diff_q12 + (1 << 9)) >> 10; - const int cbcr_diff_q2 = (cbcr_diff_q12 + (1 << 9)) >> 10; - const int cr_diff_q2 = (cr_diff_q12 + (1 << 9)) >> 10; - const int skin_diff = skin_inv_cov[0] * cb_diff_q2 + - skin_inv_cov[1] * cbcr_diff_q2 + - skin_inv_cov[2] * cbcr_diff_q2 + - skin_inv_cov[3] * cr_diff_q2; - return skin_diff; -} - -int vp10_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr) { - if (y < y_low || y > y_high) - return 0; - else - return (evaluate_skin_color_difference(cb, cr) < skin_threshold); -} - -#ifdef OUTPUT_YUV_SKINMAP -// For viewing skin map on input source. 
-void vp10_compute_skin_map(VP10_COMP *const cpi, FILE *yuv_skinmap_file) { - int i, j, mi_row, mi_col; - VP10_COMMON *const cm = &cpi->common; - uint8_t *y; - const uint8_t *src_y = cpi->Source->y_buffer; - const uint8_t *src_u = cpi->Source->u_buffer; - const uint8_t *src_v = cpi->Source->v_buffer; - const int src_ystride = cpi->Source->y_stride; - const int src_uvstride = cpi->Source->uv_stride; - YV12_BUFFER_CONFIG skinmap; - memset(&skinmap, 0, sizeof(YV12_BUFFER_CONFIG)); - if (vpx_alloc_frame_buffer(&skinmap, cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, - VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment)) { - vpx_free_frame_buffer(&skinmap); - return; - } - memset(skinmap.buffer_alloc, 128, skinmap.frame_size); - y = skinmap.y_buffer; - // Loop through 8x8 blocks and set skin map based on center pixel of block. - // Set y to white for skin block, otherwise set to source with gray scale. - // Ignore rightmost/bottom boundary blocks. - for (mi_row = 0; mi_row < cm->mi_rows - 1; ++mi_row) { - for (mi_col = 0; mi_col < cm->mi_cols - 1; ++mi_col) { - // Use middle pixel for each 8x8 block for skin detection. - // If middle pixel is skin, assign whole 8x8 block to skin. 
- const uint8_t ysource = src_y[4 * src_ystride + 4]; - const uint8_t usource = src_u[2 * src_uvstride + 2]; - const uint8_t vsource = src_v[2 * src_uvstride + 2]; - const int is_skin = vp10_skin_pixel(ysource, usource, vsource); - for (i = 0; i < 8; i++) { - for (j = 0; j < 8; j++) { - if (is_skin) - y[i * src_ystride + j] = 255; - else - y[i * src_ystride + j] = src_y[i * src_ystride + j]; - } - } - y += 8; - src_y += 8; - src_u += 4; - src_v += 4; - } - y += (src_ystride << 3) - ((cm->mi_cols - 1) << 3); - src_y += (src_ystride << 3) - ((cm->mi_cols - 1) << 3); - src_u += (src_uvstride << 2) - ((cm->mi_cols - 1) << 2); - src_v += (src_uvstride << 2) - ((cm->mi_cols - 1) << 2); - } - vp10_write_yuv_frame_420(&skinmap, yuv_skinmap_file); - vpx_free_frame_buffer(&skinmap); -} -#endif diff --git a/libs/libvpx/vp10/encoder/skin_detection.h b/libs/libvpx/vp10/encoder/skin_detection.h deleted file mode 100644 index 26b7d5e7c6..0000000000 --- a/libs/libvpx/vp10/encoder/skin_detection.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_ENCODER_SKIN_MAP_H_ -#define VP10_ENCODER_SKIN_MAP_H_ - -#include "vp10/common/blockd.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct VP10_COMP; - -// #define OUTPUT_YUV_SKINMAP - -int vp10_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr); - -#ifdef OUTPUT_YUV_SKINMAP -// For viewing skin map on input source. 
-void vp10_compute_skin_map(VP10_COMP *const cpi, FILE *yuv_skinmap_file); -#endif - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_SKIN_MAP_H_ diff --git a/libs/libvpx/vp10/encoder/speed_features.c b/libs/libvpx/vp10/encoder/speed_features.c deleted file mode 100644 index ce0aebeab0..0000000000 --- a/libs/libvpx/vp10/encoder/speed_features.c +++ /dev/null @@ -1,580 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - -#include "vp10/encoder/encoder.h" -#include "vp10/encoder/speed_features.h" -#include "vp10/encoder/rdopt.h" - -#include "vpx_dsp/vpx_dsp_common.h" - -// Mesh search patters for various speed settings -static MESH_PATTERN best_quality_mesh_pattern[MAX_MESH_STEP] = - {{64, 4}, {28, 2}, {15, 1}, {7, 1}}; - -#define MAX_MESH_SPEED 5 // Max speed setting for mesh motion method -static MESH_PATTERN good_quality_mesh_patterns[MAX_MESH_SPEED + 1] - [MAX_MESH_STEP] = - {{{64, 8}, {28, 4}, {15, 1}, {7, 1}}, - {{64, 8}, {28, 4}, {15, 1}, {7, 1}}, - {{64, 8}, {14, 2}, {7, 1}, {7, 1}}, - {{64, 16}, {24, 8}, {12, 4}, {7, 1}}, - {{64, 16}, {24, 8}, {12, 4}, {7, 1}}, - {{64, 16}, {24, 8}, {12, 4}, {7, 1}}, - }; -static unsigned char good_quality_max_mesh_pct[MAX_MESH_SPEED + 1] = - {50, 25, 15, 5, 1, 1}; - -// Intra only frames, golden frames (except alt ref overlays) and -// alt ref frames tend to be coded at a higher than ambient quality -static int frame_is_boosted(const VP10_COMP *cpi) { - return frame_is_kf_gf_arf(cpi); -} - -// Sets a partition size down to which the auto partition code will always -// search (can go lower), based on the image dimensions. 
The logic here -// is that the extent to which ringing artefacts are offensive, depends -// partly on the screen area that over which they propogate. Propogation is -// limited by transform block size but the screen area take up by a given block -// size will be larger for a small image format stretched to full screen. -static BLOCK_SIZE set_partition_min_limit(VP10_COMMON *const cm) { - unsigned int screen_area = (cm->width * cm->height); - - // Select block size based on image format size. - if (screen_area < 1280 * 720) { - // Formats smaller in area than 720P - return BLOCK_4X4; - } else if (screen_area < 1920 * 1080) { - // Format >= 720P and < 1080P - return BLOCK_8X8; - } else { - // Formats 1080P and up - return BLOCK_16X16; - } -} - -static void set_good_speed_feature_framesize_dependent(VP10_COMP *cpi, - SPEED_FEATURES *sf, - int speed) { - VP10_COMMON *const cm = &cpi->common; - - if (speed >= 1) { - if (VPXMIN(cm->width, cm->height) >= 720) { - sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT - : DISABLE_ALL_INTER_SPLIT; - sf->partition_search_breakout_dist_thr = (1 << 23); - } else { - sf->disable_split_mask = DISABLE_COMPOUND_SPLIT; - sf->partition_search_breakout_dist_thr = (1 << 21); - } - } - - if (speed >= 2) { - if (VPXMIN(cm->width, cm->height) >= 720) { - sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT - : DISABLE_ALL_INTER_SPLIT; - sf->adaptive_pred_interp_filter = 0; - sf->partition_search_breakout_dist_thr = (1 << 24); - sf->partition_search_breakout_rate_thr = 120; - } else { - sf->disable_split_mask = LAST_AND_INTRA_SPLIT_ONLY; - sf->partition_search_breakout_dist_thr = (1 << 22); - sf->partition_search_breakout_rate_thr = 100; - } - sf->rd_auto_partition_min_limit = set_partition_min_limit(cm); - } - - if (speed >= 3) { - if (VPXMIN(cm->width, cm->height) >= 720) { - sf->disable_split_mask = DISABLE_ALL_SPLIT; - sf->schedule_mode_search = cm->base_qindex < 220 ? 
1 : 0; - sf->partition_search_breakout_dist_thr = (1 << 25); - sf->partition_search_breakout_rate_thr = 200; - } else { - sf->max_intra_bsize = BLOCK_32X32; - sf->disable_split_mask = DISABLE_ALL_INTER_SPLIT; - sf->schedule_mode_search = cm->base_qindex < 175 ? 1 : 0; - sf->partition_search_breakout_dist_thr = (1 << 23); - sf->partition_search_breakout_rate_thr = 120; - } - } - - // If this is a two pass clip that fits the criteria for animated or - // graphics content then reset disable_split_mask for speeds 1-4. - // Also if the image edge is internal to the coded area. - if ((speed >= 1) && (cpi->oxcf.pass == 2) && - ((cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) || - (vp10_internal_image_edge(cpi)))) { - sf->disable_split_mask = DISABLE_COMPOUND_SPLIT; - } - - if (speed >= 4) { - if (VPXMIN(cm->width, cm->height) >= 720) { - sf->partition_search_breakout_dist_thr = (1 << 26); - } else { - sf->partition_search_breakout_dist_thr = (1 << 24); - } - sf->disable_split_mask = DISABLE_ALL_SPLIT; - } -} - -static void set_good_speed_feature(VP10_COMP *cpi, VP10_COMMON *cm, - SPEED_FEATURES *sf, int speed) { - const int boosted = frame_is_boosted(cpi); - - sf->adaptive_rd_thresh = 1; - sf->allow_skip_recode = 1; - - if (speed >= 1) { - if ((cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) || - vp10_internal_image_edge(cpi)) { - sf->use_square_partition_only = !frame_is_boosted(cpi); - } else { - sf->use_square_partition_only = !frame_is_intra_only(cm); - } - - sf->less_rectangular_check = 1; - - sf->use_rd_breakout = 1; - sf->adaptive_motion_search = 1; - sf->mv.auto_mv_step_size = 1; - sf->adaptive_rd_thresh = 2; - sf->mv.subpel_iters_per_step = 1; - sf->mode_skip_start = 10; - sf->adaptive_pred_interp_filter = 1; - - sf->recode_loop = ALLOW_RECODE_KFARFGF; - sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V; - sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V; - sf->intra_y_mode_mask[TX_16X16] = INTRA_DC_H_V; - sf->intra_uv_mode_mask[TX_16X16] = 
INTRA_DC_H_V; - - sf->tx_size_search_breakout = 1; - sf->partition_search_breakout_rate_thr = 80; - } - - if (speed >= 2) { - sf->tx_size_search_method = frame_is_boosted(cpi) ? USE_FULL_RD - : USE_LARGESTALL; - - sf->mode_search_skip_flags = (cm->frame_type == KEY_FRAME) ? 0 : - FLAG_SKIP_INTRA_DIRMISMATCH | - FLAG_SKIP_INTRA_BESTINTER | - FLAG_SKIP_COMP_BESTINTRA | - FLAG_SKIP_INTRA_LOWVAR; - sf->disable_filter_search_var_thresh = 100; - sf->comp_inter_joint_search_thresh = BLOCK_SIZES; - sf->auto_min_max_partition_size = RELAXED_NEIGHBORING_MIN_MAX; - sf->allow_partition_search_skip = 1; - } - - if (speed >= 3) { - sf->use_square_partition_only = !frame_is_intra_only(cm); - sf->tx_size_search_method = frame_is_intra_only(cm) ? USE_FULL_RD - : USE_LARGESTALL; - sf->mv.subpel_search_method = SUBPEL_TREE_PRUNED; - sf->adaptive_pred_interp_filter = 0; - sf->adaptive_mode_search = 1; - sf->cb_partition_search = !boosted; - sf->cb_pred_filter_search = 1; - sf->alt_ref_search_fp = 1; - sf->recode_loop = ALLOW_RECODE_KFMAXBW; - sf->adaptive_rd_thresh = 3; - sf->mode_skip_start = 6; - sf->intra_y_mode_mask[TX_32X32] = INTRA_DC; - sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC; - sf->adaptive_interp_filter_search = 1; - } - - if (speed >= 4) { - sf->use_square_partition_only = 1; - sf->tx_size_search_method = USE_LARGESTALL; - sf->mv.search_method = BIGDIA; - sf->mv.subpel_search_method = SUBPEL_TREE_PRUNED_MORE; - sf->adaptive_rd_thresh = 4; - if (cm->frame_type != KEY_FRAME) - sf->mode_search_skip_flags |= FLAG_EARLY_TERMINATE; - sf->disable_filter_search_var_thresh = 200; - sf->use_lp32x32fdct = 1; - sf->use_fast_coef_updates = ONE_LOOP_REDUCED; - sf->use_fast_coef_costing = 1; - sf->partition_search_breakout_rate_thr = 300; - } - - if (speed >= 5) { - int i; - sf->optimize_coefficients = 0; - sf->mv.search_method = HEX; - sf->disable_filter_search_var_thresh = 500; - for (i = 0; i < TX_SIZES; ++i) { - sf->intra_y_mode_mask[i] = INTRA_DC; - sf->intra_uv_mode_mask[i] = 
INTRA_DC; - } - sf->partition_search_breakout_rate_thr = 500; - sf->mv.reduce_first_step_size = 1; - sf->simple_model_rd_from_var = 1; - } -} - -static void set_rt_speed_feature_framesize_dependent(VP10_COMP *cpi, - SPEED_FEATURES *sf, int speed) { - VP10_COMMON *const cm = &cpi->common; - - if (speed >= 1) { - if (VPXMIN(cm->width, cm->height) >= 720) { - sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT - : DISABLE_ALL_INTER_SPLIT; - } else { - sf->disable_split_mask = DISABLE_COMPOUND_SPLIT; - } - } - - if (speed >= 2) { - if (VPXMIN(cm->width, cm->height) >= 720) { - sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT - : DISABLE_ALL_INTER_SPLIT; - } else { - sf->disable_split_mask = LAST_AND_INTRA_SPLIT_ONLY; - } - } - - if (speed >= 5) { - if (VPXMIN(cm->width, cm->height) >= 720) { - sf->partition_search_breakout_dist_thr = (1 << 25); - } else { - sf->partition_search_breakout_dist_thr = (1 << 23); - } - } - - if (speed >= 7) { - sf->encode_breakout_thresh = (VPXMIN(cm->width, cm->height) >= 720) ? - 800 : 300; - } -} - -static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf, - int speed, vp9e_tune_content content) { - VP10_COMMON *const cm = &cpi->common; - const int is_keyframe = cm->frame_type == KEY_FRAME; - const int frames_since_key = is_keyframe ? 0 : cpi->rc.frames_since_key; - sf->static_segmentation = 0; - sf->adaptive_rd_thresh = 1; - sf->use_fast_coef_costing = 1; - sf->allow_exhaustive_searches = 0; - sf->exhaustive_searches_thresh = INT_MAX; - - if (speed >= 1) { - sf->use_square_partition_only = !frame_is_intra_only(cm); - sf->less_rectangular_check = 1; - sf->tx_size_search_method = frame_is_intra_only(cm) ? 
USE_FULL_RD - : USE_LARGESTALL; - - sf->use_rd_breakout = 1; - - sf->adaptive_motion_search = 1; - sf->adaptive_pred_interp_filter = 1; - sf->mv.auto_mv_step_size = 1; - sf->adaptive_rd_thresh = 2; - sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V; - sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V; - sf->intra_uv_mode_mask[TX_16X16] = INTRA_DC_H_V; - } - - if (speed >= 2) { - sf->mode_search_skip_flags = (cm->frame_type == KEY_FRAME) ? 0 : - FLAG_SKIP_INTRA_DIRMISMATCH | - FLAG_SKIP_INTRA_BESTINTER | - FLAG_SKIP_COMP_BESTINTRA | - FLAG_SKIP_INTRA_LOWVAR; - sf->adaptive_pred_interp_filter = 2; - sf->disable_filter_search_var_thresh = 50; - sf->comp_inter_joint_search_thresh = BLOCK_SIZES; - sf->auto_min_max_partition_size = RELAXED_NEIGHBORING_MIN_MAX; - sf->lf_motion_threshold = LOW_MOTION_THRESHOLD; - sf->adjust_partitioning_from_last_frame = 1; - sf->last_partitioning_redo_frequency = 3; - sf->use_lp32x32fdct = 1; - sf->mode_skip_start = 11; - sf->intra_y_mode_mask[TX_16X16] = INTRA_DC_H_V; - } - - if (speed >= 3) { - sf->use_square_partition_only = 1; - sf->disable_filter_search_var_thresh = 100; - sf->use_uv_intra_rd_estimate = 1; - sf->mv.subpel_iters_per_step = 1; - sf->adaptive_rd_thresh = 4; - sf->mode_skip_start = 6; - sf->allow_skip_recode = 0; - sf->optimize_coefficients = 0; - sf->disable_split_mask = DISABLE_ALL_SPLIT; - sf->lpf_pick = LPF_PICK_FROM_Q; - } - - if (speed >= 4) { - int i; - sf->last_partitioning_redo_frequency = 4; - sf->adaptive_rd_thresh = 5; - sf->use_fast_coef_costing = 0; - sf->auto_min_max_partition_size = STRICT_NEIGHBORING_MIN_MAX; - sf->adjust_partitioning_from_last_frame = - cm->last_frame_type != cm->frame_type || (0 == - (frames_since_key + 1) % sf->last_partitioning_redo_frequency); - sf->mv.subpel_force_stop = 1; - for (i = 0; i < TX_SIZES; i++) { - sf->intra_y_mode_mask[i] = INTRA_DC_H_V; - sf->intra_uv_mode_mask[i] = INTRA_DC; - } - sf->intra_y_mode_mask[TX_32X32] = INTRA_DC; - sf->frame_parameter_update = 0; - 
sf->mv.search_method = FAST_HEX; - - sf->inter_mode_mask[BLOCK_32X32] = INTER_NEAREST_NEAR_NEW; - sf->inter_mode_mask[BLOCK_32X64] = INTER_NEAREST; - sf->inter_mode_mask[BLOCK_64X32] = INTER_NEAREST; - sf->inter_mode_mask[BLOCK_64X64] = INTER_NEAREST; - sf->max_intra_bsize = BLOCK_32X32; - sf->allow_skip_recode = 1; - } - - if (speed >= 5) { - sf->use_quant_fp = !is_keyframe; - sf->auto_min_max_partition_size = is_keyframe ? RELAXED_NEIGHBORING_MIN_MAX - : STRICT_NEIGHBORING_MIN_MAX; - sf->default_max_partition_size = BLOCK_32X32; - sf->default_min_partition_size = BLOCK_8X8; - sf->force_frame_boost = is_keyframe || - (frames_since_key % (sf->last_partitioning_redo_frequency << 1) == 1); - sf->max_delta_qindex = is_keyframe ? 20 : 15; - sf->partition_search_type = REFERENCE_PARTITION; - sf->allow_skip_recode = 0; - sf->inter_mode_mask[BLOCK_32X32] = INTER_NEAREST_NEW_ZERO; - sf->inter_mode_mask[BLOCK_32X64] = INTER_NEAREST_NEW_ZERO; - sf->inter_mode_mask[BLOCK_64X32] = INTER_NEAREST_NEW_ZERO; - sf->inter_mode_mask[BLOCK_64X64] = INTER_NEAREST_NEW_ZERO; - sf->adaptive_rd_thresh = 2; - // This feature is only enabled when partition search is disabled. - sf->reuse_inter_pred_sby = 1; - sf->partition_search_breakout_rate_thr = 200; - sf->coeff_prob_appx_step = 4; - sf->use_fast_coef_updates = is_keyframe ? TWO_LOOP : ONE_LOOP_REDUCED; - sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH; - sf->tx_size_search_method = is_keyframe ? USE_LARGESTALL : USE_TX_8X8; - sf->simple_model_rd_from_var = 1; - - if (!is_keyframe) { - int i; - if (content == VP9E_CONTENT_SCREEN) { - for (i = 0; i < BLOCK_SIZES; ++i) - sf->intra_y_mode_bsize_mask[i] = INTRA_DC_TM_H_V; - } else { - for (i = 0; i < BLOCK_SIZES; ++i) - if (i >= BLOCK_16X16) - sf->intra_y_mode_bsize_mask[i] = INTRA_DC; - else - // Use H and V intra mode for block sizes <= 16X16. 
- sf->intra_y_mode_bsize_mask[i] = INTRA_DC_H_V; - } - } - } - - if (speed >= 6) { - // Adaptively switch between SOURCE_VAR_BASED_PARTITION and FIXED_PARTITION. - sf->partition_search_type = VAR_BASED_PARTITION; - // Turn on this to use non-RD key frame coding mode. - sf->mv.search_method = NSTEP; - sf->mv.reduce_first_step_size = 1; - } - - if (speed >= 7) { - sf->adaptive_rd_thresh = 3; - sf->mv.search_method = FAST_DIAMOND; - sf->mv.fullpel_search_step_param = 10; - } - if (speed >= 8) { - sf->adaptive_rd_thresh = 4; - sf->mv.subpel_force_stop = 2; - sf->lpf_pick = LPF_PICK_MINIMAL_LPF; - } -} - -void vp10_set_speed_features_framesize_dependent(VP10_COMP *cpi) { - SPEED_FEATURES *const sf = &cpi->sf; - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - RD_OPT *const rd = &cpi->rd; - int i; - - if (oxcf->mode == REALTIME) { - set_rt_speed_feature_framesize_dependent(cpi, sf, oxcf->speed); - } else if (oxcf->mode == GOOD) { - set_good_speed_feature_framesize_dependent(cpi, sf, oxcf->speed); - } - - if (sf->disable_split_mask == DISABLE_ALL_SPLIT) { - sf->adaptive_pred_interp_filter = 0; - } - - if (cpi->encode_breakout && oxcf->mode == REALTIME && - sf->encode_breakout_thresh > cpi->encode_breakout) { - cpi->encode_breakout = sf->encode_breakout_thresh; - } - - // Check for masked out split cases. 
- for (i = 0; i < MAX_REFS; ++i) { - if (sf->disable_split_mask & (1 << i)) { - rd->thresh_mult_sub8x8[i] = INT_MAX; - } - } -} - -void vp10_set_speed_features_framesize_independent(VP10_COMP *cpi) { - SPEED_FEATURES *const sf = &cpi->sf; - VP10_COMMON *const cm = &cpi->common; - MACROBLOCK *const x = &cpi->td.mb; - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - int i; - - // best quality defaults - sf->frame_parameter_update = 1; - sf->mv.search_method = NSTEP; - sf->recode_loop = ALLOW_RECODE; - sf->mv.subpel_search_method = SUBPEL_TREE; - sf->mv.subpel_iters_per_step = 2; - sf->mv.subpel_force_stop = 0; - sf->optimize_coefficients = !is_lossless_requested(&cpi->oxcf); - sf->mv.reduce_first_step_size = 0; - sf->coeff_prob_appx_step = 1; - sf->mv.auto_mv_step_size = 0; - sf->mv.fullpel_search_step_param = 6; - sf->comp_inter_joint_search_thresh = BLOCK_4X4; - sf->adaptive_rd_thresh = 0; - sf->tx_size_search_method = USE_FULL_RD; - sf->use_lp32x32fdct = 0; - sf->adaptive_motion_search = 0; - sf->adaptive_pred_interp_filter = 0; - sf->adaptive_mode_search = 0; - sf->cb_pred_filter_search = 0; - sf->cb_partition_search = 0; - sf->alt_ref_search_fp = 0; - sf->use_quant_fp = 0; - sf->partition_search_type = SEARCH_PARTITION; - sf->less_rectangular_check = 0; - sf->use_square_partition_only = 0; - sf->auto_min_max_partition_size = NOT_IN_USE; - sf->rd_auto_partition_min_limit = BLOCK_4X4; - sf->default_max_partition_size = BLOCK_64X64; - sf->default_min_partition_size = BLOCK_4X4; - sf->adjust_partitioning_from_last_frame = 0; - sf->last_partitioning_redo_frequency = 4; - sf->disable_split_mask = 0; - sf->mode_search_skip_flags = 0; - sf->force_frame_boost = 0; - sf->max_delta_qindex = 0; - sf->disable_filter_search_var_thresh = 0; - sf->adaptive_interp_filter_search = 0; - sf->allow_partition_search_skip = 0; - - for (i = 0; i < TX_SIZES; i++) { - sf->intra_y_mode_mask[i] = INTRA_ALL; - sf->intra_uv_mode_mask[i] = INTRA_ALL; - } - sf->use_rd_breakout = 0; - 
sf->use_uv_intra_rd_estimate = 0; - sf->allow_skip_recode = 0; - sf->lpf_pick = LPF_PICK_FROM_FULL_IMAGE; - sf->use_fast_coef_updates = TWO_LOOP; - sf->use_fast_coef_costing = 0; - sf->mode_skip_start = MAX_MODES; // Mode index at which mode skip mask set - sf->schedule_mode_search = 0; - for (i = 0; i < BLOCK_SIZES; ++i) - sf->inter_mode_mask[i] = INTER_ALL; - sf->max_intra_bsize = BLOCK_64X64; - sf->reuse_inter_pred_sby = 0; - // This setting only takes effect when partition_search_type is set - // to FIXED_PARTITION. - sf->always_this_block_size = BLOCK_16X16; - sf->search_type_check_frequency = 50; - sf->encode_breakout_thresh = 0; - // Recode loop tolerance %. - sf->recode_tolerance = 25; - sf->default_interp_filter = SWITCHABLE; - sf->tx_size_search_breakout = 0; - sf->partition_search_breakout_dist_thr = 0; - sf->partition_search_breakout_rate_thr = 0; - sf->simple_model_rd_from_var = 0; - - if (oxcf->mode == REALTIME) - set_rt_speed_feature(cpi, sf, oxcf->speed, oxcf->content); - else if (oxcf->mode == GOOD) - set_good_speed_feature(cpi, cm, sf, oxcf->speed); - - cpi->full_search_sad = vp10_full_search_sad; - cpi->diamond_search_sad = vp10_diamond_search_sad; - - sf->allow_exhaustive_searches = 1; - if (oxcf->mode == BEST) { - if (cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) - sf->exhaustive_searches_thresh = (1 << 20); - else - sf->exhaustive_searches_thresh = (1 << 21); - sf->max_exaustive_pct = 100; - for (i = 0; i < MAX_MESH_STEP; ++i) { - sf->mesh_patterns[i].range = best_quality_mesh_pattern[i].range; - sf->mesh_patterns[i].interval = best_quality_mesh_pattern[i].interval; - } - } else { - int speed = (oxcf->speed > MAX_MESH_SPEED) ? 
MAX_MESH_SPEED : oxcf->speed; - if (cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) - sf->exhaustive_searches_thresh = (1 << 22); - else - sf->exhaustive_searches_thresh = (1 << 23); - sf->max_exaustive_pct = good_quality_max_mesh_pct[speed]; - if (speed > 0) - sf->exhaustive_searches_thresh = sf->exhaustive_searches_thresh << 1; - - for (i = 0; i < MAX_MESH_STEP; ++i) { - sf->mesh_patterns[i].range = - good_quality_mesh_patterns[speed][i].range; - sf->mesh_patterns[i].interval = - good_quality_mesh_patterns[speed][i].interval; - } - } - - // Slow quant, dct and trellis not worthwhile for first pass - // so make sure they are always turned off. - if (oxcf->pass == 1) - sf->optimize_coefficients = 0; - - // No recode for 1 pass. - if (oxcf->pass == 0) { - sf->recode_loop = DISALLOW_RECODE; - sf->optimize_coefficients = 0; - } - - if (sf->mv.subpel_search_method == SUBPEL_TREE) { - cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree; - } else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED) { - cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned; - } else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_MORE) { - cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned_more; - } else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_EVENMORE) { - cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned_evenmore; - } - - x->optimize = sf->optimize_coefficients == 1 && oxcf->pass != 1; - - x->min_partition_size = sf->default_min_partition_size; - x->max_partition_size = sf->default_max_partition_size; - - if (!cpi->oxcf.frame_periodic_boost) { - sf->max_delta_qindex = 0; - } -} diff --git a/libs/libvpx/vp10/encoder/speed_features.h b/libs/libvpx/vp10/encoder/speed_features.h deleted file mode 100644 index 3b91999298..0000000000 --- a/libs/libvpx/vp10/encoder/speed_features.h +++ /dev/null @@ -1,438 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_ENCODER_SPEED_FEATURES_H_ -#define VP10_ENCODER_SPEED_FEATURES_H_ - -#include "vp10/common/enums.h" - -#ifdef __cplusplus -extern "C" { -#endif - -enum { - INTRA_ALL = (1 << DC_PRED) | - (1 << V_PRED) | (1 << H_PRED) | - (1 << D45_PRED) | (1 << D135_PRED) | - (1 << D117_PRED) | (1 << D153_PRED) | - (1 << D207_PRED) | (1 << D63_PRED) | - (1 << TM_PRED), - INTRA_DC = (1 << DC_PRED), - INTRA_DC_TM = (1 << DC_PRED) | (1 << TM_PRED), - INTRA_DC_H_V = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED), - INTRA_DC_TM_H_V = (1 << DC_PRED) | (1 << TM_PRED) | (1 << V_PRED) | - (1 << H_PRED) -}; - -enum { - INTER_ALL = (1 << NEARESTMV) | (1 << NEARMV) | (1 << ZEROMV) | (1 << NEWMV), - INTER_NEAREST = (1 << NEARESTMV), - INTER_NEAREST_NEW = (1 << NEARESTMV) | (1 << NEWMV), - INTER_NEAREST_ZERO = (1 << NEARESTMV) | (1 << ZEROMV), - INTER_NEAREST_NEW_ZERO = (1 << NEARESTMV) | (1 << ZEROMV) | (1 << NEWMV), - INTER_NEAREST_NEAR_NEW = (1 << NEARESTMV) | (1 << NEARMV) | (1 << NEWMV), - INTER_NEAREST_NEAR_ZERO = (1 << NEARESTMV) | (1 << NEARMV) | (1 << ZEROMV), -}; - -enum { - DISABLE_ALL_INTER_SPLIT = (1 << THR_COMP_GA) | - (1 << THR_COMP_LA) | - (1 << THR_ALTR) | - (1 << THR_GOLD) | - (1 << THR_LAST), - - DISABLE_ALL_SPLIT = (1 << THR_INTRA) | DISABLE_ALL_INTER_SPLIT, - - DISABLE_COMPOUND_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA), - - LAST_AND_INTRA_SPLIT_ONLY = (1 << THR_COMP_GA) | - (1 << THR_COMP_LA) | - (1 << THR_ALTR) | - (1 << THR_GOLD) -}; - -typedef enum { - DIAMOND = 0, - NSTEP = 1, - HEX = 2, - BIGDIA = 3, - SQUARE = 4, - FAST_HEX = 5, - FAST_DIAMOND = 6 -} SEARCH_METHODS; - -typedef enum { - // No recode. 
- DISALLOW_RECODE = 0, - // Allow recode for KF and exceeding maximum frame bandwidth. - ALLOW_RECODE_KFMAXBW = 1, - // Allow recode only for KF/ARF/GF frames. - ALLOW_RECODE_KFARFGF = 2, - // Allow recode for all frames based on bitrate constraints. - ALLOW_RECODE = 3, -} RECODE_LOOP_TYPE; - -typedef enum { - SUBPEL_TREE = 0, - SUBPEL_TREE_PRUNED = 1, // Prunes 1/2-pel searches - SUBPEL_TREE_PRUNED_MORE = 2, // Prunes 1/2-pel searches more aggressively - SUBPEL_TREE_PRUNED_EVENMORE = 3, // Prunes 1/2- and 1/4-pel searches - // Other methods to come -} SUBPEL_SEARCH_METHODS; - -typedef enum { - NO_MOTION_THRESHOLD = 0, - LOW_MOTION_THRESHOLD = 7 -} MOTION_THRESHOLD; - -typedef enum { - USE_FULL_RD = 0, - USE_LARGESTALL, - USE_TX_8X8 -} TX_SIZE_SEARCH_METHOD; - -typedef enum { - NOT_IN_USE = 0, - RELAXED_NEIGHBORING_MIN_MAX = 1, - STRICT_NEIGHBORING_MIN_MAX = 2 -} AUTO_MIN_MAX_MODE; - -typedef enum { - // Try the full image with different values. - LPF_PICK_FROM_FULL_IMAGE, - // Try a small portion of the image with different values. - LPF_PICK_FROM_SUBIMAGE, - // Estimate the level based on quantizer and frame type - LPF_PICK_FROM_Q, - // Pick 0 to disable LPF if LPF was enabled last frame - LPF_PICK_MINIMAL_LPF -} LPF_PICK_METHOD; - -typedef enum { - // Terminate search early based on distortion so far compared to - // qp step, distortion in the neighborhood of the frame, etc. - FLAG_EARLY_TERMINATE = 1 << 0, - - // Skips comp inter modes if the best so far is an intra mode. - FLAG_SKIP_COMP_BESTINTRA = 1 << 1, - - // Skips oblique intra modes if the best so far is an inter mode. - FLAG_SKIP_INTRA_BESTINTER = 1 << 3, - - // Skips oblique intra modes at angles 27, 63, 117, 153 if the best - // intra so far is not one of the neighboring directions. 
- FLAG_SKIP_INTRA_DIRMISMATCH = 1 << 4, - - // Skips intra modes other than DC_PRED if the source variance is small - FLAG_SKIP_INTRA_LOWVAR = 1 << 5, -} MODE_SEARCH_SKIP_LOGIC; - -typedef enum { - FLAG_SKIP_EIGHTTAP = 1 << EIGHTTAP, - FLAG_SKIP_EIGHTTAP_SMOOTH = 1 << EIGHTTAP_SMOOTH, - FLAG_SKIP_EIGHTTAP_SHARP = 1 << EIGHTTAP_SHARP, -} INTERP_FILTER_MASK; - -typedef enum { - // Search partitions using RD criterion - SEARCH_PARTITION, - - // Always use a fixed size partition - FIXED_PARTITION, - - REFERENCE_PARTITION, - - // Use an arbitrary partitioning scheme based on source variance within - // a 64X64 SB - VAR_BASED_PARTITION, - - // Use non-fixed partitions based on source variance - SOURCE_VAR_BASED_PARTITION -} PARTITION_SEARCH_TYPE; - -typedef enum { - // Does a dry run to see if any of the contexts need to be updated or not, - // before the final run. - TWO_LOOP = 0, - - // No dry run, also only half the coef contexts and bands are updated. - // The rest are not updated at all. - ONE_LOOP_REDUCED = 1 -} FAST_COEFF_UPDATE; - -typedef struct MV_SPEED_FEATURES { - // Motion search method (Diamond, NSTEP, Hex, Big Diamond, Square, etc). - SEARCH_METHODS search_method; - - // This parameter controls which step in the n-step process we start at. - // It's changed adaptively based on circumstances. - int reduce_first_step_size; - - // If this is set to 1, we limit the motion search range to 2 times the - // largest motion vector found in the last frame. - int auto_mv_step_size; - - // Subpel_search_method can only be subpel_tree which does a subpixel - // logarithmic search that keeps stepping at 1/2 pixel units until - // you stop getting a gain, and then goes on to 1/4 and repeats - // the same process. Along the way it skips many diagonals. - SUBPEL_SEARCH_METHODS subpel_search_method; - - // Maximum number of steps in logarithmic subpel search before giving up. 
- int subpel_iters_per_step; - - // Control when to stop subpel search - int subpel_force_stop; - - // This variable sets the step_param used in full pel motion search. - int fullpel_search_step_param; -} MV_SPEED_FEATURES; - -#define MAX_MESH_STEP 4 - -typedef struct MESH_PATTERN { - int range; - int interval; -} MESH_PATTERN; - -typedef struct SPEED_FEATURES { - MV_SPEED_FEATURES mv; - - // Frame level coding parameter update - int frame_parameter_update; - - RECODE_LOOP_TYPE recode_loop; - - // Trellis (dynamic programming) optimization of quantized values (+1, 0). - int optimize_coefficients; - - // Always set to 0. If on it enables 0 cost background transmission - // (except for the initial transmission of the segmentation). The feature is - // disabled because the addition of very large block sizes make the - // backgrounds very to cheap to encode, and the segmentation we have - // adds overhead. - int static_segmentation; - - // If 1 we iterate finding a best reference for 2 ref frames together - via - // a log search that iterates 4 times (check around mv for last for best - // error of combined predictor then check around mv for alt). If 0 we - // we just use the best motion vector found for each frame by itself. - BLOCK_SIZE comp_inter_joint_search_thresh; - - // This variable is used to cap the maximum number of times we skip testing a - // mode to be evaluated. A high value means we will be faster. - int adaptive_rd_thresh; - - // Speed feature to allow or disallow skipping of recode at block - // level within a frame. - int allow_skip_recode; - - // Coefficient probability model approximation step size - int coeff_prob_appx_step; - - // The threshold is to determine how slow the motino is, it is used when - // use_lastframe_partitioning is set to LAST_FRAME_PARTITION_LOW_MOTION - MOTION_THRESHOLD lf_motion_threshold; - - // Determine which method we use to determine transform size. 
We can choose - // between options like full rd, largest for prediction size, largest - // for intra and model coefs for the rest. - TX_SIZE_SEARCH_METHOD tx_size_search_method; - - // Low precision 32x32 fdct keeps everything in 16 bits and thus is less - // precise but significantly faster than the non lp version. - int use_lp32x32fdct; - - // After looking at the first set of modes (set by index here), skip - // checking modes for reference frames that don't match the reference frame - // of the best so far. - int mode_skip_start; - - PARTITION_SEARCH_TYPE partition_search_type; - - // Used if partition_search_type = FIXED_SIZE_PARTITION - BLOCK_SIZE always_this_block_size; - - // Skip rectangular partition test when partition type none gives better - // rd than partition type split. - int less_rectangular_check; - - // Disable testing non square partitions. (eg 16x32) - int use_square_partition_only; - - // Sets min and max partition sizes for this 64x64 region based on the - // same 64x64 in last encoded frame, and the left and above neighbor. - AUTO_MIN_MAX_MODE auto_min_max_partition_size; - // Ensures the rd based auto partition search will always - // go down at least to the specified level. - BLOCK_SIZE rd_auto_partition_min_limit; - - // Min and max partition size we enable (block_size) as per auto - // min max, but also used by adjust partitioning, and pick_partitioning. - BLOCK_SIZE default_min_partition_size; - BLOCK_SIZE default_max_partition_size; - - // Whether or not we allow partitions one smaller or one greater than the last - // frame's partitioning. Only used if use_lastframe_partitioning is set. - int adjust_partitioning_from_last_frame; - - // How frequently we re do the partitioning from scratch. Only used if - // use_lastframe_partitioning is set. 
- int last_partitioning_redo_frequency; - - // Disables sub 8x8 blocksizes in different scenarios: Choices are to disable - // it always, to allow it for only Last frame and Intra, disable it for all - // inter modes or to enable it always. - int disable_split_mask; - - // TODO(jingning): combine the related motion search speed features - // This allows us to use motion search at other sizes as a starting - // point for this motion search and limits the search range around it. - int adaptive_motion_search; - - // Flag for allowing some use of exhaustive searches; - int allow_exhaustive_searches; - - // Threshold for allowing exhaistive motion search. - int exhaustive_searches_thresh; - - // Maximum number of exhaustive searches for a frame. - int max_exaustive_pct; - - // Pattern to be used for any exhaustive mesh searches. - MESH_PATTERN mesh_patterns[MAX_MESH_STEP]; - - int schedule_mode_search; - - // Allows sub 8x8 modes to use the prediction filter that was determined - // best for 8x8 mode. If set to 0 we always re check all the filters for - // sizes less than 8x8, 1 means we check all filter modes if no 8x8 filter - // was selected, and 2 means we use 8 tap if no 8x8 filter mode was selected. - int adaptive_pred_interp_filter; - - // Adaptive prediction mode search - int adaptive_mode_search; - - // Chessboard pattern prediction filter type search - int cb_pred_filter_search; - - int cb_partition_search; - - int alt_ref_search_fp; - - // Fast quantization process path - int use_quant_fp; - - // Use finer quantizer in every other few frames that run variable block - // partition type search. - int force_frame_boost; - - // Maximally allowed base quantization index fluctuation. 
- int max_delta_qindex; - - // Implements various heuristics to skip searching modes - // The heuristics selected are based on flags - // defined in the MODE_SEARCH_SKIP_HEURISTICS enum - unsigned int mode_search_skip_flags; - - // A source variance threshold below which filter search is disabled - // Choose a very large value (UINT_MAX) to use 8-tap always - unsigned int disable_filter_search_var_thresh; - - // These bit masks allow you to enable or disable intra modes for each - // transform size separately. - int intra_y_mode_mask[TX_SIZES]; - int intra_uv_mode_mask[TX_SIZES]; - - // These bit masks allow you to enable or disable intra modes for each - // prediction block size separately. - int intra_y_mode_bsize_mask[BLOCK_SIZES]; - - // This variable enables an early break out of mode testing if the model for - // rd built from the prediction signal indicates a value that's much - // higher than the best rd we've seen so far. - int use_rd_breakout; - - // This enables us to use an estimate for intra rd based on dc mode rather - // than choosing an actual uv mode in the stage of encoding before the actual - // final encode. - int use_uv_intra_rd_estimate; - - // This feature controls how the loop filter level is determined. - LPF_PICK_METHOD lpf_pick; - - // This feature limits the number of coefficients updates we actually do - // by only looking at counts from 1/2 the bands. - FAST_COEFF_UPDATE use_fast_coef_updates; - - // A binary mask indicating if NEARESTMV, NEARMV, ZEROMV, NEWMV - // modes are used in order from LSB to MSB for each BLOCK_SIZE. - int inter_mode_mask[BLOCK_SIZES]; - - // This feature controls whether we do the expensive context update and - // calculation in the rd coefficient costing loop. - int use_fast_coef_costing; - - // This feature controls the tolerence vs target used in deciding whether to - // recode a frame. It has no meaning if recode is disabled. 
- int recode_tolerance; - - // This variable controls the maximum block size where intra blocks can be - // used in inter frames. - // TODO(aconverse): Fold this into one of the other many mode skips - BLOCK_SIZE max_intra_bsize; - - // The frequency that we check if SOURCE_VAR_BASED_PARTITION or - // FIXED_PARTITION search type should be used. - int search_type_check_frequency; - - // When partition is pre-set, the inter prediction result from pick_inter_mode - // can be reused in final block encoding process. It is enabled only for real- - // time mode speed 6. - int reuse_inter_pred_sby; - - // This variable sets the encode_breakout threshold. Currently, it is only - // enabled in real time mode. - int encode_breakout_thresh; - - // default interp filter choice - INTERP_FILTER default_interp_filter; - - // Early termination in transform size search, which only applies while - // tx_size_search_method is USE_FULL_RD. - int tx_size_search_breakout; - - // adaptive interp_filter search to allow skip of certain filter types. - int adaptive_interp_filter_search; - - // mask for skip evaluation of certain interp_filter type. - INTERP_FILTER_MASK interp_filter_search_mask; - - // Partition search early breakout thresholds. 
- int64_t partition_search_breakout_dist_thr; - int partition_search_breakout_rate_thr; - - // Allow skipping partition search for still image frame - int allow_partition_search_skip; - - // Fast approximation of vp10_model_rd_from_var_lapndz - int simple_model_rd_from_var; -} SPEED_FEATURES; - -struct VP10_COMP; - -void vp10_set_speed_features_framesize_independent(struct VP10_COMP *cpi); -void vp10_set_speed_features_framesize_dependent(struct VP10_COMP *cpi); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_SPEED_FEATURES_H_ diff --git a/libs/libvpx/vp10/encoder/subexp.c b/libs/libvpx/vp10/encoder/subexp.c deleted file mode 100644 index eccee8e747..0000000000 --- a/libs/libvpx/vp10/encoder/subexp.c +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Copyright (c) 2013 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ -#include "vpx_dsp/bitwriter.h" - -#include "vp10/common/common.h" -#include "vp10/common/entropy.h" -#include "vp10/encoder/cost.h" -#include "vp10/encoder/subexp.h" - -#define vp10_cost_upd256 ((int)(vp10_cost_one(upd) - vp10_cost_zero(upd))) - -static const uint8_t update_bits[255] = { - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 11 - CONFIG_MISC_FIXES, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 0, -}; - -static int recenter_nonneg(int v, int m) { - if (v > (m << 1)) - return v; - else if (v >= m) - return ((v - m) << 1); - else - return ((m - v) << 1) - 1; -} - -static int remap_prob(int v, int m) { - int i; - static const uint8_t map_table[MAX_PROB - 1] = { - // generated by: - // map_table[j] = split_index(j, MAX_PROB - 1, MODULUS_PARAM); - 20, 21, 22, 23, 24, 25, 0, 26, 27, 28, 29, 30, 31, 32, 33, - 34, 35, 36, 37, 1, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 2, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, - 3, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 4, 74, - 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 5, 86, 87, 88, - 89, 90, 91, 92, 93, 94, 95, 96, 97, 6, 
98, 99, 100, 101, 102, - 103, 104, 105, 106, 107, 108, 109, 7, 110, 111, 112, 113, 114, 115, 116, - 117, 118, 119, 120, 121, 8, 122, 123, 124, 125, 126, 127, 128, 129, 130, - 131, 132, 133, 9, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, - 145, 10, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 11, - 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 12, 170, 171, - 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 13, 182, 183, 184, 185, - 186, 187, 188, 189, 190, 191, 192, 193, 14, 194, 195, 196, 197, 198, 199, - 200, 201, 202, 203, 204, 205, 15, 206, 207, 208, 209, 210, 211, 212, 213, - 214, 215, 216, 217, 16, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, - 228, 229, 17, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, - 18, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 19, - }; - v--; - m--; - if ((m << 1) <= MAX_PROB) - i = recenter_nonneg(v, m) - 1; - else - i = recenter_nonneg(MAX_PROB - 1 - v, MAX_PROB - 1 - m) - 1; - - i = map_table[i]; - return i; -} - -static int prob_diff_update_cost(vpx_prob newp, vpx_prob oldp) { - int delp = remap_prob(newp, oldp); - return update_bits[delp] * 256; -} - -static void encode_uniform(vpx_writer *w, int v) { - const int l = 8; - const int m = (1 << l) - 191 + CONFIG_MISC_FIXES; - if (v < m) { - vpx_write_literal(w, v, l - 1); - } else { - vpx_write_literal(w, m + ((v - m) >> 1), l - 1); - vpx_write_literal(w, (v - m) & 1, 1); - } -} - -static INLINE int write_bit_gte(vpx_writer *w, int word, int test) { - vpx_write_literal(w, word >= test, 1); - return word >= test; -} - -static void encode_term_subexp(vpx_writer *w, int word) { - if (!write_bit_gte(w, word, 16)) { - vpx_write_literal(w, word, 4); - } else if (!write_bit_gte(w, word, 32)) { - vpx_write_literal(w, word - 16, 4); - } else if (!write_bit_gte(w, word, 64)) { - vpx_write_literal(w, word - 32, 5); - } else { - encode_uniform(w, word - 64); - } -} - -void vp10_write_prob_diff_update(vpx_writer *w, 
vpx_prob newp, vpx_prob oldp) { - const int delp = remap_prob(newp, oldp); - encode_term_subexp(w, delp); -} - -int vp10_prob_diff_update_savings_search(const unsigned int *ct, - vpx_prob oldp, vpx_prob *bestp, - vpx_prob upd) { - const int old_b = cost_branch256(ct, oldp); - int bestsavings = 0; - vpx_prob newp, bestnewp = oldp; - const int step = *bestp > oldp ? -1 : 1; - - for (newp = *bestp; newp != oldp; newp += step) { - const int new_b = cost_branch256(ct, newp); - const int update_b = prob_diff_update_cost(newp, oldp) + vp10_cost_upd256; - const int savings = old_b - new_b - update_b; - if (savings > bestsavings) { - bestsavings = savings; - bestnewp = newp; - } - } - *bestp = bestnewp; - return bestsavings; -} - -int vp10_prob_diff_update_savings_search_model(const unsigned int *ct, - const vpx_prob *oldp, - vpx_prob *bestp, - vpx_prob upd, - int stepsize) { - int i, old_b, new_b, update_b, savings, bestsavings; - int newp; - const int step_sign = *bestp > oldp[PIVOT_NODE] ? -1 : 1; - const int step = stepsize * step_sign; - vpx_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES]; - vp10_model_to_full_probs(oldp, oldplist); - memcpy(newplist, oldp, sizeof(vpx_prob) * UNCONSTRAINED_NODES); - for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i) - old_b += cost_branch256(ct + 2 * i, oldplist[i]); - old_b += cost_branch256(ct + 2 * PIVOT_NODE, oldplist[PIVOT_NODE]); - - bestsavings = 0; - bestnewp = oldp[PIVOT_NODE]; - - assert(stepsize > 0); - - for (newp = *bestp; (newp - oldp[PIVOT_NODE]) * step_sign < 0; - newp += step) { - if (newp < 1 || newp > 255) - continue; - newplist[PIVOT_NODE] = newp; - vp10_model_to_full_probs(newplist, newplist); - for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i) - new_b += cost_branch256(ct + 2 * i, newplist[i]); - new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]); - update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + - vp10_cost_upd256; - savings = old_b - new_b - 
update_b; - if (savings > bestsavings) { - bestsavings = savings; - bestnewp = newp; - } - } - - *bestp = bestnewp; - return bestsavings; -} - -void vp10_cond_prob_diff_update(vpx_writer *w, vpx_prob *oldp, - const unsigned int ct[2]) { - const vpx_prob upd = DIFF_UPDATE_PROB; - vpx_prob newp = get_binary_prob(ct[0], ct[1]); - const int savings = vp10_prob_diff_update_savings_search(ct, *oldp, &newp, - upd); - assert(newp >= 1); - if (savings > 0) { - vpx_write(w, 1, upd); - vp10_write_prob_diff_update(w, newp, *oldp); - *oldp = newp; - } else { - vpx_write(w, 0, upd); - } -} - -int vp10_cond_prob_diff_update_savings(vpx_prob *oldp, - const unsigned int ct[2]) { - const vpx_prob upd = DIFF_UPDATE_PROB; - vpx_prob newp = get_binary_prob(ct[0], ct[1]); - const int savings = vp10_prob_diff_update_savings_search(ct, *oldp, &newp, - upd); - return savings; -} diff --git a/libs/libvpx/vp10/encoder/subexp.h b/libs/libvpx/vp10/encoder/subexp.h deleted file mode 100644 index 091334f1f2..0000000000 --- a/libs/libvpx/vp10/encoder/subexp.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 2013 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - - -#ifndef VP10_ENCODER_SUBEXP_H_ -#define VP10_ENCODER_SUBEXP_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include "vpx_dsp/prob.h" - -struct vpx_writer; - -void vp10_write_prob_diff_update(struct vpx_writer *w, - vpx_prob newp, vpx_prob oldp); - -void vp10_cond_prob_diff_update(struct vpx_writer *w, vpx_prob *oldp, - const unsigned int ct[2]); - -int vp10_prob_diff_update_savings_search(const unsigned int *ct, - vpx_prob oldp, vpx_prob *bestp, - vpx_prob upd); - - -int vp10_prob_diff_update_savings_search_model(const unsigned int *ct, - const vpx_prob *oldp, - vpx_prob *bestp, - vpx_prob upd, - int stepsize); - -int vp10_cond_prob_diff_update_savings(vpx_prob *oldp, - const unsigned int ct[2]); -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_SUBEXP_H_ diff --git a/libs/libvpx/vp10/encoder/temporal_filter.c b/libs/libvpx/vp10/encoder/temporal_filter.c deleted file mode 100644 index 5278d3b736..0000000000 --- a/libs/libvpx/vp10/encoder/temporal_filter.c +++ /dev/null @@ -1,702 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include - -#include "vp10/common/alloccommon.h" -#include "vp10/common/onyxc_int.h" -#include "vp10/common/quant_common.h" -#include "vp10/common/reconinter.h" -#include "vp10/encoder/extend.h" -#include "vp10/encoder/firstpass.h" -#include "vp10/encoder/mcomp.h" -#include "vp10/encoder/encoder.h" -#include "vp10/encoder/quantize.h" -#include "vp10/encoder/ratectrl.h" -#include "vp10/encoder/segmentation.h" -#include "vp10/encoder/temporal_filter.h" -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_mem/vpx_mem.h" -#include "vpx_ports/mem.h" -#include "vpx_ports/vpx_timer.h" -#include "vpx_scale/vpx_scale.h" - -static int fixed_divide[512]; - -static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd, - uint8_t *y_mb_ptr, - uint8_t *u_mb_ptr, - uint8_t *v_mb_ptr, - int stride, - int uv_block_width, - int uv_block_height, - int mv_row, - int mv_col, - uint8_t *pred, - struct scale_factors *scale, - int x, int y) { - const int which_mv = 0; - const MV mv = { mv_row, mv_col }; - const InterpKernel *const kernel = - vp10_filter_kernels[xd->mi[0]->mbmi.interp_filter]; - - enum mv_precision mv_precision_uv; - int uv_stride; - if (uv_block_width == 8) { - uv_stride = (stride + 1) >> 1; - mv_precision_uv = MV_PRECISION_Q4; - } else { - uv_stride = stride; - mv_precision_uv = MV_PRECISION_Q3; - } - -#if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - vp10_highbd_build_inter_predictor(y_mb_ptr, stride, - &pred[0], 16, - &mv, - scale, - 16, 16, - which_mv, - kernel, MV_PRECISION_Q3, x, y, xd->bd); - - vp10_highbd_build_inter_predictor(u_mb_ptr, uv_stride, - &pred[256], uv_block_width, - &mv, - scale, - uv_block_width, uv_block_height, - which_mv, - kernel, mv_precision_uv, x, y, xd->bd); - - vp10_highbd_build_inter_predictor(v_mb_ptr, uv_stride, - &pred[512], uv_block_width, - &mv, - scale, - uv_block_width, uv_block_height, - which_mv, - kernel, mv_precision_uv, x, y, xd->bd); - return; - } -#endif // 
CONFIG_VP9_HIGHBITDEPTH - vp10_build_inter_predictor(y_mb_ptr, stride, - &pred[0], 16, - &mv, - scale, - 16, 16, - which_mv, - kernel, MV_PRECISION_Q3, x, y); - - vp10_build_inter_predictor(u_mb_ptr, uv_stride, - &pred[256], uv_block_width, - &mv, - scale, - uv_block_width, uv_block_height, - which_mv, - kernel, mv_precision_uv, x, y); - - vp10_build_inter_predictor(v_mb_ptr, uv_stride, - &pred[512], uv_block_width, - &mv, - scale, - uv_block_width, uv_block_height, - which_mv, - kernel, mv_precision_uv, x, y); -} - -void vp10_temporal_filter_init(void) { - int i; - - fixed_divide[0] = 0; - for (i = 1; i < 512; ++i) - fixed_divide[i] = 0x80000 / i; -} - -void vp10_temporal_filter_apply_c(uint8_t *frame1, - unsigned int stride, - uint8_t *frame2, - unsigned int block_width, - unsigned int block_height, - int strength, - int filter_weight, - unsigned int *accumulator, - uint16_t *count) { - unsigned int i, j, k; - int modifier; - int byte = 0; - const int rounding = strength > 0 ? 1 << (strength - 1) : 0; - - for (i = 0, k = 0; i < block_height; i++) { - for (j = 0; j < block_width; j++, k++) { - int src_byte = frame1[byte]; - int pixel_value = *frame2++; - - modifier = src_byte - pixel_value; - // This is an integer approximation of: - // float coeff = (3.0 * modifer * modifier) / pow(2, strength); - // modifier = (int)roundf(coeff > 16 ? 
0 : 16-coeff); - modifier *= modifier; - modifier *= 3; - modifier += rounding; - modifier >>= strength; - - if (modifier > 16) - modifier = 16; - - modifier = 16 - modifier; - modifier *= filter_weight; - - count[k] += modifier; - accumulator[k] += modifier * pixel_value; - - byte++; - } - - byte += stride - block_width; - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -void vp10_highbd_temporal_filter_apply_c(uint8_t *frame1_8, - unsigned int stride, - uint8_t *frame2_8, - unsigned int block_width, - unsigned int block_height, - int strength, - int filter_weight, - unsigned int *accumulator, - uint16_t *count) { - uint16_t *frame1 = CONVERT_TO_SHORTPTR(frame1_8); - uint16_t *frame2 = CONVERT_TO_SHORTPTR(frame2_8); - unsigned int i, j, k; - int modifier; - int byte = 0; - const int rounding = strength > 0 ? 1 << (strength - 1) : 0; - - for (i = 0, k = 0; i < block_height; i++) { - for (j = 0; j < block_width; j++, k++) { - int src_byte = frame1[byte]; - int pixel_value = *frame2++; - - modifier = src_byte - pixel_value; - // This is an integer approximation of: - // float coeff = (3.0 * modifer * modifier) / pow(2, strength); - // modifier = (int)roundf(coeff > 16 ? 
0 : 16-coeff); - modifier *= modifier; - modifier *= 3; - modifier += rounding; - modifier >>= strength; - - if (modifier > 16) - modifier = 16; - - modifier = 16 - modifier; - modifier *= filter_weight; - - count[k] += modifier; - accumulator[k] += modifier * pixel_value; - - byte++; - } - - byte += stride - block_width; - } -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -static int temporal_filter_find_matching_mb_c(VP10_COMP *cpi, - uint8_t *arf_frame_buf, - uint8_t *frame_ptr_buf, - int stride) { - MACROBLOCK *const x = &cpi->td.mb; - MACROBLOCKD *const xd = &x->e_mbd; - const MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv; - int step_param; - int sadpb = x->sadperbit16; - int bestsme = INT_MAX; - int distortion; - unsigned int sse; - int cost_list[5]; - - MV best_ref_mv1 = {0, 0}; - MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */ - MV *ref_mv = &x->e_mbd.mi[0]->bmi[0].as_mv[0].as_mv; - - // Save input state - struct buf_2d src = x->plane[0].src; - struct buf_2d pre = xd->plane[0].pre[0]; - - best_ref_mv1_full.col = best_ref_mv1.col >> 3; - best_ref_mv1_full.row = best_ref_mv1.row >> 3; - - // Setup frame pointers - x->plane[0].src.buf = arf_frame_buf; - x->plane[0].src.stride = stride; - xd->plane[0].pre[0].buf = frame_ptr_buf; - xd->plane[0].pre[0].stride = stride; - - step_param = mv_sf->reduce_first_step_size; - step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2); - - // Ignore mv costing by sending NULL pointer instead of cost arrays - vp10_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1, - cond_cost_list(cpi, cost_list), - &cpi->fn_ptr[BLOCK_16X16], 0, &best_ref_mv1, ref_mv); - - // Ignore mv costing by sending NULL pointer instead of cost array - bestsme = cpi->find_fractional_mv_step(x, ref_mv, - &best_ref_mv1, - cpi->common.allow_high_precision_mv, - x->errorperbit, - &cpi->fn_ptr[BLOCK_16X16], - 0, mv_sf->subpel_iters_per_step, - cond_cost_list(cpi, cost_list), - NULL, NULL, - &distortion, &sse, NULL, 0, 0); - - // Restore input state - 
x->plane[0].src = src; - xd->plane[0].pre[0] = pre; - - return bestsme; -} - -static void temporal_filter_iterate_c(VP10_COMP *cpi, - YV12_BUFFER_CONFIG **frames, - int frame_count, - int alt_ref_index, - int strength, - struct scale_factors *scale) { - int byte; - int frame; - int mb_col, mb_row; - unsigned int filter_weight; - int mb_cols = (frames[alt_ref_index]->y_crop_width + 15) >> 4; - int mb_rows = (frames[alt_ref_index]->y_crop_height + 15) >> 4; - int mb_y_offset = 0; - int mb_uv_offset = 0; - DECLARE_ALIGNED(16, unsigned int, accumulator[16 * 16 * 3]); - DECLARE_ALIGNED(16, uint16_t, count[16 * 16 * 3]); - MACROBLOCKD *mbd = &cpi->td.mb.e_mbd; - YV12_BUFFER_CONFIG *f = frames[alt_ref_index]; - uint8_t *dst1, *dst2; -#if CONFIG_VP9_HIGHBITDEPTH - DECLARE_ALIGNED(16, uint16_t, predictor16[16 * 16 * 3]); - DECLARE_ALIGNED(16, uint8_t, predictor8[16 * 16 * 3]); - uint8_t *predictor; -#else - DECLARE_ALIGNED(16, uint8_t, predictor[16 * 16 * 3]); -#endif - const int mb_uv_height = 16 >> mbd->plane[1].subsampling_y; - const int mb_uv_width = 16 >> mbd->plane[1].subsampling_x; - - // Save input state - uint8_t* input_buffer[MAX_MB_PLANE]; - int i; -#if CONFIG_VP9_HIGHBITDEPTH - if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - predictor = CONVERT_TO_BYTEPTR(predictor16); - } else { - predictor = predictor8; - } -#endif - - for (i = 0; i < MAX_MB_PLANE; i++) - input_buffer[i] = mbd->plane[i].pre[0].buf; - - for (mb_row = 0; mb_row < mb_rows; mb_row++) { - // Source frames are extended to 16 pixels. This is different than - // L/A/G reference frames that have a border of 32 (VP9ENCBORDERINPIXELS) - // A 6/8 tap filter is used for motion search. This requires 2 pixels - // before and 3 pixels after. So the largest Y mv on a border would - // then be 16 - VP9_INTERP_EXTEND. The UV blocks are half the size of the - // Y and therefore only extended by 8. The largest mv that a UV block - // can support is 8 - VP9_INTERP_EXTEND. A UV mv is half of a Y mv. 
- // (16 - VP9_INTERP_EXTEND) >> 1 which is greater than - // 8 - VP9_INTERP_EXTEND. - // To keep the mv in play for both Y and UV planes the max that it - // can be on a border is therefore 16 - (2*VP9_INTERP_EXTEND+1). - cpi->td.mb.mv_row_min = -((mb_row * 16) + (17 - 2 * VP9_INTERP_EXTEND)); - cpi->td.mb.mv_row_max = ((mb_rows - 1 - mb_row) * 16) - + (17 - 2 * VP9_INTERP_EXTEND); - - for (mb_col = 0; mb_col < mb_cols; mb_col++) { - int i, j, k; - int stride; - - memset(accumulator, 0, 16 * 16 * 3 * sizeof(accumulator[0])); - memset(count, 0, 16 * 16 * 3 * sizeof(count[0])); - - cpi->td.mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VP9_INTERP_EXTEND)); - cpi->td.mb.mv_col_max = ((mb_cols - 1 - mb_col) * 16) - + (17 - 2 * VP9_INTERP_EXTEND); - - for (frame = 0; frame < frame_count; frame++) { - const int thresh_low = 10000; - const int thresh_high = 20000; - - if (frames[frame] == NULL) - continue; - - mbd->mi[0]->bmi[0].as_mv[0].as_mv.row = 0; - mbd->mi[0]->bmi[0].as_mv[0].as_mv.col = 0; - - if (frame == alt_ref_index) { - filter_weight = 2; - } else { - // Find best match in this frame by MC - int err = temporal_filter_find_matching_mb_c(cpi, - frames[alt_ref_index]->y_buffer + mb_y_offset, - frames[frame]->y_buffer + mb_y_offset, - frames[frame]->y_stride); - - // Assign higher weight to matching MB if it's error - // score is lower. If not applying MC default behavior - // is to weight all MBs equal. - filter_weight = err < thresh_low - ? 2 : err < thresh_high ? 
1 : 0; - } - - if (filter_weight != 0) { - // Construct the predictors - temporal_filter_predictors_mb_c(mbd, - frames[frame]->y_buffer + mb_y_offset, - frames[frame]->u_buffer + mb_uv_offset, - frames[frame]->v_buffer + mb_uv_offset, - frames[frame]->y_stride, - mb_uv_width, mb_uv_height, - mbd->mi[0]->bmi[0].as_mv[0].as_mv.row, - mbd->mi[0]->bmi[0].as_mv[0].as_mv.col, - predictor, scale, - mb_col * 16, mb_row * 16); - -#if CONFIG_VP9_HIGHBITDEPTH - if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - int adj_strength = strength + 2 * (mbd->bd - 8); - // Apply the filter (YUV) - vp10_highbd_temporal_filter_apply(f->y_buffer + mb_y_offset, - f->y_stride, - predictor, 16, 16, adj_strength, - filter_weight, - accumulator, count); - vp10_highbd_temporal_filter_apply(f->u_buffer + mb_uv_offset, - f->uv_stride, predictor + 256, - mb_uv_width, mb_uv_height, - adj_strength, - filter_weight, accumulator + 256, - count + 256); - vp10_highbd_temporal_filter_apply(f->v_buffer + mb_uv_offset, - f->uv_stride, predictor + 512, - mb_uv_width, mb_uv_height, - adj_strength, filter_weight, - accumulator + 512, count + 512); - } else { - // Apply the filter (YUV) - vp10_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride, - predictor, 16, 16, - strength, filter_weight, - accumulator, count); - vp10_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride, - predictor + 256, - mb_uv_width, mb_uv_height, strength, - filter_weight, accumulator + 256, - count + 256); - vp10_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride, - predictor + 512, - mb_uv_width, mb_uv_height, strength, - filter_weight, accumulator + 512, - count + 512); - } -#else - // Apply the filter (YUV) - vp10_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride, - predictor, 16, 16, - strength, filter_weight, - accumulator, count); - vp10_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride, - predictor + 256, - mb_uv_width, mb_uv_height, strength, - filter_weight, 
accumulator + 256, - count + 256); - vp10_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride, - predictor + 512, - mb_uv_width, mb_uv_height, strength, - filter_weight, accumulator + 512, - count + 512); -#endif // CONFIG_VP9_HIGHBITDEPTH - } - } - -#if CONFIG_VP9_HIGHBITDEPTH - if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - uint16_t *dst1_16; - uint16_t *dst2_16; - // Normalize filter output to produce AltRef frame - dst1 = cpi->alt_ref_buffer.y_buffer; - dst1_16 = CONVERT_TO_SHORTPTR(dst1); - stride = cpi->alt_ref_buffer.y_stride; - byte = mb_y_offset; - for (i = 0, k = 0; i < 16; i++) { - for (j = 0; j < 16; j++, k++) { - unsigned int pval = accumulator[k] + (count[k] >> 1); - pval *= fixed_divide[count[k]]; - pval >>= 19; - - dst1_16[byte] = (uint16_t)pval; - - // move to next pixel - byte++; - } - - byte += stride - 16; - } - - dst1 = cpi->alt_ref_buffer.u_buffer; - dst2 = cpi->alt_ref_buffer.v_buffer; - dst1_16 = CONVERT_TO_SHORTPTR(dst1); - dst2_16 = CONVERT_TO_SHORTPTR(dst2); - stride = cpi->alt_ref_buffer.uv_stride; - byte = mb_uv_offset; - for (i = 0, k = 256; i < mb_uv_height; i++) { - for (j = 0; j < mb_uv_width; j++, k++) { - int m = k + 256; - - // U - unsigned int pval = accumulator[k] + (count[k] >> 1); - pval *= fixed_divide[count[k]]; - pval >>= 19; - dst1_16[byte] = (uint16_t)pval; - - // V - pval = accumulator[m] + (count[m] >> 1); - pval *= fixed_divide[count[m]]; - pval >>= 19; - dst2_16[byte] = (uint16_t)pval; - - // move to next pixel - byte++; - } - - byte += stride - mb_uv_width; - } - } else { - // Normalize filter output to produce AltRef frame - dst1 = cpi->alt_ref_buffer.y_buffer; - stride = cpi->alt_ref_buffer.y_stride; - byte = mb_y_offset; - for (i = 0, k = 0; i < 16; i++) { - for (j = 0; j < 16; j++, k++) { - unsigned int pval = accumulator[k] + (count[k] >> 1); - pval *= fixed_divide[count[k]]; - pval >>= 19; - - dst1[byte] = (uint8_t)pval; - - // move to next pixel - byte++; - } - byte += stride - 16; - } - - 
dst1 = cpi->alt_ref_buffer.u_buffer; - dst2 = cpi->alt_ref_buffer.v_buffer; - stride = cpi->alt_ref_buffer.uv_stride; - byte = mb_uv_offset; - for (i = 0, k = 256; i < mb_uv_height; i++) { - for (j = 0; j < mb_uv_width; j++, k++) { - int m = k + 256; - - // U - unsigned int pval = accumulator[k] + (count[k] >> 1); - pval *= fixed_divide[count[k]]; - pval >>= 19; - dst1[byte] = (uint8_t)pval; - - // V - pval = accumulator[m] + (count[m] >> 1); - pval *= fixed_divide[count[m]]; - pval >>= 19; - dst2[byte] = (uint8_t)pval; - - // move to next pixel - byte++; - } - byte += stride - mb_uv_width; - } - } -#else - // Normalize filter output to produce AltRef frame - dst1 = cpi->alt_ref_buffer.y_buffer; - stride = cpi->alt_ref_buffer.y_stride; - byte = mb_y_offset; - for (i = 0, k = 0; i < 16; i++) { - for (j = 0; j < 16; j++, k++) { - unsigned int pval = accumulator[k] + (count[k] >> 1); - pval *= fixed_divide[count[k]]; - pval >>= 19; - - dst1[byte] = (uint8_t)pval; - - // move to next pixel - byte++; - } - byte += stride - 16; - } - - dst1 = cpi->alt_ref_buffer.u_buffer; - dst2 = cpi->alt_ref_buffer.v_buffer; - stride = cpi->alt_ref_buffer.uv_stride; - byte = mb_uv_offset; - for (i = 0, k = 256; i < mb_uv_height; i++) { - for (j = 0; j < mb_uv_width; j++, k++) { - int m = k + 256; - - // U - unsigned int pval = accumulator[k] + (count[k] >> 1); - pval *= fixed_divide[count[k]]; - pval >>= 19; - dst1[byte] = (uint8_t)pval; - - // V - pval = accumulator[m] + (count[m] >> 1); - pval *= fixed_divide[count[m]]; - pval >>= 19; - dst2[byte] = (uint8_t)pval; - - // move to next pixel - byte++; - } - byte += stride - mb_uv_width; - } -#endif // CONFIG_VP9_HIGHBITDEPTH - mb_y_offset += 16; - mb_uv_offset += mb_uv_width; - } - mb_y_offset += 16 * (f->y_stride - mb_cols); - mb_uv_offset += mb_uv_height * f->uv_stride - mb_uv_width * mb_cols; - } - - // Restore input state - for (i = 0; i < MAX_MB_PLANE; i++) - mbd->plane[i].pre[0].buf = input_buffer[i]; -} - -// Apply buffer limits 
and context specific adjustments to arnr filter. -static void adjust_arnr_filter(VP10_COMP *cpi, - int distance, int group_boost, - int *arnr_frames, int *arnr_strength) { - const VP10EncoderConfig *const oxcf = &cpi->oxcf; - const int frames_after_arf = - vp10_lookahead_depth(cpi->lookahead) - distance - 1; - int frames_fwd = (cpi->oxcf.arnr_max_frames - 1) >> 1; - int frames_bwd; - int q, frames, strength; - - // Define the forward and backwards filter limits for this arnr group. - if (frames_fwd > frames_after_arf) - frames_fwd = frames_after_arf; - if (frames_fwd > distance) - frames_fwd = distance; - - frames_bwd = frames_fwd; - - // For even length filter there is one more frame backward - // than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff. - if (frames_bwd < distance) - frames_bwd += (oxcf->arnr_max_frames + 1) & 0x1; - - // Set the baseline active filter size. - frames = frames_bwd + 1 + frames_fwd; - - // Adjust the strength based on active max q. - if (cpi->common.current_video_frame > 1) - q = ((int)vp10_convert_qindex_to_q( - cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth)); - else - q = ((int)vp10_convert_qindex_to_q( - cpi->rc.avg_frame_qindex[KEY_FRAME], cpi->common.bit_depth)); - if (q > 16) { - strength = oxcf->arnr_strength; - } else { - strength = oxcf->arnr_strength - ((16 - q) / 2); - if (strength < 0) - strength = 0; - } - - // Adjust number of frames in filter and strength based on gf boost level. - if (frames > group_boost / 150) { - frames = group_boost / 150; - frames += !(frames & 1); - } - - if (strength > group_boost / 300) { - strength = group_boost / 300; - } - - // Adjustments for second level arf in multi arf case. 
- if (cpi->oxcf.pass == 2 && cpi->multi_arf_allowed) { - const GF_GROUP *const gf_group = &cpi->twopass.gf_group; - if (gf_group->rf_level[gf_group->index] != GF_ARF_STD) { - strength >>= 1; - } - } - - *arnr_frames = frames; - *arnr_strength = strength; -} - -void vp10_temporal_filter(VP10_COMP *cpi, int distance) { - RATE_CONTROL *const rc = &cpi->rc; - int frame; - int frames_to_blur; - int start_frame; - int strength; - int frames_to_blur_backward; - int frames_to_blur_forward; - struct scale_factors sf; - YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = {NULL}; - - // Apply context specific adjustments to the arnr filter parameters. - adjust_arnr_filter(cpi, distance, rc->gfu_boost, &frames_to_blur, &strength); - frames_to_blur_backward = (frames_to_blur / 2); - frames_to_blur_forward = ((frames_to_blur - 1) / 2); - start_frame = distance + frames_to_blur_forward; - - // Setup frame pointers, NULL indicates frame not included in filter. - for (frame = 0; frame < frames_to_blur; ++frame) { - const int which_buffer = start_frame - frame; - struct lookahead_entry *buf = vp10_lookahead_peek(cpi->lookahead, - which_buffer); - frames[frames_to_blur - 1 - frame] = &buf->img; - } - - if (frames_to_blur > 0) { - // Setup scaling factors. Scaling on each of the arnr frames is not - // supported. - // ARF is produced at the native frame size and resized when coded. 
-#if CONFIG_VP9_HIGHBITDEPTH - vp10_setup_scale_factors_for_frame(&sf, - frames[0]->y_crop_width, - frames[0]->y_crop_height, - frames[0]->y_crop_width, - frames[0]->y_crop_height, - cpi->common.use_highbitdepth); -#else - vp10_setup_scale_factors_for_frame(&sf, - frames[0]->y_crop_width, - frames[0]->y_crop_height, - frames[0]->y_crop_width, - frames[0]->y_crop_height); -#endif // CONFIG_VP9_HIGHBITDEPTH - } - - temporal_filter_iterate_c(cpi, frames, frames_to_blur, - frames_to_blur_backward, strength, &sf); -} diff --git a/libs/libvpx/vp10/encoder/temporal_filter.h b/libs/libvpx/vp10/encoder/temporal_filter.h deleted file mode 100644 index 6e331e6ad0..0000000000 --- a/libs/libvpx/vp10/encoder/temporal_filter.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_ENCODER_TEMPORAL_FILTER_H_ -#define VP10_ENCODER_TEMPORAL_FILTER_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -void vp10_temporal_filter_init(void); -void vp10_temporal_filter(VP10_COMP *cpi, int distance); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_TEMPORAL_FILTER_H_ diff --git a/libs/libvpx/vp10/encoder/tokenize.c b/libs/libvpx/vp10/encoder/tokenize.c deleted file mode 100644 index a665a3cfea..0000000000 --- a/libs/libvpx/vp10/encoder/tokenize.c +++ /dev/null @@ -1,643 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include -#include -#include - -#include "vpx_mem/vpx_mem.h" - -#include "vp10/common/entropy.h" -#include "vp10/common/pred_common.h" -#include "vp10/common/scan.h" -#include "vp10/common/seg_common.h" - -#include "vp10/encoder/cost.h" -#include "vp10/encoder/encoder.h" -#include "vp10/encoder/tokenize.h" - -static const TOKENVALUE dct_cat_lt_10_value_tokens[] = { - {9, 63}, {9, 61}, {9, 59}, {9, 57}, {9, 55}, {9, 53}, {9, 51}, {9, 49}, - {9, 47}, {9, 45}, {9, 43}, {9, 41}, {9, 39}, {9, 37}, {9, 35}, {9, 33}, - {9, 31}, {9, 29}, {9, 27}, {9, 25}, {9, 23}, {9, 21}, {9, 19}, {9, 17}, - {9, 15}, {9, 13}, {9, 11}, {9, 9}, {9, 7}, {9, 5}, {9, 3}, {9, 1}, - {8, 31}, {8, 29}, {8, 27}, {8, 25}, {8, 23}, {8, 21}, - {8, 19}, {8, 17}, {8, 15}, {8, 13}, {8, 11}, {8, 9}, - {8, 7}, {8, 5}, {8, 3}, {8, 1}, - {7, 15}, {7, 13}, {7, 11}, {7, 9}, {7, 7}, {7, 5}, {7, 3}, {7, 1}, - {6, 7}, {6, 5}, {6, 3}, {6, 1}, {5, 3}, {5, 1}, - {4, 1}, {3, 1}, {2, 1}, {1, 1}, {0, 0}, - {1, 0}, {2, 0}, {3, 0}, {4, 0}, - {5, 0}, {5, 2}, {6, 0}, {6, 2}, {6, 4}, {6, 6}, - {7, 0}, {7, 2}, {7, 4}, {7, 6}, {7, 8}, {7, 10}, {7, 12}, {7, 14}, - {8, 0}, {8, 2}, {8, 4}, {8, 6}, {8, 8}, {8, 10}, {8, 12}, - {8, 14}, {8, 16}, {8, 18}, {8, 20}, {8, 22}, {8, 24}, - {8, 26}, {8, 28}, {8, 30}, {9, 0}, {9, 2}, - {9, 4}, {9, 6}, {9, 8}, {9, 10}, {9, 12}, {9, 14}, {9, 16}, - {9, 18}, {9, 20}, {9, 22}, {9, 24}, {9, 26}, {9, 28}, - {9, 30}, {9, 32}, {9, 34}, {9, 36}, {9, 38}, {9, 40}, - {9, 42}, {9, 44}, {9, 46}, {9, 48}, {9, 50}, {9, 52}, - {9, 54}, {9, 56}, {9, 58}, {9, 60}, {9, 62} -}; -const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens = dct_cat_lt_10_value_tokens + - (sizeof(dct_cat_lt_10_value_tokens) / sizeof(*dct_cat_lt_10_value_tokens)) - / 2; - -// Array indices are identical to previously-existing 
CONTEXT_NODE indices -const vpx_tree_index vp10_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = { - -EOB_TOKEN, 2, // 0 = EOB - -ZERO_TOKEN, 4, // 1 = ZERO - -ONE_TOKEN, 6, // 2 = ONE - 8, 12, // 3 = LOW_VAL - -TWO_TOKEN, 10, // 4 = TWO - -THREE_TOKEN, -FOUR_TOKEN, // 5 = THREE - 14, 16, // 6 = HIGH_LOW - -CATEGORY1_TOKEN, -CATEGORY2_TOKEN, // 7 = CAT_ONE - 18, 20, // 8 = CAT_THREEFOUR - -CATEGORY3_TOKEN, -CATEGORY4_TOKEN, // 9 = CAT_THREE - -CATEGORY5_TOKEN, -CATEGORY6_TOKEN // 10 = CAT_FIVE -}; - -static const vpx_tree_index cat1[2] = {0, 0}; -static const vpx_tree_index cat2[4] = {2, 2, 0, 0}; -static const vpx_tree_index cat3[6] = {2, 2, 4, 4, 0, 0}; -static const vpx_tree_index cat4[8] = {2, 2, 4, 4, 6, 6, 0, 0}; -static const vpx_tree_index cat5[10] = {2, 2, 4, 4, 6, 6, 8, 8, 0, 0}; -static const vpx_tree_index cat6[28] = {2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, - 14, 14, 16, 16, 18, 18, 20, 20, 22, 22, 24, 24, 26, 26, 0, 0}; - -static const int16_t zero_cost[] = {0}; -static const int16_t sign_cost[] = {255, 257}; -static const int16_t cat1_cost[] = {429, 431, 616, 618}; -static const int16_t cat2_cost[] = {624, 626, 727, 729, 848, 850, 951, 953}; -static const int16_t cat3_cost[] = { - 820, 822, 893, 895, 940, 942, 1013, 1015, 1096, 1098, 1169, 1171, 1216, 1218, - 1289, 1291 -}; -static const int16_t cat4_cost[] = { - 1032, 1034, 1075, 1077, 1105, 1107, 1148, 1150, 1194, 1196, 1237, 1239, - 1267, 1269, 1310, 1312, 1328, 1330, 1371, 1373, 1401, 1403, 1444, 1446, - 1490, 1492, 1533, 1535, 1563, 1565, 1606, 1608 -}; -static const int16_t cat5_cost[] = { - 1269, 1271, 1283, 1285, 1306, 1308, 1320, - 1322, 1347, 1349, 1361, 1363, 1384, 1386, 1398, 1400, 1443, 1445, 1457, - 1459, 1480, 1482, 1494, 1496, 1521, 1523, 1535, 1537, 1558, 1560, 1572, - 1574, 1592, 1594, 1606, 1608, 1629, 1631, 1643, 1645, 1670, 1672, 1684, - 1686, 1707, 1709, 1721, 1723, 1766, 1768, 1780, 1782, 1803, 1805, 1817, - 1819, 1844, 1846, 1858, 1860, 1881, 1883, 1895, 1897 -}; -const int16_t 
vp10_cat6_low_cost[256] = { - 1638, 1640, 1646, 1648, 1652, 1654, 1660, 1662, - 1670, 1672, 1678, 1680, 1684, 1686, 1692, 1694, 1711, 1713, 1719, 1721, - 1725, 1727, 1733, 1735, 1743, 1745, 1751, 1753, 1757, 1759, 1765, 1767, - 1787, 1789, 1795, 1797, 1801, 1803, 1809, 1811, 1819, 1821, 1827, 1829, - 1833, 1835, 1841, 1843, 1860, 1862, 1868, 1870, 1874, 1876, 1882, 1884, - 1892, 1894, 1900, 1902, 1906, 1908, 1914, 1916, 1940, 1942, 1948, 1950, - 1954, 1956, 1962, 1964, 1972, 1974, 1980, 1982, 1986, 1988, 1994, 1996, - 2013, 2015, 2021, 2023, 2027, 2029, 2035, 2037, 2045, 2047, 2053, 2055, - 2059, 2061, 2067, 2069, 2089, 2091, 2097, 2099, 2103, 2105, 2111, 2113, - 2121, 2123, 2129, 2131, 2135, 2137, 2143, 2145, 2162, 2164, 2170, 2172, - 2176, 2178, 2184, 2186, 2194, 2196, 2202, 2204, 2208, 2210, 2216, 2218, - 2082, 2084, 2090, 2092, 2096, 2098, 2104, 2106, 2114, 2116, 2122, 2124, - 2128, 2130, 2136, 2138, 2155, 2157, 2163, 2165, 2169, 2171, 2177, 2179, - 2187, 2189, 2195, 2197, 2201, 2203, 2209, 2211, 2231, 2233, 2239, 2241, - 2245, 2247, 2253, 2255, 2263, 2265, 2271, 2273, 2277, 2279, 2285, 2287, - 2304, 2306, 2312, 2314, 2318, 2320, 2326, 2328, 2336, 2338, 2344, 2346, - 2350, 2352, 2358, 2360, 2384, 2386, 2392, 2394, 2398, 2400, 2406, 2408, - 2416, 2418, 2424, 2426, 2430, 2432, 2438, 2440, 2457, 2459, 2465, 2467, - 2471, 2473, 2479, 2481, 2489, 2491, 2497, 2499, 2503, 2505, 2511, 2513, - 2533, 2535, 2541, 2543, 2547, 2549, 2555, 2557, 2565, 2567, 2573, 2575, - 2579, 2581, 2587, 2589, 2606, 2608, 2614, 2616, 2620, 2622, 2628, 2630, - 2638, 2640, 2646, 2648, 2652, 2654, 2660, 2662 -}; -const int16_t vp10_cat6_high_cost[128] = { - 72, 892, 1183, 2003, 1448, 2268, 2559, 3379, - 1709, 2529, 2820, 3640, 3085, 3905, 4196, 5016, 2118, 2938, 3229, 4049, - 3494, 4314, 4605, 5425, 3755, 4575, 4866, 5686, 5131, 5951, 6242, 7062, - 2118, 2938, 3229, 4049, 3494, 4314, 4605, 5425, 3755, 4575, 4866, 5686, - 5131, 5951, 6242, 7062, 4164, 4984, 5275, 6095, 5540, 6360, 6651, 7471, - 
5801, 6621, 6912, 7732, 7177, 7997, 8288, 9108, 2118, 2938, 3229, 4049, - 3494, 4314, 4605, 5425, 3755, 4575, 4866, 5686, 5131, 5951, 6242, 7062, - 4164, 4984, 5275, 6095, 5540, 6360, 6651, 7471, 5801, 6621, 6912, 7732, - 7177, 7997, 8288, 9108, 4164, 4984, 5275, 6095, 5540, 6360, 6651, 7471, - 5801, 6621, 6912, 7732, 7177, 7997, 8288, 9108, 6210, 7030, 7321, 8141, - 7586, 8406, 8697, 9517, 7847, 8667, 8958, 9778, 9223, 10043, 10334, 11154 -}; - -#if CONFIG_VP9_HIGHBITDEPTH -const int16_t vp10_cat6_high10_high_cost[512] = { - 74, 894, 1185, 2005, 1450, 2270, 2561, - 3381, 1711, 2531, 2822, 3642, 3087, 3907, 4198, 5018, 2120, 2940, 3231, - 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, 5688, 5133, 5953, 6244, - 7064, 2120, 2940, 3231, 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, - 5688, 5133, 5953, 6244, 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653, - 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 2120, 2940, 3231, - 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, 5688, 5133, 5953, 6244, - 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803, 6623, 6914, - 7734, 7179, 7999, 8290, 9110, 4166, 4986, 5277, 6097, 5542, 6362, 6653, - 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212, 7032, 7323, - 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336, - 11156, 2120, 2940, 3231, 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, - 5688, 5133, 5953, 6244, 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653, - 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 4166, 4986, 5277, - 6097, 5542, 6362, 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, - 9110, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, - 9780, 9225, 10045, 10336, 11156, 4166, 4986, 5277, 6097, 5542, 6362, 6653, - 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212, 7032, 7323, - 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336, - 11156, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, - 9780, 9225, 
10045, 10336, 11156, 8258, 9078, 9369, 10189, 9634, 10454, - 10745, 11565, 9895, 10715, 11006, 11826, 11271, 12091, 12382, 13202, 2120, - 2940, 3231, 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, 5688, 5133, - 5953, 6244, 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803, - 6623, 6914, 7734, 7179, 7999, 8290, 9110, 4166, 4986, 5277, 6097, 5542, - 6362, 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212, - 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, - 10045, 10336, 11156, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803, - 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212, 7032, 7323, 8143, 7588, - 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336, 11156, 6212, - 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, - 10045, 10336, 11156, 8258, 9078, 9369, 10189, 9634, 10454, 10745, 11565, - 9895, 10715, 11006, 11826, 11271, 12091, 12382, 13202, 4166, 4986, 5277, - 6097, 5542, 6362, 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, - 9110, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, - 9780, 9225, 10045, 10336, 11156, 6212, 7032, 7323, 8143, 7588, 8408, 8699, - 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336, 11156, 8258, 9078, 9369, - 10189, 9634, 10454, 10745, 11565, 9895, 10715, 11006, 11826, 11271, 12091, - 12382, 13202, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, - 8960, 9780, 9225, 10045, 10336, 11156, 8258, 9078, 9369, 10189, 9634, 10454, - 10745, 11565, 9895, 10715, 11006, 11826, 11271, 12091, 12382, 13202, 8258, - 9078, 9369, 10189, 9634, 10454, 10745, 11565, 9895, 10715, 11006, 11826, - 11271, 12091, 12382, 13202, 10304, 11124, 11415, 12235, 11680, 12500, 12791, - 13611, 11941, 12761, 13052, 13872, 13317, 14137, 14428, 15248, -}; -const int16_t vp10_cat6_high12_high_cost[2048] = { - 76, 896, 1187, 2007, 1452, 2272, 2563, - 3383, 1713, 2533, 2824, 3644, 3089, 3909, 4200, 5020, 2122, 2942, 3233, - 4053, 3498, 4318, 4609, 5429, 3759, 
4579, 4870, 5690, 5135, 5955, 6246, - 7066, 2122, 2942, 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, - 5690, 5135, 5955, 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, - 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 2122, 2942, 3233, - 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135, 5955, 6246, - 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, - 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279, 6099, 5544, 6364, 6655, - 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, - 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, - 11158, 2122, 2942, 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, - 5690, 5135, 5955, 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, - 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279, - 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, - 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, - 9782, 9227, 10047, 10338, 11158, 4168, 4988, 5279, 6099, 5544, 6364, 6655, - 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, - 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, - 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, - 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, - 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 2122, - 2942, 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135, - 5955, 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, - 6625, 6916, 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279, 6099, 5544, - 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, - 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, - 10047, 10338, 11158, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, - 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, - 8410, 8701, 9521, 7851, 
8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, - 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, - 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, - 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 4168, 4988, 5279, - 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, - 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, - 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701, - 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, - 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, - 12384, 13204, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, - 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, - 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260, - 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, - 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, - 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 2122, 2942, - 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135, 5955, - 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, - 6916, 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279, 6099, 5544, 6364, - 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, - 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, - 10338, 11158, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, - 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, - 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034, - 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, - 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, - 10717, 11008, 11828, 11273, 12093, 12384, 13204, 4168, 4988, 5279, 6099, - 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 
8001, 8292, 9112, - 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, - 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, - 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, - 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, - 13204, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, - 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, - 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260, - 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, - 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, - 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 4168, 4988, - 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, - 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, - 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, - 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, - 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, - 12093, 12384, 13204, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, - 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, - 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, - 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, - 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, - 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 6214, - 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, - 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, - 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, - 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, - 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 
11943, - 12763, 13054, 13874, 13319, 14139, 14430, 15250, 8260, 9080, 9371, 10191, - 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, - 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, - 13054, 13874, 13319, 14139, 14430, 15250, 10306, 11126, 11417, 12237, 11682, - 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, - 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809, 15100, - 15920, 15365, 16185, 16476, 17296, 2122, 2942, 3233, 4053, 3498, 4318, 4609, - 5429, 3759, 4579, 4870, 5690, 5135, 5955, 6246, 7066, 4168, 4988, 5279, - 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, - 9112, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, - 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, - 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 4168, 4988, 5279, - 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, - 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, - 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701, - 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, - 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, - 12384, 13204, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, - 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, - 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034, - 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, - 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, - 10717, 11008, 11828, 11273, 12093, 12384, 13204, 6214, 7034, 7325, 8145, - 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, - 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, - 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456, - 10747, 11567, 
9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, - 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, - 13319, 14139, 14430, 15250, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, - 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, - 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, - 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, - 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, - 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 6214, 7034, - 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, - 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, - 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, - 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, - 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, - 13054, 13874, 13319, 14139, 14430, 15250, 6214, 7034, 7325, 8145, 7590, - 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, - 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, - 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456, 10747, - 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, - 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, - 14139, 14430, 15250, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, - 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, - 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, - 14430, 15250, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, - 12763, 13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172, 13463, 14283, - 13728, 14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365, 16185, 16476, - 17296, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, - 7736, 7181, 8001, 
8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, - 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325, - 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, - 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, - 11008, 11828, 11273, 12093, 12384, 13204, 6214, 7034, 7325, 8145, 7590, - 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, - 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, - 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456, 10747, - 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, - 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, - 14139, 14430, 15250, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, - 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, - 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, - 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, - 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, - 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 8260, - 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, - 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, - 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 10306, 11126, - 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, - 14139, 14430, 15250, 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659, - 13989, 14809, 15100, 15920, 15365, 16185, 16476, 17296, 6214, 7034, 7325, - 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, - 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, - 11008, 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, - 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, - 10306, 11126, 11417, 
12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, - 13874, 13319, 14139, 14430, 15250, 8260, 9080, 9371, 10191, 9636, 10456, - 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, - 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, - 13319, 14139, 14430, 15250, 10306, 11126, 11417, 12237, 11682, 12502, 12793, - 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172, - 13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365, - 16185, 16476, 17296, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, - 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, - 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, - 14430, 15250, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, - 12763, 13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172, 13463, 14283, - 13728, 14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365, 16185, 16476, - 17296, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, - 13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172, 13463, 14283, 13728, - 14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365, 16185, 16476, 17296, - 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809, 15100, - 15920, 15365, 16185, 16476, 17296, 14398, 15218, 15509, 16329, 15774, 16594, - 16885, 17705, 16035, 16855, 17146, 17966, 17411, 18231, 18522, 19342 -}; -#endif - -#if CONFIG_VP9_HIGHBITDEPTH -static const vpx_tree_index cat1_high10[2] = {0, 0}; -static const vpx_tree_index cat2_high10[4] = {2, 2, 0, 0}; -static const vpx_tree_index cat3_high10[6] = {2, 2, 4, 4, 0, 0}; -static const vpx_tree_index cat4_high10[8] = {2, 2, 4, 4, 6, 6, 0, 0}; -static const vpx_tree_index cat5_high10[10] = {2, 2, 4, 4, 6, 6, 8, 8, 0, 0}; -static const vpx_tree_index cat6_high10[32] = {2, 2, 4, 4, 6, 6, 8, 8, 10, 10, - 12, 12, 14, 14, 16, 16, 18, 18, 20, 20, 22, 22, 24, 24, 26, 26, 28, 28, - 30, 30, 
0, 0}; -static const vpx_tree_index cat1_high12[2] = {0, 0}; -static const vpx_tree_index cat2_high12[4] = {2, 2, 0, 0}; -static const vpx_tree_index cat3_high12[6] = {2, 2, 4, 4, 0, 0}; -static const vpx_tree_index cat4_high12[8] = {2, 2, 4, 4, 6, 6, 0, 0}; -static const vpx_tree_index cat5_high12[10] = {2, 2, 4, 4, 6, 6, 8, 8, 0, 0}; -static const vpx_tree_index cat6_high12[36] = {2, 2, 4, 4, 6, 6, 8, 8, 10, 10, - 12, 12, 14, 14, 16, 16, 18, 18, 20, 20, 22, 22, 24, 24, 26, 26, 28, 28, - 30, 30, 32, 32, 34, 34, 0, 0}; -#endif - -const vp10_extra_bit vp10_extra_bits[ENTROPY_TOKENS] = { - {0, 0, 0, 0, zero_cost}, // ZERO_TOKEN - {0, 0, 0, 1, sign_cost}, // ONE_TOKEN - {0, 0, 0, 2, sign_cost}, // TWO_TOKEN - {0, 0, 0, 3, sign_cost}, // THREE_TOKEN - {0, 0, 0, 4, sign_cost}, // FOUR_TOKEN - {cat1, vp10_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost}, // CATEGORY1_TOKEN - {cat2, vp10_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost}, // CATEGORY2_TOKEN - {cat3, vp10_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost}, // CATEGORY3_TOKEN - {cat4, vp10_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost}, // CATEGORY4_TOKEN - {cat5, vp10_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost}, // CATEGORY5_TOKEN - {cat6, vp10_cat6_prob, 14, CAT6_MIN_VAL, 0}, // CATEGORY6_TOKEN - {0, 0, 0, 0, zero_cost} // EOB_TOKEN -}; - -#if CONFIG_VP9_HIGHBITDEPTH -const vp10_extra_bit vp10_extra_bits_high10[ENTROPY_TOKENS] = { - {0, 0, 0, 0, zero_cost}, // ZERO - {0, 0, 0, 1, sign_cost}, // ONE - {0, 0, 0, 2, sign_cost}, // TWO - {0, 0, 0, 3, sign_cost}, // THREE - {0, 0, 0, 4, sign_cost}, // FOUR - {cat1_high10, vp10_cat1_prob_high10, 1, CAT1_MIN_VAL, cat1_cost}, // CAT1 - {cat2_high10, vp10_cat2_prob_high10, 2, CAT2_MIN_VAL, cat2_cost}, // CAT2 - {cat3_high10, vp10_cat3_prob_high10, 3, CAT3_MIN_VAL, cat3_cost}, // CAT3 - {cat4_high10, vp10_cat4_prob_high10, 4, CAT4_MIN_VAL, cat4_cost}, // CAT4 - {cat5_high10, vp10_cat5_prob_high10, 5, CAT5_MIN_VAL, cat5_cost}, // CAT5 - {cat6_high10, vp10_cat6_prob_high10, 16, CAT6_MIN_VAL, 0}, // CAT6 - {0, 0, 
0, 0, zero_cost} // EOB -}; -const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS] = { - {0, 0, 0, 0, zero_cost}, // ZERO - {0, 0, 0, 1, sign_cost}, // ONE - {0, 0, 0, 2, sign_cost}, // TWO - {0, 0, 0, 3, sign_cost}, // THREE - {0, 0, 0, 4, sign_cost}, // FOUR - {cat1_high12, vp10_cat1_prob_high12, 1, CAT1_MIN_VAL, cat1_cost}, // CAT1 - {cat2_high12, vp10_cat2_prob_high12, 2, CAT2_MIN_VAL, cat2_cost}, // CAT2 - {cat3_high12, vp10_cat3_prob_high12, 3, CAT3_MIN_VAL, cat3_cost}, // CAT3 - {cat4_high12, vp10_cat4_prob_high12, 4, CAT4_MIN_VAL, cat4_cost}, // CAT4 - {cat5_high12, vp10_cat5_prob_high12, 5, CAT5_MIN_VAL, cat5_cost}, // CAT5 - {cat6_high12, vp10_cat6_prob_high12, 18, CAT6_MIN_VAL, 0}, // CAT6 - {0, 0, 0, 0, zero_cost} // EOB -}; -#endif - -const struct vp10_token vp10_coef_encodings[ENTROPY_TOKENS] = { - {2, 2}, {6, 3}, {28, 5}, {58, 6}, {59, 6}, {60, 6}, {61, 6}, {124, 7}, - {125, 7}, {126, 7}, {127, 7}, {0, 1} -}; - - -struct tokenize_b_args { - VP10_COMP *cpi; - ThreadData *td; - TOKENEXTRA **tp; -}; - -static void set_entropy_context_b(int plane, int block, - int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { - struct tokenize_b_args* const args = arg; - ThreadData *const td = args->td; - MACROBLOCK *const x = &td->mb; - MACROBLOCKD *const xd = &x->e_mbd; - struct macroblock_plane *p = &x->plane[plane]; - struct macroblockd_plane *pd = &xd->plane[plane]; - vp10_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, - blk_col, blk_row); -} - -static INLINE void add_token(TOKENEXTRA **t, const vpx_prob *context_tree, - int32_t extra, uint8_t token, - uint8_t skip_eob_node, - unsigned int *counts) { - (*t)->token = token; - (*t)->extra = extra; - (*t)->context_tree = context_tree; - (*t)->skip_eob_node = skip_eob_node; - (*t)++; - ++counts[token]; -} - -static INLINE void add_token_no_extra(TOKENEXTRA **t, - const vpx_prob *context_tree, - uint8_t token, - uint8_t skip_eob_node, - unsigned int *counts) { - 
(*t)->token = token; - (*t)->context_tree = context_tree; - (*t)->skip_eob_node = skip_eob_node; - (*t)++; - ++counts[token]; -} - -static INLINE int get_tx_eob(const struct segmentation *seg, int segment_id, - TX_SIZE tx_size) { - const int eob_max = 16 << (tx_size << 1); - return segfeature_active(seg, segment_id, SEG_LVL_SKIP) ? 0 : eob_max; -} - -static void tokenize_b(int plane, int block, int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { - struct tokenize_b_args* const args = arg; - VP10_COMP *cpi = args->cpi; - ThreadData *const td = args->td; - MACROBLOCK *const x = &td->mb; - MACROBLOCKD *const xd = &x->e_mbd; - TOKENEXTRA **tp = args->tp; - uint8_t token_cache[32 * 32]; - struct macroblock_plane *p = &x->plane[plane]; - struct macroblockd_plane *pd = &xd->plane[plane]; - MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi; - int pt; /* near block/prev token context index */ - int c; - TOKENEXTRA *t = *tp; /* store tokens starting here */ - int eob = p->eobs[block]; - const PLANE_TYPE type = pd->plane_type; - const tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block); - const int segment_id = mbmi->segment_id; - const int16_t *scan, *nb; - const TX_TYPE tx_type = get_tx_type(type, xd, block); - const scan_order *const so = get_scan(tx_size, tx_type); - const int ref = is_inter_block(mbmi); - unsigned int (*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] = - td->rd_counts.coef_counts[tx_size][type][ref]; - vpx_prob (*const coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] = - cpi->common.fc->coef_probs[tx_size][type][ref]; - unsigned int (*const eob_branch)[COEFF_CONTEXTS] = - td->counts->eob_branch[tx_size][type][ref]; - const uint8_t *const band = get_band_translate(tx_size); - const int seg_eob = get_tx_eob(&cpi->common.seg, segment_id, tx_size); - int16_t token; - EXTRABIT extra; - pt = get_entropy_context(tx_size, pd->above_context + blk_col, - pd->left_context + blk_row); - scan = so->scan; - nb = so->neighbors; - c = 0; - - while (c < 
eob) { - int v = 0; - int skip_eob = 0; - v = qcoeff[scan[c]]; - - while (!v) { - add_token_no_extra(&t, coef_probs[band[c]][pt], ZERO_TOKEN, skip_eob, - counts[band[c]][pt]); - eob_branch[band[c]][pt] += !skip_eob; - - skip_eob = 1; - token_cache[scan[c]] = 0; - ++c; - pt = get_coef_context(nb, token_cache, c); - v = qcoeff[scan[c]]; - } - - vp10_get_token_extra(v, &token, &extra); - - add_token(&t, coef_probs[band[c]][pt], extra, (uint8_t)token, - (uint8_t)skip_eob, counts[band[c]][pt]); - eob_branch[band[c]][pt] += !skip_eob; - - token_cache[scan[c]] = vp10_pt_energy_class[token]; - ++c; - pt = get_coef_context(nb, token_cache, c); - } - if (c < seg_eob) { - add_token_no_extra(&t, coef_probs[band[c]][pt], EOB_TOKEN, 0, - counts[band[c]][pt]); - ++eob_branch[band[c]][pt]; - } - - *tp = t; - - vp10_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, blk_col, blk_row); -} - -struct is_skippable_args { - uint16_t *eobs; - int *skippable; -}; -static void is_skippable(int plane, int block, int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, TX_SIZE tx_size, - void *argv) { - struct is_skippable_args *args = argv; - (void)plane; - (void)plane_bsize; - (void)tx_size; - (void)blk_row; - (void)blk_col; - args->skippable[0] &= (!args->eobs[block]); -} - -// TODO(yaowu): rewrite and optimize this function to remove the usage of -// vp10_foreach_transform_block() and simplify is_skippable(). -int vp10_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) { - int result = 1; - struct is_skippable_args args = {x->plane[plane].eobs, &result}; - vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable, - &args); - return result; -} - -static void has_high_freq_coeff(int plane, int block, int blk_row, int blk_col, - BLOCK_SIZE plane_bsize, TX_SIZE tx_size, - void *argv) { - struct is_skippable_args *args = argv; - int eobs = (tx_size == TX_4X4) ? 
3 : 10; - (void) plane; - (void) plane_bsize; - (void) blk_row; - (void) blk_col; - - *(args->skippable) |= (args->eobs[block] > eobs); -} - -int vp10_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) { - int result = 0; - struct is_skippable_args args = {x->plane[plane].eobs, &result}; - vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, - has_high_freq_coeff, &args); - return result; -} - -void vp10_tokenize_sb(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t, - int dry_run, BLOCK_SIZE bsize) { - VP10_COMMON *const cm = &cpi->common; - MACROBLOCK *const x = &td->mb; - MACROBLOCKD *const xd = &x->e_mbd; - MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - const int ctx = vp10_get_skip_context(xd); - const int skip_inc = !segfeature_active(&cm->seg, mbmi->segment_id, - SEG_LVL_SKIP); - struct tokenize_b_args arg = {cpi, td, t}; - if (mbmi->skip) { - if (!dry_run) - td->counts->skip[ctx][1] += skip_inc; - reset_skip_context(xd, bsize); - return; - } - - if (!dry_run) { - int plane; - - td->counts->skip[ctx][0] += skip_inc; - - for (plane = 0; plane < MAX_MB_PLANE; ++plane) { - vp10_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b, - &arg); - (*t)->token = EOSB_TOKEN; - (*t)++; - } - } else { - vp10_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg); - } -} diff --git a/libs/libvpx/vp10/encoder/tokenize.h b/libs/libvpx/vp10/encoder/tokenize.h deleted file mode 100644 index 5bad415a9a..0000000000 --- a/libs/libvpx/vp10/encoder/tokenize.h +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP10_ENCODER_TOKENIZE_H_ -#define VP10_ENCODER_TOKENIZE_H_ - -#include "vp10/common/entropy.h" - -#include "vp10/encoder/block.h" -#include "vp10/encoder/treewriter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -#define EOSB_TOKEN 127 // Not signalled, encoder only - -#if CONFIG_VP9_HIGHBITDEPTH - typedef int32_t EXTRABIT; -#else - typedef int16_t EXTRABIT; -#endif - - -typedef struct { - int16_t token; - EXTRABIT extra; -} TOKENVALUE; - -typedef struct { - const vpx_prob *context_tree; - EXTRABIT extra; - uint8_t token; - uint8_t skip_eob_node; -} TOKENEXTRA; - -extern const vpx_tree_index vp10_coef_tree[]; -extern const vpx_tree_index vp10_coef_con_tree[]; -extern const struct vp10_token vp10_coef_encodings[]; - -int vp10_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane); -int vp10_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane); - -struct VP10_COMP; -struct ThreadData; - -void vp10_tokenize_sb(struct VP10_COMP *cpi, struct ThreadData *td, - TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize); - -extern const int16_t *vp10_dct_value_cost_ptr; -/* TODO: The Token field should be broken out into a separate char array to - * improve cache locality, since it's needed for costing when the rest of the - * fields are not. 
- */ -extern const TOKENVALUE *vp10_dct_value_tokens_ptr; -extern const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens; -extern const int16_t vp10_cat6_low_cost[256]; -extern const int16_t vp10_cat6_high_cost[128]; -extern const int16_t vp10_cat6_high10_high_cost[512]; -extern const int16_t vp10_cat6_high12_high_cost[2048]; -static INLINE int16_t vp10_get_cost(int16_t token, EXTRABIT extrabits, - const int16_t *cat6_high_table) { - if (token != CATEGORY6_TOKEN) - return vp10_extra_bits[token].cost[extrabits]; - return vp10_cat6_low_cost[extrabits & 0xff] - + cat6_high_table[extrabits >> 8]; -} - -#if CONFIG_VP9_HIGHBITDEPTH -static INLINE const int16_t* vp10_get_high_cost_table(int bit_depth) { - return bit_depth == 8 ? vp10_cat6_high_cost - : (bit_depth == 10 ? vp10_cat6_high10_high_cost : - vp10_cat6_high12_high_cost); -} -#else -static INLINE const int16_t* vp10_get_high_cost_table(int bit_depth) { - (void) bit_depth; - return vp10_cat6_high_cost; -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -static INLINE void vp10_get_token_extra(int v, int16_t *token, EXTRABIT *extra) { - if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) { - *token = CATEGORY6_TOKEN; - if (v >= CAT6_MIN_VAL) - *extra = 2 * v - 2 * CAT6_MIN_VAL; - else - *extra = -2 * v - 2 * CAT6_MIN_VAL + 1; - return; - } - *token = vp10_dct_cat_lt_10_value_tokens[v].token; - *extra = vp10_dct_cat_lt_10_value_tokens[v].extra; -} -static INLINE int16_t vp10_get_token(int v) { - if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) - return 10; - return vp10_dct_cat_lt_10_value_tokens[v].token; -} - - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_TOKENIZE_H_ diff --git a/libs/libvpx/vp10/encoder/treewriter.c b/libs/libvpx/vp10/encoder/treewriter.c deleted file mode 100644 index 1f42f32a11..0000000000 --- a/libs/libvpx/vp10/encoder/treewriter.c +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "vp10/encoder/treewriter.h" - -static void tree2tok(struct vp10_token *tokens, const vpx_tree_index *tree, - int i, int v, int l) { - v += v; - ++l; - - do { - const vpx_tree_index j = tree[i++]; - if (j <= 0) { - tokens[-j].value = v; - tokens[-j].len = l; - } else { - tree2tok(tokens, tree, j, v, l); - } - } while (++v & 1); -} - -void vp10_tokens_from_tree(struct vp10_token *tokens, - const vpx_tree_index *tree) { - tree2tok(tokens, tree, 0, 0, 0); -} - -static unsigned int convert_distribution(unsigned int i, vpx_tree tree, - unsigned int branch_ct[][2], - const unsigned int num_events[]) { - unsigned int left, right; - - if (tree[i] <= 0) - left = num_events[-tree[i]]; - else - left = convert_distribution(tree[i], tree, branch_ct, num_events); - - if (tree[i + 1] <= 0) - right = num_events[-tree[i + 1]]; - else - right = convert_distribution(tree[i + 1], tree, branch_ct, num_events); - - branch_ct[i >> 1][0] = left; - branch_ct[i >> 1][1] = right; - return left + right; -} - -void vp10_tree_probs_from_distribution(vpx_tree tree, - unsigned int branch_ct[/* n-1 */][2], - const unsigned int num_events[/* n */]) { - convert_distribution(0, tree, branch_ct, num_events); -} diff --git a/libs/libvpx/vp10/encoder/treewriter.h b/libs/libvpx/vp10/encoder/treewriter.h deleted file mode 100644 index 6b76a03e47..0000000000 --- a/libs/libvpx/vp10/encoder/treewriter.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VP10_ENCODER_TREEWRITER_H_ -#define VP10_ENCODER_TREEWRITER_H_ - -#include "vpx_dsp/bitwriter.h" - -#ifdef __cplusplus -extern "C" { -#endif - -void vp10_tree_probs_from_distribution(vpx_tree tree, - unsigned int branch_ct[ /* n - 1 */ ][2], - const unsigned int num_events[ /* n */ ]); - -struct vp10_token { - int value; - int len; -}; - -void vp10_tokens_from_tree(struct vp10_token*, const vpx_tree_index *); - -static INLINE void vp10_write_tree(vpx_writer *w, const vpx_tree_index *tree, - const vpx_prob *probs, int bits, int len, - vpx_tree_index i) { - do { - const int bit = (bits >> --len) & 1; - vpx_write(w, bit, probs[i >> 1]); - i = tree[i + bit]; - } while (len); -} - -static INLINE void vp10_write_token(vpx_writer *w, const vpx_tree_index *tree, - const vpx_prob *probs, - const struct vp10_token *token) { - vp10_write_tree(w, tree, probs, token->value, token->len, 0); -} - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP10_ENCODER_TREEWRITER_H_ diff --git a/libs/libvpx/vp10/encoder/x86/dct_mmx.asm b/libs/libvpx/vp10/encoder/x86/dct_mmx.asm deleted file mode 100644 index 2327fe9e6c..0000000000 --- a/libs/libvpx/vp10/encoder/x86/dct_mmx.asm +++ /dev/null @@ -1,104 +0,0 @@ -; -; Copyright (c) 2014 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - -%define private_prefix vp10 - -%include "third_party/x86inc/x86inc.asm" - -SECTION .text - -%macro TRANSFORM_COLS 0 - paddw m0, m1 - movq m4, m0 - psubw m3, m2 - psubw m4, m3 - psraw m4, 1 - movq m5, m4 - psubw m5, m1 ;b1 - psubw m4, m2 ;c1 - psubw m0, m4 - paddw m3, m5 - ; m0 a0 - SWAP 1, 4 ; m1 c1 - SWAP 2, 3 ; m2 d1 - SWAP 3, 5 ; m3 b1 -%endmacro - -%macro TRANSPOSE_4X4 0 - movq m4, m0 - movq m5, m2 - punpcklwd m4, m1 - punpckhwd m0, m1 - punpcklwd m5, m3 - punpckhwd m2, m3 - movq m1, m4 - movq m3, m0 - punpckldq m1, m5 - punpckhdq m4, m5 - punpckldq m3, m2 - punpckhdq m0, m2 - SWAP 2, 3, 0, 1, 4 -%endmacro - -INIT_MMX mmx -cglobal fwht4x4, 3, 4, 8, input, output, stride - lea r3q, [inputq + strideq*4] - movq m0, [inputq] ;a1 - movq m1, [inputq + strideq*2] ;b1 - movq m2, [r3q] ;c1 - movq m3, [r3q + strideq*2] ;d1 - - TRANSFORM_COLS - TRANSPOSE_4X4 - TRANSFORM_COLS - TRANSPOSE_4X4 - - psllw m0, 2 - psllw m1, 2 - psllw m2, 2 - psllw m3, 2 - -%if CONFIG_VP9_HIGHBITDEPTH - pxor m4, m4 - pxor m5, m5 - pcmpgtw m4, m0 - pcmpgtw m5, m1 - movq m6, m0 - movq m7, m1 - punpcklwd m0, m4 - punpcklwd m1, m5 - punpckhwd m6, m4 - punpckhwd m7, m5 - movq [outputq], m0 - movq [outputq + 8], m6 - movq [outputq + 16], m1 - movq [outputq + 24], m7 - pxor m4, m4 - pxor m5, m5 - pcmpgtw m4, m2 - pcmpgtw m5, m3 - movq m6, m2 - movq m7, m3 - punpcklwd m2, m4 - punpcklwd m3, m5 - punpckhwd m6, m4 - punpckhwd m7, m5 - movq [outputq + 32], m2 - movq [outputq + 40], m6 - movq [outputq + 48], m3 - movq [outputq + 56], m7 -%else - movq [outputq], m0 - movq [outputq + 8], m1 - movq [outputq + 16], m2 - movq [outputq + 24], m3 -%endif - - RET diff --git a/libs/libvpx/vp10/encoder/x86/dct_sse2.c b/libs/libvpx/vp10/encoder/x86/dct_sse2.c deleted file mode 100644 index e1111570a2..0000000000 --- a/libs/libvpx/vp10/encoder/x86/dct_sse2.c +++ /dev/null @@ -1,2058 +0,0 @@ -/* - * Copyright (c) 2012 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include // SSE2 - -#include "./vp10_rtcd.h" -#include "./vpx_dsp_rtcd.h" -#include "vpx_dsp/txfm_common.h" -#include "vpx_dsp/x86/fwd_txfm_sse2.h" -#include "vpx_dsp/x86/txfm_common_sse2.h" -#include "vpx_ports/mem.h" - -static INLINE void load_buffer_4x4(const int16_t *input, __m128i *in, - int stride) { - const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1); - const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0); - __m128i mask; - - in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride)); - in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride)); - in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride)); - in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride)); - - in[0] = _mm_slli_epi16(in[0], 4); - in[1] = _mm_slli_epi16(in[1], 4); - in[2] = _mm_slli_epi16(in[2], 4); - in[3] = _mm_slli_epi16(in[3], 4); - - mask = _mm_cmpeq_epi16(in[0], k__nonzero_bias_a); - in[0] = _mm_add_epi16(in[0], mask); - in[0] = _mm_add_epi16(in[0], k__nonzero_bias_b); -} - -static INLINE void write_buffer_4x4(tran_low_t *output, __m128i *res) { - const __m128i kOne = _mm_set1_epi16(1); - __m128i in01 = _mm_unpacklo_epi64(res[0], res[1]); - __m128i in23 = _mm_unpacklo_epi64(res[2], res[3]); - __m128i out01 = _mm_add_epi16(in01, kOne); - __m128i out23 = _mm_add_epi16(in23, kOne); - out01 = _mm_srai_epi16(out01, 2); - out23 = _mm_srai_epi16(out23, 2); - store_output(&out01, (output + 0 * 8)); - store_output(&out23, (output + 1 * 8)); -} - -static INLINE void transpose_4x4(__m128i *res) { - // Combine and transpose - // 00 01 02 03 20 21 22 23 - // 10 11 12 13 30 31 32 33 - 
const __m128i tr0_0 = _mm_unpacklo_epi16(res[0], res[1]); - const __m128i tr0_1 = _mm_unpackhi_epi16(res[0], res[1]); - - // 00 10 01 11 02 12 03 13 - // 20 30 21 31 22 32 23 33 - res[0] = _mm_unpacklo_epi32(tr0_0, tr0_1); - res[2] = _mm_unpackhi_epi32(tr0_0, tr0_1); - - // 00 10 20 30 01 11 21 31 - // 02 12 22 32 03 13 23 33 - // only use the first 4 16-bit integers - res[1] = _mm_unpackhi_epi64(res[0], res[0]); - res[3] = _mm_unpackhi_epi64(res[2], res[2]); -} - -static void fdct4_sse2(__m128i *in) { - const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64); - const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); - const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - - __m128i u[4], v[4]; - u[0]=_mm_unpacklo_epi16(in[0], in[1]); - u[1]=_mm_unpacklo_epi16(in[3], in[2]); - - v[0] = _mm_add_epi16(u[0], u[1]); - v[1] = _mm_sub_epi16(u[0], u[1]); - - u[0] = _mm_madd_epi16(v[0], k__cospi_p16_p16); // 0 - u[1] = _mm_madd_epi16(v[0], k__cospi_p16_m16); // 2 - u[2] = _mm_madd_epi16(v[1], k__cospi_p08_p24); // 1 - u[3] = _mm_madd_epi16(v[1], k__cospi_p24_m08); // 3 - - v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); - v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); - v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); - v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); - u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); - u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); - u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); - u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); - - in[0] = _mm_packs_epi32(u[0], u[1]); - in[1] = _mm_packs_epi32(u[2], u[3]); - transpose_4x4(in); -} - -static void fadst4_sse2(__m128i *in) { - const __m128i k__sinpi_p01_p02 = pair_set_epi16(sinpi_1_9, sinpi_2_9); - const __m128i k__sinpi_p04_m01 = pair_set_epi16(sinpi_4_9, -sinpi_1_9); - const __m128i 
k__sinpi_p03_p04 = pair_set_epi16(sinpi_3_9, sinpi_4_9); - const __m128i k__sinpi_m03_p02 = pair_set_epi16(-sinpi_3_9, sinpi_2_9); - const __m128i k__sinpi_p03_p03 = _mm_set1_epi16((int16_t)sinpi_3_9); - const __m128i kZero = _mm_set1_epi16(0); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - __m128i u[8], v[8]; - __m128i in7 = _mm_add_epi16(in[0], in[1]); - - u[0] = _mm_unpacklo_epi16(in[0], in[1]); - u[1] = _mm_unpacklo_epi16(in[2], in[3]); - u[2] = _mm_unpacklo_epi16(in7, kZero); - u[3] = _mm_unpacklo_epi16(in[2], kZero); - u[4] = _mm_unpacklo_epi16(in[3], kZero); - - v[0] = _mm_madd_epi16(u[0], k__sinpi_p01_p02); // s0 + s2 - v[1] = _mm_madd_epi16(u[1], k__sinpi_p03_p04); // s4 + s5 - v[2] = _mm_madd_epi16(u[2], k__sinpi_p03_p03); // x1 - v[3] = _mm_madd_epi16(u[0], k__sinpi_p04_m01); // s1 - s3 - v[4] = _mm_madd_epi16(u[1], k__sinpi_m03_p02); // -s4 + s6 - v[5] = _mm_madd_epi16(u[3], k__sinpi_p03_p03); // s4 - v[6] = _mm_madd_epi16(u[4], k__sinpi_p03_p03); - - u[0] = _mm_add_epi32(v[0], v[1]); - u[1] = _mm_sub_epi32(v[2], v[6]); - u[2] = _mm_add_epi32(v[3], v[4]); - u[3] = _mm_sub_epi32(u[2], u[0]); - u[4] = _mm_slli_epi32(v[5], 2); - u[5] = _mm_sub_epi32(u[4], v[5]); - u[6] = _mm_add_epi32(u[3], u[5]); - - v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); - v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); - v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); - v[3] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); - - u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); - u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); - u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); - u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); - - in[0] = _mm_packs_epi32(u[0], u[2]); - in[1] = _mm_packs_epi32(u[1], u[3]); - transpose_4x4(in); -} - -void vp10_fht4x4_sse2(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { - __m128i in[4]; - - switch (tx_type) { - case DCT_DCT: - vpx_fdct4x4_sse2(input, output, stride); - break; - case ADST_DCT: - 
load_buffer_4x4(input, in, stride); - fadst4_sse2(in); - fdct4_sse2(in); - write_buffer_4x4(output, in); - break; - case DCT_ADST: - load_buffer_4x4(input, in, stride); - fdct4_sse2(in); - fadst4_sse2(in); - write_buffer_4x4(output, in); - break; - case ADST_ADST: - load_buffer_4x4(input, in, stride); - fadst4_sse2(in); - fadst4_sse2(in); - write_buffer_4x4(output, in); - break; - default: - assert(0); - break; - } -} - -void vp10_fdct8x8_quant_sse2(const int16_t *input, int stride, - int16_t* coeff_ptr, intptr_t n_coeffs, - int skip_block, const int16_t* zbin_ptr, - const int16_t* round_ptr, const int16_t* quant_ptr, - const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr, - int16_t* dqcoeff_ptr, const int16_t* dequant_ptr, - uint16_t* eob_ptr, - const int16_t* scan_ptr, - const int16_t* iscan_ptr) { - __m128i zero; - int pass; - // Constants - // When we use them, in one case, they are all the same. In all others - // it's a pair of them that we need to repeat four times. This is done - // by constructing the 32 bit constant corresponding to that pair. 
- const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64); - const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); - const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); - const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); - const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); - const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); - const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - // Load input - __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); - __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); - __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); - __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); - __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride)); - __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride)); - __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride)); - __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride)); - __m128i *in[8]; - int index = 0; - - (void)scan_ptr; - (void)zbin_ptr; - (void)quant_shift_ptr; - (void)coeff_ptr; - - // Pre-condition input (shift by two) - in0 = _mm_slli_epi16(in0, 2); - in1 = _mm_slli_epi16(in1, 2); - in2 = _mm_slli_epi16(in2, 2); - in3 = _mm_slli_epi16(in3, 2); - in4 = _mm_slli_epi16(in4, 2); - in5 = _mm_slli_epi16(in5, 2); - in6 = _mm_slli_epi16(in6, 2); - in7 = _mm_slli_epi16(in7, 2); - - in[0] = &in0; - in[1] = &in1; - in[2] = &in2; - in[3] = &in3; - in[4] = &in4; - in[5] = &in5; - in[6] = &in6; - in[7] = &in7; - - // We do two passes, first the columns, then the rows. The results of the - // first pass are transposed so that the same column code can be reused. 
The - // results of the second pass are also transposed so that the rows (processed - // as columns) are put back in row positions. - for (pass = 0; pass < 2; pass++) { - // To store results of each pass before the transpose. - __m128i res0, res1, res2, res3, res4, res5, res6, res7; - // Add/subtract - const __m128i q0 = _mm_add_epi16(in0, in7); - const __m128i q1 = _mm_add_epi16(in1, in6); - const __m128i q2 = _mm_add_epi16(in2, in5); - const __m128i q3 = _mm_add_epi16(in3, in4); - const __m128i q4 = _mm_sub_epi16(in3, in4); - const __m128i q5 = _mm_sub_epi16(in2, in5); - const __m128i q6 = _mm_sub_epi16(in1, in6); - const __m128i q7 = _mm_sub_epi16(in0, in7); - // Work on first four results - { - // Add/subtract - const __m128i r0 = _mm_add_epi16(q0, q3); - const __m128i r1 = _mm_add_epi16(q1, q2); - const __m128i r2 = _mm_sub_epi16(q1, q2); - const __m128i r3 = _mm_sub_epi16(q0, q3); - // Interleave to do the multiply by constants which gets us into 32bits - const __m128i t0 = _mm_unpacklo_epi16(r0, r1); - const __m128i t1 = _mm_unpackhi_epi16(r0, r1); - const __m128i t2 = _mm_unpacklo_epi16(r2, r3); - const __m128i t3 = _mm_unpackhi_epi16(r2, r3); - const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16); - const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16); - const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16); - const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16); - const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08); - const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08); - const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24); - const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24); - // dct_const_round_shift - const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); - const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); - const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); - const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); - const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); - const __m128i 
v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); - const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); - const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); - const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); - const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); - const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); - const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); - const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); - const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); - const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); - const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); - // Combine - res0 = _mm_packs_epi32(w0, w1); - res4 = _mm_packs_epi32(w2, w3); - res2 = _mm_packs_epi32(w4, w5); - res6 = _mm_packs_epi32(w6, w7); - } - // Work on next four results - { - // Interleave to do the multiply by constants which gets us into 32bits - const __m128i d0 = _mm_unpacklo_epi16(q6, q5); - const __m128i d1 = _mm_unpackhi_epi16(q6, q5); - const __m128i e0 = _mm_madd_epi16(d0, k__cospi_p16_m16); - const __m128i e1 = _mm_madd_epi16(d1, k__cospi_p16_m16); - const __m128i e2 = _mm_madd_epi16(d0, k__cospi_p16_p16); - const __m128i e3 = _mm_madd_epi16(d1, k__cospi_p16_p16); - // dct_const_round_shift - const __m128i f0 = _mm_add_epi32(e0, k__DCT_CONST_ROUNDING); - const __m128i f1 = _mm_add_epi32(e1, k__DCT_CONST_ROUNDING); - const __m128i f2 = _mm_add_epi32(e2, k__DCT_CONST_ROUNDING); - const __m128i f3 = _mm_add_epi32(e3, k__DCT_CONST_ROUNDING); - const __m128i s0 = _mm_srai_epi32(f0, DCT_CONST_BITS); - const __m128i s1 = _mm_srai_epi32(f1, DCT_CONST_BITS); - const __m128i s2 = _mm_srai_epi32(f2, DCT_CONST_BITS); - const __m128i s3 = _mm_srai_epi32(f3, DCT_CONST_BITS); - // Combine - const __m128i r0 = _mm_packs_epi32(s0, s1); - const __m128i r1 = _mm_packs_epi32(s2, s3); - // Add/subtract - const __m128i x0 = _mm_add_epi16(q4, r0); - const __m128i x1 = _mm_sub_epi16(q4, r0); - const __m128i x2 = _mm_sub_epi16(q7, r1); - const 
__m128i x3 = _mm_add_epi16(q7, r1); - // Interleave to do the multiply by constants which gets us into 32bits - const __m128i t0 = _mm_unpacklo_epi16(x0, x3); - const __m128i t1 = _mm_unpackhi_epi16(x0, x3); - const __m128i t2 = _mm_unpacklo_epi16(x1, x2); - const __m128i t3 = _mm_unpackhi_epi16(x1, x2); - const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04); - const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04); - const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28); - const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28); - const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20); - const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p12_p20); - const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12); - const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12); - // dct_const_round_shift - const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); - const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); - const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); - const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); - const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); - const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); - const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); - const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); - const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); - const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); - const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); - const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); - const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); - const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); - const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); - const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); - // Combine - res1 = _mm_packs_epi32(w0, w1); - res7 = _mm_packs_epi32(w2, w3); - res5 = _mm_packs_epi32(w4, w5); - res3 = _mm_packs_epi32(w6, w7); - } - // Transpose the 8x8. 
- { - // 00 01 02 03 04 05 06 07 - // 10 11 12 13 14 15 16 17 - // 20 21 22 23 24 25 26 27 - // 30 31 32 33 34 35 36 37 - // 40 41 42 43 44 45 46 47 - // 50 51 52 53 54 55 56 57 - // 60 61 62 63 64 65 66 67 - // 70 71 72 73 74 75 76 77 - const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1); - const __m128i tr0_1 = _mm_unpacklo_epi16(res2, res3); - const __m128i tr0_2 = _mm_unpackhi_epi16(res0, res1); - const __m128i tr0_3 = _mm_unpackhi_epi16(res2, res3); - const __m128i tr0_4 = _mm_unpacklo_epi16(res4, res5); - const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7); - const __m128i tr0_6 = _mm_unpackhi_epi16(res4, res5); - const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7); - // 00 10 01 11 02 12 03 13 - // 20 30 21 31 22 32 23 33 - // 04 14 05 15 06 16 07 17 - // 24 34 25 35 26 36 27 37 - // 40 50 41 51 42 52 43 53 - // 60 70 61 71 62 72 63 73 - // 54 54 55 55 56 56 57 57 - // 64 74 65 75 66 76 67 77 - const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); - const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); - const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); - const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); - const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); - const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); - const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); - const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); - // 00 10 20 30 01 11 21 31 - // 40 50 60 70 41 51 61 71 - // 02 12 22 32 03 13 23 33 - // 42 52 62 72 43 53 63 73 - // 04 14 24 34 05 15 21 36 - // 44 54 64 74 45 55 61 76 - // 06 16 26 36 07 17 27 37 - // 46 56 66 76 47 57 67 77 - in0 = _mm_unpacklo_epi64(tr1_0, tr1_4); - in1 = _mm_unpackhi_epi64(tr1_0, tr1_4); - in2 = _mm_unpacklo_epi64(tr1_2, tr1_6); - in3 = _mm_unpackhi_epi64(tr1_2, tr1_6); - in4 = _mm_unpacklo_epi64(tr1_1, tr1_5); - in5 = _mm_unpackhi_epi64(tr1_1, tr1_5); - in6 = _mm_unpacklo_epi64(tr1_3, tr1_7); - in7 = _mm_unpackhi_epi64(tr1_3, tr1_7); - // 00 10 20 30 40 50 60 70 - // 01 11 21 31 41 51 
61 71 - // 02 12 22 32 42 52 62 72 - // 03 13 23 33 43 53 63 73 - // 04 14 24 34 44 54 64 74 - // 05 15 25 35 45 55 65 75 - // 06 16 26 36 46 56 66 76 - // 07 17 27 37 47 57 67 77 - } - } - // Post-condition output and store it - { - // Post-condition (division by two) - // division of two 16 bits signed numbers using shifts - // n / 2 = (n - (n >> 15)) >> 1 - const __m128i sign_in0 = _mm_srai_epi16(in0, 15); - const __m128i sign_in1 = _mm_srai_epi16(in1, 15); - const __m128i sign_in2 = _mm_srai_epi16(in2, 15); - const __m128i sign_in3 = _mm_srai_epi16(in3, 15); - const __m128i sign_in4 = _mm_srai_epi16(in4, 15); - const __m128i sign_in5 = _mm_srai_epi16(in5, 15); - const __m128i sign_in6 = _mm_srai_epi16(in6, 15); - const __m128i sign_in7 = _mm_srai_epi16(in7, 15); - in0 = _mm_sub_epi16(in0, sign_in0); - in1 = _mm_sub_epi16(in1, sign_in1); - in2 = _mm_sub_epi16(in2, sign_in2); - in3 = _mm_sub_epi16(in3, sign_in3); - in4 = _mm_sub_epi16(in4, sign_in4); - in5 = _mm_sub_epi16(in5, sign_in5); - in6 = _mm_sub_epi16(in6, sign_in6); - in7 = _mm_sub_epi16(in7, sign_in7); - in0 = _mm_srai_epi16(in0, 1); - in1 = _mm_srai_epi16(in1, 1); - in2 = _mm_srai_epi16(in2, 1); - in3 = _mm_srai_epi16(in3, 1); - in4 = _mm_srai_epi16(in4, 1); - in5 = _mm_srai_epi16(in5, 1); - in6 = _mm_srai_epi16(in6, 1); - in7 = _mm_srai_epi16(in7, 1); - } - - iscan_ptr += n_coeffs; - qcoeff_ptr += n_coeffs; - dqcoeff_ptr += n_coeffs; - n_coeffs = -n_coeffs; - zero = _mm_setzero_si128(); - - if (!skip_block) { - __m128i eob; - __m128i round, quant, dequant; - { - __m128i coeff0, coeff1; - - // Setup global values - { - round = _mm_load_si128((const __m128i*)round_ptr); - quant = _mm_load_si128((const __m128i*)quant_ptr); - dequant = _mm_load_si128((const __m128i*)dequant_ptr); - } - - { - __m128i coeff0_sign, coeff1_sign; - __m128i qcoeff0, qcoeff1; - __m128i qtmp0, qtmp1; - // Do DC and first 15 AC - coeff0 = *in[0]; - coeff1 = *in[1]; - - // Poor man's sign extract - coeff0_sign = 
_mm_srai_epi16(coeff0, 15); - coeff1_sign = _mm_srai_epi16(coeff1, 15); - qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign); - qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign); - qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); - qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - - qcoeff0 = _mm_adds_epi16(qcoeff0, round); - round = _mm_unpackhi_epi64(round, round); - qcoeff1 = _mm_adds_epi16(qcoeff1, round); - qtmp0 = _mm_mulhi_epi16(qcoeff0, quant); - quant = _mm_unpackhi_epi64(quant, quant); - qtmp1 = _mm_mulhi_epi16(qcoeff1, quant); - - // Reinsert signs - qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign); - qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign); - qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); - qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); - - coeff0 = _mm_mullo_epi16(qcoeff0, dequant); - dequant = _mm_unpackhi_epi64(dequant, dequant); - coeff1 = _mm_mullo_epi16(qcoeff1, dequant); - - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1); - } - - { - // Scan for eob - __m128i zero_coeff0, zero_coeff1; - __m128i nzero_coeff0, nzero_coeff1; - __m128i iscan0, iscan1; - __m128i eob1; - zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero); - zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero); - nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero); - nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero); - iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs)); - iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1); - // Add one to convert from indices to counts - iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0); - iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1); - eob = _mm_and_si128(iscan0, nzero_coeff0); - eob1 = _mm_and_si128(iscan1, nzero_coeff1); - eob = _mm_max_epi16(eob, eob1); - } - n_coeffs += 8 * 2; - } - - // AC only loop - index = 2; - while (n_coeffs < 0) { - __m128i 
coeff0, coeff1; - { - __m128i coeff0_sign, coeff1_sign; - __m128i qcoeff0, qcoeff1; - __m128i qtmp0, qtmp1; - - assert(index < (int)(sizeof(in) / sizeof(in[0])) - 1); - coeff0 = *in[index]; - coeff1 = *in[index + 1]; - - // Poor man's sign extract - coeff0_sign = _mm_srai_epi16(coeff0, 15); - coeff1_sign = _mm_srai_epi16(coeff1, 15); - qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign); - qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign); - qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); - qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - - qcoeff0 = _mm_adds_epi16(qcoeff0, round); - qcoeff1 = _mm_adds_epi16(qcoeff1, round); - qtmp0 = _mm_mulhi_epi16(qcoeff0, quant); - qtmp1 = _mm_mulhi_epi16(qcoeff1, quant); - - // Reinsert signs - qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign); - qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign); - qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); - qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); - - coeff0 = _mm_mullo_epi16(qcoeff0, dequant); - coeff1 = _mm_mullo_epi16(qcoeff1, dequant); - - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1); - } - - { - // Scan for eob - __m128i zero_coeff0, zero_coeff1; - __m128i nzero_coeff0, nzero_coeff1; - __m128i iscan0, iscan1; - __m128i eob0, eob1; - zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero); - zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero); - nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero); - nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero); - iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs)); - iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1); - // Add one to convert from indices to counts - iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0); - iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1); - eob0 = _mm_and_si128(iscan0, nzero_coeff0); - eob1 = _mm_and_si128(iscan1, 
nzero_coeff1); - eob0 = _mm_max_epi16(eob0, eob1); - eob = _mm_max_epi16(eob, eob0); - } - n_coeffs += 8 * 2; - index += 2; - } - - // Accumulate EOB - { - __m128i eob_shuffled; - eob_shuffled = _mm_shuffle_epi32(eob, 0xe); - eob = _mm_max_epi16(eob, eob_shuffled); - eob_shuffled = _mm_shufflelo_epi16(eob, 0xe); - eob = _mm_max_epi16(eob, eob_shuffled); - eob_shuffled = _mm_shufflelo_epi16(eob, 0x1); - eob = _mm_max_epi16(eob, eob_shuffled); - *eob_ptr = _mm_extract_epi16(eob, 1); - } - } else { - do { - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero); - n_coeffs += 8 * 2; - } while (n_coeffs < 0); - *eob_ptr = 0; - } -} - -// load 8x8 array -static INLINE void load_buffer_8x8(const int16_t *input, __m128i *in, - int stride) { - in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride)); - in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride)); - in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride)); - in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride)); - in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride)); - in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride)); - in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride)); - in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride)); - - in[0] = _mm_slli_epi16(in[0], 2); - in[1] = _mm_slli_epi16(in[1], 2); - in[2] = _mm_slli_epi16(in[2], 2); - in[3] = _mm_slli_epi16(in[3], 2); - in[4] = _mm_slli_epi16(in[4], 2); - in[5] = _mm_slli_epi16(in[5], 2); - in[6] = _mm_slli_epi16(in[6], 2); - in[7] = _mm_slli_epi16(in[7], 2); -} - -// right shift and rounding -static INLINE void right_shift_8x8(__m128i *res, const int bit) { - __m128i sign0 = _mm_srai_epi16(res[0], 15); - __m128i sign1 = _mm_srai_epi16(res[1], 15); - __m128i sign2 = _mm_srai_epi16(res[2], 15); - __m128i 
sign3 = _mm_srai_epi16(res[3], 15); - __m128i sign4 = _mm_srai_epi16(res[4], 15); - __m128i sign5 = _mm_srai_epi16(res[5], 15); - __m128i sign6 = _mm_srai_epi16(res[6], 15); - __m128i sign7 = _mm_srai_epi16(res[7], 15); - - if (bit == 2) { - const __m128i const_rounding = _mm_set1_epi16(1); - res[0] = _mm_add_epi16(res[0], const_rounding); - res[1] = _mm_add_epi16(res[1], const_rounding); - res[2] = _mm_add_epi16(res[2], const_rounding); - res[3] = _mm_add_epi16(res[3], const_rounding); - res[4] = _mm_add_epi16(res[4], const_rounding); - res[5] = _mm_add_epi16(res[5], const_rounding); - res[6] = _mm_add_epi16(res[6], const_rounding); - res[7] = _mm_add_epi16(res[7], const_rounding); - } - - res[0] = _mm_sub_epi16(res[0], sign0); - res[1] = _mm_sub_epi16(res[1], sign1); - res[2] = _mm_sub_epi16(res[2], sign2); - res[3] = _mm_sub_epi16(res[3], sign3); - res[4] = _mm_sub_epi16(res[4], sign4); - res[5] = _mm_sub_epi16(res[5], sign5); - res[6] = _mm_sub_epi16(res[6], sign6); - res[7] = _mm_sub_epi16(res[7], sign7); - - if (bit == 1) { - res[0] = _mm_srai_epi16(res[0], 1); - res[1] = _mm_srai_epi16(res[1], 1); - res[2] = _mm_srai_epi16(res[2], 1); - res[3] = _mm_srai_epi16(res[3], 1); - res[4] = _mm_srai_epi16(res[4], 1); - res[5] = _mm_srai_epi16(res[5], 1); - res[6] = _mm_srai_epi16(res[6], 1); - res[7] = _mm_srai_epi16(res[7], 1); - } else { - res[0] = _mm_srai_epi16(res[0], 2); - res[1] = _mm_srai_epi16(res[1], 2); - res[2] = _mm_srai_epi16(res[2], 2); - res[3] = _mm_srai_epi16(res[3], 2); - res[4] = _mm_srai_epi16(res[4], 2); - res[5] = _mm_srai_epi16(res[5], 2); - res[6] = _mm_srai_epi16(res[6], 2); - res[7] = _mm_srai_epi16(res[7], 2); - } -} - -// write 8x8 array -static INLINE void write_buffer_8x8(tran_low_t *output, __m128i *res, - int stride) { - store_output(&res[0], (output + 0 * stride)); - store_output(&res[1], (output + 1 * stride)); - store_output(&res[2], (output + 2 * stride)); - store_output(&res[3], (output + 3 * stride)); - store_output(&res[4], 
(output + 4 * stride)); - store_output(&res[5], (output + 5 * stride)); - store_output(&res[6], (output + 6 * stride)); - store_output(&res[7], (output + 7 * stride)); -} - -// perform in-place transpose -static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) { - const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]); - const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]); - const __m128i tr0_2 = _mm_unpackhi_epi16(in[0], in[1]); - const __m128i tr0_3 = _mm_unpackhi_epi16(in[2], in[3]); - const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]); - const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]); - const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]); - const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]); - // 00 10 01 11 02 12 03 13 - // 20 30 21 31 22 32 23 33 - // 04 14 05 15 06 16 07 17 - // 24 34 25 35 26 36 27 37 - // 40 50 41 51 42 52 43 53 - // 60 70 61 71 62 72 63 73 - // 44 54 45 55 46 56 47 57 - // 64 74 65 75 66 76 67 77 - const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); - const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_4, tr0_5); - const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); - const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_4, tr0_5); - const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_2, tr0_3); - const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); - const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_2, tr0_3); - const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); - // 00 10 20 30 01 11 21 31 - // 40 50 60 70 41 51 61 71 - // 02 12 22 32 03 13 23 33 - // 42 52 62 72 43 53 63 73 - // 04 14 24 34 05 15 25 35 - // 44 54 64 74 45 55 65 75 - // 06 16 26 36 07 17 27 37 - // 46 56 66 76 47 57 67 77 - res[0] = _mm_unpacklo_epi64(tr1_0, tr1_1); - res[1] = _mm_unpackhi_epi64(tr1_0, tr1_1); - res[2] = _mm_unpacklo_epi64(tr1_2, tr1_3); - res[3] = _mm_unpackhi_epi64(tr1_2, tr1_3); - res[4] = _mm_unpacklo_epi64(tr1_4, tr1_5); - res[5] = _mm_unpackhi_epi64(tr1_4, tr1_5); - res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7); - res[7] = 
_mm_unpackhi_epi64(tr1_6, tr1_7); - // 00 10 20 30 40 50 60 70 - // 01 11 21 31 41 51 61 71 - // 02 12 22 32 42 52 62 72 - // 03 13 23 33 43 53 63 73 - // 04 14 24 34 44 54 64 74 - // 05 15 25 35 45 55 65 75 - // 06 16 26 36 46 56 66 76 - // 07 17 27 37 47 57 67 77 -} - -static void fdct8_sse2(__m128i *in) { - // constants - const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64); - const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); - const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); - const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); - const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); - const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); - const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - __m128i u0, u1, u2, u3, u4, u5, u6, u7; - __m128i v0, v1, v2, v3, v4, v5, v6, v7; - __m128i s0, s1, s2, s3, s4, s5, s6, s7; - - // stage 1 - s0 = _mm_add_epi16(in[0], in[7]); - s1 = _mm_add_epi16(in[1], in[6]); - s2 = _mm_add_epi16(in[2], in[5]); - s3 = _mm_add_epi16(in[3], in[4]); - s4 = _mm_sub_epi16(in[3], in[4]); - s5 = _mm_sub_epi16(in[2], in[5]); - s6 = _mm_sub_epi16(in[1], in[6]); - s7 = _mm_sub_epi16(in[0], in[7]); - - u0 = _mm_add_epi16(s0, s3); - u1 = _mm_add_epi16(s1, s2); - u2 = _mm_sub_epi16(s1, s2); - u3 = _mm_sub_epi16(s0, s3); - // interleave and perform butterfly multiplication/addition - v0 = _mm_unpacklo_epi16(u0, u1); - v1 = _mm_unpackhi_epi16(u0, u1); - v2 = _mm_unpacklo_epi16(u2, u3); - v3 = _mm_unpackhi_epi16(u2, u3); - - u0 = _mm_madd_epi16(v0, k__cospi_p16_p16); - u1 = _mm_madd_epi16(v1, k__cospi_p16_p16); - u2 = _mm_madd_epi16(v0, k__cospi_p16_m16); - u3 = _mm_madd_epi16(v1, k__cospi_p16_m16); - u4 = _mm_madd_epi16(v2, k__cospi_p24_p08); - u5 = 
_mm_madd_epi16(v3, k__cospi_p24_p08); - u6 = _mm_madd_epi16(v2, k__cospi_m08_p24); - u7 = _mm_madd_epi16(v3, k__cospi_m08_p24); - - // shift and rounding - v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); - v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); - v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); - v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); - v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); - v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); - v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); - v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); - - u0 = _mm_srai_epi32(v0, DCT_CONST_BITS); - u1 = _mm_srai_epi32(v1, DCT_CONST_BITS); - u2 = _mm_srai_epi32(v2, DCT_CONST_BITS); - u3 = _mm_srai_epi32(v3, DCT_CONST_BITS); - u4 = _mm_srai_epi32(v4, DCT_CONST_BITS); - u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); - u6 = _mm_srai_epi32(v6, DCT_CONST_BITS); - u7 = _mm_srai_epi32(v7, DCT_CONST_BITS); - - in[0] = _mm_packs_epi32(u0, u1); - in[2] = _mm_packs_epi32(u4, u5); - in[4] = _mm_packs_epi32(u2, u3); - in[6] = _mm_packs_epi32(u6, u7); - - // stage 2 - // interleave and perform butterfly multiplication/addition - u0 = _mm_unpacklo_epi16(s6, s5); - u1 = _mm_unpackhi_epi16(s6, s5); - v0 = _mm_madd_epi16(u0, k__cospi_p16_m16); - v1 = _mm_madd_epi16(u1, k__cospi_p16_m16); - v2 = _mm_madd_epi16(u0, k__cospi_p16_p16); - v3 = _mm_madd_epi16(u1, k__cospi_p16_p16); - - // shift and rounding - u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING); - u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING); - u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING); - u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING); - - v0 = _mm_srai_epi32(u0, DCT_CONST_BITS); - v1 = _mm_srai_epi32(u1, DCT_CONST_BITS); - v2 = _mm_srai_epi32(u2, DCT_CONST_BITS); - v3 = _mm_srai_epi32(u3, DCT_CONST_BITS); - - u0 = _mm_packs_epi32(v0, v1); - u1 = _mm_packs_epi32(v2, v3); - - // stage 3 - s0 = _mm_add_epi16(s4, u0); - s1 = _mm_sub_epi16(s4, u0); - s2 = _mm_sub_epi16(s7, u1); - s3 = _mm_add_epi16(s7, u1); - - // stage 4 - u0 = 
_mm_unpacklo_epi16(s0, s3); - u1 = _mm_unpackhi_epi16(s0, s3); - u2 = _mm_unpacklo_epi16(s1, s2); - u3 = _mm_unpackhi_epi16(s1, s2); - - v0 = _mm_madd_epi16(u0, k__cospi_p28_p04); - v1 = _mm_madd_epi16(u1, k__cospi_p28_p04); - v2 = _mm_madd_epi16(u2, k__cospi_p12_p20); - v3 = _mm_madd_epi16(u3, k__cospi_p12_p20); - v4 = _mm_madd_epi16(u2, k__cospi_m20_p12); - v5 = _mm_madd_epi16(u3, k__cospi_m20_p12); - v6 = _mm_madd_epi16(u0, k__cospi_m04_p28); - v7 = _mm_madd_epi16(u1, k__cospi_m04_p28); - - // shift and rounding - u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING); - u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING); - u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING); - u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING); - u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING); - u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING); - u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING); - u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING); - - v0 = _mm_srai_epi32(u0, DCT_CONST_BITS); - v1 = _mm_srai_epi32(u1, DCT_CONST_BITS); - v2 = _mm_srai_epi32(u2, DCT_CONST_BITS); - v3 = _mm_srai_epi32(u3, DCT_CONST_BITS); - v4 = _mm_srai_epi32(u4, DCT_CONST_BITS); - v5 = _mm_srai_epi32(u5, DCT_CONST_BITS); - v6 = _mm_srai_epi32(u6, DCT_CONST_BITS); - v7 = _mm_srai_epi32(u7, DCT_CONST_BITS); - - in[1] = _mm_packs_epi32(v0, v1); - in[3] = _mm_packs_epi32(v4, v5); - in[5] = _mm_packs_epi32(v2, v3); - in[7] = _mm_packs_epi32(v6, v7); - - // transpose - array_transpose_8x8(in, in); -} - -static void fadst8_sse2(__m128i *in) { - // Constants - const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64); - const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64); - const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64); - const __m128i k__cospi_p22_m10 = pair_set_epi16(cospi_22_64, -cospi_10_64); - const __m128i k__cospi_p18_p14 = pair_set_epi16(cospi_18_64, cospi_14_64); - const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64); - const __m128i 
k__cospi_p26_p06 = pair_set_epi16(cospi_26_64, cospi_6_64); - const __m128i k__cospi_p06_m26 = pair_set_epi16(cospi_6_64, -cospi_26_64); - const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); - const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); - const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64); - const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64); - const __m128i k__const_0 = _mm_set1_epi16(0); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - - __m128i u0, u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11, u12, u13, u14, u15; - __m128i v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15; - __m128i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15; - __m128i s0, s1, s2, s3, s4, s5, s6, s7; - __m128i in0, in1, in2, in3, in4, in5, in6, in7; - - // properly aligned for butterfly input - in0 = in[7]; - in1 = in[0]; - in2 = in[5]; - in3 = in[2]; - in4 = in[3]; - in5 = in[4]; - in6 = in[1]; - in7 = in[6]; - - // column transformation - // stage 1 - // interleave and multiply/add into 32-bit integer - s0 = _mm_unpacklo_epi16(in0, in1); - s1 = _mm_unpackhi_epi16(in0, in1); - s2 = _mm_unpacklo_epi16(in2, in3); - s3 = _mm_unpackhi_epi16(in2, in3); - s4 = _mm_unpacklo_epi16(in4, in5); - s5 = _mm_unpackhi_epi16(in4, in5); - s6 = _mm_unpacklo_epi16(in6, in7); - s7 = _mm_unpackhi_epi16(in6, in7); - - u0 = _mm_madd_epi16(s0, k__cospi_p02_p30); - u1 = _mm_madd_epi16(s1, k__cospi_p02_p30); - u2 = _mm_madd_epi16(s0, k__cospi_p30_m02); - u3 = _mm_madd_epi16(s1, k__cospi_p30_m02); - u4 = _mm_madd_epi16(s2, k__cospi_p10_p22); - u5 = _mm_madd_epi16(s3, k__cospi_p10_p22); - u6 = _mm_madd_epi16(s2, k__cospi_p22_m10); - u7 = _mm_madd_epi16(s3, k__cospi_p22_m10); - u8 = _mm_madd_epi16(s4, k__cospi_p18_p14); - u9 = _mm_madd_epi16(s5, k__cospi_p18_p14); - u10 = 
_mm_madd_epi16(s4, k__cospi_p14_m18); - u11 = _mm_madd_epi16(s5, k__cospi_p14_m18); - u12 = _mm_madd_epi16(s6, k__cospi_p26_p06); - u13 = _mm_madd_epi16(s7, k__cospi_p26_p06); - u14 = _mm_madd_epi16(s6, k__cospi_p06_m26); - u15 = _mm_madd_epi16(s7, k__cospi_p06_m26); - - // addition - w0 = _mm_add_epi32(u0, u8); - w1 = _mm_add_epi32(u1, u9); - w2 = _mm_add_epi32(u2, u10); - w3 = _mm_add_epi32(u3, u11); - w4 = _mm_add_epi32(u4, u12); - w5 = _mm_add_epi32(u5, u13); - w6 = _mm_add_epi32(u6, u14); - w7 = _mm_add_epi32(u7, u15); - w8 = _mm_sub_epi32(u0, u8); - w9 = _mm_sub_epi32(u1, u9); - w10 = _mm_sub_epi32(u2, u10); - w11 = _mm_sub_epi32(u3, u11); - w12 = _mm_sub_epi32(u4, u12); - w13 = _mm_sub_epi32(u5, u13); - w14 = _mm_sub_epi32(u6, u14); - w15 = _mm_sub_epi32(u7, u15); - - // shift and rounding - v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING); - v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING); - v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING); - v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING); - v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING); - v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING); - v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING); - v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING); - v8 = _mm_add_epi32(w8, k__DCT_CONST_ROUNDING); - v9 = _mm_add_epi32(w9, k__DCT_CONST_ROUNDING); - v10 = _mm_add_epi32(w10, k__DCT_CONST_ROUNDING); - v11 = _mm_add_epi32(w11, k__DCT_CONST_ROUNDING); - v12 = _mm_add_epi32(w12, k__DCT_CONST_ROUNDING); - v13 = _mm_add_epi32(w13, k__DCT_CONST_ROUNDING); - v14 = _mm_add_epi32(w14, k__DCT_CONST_ROUNDING); - v15 = _mm_add_epi32(w15, k__DCT_CONST_ROUNDING); - - u0 = _mm_srai_epi32(v0, DCT_CONST_BITS); - u1 = _mm_srai_epi32(v1, DCT_CONST_BITS); - u2 = _mm_srai_epi32(v2, DCT_CONST_BITS); - u3 = _mm_srai_epi32(v3, DCT_CONST_BITS); - u4 = _mm_srai_epi32(v4, DCT_CONST_BITS); - u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); - u6 = _mm_srai_epi32(v6, DCT_CONST_BITS); - u7 = _mm_srai_epi32(v7, DCT_CONST_BITS); - u8 = _mm_srai_epi32(v8, DCT_CONST_BITS); - u9 = 
_mm_srai_epi32(v9, DCT_CONST_BITS); - u10 = _mm_srai_epi32(v10, DCT_CONST_BITS); - u11 = _mm_srai_epi32(v11, DCT_CONST_BITS); - u12 = _mm_srai_epi32(v12, DCT_CONST_BITS); - u13 = _mm_srai_epi32(v13, DCT_CONST_BITS); - u14 = _mm_srai_epi32(v14, DCT_CONST_BITS); - u15 = _mm_srai_epi32(v15, DCT_CONST_BITS); - - // back to 16-bit and pack 8 integers into __m128i - in[0] = _mm_packs_epi32(u0, u1); - in[1] = _mm_packs_epi32(u2, u3); - in[2] = _mm_packs_epi32(u4, u5); - in[3] = _mm_packs_epi32(u6, u7); - in[4] = _mm_packs_epi32(u8, u9); - in[5] = _mm_packs_epi32(u10, u11); - in[6] = _mm_packs_epi32(u12, u13); - in[7] = _mm_packs_epi32(u14, u15); - - // stage 2 - s0 = _mm_add_epi16(in[0], in[2]); - s1 = _mm_add_epi16(in[1], in[3]); - s2 = _mm_sub_epi16(in[0], in[2]); - s3 = _mm_sub_epi16(in[1], in[3]); - u0 = _mm_unpacklo_epi16(in[4], in[5]); - u1 = _mm_unpackhi_epi16(in[4], in[5]); - u2 = _mm_unpacklo_epi16(in[6], in[7]); - u3 = _mm_unpackhi_epi16(in[6], in[7]); - - v0 = _mm_madd_epi16(u0, k__cospi_p08_p24); - v1 = _mm_madd_epi16(u1, k__cospi_p08_p24); - v2 = _mm_madd_epi16(u0, k__cospi_p24_m08); - v3 = _mm_madd_epi16(u1, k__cospi_p24_m08); - v4 = _mm_madd_epi16(u2, k__cospi_m24_p08); - v5 = _mm_madd_epi16(u3, k__cospi_m24_p08); - v6 = _mm_madd_epi16(u2, k__cospi_p08_p24); - v7 = _mm_madd_epi16(u3, k__cospi_p08_p24); - - w0 = _mm_add_epi32(v0, v4); - w1 = _mm_add_epi32(v1, v5); - w2 = _mm_add_epi32(v2, v6); - w3 = _mm_add_epi32(v3, v7); - w4 = _mm_sub_epi32(v0, v4); - w5 = _mm_sub_epi32(v1, v5); - w6 = _mm_sub_epi32(v2, v6); - w7 = _mm_sub_epi32(v3, v7); - - v0 = _mm_add_epi32(w0, k__DCT_CONST_ROUNDING); - v1 = _mm_add_epi32(w1, k__DCT_CONST_ROUNDING); - v2 = _mm_add_epi32(w2, k__DCT_CONST_ROUNDING); - v3 = _mm_add_epi32(w3, k__DCT_CONST_ROUNDING); - v4 = _mm_add_epi32(w4, k__DCT_CONST_ROUNDING); - v5 = _mm_add_epi32(w5, k__DCT_CONST_ROUNDING); - v6 = _mm_add_epi32(w6, k__DCT_CONST_ROUNDING); - v7 = _mm_add_epi32(w7, k__DCT_CONST_ROUNDING); - - u0 = _mm_srai_epi32(v0, 
DCT_CONST_BITS); - u1 = _mm_srai_epi32(v1, DCT_CONST_BITS); - u2 = _mm_srai_epi32(v2, DCT_CONST_BITS); - u3 = _mm_srai_epi32(v3, DCT_CONST_BITS); - u4 = _mm_srai_epi32(v4, DCT_CONST_BITS); - u5 = _mm_srai_epi32(v5, DCT_CONST_BITS); - u6 = _mm_srai_epi32(v6, DCT_CONST_BITS); - u7 = _mm_srai_epi32(v7, DCT_CONST_BITS); - - // back to 16-bit intergers - s4 = _mm_packs_epi32(u0, u1); - s5 = _mm_packs_epi32(u2, u3); - s6 = _mm_packs_epi32(u4, u5); - s7 = _mm_packs_epi32(u6, u7); - - // stage 3 - u0 = _mm_unpacklo_epi16(s2, s3); - u1 = _mm_unpackhi_epi16(s2, s3); - u2 = _mm_unpacklo_epi16(s6, s7); - u3 = _mm_unpackhi_epi16(s6, s7); - - v0 = _mm_madd_epi16(u0, k__cospi_p16_p16); - v1 = _mm_madd_epi16(u1, k__cospi_p16_p16); - v2 = _mm_madd_epi16(u0, k__cospi_p16_m16); - v3 = _mm_madd_epi16(u1, k__cospi_p16_m16); - v4 = _mm_madd_epi16(u2, k__cospi_p16_p16); - v5 = _mm_madd_epi16(u3, k__cospi_p16_p16); - v6 = _mm_madd_epi16(u2, k__cospi_p16_m16); - v7 = _mm_madd_epi16(u3, k__cospi_p16_m16); - - u0 = _mm_add_epi32(v0, k__DCT_CONST_ROUNDING); - u1 = _mm_add_epi32(v1, k__DCT_CONST_ROUNDING); - u2 = _mm_add_epi32(v2, k__DCT_CONST_ROUNDING); - u3 = _mm_add_epi32(v3, k__DCT_CONST_ROUNDING); - u4 = _mm_add_epi32(v4, k__DCT_CONST_ROUNDING); - u5 = _mm_add_epi32(v5, k__DCT_CONST_ROUNDING); - u6 = _mm_add_epi32(v6, k__DCT_CONST_ROUNDING); - u7 = _mm_add_epi32(v7, k__DCT_CONST_ROUNDING); - - v0 = _mm_srai_epi32(u0, DCT_CONST_BITS); - v1 = _mm_srai_epi32(u1, DCT_CONST_BITS); - v2 = _mm_srai_epi32(u2, DCT_CONST_BITS); - v3 = _mm_srai_epi32(u3, DCT_CONST_BITS); - v4 = _mm_srai_epi32(u4, DCT_CONST_BITS); - v5 = _mm_srai_epi32(u5, DCT_CONST_BITS); - v6 = _mm_srai_epi32(u6, DCT_CONST_BITS); - v7 = _mm_srai_epi32(u7, DCT_CONST_BITS); - - s2 = _mm_packs_epi32(v0, v1); - s3 = _mm_packs_epi32(v2, v3); - s6 = _mm_packs_epi32(v4, v5); - s7 = _mm_packs_epi32(v6, v7); - - // FIXME(jingning): do subtract using bit inversion? 
- in[0] = s0; - in[1] = _mm_sub_epi16(k__const_0, s4); - in[2] = s6; - in[3] = _mm_sub_epi16(k__const_0, s2); - in[4] = s3; - in[5] = _mm_sub_epi16(k__const_0, s7); - in[6] = s5; - in[7] = _mm_sub_epi16(k__const_0, s1); - - // transpose - array_transpose_8x8(in, in); -} - -void vp10_fht8x8_sse2(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { - __m128i in[8]; - - switch (tx_type) { - case DCT_DCT: - vpx_fdct8x8_sse2(input, output, stride); - break; - case ADST_DCT: - load_buffer_8x8(input, in, stride); - fadst8_sse2(in); - fdct8_sse2(in); - right_shift_8x8(in, 1); - write_buffer_8x8(output, in, 8); - break; - case DCT_ADST: - load_buffer_8x8(input, in, stride); - fdct8_sse2(in); - fadst8_sse2(in); - right_shift_8x8(in, 1); - write_buffer_8x8(output, in, 8); - break; - case ADST_ADST: - load_buffer_8x8(input, in, stride); - fadst8_sse2(in); - fadst8_sse2(in); - right_shift_8x8(in, 1); - write_buffer_8x8(output, in, 8); - break; - default: - assert(0); - break; - } -} - -static INLINE void load_buffer_16x16(const int16_t* input, __m128i *in0, - __m128i *in1, int stride) { - // load first 8 columns - load_buffer_8x8(input, in0, stride); - load_buffer_8x8(input + 8 * stride, in0 + 8, stride); - - input += 8; - // load second 8 columns - load_buffer_8x8(input, in1, stride); - load_buffer_8x8(input + 8 * stride, in1 + 8, stride); -} - -static INLINE void write_buffer_16x16(tran_low_t *output, __m128i *in0, - __m128i *in1, int stride) { - // write first 8 columns - write_buffer_8x8(output, in0, stride); - write_buffer_8x8(output + 8 * stride, in0 + 8, stride); - // write second 8 columns - output += 8; - write_buffer_8x8(output, in1, stride); - write_buffer_8x8(output + 8 * stride, in1 + 8, stride); -} - -static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) { - __m128i tbuf[8]; - array_transpose_8x8(res0, res0); - array_transpose_8x8(res1, tbuf); - array_transpose_8x8(res0 + 8, res1); - array_transpose_8x8(res1 + 8, res1 + 8); - - 
res0[8] = tbuf[0]; - res0[9] = tbuf[1]; - res0[10] = tbuf[2]; - res0[11] = tbuf[3]; - res0[12] = tbuf[4]; - res0[13] = tbuf[5]; - res0[14] = tbuf[6]; - res0[15] = tbuf[7]; -} - -static INLINE void right_shift_16x16(__m128i *res0, __m128i *res1) { - // perform rounding operations - right_shift_8x8(res0, 2); - right_shift_8x8(res0 + 8, 2); - right_shift_8x8(res1, 2); - right_shift_8x8(res1 + 8, 2); -} - -static void fdct16_8col(__m128i *in) { - // perform 16x16 1-D DCT for 8 columns - __m128i i[8], s[8], p[8], t[8], u[16], v[16]; - const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64); - const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64); - const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); - const __m128i k__cospi_p08_m24 = pair_set_epi16(cospi_8_64, -cospi_24_64); - const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); - const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); - const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); - const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); - const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); - const __m128i k__cospi_p30_p02 = pair_set_epi16(cospi_30_64, cospi_2_64); - const __m128i k__cospi_p14_p18 = pair_set_epi16(cospi_14_64, cospi_18_64); - const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64); - const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64); - const __m128i k__cospi_p22_p10 = pair_set_epi16(cospi_22_64, cospi_10_64); - const __m128i k__cospi_p06_p26 = pair_set_epi16(cospi_6_64, cospi_26_64); - const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64); - const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - - // 
stage 1 - i[0] = _mm_add_epi16(in[0], in[15]); - i[1] = _mm_add_epi16(in[1], in[14]); - i[2] = _mm_add_epi16(in[2], in[13]); - i[3] = _mm_add_epi16(in[3], in[12]); - i[4] = _mm_add_epi16(in[4], in[11]); - i[5] = _mm_add_epi16(in[5], in[10]); - i[6] = _mm_add_epi16(in[6], in[9]); - i[7] = _mm_add_epi16(in[7], in[8]); - - s[0] = _mm_sub_epi16(in[7], in[8]); - s[1] = _mm_sub_epi16(in[6], in[9]); - s[2] = _mm_sub_epi16(in[5], in[10]); - s[3] = _mm_sub_epi16(in[4], in[11]); - s[4] = _mm_sub_epi16(in[3], in[12]); - s[5] = _mm_sub_epi16(in[2], in[13]); - s[6] = _mm_sub_epi16(in[1], in[14]); - s[7] = _mm_sub_epi16(in[0], in[15]); - - p[0] = _mm_add_epi16(i[0], i[7]); - p[1] = _mm_add_epi16(i[1], i[6]); - p[2] = _mm_add_epi16(i[2], i[5]); - p[3] = _mm_add_epi16(i[3], i[4]); - p[4] = _mm_sub_epi16(i[3], i[4]); - p[5] = _mm_sub_epi16(i[2], i[5]); - p[6] = _mm_sub_epi16(i[1], i[6]); - p[7] = _mm_sub_epi16(i[0], i[7]); - - u[0] = _mm_add_epi16(p[0], p[3]); - u[1] = _mm_add_epi16(p[1], p[2]); - u[2] = _mm_sub_epi16(p[1], p[2]); - u[3] = _mm_sub_epi16(p[0], p[3]); - - v[0] = _mm_unpacklo_epi16(u[0], u[1]); - v[1] = _mm_unpackhi_epi16(u[0], u[1]); - v[2] = _mm_unpacklo_epi16(u[2], u[3]); - v[3] = _mm_unpackhi_epi16(u[2], u[3]); - - u[0] = _mm_madd_epi16(v[0], k__cospi_p16_p16); - u[1] = _mm_madd_epi16(v[1], k__cospi_p16_p16); - u[2] = _mm_madd_epi16(v[0], k__cospi_p16_m16); - u[3] = _mm_madd_epi16(v[1], k__cospi_p16_m16); - u[4] = _mm_madd_epi16(v[2], k__cospi_p24_p08); - u[5] = _mm_madd_epi16(v[3], k__cospi_p24_p08); - u[6] = _mm_madd_epi16(v[2], k__cospi_m08_p24); - u[7] = _mm_madd_epi16(v[3], k__cospi_m08_p24); - - v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); - v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); - v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); - v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); - v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); - v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); - v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); 
- v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); - - u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); - u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); - u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); - u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); - u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); - u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); - u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); - u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); - - in[0] = _mm_packs_epi32(u[0], u[1]); - in[4] = _mm_packs_epi32(u[4], u[5]); - in[8] = _mm_packs_epi32(u[2], u[3]); - in[12] = _mm_packs_epi32(u[6], u[7]); - - u[0] = _mm_unpacklo_epi16(p[5], p[6]); - u[1] = _mm_unpackhi_epi16(p[5], p[6]); - v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16); - v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16); - v[2] = _mm_madd_epi16(u[0], k__cospi_p16_p16); - v[3] = _mm_madd_epi16(u[1], k__cospi_p16_p16); - - u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); - u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); - - v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); - v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - - u[0] = _mm_packs_epi32(v[0], v[1]); - u[1] = _mm_packs_epi32(v[2], v[3]); - - t[0] = _mm_add_epi16(p[4], u[0]); - t[1] = _mm_sub_epi16(p[4], u[0]); - t[2] = _mm_sub_epi16(p[7], u[1]); - t[3] = _mm_add_epi16(p[7], u[1]); - - u[0] = _mm_unpacklo_epi16(t[0], t[3]); - u[1] = _mm_unpackhi_epi16(t[0], t[3]); - u[2] = _mm_unpacklo_epi16(t[1], t[2]); - u[3] = _mm_unpackhi_epi16(t[1], t[2]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_p28_p04); - v[1] = _mm_madd_epi16(u[1], k__cospi_p28_p04); - v[2] = _mm_madd_epi16(u[2], k__cospi_p12_p20); - v[3] = _mm_madd_epi16(u[3], k__cospi_p12_p20); - v[4] = _mm_madd_epi16(u[2], k__cospi_m20_p12); - v[5] = _mm_madd_epi16(u[3], k__cospi_m20_p12); - v[6] = _mm_madd_epi16(u[0], 
k__cospi_m04_p28); - v[7] = _mm_madd_epi16(u[1], k__cospi_m04_p28); - - u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); - u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); - u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); - u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); - u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); - u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); - - v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); - v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); - v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); - v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); - v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); - - in[2] = _mm_packs_epi32(v[0], v[1]); - in[6] = _mm_packs_epi32(v[4], v[5]); - in[10] = _mm_packs_epi32(v[2], v[3]); - in[14] = _mm_packs_epi32(v[6], v[7]); - - // stage 2 - u[0] = _mm_unpacklo_epi16(s[2], s[5]); - u[1] = _mm_unpackhi_epi16(s[2], s[5]); - u[2] = _mm_unpacklo_epi16(s[3], s[4]); - u[3] = _mm_unpackhi_epi16(s[3], s[4]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_m16_p16); - v[1] = _mm_madd_epi16(u[1], k__cospi_m16_p16); - v[2] = _mm_madd_epi16(u[2], k__cospi_m16_p16); - v[3] = _mm_madd_epi16(u[3], k__cospi_m16_p16); - v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16); - v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16); - v[6] = _mm_madd_epi16(u[0], k__cospi_p16_p16); - v[7] = _mm_madd_epi16(u[1], k__cospi_p16_p16); - - u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); - u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); - u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); - u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); - u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); - u[7] = 
_mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); - - v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); - v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); - v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); - v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); - v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); - - t[2] = _mm_packs_epi32(v[0], v[1]); - t[3] = _mm_packs_epi32(v[2], v[3]); - t[4] = _mm_packs_epi32(v[4], v[5]); - t[5] = _mm_packs_epi32(v[6], v[7]); - - // stage 3 - p[0] = _mm_add_epi16(s[0], t[3]); - p[1] = _mm_add_epi16(s[1], t[2]); - p[2] = _mm_sub_epi16(s[1], t[2]); - p[3] = _mm_sub_epi16(s[0], t[3]); - p[4] = _mm_sub_epi16(s[7], t[4]); - p[5] = _mm_sub_epi16(s[6], t[5]); - p[6] = _mm_add_epi16(s[6], t[5]); - p[7] = _mm_add_epi16(s[7], t[4]); - - // stage 4 - u[0] = _mm_unpacklo_epi16(p[1], p[6]); - u[1] = _mm_unpackhi_epi16(p[1], p[6]); - u[2] = _mm_unpacklo_epi16(p[2], p[5]); - u[3] = _mm_unpackhi_epi16(p[2], p[5]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_m08_p24); - v[1] = _mm_madd_epi16(u[1], k__cospi_m08_p24); - v[2] = _mm_madd_epi16(u[2], k__cospi_p24_p08); - v[3] = _mm_madd_epi16(u[3], k__cospi_p24_p08); - v[4] = _mm_madd_epi16(u[2], k__cospi_p08_m24); - v[5] = _mm_madd_epi16(u[3], k__cospi_p08_m24); - v[6] = _mm_madd_epi16(u[0], k__cospi_p24_p08); - v[7] = _mm_madd_epi16(u[1], k__cospi_p24_p08); - - u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); - u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); - u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); - u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); - u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); - u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); - - v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - v[2] = 
_mm_srai_epi32(u[2], DCT_CONST_BITS); - v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); - v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); - v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); - v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); - - t[1] = _mm_packs_epi32(v[0], v[1]); - t[2] = _mm_packs_epi32(v[2], v[3]); - t[5] = _mm_packs_epi32(v[4], v[5]); - t[6] = _mm_packs_epi32(v[6], v[7]); - - // stage 5 - s[0] = _mm_add_epi16(p[0], t[1]); - s[1] = _mm_sub_epi16(p[0], t[1]); - s[2] = _mm_add_epi16(p[3], t[2]); - s[3] = _mm_sub_epi16(p[3], t[2]); - s[4] = _mm_sub_epi16(p[4], t[5]); - s[5] = _mm_add_epi16(p[4], t[5]); - s[6] = _mm_sub_epi16(p[7], t[6]); - s[7] = _mm_add_epi16(p[7], t[6]); - - // stage 6 - u[0] = _mm_unpacklo_epi16(s[0], s[7]); - u[1] = _mm_unpackhi_epi16(s[0], s[7]); - u[2] = _mm_unpacklo_epi16(s[1], s[6]); - u[3] = _mm_unpackhi_epi16(s[1], s[6]); - u[4] = _mm_unpacklo_epi16(s[2], s[5]); - u[5] = _mm_unpackhi_epi16(s[2], s[5]); - u[6] = _mm_unpacklo_epi16(s[3], s[4]); - u[7] = _mm_unpackhi_epi16(s[3], s[4]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_p30_p02); - v[1] = _mm_madd_epi16(u[1], k__cospi_p30_p02); - v[2] = _mm_madd_epi16(u[2], k__cospi_p14_p18); - v[3] = _mm_madd_epi16(u[3], k__cospi_p14_p18); - v[4] = _mm_madd_epi16(u[4], k__cospi_p22_p10); - v[5] = _mm_madd_epi16(u[5], k__cospi_p22_p10); - v[6] = _mm_madd_epi16(u[6], k__cospi_p06_p26); - v[7] = _mm_madd_epi16(u[7], k__cospi_p06_p26); - v[8] = _mm_madd_epi16(u[6], k__cospi_m26_p06); - v[9] = _mm_madd_epi16(u[7], k__cospi_m26_p06); - v[10] = _mm_madd_epi16(u[4], k__cospi_m10_p22); - v[11] = _mm_madd_epi16(u[5], k__cospi_m10_p22); - v[12] = _mm_madd_epi16(u[2], k__cospi_m18_p14); - v[13] = _mm_madd_epi16(u[3], k__cospi_m18_p14); - v[14] = _mm_madd_epi16(u[0], k__cospi_m02_p30); - v[15] = _mm_madd_epi16(u[1], k__cospi_m02_p30); - - u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); - u[2] = 
_mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); - u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); - u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); - u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); - u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); - u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING); - u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING); - u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING); - u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING); - u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING); - u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING); - u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING); - u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING); - - v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); - v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); - v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); - v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); - v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); - v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS); - v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS); - v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS); - v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS); - v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS); - v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS); - v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS); - v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS); - - in[1] = _mm_packs_epi32(v[0], v[1]); - in[9] = _mm_packs_epi32(v[2], v[3]); - in[5] = _mm_packs_epi32(v[4], v[5]); - in[13] = _mm_packs_epi32(v[6], v[7]); - in[3] = _mm_packs_epi32(v[8], v[9]); - in[11] = _mm_packs_epi32(v[10], v[11]); - in[7] = _mm_packs_epi32(v[12], v[13]); - in[15] = _mm_packs_epi32(v[14], v[15]); -} - -static void fadst16_8col(__m128i *in) { - // perform 16x16 1-D ADST for 8 columns - __m128i s[16], x[16], u[32], v[32]; - const __m128i k__cospi_p01_p31 = 
pair_set_epi16(cospi_1_64, cospi_31_64); - const __m128i k__cospi_p31_m01 = pair_set_epi16(cospi_31_64, -cospi_1_64); - const __m128i k__cospi_p05_p27 = pair_set_epi16(cospi_5_64, cospi_27_64); - const __m128i k__cospi_p27_m05 = pair_set_epi16(cospi_27_64, -cospi_5_64); - const __m128i k__cospi_p09_p23 = pair_set_epi16(cospi_9_64, cospi_23_64); - const __m128i k__cospi_p23_m09 = pair_set_epi16(cospi_23_64, -cospi_9_64); - const __m128i k__cospi_p13_p19 = pair_set_epi16(cospi_13_64, cospi_19_64); - const __m128i k__cospi_p19_m13 = pair_set_epi16(cospi_19_64, -cospi_13_64); - const __m128i k__cospi_p17_p15 = pair_set_epi16(cospi_17_64, cospi_15_64); - const __m128i k__cospi_p15_m17 = pair_set_epi16(cospi_15_64, -cospi_17_64); - const __m128i k__cospi_p21_p11 = pair_set_epi16(cospi_21_64, cospi_11_64); - const __m128i k__cospi_p11_m21 = pair_set_epi16(cospi_11_64, -cospi_21_64); - const __m128i k__cospi_p25_p07 = pair_set_epi16(cospi_25_64, cospi_7_64); - const __m128i k__cospi_p07_m25 = pair_set_epi16(cospi_7_64, -cospi_25_64); - const __m128i k__cospi_p29_p03 = pair_set_epi16(cospi_29_64, cospi_3_64); - const __m128i k__cospi_p03_m29 = pair_set_epi16(cospi_3_64, -cospi_29_64); - const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64); - const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64); - const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64); - const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64); - const __m128i k__cospi_m28_p04 = pair_set_epi16(-cospi_28_64, cospi_4_64); - const __m128i k__cospi_m12_p20 = pair_set_epi16(-cospi_12_64, cospi_20_64); - const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); - const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); - const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64); - const __m128i k__cospi_m16_m16 = _mm_set1_epi16((int16_t)-cospi_16_64); - const __m128i k__cospi_p16_p16 = 
_mm_set1_epi16((int16_t)cospi_16_64); - const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - const __m128i kZero = _mm_set1_epi16(0); - - u[0] = _mm_unpacklo_epi16(in[15], in[0]); - u[1] = _mm_unpackhi_epi16(in[15], in[0]); - u[2] = _mm_unpacklo_epi16(in[13], in[2]); - u[3] = _mm_unpackhi_epi16(in[13], in[2]); - u[4] = _mm_unpacklo_epi16(in[11], in[4]); - u[5] = _mm_unpackhi_epi16(in[11], in[4]); - u[6] = _mm_unpacklo_epi16(in[9], in[6]); - u[7] = _mm_unpackhi_epi16(in[9], in[6]); - u[8] = _mm_unpacklo_epi16(in[7], in[8]); - u[9] = _mm_unpackhi_epi16(in[7], in[8]); - u[10] = _mm_unpacklo_epi16(in[5], in[10]); - u[11] = _mm_unpackhi_epi16(in[5], in[10]); - u[12] = _mm_unpacklo_epi16(in[3], in[12]); - u[13] = _mm_unpackhi_epi16(in[3], in[12]); - u[14] = _mm_unpacklo_epi16(in[1], in[14]); - u[15] = _mm_unpackhi_epi16(in[1], in[14]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_p01_p31); - v[1] = _mm_madd_epi16(u[1], k__cospi_p01_p31); - v[2] = _mm_madd_epi16(u[0], k__cospi_p31_m01); - v[3] = _mm_madd_epi16(u[1], k__cospi_p31_m01); - v[4] = _mm_madd_epi16(u[2], k__cospi_p05_p27); - v[5] = _mm_madd_epi16(u[3], k__cospi_p05_p27); - v[6] = _mm_madd_epi16(u[2], k__cospi_p27_m05); - v[7] = _mm_madd_epi16(u[3], k__cospi_p27_m05); - v[8] = _mm_madd_epi16(u[4], k__cospi_p09_p23); - v[9] = _mm_madd_epi16(u[5], k__cospi_p09_p23); - v[10] = _mm_madd_epi16(u[4], k__cospi_p23_m09); - v[11] = _mm_madd_epi16(u[5], k__cospi_p23_m09); - v[12] = _mm_madd_epi16(u[6], k__cospi_p13_p19); - v[13] = _mm_madd_epi16(u[7], k__cospi_p13_p19); - v[14] = _mm_madd_epi16(u[6], k__cospi_p19_m13); - v[15] = _mm_madd_epi16(u[7], k__cospi_p19_m13); - v[16] = _mm_madd_epi16(u[8], k__cospi_p17_p15); - v[17] = _mm_madd_epi16(u[9], k__cospi_p17_p15); - v[18] = _mm_madd_epi16(u[8], k__cospi_p15_m17); - v[19] = _mm_madd_epi16(u[9], 
k__cospi_p15_m17); - v[20] = _mm_madd_epi16(u[10], k__cospi_p21_p11); - v[21] = _mm_madd_epi16(u[11], k__cospi_p21_p11); - v[22] = _mm_madd_epi16(u[10], k__cospi_p11_m21); - v[23] = _mm_madd_epi16(u[11], k__cospi_p11_m21); - v[24] = _mm_madd_epi16(u[12], k__cospi_p25_p07); - v[25] = _mm_madd_epi16(u[13], k__cospi_p25_p07); - v[26] = _mm_madd_epi16(u[12], k__cospi_p07_m25); - v[27] = _mm_madd_epi16(u[13], k__cospi_p07_m25); - v[28] = _mm_madd_epi16(u[14], k__cospi_p29_p03); - v[29] = _mm_madd_epi16(u[15], k__cospi_p29_p03); - v[30] = _mm_madd_epi16(u[14], k__cospi_p03_m29); - v[31] = _mm_madd_epi16(u[15], k__cospi_p03_m29); - - u[0] = _mm_add_epi32(v[0], v[16]); - u[1] = _mm_add_epi32(v[1], v[17]); - u[2] = _mm_add_epi32(v[2], v[18]); - u[3] = _mm_add_epi32(v[3], v[19]); - u[4] = _mm_add_epi32(v[4], v[20]); - u[5] = _mm_add_epi32(v[5], v[21]); - u[6] = _mm_add_epi32(v[6], v[22]); - u[7] = _mm_add_epi32(v[7], v[23]); - u[8] = _mm_add_epi32(v[8], v[24]); - u[9] = _mm_add_epi32(v[9], v[25]); - u[10] = _mm_add_epi32(v[10], v[26]); - u[11] = _mm_add_epi32(v[11], v[27]); - u[12] = _mm_add_epi32(v[12], v[28]); - u[13] = _mm_add_epi32(v[13], v[29]); - u[14] = _mm_add_epi32(v[14], v[30]); - u[15] = _mm_add_epi32(v[15], v[31]); - u[16] = _mm_sub_epi32(v[0], v[16]); - u[17] = _mm_sub_epi32(v[1], v[17]); - u[18] = _mm_sub_epi32(v[2], v[18]); - u[19] = _mm_sub_epi32(v[3], v[19]); - u[20] = _mm_sub_epi32(v[4], v[20]); - u[21] = _mm_sub_epi32(v[5], v[21]); - u[22] = _mm_sub_epi32(v[6], v[22]); - u[23] = _mm_sub_epi32(v[7], v[23]); - u[24] = _mm_sub_epi32(v[8], v[24]); - u[25] = _mm_sub_epi32(v[9], v[25]); - u[26] = _mm_sub_epi32(v[10], v[26]); - u[27] = _mm_sub_epi32(v[11], v[27]); - u[28] = _mm_sub_epi32(v[12], v[28]); - u[29] = _mm_sub_epi32(v[13], v[29]); - u[30] = _mm_sub_epi32(v[14], v[30]); - u[31] = _mm_sub_epi32(v[15], v[31]); - - v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); - v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); - v[2] = _mm_add_epi32(u[2], 
k__DCT_CONST_ROUNDING); - v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); - v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); - v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); - v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); - v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); - v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); - v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); - v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); - v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); - v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); - v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); - v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); - v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - v[16] = _mm_add_epi32(u[16], k__DCT_CONST_ROUNDING); - v[17] = _mm_add_epi32(u[17], k__DCT_CONST_ROUNDING); - v[18] = _mm_add_epi32(u[18], k__DCT_CONST_ROUNDING); - v[19] = _mm_add_epi32(u[19], k__DCT_CONST_ROUNDING); - v[20] = _mm_add_epi32(u[20], k__DCT_CONST_ROUNDING); - v[21] = _mm_add_epi32(u[21], k__DCT_CONST_ROUNDING); - v[22] = _mm_add_epi32(u[22], k__DCT_CONST_ROUNDING); - v[23] = _mm_add_epi32(u[23], k__DCT_CONST_ROUNDING); - v[24] = _mm_add_epi32(u[24], k__DCT_CONST_ROUNDING); - v[25] = _mm_add_epi32(u[25], k__DCT_CONST_ROUNDING); - v[26] = _mm_add_epi32(u[26], k__DCT_CONST_ROUNDING); - v[27] = _mm_add_epi32(u[27], k__DCT_CONST_ROUNDING); - v[28] = _mm_add_epi32(u[28], k__DCT_CONST_ROUNDING); - v[29] = _mm_add_epi32(u[29], k__DCT_CONST_ROUNDING); - v[30] = _mm_add_epi32(u[30], k__DCT_CONST_ROUNDING); - v[31] = _mm_add_epi32(u[31], k__DCT_CONST_ROUNDING); - - u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); - u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); - u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); - u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); - u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); - u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); - u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); - u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); - u[8] = 
_mm_srai_epi32(v[8], DCT_CONST_BITS); - u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS); - u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); - u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); - u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); - u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS); - u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); - u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); - u[16] = _mm_srai_epi32(v[16], DCT_CONST_BITS); - u[17] = _mm_srai_epi32(v[17], DCT_CONST_BITS); - u[18] = _mm_srai_epi32(v[18], DCT_CONST_BITS); - u[19] = _mm_srai_epi32(v[19], DCT_CONST_BITS); - u[20] = _mm_srai_epi32(v[20], DCT_CONST_BITS); - u[21] = _mm_srai_epi32(v[21], DCT_CONST_BITS); - u[22] = _mm_srai_epi32(v[22], DCT_CONST_BITS); - u[23] = _mm_srai_epi32(v[23], DCT_CONST_BITS); - u[24] = _mm_srai_epi32(v[24], DCT_CONST_BITS); - u[25] = _mm_srai_epi32(v[25], DCT_CONST_BITS); - u[26] = _mm_srai_epi32(v[26], DCT_CONST_BITS); - u[27] = _mm_srai_epi32(v[27], DCT_CONST_BITS); - u[28] = _mm_srai_epi32(v[28], DCT_CONST_BITS); - u[29] = _mm_srai_epi32(v[29], DCT_CONST_BITS); - u[30] = _mm_srai_epi32(v[30], DCT_CONST_BITS); - u[31] = _mm_srai_epi32(v[31], DCT_CONST_BITS); - - s[0] = _mm_packs_epi32(u[0], u[1]); - s[1] = _mm_packs_epi32(u[2], u[3]); - s[2] = _mm_packs_epi32(u[4], u[5]); - s[3] = _mm_packs_epi32(u[6], u[7]); - s[4] = _mm_packs_epi32(u[8], u[9]); - s[5] = _mm_packs_epi32(u[10], u[11]); - s[6] = _mm_packs_epi32(u[12], u[13]); - s[7] = _mm_packs_epi32(u[14], u[15]); - s[8] = _mm_packs_epi32(u[16], u[17]); - s[9] = _mm_packs_epi32(u[18], u[19]); - s[10] = _mm_packs_epi32(u[20], u[21]); - s[11] = _mm_packs_epi32(u[22], u[23]); - s[12] = _mm_packs_epi32(u[24], u[25]); - s[13] = _mm_packs_epi32(u[26], u[27]); - s[14] = _mm_packs_epi32(u[28], u[29]); - s[15] = _mm_packs_epi32(u[30], u[31]); - - // stage 2 - u[0] = _mm_unpacklo_epi16(s[8], s[9]); - u[1] = _mm_unpackhi_epi16(s[8], s[9]); - u[2] = _mm_unpacklo_epi16(s[10], s[11]); - u[3] = _mm_unpackhi_epi16(s[10], s[11]); - u[4] = 
_mm_unpacklo_epi16(s[12], s[13]); - u[5] = _mm_unpackhi_epi16(s[12], s[13]); - u[6] = _mm_unpacklo_epi16(s[14], s[15]); - u[7] = _mm_unpackhi_epi16(s[14], s[15]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_p04_p28); - v[1] = _mm_madd_epi16(u[1], k__cospi_p04_p28); - v[2] = _mm_madd_epi16(u[0], k__cospi_p28_m04); - v[3] = _mm_madd_epi16(u[1], k__cospi_p28_m04); - v[4] = _mm_madd_epi16(u[2], k__cospi_p20_p12); - v[5] = _mm_madd_epi16(u[3], k__cospi_p20_p12); - v[6] = _mm_madd_epi16(u[2], k__cospi_p12_m20); - v[7] = _mm_madd_epi16(u[3], k__cospi_p12_m20); - v[8] = _mm_madd_epi16(u[4], k__cospi_m28_p04); - v[9] = _mm_madd_epi16(u[5], k__cospi_m28_p04); - v[10] = _mm_madd_epi16(u[4], k__cospi_p04_p28); - v[11] = _mm_madd_epi16(u[5], k__cospi_p04_p28); - v[12] = _mm_madd_epi16(u[6], k__cospi_m12_p20); - v[13] = _mm_madd_epi16(u[7], k__cospi_m12_p20); - v[14] = _mm_madd_epi16(u[6], k__cospi_p20_p12); - v[15] = _mm_madd_epi16(u[7], k__cospi_p20_p12); - - u[0] = _mm_add_epi32(v[0], v[8]); - u[1] = _mm_add_epi32(v[1], v[9]); - u[2] = _mm_add_epi32(v[2], v[10]); - u[3] = _mm_add_epi32(v[3], v[11]); - u[4] = _mm_add_epi32(v[4], v[12]); - u[5] = _mm_add_epi32(v[5], v[13]); - u[6] = _mm_add_epi32(v[6], v[14]); - u[7] = _mm_add_epi32(v[7], v[15]); - u[8] = _mm_sub_epi32(v[0], v[8]); - u[9] = _mm_sub_epi32(v[1], v[9]); - u[10] = _mm_sub_epi32(v[2], v[10]); - u[11] = _mm_sub_epi32(v[3], v[11]); - u[12] = _mm_sub_epi32(v[4], v[12]); - u[13] = _mm_sub_epi32(v[5], v[13]); - u[14] = _mm_sub_epi32(v[6], v[14]); - u[15] = _mm_sub_epi32(v[7], v[15]); - - v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); - v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); - v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); - v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); - v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); - v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); - v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); - v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); - v[8] = 
_mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); - v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); - v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); - v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); - v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); - v[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); - v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); - v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - - u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); - u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); - u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); - u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); - u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); - u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); - u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); - u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); - u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS); - u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS); - u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); - u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); - u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); - u[13] = _mm_srai_epi32(v[13], DCT_CONST_BITS); - u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); - u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); - - x[0] = _mm_add_epi16(s[0], s[4]); - x[1] = _mm_add_epi16(s[1], s[5]); - x[2] = _mm_add_epi16(s[2], s[6]); - x[3] = _mm_add_epi16(s[3], s[7]); - x[4] = _mm_sub_epi16(s[0], s[4]); - x[5] = _mm_sub_epi16(s[1], s[5]); - x[6] = _mm_sub_epi16(s[2], s[6]); - x[7] = _mm_sub_epi16(s[3], s[7]); - x[8] = _mm_packs_epi32(u[0], u[1]); - x[9] = _mm_packs_epi32(u[2], u[3]); - x[10] = _mm_packs_epi32(u[4], u[5]); - x[11] = _mm_packs_epi32(u[6], u[7]); - x[12] = _mm_packs_epi32(u[8], u[9]); - x[13] = _mm_packs_epi32(u[10], u[11]); - x[14] = _mm_packs_epi32(u[12], u[13]); - x[15] = _mm_packs_epi32(u[14], u[15]); - - // stage 3 - u[0] = _mm_unpacklo_epi16(x[4], x[5]); - u[1] = _mm_unpackhi_epi16(x[4], x[5]); - u[2] = _mm_unpacklo_epi16(x[6], x[7]); - u[3] = _mm_unpackhi_epi16(x[6], x[7]); - u[4] = 
_mm_unpacklo_epi16(x[12], x[13]); - u[5] = _mm_unpackhi_epi16(x[12], x[13]); - u[6] = _mm_unpacklo_epi16(x[14], x[15]); - u[7] = _mm_unpackhi_epi16(x[14], x[15]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_p08_p24); - v[1] = _mm_madd_epi16(u[1], k__cospi_p08_p24); - v[2] = _mm_madd_epi16(u[0], k__cospi_p24_m08); - v[3] = _mm_madd_epi16(u[1], k__cospi_p24_m08); - v[4] = _mm_madd_epi16(u[2], k__cospi_m24_p08); - v[5] = _mm_madd_epi16(u[3], k__cospi_m24_p08); - v[6] = _mm_madd_epi16(u[2], k__cospi_p08_p24); - v[7] = _mm_madd_epi16(u[3], k__cospi_p08_p24); - v[8] = _mm_madd_epi16(u[4], k__cospi_p08_p24); - v[9] = _mm_madd_epi16(u[5], k__cospi_p08_p24); - v[10] = _mm_madd_epi16(u[4], k__cospi_p24_m08); - v[11] = _mm_madd_epi16(u[5], k__cospi_p24_m08); - v[12] = _mm_madd_epi16(u[6], k__cospi_m24_p08); - v[13] = _mm_madd_epi16(u[7], k__cospi_m24_p08); - v[14] = _mm_madd_epi16(u[6], k__cospi_p08_p24); - v[15] = _mm_madd_epi16(u[7], k__cospi_p08_p24); - - u[0] = _mm_add_epi32(v[0], v[4]); - u[1] = _mm_add_epi32(v[1], v[5]); - u[2] = _mm_add_epi32(v[2], v[6]); - u[3] = _mm_add_epi32(v[3], v[7]); - u[4] = _mm_sub_epi32(v[0], v[4]); - u[5] = _mm_sub_epi32(v[1], v[5]); - u[6] = _mm_sub_epi32(v[2], v[6]); - u[7] = _mm_sub_epi32(v[3], v[7]); - u[8] = _mm_add_epi32(v[8], v[12]); - u[9] = _mm_add_epi32(v[9], v[13]); - u[10] = _mm_add_epi32(v[10], v[14]); - u[11] = _mm_add_epi32(v[11], v[15]); - u[12] = _mm_sub_epi32(v[8], v[12]); - u[13] = _mm_sub_epi32(v[9], v[13]); - u[14] = _mm_sub_epi32(v[10], v[14]); - u[15] = _mm_sub_epi32(v[11], v[15]); - - u[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); - u[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); - u[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); - u[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); - u[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); - u[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); - u[8] = 
_mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); - u[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); - u[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); - u[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); - u[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); - u[13] = _mm_add_epi32(u[13], k__DCT_CONST_ROUNDING); - u[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); - u[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - - v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); - v[3] = _mm_srai_epi32(u[3], DCT_CONST_BITS); - v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); - v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); - v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); - v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); - v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS); - v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS); - v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS); - v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS); - v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS); - v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS); - v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS); - v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS); - - s[0] = _mm_add_epi16(x[0], x[2]); - s[1] = _mm_add_epi16(x[1], x[3]); - s[2] = _mm_sub_epi16(x[0], x[2]); - s[3] = _mm_sub_epi16(x[1], x[3]); - s[4] = _mm_packs_epi32(v[0], v[1]); - s[5] = _mm_packs_epi32(v[2], v[3]); - s[6] = _mm_packs_epi32(v[4], v[5]); - s[7] = _mm_packs_epi32(v[6], v[7]); - s[8] = _mm_add_epi16(x[8], x[10]); - s[9] = _mm_add_epi16(x[9], x[11]); - s[10] = _mm_sub_epi16(x[8], x[10]); - s[11] = _mm_sub_epi16(x[9], x[11]); - s[12] = _mm_packs_epi32(v[8], v[9]); - s[13] = _mm_packs_epi32(v[10], v[11]); - s[14] = _mm_packs_epi32(v[12], v[13]); - s[15] = _mm_packs_epi32(v[14], v[15]); - - // stage 4 - u[0] = _mm_unpacklo_epi16(s[2], s[3]); - u[1] = _mm_unpackhi_epi16(s[2], s[3]); - u[2] = _mm_unpacklo_epi16(s[6], s[7]); - u[3] = _mm_unpackhi_epi16(s[6], s[7]); - u[4] = 
_mm_unpacklo_epi16(s[10], s[11]); - u[5] = _mm_unpackhi_epi16(s[10], s[11]); - u[6] = _mm_unpacklo_epi16(s[14], s[15]); - u[7] = _mm_unpackhi_epi16(s[14], s[15]); - - v[0] = _mm_madd_epi16(u[0], k__cospi_m16_m16); - v[1] = _mm_madd_epi16(u[1], k__cospi_m16_m16); - v[2] = _mm_madd_epi16(u[0], k__cospi_p16_m16); - v[3] = _mm_madd_epi16(u[1], k__cospi_p16_m16); - v[4] = _mm_madd_epi16(u[2], k__cospi_p16_p16); - v[5] = _mm_madd_epi16(u[3], k__cospi_p16_p16); - v[6] = _mm_madd_epi16(u[2], k__cospi_m16_p16); - v[7] = _mm_madd_epi16(u[3], k__cospi_m16_p16); - v[8] = _mm_madd_epi16(u[4], k__cospi_p16_p16); - v[9] = _mm_madd_epi16(u[5], k__cospi_p16_p16); - v[10] = _mm_madd_epi16(u[4], k__cospi_m16_p16); - v[11] = _mm_madd_epi16(u[5], k__cospi_m16_p16); - v[12] = _mm_madd_epi16(u[6], k__cospi_m16_m16); - v[13] = _mm_madd_epi16(u[7], k__cospi_m16_m16); - v[14] = _mm_madd_epi16(u[6], k__cospi_p16_m16); - v[15] = _mm_madd_epi16(u[7], k__cospi_p16_m16); - - u[0] = _mm_add_epi32(v[0], k__DCT_CONST_ROUNDING); - u[1] = _mm_add_epi32(v[1], k__DCT_CONST_ROUNDING); - u[2] = _mm_add_epi32(v[2], k__DCT_CONST_ROUNDING); - u[3] = _mm_add_epi32(v[3], k__DCT_CONST_ROUNDING); - u[4] = _mm_add_epi32(v[4], k__DCT_CONST_ROUNDING); - u[5] = _mm_add_epi32(v[5], k__DCT_CONST_ROUNDING); - u[6] = _mm_add_epi32(v[6], k__DCT_CONST_ROUNDING); - u[7] = _mm_add_epi32(v[7], k__DCT_CONST_ROUNDING); - u[8] = _mm_add_epi32(v[8], k__DCT_CONST_ROUNDING); - u[9] = _mm_add_epi32(v[9], k__DCT_CONST_ROUNDING); - u[10] = _mm_add_epi32(v[10], k__DCT_CONST_ROUNDING); - u[11] = _mm_add_epi32(v[11], k__DCT_CONST_ROUNDING); - u[12] = _mm_add_epi32(v[12], k__DCT_CONST_ROUNDING); - u[13] = _mm_add_epi32(v[13], k__DCT_CONST_ROUNDING); - u[14] = _mm_add_epi32(v[14], k__DCT_CONST_ROUNDING); - u[15] = _mm_add_epi32(v[15], k__DCT_CONST_ROUNDING); - - v[0] = _mm_srai_epi32(u[0], DCT_CONST_BITS); - v[1] = _mm_srai_epi32(u[1], DCT_CONST_BITS); - v[2] = _mm_srai_epi32(u[2], DCT_CONST_BITS); - v[3] = _mm_srai_epi32(u[3], 
DCT_CONST_BITS); - v[4] = _mm_srai_epi32(u[4], DCT_CONST_BITS); - v[5] = _mm_srai_epi32(u[5], DCT_CONST_BITS); - v[6] = _mm_srai_epi32(u[6], DCT_CONST_BITS); - v[7] = _mm_srai_epi32(u[7], DCT_CONST_BITS); - v[8] = _mm_srai_epi32(u[8], DCT_CONST_BITS); - v[9] = _mm_srai_epi32(u[9], DCT_CONST_BITS); - v[10] = _mm_srai_epi32(u[10], DCT_CONST_BITS); - v[11] = _mm_srai_epi32(u[11], DCT_CONST_BITS); - v[12] = _mm_srai_epi32(u[12], DCT_CONST_BITS); - v[13] = _mm_srai_epi32(u[13], DCT_CONST_BITS); - v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS); - v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS); - - in[0] = s[0]; - in[1] = _mm_sub_epi16(kZero, s[8]); - in[2] = s[12]; - in[3] = _mm_sub_epi16(kZero, s[4]); - in[4] = _mm_packs_epi32(v[4], v[5]); - in[5] = _mm_packs_epi32(v[12], v[13]); - in[6] = _mm_packs_epi32(v[8], v[9]); - in[7] = _mm_packs_epi32(v[0], v[1]); - in[8] = _mm_packs_epi32(v[2], v[3]); - in[9] = _mm_packs_epi32(v[10], v[11]); - in[10] = _mm_packs_epi32(v[14], v[15]); - in[11] = _mm_packs_epi32(v[6], v[7]); - in[12] = s[5]; - in[13] = _mm_sub_epi16(kZero, s[13]); - in[14] = s[9]; - in[15] = _mm_sub_epi16(kZero, s[1]); -} - -static void fdct16_sse2(__m128i *in0, __m128i *in1) { - fdct16_8col(in0); - fdct16_8col(in1); - array_transpose_16x16(in0, in1); -} - -static void fadst16_sse2(__m128i *in0, __m128i *in1) { - fadst16_8col(in0); - fadst16_8col(in1); - array_transpose_16x16(in0, in1); -} - -void vp10_fht16x16_sse2(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { - __m128i in0[16], in1[16]; - - switch (tx_type) { - case DCT_DCT: - vpx_fdct16x16_sse2(input, output, stride); - break; - case ADST_DCT: - load_buffer_16x16(input, in0, in1, stride); - fadst16_sse2(in0, in1); - right_shift_16x16(in0, in1); - fdct16_sse2(in0, in1); - write_buffer_16x16(output, in0, in1, 16); - break; - case DCT_ADST: - load_buffer_16x16(input, in0, in1, stride); - fdct16_sse2(in0, in1); - right_shift_16x16(in0, in1); - fadst16_sse2(in0, in1); - 
write_buffer_16x16(output, in0, in1, 16); - break; - case ADST_ADST: - load_buffer_16x16(input, in0, in1, stride); - fadst16_sse2(in0, in1); - right_shift_16x16(in0, in1); - fadst16_sse2(in0, in1); - write_buffer_16x16(output, in0, in1, 16); - break; - default: - assert(0); - break; - } -} diff --git a/libs/libvpx/vp10/encoder/x86/dct_ssse3.c b/libs/libvpx/vp10/encoder/x86/dct_ssse3.c deleted file mode 100644 index df298d8711..0000000000 --- a/libs/libvpx/vp10/encoder/x86/dct_ssse3.c +++ /dev/null @@ -1,472 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#if defined(_MSC_VER) && _MSC_VER <= 1500 -// Need to include math.h before calling tmmintrin.h/intrin.h -// in certain versions of MSVS. -#include -#endif -#include // SSSE3 - -#include "./vp10_rtcd.h" -#include "vpx_dsp/x86/inv_txfm_sse2.h" -#include "vpx_dsp/x86/txfm_common_sse2.h" - -void vp10_fdct8x8_quant_ssse3(const int16_t *input, int stride, - int16_t* coeff_ptr, intptr_t n_coeffs, - int skip_block, const int16_t* zbin_ptr, - const int16_t* round_ptr, const int16_t* quant_ptr, - const int16_t* quant_shift_ptr, - int16_t* qcoeff_ptr, - int16_t* dqcoeff_ptr, const int16_t* dequant_ptr, - uint16_t* eob_ptr, - const int16_t* scan_ptr, - const int16_t* iscan_ptr) { - __m128i zero; - int pass; - // Constants - // When we use them, in one case, they are all the same. In all others - // it's a pair of them that we need to repeat four times. This is done - // by constructing the 32 bit constant corresponding to that pair. 
- const __m128i k__dual_p16_p16 = dual_set_epi16(23170, 23170); - const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64); - const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); - const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); - const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); - const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); - const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); - const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64); - const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); - const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); - // Load input - __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); - __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); - __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); - __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); - __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride)); - __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride)); - __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride)); - __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride)); - __m128i *in[8]; - int index = 0; - - (void)scan_ptr; - (void)zbin_ptr; - (void)quant_shift_ptr; - (void)coeff_ptr; - - // Pre-condition input (shift by two) - in0 = _mm_slli_epi16(in0, 2); - in1 = _mm_slli_epi16(in1, 2); - in2 = _mm_slli_epi16(in2, 2); - in3 = _mm_slli_epi16(in3, 2); - in4 = _mm_slli_epi16(in4, 2); - in5 = _mm_slli_epi16(in5, 2); - in6 = _mm_slli_epi16(in6, 2); - in7 = _mm_slli_epi16(in7, 2); - - in[0] = &in0; - in[1] = &in1; - in[2] = &in2; - in[3] = &in3; - in[4] = &in4; - in[5] = &in5; - in[6] = &in6; - in[7] = &in7; - - // We do two passes, first the columns, then the rows. 
The results of the - // first pass are transposed so that the same column code can be reused. The - // results of the second pass are also transposed so that the rows (processed - // as columns) are put back in row positions. - for (pass = 0; pass < 2; pass++) { - // To store results of each pass before the transpose. - __m128i res0, res1, res2, res3, res4, res5, res6, res7; - // Add/subtract - const __m128i q0 = _mm_add_epi16(in0, in7); - const __m128i q1 = _mm_add_epi16(in1, in6); - const __m128i q2 = _mm_add_epi16(in2, in5); - const __m128i q3 = _mm_add_epi16(in3, in4); - const __m128i q4 = _mm_sub_epi16(in3, in4); - const __m128i q5 = _mm_sub_epi16(in2, in5); - const __m128i q6 = _mm_sub_epi16(in1, in6); - const __m128i q7 = _mm_sub_epi16(in0, in7); - // Work on first four results - { - // Add/subtract - const __m128i r0 = _mm_add_epi16(q0, q3); - const __m128i r1 = _mm_add_epi16(q1, q2); - const __m128i r2 = _mm_sub_epi16(q1, q2); - const __m128i r3 = _mm_sub_epi16(q0, q3); - // Interleave to do the multiply by constants which gets us into 32bits - const __m128i t0 = _mm_unpacklo_epi16(r0, r1); - const __m128i t1 = _mm_unpackhi_epi16(r0, r1); - const __m128i t2 = _mm_unpacklo_epi16(r2, r3); - const __m128i t3 = _mm_unpackhi_epi16(r2, r3); - - const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p16_p16); - const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p16_p16); - const __m128i u2 = _mm_madd_epi16(t0, k__cospi_p16_m16); - const __m128i u3 = _mm_madd_epi16(t1, k__cospi_p16_m16); - - const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p24_p08); - const __m128i u5 = _mm_madd_epi16(t3, k__cospi_p24_p08); - const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m08_p24); - const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m08_p24); - // dct_const_round_shift - - const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); - const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); - const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); - const __m128i v3 = _mm_add_epi32(u3, 
k__DCT_CONST_ROUNDING); - - const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); - const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); - const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); - const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); - - const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); - const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); - const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); - const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); - - const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); - const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); - const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); - const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); - // Combine - - res0 = _mm_packs_epi32(w0, w1); - res4 = _mm_packs_epi32(w2, w3); - res2 = _mm_packs_epi32(w4, w5); - res6 = _mm_packs_epi32(w6, w7); - } - // Work on next four results - { - // Interleave to do the multiply by constants which gets us into 32bits - const __m128i d0 = _mm_sub_epi16(q6, q5); - const __m128i d1 = _mm_add_epi16(q6, q5); - const __m128i r0 = _mm_mulhrs_epi16(d0, k__dual_p16_p16); - const __m128i r1 = _mm_mulhrs_epi16(d1, k__dual_p16_p16); - - // Add/subtract - const __m128i x0 = _mm_add_epi16(q4, r0); - const __m128i x1 = _mm_sub_epi16(q4, r0); - const __m128i x2 = _mm_sub_epi16(q7, r1); - const __m128i x3 = _mm_add_epi16(q7, r1); - // Interleave to do the multiply by constants which gets us into 32bits - const __m128i t0 = _mm_unpacklo_epi16(x0, x3); - const __m128i t1 = _mm_unpackhi_epi16(x0, x3); - const __m128i t2 = _mm_unpacklo_epi16(x1, x2); - const __m128i t3 = _mm_unpackhi_epi16(x1, x2); - const __m128i u0 = _mm_madd_epi16(t0, k__cospi_p28_p04); - const __m128i u1 = _mm_madd_epi16(t1, k__cospi_p28_p04); - const __m128i u2 = _mm_madd_epi16(t0, k__cospi_m04_p28); - const __m128i u3 = _mm_madd_epi16(t1, k__cospi_m04_p28); - const __m128i u4 = _mm_madd_epi16(t2, k__cospi_p12_p20); - const __m128i u5 = 
_mm_madd_epi16(t3, k__cospi_p12_p20); - const __m128i u6 = _mm_madd_epi16(t2, k__cospi_m20_p12); - const __m128i u7 = _mm_madd_epi16(t3, k__cospi_m20_p12); - // dct_const_round_shift - const __m128i v0 = _mm_add_epi32(u0, k__DCT_CONST_ROUNDING); - const __m128i v1 = _mm_add_epi32(u1, k__DCT_CONST_ROUNDING); - const __m128i v2 = _mm_add_epi32(u2, k__DCT_CONST_ROUNDING); - const __m128i v3 = _mm_add_epi32(u3, k__DCT_CONST_ROUNDING); - const __m128i v4 = _mm_add_epi32(u4, k__DCT_CONST_ROUNDING); - const __m128i v5 = _mm_add_epi32(u5, k__DCT_CONST_ROUNDING); - const __m128i v6 = _mm_add_epi32(u6, k__DCT_CONST_ROUNDING); - const __m128i v7 = _mm_add_epi32(u7, k__DCT_CONST_ROUNDING); - const __m128i w0 = _mm_srai_epi32(v0, DCT_CONST_BITS); - const __m128i w1 = _mm_srai_epi32(v1, DCT_CONST_BITS); - const __m128i w2 = _mm_srai_epi32(v2, DCT_CONST_BITS); - const __m128i w3 = _mm_srai_epi32(v3, DCT_CONST_BITS); - const __m128i w4 = _mm_srai_epi32(v4, DCT_CONST_BITS); - const __m128i w5 = _mm_srai_epi32(v5, DCT_CONST_BITS); - const __m128i w6 = _mm_srai_epi32(v6, DCT_CONST_BITS); - const __m128i w7 = _mm_srai_epi32(v7, DCT_CONST_BITS); - // Combine - res1 = _mm_packs_epi32(w0, w1); - res7 = _mm_packs_epi32(w2, w3); - res5 = _mm_packs_epi32(w4, w5); - res3 = _mm_packs_epi32(w6, w7); - } - // Transpose the 8x8. 
- { - // 00 01 02 03 04 05 06 07 - // 10 11 12 13 14 15 16 17 - // 20 21 22 23 24 25 26 27 - // 30 31 32 33 34 35 36 37 - // 40 41 42 43 44 45 46 47 - // 50 51 52 53 54 55 56 57 - // 60 61 62 63 64 65 66 67 - // 70 71 72 73 74 75 76 77 - const __m128i tr0_0 = _mm_unpacklo_epi16(res0, res1); - const __m128i tr0_1 = _mm_unpacklo_epi16(res2, res3); - const __m128i tr0_2 = _mm_unpackhi_epi16(res0, res1); - const __m128i tr0_3 = _mm_unpackhi_epi16(res2, res3); - const __m128i tr0_4 = _mm_unpacklo_epi16(res4, res5); - const __m128i tr0_5 = _mm_unpacklo_epi16(res6, res7); - const __m128i tr0_6 = _mm_unpackhi_epi16(res4, res5); - const __m128i tr0_7 = _mm_unpackhi_epi16(res6, res7); - // 00 10 01 11 02 12 03 13 - // 20 30 21 31 22 32 23 33 - // 04 14 05 15 06 16 07 17 - // 24 34 25 35 26 36 27 37 - // 40 50 41 51 42 52 43 53 - // 60 70 61 71 62 72 63 73 - // 54 54 55 55 56 56 57 57 - // 64 74 65 75 66 76 67 77 - const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); - const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); - const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); - const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); - const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); - const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); - const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); - const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); - // 00 10 20 30 01 11 21 31 - // 40 50 60 70 41 51 61 71 - // 02 12 22 32 03 13 23 33 - // 42 52 62 72 43 53 63 73 - // 04 14 24 34 05 15 21 36 - // 44 54 64 74 45 55 61 76 - // 06 16 26 36 07 17 27 37 - // 46 56 66 76 47 57 67 77 - in0 = _mm_unpacklo_epi64(tr1_0, tr1_4); - in1 = _mm_unpackhi_epi64(tr1_0, tr1_4); - in2 = _mm_unpacklo_epi64(tr1_2, tr1_6); - in3 = _mm_unpackhi_epi64(tr1_2, tr1_6); - in4 = _mm_unpacklo_epi64(tr1_1, tr1_5); - in5 = _mm_unpackhi_epi64(tr1_1, tr1_5); - in6 = _mm_unpacklo_epi64(tr1_3, tr1_7); - in7 = _mm_unpackhi_epi64(tr1_3, tr1_7); - // 00 10 20 30 40 50 60 70 - // 01 11 21 31 41 51 
61 71 - // 02 12 22 32 42 52 62 72 - // 03 13 23 33 43 53 63 73 - // 04 14 24 34 44 54 64 74 - // 05 15 25 35 45 55 65 75 - // 06 16 26 36 46 56 66 76 - // 07 17 27 37 47 57 67 77 - } - } - // Post-condition output and store it - { - // Post-condition (division by two) - // division of two 16 bits signed numbers using shifts - // n / 2 = (n - (n >> 15)) >> 1 - const __m128i sign_in0 = _mm_srai_epi16(in0, 15); - const __m128i sign_in1 = _mm_srai_epi16(in1, 15); - const __m128i sign_in2 = _mm_srai_epi16(in2, 15); - const __m128i sign_in3 = _mm_srai_epi16(in3, 15); - const __m128i sign_in4 = _mm_srai_epi16(in4, 15); - const __m128i sign_in5 = _mm_srai_epi16(in5, 15); - const __m128i sign_in6 = _mm_srai_epi16(in6, 15); - const __m128i sign_in7 = _mm_srai_epi16(in7, 15); - in0 = _mm_sub_epi16(in0, sign_in0); - in1 = _mm_sub_epi16(in1, sign_in1); - in2 = _mm_sub_epi16(in2, sign_in2); - in3 = _mm_sub_epi16(in3, sign_in3); - in4 = _mm_sub_epi16(in4, sign_in4); - in5 = _mm_sub_epi16(in5, sign_in5); - in6 = _mm_sub_epi16(in6, sign_in6); - in7 = _mm_sub_epi16(in7, sign_in7); - in0 = _mm_srai_epi16(in0, 1); - in1 = _mm_srai_epi16(in1, 1); - in2 = _mm_srai_epi16(in2, 1); - in3 = _mm_srai_epi16(in3, 1); - in4 = _mm_srai_epi16(in4, 1); - in5 = _mm_srai_epi16(in5, 1); - in6 = _mm_srai_epi16(in6, 1); - in7 = _mm_srai_epi16(in7, 1); - } - - iscan_ptr += n_coeffs; - qcoeff_ptr += n_coeffs; - dqcoeff_ptr += n_coeffs; - n_coeffs = -n_coeffs; - zero = _mm_setzero_si128(); - - if (!skip_block) { - __m128i eob; - __m128i round, quant, dequant, thr; - int16_t nzflag; - { - __m128i coeff0, coeff1; - - // Setup global values - { - round = _mm_load_si128((const __m128i*)round_ptr); - quant = _mm_load_si128((const __m128i*)quant_ptr); - dequant = _mm_load_si128((const __m128i*)dequant_ptr); - } - - { - __m128i coeff0_sign, coeff1_sign; - __m128i qcoeff0, qcoeff1; - __m128i qtmp0, qtmp1; - // Do DC and first 15 AC - coeff0 = *in[0]; - coeff1 = *in[1]; - - // Poor man's sign extract - 
coeff0_sign = _mm_srai_epi16(coeff0, 15); - coeff1_sign = _mm_srai_epi16(coeff1, 15); - qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign); - qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign); - qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); - qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - - qcoeff0 = _mm_adds_epi16(qcoeff0, round); - round = _mm_unpackhi_epi64(round, round); - qcoeff1 = _mm_adds_epi16(qcoeff1, round); - qtmp0 = _mm_mulhi_epi16(qcoeff0, quant); - quant = _mm_unpackhi_epi64(quant, quant); - qtmp1 = _mm_mulhi_epi16(qcoeff1, quant); - - // Reinsert signs - qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign); - qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign); - qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); - qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); - - coeff0 = _mm_mullo_epi16(qcoeff0, dequant); - dequant = _mm_unpackhi_epi64(dequant, dequant); - coeff1 = _mm_mullo_epi16(qcoeff1, dequant); - - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1); - } - - { - // Scan for eob - __m128i zero_coeff0, zero_coeff1; - __m128i nzero_coeff0, nzero_coeff1; - __m128i iscan0, iscan1; - __m128i eob1; - zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero); - zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero); - nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero); - nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero); - iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs)); - iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1); - // Add one to convert from indices to counts - iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0); - iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1); - eob = _mm_and_si128(iscan0, nzero_coeff0); - eob1 = _mm_and_si128(iscan1, nzero_coeff1); - eob = _mm_max_epi16(eob, eob1); - } - n_coeffs += 8 * 2; - } - - // AC only loop - index = 2; - thr = 
_mm_srai_epi16(dequant, 1); - while (n_coeffs < 0) { - __m128i coeff0, coeff1; - { - __m128i coeff0_sign, coeff1_sign; - __m128i qcoeff0, qcoeff1; - __m128i qtmp0, qtmp1; - - assert(index < (int)(sizeof(in) / sizeof(in[0])) - 1); - coeff0 = *in[index]; - coeff1 = *in[index + 1]; - - // Poor man's sign extract - coeff0_sign = _mm_srai_epi16(coeff0, 15); - coeff1_sign = _mm_srai_epi16(coeff1, 15); - qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign); - qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign); - qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); - qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - - nzflag = _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff0, thr)) | - _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr)); - - if (nzflag) { - qcoeff0 = _mm_adds_epi16(qcoeff0, round); - qcoeff1 = _mm_adds_epi16(qcoeff1, round); - qtmp0 = _mm_mulhi_epi16(qcoeff0, quant); - qtmp1 = _mm_mulhi_epi16(qcoeff1, quant); - - // Reinsert signs - qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign); - qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign); - qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); - qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); - - coeff0 = _mm_mullo_epi16(qcoeff0, dequant); - coeff1 = _mm_mullo_epi16(qcoeff1, dequant); - - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1); - } else { - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero); - - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero); - } - } - - if (nzflag) { - // Scan for eob - __m128i zero_coeff0, zero_coeff1; - __m128i nzero_coeff0, nzero_coeff1; - __m128i iscan0, iscan1; - __m128i eob0, eob1; - zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero); - zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero); - 
nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero); - nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero); - iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs)); - iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1); - // Add one to convert from indices to counts - iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0); - iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1); - eob0 = _mm_and_si128(iscan0, nzero_coeff0); - eob1 = _mm_and_si128(iscan1, nzero_coeff1); - eob0 = _mm_max_epi16(eob0, eob1); - eob = _mm_max_epi16(eob, eob0); - } - n_coeffs += 8 * 2; - index += 2; - } - - // Accumulate EOB - { - __m128i eob_shuffled; - eob_shuffled = _mm_shuffle_epi32(eob, 0xe); - eob = _mm_max_epi16(eob, eob_shuffled); - eob_shuffled = _mm_shufflelo_epi16(eob, 0xe); - eob = _mm_max_epi16(eob, eob_shuffled); - eob_shuffled = _mm_shufflelo_epi16(eob, 0x1); - eob = _mm_max_epi16(eob, eob_shuffled); - *eob_ptr = _mm_extract_epi16(eob, 1); - } - } else { - do { - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero); - n_coeffs += 8 * 2; - } while (n_coeffs < 0); - *eob_ptr = 0; - } -} diff --git a/libs/libvpx/vp10/encoder/x86/denoiser_sse2.c b/libs/libvpx/vp10/encoder/x86/denoiser_sse2.c deleted file mode 100644 index 047974ef80..0000000000 --- a/libs/libvpx/vp10/encoder/x86/denoiser_sse2.c +++ /dev/null @@ -1,375 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include - -#include "./vpx_config.h" -#include "./vp10_rtcd.h" - -#include "vpx_ports/emmintrin_compat.h" -#include "vpx/vpx_integer.h" -#include "vp10/common/reconinter.h" -#include "vp10/encoder/context_tree.h" -#include "vp10/encoder/denoiser.h" -#include "vpx_mem/vpx_mem.h" - -// Compute the sum of all pixel differences of this MB. -static INLINE int sum_diff_16x1(__m128i acc_diff) { - const __m128i k_1 = _mm_set1_epi16(1); - const __m128i acc_diff_lo = - _mm_srai_epi16(_mm_unpacklo_epi8(acc_diff, acc_diff), 8); - const __m128i acc_diff_hi = - _mm_srai_epi16(_mm_unpackhi_epi8(acc_diff, acc_diff), 8); - const __m128i acc_diff_16 = _mm_add_epi16(acc_diff_lo, acc_diff_hi); - const __m128i hg_fe_dc_ba = _mm_madd_epi16(acc_diff_16, k_1); - const __m128i hgfe_dcba = - _mm_add_epi32(hg_fe_dc_ba, _mm_srli_si128(hg_fe_dc_ba, 8)); - const __m128i hgfedcba = - _mm_add_epi32(hgfe_dcba, _mm_srli_si128(hgfe_dcba, 4)); - return _mm_cvtsi128_si32(hgfedcba); -} - -// Denoise a 16x1 vector. -static INLINE __m128i vp10_denoiser_16x1_sse2(const uint8_t *sig, - const uint8_t *mc_running_avg_y, - uint8_t *running_avg_y, - const __m128i *k_0, - const __m128i *k_4, - const __m128i *k_8, - const __m128i *k_16, - const __m128i *l3, - const __m128i *l32, - const __m128i *l21, - __m128i acc_diff) { - // Calculate differences - const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0])); - const __m128i v_mc_running_avg_y = - _mm_loadu_si128((const __m128i *)(&mc_running_avg_y[0])); - __m128i v_running_avg_y; - const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig); - const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y); - // Obtain the sign. FF if diff is negative. - const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, *k_0); - // Clamp absolute difference to 16 to be used to get mask. Doing this - // allows us to use _mm_cmpgt_epi8, which operates on signed byte. 
- const __m128i clamped_absdiff = - _mm_min_epu8(_mm_or_si128(pdiff, ndiff), *k_16); - // Get masks for l2 l1 and l0 adjustments. - const __m128i mask2 = _mm_cmpgt_epi8(*k_16, clamped_absdiff); - const __m128i mask1 = _mm_cmpgt_epi8(*k_8, clamped_absdiff); - const __m128i mask0 = _mm_cmpgt_epi8(*k_4, clamped_absdiff); - // Get adjustments for l2, l1, and l0. - __m128i adj2 = _mm_and_si128(mask2, *l32); - const __m128i adj1 = _mm_and_si128(mask1, *l21); - const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff); - __m128i adj, padj, nadj; - - // Combine the adjustments and get absolute adjustments. - adj2 = _mm_add_epi8(adj2, adj1); - adj = _mm_sub_epi8(*l3, adj2); - adj = _mm_andnot_si128(mask0, adj); - adj = _mm_or_si128(adj, adj0); - - // Restore the sign and get positive and negative adjustments. - padj = _mm_andnot_si128(diff_sign, adj); - nadj = _mm_and_si128(diff_sign, adj); - - // Calculate filtered value. - v_running_avg_y = _mm_adds_epu8(v_sig, padj); - v_running_avg_y = _mm_subs_epu8(v_running_avg_y, nadj); - _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y); - - // Adjustments <=7, and each element in acc_diff can fit in signed - // char. - acc_diff = _mm_adds_epi8(acc_diff, padj); - acc_diff = _mm_subs_epi8(acc_diff, nadj); - return acc_diff; -} - -// Denoise a 16x1 vector with a weaker filter. -static INLINE __m128i vp10_denoiser_adj_16x1_sse2( - const uint8_t *sig, const uint8_t *mc_running_avg_y, - uint8_t *running_avg_y, const __m128i k_0, - const __m128i k_delta, __m128i acc_diff) { - __m128i v_running_avg_y = _mm_loadu_si128((__m128i *)(&running_avg_y[0])); - // Calculate differences. - const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0])); - const __m128i v_mc_running_avg_y = - _mm_loadu_si128((const __m128i *)(&mc_running_avg_y[0])); - const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig); - const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y); - // Obtain the sign. FF if diff is negative. 
- const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0); - // Clamp absolute difference to delta to get the adjustment. - const __m128i adj = - _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta); - // Restore the sign and get positive and negative adjustments. - __m128i padj, nadj; - padj = _mm_andnot_si128(diff_sign, adj); - nadj = _mm_and_si128(diff_sign, adj); - // Calculate filtered value. - v_running_avg_y = _mm_subs_epu8(v_running_avg_y, padj); - v_running_avg_y = _mm_adds_epu8(v_running_avg_y, nadj); - _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y); - - // Accumulate the adjustments. - acc_diff = _mm_subs_epi8(acc_diff, padj); - acc_diff = _mm_adds_epi8(acc_diff, nadj); - return acc_diff; -} - -// Denoiser for 4xM and 8xM blocks. -static int vp10_denoiser_NxM_sse2_small( - const uint8_t *sig, int sig_stride, const uint8_t *mc_running_avg_y, - int mc_avg_y_stride, uint8_t *running_avg_y, int avg_y_stride, - int increase_denoising, BLOCK_SIZE bs, int motion_magnitude, int width) { - int sum_diff_thresh, r, sum_diff = 0; - const int shift_inc = (increase_denoising && - motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? - 1 : 0; - uint8_t sig_buffer[8][16], mc_running_buffer[8][16], running_buffer[8][16]; - __m128i acc_diff = _mm_setzero_si128(); - const __m128i k_0 = _mm_setzero_si128(); - const __m128i k_4 = _mm_set1_epi8(4 + shift_inc); - const __m128i k_8 = _mm_set1_epi8(8); - const __m128i k_16 = _mm_set1_epi8(16); - // Modify each level's adjustment according to motion_magnitude. - const __m128i l3 = _mm_set1_epi8( - (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 + shift_inc : 6); - // Difference between level 3 and level 2 is 2. - const __m128i l32 = _mm_set1_epi8(2); - // Difference between level 2 and level 1 is 1. - const __m128i l21 = _mm_set1_epi8(1); - const uint8_t shift = (width == 4) ? 
2 : 1; - - for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> shift); ++r) { - memcpy(sig_buffer[r], sig, width); - memcpy(sig_buffer[r] + width, sig + sig_stride, width); - memcpy(mc_running_buffer[r], mc_running_avg_y, width); - memcpy(mc_running_buffer[r] + width, - mc_running_avg_y + mc_avg_y_stride, width); - memcpy(running_buffer[r], running_avg_y, width); - memcpy(running_buffer[r] + width, running_avg_y + avg_y_stride, width); - if (width == 4) { - memcpy(sig_buffer[r] + width * 2, sig + sig_stride * 2, width); - memcpy(sig_buffer[r] + width * 3, sig + sig_stride * 3, width); - memcpy(mc_running_buffer[r] + width * 2, - mc_running_avg_y + mc_avg_y_stride * 2, width); - memcpy(mc_running_buffer[r] + width * 3, - mc_running_avg_y + mc_avg_y_stride * 3, width); - memcpy(running_buffer[r] + width * 2, - running_avg_y + avg_y_stride * 2, width); - memcpy(running_buffer[r] + width * 3, - running_avg_y + avg_y_stride * 3, width); - } - acc_diff = vp10_denoiser_16x1_sse2(sig_buffer[r], - mc_running_buffer[r], - running_buffer[r], - &k_0, &k_4, &k_8, &k_16, - &l3, &l32, &l21, acc_diff); - memcpy(running_avg_y, running_buffer[r], width); - memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width, width); - if (width == 4) { - memcpy(running_avg_y + avg_y_stride * 2, - running_buffer[r] + width * 2, width); - memcpy(running_avg_y + avg_y_stride * 3, - running_buffer[r] + width * 3, width); - } - // Update pointers for next iteration. - sig += (sig_stride << shift); - mc_running_avg_y += (mc_avg_y_stride << shift); - running_avg_y += (avg_y_stride << shift); - } - - { - sum_diff = sum_diff_16x1(acc_diff); - sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising); - if (abs(sum_diff) > sum_diff_thresh) { - // Before returning to copy the block (i.e., apply no denoising), - // check if we can still apply some (weaker) temporal filtering to - // this block, that would otherwise not be denoised at all. 
Simplest - // is to apply an additional adjustment to running_avg_y to bring it - // closer to sig. The adjustment is capped by a maximum delta, and - // chosen such that in most cases the resulting sum_diff will be - // within the acceptable range given by sum_diff_thresh. - - // The delta is set by the excess of absolute pixel diff over the - // threshold. - const int delta = ((abs(sum_diff) - sum_diff_thresh) >> - num_pels_log2_lookup[bs]) + 1; - // Only apply the adjustment for max delta up to 3. - if (delta < 4) { - const __m128i k_delta = _mm_set1_epi8(delta); - running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]); - for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> shift); ++r) { - acc_diff = vp10_denoiser_adj_16x1_sse2( - sig_buffer[r], mc_running_buffer[r], running_buffer[r], - k_0, k_delta, acc_diff); - memcpy(running_avg_y, running_buffer[r], width); - memcpy(running_avg_y + avg_y_stride, - running_buffer[r] + width, width); - if (width == 4) { - memcpy(running_avg_y + avg_y_stride * 2, - running_buffer[r] + width * 2, width); - memcpy(running_avg_y + avg_y_stride * 3, - running_buffer[r] + width * 3, width); - } - // Update pointers for next iteration. - running_avg_y += (avg_y_stride << shift); - } - sum_diff = sum_diff_16x1(acc_diff); - if (abs(sum_diff) > sum_diff_thresh) { - return COPY_BLOCK; - } - } else { - return COPY_BLOCK; - } - } - } - return FILTER_BLOCK; -} - -// Denoiser for 16xM, 32xM and 64xM blocks -static int vp10_denoiser_NxM_sse2_big(const uint8_t *sig, int sig_stride, - const uint8_t *mc_running_avg_y, - int mc_avg_y_stride, - uint8_t *running_avg_y, - int avg_y_stride, - int increase_denoising, BLOCK_SIZE bs, - int motion_magnitude) { - int sum_diff_thresh, r, c, sum_diff = 0; - const int shift_inc = (increase_denoising && - motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 
- 1 : 0; - __m128i acc_diff[4][4]; - const __m128i k_0 = _mm_setzero_si128(); - const __m128i k_4 = _mm_set1_epi8(4 + shift_inc); - const __m128i k_8 = _mm_set1_epi8(8); - const __m128i k_16 = _mm_set1_epi8(16); - // Modify each level's adjustment according to motion_magnitude. - const __m128i l3 = _mm_set1_epi8( - (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 + shift_inc : 6); - // Difference between level 3 and level 2 is 2. - const __m128i l32 = _mm_set1_epi8(2); - // Difference between level 2 and level 1 is 1. - const __m128i l21 = _mm_set1_epi8(1); - - for (c = 0; c < 4; ++c) { - for (r = 0; r < 4; ++r) { - acc_diff[c][r] = _mm_setzero_si128(); - } - } - - for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) { - for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { - acc_diff[c>>4][r>>4] = vp10_denoiser_16x1_sse2( - sig, mc_running_avg_y, running_avg_y, &k_0, &k_4, - &k_8, &k_16, &l3, &l32, &l21, acc_diff[c>>4][r>>4]); - // Update pointers for next iteration. - sig += 16; - mc_running_avg_y += 16; - running_avg_y += 16; - } - - if ((r + 1) % 16 == 0 || (bs == BLOCK_16X8 && r == 7)) { - for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { - sum_diff += sum_diff_16x1(acc_diff[c>>4][r>>4]); - } - } - - // Update pointers for next iteration. - sig = sig - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + sig_stride; - mc_running_avg_y = mc_running_avg_y - - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + - mc_avg_y_stride; - running_avg_y = running_avg_y - - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + - avg_y_stride; - } - - { - sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising); - if (abs(sum_diff) > sum_diff_thresh) { - const int delta = ((abs(sum_diff) - sum_diff_thresh) >> - num_pels_log2_lookup[bs]) + 1; - - // Only apply the adjustment for max delta up to 3. 
- if (delta < 4) { - const __m128i k_delta = _mm_set1_epi8(delta); - sig -= sig_stride * (4 << b_height_log2_lookup[bs]); - mc_running_avg_y -= mc_avg_y_stride * (4 << b_height_log2_lookup[bs]); - running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]); - sum_diff = 0; - for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) { - for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { - acc_diff[c>>4][r>>4] = vp10_denoiser_adj_16x1_sse2( - sig, mc_running_avg_y, running_avg_y, k_0, - k_delta, acc_diff[c>>4][r>>4]); - // Update pointers for next iteration. - sig += 16; - mc_running_avg_y += 16; - running_avg_y += 16; - } - - if ((r + 1) % 16 == 0 || (bs == BLOCK_16X8 && r == 7)) { - for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { - sum_diff += sum_diff_16x1(acc_diff[c>>4][r>>4]); - } - } - sig = sig - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + sig_stride; - mc_running_avg_y = mc_running_avg_y - - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + - mc_avg_y_stride; - running_avg_y = running_avg_y - - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + - avg_y_stride; - } - if (abs(sum_diff) > sum_diff_thresh) { - return COPY_BLOCK; - } - } else { - return COPY_BLOCK; - } - } - } - return FILTER_BLOCK; -} - -int vp10_denoiser_filter_sse2(const uint8_t *sig, int sig_stride, - const uint8_t *mc_avg, - int mc_avg_stride, - uint8_t *avg, int avg_stride, - int increase_denoising, - BLOCK_SIZE bs, - int motion_magnitude) { - if (bs == BLOCK_4X4 || bs == BLOCK_4X8) { - return vp10_denoiser_NxM_sse2_small(sig, sig_stride, - mc_avg, mc_avg_stride, - avg, avg_stride, - increase_denoising, - bs, motion_magnitude, 4); - } else if (bs == BLOCK_8X4 || bs == BLOCK_8X8 || bs == BLOCK_8X16) { - return vp10_denoiser_NxM_sse2_small(sig, sig_stride, - mc_avg, mc_avg_stride, - avg, avg_stride, - increase_denoising, - bs, motion_magnitude, 8); - } else if (bs == BLOCK_16X8 || bs == BLOCK_16X16 || bs == BLOCK_16X32 || - bs == BLOCK_32X16|| bs == BLOCK_32X32 || bs == BLOCK_32X64 || 
- bs == BLOCK_64X32 || bs == BLOCK_64X64) { - return vp10_denoiser_NxM_sse2_big(sig, sig_stride, - mc_avg, mc_avg_stride, - avg, avg_stride, - increase_denoising, - bs, motion_magnitude); - } else { - return COPY_BLOCK; - } -} diff --git a/libs/libvpx/vp10/encoder/x86/error_intrin_avx2.c b/libs/libvpx/vp10/encoder/x86/error_intrin_avx2.c deleted file mode 100644 index 9766be27bf..0000000000 --- a/libs/libvpx/vp10/encoder/x86/error_intrin_avx2.c +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Usee of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include // AVX2 - -#include "./vp10_rtcd.h" -#include "vpx/vpx_integer.h" - -int64_t vp10_block_error_avx2(const int16_t *coeff, - const int16_t *dqcoeff, - intptr_t block_size, - int64_t *ssz) { - __m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg; - __m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi; - __m256i sse_reg_64hi, ssz_reg_64hi; - __m128i sse_reg128, ssz_reg128; - int64_t sse; - int i; - const __m256i zero_reg = _mm256_set1_epi16(0); - - // init sse and ssz registerd to zero - sse_reg = _mm256_set1_epi16(0); - ssz_reg = _mm256_set1_epi16(0); - - for (i = 0 ; i < block_size ; i+= 16) { - // load 32 bytes from coeff and dqcoeff - coeff_reg = _mm256_loadu_si256((const __m256i *)(coeff + i)); - dqcoeff_reg = _mm256_loadu_si256((const __m256i *)(dqcoeff + i)); - // dqcoeff - coeff - dqcoeff_reg = _mm256_sub_epi16(dqcoeff_reg, coeff_reg); - // madd (dqcoeff - coeff) - dqcoeff_reg = _mm256_madd_epi16(dqcoeff_reg, dqcoeff_reg); - // madd coeff - coeff_reg = _mm256_madd_epi16(coeff_reg, coeff_reg); - // expand each double word of madd (dqcoeff - coeff) to quad word - 
exp_dqcoeff_lo = _mm256_unpacklo_epi32(dqcoeff_reg, zero_reg); - exp_dqcoeff_hi = _mm256_unpackhi_epi32(dqcoeff_reg, zero_reg); - // expand each double word of madd (coeff) to quad word - exp_coeff_lo = _mm256_unpacklo_epi32(coeff_reg, zero_reg); - exp_coeff_hi = _mm256_unpackhi_epi32(coeff_reg, zero_reg); - // add each quad word of madd (dqcoeff - coeff) and madd (coeff) - sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_lo); - ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_lo); - sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_hi); - ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_hi); - } - // save the higher 64 bit of each 128 bit lane - sse_reg_64hi = _mm256_srli_si256(sse_reg, 8); - ssz_reg_64hi = _mm256_srli_si256(ssz_reg, 8); - // add the higher 64 bit to the low 64 bit - sse_reg = _mm256_add_epi64(sse_reg, sse_reg_64hi); - ssz_reg = _mm256_add_epi64(ssz_reg, ssz_reg_64hi); - - // add each 64 bit from each of the 128 bit lane of the 256 bit - sse_reg128 = _mm_add_epi64(_mm256_castsi256_si128(sse_reg), - _mm256_extractf128_si256(sse_reg, 1)); - - ssz_reg128 = _mm_add_epi64(_mm256_castsi256_si128(ssz_reg), - _mm256_extractf128_si256(ssz_reg, 1)); - - // store the results - _mm_storel_epi64((__m128i*)(&sse), sse_reg128); - - _mm_storel_epi64((__m128i*)(ssz), ssz_reg128); - return sse; -} diff --git a/libs/libvpx/vp10/encoder/x86/error_sse2.asm b/libs/libvpx/vp10/encoder/x86/error_sse2.asm deleted file mode 100644 index 0772da418e..0000000000 --- a/libs/libvpx/vp10/encoder/x86/error_sse2.asm +++ /dev/null @@ -1,122 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - -%define private_prefix vp10 - -%include "third_party/x86inc/x86inc.asm" - -SECTION .text - -; int64_t vp10_block_error(int16_t *coeff, int16_t *dqcoeff, intptr_t block_size, -; int64_t *ssz) - -INIT_XMM sse2 -cglobal block_error, 3, 3, 8, uqc, dqc, size, ssz - pxor m4, m4 ; sse accumulator - pxor m6, m6 ; ssz accumulator - pxor m5, m5 ; dedicated zero register - lea uqcq, [uqcq+sizeq*2] - lea dqcq, [dqcq+sizeq*2] - neg sizeq -.loop: - mova m2, [uqcq+sizeq*2] - mova m0, [dqcq+sizeq*2] - mova m3, [uqcq+sizeq*2+mmsize] - mova m1, [dqcq+sizeq*2+mmsize] - psubw m0, m2 - psubw m1, m3 - ; individual errors are max. 15bit+sign, so squares are 30bit, and - ; thus the sum of 2 should fit in a 31bit integer (+ unused sign bit) - pmaddwd m0, m0 - pmaddwd m1, m1 - pmaddwd m2, m2 - pmaddwd m3, m3 - ; accumulate in 64bit - punpckldq m7, m0, m5 - punpckhdq m0, m5 - paddq m4, m7 - punpckldq m7, m1, m5 - paddq m4, m0 - punpckhdq m1, m5 - paddq m4, m7 - punpckldq m7, m2, m5 - paddq m4, m1 - punpckhdq m2, m5 - paddq m6, m7 - punpckldq m7, m3, m5 - paddq m6, m2 - punpckhdq m3, m5 - paddq m6, m7 - paddq m6, m3 - add sizeq, mmsize - jl .loop - - ; accumulate horizontally and store in return value - movhlps m5, m4 - movhlps m7, m6 - paddq m4, m5 - paddq m6, m7 -%if ARCH_X86_64 - movq rax, m4 - movq [sszq], m6 -%else - mov eax, sszm - pshufd m5, m4, 0x1 - movq [eax], m6 - movd eax, m4 - movd edx, m5 -%endif - RET - -; Compute the sum of squared difference between two int16_t vectors. -; int64_t vp10_block_error_fp(int16_t *coeff, int16_t *dqcoeff, -; intptr_t block_size) - -INIT_XMM sse2 -cglobal block_error_fp, 3, 3, 6, uqc, dqc, size - pxor m4, m4 ; sse accumulator - pxor m5, m5 ; dedicated zero register - lea uqcq, [uqcq+sizeq*2] - lea dqcq, [dqcq+sizeq*2] - neg sizeq -.loop: - mova m2, [uqcq+sizeq*2] - mova m0, [dqcq+sizeq*2] - mova m3, [uqcq+sizeq*2+mmsize] - mova m1, [dqcq+sizeq*2+mmsize] - psubw m0, m2 - psubw m1, m3 - ; individual errors are max. 
15bit+sign, so squares are 30bit, and - ; thus the sum of 2 should fit in a 31bit integer (+ unused sign bit) - pmaddwd m0, m0 - pmaddwd m1, m1 - ; accumulate in 64bit - punpckldq m3, m0, m5 - punpckhdq m0, m5 - paddq m4, m3 - punpckldq m3, m1, m5 - paddq m4, m0 - punpckhdq m1, m5 - paddq m4, m3 - paddq m4, m1 - add sizeq, mmsize - jl .loop - - ; accumulate horizontally and store in return value - movhlps m5, m4 - paddq m4, m5 -%if ARCH_X86_64 - movq rax, m4 -%else - pshufd m5, m4, 0x1 - movd eax, m4 - movd edx, m5 -%endif - RET diff --git a/libs/libvpx/vp10/encoder/x86/highbd_block_error_intrin_sse2.c b/libs/libvpx/vp10/encoder/x86/highbd_block_error_intrin_sse2.c deleted file mode 100644 index 6b4cf50994..0000000000 --- a/libs/libvpx/vp10/encoder/x86/highbd_block_error_intrin_sse2.c +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include - -#include "vp10/common/common.h" - -int64_t vp10_highbd_block_error_sse2(tran_low_t *coeff, tran_low_t *dqcoeff, - intptr_t block_size, int64_t *ssz, - int bps) { - int i, j, test; - uint32_t temp[4]; - __m128i max, min, cmp0, cmp1, cmp2, cmp3; - int64_t error = 0, sqcoeff = 0; - const int shift = 2 * (bps - 8); - const int rounding = shift > 0 ? 
1 << (shift - 1) : 0; - - for (i = 0; i < block_size; i+=8) { - // Load the data into xmm registers - __m128i mm_coeff = _mm_load_si128((__m128i*) (coeff + i)); - __m128i mm_coeff2 = _mm_load_si128((__m128i*) (coeff + i + 4)); - __m128i mm_dqcoeff = _mm_load_si128((__m128i*) (dqcoeff + i)); - __m128i mm_dqcoeff2 = _mm_load_si128((__m128i*) (dqcoeff + i + 4)); - // Check if any values require more than 15 bit - max = _mm_set1_epi32(0x3fff); - min = _mm_set1_epi32(0xffffc000); - cmp0 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff, max), - _mm_cmplt_epi32(mm_coeff, min)); - cmp1 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff2, max), - _mm_cmplt_epi32(mm_coeff2, min)); - cmp2 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff, max), - _mm_cmplt_epi32(mm_dqcoeff, min)); - cmp3 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff2, max), - _mm_cmplt_epi32(mm_dqcoeff2, min)); - test = _mm_movemask_epi8(_mm_or_si128(_mm_or_si128(cmp0, cmp1), - _mm_or_si128(cmp2, cmp3))); - - if (!test) { - __m128i mm_diff, error_sse2, sqcoeff_sse2;; - mm_coeff = _mm_packs_epi32(mm_coeff, mm_coeff2); - mm_dqcoeff = _mm_packs_epi32(mm_dqcoeff, mm_dqcoeff2); - mm_diff = _mm_sub_epi16(mm_coeff, mm_dqcoeff); - error_sse2 = _mm_madd_epi16(mm_diff, mm_diff); - sqcoeff_sse2 = _mm_madd_epi16(mm_coeff, mm_coeff); - _mm_storeu_si128((__m128i*)temp, error_sse2); - error = error + temp[0] + temp[1] + temp[2] + temp[3]; - _mm_storeu_si128((__m128i*)temp, sqcoeff_sse2); - sqcoeff += temp[0] + temp[1] + temp[2] + temp[3]; - } else { - for (j = 0; j < 8; j++) { - const int64_t diff = coeff[i + j] - dqcoeff[i + j]; - error += diff * diff; - sqcoeff += (int64_t)coeff[i + j] * (int64_t)coeff[i + j]; - } - } - } - assert(error >= 0 && sqcoeff >= 0); - error = (error + rounding) >> shift; - sqcoeff = (sqcoeff + rounding) >> shift; - - *ssz = sqcoeff; - return error; -} diff --git a/libs/libvpx/vp10/encoder/x86/quantize_sse2.c b/libs/libvpx/vp10/encoder/x86/quantize_sse2.c deleted file mode 100644 index dabd3bd127..0000000000 --- 
a/libs/libvpx/vp10/encoder/x86/quantize_sse2.c +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include - -#include "./vp10_rtcd.h" -#include "vpx/vpx_integer.h" - -void vp10_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs, - int skip_block, const int16_t* zbin_ptr, - const int16_t* round_ptr, const int16_t* quant_ptr, - const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr, - int16_t* dqcoeff_ptr, const int16_t* dequant_ptr, - uint16_t* eob_ptr, - const int16_t* scan_ptr, - const int16_t* iscan_ptr) { - __m128i zero; - __m128i thr; - int16_t nzflag; - (void)scan_ptr; - (void)zbin_ptr; - (void)quant_shift_ptr; - - coeff_ptr += n_coeffs; - iscan_ptr += n_coeffs; - qcoeff_ptr += n_coeffs; - dqcoeff_ptr += n_coeffs; - n_coeffs = -n_coeffs; - zero = _mm_setzero_si128(); - - if (!skip_block) { - __m128i eob; - __m128i round, quant, dequant; - { - __m128i coeff0, coeff1; - - // Setup global values - { - round = _mm_load_si128((const __m128i*)round_ptr); - quant = _mm_load_si128((const __m128i*)quant_ptr); - dequant = _mm_load_si128((const __m128i*)dequant_ptr); - } - - { - __m128i coeff0_sign, coeff1_sign; - __m128i qcoeff0, qcoeff1; - __m128i qtmp0, qtmp1; - // Do DC and first 15 AC - coeff0 = _mm_load_si128((const __m128i*)(coeff_ptr + n_coeffs)); - coeff1 = _mm_load_si128((const __m128i*)(coeff_ptr + n_coeffs) + 1); - - // Poor man's sign extract - coeff0_sign = _mm_srai_epi16(coeff0, 15); - coeff1_sign = _mm_srai_epi16(coeff1, 15); - qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign); - qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign); - qcoeff0 = 
_mm_sub_epi16(qcoeff0, coeff0_sign); - qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - - qcoeff0 = _mm_adds_epi16(qcoeff0, round); - round = _mm_unpackhi_epi64(round, round); - qcoeff1 = _mm_adds_epi16(qcoeff1, round); - qtmp0 = _mm_mulhi_epi16(qcoeff0, quant); - quant = _mm_unpackhi_epi64(quant, quant); - qtmp1 = _mm_mulhi_epi16(qcoeff1, quant); - - // Reinsert signs - qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign); - qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign); - qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); - qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); - - coeff0 = _mm_mullo_epi16(qcoeff0, dequant); - dequant = _mm_unpackhi_epi64(dequant, dequant); - coeff1 = _mm_mullo_epi16(qcoeff1, dequant); - - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1); - } - - { - // Scan for eob - __m128i zero_coeff0, zero_coeff1; - __m128i nzero_coeff0, nzero_coeff1; - __m128i iscan0, iscan1; - __m128i eob1; - zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero); - zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero); - nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero); - nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero); - iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs)); - iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1); - // Add one to convert from indices to counts - iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0); - iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1); - eob = _mm_and_si128(iscan0, nzero_coeff0); - eob1 = _mm_and_si128(iscan1, nzero_coeff1); - eob = _mm_max_epi16(eob, eob1); - } - n_coeffs += 8 * 2; - } - - thr = _mm_srai_epi16(dequant, 1); - - // AC only loop - while (n_coeffs < 0) { - __m128i coeff0, coeff1; - { - __m128i coeff0_sign, coeff1_sign; - __m128i qcoeff0, qcoeff1; - __m128i qtmp0, qtmp1; - - coeff0 = _mm_load_si128((const 
__m128i*)(coeff_ptr + n_coeffs)); - coeff1 = _mm_load_si128((const __m128i*)(coeff_ptr + n_coeffs) + 1); - - // Poor man's sign extract - coeff0_sign = _mm_srai_epi16(coeff0, 15); - coeff1_sign = _mm_srai_epi16(coeff1, 15); - qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign); - qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign); - qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); - qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - - nzflag = _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff0, thr)) | - _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr)); - - if (nzflag) { - qcoeff0 = _mm_adds_epi16(qcoeff0, round); - qcoeff1 = _mm_adds_epi16(qcoeff1, round); - qtmp0 = _mm_mulhi_epi16(qcoeff0, quant); - qtmp1 = _mm_mulhi_epi16(qcoeff1, quant); - - // Reinsert signs - qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign); - qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign); - qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); - qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); - - coeff0 = _mm_mullo_epi16(qcoeff0, dequant); - coeff1 = _mm_mullo_epi16(qcoeff1, dequant); - - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1); - } else { - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero); - - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero); - } - } - - if (nzflag) { - // Scan for eob - __m128i zero_coeff0, zero_coeff1; - __m128i nzero_coeff0, nzero_coeff1; - __m128i iscan0, iscan1; - __m128i eob0, eob1; - zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero); - zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero); - nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero); - nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero); - iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs)); - 
iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1); - // Add one to convert from indices to counts - iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0); - iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1); - eob0 = _mm_and_si128(iscan0, nzero_coeff0); - eob1 = _mm_and_si128(iscan1, nzero_coeff1); - eob0 = _mm_max_epi16(eob0, eob1); - eob = _mm_max_epi16(eob, eob0); - } - n_coeffs += 8 * 2; - } - - // Accumulate EOB - { - __m128i eob_shuffled; - eob_shuffled = _mm_shuffle_epi32(eob, 0xe); - eob = _mm_max_epi16(eob, eob_shuffled); - eob_shuffled = _mm_shufflelo_epi16(eob, 0xe); - eob = _mm_max_epi16(eob, eob_shuffled); - eob_shuffled = _mm_shufflelo_epi16(eob, 0x1); - eob = _mm_max_epi16(eob, eob_shuffled); - *eob_ptr = _mm_extract_epi16(eob, 1); - } - } else { - do { - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero); - n_coeffs += 8 * 2; - } while (n_coeffs < 0); - *eob_ptr = 0; - } -} diff --git a/libs/libvpx/vp10/encoder/x86/quantize_ssse3_x86_64.asm b/libs/libvpx/vp10/encoder/x86/quantize_ssse3_x86_64.asm deleted file mode 100644 index b8fefa2f16..0000000000 --- a/libs/libvpx/vp10/encoder/x86/quantize_ssse3_x86_64.asm +++ /dev/null @@ -1,201 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - -%define private_prefix vp10 - -%include "third_party/x86inc/x86inc.asm" - -SECTION_RODATA -pw_1: times 8 dw 1 - -SECTION .text - -%macro QUANTIZE_FP 2 -cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \ - shift, qcoeff, dqcoeff, dequant, \ - eob, scan, iscan - cmp dword skipm, 0 - jne .blank - - ; actual quantize loop - setup pointers, rounders, etc. - movifnidn coeffq, coeffmp - movifnidn ncoeffq, ncoeffmp - mov r2, dequantmp - movifnidn zbinq, zbinmp - movifnidn roundq, roundmp - movifnidn quantq, quantmp - mova m1, [roundq] ; m1 = round - mova m2, [quantq] ; m2 = quant -%ifidn %1, fp_32x32 - pcmpeqw m5, m5 - psrlw m5, 15 - paddw m1, m5 - psrlw m1, 1 ; m1 = (m1 + 1) / 2 -%endif - mova m3, [r2q] ; m3 = dequant - mov r3, qcoeffmp - mov r4, dqcoeffmp - mov r5, iscanmp -%ifidn %1, fp_32x32 - psllw m2, 1 -%endif - pxor m5, m5 ; m5 = dedicated zero - - lea coeffq, [ coeffq+ncoeffq*2] - lea r5q, [ r5q+ncoeffq*2] - lea r3q, [ r3q+ncoeffq*2] - lea r4q, [r4q+ncoeffq*2] - neg ncoeffq - - ; get DC and first 15 AC coeffs - mova m9, [ coeffq+ncoeffq*2+ 0] ; m9 = c[i] - mova m10, [ coeffq+ncoeffq*2+16] ; m10 = c[i] - pabsw m6, m9 ; m6 = abs(m9) - pabsw m11, m10 ; m11 = abs(m10) - pcmpeqw m7, m7 - - paddsw m6, m1 ; m6 += round - punpckhqdq m1, m1 - paddsw m11, m1 ; m11 += round - pmulhw m8, m6, m2 ; m8 = m6*q>>16 - punpckhqdq m2, m2 - pmulhw m13, m11, m2 ; m13 = m11*q>>16 - psignw m8, m9 ; m8 = reinsert sign - psignw m13, m10 ; m13 = reinsert sign - mova [r3q+ncoeffq*2+ 0], m8 - mova [r3q+ncoeffq*2+16], m13 -%ifidn %1, fp_32x32 - pabsw m8, m8 - pabsw m13, m13 -%endif - pmullw m8, m3 ; r4[i] = r3[i] * q - punpckhqdq m3, m3 - pmullw m13, m3 ; r4[i] = r3[i] * q -%ifidn %1, fp_32x32 - psrlw m8, 1 - psrlw m13, 1 - psignw m8, m9 - psignw m13, m10 - psrlw m0, m3, 2 -%else - psrlw m0, m3, 1 -%endif - mova [r4q+ncoeffq*2+ 0], m8 - mova [r4q+ncoeffq*2+16], m13 - pcmpeqw m8, m5 ; m8 = c[i] == 0 - pcmpeqw m13, m5 ; m13 = c[i] == 0 - mova m6, [ r5q+ncoeffq*2+ 0] 
; m6 = scan[i] - mova m11, [ r5q+ncoeffq*2+16] ; m11 = scan[i] - psubw m6, m7 ; m6 = scan[i] + 1 - psubw m11, m7 ; m11 = scan[i] + 1 - pandn m8, m6 ; m8 = max(eob) - pandn m13, m11 ; m13 = max(eob) - pmaxsw m8, m13 - add ncoeffq, mmsize - jz .accumulate_eob - -.ac_only_loop: - mova m9, [ coeffq+ncoeffq*2+ 0] ; m9 = c[i] - mova m10, [ coeffq+ncoeffq*2+16] ; m10 = c[i] - pabsw m6, m9 ; m6 = abs(m9) - pabsw m11, m10 ; m11 = abs(m10) - - pcmpgtw m7, m6, m0 - pcmpgtw m12, m11, m0 - pmovmskb r6d, m7 - pmovmskb r2d, m12 - - or r6, r2 - jz .skip_iter - - pcmpeqw m7, m7 - - paddsw m6, m1 ; m6 += round - paddsw m11, m1 ; m11 += round - pmulhw m14, m6, m2 ; m14 = m6*q>>16 - pmulhw m13, m11, m2 ; m13 = m11*q>>16 - psignw m14, m9 ; m14 = reinsert sign - psignw m13, m10 ; m13 = reinsert sign - mova [r3q+ncoeffq*2+ 0], m14 - mova [r3q+ncoeffq*2+16], m13 -%ifidn %1, fp_32x32 - pabsw m14, m14 - pabsw m13, m13 -%endif - pmullw m14, m3 ; r4[i] = r3[i] * q - pmullw m13, m3 ; r4[i] = r3[i] * q -%ifidn %1, fp_32x32 - psrlw m14, 1 - psrlw m13, 1 - psignw m14, m9 - psignw m13, m10 -%endif - mova [r4q+ncoeffq*2+ 0], m14 - mova [r4q+ncoeffq*2+16], m13 - pcmpeqw m14, m5 ; m14 = c[i] == 0 - pcmpeqw m13, m5 ; m13 = c[i] == 0 - mova m6, [ r5q+ncoeffq*2+ 0] ; m6 = scan[i] - mova m11, [ r5q+ncoeffq*2+16] ; m11 = scan[i] - psubw m6, m7 ; m6 = scan[i] + 1 - psubw m11, m7 ; m11 = scan[i] + 1 - pandn m14, m6 ; m14 = max(eob) - pandn m13, m11 ; m13 = max(eob) - pmaxsw m8, m14 - pmaxsw m8, m13 - add ncoeffq, mmsize - jl .ac_only_loop - - jmp .accumulate_eob -.skip_iter: - mova [r3q+ncoeffq*2+ 0], m5 - mova [r3q+ncoeffq*2+16], m5 - mova [r4q+ncoeffq*2+ 0], m5 - mova [r4q+ncoeffq*2+16], m5 - add ncoeffq, mmsize - jl .ac_only_loop - -.accumulate_eob: - ; horizontally accumulate/max eobs and write into [eob] memory pointer - mov r2, eobmp - pshufd m7, m8, 0xe - pmaxsw m8, m7 - pshuflw m7, m8, 0xe - pmaxsw m8, m7 - pshuflw m7, m8, 0x1 - pmaxsw m8, m7 - pextrw r6, m8, 0 - mov [r2], r6 - RET - - ; skip-block, 
i.e. just write all zeroes -.blank: - mov r0, dqcoeffmp - movifnidn ncoeffq, ncoeffmp - mov r2, qcoeffmp - mov r3, eobmp - - lea r0q, [r0q+ncoeffq*2] - lea r2q, [r2q+ncoeffq*2] - neg ncoeffq - pxor m7, m7 -.blank_loop: - mova [r0q+ncoeffq*2+ 0], m7 - mova [r0q+ncoeffq*2+16], m7 - mova [r2q+ncoeffq*2+ 0], m7 - mova [r2q+ncoeffq*2+16], m7 - add ncoeffq, mmsize - jl .blank_loop - mov word [r3q], 0 - RET -%endmacro - -INIT_XMM ssse3 -QUANTIZE_FP fp, 7 -QUANTIZE_FP fp_32x32, 7 diff --git a/libs/libvpx/vp10/encoder/x86/ssim_opt_x86_64.asm b/libs/libvpx/vp10/encoder/x86/ssim_opt_x86_64.asm deleted file mode 100644 index b45f0095d8..0000000000 --- a/libs/libvpx/vp10/encoder/x86/ssim_opt_x86_64.asm +++ /dev/null @@ -1,216 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - -%include "vpx_ports/x86_abi_support.asm" - -; tabulate_ssim - sums sum_s,sum_r,sum_sq_s,sum_sq_r, sum_sxr -%macro TABULATE_SSIM 0 - paddusw xmm15, xmm3 ; sum_s - paddusw xmm14, xmm4 ; sum_r - movdqa xmm1, xmm3 - pmaddwd xmm1, xmm1 - paddd xmm13, xmm1 ; sum_sq_s - movdqa xmm2, xmm4 - pmaddwd xmm2, xmm2 - paddd xmm12, xmm2 ; sum_sq_r - pmaddwd xmm3, xmm4 - paddd xmm11, xmm3 ; sum_sxr -%endmacro - -; Sum across the register %1 starting with q words -%macro SUM_ACROSS_Q 1 - movdqa xmm2,%1 - punpckldq %1,xmm0 - punpckhdq xmm2,xmm0 - paddq %1,xmm2 - movdqa xmm2,%1 - punpcklqdq %1,xmm0 - punpckhqdq xmm2,xmm0 - paddq %1,xmm2 -%endmacro - -; Sum across the register %1 starting with q words -%macro SUM_ACROSS_W 1 - movdqa xmm1, %1 - punpcklwd %1,xmm0 - punpckhwd xmm1,xmm0 - paddd %1, xmm1 - SUM_ACROSS_Q %1 -%endmacro -;void ssim_parms_sse2( -; unsigned char *s, -; int sp, -; unsigned char *r, -; int rp -; unsigned long *sum_s, -; unsigned long *sum_r, -; unsigned long *sum_sq_s, -; unsigned long *sum_sq_r, -; unsigned long *sum_sxr); -; -; TODO: Use parm passing through structure, probably don't need the pxors -; ( calling app will initialize to 0 ) could easily fit everything in sse2 -; without too much hastle, and can probably do better estimates with psadw -; or pavgb At this point this is just meant to be first pass for calculating -; all the parms needed for 16x16 ssim so we can play with dssim as distortion -; in mode selection code. 
-global sym(vp10_ssim_parms_16x16_sse2) PRIVATE -sym(vp10_ssim_parms_16x16_sse2): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 9 - SAVE_XMM 15 - push rsi - push rdi - ; end prolog - - mov rsi, arg(0) ;s - mov rcx, arg(1) ;sp - mov rdi, arg(2) ;r - mov rax, arg(3) ;rp - - pxor xmm0, xmm0 - pxor xmm15,xmm15 ;sum_s - pxor xmm14,xmm14 ;sum_r - pxor xmm13,xmm13 ;sum_sq_s - pxor xmm12,xmm12 ;sum_sq_r - pxor xmm11,xmm11 ;sum_sxr - - mov rdx, 16 ;row counter -.NextRow: - - ;grab source and reference pixels - movdqu xmm5, [rsi] - movdqu xmm6, [rdi] - movdqa xmm3, xmm5 - movdqa xmm4, xmm6 - punpckhbw xmm3, xmm0 ; high_s - punpckhbw xmm4, xmm0 ; high_r - - TABULATE_SSIM - - movdqa xmm3, xmm5 - movdqa xmm4, xmm6 - punpcklbw xmm3, xmm0 ; low_s - punpcklbw xmm4, xmm0 ; low_r - - TABULATE_SSIM - - add rsi, rcx ; next s row - add rdi, rax ; next r row - - dec rdx ; counter - jnz .NextRow - - SUM_ACROSS_W xmm15 - SUM_ACROSS_W xmm14 - SUM_ACROSS_Q xmm13 - SUM_ACROSS_Q xmm12 - SUM_ACROSS_Q xmm11 - - mov rdi,arg(4) - movd [rdi], xmm15; - mov rdi,arg(5) - movd [rdi], xmm14; - mov rdi,arg(6) - movd [rdi], xmm13; - mov rdi,arg(7) - movd [rdi], xmm12; - mov rdi,arg(8) - movd [rdi], xmm11; - - ; begin epilog - pop rdi - pop rsi - RESTORE_XMM - UNSHADOW_ARGS - pop rbp - ret - -;void ssim_parms_sse2( -; unsigned char *s, -; int sp, -; unsigned char *r, -; int rp -; unsigned long *sum_s, -; unsigned long *sum_r, -; unsigned long *sum_sq_s, -; unsigned long *sum_sq_r, -; unsigned long *sum_sxr); -; -; TODO: Use parm passing through structure, probably don't need the pxors -; ( calling app will initialize to 0 ) could easily fit everything in sse2 -; without too much hastle, and can probably do better estimates with psadw -; or pavgb At this point this is just meant to be first pass for calculating -; all the parms needed for 16x16 ssim so we can play with dssim as distortion -; in mode selection code. 
-global sym(vp10_ssim_parms_8x8_sse2) PRIVATE -sym(vp10_ssim_parms_8x8_sse2): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 9 - SAVE_XMM 15 - push rsi - push rdi - ; end prolog - - mov rsi, arg(0) ;s - mov rcx, arg(1) ;sp - mov rdi, arg(2) ;r - mov rax, arg(3) ;rp - - pxor xmm0, xmm0 - pxor xmm15,xmm15 ;sum_s - pxor xmm14,xmm14 ;sum_r - pxor xmm13,xmm13 ;sum_sq_s - pxor xmm12,xmm12 ;sum_sq_r - pxor xmm11,xmm11 ;sum_sxr - - mov rdx, 8 ;row counter -.NextRow: - - ;grab source and reference pixels - movq xmm3, [rsi] - movq xmm4, [rdi] - punpcklbw xmm3, xmm0 ; low_s - punpcklbw xmm4, xmm0 ; low_r - - TABULATE_SSIM - - add rsi, rcx ; next s row - add rdi, rax ; next r row - - dec rdx ; counter - jnz .NextRow - - SUM_ACROSS_W xmm15 - SUM_ACROSS_W xmm14 - SUM_ACROSS_Q xmm13 - SUM_ACROSS_Q xmm12 - SUM_ACROSS_Q xmm11 - - mov rdi,arg(4) - movd [rdi], xmm15; - mov rdi,arg(5) - movd [rdi], xmm14; - mov rdi,arg(6) - movd [rdi], xmm13; - mov rdi,arg(7) - movd [rdi], xmm12; - mov rdi,arg(8) - movd [rdi], xmm11; - - ; begin epilog - pop rdi - pop rsi - RESTORE_XMM - UNSHADOW_ARGS - pop rbp - ret diff --git a/libs/libvpx/vp10/encoder/x86/temporal_filter_apply_sse2.asm b/libs/libvpx/vp10/encoder/x86/temporal_filter_apply_sse2.asm deleted file mode 100644 index 7171807133..0000000000 --- a/libs/libvpx/vp10/encoder/x86/temporal_filter_apply_sse2.asm +++ /dev/null @@ -1,212 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - - -%include "vpx_ports/x86_abi_support.asm" - -; void vp10_temporal_filter_apply_sse2 | arg -; (unsigned char *frame1, | 0 -; unsigned int stride, | 1 -; unsigned char *frame2, | 2 -; unsigned int block_width, | 3 -; unsigned int block_height, | 4 -; int strength, | 5 -; int filter_weight, | 6 -; unsigned int *accumulator, | 7 -; unsigned short *count) | 8 -global sym(vp10_temporal_filter_apply_sse2) PRIVATE -sym(vp10_temporal_filter_apply_sse2): - - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 9 - SAVE_XMM 7 - GET_GOT rbx - push rsi - push rdi - ALIGN_STACK 16, rax - %define block_width 0 - %define block_height 16 - %define strength 32 - %define filter_weight 48 - %define rounding_bit 64 - %define rbp_backup 80 - %define stack_size 96 - sub rsp, stack_size - mov [rsp + rbp_backup], rbp - ; end prolog - - mov edx, arg(3) - mov [rsp + block_width], rdx - mov edx, arg(4) - mov [rsp + block_height], rdx - movd xmm6, arg(5) - movdqa [rsp + strength], xmm6 ; where strength is used, all 16 bytes are read - - ; calculate the rounding bit outside the loop - ; 0x8000 >> (16 - strength) - mov rdx, 16 - sub rdx, arg(5) ; 16 - strength - movq xmm4, rdx ; can't use rdx w/ shift - movdqa xmm5, [GLOBAL(_const_top_bit)] - psrlw xmm5, xmm4 - movdqa [rsp + rounding_bit], xmm5 - - mov rsi, arg(0) ; src/frame1 - mov rdx, arg(2) ; predictor frame - mov rdi, arg(7) ; accumulator - mov rax, arg(8) ; count - - ; dup the filter weight and store for later - movd xmm0, arg(6) ; filter_weight - pshuflw xmm0, xmm0, 0 - punpcklwd xmm0, xmm0 - movdqa [rsp + filter_weight], xmm0 - - mov rbp, arg(1) ; stride - pxor xmm7, xmm7 ; zero for extraction - - mov rcx, [rsp + block_width] - imul rcx, [rsp + block_height] - add rcx, rdx - cmp dword ptr [rsp + block_width], 8 - jne .temporal_filter_apply_load_16 - -.temporal_filter_apply_load_8: - movq xmm0, [rsi] ; first row - lea rsi, [rsi + rbp] ; += stride - punpcklbw xmm0, xmm7 ; src[ 0- 7] - movq xmm1, [rsi] ; second row - lea rsi, [rsi + rbp] ; 
+= stride - punpcklbw xmm1, xmm7 ; src[ 8-15] - jmp .temporal_filter_apply_load_finished - -.temporal_filter_apply_load_16: - movdqa xmm0, [rsi] ; src (frame1) - lea rsi, [rsi + rbp] ; += stride - movdqa xmm1, xmm0 - punpcklbw xmm0, xmm7 ; src[ 0- 7] - punpckhbw xmm1, xmm7 ; src[ 8-15] - -.temporal_filter_apply_load_finished: - movdqa xmm2, [rdx] ; predictor (frame2) - movdqa xmm3, xmm2 - punpcklbw xmm2, xmm7 ; pred[ 0- 7] - punpckhbw xmm3, xmm7 ; pred[ 8-15] - - ; modifier = src_byte - pixel_value - psubw xmm0, xmm2 ; src - pred[ 0- 7] - psubw xmm1, xmm3 ; src - pred[ 8-15] - - ; modifier *= modifier - pmullw xmm0, xmm0 ; modifer[ 0- 7]^2 - pmullw xmm1, xmm1 ; modifer[ 8-15]^2 - - ; modifier *= 3 - pmullw xmm0, [GLOBAL(_const_3w)] - pmullw xmm1, [GLOBAL(_const_3w)] - - ; modifer += 0x8000 >> (16 - strength) - paddw xmm0, [rsp + rounding_bit] - paddw xmm1, [rsp + rounding_bit] - - ; modifier >>= strength - psrlw xmm0, [rsp + strength] - psrlw xmm1, [rsp + strength] - - ; modifier = 16 - modifier - ; saturation takes care of modifier > 16 - movdqa xmm3, [GLOBAL(_const_16w)] - movdqa xmm2, [GLOBAL(_const_16w)] - psubusw xmm3, xmm1 - psubusw xmm2, xmm0 - - ; modifier *= filter_weight - pmullw xmm2, [rsp + filter_weight] - pmullw xmm3, [rsp + filter_weight] - - ; count - movdqa xmm4, [rax] - movdqa xmm5, [rax+16] - ; += modifier - paddw xmm4, xmm2 - paddw xmm5, xmm3 - ; write back - movdqa [rax], xmm4 - movdqa [rax+16], xmm5 - lea rax, [rax + 16*2] ; count += 16*(sizeof(short)) - - ; load and extract the predictor up to shorts - pxor xmm7, xmm7 - movdqa xmm0, [rdx] - lea rdx, [rdx + 16*1] ; pred += 16*(sizeof(char)) - movdqa xmm1, xmm0 - punpcklbw xmm0, xmm7 ; pred[ 0- 7] - punpckhbw xmm1, xmm7 ; pred[ 8-15] - - ; modifier *= pixel_value - pmullw xmm0, xmm2 - pmullw xmm1, xmm3 - - ; expand to double words - movdqa xmm2, xmm0 - punpcklwd xmm0, xmm7 ; [ 0- 3] - punpckhwd xmm2, xmm7 ; [ 4- 7] - movdqa xmm3, xmm1 - punpcklwd xmm1, xmm7 ; [ 8-11] - punpckhwd xmm3, xmm7 ; 
[12-15] - - ; accumulator - movdqa xmm4, [rdi] - movdqa xmm5, [rdi+16] - movdqa xmm6, [rdi+32] - movdqa xmm7, [rdi+48] - ; += modifier - paddd xmm4, xmm0 - paddd xmm5, xmm2 - paddd xmm6, xmm1 - paddd xmm7, xmm3 - ; write back - movdqa [rdi], xmm4 - movdqa [rdi+16], xmm5 - movdqa [rdi+32], xmm6 - movdqa [rdi+48], xmm7 - lea rdi, [rdi + 16*4] ; accumulator += 16*(sizeof(int)) - - cmp rdx, rcx - je .temporal_filter_apply_epilog - pxor xmm7, xmm7 ; zero for extraction - cmp dword ptr [rsp + block_width], 16 - je .temporal_filter_apply_load_16 - jmp .temporal_filter_apply_load_8 - -.temporal_filter_apply_epilog: - ; begin epilog - mov rbp, [rsp + rbp_backup] - add rsp, stack_size - pop rsp - pop rdi - pop rsi - RESTORE_GOT - RESTORE_XMM - UNSHADOW_ARGS - pop rbp - ret - -SECTION_RODATA -align 16 -_const_3w: - times 8 dw 3 -align 16 -_const_top_bit: - times 8 dw 1<<15 -align 16 -_const_16w - times 8 dw 16 diff --git a/libs/libvpx/vp10/exports_dec b/libs/libvpx/vp10/exports_dec deleted file mode 100644 index 71c8369ba4..0000000000 --- a/libs/libvpx/vp10/exports_dec +++ /dev/null @@ -1,2 +0,0 @@ -data vpx_codec_vp10_dx_algo -text vpx_codec_vp10_dx diff --git a/libs/libvpx/vp10/exports_enc b/libs/libvpx/vp10/exports_enc deleted file mode 100644 index d1644f2605..0000000000 --- a/libs/libvpx/vp10/exports_enc +++ /dev/null @@ -1,2 +0,0 @@ -data vpx_codec_vp10_cx_algo -text vpx_codec_vp10_cx diff --git a/libs/libvpx/vp10/vp10_common.mk b/libs/libvpx/vp10/vp10_common.mk deleted file mode 100644 index 2eb348873b..0000000000 --- a/libs/libvpx/vp10/vp10_common.mk +++ /dev/null @@ -1,104 +0,0 @@ -## -## Copyright (c) 2010 The WebM project authors. All Rights Reserved. -## -## Use of this source code is governed by a BSD-style license -## that can be found in the LICENSE file in the root of the source -## tree. An additional intellectual property rights grant can be found -## in the file PATENTS. 
All contributing project authors may -## be found in the AUTHORS file in the root of the source tree. -## - -VP10_COMMON_SRCS-yes += vp10_common.mk -VP10_COMMON_SRCS-yes += vp10_iface_common.h -VP10_COMMON_SRCS-yes += common/ppflags.h -VP10_COMMON_SRCS-yes += common/alloccommon.c -VP10_COMMON_SRCS-yes += common/blockd.c -VP10_COMMON_SRCS-yes += common/debugmodes.c -VP10_COMMON_SRCS-yes += common/entropy.c -VP10_COMMON_SRCS-yes += common/entropymode.c -VP10_COMMON_SRCS-yes += common/entropymv.c -VP10_COMMON_SRCS-yes += common/frame_buffers.c -VP10_COMMON_SRCS-yes += common/frame_buffers.h -VP10_COMMON_SRCS-yes += common/alloccommon.h -VP10_COMMON_SRCS-yes += common/blockd.h -VP10_COMMON_SRCS-yes += common/common.h -VP10_COMMON_SRCS-yes += common/entropy.h -VP10_COMMON_SRCS-yes += common/entropymode.h -VP10_COMMON_SRCS-yes += common/entropymv.h -VP10_COMMON_SRCS-yes += common/enums.h -VP10_COMMON_SRCS-yes += common/filter.h -VP10_COMMON_SRCS-yes += common/filter.c -VP10_COMMON_SRCS-yes += common/idct.h -VP10_COMMON_SRCS-yes += common/idct.c -VP10_COMMON_SRCS-yes += common/vp10_inv_txfm.h -VP10_COMMON_SRCS-yes += common/vp10_inv_txfm.c -VP10_COMMON_SRCS-yes += common/loopfilter.h -VP10_COMMON_SRCS-yes += common/thread_common.h -VP10_COMMON_SRCS-yes += common/mv.h -VP10_COMMON_SRCS-yes += common/onyxc_int.h -VP10_COMMON_SRCS-yes += common/pred_common.h -VP10_COMMON_SRCS-yes += common/pred_common.c -VP10_COMMON_SRCS-yes += common/quant_common.h -VP10_COMMON_SRCS-yes += common/reconinter.h -VP10_COMMON_SRCS-yes += common/reconintra.h -VP10_COMMON_SRCS-yes += common/vp10_rtcd.c -VP10_COMMON_SRCS-yes += common/vp10_rtcd_defs.pl -VP10_COMMON_SRCS-yes += common/scale.h -VP10_COMMON_SRCS-yes += common/scale.c -VP10_COMMON_SRCS-yes += common/seg_common.h -VP10_COMMON_SRCS-yes += common/seg_common.c -VP10_COMMON_SRCS-yes += common/textblit.h -VP10_COMMON_SRCS-yes += common/tile_common.h -VP10_COMMON_SRCS-yes += common/tile_common.c -VP10_COMMON_SRCS-yes += common/loopfilter.c 
-VP10_COMMON_SRCS-yes += common/thread_common.c -VP10_COMMON_SRCS-yes += common/mvref_common.c -VP10_COMMON_SRCS-yes += common/mvref_common.h -VP10_COMMON_SRCS-yes += common/quant_common.c -VP10_COMMON_SRCS-yes += common/reconinter.c -VP10_COMMON_SRCS-yes += common/reconintra.c -VP10_COMMON_SRCS-$(CONFIG_POSTPROC_VISUALIZER) += common/textblit.c -VP10_COMMON_SRCS-yes += common/common_data.h -VP10_COMMON_SRCS-yes += common/scan.c -VP10_COMMON_SRCS-yes += common/scan.h -VP10_COMMON_SRCS-yes += common/vp10_fwd_txfm.h -VP10_COMMON_SRCS-yes += common/vp10_fwd_txfm.c - -VP10_COMMON_SRCS-$(CONFIG_VP9_POSTPROC) += common/postproc.h -VP10_COMMON_SRCS-$(CONFIG_VP9_POSTPROC) += common/postproc.c -VP10_COMMON_SRCS-$(CONFIG_VP9_POSTPROC) += common/mfqe.h -VP10_COMMON_SRCS-$(CONFIG_VP9_POSTPROC) += common/mfqe.c -ifeq ($(CONFIG_VP9_POSTPROC),yes) -VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/mfqe_sse2.asm -VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/postproc_sse2.asm -endif - -ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes) -VP10_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/itrans4_dspr2.c -VP10_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/itrans8_dspr2.c -VP10_COMMON_SRCS-$(HAVE_DSPR2) += common/mips/dspr2/itrans16_dspr2.c -endif - -# common (msa) -VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct4x4_msa.c -VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct8x8_msa.c -VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct16x16_msa.c - -ifeq ($(CONFIG_VP9_POSTPROC),yes) -VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/mfqe_msa.c -endif - -VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/idct_intrin_sse2.c -VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp10_fwd_txfm_sse2.c -VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp10_fwd_dct32x32_impl_sse2.h -VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp10_fwd_txfm_impl_sse2.h - -ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes) -VP10_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht4x4_add_neon.c -VP10_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht8x8_add_neon.c 
-endif - -VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp10_inv_txfm_sse2.c -VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp10_inv_txfm_sse2.h - -$(eval $(call rtcd_h_template,vp10_rtcd,vp10/common/vp10_rtcd_defs.pl)) diff --git a/libs/libvpx/vp10/vp10_cx_iface.c b/libs/libvpx/vp10/vp10_cx_iface.c deleted file mode 100644 index 63d3adc1ff..0000000000 --- a/libs/libvpx/vp10/vp10_cx_iface.c +++ /dev/null @@ -1,1395 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include - -#include "./vpx_config.h" -#include "vpx/vpx_encoder.h" -#include "vpx_ports/vpx_once.h" -#include "vpx/internal/vpx_codec_internal.h" -#include "./vpx_version.h" -#include "vp10/encoder/encoder.h" -#include "vpx/vp8cx.h" -#include "vp10/encoder/firstpass.h" -#include "vp10/vp10_iface_common.h" - -struct vp10_extracfg { - int cpu_used; // available cpu percentage in 1/16 - unsigned int enable_auto_alt_ref; - unsigned int noise_sensitivity; - unsigned int sharpness; - unsigned int static_thresh; - unsigned int tile_columns; - unsigned int tile_rows; - unsigned int arnr_max_frames; - unsigned int arnr_strength; - unsigned int min_gf_interval; - unsigned int max_gf_interval; - vp8e_tuning tuning; - unsigned int cq_level; // constrained quality level - unsigned int rc_max_intra_bitrate_pct; - unsigned int rc_max_inter_bitrate_pct; - unsigned int gf_cbr_boost_pct; - unsigned int lossless; - unsigned int frame_parallel_decoding_mode; - AQ_MODE aq_mode; - unsigned int frame_periodic_boost; - vpx_bit_depth_t bit_depth; - vp9e_tune_content content; - vpx_color_space_t color_space; - int color_range; - int render_width; - int 
render_height; -}; - -static struct vp10_extracfg default_extra_cfg = { - 0, // cpu_used - 1, // enable_auto_alt_ref - 0, // noise_sensitivity - 0, // sharpness - 0, // static_thresh - 6, // tile_columns - 0, // tile_rows - 7, // arnr_max_frames - 5, // arnr_strength - 0, // min_gf_interval; 0 -> default decision - 0, // max_gf_interval; 0 -> default decision - VP8_TUNE_PSNR, // tuning - 10, // cq_level - 0, // rc_max_intra_bitrate_pct - 0, // rc_max_inter_bitrate_pct - 0, // gf_cbr_boost_pct - 0, // lossless - 1, // frame_parallel_decoding_mode - NO_AQ, // aq_mode - 0, // frame_periodic_delta_q - VPX_BITS_8, // Bit depth - VP9E_CONTENT_DEFAULT, // content - VPX_CS_UNKNOWN, // color space - 0, // color range - 0, // render width - 0, // render height -}; - -struct vpx_codec_alg_priv { - vpx_codec_priv_t base; - vpx_codec_enc_cfg_t cfg; - struct vp10_extracfg extra_cfg; - VP10EncoderConfig oxcf; - VP10_COMP *cpi; - unsigned char *cx_data; - size_t cx_data_sz; - unsigned char *pending_cx_data; - size_t pending_cx_data_sz; - int pending_frame_count; - size_t pending_frame_sizes[8]; -#if !CONFIG_MISC_FIXES - size_t pending_frame_magnitude; -#endif - vpx_image_t preview_img; - vpx_enc_frame_flags_t next_frame_flags; - vp8_postproc_cfg_t preview_ppcfg; - vpx_codec_pkt_list_decl(256) pkt_list; - unsigned int fixed_kf_cntr; - vpx_codec_priv_output_cx_pkt_cb_pair_t output_cx_pkt_cb; - // BufferPool that holds all reference frames. 
- BufferPool *buffer_pool; -}; - -static VP9_REFFRAME ref_frame_to_vp10_reframe(vpx_ref_frame_type_t frame) { - switch (frame) { - case VP8_LAST_FRAME: - return VP9_LAST_FLAG; - case VP8_GOLD_FRAME: - return VP9_GOLD_FLAG; - case VP8_ALTR_FRAME: - return VP9_ALT_FLAG; - } - assert(0 && "Invalid Reference Frame"); - return VP9_LAST_FLAG; -} - -static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx, - const struct vpx_internal_error_info *error) { - const vpx_codec_err_t res = error->error_code; - - if (res != VPX_CODEC_OK) - ctx->base.err_detail = error->has_detail ? error->detail : NULL; - - return res; -} - - -#undef ERROR -#define ERROR(str) do {\ - ctx->base.err_detail = str;\ - return VPX_CODEC_INVALID_PARAM;\ - } while (0) - -#define RANGE_CHECK(p, memb, lo, hi) do {\ - if (!(((p)->memb == lo || (p)->memb > (lo)) && (p)->memb <= hi)) \ - ERROR(#memb " out of range ["#lo".."#hi"]");\ - } while (0) - -#define RANGE_CHECK_HI(p, memb, hi) do {\ - if (!((p)->memb <= (hi))) \ - ERROR(#memb " out of range [.."#hi"]");\ - } while (0) - -#define RANGE_CHECK_LO(p, memb, lo) do {\ - if (!((p)->memb >= (lo))) \ - ERROR(#memb " out of range ["#lo"..]");\ - } while (0) - -#define RANGE_CHECK_BOOL(p, memb) do {\ - if (!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean");\ - } while (0) - -static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx, - const vpx_codec_enc_cfg_t *cfg, - const struct vp10_extracfg *extra_cfg) { - RANGE_CHECK(cfg, g_w, 1, 65535); // 16 bits available - RANGE_CHECK(cfg, g_h, 1, 65535); // 16 bits available - RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000); - RANGE_CHECK(cfg, g_timebase.num, 1, cfg->g_timebase.den); - RANGE_CHECK_HI(cfg, g_profile, 3); - - RANGE_CHECK_HI(cfg, rc_max_quantizer, 63); - RANGE_CHECK_HI(cfg, rc_min_quantizer, cfg->rc_max_quantizer); - RANGE_CHECK_BOOL(extra_cfg, lossless); - RANGE_CHECK(extra_cfg, aq_mode, 0, AQ_MODE_COUNT - 1); - RANGE_CHECK(extra_cfg, frame_periodic_boost, 0, 1); - 
RANGE_CHECK_HI(cfg, g_threads, 64); - RANGE_CHECK_HI(cfg, g_lag_in_frames, MAX_LAG_BUFFERS); - RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_Q); - RANGE_CHECK_HI(cfg, rc_undershoot_pct, 100); - RANGE_CHECK_HI(cfg, rc_overshoot_pct, 100); - RANGE_CHECK_HI(cfg, rc_2pass_vbr_bias_pct, 100); - RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO); - RANGE_CHECK_BOOL(cfg, rc_resize_allowed); - RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100); - RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100); - RANGE_CHECK_HI(cfg, rc_resize_down_thresh, 100); - RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_LAST_PASS); - RANGE_CHECK(extra_cfg, min_gf_interval, 0, (MAX_LAG_BUFFERS - 1)); - RANGE_CHECK(extra_cfg, max_gf_interval, 0, (MAX_LAG_BUFFERS - 1)); - if (extra_cfg->max_gf_interval > 0) { - RANGE_CHECK(extra_cfg, max_gf_interval, 2, (MAX_LAG_BUFFERS - 1)); - } - if (extra_cfg->min_gf_interval > 0 && extra_cfg->max_gf_interval > 0) { - RANGE_CHECK(extra_cfg, max_gf_interval, extra_cfg->min_gf_interval, - (MAX_LAG_BUFFERS - 1)); - } - - if (cfg->rc_resize_allowed == 1) { - RANGE_CHECK(cfg, rc_scaled_width, 0, cfg->g_w); - RANGE_CHECK(cfg, rc_scaled_height, 0, cfg->g_h); - } - - // Spatial/temporal scalability are not yet supported in VP10. - // Only accept the default value for range checking. - RANGE_CHECK(cfg, ss_number_layers, 1, 1); - RANGE_CHECK(cfg, ts_number_layers, 1, 1); - // VP9 does not support a lower bound on the keyframe interval in - // automatic keyframe placement mode. 
- if (cfg->kf_mode != VPX_KF_DISABLED && - cfg->kf_min_dist != cfg->kf_max_dist && - cfg->kf_min_dist > 0) - ERROR("kf_min_dist not supported in auto mode, use 0 " - "or kf_max_dist instead."); - - RANGE_CHECK(extra_cfg, enable_auto_alt_ref, 0, 2); - RANGE_CHECK(extra_cfg, cpu_used, -8, 8); - RANGE_CHECK_HI(extra_cfg, noise_sensitivity, 6); - RANGE_CHECK(extra_cfg, tile_columns, 0, 6); - RANGE_CHECK(extra_cfg, tile_rows, 0, 2); - RANGE_CHECK_HI(extra_cfg, sharpness, 7); - RANGE_CHECK(extra_cfg, arnr_max_frames, 0, 15); - RANGE_CHECK_HI(extra_cfg, arnr_strength, 6); - RANGE_CHECK(extra_cfg, cq_level, 0, 63); - RANGE_CHECK(cfg, g_bit_depth, VPX_BITS_8, VPX_BITS_12); - RANGE_CHECK(cfg, g_input_bit_depth, 8, 12); - RANGE_CHECK(extra_cfg, content, - VP9E_CONTENT_DEFAULT, VP9E_CONTENT_INVALID - 1); - - // TODO(yaowu): remove this when ssim tuning is implemented for vp9 - if (extra_cfg->tuning == VP8_TUNE_SSIM) - ERROR("Option --tune=ssim is not currently supported in VP9."); - - if (cfg->g_pass == VPX_RC_LAST_PASS) { - const size_t packet_sz = sizeof(FIRSTPASS_STATS); - const int n_packets = (int)(cfg->rc_twopass_stats_in.sz / packet_sz); - const FIRSTPASS_STATS *stats; - - if (cfg->rc_twopass_stats_in.buf == NULL) - ERROR("rc_twopass_stats_in.buf not set."); - - if (cfg->rc_twopass_stats_in.sz % packet_sz) - ERROR("rc_twopass_stats_in.sz indicates truncated packet."); - - if (cfg->rc_twopass_stats_in.sz < 2 * packet_sz) - ERROR("rc_twopass_stats_in requires at least two packets."); - - stats = - (const FIRSTPASS_STATS *)cfg->rc_twopass_stats_in.buf + n_packets - 1; - - if ((int)(stats->count + 0.5) != n_packets - 1) - ERROR("rc_twopass_stats_in missing EOS stats packet"); - } - -#if !CONFIG_VP9_HIGHBITDEPTH - if (cfg->g_profile > (unsigned int)PROFILE_1) { - ERROR("Profile > 1 not supported in this build configuration"); - } -#endif - if (cfg->g_profile <= (unsigned int)PROFILE_1 && - cfg->g_bit_depth > VPX_BITS_8) { - ERROR("Codec high bit-depth not supported in 
profile < 2"); - } - if (cfg->g_profile <= (unsigned int)PROFILE_1 && - cfg->g_input_bit_depth > 8) { - ERROR("Source high bit-depth not supported in profile < 2"); - } - if (cfg->g_profile > (unsigned int)PROFILE_1 && - cfg->g_bit_depth == VPX_BITS_8) { - ERROR("Codec bit-depth 8 not supported in profile > 1"); - } - RANGE_CHECK(extra_cfg, color_space, VPX_CS_UNKNOWN, VPX_CS_SRGB); - RANGE_CHECK(extra_cfg, color_range, 0, 1); - return VPX_CODEC_OK; -} - -static vpx_codec_err_t validate_img(vpx_codec_alg_priv_t *ctx, - const vpx_image_t *img) { - switch (img->fmt) { - case VPX_IMG_FMT_YV12: - case VPX_IMG_FMT_I420: - case VPX_IMG_FMT_I42016: - break; - case VPX_IMG_FMT_I422: - case VPX_IMG_FMT_I444: - case VPX_IMG_FMT_I440: - if (ctx->cfg.g_profile != (unsigned int)PROFILE_1) { - ERROR("Invalid image format. I422, I444, I440 images are " - "not supported in profile."); - } - break; - case VPX_IMG_FMT_I42216: - case VPX_IMG_FMT_I44416: - case VPX_IMG_FMT_I44016: - if (ctx->cfg.g_profile != (unsigned int)PROFILE_1 && - ctx->cfg.g_profile != (unsigned int)PROFILE_3) { - ERROR("Invalid image format. 16-bit I422, I444, I440 images are " - "not supported in profile."); - } - break; - default: - ERROR("Invalid image format. 
Only YV12, I420, I422, I444 images are " - "supported."); - break; - } - - if (img->d_w != ctx->cfg.g_w || img->d_h != ctx->cfg.g_h) - ERROR("Image size must match encoder init configuration size"); - - return VPX_CODEC_OK; -} - -static int get_image_bps(const vpx_image_t *img) { - switch (img->fmt) { - case VPX_IMG_FMT_YV12: - case VPX_IMG_FMT_I420: return 12; - case VPX_IMG_FMT_I422: return 16; - case VPX_IMG_FMT_I444: return 24; - case VPX_IMG_FMT_I440: return 16; - case VPX_IMG_FMT_I42016: return 24; - case VPX_IMG_FMT_I42216: return 32; - case VPX_IMG_FMT_I44416: return 48; - case VPX_IMG_FMT_I44016: return 32; - default: assert(0 && "Invalid image format"); break; - } - return 0; -} - -static vpx_codec_err_t set_encoder_config( - VP10EncoderConfig *oxcf, - const vpx_codec_enc_cfg_t *cfg, - const struct vp10_extracfg *extra_cfg) { - const int is_vbr = cfg->rc_end_usage == VPX_VBR; - oxcf->profile = cfg->g_profile; - oxcf->max_threads = (int)cfg->g_threads; - oxcf->width = cfg->g_w; - oxcf->height = cfg->g_h; - oxcf->bit_depth = cfg->g_bit_depth; - oxcf->input_bit_depth = cfg->g_input_bit_depth; - // guess a frame rate if out of whack, use 30 - oxcf->init_framerate = (double)cfg->g_timebase.den / cfg->g_timebase.num; - if (oxcf->init_framerate > 180) - oxcf->init_framerate = 30; - - oxcf->mode = GOOD; - - switch (cfg->g_pass) { - case VPX_RC_ONE_PASS: - oxcf->pass = 0; - break; - case VPX_RC_FIRST_PASS: - oxcf->pass = 1; - break; - case VPX_RC_LAST_PASS: - oxcf->pass = 2; - break; - } - - oxcf->lag_in_frames = cfg->g_pass == VPX_RC_FIRST_PASS ? 
0 - : cfg->g_lag_in_frames; - oxcf->rc_mode = cfg->rc_end_usage; - - // Convert target bandwidth from Kbit/s to Bit/s - oxcf->target_bandwidth = 1000 * cfg->rc_target_bitrate; - oxcf->rc_max_intra_bitrate_pct = extra_cfg->rc_max_intra_bitrate_pct; - oxcf->rc_max_inter_bitrate_pct = extra_cfg->rc_max_inter_bitrate_pct; - oxcf->gf_cbr_boost_pct = extra_cfg->gf_cbr_boost_pct; - - oxcf->best_allowed_q = - extra_cfg->lossless ? 0 : vp10_quantizer_to_qindex(cfg->rc_min_quantizer); - oxcf->worst_allowed_q = - extra_cfg->lossless ? 0 : vp10_quantizer_to_qindex(cfg->rc_max_quantizer); - oxcf->cq_level = vp10_quantizer_to_qindex(extra_cfg->cq_level); - oxcf->fixed_q = -1; - - oxcf->under_shoot_pct = cfg->rc_undershoot_pct; - oxcf->over_shoot_pct = cfg->rc_overshoot_pct; - - oxcf->scaled_frame_width = cfg->rc_scaled_width; - oxcf->scaled_frame_height = cfg->rc_scaled_height; - if (cfg->rc_resize_allowed == 1) { - oxcf->resize_mode = - (oxcf->scaled_frame_width == 0 || oxcf->scaled_frame_height == 0) ? - RESIZE_DYNAMIC : RESIZE_FIXED; - } else { - oxcf->resize_mode = RESIZE_NONE; - } - - oxcf->maximum_buffer_size_ms = is_vbr ? 240000 : cfg->rc_buf_sz; - oxcf->starting_buffer_level_ms = is_vbr ? 60000 : cfg->rc_buf_initial_sz; - oxcf->optimal_buffer_level_ms = is_vbr ? 
60000 : cfg->rc_buf_optimal_sz; - - oxcf->drop_frames_water_mark = cfg->rc_dropframe_thresh; - - oxcf->two_pass_vbrbias = cfg->rc_2pass_vbr_bias_pct; - oxcf->two_pass_vbrmin_section = cfg->rc_2pass_vbr_minsection_pct; - oxcf->two_pass_vbrmax_section = cfg->rc_2pass_vbr_maxsection_pct; - - oxcf->auto_key = cfg->kf_mode == VPX_KF_AUTO && - cfg->kf_min_dist != cfg->kf_max_dist; - - oxcf->key_freq = cfg->kf_max_dist; - - oxcf->speed = abs(extra_cfg->cpu_used); - oxcf->encode_breakout = extra_cfg->static_thresh; - oxcf->enable_auto_arf = extra_cfg->enable_auto_alt_ref; - oxcf->noise_sensitivity = extra_cfg->noise_sensitivity; - oxcf->sharpness = extra_cfg->sharpness; - - oxcf->two_pass_stats_in = cfg->rc_twopass_stats_in; - -#if CONFIG_FP_MB_STATS - oxcf->firstpass_mb_stats_in = cfg->rc_firstpass_mb_stats_in; -#endif - - oxcf->color_space = extra_cfg->color_space; - oxcf->color_range = extra_cfg->color_range; - oxcf->render_width = extra_cfg->render_width; - oxcf->render_height = extra_cfg->render_height; - oxcf->arnr_max_frames = extra_cfg->arnr_max_frames; - oxcf->arnr_strength = extra_cfg->arnr_strength; - oxcf->min_gf_interval = extra_cfg->min_gf_interval; - oxcf->max_gf_interval = extra_cfg->max_gf_interval; - - oxcf->tuning = extra_cfg->tuning; - oxcf->content = extra_cfg->content; - - oxcf->tile_columns = extra_cfg->tile_columns; - oxcf->tile_rows = extra_cfg->tile_rows; - - oxcf->error_resilient_mode = cfg->g_error_resilient; - oxcf->frame_parallel_decoding_mode = extra_cfg->frame_parallel_decoding_mode; - - oxcf->aq_mode = extra_cfg->aq_mode; - - oxcf->frame_periodic_boost = extra_cfg->frame_periodic_boost; - - /* - printf("Current VP9 Settings: \n"); - printf("target_bandwidth: %d\n", oxcf->target_bandwidth); - printf("noise_sensitivity: %d\n", oxcf->noise_sensitivity); - printf("sharpness: %d\n", oxcf->sharpness); - printf("cpu_used: %d\n", oxcf->cpu_used); - printf("Mode: %d\n", oxcf->mode); - printf("auto_key: %d\n", oxcf->auto_key); - printf("key_freq: 
%d\n", oxcf->key_freq); - printf("end_usage: %d\n", oxcf->end_usage); - printf("under_shoot_pct: %d\n", oxcf->under_shoot_pct); - printf("over_shoot_pct: %d\n", oxcf->over_shoot_pct); - printf("starting_buffer_level: %d\n", oxcf->starting_buffer_level); - printf("optimal_buffer_level: %d\n", oxcf->optimal_buffer_level); - printf("maximum_buffer_size: %d\n", oxcf->maximum_buffer_size); - printf("fixed_q: %d\n", oxcf->fixed_q); - printf("worst_allowed_q: %d\n", oxcf->worst_allowed_q); - printf("best_allowed_q: %d\n", oxcf->best_allowed_q); - printf("allow_spatial_resampling: %d\n", oxcf->allow_spatial_resampling); - printf("scaled_frame_width: %d\n", oxcf->scaled_frame_width); - printf("scaled_frame_height: %d\n", oxcf->scaled_frame_height); - printf("two_pass_vbrbias: %d\n", oxcf->two_pass_vbrbias); - printf("two_pass_vbrmin_section: %d\n", oxcf->two_pass_vbrmin_section); - printf("two_pass_vbrmax_section: %d\n", oxcf->two_pass_vbrmax_section); - printf("lag_in_frames: %d\n", oxcf->lag_in_frames); - printf("enable_auto_arf: %d\n", oxcf->enable_auto_arf); - printf("Version: %d\n", oxcf->Version); - printf("encode_breakout: %d\n", oxcf->encode_breakout); - printf("error resilient: %d\n", oxcf->error_resilient_mode); - printf("frame parallel detokenization: %d\n", - oxcf->frame_parallel_decoding_mode); - */ - return VPX_CODEC_OK; -} - -static vpx_codec_err_t encoder_set_config(vpx_codec_alg_priv_t *ctx, - const vpx_codec_enc_cfg_t *cfg) { - vpx_codec_err_t res; - int force_key = 0; - - if (cfg->g_w != ctx->cfg.g_w || cfg->g_h != ctx->cfg.g_h) { - if (cfg->g_lag_in_frames > 1 || cfg->g_pass != VPX_RC_ONE_PASS) - ERROR("Cannot change width or height after initialization"); - if (!valid_ref_frame_size(ctx->cfg.g_w, ctx->cfg.g_h, cfg->g_w, cfg->g_h) || - (ctx->cpi->initial_width && (int)cfg->g_w > ctx->cpi->initial_width) || - (ctx->cpi->initial_height && (int)cfg->g_h > ctx->cpi->initial_height)) - force_key = 1; - } - - // Prevent increasing lag_in_frames. 
This check is stricter than it needs - // to be -- the limit is not increasing past the first lag_in_frames - // value, but we don't track the initial config, only the last successful - // config. - if (cfg->g_lag_in_frames > ctx->cfg.g_lag_in_frames) - ERROR("Cannot increase lag_in_frames"); - - res = validate_config(ctx, cfg, &ctx->extra_cfg); - - if (res == VPX_CODEC_OK) { - ctx->cfg = *cfg; - set_encoder_config(&ctx->oxcf, &ctx->cfg, &ctx->extra_cfg); - // On profile change, request a key frame - force_key |= ctx->cpi->common.profile != ctx->oxcf.profile; - vp10_change_config(ctx->cpi, &ctx->oxcf); - } - - if (force_key) - ctx->next_frame_flags |= VPX_EFLAG_FORCE_KF; - - return res; -} - -static vpx_codec_err_t ctrl_get_quantizer(vpx_codec_alg_priv_t *ctx, - va_list args) { - int *const arg = va_arg(args, int *); - if (arg == NULL) - return VPX_CODEC_INVALID_PARAM; - *arg = vp10_get_quantizer(ctx->cpi); - return VPX_CODEC_OK; -} - -static vpx_codec_err_t ctrl_get_quantizer64(vpx_codec_alg_priv_t *ctx, - va_list args) { - int *const arg = va_arg(args, int *); - if (arg == NULL) - return VPX_CODEC_INVALID_PARAM; - *arg = vp10_qindex_to_quantizer(vp10_get_quantizer(ctx->cpi)); - return VPX_CODEC_OK; -} - -static vpx_codec_err_t update_extra_cfg(vpx_codec_alg_priv_t *ctx, - const struct vp10_extracfg *extra_cfg) { - const vpx_codec_err_t res = validate_config(ctx, &ctx->cfg, extra_cfg); - if (res == VPX_CODEC_OK) { - ctx->extra_cfg = *extra_cfg; - set_encoder_config(&ctx->oxcf, &ctx->cfg, &ctx->extra_cfg); - vp10_change_config(ctx->cpi, &ctx->oxcf); - } - return res; -} - -static vpx_codec_err_t ctrl_set_cpuused(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.cpu_used = CAST(VP8E_SET_CPUUSED, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_enable_auto_alt_ref(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - 
extra_cfg.enable_auto_alt_ref = CAST(VP8E_SET_ENABLEAUTOALTREF, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_noise_sensitivity(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.noise_sensitivity = CAST(VP9E_SET_NOISE_SENSITIVITY, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_sharpness(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.sharpness = CAST(VP8E_SET_SHARPNESS, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_static_thresh(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.static_thresh = CAST(VP8E_SET_STATIC_THRESHOLD, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_tile_columns(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.tile_columns = CAST(VP9E_SET_TILE_COLUMNS, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_tile_rows(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.tile_rows = CAST(VP9E_SET_TILE_ROWS, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_arnr_max_frames(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.arnr_max_frames = CAST(VP8E_SET_ARNR_MAXFRAMES, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_arnr_strength(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.arnr_strength = CAST(VP8E_SET_ARNR_STRENGTH, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_arnr_type(vpx_codec_alg_priv_t *ctx, - va_list args) { - 
(void)ctx; - (void)args; - return VPX_CODEC_OK; -} - -static vpx_codec_err_t ctrl_set_tuning(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.tuning = CAST(VP8E_SET_TUNING, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_cq_level(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.cq_level = CAST(VP8E_SET_CQ_LEVEL, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_rc_max_intra_bitrate_pct( - vpx_codec_alg_priv_t *ctx, va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.rc_max_intra_bitrate_pct = - CAST(VP8E_SET_MAX_INTRA_BITRATE_PCT, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_rc_max_inter_bitrate_pct( - vpx_codec_alg_priv_t *ctx, va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.rc_max_inter_bitrate_pct = - CAST(VP8E_SET_MAX_INTER_BITRATE_PCT, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_rc_gf_cbr_boost_pct( - vpx_codec_alg_priv_t *ctx, va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.gf_cbr_boost_pct = - CAST(VP9E_SET_GF_CBR_BOOST_PCT, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_lossless(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.lossless = CAST(VP9E_SET_LOSSLESS, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_frame_parallel_decoding_mode( - vpx_codec_alg_priv_t *ctx, va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.frame_parallel_decoding_mode = - CAST(VP9E_SET_FRAME_PARALLEL_DECODING, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_aq_mode(vpx_codec_alg_priv_t *ctx, - va_list 
args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.aq_mode = CAST(VP9E_SET_AQ_MODE, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_min_gf_interval(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.min_gf_interval = CAST(VP9E_SET_MIN_GF_INTERVAL, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_max_gf_interval(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.max_gf_interval = CAST(VP9E_SET_MAX_GF_INTERVAL, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_frame_periodic_boost(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.frame_periodic_boost = CAST(VP9E_SET_FRAME_PERIODIC_BOOST, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t encoder_init(vpx_codec_ctx_t *ctx, - vpx_codec_priv_enc_mr_cfg_t *data) { - vpx_codec_err_t res = VPX_CODEC_OK; - (void)data; - - if (ctx->priv == NULL) { - vpx_codec_alg_priv_t *const priv = vpx_calloc(1, sizeof(*priv)); - if (priv == NULL) - return VPX_CODEC_MEM_ERROR; - - ctx->priv = (vpx_codec_priv_t *)priv; - ctx->priv->init_flags = ctx->init_flags; - ctx->priv->enc.total_encoders = 1; - priv->buffer_pool = - (BufferPool *)vpx_calloc(1, sizeof(BufferPool)); - if (priv->buffer_pool == NULL) - return VPX_CODEC_MEM_ERROR; - -#if CONFIG_MULTITHREAD - if (pthread_mutex_init(&priv->buffer_pool->pool_mutex, NULL)) { - return VPX_CODEC_MEM_ERROR; - } -#endif - - if (ctx->config.enc) { - // Update the reference to the config structure to an internal copy. 
- priv->cfg = *ctx->config.enc; - ctx->config.enc = &priv->cfg; - } - - priv->extra_cfg = default_extra_cfg; - once(vp10_initialize_enc); - - res = validate_config(priv, &priv->cfg, &priv->extra_cfg); - - if (res == VPX_CODEC_OK) { - set_encoder_config(&priv->oxcf, &priv->cfg, &priv->extra_cfg); -#if CONFIG_VP9_HIGHBITDEPTH - priv->oxcf.use_highbitdepth = - (ctx->init_flags & VPX_CODEC_USE_HIGHBITDEPTH) ? 1 : 0; -#endif - priv->cpi = vp10_create_compressor(&priv->oxcf, priv->buffer_pool); - if (priv->cpi == NULL) - res = VPX_CODEC_MEM_ERROR; - else - priv->cpi->output_pkt_list = &priv->pkt_list.head; - } - } - - return res; -} - -static vpx_codec_err_t encoder_destroy(vpx_codec_alg_priv_t *ctx) { - free(ctx->cx_data); - vp10_remove_compressor(ctx->cpi); -#if CONFIG_MULTITHREAD - pthread_mutex_destroy(&ctx->buffer_pool->pool_mutex); -#endif - vpx_free(ctx->buffer_pool); - vpx_free(ctx); - return VPX_CODEC_OK; -} - -static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx, - unsigned long duration, - unsigned long deadline) { - MODE new_mode = BEST; - - switch (ctx->cfg.g_pass) { - case VPX_RC_ONE_PASS: - if (deadline > 0) { - const vpx_codec_enc_cfg_t *const cfg = &ctx->cfg; - - // Convert duration parameter from stream timebase to microseconds. - const uint64_t duration_us = (uint64_t)duration * 1000000 * - (uint64_t)cfg->g_timebase.num /(uint64_t)cfg->g_timebase.den; - - // If the deadline is more that the duration this frame is to be shown, - // use good quality mode. Otherwise use realtime mode. - new_mode = (deadline > duration_us) ? GOOD : REALTIME; - } else { - new_mode = BEST; - } - break; - case VPX_RC_FIRST_PASS: - break; - case VPX_RC_LAST_PASS: - new_mode = deadline > 0 ? 
GOOD : BEST; - break; - } - - if (ctx->oxcf.mode != new_mode) { - ctx->oxcf.mode = new_mode; - vp10_change_config(ctx->cpi, &ctx->oxcf); - } -} - -// Turn on to test if supplemental superframe data breaks decoding -// #define TEST_SUPPLEMENTAL_SUPERFRAME_DATA -static int write_superframe_index(vpx_codec_alg_priv_t *ctx) { - uint8_t marker = 0xc0; - unsigned int mask; - int mag, index_sz; -#if CONFIG_MISC_FIXES - int i; - size_t max_frame_sz = 0; -#endif - - assert(ctx->pending_frame_count); - assert(ctx->pending_frame_count <= 8); - - // Add the number of frames to the marker byte - marker |= ctx->pending_frame_count - 1; -#if CONFIG_MISC_FIXES - for (i = 0; i < ctx->pending_frame_count - 1; i++) { - const size_t frame_sz = (unsigned int) ctx->pending_frame_sizes[i] - 1; - max_frame_sz = frame_sz > max_frame_sz ? frame_sz : max_frame_sz; - } -#endif - - // Choose the magnitude - for (mag = 0, mask = 0xff; mag < 4; mag++) { -#if CONFIG_MISC_FIXES - if (max_frame_sz <= mask) - break; -#else - if (ctx->pending_frame_magnitude < mask) - break; -#endif - mask <<= 8; - mask |= 0xff; - } - marker |= mag << 3; - - // Write the index - index_sz = 2 + (mag + 1) * (ctx->pending_frame_count - CONFIG_MISC_FIXES); - if (ctx->pending_cx_data_sz + index_sz < ctx->cx_data_sz) { - uint8_t *x = ctx->pending_cx_data + ctx->pending_cx_data_sz; - int i, j; -#ifdef TEST_SUPPLEMENTAL_SUPERFRAME_DATA - uint8_t marker_test = 0xc0; - int mag_test = 2; // 1 - 4 - int frames_test = 4; // 1 - 8 - int index_sz_test = 2 + mag_test * frames_test; - marker_test |= frames_test - 1; - marker_test |= (mag_test - 1) << 3; - *x++ = marker_test; - for (i = 0; i < mag_test * frames_test; ++i) - *x++ = 0; // fill up with arbitrary data - *x++ = marker_test; - ctx->pending_cx_data_sz += index_sz_test; - printf("Added supplemental superframe data\n"); -#endif - - *x++ = marker; - for (i = 0; i < ctx->pending_frame_count - CONFIG_MISC_FIXES; i++) { - unsigned int this_sz; - - 
assert(ctx->pending_frame_sizes[i] > 0); - this_sz = (unsigned int)ctx->pending_frame_sizes[i] - CONFIG_MISC_FIXES; - for (j = 0; j <= mag; j++) { - *x++ = this_sz & 0xff; - this_sz >>= 8; - } - } - *x++ = marker; - ctx->pending_cx_data_sz += index_sz; -#ifdef TEST_SUPPLEMENTAL_SUPERFRAME_DATA - index_sz += index_sz_test; -#endif - } - return index_sz; -} - -// vp9 uses 10,000,000 ticks/second as time stamp -#define TICKS_PER_SEC 10000000LL - -static int64_t timebase_units_to_ticks(const vpx_rational_t *timebase, - int64_t n) { - return n * TICKS_PER_SEC * timebase->num / timebase->den; -} - -static int64_t ticks_to_timebase_units(const vpx_rational_t *timebase, - int64_t n) { - const int64_t round = TICKS_PER_SEC * timebase->num / 2 - 1; - return (n * timebase->den + round) / timebase->num / TICKS_PER_SEC; -} - -static vpx_codec_frame_flags_t get_frame_pkt_flags(const VP10_COMP *cpi, - unsigned int lib_flags) { - vpx_codec_frame_flags_t flags = lib_flags << 16; - - if (lib_flags & FRAMEFLAGS_KEY) - flags |= VPX_FRAME_IS_KEY; - - if (cpi->droppable) - flags |= VPX_FRAME_IS_DROPPABLE; - - return flags; -} - -static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx, - const vpx_image_t *img, - vpx_codec_pts_t pts, - unsigned long duration, - vpx_enc_frame_flags_t flags, - unsigned long deadline) { - vpx_codec_err_t res = VPX_CODEC_OK; - VP10_COMP *const cpi = ctx->cpi; - const vpx_rational_t *const timebase = &ctx->cfg.g_timebase; - size_t data_sz; - - if (img != NULL) { - res = validate_img(ctx, img); - // TODO(jzern) the checks related to cpi's validity should be treated as a - // failure condition, encoder setup is done fully in init() currently. - if (res == VPX_CODEC_OK && cpi != NULL) { - // There's no codec control for multiple alt-refs so check the encoder - // instance for its status to determine the compressed data size. - data_sz = ctx->cfg.g_w * ctx->cfg.g_h * get_image_bps(img) / 8 * - (cpi->multi_arf_allowed ? 
8 : 2); - if (data_sz < 4096) - data_sz = 4096; - if (ctx->cx_data == NULL || ctx->cx_data_sz < data_sz) { - ctx->cx_data_sz = data_sz; - free(ctx->cx_data); - ctx->cx_data = (unsigned char*)malloc(ctx->cx_data_sz); - if (ctx->cx_data == NULL) { - return VPX_CODEC_MEM_ERROR; - } - } - } - } - - pick_quickcompress_mode(ctx, duration, deadline); - vpx_codec_pkt_list_init(&ctx->pkt_list); - - // Handle Flags - if (((flags & VP8_EFLAG_NO_UPD_GF) && (flags & VP8_EFLAG_FORCE_GF)) || - ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) { - ctx->base.err_detail = "Conflicting flags."; - return VPX_CODEC_INVALID_PARAM; - } - - vp10_apply_encoding_flags(cpi, flags); - - // Handle fixed keyframe intervals - if (ctx->cfg.kf_mode == VPX_KF_AUTO && - ctx->cfg.kf_min_dist == ctx->cfg.kf_max_dist) { - if (++ctx->fixed_kf_cntr > ctx->cfg.kf_min_dist) { - flags |= VPX_EFLAG_FORCE_KF; - ctx->fixed_kf_cntr = 1; - } - } - - // Initialize the encoder instance on the first frame. - if (res == VPX_CODEC_OK && cpi != NULL) { - unsigned int lib_flags = 0; - YV12_BUFFER_CONFIG sd; - int64_t dst_time_stamp = timebase_units_to_ticks(timebase, pts); - int64_t dst_end_time_stamp = - timebase_units_to_ticks(timebase, pts + duration); - size_t size, cx_data_sz; - unsigned char *cx_data; - - // Set up internal flags - if (ctx->base.init_flags & VPX_CODEC_USE_PSNR) - cpi->b_calculate_psnr = 1; - - if (img != NULL) { - res = image2yuvconfig(img, &sd); - - // Store the original flags in to the frame buffer. Will extract the - // key frame flag when we actually encode this frame. - if (vp10_receive_raw_frame(cpi, flags | ctx->next_frame_flags, - &sd, dst_time_stamp, dst_end_time_stamp)) { - res = update_error_state(ctx, &cpi->common.error); - } - ctx->next_frame_flags = 0; - } - - cx_data = ctx->cx_data; - cx_data_sz = ctx->cx_data_sz; - - /* Any pending invisible frames? 
*/ - if (ctx->pending_cx_data) { - memmove(cx_data, ctx->pending_cx_data, ctx->pending_cx_data_sz); - ctx->pending_cx_data = cx_data; - cx_data += ctx->pending_cx_data_sz; - cx_data_sz -= ctx->pending_cx_data_sz; - - /* TODO: this is a minimal check, the underlying codec doesn't respect - * the buffer size anyway. - */ - if (cx_data_sz < ctx->cx_data_sz / 2) { - ctx->base.err_detail = "Compressed data buffer too small"; - return VPX_CODEC_ERROR; - } - } - - while (cx_data_sz >= ctx->cx_data_sz / 2 && - -1 != vp10_get_compressed_data(cpi, &lib_flags, &size, - cx_data, &dst_time_stamp, - &dst_end_time_stamp, !img)) { - if (size) { - vpx_codec_cx_pkt_t pkt; - - // Pack invisible frames with the next visible frame - if (!cpi->common.show_frame) { - if (ctx->pending_cx_data == 0) - ctx->pending_cx_data = cx_data; - ctx->pending_cx_data_sz += size; - ctx->pending_frame_sizes[ctx->pending_frame_count++] = size; -#if !CONFIG_MISC_FIXES - ctx->pending_frame_magnitude |= size; -#endif - cx_data += size; - cx_data_sz -= size; - - if (ctx->output_cx_pkt_cb.output_cx_pkt) { - pkt.kind = VPX_CODEC_CX_FRAME_PKT; - pkt.data.frame.pts = ticks_to_timebase_units(timebase, - dst_time_stamp); - pkt.data.frame.duration = - (unsigned long)ticks_to_timebase_units(timebase, - dst_end_time_stamp - dst_time_stamp); - pkt.data.frame.flags = get_frame_pkt_flags(cpi, lib_flags); - pkt.data.frame.buf = ctx->pending_cx_data; - pkt.data.frame.sz = size; - ctx->pending_cx_data = NULL; - ctx->pending_cx_data_sz = 0; - ctx->pending_frame_count = 0; -#if !CONFIG_MISC_FIXES - ctx->pending_frame_magnitude = 0; -#endif - ctx->output_cx_pkt_cb.output_cx_pkt( - &pkt, ctx->output_cx_pkt_cb.user_priv); - } - continue; - } - - // Add the frame packet to the list of returned packets. 
- pkt.kind = VPX_CODEC_CX_FRAME_PKT; - pkt.data.frame.pts = ticks_to_timebase_units(timebase, dst_time_stamp); - pkt.data.frame.duration = - (unsigned long)ticks_to_timebase_units(timebase, - dst_end_time_stamp - dst_time_stamp); - pkt.data.frame.flags = get_frame_pkt_flags(cpi, lib_flags); - - if (ctx->pending_cx_data) { - ctx->pending_frame_sizes[ctx->pending_frame_count++] = size; -#if !CONFIG_MISC_FIXES - ctx->pending_frame_magnitude |= size; -#endif - ctx->pending_cx_data_sz += size; - // write the superframe only for the case when - if (!ctx->output_cx_pkt_cb.output_cx_pkt) - size += write_superframe_index(ctx); - pkt.data.frame.buf = ctx->pending_cx_data; - pkt.data.frame.sz = ctx->pending_cx_data_sz; - ctx->pending_cx_data = NULL; - ctx->pending_cx_data_sz = 0; - ctx->pending_frame_count = 0; -#if !CONFIG_MISC_FIXES - ctx->pending_frame_magnitude = 0; -#endif - } else { - pkt.data.frame.buf = cx_data; - pkt.data.frame.sz = size; - } - pkt.data.frame.partition_id = -1; - - if(ctx->output_cx_pkt_cb.output_cx_pkt) - ctx->output_cx_pkt_cb.output_cx_pkt(&pkt, - ctx->output_cx_pkt_cb.user_priv); - else - vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt); - - cx_data += size; - cx_data_sz -= size; - } - } - } - - return res; -} - -static const vpx_codec_cx_pkt_t *encoder_get_cxdata(vpx_codec_alg_priv_t *ctx, - vpx_codec_iter_t *iter) { - return vpx_codec_pkt_list_get(&ctx->pkt_list.head, iter); -} - -static vpx_codec_err_t ctrl_set_reference(vpx_codec_alg_priv_t *ctx, - va_list args) { - vpx_ref_frame_t *const frame = va_arg(args, vpx_ref_frame_t *); - - if (frame != NULL) { - YV12_BUFFER_CONFIG sd; - - image2yuvconfig(&frame->img, &sd); - vp10_set_reference_enc(ctx->cpi, ref_frame_to_vp10_reframe(frame->frame_type), - &sd); - return VPX_CODEC_OK; - } else { - return VPX_CODEC_INVALID_PARAM; - } -} - -static vpx_codec_err_t ctrl_copy_reference(vpx_codec_alg_priv_t *ctx, - va_list args) { - vpx_ref_frame_t *const frame = va_arg(args, vpx_ref_frame_t *); - - if 
(frame != NULL) { - YV12_BUFFER_CONFIG sd; - - image2yuvconfig(&frame->img, &sd); - vp10_copy_reference_enc(ctx->cpi, - ref_frame_to_vp10_reframe(frame->frame_type), &sd); - return VPX_CODEC_OK; - } else { - return VPX_CODEC_INVALID_PARAM; - } -} - -static vpx_codec_err_t ctrl_get_reference(vpx_codec_alg_priv_t *ctx, - va_list args) { - vp9_ref_frame_t *const frame = va_arg(args, vp9_ref_frame_t *); - - if (frame != NULL) { - YV12_BUFFER_CONFIG *fb = get_ref_frame(&ctx->cpi->common, frame->idx); - if (fb == NULL) return VPX_CODEC_ERROR; - - yuvconfig2image(&frame->img, fb, NULL); - return VPX_CODEC_OK; - } else { - return VPX_CODEC_INVALID_PARAM; - } -} - -static vpx_codec_err_t ctrl_set_previewpp(vpx_codec_alg_priv_t *ctx, - va_list args) { -#if CONFIG_VP9_POSTPROC - vp8_postproc_cfg_t *config = va_arg(args, vp8_postproc_cfg_t *); - if (config != NULL) { - ctx->preview_ppcfg = *config; - return VPX_CODEC_OK; - } else { - return VPX_CODEC_INVALID_PARAM; - } -#else - (void)ctx; - (void)args; - return VPX_CODEC_INCAPABLE; -#endif -} - - -static vpx_image_t *encoder_get_preview(vpx_codec_alg_priv_t *ctx) { - YV12_BUFFER_CONFIG sd; - vp10_ppflags_t flags; - vp10_zero(flags); - - if (ctx->preview_ppcfg.post_proc_flag) { - flags.post_proc_flag = ctx->preview_ppcfg.post_proc_flag; - flags.deblocking_level = ctx->preview_ppcfg.deblocking_level; - flags.noise_level = ctx->preview_ppcfg.noise_level; - } - - if (vp10_get_preview_raw_frame(ctx->cpi, &sd, &flags) == 0) { - yuvconfig2image(&ctx->preview_img, &sd, NULL); - return &ctx->preview_img; - } else { - return NULL; - } -} - -static vpx_codec_err_t ctrl_set_roi_map(vpx_codec_alg_priv_t *ctx, - va_list args) { - (void)ctx; - (void)args; - - // TODO(yaowu): Need to re-implement and test for VP9. 
- return VPX_CODEC_INVALID_PARAM; -} - - -static vpx_codec_err_t ctrl_set_active_map(vpx_codec_alg_priv_t *ctx, - va_list args) { - vpx_active_map_t *const map = va_arg(args, vpx_active_map_t *); - - if (map) { - if (!vp10_set_active_map(ctx->cpi, map->active_map, - (int)map->rows, (int)map->cols)) - return VPX_CODEC_OK; - else - return VPX_CODEC_INVALID_PARAM; - } else { - return VPX_CODEC_INVALID_PARAM; - } -} - -static vpx_codec_err_t ctrl_get_active_map(vpx_codec_alg_priv_t *ctx, - va_list args) { - vpx_active_map_t *const map = va_arg(args, vpx_active_map_t *); - - if (map) { - if (!vp10_get_active_map(ctx->cpi, map->active_map, - (int)map->rows, (int)map->cols)) - return VPX_CODEC_OK; - else - return VPX_CODEC_INVALID_PARAM; - } else { - return VPX_CODEC_INVALID_PARAM; - } -} - -static vpx_codec_err_t ctrl_set_scale_mode(vpx_codec_alg_priv_t *ctx, - va_list args) { - vpx_scaling_mode_t *const mode = va_arg(args, vpx_scaling_mode_t *); - - if (mode) { - const int res = vp10_set_internal_size(ctx->cpi, - (VPX_SCALING)mode->h_scaling_mode, - (VPX_SCALING)mode->v_scaling_mode); - return (res == 0) ? 
VPX_CODEC_OK : VPX_CODEC_INVALID_PARAM; - } else { - return VPX_CODEC_INVALID_PARAM; - } -} - -static vpx_codec_err_t ctrl_register_cx_callback(vpx_codec_alg_priv_t *ctx, - va_list args) { - vpx_codec_priv_output_cx_pkt_cb_pair_t *cbp = - (vpx_codec_priv_output_cx_pkt_cb_pair_t *)va_arg(args, void *); - ctx->output_cx_pkt_cb.output_cx_pkt = cbp->output_cx_pkt; - ctx->output_cx_pkt_cb.user_priv = cbp->user_priv; - - return VPX_CODEC_OK; -} - -static vpx_codec_err_t ctrl_set_tune_content(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.content = CAST(VP9E_SET_TUNE_CONTENT, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_color_space(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.color_space = CAST(VP9E_SET_COLOR_SPACE, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_color_range(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.color_range = CAST(VP9E_SET_COLOR_RANGE, args); - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_err_t ctrl_set_render_size(vpx_codec_alg_priv_t *ctx, - va_list args) { - struct vp10_extracfg extra_cfg = ctx->extra_cfg; - int *const render_size = va_arg(args, int *); - extra_cfg.render_width = render_size[0]; - extra_cfg.render_height = render_size[1]; - return update_extra_cfg(ctx, &extra_cfg); -} - -static vpx_codec_ctrl_fn_map_t encoder_ctrl_maps[] = { - {VP8_COPY_REFERENCE, ctrl_copy_reference}, - - // Setters - {VP8_SET_REFERENCE, ctrl_set_reference}, - {VP8_SET_POSTPROC, ctrl_set_previewpp}, - {VP8E_SET_ROI_MAP, ctrl_set_roi_map}, - {VP8E_SET_ACTIVEMAP, ctrl_set_active_map}, - {VP8E_SET_SCALEMODE, ctrl_set_scale_mode}, - {VP8E_SET_CPUUSED, ctrl_set_cpuused}, - {VP8E_SET_ENABLEAUTOALTREF, ctrl_set_enable_auto_alt_ref}, - {VP8E_SET_SHARPNESS, ctrl_set_sharpness}, - 
{VP8E_SET_STATIC_THRESHOLD, ctrl_set_static_thresh}, - {VP9E_SET_TILE_COLUMNS, ctrl_set_tile_columns}, - {VP9E_SET_TILE_ROWS, ctrl_set_tile_rows}, - {VP8E_SET_ARNR_MAXFRAMES, ctrl_set_arnr_max_frames}, - {VP8E_SET_ARNR_STRENGTH, ctrl_set_arnr_strength}, - {VP8E_SET_ARNR_TYPE, ctrl_set_arnr_type}, - {VP8E_SET_TUNING, ctrl_set_tuning}, - {VP8E_SET_CQ_LEVEL, ctrl_set_cq_level}, - {VP8E_SET_MAX_INTRA_BITRATE_PCT, ctrl_set_rc_max_intra_bitrate_pct}, - {VP9E_SET_MAX_INTER_BITRATE_PCT, ctrl_set_rc_max_inter_bitrate_pct}, - {VP9E_SET_GF_CBR_BOOST_PCT, ctrl_set_rc_gf_cbr_boost_pct}, - {VP9E_SET_LOSSLESS, ctrl_set_lossless}, - {VP9E_SET_FRAME_PARALLEL_DECODING, ctrl_set_frame_parallel_decoding_mode}, - {VP9E_SET_AQ_MODE, ctrl_set_aq_mode}, - {VP9E_SET_FRAME_PERIODIC_BOOST, ctrl_set_frame_periodic_boost}, - {VP9E_REGISTER_CX_CALLBACK, ctrl_register_cx_callback}, - {VP9E_SET_TUNE_CONTENT, ctrl_set_tune_content}, - {VP9E_SET_COLOR_SPACE, ctrl_set_color_space}, - {VP9E_SET_COLOR_RANGE, ctrl_set_color_range}, - {VP9E_SET_NOISE_SENSITIVITY, ctrl_set_noise_sensitivity}, - {VP9E_SET_MIN_GF_INTERVAL, ctrl_set_min_gf_interval}, - {VP9E_SET_MAX_GF_INTERVAL, ctrl_set_max_gf_interval}, - {VP9E_SET_RENDER_SIZE, ctrl_set_render_size}, - - // Getters - {VP8E_GET_LAST_QUANTIZER, ctrl_get_quantizer}, - {VP8E_GET_LAST_QUANTIZER_64, ctrl_get_quantizer64}, - {VP9_GET_REFERENCE, ctrl_get_reference}, - {VP9E_GET_ACTIVEMAP, ctrl_get_active_map}, - - { -1, NULL}, -}; - -static vpx_codec_enc_cfg_map_t encoder_usage_cfg_map[] = { - { - 0, - { // NOLINT - 0, // g_usage - 8, // g_threads - 0, // g_profile - - 320, // g_width - 240, // g_height - VPX_BITS_8, // g_bit_depth - 8, // g_input_bit_depth - - {1, 30}, // g_timebase - - 0, // g_error_resilient - - VPX_RC_ONE_PASS, // g_pass - - 25, // g_lag_in_frames - - 0, // rc_dropframe_thresh - 0, // rc_resize_allowed - 0, // rc_scaled_width - 0, // rc_scaled_height - 60, // rc_resize_down_thresold - 30, // rc_resize_up_thresold - - VPX_VBR, // rc_end_usage 
- {NULL, 0}, // rc_twopass_stats_in - {NULL, 0}, // rc_firstpass_mb_stats_in - 256, // rc_target_bandwidth - 0, // rc_min_quantizer - 63, // rc_max_quantizer - 25, // rc_undershoot_pct - 25, // rc_overshoot_pct - - 6000, // rc_max_buffer_size - 4000, // rc_buffer_initial_size - 5000, // rc_buffer_optimal_size - - 50, // rc_two_pass_vbrbias - 0, // rc_two_pass_vbrmin_section - 2000, // rc_two_pass_vbrmax_section - - // keyframing settings (kf) - VPX_KF_AUTO, // g_kfmode - 0, // kf_min_dist - 9999, // kf_max_dist - - // TODO(yunqingwang): Spatial/temporal scalability are not supported - // in VP10. The following 10 parameters are not used, which should - // be removed later. - 1, // ss_number_layers - {0}, - {0}, // ss_target_bitrate - 1, // ts_number_layers - {0}, // ts_target_bitrate - {0}, // ts_rate_decimator - 0, // ts_periodicity - {0}, // ts_layer_id - {0}, // layer_taget_bitrate - 0 // temporal_layering_mode - } - }, -}; - -#ifndef VERSION_STRING -#define VERSION_STRING -#endif -CODEC_INTERFACE(vpx_codec_vp10_cx) = { - "WebM Project VP10 Encoder" VERSION_STRING, - VPX_CODEC_INTERNAL_ABI_VERSION, -#if CONFIG_VP9_HIGHBITDEPTH - VPX_CODEC_CAP_HIGHBITDEPTH | -#endif - VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR, // vpx_codec_caps_t - encoder_init, // vpx_codec_init_fn_t - encoder_destroy, // vpx_codec_destroy_fn_t - encoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t - { // NOLINT - NULL, // vpx_codec_peek_si_fn_t - NULL, // vpx_codec_get_si_fn_t - NULL, // vpx_codec_decode_fn_t - NULL, // vpx_codec_frame_get_fn_t - NULL // vpx_codec_set_fb_fn_t - }, - { // NOLINT - 1, // 1 cfg map - encoder_usage_cfg_map, // vpx_codec_enc_cfg_map_t - encoder_encode, // vpx_codec_encode_fn_t - encoder_get_cxdata, // vpx_codec_get_cx_data_fn_t - encoder_set_config, // vpx_codec_enc_config_set_fn_t - NULL, // vpx_codec_get_global_headers_fn_t - encoder_get_preview, // vpx_codec_get_preview_frame_fn_t - NULL // vpx_codec_enc_mr_get_mem_loc_fn_t - } -}; diff --git 
a/libs/libvpx/vp10/vp10_dx_iface.c b/libs/libvpx/vp10/vp10_dx_iface.c deleted file mode 100644 index 33337a4bd2..0000000000 --- a/libs/libvpx/vp10/vp10_dx_iface.c +++ /dev/null @@ -1,1132 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include -#include - -#include "./vpx_config.h" -#include "./vpx_version.h" - -#include "vpx/internal/vpx_codec_internal.h" -#include "vpx/vp8dx.h" -#include "vpx/vpx_decoder.h" -#include "vpx_dsp/bitreader_buffer.h" -#include "vpx_dsp/vpx_dsp_common.h" -#include "vpx_util/vpx_thread.h" - -#include "vp10/common/alloccommon.h" -#include "vp10/common/frame_buffers.h" - -#include "vp10/decoder/decoder.h" -#include "vp10/decoder/decodeframe.h" - -#include "vp10/vp10_iface_common.h" - -#define VP9_CAP_POSTPROC (CONFIG_VP9_POSTPROC ? VPX_CODEC_CAP_POSTPROC : 0) - -typedef vpx_codec_stream_info_t vp10_stream_info_t; - -// This limit is due to framebuffer numbers. -// TODO(hkuang): Remove this limit after implementing ondemand framebuffers. -#define FRAME_CACHE_SIZE 6 // Cache maximum 6 decoded frames. - -typedef struct cache_frame { - int fb_idx; - vpx_image_t img; -} cache_frame; - -struct vpx_codec_alg_priv { - vpx_codec_priv_t base; - vpx_codec_dec_cfg_t cfg; - vp10_stream_info_t si; - int postproc_cfg_set; - vp8_postproc_cfg_t postproc_cfg; - vpx_decrypt_cb decrypt_cb; - void *decrypt_state; - vpx_image_t img; - int img_avail; - int flushed; - int invert_tile_order; - int last_show_frame; // Index of last output frame. - int byte_alignment; - int skip_loop_filter; - - // Frame parallel related. - int frame_parallel_decode; // frame-based threading. 
- VPxWorker *frame_workers; - int num_frame_workers; - int next_submit_worker_id; - int last_submit_worker_id; - int next_output_worker_id; - int available_threads; - cache_frame frame_cache[FRAME_CACHE_SIZE]; - int frame_cache_write; - int frame_cache_read; - int num_cache_frames; - int need_resync; // wait for key/intra-only frame - // BufferPool that holds all reference frames. Shared by all the FrameWorkers. - BufferPool *buffer_pool; - - // External frame buffer info to save for VP9 common. - void *ext_priv; // Private data associated with the external frame buffers. - vpx_get_frame_buffer_cb_fn_t get_ext_fb_cb; - vpx_release_frame_buffer_cb_fn_t release_ext_fb_cb; -}; - -static vpx_codec_err_t decoder_init(vpx_codec_ctx_t *ctx, - vpx_codec_priv_enc_mr_cfg_t *data) { - // This function only allocates space for the vpx_codec_alg_priv_t - // structure. More memory may be required at the time the stream - // information becomes known. - (void)data; - - if (!ctx->priv) { - vpx_codec_alg_priv_t *const priv = - (vpx_codec_alg_priv_t *)vpx_calloc(1, sizeof(*priv)); - if (priv == NULL) - return VPX_CODEC_MEM_ERROR; - - ctx->priv = (vpx_codec_priv_t *)priv; - ctx->priv->init_flags = ctx->init_flags; - priv->si.sz = sizeof(priv->si); - priv->flushed = 0; - // Only do frame parallel decode when threads > 1. - priv->frame_parallel_decode = - (ctx->config.dec && (ctx->config.dec->threads > 1) && - (ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING)) ? 
1 : 0; - if (ctx->config.dec) { - priv->cfg = *ctx->config.dec; - ctx->config.dec = &priv->cfg; - } - } - - return VPX_CODEC_OK; -} - -static vpx_codec_err_t decoder_destroy(vpx_codec_alg_priv_t *ctx) { - if (ctx->frame_workers != NULL) { - int i; - for (i = 0; i < ctx->num_frame_workers; ++i) { - VPxWorker *const worker = &ctx->frame_workers[i]; - FrameWorkerData *const frame_worker_data = - (FrameWorkerData *)worker->data1; - vpx_get_worker_interface()->end(worker); - vp10_remove_common(&frame_worker_data->pbi->common); -#if CONFIG_VP9_POSTPROC - vp10_free_postproc_buffers(&frame_worker_data->pbi->common); -#endif - vp10_decoder_remove(frame_worker_data->pbi); - vpx_free(frame_worker_data->scratch_buffer); -#if CONFIG_MULTITHREAD - pthread_mutex_destroy(&frame_worker_data->stats_mutex); - pthread_cond_destroy(&frame_worker_data->stats_cond); -#endif - vpx_free(frame_worker_data); - } -#if CONFIG_MULTITHREAD - pthread_mutex_destroy(&ctx->buffer_pool->pool_mutex); -#endif - } - - if (ctx->buffer_pool) { - vp10_free_ref_frame_buffers(ctx->buffer_pool); - vp10_free_internal_frame_buffers(&ctx->buffer_pool->int_frame_buffers); - } - - vpx_free(ctx->frame_workers); - vpx_free(ctx->buffer_pool); - vpx_free(ctx); - return VPX_CODEC_OK; -} - -static int parse_bitdepth_colorspace_sampling( - BITSTREAM_PROFILE profile, struct vpx_read_bit_buffer *rb) { - vpx_color_space_t color_space; - if (profile >= PROFILE_2) - rb->bit_offset += 1; // Bit-depth 10 or 12. - color_space = (vpx_color_space_t)vpx_rb_read_literal(rb, 3); - if (color_space != VPX_CS_SRGB) { - rb->bit_offset += 1; // [16,235] (including xvycc) vs [0,255] range. - if (profile == PROFILE_1 || profile == PROFILE_3) { - rb->bit_offset += 2; // subsampling x/y. - rb->bit_offset += 1; // unused. - } - } else { - if (profile == PROFILE_1 || profile == PROFILE_3) { - rb->bit_offset += 1; // unused - } else { - // RGB is only available in version 1. 
- return 0; - } - } - return 1; -} - -static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data, - unsigned int data_sz, - vpx_codec_stream_info_t *si, - int *is_intra_only, - vpx_decrypt_cb decrypt_cb, - void *decrypt_state) { - int intra_only_flag = 0; - uint8_t clear_buffer[9]; - - if (data + data_sz <= data) - return VPX_CODEC_INVALID_PARAM; - - si->is_kf = 0; - si->w = si->h = 0; - - if (decrypt_cb) { - data_sz = VPXMIN(sizeof(clear_buffer), data_sz); - decrypt_cb(decrypt_state, data, clear_buffer, data_sz); - data = clear_buffer; - } - - { - int show_frame; - int error_resilient; - struct vpx_read_bit_buffer rb = { data, data + data_sz, 0, NULL, NULL }; - const int frame_marker = vpx_rb_read_literal(&rb, 2); - const BITSTREAM_PROFILE profile = vp10_read_profile(&rb); - - if (frame_marker != VP9_FRAME_MARKER) - return VPX_CODEC_UNSUP_BITSTREAM; - - if (profile >= MAX_PROFILES) - return VPX_CODEC_UNSUP_BITSTREAM; - - if ((profile >= 2 && data_sz <= 1) || data_sz < 1) - return VPX_CODEC_UNSUP_BITSTREAM; - - if (vpx_rb_read_bit(&rb)) { // show an existing frame - vpx_rb_read_literal(&rb, 3); // Frame buffer to show. - return VPX_CODEC_OK; - } - - if (data_sz <= 8) - return VPX_CODEC_UNSUP_BITSTREAM; - - si->is_kf = !vpx_rb_read_bit(&rb); - show_frame = vpx_rb_read_bit(&rb); - error_resilient = vpx_rb_read_bit(&rb); - - if (si->is_kf) { - if (!vp10_read_sync_code(&rb)) - return VPX_CODEC_UNSUP_BITSTREAM; - - if (!parse_bitdepth_colorspace_sampling(profile, &rb)) - return VPX_CODEC_UNSUP_BITSTREAM; - vp10_read_frame_size(&rb, (int *)&si->w, (int *)&si->h); - } else { - intra_only_flag = show_frame ? 0 : vpx_rb_read_bit(&rb); - - rb.bit_offset += error_resilient ? 
0 : 2; // reset_frame_context - - if (intra_only_flag) { - if (!vp10_read_sync_code(&rb)) - return VPX_CODEC_UNSUP_BITSTREAM; - if (profile > PROFILE_0) { - if (!parse_bitdepth_colorspace_sampling(profile, &rb)) - return VPX_CODEC_UNSUP_BITSTREAM; - } - rb.bit_offset += REF_FRAMES; // refresh_frame_flags - vp10_read_frame_size(&rb, (int *)&si->w, (int *)&si->h); - } - } - } - if (is_intra_only != NULL) - *is_intra_only = intra_only_flag; - return VPX_CODEC_OK; -} - -static vpx_codec_err_t decoder_peek_si(const uint8_t *data, - unsigned int data_sz, - vpx_codec_stream_info_t *si) { - return decoder_peek_si_internal(data, data_sz, si, NULL, NULL, NULL); -} - -static vpx_codec_err_t decoder_get_si(vpx_codec_alg_priv_t *ctx, - vpx_codec_stream_info_t *si) { - const size_t sz = (si->sz >= sizeof(vp10_stream_info_t)) - ? sizeof(vp10_stream_info_t) - : sizeof(vpx_codec_stream_info_t); - memcpy(si, &ctx->si, sz); - si->sz = (unsigned int)sz; - - return VPX_CODEC_OK; -} - -static void set_error_detail(vpx_codec_alg_priv_t *ctx, - const char *const error) { - ctx->base.err_detail = error; -} - -static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx, - const struct vpx_internal_error_info *error) { - if (error->error_code) - set_error_detail(ctx, error->has_detail ? 
error->detail : NULL); - - return error->error_code; -} - -static void init_buffer_callbacks(vpx_codec_alg_priv_t *ctx) { - int i; - - for (i = 0; i < ctx->num_frame_workers; ++i) { - VPxWorker *const worker = &ctx->frame_workers[i]; - FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1; - VP10_COMMON *const cm = &frame_worker_data->pbi->common; - BufferPool *const pool = cm->buffer_pool; - - cm->new_fb_idx = INVALID_IDX; - cm->byte_alignment = ctx->byte_alignment; - cm->skip_loop_filter = ctx->skip_loop_filter; - - if (ctx->get_ext_fb_cb != NULL && ctx->release_ext_fb_cb != NULL) { - pool->get_fb_cb = ctx->get_ext_fb_cb; - pool->release_fb_cb = ctx->release_ext_fb_cb; - pool->cb_priv = ctx->ext_priv; - } else { - pool->get_fb_cb = vp10_get_frame_buffer; - pool->release_fb_cb = vp10_release_frame_buffer; - - if (vp10_alloc_internal_frame_buffers(&pool->int_frame_buffers)) - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, - "Failed to initialize internal frame buffers"); - - pool->cb_priv = &pool->int_frame_buffers; - } - } -} - -static void set_default_ppflags(vp8_postproc_cfg_t *cfg) { - cfg->post_proc_flag = VP8_DEBLOCK | VP8_DEMACROBLOCK; - cfg->deblocking_level = 4; - cfg->noise_level = 0; -} - -static void set_ppflags(const vpx_codec_alg_priv_t *ctx, - vp10_ppflags_t *flags) { - flags->post_proc_flag = - ctx->postproc_cfg.post_proc_flag; - - flags->deblocking_level = ctx->postproc_cfg.deblocking_level; - flags->noise_level = ctx->postproc_cfg.noise_level; -} - -static int frame_worker_hook(void *arg1, void *arg2) { - FrameWorkerData *const frame_worker_data = (FrameWorkerData *)arg1; - const uint8_t *data = frame_worker_data->data; - (void)arg2; - - frame_worker_data->result = - vp10_receive_compressed_data(frame_worker_data->pbi, - frame_worker_data->data_size, - &data); - frame_worker_data->data_end = data; - - if (frame_worker_data->pbi->common.frame_parallel_decode) { - // In frame parallel decoding, a worker thread must 
successfully decode all - // the compressed data. - if (frame_worker_data->result != 0 || - frame_worker_data->data + frame_worker_data->data_size - 1 > data) { - VPxWorker *const worker = frame_worker_data->pbi->frame_worker_owner; - BufferPool *const pool = frame_worker_data->pbi->common.buffer_pool; - // Signal all the other threads that are waiting for this frame. - vp10_frameworker_lock_stats(worker); - frame_worker_data->frame_context_ready = 1; - lock_buffer_pool(pool); - frame_worker_data->pbi->cur_buf->buf.corrupted = 1; - unlock_buffer_pool(pool); - frame_worker_data->pbi->need_resync = 1; - vp10_frameworker_signal_stats(worker); - vp10_frameworker_unlock_stats(worker); - return 0; - } - } else if (frame_worker_data->result != 0) { - // Check decode result in serial decode. - frame_worker_data->pbi->cur_buf->buf.corrupted = 1; - frame_worker_data->pbi->need_resync = 1; - } - return !frame_worker_data->result; -} - -static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) { - int i; - const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); - - ctx->last_show_frame = -1; - ctx->next_submit_worker_id = 0; - ctx->last_submit_worker_id = 0; - ctx->next_output_worker_id = 0; - ctx->frame_cache_read = 0; - ctx->frame_cache_write = 0; - ctx->num_cache_frames = 0; - ctx->need_resync = 1; - ctx->num_frame_workers = - (ctx->frame_parallel_decode == 1) ? 
ctx->cfg.threads: 1; - if (ctx->num_frame_workers > MAX_DECODE_THREADS) - ctx->num_frame_workers = MAX_DECODE_THREADS; - ctx->available_threads = ctx->num_frame_workers; - ctx->flushed = 0; - - ctx->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool)); - if (ctx->buffer_pool == NULL) - return VPX_CODEC_MEM_ERROR; - -#if CONFIG_MULTITHREAD - if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) { - set_error_detail(ctx, "Failed to allocate buffer pool mutex"); - return VPX_CODEC_MEM_ERROR; - } -#endif - - ctx->frame_workers = (VPxWorker *) - vpx_malloc(ctx->num_frame_workers * sizeof(*ctx->frame_workers)); - if (ctx->frame_workers == NULL) { - set_error_detail(ctx, "Failed to allocate frame_workers"); - return VPX_CODEC_MEM_ERROR; - } - - for (i = 0; i < ctx->num_frame_workers; ++i) { - VPxWorker *const worker = &ctx->frame_workers[i]; - FrameWorkerData *frame_worker_data = NULL; - winterface->init(worker); - worker->data1 = vpx_memalign(32, sizeof(FrameWorkerData)); - if (worker->data1 == NULL) { - set_error_detail(ctx, "Failed to allocate frame_worker_data"); - return VPX_CODEC_MEM_ERROR; - } - frame_worker_data = (FrameWorkerData *)worker->data1; - frame_worker_data->pbi = vp10_decoder_create(ctx->buffer_pool); - if (frame_worker_data->pbi == NULL) { - set_error_detail(ctx, "Failed to allocate frame_worker_data"); - return VPX_CODEC_MEM_ERROR; - } - frame_worker_data->pbi->frame_worker_owner = worker; - frame_worker_data->worker_id = i; - frame_worker_data->scratch_buffer = NULL; - frame_worker_data->scratch_buffer_size = 0; - frame_worker_data->frame_context_ready = 0; - frame_worker_data->received_frame = 0; -#if CONFIG_MULTITHREAD - if (pthread_mutex_init(&frame_worker_data->stats_mutex, NULL)) { - set_error_detail(ctx, "Failed to allocate frame_worker_data mutex"); - return VPX_CODEC_MEM_ERROR; - } - - if (pthread_cond_init(&frame_worker_data->stats_cond, NULL)) { - set_error_detail(ctx, "Failed to allocate frame_worker_data cond"); - return 
VPX_CODEC_MEM_ERROR; - } -#endif - // If decoding in serial mode, FrameWorker thread could create tile worker - // thread or loopfilter thread. - frame_worker_data->pbi->max_threads = - (ctx->frame_parallel_decode == 0) ? ctx->cfg.threads : 0; - - frame_worker_data->pbi->inv_tile_order = ctx->invert_tile_order; - frame_worker_data->pbi->common.frame_parallel_decode = - ctx->frame_parallel_decode; - worker->hook = (VPxWorkerHook)frame_worker_hook; - if (!winterface->reset(worker)) { - set_error_detail(ctx, "Frame Worker thread creation failed"); - return VPX_CODEC_MEM_ERROR; - } - } - - // If postprocessing was enabled by the application and a - // configuration has not been provided, default it. - if (!ctx->postproc_cfg_set && - (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)) - set_default_ppflags(&ctx->postproc_cfg); - - init_buffer_callbacks(ctx); - - return VPX_CODEC_OK; -} - -static INLINE void check_resync(vpx_codec_alg_priv_t *const ctx, - const VP10Decoder *const pbi) { - // Clear resync flag if worker got a key frame or intra only frame. - if (ctx->need_resync == 1 && pbi->need_resync == 0 && - (pbi->common.intra_only || pbi->common.frame_type == KEY_FRAME)) - ctx->need_resync = 0; -} - -static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx, - const uint8_t **data, unsigned int data_sz, - void *user_priv, int64_t deadline) { - const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); - (void)deadline; - - // Determine the stream parameters. Note that we rely on peek_si to - // validate that we have a buffer that does not wrap around the top - // of the heap. 
- if (!ctx->si.h) { - int is_intra_only = 0; - const vpx_codec_err_t res = - decoder_peek_si_internal(*data, data_sz, &ctx->si, &is_intra_only, - ctx->decrypt_cb, ctx->decrypt_state); - if (res != VPX_CODEC_OK) - return res; - - if (!ctx->si.is_kf && !is_intra_only) - return VPX_CODEC_ERROR; - } - - if (!ctx->frame_parallel_decode) { - VPxWorker *const worker = ctx->frame_workers; - FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1; - frame_worker_data->data = *data; - frame_worker_data->data_size = data_sz; - frame_worker_data->user_priv = user_priv; - frame_worker_data->received_frame = 1; - - // Set these even if already initialized. The caller may have changed the - // decrypt config between frames. - frame_worker_data->pbi->decrypt_cb = ctx->decrypt_cb; - frame_worker_data->pbi->decrypt_state = ctx->decrypt_state; - - worker->had_error = 0; - winterface->execute(worker); - - // Update data pointer after decode. - *data = frame_worker_data->data_end; - - if (worker->had_error) - return update_error_state(ctx, &frame_worker_data->pbi->common.error); - - check_resync(ctx, frame_worker_data->pbi); - } else { - VPxWorker *const worker = &ctx->frame_workers[ctx->next_submit_worker_id]; - FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1; - // Copy context from last worker thread to next worker thread. - if (ctx->next_submit_worker_id != ctx->last_submit_worker_id) - vp10_frameworker_copy_context( - &ctx->frame_workers[ctx->next_submit_worker_id], - &ctx->frame_workers[ctx->last_submit_worker_id]); - - frame_worker_data->pbi->ready_for_new_data = 0; - // Copy the compressed data into worker's internal buffer. - // TODO(hkuang): Will all the workers allocate the same size - // as the size of the first intra frame be better? This will - // avoid too many deallocate and allocate. 
- if (frame_worker_data->scratch_buffer_size < data_sz) { - frame_worker_data->scratch_buffer = - (uint8_t *)vpx_realloc(frame_worker_data->scratch_buffer, data_sz); - if (frame_worker_data->scratch_buffer == NULL) { - set_error_detail(ctx, "Failed to reallocate scratch buffer"); - return VPX_CODEC_MEM_ERROR; - } - frame_worker_data->scratch_buffer_size = data_sz; - } - frame_worker_data->data_size = data_sz; - memcpy(frame_worker_data->scratch_buffer, *data, data_sz); - - frame_worker_data->frame_decoded = 0; - frame_worker_data->frame_context_ready = 0; - frame_worker_data->received_frame = 1; - frame_worker_data->data = frame_worker_data->scratch_buffer; - frame_worker_data->user_priv = user_priv; - - if (ctx->next_submit_worker_id != ctx->last_submit_worker_id) - ctx->last_submit_worker_id = - (ctx->last_submit_worker_id + 1) % ctx->num_frame_workers; - - ctx->next_submit_worker_id = - (ctx->next_submit_worker_id + 1) % ctx->num_frame_workers; - --ctx->available_threads; - worker->had_error = 0; - winterface->launch(worker); - } - - return VPX_CODEC_OK; -} - -static void wait_worker_and_cache_frame(vpx_codec_alg_priv_t *ctx) { - YV12_BUFFER_CONFIG sd; - vp10_ppflags_t flags = {0, 0, 0}; - const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); - VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id]; - FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1; - ctx->next_output_worker_id = - (ctx->next_output_worker_id + 1) % ctx->num_frame_workers; - // TODO(hkuang): Add worker error handling here. 
- winterface->sync(worker); - frame_worker_data->received_frame = 0; - ++ctx->available_threads; - - check_resync(ctx, frame_worker_data->pbi); - - if (vp10_get_raw_frame(frame_worker_data->pbi, &sd, &flags) == 0) { - VP10_COMMON *const cm = &frame_worker_data->pbi->common; - RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; - ctx->frame_cache[ctx->frame_cache_write].fb_idx = cm->new_fb_idx; - yuvconfig2image(&ctx->frame_cache[ctx->frame_cache_write].img, &sd, - frame_worker_data->user_priv); - ctx->frame_cache[ctx->frame_cache_write].img.fb_priv = - frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv; - ctx->frame_cache_write = - (ctx->frame_cache_write + 1) % FRAME_CACHE_SIZE; - ++ctx->num_cache_frames; - } -} - -static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx, - const uint8_t *data, unsigned int data_sz, - void *user_priv, long deadline) { - const uint8_t *data_start = data; - const uint8_t * const data_end = data + data_sz; - vpx_codec_err_t res; - uint32_t frame_sizes[8]; - int frame_count; - - if (data == NULL && data_sz == 0) { - ctx->flushed = 1; - return VPX_CODEC_OK; - } - - // Reset flushed when receiving a valid frame. - ctx->flushed = 0; - - // Initialize the decoder workers on the first frame. - if (ctx->frame_workers == NULL) { - const vpx_codec_err_t res = init_decoder(ctx); - if (res != VPX_CODEC_OK) - return res; - } - - res = vp10_parse_superframe_index(data, data_sz, frame_sizes, &frame_count, - ctx->decrypt_cb, ctx->decrypt_state); - if (res != VPX_CODEC_OK) - return res; - - if (ctx->frame_parallel_decode) { - // Decode in frame parallel mode. When decoding in this mode, the frame - // passed to the decoder must be either a normal frame or a superframe with - // superframe index so the decoder could get each frame's start position - // in the superframe. 
- if (frame_count > 0) { - int i; - - for (i = 0; i < frame_count; ++i) { - const uint8_t *data_start_copy = data_start; - const uint32_t frame_size = frame_sizes[i]; - if (data_start < data - || frame_size > (uint32_t) (data_end - data_start)) { - set_error_detail(ctx, "Invalid frame size in index"); - return VPX_CODEC_CORRUPT_FRAME; - } - - if (ctx->available_threads == 0) { - // No more threads for decoding. Wait until the next output worker - // finishes decoding. Then copy the decoded frame into cache. - if (ctx->num_cache_frames < FRAME_CACHE_SIZE) { - wait_worker_and_cache_frame(ctx); - } else { - // TODO(hkuang): Add unit test to test this path. - set_error_detail(ctx, "Frame output cache is full."); - return VPX_CODEC_ERROR; - } - } - - res = decode_one(ctx, &data_start_copy, frame_size, user_priv, - deadline); - if (res != VPX_CODEC_OK) - return res; - data_start += frame_size; - } - } else { - if (ctx->available_threads == 0) { - // No more threads for decoding. Wait until the next output worker - // finishes decoding. Then copy the decoded frame into cache. - if (ctx->num_cache_frames < FRAME_CACHE_SIZE) { - wait_worker_and_cache_frame(ctx); - } else { - // TODO(hkuang): Add unit test to test this path. - set_error_detail(ctx, "Frame output cache is full."); - return VPX_CODEC_ERROR; - } - } - - res = decode_one(ctx, &data, data_sz, user_priv, deadline); - if (res != VPX_CODEC_OK) - return res; - } - } else { - // Decode in serial mode. 
- if (frame_count > 0) { - int i; - - for (i = 0; i < frame_count; ++i) { - const uint8_t *data_start_copy = data_start; - const uint32_t frame_size = frame_sizes[i]; - vpx_codec_err_t res; - if (data_start < data - || frame_size > (uint32_t) (data_end - data_start)) { - set_error_detail(ctx, "Invalid frame size in index"); - return VPX_CODEC_CORRUPT_FRAME; - } - - res = decode_one(ctx, &data_start_copy, frame_size, user_priv, - deadline); - if (res != VPX_CODEC_OK) - return res; - - data_start += frame_size; - } - } else { - while (data_start < data_end) { - const uint32_t frame_size = (uint32_t) (data_end - data_start); - const vpx_codec_err_t res = decode_one(ctx, &data_start, frame_size, - user_priv, deadline); - if (res != VPX_CODEC_OK) - return res; - - // Account for suboptimal termination by the encoder. - while (data_start < data_end) { - const uint8_t marker = read_marker(ctx->decrypt_cb, - ctx->decrypt_state, data_start); - if (marker) - break; - ++data_start; - } - } - } - } - - return res; -} - -static void release_last_output_frame(vpx_codec_alg_priv_t *ctx) { - RefCntBuffer *const frame_bufs = ctx->buffer_pool->frame_bufs; - // Decrease reference count of last output frame in frame parallel mode. - if (ctx->frame_parallel_decode && ctx->last_show_frame >= 0) { - BufferPool *const pool = ctx->buffer_pool; - lock_buffer_pool(pool); - decrease_ref_count(ctx->last_show_frame, frame_bufs, pool); - unlock_buffer_pool(pool); - } -} - -static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx, - vpx_codec_iter_t *iter) { - vpx_image_t *img = NULL; - - // Only return frame when all the cpu are busy or - // application fluhsed the decoder in frame parallel decode. - if (ctx->frame_parallel_decode && ctx->available_threads > 0 && - !ctx->flushed) { - return NULL; - } - - // Output the frames in the cache first. 
- if (ctx->num_cache_frames > 0) { - release_last_output_frame(ctx); - ctx->last_show_frame = ctx->frame_cache[ctx->frame_cache_read].fb_idx; - if (ctx->need_resync) - return NULL; - img = &ctx->frame_cache[ctx->frame_cache_read].img; - ctx->frame_cache_read = (ctx->frame_cache_read + 1) % FRAME_CACHE_SIZE; - --ctx->num_cache_frames; - return img; - } - - // iter acts as a flip flop, so an image is only returned on the first - // call to get_frame. - if (*iter == NULL && ctx->frame_workers != NULL) { - do { - YV12_BUFFER_CONFIG sd; - vp10_ppflags_t flags = {0, 0, 0}; - const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); - VPxWorker *const worker = - &ctx->frame_workers[ctx->next_output_worker_id]; - FrameWorkerData *const frame_worker_data = - (FrameWorkerData *)worker->data1; - ctx->next_output_worker_id = - (ctx->next_output_worker_id + 1) % ctx->num_frame_workers; - if (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC) - set_ppflags(ctx, &flags); - // Wait for the frame from worker thread. - if (winterface->sync(worker)) { - // Check if worker has received any frames. - if (frame_worker_data->received_frame == 1) { - ++ctx->available_threads; - frame_worker_data->received_frame = 0; - check_resync(ctx, frame_worker_data->pbi); - } - if (vp10_get_raw_frame(frame_worker_data->pbi, &sd, &flags) == 0) { - VP10_COMMON *const cm = &frame_worker_data->pbi->common; - RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; - release_last_output_frame(ctx); - ctx->last_show_frame = frame_worker_data->pbi->common.new_fb_idx; - if (ctx->need_resync) - return NULL; - yuvconfig2image(&ctx->img, &sd, frame_worker_data->user_priv); - ctx->img.fb_priv = frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv; - img = &ctx->img; - return img; - } - } else { - // Decoding failed. Release the worker thread. 
- frame_worker_data->received_frame = 0; - ++ctx->available_threads; - ctx->need_resync = 1; - if (ctx->flushed != 1) - return NULL; - } - } while (ctx->next_output_worker_id != ctx->next_submit_worker_id); - } - return NULL; -} - -static vpx_codec_err_t decoder_set_fb_fn( - vpx_codec_alg_priv_t *ctx, - vpx_get_frame_buffer_cb_fn_t cb_get, - vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) { - if (cb_get == NULL || cb_release == NULL) { - return VPX_CODEC_INVALID_PARAM; - } else if (ctx->frame_workers == NULL) { - // If the decoder has already been initialized, do not accept changes to - // the frame buffer functions. - ctx->get_ext_fb_cb = cb_get; - ctx->release_ext_fb_cb = cb_release; - ctx->ext_priv = cb_priv; - return VPX_CODEC_OK; - } - - return VPX_CODEC_ERROR; -} - -static vpx_codec_err_t ctrl_set_reference(vpx_codec_alg_priv_t *ctx, - va_list args) { - vpx_ref_frame_t *const data = va_arg(args, vpx_ref_frame_t *); - - // Only support this function in serial decode. - if (ctx->frame_parallel_decode) { - set_error_detail(ctx, "Not supported in frame parallel decode"); - return VPX_CODEC_INCAPABLE; - } - - if (data) { - vpx_ref_frame_t *const frame = (vpx_ref_frame_t *)data; - YV12_BUFFER_CONFIG sd; - VPxWorker *const worker = ctx->frame_workers; - FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1; - image2yuvconfig(&frame->img, &sd); - return vp10_set_reference_dec(&frame_worker_data->pbi->common, - (VP9_REFFRAME)frame->frame_type, &sd); - } else { - return VPX_CODEC_INVALID_PARAM; - } -} - -static vpx_codec_err_t ctrl_copy_reference(vpx_codec_alg_priv_t *ctx, - va_list args) { - vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *); - - // Only support this function in serial decode. 
- if (ctx->frame_parallel_decode) { - set_error_detail(ctx, "Not supported in frame parallel decode"); - return VPX_CODEC_INCAPABLE; - } - - if (data) { - vpx_ref_frame_t *frame = (vpx_ref_frame_t *) data; - YV12_BUFFER_CONFIG sd; - VPxWorker *const worker = ctx->frame_workers; - FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1; - image2yuvconfig(&frame->img, &sd); - return vp10_copy_reference_dec(frame_worker_data->pbi, - (VP9_REFFRAME)frame->frame_type, &sd); - } else { - return VPX_CODEC_INVALID_PARAM; - } -} - -static vpx_codec_err_t ctrl_get_reference(vpx_codec_alg_priv_t *ctx, - va_list args) { - vp9_ref_frame_t *data = va_arg(args, vp9_ref_frame_t *); - - // Only support this function in serial decode. - if (ctx->frame_parallel_decode) { - set_error_detail(ctx, "Not supported in frame parallel decode"); - return VPX_CODEC_INCAPABLE; - } - - if (data) { - YV12_BUFFER_CONFIG* fb; - VPxWorker *const worker = ctx->frame_workers; - FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1; - fb = get_ref_frame(&frame_worker_data->pbi->common, data->idx); - if (fb == NULL) return VPX_CODEC_ERROR; - yuvconfig2image(&data->img, fb, NULL); - return VPX_CODEC_OK; - } else { - return VPX_CODEC_INVALID_PARAM; - } -} - -static vpx_codec_err_t ctrl_set_postproc(vpx_codec_alg_priv_t *ctx, - va_list args) { -#if CONFIG_VP9_POSTPROC - vp8_postproc_cfg_t *data = va_arg(args, vp8_postproc_cfg_t *); - - if (data) { - ctx->postproc_cfg_set = 1; - ctx->postproc_cfg = *((vp8_postproc_cfg_t *)data); - return VPX_CODEC_OK; - } else { - return VPX_CODEC_INVALID_PARAM; - } -#else - (void)ctx; - (void)args; - return VPX_CODEC_INCAPABLE; -#endif -} - -static vpx_codec_err_t ctrl_set_dbg_options(vpx_codec_alg_priv_t *ctx, - va_list args) { - (void)ctx; - (void)args; - return VPX_CODEC_INCAPABLE; -} - -static vpx_codec_err_t ctrl_get_last_ref_updates(vpx_codec_alg_priv_t *ctx, - va_list args) { - int *const update_info = va_arg(args, int *); - 
- // Only support this function in serial decode. - if (ctx->frame_parallel_decode) { - set_error_detail(ctx, "Not supported in frame parallel decode"); - return VPX_CODEC_INCAPABLE; - } - - if (update_info) { - if (ctx->frame_workers) { - VPxWorker *const worker = ctx->frame_workers; - FrameWorkerData *const frame_worker_data = - (FrameWorkerData *)worker->data1; - *update_info = frame_worker_data->pbi->refresh_frame_flags; - return VPX_CODEC_OK; - } else { - return VPX_CODEC_ERROR; - } - } - - return VPX_CODEC_INVALID_PARAM; -} - -static vpx_codec_err_t ctrl_get_frame_corrupted(vpx_codec_alg_priv_t *ctx, - va_list args) { - int *corrupted = va_arg(args, int *); - - if (corrupted) { - if (ctx->frame_workers) { - VPxWorker *const worker = ctx->frame_workers; - FrameWorkerData *const frame_worker_data = - (FrameWorkerData *)worker->data1; - RefCntBuffer *const frame_bufs = - frame_worker_data->pbi->common.buffer_pool->frame_bufs; - if (frame_worker_data->pbi->common.frame_to_show == NULL) - return VPX_CODEC_ERROR; - if (ctx->last_show_frame >= 0) - *corrupted = frame_bufs[ctx->last_show_frame].buf.corrupted; - return VPX_CODEC_OK; - } else { - return VPX_CODEC_ERROR; - } - } - - return VPX_CODEC_INVALID_PARAM; -} - -static vpx_codec_err_t ctrl_get_frame_size(vpx_codec_alg_priv_t *ctx, - va_list args) { - int *const frame_size = va_arg(args, int *); - - // Only support this function in serial decode. 
- if (ctx->frame_parallel_decode) { - set_error_detail(ctx, "Not supported in frame parallel decode"); - return VPX_CODEC_INCAPABLE; - } - - if (frame_size) { - if (ctx->frame_workers) { - VPxWorker *const worker = ctx->frame_workers; - FrameWorkerData *const frame_worker_data = - (FrameWorkerData *)worker->data1; - const VP10_COMMON *const cm = &frame_worker_data->pbi->common; - frame_size[0] = cm->width; - frame_size[1] = cm->height; - return VPX_CODEC_OK; - } else { - return VPX_CODEC_ERROR; - } - } - - return VPX_CODEC_INVALID_PARAM; -} - -static vpx_codec_err_t ctrl_get_render_size(vpx_codec_alg_priv_t *ctx, - va_list args) { - int *const render_size = va_arg(args, int *); - - // Only support this function in serial decode. - if (ctx->frame_parallel_decode) { - set_error_detail(ctx, "Not supported in frame parallel decode"); - return VPX_CODEC_INCAPABLE; - } - - if (render_size) { - if (ctx->frame_workers) { - VPxWorker *const worker = ctx->frame_workers; - FrameWorkerData *const frame_worker_data = - (FrameWorkerData *)worker->data1; - const VP10_COMMON *const cm = &frame_worker_data->pbi->common; - render_size[0] = cm->render_width; - render_size[1] = cm->render_height; - return VPX_CODEC_OK; - } else { - return VPX_CODEC_ERROR; - } - } - - return VPX_CODEC_INVALID_PARAM; -} - -static vpx_codec_err_t ctrl_get_bit_depth(vpx_codec_alg_priv_t *ctx, - va_list args) { - unsigned int *const bit_depth = va_arg(args, unsigned int *); - VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id]; - - if (bit_depth) { - if (worker) { - FrameWorkerData *const frame_worker_data = - (FrameWorkerData *)worker->data1; - const VP10_COMMON *const cm = &frame_worker_data->pbi->common; - *bit_depth = cm->bit_depth; - return VPX_CODEC_OK; - } else { - return VPX_CODEC_ERROR; - } - } - - return VPX_CODEC_INVALID_PARAM; -} - -static vpx_codec_err_t ctrl_set_invert_tile_order(vpx_codec_alg_priv_t *ctx, - va_list args) { - ctx->invert_tile_order = va_arg(args, int); - 
return VPX_CODEC_OK; -} - -static vpx_codec_err_t ctrl_set_decryptor(vpx_codec_alg_priv_t *ctx, - va_list args) { - vpx_decrypt_init *init = va_arg(args, vpx_decrypt_init *); - ctx->decrypt_cb = init ? init->decrypt_cb : NULL; - ctx->decrypt_state = init ? init->decrypt_state : NULL; - return VPX_CODEC_OK; -} - -static vpx_codec_err_t ctrl_set_byte_alignment(vpx_codec_alg_priv_t *ctx, - va_list args) { - const int legacy_byte_alignment = 0; - const int min_byte_alignment = 32; - const int max_byte_alignment = 1024; - const int byte_alignment = va_arg(args, int); - - if (byte_alignment != legacy_byte_alignment && - (byte_alignment < min_byte_alignment || - byte_alignment > max_byte_alignment || - (byte_alignment & (byte_alignment - 1)) != 0)) - return VPX_CODEC_INVALID_PARAM; - - ctx->byte_alignment = byte_alignment; - if (ctx->frame_workers) { - VPxWorker *const worker = ctx->frame_workers; - FrameWorkerData *const frame_worker_data = - (FrameWorkerData *)worker->data1; - frame_worker_data->pbi->common.byte_alignment = byte_alignment; - } - return VPX_CODEC_OK; -} - -static vpx_codec_err_t ctrl_set_skip_loop_filter(vpx_codec_alg_priv_t *ctx, - va_list args) { - ctx->skip_loop_filter = va_arg(args, int); - - if (ctx->frame_workers) { - VPxWorker *const worker = ctx->frame_workers; - FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1; - frame_worker_data->pbi->common.skip_loop_filter = ctx->skip_loop_filter; - } - - return VPX_CODEC_OK; -} - -static vpx_codec_ctrl_fn_map_t decoder_ctrl_maps[] = { - {VP8_COPY_REFERENCE, ctrl_copy_reference}, - - // Setters - {VP8_SET_REFERENCE, ctrl_set_reference}, - {VP8_SET_POSTPROC, ctrl_set_postproc}, - {VP8_SET_DBG_COLOR_REF_FRAME, ctrl_set_dbg_options}, - {VP8_SET_DBG_COLOR_MB_MODES, ctrl_set_dbg_options}, - {VP8_SET_DBG_COLOR_B_MODES, ctrl_set_dbg_options}, - {VP8_SET_DBG_DISPLAY_MV, ctrl_set_dbg_options}, - {VP9_INVERT_TILE_DECODE_ORDER, ctrl_set_invert_tile_order}, - {VPXD_SET_DECRYPTOR, 
ctrl_set_decryptor}, - {VP9_SET_BYTE_ALIGNMENT, ctrl_set_byte_alignment}, - {VP9_SET_SKIP_LOOP_FILTER, ctrl_set_skip_loop_filter}, - - // Getters - {VP8D_GET_LAST_REF_UPDATES, ctrl_get_last_ref_updates}, - {VP8D_GET_FRAME_CORRUPTED, ctrl_get_frame_corrupted}, - {VP9_GET_REFERENCE, ctrl_get_reference}, - {VP9D_GET_DISPLAY_SIZE, ctrl_get_render_size}, - {VP9D_GET_BIT_DEPTH, ctrl_get_bit_depth}, - {VP9D_GET_FRAME_SIZE, ctrl_get_frame_size}, - - { -1, NULL}, -}; - -#ifndef VERSION_STRING -#define VERSION_STRING -#endif -CODEC_INTERFACE(vpx_codec_vp10_dx) = { - "WebM Project VP10 Decoder" VERSION_STRING, - VPX_CODEC_INTERNAL_ABI_VERSION, - VPX_CODEC_CAP_DECODER | VP9_CAP_POSTPROC | - VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER, // vpx_codec_caps_t - decoder_init, // vpx_codec_init_fn_t - decoder_destroy, // vpx_codec_destroy_fn_t - decoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t - { // NOLINT - decoder_peek_si, // vpx_codec_peek_si_fn_t - decoder_get_si, // vpx_codec_get_si_fn_t - decoder_decode, // vpx_codec_decode_fn_t - decoder_get_frame, // vpx_codec_frame_get_fn_t - decoder_set_fb_fn, // vpx_codec_set_fb_fn_t - }, - { // NOLINT - 0, - NULL, // vpx_codec_enc_cfg_map_t - NULL, // vpx_codec_encode_fn_t - NULL, // vpx_codec_get_cx_data_fn_t - NULL, // vpx_codec_enc_config_set_fn_t - NULL, // vpx_codec_get_global_headers_fn_t - NULL, // vpx_codec_get_preview_frame_fn_t - NULL // vpx_codec_enc_mr_get_mem_loc_fn_t - } -}; diff --git a/libs/libvpx/vp10/vp10_iface_common.h b/libs/libvpx/vp10/vp10_iface_common.h deleted file mode 100644 index b2b4b7d8fc..0000000000 --- a/libs/libvpx/vp10/vp10_iface_common.h +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright (c) 2013 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. 
All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ -#ifndef VP10_VP10_IFACE_COMMON_H_ -#define VP10_VP10_IFACE_COMMON_H_ - -#include "vpx_ports/mem.h" - -static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12, - void *user_priv) { - /** vpx_img_wrap() doesn't allow specifying independent strides for - * the Y, U, and V planes, nor other alignment adjustments that - * might be representable by a YV12_BUFFER_CONFIG, so we just - * initialize all the fields.*/ - int bps; - if (!yv12->subsampling_y) { - if (!yv12->subsampling_x) { - img->fmt = VPX_IMG_FMT_I444; - bps = 24; - } else { - img->fmt = VPX_IMG_FMT_I422; - bps = 16; - } - } else { - if (!yv12->subsampling_x) { - img->fmt = VPX_IMG_FMT_I440; - bps = 16; - } else { - img->fmt = VPX_IMG_FMT_I420; - bps = 12; - } - } - img->cs = yv12->color_space; - img->range = yv12->color_range; - img->bit_depth = 8; - img->w = yv12->y_stride; - img->h = ALIGN_POWER_OF_TWO(yv12->y_height + 2 * VP9_ENC_BORDER_IN_PIXELS, 3); - img->d_w = yv12->y_crop_width; - img->d_h = yv12->y_crop_height; - img->r_w = yv12->render_width; - img->r_h = yv12->render_height; - img->x_chroma_shift = yv12->subsampling_x; - img->y_chroma_shift = yv12->subsampling_y; - img->planes[VPX_PLANE_Y] = yv12->y_buffer; - img->planes[VPX_PLANE_U] = yv12->u_buffer; - img->planes[VPX_PLANE_V] = yv12->v_buffer; - img->planes[VPX_PLANE_ALPHA] = NULL; - img->stride[VPX_PLANE_Y] = yv12->y_stride; - img->stride[VPX_PLANE_U] = yv12->uv_stride; - img->stride[VPX_PLANE_V] = yv12->uv_stride; - img->stride[VPX_PLANE_ALPHA] = yv12->y_stride; -#if CONFIG_VP9_HIGHBITDEPTH - if (yv12->flags & YV12_FLAG_HIGHBITDEPTH) { - // vpx_image_t uses byte strides and a pointer to the first byte - // of the image. 
- img->fmt = (vpx_img_fmt_t)(img->fmt | VPX_IMG_FMT_HIGHBITDEPTH); - img->bit_depth = yv12->bit_depth; - img->planes[VPX_PLANE_Y] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->y_buffer); - img->planes[VPX_PLANE_U] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->u_buffer); - img->planes[VPX_PLANE_V] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->v_buffer); - img->planes[VPX_PLANE_ALPHA] = NULL; - img->stride[VPX_PLANE_Y] = 2 * yv12->y_stride; - img->stride[VPX_PLANE_U] = 2 * yv12->uv_stride; - img->stride[VPX_PLANE_V] = 2 * yv12->uv_stride; - img->stride[VPX_PLANE_ALPHA] = 2 * yv12->y_stride; - } -#endif // CONFIG_VP9_HIGHBITDEPTH - img->bps = bps; - img->user_priv = user_priv; - img->img_data = yv12->buffer_alloc; - img->img_data_owner = 0; - img->self_allocd = 0; -} - -static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img, - YV12_BUFFER_CONFIG *yv12) { - yv12->y_buffer = img->planes[VPX_PLANE_Y]; - yv12->u_buffer = img->planes[VPX_PLANE_U]; - yv12->v_buffer = img->planes[VPX_PLANE_V]; - - yv12->y_crop_width = img->d_w; - yv12->y_crop_height = img->d_h; - yv12->render_width = img->r_w; - yv12->render_height = img->r_h; - yv12->y_width = img->d_w; - yv12->y_height = img->d_h; - - yv12->uv_width = img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2 - : yv12->y_width; - yv12->uv_height = img->y_chroma_shift == 1 ? 
(1 + yv12->y_height) / 2 - : yv12->y_height; - yv12->uv_crop_width = yv12->uv_width; - yv12->uv_crop_height = yv12->uv_height; - - yv12->y_stride = img->stride[VPX_PLANE_Y]; - yv12->uv_stride = img->stride[VPX_PLANE_U]; - yv12->color_space = img->cs; - yv12->color_range = img->range; - -#if CONFIG_VP9_HIGHBITDEPTH - if (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) { - // In vpx_image_t - // planes point to uint8 address of start of data - // stride counts uint8s to reach next row - // In YV12_BUFFER_CONFIG - // y_buffer, u_buffer, v_buffer point to uint16 address of data - // stride and border counts in uint16s - // This means that all the address calculations in the main body of code - // should work correctly. - // However, before we do any pixel operations we need to cast the address - // to a uint16 ponter and double its value. - yv12->y_buffer = CONVERT_TO_BYTEPTR(yv12->y_buffer); - yv12->u_buffer = CONVERT_TO_BYTEPTR(yv12->u_buffer); - yv12->v_buffer = CONVERT_TO_BYTEPTR(yv12->v_buffer); - yv12->y_stride >>= 1; - yv12->uv_stride >>= 1; - yv12->flags = YV12_FLAG_HIGHBITDEPTH; - } else { - yv12->flags = 0; - } - yv12->border = (yv12->y_stride - img->w) / 2; -#else - yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2; -#endif // CONFIG_VP9_HIGHBITDEPTH - yv12->subsampling_x = img->x_chroma_shift; - yv12->subsampling_y = img->y_chroma_shift; - return VPX_CODEC_OK; -} - -#endif // VP10_VP10_IFACE_COMMON_H_ diff --git a/libs/libvpx/vp10/vp10cx.mk b/libs/libvpx/vp10/vp10cx.mk deleted file mode 100644 index dc3b27139c..0000000000 --- a/libs/libvpx/vp10/vp10cx.mk +++ /dev/null @@ -1,128 +0,0 @@ -## -## Copyright (c) 2010 The WebM project authors. All Rights Reserved. -## -## Use of this source code is governed by a BSD-style license -## that can be found in the LICENSE file in the root of the source -## tree. An additional intellectual property rights grant can be found -## in the file PATENTS. 
All contributing project authors may -## be found in the AUTHORS file in the root of the source tree. -## - -VP10_CX_EXPORTS += exports_enc - -VP10_CX_SRCS-yes += $(VP10_COMMON_SRCS-yes) -VP10_CX_SRCS-no += $(VP10_COMMON_SRCS-no) -VP10_CX_SRCS_REMOVE-yes += $(VP10_COMMON_SRCS_REMOVE-yes) -VP10_CX_SRCS_REMOVE-no += $(VP10_COMMON_SRCS_REMOVE-no) - -VP10_CX_SRCS-yes += vp10_cx_iface.c - -VP10_CX_SRCS-yes += encoder/bitstream.c -VP10_CX_SRCS-yes += encoder/context_tree.c -VP10_CX_SRCS-yes += encoder/context_tree.h -VP10_CX_SRCS-yes += encoder/cost.h -VP10_CX_SRCS-yes += encoder/cost.c -VP10_CX_SRCS-yes += encoder/dct.c -VP10_CX_SRCS-$(CONFIG_VP9_TEMPORAL_DENOISING) += encoder/denoiser.c -VP10_CX_SRCS-$(CONFIG_VP9_TEMPORAL_DENOISING) += encoder/denoiser.h -VP10_CX_SRCS-yes += encoder/encodeframe.c -VP10_CX_SRCS-yes += encoder/encodeframe.h -VP10_CX_SRCS-yes += encoder/encodemb.c -VP10_CX_SRCS-yes += encoder/encodemv.c -VP10_CX_SRCS-yes += encoder/ethread.h -VP10_CX_SRCS-yes += encoder/ethread.c -VP10_CX_SRCS-yes += encoder/extend.c -VP10_CX_SRCS-yes += encoder/firstpass.c -VP10_CX_SRCS-yes += encoder/block.h -VP10_CX_SRCS-yes += encoder/bitstream.h -VP10_CX_SRCS-yes += encoder/encodemb.h -VP10_CX_SRCS-yes += encoder/encodemv.h -VP10_CX_SRCS-yes += encoder/extend.h -VP10_CX_SRCS-yes += encoder/firstpass.h -VP10_CX_SRCS-yes += encoder/lookahead.c -VP10_CX_SRCS-yes += encoder/lookahead.h -VP10_CX_SRCS-yes += encoder/mcomp.h -VP10_CX_SRCS-yes += encoder/encoder.h -VP10_CX_SRCS-yes += encoder/quantize.h -VP10_CX_SRCS-yes += encoder/ratectrl.h -VP10_CX_SRCS-yes += encoder/rd.h -VP10_CX_SRCS-yes += encoder/rdopt.h -VP10_CX_SRCS-yes += encoder/tokenize.h -VP10_CX_SRCS-yes += encoder/treewriter.h -VP10_CX_SRCS-yes += encoder/mcomp.c -VP10_CX_SRCS-yes += encoder/encoder.c -VP10_CX_SRCS-yes += encoder/picklpf.c -VP10_CX_SRCS-yes += encoder/picklpf.h -VP10_CX_SRCS-yes += encoder/quantize.c -VP10_CX_SRCS-yes += encoder/ratectrl.c -VP10_CX_SRCS-yes += encoder/rd.c -VP10_CX_SRCS-yes 
+= encoder/rdopt.c -VP10_CX_SRCS-yes += encoder/segmentation.c -VP10_CX_SRCS-yes += encoder/segmentation.h -VP10_CX_SRCS-yes += encoder/speed_features.c -VP10_CX_SRCS-yes += encoder/speed_features.h -VP10_CX_SRCS-yes += encoder/subexp.c -VP10_CX_SRCS-yes += encoder/subexp.h -VP10_CX_SRCS-yes += encoder/resize.c -VP10_CX_SRCS-yes += encoder/resize.h -VP10_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/blockiness.c - -VP10_CX_SRCS-yes += encoder/tokenize.c -VP10_CX_SRCS-yes += encoder/treewriter.c -VP10_CX_SRCS-yes += encoder/aq_variance.c -VP10_CX_SRCS-yes += encoder/aq_variance.h -VP10_CX_SRCS-yes += encoder/aq_cyclicrefresh.c -VP10_CX_SRCS-yes += encoder/aq_cyclicrefresh.h -VP10_CX_SRCS-yes += encoder/aq_complexity.c -VP10_CX_SRCS-yes += encoder/aq_complexity.h -VP10_CX_SRCS-yes += encoder/skin_detection.c -VP10_CX_SRCS-yes += encoder/skin_detection.h -ifeq ($(CONFIG_VP9_POSTPROC),yes) -VP10_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.h -VP10_CX_SRCS-$(CONFIG_INTERNAL_STATS) += common/postproc.c -endif -VP10_CX_SRCS-yes += encoder/temporal_filter.c -VP10_CX_SRCS-yes += encoder/temporal_filter.h -VP10_CX_SRCS-yes += encoder/mbgraph.c -VP10_CX_SRCS-yes += encoder/mbgraph.h - -VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm -VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.c -ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes) -VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/highbd_block_error_intrin_sse2.c -endif - -ifeq ($(CONFIG_USE_X86INC),yes) -VP10_CX_SRCS-$(HAVE_MMX) += encoder/x86/dct_mmx.asm -VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/error_sse2.asm -endif - -ifeq ($(ARCH_X86_64),yes) -ifeq ($(CONFIG_USE_X86INC),yes) -VP10_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/quantize_ssse3_x86_64.asm -endif -endif - -VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_sse2.c -VP10_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/dct_ssse3.c - -ifeq ($(CONFIG_VP9_TEMPORAL_DENOISING),yes) -VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/denoiser_sse2.c -endif - -VP10_CX_SRCS-$(HAVE_AVX2) 
+= encoder/x86/error_intrin_avx2.c - -ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes) -VP10_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/dct_neon.c -VP10_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/error_neon.c -endif -VP10_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/quantize_neon.c - -VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/error_msa.c -VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct4x4_msa.c -VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct8x8_msa.c -VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct16x16_msa.c -VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct_msa.h -VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/temporal_filter_msa.c - -VP10_CX_SRCS-yes := $(filter-out $(VP10_CX_SRCS_REMOVE-yes),$(VP10_CX_SRCS-yes)) diff --git a/libs/libvpx/vp10/vp10dx.mk b/libs/libvpx/vp10/vp10dx.mk deleted file mode 100644 index fce6d0d7d7..0000000000 --- a/libs/libvpx/vp10/vp10dx.mk +++ /dev/null @@ -1,33 +0,0 @@ -## -## Copyright (c) 2010 The WebM project authors. All Rights Reserved. -## -## Use of this source code is governed by a BSD-style license -## that can be found in the LICENSE file in the root of the source -## tree. An additional intellectual property rights grant can be found -## in the file PATENTS. All contributing project authors may -## be found in the AUTHORS file in the root of the source tree. 
-## - -VP10_DX_EXPORTS += exports_dec - -VP10_DX_SRCS-yes += $(VP10_COMMON_SRCS-yes) -VP10_DX_SRCS-no += $(VP10_COMMON_SRCS-no) -VP10_DX_SRCS_REMOVE-yes += $(VP10_COMMON_SRCS_REMOVE-yes) -VP10_DX_SRCS_REMOVE-no += $(VP10_COMMON_SRCS_REMOVE-no) - -VP10_DX_SRCS-yes += vp10_dx_iface.c - -VP10_DX_SRCS-yes += decoder/decodemv.c -VP10_DX_SRCS-yes += decoder/decodeframe.c -VP10_DX_SRCS-yes += decoder/decodeframe.h -VP10_DX_SRCS-yes += decoder/detokenize.c -VP10_DX_SRCS-yes += decoder/decodemv.h -VP10_DX_SRCS-yes += decoder/detokenize.h -VP10_DX_SRCS-yes += decoder/dthread.c -VP10_DX_SRCS-yes += decoder/dthread.h -VP10_DX_SRCS-yes += decoder/decoder.c -VP10_DX_SRCS-yes += decoder/decoder.h -VP10_DX_SRCS-yes += decoder/dsubexp.c -VP10_DX_SRCS-yes += decoder/dsubexp.h - -VP10_DX_SRCS-yes := $(filter-out $(VP10_DX_SRCS_REMOVE-yes),$(VP10_DX_SRCS-yes)) diff --git a/libs/libvpx/vp8/common/alloccommon.c b/libs/libvpx/vp8/common/alloccommon.c index 8dfd4ce203..722b158c3a 100644 --- a/libs/libvpx/vp8/common/alloccommon.c +++ b/libs/libvpx/vp8/common/alloccommon.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "vpx_config.h" #include "alloccommon.h" #include "blockd.h" @@ -18,173 +17,171 @@ #include "entropymode.h" #include "systemdependent.h" -void vp8_de_alloc_frame_buffers(VP8_COMMON *oci) -{ - int i; - for (i = 0; i < NUM_YV12_BUFFERS; i++) - vp8_yv12_de_alloc_frame_buffer(&oci->yv12_fb[i]); +void vp8_de_alloc_frame_buffers(VP8_COMMON *oci) { + int i; + for (i = 0; i < NUM_YV12_BUFFERS; ++i) { + vp8_yv12_de_alloc_frame_buffer(&oci->yv12_fb[i]); + } - vp8_yv12_de_alloc_frame_buffer(&oci->temp_scale_frame); + vp8_yv12_de_alloc_frame_buffer(&oci->temp_scale_frame); #if CONFIG_POSTPROC - vp8_yv12_de_alloc_frame_buffer(&oci->post_proc_buffer); - if (oci->post_proc_buffer_int_used) - vp8_yv12_de_alloc_frame_buffer(&oci->post_proc_buffer_int); + vp8_yv12_de_alloc_frame_buffer(&oci->post_proc_buffer); + if (oci->post_proc_buffer_int_used) { + vp8_yv12_de_alloc_frame_buffer(&oci->post_proc_buffer_int); + } - vpx_free(oci->pp_limits_buffer); - oci->pp_limits_buffer = NULL; + vpx_free(oci->pp_limits_buffer); + oci->pp_limits_buffer = NULL; + + vpx_free(oci->postproc_state.generated_noise); + oci->postproc_state.generated_noise = NULL; #endif - vpx_free(oci->above_context); - vpx_free(oci->mip); + vpx_free(oci->above_context); + vpx_free(oci->mip); #if CONFIG_ERROR_CONCEALMENT - vpx_free(oci->prev_mip); - oci->prev_mip = NULL; + vpx_free(oci->prev_mip); + oci->prev_mip = NULL; #endif - oci->above_context = NULL; - oci->mip = NULL; + oci->above_context = NULL; + oci->mip = NULL; } -int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height) -{ - int i; +int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height) { + int i; - vp8_de_alloc_frame_buffers(oci); + vp8_de_alloc_frame_buffers(oci); - /* our internal buffers are always multiples of 16 */ - if ((width & 0xf) != 0) - width += 16 - (width & 0xf); + /* our internal buffers are always multiples of 16 */ + if ((width & 0xf) != 0) width += 16 - (width & 0xf); - if ((height & 0xf) != 0) - height += 
16 - (height & 0xf); + if ((height & 0xf) != 0) height += 16 - (height & 0xf); - - for (i = 0; i < NUM_YV12_BUFFERS; i++) - { - oci->fb_idx_ref_cnt[i] = 0; - oci->yv12_fb[i].flags = 0; - if (vp8_yv12_alloc_frame_buffer(&oci->yv12_fb[i], width, height, VP8BORDERINPIXELS) < 0) - goto allocation_fail; + for (i = 0; i < NUM_YV12_BUFFERS; ++i) { + oci->fb_idx_ref_cnt[i] = 0; + oci->yv12_fb[i].flags = 0; + if (vp8_yv12_alloc_frame_buffer(&oci->yv12_fb[i], width, height, + VP8BORDERINPIXELS) < 0) { + goto allocation_fail; } + } - oci->new_fb_idx = 0; - oci->lst_fb_idx = 1; - oci->gld_fb_idx = 2; - oci->alt_fb_idx = 3; + oci->new_fb_idx = 0; + oci->lst_fb_idx = 1; + oci->gld_fb_idx = 2; + oci->alt_fb_idx = 3; - oci->fb_idx_ref_cnt[0] = 1; - oci->fb_idx_ref_cnt[1] = 1; - oci->fb_idx_ref_cnt[2] = 1; - oci->fb_idx_ref_cnt[3] = 1; + oci->fb_idx_ref_cnt[0] = 1; + oci->fb_idx_ref_cnt[1] = 1; + oci->fb_idx_ref_cnt[2] = 1; + oci->fb_idx_ref_cnt[3] = 1; - if (vp8_yv12_alloc_frame_buffer(&oci->temp_scale_frame, width, 16, VP8BORDERINPIXELS) < 0) - goto allocation_fail; + if (vp8_yv12_alloc_frame_buffer(&oci->temp_scale_frame, width, 16, + VP8BORDERINPIXELS) < 0) { + goto allocation_fail; + } - oci->mb_rows = height >> 4; - oci->mb_cols = width >> 4; - oci->MBs = oci->mb_rows * oci->mb_cols; - oci->mode_info_stride = oci->mb_cols + 1; - oci->mip = vpx_calloc((oci->mb_cols + 1) * (oci->mb_rows + 1), sizeof(MODE_INFO)); + oci->mb_rows = height >> 4; + oci->mb_cols = width >> 4; + oci->MBs = oci->mb_rows * oci->mb_cols; + oci->mode_info_stride = oci->mb_cols + 1; + oci->mip = + vpx_calloc((oci->mb_cols + 1) * (oci->mb_rows + 1), sizeof(MODE_INFO)); - if (!oci->mip) - goto allocation_fail; + if (!oci->mip) goto allocation_fail; - oci->mi = oci->mip + oci->mode_info_stride + 1; + oci->mi = oci->mip + oci->mode_info_stride + 1; - /* Allocation of previous mode info will be done in vp8_decode_frame() - * as it is a decoder only data */ + /* Allocation of previous mode info will be done in 
vp8_decode_frame() + * as it is a decoder only data */ - oci->above_context = vpx_calloc(sizeof(ENTROPY_CONTEXT_PLANES) * oci->mb_cols, 1); + oci->above_context = + vpx_calloc(sizeof(ENTROPY_CONTEXT_PLANES) * oci->mb_cols, 1); - if (!oci->above_context) - goto allocation_fail; + if (!oci->above_context) goto allocation_fail; #if CONFIG_POSTPROC - if (vp8_yv12_alloc_frame_buffer(&oci->post_proc_buffer, width, height, VP8BORDERINPIXELS) < 0) - goto allocation_fail; + if (vp8_yv12_alloc_frame_buffer(&oci->post_proc_buffer, width, height, + VP8BORDERINPIXELS) < 0) { + goto allocation_fail; + } - oci->post_proc_buffer_int_used = 0; - memset(&oci->postproc_state, 0, sizeof(oci->postproc_state)); - memset(oci->post_proc_buffer.buffer_alloc, 128, - oci->post_proc_buffer.frame_size); + oci->post_proc_buffer_int_used = 0; + memset(&oci->postproc_state, 0, sizeof(oci->postproc_state)); + memset(oci->post_proc_buffer.buffer_alloc, 128, + oci->post_proc_buffer.frame_size); - /* Allocate buffer to store post-processing filter coefficients. - * - * Note: Round up mb_cols to support SIMD reads - */ - oci->pp_limits_buffer = vpx_memalign(16, 24 * ((oci->mb_cols + 1) & ~1)); - if (!oci->pp_limits_buffer) - goto allocation_fail; + /* Allocate buffer to store post-processing filter coefficients. 
+ * + * Note: Round up mb_cols to support SIMD reads + */ + oci->pp_limits_buffer = vpx_memalign(16, 24 * ((oci->mb_cols + 1) & ~1)); + if (!oci->pp_limits_buffer) goto allocation_fail; #endif - return 0; + return 0; allocation_fail: - vp8_de_alloc_frame_buffers(oci); - return 1; + vp8_de_alloc_frame_buffers(oci); + return 1; } -void vp8_setup_version(VP8_COMMON *cm) -{ - switch (cm->version) - { +void vp8_setup_version(VP8_COMMON *cm) { + switch (cm->version) { case 0: - cm->no_lpf = 0; - cm->filter_type = NORMAL_LOOPFILTER; - cm->use_bilinear_mc_filter = 0; - cm->full_pixel = 0; - break; + cm->no_lpf = 0; + cm->filter_type = NORMAL_LOOPFILTER; + cm->use_bilinear_mc_filter = 0; + cm->full_pixel = 0; + break; case 1: - cm->no_lpf = 0; - cm->filter_type = SIMPLE_LOOPFILTER; - cm->use_bilinear_mc_filter = 1; - cm->full_pixel = 0; - break; + cm->no_lpf = 0; + cm->filter_type = SIMPLE_LOOPFILTER; + cm->use_bilinear_mc_filter = 1; + cm->full_pixel = 0; + break; case 2: - cm->no_lpf = 1; - cm->filter_type = NORMAL_LOOPFILTER; - cm->use_bilinear_mc_filter = 1; - cm->full_pixel = 0; - break; + cm->no_lpf = 1; + cm->filter_type = NORMAL_LOOPFILTER; + cm->use_bilinear_mc_filter = 1; + cm->full_pixel = 0; + break; case 3: - cm->no_lpf = 1; - cm->filter_type = SIMPLE_LOOPFILTER; - cm->use_bilinear_mc_filter = 1; - cm->full_pixel = 1; - break; + cm->no_lpf = 1; + cm->filter_type = SIMPLE_LOOPFILTER; + cm->use_bilinear_mc_filter = 1; + cm->full_pixel = 1; + break; default: - /*4,5,6,7 are reserved for future use*/ - cm->no_lpf = 0; - cm->filter_type = NORMAL_LOOPFILTER; - cm->use_bilinear_mc_filter = 0; - cm->full_pixel = 0; - break; - } + /*4,5,6,7 are reserved for future use*/ + cm->no_lpf = 0; + cm->filter_type = NORMAL_LOOPFILTER; + cm->use_bilinear_mc_filter = 0; + cm->full_pixel = 0; + break; + } } -void vp8_create_common(VP8_COMMON *oci) -{ - vp8_machine_specific_config(oci); +void vp8_create_common(VP8_COMMON *oci) { + vp8_machine_specific_config(oci); - 
vp8_init_mbmode_probs(oci); - vp8_default_bmode_probs(oci->fc.bmode_prob); + vp8_init_mbmode_probs(oci); + vp8_default_bmode_probs(oci->fc.bmode_prob); - oci->mb_no_coeff_skip = 1; - oci->no_lpf = 0; - oci->filter_type = NORMAL_LOOPFILTER; - oci->use_bilinear_mc_filter = 0; - oci->full_pixel = 0; - oci->multi_token_partition = ONE_PARTITION; - oci->clamp_type = RECON_CLAMP_REQUIRED; + oci->mb_no_coeff_skip = 1; + oci->no_lpf = 0; + oci->filter_type = NORMAL_LOOPFILTER; + oci->use_bilinear_mc_filter = 0; + oci->full_pixel = 0; + oci->multi_token_partition = ONE_PARTITION; + oci->clamp_type = RECON_CLAMP_REQUIRED; - /* Initialize reference frame sign bias structure to defaults */ - memset(oci->ref_frame_sign_bias, 0, sizeof(oci->ref_frame_sign_bias)); + /* Initialize reference frame sign bias structure to defaults */ + memset(oci->ref_frame_sign_bias, 0, sizeof(oci->ref_frame_sign_bias)); - /* Default disable buffer to buffer copying */ - oci->copy_buffer_to_gf = 0; - oci->copy_buffer_to_arf = 0; + /* Default disable buffer to buffer copying */ + oci->copy_buffer_to_gf = 0; + oci->copy_buffer_to_arf = 0; } -void vp8_remove_common(VP8_COMMON *oci) -{ - vp8_de_alloc_frame_buffers(oci); -} +void vp8_remove_common(VP8_COMMON *oci) { vp8_de_alloc_frame_buffers(oci); } diff --git a/libs/libvpx/vp8/common/alloccommon.h b/libs/libvpx/vp8/common/alloccommon.h index 93e99d76b1..5d0840c670 100644 --- a/libs/libvpx/vp8/common/alloccommon.h +++ b/libs/libvpx/vp8/common/alloccommon.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_COMMON_ALLOCCOMMON_H_ #define VP8_COMMON_ALLOCCOMMON_H_ diff --git a/libs/libvpx/vp8/common/arm/armv6/bilinearfilter_v6.asm b/libs/libvpx/vp8/common/arm/armv6/bilinearfilter_v6.asm deleted file mode 100644 index 9704b42105..0000000000 --- a/libs/libvpx/vp8/common/arm/armv6/bilinearfilter_v6.asm +++ /dev/null @@ -1,237 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
-; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - - - EXPORT |vp8_filter_block2d_bil_first_pass_armv6| - EXPORT |vp8_filter_block2d_bil_second_pass_armv6| - - AREA |.text|, CODE, READONLY ; name this block of code - -;------------------------------------- -; r0 unsigned char *src_ptr, -; r1 unsigned short *dst_ptr, -; r2 unsigned int src_pitch, -; r3 unsigned int height, -; stack unsigned int width, -; stack const short *vp8_filter -;------------------------------------- -; The output is transposed stroed in output array to make it easy for second pass filtering. -|vp8_filter_block2d_bil_first_pass_armv6| PROC - stmdb sp!, {r4 - r11, lr} - - ldr r11, [sp, #40] ; vp8_filter address - ldr r4, [sp, #36] ; width - - mov r12, r3 ; outer-loop counter - - add r7, r2, r4 ; preload next row - pld [r0, r7] - - sub r2, r2, r4 ; src increment for height loop - - ldr r5, [r11] ; load up filter coefficients - - mov r3, r3, lsl #1 ; height*2 - add r3, r3, #2 ; plus 2 to make output buffer 4-bit aligned since height is actually (height+1) - - mov r11, r1 ; save dst_ptr for each row - - cmp r5, #128 ; if filter coef = 128, then skip the filter - beq bil_null_1st_filter - -|bil_height_loop_1st_v6| - ldrb r6, [r0] ; load source data - ldrb r7, [r0, #1] - ldrb r8, [r0, #2] - mov lr, r4, lsr #2 ; 4-in-parellel loop counter - -|bil_width_loop_1st_v6| - ldrb r9, [r0, #3] - ldrb r10, [r0, #4] - - pkhbt r6, r6, r7, lsl #16 ; src[1] | src[0] - pkhbt r7, r7, r8, lsl #16 ; src[2] | src[1] - - smuad r6, r6, r5 ; apply the filter - pkhbt r8, r8, r9, lsl #16 ; src[3] | src[2] - smuad r7, r7, r5 - pkhbt r9, r9, r10, lsl #16 ; src[4] | src[3] - - smuad r8, r8, r5 - smuad r9, r9, r5 - - add r0, r0, #4 - subs lr, lr, #1 - 
- add r6, r6, #0x40 ; round_shift_and_clamp - add r7, r7, #0x40 - usat r6, #16, r6, asr #7 - usat r7, #16, r7, asr #7 - - strh r6, [r1], r3 ; result is transposed and stored - - add r8, r8, #0x40 ; round_shift_and_clamp - strh r7, [r1], r3 - add r9, r9, #0x40 - usat r8, #16, r8, asr #7 - usat r9, #16, r9, asr #7 - - strh r8, [r1], r3 ; result is transposed and stored - - ldrneb r6, [r0] ; load source data - strh r9, [r1], r3 - - ldrneb r7, [r0, #1] - ldrneb r8, [r0, #2] - - bne bil_width_loop_1st_v6 - - add r0, r0, r2 ; move to next input row - subs r12, r12, #1 - - add r9, r2, r4, lsl #1 ; adding back block width - pld [r0, r9] ; preload next row - - add r11, r11, #2 ; move over to next column - mov r1, r11 - - bne bil_height_loop_1st_v6 - - ldmia sp!, {r4 - r11, pc} - -|bil_null_1st_filter| -|bil_height_loop_null_1st| - mov lr, r4, lsr #2 ; loop counter - -|bil_width_loop_null_1st| - ldrb r6, [r0] ; load data - ldrb r7, [r0, #1] - ldrb r8, [r0, #2] - ldrb r9, [r0, #3] - - strh r6, [r1], r3 ; store it to immediate buffer - add r0, r0, #4 - strh r7, [r1], r3 - subs lr, lr, #1 - strh r8, [r1], r3 - strh r9, [r1], r3 - - bne bil_width_loop_null_1st - - subs r12, r12, #1 - add r0, r0, r2 ; move to next input line - add r11, r11, #2 ; move over to next column - mov r1, r11 - - bne bil_height_loop_null_1st - - ldmia sp!, {r4 - r11, pc} - - ENDP ; |vp8_filter_block2d_bil_first_pass_armv6| - - -;--------------------------------- -; r0 unsigned short *src_ptr, -; r1 unsigned char *dst_ptr, -; r2 int dst_pitch, -; r3 unsigned int height, -; stack unsigned int width, -; stack const short *vp8_filter -;--------------------------------- -|vp8_filter_block2d_bil_second_pass_armv6| PROC - stmdb sp!, {r4 - r11, lr} - - ldr r11, [sp, #40] ; vp8_filter address - ldr r4, [sp, #36] ; width - - ldr r5, [r11] ; load up filter coefficients - mov r12, r4 ; outer-loop counter = width, since we work on transposed data matrix - mov r11, r1 - - cmp r5, #128 ; if filter coef = 128, then skip 
the filter - beq bil_null_2nd_filter - -|bil_height_loop_2nd| - ldr r6, [r0] ; load the data - ldr r8, [r0, #4] - ldrh r10, [r0, #8] - mov lr, r3, lsr #2 ; loop counter - -|bil_width_loop_2nd| - pkhtb r7, r6, r8 ; src[1] | src[2] - pkhtb r9, r8, r10 ; src[3] | src[4] - - smuad r6, r6, r5 ; apply filter - smuad r8, r8, r5 ; apply filter - - subs lr, lr, #1 - - smuadx r7, r7, r5 ; apply filter - smuadx r9, r9, r5 ; apply filter - - add r0, r0, #8 - - add r6, r6, #0x40 ; round_shift_and_clamp - add r7, r7, #0x40 - usat r6, #8, r6, asr #7 - usat r7, #8, r7, asr #7 - strb r6, [r1], r2 ; the result is transposed back and stored - - add r8, r8, #0x40 ; round_shift_and_clamp - strb r7, [r1], r2 - add r9, r9, #0x40 - usat r8, #8, r8, asr #7 - usat r9, #8, r9, asr #7 - strb r8, [r1], r2 ; the result is transposed back and stored - - ldrne r6, [r0] ; load data - strb r9, [r1], r2 - ldrne r8, [r0, #4] - ldrneh r10, [r0, #8] - - bne bil_width_loop_2nd - - subs r12, r12, #1 - add r0, r0, #4 ; update src for next row - add r11, r11, #1 - mov r1, r11 - - bne bil_height_loop_2nd - ldmia sp!, {r4 - r11, pc} - -|bil_null_2nd_filter| -|bil_height_loop_null_2nd| - mov lr, r3, lsr #2 - -|bil_width_loop_null_2nd| - ldr r6, [r0], #4 ; load data - subs lr, lr, #1 - ldr r8, [r0], #4 - - strb r6, [r1], r2 ; store data - mov r7, r6, lsr #16 - strb r7, [r1], r2 - mov r9, r8, lsr #16 - strb r8, [r1], r2 - strb r9, [r1], r2 - - bne bil_width_loop_null_2nd - - subs r12, r12, #1 - add r0, r0, #4 - add r11, r11, #1 - mov r1, r11 - - bne bil_height_loop_null_2nd - - ldmia sp!, {r4 - r11, pc} - ENDP ; |vp8_filter_block2d_second_pass_armv6| - - END diff --git a/libs/libvpx/vp8/common/arm/armv6/copymem16x16_v6.asm b/libs/libvpx/vp8/common/arm/armv6/copymem16x16_v6.asm deleted file mode 100644 index abf048c2fa..0000000000 --- a/libs/libvpx/vp8/common/arm/armv6/copymem16x16_v6.asm +++ /dev/null @@ -1,186 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
-; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - - - EXPORT |vp8_copy_mem16x16_v6| - ; ARM - ; REQUIRE8 - ; PRESERVE8 - - AREA Block, CODE, READONLY ; name this block of code -;void copy_mem16x16_v6( unsigned char *src, int src_stride, unsigned char *dst, int dst_stride) -;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= -|vp8_copy_mem16x16_v6| PROC - stmdb sp!, {r4 - r7} - ;push {r4-r7} - - ;preload - pld [r0, #31] ; preload for next 16x16 block - - ands r4, r0, #15 - beq copy_mem16x16_fast - - ands r4, r0, #7 - beq copy_mem16x16_8 - - ands r4, r0, #3 - beq copy_mem16x16_4 - - ;copy one byte each time - ldrb r4, [r0] - ldrb r5, [r0, #1] - ldrb r6, [r0, #2] - ldrb r7, [r0, #3] - - mov r12, #16 - -copy_mem16x16_1_loop - strb r4, [r2] - strb r5, [r2, #1] - strb r6, [r2, #2] - strb r7, [r2, #3] - - ldrb r4, [r0, #4] - ldrb r5, [r0, #5] - ldrb r6, [r0, #6] - ldrb r7, [r0, #7] - - subs r12, r12, #1 - - strb r4, [r2, #4] - strb r5, [r2, #5] - strb r6, [r2, #6] - strb r7, [r2, #7] - - ldrb r4, [r0, #8] - ldrb r5, [r0, #9] - ldrb r6, [r0, #10] - ldrb r7, [r0, #11] - - strb r4, [r2, #8] - strb r5, [r2, #9] - strb r6, [r2, #10] - strb r7, [r2, #11] - - ldrb r4, [r0, #12] - ldrb r5, [r0, #13] - ldrb r6, [r0, #14] - ldrb r7, [r0, #15] - - add r0, r0, r1 - - strb r4, [r2, #12] - strb r5, [r2, #13] - strb r6, [r2, #14] - strb r7, [r2, #15] - - add r2, r2, r3 - - ldrneb r4, [r0] - ldrneb r5, [r0, #1] - ldrneb r6, [r0, #2] - ldrneb r7, [r0, #3] - - pld [r0, #31] ; preload for next 16x16 block - - bne copy_mem16x16_1_loop - - ldmia sp!, {r4 - r7} - ;pop {r4-r7} - mov pc, lr - -;copy 4 bytes each time -copy_mem16x16_4 - ldr r4, [r0] - ldr r5, [r0, #4] - ldr r6, [r0, #8] - 
ldr r7, [r0, #12] - - mov r12, #16 - -copy_mem16x16_4_loop - subs r12, r12, #1 - add r0, r0, r1 - - str r4, [r2] - str r5, [r2, #4] - str r6, [r2, #8] - str r7, [r2, #12] - - add r2, r2, r3 - - ldrne r4, [r0] - ldrne r5, [r0, #4] - ldrne r6, [r0, #8] - ldrne r7, [r0, #12] - - pld [r0, #31] ; preload for next 16x16 block - - bne copy_mem16x16_4_loop - - ldmia sp!, {r4 - r7} - ;pop {r4-r7} - mov pc, lr - -;copy 8 bytes each time -copy_mem16x16_8 - sub r1, r1, #16 - sub r3, r3, #16 - - mov r12, #16 - -copy_mem16x16_8_loop - ldmia r0!, {r4-r5} - ;ldm r0, {r4-r5} - ldmia r0!, {r6-r7} - - add r0, r0, r1 - - stmia r2!, {r4-r5} - subs r12, r12, #1 - ;stm r2, {r4-r5} - stmia r2!, {r6-r7} - - add r2, r2, r3 - - pld [r0, #31] ; preload for next 16x16 block - bne copy_mem16x16_8_loop - - ldmia sp!, {r4 - r7} - ;pop {r4-r7} - mov pc, lr - -;copy 16 bytes each time -copy_mem16x16_fast - ;sub r1, r1, #16 - ;sub r3, r3, #16 - - mov r12, #16 - -copy_mem16x16_fast_loop - ldmia r0, {r4-r7} - ;ldm r0, {r4-r7} - add r0, r0, r1 - - subs r12, r12, #1 - stmia r2, {r4-r7} - ;stm r2, {r4-r7} - add r2, r2, r3 - - pld [r0, #31] ; preload for next 16x16 block - bne copy_mem16x16_fast_loop - - ldmia sp!, {r4 - r7} - ;pop {r4-r7} - mov pc, lr - - ENDP ; |vp8_copy_mem16x16_v6| - - END diff --git a/libs/libvpx/vp8/common/arm/armv6/copymem8x4_v6.asm b/libs/libvpx/vp8/common/arm/armv6/copymem8x4_v6.asm deleted file mode 100644 index d8362ef052..0000000000 --- a/libs/libvpx/vp8/common/arm/armv6/copymem8x4_v6.asm +++ /dev/null @@ -1,128 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - - - EXPORT |vp8_copy_mem8x4_v6| - ; ARM - ; REQUIRE8 - ; PRESERVE8 - - AREA Block, CODE, READONLY ; name this block of code -;void vp8_copy_mem8x4_v6( unsigned char *src, int src_stride, unsigned char *dst, int dst_stride) -;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= -|vp8_copy_mem8x4_v6| PROC - ;push {r4-r5} - stmdb sp!, {r4-r5} - - ;preload - pld [r0] - pld [r0, r1] - pld [r0, r1, lsl #1] - - ands r4, r0, #7 - beq copy_mem8x4_fast - - ands r4, r0, #3 - beq copy_mem8x4_4 - - ;copy 1 byte each time - ldrb r4, [r0] - ldrb r5, [r0, #1] - - mov r12, #4 - -copy_mem8x4_1_loop - strb r4, [r2] - strb r5, [r2, #1] - - ldrb r4, [r0, #2] - ldrb r5, [r0, #3] - - subs r12, r12, #1 - - strb r4, [r2, #2] - strb r5, [r2, #3] - - ldrb r4, [r0, #4] - ldrb r5, [r0, #5] - - strb r4, [r2, #4] - strb r5, [r2, #5] - - ldrb r4, [r0, #6] - ldrb r5, [r0, #7] - - add r0, r0, r1 - - strb r4, [r2, #6] - strb r5, [r2, #7] - - add r2, r2, r3 - - ldrneb r4, [r0] - ldrneb r5, [r0, #1] - - bne copy_mem8x4_1_loop - - ldmia sp!, {r4 - r5} - ;pop {r4-r5} - mov pc, lr - -;copy 4 bytes each time -copy_mem8x4_4 - ldr r4, [r0] - ldr r5, [r0, #4] - - mov r12, #4 - -copy_mem8x4_4_loop - subs r12, r12, #1 - add r0, r0, r1 - - str r4, [r2] - str r5, [r2, #4] - - add r2, r2, r3 - - ldrne r4, [r0] - ldrne r5, [r0, #4] - - bne copy_mem8x4_4_loop - - ldmia sp!, {r4-r5} - ;pop {r4-r5} - mov pc, lr - -;copy 8 bytes each time -copy_mem8x4_fast - ;sub r1, r1, #8 - ;sub r3, r3, #8 - - mov r12, #4 - -copy_mem8x4_fast_loop - ldmia r0, {r4-r5} - ;ldm r0, {r4-r5} - add r0, r0, r1 - - subs r12, r12, #1 - stmia r2, {r4-r5} - ;stm r2, {r4-r5} - add r2, r2, r3 - - bne copy_mem8x4_fast_loop - - ldmia sp!, {r4-r5} - ;pop {r4-r5} - mov pc, lr - - ENDP ; |vp8_copy_mem8x4_v6| - - END diff --git a/libs/libvpx/vp8/common/arm/armv6/copymem8x8_v6.asm b/libs/libvpx/vp8/common/arm/armv6/copymem8x8_v6.asm deleted file mode 100644 index c6a60c610b..0000000000 --- 
a/libs/libvpx/vp8/common/arm/armv6/copymem8x8_v6.asm +++ /dev/null @@ -1,128 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - - - EXPORT |vp8_copy_mem8x8_v6| - ; ARM - ; REQUIRE8 - ; PRESERVE8 - - AREA Block, CODE, READONLY ; name this block of code -;void copy_mem8x8_v6( unsigned char *src, int src_stride, unsigned char *dst, int dst_stride) -;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= -|vp8_copy_mem8x8_v6| PROC - ;push {r4-r5} - stmdb sp!, {r4-r5} - - ;preload - pld [r0] - pld [r0, r1] - pld [r0, r1, lsl #1] - - ands r4, r0, #7 - beq copy_mem8x8_fast - - ands r4, r0, #3 - beq copy_mem8x8_4 - - ;copy 1 byte each time - ldrb r4, [r0] - ldrb r5, [r0, #1] - - mov r12, #8 - -copy_mem8x8_1_loop - strb r4, [r2] - strb r5, [r2, #1] - - ldrb r4, [r0, #2] - ldrb r5, [r0, #3] - - subs r12, r12, #1 - - strb r4, [r2, #2] - strb r5, [r2, #3] - - ldrb r4, [r0, #4] - ldrb r5, [r0, #5] - - strb r4, [r2, #4] - strb r5, [r2, #5] - - ldrb r4, [r0, #6] - ldrb r5, [r0, #7] - - add r0, r0, r1 - - strb r4, [r2, #6] - strb r5, [r2, #7] - - add r2, r2, r3 - - ldrneb r4, [r0] - ldrneb r5, [r0, #1] - - bne copy_mem8x8_1_loop - - ldmia sp!, {r4 - r5} - ;pop {r4-r5} - mov pc, lr - -;copy 4 bytes each time -copy_mem8x8_4 - ldr r4, [r0] - ldr r5, [r0, #4] - - mov r12, #8 - -copy_mem8x8_4_loop - subs r12, r12, #1 - add r0, r0, r1 - - str r4, [r2] - str r5, [r2, #4] - - add r2, r2, r3 - - ldrne r4, [r0] - ldrne r5, [r0, #4] - - bne copy_mem8x8_4_loop - - ldmia sp!, {r4 - r5} - ;pop {r4-r5} - mov pc, lr - -;copy 8 bytes each time -copy_mem8x8_fast - ;sub r1, r1, #8 - ;sub r3, r3, #8 - - mov r12, #8 - 
-copy_mem8x8_fast_loop - ldmia r0, {r4-r5} - ;ldm r0, {r4-r5} - add r0, r0, r1 - - subs r12, r12, #1 - stmia r2, {r4-r5} - ;stm r2, {r4-r5} - add r2, r2, r3 - - bne copy_mem8x8_fast_loop - - ldmia sp!, {r4-r5} - ;pop {r4-r5} - mov pc, lr - - ENDP ; |vp8_copy_mem8x8_v6| - - END diff --git a/libs/libvpx/vp8/common/arm/armv6/dc_only_idct_add_v6.asm b/libs/libvpx/vp8/common/arm/armv6/dc_only_idct_add_v6.asm deleted file mode 100644 index 9aa659fa70..0000000000 --- a/libs/libvpx/vp8/common/arm/armv6/dc_only_idct_add_v6.asm +++ /dev/null @@ -1,70 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license and patent -; grant that can be found in the LICENSE file in the root of the source -; tree. All contributing project authors may be found in the AUTHORS -; file in the root of the source tree. -; - - EXPORT |vp8_dc_only_idct_add_v6| - - AREA |.text|, CODE, READONLY - -;void vp8_dc_only_idct_add_c(short input_dc, unsigned char *pred_ptr, -; int pred_stride, unsigned char *dst_ptr, -; int dst_stride) -; r0 input_dc -; r1 pred_ptr -; r2 pred_stride -; r3 dst_ptr -; sp dst_stride - -|vp8_dc_only_idct_add_v6| PROC - stmdb sp!, {r4 - r7} - - add r0, r0, #4 ; input_dc += 4 - ldr r12, c0x0000FFFF - ldr r4, [r1], r2 - and r0, r12, r0, asr #3 ; input_dc >> 3 + mask - ldr r6, [r1], r2 - orr r0, r0, r0, lsl #16 ; a1 | a1 - - ldr r12, [sp, #16] ; dst stride - - uxtab16 r5, r0, r4 ; a1+2 | a1+0 - uxtab16 r4, r0, r4, ror #8 ; a1+3 | a1+1 - uxtab16 r7, r0, r6 - uxtab16 r6, r0, r6, ror #8 - usat16 r5, #8, r5 - usat16 r4, #8, r4 - usat16 r7, #8, r7 - usat16 r6, #8, r6 - orr r5, r5, r4, lsl #8 - orr r7, r7, r6, lsl #8 - ldr r4, [r1], r2 - str r5, [r3], r12 - ldr r6, [r1] - str r7, [r3], r12 - - uxtab16 r5, r0, r4 - uxtab16 r4, r0, r4, ror #8 - uxtab16 r7, r0, r6 - uxtab16 r6, r0, r6, ror #8 - usat16 r5, #8, r5 - usat16 r4, #8, r4 - usat16 r7, #8, r7 - usat16 r6, #8, r6 - orr r5, r5, r4, lsl #8 - orr r7, 
r7, r6, lsl #8 - str r5, [r3], r12 - str r7, [r3] - - ldmia sp!, {r4 - r7} - bx lr - - ENDP ; |vp8_dc_only_idct_add_v6| - -; Constant Pool -c0x0000FFFF DCD 0x0000FFFF - END diff --git a/libs/libvpx/vp8/common/arm/armv6/dequant_idct_v6.asm b/libs/libvpx/vp8/common/arm/armv6/dequant_idct_v6.asm deleted file mode 100644 index db48ded582..0000000000 --- a/libs/libvpx/vp8/common/arm/armv6/dequant_idct_v6.asm +++ /dev/null @@ -1,190 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license and patent -; grant that can be found in the LICENSE file in the root of the source -; tree. All contributing project authors may be found in the AUTHORS -; file in the root of the source tree. -; - - EXPORT |vp8_dequant_idct_add_v6| - - AREA |.text|, CODE, READONLY -;void vp8_dequant_idct_v6(short *input, short *dq, -; unsigned char *dest, int stride) -; r0 = q -; r1 = dq -; r2 = dst -; r3 = stride - -|vp8_dequant_idct_add_v6| PROC - stmdb sp!, {r4-r11, lr} - - ldr r4, [r0] ;input - ldr r5, [r1], #4 ;dq - - sub sp, sp, #4 - str r3, [sp] - - mov r12, #4 - -vp8_dequant_add_loop - smulbb r6, r4, r5 - smultt r7, r4, r5 - - ldr r4, [r0, #4] ;input - ldr r5, [r1], #4 ;dq - - strh r6, [r0], #2 - strh r7, [r0], #2 - - smulbb r6, r4, r5 - smultt r7, r4, r5 - - subs r12, r12, #1 - - ldrne r4, [r0, #4] - ldrne r5, [r1], #4 - - strh r6, [r0], #2 - strh r7, [r0], #2 - - bne vp8_dequant_add_loop - - sub r0, r0, #32 - mov r1, r0 - -; short_idct4x4llm_v6_dual - ldr r3, cospi8sqrt2minus1 - ldr r4, sinpi8sqrt2 - ldr r6, [r0, #8] - mov r5, #2 -vp8_dequant_idct_loop1_v6 - ldr r12, [r0, #24] - ldr r14, [r0, #16] - smulwt r9, r3, r6 - smulwb r7, r3, r6 - smulwt r10, r4, r6 - smulwb r8, r4, r6 - pkhbt r7, r7, r9, lsl #16 - smulwt r11, r3, r12 - pkhbt r8, r8, r10, lsl #16 - uadd16 r6, r6, r7 - smulwt r7, r4, r12 - smulwb r9, r3, r12 - smulwb r10, r4, r12 - subs r5, r5, #1 - pkhbt r9, r9, r11, lsl #16 - ldr r11, [r0], #4 - 
pkhbt r10, r10, r7, lsl #16 - uadd16 r7, r12, r9 - usub16 r7, r8, r7 - uadd16 r6, r6, r10 - uadd16 r10, r11, r14 - usub16 r8, r11, r14 - uadd16 r9, r10, r6 - usub16 r10, r10, r6 - uadd16 r6, r8, r7 - usub16 r7, r8, r7 - str r6, [r1, #8] - ldrne r6, [r0, #8] - str r7, [r1, #16] - str r10, [r1, #24] - str r9, [r1], #4 - bne vp8_dequant_idct_loop1_v6 - - mov r5, #2 - sub r0, r1, #8 -vp8_dequant_idct_loop2_v6 - ldr r6, [r0], #4 - ldr r7, [r0], #4 - ldr r8, [r0], #4 - ldr r9, [r0], #4 - smulwt r1, r3, r6 - smulwt r12, r4, r6 - smulwt lr, r3, r8 - smulwt r10, r4, r8 - pkhbt r11, r8, r6, lsl #16 - pkhbt r1, lr, r1, lsl #16 - pkhbt r12, r10, r12, lsl #16 - pkhtb r6, r6, r8, asr #16 - uadd16 r6, r1, r6 - pkhbt lr, r9, r7, lsl #16 - uadd16 r10, r11, lr - usub16 lr, r11, lr - pkhtb r8, r7, r9, asr #16 - subs r5, r5, #1 - smulwt r1, r3, r8 - smulwb r7, r3, r8 - smulwt r11, r4, r8 - smulwb r9, r4, r8 - pkhbt r1, r7, r1, lsl #16 - uadd16 r8, r1, r8 - pkhbt r11, r9, r11, lsl #16 - usub16 r1, r12, r8 - uadd16 r8, r11, r6 - ldr r9, c0x00040004 - ldr r12, [sp] ; get stride from stack - uadd16 r6, r10, r8 - usub16 r7, r10, r8 - uadd16 r7, r7, r9 - uadd16 r6, r6, r9 - uadd16 r10, r14, r1 - usub16 r1, r14, r1 - uadd16 r10, r10, r9 - uadd16 r1, r1, r9 - ldr r11, [r2] ; load input from dst - mov r8, r7, asr #3 - pkhtb r9, r8, r10, asr #19 - mov r8, r1, asr #3 - pkhtb r8, r8, r6, asr #19 - uxtb16 lr, r11, ror #8 - qadd16 r9, r9, lr - uxtb16 lr, r11 - qadd16 r8, r8, lr - usat16 r9, #8, r9 - usat16 r8, #8, r8 - orr r9, r8, r9, lsl #8 - ldr r11, [r2, r12] ; load input from dst - mov r7, r7, lsl #16 - mov r1, r1, lsl #16 - mov r10, r10, lsl #16 - mov r6, r6, lsl #16 - mov r7, r7, asr #3 - pkhtb r7, r7, r10, asr #19 - mov r1, r1, asr #3 - pkhtb r1, r1, r6, asr #19 - uxtb16 r8, r11, ror #8 - qadd16 r7, r7, r8 - uxtb16 r8, r11 - qadd16 r1, r1, r8 - usat16 r7, #8, r7 - usat16 r1, #8, r1 - orr r1, r1, r7, lsl #8 - str r9, [r2], r12 ; store output to dst - str r1, [r2], r12 ; store output to dst - 
bne vp8_dequant_idct_loop2_v6 - -; memset - sub r0, r0, #32 - add sp, sp, #4 - - mov r12, #0 - str r12, [r0] - str r12, [r0, #4] - str r12, [r0, #8] - str r12, [r0, #12] - str r12, [r0, #16] - str r12, [r0, #20] - str r12, [r0, #24] - str r12, [r0, #28] - - ldmia sp!, {r4 - r11, pc} - ENDP ; |vp8_dequant_idct_add_v6| - -; Constant Pool -cospi8sqrt2minus1 DCD 0x00004E7B -sinpi8sqrt2 DCD 0x00008A8C -c0x00040004 DCD 0x00040004 - - END diff --git a/libs/libvpx/vp8/common/arm/armv6/dequantize_v6.asm b/libs/libvpx/vp8/common/arm/armv6/dequantize_v6.asm deleted file mode 100644 index 72f7e0ee57..0000000000 --- a/libs/libvpx/vp8/common/arm/armv6/dequantize_v6.asm +++ /dev/null @@ -1,69 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - - - EXPORT |vp8_dequantize_b_loop_v6| - - AREA |.text|, CODE, READONLY ; name this block of code -;------------------------------- -;void vp8_dequantize_b_loop_v6(short *Q, short *DQC, short *DQ); -; r0 short *Q, -; r1 short *DQC -; r2 short *DQ -|vp8_dequantize_b_loop_v6| PROC - stmdb sp!, {r4-r9, lr} - - ldr r3, [r0] ;load Q - ldr r4, [r1] ;load DQC - ldr r5, [r0, #4] - ldr r6, [r1, #4] - - mov r12, #2 ;loop counter - -dequant_loop - smulbb r7, r3, r4 ;multiply - smultt r8, r3, r4 - smulbb r9, r5, r6 - smultt lr, r5, r6 - - ldr r3, [r0, #8] - ldr r4, [r1, #8] - ldr r5, [r0, #12] - ldr r6, [r1, #12] - - strh r7, [r2], #2 ;store result - smulbb r7, r3, r4 ;multiply - strh r8, [r2], #2 - smultt r8, r3, r4 - strh r9, [r2], #2 - smulbb r9, r5, r6 - strh lr, [r2], #2 - smultt lr, r5, r6 - - subs r12, r12, #1 - - add r0, r0, #16 - add r1, r1, #16 - - ldrne r3, [r0] - strh r7, [r2], #2 ;store result - ldrne r4, [r1] - strh r8, [r2], #2 - ldrne r5, [r0, #4] - strh r9, [r2], #2 - ldrne r6, [r1, #4] - strh lr, [r2], #2 - - bne dequant_loop - - ldmia sp!, {r4-r9, pc} - ENDP ;|vp8_dequantize_b_loop_v6| - - END diff --git a/libs/libvpx/vp8/common/arm/armv6/filter_v6.asm b/libs/libvpx/vp8/common/arm/armv6/filter_v6.asm deleted file mode 100644 index eb4b75bd80..0000000000 --- a/libs/libvpx/vp8/common/arm/armv6/filter_v6.asm +++ /dev/null @@ -1,624 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - - - EXPORT |vp8_filter_block2d_first_pass_armv6| - EXPORT |vp8_filter_block2d_first_pass_16x16_armv6| - EXPORT |vp8_filter_block2d_first_pass_8x8_armv6| - EXPORT |vp8_filter_block2d_second_pass_armv6| - EXPORT |vp8_filter4_block2d_second_pass_armv6| - EXPORT |vp8_filter_block2d_first_pass_only_armv6| - EXPORT |vp8_filter_block2d_second_pass_only_armv6| - - AREA |.text|, CODE, READONLY ; name this block of code -;------------------------------------- -; r0 unsigned char *src_ptr -; r1 short *output_ptr -; r2 unsigned int src_pixels_per_line -; r3 unsigned int output_width -; stack unsigned int output_height -; stack const short *vp8_filter -;------------------------------------- -; vp8_filter the input and put in the output array. Apply the 6 tap FIR filter with -; the output being a 2 byte value and the intput being a 1 byte value. -|vp8_filter_block2d_first_pass_armv6| PROC - stmdb sp!, {r4 - r11, lr} - - ldr r11, [sp, #40] ; vp8_filter address - ldr r7, [sp, #36] ; output height - - sub r2, r2, r3 ; inside loop increments input array, - ; so the height loop only needs to add - ; r2 - width to the input pointer - - mov r3, r3, lsl #1 ; multiply width by 2 because using shorts - add r12, r3, #16 ; square off the output - sub sp, sp, #4 - - ldr r4, [r11] ; load up packed filter coefficients - ldr r5, [r11, #4] - ldr r6, [r11, #8] - - str r1, [sp] ; push destination to stack - mov r7, r7, lsl #16 ; height is top part of counter - -; six tap filter -|height_loop_1st_6| - ldrb r8, [r0, #-2] ; load source data - ldrb r9, [r0, #-1] - ldrb r10, [r0], #2 - orr r7, r7, r3, lsr #2 ; construct loop counter - -|width_loop_1st_6| - ldrb r11, [r0, #-1] - - pkhbt lr, r8, r9, lsl #16 ; r9 | r8 - pkhbt r8, r9, r10, lsl #16 ; r10 | r9 - - ldrb r9, [r0] - - smuad lr, lr, r4 ; apply the filter - pkhbt r10, r10, r11, lsl #16 ; r11 | r10 - smuad r8, r8, r4 - pkhbt r11, r11, r9, lsl #16 ; r9 | r11 - - smlad lr, r10, r5, lr - ldrb r10, [r0, #1] - smlad r8, r11, r5, r8 - ldrb r11, 
[r0, #2] - - sub r7, r7, #1 - - pkhbt r9, r9, r10, lsl #16 ; r10 | r9 - pkhbt r10, r10, r11, lsl #16 ; r11 | r10 - - smlad lr, r9, r6, lr - smlad r11, r10, r6, r8 - - ands r10, r7, #0xff ; test loop counter - - add lr, lr, #0x40 ; round_shift_and_clamp - ldrneb r8, [r0, #-2] ; load data for next loop - usat lr, #8, lr, asr #7 - add r11, r11, #0x40 - ldrneb r9, [r0, #-1] - usat r11, #8, r11, asr #7 - - strh lr, [r1], r12 ; result is transposed and stored, which - ; will make second pass filtering easier. - ldrneb r10, [r0], #2 - strh r11, [r1], r12 - - bne width_loop_1st_6 - - ldr r1, [sp] ; load and update dst address - subs r7, r7, #0x10000 - add r0, r0, r2 ; move to next input line - - add r1, r1, #2 ; move over to next column - str r1, [sp] - - bne height_loop_1st_6 - - add sp, sp, #4 - ldmia sp!, {r4 - r11, pc} - - ENDP - -; -------------------------- -; 16x16 version -; ----------------------------- -|vp8_filter_block2d_first_pass_16x16_armv6| PROC - stmdb sp!, {r4 - r11, lr} - - ldr r11, [sp, #40] ; vp8_filter address - ldr r7, [sp, #36] ; output height - - add r4, r2, #18 ; preload next low - pld [r0, r4] - - sub r2, r2, r3 ; inside loop increments input array, - ; so the height loop only needs to add - ; r2 - width to the input pointer - - mov r3, r3, lsl #1 ; multiply width by 2 because using shorts - add r12, r3, #16 ; square off the output - sub sp, sp, #4 - - ldr r4, [r11] ; load up packed filter coefficients - ldr r5, [r11, #4] - ldr r6, [r11, #8] - - str r1, [sp] ; push destination to stack - mov r7, r7, lsl #16 ; height is top part of counter - -; six tap filter -|height_loop_1st_16_6| - ldrb r8, [r0, #-2] ; load source data - ldrb r9, [r0, #-1] - ldrb r10, [r0], #2 - orr r7, r7, r3, lsr #2 ; construct loop counter - -|width_loop_1st_16_6| - ldrb r11, [r0, #-1] - - pkhbt lr, r8, r9, lsl #16 ; r9 | r8 - pkhbt r8, r9, r10, lsl #16 ; r10 | r9 - - ldrb r9, [r0] - - smuad lr, lr, r4 ; apply the filter - pkhbt r10, r10, r11, lsl #16 ; r11 | r10 - smuad r8, 
r8, r4 - pkhbt r11, r11, r9, lsl #16 ; r9 | r11 - - smlad lr, r10, r5, lr - ldrb r10, [r0, #1] - smlad r8, r11, r5, r8 - ldrb r11, [r0, #2] - - sub r7, r7, #1 - - pkhbt r9, r9, r10, lsl #16 ; r10 | r9 - pkhbt r10, r10, r11, lsl #16 ; r11 | r10 - - smlad lr, r9, r6, lr - smlad r11, r10, r6, r8 - - ands r10, r7, #0xff ; test loop counter - - add lr, lr, #0x40 ; round_shift_and_clamp - ldrneb r8, [r0, #-2] ; load data for next loop - usat lr, #8, lr, asr #7 - add r11, r11, #0x40 - ldrneb r9, [r0, #-1] - usat r11, #8, r11, asr #7 - - strh lr, [r1], r12 ; result is transposed and stored, which - ; will make second pass filtering easier. - ldrneb r10, [r0], #2 - strh r11, [r1], r12 - - bne width_loop_1st_16_6 - - ldr r1, [sp] ; load and update dst address - subs r7, r7, #0x10000 - add r0, r0, r2 ; move to next input line - - add r11, r2, #34 ; adding back block width(=16) - pld [r0, r11] ; preload next low - - add r1, r1, #2 ; move over to next column - str r1, [sp] - - bne height_loop_1st_16_6 - - add sp, sp, #4 - ldmia sp!, {r4 - r11, pc} - - ENDP - -; -------------------------- -; 8x8 version -; ----------------------------- -|vp8_filter_block2d_first_pass_8x8_armv6| PROC - stmdb sp!, {r4 - r11, lr} - - ldr r11, [sp, #40] ; vp8_filter address - ldr r7, [sp, #36] ; output height - - add r4, r2, #10 ; preload next low - pld [r0, r4] - - sub r2, r2, r3 ; inside loop increments input array, - ; so the height loop only needs to add - ; r2 - width to the input pointer - - mov r3, r3, lsl #1 ; multiply width by 2 because using shorts - add r12, r3, #16 ; square off the output - sub sp, sp, #4 - - ldr r4, [r11] ; load up packed filter coefficients - ldr r5, [r11, #4] - ldr r6, [r11, #8] - - str r1, [sp] ; push destination to stack - mov r7, r7, lsl #16 ; height is top part of counter - -; six tap filter -|height_loop_1st_8_6| - ldrb r8, [r0, #-2] ; load source data - ldrb r9, [r0, #-1] - ldrb r10, [r0], #2 - orr r7, r7, r3, lsr #2 ; construct loop counter - 
-|width_loop_1st_8_6| - ldrb r11, [r0, #-1] - - pkhbt lr, r8, r9, lsl #16 ; r9 | r8 - pkhbt r8, r9, r10, lsl #16 ; r10 | r9 - - ldrb r9, [r0] - - smuad lr, lr, r4 ; apply the filter - pkhbt r10, r10, r11, lsl #16 ; r11 | r10 - smuad r8, r8, r4 - pkhbt r11, r11, r9, lsl #16 ; r9 | r11 - - smlad lr, r10, r5, lr - ldrb r10, [r0, #1] - smlad r8, r11, r5, r8 - ldrb r11, [r0, #2] - - sub r7, r7, #1 - - pkhbt r9, r9, r10, lsl #16 ; r10 | r9 - pkhbt r10, r10, r11, lsl #16 ; r11 | r10 - - smlad lr, r9, r6, lr - smlad r11, r10, r6, r8 - - ands r10, r7, #0xff ; test loop counter - - add lr, lr, #0x40 ; round_shift_and_clamp - ldrneb r8, [r0, #-2] ; load data for next loop - usat lr, #8, lr, asr #7 - add r11, r11, #0x40 - ldrneb r9, [r0, #-1] - usat r11, #8, r11, asr #7 - - strh lr, [r1], r12 ; result is transposed and stored, which - ; will make second pass filtering easier. - ldrneb r10, [r0], #2 - strh r11, [r1], r12 - - bne width_loop_1st_8_6 - - ldr r1, [sp] ; load and update dst address - subs r7, r7, #0x10000 - add r0, r0, r2 ; move to next input line - - add r11, r2, #18 ; adding back block width(=8) - pld [r0, r11] ; preload next low - - add r1, r1, #2 ; move over to next column - str r1, [sp] - - bne height_loop_1st_8_6 - - add sp, sp, #4 - ldmia sp!, {r4 - r11, pc} - - ENDP - -;--------------------------------- -; r0 short *src_ptr, -; r1 unsigned char *output_ptr, -; r2 unsigned int output_pitch, -; r3 unsigned int cnt, -; stack const short *vp8_filter -;--------------------------------- -|vp8_filter_block2d_second_pass_armv6| PROC - stmdb sp!, {r4 - r11, lr} - - ldr r11, [sp, #36] ; vp8_filter address - sub sp, sp, #4 - mov r7, r3, lsl #16 ; height is top part of counter - str r1, [sp] ; push destination to stack - - ldr r4, [r11] ; load up packed filter coefficients - ldr r5, [r11, #4] - ldr r6, [r11, #8] - - pkhbt r12, r5, r4 ; pack the filter differently - pkhbt r11, r6, r5 - - sub r0, r0, #4 ; offset input buffer - -|height_loop_2nd| - ldr r8, [r0] ; load the 
data - ldr r9, [r0, #4] - orr r7, r7, r3, lsr #1 ; loop counter - -|width_loop_2nd| - smuad lr, r4, r8 ; apply filter - sub r7, r7, #1 - smulbt r8, r4, r8 - - ldr r10, [r0, #8] - - smlad lr, r5, r9, lr - smladx r8, r12, r9, r8 - - ldrh r9, [r0, #12] - - smlad lr, r6, r10, lr - smladx r8, r11, r10, r8 - - add r0, r0, #4 - smlatb r10, r6, r9, r8 - - add lr, lr, #0x40 ; round_shift_and_clamp - ands r8, r7, #0xff - usat lr, #8, lr, asr #7 - add r10, r10, #0x40 - strb lr, [r1], r2 ; the result is transposed back and stored - usat r10, #8, r10, asr #7 - - ldrne r8, [r0] ; load data for next loop - ldrne r9, [r0, #4] - strb r10, [r1], r2 - - bne width_loop_2nd - - ldr r1, [sp] ; update dst for next loop - subs r7, r7, #0x10000 - add r0, r0, #16 ; updata src for next loop - add r1, r1, #1 - str r1, [sp] - - bne height_loop_2nd - - add sp, sp, #4 - ldmia sp!, {r4 - r11, pc} - - ENDP - -;--------------------------------- -; r0 short *src_ptr, -; r1 unsigned char *output_ptr, -; r2 unsigned int output_pitch, -; r3 unsigned int cnt, -; stack const short *vp8_filter -;--------------------------------- -|vp8_filter4_block2d_second_pass_armv6| PROC - stmdb sp!, {r4 - r11, lr} - - ldr r11, [sp, #36] ; vp8_filter address - mov r7, r3, lsl #16 ; height is top part of counter - - ldr r4, [r11] ; load up packed filter coefficients - add lr, r1, r3 ; save final destination pointer - ldr r5, [r11, #4] - ldr r6, [r11, #8] - - pkhbt r12, r5, r4 ; pack the filter differently - pkhbt r11, r6, r5 - mov r4, #0x40 ; rounding factor (for smlad{x}) - -|height_loop_2nd_4| - ldrd r8, r9, [r0, #-4] ; load the data - orr r7, r7, r3, lsr #1 ; loop counter - -|width_loop_2nd_4| - ldr r10, [r0, #4]! 
- smladx r6, r9, r12, r4 ; apply filter - pkhbt r8, r9, r8 - smlad r5, r8, r12, r4 - pkhbt r8, r10, r9 - smladx r6, r10, r11, r6 - sub r7, r7, #1 - smlad r5, r8, r11, r5 - - mov r8, r9 ; shift the data for the next loop - mov r9, r10 - - usat r6, #8, r6, asr #7 ; shift and clamp - usat r5, #8, r5, asr #7 - - strb r5, [r1], r2 ; the result is transposed back and stored - tst r7, #0xff - strb r6, [r1], r2 - - bne width_loop_2nd_4 - - subs r7, r7, #0x10000 - add r0, r0, #16 ; update src for next loop - sub r1, lr, r7, lsr #16 ; update dst for next loop - - bne height_loop_2nd_4 - - ldmia sp!, {r4 - r11, pc} - - ENDP - -;------------------------------------ -; r0 unsigned char *src_ptr -; r1 unsigned char *output_ptr, -; r2 unsigned int src_pixels_per_line -; r3 unsigned int cnt, -; stack unsigned int output_pitch, -; stack const short *vp8_filter -;------------------------------------ -|vp8_filter_block2d_first_pass_only_armv6| PROC - stmdb sp!, {r4 - r11, lr} - - add r7, r2, r3 ; preload next low - add r7, r7, #2 - pld [r0, r7] - - ldr r4, [sp, #36] ; output pitch - ldr r11, [sp, #40] ; HFilter address - sub sp, sp, #8 - - mov r7, r3 - sub r2, r2, r3 ; inside loop increments input array, - ; so the height loop only needs to add - ; r2 - width to the input pointer - - sub r4, r4, r3 - str r4, [sp] ; save modified output pitch - str r2, [sp, #4] - - mov r2, #0x40 - - ldr r4, [r11] ; load up packed filter coefficients - ldr r5, [r11, #4] - ldr r6, [r11, #8] - -; six tap filter -|height_loop_1st_only_6| - ldrb r8, [r0, #-2] ; load data - ldrb r9, [r0, #-1] - ldrb r10, [r0], #2 - - mov r12, r3, lsr #1 ; loop counter - -|width_loop_1st_only_6| - ldrb r11, [r0, #-1] - - pkhbt lr, r8, r9, lsl #16 ; r9 | r8 - pkhbt r8, r9, r10, lsl #16 ; r10 | r9 - - ldrb r9, [r0] - -;; smuad lr, lr, r4 - smlad lr, lr, r4, r2 - pkhbt r10, r10, r11, lsl #16 ; r11 | r10 -;; smuad r8, r8, r4 - smlad r8, r8, r4, r2 - pkhbt r11, r11, r9, lsl #16 ; r9 | r11 - - smlad lr, r10, r5, lr - ldrb r10, 
[r0, #1] - smlad r8, r11, r5, r8 - ldrb r11, [r0, #2] - - subs r12, r12, #1 - - pkhbt r9, r9, r10, lsl #16 ; r10 | r9 - pkhbt r10, r10, r11, lsl #16 ; r11 | r10 - - smlad lr, r9, r6, lr - smlad r10, r10, r6, r8 - -;; add lr, lr, #0x40 ; round_shift_and_clamp - ldrneb r8, [r0, #-2] ; load data for next loop - usat lr, #8, lr, asr #7 -;; add r10, r10, #0x40 - strb lr, [r1], #1 ; store the result - usat r10, #8, r10, asr #7 - - ldrneb r9, [r0, #-1] - strb r10, [r1], #1 - ldrneb r10, [r0], #2 - - bne width_loop_1st_only_6 - - ldr lr, [sp] ; load back output pitch - ldr r12, [sp, #4] ; load back output pitch - subs r7, r7, #1 - add r0, r0, r12 ; updata src for next loop - - add r11, r12, r3 ; preload next low - add r11, r11, #2 - pld [r0, r11] - - add r1, r1, lr ; update dst for next loop - - bne height_loop_1st_only_6 - - add sp, sp, #8 - ldmia sp!, {r4 - r11, pc} - ENDP ; |vp8_filter_block2d_first_pass_only_armv6| - - -;------------------------------------ -; r0 unsigned char *src_ptr, -; r1 unsigned char *output_ptr, -; r2 unsigned int src_pixels_per_line -; r3 unsigned int cnt, -; stack unsigned int output_pitch, -; stack const short *vp8_filter -;------------------------------------ -|vp8_filter_block2d_second_pass_only_armv6| PROC - stmdb sp!, {r4 - r11, lr} - - ldr r11, [sp, #40] ; VFilter address - ldr r12, [sp, #36] ; output pitch - - mov r7, r3, lsl #16 ; height is top part of counter - sub r0, r0, r2, lsl #1 ; need 6 elements for filtering, 2 before, 3 after - - sub sp, sp, #8 - - ldr r4, [r11] ; load up packed filter coefficients - ldr r5, [r11, #4] - ldr r6, [r11, #8] - - str r0, [sp] ; save r0 to stack - str r1, [sp, #4] ; save dst to stack - -; six tap filter -|width_loop_2nd_only_6| - ldrb r8, [r0], r2 ; load data - orr r7, r7, r3 ; loop counter - ldrb r9, [r0], r2 - ldrb r10, [r0], r2 - -|height_loop_2nd_only_6| - ; filter first column in this inner loop, than, move to next colum. 
- ldrb r11, [r0], r2 - - pkhbt lr, r8, r9, lsl #16 ; r9 | r8 - pkhbt r8, r9, r10, lsl #16 ; r10 | r9 - - ldrb r9, [r0], r2 - - smuad lr, lr, r4 - pkhbt r10, r10, r11, lsl #16 ; r11 | r10 - smuad r8, r8, r4 - pkhbt r11, r11, r9, lsl #16 ; r9 | r11 - - smlad lr, r10, r5, lr - ldrb r10, [r0], r2 - smlad r8, r11, r5, r8 - ldrb r11, [r0] - - sub r7, r7, #2 - sub r0, r0, r2, lsl #2 - - pkhbt r9, r9, r10, lsl #16 ; r10 | r9 - pkhbt r10, r10, r11, lsl #16 ; r11 | r10 - - smlad lr, r9, r6, lr - smlad r10, r10, r6, r8 - - ands r9, r7, #0xff - - add lr, lr, #0x40 ; round_shift_and_clamp - ldrneb r8, [r0], r2 ; load data for next loop - usat lr, #8, lr, asr #7 - add r10, r10, #0x40 - strb lr, [r1], r12 ; store the result for the column - usat r10, #8, r10, asr #7 - - ldrneb r9, [r0], r2 - strb r10, [r1], r12 - ldrneb r10, [r0], r2 - - bne height_loop_2nd_only_6 - - ldr r0, [sp] - ldr r1, [sp, #4] - subs r7, r7, #0x10000 - add r0, r0, #1 ; move to filter next column - str r0, [sp] - add r1, r1, #1 - str r1, [sp, #4] - - bne width_loop_2nd_only_6 - - add sp, sp, #8 - - ldmia sp!, {r4 - r11, pc} - ENDP ; |vp8_filter_block2d_second_pass_only_armv6| - - END diff --git a/libs/libvpx/vp8/common/arm/armv6/idct_blk_v6.c b/libs/libvpx/vp8/common/arm/armv6/idct_blk_v6.c deleted file mode 100644 index c94f84a62b..0000000000 --- a/libs/libvpx/vp8/common/arm/armv6/idct_blk_v6.c +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "vpx_config.h" -#include "vp8_rtcd.h" - - -void vp8_dequant_idct_add_y_block_v6(short *q, short *dq, - unsigned char *dst, - int stride, char *eobs) -{ - int i; - - for (i = 0; i < 4; i++) - { - if (eobs[0] > 1) - vp8_dequant_idct_add_v6 (q, dq, dst, stride); - else if (eobs[0] == 1) - { - vp8_dc_only_idct_add_v6 (q[0]*dq[0], dst, stride, dst, stride); - ((int *)q)[0] = 0; - } - - if (eobs[1] > 1) - vp8_dequant_idct_add_v6 (q+16, dq, dst+4, stride); - else if (eobs[1] == 1) - { - vp8_dc_only_idct_add_v6 (q[16]*dq[0], dst+4, stride, dst+4, stride); - ((int *)(q+16))[0] = 0; - } - - if (eobs[2] > 1) - vp8_dequant_idct_add_v6 (q+32, dq, dst+8, stride); - else if (eobs[2] == 1) - { - vp8_dc_only_idct_add_v6 (q[32]*dq[0], dst+8, stride, dst+8, stride); - ((int *)(q+32))[0] = 0; - } - - if (eobs[3] > 1) - vp8_dequant_idct_add_v6 (q+48, dq, dst+12, stride); - else if (eobs[3] == 1) - { - vp8_dc_only_idct_add_v6 (q[48]*dq[0], dst+12, stride,dst+12,stride); - ((int *)(q+48))[0] = 0; - } - - q += 64; - dst += 4*stride; - eobs += 4; - } -} - -void vp8_dequant_idct_add_uv_block_v6(short *q, short *dq, - unsigned char *dstu, - unsigned char *dstv, - int stride, char *eobs) -{ - int i; - - for (i = 0; i < 2; i++) - { - if (eobs[0] > 1) - vp8_dequant_idct_add_v6 (q, dq, dstu, stride); - else if (eobs[0] == 1) - { - vp8_dc_only_idct_add_v6 (q[0]*dq[0], dstu, stride, dstu, stride); - ((int *)q)[0] = 0; - } - - if (eobs[1] > 1) - vp8_dequant_idct_add_v6 (q+16, dq, dstu+4, stride); - else if (eobs[1] == 1) - { - vp8_dc_only_idct_add_v6 (q[16]*dq[0], dstu+4, stride, - dstu+4, stride); - ((int *)(q+16))[0] = 0; - } - - q += 32; - dstu += 4*stride; - eobs += 2; - } - - for (i = 0; i < 2; i++) - { - if (eobs[0] > 1) - vp8_dequant_idct_add_v6 (q, dq, dstv, stride); - else if (eobs[0] == 1) - { - vp8_dc_only_idct_add_v6 (q[0]*dq[0], dstv, stride, dstv, stride); - ((int *)q)[0] = 0; - } - - if (eobs[1] > 1) - vp8_dequant_idct_add_v6 (q+16, dq, dstv+4, stride); - else if 
(eobs[1] == 1) - { - vp8_dc_only_idct_add_v6 (q[16]*dq[0], dstv+4, stride, - dstv+4, stride); - ((int *)(q+16))[0] = 0; - } - - q += 32; - dstv += 4*stride; - eobs += 2; - } -} diff --git a/libs/libvpx/vp8/common/arm/armv6/idct_v6.asm b/libs/libvpx/vp8/common/arm/armv6/idct_v6.asm deleted file mode 100644 index b4d44cbeba..0000000000 --- a/libs/libvpx/vp8/common/arm/armv6/idct_v6.asm +++ /dev/null @@ -1,202 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - - - EXPORT |vp8_short_idct4x4llm_v6_dual| - - AREA |.text|, CODE, READONLY - - -; void vp8_short_idct4x4llm_c(short *input, unsigned char *pred, int pitch, -; unsigned char *dst, int stride) -; r0 short* input -; r1 unsigned char* pred -; r2 int pitch -; r3 unsigned char* dst -; sp int stride - -|vp8_short_idct4x4llm_v6_dual| PROC - stmdb sp!, {r4-r11, lr} - - sub sp, sp, #4 - - mov r4, #0x00008A00 ; sin - orr r4, r4, #0x0000008C ; sinpi8sqrt2 - - mov r5, #0x00004E00 ; cos - orr r5, r5, #0x0000007B ; cospi8sqrt2minus1 - orr r5, r5, #1<<31 ; loop counter on top bit - -loop1_dual - ldr r6, [r0, #(4*2)] ; i5 | i4 - ldr r12, [r0, #(12*2)] ; i13|i12 - ldr r14, [r0, #(8*2)] ; i9 | i8 - - smulbt r9, r5, r6 ; (ip[5] * cospi8sqrt2minus1) >> 16 - smulbb r7, r5, r6 ; (ip[4] * cospi8sqrt2minus1) >> 16 - smulwt r10, r4, r6 ; (ip[5] * sinpi8sqrt2) >> 16 - smulwb r8, r4, r6 ; (ip[4] * sinpi8sqrt2) >> 16 - - smulbt r11, r5, r12 ; (ip[13] * cospi8sqrt2minus1) >> 16 - pkhtb r7, r9, r7, asr #16 ; 5c | 4c - pkhbt r8, r8, r10, lsl #16 ; 5s | 4s - uadd16 r6, r6, r7 ; 5c+5 | 4c+4 - - smulwt r7, r4, r12 ; (ip[13] * sinpi8sqrt2) >> 16 - smulbb r9, r5, r12 ; (ip[12] * 
cospi8sqrt2minus1) >> 16 - smulwb r10, r4, r12 ; (ip[12] * sinpi8sqrt2) >> 16 - - subs r5, r5, #1<<31 ; i-- - - pkhtb r9, r11, r9, asr #16 ; 13c | 12c - ldr r11, [r0] ; i1 | i0 - pkhbt r10, r10, r7, lsl #16 ; 13s | 12s - uadd16 r7, r12, r9 ; 13c+13 | 12c+12 - - usub16 r7, r8, r7 ; c - uadd16 r6, r6, r10 ; d - uadd16 r10, r11, r14 ; a - usub16 r8, r11, r14 ; b - - uadd16 r9, r10, r6 ; a+d - usub16 r10, r10, r6 ; a-d - uadd16 r6, r8, r7 ; b+c - usub16 r7, r8, r7 ; b-c - - ; use input buffer to store intermediate results - str r6, [r0, #(4*2)] ; o5 | o4 - str r7, [r0, #(8*2)] ; o9 | o8 - str r10,[r0, #(12*2)] ; o13|o12 - str r9, [r0], #4 ; o1 | o0 - - bcs loop1_dual - - sub r0, r0, #8 ; reset input/output - str r0, [sp] - -loop2_dual - - ldr r6, [r0, #(4*2)] ; i5 | i4 - ldr r12,[r0, #(2*2)] ; i3 | i2 - ldr r14,[r0, #(6*2)] ; i7 | i6 - ldr r0, [r0, #(0*2)] ; i1 | i0 - - smulbt r9, r5, r6 ; (ip[5] * cospi8sqrt2minus1) >> 16 - smulbt r7, r5, r0 ; (ip[1] * cospi8sqrt2minus1) >> 16 - smulwt r10, r4, r6 ; (ip[5] * sinpi8sqrt2) >> 16 - smulwt r8, r4, r0 ; (ip[1] * sinpi8sqrt2) >> 16 - - pkhbt r11, r6, r0, lsl #16 ; i0 | i4 - pkhtb r7, r7, r9, asr #16 ; 1c | 5c - pkhtb r0, r0, r6, asr #16 ; i1 | i5 - pkhbt r8, r10, r8, lsl #16 ; 1s | 5s = temp1 - - uadd16 r0, r7, r0 ; 1c+1 | 5c+5 = temp2 - pkhbt r9, r14, r12, lsl #16 ; i2 | i6 - uadd16 r10, r11, r9 ; a - usub16 r9, r11, r9 ; b - pkhtb r6, r12, r14, asr #16 ; i3 | i7 - - subs r5, r5, #1<<31 ; i-- - - smulbt r7, r5, r6 ; (ip[3] * cospi8sqrt2minus1) >> 16 - smulwt r11, r4, r6 ; (ip[3] * sinpi8sqrt2) >> 16 - smulbb r12, r5, r6 ; (ip[7] * cospi8sqrt2minus1) >> 16 - smulwb r14, r4, r6 ; (ip[7] * sinpi8sqrt2) >> 16 - - pkhtb r7, r7, r12, asr #16 ; 3c | 7c - pkhbt r11, r14, r11, lsl #16 ; 3s | 7s = temp1 - - uadd16 r6, r7, r6 ; 3c+3 | 7c+7 = temp2 - usub16 r12, r8, r6 ; c (o1 | o5) - uadd16 r6, r11, r0 ; d (o3 | o7) - uadd16 r7, r10, r6 ; a+d - - mov r8, #4 ; set up 4's - orr r8, r8, #0x40000 ; 4|4 - - usub16 r6, r10, r6 ; a-d - 
uadd16 r6, r6, r8 ; a-d+4, 3|7 - uadd16 r7, r7, r8 ; a+d+4, 0|4 - uadd16 r10, r9, r12 ; b+c - usub16 r0, r9, r12 ; b-c - uadd16 r10, r10, r8 ; b+c+4, 1|5 - uadd16 r8, r0, r8 ; b-c+4, 2|6 - - ldr lr, [sp, #40] ; dst stride - - ldrb r0, [r1] ; pred p0 - ldrb r11, [r1, #1] ; pred p1 - ldrb r12, [r1, #2] ; pred p2 - - add r0, r0, r7, asr #19 ; p0 + o0 - add r11, r11, r10, asr #19 ; p1 + o1 - add r12, r12, r8, asr #19 ; p2 + o2 - - usat r0, #8, r0 ; d0 = clip8(p0 + o0) - usat r11, #8, r11 ; d1 = clip8(p1 + o1) - usat r12, #8, r12 ; d2 = clip8(p2 + o2) - - add r0, r0, r11, lsl #8 ; |--|--|d1|d0| - - ldrb r11, [r1, #3] ; pred p3 - - add r0, r0, r12, lsl #16 ; |--|d2|d1|d0| - - add r11, r11, r6, asr #19 ; p3 + o3 - - sxth r7, r7 ; - sxth r10, r10 ; - - usat r11, #8, r11 ; d3 = clip8(p3 + o3) - - sxth r8, r8 ; - sxth r6, r6 ; - - add r0, r0, r11, lsl #24 ; |d3|d2|d1|d0| - - ldrb r12, [r1, r2]! ; pred p4 - str r0, [r3], lr - ldrb r11, [r1, #1] ; pred p5 - - add r12, r12, r7, asr #3 ; p4 + o4 - add r11, r11, r10, asr #3 ; p5 + o5 - - usat r12, #8, r12 ; d4 = clip8(p4 + o4) - usat r11, #8, r11 ; d5 = clip8(p5 + o5) - - ldrb r7, [r1, #2] ; pred p6 - ldrb r10, [r1, #3] ; pred p6 - - add r12, r12, r11, lsl #8 ; |--|--|d5|d4| - - add r7, r7, r8, asr #3 ; p6 + o6 - add r10, r10, r6, asr #3 ; p7 + o7 - - ldr r0, [sp] ; load input pointer - - usat r7, #8, r7 ; d6 = clip8(p6 + o6) - usat r10, #8, r10 ; d7 = clip8(p7 + o7) - - add r12, r12, r7, lsl #16 ; |--|d6|d5|d4| - add r12, r12, r10, lsl #24 ; |d7|d6|d5|d4| - - str r12, [r3], lr - add r0, r0, #16 - add r1, r1, r2 ; pred + pitch - - bcs loop2_dual - - add sp, sp, #4 ; idct_output buffer - ldmia sp!, {r4 - r11, pc} - - ENDP - - END diff --git a/libs/libvpx/vp8/common/arm/armv6/iwalsh_v6.asm b/libs/libvpx/vp8/common/arm/armv6/iwalsh_v6.asm deleted file mode 100644 index 31ef09cada..0000000000 --- a/libs/libvpx/vp8/common/arm/armv6/iwalsh_v6.asm +++ /dev/null @@ -1,136 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. 
All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - - EXPORT |vp8_short_inv_walsh4x4_v6| - - ARM - REQUIRE8 - PRESERVE8 - - AREA |.text|, CODE, READONLY ; name this block of code - -;short vp8_short_inv_walsh4x4_v6(short *input, short *mb_dqcoeff) -|vp8_short_inv_walsh4x4_v6| PROC - - stmdb sp!, {r4 - r12, lr} - - ldr r2, [r0, #0] ; [1 | 0] - ldr r3, [r0, #4] ; [3 | 2] - ldr r4, [r0, #8] ; [5 | 4] - ldr r5, [r0, #12] ; [7 | 6] - ldr r6, [r0, #16] ; [9 | 8] - ldr r7, [r0, #20] ; [11 | 10] - ldr r8, [r0, #24] ; [13 | 12] - ldr r9, [r0, #28] ; [15 | 14] - - qadd16 r10, r2, r8 ; a1 [1+13 | 0+12] - qadd16 r11, r4, r6 ; b1 [5+9 | 4+8] - qsub16 r12, r4, r6 ; c1 [5-9 | 4-8] - qsub16 lr, r2, r8 ; d1 [1-13 | 0-12] - - qadd16 r2, r10, r11 ; a1 + b1 [1 | 0] - qadd16 r4, r12, lr ; c1 + d1 [5 | 4] - qsub16 r6, r10, r11 ; a1 - b1 [9 | 8] - qsub16 r8, lr, r12 ; d1 - c1 [13 | 12] - - qadd16 r10, r3, r9 ; a1 [3+15 | 2+14] - qadd16 r11, r5, r7 ; b1 [7+11 | 6+10] - qsub16 r12, r5, r7 ; c1 [7-11 | 6-10] - qsub16 lr, r3, r9 ; d1 [3-15 | 2-14] - - qadd16 r3, r10, r11 ; a1 + b1 [3 | 2] - qadd16 r5, r12, lr ; c1 + d1 [7 | 6] - qsub16 r7, r10, r11 ; a1 - b1 [11 | 10] - qsub16 r9, lr, r12 ; d1 - c1 [15 | 14] - - ; first transform complete - - qsubaddx r10, r2, r3 ; [c1|a1] [1-2 | 0+3] - qaddsubx r11, r2, r3 ; [b1|d1] [1+2 | 0-3] - qsubaddx r12, r4, r5 ; [c1|a1] [5-6 | 4+7] - qaddsubx lr, r4, r5 ; [b1|d1] [5+6 | 4-7] - - qaddsubx r2, r10, r11 ; [b2|c2] [c1+d1 | a1-b1] - qaddsubx r3, r11, r10 ; [a2|d2] [b1+a1 | d1-c1] - ldr r10, c0x00030003 - qaddsubx r4, r12, lr ; [b2|c2] [c1+d1 | a1-b1] - qaddsubx r5, lr, r12 ; [a2|d2] [b1+a1 | d1-c1] - - qadd16 r2, r2, r10 ; [b2+3|c2+3] - qadd16 r3, r3, r10 ; 
[a2+3|d2+3] - qadd16 r4, r4, r10 ; [b2+3|c2+3] - qadd16 r5, r5, r10 ; [a2+3|d2+3] - - asr r12, r3, #19 ; [0] - strh r12, [r1], #32 - asr lr, r2, #19 ; [1] - strh lr, [r1], #32 - sxth r2, r2 - sxth r3, r3 - asr r2, r2, #3 ; [2] - strh r2, [r1], #32 - asr r3, r3, #3 ; [3] - strh r3, [r1], #32 - - asr r12, r5, #19 ; [4] - strh r12, [r1], #32 - asr lr, r4, #19 ; [5] - strh lr, [r1], #32 - sxth r4, r4 - sxth r5, r5 - asr r4, r4, #3 ; [6] - strh r4, [r1], #32 - asr r5, r5, #3 ; [7] - strh r5, [r1], #32 - - qsubaddx r2, r6, r7 ; [c1|a1] [9-10 | 8+11] - qaddsubx r3, r6, r7 ; [b1|d1] [9+10 | 8-11] - qsubaddx r4, r8, r9 ; [c1|a1] [13-14 | 12+15] - qaddsubx r5, r8, r9 ; [b1|d1] [13+14 | 12-15] - - qaddsubx r6, r2, r3 ; [b2|c2] [c1+d1 | a1-b1] - qaddsubx r7, r3, r2 ; [a2|d2] [b1+a1 | d1-c1] - qaddsubx r8, r4, r5 ; [b2|c2] [c1+d1 | a1-b1] - qaddsubx r9, r5, r4 ; [a2|d2] [b1+a1 | d1-c1] - - qadd16 r6, r6, r10 ; [b2+3|c2+3] - qadd16 r7, r7, r10 ; [a2+3|d2+3] - qadd16 r8, r8, r10 ; [b2+3|c2+3] - qadd16 r9, r9, r10 ; [a2+3|d2+3] - - asr r12, r7, #19 ; [8] - strh r12, [r1], #32 - asr lr, r6, #19 ; [9] - strh lr, [r1], #32 - sxth r6, r6 - sxth r7, r7 - asr r6, r6, #3 ; [10] - strh r6, [r1], #32 - asr r7, r7, #3 ; [11] - strh r7, [r1], #32 - - asr r12, r9, #19 ; [12] - strh r12, [r1], #32 - asr lr, r8, #19 ; [13] - strh lr, [r1], #32 - sxth r8, r8 - sxth r9, r9 - asr r8, r8, #3 ; [14] - strh r8, [r1], #32 - asr r9, r9, #3 ; [15] - strh r9, [r1], #32 - - ldmia sp!, {r4 - r12, pc} - ENDP ; |vp8_short_inv_walsh4x4_v6| - - -; Constant Pool -c0x00030003 DCD 0x00030003 - END diff --git a/libs/libvpx/vp8/common/arm/armv6/loopfilter_v6.asm b/libs/libvpx/vp8/common/arm/armv6/loopfilter_v6.asm deleted file mode 100644 index 1cbbbcdef5..0000000000 --- a/libs/libvpx/vp8/common/arm/armv6/loopfilter_v6.asm +++ /dev/null @@ -1,1282 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
-; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - - - EXPORT |vp8_loop_filter_horizontal_edge_armv6| - EXPORT |vp8_mbloop_filter_horizontal_edge_armv6| - EXPORT |vp8_loop_filter_vertical_edge_armv6| - EXPORT |vp8_mbloop_filter_vertical_edge_armv6| - - AREA |.text|, CODE, READONLY ; name this block of code - - MACRO - TRANSPOSE_MATRIX $a0, $a1, $a2, $a3, $b0, $b1, $b2, $b3 - ; input: $a0, $a1, $a2, $a3; output: $b0, $b1, $b2, $b3 - ; a0: 03 02 01 00 - ; a1: 13 12 11 10 - ; a2: 23 22 21 20 - ; a3: 33 32 31 30 - ; b3 b2 b1 b0 - - uxtb16 $b1, $a1 ; xx 12 xx 10 - uxtb16 $b0, $a0 ; xx 02 xx 00 - uxtb16 $b3, $a3 ; xx 32 xx 30 - uxtb16 $b2, $a2 ; xx 22 xx 20 - orr $b1, $b0, $b1, lsl #8 ; 12 02 10 00 - orr $b3, $b2, $b3, lsl #8 ; 32 22 30 20 - - uxtb16 $a1, $a1, ror #8 ; xx 13 xx 11 - uxtb16 $a3, $a3, ror #8 ; xx 33 xx 31 - uxtb16 $a0, $a0, ror #8 ; xx 03 xx 01 - uxtb16 $a2, $a2, ror #8 ; xx 23 xx 21 - orr $a0, $a0, $a1, lsl #8 ; 13 03 11 01 - orr $a2, $a2, $a3, lsl #8 ; 33 23 31 21 - - pkhtb $b2, $b3, $b1, asr #16 ; 32 22 12 02 -- p1 - pkhbt $b0, $b1, $b3, lsl #16 ; 30 20 10 00 -- p3 - - pkhtb $b3, $a2, $a0, asr #16 ; 33 23 13 03 -- p0 - pkhbt $b1, $a0, $a2, lsl #16 ; 31 21 11 01 -- p2 - MEND - - -src RN r0 -pstep RN r1 -count RN r5 - -;r0 unsigned char *src_ptr, -;r1 int src_pixel_step, -;r2 const char *blimit, -;r3 const char *limit, -;stack const char *thresh, -;stack int count - -;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- -|vp8_loop_filter_horizontal_edge_armv6| PROC -;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - stmdb sp!, {r4 - r11, lr} - - sub src, src, pstep, lsl #2 ; move src pointer down by 4 lines - ldr count, [sp, 
#40] ; count for 8-in-parallel - ldr r6, [sp, #36] ; load thresh address - sub sp, sp, #16 ; create temp buffer - - ldr r9, [src], pstep ; p3 - ldrb r4, [r2] ; blimit - ldr r10, [src], pstep ; p2 - ldrb r2, [r3] ; limit - ldr r11, [src], pstep ; p1 - orr r4, r4, r4, lsl #8 - ldrb r3, [r6] ; thresh - orr r2, r2, r2, lsl #8 - mov count, count, lsl #1 ; 4-in-parallel - orr r4, r4, r4, lsl #16 - orr r3, r3, r3, lsl #8 - orr r2, r2, r2, lsl #16 - orr r3, r3, r3, lsl #16 - -|Hnext8| - ; vp8_filter_mask() function - ; calculate breakout conditions - ldr r12, [src], pstep ; p0 - - uqsub8 r6, r9, r10 ; p3 - p2 - uqsub8 r7, r10, r9 ; p2 - p3 - uqsub8 r8, r10, r11 ; p2 - p1 - uqsub8 r10, r11, r10 ; p1 - p2 - - orr r6, r6, r7 ; abs (p3-p2) - orr r8, r8, r10 ; abs (p2-p1) - uqsub8 lr, r6, r2 ; compare to limit. lr: vp8_filter_mask - uqsub8 r8, r8, r2 ; compare to limit - uqsub8 r6, r11, r12 ; p1 - p0 - orr lr, lr, r8 - uqsub8 r7, r12, r11 ; p0 - p1 - ldr r9, [src], pstep ; q0 - ldr r10, [src], pstep ; q1 - orr r6, r6, r7 ; abs (p1-p0) - uqsub8 r7, r6, r2 ; compare to limit - uqsub8 r8, r6, r3 ; compare to thresh -- save r8 for later - orr lr, lr, r7 - - uqsub8 r6, r11, r10 ; p1 - q1 - uqsub8 r7, r10, r11 ; q1 - p1 - uqsub8 r11, r12, r9 ; p0 - q0 - uqsub8 r12, r9, r12 ; q0 - p0 - orr r6, r6, r7 ; abs (p1-q1) - ldr r7, c0x7F7F7F7F - orr r12, r11, r12 ; abs (p0-q0) - ldr r11, [src], pstep ; q2 - uqadd8 r12, r12, r12 ; abs (p0-q0) * 2 - and r6, r7, r6, lsr #1 ; abs (p1-q1) / 2 - uqsub8 r7, r9, r10 ; q0 - q1 - uqadd8 r12, r12, r6 ; abs (p0-q0)*2 + abs (p1-q1)/2 - uqsub8 r6, r10, r9 ; q1 - q0 - uqsub8 r12, r12, r4 ; compare to flimit - uqsub8 r9, r11, r10 ; q2 - q1 - - orr lr, lr, r12 - - ldr r12, [src], pstep ; q3 - uqsub8 r10, r10, r11 ; q1 - q2 - orr r6, r7, r6 ; abs (q1-q0) - orr r10, r9, r10 ; abs (q2-q1) - uqsub8 r7, r6, r2 ; compare to limit - uqsub8 r10, r10, r2 ; compare to limit - uqsub8 r6, r6, r3 ; compare to thresh -- save r6 for later - orr lr, lr, r7 - orr lr, lr, r10 
- - uqsub8 r10, r12, r11 ; q3 - q2 - uqsub8 r9, r11, r12 ; q2 - q3 - - mvn r11, #0 ; r11 == -1 - - orr r10, r10, r9 ; abs (q3-q2) - uqsub8 r10, r10, r2 ; compare to limit - - mov r12, #0 - orr lr, lr, r10 - sub src, src, pstep, lsl #2 - - usub8 lr, r12, lr ; use usub8 instead of ssub8 - sel lr, r11, r12 ; filter mask: lr - - cmp lr, #0 - beq hskip_filter ; skip filtering - - sub src, src, pstep, lsl #1 ; move src pointer down by 6 lines - - ;vp8_hevmask() function - ;calculate high edge variance - orr r10, r6, r8 ; calculate vp8_hevmask - - ldr r7, [src], pstep ; p1 - - usub8 r10, r12, r10 ; use usub8 instead of ssub8 - sel r6, r12, r11 ; obtain vp8_hevmask: r6 - - ;vp8_filter() function - ldr r8, [src], pstep ; p0 - ldr r12, c0x80808080 - ldr r9, [src], pstep ; q0 - ldr r10, [src], pstep ; q1 - - eor r7, r7, r12 ; p1 offset to convert to a signed value - eor r8, r8, r12 ; p0 offset to convert to a signed value - eor r9, r9, r12 ; q0 offset to convert to a signed value - eor r10, r10, r12 ; q1 offset to convert to a signed value - - str r9, [sp] ; store qs0 temporarily - str r8, [sp, #4] ; store ps0 temporarily - str r10, [sp, #8] ; store qs1 temporarily - str r7, [sp, #12] ; store ps1 temporarily - - qsub8 r7, r7, r10 ; vp8_signed_char_clamp(ps1-qs1) - qsub8 r8, r9, r8 ; vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0)) - - and r7, r7, r6 ; vp8_filter (r7) &= hev - - qadd8 r7, r7, r8 - ldr r9, c0x03030303 ; r9 = 3 --modified for vp8 - - qadd8 r7, r7, r8 - ldr r10, c0x04040404 - - qadd8 r7, r7, r8 - and r7, r7, lr ; vp8_filter &= mask; - - ;modify code for vp8 -- Filter1 = vp8_filter (r7) - qadd8 r8 , r7 , r9 ; Filter2 (r8) = vp8_signed_char_clamp(vp8_filter+3) - qadd8 r7 , r7 , r10 ; vp8_filter = vp8_signed_char_clamp(vp8_filter+4) - - mov r9, #0 - shadd8 r8 , r8 , r9 ; Filter2 >>= 3 - shadd8 r7 , r7 , r9 ; vp8_filter >>= 3 - shadd8 r8 , r8 , r9 - shadd8 r7 , r7 , r9 - shadd8 lr , r8 , r9 ; lr: Filter2 - shadd8 r7 , r7 , r9 ; r7: filter - - ;usub8 lr, r8, r10 ; 
s = (s==4)*-1 - ;sel lr, r11, r9 - ;usub8 r8, r10, r8 - ;sel r8, r11, r9 - ;and r8, r8, lr ; -1 for each element that equals 4 - - ;calculate output - ;qadd8 lr, r8, r7 ; u = vp8_signed_char_clamp(s + vp8_filter) - - ldr r8, [sp] ; load qs0 - ldr r9, [sp, #4] ; load ps0 - - ldr r10, c0x01010101 - - qsub8 r8 ,r8, r7 ; u = vp8_signed_char_clamp(qs0 - vp8_filter) - qadd8 r9, r9, lr ; u = vp8_signed_char_clamp(ps0 + Filter2) - - ;end of modification for vp8 - - mov lr, #0 - sadd8 r7, r7 , r10 ; vp8_filter += 1 - shadd8 r7, r7, lr ; vp8_filter >>= 1 - - ldr r11, [sp, #12] ; load ps1 - ldr r10, [sp, #8] ; load qs1 - - bic r7, r7, r6 ; vp8_filter &= ~hev - sub src, src, pstep, lsl #2 - - qadd8 r11, r11, r7 ; u = vp8_signed_char_clamp(ps1 + vp8_filter) - qsub8 r10, r10,r7 ; u = vp8_signed_char_clamp(qs1 - vp8_filter) - - eor r11, r11, r12 ; *op1 = u^0x80 - str r11, [src], pstep ; store op1 - eor r9, r9, r12 ; *op0 = u^0x80 - str r9, [src], pstep ; store op0 result - eor r8, r8, r12 ; *oq0 = u^0x80 - str r8, [src], pstep ; store oq0 result - eor r10, r10, r12 ; *oq1 = u^0x80 - str r10, [src], pstep ; store oq1 - - sub src, src, pstep, lsl #1 - -|hskip_filter| - add src, src, #4 - sub src, src, pstep, lsl #2 - - subs count, count, #1 - - ldrne r9, [src], pstep ; p3 - ldrne r10, [src], pstep ; p2 - ldrne r11, [src], pstep ; p1 - - bne Hnext8 - - add sp, sp, #16 - ldmia sp!, {r4 - r11, pc} - ENDP ; |vp8_loop_filter_horizontal_edge_armv6| - - -;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- -|vp8_mbloop_filter_horizontal_edge_armv6| PROC -;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - stmdb sp!, {r4 - r11, lr} - - sub src, src, pstep, lsl #2 ; move src pointer down by 4 lines - ldr count, [sp, #40] ; count for 8-in-parallel - ldr r6, [sp, #36] ; load thresh address - sub sp, sp, #16 ; create temp buffer - - ldr r9, [src], pstep ; p3 - ldrb r4, [r2] ; blimit - ldr r10, [src], pstep ; p2 - ldrb r2, [r3] ; limit - 
ldr r11, [src], pstep ; p1 - orr r4, r4, r4, lsl #8 - ldrb r3, [r6] ; thresh - orr r2, r2, r2, lsl #8 - mov count, count, lsl #1 ; 4-in-parallel - orr r4, r4, r4, lsl #16 - orr r3, r3, r3, lsl #8 - orr r2, r2, r2, lsl #16 - orr r3, r3, r3, lsl #16 - -|MBHnext8| - - ; vp8_filter_mask() function - ; calculate breakout conditions - ldr r12, [src], pstep ; p0 - - uqsub8 r6, r9, r10 ; p3 - p2 - uqsub8 r7, r10, r9 ; p2 - p3 - uqsub8 r8, r10, r11 ; p2 - p1 - uqsub8 r10, r11, r10 ; p1 - p2 - - orr r6, r6, r7 ; abs (p3-p2) - orr r8, r8, r10 ; abs (p2-p1) - uqsub8 lr, r6, r2 ; compare to limit. lr: vp8_filter_mask - uqsub8 r8, r8, r2 ; compare to limit - - uqsub8 r6, r11, r12 ; p1 - p0 - orr lr, lr, r8 - uqsub8 r7, r12, r11 ; p0 - p1 - ldr r9, [src], pstep ; q0 - ldr r10, [src], pstep ; q1 - orr r6, r6, r7 ; abs (p1-p0) - uqsub8 r7, r6, r2 ; compare to limit - uqsub8 r8, r6, r3 ; compare to thresh -- save r8 for later - orr lr, lr, r7 - - uqsub8 r6, r11, r10 ; p1 - q1 - uqsub8 r7, r10, r11 ; q1 - p1 - uqsub8 r11, r12, r9 ; p0 - q0 - uqsub8 r12, r9, r12 ; q0 - p0 - orr r6, r6, r7 ; abs (p1-q1) - ldr r7, c0x7F7F7F7F - orr r12, r11, r12 ; abs (p0-q0) - ldr r11, [src], pstep ; q2 - uqadd8 r12, r12, r12 ; abs (p0-q0) * 2 - and r6, r7, r6, lsr #1 ; abs (p1-q1) / 2 - uqsub8 r7, r9, r10 ; q0 - q1 - uqadd8 r12, r12, r6 ; abs (p0-q0)*2 + abs (p1-q1)/2 - uqsub8 r6, r10, r9 ; q1 - q0 - uqsub8 r12, r12, r4 ; compare to flimit - uqsub8 r9, r11, r10 ; q2 - q1 - - orr lr, lr, r12 - - ldr r12, [src], pstep ; q3 - - uqsub8 r10, r10, r11 ; q1 - q2 - orr r6, r7, r6 ; abs (q1-q0) - orr r10, r9, r10 ; abs (q2-q1) - uqsub8 r7, r6, r2 ; compare to limit - uqsub8 r10, r10, r2 ; compare to limit - uqsub8 r6, r6, r3 ; compare to thresh -- save r6 for later - orr lr, lr, r7 - orr lr, lr, r10 - - uqsub8 r10, r12, r11 ; q3 - q2 - uqsub8 r9, r11, r12 ; q2 - q3 - - mvn r11, #0 ; r11 == -1 - - orr r10, r10, r9 ; abs (q3-q2) - uqsub8 r10, r10, r2 ; compare to limit - - mov r12, #0 - - orr lr, lr, r10 - - 
usub8 lr, r12, lr ; use usub8 instead of ssub8 - sel lr, r11, r12 ; filter mask: lr - - cmp lr, #0 - beq mbhskip_filter ; skip filtering - - ;vp8_hevmask() function - ;calculate high edge variance - sub src, src, pstep, lsl #2 ; move src pointer down by 6 lines - sub src, src, pstep, lsl #1 - - orr r10, r6, r8 - ldr r7, [src], pstep ; p1 - - usub8 r10, r12, r10 - sel r6, r12, r11 ; hev mask: r6 - - ;vp8_mbfilter() function - ;p2, q2 are only needed at the end. Don't need to load them in now. - ldr r8, [src], pstep ; p0 - ldr r12, c0x80808080 - ldr r9, [src], pstep ; q0 - ldr r10, [src] ; q1 - - eor r7, r7, r12 ; ps1 - eor r8, r8, r12 ; ps0 - eor r9, r9, r12 ; qs0 - eor r10, r10, r12 ; qs1 - - qsub8 r12, r9, r8 ; vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0)) - str r7, [sp, #12] ; store ps1 temporarily - qsub8 r7, r7, r10 ; vp8_signed_char_clamp(ps1-qs1) - str r10, [sp, #8] ; store qs1 temporarily - qadd8 r7, r7, r12 - str r9, [sp] ; store qs0 temporarily - qadd8 r7, r7, r12 - str r8, [sp, #4] ; store ps0 temporarily - qadd8 r7, r7, r12 ; vp8_filter: r7 - - ldr r10, c0x03030303 ; r10 = 3 --modified for vp8 - ldr r9, c0x04040404 - - and r7, r7, lr ; vp8_filter &= mask (lr is free) - - mov r12, r7 ; Filter2: r12 - and r12, r12, r6 ; Filter2 &= hev - - ;modify code for vp8 - ;save bottom 3 bits so that we round one side +4 and the other +3 - qadd8 r8 , r12 , r9 ; Filter1 (r8) = vp8_signed_char_clamp(Filter2+4) - qadd8 r12 , r12 , r10 ; Filter2 (r12) = vp8_signed_char_clamp(Filter2+3) - - mov r10, #0 - shadd8 r8 , r8 , r10 ; Filter1 >>= 3 - shadd8 r12 , r12 , r10 ; Filter2 >>= 3 - shadd8 r8 , r8 , r10 - shadd8 r12 , r12 , r10 - shadd8 r8 , r8 , r10 ; r8: Filter1 - shadd8 r12 , r12 , r10 ; r12: Filter2 - - ldr r9, [sp] ; load qs0 - ldr r11, [sp, #4] ; load ps0 - - qsub8 r9 , r9, r8 ; qs0 = vp8_signed_char_clamp(qs0 - Filter1) - qadd8 r11, r11, r12 ; ps0 = vp8_signed_char_clamp(ps0 + Filter2) - - ;save bottom 3 bits so that we round one side +4 and the other +3 - 
;and r8, r12, r10 ; s = Filter2 & 7 (s: r8) - ;qadd8 r12 , r12 , r9 ; Filter2 = vp8_signed_char_clamp(Filter2+4) - ;mov r10, #0 - ;shadd8 r12 , r12 , r10 ; Filter2 >>= 3 - ;usub8 lr, r8, r9 ; s = (s==4)*-1 - ;sel lr, r11, r10 - ;shadd8 r12 , r12 , r10 - ;usub8 r8, r9, r8 - ;sel r8, r11, r10 - ;ldr r9, [sp] ; load qs0 - ;ldr r11, [sp, #4] ; load ps0 - ;shadd8 r12 , r12 , r10 - ;and r8, r8, lr ; -1 for each element that equals 4 - ;qadd8 r10, r8, r12 ; u = vp8_signed_char_clamp(s + Filter2) - ;qsub8 r9 , r9, r12 ; qs0 = vp8_signed_char_clamp(qs0 - Filter2) - ;qadd8 r11, r11, r10 ; ps0 = vp8_signed_char_clamp(ps0 + u) - - ;end of modification for vp8 - - bic r12, r7, r6 ; vp8_filter &= ~hev ( r6 is free) - ;mov r12, r7 - - ;roughly 3/7th difference across boundary - mov lr, #0x1b ; 27 - mov r7, #0x3f ; 63 - - sxtb16 r6, r12 - sxtb16 r10, r12, ror #8 - smlabb r8, r6, lr, r7 - smlatb r6, r6, lr, r7 - smlabb r7, r10, lr, r7 - smultb r10, r10, lr - ssat r8, #8, r8, asr #7 - ssat r6, #8, r6, asr #7 - add r10, r10, #63 - ssat r7, #8, r7, asr #7 - ssat r10, #8, r10, asr #7 - - ldr lr, c0x80808080 - - pkhbt r6, r8, r6, lsl #16 - pkhbt r10, r7, r10, lsl #16 - uxtb16 r6, r6 - uxtb16 r10, r10 - - sub src, src, pstep - - orr r10, r6, r10, lsl #8 ; u = vp8_signed_char_clamp((63 + Filter2 * 27)>>7) - - qsub8 r8, r9, r10 ; s = vp8_signed_char_clamp(qs0 - u) - qadd8 r10, r11, r10 ; s = vp8_signed_char_clamp(ps0 + u) - eor r8, r8, lr ; *oq0 = s^0x80 - str r8, [src] ; store *oq0 - sub src, src, pstep - eor r10, r10, lr ; *op0 = s^0x80 - str r10, [src] ; store *op0 - - ;roughly 2/7th difference across boundary - mov lr, #0x12 ; 18 - mov r7, #0x3f ; 63 - - sxtb16 r6, r12 - sxtb16 r10, r12, ror #8 - smlabb r8, r6, lr, r7 - smlatb r6, r6, lr, r7 - smlabb r9, r10, lr, r7 - smlatb r10, r10, lr, r7 - ssat r8, #8, r8, asr #7 - ssat r6, #8, r6, asr #7 - ssat r9, #8, r9, asr #7 - ssat r10, #8, r10, asr #7 - - ldr lr, c0x80808080 - - pkhbt r6, r8, r6, lsl #16 - pkhbt r10, r9, r10, lsl #16 - - ldr 
r9, [sp, #8] ; load qs1 - ldr r11, [sp, #12] ; load ps1 - - uxtb16 r6, r6 - uxtb16 r10, r10 - - sub src, src, pstep - - orr r10, r6, r10, lsl #8 ; u = vp8_signed_char_clamp((63 + Filter2 * 18)>>7) - - qadd8 r11, r11, r10 ; s = vp8_signed_char_clamp(ps1 + u) - qsub8 r8, r9, r10 ; s = vp8_signed_char_clamp(qs1 - u) - eor r11, r11, lr ; *op1 = s^0x80 - str r11, [src], pstep ; store *op1 - eor r8, r8, lr ; *oq1 = s^0x80 - add src, src, pstep, lsl #1 - - mov r7, #0x3f ; 63 - - str r8, [src], pstep ; store *oq1 - - ;roughly 1/7th difference across boundary - mov lr, #0x9 ; 9 - ldr r9, [src] ; load q2 - - sxtb16 r6, r12 - sxtb16 r10, r12, ror #8 - smlabb r8, r6, lr, r7 - smlatb r6, r6, lr, r7 - smlabb r12, r10, lr, r7 - smlatb r10, r10, lr, r7 - ssat r8, #8, r8, asr #7 - ssat r6, #8, r6, asr #7 - ssat r12, #8, r12, asr #7 - ssat r10, #8, r10, asr #7 - - sub src, src, pstep, lsl #2 - - pkhbt r6, r8, r6, lsl #16 - pkhbt r10, r12, r10, lsl #16 - - sub src, src, pstep - ldr lr, c0x80808080 - - ldr r11, [src] ; load p2 - - uxtb16 r6, r6 - uxtb16 r10, r10 - - eor r9, r9, lr - eor r11, r11, lr - - orr r10, r6, r10, lsl #8 ; u = vp8_signed_char_clamp((63 + Filter2 * 9)>>7) - - qadd8 r8, r11, r10 ; s = vp8_signed_char_clamp(ps2 + u) - qsub8 r10, r9, r10 ; s = vp8_signed_char_clamp(qs2 - u) - eor r8, r8, lr ; *op2 = s^0x80 - str r8, [src], pstep, lsl #2 ; store *op2 - add src, src, pstep - eor r10, r10, lr ; *oq2 = s^0x80 - str r10, [src], pstep, lsl #1 ; store *oq2 - -|mbhskip_filter| - add src, src, #4 - sub src, src, pstep, lsl #3 - subs count, count, #1 - - ldrne r9, [src], pstep ; p3 - ldrne r10, [src], pstep ; p2 - ldrne r11, [src], pstep ; p1 - - bne MBHnext8 - - add sp, sp, #16 - ldmia sp!, {r4 - r11, pc} - ENDP ; |vp8_mbloop_filter_horizontal_edge_armv6| - - -;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- -|vp8_loop_filter_vertical_edge_armv6| PROC -;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - stmdb sp!, 
{r4 - r11, lr} - - sub src, src, #4 ; move src pointer down by 4 - ldr count, [sp, #40] ; count for 8-in-parallel - ldr r12, [sp, #36] ; load thresh address - sub sp, sp, #16 ; create temp buffer - - ldr r6, [src], pstep ; load source data - ldrb r4, [r2] ; blimit - ldr r7, [src], pstep - ldrb r2, [r3] ; limit - ldr r8, [src], pstep - orr r4, r4, r4, lsl #8 - ldrb r3, [r12] ; thresh - orr r2, r2, r2, lsl #8 - ldr lr, [src], pstep - mov count, count, lsl #1 ; 4-in-parallel - orr r4, r4, r4, lsl #16 - orr r3, r3, r3, lsl #8 - orr r2, r2, r2, lsl #16 - orr r3, r3, r3, lsl #16 - -|Vnext8| - - ; vp8_filter_mask() function - ; calculate breakout conditions - ; transpose the source data for 4-in-parallel operation - TRANSPOSE_MATRIX r6, r7, r8, lr, r9, r10, r11, r12 - - uqsub8 r7, r9, r10 ; p3 - p2 - uqsub8 r8, r10, r9 ; p2 - p3 - uqsub8 r9, r10, r11 ; p2 - p1 - uqsub8 r10, r11, r10 ; p1 - p2 - orr r7, r7, r8 ; abs (p3-p2) - orr r10, r9, r10 ; abs (p2-p1) - uqsub8 lr, r7, r2 ; compare to limit. lr: vp8_filter_mask - uqsub8 r10, r10, r2 ; compare to limit - - sub src, src, pstep, lsl #2 ; move src pointer down by 4 lines - - orr lr, lr, r10 - - uqsub8 r6, r11, r12 ; p1 - p0 - uqsub8 r7, r12, r11 ; p0 - p1 - add src, src, #4 ; move src pointer up by 4 - orr r6, r6, r7 ; abs (p1-p0) - str r11, [sp, #12] ; save p1 - uqsub8 r10, r6, r2 ; compare to limit - uqsub8 r11, r6, r3 ; compare to thresh - orr lr, lr, r10 - - ; transpose uses 8 regs(r6 - r12 and lr). 
Need to save reg value now - ; transpose the source data for 4-in-parallel operation - ldr r6, [src], pstep ; load source data - str r11, [sp] ; push r11 to stack - ldr r7, [src], pstep - str r12, [sp, #4] ; save current reg before load q0 - q3 data - ldr r8, [src], pstep - str lr, [sp, #8] - ldr lr, [src], pstep - - TRANSPOSE_MATRIX r6, r7, r8, lr, r9, r10, r11, r12 - - ldr lr, [sp, #8] ; load back (f)limit accumulator - - uqsub8 r6, r12, r11 ; q3 - q2 - uqsub8 r7, r11, r12 ; q2 - q3 - uqsub8 r12, r11, r10 ; q2 - q1 - uqsub8 r11, r10, r11 ; q1 - q2 - orr r6, r6, r7 ; abs (q3-q2) - orr r7, r12, r11 ; abs (q2-q1) - uqsub8 r6, r6, r2 ; compare to limit - uqsub8 r7, r7, r2 ; compare to limit - ldr r11, [sp, #4] ; load back p0 - ldr r12, [sp, #12] ; load back p1 - orr lr, lr, r6 - orr lr, lr, r7 - - uqsub8 r6, r11, r9 ; p0 - q0 - uqsub8 r7, r9, r11 ; q0 - p0 - uqsub8 r8, r12, r10 ; p1 - q1 - uqsub8 r11, r10, r12 ; q1 - p1 - orr r6, r6, r7 ; abs (p0-q0) - ldr r7, c0x7F7F7F7F - orr r8, r8, r11 ; abs (p1-q1) - uqadd8 r6, r6, r6 ; abs (p0-q0) * 2 - and r8, r7, r8, lsr #1 ; abs (p1-q1) / 2 - uqsub8 r11, r10, r9 ; q1 - q0 - uqadd8 r6, r8, r6 ; abs (p0-q0)*2 + abs (p1-q1)/2 - uqsub8 r12, r9, r10 ; q0 - q1 - uqsub8 r6, r6, r4 ; compare to flimit - - orr r9, r11, r12 ; abs (q1-q0) - uqsub8 r8, r9, r2 ; compare to limit - uqsub8 r10, r9, r3 ; compare to thresh - orr lr, lr, r6 - orr lr, lr, r8 - - mvn r11, #0 ; r11 == -1 - mov r12, #0 - - usub8 lr, r12, lr - ldr r9, [sp] ; load the compared result - sel lr, r11, r12 ; filter mask: lr - - cmp lr, #0 - beq vskip_filter ; skip filtering - - ;vp8_hevmask() function - ;calculate high edge variance - - sub src, src, pstep, lsl #2 ; move src pointer down by 4 lines - - orr r9, r9, r10 - - ldrh r7, [src, #-2] - ldrh r8, [src], pstep - - usub8 r9, r12, r9 - sel r6, r12, r11 ; hev mask: r6 - - ;vp8_filter() function - ; load soure data to r6, r11, r12, lr - ldrh r9, [src, #-2] - ldrh r10, [src], pstep - - pkhbt r12, r7, r8, lsl #16 - - 
ldrh r7, [src, #-2] - ldrh r8, [src], pstep - - pkhbt r11, r9, r10, lsl #16 - - ldrh r9, [src, #-2] - ldrh r10, [src], pstep - - ; Transpose needs 8 regs(r6 - r12, and lr). Save r6 and lr first - str r6, [sp] - str lr, [sp, #4] - - pkhbt r6, r7, r8, lsl #16 - pkhbt lr, r9, r10, lsl #16 - - ;transpose r12, r11, r6, lr to r7, r8, r9, r10 - TRANSPOSE_MATRIX r12, r11, r6, lr, r7, r8, r9, r10 - - ;load back hev_mask r6 and filter_mask lr - ldr r12, c0x80808080 - ldr r6, [sp] - ldr lr, [sp, #4] - - eor r7, r7, r12 ; p1 offset to convert to a signed value - eor r8, r8, r12 ; p0 offset to convert to a signed value - eor r9, r9, r12 ; q0 offset to convert to a signed value - eor r10, r10, r12 ; q1 offset to convert to a signed value - - str r9, [sp] ; store qs0 temporarily - str r8, [sp, #4] ; store ps0 temporarily - str r10, [sp, #8] ; store qs1 temporarily - str r7, [sp, #12] ; store ps1 temporarily - - qsub8 r7, r7, r10 ; vp8_signed_char_clamp(ps1-qs1) - qsub8 r8, r9, r8 ; vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0)) - - and r7, r7, r6 ; vp8_filter (r7) &= hev (r7 : filter) - - qadd8 r7, r7, r8 - ldr r9, c0x03030303 ; r9 = 3 --modified for vp8 - - qadd8 r7, r7, r8 - ldr r10, c0x04040404 - - qadd8 r7, r7, r8 - ;mvn r11, #0 ; r11 == -1 - - and r7, r7, lr ; vp8_filter &= mask - - ;modify code for vp8 -- Filter1 = vp8_filter (r7) - qadd8 r8 , r7 , r9 ; Filter2 (r8) = vp8_signed_char_clamp(vp8_filter+3) - qadd8 r7 , r7 , r10 ; vp8_filter = vp8_signed_char_clamp(vp8_filter+4) - - mov r9, #0 - shadd8 r8 , r8 , r9 ; Filter2 >>= 3 - shadd8 r7 , r7 , r9 ; vp8_filter >>= 3 - shadd8 r8 , r8 , r9 - shadd8 r7 , r7 , r9 - shadd8 lr , r8 , r9 ; lr: filter2 - shadd8 r7 , r7 , r9 ; r7: filter - - ;usub8 lr, r8, r10 ; s = (s==4)*-1 - ;sel lr, r11, r9 - ;usub8 r8, r10, r8 - ;sel r8, r11, r9 - ;and r8, r8, lr ; -1 for each element that equals 4 -- r8: s - - ;calculate output - ;qadd8 lr, r8, r7 ; u = vp8_signed_char_clamp(s + vp8_filter) - - ldr r8, [sp] ; load qs0 - ldr r9, [sp, #4] 
; load ps0 - - ldr r10, c0x01010101 - - qsub8 r8, r8, r7 ; u = vp8_signed_char_clamp(qs0 - vp8_filter) - qadd8 r9, r9, lr ; u = vp8_signed_char_clamp(ps0 + Filter2) - ;end of modification for vp8 - - eor r8, r8, r12 - eor r9, r9, r12 - - mov lr, #0 - - sadd8 r7, r7, r10 - shadd8 r7, r7, lr - - ldr r10, [sp, #8] ; load qs1 - ldr r11, [sp, #12] ; load ps1 - - bic r7, r7, r6 ; r7: vp8_filter - - qsub8 r10 , r10, r7 ; u = vp8_signed_char_clamp(qs1 - vp8_filter) - qadd8 r11, r11, r7 ; u = vp8_signed_char_clamp(ps1 + vp8_filter) - eor r10, r10, r12 - eor r11, r11, r12 - - sub src, src, pstep, lsl #2 - - ;we can use TRANSPOSE_MATRIX macro to transpose output - input: q1, q0, p0, p1 - ;output is b0, b1, b2, b3 - ;b0: 03 02 01 00 - ;b1: 13 12 11 10 - ;b2: 23 22 21 20 - ;b3: 33 32 31 30 - ; p1 p0 q0 q1 - ; (a3 a2 a1 a0) - TRANSPOSE_MATRIX r11, r9, r8, r10, r6, r7, r12, lr - - strh r6, [src, #-2] ; store the result - mov r6, r6, lsr #16 - strh r6, [src], pstep - - strh r7, [src, #-2] - mov r7, r7, lsr #16 - strh r7, [src], pstep - - strh r12, [src, #-2] - mov r12, r12, lsr #16 - strh r12, [src], pstep - - strh lr, [src, #-2] - mov lr, lr, lsr #16 - strh lr, [src], pstep - -|vskip_filter| - sub src, src, #4 - subs count, count, #1 - - ldrne r6, [src], pstep ; load source data - ldrne r7, [src], pstep - ldrne r8, [src], pstep - ldrne lr, [src], pstep - - bne Vnext8 - - add sp, sp, #16 - - ldmia sp!, {r4 - r11, pc} - ENDP ; |vp8_loop_filter_vertical_edge_armv6| - - - -;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- -|vp8_mbloop_filter_vertical_edge_armv6| PROC -;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - stmdb sp!, {r4 - r11, lr} - - sub src, src, #4 ; move src pointer down by 4 - ldr count, [sp, #40] ; count for 8-in-parallel - ldr r12, [sp, #36] ; load thresh address - pld [src, #23] ; preload for next block - sub sp, sp, #16 ; create temp buffer - - ldr r6, [src], pstep ; load source data - ldrb r4, [r2] ; 
blimit - pld [src, #23] - ldr r7, [src], pstep - ldrb r2, [r3] ; limit - pld [src, #23] - ldr r8, [src], pstep - orr r4, r4, r4, lsl #8 - ldrb r3, [r12] ; thresh - orr r2, r2, r2, lsl #8 - pld [src, #23] - ldr lr, [src], pstep - mov count, count, lsl #1 ; 4-in-parallel - orr r4, r4, r4, lsl #16 - orr r3, r3, r3, lsl #8 - orr r2, r2, r2, lsl #16 - orr r3, r3, r3, lsl #16 - -|MBVnext8| - ; vp8_filter_mask() function - ; calculate breakout conditions - ; transpose the source data for 4-in-parallel operation - TRANSPOSE_MATRIX r6, r7, r8, lr, r9, r10, r11, r12 - - uqsub8 r7, r9, r10 ; p3 - p2 - uqsub8 r8, r10, r9 ; p2 - p3 - uqsub8 r9, r10, r11 ; p2 - p1 - uqsub8 r10, r11, r10 ; p1 - p2 - orr r7, r7, r8 ; abs (p3-p2) - orr r10, r9, r10 ; abs (p2-p1) - uqsub8 lr, r7, r2 ; compare to limit. lr: vp8_filter_mask - uqsub8 r10, r10, r2 ; compare to limit - - sub src, src, pstep, lsl #2 ; move src pointer down by 4 lines - - orr lr, lr, r10 - - uqsub8 r6, r11, r12 ; p1 - p0 - uqsub8 r7, r12, r11 ; p0 - p1 - add src, src, #4 ; move src pointer up by 4 - orr r6, r6, r7 ; abs (p1-p0) - str r11, [sp, #12] ; save p1 - uqsub8 r10, r6, r2 ; compare to limit - uqsub8 r11, r6, r3 ; compare to thresh - orr lr, lr, r10 - - ; transpose uses 8 regs(r6 - r12 and lr). 
Need to save reg value now - ; transpose the source data for 4-in-parallel operation - ldr r6, [src], pstep ; load source data - str r11, [sp] ; push r11 to stack - ldr r7, [src], pstep - str r12, [sp, #4] ; save current reg before load q0 - q3 data - ldr r8, [src], pstep - str lr, [sp, #8] - ldr lr, [src], pstep - - - TRANSPOSE_MATRIX r6, r7, r8, lr, r9, r10, r11, r12 - - ldr lr, [sp, #8] ; load back (f)limit accumulator - - uqsub8 r6, r12, r11 ; q3 - q2 - uqsub8 r7, r11, r12 ; q2 - q3 - uqsub8 r12, r11, r10 ; q2 - q1 - uqsub8 r11, r10, r11 ; q1 - q2 - orr r6, r6, r7 ; abs (q3-q2) - orr r7, r12, r11 ; abs (q2-q1) - uqsub8 r6, r6, r2 ; compare to limit - uqsub8 r7, r7, r2 ; compare to limit - ldr r11, [sp, #4] ; load back p0 - ldr r12, [sp, #12] ; load back p1 - orr lr, lr, r6 - orr lr, lr, r7 - - uqsub8 r6, r11, r9 ; p0 - q0 - uqsub8 r7, r9, r11 ; q0 - p0 - uqsub8 r8, r12, r10 ; p1 - q1 - uqsub8 r11, r10, r12 ; q1 - p1 - orr r6, r6, r7 ; abs (p0-q0) - ldr r7, c0x7F7F7F7F - orr r8, r8, r11 ; abs (p1-q1) - uqadd8 r6, r6, r6 ; abs (p0-q0) * 2 - and r8, r7, r8, lsr #1 ; abs (p1-q1) / 2 - uqsub8 r11, r10, r9 ; q1 - q0 - uqadd8 r6, r8, r6 ; abs (p0-q0)*2 + abs (p1-q1)/2 - uqsub8 r12, r9, r10 ; q0 - q1 - uqsub8 r6, r6, r4 ; compare to flimit - - orr r9, r11, r12 ; abs (q1-q0) - uqsub8 r8, r9, r2 ; compare to limit - uqsub8 r10, r9, r3 ; compare to thresh - orr lr, lr, r6 - orr lr, lr, r8 - - mvn r11, #0 ; r11 == -1 - mov r12, #0 - - usub8 lr, r12, lr - ldr r9, [sp] ; load the compared result - sel lr, r11, r12 ; filter mask: lr - - cmp lr, #0 - beq mbvskip_filter ; skip filtering - - - - ;vp8_hevmask() function - ;calculate high edge variance - - sub src, src, pstep, lsl #2 ; move src pointer down by 4 lines - - orr r9, r9, r10 - - ldrh r7, [src, #-2] - ldrh r8, [src], pstep - - usub8 r9, r12, r9 - sel r6, r12, r11 ; hev mask: r6 - - - ; vp8_mbfilter() function - ; p2, q2 are only needed at the end. Don't need to load them in now. 
- ; Transpose needs 8 regs(r6 - r12, and lr). Save r6 and lr first - ; load soure data to r6, r11, r12, lr - ldrh r9, [src, #-2] - ldrh r10, [src], pstep - - pkhbt r12, r7, r8, lsl #16 - - ldrh r7, [src, #-2] - ldrh r8, [src], pstep - - pkhbt r11, r9, r10, lsl #16 - - ldrh r9, [src, #-2] - ldrh r10, [src], pstep - - str r6, [sp] ; save r6 - str lr, [sp, #4] ; save lr - - pkhbt r6, r7, r8, lsl #16 - pkhbt lr, r9, r10, lsl #16 - - ;transpose r12, r11, r6, lr to p1, p0, q0, q1 - TRANSPOSE_MATRIX r12, r11, r6, lr, r7, r8, r9, r10 - - ;load back hev_mask r6 and filter_mask lr - ldr r12, c0x80808080 - ldr r6, [sp] - ldr lr, [sp, #4] - - eor r7, r7, r12 ; ps1 - eor r8, r8, r12 ; ps0 - eor r9, r9, r12 ; qs0 - eor r10, r10, r12 ; qs1 - - qsub8 r12, r9, r8 ; vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0)) - str r7, [sp, #12] ; store ps1 temporarily - qsub8 r7, r7, r10 ; vp8_signed_char_clamp(ps1-qs1) - str r10, [sp, #8] ; store qs1 temporarily - qadd8 r7, r7, r12 - str r9, [sp] ; store qs0 temporarily - qadd8 r7, r7, r12 - str r8, [sp, #4] ; store ps0 temporarily - qadd8 r7, r7, r12 ; vp8_filter: r7 - - ldr r10, c0x03030303 ; r10 = 3 --modified for vp8 - ldr r9, c0x04040404 - ;mvn r11, #0 ; r11 == -1 - - and r7, r7, lr ; vp8_filter &= mask (lr is free) - - mov r12, r7 ; Filter2: r12 - and r12, r12, r6 ; Filter2 &= hev - - ;modify code for vp8 - ;save bottom 3 bits so that we round one side +4 and the other +3 - qadd8 r8 , r12 , r9 ; Filter1 (r8) = vp8_signed_char_clamp(Filter2+4) - qadd8 r12 , r12 , r10 ; Filter2 (r12) = vp8_signed_char_clamp(Filter2+3) - - mov r10, #0 - shadd8 r8 , r8 , r10 ; Filter1 >>= 3 - shadd8 r12 , r12 , r10 ; Filter2 >>= 3 - shadd8 r8 , r8 , r10 - shadd8 r12 , r12 , r10 - shadd8 r8 , r8 , r10 ; r8: Filter1 - shadd8 r12 , r12 , r10 ; r12: Filter2 - - ldr r9, [sp] ; load qs0 - ldr r11, [sp, #4] ; load ps0 - - qsub8 r9 , r9, r8 ; qs0 = vp8_signed_char_clamp(qs0 - Filter1) - qadd8 r11, r11, r12 ; ps0 = vp8_signed_char_clamp(ps0 + Filter2) - - ;save 
bottom 3 bits so that we round one side +4 and the other +3 - ;and r8, r12, r10 ; s = Filter2 & 7 (s: r8) - ;qadd8 r12 , r12 , r9 ; Filter2 = vp8_signed_char_clamp(Filter2+4) - ;mov r10, #0 - ;shadd8 r12 , r12 , r10 ; Filter2 >>= 3 - ;usub8 lr, r8, r9 ; s = (s==4)*-1 - ;sel lr, r11, r10 - ;shadd8 r12 , r12 , r10 - ;usub8 r8, r9, r8 - ;sel r8, r11, r10 - ;ldr r9, [sp] ; load qs0 - ;ldr r11, [sp, #4] ; load ps0 - ;shadd8 r12 , r12 , r10 - ;and r8, r8, lr ; -1 for each element that equals 4 - ;qadd8 r10, r8, r12 ; u = vp8_signed_char_clamp(s + Filter2) - ;qsub8 r9 , r9, r12 ; qs0 = vp8_signed_char_clamp(qs0 - Filter2) - ;qadd8 r11, r11, r10 ; ps0 = vp8_signed_char_clamp(ps0 + u) - - ;end of modification for vp8 - - bic r12, r7, r6 ;vp8_filter &= ~hev ( r6 is free) - ;mov r12, r7 - - ;roughly 3/7th difference across boundary - mov lr, #0x1b ; 27 - mov r7, #0x3f ; 63 - - sxtb16 r6, r12 - sxtb16 r10, r12, ror #8 - smlabb r8, r6, lr, r7 - smlatb r6, r6, lr, r7 - smlabb r7, r10, lr, r7 - smultb r10, r10, lr - ssat r8, #8, r8, asr #7 - ssat r6, #8, r6, asr #7 - add r10, r10, #63 - ssat r7, #8, r7, asr #7 - ssat r10, #8, r10, asr #7 - - ldr lr, c0x80808080 - - pkhbt r6, r8, r6, lsl #16 - pkhbt r10, r7, r10, lsl #16 - uxtb16 r6, r6 - uxtb16 r10, r10 - - sub src, src, pstep, lsl #2 ; move src pointer down by 4 lines - - orr r10, r6, r10, lsl #8 ; u = vp8_signed_char_clamp((63 + Filter2 * 27)>>7) - - qsub8 r8, r9, r10 ; s = vp8_signed_char_clamp(qs0 - u) - qadd8 r10, r11, r10 ; s = vp8_signed_char_clamp(ps0 + u) - eor r8, r8, lr ; *oq0 = s^0x80 - eor r10, r10, lr ; *op0 = s^0x80 - - strb r10, [src, #-1] ; store op0 result - strb r8, [src], pstep ; store oq0 result - mov r10, r10, lsr #8 - mov r8, r8, lsr #8 - strb r10, [src, #-1] - strb r8, [src], pstep - mov r10, r10, lsr #8 - mov r8, r8, lsr #8 - strb r10, [src, #-1] - strb r8, [src], pstep - mov r10, r10, lsr #8 - mov r8, r8, lsr #8 - strb r10, [src, #-1] - strb r8, [src], pstep - - ;roughly 2/7th difference across boundary 
- mov lr, #0x12 ; 18 - mov r7, #0x3f ; 63 - - sxtb16 r6, r12 - sxtb16 r10, r12, ror #8 - smlabb r8, r6, lr, r7 - smlatb r6, r6, lr, r7 - smlabb r9, r10, lr, r7 - - smlatb r10, r10, lr, r7 - ssat r8, #8, r8, asr #7 - ssat r6, #8, r6, asr #7 - ssat r9, #8, r9, asr #7 - ssat r10, #8, r10, asr #7 - - sub src, src, pstep, lsl #2 ; move src pointer down by 4 lines - - pkhbt r6, r8, r6, lsl #16 - pkhbt r10, r9, r10, lsl #16 - - ldr r9, [sp, #8] ; load qs1 - ldr r11, [sp, #12] ; load ps1 - ldr lr, c0x80808080 - - uxtb16 r6, r6 - uxtb16 r10, r10 - - add src, src, #2 - - orr r10, r6, r10, lsl #8 ; u = vp8_signed_char_clamp((63 + Filter2 * 18)>>7) - - qsub8 r8, r9, r10 ; s = vp8_signed_char_clamp(qs1 - u) - qadd8 r10, r11, r10 ; s = vp8_signed_char_clamp(ps1 + u) - eor r8, r8, lr ; *oq1 = s^0x80 - eor r10, r10, lr ; *op1 = s^0x80 - - ldrb r11, [src, #-5] ; load p2 for 1/7th difference across boundary - strb r10, [src, #-4] ; store op1 - strb r8, [src, #-1] ; store oq1 - ldrb r9, [src], pstep ; load q2 for 1/7th difference across boundary - - mov r10, r10, lsr #8 - mov r8, r8, lsr #8 - - ldrb r6, [src, #-5] - strb r10, [src, #-4] - strb r8, [src, #-1] - ldrb r7, [src], pstep - - mov r10, r10, lsr #8 - mov r8, r8, lsr #8 - orr r11, r11, r6, lsl #8 - orr r9, r9, r7, lsl #8 - - ldrb r6, [src, #-5] - strb r10, [src, #-4] - strb r8, [src, #-1] - ldrb r7, [src], pstep - - mov r10, r10, lsr #8 - mov r8, r8, lsr #8 - orr r11, r11, r6, lsl #16 - orr r9, r9, r7, lsl #16 - - ldrb r6, [src, #-5] - strb r10, [src, #-4] - strb r8, [src, #-1] - ldrb r7, [src], pstep - orr r11, r11, r6, lsl #24 - orr r9, r9, r7, lsl #24 - - ;roughly 1/7th difference across boundary - eor r9, r9, lr - eor r11, r11, lr - - mov lr, #0x9 ; 9 - mov r7, #0x3f ; 63 - - sxtb16 r6, r12 - sxtb16 r10, r12, ror #8 - smlabb r8, r6, lr, r7 - smlatb r6, r6, lr, r7 - smlabb r12, r10, lr, r7 - smlatb r10, r10, lr, r7 - ssat r8, #8, r8, asr #7 - ssat r6, #8, r6, asr #7 - ssat r12, #8, r12, asr #7 - ssat r10, #8, r10, asr #7 - 
- sub src, src, pstep, lsl #2 - - pkhbt r6, r8, r6, lsl #16 - pkhbt r10, r12, r10, lsl #16 - - uxtb16 r6, r6 - uxtb16 r10, r10 - - ldr lr, c0x80808080 - - orr r10, r6, r10, lsl #8 ; u = vp8_signed_char_clamp((63 + Filter2 * 9)>>7) - - qadd8 r8, r11, r10 ; s = vp8_signed_char_clamp(ps2 + u) - qsub8 r10, r9, r10 ; s = vp8_signed_char_clamp(qs2 - u) - eor r8, r8, lr ; *op2 = s^0x80 - eor r10, r10, lr ; *oq2 = s^0x80 - - strb r8, [src, #-5] ; store *op2 - strb r10, [src], pstep ; store *oq2 - mov r8, r8, lsr #8 - mov r10, r10, lsr #8 - strb r8, [src, #-5] - strb r10, [src], pstep - mov r8, r8, lsr #8 - mov r10, r10, lsr #8 - strb r8, [src, #-5] - strb r10, [src], pstep - mov r8, r8, lsr #8 - mov r10, r10, lsr #8 - strb r8, [src, #-5] - strb r10, [src], pstep - - ;adjust src pointer for next loop - sub src, src, #2 - -|mbvskip_filter| - sub src, src, #4 - subs count, count, #1 - - pld [src, #23] ; preload for next block - ldrne r6, [src], pstep ; load source data - pld [src, #23] - ldrne r7, [src], pstep - pld [src, #23] - ldrne r8, [src], pstep - pld [src, #23] - ldrne lr, [src], pstep - - bne MBVnext8 - - add sp, sp, #16 - - ldmia sp!, {r4 - r11, pc} - ENDP ; |vp8_mbloop_filter_vertical_edge_armv6| - -; Constant Pool -c0x80808080 DCD 0x80808080 -c0x03030303 DCD 0x03030303 -c0x04040404 DCD 0x04040404 -c0x01010101 DCD 0x01010101 -c0x7F7F7F7F DCD 0x7F7F7F7F - - END diff --git a/libs/libvpx/vp8/common/arm/armv6/simpleloopfilter_v6.asm b/libs/libvpx/vp8/common/arm/armv6/simpleloopfilter_v6.asm deleted file mode 100644 index 5e00cf01bb..0000000000 --- a/libs/libvpx/vp8/common/arm/armv6/simpleloopfilter_v6.asm +++ /dev/null @@ -1,286 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. 
All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - - - EXPORT |vp8_loop_filter_simple_horizontal_edge_armv6| - EXPORT |vp8_loop_filter_simple_vertical_edge_armv6| - - AREA |.text|, CODE, READONLY ; name this block of code - - MACRO - TRANSPOSE_MATRIX $a0, $a1, $a2, $a3, $b0, $b1, $b2, $b3 - ; input: $a0, $a1, $a2, $a3; output: $b0, $b1, $b2, $b3 - ; a0: 03 02 01 00 - ; a1: 13 12 11 10 - ; a2: 23 22 21 20 - ; a3: 33 32 31 30 - ; b3 b2 b1 b0 - - uxtb16 $b1, $a1 ; xx 12 xx 10 - uxtb16 $b0, $a0 ; xx 02 xx 00 - uxtb16 $b3, $a3 ; xx 32 xx 30 - uxtb16 $b2, $a2 ; xx 22 xx 20 - orr $b1, $b0, $b1, lsl #8 ; 12 02 10 00 - orr $b3, $b2, $b3, lsl #8 ; 32 22 30 20 - - uxtb16 $a1, $a1, ror #8 ; xx 13 xx 11 - uxtb16 $a3, $a3, ror #8 ; xx 33 xx 31 - uxtb16 $a0, $a0, ror #8 ; xx 03 xx 01 - uxtb16 $a2, $a2, ror #8 ; xx 23 xx 21 - orr $a0, $a0, $a1, lsl #8 ; 13 03 11 01 - orr $a2, $a2, $a3, lsl #8 ; 33 23 31 21 - - pkhtb $b2, $b3, $b1, asr #16 ; 32 22 12 02 -- p1 - pkhbt $b0, $b1, $b3, lsl #16 ; 30 20 10 00 -- p3 - - pkhtb $b3, $a2, $a0, asr #16 ; 33 23 13 03 -- p0 - pkhbt $b1, $a0, $a2, lsl #16 ; 31 21 11 01 -- p2 - MEND - - - -src RN r0 -pstep RN r1 - -;r0 unsigned char *src_ptr, -;r1 int src_pixel_step, -;r2 const char *blimit - -;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- -|vp8_loop_filter_simple_horizontal_edge_armv6| PROC -;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - stmdb sp!, {r4 - r11, lr} - - ldrb r12, [r2] ; blimit - ldr r3, [src, -pstep, lsl #1] ; p1 - ldr r4, [src, -pstep] ; p0 - ldr r5, [src] ; q0 - ldr r6, [src, pstep] ; q1 - orr r12, r12, r12, lsl #8 ; blimit - ldr r2, c0x80808080 - orr r12, r12, r12, lsl #16 ; blimit - mov r9, #4 ; double the count. 
we're doing 4 at a time - mov lr, #0 ; need 0 in a couple places - -|simple_hnext8| - ; vp8_simple_filter_mask() - - uqsub8 r7, r3, r6 ; p1 - q1 - uqsub8 r8, r6, r3 ; q1 - p1 - uqsub8 r10, r4, r5 ; p0 - q0 - uqsub8 r11, r5, r4 ; q0 - p0 - orr r8, r8, r7 ; abs(p1 - q1) - orr r10, r10, r11 ; abs(p0 - q0) - uqadd8 r10, r10, r10 ; abs(p0 - q0) * 2 - uhadd8 r8, r8, lr ; abs(p1 - q2) >> 1 - uqadd8 r10, r10, r8 ; abs(p0 - q0)*2 + abs(p1 - q1)/2 - mvn r8, #0 - usub8 r10, r12, r10 ; compare to flimit. usub8 sets GE flags - sel r10, r8, lr ; filter mask: F or 0 - cmp r10, #0 - beq simple_hskip_filter ; skip filtering if all masks are 0x00 - - ;vp8_simple_filter() - - eor r3, r3, r2 ; p1 offset to convert to a signed value - eor r6, r6, r2 ; q1 offset to convert to a signed value - eor r4, r4, r2 ; p0 offset to convert to a signed value - eor r5, r5, r2 ; q0 offset to convert to a signed value - - qsub8 r3, r3, r6 ; vp8_filter = p1 - q1 - qsub8 r6, r5, r4 ; q0 - p0 - qadd8 r3, r3, r6 ; += q0 - p0 - ldr r7, c0x04040404 - qadd8 r3, r3, r6 ; += q0 - p0 - ldr r8, c0x03030303 - qadd8 r3, r3, r6 ; vp8_filter = p1-q1 + 3*(q0-p0)) - ;STALL - and r3, r3, r10 ; vp8_filter &= mask - - qadd8 r7 , r3 , r7 ; Filter1 = vp8_filter + 4 - qadd8 r8 , r3 , r8 ; Filter2 = vp8_filter + 3 - - shadd8 r7 , r7 , lr - shadd8 r8 , r8 , lr - shadd8 r7 , r7 , lr - shadd8 r8 , r8 , lr - shadd8 r7 , r7 , lr ; Filter1 >>= 3 - shadd8 r8 , r8 , lr ; Filter2 >>= 3 - - qsub8 r5 ,r5, r7 ; u = q0 - Filter1 - qadd8 r4, r4, r8 ; u = p0 + Filter2 - eor r5, r5, r2 ; *oq0 = u^0x80 - str r5, [src] ; store oq0 result - eor r4, r4, r2 ; *op0 = u^0x80 - str r4, [src, -pstep] ; store op0 result - -|simple_hskip_filter| - subs r9, r9, #1 - addne src, src, #4 ; next row - - ldrne r3, [src, -pstep, lsl #1] ; p1 - ldrne r4, [src, -pstep] ; p0 - ldrne r5, [src] ; q0 - ldrne r6, [src, pstep] ; q1 - - bne simple_hnext8 - - ldmia sp!, {r4 - r11, pc} - ENDP ; |vp8_loop_filter_simple_horizontal_edge_armv6| - - 
-;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- -|vp8_loop_filter_simple_vertical_edge_armv6| PROC -;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - stmdb sp!, {r4 - r11, lr} - - ldrb r12, [r2] ; r12: blimit - ldr r2, c0x80808080 - orr r12, r12, r12, lsl #8 - - ; load soure data to r7, r8, r9, r10 - ldrh r3, [src, #-2] - pld [src, #23] ; preload for next block - ldrh r4, [src], pstep - orr r12, r12, r12, lsl #16 - - ldrh r5, [src, #-2] - pld [src, #23] - ldrh r6, [src], pstep - - pkhbt r7, r3, r4, lsl #16 - - ldrh r3, [src, #-2] - pld [src, #23] - ldrh r4, [src], pstep - - pkhbt r8, r5, r6, lsl #16 - - ldrh r5, [src, #-2] - pld [src, #23] - ldrh r6, [src], pstep - mov r11, #4 ; double the count. we're doing 4 at a time - -|simple_vnext8| - ; vp8_simple_filter_mask() function - pkhbt r9, r3, r4, lsl #16 - pkhbt r10, r5, r6, lsl #16 - - ;transpose r7, r8, r9, r10 to r3, r4, r5, r6 - TRANSPOSE_MATRIX r7, r8, r9, r10, r3, r4, r5, r6 - - uqsub8 r7, r3, r6 ; p1 - q1 - uqsub8 r8, r6, r3 ; q1 - p1 - uqsub8 r9, r4, r5 ; p0 - q0 - uqsub8 r10, r5, r4 ; q0 - p0 - orr r7, r7, r8 ; abs(p1 - q1) - orr r9, r9, r10 ; abs(p0 - q0) - mov r8, #0 - uqadd8 r9, r9, r9 ; abs(p0 - q0) * 2 - uhadd8 r7, r7, r8 ; abs(p1 - q1) / 2 - uqadd8 r7, r7, r9 ; abs(p0 - q0)*2 + abs(p1 - q1)/2 - mvn r10, #0 ; r10 == -1 - - usub8 r7, r12, r7 ; compare to flimit - sel lr, r10, r8 ; filter mask - - cmp lr, #0 - beq simple_vskip_filter ; skip filtering - - ;vp8_simple_filter() function - eor r3, r3, r2 ; p1 offset to convert to a signed value - eor r6, r6, r2 ; q1 offset to convert to a signed value - eor r4, r4, r2 ; p0 offset to convert to a signed value - eor r5, r5, r2 ; q0 offset to convert to a signed value - - qsub8 r3, r3, r6 ; vp8_filter = p1 - q1 - qsub8 r6, r5, r4 ; q0 - p0 - - qadd8 r3, r3, r6 ; vp8_filter += q0 - p0 - ldr r9, c0x03030303 ; r9 = 3 - - qadd8 r3, r3, r6 ; vp8_filter += q0 - p0 - ldr r7, c0x04040404 - - qadd8 r3, r3, 
r6 ; vp8_filter = p1-q1 + 3*(q0-p0)) - ;STALL - and r3, r3, lr ; vp8_filter &= mask - - qadd8 r9 , r3 , r9 ; Filter2 = vp8_filter + 3 - qadd8 r3 , r3 , r7 ; Filter1 = vp8_filter + 4 - - shadd8 r9 , r9 , r8 - shadd8 r3 , r3 , r8 - shadd8 r9 , r9 , r8 - shadd8 r3 , r3 , r8 - shadd8 r9 , r9 , r8 ; Filter2 >>= 3 - shadd8 r3 , r3 , r8 ; Filter1 >>= 3 - - ;calculate output - sub src, src, pstep, lsl #2 - - qadd8 r4, r4, r9 ; u = p0 + Filter2 - qsub8 r5, r5, r3 ; u = q0 - Filter1 - eor r4, r4, r2 ; *op0 = u^0x80 - eor r5, r5, r2 ; *oq0 = u^0x80 - - strb r4, [src, #-1] ; store the result - mov r4, r4, lsr #8 - strb r5, [src], pstep - mov r5, r5, lsr #8 - - strb r4, [src, #-1] - mov r4, r4, lsr #8 - strb r5, [src], pstep - mov r5, r5, lsr #8 - - strb r4, [src, #-1] - mov r4, r4, lsr #8 - strb r5, [src], pstep - mov r5, r5, lsr #8 - - strb r4, [src, #-1] - strb r5, [src], pstep - -|simple_vskip_filter| - subs r11, r11, #1 - - ; load soure data to r7, r8, r9, r10 - ldrneh r3, [src, #-2] - pld [src, #23] ; preload for next block - ldrneh r4, [src], pstep - - ldrneh r5, [src, #-2] - pld [src, #23] - ldrneh r6, [src], pstep - - pkhbt r7, r3, r4, lsl #16 - - ldrneh r3, [src, #-2] - pld [src, #23] - ldrneh r4, [src], pstep - - pkhbt r8, r5, r6, lsl #16 - - ldrneh r5, [src, #-2] - pld [src, #23] - ldrneh r6, [src], pstep - - bne simple_vnext8 - - ldmia sp!, {r4 - r11, pc} - ENDP ; |vp8_loop_filter_simple_vertical_edge_armv6| - -; Constant Pool -c0x80808080 DCD 0x80808080 -c0x03030303 DCD 0x03030303 -c0x04040404 DCD 0x04040404 - - END diff --git a/libs/libvpx/vp8/common/arm/armv6/sixtappredict8x4_v6.asm b/libs/libvpx/vp8/common/arm/armv6/sixtappredict8x4_v6.asm deleted file mode 100644 index e81aef53d5..0000000000 --- a/libs/libvpx/vp8/common/arm/armv6/sixtappredict8x4_v6.asm +++ /dev/null @@ -1,273 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
-; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - - - EXPORT |vp8_sixtap_predict8x4_armv6| - - AREA |.text|, CODE, READONLY ; name this block of code -;------------------------------------- -; r0 unsigned char *src_ptr, -; r1 int src_pixels_per_line, -; r2 int xoffset, -; r3 int yoffset, -; stack unsigned char *dst_ptr, -; stack int dst_pitch -;------------------------------------- -;note: In first pass, store the result in transpose(8linesx9columns) on stack. Temporary stack size is 184. -;Line width is 20 that is 9 short data plus 2 to make it 4bytes aligned. In second pass, load data from stack, -;and the result is stored in transpose. -|vp8_sixtap_predict8x4_armv6| PROC - stmdb sp!, {r4 - r11, lr} - str r3, [sp, #-184]! ;reserve space on stack for temporary storage, store yoffset - - cmp r2, #0 ;skip first_pass filter if xoffset=0 - add lr, sp, #4 ;point to temporary buffer - beq skip_firstpass_filter - -;first-pass filter - adr r12, filter8_coeff - sub r0, r0, r1, lsl #1 - - add r3, r1, #10 ; preload next low - pld [r0, r3] - - add r2, r12, r2, lsl #4 ;calculate filter location - add r0, r0, #3 ;adjust src only for loading convinience - - ldr r3, [r2] ; load up packed filter coefficients - ldr r4, [r2, #4] - ldr r5, [r2, #8] - - mov r2, #0x90000 ; height=9 is top part of counter - - sub r1, r1, #8 - -|first_pass_hloop_v6| - ldrb r6, [r0, #-5] ; load source data - ldrb r7, [r0, #-4] - ldrb r8, [r0, #-3] - ldrb r9, [r0, #-2] - ldrb r10, [r0, #-1] - - orr r2, r2, #0x4 ; construct loop counter. 
width=8=4x2 - - pkhbt r6, r6, r7, lsl #16 ; r7 | r6 - pkhbt r7, r7, r8, lsl #16 ; r8 | r7 - - pkhbt r8, r8, r9, lsl #16 ; r9 | r8 - pkhbt r9, r9, r10, lsl #16 ; r10 | r9 - -|first_pass_wloop_v6| - smuad r11, r6, r3 ; vp8_filter[0], vp8_filter[1] - smuad r12, r7, r3 - - ldrb r6, [r0], #1 - - smlad r11, r8, r4, r11 ; vp8_filter[2], vp8_filter[3] - ldrb r7, [r0], #1 - smlad r12, r9, r4, r12 - - pkhbt r10, r10, r6, lsl #16 ; r10 | r9 - pkhbt r6, r6, r7, lsl #16 ; r11 | r10 - smlad r11, r10, r5, r11 ; vp8_filter[4], vp8_filter[5] - smlad r12, r6, r5, r12 - - sub r2, r2, #1 - - add r11, r11, #0x40 ; round_shift_and_clamp - tst r2, #0xff ; test loop counter - usat r11, #8, r11, asr #7 - add r12, r12, #0x40 - strh r11, [lr], #20 ; result is transposed and stored, which - usat r12, #8, r12, asr #7 - - strh r12, [lr], #20 - - movne r11, r6 - movne r12, r7 - - movne r6, r8 - movne r7, r9 - movne r8, r10 - movne r9, r11 - movne r10, r12 - - bne first_pass_wloop_v6 - - ;;add r9, ppl, #30 ; attempt to load 2 adjacent cache lines - ;;IF ARCHITECTURE=6 - ;pld [src, ppl] - ;;pld [src, r9] - ;;ENDIF - - subs r2, r2, #0x10000 - - sub lr, lr, #158 - - add r0, r0, r1 ; move to next input line - - add r11, r1, #18 ; preload next low. 
adding back block width(=8), which is subtracted earlier - pld [r0, r11] - - bne first_pass_hloop_v6 - -;second pass filter -secondpass_filter - ldr r3, [sp], #4 ; load back yoffset - ldr r0, [sp, #216] ; load dst address from stack 180+36 - ldr r1, [sp, #220] ; load dst stride from stack 180+40 - - cmp r3, #0 - beq skip_secondpass_filter - - adr r12, filter8_coeff - add lr, r12, r3, lsl #4 ;calculate filter location - - mov r2, #0x00080000 - - ldr r3, [lr] ; load up packed filter coefficients - ldr r4, [lr, #4] - ldr r5, [lr, #8] - - pkhbt r12, r4, r3 ; pack the filter differently - pkhbt r11, r5, r4 - -second_pass_hloop_v6 - ldr r6, [sp] ; load the data - ldr r7, [sp, #4] - - orr r2, r2, #2 ; loop counter - -second_pass_wloop_v6 - smuad lr, r3, r6 ; apply filter - smulbt r10, r3, r6 - - ldr r8, [sp, #8] - - smlad lr, r4, r7, lr - smladx r10, r12, r7, r10 - - ldrh r9, [sp, #12] - - smlad lr, r5, r8, lr - smladx r10, r11, r8, r10 - - add sp, sp, #4 - smlatb r10, r5, r9, r10 - - sub r2, r2, #1 - - add lr, lr, #0x40 ; round_shift_and_clamp - tst r2, #0xff - usat lr, #8, lr, asr #7 - add r10, r10, #0x40 - strb lr, [r0], r1 ; the result is transposed back and stored - usat r10, #8, r10, asr #7 - - strb r10, [r0],r1 - - movne r6, r7 - movne r7, r8 - - bne second_pass_wloop_v6 - - subs r2, r2, #0x10000 - add sp, sp, #12 ; updata src for next loop (20-8) - sub r0, r0, r1, lsl #2 - add r0, r0, #1 - - bne second_pass_hloop_v6 - - add sp, sp, #20 - ldmia sp!, {r4 - r11, pc} - -;-------------------- -skip_firstpass_filter - sub r0, r0, r1, lsl #1 - sub r1, r1, #8 - mov r2, #9 - -skip_firstpass_hloop - ldrb r4, [r0], #1 ; load data - subs r2, r2, #1 - ldrb r5, [r0], #1 - strh r4, [lr], #20 ; store it to immediate buffer - ldrb r6, [r0], #1 ; load data - strh r5, [lr], #20 - ldrb r7, [r0], #1 - strh r6, [lr], #20 - ldrb r8, [r0], #1 - strh r7, [lr], #20 - ldrb r9, [r0], #1 - strh r8, [lr], #20 - ldrb r10, [r0], #1 - strh r9, [lr], #20 - ldrb r11, [r0], #1 - strh r10, [lr], #20 
- add r0, r0, r1 ; move to next input line - strh r11, [lr], #20 - - sub lr, lr, #158 ; move over to next column - bne skip_firstpass_hloop - - b secondpass_filter - -;-------------------- -skip_secondpass_filter - mov r2, #8 - add sp, sp, #4 ;start from src[0] instead of src[-2] - -skip_secondpass_hloop - ldr r6, [sp], #4 - subs r2, r2, #1 - ldr r8, [sp], #4 - - mov r7, r6, lsr #16 ; unpack - strb r6, [r0], r1 - mov r9, r8, lsr #16 - strb r7, [r0], r1 - add sp, sp, #12 ; 20-8 - strb r8, [r0], r1 - strb r9, [r0], r1 - - sub r0, r0, r1, lsl #2 - add r0, r0, #1 - - bne skip_secondpass_hloop - - add sp, sp, #16 ; 180 - (160 +4) - - ldmia sp!, {r4 - r11, pc} - - ENDP - -;----------------- -;One word each is reserved. Label filter_coeff can be used to access the data. -;Data address: filter_coeff, filter_coeff+4, filter_coeff+8 ... -filter8_coeff - DCD 0x00000000, 0x00000080, 0x00000000, 0x00000000 - DCD 0xfffa0000, 0x000c007b, 0x0000ffff, 0x00000000 - DCD 0xfff50002, 0x0024006c, 0x0001fff8, 0x00000000 - DCD 0xfff70000, 0x0032005d, 0x0000fffa, 0x00000000 - DCD 0xfff00003, 0x004d004d, 0x0003fff0, 0x00000000 - DCD 0xfffa0000, 0x005d0032, 0x0000fff7, 0x00000000 - DCD 0xfff80001, 0x006c0024, 0x0002fff5, 0x00000000 - DCD 0xffff0000, 0x007b000c, 0x0000fffa, 0x00000000 - - ;DCD 0, 0, 128, 0, 0, 0 - ;DCD 0, -6, 123, 12, -1, 0 - ;DCD 2, -11, 108, 36, -8, 1 - ;DCD 0, -9, 93, 50, -6, 0 - ;DCD 3, -16, 77, 77, -16, 3 - ;DCD 0, -6, 50, 93, -9, 0 - ;DCD 1, -8, 36, 108, -11, 2 - ;DCD 0, -1, 12, 123, -6, 0 - - END diff --git a/libs/libvpx/vp8/common/arm/bilinearfilter_arm.c b/libs/libvpx/vp8/common/arm/bilinearfilter_arm.c deleted file mode 100644 index 799c8bd964..0000000000 --- a/libs/libvpx/vp8/common/arm/bilinearfilter_arm.c +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. 
An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "vpx_config.h" -#include "vp8_rtcd.h" -#include -#include "vp8/common/filter.h" -#include "bilinearfilter_arm.h" - -void vp8_filter_block2d_bil_armv6 -( - unsigned char *src_ptr, - unsigned char *dst_ptr, - unsigned int src_pitch, - unsigned int dst_pitch, - const short *HFilter, - const short *VFilter, - int Width, - int Height -) -{ - unsigned short FData[36*16]; /* Temp data buffer used in filtering */ - - /* First filter 1-D horizontally... */ - vp8_filter_block2d_bil_first_pass_armv6(src_ptr, FData, src_pitch, Height + 1, Width, HFilter); - - /* then 1-D vertically... */ - vp8_filter_block2d_bil_second_pass_armv6(FData, dst_ptr, dst_pitch, Height, Width, VFilter); -} - - -void vp8_bilinear_predict4x4_armv6 -( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch -) -{ - const short *HFilter; - const short *VFilter; - - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; - - vp8_filter_block2d_bil_armv6(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 4, 4); -} - -void vp8_bilinear_predict8x8_armv6 -( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch -) -{ - const short *HFilter; - const short *VFilter; - - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; - - vp8_filter_block2d_bil_armv6(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 8, 8); -} - -void vp8_bilinear_predict8x4_armv6 -( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch -) -{ - const short *HFilter; - const short *VFilter; - - HFilter = vp8_bilinear_filters[xoffset]; - 
VFilter = vp8_bilinear_filters[yoffset]; - - vp8_filter_block2d_bil_armv6(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 8, 4); -} - -void vp8_bilinear_predict16x16_armv6 -( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch -) -{ - const short *HFilter; - const short *VFilter; - - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; - - vp8_filter_block2d_bil_armv6(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 16, 16); -} diff --git a/libs/libvpx/vp8/common/arm/bilinearfilter_arm.h b/libs/libvpx/vp8/common/arm/bilinearfilter_arm.h deleted file mode 100644 index 6b84e6f3b5..0000000000 --- a/libs/libvpx/vp8/common/arm/bilinearfilter_arm.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2011 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - - -#ifndef VP8_COMMON_ARM_BILINEARFILTER_ARM_H_ -#define VP8_COMMON_ARM_BILINEARFILTER_ARM_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -extern void vp8_filter_block2d_bil_first_pass_armv6 -( - const unsigned char *src_ptr, - unsigned short *dst_ptr, - unsigned int src_pitch, - unsigned int height, - unsigned int width, - const short *vp8_filter -); - -extern void vp8_filter_block2d_bil_second_pass_armv6 -( - const unsigned short *src_ptr, - unsigned char *dst_ptr, - int dst_pitch, - unsigned int height, - unsigned int width, - const short *vp8_filter -); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP8_COMMON_ARM_BILINEARFILTER_ARM_H_ diff --git a/libs/libvpx/vp8/common/arm/dequantize_arm.c b/libs/libvpx/vp8/common/arm/dequantize_arm.c deleted file mode 100644 index 1f8157f0b1..0000000000 --- a/libs/libvpx/vp8/common/arm/dequantize_arm.c +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#include "vpx_config.h" -#include "vp8/common/blockd.h" - -#if HAVE_MEDIA -extern void vp8_dequantize_b_loop_v6(short *Q, short *DQC, short *DQ); - -void vp8_dequantize_b_v6(BLOCKD *d, short *DQC) -{ - short *DQ = d->dqcoeff; - short *Q = d->qcoeff; - - vp8_dequantize_b_loop_v6(Q, DQC, DQ); -} -#endif diff --git a/libs/libvpx/vp8/common/arm/filter_arm.c b/libs/libvpx/vp8/common/arm/filter_arm.c deleted file mode 100644 index d6a6781d86..0000000000 --- a/libs/libvpx/vp8/common/arm/filter_arm.c +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - - -#include "vpx_config.h" -#include "vp8_rtcd.h" -#include -#include "vp8/common/filter.h" -#include "vpx_ports/mem.h" - -extern void vp8_filter_block2d_first_pass_armv6 -( - unsigned char *src_ptr, - short *output_ptr, - unsigned int src_pixels_per_line, - unsigned int output_width, - unsigned int output_height, - const short *vp8_filter -); - -// 8x8 -extern void vp8_filter_block2d_first_pass_8x8_armv6 -( - unsigned char *src_ptr, - short *output_ptr, - unsigned int src_pixels_per_line, - unsigned int output_width, - unsigned int output_height, - const short *vp8_filter -); - -// 16x16 -extern void vp8_filter_block2d_first_pass_16x16_armv6 -( - unsigned char *src_ptr, - short *output_ptr, - unsigned int src_pixels_per_line, - unsigned int output_width, - unsigned int output_height, - const short *vp8_filter -); - -extern void vp8_filter_block2d_second_pass_armv6 -( - short *src_ptr, - unsigned char *output_ptr, - unsigned int output_pitch, - unsigned int cnt, - const short *vp8_filter -); - -extern void vp8_filter4_block2d_second_pass_armv6 -( - short *src_ptr, - unsigned char *output_ptr, - unsigned int output_pitch, - unsigned int cnt, - const short *vp8_filter -); - -extern void vp8_filter_block2d_first_pass_only_armv6 -( - unsigned char *src_ptr, - unsigned char *output_ptr, - unsigned int src_pixels_per_line, - unsigned int cnt, - unsigned int output_pitch, - const short *vp8_filter -); - - -extern void vp8_filter_block2d_second_pass_only_armv6 -( - unsigned char *src_ptr, - unsigned char *output_ptr, - unsigned int src_pixels_per_line, - unsigned int cnt, - unsigned int output_pitch, - const short *vp8_filter -); - -#if 
HAVE_MEDIA -void vp8_sixtap_predict4x4_armv6 -( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch -) -{ - const short *HFilter; - const short *VFilter; - DECLARE_ALIGNED(4, short, FData[12*4]); /* Temp data buffer used in filtering */ - - - HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */ - VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */ - - /* Vfilter is null. First pass only */ - if (xoffset && !yoffset) - { - /*vp8_filter_block2d_first_pass_armv6 ( src_ptr, FData+2, src_pixels_per_line, 4, 4, HFilter ); - vp8_filter_block2d_second_pass_armv6 ( FData+2, dst_ptr, dst_pitch, 4, VFilter );*/ - - vp8_filter_block2d_first_pass_only_armv6(src_ptr, dst_ptr, src_pixels_per_line, 4, dst_pitch, HFilter); - } - /* Hfilter is null. Second pass only */ - else if (!xoffset && yoffset) - { - vp8_filter_block2d_second_pass_only_armv6(src_ptr, dst_ptr, src_pixels_per_line, 4, dst_pitch, VFilter); - } - else - { - /* Vfilter is a 4 tap filter */ - if (yoffset & 0x1) - { - vp8_filter_block2d_first_pass_armv6(src_ptr - src_pixels_per_line, FData + 1, src_pixels_per_line, 4, 7, HFilter); - vp8_filter4_block2d_second_pass_armv6(FData + 2, dst_ptr, dst_pitch, 4, VFilter); - } - /* Vfilter is 6 tap filter */ - else - { - vp8_filter_block2d_first_pass_armv6(src_ptr - (2 * src_pixels_per_line), FData, src_pixels_per_line, 4, 9, HFilter); - vp8_filter_block2d_second_pass_armv6(FData + 2, dst_ptr, dst_pitch, 4, VFilter); - } - } -} - -void vp8_sixtap_predict8x8_armv6 -( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch -) -{ - const short *HFilter; - const short *VFilter; - DECLARE_ALIGNED(4, short, FData[16*8]); /* Temp data buffer used in filtering */ - - HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */ - VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */ - - if (xoffset && !yoffset) - { - 
vp8_filter_block2d_first_pass_only_armv6(src_ptr, dst_ptr, src_pixels_per_line, 8, dst_pitch, HFilter); - } - /* Hfilter is null. Second pass only */ - else if (!xoffset && yoffset) - { - vp8_filter_block2d_second_pass_only_armv6(src_ptr, dst_ptr, src_pixels_per_line, 8, dst_pitch, VFilter); - } - else - { - if (yoffset & 0x1) - { - vp8_filter_block2d_first_pass_8x8_armv6(src_ptr - src_pixels_per_line, FData + 1, src_pixels_per_line, 8, 11, HFilter); - vp8_filter4_block2d_second_pass_armv6(FData + 2, dst_ptr, dst_pitch, 8, VFilter); - } - else - { - vp8_filter_block2d_first_pass_8x8_armv6(src_ptr - (2 * src_pixels_per_line), FData, src_pixels_per_line, 8, 13, HFilter); - vp8_filter_block2d_second_pass_armv6(FData + 2, dst_ptr, dst_pitch, 8, VFilter); - } - } -} - - -void vp8_sixtap_predict16x16_armv6 -( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch -) -{ - const short *HFilter; - const short *VFilter; - DECLARE_ALIGNED(4, short, FData[24*16]); /* Temp data buffer used in filtering */ - - HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */ - VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */ - - if (xoffset && !yoffset) - { - vp8_filter_block2d_first_pass_only_armv6(src_ptr, dst_ptr, src_pixels_per_line, 16, dst_pitch, HFilter); - } - /* Hfilter is null. 
Second pass only */ - else if (!xoffset && yoffset) - { - vp8_filter_block2d_second_pass_only_armv6(src_ptr, dst_ptr, src_pixels_per_line, 16, dst_pitch, VFilter); - } - else - { - if (yoffset & 0x1) - { - vp8_filter_block2d_first_pass_16x16_armv6(src_ptr - src_pixels_per_line, FData + 1, src_pixels_per_line, 16, 19, HFilter); - vp8_filter4_block2d_second_pass_armv6(FData + 2, dst_ptr, dst_pitch, 16, VFilter); - } - else - { - vp8_filter_block2d_first_pass_16x16_armv6(src_ptr - (2 * src_pixels_per_line), FData, src_pixels_per_line, 16, 21, HFilter); - vp8_filter_block2d_second_pass_armv6(FData + 2, dst_ptr, dst_pitch, 16, VFilter); - } - } - -} -#endif diff --git a/libs/libvpx/vp8/common/arm/loopfilter_arm.c b/libs/libvpx/vp8/common/arm/loopfilter_arm.c index 5840c2bbaa..e12f65a042 100644 --- a/libs/libvpx/vp8/common/arm/loopfilter_arm.c +++ b/libs/libvpx/vp8/common/arm/loopfilter_arm.c @@ -8,29 +8,17 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include "vpx_config.h" #include "vp8_rtcd.h" #include "vp8/common/loopfilter.h" #include "vp8/common/onyxc_int.h" -#define prototype_loopfilter(sym) \ - void sym(unsigned char *src, int pitch, const unsigned char *blimit,\ - const unsigned char *limit, const unsigned char *thresh, int count) - -#if HAVE_MEDIA -extern prototype_loopfilter(vp8_loop_filter_horizontal_edge_armv6); -extern prototype_loopfilter(vp8_loop_filter_vertical_edge_armv6); -extern prototype_loopfilter(vp8_mbloop_filter_horizontal_edge_armv6); -extern prototype_loopfilter(vp8_mbloop_filter_vertical_edge_armv6); -#endif - -#if HAVE_NEON typedef void loopfilter_y_neon(unsigned char *src, int pitch, - unsigned char blimit, unsigned char limit, unsigned char thresh); + unsigned char blimit, unsigned char limit, + unsigned char thresh); typedef void loopfilter_uv_neon(unsigned char *u, int pitch, - unsigned char blimit, unsigned char limit, unsigned char thresh, - unsigned char *v); + unsigned char blimit, unsigned char limit, + 
unsigned char thresh, unsigned char *v); extern loopfilter_y_neon vp8_loop_filter_horizontal_edge_y_neon; extern loopfilter_y_neon vp8_loop_filter_vertical_edge_y_neon; @@ -41,141 +29,73 @@ extern loopfilter_y_neon vp8_mbloop_filter_horizontal_edge_y_neon; extern loopfilter_y_neon vp8_mbloop_filter_vertical_edge_y_neon; extern loopfilter_uv_neon vp8_mbloop_filter_horizontal_edge_uv_neon; extern loopfilter_uv_neon vp8_mbloop_filter_vertical_edge_uv_neon; -#endif -#if HAVE_MEDIA -/* ARMV6/MEDIA loopfilter functions*/ -/* Horizontal MB filtering */ -void vp8_loop_filter_mbh_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - vp8_mbloop_filter_horizontal_edge_armv6(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2); - - if (u_ptr) - vp8_mbloop_filter_horizontal_edge_armv6(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1); - - if (v_ptr) - vp8_mbloop_filter_horizontal_edge_armv6(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1); -} - -/* Vertical MB Filtering */ -void vp8_loop_filter_mbv_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - vp8_mbloop_filter_vertical_edge_armv6(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2); - - if (u_ptr) - vp8_mbloop_filter_vertical_edge_armv6(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1); - - if (v_ptr) - vp8_mbloop_filter_vertical_edge_armv6(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1); -} - -/* Horizontal B Filtering */ -void vp8_loop_filter_bh_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - vp8_loop_filter_horizontal_edge_armv6(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); - vp8_loop_filter_horizontal_edge_armv6(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); - 
vp8_loop_filter_horizontal_edge_armv6(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); - - if (u_ptr) - vp8_loop_filter_horizontal_edge_armv6(u_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1); - - if (v_ptr) - vp8_loop_filter_horizontal_edge_armv6(v_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1); -} - -void vp8_loop_filter_bhs_armv6(unsigned char *y_ptr, int y_stride, - const unsigned char *blimit) -{ - vp8_loop_filter_simple_horizontal_edge_armv6(y_ptr + 4 * y_stride, y_stride, blimit); - vp8_loop_filter_simple_horizontal_edge_armv6(y_ptr + 8 * y_stride, y_stride, blimit); - vp8_loop_filter_simple_horizontal_edge_armv6(y_ptr + 12 * y_stride, y_stride, blimit); -} - -/* Vertical B Filtering */ -void vp8_loop_filter_bv_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - vp8_loop_filter_vertical_edge_armv6(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); - vp8_loop_filter_vertical_edge_armv6(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); - vp8_loop_filter_vertical_edge_armv6(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); - - if (u_ptr) - vp8_loop_filter_vertical_edge_armv6(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1); - - if (v_ptr) - vp8_loop_filter_vertical_edge_armv6(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1); -} - -void vp8_loop_filter_bvs_armv6(unsigned char *y_ptr, int y_stride, - const unsigned char *blimit) -{ - vp8_loop_filter_simple_vertical_edge_armv6(y_ptr + 4, y_stride, blimit); - vp8_loop_filter_simple_vertical_edge_armv6(y_ptr + 8, y_stride, blimit); - vp8_loop_filter_simple_vertical_edge_armv6(y_ptr + 12, y_stride, blimit); -} -#endif - -#if HAVE_NEON /* NEON loopfilter functions */ /* Horizontal MB filtering */ -void vp8_loop_filter_mbh_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, 
loop_filter_info *lfi) -{ - unsigned char mblim = *lfi->mblim; - unsigned char lim = *lfi->lim; - unsigned char hev_thr = *lfi->hev_thr; - vp8_mbloop_filter_horizontal_edge_y_neon(y_ptr, y_stride, mblim, lim, hev_thr); +void vp8_loop_filter_mbh_neon(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, int uv_stride, + loop_filter_info *lfi) { + unsigned char mblim = *lfi->mblim; + unsigned char lim = *lfi->lim; + unsigned char hev_thr = *lfi->hev_thr; + vp8_mbloop_filter_horizontal_edge_y_neon(y_ptr, y_stride, mblim, lim, + hev_thr); - if (u_ptr) - vp8_mbloop_filter_horizontal_edge_uv_neon(u_ptr, uv_stride, mblim, lim, hev_thr, v_ptr); + if (u_ptr) + vp8_mbloop_filter_horizontal_edge_uv_neon(u_ptr, uv_stride, mblim, lim, + hev_thr, v_ptr); } /* Vertical MB Filtering */ -void vp8_loop_filter_mbv_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - unsigned char mblim = *lfi->mblim; - unsigned char lim = *lfi->lim; - unsigned char hev_thr = *lfi->hev_thr; +void vp8_loop_filter_mbv_neon(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, int uv_stride, + loop_filter_info *lfi) { + unsigned char mblim = *lfi->mblim; + unsigned char lim = *lfi->lim; + unsigned char hev_thr = *lfi->hev_thr; - vp8_mbloop_filter_vertical_edge_y_neon(y_ptr, y_stride, mblim, lim, hev_thr); + vp8_mbloop_filter_vertical_edge_y_neon(y_ptr, y_stride, mblim, lim, hev_thr); - if (u_ptr) - vp8_mbloop_filter_vertical_edge_uv_neon(u_ptr, uv_stride, mblim, lim, hev_thr, v_ptr); + if (u_ptr) + vp8_mbloop_filter_vertical_edge_uv_neon(u_ptr, uv_stride, mblim, lim, + hev_thr, v_ptr); } /* Horizontal B Filtering */ -void vp8_loop_filter_bh_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - unsigned char blim = *lfi->blim; - unsigned char lim = *lfi->lim; - unsigned char hev_thr = *lfi->hev_thr; 
+void vp8_loop_filter_bh_neon(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, int uv_stride, + loop_filter_info *lfi) { + unsigned char blim = *lfi->blim; + unsigned char lim = *lfi->lim; + unsigned char hev_thr = *lfi->hev_thr; - vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 4 * y_stride, y_stride, blim, lim, hev_thr); - vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 8 * y_stride, y_stride, blim, lim, hev_thr); - vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 12 * y_stride, y_stride, blim, lim, hev_thr); + vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 4 * y_stride, y_stride, blim, + lim, hev_thr); + vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 8 * y_stride, y_stride, blim, + lim, hev_thr); + vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 12 * y_stride, y_stride, blim, + lim, hev_thr); - if (u_ptr) - vp8_loop_filter_horizontal_edge_uv_neon(u_ptr + 4 * uv_stride, uv_stride, blim, lim, hev_thr, v_ptr + 4 * uv_stride); + if (u_ptr) + vp8_loop_filter_horizontal_edge_uv_neon(u_ptr + 4 * uv_stride, uv_stride, + blim, lim, hev_thr, + v_ptr + 4 * uv_stride); } /* Vertical B Filtering */ -void vp8_loop_filter_bv_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - unsigned char blim = *lfi->blim; - unsigned char lim = *lfi->lim; - unsigned char hev_thr = *lfi->hev_thr; +void vp8_loop_filter_bv_neon(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, int uv_stride, + loop_filter_info *lfi) { + unsigned char blim = *lfi->blim; + unsigned char lim = *lfi->lim; + unsigned char hev_thr = *lfi->hev_thr; - vp8_loop_filter_vertical_edge_y_neon(y_ptr + 4, y_stride, blim, lim, hev_thr); - vp8_loop_filter_vertical_edge_y_neon(y_ptr + 8, y_stride, blim, lim, hev_thr); - vp8_loop_filter_vertical_edge_y_neon(y_ptr + 12, y_stride, blim, lim, hev_thr); + vp8_loop_filter_vertical_edge_y_neon(y_ptr + 4, y_stride, blim, lim, hev_thr); + 
vp8_loop_filter_vertical_edge_y_neon(y_ptr + 8, y_stride, blim, lim, hev_thr); + vp8_loop_filter_vertical_edge_y_neon(y_ptr + 12, y_stride, blim, lim, + hev_thr); - if (u_ptr) - vp8_loop_filter_vertical_edge_uv_neon(u_ptr + 4, uv_stride, blim, lim, hev_thr, v_ptr + 4); + if (u_ptr) + vp8_loop_filter_vertical_edge_uv_neon(u_ptr + 4, uv_stride, blim, lim, + hev_thr, v_ptr + 4); } -#endif diff --git a/libs/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c b/libs/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c index 9824a31936..af566c2c41 100644 --- a/libs/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c +++ b/libs/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c @@ -9,691 +9,782 @@ */ #include +#include +#include "./vpx_config.h" -static const uint8_t bifilter4_coeff[8][2] = { - {128, 0}, - {112, 16}, - { 96, 32}, - { 80, 48}, - { 64, 64}, - { 48, 80}, - { 32, 96}, - { 16, 112} -}; +static const uint8_t bifilter4_coeff[8][2] = { { 128, 0 }, { 112, 16 }, + { 96, 32 }, { 80, 48 }, + { 64, 64 }, { 48, 80 }, + { 32, 96 }, { 16, 112 } }; -void vp8_bilinear_predict4x4_neon( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch) { - uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8; - uint8x8_t d26u8, d27u8, d28u8, d29u8, d30u8; - uint8x16_t q1u8, q2u8; - uint16x8_t q1u16, q2u16; - uint16x8_t q7u16, q8u16, q9u16; - uint64x2_t q4u64, q5u64; - uint64x1_t d12u64; - uint32x2x2_t d0u32x2, d1u32x2, d2u32x2, d3u32x2; - - if (xoffset == 0) { // skip_1stpass_filter - uint32x2_t d28u32 = vdup_n_u32(0); - uint32x2_t d29u32 = vdup_n_u32(0); - uint32x2_t d30u32 = vdup_n_u32(0); - - d28u32 = vld1_lane_u32((const uint32_t *)src_ptr, d28u32, 0); - src_ptr += src_pixels_per_line; - d28u32 = vld1_lane_u32((const uint32_t *)src_ptr, d28u32, 1); - src_ptr += src_pixels_per_line; - d29u32 = vld1_lane_u32((const uint32_t *)src_ptr, d29u32, 0); - src_ptr += src_pixels_per_line; - d29u32 = vld1_lane_u32((const uint32_t 
*)src_ptr, d29u32, 1); - src_ptr += src_pixels_per_line; - d30u32 = vld1_lane_u32((const uint32_t *)src_ptr, d30u32, 0); - d28u8 = vreinterpret_u8_u32(d28u32); - d29u8 = vreinterpret_u8_u32(d29u32); - d30u8 = vreinterpret_u8_u32(d30u32); - } else { - d2u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line; - d3u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line; - d4u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line; - d5u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line; - d6u8 = vld1_u8(src_ptr); - - q1u8 = vcombine_u8(d2u8, d3u8); - q2u8 = vcombine_u8(d4u8, d5u8); - - d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]); - d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]); - - q4u64 = vshrq_n_u64(vreinterpretq_u64_u8(q1u8), 8); - q5u64 = vshrq_n_u64(vreinterpretq_u64_u8(q2u8), 8); - d12u64 = vshr_n_u64(vreinterpret_u64_u8(d6u8), 8); - - d0u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q1u8)), - vreinterpret_u32_u8(vget_high_u8(q1u8))); - d1u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q2u8)), - vreinterpret_u32_u8(vget_high_u8(q2u8))); - d2u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q4u64)), - vreinterpret_u32_u64(vget_high_u64(q4u64))); - d3u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q5u64)), - vreinterpret_u32_u64(vget_high_u64(q5u64))); - - q7u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d0u8); - q8u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d0u8); - q9u16 = vmull_u8(d6u8, d0u8); - - q7u16 = vmlal_u8(q7u16, vreinterpret_u8_u32(d2u32x2.val[0]), d1u8); - q8u16 = vmlal_u8(q8u16, vreinterpret_u8_u32(d3u32x2.val[0]), d1u8); - q9u16 = vmlal_u8(q9u16, vreinterpret_u8_u64(d12u64), d1u8); - - d28u8 = vqrshrn_n_u16(q7u16, 7); - d29u8 = vqrshrn_n_u16(q8u16, 7); - d30u8 = vqrshrn_n_u16(q9u16, 7); - } - - // secondpass_filter - if (yoffset == 0) { // skip_2ndpass_filter - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d28u8), 0); - dst_ptr += dst_pitch; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d28u8), 1); - dst_ptr += 
dst_pitch; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d29u8), 0); - dst_ptr += dst_pitch; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d29u8), 1); - } else { - d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]); - d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]); - - q1u16 = vmull_u8(d28u8, d0u8); - q2u16 = vmull_u8(d29u8, d0u8); - - d26u8 = vext_u8(d28u8, d29u8, 4); - d27u8 = vext_u8(d29u8, d30u8, 4); - - q1u16 = vmlal_u8(q1u16, d26u8, d1u8); - q2u16 = vmlal_u8(q2u16, d27u8, d1u8); - - d2u8 = vqrshrn_n_u16(q1u16, 7); - d3u8 = vqrshrn_n_u16(q2u16, 7); - - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 0); - dst_ptr += dst_pitch; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 1); - dst_ptr += dst_pitch; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 0); - dst_ptr += dst_pitch; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 1); - } - return; +static INLINE uint8x8_t load_and_shift(const unsigned char *a) { + return vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(vld1_u8(a)), 32)); } -void vp8_bilinear_predict8x4_neon( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch) { - uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8; - uint8x8_t d7u8, d9u8, d11u8, d22u8, d23u8, d24u8, d25u8, d26u8; - uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8; - uint16x8_t q1u16, q2u16, q3u16, q4u16; - uint16x8_t q6u16, q7u16, q8u16, q9u16, q10u16; +static INLINE void store4x4(unsigned char *dst, int dst_stride, + const uint8x8_t a0, const uint8x8_t a1) { + if (!((uintptr_t)dst & 0x3) && !(dst_stride & 0x3)) { + vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 0); + dst += dst_stride; + vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 1); + dst += dst_stride; + vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 0); + dst += dst_stride; + vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 1); + } else { + // Store to the 
aligned local buffer and memcpy instead of vget_lane_u8 + // which is really really slow. + uint32_t output_buffer[4]; + vst1_lane_u32(output_buffer, vreinterpret_u32_u8(a0), 0); + vst1_lane_u32(output_buffer + 1, vreinterpret_u32_u8(a0), 1); + vst1_lane_u32(output_buffer + 2, vreinterpret_u32_u8(a1), 0); + vst1_lane_u32(output_buffer + 3, vreinterpret_u32_u8(a1), 1); - if (xoffset == 0) { // skip_1stpass_filter - d22u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line; - d23u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line; - d24u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line; - d25u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line; - d26u8 = vld1_u8(src_ptr); - } else { - q1u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; - q2u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; - q3u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; - q4u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; - q5u8 = vld1q_u8(src_ptr); - - d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]); - d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]); - - q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8); - q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8); - q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8); - q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8); - q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8); - - d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1); - d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1); - d7u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); - d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1); - d11u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1); - - q6u16 = vmlal_u8(q6u16, d3u8, d1u8); - q7u16 = vmlal_u8(q7u16, d5u8, d1u8); - q8u16 = vmlal_u8(q8u16, d7u8, d1u8); - q9u16 = vmlal_u8(q9u16, d9u8, d1u8); - q10u16 = vmlal_u8(q10u16, d11u8, d1u8); - - d22u8 = vqrshrn_n_u16(q6u16, 7); - d23u8 = vqrshrn_n_u16(q7u16, 7); - d24u8 = vqrshrn_n_u16(q8u16, 7); - d25u8 = vqrshrn_n_u16(q9u16, 7); - d26u8 = vqrshrn_n_u16(q10u16, 7); - } - - // 
secondpass_filter - if (yoffset == 0) { // skip_2ndpass_filter - vst1_u8((uint8_t *)dst_ptr, d22u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d23u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d24u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d25u8); - } else { - d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]); - d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]); - - q1u16 = vmull_u8(d22u8, d0u8); - q2u16 = vmull_u8(d23u8, d0u8); - q3u16 = vmull_u8(d24u8, d0u8); - q4u16 = vmull_u8(d25u8, d0u8); - - q1u16 = vmlal_u8(q1u16, d23u8, d1u8); - q2u16 = vmlal_u8(q2u16, d24u8, d1u8); - q3u16 = vmlal_u8(q3u16, d25u8, d1u8); - q4u16 = vmlal_u8(q4u16, d26u8, d1u8); - - d2u8 = vqrshrn_n_u16(q1u16, 7); - d3u8 = vqrshrn_n_u16(q2u16, 7); - d4u8 = vqrshrn_n_u16(q3u16, 7); - d5u8 = vqrshrn_n_u16(q4u16, 7); - - vst1_u8((uint8_t *)dst_ptr, d2u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d3u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d4u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d5u8); - } - return; + memcpy(dst, output_buffer, 4); + dst += dst_stride; + memcpy(dst, output_buffer + 1, 4); + dst += dst_stride; + memcpy(dst, output_buffer + 2, 4); + dst += dst_stride; + memcpy(dst, output_buffer + 3, 4); + } } -void vp8_bilinear_predict8x8_neon( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch) { - uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8, d11u8; - uint8x8_t d22u8, d23u8, d24u8, d25u8, d26u8, d27u8, d28u8, d29u8, d30u8; - uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8; - uint16x8_t q1u16, q2u16, q3u16, q4u16, q5u16; - uint16x8_t q6u16, q7u16, q8u16, q9u16, q10u16; +void vp8_bilinear_predict4x4_neon(unsigned char *src_ptr, + int src_pixels_per_line, int xoffset, + int yoffset, unsigned char *dst_ptr, + int dst_pitch) { + uint8x8_t e0, e1, e2; - if (xoffset == 0) { // skip_1stpass_filter - d22u8 = vld1_u8(src_ptr); src_ptr += 
src_pixels_per_line; - d23u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line; - d24u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line; - d25u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line; - d26u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line; - d27u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line; - d28u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line; - d29u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line; - d30u8 = vld1_u8(src_ptr); - } else { - q1u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; - q2u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; - q3u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; - q4u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; + if (xoffset == 0) { // skip_1stpass_filter + uint8x8_t a0, a1, a2, a3, a4; - d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]); - d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]); + a0 = load_and_shift(src_ptr); + src_ptr += src_pixels_per_line; + a1 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + a2 = load_and_shift(src_ptr); + src_ptr += src_pixels_per_line; + a3 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + a4 = vld1_u8(src_ptr); - q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8); - q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8); - q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8); - q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8); + e0 = vext_u8(a0, a1, 4); + e1 = vext_u8(a2, a3, 4); + e2 = a4; + } else { + uint8x8_t a0, a1, a2, a3, a4, b4; + uint8x16_t a01, a23; + uint8x16_t b01, b23; + uint32x2x2_t c0, c1, c2, c3; + uint16x8_t d0, d1, d2; + const uint8x8_t filter0 = vdup_n_u8(bifilter4_coeff[xoffset][0]); + const uint8x8_t filter1 = vdup_n_u8(bifilter4_coeff[xoffset][1]); - d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1); - d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1); - d7u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); - d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1); + a0 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; 
+ a1 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + a2 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + a3 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + a4 = vld1_u8(src_ptr); - q6u16 = vmlal_u8(q6u16, d3u8, d1u8); - q7u16 = vmlal_u8(q7u16, d5u8, d1u8); - q8u16 = vmlal_u8(q8u16, d7u8, d1u8); - q9u16 = vmlal_u8(q9u16, d9u8, d1u8); + a01 = vcombine_u8(a0, a1); + a23 = vcombine_u8(a2, a3); - d22u8 = vqrshrn_n_u16(q6u16, 7); - d23u8 = vqrshrn_n_u16(q7u16, 7); - d24u8 = vqrshrn_n_u16(q8u16, 7); - d25u8 = vqrshrn_n_u16(q9u16, 7); + b01 = vreinterpretq_u8_u64(vshrq_n_u64(vreinterpretq_u64_u8(a01), 8)); + b23 = vreinterpretq_u8_u64(vshrq_n_u64(vreinterpretq_u64_u8(a23), 8)); + b4 = vreinterpret_u8_u64(vshr_n_u64(vreinterpret_u64_u8(a4), 8)); - // first_pass filtering on the rest 5-line data - q1u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; - q2u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; - q3u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; - q4u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; - q5u8 = vld1q_u8(src_ptr); + c0 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(a01)), + vreinterpret_u32_u8(vget_high_u8(a01))); + c1 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(a23)), + vreinterpret_u32_u8(vget_high_u8(a23))); + c2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(b01)), + vreinterpret_u32_u8(vget_high_u8(b01))); + c3 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(b23)), + vreinterpret_u32_u8(vget_high_u8(b23))); - q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8); - q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8); - q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8); - q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8); - q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8); + d0 = vmull_u8(vreinterpret_u8_u32(c0.val[0]), filter0); + d1 = vmull_u8(vreinterpret_u8_u32(c1.val[0]), filter0); + d2 = vmull_u8(a4, filter0); - d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1); - d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1); - d7u8 = 
vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); - d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1); - d11u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1); + d0 = vmlal_u8(d0, vreinterpret_u8_u32(c2.val[0]), filter1); + d1 = vmlal_u8(d1, vreinterpret_u8_u32(c3.val[0]), filter1); + d2 = vmlal_u8(d2, b4, filter1); - q6u16 = vmlal_u8(q6u16, d3u8, d1u8); - q7u16 = vmlal_u8(q7u16, d5u8, d1u8); - q8u16 = vmlal_u8(q8u16, d7u8, d1u8); - q9u16 = vmlal_u8(q9u16, d9u8, d1u8); - q10u16 = vmlal_u8(q10u16, d11u8, d1u8); + e0 = vqrshrn_n_u16(d0, 7); + e1 = vqrshrn_n_u16(d1, 7); + e2 = vqrshrn_n_u16(d2, 7); + } - d26u8 = vqrshrn_n_u16(q6u16, 7); - d27u8 = vqrshrn_n_u16(q7u16, 7); - d28u8 = vqrshrn_n_u16(q8u16, 7); - d29u8 = vqrshrn_n_u16(q9u16, 7); - d30u8 = vqrshrn_n_u16(q10u16, 7); - } + // secondpass_filter + if (yoffset == 0) { // skip_2ndpass_filter + store4x4(dst_ptr, dst_pitch, e0, e1); + } else { + uint8x8_t f0, f1; + const uint8x8_t filter0 = vdup_n_u8(bifilter4_coeff[yoffset][0]); + const uint8x8_t filter1 = vdup_n_u8(bifilter4_coeff[yoffset][1]); - // secondpass_filter - if (yoffset == 0) { // skip_2ndpass_filter - vst1_u8((uint8_t *)dst_ptr, d22u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d23u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d24u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d25u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d26u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d27u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d28u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d29u8); - } else { - d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]); - d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]); + uint16x8_t b0 = vmull_u8(e0, filter0); + uint16x8_t b1 = vmull_u8(e1, filter0); - q1u16 = vmull_u8(d22u8, d0u8); - q2u16 = vmull_u8(d23u8, d0u8); - q3u16 = vmull_u8(d24u8, d0u8); - q4u16 = vmull_u8(d25u8, d0u8); - q5u16 = vmull_u8(d26u8, d0u8); - q6u16 = vmull_u8(d27u8, d0u8); - q7u16 = 
vmull_u8(d28u8, d0u8); - q8u16 = vmull_u8(d29u8, d0u8); + const uint8x8_t a0 = vext_u8(e0, e1, 4); + const uint8x8_t a1 = vext_u8(e1, e2, 4); - q1u16 = vmlal_u8(q1u16, d23u8, d1u8); - q2u16 = vmlal_u8(q2u16, d24u8, d1u8); - q3u16 = vmlal_u8(q3u16, d25u8, d1u8); - q4u16 = vmlal_u8(q4u16, d26u8, d1u8); - q5u16 = vmlal_u8(q5u16, d27u8, d1u8); - q6u16 = vmlal_u8(q6u16, d28u8, d1u8); - q7u16 = vmlal_u8(q7u16, d29u8, d1u8); - q8u16 = vmlal_u8(q8u16, d30u8, d1u8); + b0 = vmlal_u8(b0, a0, filter1); + b1 = vmlal_u8(b1, a1, filter1); - d2u8 = vqrshrn_n_u16(q1u16, 7); - d3u8 = vqrshrn_n_u16(q2u16, 7); - d4u8 = vqrshrn_n_u16(q3u16, 7); - d5u8 = vqrshrn_n_u16(q4u16, 7); - d6u8 = vqrshrn_n_u16(q5u16, 7); - d7u8 = vqrshrn_n_u16(q6u16, 7); - d8u8 = vqrshrn_n_u16(q7u16, 7); - d9u8 = vqrshrn_n_u16(q8u16, 7); + f0 = vqrshrn_n_u16(b0, 7); + f1 = vqrshrn_n_u16(b1, 7); - vst1_u8((uint8_t *)dst_ptr, d2u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d3u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d4u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d5u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d6u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d7u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d8u8); dst_ptr += dst_pitch; - vst1_u8((uint8_t *)dst_ptr, d9u8); - } - return; + store4x4(dst_ptr, dst_pitch, f0, f1); + } } -void vp8_bilinear_predict16x16_neon( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch) { - int i; - unsigned char tmp[272]; - unsigned char *tmpp; - uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8; - uint8x8_t d10u8, d11u8, d12u8, d13u8, d14u8, d15u8, d16u8, d17u8, d18u8; - uint8x8_t d19u8, d20u8, d21u8; - uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8, q6u8, q7u8, q8u8, q9u8, q10u8; - uint8x16_t q11u8, q12u8, q13u8, q14u8, q15u8; - uint16x8_t q1u16, q2u16, q3u16, q4u16, q5u16, q6u16, q7u16, q8u16; - uint16x8_t q9u16, q10u16, 
q11u16, q12u16, q13u16, q14u16; +void vp8_bilinear_predict8x4_neon(unsigned char *src_ptr, + int src_pixels_per_line, int xoffset, + int yoffset, unsigned char *dst_ptr, + int dst_pitch) { + uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8; + uint8x8_t d7u8, d9u8, d11u8, d22u8, d23u8, d24u8, d25u8, d26u8; + uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8; + uint16x8_t q1u16, q2u16, q3u16, q4u16; + uint16x8_t q6u16, q7u16, q8u16, q9u16, q10u16; - if (xoffset == 0) { // secondpass_bfilter16x16_only - d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]); - d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]); - - q11u8 = vld1q_u8(src_ptr); - src_ptr += src_pixels_per_line; - for (i = 4; i > 0; i--) { - q12u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; - q13u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; - q14u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; - q15u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line; - - q1u16 = vmull_u8(vget_low_u8(q11u8), d0u8); - q2u16 = vmull_u8(vget_high_u8(q11u8), d0u8); - q3u16 = vmull_u8(vget_low_u8(q12u8), d0u8); - q4u16 = vmull_u8(vget_high_u8(q12u8), d0u8); - q5u16 = vmull_u8(vget_low_u8(q13u8), d0u8); - q6u16 = vmull_u8(vget_high_u8(q13u8), d0u8); - q7u16 = vmull_u8(vget_low_u8(q14u8), d0u8); - q8u16 = vmull_u8(vget_high_u8(q14u8), d0u8); - - q1u16 = vmlal_u8(q1u16, vget_low_u8(q12u8), d1u8); - q2u16 = vmlal_u8(q2u16, vget_high_u8(q12u8), d1u8); - q3u16 = vmlal_u8(q3u16, vget_low_u8(q13u8), d1u8); - q4u16 = vmlal_u8(q4u16, vget_high_u8(q13u8), d1u8); - q5u16 = vmlal_u8(q5u16, vget_low_u8(q14u8), d1u8); - q6u16 = vmlal_u8(q6u16, vget_high_u8(q14u8), d1u8); - q7u16 = vmlal_u8(q7u16, vget_low_u8(q15u8), d1u8); - q8u16 = vmlal_u8(q8u16, vget_high_u8(q15u8), d1u8); - - d2u8 = vqrshrn_n_u16(q1u16, 7); - d3u8 = vqrshrn_n_u16(q2u16, 7); - d4u8 = vqrshrn_n_u16(q3u16, 7); - d5u8 = vqrshrn_n_u16(q4u16, 7); - d6u8 = vqrshrn_n_u16(q5u16, 7); - d7u8 = vqrshrn_n_u16(q6u16, 7); - d8u8 = vqrshrn_n_u16(q7u16, 7); - d9u8 = vqrshrn_n_u16(q8u16, 
7); - - q1u8 = vcombine_u8(d2u8, d3u8); - q2u8 = vcombine_u8(d4u8, d5u8); - q3u8 = vcombine_u8(d6u8, d7u8); - q4u8 = vcombine_u8(d8u8, d9u8); - - q11u8 = q15u8; - - vst1q_u8((uint8_t *)dst_ptr, q1u8); dst_ptr += dst_pitch; - vst1q_u8((uint8_t *)dst_ptr, q2u8); dst_ptr += dst_pitch; - vst1q_u8((uint8_t *)dst_ptr, q3u8); dst_ptr += dst_pitch; - vst1q_u8((uint8_t *)dst_ptr, q4u8); dst_ptr += dst_pitch; - } - return; - } - - if (yoffset == 0) { // firstpass_bfilter16x16_only - d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]); - d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]); - - for (i = 4; i > 0 ; i--) { - d2u8 = vld1_u8(src_ptr); - d3u8 = vld1_u8(src_ptr + 8); - d4u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line; - d5u8 = vld1_u8(src_ptr); - d6u8 = vld1_u8(src_ptr + 8); - d7u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line; - d8u8 = vld1_u8(src_ptr); - d9u8 = vld1_u8(src_ptr + 8); - d10u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line; - d11u8 = vld1_u8(src_ptr); - d12u8 = vld1_u8(src_ptr + 8); - d13u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line; - - q7u16 = vmull_u8(d2u8, d0u8); - q8u16 = vmull_u8(d3u8, d0u8); - q9u16 = vmull_u8(d5u8, d0u8); - q10u16 = vmull_u8(d6u8, d0u8); - q11u16 = vmull_u8(d8u8, d0u8); - q12u16 = vmull_u8(d9u8, d0u8); - q13u16 = vmull_u8(d11u8, d0u8); - q14u16 = vmull_u8(d12u8, d0u8); - - d2u8 = vext_u8(d2u8, d3u8, 1); - d5u8 = vext_u8(d5u8, d6u8, 1); - d8u8 = vext_u8(d8u8, d9u8, 1); - d11u8 = vext_u8(d11u8, d12u8, 1); - - q7u16 = vmlal_u8(q7u16, d2u8, d1u8); - q9u16 = vmlal_u8(q9u16, d5u8, d1u8); - q11u16 = vmlal_u8(q11u16, d8u8, d1u8); - q13u16 = vmlal_u8(q13u16, d11u8, d1u8); - - d3u8 = vext_u8(d3u8, d4u8, 1); - d6u8 = vext_u8(d6u8, d7u8, 1); - d9u8 = vext_u8(d9u8, d10u8, 1); - d12u8 = vext_u8(d12u8, d13u8, 1); - - q8u16 = vmlal_u8(q8u16, d3u8, d1u8); - q10u16 = vmlal_u8(q10u16, d6u8, d1u8); - q12u16 = vmlal_u8(q12u16, d9u8, d1u8); - q14u16 = vmlal_u8(q14u16, d12u8, d1u8); - - d14u8 = vqrshrn_n_u16(q7u16, 
7); - d15u8 = vqrshrn_n_u16(q8u16, 7); - d16u8 = vqrshrn_n_u16(q9u16, 7); - d17u8 = vqrshrn_n_u16(q10u16, 7); - d18u8 = vqrshrn_n_u16(q11u16, 7); - d19u8 = vqrshrn_n_u16(q12u16, 7); - d20u8 = vqrshrn_n_u16(q13u16, 7); - d21u8 = vqrshrn_n_u16(q14u16, 7); - - q7u8 = vcombine_u8(d14u8, d15u8); - q8u8 = vcombine_u8(d16u8, d17u8); - q9u8 = vcombine_u8(d18u8, d19u8); - q10u8 =vcombine_u8(d20u8, d21u8); - - vst1q_u8((uint8_t *)dst_ptr, q7u8); dst_ptr += dst_pitch; - vst1q_u8((uint8_t *)dst_ptr, q8u8); dst_ptr += dst_pitch; - vst1q_u8((uint8_t *)dst_ptr, q9u8); dst_ptr += dst_pitch; - vst1q_u8((uint8_t *)dst_ptr, q10u8); dst_ptr += dst_pitch; - } - return; - } + if (xoffset == 0) { // skip_1stpass_filter + d22u8 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + d23u8 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + d24u8 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + d25u8 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + d26u8 = vld1_u8(src_ptr); + } else { + q1u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; + q2u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; + q3u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; + q4u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; + q5u8 = vld1q_u8(src_ptr); d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]); d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]); - d2u8 = vld1_u8(src_ptr); - d3u8 = vld1_u8(src_ptr + 8); - d4u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line; - d5u8 = vld1_u8(src_ptr); - d6u8 = vld1_u8(src_ptr + 8); - d7u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line; - d8u8 = vld1_u8(src_ptr); - d9u8 = vld1_u8(src_ptr + 8); - d10u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line; - d11u8 = vld1_u8(src_ptr); - d12u8 = vld1_u8(src_ptr + 8); - d13u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line; + q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8); + q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8); + q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8); + q9u16 = 
vmull_u8(vget_low_u8(q4u8), d0u8); + q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8); - // First Pass: output_height lines x output_width columns (17x16) - tmpp = tmp; - for (i = 3; i > 0; i--) { - q7u16 = vmull_u8(d2u8, d0u8); - q8u16 = vmull_u8(d3u8, d0u8); - q9u16 = vmull_u8(d5u8, d0u8); - q10u16 = vmull_u8(d6u8, d0u8); - q11u16 = vmull_u8(d8u8, d0u8); - q12u16 = vmull_u8(d9u8, d0u8); - q13u16 = vmull_u8(d11u8, d0u8); - q14u16 = vmull_u8(d12u8, d0u8); + d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1); + d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1); + d7u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); + d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1); + d11u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1); - d2u8 = vext_u8(d2u8, d3u8, 1); - d5u8 = vext_u8(d5u8, d6u8, 1); - d8u8 = vext_u8(d8u8, d9u8, 1); - d11u8 = vext_u8(d11u8, d12u8, 1); + q6u16 = vmlal_u8(q6u16, d3u8, d1u8); + q7u16 = vmlal_u8(q7u16, d5u8, d1u8); + q8u16 = vmlal_u8(q8u16, d7u8, d1u8); + q9u16 = vmlal_u8(q9u16, d9u8, d1u8); + q10u16 = vmlal_u8(q10u16, d11u8, d1u8); - q7u16 = vmlal_u8(q7u16, d2u8, d1u8); - q9u16 = vmlal_u8(q9u16, d5u8, d1u8); - q11u16 = vmlal_u8(q11u16, d8u8, d1u8); - q13u16 = vmlal_u8(q13u16, d11u8, d1u8); + d22u8 = vqrshrn_n_u16(q6u16, 7); + d23u8 = vqrshrn_n_u16(q7u16, 7); + d24u8 = vqrshrn_n_u16(q8u16, 7); + d25u8 = vqrshrn_n_u16(q9u16, 7); + d26u8 = vqrshrn_n_u16(q10u16, 7); + } - d3u8 = vext_u8(d3u8, d4u8, 1); - d6u8 = vext_u8(d6u8, d7u8, 1); - d9u8 = vext_u8(d9u8, d10u8, 1); - d12u8 = vext_u8(d12u8, d13u8, 1); - - q8u16 = vmlal_u8(q8u16, d3u8, d1u8); - q10u16 = vmlal_u8(q10u16, d6u8, d1u8); - q12u16 = vmlal_u8(q12u16, d9u8, d1u8); - q14u16 = vmlal_u8(q14u16, d12u8, d1u8); - - d14u8 = vqrshrn_n_u16(q7u16, 7); - d15u8 = vqrshrn_n_u16(q8u16, 7); - d16u8 = vqrshrn_n_u16(q9u16, 7); - d17u8 = vqrshrn_n_u16(q10u16, 7); - d18u8 = vqrshrn_n_u16(q11u16, 7); - d19u8 = vqrshrn_n_u16(q12u16, 7); - d20u8 = vqrshrn_n_u16(q13u16, 7); - d21u8 = 
vqrshrn_n_u16(q14u16, 7); - - d2u8 = vld1_u8(src_ptr); - d3u8 = vld1_u8(src_ptr + 8); - d4u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line; - d5u8 = vld1_u8(src_ptr); - d6u8 = vld1_u8(src_ptr + 8); - d7u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line; - d8u8 = vld1_u8(src_ptr); - d9u8 = vld1_u8(src_ptr + 8); - d10u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line; - d11u8 = vld1_u8(src_ptr); - d12u8 = vld1_u8(src_ptr + 8); - d13u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line; - - q7u8 = vcombine_u8(d14u8, d15u8); - q8u8 = vcombine_u8(d16u8, d17u8); - q9u8 = vcombine_u8(d18u8, d19u8); - q10u8 = vcombine_u8(d20u8, d21u8); - - vst1q_u8((uint8_t *)tmpp, q7u8); tmpp += 16; - vst1q_u8((uint8_t *)tmpp, q8u8); tmpp += 16; - vst1q_u8((uint8_t *)tmpp, q9u8); tmpp += 16; - vst1q_u8((uint8_t *)tmpp, q10u8); tmpp += 16; - } - - // First-pass filtering for rest 5 lines - d14u8 = vld1_u8(src_ptr); - d15u8 = vld1_u8(src_ptr + 8); - d16u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line; - - q9u16 = vmull_u8(d2u8, d0u8); - q10u16 = vmull_u8(d3u8, d0u8); - q11u16 = vmull_u8(d5u8, d0u8); - q12u16 = vmull_u8(d6u8, d0u8); - q13u16 = vmull_u8(d8u8, d0u8); - q14u16 = vmull_u8(d9u8, d0u8); - - d2u8 = vext_u8(d2u8, d3u8, 1); - d5u8 = vext_u8(d5u8, d6u8, 1); - d8u8 = vext_u8(d8u8, d9u8, 1); - - q9u16 = vmlal_u8(q9u16, d2u8, d1u8); - q11u16 = vmlal_u8(q11u16, d5u8, d1u8); - q13u16 = vmlal_u8(q13u16, d8u8, d1u8); - - d3u8 = vext_u8(d3u8, d4u8, 1); - d6u8 = vext_u8(d6u8, d7u8, 1); - d9u8 = vext_u8(d9u8, d10u8, 1); - - q10u16 = vmlal_u8(q10u16, d3u8, d1u8); - q12u16 = vmlal_u8(q12u16, d6u8, d1u8); - q14u16 = vmlal_u8(q14u16, d9u8, d1u8); - - q1u16 = vmull_u8(d11u8, d0u8); - q2u16 = vmull_u8(d12u8, d0u8); - q3u16 = vmull_u8(d14u8, d0u8); - q4u16 = vmull_u8(d15u8, d0u8); - - d11u8 = vext_u8(d11u8, d12u8, 1); - d14u8 = vext_u8(d14u8, d15u8, 1); - - q1u16 = vmlal_u8(q1u16, d11u8, d1u8); - q3u16 = vmlal_u8(q3u16, d14u8, d1u8); - - d12u8 = vext_u8(d12u8, 
d13u8, 1); - d15u8 = vext_u8(d15u8, d16u8, 1); - - q2u16 = vmlal_u8(q2u16, d12u8, d1u8); - q4u16 = vmlal_u8(q4u16, d15u8, d1u8); - - d10u8 = vqrshrn_n_u16(q9u16, 7); - d11u8 = vqrshrn_n_u16(q10u16, 7); - d12u8 = vqrshrn_n_u16(q11u16, 7); - d13u8 = vqrshrn_n_u16(q12u16, 7); - d14u8 = vqrshrn_n_u16(q13u16, 7); - d15u8 = vqrshrn_n_u16(q14u16, 7); - d16u8 = vqrshrn_n_u16(q1u16, 7); - d17u8 = vqrshrn_n_u16(q2u16, 7); - d18u8 = vqrshrn_n_u16(q3u16, 7); - d19u8 = vqrshrn_n_u16(q4u16, 7); - - q5u8 = vcombine_u8(d10u8, d11u8); - q6u8 = vcombine_u8(d12u8, d13u8); - q7u8 = vcombine_u8(d14u8, d15u8); - q8u8 = vcombine_u8(d16u8, d17u8); - q9u8 = vcombine_u8(d18u8, d19u8); - - vst1q_u8((uint8_t *)tmpp, q5u8); tmpp += 16; - vst1q_u8((uint8_t *)tmpp, q6u8); tmpp += 16; - vst1q_u8((uint8_t *)tmpp, q7u8); tmpp += 16; - vst1q_u8((uint8_t *)tmpp, q8u8); tmpp += 16; - vst1q_u8((uint8_t *)tmpp, q9u8); - - // secondpass_filter + // secondpass_filter + if (yoffset == 0) { // skip_2ndpass_filter + vst1_u8((uint8_t *)dst_ptr, d22u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d23u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d24u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d25u8); + } else { d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]); d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]); - tmpp = tmp; - q11u8 = vld1q_u8(tmpp); - tmpp += 16; + q1u16 = vmull_u8(d22u8, d0u8); + q2u16 = vmull_u8(d23u8, d0u8); + q3u16 = vmull_u8(d24u8, d0u8); + q4u16 = vmull_u8(d25u8, d0u8); + + q1u16 = vmlal_u8(q1u16, d23u8, d1u8); + q2u16 = vmlal_u8(q2u16, d24u8, d1u8); + q3u16 = vmlal_u8(q3u16, d25u8, d1u8); + q4u16 = vmlal_u8(q4u16, d26u8, d1u8); + + d2u8 = vqrshrn_n_u16(q1u16, 7); + d3u8 = vqrshrn_n_u16(q2u16, 7); + d4u8 = vqrshrn_n_u16(q3u16, 7); + d5u8 = vqrshrn_n_u16(q4u16, 7); + + vst1_u8((uint8_t *)dst_ptr, d2u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d3u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d4u8); + dst_ptr += dst_pitch; + 
vst1_u8((uint8_t *)dst_ptr, d5u8); + } + return; +} + +void vp8_bilinear_predict8x8_neon(unsigned char *src_ptr, + int src_pixels_per_line, int xoffset, + int yoffset, unsigned char *dst_ptr, + int dst_pitch) { + uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8, d11u8; + uint8x8_t d22u8, d23u8, d24u8, d25u8, d26u8, d27u8, d28u8, d29u8, d30u8; + uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8; + uint16x8_t q1u16, q2u16, q3u16, q4u16, q5u16; + uint16x8_t q6u16, q7u16, q8u16, q9u16, q10u16; + + if (xoffset == 0) { // skip_1stpass_filter + d22u8 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + d23u8 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + d24u8 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + d25u8 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + d26u8 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + d27u8 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + d28u8 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + d29u8 = vld1_u8(src_ptr); + src_ptr += src_pixels_per_line; + d30u8 = vld1_u8(src_ptr); + } else { + q1u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; + q2u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; + q3u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; + q4u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; + + d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]); + d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]); + + q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8); + q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8); + q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8); + q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8); + + d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1); + d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1); + d7u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); + d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1); + + q6u16 = vmlal_u8(q6u16, d3u8, d1u8); + q7u16 = vmlal_u8(q7u16, d5u8, d1u8); + q8u16 = vmlal_u8(q8u16, d7u8, d1u8); + q9u16 = 
vmlal_u8(q9u16, d9u8, d1u8); + + d22u8 = vqrshrn_n_u16(q6u16, 7); + d23u8 = vqrshrn_n_u16(q7u16, 7); + d24u8 = vqrshrn_n_u16(q8u16, 7); + d25u8 = vqrshrn_n_u16(q9u16, 7); + + // first_pass filtering on the rest 5-line data + q1u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; + q2u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; + q3u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; + q4u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; + q5u8 = vld1q_u8(src_ptr); + + q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8); + q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8); + q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8); + q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8); + q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8); + + d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1); + d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1); + d7u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); + d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1); + d11u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1); + + q6u16 = vmlal_u8(q6u16, d3u8, d1u8); + q7u16 = vmlal_u8(q7u16, d5u8, d1u8); + q8u16 = vmlal_u8(q8u16, d7u8, d1u8); + q9u16 = vmlal_u8(q9u16, d9u8, d1u8); + q10u16 = vmlal_u8(q10u16, d11u8, d1u8); + + d26u8 = vqrshrn_n_u16(q6u16, 7); + d27u8 = vqrshrn_n_u16(q7u16, 7); + d28u8 = vqrshrn_n_u16(q8u16, 7); + d29u8 = vqrshrn_n_u16(q9u16, 7); + d30u8 = vqrshrn_n_u16(q10u16, 7); + } + + // secondpass_filter + if (yoffset == 0) { // skip_2ndpass_filter + vst1_u8((uint8_t *)dst_ptr, d22u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d23u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d24u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d25u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d26u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d27u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d28u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d29u8); + } else { + d0u8 = 
vdup_n_u8(bifilter4_coeff[yoffset][0]); + d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]); + + q1u16 = vmull_u8(d22u8, d0u8); + q2u16 = vmull_u8(d23u8, d0u8); + q3u16 = vmull_u8(d24u8, d0u8); + q4u16 = vmull_u8(d25u8, d0u8); + q5u16 = vmull_u8(d26u8, d0u8); + q6u16 = vmull_u8(d27u8, d0u8); + q7u16 = vmull_u8(d28u8, d0u8); + q8u16 = vmull_u8(d29u8, d0u8); + + q1u16 = vmlal_u8(q1u16, d23u8, d1u8); + q2u16 = vmlal_u8(q2u16, d24u8, d1u8); + q3u16 = vmlal_u8(q3u16, d25u8, d1u8); + q4u16 = vmlal_u8(q4u16, d26u8, d1u8); + q5u16 = vmlal_u8(q5u16, d27u8, d1u8); + q6u16 = vmlal_u8(q6u16, d28u8, d1u8); + q7u16 = vmlal_u8(q7u16, d29u8, d1u8); + q8u16 = vmlal_u8(q8u16, d30u8, d1u8); + + d2u8 = vqrshrn_n_u16(q1u16, 7); + d3u8 = vqrshrn_n_u16(q2u16, 7); + d4u8 = vqrshrn_n_u16(q3u16, 7); + d5u8 = vqrshrn_n_u16(q4u16, 7); + d6u8 = vqrshrn_n_u16(q5u16, 7); + d7u8 = vqrshrn_n_u16(q6u16, 7); + d8u8 = vqrshrn_n_u16(q7u16, 7); + d9u8 = vqrshrn_n_u16(q8u16, 7); + + vst1_u8((uint8_t *)dst_ptr, d2u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d3u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d4u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d5u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d6u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d7u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d8u8); + dst_ptr += dst_pitch; + vst1_u8((uint8_t *)dst_ptr, d9u8); + } + return; +} + +void vp8_bilinear_predict16x16_neon(unsigned char *src_ptr, + int src_pixels_per_line, int xoffset, + int yoffset, unsigned char *dst_ptr, + int dst_pitch) { + int i; + unsigned char tmp[272]; + unsigned char *tmpp; + uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8; + uint8x8_t d10u8, d11u8, d12u8, d13u8, d14u8, d15u8, d16u8, d17u8, d18u8; + uint8x8_t d19u8, d20u8, d21u8; + uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8, q6u8, q7u8, q8u8, q9u8, q10u8; + uint8x16_t q11u8, q12u8, q13u8, q14u8, q15u8; + uint16x8_t q1u16, q2u16, q3u16, q4u16, 
q5u16, q6u16, q7u16, q8u16; + uint16x8_t q9u16, q10u16, q11u16, q12u16, q13u16, q14u16; + + if (xoffset == 0) { // secondpass_bfilter16x16_only + d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]); + d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]); + + q11u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; for (i = 4; i > 0; i--) { - q12u8 = vld1q_u8(tmpp); tmpp += 16; - q13u8 = vld1q_u8(tmpp); tmpp += 16; - q14u8 = vld1q_u8(tmpp); tmpp += 16; - q15u8 = vld1q_u8(tmpp); tmpp += 16; + q12u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; + q13u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; + q14u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; + q15u8 = vld1q_u8(src_ptr); + src_ptr += src_pixels_per_line; - q1u16 = vmull_u8(vget_low_u8(q11u8), d0u8); - q2u16 = vmull_u8(vget_high_u8(q11u8), d0u8); - q3u16 = vmull_u8(vget_low_u8(q12u8), d0u8); - q4u16 = vmull_u8(vget_high_u8(q12u8), d0u8); - q5u16 = vmull_u8(vget_low_u8(q13u8), d0u8); - q6u16 = vmull_u8(vget_high_u8(q13u8), d0u8); - q7u16 = vmull_u8(vget_low_u8(q14u8), d0u8); - q8u16 = vmull_u8(vget_high_u8(q14u8), d0u8); + q1u16 = vmull_u8(vget_low_u8(q11u8), d0u8); + q2u16 = vmull_u8(vget_high_u8(q11u8), d0u8); + q3u16 = vmull_u8(vget_low_u8(q12u8), d0u8); + q4u16 = vmull_u8(vget_high_u8(q12u8), d0u8); + q5u16 = vmull_u8(vget_low_u8(q13u8), d0u8); + q6u16 = vmull_u8(vget_high_u8(q13u8), d0u8); + q7u16 = vmull_u8(vget_low_u8(q14u8), d0u8); + q8u16 = vmull_u8(vget_high_u8(q14u8), d0u8); - q1u16 = vmlal_u8(q1u16, vget_low_u8(q12u8), d1u8); - q2u16 = vmlal_u8(q2u16, vget_high_u8(q12u8), d1u8); - q3u16 = vmlal_u8(q3u16, vget_low_u8(q13u8), d1u8); - q4u16 = vmlal_u8(q4u16, vget_high_u8(q13u8), d1u8); - q5u16 = vmlal_u8(q5u16, vget_low_u8(q14u8), d1u8); - q6u16 = vmlal_u8(q6u16, vget_high_u8(q14u8), d1u8); - q7u16 = vmlal_u8(q7u16, vget_low_u8(q15u8), d1u8); - q8u16 = vmlal_u8(q8u16, vget_high_u8(q15u8), d1u8); + q1u16 = vmlal_u8(q1u16, vget_low_u8(q12u8), d1u8); + q2u16 = vmlal_u8(q2u16, 
vget_high_u8(q12u8), d1u8); + q3u16 = vmlal_u8(q3u16, vget_low_u8(q13u8), d1u8); + q4u16 = vmlal_u8(q4u16, vget_high_u8(q13u8), d1u8); + q5u16 = vmlal_u8(q5u16, vget_low_u8(q14u8), d1u8); + q6u16 = vmlal_u8(q6u16, vget_high_u8(q14u8), d1u8); + q7u16 = vmlal_u8(q7u16, vget_low_u8(q15u8), d1u8); + q8u16 = vmlal_u8(q8u16, vget_high_u8(q15u8), d1u8); - d2u8 = vqrshrn_n_u16(q1u16, 7); - d3u8 = vqrshrn_n_u16(q2u16, 7); - d4u8 = vqrshrn_n_u16(q3u16, 7); - d5u8 = vqrshrn_n_u16(q4u16, 7); - d6u8 = vqrshrn_n_u16(q5u16, 7); - d7u8 = vqrshrn_n_u16(q6u16, 7); - d8u8 = vqrshrn_n_u16(q7u16, 7); - d9u8 = vqrshrn_n_u16(q8u16, 7); + d2u8 = vqrshrn_n_u16(q1u16, 7); + d3u8 = vqrshrn_n_u16(q2u16, 7); + d4u8 = vqrshrn_n_u16(q3u16, 7); + d5u8 = vqrshrn_n_u16(q4u16, 7); + d6u8 = vqrshrn_n_u16(q5u16, 7); + d7u8 = vqrshrn_n_u16(q6u16, 7); + d8u8 = vqrshrn_n_u16(q7u16, 7); + d9u8 = vqrshrn_n_u16(q8u16, 7); - q1u8 = vcombine_u8(d2u8, d3u8); - q2u8 = vcombine_u8(d4u8, d5u8); - q3u8 = vcombine_u8(d6u8, d7u8); - q4u8 = vcombine_u8(d8u8, d9u8); + q1u8 = vcombine_u8(d2u8, d3u8); + q2u8 = vcombine_u8(d4u8, d5u8); + q3u8 = vcombine_u8(d6u8, d7u8); + q4u8 = vcombine_u8(d8u8, d9u8); - q11u8 = q15u8; + q11u8 = q15u8; - vst1q_u8((uint8_t *)dst_ptr, q1u8); dst_ptr += dst_pitch; - vst1q_u8((uint8_t *)dst_ptr, q2u8); dst_ptr += dst_pitch; - vst1q_u8((uint8_t *)dst_ptr, q3u8); dst_ptr += dst_pitch; - vst1q_u8((uint8_t *)dst_ptr, q4u8); dst_ptr += dst_pitch; + vst1q_u8((uint8_t *)dst_ptr, q1u8); + dst_ptr += dst_pitch; + vst1q_u8((uint8_t *)dst_ptr, q2u8); + dst_ptr += dst_pitch; + vst1q_u8((uint8_t *)dst_ptr, q3u8); + dst_ptr += dst_pitch; + vst1q_u8((uint8_t *)dst_ptr, q4u8); + dst_ptr += dst_pitch; } return; + } + + if (yoffset == 0) { // firstpass_bfilter16x16_only + d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]); + d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]); + + for (i = 4; i > 0; i--) { + d2u8 = vld1_u8(src_ptr); + d3u8 = vld1_u8(src_ptr + 8); + d4u8 = vld1_u8(src_ptr + 16); + src_ptr += 
src_pixels_per_line; + d5u8 = vld1_u8(src_ptr); + d6u8 = vld1_u8(src_ptr + 8); + d7u8 = vld1_u8(src_ptr + 16); + src_ptr += src_pixels_per_line; + d8u8 = vld1_u8(src_ptr); + d9u8 = vld1_u8(src_ptr + 8); + d10u8 = vld1_u8(src_ptr + 16); + src_ptr += src_pixels_per_line; + d11u8 = vld1_u8(src_ptr); + d12u8 = vld1_u8(src_ptr + 8); + d13u8 = vld1_u8(src_ptr + 16); + src_ptr += src_pixels_per_line; + + q7u16 = vmull_u8(d2u8, d0u8); + q8u16 = vmull_u8(d3u8, d0u8); + q9u16 = vmull_u8(d5u8, d0u8); + q10u16 = vmull_u8(d6u8, d0u8); + q11u16 = vmull_u8(d8u8, d0u8); + q12u16 = vmull_u8(d9u8, d0u8); + q13u16 = vmull_u8(d11u8, d0u8); + q14u16 = vmull_u8(d12u8, d0u8); + + d2u8 = vext_u8(d2u8, d3u8, 1); + d5u8 = vext_u8(d5u8, d6u8, 1); + d8u8 = vext_u8(d8u8, d9u8, 1); + d11u8 = vext_u8(d11u8, d12u8, 1); + + q7u16 = vmlal_u8(q7u16, d2u8, d1u8); + q9u16 = vmlal_u8(q9u16, d5u8, d1u8); + q11u16 = vmlal_u8(q11u16, d8u8, d1u8); + q13u16 = vmlal_u8(q13u16, d11u8, d1u8); + + d3u8 = vext_u8(d3u8, d4u8, 1); + d6u8 = vext_u8(d6u8, d7u8, 1); + d9u8 = vext_u8(d9u8, d10u8, 1); + d12u8 = vext_u8(d12u8, d13u8, 1); + + q8u16 = vmlal_u8(q8u16, d3u8, d1u8); + q10u16 = vmlal_u8(q10u16, d6u8, d1u8); + q12u16 = vmlal_u8(q12u16, d9u8, d1u8); + q14u16 = vmlal_u8(q14u16, d12u8, d1u8); + + d14u8 = vqrshrn_n_u16(q7u16, 7); + d15u8 = vqrshrn_n_u16(q8u16, 7); + d16u8 = vqrshrn_n_u16(q9u16, 7); + d17u8 = vqrshrn_n_u16(q10u16, 7); + d18u8 = vqrshrn_n_u16(q11u16, 7); + d19u8 = vqrshrn_n_u16(q12u16, 7); + d20u8 = vqrshrn_n_u16(q13u16, 7); + d21u8 = vqrshrn_n_u16(q14u16, 7); + + q7u8 = vcombine_u8(d14u8, d15u8); + q8u8 = vcombine_u8(d16u8, d17u8); + q9u8 = vcombine_u8(d18u8, d19u8); + q10u8 = vcombine_u8(d20u8, d21u8); + + vst1q_u8((uint8_t *)dst_ptr, q7u8); + dst_ptr += dst_pitch; + vst1q_u8((uint8_t *)dst_ptr, q8u8); + dst_ptr += dst_pitch; + vst1q_u8((uint8_t *)dst_ptr, q9u8); + dst_ptr += dst_pitch; + vst1q_u8((uint8_t *)dst_ptr, q10u8); + dst_ptr += dst_pitch; + } + return; + } + + d0u8 = 
vdup_n_u8(bifilter4_coeff[xoffset][0]); + d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]); + + d2u8 = vld1_u8(src_ptr); + d3u8 = vld1_u8(src_ptr + 8); + d4u8 = vld1_u8(src_ptr + 16); + src_ptr += src_pixels_per_line; + d5u8 = vld1_u8(src_ptr); + d6u8 = vld1_u8(src_ptr + 8); + d7u8 = vld1_u8(src_ptr + 16); + src_ptr += src_pixels_per_line; + d8u8 = vld1_u8(src_ptr); + d9u8 = vld1_u8(src_ptr + 8); + d10u8 = vld1_u8(src_ptr + 16); + src_ptr += src_pixels_per_line; + d11u8 = vld1_u8(src_ptr); + d12u8 = vld1_u8(src_ptr + 8); + d13u8 = vld1_u8(src_ptr + 16); + src_ptr += src_pixels_per_line; + + // First Pass: output_height lines x output_width columns (17x16) + tmpp = tmp; + for (i = 3; i > 0; i--) { + q7u16 = vmull_u8(d2u8, d0u8); + q8u16 = vmull_u8(d3u8, d0u8); + q9u16 = vmull_u8(d5u8, d0u8); + q10u16 = vmull_u8(d6u8, d0u8); + q11u16 = vmull_u8(d8u8, d0u8); + q12u16 = vmull_u8(d9u8, d0u8); + q13u16 = vmull_u8(d11u8, d0u8); + q14u16 = vmull_u8(d12u8, d0u8); + + d2u8 = vext_u8(d2u8, d3u8, 1); + d5u8 = vext_u8(d5u8, d6u8, 1); + d8u8 = vext_u8(d8u8, d9u8, 1); + d11u8 = vext_u8(d11u8, d12u8, 1); + + q7u16 = vmlal_u8(q7u16, d2u8, d1u8); + q9u16 = vmlal_u8(q9u16, d5u8, d1u8); + q11u16 = vmlal_u8(q11u16, d8u8, d1u8); + q13u16 = vmlal_u8(q13u16, d11u8, d1u8); + + d3u8 = vext_u8(d3u8, d4u8, 1); + d6u8 = vext_u8(d6u8, d7u8, 1); + d9u8 = vext_u8(d9u8, d10u8, 1); + d12u8 = vext_u8(d12u8, d13u8, 1); + + q8u16 = vmlal_u8(q8u16, d3u8, d1u8); + q10u16 = vmlal_u8(q10u16, d6u8, d1u8); + q12u16 = vmlal_u8(q12u16, d9u8, d1u8); + q14u16 = vmlal_u8(q14u16, d12u8, d1u8); + + d14u8 = vqrshrn_n_u16(q7u16, 7); + d15u8 = vqrshrn_n_u16(q8u16, 7); + d16u8 = vqrshrn_n_u16(q9u16, 7); + d17u8 = vqrshrn_n_u16(q10u16, 7); + d18u8 = vqrshrn_n_u16(q11u16, 7); + d19u8 = vqrshrn_n_u16(q12u16, 7); + d20u8 = vqrshrn_n_u16(q13u16, 7); + d21u8 = vqrshrn_n_u16(q14u16, 7); + + d2u8 = vld1_u8(src_ptr); + d3u8 = vld1_u8(src_ptr + 8); + d4u8 = vld1_u8(src_ptr + 16); + src_ptr += src_pixels_per_line; + d5u8 = 
vld1_u8(src_ptr); + d6u8 = vld1_u8(src_ptr + 8); + d7u8 = vld1_u8(src_ptr + 16); + src_ptr += src_pixels_per_line; + d8u8 = vld1_u8(src_ptr); + d9u8 = vld1_u8(src_ptr + 8); + d10u8 = vld1_u8(src_ptr + 16); + src_ptr += src_pixels_per_line; + d11u8 = vld1_u8(src_ptr); + d12u8 = vld1_u8(src_ptr + 8); + d13u8 = vld1_u8(src_ptr + 16); + src_ptr += src_pixels_per_line; + + q7u8 = vcombine_u8(d14u8, d15u8); + q8u8 = vcombine_u8(d16u8, d17u8); + q9u8 = vcombine_u8(d18u8, d19u8); + q10u8 = vcombine_u8(d20u8, d21u8); + + vst1q_u8((uint8_t *)tmpp, q7u8); + tmpp += 16; + vst1q_u8((uint8_t *)tmpp, q8u8); + tmpp += 16; + vst1q_u8((uint8_t *)tmpp, q9u8); + tmpp += 16; + vst1q_u8((uint8_t *)tmpp, q10u8); + tmpp += 16; + } + + // First-pass filtering for rest 5 lines + d14u8 = vld1_u8(src_ptr); + d15u8 = vld1_u8(src_ptr + 8); + d16u8 = vld1_u8(src_ptr + 16); + src_ptr += src_pixels_per_line; + + q9u16 = vmull_u8(d2u8, d0u8); + q10u16 = vmull_u8(d3u8, d0u8); + q11u16 = vmull_u8(d5u8, d0u8); + q12u16 = vmull_u8(d6u8, d0u8); + q13u16 = vmull_u8(d8u8, d0u8); + q14u16 = vmull_u8(d9u8, d0u8); + + d2u8 = vext_u8(d2u8, d3u8, 1); + d5u8 = vext_u8(d5u8, d6u8, 1); + d8u8 = vext_u8(d8u8, d9u8, 1); + + q9u16 = vmlal_u8(q9u16, d2u8, d1u8); + q11u16 = vmlal_u8(q11u16, d5u8, d1u8); + q13u16 = vmlal_u8(q13u16, d8u8, d1u8); + + d3u8 = vext_u8(d3u8, d4u8, 1); + d6u8 = vext_u8(d6u8, d7u8, 1); + d9u8 = vext_u8(d9u8, d10u8, 1); + + q10u16 = vmlal_u8(q10u16, d3u8, d1u8); + q12u16 = vmlal_u8(q12u16, d6u8, d1u8); + q14u16 = vmlal_u8(q14u16, d9u8, d1u8); + + q1u16 = vmull_u8(d11u8, d0u8); + q2u16 = vmull_u8(d12u8, d0u8); + q3u16 = vmull_u8(d14u8, d0u8); + q4u16 = vmull_u8(d15u8, d0u8); + + d11u8 = vext_u8(d11u8, d12u8, 1); + d14u8 = vext_u8(d14u8, d15u8, 1); + + q1u16 = vmlal_u8(q1u16, d11u8, d1u8); + q3u16 = vmlal_u8(q3u16, d14u8, d1u8); + + d12u8 = vext_u8(d12u8, d13u8, 1); + d15u8 = vext_u8(d15u8, d16u8, 1); + + q2u16 = vmlal_u8(q2u16, d12u8, d1u8); + q4u16 = vmlal_u8(q4u16, d15u8, d1u8); + + d10u8 = 
vqrshrn_n_u16(q9u16, 7); + d11u8 = vqrshrn_n_u16(q10u16, 7); + d12u8 = vqrshrn_n_u16(q11u16, 7); + d13u8 = vqrshrn_n_u16(q12u16, 7); + d14u8 = vqrshrn_n_u16(q13u16, 7); + d15u8 = vqrshrn_n_u16(q14u16, 7); + d16u8 = vqrshrn_n_u16(q1u16, 7); + d17u8 = vqrshrn_n_u16(q2u16, 7); + d18u8 = vqrshrn_n_u16(q3u16, 7); + d19u8 = vqrshrn_n_u16(q4u16, 7); + + q5u8 = vcombine_u8(d10u8, d11u8); + q6u8 = vcombine_u8(d12u8, d13u8); + q7u8 = vcombine_u8(d14u8, d15u8); + q8u8 = vcombine_u8(d16u8, d17u8); + q9u8 = vcombine_u8(d18u8, d19u8); + + vst1q_u8((uint8_t *)tmpp, q5u8); + tmpp += 16; + vst1q_u8((uint8_t *)tmpp, q6u8); + tmpp += 16; + vst1q_u8((uint8_t *)tmpp, q7u8); + tmpp += 16; + vst1q_u8((uint8_t *)tmpp, q8u8); + tmpp += 16; + vst1q_u8((uint8_t *)tmpp, q9u8); + + // secondpass_filter + d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]); + d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]); + + tmpp = tmp; + q11u8 = vld1q_u8(tmpp); + tmpp += 16; + for (i = 4; i > 0; i--) { + q12u8 = vld1q_u8(tmpp); + tmpp += 16; + q13u8 = vld1q_u8(tmpp); + tmpp += 16; + q14u8 = vld1q_u8(tmpp); + tmpp += 16; + q15u8 = vld1q_u8(tmpp); + tmpp += 16; + + q1u16 = vmull_u8(vget_low_u8(q11u8), d0u8); + q2u16 = vmull_u8(vget_high_u8(q11u8), d0u8); + q3u16 = vmull_u8(vget_low_u8(q12u8), d0u8); + q4u16 = vmull_u8(vget_high_u8(q12u8), d0u8); + q5u16 = vmull_u8(vget_low_u8(q13u8), d0u8); + q6u16 = vmull_u8(vget_high_u8(q13u8), d0u8); + q7u16 = vmull_u8(vget_low_u8(q14u8), d0u8); + q8u16 = vmull_u8(vget_high_u8(q14u8), d0u8); + + q1u16 = vmlal_u8(q1u16, vget_low_u8(q12u8), d1u8); + q2u16 = vmlal_u8(q2u16, vget_high_u8(q12u8), d1u8); + q3u16 = vmlal_u8(q3u16, vget_low_u8(q13u8), d1u8); + q4u16 = vmlal_u8(q4u16, vget_high_u8(q13u8), d1u8); + q5u16 = vmlal_u8(q5u16, vget_low_u8(q14u8), d1u8); + q6u16 = vmlal_u8(q6u16, vget_high_u8(q14u8), d1u8); + q7u16 = vmlal_u8(q7u16, vget_low_u8(q15u8), d1u8); + q8u16 = vmlal_u8(q8u16, vget_high_u8(q15u8), d1u8); + + d2u8 = vqrshrn_n_u16(q1u16, 7); + d3u8 = vqrshrn_n_u16(q2u16, 
7); + d4u8 = vqrshrn_n_u16(q3u16, 7); + d5u8 = vqrshrn_n_u16(q4u16, 7); + d6u8 = vqrshrn_n_u16(q5u16, 7); + d7u8 = vqrshrn_n_u16(q6u16, 7); + d8u8 = vqrshrn_n_u16(q7u16, 7); + d9u8 = vqrshrn_n_u16(q8u16, 7); + + q1u8 = vcombine_u8(d2u8, d3u8); + q2u8 = vcombine_u8(d4u8, d5u8); + q3u8 = vcombine_u8(d6u8, d7u8); + q4u8 = vcombine_u8(d8u8, d9u8); + + q11u8 = q15u8; + + vst1q_u8((uint8_t *)dst_ptr, q1u8); + dst_ptr += dst_pitch; + vst1q_u8((uint8_t *)dst_ptr, q2u8); + dst_ptr += dst_pitch; + vst1q_u8((uint8_t *)dst_ptr, q3u8); + dst_ptr += dst_pitch; + vst1q_u8((uint8_t *)dst_ptr, q4u8); + dst_ptr += dst_pitch; + } + return; } diff --git a/libs/libvpx/vp8/common/arm/neon/copymem_neon.c b/libs/libvpx/vp8/common/arm/neon/copymem_neon.c index deced115c1..c1d293b58d 100644 --- a/libs/libvpx/vp8/common/arm/neon/copymem_neon.c +++ b/libs/libvpx/vp8/common/arm/neon/copymem_neon.c @@ -10,50 +10,41 @@ #include -void vp8_copy_mem8x4_neon( - unsigned char *src, - int src_stride, - unsigned char *dst, - int dst_stride) { - uint8x8_t vtmp; - int r; +void vp8_copy_mem8x4_neon(unsigned char *src, int src_stride, + unsigned char *dst, int dst_stride) { + uint8x8_t vtmp; + int r; - for (r = 0; r < 4; r++) { - vtmp = vld1_u8(src); - vst1_u8(dst, vtmp); - src += src_stride; - dst += dst_stride; - } + for (r = 0; r < 4; ++r) { + vtmp = vld1_u8(src); + vst1_u8(dst, vtmp); + src += src_stride; + dst += dst_stride; + } } -void vp8_copy_mem8x8_neon( - unsigned char *src, - int src_stride, - unsigned char *dst, - int dst_stride) { - uint8x8_t vtmp; - int r; +void vp8_copy_mem8x8_neon(unsigned char *src, int src_stride, + unsigned char *dst, int dst_stride) { + uint8x8_t vtmp; + int r; - for (r = 0; r < 8; r++) { - vtmp = vld1_u8(src); - vst1_u8(dst, vtmp); - src += src_stride; - dst += dst_stride; - } + for (r = 0; r < 8; ++r) { + vtmp = vld1_u8(src); + vst1_u8(dst, vtmp); + src += src_stride; + dst += dst_stride; + } } -void vp8_copy_mem16x16_neon( - unsigned char *src, - int src_stride, - 
unsigned char *dst, - int dst_stride) { - int r; - uint8x16_t qtmp; +void vp8_copy_mem16x16_neon(unsigned char *src, int src_stride, + unsigned char *dst, int dst_stride) { + int r; + uint8x16_t qtmp; - for (r = 0; r < 16; r++) { - qtmp = vld1q_u8(src); - vst1q_u8(dst, qtmp); - src += src_stride; - dst += dst_stride; - } + for (r = 0; r < 16; ++r) { + qtmp = vld1q_u8(src); + vst1q_u8(dst, qtmp); + src += src_stride; + dst += dst_stride; + } } diff --git a/libs/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.c b/libs/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.c index ad5f41d7de..d3a53db21f 100644 --- a/libs/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.c +++ b/libs/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.c @@ -10,33 +10,30 @@ #include -void vp8_dc_only_idct_add_neon( - int16_t input_dc, - unsigned char *pred_ptr, - int pred_stride, - unsigned char *dst_ptr, - int dst_stride) { - int i; - uint16_t a1 = ((input_dc + 4) >> 3); - uint32x2_t d2u32 = vdup_n_u32(0); - uint8x8_t d2u8; - uint16x8_t q1u16; - uint16x8_t qAdd; +void vp8_dc_only_idct_add_neon(int16_t input_dc, unsigned char *pred_ptr, + int pred_stride, unsigned char *dst_ptr, + int dst_stride) { + int i; + uint16_t a1 = ((input_dc + 4) >> 3); + uint32x2_t d2u32 = vdup_n_u32(0); + uint8x8_t d2u8; + uint16x8_t q1u16; + uint16x8_t qAdd; - qAdd = vdupq_n_u16(a1); + qAdd = vdupq_n_u16(a1); - for (i = 0; i < 2; i++) { - d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 0); - pred_ptr += pred_stride; - d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 1); - pred_ptr += pred_stride; + for (i = 0; i < 2; ++i) { + d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 0); + pred_ptr += pred_stride; + d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 1); + pred_ptr += pred_stride; - q1u16 = vaddw_u8(qAdd, vreinterpret_u8_u32(d2u32)); - d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16)); + q1u16 = vaddw_u8(qAdd, vreinterpret_u8_u32(d2u32)); + d2u8 = 
vqmovun_s16(vreinterpretq_s16_u16(q1u16)); - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 0); - dst_ptr += dst_stride; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 1); - dst_ptr += dst_stride; - } + vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 0); + dst_ptr += dst_stride; + vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 1); + dst_ptr += dst_stride; + } } diff --git a/libs/libvpx/vp8/common/arm/neon/dequant_idct_neon.c b/libs/libvpx/vp8/common/arm/neon/dequant_idct_neon.c index 58e11922c7..ff5981eaac 100644 --- a/libs/libvpx/vp8/common/arm/neon/dequant_idct_neon.c +++ b/libs/libvpx/vp8/common/arm/neon/dequant_idct_neon.c @@ -11,132 +11,129 @@ #include static const int16_t cospi8sqrt2minus1 = 20091; -static const int16_t sinpi8sqrt2 = 35468; +static const int16_t sinpi8sqrt2 = 35468; -void vp8_dequant_idct_add_neon( - int16_t *input, - int16_t *dq, - unsigned char *dst, - int stride) { - unsigned char *dst0; - int32x2_t d14, d15; - int16x4_t d2, d3, d4, d5, d10, d11, d12, d13; - int16x8_t q1, q2, q3, q4, q5, q6; - int16x8_t qEmpty = vdupq_n_s16(0); - int32x2x2_t d2tmp0, d2tmp1; - int16x4x2_t d2tmp2, d2tmp3; +void vp8_dequant_idct_add_neon(int16_t *input, int16_t *dq, unsigned char *dst, + int stride) { + unsigned char *dst0; + int32x2_t d14, d15; + int16x4_t d2, d3, d4, d5, d10, d11, d12, d13; + int16x8_t q1, q2, q3, q4, q5, q6; + int16x8_t qEmpty = vdupq_n_s16(0); + int32x2x2_t d2tmp0, d2tmp1; + int16x4x2_t d2tmp2, d2tmp3; - d14 = d15 = vdup_n_s32(0); + d14 = d15 = vdup_n_s32(0); - // load input - q3 = vld1q_s16(input); - vst1q_s16(input, qEmpty); - input += 8; - q4 = vld1q_s16(input); - vst1q_s16(input, qEmpty); + // load input + q3 = vld1q_s16(input); + vst1q_s16(input, qEmpty); + input += 8; + q4 = vld1q_s16(input); + vst1q_s16(input, qEmpty); - // load dq - q5 = vld1q_s16(dq); - dq += 8; - q6 = vld1q_s16(dq); + // load dq + q5 = vld1q_s16(dq); + dq += 8; + q6 = vld1q_s16(dq); - // load src from 
dst - dst0 = dst; - d14 = vld1_lane_s32((const int32_t *)dst0, d14, 0); - dst0 += stride; - d14 = vld1_lane_s32((const int32_t *)dst0, d14, 1); - dst0 += stride; - d15 = vld1_lane_s32((const int32_t *)dst0, d15, 0); - dst0 += stride; - d15 = vld1_lane_s32((const int32_t *)dst0, d15, 1); + // load src from dst + dst0 = dst; + d14 = vld1_lane_s32((const int32_t *)dst0, d14, 0); + dst0 += stride; + d14 = vld1_lane_s32((const int32_t *)dst0, d14, 1); + dst0 += stride; + d15 = vld1_lane_s32((const int32_t *)dst0, d15, 0); + dst0 += stride; + d15 = vld1_lane_s32((const int32_t *)dst0, d15, 1); - q1 = vreinterpretq_s16_u16(vmulq_u16(vreinterpretq_u16_s16(q3), - vreinterpretq_u16_s16(q5))); - q2 = vreinterpretq_s16_u16(vmulq_u16(vreinterpretq_u16_s16(q4), - vreinterpretq_u16_s16(q6))); + q1 = vreinterpretq_s16_u16( + vmulq_u16(vreinterpretq_u16_s16(q3), vreinterpretq_u16_s16(q5))); + q2 = vreinterpretq_s16_u16( + vmulq_u16(vreinterpretq_u16_s16(q4), vreinterpretq_u16_s16(q6))); - d12 = vqadd_s16(vget_low_s16(q1), vget_low_s16(q2)); - d13 = vqsub_s16(vget_low_s16(q1), vget_low_s16(q2)); + d12 = vqadd_s16(vget_low_s16(q1), vget_low_s16(q2)); + d13 = vqsub_s16(vget_low_s16(q1), vget_low_s16(q2)); - q2 = vcombine_s16(vget_high_s16(q1), vget_high_s16(q2)); + q2 = vcombine_s16(vget_high_s16(q1), vget_high_s16(q2)); - q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2); - q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1); + q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2); + q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1); - q3 = vshrq_n_s16(q3, 1); - q4 = vshrq_n_s16(q4, 1); + q3 = vshrq_n_s16(q3, 1); + q4 = vshrq_n_s16(q4, 1); - q3 = vqaddq_s16(q3, q2); - q4 = vqaddq_s16(q4, q2); + q3 = vqaddq_s16(q3, q2); + q4 = vqaddq_s16(q4, q2); - d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4)); - d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4)); + d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4)); + d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4)); - d2 = vqadd_s16(d12, d11); - d3 = vqadd_s16(d13, d10); - d4 = 
vqsub_s16(d13, d10); - d5 = vqsub_s16(d12, d11); + d2 = vqadd_s16(d12, d11); + d3 = vqadd_s16(d13, d10); + d4 = vqsub_s16(d13, d10); + d5 = vqsub_s16(d12, d11); - d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4)); - d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5)); - d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]), - vreinterpret_s16_s32(d2tmp1.val[0])); - d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]), - vreinterpret_s16_s32(d2tmp1.val[1])); + d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4)); + d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5)); + d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]), + vreinterpret_s16_s32(d2tmp1.val[0])); + d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]), + vreinterpret_s16_s32(d2tmp1.val[1])); - // loop 2 - q2 = vcombine_s16(d2tmp2.val[1], d2tmp3.val[1]); + // loop 2 + q2 = vcombine_s16(d2tmp2.val[1], d2tmp3.val[1]); - q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2); - q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1); + q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2); + q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1); - d12 = vqadd_s16(d2tmp2.val[0], d2tmp3.val[0]); - d13 = vqsub_s16(d2tmp2.val[0], d2tmp3.val[0]); + d12 = vqadd_s16(d2tmp2.val[0], d2tmp3.val[0]); + d13 = vqsub_s16(d2tmp2.val[0], d2tmp3.val[0]); - q3 = vshrq_n_s16(q3, 1); - q4 = vshrq_n_s16(q4, 1); + q3 = vshrq_n_s16(q3, 1); + q4 = vshrq_n_s16(q4, 1); - q3 = vqaddq_s16(q3, q2); - q4 = vqaddq_s16(q4, q2); + q3 = vqaddq_s16(q3, q2); + q4 = vqaddq_s16(q4, q2); - d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4)); - d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4)); + d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4)); + d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4)); - d2 = vqadd_s16(d12, d11); - d3 = vqadd_s16(d13, d10); - d4 = vqsub_s16(d13, d10); - d5 = vqsub_s16(d12, d11); + d2 = vqadd_s16(d12, d11); + d3 = vqadd_s16(d13, d10); + d4 = vqsub_s16(d13, d10); + d5 = vqsub_s16(d12, d11); 
- d2 = vrshr_n_s16(d2, 3); - d3 = vrshr_n_s16(d3, 3); - d4 = vrshr_n_s16(d4, 3); - d5 = vrshr_n_s16(d5, 3); + d2 = vrshr_n_s16(d2, 3); + d3 = vrshr_n_s16(d3, 3); + d4 = vrshr_n_s16(d4, 3); + d5 = vrshr_n_s16(d5, 3); - d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4)); - d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5)); - d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]), - vreinterpret_s16_s32(d2tmp1.val[0])); - d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]), - vreinterpret_s16_s32(d2tmp1.val[1])); + d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4)); + d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5)); + d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]), + vreinterpret_s16_s32(d2tmp1.val[0])); + d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]), + vreinterpret_s16_s32(d2tmp1.val[1])); - q1 = vcombine_s16(d2tmp2.val[0], d2tmp2.val[1]); - q2 = vcombine_s16(d2tmp3.val[0], d2tmp3.val[1]); + q1 = vcombine_s16(d2tmp2.val[0], d2tmp2.val[1]); + q2 = vcombine_s16(d2tmp3.val[0], d2tmp3.val[1]); - q1 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q1), - vreinterpret_u8_s32(d14))); - q2 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2), - vreinterpret_u8_s32(d15))); + q1 = vreinterpretq_s16_u16( + vaddw_u8(vreinterpretq_u16_s16(q1), vreinterpret_u8_s32(d14))); + q2 = vreinterpretq_s16_u16( + vaddw_u8(vreinterpretq_u16_s16(q2), vreinterpret_u8_s32(d15))); - d14 = vreinterpret_s32_u8(vqmovun_s16(q1)); - d15 = vreinterpret_s32_u8(vqmovun_s16(q2)); + d14 = vreinterpret_s32_u8(vqmovun_s16(q1)); + d15 = vreinterpret_s32_u8(vqmovun_s16(q2)); - dst0 = dst; - vst1_lane_s32((int32_t *)dst0, d14, 0); - dst0 += stride; - vst1_lane_s32((int32_t *)dst0, d14, 1); - dst0 += stride; - vst1_lane_s32((int32_t *)dst0, d15, 0); - dst0 += stride; - vst1_lane_s32((int32_t *)dst0, d15, 1); - return; + dst0 = dst; + vst1_lane_s32((int32_t *)dst0, d14, 0); + dst0 += stride; + 
vst1_lane_s32((int32_t *)dst0, d14, 1); + dst0 += stride; + vst1_lane_s32((int32_t *)dst0, d15, 0); + dst0 += stride; + vst1_lane_s32((int32_t *)dst0, d15, 1); + return; } diff --git a/libs/libvpx/vp8/common/arm/neon/dequantizeb_neon.c b/libs/libvpx/vp8/common/arm/neon/dequantizeb_neon.c index 54e709dd3c..6edff3c69f 100644 --- a/libs/libvpx/vp8/common/arm/neon/dequantizeb_neon.c +++ b/libs/libvpx/vp8/common/arm/neon/dequantizeb_neon.c @@ -13,13 +13,13 @@ #include "vp8/common/blockd.h" void vp8_dequantize_b_neon(BLOCKD *d, short *DQC) { - int16x8x2_t qQ, qDQC, qDQ; + int16x8x2_t qQ, qDQC, qDQ; - qQ = vld2q_s16(d->qcoeff); - qDQC = vld2q_s16(DQC); + qQ = vld2q_s16(d->qcoeff); + qDQC = vld2q_s16(DQC); - qDQ.val[0] = vmulq_s16(qQ.val[0], qDQC.val[0]); - qDQ.val[1] = vmulq_s16(qQ.val[1], qDQC.val[1]); + qDQ.val[0] = vmulq_s16(qQ.val[0], qDQC.val[0]); + qDQ.val[1] = vmulq_s16(qQ.val[1], qDQC.val[1]); - vst2q_s16(d->dqcoeff, qDQ); + vst2q_s16(d->dqcoeff, qDQ); } diff --git a/libs/libvpx/vp8/common/arm/neon/idct_blk_neon.c b/libs/libvpx/vp8/common/arm/neon/idct_blk_neon.c index fb327a7260..d61dde86cf 100644 --- a/libs/libvpx/vp8/common/arm/neon/idct_blk_neon.c +++ b/libs/libvpx/vp8/common/arm/neon/idct_blk_neon.c @@ -14,83 +14,71 @@ /* place these declarations here because we don't want to maintain them * outside of this scope */ -void idct_dequant_full_2x_neon(short *q, short *dq, - unsigned char *dst, int stride); -void idct_dequant_0_2x_neon(short *q, short dq, - unsigned char *dst, int stride); +void idct_dequant_full_2x_neon(short *q, short *dq, unsigned char *dst, + int stride); +void idct_dequant_0_2x_neon(short *q, short dq, unsigned char *dst, int stride); +void vp8_dequant_idct_add_y_block_neon(short *q, short *dq, unsigned char *dst, + int stride, char *eobs) { + int i; -void vp8_dequant_idct_add_y_block_neon(short *q, short *dq, - unsigned char *dst, - int stride, char *eobs) -{ - int i; - - for (i = 0; i < 4; i++) - { - if (((short *)(eobs))[0]) - { - if 
(((short *)eobs)[0] & 0xfefe) - idct_dequant_full_2x_neon (q, dq, dst, stride); - else - idct_dequant_0_2x_neon (q, dq[0], dst, stride); - } - - if (((short *)(eobs))[1]) - { - if (((short *)eobs)[1] & 0xfefe) - idct_dequant_full_2x_neon (q+32, dq, dst+8, stride); - else - idct_dequant_0_2x_neon (q+32, dq[0], dst+8, stride); - } - q += 64; - dst += 4*stride; - eobs += 4; + for (i = 0; i < 4; ++i) { + if (((short *)(eobs))[0]) { + if (((short *)eobs)[0] & 0xfefe) + idct_dequant_full_2x_neon(q, dq, dst, stride); + else + idct_dequant_0_2x_neon(q, dq[0], dst, stride); } + + if (((short *)(eobs))[1]) { + if (((short *)eobs)[1] & 0xfefe) + idct_dequant_full_2x_neon(q + 32, dq, dst + 8, stride); + else + idct_dequant_0_2x_neon(q + 32, dq[0], dst + 8, stride); + } + q += 64; + dst += 4 * stride; + eobs += 4; + } } void vp8_dequant_idct_add_uv_block_neon(short *q, short *dq, unsigned char *dstu, - unsigned char *dstv, - int stride, char *eobs) -{ - if (((short *)(eobs))[0]) - { - if (((short *)eobs)[0] & 0xfefe) - idct_dequant_full_2x_neon (q, dq, dstu, stride); - else - idct_dequant_0_2x_neon (q, dq[0], dstu, stride); - } + unsigned char *dstv, int stride, + char *eobs) { + if (((short *)(eobs))[0]) { + if (((short *)eobs)[0] & 0xfefe) + idct_dequant_full_2x_neon(q, dq, dstu, stride); + else + idct_dequant_0_2x_neon(q, dq[0], dstu, stride); + } - q += 32; - dstu += 4*stride; + q += 32; + dstu += 4 * stride; - if (((short *)(eobs))[1]) - { - if (((short *)eobs)[1] & 0xfefe) - idct_dequant_full_2x_neon (q, dq, dstu, stride); - else - idct_dequant_0_2x_neon (q, dq[0], dstu, stride); - } + if (((short *)(eobs))[1]) { + if (((short *)eobs)[1] & 0xfefe) + idct_dequant_full_2x_neon(q, dq, dstu, stride); + else + idct_dequant_0_2x_neon(q, dq[0], dstu, stride); + } - q += 32; + q += 32; - if (((short *)(eobs))[2]) - { - if (((short *)eobs)[2] & 0xfefe) - idct_dequant_full_2x_neon (q, dq, dstv, stride); - else - idct_dequant_0_2x_neon (q, dq[0], dstv, stride); - } + if (((short 
*)(eobs))[2]) { + if (((short *)eobs)[2] & 0xfefe) + idct_dequant_full_2x_neon(q, dq, dstv, stride); + else + idct_dequant_0_2x_neon(q, dq[0], dstv, stride); + } - q += 32; - dstv += 4*stride; + q += 32; + dstv += 4 * stride; - if (((short *)(eobs))[3]) - { - if (((short *)eobs)[3] & 0xfefe) - idct_dequant_full_2x_neon (q, dq, dstv, stride); - else - idct_dequant_0_2x_neon (q, dq[0], dstv, stride); - } + if (((short *)(eobs))[3]) { + if (((short *)eobs)[3] & 0xfefe) + idct_dequant_full_2x_neon(q, dq, dstv, stride); + else + idct_dequant_0_2x_neon(q, dq[0], dstv, stride); + } } diff --git a/libs/libvpx/vp8/common/arm/neon/idct_dequant_0_2x_neon.c b/libs/libvpx/vp8/common/arm/neon/idct_dequant_0_2x_neon.c index e6f862fa89..c83102a5cc 100644 --- a/libs/libvpx/vp8/common/arm/neon/idct_dequant_0_2x_neon.c +++ b/libs/libvpx/vp8/common/arm/neon/idct_dequant_0_2x_neon.c @@ -10,54 +10,50 @@ #include -void idct_dequant_0_2x_neon( - int16_t *q, - int16_t dq, - unsigned char *dst, - int stride) { - unsigned char *dst0; - int i, a0, a1; - int16x8x2_t q2Add; - int32x2_t d2s32 = vdup_n_s32(0), - d4s32 = vdup_n_s32(0); - uint8x8_t d2u8, d4u8; - uint16x8_t q1u16, q2u16; +void idct_dequant_0_2x_neon(int16_t *q, int16_t dq, unsigned char *dst, + int stride) { + unsigned char *dst0; + int i, a0, a1; + int16x8x2_t q2Add; + int32x2_t d2s32 = vdup_n_s32(0), d4s32 = vdup_n_s32(0); + uint8x8_t d2u8, d4u8; + uint16x8_t q1u16, q2u16; - a0 = ((q[0] * dq) + 4) >> 3; - a1 = ((q[16] * dq) + 4) >> 3; - q[0] = q[16] = 0; - q2Add.val[0] = vdupq_n_s16((int16_t)a0); - q2Add.val[1] = vdupq_n_s16((int16_t)a1); + a0 = ((q[0] * dq) + 4) >> 3; + a1 = ((q[16] * dq) + 4) >> 3; + q[0] = q[16] = 0; + q2Add.val[0] = vdupq_n_s16((int16_t)a0); + q2Add.val[1] = vdupq_n_s16((int16_t)a1); - for (i = 0; i < 2; i++, dst += 4) { - dst0 = dst; - d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 0); - dst0 += stride; - d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 1); - dst0 += stride; - d4s32 = 
vld1_lane_s32((const int32_t *)dst0, d4s32, 0); - dst0 += stride; - d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 1); + for (i = 0; i < 2; i++, dst += 4) { + dst0 = dst; + d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 0); + dst0 += stride; + d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 1); + dst0 += stride; + d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 0); + dst0 += stride; + d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 1); - q1u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]), - vreinterpret_u8_s32(d2s32)); - q2u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]), - vreinterpret_u8_s32(d4s32)); + q1u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]), + vreinterpret_u8_s32(d2s32)); + q2u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]), + vreinterpret_u8_s32(d4s32)); - d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16)); - d4u8 = vqmovun_s16(vreinterpretq_s16_u16(q2u16)); + d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16)); + d4u8 = vqmovun_s16(vreinterpretq_s16_u16(q2u16)); - d2s32 = vreinterpret_s32_u8(d2u8); - d4s32 = vreinterpret_s32_u8(d4u8); + d2s32 = vreinterpret_s32_u8(d2u8); + d4s32 = vreinterpret_s32_u8(d4u8); - dst0 = dst; - vst1_lane_s32((int32_t *)dst0, d2s32, 0); - dst0 += stride; - vst1_lane_s32((int32_t *)dst0, d2s32, 1); - dst0 += stride; - vst1_lane_s32((int32_t *)dst0, d4s32, 0); - dst0 += stride; - vst1_lane_s32((int32_t *)dst0, d4s32, 1); - } - return; + dst0 = dst; + vst1_lane_s32((int32_t *)dst0, d2s32, 0); + dst0 += stride; + vst1_lane_s32((int32_t *)dst0, d2s32, 1); + dst0 += stride; + vst1_lane_s32((int32_t *)dst0, d4s32, 0); + dst0 += stride; + vst1_lane_s32((int32_t *)dst0, d4s32, 1); + } + return; } diff --git a/libs/libvpx/vp8/common/arm/neon/idct_dequant_full_2x_neon.c b/libs/libvpx/vp8/common/arm/neon/idct_dequant_full_2x_neon.c index a60ed46b76..f30671cc3f 100644 --- a/libs/libvpx/vp8/common/arm/neon/idct_dequant_full_2x_neon.c +++ b/libs/libvpx/vp8/common/arm/neon/idct_dequant_full_2x_neon.c @@ -11,175 
+11,172 @@ #include static const int16_t cospi8sqrt2minus1 = 20091; -static const int16_t sinpi8sqrt2 = 17734; +static const int16_t sinpi8sqrt2 = 17734; // because the lowest bit in 0x8a8c is 0, we can pre-shift this -void idct_dequant_full_2x_neon( - int16_t *q, - int16_t *dq, - unsigned char *dst, - int stride) { - unsigned char *dst0, *dst1; - int32x2_t d28, d29, d30, d31; - int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11; - int16x8_t qEmpty = vdupq_n_s16(0); - int32x4x2_t q2tmp0, q2tmp1; - int16x8x2_t q2tmp2, q2tmp3; - int16x4_t dLow0, dLow1, dHigh0, dHigh1; +void idct_dequant_full_2x_neon(int16_t *q, int16_t *dq, unsigned char *dst, + int stride) { + unsigned char *dst0, *dst1; + int32x2_t d28, d29, d30, d31; + int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11; + int16x8_t qEmpty = vdupq_n_s16(0); + int32x4x2_t q2tmp0, q2tmp1; + int16x8x2_t q2tmp2, q2tmp3; + int16x4_t dLow0, dLow1, dHigh0, dHigh1; - d28 = d29 = d30 = d31 = vdup_n_s32(0); + d28 = d29 = d30 = d31 = vdup_n_s32(0); - // load dq - q0 = vld1q_s16(dq); - dq += 8; - q1 = vld1q_s16(dq); + // load dq + q0 = vld1q_s16(dq); + dq += 8; + q1 = vld1q_s16(dq); - // load q - q2 = vld1q_s16(q); - vst1q_s16(q, qEmpty); - q += 8; - q3 = vld1q_s16(q); - vst1q_s16(q, qEmpty); - q += 8; - q4 = vld1q_s16(q); - vst1q_s16(q, qEmpty); - q += 8; - q5 = vld1q_s16(q); - vst1q_s16(q, qEmpty); + // load q + q2 = vld1q_s16(q); + vst1q_s16(q, qEmpty); + q += 8; + q3 = vld1q_s16(q); + vst1q_s16(q, qEmpty); + q += 8; + q4 = vld1q_s16(q); + vst1q_s16(q, qEmpty); + q += 8; + q5 = vld1q_s16(q); + vst1q_s16(q, qEmpty); - // load src from dst - dst0 = dst; - dst1 = dst + 4; - d28 = vld1_lane_s32((const int32_t *)dst0, d28, 0); - dst0 += stride; - d28 = vld1_lane_s32((const int32_t *)dst1, d28, 1); - dst1 += stride; - d29 = vld1_lane_s32((const int32_t *)dst0, d29, 0); - dst0 += stride; - d29 = vld1_lane_s32((const int32_t *)dst1, d29, 1); - dst1 += stride; + // load src from dst + dst0 = dst; + dst1 = dst + 4; + 
d28 = vld1_lane_s32((const int32_t *)dst0, d28, 0); + dst0 += stride; + d28 = vld1_lane_s32((const int32_t *)dst1, d28, 1); + dst1 += stride; + d29 = vld1_lane_s32((const int32_t *)dst0, d29, 0); + dst0 += stride; + d29 = vld1_lane_s32((const int32_t *)dst1, d29, 1); + dst1 += stride; - d30 = vld1_lane_s32((const int32_t *)dst0, d30, 0); - dst0 += stride; - d30 = vld1_lane_s32((const int32_t *)dst1, d30, 1); - dst1 += stride; - d31 = vld1_lane_s32((const int32_t *)dst0, d31, 0); - d31 = vld1_lane_s32((const int32_t *)dst1, d31, 1); + d30 = vld1_lane_s32((const int32_t *)dst0, d30, 0); + dst0 += stride; + d30 = vld1_lane_s32((const int32_t *)dst1, d30, 1); + dst1 += stride; + d31 = vld1_lane_s32((const int32_t *)dst0, d31, 0); + d31 = vld1_lane_s32((const int32_t *)dst1, d31, 1); - q2 = vmulq_s16(q2, q0); - q3 = vmulq_s16(q3, q1); - q4 = vmulq_s16(q4, q0); - q5 = vmulq_s16(q5, q1); + q2 = vmulq_s16(q2, q0); + q3 = vmulq_s16(q3, q1); + q4 = vmulq_s16(q4, q0); + q5 = vmulq_s16(q5, q1); - // vswp - dLow0 = vget_low_s16(q2); - dHigh0 = vget_high_s16(q2); - dLow1 = vget_low_s16(q4); - dHigh1 = vget_high_s16(q4); - q2 = vcombine_s16(dLow0, dLow1); - q4 = vcombine_s16(dHigh0, dHigh1); + // vswp + dLow0 = vget_low_s16(q2); + dHigh0 = vget_high_s16(q2); + dLow1 = vget_low_s16(q4); + dHigh1 = vget_high_s16(q4); + q2 = vcombine_s16(dLow0, dLow1); + q4 = vcombine_s16(dHigh0, dHigh1); - dLow0 = vget_low_s16(q3); - dHigh0 = vget_high_s16(q3); - dLow1 = vget_low_s16(q5); - dHigh1 = vget_high_s16(q5); - q3 = vcombine_s16(dLow0, dLow1); - q5 = vcombine_s16(dHigh0, dHigh1); + dLow0 = vget_low_s16(q3); + dHigh0 = vget_high_s16(q3); + dLow1 = vget_low_s16(q5); + dHigh1 = vget_high_s16(q5); + q3 = vcombine_s16(dLow0, dLow1); + q5 = vcombine_s16(dHigh0, dHigh1); - q6 = vqdmulhq_n_s16(q4, sinpi8sqrt2); - q7 = vqdmulhq_n_s16(q5, sinpi8sqrt2); - q8 = vqdmulhq_n_s16(q4, cospi8sqrt2minus1); - q9 = vqdmulhq_n_s16(q5, cospi8sqrt2minus1); + q6 = vqdmulhq_n_s16(q4, sinpi8sqrt2); + q7 = 
vqdmulhq_n_s16(q5, sinpi8sqrt2); + q8 = vqdmulhq_n_s16(q4, cospi8sqrt2minus1); + q9 = vqdmulhq_n_s16(q5, cospi8sqrt2minus1); - q10 = vqaddq_s16(q2, q3); - q11 = vqsubq_s16(q2, q3); + q10 = vqaddq_s16(q2, q3); + q11 = vqsubq_s16(q2, q3); - q8 = vshrq_n_s16(q8, 1); - q9 = vshrq_n_s16(q9, 1); + q8 = vshrq_n_s16(q8, 1); + q9 = vshrq_n_s16(q9, 1); - q4 = vqaddq_s16(q4, q8); - q5 = vqaddq_s16(q5, q9); + q4 = vqaddq_s16(q4, q8); + q5 = vqaddq_s16(q5, q9); - q2 = vqsubq_s16(q6, q5); - q3 = vqaddq_s16(q7, q4); + q2 = vqsubq_s16(q6, q5); + q3 = vqaddq_s16(q7, q4); - q4 = vqaddq_s16(q10, q3); - q5 = vqaddq_s16(q11, q2); - q6 = vqsubq_s16(q11, q2); - q7 = vqsubq_s16(q10, q3); + q4 = vqaddq_s16(q10, q3); + q5 = vqaddq_s16(q11, q2); + q6 = vqsubq_s16(q11, q2); + q7 = vqsubq_s16(q10, q3); - q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6)); - q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7)); - q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]), - vreinterpretq_s16_s32(q2tmp1.val[0])); - q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]), - vreinterpretq_s16_s32(q2tmp1.val[1])); + q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6)); + q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7)); + q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]), + vreinterpretq_s16_s32(q2tmp1.val[0])); + q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]), + vreinterpretq_s16_s32(q2tmp1.val[1])); - // loop 2 - q8 = vqdmulhq_n_s16(q2tmp2.val[1], sinpi8sqrt2); - q9 = vqdmulhq_n_s16(q2tmp3.val[1], sinpi8sqrt2); - q10 = vqdmulhq_n_s16(q2tmp2.val[1], cospi8sqrt2minus1); - q11 = vqdmulhq_n_s16(q2tmp3.val[1], cospi8sqrt2minus1); + // loop 2 + q8 = vqdmulhq_n_s16(q2tmp2.val[1], sinpi8sqrt2); + q9 = vqdmulhq_n_s16(q2tmp3.val[1], sinpi8sqrt2); + q10 = vqdmulhq_n_s16(q2tmp2.val[1], cospi8sqrt2minus1); + q11 = vqdmulhq_n_s16(q2tmp3.val[1], cospi8sqrt2minus1); - q2 = vqaddq_s16(q2tmp2.val[0], q2tmp3.val[0]); - 
q3 = vqsubq_s16(q2tmp2.val[0], q2tmp3.val[0]); + q2 = vqaddq_s16(q2tmp2.val[0], q2tmp3.val[0]); + q3 = vqsubq_s16(q2tmp2.val[0], q2tmp3.val[0]); - q10 = vshrq_n_s16(q10, 1); - q11 = vshrq_n_s16(q11, 1); + q10 = vshrq_n_s16(q10, 1); + q11 = vshrq_n_s16(q11, 1); - q10 = vqaddq_s16(q2tmp2.val[1], q10); - q11 = vqaddq_s16(q2tmp3.val[1], q11); + q10 = vqaddq_s16(q2tmp2.val[1], q10); + q11 = vqaddq_s16(q2tmp3.val[1], q11); - q8 = vqsubq_s16(q8, q11); - q9 = vqaddq_s16(q9, q10); + q8 = vqsubq_s16(q8, q11); + q9 = vqaddq_s16(q9, q10); - q4 = vqaddq_s16(q2, q9); - q5 = vqaddq_s16(q3, q8); - q6 = vqsubq_s16(q3, q8); - q7 = vqsubq_s16(q2, q9); + q4 = vqaddq_s16(q2, q9); + q5 = vqaddq_s16(q3, q8); + q6 = vqsubq_s16(q3, q8); + q7 = vqsubq_s16(q2, q9); - q4 = vrshrq_n_s16(q4, 3); - q5 = vrshrq_n_s16(q5, 3); - q6 = vrshrq_n_s16(q6, 3); - q7 = vrshrq_n_s16(q7, 3); + q4 = vrshrq_n_s16(q4, 3); + q5 = vrshrq_n_s16(q5, 3); + q6 = vrshrq_n_s16(q6, 3); + q7 = vrshrq_n_s16(q7, 3); - q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6)); - q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7)); - q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]), - vreinterpretq_s16_s32(q2tmp1.val[0])); - q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]), - vreinterpretq_s16_s32(q2tmp1.val[1])); + q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6)); + q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7)); + q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]), + vreinterpretq_s16_s32(q2tmp1.val[0])); + q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]), + vreinterpretq_s16_s32(q2tmp1.val[1])); - q4 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[0]), - vreinterpret_u8_s32(d28))); - q5 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[1]), - vreinterpret_u8_s32(d29))); - q6 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[0]), - vreinterpret_u8_s32(d30))); - q7 
= vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[1]), - vreinterpret_u8_s32(d31))); + q4 = vreinterpretq_s16_u16( + vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[0]), vreinterpret_u8_s32(d28))); + q5 = vreinterpretq_s16_u16( + vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[1]), vreinterpret_u8_s32(d29))); + q6 = vreinterpretq_s16_u16( + vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[0]), vreinterpret_u8_s32(d30))); + q7 = vreinterpretq_s16_u16( + vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[1]), vreinterpret_u8_s32(d31))); - d28 = vreinterpret_s32_u8(vqmovun_s16(q4)); - d29 = vreinterpret_s32_u8(vqmovun_s16(q5)); - d30 = vreinterpret_s32_u8(vqmovun_s16(q6)); - d31 = vreinterpret_s32_u8(vqmovun_s16(q7)); + d28 = vreinterpret_s32_u8(vqmovun_s16(q4)); + d29 = vreinterpret_s32_u8(vqmovun_s16(q5)); + d30 = vreinterpret_s32_u8(vqmovun_s16(q6)); + d31 = vreinterpret_s32_u8(vqmovun_s16(q7)); - dst0 = dst; - dst1 = dst + 4; - vst1_lane_s32((int32_t *)dst0, d28, 0); - dst0 += stride; - vst1_lane_s32((int32_t *)dst1, d28, 1); - dst1 += stride; - vst1_lane_s32((int32_t *)dst0, d29, 0); - dst0 += stride; - vst1_lane_s32((int32_t *)dst1, d29, 1); - dst1 += stride; + dst0 = dst; + dst1 = dst + 4; + vst1_lane_s32((int32_t *)dst0, d28, 0); + dst0 += stride; + vst1_lane_s32((int32_t *)dst1, d28, 1); + dst1 += stride; + vst1_lane_s32((int32_t *)dst0, d29, 0); + dst0 += stride; + vst1_lane_s32((int32_t *)dst1, d29, 1); + dst1 += stride; - vst1_lane_s32((int32_t *)dst0, d30, 0); - dst0 += stride; - vst1_lane_s32((int32_t *)dst1, d30, 1); - dst1 += stride; - vst1_lane_s32((int32_t *)dst0, d31, 0); - vst1_lane_s32((int32_t *)dst1, d31, 1); - return; + vst1_lane_s32((int32_t *)dst0, d30, 0); + dst0 += stride; + vst1_lane_s32((int32_t *)dst1, d30, 1); + dst1 += stride; + vst1_lane_s32((int32_t *)dst0, d31, 0); + vst1_lane_s32((int32_t *)dst1, d31, 1); + return; } diff --git a/libs/libvpx/vp8/common/arm/neon/iwalsh_neon.c b/libs/libvpx/vp8/common/arm/neon/iwalsh_neon.c index 
6ea9dd712a..6c4bcc134b 100644 --- a/libs/libvpx/vp8/common/arm/neon/iwalsh_neon.c +++ b/libs/libvpx/vp8/common/arm/neon/iwalsh_neon.c @@ -10,93 +10,91 @@ #include -void vp8_short_inv_walsh4x4_neon( - int16_t *input, - int16_t *mb_dqcoeff) { - int16x8_t q0s16, q1s16, q2s16, q3s16; - int16x4_t d4s16, d5s16, d6s16, d7s16; - int16x4x2_t v2tmp0, v2tmp1; - int32x2x2_t v2tmp2, v2tmp3; - int16x8_t qAdd3; +void vp8_short_inv_walsh4x4_neon(int16_t *input, int16_t *mb_dqcoeff) { + int16x8_t q0s16, q1s16, q2s16, q3s16; + int16x4_t d4s16, d5s16, d6s16, d7s16; + int16x4x2_t v2tmp0, v2tmp1; + int32x2x2_t v2tmp2, v2tmp3; + int16x8_t qAdd3; - q0s16 = vld1q_s16(input); - q1s16 = vld1q_s16(input + 8); + q0s16 = vld1q_s16(input); + q1s16 = vld1q_s16(input + 8); - // 1st for loop - d4s16 = vadd_s16(vget_low_s16(q0s16), vget_high_s16(q1s16)); - d6s16 = vadd_s16(vget_high_s16(q0s16), vget_low_s16(q1s16)); - d5s16 = vsub_s16(vget_low_s16(q0s16), vget_high_s16(q1s16)); - d7s16 = vsub_s16(vget_high_s16(q0s16), vget_low_s16(q1s16)); + // 1st for loop + d4s16 = vadd_s16(vget_low_s16(q0s16), vget_high_s16(q1s16)); + d6s16 = vadd_s16(vget_high_s16(q0s16), vget_low_s16(q1s16)); + d5s16 = vsub_s16(vget_low_s16(q0s16), vget_high_s16(q1s16)); + d7s16 = vsub_s16(vget_high_s16(q0s16), vget_low_s16(q1s16)); - q2s16 = vcombine_s16(d4s16, d5s16); - q3s16 = vcombine_s16(d6s16, d7s16); + q2s16 = vcombine_s16(d4s16, d5s16); + q3s16 = vcombine_s16(d6s16, d7s16); - q0s16 = vaddq_s16(q2s16, q3s16); - q1s16 = vsubq_s16(q2s16, q3s16); + q0s16 = vaddq_s16(q2s16, q3s16); + q1s16 = vsubq_s16(q2s16, q3s16); - v2tmp2 = vtrn_s32(vreinterpret_s32_s16(vget_low_s16(q0s16)), - vreinterpret_s32_s16(vget_low_s16(q1s16))); - v2tmp3 = vtrn_s32(vreinterpret_s32_s16(vget_high_s16(q0s16)), - vreinterpret_s32_s16(vget_high_s16(q1s16))); - v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]), - vreinterpret_s16_s32(v2tmp3.val[0])); - v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]), - vreinterpret_s16_s32(v2tmp3.val[1])); + 
v2tmp2 = vtrn_s32(vreinterpret_s32_s16(vget_low_s16(q0s16)), + vreinterpret_s32_s16(vget_low_s16(q1s16))); + v2tmp3 = vtrn_s32(vreinterpret_s32_s16(vget_high_s16(q0s16)), + vreinterpret_s32_s16(vget_high_s16(q1s16))); + v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]), + vreinterpret_s16_s32(v2tmp3.val[0])); + v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]), + vreinterpret_s16_s32(v2tmp3.val[1])); - // 2nd for loop - d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]); - d6s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]); - d5s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]); - d7s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]); - q2s16 = vcombine_s16(d4s16, d5s16); - q3s16 = vcombine_s16(d6s16, d7s16); + // 2nd for loop + d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]); + d6s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]); + d5s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]); + d7s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]); + q2s16 = vcombine_s16(d4s16, d5s16); + q3s16 = vcombine_s16(d6s16, d7s16); - qAdd3 = vdupq_n_s16(3); + qAdd3 = vdupq_n_s16(3); - q0s16 = vaddq_s16(q2s16, q3s16); - q1s16 = vsubq_s16(q2s16, q3s16); + q0s16 = vaddq_s16(q2s16, q3s16); + q1s16 = vsubq_s16(q2s16, q3s16); - q0s16 = vaddq_s16(q0s16, qAdd3); - q1s16 = vaddq_s16(q1s16, qAdd3); + q0s16 = vaddq_s16(q0s16, qAdd3); + q1s16 = vaddq_s16(q1s16, qAdd3); - q0s16 = vshrq_n_s16(q0s16, 3); - q1s16 = vshrq_n_s16(q1s16, 3); + q0s16 = vshrq_n_s16(q0s16, 3); + q1s16 = vshrq_n_s16(q1s16, 3); - // store - vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 0); - mb_dqcoeff += 16; - vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 0); - mb_dqcoeff += 16; - vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 0); - mb_dqcoeff += 16; - vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 0); - mb_dqcoeff += 16; + // store + vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 0); + mb_dqcoeff += 16; + vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 0); + mb_dqcoeff += 16; + vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 0); + mb_dqcoeff += 16; 
+ vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 0); + mb_dqcoeff += 16; - vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 1); - mb_dqcoeff += 16; - vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 1); - mb_dqcoeff += 16; - vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 1); - mb_dqcoeff += 16; - vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 1); - mb_dqcoeff += 16; + vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 1); + mb_dqcoeff += 16; + vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 1); + mb_dqcoeff += 16; + vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 1); + mb_dqcoeff += 16; + vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 1); + mb_dqcoeff += 16; - vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 2); - mb_dqcoeff += 16; - vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 2); - mb_dqcoeff += 16; - vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 2); - mb_dqcoeff += 16; - vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 2); - mb_dqcoeff += 16; + vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 2); + mb_dqcoeff += 16; + vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 2); + mb_dqcoeff += 16; + vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 2); + mb_dqcoeff += 16; + vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 2); + mb_dqcoeff += 16; - vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 3); - mb_dqcoeff += 16; - vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 3); - mb_dqcoeff += 16; - vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 3); - mb_dqcoeff += 16; - vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 3); - mb_dqcoeff += 16; - return; + vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 3); + mb_dqcoeff += 16; + vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 3); + mb_dqcoeff += 16; + vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 3); + mb_dqcoeff += 16; + vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 3); + mb_dqcoeff += 16; + return; } diff --git a/libs/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c 
b/libs/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c index b25686ffb8..a168219705 100644 --- a/libs/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c +++ b/libs/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c @@ -12,100 +12,93 @@ #include "./vpx_config.h" static INLINE void vp8_loop_filter_simple_horizontal_edge_neon( - unsigned char *s, - int p, - const unsigned char *blimit) { - uint8_t *sp; - uint8x16_t qblimit, q0u8; - uint8x16_t q5u8, q6u8, q7u8, q8u8, q9u8, q10u8, q14u8, q15u8; - int16x8_t q2s16, q3s16, q13s16; - int8x8_t d8s8, d9s8; - int8x16_t q2s8, q3s8, q4s8, q10s8, q11s8, q14s8; + unsigned char *s, int p, const unsigned char *blimit) { + uint8_t *sp; + uint8x16_t qblimit, q0u8; + uint8x16_t q5u8, q6u8, q7u8, q8u8, q9u8, q10u8, q14u8, q15u8; + int16x8_t q2s16, q3s16, q13s16; + int8x8_t d8s8, d9s8; + int8x16_t q2s8, q3s8, q4s8, q10s8, q11s8, q14s8; - qblimit = vdupq_n_u8(*blimit); + qblimit = vdupq_n_u8(*blimit); - sp = s - (p << 1); - q5u8 = vld1q_u8(sp); - sp += p; - q6u8 = vld1q_u8(sp); - sp += p; - q7u8 = vld1q_u8(sp); - sp += p; - q8u8 = vld1q_u8(sp); + sp = s - (p << 1); + q5u8 = vld1q_u8(sp); + sp += p; + q6u8 = vld1q_u8(sp); + sp += p; + q7u8 = vld1q_u8(sp); + sp += p; + q8u8 = vld1q_u8(sp); - q15u8 = vabdq_u8(q6u8, q7u8); - q14u8 = vabdq_u8(q5u8, q8u8); + q15u8 = vabdq_u8(q6u8, q7u8); + q14u8 = vabdq_u8(q5u8, q8u8); - q15u8 = vqaddq_u8(q15u8, q15u8); - q14u8 = vshrq_n_u8(q14u8, 1); - q0u8 = vdupq_n_u8(0x80); - q13s16 = vdupq_n_s16(3); - q15u8 = vqaddq_u8(q15u8, q14u8); + q15u8 = vqaddq_u8(q15u8, q15u8); + q14u8 = vshrq_n_u8(q14u8, 1); + q0u8 = vdupq_n_u8(0x80); + q13s16 = vdupq_n_s16(3); + q15u8 = vqaddq_u8(q15u8, q14u8); - q5u8 = veorq_u8(q5u8, q0u8); - q6u8 = veorq_u8(q6u8, q0u8); - q7u8 = veorq_u8(q7u8, q0u8); - q8u8 = veorq_u8(q8u8, q0u8); + q5u8 = veorq_u8(q5u8, q0u8); + q6u8 = veorq_u8(q6u8, q0u8); + q7u8 = veorq_u8(q7u8, q0u8); + q8u8 = veorq_u8(q8u8, q0u8); - q15u8 = vcgeq_u8(qblimit, 
q15u8); + q15u8 = vcgeq_u8(qblimit, q15u8); - q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7u8)), - vget_low_s8(vreinterpretq_s8_u8(q6u8))); - q3s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7u8)), - vget_high_s8(vreinterpretq_s8_u8(q6u8))); + q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7u8)), + vget_low_s8(vreinterpretq_s8_u8(q6u8))); + q3s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7u8)), + vget_high_s8(vreinterpretq_s8_u8(q6u8))); - q4s8 = vqsubq_s8(vreinterpretq_s8_u8(q5u8), - vreinterpretq_s8_u8(q8u8)); + q4s8 = vqsubq_s8(vreinterpretq_s8_u8(q5u8), vreinterpretq_s8_u8(q8u8)); - q2s16 = vmulq_s16(q2s16, q13s16); - q3s16 = vmulq_s16(q3s16, q13s16); + q2s16 = vmulq_s16(q2s16, q13s16); + q3s16 = vmulq_s16(q3s16, q13s16); - q10u8 = vdupq_n_u8(3); - q9u8 = vdupq_n_u8(4); + q10u8 = vdupq_n_u8(3); + q9u8 = vdupq_n_u8(4); - q2s16 = vaddw_s8(q2s16, vget_low_s8(q4s8)); - q3s16 = vaddw_s8(q3s16, vget_high_s8(q4s8)); + q2s16 = vaddw_s8(q2s16, vget_low_s8(q4s8)); + q3s16 = vaddw_s8(q3s16, vget_high_s8(q4s8)); - d8s8 = vqmovn_s16(q2s16); - d9s8 = vqmovn_s16(q3s16); - q4s8 = vcombine_s8(d8s8, d9s8); + d8s8 = vqmovn_s16(q2s16); + d9s8 = vqmovn_s16(q3s16); + q4s8 = vcombine_s8(d8s8, d9s8); - q14s8 = vandq_s8(q4s8, vreinterpretq_s8_u8(q15u8)); + q14s8 = vandq_s8(q4s8, vreinterpretq_s8_u8(q15u8)); - q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q10u8)); - q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q9u8)); - q2s8 = vshrq_n_s8(q2s8, 3); - q3s8 = vshrq_n_s8(q3s8, 3); + q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q10u8)); + q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q9u8)); + q2s8 = vshrq_n_s8(q2s8, 3); + q3s8 = vshrq_n_s8(q3s8, 3); - q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6u8), q2s8); - q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7u8), q3s8); + q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6u8), q2s8); + q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7u8), q3s8); - q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8); - q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8); + q6u8 = 
veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8); + q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8); - vst1q_u8(s, q7u8); - s -= p; - vst1q_u8(s, q6u8); - return; + vst1q_u8(s, q7u8); + s -= p; + vst1q_u8(s, q6u8); + return; } -void vp8_loop_filter_bhs_neon( - unsigned char *y_ptr, - int y_stride, - const unsigned char *blimit) { - y_ptr += y_stride * 4; - vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit); - y_ptr += y_stride * 4; - vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit); - y_ptr += y_stride * 4; - vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit); - return; +void vp8_loop_filter_bhs_neon(unsigned char *y_ptr, int y_stride, + const unsigned char *blimit) { + y_ptr += y_stride * 4; + vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit); + y_ptr += y_stride * 4; + vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit); + y_ptr += y_stride * 4; + vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit); + return; } -void vp8_loop_filter_mbhs_neon( - unsigned char *y_ptr, - int y_stride, - const unsigned char *blimit) { - vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit); - return; +void vp8_loop_filter_mbhs_neon(unsigned char *y_ptr, int y_stride, + const unsigned char *blimit) { + vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit); + return; } diff --git a/libs/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c b/libs/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c index 921bcad698..80a222d248 100644 --- a/libs/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c +++ b/libs/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c @@ -15,34 +15,33 @@ #ifdef VPX_INCOMPATIBLE_GCC static INLINE void write_2x4(unsigned char *dst, int pitch, const uint8x8x2_t result) { - /* - * uint8x8x2_t result - 00 01 02 03 | 04 05 06 07 - 10 11 12 13 | 14 15 16 17 - --- - * after vtrn_u8 - 00 10 02 12 | 
04 14 06 16 - 01 11 03 13 | 05 15 07 17 - */ - const uint8x8x2_t r01_u8 = vtrn_u8(result.val[0], - result.val[1]); - const uint16x4_t x_0_4 = vreinterpret_u16_u8(r01_u8.val[0]); - const uint16x4_t x_1_5 = vreinterpret_u16_u8(r01_u8.val[1]); - vst1_lane_u16((uint16_t *)dst, x_0_4, 0); - dst += pitch; - vst1_lane_u16((uint16_t *)dst, x_1_5, 0); - dst += pitch; - vst1_lane_u16((uint16_t *)dst, x_0_4, 1); - dst += pitch; - vst1_lane_u16((uint16_t *)dst, x_1_5, 1); - dst += pitch; - vst1_lane_u16((uint16_t *)dst, x_0_4, 2); - dst += pitch; - vst1_lane_u16((uint16_t *)dst, x_1_5, 2); - dst += pitch; - vst1_lane_u16((uint16_t *)dst, x_0_4, 3); - dst += pitch; - vst1_lane_u16((uint16_t *)dst, x_1_5, 3); + /* + * uint8x8x2_t result + 00 01 02 03 | 04 05 06 07 + 10 11 12 13 | 14 15 16 17 + --- + * after vtrn_u8 + 00 10 02 12 | 04 14 06 16 + 01 11 03 13 | 05 15 07 17 + */ + const uint8x8x2_t r01_u8 = vtrn_u8(result.val[0], result.val[1]); + const uint16x4_t x_0_4 = vreinterpret_u16_u8(r01_u8.val[0]); + const uint16x4_t x_1_5 = vreinterpret_u16_u8(r01_u8.val[1]); + vst1_lane_u16((uint16_t *)dst, x_0_4, 0); + dst += pitch; + vst1_lane_u16((uint16_t *)dst, x_1_5, 0); + dst += pitch; + vst1_lane_u16((uint16_t *)dst, x_0_4, 1); + dst += pitch; + vst1_lane_u16((uint16_t *)dst, x_1_5, 1); + dst += pitch; + vst1_lane_u16((uint16_t *)dst, x_0_4, 2); + dst += pitch; + vst1_lane_u16((uint16_t *)dst, x_1_5, 2); + dst += pitch; + vst1_lane_u16((uint16_t *)dst, x_0_4, 3); + dst += pitch; + vst1_lane_u16((uint16_t *)dst, x_1_5, 3); } static INLINE void write_2x8(unsigned char *dst, int pitch, @@ -91,193 +90,183 @@ static INLINE void write_2x8(unsigned char *dst, int pitch, } #endif // VPX_INCOMPATIBLE_GCC - #ifdef VPX_INCOMPATIBLE_GCC -static INLINE -uint8x8x4_t read_4x8(unsigned char *src, int pitch) { - uint8x8x4_t x; - const uint8x8_t a = vld1_u8(src); - const uint8x8_t b = vld1_u8(src + pitch * 1); - const uint8x8_t c = vld1_u8(src + pitch * 2); - const uint8x8_t d = vld1_u8(src + pitch 
* 3); - const uint8x8_t e = vld1_u8(src + pitch * 4); - const uint8x8_t f = vld1_u8(src + pitch * 5); - const uint8x8_t g = vld1_u8(src + pitch * 6); - const uint8x8_t h = vld1_u8(src + pitch * 7); - const uint32x2x2_t r04_u32 = vtrn_u32(vreinterpret_u32_u8(a), - vreinterpret_u32_u8(e)); - const uint32x2x2_t r15_u32 = vtrn_u32(vreinterpret_u32_u8(b), - vreinterpret_u32_u8(f)); - const uint32x2x2_t r26_u32 = vtrn_u32(vreinterpret_u32_u8(c), - vreinterpret_u32_u8(g)); - const uint32x2x2_t r37_u32 = vtrn_u32(vreinterpret_u32_u8(d), - vreinterpret_u32_u8(h)); - const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u32(r04_u32.val[0]), - vreinterpret_u16_u32(r26_u32.val[0])); - const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u32(r15_u32.val[0]), - vreinterpret_u16_u32(r37_u32.val[0])); - const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]), - vreinterpret_u8_u16(r13_u16.val[0])); - const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]), - vreinterpret_u8_u16(r13_u16.val[1])); - /* - * after vtrn_u32 - 00 01 02 03 | 40 41 42 43 - 10 11 12 13 | 50 51 52 53 - 20 21 22 23 | 60 61 62 63 - 30 31 32 33 | 70 71 72 73 - --- - * after vtrn_u16 - 00 01 20 21 | 40 41 60 61 - 02 03 22 23 | 42 43 62 63 - 10 11 30 31 | 50 51 70 71 - 12 13 32 33 | 52 52 72 73 +static INLINE uint8x8x4_t read_4x8(unsigned char *src, int pitch) { + uint8x8x4_t x; + const uint8x8_t a = vld1_u8(src); + const uint8x8_t b = vld1_u8(src + pitch * 1); + const uint8x8_t c = vld1_u8(src + pitch * 2); + const uint8x8_t d = vld1_u8(src + pitch * 3); + const uint8x8_t e = vld1_u8(src + pitch * 4); + const uint8x8_t f = vld1_u8(src + pitch * 5); + const uint8x8_t g = vld1_u8(src + pitch * 6); + const uint8x8_t h = vld1_u8(src + pitch * 7); + const uint32x2x2_t r04_u32 = + vtrn_u32(vreinterpret_u32_u8(a), vreinterpret_u32_u8(e)); + const uint32x2x2_t r15_u32 = + vtrn_u32(vreinterpret_u32_u8(b), vreinterpret_u32_u8(f)); + const uint32x2x2_t r26_u32 = + 
vtrn_u32(vreinterpret_u32_u8(c), vreinterpret_u32_u8(g)); + const uint32x2x2_t r37_u32 = + vtrn_u32(vreinterpret_u32_u8(d), vreinterpret_u32_u8(h)); + const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u32(r04_u32.val[0]), + vreinterpret_u16_u32(r26_u32.val[0])); + const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u32(r15_u32.val[0]), + vreinterpret_u16_u32(r37_u32.val[0])); + const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]), + vreinterpret_u8_u16(r13_u16.val[0])); + const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]), + vreinterpret_u8_u16(r13_u16.val[1])); + /* + * after vtrn_u32 + 00 01 02 03 | 40 41 42 43 + 10 11 12 13 | 50 51 52 53 + 20 21 22 23 | 60 61 62 63 + 30 31 32 33 | 70 71 72 73 + --- + * after vtrn_u16 + 00 01 20 21 | 40 41 60 61 + 02 03 22 23 | 42 43 62 63 + 10 11 30 31 | 50 51 70 71 + 12 13 32 33 | 52 52 72 73 - 00 01 20 21 | 40 41 60 61 - 10 11 30 31 | 50 51 70 71 - 02 03 22 23 | 42 43 62 63 - 12 13 32 33 | 52 52 72 73 - --- - * after vtrn_u8 - 00 10 20 30 | 40 50 60 70 - 01 11 21 31 | 41 51 61 71 - 02 12 22 32 | 42 52 62 72 - 03 13 23 33 | 43 53 63 73 - */ - x.val[0] = r01_u8.val[0]; - x.val[1] = r01_u8.val[1]; - x.val[2] = r23_u8.val[0]; - x.val[3] = r23_u8.val[1]; + 00 01 20 21 | 40 41 60 61 + 10 11 30 31 | 50 51 70 71 + 02 03 22 23 | 42 43 62 63 + 12 13 32 33 | 52 52 72 73 + --- + * after vtrn_u8 + 00 10 20 30 | 40 50 60 70 + 01 11 21 31 | 41 51 61 71 + 02 12 22 32 | 42 52 62 72 + 03 13 23 33 | 43 53 63 73 + */ + x.val[0] = r01_u8.val[0]; + x.val[1] = r01_u8.val[1]; + x.val[2] = r23_u8.val[0]; + x.val[3] = r23_u8.val[1]; - return x; + return x; } #else -static INLINE -uint8x8x4_t read_4x8(unsigned char *src, int pitch) { - uint8x8x4_t x; - x.val[0] = x.val[1] = x.val[2] = x.val[3] = vdup_n_u8(0); - x = vld4_lane_u8(src, x, 0); - src += pitch; - x = vld4_lane_u8(src, x, 1); - src += pitch; - x = vld4_lane_u8(src, x, 2); - src += pitch; - x = vld4_lane_u8(src, x, 3); - src += pitch; - x = 
vld4_lane_u8(src, x, 4); - src += pitch; - x = vld4_lane_u8(src, x, 5); - src += pitch; - x = vld4_lane_u8(src, x, 6); - src += pitch; - x = vld4_lane_u8(src, x, 7); - return x; +static INLINE uint8x8x4_t read_4x8(unsigned char *src, int pitch) { + uint8x8x4_t x; + x.val[0] = x.val[1] = x.val[2] = x.val[3] = vdup_n_u8(0); + x = vld4_lane_u8(src, x, 0); + src += pitch; + x = vld4_lane_u8(src, x, 1); + src += pitch; + x = vld4_lane_u8(src, x, 2); + src += pitch; + x = vld4_lane_u8(src, x, 3); + src += pitch; + x = vld4_lane_u8(src, x, 4); + src += pitch; + x = vld4_lane_u8(src, x, 5); + src += pitch; + x = vld4_lane_u8(src, x, 6); + src += pitch; + x = vld4_lane_u8(src, x, 7); + return x; } #endif // VPX_INCOMPATIBLE_GCC static INLINE void vp8_loop_filter_simple_vertical_edge_neon( - unsigned char *s, - int p, - const unsigned char *blimit) { - unsigned char *src1; - uint8x16_t qblimit, q0u8; - uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q11u8, q12u8, q14u8, q15u8; - int16x8_t q2s16, q13s16, q11s16; - int8x8_t d28s8, d29s8; - int8x16_t q2s8, q3s8, q10s8, q11s8, q14s8; - uint8x8x4_t d0u8x4; // d6, d7, d8, d9 - uint8x8x4_t d1u8x4; // d10, d11, d12, d13 - uint8x8x2_t d2u8x2; // d12, d13 - uint8x8x2_t d3u8x2; // d14, d15 + unsigned char *s, int p, const unsigned char *blimit) { + unsigned char *src1; + uint8x16_t qblimit, q0u8; + uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q11u8, q12u8, q14u8, q15u8; + int16x8_t q2s16, q13s16, q11s16; + int8x8_t d28s8, d29s8; + int8x16_t q2s8, q3s8, q10s8, q11s8, q14s8; + uint8x8x4_t d0u8x4; // d6, d7, d8, d9 + uint8x8x4_t d1u8x4; // d10, d11, d12, d13 + uint8x8x2_t d2u8x2; // d12, d13 + uint8x8x2_t d3u8x2; // d14, d15 - qblimit = vdupq_n_u8(*blimit); + qblimit = vdupq_n_u8(*blimit); - src1 = s - 2; - d0u8x4 = read_4x8(src1, p); - src1 += p * 8; - d1u8x4 = read_4x8(src1, p); + src1 = s - 2; + d0u8x4 = read_4x8(src1, p); + src1 += p * 8; + d1u8x4 = read_4x8(src1, p); - q3u8 = vcombine_u8(d0u8x4.val[0], d1u8x4.val[0]); // d6 d10 - q4u8 = 
vcombine_u8(d0u8x4.val[2], d1u8x4.val[2]); // d8 d12 - q5u8 = vcombine_u8(d0u8x4.val[1], d1u8x4.val[1]); // d7 d11 - q6u8 = vcombine_u8(d0u8x4.val[3], d1u8x4.val[3]); // d9 d13 + q3u8 = vcombine_u8(d0u8x4.val[0], d1u8x4.val[0]); // d6 d10 + q4u8 = vcombine_u8(d0u8x4.val[2], d1u8x4.val[2]); // d8 d12 + q5u8 = vcombine_u8(d0u8x4.val[1], d1u8x4.val[1]); // d7 d11 + q6u8 = vcombine_u8(d0u8x4.val[3], d1u8x4.val[3]); // d9 d13 - q15u8 = vabdq_u8(q5u8, q4u8); - q14u8 = vabdq_u8(q3u8, q6u8); + q15u8 = vabdq_u8(q5u8, q4u8); + q14u8 = vabdq_u8(q3u8, q6u8); - q15u8 = vqaddq_u8(q15u8, q15u8); - q14u8 = vshrq_n_u8(q14u8, 1); - q0u8 = vdupq_n_u8(0x80); - q11s16 = vdupq_n_s16(3); - q15u8 = vqaddq_u8(q15u8, q14u8); + q15u8 = vqaddq_u8(q15u8, q15u8); + q14u8 = vshrq_n_u8(q14u8, 1); + q0u8 = vdupq_n_u8(0x80); + q11s16 = vdupq_n_s16(3); + q15u8 = vqaddq_u8(q15u8, q14u8); - q3u8 = veorq_u8(q3u8, q0u8); - q4u8 = veorq_u8(q4u8, q0u8); - q5u8 = veorq_u8(q5u8, q0u8); - q6u8 = veorq_u8(q6u8, q0u8); + q3u8 = veorq_u8(q3u8, q0u8); + q4u8 = veorq_u8(q4u8, q0u8); + q5u8 = veorq_u8(q5u8, q0u8); + q6u8 = veorq_u8(q6u8, q0u8); - q15u8 = vcgeq_u8(qblimit, q15u8); + q15u8 = vcgeq_u8(qblimit, q15u8); - q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q4u8)), - vget_low_s8(vreinterpretq_s8_u8(q5u8))); - q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q4u8)), - vget_high_s8(vreinterpretq_s8_u8(q5u8))); + q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q4u8)), + vget_low_s8(vreinterpretq_s8_u8(q5u8))); + q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q4u8)), + vget_high_s8(vreinterpretq_s8_u8(q5u8))); - q14s8 = vqsubq_s8(vreinterpretq_s8_u8(q3u8), - vreinterpretq_s8_u8(q6u8)); + q14s8 = vqsubq_s8(vreinterpretq_s8_u8(q3u8), vreinterpretq_s8_u8(q6u8)); - q2s16 = vmulq_s16(q2s16, q11s16); - q13s16 = vmulq_s16(q13s16, q11s16); + q2s16 = vmulq_s16(q2s16, q11s16); + q13s16 = vmulq_s16(q13s16, q11s16); - q11u8 = vdupq_n_u8(3); - q12u8 = vdupq_n_u8(4); + q11u8 = vdupq_n_u8(3); + q12u8 = vdupq_n_u8(4); 
- q2s16 = vaddw_s8(q2s16, vget_low_s8(q14s8)); - q13s16 = vaddw_s8(q13s16, vget_high_s8(q14s8)); + q2s16 = vaddw_s8(q2s16, vget_low_s8(q14s8)); + q13s16 = vaddw_s8(q13s16, vget_high_s8(q14s8)); - d28s8 = vqmovn_s16(q2s16); - d29s8 = vqmovn_s16(q13s16); - q14s8 = vcombine_s8(d28s8, d29s8); + d28s8 = vqmovn_s16(q2s16); + d29s8 = vqmovn_s16(q13s16); + q14s8 = vcombine_s8(d28s8, d29s8); - q14s8 = vandq_s8(q14s8, vreinterpretq_s8_u8(q15u8)); + q14s8 = vandq_s8(q14s8, vreinterpretq_s8_u8(q15u8)); - q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q11u8)); - q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q12u8)); - q2s8 = vshrq_n_s8(q2s8, 3); - q14s8 = vshrq_n_s8(q3s8, 3); + q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q11u8)); + q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q12u8)); + q2s8 = vshrq_n_s8(q2s8, 3); + q14s8 = vshrq_n_s8(q3s8, 3); - q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q5u8), q2s8); - q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q4u8), q14s8); + q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q5u8), q2s8); + q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q4u8), q14s8); - q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8); - q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8); + q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8); + q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8); - d2u8x2.val[0] = vget_low_u8(q6u8); // d12 - d2u8x2.val[1] = vget_low_u8(q7u8); // d14 - d3u8x2.val[0] = vget_high_u8(q6u8); // d13 - d3u8x2.val[1] = vget_high_u8(q7u8); // d15 + d2u8x2.val[0] = vget_low_u8(q6u8); // d12 + d2u8x2.val[1] = vget_low_u8(q7u8); // d14 + d3u8x2.val[0] = vget_high_u8(q6u8); // d13 + d3u8x2.val[1] = vget_high_u8(q7u8); // d15 - src1 = s - 1; - write_2x8(src1, p, d2u8x2, d3u8x2); + src1 = s - 1; + write_2x8(src1, p, d2u8x2, d3u8x2); } -void vp8_loop_filter_bvs_neon( - unsigned char *y_ptr, - int y_stride, - const unsigned char *blimit) { - y_ptr += 4; - vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit); - y_ptr += 4; - vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, 
blimit); - y_ptr += 4; - vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit); - return; +void vp8_loop_filter_bvs_neon(unsigned char *y_ptr, int y_stride, + const unsigned char *blimit) { + y_ptr += 4; + vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit); + y_ptr += 4; + vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit); + y_ptr += 4; + vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit); + return; } -void vp8_loop_filter_mbvs_neon( - unsigned char *y_ptr, - int y_stride, - const unsigned char *blimit) { - vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit); - return; +void vp8_loop_filter_mbvs_neon(unsigned char *y_ptr, int y_stride, + const unsigned char *blimit) { + vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit); + return; } diff --git a/libs/libvpx/vp8/common/arm/neon/mbloopfilter_neon.c b/libs/libvpx/vp8/common/arm/neon/mbloopfilter_neon.c index 5351f4be66..65eec300ff 100644 --- a/libs/libvpx/vp8/common/arm/neon/mbloopfilter_neon.c +++ b/libs/libvpx/vp8/common/arm/neon/mbloopfilter_neon.c @@ -11,615 +11,601 @@ #include #include "./vpx_config.h" -static INLINE void vp8_mbloop_filter_neon( - uint8x16_t qblimit, // mblimit - uint8x16_t qlimit, // limit - uint8x16_t qthresh, // thresh - uint8x16_t q3, // p2 - uint8x16_t q4, // p2 - uint8x16_t q5, // p1 - uint8x16_t q6, // p0 - uint8x16_t q7, // q0 - uint8x16_t q8, // q1 - uint8x16_t q9, // q2 - uint8x16_t q10, // q3 - uint8x16_t *q4r, // p1 - uint8x16_t *q5r, // p1 - uint8x16_t *q6r, // p0 - uint8x16_t *q7r, // q0 - uint8x16_t *q8r, // q1 - uint8x16_t *q9r) { // q1 - uint8x16_t q0u8, q1u8, q11u8, q12u8, q13u8, q14u8, q15u8; - int16x8_t q0s16, q2s16, q11s16, q12s16, q13s16, q14s16, q15s16; - int8x16_t q1s8, q6s8, q7s8, q2s8, q11s8, q13s8; - uint16x8_t q0u16, q11u16, q12u16, q13u16, q14u16, q15u16; - int8x16_t q0s8, q12s8, q14s8, q15s8; - int8x8_t d0, d1, d2, d3, d4, d5, d24, d25, d28, d29; +static INLINE void 
vp8_mbloop_filter_neon(uint8x16_t qblimit, // mblimit + uint8x16_t qlimit, // limit + uint8x16_t qthresh, // thresh + uint8x16_t q3, // p2 + uint8x16_t q4, // p2 + uint8x16_t q5, // p1 + uint8x16_t q6, // p0 + uint8x16_t q7, // q0 + uint8x16_t q8, // q1 + uint8x16_t q9, // q2 + uint8x16_t q10, // q3 + uint8x16_t *q4r, // p1 + uint8x16_t *q5r, // p1 + uint8x16_t *q6r, // p0 + uint8x16_t *q7r, // q0 + uint8x16_t *q8r, // q1 + uint8x16_t *q9r) { // q1 + uint8x16_t q0u8, q1u8, q11u8, q12u8, q13u8, q14u8, q15u8; + int16x8_t q0s16, q2s16, q11s16, q12s16, q13s16, q14s16, q15s16; + int8x16_t q1s8, q6s8, q7s8, q2s8, q11s8, q13s8; + uint16x8_t q0u16, q11u16, q12u16, q13u16, q14u16, q15u16; + int8x16_t q0s8, q12s8, q14s8, q15s8; + int8x8_t d0, d1, d2, d3, d4, d5, d24, d25, d28, d29; - q11u8 = vabdq_u8(q3, q4); - q12u8 = vabdq_u8(q4, q5); - q13u8 = vabdq_u8(q5, q6); - q14u8 = vabdq_u8(q8, q7); - q1u8 = vabdq_u8(q9, q8); - q0u8 = vabdq_u8(q10, q9); + q11u8 = vabdq_u8(q3, q4); + q12u8 = vabdq_u8(q4, q5); + q13u8 = vabdq_u8(q5, q6); + q14u8 = vabdq_u8(q8, q7); + q1u8 = vabdq_u8(q9, q8); + q0u8 = vabdq_u8(q10, q9); - q11u8 = vmaxq_u8(q11u8, q12u8); - q12u8 = vmaxq_u8(q13u8, q14u8); - q1u8 = vmaxq_u8(q1u8, q0u8); - q15u8 = vmaxq_u8(q11u8, q12u8); + q11u8 = vmaxq_u8(q11u8, q12u8); + q12u8 = vmaxq_u8(q13u8, q14u8); + q1u8 = vmaxq_u8(q1u8, q0u8); + q15u8 = vmaxq_u8(q11u8, q12u8); - q12u8 = vabdq_u8(q6, q7); + q12u8 = vabdq_u8(q6, q7); - // vp8_hevmask - q13u8 = vcgtq_u8(q13u8, qthresh); - q14u8 = vcgtq_u8(q14u8, qthresh); - q15u8 = vmaxq_u8(q15u8, q1u8); + // vp8_hevmask + q13u8 = vcgtq_u8(q13u8, qthresh); + q14u8 = vcgtq_u8(q14u8, qthresh); + q15u8 = vmaxq_u8(q15u8, q1u8); - q15u8 = vcgeq_u8(qlimit, q15u8); + q15u8 = vcgeq_u8(qlimit, q15u8); - q1u8 = vabdq_u8(q5, q8); - q12u8 = vqaddq_u8(q12u8, q12u8); + q1u8 = vabdq_u8(q5, q8); + q12u8 = vqaddq_u8(q12u8, q12u8); - // vp8_filter() function - // convert to signed - q0u8 = vdupq_n_u8(0x80); - q9 = veorq_u8(q9, q0u8); - q8 = 
veorq_u8(q8, q0u8); - q7 = veorq_u8(q7, q0u8); - q6 = veorq_u8(q6, q0u8); - q5 = veorq_u8(q5, q0u8); - q4 = veorq_u8(q4, q0u8); + // vp8_filter() function + // convert to signed + q0u8 = vdupq_n_u8(0x80); + q9 = veorq_u8(q9, q0u8); + q8 = veorq_u8(q8, q0u8); + q7 = veorq_u8(q7, q0u8); + q6 = veorq_u8(q6, q0u8); + q5 = veorq_u8(q5, q0u8); + q4 = veorq_u8(q4, q0u8); - q1u8 = vshrq_n_u8(q1u8, 1); - q12u8 = vqaddq_u8(q12u8, q1u8); + q1u8 = vshrq_n_u8(q1u8, 1); + q12u8 = vqaddq_u8(q12u8, q1u8); - q14u8 = vorrq_u8(q13u8, q14u8); - q12u8 = vcgeq_u8(qblimit, q12u8); + q14u8 = vorrq_u8(q13u8, q14u8); + q12u8 = vcgeq_u8(qblimit, q12u8); - q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)), - vget_low_s8(vreinterpretq_s8_u8(q6))); - q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)), - vget_high_s8(vreinterpretq_s8_u8(q6))); + q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)), + vget_low_s8(vreinterpretq_s8_u8(q6))); + q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)), + vget_high_s8(vreinterpretq_s8_u8(q6))); - q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), - vreinterpretq_s8_u8(q8)); + q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), vreinterpretq_s8_u8(q8)); - q11s16 = vdupq_n_s16(3); - q2s16 = vmulq_s16(q2s16, q11s16); - q13s16 = vmulq_s16(q13s16, q11s16); + q11s16 = vdupq_n_s16(3); + q2s16 = vmulq_s16(q2s16, q11s16); + q13s16 = vmulq_s16(q13s16, q11s16); - q15u8 = vandq_u8(q15u8, q12u8); + q15u8 = vandq_u8(q15u8, q12u8); - q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8)); - q13s16 = vaddw_s8(q13s16, vget_high_s8(q1s8)); + q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8)); + q13s16 = vaddw_s8(q13s16, vget_high_s8(q1s8)); - q12u8 = vdupq_n_u8(3); - q11u8 = vdupq_n_u8(4); - // vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0)) - d2 = vqmovn_s16(q2s16); - d3 = vqmovn_s16(q13s16); - q1s8 = vcombine_s8(d2, d3); - q1s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q15u8)); - q13s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q14u8)); + q12u8 = vdupq_n_u8(3); + q11u8 = vdupq_n_u8(4); + // vp8_filter = 
clamp(vp8_filter + 3 * ( qs0 - ps0)) + d2 = vqmovn_s16(q2s16); + d3 = vqmovn_s16(q13s16); + q1s8 = vcombine_s8(d2, d3); + q1s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q15u8)); + q13s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q14u8)); - q2s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q11u8)); - q13s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q12u8)); - q2s8 = vshrq_n_s8(q2s8, 3); - q13s8 = vshrq_n_s8(q13s8, 3); + q2s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q11u8)); + q13s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q12u8)); + q2s8 = vshrq_n_s8(q2s8, 3); + q13s8 = vshrq_n_s8(q13s8, 3); - q7s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q2s8); - q6s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q13s8); + q7s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q2s8); + q6s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q13s8); - q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8)); + q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8)); - q0u16 = q11u16 = q12u16 = q13u16 = q14u16 = q15u16 = vdupq_n_u16(63); - d5 = vdup_n_s8(9); - d4 = vdup_n_s8(18); + q0u16 = q11u16 = q12u16 = q13u16 = q14u16 = q15u16 = vdupq_n_u16(63); + d5 = vdup_n_s8(9); + d4 = vdup_n_s8(18); - q0s16 = vmlal_s8(vreinterpretq_s16_u16(q0u16), vget_low_s8(q1s8), d5); - q11s16 = vmlal_s8(vreinterpretq_s16_u16(q11u16), vget_high_s8(q1s8), d5); - d5 = vdup_n_s8(27); - q12s16 = vmlal_s8(vreinterpretq_s16_u16(q12u16), vget_low_s8(q1s8), d4); - q13s16 = vmlal_s8(vreinterpretq_s16_u16(q13u16), vget_high_s8(q1s8), d4); - q14s16 = vmlal_s8(vreinterpretq_s16_u16(q14u16), vget_low_s8(q1s8), d5); - q15s16 = vmlal_s8(vreinterpretq_s16_u16(q15u16), vget_high_s8(q1s8), d5); + q0s16 = vmlal_s8(vreinterpretq_s16_u16(q0u16), vget_low_s8(q1s8), d5); + q11s16 = vmlal_s8(vreinterpretq_s16_u16(q11u16), vget_high_s8(q1s8), d5); + d5 = vdup_n_s8(27); + q12s16 = vmlal_s8(vreinterpretq_s16_u16(q12u16), vget_low_s8(q1s8), d4); + q13s16 = vmlal_s8(vreinterpretq_s16_u16(q13u16), vget_high_s8(q1s8), d4); + q14s16 = vmlal_s8(vreinterpretq_s16_u16(q14u16), vget_low_s8(q1s8), d5); + q15s16 = 
vmlal_s8(vreinterpretq_s16_u16(q15u16), vget_high_s8(q1s8), d5); - d0 = vqshrn_n_s16(q0s16 , 7); - d1 = vqshrn_n_s16(q11s16, 7); - d24 = vqshrn_n_s16(q12s16, 7); - d25 = vqshrn_n_s16(q13s16, 7); - d28 = vqshrn_n_s16(q14s16, 7); - d29 = vqshrn_n_s16(q15s16, 7); + d0 = vqshrn_n_s16(q0s16, 7); + d1 = vqshrn_n_s16(q11s16, 7); + d24 = vqshrn_n_s16(q12s16, 7); + d25 = vqshrn_n_s16(q13s16, 7); + d28 = vqshrn_n_s16(q14s16, 7); + d29 = vqshrn_n_s16(q15s16, 7); - q0s8 = vcombine_s8(d0, d1); - q12s8 = vcombine_s8(d24, d25); - q14s8 = vcombine_s8(d28, d29); + q0s8 = vcombine_s8(d0, d1); + q12s8 = vcombine_s8(d24, d25); + q14s8 = vcombine_s8(d28, d29); - q11s8 = vqsubq_s8(vreinterpretq_s8_u8(q9), q0s8); - q0s8 = vqaddq_s8(vreinterpretq_s8_u8(q4), q0s8); - q13s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q12s8); - q12s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q12s8); - q15s8 = vqsubq_s8((q7s8), q14s8); - q14s8 = vqaddq_s8((q6s8), q14s8); + q11s8 = vqsubq_s8(vreinterpretq_s8_u8(q9), q0s8); + q0s8 = vqaddq_s8(vreinterpretq_s8_u8(q4), q0s8); + q13s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q12s8); + q12s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q12s8); + q15s8 = vqsubq_s8((q7s8), q14s8); + q14s8 = vqaddq_s8((q6s8), q14s8); - q1u8 = vdupq_n_u8(0x80); - *q9r = veorq_u8(vreinterpretq_u8_s8(q11s8), q1u8); - *q8r = veorq_u8(vreinterpretq_u8_s8(q13s8), q1u8); - *q7r = veorq_u8(vreinterpretq_u8_s8(q15s8), q1u8); - *q6r = veorq_u8(vreinterpretq_u8_s8(q14s8), q1u8); - *q5r = veorq_u8(vreinterpretq_u8_s8(q12s8), q1u8); - *q4r = veorq_u8(vreinterpretq_u8_s8(q0s8), q1u8); - return; + q1u8 = vdupq_n_u8(0x80); + *q9r = veorq_u8(vreinterpretq_u8_s8(q11s8), q1u8); + *q8r = veorq_u8(vreinterpretq_u8_s8(q13s8), q1u8); + *q7r = veorq_u8(vreinterpretq_u8_s8(q15s8), q1u8); + *q6r = veorq_u8(vreinterpretq_u8_s8(q14s8), q1u8); + *q5r = veorq_u8(vreinterpretq_u8_s8(q12s8), q1u8); + *q4r = veorq_u8(vreinterpretq_u8_s8(q0s8), q1u8); + return; } -void vp8_mbloop_filter_horizontal_edge_y_neon( - unsigned char *src, - int 
pitch, - unsigned char blimit, - unsigned char limit, - unsigned char thresh) { - uint8x16_t qblimit, qlimit, qthresh, q3, q4; - uint8x16_t q5, q6, q7, q8, q9, q10; +void vp8_mbloop_filter_horizontal_edge_y_neon(unsigned char *src, int pitch, + unsigned char blimit, + unsigned char limit, + unsigned char thresh) { + uint8x16_t qblimit, qlimit, qthresh, q3, q4; + uint8x16_t q5, q6, q7, q8, q9, q10; - qblimit = vdupq_n_u8(blimit); - qlimit = vdupq_n_u8(limit); - qthresh = vdupq_n_u8(thresh); + qblimit = vdupq_n_u8(blimit); + qlimit = vdupq_n_u8(limit); + qthresh = vdupq_n_u8(thresh); - src -= (pitch << 2); + src -= (pitch << 2); - q3 = vld1q_u8(src); - src += pitch; - q4 = vld1q_u8(src); - src += pitch; - q5 = vld1q_u8(src); - src += pitch; - q6 = vld1q_u8(src); - src += pitch; - q7 = vld1q_u8(src); - src += pitch; - q8 = vld1q_u8(src); - src += pitch; - q9 = vld1q_u8(src); - src += pitch; - q10 = vld1q_u8(src); + q3 = vld1q_u8(src); + src += pitch; + q4 = vld1q_u8(src); + src += pitch; + q5 = vld1q_u8(src); + src += pitch; + q6 = vld1q_u8(src); + src += pitch; + q7 = vld1q_u8(src); + src += pitch; + q8 = vld1q_u8(src); + src += pitch; + q9 = vld1q_u8(src); + src += pitch; + q10 = vld1q_u8(src); - vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, - q5, q6, q7, q8, q9, q10, - &q4, &q5, &q6, &q7, &q8, &q9); + vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9, + q10, &q4, &q5, &q6, &q7, &q8, &q9); - src -= (pitch * 6); - vst1q_u8(src, q4); - src += pitch; - vst1q_u8(src, q5); - src += pitch; - vst1q_u8(src, q6); - src += pitch; - vst1q_u8(src, q7); - src += pitch; - vst1q_u8(src, q8); - src += pitch; - vst1q_u8(src, q9); - return; + src -= (pitch * 6); + vst1q_u8(src, q4); + src += pitch; + vst1q_u8(src, q5); + src += pitch; + vst1q_u8(src, q6); + src += pitch; + vst1q_u8(src, q7); + src += pitch; + vst1q_u8(src, q8); + src += pitch; + vst1q_u8(src, q9); + return; } -void vp8_mbloop_filter_horizontal_edge_uv_neon( - unsigned char *u, - 
int pitch, - unsigned char blimit, - unsigned char limit, - unsigned char thresh, - unsigned char *v) { - uint8x16_t qblimit, qlimit, qthresh, q3, q4; - uint8x16_t q5, q6, q7, q8, q9, q10; - uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14; - uint8x8_t d15, d16, d17, d18, d19, d20, d21; +void vp8_mbloop_filter_horizontal_edge_uv_neon(unsigned char *u, int pitch, + unsigned char blimit, + unsigned char limit, + unsigned char thresh, + unsigned char *v) { + uint8x16_t qblimit, qlimit, qthresh, q3, q4; + uint8x16_t q5, q6, q7, q8, q9, q10; + uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14; + uint8x8_t d15, d16, d17, d18, d19, d20, d21; - qblimit = vdupq_n_u8(blimit); - qlimit = vdupq_n_u8(limit); - qthresh = vdupq_n_u8(thresh); + qblimit = vdupq_n_u8(blimit); + qlimit = vdupq_n_u8(limit); + qthresh = vdupq_n_u8(thresh); - u -= (pitch << 2); - v -= (pitch << 2); + u -= (pitch << 2); + v -= (pitch << 2); - d6 = vld1_u8(u); - u += pitch; - d7 = vld1_u8(v); - v += pitch; - d8 = vld1_u8(u); - u += pitch; - d9 = vld1_u8(v); - v += pitch; - d10 = vld1_u8(u); - u += pitch; - d11 = vld1_u8(v); - v += pitch; - d12 = vld1_u8(u); - u += pitch; - d13 = vld1_u8(v); - v += pitch; - d14 = vld1_u8(u); - u += pitch; - d15 = vld1_u8(v); - v += pitch; - d16 = vld1_u8(u); - u += pitch; - d17 = vld1_u8(v); - v += pitch; - d18 = vld1_u8(u); - u += pitch; - d19 = vld1_u8(v); - v += pitch; - d20 = vld1_u8(u); - d21 = vld1_u8(v); + d6 = vld1_u8(u); + u += pitch; + d7 = vld1_u8(v); + v += pitch; + d8 = vld1_u8(u); + u += pitch; + d9 = vld1_u8(v); + v += pitch; + d10 = vld1_u8(u); + u += pitch; + d11 = vld1_u8(v); + v += pitch; + d12 = vld1_u8(u); + u += pitch; + d13 = vld1_u8(v); + v += pitch; + d14 = vld1_u8(u); + u += pitch; + d15 = vld1_u8(v); + v += pitch; + d16 = vld1_u8(u); + u += pitch; + d17 = vld1_u8(v); + v += pitch; + d18 = vld1_u8(u); + u += pitch; + d19 = vld1_u8(v); + v += pitch; + d20 = vld1_u8(u); + d21 = vld1_u8(v); - q3 = vcombine_u8(d6, d7); - q4 = vcombine_u8(d8, d9); - 
q5 = vcombine_u8(d10, d11); - q6 = vcombine_u8(d12, d13); - q7 = vcombine_u8(d14, d15); - q8 = vcombine_u8(d16, d17); - q9 = vcombine_u8(d18, d19); - q10 = vcombine_u8(d20, d21); + q3 = vcombine_u8(d6, d7); + q4 = vcombine_u8(d8, d9); + q5 = vcombine_u8(d10, d11); + q6 = vcombine_u8(d12, d13); + q7 = vcombine_u8(d14, d15); + q8 = vcombine_u8(d16, d17); + q9 = vcombine_u8(d18, d19); + q10 = vcombine_u8(d20, d21); - vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, - q5, q6, q7, q8, q9, q10, - &q4, &q5, &q6, &q7, &q8, &q9); + vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9, + q10, &q4, &q5, &q6, &q7, &q8, &q9); - u -= (pitch * 6); - v -= (pitch * 6); - vst1_u8(u, vget_low_u8(q4)); - u += pitch; - vst1_u8(v, vget_high_u8(q4)); - v += pitch; - vst1_u8(u, vget_low_u8(q5)); - u += pitch; - vst1_u8(v, vget_high_u8(q5)); - v += pitch; - vst1_u8(u, vget_low_u8(q6)); - u += pitch; - vst1_u8(v, vget_high_u8(q6)); - v += pitch; - vst1_u8(u, vget_low_u8(q7)); - u += pitch; - vst1_u8(v, vget_high_u8(q7)); - v += pitch; - vst1_u8(u, vget_low_u8(q8)); - u += pitch; - vst1_u8(v, vget_high_u8(q8)); - v += pitch; - vst1_u8(u, vget_low_u8(q9)); - vst1_u8(v, vget_high_u8(q9)); - return; + u -= (pitch * 6); + v -= (pitch * 6); + vst1_u8(u, vget_low_u8(q4)); + u += pitch; + vst1_u8(v, vget_high_u8(q4)); + v += pitch; + vst1_u8(u, vget_low_u8(q5)); + u += pitch; + vst1_u8(v, vget_high_u8(q5)); + v += pitch; + vst1_u8(u, vget_low_u8(q6)); + u += pitch; + vst1_u8(v, vget_high_u8(q6)); + v += pitch; + vst1_u8(u, vget_low_u8(q7)); + u += pitch; + vst1_u8(v, vget_high_u8(q7)); + v += pitch; + vst1_u8(u, vget_low_u8(q8)); + u += pitch; + vst1_u8(v, vget_high_u8(q8)); + v += pitch; + vst1_u8(u, vget_low_u8(q9)); + vst1_u8(v, vget_high_u8(q9)); + return; } -void vp8_mbloop_filter_vertical_edge_y_neon( - unsigned char *src, - int pitch, - unsigned char blimit, - unsigned char limit, - unsigned char thresh) { - unsigned char *s1, *s2; - uint8x16_t qblimit, qlimit, 
qthresh, q3, q4; - uint8x16_t q5, q6, q7, q8, q9, q10; - uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14; - uint8x8_t d15, d16, d17, d18, d19, d20, d21; - uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3; - uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7; - uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11; +void vp8_mbloop_filter_vertical_edge_y_neon(unsigned char *src, int pitch, + unsigned char blimit, + unsigned char limit, + unsigned char thresh) { + unsigned char *s1, *s2; + uint8x16_t qblimit, qlimit, qthresh, q3, q4; + uint8x16_t q5, q6, q7, q8, q9, q10; + uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14; + uint8x8_t d15, d16, d17, d18, d19, d20, d21; + uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3; + uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7; + uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11; - qblimit = vdupq_n_u8(blimit); - qlimit = vdupq_n_u8(limit); - qthresh = vdupq_n_u8(thresh); + qblimit = vdupq_n_u8(blimit); + qlimit = vdupq_n_u8(limit); + qthresh = vdupq_n_u8(thresh); - s1 = src - 4; - s2 = s1 + 8 * pitch; - d6 = vld1_u8(s1); - s1 += pitch; - d7 = vld1_u8(s2); - s2 += pitch; - d8 = vld1_u8(s1); - s1 += pitch; - d9 = vld1_u8(s2); - s2 += pitch; - d10 = vld1_u8(s1); - s1 += pitch; - d11 = vld1_u8(s2); - s2 += pitch; - d12 = vld1_u8(s1); - s1 += pitch; - d13 = vld1_u8(s2); - s2 += pitch; - d14 = vld1_u8(s1); - s1 += pitch; - d15 = vld1_u8(s2); - s2 += pitch; - d16 = vld1_u8(s1); - s1 += pitch; - d17 = vld1_u8(s2); - s2 += pitch; - d18 = vld1_u8(s1); - s1 += pitch; - d19 = vld1_u8(s2); - s2 += pitch; - d20 = vld1_u8(s1); - d21 = vld1_u8(s2); + s1 = src - 4; + s2 = s1 + 8 * pitch; + d6 = vld1_u8(s1); + s1 += pitch; + d7 = vld1_u8(s2); + s2 += pitch; + d8 = vld1_u8(s1); + s1 += pitch; + d9 = vld1_u8(s2); + s2 += pitch; + d10 = vld1_u8(s1); + s1 += pitch; + d11 = vld1_u8(s2); + s2 += pitch; + d12 = vld1_u8(s1); + s1 += pitch; + d13 = vld1_u8(s2); + s2 += pitch; + d14 = vld1_u8(s1); + s1 += pitch; + d15 = vld1_u8(s2); + s2 += pitch; + d16 = vld1_u8(s1); + s1 += 
pitch; + d17 = vld1_u8(s2); + s2 += pitch; + d18 = vld1_u8(s1); + s1 += pitch; + d19 = vld1_u8(s2); + s2 += pitch; + d20 = vld1_u8(s1); + d21 = vld1_u8(s2); - q3 = vcombine_u8(d6, d7); - q4 = vcombine_u8(d8, d9); - q5 = vcombine_u8(d10, d11); - q6 = vcombine_u8(d12, d13); - q7 = vcombine_u8(d14, d15); - q8 = vcombine_u8(d16, d17); - q9 = vcombine_u8(d18, d19); - q10 = vcombine_u8(d20, d21); + q3 = vcombine_u8(d6, d7); + q4 = vcombine_u8(d8, d9); + q5 = vcombine_u8(d10, d11); + q6 = vcombine_u8(d12, d13); + q7 = vcombine_u8(d14, d15); + q8 = vcombine_u8(d16, d17); + q9 = vcombine_u8(d18, d19); + q10 = vcombine_u8(d20, d21); - q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7)); - q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8)); - q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9)); - q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10)); + q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7)); + q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8)); + q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9)); + q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10)); - q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]), - vreinterpretq_u16_u32(q2tmp2.val[0])); - q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]), - vreinterpretq_u16_u32(q2tmp3.val[0])); - q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]), - vreinterpretq_u16_u32(q2tmp2.val[1])); - q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]), - vreinterpretq_u16_u32(q2tmp3.val[1])); + q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]), + vreinterpretq_u16_u32(q2tmp2.val[0])); + q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]), + vreinterpretq_u16_u32(q2tmp3.val[0])); + q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]), + vreinterpretq_u16_u32(q2tmp2.val[1])); + q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]), + 
vreinterpretq_u16_u32(q2tmp3.val[1])); - q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]), - vreinterpretq_u8_u16(q2tmp5.val[0])); - q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]), - vreinterpretq_u8_u16(q2tmp5.val[1])); - q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]), - vreinterpretq_u8_u16(q2tmp7.val[0])); - q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]), - vreinterpretq_u8_u16(q2tmp7.val[1])); + q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]), + vreinterpretq_u8_u16(q2tmp5.val[0])); + q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]), + vreinterpretq_u8_u16(q2tmp5.val[1])); + q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]), + vreinterpretq_u8_u16(q2tmp7.val[0])); + q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]), + vreinterpretq_u8_u16(q2tmp7.val[1])); - q3 = q2tmp8.val[0]; - q4 = q2tmp8.val[1]; - q5 = q2tmp9.val[0]; - q6 = q2tmp9.val[1]; - q7 = q2tmp10.val[0]; - q8 = q2tmp10.val[1]; - q9 = q2tmp11.val[0]; - q10 = q2tmp11.val[1]; + q3 = q2tmp8.val[0]; + q4 = q2tmp8.val[1]; + q5 = q2tmp9.val[0]; + q6 = q2tmp9.val[1]; + q7 = q2tmp10.val[0]; + q8 = q2tmp10.val[1]; + q9 = q2tmp11.val[0]; + q10 = q2tmp11.val[1]; - vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, - q5, q6, q7, q8, q9, q10, - &q4, &q5, &q6, &q7, &q8, &q9); + vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9, + q10, &q4, &q5, &q6, &q7, &q8, &q9); - q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7)); - q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8)); - q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9)); - q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10)); + q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7)); + q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8)); + q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9)); + q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), 
vreinterpretq_u32_u8(q10)); - q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]), - vreinterpretq_u16_u32(q2tmp2.val[0])); - q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]), - vreinterpretq_u16_u32(q2tmp3.val[0])); - q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]), - vreinterpretq_u16_u32(q2tmp2.val[1])); - q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]), - vreinterpretq_u16_u32(q2tmp3.val[1])); + q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]), + vreinterpretq_u16_u32(q2tmp2.val[0])); + q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]), + vreinterpretq_u16_u32(q2tmp3.val[0])); + q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]), + vreinterpretq_u16_u32(q2tmp2.val[1])); + q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]), + vreinterpretq_u16_u32(q2tmp3.val[1])); - q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]), - vreinterpretq_u8_u16(q2tmp5.val[0])); - q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]), - vreinterpretq_u8_u16(q2tmp5.val[1])); - q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]), - vreinterpretq_u8_u16(q2tmp7.val[0])); - q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]), - vreinterpretq_u8_u16(q2tmp7.val[1])); + q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]), + vreinterpretq_u8_u16(q2tmp5.val[0])); + q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]), + vreinterpretq_u8_u16(q2tmp5.val[1])); + q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]), + vreinterpretq_u8_u16(q2tmp7.val[0])); + q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]), + vreinterpretq_u8_u16(q2tmp7.val[1])); - q3 = q2tmp8.val[0]; - q4 = q2tmp8.val[1]; - q5 = q2tmp9.val[0]; - q6 = q2tmp9.val[1]; - q7 = q2tmp10.val[0]; - q8 = q2tmp10.val[1]; - q9 = q2tmp11.val[0]; - q10 = q2tmp11.val[1]; + q3 = q2tmp8.val[0]; + q4 = q2tmp8.val[1]; + q5 = q2tmp9.val[0]; + q6 = q2tmp9.val[1]; + q7 = q2tmp10.val[0]; + q8 = q2tmp10.val[1]; + q9 = q2tmp11.val[0]; + q10 = q2tmp11.val[1]; - s1 -= 7 * pitch; - s2 -= 7 
* pitch; + s1 -= 7 * pitch; + s2 -= 7 * pitch; - vst1_u8(s1, vget_low_u8(q3)); - s1 += pitch; - vst1_u8(s2, vget_high_u8(q3)); - s2 += pitch; - vst1_u8(s1, vget_low_u8(q4)); - s1 += pitch; - vst1_u8(s2, vget_high_u8(q4)); - s2 += pitch; - vst1_u8(s1, vget_low_u8(q5)); - s1 += pitch; - vst1_u8(s2, vget_high_u8(q5)); - s2 += pitch; - vst1_u8(s1, vget_low_u8(q6)); - s1 += pitch; - vst1_u8(s2, vget_high_u8(q6)); - s2 += pitch; - vst1_u8(s1, vget_low_u8(q7)); - s1 += pitch; - vst1_u8(s2, vget_high_u8(q7)); - s2 += pitch; - vst1_u8(s1, vget_low_u8(q8)); - s1 += pitch; - vst1_u8(s2, vget_high_u8(q8)); - s2 += pitch; - vst1_u8(s1, vget_low_u8(q9)); - s1 += pitch; - vst1_u8(s2, vget_high_u8(q9)); - s2 += pitch; - vst1_u8(s1, vget_low_u8(q10)); - vst1_u8(s2, vget_high_u8(q10)); - return; + vst1_u8(s1, vget_low_u8(q3)); + s1 += pitch; + vst1_u8(s2, vget_high_u8(q3)); + s2 += pitch; + vst1_u8(s1, vget_low_u8(q4)); + s1 += pitch; + vst1_u8(s2, vget_high_u8(q4)); + s2 += pitch; + vst1_u8(s1, vget_low_u8(q5)); + s1 += pitch; + vst1_u8(s2, vget_high_u8(q5)); + s2 += pitch; + vst1_u8(s1, vget_low_u8(q6)); + s1 += pitch; + vst1_u8(s2, vget_high_u8(q6)); + s2 += pitch; + vst1_u8(s1, vget_low_u8(q7)); + s1 += pitch; + vst1_u8(s2, vget_high_u8(q7)); + s2 += pitch; + vst1_u8(s1, vget_low_u8(q8)); + s1 += pitch; + vst1_u8(s2, vget_high_u8(q8)); + s2 += pitch; + vst1_u8(s1, vget_low_u8(q9)); + s1 += pitch; + vst1_u8(s2, vget_high_u8(q9)); + s2 += pitch; + vst1_u8(s1, vget_low_u8(q10)); + vst1_u8(s2, vget_high_u8(q10)); + return; } -void vp8_mbloop_filter_vertical_edge_uv_neon( - unsigned char *u, - int pitch, - unsigned char blimit, - unsigned char limit, - unsigned char thresh, - unsigned char *v) { - unsigned char *us, *ud; - unsigned char *vs, *vd; - uint8x16_t qblimit, qlimit, qthresh, q3, q4; - uint8x16_t q5, q6, q7, q8, q9, q10; - uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14; - uint8x8_t d15, d16, d17, d18, d19, d20, d21; - uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3; - 
uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7; - uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11; +void vp8_mbloop_filter_vertical_edge_uv_neon(unsigned char *u, int pitch, + unsigned char blimit, + unsigned char limit, + unsigned char thresh, + unsigned char *v) { + unsigned char *us, *ud; + unsigned char *vs, *vd; + uint8x16_t qblimit, qlimit, qthresh, q3, q4; + uint8x16_t q5, q6, q7, q8, q9, q10; + uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14; + uint8x8_t d15, d16, d17, d18, d19, d20, d21; + uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3; + uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7; + uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11; - qblimit = vdupq_n_u8(blimit); - qlimit = vdupq_n_u8(limit); - qthresh = vdupq_n_u8(thresh); + qblimit = vdupq_n_u8(blimit); + qlimit = vdupq_n_u8(limit); + qthresh = vdupq_n_u8(thresh); - us = u - 4; - vs = v - 4; - d6 = vld1_u8(us); - us += pitch; - d7 = vld1_u8(vs); - vs += pitch; - d8 = vld1_u8(us); - us += pitch; - d9 = vld1_u8(vs); - vs += pitch; - d10 = vld1_u8(us); - us += pitch; - d11 = vld1_u8(vs); - vs += pitch; - d12 = vld1_u8(us); - us += pitch; - d13 = vld1_u8(vs); - vs += pitch; - d14 = vld1_u8(us); - us += pitch; - d15 = vld1_u8(vs); - vs += pitch; - d16 = vld1_u8(us); - us += pitch; - d17 = vld1_u8(vs); - vs += pitch; - d18 = vld1_u8(us); - us += pitch; - d19 = vld1_u8(vs); - vs += pitch; - d20 = vld1_u8(us); - d21 = vld1_u8(vs); + us = u - 4; + vs = v - 4; + d6 = vld1_u8(us); + us += pitch; + d7 = vld1_u8(vs); + vs += pitch; + d8 = vld1_u8(us); + us += pitch; + d9 = vld1_u8(vs); + vs += pitch; + d10 = vld1_u8(us); + us += pitch; + d11 = vld1_u8(vs); + vs += pitch; + d12 = vld1_u8(us); + us += pitch; + d13 = vld1_u8(vs); + vs += pitch; + d14 = vld1_u8(us); + us += pitch; + d15 = vld1_u8(vs); + vs += pitch; + d16 = vld1_u8(us); + us += pitch; + d17 = vld1_u8(vs); + vs += pitch; + d18 = vld1_u8(us); + us += pitch; + d19 = vld1_u8(vs); + vs += pitch; + d20 = vld1_u8(us); + d21 = vld1_u8(vs); - q3 = vcombine_u8(d6, d7); - 
q4 = vcombine_u8(d8, d9); - q5 = vcombine_u8(d10, d11); - q6 = vcombine_u8(d12, d13); - q7 = vcombine_u8(d14, d15); - q8 = vcombine_u8(d16, d17); - q9 = vcombine_u8(d18, d19); - q10 = vcombine_u8(d20, d21); + q3 = vcombine_u8(d6, d7); + q4 = vcombine_u8(d8, d9); + q5 = vcombine_u8(d10, d11); + q6 = vcombine_u8(d12, d13); + q7 = vcombine_u8(d14, d15); + q8 = vcombine_u8(d16, d17); + q9 = vcombine_u8(d18, d19); + q10 = vcombine_u8(d20, d21); - q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7)); - q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8)); - q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9)); - q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10)); + q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7)); + q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8)); + q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9)); + q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10)); - q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]), - vreinterpretq_u16_u32(q2tmp2.val[0])); - q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]), - vreinterpretq_u16_u32(q2tmp3.val[0])); - q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]), - vreinterpretq_u16_u32(q2tmp2.val[1])); - q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]), - vreinterpretq_u16_u32(q2tmp3.val[1])); + q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]), + vreinterpretq_u16_u32(q2tmp2.val[0])); + q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]), + vreinterpretq_u16_u32(q2tmp3.val[0])); + q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]), + vreinterpretq_u16_u32(q2tmp2.val[1])); + q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]), + vreinterpretq_u16_u32(q2tmp3.val[1])); - q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]), - vreinterpretq_u8_u16(q2tmp5.val[0])); - q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]), - 
vreinterpretq_u8_u16(q2tmp5.val[1])); - q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]), - vreinterpretq_u8_u16(q2tmp7.val[0])); - q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]), - vreinterpretq_u8_u16(q2tmp7.val[1])); + q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]), + vreinterpretq_u8_u16(q2tmp5.val[0])); + q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]), + vreinterpretq_u8_u16(q2tmp5.val[1])); + q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]), + vreinterpretq_u8_u16(q2tmp7.val[0])); + q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]), + vreinterpretq_u8_u16(q2tmp7.val[1])); - q3 = q2tmp8.val[0]; - q4 = q2tmp8.val[1]; - q5 = q2tmp9.val[0]; - q6 = q2tmp9.val[1]; - q7 = q2tmp10.val[0]; - q8 = q2tmp10.val[1]; - q9 = q2tmp11.val[0]; - q10 = q2tmp11.val[1]; + q3 = q2tmp8.val[0]; + q4 = q2tmp8.val[1]; + q5 = q2tmp9.val[0]; + q6 = q2tmp9.val[1]; + q7 = q2tmp10.val[0]; + q8 = q2tmp10.val[1]; + q9 = q2tmp11.val[0]; + q10 = q2tmp11.val[1]; - vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, - q5, q6, q7, q8, q9, q10, - &q4, &q5, &q6, &q7, &q8, &q9); + vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9, + q10, &q4, &q5, &q6, &q7, &q8, &q9); - q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7)); - q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8)); - q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9)); - q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10)); + q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7)); + q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8)); + q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9)); + q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10)); - q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]), - vreinterpretq_u16_u32(q2tmp2.val[0])); - q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]), - 
vreinterpretq_u16_u32(q2tmp3.val[0])); - q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]), - vreinterpretq_u16_u32(q2tmp2.val[1])); - q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]), - vreinterpretq_u16_u32(q2tmp3.val[1])); + q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]), + vreinterpretq_u16_u32(q2tmp2.val[0])); + q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]), + vreinterpretq_u16_u32(q2tmp3.val[0])); + q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]), + vreinterpretq_u16_u32(q2tmp2.val[1])); + q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]), + vreinterpretq_u16_u32(q2tmp3.val[1])); - q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]), - vreinterpretq_u8_u16(q2tmp5.val[0])); - q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]), - vreinterpretq_u8_u16(q2tmp5.val[1])); - q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]), - vreinterpretq_u8_u16(q2tmp7.val[0])); - q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]), - vreinterpretq_u8_u16(q2tmp7.val[1])); + q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]), + vreinterpretq_u8_u16(q2tmp5.val[0])); + q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]), + vreinterpretq_u8_u16(q2tmp5.val[1])); + q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]), + vreinterpretq_u8_u16(q2tmp7.val[0])); + q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]), + vreinterpretq_u8_u16(q2tmp7.val[1])); - q3 = q2tmp8.val[0]; - q4 = q2tmp8.val[1]; - q5 = q2tmp9.val[0]; - q6 = q2tmp9.val[1]; - q7 = q2tmp10.val[0]; - q8 = q2tmp10.val[1]; - q9 = q2tmp11.val[0]; - q10 = q2tmp11.val[1]; + q3 = q2tmp8.val[0]; + q4 = q2tmp8.val[1]; + q5 = q2tmp9.val[0]; + q6 = q2tmp9.val[1]; + q7 = q2tmp10.val[0]; + q8 = q2tmp10.val[1]; + q9 = q2tmp11.val[0]; + q10 = q2tmp11.val[1]; - ud = u - 4; - vst1_u8(ud, vget_low_u8(q3)); - ud += pitch; - vst1_u8(ud, vget_low_u8(q4)); - ud += pitch; - vst1_u8(ud, vget_low_u8(q5)); - ud += pitch; - vst1_u8(ud, vget_low_u8(q6)); - ud += pitch; - vst1_u8(ud, 
vget_low_u8(q7)); - ud += pitch; - vst1_u8(ud, vget_low_u8(q8)); - ud += pitch; - vst1_u8(ud, vget_low_u8(q9)); - ud += pitch; - vst1_u8(ud, vget_low_u8(q10)); + ud = u - 4; + vst1_u8(ud, vget_low_u8(q3)); + ud += pitch; + vst1_u8(ud, vget_low_u8(q4)); + ud += pitch; + vst1_u8(ud, vget_low_u8(q5)); + ud += pitch; + vst1_u8(ud, vget_low_u8(q6)); + ud += pitch; + vst1_u8(ud, vget_low_u8(q7)); + ud += pitch; + vst1_u8(ud, vget_low_u8(q8)); + ud += pitch; + vst1_u8(ud, vget_low_u8(q9)); + ud += pitch; + vst1_u8(ud, vget_low_u8(q10)); - vd = v - 4; - vst1_u8(vd, vget_high_u8(q3)); - vd += pitch; - vst1_u8(vd, vget_high_u8(q4)); - vd += pitch; - vst1_u8(vd, vget_high_u8(q5)); - vd += pitch; - vst1_u8(vd, vget_high_u8(q6)); - vd += pitch; - vst1_u8(vd, vget_high_u8(q7)); - vd += pitch; - vst1_u8(vd, vget_high_u8(q8)); - vd += pitch; - vst1_u8(vd, vget_high_u8(q9)); - vd += pitch; - vst1_u8(vd, vget_high_u8(q10)); - return; + vd = v - 4; + vst1_u8(vd, vget_high_u8(q3)); + vd += pitch; + vst1_u8(vd, vget_high_u8(q4)); + vd += pitch; + vst1_u8(vd, vget_high_u8(q5)); + vd += pitch; + vst1_u8(vd, vget_high_u8(q6)); + vd += pitch; + vst1_u8(vd, vget_high_u8(q7)); + vd += pitch; + vst1_u8(vd, vget_high_u8(q8)); + vd += pitch; + vst1_u8(vd, vget_high_u8(q9)); + vd += pitch; + vst1_u8(vd, vget_high_u8(q10)); + return; } diff --git a/libs/libvpx/vp8/common/arm/neon/shortidct4x4llm_neon.c b/libs/libvpx/vp8/common/arm/neon/shortidct4x4llm_neon.c index 373afa6ed3..a36c0c1cac 100644 --- a/libs/libvpx/vp8/common/arm/neon/shortidct4x4llm_neon.c +++ b/libs/libvpx/vp8/common/arm/neon/shortidct4x4llm_neon.c @@ -11,113 +11,109 @@ #include static const int16_t cospi8sqrt2minus1 = 20091; -static const int16_t sinpi8sqrt2 = 35468; +static const int16_t sinpi8sqrt2 = 35468; -void vp8_short_idct4x4llm_neon( - int16_t *input, - unsigned char *pred_ptr, - int pred_stride, - unsigned char *dst_ptr, - int dst_stride) { - int i; - uint32x2_t d6u32 = vdup_n_u32(0); - uint8x8_t d1u8; - int16x4_t d2, d3, 
d4, d5, d10, d11, d12, d13; - uint16x8_t q1u16; - int16x8_t q1s16, q2s16, q3s16, q4s16; - int32x2x2_t v2tmp0, v2tmp1; - int16x4x2_t v2tmp2, v2tmp3; +void vp8_short_idct4x4llm_neon(int16_t *input, unsigned char *pred_ptr, + int pred_stride, unsigned char *dst_ptr, + int dst_stride) { + int i; + uint32x2_t d6u32 = vdup_n_u32(0); + uint8x8_t d1u8; + int16x4_t d2, d3, d4, d5, d10, d11, d12, d13; + uint16x8_t q1u16; + int16x8_t q1s16, q2s16, q3s16, q4s16; + int32x2x2_t v2tmp0, v2tmp1; + int16x4x2_t v2tmp2, v2tmp3; - d2 = vld1_s16(input); - d3 = vld1_s16(input + 4); - d4 = vld1_s16(input + 8); - d5 = vld1_s16(input + 12); + d2 = vld1_s16(input); + d3 = vld1_s16(input + 4); + d4 = vld1_s16(input + 8); + d5 = vld1_s16(input + 12); - // 1st for loop - q1s16 = vcombine_s16(d2, d4); // Swap d3 d4 here - q2s16 = vcombine_s16(d3, d5); + // 1st for loop + q1s16 = vcombine_s16(d2, d4); // Swap d3 d4 here + q2s16 = vcombine_s16(d3, d5); - q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2); - q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1); + q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2); + q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1); - d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // a1 - d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // b1 + d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // a1 + d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // b1 - q3s16 = vshrq_n_s16(q3s16, 1); - q4s16 = vshrq_n_s16(q4s16, 1); + q3s16 = vshrq_n_s16(q3s16, 1); + q4s16 = vshrq_n_s16(q4s16, 1); - q3s16 = vqaddq_s16(q3s16, q2s16); - q4s16 = vqaddq_s16(q4s16, q2s16); + q3s16 = vqaddq_s16(q3s16, q2s16); + q4s16 = vqaddq_s16(q4s16, q2s16); - d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16)); // c1 - d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16)); // d1 + d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16)); // c1 + d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16)); // d1 - d2 = vqadd_s16(d12, d11); - d3 = 
vqadd_s16(d13, d10); - d4 = vqsub_s16(d13, d10); - d5 = vqsub_s16(d12, d11); + d2 = vqadd_s16(d12, d11); + d3 = vqadd_s16(d13, d10); + d4 = vqsub_s16(d13, d10); + d5 = vqsub_s16(d12, d11); - v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4)); - v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5)); - v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]), - vreinterpret_s16_s32(v2tmp1.val[0])); - v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]), - vreinterpret_s16_s32(v2tmp1.val[1])); + v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4)); + v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5)); + v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]), + vreinterpret_s16_s32(v2tmp1.val[0])); + v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]), + vreinterpret_s16_s32(v2tmp1.val[1])); - // 2nd for loop - q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp3.val[0]); - q2s16 = vcombine_s16(v2tmp2.val[1], v2tmp3.val[1]); + // 2nd for loop + q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp3.val[0]); + q2s16 = vcombine_s16(v2tmp2.val[1], v2tmp3.val[1]); - q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2); - q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1); + q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2); + q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1); - d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // a1 - d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // b1 + d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // a1 + d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // b1 - q3s16 = vshrq_n_s16(q3s16, 1); - q4s16 = vshrq_n_s16(q4s16, 1); + q3s16 = vshrq_n_s16(q3s16, 1); + q4s16 = vshrq_n_s16(q4s16, 1); - q3s16 = vqaddq_s16(q3s16, q2s16); - q4s16 = vqaddq_s16(q4s16, q2s16); + q3s16 = vqaddq_s16(q3s16, q2s16); + q4s16 = vqaddq_s16(q4s16, q2s16); - d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16)); // c1 - d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16)); // 
d1 + d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16)); // c1 + d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16)); // d1 - d2 = vqadd_s16(d12, d11); - d3 = vqadd_s16(d13, d10); - d4 = vqsub_s16(d13, d10); - d5 = vqsub_s16(d12, d11); + d2 = vqadd_s16(d12, d11); + d3 = vqadd_s16(d13, d10); + d4 = vqsub_s16(d13, d10); + d5 = vqsub_s16(d12, d11); - d2 = vrshr_n_s16(d2, 3); - d3 = vrshr_n_s16(d3, 3); - d4 = vrshr_n_s16(d4, 3); - d5 = vrshr_n_s16(d5, 3); + d2 = vrshr_n_s16(d2, 3); + d3 = vrshr_n_s16(d3, 3); + d4 = vrshr_n_s16(d4, 3); + d5 = vrshr_n_s16(d5, 3); - v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4)); - v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5)); - v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]), - vreinterpret_s16_s32(v2tmp1.val[0])); - v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]), - vreinterpret_s16_s32(v2tmp1.val[1])); + v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4)); + v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5)); + v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]), + vreinterpret_s16_s32(v2tmp1.val[0])); + v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]), + vreinterpret_s16_s32(v2tmp1.val[1])); - q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp2.val[1]); - q2s16 = vcombine_s16(v2tmp3.val[0], v2tmp3.val[1]); + q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp2.val[1]); + q2s16 = vcombine_s16(v2tmp3.val[0], v2tmp3.val[1]); - // dc_only_idct_add - for (i = 0; i < 2; i++, q1s16 = q2s16) { - d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 0); - pred_ptr += pred_stride; - d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 1); - pred_ptr += pred_stride; + // dc_only_idct_add + for (i = 0; i < 2; i++, q1s16 = q2s16) { + d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 0); + pred_ptr += pred_stride; + d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 1); + pred_ptr += pred_stride; - q1u16 = 
vaddw_u8(vreinterpretq_u16_s16(q1s16), - vreinterpret_u8_u32(d6u32)); - d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16)); + q1u16 = vaddw_u8(vreinterpretq_u16_s16(q1s16), vreinterpret_u8_u32(d6u32)); + d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16)); - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 0); - dst_ptr += dst_stride; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 1); - dst_ptr += dst_stride; - } - return; + vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 0); + dst_ptr += dst_stride; + vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 1); + dst_ptr += dst_stride; + } + return; } diff --git a/libs/libvpx/vp8/common/arm/neon/sixtappredict_neon.c b/libs/libvpx/vp8/common/arm/neon/sixtappredict_neon.c index 4c2efc92b1..622baa3c50 100644 --- a/libs/libvpx/vp8/common/arm/neon/sixtappredict_neon.c +++ b/libs/libvpx/vp8/common/arm/neon/sixtappredict_neon.c @@ -12,338 +12,32 @@ #include "vpx_ports/mem.h" static const int8_t vp8_sub_pel_filters[8][8] = { - {0, 0, 128, 0, 0, 0, 0, 0}, /* note that 1/8 pel positionyys are */ - {0, -6, 123, 12, -1, 0, 0, 0}, /* just as per alpha -0.5 bicubic */ - {2, -11, 108, 36, -8, 1, 0, 0}, /* New 1/4 pel 6 tap filter */ - {0, -9, 93, 50, -6, 0, 0, 0}, - {3, -16, 77, 77, -16, 3, 0, 0}, /* New 1/2 pel 6 tap filter */ - {0, -6, 50, 93, -9, 0, 0, 0}, - {1, -8, 36, 108, -11, 2, 0, 0}, /* New 1/4 pel 6 tap filter */ - {0, -1, 12, 123, -6, 0, 0, 0}, + { 0, 0, 128, 0, 0, 0, 0, 0 }, /* note that 1/8 pel positionyys are */ + { 0, -6, 123, 12, -1, 0, 0, 0 }, /* just as per alpha -0.5 bicubic */ + { 2, -11, 108, 36, -8, 1, 0, 0 }, /* New 1/4 pel 6 tap filter */ + { 0, -9, 93, 50, -6, 0, 0, 0 }, + { 3, -16, 77, 77, -16, 3, 0, 0 }, /* New 1/2 pel 6 tap filter */ + { 0, -6, 50, 93, -9, 0, 0, 0 }, + { 1, -8, 36, 108, -11, 2, 0, 0 }, /* New 1/4 pel 6 tap filter */ + { 0, -1, 12, 123, -6, 0, 0, 0 }, }; -void vp8_sixtap_predict4x4_neon( - unsigned char *src_ptr, - int src_pixels_per_line, - 
int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch) { - unsigned char *src; - uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d18u8, d19u8, d20u8, d21u8; - uint8x8_t d23u8, d24u8, d25u8, d26u8, d27u8, d28u8, d29u8, d30u8, d31u8; - int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8; - uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16; - uint16x8_t q8u16, q9u16, q10u16, q11u16, q12u16; - int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16; - int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16; - uint8x16_t q3u8, q4u8, q5u8, q6u8, q11u8; - uint64x2_t q3u64, q4u64, q5u64, q6u64, q9u64, q10u64; - uint32x2x2_t d0u32x2, d1u32x2; +void vp8_sixtap_predict8x4_neon(unsigned char *src_ptr, int src_pixels_per_line, + int xoffset, int yoffset, + unsigned char *dst_ptr, int dst_pitch) { + unsigned char *src; + uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8; + uint8x8_t d22u8, d23u8, d24u8, d25u8, d26u8; + uint8x8_t d27u8, d28u8, d29u8, d30u8, d31u8; + int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8; + uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16; + uint16x8_t q8u16, q9u16, q10u16, q11u16, q12u16; + int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16; + int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16; + uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8; - if (xoffset == 0) { // secondpass_filter4x4_only - uint32x2_t d27u32 = vdup_n_u32(0); - uint32x2_t d28u32 = vdup_n_u32(0); - uint32x2_t d29u32 = vdup_n_u32(0); - uint32x2_t d30u32 = vdup_n_u32(0); - uint32x2_t d31u32 = vdup_n_u32(0); - - // load second_pass filter - dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); - d0s8 = vdup_lane_s8(dtmps8, 0); - d1s8 = vdup_lane_s8(dtmps8, 1); - d2s8 = vdup_lane_s8(dtmps8, 2); - d3s8 = vdup_lane_s8(dtmps8, 3); - d4s8 = vdup_lane_s8(dtmps8, 4); - d5s8 = vdup_lane_s8(dtmps8, 5); - d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); - d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); - d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); - d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); - d4u8 = 
vreinterpret_u8_s8(vabs_s8(d4s8)); - d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); - - // load src data - src = src_ptr - src_pixels_per_line * 2; - d27u32 = vld1_lane_u32((const uint32_t *)src, d27u32, 0); - src += src_pixels_per_line; - d27u32 = vld1_lane_u32((const uint32_t *)src, d27u32, 1); - src += src_pixels_per_line; - d28u32 = vld1_lane_u32((const uint32_t *)src, d28u32, 0); - src += src_pixels_per_line; - d28u32 = vld1_lane_u32((const uint32_t *)src, d28u32, 1); - src += src_pixels_per_line; - d29u32 = vld1_lane_u32((const uint32_t *)src, d29u32, 0); - src += src_pixels_per_line; - d29u32 = vld1_lane_u32((const uint32_t *)src, d29u32, 1); - src += src_pixels_per_line; - d30u32 = vld1_lane_u32((const uint32_t *)src, d30u32, 0); - src += src_pixels_per_line; - d30u32 = vld1_lane_u32((const uint32_t *)src, d30u32, 1); - src += src_pixels_per_line; - d31u32 = vld1_lane_u32((const uint32_t *)src, d31u32, 0); - - d27u8 = vreinterpret_u8_u32(d27u32); - d28u8 = vreinterpret_u8_u32(d28u32); - d29u8 = vreinterpret_u8_u32(d29u32); - d30u8 = vreinterpret_u8_u32(d30u32); - d31u8 = vreinterpret_u8_u32(d31u32); - - d23u8 = vext_u8(d27u8, d28u8, 4); - d24u8 = vext_u8(d28u8, d29u8, 4); - d25u8 = vext_u8(d29u8, d30u8, 4); - d26u8 = vext_u8(d30u8, d31u8, 4); - - q3u16 = vmull_u8(d27u8, d0u8); - q4u16 = vmull_u8(d28u8, d0u8); - q5u16 = vmull_u8(d25u8, d5u8); - q6u16 = vmull_u8(d26u8, d5u8); - - q3u16 = vmlsl_u8(q3u16, d29u8, d4u8); - q4u16 = vmlsl_u8(q4u16, d30u8, d4u8); - q5u16 = vmlsl_u8(q5u16, d23u8, d1u8); - q6u16 = vmlsl_u8(q6u16, d24u8, d1u8); - - q3u16 = vmlal_u8(q3u16, d28u8, d2u8); - q4u16 = vmlal_u8(q4u16, d29u8, d2u8); - q5u16 = vmlal_u8(q5u16, d24u8, d3u8); - q6u16 = vmlal_u8(q6u16, d25u8, d3u8); - - q3s16 = vreinterpretq_s16_u16(q3u16); - q4s16 = vreinterpretq_s16_u16(q4u16); - q5s16 = vreinterpretq_s16_u16(q5u16); - q6s16 = vreinterpretq_s16_u16(q6u16); - - q5s16 = vqaddq_s16(q5s16, q3s16); - q6s16 = vqaddq_s16(q6s16, q4s16); - - d3u8 = vqrshrun_n_s16(q5s16, 7); - 
d4u8 = vqrshrun_n_s16(q6s16, 7); - - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 0); - dst_ptr += dst_pitch; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 1); - dst_ptr += dst_pitch; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d4u8), 0); - dst_ptr += dst_pitch; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d4u8), 1); - return; - } - - // load first_pass filter - dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]); - d0s8 = vdup_lane_s8(dtmps8, 0); - d1s8 = vdup_lane_s8(dtmps8, 1); - d2s8 = vdup_lane_s8(dtmps8, 2); - d3s8 = vdup_lane_s8(dtmps8, 3); - d4s8 = vdup_lane_s8(dtmps8, 4); - d5s8 = vdup_lane_s8(dtmps8, 5); - d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); - d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); - d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); - d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); - d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); - d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); - - // First pass: output_height lines x output_width columns (9x4) - - if (yoffset == 0) // firstpass_filter4x4_only - src = src_ptr - 2; - else - src = src_ptr - 2 - (src_pixels_per_line * 2); - - q3u8 = vld1q_u8(src); - src += src_pixels_per_line; - q4u8 = vld1q_u8(src); - src += src_pixels_per_line; - q5u8 = vld1q_u8(src); - src += src_pixels_per_line; - q6u8 = vld1q_u8(src); - src += src_pixels_per_line; - - d18u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5); - d19u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5); - d20u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5); - d21u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5); - - // vswp here - q3u8 = vcombine_u8(vget_low_u8(q3u8), vget_low_u8(q4u8)); - q5u8 = vcombine_u8(vget_low_u8(q5u8), vget_low_u8(q6u8)); - - d0u32x2 = vzip_u32(vreinterpret_u32_u8(d18u8), // d18 d19 - vreinterpret_u32_u8(d19u8)); - d1u32x2 = vzip_u32(vreinterpret_u32_u8(d20u8), // d20 d21 - vreinterpret_u32_u8(d21u8)); - q7u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d5u8); - q8u16 = 
vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d5u8); - - // keep original src data in q4 q6 - q4u64 = vreinterpretq_u64_u8(q3u8); - q6u64 = vreinterpretq_u64_u8(q5u8); - - d0u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q3u8)), // d6 d7 - vreinterpret_u32_u8(vget_high_u8(q3u8))); - d1u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q5u8)), // d10 d11 - vreinterpret_u32_u8(vget_high_u8(q5u8))); - q9u64 = vshrq_n_u64(q4u64, 8); - q10u64 = vshrq_n_u64(q6u64, 8); - q7u16 = vmlal_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d0u8); - q8u16 = vmlal_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d0u8); - - d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q9u64)), // d18 d19 - vreinterpret_u32_u64(vget_high_u64(q9u64))); - d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q10u64)), // d20 d211 - vreinterpret_u32_u64(vget_high_u64(q10u64))); - q3u64 = vshrq_n_u64(q4u64, 32); - q5u64 = vshrq_n_u64(q6u64, 32); - q7u16 = vmlsl_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d1u8); - q8u16 = vmlsl_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d1u8); - - d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q3u64)), // d6 d7 - vreinterpret_u32_u64(vget_high_u64(q3u64))); - d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q5u64)), // d10 d11 - vreinterpret_u32_u64(vget_high_u64(q5u64))); - q9u64 = vshrq_n_u64(q4u64, 16); - q10u64 = vshrq_n_u64(q6u64, 16); - q7u16 = vmlsl_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d4u8); - q8u16 = vmlsl_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d4u8); - - d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q9u64)), // d18 d19 - vreinterpret_u32_u64(vget_high_u64(q9u64))); - d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q10u64)), // d20 d211 - vreinterpret_u32_u64(vget_high_u64(q10u64))); - q3u64 = vshrq_n_u64(q4u64, 24); - q5u64 = vshrq_n_u64(q6u64, 24); - q7u16 = vmlal_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d2u8); - q8u16 = vmlal_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d2u8); - - d0u32x2 = 
vzip_u32(vreinterpret_u32_u64(vget_low_u64(q3u64)), // d6 d7 - vreinterpret_u32_u64(vget_high_u64(q3u64))); - d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q5u64)), // d10 d11 - vreinterpret_u32_u64(vget_high_u64(q5u64))); - q9u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d3u8); - q10u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d3u8); - - q7s16 = vreinterpretq_s16_u16(q7u16); - q8s16 = vreinterpretq_s16_u16(q8u16); - q9s16 = vreinterpretq_s16_u16(q9u16); - q10s16 = vreinterpretq_s16_u16(q10u16); - q7s16 = vqaddq_s16(q7s16, q9s16); - q8s16 = vqaddq_s16(q8s16, q10s16); - - d27u8 = vqrshrun_n_s16(q7s16, 7); - d28u8 = vqrshrun_n_s16(q8s16, 7); - - if (yoffset == 0) { // firstpass_filter4x4_only - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d27u8), 0); - dst_ptr += dst_pitch; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d27u8), 1); - dst_ptr += dst_pitch; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d28u8), 0); - dst_ptr += dst_pitch; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d28u8), 1); - return; - } - - // First Pass on rest 5-line data - q3u8 = vld1q_u8(src); - src += src_pixels_per_line; - q4u8 = vld1q_u8(src); - src += src_pixels_per_line; - q5u8 = vld1q_u8(src); - src += src_pixels_per_line; - q6u8 = vld1q_u8(src); - src += src_pixels_per_line; - q11u8 = vld1q_u8(src); - - d18u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5); - d19u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5); - d20u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5); - d21u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5); - - // vswp here - q3u8 = vcombine_u8(vget_low_u8(q3u8), vget_low_u8(q4u8)); - q5u8 = vcombine_u8(vget_low_u8(q5u8), vget_low_u8(q6u8)); - - d0u32x2 = vzip_u32(vreinterpret_u32_u8(d18u8), // d18 d19 - vreinterpret_u32_u8(d19u8)); - d1u32x2 = vzip_u32(vreinterpret_u32_u8(d20u8), // d20 d21 - vreinterpret_u32_u8(d21u8)); - d31u8 = vext_u8(vget_low_u8(q11u8), vget_high_u8(q11u8), 5); - q7u16 
= vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d5u8); - q8u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d5u8); - q12u16 = vmull_u8(d31u8, d5u8); - - q4u64 = vreinterpretq_u64_u8(q3u8); - q6u64 = vreinterpretq_u64_u8(q5u8); - - d0u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q3u8)), // d6 d7 - vreinterpret_u32_u8(vget_high_u8(q3u8))); - d1u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q5u8)), // d10 d11 - vreinterpret_u32_u8(vget_high_u8(q5u8))); - q9u64 = vshrq_n_u64(q4u64, 8); - q10u64 = vshrq_n_u64(q6u64, 8); - q7u16 = vmlal_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d0u8); - q8u16 = vmlal_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d0u8); - q12u16 = vmlal_u8(q12u16, vget_low_u8(q11u8), d0u8); - - d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q9u64)), // d18 d19 - vreinterpret_u32_u64(vget_high_u64(q9u64))); - d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q10u64)), // d20 d211 - vreinterpret_u32_u64(vget_high_u64(q10u64))); - q3u64 = vshrq_n_u64(q4u64, 32); - q5u64 = vshrq_n_u64(q6u64, 32); - d31u8 = vext_u8(vget_low_u8(q11u8), vget_high_u8(q11u8), 1); - q7u16 = vmlsl_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d1u8); - q8u16 = vmlsl_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d1u8); - q12u16 = vmlsl_u8(q12u16, d31u8, d1u8); - - d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q3u64)), // d6 d7 - vreinterpret_u32_u64(vget_high_u64(q3u64))); - d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q5u64)), // d10 d11 - vreinterpret_u32_u64(vget_high_u64(q5u64))); - q9u64 = vshrq_n_u64(q4u64, 16); - q10u64 = vshrq_n_u64(q6u64, 16); - d31u8 = vext_u8(vget_low_u8(q11u8), vget_high_u8(q11u8), 4); - q7u16 = vmlsl_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d4u8); - q8u16 = vmlsl_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d4u8); - q12u16 = vmlsl_u8(q12u16, d31u8, d4u8); - - d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q9u64)), // d18 d19 - vreinterpret_u32_u64(vget_high_u64(q9u64))); - d1u32x2 = 
vzip_u32(vreinterpret_u32_u64(vget_low_u64(q10u64)), // d20 d211 - vreinterpret_u32_u64(vget_high_u64(q10u64))); - q3u64 = vshrq_n_u64(q4u64, 24); - q5u64 = vshrq_n_u64(q6u64, 24); - d31u8 = vext_u8(vget_low_u8(q11u8), vget_high_u8(q11u8), 2); - q7u16 = vmlal_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d2u8); - q8u16 = vmlal_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d2u8); - q12u16 = vmlal_u8(q12u16, d31u8, d2u8); - - d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q3u64)), // d6 d7 - vreinterpret_u32_u64(vget_high_u64(q3u64))); - d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q5u64)), // d10 d11 - vreinterpret_u32_u64(vget_high_u64(q5u64))); - d31u8 = vext_u8(vget_low_u8(q11u8), vget_high_u8(q11u8), 3); - q9u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d3u8); - q10u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d3u8); - q11u16 = vmull_u8(d31u8, d3u8); - - q7s16 = vreinterpretq_s16_u16(q7u16); - q8s16 = vreinterpretq_s16_u16(q8u16); - q9s16 = vreinterpretq_s16_u16(q9u16); - q10s16 = vreinterpretq_s16_u16(q10u16); - q11s16 = vreinterpretq_s16_u16(q11u16); - q12s16 = vreinterpretq_s16_u16(q12u16); - q7s16 = vqaddq_s16(q7s16, q9s16); - q8s16 = vqaddq_s16(q8s16, q10s16); - q12s16 = vqaddq_s16(q12s16, q11s16); - - d29u8 = vqrshrun_n_s16(q7s16, 7); - d30u8 = vqrshrun_n_s16(q8s16, 7); - d31u8 = vqrshrun_n_s16(q12s16, 7); - - // Second pass: 4x4 + if (xoffset == 0) { // secondpass_filter8x4_only + // load second_pass filter dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); d0s8 = vdup_lane_s8(dtmps8, 0); d1s8 = vdup_lane_s8(dtmps8, 1); @@ -358,388 +52,25 @@ void vp8_sixtap_predict4x4_neon( d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); - d23u8 = vext_u8(d27u8, d28u8, 4); - d24u8 = vext_u8(d28u8, d29u8, 4); - d25u8 = vext_u8(d29u8, d30u8, 4); - d26u8 = vext_u8(d30u8, d31u8, 4); - - q3u16 = vmull_u8(d27u8, d0u8); - q4u16 = vmull_u8(d28u8, d0u8); - q5u16 = vmull_u8(d25u8, d5u8); - q6u16 = vmull_u8(d26u8, d5u8); - 
- q3u16 = vmlsl_u8(q3u16, d29u8, d4u8); - q4u16 = vmlsl_u8(q4u16, d30u8, d4u8); - q5u16 = vmlsl_u8(q5u16, d23u8, d1u8); - q6u16 = vmlsl_u8(q6u16, d24u8, d1u8); - - q3u16 = vmlal_u8(q3u16, d28u8, d2u8); - q4u16 = vmlal_u8(q4u16, d29u8, d2u8); - q5u16 = vmlal_u8(q5u16, d24u8, d3u8); - q6u16 = vmlal_u8(q6u16, d25u8, d3u8); - - q3s16 = vreinterpretq_s16_u16(q3u16); - q4s16 = vreinterpretq_s16_u16(q4u16); - q5s16 = vreinterpretq_s16_u16(q5u16); - q6s16 = vreinterpretq_s16_u16(q6u16); - - q5s16 = vqaddq_s16(q5s16, q3s16); - q6s16 = vqaddq_s16(q6s16, q4s16); - - d3u8 = vqrshrun_n_s16(q5s16, 7); - d4u8 = vqrshrun_n_s16(q6s16, 7); - - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 0); - dst_ptr += dst_pitch; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 1); - dst_ptr += dst_pitch; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d4u8), 0); - dst_ptr += dst_pitch; - vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d4u8), 1); - return; -} - -void vp8_sixtap_predict8x4_neon( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch) { - unsigned char *src; - uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8; - uint8x8_t d22u8, d23u8, d24u8, d25u8, d26u8; - uint8x8_t d27u8, d28u8, d29u8, d30u8, d31u8; - int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8; - uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16; - uint16x8_t q8u16, q9u16, q10u16, q11u16, q12u16; - int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16; - int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16; - uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8; - - if (xoffset == 0) { // secondpass_filter8x4_only - // load second_pass filter - dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); - d0s8 = vdup_lane_s8(dtmps8, 0); - d1s8 = vdup_lane_s8(dtmps8, 1); - d2s8 = vdup_lane_s8(dtmps8, 2); - d3s8 = vdup_lane_s8(dtmps8, 3); - d4s8 = vdup_lane_s8(dtmps8, 4); - d5s8 = vdup_lane_s8(dtmps8, 5); - d0u8 = 
vreinterpret_u8_s8(vabs_s8(d0s8)); - d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); - d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); - d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); - d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); - d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); - - // load src data - src = src_ptr - src_pixels_per_line * 2; - d22u8 = vld1_u8(src); - src += src_pixels_per_line; - d23u8 = vld1_u8(src); - src += src_pixels_per_line; - d24u8 = vld1_u8(src); - src += src_pixels_per_line; - d25u8 = vld1_u8(src); - src += src_pixels_per_line; - d26u8 = vld1_u8(src); - src += src_pixels_per_line; - d27u8 = vld1_u8(src); - src += src_pixels_per_line; - d28u8 = vld1_u8(src); - src += src_pixels_per_line; - d29u8 = vld1_u8(src); - src += src_pixels_per_line; - d30u8 = vld1_u8(src); - - q3u16 = vmull_u8(d22u8, d0u8); - q4u16 = vmull_u8(d23u8, d0u8); - q5u16 = vmull_u8(d24u8, d0u8); - q6u16 = vmull_u8(d25u8, d0u8); - - q3u16 = vmlsl_u8(q3u16, d23u8, d1u8); - q4u16 = vmlsl_u8(q4u16, d24u8, d1u8); - q5u16 = vmlsl_u8(q5u16, d25u8, d1u8); - q6u16 = vmlsl_u8(q6u16, d26u8, d1u8); - - q3u16 = vmlsl_u8(q3u16, d26u8, d4u8); - q4u16 = vmlsl_u8(q4u16, d27u8, d4u8); - q5u16 = vmlsl_u8(q5u16, d28u8, d4u8); - q6u16 = vmlsl_u8(q6u16, d29u8, d4u8); - - q3u16 = vmlal_u8(q3u16, d24u8, d2u8); - q4u16 = vmlal_u8(q4u16, d25u8, d2u8); - q5u16 = vmlal_u8(q5u16, d26u8, d2u8); - q6u16 = vmlal_u8(q6u16, d27u8, d2u8); - - q3u16 = vmlal_u8(q3u16, d27u8, d5u8); - q4u16 = vmlal_u8(q4u16, d28u8, d5u8); - q5u16 = vmlal_u8(q5u16, d29u8, d5u8); - q6u16 = vmlal_u8(q6u16, d30u8, d5u8); - - q7u16 = vmull_u8(d25u8, d3u8); - q8u16 = vmull_u8(d26u8, d3u8); - q9u16 = vmull_u8(d27u8, d3u8); - q10u16 = vmull_u8(d28u8, d3u8); - - q3s16 = vreinterpretq_s16_u16(q3u16); - q4s16 = vreinterpretq_s16_u16(q4u16); - q5s16 = vreinterpretq_s16_u16(q5u16); - q6s16 = vreinterpretq_s16_u16(q6u16); - q7s16 = vreinterpretq_s16_u16(q7u16); - q8s16 = vreinterpretq_s16_u16(q8u16); - q9s16 = vreinterpretq_s16_u16(q9u16); - q10s16 = 
vreinterpretq_s16_u16(q10u16); - - q7s16 = vqaddq_s16(q7s16, q3s16); - q8s16 = vqaddq_s16(q8s16, q4s16); - q9s16 = vqaddq_s16(q9s16, q5s16); - q10s16 = vqaddq_s16(q10s16, q6s16); - - d6u8 = vqrshrun_n_s16(q7s16, 7); - d7u8 = vqrshrun_n_s16(q8s16, 7); - d8u8 = vqrshrun_n_s16(q9s16, 7); - d9u8 = vqrshrun_n_s16(q10s16, 7); - - vst1_u8(dst_ptr, d6u8); - dst_ptr += dst_pitch; - vst1_u8(dst_ptr, d7u8); - dst_ptr += dst_pitch; - vst1_u8(dst_ptr, d8u8); - dst_ptr += dst_pitch; - vst1_u8(dst_ptr, d9u8); - return; - } - - // load first_pass filter - dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]); - d0s8 = vdup_lane_s8(dtmps8, 0); - d1s8 = vdup_lane_s8(dtmps8, 1); - d2s8 = vdup_lane_s8(dtmps8, 2); - d3s8 = vdup_lane_s8(dtmps8, 3); - d4s8 = vdup_lane_s8(dtmps8, 4); - d5s8 = vdup_lane_s8(dtmps8, 5); - d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); - d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); - d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); - d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); - d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); - d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); - - // First pass: output_height lines x output_width columns (9x4) - if (yoffset == 0) // firstpass_filter4x4_only - src = src_ptr - 2; - else - src = src_ptr - 2 - (src_pixels_per_line * 2); - q3u8 = vld1q_u8(src); + // load src data + src = src_ptr - src_pixels_per_line * 2; + d22u8 = vld1_u8(src); src += src_pixels_per_line; - q4u8 = vld1q_u8(src); + d23u8 = vld1_u8(src); src += src_pixels_per_line; - q5u8 = vld1q_u8(src); + d24u8 = vld1_u8(src); src += src_pixels_per_line; - q6u8 = vld1q_u8(src); - - q7u16 = vmull_u8(vget_low_u8(q3u8), d0u8); - q8u16 = vmull_u8(vget_low_u8(q4u8), d0u8); - q9u16 = vmull_u8(vget_low_u8(q5u8), d0u8); - q10u16 = vmull_u8(vget_low_u8(q6u8), d0u8); - - d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); - d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1); - d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1); - d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1); - 
- q7u16 = vmlsl_u8(q7u16, d28u8, d1u8); - q8u16 = vmlsl_u8(q8u16, d29u8, d1u8); - q9u16 = vmlsl_u8(q9u16, d30u8, d1u8); - q10u16 = vmlsl_u8(q10u16, d31u8, d1u8); - - d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4); - d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4); - d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4); - d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4); - - q7u16 = vmlsl_u8(q7u16, d28u8, d4u8); - q8u16 = vmlsl_u8(q8u16, d29u8, d4u8); - q9u16 = vmlsl_u8(q9u16, d30u8, d4u8); - q10u16 = vmlsl_u8(q10u16, d31u8, d4u8); - - d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2); - d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2); - d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2); - d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2); - - q7u16 = vmlal_u8(q7u16, d28u8, d2u8); - q8u16 = vmlal_u8(q8u16, d29u8, d2u8); - q9u16 = vmlal_u8(q9u16, d30u8, d2u8); - q10u16 = vmlal_u8(q10u16, d31u8, d2u8); - - d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5); - d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5); - d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5); - d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5); - - q7u16 = vmlal_u8(q7u16, d28u8, d5u8); - q8u16 = vmlal_u8(q8u16, d29u8, d5u8); - q9u16 = vmlal_u8(q9u16, d30u8, d5u8); - q10u16 = vmlal_u8(q10u16, d31u8, d5u8); - - d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3); - d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3); - d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3); - d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3); - - q3u16 = vmull_u8(d28u8, d3u8); - q4u16 = vmull_u8(d29u8, d3u8); - q5u16 = vmull_u8(d30u8, d3u8); - q6u16 = vmull_u8(d31u8, d3u8); - - q3s16 = vreinterpretq_s16_u16(q3u16); - q4s16 = vreinterpretq_s16_u16(q4u16); - q5s16 = vreinterpretq_s16_u16(q5u16); - q6s16 = vreinterpretq_s16_u16(q6u16); - q7s16 = vreinterpretq_s16_u16(q7u16); - q8s16 = 
vreinterpretq_s16_u16(q8u16); - q9s16 = vreinterpretq_s16_u16(q9u16); - q10s16 = vreinterpretq_s16_u16(q10u16); - - q7s16 = vqaddq_s16(q7s16, q3s16); - q8s16 = vqaddq_s16(q8s16, q4s16); - q9s16 = vqaddq_s16(q9s16, q5s16); - q10s16 = vqaddq_s16(q10s16, q6s16); - - d22u8 = vqrshrun_n_s16(q7s16, 7); - d23u8 = vqrshrun_n_s16(q8s16, 7); - d24u8 = vqrshrun_n_s16(q9s16, 7); - d25u8 = vqrshrun_n_s16(q10s16, 7); - - if (yoffset == 0) { // firstpass_filter8x4_only - vst1_u8(dst_ptr, d22u8); - dst_ptr += dst_pitch; - vst1_u8(dst_ptr, d23u8); - dst_ptr += dst_pitch; - vst1_u8(dst_ptr, d24u8); - dst_ptr += dst_pitch; - vst1_u8(dst_ptr, d25u8); - return; - } - - // First Pass on rest 5-line data + d25u8 = vld1_u8(src); src += src_pixels_per_line; - q3u8 = vld1q_u8(src); + d26u8 = vld1_u8(src); src += src_pixels_per_line; - q4u8 = vld1q_u8(src); + d27u8 = vld1_u8(src); src += src_pixels_per_line; - q5u8 = vld1q_u8(src); + d28u8 = vld1_u8(src); src += src_pixels_per_line; - q6u8 = vld1q_u8(src); + d29u8 = vld1_u8(src); src += src_pixels_per_line; - q7u8 = vld1q_u8(src); - - q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8); - q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8); - q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8); - q11u16 = vmull_u8(vget_low_u8(q6u8), d0u8); - q12u16 = vmull_u8(vget_low_u8(q7u8), d0u8); - - d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); - d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1); - d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1); - d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1); - d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 1); - - q8u16 = vmlsl_u8(q8u16, d27u8, d1u8); - q9u16 = vmlsl_u8(q9u16, d28u8, d1u8); - q10u16 = vmlsl_u8(q10u16, d29u8, d1u8); - q11u16 = vmlsl_u8(q11u16, d30u8, d1u8); - q12u16 = vmlsl_u8(q12u16, d31u8, d1u8); - - d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4); - d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4); - d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 
4); - d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4); - d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 4); - - q8u16 = vmlsl_u8(q8u16, d27u8, d4u8); - q9u16 = vmlsl_u8(q9u16, d28u8, d4u8); - q10u16 = vmlsl_u8(q10u16, d29u8, d4u8); - q11u16 = vmlsl_u8(q11u16, d30u8, d4u8); - q12u16 = vmlsl_u8(q12u16, d31u8, d4u8); - - d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2); - d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2); - d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2); - d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2); - d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 2); - - q8u16 = vmlal_u8(q8u16, d27u8, d2u8); - q9u16 = vmlal_u8(q9u16, d28u8, d2u8); - q10u16 = vmlal_u8(q10u16, d29u8, d2u8); - q11u16 = vmlal_u8(q11u16, d30u8, d2u8); - q12u16 = vmlal_u8(q12u16, d31u8, d2u8); - - d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5); - d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5); - d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5); - d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5); - d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 5); - - q8u16 = vmlal_u8(q8u16, d27u8, d5u8); - q9u16 = vmlal_u8(q9u16, d28u8, d5u8); - q10u16 = vmlal_u8(q10u16, d29u8, d5u8); - q11u16 = vmlal_u8(q11u16, d30u8, d5u8); - q12u16 = vmlal_u8(q12u16, d31u8, d5u8); - - d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3); - d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3); - d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3); - d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3); - d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 3); - - q3u16 = vmull_u8(d27u8, d3u8); - q4u16 = vmull_u8(d28u8, d3u8); - q5u16 = vmull_u8(d29u8, d3u8); - q6u16 = vmull_u8(d30u8, d3u8); - q7u16 = vmull_u8(d31u8, d3u8); - - q3s16 = vreinterpretq_s16_u16(q3u16); - q4s16 = vreinterpretq_s16_u16(q4u16); - q5s16 = vreinterpretq_s16_u16(q5u16); - q6s16 = vreinterpretq_s16_u16(q6u16); 
- q7s16 = vreinterpretq_s16_u16(q7u16); - q8s16 = vreinterpretq_s16_u16(q8u16); - q9s16 = vreinterpretq_s16_u16(q9u16); - q10s16 = vreinterpretq_s16_u16(q10u16); - q11s16 = vreinterpretq_s16_u16(q11u16); - q12s16 = vreinterpretq_s16_u16(q12u16); - - q8s16 = vqaddq_s16(q8s16, q3s16); - q9s16 = vqaddq_s16(q9s16, q4s16); - q10s16 = vqaddq_s16(q10s16, q5s16); - q11s16 = vqaddq_s16(q11s16, q6s16); - q12s16 = vqaddq_s16(q12s16, q7s16); - - d26u8 = vqrshrun_n_s16(q8s16, 7); - d27u8 = vqrshrun_n_s16(q9s16, 7); - d28u8 = vqrshrun_n_s16(q10s16, 7); - d29u8 = vqrshrun_n_s16(q11s16, 7); - d30u8 = vqrshrun_n_s16(q12s16, 7); - - // Second pass: 8x4 - dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); - d0s8 = vdup_lane_s8(dtmps8, 0); - d1s8 = vdup_lane_s8(dtmps8, 1); - d2s8 = vdup_lane_s8(dtmps8, 2); - d3s8 = vdup_lane_s8(dtmps8, 3); - d4s8 = vdup_lane_s8(dtmps8, 4); - d5s8 = vdup_lane_s8(dtmps8, 5); - d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); - d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); - d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); - d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); - d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); - d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); + d30u8 = vld1_u8(src); q3u16 = vmull_u8(d22u8, d0u8); q4u16 = vmull_u8(d23u8, d0u8); @@ -798,378 +129,314 @@ void vp8_sixtap_predict8x4_neon( dst_ptr += dst_pitch; vst1_u8(dst_ptr, d9u8); return; + } + + // load first_pass filter + dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]); + d0s8 = vdup_lane_s8(dtmps8, 0); + d1s8 = vdup_lane_s8(dtmps8, 1); + d2s8 = vdup_lane_s8(dtmps8, 2); + d3s8 = vdup_lane_s8(dtmps8, 3); + d4s8 = vdup_lane_s8(dtmps8, 4); + d5s8 = vdup_lane_s8(dtmps8, 5); + d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); + d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); + d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); + d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); + d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); + d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); + + // First pass: output_height lines x output_width columns (9x4) + if (yoffset == 0) 
// firstpass_filter4x4_only + src = src_ptr - 2; + else + src = src_ptr - 2 - (src_pixels_per_line * 2); + q3u8 = vld1q_u8(src); + src += src_pixels_per_line; + q4u8 = vld1q_u8(src); + src += src_pixels_per_line; + q5u8 = vld1q_u8(src); + src += src_pixels_per_line; + q6u8 = vld1q_u8(src); + + q7u16 = vmull_u8(vget_low_u8(q3u8), d0u8); + q8u16 = vmull_u8(vget_low_u8(q4u8), d0u8); + q9u16 = vmull_u8(vget_low_u8(q5u8), d0u8); + q10u16 = vmull_u8(vget_low_u8(q6u8), d0u8); + + d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); + d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1); + d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1); + d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1); + + q7u16 = vmlsl_u8(q7u16, d28u8, d1u8); + q8u16 = vmlsl_u8(q8u16, d29u8, d1u8); + q9u16 = vmlsl_u8(q9u16, d30u8, d1u8); + q10u16 = vmlsl_u8(q10u16, d31u8, d1u8); + + d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4); + d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4); + d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4); + d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4); + + q7u16 = vmlsl_u8(q7u16, d28u8, d4u8); + q8u16 = vmlsl_u8(q8u16, d29u8, d4u8); + q9u16 = vmlsl_u8(q9u16, d30u8, d4u8); + q10u16 = vmlsl_u8(q10u16, d31u8, d4u8); + + d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2); + d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2); + d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2); + d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2); + + q7u16 = vmlal_u8(q7u16, d28u8, d2u8); + q8u16 = vmlal_u8(q8u16, d29u8, d2u8); + q9u16 = vmlal_u8(q9u16, d30u8, d2u8); + q10u16 = vmlal_u8(q10u16, d31u8, d2u8); + + d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5); + d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5); + d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5); + d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5); + + q7u16 = vmlal_u8(q7u16, d28u8, d5u8); + q8u16 
= vmlal_u8(q8u16, d29u8, d5u8); + q9u16 = vmlal_u8(q9u16, d30u8, d5u8); + q10u16 = vmlal_u8(q10u16, d31u8, d5u8); + + d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3); + d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3); + d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3); + d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3); + + q3u16 = vmull_u8(d28u8, d3u8); + q4u16 = vmull_u8(d29u8, d3u8); + q5u16 = vmull_u8(d30u8, d3u8); + q6u16 = vmull_u8(d31u8, d3u8); + + q3s16 = vreinterpretq_s16_u16(q3u16); + q4s16 = vreinterpretq_s16_u16(q4u16); + q5s16 = vreinterpretq_s16_u16(q5u16); + q6s16 = vreinterpretq_s16_u16(q6u16); + q7s16 = vreinterpretq_s16_u16(q7u16); + q8s16 = vreinterpretq_s16_u16(q8u16); + q9s16 = vreinterpretq_s16_u16(q9u16); + q10s16 = vreinterpretq_s16_u16(q10u16); + + q7s16 = vqaddq_s16(q7s16, q3s16); + q8s16 = vqaddq_s16(q8s16, q4s16); + q9s16 = vqaddq_s16(q9s16, q5s16); + q10s16 = vqaddq_s16(q10s16, q6s16); + + d22u8 = vqrshrun_n_s16(q7s16, 7); + d23u8 = vqrshrun_n_s16(q8s16, 7); + d24u8 = vqrshrun_n_s16(q9s16, 7); + d25u8 = vqrshrun_n_s16(q10s16, 7); + + if (yoffset == 0) { // firstpass_filter8x4_only + vst1_u8(dst_ptr, d22u8); + dst_ptr += dst_pitch; + vst1_u8(dst_ptr, d23u8); + dst_ptr += dst_pitch; + vst1_u8(dst_ptr, d24u8); + dst_ptr += dst_pitch; + vst1_u8(dst_ptr, d25u8); + return; + } + + // First Pass on rest 5-line data + src += src_pixels_per_line; + q3u8 = vld1q_u8(src); + src += src_pixels_per_line; + q4u8 = vld1q_u8(src); + src += src_pixels_per_line; + q5u8 = vld1q_u8(src); + src += src_pixels_per_line; + q6u8 = vld1q_u8(src); + src += src_pixels_per_line; + q7u8 = vld1q_u8(src); + + q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8); + q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8); + q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8); + q11u16 = vmull_u8(vget_low_u8(q6u8), d0u8); + q12u16 = vmull_u8(vget_low_u8(q7u8), d0u8); + + d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); + d28u8 = vext_u8(vget_low_u8(q4u8), 
vget_high_u8(q4u8), 1); + d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1); + d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1); + d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 1); + + q8u16 = vmlsl_u8(q8u16, d27u8, d1u8); + q9u16 = vmlsl_u8(q9u16, d28u8, d1u8); + q10u16 = vmlsl_u8(q10u16, d29u8, d1u8); + q11u16 = vmlsl_u8(q11u16, d30u8, d1u8); + q12u16 = vmlsl_u8(q12u16, d31u8, d1u8); + + d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4); + d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4); + d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4); + d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4); + d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 4); + + q8u16 = vmlsl_u8(q8u16, d27u8, d4u8); + q9u16 = vmlsl_u8(q9u16, d28u8, d4u8); + q10u16 = vmlsl_u8(q10u16, d29u8, d4u8); + q11u16 = vmlsl_u8(q11u16, d30u8, d4u8); + q12u16 = vmlsl_u8(q12u16, d31u8, d4u8); + + d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2); + d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2); + d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2); + d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2); + d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 2); + + q8u16 = vmlal_u8(q8u16, d27u8, d2u8); + q9u16 = vmlal_u8(q9u16, d28u8, d2u8); + q10u16 = vmlal_u8(q10u16, d29u8, d2u8); + q11u16 = vmlal_u8(q11u16, d30u8, d2u8); + q12u16 = vmlal_u8(q12u16, d31u8, d2u8); + + d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5); + d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5); + d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5); + d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5); + d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 5); + + q8u16 = vmlal_u8(q8u16, d27u8, d5u8); + q9u16 = vmlal_u8(q9u16, d28u8, d5u8); + q10u16 = vmlal_u8(q10u16, d29u8, d5u8); + q11u16 = vmlal_u8(q11u16, d30u8, d5u8); + q12u16 = vmlal_u8(q12u16, d31u8, d5u8); + + d27u8 = vext_u8(vget_low_u8(q3u8), 
vget_high_u8(q3u8), 3); + d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3); + d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3); + d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3); + d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 3); + + q3u16 = vmull_u8(d27u8, d3u8); + q4u16 = vmull_u8(d28u8, d3u8); + q5u16 = vmull_u8(d29u8, d3u8); + q6u16 = vmull_u8(d30u8, d3u8); + q7u16 = vmull_u8(d31u8, d3u8); + + q3s16 = vreinterpretq_s16_u16(q3u16); + q4s16 = vreinterpretq_s16_u16(q4u16); + q5s16 = vreinterpretq_s16_u16(q5u16); + q6s16 = vreinterpretq_s16_u16(q6u16); + q7s16 = vreinterpretq_s16_u16(q7u16); + q8s16 = vreinterpretq_s16_u16(q8u16); + q9s16 = vreinterpretq_s16_u16(q9u16); + q10s16 = vreinterpretq_s16_u16(q10u16); + q11s16 = vreinterpretq_s16_u16(q11u16); + q12s16 = vreinterpretq_s16_u16(q12u16); + + q8s16 = vqaddq_s16(q8s16, q3s16); + q9s16 = vqaddq_s16(q9s16, q4s16); + q10s16 = vqaddq_s16(q10s16, q5s16); + q11s16 = vqaddq_s16(q11s16, q6s16); + q12s16 = vqaddq_s16(q12s16, q7s16); + + d26u8 = vqrshrun_n_s16(q8s16, 7); + d27u8 = vqrshrun_n_s16(q9s16, 7); + d28u8 = vqrshrun_n_s16(q10s16, 7); + d29u8 = vqrshrun_n_s16(q11s16, 7); + d30u8 = vqrshrun_n_s16(q12s16, 7); + + // Second pass: 8x4 + dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); + d0s8 = vdup_lane_s8(dtmps8, 0); + d1s8 = vdup_lane_s8(dtmps8, 1); + d2s8 = vdup_lane_s8(dtmps8, 2); + d3s8 = vdup_lane_s8(dtmps8, 3); + d4s8 = vdup_lane_s8(dtmps8, 4); + d5s8 = vdup_lane_s8(dtmps8, 5); + d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); + d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); + d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); + d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); + d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); + d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); + + q3u16 = vmull_u8(d22u8, d0u8); + q4u16 = vmull_u8(d23u8, d0u8); + q5u16 = vmull_u8(d24u8, d0u8); + q6u16 = vmull_u8(d25u8, d0u8); + + q3u16 = vmlsl_u8(q3u16, d23u8, d1u8); + q4u16 = vmlsl_u8(q4u16, d24u8, d1u8); + q5u16 = vmlsl_u8(q5u16, 
d25u8, d1u8); + q6u16 = vmlsl_u8(q6u16, d26u8, d1u8); + + q3u16 = vmlsl_u8(q3u16, d26u8, d4u8); + q4u16 = vmlsl_u8(q4u16, d27u8, d4u8); + q5u16 = vmlsl_u8(q5u16, d28u8, d4u8); + q6u16 = vmlsl_u8(q6u16, d29u8, d4u8); + + q3u16 = vmlal_u8(q3u16, d24u8, d2u8); + q4u16 = vmlal_u8(q4u16, d25u8, d2u8); + q5u16 = vmlal_u8(q5u16, d26u8, d2u8); + q6u16 = vmlal_u8(q6u16, d27u8, d2u8); + + q3u16 = vmlal_u8(q3u16, d27u8, d5u8); + q4u16 = vmlal_u8(q4u16, d28u8, d5u8); + q5u16 = vmlal_u8(q5u16, d29u8, d5u8); + q6u16 = vmlal_u8(q6u16, d30u8, d5u8); + + q7u16 = vmull_u8(d25u8, d3u8); + q8u16 = vmull_u8(d26u8, d3u8); + q9u16 = vmull_u8(d27u8, d3u8); + q10u16 = vmull_u8(d28u8, d3u8); + + q3s16 = vreinterpretq_s16_u16(q3u16); + q4s16 = vreinterpretq_s16_u16(q4u16); + q5s16 = vreinterpretq_s16_u16(q5u16); + q6s16 = vreinterpretq_s16_u16(q6u16); + q7s16 = vreinterpretq_s16_u16(q7u16); + q8s16 = vreinterpretq_s16_u16(q8u16); + q9s16 = vreinterpretq_s16_u16(q9u16); + q10s16 = vreinterpretq_s16_u16(q10u16); + + q7s16 = vqaddq_s16(q7s16, q3s16); + q8s16 = vqaddq_s16(q8s16, q4s16); + q9s16 = vqaddq_s16(q9s16, q5s16); + q10s16 = vqaddq_s16(q10s16, q6s16); + + d6u8 = vqrshrun_n_s16(q7s16, 7); + d7u8 = vqrshrun_n_s16(q8s16, 7); + d8u8 = vqrshrun_n_s16(q9s16, 7); + d9u8 = vqrshrun_n_s16(q10s16, 7); + + vst1_u8(dst_ptr, d6u8); + dst_ptr += dst_pitch; + vst1_u8(dst_ptr, d7u8); + dst_ptr += dst_pitch; + vst1_u8(dst_ptr, d8u8); + dst_ptr += dst_pitch; + vst1_u8(dst_ptr, d9u8); + return; } -void vp8_sixtap_predict8x8_neon( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch) { - unsigned char *src, *tmpp; - unsigned char tmp[64]; - int i; - uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8; - uint8x8_t d18u8, d19u8, d20u8, d21u8, d22u8, d23u8, d24u8, d25u8; - uint8x8_t d26u8, d27u8, d28u8, d29u8, d30u8, d31u8; - int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8; - uint16x8_t q3u16, q4u16, q5u16, q6u16, 
q7u16; - uint16x8_t q8u16, q9u16, q10u16, q11u16, q12u16; - int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16; - int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16; - uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q9u8, q10u8, q11u8, q12u8; +void vp8_sixtap_predict8x8_neon(unsigned char *src_ptr, int src_pixels_per_line, + int xoffset, int yoffset, + unsigned char *dst_ptr, int dst_pitch) { + unsigned char *src, *tmpp; + unsigned char tmp[64]; + int i; + uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8; + uint8x8_t d18u8, d19u8, d20u8, d21u8, d22u8, d23u8, d24u8, d25u8; + uint8x8_t d26u8, d27u8, d28u8, d29u8, d30u8, d31u8; + int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8; + uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16; + uint16x8_t q8u16, q9u16, q10u16, q11u16, q12u16; + int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16; + int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16; + uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q9u8, q10u8, q11u8, q12u8; - if (xoffset == 0) { // secondpass_filter8x8_only - // load second_pass filter - dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); - d0s8 = vdup_lane_s8(dtmps8, 0); - d1s8 = vdup_lane_s8(dtmps8, 1); - d2s8 = vdup_lane_s8(dtmps8, 2); - d3s8 = vdup_lane_s8(dtmps8, 3); - d4s8 = vdup_lane_s8(dtmps8, 4); - d5s8 = vdup_lane_s8(dtmps8, 5); - d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); - d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); - d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); - d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); - d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); - d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); - - // load src data - src = src_ptr - src_pixels_per_line * 2; - d18u8 = vld1_u8(src); - src += src_pixels_per_line; - d19u8 = vld1_u8(src); - src += src_pixels_per_line; - d20u8 = vld1_u8(src); - src += src_pixels_per_line; - d21u8 = vld1_u8(src); - src += src_pixels_per_line; - d22u8 = vld1_u8(src); - src += src_pixels_per_line; - d23u8 = vld1_u8(src); - src += src_pixels_per_line; - d24u8 = vld1_u8(src); - src += src_pixels_per_line; - 
d25u8 = vld1_u8(src); - src += src_pixels_per_line; - d26u8 = vld1_u8(src); - src += src_pixels_per_line; - d27u8 = vld1_u8(src); - src += src_pixels_per_line; - d28u8 = vld1_u8(src); - src += src_pixels_per_line; - d29u8 = vld1_u8(src); - src += src_pixels_per_line; - d30u8 = vld1_u8(src); - - for (i = 2; i > 0; i--) { - q3u16 = vmull_u8(d18u8, d0u8); - q4u16 = vmull_u8(d19u8, d0u8); - q5u16 = vmull_u8(d20u8, d0u8); - q6u16 = vmull_u8(d21u8, d0u8); - - q3u16 = vmlsl_u8(q3u16, d19u8, d1u8); - q4u16 = vmlsl_u8(q4u16, d20u8, d1u8); - q5u16 = vmlsl_u8(q5u16, d21u8, d1u8); - q6u16 = vmlsl_u8(q6u16, d22u8, d1u8); - - q3u16 = vmlsl_u8(q3u16, d22u8, d4u8); - q4u16 = vmlsl_u8(q4u16, d23u8, d4u8); - q5u16 = vmlsl_u8(q5u16, d24u8, d4u8); - q6u16 = vmlsl_u8(q6u16, d25u8, d4u8); - - q3u16 = vmlal_u8(q3u16, d20u8, d2u8); - q4u16 = vmlal_u8(q4u16, d21u8, d2u8); - q5u16 = vmlal_u8(q5u16, d22u8, d2u8); - q6u16 = vmlal_u8(q6u16, d23u8, d2u8); - - q3u16 = vmlal_u8(q3u16, d23u8, d5u8); - q4u16 = vmlal_u8(q4u16, d24u8, d5u8); - q5u16 = vmlal_u8(q5u16, d25u8, d5u8); - q6u16 = vmlal_u8(q6u16, d26u8, d5u8); - - q7u16 = vmull_u8(d21u8, d3u8); - q8u16 = vmull_u8(d22u8, d3u8); - q9u16 = vmull_u8(d23u8, d3u8); - q10u16 = vmull_u8(d24u8, d3u8); - - q3s16 = vreinterpretq_s16_u16(q3u16); - q4s16 = vreinterpretq_s16_u16(q4u16); - q5s16 = vreinterpretq_s16_u16(q5u16); - q6s16 = vreinterpretq_s16_u16(q6u16); - q7s16 = vreinterpretq_s16_u16(q7u16); - q8s16 = vreinterpretq_s16_u16(q8u16); - q9s16 = vreinterpretq_s16_u16(q9u16); - q10s16 = vreinterpretq_s16_u16(q10u16); - - q7s16 = vqaddq_s16(q7s16, q3s16); - q8s16 = vqaddq_s16(q8s16, q4s16); - q9s16 = vqaddq_s16(q9s16, q5s16); - q10s16 = vqaddq_s16(q10s16, q6s16); - - d6u8 = vqrshrun_n_s16(q7s16, 7); - d7u8 = vqrshrun_n_s16(q8s16, 7); - d8u8 = vqrshrun_n_s16(q9s16, 7); - d9u8 = vqrshrun_n_s16(q10s16, 7); - - d18u8 = d22u8; - d19u8 = d23u8; - d20u8 = d24u8; - d21u8 = d25u8; - d22u8 = d26u8; - d23u8 = d27u8; - d24u8 = d28u8; - d25u8 = d29u8; - d26u8 = 
d30u8; - - vst1_u8(dst_ptr, d6u8); - dst_ptr += dst_pitch; - vst1_u8(dst_ptr, d7u8); - dst_ptr += dst_pitch; - vst1_u8(dst_ptr, d8u8); - dst_ptr += dst_pitch; - vst1_u8(dst_ptr, d9u8); - dst_ptr += dst_pitch; - } - return; - } - - // load first_pass filter - dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]); - d0s8 = vdup_lane_s8(dtmps8, 0); - d1s8 = vdup_lane_s8(dtmps8, 1); - d2s8 = vdup_lane_s8(dtmps8, 2); - d3s8 = vdup_lane_s8(dtmps8, 3); - d4s8 = vdup_lane_s8(dtmps8, 4); - d5s8 = vdup_lane_s8(dtmps8, 5); - d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); - d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); - d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); - d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); - d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); - d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); - - // First pass: output_height lines x output_width columns (9x4) - if (yoffset == 0) // firstpass_filter4x4_only - src = src_ptr - 2; - else - src = src_ptr - 2 - (src_pixels_per_line * 2); - - tmpp = tmp; - for (i = 2; i > 0; i--) { - q3u8 = vld1q_u8(src); - src += src_pixels_per_line; - q4u8 = vld1q_u8(src); - src += src_pixels_per_line; - q5u8 = vld1q_u8(src); - src += src_pixels_per_line; - q6u8 = vld1q_u8(src); - src += src_pixels_per_line; - - __builtin_prefetch(src); - __builtin_prefetch(src + src_pixels_per_line); - __builtin_prefetch(src + src_pixels_per_line * 2); - - q7u16 = vmull_u8(vget_low_u8(q3u8), d0u8); - q8u16 = vmull_u8(vget_low_u8(q4u8), d0u8); - q9u16 = vmull_u8(vget_low_u8(q5u8), d0u8); - q10u16 = vmull_u8(vget_low_u8(q6u8), d0u8); - - d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); - d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1); - d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1); - d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1); - - q7u16 = vmlsl_u8(q7u16, d28u8, d1u8); - q8u16 = vmlsl_u8(q8u16, d29u8, d1u8); - q9u16 = vmlsl_u8(q9u16, d30u8, d1u8); - q10u16 = vmlsl_u8(q10u16, d31u8, d1u8); - - d28u8 = vext_u8(vget_low_u8(q3u8), 
vget_high_u8(q3u8), 4); - d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4); - d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4); - d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4); - - q7u16 = vmlsl_u8(q7u16, d28u8, d4u8); - q8u16 = vmlsl_u8(q8u16, d29u8, d4u8); - q9u16 = vmlsl_u8(q9u16, d30u8, d4u8); - q10u16 = vmlsl_u8(q10u16, d31u8, d4u8); - - d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2); - d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2); - d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2); - d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2); - - q7u16 = vmlal_u8(q7u16, d28u8, d2u8); - q8u16 = vmlal_u8(q8u16, d29u8, d2u8); - q9u16 = vmlal_u8(q9u16, d30u8, d2u8); - q10u16 = vmlal_u8(q10u16, d31u8, d2u8); - - d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5); - d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5); - d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5); - d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5); - - q7u16 = vmlal_u8(q7u16, d28u8, d5u8); - q8u16 = vmlal_u8(q8u16, d29u8, d5u8); - q9u16 = vmlal_u8(q9u16, d30u8, d5u8); - q10u16 = vmlal_u8(q10u16, d31u8, d5u8); - - d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3); - d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3); - d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3); - d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3); - - q3u16 = vmull_u8(d28u8, d3u8); - q4u16 = vmull_u8(d29u8, d3u8); - q5u16 = vmull_u8(d30u8, d3u8); - q6u16 = vmull_u8(d31u8, d3u8); - - q3s16 = vreinterpretq_s16_u16(q3u16); - q4s16 = vreinterpretq_s16_u16(q4u16); - q5s16 = vreinterpretq_s16_u16(q5u16); - q6s16 = vreinterpretq_s16_u16(q6u16); - q7s16 = vreinterpretq_s16_u16(q7u16); - q8s16 = vreinterpretq_s16_u16(q8u16); - q9s16 = vreinterpretq_s16_u16(q9u16); - q10s16 = vreinterpretq_s16_u16(q10u16); - - q7s16 = vqaddq_s16(q7s16, q3s16); - q8s16 = vqaddq_s16(q8s16, q4s16); - q9s16 = vqaddq_s16(q9s16, 
q5s16); - q10s16 = vqaddq_s16(q10s16, q6s16); - - d22u8 = vqrshrun_n_s16(q7s16, 7); - d23u8 = vqrshrun_n_s16(q8s16, 7); - d24u8 = vqrshrun_n_s16(q9s16, 7); - d25u8 = vqrshrun_n_s16(q10s16, 7); - - if (yoffset == 0) { // firstpass_filter8x4_only - vst1_u8(dst_ptr, d22u8); - dst_ptr += dst_pitch; - vst1_u8(dst_ptr, d23u8); - dst_ptr += dst_pitch; - vst1_u8(dst_ptr, d24u8); - dst_ptr += dst_pitch; - vst1_u8(dst_ptr, d25u8); - dst_ptr += dst_pitch; - } else { - vst1_u8(tmpp, d22u8); - tmpp += 8; - vst1_u8(tmpp, d23u8); - tmpp += 8; - vst1_u8(tmpp, d24u8); - tmpp += 8; - vst1_u8(tmpp, d25u8); - tmpp += 8; - } - } - if (yoffset == 0) - return; - - // First Pass on rest 5-line data - q3u8 = vld1q_u8(src); - src += src_pixels_per_line; - q4u8 = vld1q_u8(src); - src += src_pixels_per_line; - q5u8 = vld1q_u8(src); - src += src_pixels_per_line; - q6u8 = vld1q_u8(src); - src += src_pixels_per_line; - q7u8 = vld1q_u8(src); - - q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8); - q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8); - q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8); - q11u16 = vmull_u8(vget_low_u8(q6u8), d0u8); - q12u16 = vmull_u8(vget_low_u8(q7u8), d0u8); - - d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); - d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1); - d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1); - d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1); - d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 1); - - q8u16 = vmlsl_u8(q8u16, d27u8, d1u8); - q9u16 = vmlsl_u8(q9u16, d28u8, d1u8); - q10u16 = vmlsl_u8(q10u16, d29u8, d1u8); - q11u16 = vmlsl_u8(q11u16, d30u8, d1u8); - q12u16 = vmlsl_u8(q12u16, d31u8, d1u8); - - d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4); - d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4); - d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4); - d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4); - d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 4); - - q8u16 = 
vmlsl_u8(q8u16, d27u8, d4u8); - q9u16 = vmlsl_u8(q9u16, d28u8, d4u8); - q10u16 = vmlsl_u8(q10u16, d29u8, d4u8); - q11u16 = vmlsl_u8(q11u16, d30u8, d4u8); - q12u16 = vmlsl_u8(q12u16, d31u8, d4u8); - - d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2); - d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2); - d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2); - d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2); - d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 2); - - q8u16 = vmlal_u8(q8u16, d27u8, d2u8); - q9u16 = vmlal_u8(q9u16, d28u8, d2u8); - q10u16 = vmlal_u8(q10u16, d29u8, d2u8); - q11u16 = vmlal_u8(q11u16, d30u8, d2u8); - q12u16 = vmlal_u8(q12u16, d31u8, d2u8); - - d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5); - d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5); - d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5); - d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5); - d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 5); - - q8u16 = vmlal_u8(q8u16, d27u8, d5u8); - q9u16 = vmlal_u8(q9u16, d28u8, d5u8); - q10u16 = vmlal_u8(q10u16, d29u8, d5u8); - q11u16 = vmlal_u8(q11u16, d30u8, d5u8); - q12u16 = vmlal_u8(q12u16, d31u8, d5u8); - - d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3); - d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3); - d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3); - d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3); - d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 3); - - q3u16 = vmull_u8(d27u8, d3u8); - q4u16 = vmull_u8(d28u8, d3u8); - q5u16 = vmull_u8(d29u8, d3u8); - q6u16 = vmull_u8(d30u8, d3u8); - q7u16 = vmull_u8(d31u8, d3u8); - - q3s16 = vreinterpretq_s16_u16(q3u16); - q4s16 = vreinterpretq_s16_u16(q4u16); - q5s16 = vreinterpretq_s16_u16(q5u16); - q6s16 = vreinterpretq_s16_u16(q6u16); - q7s16 = vreinterpretq_s16_u16(q7u16); - q8s16 = vreinterpretq_s16_u16(q8u16); - q9s16 = vreinterpretq_s16_u16(q9u16); - q10s16 = 
vreinterpretq_s16_u16(q10u16); - q11s16 = vreinterpretq_s16_u16(q11u16); - q12s16 = vreinterpretq_s16_u16(q12u16); - - q8s16 = vqaddq_s16(q8s16, q3s16); - q9s16 = vqaddq_s16(q9s16, q4s16); - q10s16 = vqaddq_s16(q10s16, q5s16); - q11s16 = vqaddq_s16(q11s16, q6s16); - q12s16 = vqaddq_s16(q12s16, q7s16); - - d26u8 = vqrshrun_n_s16(q8s16, 7); - d27u8 = vqrshrun_n_s16(q9s16, 7); - d28u8 = vqrshrun_n_s16(q10s16, 7); - d29u8 = vqrshrun_n_s16(q11s16, 7); - d30u8 = vqrshrun_n_s16(q12s16, 7); - - // Second pass: 8x8 + if (xoffset == 0) { // secondpass_filter8x8_only + // load second_pass filter dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); d0s8 = vdup_lane_s8(dtmps8, 0); d1s8 = vdup_lane_s8(dtmps8, 1); @@ -1184,25 +451,502 @@ void vp8_sixtap_predict8x8_neon( d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); - tmpp = tmp; - q9u8 = vld1q_u8(tmpp); - tmpp += 16; - q10u8 = vld1q_u8(tmpp); - tmpp += 16; - q11u8 = vld1q_u8(tmpp); - tmpp += 16; - q12u8 = vld1q_u8(tmpp); - - d18u8 = vget_low_u8(q9u8); - d19u8 = vget_high_u8(q9u8); - d20u8 = vget_low_u8(q10u8); - d21u8 = vget_high_u8(q10u8); - d22u8 = vget_low_u8(q11u8); - d23u8 = vget_high_u8(q11u8); - d24u8 = vget_low_u8(q12u8); - d25u8 = vget_high_u8(q12u8); + // load src data + src = src_ptr - src_pixels_per_line * 2; + d18u8 = vld1_u8(src); + src += src_pixels_per_line; + d19u8 = vld1_u8(src); + src += src_pixels_per_line; + d20u8 = vld1_u8(src); + src += src_pixels_per_line; + d21u8 = vld1_u8(src); + src += src_pixels_per_line; + d22u8 = vld1_u8(src); + src += src_pixels_per_line; + d23u8 = vld1_u8(src); + src += src_pixels_per_line; + d24u8 = vld1_u8(src); + src += src_pixels_per_line; + d25u8 = vld1_u8(src); + src += src_pixels_per_line; + d26u8 = vld1_u8(src); + src += src_pixels_per_line; + d27u8 = vld1_u8(src); + src += src_pixels_per_line; + d28u8 = vld1_u8(src); + src += src_pixels_per_line; + d29u8 = vld1_u8(src); + src += src_pixels_per_line; + d30u8 = vld1_u8(src); for (i = 2; i > 
0; i--) { + q3u16 = vmull_u8(d18u8, d0u8); + q4u16 = vmull_u8(d19u8, d0u8); + q5u16 = vmull_u8(d20u8, d0u8); + q6u16 = vmull_u8(d21u8, d0u8); + + q3u16 = vmlsl_u8(q3u16, d19u8, d1u8); + q4u16 = vmlsl_u8(q4u16, d20u8, d1u8); + q5u16 = vmlsl_u8(q5u16, d21u8, d1u8); + q6u16 = vmlsl_u8(q6u16, d22u8, d1u8); + + q3u16 = vmlsl_u8(q3u16, d22u8, d4u8); + q4u16 = vmlsl_u8(q4u16, d23u8, d4u8); + q5u16 = vmlsl_u8(q5u16, d24u8, d4u8); + q6u16 = vmlsl_u8(q6u16, d25u8, d4u8); + + q3u16 = vmlal_u8(q3u16, d20u8, d2u8); + q4u16 = vmlal_u8(q4u16, d21u8, d2u8); + q5u16 = vmlal_u8(q5u16, d22u8, d2u8); + q6u16 = vmlal_u8(q6u16, d23u8, d2u8); + + q3u16 = vmlal_u8(q3u16, d23u8, d5u8); + q4u16 = vmlal_u8(q4u16, d24u8, d5u8); + q5u16 = vmlal_u8(q5u16, d25u8, d5u8); + q6u16 = vmlal_u8(q6u16, d26u8, d5u8); + + q7u16 = vmull_u8(d21u8, d3u8); + q8u16 = vmull_u8(d22u8, d3u8); + q9u16 = vmull_u8(d23u8, d3u8); + q10u16 = vmull_u8(d24u8, d3u8); + + q3s16 = vreinterpretq_s16_u16(q3u16); + q4s16 = vreinterpretq_s16_u16(q4u16); + q5s16 = vreinterpretq_s16_u16(q5u16); + q6s16 = vreinterpretq_s16_u16(q6u16); + q7s16 = vreinterpretq_s16_u16(q7u16); + q8s16 = vreinterpretq_s16_u16(q8u16); + q9s16 = vreinterpretq_s16_u16(q9u16); + q10s16 = vreinterpretq_s16_u16(q10u16); + + q7s16 = vqaddq_s16(q7s16, q3s16); + q8s16 = vqaddq_s16(q8s16, q4s16); + q9s16 = vqaddq_s16(q9s16, q5s16); + q10s16 = vqaddq_s16(q10s16, q6s16); + + d6u8 = vqrshrun_n_s16(q7s16, 7); + d7u8 = vqrshrun_n_s16(q8s16, 7); + d8u8 = vqrshrun_n_s16(q9s16, 7); + d9u8 = vqrshrun_n_s16(q10s16, 7); + + d18u8 = d22u8; + d19u8 = d23u8; + d20u8 = d24u8; + d21u8 = d25u8; + d22u8 = d26u8; + d23u8 = d27u8; + d24u8 = d28u8; + d25u8 = d29u8; + d26u8 = d30u8; + + vst1_u8(dst_ptr, d6u8); + dst_ptr += dst_pitch; + vst1_u8(dst_ptr, d7u8); + dst_ptr += dst_pitch; + vst1_u8(dst_ptr, d8u8); + dst_ptr += dst_pitch; + vst1_u8(dst_ptr, d9u8); + dst_ptr += dst_pitch; + } + return; + } + + // load first_pass filter + dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]); + 
d0s8 = vdup_lane_s8(dtmps8, 0); + d1s8 = vdup_lane_s8(dtmps8, 1); + d2s8 = vdup_lane_s8(dtmps8, 2); + d3s8 = vdup_lane_s8(dtmps8, 3); + d4s8 = vdup_lane_s8(dtmps8, 4); + d5s8 = vdup_lane_s8(dtmps8, 5); + d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); + d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); + d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); + d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); + d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); + d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); + + // First pass: output_height lines x output_width columns (9x4) + if (yoffset == 0) // firstpass_filter4x4_only + src = src_ptr - 2; + else + src = src_ptr - 2 - (src_pixels_per_line * 2); + + tmpp = tmp; + for (i = 2; i > 0; i--) { + q3u8 = vld1q_u8(src); + src += src_pixels_per_line; + q4u8 = vld1q_u8(src); + src += src_pixels_per_line; + q5u8 = vld1q_u8(src); + src += src_pixels_per_line; + q6u8 = vld1q_u8(src); + src += src_pixels_per_line; + + __builtin_prefetch(src); + __builtin_prefetch(src + src_pixels_per_line); + __builtin_prefetch(src + src_pixels_per_line * 2); + + q7u16 = vmull_u8(vget_low_u8(q3u8), d0u8); + q8u16 = vmull_u8(vget_low_u8(q4u8), d0u8); + q9u16 = vmull_u8(vget_low_u8(q5u8), d0u8); + q10u16 = vmull_u8(vget_low_u8(q6u8), d0u8); + + d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); + d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1); + d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1); + d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1); + + q7u16 = vmlsl_u8(q7u16, d28u8, d1u8); + q8u16 = vmlsl_u8(q8u16, d29u8, d1u8); + q9u16 = vmlsl_u8(q9u16, d30u8, d1u8); + q10u16 = vmlsl_u8(q10u16, d31u8, d1u8); + + d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4); + d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4); + d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4); + d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4); + + q7u16 = vmlsl_u8(q7u16, d28u8, d4u8); + q8u16 = vmlsl_u8(q8u16, d29u8, d4u8); + q9u16 = vmlsl_u8(q9u16, 
d30u8, d4u8); + q10u16 = vmlsl_u8(q10u16, d31u8, d4u8); + + d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2); + d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2); + d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2); + d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2); + + q7u16 = vmlal_u8(q7u16, d28u8, d2u8); + q8u16 = vmlal_u8(q8u16, d29u8, d2u8); + q9u16 = vmlal_u8(q9u16, d30u8, d2u8); + q10u16 = vmlal_u8(q10u16, d31u8, d2u8); + + d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5); + d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5); + d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5); + d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5); + + q7u16 = vmlal_u8(q7u16, d28u8, d5u8); + q8u16 = vmlal_u8(q8u16, d29u8, d5u8); + q9u16 = vmlal_u8(q9u16, d30u8, d5u8); + q10u16 = vmlal_u8(q10u16, d31u8, d5u8); + + d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3); + d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3); + d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3); + d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3); + + q3u16 = vmull_u8(d28u8, d3u8); + q4u16 = vmull_u8(d29u8, d3u8); + q5u16 = vmull_u8(d30u8, d3u8); + q6u16 = vmull_u8(d31u8, d3u8); + + q3s16 = vreinterpretq_s16_u16(q3u16); + q4s16 = vreinterpretq_s16_u16(q4u16); + q5s16 = vreinterpretq_s16_u16(q5u16); + q6s16 = vreinterpretq_s16_u16(q6u16); + q7s16 = vreinterpretq_s16_u16(q7u16); + q8s16 = vreinterpretq_s16_u16(q8u16); + q9s16 = vreinterpretq_s16_u16(q9u16); + q10s16 = vreinterpretq_s16_u16(q10u16); + + q7s16 = vqaddq_s16(q7s16, q3s16); + q8s16 = vqaddq_s16(q8s16, q4s16); + q9s16 = vqaddq_s16(q9s16, q5s16); + q10s16 = vqaddq_s16(q10s16, q6s16); + + d22u8 = vqrshrun_n_s16(q7s16, 7); + d23u8 = vqrshrun_n_s16(q8s16, 7); + d24u8 = vqrshrun_n_s16(q9s16, 7); + d25u8 = vqrshrun_n_s16(q10s16, 7); + + if (yoffset == 0) { // firstpass_filter8x4_only + vst1_u8(dst_ptr, d22u8); + dst_ptr += dst_pitch; + vst1_u8(dst_ptr, 
d23u8); + dst_ptr += dst_pitch; + vst1_u8(dst_ptr, d24u8); + dst_ptr += dst_pitch; + vst1_u8(dst_ptr, d25u8); + dst_ptr += dst_pitch; + } else { + vst1_u8(tmpp, d22u8); + tmpp += 8; + vst1_u8(tmpp, d23u8); + tmpp += 8; + vst1_u8(tmpp, d24u8); + tmpp += 8; + vst1_u8(tmpp, d25u8); + tmpp += 8; + } + } + if (yoffset == 0) return; + + // First Pass on rest 5-line data + q3u8 = vld1q_u8(src); + src += src_pixels_per_line; + q4u8 = vld1q_u8(src); + src += src_pixels_per_line; + q5u8 = vld1q_u8(src); + src += src_pixels_per_line; + q6u8 = vld1q_u8(src); + src += src_pixels_per_line; + q7u8 = vld1q_u8(src); + + q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8); + q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8); + q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8); + q11u16 = vmull_u8(vget_low_u8(q6u8), d0u8); + q12u16 = vmull_u8(vget_low_u8(q7u8), d0u8); + + d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1); + d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1); + d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1); + d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1); + d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 1); + + q8u16 = vmlsl_u8(q8u16, d27u8, d1u8); + q9u16 = vmlsl_u8(q9u16, d28u8, d1u8); + q10u16 = vmlsl_u8(q10u16, d29u8, d1u8); + q11u16 = vmlsl_u8(q11u16, d30u8, d1u8); + q12u16 = vmlsl_u8(q12u16, d31u8, d1u8); + + d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4); + d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4); + d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4); + d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4); + d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 4); + + q8u16 = vmlsl_u8(q8u16, d27u8, d4u8); + q9u16 = vmlsl_u8(q9u16, d28u8, d4u8); + q10u16 = vmlsl_u8(q10u16, d29u8, d4u8); + q11u16 = vmlsl_u8(q11u16, d30u8, d4u8); + q12u16 = vmlsl_u8(q12u16, d31u8, d4u8); + + d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2); + d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2); + d29u8 
= vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2); + d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2); + d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 2); + + q8u16 = vmlal_u8(q8u16, d27u8, d2u8); + q9u16 = vmlal_u8(q9u16, d28u8, d2u8); + q10u16 = vmlal_u8(q10u16, d29u8, d2u8); + q11u16 = vmlal_u8(q11u16, d30u8, d2u8); + q12u16 = vmlal_u8(q12u16, d31u8, d2u8); + + d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5); + d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5); + d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5); + d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5); + d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 5); + + q8u16 = vmlal_u8(q8u16, d27u8, d5u8); + q9u16 = vmlal_u8(q9u16, d28u8, d5u8); + q10u16 = vmlal_u8(q10u16, d29u8, d5u8); + q11u16 = vmlal_u8(q11u16, d30u8, d5u8); + q12u16 = vmlal_u8(q12u16, d31u8, d5u8); + + d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3); + d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3); + d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3); + d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3); + d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 3); + + q3u16 = vmull_u8(d27u8, d3u8); + q4u16 = vmull_u8(d28u8, d3u8); + q5u16 = vmull_u8(d29u8, d3u8); + q6u16 = vmull_u8(d30u8, d3u8); + q7u16 = vmull_u8(d31u8, d3u8); + + q3s16 = vreinterpretq_s16_u16(q3u16); + q4s16 = vreinterpretq_s16_u16(q4u16); + q5s16 = vreinterpretq_s16_u16(q5u16); + q6s16 = vreinterpretq_s16_u16(q6u16); + q7s16 = vreinterpretq_s16_u16(q7u16); + q8s16 = vreinterpretq_s16_u16(q8u16); + q9s16 = vreinterpretq_s16_u16(q9u16); + q10s16 = vreinterpretq_s16_u16(q10u16); + q11s16 = vreinterpretq_s16_u16(q11u16); + q12s16 = vreinterpretq_s16_u16(q12u16); + + q8s16 = vqaddq_s16(q8s16, q3s16); + q9s16 = vqaddq_s16(q9s16, q4s16); + q10s16 = vqaddq_s16(q10s16, q5s16); + q11s16 = vqaddq_s16(q11s16, q6s16); + q12s16 = vqaddq_s16(q12s16, q7s16); + + d26u8 = 
vqrshrun_n_s16(q8s16, 7); + d27u8 = vqrshrun_n_s16(q9s16, 7); + d28u8 = vqrshrun_n_s16(q10s16, 7); + d29u8 = vqrshrun_n_s16(q11s16, 7); + d30u8 = vqrshrun_n_s16(q12s16, 7); + + // Second pass: 8x8 + dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); + d0s8 = vdup_lane_s8(dtmps8, 0); + d1s8 = vdup_lane_s8(dtmps8, 1); + d2s8 = vdup_lane_s8(dtmps8, 2); + d3s8 = vdup_lane_s8(dtmps8, 3); + d4s8 = vdup_lane_s8(dtmps8, 4); + d5s8 = vdup_lane_s8(dtmps8, 5); + d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); + d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); + d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); + d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); + d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); + d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); + + tmpp = tmp; + q9u8 = vld1q_u8(tmpp); + tmpp += 16; + q10u8 = vld1q_u8(tmpp); + tmpp += 16; + q11u8 = vld1q_u8(tmpp); + tmpp += 16; + q12u8 = vld1q_u8(tmpp); + + d18u8 = vget_low_u8(q9u8); + d19u8 = vget_high_u8(q9u8); + d20u8 = vget_low_u8(q10u8); + d21u8 = vget_high_u8(q10u8); + d22u8 = vget_low_u8(q11u8); + d23u8 = vget_high_u8(q11u8); + d24u8 = vget_low_u8(q12u8); + d25u8 = vget_high_u8(q12u8); + + for (i = 2; i > 0; i--) { + q3u16 = vmull_u8(d18u8, d0u8); + q4u16 = vmull_u8(d19u8, d0u8); + q5u16 = vmull_u8(d20u8, d0u8); + q6u16 = vmull_u8(d21u8, d0u8); + + q3u16 = vmlsl_u8(q3u16, d19u8, d1u8); + q4u16 = vmlsl_u8(q4u16, d20u8, d1u8); + q5u16 = vmlsl_u8(q5u16, d21u8, d1u8); + q6u16 = vmlsl_u8(q6u16, d22u8, d1u8); + + q3u16 = vmlsl_u8(q3u16, d22u8, d4u8); + q4u16 = vmlsl_u8(q4u16, d23u8, d4u8); + q5u16 = vmlsl_u8(q5u16, d24u8, d4u8); + q6u16 = vmlsl_u8(q6u16, d25u8, d4u8); + + q3u16 = vmlal_u8(q3u16, d20u8, d2u8); + q4u16 = vmlal_u8(q4u16, d21u8, d2u8); + q5u16 = vmlal_u8(q5u16, d22u8, d2u8); + q6u16 = vmlal_u8(q6u16, d23u8, d2u8); + + q3u16 = vmlal_u8(q3u16, d23u8, d5u8); + q4u16 = vmlal_u8(q4u16, d24u8, d5u8); + q5u16 = vmlal_u8(q5u16, d25u8, d5u8); + q6u16 = vmlal_u8(q6u16, d26u8, d5u8); + + q7u16 = vmull_u8(d21u8, d3u8); + q8u16 = vmull_u8(d22u8, d3u8); + 
q9u16 = vmull_u8(d23u8, d3u8); + q10u16 = vmull_u8(d24u8, d3u8); + + q3s16 = vreinterpretq_s16_u16(q3u16); + q4s16 = vreinterpretq_s16_u16(q4u16); + q5s16 = vreinterpretq_s16_u16(q5u16); + q6s16 = vreinterpretq_s16_u16(q6u16); + q7s16 = vreinterpretq_s16_u16(q7u16); + q8s16 = vreinterpretq_s16_u16(q8u16); + q9s16 = vreinterpretq_s16_u16(q9u16); + q10s16 = vreinterpretq_s16_u16(q10u16); + + q7s16 = vqaddq_s16(q7s16, q3s16); + q8s16 = vqaddq_s16(q8s16, q4s16); + q9s16 = vqaddq_s16(q9s16, q5s16); + q10s16 = vqaddq_s16(q10s16, q6s16); + + d6u8 = vqrshrun_n_s16(q7s16, 7); + d7u8 = vqrshrun_n_s16(q8s16, 7); + d8u8 = vqrshrun_n_s16(q9s16, 7); + d9u8 = vqrshrun_n_s16(q10s16, 7); + + d18u8 = d22u8; + d19u8 = d23u8; + d20u8 = d24u8; + d21u8 = d25u8; + d22u8 = d26u8; + d23u8 = d27u8; + d24u8 = d28u8; + d25u8 = d29u8; + d26u8 = d30u8; + + vst1_u8(dst_ptr, d6u8); + dst_ptr += dst_pitch; + vst1_u8(dst_ptr, d7u8); + dst_ptr += dst_pitch; + vst1_u8(dst_ptr, d8u8); + dst_ptr += dst_pitch; + vst1_u8(dst_ptr, d9u8); + dst_ptr += dst_pitch; + } + return; +} + +void vp8_sixtap_predict16x16_neon(unsigned char *src_ptr, + int src_pixels_per_line, int xoffset, + int yoffset, unsigned char *dst_ptr, + int dst_pitch) { + unsigned char *src, *src_tmp, *dst, *tmpp; + unsigned char tmp[336]; + int i, j; + uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8; + uint8x8_t d10u8, d11u8, d12u8, d13u8, d14u8, d15u8, d18u8, d19u8; + uint8x8_t d20u8, d21u8, d22u8, d23u8, d24u8, d25u8, d26u8, d27u8; + uint8x8_t d28u8, d29u8, d30u8, d31u8; + int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8; + uint8x16_t q3u8, q4u8; + uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16, q8u16, q9u16, q10u16; + uint16x8_t q11u16, q12u16, q13u16, q15u16; + int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16, q8s16, q9s16, q10s16; + int16x8_t q11s16, q12s16, q13s16, q15s16; + + if (xoffset == 0) { // secondpass_filter8x8_only + // load second_pass filter + dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); + d0s8 = 
vdup_lane_s8(dtmps8, 0); + d1s8 = vdup_lane_s8(dtmps8, 1); + d2s8 = vdup_lane_s8(dtmps8, 2); + d3s8 = vdup_lane_s8(dtmps8, 3); + d4s8 = vdup_lane_s8(dtmps8, 4); + d5s8 = vdup_lane_s8(dtmps8, 5); + d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); + d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); + d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); + d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); + d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); + d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); + + // load src data + src_tmp = src_ptr - src_pixels_per_line * 2; + for (i = 0; i < 2; ++i) { + src = src_tmp + i * 8; + dst = dst_ptr + i * 8; + d18u8 = vld1_u8(src); + src += src_pixels_per_line; + d19u8 = vld1_u8(src); + src += src_pixels_per_line; + d20u8 = vld1_u8(src); + src += src_pixels_per_line; + d21u8 = vld1_u8(src); + src += src_pixels_per_line; + d22u8 = vld1_u8(src); + src += src_pixels_per_line; + for (j = 0; j < 4; ++j) { + d23u8 = vld1_u8(src); + src += src_pixels_per_line; + d24u8 = vld1_u8(src); + src += src_pixels_per_line; + d25u8 = vld1_u8(src); + src += src_pixels_per_line; + d26u8 = vld1_u8(src); + src += src_pixels_per_line; + q3u16 = vmull_u8(d18u8, d0u8); q4u16 = vmull_u8(d19u8, d0u8); q5u16 = vmull_u8(d20u8, d0u8); @@ -1257,498 +1001,365 @@ void vp8_sixtap_predict8x8_neon( d20u8 = d24u8; d21u8 = d25u8; d22u8 = d26u8; - d23u8 = d27u8; - d24u8 = d28u8; - d25u8 = d29u8; - d26u8 = d30u8; - vst1_u8(dst_ptr, d6u8); - dst_ptr += dst_pitch; - vst1_u8(dst_ptr, d7u8); - dst_ptr += dst_pitch; - vst1_u8(dst_ptr, d8u8); - dst_ptr += dst_pitch; - vst1_u8(dst_ptr, d9u8); - dst_ptr += dst_pitch; + vst1_u8(dst, d6u8); + dst += dst_pitch; + vst1_u8(dst, d7u8); + dst += dst_pitch; + vst1_u8(dst, d8u8); + dst += dst_pitch; + vst1_u8(dst, d9u8); + dst += dst_pitch; + } } return; -} + } -void vp8_sixtap_predict16x16_neon( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch) { - unsigned char *src, *src_tmp, *dst, *tmpp; - unsigned char 
tmp[336]; - int i, j; - uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8; - uint8x8_t d10u8, d11u8, d12u8, d13u8, d14u8, d15u8, d18u8, d19u8; - uint8x8_t d20u8, d21u8, d22u8, d23u8, d24u8, d25u8, d26u8, d27u8; - uint8x8_t d28u8, d29u8, d30u8, d31u8; - int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8; - uint8x16_t q3u8, q4u8; - uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16, q8u16, q9u16, q10u16; - uint16x8_t q11u16, q12u16, q13u16, q15u16; - int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16, q8s16, q9s16, q10s16; - int16x8_t q11s16, q12s16, q13s16, q15s16; + // load first_pass filter + dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]); + d0s8 = vdup_lane_s8(dtmps8, 0); + d1s8 = vdup_lane_s8(dtmps8, 1); + d2s8 = vdup_lane_s8(dtmps8, 2); + d3s8 = vdup_lane_s8(dtmps8, 3); + d4s8 = vdup_lane_s8(dtmps8, 4); + d5s8 = vdup_lane_s8(dtmps8, 5); + d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); + d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); + d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); + d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); + d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); + d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); - if (xoffset == 0) { // secondpass_filter8x8_only - // load second_pass filter - dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); - d0s8 = vdup_lane_s8(dtmps8, 0); - d1s8 = vdup_lane_s8(dtmps8, 1); - d2s8 = vdup_lane_s8(dtmps8, 2); - d3s8 = vdup_lane_s8(dtmps8, 3); - d4s8 = vdup_lane_s8(dtmps8, 4); - d5s8 = vdup_lane_s8(dtmps8, 5); - d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); - d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); - d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); - d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); - d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); - d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); + // First pass: output_height lines x output_width columns (9x4) + if (yoffset == 0) { // firstpass_filter4x4_only + src = src_ptr - 2; + dst = dst_ptr; + for (i = 0; i < 8; ++i) { + d6u8 = vld1_u8(src); + d7u8 = vld1_u8(src + 8); + d8u8 = vld1_u8(src + 16); + src += 
src_pixels_per_line; + d9u8 = vld1_u8(src); + d10u8 = vld1_u8(src + 8); + d11u8 = vld1_u8(src + 16); + src += src_pixels_per_line; - // load src data - src_tmp = src_ptr - src_pixels_per_line * 2; - for (i = 0; i < 2; i++) { - src = src_tmp + i * 8; - dst = dst_ptr + i * 8; - d18u8 = vld1_u8(src); - src += src_pixels_per_line; - d19u8 = vld1_u8(src); - src += src_pixels_per_line; - d20u8 = vld1_u8(src); - src += src_pixels_per_line; - d21u8 = vld1_u8(src); - src += src_pixels_per_line; - d22u8 = vld1_u8(src); - src += src_pixels_per_line; - for (j = 0; j < 4; j++) { - d23u8 = vld1_u8(src); - src += src_pixels_per_line; - d24u8 = vld1_u8(src); - src += src_pixels_per_line; - d25u8 = vld1_u8(src); - src += src_pixels_per_line; - d26u8 = vld1_u8(src); - src += src_pixels_per_line; + __builtin_prefetch(src); + __builtin_prefetch(src + src_pixels_per_line); - q3u16 = vmull_u8(d18u8, d0u8); - q4u16 = vmull_u8(d19u8, d0u8); - q5u16 = vmull_u8(d20u8, d0u8); - q6u16 = vmull_u8(d21u8, d0u8); + q6u16 = vmull_u8(d6u8, d0u8); + q7u16 = vmull_u8(d7u8, d0u8); + q8u16 = vmull_u8(d9u8, d0u8); + q9u16 = vmull_u8(d10u8, d0u8); - q3u16 = vmlsl_u8(q3u16, d19u8, d1u8); - q4u16 = vmlsl_u8(q4u16, d20u8, d1u8); - q5u16 = vmlsl_u8(q5u16, d21u8, d1u8); - q6u16 = vmlsl_u8(q6u16, d22u8, d1u8); + d20u8 = vext_u8(d6u8, d7u8, 1); + d21u8 = vext_u8(d9u8, d10u8, 1); + d22u8 = vext_u8(d7u8, d8u8, 1); + d23u8 = vext_u8(d10u8, d11u8, 1); + d24u8 = vext_u8(d6u8, d7u8, 4); + d25u8 = vext_u8(d9u8, d10u8, 4); + d26u8 = vext_u8(d7u8, d8u8, 4); + d27u8 = vext_u8(d10u8, d11u8, 4); + d28u8 = vext_u8(d6u8, d7u8, 5); + d29u8 = vext_u8(d9u8, d10u8, 5); - q3u16 = vmlsl_u8(q3u16, d22u8, d4u8); - q4u16 = vmlsl_u8(q4u16, d23u8, d4u8); - q5u16 = vmlsl_u8(q5u16, d24u8, d4u8); - q6u16 = vmlsl_u8(q6u16, d25u8, d4u8); + q6u16 = vmlsl_u8(q6u16, d20u8, d1u8); + q8u16 = vmlsl_u8(q8u16, d21u8, d1u8); + q7u16 = vmlsl_u8(q7u16, d22u8, d1u8); + q9u16 = vmlsl_u8(q9u16, d23u8, d1u8); + q6u16 = vmlsl_u8(q6u16, d24u8, d4u8); + 
q8u16 = vmlsl_u8(q8u16, d25u8, d4u8); + q7u16 = vmlsl_u8(q7u16, d26u8, d4u8); + q9u16 = vmlsl_u8(q9u16, d27u8, d4u8); + q6u16 = vmlal_u8(q6u16, d28u8, d5u8); + q8u16 = vmlal_u8(q8u16, d29u8, d5u8); - q3u16 = vmlal_u8(q3u16, d20u8, d2u8); - q4u16 = vmlal_u8(q4u16, d21u8, d2u8); - q5u16 = vmlal_u8(q5u16, d22u8, d2u8); - q6u16 = vmlal_u8(q6u16, d23u8, d2u8); + d20u8 = vext_u8(d7u8, d8u8, 5); + d21u8 = vext_u8(d10u8, d11u8, 5); + d22u8 = vext_u8(d6u8, d7u8, 2); + d23u8 = vext_u8(d9u8, d10u8, 2); + d24u8 = vext_u8(d7u8, d8u8, 2); + d25u8 = vext_u8(d10u8, d11u8, 2); + d26u8 = vext_u8(d6u8, d7u8, 3); + d27u8 = vext_u8(d9u8, d10u8, 3); + d28u8 = vext_u8(d7u8, d8u8, 3); + d29u8 = vext_u8(d10u8, d11u8, 3); - q3u16 = vmlal_u8(q3u16, d23u8, d5u8); - q4u16 = vmlal_u8(q4u16, d24u8, d5u8); - q5u16 = vmlal_u8(q5u16, d25u8, d5u8); - q6u16 = vmlal_u8(q6u16, d26u8, d5u8); + q7u16 = vmlal_u8(q7u16, d20u8, d5u8); + q9u16 = vmlal_u8(q9u16, d21u8, d5u8); + q6u16 = vmlal_u8(q6u16, d22u8, d2u8); + q8u16 = vmlal_u8(q8u16, d23u8, d2u8); + q7u16 = vmlal_u8(q7u16, d24u8, d2u8); + q9u16 = vmlal_u8(q9u16, d25u8, d2u8); - q7u16 = vmull_u8(d21u8, d3u8); - q8u16 = vmull_u8(d22u8, d3u8); - q9u16 = vmull_u8(d23u8, d3u8); - q10u16 = vmull_u8(d24u8, d3u8); + q10u16 = vmull_u8(d26u8, d3u8); + q11u16 = vmull_u8(d27u8, d3u8); + q12u16 = vmull_u8(d28u8, d3u8); + q15u16 = vmull_u8(d29u8, d3u8); - q3s16 = vreinterpretq_s16_u16(q3u16); - q4s16 = vreinterpretq_s16_u16(q4u16); - q5s16 = vreinterpretq_s16_u16(q5u16); - q6s16 = vreinterpretq_s16_u16(q6u16); - q7s16 = vreinterpretq_s16_u16(q7u16); - q8s16 = vreinterpretq_s16_u16(q8u16); - q9s16 = vreinterpretq_s16_u16(q9u16); - q10s16 = vreinterpretq_s16_u16(q10u16); + q6s16 = vreinterpretq_s16_u16(q6u16); + q7s16 = vreinterpretq_s16_u16(q7u16); + q8s16 = vreinterpretq_s16_u16(q8u16); + q9s16 = vreinterpretq_s16_u16(q9u16); + q10s16 = vreinterpretq_s16_u16(q10u16); + q11s16 = vreinterpretq_s16_u16(q11u16); + q12s16 = vreinterpretq_s16_u16(q12u16); + q15s16 = 
vreinterpretq_s16_u16(q15u16); - q7s16 = vqaddq_s16(q7s16, q3s16); - q8s16 = vqaddq_s16(q8s16, q4s16); - q9s16 = vqaddq_s16(q9s16, q5s16); - q10s16 = vqaddq_s16(q10s16, q6s16); + q6s16 = vqaddq_s16(q6s16, q10s16); + q8s16 = vqaddq_s16(q8s16, q11s16); + q7s16 = vqaddq_s16(q7s16, q12s16); + q9s16 = vqaddq_s16(q9s16, q15s16); - d6u8 = vqrshrun_n_s16(q7s16, 7); - d7u8 = vqrshrun_n_s16(q8s16, 7); - d8u8 = vqrshrun_n_s16(q9s16, 7); - d9u8 = vqrshrun_n_s16(q10s16, 7); + d6u8 = vqrshrun_n_s16(q6s16, 7); + d7u8 = vqrshrun_n_s16(q7s16, 7); + d8u8 = vqrshrun_n_s16(q8s16, 7); + d9u8 = vqrshrun_n_s16(q9s16, 7); - d18u8 = d22u8; - d19u8 = d23u8; - d20u8 = d24u8; - d21u8 = d25u8; - d22u8 = d26u8; - - vst1_u8(dst, d6u8); - dst += dst_pitch; - vst1_u8(dst, d7u8); - dst += dst_pitch; - vst1_u8(dst, d8u8); - dst += dst_pitch; - vst1_u8(dst, d9u8); - dst += dst_pitch; - } - } - return; - } - - // load first_pass filter - dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]); - d0s8 = vdup_lane_s8(dtmps8, 0); - d1s8 = vdup_lane_s8(dtmps8, 1); - d2s8 = vdup_lane_s8(dtmps8, 2); - d3s8 = vdup_lane_s8(dtmps8, 3); - d4s8 = vdup_lane_s8(dtmps8, 4); - d5s8 = vdup_lane_s8(dtmps8, 5); - d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); - d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); - d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); - d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); - d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); - d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); - - // First pass: output_height lines x output_width columns (9x4) - if (yoffset == 0) { // firstpass_filter4x4_only - src = src_ptr - 2; - dst = dst_ptr; - for (i = 0; i < 8; i++) { - d6u8 = vld1_u8(src); - d7u8 = vld1_u8(src + 8); - d8u8 = vld1_u8(src + 16); - src += src_pixels_per_line; - d9u8 = vld1_u8(src); - d10u8 = vld1_u8(src + 8); - d11u8 = vld1_u8(src + 16); - src += src_pixels_per_line; - - __builtin_prefetch(src); - __builtin_prefetch(src + src_pixels_per_line); - - q6u16 = vmull_u8(d6u8, d0u8); - q7u16 = vmull_u8(d7u8, d0u8); - q8u16 = 
vmull_u8(d9u8, d0u8); - q9u16 = vmull_u8(d10u8, d0u8); - - d20u8 = vext_u8(d6u8, d7u8, 1); - d21u8 = vext_u8(d9u8, d10u8, 1); - d22u8 = vext_u8(d7u8, d8u8, 1); - d23u8 = vext_u8(d10u8, d11u8, 1); - d24u8 = vext_u8(d6u8, d7u8, 4); - d25u8 = vext_u8(d9u8, d10u8, 4); - d26u8 = vext_u8(d7u8, d8u8, 4); - d27u8 = vext_u8(d10u8, d11u8, 4); - d28u8 = vext_u8(d6u8, d7u8, 5); - d29u8 = vext_u8(d9u8, d10u8, 5); - - q6u16 = vmlsl_u8(q6u16, d20u8, d1u8); - q8u16 = vmlsl_u8(q8u16, d21u8, d1u8); - q7u16 = vmlsl_u8(q7u16, d22u8, d1u8); - q9u16 = vmlsl_u8(q9u16, d23u8, d1u8); - q6u16 = vmlsl_u8(q6u16, d24u8, d4u8); - q8u16 = vmlsl_u8(q8u16, d25u8, d4u8); - q7u16 = vmlsl_u8(q7u16, d26u8, d4u8); - q9u16 = vmlsl_u8(q9u16, d27u8, d4u8); - q6u16 = vmlal_u8(q6u16, d28u8, d5u8); - q8u16 = vmlal_u8(q8u16, d29u8, d5u8); - - d20u8 = vext_u8(d7u8, d8u8, 5); - d21u8 = vext_u8(d10u8, d11u8, 5); - d22u8 = vext_u8(d6u8, d7u8, 2); - d23u8 = vext_u8(d9u8, d10u8, 2); - d24u8 = vext_u8(d7u8, d8u8, 2); - d25u8 = vext_u8(d10u8, d11u8, 2); - d26u8 = vext_u8(d6u8, d7u8, 3); - d27u8 = vext_u8(d9u8, d10u8, 3); - d28u8 = vext_u8(d7u8, d8u8, 3); - d29u8 = vext_u8(d10u8, d11u8, 3); - - q7u16 = vmlal_u8(q7u16, d20u8, d5u8); - q9u16 = vmlal_u8(q9u16, d21u8, d5u8); - q6u16 = vmlal_u8(q6u16, d22u8, d2u8); - q8u16 = vmlal_u8(q8u16, d23u8, d2u8); - q7u16 = vmlal_u8(q7u16, d24u8, d2u8); - q9u16 = vmlal_u8(q9u16, d25u8, d2u8); - - q10u16 = vmull_u8(d26u8, d3u8); - q11u16 = vmull_u8(d27u8, d3u8); - q12u16 = vmull_u8(d28u8, d3u8); - q15u16 = vmull_u8(d29u8, d3u8); - - q6s16 = vreinterpretq_s16_u16(q6u16); - q7s16 = vreinterpretq_s16_u16(q7u16); - q8s16 = vreinterpretq_s16_u16(q8u16); - q9s16 = vreinterpretq_s16_u16(q9u16); - q10s16 = vreinterpretq_s16_u16(q10u16); - q11s16 = vreinterpretq_s16_u16(q11u16); - q12s16 = vreinterpretq_s16_u16(q12u16); - q15s16 = vreinterpretq_s16_u16(q15u16); - - q6s16 = vqaddq_s16(q6s16, q10s16); - q8s16 = vqaddq_s16(q8s16, q11s16); - q7s16 = vqaddq_s16(q7s16, q12s16); - q9s16 = 
vqaddq_s16(q9s16, q15s16); - - d6u8 = vqrshrun_n_s16(q6s16, 7); - d7u8 = vqrshrun_n_s16(q7s16, 7); - d8u8 = vqrshrun_n_s16(q8s16, 7); - d9u8 = vqrshrun_n_s16(q9s16, 7); - - q3u8 = vcombine_u8(d6u8, d7u8); - q4u8 = vcombine_u8(d8u8, d9u8); - vst1q_u8(dst, q3u8); - dst += dst_pitch; - vst1q_u8(dst, q4u8); - dst += dst_pitch; - } - return; - } - - src = src_ptr - 2 - src_pixels_per_line * 2; - tmpp = tmp; - for (i = 0; i < 7; i++) { - d6u8 = vld1_u8(src); - d7u8 = vld1_u8(src + 8); - d8u8 = vld1_u8(src + 16); - src += src_pixels_per_line; - d9u8 = vld1_u8(src); - d10u8 = vld1_u8(src + 8); - d11u8 = vld1_u8(src + 16); - src += src_pixels_per_line; - d12u8 = vld1_u8(src); - d13u8 = vld1_u8(src + 8); - d14u8 = vld1_u8(src + 16); - src += src_pixels_per_line; - - __builtin_prefetch(src); - __builtin_prefetch(src + src_pixels_per_line); - __builtin_prefetch(src + src_pixels_per_line * 2); - - q8u16 = vmull_u8(d6u8, d0u8); - q9u16 = vmull_u8(d7u8, d0u8); - q10u16 = vmull_u8(d9u8, d0u8); - q11u16 = vmull_u8(d10u8, d0u8); - q12u16 = vmull_u8(d12u8, d0u8); - q13u16 = vmull_u8(d13u8, d0u8); - - d28u8 = vext_u8(d6u8, d7u8, 1); - d29u8 = vext_u8(d9u8, d10u8, 1); - d30u8 = vext_u8(d12u8, d13u8, 1); - q8u16 = vmlsl_u8(q8u16, d28u8, d1u8); - q10u16 = vmlsl_u8(q10u16, d29u8, d1u8); - q12u16 = vmlsl_u8(q12u16, d30u8, d1u8); - d28u8 = vext_u8(d7u8, d8u8, 1); - d29u8 = vext_u8(d10u8, d11u8, 1); - d30u8 = vext_u8(d13u8, d14u8, 1); - q9u16 = vmlsl_u8(q9u16, d28u8, d1u8); - q11u16 = vmlsl_u8(q11u16, d29u8, d1u8); - q13u16 = vmlsl_u8(q13u16, d30u8, d1u8); - - d28u8 = vext_u8(d6u8, d7u8, 4); - d29u8 = vext_u8(d9u8, d10u8, 4); - d30u8 = vext_u8(d12u8, d13u8, 4); - q8u16 = vmlsl_u8(q8u16, d28u8, d4u8); - q10u16 = vmlsl_u8(q10u16, d29u8, d4u8); - q12u16 = vmlsl_u8(q12u16, d30u8, d4u8); - d28u8 = vext_u8(d7u8, d8u8, 4); - d29u8 = vext_u8(d10u8, d11u8, 4); - d30u8 = vext_u8(d13u8, d14u8, 4); - q9u16 = vmlsl_u8(q9u16, d28u8, d4u8); - q11u16 = vmlsl_u8(q11u16, d29u8, d4u8); - q13u16 = 
vmlsl_u8(q13u16, d30u8, d4u8); - - d28u8 = vext_u8(d6u8, d7u8, 5); - d29u8 = vext_u8(d9u8, d10u8, 5); - d30u8 = vext_u8(d12u8, d13u8, 5); - q8u16 = vmlal_u8(q8u16, d28u8, d5u8); - q10u16 = vmlal_u8(q10u16, d29u8, d5u8); - q12u16 = vmlal_u8(q12u16, d30u8, d5u8); - d28u8 = vext_u8(d7u8, d8u8, 5); - d29u8 = vext_u8(d10u8, d11u8, 5); - d30u8 = vext_u8(d13u8, d14u8, 5); - q9u16 = vmlal_u8(q9u16, d28u8, d5u8); - q11u16 = vmlal_u8(q11u16, d29u8, d5u8); - q13u16 = vmlal_u8(q13u16, d30u8, d5u8); - - d28u8 = vext_u8(d6u8, d7u8, 2); - d29u8 = vext_u8(d9u8, d10u8, 2); - d30u8 = vext_u8(d12u8, d13u8, 2); - q8u16 = vmlal_u8(q8u16, d28u8, d2u8); - q10u16 = vmlal_u8(q10u16, d29u8, d2u8); - q12u16 = vmlal_u8(q12u16, d30u8, d2u8); - d28u8 = vext_u8(d7u8, d8u8, 2); - d29u8 = vext_u8(d10u8, d11u8, 2); - d30u8 = vext_u8(d13u8, d14u8, 2); - q9u16 = vmlal_u8(q9u16, d28u8, d2u8); - q11u16 = vmlal_u8(q11u16, d29u8, d2u8); - q13u16 = vmlal_u8(q13u16, d30u8, d2u8); - - d28u8 = vext_u8(d6u8, d7u8, 3); - d29u8 = vext_u8(d9u8, d10u8, 3); - d30u8 = vext_u8(d12u8, d13u8, 3); - d15u8 = vext_u8(d7u8, d8u8, 3); - d31u8 = vext_u8(d10u8, d11u8, 3); - d6u8 = vext_u8(d13u8, d14u8, 3); - q4u16 = vmull_u8(d28u8, d3u8); - q5u16 = vmull_u8(d29u8, d3u8); - q6u16 = vmull_u8(d30u8, d3u8); - q4s16 = vreinterpretq_s16_u16(q4u16); - q5s16 = vreinterpretq_s16_u16(q5u16); - q6s16 = vreinterpretq_s16_u16(q6u16); - q8s16 = vreinterpretq_s16_u16(q8u16); - q10s16 = vreinterpretq_s16_u16(q10u16); - q12s16 = vreinterpretq_s16_u16(q12u16); - q8s16 = vqaddq_s16(q8s16, q4s16); - q10s16 = vqaddq_s16(q10s16, q5s16); - q12s16 = vqaddq_s16(q12s16, q6s16); - - q6u16 = vmull_u8(d15u8, d3u8); - q7u16 = vmull_u8(d31u8, d3u8); - q3u16 = vmull_u8(d6u8, d3u8); - q3s16 = vreinterpretq_s16_u16(q3u16); - q6s16 = vreinterpretq_s16_u16(q6u16); - q7s16 = vreinterpretq_s16_u16(q7u16); - q9s16 = vreinterpretq_s16_u16(q9u16); - q11s16 = vreinterpretq_s16_u16(q11u16); - q13s16 = vreinterpretq_s16_u16(q13u16); - q9s16 = vqaddq_s16(q9s16, q6s16); 
- q11s16 = vqaddq_s16(q11s16, q7s16); - q13s16 = vqaddq_s16(q13s16, q3s16); - - d6u8 = vqrshrun_n_s16(q8s16, 7); - d7u8 = vqrshrun_n_s16(q9s16, 7); - d8u8 = vqrshrun_n_s16(q10s16, 7); - d9u8 = vqrshrun_n_s16(q11s16, 7); - d10u8 = vqrshrun_n_s16(q12s16, 7); - d11u8 = vqrshrun_n_s16(q13s16, 7); - - vst1_u8(tmpp, d6u8); - tmpp += 8; - vst1_u8(tmpp, d7u8); - tmpp += 8; - vst1_u8(tmpp, d8u8); - tmpp += 8; - vst1_u8(tmpp, d9u8); - tmpp += 8; - vst1_u8(tmpp, d10u8); - tmpp += 8; - vst1_u8(tmpp, d11u8); - tmpp += 8; - } - - // Second pass: 16x16 - dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); - d0s8 = vdup_lane_s8(dtmps8, 0); - d1s8 = vdup_lane_s8(dtmps8, 1); - d2s8 = vdup_lane_s8(dtmps8, 2); - d3s8 = vdup_lane_s8(dtmps8, 3); - d4s8 = vdup_lane_s8(dtmps8, 4); - d5s8 = vdup_lane_s8(dtmps8, 5); - d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); - d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8)); - d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); - d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); - d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); - d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); - - for (i = 0; i < 2; i++) { - dst = dst_ptr + 8 * i; - tmpp = tmp + 8 * i; - d18u8 = vld1_u8(tmpp); - tmpp += 16; - d19u8 = vld1_u8(tmpp); - tmpp += 16; - d20u8 = vld1_u8(tmpp); - tmpp += 16; - d21u8 = vld1_u8(tmpp); - tmpp += 16; - d22u8 = vld1_u8(tmpp); - tmpp += 16; - for (j = 0; j < 4; j++) { - d23u8 = vld1_u8(tmpp); - tmpp += 16; - d24u8 = vld1_u8(tmpp); - tmpp += 16; - d25u8 = vld1_u8(tmpp); - tmpp += 16; - d26u8 = vld1_u8(tmpp); - tmpp += 16; - - q3u16 = vmull_u8(d18u8, d0u8); - q4u16 = vmull_u8(d19u8, d0u8); - q5u16 = vmull_u8(d20u8, d0u8); - q6u16 = vmull_u8(d21u8, d0u8); - - q3u16 = vmlsl_u8(q3u16, d19u8, d1u8); - q4u16 = vmlsl_u8(q4u16, d20u8, d1u8); - q5u16 = vmlsl_u8(q5u16, d21u8, d1u8); - q6u16 = vmlsl_u8(q6u16, d22u8, d1u8); - - q3u16 = vmlsl_u8(q3u16, d22u8, d4u8); - q4u16 = vmlsl_u8(q4u16, d23u8, d4u8); - q5u16 = vmlsl_u8(q5u16, d24u8, d4u8); - q6u16 = vmlsl_u8(q6u16, d25u8, d4u8); - - q3u16 = 
vmlal_u8(q3u16, d20u8, d2u8); - q4u16 = vmlal_u8(q4u16, d21u8, d2u8); - q5u16 = vmlal_u8(q5u16, d22u8, d2u8); - q6u16 = vmlal_u8(q6u16, d23u8, d2u8); - - q3u16 = vmlal_u8(q3u16, d23u8, d5u8); - q4u16 = vmlal_u8(q4u16, d24u8, d5u8); - q5u16 = vmlal_u8(q5u16, d25u8, d5u8); - q6u16 = vmlal_u8(q6u16, d26u8, d5u8); - - q7u16 = vmull_u8(d21u8, d3u8); - q8u16 = vmull_u8(d22u8, d3u8); - q9u16 = vmull_u8(d23u8, d3u8); - q10u16 = vmull_u8(d24u8, d3u8); - - q3s16 = vreinterpretq_s16_u16(q3u16); - q4s16 = vreinterpretq_s16_u16(q4u16); - q5s16 = vreinterpretq_s16_u16(q5u16); - q6s16 = vreinterpretq_s16_u16(q6u16); - q7s16 = vreinterpretq_s16_u16(q7u16); - q8s16 = vreinterpretq_s16_u16(q8u16); - q9s16 = vreinterpretq_s16_u16(q9u16); - q10s16 = vreinterpretq_s16_u16(q10u16); - - q7s16 = vqaddq_s16(q7s16, q3s16); - q8s16 = vqaddq_s16(q8s16, q4s16); - q9s16 = vqaddq_s16(q9s16, q5s16); - q10s16 = vqaddq_s16(q10s16, q6s16); - - d6u8 = vqrshrun_n_s16(q7s16, 7); - d7u8 = vqrshrun_n_s16(q8s16, 7); - d8u8 = vqrshrun_n_s16(q9s16, 7); - d9u8 = vqrshrun_n_s16(q10s16, 7); - - d18u8 = d22u8; - d19u8 = d23u8; - d20u8 = d24u8; - d21u8 = d25u8; - d22u8 = d26u8; - - vst1_u8(dst, d6u8); - dst += dst_pitch; - vst1_u8(dst, d7u8); - dst += dst_pitch; - vst1_u8(dst, d8u8); - dst += dst_pitch; - vst1_u8(dst, d9u8); - dst += dst_pitch; - } + q3u8 = vcombine_u8(d6u8, d7u8); + q4u8 = vcombine_u8(d8u8, d9u8); + vst1q_u8(dst, q3u8); + dst += dst_pitch; + vst1q_u8(dst, q4u8); + dst += dst_pitch; } return; + } + + src = src_ptr - 2 - src_pixels_per_line * 2; + tmpp = tmp; + for (i = 0; i < 7; ++i) { + d6u8 = vld1_u8(src); + d7u8 = vld1_u8(src + 8); + d8u8 = vld1_u8(src + 16); + src += src_pixels_per_line; + d9u8 = vld1_u8(src); + d10u8 = vld1_u8(src + 8); + d11u8 = vld1_u8(src + 16); + src += src_pixels_per_line; + d12u8 = vld1_u8(src); + d13u8 = vld1_u8(src + 8); + d14u8 = vld1_u8(src + 16); + src += src_pixels_per_line; + + __builtin_prefetch(src); + __builtin_prefetch(src + src_pixels_per_line); + 
__builtin_prefetch(src + src_pixels_per_line * 2); + + q8u16 = vmull_u8(d6u8, d0u8); + q9u16 = vmull_u8(d7u8, d0u8); + q10u16 = vmull_u8(d9u8, d0u8); + q11u16 = vmull_u8(d10u8, d0u8); + q12u16 = vmull_u8(d12u8, d0u8); + q13u16 = vmull_u8(d13u8, d0u8); + + d28u8 = vext_u8(d6u8, d7u8, 1); + d29u8 = vext_u8(d9u8, d10u8, 1); + d30u8 = vext_u8(d12u8, d13u8, 1); + q8u16 = vmlsl_u8(q8u16, d28u8, d1u8); + q10u16 = vmlsl_u8(q10u16, d29u8, d1u8); + q12u16 = vmlsl_u8(q12u16, d30u8, d1u8); + d28u8 = vext_u8(d7u8, d8u8, 1); + d29u8 = vext_u8(d10u8, d11u8, 1); + d30u8 = vext_u8(d13u8, d14u8, 1); + q9u16 = vmlsl_u8(q9u16, d28u8, d1u8); + q11u16 = vmlsl_u8(q11u16, d29u8, d1u8); + q13u16 = vmlsl_u8(q13u16, d30u8, d1u8); + + d28u8 = vext_u8(d6u8, d7u8, 4); + d29u8 = vext_u8(d9u8, d10u8, 4); + d30u8 = vext_u8(d12u8, d13u8, 4); + q8u16 = vmlsl_u8(q8u16, d28u8, d4u8); + q10u16 = vmlsl_u8(q10u16, d29u8, d4u8); + q12u16 = vmlsl_u8(q12u16, d30u8, d4u8); + d28u8 = vext_u8(d7u8, d8u8, 4); + d29u8 = vext_u8(d10u8, d11u8, 4); + d30u8 = vext_u8(d13u8, d14u8, 4); + q9u16 = vmlsl_u8(q9u16, d28u8, d4u8); + q11u16 = vmlsl_u8(q11u16, d29u8, d4u8); + q13u16 = vmlsl_u8(q13u16, d30u8, d4u8); + + d28u8 = vext_u8(d6u8, d7u8, 5); + d29u8 = vext_u8(d9u8, d10u8, 5); + d30u8 = vext_u8(d12u8, d13u8, 5); + q8u16 = vmlal_u8(q8u16, d28u8, d5u8); + q10u16 = vmlal_u8(q10u16, d29u8, d5u8); + q12u16 = vmlal_u8(q12u16, d30u8, d5u8); + d28u8 = vext_u8(d7u8, d8u8, 5); + d29u8 = vext_u8(d10u8, d11u8, 5); + d30u8 = vext_u8(d13u8, d14u8, 5); + q9u16 = vmlal_u8(q9u16, d28u8, d5u8); + q11u16 = vmlal_u8(q11u16, d29u8, d5u8); + q13u16 = vmlal_u8(q13u16, d30u8, d5u8); + + d28u8 = vext_u8(d6u8, d7u8, 2); + d29u8 = vext_u8(d9u8, d10u8, 2); + d30u8 = vext_u8(d12u8, d13u8, 2); + q8u16 = vmlal_u8(q8u16, d28u8, d2u8); + q10u16 = vmlal_u8(q10u16, d29u8, d2u8); + q12u16 = vmlal_u8(q12u16, d30u8, d2u8); + d28u8 = vext_u8(d7u8, d8u8, 2); + d29u8 = vext_u8(d10u8, d11u8, 2); + d30u8 = vext_u8(d13u8, d14u8, 2); + q9u16 = vmlal_u8(q9u16, 
d28u8, d2u8); + q11u16 = vmlal_u8(q11u16, d29u8, d2u8); + q13u16 = vmlal_u8(q13u16, d30u8, d2u8); + + d28u8 = vext_u8(d6u8, d7u8, 3); + d29u8 = vext_u8(d9u8, d10u8, 3); + d30u8 = vext_u8(d12u8, d13u8, 3); + d15u8 = vext_u8(d7u8, d8u8, 3); + d31u8 = vext_u8(d10u8, d11u8, 3); + d6u8 = vext_u8(d13u8, d14u8, 3); + q4u16 = vmull_u8(d28u8, d3u8); + q5u16 = vmull_u8(d29u8, d3u8); + q6u16 = vmull_u8(d30u8, d3u8); + q4s16 = vreinterpretq_s16_u16(q4u16); + q5s16 = vreinterpretq_s16_u16(q5u16); + q6s16 = vreinterpretq_s16_u16(q6u16); + q8s16 = vreinterpretq_s16_u16(q8u16); + q10s16 = vreinterpretq_s16_u16(q10u16); + q12s16 = vreinterpretq_s16_u16(q12u16); + q8s16 = vqaddq_s16(q8s16, q4s16); + q10s16 = vqaddq_s16(q10s16, q5s16); + q12s16 = vqaddq_s16(q12s16, q6s16); + + q6u16 = vmull_u8(d15u8, d3u8); + q7u16 = vmull_u8(d31u8, d3u8); + q3u16 = vmull_u8(d6u8, d3u8); + q3s16 = vreinterpretq_s16_u16(q3u16); + q6s16 = vreinterpretq_s16_u16(q6u16); + q7s16 = vreinterpretq_s16_u16(q7u16); + q9s16 = vreinterpretq_s16_u16(q9u16); + q11s16 = vreinterpretq_s16_u16(q11u16); + q13s16 = vreinterpretq_s16_u16(q13u16); + q9s16 = vqaddq_s16(q9s16, q6s16); + q11s16 = vqaddq_s16(q11s16, q7s16); + q13s16 = vqaddq_s16(q13s16, q3s16); + + d6u8 = vqrshrun_n_s16(q8s16, 7); + d7u8 = vqrshrun_n_s16(q9s16, 7); + d8u8 = vqrshrun_n_s16(q10s16, 7); + d9u8 = vqrshrun_n_s16(q11s16, 7); + d10u8 = vqrshrun_n_s16(q12s16, 7); + d11u8 = vqrshrun_n_s16(q13s16, 7); + + vst1_u8(tmpp, d6u8); + tmpp += 8; + vst1_u8(tmpp, d7u8); + tmpp += 8; + vst1_u8(tmpp, d8u8); + tmpp += 8; + vst1_u8(tmpp, d9u8); + tmpp += 8; + vst1_u8(tmpp, d10u8); + tmpp += 8; + vst1_u8(tmpp, d11u8); + tmpp += 8; + } + + // Second pass: 16x16 + dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]); + d0s8 = vdup_lane_s8(dtmps8, 0); + d1s8 = vdup_lane_s8(dtmps8, 1); + d2s8 = vdup_lane_s8(dtmps8, 2); + d3s8 = vdup_lane_s8(dtmps8, 3); + d4s8 = vdup_lane_s8(dtmps8, 4); + d5s8 = vdup_lane_s8(dtmps8, 5); + d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8)); + d1u8 = 
vreinterpret_u8_s8(vabs_s8(d1s8)); + d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8)); + d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8)); + d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8)); + d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8)); + + for (i = 0; i < 2; ++i) { + dst = dst_ptr + 8 * i; + tmpp = tmp + 8 * i; + d18u8 = vld1_u8(tmpp); + tmpp += 16; + d19u8 = vld1_u8(tmpp); + tmpp += 16; + d20u8 = vld1_u8(tmpp); + tmpp += 16; + d21u8 = vld1_u8(tmpp); + tmpp += 16; + d22u8 = vld1_u8(tmpp); + tmpp += 16; + for (j = 0; j < 4; ++j) { + d23u8 = vld1_u8(tmpp); + tmpp += 16; + d24u8 = vld1_u8(tmpp); + tmpp += 16; + d25u8 = vld1_u8(tmpp); + tmpp += 16; + d26u8 = vld1_u8(tmpp); + tmpp += 16; + + q3u16 = vmull_u8(d18u8, d0u8); + q4u16 = vmull_u8(d19u8, d0u8); + q5u16 = vmull_u8(d20u8, d0u8); + q6u16 = vmull_u8(d21u8, d0u8); + + q3u16 = vmlsl_u8(q3u16, d19u8, d1u8); + q4u16 = vmlsl_u8(q4u16, d20u8, d1u8); + q5u16 = vmlsl_u8(q5u16, d21u8, d1u8); + q6u16 = vmlsl_u8(q6u16, d22u8, d1u8); + + q3u16 = vmlsl_u8(q3u16, d22u8, d4u8); + q4u16 = vmlsl_u8(q4u16, d23u8, d4u8); + q5u16 = vmlsl_u8(q5u16, d24u8, d4u8); + q6u16 = vmlsl_u8(q6u16, d25u8, d4u8); + + q3u16 = vmlal_u8(q3u16, d20u8, d2u8); + q4u16 = vmlal_u8(q4u16, d21u8, d2u8); + q5u16 = vmlal_u8(q5u16, d22u8, d2u8); + q6u16 = vmlal_u8(q6u16, d23u8, d2u8); + + q3u16 = vmlal_u8(q3u16, d23u8, d5u8); + q4u16 = vmlal_u8(q4u16, d24u8, d5u8); + q5u16 = vmlal_u8(q5u16, d25u8, d5u8); + q6u16 = vmlal_u8(q6u16, d26u8, d5u8); + + q7u16 = vmull_u8(d21u8, d3u8); + q8u16 = vmull_u8(d22u8, d3u8); + q9u16 = vmull_u8(d23u8, d3u8); + q10u16 = vmull_u8(d24u8, d3u8); + + q3s16 = vreinterpretq_s16_u16(q3u16); + q4s16 = vreinterpretq_s16_u16(q4u16); + q5s16 = vreinterpretq_s16_u16(q5u16); + q6s16 = vreinterpretq_s16_u16(q6u16); + q7s16 = vreinterpretq_s16_u16(q7u16); + q8s16 = vreinterpretq_s16_u16(q8u16); + q9s16 = vreinterpretq_s16_u16(q9u16); + q10s16 = vreinterpretq_s16_u16(q10u16); + + q7s16 = vqaddq_s16(q7s16, q3s16); + q8s16 = vqaddq_s16(q8s16, q4s16); + q9s16 = 
vqaddq_s16(q9s16, q5s16); + q10s16 = vqaddq_s16(q10s16, q6s16); + + d6u8 = vqrshrun_n_s16(q7s16, 7); + d7u8 = vqrshrun_n_s16(q8s16, 7); + d8u8 = vqrshrun_n_s16(q9s16, 7); + d9u8 = vqrshrun_n_s16(q10s16, 7); + + d18u8 = d22u8; + d19u8 = d23u8; + d20u8 = d24u8; + d21u8 = d25u8; + d22u8 = d26u8; + + vst1_u8(dst, d6u8); + dst += dst_pitch; + vst1_u8(dst, d7u8); + dst += dst_pitch; + vst1_u8(dst, d8u8); + dst += dst_pitch; + vst1_u8(dst, d9u8); + dst += dst_pitch; + } + } + return; } diff --git a/libs/libvpx/vp8/common/arm/neon/vp8_loopfilter_neon.c b/libs/libvpx/vp8/common/arm/neon/vp8_loopfilter_neon.c index 9d6807af71..d7286739da 100644 --- a/libs/libvpx/vp8/common/arm/neon/vp8_loopfilter_neon.c +++ b/libs/libvpx/vp8/common/arm/neon/vp8_loopfilter_neon.c @@ -12,539 +12,525 @@ #include "./vpx_config.h" #include "vpx_ports/arm.h" -static INLINE void vp8_loop_filter_neon( - uint8x16_t qblimit, // flimit - uint8x16_t qlimit, // limit - uint8x16_t qthresh, // thresh - uint8x16_t q3, // p3 - uint8x16_t q4, // p2 - uint8x16_t q5, // p1 - uint8x16_t q6, // p0 - uint8x16_t q7, // q0 - uint8x16_t q8, // q1 - uint8x16_t q9, // q2 - uint8x16_t q10, // q3 - uint8x16_t *q5r, // p1 - uint8x16_t *q6r, // p0 - uint8x16_t *q7r, // q0 - uint8x16_t *q8r) { // q1 - uint8x16_t q0u8, q1u8, q2u8, q11u8, q12u8, q13u8, q14u8, q15u8; - int16x8_t q2s16, q11s16; - uint16x8_t q4u16; - int8x16_t q1s8, q2s8, q10s8, q11s8, q12s8, q13s8; - int8x8_t d2s8, d3s8; +static INLINE void vp8_loop_filter_neon(uint8x16_t qblimit, // flimit + uint8x16_t qlimit, // limit + uint8x16_t qthresh, // thresh + uint8x16_t q3, // p3 + uint8x16_t q4, // p2 + uint8x16_t q5, // p1 + uint8x16_t q6, // p0 + uint8x16_t q7, // q0 + uint8x16_t q8, // q1 + uint8x16_t q9, // q2 + uint8x16_t q10, // q3 + uint8x16_t *q5r, // p1 + uint8x16_t *q6r, // p0 + uint8x16_t *q7r, // q0 + uint8x16_t *q8r) { // q1 + uint8x16_t q0u8, q1u8, q2u8, q11u8, q12u8, q13u8, q14u8, q15u8; + int16x8_t q2s16, q11s16; + uint16x8_t q4u16; + int8x16_t q1s8, 
q2s8, q10s8, q11s8, q12s8, q13s8; + int8x8_t d2s8, d3s8; - q11u8 = vabdq_u8(q3, q4); - q12u8 = vabdq_u8(q4, q5); - q13u8 = vabdq_u8(q5, q6); - q14u8 = vabdq_u8(q8, q7); - q3 = vabdq_u8(q9, q8); - q4 = vabdq_u8(q10, q9); + q11u8 = vabdq_u8(q3, q4); + q12u8 = vabdq_u8(q4, q5); + q13u8 = vabdq_u8(q5, q6); + q14u8 = vabdq_u8(q8, q7); + q3 = vabdq_u8(q9, q8); + q4 = vabdq_u8(q10, q9); - q11u8 = vmaxq_u8(q11u8, q12u8); - q12u8 = vmaxq_u8(q13u8, q14u8); - q3 = vmaxq_u8(q3, q4); - q15u8 = vmaxq_u8(q11u8, q12u8); + q11u8 = vmaxq_u8(q11u8, q12u8); + q12u8 = vmaxq_u8(q13u8, q14u8); + q3 = vmaxq_u8(q3, q4); + q15u8 = vmaxq_u8(q11u8, q12u8); - q9 = vabdq_u8(q6, q7); + q9 = vabdq_u8(q6, q7); - // vp8_hevmask - q13u8 = vcgtq_u8(q13u8, qthresh); - q14u8 = vcgtq_u8(q14u8, qthresh); - q15u8 = vmaxq_u8(q15u8, q3); + // vp8_hevmask + q13u8 = vcgtq_u8(q13u8, qthresh); + q14u8 = vcgtq_u8(q14u8, qthresh); + q15u8 = vmaxq_u8(q15u8, q3); - q2u8 = vabdq_u8(q5, q8); - q9 = vqaddq_u8(q9, q9); + q2u8 = vabdq_u8(q5, q8); + q9 = vqaddq_u8(q9, q9); - q15u8 = vcgeq_u8(qlimit, q15u8); + q15u8 = vcgeq_u8(qlimit, q15u8); - // vp8_filter() function - // convert to signed - q10 = vdupq_n_u8(0x80); - q8 = veorq_u8(q8, q10); - q7 = veorq_u8(q7, q10); - q6 = veorq_u8(q6, q10); - q5 = veorq_u8(q5, q10); + // vp8_filter() function + // convert to signed + q10 = vdupq_n_u8(0x80); + q8 = veorq_u8(q8, q10); + q7 = veorq_u8(q7, q10); + q6 = veorq_u8(q6, q10); + q5 = veorq_u8(q5, q10); - q2u8 = vshrq_n_u8(q2u8, 1); - q9 = vqaddq_u8(q9, q2u8); + q2u8 = vshrq_n_u8(q2u8, 1); + q9 = vqaddq_u8(q9, q2u8); - q10 = vdupq_n_u8(3); + q10 = vdupq_n_u8(3); - q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)), - vget_low_s8(vreinterpretq_s8_u8(q6))); - q11s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)), - vget_high_s8(vreinterpretq_s8_u8(q6))); + q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)), + vget_low_s8(vreinterpretq_s8_u8(q6))); + q11s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)), + 
vget_high_s8(vreinterpretq_s8_u8(q6))); - q9 = vcgeq_u8(qblimit, q9); + q9 = vcgeq_u8(qblimit, q9); - q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), - vreinterpretq_s8_u8(q8)); + q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), vreinterpretq_s8_u8(q8)); - q14u8 = vorrq_u8(q13u8, q14u8); + q14u8 = vorrq_u8(q13u8, q14u8); - q4u16 = vmovl_u8(vget_low_u8(q10)); - q2s16 = vmulq_s16(q2s16, vreinterpretq_s16_u16(q4u16)); - q11s16 = vmulq_s16(q11s16, vreinterpretq_s16_u16(q4u16)); + q4u16 = vmovl_u8(vget_low_u8(q10)); + q2s16 = vmulq_s16(q2s16, vreinterpretq_s16_u16(q4u16)); + q11s16 = vmulq_s16(q11s16, vreinterpretq_s16_u16(q4u16)); - q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q14u8); - q15u8 = vandq_u8(q15u8, q9); + q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q14u8); + q15u8 = vandq_u8(q15u8, q9); - q1s8 = vreinterpretq_s8_u8(q1u8); - q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8)); - q11s16 = vaddw_s8(q11s16, vget_high_s8(q1s8)); + q1s8 = vreinterpretq_s8_u8(q1u8); + q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8)); + q11s16 = vaddw_s8(q11s16, vget_high_s8(q1s8)); - q9 = vdupq_n_u8(4); - // vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0)) - d2s8 = vqmovn_s16(q2s16); - d3s8 = vqmovn_s16(q11s16); - q1s8 = vcombine_s8(d2s8, d3s8); - q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q15u8); - q1s8 = vreinterpretq_s8_u8(q1u8); + q9 = vdupq_n_u8(4); + // vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0)) + d2s8 = vqmovn_s16(q2s16); + d3s8 = vqmovn_s16(q11s16); + q1s8 = vcombine_s8(d2s8, d3s8); + q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q15u8); + q1s8 = vreinterpretq_s8_u8(q1u8); - q2s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q10)); - q1s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q9)); - q2s8 = vshrq_n_s8(q2s8, 3); - q1s8 = vshrq_n_s8(q1s8, 3); + q2s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q10)); + q1s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q9)); + q2s8 = vshrq_n_s8(q2s8, 3); + q1s8 = vshrq_n_s8(q1s8, 3); - q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q2s8); - q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q1s8); + 
q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q2s8); + q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q1s8); - q1s8 = vrshrq_n_s8(q1s8, 1); - q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8)); + q1s8 = vrshrq_n_s8(q1s8, 1); + q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8)); - q13s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q1s8); - q12s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q1s8); + q13s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q1s8); + q12s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q1s8); - q0u8 = vdupq_n_u8(0x80); - *q8r = veorq_u8(vreinterpretq_u8_s8(q12s8), q0u8); - *q7r = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8); - *q6r = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8); - *q5r = veorq_u8(vreinterpretq_u8_s8(q13s8), q0u8); - return; + q0u8 = vdupq_n_u8(0x80); + *q8r = veorq_u8(vreinterpretq_u8_s8(q12s8), q0u8); + *q7r = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8); + *q6r = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8); + *q5r = veorq_u8(vreinterpretq_u8_s8(q13s8), q0u8); + return; } -void vp8_loop_filter_horizontal_edge_y_neon( - unsigned char *src, - int pitch, - unsigned char blimit, - unsigned char limit, - unsigned char thresh) { - uint8x16_t qblimit, qlimit, qthresh, q3, q4; - uint8x16_t q5, q6, q7, q8, q9, q10; +void vp8_loop_filter_horizontal_edge_y_neon(unsigned char *src, int pitch, + unsigned char blimit, + unsigned char limit, + unsigned char thresh) { + uint8x16_t qblimit, qlimit, qthresh, q3, q4; + uint8x16_t q5, q6, q7, q8, q9, q10; - qblimit = vdupq_n_u8(blimit); - qlimit = vdupq_n_u8(limit); - qthresh = vdupq_n_u8(thresh); - src -= (pitch << 2); + qblimit = vdupq_n_u8(blimit); + qlimit = vdupq_n_u8(limit); + qthresh = vdupq_n_u8(thresh); + src -= (pitch << 2); - q3 = vld1q_u8(src); - src += pitch; - q4 = vld1q_u8(src); - src += pitch; - q5 = vld1q_u8(src); - src += pitch; - q6 = vld1q_u8(src); - src += pitch; - q7 = vld1q_u8(src); - src += pitch; - q8 = vld1q_u8(src); - src += pitch; - q9 = vld1q_u8(src); - src += pitch; - q10 = vld1q_u8(src); + q3 = vld1q_u8(src); + 
src += pitch; + q4 = vld1q_u8(src); + src += pitch; + q5 = vld1q_u8(src); + src += pitch; + q6 = vld1q_u8(src); + src += pitch; + q7 = vld1q_u8(src); + src += pitch; + q8 = vld1q_u8(src); + src += pitch; + q9 = vld1q_u8(src); + src += pitch; + q10 = vld1q_u8(src); - vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4, - q5, q6, q7, q8, q9, q10, - &q5, &q6, &q7, &q8); + vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9, + q10, &q5, &q6, &q7, &q8); - src -= (pitch * 5); - vst1q_u8(src, q5); - src += pitch; - vst1q_u8(src, q6); - src += pitch; - vst1q_u8(src, q7); - src += pitch; - vst1q_u8(src, q8); - return; + src -= (pitch * 5); + vst1q_u8(src, q5); + src += pitch; + vst1q_u8(src, q6); + src += pitch; + vst1q_u8(src, q7); + src += pitch; + vst1q_u8(src, q8); + return; } -void vp8_loop_filter_horizontal_edge_uv_neon( - unsigned char *u, - int pitch, - unsigned char blimit, - unsigned char limit, - unsigned char thresh, - unsigned char *v) { - uint8x16_t qblimit, qlimit, qthresh, q3, q4; - uint8x16_t q5, q6, q7, q8, q9, q10; - uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14; - uint8x8_t d15, d16, d17, d18, d19, d20, d21; +void vp8_loop_filter_horizontal_edge_uv_neon(unsigned char *u, int pitch, + unsigned char blimit, + unsigned char limit, + unsigned char thresh, + unsigned char *v) { + uint8x16_t qblimit, qlimit, qthresh, q3, q4; + uint8x16_t q5, q6, q7, q8, q9, q10; + uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14; + uint8x8_t d15, d16, d17, d18, d19, d20, d21; - qblimit = vdupq_n_u8(blimit); - qlimit = vdupq_n_u8(limit); - qthresh = vdupq_n_u8(thresh); + qblimit = vdupq_n_u8(blimit); + qlimit = vdupq_n_u8(limit); + qthresh = vdupq_n_u8(thresh); - u -= (pitch << 2); - v -= (pitch << 2); + u -= (pitch << 2); + v -= (pitch << 2); - d6 = vld1_u8(u); - u += pitch; - d7 = vld1_u8(v); - v += pitch; - d8 = vld1_u8(u); - u += pitch; - d9 = vld1_u8(v); - v += pitch; - d10 = vld1_u8(u); - u += pitch; - d11 = vld1_u8(v); - v += pitch; - d12 = 
vld1_u8(u); - u += pitch; - d13 = vld1_u8(v); - v += pitch; - d14 = vld1_u8(u); - u += pitch; - d15 = vld1_u8(v); - v += pitch; - d16 = vld1_u8(u); - u += pitch; - d17 = vld1_u8(v); - v += pitch; - d18 = vld1_u8(u); - u += pitch; - d19 = vld1_u8(v); - v += pitch; - d20 = vld1_u8(u); - d21 = vld1_u8(v); + d6 = vld1_u8(u); + u += pitch; + d7 = vld1_u8(v); + v += pitch; + d8 = vld1_u8(u); + u += pitch; + d9 = vld1_u8(v); + v += pitch; + d10 = vld1_u8(u); + u += pitch; + d11 = vld1_u8(v); + v += pitch; + d12 = vld1_u8(u); + u += pitch; + d13 = vld1_u8(v); + v += pitch; + d14 = vld1_u8(u); + u += pitch; + d15 = vld1_u8(v); + v += pitch; + d16 = vld1_u8(u); + u += pitch; + d17 = vld1_u8(v); + v += pitch; + d18 = vld1_u8(u); + u += pitch; + d19 = vld1_u8(v); + v += pitch; + d20 = vld1_u8(u); + d21 = vld1_u8(v); - q3 = vcombine_u8(d6, d7); - q4 = vcombine_u8(d8, d9); - q5 = vcombine_u8(d10, d11); - q6 = vcombine_u8(d12, d13); - q7 = vcombine_u8(d14, d15); - q8 = vcombine_u8(d16, d17); - q9 = vcombine_u8(d18, d19); - q10 = vcombine_u8(d20, d21); + q3 = vcombine_u8(d6, d7); + q4 = vcombine_u8(d8, d9); + q5 = vcombine_u8(d10, d11); + q6 = vcombine_u8(d12, d13); + q7 = vcombine_u8(d14, d15); + q8 = vcombine_u8(d16, d17); + q9 = vcombine_u8(d18, d19); + q10 = vcombine_u8(d20, d21); - vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4, - q5, q6, q7, q8, q9, q10, - &q5, &q6, &q7, &q8); + vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9, + q10, &q5, &q6, &q7, &q8); - u -= (pitch * 5); - vst1_u8(u, vget_low_u8(q5)); - u += pitch; - vst1_u8(u, vget_low_u8(q6)); - u += pitch; - vst1_u8(u, vget_low_u8(q7)); - u += pitch; - vst1_u8(u, vget_low_u8(q8)); + u -= (pitch * 5); + vst1_u8(u, vget_low_u8(q5)); + u += pitch; + vst1_u8(u, vget_low_u8(q6)); + u += pitch; + vst1_u8(u, vget_low_u8(q7)); + u += pitch; + vst1_u8(u, vget_low_u8(q8)); - v -= (pitch * 5); - vst1_u8(v, vget_high_u8(q5)); - v += pitch; - vst1_u8(v, vget_high_u8(q6)); - v += pitch; - vst1_u8(v, 
vget_high_u8(q7)); - v += pitch; - vst1_u8(v, vget_high_u8(q8)); - return; + v -= (pitch * 5); + vst1_u8(v, vget_high_u8(q5)); + v += pitch; + vst1_u8(v, vget_high_u8(q6)); + v += pitch; + vst1_u8(v, vget_high_u8(q7)); + v += pitch; + vst1_u8(v, vget_high_u8(q8)); + return; } static INLINE void write_4x8(unsigned char *dst, int pitch, const uint8x8x4_t result) { #ifdef VPX_INCOMPATIBLE_GCC - /* - * uint8x8x4_t result - 00 01 02 03 | 04 05 06 07 - 10 11 12 13 | 14 15 16 17 - 20 21 22 23 | 24 25 26 27 - 30 31 32 33 | 34 35 36 37 - --- - * after vtrn_u16 - 00 01 20 21 | 04 05 24 25 - 02 03 22 23 | 06 07 26 27 - 10 11 30 31 | 14 15 34 35 - 12 13 32 33 | 16 17 36 37 - --- - * after vtrn_u8 - 00 10 20 30 | 04 14 24 34 - 01 11 21 31 | 05 15 25 35 - 02 12 22 32 | 06 16 26 36 - 03 13 23 33 | 07 17 27 37 - */ - const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u8(result.val[0]), - vreinterpret_u16_u8(result.val[2])); - const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u8(result.val[1]), - vreinterpret_u16_u8(result.val[3])); - const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]), - vreinterpret_u8_u16(r13_u16.val[0])); - const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]), - vreinterpret_u8_u16(r13_u16.val[1])); - const uint32x2_t x_0_4 = vreinterpret_u32_u8(r01_u8.val[0]); - const uint32x2_t x_1_5 = vreinterpret_u32_u8(r01_u8.val[1]); - const uint32x2_t x_2_6 = vreinterpret_u32_u8(r23_u8.val[0]); - const uint32x2_t x_3_7 = vreinterpret_u32_u8(r23_u8.val[1]); - vst1_lane_u32((uint32_t *)dst, x_0_4, 0); - dst += pitch; - vst1_lane_u32((uint32_t *)dst, x_1_5, 0); - dst += pitch; - vst1_lane_u32((uint32_t *)dst, x_2_6, 0); - dst += pitch; - vst1_lane_u32((uint32_t *)dst, x_3_7, 0); - dst += pitch; - vst1_lane_u32((uint32_t *)dst, x_0_4, 1); - dst += pitch; - vst1_lane_u32((uint32_t *)dst, x_1_5, 1); - dst += pitch; - vst1_lane_u32((uint32_t *)dst, x_2_6, 1); - dst += pitch; - vst1_lane_u32((uint32_t *)dst, x_3_7, 1); + /* + * 
uint8x8x4_t result + 00 01 02 03 | 04 05 06 07 + 10 11 12 13 | 14 15 16 17 + 20 21 22 23 | 24 25 26 27 + 30 31 32 33 | 34 35 36 37 + --- + * after vtrn_u16 + 00 01 20 21 | 04 05 24 25 + 02 03 22 23 | 06 07 26 27 + 10 11 30 31 | 14 15 34 35 + 12 13 32 33 | 16 17 36 37 + --- + * after vtrn_u8 + 00 10 20 30 | 04 14 24 34 + 01 11 21 31 | 05 15 25 35 + 02 12 22 32 | 06 16 26 36 + 03 13 23 33 | 07 17 27 37 + */ + const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u8(result.val[0]), + vreinterpret_u16_u8(result.val[2])); + const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u8(result.val[1]), + vreinterpret_u16_u8(result.val[3])); + const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]), + vreinterpret_u8_u16(r13_u16.val[0])); + const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]), + vreinterpret_u8_u16(r13_u16.val[1])); + const uint32x2_t x_0_4 = vreinterpret_u32_u8(r01_u8.val[0]); + const uint32x2_t x_1_5 = vreinterpret_u32_u8(r01_u8.val[1]); + const uint32x2_t x_2_6 = vreinterpret_u32_u8(r23_u8.val[0]); + const uint32x2_t x_3_7 = vreinterpret_u32_u8(r23_u8.val[1]); + vst1_lane_u32((uint32_t *)dst, x_0_4, 0); + dst += pitch; + vst1_lane_u32((uint32_t *)dst, x_1_5, 0); + dst += pitch; + vst1_lane_u32((uint32_t *)dst, x_2_6, 0); + dst += pitch; + vst1_lane_u32((uint32_t *)dst, x_3_7, 0); + dst += pitch; + vst1_lane_u32((uint32_t *)dst, x_0_4, 1); + dst += pitch; + vst1_lane_u32((uint32_t *)dst, x_1_5, 1); + dst += pitch; + vst1_lane_u32((uint32_t *)dst, x_2_6, 1); + dst += pitch; + vst1_lane_u32((uint32_t *)dst, x_3_7, 1); #else - vst4_lane_u8(dst, result, 0); - dst += pitch; - vst4_lane_u8(dst, result, 1); - dst += pitch; - vst4_lane_u8(dst, result, 2); - dst += pitch; - vst4_lane_u8(dst, result, 3); - dst += pitch; - vst4_lane_u8(dst, result, 4); - dst += pitch; - vst4_lane_u8(dst, result, 5); - dst += pitch; - vst4_lane_u8(dst, result, 6); - dst += pitch; - vst4_lane_u8(dst, result, 7); + vst4_lane_u8(dst, result, 0); + dst += 
pitch; + vst4_lane_u8(dst, result, 1); + dst += pitch; + vst4_lane_u8(dst, result, 2); + dst += pitch; + vst4_lane_u8(dst, result, 3); + dst += pitch; + vst4_lane_u8(dst, result, 4); + dst += pitch; + vst4_lane_u8(dst, result, 5); + dst += pitch; + vst4_lane_u8(dst, result, 6); + dst += pitch; + vst4_lane_u8(dst, result, 7); #endif // VPX_INCOMPATIBLE_GCC } -void vp8_loop_filter_vertical_edge_y_neon( - unsigned char *src, - int pitch, - unsigned char blimit, - unsigned char limit, - unsigned char thresh) { - unsigned char *s, *d; - uint8x16_t qblimit, qlimit, qthresh, q3, q4; - uint8x16_t q5, q6, q7, q8, q9, q10; - uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14; - uint8x8_t d15, d16, d17, d18, d19, d20, d21; - uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3; - uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7; - uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11; - uint8x8x4_t q4ResultH, q4ResultL; +void vp8_loop_filter_vertical_edge_y_neon(unsigned char *src, int pitch, + unsigned char blimit, + unsigned char limit, + unsigned char thresh) { + unsigned char *s, *d; + uint8x16_t qblimit, qlimit, qthresh, q3, q4; + uint8x16_t q5, q6, q7, q8, q9, q10; + uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14; + uint8x8_t d15, d16, d17, d18, d19, d20, d21; + uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3; + uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7; + uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11; + uint8x8x4_t q4ResultH, q4ResultL; - qblimit = vdupq_n_u8(blimit); - qlimit = vdupq_n_u8(limit); - qthresh = vdupq_n_u8(thresh); + qblimit = vdupq_n_u8(blimit); + qlimit = vdupq_n_u8(limit); + qthresh = vdupq_n_u8(thresh); - s = src - 4; - d6 = vld1_u8(s); - s += pitch; - d8 = vld1_u8(s); - s += pitch; - d10 = vld1_u8(s); - s += pitch; - d12 = vld1_u8(s); - s += pitch; - d14 = vld1_u8(s); - s += pitch; - d16 = vld1_u8(s); - s += pitch; - d18 = vld1_u8(s); - s += pitch; - d20 = vld1_u8(s); - s += pitch; - d7 = vld1_u8(s); - s += pitch; - d9 = vld1_u8(s); - s += pitch; - d11 = vld1_u8(s); - s 
+= pitch; - d13 = vld1_u8(s); - s += pitch; - d15 = vld1_u8(s); - s += pitch; - d17 = vld1_u8(s); - s += pitch; - d19 = vld1_u8(s); - s += pitch; - d21 = vld1_u8(s); + s = src - 4; + d6 = vld1_u8(s); + s += pitch; + d8 = vld1_u8(s); + s += pitch; + d10 = vld1_u8(s); + s += pitch; + d12 = vld1_u8(s); + s += pitch; + d14 = vld1_u8(s); + s += pitch; + d16 = vld1_u8(s); + s += pitch; + d18 = vld1_u8(s); + s += pitch; + d20 = vld1_u8(s); + s += pitch; + d7 = vld1_u8(s); + s += pitch; + d9 = vld1_u8(s); + s += pitch; + d11 = vld1_u8(s); + s += pitch; + d13 = vld1_u8(s); + s += pitch; + d15 = vld1_u8(s); + s += pitch; + d17 = vld1_u8(s); + s += pitch; + d19 = vld1_u8(s); + s += pitch; + d21 = vld1_u8(s); - q3 = vcombine_u8(d6, d7); - q4 = vcombine_u8(d8, d9); - q5 = vcombine_u8(d10, d11); - q6 = vcombine_u8(d12, d13); - q7 = vcombine_u8(d14, d15); - q8 = vcombine_u8(d16, d17); - q9 = vcombine_u8(d18, d19); - q10 = vcombine_u8(d20, d21); + q3 = vcombine_u8(d6, d7); + q4 = vcombine_u8(d8, d9); + q5 = vcombine_u8(d10, d11); + q6 = vcombine_u8(d12, d13); + q7 = vcombine_u8(d14, d15); + q8 = vcombine_u8(d16, d17); + q9 = vcombine_u8(d18, d19); + q10 = vcombine_u8(d20, d21); - q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7)); - q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8)); - q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9)); - q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10)); + q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7)); + q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8)); + q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9)); + q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10)); - q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]), - vreinterpretq_u16_u32(q2tmp2.val[0])); - q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]), - vreinterpretq_u16_u32(q2tmp3.val[0])); - q2tmp6 = 
vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]), - vreinterpretq_u16_u32(q2tmp2.val[1])); - q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]), - vreinterpretq_u16_u32(q2tmp3.val[1])); + q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]), + vreinterpretq_u16_u32(q2tmp2.val[0])); + q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]), + vreinterpretq_u16_u32(q2tmp3.val[0])); + q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]), + vreinterpretq_u16_u32(q2tmp2.val[1])); + q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]), + vreinterpretq_u16_u32(q2tmp3.val[1])); - q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]), - vreinterpretq_u8_u16(q2tmp5.val[0])); - q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]), - vreinterpretq_u8_u16(q2tmp5.val[1])); - q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]), - vreinterpretq_u8_u16(q2tmp7.val[0])); - q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]), - vreinterpretq_u8_u16(q2tmp7.val[1])); + q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]), + vreinterpretq_u8_u16(q2tmp5.val[0])); + q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]), + vreinterpretq_u8_u16(q2tmp5.val[1])); + q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]), + vreinterpretq_u8_u16(q2tmp7.val[0])); + q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]), + vreinterpretq_u8_u16(q2tmp7.val[1])); - q3 = q2tmp8.val[0]; - q4 = q2tmp8.val[1]; - q5 = q2tmp9.val[0]; - q6 = q2tmp9.val[1]; - q7 = q2tmp10.val[0]; - q8 = q2tmp10.val[1]; - q9 = q2tmp11.val[0]; - q10 = q2tmp11.val[1]; + q3 = q2tmp8.val[0]; + q4 = q2tmp8.val[1]; + q5 = q2tmp9.val[0]; + q6 = q2tmp9.val[1]; + q7 = q2tmp10.val[0]; + q8 = q2tmp10.val[1]; + q9 = q2tmp11.val[0]; + q10 = q2tmp11.val[1]; - vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4, - q5, q6, q7, q8, q9, q10, - &q5, &q6, &q7, &q8); + vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9, + q10, &q5, &q6, &q7, &q8); - q4ResultL.val[0] = vget_low_u8(q5); // d10 - 
q4ResultL.val[1] = vget_low_u8(q6); // d12 - q4ResultL.val[2] = vget_low_u8(q7); // d14 - q4ResultL.val[3] = vget_low_u8(q8); // d16 - q4ResultH.val[0] = vget_high_u8(q5); // d11 - q4ResultH.val[1] = vget_high_u8(q6); // d13 - q4ResultH.val[2] = vget_high_u8(q7); // d15 - q4ResultH.val[3] = vget_high_u8(q8); // d17 + q4ResultL.val[0] = vget_low_u8(q5); // d10 + q4ResultL.val[1] = vget_low_u8(q6); // d12 + q4ResultL.val[2] = vget_low_u8(q7); // d14 + q4ResultL.val[3] = vget_low_u8(q8); // d16 + q4ResultH.val[0] = vget_high_u8(q5); // d11 + q4ResultH.val[1] = vget_high_u8(q6); // d13 + q4ResultH.val[2] = vget_high_u8(q7); // d15 + q4ResultH.val[3] = vget_high_u8(q8); // d17 - d = src - 2; - write_4x8(d, pitch, q4ResultL); - d += pitch * 8; - write_4x8(d, pitch, q4ResultH); + d = src - 2; + write_4x8(d, pitch, q4ResultL); + d += pitch * 8; + write_4x8(d, pitch, q4ResultH); } -void vp8_loop_filter_vertical_edge_uv_neon( - unsigned char *u, - int pitch, - unsigned char blimit, - unsigned char limit, - unsigned char thresh, - unsigned char *v) { - unsigned char *us, *ud; - unsigned char *vs, *vd; - uint8x16_t qblimit, qlimit, qthresh, q3, q4; - uint8x16_t q5, q6, q7, q8, q9, q10; - uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14; - uint8x8_t d15, d16, d17, d18, d19, d20, d21; - uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3; - uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7; - uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11; - uint8x8x4_t q4ResultH, q4ResultL; +void vp8_loop_filter_vertical_edge_uv_neon(unsigned char *u, int pitch, + unsigned char blimit, + unsigned char limit, + unsigned char thresh, + unsigned char *v) { + unsigned char *us, *ud; + unsigned char *vs, *vd; + uint8x16_t qblimit, qlimit, qthresh, q3, q4; + uint8x16_t q5, q6, q7, q8, q9, q10; + uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14; + uint8x8_t d15, d16, d17, d18, d19, d20, d21; + uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3; + uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7; + uint8x16x2_t q2tmp8, q2tmp9, 
q2tmp10, q2tmp11; + uint8x8x4_t q4ResultH, q4ResultL; - qblimit = vdupq_n_u8(blimit); - qlimit = vdupq_n_u8(limit); - qthresh = vdupq_n_u8(thresh); + qblimit = vdupq_n_u8(blimit); + qlimit = vdupq_n_u8(limit); + qthresh = vdupq_n_u8(thresh); - us = u - 4; - d6 = vld1_u8(us); - us += pitch; - d8 = vld1_u8(us); - us += pitch; - d10 = vld1_u8(us); - us += pitch; - d12 = vld1_u8(us); - us += pitch; - d14 = vld1_u8(us); - us += pitch; - d16 = vld1_u8(us); - us += pitch; - d18 = vld1_u8(us); - us += pitch; - d20 = vld1_u8(us); + us = u - 4; + d6 = vld1_u8(us); + us += pitch; + d8 = vld1_u8(us); + us += pitch; + d10 = vld1_u8(us); + us += pitch; + d12 = vld1_u8(us); + us += pitch; + d14 = vld1_u8(us); + us += pitch; + d16 = vld1_u8(us); + us += pitch; + d18 = vld1_u8(us); + us += pitch; + d20 = vld1_u8(us); - vs = v - 4; - d7 = vld1_u8(vs); - vs += pitch; - d9 = vld1_u8(vs); - vs += pitch; - d11 = vld1_u8(vs); - vs += pitch; - d13 = vld1_u8(vs); - vs += pitch; - d15 = vld1_u8(vs); - vs += pitch; - d17 = vld1_u8(vs); - vs += pitch; - d19 = vld1_u8(vs); - vs += pitch; - d21 = vld1_u8(vs); + vs = v - 4; + d7 = vld1_u8(vs); + vs += pitch; + d9 = vld1_u8(vs); + vs += pitch; + d11 = vld1_u8(vs); + vs += pitch; + d13 = vld1_u8(vs); + vs += pitch; + d15 = vld1_u8(vs); + vs += pitch; + d17 = vld1_u8(vs); + vs += pitch; + d19 = vld1_u8(vs); + vs += pitch; + d21 = vld1_u8(vs); - q3 = vcombine_u8(d6, d7); - q4 = vcombine_u8(d8, d9); - q5 = vcombine_u8(d10, d11); - q6 = vcombine_u8(d12, d13); - q7 = vcombine_u8(d14, d15); - q8 = vcombine_u8(d16, d17); - q9 = vcombine_u8(d18, d19); - q10 = vcombine_u8(d20, d21); + q3 = vcombine_u8(d6, d7); + q4 = vcombine_u8(d8, d9); + q5 = vcombine_u8(d10, d11); + q6 = vcombine_u8(d12, d13); + q7 = vcombine_u8(d14, d15); + q8 = vcombine_u8(d16, d17); + q9 = vcombine_u8(d18, d19); + q10 = vcombine_u8(d20, d21); - q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7)); - q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), 
vreinterpretq_u32_u8(q8)); - q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9)); - q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10)); + q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7)); + q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8)); + q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9)); + q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10)); - q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]), - vreinterpretq_u16_u32(q2tmp2.val[0])); - q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]), - vreinterpretq_u16_u32(q2tmp3.val[0])); - q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]), - vreinterpretq_u16_u32(q2tmp2.val[1])); - q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]), - vreinterpretq_u16_u32(q2tmp3.val[1])); + q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]), + vreinterpretq_u16_u32(q2tmp2.val[0])); + q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]), + vreinterpretq_u16_u32(q2tmp3.val[0])); + q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]), + vreinterpretq_u16_u32(q2tmp2.val[1])); + q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]), + vreinterpretq_u16_u32(q2tmp3.val[1])); - q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]), - vreinterpretq_u8_u16(q2tmp5.val[0])); - q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]), - vreinterpretq_u8_u16(q2tmp5.val[1])); - q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]), - vreinterpretq_u8_u16(q2tmp7.val[0])); - q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]), - vreinterpretq_u8_u16(q2tmp7.val[1])); + q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]), + vreinterpretq_u8_u16(q2tmp5.val[0])); + q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]), + vreinterpretq_u8_u16(q2tmp5.val[1])); + q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]), + vreinterpretq_u8_u16(q2tmp7.val[0])); + q2tmp11 = 
vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]), + vreinterpretq_u8_u16(q2tmp7.val[1])); - q3 = q2tmp8.val[0]; - q4 = q2tmp8.val[1]; - q5 = q2tmp9.val[0]; - q6 = q2tmp9.val[1]; - q7 = q2tmp10.val[0]; - q8 = q2tmp10.val[1]; - q9 = q2tmp11.val[0]; - q10 = q2tmp11.val[1]; + q3 = q2tmp8.val[0]; + q4 = q2tmp8.val[1]; + q5 = q2tmp9.val[0]; + q6 = q2tmp9.val[1]; + q7 = q2tmp10.val[0]; + q8 = q2tmp10.val[1]; + q9 = q2tmp11.val[0]; + q10 = q2tmp11.val[1]; - vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4, - q5, q6, q7, q8, q9, q10, - &q5, &q6, &q7, &q8); + vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4, q5, q6, q7, q8, q9, + q10, &q5, &q6, &q7, &q8); - q4ResultL.val[0] = vget_low_u8(q5); // d10 - q4ResultL.val[1] = vget_low_u8(q6); // d12 - q4ResultL.val[2] = vget_low_u8(q7); // d14 - q4ResultL.val[3] = vget_low_u8(q8); // d16 - ud = u - 2; - write_4x8(ud, pitch, q4ResultL); + q4ResultL.val[0] = vget_low_u8(q5); // d10 + q4ResultL.val[1] = vget_low_u8(q6); // d12 + q4ResultL.val[2] = vget_low_u8(q7); // d14 + q4ResultL.val[3] = vget_low_u8(q8); // d16 + ud = u - 2; + write_4x8(ud, pitch, q4ResultL); - q4ResultH.val[0] = vget_high_u8(q5); // d11 - q4ResultH.val[1] = vget_high_u8(q6); // d13 - q4ResultH.val[2] = vget_high_u8(q7); // d15 - q4ResultH.val[3] = vget_high_u8(q8); // d17 - vd = v - 2; - write_4x8(vd, pitch, q4ResultH); + q4ResultH.val[0] = vget_high_u8(q5); // d11 + q4ResultH.val[1] = vget_high_u8(q6); // d13 + q4ResultH.val[2] = vget_high_u8(q7); // d15 + q4ResultH.val[3] = vget_high_u8(q8); // d17 + vd = v - 2; + write_4x8(vd, pitch, q4ResultH); } diff --git a/libs/libvpx/vp8/common/blockd.c b/libs/libvpx/vp8/common/blockd.c index 1fc3cd0ca7..f47c5bae15 100644 --- a/libs/libvpx/vp8/common/blockd.c +++ b/libs/libvpx/vp8/common/blockd.c @@ -8,15 +8,12 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "blockd.h" #include "vpx_mem/vpx_mem.h" -const unsigned char vp8_block2left[25] = -{ - 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 +const unsigned char vp8_block2left[25] = { + 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 }; -const unsigned char vp8_block2above[25] = -{ - 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8 +const unsigned char vp8_block2above[25] = { + 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8 }; diff --git a/libs/libvpx/vp8/common/blockd.h b/libs/libvpx/vp8/common/blockd.h index 192108a06d..74fc5d6dbf 100644 --- a/libs/libvpx/vp8/common/blockd.h +++ b/libs/libvpx/vp8/common/blockd.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_COMMON_BLOCKD_H_ #define VP8_COMMON_BLOCKD_H_ @@ -28,280 +27,266 @@ extern "C" { #define DCPREDSIMTHRESH 0 #define DCPREDCNTTHRESH 3 -#define MB_FEATURE_TREE_PROBS 3 -#define MAX_MB_SEGMENTS 4 +#define MB_FEATURE_TREE_PROBS 3 +#define MAX_MB_SEGMENTS 4 -#define MAX_REF_LF_DELTAS 4 -#define MAX_MODE_LF_DELTAS 4 +#define MAX_REF_LF_DELTAS 4 +#define MAX_MODE_LF_DELTAS 4 /* Segment Feature Masks */ -#define SEGMENT_DELTADATA 0 -#define SEGMENT_ABSDATA 1 +#define SEGMENT_DELTADATA 0 +#define SEGMENT_ABSDATA 1 -typedef struct -{ - int r, c; -} POS; - -#define PLANE_TYPE_Y_NO_DC 0 -#define PLANE_TYPE_Y2 1 -#define PLANE_TYPE_UV 2 -#define PLANE_TYPE_Y_WITH_DC 3 +typedef struct { int r, c; } POS; +#define PLANE_TYPE_Y_NO_DC 0 +#define PLANE_TYPE_Y2 1 +#define PLANE_TYPE_UV 2 +#define PLANE_TYPE_Y_WITH_DC 3 typedef char ENTROPY_CONTEXT; -typedef struct -{ - ENTROPY_CONTEXT y1[4]; - ENTROPY_CONTEXT u[2]; - ENTROPY_CONTEXT v[2]; - ENTROPY_CONTEXT y2; +typedef struct { + ENTROPY_CONTEXT y1[4]; + ENTROPY_CONTEXT u[2]; + ENTROPY_CONTEXT v[2]; + ENTROPY_CONTEXT y2; } ENTROPY_CONTEXT_PLANES; extern const unsigned char vp8_block2left[25]; extern const unsigned char 
vp8_block2above[25]; -#define VP8_COMBINEENTROPYCONTEXTS( Dest, A, B) \ - Dest = (A)+(B); +#define VP8_COMBINEENTROPYCONTEXTS(Dest, A, B) Dest = (A) + (B); +typedef enum { KEY_FRAME = 0, INTER_FRAME = 1 } FRAME_TYPE; -typedef enum -{ - KEY_FRAME = 0, - INTER_FRAME = 1 -} FRAME_TYPE; +typedef enum { + DC_PRED, /* average of above and left pixels */ + V_PRED, /* vertical prediction */ + H_PRED, /* horizontal prediction */ + TM_PRED, /* Truemotion prediction */ + B_PRED, /* block based prediction, each block has its own prediction mode */ -typedef enum -{ - DC_PRED, /* average of above and left pixels */ - V_PRED, /* vertical prediction */ - H_PRED, /* horizontal prediction */ - TM_PRED, /* Truemotion prediction */ - B_PRED, /* block based prediction, each block has its own prediction mode */ + NEARESTMV, + NEARMV, + ZEROMV, + NEWMV, + SPLITMV, - NEARESTMV, - NEARMV, - ZEROMV, - NEWMV, - SPLITMV, - - MB_MODE_COUNT + MB_MODE_COUNT } MB_PREDICTION_MODE; /* Macroblock level features */ -typedef enum -{ - MB_LVL_ALT_Q = 0, /* Use alternate Quantizer .... */ - MB_LVL_ALT_LF = 1, /* Use alternate loop filter value... */ - MB_LVL_MAX = 2 /* Number of MB level features supported */ +typedef enum { + MB_LVL_ALT_Q = 0, /* Use alternate Quantizer .... */ + MB_LVL_ALT_LF = 1, /* Use alternate loop filter value... 
*/ + MB_LVL_MAX = 2 /* Number of MB level features supported */ } MB_LVL_FEATURES; /* Segment Feature Masks */ -#define SEGMENT_ALTQ 0x01 -#define SEGMENT_ALT_LF 0x02 +#define SEGMENT_ALTQ 0x01 +#define SEGMENT_ALT_LF 0x02 -#define VP8_YMODES (B_PRED + 1) +#define VP8_YMODES (B_PRED + 1) #define VP8_UV_MODES (TM_PRED + 1) #define VP8_MVREFS (1 + SPLITMV - NEARESTMV) -typedef enum -{ - B_DC_PRED, /* average of above and left pixels */ - B_TM_PRED, +typedef enum { + B_DC_PRED, /* average of above and left pixels */ + B_TM_PRED, - B_VE_PRED, /* vertical prediction */ - B_HE_PRED, /* horizontal prediction */ + B_VE_PRED, /* vertical prediction */ + B_HE_PRED, /* horizontal prediction */ - B_LD_PRED, - B_RD_PRED, + B_LD_PRED, + B_RD_PRED, - B_VR_PRED, - B_VL_PRED, - B_HD_PRED, - B_HU_PRED, + B_VR_PRED, + B_VL_PRED, + B_HD_PRED, + B_HU_PRED, - LEFT4X4, - ABOVE4X4, - ZERO4X4, - NEW4X4, + LEFT4X4, + ABOVE4X4, + ZERO4X4, + NEW4X4, - B_MODE_COUNT + B_MODE_COUNT } B_PREDICTION_MODE; -#define VP8_BINTRAMODES (B_HU_PRED + 1) /* 10 */ +#define VP8_BINTRAMODES (B_HU_PRED + 1) /* 10 */ #define VP8_SUBMVREFS (1 + NEW4X4 - LEFT4X4) /* For keyframes, intra block modes are predicted by the (already decoded) modes for the Y blocks to the left and above us; for interframes, there is a single probability table. 
*/ -union b_mode_info -{ - B_PREDICTION_MODE as_mode; - int_mv mv; +union b_mode_info { + B_PREDICTION_MODE as_mode; + int_mv mv; }; -typedef enum -{ - INTRA_FRAME = 0, - LAST_FRAME = 1, - GOLDEN_FRAME = 2, - ALTREF_FRAME = 3, - MAX_REF_FRAMES = 4 +typedef enum { + INTRA_FRAME = 0, + LAST_FRAME = 1, + GOLDEN_FRAME = 2, + ALTREF_FRAME = 3, + MAX_REF_FRAMES = 4 } MV_REFERENCE_FRAME; -typedef struct -{ - uint8_t mode, uv_mode; - uint8_t ref_frame; - uint8_t is_4x4; - int_mv mv; +typedef struct { + uint8_t mode, uv_mode; + uint8_t ref_frame; + uint8_t is_4x4; + int_mv mv; - uint8_t partitioning; - uint8_t mb_skip_coeff; /* does this mb has coefficients at all, 1=no coefficients, 0=need decode tokens */ - uint8_t need_to_clamp_mvs; - uint8_t segment_id; /* Which set of segmentation parameters should be used for this MB */ + uint8_t partitioning; + /* does this mb has coefficients at all, 1=no coefficients, 0=need decode + tokens */ + uint8_t mb_skip_coeff; + uint8_t need_to_clamp_mvs; + /* Which set of segmentation parameters should be used for this MB */ + uint8_t segment_id; } MB_MODE_INFO; -typedef struct modeinfo -{ - MB_MODE_INFO mbmi; - union b_mode_info bmi[16]; +typedef struct modeinfo { + MB_MODE_INFO mbmi; + union b_mode_info bmi[16]; } MODE_INFO; #if CONFIG_MULTI_RES_ENCODING /* The mb-level information needed to be stored for higher-resolution encoder */ -typedef struct -{ - MB_PREDICTION_MODE mode; - MV_REFERENCE_FRAME ref_frame; - int_mv mv; - int dissim; /* dissimilarity level of the macroblock */ +typedef struct { + MB_PREDICTION_MODE mode; + MV_REFERENCE_FRAME ref_frame; + int_mv mv; + int dissim; /* dissimilarity level of the macroblock */ } LOWER_RES_MB_INFO; /* The frame-level information needed to be stored for higher-resolution * encoder */ -typedef struct -{ - FRAME_TYPE frame_type; - int is_frame_dropped; - // The frame rate for the lowest resolution. 
- double low_res_framerate; - /* The frame number of each reference frames */ - unsigned int low_res_ref_frames[MAX_REF_FRAMES]; - // The video frame counter value for the key frame, for lowest resolution. - unsigned int key_frame_counter_value; - LOWER_RES_MB_INFO *mb_info; +typedef struct { + FRAME_TYPE frame_type; + int is_frame_dropped; + // The frame rate for the lowest resolution. + double low_res_framerate; + /* The frame number of each reference frames */ + unsigned int low_res_ref_frames[MAX_REF_FRAMES]; + // The video frame counter value for the key frame, for lowest resolution. + unsigned int key_frame_counter_value; + LOWER_RES_MB_INFO *mb_info; } LOWER_RES_FRAME_INFO; #endif -typedef struct blockd -{ - short *qcoeff; - short *dqcoeff; - unsigned char *predictor; - short *dequant; +typedef struct blockd { + short *qcoeff; + short *dqcoeff; + unsigned char *predictor; + short *dequant; - int offset; - char *eob; + int offset; + char *eob; - union b_mode_info bmi; + union b_mode_info bmi; } BLOCKD; -typedef void (*vp8_subpix_fn_t)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch); +typedef void (*vp8_subpix_fn_t)(unsigned char *src, int src_pitch, int xofst, + int yofst, unsigned char *dst, int dst_pitch); -typedef struct macroblockd -{ - DECLARE_ALIGNED(16, unsigned char, predictor[384]); - DECLARE_ALIGNED(16, short, qcoeff[400]); - DECLARE_ALIGNED(16, short, dqcoeff[400]); - DECLARE_ALIGNED(16, char, eobs[25]); +typedef struct macroblockd { + DECLARE_ALIGNED(16, unsigned char, predictor[384]); + DECLARE_ALIGNED(16, short, qcoeff[400]); + DECLARE_ALIGNED(16, short, dqcoeff[400]); + DECLARE_ALIGNED(16, char, eobs[25]); - DECLARE_ALIGNED(16, short, dequant_y1[16]); - DECLARE_ALIGNED(16, short, dequant_y1_dc[16]); - DECLARE_ALIGNED(16, short, dequant_y2[16]); - DECLARE_ALIGNED(16, short, dequant_uv[16]); + DECLARE_ALIGNED(16, short, dequant_y1[16]); + DECLARE_ALIGNED(16, short, dequant_y1_dc[16]); + 
DECLARE_ALIGNED(16, short, dequant_y2[16]); + DECLARE_ALIGNED(16, short, dequant_uv[16]); - /* 16 Y blocks, 4 U, 4 V, 1 DC 2nd order block, each with 16 entries. */ - BLOCKD block[25]; - int fullpixel_mask; + /* 16 Y blocks, 4 U, 4 V, 1 DC 2nd order block, each with 16 entries. */ + BLOCKD block[25]; + int fullpixel_mask; - YV12_BUFFER_CONFIG pre; /* Filtered copy of previous frame reconstruction */ - YV12_BUFFER_CONFIG dst; + YV12_BUFFER_CONFIG pre; /* Filtered copy of previous frame reconstruction */ + YV12_BUFFER_CONFIG dst; - MODE_INFO *mode_info_context; - int mode_info_stride; + MODE_INFO *mode_info_context; + int mode_info_stride; - FRAME_TYPE frame_type; + FRAME_TYPE frame_type; - int up_available; - int left_available; + int up_available; + int left_available; - unsigned char *recon_above[3]; - unsigned char *recon_left[3]; - int recon_left_stride[2]; + unsigned char *recon_above[3]; + unsigned char *recon_left[3]; + int recon_left_stride[2]; - /* Y,U,V,Y2 */ - ENTROPY_CONTEXT_PLANES *above_context; - ENTROPY_CONTEXT_PLANES *left_context; + /* Y,U,V,Y2 */ + ENTROPY_CONTEXT_PLANES *above_context; + ENTROPY_CONTEXT_PLANES *left_context; - /* 0 indicates segmentation at MB level is not enabled. Otherwise the individual bits indicate which features are active. */ - unsigned char segmentation_enabled; + /* 0 indicates segmentation at MB level is not enabled. Otherwise the + * individual bits indicate which features are active. */ + unsigned char segmentation_enabled; - /* 0 (do not update) 1 (update) the macroblock segmentation map. */ - unsigned char update_mb_segmentation_map; + /* 0 (do not update) 1 (update) the macroblock segmentation map. */ + unsigned char update_mb_segmentation_map; - /* 0 (do not update) 1 (update) the macroblock segmentation feature data. */ - unsigned char update_mb_segmentation_data; + /* 0 (do not update) 1 (update) the macroblock segmentation feature data. 
*/ + unsigned char update_mb_segmentation_data; - /* 0 (do not update) 1 (update) the macroblock segmentation feature data. */ - unsigned char mb_segement_abs_delta; + /* 0 (do not update) 1 (update) the macroblock segmentation feature data. */ + unsigned char mb_segement_abs_delta; - /* Per frame flags that define which MB level features (such as quantizer or loop filter level) */ - /* are enabled and when enabled the proabilities used to decode the per MB flags in MB_MODE_INFO */ - vp8_prob mb_segment_tree_probs[MB_FEATURE_TREE_PROBS]; /* Probability Tree used to code Segment number */ + /* Per frame flags that define which MB level features (such as quantizer or + * loop filter level) */ + /* are enabled and when enabled the proabilities used to decode the per MB + * flags in MB_MODE_INFO */ + /* Probability Tree used to code Segment number */ + vp8_prob mb_segment_tree_probs[MB_FEATURE_TREE_PROBS]; + /* Segment parameters */ + signed char segment_feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; - signed char segment_feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; /* Segment parameters */ + /* mode_based Loop filter adjustment */ + unsigned char mode_ref_lf_delta_enabled; + unsigned char mode_ref_lf_delta_update; - /* mode_based Loop filter adjustment */ - unsigned char mode_ref_lf_delta_enabled; - unsigned char mode_ref_lf_delta_update; + /* Delta values have the range +/- MAX_LOOP_FILTER */ + signed char + last_ref_lf_deltas[MAX_REF_LF_DELTAS]; /* 0 = Intra, Last, GF, ARF */ + signed char ref_lf_deltas[MAX_REF_LF_DELTAS]; /* 0 = Intra, Last, GF, ARF */ + /* 0 = BPRED, ZERO_MV, MV, SPLIT */ + signed char last_mode_lf_deltas[MAX_MODE_LF_DELTAS]; + signed char + mode_lf_deltas[MAX_MODE_LF_DELTAS]; /* 0 = BPRED, ZERO_MV, MV, SPLIT */ - /* Delta values have the range +/- MAX_LOOP_FILTER */ - signed char last_ref_lf_deltas[MAX_REF_LF_DELTAS]; /* 0 = Intra, Last, GF, ARF */ - signed char ref_lf_deltas[MAX_REF_LF_DELTAS]; /* 0 = Intra, Last, GF, ARF */ - signed char 
last_mode_lf_deltas[MAX_MODE_LF_DELTAS]; /* 0 = BPRED, ZERO_MV, MV, SPLIT */ - signed char mode_lf_deltas[MAX_MODE_LF_DELTAS]; /* 0 = BPRED, ZERO_MV, MV, SPLIT */ + /* Distance of MB away from frame edges */ + int mb_to_left_edge; + int mb_to_right_edge; + int mb_to_top_edge; + int mb_to_bottom_edge; - /* Distance of MB away from frame edges */ - int mb_to_left_edge; - int mb_to_right_edge; - int mb_to_top_edge; - int mb_to_bottom_edge; + vp8_subpix_fn_t subpixel_predict; + vp8_subpix_fn_t subpixel_predict8x4; + vp8_subpix_fn_t subpixel_predict8x8; + vp8_subpix_fn_t subpixel_predict16x16; + void *current_bc; - - vp8_subpix_fn_t subpixel_predict; - vp8_subpix_fn_t subpixel_predict8x4; - vp8_subpix_fn_t subpixel_predict8x8; - vp8_subpix_fn_t subpixel_predict16x16; - - void *current_bc; - - int corrupted; + int corrupted; #if ARCH_X86 || ARCH_X86_64 - /* This is an intermediate buffer currently used in sub-pixel motion search - * to keep a copy of the reference area. This buffer can be used for other - * purpose. - */ - DECLARE_ALIGNED(32, unsigned char, y_buf[22*32]); + /* This is an intermediate buffer currently used in sub-pixel motion search + * to keep a copy of the reference area. This buffer can be used for other + * purpose. + */ + DECLARE_ALIGNED(32, unsigned char, y_buf[22 * 32]); #endif } MACROBLOCKD; - extern void vp8_build_block_doffsets(MACROBLOCKD *x); extern void vp8_setup_block_dptrs(MACROBLOCKD *x); diff --git a/libs/libvpx/vp8/common/coefupdateprobs.h b/libs/libvpx/vp8/common/coefupdateprobs.h index d96a19e747..9b01bba312 100644 --- a/libs/libvpx/vp8/common/coefupdateprobs.h +++ b/libs/libvpx/vp8/common/coefupdateprobs.h @@ -18,177 +18,177 @@ extern "C" { /* Update probabilities for the nodes in the token entropy tree. 
Generated file included by entropy.c */ -const vp8_prob vp8_coef_update_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] = -{ - { - { - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255, }, - {249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255, }, - {234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255, }, - {250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255, }, - {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - }, - { - { - {217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255, }, - {234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255, }, - }, - { - {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {223, 254, 254, 255, 255, 255, 255, 255, 
255, 255, 255, }, - {238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255, }, - {250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - }, - { - { - {186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255, }, - {234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255, }, - {251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255, }, - }, - { - {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, }, - }, - { - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - }, - { - { - {248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255, }, - {248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255, }, - {246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255, }, - {252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255, }, - {248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255, }, - {253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255, }, - {252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 
255, }, - {250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - { - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, }, - }, - }, -}; +const vp8_prob vp8_coef_update_probs + [BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES] = { + { + { + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255 }, + { 250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + }, + { + { + { 
217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255 }, + { 234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255 }, + }, + { + { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + }, + { + { + { 186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255 }, + { 251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255 }, + }, + { + { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 254, 
254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + }, + { + { + { 248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255 }, + { 248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 254, 255, 255, 
255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + { + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + }, + }, + }; #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/common/common.h b/libs/libvpx/vp8/common/common.h index e58a9cc23b..bbfc4f3934 100644 --- a/libs/libvpx/vp8/common/common.h +++ b/libs/libvpx/vp8/common/common.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_COMMON_COMMON_H_ #define VP8_COMMON_COMMON_H_ @@ -24,22 +23,23 @@ extern "C" { /* Only need this for fixed-size arrays, for structs just assign. */ -#define vp8_copy( Dest, Src) { \ - assert( sizeof( Dest) == sizeof( Src)); \ - memcpy( Dest, Src, sizeof( Src)); \ - } +#define vp8_copy(Dest, Src) \ + { \ + assert(sizeof(Dest) == sizeof(Src)); \ + memcpy(Dest, Src, sizeof(Src)); \ + } /* Use this for variably-sized arrays. 
*/ -#define vp8_copy_array( Dest, Src, N) { \ - assert( sizeof( *Dest) == sizeof( *Src)); \ - memcpy( Dest, Src, N * sizeof( *Src)); \ - } +#define vp8_copy_array(Dest, Src, N) \ + { \ + assert(sizeof(*Dest) == sizeof(*Src)); \ + memcpy(Dest, Src, N * sizeof(*Src)); \ + } -#define vp8_zero( Dest) memset( &Dest, 0, sizeof( Dest)); - -#define vp8_zero_array( Dest, N) memset( Dest, 0, N * sizeof( *Dest)); +#define vp8_zero(Dest) memset(&Dest, 0, sizeof(Dest)); +#define vp8_zero_array(Dest, N) memset(Dest, 0, N * sizeof(*Dest)); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/common/context.c b/libs/libvpx/vp8/common/context.c index 99e95d30ff..3c624ae628 100644 --- a/libs/libvpx/vp8/common/context.c +++ b/libs/libvpx/vp8/common/context.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include "entropy.h" /* *** GENERATED FILE: DO NOT EDIT *** */ diff --git a/libs/libvpx/vp8/common/copy_c.c b/libs/libvpx/vp8/common/copy_c.c index e3392913f6..4746125245 100644 --- a/libs/libvpx/vp8/common/copy_c.c +++ b/libs/libvpx/vp8/common/copy_c.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include #include "./vp8_rtcd.h" @@ -16,17 +15,13 @@ /* Copy 2 macroblocks to a buffer */ void vp8_copy32xn_c(const unsigned char *src_ptr, int src_stride, - unsigned char *dst_ptr, int dst_stride, - int height) -{ - int r; + unsigned char *dst_ptr, int dst_stride, int height) { + int r; - for (r = 0; r < height; r++) - { - memcpy(dst_ptr, src_ptr, 32); + for (r = 0; r < height; ++r) { + memcpy(dst_ptr, src_ptr, 32); - src_ptr += src_stride; - dst_ptr += dst_stride; - - } + src_ptr += src_stride; + dst_ptr += dst_stride; + } } diff --git a/libs/libvpx/vp8/common/debugmodes.c b/libs/libvpx/vp8/common/debugmodes.c index 159fddc6a7..27a97b260c 100644 --- a/libs/libvpx/vp8/common/debugmodes.c +++ b/libs/libvpx/vp8/common/debugmodes.c @@ -8,148 +8,128 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include #include "blockd.h" +void vp8_print_modes_and_motion_vectors(MODE_INFO *mi, int rows, int cols, + int frame) { + int mb_row; + int mb_col; + int mb_index = 0; + FILE *mvs = fopen("mvs.stt", "a"); -void vp8_print_modes_and_motion_vectors(MODE_INFO *mi, int rows, int cols, int frame) -{ + /* print out the macroblock Y modes */ + mb_index = 0; + fprintf(mvs, "Mb Modes for Frame %d\n", frame); - int mb_row; - int mb_col; - int mb_index = 0; - FILE *mvs = fopen("mvs.stt", "a"); + for (mb_row = 0; mb_row < rows; ++mb_row) { + for (mb_col = 0; mb_col < cols; ++mb_col) { + fprintf(mvs, "%2d ", mi[mb_index].mbmi.mode); - /* print out the macroblock Y modes */ - mb_index = 0; - fprintf(mvs, "Mb Modes for Frame %d\n", frame); - - for (mb_row = 0; mb_row < rows; mb_row++) - { - for (mb_col = 0; mb_col < cols; mb_col++) - { - - fprintf(mvs, "%2d ", mi[mb_index].mbmi.mode); - - mb_index++; - } - - fprintf(mvs, "\n"); - mb_index++; + mb_index++; } fprintf(mvs, "\n"); + mb_index++; + } - mb_index = 0; - fprintf(mvs, "Mb mv ref for Frame %d\n", frame); + fprintf(mvs, "\n"); - for (mb_row = 0; mb_row < rows; mb_row++) - { - for (mb_col = 0; mb_col < cols; 
mb_col++) - { + mb_index = 0; + fprintf(mvs, "Mb mv ref for Frame %d\n", frame); - fprintf(mvs, "%2d ", mi[mb_index].mbmi.ref_frame); + for (mb_row = 0; mb_row < rows; ++mb_row) { + for (mb_col = 0; mb_col < cols; ++mb_col) { + fprintf(mvs, "%2d ", mi[mb_index].mbmi.ref_frame); - mb_index++; - } - - fprintf(mvs, "\n"); - mb_index++; + mb_index++; } fprintf(mvs, "\n"); + mb_index++; + } - /* print out the macroblock UV modes */ - mb_index = 0; - fprintf(mvs, "UV Modes for Frame %d\n", frame); + fprintf(mvs, "\n"); - for (mb_row = 0; mb_row < rows; mb_row++) - { - for (mb_col = 0; mb_col < cols; mb_col++) - { + /* print out the macroblock UV modes */ + mb_index = 0; + fprintf(mvs, "UV Modes for Frame %d\n", frame); - fprintf(mvs, "%2d ", mi[mb_index].mbmi.uv_mode); + for (mb_row = 0; mb_row < rows; ++mb_row) { + for (mb_col = 0; mb_col < cols; ++mb_col) { + fprintf(mvs, "%2d ", mi[mb_index].mbmi.uv_mode); - mb_index++; - } - - mb_index++; - fprintf(mvs, "\n"); + mb_index++; } + mb_index++; fprintf(mvs, "\n"); + } - /* print out the block modes */ - fprintf(mvs, "Mbs for Frame %d\n", frame); - { - int b_row; + fprintf(mvs, "\n"); - for (b_row = 0; b_row < 4 * rows; b_row++) - { - int b_col; - int bindex; + /* print out the block modes */ + fprintf(mvs, "Mbs for Frame %d\n", frame); + { + int b_row; - for (b_col = 0; b_col < 4 * cols; b_col++) - { - mb_index = (b_row >> 2) * (cols + 1) + (b_col >> 2); - bindex = (b_row & 3) * 4 + (b_col & 3); + for (b_row = 0; b_row < 4 * rows; ++b_row) { + int b_col; + int bindex; - if (mi[mb_index].mbmi.mode == B_PRED) - fprintf(mvs, "%2d ", mi[mb_index].bmi[bindex].as_mode); - else - fprintf(mvs, "xx "); + for (b_col = 0; b_col < 4 * cols; ++b_col) { + mb_index = (b_row >> 2) * (cols + 1) + (b_col >> 2); + bindex = (b_row & 3) * 4 + (b_col & 3); - } + if (mi[mb_index].mbmi.mode == B_PRED) + fprintf(mvs, "%2d ", mi[mb_index].bmi[bindex].as_mode); + else + fprintf(mvs, "xx "); + } - fprintf(mvs, "\n"); - } + fprintf(mvs, "\n"); } - 
fprintf(mvs, "\n"); + } + fprintf(mvs, "\n"); - /* print out the macroblock mvs */ - mb_index = 0; - fprintf(mvs, "MVs for Frame %d\n", frame); + /* print out the macroblock mvs */ + mb_index = 0; + fprintf(mvs, "MVs for Frame %d\n", frame); - for (mb_row = 0; mb_row < rows; mb_row++) - { - for (mb_col = 0; mb_col < cols; mb_col++) - { - fprintf(mvs, "%5d:%-5d", mi[mb_index].mbmi.mv.as_mv.row / 2, mi[mb_index].mbmi.mv.as_mv.col / 2); + for (mb_row = 0; mb_row < rows; ++mb_row) { + for (mb_col = 0; mb_col < cols; ++mb_col) { + fprintf(mvs, "%5d:%-5d", mi[mb_index].mbmi.mv.as_mv.row / 2, + mi[mb_index].mbmi.mv.as_mv.col / 2); - mb_index++; - } - - mb_index++; - fprintf(mvs, "\n"); + mb_index++; } + mb_index++; fprintf(mvs, "\n"); + } + fprintf(mvs, "\n"); - /* print out the block modes */ - fprintf(mvs, "MVs for Frame %d\n", frame); - { - int b_row; + /* print out the block modes */ + fprintf(mvs, "MVs for Frame %d\n", frame); + { + int b_row; - for (b_row = 0; b_row < 4 * rows; b_row++) - { - int b_col; - int bindex; + for (b_row = 0; b_row < 4 * rows; ++b_row) { + int b_col; + int bindex; - for (b_col = 0; b_col < 4 * cols; b_col++) - { - mb_index = (b_row >> 2) * (cols + 1) + (b_col >> 2); - bindex = (b_row & 3) * 4 + (b_col & 3); - fprintf(mvs, "%3d:%-3d ", mi[mb_index].bmi[bindex].mv.as_mv.row, mi[mb_index].bmi[bindex].mv.as_mv.col); + for (b_col = 0; b_col < 4 * cols; ++b_col) { + mb_index = (b_row >> 2) * (cols + 1) + (b_col >> 2); + bindex = (b_row & 3) * 4 + (b_col & 3); + fprintf(mvs, "%3d:%-3d ", mi[mb_index].bmi[bindex].mv.as_mv.row, + mi[mb_index].bmi[bindex].mv.as_mv.col); + } - } - - fprintf(mvs, "\n"); - } + fprintf(mvs, "\n"); } - fprintf(mvs, "\n"); + } + fprintf(mvs, "\n"); - - fclose(mvs); + fclose(mvs); } diff --git a/libs/libvpx/vp8/common/default_coef_probs.h b/libs/libvpx/vp8/common/default_coef_probs.h index 4d69e4be66..8c861ac876 100644 --- a/libs/libvpx/vp8/common/default_coef_probs.h +++ b/libs/libvpx/vp8/common/default_coef_probs.h @@ 
-17,181 +17,141 @@ extern "C" { /*Generated file, included by entropy.c*/ - -static const vp8_prob default_coef_probs [BLOCK_TYPES] - [COEF_BANDS] - [PREV_COEF_CONTEXTS] - [ENTROPY_NODES] = -{ - { /* Block Type ( 0 ) */ +static const vp8_prob default_coef_probs + [BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES] = { + { /* Block Type ( 0 ) */ { /* Coeff Band ( 0 )*/ - { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, - { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, - { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 } - }, + { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 } }, { /* Coeff Band ( 1 )*/ - { 253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128 }, - { 189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128 }, - { 106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128 } - }, + { 253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128 }, + { 189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128 }, + { 106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128 } }, { /* Coeff Band ( 2 )*/ - { 1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128 }, - { 181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128 }, - { 78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128 } - }, + { 1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128 }, + { 181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128 }, + { 78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128 } }, { /* Coeff Band ( 3 )*/ - { 1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128 }, - { 184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128 }, - { 77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128 } - }, + { 1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128 }, + { 184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128 }, + { 77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128 } }, { /* Coeff Band ( 4 )*/ - { 1, 101, 251, 255, 241, 255, 128, 128, 128, 
128, 128 }, - { 170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128 }, - { 37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128 } - }, + { 1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128 }, + { 170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128 }, + { 37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128 } }, { /* Coeff Band ( 5 )*/ - { 1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128 }, - { 207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128 }, - { 102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128 } - }, + { 1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128 }, + { 207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128 }, + { 102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128 } }, { /* Coeff Band ( 6 )*/ - { 1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128 }, - { 177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128 }, - { 80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128 } - }, + { 1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128 }, + { 177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128 }, + { 80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128 } }, { /* Coeff Band ( 7 )*/ - { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, - { 246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, - { 255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 } - } - }, - { /* Block Type ( 1 ) */ + { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 } } }, + { /* Block Type ( 1 ) */ { /* Coeff Band ( 0 )*/ - { 198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62 }, - { 131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1 }, - { 68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128 } - }, + { 198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62 }, + { 131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1 }, + { 68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128 } }, { /* Coeff Band ( 1 )*/ - { 1, 149, 241, 255, 221, 224, 255, 255, 128, 
128, 128 }, - { 184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128 }, - { 81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128 } - }, + { 1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128 }, + { 184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128 }, + { 81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128 } }, { /* Coeff Band ( 2 )*/ - { 1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128 }, - { 99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128 }, - { 23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128 } - }, + { 1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128 }, + { 99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128 }, + { 23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128 } }, { /* Coeff Band ( 3 )*/ - { 1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128 }, - { 109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128 }, - { 44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128 } - }, + { 1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128 }, + { 109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128 }, + { 44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128 } }, { /* Coeff Band ( 4 )*/ - { 1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128 }, - { 94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128 }, - { 22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128 } - }, + { 1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128 }, + { 94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128 }, + { 22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128 } }, { /* Coeff Band ( 5 )*/ - { 1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128 }, - { 124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128 }, - { 35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128 } - }, + { 1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128 }, + { 124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128 }, + { 35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128 } }, { /* Coeff Band ( 6 )*/ - { 1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128 }, - { 121, 141, 235, 255, 225, 227, 255, 255, 
128, 128, 128 }, - { 45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128 } - }, + { 1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128 }, + { 121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128 }, + { 45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128 } }, { /* Coeff Band ( 7 )*/ - { 1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128 }, - { 203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128 }, - { 137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128 } - } - }, - { /* Block Type ( 2 ) */ + { 1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128 }, + { 203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128 }, + { 137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128 } } }, + { /* Block Type ( 2 ) */ { /* Coeff Band ( 0 )*/ - { 253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128 }, - { 175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128 }, - { 73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128 } - }, + { 253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128 }, + { 175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128 }, + { 73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128 } }, { /* Coeff Band ( 1 )*/ - { 1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128 }, - { 239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128 }, - { 155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128 } - }, + { 1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128 }, + { 239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128 }, + { 155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128 } }, { /* Coeff Band ( 2 )*/ - { 1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128 }, - { 201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128 }, - { 69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128 } - }, + { 1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128 }, + { 201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128 }, + { 69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128 } }, { /* Coeff Band ( 3 )*/ - { 1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128 }, - { 223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128 
}, - { 141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128 } - }, + { 1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128 }, + { 223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128 }, + { 141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128 } }, { /* Coeff Band ( 4 )*/ - { 1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128 }, - { 190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128 }, - { 149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 } - }, + { 1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128 }, + { 190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128 }, + { 149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 } }, { /* Coeff Band ( 5 )*/ - { 1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, - { 247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, - { 240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128 } - }, + { 1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128 } }, { /* Coeff Band ( 6 )*/ - { 1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128 }, - { 213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128 }, - { 55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128 } - }, + { 1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128 }, + { 213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128 }, + { 55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128 } }, { /* Coeff Band ( 7 )*/ - { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, - { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, - { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 } - } - }, - { /* Block Type ( 3 ) */ + { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 } } }, + { /* Block Type ( 3 ) */ { /* Coeff Band ( 0 )*/ - { 202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255 }, - { 126, 38, 182, 232, 169, 184, 228, 174, 
255, 187, 128 }, - { 61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128 } - }, + { 202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255 }, + { 126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128 }, + { 61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128 } }, { /* Coeff Band ( 1 )*/ - { 1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128 }, - { 166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128 }, - { 39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128 } - }, + { 1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128 }, + { 166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128 }, + { 39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128 } }, { /* Coeff Band ( 2 )*/ - { 1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128 }, - { 124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128 }, - { 24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128 } - }, + { 1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128 }, + { 124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128 }, + { 24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128 } }, { /* Coeff Band ( 3 )*/ - { 1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128 }, - { 149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128 }, - { 28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128 } - }, + { 1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128 }, + { 149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128 }, + { 28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128 } }, { /* Coeff Band ( 4 )*/ - { 1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128 }, - { 123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128 }, - { 20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128 } - }, + { 1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128 }, + { 123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128 }, + { 20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128 } }, { /* Coeff Band ( 5 )*/ - { 1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128 }, - { 168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128 }, - { 47, 116, 215, 255, 211, 212, 255, 255, 
128, 128, 128 } - }, + { 1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128 }, + { 168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128 }, + { 47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128 } }, { /* Coeff Band ( 6 )*/ - { 1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128 }, - { 141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128 }, - { 42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128 } - }, + { 1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128 }, + { 141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128 }, + { 42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128 } }, { /* Coeff Band ( 7 )*/ - { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, - { 244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, - { 238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 } - } - } -}; + { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 } } } + }; #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/common/dequantize.c b/libs/libvpx/vp8/common/dequantize.c index f8b04fa4ee..8a56ae6868 100644 --- a/libs/libvpx/vp8/common/dequantize.c +++ b/libs/libvpx/vp8/common/dequantize.c @@ -8,36 +8,30 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "vpx_config.h" #include "vp8_rtcd.h" #include "vp8/common/blockd.h" #include "vpx_mem/vpx_mem.h" -void vp8_dequantize_b_c(BLOCKD *d, short *DQC) -{ - int i; - short *DQ = d->dqcoeff; - short *Q = d->qcoeff; +void vp8_dequantize_b_c(BLOCKD *d, short *DQC) { + int i; + short *DQ = d->dqcoeff; + short *Q = d->qcoeff; - for (i = 0; i < 16; i++) - { - DQ[i] = Q[i] * DQC[i]; - } + for (i = 0; i < 16; ++i) { + DQ[i] = Q[i] * DQC[i]; + } } -void vp8_dequant_idct_add_c(short *input, short *dq, - unsigned char *dest, int stride) -{ - int i; +void vp8_dequant_idct_add_c(short *input, short *dq, unsigned char *dest, + int stride) { + int i; - for (i = 0; i < 16; i++) - { - input[i] = dq[i] * input[i]; - } + for (i = 0; i < 16; ++i) { + input[i] = dq[i] * input[i]; + } - vp8_short_idct4x4llm_c(input, dest, stride, dest, stride); - - memset(input, 0, 32); + vp8_short_idct4x4llm_c(input, dest, stride, dest, stride); + memset(input, 0, 32); } diff --git a/libs/libvpx/vp8/common/entropy.c b/libs/libvpx/vp8/common/entropy.c index c00e565f06..f61fa9e8e4 100644 --- a/libs/libvpx/vp8/common/entropy.c +++ b/libs/libvpx/vp8/common/entropy.c @@ -15,47 +15,34 @@ #include "coefupdateprobs.h" -DECLARE_ALIGNED(16, const unsigned char, vp8_norm[256]) = -{ - 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +DECLARE_ALIGNED(16, const unsigned char, vp8_norm[256]) = { + 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; -DECLARE_ALIGNED(16, const unsigned char, vp8_coef_bands[16]) = -{ 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7}; +DECLARE_ALIGNED(16, const unsigned char, vp8_coef_bands[16]) = { + 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7 +}; DECLARE_ALIGNED(16, const unsigned char, - vp8_prev_token_class[MAX_ENTROPY_TOKENS]) = -{ 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0}; - -DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]) = -{ - 0, 1, 4, 8, - 5, 2, 3, 6, - 9, 12, 13, 10, - 7, 11, 14, 15, + vp8_prev_token_class[MAX_ENTROPY_TOKENS]) = { + 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0 }; -DECLARE_ALIGNED(16, const short, vp8_default_inv_zig_zag[16]) = -{ - 1, 2, 6, 7, - 3, 5, 8, 13, - 4, 9, 12, 14, - 10, 11, 15, 16 +DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]) = { + 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15, +}; + +DECLARE_ALIGNED(16, const short, vp8_default_inv_zig_zag[16]) = { + 1, 2, 6, 7, 3, 5, 8, 13, 4, 9, 12, 14, 10, 11, 15, 16 }; /* vp8_default_zig_zag_mask generated with: @@ -64,94 +51,76 @@ DECLARE_ALIGNED(16, const short, vp8_default_inv_zig_zag[16]) = 
{ int i; - for (i = 0; i < 16; i++) + for (i = 0; i < 16; ++i) { vp8_default_zig_zag_mask[vp8_default_zig_zag1d[i]] = 1 << i; } } */ -DECLARE_ALIGNED(16, const short, vp8_default_zig_zag_mask[16]) = -{ - 1, 2, 32, 64, - 4, 16, 128, 4096, - 8, 256, 2048, 8192, - 512, 1024, 16384, -32768 +DECLARE_ALIGNED(16, const short, vp8_default_zig_zag_mask[16]) = { + 1, 2, 32, 64, 4, 16, 128, 4096, 8, 256, 2048, 8192, 512, 1024, 16384, -32768 }; -const int vp8_mb_feature_data_bits[MB_LVL_MAX] = {7, 6}; +const int vp8_mb_feature_data_bits[MB_LVL_MAX] = { 7, 6 }; /* Array indices are identical to previously-existing CONTEXT_NODE indices */ - -const vp8_tree_index vp8_coef_tree[ 22] = /* corresponding _CONTEXT_NODEs */ -{ - -DCT_EOB_TOKEN, 2, /* 0 = EOB */ - -ZERO_TOKEN, 4, /* 1 = ZERO */ - -ONE_TOKEN, 6, /* 2 = ONE */ - 8, 12, /* 3 = LOW_VAL */ - -TWO_TOKEN, 10, /* 4 = TWO */ - -THREE_TOKEN, -FOUR_TOKEN, /* 5 = THREE */ - 14, 16, /* 6 = HIGH_LOW */ - -DCT_VAL_CATEGORY1, -DCT_VAL_CATEGORY2, /* 7 = CAT_ONE */ - 18, 20, /* 8 = CAT_THREEFOUR */ - -DCT_VAL_CATEGORY3, -DCT_VAL_CATEGORY4, /* 9 = CAT_THREE */ - -DCT_VAL_CATEGORY5, -DCT_VAL_CATEGORY6 /* 10 = CAT_FIVE */ +/* corresponding _CONTEXT_NODEs */ +/* clang-format off */ +const vp8_tree_index vp8_coef_tree[22] = { + -DCT_EOB_TOKEN, 2, /* 0 = EOB */ + -ZERO_TOKEN, 4, /* 1 = ZERO */ + -ONE_TOKEN, 6, /* 2 = ONE */ + 8, 12, /* 3 = LOW_VAL */ + -TWO_TOKEN, 10, /* 4 = TWO */ + -THREE_TOKEN, -FOUR_TOKEN, /* 5 = THREE */ + 14, 16, /* 6 = HIGH_LOW */ + -DCT_VAL_CATEGORY1, -DCT_VAL_CATEGORY2, /* 7 = CAT_ONE */ + 18, 20, /* 8 = CAT_THREEFOUR */ + -DCT_VAL_CATEGORY3, -DCT_VAL_CATEGORY4, /* 9 = CAT_THREE */ + -DCT_VAL_CATEGORY5, -DCT_VAL_CATEGORY6 /* 10 = CAT_FIVE */ }; +/* clang-format on */ /* vp8_coef_encodings generated with: vp8_tokens_from_tree(vp8_coef_encodings, vp8_coef_tree); */ -vp8_token vp8_coef_encodings[MAX_ENTROPY_TOKENS] = -{ - {2, 2}, - {6, 3}, - {28, 5}, - {58, 6}, - {59, 6}, - {60, 6}, - {61, 6}, - {124, 7}, - {125, 7}, - 
{126, 7}, - {127, 7}, - {0, 1} +vp8_token vp8_coef_encodings[MAX_ENTROPY_TOKENS] = { + { 2, 2 }, { 6, 3 }, { 28, 5 }, { 58, 6 }, { 59, 6 }, { 60, 6 }, + { 61, 6 }, { 124, 7 }, { 125, 7 }, { 126, 7 }, { 127, 7 }, { 0, 1 } }; /* Trees for extra bits. Probabilities are constant and do not depend on previously encoded bits */ -static const vp8_prob Pcat1[] = { 159}; -static const vp8_prob Pcat2[] = { 165, 145}; -static const vp8_prob Pcat3[] = { 173, 148, 140}; -static const vp8_prob Pcat4[] = { 176, 155, 140, 135}; -static const vp8_prob Pcat5[] = { 180, 157, 141, 134, 130}; -static const vp8_prob Pcat6[] = -{ 254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129}; - +static const vp8_prob Pcat1[] = { 159 }; +static const vp8_prob Pcat2[] = { 165, 145 }; +static const vp8_prob Pcat3[] = { 173, 148, 140 }; +static const vp8_prob Pcat4[] = { 176, 155, 140, 135 }; +static const vp8_prob Pcat5[] = { 180, 157, 141, 134, 130 }; +static const vp8_prob Pcat6[] = { 254, 254, 243, 230, 196, 177, + 153, 140, 133, 130, 129 }; /* tree index tables generated with: - void init_bit_tree(vp8_tree_index *p, int n) - { - int i = 0; + void init_bit_tree(vp8_tree_index *p, int n) { + int i = 0; - while (++i < n) - { - p[0] = p[1] = i << 1; - p += 2; - } + while (++i < n) { + p[0] = p[1] = i << 1; + p += 2; + } - p[0] = p[1] = 0; + p[0] = p[1] = 0; } - void init_bit_trees() - { - init_bit_tree(cat1, 1); - init_bit_tree(cat2, 2); - init_bit_tree(cat3, 3); - init_bit_tree(cat4, 4); - init_bit_tree(cat5, 5); - init_bit_tree(cat6, 11); + void init_bit_trees() { + init_bit_tree(cat1, 1); + init_bit_tree(cat2, 2); + init_bit_tree(cat3, 3); + init_bit_tree(cat4, 4); + init_bit_tree(cat5, 5); + init_bit_tree(cat6, 11); } */ @@ -160,29 +129,19 @@ static const vp8_tree_index cat2[4] = { 2, 2, 0, 0 }; static const vp8_tree_index cat3[6] = { 2, 2, 4, 4, 0, 0 }; static const vp8_tree_index cat4[8] = { 2, 2, 4, 4, 6, 6, 0, 0 }; static const vp8_tree_index cat5[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 }; 
-static const vp8_tree_index cat6[22] = { 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, - 14, 14, 16, 16, 18, 18, 20, 20, 0, 0 }; +static const vp8_tree_index cat6[22] = { + 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14, 16, 16, 18, 18, 20, 20, 0, 0 +}; -const vp8_extra_bit_struct vp8_extra_bits[12] = -{ - { 0, 0, 0, 0}, - { 0, 0, 0, 1}, - { 0, 0, 0, 2}, - { 0, 0, 0, 3}, - { 0, 0, 0, 4}, - { cat1, Pcat1, 1, 5}, - { cat2, Pcat2, 2, 7}, - { cat3, Pcat3, 3, 11}, - { cat4, Pcat4, 4, 19}, - { cat5, Pcat5, 5, 35}, - { cat6, Pcat6, 11, 67}, - { 0, 0, 0, 0} +const vp8_extra_bit_struct vp8_extra_bits[12] = { + { 0, 0, 0, 0 }, { 0, 0, 0, 1 }, { 0, 0, 0, 2 }, + { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { cat1, Pcat1, 1, 5 }, + { cat2, Pcat2, 2, 7 }, { cat3, Pcat3, 3, 11 }, { cat4, Pcat4, 4, 19 }, + { cat5, Pcat5, 5, 35 }, { cat6, Pcat6, 11, 67 }, { 0, 0, 0, 0 } }; #include "default_coef_probs.h" -void vp8_default_coef_probs(VP8_COMMON *pc) -{ - memcpy(pc->fc.coef_probs, default_coef_probs, sizeof(default_coef_probs)); +void vp8_default_coef_probs(VP8_COMMON *pc) { + memcpy(pc->fc.coef_probs, default_coef_probs, sizeof(default_coef_probs)); } - diff --git a/libs/libvpx/vp8/common/entropy.h b/libs/libvpx/vp8/common/entropy.h index a90bab4bac..d088560011 100644 --- a/libs/libvpx/vp8/common/entropy.h +++ b/libs/libvpx/vp8/common/entropy.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_COMMON_ENTROPY_H_ #define VP8_COMMON_ENTROPY_H_ @@ -21,18 +20,18 @@ extern "C" { /* Coefficient token alphabet */ -#define ZERO_TOKEN 0 /* 0 Extra Bits 0+0 */ -#define ONE_TOKEN 1 /* 1 Extra Bits 0+1 */ -#define TWO_TOKEN 2 /* 2 Extra Bits 0+1 */ -#define THREE_TOKEN 3 /* 3 Extra Bits 0+1 */ -#define FOUR_TOKEN 4 /* 4 Extra Bits 0+1 */ -#define DCT_VAL_CATEGORY1 5 /* 5-6 Extra Bits 1+1 */ -#define DCT_VAL_CATEGORY2 6 /* 7-10 Extra Bits 2+1 */ -#define DCT_VAL_CATEGORY3 7 /* 11-18 Extra Bits 3+1 */ -#define DCT_VAL_CATEGORY4 8 /* 19-34 Extra Bits 4+1 */ -#define DCT_VAL_CATEGORY5 9 /* 35-66 Extra Bits 5+1 */ -#define DCT_VAL_CATEGORY6 10 /* 67+ Extra Bits 11+1 */ -#define DCT_EOB_TOKEN 11 /* EOB Extra Bits 0+0 */ +#define ZERO_TOKEN 0 /* 0 Extra Bits 0+0 */ +#define ONE_TOKEN 1 /* 1 Extra Bits 0+1 */ +#define TWO_TOKEN 2 /* 2 Extra Bits 0+1 */ +#define THREE_TOKEN 3 /* 3 Extra Bits 0+1 */ +#define FOUR_TOKEN 4 /* 4 Extra Bits 0+1 */ +#define DCT_VAL_CATEGORY1 5 /* 5-6 Extra Bits 1+1 */ +#define DCT_VAL_CATEGORY2 6 /* 7-10 Extra Bits 2+1 */ +#define DCT_VAL_CATEGORY3 7 /* 11-18 Extra Bits 3+1 */ +#define DCT_VAL_CATEGORY4 8 /* 19-34 Extra Bits 4+1 */ +#define DCT_VAL_CATEGORY5 9 /* 35-66 Extra Bits 5+1 */ +#define DCT_VAL_CATEGORY6 10 /* 67+ Extra Bits 11+1 */ +#define DCT_EOB_TOKEN 11 /* EOB Extra Bits 0+0 */ #define MAX_ENTROPY_TOKENS 12 #define ENTROPY_NODES 11 @@ -41,21 +40,20 @@ extern const vp8_tree_index vp8_coef_tree[]; extern const struct vp8_token_struct vp8_coef_encodings[MAX_ENTROPY_TOKENS]; -typedef struct -{ - vp8_tree_p tree; - const vp8_prob *prob; - int Len; - int base_val; +typedef struct { + vp8_tree_p tree; + const vp8_prob *prob; + int Len; + int base_val; } vp8_extra_bit_struct; -extern const vp8_extra_bit_struct vp8_extra_bits[12]; /* indexed by token value */ +extern const vp8_extra_bit_struct + vp8_extra_bits[12]; /* indexed by token value */ -#define PROB_UPDATE_BASELINE_COST 7 - -#define MAX_PROB 255 -#define DCT_MAX_VALUE 
2048 +#define PROB_UPDATE_BASELINE_COST 7 +#define MAX_PROB 255 +#define DCT_MAX_VALUE 2048 /* Coefficients are predicted via a 3-dimensional probability table. */ @@ -86,12 +84,13 @@ extern DECLARE_ALIGNED(16, const unsigned char, vp8_coef_bands[16]); distinct bands). */ /*# define DC_TOKEN_CONTEXTS 3*/ /* 00, 0!0, !0!0 */ -# define PREV_COEF_CONTEXTS 3 +#define PREV_COEF_CONTEXTS 3 -extern DECLARE_ALIGNED(16, const unsigned char, vp8_prev_token_class[MAX_ENTROPY_TOKENS]); - -extern const vp8_prob vp8_coef_update_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; +extern DECLARE_ALIGNED(16, const unsigned char, + vp8_prev_token_class[MAX_ENTROPY_TOKENS]); +extern const vp8_prob vp8_coef_update_probs[BLOCK_TYPES][COEF_BANDS] + [PREV_COEF_CONTEXTS][ENTROPY_NODES]; struct VP8Common; void vp8_default_coef_probs(struct VP8Common *); diff --git a/libs/libvpx/vp8/common/entropymode.c b/libs/libvpx/vp8/common/entropymode.c index 8981a8d3c2..30c2fa86ae 100644 --- a/libs/libvpx/vp8/common/entropymode.c +++ b/libs/libvpx/vp8/common/entropymode.c @@ -16,156 +16,88 @@ #include "vp8_entropymodedata.h" -int vp8_mv_cont(const int_mv *l, const int_mv *a) -{ - int lez = (l->as_int == 0); - int aez = (a->as_int == 0); - int lea = (l->as_int == a->as_int); +int vp8_mv_cont(const int_mv *l, const int_mv *a) { + int lez = (l->as_int == 0); + int aez = (a->as_int == 0); + int lea = (l->as_int == a->as_int); - if (lea && lez) - return SUBMVREF_LEFT_ABOVE_ZED; + if (lea && lez) return SUBMVREF_LEFT_ABOVE_ZED; - if (lea) - return SUBMVREF_LEFT_ABOVE_SAME; + if (lea) return SUBMVREF_LEFT_ABOVE_SAME; - if (aez) - return SUBMVREF_ABOVE_ZED; + if (aez) return SUBMVREF_ABOVE_ZED; - if (lez) - return SUBMVREF_LEFT_ZED; + if (lez) return SUBMVREF_LEFT_ZED; - return SUBMVREF_NORMAL; + return SUBMVREF_NORMAL; } -static const vp8_prob sub_mv_ref_prob [VP8_SUBMVREFS-1] = { 180, 162, 25}; +static const vp8_prob sub_mv_ref_prob[VP8_SUBMVREFS - 1] = { 180, 162, 25 }; -const vp8_prob 
vp8_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP8_SUBMVREFS-1] = -{ - { 147, 136, 18 }, - { 106, 145, 1 }, - { 179, 121, 1 }, - { 223, 1 , 34 }, - { 208, 1 , 1 } +const vp8_prob vp8_sub_mv_ref_prob2[SUBMVREF_COUNT] + [VP8_SUBMVREFS - 1] = { { 147, 136, 18 }, + { 106, 145, 1 }, + { 179, 121, 1 }, + { 223, 1, 34 }, + { 208, 1, 1 } }; + +const vp8_mbsplit vp8_mbsplits[VP8_NUMMBSPLITS] = { + { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 }, + { 0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 3, 3, 2, 2, 3, 3 }, + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } }; +const int vp8_mbsplit_count[VP8_NUMMBSPLITS] = { 2, 2, 4, 16 }; - -const vp8_mbsplit vp8_mbsplits [VP8_NUMMBSPLITS] = -{ - { - 0, 0, 0, 0, - 0, 0, 0, 0, - 1, 1, 1, 1, - 1, 1, 1, 1, - }, - { - 0, 0, 1, 1, - 0, 0, 1, 1, - 0, 0, 1, 1, - 0, 0, 1, 1, - }, - { - 0, 0, 1, 1, - 0, 0, 1, 1, - 2, 2, 3, 3, - 2, 2, 3, 3, - }, - { - 0, 1, 2, 3, - 4, 5, 6, 7, - 8, 9, 10, 11, - 12, 13, 14, 15, - } -}; - -const int vp8_mbsplit_count [VP8_NUMMBSPLITS] = { 2, 2, 4, 16}; - -const vp8_prob vp8_mbsplit_probs [VP8_NUMMBSPLITS-1] = { 110, 111, 150}; - +const vp8_prob vp8_mbsplit_probs[VP8_NUMMBSPLITS - 1] = { 110, 111, 150 }; /* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. 
*/ -const vp8_tree_index vp8_bmode_tree[18] = /* INTRAMODECONTEXTNODE value */ -{ - -B_DC_PRED, 2, /* 0 = DC_NODE */ - -B_TM_PRED, 4, /* 1 = TM_NODE */ - -B_VE_PRED, 6, /* 2 = VE_NODE */ - 8, 12, /* 3 = COM_NODE */ - -B_HE_PRED, 10, /* 4 = HE_NODE */ - -B_RD_PRED, -B_VR_PRED, /* 5 = RD_NODE */ - -B_LD_PRED, 14, /* 6 = LD_NODE */ - -B_VL_PRED, 16, /* 7 = VL_NODE */ - -B_HD_PRED, -B_HU_PRED /* 8 = HD_NODE */ -}; +const vp8_tree_index vp8_bmode_tree[18] = /* INTRAMODECONTEXTNODE value */ + { + -B_DC_PRED, 2, /* 0 = DC_NODE */ + -B_TM_PRED, 4, /* 1 = TM_NODE */ + -B_VE_PRED, 6, /* 2 = VE_NODE */ + 8, 12, /* 3 = COM_NODE */ + -B_HE_PRED, 10, /* 4 = HE_NODE */ + -B_RD_PRED, -B_VR_PRED, /* 5 = RD_NODE */ + -B_LD_PRED, 14, /* 6 = LD_NODE */ + -B_VL_PRED, 16, /* 7 = VL_NODE */ + -B_HD_PRED, -B_HU_PRED /* 8 = HD_NODE */ + }; /* Again, these trees use the same probability indices as their explicitly-programmed predecessors. */ -const vp8_tree_index vp8_ymode_tree[8] = -{ - -DC_PRED, 2, - 4, 6, - -V_PRED, -H_PRED, - -TM_PRED, -B_PRED +const vp8_tree_index vp8_ymode_tree[8] = { + -DC_PRED, 2, 4, 6, -V_PRED, -H_PRED, -TM_PRED, -B_PRED }; -const vp8_tree_index vp8_kf_ymode_tree[8] = -{ - -B_PRED, 2, - 4, 6, - -DC_PRED, -V_PRED, - -H_PRED, -TM_PRED +const vp8_tree_index vp8_kf_ymode_tree[8] = { + -B_PRED, 2, 4, 6, -DC_PRED, -V_PRED, -H_PRED, -TM_PRED }; -const vp8_tree_index vp8_uv_mode_tree[6] = -{ - -DC_PRED, 2, - -V_PRED, 4, - -H_PRED, -TM_PRED -}; +const vp8_tree_index vp8_uv_mode_tree[6] = { -DC_PRED, 2, -V_PRED, + 4, -H_PRED, -TM_PRED }; -const vp8_tree_index vp8_mbsplit_tree[6] = -{ - -3, 2, - -2, 4, - -0, -1 -}; +const vp8_tree_index vp8_mbsplit_tree[6] = { -3, 2, -2, 4, -0, -1 }; -const vp8_tree_index vp8_mv_ref_tree[8] = -{ - -ZEROMV, 2, - -NEARESTMV, 4, - -NEARMV, 6, - -NEWMV, -SPLITMV -}; +const vp8_tree_index vp8_mv_ref_tree[8] = { -ZEROMV, 2, -NEARESTMV, 4, + -NEARMV, 6, -NEWMV, -SPLITMV }; -const vp8_tree_index vp8_sub_mv_ref_tree[6] = -{ - -LEFT4X4, 2, - -ABOVE4X4, 
4, - -ZERO4X4, -NEW4X4 -}; +const vp8_tree_index vp8_sub_mv_ref_tree[6] = { -LEFT4X4, 2, -ABOVE4X4, + 4, -ZERO4X4, -NEW4X4 }; -const vp8_tree_index vp8_small_mvtree [14] = -{ - 2, 8, - 4, 6, - -0, -1, - -2, -3, - 10, 12, - -4, -5, - -6, -7 -}; +const vp8_tree_index vp8_small_mvtree[14] = { 2, 8, 4, 6, -0, -1, -2, + -3, 10, 12, -4, -5, -6, -7 }; -void vp8_init_mbmode_probs(VP8_COMMON *x) -{ - memcpy(x->fc.ymode_prob, vp8_ymode_prob, sizeof(vp8_ymode_prob)); - memcpy(x->fc.uv_mode_prob, vp8_uv_mode_prob, sizeof(vp8_uv_mode_prob)); - memcpy(x->fc.sub_mv_ref_prob, sub_mv_ref_prob, sizeof(sub_mv_ref_prob)); +void vp8_init_mbmode_probs(VP8_COMMON *x) { + memcpy(x->fc.ymode_prob, vp8_ymode_prob, sizeof(vp8_ymode_prob)); + memcpy(x->fc.uv_mode_prob, vp8_uv_mode_prob, sizeof(vp8_uv_mode_prob)); + memcpy(x->fc.sub_mv_ref_prob, sub_mv_ref_prob, sizeof(sub_mv_ref_prob)); } -void vp8_default_bmode_probs(vp8_prob p [VP8_BINTRAMODES-1]) -{ - memcpy(p, vp8_bmode_prob, sizeof(vp8_bmode_prob)); +void vp8_default_bmode_probs(vp8_prob p[VP8_BINTRAMODES - 1]) { + memcpy(p, vp8_bmode_prob, sizeof(vp8_bmode_prob)); } - diff --git a/libs/libvpx/vp8/common/entropymode.h b/libs/libvpx/vp8/common/entropymode.h index 81bdfc4b8b..e0a17df10c 100644 --- a/libs/libvpx/vp8/common/entropymode.h +++ b/libs/libvpx/vp8/common/entropymode.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_COMMON_ENTROPYMODE_H_ #define VP8_COMMON_ENTROPYMODE_H_ @@ -19,42 +18,41 @@ extern "C" { #endif -typedef enum -{ - SUBMVREF_NORMAL, - SUBMVREF_LEFT_ZED, - SUBMVREF_ABOVE_ZED, - SUBMVREF_LEFT_ABOVE_SAME, - SUBMVREF_LEFT_ABOVE_ZED +typedef enum { + SUBMVREF_NORMAL, + SUBMVREF_LEFT_ZED, + SUBMVREF_ABOVE_ZED, + SUBMVREF_LEFT_ABOVE_SAME, + SUBMVREF_LEFT_ABOVE_ZED } sumvfref_t; typedef int vp8_mbsplit[16]; #define VP8_NUMMBSPLITS 4 -extern const vp8_mbsplit vp8_mbsplits [VP8_NUMMBSPLITS]; +extern const vp8_mbsplit vp8_mbsplits[VP8_NUMMBSPLITS]; -extern const int vp8_mbsplit_count [VP8_NUMMBSPLITS]; /* # of subsets */ +extern const int vp8_mbsplit_count[VP8_NUMMBSPLITS]; /* # of subsets */ -extern const vp8_prob vp8_mbsplit_probs [VP8_NUMMBSPLITS-1]; +extern const vp8_prob vp8_mbsplit_probs[VP8_NUMMBSPLITS - 1]; extern int vp8_mv_cont(const int_mv *l, const int_mv *a); #define SUBMVREF_COUNT 5 -extern const vp8_prob vp8_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP8_SUBMVREFS-1]; - - -extern const unsigned int vp8_kf_default_bmode_counts [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES]; +extern const vp8_prob vp8_sub_mv_ref_prob2[SUBMVREF_COUNT][VP8_SUBMVREFS - 1]; +extern const unsigned int vp8_kf_default_bmode_counts[VP8_BINTRAMODES] + [VP8_BINTRAMODES] + [VP8_BINTRAMODES]; extern const vp8_tree_index vp8_bmode_tree[]; -extern const vp8_tree_index vp8_ymode_tree[]; -extern const vp8_tree_index vp8_kf_ymode_tree[]; -extern const vp8_tree_index vp8_uv_mode_tree[]; +extern const vp8_tree_index vp8_ymode_tree[]; +extern const vp8_tree_index vp8_kf_ymode_tree[]; +extern const vp8_tree_index vp8_uv_mode_tree[]; -extern const vp8_tree_index vp8_mbsplit_tree[]; -extern const vp8_tree_index vp8_mv_ref_tree[]; -extern const vp8_tree_index vp8_sub_mv_ref_tree[]; +extern const vp8_tree_index vp8_mbsplit_tree[]; +extern const vp8_tree_index vp8_mv_ref_tree[]; +extern const vp8_tree_index vp8_sub_mv_ref_tree[]; extern const struct vp8_token_struct 
vp8_bmode_encodings[VP8_BINTRAMODES]; extern const struct vp8_token_struct vp8_ymode_encodings[VP8_YMODES]; @@ -65,7 +63,8 @@ extern const struct vp8_token_struct vp8_mbsplit_encodings[VP8_NUMMBSPLITS]; /* Inter mode values do not start at zero */ extern const struct vp8_token_struct vp8_mv_ref_encoding_array[VP8_MVREFS]; -extern const struct vp8_token_struct vp8_sub_mv_ref_encoding_array[VP8_SUBMVREFS]; +extern const struct vp8_token_struct + vp8_sub_mv_ref_encoding_array[VP8_SUBMVREFS]; extern const vp8_tree_index vp8_small_mvtree[]; @@ -73,13 +72,14 @@ extern const struct vp8_token_struct vp8_small_mvencodings[8]; /* Key frame default mode probs */ extern const vp8_prob vp8_kf_bmode_prob[VP8_BINTRAMODES][VP8_BINTRAMODES] -[VP8_BINTRAMODES-1]; -extern const vp8_prob vp8_kf_uv_mode_prob[VP8_UV_MODES-1]; -extern const vp8_prob vp8_kf_ymode_prob[VP8_YMODES-1]; + [VP8_BINTRAMODES - 1]; +extern const vp8_prob vp8_kf_uv_mode_prob[VP8_UV_MODES - 1]; +extern const vp8_prob vp8_kf_ymode_prob[VP8_YMODES - 1]; void vp8_init_mbmode_probs(VP8_COMMON *x); -void vp8_default_bmode_probs(vp8_prob dest [VP8_BINTRAMODES-1]); -void vp8_kf_default_bmode_probs(vp8_prob dest [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES-1]); +void vp8_default_bmode_probs(vp8_prob dest[VP8_BINTRAMODES - 1]); +void vp8_kf_default_bmode_probs(vp8_prob dest[VP8_BINTRAMODES][VP8_BINTRAMODES] + [VP8_BINTRAMODES - 1]); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/common/entropymv.c b/libs/libvpx/vp8/common/entropymv.c index e5df1f0955..fb4f0c889f 100644 --- a/libs/libvpx/vp8/common/entropymv.c +++ b/libs/libvpx/vp8/common/entropymv.c @@ -8,42 +8,40 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "entropymv.h" -const MV_CONTEXT vp8_mv_update_probs[2] = -{ - {{ - 237, - 246, - 253, 253, 254, 254, 254, 254, 254, - 254, 254, 254, 254, 254, 250, 250, 252, 254, 254 - }}, - {{ - 231, - 243, - 245, 253, 254, 254, 254, 254, 254, - 254, 254, 254, 254, 254, 251, 251, 254, 254, 254 - }} +/* clang-format off */ +const MV_CONTEXT vp8_mv_update_probs[2] = { + { { + 237, + 246, + 253, 253, 254, 254, 254, 254, 254, + 254, 254, 254, 254, 254, 250, 250, 252, 254, 254 + } }, + { { + 231, + 243, + 245, 253, 254, 254, 254, 254, 254, + 254, 254, 254, 254, 254, 251, 251, 254, 254, 254 + } } }; -const MV_CONTEXT vp8_default_mv_context[2] = -{ - {{ - /* row */ - 162, /* is short */ - 128, /* sign */ - 225, 146, 172, 147, 214, 39, 156, /* short tree */ - 128, 129, 132, 75, 145, 178, 206, 239, 254, 254 /* long bits */ - }}, +/* clang-format on */ +const MV_CONTEXT vp8_default_mv_context[2] = { + { { + /* row */ + 162, /* is short */ + 128, /* sign */ + 225, 146, 172, 147, 214, 39, 156, /* short tree */ + 128, 129, 132, 75, 145, 178, 206, 239, 254, 254 /* long bits */ + } }, + { { + /* same for column */ + 164, /* is short */ + 128, /**/ + 204, 170, 119, 235, 140, 230, 228, /**/ + 128, 130, 130, 74, 148, 180, 203, 236, 254, 254 /* long bits */ - {{ - /* same for column */ - 164, /* is short */ - 128, - 204, 170, 119, 235, 140, 230, 228, - 128, 130, 130, 74, 148, 180, 203, 236, 254, 254 /* long bits */ - - }} + } } }; diff --git a/libs/libvpx/vp8/common/entropymv.h b/libs/libvpx/vp8/common/entropymv.h index 42840d58ad..6373000903 100644 --- a/libs/libvpx/vp8/common/entropymv.h +++ b/libs/libvpx/vp8/common/entropymv.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_COMMON_ENTROPYMV_H_ #define VP8_COMMON_ENTROPYMV_H_ @@ -18,29 +17,27 @@ extern "C" { #endif -enum -{ - mv_max = 1023, /* max absolute value of a MV component */ - MVvals = (2 * mv_max) + 1, /* # possible values "" */ - mvfp_max = 255, /* max absolute value of a full pixel MV component */ - MVfpvals = (2 * mvfp_max) +1, /* # possible full pixel MV values */ +enum { + mv_max = 1023, /* max absolute value of a MV component */ + MVvals = (2 * mv_max) + 1, /* # possible values "" */ + mvfp_max = 255, /* max absolute value of a full pixel MV component */ + MVfpvals = (2 * mvfp_max) + 1, /* # possible full pixel MV values */ - mvlong_width = 10, /* Large MVs have 9 bit magnitudes */ - mvnum_short = 8, /* magnitudes 0 through 7 */ + mvlong_width = 10, /* Large MVs have 9 bit magnitudes */ + mvnum_short = 8, /* magnitudes 0 through 7 */ - /* probability offsets for coding each MV component */ + /* probability offsets for coding each MV component */ - mvpis_short = 0, /* short (<= 7) vs long (>= 8) */ - MVPsign, /* sign for non-zero */ - MVPshort, /* 8 short values = 7-position tree */ + mvpis_short = 0, /* short (<= 7) vs long (>= 8) */ + MVPsign, /* sign for non-zero */ + MVPshort, /* 8 short values = 7-position tree */ - MVPbits = MVPshort + mvnum_short - 1, /* mvlong_width long value bits */ - MVPcount = MVPbits + mvlong_width /* (with independent probabilities) */ + MVPbits = MVPshort + mvnum_short - 1, /* mvlong_width long value bits */ + MVPcount = MVPbits + mvlong_width /* (with independent probabilities) */ }; -typedef struct mv_context -{ - vp8_prob prob[MVPcount]; /* often come in row, col pairs */ +typedef struct mv_context { + vp8_prob prob[MVPcount]; /* often come in row, col pairs */ } MV_CONTEXT; extern const MV_CONTEXT vp8_mv_update_probs[2], vp8_default_mv_context[2]; diff --git a/libs/libvpx/vp8/common/extend.c b/libs/libvpx/vp8/common/extend.c index 2d938ad782..2d67b516be 100644 --- a/libs/libvpx/vp8/common/extend.c +++ 
b/libs/libvpx/vp8/common/extend.c @@ -8,181 +8,146 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include "extend.h" #include "vpx_mem/vpx_mem.h" +static void copy_and_extend_plane(unsigned char *s, /* source */ + int sp, /* source pitch */ + unsigned char *d, /* destination */ + int dp, /* destination pitch */ + int h, /* height */ + int w, /* width */ + int et, /* extend top border */ + int el, /* extend left border */ + int eb, /* extend bottom border */ + int er /* extend right border */ + ) { + int i; + unsigned char *src_ptr1, *src_ptr2; + unsigned char *dest_ptr1, *dest_ptr2; + int linesize; -static void copy_and_extend_plane -( - unsigned char *s, /* source */ - int sp, /* source pitch */ - unsigned char *d, /* destination */ - int dp, /* destination pitch */ - int h, /* height */ - int w, /* width */ - int et, /* extend top border */ - int el, /* extend left border */ - int eb, /* extend bottom border */ - int er /* extend right border */ -) -{ - int i; - unsigned char *src_ptr1, *src_ptr2; - unsigned char *dest_ptr1, *dest_ptr2; - int linesize; + /* copy the left and right most columns out */ + src_ptr1 = s; + src_ptr2 = s + w - 1; + dest_ptr1 = d - el; + dest_ptr2 = d + w; - /* copy the left and right most columns out */ - src_ptr1 = s; - src_ptr2 = s + w - 1; - dest_ptr1 = d - el; - dest_ptr2 = d + w; + for (i = 0; i < h; ++i) { + memset(dest_ptr1, src_ptr1[0], el); + memcpy(dest_ptr1 + el, src_ptr1, w); + memset(dest_ptr2, src_ptr2[0], er); + src_ptr1 += sp; + src_ptr2 += sp; + dest_ptr1 += dp; + dest_ptr2 += dp; + } - for (i = 0; i < h; i++) - { - memset(dest_ptr1, src_ptr1[0], el); - memcpy(dest_ptr1 + el, src_ptr1, w); - memset(dest_ptr2, src_ptr2[0], er); - src_ptr1 += sp; - src_ptr2 += sp; - dest_ptr1 += dp; - dest_ptr2 += dp; - } + /* Now copy the top and bottom lines into each line of the respective + * borders + */ + src_ptr1 = d - el; + src_ptr2 = d + dp * (h - 1) - el; + dest_ptr1 = d + dp * (-et) - el; + dest_ptr2 = 
d + dp * (h)-el; + linesize = el + er + w; - /* Now copy the top and bottom lines into each line of the respective - * borders - */ - src_ptr1 = d - el; - src_ptr2 = d + dp * (h - 1) - el; - dest_ptr1 = d + dp * (-et) - el; - dest_ptr2 = d + dp * (h) - el; - linesize = el + er + w; + for (i = 0; i < et; ++i) { + memcpy(dest_ptr1, src_ptr1, linesize); + dest_ptr1 += dp; + } - for (i = 0; i < et; i++) - { - memcpy(dest_ptr1, src_ptr1, linesize); - dest_ptr1 += dp; - } - - for (i = 0; i < eb; i++) - { - memcpy(dest_ptr2, src_ptr2, linesize); - dest_ptr2 += dp; - } + for (i = 0; i < eb; ++i) { + memcpy(dest_ptr2, src_ptr2, linesize); + dest_ptr2 += dp; + } } - void vp8_copy_and_extend_frame(YV12_BUFFER_CONFIG *src, - YV12_BUFFER_CONFIG *dst) -{ - int et = dst->border; - int el = dst->border; - int eb = dst->border + dst->y_height - src->y_height; - int er = dst->border + dst->y_width - src->y_width; + YV12_BUFFER_CONFIG *dst) { + int et = dst->border; + int el = dst->border; + int eb = dst->border + dst->y_height - src->y_height; + int er = dst->border + dst->y_width - src->y_width; - copy_and_extend_plane(src->y_buffer, src->y_stride, - dst->y_buffer, dst->y_stride, - src->y_height, src->y_width, - et, el, eb, er); + copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer, + dst->y_stride, src->y_height, src->y_width, et, el, eb, + er); - et = dst->border >> 1; - el = dst->border >> 1; - eb = (dst->border >> 1) + dst->uv_height - src->uv_height; - er = (dst->border >> 1) + dst->uv_width - src->uv_width; + et = dst->border >> 1; + el = dst->border >> 1; + eb = (dst->border >> 1) + dst->uv_height - src->uv_height; + er = (dst->border >> 1) + dst->uv_width - src->uv_width; - copy_and_extend_plane(src->u_buffer, src->uv_stride, - dst->u_buffer, dst->uv_stride, - src->uv_height, src->uv_width, - et, el, eb, er); + copy_and_extend_plane(src->u_buffer, src->uv_stride, dst->u_buffer, + dst->uv_stride, src->uv_height, src->uv_width, et, el, + eb, er); - 
copy_and_extend_plane(src->v_buffer, src->uv_stride, - dst->v_buffer, dst->uv_stride, - src->uv_height, src->uv_width, - et, el, eb, er); + copy_and_extend_plane(src->v_buffer, src->uv_stride, dst->v_buffer, + dst->uv_stride, src->uv_height, src->uv_width, et, el, + eb, er); } - void vp8_copy_and_extend_frame_with_rect(YV12_BUFFER_CONFIG *src, - YV12_BUFFER_CONFIG *dst, - int srcy, int srcx, - int srch, int srcw) -{ - int et = dst->border; - int el = dst->border; - int eb = dst->border + dst->y_height - src->y_height; - int er = dst->border + dst->y_width - src->y_width; - int src_y_offset = srcy * src->y_stride + srcx; - int dst_y_offset = srcy * dst->y_stride + srcx; - int src_uv_offset = ((srcy * src->uv_stride) >> 1) + (srcx >> 1); - int dst_uv_offset = ((srcy * dst->uv_stride) >> 1) + (srcx >> 1); + YV12_BUFFER_CONFIG *dst, int srcy, + int srcx, int srch, int srcw) { + int et = dst->border; + int el = dst->border; + int eb = dst->border + dst->y_height - src->y_height; + int er = dst->border + dst->y_width - src->y_width; + int src_y_offset = srcy * src->y_stride + srcx; + int dst_y_offset = srcy * dst->y_stride + srcx; + int src_uv_offset = ((srcy * src->uv_stride) >> 1) + (srcx >> 1); + int dst_uv_offset = ((srcy * dst->uv_stride) >> 1) + (srcx >> 1); - /* If the side is not touching the bounder then don't extend. */ - if (srcy) - et = 0; - if (srcx) - el = 0; - if (srcy + srch != src->y_height) - eb = 0; - if (srcx + srcw != src->y_width) - er = 0; + /* If the side is not touching the bounder then don't extend. 
*/ + if (srcy) et = 0; + if (srcx) el = 0; + if (srcy + srch != src->y_height) eb = 0; + if (srcx + srcw != src->y_width) er = 0; - copy_and_extend_plane(src->y_buffer + src_y_offset, - src->y_stride, - dst->y_buffer + dst_y_offset, - dst->y_stride, - srch, srcw, - et, el, eb, er); + copy_and_extend_plane(src->y_buffer + src_y_offset, src->y_stride, + dst->y_buffer + dst_y_offset, dst->y_stride, srch, srcw, + et, el, eb, er); - et = (et + 1) >> 1; - el = (el + 1) >> 1; - eb = (eb + 1) >> 1; - er = (er + 1) >> 1; - srch = (srch + 1) >> 1; - srcw = (srcw + 1) >> 1; + et = (et + 1) >> 1; + el = (el + 1) >> 1; + eb = (eb + 1) >> 1; + er = (er + 1) >> 1; + srch = (srch + 1) >> 1; + srcw = (srcw + 1) >> 1; - copy_and_extend_plane(src->u_buffer + src_uv_offset, - src->uv_stride, - dst->u_buffer + dst_uv_offset, - dst->uv_stride, - srch, srcw, - et, el, eb, er); + copy_and_extend_plane(src->u_buffer + src_uv_offset, src->uv_stride, + dst->u_buffer + dst_uv_offset, dst->uv_stride, srch, + srcw, et, el, eb, er); - copy_and_extend_plane(src->v_buffer + src_uv_offset, - src->uv_stride, - dst->v_buffer + dst_uv_offset, - dst->uv_stride, - srch, srcw, - et, el, eb, er); + copy_and_extend_plane(src->v_buffer + src_uv_offset, src->uv_stride, + dst->v_buffer + dst_uv_offset, dst->uv_stride, srch, + srcw, et, el, eb, er); } - /* note the extension is only for the last row, for intra prediction purpose */ -void vp8_extend_mb_row(YV12_BUFFER_CONFIG *ybf, - unsigned char *YPtr, - unsigned char *UPtr, - unsigned char *VPtr) -{ - int i; +void vp8_extend_mb_row(YV12_BUFFER_CONFIG *ybf, unsigned char *YPtr, + unsigned char *UPtr, unsigned char *VPtr) { + int i; - YPtr += ybf->y_stride * 14; - UPtr += ybf->uv_stride * 6; - VPtr += ybf->uv_stride * 6; + YPtr += ybf->y_stride * 14; + UPtr += ybf->uv_stride * 6; + VPtr += ybf->uv_stride * 6; - for (i = 0; i < 4; i++) - { - YPtr[i] = YPtr[-1]; - UPtr[i] = UPtr[-1]; - VPtr[i] = VPtr[-1]; - } + for (i = 0; i < 4; ++i) { + YPtr[i] = YPtr[-1]; + 
UPtr[i] = UPtr[-1]; + VPtr[i] = VPtr[-1]; + } - YPtr += ybf->y_stride; - UPtr += ybf->uv_stride; - VPtr += ybf->uv_stride; + YPtr += ybf->y_stride; + UPtr += ybf->uv_stride; + VPtr += ybf->uv_stride; - for (i = 0; i < 4; i++) - { - YPtr[i] = YPtr[-1]; - UPtr[i] = UPtr[-1]; - VPtr[i] = VPtr[-1]; - } + for (i = 0; i < 4; ++i) { + YPtr[i] = YPtr[-1]; + UPtr[i] = UPtr[-1]; + VPtr[i] = VPtr[-1]; + } } diff --git a/libs/libvpx/vp8/common/extend.h b/libs/libvpx/vp8/common/extend.h index 068f4ac523..7da5ce31da 100644 --- a/libs/libvpx/vp8/common/extend.h +++ b/libs/libvpx/vp8/common/extend.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_COMMON_EXTEND_H_ #define VP8_COMMON_EXTEND_H_ @@ -18,13 +17,13 @@ extern "C" { #endif -void vp8_extend_mb_row(YV12_BUFFER_CONFIG *ybf, unsigned char *YPtr, unsigned char *UPtr, unsigned char *VPtr); +void vp8_extend_mb_row(YV12_BUFFER_CONFIG *ybf, unsigned char *YPtr, + unsigned char *UPtr, unsigned char *VPtr); void vp8_copy_and_extend_frame(YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst); void vp8_copy_and_extend_frame_with_rect(YV12_BUFFER_CONFIG *src, - YV12_BUFFER_CONFIG *dst, - int srcy, int srcx, - int srch, int srcw); + YV12_BUFFER_CONFIG *dst, int srcy, + int srcx, int srch, int srcw); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/common/filter.c b/libs/libvpx/vp8/common/filter.c index 84c608effa..267498335c 100644 --- a/libs/libvpx/vp8/common/filter.c +++ b/libs/libvpx/vp8/common/filter.c @@ -8,243 +8,189 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - -#include "filter.h" +#include #include "./vp8_rtcd.h" +#include "vp8/common/filter.h" -DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[8][2]) = -{ - { 128, 0 }, - { 112, 16 }, - { 96, 32 }, - { 80, 48 }, - { 64, 64 }, - { 48, 80 }, - { 32, 96 }, - { 16, 112 } +DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[8][2]) = { + { 128, 0 }, { 112, 16 }, { 96, 32 }, { 80, 48 }, + { 64, 64 }, { 48, 80 }, { 32, 96 }, { 16, 112 } }; -DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters[8][6]) = -{ +DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters[8][6]) = { - { 0, 0, 128, 0, 0, 0 }, /* note that 1/8 pel positions are just as per alpha -0.5 bicubic */ - { 0, -6, 123, 12, -1, 0 }, - { 2, -11, 108, 36, -8, 1 }, /* New 1/4 pel 6 tap filter */ - { 0, -9, 93, 50, -6, 0 }, - { 3, -16, 77, 77, -16, 3 }, /* New 1/2 pel 6 tap filter */ - { 0, -6, 50, 93, -9, 0 }, - { 1, -8, 36, 108, -11, 2 }, /* New 1/4 pel 6 tap filter */ - { 0, -1, 12, 123, -6, 0 }, + { 0, 0, 128, 0, 0, + 0 }, /* note that 1/8 pel positions are just as per alpha -0.5 bicubic */ + { 0, -6, 123, 12, -1, 0 }, + { 2, -11, 108, 36, -8, 1 }, /* New 1/4 pel 6 tap filter */ + { 0, -9, 93, 50, -6, 0 }, + { 3, -16, 77, 77, -16, 3 }, /* New 1/2 pel 6 tap filter */ + { 0, -6, 50, 93, -9, 0 }, + { 1, -8, 36, 108, -11, 2 }, /* New 1/4 pel 6 tap filter */ + { 0, -1, 12, 123, -6, 0 }, }; -static void filter_block2d_first_pass -( - unsigned char *src_ptr, - int *output_ptr, - unsigned int src_pixels_per_line, - unsigned int pixel_step, - unsigned int output_height, - unsigned int output_width, - const short *vp8_filter -) -{ - unsigned int i, j; - int Temp; +static void filter_block2d_first_pass(unsigned char *src_ptr, int *output_ptr, + unsigned int src_pixels_per_line, + unsigned int pixel_step, + unsigned int output_height, + unsigned int output_width, + const short *vp8_filter) { + unsigned int i, j; + int Temp; - for (i = 0; i < output_height; i++) - { - for (j = 0; j < output_width; j++) - { - Temp = 
((int)src_ptr[-2 * (int)pixel_step] * vp8_filter[0]) + - ((int)src_ptr[-1 * (int)pixel_step] * vp8_filter[1]) + - ((int)src_ptr[0] * vp8_filter[2]) + - ((int)src_ptr[pixel_step] * vp8_filter[3]) + - ((int)src_ptr[2*pixel_step] * vp8_filter[4]) + - ((int)src_ptr[3*pixel_step] * vp8_filter[5]) + - (VP8_FILTER_WEIGHT >> 1); /* Rounding */ + for (i = 0; i < output_height; ++i) { + for (j = 0; j < output_width; ++j) { + Temp = ((int)src_ptr[-2 * (int)pixel_step] * vp8_filter[0]) + + ((int)src_ptr[-1 * (int)pixel_step] * vp8_filter[1]) + + ((int)src_ptr[0] * vp8_filter[2]) + + ((int)src_ptr[pixel_step] * vp8_filter[3]) + + ((int)src_ptr[2 * pixel_step] * vp8_filter[4]) + + ((int)src_ptr[3 * pixel_step] * vp8_filter[5]) + + (VP8_FILTER_WEIGHT >> 1); /* Rounding */ - /* Normalize back to 0-255 */ - Temp = Temp >> VP8_FILTER_SHIFT; + /* Normalize back to 0-255 */ + Temp = Temp >> VP8_FILTER_SHIFT; - if (Temp < 0) - Temp = 0; - else if (Temp > 255) - Temp = 255; + if (Temp < 0) { + Temp = 0; + } else if (Temp > 255) { + Temp = 255; + } - output_ptr[j] = Temp; - src_ptr++; - } - - /* Next row... */ - src_ptr += src_pixels_per_line - output_width; - output_ptr += output_width; + output_ptr[j] = Temp; + src_ptr++; } + + /* Next row... 
*/ + src_ptr += src_pixels_per_line - output_width; + output_ptr += output_width; + } } -static void filter_block2d_second_pass -( - int *src_ptr, - unsigned char *output_ptr, - int output_pitch, - unsigned int src_pixels_per_line, - unsigned int pixel_step, - unsigned int output_height, - unsigned int output_width, - const short *vp8_filter -) -{ - unsigned int i, j; - int Temp; +static void filter_block2d_second_pass(int *src_ptr, unsigned char *output_ptr, + int output_pitch, + unsigned int src_pixels_per_line, + unsigned int pixel_step, + unsigned int output_height, + unsigned int output_width, + const short *vp8_filter) { + unsigned int i, j; + int Temp; - for (i = 0; i < output_height; i++) - { - for (j = 0; j < output_width; j++) - { - /* Apply filter */ - Temp = ((int)src_ptr[-2 * (int)pixel_step] * vp8_filter[0]) + - ((int)src_ptr[-1 * (int)pixel_step] * vp8_filter[1]) + - ((int)src_ptr[0] * vp8_filter[2]) + - ((int)src_ptr[pixel_step] * vp8_filter[3]) + - ((int)src_ptr[2*pixel_step] * vp8_filter[4]) + - ((int)src_ptr[3*pixel_step] * vp8_filter[5]) + - (VP8_FILTER_WEIGHT >> 1); /* Rounding */ + for (i = 0; i < output_height; ++i) { + for (j = 0; j < output_width; ++j) { + /* Apply filter */ + Temp = ((int)src_ptr[-2 * (int)pixel_step] * vp8_filter[0]) + + ((int)src_ptr[-1 * (int)pixel_step] * vp8_filter[1]) + + ((int)src_ptr[0] * vp8_filter[2]) + + ((int)src_ptr[pixel_step] * vp8_filter[3]) + + ((int)src_ptr[2 * pixel_step] * vp8_filter[4]) + + ((int)src_ptr[3 * pixel_step] * vp8_filter[5]) + + (VP8_FILTER_WEIGHT >> 1); /* Rounding */ - /* Normalize back to 0-255 */ - Temp = Temp >> VP8_FILTER_SHIFT; + /* Normalize back to 0-255 */ + Temp = Temp >> VP8_FILTER_SHIFT; - if (Temp < 0) - Temp = 0; - else if (Temp > 255) - Temp = 255; + if (Temp < 0) { + Temp = 0; + } else if (Temp > 255) { + Temp = 255; + } - output_ptr[j] = (unsigned char)Temp; - src_ptr++; - } - - /* Start next row */ - src_ptr += src_pixels_per_line - output_width; - output_ptr += 
output_pitch; + output_ptr[j] = (unsigned char)Temp; + src_ptr++; } + + /* Start next row */ + src_ptr += src_pixels_per_line - output_width; + output_ptr += output_pitch; + } } +static void filter_block2d(unsigned char *src_ptr, unsigned char *output_ptr, + unsigned int src_pixels_per_line, int output_pitch, + const short *HFilter, const short *VFilter) { + int FData[9 * 4]; /* Temp data buffer used in filtering */ -static void filter_block2d -( - unsigned char *src_ptr, - unsigned char *output_ptr, - unsigned int src_pixels_per_line, - int output_pitch, - const short *HFilter, - const short *VFilter -) -{ - int FData[9*4]; /* Temp data buffer used in filtering */ + /* First filter 1-D horizontally... */ + filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData, + src_pixels_per_line, 1, 9, 4, HFilter); - /* First filter 1-D horizontally... */ - filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData, src_pixels_per_line, 1, 9, 4, HFilter); - - /* then filter verticaly... */ - filter_block2d_second_pass(FData + 8, output_ptr, output_pitch, 4, 4, 4, 4, VFilter); + /* then filter verticaly... 
*/ + filter_block2d_second_pass(FData + 8, output_ptr, output_pitch, 4, 4, 4, 4, + VFilter); } +void vp8_sixtap_predict4x4_c(unsigned char *src_ptr, int src_pixels_per_line, + int xoffset, int yoffset, unsigned char *dst_ptr, + int dst_pitch) { + const short *HFilter; + const short *VFilter; -void vp8_sixtap_predict4x4_c -( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch -) -{ - const short *HFilter; - const short *VFilter; + HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */ + VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */ - HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */ - VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */ - - filter_block2d(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter); + filter_block2d(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, + VFilter); } -void vp8_sixtap_predict8x8_c -( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch -) -{ - const short *HFilter; - const short *VFilter; - int FData[13*16]; /* Temp data buffer used in filtering */ +void vp8_sixtap_predict8x8_c(unsigned char *src_ptr, int src_pixels_per_line, + int xoffset, int yoffset, unsigned char *dst_ptr, + int dst_pitch) { + const short *HFilter; + const short *VFilter; + int FData[13 * 16]; /* Temp data buffer used in filtering */ - HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */ - VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */ + HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */ + VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */ - /* First filter 1-D horizontally... */ - filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData, src_pixels_per_line, 1, 13, 8, HFilter); - - - /* then filter verticaly... */ - filter_block2d_second_pass(FData + 16, dst_ptr, dst_pitch, 8, 8, 8, 8, VFilter); + /* First filter 1-D horizontally... 
*/ + filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData, + src_pixels_per_line, 1, 13, 8, HFilter); + /* then filter verticaly... */ + filter_block2d_second_pass(FData + 16, dst_ptr, dst_pitch, 8, 8, 8, 8, + VFilter); } -void vp8_sixtap_predict8x4_c -( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch -) -{ - const short *HFilter; - const short *VFilter; - int FData[13*16]; /* Temp data buffer used in filtering */ +void vp8_sixtap_predict8x4_c(unsigned char *src_ptr, int src_pixels_per_line, + int xoffset, int yoffset, unsigned char *dst_ptr, + int dst_pitch) { + const short *HFilter; + const short *VFilter; + int FData[13 * 16]; /* Temp data buffer used in filtering */ - HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */ - VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */ + HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */ + VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */ - /* First filter 1-D horizontally... */ - filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData, src_pixels_per_line, 1, 9, 8, HFilter); - - - /* then filter verticaly... */ - filter_block2d_second_pass(FData + 16, dst_ptr, dst_pitch, 8, 8, 4, 8, VFilter); + /* First filter 1-D horizontally... */ + filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData, + src_pixels_per_line, 1, 9, 8, HFilter); + /* then filter verticaly... 
*/ + filter_block2d_second_pass(FData + 16, dst_ptr, dst_pitch, 8, 8, 4, 8, + VFilter); } -void vp8_sixtap_predict16x16_c -( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch -) -{ - const short *HFilter; - const short *VFilter; - int FData[21*24]; /* Temp data buffer used in filtering */ +void vp8_sixtap_predict16x16_c(unsigned char *src_ptr, int src_pixels_per_line, + int xoffset, int yoffset, unsigned char *dst_ptr, + int dst_pitch) { + const short *HFilter; + const short *VFilter; + int FData[21 * 24]; /* Temp data buffer used in filtering */ + HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */ + VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */ - HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */ - VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */ - - /* First filter 1-D horizontally... */ - filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData, src_pixels_per_line, 1, 21, 16, HFilter); - - /* then filter verticaly... */ - filter_block2d_second_pass(FData + 32, dst_ptr, dst_pitch, 16, 16, 16, 16, VFilter); + /* First filter 1-D horizontally... */ + filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData, + src_pixels_per_line, 1, 21, 16, HFilter); + /* then filter verticaly... */ + filter_block2d_second_pass(FData + 32, dst_ptr, dst_pitch, 16, 16, 16, 16, + VFilter); } - /**************************************************************************** * * ROUTINE : filter_block2d_bil_first_pass @@ -267,33 +213,25 @@ void vp8_sixtap_predict16x16_c * Two filter taps should sum to VP8_FILTER_WEIGHT. 
* ****************************************************************************/ -static void filter_block2d_bil_first_pass -( - unsigned char *src_ptr, - unsigned short *dst_ptr, - unsigned int src_stride, - unsigned int height, - unsigned int width, - const short *vp8_filter -) -{ - unsigned int i, j; +static void filter_block2d_bil_first_pass( + unsigned char *src_ptr, unsigned short *dst_ptr, unsigned int src_stride, + unsigned int height, unsigned int width, const short *vp8_filter) { + unsigned int i, j; - for (i = 0; i < height; i++) - { - for (j = 0; j < width; j++) - { - /* Apply bilinear filter */ - dst_ptr[j] = (((int)src_ptr[0] * vp8_filter[0]) + - ((int)src_ptr[1] * vp8_filter[1]) + - (VP8_FILTER_WEIGHT / 2)) >> VP8_FILTER_SHIFT; - src_ptr++; - } - - /* Next row... */ - src_ptr += src_stride - width; - dst_ptr += width; + for (i = 0; i < height; ++i) { + for (j = 0; j < width; ++j) { + /* Apply bilinear filter */ + dst_ptr[j] = + (((int)src_ptr[0] * vp8_filter[0]) + + ((int)src_ptr[1] * vp8_filter[1]) + (VP8_FILTER_WEIGHT / 2)) >> + VP8_FILTER_SHIFT; + src_ptr++; } + + /* Next row... */ + src_ptr += src_stride - width; + dst_ptr += width; + } } /**************************************************************************** @@ -312,42 +250,35 @@ static void filter_block2d_bil_first_pass * * FUNCTION : Applies a 1-D 2-tap bi-linear filter to the source block * in the vertical direction to produce the filtered output - * block. Used to implement second-pass of 2-D separable filter. + * block. Used to implement second-pass of 2-D separable + * filter. * - * SPECIAL NOTES : Requires 32-bit input as produced by filter_block2d_bil_first_pass. + * SPECIAL NOTES : Requires 32-bit input as produced by + * filter_block2d_bil_first_pass. * Two filter taps should sum to VP8_FILTER_WEIGHT. 
* ****************************************************************************/ -static void filter_block2d_bil_second_pass -( - unsigned short *src_ptr, - unsigned char *dst_ptr, - int dst_pitch, - unsigned int height, - unsigned int width, - const short *vp8_filter -) -{ - unsigned int i, j; - int Temp; +static void filter_block2d_bil_second_pass(unsigned short *src_ptr, + unsigned char *dst_ptr, + int dst_pitch, unsigned int height, + unsigned int width, + const short *vp8_filter) { + unsigned int i, j; + int Temp; - for (i = 0; i < height; i++) - { - for (j = 0; j < width; j++) - { - /* Apply filter */ - Temp = ((int)src_ptr[0] * vp8_filter[0]) + - ((int)src_ptr[width] * vp8_filter[1]) + - (VP8_FILTER_WEIGHT / 2); - dst_ptr[j] = (unsigned int)(Temp >> VP8_FILTER_SHIFT); - src_ptr++; - } - - /* Next row... */ - dst_ptr += dst_pitch; + for (i = 0; i < height; ++i) { + for (j = 0; j < width; ++j) { + /* Apply filter */ + Temp = ((int)src_ptr[0] * vp8_filter[0]) + + ((int)src_ptr[width] * vp8_filter[1]) + (VP8_FILTER_WEIGHT / 2); + dst_ptr[j] = (unsigned int)(Temp >> VP8_FILTER_SHIFT); + src_ptr++; } -} + /* Next row... */ + dst_ptr += dst_pitch; + } +} /**************************************************************************** * @@ -356,7 +287,8 @@ static void filter_block2d_bil_second_pass * INPUTS : UINT8 *src_ptr : Pointer to source block. * UINT32 src_pitch : Stride of source block. * UINT32 dst_pitch : Stride of destination block. - * INT32 *HFilter : Array of 2 horizontal filter taps. + * INT32 *HFilter : Array of 2 horizontal filter + * taps. * INT32 *VFilter : Array of 2 vertical filter taps. 
* INT32 Width : Block width * INT32 Height : Block height @@ -372,122 +304,78 @@ static void filter_block2d_bil_second_pass * SPECIAL NOTES : The largest block size can be handled here is 16x16 * ****************************************************************************/ -static void filter_block2d_bil -( - unsigned char *src_ptr, - unsigned char *dst_ptr, - unsigned int src_pitch, - unsigned int dst_pitch, - const short *HFilter, - const short *VFilter, - int Width, - int Height -) -{ +static void filter_block2d_bil(unsigned char *src_ptr, unsigned char *dst_ptr, + unsigned int src_pitch, unsigned int dst_pitch, + const short *HFilter, const short *VFilter, + int Width, int Height) { + unsigned short FData[17 * 16]; /* Temp data buffer used in filtering */ - unsigned short FData[17*16]; /* Temp data buffer used in filtering */ + /* First filter 1-D horizontally... */ + filter_block2d_bil_first_pass(src_ptr, FData, src_pitch, Height + 1, Width, + HFilter); - /* First filter 1-D horizontally... */ - filter_block2d_bil_first_pass(src_ptr, FData, src_pitch, Height + 1, Width, HFilter); - - /* then 1-D vertically... */ - filter_block2d_bil_second_pass(FData, dst_ptr, dst_pitch, Height, Width, VFilter); + /* then 1-D vertically... 
*/ + filter_block2d_bil_second_pass(FData, dst_ptr, dst_pitch, Height, Width, + VFilter); } +void vp8_bilinear_predict4x4_c(unsigned char *src_ptr, int src_pixels_per_line, + int xoffset, int yoffset, unsigned char *dst_ptr, + int dst_pitch) { + const short *HFilter; + const short *VFilter; -void vp8_bilinear_predict4x4_c -( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch -) -{ - const short *HFilter; - const short *VFilter; - - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; -#if 0 - { - int i; - unsigned char temp1[16]; - unsigned char temp2[16]; - - bilinear_predict4x4_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, temp1, 4); - filter_block2d_bil(src_ptr, temp2, src_pixels_per_line, 4, HFilter, VFilter, 4, 4); - - for (i = 0; i < 16; i++) - { - if (temp1[i] != temp2[i]) - { - bilinear_predict4x4_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, temp1, 4); - filter_block2d_bil(src_ptr, temp2, src_pixels_per_line, 4, HFilter, VFilter, 4, 4); - } - } - } -#endif - filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 4, 4); + // This represents a copy and is not required to be handled by optimizations. 
+ assert((xoffset | yoffset) != 0); + HFilter = vp8_bilinear_filters[xoffset]; + VFilter = vp8_bilinear_filters[yoffset]; + filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, + VFilter, 4, 4); } -void vp8_bilinear_predict8x8_c -( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch -) -{ - const short *HFilter; - const short *VFilter; +void vp8_bilinear_predict8x8_c(unsigned char *src_ptr, int src_pixels_per_line, + int xoffset, int yoffset, unsigned char *dst_ptr, + int dst_pitch) { + const short *HFilter; + const short *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + assert((xoffset | yoffset) != 0); - filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 8, 8); + HFilter = vp8_bilinear_filters[xoffset]; + VFilter = vp8_bilinear_filters[yoffset]; + filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, + VFilter, 8, 8); } -void vp8_bilinear_predict8x4_c -( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *dst_ptr, - int dst_pitch -) -{ - const short *HFilter; - const short *VFilter; +void vp8_bilinear_predict8x4_c(unsigned char *src_ptr, int src_pixels_per_line, + int xoffset, int yoffset, unsigned char *dst_ptr, + int dst_pitch) { + const short *HFilter; + const short *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + assert((xoffset | yoffset) != 0); - filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 8, 4); + HFilter = vp8_bilinear_filters[xoffset]; + VFilter = vp8_bilinear_filters[yoffset]; + filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, + VFilter, 8, 4); } -void vp8_bilinear_predict16x16_c -( - unsigned char *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char 
*dst_ptr, - int dst_pitch -) -{ - const short *HFilter; - const short *VFilter; +void vp8_bilinear_predict16x16_c(unsigned char *src_ptr, + int src_pixels_per_line, int xoffset, + int yoffset, unsigned char *dst_ptr, + int dst_pitch) { + const short *HFilter; + const short *VFilter; - HFilter = vp8_bilinear_filters[xoffset]; - VFilter = vp8_bilinear_filters[yoffset]; + assert((xoffset | yoffset) != 0); - filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 16, 16); + HFilter = vp8_bilinear_filters[xoffset]; + VFilter = vp8_bilinear_filters[yoffset]; + + filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, + VFilter, 16, 16); } diff --git a/libs/libvpx/vp8/common/filter.h b/libs/libvpx/vp8/common/filter.h index cfba775fce..f1d5ece4a5 100644 --- a/libs/libvpx/vp8/common/filter.h +++ b/libs/libvpx/vp8/common/filter.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_COMMON_FILTER_H_ #define VP8_COMMON_FILTER_H_ @@ -20,7 +19,7 @@ extern "C" { #define BLOCK_HEIGHT_WIDTH 4 #define VP8_FILTER_WEIGHT 128 -#define VP8_FILTER_SHIFT 7 +#define VP8_FILTER_SHIFT 7 extern DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[8][2]); extern DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters[8][6]); diff --git a/libs/libvpx/vp8/common/findnearmv.c b/libs/libvpx/vp8/common/findnearmv.c index e8ee40f56c..f40d2c6bde 100644 --- a/libs/libvpx/vp8/common/findnearmv.c +++ b/libs/libvpx/vp8/common/findnearmv.c @@ -8,186 +8,150 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "findnearmv.h" const unsigned char vp8_mbsplit_offset[4][16] = { - { 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - { 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - { 0, 2, 8, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + { 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 2, 8, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } }; /* Predict motion vectors using those from already-decoded nearby blocks. Note that we only consider one 4x4 subblock from each candidate 16x16 macroblock. */ -void vp8_find_near_mvs -( - MACROBLOCKD *xd, - const MODE_INFO *here, - int_mv *nearest, - int_mv *nearby, - int_mv *best_mv, - int cnt[4], - int refframe, - int *ref_frame_sign_bias -) -{ - const MODE_INFO *above = here - xd->mode_info_stride; - const MODE_INFO *left = here - 1; - const MODE_INFO *aboveleft = above - 1; - int_mv near_mvs[4]; - int_mv *mv = near_mvs; - int *cntx = cnt; - enum {CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV}; +void vp8_find_near_mvs(MACROBLOCKD *xd, const MODE_INFO *here, int_mv *nearest, + int_mv *nearby, int_mv *best_mv, int cnt[4], + int refframe, int *ref_frame_sign_bias) { + const MODE_INFO *above = here - xd->mode_info_stride; + const MODE_INFO *left = here - 1; + const MODE_INFO *aboveleft = above - 1; + int_mv near_mvs[4]; + int_mv *mv = near_mvs; + int *cntx = cnt; + enum { CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV }; - /* Zero accumulators */ - mv[0].as_int = mv[1].as_int = mv[2].as_int = 0; - cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0; + /* Zero accumulators */ + mv[0].as_int = mv[1].as_int = mv[2].as_int = 0; + cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0; - /* Process above */ - if (above->mbmi.ref_frame != INTRA_FRAME) - { - if (above->mbmi.mv.as_int) - { - (++mv)->as_int = above->mbmi.mv.as_int; - mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], refframe, 
mv, ref_frame_sign_bias); - ++cntx; - } - - *cntx += 2; + /* Process above */ + if (above->mbmi.ref_frame != INTRA_FRAME) { + if (above->mbmi.mv.as_int) { + (++mv)->as_int = above->mbmi.mv.as_int; + mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], refframe, mv, + ref_frame_sign_bias); + ++cntx; } - /* Process left */ - if (left->mbmi.ref_frame != INTRA_FRAME) - { - if (left->mbmi.mv.as_int) - { - int_mv this_mv; + *cntx += 2; + } - this_mv.as_int = left->mbmi.mv.as_int; - mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], refframe, &this_mv, ref_frame_sign_bias); + /* Process left */ + if (left->mbmi.ref_frame != INTRA_FRAME) { + if (left->mbmi.mv.as_int) { + int_mv this_mv; - if (this_mv.as_int != mv->as_int) - { - (++mv)->as_int = this_mv.as_int; - ++cntx; - } + this_mv.as_int = left->mbmi.mv.as_int; + mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], refframe, &this_mv, + ref_frame_sign_bias); - *cntx += 2; - } - else - cnt[CNT_INTRA] += 2; + if (this_mv.as_int != mv->as_int) { + (++mv)->as_int = this_mv.as_int; + ++cntx; + } + + *cntx += 2; + } else { + cnt[CNT_INTRA] += 2; } + } - /* Process above left */ - if (aboveleft->mbmi.ref_frame != INTRA_FRAME) - { - if (aboveleft->mbmi.mv.as_int) - { - int_mv this_mv; + /* Process above left */ + if (aboveleft->mbmi.ref_frame != INTRA_FRAME) { + if (aboveleft->mbmi.mv.as_int) { + int_mv this_mv; - this_mv.as_int = aboveleft->mbmi.mv.as_int; - mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], refframe, &this_mv, ref_frame_sign_bias); + this_mv.as_int = aboveleft->mbmi.mv.as_int; + mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], refframe, + &this_mv, ref_frame_sign_bias); - if (this_mv.as_int != mv->as_int) - { - (++mv)->as_int = this_mv.as_int; - ++cntx; - } + if (this_mv.as_int != mv->as_int) { + (++mv)->as_int = this_mv.as_int; + ++cntx; + } - *cntx += 1; - } - else - cnt[CNT_INTRA] += 1; + *cntx += 1; + } else { + cnt[CNT_INTRA] += 1; } + } - /* If we have three distinct MV's ... 
*/ - if (cnt[CNT_SPLITMV]) - { - /* See if above-left MV can be merged with NEAREST */ - if (mv->as_int == near_mvs[CNT_NEAREST].as_int) - cnt[CNT_NEAREST] += 1; - } + /* If we have three distinct MV's ... */ + if (cnt[CNT_SPLITMV]) { + /* See if above-left MV can be merged with NEAREST */ + if (mv->as_int == near_mvs[CNT_NEAREST].as_int) cnt[CNT_NEAREST] += 1; + } - cnt[CNT_SPLITMV] = ((above->mbmi.mode == SPLITMV) - + (left->mbmi.mode == SPLITMV)) * 2 - + (aboveleft->mbmi.mode == SPLITMV); + cnt[CNT_SPLITMV] = + ((above->mbmi.mode == SPLITMV) + (left->mbmi.mode == SPLITMV)) * 2 + + (aboveleft->mbmi.mode == SPLITMV); - /* Swap near and nearest if necessary */ - if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) - { - int tmp; - tmp = cnt[CNT_NEAREST]; - cnt[CNT_NEAREST] = cnt[CNT_NEAR]; - cnt[CNT_NEAR] = tmp; - tmp = near_mvs[CNT_NEAREST].as_int; - near_mvs[CNT_NEAREST].as_int = near_mvs[CNT_NEAR].as_int; - near_mvs[CNT_NEAR].as_int = tmp; - } + /* Swap near and nearest if necessary */ + if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) { + int tmp; + tmp = cnt[CNT_NEAREST]; + cnt[CNT_NEAREST] = cnt[CNT_NEAR]; + cnt[CNT_NEAR] = tmp; + tmp = near_mvs[CNT_NEAREST].as_int; + near_mvs[CNT_NEAREST].as_int = near_mvs[CNT_NEAR].as_int; + near_mvs[CNT_NEAR].as_int = tmp; + } - /* Use near_mvs[0] to store the "best" MV */ - if (cnt[CNT_NEAREST] >= cnt[CNT_INTRA]) - near_mvs[CNT_INTRA] = near_mvs[CNT_NEAREST]; + /* Use near_mvs[0] to store the "best" MV */ + if (cnt[CNT_NEAREST] >= cnt[CNT_INTRA]) { + near_mvs[CNT_INTRA] = near_mvs[CNT_NEAREST]; + } - /* Set up return values */ - best_mv->as_int = near_mvs[0].as_int; - nearest->as_int = near_mvs[CNT_NEAREST].as_int; - nearby->as_int = near_mvs[CNT_NEAR].as_int; + /* Set up return values */ + best_mv->as_int = near_mvs[0].as_int; + nearest->as_int = near_mvs[CNT_NEAREST].as_int; + nearby->as_int = near_mvs[CNT_NEAR].as_int; } - -static void invert_and_clamp_mvs(int_mv *inv, int_mv *src, MACROBLOCKD *xd) -{ - inv->as_mv.row = src->as_mv.row * -1; - 
inv->as_mv.col = src->as_mv.col * -1; - vp8_clamp_mv2(inv, xd); - vp8_clamp_mv2(src, xd); +static void invert_and_clamp_mvs(int_mv *inv, int_mv *src, MACROBLOCKD *xd) { + inv->as_mv.row = src->as_mv.row * -1; + inv->as_mv.col = src->as_mv.col * -1; + vp8_clamp_mv2(inv, xd); + vp8_clamp_mv2(src, xd); } +int vp8_find_near_mvs_bias(MACROBLOCKD *xd, const MODE_INFO *here, + int_mv mode_mv_sb[2][MB_MODE_COUNT], + int_mv best_mv_sb[2], int cnt[4], int refframe, + int *ref_frame_sign_bias) { + int sign_bias = ref_frame_sign_bias[refframe]; -int vp8_find_near_mvs_bias -( - MACROBLOCKD *xd, - const MODE_INFO *here, - int_mv mode_mv_sb[2][MB_MODE_COUNT], - int_mv best_mv_sb[2], - int cnt[4], - int refframe, - int *ref_frame_sign_bias -) -{ - int sign_bias = ref_frame_sign_bias[refframe]; + vp8_find_near_mvs(xd, here, &mode_mv_sb[sign_bias][NEARESTMV], + &mode_mv_sb[sign_bias][NEARMV], &best_mv_sb[sign_bias], cnt, + refframe, ref_frame_sign_bias); - vp8_find_near_mvs(xd, - here, - &mode_mv_sb[sign_bias][NEARESTMV], - &mode_mv_sb[sign_bias][NEARMV], - &best_mv_sb[sign_bias], - cnt, - refframe, - ref_frame_sign_bias); + invert_and_clamp_mvs(&mode_mv_sb[!sign_bias][NEARESTMV], + &mode_mv_sb[sign_bias][NEARESTMV], xd); + invert_and_clamp_mvs(&mode_mv_sb[!sign_bias][NEARMV], + &mode_mv_sb[sign_bias][NEARMV], xd); + invert_and_clamp_mvs(&best_mv_sb[!sign_bias], &best_mv_sb[sign_bias], xd); - invert_and_clamp_mvs(&mode_mv_sb[!sign_bias][NEARESTMV], - &mode_mv_sb[sign_bias][NEARESTMV], xd); - invert_and_clamp_mvs(&mode_mv_sb[!sign_bias][NEARMV], - &mode_mv_sb[sign_bias][NEARMV], xd); - invert_and_clamp_mvs(&best_mv_sb[!sign_bias], - &best_mv_sb[sign_bias], xd); - - return sign_bias; + return sign_bias; } - -vp8_prob *vp8_mv_ref_probs( - vp8_prob p[VP8_MVREFS-1], const int near_mv_ref_ct[4] -) -{ - p[0] = vp8_mode_contexts [near_mv_ref_ct[0]] [0]; - p[1] = vp8_mode_contexts [near_mv_ref_ct[1]] [1]; - p[2] = vp8_mode_contexts [near_mv_ref_ct[2]] [2]; - p[3] = vp8_mode_contexts 
[near_mv_ref_ct[3]] [3]; - /*p[3] = vp8_mode_contexts [near_mv_ref_ct[1] + near_mv_ref_ct[2] + near_mv_ref_ct[3]] [3];*/ - return p; +vp8_prob *vp8_mv_ref_probs(vp8_prob p[VP8_MVREFS - 1], + const int near_mv_ref_ct[4]) { + p[0] = vp8_mode_contexts[near_mv_ref_ct[0]][0]; + p[1] = vp8_mode_contexts[near_mv_ref_ct[1]][1]; + p[2] = vp8_mode_contexts[near_mv_ref_ct[2]][2]; + p[3] = vp8_mode_contexts[near_mv_ref_ct[3]][3]; + /* p[3] = vp8_mode_contexts[near_mv_ref_ct[1] + near_mv_ref_ct[2] + + near_mv_ref_ct[3]][3]; */ + return p; } - diff --git a/libs/libvpx/vp8/common/findnearmv.h b/libs/libvpx/vp8/common/findnearmv.h index 155847ca24..c1eaa26980 100644 --- a/libs/libvpx/vp8/common/findnearmv.h +++ b/libs/libvpx/vp8/common/findnearmv.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_COMMON_FINDNEARMV_H_ #define VP8_COMMON_FINDNEARMV_H_ @@ -22,169 +21,127 @@ extern "C" { #endif - static INLINE void mv_bias(int refmb_ref_frame_sign_bias, int refframe, - int_mv *mvp, const int *ref_frame_sign_bias) -{ - if (refmb_ref_frame_sign_bias != ref_frame_sign_bias[refframe]) - { - mvp->as_mv.row *= -1; - mvp->as_mv.col *= -1; - } + int_mv *mvp, const int *ref_frame_sign_bias) { + if (refmb_ref_frame_sign_bias != ref_frame_sign_bias[refframe]) { + mvp->as_mv.row *= -1; + mvp->as_mv.col *= -1; + } } #define LEFT_TOP_MARGIN (16 << 3) #define RIGHT_BOTTOM_MARGIN (16 << 3) -static INLINE void vp8_clamp_mv2(int_mv *mv, const MACROBLOCKD *xd) -{ - if (mv->as_mv.col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN)) - mv->as_mv.col = xd->mb_to_left_edge - LEFT_TOP_MARGIN; - else if (mv->as_mv.col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN) - mv->as_mv.col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN; +static INLINE void vp8_clamp_mv2(int_mv *mv, const MACROBLOCKD *xd) { + if (mv->as_mv.col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN)) { + mv->as_mv.col = xd->mb_to_left_edge - LEFT_TOP_MARGIN; + } else if (mv->as_mv.col > xd->mb_to_right_edge + 
RIGHT_BOTTOM_MARGIN) { + mv->as_mv.col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN; + } - if (mv->as_mv.row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN)) - mv->as_mv.row = xd->mb_to_top_edge - LEFT_TOP_MARGIN; - else if (mv->as_mv.row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN) - mv->as_mv.row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN; + if (mv->as_mv.row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN)) { + mv->as_mv.row = xd->mb_to_top_edge - LEFT_TOP_MARGIN; + } else if (mv->as_mv.row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN) { + mv->as_mv.row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN; + } } static INLINE void vp8_clamp_mv(int_mv *mv, int mb_to_left_edge, int mb_to_right_edge, int mb_to_top_edge, - int mb_to_bottom_edge) -{ - mv->as_mv.col = (mv->as_mv.col < mb_to_left_edge) ? - mb_to_left_edge : mv->as_mv.col; - mv->as_mv.col = (mv->as_mv.col > mb_to_right_edge) ? - mb_to_right_edge : mv->as_mv.col; - mv->as_mv.row = (mv->as_mv.row < mb_to_top_edge) ? - mb_to_top_edge : mv->as_mv.row; - mv->as_mv.row = (mv->as_mv.row > mb_to_bottom_edge) ? - mb_to_bottom_edge : mv->as_mv.row; + int mb_to_bottom_edge) { + mv->as_mv.col = + (mv->as_mv.col < mb_to_left_edge) ? mb_to_left_edge : mv->as_mv.col; + mv->as_mv.col = + (mv->as_mv.col > mb_to_right_edge) ? mb_to_right_edge : mv->as_mv.col; + mv->as_mv.row = + (mv->as_mv.row < mb_to_top_edge) ? mb_to_top_edge : mv->as_mv.row; + mv->as_mv.row = + (mv->as_mv.row > mb_to_bottom_edge) ? 
mb_to_bottom_edge : mv->as_mv.row; } static INLINE unsigned int vp8_check_mv_bounds(int_mv *mv, int mb_to_left_edge, int mb_to_right_edge, int mb_to_top_edge, - int mb_to_bottom_edge) -{ - unsigned int need_to_clamp; - need_to_clamp = (mv->as_mv.col < mb_to_left_edge); - need_to_clamp |= (mv->as_mv.col > mb_to_right_edge); - need_to_clamp |= (mv->as_mv.row < mb_to_top_edge); - need_to_clamp |= (mv->as_mv.row > mb_to_bottom_edge); - return need_to_clamp; + int mb_to_bottom_edge) { + unsigned int need_to_clamp; + need_to_clamp = (mv->as_mv.col < mb_to_left_edge); + need_to_clamp |= (mv->as_mv.col > mb_to_right_edge); + need_to_clamp |= (mv->as_mv.row < mb_to_top_edge); + need_to_clamp |= (mv->as_mv.row > mb_to_bottom_edge); + return need_to_clamp; } -void vp8_find_near_mvs -( - MACROBLOCKD *xd, - const MODE_INFO *here, - int_mv *nearest, int_mv *nearby, int_mv *best, - int near_mv_ref_cts[4], - int refframe, - int *ref_frame_sign_bias -); +void vp8_find_near_mvs(MACROBLOCKD *xd, const MODE_INFO *here, int_mv *nearest, + int_mv *nearby, int_mv *best, int near_mv_ref_cts[4], + int refframe, int *ref_frame_sign_bias); +int vp8_find_near_mvs_bias(MACROBLOCKD *xd, const MODE_INFO *here, + int_mv mode_mv_sb[2][MB_MODE_COUNT], + int_mv best_mv_sb[2], int cnt[4], int refframe, + int *ref_frame_sign_bias); -int vp8_find_near_mvs_bias -( - MACROBLOCKD *xd, - const MODE_INFO *here, - int_mv mode_mv_sb[2][MB_MODE_COUNT], - int_mv best_mv_sb[2], - int cnt[4], - int refframe, - int *ref_frame_sign_bias -); - - -vp8_prob *vp8_mv_ref_probs( - vp8_prob p[VP8_MVREFS-1], const int near_mv_ref_ct[4] -); +vp8_prob *vp8_mv_ref_probs(vp8_prob p[VP8_MVREFS - 1], + const int near_mv_ref_ct[4]); extern const unsigned char vp8_mbsplit_offset[4][16]; +static INLINE uint32_t left_block_mv(const MODE_INFO *cur_mb, int b) { + if (!(b & 3)) { + /* On L edge, get from MB to left of us */ + --cur_mb; -static INLINE int left_block_mv(const MODE_INFO *cur_mb, int b) -{ - if (!(b & 3)) - { - /* On L 
edge, get from MB to left of us */ - --cur_mb; + if (cur_mb->mbmi.mode != SPLITMV) return cur_mb->mbmi.mv.as_int; + b += 4; + } - if(cur_mb->mbmi.mode != SPLITMV) - return cur_mb->mbmi.mv.as_int; - b += 4; - } - - return (cur_mb->bmi + b - 1)->mv.as_int; + return (cur_mb->bmi + b - 1)->mv.as_int; } -static INLINE int above_block_mv(const MODE_INFO *cur_mb, int b, int mi_stride) -{ - if (!(b >> 2)) - { - /* On top edge, get from MB above us */ - cur_mb -= mi_stride; +static INLINE uint32_t above_block_mv(const MODE_INFO *cur_mb, int b, + int mi_stride) { + if (!(b >> 2)) { + /* On top edge, get from MB above us */ + cur_mb -= mi_stride; - if(cur_mb->mbmi.mode != SPLITMV) - return cur_mb->mbmi.mv.as_int; - b += 16; - } + if (cur_mb->mbmi.mode != SPLITMV) return cur_mb->mbmi.mv.as_int; + b += 16; + } - return (cur_mb->bmi + (b - 4))->mv.as_int; + return (cur_mb->bmi + (b - 4))->mv.as_int; } -static INLINE B_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, int b) -{ - if (!(b & 3)) - { - /* On L edge, get from MB to left of us */ - --cur_mb; - switch (cur_mb->mbmi.mode) - { - case B_PRED: - return (cur_mb->bmi + b + 3)->as_mode; - case DC_PRED: - return B_DC_PRED; - case V_PRED: - return B_VE_PRED; - case H_PRED: - return B_HE_PRED; - case TM_PRED: - return B_TM_PRED; - default: - return B_DC_PRED; - } +static INLINE B_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, + int b) { + if (!(b & 3)) { + /* On L edge, get from MB to left of us */ + --cur_mb; + switch (cur_mb->mbmi.mode) { + case B_PRED: return (cur_mb->bmi + b + 3)->as_mode; + case DC_PRED: return B_DC_PRED; + case V_PRED: return B_VE_PRED; + case H_PRED: return B_HE_PRED; + case TM_PRED: return B_TM_PRED; + default: return B_DC_PRED; } + } - return (cur_mb->bmi + b - 1)->as_mode; + return (cur_mb->bmi + b - 1)->as_mode; } static INLINE B_PREDICTION_MODE above_block_mode(const MODE_INFO *cur_mb, int b, - int mi_stride) -{ - if (!(b >> 2)) - { - /* On top edge, get from MB above us */ - cur_mb 
-= mi_stride; + int mi_stride) { + if (!(b >> 2)) { + /* On top edge, get from MB above us */ + cur_mb -= mi_stride; - switch (cur_mb->mbmi.mode) - { - case B_PRED: - return (cur_mb->bmi + b + 12)->as_mode; - case DC_PRED: - return B_DC_PRED; - case V_PRED: - return B_VE_PRED; - case H_PRED: - return B_HE_PRED; - case TM_PRED: - return B_TM_PRED; - default: - return B_DC_PRED; - } + switch (cur_mb->mbmi.mode) { + case B_PRED: return (cur_mb->bmi + b + 12)->as_mode; + case DC_PRED: return B_DC_PRED; + case V_PRED: return B_VE_PRED; + case H_PRED: return B_HE_PRED; + case TM_PRED: return B_TM_PRED; + default: return B_DC_PRED; } + } - return (cur_mb->bmi + b - 4)->as_mode; + return (cur_mb->bmi + b - 4)->as_mode; } #ifdef __cplusplus diff --git a/libs/libvpx/vp8/common/generic/systemdependent.c b/libs/libvpx/vp8/common/generic/systemdependent.c index 28dc262ae5..89abd41c09 100644 --- a/libs/libvpx/vp8/common/generic/systemdependent.c +++ b/libs/libvpx/vp8/common/generic/systemdependent.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "vpx_config.h" #include "vp8_rtcd.h" #if ARCH_ARM @@ -24,7 +23,7 @@ #include #elif defined(_WIN32) #include -typedef void (WINAPI *PGNSI)(LPSYSTEM_INFO); +typedef void(WINAPI *PGNSI)(LPSYSTEM_INFO); #elif defined(__OS2__) #define INCL_DOS #define INCL_DOSSPINLOCK @@ -33,72 +32,69 @@ typedef void (WINAPI *PGNSI)(LPSYSTEM_INFO); #endif #if CONFIG_MULTITHREAD -static int get_cpu_count() -{ - int core_count = 16; +static int get_cpu_count() { + int core_count = 16; #if HAVE_UNISTD_H && !defined(__OS2__) #if defined(_SC_NPROCESSORS_ONLN) - core_count = sysconf(_SC_NPROCESSORS_ONLN); + core_count = (int)sysconf(_SC_NPROCESSORS_ONLN); #elif defined(_SC_NPROC_ONLN) - core_count = sysconf(_SC_NPROC_ONLN); + core_count = (int)sysconf(_SC_NPROC_ONLN); #endif #elif defined(_WIN32) - { + { #if _WIN32_WINNT >= 0x0501 - SYSTEM_INFO sysinfo; - GetNativeSystemInfo(&sysinfo); + SYSTEM_INFO sysinfo; + GetNativeSystemInfo(&sysinfo); #else - PGNSI pGNSI; - SYSTEM_INFO sysinfo; + PGNSI pGNSI; + SYSTEM_INFO sysinfo; - /* Call GetNativeSystemInfo if supported or - * GetSystemInfo otherwise. */ + /* Call GetNativeSystemInfo if supported or + * GetSystemInfo otherwise. 
*/ - pGNSI = (PGNSI) GetProcAddress( - GetModuleHandle(TEXT("kernel32.dll")), "GetNativeSystemInfo"); - if (pGNSI != NULL) - pGNSI(&sysinfo); - else - GetSystemInfo(&sysinfo); + pGNSI = (PGNSI)GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), + "GetNativeSystemInfo"); + if (pGNSI != NULL) + pGNSI(&sysinfo); + else + GetSystemInfo(&sysinfo); #endif - core_count = sysinfo.dwNumberOfProcessors; - } + core_count = (int)sysinfo.dwNumberOfProcessors; + } #elif defined(__OS2__) - { - ULONG proc_id; - ULONG status; + { + ULONG proc_id; + ULONG status; - core_count = 0; - for (proc_id = 1; ; proc_id++) - { - if (DosGetProcessorStatus(proc_id, &status)) - break; + core_count = 0; + for (proc_id = 1;; ++proc_id) { + if (DosGetProcessorStatus(proc_id, &status)) break; - if (status == PROC_ONLINE) - core_count++; - } + if (status == PROC_ONLINE) core_count++; } + } #else - /* other platforms */ +/* other platforms */ #endif - return core_count > 0 ? core_count : 1; + return core_count > 0 ? core_count : 1; } #endif -void vp8_clear_system_state_c() {}; +void vp8_clear_system_state_c(){}; -void vp8_machine_specific_config(VP8_COMMON *ctx) -{ +void vp8_machine_specific_config(VP8_COMMON *ctx) { #if CONFIG_MULTITHREAD - ctx->processor_core_count = get_cpu_count(); + ctx->processor_core_count = get_cpu_count(); +#else + (void)ctx; #endif /* CONFIG_MULTITHREAD */ #if ARCH_ARM - ctx->cpu_caps = arm_cpu_caps(); + ctx->cpu_caps = arm_cpu_caps(); #elif ARCH_X86 || ARCH_X86_64 - ctx->cpu_caps = x86_simd_caps(); + ctx->cpu_caps = x86_simd_caps(); #endif } diff --git a/libs/libvpx/vp8/common/header.h b/libs/libvpx/vp8/common/header.h index e27bca16bd..1df01fc6fa 100644 --- a/libs/libvpx/vp8/common/header.h +++ b/libs/libvpx/vp8/common/header.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_COMMON_HEADER_H_ #define VP8_COMMON_HEADER_H_ @@ -17,22 +16,21 @@ extern "C" { #endif /* 24 bits total */ -typedef struct -{ - unsigned int type: 1; - unsigned int version: 3; - unsigned int show_frame: 1; +typedef struct { + unsigned int type : 1; + unsigned int version : 3; + unsigned int show_frame : 1; - /* Allow 2^20 bytes = 8 megabits for first partition */ + /* Allow 2^20 bytes = 8 megabits for first partition */ - unsigned int first_partition_length_in_bytes: 19; + unsigned int first_partition_length_in_bytes : 19; #ifdef PACKET_TESTING - unsigned int frame_number; - unsigned int update_gold: 1; - unsigned int uses_gold: 1; - unsigned int update_last: 1; - unsigned int uses_last: 1; + unsigned int frame_number; + unsigned int update_gold : 1; + unsigned int uses_gold : 1; + unsigned int update_last : 1; + unsigned int uses_last : 1; #endif } VP8_HEADER; @@ -43,7 +41,6 @@ typedef struct #define VP8_HEADER_SIZE 3 #endif - #ifdef __cplusplus } // extern "C" #endif diff --git a/libs/libvpx/vp8/common/idct_blk.c b/libs/libvpx/vp8/common/idct_blk.c index 8aa7d9bf0f..ff9f3eb7f2 100644 --- a/libs/libvpx/vp8/common/idct_blk.c +++ b/libs/libvpx/vp8/common/idct_blk.c @@ -12,79 +12,67 @@ #include "vp8_rtcd.h" #include "vpx_mem/vpx_mem.h" -void vp8_dequant_idct_add_c(short *input, short *dq, - unsigned char *dest, int stride); -void vp8_dc_only_idct_add_c(short input_dc, unsigned char * pred, +void vp8_dequant_idct_add_c(short *input, short *dq, unsigned char *dest, + int stride); +void vp8_dc_only_idct_add_c(short input_dc, unsigned char *pred, int pred_stride, unsigned char *dst_ptr, int dst_stride); -void vp8_dequant_idct_add_y_block_c - (short *q, short *dq, - unsigned char *dst, int stride, char *eobs) -{ - int i, j; +void vp8_dequant_idct_add_y_block_c(short *q, short *dq, unsigned char *dst, + int stride, char *eobs) { + int i, j; - for (i = 0; i < 4; i++) - { - for (j = 0; j < 4; j++) - { - if (*eobs++ > 1) - vp8_dequant_idct_add_c (q, dq, dst, 
stride); - else - { - vp8_dc_only_idct_add_c (q[0]*dq[0], dst, stride, dst, stride); - memset(q, 0, 2 * sizeof(q[0])); - } + for (i = 0; i < 4; ++i) { + for (j = 0; j < 4; ++j) { + if (*eobs++ > 1) { + vp8_dequant_idct_add_c(q, dq, dst, stride); + } else { + vp8_dc_only_idct_add_c(q[0] * dq[0], dst, stride, dst, stride); + memset(q, 0, 2 * sizeof(q[0])); + } - q += 16; - dst += 4; - } - - dst += 4*stride - 16; + q += 16; + dst += 4; } + + dst += 4 * stride - 16; + } } -void vp8_dequant_idct_add_uv_block_c - (short *q, short *dq, - unsigned char *dstu, unsigned char *dstv, int stride, char *eobs) -{ - int i, j; +void vp8_dequant_idct_add_uv_block_c(short *q, short *dq, unsigned char *dstu, + unsigned char *dstv, int stride, + char *eobs) { + int i, j; - for (i = 0; i < 2; i++) - { - for (j = 0; j < 2; j++) - { - if (*eobs++ > 1) - vp8_dequant_idct_add_c (q, dq, dstu, stride); - else - { - vp8_dc_only_idct_add_c (q[0]*dq[0], dstu, stride, dstu, stride); - memset(q, 0, 2 * sizeof(q[0])); - } + for (i = 0; i < 2; ++i) { + for (j = 0; j < 2; ++j) { + if (*eobs++ > 1) { + vp8_dequant_idct_add_c(q, dq, dstu, stride); + } else { + vp8_dc_only_idct_add_c(q[0] * dq[0], dstu, stride, dstu, stride); + memset(q, 0, 2 * sizeof(q[0])); + } - q += 16; - dstu += 4; - } - - dstu += 4*stride - 8; + q += 16; + dstu += 4; } - for (i = 0; i < 2; i++) - { - for (j = 0; j < 2; j++) - { - if (*eobs++ > 1) - vp8_dequant_idct_add_c (q, dq, dstv, stride); - else - { - vp8_dc_only_idct_add_c (q[0]*dq[0], dstv, stride, dstv, stride); - memset(q, 0, 2 * sizeof(q[0])); - } + dstu += 4 * stride - 8; + } - q += 16; - dstv += 4; - } + for (i = 0; i < 2; ++i) { + for (j = 0; j < 2; ++j) { + if (*eobs++ > 1) { + vp8_dequant_idct_add_c(q, dq, dstv, stride); + } else { + vp8_dc_only_idct_add_c(q[0] * dq[0], dstv, stride, dstv, stride); + memset(q, 0, 2 * sizeof(q[0])); + } - dstv += 4*stride - 8; + q += 16; + dstv += 4; } + + dstv += 4 * stride - 8; + } } diff --git a/libs/libvpx/vp8/common/idctllm.c 
b/libs/libvpx/vp8/common/idctllm.c index f5403c5aaf..2f5adc0b40 100644 --- a/libs/libvpx/vp8/common/idctllm.c +++ b/libs/libvpx/vp8/common/idctllm.c @@ -24,182 +24,162 @@ * x * sqrt(2) * cos (pi/8) = x + x * (sqrt(2) *cos(pi/8)-1). **************************************************************************/ static const int cospi8sqrt2minus1 = 20091; -static const int sinpi8sqrt2 = 35468; +static const int sinpi8sqrt2 = 35468; void vp8_short_idct4x4llm_c(short *input, unsigned char *pred_ptr, int pred_stride, unsigned char *dst_ptr, - int dst_stride) -{ - int i; - int r, c; - int a1, b1, c1, d1; - short output[16]; - short *ip = input; - short *op = output; - int temp1, temp2; - int shortpitch = 4; + int dst_stride) { + int i; + int r, c; + int a1, b1, c1, d1; + short output[16]; + short *ip = input; + short *op = output; + int temp1, temp2; + int shortpitch = 4; - for (i = 0; i < 4; i++) - { - a1 = ip[0] + ip[8]; - b1 = ip[0] - ip[8]; + for (i = 0; i < 4; ++i) { + a1 = ip[0] + ip[8]; + b1 = ip[0] - ip[8]; - temp1 = (ip[4] * sinpi8sqrt2) >> 16; - temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16); - c1 = temp1 - temp2; + temp1 = (ip[4] * sinpi8sqrt2) >> 16; + temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16); + c1 = temp1 - temp2; - temp1 = ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16); - temp2 = (ip[12] * sinpi8sqrt2) >> 16; - d1 = temp1 + temp2; + temp1 = ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16); + temp2 = (ip[12] * sinpi8sqrt2) >> 16; + d1 = temp1 + temp2; - op[shortpitch*0] = a1 + d1; - op[shortpitch*3] = a1 - d1; + op[shortpitch * 0] = a1 + d1; + op[shortpitch * 3] = a1 - d1; - op[shortpitch*1] = b1 + c1; - op[shortpitch*2] = b1 - c1; + op[shortpitch * 1] = b1 + c1; + op[shortpitch * 2] = b1 - c1; - ip++; - op++; - } - - ip = output; - op = output; - - for (i = 0; i < 4; i++) - { - a1 = ip[0] + ip[2]; - b1 = ip[0] - ip[2]; - - temp1 = (ip[1] * sinpi8sqrt2) >> 16; - temp2 = ip[3] + ((ip[3] * cospi8sqrt2minus1) >> 16); - c1 = temp1 - temp2; - - temp1 = 
ip[1] + ((ip[1] * cospi8sqrt2minus1) >> 16); - temp2 = (ip[3] * sinpi8sqrt2) >> 16; - d1 = temp1 + temp2; - - - op[0] = (a1 + d1 + 4) >> 3; - op[3] = (a1 - d1 + 4) >> 3; - - op[1] = (b1 + c1 + 4) >> 3; - op[2] = (b1 - c1 + 4) >> 3; - - ip += shortpitch; - op += shortpitch; - } - - ip = output; - for (r = 0; r < 4; r++) - { - for (c = 0; c < 4; c++) - { - int a = ip[c] + pred_ptr[c] ; - - if (a < 0) - a = 0; - - if (a > 255) - a = 255; - - dst_ptr[c] = (unsigned char) a ; - } - ip += 4; - dst_ptr += dst_stride; - pred_ptr += pred_stride; + ip++; + op++; + } + + ip = output; + op = output; + + for (i = 0; i < 4; ++i) { + a1 = ip[0] + ip[2]; + b1 = ip[0] - ip[2]; + + temp1 = (ip[1] * sinpi8sqrt2) >> 16; + temp2 = ip[3] + ((ip[3] * cospi8sqrt2minus1) >> 16); + c1 = temp1 - temp2; + + temp1 = ip[1] + ((ip[1] * cospi8sqrt2minus1) >> 16); + temp2 = (ip[3] * sinpi8sqrt2) >> 16; + d1 = temp1 + temp2; + + op[0] = (a1 + d1 + 4) >> 3; + op[3] = (a1 - d1 + 4) >> 3; + + op[1] = (b1 + c1 + 4) >> 3; + op[2] = (b1 - c1 + 4) >> 3; + + ip += shortpitch; + op += shortpitch; + } + + ip = output; + for (r = 0; r < 4; ++r) { + for (c = 0; c < 4; ++c) { + int a = ip[c] + pred_ptr[c]; + + if (a < 0) a = 0; + + if (a > 255) a = 255; + + dst_ptr[c] = (unsigned char)a; } + ip += 4; + dst_ptr += dst_stride; + pred_ptr += pred_stride; + } } void vp8_dc_only_idct_add_c(short input_dc, unsigned char *pred_ptr, int pred_stride, unsigned char *dst_ptr, - int dst_stride) -{ - int a1 = ((input_dc + 4) >> 3); - int r, c; + int dst_stride) { + int a1 = ((input_dc + 4) >> 3); + int r, c; - for (r = 0; r < 4; r++) - { - for (c = 0; c < 4; c++) - { - int a = a1 + pred_ptr[c] ; + for (r = 0; r < 4; ++r) { + for (c = 0; c < 4; ++c) { + int a = a1 + pred_ptr[c]; - if (a < 0) - a = 0; + if (a < 0) a = 0; - if (a > 255) - a = 255; + if (a > 255) a = 255; - dst_ptr[c] = (unsigned char) a ; - } - - dst_ptr += dst_stride; - pred_ptr += pred_stride; + dst_ptr[c] = (unsigned char)a; } + dst_ptr += dst_stride; + 
pred_ptr += pred_stride; + } } -void vp8_short_inv_walsh4x4_c(short *input, short *mb_dqcoeff) -{ - short output[16]; - int i; - int a1, b1, c1, d1; - int a2, b2, c2, d2; - short *ip = input; - short *op = output; +void vp8_short_inv_walsh4x4_c(short *input, short *mb_dqcoeff) { + short output[16]; + int i; + int a1, b1, c1, d1; + int a2, b2, c2, d2; + short *ip = input; + short *op = output; - for (i = 0; i < 4; i++) - { - a1 = ip[0] + ip[12]; - b1 = ip[4] + ip[8]; - c1 = ip[4] - ip[8]; - d1 = ip[0] - ip[12]; + for (i = 0; i < 4; ++i) { + a1 = ip[0] + ip[12]; + b1 = ip[4] + ip[8]; + c1 = ip[4] - ip[8]; + d1 = ip[0] - ip[12]; - op[0] = a1 + b1; - op[4] = c1 + d1; - op[8] = a1 - b1; - op[12] = d1 - c1; - ip++; - op++; - } + op[0] = a1 + b1; + op[4] = c1 + d1; + op[8] = a1 - b1; + op[12] = d1 - c1; + ip++; + op++; + } - ip = output; - op = output; + ip = output; + op = output; - for (i = 0; i < 4; i++) - { - a1 = ip[0] + ip[3]; - b1 = ip[1] + ip[2]; - c1 = ip[1] - ip[2]; - d1 = ip[0] - ip[3]; + for (i = 0; i < 4; ++i) { + a1 = ip[0] + ip[3]; + b1 = ip[1] + ip[2]; + c1 = ip[1] - ip[2]; + d1 = ip[0] - ip[3]; - a2 = a1 + b1; - b2 = c1 + d1; - c2 = a1 - b1; - d2 = d1 - c1; + a2 = a1 + b1; + b2 = c1 + d1; + c2 = a1 - b1; + d2 = d1 - c1; - op[0] = (a2 + 3) >> 3; - op[1] = (b2 + 3) >> 3; - op[2] = (c2 + 3) >> 3; - op[3] = (d2 + 3) >> 3; + op[0] = (a2 + 3) >> 3; + op[1] = (b2 + 3) >> 3; + op[2] = (c2 + 3) >> 3; + op[3] = (d2 + 3) >> 3; - ip += 4; - op += 4; - } + ip += 4; + op += 4; + } - for(i = 0; i < 16; i++) - { - mb_dqcoeff[i * 16] = output[i]; - } + for (i = 0; i < 16; ++i) { + mb_dqcoeff[i * 16] = output[i]; + } } -void vp8_short_inv_walsh4x4_1_c(short *input, short *mb_dqcoeff) -{ - int i; - int a1; +void vp8_short_inv_walsh4x4_1_c(short *input, short *mb_dqcoeff) { + int i; + int a1; - a1 = ((input[0] + 3) >> 3); - for(i = 0; i < 16; i++) - { - mb_dqcoeff[i * 16] = a1; - } + a1 = ((input[0] + 3) >> 3); + for (i = 0; i < 16; ++i) { + mb_dqcoeff[i * 16] = a1; + } } 
diff --git a/libs/libvpx/vp8/common/invtrans.h b/libs/libvpx/vp8/common/invtrans.h index 9cfea8d513..c7af32fb67 100644 --- a/libs/libvpx/vp8/common/invtrans.h +++ b/libs/libvpx/vp8/common/invtrans.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_COMMON_INVTRANS_H_ #define VP8_COMMON_INVTRANS_H_ @@ -25,43 +24,31 @@ extern "C" { #endif -static void eob_adjust(char *eobs, short *diff) -{ - /* eob adjust.... the idct can only skip if both the dc and eob are zero */ - int js; - for(js = 0; js < 16; js++) - { - if((eobs[js] == 0) && (diff[0] != 0)) - eobs[js]++; - diff+=16; - } +static void eob_adjust(char *eobs, short *diff) { + /* eob adjust.... the idct can only skip if both the dc and eob are zero */ + int js; + for (js = 0; js < 16; ++js) { + if ((eobs[js] == 0) && (diff[0] != 0)) eobs[js]++; + diff += 16; + } } -static INLINE void vp8_inverse_transform_mby(MACROBLOCKD *xd) -{ - short *DQC = xd->dequant_y1; +static INLINE void vp8_inverse_transform_mby(MACROBLOCKD *xd) { + short *DQC = xd->dequant_y1; - if (xd->mode_info_context->mbmi.mode != SPLITMV) - { - /* do 2nd order transform on the dc block */ - if (xd->eobs[24] > 1) - { - vp8_short_inv_walsh4x4 - (&xd->block[24].dqcoeff[0], xd->qcoeff); - } - else - { - vp8_short_inv_walsh4x4_1 - (&xd->block[24].dqcoeff[0], xd->qcoeff); - } - eob_adjust(xd->eobs, xd->qcoeff); - - DQC = xd->dequant_y1_dc; + if (xd->mode_info_context->mbmi.mode != SPLITMV) { + /* do 2nd order transform on the dc block */ + if (xd->eobs[24] > 1) { + vp8_short_inv_walsh4x4(&xd->block[24].dqcoeff[0], xd->qcoeff); + } else { + vp8_short_inv_walsh4x4_1(&xd->block[24].dqcoeff[0], xd->qcoeff); } - vp8_dequant_idct_add_y_block - (xd->qcoeff, DQC, - xd->dst.y_buffer, - xd->dst.y_stride, xd->eobs); + eob_adjust(xd->eobs, xd->qcoeff); + + DQC = xd->dequant_y1_dc; + } + vp8_dequant_idct_add_y_block(xd->qcoeff, DQC, xd->dst.y_buffer, + xd->dst.y_stride, xd->eobs); } #ifdef __cplusplus } // extern "C" diff 
--git a/libs/libvpx/vp8/common/loopfilter.h b/libs/libvpx/vp8/common/loopfilter.h index 20a6bd375b..7484563e06 100644 --- a/libs/libvpx/vp8/common/loopfilter.h +++ b/libs/libvpx/vp8/common/loopfilter.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_COMMON_LOOPFILTER_H_ #define VP8_COMMON_LOOPFILTER_H_ @@ -20,16 +19,12 @@ extern "C" { #endif -#define MAX_LOOP_FILTER 63 +#define MAX_LOOP_FILTER 63 /* fraction of total macroblock rows to be used in fast filter level picking */ /* has to be > 2 */ -#define PARTIAL_FRAME_FRACTION 8 +#define PARTIAL_FRAME_FRACTION 8 -typedef enum -{ - NORMAL_LOOPFILTER = 0, - SIMPLE_LOOPFILTER = 1 -} LOOPFILTERTYPE; +typedef enum { NORMAL_LOOPFILTER = 0, SIMPLE_LOOPFILTER = 1 } LOOPFILTERTYPE; #if ARCH_ARM #define SIMD_WIDTH 1 @@ -40,35 +35,32 @@ typedef enum /* Need to align this structure so when it is declared and * passed it can be loaded into vector registers. */ -typedef struct -{ - DECLARE_ALIGNED(SIMD_WIDTH, unsigned char, mblim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]); - DECLARE_ALIGNED(SIMD_WIDTH, unsigned char, blim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]); - DECLARE_ALIGNED(SIMD_WIDTH, unsigned char, lim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]); - DECLARE_ALIGNED(SIMD_WIDTH, unsigned char, hev_thr[4][SIMD_WIDTH]); - unsigned char lvl[4][4][4]; - unsigned char hev_thr_lut[2][MAX_LOOP_FILTER + 1]; - unsigned char mode_lf_lut[10]; +typedef struct { + DECLARE_ALIGNED(SIMD_WIDTH, unsigned char, + mblim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]); + DECLARE_ALIGNED(SIMD_WIDTH, unsigned char, + blim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]); + DECLARE_ALIGNED(SIMD_WIDTH, unsigned char, + lim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]); + DECLARE_ALIGNED(SIMD_WIDTH, unsigned char, hev_thr[4][SIMD_WIDTH]); + unsigned char lvl[4][4][4]; + unsigned char hev_thr_lut[2][MAX_LOOP_FILTER + 1]; + unsigned char mode_lf_lut[10]; } loop_filter_info_n; -typedef struct loop_filter_info -{ - const unsigned char * mblim; - const unsigned char * 
blim; - const unsigned char * lim; - const unsigned char * hev_thr; +typedef struct loop_filter_info { + const unsigned char *mblim; + const unsigned char *blim; + const unsigned char *lim; + const unsigned char *hev_thr; } loop_filter_info; - -typedef void loop_filter_uvfunction -( - unsigned char *u, /* source pointer */ - int p, /* pitch */ - const unsigned char *blimit, - const unsigned char *limit, - const unsigned char *thresh, - unsigned char *v -); +typedef void loop_filter_uvfunction(unsigned char *u, /* source pointer */ + int p, /* pitch */ + const unsigned char *blimit, + const unsigned char *limit, + const unsigned char *thresh, + unsigned char *v); /* assorted loopfilter functions which get used elsewhere */ struct VP8Common; @@ -77,8 +69,7 @@ struct modeinfo; void vp8_loop_filter_init(struct VP8Common *cm); -void vp8_loop_filter_frame_init(struct VP8Common *cm, - struct macroblockd *mbd, +void vp8_loop_filter_frame_init(struct VP8Common *cm, struct macroblockd *mbd, int default_filt_lvl); void vp8_loop_filter_frame(struct VP8Common *cm, struct macroblockd *mbd, @@ -88,22 +79,21 @@ void vp8_loop_filter_partial_frame(struct VP8Common *cm, struct macroblockd *mbd, int default_filt_lvl); -void vp8_loop_filter_frame_yonly(struct VP8Common *cm, - struct macroblockd *mbd, +void vp8_loop_filter_frame_yonly(struct VP8Common *cm, struct macroblockd *mbd, int default_filt_lvl); void vp8_loop_filter_update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl); void vp8_loop_filter_row_normal(struct VP8Common *cm, - struct modeinfo *mode_info_context, - int mb_row, int post_ystride, int post_uvstride, + struct modeinfo *mode_info_context, int mb_row, + int post_ystride, int post_uvstride, unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr); void vp8_loop_filter_row_simple(struct VP8Common *cm, - struct modeinfo *mode_info_context, - int mb_row, int post_ystride, int post_uvstride, + struct modeinfo *mode_info_context, int mb_row, + int post_ystride, 
int post_uvstride, unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr); #ifdef __cplusplus diff --git a/libs/libvpx/vp8/common/loopfilter_filters.c b/libs/libvpx/vp8/common/loopfilter_filters.c index 1d51696ff7..1f60721e1c 100644 --- a/libs/libvpx/vp8/common/loopfilter_filters.c +++ b/libs/libvpx/vp8/common/loopfilter_filters.c @@ -8,423 +8,382 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include #include "loopfilter.h" #include "onyxc_int.h" typedef unsigned char uc; -static signed char vp8_signed_char_clamp(int t) -{ - t = (t < -128 ? -128 : t); - t = (t > 127 ? 127 : t); - return (signed char) t; +static signed char vp8_signed_char_clamp(int t) { + t = (t < -128 ? -128 : t); + t = (t > 127 ? 127 : t); + return (signed char)t; } - /* should we apply any filter at all ( 11111111 yes, 00000000 no) */ -static signed char vp8_filter_mask(uc limit, uc blimit, - uc p3, uc p2, uc p1, uc p0, - uc q0, uc q1, uc q2, uc q3) -{ - signed char mask = 0; - mask |= (abs(p3 - p2) > limit); - mask |= (abs(p2 - p1) > limit); - mask |= (abs(p1 - p0) > limit); - mask |= (abs(q1 - q0) > limit); - mask |= (abs(q2 - q1) > limit); - mask |= (abs(q3 - q2) > limit); - mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit); - return mask - 1; +static signed char vp8_filter_mask(uc limit, uc blimit, uc p3, uc p2, uc p1, + uc p0, uc q0, uc q1, uc q2, uc q3) { + signed char mask = 0; + mask |= (abs(p3 - p2) > limit); + mask |= (abs(p2 - p1) > limit); + mask |= (abs(p1 - p0) > limit); + mask |= (abs(q1 - q0) > limit); + mask |= (abs(q2 - q1) > limit); + mask |= (abs(q3 - q2) > limit); + mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit); + return mask - 1; } /* is there high variance internal edge ( 11111111 yes, 00000000 no) */ -static signed char vp8_hevmask(uc thresh, uc p1, uc p0, uc q0, uc q1) -{ - signed char hev = 0; - hev |= (abs(p1 - p0) > thresh) * -1; - hev |= (abs(q1 - q0) > thresh) * -1; - return hev; +static signed char vp8_hevmask(uc 
thresh, uc p1, uc p0, uc q0, uc q1) { + signed char hev = 0; + hev |= (abs(p1 - p0) > thresh) * -1; + hev |= (abs(q1 - q0) > thresh) * -1; + return hev; } -static void vp8_filter(signed char mask, uc hev, uc *op1, - uc *op0, uc *oq0, uc *oq1) +static void vp8_filter(signed char mask, uc hev, uc *op1, uc *op0, uc *oq0, + uc *oq1) { + signed char ps0, qs0; + signed char ps1, qs1; + signed char filter_value, Filter1, Filter2; + signed char u; -{ - signed char ps0, qs0; - signed char ps1, qs1; - signed char filter_value, Filter1, Filter2; - signed char u; + ps1 = (signed char)*op1 ^ 0x80; + ps0 = (signed char)*op0 ^ 0x80; + qs0 = (signed char)*oq0 ^ 0x80; + qs1 = (signed char)*oq1 ^ 0x80; - ps1 = (signed char) * op1 ^ 0x80; - ps0 = (signed char) * op0 ^ 0x80; - qs0 = (signed char) * oq0 ^ 0x80; - qs1 = (signed char) * oq1 ^ 0x80; + /* add outer taps if we have high edge variance */ + filter_value = vp8_signed_char_clamp(ps1 - qs1); + filter_value &= hev; - /* add outer taps if we have high edge variance */ - filter_value = vp8_signed_char_clamp(ps1 - qs1); - filter_value &= hev; + /* inner taps */ + filter_value = vp8_signed_char_clamp(filter_value + 3 * (qs0 - ps0)); + filter_value &= mask; - /* inner taps */ - filter_value = vp8_signed_char_clamp(filter_value + 3 * (qs0 - ps0)); - filter_value &= mask; + /* save bottom 3 bits so that we round one side +4 and the other +3 + * if it equals 4 we'll set to adjust by -1 to account for the fact + * we'd round 3 the other way + */ + Filter1 = vp8_signed_char_clamp(filter_value + 4); + Filter2 = vp8_signed_char_clamp(filter_value + 3); + Filter1 >>= 3; + Filter2 >>= 3; + u = vp8_signed_char_clamp(qs0 - Filter1); + *oq0 = u ^ 0x80; + u = vp8_signed_char_clamp(ps0 + Filter2); + *op0 = u ^ 0x80; + filter_value = Filter1; - /* save bottom 3 bits so that we round one side +4 and the other +3 - * if it equals 4 we'll set to adjust by -1 to account for the fact - * we'd round 3 the other way - */ - Filter1 = 
vp8_signed_char_clamp(filter_value + 4); - Filter2 = vp8_signed_char_clamp(filter_value + 3); - Filter1 >>= 3; - Filter2 >>= 3; - u = vp8_signed_char_clamp(qs0 - Filter1); - *oq0 = u ^ 0x80; - u = vp8_signed_char_clamp(ps0 + Filter2); - *op0 = u ^ 0x80; - filter_value = Filter1; - - /* outer tap adjustments */ - filter_value += 1; - filter_value >>= 1; - filter_value &= ~hev; - - u = vp8_signed_char_clamp(qs1 - filter_value); - *oq1 = u ^ 0x80; - u = vp8_signed_char_clamp(ps1 + filter_value); - *op1 = u ^ 0x80; + /* outer tap adjustments */ + filter_value += 1; + filter_value >>= 1; + filter_value &= ~hev; + u = vp8_signed_char_clamp(qs1 - filter_value); + *oq1 = u ^ 0x80; + u = vp8_signed_char_clamp(ps1 + filter_value); + *op1 = u ^ 0x80; } -void vp8_loop_filter_horizontal_edge_c -( - unsigned char *s, - int p, /* pitch */ - const unsigned char *blimit, - const unsigned char *limit, - const unsigned char *thresh, - int count -) -{ - int hev = 0; /* high edge variance */ - signed char mask = 0; - int i = 0; +void vp8_loop_filter_horizontal_edge_c(unsigned char *s, int p, /* pitch */ + const unsigned char *blimit, + const unsigned char *limit, + const unsigned char *thresh, int count) { + int hev = 0; /* high edge variance */ + signed char mask = 0; + int i = 0; - /* loop filter designed to work using chars so that we can make maximum use - * of 8 bit simd instructions. - */ - do - { - mask = vp8_filter_mask(limit[0], blimit[0], - s[-4*p], s[-3*p], s[-2*p], s[-1*p], - s[0*p], s[1*p], s[2*p], s[3*p]); + /* loop filter designed to work using chars so that we can make maximum use + * of 8 bit simd instructions. 
+ */ + do { + mask = vp8_filter_mask(limit[0], blimit[0], s[-4 * p], s[-3 * p], s[-2 * p], + s[-1 * p], s[0 * p], s[1 * p], s[2 * p], s[3 * p]); - hev = vp8_hevmask(thresh[0], s[-2*p], s[-1*p], s[0*p], s[1*p]); + hev = vp8_hevmask(thresh[0], s[-2 * p], s[-1 * p], s[0 * p], s[1 * p]); - vp8_filter(mask, hev, s - 2 * p, s - 1 * p, s, s + 1 * p); + vp8_filter(mask, hev, s - 2 * p, s - 1 * p, s, s + 1 * p); - ++s; - } - while (++i < count * 8); + ++s; + } while (++i < count * 8); } -void vp8_loop_filter_vertical_edge_c -( - unsigned char *s, - int p, - const unsigned char *blimit, - const unsigned char *limit, - const unsigned char *thresh, - int count -) -{ - int hev = 0; /* high edge variance */ - signed char mask = 0; - int i = 0; +void vp8_loop_filter_vertical_edge_c(unsigned char *s, int p, + const unsigned char *blimit, + const unsigned char *limit, + const unsigned char *thresh, int count) { + int hev = 0; /* high edge variance */ + signed char mask = 0; + int i = 0; - /* loop filter designed to work using chars so that we can make maximum use - * of 8 bit simd instructions. - */ - do - { - mask = vp8_filter_mask(limit[0], blimit[0], - s[-4], s[-3], s[-2], s[-1], s[0], s[1], s[2], s[3]); + /* loop filter designed to work using chars so that we can make maximum use + * of 8 bit simd instructions. 
+ */ + do { + mask = vp8_filter_mask(limit[0], blimit[0], s[-4], s[-3], s[-2], s[-1], + s[0], s[1], s[2], s[3]); - hev = vp8_hevmask(thresh[0], s[-2], s[-1], s[0], s[1]); + hev = vp8_hevmask(thresh[0], s[-2], s[-1], s[0], s[1]); - vp8_filter(mask, hev, s - 2, s - 1, s, s + 1); + vp8_filter(mask, hev, s - 2, s - 1, s, s + 1); - s += p; - } - while (++i < count * 8); + s += p; + } while (++i < count * 8); } -static void vp8_mbfilter(signed char mask, uc hev, - uc *op2, uc *op1, uc *op0, uc *oq0, uc *oq1, uc *oq2) -{ - signed char s, u; - signed char filter_value, Filter1, Filter2; - signed char ps2 = (signed char) * op2 ^ 0x80; - signed char ps1 = (signed char) * op1 ^ 0x80; - signed char ps0 = (signed char) * op0 ^ 0x80; - signed char qs0 = (signed char) * oq0 ^ 0x80; - signed char qs1 = (signed char) * oq1 ^ 0x80; - signed char qs2 = (signed char) * oq2 ^ 0x80; +static void vp8_mbfilter(signed char mask, uc hev, uc *op2, uc *op1, uc *op0, + uc *oq0, uc *oq1, uc *oq2) { + signed char s, u; + signed char filter_value, Filter1, Filter2; + signed char ps2 = (signed char)*op2 ^ 0x80; + signed char ps1 = (signed char)*op1 ^ 0x80; + signed char ps0 = (signed char)*op0 ^ 0x80; + signed char qs0 = (signed char)*oq0 ^ 0x80; + signed char qs1 = (signed char)*oq1 ^ 0x80; + signed char qs2 = (signed char)*oq2 ^ 0x80; - /* add outer taps if we have high edge variance */ - filter_value = vp8_signed_char_clamp(ps1 - qs1); - filter_value = vp8_signed_char_clamp(filter_value + 3 * (qs0 - ps0)); - filter_value &= mask; + /* add outer taps if we have high edge variance */ + filter_value = vp8_signed_char_clamp(ps1 - qs1); + filter_value = vp8_signed_char_clamp(filter_value + 3 * (qs0 - ps0)); + filter_value &= mask; - Filter2 = filter_value; - Filter2 &= hev; + Filter2 = filter_value; + Filter2 &= hev; - /* save bottom 3 bits so that we round one side +4 and the other +3 */ - Filter1 = vp8_signed_char_clamp(Filter2 + 4); - Filter2 = vp8_signed_char_clamp(Filter2 + 3); - Filter1 >>= 3; 
- Filter2 >>= 3; - qs0 = vp8_signed_char_clamp(qs0 - Filter1); - ps0 = vp8_signed_char_clamp(ps0 + Filter2); + /* save bottom 3 bits so that we round one side +4 and the other +3 */ + Filter1 = vp8_signed_char_clamp(Filter2 + 4); + Filter2 = vp8_signed_char_clamp(Filter2 + 3); + Filter1 >>= 3; + Filter2 >>= 3; + qs0 = vp8_signed_char_clamp(qs0 - Filter1); + ps0 = vp8_signed_char_clamp(ps0 + Filter2); + /* only apply wider filter if not high edge variance */ + filter_value &= ~hev; + Filter2 = filter_value; - /* only apply wider filter if not high edge variance */ - filter_value &= ~hev; - Filter2 = filter_value; + /* roughly 3/7th difference across boundary */ + u = vp8_signed_char_clamp((63 + Filter2 * 27) >> 7); + s = vp8_signed_char_clamp(qs0 - u); + *oq0 = s ^ 0x80; + s = vp8_signed_char_clamp(ps0 + u); + *op0 = s ^ 0x80; - /* roughly 3/7th difference across boundary */ - u = vp8_signed_char_clamp((63 + Filter2 * 27) >> 7); - s = vp8_signed_char_clamp(qs0 - u); - *oq0 = s ^ 0x80; - s = vp8_signed_char_clamp(ps0 + u); - *op0 = s ^ 0x80; + /* roughly 2/7th difference across boundary */ + u = vp8_signed_char_clamp((63 + Filter2 * 18) >> 7); + s = vp8_signed_char_clamp(qs1 - u); + *oq1 = s ^ 0x80; + s = vp8_signed_char_clamp(ps1 + u); + *op1 = s ^ 0x80; - /* roughly 2/7th difference across boundary */ - u = vp8_signed_char_clamp((63 + Filter2 * 18) >> 7); - s = vp8_signed_char_clamp(qs1 - u); - *oq1 = s ^ 0x80; - s = vp8_signed_char_clamp(ps1 + u); - *op1 = s ^ 0x80; - - /* roughly 1/7th difference across boundary */ - u = vp8_signed_char_clamp((63 + Filter2 * 9) >> 7); - s = vp8_signed_char_clamp(qs2 - u); - *oq2 = s ^ 0x80; - s = vp8_signed_char_clamp(ps2 + u); - *op2 = s ^ 0x80; + /* roughly 1/7th difference across boundary */ + u = vp8_signed_char_clamp((63 + Filter2 * 9) >> 7); + s = vp8_signed_char_clamp(qs2 - u); + *oq2 = s ^ 0x80; + s = vp8_signed_char_clamp(ps2 + u); + *op2 = s ^ 0x80; } -void vp8_mbloop_filter_horizontal_edge_c -( - unsigned char *s, - 
int p, - const unsigned char *blimit, - const unsigned char *limit, - const unsigned char *thresh, - int count -) -{ - signed char hev = 0; /* high edge variance */ - signed char mask = 0; - int i = 0; +void vp8_mbloop_filter_horizontal_edge_c(unsigned char *s, int p, + const unsigned char *blimit, + const unsigned char *limit, + const unsigned char *thresh, + int count) { + signed char hev = 0; /* high edge variance */ + signed char mask = 0; + int i = 0; - /* loop filter designed to work using chars so that we can make maximum use - * of 8 bit simd instructions. - */ - do - { + /* loop filter designed to work using chars so that we can make maximum use + * of 8 bit simd instructions. + */ + do { + mask = vp8_filter_mask(limit[0], blimit[0], s[-4 * p], s[-3 * p], s[-2 * p], + s[-1 * p], s[0 * p], s[1 * p], s[2 * p], s[3 * p]); - mask = vp8_filter_mask(limit[0], blimit[0], - s[-4*p], s[-3*p], s[-2*p], s[-1*p], - s[0*p], s[1*p], s[2*p], s[3*p]); + hev = vp8_hevmask(thresh[0], s[-2 * p], s[-1 * p], s[0 * p], s[1 * p]); - hev = vp8_hevmask(thresh[0], s[-2*p], s[-1*p], s[0*p], s[1*p]); - - vp8_mbfilter(mask, hev, s - 3 * p, s - 2 * p, s - 1 * p, s, s + 1 * p, s + 2 * p); - - ++s; - } - while (++i < count * 8); + vp8_mbfilter(mask, hev, s - 3 * p, s - 2 * p, s - 1 * p, s, s + 1 * p, + s + 2 * p); + ++s; + } while (++i < count * 8); } +void vp8_mbloop_filter_vertical_edge_c(unsigned char *s, int p, + const unsigned char *blimit, + const unsigned char *limit, + const unsigned char *thresh, int count) { + signed char hev = 0; /* high edge variance */ + signed char mask = 0; + int i = 0; -void vp8_mbloop_filter_vertical_edge_c -( - unsigned char *s, - int p, - const unsigned char *blimit, - const unsigned char *limit, - const unsigned char *thresh, - int count -) -{ - signed char hev = 0; /* high edge variance */ - signed char mask = 0; - int i = 0; + do { + mask = vp8_filter_mask(limit[0], blimit[0], s[-4], s[-3], s[-2], s[-1], + s[0], s[1], s[2], s[3]); - do - { + hev = 
vp8_hevmask(thresh[0], s[-2], s[-1], s[0], s[1]); - mask = vp8_filter_mask(limit[0], blimit[0], - s[-4], s[-3], s[-2], s[-1], s[0], s[1], s[2], s[3]); - - hev = vp8_hevmask(thresh[0], s[-2], s[-1], s[0], s[1]); - - vp8_mbfilter(mask, hev, s - 3, s - 2, s - 1, s, s + 1, s + 2); - - s += p; - } - while (++i < count * 8); + vp8_mbfilter(mask, hev, s - 3, s - 2, s - 1, s, s + 1, s + 2); + s += p; + } while (++i < count * 8); } /* should we apply any filter at all ( 11111111 yes, 00000000 no) */ -static signed char vp8_simple_filter_mask(uc blimit, uc p1, uc p0, uc q0, uc q1) -{ -/* Why does this cause problems for win32? - * error C2143: syntax error : missing ';' before 'type' - * (void) limit; - */ - signed char mask = (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit) * -1; - return mask; +static signed char vp8_simple_filter_mask(uc blimit, uc p1, uc p0, uc q0, + uc q1) { + /* Why does this cause problems for win32? + * error C2143: syntax error : missing ';' before 'type' + * (void) limit; + */ + signed char mask = (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit) * -1; + return mask; } -static void vp8_simple_filter(signed char mask, uc *op1, uc *op0, uc *oq0, uc *oq1) -{ - signed char filter_value, Filter1, Filter2; - signed char p1 = (signed char) * op1 ^ 0x80; - signed char p0 = (signed char) * op0 ^ 0x80; - signed char q0 = (signed char) * oq0 ^ 0x80; - signed char q1 = (signed char) * oq1 ^ 0x80; - signed char u; +static void vp8_simple_filter(signed char mask, uc *op1, uc *op0, uc *oq0, + uc *oq1) { + signed char filter_value, Filter1, Filter2; + signed char p1 = (signed char)*op1 ^ 0x80; + signed char p0 = (signed char)*op0 ^ 0x80; + signed char q0 = (signed char)*oq0 ^ 0x80; + signed char q1 = (signed char)*oq1 ^ 0x80; + signed char u; - filter_value = vp8_signed_char_clamp(p1 - q1); - filter_value = vp8_signed_char_clamp(filter_value + 3 * (q0 - p0)); - filter_value &= mask; + filter_value = vp8_signed_char_clamp(p1 - q1); + filter_value = 
vp8_signed_char_clamp(filter_value + 3 * (q0 - p0)); + filter_value &= mask; - /* save bottom 3 bits so that we round one side +4 and the other +3 */ - Filter1 = vp8_signed_char_clamp(filter_value + 4); - Filter1 >>= 3; - u = vp8_signed_char_clamp(q0 - Filter1); - *oq0 = u ^ 0x80; + /* save bottom 3 bits so that we round one side +4 and the other +3 */ + Filter1 = vp8_signed_char_clamp(filter_value + 4); + Filter1 >>= 3; + u = vp8_signed_char_clamp(q0 - Filter1); + *oq0 = u ^ 0x80; - Filter2 = vp8_signed_char_clamp(filter_value + 3); - Filter2 >>= 3; - u = vp8_signed_char_clamp(p0 + Filter2); - *op0 = u ^ 0x80; + Filter2 = vp8_signed_char_clamp(filter_value + 3); + Filter2 >>= 3; + u = vp8_signed_char_clamp(p0 + Filter2); + *op0 = u ^ 0x80; } -void vp8_loop_filter_simple_horizontal_edge_c -( - unsigned char *s, - int p, - const unsigned char *blimit -) -{ - signed char mask = 0; - int i = 0; +void vp8_loop_filter_simple_horizontal_edge_c(unsigned char *s, int p, + const unsigned char *blimit) { + signed char mask = 0; + int i = 0; - do - { - mask = vp8_simple_filter_mask(blimit[0], s[-2*p], s[-1*p], s[0*p], s[1*p]); - vp8_simple_filter(mask, s - 2 * p, s - 1 * p, s, s + 1 * p); - ++s; - } - while (++i < 16); + do { + mask = vp8_simple_filter_mask(blimit[0], s[-2 * p], s[-1 * p], s[0 * p], + s[1 * p]); + vp8_simple_filter(mask, s - 2 * p, s - 1 * p, s, s + 1 * p); + ++s; + } while (++i < 16); } -void vp8_loop_filter_simple_vertical_edge_c -( - unsigned char *s, - int p, - const unsigned char *blimit -) -{ - signed char mask = 0; - int i = 0; - - do - { - mask = vp8_simple_filter_mask(blimit[0], s[-2], s[-1], s[0], s[1]); - vp8_simple_filter(mask, s - 2, s - 1, s, s + 1); - s += p; - } - while (++i < 16); +void vp8_loop_filter_simple_vertical_edge_c(unsigned char *s, int p, + const unsigned char *blimit) { + signed char mask = 0; + int i = 0; + do { + mask = vp8_simple_filter_mask(blimit[0], s[-2], s[-1], s[0], s[1]); + vp8_simple_filter(mask, s - 2, s - 1, s, s + 
1); + s += p; + } while (++i < 16); } /* Horizontal MB filtering */ void vp8_loop_filter_mbh_c(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, - loop_filter_info *lfi) -{ - vp8_mbloop_filter_horizontal_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2); + loop_filter_info *lfi) { + vp8_mbloop_filter_horizontal_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim, + lfi->hev_thr, 2); - if (u_ptr) - vp8_mbloop_filter_horizontal_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + if (u_ptr) { + vp8_mbloop_filter_horizontal_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim, + lfi->hev_thr, 1); + } - if (v_ptr) - vp8_mbloop_filter_horizontal_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + if (v_ptr) { + vp8_mbloop_filter_horizontal_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim, + lfi->hev_thr, 1); + } } /* Vertical MB Filtering */ void vp8_loop_filter_mbv_c(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, - loop_filter_info *lfi) -{ - vp8_mbloop_filter_vertical_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2); + loop_filter_info *lfi) { + vp8_mbloop_filter_vertical_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim, + lfi->hev_thr, 2); - if (u_ptr) - vp8_mbloop_filter_vertical_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + if (u_ptr) { + vp8_mbloop_filter_vertical_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim, + lfi->hev_thr, 1); + } - if (v_ptr) - vp8_mbloop_filter_vertical_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + if (v_ptr) { + vp8_mbloop_filter_vertical_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim, + lfi->hev_thr, 1); + } } /* Horizontal B Filtering */ void vp8_loop_filter_bh_c(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, - loop_filter_info *lfi) -{ - vp8_loop_filter_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, 
lfi->hev_thr, 2); - vp8_loop_filter_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); - vp8_loop_filter_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); + loop_filter_info *lfi) { + vp8_loop_filter_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride, lfi->blim, + lfi->lim, lfi->hev_thr, 2); + vp8_loop_filter_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride, lfi->blim, + lfi->lim, lfi->hev_thr, 2); + vp8_loop_filter_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, lfi->blim, + lfi->lim, lfi->hev_thr, 2); - if (u_ptr) - vp8_loop_filter_horizontal_edge_c(u_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1); + if (u_ptr) { + vp8_loop_filter_horizontal_edge_c(u_ptr + 4 * uv_stride, uv_stride, + lfi->blim, lfi->lim, lfi->hev_thr, 1); + } - if (v_ptr) - vp8_loop_filter_horizontal_edge_c(v_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1); + if (v_ptr) { + vp8_loop_filter_horizontal_edge_c(v_ptr + 4 * uv_stride, uv_stride, + lfi->blim, lfi->lim, lfi->hev_thr, 1); + } } void vp8_loop_filter_bhs_c(unsigned char *y_ptr, int y_stride, - const unsigned char *blimit) -{ - vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride, blimit); - vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride, blimit); - vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, blimit); + const unsigned char *blimit) { + vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride, + blimit); + vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride, + blimit); + vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, + blimit); } /* Vertical B Filtering */ void vp8_loop_filter_bv_c(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, int y_stride, int uv_stride, - loop_filter_info *lfi) -{ - vp8_loop_filter_vertical_edge_c(y_ptr + 4, y_stride, lfi->blim, lfi->lim, 
lfi->hev_thr, 2); - vp8_loop_filter_vertical_edge_c(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); - vp8_loop_filter_vertical_edge_c(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); + loop_filter_info *lfi) { + vp8_loop_filter_vertical_edge_c(y_ptr + 4, y_stride, lfi->blim, lfi->lim, + lfi->hev_thr, 2); + vp8_loop_filter_vertical_edge_c(y_ptr + 8, y_stride, lfi->blim, lfi->lim, + lfi->hev_thr, 2); + vp8_loop_filter_vertical_edge_c(y_ptr + 12, y_stride, lfi->blim, lfi->lim, + lfi->hev_thr, 2); - if (u_ptr) - vp8_loop_filter_vertical_edge_c(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1); + if (u_ptr) { + vp8_loop_filter_vertical_edge_c(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, + lfi->hev_thr, 1); + } - if (v_ptr) - vp8_loop_filter_vertical_edge_c(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1); + if (v_ptr) { + vp8_loop_filter_vertical_edge_c(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, + lfi->hev_thr, 1); + } } void vp8_loop_filter_bvs_c(unsigned char *y_ptr, int y_stride, - const unsigned char *blimit) -{ - vp8_loop_filter_simple_vertical_edge_c(y_ptr + 4, y_stride, blimit); - vp8_loop_filter_simple_vertical_edge_c(y_ptr + 8, y_stride, blimit); - vp8_loop_filter_simple_vertical_edge_c(y_ptr + 12, y_stride, blimit); + const unsigned char *blimit) { + vp8_loop_filter_simple_vertical_edge_c(y_ptr + 4, y_stride, blimit); + vp8_loop_filter_simple_vertical_edge_c(y_ptr + 8, y_stride, blimit); + vp8_loop_filter_simple_vertical_edge_c(y_ptr + 12, y_stride, blimit); } diff --git a/libs/libvpx/vp8/common/mbpitch.c b/libs/libvpx/vp8/common/mbpitch.c index 32e1b66409..188b57f389 100644 --- a/libs/libvpx/vp8/common/mbpitch.c +++ b/libs/libvpx/vp8/common/mbpitch.c @@ -8,61 +8,50 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "blockd.h" -void vp8_setup_block_dptrs(MACROBLOCKD *x) -{ - int r, c; +void vp8_setup_block_dptrs(MACROBLOCKD *x) { + int r, c; - for (r = 0; r < 4; r++) - { - for (c = 0; c < 4; c++) - { - x->block[r*4+c].predictor = x->predictor + r * 4 * 16 + c * 4; - } + for (r = 0; r < 4; ++r) { + for (c = 0; c < 4; ++c) { + x->block[r * 4 + c].predictor = x->predictor + r * 4 * 16 + c * 4; } + } - for (r = 0; r < 2; r++) - { - for (c = 0; c < 2; c++) - { - x->block[16+r*2+c].predictor = x->predictor + 256 + r * 4 * 8 + c * 4; - - } + for (r = 0; r < 2; ++r) { + for (c = 0; c < 2; ++c) { + x->block[16 + r * 2 + c].predictor = + x->predictor + 256 + r * 4 * 8 + c * 4; } + } - for (r = 0; r < 2; r++) - { - for (c = 0; c < 2; c++) - { - x->block[20+r*2+c].predictor = x->predictor + 320 + r * 4 * 8 + c * 4; - - } + for (r = 0; r < 2; ++r) { + for (c = 0; c < 2; ++c) { + x->block[20 + r * 2 + c].predictor = + x->predictor + 320 + r * 4 * 8 + c * 4; } + } - for (r = 0; r < 25; r++) - { - x->block[r].qcoeff = x->qcoeff + r * 16; - x->block[r].dqcoeff = x->dqcoeff + r * 16; - x->block[r].eob = x->eobs + r; - } + for (r = 0; r < 25; ++r) { + x->block[r].qcoeff = x->qcoeff + r * 16; + x->block[r].dqcoeff = x->dqcoeff + r * 16; + x->block[r].eob = x->eobs + r; + } } -void vp8_build_block_doffsets(MACROBLOCKD *x) -{ - int block; +void vp8_build_block_doffsets(MACROBLOCKD *x) { + int block; - for (block = 0; block < 16; block++) /* y blocks */ - { - x->block[block].offset = - (block >> 2) * 4 * x->dst.y_stride + (block & 3) * 4; - } + for (block = 0; block < 16; ++block) /* y blocks */ + { + x->block[block].offset = + (block >> 2) * 4 * x->dst.y_stride + (block & 3) * 4; + } - for (block = 16; block < 20; block++) /* U and V blocks */ - { - x->block[block+4].offset = - x->block[block].offset = - ((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4; - } + for (block = 16; block < 20; ++block) /* U and V blocks */ + { + x->block[block + 4].offset = 
x->block[block].offset = + ((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4; + } } diff --git a/libs/libvpx/vp8/common/mfqe.c b/libs/libvpx/vp8/common/mfqe.c index 2bfefb126d..5aace8c99d 100644 --- a/libs/libvpx/vp8/common/mfqe.c +++ b/libs/libvpx/vp8/common/mfqe.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - /* MFQE: Multiframe Quality Enhancement * In rate limited situations keyframes may cause significant visual artifacts * commonly referred to as "popping." This file implements a postproccesing @@ -28,359 +27,304 @@ #include static void filter_by_weight(unsigned char *src, int src_stride, - unsigned char *dst, int dst_stride, - int block_size, int src_weight) -{ - int dst_weight = (1 << MFQE_PRECISION) - src_weight; - int rounding_bit = 1 << (MFQE_PRECISION - 1); - int r, c; + unsigned char *dst, int dst_stride, int block_size, + int src_weight) { + int dst_weight = (1 << MFQE_PRECISION) - src_weight; + int rounding_bit = 1 << (MFQE_PRECISION - 1); + int r, c; - for (r = 0; r < block_size; r++) - { - for (c = 0; c < block_size; c++) - { - dst[c] = (src[c] * src_weight + - dst[c] * dst_weight + - rounding_bit) >> MFQE_PRECISION; - } - src += src_stride; - dst += dst_stride; + for (r = 0; r < block_size; ++r) { + for (c = 0; c < block_size; ++c) { + dst[c] = (src[c] * src_weight + dst[c] * dst_weight + rounding_bit) >> + MFQE_PRECISION; } + src += src_stride; + dst += dst_stride; + } } void vp8_filter_by_weight16x16_c(unsigned char *src, int src_stride, unsigned char *dst, int dst_stride, - int src_weight) -{ - filter_by_weight(src, src_stride, dst, dst_stride, 16, src_weight); + int src_weight) { + filter_by_weight(src, src_stride, dst, dst_stride, 16, src_weight); } void vp8_filter_by_weight8x8_c(unsigned char *src, int src_stride, unsigned char *dst, int dst_stride, - int src_weight) -{ - filter_by_weight(src, src_stride, dst, dst_stride, 8, src_weight); + int src_weight) { + filter_by_weight(src, src_stride, 
dst, dst_stride, 8, src_weight); } void vp8_filter_by_weight4x4_c(unsigned char *src, int src_stride, unsigned char *dst, int dst_stride, - int src_weight) -{ - filter_by_weight(src, src_stride, dst, dst_stride, 4, src_weight); + int src_weight) { + filter_by_weight(src, src_stride, dst, dst_stride, 4, src_weight); } -static void apply_ifactor(unsigned char *y_src, - int y_src_stride, - unsigned char *y_dst, - int y_dst_stride, - unsigned char *u_src, - unsigned char *v_src, - int uv_src_stride, - unsigned char *u_dst, - unsigned char *v_dst, - int uv_dst_stride, - int block_size, - int src_weight) -{ - if (block_size == 16) - { - vp8_filter_by_weight16x16(y_src, y_src_stride, y_dst, y_dst_stride, src_weight); - vp8_filter_by_weight8x8(u_src, uv_src_stride, u_dst, uv_dst_stride, src_weight); - vp8_filter_by_weight8x8(v_src, uv_src_stride, v_dst, uv_dst_stride, src_weight); - } - else /* if (block_size == 8) */ - { - vp8_filter_by_weight8x8(y_src, y_src_stride, y_dst, y_dst_stride, src_weight); - vp8_filter_by_weight4x4(u_src, uv_src_stride, u_dst, uv_dst_stride, src_weight); - vp8_filter_by_weight4x4(v_src, uv_src_stride, v_dst, uv_dst_stride, src_weight); - } +static void apply_ifactor(unsigned char *y_src, int y_src_stride, + unsigned char *y_dst, int y_dst_stride, + unsigned char *u_src, unsigned char *v_src, + int uv_src_stride, unsigned char *u_dst, + unsigned char *v_dst, int uv_dst_stride, + int block_size, int src_weight) { + if (block_size == 16) { + vp8_filter_by_weight16x16(y_src, y_src_stride, y_dst, y_dst_stride, + src_weight); + vp8_filter_by_weight8x8(u_src, uv_src_stride, u_dst, uv_dst_stride, + src_weight); + vp8_filter_by_weight8x8(v_src, uv_src_stride, v_dst, uv_dst_stride, + src_weight); + } else /* if (block_size == 8) */ + { + vp8_filter_by_weight8x8(y_src, y_src_stride, y_dst, y_dst_stride, + src_weight); + vp8_filter_by_weight4x4(u_src, uv_src_stride, u_dst, uv_dst_stride, + src_weight); + vp8_filter_by_weight4x4(v_src, uv_src_stride, v_dst, 
uv_dst_stride, + src_weight); + } } -static unsigned int int_sqrt(unsigned int x) -{ - unsigned int y = x; - unsigned int guess; - int p = 1; - while (y>>=1) p++; - p>>=1; +static unsigned int int_sqrt(unsigned int x) { + unsigned int y = x; + unsigned int guess; + int p = 1; + while (y >>= 1) p++; + p >>= 1; - guess=0; - while (p>=0) - { - guess |= (1<= 0) { + guess |= (1 << p); + if (x < guess * guess) guess -= (1 << p); + p--; + } + /* choose between guess or guess+1 */ + return guess + (guess * guess + guess + 1 <= x); } #define USE_SSD -static void multiframe_quality_enhance_block -( +static void multiframe_quality_enhance_block( int blksize, /* Currently only values supported are 16, 8 */ - int qcurr, - int qprev, - unsigned char *y, - unsigned char *u, - unsigned char *v, - int y_stride, - int uv_stride, - unsigned char *yd, - unsigned char *ud, - unsigned char *vd, - int yd_stride, - int uvd_stride -) -{ - static const unsigned char VP8_ZEROS[16]= + int qcurr, int qprev, unsigned char *y, unsigned char *u, unsigned char *v, + int y_stride, int uv_stride, unsigned char *yd, unsigned char *ud, + unsigned char *vd, int yd_stride, int uvd_stride) { + static const unsigned char VP8_ZEROS[16] = { 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 }; + int uvblksize = blksize >> 1; + int qdiff = qcurr - qprev; + + int i; + unsigned char *up; + unsigned char *udp; + unsigned char *vp; + unsigned char *vdp; + + unsigned int act, actd, sad, usad, vsad, sse, thr, thrsq, actrisk; + + if (blksize == 16) { + actd = (vpx_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse) + 128) >> 8; + act = (vpx_variance16x16(y, y_stride, VP8_ZEROS, 0, &sse) + 128) >> 8; +#ifdef USE_SSD + vpx_variance16x16(y, y_stride, yd, yd_stride, &sse); + sad = (sse + 128) >> 8; + vpx_variance8x8(u, uv_stride, ud, uvd_stride, &sse); + usad = (sse + 32) >> 6; + vpx_variance8x8(v, uv_stride, vd, uvd_stride, &sse); + vsad = (sse + 32) >> 6; +#else + sad = (vpx_sad16x16(y, y_stride, yd, yd_stride) + 128) >> 
8; + usad = (vpx_sad8x8(u, uv_stride, ud, uvd_stride) + 32) >> 6; + vsad = (vpx_sad8x8(v, uv_stride, vd, uvd_stride) + 32) >> 6; +#endif + } else /* if (blksize == 8) */ + { + actd = (vpx_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse) + 32) >> 6; + act = (vpx_variance8x8(y, y_stride, VP8_ZEROS, 0, &sse) + 32) >> 6; +#ifdef USE_SSD + vpx_variance8x8(y, y_stride, yd, yd_stride, &sse); + sad = (sse + 32) >> 6; + vpx_variance4x4(u, uv_stride, ud, uvd_stride, &sse); + usad = (sse + 8) >> 4; + vpx_variance4x4(v, uv_stride, vd, uvd_stride, &sse); + vsad = (sse + 8) >> 4; +#else + sad = (vpx_sad8x8(y, y_stride, yd, yd_stride) + 32) >> 6; + usad = (vpx_sad4x4(u, uv_stride, ud, uvd_stride) + 8) >> 4; + vsad = (vpx_sad4x4(v, uv_stride, vd, uvd_stride) + 8) >> 4; +#endif + } + + actrisk = (actd > act * 5); + + /* thr = qdiff/16 + log2(act) + log4(qprev) */ + thr = (qdiff >> 4); + while (actd >>= 1) thr++; + while (qprev >>= 2) thr++; + +#ifdef USE_SSD + thrsq = thr * thr; + if (sad < thrsq && + /* additional checks for color mismatch and excessive addition of + * high-frequencies */ + 4 * usad < thrsq && 4 * vsad < thrsq && !actrisk) +#else + if (sad < thr && + /* additional checks for color mismatch and excessive addition of + * high-frequencies */ + 2 * usad < thr && 2 * vsad < thr && !actrisk) +#endif + { + int ifactor; +#ifdef USE_SSD + /* TODO: optimize this later to not need sqr root */ + sad = int_sqrt(sad); +#endif + ifactor = (sad << MFQE_PRECISION) / thr; + ifactor >>= (qdiff >> 5); + + if (ifactor) { + apply_ifactor(y, y_stride, yd, yd_stride, u, v, uv_stride, ud, vd, + uvd_stride, blksize, ifactor); + } + } else /* else implicitly copy from previous frame */ + { + if (blksize == 16) { + vp8_copy_mem16x16(y, y_stride, yd, yd_stride); + vp8_copy_mem8x8(u, uv_stride, ud, uvd_stride); + vp8_copy_mem8x8(v, uv_stride, vd, uvd_stride); + } else /* if (blksize == 8) */ { - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 + vp8_copy_mem8x8(y, y_stride, yd, yd_stride); + for (up = u, udp = 
ud, i = 0; i < uvblksize; + ++i, up += uv_stride, udp += uvd_stride) { + memcpy(udp, up, uvblksize); + } + for (vp = v, vdp = vd, i = 0; i < uvblksize; + ++i, vp += uv_stride, vdp += uvd_stride) { + memcpy(vdp, vp, uvblksize); + } + } + } +} + +static int qualify_inter_mb(const MODE_INFO *mode_info_context, int *map) { + if (mode_info_context->mbmi.mb_skip_coeff) { + map[0] = map[1] = map[2] = map[3] = 1; + } else if (mode_info_context->mbmi.mode == SPLITMV) { + static int ndx[4][4] = { + { 0, 1, 4, 5 }, { 2, 3, 6, 7 }, { 8, 9, 12, 13 }, { 10, 11, 14, 15 } }; - int uvblksize = blksize >> 1; - int qdiff = qcurr - qprev; - - int i; - unsigned char *up; - unsigned char *udp; - unsigned char *vp; - unsigned char *vdp; - - unsigned int act, actd, sad, usad, vsad, sse, thr, thrsq, actrisk; - - if (blksize == 16) - { - actd = (vpx_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse)+128)>>8; - act = (vpx_variance16x16(y, y_stride, VP8_ZEROS, 0, &sse)+128)>>8; -#ifdef USE_SSD - vpx_variance16x16(y, y_stride, yd, yd_stride, &sse); - sad = (sse + 128)>>8; - vpx_variance8x8(u, uv_stride, ud, uvd_stride, &sse); - usad = (sse + 32)>>6; - vpx_variance8x8(v, uv_stride, vd, uvd_stride, &sse); - vsad = (sse + 32)>>6; -#else - sad = (vpx_sad16x16(y, y_stride, yd, yd_stride) + 128) >> 8; - usad = (vpx_sad8x8(u, uv_stride, ud, uvd_stride) + 32) >> 6; - vsad = (vpx_sad8x8(v, uv_stride, vd, uvd_stride)+ 32) >> 6; -#endif - } - else /* if (blksize == 8) */ - { - actd = (vpx_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse)+32)>>6; - act = (vpx_variance8x8(y, y_stride, VP8_ZEROS, 0, &sse)+32)>>6; -#ifdef USE_SSD - vpx_variance8x8(y, y_stride, yd, yd_stride, &sse); - sad = (sse + 32)>>6; - vpx_variance4x4(u, uv_stride, ud, uvd_stride, &sse); - usad = (sse + 8)>>4; - vpx_variance4x4(v, uv_stride, vd, uvd_stride, &sse); - vsad = (sse + 8)>>4; -#else - sad = (vpx_sad8x8(y, y_stride, yd, yd_stride) + 32) >> 6; - usad = (vpx_sad4x4(u, uv_stride, ud, uvd_stride) + 8) >> 4; - vsad = (vpx_sad4x4(v, 
uv_stride, vd, uvd_stride) + 8) >> 4; -#endif - } - - actrisk = (actd > act * 5); - - /* thr = qdiff/16 + log2(act) + log4(qprev) */ - thr = (qdiff >> 4); - while (actd >>= 1) thr++; - while (qprev >>= 2) thr++; - -#ifdef USE_SSD - thrsq = thr * thr; - if (sad < thrsq && - /* additional checks for color mismatch and excessive addition of - * high-frequencies */ - 4 * usad < thrsq && 4 * vsad < thrsq && !actrisk) -#else - if (sad < thr && - /* additional checks for color mismatch and excessive addition of - * high-frequencies */ - 2 * usad < thr && 2 * vsad < thr && !actrisk) -#endif - { - int ifactor; -#ifdef USE_SSD - /* TODO: optimize this later to not need sqr root */ - sad = int_sqrt(sad); -#endif - ifactor = (sad << MFQE_PRECISION) / thr; - ifactor >>= (qdiff >> 5); - - if (ifactor) - { - apply_ifactor(y, y_stride, yd, yd_stride, - u, v, uv_stride, - ud, vd, uvd_stride, - blksize, ifactor); - } - } - else /* else implicitly copy from previous frame */ - { - if (blksize == 16) - { - vp8_copy_mem16x16(y, y_stride, yd, yd_stride); - vp8_copy_mem8x8(u, uv_stride, ud, uvd_stride); - vp8_copy_mem8x8(v, uv_stride, vd, uvd_stride); - } - else /* if (blksize == 8) */ - { - vp8_copy_mem8x8(y, y_stride, yd, yd_stride); - for (up = u, udp = ud, i = 0; i < uvblksize; ++i, up += uv_stride, udp += uvd_stride) - memcpy(udp, up, uvblksize); - for (vp = v, vdp = vd, i = 0; i < uvblksize; ++i, vp += uv_stride, vdp += uvd_stride) - memcpy(vdp, vp, uvblksize); - } + int i, j; + for (i = 0; i < 4; ++i) { + map[i] = 1; + for (j = 0; j < 4 && map[j]; ++j) { + map[i] &= (mode_info_context->bmi[ndx[i][j]].mv.as_mv.row <= 2 && + mode_info_context->bmi[ndx[i][j]].mv.as_mv.col <= 2); + } } + } else { + map[0] = map[1] = map[2] = map[3] = + (mode_info_context->mbmi.mode > B_PRED && + abs(mode_info_context->mbmi.mv.as_mv.row) <= 2 && + abs(mode_info_context->mbmi.mv.as_mv.col) <= 2); + } + return (map[0] + map[1] + map[2] + map[3]); } -static int qualify_inter_mb(const MODE_INFO 
*mode_info_context, int *map) -{ - if (mode_info_context->mbmi.mb_skip_coeff) - map[0] = map[1] = map[2] = map[3] = 1; - else if (mode_info_context->mbmi.mode==SPLITMV) - { - static int ndx[4][4] = - { - {0, 1, 4, 5}, - {2, 3, 6, 7}, - {8, 9, 12, 13}, - {10, 11, 14, 15} - }; - int i, j; - for (i=0; i<4; ++i) - { - map[i] = 1; - for (j=0; j<4 && map[j]; ++j) - map[i] &= (mode_info_context->bmi[ndx[i][j]].mv.as_mv.row <= 2 && - mode_info_context->bmi[ndx[i][j]].mv.as_mv.col <= 2); - } - } - else - { - map[0] = map[1] = map[2] = map[3] = - (mode_info_context->mbmi.mode > B_PRED && - abs(mode_info_context->mbmi.mv.as_mv.row) <= 2 && - abs(mode_info_context->mbmi.mv.as_mv.col) <= 2); - } - return (map[0]+map[1]+map[2]+map[3]); -} +void vp8_multiframe_quality_enhance(VP8_COMMON *cm) { + YV12_BUFFER_CONFIG *show = cm->frame_to_show; + YV12_BUFFER_CONFIG *dest = &cm->post_proc_buffer; -void vp8_multiframe_quality_enhance -( - VP8_COMMON *cm -) -{ - YV12_BUFFER_CONFIG *show = cm->frame_to_show; - YV12_BUFFER_CONFIG *dest = &cm->post_proc_buffer; + FRAME_TYPE frame_type = cm->frame_type; + /* Point at base of Mb MODE_INFO list has motion vectors etc */ + const MODE_INFO *mode_info_context = cm->show_frame_mi; + int mb_row; + int mb_col; + int totmap, map[4]; + int qcurr = cm->base_qindex; + int qprev = cm->postproc_state.last_base_qindex; - FRAME_TYPE frame_type = cm->frame_type; - /* Point at base of Mb MODE_INFO list has motion vectors etc */ - const MODE_INFO *mode_info_context = cm->show_frame_mi; - int mb_row; - int mb_col; - int totmap, map[4]; - int qcurr = cm->base_qindex; - int qprev = cm->postproc_state.last_base_qindex; + unsigned char *y_ptr, *u_ptr, *v_ptr; + unsigned char *yd_ptr, *ud_ptr, *vd_ptr; - unsigned char *y_ptr, *u_ptr, *v_ptr; - unsigned char *yd_ptr, *ud_ptr, *vd_ptr; + /* Set up the buffer pointers */ + y_ptr = show->y_buffer; + u_ptr = show->u_buffer; + v_ptr = show->v_buffer; + yd_ptr = dest->y_buffer; + ud_ptr = dest->u_buffer; + vd_ptr = 
dest->v_buffer; - /* Set up the buffer pointers */ - y_ptr = show->y_buffer; - u_ptr = show->u_buffer; - v_ptr = show->v_buffer; - yd_ptr = dest->y_buffer; - ud_ptr = dest->u_buffer; - vd_ptr = dest->v_buffer; - - /* postprocess each macro block */ - for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) - { - for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) - { - /* if motion is high there will likely be no benefit */ - if (frame_type == INTER_FRAME) totmap = qualify_inter_mb(mode_info_context, map); - else totmap = (frame_type == KEY_FRAME ? 4 : 0); - if (totmap) - { - if (totmap < 4) - { - int i, j; - for (i=0; i<2; ++i) - for (j=0; j<2; ++j) - { - if (map[i*2+j]) - { - multiframe_quality_enhance_block(8, qcurr, qprev, - y_ptr + 8*(i*show->y_stride+j), - u_ptr + 4*(i*show->uv_stride+j), - v_ptr + 4*(i*show->uv_stride+j), - show->y_stride, - show->uv_stride, - yd_ptr + 8*(i*dest->y_stride+j), - ud_ptr + 4*(i*dest->uv_stride+j), - vd_ptr + 4*(i*dest->uv_stride+j), - dest->y_stride, - dest->uv_stride); - } - else - { - /* copy a 8x8 block */ - int k; - unsigned char *up = u_ptr + 4*(i*show->uv_stride+j); - unsigned char *udp = ud_ptr + 4*(i*dest->uv_stride+j); - unsigned char *vp = v_ptr + 4*(i*show->uv_stride+j); - unsigned char *vdp = vd_ptr + 4*(i*dest->uv_stride+j); - vp8_copy_mem8x8(y_ptr + 8*(i*show->y_stride+j), show->y_stride, - yd_ptr + 8*(i*dest->y_stride+j), dest->y_stride); - for (k = 0; k < 4; ++k, up += show->uv_stride, udp += dest->uv_stride, - vp += show->uv_stride, vdp += dest->uv_stride) - { - memcpy(udp, up, 4); - memcpy(vdp, vp, 4); - } - } - } - } - else /* totmap = 4 */ - { - multiframe_quality_enhance_block(16, qcurr, qprev, y_ptr, - u_ptr, v_ptr, - show->y_stride, - show->uv_stride, - yd_ptr, ud_ptr, vd_ptr, - dest->y_stride, - dest->uv_stride); + /* postprocess each macro block */ + for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { + for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { + /* if motion is high there will likely be no benefit */ + if 
(frame_type == INTER_FRAME) { + totmap = qualify_inter_mb(mode_info_context, map); + } else { + totmap = (frame_type == KEY_FRAME ? 4 : 0); + } + if (totmap) { + if (totmap < 4) { + int i, j; + for (i = 0; i < 2; ++i) { + for (j = 0; j < 2; ++j) { + if (map[i * 2 + j]) { + multiframe_quality_enhance_block( + 8, qcurr, qprev, y_ptr + 8 * (i * show->y_stride + j), + u_ptr + 4 * (i * show->uv_stride + j), + v_ptr + 4 * (i * show->uv_stride + j), show->y_stride, + show->uv_stride, yd_ptr + 8 * (i * dest->y_stride + j), + ud_ptr + 4 * (i * dest->uv_stride + j), + vd_ptr + 4 * (i * dest->uv_stride + j), dest->y_stride, + dest->uv_stride); + } else { + /* copy a 8x8 block */ + int k; + unsigned char *up = u_ptr + 4 * (i * show->uv_stride + j); + unsigned char *udp = ud_ptr + 4 * (i * dest->uv_stride + j); + unsigned char *vp = v_ptr + 4 * (i * show->uv_stride + j); + unsigned char *vdp = vd_ptr + 4 * (i * dest->uv_stride + j); + vp8_copy_mem8x8( + y_ptr + 8 * (i * show->y_stride + j), show->y_stride, + yd_ptr + 8 * (i * dest->y_stride + j), dest->y_stride); + for (k = 0; k < 4; ++k, up += show->uv_stride, + udp += dest->uv_stride, vp += show->uv_stride, + vdp += dest->uv_stride) { + memcpy(udp, up, 4); + memcpy(vdp, vp, 4); } + } } - else - { - vp8_copy_mem16x16(y_ptr, show->y_stride, yd_ptr, dest->y_stride); - vp8_copy_mem8x8(u_ptr, show->uv_stride, ud_ptr, dest->uv_stride); - vp8_copy_mem8x8(v_ptr, show->uv_stride, vd_ptr, dest->uv_stride); - } - y_ptr += 16; - u_ptr += 8; - v_ptr += 8; - yd_ptr += 16; - ud_ptr += 8; - vd_ptr += 8; - mode_info_context++; /* step to next MB */ + } + } else /* totmap = 4 */ + { + multiframe_quality_enhance_block( + 16, qcurr, qprev, y_ptr, u_ptr, v_ptr, show->y_stride, + show->uv_stride, yd_ptr, ud_ptr, vd_ptr, dest->y_stride, + dest->uv_stride); } - - y_ptr += show->y_stride * 16 - 16 * cm->mb_cols; - u_ptr += show->uv_stride * 8 - 8 * cm->mb_cols; - v_ptr += show->uv_stride * 8 - 8 * cm->mb_cols; - yd_ptr += dest->y_stride * 16 - 16 * 
cm->mb_cols; - ud_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols; - vd_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols; - - mode_info_context++; /* Skip border mb */ + } else { + vp8_copy_mem16x16(y_ptr, show->y_stride, yd_ptr, dest->y_stride); + vp8_copy_mem8x8(u_ptr, show->uv_stride, ud_ptr, dest->uv_stride); + vp8_copy_mem8x8(v_ptr, show->uv_stride, vd_ptr, dest->uv_stride); + } + y_ptr += 16; + u_ptr += 8; + v_ptr += 8; + yd_ptr += 16; + ud_ptr += 8; + vd_ptr += 8; + mode_info_context++; /* step to next MB */ } + + y_ptr += show->y_stride * 16 - 16 * cm->mb_cols; + u_ptr += show->uv_stride * 8 - 8 * cm->mb_cols; + v_ptr += show->uv_stride * 8 - 8 * cm->mb_cols; + yd_ptr += dest->y_stride * 16 - 16 * cm->mb_cols; + ud_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols; + vd_ptr += dest->uv_stride * 8 - 8 * cm->mb_cols; + + mode_info_context++; /* Skip border mb */ + } } diff --git a/libs/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c b/libs/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c index fc3bb8ad9d..1cfd146189 100644 --- a/libs/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c +++ b/libs/libvpx/vp8/common/mips/dspr2/dequantize_dspr2.c @@ -8,26 +8,22 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "vpx_config.h" #include "vp8_rtcd.h" #include "vpx_mem/vpx_mem.h" #if HAVE_DSPR2 -void vp8_dequant_idct_add_dspr2(short *input, short *dq, - unsigned char *dest, int stride) -{ - int i; +void vp8_dequant_idct_add_dspr2(short *input, short *dq, unsigned char *dest, + int stride) { + int i; - for (i = 0; i < 16; i++) - { - input[i] = dq[i] * input[i]; - } + for (i = 0; i < 16; ++i) { + input[i] = dq[i] * input[i]; + } - vp8_short_idct4x4llm_dspr2(input, dest, stride, dest, stride); - - memset(input, 0, 32); + vp8_short_idct4x4llm_dspr2(input, dest, stride, dest, stride); + memset(input, 0, 32); } #endif diff --git a/libs/libvpx/vp8/common/mips/dspr2/filter_dspr2.c b/libs/libvpx/vp8/common/mips/dspr2/filter_dspr2.c index ace5d400cb..7612024b7d 100644 --- a/libs/libvpx/vp8/common/mips/dspr2/filter_dspr2.c +++ b/libs/libvpx/vp8/common/mips/dspr2/filter_dspr2.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include #include "vp8_rtcd.h" #include "vpx_ports/mem.h" @@ -17,684 +16,364 @@ #define CROP_WIDTH 256 unsigned char ff_cropTbl[256 + 2 * CROP_WIDTH]; -static const unsigned short sub_pel_filterss[8][3] = -{ - { 0, 0, 0}, - { 0, 0x0601, 0x7b0c}, - { 0x0201, 0x0b08, 0x6c24}, - { 0, 0x0906, 0x5d32}, - { 0x0303, 0x1010, 0x4d4d}, - { 0, 0x0609, 0x325d}, - { 0x0102, 0x080b, 0x246c}, - { 0, 0x0106, 0x0c7b}, +static const unsigned short sub_pel_filterss[8][3] = { + { 0, 0, 0 }, + { 0, 0x0601, 0x7b0c }, + { 0x0201, 0x0b08, 0x6c24 }, + { 0, 0x0906, 0x5d32 }, + { 0x0303, 0x1010, 0x4d4d }, + { 0, 0x0609, 0x325d }, + { 0x0102, 0x080b, 0x246c }, + { 0, 0x0106, 0x0c7b }, +}; + +static const int sub_pel_filters_int[8][3] = { + { 0, 0, 0 }, + { 0x0000fffa, 0x007b000c, 0xffff0000 }, + { 0x0002fff5, 0x006c0024, 0xfff80001 }, + { 0x0000fff7, 0x005d0032, 0xfffa0000 }, + { 0x0003fff0, 0x004d004d, 0xfff00003 }, + { 0x0000fffa, 0x0032005d, 0xfff70000 }, + { 0x0001fff8, 0x0024006c, 0xfff50002 }, + { 0x0000ffff, 0x000c007b, 0xfffa0000 }, +}; 
+ +static const int sub_pel_filters_inv[8][3] = { + { 0, 0, 0 }, + { 0xfffa0000, 0x000c007b, 0x0000ffff }, + { 0xfff50002, 0x0024006c, 0x0001fff8 }, + { 0xfff70000, 0x0032005d, 0x0000fffa }, + { 0xfff00003, 0x004d004d, 0x0003fff0 }, + { 0xfffa0000, 0x005d0032, 0x0000fff7 }, + { 0xfff80001, 0x006c0024, 0x0002fff5 }, + { 0xffff0000, 0x007b000c, 0x0000fffa }, +}; + +/* clang-format off */ +static const int sub_pel_filters_int_tap_4[8][2] = { + { 0, 0}, + { 0xfffa007b, 0x000cffff}, + { 0, 0}, + { 0xfff7005d, 0x0032fffa}, + { 0, 0}, + { 0xfffa0032, 0x005dfff7}, + { 0, 0}, + { 0xffff000c, 0x007bfffa}, }; -static const int sub_pel_filters_int[8][3] = -{ - { 0, 0, 0}, - { 0x0000fffa, 0x007b000c, 0xffff0000}, - { 0x0002fff5, 0x006c0024, 0xfff80001}, - { 0x0000fff7, 0x005d0032, 0xfffa0000}, - { 0x0003fff0, 0x004d004d, 0xfff00003}, - { 0x0000fffa, 0x0032005d, 0xfff70000}, - { 0x0001fff8, 0x0024006c, 0xfff50002}, - { 0x0000ffff, 0x000c007b, 0xfffa0000}, +static const int sub_pel_filters_inv_tap_4[8][2] = { + { 0, 0}, + { 0x007bfffa, 0xffff000c}, + { 0, 0}, + { 0x005dfff7, 0xfffa0032}, + { 0, 0}, + { 0x0032fffa, 0xfff7005d}, + { 0, 0}, + { 0x000cffff, 0xfffa007b}, }; +/* clang-format on */ - -static const int sub_pel_filters_inv[8][3] = -{ - { 0, 0, 0}, - { 0xfffa0000, 0x000c007b, 0x0000ffff}, - { 0xfff50002, 0x0024006c, 0x0001fff8}, - { 0xfff70000, 0x0032005d, 0x0000fffa}, - { 0xfff00003, 0x004d004d, 0x0003fff0}, - { 0xfffa0000, 0x005d0032, 0x0000fff7}, - { 0xfff80001, 0x006c0024, 0x0002fff5}, - { 0xffff0000, 0x007b000c, 0x0000fffa}, -}; - - -static const int sub_pel_filters_int_tap_4[8][2] = -{ - { 0, 0}, - { 0xfffa007b, 0x000cffff}, - { 0, 0}, - { 0xfff7005d, 0x0032fffa}, - { 0, 0}, - { 0xfffa0032, 0x005dfff7}, - { 0, 0}, - { 0xffff000c, 0x007bfffa}, -}; - - -static const int sub_pel_filters_inv_tap_4[8][2] = -{ - { 0, 0}, - { 0x007bfffa, 0xffff000c}, - { 0, 0}, - { 0x005dfff7, 0xfffa0032}, - { 0, 0}, - { 0x0032fffa, 0xfff7005d}, - { 0, 0}, - { 0x000cffff, 0xfffa007b}, -}; - 
-inline void prefetch_load(unsigned char *src) -{ - __asm__ __volatile__ ( - "pref 0, 0(%[src]) \n\t" - : - : [src] "r" (src) - ); +inline void prefetch_load(unsigned char *src) { + __asm__ __volatile__("pref 0, 0(%[src]) \n\t" : : [src] "r"(src)); } - -inline void prefetch_store(unsigned char *dst) -{ - __asm__ __volatile__ ( - "pref 1, 0(%[dst]) \n\t" - : - : [dst] "r" (dst) - ); +inline void prefetch_store(unsigned char *dst) { + __asm__ __volatile__("pref 1, 0(%[dst]) \n\t" : : [dst] "r"(dst)); } -void dsputil_static_init(void) -{ - int i; +void dsputil_static_init(void) { + int i; - for (i = 0; i < 256; i++) ff_cropTbl[i + CROP_WIDTH] = i; + for (i = 0; i < 256; ++i) ff_cropTbl[i + CROP_WIDTH] = i; - for (i = 0; i < CROP_WIDTH; i++) - { - ff_cropTbl[i] = 0; - ff_cropTbl[i + CROP_WIDTH + 256] = 255; + for (i = 0; i < CROP_WIDTH; ++i) { + ff_cropTbl[i] = 0; + ff_cropTbl[i + CROP_WIDTH + 256] = 255; + } +} + +void vp8_filter_block2d_first_pass_4(unsigned char *RESTRICT src_ptr, + unsigned char *RESTRICT dst_ptr, + unsigned int src_pixels_per_line, + unsigned int output_height, int xoffset, + int pitch) { + unsigned int i; + int Temp1, Temp2, Temp3, Temp4; + + unsigned int vector4a = 64; + int vector1b, vector2b, vector3b; + unsigned int tp1, tp2, tn1, tn2; + unsigned int p1, p2, p3; + unsigned int n1, n2, n3; + unsigned char *cm = ff_cropTbl + CROP_WIDTH; + + vector3b = sub_pel_filters_inv[xoffset][2]; + + /* if (xoffset == 0) we don't need any filtering */ + if (vector3b == 0) { + for (i = 0; i < output_height; ++i) { + /* prefetch src_ptr data to cache memory */ + prefetch_load(src_ptr + src_pixels_per_line); + dst_ptr[0] = src_ptr[0]; + dst_ptr[1] = src_ptr[1]; + dst_ptr[2] = src_ptr[2]; + dst_ptr[3] = src_ptr[3]; + + /* next row... 
*/ + src_ptr += src_pixels_per_line; + dst_ptr += 4; } + } else { + if (vector3b > 65536) { + /* 6 tap filter */ + + vector1b = sub_pel_filters_inv[xoffset][0]; + vector2b = sub_pel_filters_inv[xoffset][1]; + + /* prefetch src_ptr data to cache memory */ + prefetch_load(src_ptr + src_pixels_per_line); + + for (i = output_height; i--;) { + /* apply filter with vectors pairs */ + __asm__ __volatile__( + "ulw %[tp1], -2(%[src_ptr]) \n\t" + "ulw %[tp2], 2(%[src_ptr]) \n\t" + + /* even 1. pixel */ + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[p1], %[tp1] \n\t" + "preceu.ph.qbl %[p2], %[tp1] \n\t" + "preceu.ph.qbr %[p3], %[tp2] \n\t" + "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t" + "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t" + + /* even 2. pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbl %[p1], %[tp2] \n\t" + "balign %[tp2], %[tp1], 3 \n\t" + "extp %[Temp1], $ac3, 9 \n\t" + "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t" + "dpa.w.ph $ac2, %[p1], %[vector3b] \n\t" + + /* odd 1. pixel */ + "ulw %[tn2], 3(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[n1], %[tp2] \n\t" + "preceu.ph.qbl %[n2], %[tp2] \n\t" + "preceu.ph.qbr %[n3], %[tn2] \n\t" + "extp %[Temp3], $ac2, 9 \n\t" + "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t" + "dpa.w.ph $ac3, %[n3], %[vector3b] \n\t" + + /* even 2. 
pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbl %[n1], %[tn2] \n\t" + "extp %[Temp2], $ac3, 9 \n\t" + "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t" + "dpa.w.ph $ac2, %[n1], %[vector3b] \n\t" + "extp %[Temp4], $ac2, 9 \n\t" + + /* clamp */ + "lbux %[tp1], %[Temp1](%[cm]) \n\t" + "lbux %[tn1], %[Temp2](%[cm]) \n\t" + "lbux %[tp2], %[Temp3](%[cm]) \n\t" + "lbux %[n2], %[Temp4](%[cm]) \n\t" + + /* store bytes */ + "sb %[tp1], 0(%[dst_ptr]) \n\t" + "sb %[tn1], 1(%[dst_ptr]) \n\t" + "sb %[tp2], 2(%[dst_ptr]) \n\t" + "sb %[n2], 3(%[dst_ptr]) \n\t" + + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1), + [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), + [n1] "=&r"(n1), [n2] "=&r"(n2), [n3] "=&r"(n3), + [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=&r"(Temp4) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector4a] "r"(vector4a), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr), + [vector3b] "r"(vector3b), [src_ptr] "r"(src_ptr)); + + /* Next row... */ + src_ptr += src_pixels_per_line; + dst_ptr += pitch; + } + } else { + /* 4 tap filter */ + + vector1b = sub_pel_filters_inv_tap_4[xoffset][0]; + vector2b = sub_pel_filters_inv_tap_4[xoffset][1]; + + for (i = output_height; i--;) { + /* apply filter with vectors pairs */ + __asm__ __volatile__( + "ulw %[tp1], -1(%[src_ptr]) \n\t" + "ulw %[tp2], 3(%[src_ptr]) \n\t" + + /* even 1. pixel */ + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[p1], %[tp1] \n\t" + "preceu.ph.qbl %[p2], %[tp1] \n\t" + "preceu.ph.qbr %[p3], %[tp2] \n\t" + "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t" + + /* even 2. pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t" + "extp %[Temp1], $ac3, 9 \n\t" + + /* odd 1. 
pixel */ + "srl %[tn1], %[tp2], 8 \n\t" + "balign %[tp2], %[tp1], 3 \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[n1], %[tp2] \n\t" + "preceu.ph.qbl %[n2], %[tp2] \n\t" + "preceu.ph.qbr %[n3], %[tn1] \n\t" + "extp %[Temp3], $ac2, 9 \n\t" + "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t" + + /* odd 2. pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "extp %[Temp2], $ac3, 9 \n\t" + "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t" + "extp %[Temp4], $ac2, 9 \n\t" + + /* clamp and store results */ + "lbux %[tp1], %[Temp1](%[cm]) \n\t" + "lbux %[tn1], %[Temp2](%[cm]) \n\t" + "lbux %[tp2], %[Temp3](%[cm]) \n\t" + "sb %[tp1], 0(%[dst_ptr]) \n\t" + "sb %[tn1], 1(%[dst_ptr]) \n\t" + "lbux %[n2], %[Temp4](%[cm]) \n\t" + "sb %[tp2], 2(%[dst_ptr]) \n\t" + "sb %[n2], 3(%[dst_ptr]) \n\t" + + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1), + [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [n1] "=&r"(n1), + [n2] "=&r"(n2), [n3] "=&r"(n3), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector4a] "r"(vector4a), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr), + [src_ptr] "r"(src_ptr)); + /* Next row... 
*/ + src_ptr += src_pixels_per_line; + dst_ptr += pitch; + } + } + } } -void vp8_filter_block2d_first_pass_4 -( - unsigned char *RESTRICT src_ptr, - unsigned char *RESTRICT dst_ptr, - unsigned int src_pixels_per_line, - unsigned int output_height, - int xoffset, - int pitch -) -{ - unsigned int i; - int Temp1, Temp2, Temp3, Temp4; +void vp8_filter_block2d_first_pass_8_all(unsigned char *RESTRICT src_ptr, + unsigned char *RESTRICT dst_ptr, + unsigned int src_pixels_per_line, + unsigned int output_height, + int xoffset, int pitch) { + unsigned int i; + int Temp1, Temp2, Temp3, Temp4; - unsigned int vector4a = 64; - int vector1b, vector2b, vector3b; - unsigned int tp1, tp2, tn1, tn2; - unsigned int p1, p2, p3; - unsigned int n1, n2, n3; - unsigned char *cm = ff_cropTbl + CROP_WIDTH; + unsigned int vector4a = 64; + unsigned int vector1b, vector2b, vector3b; + unsigned int tp1, tp2, tn1, tn2; + unsigned int p1, p2, p3, p4; + unsigned int n1, n2, n3, n4; + unsigned char *cm = ff_cropTbl + CROP_WIDTH; + + /* if (xoffset == 0) we don't need any filtering */ + if (xoffset == 0) { + for (i = 0; i < output_height; ++i) { + /* prefetch src_ptr data to cache memory */ + prefetch_load(src_ptr + src_pixels_per_line); + + dst_ptr[0] = src_ptr[0]; + dst_ptr[1] = src_ptr[1]; + dst_ptr[2] = src_ptr[2]; + dst_ptr[3] = src_ptr[3]; + dst_ptr[4] = src_ptr[4]; + dst_ptr[5] = src_ptr[5]; + dst_ptr[6] = src_ptr[6]; + dst_ptr[7] = src_ptr[7]; + + /* next row... */ + src_ptr += src_pixels_per_line; + dst_ptr += 8; + } + } else { vector3b = sub_pel_filters_inv[xoffset][2]; - /* if (xoffset == 0) we don't need any filtering */ - if (vector3b == 0) - { - for (i = 0; i < output_height; i++) - { - /* prefetch src_ptr data to cache memory */ - prefetch_load(src_ptr + src_pixels_per_line); - dst_ptr[0] = src_ptr[0]; - dst_ptr[1] = src_ptr[1]; - dst_ptr[2] = src_ptr[2]; - dst_ptr[3] = src_ptr[3]; + if (vector3b > 65536) { + /* 6 tap filter */ - /* next row... 
*/ - src_ptr += src_pixels_per_line; - dst_ptr += 4; - } - } - else - { - if (vector3b > 65536) - { - /* 6 tap filter */ + vector1b = sub_pel_filters_inv[xoffset][0]; + vector2b = sub_pel_filters_inv[xoffset][1]; - vector1b = sub_pel_filters_inv[xoffset][0]; - vector2b = sub_pel_filters_inv[xoffset][1]; - - /* prefetch src_ptr data to cache memory */ - prefetch_load(src_ptr + src_pixels_per_line); - - for (i = output_height; i--;) - { - /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "ulw %[tp1], -2(%[src_ptr]) \n\t" - "ulw %[tp2], 2(%[src_ptr]) \n\t" - - /* even 1. pixel */ - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[p1], %[tp1] \n\t" - "preceu.ph.qbl %[p2], %[tp1] \n\t" - "preceu.ph.qbr %[p3], %[tp2] \n\t" - "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t" - "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t" - - /* even 2. pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbl %[p1], %[tp2] \n\t" - "balign %[tp2], %[tp1], 3 \n\t" - "extp %[Temp1], $ac3, 9 \n\t" - "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t" - "dpa.w.ph $ac2, %[p1], %[vector3b] \n\t" - - /* odd 1. pixel */ - "ulw %[tn2], 3(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[n1], %[tp2] \n\t" - "preceu.ph.qbl %[n2], %[tp2] \n\t" - "preceu.ph.qbr %[n3], %[tn2] \n\t" - "extp %[Temp3], $ac2, 9 \n\t" - "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t" - "dpa.w.ph $ac3, %[n3], %[vector3b] \n\t" - - /* even 2. 
pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbl %[n1], %[tn2] \n\t" - "extp %[Temp2], $ac3, 9 \n\t" - "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t" - "dpa.w.ph $ac2, %[n1], %[vector3b] \n\t" - "extp %[Temp4], $ac2, 9 \n\t" - - /* clamp */ - "lbux %[tp1], %[Temp1](%[cm]) \n\t" - "lbux %[tn1], %[Temp2](%[cm]) \n\t" - "lbux %[tp2], %[Temp3](%[cm]) \n\t" - "lbux %[n2], %[Temp4](%[cm]) \n\t" - - /* store bytes */ - "sb %[tp1], 0(%[dst_ptr]) \n\t" - "sb %[tn1], 1(%[dst_ptr]) \n\t" - "sb %[tp2], 2(%[dst_ptr]) \n\t" - "sb %[n2], 3(%[dst_ptr]) \n\t" - - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tn1] "=&r" (tn1), - [tn2] "=&r" (tn2), [p1] "=&r" (p1), [p2] "=&r" (p2), - [p3] "=&r" (p3), [n1] "=&r" (n1), [n2] "=&r" (n2), - [n3] "=&r" (n3), [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector4a] "r" (vector4a), [cm] "r" (cm), [dst_ptr] "r" (dst_ptr), - [vector3b] "r" (vector3b), [src_ptr] "r" (src_ptr) - ); - - /* Next row... */ - src_ptr += src_pixels_per_line; - dst_ptr += pitch; - } - } - else - { - /* 4 tap filter */ - - vector1b = sub_pel_filters_inv_tap_4[xoffset][0]; - vector2b = sub_pel_filters_inv_tap_4[xoffset][1]; - - for (i = output_height; i--;) - { - /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "ulw %[tp1], -1(%[src_ptr]) \n\t" - "ulw %[tp2], 3(%[src_ptr]) \n\t" - - /* even 1. pixel */ - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[p1], %[tp1] \n\t" - "preceu.ph.qbl %[p2], %[tp1] \n\t" - "preceu.ph.qbr %[p3], %[tp2] \n\t" - "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t" - - /* even 2. pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t" - "extp %[Temp1], $ac3, 9 \n\t" - - /* odd 1. 
pixel */ - "srl %[tn1], %[tp2], 8 \n\t" - "balign %[tp2], %[tp1], 3 \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[n1], %[tp2] \n\t" - "preceu.ph.qbl %[n2], %[tp2] \n\t" - "preceu.ph.qbr %[n3], %[tn1] \n\t" - "extp %[Temp3], $ac2, 9 \n\t" - "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t" - - /* odd 2. pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "extp %[Temp2], $ac3, 9 \n\t" - "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t" - "extp %[Temp4], $ac2, 9 \n\t" - - /* clamp and store results */ - "lbux %[tp1], %[Temp1](%[cm]) \n\t" - "lbux %[tn1], %[Temp2](%[cm]) \n\t" - "lbux %[tp2], %[Temp3](%[cm]) \n\t" - "sb %[tp1], 0(%[dst_ptr]) \n\t" - "sb %[tn1], 1(%[dst_ptr]) \n\t" - "lbux %[n2], %[Temp4](%[cm]) \n\t" - "sb %[tp2], 2(%[dst_ptr]) \n\t" - "sb %[n2], 3(%[dst_ptr]) \n\t" - - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tn1] "=&r" (tn1), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), - [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector4a] "r" (vector4a), [cm] "r" (cm), [dst_ptr] "r" (dst_ptr), - [src_ptr] "r" (src_ptr) - ); - /* Next row... 
*/ - src_ptr += src_pixels_per_line; - dst_ptr += pitch; - } - } - } -} - -void vp8_filter_block2d_first_pass_8_all -( - unsigned char *RESTRICT src_ptr, - unsigned char *RESTRICT dst_ptr, - unsigned int src_pixels_per_line, - unsigned int output_height, - int xoffset, - int pitch -) -{ - unsigned int i; - int Temp1, Temp2, Temp3, Temp4; - - unsigned int vector4a = 64; - unsigned int vector1b, vector2b, vector3b; - unsigned int tp1, tp2, tn1, tn2; - unsigned int p1, p2, p3, p4; - unsigned int n1, n2, n3, n4; - - unsigned char *cm = ff_cropTbl + CROP_WIDTH; - - /* if (xoffset == 0) we don't need any filtering */ - if (xoffset == 0) - { - for (i = 0; i < output_height; i++) - { - /* prefetch src_ptr data to cache memory */ - prefetch_load(src_ptr + src_pixels_per_line); - - dst_ptr[0] = src_ptr[0]; - dst_ptr[1] = src_ptr[1]; - dst_ptr[2] = src_ptr[2]; - dst_ptr[3] = src_ptr[3]; - dst_ptr[4] = src_ptr[4]; - dst_ptr[5] = src_ptr[5]; - dst_ptr[6] = src_ptr[6]; - dst_ptr[7] = src_ptr[7]; - - /* next row... */ - src_ptr += src_pixels_per_line; - dst_ptr += 8; - } - } - else - { - vector3b = sub_pel_filters_inv[xoffset][2]; - - if (vector3b > 65536) - { - /* 6 tap filter */ - - vector1b = sub_pel_filters_inv[xoffset][0]; - vector2b = sub_pel_filters_inv[xoffset][1]; - - for (i = output_height; i--;) - { - /* prefetch src_ptr data to cache memory */ - prefetch_load(src_ptr + src_pixels_per_line); - - /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "ulw %[tp1], -2(%[src_ptr]) \n\t" - "ulw %[tp2], 2(%[src_ptr]) \n\t" - - /* even 1. pixel */ - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[p1], %[tp1] \n\t" - "preceu.ph.qbl %[p2], %[tp1] \n\t" - "preceu.ph.qbr %[p3], %[tp2] \n\t" - "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t" - "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t" - - /* even 2. 
pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbl %[p1], %[tp2] \n\t" - "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t" - "dpa.w.ph $ac2, %[p1], %[vector3b] \n\t" - - "balign %[tp2], %[tp1], 3 \n\t" - "extp %[Temp1], $ac3, 9 \n\t" - "ulw %[tn2], 3(%[src_ptr]) \n\t" - - /* odd 1. pixel */ - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[n1], %[tp2] \n\t" - "preceu.ph.qbl %[n2], %[tp2] \n\t" - "preceu.ph.qbr %[n3], %[tn2] \n\t" - "extp %[Temp3], $ac2, 9 \n\t" - "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t" - "dpa.w.ph $ac3, %[n3], %[vector3b] \n\t" - - /* odd 2. pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbl %[n1], %[tn2] \n\t" - "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t" - "dpa.w.ph $ac2, %[n1], %[vector3b] \n\t" - "ulw %[tp1], 6(%[src_ptr]) \n\t" - "extp %[Temp2], $ac3, 9 \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[p2], %[tp1] \n\t" - "extp %[Temp4], $ac2, 9 \n\t" - - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tn2] "=&r" (tn2), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), - [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector4a] "r" (vector4a), [vector3b] "r" (vector3b), - [src_ptr] "r" (src_ptr) - ); - - /* clamp and store results */ - dst_ptr[0] = cm[Temp1]; - dst_ptr[1] = cm[Temp2]; - dst_ptr[2] = cm[Temp3]; - dst_ptr[3] = cm[Temp4]; - - /* next 4 pixels */ - __asm__ __volatile__ ( - /* even 3. pixel */ - "dpa.w.ph $ac3, %[p3], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[p1], %[vector2b] \n\t" - "dpa.w.ph $ac3, %[p2], %[vector3b] \n\t" - - /* even 4. 
pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbl %[p4], %[tp1] \n\t" - "dpa.w.ph $ac2, %[p1], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[p2], %[vector2b] \n\t" - "dpa.w.ph $ac2, %[p4], %[vector3b] \n\t" - - "ulw %[tn1], 7(%[src_ptr]) \n\t" - "extp %[Temp1], $ac3, 9 \n\t" - - /* odd 3. pixel */ - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[n2], %[tn1] \n\t" - "dpa.w.ph $ac3, %[n3], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[n1], %[vector2b] \n\t" - "dpa.w.ph $ac3, %[n2], %[vector3b] \n\t" - "extp %[Temp3], $ac2, 9 \n\t" - - /* odd 4. pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbl %[n4], %[tn1] \n\t" - "dpa.w.ph $ac2, %[n1], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[n2], %[vector2b] \n\t" - "dpa.w.ph $ac2, %[n4], %[vector3b] \n\t" - "extp %[Temp2], $ac3, 9 \n\t" - "extp %[Temp4], $ac2, 9 \n\t" - - : [tn1] "=&r" (tn1), [n2] "=&r" (n2), - [p4] "=&r" (p4), [n4] "=&r" (n4), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4) - : [tp1] "r" (tp1), [vector1b] "r" (vector1b), [p2] "r" (p2), - [vector2b] "r" (vector2b), [n1] "r" (n1), [p1] "r" (p1), - [vector4a] "r" (vector4a), [vector3b] "r" (vector3b), - [p3] "r" (p3), [n3] "r" (n3), [src_ptr] "r" (src_ptr) - ); - - /* clamp and store results */ - dst_ptr[4] = cm[Temp1]; - dst_ptr[5] = cm[Temp2]; - dst_ptr[6] = cm[Temp3]; - dst_ptr[7] = cm[Temp4]; - - src_ptr += src_pixels_per_line; - dst_ptr += pitch; - } - } - else - { - /* 4 tap filter */ - - vector1b = sub_pel_filters_inv_tap_4[xoffset][0]; - vector2b = sub_pel_filters_inv_tap_4[xoffset][1]; - - for (i = output_height; i--;) - { - /* prefetch src_ptr data to cache memory */ - prefetch_load(src_ptr + src_pixels_per_line); - - /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "ulw %[tp1], -1(%[src_ptr]) \n\t" - - /* even 1. 
pixel */ - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[p1], %[tp1] \n\t" - "preceu.ph.qbl %[p2], %[tp1] \n\t" - "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t" - - "ulw %[tp2], 3(%[src_ptr]) \n\t" - - /* even 2. pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbr %[p3], %[tp2] \n\t" - "preceu.ph.qbl %[p4], %[tp2] \n\t" - "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t" - "extp %[Temp1], $ac3, 9 \n\t" - - "balign %[tp2], %[tp1], 3 \n\t" - - /* odd 1. pixel */ - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[n1], %[tp2] \n\t" - "preceu.ph.qbl %[n2], %[tp2] \n\t" - "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t" - "extp %[Temp3], $ac2, 9 \n\t" - - "ulw %[tn2], 4(%[src_ptr]) \n\t" - - /* odd 2. pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbr %[n3], %[tn2] \n\t" - "preceu.ph.qbl %[n4], %[tn2] \n\t" - "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t" - "ulw %[tp1], 7(%[src_ptr]) \n\t" - "extp %[Temp2], $ac3, 9 \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "extp %[Temp4], $ac2, 9 \n\t" - - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [tn2] "=&r" (tn2), [p1] "=&r" (p1), [p2] "=&r" (p2), - [p3] "=&r" (p3), [p4] "=&r" (p4), [n1] "=&r" (n1), - [n2] "=&r" (n2), [n3] "=&r" (n3), [n4] "=&r" (n4), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector4a] "r" (vector4a), [src_ptr] "r" (src_ptr) - ); - - /* clamp and store results */ - dst_ptr[0] = cm[Temp1]; - dst_ptr[1] = cm[Temp2]; - dst_ptr[2] = cm[Temp3]; - dst_ptr[3] = cm[Temp4]; - - /* next 4 pixels */ - __asm__ __volatile__ ( - /* even 3. pixel */ - "dpa.w.ph $ac3, %[p3], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[p4], %[vector2b] \n\t" - - /* even 4. 
pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbr %[p2], %[tp1] \n\t" - "dpa.w.ph $ac2, %[p4], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[p2], %[vector2b] \n\t" - "extp %[Temp1], $ac3, 9 \n\t" - - /* odd 3. pixel */ - "mtlo %[vector4a], $ac3 \n\t" - "dpa.w.ph $ac3, %[n3], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[n4], %[vector2b] \n\t" - "ulw %[tn1], 8(%[src_ptr]) \n\t" - "extp %[Temp3], $ac2, 9 \n\t" - - /* odd 4. pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbr %[n2], %[tn1] \n\t" - "dpa.w.ph $ac2, %[n4], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[n2], %[vector2b] \n\t" - "extp %[Temp2], $ac3, 9 \n\t" - "extp %[Temp4], $ac2, 9 \n\t" - - : [tn1] "=&r" (tn1), [p2] "=&r" (p2), [n2] "=&r" (n2), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4) - : [tp1] "r" (tp1), [p3] "r" (p3), [p4] "r" (p4), - [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector4a] "r" (vector4a), [src_ptr] "r" (src_ptr), - [n3] "r" (n3), [n4] "r" (n4) - ); - - /* clamp and store results */ - dst_ptr[4] = cm[Temp1]; - dst_ptr[5] = cm[Temp2]; - dst_ptr[6] = cm[Temp3]; - dst_ptr[7] = cm[Temp4]; - - /* next row... 
*/ - src_ptr += src_pixels_per_line; - dst_ptr += pitch; - } - } - } -} - - -void vp8_filter_block2d_first_pass16_6tap -( - unsigned char *RESTRICT src_ptr, - unsigned char *RESTRICT dst_ptr, - unsigned int src_pixels_per_line, - unsigned int output_height, - int xoffset, - int pitch -) -{ - unsigned int i; - int Temp1, Temp2, Temp3, Temp4; - - unsigned int vector4a; - unsigned int vector1b, vector2b, vector3b; - unsigned int tp1, tp2, tn1, tn2; - unsigned int p1, p2, p3, p4; - unsigned int n1, n2, n3, n4; - unsigned char *cm = ff_cropTbl + CROP_WIDTH; - - vector1b = sub_pel_filters_inv[xoffset][0]; - vector2b = sub_pel_filters_inv[xoffset][1]; - vector3b = sub_pel_filters_inv[xoffset][2]; - vector4a = 64; - - for (i = output_height; i--;) - { + for (i = output_height; i--;) { /* prefetch src_ptr data to cache memory */ prefetch_load(src_ptr + src_pixels_per_line); /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "ulw %[tp1], -2(%[src_ptr]) \n\t" - "ulw %[tp2], 2(%[src_ptr]) \n\t" + __asm__ __volatile__( + "ulw %[tp1], -2(%[src_ptr]) \n\t" + "ulw %[tp2], 2(%[src_ptr]) \n\t" /* even 1. pixel */ - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[p1], %[tp1] \n\t" - "preceu.ph.qbl %[p2], %[tp1] \n\t" - "preceu.ph.qbr %[p3], %[tp2] \n\t" - "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t" - "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[p1], %[tp1] \n\t" + "preceu.ph.qbl %[p2], %[tp1] \n\t" + "preceu.ph.qbr %[p3], %[tp2] \n\t" + "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t" + "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t" /* even 2. 
pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbl %[p1], %[tp2] \n\t" - "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t" - "dpa.w.ph $ac2, %[p1], %[vector3b] \n\t" + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbl %[p1], %[tp2] \n\t" + "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t" + "dpa.w.ph $ac2, %[p1], %[vector3b] \n\t" - "balign %[tp2], %[tp1], 3 \n\t" - "ulw %[tn2], 3(%[src_ptr]) \n\t" - "extp %[Temp1], $ac3, 9 \n\t" + "balign %[tp2], %[tp1], 3 \n\t" + "extp %[Temp1], $ac3, 9 \n\t" + "ulw %[tn2], 3(%[src_ptr]) \n\t" /* odd 1. pixel */ - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[n1], %[tp2] \n\t" - "preceu.ph.qbl %[n2], %[tp2] \n\t" - "preceu.ph.qbr %[n3], %[tn2] \n\t" - "extp %[Temp3], $ac2, 9 \n\t" - "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t" - "dpa.w.ph $ac3, %[n3], %[vector3b] \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[n1], %[tp2] \n\t" + "preceu.ph.qbl %[n2], %[tp2] \n\t" + "preceu.ph.qbr %[n3], %[tn2] \n\t" + "extp %[Temp3], $ac2, 9 \n\t" + "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t" + "dpa.w.ph $ac3, %[n3], %[vector3b] \n\t" /* odd 2. 
pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbl %[n1], %[tn2] \n\t" - "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t" - "dpa.w.ph $ac2, %[n1], %[vector3b] \n\t" - "ulw %[tp1], 6(%[src_ptr]) \n\t" - "extp %[Temp2], $ac3, 9 \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[p2], %[tp1] \n\t" - "extp %[Temp4], $ac2, 9 \n\t" + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbl %[n1], %[tn2] \n\t" + "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t" + "dpa.w.ph $ac2, %[n1], %[vector3b] \n\t" + "ulw %[tp1], 6(%[src_ptr]) \n\t" + "extp %[Temp2], $ac3, 9 \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[p2], %[tp1] \n\t" + "extp %[Temp4], $ac2, 9 \n\t" - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tn2] "=&r" (tn2), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), - [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector4a] "r" (vector4a), [vector3b] "r" (vector3b), - [src_ptr] "r" (src_ptr) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn2] "=&r"(tn2), + [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [n1] "=&r"(n1), + [n2] "=&r"(n2), [n3] "=&r"(n3), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), [Temp4] "=r"(Temp4) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector4a] "r"(vector4a), [vector3b] "r"(vector3b), + [src_ptr] "r"(src_ptr)); /* clamp and store results */ dst_ptr[0] = cm[Temp1]; @@ -703,50 +382,46 @@ void vp8_filter_block2d_first_pass16_6tap dst_ptr[3] = cm[Temp4]; /* next 4 pixels */ - __asm__ __volatile__ ( + __asm__ __volatile__( /* even 3. 
pixel */ - "dpa.w.ph $ac3, %[p3], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[p1], %[vector2b] \n\t" - "dpa.w.ph $ac3, %[p2], %[vector3b] \n\t" + "dpa.w.ph $ac3, %[p3], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[p1], %[vector2b] \n\t" + "dpa.w.ph $ac3, %[p2], %[vector3b] \n\t" /* even 4. pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbl %[p4], %[tp1] \n\t" - "dpa.w.ph $ac2, %[p1], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[p2], %[vector2b] \n\t" - "dpa.w.ph $ac2, %[p4], %[vector3b] \n\t" - "ulw %[tn1], 7(%[src_ptr]) \n\t" - "extp %[Temp1], $ac3, 9 \n\t" + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbl %[p4], %[tp1] \n\t" + "dpa.w.ph $ac2, %[p1], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[p2], %[vector2b] \n\t" + "dpa.w.ph $ac2, %[p4], %[vector3b] \n\t" + + "ulw %[tn1], 7(%[src_ptr]) \n\t" + "extp %[Temp1], $ac3, 9 \n\t" /* odd 3. pixel */ - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[n2], %[tn1] \n\t" - "dpa.w.ph $ac3, %[n3], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[n1], %[vector2b] \n\t" - "dpa.w.ph $ac3, %[n2], %[vector3b] \n\t" - "extp %[Temp3], $ac2, 9 \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[n2], %[tn1] \n\t" + "dpa.w.ph $ac3, %[n3], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[n1], %[vector2b] \n\t" + "dpa.w.ph $ac3, %[n2], %[vector3b] \n\t" + "extp %[Temp3], $ac2, 9 \n\t" /* odd 4. 
pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbl %[n4], %[tn1] \n\t" - "dpa.w.ph $ac2, %[n1], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[n2], %[vector2b] \n\t" - "dpa.w.ph $ac2, %[n4], %[vector3b] \n\t" - "ulw %[tp2], 10(%[src_ptr]) \n\t" - "extp %[Temp2], $ac3, 9 \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[p1], %[tp2] \n\t" - "extp %[Temp4], $ac2, 9 \n\t" + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbl %[n4], %[tn1] \n\t" + "dpa.w.ph $ac2, %[n1], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[n2], %[vector2b] \n\t" + "dpa.w.ph $ac2, %[n4], %[vector3b] \n\t" + "extp %[Temp2], $ac3, 9 \n\t" + "extp %[Temp4], $ac2, 9 \n\t" - : [tn1] "=&r" (tn1), [tp2] "=&r" (tp2), [n2] "=&r" (n2), - [p4] "=&r" (p4), [n4] "=&r" (n4), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [tp1] "r" (tp1), [n1] "r" (n1), [p1] "r" (p1), - [vector4a] "r" (vector4a), [p2] "r" (p2), [vector3b] "r" (vector3b), - [p3] "r" (p3), [n3] "r" (n3), [src_ptr] "r" (src_ptr) - ); + : [tn1] "=&r"(tn1), [n2] "=&r"(n2), [p4] "=&r"(p4), [n4] "=&r"(n4), + [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=r"(Temp4) + : [tp1] "r"(tp1), [vector1b] "r"(vector1b), [p2] "r"(p2), + [vector2b] "r"(vector2b), [n1] "r"(n1), [p1] "r"(p1), + [vector4a] "r"(vector4a), [vector3b] "r"(vector3b), [p3] "r"(p3), + [n3] "r"(n3), [src_ptr] "r"(src_ptr)); /* clamp and store results */ dst_ptr[4] = cm[Temp1]; @@ -754,2070 +429,2338 @@ void vp8_filter_block2d_first_pass16_6tap dst_ptr[6] = cm[Temp3]; dst_ptr[7] = cm[Temp4]; - /* next 4 pixels */ - __asm__ __volatile__ ( - /* even 5. pixel */ - "dpa.w.ph $ac3, %[p2], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[p4], %[vector2b] \n\t" - "dpa.w.ph $ac3, %[p1], %[vector3b] \n\t" - - /* even 6. 
pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbl %[p3], %[tp2] \n\t" - "dpa.w.ph $ac2, %[p4], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[p1], %[vector2b] \n\t" - "dpa.w.ph $ac2, %[p3], %[vector3b] \n\t" - - "ulw %[tn1], 11(%[src_ptr]) \n\t" - "extp %[Temp1], $ac3, 9 \n\t" - - /* odd 5. pixel */ - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[n1], %[tn1] \n\t" - "dpa.w.ph $ac3, %[n2], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[n4], %[vector2b] \n\t" - "dpa.w.ph $ac3, %[n1], %[vector3b] \n\t" - "extp %[Temp3], $ac2, 9 \n\t" - - /* odd 6. pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbl %[n3], %[tn1] \n\t" - "dpa.w.ph $ac2, %[n4], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[n1], %[vector2b] \n\t" - "dpa.w.ph $ac2, %[n3], %[vector3b] \n\t" - "ulw %[tp1], 14(%[src_ptr]) \n\t" - "extp %[Temp2], $ac3, 9 \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[p4], %[tp1] \n\t" - "extp %[Temp4], $ac2, 9 \n\t" - - : [tn1] "=&r" (tn1), [tp1] "=&r" (tp1), - [n1] "=&r" (n1), [p3] "=&r" (p3), [n3] "=&r" (n3), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [tp2] "r" (tp2), [p2] "r" (p2), [n2] "r" (n2), - [p4] "r" (p4), [n4] "r" (n4), [p1] "r" (p1), [src_ptr] "r" (src_ptr), - [vector4a] "r" (vector4a), [vector3b] "r" (vector3b) - ); - - /* clamp and store results */ - dst_ptr[8] = cm[Temp1]; - dst_ptr[9] = cm[Temp2]; - dst_ptr[10] = cm[Temp3]; - dst_ptr[11] = cm[Temp4]; - - /* next 4 pixels */ - __asm__ __volatile__ ( - /* even 7. pixel */ - "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[p3], %[vector2b] \n\t" - "dpa.w.ph $ac3, %[p4], %[vector3b] \n\t" - - /* even 8. pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbl %[p2], %[tp1] \n\t" - "dpa.w.ph $ac2, %[p3], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[p4], %[vector2b] \n\t" - "dpa.w.ph $ac2, %[p2], %[vector3b] \n\t" - "ulw %[tn1], 15(%[src_ptr]) \n\t" - "extp %[Temp1], $ac3, 9 \n\t" - - /* odd 7. 
pixel */ - "mtlo %[vector4a], $ac3 \n\t" - "preceu.ph.qbr %[n4], %[tn1] \n\t" - "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[n3], %[vector2b] \n\t" - "dpa.w.ph $ac3, %[n4], %[vector3b] \n\t" - "extp %[Temp3], $ac2, 9 \n\t" - - /* odd 8. pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "preceu.ph.qbl %[n2], %[tn1] \n\t" - "dpa.w.ph $ac2, %[n3], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[n4], %[vector2b] \n\t" - "dpa.w.ph $ac2, %[n2], %[vector3b] \n\t" - "extp %[Temp2], $ac3, 9 \n\t" - "extp %[Temp4], $ac2, 9 \n\t" - - /* clamp and store results */ - "lbux %[tp1], %[Temp1](%[cm]) \n\t" - "lbux %[tn1], %[Temp2](%[cm]) \n\t" - "lbux %[p2], %[Temp3](%[cm]) \n\t" - "sb %[tp1], 12(%[dst_ptr]) \n\t" - "sb %[tn1], 13(%[dst_ptr]) \n\t" - "lbux %[n2], %[Temp4](%[cm]) \n\t" - "sb %[p2], 14(%[dst_ptr]) \n\t" - "sb %[n2], 15(%[dst_ptr]) \n\t" - - : [tn1] "=&r" (tn1), [p2] "=&r" (p2), [n2] "=&r" (n2), [n4] "=&r" (n4), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [tp1] "r" (tp1), [p4] "r" (p4), [n1] "r" (n1), [p1] "r" (p1), - [vector4a] "r" (vector4a), [vector3b] "r" (vector3b), [p3] "r" (p3), - [n3] "r" (n3), [src_ptr] "r" (src_ptr), - [cm] "r" (cm), [dst_ptr] "r" (dst_ptr) - ); - src_ptr += src_pixels_per_line; dst_ptr += pitch; + } + } else { + /* 4 tap filter */ + + vector1b = sub_pel_filters_inv_tap_4[xoffset][0]; + vector2b = sub_pel_filters_inv_tap_4[xoffset][1]; + + for (i = output_height; i--;) { + /* prefetch src_ptr data to cache memory */ + prefetch_load(src_ptr + src_pixels_per_line); + + /* apply filter with vectors pairs */ + __asm__ __volatile__( + "ulw %[tp1], -1(%[src_ptr]) \n\t" + + /* even 1. pixel */ + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[p1], %[tp1] \n\t" + "preceu.ph.qbl %[p2], %[tp1] \n\t" + "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t" + + "ulw %[tp2], 3(%[src_ptr]) \n\t" + + /* even 2. 
pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbr %[p3], %[tp2] \n\t" + "preceu.ph.qbl %[p4], %[tp2] \n\t" + "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t" + "extp %[Temp1], $ac3, 9 \n\t" + + "balign %[tp2], %[tp1], 3 \n\t" + + /* odd 1. pixel */ + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[n1], %[tp2] \n\t" + "preceu.ph.qbl %[n2], %[tp2] \n\t" + "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t" + "extp %[Temp3], $ac2, 9 \n\t" + + "ulw %[tn2], 4(%[src_ptr]) \n\t" + + /* odd 2. pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbr %[n3], %[tn2] \n\t" + "preceu.ph.qbl %[n4], %[tn2] \n\t" + "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t" + "ulw %[tp1], 7(%[src_ptr]) \n\t" + "extp %[Temp2], $ac3, 9 \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "extp %[Temp4], $ac2, 9 \n\t" + + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn2] "=&r"(tn2), + [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4), + [n1] "=&r"(n1), [n2] "=&r"(n2), [n3] "=&r"(n3), [n4] "=&r"(n4), + [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=r"(Temp4) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector4a] "r"(vector4a), [src_ptr] "r"(src_ptr)); + + /* clamp and store results */ + dst_ptr[0] = cm[Temp1]; + dst_ptr[1] = cm[Temp2]; + dst_ptr[2] = cm[Temp3]; + dst_ptr[3] = cm[Temp4]; + + /* next 4 pixels */ + __asm__ __volatile__( + /* even 3. pixel */ + "dpa.w.ph $ac3, %[p3], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[p4], %[vector2b] \n\t" + + /* even 4. pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbr %[p2], %[tp1] \n\t" + "dpa.w.ph $ac2, %[p4], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[p2], %[vector2b] \n\t" + "extp %[Temp1], $ac3, 9 \n\t" + + /* odd 3. 
pixel */ + "mtlo %[vector4a], $ac3 \n\t" + "dpa.w.ph $ac3, %[n3], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[n4], %[vector2b] \n\t" + "ulw %[tn1], 8(%[src_ptr]) \n\t" + "extp %[Temp3], $ac2, 9 \n\t" + + /* odd 4. pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbr %[n2], %[tn1] \n\t" + "dpa.w.ph $ac2, %[n4], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[n2], %[vector2b] \n\t" + "extp %[Temp2], $ac3, 9 \n\t" + "extp %[Temp4], $ac2, 9 \n\t" + + : [tn1] "=&r"(tn1), [p2] "=&r"(p2), [n2] "=&r"(n2), + [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=r"(Temp4) + : [tp1] "r"(tp1), [p3] "r"(p3), [p4] "r"(p4), + [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector4a] "r"(vector4a), [src_ptr] "r"(src_ptr), [n3] "r"(n3), + [n4] "r"(n4)); + + /* clamp and store results */ + dst_ptr[4] = cm[Temp1]; + dst_ptr[5] = cm[Temp2]; + dst_ptr[6] = cm[Temp3]; + dst_ptr[7] = cm[Temp4]; + + /* next row... */ + src_ptr += src_pixels_per_line; + dst_ptr += pitch; + } } + } } +void vp8_filter_block2d_first_pass16_6tap(unsigned char *RESTRICT src_ptr, + unsigned char *RESTRICT dst_ptr, + unsigned int src_pixels_per_line, + unsigned int output_height, + int xoffset, int pitch) { + unsigned int i; + int Temp1, Temp2, Temp3, Temp4; -void vp8_filter_block2d_first_pass16_0 -( - unsigned char *RESTRICT src_ptr, - unsigned char *RESTRICT output_ptr, - unsigned int src_pixels_per_line -) -{ - int Temp1, Temp2, Temp3, Temp4; - int i; + unsigned int vector4a; + unsigned int vector1b, vector2b, vector3b; + unsigned int tp1, tp2, tn1, tn2; + unsigned int p1, p2, p3, p4; + unsigned int n1, n2, n3, n4; + unsigned char *cm = ff_cropTbl + CROP_WIDTH; + + vector1b = sub_pel_filters_inv[xoffset][0]; + vector2b = sub_pel_filters_inv[xoffset][1]; + vector3b = sub_pel_filters_inv[xoffset][2]; + vector4a = 64; + + for (i = output_height; i--;) { + /* prefetch src_ptr data to cache memory */ + prefetch_load(src_ptr + src_pixels_per_line); + + /* apply filter with vectors pairs */ + 
__asm__ __volatile__( + "ulw %[tp1], -2(%[src_ptr]) \n\t" + "ulw %[tp2], 2(%[src_ptr]) \n\t" + + /* even 1. pixel */ + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[p1], %[tp1] \n\t" + "preceu.ph.qbl %[p2], %[tp1] \n\t" + "preceu.ph.qbr %[p3], %[tp2] \n\t" + "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t" + "dpa.w.ph $ac3, %[p3], %[vector3b] \n\t" + + /* even 2. pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbl %[p1], %[tp2] \n\t" + "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t" + "dpa.w.ph $ac2, %[p1], %[vector3b] \n\t" + + "balign %[tp2], %[tp1], 3 \n\t" + "ulw %[tn2], 3(%[src_ptr]) \n\t" + "extp %[Temp1], $ac3, 9 \n\t" + + /* odd 1. pixel */ + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[n1], %[tp2] \n\t" + "preceu.ph.qbl %[n2], %[tp2] \n\t" + "preceu.ph.qbr %[n3], %[tn2] \n\t" + "extp %[Temp3], $ac2, 9 \n\t" + "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t" + "dpa.w.ph $ac3, %[n3], %[vector3b] \n\t" + + /* odd 2. 
pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbl %[n1], %[tn2] \n\t" + "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t" + "dpa.w.ph $ac2, %[n1], %[vector3b] \n\t" + "ulw %[tp1], 6(%[src_ptr]) \n\t" + "extp %[Temp2], $ac3, 9 \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[p2], %[tp1] \n\t" + "extp %[Temp4], $ac2, 9 \n\t" + + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn2] "=&r"(tn2), [p1] "=&r"(p1), + [p2] "=&r"(p2), [p3] "=&r"(p3), [n1] "=&r"(n1), [n2] "=&r"(n2), + [n3] "=&r"(n3), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), + [Temp3] "=&r"(Temp3), [Temp4] "=r"(Temp4) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector4a] "r"(vector4a), [vector3b] "r"(vector3b), + [src_ptr] "r"(src_ptr)); + + /* clamp and store results */ + dst_ptr[0] = cm[Temp1]; + dst_ptr[1] = cm[Temp2]; + dst_ptr[2] = cm[Temp3]; + dst_ptr[3] = cm[Temp4]; + + /* next 4 pixels */ + __asm__ __volatile__( + /* even 3. pixel */ + "dpa.w.ph $ac3, %[p3], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[p1], %[vector2b] \n\t" + "dpa.w.ph $ac3, %[p2], %[vector3b] \n\t" + + /* even 4. pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbl %[p4], %[tp1] \n\t" + "dpa.w.ph $ac2, %[p1], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[p2], %[vector2b] \n\t" + "dpa.w.ph $ac2, %[p4], %[vector3b] \n\t" + "ulw %[tn1], 7(%[src_ptr]) \n\t" + "extp %[Temp1], $ac3, 9 \n\t" + + /* odd 3. pixel */ + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[n2], %[tn1] \n\t" + "dpa.w.ph $ac3, %[n3], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[n1], %[vector2b] \n\t" + "dpa.w.ph $ac3, %[n2], %[vector3b] \n\t" + "extp %[Temp3], $ac2, 9 \n\t" + + /* odd 4. 
pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbl %[n4], %[tn1] \n\t" + "dpa.w.ph $ac2, %[n1], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[n2], %[vector2b] \n\t" + "dpa.w.ph $ac2, %[n4], %[vector3b] \n\t" + "ulw %[tp2], 10(%[src_ptr]) \n\t" + "extp %[Temp2], $ac3, 9 \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[p1], %[tp2] \n\t" + "extp %[Temp4], $ac2, 9 \n\t" + + : [tn1] "=&r"(tn1), [tp2] "=&r"(tp2), [n2] "=&r"(n2), [p4] "=&r"(p4), + [n4] "=&r"(n4), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), + [Temp3] "=&r"(Temp3), [Temp4] "=r"(Temp4) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), [tp1] "r"(tp1), + [n1] "r"(n1), [p1] "r"(p1), [vector4a] "r"(vector4a), [p2] "r"(p2), + [vector3b] "r"(vector3b), [p3] "r"(p3), [n3] "r"(n3), + [src_ptr] "r"(src_ptr)); + + /* clamp and store results */ + dst_ptr[4] = cm[Temp1]; + dst_ptr[5] = cm[Temp2]; + dst_ptr[6] = cm[Temp3]; + dst_ptr[7] = cm[Temp4]; + + /* next 4 pixels */ + __asm__ __volatile__( + /* even 5. pixel */ + "dpa.w.ph $ac3, %[p2], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[p4], %[vector2b] \n\t" + "dpa.w.ph $ac3, %[p1], %[vector3b] \n\t" + + /* even 6. pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbl %[p3], %[tp2] \n\t" + "dpa.w.ph $ac2, %[p4], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[p1], %[vector2b] \n\t" + "dpa.w.ph $ac2, %[p3], %[vector3b] \n\t" + + "ulw %[tn1], 11(%[src_ptr]) \n\t" + "extp %[Temp1], $ac3, 9 \n\t" + + /* odd 5. pixel */ + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[n1], %[tn1] \n\t" + "dpa.w.ph $ac3, %[n2], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[n4], %[vector2b] \n\t" + "dpa.w.ph $ac3, %[n1], %[vector3b] \n\t" + "extp %[Temp3], $ac2, 9 \n\t" + + /* odd 6. 
pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbl %[n3], %[tn1] \n\t" + "dpa.w.ph $ac2, %[n4], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[n1], %[vector2b] \n\t" + "dpa.w.ph $ac2, %[n3], %[vector3b] \n\t" + "ulw %[tp1], 14(%[src_ptr]) \n\t" + "extp %[Temp2], $ac3, 9 \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[p4], %[tp1] \n\t" + "extp %[Temp4], $ac2, 9 \n\t" + + : [tn1] "=&r"(tn1), [tp1] "=&r"(tp1), [n1] "=&r"(n1), [p3] "=&r"(p3), + [n3] "=&r"(n3), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), + [Temp3] "=&r"(Temp3), [Temp4] "=r"(Temp4) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), [tp2] "r"(tp2), + [p2] "r"(p2), [n2] "r"(n2), [p4] "r"(p4), [n4] "r"(n4), [p1] "r"(p1), + [src_ptr] "r"(src_ptr), [vector4a] "r"(vector4a), + [vector3b] "r"(vector3b)); + + /* clamp and store results */ + dst_ptr[8] = cm[Temp1]; + dst_ptr[9] = cm[Temp2]; + dst_ptr[10] = cm[Temp3]; + dst_ptr[11] = cm[Temp4]; + + /* next 4 pixels */ + __asm__ __volatile__( + /* even 7. pixel */ + "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[p3], %[vector2b] \n\t" + "dpa.w.ph $ac3, %[p4], %[vector3b] \n\t" + + /* even 8. pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbl %[p2], %[tp1] \n\t" + "dpa.w.ph $ac2, %[p3], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[p4], %[vector2b] \n\t" + "dpa.w.ph $ac2, %[p2], %[vector3b] \n\t" + "ulw %[tn1], 15(%[src_ptr]) \n\t" + "extp %[Temp1], $ac3, 9 \n\t" + + /* odd 7. pixel */ + "mtlo %[vector4a], $ac3 \n\t" + "preceu.ph.qbr %[n4], %[tn1] \n\t" + "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t" + "dpa.w.ph $ac3, %[n3], %[vector2b] \n\t" + "dpa.w.ph $ac3, %[n4], %[vector3b] \n\t" + "extp %[Temp3], $ac2, 9 \n\t" + + /* odd 8. 
pixel */ + "mtlo %[vector4a], $ac2 \n\t" + "preceu.ph.qbl %[n2], %[tn1] \n\t" + "dpa.w.ph $ac2, %[n3], %[vector1b] \n\t" + "dpa.w.ph $ac2, %[n4], %[vector2b] \n\t" + "dpa.w.ph $ac2, %[n2], %[vector3b] \n\t" + "extp %[Temp2], $ac3, 9 \n\t" + "extp %[Temp4], $ac2, 9 \n\t" + + /* clamp and store results */ + "lbux %[tp1], %[Temp1](%[cm]) \n\t" + "lbux %[tn1], %[Temp2](%[cm]) \n\t" + "lbux %[p2], %[Temp3](%[cm]) \n\t" + "sb %[tp1], 12(%[dst_ptr]) \n\t" + "sb %[tn1], 13(%[dst_ptr]) \n\t" + "lbux %[n2], %[Temp4](%[cm]) \n\t" + "sb %[p2], 14(%[dst_ptr]) \n\t" + "sb %[n2], 15(%[dst_ptr]) \n\t" + + : [tn1] "=&r"(tn1), [p2] "=&r"(p2), [n2] "=&r"(n2), [n4] "=&r"(n4), + [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=r"(Temp4) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), [tp1] "r"(tp1), + [p4] "r"(p4), [n1] "r"(n1), [p1] "r"(p1), [vector4a] "r"(vector4a), + [vector3b] "r"(vector3b), [p3] "r"(p3), [n3] "r"(n3), + [src_ptr] "r"(src_ptr), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr)); + + src_ptr += src_pixels_per_line; + dst_ptr += pitch; + } +} + +void vp8_filter_block2d_first_pass16_0(unsigned char *RESTRICT src_ptr, + unsigned char *RESTRICT output_ptr, + unsigned int src_pixels_per_line) { + int Temp1, Temp2, Temp3, Temp4; + int i; + + /* prefetch src_ptr data to cache memory */ + prefetch_store(output_ptr + 32); + + /* copy memory from src buffer to dst buffer */ + for (i = 0; i < 7; ++i) { + __asm__ __volatile__( + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "ulw %[Temp3], 8(%[src_ptr]) \n\t" + "ulw %[Temp4], 12(%[src_ptr]) \n\t" + "sw %[Temp1], 0(%[output_ptr]) \n\t" + "sw %[Temp2], 4(%[output_ptr]) \n\t" + "sw %[Temp3], 8(%[output_ptr]) \n\t" + "sw %[Temp4], 12(%[output_ptr]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=&r"(Temp4), [src_ptr] "+r"(src_ptr) + : [src_pixels_per_line] 
"r"(src_pixels_per_line), + [output_ptr] "r"(output_ptr)); + + __asm__ __volatile__( + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "ulw %[Temp3], 8(%[src_ptr]) \n\t" + "ulw %[Temp4], 12(%[src_ptr]) \n\t" + "sw %[Temp1], 16(%[output_ptr]) \n\t" + "sw %[Temp2], 20(%[output_ptr]) \n\t" + "sw %[Temp3], 24(%[output_ptr]) \n\t" + "sw %[Temp4], 28(%[output_ptr]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=&r"(Temp4), [src_ptr] "+r"(src_ptr) + : [src_pixels_per_line] "r"(src_pixels_per_line), + [output_ptr] "r"(output_ptr)); + + __asm__ __volatile__( + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "ulw %[Temp3], 8(%[src_ptr]) \n\t" + "ulw %[Temp4], 12(%[src_ptr]) \n\t" + "sw %[Temp1], 32(%[output_ptr]) \n\t" + "sw %[Temp2], 36(%[output_ptr]) \n\t" + "sw %[Temp3], 40(%[output_ptr]) \n\t" + "sw %[Temp4], 44(%[output_ptr]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=&r"(Temp4), [src_ptr] "+r"(src_ptr) + : [src_pixels_per_line] "r"(src_pixels_per_line), + [output_ptr] "r"(output_ptr)); + + output_ptr += 48; + } +} + +void vp8_filter_block2d_first_pass16_4tap( + unsigned char *RESTRICT src_ptr, unsigned char *RESTRICT output_ptr, + unsigned int src_pixels_per_line, unsigned int output_width, + unsigned int output_height, int xoffset, int yoffset, + unsigned char *RESTRICT dst_ptr, int pitch) { + unsigned int i, j; + int Temp1, Temp2, Temp3, Temp4; + + unsigned int vector4a; + int vector1b, vector2b; + unsigned int tp1, tp2, tp3, tn1; + unsigned int p1, p2, p3; + unsigned int n1, n2, n3; + unsigned char *cm = ff_cropTbl + CROP_WIDTH; + + vector4a = 64; + + vector1b = sub_pel_filters_inv_tap_4[xoffset][0]; + vector2b = sub_pel_filters_inv_tap_4[xoffset][1]; + + /* if (yoffset == 0) don't need temp buffer, data 
will be stored in dst_ptr */ + if (yoffset == 0) { + output_height -= 5; + src_ptr += (src_pixels_per_line + src_pixels_per_line); + + for (i = output_height; i--;) { + __asm__ __volatile__("ulw %[tp3], -1(%[src_ptr]) \n\t" + : [tp3] "=&r"(tp3) + : [src_ptr] "r"(src_ptr)); + + /* processing 4 adjacent pixels */ + for (j = 0; j < 16; j += 4) { + /* apply filter with vectors pairs */ + __asm__ __volatile__( + "ulw %[tp2], 3(%[src_ptr]) " + "\n\t" + "move %[tp1], %[tp3] " + "\n\t" + + /* even 1. pixel */ + "mtlo %[vector4a], $ac3 " + "\n\t" + "mthi $0, $ac3 " + "\n\t" + "move %[tp3], %[tp2] " + "\n\t" + "preceu.ph.qbr %[p1], %[tp1] " + "\n\t" + "preceu.ph.qbl %[p2], %[tp1] " + "\n\t" + "preceu.ph.qbr %[p3], %[tp2] " + "\n\t" + "dpa.w.ph $ac3, %[p1], %[vector1b] " + "\n\t" + "dpa.w.ph $ac3, %[p2], %[vector2b] " + "\n\t" + + /* even 2. pixel */ + "mtlo %[vector4a], $ac2 " + "\n\t" + "mthi $0, $ac2 " + "\n\t" + "dpa.w.ph $ac2, %[p2], %[vector1b] " + "\n\t" + "dpa.w.ph $ac2, %[p3], %[vector2b] " + "\n\t" + "extr.w %[Temp1], $ac3, 7 " + "\n\t" + + /* odd 1. pixel */ + "ulw %[tn1], 4(%[src_ptr]) " + "\n\t" + "balign %[tp2], %[tp1], 3 " + "\n\t" + "mtlo %[vector4a], $ac3 " + "\n\t" + "mthi $0, $ac3 " + "\n\t" + "preceu.ph.qbr %[n1], %[tp2] " + "\n\t" + "preceu.ph.qbl %[n2], %[tp2] " + "\n\t" + "preceu.ph.qbr %[n3], %[tn1] " + "\n\t" + "extr.w %[Temp3], $ac2, 7 " + "\n\t" + "dpa.w.ph $ac3, %[n1], %[vector1b] " + "\n\t" + "dpa.w.ph $ac3, %[n2], %[vector2b] " + "\n\t" + + /* odd 2. 
pixel */ + "mtlo %[vector4a], $ac2 " + "\n\t" + "mthi $0, $ac2 " + "\n\t" + "extr.w %[Temp2], $ac3, 7 " + "\n\t" + "dpa.w.ph $ac2, %[n2], %[vector1b] " + "\n\t" + "dpa.w.ph $ac2, %[n3], %[vector2b] " + "\n\t" + "extr.w %[Temp4], $ac2, 7 " + "\n\t" + + /* clamp and store results */ + "lbux %[tp1], %[Temp1](%[cm]) " + "\n\t" + "lbux %[tn1], %[Temp2](%[cm]) " + "\n\t" + "lbux %[tp2], %[Temp3](%[cm]) " + "\n\t" + "sb %[tp1], 0(%[dst_ptr]) " + "\n\t" + "sb %[tn1], 1(%[dst_ptr]) " + "\n\t" + "lbux %[n2], %[Temp4](%[cm]) " + "\n\t" + "sb %[tp2], 2(%[dst_ptr]) " + "\n\t" + "sb %[n2], 3(%[dst_ptr]) " + "\n\t" + + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), + [tn1] "=&r"(tn1), [p1] "=&r"(p1), [p2] "=&r"(p2), [n1] "=&r"(n1), + [n2] "=&r"(n2), [n3] "=&r"(n3), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [p3] "=&r"(p3), [Temp3] "=&r"(Temp3), + [Temp4] "=&r"(Temp4) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector4a] "r"(vector4a), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr), + [src_ptr] "r"(src_ptr)); + + src_ptr += 4; + } + + /* Next row... */ + src_ptr += src_pixels_per_line - 16; + dst_ptr += pitch; + } + } else { + for (i = output_height; i--;) { + /* processing 4 adjacent pixels */ + for (j = 0; j < 16; j += 4) { + /* apply filter with vectors pairs */ + __asm__ __volatile__( + "ulw %[tp1], -1(%[src_ptr]) " + "\n\t" + "ulw %[tp2], 3(%[src_ptr]) " + "\n\t" + + /* even 1. pixel */ + "mtlo %[vector4a], $ac3 " + "\n\t" + "mthi $0, $ac3 " + "\n\t" + "preceu.ph.qbr %[p1], %[tp1] " + "\n\t" + "preceu.ph.qbl %[p2], %[tp1] " + "\n\t" + "preceu.ph.qbr %[p3], %[tp2] " + "\n\t" + "dpa.w.ph $ac3, %[p1], %[vector1b] " + "\n\t" + "dpa.w.ph $ac3, %[p2], %[vector2b] " + "\n\t" + + /* even 2. pixel */ + "mtlo %[vector4a], $ac2 " + "\n\t" + "mthi $0, $ac2 " + "\n\t" + "dpa.w.ph $ac2, %[p2], %[vector1b] " + "\n\t" + "dpa.w.ph $ac2, %[p3], %[vector2b] " + "\n\t" + "extr.w %[Temp1], $ac3, 7 " + "\n\t" + + /* odd 1. 
pixel */ + "ulw %[tn1], 4(%[src_ptr]) " + "\n\t" + "balign %[tp2], %[tp1], 3 " + "\n\t" + "mtlo %[vector4a], $ac3 " + "\n\t" + "mthi $0, $ac3 " + "\n\t" + "preceu.ph.qbr %[n1], %[tp2] " + "\n\t" + "preceu.ph.qbl %[n2], %[tp2] " + "\n\t" + "preceu.ph.qbr %[n3], %[tn1] " + "\n\t" + "extr.w %[Temp3], $ac2, 7 " + "\n\t" + "dpa.w.ph $ac3, %[n1], %[vector1b] " + "\n\t" + "dpa.w.ph $ac3, %[n2], %[vector2b] " + "\n\t" + + /* odd 2. pixel */ + "mtlo %[vector4a], $ac2 " + "\n\t" + "mthi $0, $ac2 " + "\n\t" + "extr.w %[Temp2], $ac3, 7 " + "\n\t" + "dpa.w.ph $ac2, %[n2], %[vector1b] " + "\n\t" + "dpa.w.ph $ac2, %[n3], %[vector2b] " + "\n\t" + "extr.w %[Temp4], $ac2, 7 " + "\n\t" + + /* clamp and store results */ + "lbux %[tp1], %[Temp1](%[cm]) " + "\n\t" + "lbux %[tn1], %[Temp2](%[cm]) " + "\n\t" + "lbux %[tp2], %[Temp3](%[cm]) " + "\n\t" + "sb %[tp1], 0(%[output_ptr]) " + "\n\t" + "sb %[tn1], 1(%[output_ptr]) " + "\n\t" + "lbux %[n2], %[Temp4](%[cm]) " + "\n\t" + "sb %[tp2], 2(%[output_ptr]) " + "\n\t" + "sb %[n2], 3(%[output_ptr]) " + "\n\t" + + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1), + [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [n1] "=&r"(n1), + [n2] "=&r"(n2), [n3] "=&r"(n3), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector4a] "r"(vector4a), [cm] "r"(cm), + [output_ptr] "r"(output_ptr), [src_ptr] "r"(src_ptr)); + + src_ptr += 4; + } + + /* next row... 
*/ + src_ptr += src_pixels_per_line; + output_ptr += output_width; + } + } +} + +void vp8_filter_block2d_second_pass4(unsigned char *RESTRICT src_ptr, + unsigned char *RESTRICT output_ptr, + int output_pitch, int yoffset) { + unsigned int i; + + int Temp1, Temp2, Temp3, Temp4; + unsigned int vector1b, vector2b, vector3b, vector4a; + + unsigned char src_ptr_l2; + unsigned char src_ptr_l1; + unsigned char src_ptr_0; + unsigned char src_ptr_r1; + unsigned char src_ptr_r2; + unsigned char src_ptr_r3; + + unsigned char *cm = ff_cropTbl + CROP_WIDTH; + + vector4a = 64; + + /* load filter coefficients */ + vector1b = sub_pel_filterss[yoffset][0]; + vector2b = sub_pel_filterss[yoffset][2]; + vector3b = sub_pel_filterss[yoffset][1]; + + if (vector1b) { + /* 6 tap filter */ + + for (i = 2; i--;) { + /* prefetch src_ptr data to cache memory */ + prefetch_load(src_ptr); + + /* do not allow compiler to reorder instructions */ + __asm__ __volatile__( + ".set noreorder \n\t" + : + :); + + /* apply filter with vectors pairs */ + __asm__ __volatile__( + "lbu %[src_ptr_l2], -8(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -4(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 4(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 8(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 12(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac2 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l2], -7(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -3(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 1(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 5(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 9(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 13(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "extp %[Temp1], $ac2, 9 \n\t" + + "append %[src_ptr_l2], 
%[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l2], -6(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -2(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 2(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 6(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 10(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 14(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac0 \n\t" + "extp %[Temp2], $ac3, 9 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l2], -5(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -1(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 3(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 7(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 11(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 15(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac1 \n\t" + "extp %[Temp3], $ac0, 9 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" + "extp %[Temp4], $ac1, 9 \n\t" + + : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=r"(Temp4), [src_ptr_l1] "=&r"(src_ptr_l1), + [src_ptr_0] "=&r"(src_ptr_0), [src_ptr_r1] "=&r"(src_ptr_r1), + [src_ptr_r2] "=&r"(src_ptr_r2), [src_ptr_l2] "=&r"(src_ptr_l2), + [src_ptr_r3] "=&r"(src_ptr_r3) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4a] "r"(vector4a), + [src_ptr] "r"(src_ptr)); + + /* clamp and 
store results */ + output_ptr[0] = cm[Temp1]; + output_ptr[1] = cm[Temp2]; + output_ptr[2] = cm[Temp3]; + output_ptr[3] = cm[Temp4]; + + output_ptr += output_pitch; + + /* apply filter with vectors pairs */ + __asm__ __volatile__( + "lbu %[src_ptr_l2], -4(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], 0(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 4(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 8(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 12(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 16(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac2 \n\t" + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l2], -3(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], 1(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 5(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 9(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 13(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 17(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "extp %[Temp1], $ac2, 9 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l2], -2(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], 2(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 6(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 10(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 14(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 18(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac0 \n\t" + "extp %[Temp2], $ac3, 9 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac0, %[src_ptr_0], 
%[vector2b] \n\t" + "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l2], -1(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], 3(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 7(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 11(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 15(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 19(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac1 \n\t" + "extp %[Temp3], $ac0, 9 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" + "extp %[Temp4], $ac1, 9 \n\t" + + : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=r"(Temp4), [src_ptr_l1] "=&r"(src_ptr_l1), + [src_ptr_0] "=&r"(src_ptr_0), [src_ptr_r1] "=&r"(src_ptr_r1), + [src_ptr_r2] "=&r"(src_ptr_r2), [src_ptr_l2] "=&r"(src_ptr_l2), + [src_ptr_r3] "=&r"(src_ptr_r3) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4a] "r"(vector4a), + [src_ptr] "r"(src_ptr)); + + /* clamp and store results */ + output_ptr[0] = cm[Temp1]; + output_ptr[1] = cm[Temp2]; + output_ptr[2] = cm[Temp3]; + output_ptr[3] = cm[Temp4]; + + src_ptr += 8; + output_ptr += output_pitch; + } + } else { + /* 4 tap filter */ /* prefetch src_ptr data to cache memory */ - prefetch_store(output_ptr + 32); + prefetch_load(src_ptr); - /* copy memory from src buffer to dst buffer */ - for (i = 0; i < 7; i++) - { - __asm__ __volatile__ ( - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "ulw %[Temp3], 8(%[src_ptr]) \n\t" - "ulw %[Temp4], 12(%[src_ptr]) \n\t" - "sw %[Temp1], 0(%[output_ptr]) \n\t" - "sw %[Temp2], 4(%[output_ptr]) \n\t" - "sw %[Temp3], 8(%[output_ptr]) \n\t" - "sw %[Temp4], 12(%[output_ptr]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + for (i = 2; i--;) { + /* do not 
allow compiler to reorder instructions */ + __asm__ __volatile__( + ".set noreorder \n\t" + : + :); - : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), - [Temp4] "=&r" (Temp4), [src_ptr] "+r" (src_ptr) - : [src_pixels_per_line] "r" (src_pixels_per_line), - [output_ptr] "r" (output_ptr) - ); + /* apply filter with vectors pairs */ + __asm__ __volatile__( + "lbu %[src_ptr_l1], -4(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 4(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 8(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac2 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" - __asm__ __volatile__ ( - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "ulw %[Temp3], 8(%[src_ptr]) \n\t" - "ulw %[Temp4], 12(%[src_ptr]) \n\t" - "sw %[Temp1], 16(%[output_ptr]) \n\t" - "sw %[Temp2], 20(%[output_ptr]) \n\t" - "sw %[Temp3], 24(%[output_ptr]) \n\t" - "sw %[Temp4], 28(%[output_ptr]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + "lbu %[src_ptr_l1], -3(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 1(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 5(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 9(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "extp %[Temp1], $ac2, 9 \n\t" - : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), - [Temp4] "=&r" (Temp4), [src_ptr] "+r" (src_ptr) - : [src_pixels_per_line] "r" (src_pixels_per_line), - [output_ptr] "r" (output_ptr) - ); + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" - __asm__ __volatile__ ( - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "ulw %[Temp3], 8(%[src_ptr]) \n\t" - "ulw %[Temp4], 12(%[src_ptr]) \n\t" - "sw 
%[Temp1], 32(%[output_ptr]) \n\t" - "sw %[Temp2], 36(%[output_ptr]) \n\t" - "sw %[Temp3], 40(%[output_ptr]) \n\t" - "sw %[Temp4], 44(%[output_ptr]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + "lbu %[src_ptr_l1], -2(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 2(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 6(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 10(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac0 \n\t" + "extp %[Temp2], $ac3, 9 \n\t" - : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), - [Temp4] "=&r" (Temp4), [src_ptr] "+r" (src_ptr) - : [src_pixels_per_line] "r" (src_pixels_per_line), - [output_ptr] "r" (output_ptr) - ); + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" - output_ptr += 48; + "lbu %[src_ptr_l1], -1(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 3(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 7(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 11(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac1 \n\t" + "extp %[Temp3], $ac0, 9 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" + "extp %[Temp4], $ac1, 9 \n\t" + + : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=r"(Temp4), [src_ptr_l1] "=&r"(src_ptr_l1), + [src_ptr_0] "=&r"(src_ptr_0), [src_ptr_r1] "=&r"(src_ptr_r1), + [src_ptr_r2] "=&r"(src_ptr_r2) + : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b), + [vector4a] "r"(vector4a), [src_ptr] "r"(src_ptr)); + + /* clamp and store results */ + output_ptr[0] = cm[Temp1]; + output_ptr[1] = cm[Temp2]; + output_ptr[2] = cm[Temp3]; + output_ptr[3] = cm[Temp4]; + + output_ptr += output_pitch; + + /* apply filter with vectors pairs */ + __asm__ __volatile__( + "lbu %[src_ptr_l1], 0(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 
4(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 8(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 12(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac2 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l1], 1(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 5(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 9(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 13(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "extp %[Temp1], $ac2, 9 \n\t" + + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l1], 2(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 6(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 10(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 14(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac0 \n\t" + "extp %[Temp2], $ac3, 9 \n\t" + + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l1], 3(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 7(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 11(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 15(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac1 \n\t" + "extp %[Temp3], $ac0, 9 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" + "extp %[Temp4], $ac1, 9 \n\t" + + : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=r"(Temp4), [src_ptr_l1] "=&r"(src_ptr_l1), + [src_ptr_0] "=&r"(src_ptr_0), [src_ptr_r1] "=&r"(src_ptr_r1), + [src_ptr_r2] "=&r"(src_ptr_r2) + : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b), + [vector4a] "r"(vector4a), [src_ptr] 
"r"(src_ptr)); + + /* clamp and store results */ + output_ptr[0] = cm[Temp1]; + output_ptr[1] = cm[Temp2]; + output_ptr[2] = cm[Temp3]; + output_ptr[3] = cm[Temp4]; + + src_ptr += 8; + output_ptr += output_pitch; } + } } +void vp8_filter_block2d_second_pass_8(unsigned char *RESTRICT src_ptr, + unsigned char *RESTRICT output_ptr, + int output_pitch, + unsigned int output_height, + unsigned int output_width, + unsigned int yoffset) { + unsigned int i; -void vp8_filter_block2d_first_pass16_4tap -( - unsigned char *RESTRICT src_ptr, - unsigned char *RESTRICT output_ptr, - unsigned int src_pixels_per_line, - unsigned int output_width, - unsigned int output_height, - int xoffset, - int yoffset, - unsigned char *RESTRICT dst_ptr, - int pitch -) -{ - unsigned int i, j; - int Temp1, Temp2, Temp3, Temp4; + int Temp1, Temp2, Temp3, Temp4, Temp5, Temp6, Temp7, Temp8; + unsigned int vector1b, vector2b, vector3b, vector4a; - unsigned int vector4a; - int vector1b, vector2b; - unsigned int tp1, tp2, tp3, tn1; - unsigned int p1, p2, p3; - unsigned int n1, n2, n3; - unsigned char *cm = ff_cropTbl + CROP_WIDTH; + unsigned char src_ptr_l2; + unsigned char src_ptr_l1; + unsigned char src_ptr_0; + unsigned char src_ptr_r1; + unsigned char src_ptr_r2; + unsigned char src_ptr_r3; + unsigned char *cm = ff_cropTbl + CROP_WIDTH; - vector4a = 64; + vector4a = 64; - vector1b = sub_pel_filters_inv_tap_4[xoffset][0]; - vector2b = sub_pel_filters_inv_tap_4[xoffset][1]; + vector1b = sub_pel_filterss[yoffset][0]; + vector2b = sub_pel_filterss[yoffset][2]; + vector3b = sub_pel_filterss[yoffset][1]; - /* if (yoffset == 0) don't need temp buffer, data will be stored in dst_ptr */ - if (yoffset == 0) - { - output_height -= 5; - src_ptr += (src_pixels_per_line + src_pixels_per_line); + if (vector1b) { + /* 6 tap filter */ - for (i = output_height; i--;) - { - __asm__ __volatile__ ( - "ulw %[tp3], -1(%[src_ptr]) \n\t" - : [tp3] "=&r" (tp3) - : [src_ptr] "r" (src_ptr) - ); + /* prefetch src_ptr data to 
cache memory */ + prefetch_load(src_ptr); - /* processing 4 adjacent pixels */ - for (j = 0; j < 16; j += 4) - { - /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "ulw %[tp2], 3(%[src_ptr]) \n\t" - "move %[tp1], %[tp3] \n\t" + for (i = output_height; i--;) { + /* apply filter with vectors pairs */ + __asm__ __volatile__( + "lbu %[src_ptr_l2], -16(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -8(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 8(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 16(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 24(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac2 \n\t" - /* even 1. pixel */ - "mtlo %[vector4a], $ac3 \n\t" - "mthi $0, $ac3 \n\t" - "move %[tp3], %[tp2] \n\t" - "preceu.ph.qbr %[p1], %[tp1] \n\t" - "preceu.ph.qbl %[p2], %[tp1] \n\t" - "preceu.ph.qbr %[p3], %[tp2] \n\t" - "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" - /* even 2. pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "mthi $0, $ac2 \n\t" - "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t" - "extr.w %[Temp1], $ac3, 7 \n\t" + "lbu %[src_ptr_l2], -15(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -7(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 1(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 9(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 17(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 25(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "extp %[Temp1], $ac2, 9 \n\t" - /* odd 1. 
pixel */ - "ulw %[tn1], 4(%[src_ptr]) \n\t" - "balign %[tp2], %[tp1], 3 \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "mthi $0, $ac3 \n\t" - "preceu.ph.qbr %[n1], %[tp2] \n\t" - "preceu.ph.qbl %[n2], %[tp2] \n\t" - "preceu.ph.qbr %[n3], %[tn1] \n\t" - "extr.w %[Temp3], $ac2, 7 \n\t" - "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t" + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" - /* odd 2. pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "mthi $0, $ac2 \n\t" - "extr.w %[Temp2], $ac3, 7 \n\t" - "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t" - "extr.w %[Temp4], $ac2, 7 \n\t" + "lbu %[src_ptr_l2], -14(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -6(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 2(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 10(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 18(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 26(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac0 \n\t" + "extp %[Temp2], $ac3, 9 \n\t" - /* clamp and store results */ - "lbux %[tp1], %[Temp1](%[cm]) \n\t" - "lbux %[tn1], %[Temp2](%[cm]) \n\t" - "lbux %[tp2], %[Temp3](%[cm]) \n\t" - "sb %[tp1], 0(%[dst_ptr]) \n\t" - "sb %[tn1], 1(%[dst_ptr]) \n\t" - "lbux %[n2], %[Temp4](%[cm]) \n\t" - "sb %[tp2], 2(%[dst_ptr]) \n\t" - "sb %[n2], 3(%[dst_ptr]) \n\t" + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3), - [tn1] "=&r" (tn1), [p1] "=&r" (p1), [p2] "=&r" (p2), - [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" 
(n3), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [p3] "=&r" (p3), - [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector4a] "r" (vector4a), [cm] "r" (cm), [dst_ptr] "r" (dst_ptr), - [src_ptr] "r" (src_ptr) - ); + "lbu %[src_ptr_l2], -13(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -5(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 3(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 11(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 19(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 27(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac1 \n\t" + "extp %[Temp3], $ac0, 9 \n\t" - src_ptr += 4; - } + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - /* Next row... */ - src_ptr += src_pixels_per_line - 16; - dst_ptr += pitch; - } + : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [src_ptr_l1] "=&r"(src_ptr_l1), [src_ptr_0] "=&r"(src_ptr_0), + [src_ptr_r1] "=&r"(src_ptr_r1), [src_ptr_r2] "=&r"(src_ptr_r2), + [src_ptr_l2] "=&r"(src_ptr_l2), [src_ptr_r3] "=&r"(src_ptr_r3) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4a] "r"(vector4a), + [src_ptr] "r"(src_ptr)); + + /* apply filter with vectors pairs */ + __asm__ __volatile__( + "lbu %[src_ptr_l2], -12(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -4(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 4(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 12(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 20(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 28(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac2 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac2, %[src_ptr_0], 
%[vector2b] \n\t" + "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" + "extp %[Temp4], $ac1, 9 \n\t" + + "lbu %[src_ptr_l2], -11(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -3(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 5(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 13(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 21(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 29(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "extp %[Temp5], $ac2, 9 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l2], -10(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -2(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 6(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 14(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 22(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 30(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac0 \n\t" + "extp %[Temp6], $ac3, 9 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l2], -9(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -1(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 7(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 15(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 23(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 31(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac1 \n\t" + "extp %[Temp7], $ac0, 9 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" + "extp %[Temp8], 
$ac1, 9 \n\t" + + : [Temp4] "=&r"(Temp4), [Temp5] "=&r"(Temp5), [Temp6] "=&r"(Temp6), + [Temp7] "=&r"(Temp7), [Temp8] "=r"(Temp8), + [src_ptr_l1] "=&r"(src_ptr_l1), [src_ptr_0] "=&r"(src_ptr_0), + [src_ptr_r1] "=&r"(src_ptr_r1), [src_ptr_r2] "=&r"(src_ptr_r2), + [src_ptr_l2] "=&r"(src_ptr_l2), [src_ptr_r3] "=&r"(src_ptr_r3) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4a] "r"(vector4a), + [src_ptr] "r"(src_ptr)); + + /* clamp and store results */ + output_ptr[0] = cm[Temp1]; + output_ptr[1] = cm[Temp2]; + output_ptr[2] = cm[Temp3]; + output_ptr[3] = cm[Temp4]; + output_ptr[4] = cm[Temp5]; + output_ptr[5] = cm[Temp6]; + output_ptr[6] = cm[Temp7]; + output_ptr[7] = cm[Temp8]; + + src_ptr += 8; + output_ptr += output_pitch; } - else - { - for (i = output_height; i--;) - { - /* processing 4 adjacent pixels */ - for (j = 0; j < 16; j += 4) - { - /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "ulw %[tp1], -1(%[src_ptr]) \n\t" - "ulw %[tp2], 3(%[src_ptr]) \n\t" + } else { + /* 4 tap filter */ - /* even 1. pixel */ - "mtlo %[vector4a], $ac3 \n\t" - "mthi $0, $ac3 \n\t" - "preceu.ph.qbr %[p1], %[tp1] \n\t" - "preceu.ph.qbl %[p2], %[tp1] \n\t" - "preceu.ph.qbr %[p3], %[tp2] \n\t" - "dpa.w.ph $ac3, %[p1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[p2], %[vector2b] \n\t" + /* prefetch src_ptr data to cache memory */ + prefetch_load(src_ptr); - /* even 2. 
pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "mthi $0, $ac2 \n\t" - "dpa.w.ph $ac2, %[p2], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[p3], %[vector2b] \n\t" - "extr.w %[Temp1], $ac3, 7 \n\t" + for (i = output_height; i--;) { + __asm__ __volatile__( + "lbu %[src_ptr_l1], -8(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 8(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 16(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac2 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" - /* odd 1. pixel */ - "ulw %[tn1], 4(%[src_ptr]) \n\t" - "balign %[tp2], %[tp1], 3 \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "mthi $0, $ac3 \n\t" - "preceu.ph.qbr %[n1], %[tp2] \n\t" - "preceu.ph.qbl %[n2], %[tp2] \n\t" - "preceu.ph.qbr %[n3], %[tn1] \n\t" - "extr.w %[Temp3], $ac2, 7 \n\t" - "dpa.w.ph $ac3, %[n1], %[vector1b] \n\t" - "dpa.w.ph $ac3, %[n2], %[vector2b] \n\t" + : [src_ptr_l1] "=&r"(src_ptr_l1), [src_ptr_0] "=&r"(src_ptr_0), + [src_ptr_r1] "=&r"(src_ptr_r1), [src_ptr_r2] "=&r"(src_ptr_r2) + : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b), + [vector4a] "r"(vector4a), [src_ptr] "r"(src_ptr)); - /* odd 2. 
pixel */ - "mtlo %[vector4a], $ac2 \n\t" - "mthi $0, $ac2 \n\t" - "extr.w %[Temp2], $ac3, 7 \n\t" - "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t" - "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t" - "extr.w %[Temp4], $ac2, 7 \n\t" + __asm__ __volatile__( + "lbu %[src_ptr_l1], -7(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 1(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 9(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 17(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" + "extp %[Temp1], $ac2, 9 \n\t" - /* clamp and store results */ - "lbux %[tp1], %[Temp1](%[cm]) \n\t" - "lbux %[tn1], %[Temp2](%[cm]) \n\t" - "lbux %[tp2], %[Temp3](%[cm]) \n\t" - "sb %[tp1], 0(%[output_ptr]) \n\t" - "sb %[tn1], 1(%[output_ptr]) \n\t" - "lbux %[n2], %[Temp4](%[cm]) \n\t" - "sb %[tp2], 2(%[output_ptr]) \n\t" - "sb %[n2], 3(%[output_ptr]) \n\t" + : [Temp1] "=r"(Temp1), [src_ptr_l1] "=&r"(src_ptr_l1), + [src_ptr_0] "=&r"(src_ptr_0), [src_ptr_r1] "=&r"(src_ptr_r1), + [src_ptr_r2] "=&r"(src_ptr_r2) + : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b), + [vector4a] "r"(vector4a), [src_ptr] "r"(src_ptr)); - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tn1] "=&r" (tn1), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), - [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector4a] "r" (vector4a), [cm] "r" (cm), - [output_ptr] "r" (output_ptr), [src_ptr] "r" (src_ptr) - ); + src_ptr_l1 = src_ptr[-6]; + src_ptr_0 = src_ptr[2]; + src_ptr_r1 = src_ptr[10]; + src_ptr_r2 = src_ptr[18]; - src_ptr += 4; - } + __asm__ __volatile__( + "mtlo %[vector4a], $ac0 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac0, 
%[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" + "extp %[Temp2], $ac3, 9 \n\t" - /* next row... */ - src_ptr += src_pixels_per_line; - output_ptr += output_width; - } + : [Temp2] "=r"(Temp2) + : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b), + [src_ptr_l1] "r"(src_ptr_l1), [src_ptr_0] "r"(src_ptr_0), + [src_ptr_r1] "r"(src_ptr_r1), [src_ptr_r2] "r"(src_ptr_r2), + [vector4a] "r"(vector4a)); + + src_ptr_l1 = src_ptr[-5]; + src_ptr_0 = src_ptr[3]; + src_ptr_r1 = src_ptr[11]; + src_ptr_r2 = src_ptr[19]; + + __asm__ __volatile__( + "mtlo %[vector4a], $ac1 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" + "extp %[Temp3], $ac0, 9 \n\t" + + : [Temp3] "=r"(Temp3) + : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b), + [src_ptr_l1] "r"(src_ptr_l1), [src_ptr_0] "r"(src_ptr_0), + [src_ptr_r1] "r"(src_ptr_r1), [src_ptr_r2] "r"(src_ptr_r2), + [vector4a] "r"(vector4a)); + + src_ptr_l1 = src_ptr[-4]; + src_ptr_0 = src_ptr[4]; + src_ptr_r1 = src_ptr[12]; + src_ptr_r2 = src_ptr[20]; + + __asm__ __volatile__( + "mtlo %[vector4a], $ac2 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" + "extp %[Temp4], $ac1, 9 \n\t" + + : [Temp4] "=r"(Temp4) + : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b), + [src_ptr_l1] "r"(src_ptr_l1), [src_ptr_0] "r"(src_ptr_0), + [src_ptr_r1] "r"(src_ptr_r1), [src_ptr_r2] "r"(src_ptr_r2), + [vector4a] "r"(vector4a)); + + src_ptr_l1 = src_ptr[-3]; + src_ptr_0 = src_ptr[5]; + src_ptr_r1 = src_ptr[13]; + src_ptr_r2 = src_ptr[21]; + + __asm__ __volatile__( + "mtlo %[vector4a], $ac3 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac3, 
%[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" + "extp %[Temp5], $ac2, 9 \n\t" + + : [Temp5] "=&r"(Temp5) + : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b), + [src_ptr_l1] "r"(src_ptr_l1), [src_ptr_0] "r"(src_ptr_0), + [src_ptr_r1] "r"(src_ptr_r1), [src_ptr_r2] "r"(src_ptr_r2), + [vector4a] "r"(vector4a)); + + src_ptr_l1 = src_ptr[-2]; + src_ptr_0 = src_ptr[6]; + src_ptr_r1 = src_ptr[14]; + src_ptr_r2 = src_ptr[22]; + + __asm__ __volatile__( + "mtlo %[vector4a], $ac0 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" + "extp %[Temp6], $ac3, 9 \n\t" + + : [Temp6] "=r"(Temp6) + : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b), + [src_ptr_l1] "r"(src_ptr_l1), [src_ptr_0] "r"(src_ptr_0), + [src_ptr_r1] "r"(src_ptr_r1), [src_ptr_r2] "r"(src_ptr_r2), + [vector4a] "r"(vector4a)); + + src_ptr_l1 = src_ptr[-1]; + src_ptr_0 = src_ptr[7]; + src_ptr_r1 = src_ptr[15]; + src_ptr_r2 = src_ptr[23]; + + __asm__ __volatile__( + "mtlo %[vector4a], $ac1 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" + "extp %[Temp7], $ac0, 9 \n\t" + "extp %[Temp8], $ac1, 9 \n\t" + + : [Temp7] "=&r"(Temp7), [Temp8] "=r"(Temp8) + : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b), + [src_ptr_l1] "r"(src_ptr_l1), [src_ptr_0] "r"(src_ptr_0), + [src_ptr_r1] "r"(src_ptr_r1), [src_ptr_r2] "r"(src_ptr_r2), + [vector4a] "r"(vector4a)); + + /* clamp and store results */ + output_ptr[0] = cm[Temp1]; + output_ptr[1] = cm[Temp2]; + output_ptr[2] = cm[Temp3]; + output_ptr[3] = cm[Temp4]; + output_ptr[4] = cm[Temp5]; + output_ptr[5] = cm[Temp6]; + output_ptr[6] = cm[Temp7]; + output_ptr[7] = cm[Temp8]; + + src_ptr += 8; + output_ptr += output_pitch; } + } } 
+void vp8_filter_block2d_second_pass161(unsigned char *RESTRICT src_ptr, + unsigned char *RESTRICT output_ptr, + int output_pitch, + const unsigned short *vp8_filter) { + unsigned int i, j; -void vp8_filter_block2d_second_pass4 -( - unsigned char *RESTRICT src_ptr, - unsigned char *RESTRICT output_ptr, - int output_pitch, - int yoffset -) -{ - unsigned int i; + int Temp1, Temp2, Temp3, Temp4, Temp5, Temp6, Temp7, Temp8; + unsigned int vector4a; + unsigned int vector1b, vector2b, vector3b; - int Temp1, Temp2, Temp3, Temp4; - unsigned int vector1b, vector2b, vector3b, vector4a; + unsigned char src_ptr_l2; + unsigned char src_ptr_l1; + unsigned char src_ptr_0; + unsigned char src_ptr_r1; + unsigned char src_ptr_r2; + unsigned char src_ptr_r3; + unsigned char *cm = ff_cropTbl + CROP_WIDTH; - unsigned char src_ptr_l2; - unsigned char src_ptr_l1; - unsigned char src_ptr_0; - unsigned char src_ptr_r1; - unsigned char src_ptr_r2; - unsigned char src_ptr_r3; + vector4a = 64; - unsigned char *cm = ff_cropTbl + CROP_WIDTH; + vector1b = vp8_filter[0]; + vector2b = vp8_filter[2]; + vector3b = vp8_filter[1]; - vector4a = 64; + if (vector1b == 0) { + /* 4 tap filter */ - /* load filter coefficients */ - vector1b = sub_pel_filterss[yoffset][0]; - vector2b = sub_pel_filterss[yoffset][2]; - vector3b = sub_pel_filterss[yoffset][1]; + /* prefetch src_ptr data to cache memory */ + prefetch_load(src_ptr + 16); - if (vector1b) - { - /* 6 tap filter */ + for (i = 16; i--;) { + /* unrolling for loop */ + for (j = 0; j < 16; j += 8) { + /* apply filter with vectors pairs */ + __asm__ __volatile__( + "lbu %[src_ptr_l1], -16(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_0], 0(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r1], 16(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r2], 32(%[src_ptr]) " + "\n\t" + "mtlo %[vector4a], $ac2 " + "\n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 " + "\n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 " + "\n\t" + "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] " + "\n\t" + 
"dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] " + "\n\t" - for (i = 2; i--;) - { - /* prefetch src_ptr data to cache memory */ - prefetch_load(src_ptr); + "lbu %[src_ptr_l1], -15(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_0], 1(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r1], 17(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r2], 33(%[src_ptr]) " + "\n\t" + "mtlo %[vector4a], $ac3 " + "\n\t" + "extp %[Temp1], $ac2, 9 " + "\n\t" - /* do not allow compiler to reorder instructions */ - __asm__ __volatile__ ( - ".set noreorder \n\t" - : - : - ); + "append %[src_ptr_0], %[src_ptr_r1], 8 " + "\n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 " + "\n\t" + "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] " + "\n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] " + "\n\t" - /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "lbu %[src_ptr_l2], -8(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -4(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 4(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 8(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 12(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac2 \n\t" + "lbu %[src_ptr_l1], -14(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_0], 2(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r1], 18(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r2], 34(%[src_ptr]) " + "\n\t" + "mtlo %[vector4a], $ac1 " + "\n\t" + "extp %[Temp2], $ac3, 9 " + "\n\t" - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 " + "\n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 " + "\n\t" + "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] " + "\n\t" + "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] " + "\n\t" - "lbu %[src_ptr_l2], -7(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -3(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 
1(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 5(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 9(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 13(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "extp %[Temp1], $ac2, 9 \n\t" + "lbu %[src_ptr_l1], -13(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_0], 3(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r1], 19(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r2], 35(%[src_ptr]) " + "\n\t" + "mtlo %[vector4a], $ac3 " + "\n\t" + "extp %[Temp3], $ac1, 9 " + "\n\t" - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 " + "\n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 " + "\n\t" + "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] " + "\n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] " + "\n\t" - "lbu %[src_ptr_l2], -6(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -2(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 2(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 6(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 10(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 14(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac0 \n\t" - "extp %[Temp2], $ac3, 9 \n\t" + "lbu %[src_ptr_l1], -12(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_0], 4(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r1], 20(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r2], 36(%[src_ptr]) " + "\n\t" + "mtlo %[vector4a], $ac2 " + "\n\t" + "extp %[Temp4], $ac3, 9 " + "\n\t" - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 " + "\n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 " + "\n\t" + 
"dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] " + "\n\t" + "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] " + "\n\t" - "lbu %[src_ptr_l2], -5(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -1(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 3(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 7(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 11(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 15(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac1 \n\t" - "extp %[Temp3], $ac0, 9 \n\t" + "lbu %[src_ptr_l1], -11(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_0], 5(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r1], 21(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r2], 37(%[src_ptr]) " + "\n\t" + "mtlo %[vector4a], $ac3 " + "\n\t" + "extp %[Temp5], $ac2, 9 " + "\n\t" - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp4], $ac1, 9 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 " + "\n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 " + "\n\t" + "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] " + "\n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] " + "\n\t" - : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4), - [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0), - [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2), - [src_ptr_l2] "=&r" (src_ptr_l2), [src_ptr_r3] "=&r" (src_ptr_r3) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), [vector4a] "r" (vector4a), - [src_ptr] "r" (src_ptr) - ); + "lbu %[src_ptr_l1], -10(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_0], 6(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r1], 22(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r2], 38(%[src_ptr]) " + "\n\t" + "mtlo %[vector4a], $ac1 " + "\n\t" + "extp %[Temp6], $ac3, 9 " + "\n\t" - /* clamp and store results */ - 
output_ptr[0] = cm[Temp1]; - output_ptr[1] = cm[Temp2]; - output_ptr[2] = cm[Temp3]; - output_ptr[3] = cm[Temp4]; + "append %[src_ptr_0], %[src_ptr_r1], 8 " + "\n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 " + "\n\t" + "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] " + "\n\t" + "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] " + "\n\t" - output_ptr += output_pitch; + "lbu %[src_ptr_l1], -9(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_0], 7(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r1], 23(%[src_ptr]) " + "\n\t" + "lbu %[src_ptr_r2], 39(%[src_ptr]) " + "\n\t" + "mtlo %[vector4a], $ac3 " + "\n\t" + "extp %[Temp7], $ac1, 9 " + "\n\t" - /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "lbu %[src_ptr_l2], -4(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], 0(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 4(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 8(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 12(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 16(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac2 \n\t" - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 " + "\n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 " + "\n\t" + "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] " + "\n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] " + "\n\t" + "extp %[Temp8], $ac3, 9 " + "\n\t" - "lbu %[src_ptr_l2], -3(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], 1(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 5(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 9(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 13(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 17(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "extp %[Temp1], $ac2, 9 \n\t" + : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=&r"(Temp4), [Temp5] "=&r"(Temp5), [Temp6] "=&r"(Temp6), + 
[Temp7] "=&r"(Temp7), [Temp8] "=r"(Temp8), + [src_ptr_l1] "=&r"(src_ptr_l1), [src_ptr_0] "=&r"(src_ptr_0), + [src_ptr_r1] "=&r"(src_ptr_r1), [src_ptr_r2] "=&r"(src_ptr_r2) + : [vector2b] "r"(vector2b), [vector3b] "r"(vector3b), + [vector4a] "r"(vector4a), [src_ptr] "r"(src_ptr)); - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" + /* clamp and store results */ + output_ptr[j] = cm[Temp1]; + output_ptr[j + 1] = cm[Temp2]; + output_ptr[j + 2] = cm[Temp3]; + output_ptr[j + 3] = cm[Temp4]; + output_ptr[j + 4] = cm[Temp5]; + output_ptr[j + 5] = cm[Temp6]; + output_ptr[j + 6] = cm[Temp7]; + output_ptr[j + 7] = cm[Temp8]; - "lbu %[src_ptr_l2], -2(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], 2(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 6(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 10(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 14(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 18(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac0 \n\t" - "extp %[Temp2], $ac3, 9 \n\t" + src_ptr += 8; + } - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -1(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], 3(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 7(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 11(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 15(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 19(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac1 \n\t" - "extp %[Temp3], $ac0, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac1, 
%[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp4], $ac1, 9 \n\t" - - : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4), - [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0), - [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2), - [src_ptr_l2] "=&r" (src_ptr_l2), [src_ptr_r3] "=&r" (src_ptr_r3) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), [vector4a] "r" (vector4a), - [src_ptr] "r" (src_ptr) - ); - - /* clamp and store results */ - output_ptr[0] = cm[Temp1]; - output_ptr[1] = cm[Temp2]; - output_ptr[2] = cm[Temp3]; - output_ptr[3] = cm[Temp4]; - - src_ptr += 8; - output_ptr += output_pitch; - } + output_ptr += output_pitch; } - else - { - /* 4 tap filter */ + } else { + /* 4 tap filter */ - /* prefetch src_ptr data to cache memory */ - prefetch_load(src_ptr); + /* prefetch src_ptr data to cache memory */ + prefetch_load(src_ptr + 16); - for (i = 2; i--;) - { - /* do not allow compiler to reorder instructions */ - __asm__ __volatile__ ( - ".set noreorder \n\t" - : - : - ); + /* unroll for loop */ + for (i = 16; i--;) { + /* apply filter with vectors pairs */ + __asm__ __volatile__( + "lbu %[src_ptr_l2], -32(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -16(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 16(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 32(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 48(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac2 \n\t" - /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "lbu %[src_ptr_l1], -4(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 4(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 8(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac2 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac2, %[src_ptr_0], 
%[vector2b] \n\t" - "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" - "lbu %[src_ptr_l1], -3(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 1(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 5(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 9(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "extp %[Temp1], $ac2, 9 \n\t" + "lbu %[src_ptr_l2], -31(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -15(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 1(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 17(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 33(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 49(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac0 \n\t" + "extp %[Temp1], $ac2, 9 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" - "lbu %[src_ptr_l1], -2(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 2(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 6(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 10(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac0 \n\t" - "extp %[Temp2], $ac3, 9 \n\t" + "lbu %[src_ptr_l2], -30(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -14(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 2(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 18(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 34(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 50(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac1 \n\t" + "extp %[Temp2], $ac0, 9 \n\t" - "append 
%[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - "lbu %[src_ptr_l1], -1(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 3(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 7(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 11(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac1 \n\t" - "extp %[Temp3], $ac0, 9 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp4], $ac1, 9 \n\t" + "lbu %[src_ptr_l2], -29(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -13(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 3(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 19(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 35(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 51(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "extp %[Temp3], $ac1, 9 \n\t" - : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4), - [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0), - [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2) - : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b), - [vector4a] "r" (vector4a), [src_ptr] "r" (src_ptr) - ); + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" - /* clamp and store results */ - output_ptr[0] = cm[Temp1]; - 
output_ptr[1] = cm[Temp2]; - output_ptr[2] = cm[Temp3]; - output_ptr[3] = cm[Temp4]; + "lbu %[src_ptr_l2], -28(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -12(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 4(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 20(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 36(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 52(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac2 \n\t" + "extp %[Temp4], $ac3, 9 \n\t" - output_ptr += output_pitch; + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" - /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "lbu %[src_ptr_l1], 0(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 4(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 8(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 12(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac2 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" + "lbu %[src_ptr_l2], -27(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -11(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 5(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 21(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 37(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 53(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac0 \n\t" + "extp %[Temp5], $ac2, 9 \n\t" - "lbu %[src_ptr_l1], 1(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 5(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 9(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 13(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "extp %[Temp1], $ac2, 9 \n\t" + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] 
\n\t" + "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" + "lbu %[src_ptr_l2], -26(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -10(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 6(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 22(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 38(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 54(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac1 \n\t" + "extp %[Temp6], $ac0, 9 \n\t" - "lbu %[src_ptr_l1], 2(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 6(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 10(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 14(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac0 \n\t" - "extp %[Temp2], $ac3, 9 \n\t" + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" + "lbu %[src_ptr_l2], -25(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -9(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 7(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 23(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 39(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 55(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "extp %[Temp7], $ac1, 9 \n\t" - "lbu %[src_ptr_l1], 3(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 7(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 11(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 15(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac1 \n\t" - "extp %[Temp3], $ac0, 9 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac1, %[src_ptr_0], 
%[vector2b] \n\t" - "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp4], $ac1, 9 \n\t" + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" + "extp %[Temp8], $ac3, 9 \n\t" - : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=r" (Temp4), - [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0), - [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2) - : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b), - [vector4a] "r" (vector4a), [src_ptr] "r" (src_ptr) - ); + : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=&r"(Temp4), [Temp5] "=&r"(Temp5), [Temp6] "=&r"(Temp6), + [Temp7] "=&r"(Temp7), [Temp8] "=r"(Temp8), + [src_ptr_l1] "=&r"(src_ptr_l1), [src_ptr_0] "=&r"(src_ptr_0), + [src_ptr_r1] "=&r"(src_ptr_r1), [src_ptr_r2] "=&r"(src_ptr_r2), + [src_ptr_l2] "=&r"(src_ptr_l2), [src_ptr_r3] "=&r"(src_ptr_r3) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4a] "r"(vector4a), + [src_ptr] "r"(src_ptr)); - /* clamp and store results */ - output_ptr[0] = cm[Temp1]; - output_ptr[1] = cm[Temp2]; - output_ptr[2] = cm[Temp3]; - output_ptr[3] = cm[Temp4]; + /* clamp and store results */ + output_ptr[0] = cm[Temp1]; + output_ptr[1] = cm[Temp2]; + output_ptr[2] = cm[Temp3]; + output_ptr[3] = cm[Temp4]; + output_ptr[4] = cm[Temp5]; + output_ptr[5] = cm[Temp6]; + output_ptr[6] = cm[Temp7]; + output_ptr[7] = cm[Temp8]; - src_ptr += 8; - output_ptr += output_pitch; - } + /* apply filter with vectors pairs */ + __asm__ __volatile__( + "lbu %[src_ptr_l2], -24(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -8(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 8(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 24(%[src_ptr]) \n\t" + 
"lbu %[src_ptr_r2], 40(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 56(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac2 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l2], -23(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -7(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 9(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 25(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 41(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 57(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac0 \n\t" + "extp %[Temp1], $ac2, 9 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l2], -22(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -6(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 10(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 26(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 42(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 58(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac1 \n\t" + "extp %[Temp2], $ac0, 9 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l2], -21(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -5(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 11(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 27(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 43(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 59(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "extp %[Temp3], $ac1, 9 \n\t" + + 
"append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l2], -20(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -4(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 12(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 28(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 44(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 60(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac2 \n\t" + "extp %[Temp4], $ac3, 9 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l2], -19(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -3(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 13(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 29(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 45(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 61(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac0 \n\t" + "extp %[Temp5], $ac2, 9 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l2], -18(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -2(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 14(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 30(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 46(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 62(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac1 \n\t" + "extp %[Temp6], $ac0, 9 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], 
%[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" + + "lbu %[src_ptr_l2], -17(%[src_ptr]) \n\t" + "lbu %[src_ptr_l1], -1(%[src_ptr]) \n\t" + "lbu %[src_ptr_0], 15(%[src_ptr]) \n\t" + "lbu %[src_ptr_r1], 31(%[src_ptr]) \n\t" + "lbu %[src_ptr_r2], 47(%[src_ptr]) \n\t" + "lbu %[src_ptr_r3], 63(%[src_ptr]) \n\t" + "mtlo %[vector4a], $ac3 \n\t" + "extp %[Temp7], $ac1, 9 \n\t" + + "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" + "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" + "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" + "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" + "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" + "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" + "extp %[Temp8], $ac3, 9 \n\t" + + : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=&r"(Temp4), [Temp5] "=&r"(Temp5), [Temp6] "=&r"(Temp6), + [Temp7] "=&r"(Temp7), [Temp8] "=r"(Temp8), + [src_ptr_l1] "=&r"(src_ptr_l1), [src_ptr_0] "=&r"(src_ptr_0), + [src_ptr_r1] "=&r"(src_ptr_r1), [src_ptr_r2] "=&r"(src_ptr_r2), + [src_ptr_l2] "=&r"(src_ptr_l2), [src_ptr_r3] "=&r"(src_ptr_r3) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4a] "r"(vector4a), + [src_ptr] "r"(src_ptr)); + + src_ptr += 16; + output_ptr[8] = cm[Temp1]; + output_ptr[9] = cm[Temp2]; + output_ptr[10] = cm[Temp3]; + output_ptr[11] = cm[Temp4]; + output_ptr[12] = cm[Temp5]; + output_ptr[13] = cm[Temp6]; + output_ptr[14] = cm[Temp7]; + output_ptr[15] = cm[Temp8]; + + output_ptr += output_pitch; } + } } - -void vp8_filter_block2d_second_pass_8 -( - unsigned char *RESTRICT src_ptr, - unsigned char *RESTRICT output_ptr, - int output_pitch, - unsigned int output_height, - unsigned int output_width, - unsigned int yoffset -) -{ - unsigned int i; - - int Temp1, Temp2, Temp3, Temp4, Temp5, Temp6, Temp7, Temp8; - unsigned int vector1b, vector2b, 
vector3b, vector4a; - - unsigned char src_ptr_l2; - unsigned char src_ptr_l1; - unsigned char src_ptr_0; - unsigned char src_ptr_r1; - unsigned char src_ptr_r2; - unsigned char src_ptr_r3; - unsigned char *cm = ff_cropTbl + CROP_WIDTH; - - vector4a = 64; - - vector1b = sub_pel_filterss[yoffset][0]; - vector2b = sub_pel_filterss[yoffset][2]; - vector3b = sub_pel_filterss[yoffset][1]; - - if (vector1b) - { - /* 6 tap filter */ - - /* prefetch src_ptr data to cache memory */ - prefetch_load(src_ptr); - - for (i = output_height; i--;) - { - /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "lbu %[src_ptr_l2], -16(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -8(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 8(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 16(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 24(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac2 \n\t" - - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -15(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -7(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 1(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 9(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 17(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 25(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "extp %[Temp1], $ac2, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -14(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -6(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 2(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 10(%[src_ptr]) \n\t" - 
"lbu %[src_ptr_r2], 18(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 26(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac0 \n\t" - "extp %[Temp2], $ac3, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -13(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -5(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 3(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 11(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 19(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 27(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac1 \n\t" - "extp %[Temp3], $ac0, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - - : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), - [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0), - [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2), - [src_ptr_l2] "=&r" (src_ptr_l2), [src_ptr_r3] "=&r" (src_ptr_r3) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), [vector4a] "r" (vector4a), - [src_ptr] "r" (src_ptr) - ); - - /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "lbu %[src_ptr_l2], -12(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -4(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 4(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 12(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 20(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 28(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac2 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" 
- "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp4], $ac1, 9 \n\t" - - "lbu %[src_ptr_l2], -11(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -3(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 5(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 13(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 21(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 29(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "extp %[Temp5], $ac2, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -10(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -2(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 6(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 14(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 22(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 30(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac0 \n\t" - "extp %[Temp6], $ac3, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -9(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -1(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 7(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 15(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 23(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 31(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac1 \n\t" - "extp %[Temp7], $ac0, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac1, %[src_ptr_0], 
%[vector2b] \n\t" - "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp8], $ac1, 9 \n\t" - - : [Temp4] "=&r" (Temp4), [Temp5] "=&r" (Temp5), - [Temp6] "=&r" (Temp6), [Temp7] "=&r" (Temp7), [Temp8] "=r" (Temp8), - [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0), - [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2), - [src_ptr_l2] "=&r" (src_ptr_l2),[src_ptr_r3] "=&r" (src_ptr_r3) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), [vector4a] "r" (vector4a), - [src_ptr] "r" (src_ptr) - ); - - /* clamp and store results */ - output_ptr[0] = cm[Temp1]; - output_ptr[1] = cm[Temp2]; - output_ptr[2] = cm[Temp3]; - output_ptr[3] = cm[Temp4]; - output_ptr[4] = cm[Temp5]; - output_ptr[5] = cm[Temp6]; - output_ptr[6] = cm[Temp7]; - output_ptr[7] = cm[Temp8]; - - src_ptr += 8; - output_ptr += output_pitch; - } - } - else - { - /* 4 tap filter */ - - /* prefetch src_ptr data to cache memory */ - prefetch_load(src_ptr); - - for (i = output_height; i--;) - { - __asm__ __volatile__ ( - "lbu %[src_ptr_l1], -8(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 8(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 16(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac2 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" - - : [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0), - [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2) - : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b), - [vector4a] "r" (vector4a), [src_ptr] "r" (src_ptr) - ); - - __asm__ __volatile__ ( - "lbu %[src_ptr_l1], -7(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 1(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 9(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 17(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append 
%[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp1], $ac2, 9 \n\t" - - : [Temp1] "=r" (Temp1), - [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0), - [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2) - : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b), - [vector4a] "r" (vector4a), [src_ptr] "r" (src_ptr) - ); - - src_ptr_l1 = src_ptr[-6]; - src_ptr_0 = src_ptr[2]; - src_ptr_r1 = src_ptr[10]; - src_ptr_r2 = src_ptr[18]; - - __asm__ __volatile__ ( - "mtlo %[vector4a], $ac0 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp2], $ac3, 9 \n\t" - - : [Temp2] "=r" (Temp2) - : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b), - [src_ptr_l1] "r" (src_ptr_l1), [src_ptr_0] "r" (src_ptr_0), - [src_ptr_r1] "r" (src_ptr_r1), [src_ptr_r2] "r" (src_ptr_r2), - [vector4a] "r" (vector4a) - ); - - src_ptr_l1 = src_ptr[-5]; - src_ptr_0 = src_ptr[3]; - src_ptr_r1 = src_ptr[11]; - src_ptr_r2 = src_ptr[19]; - - __asm__ __volatile__ ( - "mtlo %[vector4a], $ac1 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp3], $ac0, 9 \n\t" - - : [Temp3] "=r" (Temp3) - : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b), - [src_ptr_l1] "r" (src_ptr_l1), [src_ptr_0] "r" (src_ptr_0), - [src_ptr_r1] "r" (src_ptr_r1), [src_ptr_r2] "r" (src_ptr_r2), - [vector4a] "r" (vector4a) - ); - - src_ptr_l1 = src_ptr[-4]; - src_ptr_0 = src_ptr[4]; - src_ptr_r1 = src_ptr[12]; - src_ptr_r2 = src_ptr[20]; - - __asm__ __volatile__ ( - "mtlo %[vector4a], $ac2 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], 
%[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp4], $ac1, 9 \n\t" - - : [Temp4] "=r" (Temp4) - : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b), - [src_ptr_l1] "r" (src_ptr_l1), [src_ptr_0] "r" (src_ptr_0), - [src_ptr_r1] "r" (src_ptr_r1), [src_ptr_r2] "r" (src_ptr_r2), - [vector4a] "r" (vector4a) - ); - - src_ptr_l1 = src_ptr[-3]; - src_ptr_0 = src_ptr[5]; - src_ptr_r1 = src_ptr[13]; - src_ptr_r2 = src_ptr[21]; - - __asm__ __volatile__ ( - "mtlo %[vector4a], $ac3 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp5], $ac2, 9 \n\t" - - : [Temp5] "=&r" (Temp5) - : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b), - [src_ptr_l1] "r" (src_ptr_l1), [src_ptr_0] "r" (src_ptr_0), - [src_ptr_r1] "r" (src_ptr_r1), [src_ptr_r2] "r" (src_ptr_r2), - [vector4a] "r" (vector4a) - ); - - src_ptr_l1 = src_ptr[-2]; - src_ptr_0 = src_ptr[6]; - src_ptr_r1 = src_ptr[14]; - src_ptr_r2 = src_ptr[22]; - - __asm__ __volatile__ ( - "mtlo %[vector4a], $ac0 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp6], $ac3, 9 \n\t" - - : [Temp6] "=r" (Temp6) - : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b), - [src_ptr_l1] "r" (src_ptr_l1), [src_ptr_0] "r" (src_ptr_0), - [src_ptr_r1] "r" (src_ptr_r1), [src_ptr_r2] "r" (src_ptr_r2), - [vector4a] "r" (vector4a) - ); - - src_ptr_l1 = src_ptr[-1]; - src_ptr_0 = src_ptr[7]; - src_ptr_r1 = src_ptr[15]; - src_ptr_r2 = src_ptr[23]; - - __asm__ __volatile__ ( - "mtlo %[vector4a], $ac1 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac1, %[src_ptr_0], 
%[vector2b] \n\t" - "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp7], $ac0, 9 \n\t" - "extp %[Temp8], $ac1, 9 \n\t" - - : [Temp7] "=&r" (Temp7), [Temp8] "=r" (Temp8) - : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b), - [src_ptr_l1] "r" (src_ptr_l1), [src_ptr_0] "r" (src_ptr_0), - [src_ptr_r1] "r" (src_ptr_r1), [src_ptr_r2] "r" (src_ptr_r2), - [vector4a] "r" (vector4a) - ); - - /* clamp and store results */ - output_ptr[0] = cm[Temp1]; - output_ptr[1] = cm[Temp2]; - output_ptr[2] = cm[Temp3]; - output_ptr[3] = cm[Temp4]; - output_ptr[4] = cm[Temp5]; - output_ptr[5] = cm[Temp6]; - output_ptr[6] = cm[Temp7]; - output_ptr[7] = cm[Temp8]; - - src_ptr += 8; - output_ptr += output_pitch; - } - } -} - - -void vp8_filter_block2d_second_pass161 -( - unsigned char *RESTRICT src_ptr, - unsigned char *RESTRICT output_ptr, - int output_pitch, - const unsigned short *vp8_filter -) -{ - unsigned int i, j; - - int Temp1, Temp2, Temp3, Temp4, Temp5, Temp6, Temp7, Temp8; - unsigned int vector4a; - unsigned int vector1b, vector2b, vector3b; - - unsigned char src_ptr_l2; - unsigned char src_ptr_l1; - unsigned char src_ptr_0; - unsigned char src_ptr_r1; - unsigned char src_ptr_r2; - unsigned char src_ptr_r3; - unsigned char *cm = ff_cropTbl + CROP_WIDTH; - - vector4a = 64; - - vector1b = vp8_filter[0]; - vector2b = vp8_filter[2]; - vector3b = vp8_filter[1]; - - if (vector1b == 0) - { - /* 4 tap filter */ - - /* prefetch src_ptr data to cache memory */ - prefetch_load(src_ptr + 16); - - for (i = 16; i--;) - { - /* unrolling for loop */ - for (j = 0; j < 16; j += 8) - { - /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "lbu %[src_ptr_l1], -16(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 16(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 32(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac2 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac2, %[src_ptr_0], 
%[vector2b] \n\t" - "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l1], -15(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 1(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 17(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 33(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "extp %[Temp1], $ac2, 9 \n\t" - - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l1], -14(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 2(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 18(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 34(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac1 \n\t" - "extp %[Temp2], $ac3, 9 \n\t" - - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l1], -13(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 3(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 19(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 35(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "extp %[Temp3], $ac1, 9 \n\t" - - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l1], -12(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 4(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 20(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 36(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac2 \n\t" - "extp %[Temp4], $ac3, 9 \n\t" - - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l1], -11(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 5(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 21(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 37(%[src_ptr]) \n\t" 
- "mtlo %[vector4a], $ac3 \n\t" - "extp %[Temp5], $ac2, 9 \n\t" - - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l1], -10(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 6(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 22(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 38(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac1 \n\t" - "extp %[Temp6], $ac3, 9 \n\t" - - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l1], -9(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 7(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 23(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 39(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "extp %[Temp7], $ac1, 9 \n\t" - - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp8], $ac3, 9 \n\t" - - : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4), - [Temp5] "=&r" (Temp5), [Temp6] "=&r" (Temp6), - [Temp7] "=&r" (Temp7), [Temp8] "=r" (Temp8), - [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0), - [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2) - : [vector2b] "r" (vector2b), [vector3b] "r" (vector3b), - [vector4a] "r" (vector4a), [src_ptr] "r" (src_ptr) - ); - - /* clamp and store results */ - output_ptr[j] = cm[Temp1]; - output_ptr[j + 1] = cm[Temp2]; - output_ptr[j + 2] = cm[Temp3]; - output_ptr[j + 3] = cm[Temp4]; - output_ptr[j + 4] = cm[Temp5]; - output_ptr[j + 5] = cm[Temp6]; - output_ptr[j + 6] = cm[Temp7]; - output_ptr[j + 7] = cm[Temp8]; - - src_ptr += 8; - } - - output_ptr += output_pitch; - } - } - else - { - /* 4 tap 
filter */ - - /* prefetch src_ptr data to cache memory */ - prefetch_load(src_ptr + 16); - - /* unroll for loop */ - for (i = 16; i--;) - { - /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "lbu %[src_ptr_l2], -32(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -16(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 0(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 16(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 32(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 48(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac2 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -31(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -15(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 1(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 17(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 33(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 49(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac0 \n\t" - "extp %[Temp1], $ac2, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -30(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -14(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 2(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 18(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 34(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 50(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac1 \n\t" - "extp %[Temp2], $ac0, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" 
- "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -29(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -13(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 3(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 19(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 35(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 51(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "extp %[Temp3], $ac1, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -28(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -12(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 4(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 20(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 36(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 52(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac2 \n\t" - "extp %[Temp4], $ac3, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -27(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -11(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 5(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 21(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 37(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 53(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac0 \n\t" - "extp %[Temp5], $ac2, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -26(%[src_ptr]) \n\t" - "lbu 
%[src_ptr_l1], -10(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 6(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 22(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 38(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 54(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac1 \n\t" - "extp %[Temp6], $ac0, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -25(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -9(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 7(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 23(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 39(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 55(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "extp %[Temp7], $ac1, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp8], $ac3, 9 \n\t" - - : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4), - [Temp5] "=&r" (Temp5), [Temp6] "=&r" (Temp6), - [Temp7] "=&r" (Temp7), [Temp8] "=r" (Temp8), - [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0), - [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2), - [src_ptr_l2] "=&r" (src_ptr_l2),[src_ptr_r3] "=&r" (src_ptr_r3) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), [vector4a] "r" (vector4a), - [src_ptr] "r" (src_ptr) - ); - - /* clamp and store results */ - output_ptr[0] = cm[Temp1]; - output_ptr[1] = cm[Temp2]; - output_ptr[2] = cm[Temp3]; - output_ptr[3] = cm[Temp4]; - output_ptr[4] = cm[Temp5]; - output_ptr[5] = cm[Temp6]; - output_ptr[6] = 
cm[Temp7]; - output_ptr[7] = cm[Temp8]; - - /* apply filter with vectors pairs */ - __asm__ __volatile__ ( - "lbu %[src_ptr_l2], -24(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -8(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 8(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 24(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 40(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 56(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac2 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -23(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -7(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 9(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 25(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 41(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 57(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac0 \n\t" - "extp %[Temp1], $ac2, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -22(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -6(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 10(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 26(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 42(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 58(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac1 \n\t" - "extp %[Temp2], $ac0, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -21(%[src_ptr]) \n\t" - 
"lbu %[src_ptr_l1], -5(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 11(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 27(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 43(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 59(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "extp %[Temp3], $ac1, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -20(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -4(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 12(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 28(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 44(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 60(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac2 \n\t" - "extp %[Temp4], $ac3, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac2, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac2, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac2, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -19(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -3(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 13(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 29(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 45(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 61(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac0 \n\t" - "extp %[Temp5], $ac2, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac0, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac0, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac0, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -18(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -2(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 14(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 
30(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 46(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 62(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac1 \n\t" - "extp %[Temp6], $ac0, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac1, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac1, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac1, %[src_ptr_l1], %[vector3b] \n\t" - - "lbu %[src_ptr_l2], -17(%[src_ptr]) \n\t" - "lbu %[src_ptr_l1], -1(%[src_ptr]) \n\t" - "lbu %[src_ptr_0], 15(%[src_ptr]) \n\t" - "lbu %[src_ptr_r1], 31(%[src_ptr]) \n\t" - "lbu %[src_ptr_r2], 47(%[src_ptr]) \n\t" - "lbu %[src_ptr_r3], 63(%[src_ptr]) \n\t" - "mtlo %[vector4a], $ac3 \n\t" - "extp %[Temp7], $ac1, 9 \n\t" - - "append %[src_ptr_l2], %[src_ptr_r3], 8 \n\t" - "append %[src_ptr_0], %[src_ptr_r1], 8 \n\t" - "append %[src_ptr_l1], %[src_ptr_r2], 8 \n\t" - "dpau.h.qbr $ac3, %[src_ptr_l2], %[vector1b] \n\t" - "dpau.h.qbr $ac3, %[src_ptr_0], %[vector2b] \n\t" - "dpsu.h.qbr $ac3, %[src_ptr_l1], %[vector3b] \n\t" - "extp %[Temp8], $ac3, 9 \n\t" - - : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4), - [Temp5] "=&r" (Temp5), [Temp6] "=&r" (Temp6), - [Temp7] "=&r" (Temp7), [Temp8] "=r" (Temp8), - [src_ptr_l1] "=&r" (src_ptr_l1), [src_ptr_0] "=&r" (src_ptr_0), - [src_ptr_r1] "=&r" (src_ptr_r1), [src_ptr_r2] "=&r" (src_ptr_r2), - [src_ptr_l2] "=&r" (src_ptr_l2), [src_ptr_r3] "=&r" (src_ptr_r3) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), [vector4a] "r" (vector4a), - [src_ptr] "r" (src_ptr) - ); - - src_ptr += 16; - output_ptr[8] = cm[Temp1]; - output_ptr[9] = cm[Temp2]; - output_ptr[10] = cm[Temp3]; - output_ptr[11] = cm[Temp4]; - output_ptr[12] = cm[Temp5]; - output_ptr[13] = cm[Temp6]; - output_ptr[14] = cm[Temp7]; - output_ptr[15] = cm[Temp8]; - - output_ptr += output_pitch; - } - } -} - - -void 
vp8_sixtap_predict4x4_dspr2 -( - unsigned char *RESTRICT src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *RESTRICT dst_ptr, - int dst_pitch -) -{ - unsigned char FData[9 * 4]; /* Temp data bufffer used in filtering */ - unsigned int pos = 16; - - /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); - - if (yoffset) - { - /* First filter 1-D horizontally... */ - vp8_filter_block2d_first_pass_4(src_ptr - (2 * src_pixels_per_line), FData, - src_pixels_per_line, 9, xoffset, 4); - /* then filter verticaly... */ - vp8_filter_block2d_second_pass4(FData + 8, dst_ptr, dst_pitch, yoffset); - } - else - /* if (yoffsset == 0) vp8_filter_block2d_first_pass save data to dst_ptr */ - vp8_filter_block2d_first_pass_4(src_ptr, dst_ptr, src_pixels_per_line, - 4, xoffset, dst_pitch); -} - - -void vp8_sixtap_predict8x8_dspr2 -( - unsigned char *RESTRICT src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *RESTRICT dst_ptr, - int dst_pitch -) -{ - - unsigned char FData[13 * 8]; /* Temp data bufffer used in filtering */ - unsigned int pos, Temp1, Temp2; - - pos = 16; - - /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); - - if (yoffset) - { - - src_ptr = src_ptr - (2 * src_pixels_per_line); - - if (xoffset) - /* filter 1-D horizontally... 
*/ - vp8_filter_block2d_first_pass_8_all(src_ptr, FData, src_pixels_per_line, - 13, xoffset, 8); - - else - { - /* prefetch src_ptr data to cache memory */ - prefetch_load(src_ptr + 2 * src_pixels_per_line); - - __asm__ __volatile__ ( - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 0(%[FData]) \n\t" - "sw %[Temp2], 4(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 8(%[FData]) \n\t" - "sw %[Temp2], 12(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 16(%[FData]) \n\t" - "sw %[Temp2], 20(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 24(%[FData]) \n\t" - "sw %[Temp2], 28(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 32(%[FData]) \n\t" - "sw %[Temp2], 36(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 40(%[FData]) \n\t" - "sw %[Temp2], 44(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 48(%[FData]) \n\t" - "sw %[Temp2], 52(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 56(%[FData]) \n\t" - "sw %[Temp2], 60(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 64(%[FData]) \n\t" - 
"sw %[Temp2], 68(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 72(%[FData]) \n\t" - "sw %[Temp2], 76(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 80(%[FData]) \n\t" - "sw %[Temp2], 84(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 88(%[FData]) \n\t" - "sw %[Temp2], 92(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 96(%[FData]) \n\t" - "sw %[Temp2], 100(%[FData]) \n\t" - - : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2) - : [FData] "r" (FData), [src_ptr] "r" (src_ptr), - [src_pixels_per_line] "r" (src_pixels_per_line) - ); - } - - /* filter verticaly... */ - vp8_filter_block2d_second_pass_8(FData + 16, dst_ptr, dst_pitch, 8, 8, yoffset); - } - +void vp8_sixtap_predict4x4_dspr2(unsigned char *RESTRICT src_ptr, + int src_pixels_per_line, int xoffset, + int yoffset, unsigned char *RESTRICT dst_ptr, + int dst_pitch) { + unsigned char FData[9 * 4]; /* Temp data bufffer used in filtering */ + unsigned int pos = 16; + + /* bit positon for extract from acc */ + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); + + if (yoffset) { + /* First filter 1-D horizontally... */ + vp8_filter_block2d_first_pass_4(src_ptr - (2 * src_pixels_per_line), FData, + src_pixels_per_line, 9, xoffset, 4); + /* then filter verticaly... 
*/ + vp8_filter_block2d_second_pass4(FData + 8, dst_ptr, dst_pitch, yoffset); + } else /* if (yoffsset == 0) vp8_filter_block2d_first_pass save data to dst_ptr */ - else - { - if (xoffset) - vp8_filter_block2d_first_pass_8_all(src_ptr, dst_ptr, src_pixels_per_line, - 8, xoffset, dst_pitch); - - else - { - /* copy from src buffer to dst buffer */ - __asm__ __volatile__ ( - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 0(%[dst_ptr]) \n\t" - "sw %[Temp2], 4(%[dst_ptr]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 8(%[dst_ptr]) \n\t" - "sw %[Temp2], 12(%[dst_ptr]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 16(%[dst_ptr]) \n\t" - "sw %[Temp2], 20(%[dst_ptr]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 24(%[dst_ptr]) \n\t" - "sw %[Temp2], 28(%[dst_ptr]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 32(%[dst_ptr]) \n\t" - "sw %[Temp2], 36(%[dst_ptr]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 40(%[dst_ptr]) \n\t" - "sw %[Temp2], 44(%[dst_ptr]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 48(%[dst_ptr]) \n\t" - "sw %[Temp2], 52(%[dst_ptr]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 56(%[dst_ptr]) \n\t" - "sw %[Temp2], 60(%[dst_ptr]) \n\t" - - 
: [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2) - : [dst_ptr] "r" (dst_ptr), [src_ptr] "r" (src_ptr), - [src_pixels_per_line] "r" (src_pixels_per_line) - ); - } - } + vp8_filter_block2d_first_pass_4(src_ptr, dst_ptr, src_pixels_per_line, 4, + xoffset, dst_pitch); } +void vp8_sixtap_predict8x8_dspr2(unsigned char *RESTRICT src_ptr, + int src_pixels_per_line, int xoffset, + int yoffset, unsigned char *RESTRICT dst_ptr, + int dst_pitch) { + unsigned char FData[13 * 8]; /* Temp data bufffer used in filtering */ + unsigned int pos, Temp1, Temp2; -void vp8_sixtap_predict8x4_dspr2 -( - unsigned char *RESTRICT src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *RESTRICT dst_ptr, - int dst_pitch -) -{ - unsigned char FData[9 * 8]; /* Temp data bufffer used in filtering */ - unsigned int pos, Temp1, Temp2; + pos = 16; - pos = 16; + /* bit positon for extract from acc */ + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); - /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + if (yoffset) { + src_ptr = src_ptr - (2 * src_pixels_per_line); - if (yoffset) - { + if (xoffset) /* filter 1-D horizontally... */ + vp8_filter_block2d_first_pass_8_all(src_ptr, FData, src_pixels_per_line, + 13, xoffset, 8); - src_ptr = src_ptr - (2 * src_pixels_per_line); + else { + /* prefetch src_ptr data to cache memory */ + prefetch_load(src_ptr + 2 * src_pixels_per_line); - if (xoffset) - /* filter 1-D horizontally... 
*/ - vp8_filter_block2d_first_pass_8_all(src_ptr, FData, src_pixels_per_line, - 9, xoffset, 8); + __asm__ __volatile__( + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 0(%[FData]) \n\t" + "sw %[Temp2], 4(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - else - { - /* prefetch src_ptr data to cache memory */ - prefetch_load(src_ptr + 2 * src_pixels_per_line); + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 8(%[FData]) \n\t" + "sw %[Temp2], 12(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - __asm__ __volatile__ ( - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 0(%[FData]) \n\t" - "sw %[Temp2], 4(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 16(%[FData]) \n\t" + "sw %[Temp2], 20(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 8(%[FData]) \n\t" - "sw %[Temp2], 12(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 24(%[FData]) \n\t" + "sw %[Temp2], 28(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 16(%[FData]) \n\t" - "sw %[Temp2], 20(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 32(%[FData]) \n\t" + "sw %[Temp2], 36(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 24(%[FData]) \n\t" - 
"sw %[Temp2], 28(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 40(%[FData]) \n\t" + "sw %[Temp2], 44(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 32(%[FData]) \n\t" - "sw %[Temp2], 36(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 48(%[FData]) \n\t" + "sw %[Temp2], 52(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 40(%[FData]) \n\t" - "sw %[Temp2], 44(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 56(%[FData]) \n\t" + "sw %[Temp2], 60(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 48(%[FData]) \n\t" - "sw %[Temp2], 52(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 64(%[FData]) \n\t" + "sw %[Temp2], 68(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 56(%[FData]) \n\t" - "sw %[Temp2], 60(%[FData]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 72(%[FData]) \n\t" + "sw %[Temp2], 76(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 
4(%[src_ptr]) \n\t" - "sw %[Temp1], 64(%[FData]) \n\t" - "sw %[Temp2], 68(%[FData]) \n\t" + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 80(%[FData]) \n\t" + "sw %[Temp2], 84(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2) - : [FData] "r" (FData), [src_ptr] "r" (src_ptr), - [src_pixels_per_line] "r" (src_pixels_per_line) - ); - } + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 88(%[FData]) \n\t" + "sw %[Temp2], 92(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - /* filter verticaly... */ - vp8_filter_block2d_second_pass_8(FData + 16, dst_ptr, dst_pitch, 4, 8, yoffset); + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 96(%[FData]) \n\t" + "sw %[Temp2], 100(%[FData]) \n\t" + + : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2) + : [FData] "r"(FData), [src_ptr] "r"(src_ptr), + [src_pixels_per_line] "r"(src_pixels_per_line)); } + /* filter verticaly... 
*/ + vp8_filter_block2d_second_pass_8(FData + 16, dst_ptr, dst_pitch, 8, 8, + yoffset); + } + + /* if (yoffsset == 0) vp8_filter_block2d_first_pass save data to dst_ptr */ + else { + if (xoffset) + vp8_filter_block2d_first_pass_8_all(src_ptr, dst_ptr, src_pixels_per_line, + 8, xoffset, dst_pitch); + + else { + /* copy from src buffer to dst buffer */ + __asm__ __volatile__( + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 0(%[dst_ptr]) \n\t" + "sw %[Temp2], 4(%[dst_ptr]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 8(%[dst_ptr]) \n\t" + "sw %[Temp2], 12(%[dst_ptr]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 16(%[dst_ptr]) \n\t" + "sw %[Temp2], 20(%[dst_ptr]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 24(%[dst_ptr]) \n\t" + "sw %[Temp2], 28(%[dst_ptr]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 32(%[dst_ptr]) \n\t" + "sw %[Temp2], 36(%[dst_ptr]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 40(%[dst_ptr]) \n\t" + "sw %[Temp2], 44(%[dst_ptr]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 48(%[dst_ptr]) \n\t" + "sw %[Temp2], 52(%[dst_ptr]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 56(%[dst_ptr]) \n\t" + "sw %[Temp2], 60(%[dst_ptr]) \n\t" 
+ + : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2) + : [dst_ptr] "r"(dst_ptr), [src_ptr] "r"(src_ptr), + [src_pixels_per_line] "r"(src_pixels_per_line)); + } + } +} + +void vp8_sixtap_predict8x4_dspr2(unsigned char *RESTRICT src_ptr, + int src_pixels_per_line, int xoffset, + int yoffset, unsigned char *RESTRICT dst_ptr, + int dst_pitch) { + unsigned char FData[9 * 8]; /* Temp data bufffer used in filtering */ + unsigned int pos, Temp1, Temp2; + + pos = 16; + + /* bit positon for extract from acc */ + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); + + if (yoffset) { + src_ptr = src_ptr - (2 * src_pixels_per_line); + + if (xoffset) /* filter 1-D horizontally... */ + vp8_filter_block2d_first_pass_8_all(src_ptr, FData, src_pixels_per_line, + 9, xoffset, 8); + + else { + /* prefetch src_ptr data to cache memory */ + prefetch_load(src_ptr + 2 * src_pixels_per_line); + + __asm__ __volatile__( + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 0(%[FData]) \n\t" + "sw %[Temp2], 4(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 8(%[FData]) \n\t" + "sw %[Temp2], 12(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 16(%[FData]) \n\t" + "sw %[Temp2], 20(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 24(%[FData]) \n\t" + "sw %[Temp2], 28(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 32(%[FData]) \n\t" + "sw %[Temp2], 36(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw 
%[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 40(%[FData]) \n\t" + "sw %[Temp2], 44(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 48(%[FData]) \n\t" + "sw %[Temp2], 52(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 56(%[FData]) \n\t" + "sw %[Temp2], 60(%[FData]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 64(%[FData]) \n\t" + "sw %[Temp2], 68(%[FData]) \n\t" + + : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2) + : [FData] "r"(FData), [src_ptr] "r"(src_ptr), + [src_pixels_per_line] "r"(src_pixels_per_line)); + } + + /* filter verticaly... */ + vp8_filter_block2d_second_pass_8(FData + 16, dst_ptr, dst_pitch, 4, 8, + yoffset); + } + + /* if (yoffsset == 0) vp8_filter_block2d_first_pass save data to dst_ptr */ + else { + if (xoffset) + vp8_filter_block2d_first_pass_8_all(src_ptr, dst_ptr, src_pixels_per_line, + 4, xoffset, dst_pitch); + + else { + /* copy from src buffer to dst buffer */ + __asm__ __volatile__( + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 0(%[dst_ptr]) \n\t" + "sw %[Temp2], 4(%[dst_ptr]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 8(%[dst_ptr]) \n\t" + "sw %[Temp2], 12(%[dst_ptr]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 16(%[dst_ptr]) \n\t" + "sw %[Temp2], 20(%[dst_ptr]) \n\t" + "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" + + "ulw %[Temp1], 0(%[src_ptr]) \n\t" + "ulw %[Temp2], 4(%[src_ptr]) \n\t" + "sw %[Temp1], 
24(%[dst_ptr]) \n\t" + "sw %[Temp2], 28(%[dst_ptr]) \n\t" + + : [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2) + : [dst_ptr] "r"(dst_ptr), [src_ptr] "r"(src_ptr), + [src_pixels_per_line] "r"(src_pixels_per_line)); + } + } +} + +void vp8_sixtap_predict16x16_dspr2(unsigned char *RESTRICT src_ptr, + int src_pixels_per_line, int xoffset, + int yoffset, unsigned char *RESTRICT dst_ptr, + int dst_pitch) { + const unsigned short *VFilter; + unsigned char FData[21 * 16]; /* Temp data bufffer used in filtering */ + unsigned int pos; + + VFilter = sub_pel_filterss[yoffset]; + + pos = 16; + + /* bit positon for extract from acc */ + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); + + if (yoffset) { + src_ptr = src_ptr - (2 * src_pixels_per_line); + + switch (xoffset) { + /* filter 1-D horizontally... */ + case 2: + case 4: + case 6: + /* 6 tap filter */ + vp8_filter_block2d_first_pass16_6tap( + src_ptr, FData, src_pixels_per_line, 21, xoffset, 16); + break; + + case 0: + /* only copy buffer */ + vp8_filter_block2d_first_pass16_0(src_ptr, FData, src_pixels_per_line); + break; + + case 1: + case 3: + case 5: + case 7: + /* 4 tap filter */ + vp8_filter_block2d_first_pass16_4tap( + src_ptr, FData, src_pixels_per_line, 16, 21, xoffset, yoffset, + dst_ptr, dst_pitch); + break; + } + + /* filter verticaly... 
*/ + vp8_filter_block2d_second_pass161(FData + 32, dst_ptr, dst_pitch, VFilter); + } else { /* if (yoffsset == 0) vp8_filter_block2d_first_pass save data to dst_ptr */ - else - { - if (xoffset) - vp8_filter_block2d_first_pass_8_all(src_ptr, dst_ptr, src_pixels_per_line, - 4, xoffset, dst_pitch); + switch (xoffset) { + case 2: + case 4: + case 6: + /* 6 tap filter */ + vp8_filter_block2d_first_pass16_6tap( + src_ptr, dst_ptr, src_pixels_per_line, 16, xoffset, dst_pitch); + break; - else - { - /* copy from src buffer to dst buffer */ - __asm__ __volatile__ ( - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 0(%[dst_ptr]) \n\t" - "sw %[Temp2], 4(%[dst_ptr]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 8(%[dst_ptr]) \n\t" - "sw %[Temp2], 12(%[dst_ptr]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 16(%[dst_ptr]) \n\t" - "sw %[Temp2], 20(%[dst_ptr]) \n\t" - "addu %[src_ptr], %[src_ptr], %[src_pixels_per_line] \n\t" - - "ulw %[Temp1], 0(%[src_ptr]) \n\t" - "ulw %[Temp2], 4(%[src_ptr]) \n\t" - "sw %[Temp1], 24(%[dst_ptr]) \n\t" - "sw %[Temp2], 28(%[dst_ptr]) \n\t" - - : [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2) - : [dst_ptr] "r" (dst_ptr), [src_ptr] "r" (src_ptr), - [src_pixels_per_line] "r" (src_pixels_per_line) - ); - } - } -} - - -void vp8_sixtap_predict16x16_dspr2 -( - unsigned char *RESTRICT src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - unsigned char *RESTRICT dst_ptr, - int dst_pitch -) -{ - const unsigned short *VFilter; - unsigned char FData[21 * 16]; /* Temp data bufffer used in filtering */ - unsigned int pos; - - VFilter = sub_pel_filterss[yoffset]; - - pos = 16; - - /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" 
(pos) - ); - - if (yoffset) - { - - src_ptr = src_ptr - (2 * src_pixels_per_line); - - switch (xoffset) - { - /* filter 1-D horizontally... */ - case 2: - case 4: - case 6: - /* 6 tap filter */ - vp8_filter_block2d_first_pass16_6tap(src_ptr, FData, src_pixels_per_line, - 21, xoffset, 16); - break; - - case 0: - /* only copy buffer */ - vp8_filter_block2d_first_pass16_0(src_ptr, FData, src_pixels_per_line); - break; - - case 1: - case 3: - case 5: - case 7: - /* 4 tap filter */ - vp8_filter_block2d_first_pass16_4tap(src_ptr, FData, src_pixels_per_line, 16, - 21, xoffset, yoffset, dst_ptr, dst_pitch); - break; - } - - /* filter verticaly... */ - vp8_filter_block2d_second_pass161(FData + 32, dst_ptr, dst_pitch, VFilter); - } - else - { - /* if (yoffsset == 0) vp8_filter_block2d_first_pass save data to dst_ptr */ - switch (xoffset) - { - case 2: - case 4: - case 6: - /* 6 tap filter */ - vp8_filter_block2d_first_pass16_6tap(src_ptr, dst_ptr, src_pixels_per_line, - 16, xoffset, dst_pitch); - break; - - case 1: - case 3: - case 5: - case 7: - /* 4 tap filter */ - vp8_filter_block2d_first_pass16_4tap(src_ptr, dst_ptr, src_pixels_per_line, 16, - 21, xoffset, yoffset, dst_ptr, dst_pitch); - break; - } + case 1: + case 3: + case 5: + case 7: + /* 4 tap filter */ + vp8_filter_block2d_first_pass16_4tap( + src_ptr, dst_ptr, src_pixels_per_line, 16, 21, xoffset, yoffset, + dst_ptr, dst_pitch); + break; } + } } #endif diff --git a/libs/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c b/libs/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c index ab938cd6af..899dc10ad9 100644 --- a/libs/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c +++ b/libs/libvpx/vp8/common/mips/dspr2/idct_blk_dspr2.c @@ -13,76 +13,64 @@ #if HAVE_DSPR2 -void vp8_dequant_idct_add_y_block_dspr2 -(short *q, short *dq, - unsigned char *dst, int stride, char *eobs) -{ - int i, j; +void vp8_dequant_idct_add_y_block_dspr2(short *q, short *dq, unsigned char *dst, + int stride, char *eobs) { + int i, j; - for (i = 0; i < 4; 
i++) - { - for (j = 0; j < 4; j++) - { - if (*eobs++ > 1) - vp8_dequant_idct_add_dspr2(q, dq, dst, stride); - else - { - vp8_dc_only_idct_add_dspr2(q[0]*dq[0], dst, stride, dst, stride); - ((int *)q)[0] = 0; - } + for (i = 0; i < 4; ++i) { + for (j = 0; j < 4; ++j) { + if (*eobs++ > 1) + vp8_dequant_idct_add_dspr2(q, dq, dst, stride); + else { + vp8_dc_only_idct_add_dspr2(q[0] * dq[0], dst, stride, dst, stride); + ((int *)q)[0] = 0; + } - q += 16; - dst += 4; - } - - dst += 4 * stride - 16; + q += 16; + dst += 4; } + + dst += 4 * stride - 16; + } } -void vp8_dequant_idct_add_uv_block_dspr2 -(short *q, short *dq, - unsigned char *dstu, unsigned char *dstv, int stride, char *eobs) -{ - int i, j; +void vp8_dequant_idct_add_uv_block_dspr2(short *q, short *dq, + unsigned char *dstu, + unsigned char *dstv, int stride, + char *eobs) { + int i, j; - for (i = 0; i < 2; i++) - { - for (j = 0; j < 2; j++) - { - if (*eobs++ > 1) - vp8_dequant_idct_add_dspr2(q, dq, dstu, stride); - else - { - vp8_dc_only_idct_add_dspr2(q[0]*dq[0], dstu, stride, dstu, stride); - ((int *)q)[0] = 0; - } + for (i = 0; i < 2; ++i) { + for (j = 0; j < 2; ++j) { + if (*eobs++ > 1) + vp8_dequant_idct_add_dspr2(q, dq, dstu, stride); + else { + vp8_dc_only_idct_add_dspr2(q[0] * dq[0], dstu, stride, dstu, stride); + ((int *)q)[0] = 0; + } - q += 16; - dstu += 4; - } - - dstu += 4 * stride - 8; + q += 16; + dstu += 4; } - for (i = 0; i < 2; i++) - { - for (j = 0; j < 2; j++) - { - if (*eobs++ > 1) - vp8_dequant_idct_add_dspr2(q, dq, dstv, stride); - else - { - vp8_dc_only_idct_add_dspr2(q[0]*dq[0], dstv, stride, dstv, stride); - ((int *)q)[0] = 0; - } + dstu += 4 * stride - 8; + } - q += 16; - dstv += 4; - } + for (i = 0; i < 2; ++i) { + for (j = 0; j < 2; ++j) { + if (*eobs++ > 1) + vp8_dequant_idct_add_dspr2(q, dq, dstv, stride); + else { + vp8_dc_only_idct_add_dspr2(q[0] * dq[0], dstv, stride, dstv, stride); + ((int *)q)[0] = 0; + } - dstv += 4 * stride - 8; + q += 16; + dstv += 4; } + + dstv += 4 * 
stride - 8; + } } #endif - diff --git a/libs/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c b/libs/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c index 2eff71069d..9163ffad1e 100644 --- a/libs/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c +++ b/libs/libvpx/vp8/common/mips/dspr2/idctllm_dspr2.c @@ -28,342 +28,319 @@ ****************************************************************************/ extern unsigned char ff_cropTbl[256 + 2 * CROP_WIDTH]; static const int cospi8sqrt2minus1 = 20091; -static const int sinpi8sqrt2 = 35468; +static const int sinpi8sqrt2 = 35468; -inline void prefetch_load_short(short *src) -{ - __asm__ __volatile__ ( - "pref 0, 0(%[src]) \n\t" - : - : [src] "r" (src) - ); +inline void prefetch_load_short(short *src) { + __asm__ __volatile__("pref 0, 0(%[src]) \n\t" : : [src] "r"(src)); } void vp8_short_idct4x4llm_dspr2(short *input, unsigned char *pred_ptr, int pred_stride, unsigned char *dst_ptr, - int dst_stride) -{ - int r, c; - int a1, b1, c1, d1; - short output[16]; - short *ip = input; - short *op = output; - int temp1, temp2; - int shortpitch = 4; + int dst_stride) { + int r, c; + int a1, b1, c1, d1; + short output[16]; + short *ip = input; + short *op = output; + int temp1, temp2; + int shortpitch = 4; - int c2, d2; - int temp3, temp4; - unsigned char *cm = ff_cropTbl + CROP_WIDTH; + int c2, d2; + int temp3, temp4; + unsigned char *cm = ff_cropTbl + CROP_WIDTH; - /* prepare data for load */ - prefetch_load_short(ip + 8); + /* prepare data for load */ + prefetch_load_short(ip + 8); - /* first loop is unrolled */ - a1 = ip[0] + ip[8]; - b1 = ip[0] - ip[8]; + /* first loop is unrolled */ + a1 = ip[0] + ip[8]; + b1 = ip[0] - ip[8]; - temp1 = (ip[4] * sinpi8sqrt2) >> 16; - temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16); - c1 = temp1 - temp2; + temp1 = (ip[4] * sinpi8sqrt2) >> 16; + temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16); + c1 = temp1 - temp2; - temp1 = ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16); - temp2 = (ip[12] * 
sinpi8sqrt2) >> 16; - d1 = temp1 + temp2; + temp1 = ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16); + temp2 = (ip[12] * sinpi8sqrt2) >> 16; + d1 = temp1 + temp2; - temp3 = (ip[5] * sinpi8sqrt2) >> 16; - temp4 = ip[13] + ((ip[13] * cospi8sqrt2minus1) >> 16); - c2 = temp3 - temp4; + temp3 = (ip[5] * sinpi8sqrt2) >> 16; + temp4 = ip[13] + ((ip[13] * cospi8sqrt2minus1) >> 16); + c2 = temp3 - temp4; - temp3 = ip[5] + ((ip[5] * cospi8sqrt2minus1) >> 16); - temp4 = (ip[13] * sinpi8sqrt2) >> 16; - d2 = temp3 + temp4; + temp3 = ip[5] + ((ip[5] * cospi8sqrt2minus1) >> 16); + temp4 = (ip[13] * sinpi8sqrt2) >> 16; + d2 = temp3 + temp4; - op[0] = a1 + d1; - op[12] = a1 - d1; - op[4] = b1 + c1; - op[8] = b1 - c1; + op[0] = a1 + d1; + op[12] = a1 - d1; + op[4] = b1 + c1; + op[8] = b1 - c1; - a1 = ip[1] + ip[9]; - b1 = ip[1] - ip[9]; + a1 = ip[1] + ip[9]; + b1 = ip[1] - ip[9]; - op[1] = a1 + d2; - op[13] = a1 - d2; - op[5] = b1 + c2; - op[9] = b1 - c2; + op[1] = a1 + d2; + op[13] = a1 - d2; + op[5] = b1 + c2; + op[9] = b1 - c2; - a1 = ip[2] + ip[10]; - b1 = ip[2] - ip[10]; + a1 = ip[2] + ip[10]; + b1 = ip[2] - ip[10]; - temp1 = (ip[6] * sinpi8sqrt2) >> 16; - temp2 = ip[14] + ((ip[14] * cospi8sqrt2minus1) >> 16); - c1 = temp1 - temp2; + temp1 = (ip[6] * sinpi8sqrt2) >> 16; + temp2 = ip[14] + ((ip[14] * cospi8sqrt2minus1) >> 16); + c1 = temp1 - temp2; - temp1 = ip[6] + ((ip[6] * cospi8sqrt2minus1) >> 16); - temp2 = (ip[14] * sinpi8sqrt2) >> 16; - d1 = temp1 + temp2; + temp1 = ip[6] + ((ip[6] * cospi8sqrt2minus1) >> 16); + temp2 = (ip[14] * sinpi8sqrt2) >> 16; + d1 = temp1 + temp2; - temp3 = (ip[7] * sinpi8sqrt2) >> 16; - temp4 = ip[15] + ((ip[15] * cospi8sqrt2minus1) >> 16); - c2 = temp3 - temp4; + temp3 = (ip[7] * sinpi8sqrt2) >> 16; + temp4 = ip[15] + ((ip[15] * cospi8sqrt2minus1) >> 16); + c2 = temp3 - temp4; - temp3 = ip[7] + ((ip[7] * cospi8sqrt2minus1) >> 16); - temp4 = (ip[15] * sinpi8sqrt2) >> 16; - d2 = temp3 + temp4; + temp3 = ip[7] + ((ip[7] * cospi8sqrt2minus1) >> 16); + 
temp4 = (ip[15] * sinpi8sqrt2) >> 16; + d2 = temp3 + temp4; - op[2] = a1 + d1; - op[14] = a1 - d1; - op[6] = b1 + c1; - op[10] = b1 - c1; + op[2] = a1 + d1; + op[14] = a1 - d1; + op[6] = b1 + c1; + op[10] = b1 - c1; - a1 = ip[3] + ip[11]; - b1 = ip[3] - ip[11]; + a1 = ip[3] + ip[11]; + b1 = ip[3] - ip[11]; - op[3] = a1 + d2; - op[15] = a1 - d2; - op[7] = b1 + c2; - op[11] = b1 - c2; + op[3] = a1 + d2; + op[15] = a1 - d2; + op[7] = b1 + c2; + op[11] = b1 - c2; - ip = output; + ip = output; - /* prepare data for load */ - prefetch_load_short(ip + shortpitch); + /* prepare data for load */ + prefetch_load_short(ip + shortpitch); - /* second loop is unrolled */ - a1 = ip[0] + ip[2]; - b1 = ip[0] - ip[2]; + /* second loop is unrolled */ + a1 = ip[0] + ip[2]; + b1 = ip[0] - ip[2]; - temp1 = (ip[1] * sinpi8sqrt2) >> 16; - temp2 = ip[3] + ((ip[3] * cospi8sqrt2minus1) >> 16); - c1 = temp1 - temp2; + temp1 = (ip[1] * sinpi8sqrt2) >> 16; + temp2 = ip[3] + ((ip[3] * cospi8sqrt2minus1) >> 16); + c1 = temp1 - temp2; - temp1 = ip[1] + ((ip[1] * cospi8sqrt2minus1) >> 16); - temp2 = (ip[3] * sinpi8sqrt2) >> 16; - d1 = temp1 + temp2; + temp1 = ip[1] + ((ip[1] * cospi8sqrt2minus1) >> 16); + temp2 = (ip[3] * sinpi8sqrt2) >> 16; + d1 = temp1 + temp2; - temp3 = (ip[5] * sinpi8sqrt2) >> 16; - temp4 = ip[7] + ((ip[7] * cospi8sqrt2minus1) >> 16); - c2 = temp3 - temp4; + temp3 = (ip[5] * sinpi8sqrt2) >> 16; + temp4 = ip[7] + ((ip[7] * cospi8sqrt2minus1) >> 16); + c2 = temp3 - temp4; - temp3 = ip[5] + ((ip[5] * cospi8sqrt2minus1) >> 16); - temp4 = (ip[7] * sinpi8sqrt2) >> 16; - d2 = temp3 + temp4; + temp3 = ip[5] + ((ip[5] * cospi8sqrt2minus1) >> 16); + temp4 = (ip[7] * sinpi8sqrt2) >> 16; + d2 = temp3 + temp4; - op[0] = (a1 + d1 + 4) >> 3; - op[3] = (a1 - d1 + 4) >> 3; - op[1] = (b1 + c1 + 4) >> 3; - op[2] = (b1 - c1 + 4) >> 3; + op[0] = (a1 + d1 + 4) >> 3; + op[3] = (a1 - d1 + 4) >> 3; + op[1] = (b1 + c1 + 4) >> 3; + op[2] = (b1 - c1 + 4) >> 3; - a1 = ip[4] + ip[6]; - b1 = ip[4] - ip[6]; + 
a1 = ip[4] + ip[6]; + b1 = ip[4] - ip[6]; - op[4] = (a1 + d2 + 4) >> 3; - op[7] = (a1 - d2 + 4) >> 3; - op[5] = (b1 + c2 + 4) >> 3; - op[6] = (b1 - c2 + 4) >> 3; + op[4] = (a1 + d2 + 4) >> 3; + op[7] = (a1 - d2 + 4) >> 3; + op[5] = (b1 + c2 + 4) >> 3; + op[6] = (b1 - c2 + 4) >> 3; - a1 = ip[8] + ip[10]; - b1 = ip[8] - ip[10]; + a1 = ip[8] + ip[10]; + b1 = ip[8] - ip[10]; - temp1 = (ip[9] * sinpi8sqrt2) >> 16; - temp2 = ip[11] + ((ip[11] * cospi8sqrt2minus1) >> 16); - c1 = temp1 - temp2; + temp1 = (ip[9] * sinpi8sqrt2) >> 16; + temp2 = ip[11] + ((ip[11] * cospi8sqrt2minus1) >> 16); + c1 = temp1 - temp2; - temp1 = ip[9] + ((ip[9] * cospi8sqrt2minus1) >> 16); - temp2 = (ip[11] * sinpi8sqrt2) >> 16; - d1 = temp1 + temp2; + temp1 = ip[9] + ((ip[9] * cospi8sqrt2minus1) >> 16); + temp2 = (ip[11] * sinpi8sqrt2) >> 16; + d1 = temp1 + temp2; - temp3 = (ip[13] * sinpi8sqrt2) >> 16; - temp4 = ip[15] + ((ip[15] * cospi8sqrt2minus1) >> 16); - c2 = temp3 - temp4; + temp3 = (ip[13] * sinpi8sqrt2) >> 16; + temp4 = ip[15] + ((ip[15] * cospi8sqrt2minus1) >> 16); + c2 = temp3 - temp4; - temp3 = ip[13] + ((ip[13] * cospi8sqrt2minus1) >> 16); - temp4 = (ip[15] * sinpi8sqrt2) >> 16; - d2 = temp3 + temp4; + temp3 = ip[13] + ((ip[13] * cospi8sqrt2minus1) >> 16); + temp4 = (ip[15] * sinpi8sqrt2) >> 16; + d2 = temp3 + temp4; - op[8] = (a1 + d1 + 4) >> 3; - op[11] = (a1 - d1 + 4) >> 3; - op[9] = (b1 + c1 + 4) >> 3; - op[10] = (b1 - c1 + 4) >> 3; + op[8] = (a1 + d1 + 4) >> 3; + op[11] = (a1 - d1 + 4) >> 3; + op[9] = (b1 + c1 + 4) >> 3; + op[10] = (b1 - c1 + 4) >> 3; - a1 = ip[12] + ip[14]; - b1 = ip[12] - ip[14]; + a1 = ip[12] + ip[14]; + b1 = ip[12] - ip[14]; - op[12] = (a1 + d2 + 4) >> 3; - op[15] = (a1 - d2 + 4) >> 3; - op[13] = (b1 + c2 + 4) >> 3; - op[14] = (b1 - c2 + 4) >> 3; + op[12] = (a1 + d2 + 4) >> 3; + op[15] = (a1 - d2 + 4) >> 3; + op[13] = (b1 + c2 + 4) >> 3; + op[14] = (b1 - c2 + 4) >> 3; - ip = output; + ip = output; - for (r = 0; r < 4; r++) - { - for (c = 0; c < 4; c++) - { - 
short a = ip[c] + pred_ptr[c] ; - dst_ptr[c] = cm[a] ; - } - - ip += 4; - dst_ptr += dst_stride; - pred_ptr += pred_stride; + for (r = 0; r < 4; ++r) { + for (c = 0; c < 4; ++c) { + short a = ip[c] + pred_ptr[c]; + dst_ptr[c] = cm[a]; } + + ip += 4; + dst_ptr += dst_stride; + pred_ptr += pred_stride; + } } -void vp8_dc_only_idct_add_dspr2(short input_dc, unsigned char *pred_ptr, int pred_stride, unsigned char *dst_ptr, int dst_stride) -{ - int a1; - int i, absa1; - int t2, vector_a1, vector_a; +void vp8_dc_only_idct_add_dspr2(short input_dc, unsigned char *pred_ptr, + int pred_stride, unsigned char *dst_ptr, + int dst_stride) { + int a1; + int i, absa1; + int t2, vector_a1, vector_a; - /* a1 = ((input_dc + 4) >> 3); */ - __asm__ __volatile__ ( - "addi %[a1], %[input_dc], 4 \n\t" - "sra %[a1], %[a1], 3 \n\t" - : [a1] "=r" (a1) - : [input_dc] "r" (input_dc) - ); + /* a1 = ((input_dc + 4) >> 3); */ + __asm__ __volatile__( + "addi %[a1], %[input_dc], 4 \n\t" + "sra %[a1], %[a1], 3 \n\t" + : [a1] "=r"(a1) + : [input_dc] "r"(input_dc)); - if (a1 < 0) - { - /* use quad-byte - * input and output memory are four byte aligned - */ - __asm__ __volatile__ ( - "abs %[absa1], %[a1] \n\t" - "replv.qb %[vector_a1], %[absa1] \n\t" - : [absa1] "=r" (absa1), [vector_a1] "=r" (vector_a1) - : [a1] "r" (a1) - ); + if (a1 < 0) { + /* use quad-byte + * input and output memory are four byte aligned + */ + __asm__ __volatile__( + "abs %[absa1], %[a1] \n\t" + "replv.qb %[vector_a1], %[absa1] \n\t" + : [absa1] "=r"(absa1), [vector_a1] "=r"(vector_a1) + : [a1] "r"(a1)); - /* use (a1 - predptr[c]) instead a1 + predptr[c] */ - for (i = 4; i--;) - { - __asm__ __volatile__ ( - "lw %[t2], 0(%[pred_ptr]) \n\t" - "add %[pred_ptr], %[pred_ptr], %[pred_stride] \n\t" - "subu_s.qb %[vector_a], %[t2], %[vector_a1] \n\t" - "sw %[vector_a], 0(%[dst_ptr]) \n\t" - "add %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t" - : [t2] "=&r" (t2), [vector_a] "=&r" (vector_a), - [dst_ptr] "+&r" (dst_ptr), [pred_ptr] "+&r" 
(pred_ptr) - : [dst_stride] "r" (dst_stride), [pred_stride] "r" (pred_stride), [vector_a1] "r" (vector_a1) - ); - } + /* use (a1 - predptr[c]) instead a1 + predptr[c] */ + for (i = 4; i--;) { + __asm__ __volatile__( + "lw %[t2], 0(%[pred_ptr]) \n\t" + "add %[pred_ptr], %[pred_ptr], %[pred_stride] \n\t" + "subu_s.qb %[vector_a], %[t2], %[vector_a1] \n\t" + "sw %[vector_a], 0(%[dst_ptr]) \n\t" + "add %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t" + : [t2] "=&r"(t2), [vector_a] "=&r"(vector_a), + [dst_ptr] "+&r"(dst_ptr), [pred_ptr] "+&r"(pred_ptr) + : [dst_stride] "r"(dst_stride), [pred_stride] "r"(pred_stride), + [vector_a1] "r"(vector_a1)); } - else - { - /* use quad-byte - * input and output memory are four byte aligned - */ - __asm__ __volatile__ ( - "replv.qb %[vector_a1], %[a1] \n\t" - : [vector_a1] "=r" (vector_a1) - : [a1] "r" (a1) - ); + } else { + /* use quad-byte + * input and output memory are four byte aligned + */ + __asm__ __volatile__("replv.qb %[vector_a1], %[a1] \n\t" + : [vector_a1] "=r"(vector_a1) + : [a1] "r"(a1)); - for (i = 4; i--;) - { - __asm__ __volatile__ ( - "lw %[t2], 0(%[pred_ptr]) \n\t" - "add %[pred_ptr], %[pred_ptr], %[pred_stride] \n\t" - "addu_s.qb %[vector_a], %[vector_a1], %[t2] \n\t" - "sw %[vector_a], 0(%[dst_ptr]) \n\t" - "add %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t" - : [t2] "=&r" (t2), [vector_a] "=&r" (vector_a), - [dst_ptr] "+&r" (dst_ptr), [pred_ptr] "+&r" (pred_ptr) - : [dst_stride] "r" (dst_stride), [pred_stride] "r" (pred_stride), [vector_a1] "r" (vector_a1) - ); - } + for (i = 4; i--;) { + __asm__ __volatile__( + "lw %[t2], 0(%[pred_ptr]) \n\t" + "add %[pred_ptr], %[pred_ptr], %[pred_stride] \n\t" + "addu_s.qb %[vector_a], %[vector_a1], %[t2] \n\t" + "sw %[vector_a], 0(%[dst_ptr]) \n\t" + "add %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t" + : [t2] "=&r"(t2), [vector_a] "=&r"(vector_a), + [dst_ptr] "+&r"(dst_ptr), [pred_ptr] "+&r"(pred_ptr) + : [dst_stride] "r"(dst_stride), [pred_stride] "r"(pred_stride), + [vector_a1] 
"r"(vector_a1)); } - + } } -void vp8_short_inv_walsh4x4_dspr2(short *input, short *mb_dqcoeff) -{ - short output[16]; - int i; - int a1, b1, c1, d1; - int a2, b2, c2, d2; - short *ip = input; - short *op = output; +void vp8_short_inv_walsh4x4_dspr2(short *input, short *mb_dqcoeff) { + short output[16]; + int i; + int a1, b1, c1, d1; + int a2, b2, c2, d2; + short *ip = input; + short *op = output; - prefetch_load_short(ip); + prefetch_load_short(ip); - for (i = 4; i--;) - { - a1 = ip[0] + ip[12]; - b1 = ip[4] + ip[8]; - c1 = ip[4] - ip[8]; - d1 = ip[0] - ip[12]; + for (i = 4; i--;) { + a1 = ip[0] + ip[12]; + b1 = ip[4] + ip[8]; + c1 = ip[4] - ip[8]; + d1 = ip[0] - ip[12]; - op[0] = a1 + b1; - op[4] = c1 + d1; - op[8] = a1 - b1; - op[12] = d1 - c1; + op[0] = a1 + b1; + op[4] = c1 + d1; + op[8] = a1 - b1; + op[12] = d1 - c1; - ip++; - op++; - } + ip++; + op++; + } - ip = output; - op = output; + ip = output; + op = output; - prefetch_load_short(ip); + prefetch_load_short(ip); - for (i = 4; i--;) - { - a1 = ip[0] + ip[3] + 3; - b1 = ip[1] + ip[2]; - c1 = ip[1] - ip[2]; - d1 = ip[0] - ip[3] + 3; + for (i = 4; i--;) { + a1 = ip[0] + ip[3] + 3; + b1 = ip[1] + ip[2]; + c1 = ip[1] - ip[2]; + d1 = ip[0] - ip[3] + 3; - a2 = a1 + b1; - b2 = d1 + c1; - c2 = a1 - b1; - d2 = d1 - c1; + a2 = a1 + b1; + b2 = d1 + c1; + c2 = a1 - b1; + d2 = d1 - c1; - op[0] = a2 >> 3; - op[1] = b2 >> 3; - op[2] = c2 >> 3; - op[3] = d2 >> 3; + op[0] = a2 >> 3; + op[1] = b2 >> 3; + op[2] = c2 >> 3; + op[3] = d2 >> 3; - ip += 4; - op += 4; - } + ip += 4; + op += 4; + } - for (i = 0; i < 16; i++) - { - mb_dqcoeff[i * 16] = output[i]; - } + for (i = 0; i < 16; ++i) { + mb_dqcoeff[i * 16] = output[i]; + } } -void vp8_short_inv_walsh4x4_1_dspr2(short *input, short *mb_dqcoeff) -{ - int a1; +void vp8_short_inv_walsh4x4_1_dspr2(short *input, short *mb_dqcoeff) { + int a1; - a1 = ((input[0] + 3) >> 3); + a1 = ((input[0] + 3) >> 3); - __asm__ __volatile__ ( - "sh %[a1], 0(%[mb_dqcoeff]) \n\t" - "sh %[a1], 
32(%[mb_dqcoeff]) \n\t" - "sh %[a1], 64(%[mb_dqcoeff]) \n\t" - "sh %[a1], 96(%[mb_dqcoeff]) \n\t" - "sh %[a1], 128(%[mb_dqcoeff]) \n\t" - "sh %[a1], 160(%[mb_dqcoeff]) \n\t" - "sh %[a1], 192(%[mb_dqcoeff]) \n\t" - "sh %[a1], 224(%[mb_dqcoeff]) \n\t" - "sh %[a1], 256(%[mb_dqcoeff]) \n\t" - "sh %[a1], 288(%[mb_dqcoeff]) \n\t" - "sh %[a1], 320(%[mb_dqcoeff]) \n\t" - "sh %[a1], 352(%[mb_dqcoeff]) \n\t" - "sh %[a1], 384(%[mb_dqcoeff]) \n\t" - "sh %[a1], 416(%[mb_dqcoeff]) \n\t" - "sh %[a1], 448(%[mb_dqcoeff]) \n\t" - "sh %[a1], 480(%[mb_dqcoeff]) \n\t" + __asm__ __volatile__( + "sh %[a1], 0(%[mb_dqcoeff]) \n\t" + "sh %[a1], 32(%[mb_dqcoeff]) \n\t" + "sh %[a1], 64(%[mb_dqcoeff]) \n\t" + "sh %[a1], 96(%[mb_dqcoeff]) \n\t" + "sh %[a1], 128(%[mb_dqcoeff]) \n\t" + "sh %[a1], 160(%[mb_dqcoeff]) \n\t" + "sh %[a1], 192(%[mb_dqcoeff]) \n\t" + "sh %[a1], 224(%[mb_dqcoeff]) \n\t" + "sh %[a1], 256(%[mb_dqcoeff]) \n\t" + "sh %[a1], 288(%[mb_dqcoeff]) \n\t" + "sh %[a1], 320(%[mb_dqcoeff]) \n\t" + "sh %[a1], 352(%[mb_dqcoeff]) \n\t" + "sh %[a1], 384(%[mb_dqcoeff]) \n\t" + "sh %[a1], 416(%[mb_dqcoeff]) \n\t" + "sh %[a1], 448(%[mb_dqcoeff]) \n\t" + "sh %[a1], 480(%[mb_dqcoeff]) \n\t" - : - : [a1] "r" (a1), [mb_dqcoeff] "r" (mb_dqcoeff) - ); + : + : [a1] "r"(a1), [mb_dqcoeff] "r"(mb_dqcoeff)); } #endif diff --git a/libs/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c b/libs/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c index a14b397d8f..e44ae29278 100644 --- a/libs/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c +++ b/libs/libvpx/vp8/common/mips/dspr2/reconinter_dspr2.c @@ -8,114 +8,90 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "vpx_config.h" #include "vp8_rtcd.h" #include "vpx/vpx_integer.h" #if HAVE_DSPR2 -inline void prefetch_load_int(unsigned char *src) -{ - __asm__ __volatile__ ( - "pref 0, 0(%[src]) \n\t" - : - : [src] "r" (src) - ); +inline void prefetch_load_int(unsigned char *src) { + __asm__ __volatile__("pref 0, 0(%[src]) \n\t" : : [src] "r"(src)); } +__inline void vp8_copy_mem16x16_dspr2(unsigned char *RESTRICT src, + int src_stride, + unsigned char *RESTRICT dst, + int dst_stride) { + int r; + unsigned int a0, a1, a2, a3; -__inline void vp8_copy_mem16x16_dspr2( - unsigned char *RESTRICT src, - int src_stride, - unsigned char *RESTRICT dst, - int dst_stride) -{ - int r; - unsigned int a0, a1, a2, a3; - - for (r = 16; r--;) - { - /* load src data in cache memory */ - prefetch_load_int(src + src_stride); - - /* use unaligned memory load and store */ - __asm__ __volatile__ ( - "ulw %[a0], 0(%[src]) \n\t" - "ulw %[a1], 4(%[src]) \n\t" - "ulw %[a2], 8(%[src]) \n\t" - "ulw %[a3], 12(%[src]) \n\t" - "sw %[a0], 0(%[dst]) \n\t" - "sw %[a1], 4(%[dst]) \n\t" - "sw %[a2], 8(%[dst]) \n\t" - "sw %[a3], 12(%[dst]) \n\t" - : [a0] "=&r" (a0), [a1] "=&r" (a1), - [a2] "=&r" (a2), [a3] "=&r" (a3) - : [src] "r" (src), [dst] "r" (dst) - ); - - src += src_stride; - dst += dst_stride; - } -} - - -__inline void vp8_copy_mem8x8_dspr2( - unsigned char *RESTRICT src, - int src_stride, - unsigned char *RESTRICT dst, - int dst_stride) -{ - int r; - unsigned int a0, a1; - + for (r = 16; r--;) { /* load src data in cache memory */ prefetch_load_int(src + src_stride); - for (r = 8; r--;) - { - /* use unaligned memory load and store */ - __asm__ __volatile__ ( - "ulw %[a0], 0(%[src]) \n\t" - "ulw %[a1], 4(%[src]) \n\t" - "sw %[a0], 0(%[dst]) \n\t" - "sw %[a1], 4(%[dst]) \n\t" - : [a0] "=&r" (a0), [a1] "=&r" (a1) - : [src] "r" (src), [dst] "r" (dst) - ); + /* use unaligned memory load and store */ + __asm__ __volatile__( + "ulw %[a0], 0(%[src]) \n\t" + "ulw %[a1], 4(%[src]) \n\t" + "ulw %[a2], 
8(%[src]) \n\t" + "ulw %[a3], 12(%[src]) \n\t" + "sw %[a0], 0(%[dst]) \n\t" + "sw %[a1], 4(%[dst]) \n\t" + "sw %[a2], 8(%[dst]) \n\t" + "sw %[a3], 12(%[dst]) \n\t" + : [a0] "=&r"(a0), [a1] "=&r"(a1), [a2] "=&r"(a2), [a3] "=&r"(a3) + : [src] "r"(src), [dst] "r"(dst)); - src += src_stride; - dst += dst_stride; - } + src += src_stride; + dst += dst_stride; + } } +__inline void vp8_copy_mem8x8_dspr2(unsigned char *RESTRICT src, int src_stride, + unsigned char *RESTRICT dst, + int dst_stride) { + int r; + unsigned int a0, a1; -__inline void vp8_copy_mem8x4_dspr2( - unsigned char *RESTRICT src, - int src_stride, - unsigned char *RESTRICT dst, - int dst_stride) -{ - int r; - unsigned int a0, a1; + /* load src data in cache memory */ + prefetch_load_int(src + src_stride); - /* load src data in cache memory */ - prefetch_load_int(src + src_stride); + for (r = 8; r--;) { + /* use unaligned memory load and store */ + __asm__ __volatile__( + "ulw %[a0], 0(%[src]) \n\t" + "ulw %[a1], 4(%[src]) \n\t" + "sw %[a0], 0(%[dst]) \n\t" + "sw %[a1], 4(%[dst]) \n\t" + : [a0] "=&r"(a0), [a1] "=&r"(a1) + : [src] "r"(src), [dst] "r"(dst)); - for (r = 4; r--;) - { - /* use unaligned memory load and store */ - __asm__ __volatile__ ( - "ulw %[a0], 0(%[src]) \n\t" - "ulw %[a1], 4(%[src]) \n\t" - "sw %[a0], 0(%[dst]) \n\t" - "sw %[a1], 4(%[dst]) \n\t" - : [a0] "=&r" (a0), [a1] "=&r" (a1) - : [src] "r" (src), [dst] "r" (dst) - ); + src += src_stride; + dst += dst_stride; + } +} - src += src_stride; - dst += dst_stride; - } +__inline void vp8_copy_mem8x4_dspr2(unsigned char *RESTRICT src, int src_stride, + unsigned char *RESTRICT dst, + int dst_stride) { + int r; + unsigned int a0, a1; + + /* load src data in cache memory */ + prefetch_load_int(src + src_stride); + + for (r = 4; r--;) { + /* use unaligned memory load and store */ + __asm__ __volatile__( + "ulw %[a0], 0(%[src]) \n\t" + "ulw %[a1], 4(%[src]) \n\t" + "sw %[a0], 0(%[dst]) \n\t" + "sw %[a1], 4(%[dst]) \n\t" + : [a0] "=&r"(a0), [a1] 
"=&r"(a1) + : [src] "r"(src), [dst] "r"(dst)); + + src += src_stride; + dst += dst_stride; + } } #endif diff --git a/libs/libvpx/vp8/common/mips/dspr2/vp8_loopfilter_filters_dspr2.c b/libs/libvpx/vp8/common/mips/dspr2/vp8_loopfilter_filters_dspr2.c index 9ae6bc8f92..b79af1cc88 100644 --- a/libs/libvpx/vp8/common/mips/dspr2/vp8_loopfilter_filters_dspr2.c +++ b/libs/libvpx/vp8/common/mips/dspr2/vp8_loopfilter_filters_dspr2.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include #include "vp8_rtcd.h" #include "vp8/common/onyxc_int.h" @@ -17,1198 +16,636 @@ typedef unsigned char uc; /* prefetch data for load */ -inline void prefetch_load_lf(unsigned char *src) -{ - __asm__ __volatile__ ( - "pref 0, 0(%[src]) \n\t" - : - : [src] "r" (src) - ); +inline void prefetch_load_lf(unsigned char *src) { + __asm__ __volatile__("pref 0, 0(%[src]) \n\t" : : [src] "r"(src)); } - /* prefetch data for store */ -inline void prefetch_store_lf(unsigned char *dst) -{ - __asm__ __volatile__ ( - "pref 1, 0(%[dst]) \n\t" - : - : [dst] "r" (dst) - ); +inline void prefetch_store_lf(unsigned char *dst) { + __asm__ __volatile__("pref 1, 0(%[dst]) \n\t" : : [dst] "r"(dst)); } /* processing 4 pixels at the same time * compute hev and mask in the same function */ -static __inline void vp8_filter_mask_vec_mips -( - uint32_t limit, - uint32_t flimit, - uint32_t p1, - uint32_t p0, - uint32_t p3, - uint32_t p2, - uint32_t q0, - uint32_t q1, - uint32_t q2, - uint32_t q3, - uint32_t thresh, - uint32_t *hev, - uint32_t *mask -) -{ - uint32_t c, r, r3, r_k; - uint32_t s1, s2, s3; - uint32_t ones = 0xFFFFFFFF; - uint32_t hev1; +static __inline void vp8_filter_mask_vec_mips( + uint32_t limit, uint32_t flimit, uint32_t p1, uint32_t p0, uint32_t p3, + uint32_t p2, uint32_t q0, uint32_t q1, uint32_t q2, uint32_t q3, + uint32_t thresh, uint32_t *hev, uint32_t *mask) { + uint32_t c, r, r3, r_k; + uint32_t s1, s2, s3; + uint32_t ones = 0xFFFFFFFF; + uint32_t hev1; - __asm__ 
__volatile__ ( - /* mask |= (abs(p3 - p2) > limit) */ - "subu_s.qb %[c], %[p3], %[p2] \n\t" - "subu_s.qb %[r_k], %[p2], %[p3] \n\t" - "or %[r_k], %[r_k], %[c] \n\t" - "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t" - "or %[r], $0, %[c] \n\t" + __asm__ __volatile__( + /* mask |= (abs(p3 - p2) > limit) */ + "subu_s.qb %[c], %[p3], %[p2] \n\t" + "subu_s.qb %[r_k], %[p2], %[p3] \n\t" + "or %[r_k], %[r_k], %[c] \n\t" + "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t" + "or %[r], $0, %[c] \n\t" - /* mask |= (abs(p2 - p1) > limit) */ - "subu_s.qb %[c], %[p2], %[p1] \n\t" - "subu_s.qb %[r_k], %[p1], %[p2] \n\t" - "or %[r_k], %[r_k], %[c] \n\t" - "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t" - "or %[r], %[r], %[c] \n\t" + /* mask |= (abs(p2 - p1) > limit) */ + "subu_s.qb %[c], %[p2], %[p1] \n\t" + "subu_s.qb %[r_k], %[p1], %[p2] \n\t" + "or %[r_k], %[r_k], %[c] \n\t" + "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t" + "or %[r], %[r], %[c] \n\t" - /* mask |= (abs(p1 - p0) > limit) - * hev |= (abs(p1 - p0) > thresh) - */ - "subu_s.qb %[c], %[p1], %[p0] \n\t" - "subu_s.qb %[r_k], %[p0], %[p1] \n\t" - "or %[r_k], %[r_k], %[c] \n\t" - "cmpgu.lt.qb %[c], %[thresh], %[r_k] \n\t" - "or %[r3], $0, %[c] \n\t" - "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t" - "or %[r], %[r], %[c] \n\t" + /* mask |= (abs(p1 - p0) > limit) + * hev |= (abs(p1 - p0) > thresh) + */ + "subu_s.qb %[c], %[p1], %[p0] \n\t" + "subu_s.qb %[r_k], %[p0], %[p1] \n\t" + "or %[r_k], %[r_k], %[c] \n\t" + "cmpgu.lt.qb %[c], %[thresh], %[r_k] \n\t" + "or %[r3], $0, %[c] \n\t" + "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t" + "or %[r], %[r], %[c] \n\t" - /* mask |= (abs(q1 - q0) > limit) - * hev |= (abs(q1 - q0) > thresh) - */ - "subu_s.qb %[c], %[q1], %[q0] \n\t" - "subu_s.qb %[r_k], %[q0], %[q1] \n\t" - "or %[r_k], %[r_k], %[c] \n\t" - "cmpgu.lt.qb %[c], %[thresh], %[r_k] \n\t" - "or %[r3], %[r3], %[c] \n\t" - "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t" - "or %[r], %[r], %[c] \n\t" + /* mask |= (abs(q1 - q0) > limit) + * hev |= (abs(q1 - q0) > thresh) 
+ */ + "subu_s.qb %[c], %[q1], %[q0] \n\t" + "subu_s.qb %[r_k], %[q0], %[q1] \n\t" + "or %[r_k], %[r_k], %[c] \n\t" + "cmpgu.lt.qb %[c], %[thresh], %[r_k] \n\t" + "or %[r3], %[r3], %[c] \n\t" + "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t" + "or %[r], %[r], %[c] \n\t" - /* mask |= (abs(q2 - q1) > limit) */ - "subu_s.qb %[c], %[q2], %[q1] \n\t" - "subu_s.qb %[r_k], %[q1], %[q2] \n\t" - "or %[r_k], %[r_k], %[c] \n\t" - "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t" - "or %[r], %[r], %[c] \n\t" - "sll %[r3], %[r3], 24 \n\t" + /* mask |= (abs(q2 - q1) > limit) */ + "subu_s.qb %[c], %[q2], %[q1] \n\t" + "subu_s.qb %[r_k], %[q1], %[q2] \n\t" + "or %[r_k], %[r_k], %[c] \n\t" + "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t" + "or %[r], %[r], %[c] \n\t" + "sll %[r3], %[r3], 24 \n\t" - /* mask |= (abs(q3 - q2) > limit) */ - "subu_s.qb %[c], %[q3], %[q2] \n\t" - "subu_s.qb %[r_k], %[q2], %[q3] \n\t" - "or %[r_k], %[r_k], %[c] \n\t" - "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t" - "or %[r], %[r], %[c] \n\t" + /* mask |= (abs(q3 - q2) > limit) */ + "subu_s.qb %[c], %[q3], %[q2] \n\t" + "subu_s.qb %[r_k], %[q2], %[q3] \n\t" + "or %[r_k], %[r_k], %[c] \n\t" + "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t" + "or %[r], %[r], %[c] \n\t" - : [c] "=&r" (c), [r_k] "=&r" (r_k), - [r] "=&r" (r), [r3] "=&r" (r3) - : [limit] "r" (limit), [p3] "r" (p3), [p2] "r" (p2), - [p1] "r" (p1), [p0] "r" (p0), [q1] "r" (q1), [q0] "r" (q0), - [q2] "r" (q2), [q3] "r" (q3), [thresh] "r" (thresh) - ); + : [c] "=&r"(c), [r_k] "=&r"(r_k), [r] "=&r"(r), [r3] "=&r"(r3) + : [limit] "r"(limit), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1), + [p0] "r"(p0), [q1] "r"(q1), [q0] "r"(q0), [q2] "r"(q2), [q3] "r"(q3), + [thresh] "r"(thresh)); - __asm__ __volatile__ ( - /* abs(p0 - q0) */ - "subu_s.qb %[c], %[p0], %[q0] \n\t" - "subu_s.qb %[r_k], %[q0], %[p0] \n\t" - "wrdsp %[r3] \n\t" - "or %[s1], %[r_k], %[c] \n\t" + __asm__ __volatile__( + /* abs(p0 - q0) */ + "subu_s.qb %[c], %[p0], %[q0] \n\t" + "subu_s.qb %[r_k], %[q0], %[p0] \n\t" + 
"wrdsp %[r3] \n\t" + "or %[s1], %[r_k], %[c] \n\t" - /* abs(p1 - q1) */ - "subu_s.qb %[c], %[p1], %[q1] \n\t" - "addu_s.qb %[s3], %[s1], %[s1] \n\t" - "pick.qb %[hev1], %[ones], $0 \n\t" - "subu_s.qb %[r_k], %[q1], %[p1] \n\t" - "or %[s2], %[r_k], %[c] \n\t" + /* abs(p1 - q1) */ + "subu_s.qb %[c], %[p1], %[q1] \n\t" + "addu_s.qb %[s3], %[s1], %[s1] \n\t" + "pick.qb %[hev1], %[ones], $0 \n\t" + "subu_s.qb %[r_k], %[q1], %[p1] \n\t" + "or %[s2], %[r_k], %[c] \n\t" - /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > flimit * 2 + limit */ - "shrl.qb %[s2], %[s2], 1 \n\t" - "addu_s.qb %[s1], %[s2], %[s3] \n\t" - "cmpgu.lt.qb %[c], %[flimit], %[s1] \n\t" - "or %[r], %[r], %[c] \n\t" - "sll %[r], %[r], 24 \n\t" + /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > flimit * 2 + limit */ + "shrl.qb %[s2], %[s2], 1 \n\t" + "addu_s.qb %[s1], %[s2], %[s3] \n\t" + "cmpgu.lt.qb %[c], %[flimit], %[s1] \n\t" + "or %[r], %[r], %[c] \n\t" + "sll %[r], %[r], 24 \n\t" - "wrdsp %[r] \n\t" - "pick.qb %[s2], $0, %[ones] \n\t" + "wrdsp %[r] \n\t" + "pick.qb %[s2], $0, %[ones] \n\t" - : [c] "=&r" (c), [r_k] "=&r" (r_k), [s1] "=&r" (s1), [hev1] "=&r" (hev1), - [s2] "=&r" (s2), [r] "+r" (r), [s3] "=&r" (s3) - : [p0] "r" (p0), [q0] "r" (q0), [p1] "r" (p1), [r3] "r" (r3), - [q1] "r" (q1), [ones] "r" (ones), [flimit] "r" (flimit) - ); + : [c] "=&r"(c), [r_k] "=&r"(r_k), [s1] "=&r"(s1), [hev1] "=&r"(hev1), + [s2] "=&r"(s2), [r] "+r"(r), [s3] "=&r"(s3) + : [p0] "r"(p0), [q0] "r"(q0), [p1] "r"(p1), [r3] "r"(r3), [q1] "r"(q1), + [ones] "r"(ones), [flimit] "r"(flimit)); - *hev = hev1; - *mask = s2; + *hev = hev1; + *mask = s2; } - /* inputs & outputs are quad-byte vectors */ -static __inline void vp8_filter_mips -( - uint32_t mask, - uint32_t hev, - uint32_t *ps1, - uint32_t *ps0, - uint32_t *qs0, - uint32_t *qs1 -) -{ - int32_t vp8_filter_l, vp8_filter_r; - int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r; - int32_t subr_r, subr_l; - uint32_t t1, t2, HWM, t3; - uint32_t hev_l, hev_r, mask_l, mask_r, invhev_l, 
invhev_r; +static __inline void vp8_filter_mips(uint32_t mask, uint32_t hev, uint32_t *ps1, + uint32_t *ps0, uint32_t *qs0, + uint32_t *qs1) { + int32_t vp8_filter_l, vp8_filter_r; + int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r; + int32_t subr_r, subr_l; + uint32_t t1, t2, HWM, t3; + uint32_t hev_l, hev_r, mask_l, mask_r, invhev_l, invhev_r; - int32_t vps1, vps0, vqs0, vqs1; - int32_t vps1_l, vps1_r, vps0_l, vps0_r, vqs0_l, vqs0_r, vqs1_l, vqs1_r; - uint32_t N128; + int32_t vps1, vps0, vqs0, vqs1; + int32_t vps1_l, vps1_r, vps0_l, vps0_r, vqs0_l, vqs0_r, vqs1_l, vqs1_r; + uint32_t N128; - N128 = 0x80808080; - t1 = 0x03000300; - t2 = 0x04000400; - t3 = 0x01000100; - HWM = 0xFF00FF00; + N128 = 0x80808080; + t1 = 0x03000300; + t2 = 0x04000400; + t3 = 0x01000100; + HWM = 0xFF00FF00; - vps0 = (*ps0) ^ N128; - vps1 = (*ps1) ^ N128; - vqs0 = (*qs0) ^ N128; - vqs1 = (*qs1) ^ N128; + vps0 = (*ps0) ^ N128; + vps1 = (*ps1) ^ N128; + vqs0 = (*qs0) ^ N128; + vqs1 = (*qs1) ^ N128; - /* use halfword pairs instead quad-bytes because of accuracy */ - vps0_l = vps0 & HWM; - vps0_r = vps0 << 8; - vps0_r = vps0_r & HWM; + /* use halfword pairs instead quad-bytes because of accuracy */ + vps0_l = vps0 & HWM; + vps0_r = vps0 << 8; + vps0_r = vps0_r & HWM; - vps1_l = vps1 & HWM; - vps1_r = vps1 << 8; - vps1_r = vps1_r & HWM; + vps1_l = vps1 & HWM; + vps1_r = vps1 << 8; + vps1_r = vps1_r & HWM; - vqs0_l = vqs0 & HWM; - vqs0_r = vqs0 << 8; - vqs0_r = vqs0_r & HWM; + vqs0_l = vqs0 & HWM; + vqs0_r = vqs0 << 8; + vqs0_r = vqs0_r & HWM; - vqs1_l = vqs1 & HWM; - vqs1_r = vqs1 << 8; - vqs1_r = vqs1_r & HWM; + vqs1_l = vqs1 & HWM; + vqs1_r = vqs1 << 8; + vqs1_r = vqs1_r & HWM; - mask_l = mask & HWM; - mask_r = mask << 8; - mask_r = mask_r & HWM; + mask_l = mask & HWM; + mask_r = mask << 8; + mask_r = mask_r & HWM; - hev_l = hev & HWM; - hev_r = hev << 8; - hev_r = hev_r & HWM; + hev_l = hev & HWM; + hev_r = hev << 8; + hev_r = hev_r & HWM; - __asm__ __volatile__ ( - /* vp8_filter = 
vp8_signed_char_clamp(ps1 - qs1); */ - "subq_s.ph %[vp8_filter_l], %[vps1_l], %[vqs1_l] \n\t" - "subq_s.ph %[vp8_filter_r], %[vps1_r], %[vqs1_r] \n\t" + __asm__ __volatile__( + /* vp8_filter = vp8_signed_char_clamp(ps1 - qs1); */ + "subq_s.ph %[vp8_filter_l], %[vps1_l], %[vqs1_l] \n\t" + "subq_s.ph %[vp8_filter_r], %[vps1_r], %[vqs1_r] \n\t" - /* qs0 - ps0 */ - "subq_s.ph %[subr_l], %[vqs0_l], %[vps0_l] \n\t" - "subq_s.ph %[subr_r], %[vqs0_r], %[vps0_r] \n\t" + /* qs0 - ps0 */ + "subq_s.ph %[subr_l], %[vqs0_l], %[vps0_l] \n\t" + "subq_s.ph %[subr_r], %[vqs0_r], %[vps0_r] \n\t" - /* vp8_filter &= hev; */ - "and %[vp8_filter_l], %[vp8_filter_l], %[hev_l] \n\t" - "and %[vp8_filter_r], %[vp8_filter_r], %[hev_r] \n\t" + /* vp8_filter &= hev; */ + "and %[vp8_filter_l], %[vp8_filter_l], %[hev_l] \n\t" + "and %[vp8_filter_r], %[vp8_filter_r], %[hev_r] \n\t" - /* vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (qs0 - ps0)); */ - "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t" - "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t" - "xor %[invhev_l], %[hev_l], %[HWM] \n\t" - "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t" - "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t" - "xor %[invhev_r], %[hev_r], %[HWM] \n\t" - "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t" - "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t" + /* vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (qs0 - ps0)); */ + "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t" + "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t" + "xor %[invhev_l], %[hev_l], %[HWM] \n\t" + "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t" + "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t" + "xor %[invhev_r], %[hev_r], %[HWM] \n\t" + "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t" + "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t" - /* vp8_filter &= mask; */ - "and %[vp8_filter_l], 
%[vp8_filter_l], %[mask_l] \n\t" - "and %[vp8_filter_r], %[vp8_filter_r], %[mask_r] \n\t" + /* vp8_filter &= mask; */ + "and %[vp8_filter_l], %[vp8_filter_l], %[mask_l] \n\t" + "and %[vp8_filter_r], %[vp8_filter_r], %[mask_r] \n\t" - : [vp8_filter_l] "=&r" (vp8_filter_l), [vp8_filter_r] "=&r" (vp8_filter_r), - [subr_l] "=&r" (subr_l), [subr_r] "=&r" (subr_r), - [invhev_l] "=&r" (invhev_l), [invhev_r] "=&r" (invhev_r) + : [vp8_filter_l] "=&r"(vp8_filter_l), [vp8_filter_r] "=&r"(vp8_filter_r), + [subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r), + [invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r) - : [vps0_l] "r" (vps0_l), [vps0_r] "r" (vps0_r), [vps1_l] "r" (vps1_l), - [vps1_r] "r" (vps1_r), [vqs0_l] "r" (vqs0_l), [vqs0_r] "r" (vqs0_r), - [vqs1_l] "r" (vqs1_l), [vqs1_r] "r" (vqs1_r), - [mask_l] "r" (mask_l), [mask_r] "r" (mask_r), - [hev_l] "r" (hev_l), [hev_r] "r" (hev_r), - [HWM] "r" (HWM) - ); + : [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l), + [vps1_r] "r"(vps1_r), [vqs0_l] "r"(vqs0_l), [vqs0_r] "r"(vqs0_r), + [vqs1_l] "r"(vqs1_l), [vqs1_r] "r"(vqs1_r), [mask_l] "r"(mask_l), + [mask_r] "r"(mask_r), [hev_l] "r"(hev_l), [hev_r] "r"(hev_r), + [HWM] "r"(HWM)); - /* save bottom 3 bits so that we round one side +4 and the other +3 */ - __asm__ __volatile__ ( - /* Filter2 = vp8_signed_char_clamp(vp8_filter + 3) >>= 3; */ - "addq_s.ph %[Filter1_l], %[vp8_filter_l], %[t2] \n\t" - "addq_s.ph %[Filter1_r], %[vp8_filter_r], %[t2] \n\t" + /* save bottom 3 bits so that we round one side +4 and the other +3 */ + __asm__ __volatile__( + /* Filter2 = vp8_signed_char_clamp(vp8_filter + 3) >>= 3; */ + "addq_s.ph %[Filter1_l], %[vp8_filter_l], %[t2] \n\t" + "addq_s.ph %[Filter1_r], %[vp8_filter_r], %[t2] \n\t" - /* Filter1 = vp8_signed_char_clamp(vp8_filter + 4) >>= 3; */ - "addq_s.ph %[Filter2_l], %[vp8_filter_l], %[t1] \n\t" - "addq_s.ph %[Filter2_r], %[vp8_filter_r], %[t1] \n\t" - "shra.ph %[Filter1_r], %[Filter1_r], 3 \n\t" - "shra.ph %[Filter1_l], 
%[Filter1_l], 3 \n\t" + /* Filter1 = vp8_signed_char_clamp(vp8_filter + 4) >>= 3; */ + "addq_s.ph %[Filter2_l], %[vp8_filter_l], %[t1] \n\t" + "addq_s.ph %[Filter2_r], %[vp8_filter_r], %[t1] \n\t" + "shra.ph %[Filter1_r], %[Filter1_r], 3 \n\t" + "shra.ph %[Filter1_l], %[Filter1_l], 3 \n\t" - "shra.ph %[Filter2_l], %[Filter2_l], 3 \n\t" - "shra.ph %[Filter2_r], %[Filter2_r], 3 \n\t" + "shra.ph %[Filter2_l], %[Filter2_l], 3 \n\t" + "shra.ph %[Filter2_r], %[Filter2_r], 3 \n\t" - "and %[Filter1_l], %[Filter1_l], %[HWM] \n\t" - "and %[Filter1_r], %[Filter1_r], %[HWM] \n\t" + "and %[Filter1_l], %[Filter1_l], %[HWM] \n\t" + "and %[Filter1_r], %[Filter1_r], %[HWM] \n\t" - /* vps0 = vp8_signed_char_clamp(ps0 + Filter2); */ - "addq_s.ph %[vps0_l], %[vps0_l], %[Filter2_l] \n\t" - "addq_s.ph %[vps0_r], %[vps0_r], %[Filter2_r] \n\t" + /* vps0 = vp8_signed_char_clamp(ps0 + Filter2); */ + "addq_s.ph %[vps0_l], %[vps0_l], %[Filter2_l] \n\t" + "addq_s.ph %[vps0_r], %[vps0_r], %[Filter2_r] \n\t" - /* vqs0 = vp8_signed_char_clamp(qs0 - Filter1); */ - "subq_s.ph %[vqs0_l], %[vqs0_l], %[Filter1_l] \n\t" - "subq_s.ph %[vqs0_r], %[vqs0_r], %[Filter1_r] \n\t" + /* vqs0 = vp8_signed_char_clamp(qs0 - Filter1); */ + "subq_s.ph %[vqs0_l], %[vqs0_l], %[Filter1_l] \n\t" + "subq_s.ph %[vqs0_r], %[vqs0_r], %[Filter1_r] \n\t" - : [Filter1_l] "=&r" (Filter1_l), [Filter1_r] "=&r" (Filter1_r), - [Filter2_l] "=&r" (Filter2_l), [Filter2_r] "=&r" (Filter2_r), - [vps0_l] "+r" (vps0_l), [vps0_r] "+r" (vps0_r), - [vqs0_l] "+r" (vqs0_l), [vqs0_r] "+r" (vqs0_r) + : [Filter1_l] "=&r"(Filter1_l), [Filter1_r] "=&r"(Filter1_r), + [Filter2_l] "=&r"(Filter2_l), [Filter2_r] "=&r"(Filter2_r), + [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l), + [vqs0_r] "+r"(vqs0_r) - : [t1] "r" (t1), [t2] "r" (t2), - [vp8_filter_l] "r" (vp8_filter_l), [vp8_filter_r] "r" (vp8_filter_r), - [HWM] "r" (HWM) - ); + : [t1] "r"(t1), [t2] "r"(t2), [vp8_filter_l] "r"(vp8_filter_l), + [vp8_filter_r] "r"(vp8_filter_r), 
[HWM] "r"(HWM)); - __asm__ __volatile__ ( - /* (vp8_filter += 1) >>= 1 */ - "addqh.ph %[Filter1_l], %[Filter1_l], %[t3] \n\t" - "addqh.ph %[Filter1_r], %[Filter1_r], %[t3] \n\t" + __asm__ __volatile__( + /* (vp8_filter += 1) >>= 1 */ + "addqh.ph %[Filter1_l], %[Filter1_l], %[t3] \n\t" + "addqh.ph %[Filter1_r], %[Filter1_r], %[t3] \n\t" - /* vp8_filter &= ~hev; */ - "and %[Filter1_l], %[Filter1_l], %[invhev_l] \n\t" - "and %[Filter1_r], %[Filter1_r], %[invhev_r] \n\t" + /* vp8_filter &= ~hev; */ + "and %[Filter1_l], %[Filter1_l], %[invhev_l] \n\t" + "and %[Filter1_r], %[Filter1_r], %[invhev_r] \n\t" - /* vps1 = vp8_signed_char_clamp(ps1 + vp8_filter); */ - "addq_s.ph %[vps1_l], %[vps1_l], %[Filter1_l] \n\t" - "addq_s.ph %[vps1_r], %[vps1_r], %[Filter1_r] \n\t" + /* vps1 = vp8_signed_char_clamp(ps1 + vp8_filter); */ + "addq_s.ph %[vps1_l], %[vps1_l], %[Filter1_l] \n\t" + "addq_s.ph %[vps1_r], %[vps1_r], %[Filter1_r] \n\t" - /* vqs1 = vp8_signed_char_clamp(qs1 - vp8_filter); */ - "subq_s.ph %[vqs1_l], %[vqs1_l], %[Filter1_l] \n\t" - "subq_s.ph %[vqs1_r], %[vqs1_r], %[Filter1_r] \n\t" + /* vqs1 = vp8_signed_char_clamp(qs1 - vp8_filter); */ + "subq_s.ph %[vqs1_l], %[vqs1_l], %[Filter1_l] \n\t" + "subq_s.ph %[vqs1_r], %[vqs1_r], %[Filter1_r] \n\t" - : [Filter1_l] "+r" (Filter1_l), [Filter1_r] "+r" (Filter1_r), - [vps1_l] "+r" (vps1_l), [vps1_r] "+r" (vps1_r), - [vqs1_l] "+r" (vqs1_l), [vqs1_r] "+r" (vqs1_r) + : [Filter1_l] "+r"(Filter1_l), [Filter1_r] "+r"(Filter1_r), + [vps1_l] "+r"(vps1_l), [vps1_r] "+r"(vps1_r), [vqs1_l] "+r"(vqs1_l), + [vqs1_r] "+r"(vqs1_r) - : [t3] "r" (t3), [invhev_l] "r" (invhev_l), [invhev_r] "r" (invhev_r) - ); + : [t3] "r"(t3), [invhev_l] "r"(invhev_l), [invhev_r] "r"(invhev_r)); - /* Create quad-bytes from halfword pairs */ - vqs0_l = vqs0_l & HWM; - vqs1_l = vqs1_l & HWM; - vps0_l = vps0_l & HWM; - vps1_l = vps1_l & HWM; + /* Create quad-bytes from halfword pairs */ + vqs0_l = vqs0_l & HWM; + vqs1_l = vqs1_l & HWM; + vps0_l = vps0_l & HWM; + 
vps1_l = vps1_l & HWM; - __asm__ __volatile__ ( - "shrl.ph %[vqs0_r], %[vqs0_r], 8 \n\t" - "shrl.ph %[vps0_r], %[vps0_r], 8 \n\t" - "shrl.ph %[vqs1_r], %[vqs1_r], 8 \n\t" - "shrl.ph %[vps1_r], %[vps1_r], 8 \n\t" + __asm__ __volatile__( + "shrl.ph %[vqs0_r], %[vqs0_r], 8 \n\t" + "shrl.ph %[vps0_r], %[vps0_r], 8 \n\t" + "shrl.ph %[vqs1_r], %[vqs1_r], 8 \n\t" + "shrl.ph %[vps1_r], %[vps1_r], 8 \n\t" - : [vps1_r] "+r" (vps1_r), [vqs1_r] "+r" (vqs1_r), - [vps0_r] "+r" (vps0_r), [vqs0_r] "+r" (vqs0_r) - : - ); + : [vps1_r] "+r"(vps1_r), [vqs1_r] "+r"(vqs1_r), [vps0_r] "+r"(vps0_r), + [vqs0_r] "+r"(vqs0_r) + :); - vqs0 = vqs0_l | vqs0_r; - vqs1 = vqs1_l | vqs1_r; - vps0 = vps0_l | vps0_r; - vps1 = vps1_l | vps1_r; + vqs0 = vqs0_l | vqs0_r; + vqs1 = vqs1_l | vqs1_r; + vps0 = vps0_l | vps0_r; + vps1 = vps1_l | vps1_r; - *ps0 = vps0 ^ N128; - *ps1 = vps1 ^ N128; - *qs0 = vqs0 ^ N128; - *qs1 = vqs1 ^ N128; + *ps0 = vps0 ^ N128; + *ps1 = vps1 ^ N128; + *qs0 = vqs0 ^ N128; + *qs1 = vqs1 ^ N128; } -void vp8_loop_filter_horizontal_edge_mips -( - unsigned char *s, - int p, - unsigned int flimit, - unsigned int limit, - unsigned int thresh, - int count -) -{ - uint32_t mask; - uint32_t hev; - uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; - unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6; +void vp8_loop_filter_horizontal_edge_mips(unsigned char *s, int p, + unsigned int flimit, + unsigned int limit, + unsigned int thresh, int count) { + uint32_t mask; + uint32_t hev; + uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; + unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6; - mask = 0; - hev = 0; - p1 = 0; - p2 = 0; - p3 = 0; - p4 = 0; + mask = 0; + hev = 0; + p1 = 0; + p2 = 0; + p3 = 0; + p4 = 0; + /* prefetch data for store */ + prefetch_store_lf(s); + + /* loop filter designed to work using chars so that we can make maximum use + * of 8 bit simd instructions. 
+ */ + + sm1 = s - (p << 2); + s0 = s - p - p - p; + s1 = s - p - p; + s2 = s - p; + s3 = s; + s4 = s + p; + s5 = s + p + p; + s6 = s + p + p + p; + + /* load quad-byte vectors + * memory is 4 byte aligned + */ + p1 = *((uint32_t *)(s1)); + p2 = *((uint32_t *)(s2)); + p3 = *((uint32_t *)(s3)); + p4 = *((uint32_t *)(s4)); + + /* if (p1 - p4 == 0) and (p2 - p3 == 0) + * mask will be zero and filtering is not needed + */ + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + pm1 = *((uint32_t *)(sm1)); + p0 = *((uint32_t *)(s0)); + p5 = *((uint32_t *)(s5)); + p6 = *((uint32_t *)(s6)); + + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); + + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); + + /* unpack processed 4x4 neighborhood */ + *((uint32_t *)s1) = p1; + *((uint32_t *)s2) = p2; + *((uint32_t *)s3) = p3; + *((uint32_t *)s4) = p4; + } + } + + sm1 += 4; + s0 += 4; + s1 += 4; + s2 += 4; + s3 += 4; + s4 += 4; + s5 += 4; + s6 += 4; + + /* load quad-byte vectors + * memory is 4 byte aligned + */ + p1 = *((uint32_t *)(s1)); + p2 = *((uint32_t *)(s2)); + p3 = *((uint32_t *)(s3)); + p4 = *((uint32_t *)(s4)); + + /* if (p1 - p4 == 0) and (p2 - p3 == 0) + * mask will be zero and filtering is not needed + */ + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + pm1 = *((uint32_t *)(sm1)); + p0 = *((uint32_t *)(s0)); + p5 = *((uint32_t *)(s5)); + p6 = *((uint32_t *)(s6)); + + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); + + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); + + /* unpack processed 4x4 neighborhood */ + *((uint32_t *)s1) = p1; + *((uint32_t *)s2) = p2; + *((uint32_t *)s3) = p3; + *((uint32_t *)s4) = p4; + } + } + + sm1 += 4; + s0 += 4; + s1 += 4; + s2 += 4; + s3 += 4; + s4 += 4; + s5 += 4; + s6 += 4; + + /* load 
quad-byte vectors + * memory is 4 byte aligned + */ + p1 = *((uint32_t *)(s1)); + p2 = *((uint32_t *)(s2)); + p3 = *((uint32_t *)(s3)); + p4 = *((uint32_t *)(s4)); + + /* if (p1 - p4 == 0) and (p2 - p3 == 0) + * mask will be zero and filtering is not needed + */ + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + pm1 = *((uint32_t *)(sm1)); + p0 = *((uint32_t *)(s0)); + p5 = *((uint32_t *)(s5)); + p6 = *((uint32_t *)(s6)); + + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); + + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); + + /* unpack processed 4x4 neighborhood */ + *((uint32_t *)s1) = p1; + *((uint32_t *)s2) = p2; + *((uint32_t *)s3) = p3; + *((uint32_t *)s4) = p4; + } + } + + sm1 += 4; + s0 += 4; + s1 += 4; + s2 += 4; + s3 += 4; + s4 += 4; + s5 += 4; + s6 += 4; + + /* load quad-byte vectors + * memory is 4 byte aligned + */ + p1 = *((uint32_t *)(s1)); + p2 = *((uint32_t *)(s2)); + p3 = *((uint32_t *)(s3)); + p4 = *((uint32_t *)(s4)); + + /* if (p1 - p4 == 0) and (p2 - p3 == 0) + * mask will be zero and filtering is not needed + */ + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + pm1 = *((uint32_t *)(sm1)); + p0 = *((uint32_t *)(s0)); + p5 = *((uint32_t *)(s5)); + p6 = *((uint32_t *)(s6)); + + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); + + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); + + /* unpack processed 4x4 neighborhood */ + *((uint32_t *)s1) = p1; + *((uint32_t *)s2) = p2; + *((uint32_t *)s3) = p3; + *((uint32_t *)s4) = p4; + } + } +} + +void vp8_loop_filter_uvhorizontal_edge_mips(unsigned char *s, int p, + unsigned int flimit, + unsigned int limit, + unsigned int thresh, int count) { + uint32_t mask; + uint32_t hev; + uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; + unsigned char *sm1, *s0, *s1, 
*s2, *s3, *s4, *s5, *s6; + + mask = 0; + hev = 0; + p1 = 0; + p2 = 0; + p3 = 0; + p4 = 0; + + /* loop filter designed to work using chars so that we can make maximum use + * of 8 bit simd instructions. + */ + + sm1 = s - (p << 2); + s0 = s - p - p - p; + s1 = s - p - p; + s2 = s - p; + s3 = s; + s4 = s + p; + s5 = s + p + p; + s6 = s + p + p + p; + + /* load quad-byte vectors + * memory is 4 byte aligned + */ + p1 = *((uint32_t *)(s1)); + p2 = *((uint32_t *)(s2)); + p3 = *((uint32_t *)(s3)); + p4 = *((uint32_t *)(s4)); + + /* if (p1 - p4 == 0) and (p2 - p3 == 0) + * mask will be zero and filtering is not needed + */ + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + pm1 = *((uint32_t *)(sm1)); + p0 = *((uint32_t *)(s0)); + p5 = *((uint32_t *)(s5)); + p6 = *((uint32_t *)(s6)); + + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); + + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); + + /* unpack processed 4x4 neighborhood */ + *((uint32_t *)s1) = p1; + *((uint32_t *)s2) = p2; + *((uint32_t *)s3) = p3; + *((uint32_t *)s4) = p4; + } + } + + sm1 += 4; + s0 += 4; + s1 += 4; + s2 += 4; + s3 += 4; + s4 += 4; + s5 += 4; + s6 += 4; + + /* load quad-byte vectors + * memory is 4 byte aligned + */ + p1 = *((uint32_t *)(s1)); + p2 = *((uint32_t *)(s2)); + p3 = *((uint32_t *)(s3)); + p4 = *((uint32_t *)(s4)); + + /* if (p1 - p4 == 0) and (p2 - p3 == 0) + * mask will be zero and filtering is not needed + */ + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + pm1 = *((uint32_t *)(sm1)); + p0 = *((uint32_t *)(s0)); + p5 = *((uint32_t *)(s5)); + p6 = *((uint32_t *)(s6)); + + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); + + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); + + /* unpack processed 4x4 neighborhood */ + *((uint32_t 
*)s1) = p1; + *((uint32_t *)s2) = p2; + *((uint32_t *)s3) = p3; + *((uint32_t *)s4) = p4; + } + } +} + +void vp8_loop_filter_vertical_edge_mips(unsigned char *s, int p, + const unsigned int flimit, + const unsigned int limit, + const unsigned int thresh, int count) { + int i; + uint32_t mask, hev; + uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; + unsigned char *s1, *s2, *s3, *s4; + uint32_t prim1, prim2, sec3, sec4, prim3, prim4; + + hev = 0; + mask = 0; + i = 0; + pm1 = 0; + p0 = 0; + p1 = 0; + p2 = 0; + p3 = 0; + p4 = 0; + p5 = 0; + p6 = 0; + + /* loop filter designed to work using chars so that we can make maximum use + * of 8 bit simd instructions. + */ + + /* apply filter on 4 pixesl at the same time */ + do { /* prefetch data for store */ - prefetch_store_lf(s); - - /* loop filter designed to work using chars so that we can make maximum use - * of 8 bit simd instructions. - */ - - sm1 = s - (p << 2); - s0 = s - p - p - p; - s1 = s - p - p ; - s2 = s - p; - s3 = s; - s4 = s + p; - s5 = s + p + p; - s6 = s + p + p + p; - - /* load quad-byte vectors - * memory is 4 byte aligned - */ - p1 = *((uint32_t *)(s1)); - p2 = *((uint32_t *)(s2)); - p3 = *((uint32_t *)(s3)); - p4 = *((uint32_t *)(s4)); - - /* if (p1 - p4 == 0) and (p2 - p3 == 0) - * mask will be zero and filtering is not needed - */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { - - pm1 = *((uint32_t *)(sm1)); - p0 = *((uint32_t *)(s0)); - p5 = *((uint32_t *)(s5)); - p6 = *((uint32_t *)(s6)); - - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); - - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); - - /* unpack processed 4x4 neighborhood */ - *((uint32_t *)s1) = p1; - *((uint32_t *)s2) = p2; - *((uint32_t *)s3) = p3; - *((uint32_t *)s4) = p4; - } - } - - sm1 += 4; - s0 += 4; - s1 += 4; - s2 += 4; - s3 += 4; - s4 += 4; - s5 += 4; - s6 += 4; - - /* load quad-byte vectors - * memory 
is 4 byte aligned - */ - p1 = *((uint32_t *)(s1)); - p2 = *((uint32_t *)(s2)); - p3 = *((uint32_t *)(s3)); - p4 = *((uint32_t *)(s4)); - - /* if (p1 - p4 == 0) and (p2 - p3 == 0) - * mask will be zero and filtering is not needed - */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { - - pm1 = *((uint32_t *)(sm1)); - p0 = *((uint32_t *)(s0)); - p5 = *((uint32_t *)(s5)); - p6 = *((uint32_t *)(s6)); - - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); - - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); - - /* unpack processed 4x4 neighborhood */ - *((uint32_t *)s1) = p1; - *((uint32_t *)s2) = p2; - *((uint32_t *)s3) = p3; - *((uint32_t *)s4) = p4; - } - } - - sm1 += 4; - s0 += 4; - s1 += 4; - s2 += 4; - s3 += 4; - s4 += 4; - s5 += 4; - s6 += 4; - - /* load quad-byte vectors - * memory is 4 byte aligned - */ - p1 = *((uint32_t *)(s1)); - p2 = *((uint32_t *)(s2)); - p3 = *((uint32_t *)(s3)); - p4 = *((uint32_t *)(s4)); - - /* if (p1 - p4 == 0) and (p2 - p3 == 0) - * mask will be zero and filtering is not needed - */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { - - pm1 = *((uint32_t *)(sm1)); - p0 = *((uint32_t *)(s0)); - p5 = *((uint32_t *)(s5)); - p6 = *((uint32_t *)(s6)); - - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); - - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); - - /* unpack processed 4x4 neighborhood */ - *((uint32_t *)s1) = p1; - *((uint32_t *)s2) = p2; - *((uint32_t *)s3) = p3; - *((uint32_t *)s4) = p4; - } - } - - sm1 += 4; - s0 += 4; - s1 += 4; - s2 += 4; - s3 += 4; - s4 += 4; - s5 += 4; - s6 += 4; - - /* load quad-byte vectors - * memory is 4 byte aligned - */ - p1 = *((uint32_t *)(s1)); - p2 = *((uint32_t *)(s2)); - p3 = *((uint32_t *)(s3)); - p4 = *((uint32_t *)(s4)); - - /* if (p1 - 
p4 == 0) and (p2 - p3 == 0) - * mask will be zero and filtering is not needed - */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { - - pm1 = *((uint32_t *)(sm1)); - p0 = *((uint32_t *)(s0)); - p5 = *((uint32_t *)(s5)); - p6 = *((uint32_t *)(s6)); - - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); - - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); - - /* unpack processed 4x4 neighborhood */ - *((uint32_t *)s1) = p1; - *((uint32_t *)s2) = p2; - *((uint32_t *)s3) = p3; - *((uint32_t *)s4) = p4; - } - } -} - -void vp8_loop_filter_uvhorizontal_edge_mips -( - unsigned char *s, - int p, - unsigned int flimit, - unsigned int limit, - unsigned int thresh, - int count -) -{ - uint32_t mask; - uint32_t hev; - uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; - unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6; - - mask = 0; - hev = 0; - p1 = 0; - p2 = 0; - p3 = 0; - p4 = 0; - - /* loop filter designed to work using chars so that we can make maximum use - * of 8 bit simd instructions. 
- */ - - sm1 = s - (p << 2); - s0 = s - p - p - p; - s1 = s - p - p ; - s2 = s - p; - s3 = s; - s4 = s + p; - s5 = s + p + p; - s6 = s + p + p + p; - - /* load quad-byte vectors - * memory is 4 byte aligned - */ - p1 = *((uint32_t *)(s1)); - p2 = *((uint32_t *)(s2)); - p3 = *((uint32_t *)(s3)); - p4 = *((uint32_t *)(s4)); - - /* if (p1 - p4 == 0) and (p2 - p3 == 0) - * mask will be zero and filtering is not needed - */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { - - pm1 = *((uint32_t *)(sm1)); - p0 = *((uint32_t *)(s0)); - p5 = *((uint32_t *)(s5)); - p6 = *((uint32_t *)(s6)); - - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); - - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); - - /* unpack processed 4x4 neighborhood */ - *((uint32_t *)s1) = p1; - *((uint32_t *)s2) = p2; - *((uint32_t *)s3) = p3; - *((uint32_t *)s4) = p4; - } - } - - sm1 += 4; - s0 += 4; - s1 += 4; - s2 += 4; - s3 += 4; - s4 += 4; - s5 += 4; - s6 += 4; - - /* load quad-byte vectors - * memory is 4 byte aligned - */ - p1 = *((uint32_t *)(s1)); - p2 = *((uint32_t *)(s2)); - p3 = *((uint32_t *)(s3)); - p4 = *((uint32_t *)(s4)); - - /* if (p1 - p4 == 0) and (p2 - p3 == 0) - * mask will be zero and filtering is not needed - */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { - - pm1 = *((uint32_t *)(sm1)); - p0 = *((uint32_t *)(s0)); - p5 = *((uint32_t *)(s5)); - p6 = *((uint32_t *)(s6)); - - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); - - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); - - /* unpack processed 4x4 neighborhood */ - *((uint32_t *)s1) = p1; - *((uint32_t *)s2) = p2; - *((uint32_t *)s3) = p3; - *((uint32_t *)s4) = p4; - } - } -} - -void vp8_loop_filter_vertical_edge_mips -( - unsigned char *s, - int p, - const unsigned 
int flimit, - const unsigned int limit, - const unsigned int thresh, - int count -) -{ - int i; - uint32_t mask, hev; - uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; - unsigned char *s1, *s2, *s3, *s4; - uint32_t prim1, prim2, sec3, sec4, prim3, prim4; - - hev = 0; - mask = 0; - i = 0; - pm1 = 0; - p0 = 0; - p1 = 0; - p2 = 0; - p3 = 0; - p4 = 0; - p5 = 0; - p6 = 0; - - /* loop filter designed to work using chars so that we can make maximum use - * of 8 bit simd instructions. - */ - - /* apply filter on 4 pixesl at the same time */ - do - { - - /* prefetch data for store */ - prefetch_store_lf(s + p); - - s1 = s; - s2 = s + p; - s3 = s2 + p; - s4 = s3 + p; - s = s4 + p; - - /* load quad-byte vectors - * memory is 4 byte aligned - */ - p2 = *((uint32_t *)(s1 - 4)); - p6 = *((uint32_t *)(s1)); - p1 = *((uint32_t *)(s2 - 4)); - p5 = *((uint32_t *)(s2)); - p0 = *((uint32_t *)(s3 - 4)); - p4 = *((uint32_t *)(s3)); - pm1 = *((uint32_t *)(s4 - 4)); - p3 = *((uint32_t *)(s4)); - - /* transpose pm1, p0, p1, p2 */ - __asm__ __volatile__ ( - "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t" - "precr.qb.ph %[prim2], %[p2], %[p1] \n\t" - "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t" - "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t" - - "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t" - "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t" - "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" - "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" - - "precrq.ph.w %[p2], %[p1], %[sec3] \n\t" - "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t" - "append %[p1], %[sec3], 16 \n\t" - "append %[pm1], %[sec4], 16 \n\t" - - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); - - /* transpose p3, p4, p5, p6 */ - __asm__ __volatile__ ( - "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t" - "precr.qb.ph %[prim2], %[p6], %[p5] \n\t" - "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t" - 
"precr.qb.ph %[prim4], %[p4], %[p3] \n\t" - - "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t" - "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t" - "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" - "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" - - "precrq.ph.w %[p6], %[p5], %[sec3] \n\t" - "precrq.ph.w %[p4], %[p3], %[sec4] \n\t" - "append %[p5], %[sec3], 16 \n\t" - "append %[p3], %[sec4], 16 \n\t" - - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p6] "+r" (p6), [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); - - /* if (p1 - p4 == 0) and (p2 - p3 == 0) - * mask will be zero and filtering is not needed - */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { - - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); - - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); - - /* unpack processed 4x4 neighborhood - * don't use transpose on output data - * because memory isn't aligned - */ - __asm__ __volatile__ ( - "sb %[p4], 1(%[s4]) \n\t" - "sb %[p3], 0(%[s4]) \n\t" - "sb %[p2], -1(%[s4]) \n\t" - "sb %[p1], -2(%[s4]) \n\t" - : - : [p4] "r" (p4), [p3] "r" (p3), [s4] "r" (s4), - [p2] "r" (p2), [p1] "r" (p1) - ); - - __asm__ __volatile__ ( - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1) - : - ); - - __asm__ __volatile__ ( - "sb %[p4], 1(%[s3]) \n\t" - "sb %[p3], 0(%[s3]) \n\t" - "sb %[p2], -1(%[s3]) \n\t" - "sb %[p1], -2(%[s3]) \n\t" - : [p1] "+r" (p1) - : [p4] "r" (p4), [p3] "r" (p3), [s3] "r" (s3), [p2] "r" (p2) - ); - - __asm__ __volatile__ ( - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" 
(p1) - : - ); - - __asm__ __volatile__ ( - "sb %[p4], 1(%[s2]) \n\t" - "sb %[p3], 0(%[s2]) \n\t" - "sb %[p2], -1(%[s2]) \n\t" - "sb %[p1], -2(%[s2]) \n\t" - : - : [p4] "r" (p4), [p3] "r" (p3), [s2] "r" (s2), - [p2] "r" (p2), [p1] "r" (p1) - ); - - __asm__ __volatile__ ( - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1) - : - ); - - __asm__ __volatile__ ( - "sb %[p4], 1(%[s1]) \n\t" - "sb %[p3], 0(%[s1]) \n\t" - "sb %[p2], -1(%[s1]) \n\t" - "sb %[p1], -2(%[s1]) \n\t" - : - : [p4] "r" (p4), [p3] "r" (p3), [s1] "r" (s1), - [p2] "r" (p2), [p1] "r" (p1) - ); - } - } - - s1 = s; - s2 = s + p; - s3 = s2 + p; - s4 = s3 + p; - s = s4 + p; - - /* load quad-byte vectors - * memory is 4 byte aligned - */ - p2 = *((uint32_t *)(s1 - 4)); - p6 = *((uint32_t *)(s1)); - p1 = *((uint32_t *)(s2 - 4)); - p5 = *((uint32_t *)(s2)); - p0 = *((uint32_t *)(s3 - 4)); - p4 = *((uint32_t *)(s3)); - pm1 = *((uint32_t *)(s4 - 4)); - p3 = *((uint32_t *)(s4)); - - /* transpose pm1, p0, p1, p2 */ - __asm__ __volatile__ ( - "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t" - "precr.qb.ph %[prim2], %[p2], %[p1] \n\t" - "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t" - "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t" - - "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t" - "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t" - "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" - "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" - - "precrq.ph.w %[p2], %[p1], %[sec3] \n\t" - "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t" - "append %[p1], %[sec3], 16 \n\t" - "append %[pm1], %[sec4], 16 \n\t" - - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); - - /* transpose p3, p4, p5, p6 */ - __asm__ __volatile__ ( - "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t" 
- "precr.qb.ph %[prim2], %[p6], %[p5] \n\t" - "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t" - "precr.qb.ph %[prim4], %[p4], %[p3] \n\t" - - "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t" - "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t" - "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" - "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" - - "precrq.ph.w %[p6], %[p5], %[sec3] \n\t" - "precrq.ph.w %[p4], %[p3], %[sec4] \n\t" - "append %[p5], %[sec3], 16 \n\t" - "append %[p3], %[sec4], 16 \n\t" - - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p6] "+r" (p6), [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); - - /* if (p1 - p4 == 0) and (p2 - p3 == 0) - * mask will be zero and filtering is not needed - */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { - - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); - - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); - - /* unpack processed 4x4 neighborhood - * don't use transpose on output data - * because memory isn't aligned - */ - __asm__ __volatile__ ( - "sb %[p4], 1(%[s4]) \n\t" - "sb %[p3], 0(%[s4]) \n\t" - "sb %[p2], -1(%[s4]) \n\t" - "sb %[p1], -2(%[s4]) \n\t" - : - : [p4] "r" (p4), [p3] "r" (p3), [s4] "r" (s4), - [p2] "r" (p2), [p1] "r" (p1) - ); - - __asm__ __volatile__ ( - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1) - : - ); - - __asm__ __volatile__ ( - "sb %[p4], 1(%[s3]) \n\t" - "sb %[p3], 0(%[s3]) \n\t" - "sb %[p2], -1(%[s3]) \n\t" - "sb %[p1], -2(%[s3]) \n\t" - : [p1] "+r" (p1) - : [p4] "r" (p4), [p3] "r" (p3), [s3] "r" (s3), [p2] "r" (p2) - ); - - __asm__ __volatile__ ( - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - 
"srl %[p1], %[p1], 8 \n\t" - : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1) - : - ); - - __asm__ __volatile__ ( - "sb %[p4], 1(%[s2]) \n\t" - "sb %[p3], 0(%[s2]) \n\t" - "sb %[p2], -1(%[s2]) \n\t" - "sb %[p1], -2(%[s2]) \n\t" - : - : [p4] "r" (p4), [p3] "r" (p3), [s2] "r" (s2), - [p2] "r" (p2), [p1] "r" (p1) - ); - - __asm__ __volatile__ ( - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1) - : - ); - - __asm__ __volatile__ ( - "sb %[p4], 1(%[s1]) \n\t" - "sb %[p3], 0(%[s1]) \n\t" - "sb %[p2], -1(%[s1]) \n\t" - "sb %[p1], -2(%[s1]) \n\t" - : - : [p4] "r" (p4), [p3] "r" (p3), [s1] "r" (s1), - [p2] "r" (p2), [p1] "r" (p1) - ); - } - } - - i += 8; - } - - while (i < count); -} - -void vp8_loop_filter_uvvertical_edge_mips -( - unsigned char *s, - int p, - unsigned int flimit, - unsigned int limit, - unsigned int thresh, - int count -) -{ - uint32_t mask, hev; - uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; - unsigned char *s1, *s2, *s3, *s4; - uint32_t prim1, prim2, sec3, sec4, prim3, prim4; - - /* loop filter designed to work using chars so that we can make maximum use - * of 8 bit simd instructions. 
- */ - - /* apply filter on 4 pixesl at the same time */ + prefetch_store_lf(s + p); s1 = s; s2 = s + p; s3 = s2 + p; s4 = s3 + p; - - /* load quad-byte vectors - * memory is 4 byte aligned - */ - p2 = *((uint32_t *)(s1 - 4)); - p6 = *((uint32_t *)(s1)); - p1 = *((uint32_t *)(s2 - 4)); - p5 = *((uint32_t *)(s2)); - p0 = *((uint32_t *)(s3 - 4)); - p4 = *((uint32_t *)(s3)); - pm1 = *((uint32_t *)(s4 - 4)); - p3 = *((uint32_t *)(s4)); - - /* transpose pm1, p0, p1, p2 */ - __asm__ __volatile__ ( - "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t" - "precr.qb.ph %[prim2], %[p2], %[p1] \n\t" - "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t" - "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t" - - "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t" - "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t" - "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" - "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" - - "precrq.ph.w %[p2], %[p1], %[sec3] \n\t" - "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t" - "append %[p1], %[sec3], 16 \n\t" - "append %[pm1], %[sec4], 16 \n\t" - - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); - - /* transpose p3, p4, p5, p6 */ - __asm__ __volatile__ ( - "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t" - "precr.qb.ph %[prim2], %[p6], %[p5] \n\t" - "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t" - "precr.qb.ph %[prim4], %[p4], %[p3] \n\t" - - "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t" - "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t" - "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" - "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" - - "precrq.ph.w %[p6], %[p5], %[sec3] \n\t" - "precrq.ph.w %[p4], %[p3], %[sec4] \n\t" - "append %[p5], %[sec3], 16 \n\t" - "append %[p3], %[sec4], 16 \n\t" - - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p6] "+r" (p6), [p5] "+r" (p5), [p4] "+r" (p4), 
[p3] "+r" (p3), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); - - /* if (p1 - p4 == 0) and (p2 - p3 == 0) - * mask will be zero and filtering is not needed - */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { - - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); - - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); - - /* unpack processed 4x4 neighborhood - * don't use transpose on output data - * because memory isn't aligned - */ - __asm__ __volatile__ ( - "sb %[p4], 1(%[s4]) \n\t" - "sb %[p3], 0(%[s4]) \n\t" - "sb %[p2], -1(%[s4]) \n\t" - "sb %[p1], -2(%[s4]) \n\t" - : - : [p4] "r" (p4), [p3] "r" (p3), [s4] "r" (s4), - [p2] "r" (p2), [p1] "r" (p1) - ); - - __asm__ __volatile__ ( - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1) - : - ); - - __asm__ __volatile__ ( - "sb %[p4], 1(%[s3]) \n\t" - "sb %[p3], 0(%[s3]) \n\t" - "sb %[p2], -1(%[s3]) \n\t" - "sb %[p1], -2(%[s3]) \n\t" - : [p1] "+r" (p1) - : [p4] "r" (p4), [p3] "r" (p3), [s3] "r" (s3), [p2] "r" (p2) - ); - - __asm__ __volatile__ ( - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1) - : - ); - - __asm__ __volatile__ ( - "sb %[p4], 1(%[s2]) \n\t" - "sb %[p3], 0(%[s2]) \n\t" - "sb %[p2], -1(%[s2]) \n\t" - "sb %[p1], -2(%[s2]) \n\t" - : - : [p4] "r" (p4), [p3] "r" (p3), [s2] "r" (s2), - [p2] "r" (p2), [p1] "r" (p1) - ); - - __asm__ __volatile__ ( - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1) - : - ); - - __asm__ __volatile__ ( - "sb %[p4], 1(%[s1]) \n\t" - "sb %[p3], 0(%[s1]) \n\t" - 
"sb %[p2], -1(%[s1]) \n\t" - "sb %[p1], -2(%[s1]) \n\t" - : - : [p4] "r" (p4), [p3] "r" (p3), [s1] "r" (s1), [p2] "r" (p2), [p1] "r" (p1) - ); - } - } - - s1 = s4 + p; - s2 = s1 + p; - s3 = s2 + p; - s4 = s3 + p; + s = s4 + p; /* load quad-byte vectors * memory is 4 byte aligned */ - p2 = *((uint32_t *)(s1 - 4)); - p6 = *((uint32_t *)(s1)); - p1 = *((uint32_t *)(s2 - 4)); - p5 = *((uint32_t *)(s2)); - p0 = *((uint32_t *)(s3 - 4)); - p4 = *((uint32_t *)(s3)); + p2 = *((uint32_t *)(s1 - 4)); + p6 = *((uint32_t *)(s1)); + p1 = *((uint32_t *)(s2 - 4)); + p5 = *((uint32_t *)(s2)); + p0 = *((uint32_t *)(s3 - 4)); + p4 = *((uint32_t *)(s3)); pm1 = *((uint32_t *)(s4 - 4)); - p3 = *((uint32_t *)(s4)); + p3 = *((uint32_t *)(s4)); /* transpose pm1, p0, p1, p2 */ - __asm__ __volatile__ ( + __asm__ __volatile__( "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t" "precr.qb.ph %[prim2], %[p2], %[p1] \n\t" "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t" @@ -1224,15 +661,13 @@ void vp8_loop_filter_uvvertical_edge_mips "append %[p1], %[sec3], 16 \n\t" "append %[pm1], %[sec4], 16 \n\t" - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0), + [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); /* transpose p3, p4, p5, p6 */ - __asm__ __volatile__ ( + __asm__ __volatile__( "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t" "precr.qb.ph %[prim2], %[p6], %[p5] \n\t" "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t" @@ -1248,566 +683,830 @@ void vp8_loop_filter_uvvertical_edge_mips "append %[p5], %[sec3], 16 \n\t" "append %[p3], %[sec4], 16 \n\t" - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p6] "+r" (p6), [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - 
[sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4), + [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); /* if (p1 - p4 == 0) and (p2 - p3 == 0) * mask will be zero and filtering is not needed */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); + /* unpack processed 4x4 neighborhood + * don't use transpose on output data + * because memory isn't aligned + */ + __asm__ __volatile__( + "sb %[p4], 1(%[s4]) \n\t" + "sb %[p3], 0(%[s4]) \n\t" + "sb %[p2], -1(%[s4]) \n\t" + "sb %[p1], -2(%[s4]) \n\t" + : + : [p4] "r"(p4), [p3] "r"(p3), [s4] "r"(s4), [p2] "r"(p2), + [p1] "r"(p1)); - /* unpack processed 4x4 neighborhood - * don't use transpose on output data - * because memory isn't aligned - */ - __asm__ __volatile__ ( - "sb %[p4], 1(%[s4]) \n\t" - "sb %[p3], 0(%[s4]) \n\t" - "sb %[p2], -1(%[s4]) \n\t" - "sb %[p1], -2(%[s4]) \n\t" - : - : [p4] "r" (p4), [p3] "r" (p3), [s4] "r" (s4), - [p2] "r" (p2), [p1] "r" (p1) - ); + __asm__ __volatile__( + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1) + :); - __asm__ __volatile__ ( - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1) - 
: - ); + __asm__ __volatile__( + "sb %[p4], 1(%[s3]) \n\t" + "sb %[p3], 0(%[s3]) \n\t" + "sb %[p2], -1(%[s3]) \n\t" + "sb %[p1], -2(%[s3]) \n\t" + : [p1] "+r"(p1) + : [p4] "r"(p4), [p3] "r"(p3), [s3] "r"(s3), [p2] "r"(p2)); - __asm__ __volatile__ ( - "sb %[p4], 1(%[s3]) \n\t" - "sb %[p3], 0(%[s3]) \n\t" - "sb %[p2], -1(%[s3]) \n\t" - "sb %[p1], -2(%[s3]) \n\t" - : [p1] "+r" (p1) - : [p4] "r" (p4), [p3] "r" (p3), [s3] "r" (s3), [p2] "r" (p2) - ); + __asm__ __volatile__( + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1) + :); - __asm__ __volatile__ ( - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1) - : - ); + __asm__ __volatile__( + "sb %[p4], 1(%[s2]) \n\t" + "sb %[p3], 0(%[s2]) \n\t" + "sb %[p2], -1(%[s2]) \n\t" + "sb %[p1], -2(%[s2]) \n\t" + : + : [p4] "r"(p4), [p3] "r"(p3), [s2] "r"(s2), [p2] "r"(p2), + [p1] "r"(p1)); - __asm__ __volatile__ ( - "sb %[p4], 1(%[s2]) \n\t" - "sb %[p3], 0(%[s2]) \n\t" - "sb %[p2], -1(%[s2]) \n\t" - "sb %[p1], -2(%[s2]) \n\t" - : - : [p4] "r" (p4), [p3] "r" (p3), [s2] "r" (s2), - [p2] "r" (p2), [p1] "r" (p1) - ); + __asm__ __volatile__( + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1) + :); - __asm__ __volatile__ ( - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1) - : - ); - - __asm__ __volatile__ ( - "sb %[p4], 1(%[s1]) \n\t" - "sb %[p3], 0(%[s1]) \n\t" - "sb %[p2], -1(%[s1]) \n\t" - "sb %[p1], -2(%[s1]) \n\t" - : - : [p4] "r" (p4), [p3] "r" (p3), [s1] "r" (s1), - [p2] "r" (p2), [p1] "r" (p1) - ); - } + __asm__ 
__volatile__( + "sb %[p4], 1(%[s1]) \n\t" + "sb %[p3], 0(%[s1]) \n\t" + "sb %[p2], -1(%[s1]) \n\t" + "sb %[p1], -2(%[s1]) \n\t" + : + : [p4] "r"(p4), [p3] "r"(p3), [s1] "r"(s1), [p2] "r"(p2), + [p1] "r"(p1)); + } } + + s1 = s; + s2 = s + p; + s3 = s2 + p; + s4 = s3 + p; + s = s4 + p; + + /* load quad-byte vectors + * memory is 4 byte aligned + */ + p2 = *((uint32_t *)(s1 - 4)); + p6 = *((uint32_t *)(s1)); + p1 = *((uint32_t *)(s2 - 4)); + p5 = *((uint32_t *)(s2)); + p0 = *((uint32_t *)(s3 - 4)); + p4 = *((uint32_t *)(s3)); + pm1 = *((uint32_t *)(s4 - 4)); + p3 = *((uint32_t *)(s4)); + + /* transpose pm1, p0, p1, p2 */ + __asm__ __volatile__( + "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t" + "precr.qb.ph %[prim2], %[p2], %[p1] \n\t" + "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t" + "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t" + + "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t" + "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t" + "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" + "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" + + "precrq.ph.w %[p2], %[p1], %[sec3] \n\t" + "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t" + "append %[p1], %[sec3], 16 \n\t" + "append %[pm1], %[sec4], 16 \n\t" + + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0), + [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); + + /* transpose p3, p4, p5, p6 */ + __asm__ __volatile__( + "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t" + "precr.qb.ph %[prim2], %[p6], %[p5] \n\t" + "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t" + "precr.qb.ph %[prim4], %[p4], %[p3] \n\t" + + "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t" + "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t" + "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" + "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" + + "precrq.ph.w %[p6], %[p5], %[sec3] \n\t" + "precrq.ph.w %[p4], %[p3], %[sec4] \n\t" + "append %[p5], %[sec3], 16 \n\t" + "append %[p3], %[sec4], 16 \n\t" + + : [prim1] 
"=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4), + [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); + + /* if (p1 - p4 == 0) and (p2 - p3 == 0) + * mask will be zero and filtering is not needed + */ + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); + + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); + + /* unpack processed 4x4 neighborhood + * don't use transpose on output data + * because memory isn't aligned + */ + __asm__ __volatile__( + "sb %[p4], 1(%[s4]) \n\t" + "sb %[p3], 0(%[s4]) \n\t" + "sb %[p2], -1(%[s4]) \n\t" + "sb %[p1], -2(%[s4]) \n\t" + : + : [p4] "r"(p4), [p3] "r"(p3), [s4] "r"(s4), [p2] "r"(p2), + [p1] "r"(p1)); + + __asm__ __volatile__( + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1) + :); + + __asm__ __volatile__( + "sb %[p4], 1(%[s3]) \n\t" + "sb %[p3], 0(%[s3]) \n\t" + "sb %[p2], -1(%[s3]) \n\t" + "sb %[p1], -2(%[s3]) \n\t" + : [p1] "+r"(p1) + : [p4] "r"(p4), [p3] "r"(p3), [s3] "r"(s3), [p2] "r"(p2)); + + __asm__ __volatile__( + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1) + :); + + __asm__ __volatile__( + "sb %[p4], 1(%[s2]) \n\t" + "sb %[p3], 0(%[s2]) \n\t" + "sb %[p2], -1(%[s2]) \n\t" + "sb %[p1], -2(%[s2]) \n\t" + : + : [p4] "r"(p4), [p3] "r"(p3), [s2] "r"(s2), [p2] "r"(p2), + [p1] "r"(p1)); + + __asm__ __volatile__( + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1) + :); + + __asm__ 
__volatile__( + "sb %[p4], 1(%[s1]) \n\t" + "sb %[p3], 0(%[s1]) \n\t" + "sb %[p2], -1(%[s1]) \n\t" + "sb %[p1], -2(%[s1]) \n\t" + : + : [p4] "r"(p4), [p3] "r"(p3), [s1] "r"(s1), [p2] "r"(p2), + [p1] "r"(p1)); + } + } + + i += 8; + } + + while (i < count); +} + +void vp8_loop_filter_uvvertical_edge_mips(unsigned char *s, int p, + unsigned int flimit, + unsigned int limit, + unsigned int thresh, int count) { + uint32_t mask, hev; + uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; + unsigned char *s1, *s2, *s3, *s4; + uint32_t prim1, prim2, sec3, sec4, prim3, prim4; + + /* loop filter designed to work using chars so that we can make maximum use + * of 8 bit simd instructions. + */ + + /* apply filter on 4 pixesl at the same time */ + + s1 = s; + s2 = s + p; + s3 = s2 + p; + s4 = s3 + p; + + /* load quad-byte vectors + * memory is 4 byte aligned + */ + p2 = *((uint32_t *)(s1 - 4)); + p6 = *((uint32_t *)(s1)); + p1 = *((uint32_t *)(s2 - 4)); + p5 = *((uint32_t *)(s2)); + p0 = *((uint32_t *)(s3 - 4)); + p4 = *((uint32_t *)(s3)); + pm1 = *((uint32_t *)(s4 - 4)); + p3 = *((uint32_t *)(s4)); + + /* transpose pm1, p0, p1, p2 */ + __asm__ __volatile__( + "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t" + "precr.qb.ph %[prim2], %[p2], %[p1] \n\t" + "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t" + "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t" + + "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t" + "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t" + "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" + "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" + + "precrq.ph.w %[p2], %[p1], %[sec3] \n\t" + "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t" + "append %[p1], %[sec3], 16 \n\t" + "append %[pm1], %[sec4], 16 \n\t" + + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0), + [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); + + /* transpose p3, p4, p5, p6 */ + __asm__ __volatile__( + "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t" 
+ "precr.qb.ph %[prim2], %[p6], %[p5] \n\t" + "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t" + "precr.qb.ph %[prim4], %[p4], %[p3] \n\t" + + "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t" + "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t" + "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" + "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" + + "precrq.ph.w %[p6], %[p5], %[sec3] \n\t" + "precrq.ph.w %[p4], %[p3], %[sec4] \n\t" + "append %[p5], %[sec3], 16 \n\t" + "append %[p3], %[sec4], 16 \n\t" + + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4), + [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); + + /* if (p1 - p4 == 0) and (p2 - p3 == 0) + * mask will be zero and filtering is not needed + */ + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); + + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); + + /* unpack processed 4x4 neighborhood + * don't use transpose on output data + * because memory isn't aligned + */ + __asm__ __volatile__( + "sb %[p4], 1(%[s4]) \n\t" + "sb %[p3], 0(%[s4]) \n\t" + "sb %[p2], -1(%[s4]) \n\t" + "sb %[p1], -2(%[s4]) \n\t" + : + : + [p4] "r"(p4), [p3] "r"(p3), [s4] "r"(s4), [p2] "r"(p2), [p1] "r"(p1)); + + __asm__ __volatile__( + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1) + :); + + __asm__ __volatile__( + "sb %[p4], 1(%[s3]) \n\t" + "sb %[p3], 0(%[s3]) \n\t" + "sb %[p2], -1(%[s3]) \n\t" + "sb %[p1], -2(%[s3]) \n\t" + : [p1] "+r"(p1) + : [p4] "r"(p4), [p3] "r"(p3), [s3] "r"(s3), [p2] "r"(p2)); + + __asm__ __volatile__( + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + : [p4] "+r"(p4), 
[p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1) + :); + + __asm__ __volatile__( + "sb %[p4], 1(%[s2]) \n\t" + "sb %[p3], 0(%[s2]) \n\t" + "sb %[p2], -1(%[s2]) \n\t" + "sb %[p1], -2(%[s2]) \n\t" + : + : + [p4] "r"(p4), [p3] "r"(p3), [s2] "r"(s2), [p2] "r"(p2), [p1] "r"(p1)); + + __asm__ __volatile__( + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1) + :); + + __asm__ __volatile__( + "sb %[p4], 1(%[s1]) \n\t" + "sb %[p3], 0(%[s1]) \n\t" + "sb %[p2], -1(%[s1]) \n\t" + "sb %[p1], -2(%[s1]) \n\t" + : + : + [p4] "r"(p4), [p3] "r"(p3), [s1] "r"(s1), [p2] "r"(p2), [p1] "r"(p1)); + } + } + + s1 = s4 + p; + s2 = s1 + p; + s3 = s2 + p; + s4 = s3 + p; + + /* load quad-byte vectors + * memory is 4 byte aligned + */ + p2 = *((uint32_t *)(s1 - 4)); + p6 = *((uint32_t *)(s1)); + p1 = *((uint32_t *)(s2 - 4)); + p5 = *((uint32_t *)(s2)); + p0 = *((uint32_t *)(s3 - 4)); + p4 = *((uint32_t *)(s3)); + pm1 = *((uint32_t *)(s4 - 4)); + p3 = *((uint32_t *)(s4)); + + /* transpose pm1, p0, p1, p2 */ + __asm__ __volatile__( + "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t" + "precr.qb.ph %[prim2], %[p2], %[p1] \n\t" + "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t" + "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t" + + "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t" + "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t" + "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" + "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" + + "precrq.ph.w %[p2], %[p1], %[sec3] \n\t" + "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t" + "append %[p1], %[sec3], 16 \n\t" + "append %[pm1], %[sec4], 16 \n\t" + + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0), + [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); + + /* transpose p3, p4, p5, p6 */ + __asm__ __volatile__( + "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t" + "precr.qb.ph 
%[prim2], %[p6], %[p5] \n\t" + "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t" + "precr.qb.ph %[prim4], %[p4], %[p3] \n\t" + + "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t" + "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t" + "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" + "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" + + "precrq.ph.w %[p6], %[p5], %[sec3] \n\t" + "precrq.ph.w %[p4], %[p3], %[sec4] \n\t" + "append %[p5], %[sec3], 16 \n\t" + "append %[p3], %[sec4], 16 \n\t" + + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4), + [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); + + /* if (p1 - p4 == 0) and (p2 - p3 == 0) + * mask will be zero and filtering is not needed + */ + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); + + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_filter_mips(mask, hev, &p1, &p2, &p3, &p4); + + /* unpack processed 4x4 neighborhood + * don't use transpose on output data + * because memory isn't aligned + */ + __asm__ __volatile__( + "sb %[p4], 1(%[s4]) \n\t" + "sb %[p3], 0(%[s4]) \n\t" + "sb %[p2], -1(%[s4]) \n\t" + "sb %[p1], -2(%[s4]) \n\t" + : + : + [p4] "r"(p4), [p3] "r"(p3), [s4] "r"(s4), [p2] "r"(p2), [p1] "r"(p1)); + + __asm__ __volatile__( + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1) + :); + + __asm__ __volatile__( + "sb %[p4], 1(%[s3]) \n\t" + "sb %[p3], 0(%[s3]) \n\t" + "sb %[p2], -1(%[s3]) \n\t" + "sb %[p1], -2(%[s3]) \n\t" + : [p1] "+r"(p1) + : [p4] "r"(p4), [p3] "r"(p3), [s3] "r"(s3), [p2] "r"(p2)); + + __asm__ __volatile__( + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + : [p4] "+r"(p4), [p3] "+r"(p3), 
[p2] "+r"(p2), [p1] "+r"(p1) + :); + + __asm__ __volatile__( + "sb %[p4], 1(%[s2]) \n\t" + "sb %[p3], 0(%[s2]) \n\t" + "sb %[p2], -1(%[s2]) \n\t" + "sb %[p1], -2(%[s2]) \n\t" + : + : + [p4] "r"(p4), [p3] "r"(p3), [s2] "r"(s2), [p2] "r"(p2), [p1] "r"(p1)); + + __asm__ __volatile__( + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1) + :); + + __asm__ __volatile__( + "sb %[p4], 1(%[s1]) \n\t" + "sb %[p3], 0(%[s1]) \n\t" + "sb %[p2], -1(%[s1]) \n\t" + "sb %[p1], -2(%[s1]) \n\t" + : + : + [p4] "r"(p4), [p3] "r"(p3), [s1] "r"(s1), [p2] "r"(p2), [p1] "r"(p1)); + } + } } /* inputs & outputs are quad-byte vectors */ -static __inline void vp8_mbfilter_mips -( - uint32_t mask, - uint32_t hev, - uint32_t *ps2, - uint32_t *ps1, - uint32_t *ps0, - uint32_t *qs0, - uint32_t *qs1, - uint32_t *qs2 -) -{ - int32_t vps2, vps1, vps0, vqs0, vqs1, vqs2; - int32_t vps2_l, vps1_l, vps0_l, vqs0_l, vqs1_l, vqs2_l; - int32_t vps2_r, vps1_r, vps0_r, vqs0_r, vqs1_r, vqs2_r; - uint32_t HWM, vp8_filter_l, vp8_filter_r, mask_l, mask_r, hev_l, hev_r, subr_r, subr_l; - uint32_t Filter2_l, Filter2_r, t1, t2, Filter1_l, Filter1_r, invhev_l, invhev_r; - uint32_t N128, R63; - uint32_t u1_l, u1_r, u2_l, u2_r, u3_l, u3_r; +static __inline void vp8_mbfilter_mips(uint32_t mask, uint32_t hev, + uint32_t *ps2, uint32_t *ps1, + uint32_t *ps0, uint32_t *qs0, + uint32_t *qs1, uint32_t *qs2) { + int32_t vps2, vps1, vps0, vqs0, vqs1, vqs2; + int32_t vps2_l, vps1_l, vps0_l, vqs0_l, vqs1_l, vqs2_l; + int32_t vps2_r, vps1_r, vps0_r, vqs0_r, vqs1_r, vqs2_r; + uint32_t HWM, vp8_filter_l, vp8_filter_r, mask_l, mask_r, hev_l, hev_r, + subr_r, subr_l; + uint32_t Filter2_l, Filter2_r, t1, t2, Filter1_l, Filter1_r, invhev_l, + invhev_r; + uint32_t N128, R63; + uint32_t u1_l, u1_r, u2_l, u2_r, u3_l, u3_r; - R63 = 0x003F003F; - HWM = 0xFF00FF00; - N128 = 0x80808080; - t1 = 0x03000300; - t2 = 
0x04000400; + R63 = 0x003F003F; + HWM = 0xFF00FF00; + N128 = 0x80808080; + t1 = 0x03000300; + t2 = 0x04000400; - vps0 = (*ps0) ^ N128; - vps1 = (*ps1) ^ N128; - vps2 = (*ps2) ^ N128; - vqs0 = (*qs0) ^ N128; - vqs1 = (*qs1) ^ N128; - vqs2 = (*qs2) ^ N128; + vps0 = (*ps0) ^ N128; + vps1 = (*ps1) ^ N128; + vps2 = (*ps2) ^ N128; + vqs0 = (*qs0) ^ N128; + vqs1 = (*qs1) ^ N128; + vqs2 = (*qs2) ^ N128; - /* use halfword pairs instead quad-bytes because of accuracy */ - vps0_l = vps0 & HWM; - vps0_r = vps0 << 8; - vps0_r = vps0_r & HWM; + /* use halfword pairs instead quad-bytes because of accuracy */ + vps0_l = vps0 & HWM; + vps0_r = vps0 << 8; + vps0_r = vps0_r & HWM; - vqs0_l = vqs0 & HWM; - vqs0_r = vqs0 << 8; - vqs0_r = vqs0_r & HWM; + vqs0_l = vqs0 & HWM; + vqs0_r = vqs0 << 8; + vqs0_r = vqs0_r & HWM; - vps1_l = vps1 & HWM; - vps1_r = vps1 << 8; - vps1_r = vps1_r & HWM; + vps1_l = vps1 & HWM; + vps1_r = vps1 << 8; + vps1_r = vps1_r & HWM; - vqs1_l = vqs1 & HWM; - vqs1_r = vqs1 << 8; - vqs1_r = vqs1_r & HWM; + vqs1_l = vqs1 & HWM; + vqs1_r = vqs1 << 8; + vqs1_r = vqs1_r & HWM; - vqs2_l = vqs2 & HWM; - vqs2_r = vqs2 << 8; - vqs2_r = vqs2_r & HWM; + vqs2_l = vqs2 & HWM; + vqs2_r = vqs2 << 8; + vqs2_r = vqs2_r & HWM; - __asm__ __volatile__ ( - /* qs0 - ps0 */ - "subq_s.ph %[subr_l], %[vqs0_l], %[vps0_l] \n\t" - "subq_s.ph %[subr_r], %[vqs0_r], %[vps0_r] \n\t" + __asm__ __volatile__( + /* qs0 - ps0 */ + "subq_s.ph %[subr_l], %[vqs0_l], %[vps0_l] \n\t" + "subq_s.ph %[subr_r], %[vqs0_r], %[vps0_r] \n\t" - /* vp8_filter = vp8_signed_char_clamp(ps1 - qs1); */ - "subq_s.ph %[vp8_filter_l], %[vps1_l], %[vqs1_l] \n\t" - "subq_s.ph %[vp8_filter_r], %[vps1_r], %[vqs1_r] \n\t" + /* vp8_filter = vp8_signed_char_clamp(ps1 - qs1); */ + "subq_s.ph %[vp8_filter_l], %[vps1_l], %[vqs1_l] \n\t" + "subq_s.ph %[vp8_filter_r], %[vps1_r], %[vqs1_r] \n\t" - : [vp8_filter_l] "=&r" (vp8_filter_l), [vp8_filter_r] "=r" (vp8_filter_r), - [subr_l] "=&r" (subr_l), [subr_r] "=&r" (subr_r) - : [vps0_l] 
"r" (vps0_l), [vps0_r] "r" (vps0_r), [vps1_l] "r" (vps1_l), - [vps1_r] "r" (vps1_r), [vqs0_l] "r" (vqs0_l), [vqs0_r] "r" (vqs0_r), - [vqs1_l] "r" (vqs1_l), [vqs1_r] "r" (vqs1_r) - ); + : [vp8_filter_l] "=&r"(vp8_filter_l), [vp8_filter_r] "=r"(vp8_filter_r), + [subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r) + : [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l), + [vps1_r] "r"(vps1_r), [vqs0_l] "r"(vqs0_l), [vqs0_r] "r"(vqs0_r), + [vqs1_l] "r"(vqs1_l), [vqs1_r] "r"(vqs1_r)); - vps2_l = vps2 & HWM; - vps2_r = vps2 << 8; - vps2_r = vps2_r & HWM; + vps2_l = vps2 & HWM; + vps2_r = vps2 << 8; + vps2_r = vps2_r & HWM; - /* add outer taps if we have high edge variance */ - __asm__ __volatile__ ( - /* vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (qs0 - ps0)); */ - "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t" - "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t" - "and %[mask_l], %[HWM], %[mask] \n\t" - "sll %[mask_r], %[mask], 8 \n\t" - "and %[mask_r], %[HWM], %[mask_r] \n\t" - "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t" - "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t" - "and %[hev_l], %[HWM], %[hev] \n\t" - "sll %[hev_r], %[hev], 8 \n\t" - "and %[hev_r], %[HWM], %[hev_r] \n\t" - "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t" - "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t" + /* add outer taps if we have high edge variance */ + __asm__ __volatile__( + /* vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * (qs0 - ps0)); */ + "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t" + "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t" + "and %[mask_l], %[HWM], %[mask] \n\t" + "sll %[mask_r], %[mask], 8 \n\t" + "and %[mask_r], %[HWM], %[mask_r] \n\t" + "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t" + "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t" + "and %[hev_l], %[HWM], %[hev] \n\t" + "sll %[hev_r], %[hev], 8 \n\t" + "and %[hev_r], 
%[HWM], %[hev_r] \n\t" + "addq_s.ph %[vp8_filter_l], %[vp8_filter_l], %[subr_l] \n\t" + "addq_s.ph %[vp8_filter_r], %[vp8_filter_r], %[subr_r] \n\t" - /* vp8_filter &= mask; */ - "and %[vp8_filter_l], %[vp8_filter_l], %[mask_l] \n\t" - "and %[vp8_filter_r], %[vp8_filter_r], %[mask_r] \n\t" + /* vp8_filter &= mask; */ + "and %[vp8_filter_l], %[vp8_filter_l], %[mask_l] \n\t" + "and %[vp8_filter_r], %[vp8_filter_r], %[mask_r] \n\t" - /* Filter2 = vp8_filter & hev; */ - "and %[Filter2_l], %[vp8_filter_l], %[hev_l] \n\t" - "and %[Filter2_r], %[vp8_filter_r], %[hev_r] \n\t" + /* Filter2 = vp8_filter & hev; */ + "and %[Filter2_l], %[vp8_filter_l], %[hev_l] \n\t" + "and %[Filter2_r], %[vp8_filter_r], %[hev_r] \n\t" - : [vp8_filter_l] "+r" (vp8_filter_l), [vp8_filter_r] "+r" (vp8_filter_r), - [hev_l] "=&r" (hev_l), [hev_r] "=&r" (hev_r), - [mask_l] "=&r" (mask_l), [mask_r] "=&r" (mask_r), - [Filter2_l] "=&r" (Filter2_l), [Filter2_r] "=&r" (Filter2_r) - : [subr_l] "r" (subr_l), [subr_r] "r" (subr_r), - [HWM] "r" (HWM), [hev] "r" (hev), [mask] "r" (mask) - ); + : [vp8_filter_l] "+r"(vp8_filter_l), [vp8_filter_r] "+r"(vp8_filter_r), + [hev_l] "=&r"(hev_l), [hev_r] "=&r"(hev_r), [mask_l] "=&r"(mask_l), + [mask_r] "=&r"(mask_r), [Filter2_l] "=&r"(Filter2_l), + [Filter2_r] "=&r"(Filter2_r) + : [subr_l] "r"(subr_l), [subr_r] "r"(subr_r), [HWM] "r"(HWM), + [hev] "r"(hev), [mask] "r"(mask)); - /* save bottom 3 bits so that we round one side +4 and the other +3 */ - __asm__ __volatile__ ( - /* Filter1 = vp8_signed_char_clamp(Filter2 + 4) >>= 3; */ - "addq_s.ph %[Filter1_l], %[Filter2_l], %[t2] \n\t" - "xor %[invhev_l], %[hev_l], %[HWM] \n\t" - "addq_s.ph %[Filter1_r], %[Filter2_r], %[t2] \n\t" + /* save bottom 3 bits so that we round one side +4 and the other +3 */ + __asm__ __volatile__( + /* Filter1 = vp8_signed_char_clamp(Filter2 + 4) >>= 3; */ + "addq_s.ph %[Filter1_l], %[Filter2_l], %[t2] \n\t" + "xor %[invhev_l], %[hev_l], %[HWM] \n\t" + "addq_s.ph %[Filter1_r], %[Filter2_r], 
%[t2] \n\t" - /* Filter2 = vp8_signed_char_clamp(Filter2 + 3) >>= 3; */ - "addq_s.ph %[Filter2_l], %[Filter2_l], %[t1] \n\t" - "addq_s.ph %[Filter2_r], %[Filter2_r], %[t1] \n\t" + /* Filter2 = vp8_signed_char_clamp(Filter2 + 3) >>= 3; */ + "addq_s.ph %[Filter2_l], %[Filter2_l], %[t1] \n\t" + "addq_s.ph %[Filter2_r], %[Filter2_r], %[t1] \n\t" - "shra.ph %[Filter1_l], %[Filter1_l], 3 \n\t" - "shra.ph %[Filter1_r], %[Filter1_r], 3 \n\t" + "shra.ph %[Filter1_l], %[Filter1_l], 3 \n\t" + "shra.ph %[Filter1_r], %[Filter1_r], 3 \n\t" - "shra.ph %[Filter2_l], %[Filter2_l], 3 \n\t" - "shra.ph %[Filter2_r], %[Filter2_r], 3 \n\t" - "and %[Filter1_l], %[Filter1_l], %[HWM] \n\t" - "and %[Filter1_r], %[Filter1_r], %[HWM] \n\t" - "xor %[invhev_r], %[hev_r], %[HWM] \n\t" + "shra.ph %[Filter2_l], %[Filter2_l], 3 \n\t" + "shra.ph %[Filter2_r], %[Filter2_r], 3 \n\t" + "and %[Filter1_l], %[Filter1_l], %[HWM] \n\t" + "and %[Filter1_r], %[Filter1_r], %[HWM] \n\t" + "xor %[invhev_r], %[hev_r], %[HWM] \n\t" - /* qs0 = vp8_signed_char_clamp(qs0 - Filter1); */ - "subq_s.ph %[vqs0_l], %[vqs0_l], %[Filter1_l] \n\t" - "subq_s.ph %[vqs0_r], %[vqs0_r], %[Filter1_r] \n\t" + /* qs0 = vp8_signed_char_clamp(qs0 - Filter1); */ + "subq_s.ph %[vqs0_l], %[vqs0_l], %[Filter1_l] \n\t" + "subq_s.ph %[vqs0_r], %[vqs0_r], %[Filter1_r] \n\t" - /* ps0 = vp8_signed_char_clamp(ps0 + Filter2); */ - "addq_s.ph %[vps0_l], %[vps0_l], %[Filter2_l] \n\t" - "addq_s.ph %[vps0_r], %[vps0_r], %[Filter2_r] \n\t" + /* ps0 = vp8_signed_char_clamp(ps0 + Filter2); */ + "addq_s.ph %[vps0_l], %[vps0_l], %[Filter2_l] \n\t" + "addq_s.ph %[vps0_r], %[vps0_r], %[Filter2_r] \n\t" - : [invhev_l] "=&r" (invhev_l), [invhev_r] "=&r" (invhev_r), - [Filter1_l] "=&r" (Filter1_l), [Filter1_r] "=&r" (Filter1_r), - [Filter2_l] "+r" (Filter2_l), [Filter2_r] "+r" (Filter2_r), - [vps0_l] "+r" (vps0_l), [vps0_r] "+r" (vps0_r), - [vqs0_l] "+r" (vqs0_l), [vqs0_r] "+r" (vqs0_r) - : [t1] "r" (t1), [t2] "r" (t2), [HWM] "r" (HWM), - [hev_l] "r" (hev_l), 
[hev_r] "r" (hev_r) - ); + : [invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r), + [Filter1_l] "=&r"(Filter1_l), [Filter1_r] "=&r"(Filter1_r), + [Filter2_l] "+r"(Filter2_l), [Filter2_r] "+r"(Filter2_r), + [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l), + [vqs0_r] "+r"(vqs0_r) + : [t1] "r"(t1), [t2] "r"(t2), [HWM] "r"(HWM), [hev_l] "r"(hev_l), + [hev_r] "r"(hev_r)); - /* only apply wider filter if not high edge variance */ - __asm__ __volatile__ ( - /* vp8_filter &= ~hev; */ - "and %[Filter2_l], %[vp8_filter_l], %[invhev_l] \n\t" - "and %[Filter2_r], %[vp8_filter_r], %[invhev_r] \n\t" + /* only apply wider filter if not high edge variance */ + __asm__ __volatile__( + /* vp8_filter &= ~hev; */ + "and %[Filter2_l], %[vp8_filter_l], %[invhev_l] \n\t" + "and %[Filter2_r], %[vp8_filter_r], %[invhev_r] \n\t" - "shra.ph %[Filter2_l], %[Filter2_l], 8 \n\t" - "shra.ph %[Filter2_r], %[Filter2_r], 8 \n\t" + "shra.ph %[Filter2_l], %[Filter2_l], 8 \n\t" + "shra.ph %[Filter2_r], %[Filter2_r], 8 \n\t" - : [Filter2_l] "=&r" (Filter2_l), [Filter2_r] "=&r" (Filter2_r) - : [vp8_filter_l] "r" (vp8_filter_l), [vp8_filter_r] "r" (vp8_filter_r), - [invhev_l] "r" (invhev_l), [invhev_r] "r" (invhev_r) - ); + : [Filter2_l] "=&r"(Filter2_l), [Filter2_r] "=&r"(Filter2_r) + : [vp8_filter_l] "r"(vp8_filter_l), [vp8_filter_r] "r"(vp8_filter_r), + [invhev_l] "r"(invhev_l), [invhev_r] "r"(invhev_r)); - /* roughly 3/7th difference across boundary */ - __asm__ __volatile__ ( - "shll.ph %[u3_l], %[Filter2_l], 3 \n\t" - "shll.ph %[u3_r], %[Filter2_r], 3 \n\t" + /* roughly 3/7th difference across boundary */ + __asm__ __volatile__( + "shll.ph %[u3_l], %[Filter2_l], 3 \n\t" + "shll.ph %[u3_r], %[Filter2_r], 3 \n\t" - "addq.ph %[u3_l], %[u3_l], %[Filter2_l] \n\t" - "addq.ph %[u3_r], %[u3_r], %[Filter2_r] \n\t" + "addq.ph %[u3_l], %[u3_l], %[Filter2_l] \n\t" + "addq.ph %[u3_r], %[u3_r], %[Filter2_r] \n\t" - "shll.ph %[u2_l], %[u3_l], 1 \n\t" - "shll.ph %[u2_r], %[u3_r], 1 \n\t" + 
"shll.ph %[u2_l], %[u3_l], 1 \n\t" + "shll.ph %[u2_r], %[u3_r], 1 \n\t" - "addq.ph %[u1_l], %[u3_l], %[u2_l] \n\t" - "addq.ph %[u1_r], %[u3_r], %[u2_r] \n\t" + "addq.ph %[u1_l], %[u3_l], %[u2_l] \n\t" + "addq.ph %[u1_r], %[u3_r], %[u2_r] \n\t" - "addq.ph %[u2_l], %[u2_l], %[R63] \n\t" - "addq.ph %[u2_r], %[u2_r], %[R63] \n\t" + "addq.ph %[u2_l], %[u2_l], %[R63] \n\t" + "addq.ph %[u2_r], %[u2_r], %[R63] \n\t" - "addq.ph %[u3_l], %[u3_l], %[R63] \n\t" - "addq.ph %[u3_r], %[u3_r], %[R63] \n\t" + "addq.ph %[u3_l], %[u3_l], %[R63] \n\t" + "addq.ph %[u3_r], %[u3_r], %[R63] \n\t" - /* vp8_signed_char_clamp((63 + Filter2 * 27) >> 7) - * vp8_signed_char_clamp((63 + Filter2 * 18) >> 7) - */ - "addq.ph %[u1_l], %[u1_l], %[R63] \n\t" - "addq.ph %[u1_r], %[u1_r], %[R63] \n\t" - "shra.ph %[u1_l], %[u1_l], 7 \n\t" - "shra.ph %[u1_r], %[u1_r], 7 \n\t" - "shra.ph %[u2_l], %[u2_l], 7 \n\t" - "shra.ph %[u2_r], %[u2_r], 7 \n\t" - "shll.ph %[u1_l], %[u1_l], 8 \n\t" - "shll.ph %[u1_r], %[u1_r], 8 \n\t" - "shll.ph %[u2_l], %[u2_l], 8 \n\t" - "shll.ph %[u2_r], %[u2_r], 8 \n\t" + /* vp8_signed_char_clamp((63 + Filter2 * 27) >> 7) + * vp8_signed_char_clamp((63 + Filter2 * 18) >> 7) + */ + "addq.ph %[u1_l], %[u1_l], %[R63] \n\t" + "addq.ph %[u1_r], %[u1_r], %[R63] \n\t" + "shra.ph %[u1_l], %[u1_l], 7 \n\t" + "shra.ph %[u1_r], %[u1_r], 7 \n\t" + "shra.ph %[u2_l], %[u2_l], 7 \n\t" + "shra.ph %[u2_r], %[u2_r], 7 \n\t" + "shll.ph %[u1_l], %[u1_l], 8 \n\t" + "shll.ph %[u1_r], %[u1_r], 8 \n\t" + "shll.ph %[u2_l], %[u2_l], 8 \n\t" + "shll.ph %[u2_r], %[u2_r], 8 \n\t" - /* vqs0 = vp8_signed_char_clamp(qs0 - u); */ - "subq_s.ph %[vqs0_l], %[vqs0_l], %[u1_l] \n\t" - "subq_s.ph %[vqs0_r], %[vqs0_r], %[u1_r] \n\t" + /* vqs0 = vp8_signed_char_clamp(qs0 - u); */ + "subq_s.ph %[vqs0_l], %[vqs0_l], %[u1_l] \n\t" + "subq_s.ph %[vqs0_r], %[vqs0_r], %[u1_r] \n\t" - /* vps0 = vp8_signed_char_clamp(ps0 + u); */ - "addq_s.ph %[vps0_l], %[vps0_l], %[u1_l] \n\t" - "addq_s.ph %[vps0_r], %[vps0_r], %[u1_r] \n\t" + /* 
vps0 = vp8_signed_char_clamp(ps0 + u); */ + "addq_s.ph %[vps0_l], %[vps0_l], %[u1_l] \n\t" + "addq_s.ph %[vps0_r], %[vps0_r], %[u1_r] \n\t" - : [u1_l] "=&r" (u1_l), [u1_r] "=&r" (u1_r), [u2_l] "=&r" (u2_l), - [u2_r] "=&r" (u2_r), [u3_l] "=&r" (u3_l), [u3_r] "=&r" (u3_r), - [vps0_l] "+r" (vps0_l), [vps0_r] "+r" (vps0_r), - [vqs0_l] "+r" (vqs0_l), [vqs0_r] "+r" (vqs0_r) - : [R63] "r" (R63), - [Filter2_l] "r" (Filter2_l), [Filter2_r] "r" (Filter2_r) - ); + : [u1_l] "=&r"(u1_l), [u1_r] "=&r"(u1_r), [u2_l] "=&r"(u2_l), + [u2_r] "=&r"(u2_r), [u3_l] "=&r"(u3_l), [u3_r] "=&r"(u3_r), + [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l), + [vqs0_r] "+r"(vqs0_r) + : [R63] "r"(R63), [Filter2_l] "r"(Filter2_l), [Filter2_r] "r"(Filter2_r)); - __asm__ __volatile__ ( - /* vqs1 = vp8_signed_char_clamp(qs1 - u); */ - "subq_s.ph %[vqs1_l], %[vqs1_l], %[u2_l] \n\t" - "addq_s.ph %[vps1_l], %[vps1_l], %[u2_l] \n\t" + __asm__ __volatile__( + /* vqs1 = vp8_signed_char_clamp(qs1 - u); */ + "subq_s.ph %[vqs1_l], %[vqs1_l], %[u2_l] \n\t" + "addq_s.ph %[vps1_l], %[vps1_l], %[u2_l] \n\t" - /* vps1 = vp8_signed_char_clamp(ps1 + u); */ - "addq_s.ph %[vps1_r], %[vps1_r], %[u2_r] \n\t" - "subq_s.ph %[vqs1_r], %[vqs1_r], %[u2_r] \n\t" + /* vps1 = vp8_signed_char_clamp(ps1 + u); */ + "addq_s.ph %[vps1_r], %[vps1_r], %[u2_r] \n\t" + "subq_s.ph %[vqs1_r], %[vqs1_r], %[u2_r] \n\t" - : [vps1_l] "+r" (vps1_l), [vps1_r] "+r" (vps1_r), - [vqs1_l] "+r" (vqs1_l), [vqs1_r] "+r" (vqs1_r) - : [u2_l] "r" (u2_l), [u2_r] "r" (u2_r) - ); + : [vps1_l] "+r"(vps1_l), [vps1_r] "+r"(vps1_r), [vqs1_l] "+r"(vqs1_l), + [vqs1_r] "+r"(vqs1_r) + : [u2_l] "r"(u2_l), [u2_r] "r"(u2_r)); - /* roughly 1/7th difference across boundary */ - __asm__ __volatile__ ( - /* u = vp8_signed_char_clamp((63 + Filter2 * 9) >> 7); */ - "shra.ph %[u3_l], %[u3_l], 7 \n\t" - "shra.ph %[u3_r], %[u3_r], 7 \n\t" - "shll.ph %[u3_l], %[u3_l], 8 \n\t" - "shll.ph %[u3_r], %[u3_r], 8 \n\t" + /* roughly 1/7th difference across boundary */ 
+ __asm__ __volatile__( + /* u = vp8_signed_char_clamp((63 + Filter2 * 9) >> 7); */ + "shra.ph %[u3_l], %[u3_l], 7 \n\t" + "shra.ph %[u3_r], %[u3_r], 7 \n\t" + "shll.ph %[u3_l], %[u3_l], 8 \n\t" + "shll.ph %[u3_r], %[u3_r], 8 \n\t" - /* vqs2 = vp8_signed_char_clamp(qs2 - u); */ - "subq_s.ph %[vqs2_l], %[vqs2_l], %[u3_l] \n\t" - "subq_s.ph %[vqs2_r], %[vqs2_r], %[u3_r] \n\t" + /* vqs2 = vp8_signed_char_clamp(qs2 - u); */ + "subq_s.ph %[vqs2_l], %[vqs2_l], %[u3_l] \n\t" + "subq_s.ph %[vqs2_r], %[vqs2_r], %[u3_r] \n\t" - /* vps2 = vp8_signed_char_clamp(ps2 + u); */ - "addq_s.ph %[vps2_l], %[vps2_l], %[u3_l] \n\t" - "addq_s.ph %[vps2_r], %[vps2_r], %[u3_r] \n\t" + /* vps2 = vp8_signed_char_clamp(ps2 + u); */ + "addq_s.ph %[vps2_l], %[vps2_l], %[u3_l] \n\t" + "addq_s.ph %[vps2_r], %[vps2_r], %[u3_r] \n\t" - : [u3_l] "+r" (u3_l), [u3_r] "+r" (u3_r), [vps2_l] "+r" (vps2_l), - [vps2_r] "+r" (vps2_r), [vqs2_l] "+r" (vqs2_l), [vqs2_r] "+r" (vqs2_r) - : - ); + : [u3_l] "+r"(u3_l), [u3_r] "+r"(u3_r), [vps2_l] "+r"(vps2_l), + [vps2_r] "+r"(vps2_r), [vqs2_l] "+r"(vqs2_l), [vqs2_r] "+r"(vqs2_r) + :); - /* Create quad-bytes from halfword pairs */ - __asm__ __volatile__ ( - "and %[vqs0_l], %[vqs0_l], %[HWM] \n\t" - "shrl.ph %[vqs0_r], %[vqs0_r], 8 \n\t" + /* Create quad-bytes from halfword pairs */ + __asm__ __volatile__( + "and %[vqs0_l], %[vqs0_l], %[HWM] \n\t" + "shrl.ph %[vqs0_r], %[vqs0_r], 8 \n\t" - "and %[vps0_l], %[vps0_l], %[HWM] \n\t" - "shrl.ph %[vps0_r], %[vps0_r], 8 \n\t" + "and %[vps0_l], %[vps0_l], %[HWM] \n\t" + "shrl.ph %[vps0_r], %[vps0_r], 8 \n\t" - "and %[vqs1_l], %[vqs1_l], %[HWM] \n\t" - "shrl.ph %[vqs1_r], %[vqs1_r], 8 \n\t" + "and %[vqs1_l], %[vqs1_l], %[HWM] \n\t" + "shrl.ph %[vqs1_r], %[vqs1_r], 8 \n\t" - "and %[vps1_l], %[vps1_l], %[HWM] \n\t" - "shrl.ph %[vps1_r], %[vps1_r], 8 \n\t" + "and %[vps1_l], %[vps1_l], %[HWM] \n\t" + "shrl.ph %[vps1_r], %[vps1_r], 8 \n\t" - "and %[vqs2_l], %[vqs2_l], %[HWM] \n\t" - "shrl.ph %[vqs2_r], %[vqs2_r], 8 \n\t" + "and 
%[vqs2_l], %[vqs2_l], %[HWM] \n\t" + "shrl.ph %[vqs2_r], %[vqs2_r], 8 \n\t" - "and %[vps2_l], %[vps2_l], %[HWM] \n\t" - "shrl.ph %[vps2_r], %[vps2_r], 8 \n\t" + "and %[vps2_l], %[vps2_l], %[HWM] \n\t" + "shrl.ph %[vps2_r], %[vps2_r], 8 \n\t" - "or %[vqs0_r], %[vqs0_l], %[vqs0_r] \n\t" - "or %[vps0_r], %[vps0_l], %[vps0_r] \n\t" - "or %[vqs1_r], %[vqs1_l], %[vqs1_r] \n\t" - "or %[vps1_r], %[vps1_l], %[vps1_r] \n\t" - "or %[vqs2_r], %[vqs2_l], %[vqs2_r] \n\t" - "or %[vps2_r], %[vps2_l], %[vps2_r] \n\t" + "or %[vqs0_r], %[vqs0_l], %[vqs0_r] \n\t" + "or %[vps0_r], %[vps0_l], %[vps0_r] \n\t" + "or %[vqs1_r], %[vqs1_l], %[vqs1_r] \n\t" + "or %[vps1_r], %[vps1_l], %[vps1_r] \n\t" + "or %[vqs2_r], %[vqs2_l], %[vqs2_r] \n\t" + "or %[vps2_r], %[vps2_l], %[vps2_r] \n\t" - : [vps1_l] "+r" (vps1_l), [vps1_r] "+r" (vps1_r), [vqs1_l] "+r" (vqs1_l), - [vqs1_r] "+r" (vqs1_r), [vps0_l] "+r" (vps0_l), [vps0_r] "+r" (vps0_r), - [vqs0_l] "+r" (vqs0_l), [vqs0_r] "+r" (vqs0_r), [vqs2_l] "+r" (vqs2_l), - [vqs2_r] "+r" (vqs2_r), [vps2_r] "+r" (vps2_r), [vps2_l] "+r" (vps2_l) - : [HWM] "r" (HWM) - ); + : [vps1_l] "+r"(vps1_l), [vps1_r] "+r"(vps1_r), [vqs1_l] "+r"(vqs1_l), + [vqs1_r] "+r"(vqs1_r), [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), + [vqs0_l] "+r"(vqs0_l), [vqs0_r] "+r"(vqs0_r), [vqs2_l] "+r"(vqs2_l), + [vqs2_r] "+r"(vqs2_r), [vps2_r] "+r"(vps2_r), [vps2_l] "+r"(vps2_l) + : [HWM] "r"(HWM)); - *ps0 = vps0_r ^ N128; - *ps1 = vps1_r ^ N128; - *ps2 = vps2_r ^ N128; - *qs0 = vqs0_r ^ N128; - *qs1 = vqs1_r ^ N128; - *qs2 = vqs2_r ^ N128; + *ps0 = vps0_r ^ N128; + *ps1 = vps1_r ^ N128; + *ps2 = vps2_r ^ N128; + *qs0 = vqs0_r ^ N128; + *qs1 = vqs1_r ^ N128; + *qs2 = vqs2_r ^ N128; } -void vp8_mbloop_filter_horizontal_edge_mips -( - unsigned char *s, - int p, - unsigned int flimit, - unsigned int limit, - unsigned int thresh, - int count -) -{ - int i; - uint32_t mask, hev; - uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; - unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6; +void 
vp8_mbloop_filter_horizontal_edge_mips(unsigned char *s, int p, + unsigned int flimit, + unsigned int limit, + unsigned int thresh, int count) { + int i; + uint32_t mask, hev; + uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; + unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6; - mask = 0; - hev = 0; - i = 0; - p1 = 0; - p2 = 0; - p3 = 0; - p4 = 0; + mask = 0; + hev = 0; + i = 0; + p1 = 0; + p2 = 0; + p3 = 0; + p4 = 0; - /* loop filter designed to work using chars so that we can make maximum use - * of 8 bit simd instructions. - */ + /* loop filter designed to work using chars so that we can make maximum use + * of 8 bit simd instructions. + */ - sm1 = s - (p << 2); - s0 = s - p - p - p; - s1 = s - p - p; - s2 = s - p; - s3 = s; - s4 = s + p; - s5 = s + p + p; - s6 = s + p + p + p; + sm1 = s - (p << 2); + s0 = s - p - p - p; + s1 = s - p - p; + s2 = s - p; + s3 = s; + s4 = s + p; + s5 = s + p + p; + s6 = s + p + p + p; - /* prefetch data for load */ - prefetch_load_lf(s + p); - - /* apply filter on 4 pixesl at the same time */ - do - { - /* load quad-byte vectors - * memory is 4 byte aligned - */ - p1 = *((uint32_t *)(s1)); - p2 = *((uint32_t *)(s2)); - p3 = *((uint32_t *)(s3)); - p4 = *((uint32_t *)(s4)); - - /* if (p1 - p4 == 0) and (p2 - p3 == 0) - * mask will be zero and filtering is not needed - */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { - - pm1 = *((uint32_t *)(sm1)); - p0 = *((uint32_t *)(s0)); - p5 = *((uint32_t *)(s5)); - p6 = *((uint32_t *)(s6)); - - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); - - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5); - - /* unpack processed 4x4 neighborhood - * memory is 4 byte aligned - */ - *((uint32_t *)s0) = p0; - *((uint32_t *)s1) = p1; - *((uint32_t *)s2) = p2; - *((uint32_t *)s3) = p3; - *((uint32_t *)s4) = p4; - *((uint32_t *)s5) = p5; - } - } - - sm1 += 4; - s0 += 
4; - s1 += 4; - s2 += 4; - s3 += 4; - s4 += 4; - s5 += 4; - s6 += 4; - - /* load quad-byte vectors - * memory is 4 byte aligned - */ - p1 = *((uint32_t *)(s1)); - p2 = *((uint32_t *)(s2)); - p3 = *((uint32_t *)(s3)); - p4 = *((uint32_t *)(s4)); - - /* if (p1 - p4 == 0) and (p2 - p3 == 0) - * mask will be zero and filtering is not needed - */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { - - pm1 = *((uint32_t *)(sm1)); - p0 = *((uint32_t *)(s0)); - p5 = *((uint32_t *)(s5)); - p6 = *((uint32_t *)(s6)); - - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); - - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5); - - /* unpack processed 4x4 neighborhood - * memory is 4 byte aligned - */ - *((uint32_t *)s0) = p0; - *((uint32_t *)s1) = p1; - *((uint32_t *)s2) = p2; - *((uint32_t *)s3) = p3; - *((uint32_t *)s4) = p4; - *((uint32_t *)s5) = p5; - } - } - - sm1 += 4; - s0 += 4; - s1 += 4; - s2 += 4; - s3 += 4; - s4 += 4; - s5 += 4; - s6 += 4; - - i += 8; - } - - while (i < count); -} - -void vp8_mbloop_filter_uvhorizontal_edge_mips -( - unsigned char *s, - int p, - unsigned int flimit, - unsigned int limit, - unsigned int thresh, - int count -) -{ - uint32_t mask, hev; - uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; - unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6; - - mask = 0; - hev = 0; - p1 = 0; - p2 = 0; - p3 = 0; - p4 = 0; - - /* loop filter designed to work using chars so that we can make maximum use - * of 8 bit simd instructions. 
- */ - - sm1 = s - (p << 2); - s0 = s - p - p - p; - s1 = s - p - p; - s2 = s - p; - s3 = s; - s4 = s + p; - s5 = s + p + p; - s6 = s + p + p + p; + /* prefetch data for load */ + prefetch_load_lf(s + p); + /* apply filter on 4 pixesl at the same time */ + do { /* load quad-byte vectors * memory is 4 byte aligned */ @@ -1819,43 +1518,40 @@ void vp8_mbloop_filter_uvhorizontal_edge_mips /* if (p1 - p4 == 0) and (p2 - p3 == 0) * mask will be zero and filtering is not needed */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + pm1 = *((uint32_t *)(sm1)); + p0 = *((uint32_t *)(s0)); + p5 = *((uint32_t *)(s5)); + p6 = *((uint32_t *)(s6)); - pm1 = *((uint32_t *)(sm1)); - p0 = *((uint32_t *)(s0)); - p5 = *((uint32_t *)(s5)); - p6 = *((uint32_t *)(s6)); + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); - /* if mask == 0 do filtering is not needed */ - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5); - if (mask) - { - /* filtering */ - vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5); - - /* unpack processed 4x4 neighborhood - * memory is 4 byte aligned - */ - *((uint32_t *)s0) = p0; - *((uint32_t *)s1) = p1; - *((uint32_t *)s2) = p2; - *((uint32_t *)s3) = p3; - *((uint32_t *)s4) = p4; - *((uint32_t *)s5) = p5; - } + /* unpack processed 4x4 neighborhood + * memory is 4 byte aligned + */ + *((uint32_t *)s0) = p0; + *((uint32_t *)s1) = p1; + *((uint32_t *)s2) = p2; + *((uint32_t *)s3) = p3; + *((uint32_t *)s4) = p4; + *((uint32_t *)s5) = p5; + } } sm1 += 4; - s0 += 4; - s1 += 4; - s2 += 4; - s3 += 4; - s4 += 4; - s5 += 4; - s6 += 4; + s0 += 4; + s1 += 4; + s2 += 4; + s3 += 4; + s4 += 4; + s5 += 4; + s6 += 4; /* load quad-byte vectors * memory is 4 byte aligned @@ -1868,303 
+1564,207 @@ void vp8_mbloop_filter_uvhorizontal_edge_mips /* if (p1 - p4 == 0) and (p2 - p3 == 0) * mask will be zero and filtering is not needed */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + pm1 = *((uint32_t *)(sm1)); + p0 = *((uint32_t *)(s0)); + p5 = *((uint32_t *)(s5)); + p6 = *((uint32_t *)(s6)); - pm1 = *((uint32_t *)(sm1)); - p0 = *((uint32_t *)(s0)); - p5 = *((uint32_t *)(s5)); - p6 = *((uint32_t *)(s6)); + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5); - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5); - - /* unpack processed 4x4 neighborhood - * memory is 4 byte aligned - */ - *((uint32_t *)s0) = p0; - *((uint32_t *)s1) = p1; - *((uint32_t *)s2) = p2; - *((uint32_t *)s3) = p3; - *((uint32_t *)s4) = p4; - *((uint32_t *)s5) = p5; - } - } -} - - -void vp8_mbloop_filter_vertical_edge_mips -( - unsigned char *s, - int p, - unsigned int flimit, - unsigned int limit, - unsigned int thresh, - int count -) -{ - - int i; - uint32_t mask, hev; - uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; - unsigned char *s1, *s2, *s3, *s4; - uint32_t prim1, prim2, sec3, sec4, prim3, prim4; - - mask = 0; - hev = 0; - i = 0; - pm1 = 0; - p0 = 0; - p1 = 0; - p2 = 0; - p3 = 0; - p4 = 0; - p5 = 0; - p6 = 0; - - /* loop filter designed to work using chars so that we can make maximum use - * of 8 bit simd instructions. 
- */ - - /* apply filter on 4 pixesl at the same time */ - do - { - s1 = s; - s2 = s + p; - s3 = s2 + p; - s4 = s3 + p; - s = s4 + p; - - /* load quad-byte vectors + /* unpack processed 4x4 neighborhood * memory is 4 byte aligned */ - p2 = *((uint32_t *)(s1 - 4)); - p6 = *((uint32_t *)(s1)); - p1 = *((uint32_t *)(s2 - 4)); - p5 = *((uint32_t *)(s2)); - p0 = *((uint32_t *)(s3 - 4)); - p4 = *((uint32_t *)(s3)); - pm1 = *((uint32_t *)(s4 - 4)); - p3 = *((uint32_t *)(s4)); - - /* transpose pm1, p0, p1, p2 */ - __asm__ __volatile__ ( - "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t" - "precr.qb.ph %[prim2], %[p2], %[p1] \n\t" - "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t" - "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t" - - "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t" - "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t" - "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" - "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" - - "precrq.ph.w %[p2], %[p1], %[sec3] \n\t" - "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t" - "append %[p1], %[sec3], 16 \n\t" - "append %[pm1], %[sec4], 16 \n\t" - - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); - - /* transpose p3, p4, p5, p6 */ - __asm__ __volatile__ ( - "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t" - "precr.qb.ph %[prim2], %[p6], %[p5] \n\t" - "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t" - "precr.qb.ph %[prim4], %[p4], %[p3] \n\t" - - "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t" - "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t" - "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" - "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" - - "precrq.ph.w %[p6], %[p5], %[sec3] \n\t" - "precrq.ph.w %[p4], %[p3], %[sec4] \n\t" - "append %[p5], %[sec3], 16 \n\t" - "append %[p3], %[sec4], 16 \n\t" - - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p6] "+r" 
(p6), [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); - - /* if (p1 - p4 == 0) and (p2 - p3 == 0) - * mask will be zero and filtering is not needed - */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { - - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); - - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5); - - /* don't use transpose on output data - * because memory isn't aligned - */ - __asm__ __volatile__ ( - "sb %[p5], 2(%[s4]) \n\t" - "sb %[p4], 1(%[s4]) \n\t" - "sb %[p3], 0(%[s4]) \n\t" - "sb %[p2], -1(%[s4]) \n\t" - "sb %[p1], -2(%[s4]) \n\t" - "sb %[p0], -3(%[s4]) \n\t" - : - : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s4] "r" (s4), - [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0) - ); - - __asm__ __volatile__ ( - "srl %[p5], %[p5], 8 \n\t" - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - "srl %[p0], %[p0], 8 \n\t" - : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0) - : - ); - - __asm__ __volatile__ ( - "sb %[p5], 2(%[s3]) \n\t" - "sb %[p4], 1(%[s3]) \n\t" - "sb %[p3], 0(%[s3]) \n\t" - "sb %[p2], -1(%[s3]) \n\t" - "sb %[p1], -2(%[s3]) \n\t" - "sb %[p0], -3(%[s3]) \n\t" - : - : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s3] "r" (s3), - [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0) - ); - - __asm__ __volatile__ ( - "srl %[p5], %[p5], 8 \n\t" - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - "srl %[p0], %[p0], 8 \n\t" - : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0) - : - ); - - __asm__ __volatile__ ( - "sb %[p5], 2(%[s2]) \n\t" - "sb %[p4], 1(%[s2]) \n\t" - "sb %[p3], 0(%[s2]) \n\t" - "sb %[p2], -1(%[s2]) \n\t" - "sb %[p1], -2(%[s2]) 
\n\t" - "sb %[p0], -3(%[s2]) \n\t" - : - : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s2] "r" (s2), - [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0) - ); - - __asm__ __volatile__ ( - "srl %[p5], %[p5], 8 \n\t" - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - "srl %[p0], %[p0], 8 \n\t" - : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0) - : - ); - - __asm__ __volatile__ ( - "sb %[p5], 2(%[s1]) \n\t" - "sb %[p4], 1(%[s1]) \n\t" - "sb %[p3], 0(%[s1]) \n\t" - "sb %[p2], -1(%[s1]) \n\t" - "sb %[p1], -2(%[s1]) \n\t" - "sb %[p0], -3(%[s1]) \n\t" - : - : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s1] "r" (s1), - [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0) - ); - } - } - - i += 4; + *((uint32_t *)s0) = p0; + *((uint32_t *)s1) = p1; + *((uint32_t *)s2) = p2; + *((uint32_t *)s3) = p3; + *((uint32_t *)s4) = p4; + *((uint32_t *)s5) = p5; + } } - while (i < count); + sm1 += 4; + s0 += 4; + s1 += 4; + s2 += 4; + s3 += 4; + s4 += 4; + s5 += 4; + s6 += 4; + + i += 8; + } + + while (i < count); } -void vp8_mbloop_filter_uvvertical_edge_mips -( - unsigned char *s, - int p, - unsigned int flimit, - unsigned int limit, - unsigned int thresh, - int count -) -{ - uint32_t mask, hev; - uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; - unsigned char *s1, *s2, *s3, *s4; - uint32_t prim1, prim2, sec3, sec4, prim3, prim4; +void vp8_mbloop_filter_uvhorizontal_edge_mips(unsigned char *s, int p, + unsigned int flimit, + unsigned int limit, + unsigned int thresh, int count) { + uint32_t mask, hev; + uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; + unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6; - mask = 0; - hev = 0; - pm1 = 0; - p0 = 0; - p1 = 0; - p2 = 0; - p3 = 0; - p4 = 0; - p5 = 0; - p6 = 0; + mask = 0; + hev = 0; + p1 = 0; + p2 = 0; + p3 = 0; + p4 = 0; - /* loop filter designed to work using chars so that we can make maximum use - * of 8 bit simd instructions. 
- */ + /* loop filter designed to work using chars so that we can make maximum use + * of 8 bit simd instructions. + */ - /* apply filter on 4 pixesl at the same time */ + sm1 = s - (p << 2); + s0 = s - p - p - p; + s1 = s - p - p; + s2 = s - p; + s3 = s; + s4 = s + p; + s5 = s + p + p; + s6 = s + p + p + p; + /* load quad-byte vectors + * memory is 4 byte aligned + */ + p1 = *((uint32_t *)(s1)); + p2 = *((uint32_t *)(s2)); + p3 = *((uint32_t *)(s3)); + p4 = *((uint32_t *)(s4)); + + /* if (p1 - p4 == 0) and (p2 - p3 == 0) + * mask will be zero and filtering is not needed + */ + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + pm1 = *((uint32_t *)(sm1)); + p0 = *((uint32_t *)(s0)); + p5 = *((uint32_t *)(s5)); + p6 = *((uint32_t *)(s6)); + + /* if mask == 0 do filtering is not needed */ + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); + + if (mask) { + /* filtering */ + vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5); + + /* unpack processed 4x4 neighborhood + * memory is 4 byte aligned + */ + *((uint32_t *)s0) = p0; + *((uint32_t *)s1) = p1; + *((uint32_t *)s2) = p2; + *((uint32_t *)s3) = p3; + *((uint32_t *)s4) = p4; + *((uint32_t *)s5) = p5; + } + } + + sm1 += 4; + s0 += 4; + s1 += 4; + s2 += 4; + s3 += 4; + s4 += 4; + s5 += 4; + s6 += 4; + + /* load quad-byte vectors + * memory is 4 byte aligned + */ + p1 = *((uint32_t *)(s1)); + p2 = *((uint32_t *)(s2)); + p3 = *((uint32_t *)(s3)); + p4 = *((uint32_t *)(s4)); + + /* if (p1 - p4 == 0) and (p2 - p3 == 0) + * mask will be zero and filtering is not needed + */ + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + pm1 = *((uint32_t *)(sm1)); + p0 = *((uint32_t *)(s0)); + p5 = *((uint32_t *)(s5)); + p6 = *((uint32_t *)(s6)); + + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); + + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, 
&p5); + + /* unpack processed 4x4 neighborhood + * memory is 4 byte aligned + */ + *((uint32_t *)s0) = p0; + *((uint32_t *)s1) = p1; + *((uint32_t *)s2) = p2; + *((uint32_t *)s3) = p3; + *((uint32_t *)s4) = p4; + *((uint32_t *)s5) = p5; + } + } +} + +void vp8_mbloop_filter_vertical_edge_mips(unsigned char *s, int p, + unsigned int flimit, + unsigned int limit, + unsigned int thresh, int count) { + int i; + uint32_t mask, hev; + uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; + unsigned char *s1, *s2, *s3, *s4; + uint32_t prim1, prim2, sec3, sec4, prim3, prim4; + + mask = 0; + hev = 0; + i = 0; + pm1 = 0; + p0 = 0; + p1 = 0; + p2 = 0; + p3 = 0; + p4 = 0; + p5 = 0; + p6 = 0; + + /* loop filter designed to work using chars so that we can make maximum use + * of 8 bit simd instructions. + */ + + /* apply filter on 4 pixesl at the same time */ + do { s1 = s; s2 = s + p; s3 = s2 + p; s4 = s3 + p; - - /* prefetch data for load */ - prefetch_load_lf(s + 2 * p); + s = s4 + p; /* load quad-byte vectors * memory is 4 byte aligned */ - p2 = *((uint32_t *)(s1 - 4)); - p6 = *((uint32_t *)(s1)); - p1 = *((uint32_t *)(s2 - 4)); - p5 = *((uint32_t *)(s2)); - p0 = *((uint32_t *)(s3 - 4)); - p4 = *((uint32_t *)(s3)); + p2 = *((uint32_t *)(s1 - 4)); + p6 = *((uint32_t *)(s1)); + p1 = *((uint32_t *)(s2 - 4)); + p5 = *((uint32_t *)(s2)); + p0 = *((uint32_t *)(s3 - 4)); + p4 = *((uint32_t *)(s3)); pm1 = *((uint32_t *)(s4 - 4)); - p3 = *((uint32_t *)(s4)); + p3 = *((uint32_t *)(s4)); /* transpose pm1, p0, p1, p2 */ - __asm__ __volatile__ ( + __asm__ __volatile__( "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t" "precr.qb.ph %[prim2], %[p2], %[p1] \n\t" "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t" @@ -2180,15 +1780,13 @@ void vp8_mbloop_filter_uvvertical_edge_mips "append %[p1], %[sec3], 16 \n\t" "append %[pm1], %[sec4], 16 \n\t" - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1), - [sec3] 
"=&r" (sec3), [sec4] "=&r" (sec4) - : - ); + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0), + [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); /* transpose p3, p4, p5, p6 */ - __asm__ __volatile__ ( + __asm__ __volatile__( "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t" "precr.qb.ph %[prim2], %[p6], %[p5] \n\t" "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t" @@ -2204,419 +1802,595 @@ void vp8_mbloop_filter_uvvertical_edge_mips "append %[p5], %[sec3], 16 \n\t" "append %[p3], %[sec4], 16 \n\t" - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p6] "+r" (p6), [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4), + [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); /* if (p1 - p4 == 0) and (p2 - p3 == 0) * mask will be zero and filtering is not needed */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, - thresh, &hev, &mask); + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5); - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5); + /* don't use transpose on output data + * because memory isn't aligned + */ + __asm__ __volatile__( + "sb %[p5], 2(%[s4]) \n\t" + "sb %[p4], 1(%[s4]) \n\t" + "sb %[p3], 0(%[s4]) \n\t" + "sb %[p2], -1(%[s4]) \n\t" + "sb %[p1], -2(%[s4]) \n\t" + "sb %[p0], -3(%[s4]) \n\t" + : + : [p5] "r"(p5), [p4] "r"(p4), [p3] 
"r"(p3), [s4] "r"(s4), + [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0)); - /* don't use transpose on output data - * because memory isn't aligned - */ - __asm__ __volatile__ ( - "sb %[p5], 2(%[s4]) \n\t" - "sb %[p4], 1(%[s4]) \n\t" - "sb %[p3], 0(%[s4]) \n\t" - "sb %[p2], -1(%[s4]) \n\t" - "sb %[p1], -2(%[s4]) \n\t" - "sb %[p0], -3(%[s4]) \n\t" - : - : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s4] "r" (s4), - [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0) - ); + __asm__ __volatile__( + "srl %[p5], %[p5], 8 \n\t" + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + "srl %[p0], %[p0], 8 \n\t" + : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), + [p1] "+r"(p1), [p0] "+r"(p0) + :); - __asm__ __volatile__ ( - "srl %[p5], %[p5], 8 \n\t" - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - "srl %[p0], %[p0], 8 \n\t" - : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0) - : - ); + __asm__ __volatile__( + "sb %[p5], 2(%[s3]) \n\t" + "sb %[p4], 1(%[s3]) \n\t" + "sb %[p3], 0(%[s3]) \n\t" + "sb %[p2], -1(%[s3]) \n\t" + "sb %[p1], -2(%[s3]) \n\t" + "sb %[p0], -3(%[s3]) \n\t" + : + : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s3] "r"(s3), + [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0)); - __asm__ __volatile__ ( - "sb %[p5], 2(%[s3]) \n\t" - "sb %[p4], 1(%[s3]) \n\t" - "sb %[p3], 0(%[s3]) \n\t" - "sb %[p2], -1(%[s3]) \n\t" - "sb %[p1], -2(%[s3]) \n\t" - "sb %[p0], -3(%[s3]) \n\t" - : - : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s3] "r" (s3), - [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0) - ); + __asm__ __volatile__( + "srl %[p5], %[p5], 8 \n\t" + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + "srl %[p0], %[p0], 8 \n\t" + : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), + [p1] "+r"(p1), [p0] "+r"(p0) + :); - __asm__ 
__volatile__ ( - "srl %[p5], %[p5], 8 \n\t" - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - "srl %[p0], %[p0], 8 \n\t" - : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0) - : - ); + __asm__ __volatile__( + "sb %[p5], 2(%[s2]) \n\t" + "sb %[p4], 1(%[s2]) \n\t" + "sb %[p3], 0(%[s2]) \n\t" + "sb %[p2], -1(%[s2]) \n\t" + "sb %[p1], -2(%[s2]) \n\t" + "sb %[p0], -3(%[s2]) \n\t" + : + : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s2] "r"(s2), + [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0)); - __asm__ __volatile__ ( - "sb %[p5], 2(%[s2]) \n\t" - "sb %[p4], 1(%[s2]) \n\t" - "sb %[p3], 0(%[s2]) \n\t" - "sb %[p2], -1(%[s2]) \n\t" - "sb %[p1], -2(%[s2]) \n\t" - "sb %[p0], -3(%[s2]) \n\t" - : - : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s2] "r" (s2), - [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0) - ); + __asm__ __volatile__( + "srl %[p5], %[p5], 8 \n\t" + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + "srl %[p0], %[p0], 8 \n\t" + : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), + [p1] "+r"(p1), [p0] "+r"(p0) + :); - __asm__ __volatile__ ( - "srl %[p5], %[p5], 8 \n\t" - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - "srl %[p0], %[p0], 8 \n\t" - : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0) - : - ); - - __asm__ __volatile__ ( - "sb %[p5], 2(%[s1]) \n\t" - "sb %[p4], 1(%[s1]) \n\t" - "sb %[p3], 0(%[s1]) \n\t" - "sb %[p2], -1(%[s1]) \n\t" - "sb %[p1], -2(%[s1]) \n\t" - "sb %[p0], -3(%[s1]) \n\t" - : - : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s1] "r" (s1), - [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0) - ); - } + __asm__ __volatile__( + "sb %[p5], 2(%[s1]) \n\t" + "sb %[p4], 1(%[s1]) \n\t" + "sb %[p3], 0(%[s1]) \n\t" + "sb %[p2], -1(%[s1]) \n\t" + "sb %[p1], 
-2(%[s1]) \n\t" + "sb %[p0], -3(%[s1]) \n\t" + : + : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s1] "r"(s1), + [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0)); + } } - s1 = s4 + p; - s2 = s1 + p; - s3 = s2 + p; - s4 = s3 + p; + i += 4; + } - /* load quad-byte vectors - * memory is 4 byte aligned - */ - p2 = *((uint32_t *)(s1 - 4)); - p6 = *((uint32_t *)(s1)); - p1 = *((uint32_t *)(s2 - 4)); - p5 = *((uint32_t *)(s2)); - p0 = *((uint32_t *)(s3 - 4)); - p4 = *((uint32_t *)(s3)); - pm1 = *((uint32_t *)(s4 - 4)); - p3 = *((uint32_t *)(s4)); + while (i < count); +} - /* transpose pm1, p0, p1, p2 */ - __asm__ __volatile__ ( - "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t" - "precr.qb.ph %[prim2], %[p2], %[p1] \n\t" - "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t" - "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t" +void vp8_mbloop_filter_uvvertical_edge_mips(unsigned char *s, int p, + unsigned int flimit, + unsigned int limit, + unsigned int thresh, int count) { + uint32_t mask, hev; + uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; + unsigned char *s1, *s2, *s3, *s4; + uint32_t prim1, prim2, sec3, sec4, prim3, prim4; - "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t" - "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t" - "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" - "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" + mask = 0; + hev = 0; + pm1 = 0; + p0 = 0; + p1 = 0; + p2 = 0; + p3 = 0; + p4 = 0; + p5 = 0; + p6 = 0; - "precrq.ph.w %[p2], %[p1], %[sec3] \n\t" - "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t" - "append %[p1], %[sec3], 16 \n\t" - "append %[pm1], %[sec4], 16 \n\t" + /* loop filter designed to work using chars so that we can make maximum use + * of 8 bit simd instructions. 
+ */ - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); + /* apply filter on 4 pixesl at the same time */ - /* transpose p3, p4, p5, p6 */ - __asm__ __volatile__ ( - "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t" - "precr.qb.ph %[prim2], %[p6], %[p5] \n\t" - "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t" - "precr.qb.ph %[prim4], %[p4], %[p3] \n\t" + s1 = s; + s2 = s + p; + s3 = s2 + p; + s4 = s3 + p; - "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t" - "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t" - "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" - "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" + /* prefetch data for load */ + prefetch_load_lf(s + 2 * p); - "precrq.ph.w %[p6], %[p5], %[sec3] \n\t" - "precrq.ph.w %[p4], %[p3], %[sec4] \n\t" - "append %[p5], %[sec3], 16 \n\t" - "append %[p3], %[sec4], 16 \n\t" + /* load quad-byte vectors + * memory is 4 byte aligned + */ + p2 = *((uint32_t *)(s1 - 4)); + p6 = *((uint32_t *)(s1)); + p1 = *((uint32_t *)(s2 - 4)); + p5 = *((uint32_t *)(s2)); + p0 = *((uint32_t *)(s3 - 4)); + p4 = *((uint32_t *)(s3)); + pm1 = *((uint32_t *)(s4 - 4)); + p3 = *((uint32_t *)(s4)); - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p6] "+r" (p6), [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); + /* transpose pm1, p0, p1, p2 */ + __asm__ __volatile__( + "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t" + "precr.qb.ph %[prim2], %[p2], %[p1] \n\t" + "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t" + "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t" - /* if (p1 - p4 == 0) and (p2 - p3 == 0) - * mask will be zero and filtering is not needed - */ - if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) - { + "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t" + "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t" + "precrq.qb.ph 
%[sec3], %[prim3], %[prim4] \n\t" + "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" - vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, thresh, &hev, &mask); + "precrq.ph.w %[p2], %[p1], %[sec3] \n\t" + "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t" + "append %[p1], %[sec3], 16 \n\t" + "append %[pm1], %[sec4], 16 \n\t" - /* if mask == 0 do filtering is not needed */ - if (mask) - { - /* filtering */ - vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5); + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0), + [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); - /* don't use transpose on output data - * because memory isn't aligned - */ - __asm__ __volatile__ ( - "sb %[p5], 2(%[s4]) \n\t" - "sb %[p4], 1(%[s4]) \n\t" - "sb %[p3], 0(%[s4]) \n\t" - "sb %[p2], -1(%[s4]) \n\t" - "sb %[p1], -2(%[s4]) \n\t" - "sb %[p0], -3(%[s4]) \n\t" - : - : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s4] "r" (s4), - [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0) - ); + /* transpose p3, p4, p5, p6 */ + __asm__ __volatile__( + "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t" + "precr.qb.ph %[prim2], %[p6], %[p5] \n\t" + "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t" + "precr.qb.ph %[prim4], %[p4], %[p3] \n\t" - __asm__ __volatile__ ( - "srl %[p5], %[p5], 8 \n\t" - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - "srl %[p0], %[p0], 8 \n\t" - : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0) - : - ); + "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t" + "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t" + "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" + "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" - __asm__ __volatile__ ( - "sb %[p5], 2(%[s3]) \n\t" - "sb %[p4], 1(%[s3]) \n\t" - "sb %[p3], 0(%[s3]) \n\t" - "sb %[p2], -1(%[s3]) \n\t" - "sb %[p1], -2(%[s3]) \n\t" - "sb %[p0], 
-3(%[s3]) \n\t" - : - : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s3] "r" (s3), - [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0) - ); + "precrq.ph.w %[p6], %[p5], %[sec3] \n\t" + "precrq.ph.w %[p4], %[p3], %[sec4] \n\t" + "append %[p5], %[sec3], 16 \n\t" + "append %[p3], %[sec4], 16 \n\t" - __asm__ __volatile__ ( - "srl %[p5], %[p5], 8 \n\t" - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - "srl %[p0], %[p0], 8 \n\t" - : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0) - : - ); + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4), + [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); - __asm__ __volatile__ ( - "sb %[p5], 2(%[s2]) \n\t" - "sb %[p4], 1(%[s2]) \n\t" - "sb %[p3], 0(%[s2]) \n\t" - "sb %[p2], -1(%[s2]) \n\t" - "sb %[p1], -2(%[s2]) \n\t" - "sb %[p0], -3(%[s2]) \n\t" - : - : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s2] "r" (s2), - [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0) - ); + /* if (p1 - p4 == 0) and (p2 - p3 == 0) + * mask will be zero and filtering is not needed + */ + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); - __asm__ __volatile__ ( - "srl %[p5], %[p5], 8 \n\t" - "srl %[p4], %[p4], 8 \n\t" - "srl %[p3], %[p3], 8 \n\t" - "srl %[p2], %[p2], 8 \n\t" - "srl %[p1], %[p1], 8 \n\t" - "srl %[p0], %[p0], 8 \n\t" - : [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0) - : - ); + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5); - __asm__ __volatile__ ( - "sb %[p5], 2(%[s1]) \n\t" - "sb %[p4], 1(%[s1]) \n\t" - "sb %[p3], 0(%[s1]) \n\t" - "sb %[p2], -1(%[s1]) \n\t" - "sb %[p1], -2(%[s1]) \n\t" - "sb %[p0], 
-3(%[s1]) \n\t" - : - : [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), [s1] "r" (s1), - [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0) - ); - } + /* don't use transpose on output data + * because memory isn't aligned + */ + __asm__ __volatile__( + "sb %[p5], 2(%[s4]) \n\t" + "sb %[p4], 1(%[s4]) \n\t" + "sb %[p3], 0(%[s4]) \n\t" + "sb %[p2], -1(%[s4]) \n\t" + "sb %[p1], -2(%[s4]) \n\t" + "sb %[p0], -3(%[s4]) \n\t" + : + : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s4] "r"(s4), + [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0)); + + __asm__ __volatile__( + "srl %[p5], %[p5], 8 \n\t" + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + "srl %[p0], %[p0], 8 \n\t" + : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), + [p1] "+r"(p1), [p0] "+r"(p0) + :); + + __asm__ __volatile__( + "sb %[p5], 2(%[s3]) \n\t" + "sb %[p4], 1(%[s3]) \n\t" + "sb %[p3], 0(%[s3]) \n\t" + "sb %[p2], -1(%[s3]) \n\t" + "sb %[p1], -2(%[s3]) \n\t" + "sb %[p0], -3(%[s3]) \n\t" + : + : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s3] "r"(s3), + [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0)); + + __asm__ __volatile__( + "srl %[p5], %[p5], 8 \n\t" + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + "srl %[p0], %[p0], 8 \n\t" + : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), + [p1] "+r"(p1), [p0] "+r"(p0) + :); + + __asm__ __volatile__( + "sb %[p5], 2(%[s2]) \n\t" + "sb %[p4], 1(%[s2]) \n\t" + "sb %[p3], 0(%[s2]) \n\t" + "sb %[p2], -1(%[s2]) \n\t" + "sb %[p1], -2(%[s2]) \n\t" + "sb %[p0], -3(%[s2]) \n\t" + : + : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s2] "r"(s2), + [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0)); + + __asm__ __volatile__( + "srl %[p5], %[p5], 8 \n\t" + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + "srl %[p0], %[p0], 8 \n\t" + : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), 
+ [p1] "+r"(p1), [p0] "+r"(p0) + :); + + __asm__ __volatile__( + "sb %[p5], 2(%[s1]) \n\t" + "sb %[p4], 1(%[s1]) \n\t" + "sb %[p3], 0(%[s1]) \n\t" + "sb %[p2], -1(%[s1]) \n\t" + "sb %[p1], -2(%[s1]) \n\t" + "sb %[p0], -3(%[s1]) \n\t" + : + : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s1] "r"(s1), + [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0)); } + } + + s1 = s4 + p; + s2 = s1 + p; + s3 = s2 + p; + s4 = s3 + p; + + /* load quad-byte vectors + * memory is 4 byte aligned + */ + p2 = *((uint32_t *)(s1 - 4)); + p6 = *((uint32_t *)(s1)); + p1 = *((uint32_t *)(s2 - 4)); + p5 = *((uint32_t *)(s2)); + p0 = *((uint32_t *)(s3 - 4)); + p4 = *((uint32_t *)(s3)); + pm1 = *((uint32_t *)(s4 - 4)); + p3 = *((uint32_t *)(s4)); + + /* transpose pm1, p0, p1, p2 */ + __asm__ __volatile__( + "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t" + "precr.qb.ph %[prim2], %[p2], %[p1] \n\t" + "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t" + "precr.qb.ph %[prim4], %[p0], %[pm1] \n\t" + + "precrq.qb.ph %[p1], %[prim1], %[prim2] \n\t" + "precr.qb.ph %[pm1], %[prim1], %[prim2] \n\t" + "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" + "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" + + "precrq.ph.w %[p2], %[p1], %[sec3] \n\t" + "precrq.ph.w %[p0], %[pm1], %[sec4] \n\t" + "append %[p1], %[sec3], 16 \n\t" + "append %[pm1], %[sec4], 16 \n\t" + + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0), + [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); + + /* transpose p3, p4, p5, p6 */ + __asm__ __volatile__( + "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t" + "precr.qb.ph %[prim2], %[p6], %[p5] \n\t" + "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t" + "precr.qb.ph %[prim4], %[p4], %[p3] \n\t" + + "precrq.qb.ph %[p5], %[prim1], %[prim2] \n\t" + "precr.qb.ph %[p3], %[prim1], %[prim2] \n\t" + "precrq.qb.ph %[sec3], %[prim3], %[prim4] \n\t" + "precr.qb.ph %[sec4], %[prim3], %[prim4] \n\t" + + "precrq.ph.w %[p6], %[p5], %[sec3] \n\t" + 
"precrq.ph.w %[p4], %[p3], %[sec4] \n\t" + "append %[p5], %[sec3], 16 \n\t" + "append %[p3], %[sec4], 16 \n\t" + + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4), + [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); + + /* if (p1 - p4 == 0) and (p2 - p3 == 0) + * mask will be zero and filtering is not needed + */ + if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { + vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6, + thresh, &hev, &mask); + + /* if mask == 0 do filtering is not needed */ + if (mask) { + /* filtering */ + vp8_mbfilter_mips(mask, hev, &p0, &p1, &p2, &p3, &p4, &p5); + + /* don't use transpose on output data + * because memory isn't aligned + */ + __asm__ __volatile__( + "sb %[p5], 2(%[s4]) \n\t" + "sb %[p4], 1(%[s4]) \n\t" + "sb %[p3], 0(%[s4]) \n\t" + "sb %[p2], -1(%[s4]) \n\t" + "sb %[p1], -2(%[s4]) \n\t" + "sb %[p0], -3(%[s4]) \n\t" + : + : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s4] "r"(s4), + [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0)); + + __asm__ __volatile__( + "srl %[p5], %[p5], 8 \n\t" + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + "srl %[p0], %[p0], 8 \n\t" + : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), + [p1] "+r"(p1), [p0] "+r"(p0) + :); + + __asm__ __volatile__( + "sb %[p5], 2(%[s3]) \n\t" + "sb %[p4], 1(%[s3]) \n\t" + "sb %[p3], 0(%[s3]) \n\t" + "sb %[p2], -1(%[s3]) \n\t" + "sb %[p1], -2(%[s3]) \n\t" + "sb %[p0], -3(%[s3]) \n\t" + : + : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s3] "r"(s3), + [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0)); + + __asm__ __volatile__( + "srl %[p5], %[p5], 8 \n\t" + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + "srl %[p0], %[p0], 8 \n\t" + : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), + [p1] "+r"(p1), [p0] "+r"(p0) + :); + 
+ __asm__ __volatile__( + "sb %[p5], 2(%[s2]) \n\t" + "sb %[p4], 1(%[s2]) \n\t" + "sb %[p3], 0(%[s2]) \n\t" + "sb %[p2], -1(%[s2]) \n\t" + "sb %[p1], -2(%[s2]) \n\t" + "sb %[p0], -3(%[s2]) \n\t" + : + : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s2] "r"(s2), + [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0)); + + __asm__ __volatile__( + "srl %[p5], %[p5], 8 \n\t" + "srl %[p4], %[p4], 8 \n\t" + "srl %[p3], %[p3], 8 \n\t" + "srl %[p2], %[p2], 8 \n\t" + "srl %[p1], %[p1], 8 \n\t" + "srl %[p0], %[p0], 8 \n\t" + : [p5] "+r"(p5), [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), + [p1] "+r"(p1), [p0] "+r"(p0) + :); + + __asm__ __volatile__( + "sb %[p5], 2(%[s1]) \n\t" + "sb %[p4], 1(%[s1]) \n\t" + "sb %[p3], 0(%[s1]) \n\t" + "sb %[p2], -1(%[s1]) \n\t" + "sb %[p1], -2(%[s1]) \n\t" + "sb %[p0], -3(%[s1]) \n\t" + : + : [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [s1] "r"(s1), + [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0)); + } + } } /* Horizontal MB filtering */ -void vp8_loop_filter_mbh_dspr2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - unsigned int thresh_vec, flimit_vec, limit_vec; - unsigned char thresh, flimit, limit, flimit_temp; +void vp8_loop_filter_mbh_dspr2(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, + int uv_stride, loop_filter_info *lfi) { + unsigned int thresh_vec, flimit_vec, limit_vec; + unsigned char thresh, flimit, limit, flimit_temp; - /* use direct value instead pointers */ - limit = *(lfi->lim); - flimit_temp = *(lfi->mblim); - thresh = *(lfi->hev_thr); - flimit = flimit_temp; + /* use direct value instead pointers */ + limit = *(lfi->lim); + flimit_temp = *(lfi->mblim); + thresh = *(lfi->hev_thr); + flimit = flimit_temp; - /* create quad-byte */ - __asm__ __volatile__ ( - "replv.qb %[thresh_vec], %[thresh] \n\t" - "replv.qb %[flimit_vec], %[flimit] \n\t" - "replv.qb %[limit_vec], %[limit] \n\t" - : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" 
(flimit_vec), [limit_vec] "=r" (limit_vec) - : [thresh] "r" (thresh), [flimit] "r" (flimit), [limit] "r" (limit) - ); + /* create quad-byte */ + __asm__ __volatile__( + "replv.qb %[thresh_vec], %[thresh] \n\t" + "replv.qb %[flimit_vec], %[flimit] \n\t" + "replv.qb %[limit_vec], %[limit] \n\t" + : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec), + [limit_vec] "=r"(limit_vec) + : [thresh] "r"(thresh), [flimit] "r"(flimit), [limit] "r"(limit)); - vp8_mbloop_filter_horizontal_edge_mips(y_ptr, y_stride, flimit_vec, limit_vec, thresh_vec, 16); + vp8_mbloop_filter_horizontal_edge_mips(y_ptr, y_stride, flimit_vec, limit_vec, + thresh_vec, 16); - if (u_ptr) - { - vp8_mbloop_filter_uvhorizontal_edge_mips(u_ptr, uv_stride, flimit_vec, limit_vec, thresh_vec, 0); - } + if (u_ptr) { + vp8_mbloop_filter_uvhorizontal_edge_mips(u_ptr, uv_stride, flimit_vec, + limit_vec, thresh_vec, 0); + } - if (v_ptr) - { - vp8_mbloop_filter_uvhorizontal_edge_mips(v_ptr, uv_stride, flimit_vec, limit_vec, thresh_vec, 0); - } + if (v_ptr) { + vp8_mbloop_filter_uvhorizontal_edge_mips(v_ptr, uv_stride, flimit_vec, + limit_vec, thresh_vec, 0); + } } - /* Vertical MB Filtering */ -void vp8_loop_filter_mbv_dspr2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - unsigned int thresh_vec, flimit_vec, limit_vec; - unsigned char thresh, flimit, limit, flimit_temp; +void vp8_loop_filter_mbv_dspr2(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, + int uv_stride, loop_filter_info *lfi) { + unsigned int thresh_vec, flimit_vec, limit_vec; + unsigned char thresh, flimit, limit, flimit_temp; - /* use direct value instead pointers */ - limit = *(lfi->lim); - flimit_temp = *(lfi->mblim); - thresh = *(lfi->hev_thr); - flimit = flimit_temp; + /* use direct value instead pointers */ + limit = *(lfi->lim); + flimit_temp = *(lfi->mblim); + thresh = *(lfi->hev_thr); + flimit = flimit_temp; - /* create 
quad-byte */ - __asm__ __volatile__ ( - "replv.qb %[thresh_vec], %[thresh] \n\t" - "replv.qb %[flimit_vec], %[flimit] \n\t" - "replv.qb %[limit_vec], %[limit] \n\t" - : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec), [limit_vec] "=r" (limit_vec) - : [thresh] "r" (thresh), [flimit] "r" (flimit), [limit] "r" (limit) - ); + /* create quad-byte */ + __asm__ __volatile__( + "replv.qb %[thresh_vec], %[thresh] \n\t" + "replv.qb %[flimit_vec], %[flimit] \n\t" + "replv.qb %[limit_vec], %[limit] \n\t" + : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec), + [limit_vec] "=r"(limit_vec) + : [thresh] "r"(thresh), [flimit] "r"(flimit), [limit] "r"(limit)); - vp8_mbloop_filter_vertical_edge_mips(y_ptr, y_stride, flimit_vec, limit_vec, thresh_vec, 16); + vp8_mbloop_filter_vertical_edge_mips(y_ptr, y_stride, flimit_vec, limit_vec, + thresh_vec, 16); - if (u_ptr) - vp8_mbloop_filter_uvvertical_edge_mips(u_ptr, uv_stride, flimit_vec, limit_vec, thresh_vec, 0); + if (u_ptr) + vp8_mbloop_filter_uvvertical_edge_mips(u_ptr, uv_stride, flimit_vec, + limit_vec, thresh_vec, 0); - if (v_ptr) - vp8_mbloop_filter_uvvertical_edge_mips(v_ptr, uv_stride, flimit_vec, limit_vec, thresh_vec, 0); + if (v_ptr) + vp8_mbloop_filter_uvvertical_edge_mips(v_ptr, uv_stride, flimit_vec, + limit_vec, thresh_vec, 0); } - /* Horizontal B Filtering */ -void vp8_loop_filter_bh_dspr2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - unsigned int thresh_vec, flimit_vec, limit_vec; - unsigned char thresh, flimit, limit, flimit_temp; +void vp8_loop_filter_bh_dspr2(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, int uv_stride, + loop_filter_info *lfi) { + unsigned int thresh_vec, flimit_vec, limit_vec; + unsigned char thresh, flimit, limit, flimit_temp; - /* use direct value instead pointers */ - limit = *(lfi->lim); - flimit_temp = *(lfi->blim); - thresh = *(lfi->hev_thr); - 
flimit = flimit_temp; + /* use direct value instead pointers */ + limit = *(lfi->lim); + flimit_temp = *(lfi->blim); + thresh = *(lfi->hev_thr); + flimit = flimit_temp; - /* create quad-byte */ - __asm__ __volatile__ ( - "replv.qb %[thresh_vec], %[thresh] \n\t" - "replv.qb %[flimit_vec], %[flimit] \n\t" - "replv.qb %[limit_vec], %[limit] \n\t" - : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec), [limit_vec] "=r" (limit_vec) - : [thresh] "r" (thresh), [flimit] "r" (flimit), [limit] "r" (limit) - ); + /* create quad-byte */ + __asm__ __volatile__( + "replv.qb %[thresh_vec], %[thresh] \n\t" + "replv.qb %[flimit_vec], %[flimit] \n\t" + "replv.qb %[limit_vec], %[limit] \n\t" + : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec), + [limit_vec] "=r"(limit_vec) + : [thresh] "r"(thresh), [flimit] "r"(flimit), [limit] "r"(limit)); - vp8_loop_filter_horizontal_edge_mips(y_ptr + 4 * y_stride, y_stride, flimit_vec, limit_vec, thresh_vec, 16); - vp8_loop_filter_horizontal_edge_mips(y_ptr + 8 * y_stride, y_stride, flimit_vec, limit_vec, thresh_vec, 16); - vp8_loop_filter_horizontal_edge_mips(y_ptr + 12 * y_stride, y_stride, flimit_vec, limit_vec, thresh_vec, 16); + vp8_loop_filter_horizontal_edge_mips(y_ptr + 4 * y_stride, y_stride, + flimit_vec, limit_vec, thresh_vec, 16); + vp8_loop_filter_horizontal_edge_mips(y_ptr + 8 * y_stride, y_stride, + flimit_vec, limit_vec, thresh_vec, 16); + vp8_loop_filter_horizontal_edge_mips(y_ptr + 12 * y_stride, y_stride, + flimit_vec, limit_vec, thresh_vec, 16); - if (u_ptr) - vp8_loop_filter_uvhorizontal_edge_mips(u_ptr + 4 * uv_stride, uv_stride, flimit_vec, limit_vec, thresh_vec, 0); + if (u_ptr) + vp8_loop_filter_uvhorizontal_edge_mips( + u_ptr + 4 * uv_stride, uv_stride, flimit_vec, limit_vec, thresh_vec, 0); - if (v_ptr) - vp8_loop_filter_uvhorizontal_edge_mips(v_ptr + 4 * uv_stride, uv_stride, flimit_vec, limit_vec, thresh_vec, 0); + if (v_ptr) + vp8_loop_filter_uvhorizontal_edge_mips( + v_ptr + 4 * 
uv_stride, uv_stride, flimit_vec, limit_vec, thresh_vec, 0); } - /* Vertical B Filtering */ -void vp8_loop_filter_bv_dspr2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - unsigned int thresh_vec, flimit_vec, limit_vec; - unsigned char thresh, flimit, limit, flimit_temp; +void vp8_loop_filter_bv_dspr2(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, int uv_stride, + loop_filter_info *lfi) { + unsigned int thresh_vec, flimit_vec, limit_vec; + unsigned char thresh, flimit, limit, flimit_temp; - /* use direct value instead pointers */ - limit = *(lfi->lim); - flimit_temp = *(lfi->blim); - thresh = *(lfi->hev_thr); - flimit = flimit_temp; + /* use direct value instead pointers */ + limit = *(lfi->lim); + flimit_temp = *(lfi->blim); + thresh = *(lfi->hev_thr); + flimit = flimit_temp; - /* create quad-byte */ - __asm__ __volatile__ ( - "replv.qb %[thresh_vec], %[thresh] \n\t" - "replv.qb %[flimit_vec], %[flimit] \n\t" - "replv.qb %[limit_vec], %[limit] \n\t" - : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec), [limit_vec] "=r" (limit_vec) - : [thresh] "r" (thresh), [flimit] "r" (flimit), [limit] "r" (limit) - ); + /* create quad-byte */ + __asm__ __volatile__( + "replv.qb %[thresh_vec], %[thresh] \n\t" + "replv.qb %[flimit_vec], %[flimit] \n\t" + "replv.qb %[limit_vec], %[limit] \n\t" + : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec), + [limit_vec] "=r"(limit_vec) + : [thresh] "r"(thresh), [flimit] "r"(flimit), [limit] "r"(limit)); - vp8_loop_filter_vertical_edge_mips(y_ptr + 4, y_stride, flimit_vec, limit_vec, thresh_vec, 16); - vp8_loop_filter_vertical_edge_mips(y_ptr + 8, y_stride, flimit_vec, limit_vec, thresh_vec, 16); - vp8_loop_filter_vertical_edge_mips(y_ptr + 12, y_stride, flimit_vec, limit_vec, thresh_vec, 16); + vp8_loop_filter_vertical_edge_mips(y_ptr + 4, y_stride, flimit_vec, limit_vec, + thresh_vec, 16); + 
vp8_loop_filter_vertical_edge_mips(y_ptr + 8, y_stride, flimit_vec, limit_vec, + thresh_vec, 16); + vp8_loop_filter_vertical_edge_mips(y_ptr + 12, y_stride, flimit_vec, + limit_vec, thresh_vec, 16); - if (u_ptr) - vp8_loop_filter_uvvertical_edge_mips(u_ptr + 4, uv_stride, flimit_vec, limit_vec, thresh_vec, 0); + if (u_ptr) + vp8_loop_filter_uvvertical_edge_mips(u_ptr + 4, uv_stride, flimit_vec, + limit_vec, thresh_vec, 0); - if (v_ptr) - vp8_loop_filter_uvvertical_edge_mips(v_ptr + 4, uv_stride, flimit_vec, limit_vec, thresh_vec, 0); + if (v_ptr) + vp8_loop_filter_uvvertical_edge_mips(v_ptr + 4, uv_stride, flimit_vec, + limit_vec, thresh_vec, 0); } #endif diff --git a/libs/libvpx/vp8/common/mips/msa/bilinear_filter_msa.c b/libs/libvpx/vp8/common/mips/msa/bilinear_filter_msa.c index 1054ed3997..c7fb1ed33f 100644 --- a/libs/libvpx/vp8/common/mips/msa/bilinear_filter_msa.c +++ b/libs/libvpx/vp8/common/mips/msa/bilinear_filter_msa.c @@ -13,206 +13,209 @@ #include "vp8/common/filter.h" #include "vp8/common/mips/msa/vp8_macros_msa.h" -DECLARE_ALIGNED(16, static const int8_t, vp8_bilinear_filters_msa[7][2]) = -{ - { 112, 16 }, - { 96, 32 }, - { 80, 48 }, - { 64, 64 }, - { 48, 80 }, - { 32, 96 }, - { 16, 112 } +DECLARE_ALIGNED(16, static const int8_t, vp8_bilinear_filters_msa[7][2]) = { + { 112, 16 }, { 96, 32 }, { 80, 48 }, { 64, 64 }, + { 48, 80 }, { 32, 96 }, { 16, 112 } }; -static const uint8_t vp8_mc_filt_mask_arr[16 * 3] = -{ - /* 8 width cases */ - 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, - /* 4 width cases */ - 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20, - /* 4 width cases */ - 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28 +static const uint8_t vp8_mc_filt_mask_arr[16 * 3] = { + /* 8 width cases */ + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, + /* 4 width cases */ + 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20, + /* 4 width cases */ + 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28 }; static void 
common_hz_2t_4x4_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter) -{ - v16i8 src0, src1, src2, src3, mask; - v16u8 filt0, vec0, vec1, res0, res1; - v8u16 vec2, vec3, filt; + const int8_t *filter) { + v16i8 src0, src1, src2, src3, mask; + v16u8 filt0, vec0, vec1, res0, res1; + v8u16 vec2, vec3, filt; - mask = LD_SB(&vp8_mc_filt_mask_arr[16]); + mask = LD_SB(&vp8_mc_filt_mask_arr[16]); - filt = LD_UH(filter); - filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); + filt = LD_UH(filter); + filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); - LD_SB4(src, src_stride, src0, src1, src2, src3); - VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3); - SRARI_H2_UH(vec2, vec3, VP8_FILTER_SHIFT); - PCKEV_B2_UB(vec2, vec2, vec3, vec3, res0, res1); - ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride); + LD_SB4(src, src_stride, src0, src1, src2, src3); + VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); + DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3); + SRARI_H2_UH(vec2, vec3, VP8_FILTER_SHIFT); + PCKEV_B2_UB(vec2, vec2, vec3, vec3, res0, res1); + ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride); } static void common_hz_2t_4x8_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter) -{ - v16u8 vec0, vec1, vec2, vec3, filt0; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; - v16i8 res0, res1, res2, res3; - v8u16 vec4, vec5, vec6, vec7, filt; + const int8_t *filter) { + v16u8 vec0, vec1, vec2, vec3, filt0; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; + v16i8 res0, res1, res2, res3; + v8u16 vec4, vec5, vec6, vec7, filt; - mask = LD_SB(&vp8_mc_filt_mask_arr[16]); + mask = LD_SB(&vp8_mc_filt_mask_arr[16]); - filt = LD_UH(filter); - filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); + filt = LD_UH(filter); + filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); - LD_SB8(src, 
src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); - VSHF_B2_UB(src4, src5, src6, src7, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec4, vec5, vec6, vec7); - SRARI_H4_UH(vec4, vec5, vec6, vec7, VP8_FILTER_SHIFT); - PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, - res0, res1, res2, res3); - ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride); - dst += (4 * dst_stride); - ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride); + LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); + VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); + VSHF_B2_UB(src4, src5, src6, src7, mask, mask, vec2, vec3); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5, + vec6, vec7); + SRARI_H4_UH(vec4, vec5, vec6, vec7, VP8_FILTER_SHIFT); + PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2, + res3); + ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride); + dst += (4 * dst_stride); + ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride); } static void common_hz_2t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - if (4 == height) - { - common_hz_2t_4x4_msa(src, src_stride, dst, dst_stride, filter); - } - else if (8 == height) - { - common_hz_2t_4x8_msa(src, src_stride, dst, dst_stride, filter); - } + const int8_t *filter, int32_t height) { + if (4 == height) { + common_hz_2t_4x4_msa(src, src_stride, dst, dst_stride, filter); + } else if (8 == height) { + common_hz_2t_4x8_msa(src, src_stride, dst, dst_stride, filter); + } } static void common_hz_2t_8x4_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter) -{ - v16u8 filt0; - v16i8 src0, src1, src2, src3, mask; - v8u16 vec0, vec1, vec2, vec3, filt; + const int8_t *filter) { + v16u8 filt0; + v16i8 
src0, src1, src2, src3, mask; + v8u16 vec0, vec1, vec2, vec3, filt; - mask = LD_SB(&vp8_mc_filt_mask_arr[0]); + mask = LD_SB(&vp8_mc_filt_mask_arr[0]); - filt = LD_UH(filter); - filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); + filt = LD_UH(filter); + filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); - LD_SB4(src, src_stride, src0, src1, src2, src3); - VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); - SRARI_H4_UH(vec0, vec1, vec2, vec3, VP8_FILTER_SHIFT); - PCKEV_B2_SB(vec1, vec0, vec3, vec2, src0, src1); - ST8x4_UB(src0, src1, dst, dst_stride); + LD_SB4(src, src_stride, src0, src1, src2, src3); + VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); + VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, + vec2, vec3); + SRARI_H4_UH(vec0, vec1, vec2, vec3, VP8_FILTER_SHIFT); + PCKEV_B2_SB(vec1, vec0, vec3, vec2, src0, src1); + ST8x4_UB(src0, src1, dst, dst_stride); } static void common_hz_2t_8x8mult_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - v16u8 filt0; - v16i8 src0, src1, src2, src3, mask, out0, out1; - v8u16 vec0, vec1, vec2, vec3, filt; + const int8_t *filter, int32_t height) { + v16u8 filt0; + v16i8 src0, src1, src2, src3, mask, out0, out1; + v8u16 vec0, vec1, vec2, vec3, filt; - mask = LD_SB(&vp8_mc_filt_mask_arr[0]); + mask = LD_SB(&vp8_mc_filt_mask_arr[0]); - filt = LD_UH(filter); - filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); + filt = LD_UH(filter); + filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); + LD_SB4(src, src_stride, src0, src1, src2, src3); + src += (4 * src_stride); + + VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); + VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); + 
DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, + vec2, vec3); + SRARI_H4_UH(vec0, vec1, vec2, vec3, VP8_FILTER_SHIFT); + + LD_SB4(src, src_stride, src0, src1, src2, src3); + src += (4 * src_stride); + + PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1); + ST8x4_UB(out0, out1, dst, dst_stride); + dst += (4 * dst_stride); + + VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); + VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, + vec2, vec3); + SRARI_H4_UH(vec0, vec1, vec2, vec3, VP8_FILTER_SHIFT); + PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1); + ST8x4_UB(out0, out1, dst, dst_stride); + dst += (4 * dst_stride); + + if (16 == height) { LD_SB4(src, src_stride, src0, src1, src2, src3); src += (4 * src_stride); VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, + vec2, vec3); SRARI_H4_UH(vec0, vec1, vec2, vec3, VP8_FILTER_SHIFT); - LD_SB4(src, src_stride, src0, src1, src2, src3); src += (4 * src_stride); PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1); ST8x4_UB(out0, out1, dst, dst_stride); - dst += (4 * dst_stride); VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, + vec2, vec3); SRARI_H4_UH(vec0, vec1, vec2, vec3, VP8_FILTER_SHIFT); PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1); - ST8x4_UB(out0, out1, dst, dst_stride); - dst += (4 * dst_stride); - - if (16 == height) - { - LD_SB4(src, src_stride, src0, src1, src2, src3); - src += (4 * src_stride); - - VSHF_B2_UH(src0, 
src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); - SRARI_H4_UH(vec0, vec1, vec2, vec3, VP8_FILTER_SHIFT); - LD_SB4(src, src_stride, src0, src1, src2, src3); - src += (4 * src_stride); - - PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1); - ST8x4_UB(out0, out1, dst, dst_stride); - - VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); - SRARI_H4_UH(vec0, vec1, vec2, vec3, VP8_FILTER_SHIFT); - PCKEV_B2_SB(vec1, vec0, vec3, vec2, out0, out1); - ST8x4_UB(out0, out1, dst + 4 * dst_stride, dst_stride); - } + ST8x4_UB(out0, out1, dst + 4 * dst_stride, dst_stride); + } } static void common_hz_2t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - if (4 == height) - { - common_hz_2t_8x4_msa(src, src_stride, dst, dst_stride, filter); - } - else - { - common_hz_2t_8x8mult_msa(src, src_stride, dst, dst_stride, filter, height); - } + const int8_t *filter, int32_t height) { + if (4 == height) { + common_hz_2t_8x4_msa(src, src_stride, dst, dst_stride, filter); + } else { + common_hz_2t_8x8mult_msa(src, src_stride, dst, dst_stride, filter, height); + } } static void common_hz_2t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; - v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; - v8u16 out0, out1, out2, out3, out4, out5, out6, out7, filt; + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; + v16u8 filt0, vec0, vec1, vec2, vec3, vec4, vec5, 
vec6, vec7; + v8u16 out0, out1, out2, out3, out4, out5, out6, out7, filt; - mask = LD_SB(&vp8_mc_filt_mask_arr[0]); + mask = LD_SB(&vp8_mc_filt_mask_arr[0]); - loop_cnt = (height >> 2) - 1; + loop_cnt = (height >> 2) - 1; - filt = LD_UH(filter); - filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); + filt = LD_UH(filter); + filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); + LD_SB4(src, src_stride, src0, src2, src4, src6); + LD_SB4(src + 8, src_stride, src1, src3, src5, src7); + src += (4 * src_stride); + + VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1); + VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3); + VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5); + VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1, + out2, out3); + DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5, + out6, out7); + SRARI_H4_UH(out0, out1, out2, out3, VP8_FILTER_SHIFT); + SRARI_H4_UH(out4, out5, out6, out7, VP8_FILTER_SHIFT); + PCKEV_ST_SB(out0, out1, dst); + dst += dst_stride; + PCKEV_ST_SB(out2, out3, dst); + dst += dst_stride; + PCKEV_ST_SB(out4, out5, dst); + dst += dst_stride; + PCKEV_ST_SB(out6, out7, dst); + dst += dst_stride; + + for (; loop_cnt--;) { LD_SB4(src, src_stride, src0, src2, src4, src6); LD_SB4(src + 8, src_stride, src1, src3, src5, src7); src += (4 * src_stride); @@ -221,10 +224,10 @@ static void common_hz_2t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3); VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5); VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - out0, out1, out2, out3); - DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, - out4, out5, out6, out7); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1, + out2, out3); + DOTP_UB4_UH(vec4, vec5, 
vec6, vec7, filt0, filt0, filt0, filt0, out4, out5, + out6, out7); SRARI_H4_UH(out0, out1, out2, out3, VP8_FILTER_SHIFT); SRARI_H4_UH(out4, out5, out6, out7, VP8_FILTER_SHIFT); PCKEV_ST_SB(out0, out1, dst); @@ -235,677 +238,560 @@ static void common_hz_2t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, dst += dst_stride; PCKEV_ST_SB(out6, out7, dst); dst += dst_stride; - - for (; loop_cnt--;) - { - LD_SB4(src, src_stride, src0, src2, src4, src6); - LD_SB4(src + 8, src_stride, src1, src3, src5, src7); - src += (4 * src_stride); - - VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1); - VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3); - VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5); - VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - out0, out1, out2, out3); - DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, - out4, out5, out6, out7); - SRARI_H4_UH(out0, out1, out2, out3, VP8_FILTER_SHIFT); - SRARI_H4_UH(out4, out5, out6, out7, VP8_FILTER_SHIFT); - PCKEV_ST_SB(out0, out1, dst); - dst += dst_stride; - PCKEV_ST_SB(out2, out3, dst); - dst += dst_stride; - PCKEV_ST_SB(out4, out5, dst); - dst += dst_stride; - PCKEV_ST_SB(out6, out7, dst); - dst += dst_stride; - } + } } static void common_vt_2t_4x4_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter) -{ - v16i8 src0, src1, src2, src3, src4; - v16i8 src10_r, src32_r, src21_r, src43_r, src2110, src4332; - v16u8 filt0; - v8i16 filt; - v8u16 tmp0, tmp1; + const int8_t *filter) { + v16i8 src0, src1, src2, src3, src4; + v16i8 src10_r, src32_r, src21_r, src43_r, src2110, src4332; + v16u8 filt0; + v8i16 filt; + v8u16 tmp0, tmp1; - filt = LD_SH(filter); - filt0 = (v16u8)__msa_splati_h(filt, 0); + filt = LD_SH(filter); + filt0 = (v16u8)__msa_splati_h(filt, 0); - LD_SB5(src, src_stride, src0, src1, src2, src3, src4); - src += (5 * src_stride); + 
LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + src += (5 * src_stride); - ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, - src10_r, src21_r, src32_r, src43_r); - ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332); - DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, VP8_FILTER_SHIFT); - src2110 = __msa_pckev_b((v16i8)tmp1, (v16i8)tmp0); - ST4x4_UB(src2110, src2110, 0, 1, 2, 3, dst, dst_stride); + ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r, + src32_r, src43_r); + ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332); + DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1); + SRARI_H2_UH(tmp0, tmp1, VP8_FILTER_SHIFT); + src2110 = __msa_pckev_b((v16i8)tmp1, (v16i8)tmp0); + ST4x4_UB(src2110, src2110, 0, 1, 2, 3, dst, dst_stride); } static void common_vt_2t_4x8_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter) -{ - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; - v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r; - v16i8 src65_r, src87_r, src2110, src4332, src6554, src8776; - v8u16 tmp0, tmp1, tmp2, tmp3; - v16u8 filt0; - v8i16 filt; + const int8_t *filter) { + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; + v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r; + v16i8 src65_r, src87_r, src2110, src4332, src6554, src8776; + v8u16 tmp0, tmp1, tmp2, tmp3; + v16u8 filt0; + v8i16 filt; - filt = LD_SH(filter); - filt0 = (v16u8)__msa_splati_h(filt, 0); + filt = LD_SH(filter); + filt0 = (v16u8)__msa_splati_h(filt, 0); - LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - src += (8 * src_stride); + LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); + src += (8 * src_stride); - src8 = LD_SB(src); - src += src_stride; + src8 = LD_SB(src); + src += src_stride; - ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, 
src10_r, src21_r, - src32_r, src43_r); - ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r, - src76_r, src87_r); - ILVR_D4_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, - src87_r, src76_r, src2110, src4332, src6554, src8776); - DOTP_UB4_UH(src2110, src4332, src6554, src8776, filt0, filt0, filt0, filt0, - tmp0, tmp1, tmp2, tmp3); - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT); - PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, src2110, src4332); - ST4x4_UB(src2110, src2110, 0, 1, 2, 3, dst, dst_stride); - ST4x4_UB(src4332, src4332, 0, 1, 2, 3, dst + 4 * dst_stride, dst_stride); + ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r, + src32_r, src43_r); + ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r, + src76_r, src87_r); + ILVR_D4_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, src87_r, + src76_r, src2110, src4332, src6554, src8776); + DOTP_UB4_UH(src2110, src4332, src6554, src8776, filt0, filt0, filt0, filt0, + tmp0, tmp1, tmp2, tmp3); + SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT); + PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, src2110, src4332); + ST4x4_UB(src2110, src2110, 0, 1, 2, 3, dst, dst_stride); + ST4x4_UB(src4332, src4332, 0, 1, 2, 3, dst + 4 * dst_stride, dst_stride); } static void common_vt_2t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - if (4 == height) - { - common_vt_2t_4x4_msa(src, src_stride, dst, dst_stride, filter); - } - else if (8 == height) - { - common_vt_2t_4x8_msa(src, src_stride, dst, dst_stride, filter); - } + const int8_t *filter, int32_t height) { + if (4 == height) { + common_vt_2t_4x4_msa(src, src_stride, dst, dst_stride, filter); + } else if (8 == height) { + common_vt_2t_4x8_msa(src, src_stride, dst, dst_stride, filter); + } } static void common_vt_2t_8x4_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - 
const int8_t *filter) -{ - v16u8 src0, src1, src2, src3, src4, vec0, vec1, vec2, vec3, filt0; - v16i8 out0, out1; - v8u16 tmp0, tmp1, tmp2, tmp3; - v8i16 filt; + const int8_t *filter) { + v16u8 src0, src1, src2, src3, src4, vec0, vec1, vec2, vec3, filt0; + v16i8 out0, out1; + v8u16 tmp0, tmp1, tmp2, tmp3; + v8i16 filt; - filt = LD_SH(filter); - filt0 = (v16u8)__msa_splati_h(filt, 0); + filt = LD_SH(filter); + filt0 = (v16u8)__msa_splati_h(filt, 0); - LD_UB5(src, src_stride, src0, src1, src2, src3, src4); - ILVR_B2_UB(src1, src0, src2, src1, vec0, vec1); - ILVR_B2_UB(src3, src2, src4, src3, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - tmp0, tmp1, tmp2, tmp3); - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT); - PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1); - ST8x4_UB(out0, out1, dst, dst_stride); + LD_UB5(src, src_stride, src0, src1, src2, src3, src4); + ILVR_B2_UB(src1, src0, src2, src1, vec0, vec1); + ILVR_B2_UB(src3, src2, src4, src3, vec2, vec3); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1, + tmp2, tmp3); + SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT); + PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1); + ST8x4_UB(out0, out1, dst, dst_stride); } static void common_vt_2t_8x8mult_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8; - v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0; - v16i8 out0, out1; - v8u16 tmp0, tmp1, tmp2, tmp3; - v8i16 filt; + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8; + v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0; + v16i8 out0, out1; + v8u16 tmp0, tmp1, tmp2, tmp3; + v8i16 filt; - filt = LD_SH(filter); - filt0 = (v16u8)__msa_splati_h(filt, 0); + filt = LD_SH(filter); + filt0 = 
(v16u8)__msa_splati_h(filt, 0); - src0 = LD_UB(src); - src += src_stride; + src0 = LD_UB(src); + src += src_stride; - for (loop_cnt = (height >> 3); loop_cnt--;) - { - LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8); - src += (8 * src_stride); + for (loop_cnt = (height >> 3); loop_cnt--;) { + LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8); + src += (8 * src_stride); - ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, - vec0, vec1, vec2, vec3); - ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, - vec4, vec5, vec6, vec7); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - tmp0, tmp1, tmp2, tmp3); - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT); - PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1); - ST8x4_UB(out0, out1, dst, dst_stride); - dst += (4 * dst_stride); + ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2, + vec3); + ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5, vec6, + vec7); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1, + tmp2, tmp3); + SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT); + PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1); + ST8x4_UB(out0, out1, dst, dst_stride); + dst += (4 * dst_stride); - DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, - tmp0, tmp1, tmp2, tmp3); - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT); - PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1); - ST8x4_UB(out0, out1, dst, dst_stride); - dst += (4 * dst_stride); + DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, tmp0, tmp1, + tmp2, tmp3); + SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT); + PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1); + ST8x4_UB(out0, out1, dst, dst_stride); + dst += (4 * dst_stride); - src0 = src8; - } + src0 = src8; + } } static void common_vt_2t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t 
dst_stride, - const int8_t *filter, int32_t height) -{ - if (4 == height) - { - common_vt_2t_8x4_msa(src, src_stride, dst, dst_stride, filter); - } - else - { - common_vt_2t_8x8mult_msa(src, src_stride, dst, dst_stride, filter, - height); - } + const int8_t *filter, int32_t height) { + if (4 == height) { + common_vt_2t_8x4_msa(src, src_stride, dst, dst_stride, filter); + } else { + common_vt_2t_8x8mult_msa(src, src_stride, dst, dst_stride, filter, height); + } } static void common_vt_2t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16u8 src0, src1, src2, src3, src4; - v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0; - v8u16 tmp0, tmp1, tmp2, tmp3; - v8i16 filt; + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + v16u8 src0, src1, src2, src3, src4; + v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0; + v8u16 tmp0, tmp1, tmp2, tmp3; + v8i16 filt; - filt = LD_SH(filter); - filt0 = (v16u8)__msa_splati_h(filt, 0); + filt = LD_SH(filter); + filt0 = (v16u8)__msa_splati_h(filt, 0); - src0 = LD_UB(src); - src += src_stride; + src0 = LD_UB(src); + src += src_stride; - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_UB4(src, src_stride, src1, src2, src3, src4); - src += (4 * src_stride); + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_UB4(src, src_stride, src1, src2, src3, src4); + src += (4 * src_stride); - ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2); - ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3); - DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, VP8_FILTER_SHIFT); - PCKEV_ST_SB(tmp0, tmp1, dst); - dst += dst_stride; + ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2); + ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3); + DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1); + SRARI_H2_UH(tmp0, tmp1, VP8_FILTER_SHIFT); + PCKEV_ST_SB(tmp0, tmp1, dst); + dst += dst_stride; - ILVR_B2_UB(src3, 
src2, src4, src3, vec4, vec6); - ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7); - DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, VP8_FILTER_SHIFT); - PCKEV_ST_SB(tmp2, tmp3, dst); - dst += dst_stride; + ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6); + ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7); + DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3); + SRARI_H2_UH(tmp2, tmp3, VP8_FILTER_SHIFT); + PCKEV_ST_SB(tmp2, tmp3, dst); + dst += dst_stride; - DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, VP8_FILTER_SHIFT); - PCKEV_ST_SB(tmp0, tmp1, dst); - dst += dst_stride; + DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1); + SRARI_H2_UH(tmp0, tmp1, VP8_FILTER_SHIFT); + PCKEV_ST_SB(tmp0, tmp1, dst); + dst += dst_stride; - DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3); - SRARI_H2_UH(tmp2, tmp3, VP8_FILTER_SHIFT); - PCKEV_ST_SB(tmp2, tmp3, dst); - dst += dst_stride; + DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3); + SRARI_H2_UH(tmp2, tmp3, VP8_FILTER_SHIFT); + PCKEV_ST_SB(tmp2, tmp3, dst); + dst += dst_stride; - src0 = src4; - } + src0 = src4; + } } static void common_hv_2ht_2vt_4x4_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, - const int8_t *filter_vert) -{ - v16i8 src0, src1, src2, src3, src4, mask; - v16u8 filt_vt, filt_hz, vec0, vec1, res0, res1; - v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, filt, tmp0, tmp1; + const int8_t *filter_vert) { + v16i8 src0, src1, src2, src3, src4, mask; + v16u8 filt_vt, filt_hz, vec0, vec1, res0, res1; + v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, filt, tmp0, tmp1; - mask = LD_SB(&vp8_mc_filt_mask_arr[16]); + mask = LD_SB(&vp8_mc_filt_mask_arr[16]); - filt = LD_UH(filter_horiz); - filt_hz = (v16u8)__msa_splati_h((v8i16)filt, 0); - filt = LD_UH(filter_vert); - filt_vt = (v16u8)__msa_splati_h((v8i16)filt, 0); + filt = LD_UH(filter_horiz); + filt_hz = (v16u8)__msa_splati_h((v8i16)filt, 0); 
+ filt = LD_UH(filter_vert); + filt_vt = (v16u8)__msa_splati_h((v8i16)filt, 0); - LD_SB5(src, src_stride, src0, src1, src2, src3, src4); - hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, VP8_FILTER_SHIFT); - hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, VP8_FILTER_SHIFT); - hz_out4 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, VP8_FILTER_SHIFT); - hz_out1 = (v8u16)__msa_sldi_b((v16i8)hz_out2, (v16i8)hz_out0, 8); - hz_out3 = (v8u16)__msa_pckod_d((v2i64)hz_out4, (v2i64)hz_out2); + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, VP8_FILTER_SHIFT); + hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, VP8_FILTER_SHIFT); + hz_out4 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, VP8_FILTER_SHIFT); + hz_out1 = (v8u16)__msa_sldi_b((v16i8)hz_out2, (v16i8)hz_out0, 8); + hz_out3 = (v8u16)__msa_pckod_d((v2i64)hz_out4, (v2i64)hz_out2); - ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1); - SRARI_H2_UH(tmp0, tmp1, VP8_FILTER_SHIFT); - PCKEV_B2_UB(tmp0, tmp0, tmp1, tmp1, res0, res1); - ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride); + ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); + DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1); + SRARI_H2_UH(tmp0, tmp1, VP8_FILTER_SHIFT); + PCKEV_B2_UB(tmp0, tmp0, tmp1, tmp1, res0, res1); + ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride); } static void common_hv_2ht_2vt_4x8_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, - const int8_t *filter_vert) -{ - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, mask; - v16i8 res0, res1, res2, res3; - v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3; - v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; - v8u16 hz_out7, hz_out8, vec4, vec5, vec6, vec7, filt; + const int8_t *filter_vert) { + v16i8 src0, src1, src2, src3, src4, src5, 
src6, src7, src8, mask; + v16i8 res0, res1, res2, res3; + v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3; + v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; + v8u16 hz_out7, hz_out8, vec4, vec5, vec6, vec7, filt; - mask = LD_SB(&vp8_mc_filt_mask_arr[16]); + mask = LD_SB(&vp8_mc_filt_mask_arr[16]); - filt = LD_UH(filter_horiz); - filt_hz = (v16u8)__msa_splati_h((v8i16)filt, 0); - filt = LD_UH(filter_vert); - filt_vt = (v16u8)__msa_splati_h((v8i16)filt, 0); + filt = LD_UH(filter_horiz); + filt_hz = (v16u8)__msa_splati_h((v8i16)filt, 0); + filt = LD_UH(filter_vert); + filt_vt = (v16u8)__msa_splati_h((v8i16)filt, 0); - LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - src += (8 * src_stride); - src8 = LD_SB(src); + LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); + src += (8 * src_stride); + src8 = LD_SB(src); - hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, VP8_FILTER_SHIFT); - hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, VP8_FILTER_SHIFT); - hz_out4 = HORIZ_2TAP_FILT_UH(src4, src5, mask, filt_hz, VP8_FILTER_SHIFT); - hz_out6 = HORIZ_2TAP_FILT_UH(src6, src7, mask, filt_hz, VP8_FILTER_SHIFT); - hz_out8 = HORIZ_2TAP_FILT_UH(src8, src8, mask, filt_hz, VP8_FILTER_SHIFT); - SLDI_B3_UH(hz_out2, hz_out4, hz_out6, hz_out0, hz_out2, hz_out4, hz_out1, - hz_out3, hz_out5, 8); - hz_out7 = (v8u16)__msa_pckod_d((v2i64)hz_out8, (v2i64)hz_out6); + hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, VP8_FILTER_SHIFT); + hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, VP8_FILTER_SHIFT); + hz_out4 = HORIZ_2TAP_FILT_UH(src4, src5, mask, filt_hz, VP8_FILTER_SHIFT); + hz_out6 = HORIZ_2TAP_FILT_UH(src6, src7, mask, filt_hz, VP8_FILTER_SHIFT); + hz_out8 = HORIZ_2TAP_FILT_UH(src8, src8, mask, filt_hz, VP8_FILTER_SHIFT); + SLDI_B3_UH(hz_out2, hz_out4, hz_out6, hz_out0, hz_out2, hz_out4, hz_out1, + hz_out3, hz_out5, 8); + hz_out7 = (v8u16)__msa_pckod_d((v2i64)hz_out8, (v2i64)hz_out6); - 
ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); - ILVEV_B2_UB(hz_out4, hz_out5, hz_out6, hz_out7, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, - vec4, vec5, vec6, vec7); - SRARI_H4_UH(vec4, vec5, vec6, vec7, VP8_FILTER_SHIFT); - PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, - res0, res1, res2, res3); - ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride); - dst += (4 * dst_stride); - ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride); + ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); + ILVEV_B2_UB(hz_out4, hz_out5, hz_out6, hz_out7, vec2, vec3); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, vec4, + vec5, vec6, vec7); + SRARI_H4_UH(vec4, vec5, vec6, vec7, VP8_FILTER_SHIFT); + PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2, + res3); + ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride); + dst += (4 * dst_stride); + ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride); } static void common_hv_2ht_2vt_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - if (4 == height) - { - common_hv_2ht_2vt_4x4_msa(src, src_stride, dst, dst_stride, - filter_horiz, filter_vert); - } - else if (8 == height) - { - common_hv_2ht_2vt_4x8_msa(src, src_stride, dst, dst_stride, - filter_horiz, filter_vert); - } + int32_t height) { + if (4 == height) { + common_hv_2ht_2vt_4x4_msa(src, src_stride, dst, dst_stride, filter_horiz, + filter_vert); + } else if (8 == height) { + common_hv_2ht_2vt_4x8_msa(src, src_stride, dst, dst_stride, filter_horiz, + filter_vert); + } } static void common_hv_2ht_2vt_8x4_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, - const int8_t *filter_vert) -{ - v16i8 src0, src1, src2, src3, src4, mask, out0, out1; - v16u8 filt_hz, filt_vt, vec0, vec1, vec2, 
vec3; - v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3; - v8i16 filt; + const int8_t *filter_vert) { + v16i8 src0, src1, src2, src3, src4, mask, out0, out1; + v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3; + v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3; + v8i16 filt; - mask = LD_SB(&vp8_mc_filt_mask_arr[0]); + mask = LD_SB(&vp8_mc_filt_mask_arr[0]); - filt = LD_SH(filter_horiz); - filt_hz = (v16u8)__msa_splati_h(filt, 0); - filt = LD_SH(filter_vert); - filt_vt = (v16u8)__msa_splati_h(filt, 0); + filt = LD_SH(filter_horiz); + filt_hz = (v16u8)__msa_splati_h(filt, 0); + filt = LD_SH(filter_vert); + filt_vt = (v16u8)__msa_splati_h(filt, 0); - LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); - hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, VP8_FILTER_SHIFT); - hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, VP8_FILTER_SHIFT); - vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); - tmp0 = __msa_dotp_u_h(vec0, filt_vt); + hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, VP8_FILTER_SHIFT); + hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, VP8_FILTER_SHIFT); + vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); + tmp0 = __msa_dotp_u_h(vec0, filt_vt); - hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, VP8_FILTER_SHIFT); - vec1 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1); - tmp1 = __msa_dotp_u_h(vec1, filt_vt); + hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, VP8_FILTER_SHIFT); + vec1 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1); + tmp1 = __msa_dotp_u_h(vec1, filt_vt); - hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, VP8_FILTER_SHIFT); - vec2 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); - tmp2 = __msa_dotp_u_h(vec2, filt_vt); + hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, VP8_FILTER_SHIFT); + vec2 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); + tmp2 = __msa_dotp_u_h(vec2, filt_vt); - 
hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, VP8_FILTER_SHIFT); - vec3 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1); - tmp3 = __msa_dotp_u_h(vec3, filt_vt); + hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, VP8_FILTER_SHIFT); + vec3 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1); + tmp3 = __msa_dotp_u_h(vec3, filt_vt); - SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT); - PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1); - ST8x4_UB(out0, out1, dst, dst_stride); + SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, VP8_FILTER_SHIFT); + PCKEV_B2_SB(tmp1, tmp0, tmp3, tmp2, out0, out1); + ST8x4_UB(out0, out1, dst, dst_stride); } -static void common_hv_2ht_2vt_8x8mult_msa(uint8_t *RESTRICT src, - int32_t src_stride, - uint8_t *RESTRICT dst, - int32_t dst_stride, - const int8_t *filter_horiz, - const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, mask, out0, out1; - v16u8 filt_hz, filt_vt, vec0; - v8u16 hz_out0, hz_out1, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; - v8i16 filt; +static void common_hv_2ht_2vt_8x8mult_msa( + uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, + int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, + int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, mask, out0, out1; + v16u8 filt_hz, filt_vt, vec0; + v8u16 hz_out0, hz_out1, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; + v8i16 filt; - mask = LD_SB(&vp8_mc_filt_mask_arr[0]); + mask = LD_SB(&vp8_mc_filt_mask_arr[0]); - filt = LD_SH(filter_horiz); - filt_hz = (v16u8)__msa_splati_h(filt, 0); - filt = LD_SH(filter_vert); - filt_vt = (v16u8)__msa_splati_h(filt, 0); + filt = LD_SH(filter_horiz); + filt_hz = (v16u8)__msa_splati_h(filt, 0); + filt = LD_SH(filter_vert); + filt_vt = (v16u8)__msa_splati_h(filt, 0); - src0 = LD_SB(src); - src += src_stride; + src0 = LD_SB(src); + src += src_stride; - hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, 
VP8_FILTER_SHIFT); + hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, VP8_FILTER_SHIFT); - for (loop_cnt = (height >> 3); loop_cnt--;) - { - LD_SB4(src, src_stride, src1, src2, src3, src4); - src += (4 * src_stride); + for (loop_cnt = (height >> 3); loop_cnt--;) { + LD_SB4(src, src_stride, src1, src2, src3, src4); + src += (4 * src_stride); - hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, - VP8_FILTER_SHIFT); - vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); - tmp1 = __msa_dotp_u_h(vec0, filt_vt); + hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, VP8_FILTER_SHIFT); + vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); + tmp1 = __msa_dotp_u_h(vec0, filt_vt); - hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, - VP8_FILTER_SHIFT); - vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1); - tmp2 = __msa_dotp_u_h(vec0, filt_vt); + hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, VP8_FILTER_SHIFT); + vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1); + tmp2 = __msa_dotp_u_h(vec0, filt_vt); - SRARI_H2_UH(tmp1, tmp2, VP8_FILTER_SHIFT); + SRARI_H2_UH(tmp1, tmp2, VP8_FILTER_SHIFT); - hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, - VP8_FILTER_SHIFT); - vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); - tmp3 = __msa_dotp_u_h(vec0, filt_vt); + hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, VP8_FILTER_SHIFT); + vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); + tmp3 = __msa_dotp_u_h(vec0, filt_vt); - hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, - VP8_FILTER_SHIFT); - LD_SB4(src, src_stride, src1, src2, src3, src4); - src += (4 * src_stride); - vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1); - tmp4 = __msa_dotp_u_h(vec0, filt_vt); + hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, VP8_FILTER_SHIFT); + LD_SB4(src, src_stride, src1, src2, src3, src4); + src += (4 * src_stride); + vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, 
(v16i8)hz_out1); + tmp4 = __msa_dotp_u_h(vec0, filt_vt); - SRARI_H2_UH(tmp3, tmp4, VP8_FILTER_SHIFT); - PCKEV_B2_SB(tmp2, tmp1, tmp4, tmp3, out0, out1); - ST8x4_UB(out0, out1, dst, dst_stride); - dst += (4 * dst_stride); + SRARI_H2_UH(tmp3, tmp4, VP8_FILTER_SHIFT); + PCKEV_B2_SB(tmp2, tmp1, tmp4, tmp3, out0, out1); + ST8x4_UB(out0, out1, dst, dst_stride); + dst += (4 * dst_stride); - hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, - VP8_FILTER_SHIFT); - vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); - tmp5 = __msa_dotp_u_h(vec0, filt_vt); + hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, VP8_FILTER_SHIFT); + vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); + tmp5 = __msa_dotp_u_h(vec0, filt_vt); - hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, - VP8_FILTER_SHIFT); - vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1); - tmp6 = __msa_dotp_u_h(vec0, filt_vt); + hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, VP8_FILTER_SHIFT); + vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1); + tmp6 = __msa_dotp_u_h(vec0, filt_vt); - hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, - VP8_FILTER_SHIFT); - vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); - tmp7 = __msa_dotp_u_h(vec0, filt_vt); + hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, VP8_FILTER_SHIFT); + vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); + tmp7 = __msa_dotp_u_h(vec0, filt_vt); - hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, - VP8_FILTER_SHIFT); - vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1); - tmp8 = __msa_dotp_u_h(vec0, filt_vt); + hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, VP8_FILTER_SHIFT); + vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1); + tmp8 = __msa_dotp_u_h(vec0, filt_vt); - SRARI_H4_UH(tmp5, tmp6, tmp7, tmp8, VP8_FILTER_SHIFT); - PCKEV_B2_SB(tmp6, tmp5, tmp8, tmp7, out0, out1); - ST8x4_UB(out0, out1, dst, dst_stride); - dst += (4 * 
dst_stride); - } + SRARI_H4_UH(tmp5, tmp6, tmp7, tmp8, VP8_FILTER_SHIFT); + PCKEV_B2_SB(tmp6, tmp5, tmp8, tmp7, out0, out1); + ST8x4_UB(out0, out1, dst, dst_stride); + dst += (4 * dst_stride); + } } static void common_hv_2ht_2vt_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - if (4 == height) - { - common_hv_2ht_2vt_8x4_msa(src, src_stride, dst, dst_stride, - filter_horiz, filter_vert); - } - else - { - common_hv_2ht_2vt_8x8mult_msa(src, src_stride, dst, dst_stride, - filter_horiz, filter_vert, height); - } + int32_t height) { + if (4 == height) { + common_hv_2ht_2vt_8x4_msa(src, src_stride, dst, dst_stride, filter_horiz, + filter_vert); + } else { + common_hv_2ht_2vt_8x8mult_msa(src, src_stride, dst, dst_stride, + filter_horiz, filter_vert, height); + } } static void common_hv_2ht_2vt_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; - v16u8 filt_hz, filt_vt, vec0, vec1; - v8u16 tmp1, tmp2, hz_out0, hz_out1, hz_out2, hz_out3; - v8i16 filt; + int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; + v16u8 filt_hz, filt_vt, vec0, vec1; + v8u16 tmp1, tmp2, hz_out0, hz_out1, hz_out2, hz_out3; + v8i16 filt; - mask = LD_SB(&vp8_mc_filt_mask_arr[0]); + mask = LD_SB(&vp8_mc_filt_mask_arr[0]); - /* rearranging filter */ - filt = LD_SH(filter_horiz); - filt_hz = (v16u8)__msa_splati_h(filt, 0); - filt = LD_SH(filter_vert); - filt_vt = (v16u8)__msa_splati_h(filt, 0); + /* rearranging filter */ + filt = LD_SH(filter_horiz); + filt_hz = (v16u8)__msa_splati_h(filt, 0); + filt = LD_SH(filter_vert); + filt_vt = (v16u8)__msa_splati_h(filt, 0); - LD_SB2(src, 8, src0, src1); - src += src_stride; + LD_SB2(src, 8, src0, 
src1); + src += src_stride; - hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, VP8_FILTER_SHIFT); - hz_out2 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, VP8_FILTER_SHIFT); + hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, VP8_FILTER_SHIFT); + hz_out2 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, VP8_FILTER_SHIFT); - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src0, src2, src4, src6); - LD_SB4(src + 8, src_stride, src1, src3, src5, src7); - src += (4 * src_stride); + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src0, src2, src4, src6); + LD_SB4(src + 8, src_stride, src1, src3, src5, src7); + src += (4 * src_stride); - hz_out1 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, - VP8_FILTER_SHIFT); - hz_out3 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, - VP8_FILTER_SHIFT); - ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2); - SRARI_H2_UH(tmp1, tmp2, VP8_FILTER_SHIFT); - PCKEV_ST_SB(tmp1, tmp2, dst); - dst += dst_stride; + hz_out1 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, VP8_FILTER_SHIFT); + hz_out3 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, VP8_FILTER_SHIFT); + ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); + DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2); + SRARI_H2_UH(tmp1, tmp2, VP8_FILTER_SHIFT); + PCKEV_ST_SB(tmp1, tmp2, dst); + dst += dst_stride; - hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, - VP8_FILTER_SHIFT); - hz_out2 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, - VP8_FILTER_SHIFT); - ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2); - SRARI_H2_UH(tmp1, tmp2, VP8_FILTER_SHIFT); - PCKEV_ST_SB(tmp1, tmp2, dst); - dst += dst_stride; + hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, VP8_FILTER_SHIFT); + hz_out2 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, VP8_FILTER_SHIFT); + 
ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1); + DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2); + SRARI_H2_UH(tmp1, tmp2, VP8_FILTER_SHIFT); + PCKEV_ST_SB(tmp1, tmp2, dst); + dst += dst_stride; - hz_out1 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, - VP8_FILTER_SHIFT); - hz_out3 = HORIZ_2TAP_FILT_UH(src5, src5, mask, filt_hz, - VP8_FILTER_SHIFT); - ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2); - SRARI_H2_UH(tmp1, tmp2, VP8_FILTER_SHIFT); - PCKEV_ST_SB(tmp1, tmp2, dst); - dst += dst_stride; + hz_out1 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, VP8_FILTER_SHIFT); + hz_out3 = HORIZ_2TAP_FILT_UH(src5, src5, mask, filt_hz, VP8_FILTER_SHIFT); + ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); + DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2); + SRARI_H2_UH(tmp1, tmp2, VP8_FILTER_SHIFT); + PCKEV_ST_SB(tmp1, tmp2, dst); + dst += dst_stride; - hz_out0 = HORIZ_2TAP_FILT_UH(src6, src6, mask, filt_hz, - VP8_FILTER_SHIFT); - hz_out2 = HORIZ_2TAP_FILT_UH(src7, src7, mask, filt_hz, - VP8_FILTER_SHIFT); - ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1); - DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2); - SRARI_H2_UH(tmp1, tmp2, VP8_FILTER_SHIFT); - PCKEV_ST_SB(tmp1, tmp2, dst); - dst += dst_stride; - } + hz_out0 = HORIZ_2TAP_FILT_UH(src6, src6, mask, filt_hz, VP8_FILTER_SHIFT); + hz_out2 = HORIZ_2TAP_FILT_UH(src7, src7, mask, filt_hz, VP8_FILTER_SHIFT); + ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1); + DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp1, tmp2); + SRARI_H2_UH(tmp1, tmp2, VP8_FILTER_SHIFT); + PCKEV_ST_SB(tmp1, tmp2, dst); + dst += dst_stride; + } } void vp8_bilinear_predict4x4_msa(uint8_t *RESTRICT src, int32_t src_stride, int32_t xoffset, int32_t yoffset, - uint8_t *RESTRICT dst, int32_t dst_stride) -{ - const int8_t *h_filter = vp8_bilinear_filters_msa[xoffset - 1]; - const int8_t *v_filter = 
vp8_bilinear_filters_msa[yoffset - 1]; + uint8_t *RESTRICT dst, int32_t dst_stride) { + const int8_t *h_filter = vp8_bilinear_filters_msa[xoffset - 1]; + const int8_t *v_filter = vp8_bilinear_filters_msa[yoffset - 1]; - if (yoffset) - { - if (xoffset) - { - common_hv_2ht_2vt_4w_msa(src, src_stride, dst, dst_stride, - h_filter, v_filter, 4); - } - else - { - common_vt_2t_4w_msa(src, src_stride, dst, dst_stride, v_filter, 4); - } + if (yoffset) { + if (xoffset) { + common_hv_2ht_2vt_4w_msa(src, src_stride, dst, dst_stride, h_filter, + v_filter, 4); + } else { + common_vt_2t_4w_msa(src, src_stride, dst, dst_stride, v_filter, 4); } - else - { - if (xoffset) - { - common_hz_2t_4w_msa(src, src_stride, dst, dst_stride, h_filter, 4); - } - else - { - uint32_t tp0, tp1, tp2, tp3; + } else { + if (xoffset) { + common_hz_2t_4w_msa(src, src_stride, dst, dst_stride, h_filter, 4); + } else { + uint32_t tp0, tp1, tp2, tp3; - LW4(src, src_stride, tp0, tp1, tp2, tp3); - SW4(tp0, tp1, tp2, tp3, dst, dst_stride); - } + LW4(src, src_stride, tp0, tp1, tp2, tp3); + SW4(tp0, tp1, tp2, tp3, dst, dst_stride); } + } } void vp8_bilinear_predict8x4_msa(uint8_t *RESTRICT src, int32_t src_stride, int32_t xoffset, int32_t yoffset, - uint8_t *RESTRICT dst, int32_t dst_stride) -{ - const int8_t *h_filter = vp8_bilinear_filters_msa[xoffset - 1]; - const int8_t *v_filter = vp8_bilinear_filters_msa[yoffset - 1]; + uint8_t *RESTRICT dst, int32_t dst_stride) { + const int8_t *h_filter = vp8_bilinear_filters_msa[xoffset - 1]; + const int8_t *v_filter = vp8_bilinear_filters_msa[yoffset - 1]; - if (yoffset) - { - if (xoffset) - { - common_hv_2ht_2vt_8w_msa(src, src_stride, dst, dst_stride, - h_filter, v_filter, 4); - } - else - { - common_vt_2t_8w_msa(src, src_stride, dst, dst_stride, v_filter, 4); - } + if (yoffset) { + if (xoffset) { + common_hv_2ht_2vt_8w_msa(src, src_stride, dst, dst_stride, h_filter, + v_filter, 4); + } else { + common_vt_2t_8w_msa(src, src_stride, dst, dst_stride, v_filter, 4); } - 
else - { - if (xoffset) - { - common_hz_2t_8w_msa(src, src_stride, dst, dst_stride, h_filter, 4); - } - else - { - vp8_copy_mem8x4(src, src_stride, dst, dst_stride); - } + } else { + if (xoffset) { + common_hz_2t_8w_msa(src, src_stride, dst, dst_stride, h_filter, 4); + } else { + vp8_copy_mem8x4(src, src_stride, dst, dst_stride); } + } } void vp8_bilinear_predict8x8_msa(uint8_t *RESTRICT src, int32_t src_stride, int32_t xoffset, int32_t yoffset, - uint8_t *RESTRICT dst, int32_t dst_stride) -{ - const int8_t *h_filter = vp8_bilinear_filters_msa[xoffset - 1]; - const int8_t *v_filter = vp8_bilinear_filters_msa[yoffset - 1]; + uint8_t *RESTRICT dst, int32_t dst_stride) { + const int8_t *h_filter = vp8_bilinear_filters_msa[xoffset - 1]; + const int8_t *v_filter = vp8_bilinear_filters_msa[yoffset - 1]; - if (yoffset) - { - if (xoffset) - { - common_hv_2ht_2vt_8w_msa(src, src_stride, dst, dst_stride, - h_filter, v_filter, 8); - } - else - { - common_vt_2t_8w_msa(src, src_stride, dst, dst_stride, v_filter, 8); - } + if (yoffset) { + if (xoffset) { + common_hv_2ht_2vt_8w_msa(src, src_stride, dst, dst_stride, h_filter, + v_filter, 8); + } else { + common_vt_2t_8w_msa(src, src_stride, dst, dst_stride, v_filter, 8); } - else - { - if (xoffset) - { - common_hz_2t_8w_msa(src, src_stride, dst, dst_stride, h_filter, 8); - } - else - { - vp8_copy_mem8x8(src, src_stride, dst, dst_stride); - } + } else { + if (xoffset) { + common_hz_2t_8w_msa(src, src_stride, dst, dst_stride, h_filter, 8); + } else { + vp8_copy_mem8x8(src, src_stride, dst, dst_stride); } + } } void vp8_bilinear_predict16x16_msa(uint8_t *RESTRICT src, int32_t src_stride, int32_t xoffset, int32_t yoffset, - uint8_t *RESTRICT dst, int32_t dst_stride) -{ - const int8_t *h_filter = vp8_bilinear_filters_msa[xoffset - 1]; - const int8_t *v_filter = vp8_bilinear_filters_msa[yoffset - 1]; + uint8_t *RESTRICT dst, int32_t dst_stride) { + const int8_t *h_filter = vp8_bilinear_filters_msa[xoffset - 1]; + const int8_t *v_filter 
= vp8_bilinear_filters_msa[yoffset - 1]; - if (yoffset) - { - if (xoffset) - { - common_hv_2ht_2vt_16w_msa(src, src_stride, dst, dst_stride, - h_filter, v_filter, 16); - } - else - { - common_vt_2t_16w_msa(src, src_stride, dst, dst_stride, v_filter, - 16); - } + if (yoffset) { + if (xoffset) { + common_hv_2ht_2vt_16w_msa(src, src_stride, dst, dst_stride, h_filter, + v_filter, 16); + } else { + common_vt_2t_16w_msa(src, src_stride, dst, dst_stride, v_filter, 16); } - else - { - if (xoffset) - { - common_hz_2t_16w_msa(src, src_stride, dst, dst_stride, h_filter, - 16); - } - else - { - vp8_copy_mem16x16(src, src_stride, dst, dst_stride); - } + } else { + if (xoffset) { + common_hz_2t_16w_msa(src, src_stride, dst, dst_stride, h_filter, 16); + } else { + vp8_copy_mem16x16(src, src_stride, dst, dst_stride); } + } } diff --git a/libs/libvpx/vp8/common/mips/msa/copymem_msa.c b/libs/libvpx/vp8/common/mips/msa/copymem_msa.c index 002a5ed91d..357c99b8b6 100644 --- a/libs/libvpx/vp8/common/mips/msa/copymem_msa.c +++ b/libs/libvpx/vp8/common/mips/msa/copymem_msa.c @@ -11,60 +11,52 @@ #include "./vp8_rtcd.h" #include "vp8/common/mips/msa/vp8_macros_msa.h" -static void copy_8x4_msa(uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride) -{ - uint64_t src0, src1, src2, src3; +static void copy_8x4_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, + int32_t dst_stride) { + uint64_t src0, src1, src2, src3; - LD4(src, src_stride, src0, src1, src2, src3); - SD4(src0, src1, src2, src3, dst, dst_stride); + LD4(src, src_stride, src0, src1, src2, src3); + SD4(src0, src1, src2, src3, dst, dst_stride); } -static void copy_8x8_msa(uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride) -{ - uint64_t src0, src1, src2, src3; +static void copy_8x8_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, + int32_t dst_stride) { + uint64_t src0, src1, src2, src3; - LD4(src, src_stride, src0, src1, src2, src3); - src += (4 * src_stride); - SD4(src0, src1, src2, src3, dst, 
dst_stride); - dst += (4 * dst_stride); + LD4(src, src_stride, src0, src1, src2, src3); + src += (4 * src_stride); + SD4(src0, src1, src2, src3, dst, dst_stride); + dst += (4 * dst_stride); - LD4(src, src_stride, src0, src1, src2, src3); - SD4(src0, src1, src2, src3, dst, dst_stride); + LD4(src, src_stride, src0, src1, src2, src3); + SD4(src0, src1, src2, src3, dst, dst_stride); } -static void copy_16x16_msa(uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride) -{ - v16u8 src0, src1, src2, src3, src4, src5, src6, src7; - v16u8 src8, src9, src10, src11, src12, src13, src14, src15; +static void copy_16x16_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, + int32_t dst_stride) { + v16u8 src0, src1, src2, src3, src4, src5, src6, src7; + v16u8 src8, src9, src10, src11, src12, src13, src14, src15; - LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); - src += (8 * src_stride); - LD_UB8(src, src_stride, src8, src9, src10, src11, src12, src13, src14, - src15); + LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); + src += (8 * src_stride); + LD_UB8(src, src_stride, src8, src9, src10, src11, src12, src13, src14, src15); - ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, dst_stride); - dst += (8 * dst_stride); - ST_UB8(src8, src9, src10, src11, src12, src13, src14, src15, dst, - dst_stride); + ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, dst_stride); + dst += (8 * dst_stride); + ST_UB8(src8, src9, src10, src11, src12, src13, src14, src15, dst, dst_stride); } -void vp8_copy_mem16x16_msa(uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride) -{ - copy_16x16_msa(src, src_stride, dst, dst_stride); +void vp8_copy_mem16x16_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, + int32_t dst_stride) { + copy_16x16_msa(src, src_stride, dst, dst_stride); } -void vp8_copy_mem8x8_msa(uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride) -{ - copy_8x8_msa(src, src_stride, 
dst, dst_stride); +void vp8_copy_mem8x8_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, + int32_t dst_stride) { + copy_8x8_msa(src, src_stride, dst, dst_stride); } -void vp8_copy_mem8x4_msa(uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride) -{ - copy_8x4_msa(src, src_stride, dst, dst_stride); +void vp8_copy_mem8x4_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, + int32_t dst_stride) { + copy_8x4_msa(src, src_stride, dst, dst_stride); } diff --git a/libs/libvpx/vp8/common/mips/msa/idct_msa.c b/libs/libvpx/vp8/common/mips/msa/idct_msa.c index e537a3ffc9..e1759c875e 100644 --- a/libs/libvpx/vp8/common/mips/msa/idct_msa.c +++ b/libs/libvpx/vp8/common/mips/msa/idct_msa.c @@ -15,443 +15,401 @@ static const int32_t cospi8sqrt2minus1 = 20091; static const int32_t sinpi8sqrt2 = 35468; -#define TRANSPOSE_TWO_4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \ -{ \ - v8i16 s4_m, s5_m, s6_m, s7_m; \ - \ - TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, s4_m, s5_m, s6_m, s7_m); \ - ILVR_D2_SH(s6_m, s4_m, s7_m, s5_m, out0, out2); \ - out1 = (v8i16)__msa_ilvl_d((v2i64)s6_m, (v2i64)s4_m); \ - out3 = (v8i16)__msa_ilvl_d((v2i64)s7_m, (v2i64)s5_m); \ -} +#define TRANSPOSE_TWO_4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + v8i16 s4_m, s5_m, s6_m, s7_m; \ + \ + TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, s4_m, s5_m, s6_m, s7_m); \ + ILVR_D2_SH(s6_m, s4_m, s7_m, s5_m, out0, out2); \ + out1 = (v8i16)__msa_ilvl_d((v2i64)s6_m, (v2i64)s4_m); \ + out3 = (v8i16)__msa_ilvl_d((v2i64)s7_m, (v2i64)s5_m); \ + } -#define EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in) \ -({ \ - v8i16 out_m; \ - v8i16 zero_m = { 0 }; \ - v4i32 tmp1_m, tmp2_m; \ - v4i32 sinpi8_sqrt2_m = __msa_fill_w(sinpi8sqrt2); \ - \ - ILVRL_H2_SW(in, zero_m, tmp1_m, tmp2_m); \ - tmp1_m >>= 16; \ - tmp2_m >>= 16; \ - tmp1_m = (tmp1_m * sinpi8_sqrt2_m) >> 16; \ - tmp2_m = (tmp2_m * sinpi8_sqrt2_m) >> 16; \ - out_m = __msa_pckev_h((v8i16)tmp2_m, (v8i16)tmp1_m); \ - \ - out_m; \ -}) +#define 
EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in) \ + ({ \ + v8i16 out_m; \ + v8i16 zero_m = { 0 }; \ + v4i32 tmp1_m, tmp2_m; \ + v4i32 sinpi8_sqrt2_m = __msa_fill_w(sinpi8sqrt2); \ + \ + ILVRL_H2_SW(in, zero_m, tmp1_m, tmp2_m); \ + tmp1_m >>= 16; \ + tmp2_m >>= 16; \ + tmp1_m = (tmp1_m * sinpi8_sqrt2_m) >> 16; \ + tmp2_m = (tmp2_m * sinpi8_sqrt2_m) >> 16; \ + out_m = __msa_pckev_h((v8i16)tmp2_m, (v8i16)tmp1_m); \ + \ + out_m; \ + }) -#define VP8_IDCT_1D_H(in0, in1, in2, in3, out0, out1, out2, out3) \ -{ \ - v8i16 a1_m, b1_m, c1_m, d1_m; \ - v8i16 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m; \ - v8i16 const_cospi8sqrt2minus1_m; \ - \ - const_cospi8sqrt2minus1_m = __msa_fill_h(cospi8sqrt2minus1); \ - a1_m = in0 + in2; \ - b1_m = in0 - in2; \ - c_tmp1_m = EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in1); \ - c_tmp2_m = __msa_mul_q_h(in3, const_cospi8sqrt2minus1_m); \ - c_tmp2_m = c_tmp2_m >> 1; \ - c_tmp2_m = in3 + c_tmp2_m; \ - c1_m = c_tmp1_m - c_tmp2_m; \ - d_tmp1_m = __msa_mul_q_h(in1, const_cospi8sqrt2minus1_m); \ - d_tmp1_m = d_tmp1_m >> 1; \ - d_tmp1_m = in1 + d_tmp1_m; \ - d_tmp2_m = EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in3); \ - d1_m = d_tmp1_m + d_tmp2_m; \ - BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \ -} +#define VP8_IDCT_1D_H(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + v8i16 a1_m, b1_m, c1_m, d1_m; \ + v8i16 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m; \ + v8i16 const_cospi8sqrt2minus1_m; \ + \ + const_cospi8sqrt2minus1_m = __msa_fill_h(cospi8sqrt2minus1); \ + a1_m = in0 + in2; \ + b1_m = in0 - in2; \ + c_tmp1_m = EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in1); \ + c_tmp2_m = __msa_mul_q_h(in3, const_cospi8sqrt2minus1_m); \ + c_tmp2_m = c_tmp2_m >> 1; \ + c_tmp2_m = in3 + c_tmp2_m; \ + c1_m = c_tmp1_m - c_tmp2_m; \ + d_tmp1_m = __msa_mul_q_h(in1, const_cospi8sqrt2minus1_m); \ + d_tmp1_m = d_tmp1_m >> 1; \ + d_tmp1_m = in1 + d_tmp1_m; \ + d_tmp2_m = EXPAND_TO_H_MULTIPLY_SINPI8SQRT2_PCK_TO_W(in3); \ + d1_m = d_tmp1_m + d_tmp2_m; \ + 
BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \ + } -#define VP8_IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3) \ -{ \ - v4i32 a1_m, b1_m, c1_m, d1_m; \ - v4i32 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m; \ - v4i32 const_cospi8sqrt2minus1_m, sinpi8_sqrt2_m; \ - \ - const_cospi8sqrt2minus1_m = __msa_fill_w(cospi8sqrt2minus1); \ - sinpi8_sqrt2_m = __msa_fill_w(sinpi8sqrt2); \ - a1_m = in0 + in2; \ - b1_m = in0 - in2; \ - c_tmp1_m = (in1 * sinpi8_sqrt2_m) >> 16; \ - c_tmp2_m = in3 + ((in3 * const_cospi8sqrt2minus1_m) >> 16); \ - c1_m = c_tmp1_m - c_tmp2_m; \ - d_tmp1_m = in1 + ((in1 * const_cospi8sqrt2minus1_m) >> 16); \ - d_tmp2_m = (in3 * sinpi8_sqrt2_m) >> 16; \ - d1_m = d_tmp1_m + d_tmp2_m; \ - BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \ -} +#define VP8_IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + v4i32 a1_m, b1_m, c1_m, d1_m; \ + v4i32 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m; \ + v4i32 const_cospi8sqrt2minus1_m, sinpi8_sqrt2_m; \ + \ + const_cospi8sqrt2minus1_m = __msa_fill_w(cospi8sqrt2minus1); \ + sinpi8_sqrt2_m = __msa_fill_w(sinpi8sqrt2); \ + a1_m = in0 + in2; \ + b1_m = in0 - in2; \ + c_tmp1_m = (in1 * sinpi8_sqrt2_m) >> 16; \ + c_tmp2_m = in3 + ((in3 * const_cospi8sqrt2minus1_m) >> 16); \ + c1_m = c_tmp1_m - c_tmp2_m; \ + d_tmp1_m = in1 + ((in1 * const_cospi8sqrt2minus1_m) >> 16); \ + d_tmp2_m = (in3 * sinpi8_sqrt2_m) >> 16; \ + d1_m = d_tmp1_m + d_tmp2_m; \ + BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \ + } static void idct4x4_addblk_msa(int16_t *input, uint8_t *pred, - int32_t pred_stride, - uint8_t *dest, int32_t dest_stride) -{ - v8i16 input0, input1; - v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; - v4i32 res0, res1, res2, res3; - v16i8 zero = { 0 }; - v16i8 pred0, pred1, pred2, pred3, dest0, dest1, dest2, dest3; - v16i8 mask = { 0, 4, 8, 12, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31 }; + int32_t pred_stride, uint8_t *dest, + int32_t dest_stride) { + 
v8i16 input0, input1; + v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; + v4i32 res0, res1, res2, res3; + v16i8 zero = { 0 }; + v16i8 pred0, pred1, pred2, pred3, dest0, dest1, dest2, dest3; + v16i8 mask = { 0, 4, 8, 12, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 }; - LD_SH2(input, 8, input0, input1); - UNPCK_SH_SW(input0, in0, in1); - UNPCK_SH_SW(input1, in2, in3); - VP8_IDCT_1D_W(in0, in1, in2, in3, hz0, hz1, hz2, hz3); - TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3); - VP8_IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3); - SRARI_W4_SW(vt0, vt1, vt2, vt3, 3); - TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3); - LD_SB4(pred, pred_stride, pred0, pred1, pred2, pred3); - ILVR_B4_SW(zero, pred0, zero, pred1, zero, pred2, zero, pred3, res0, res1, - res2, res3); - ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3, res0, res1, - res2, res3); - ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3); - res0 = CLIP_SW_0_255(res0); - res1 = CLIP_SW_0_255(res1); - res2 = CLIP_SW_0_255(res2); - res3 = CLIP_SW_0_255(res3); - LD_SB4(dest, dest_stride, dest0, dest1, dest2, dest3); - VSHF_B2_SB(res0, dest0, res1, dest1, mask, mask, dest0, dest1); - VSHF_B2_SB(res2, dest2, res3, dest3, mask, mask, dest2, dest3); - ST_SB4(dest0, dest1, dest2, dest3, dest, dest_stride); + LD_SH2(input, 8, input0, input1); + UNPCK_SH_SW(input0, in0, in1); + UNPCK_SH_SW(input1, in2, in3); + VP8_IDCT_1D_W(in0, in1, in2, in3, hz0, hz1, hz2, hz3); + TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3); + VP8_IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3); + SRARI_W4_SW(vt0, vt1, vt2, vt3, 3); + TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3); + LD_SB4(pred, pred_stride, pred0, pred1, pred2, pred3); + ILVR_B4_SW(zero, pred0, zero, pred1, zero, pred2, zero, pred3, res0, res1, + res2, res3); + ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3, res0, res1, res2, + res3); + ADD4(res0, vt0, res1, vt1, res2, vt2, 
res3, vt3, res0, res1, res2, res3); + res0 = CLIP_SW_0_255(res0); + res1 = CLIP_SW_0_255(res1); + res2 = CLIP_SW_0_255(res2); + res3 = CLIP_SW_0_255(res3); + LD_SB4(dest, dest_stride, dest0, dest1, dest2, dest3); + VSHF_B2_SB(res0, dest0, res1, dest1, mask, mask, dest0, dest1); + VSHF_B2_SB(res2, dest2, res3, dest3, mask, mask, dest2, dest3); + ST_SB4(dest0, dest1, dest2, dest3, dest, dest_stride); } static void idct4x4_addconst_msa(int16_t in_dc, uint8_t *pred, - int32_t pred_stride, - uint8_t *dest, int32_t dest_stride) -{ - v8i16 vec; - v8i16 res0, res1, res2, res3; - v16i8 zero = { 0 }; - v16i8 pred0, pred1, pred2, pred3, dest0, dest1, dest2, dest3; - v16i8 mask = { 0, 2, 4, 6, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 }; + int32_t pred_stride, uint8_t *dest, + int32_t dest_stride) { + v8i16 vec; + v8i16 res0, res1, res2, res3; + v16i8 zero = { 0 }; + v16i8 pred0, pred1, pred2, pred3, dest0, dest1, dest2, dest3; + v16i8 mask = { 0, 2, 4, 6, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 }; - vec = __msa_fill_h(in_dc); - vec = __msa_srari_h(vec, 3); - LD_SB4(pred, pred_stride, pred0, pred1, pred2, pred3); - ILVR_B4_SH(zero, pred0, zero, pred1, zero, pred2, zero, pred3, res0, res1, - res2, res3); - ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3); - CLIP_SH4_0_255(res0, res1, res2, res3); - LD_SB4(dest, dest_stride, dest0, dest1, dest2, dest3); - VSHF_B2_SB(res0, dest0, res1, dest1, mask, mask, dest0, dest1); - VSHF_B2_SB(res2, dest2, res3, dest3, mask, mask, dest2, dest3); - ST_SB4(dest0, dest1, dest2, dest3, dest, dest_stride); + vec = __msa_fill_h(in_dc); + vec = __msa_srari_h(vec, 3); + LD_SB4(pred, pred_stride, pred0, pred1, pred2, pred3); + ILVR_B4_SH(zero, pred0, zero, pred1, zero, pred2, zero, pred3, res0, res1, + res2, res3); + ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3); + CLIP_SH4_0_255(res0, res1, res2, res3); + LD_SB4(dest, dest_stride, dest0, dest1, dest2, dest3); + VSHF_B2_SB(res0, dest0, 
res1, dest1, mask, mask, dest0, dest1); + VSHF_B2_SB(res2, dest2, res3, dest3, mask, mask, dest2, dest3); + ST_SB4(dest0, dest1, dest2, dest3, dest, dest_stride); } -void vp8_short_inv_walsh4x4_msa(int16_t *input, int16_t *mb_dq_coeff) -{ - v8i16 input0, input1; - v4i32 in0, in1, in2, in3, a1, b1, c1, d1; - v4i32 hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; +void vp8_short_inv_walsh4x4_msa(int16_t *input, int16_t *mb_dq_coeff) { + v8i16 input0, input1; + v4i32 in0, in1, in2, in3, a1, b1, c1, d1; + v4i32 hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; - LD_SH2(input, 8, input0, input1); - UNPCK_SH_SW(input0, in0, in1); - UNPCK_SH_SW(input1, in2, in3); - BUTTERFLY_4(in0, in1, in2, in3, a1, b1, c1, d1); - BUTTERFLY_4(a1, d1, c1, b1, hz0, hz1, hz3, hz2); - TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3); - BUTTERFLY_4(hz0, hz1, hz2, hz3, a1, b1, c1, d1); - BUTTERFLY_4(a1, d1, c1, b1, vt0, vt1, vt3, vt2); - ADD4(vt0, 3, vt1, 3, vt2, 3, vt3, 3, vt0, vt1, vt2, vt3); - SRA_4V(vt0, vt1, vt2, vt3, 3); - mb_dq_coeff[0] = __msa_copy_s_h((v8i16)vt0, 0); - mb_dq_coeff[16] = __msa_copy_s_h((v8i16)vt1, 0); - mb_dq_coeff[32] = __msa_copy_s_h((v8i16)vt2, 0); - mb_dq_coeff[48] = __msa_copy_s_h((v8i16)vt3, 0); - mb_dq_coeff[64] = __msa_copy_s_h((v8i16)vt0, 2); - mb_dq_coeff[80] = __msa_copy_s_h((v8i16)vt1, 2); - mb_dq_coeff[96] = __msa_copy_s_h((v8i16)vt2, 2); - mb_dq_coeff[112] = __msa_copy_s_h((v8i16)vt3, 2); - mb_dq_coeff[128] = __msa_copy_s_h((v8i16)vt0, 4); - mb_dq_coeff[144] = __msa_copy_s_h((v8i16)vt1, 4); - mb_dq_coeff[160] = __msa_copy_s_h((v8i16)vt2, 4); - mb_dq_coeff[176] = __msa_copy_s_h((v8i16)vt3, 4); - mb_dq_coeff[192] = __msa_copy_s_h((v8i16)vt0, 6); - mb_dq_coeff[208] = __msa_copy_s_h((v8i16)vt1, 6); - mb_dq_coeff[224] = __msa_copy_s_h((v8i16)vt2, 6); - mb_dq_coeff[240] = __msa_copy_s_h((v8i16)vt3, 6); + LD_SH2(input, 8, input0, input1); + UNPCK_SH_SW(input0, in0, in1); + UNPCK_SH_SW(input1, in2, in3); + BUTTERFLY_4(in0, in1, in2, in3, a1, b1, c1, d1); + BUTTERFLY_4(a1, 
d1, c1, b1, hz0, hz1, hz3, hz2); + TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3); + BUTTERFLY_4(hz0, hz1, hz2, hz3, a1, b1, c1, d1); + BUTTERFLY_4(a1, d1, c1, b1, vt0, vt1, vt3, vt2); + ADD4(vt0, 3, vt1, 3, vt2, 3, vt3, 3, vt0, vt1, vt2, vt3); + SRA_4V(vt0, vt1, vt2, vt3, 3); + mb_dq_coeff[0] = __msa_copy_s_h((v8i16)vt0, 0); + mb_dq_coeff[16] = __msa_copy_s_h((v8i16)vt1, 0); + mb_dq_coeff[32] = __msa_copy_s_h((v8i16)vt2, 0); + mb_dq_coeff[48] = __msa_copy_s_h((v8i16)vt3, 0); + mb_dq_coeff[64] = __msa_copy_s_h((v8i16)vt0, 2); + mb_dq_coeff[80] = __msa_copy_s_h((v8i16)vt1, 2); + mb_dq_coeff[96] = __msa_copy_s_h((v8i16)vt2, 2); + mb_dq_coeff[112] = __msa_copy_s_h((v8i16)vt3, 2); + mb_dq_coeff[128] = __msa_copy_s_h((v8i16)vt0, 4); + mb_dq_coeff[144] = __msa_copy_s_h((v8i16)vt1, 4); + mb_dq_coeff[160] = __msa_copy_s_h((v8i16)vt2, 4); + mb_dq_coeff[176] = __msa_copy_s_h((v8i16)vt3, 4); + mb_dq_coeff[192] = __msa_copy_s_h((v8i16)vt0, 6); + mb_dq_coeff[208] = __msa_copy_s_h((v8i16)vt1, 6); + mb_dq_coeff[224] = __msa_copy_s_h((v8i16)vt2, 6); + mb_dq_coeff[240] = __msa_copy_s_h((v8i16)vt3, 6); } static void dequant_idct4x4_addblk_msa(int16_t *input, int16_t *dequant_input, - uint8_t *dest, int32_t dest_stride) -{ - v8i16 input0, input1, dequant_in0, dequant_in1, mul0, mul1; - v8i16 in0, in1, in2, in3; - v8i16 hz0_h, hz1_h, hz2_h, hz3_h; - v16i8 dest0, dest1, dest2, dest3; - v4i32 hz0_w, hz1_w, hz2_w, hz3_w; - v4i32 vt0, vt1, vt2, vt3, res0, res1, res2, res3; - v2i64 zero = { 0 }; - v16i8 mask = { 0, 4, 8, 12, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31 }; + uint8_t *dest, int32_t dest_stride) { + v8i16 input0, input1, dequant_in0, dequant_in1, mul0, mul1; + v8i16 in0, in1, in2, in3; + v8i16 hz0_h, hz1_h, hz2_h, hz3_h; + v16i8 dest0, dest1, dest2, dest3; + v4i32 hz0_w, hz1_w, hz2_w, hz3_w; + v4i32 vt0, vt1, vt2, vt3, res0, res1, res2, res3; + v2i64 zero = { 0 }; + v16i8 mask = { 0, 4, 8, 12, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 }; - LD_SH2(input, 
8, input0, input1); - LD_SH2(dequant_input, 8, dequant_in0, dequant_in1); - MUL2(input0, dequant_in0, input1, dequant_in1, mul0, mul1); - PCKEV_D2_SH(zero, mul0, zero, mul1, in0, in2); - PCKOD_D2_SH(zero, mul0, zero, mul1, in1, in3); - VP8_IDCT_1D_H(in0, in1, in2, in3, hz0_h, hz1_h, hz2_h, hz3_h); - PCKEV_D2_SH(hz1_h, hz0_h, hz3_h, hz2_h, mul0, mul1); - UNPCK_SH_SW(mul0, hz0_w, hz1_w); - UNPCK_SH_SW(mul1, hz2_w, hz3_w); - TRANSPOSE4x4_SW_SW(hz0_w, hz1_w, hz2_w, hz3_w, hz0_w, hz1_w, hz2_w, hz3_w); - VP8_IDCT_1D_W(hz0_w, hz1_w, hz2_w, hz3_w, vt0, vt1, vt2, vt3); - SRARI_W4_SW(vt0, vt1, vt2, vt3, 3); - TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3); - LD_SB4(dest, dest_stride, dest0, dest1, dest2, dest3); - ILVR_B4_SW(zero, dest0, zero, dest1, zero, dest2, zero, dest3, res0, res1, - res2, res3); - ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3, res0, res1, - res2, res3); - ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3); - res0 = CLIP_SW_0_255(res0); - res1 = CLIP_SW_0_255(res1); - res2 = CLIP_SW_0_255(res2); - res3 = CLIP_SW_0_255(res3); - VSHF_B2_SB(res0, dest0, res1, dest1, mask, mask, dest0, dest1); - VSHF_B2_SB(res2, dest2, res3, dest3, mask, mask, dest2, dest3); - ST_SB4(dest0, dest1, dest2, dest3, dest, dest_stride); + LD_SH2(input, 8, input0, input1); + LD_SH2(dequant_input, 8, dequant_in0, dequant_in1); + MUL2(input0, dequant_in0, input1, dequant_in1, mul0, mul1); + PCKEV_D2_SH(zero, mul0, zero, mul1, in0, in2); + PCKOD_D2_SH(zero, mul0, zero, mul1, in1, in3); + VP8_IDCT_1D_H(in0, in1, in2, in3, hz0_h, hz1_h, hz2_h, hz3_h); + PCKEV_D2_SH(hz1_h, hz0_h, hz3_h, hz2_h, mul0, mul1); + UNPCK_SH_SW(mul0, hz0_w, hz1_w); + UNPCK_SH_SW(mul1, hz2_w, hz3_w); + TRANSPOSE4x4_SW_SW(hz0_w, hz1_w, hz2_w, hz3_w, hz0_w, hz1_w, hz2_w, hz3_w); + VP8_IDCT_1D_W(hz0_w, hz1_w, hz2_w, hz3_w, vt0, vt1, vt2, vt3); + SRARI_W4_SW(vt0, vt1, vt2, vt3, 3); + TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3); + LD_SB4(dest, dest_stride, 
dest0, dest1, dest2, dest3); + ILVR_B4_SW(zero, dest0, zero, dest1, zero, dest2, zero, dest3, res0, res1, + res2, res3); + ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3, res0, res1, res2, + res3); + ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3); + res0 = CLIP_SW_0_255(res0); + res1 = CLIP_SW_0_255(res1); + res2 = CLIP_SW_0_255(res2); + res3 = CLIP_SW_0_255(res3); + VSHF_B2_SB(res0, dest0, res1, dest1, mask, mask, dest0, dest1); + VSHF_B2_SB(res2, dest2, res3, dest3, mask, mask, dest2, dest3); + ST_SB4(dest0, dest1, dest2, dest3, dest, dest_stride); } static void dequant_idct4x4_addblk_2x_msa(int16_t *input, - int16_t *dequant_input, - uint8_t *dest, int32_t dest_stride) -{ - v16u8 dest0, dest1, dest2, dest3; - v8i16 in0, in1, in2, in3; - v8i16 mul0, mul1, mul2, mul3, dequant_in0, dequant_in1; - v8i16 hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; - v8i16 res0, res1, res2, res3; - v4i32 hz0l, hz1l, hz2l, hz3l, hz0r, hz1r, hz2r, hz3r; - v4i32 vt0l, vt1l, vt2l, vt3l, vt0r, vt1r, vt2r, vt3r; - v16i8 zero = { 0 }; + int16_t *dequant_input, uint8_t *dest, + int32_t dest_stride) { + v16u8 dest0, dest1, dest2, dest3; + v8i16 in0, in1, in2, in3; + v8i16 mul0, mul1, mul2, mul3, dequant_in0, dequant_in1; + v8i16 hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; + v8i16 res0, res1, res2, res3; + v4i32 hz0l, hz1l, hz2l, hz3l, hz0r, hz1r, hz2r, hz3r; + v4i32 vt0l, vt1l, vt2l, vt3l, vt0r, vt1r, vt2r, vt3r; + v16i8 zero = { 0 }; - LD_SH4(input, 8, in0, in1, in2, in3); - LD_SH2(dequant_input, 8, dequant_in0, dequant_in1); - MUL4(in0, dequant_in0, in1, dequant_in1, in2, dequant_in0, in3, dequant_in1, - mul0, mul1, mul2, mul3); - PCKEV_D2_SH(mul2, mul0, mul3, mul1, in0, in2); - PCKOD_D2_SH(mul2, mul0, mul3, mul1, in1, in3); - VP8_IDCT_1D_H(in0, in1, in2, in3, hz0, hz1, hz2, hz3); - TRANSPOSE_TWO_4x4_H(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3); - UNPCK_SH_SW(hz0, hz0r, hz0l); - UNPCK_SH_SW(hz1, hz1r, hz1l); - UNPCK_SH_SW(hz2, hz2r, hz2l); - UNPCK_SH_SW(hz3, hz3r, 
hz3l); - VP8_IDCT_1D_W(hz0l, hz1l, hz2l, hz3l, vt0l, vt1l, vt2l, vt3l); - SRARI_W4_SW(vt0l, vt1l, vt2l, vt3l, 3); - VP8_IDCT_1D_W(hz0r, hz1r, hz2r, hz3r, vt0r, vt1r, vt2r, vt3r); - SRARI_W4_SW(vt0r, vt1r, vt2r, vt3r, 3); - PCKEV_H4_SH(vt0l, vt0r, vt1l, vt1r, vt2l, vt2r, vt3l, vt3r, vt0, vt1, vt2, - vt3); - TRANSPOSE_TWO_4x4_H(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3); - LD_UB4(dest, dest_stride, dest0, dest1, dest2, dest3); - ILVR_B4_SH(zero, dest0, zero, dest1, zero, dest2, zero, dest3, res0, res1, - res2, res3); - ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3); - CLIP_SH4_0_255(res0, res1, res2, res3); - PCKEV_B4_SH(res0, res0, res1, res1, res2, res2, res3, res3, res0, res1, - res2, res3); - PCKOD_D2_UB(dest0, res0, dest1, res1, dest0, dest1); - PCKOD_D2_UB(dest2, res2, dest3, res3, dest2, dest3); - ST_UB4(dest0, dest1, dest2, dest3, dest, dest_stride); + LD_SH4(input, 8, in0, in1, in2, in3); + LD_SH2(dequant_input, 8, dequant_in0, dequant_in1); + MUL4(in0, dequant_in0, in1, dequant_in1, in2, dequant_in0, in3, dequant_in1, + mul0, mul1, mul2, mul3); + PCKEV_D2_SH(mul2, mul0, mul3, mul1, in0, in2); + PCKOD_D2_SH(mul2, mul0, mul3, mul1, in1, in3); + VP8_IDCT_1D_H(in0, in1, in2, in3, hz0, hz1, hz2, hz3); + TRANSPOSE_TWO_4x4_H(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3); + UNPCK_SH_SW(hz0, hz0r, hz0l); + UNPCK_SH_SW(hz1, hz1r, hz1l); + UNPCK_SH_SW(hz2, hz2r, hz2l); + UNPCK_SH_SW(hz3, hz3r, hz3l); + VP8_IDCT_1D_W(hz0l, hz1l, hz2l, hz3l, vt0l, vt1l, vt2l, vt3l); + SRARI_W4_SW(vt0l, vt1l, vt2l, vt3l, 3); + VP8_IDCT_1D_W(hz0r, hz1r, hz2r, hz3r, vt0r, vt1r, vt2r, vt3r); + SRARI_W4_SW(vt0r, vt1r, vt2r, vt3r, 3); + PCKEV_H4_SH(vt0l, vt0r, vt1l, vt1r, vt2l, vt2r, vt3l, vt3r, vt0, vt1, vt2, + vt3); + TRANSPOSE_TWO_4x4_H(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3); + LD_UB4(dest, dest_stride, dest0, dest1, dest2, dest3); + ILVR_B4_SH(zero, dest0, zero, dest1, zero, dest2, zero, dest3, res0, res1, + res2, res3); + ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, 
res0, res1, res2, res3); + CLIP_SH4_0_255(res0, res1, res2, res3); + PCKEV_B4_SH(res0, res0, res1, res1, res2, res2, res3, res3, res0, res1, res2, + res3); + PCKOD_D2_UB(dest0, res0, dest1, res1, dest0, dest1); + PCKOD_D2_UB(dest2, res2, dest3, res3, dest2, dest3); + ST_UB4(dest0, dest1, dest2, dest3, dest, dest_stride); - __asm__ __volatile__( - "sw $zero, 0(%[input]) \n\t" - "sw $zero, 4(%[input]) \n\t" - "sw $zero, 8(%[input]) \n\t" - "sw $zero, 12(%[input]) \n\t" - "sw $zero, 16(%[input]) \n\t" - "sw $zero, 20(%[input]) \n\t" - "sw $zero, 24(%[input]) \n\t" - "sw $zero, 28(%[input]) \n\t" - "sw $zero, 32(%[input]) \n\t" - "sw $zero, 36(%[input]) \n\t" - "sw $zero, 40(%[input]) \n\t" - "sw $zero, 44(%[input]) \n\t" - "sw $zero, 48(%[input]) \n\t" - "sw $zero, 52(%[input]) \n\t" - "sw $zero, 56(%[input]) \n\t" - "sw $zero, 60(%[input]) \n\t":: + __asm__ __volatile__( + "sw $zero, 0(%[input]) \n\t" + "sw $zero, 4(%[input]) \n\t" + "sw $zero, 8(%[input]) \n\t" + "sw $zero, 12(%[input]) \n\t" + "sw $zero, 16(%[input]) \n\t" + "sw $zero, 20(%[input]) \n\t" + "sw $zero, 24(%[input]) \n\t" + "sw $zero, 28(%[input]) \n\t" + "sw $zero, 32(%[input]) \n\t" + "sw $zero, 36(%[input]) \n\t" + "sw $zero, 40(%[input]) \n\t" + "sw $zero, 44(%[input]) \n\t" + "sw $zero, 48(%[input]) \n\t" + "sw $zero, 52(%[input]) \n\t" + "sw $zero, 56(%[input]) \n\t" + "sw $zero, 60(%[input]) \n\t" :: - [input] "r"(input) - ); + [input] "r"(input)); } static void dequant_idct_addconst_2x_msa(int16_t *input, int16_t *dequant_input, - uint8_t *dest, int32_t dest_stride) -{ - v8i16 input_dc0, input_dc1, vec; - v16u8 dest0, dest1, dest2, dest3; - v16i8 zero = { 0 }; - v8i16 res0, res1, res2, res3; + uint8_t *dest, int32_t dest_stride) { + v8i16 input_dc0, input_dc1, vec; + v16u8 dest0, dest1, dest2, dest3; + v16i8 zero = { 0 }; + v8i16 res0, res1, res2, res3; - input_dc0 = __msa_fill_h(input[0] * dequant_input[0]); - input_dc1 = __msa_fill_h(input[16] * dequant_input[0]); - SRARI_H2_SH(input_dc0, 
input_dc1, 3); - vec = (v8i16)__msa_pckev_d((v2i64)input_dc1, (v2i64)input_dc0); - input[0] = 0; - input[16] = 0; - LD_UB4(dest, dest_stride, dest0, dest1, dest2, dest3); - ILVR_B4_SH(zero, dest0, zero, dest1, zero, dest2, zero, dest3, res0, - res1, res2, res3); - ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3); - CLIP_SH4_0_255(res0, res1, res2, res3); - PCKEV_B4_SH(res0, res0, res1, res1, res2, res2, res3, res3, res0, res1, - res2, res3); - PCKOD_D2_UB(dest0, res0, dest1, res1, dest0, dest1); - PCKOD_D2_UB(dest2, res2, dest3, res3, dest2, dest3); - ST_UB4(dest0, dest1, dest2, dest3, dest, dest_stride); + input_dc0 = __msa_fill_h(input[0] * dequant_input[0]); + input_dc1 = __msa_fill_h(input[16] * dequant_input[0]); + SRARI_H2_SH(input_dc0, input_dc1, 3); + vec = (v8i16)__msa_pckev_d((v2i64)input_dc1, (v2i64)input_dc0); + input[0] = 0; + input[16] = 0; + LD_UB4(dest, dest_stride, dest0, dest1, dest2, dest3); + ILVR_B4_SH(zero, dest0, zero, dest1, zero, dest2, zero, dest3, res0, res1, + res2, res3); + ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3); + CLIP_SH4_0_255(res0, res1, res2, res3); + PCKEV_B4_SH(res0, res0, res1, res1, res2, res2, res3, res3, res0, res1, res2, + res3); + PCKOD_D2_UB(dest0, res0, dest1, res1, dest0, dest1); + PCKOD_D2_UB(dest2, res2, dest3, res3, dest2, dest3); + ST_UB4(dest0, dest1, dest2, dest3, dest, dest_stride); } void vp8_short_idct4x4llm_msa(int16_t *input, uint8_t *pred_ptr, int32_t pred_stride, uint8_t *dst_ptr, - int32_t dst_stride) -{ - idct4x4_addblk_msa(input, pred_ptr, pred_stride, dst_ptr, dst_stride); + int32_t dst_stride) { + idct4x4_addblk_msa(input, pred_ptr, pred_stride, dst_ptr, dst_stride); } void vp8_dc_only_idct_add_msa(int16_t input_dc, uint8_t *pred_ptr, int32_t pred_stride, uint8_t *dst_ptr, - int32_t dst_stride) -{ - idct4x4_addconst_msa(input_dc, pred_ptr, pred_stride, dst_ptr, dst_stride); + int32_t dst_stride) { + idct4x4_addconst_msa(input_dc, pred_ptr, 
pred_stride, dst_ptr, dst_stride); } -void vp8_dequantize_b_msa(BLOCKD *d, int16_t *DQC) -{ - v8i16 dqc0, dqc1, q0, q1, dq0, dq1; +void vp8_dequantize_b_msa(BLOCKD *d, int16_t *DQC) { + v8i16 dqc0, dqc1, q0, q1, dq0, dq1; - LD_SH2(DQC, 8, dqc0, dqc1); - LD_SH2(d->qcoeff, 8, q0, q1); - MUL2(dqc0, q0, dqc1, q1, dq0, dq1); - ST_SH2(dq0, dq1, d->dqcoeff, 8); + LD_SH2(DQC, 8, dqc0, dqc1); + LD_SH2(d->qcoeff, 8, q0, q1); + MUL2(dqc0, q0, dqc1, q1, dq0, dq1); + ST_SH2(dq0, dq1, d->dqcoeff, 8); } -void vp8_dequant_idct_add_msa(int16_t *input, int16_t *dq, - uint8_t *dest, int32_t stride) -{ - dequant_idct4x4_addblk_msa(input, dq, dest, stride); +void vp8_dequant_idct_add_msa(int16_t *input, int16_t *dq, uint8_t *dest, + int32_t stride) { + dequant_idct4x4_addblk_msa(input, dq, dest, stride); - __asm__ __volatile__ ( - "sw $zero, 0(%[input]) \n\t" - "sw $zero, 4(%[input]) \n\t" - "sw $zero, 8(%[input]) \n\t" - "sw $zero, 12(%[input]) \n\t" - "sw $zero, 16(%[input]) \n\t" - "sw $zero, 20(%[input]) \n\t" - "sw $zero, 24(%[input]) \n\t" - "sw $zero, 28(%[input]) \n\t" + __asm__ __volatile__( + "sw $zero, 0(%[input]) \n\t" + "sw $zero, 4(%[input]) \n\t" + "sw $zero, 8(%[input]) \n\t" + "sw $zero, 12(%[input]) \n\t" + "sw $zero, 16(%[input]) \n\t" + "sw $zero, 20(%[input]) \n\t" + "sw $zero, 24(%[input]) \n\t" + "sw $zero, 28(%[input]) \n\t" - : - : [input] "r" (input) - ); + : + : [input] "r"(input)); } -void vp8_dequant_idct_add_y_block_msa(int16_t *q, int16_t *dq, - uint8_t *dst, int32_t stride, - char *eobs) -{ - int16_t *eobs_h = (int16_t *)eobs; - uint8_t i; +void vp8_dequant_idct_add_y_block_msa(int16_t *q, int16_t *dq, uint8_t *dst, + int32_t stride, char *eobs) { + int16_t *eobs_h = (int16_t *)eobs; + uint8_t i; - for (i = 4; i--;) - { - if (eobs_h[0]) - { - if (eobs_h[0] & 0xfefe) - { - dequant_idct4x4_addblk_2x_msa(q, dq, dst, stride); - } - else - { - dequant_idct_addconst_2x_msa(q, dq, dst, stride); - } - } - - q += 32; - - if (eobs_h[1]) - { - if (eobs_h[1] & 
0xfefe) - { - dequant_idct4x4_addblk_2x_msa(q, dq, dst + 8, stride); - } - else - { - dequant_idct_addconst_2x_msa(q, dq, dst + 8, stride); - } - } - - q += 32; - dst += (4 * stride); - eobs_h += 2; - } -} - -void vp8_dequant_idct_add_uv_block_msa(int16_t *q, int16_t *dq, - uint8_t *dstu, uint8_t *dstv, - int32_t stride, char *eobs) -{ - int16_t *eobs_h = (int16_t *)eobs; - - if (eobs_h[0]) - { - if (eobs_h[0] & 0xfefe) - { - dequant_idct4x4_addblk_2x_msa(q, dq, dstu, stride); - } - else - { - dequant_idct_addconst_2x_msa(q, dq, dstu, stride); - } - } - - q += 32; - dstu += (stride * 4); - - if (eobs_h[1]) - { - if (eobs_h[1] & 0xfefe) - { - dequant_idct4x4_addblk_2x_msa(q, dq, dstu, stride); - } - else - { - dequant_idct_addconst_2x_msa(q, dq, dstu, stride); - } + for (i = 4; i--;) { + if (eobs_h[0]) { + if (eobs_h[0] & 0xfefe) { + dequant_idct4x4_addblk_2x_msa(q, dq, dst, stride); + } else { + dequant_idct_addconst_2x_msa(q, dq, dst, stride); + } } q += 32; - if (eobs_h[2]) - { - if (eobs_h[2] & 0xfefe) - { - dequant_idct4x4_addblk_2x_msa(q, dq, dstv, stride); - } - else - { - dequant_idct_addconst_2x_msa(q, dq, dstv, stride); - } + if (eobs_h[1]) { + if (eobs_h[1] & 0xfefe) { + dequant_idct4x4_addblk_2x_msa(q, dq, dst + 8, stride); + } else { + dequant_idct_addconst_2x_msa(q, dq, dst + 8, stride); + } } q += 32; - dstv += (stride * 4); - - if (eobs_h[3]) - { - if (eobs_h[3] & 0xfefe) - { - dequant_idct4x4_addblk_2x_msa(q, dq, dstv, stride); - } - else - { - dequant_idct_addconst_2x_msa(q, dq, dstv, stride); - } - } + dst += (4 * stride); + eobs_h += 2; + } +} + +void vp8_dequant_idct_add_uv_block_msa(int16_t *q, int16_t *dq, uint8_t *dstu, + uint8_t *dstv, int32_t stride, + char *eobs) { + int16_t *eobs_h = (int16_t *)eobs; + + if (eobs_h[0]) { + if (eobs_h[0] & 0xfefe) { + dequant_idct4x4_addblk_2x_msa(q, dq, dstu, stride); + } else { + dequant_idct_addconst_2x_msa(q, dq, dstu, stride); + } + } + + q += 32; + dstu += (stride * 4); + + if (eobs_h[1]) { + if 
(eobs_h[1] & 0xfefe) { + dequant_idct4x4_addblk_2x_msa(q, dq, dstu, stride); + } else { + dequant_idct_addconst_2x_msa(q, dq, dstu, stride); + } + } + + q += 32; + + if (eobs_h[2]) { + if (eobs_h[2] & 0xfefe) { + dequant_idct4x4_addblk_2x_msa(q, dq, dstv, stride); + } else { + dequant_idct_addconst_2x_msa(q, dq, dstv, stride); + } + } + + q += 32; + dstv += (stride * 4); + + if (eobs_h[3]) { + if (eobs_h[3] & 0xfefe) { + dequant_idct4x4_addblk_2x_msa(q, dq, dstv, stride); + } else { + dequant_idct_addconst_2x_msa(q, dq, dstv, stride); + } + } } diff --git a/libs/libvpx/vp8/common/mips/msa/loopfilter_filters_msa.c b/libs/libvpx/vp8/common/mips/msa/loopfilter_filters_msa.c index a40f378098..f5f1790ef5 100644 --- a/libs/libvpx/vp8/common/mips/msa/loopfilter_filters_msa.c +++ b/libs/libvpx/vp8/common/mips/msa/loopfilter_filters_msa.c @@ -12,264 +12,262 @@ #include "vp8/common/loopfilter.h" #include "vp8/common/mips/msa/vp8_macros_msa.h" -#define VP8_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask) \ -{ \ - v16u8 p1_a_sub_q1, p0_a_sub_q0; \ +#define VP8_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask) \ + { \ + v16u8 p1_a_sub_q1, p0_a_sub_q0; \ + \ + p0_a_sub_q0 = __msa_asub_u_b(p0, q0); \ + p1_a_sub_q1 = __msa_asub_u_b(p1, q1); \ + p1_a_sub_q1 = (v16u8)__msa_srli_b((v16i8)p1_a_sub_q1, 1); \ + p0_a_sub_q0 = __msa_adds_u_b(p0_a_sub_q0, p0_a_sub_q0); \ + mask = __msa_adds_u_b(p0_a_sub_q0, p1_a_sub_q1); \ + mask = ((v16u8)mask <= b_limit); \ + } + +#define VP8_LPF_FILTER4_4W(p1_in_out, p0_in_out, q0_in_out, q1_in_out, \ + mask_in, hev_in) \ + { \ + v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \ + v16i8 filt, filt1, filt2, cnst4b, cnst3b; \ + v8i16 q0_sub_p0_r, q0_sub_p0_l, filt_l, filt_r, cnst3h; \ + \ + p1_m = (v16i8)__msa_xori_b(p1_in_out, 0x80); \ + p0_m = (v16i8)__msa_xori_b(p0_in_out, 0x80); \ + q0_m = (v16i8)__msa_xori_b(q0_in_out, 0x80); \ + q1_m = (v16i8)__msa_xori_b(q1_in_out, 0x80); \ + \ + filt = __msa_subs_s_b(p1_m, q1_m); \ + \ + filt = filt & (v16i8)hev_in; \ + \ 
+ q0_sub_p0 = q0_m - p0_m; \ + filt_sign = __msa_clti_s_b(filt, 0); \ + \ + cnst3h = __msa_ldi_h(3); \ + q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0, q0_sub_p0); \ + q0_sub_p0_r = __msa_dotp_s_h((v16i8)q0_sub_p0_r, (v16i8)cnst3h); \ + filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt); \ + filt_r += q0_sub_p0_r; \ + filt_r = __msa_sat_s_h(filt_r, 7); \ + \ + q0_sub_p0_l = (v8i16)__msa_ilvl_b(q0_sub_p0, q0_sub_p0); \ + q0_sub_p0_l = __msa_dotp_s_h((v16i8)q0_sub_p0_l, (v16i8)cnst3h); \ + filt_l = (v8i16)__msa_ilvl_b(filt_sign, filt); \ + filt_l += q0_sub_p0_l; \ + filt_l = __msa_sat_s_h(filt_l, 7); \ + \ + filt = __msa_pckev_b((v16i8)filt_l, (v16i8)filt_r); \ + filt = filt & (v16i8)mask_in; \ + \ + cnst4b = __msa_ldi_b(4); \ + filt1 = __msa_adds_s_b(filt, cnst4b); \ + filt1 >>= 3; \ + \ + cnst3b = __msa_ldi_b(3); \ + filt2 = __msa_adds_s_b(filt, cnst3b); \ + filt2 >>= 3; \ + \ + q0_m = __msa_subs_s_b(q0_m, filt1); \ + q0_in_out = __msa_xori_b((v16u8)q0_m, 0x80); \ + p0_m = __msa_adds_s_b(p0_m, filt2); \ + p0_in_out = __msa_xori_b((v16u8)p0_m, 0x80); \ + \ + filt = __msa_srari_b(filt1, 1); \ + hev_in = __msa_xori_b((v16u8)hev_in, 0xff); \ + filt = filt & (v16i8)hev_in; \ + \ + q1_m = __msa_subs_s_b(q1_m, filt); \ + q1_in_out = __msa_xori_b((v16u8)q1_m, 0x80); \ + p1_m = __msa_adds_s_b(p1_m, filt); \ + p1_in_out = __msa_xori_b((v16u8)p1_m, 0x80); \ + } + +#define VP8_SIMPLE_FILT(p1_in, p0_in, q0_in, q1_in, mask) \ + { \ + v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, q0_sub_p0_sign; \ + v16i8 filt, filt1, filt2, cnst4b, cnst3b, filt_sign; \ + v8i16 q0_sub_p0_r, q0_sub_p0_l, filt_l, filt_r, cnst3h; \ + \ + p1_m = (v16i8)__msa_xori_b(p1_in, 0x80); \ + p0_m = (v16i8)__msa_xori_b(p0_in, 0x80); \ + q0_m = (v16i8)__msa_xori_b(q0_in, 0x80); \ + q1_m = (v16i8)__msa_xori_b(q1_in, 0x80); \ + \ + filt = __msa_subs_s_b(p1_m, q1_m); \ + \ + q0_sub_p0 = q0_m - p0_m; \ + filt_sign = __msa_clti_s_b(filt, 0); \ + \ + cnst3h = __msa_ldi_h(3); \ + q0_sub_p0_sign = __msa_clti_s_b(q0_sub_p0, 0); \ + 
q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0_sign, q0_sub_p0); \ + q0_sub_p0_r *= cnst3h; \ + filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt); \ + filt_r += q0_sub_p0_r; \ + filt_r = __msa_sat_s_h(filt_r, 7); \ + \ + q0_sub_p0_l = (v8i16)__msa_ilvl_b(q0_sub_p0_sign, q0_sub_p0); \ + q0_sub_p0_l *= cnst3h; \ + filt_l = (v8i16)__msa_ilvl_b(filt_sign, filt); \ + filt_l += q0_sub_p0_l; \ + filt_l = __msa_sat_s_h(filt_l, 7); \ + \ + filt = __msa_pckev_b((v16i8)filt_l, (v16i8)filt_r); \ + filt = filt & (v16i8)(mask); \ + \ + cnst4b = __msa_ldi_b(4); \ + filt1 = __msa_adds_s_b(filt, cnst4b); \ + filt1 >>= 3; \ + \ + cnst3b = __msa_ldi_b(3); \ + filt2 = __msa_adds_s_b(filt, cnst3b); \ + filt2 >>= 3; \ + \ + q0_m = __msa_subs_s_b(q0_m, filt1); \ + p0_m = __msa_adds_s_b(p0_m, filt2); \ + q0_in = __msa_xori_b((v16u8)q0_m, 0x80); \ + p0_in = __msa_xori_b((v16u8)p0_m, 0x80); \ + } + +#define VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev) \ + { \ + v16i8 p2_m, p1_m, p0_m, q2_m, q1_m, q0_m; \ + v16i8 filt, q0_sub_p0, cnst4b, cnst3b; \ + v16i8 u, filt1, filt2, filt_sign, q0_sub_p0_sign; \ + v8i16 q0_sub_p0_r, q0_sub_p0_l, filt_r, u_r, u_l, filt_l; \ + v8i16 cnst3h, cnst27h, cnst18h, cnst63h; \ + \ + cnst3h = __msa_ldi_h(3); \ + \ + p2_m = (v16i8)__msa_xori_b(p2, 0x80); \ + p1_m = (v16i8)__msa_xori_b(p1, 0x80); \ + p0_m = (v16i8)__msa_xori_b(p0, 0x80); \ + q0_m = (v16i8)__msa_xori_b(q0, 0x80); \ + q1_m = (v16i8)__msa_xori_b(q1, 0x80); \ + q2_m = (v16i8)__msa_xori_b(q2, 0x80); \ + \ + filt = __msa_subs_s_b(p1_m, q1_m); \ + q0_sub_p0 = q0_m - p0_m; \ + q0_sub_p0_sign = __msa_clti_s_b(q0_sub_p0, 0); \ + filt_sign = __msa_clti_s_b(filt, 0); \ + \ + q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0_sign, q0_sub_p0); \ + q0_sub_p0_r *= cnst3h; \ + filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt); \ + filt_r = filt_r + q0_sub_p0_r; \ + filt_r = __msa_sat_s_h(filt_r, 7); \ + \ + q0_sub_p0_l = (v8i16)__msa_ilvl_b(q0_sub_p0_sign, q0_sub_p0); \ + q0_sub_p0_l *= cnst3h; \ + filt_l = 
(v8i16)__msa_ilvl_b(filt_sign, filt); \ + filt_l = filt_l + q0_sub_p0_l; \ + filt_l = __msa_sat_s_h(filt_l, 7); \ + \ + filt = __msa_pckev_b((v16i8)filt_l, (v16i8)filt_r); \ + filt = filt & (v16i8)mask; \ + filt2 = filt & (v16i8)hev; \ + \ + hev = __msa_xori_b(hev, 0xff); \ + filt = filt & (v16i8)hev; \ + cnst4b = __msa_ldi_b(4); \ + filt1 = __msa_adds_s_b(filt2, cnst4b); \ + filt1 >>= 3; \ + cnst3b = __msa_ldi_b(3); \ + filt2 = __msa_adds_s_b(filt2, cnst3b); \ + filt2 >>= 3; \ + q0_m = __msa_subs_s_b(q0_m, filt1); \ + p0_m = __msa_adds_s_b(p0_m, filt2); \ + \ + filt_sign = __msa_clti_s_b(filt, 0); \ + ILVRL_B2_SH(filt_sign, filt, filt_r, filt_l); \ + \ + cnst27h = __msa_ldi_h(27); \ + cnst63h = __msa_ldi_h(63); \ + \ + u_r = filt_r * cnst27h; \ + u_r += cnst63h; \ + u_r >>= 7; \ + u_r = __msa_sat_s_h(u_r, 7); \ + u_l = filt_l * cnst27h; \ + u_l += cnst63h; \ + u_l >>= 7; \ + u_l = __msa_sat_s_h(u_l, 7); \ + u = __msa_pckev_b((v16i8)u_l, (v16i8)u_r); \ + q0_m = __msa_subs_s_b(q0_m, u); \ + q0 = __msa_xori_b((v16u8)q0_m, 0x80); \ + p0_m = __msa_adds_s_b(p0_m, u); \ + p0 = __msa_xori_b((v16u8)p0_m, 0x80); \ + cnst18h = __msa_ldi_h(18); \ + u_r = filt_r * cnst18h; \ + u_r += cnst63h; \ + u_r >>= 7; \ + u_r = __msa_sat_s_h(u_r, 7); \ + \ + u_l = filt_l * cnst18h; \ + u_l += cnst63h; \ + u_l >>= 7; \ + u_l = __msa_sat_s_h(u_l, 7); \ + u = __msa_pckev_b((v16i8)u_l, (v16i8)u_r); \ + q1_m = __msa_subs_s_b(q1_m, u); \ + q1 = __msa_xori_b((v16u8)q1_m, 0x80); \ + p1_m = __msa_adds_s_b(p1_m, u); \ + p1 = __msa_xori_b((v16u8)p1_m, 0x80); \ + u_r = filt_r << 3; \ + u_r += filt_r + cnst63h; \ + u_r >>= 7; \ + u_r = __msa_sat_s_h(u_r, 7); \ + \ + u_l = filt_l << 3; \ + u_l += filt_l + cnst63h; \ + u_l >>= 7; \ + u_l = __msa_sat_s_h(u_l, 7); \ + u = __msa_pckev_b((v16i8)u_l, (v16i8)u_r); \ + q2_m = __msa_subs_s_b(q2_m, u); \ + q2 = __msa_xori_b((v16u8)q2_m, 0x80); \ + p2_m = __msa_adds_s_b(p2_m, u); \ + p2 = __msa_xori_b((v16u8)p2_m, 0x80); \ + } + +#define LPF_MASK_HEV(p3_in, 
p2_in, p1_in, p0_in, q0_in, q1_in, q2_in, q3_in, \ + limit_in, b_limit_in, thresh_in, hev_out, mask_out, \ + flat_out) \ + { \ + v16u8 p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m; \ + v16u8 p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m; \ + \ + p3_asub_p2_m = __msa_asub_u_b((p3_in), (p2_in)); \ + p2_asub_p1_m = __msa_asub_u_b((p2_in), (p1_in)); \ + p1_asub_p0_m = __msa_asub_u_b((p1_in), (p0_in)); \ + q1_asub_q0_m = __msa_asub_u_b((q1_in), (q0_in)); \ + q2_asub_q1_m = __msa_asub_u_b((q2_in), (q1_in)); \ + q3_asub_q2_m = __msa_asub_u_b((q3_in), (q2_in)); \ + p0_asub_q0_m = __msa_asub_u_b((p0_in), (q0_in)); \ + p1_asub_q1_m = __msa_asub_u_b((p1_in), (q1_in)); \ + flat_out = __msa_max_u_b(p1_asub_p0_m, q1_asub_q0_m); \ + hev_out = (thresh_in) < (v16u8)flat_out; \ + p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p0_asub_q0_m); \ + p1_asub_q1_m >>= 1; \ + p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p1_asub_q1_m); \ + mask_out = (b_limit_in) < p0_asub_q0_m; \ + mask_out = __msa_max_u_b(flat_out, mask_out); \ + p3_asub_p2_m = __msa_max_u_b(p3_asub_p2_m, p2_asub_p1_m); \ + mask_out = __msa_max_u_b(p3_asub_p2_m, mask_out); \ + q2_asub_q1_m = __msa_max_u_b(q2_asub_q1_m, q3_asub_q2_m); \ + mask_out = __msa_max_u_b(q2_asub_q1_m, mask_out); \ + mask_out = (limit_in) < (v16u8)mask_out; \ + mask_out = __msa_xori_b(mask_out, 0xff); \ + } + +#define VP8_ST6x1_UB(in0, in0_idx, in1, in1_idx, pdst, stride) \ + { \ + uint16_t tmp0_h; \ + uint32_t tmp0_w; \ \ - p0_a_sub_q0 = __msa_asub_u_b(p0, q0); \ - p1_a_sub_q1 = __msa_asub_u_b(p1, q1); \ - p1_a_sub_q1 = (v16u8)__msa_srli_b((v16i8)p1_a_sub_q1, 1); \ - p0_a_sub_q0 = __msa_adds_u_b(p0_a_sub_q0, p0_a_sub_q0); \ - mask = __msa_adds_u_b(p0_a_sub_q0, p1_a_sub_q1); \ - mask = ((v16u8)mask <= b_limit); \ -} - -#define VP8_LPF_FILTER4_4W(p1_in_out, p0_in_out, q0_in_out, q1_in_out, \ - mask_in, hev_in) \ -{ \ - v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \ - v16i8 filt, filt1, filt2, cnst4b, cnst3b; \ - v8i16 
q0_sub_p0_r, q0_sub_p0_l, filt_l, filt_r, cnst3h; \ - \ - p1_m = (v16i8)__msa_xori_b(p1_in_out, 0x80); \ - p0_m = (v16i8)__msa_xori_b(p0_in_out, 0x80); \ - q0_m = (v16i8)__msa_xori_b(q0_in_out, 0x80); \ - q1_m = (v16i8)__msa_xori_b(q1_in_out, 0x80); \ - \ - filt = __msa_subs_s_b(p1_m, q1_m); \ - \ - filt = filt & (v16i8)hev_in; \ - \ - q0_sub_p0 = q0_m - p0_m; \ - filt_sign = __msa_clti_s_b(filt, 0); \ - \ - cnst3h = __msa_ldi_h(3); \ - q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0, q0_sub_p0); \ - q0_sub_p0_r = __msa_dotp_s_h((v16i8)q0_sub_p0_r, (v16i8)cnst3h); \ - filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt); \ - filt_r += q0_sub_p0_r; \ - filt_r = __msa_sat_s_h(filt_r, 7); \ - \ - q0_sub_p0_l = (v8i16)__msa_ilvl_b(q0_sub_p0, q0_sub_p0); \ - q0_sub_p0_l = __msa_dotp_s_h((v16i8)q0_sub_p0_l, (v16i8)cnst3h); \ - filt_l = (v8i16)__msa_ilvl_b(filt_sign, filt); \ - filt_l += q0_sub_p0_l; \ - filt_l = __msa_sat_s_h(filt_l, 7); \ - \ - filt = __msa_pckev_b((v16i8)filt_l, (v16i8)filt_r); \ - filt = filt & (v16i8)mask_in; \ - \ - cnst4b = __msa_ldi_b(4); \ - filt1 = __msa_adds_s_b(filt, cnst4b); \ - filt1 >>= 3; \ - \ - cnst3b = __msa_ldi_b(3); \ - filt2 = __msa_adds_s_b(filt, cnst3b); \ - filt2 >>= 3; \ - \ - q0_m = __msa_subs_s_b(q0_m, filt1); \ - q0_in_out = __msa_xori_b((v16u8)q0_m, 0x80); \ - p0_m = __msa_adds_s_b(p0_m, filt2); \ - p0_in_out = __msa_xori_b((v16u8)p0_m, 0x80); \ - \ - filt = __msa_srari_b(filt1, 1); \ - hev_in = __msa_xori_b((v16u8)hev_in, 0xff); \ - filt = filt & (v16i8)hev_in; \ - \ - q1_m = __msa_subs_s_b(q1_m, filt); \ - q1_in_out = __msa_xori_b((v16u8)q1_m, 0x80); \ - p1_m = __msa_adds_s_b(p1_m, filt); \ - p1_in_out = __msa_xori_b((v16u8)p1_m, 0x80); \ -} - -#define VP8_SIMPLE_FILT(p1_in, p0_in, q0_in, q1_in, mask) \ -{ \ - v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, q0_sub_p0_sign; \ - v16i8 filt, filt1, filt2, cnst4b, cnst3b, filt_sign; \ - v8i16 q0_sub_p0_r, q0_sub_p0_l, filt_l, filt_r, cnst3h; \ - \ - p1_m = (v16i8)__msa_xori_b(p1_in, 0x80); \ - 
p0_m = (v16i8)__msa_xori_b(p0_in, 0x80); \ - q0_m = (v16i8)__msa_xori_b(q0_in, 0x80); \ - q1_m = (v16i8)__msa_xori_b(q1_in, 0x80); \ - \ - filt = __msa_subs_s_b(p1_m, q1_m); \ - \ - q0_sub_p0 = q0_m - p0_m; \ - filt_sign = __msa_clti_s_b(filt, 0); \ - \ - cnst3h = __msa_ldi_h(3); \ - q0_sub_p0_sign = __msa_clti_s_b(q0_sub_p0, 0); \ - q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0_sign, q0_sub_p0); \ - q0_sub_p0_r *= cnst3h; \ - filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt); \ - filt_r += q0_sub_p0_r; \ - filt_r = __msa_sat_s_h(filt_r, 7); \ - \ - q0_sub_p0_l = (v8i16)__msa_ilvl_b(q0_sub_p0_sign, q0_sub_p0); \ - q0_sub_p0_l *= cnst3h; \ - filt_l = (v8i16)__msa_ilvl_b(filt_sign, filt); \ - filt_l += q0_sub_p0_l; \ - filt_l = __msa_sat_s_h(filt_l, 7); \ - \ - filt = __msa_pckev_b((v16i8)filt_l, (v16i8)filt_r); \ - filt = filt & (v16i8)(mask); \ - \ - cnst4b = __msa_ldi_b(4); \ - filt1 = __msa_adds_s_b(filt, cnst4b); \ - filt1 >>= 3; \ - \ - cnst3b = __msa_ldi_b(3); \ - filt2 = __msa_adds_s_b(filt, cnst3b); \ - filt2 >>= 3; \ - \ - q0_m = __msa_subs_s_b(q0_m, filt1); \ - p0_m = __msa_adds_s_b(p0_m, filt2); \ - q0_in = __msa_xori_b((v16u8)q0_m, 0x80); \ - p0_in = __msa_xori_b((v16u8)p0_m, 0x80); \ -} - -#define VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev) \ -{ \ - v16i8 p2_m, p1_m, p0_m, q2_m, q1_m, q0_m; \ - v16i8 filt, q0_sub_p0, cnst4b, cnst3b; \ - v16i8 u, filt1, filt2, filt_sign, q0_sub_p0_sign; \ - v8i16 q0_sub_p0_r, q0_sub_p0_l, filt_r, u_r, u_l, filt_l; \ - v8i16 cnst3h, cnst27h, cnst18h, cnst63h; \ - \ - cnst3h = __msa_ldi_h(3); \ - \ - p2_m = (v16i8)__msa_xori_b(p2, 0x80); \ - p1_m = (v16i8)__msa_xori_b(p1, 0x80); \ - p0_m = (v16i8)__msa_xori_b(p0, 0x80); \ - q0_m = (v16i8)__msa_xori_b(q0, 0x80); \ - q1_m = (v16i8)__msa_xori_b(q1, 0x80); \ - q2_m = (v16i8)__msa_xori_b(q2, 0x80); \ - \ - filt = __msa_subs_s_b(p1_m, q1_m); \ - q0_sub_p0 = q0_m - p0_m; \ - q0_sub_p0_sign = __msa_clti_s_b(q0_sub_p0, 0); \ - filt_sign = __msa_clti_s_b(filt, 0); \ - \ - q0_sub_p0_r 
= (v8i16)__msa_ilvr_b(q0_sub_p0_sign, q0_sub_p0); \ - q0_sub_p0_r *= cnst3h; \ - filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt); \ - filt_r = filt_r + q0_sub_p0_r; \ - filt_r = __msa_sat_s_h(filt_r, 7); \ - \ - q0_sub_p0_l = (v8i16)__msa_ilvl_b(q0_sub_p0_sign, q0_sub_p0); \ - q0_sub_p0_l *= cnst3h; \ - filt_l = (v8i16)__msa_ilvl_b(filt_sign, filt); \ - filt_l = filt_l + q0_sub_p0_l; \ - filt_l = __msa_sat_s_h(filt_l, 7); \ - \ - filt = __msa_pckev_b((v16i8)filt_l, (v16i8)filt_r); \ - filt = filt & (v16i8)mask; \ - filt2 = filt & (v16i8)hev; \ - \ - hev = __msa_xori_b(hev, 0xff); \ - filt = filt & (v16i8)hev; \ - cnst4b = __msa_ldi_b(4); \ - filt1 = __msa_adds_s_b(filt2, cnst4b); \ - filt1 >>= 3; \ - cnst3b = __msa_ldi_b(3); \ - filt2 = __msa_adds_s_b(filt2, cnst3b); \ - filt2 >>= 3; \ - q0_m = __msa_subs_s_b(q0_m, filt1); \ - p0_m = __msa_adds_s_b(p0_m, filt2); \ - \ - filt_sign = __msa_clti_s_b(filt, 0); \ - ILVRL_B2_SH(filt_sign, filt, filt_r, filt_l); \ - \ - cnst27h = __msa_ldi_h(27); \ - cnst63h = __msa_ldi_h(63); \ - \ - u_r = filt_r * cnst27h; \ - u_r += cnst63h; \ - u_r >>= 7; \ - u_r = __msa_sat_s_h(u_r, 7); \ - u_l = filt_l * cnst27h; \ - u_l += cnst63h; \ - u_l >>= 7; \ - u_l = __msa_sat_s_h(u_l, 7); \ - u = __msa_pckev_b((v16i8)u_l, (v16i8)u_r); \ - q0_m = __msa_subs_s_b(q0_m, u); \ - q0 = __msa_xori_b((v16u8)q0_m, 0x80); \ - p0_m = __msa_adds_s_b(p0_m, u); \ - p0 = __msa_xori_b((v16u8)p0_m, 0x80); \ - cnst18h = __msa_ldi_h(18); \ - u_r = filt_r * cnst18h; \ - u_r += cnst63h; \ - u_r >>= 7; \ - u_r = __msa_sat_s_h(u_r, 7); \ - \ - u_l = filt_l * cnst18h; \ - u_l += cnst63h; \ - u_l >>= 7; \ - u_l = __msa_sat_s_h(u_l, 7); \ - u = __msa_pckev_b((v16i8)u_l, (v16i8)u_r); \ - q1_m = __msa_subs_s_b(q1_m, u); \ - q1 = __msa_xori_b((v16u8)q1_m, 0x80); \ - p1_m = __msa_adds_s_b(p1_m, u); \ - p1 = __msa_xori_b((v16u8)p1_m, 0x80); \ - u_r = filt_r << 3; \ - u_r += filt_r + cnst63h; \ - u_r >>= 7; \ - u_r = __msa_sat_s_h(u_r, 7); \ - \ - u_l = filt_l << 3; \ - u_l 
+= filt_l + cnst63h; \ - u_l >>= 7; \ - u_l = __msa_sat_s_h(u_l, 7); \ - u = __msa_pckev_b((v16i8)u_l, (v16i8)u_r); \ - q2_m = __msa_subs_s_b(q2_m, u); \ - q2 = __msa_xori_b((v16u8)q2_m, 0x80); \ - p2_m = __msa_adds_s_b(p2_m, u); \ - p2 = __msa_xori_b((v16u8)p2_m, 0x80); \ -} - -#define LPF_MASK_HEV(p3_in, p2_in, p1_in, p0_in, \ - q0_in, q1_in, q2_in, q3_in, \ - limit_in, b_limit_in, thresh_in, \ - hev_out, mask_out, flat_out) \ -{ \ - v16u8 p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m; \ - v16u8 p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m; \ - \ - p3_asub_p2_m = __msa_asub_u_b((p3_in), (p2_in)); \ - p2_asub_p1_m = __msa_asub_u_b((p2_in), (p1_in)); \ - p1_asub_p0_m = __msa_asub_u_b((p1_in), (p0_in)); \ - q1_asub_q0_m = __msa_asub_u_b((q1_in), (q0_in)); \ - q2_asub_q1_m = __msa_asub_u_b((q2_in), (q1_in)); \ - q3_asub_q2_m = __msa_asub_u_b((q3_in), (q2_in)); \ - p0_asub_q0_m = __msa_asub_u_b((p0_in), (q0_in)); \ - p1_asub_q1_m = __msa_asub_u_b((p1_in), (q1_in)); \ - flat_out = __msa_max_u_b(p1_asub_p0_m, q1_asub_q0_m); \ - hev_out = (thresh_in) < (v16u8)flat_out; \ - p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p0_asub_q0_m); \ - p1_asub_q1_m >>= 1; \ - p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p1_asub_q1_m); \ - mask_out = (b_limit_in) < p0_asub_q0_m; \ - mask_out = __msa_max_u_b(flat_out, mask_out); \ - p3_asub_p2_m = __msa_max_u_b(p3_asub_p2_m, p2_asub_p1_m); \ - mask_out = __msa_max_u_b(p3_asub_p2_m, mask_out); \ - q2_asub_q1_m = __msa_max_u_b(q2_asub_q1_m, q3_asub_q2_m); \ - mask_out = __msa_max_u_b(q2_asub_q1_m, mask_out); \ - mask_out = (limit_in) < (v16u8)mask_out; \ - mask_out = __msa_xori_b(mask_out, 0xff); \ -} - -#define VP8_ST6x1_UB(in0, in0_idx, in1, in1_idx, pdst, stride) \ -{ \ - uint16_t tmp0_h; \ - uint32_t tmp0_w; \ - \ - tmp0_w = __msa_copy_u_w((v4i32)in0, in0_idx); \ - tmp0_h = __msa_copy_u_h((v8i16)in1, in1_idx); \ - SW(tmp0_w, pdst); \ - SH(tmp0_h, pdst + stride); \ -} - + tmp0_w = __msa_copy_u_w((v4i32)in0, in0_idx); \ + 
tmp0_h = __msa_copy_u_h((v8i16)in1, in1_idx); \ + SW(tmp0_w, pdst); \ + SH(tmp0_h, pdst + stride); \ + } static void loop_filter_horizontal_4_dual_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit0_ptr, @@ -277,30 +275,29 @@ static void loop_filter_horizontal_4_dual_msa(uint8_t *src, int32_t pitch, const uint8_t *thresh0_ptr, const uint8_t *b_limit1_ptr, const uint8_t *limit1_ptr, - const uint8_t *thresh1_ptr) -{ - v16u8 mask, hev, flat; - v16u8 thresh0, b_limit0, limit0, thresh1, b_limit1, limit1; - v16u8 p3, p2, p1, p0, q3, q2, q1, q0; + const uint8_t *thresh1_ptr) { + v16u8 mask, hev, flat; + v16u8 thresh0, b_limit0, limit0, thresh1, b_limit1, limit1; + v16u8 p3, p2, p1, p0, q3, q2, q1, q0; - LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); - thresh0 = (v16u8)__msa_fill_b(*thresh0_ptr); - thresh1 = (v16u8)__msa_fill_b(*thresh1_ptr); - thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0); + LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); + thresh0 = (v16u8)__msa_fill_b(*thresh0_ptr); + thresh1 = (v16u8)__msa_fill_b(*thresh1_ptr); + thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0); - b_limit0 = (v16u8)__msa_fill_b(*b_limit0_ptr); - b_limit1 = (v16u8)__msa_fill_b(*b_limit1_ptr); - b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0); + b_limit0 = (v16u8)__msa_fill_b(*b_limit0_ptr); + b_limit1 = (v16u8)__msa_fill_b(*b_limit1_ptr); + b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0); - limit0 = (v16u8)__msa_fill_b(*limit0_ptr); - limit1 = (v16u8)__msa_fill_b(*limit1_ptr); - limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); + limit0 = (v16u8)__msa_fill_b(*limit0_ptr); + limit1 = (v16u8)__msa_fill_b(*limit1_ptr); + limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, - hev, mask, flat); - VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, 
b_limit0, thresh0, hev, + mask, flat); + VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev); - ST_UB4(p1, p0, q0, q1, (src - 2 * pitch), pitch); + ST_UB4(p1, p0, q0, q1, (src - 2 * pitch), pitch); } static void loop_filter_vertical_4_dual_msa(uint8_t *src, int32_t pitch, @@ -309,518 +306,467 @@ static void loop_filter_vertical_4_dual_msa(uint8_t *src, int32_t pitch, const uint8_t *thresh0_ptr, const uint8_t *b_limit1_ptr, const uint8_t *limit1_ptr, - const uint8_t *thresh1_ptr) -{ - v16u8 mask, hev, flat; - v16u8 thresh0, b_limit0, limit0, thresh1, b_limit1, limit1; - v16u8 p3, p2, p1, p0, q3, q2, q1, q0; - v16u8 row0, row1, row2, row3, row4, row5, row6, row7; - v16u8 row8, row9, row10, row11, row12, row13, row14, row15; - v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; + const uint8_t *thresh1_ptr) { + v16u8 mask, hev, flat; + v16u8 thresh0, b_limit0, limit0, thresh1, b_limit1, limit1; + v16u8 p3, p2, p1, p0, q3, q2, q1, q0; + v16u8 row0, row1, row2, row3, row4, row5, row6, row7; + v16u8 row8, row9, row10, row11, row12, row13, row14, row15; + v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; - LD_UB8(src - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7); - LD_UB8(src - 4 + (8 * pitch), pitch, - row8, row9, row10, row11, row12, row13, row14, row15); - TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, - row8, row9, row10, row11, row12, row13, row14, row15, - p3, p2, p1, p0, q0, q1, q2, q3); + LD_UB8(src - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7); + LD_UB8(src - 4 + (8 * pitch), pitch, row8, row9, row10, row11, row12, row13, + row14, row15); + TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8, + row9, row10, row11, row12, row13, row14, row15, p3, p2, + p1, p0, q0, q1, q2, q3); - thresh0 = (v16u8)__msa_fill_b(*thresh0_ptr); - thresh1 = (v16u8)__msa_fill_b(*thresh1_ptr); - thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0); + thresh0 = (v16u8)__msa_fill_b(*thresh0_ptr); + thresh1 = 
(v16u8)__msa_fill_b(*thresh1_ptr); + thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0); - b_limit0 = (v16u8)__msa_fill_b(*b_limit0_ptr); - b_limit1 = (v16u8)__msa_fill_b(*b_limit1_ptr); - b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0); + b_limit0 = (v16u8)__msa_fill_b(*b_limit0_ptr); + b_limit1 = (v16u8)__msa_fill_b(*b_limit1_ptr); + b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0); - limit0 = (v16u8)__msa_fill_b(*limit0_ptr); - limit1 = (v16u8)__msa_fill_b(*limit1_ptr); - limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); + limit0 = (v16u8)__msa_fill_b(*limit0_ptr); + limit1 = (v16u8)__msa_fill_b(*limit1_ptr); + limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, - hev, mask, flat); - VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev); - ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1); - ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3); - ILVL_B2_SH(p0, p1, q1, q0, tmp0, tmp1); - ILVRL_H2_SH(tmp1, tmp0, tmp4, tmp5); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev, + mask, flat); + VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev); + ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1); + ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3); + ILVL_B2_SH(p0, p1, q1, q0, tmp0, tmp1); + ILVRL_H2_SH(tmp1, tmp0, tmp4, tmp5); - src -= 2; - ST4x8_UB(tmp2, tmp3, src, pitch); - src += (8 * pitch); - ST4x8_UB(tmp4, tmp5, src, pitch); + src -= 2; + ST4x8_UB(tmp2, tmp3, src, pitch); + src += (8 * pitch); + ST4x8_UB(tmp4, tmp5, src, pitch); } static void mbloop_filter_horizontal_edge_y_msa(uint8_t *src, int32_t pitch, const uint8_t b_limit_in, const uint8_t limit_in, - const uint8_t thresh_in) -{ - uint8_t *temp_src; - v16u8 p3, p2, p1, p0, q3, q2, q1, q0; - v16u8 mask, hev, flat, thresh, limit, b_limit; + const uint8_t thresh_in) { + uint8_t *temp_src; + v16u8 p3, p2, p1, p0, q3, q2, q1, q0; + v16u8 mask, hev, flat, thresh, limit, b_limit; - b_limit = 
(v16u8)__msa_fill_b(b_limit_in); - limit = (v16u8)__msa_fill_b(limit_in); - thresh = (v16u8)__msa_fill_b(thresh_in); - temp_src = src - (pitch << 2); - LD_UB8(temp_src, pitch, p3, p2, p1, p0, q0, q1, q2, q3); - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); - VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); - temp_src = src - 3 * pitch; - ST_UB4(p2, p1, p0, q0, temp_src, pitch); - temp_src += (4 * pitch); - ST_UB2(q1, q2, temp_src, pitch); + b_limit = (v16u8)__msa_fill_b(b_limit_in); + limit = (v16u8)__msa_fill_b(limit_in); + thresh = (v16u8)__msa_fill_b(thresh_in); + temp_src = src - (pitch << 2); + LD_UB8(temp_src, pitch, p3, p2, p1, p0, q0, q1, q2, q3); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); + VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); + temp_src = src - 3 * pitch; + ST_UB4(p2, p1, p0, q0, temp_src, pitch); + temp_src += (4 * pitch); + ST_UB2(q1, q2, temp_src, pitch); } static void mbloop_filter_horizontal_edge_uv_msa(uint8_t *src_u, uint8_t *src_v, int32_t pitch, const uint8_t b_limit_in, const uint8_t limit_in, - const uint8_t thresh_in) -{ - uint8_t *temp_src; - uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d; - v16u8 p3, p2, p1, p0, q3, q2, q1, q0; - v16u8 mask, hev, flat, thresh, limit, b_limit; - v16u8 p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u; - v16u8 p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v; + const uint8_t thresh_in) { + uint8_t *temp_src; + uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d; + v16u8 p3, p2, p1, p0, q3, q2, q1, q0; + v16u8 mask, hev, flat, thresh, limit, b_limit; + v16u8 p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u; + v16u8 p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v; - b_limit = (v16u8)__msa_fill_b(b_limit_in); - limit = (v16u8)__msa_fill_b(limit_in); - thresh = (v16u8)__msa_fill_b(thresh_in); + b_limit = (v16u8)__msa_fill_b(b_limit_in); + limit = (v16u8)__msa_fill_b(limit_in); + thresh = (v16u8)__msa_fill_b(thresh_in); - temp_src = 
src_u - (pitch << 2); - LD_UB8(temp_src, pitch, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u); - temp_src = src_v - (pitch << 2); - LD_UB8(temp_src, pitch, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v); + temp_src = src_u - (pitch << 2); + LD_UB8(temp_src, pitch, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u); + temp_src = src_v - (pitch << 2); + LD_UB8(temp_src, pitch, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v); - ILVR_D4_UB(p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0); - ILVR_D4_UB(q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3); - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); - VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); + ILVR_D4_UB(p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0); + ILVR_D4_UB(q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); + VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); - p2_d = __msa_copy_u_d((v2i64)p2, 0); - p1_d = __msa_copy_u_d((v2i64)p1, 0); - p0_d = __msa_copy_u_d((v2i64)p0, 0); - q0_d = __msa_copy_u_d((v2i64)q0, 0); - q1_d = __msa_copy_u_d((v2i64)q1, 0); - q2_d = __msa_copy_u_d((v2i64)q2, 0); - src_u -= (pitch * 3); - SD4(p2_d, p1_d, p0_d, q0_d, src_u, pitch); - src_u += 4 * pitch; - SD(q1_d, src_u); - src_u += pitch; - SD(q2_d, src_u); + p2_d = __msa_copy_u_d((v2i64)p2, 0); + p1_d = __msa_copy_u_d((v2i64)p1, 0); + p0_d = __msa_copy_u_d((v2i64)p0, 0); + q0_d = __msa_copy_u_d((v2i64)q0, 0); + q1_d = __msa_copy_u_d((v2i64)q1, 0); + q2_d = __msa_copy_u_d((v2i64)q2, 0); + src_u -= (pitch * 3); + SD4(p2_d, p1_d, p0_d, q0_d, src_u, pitch); + src_u += 4 * pitch; + SD(q1_d, src_u); + src_u += pitch; + SD(q2_d, src_u); - p2_d = __msa_copy_u_d((v2i64)p2, 1); - p1_d = __msa_copy_u_d((v2i64)p1, 1); - p0_d = __msa_copy_u_d((v2i64)p0, 1); - q0_d = __msa_copy_u_d((v2i64)q0, 1); - q1_d = __msa_copy_u_d((v2i64)q1, 1); - q2_d = 
__msa_copy_u_d((v2i64)q2, 1); - src_v -= (pitch * 3); - SD4(p2_d, p1_d, p0_d, q0_d, src_v, pitch); - src_v += 4 * pitch; - SD(q1_d, src_v); - src_v += pitch; - SD(q2_d, src_v); + p2_d = __msa_copy_u_d((v2i64)p2, 1); + p1_d = __msa_copy_u_d((v2i64)p1, 1); + p0_d = __msa_copy_u_d((v2i64)p0, 1); + q0_d = __msa_copy_u_d((v2i64)q0, 1); + q1_d = __msa_copy_u_d((v2i64)q1, 1); + q2_d = __msa_copy_u_d((v2i64)q2, 1); + src_v -= (pitch * 3); + SD4(p2_d, p1_d, p0_d, q0_d, src_v, pitch); + src_v += 4 * pitch; + SD(q1_d, src_v); + src_v += pitch; + SD(q2_d, src_v); } static void mbloop_filter_vertical_edge_y_msa(uint8_t *src, int32_t pitch, const uint8_t b_limit_in, const uint8_t limit_in, - const uint8_t thresh_in) -{ - uint8_t *temp_src; - v16u8 p3, p2, p1, p0, q3, q2, q1, q0; - v16u8 mask, hev, flat, thresh, limit, b_limit; - v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8; - v16u8 row9, row10, row11, row12, row13, row14, row15; - v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + const uint8_t thresh_in) { + uint8_t *temp_src; + v16u8 p3, p2, p1, p0, q3, q2, q1, q0; + v16u8 mask, hev, flat, thresh, limit, b_limit; + v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8; + v16u8 row9, row10, row11, row12, row13, row14, row15; + v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - b_limit = (v16u8)__msa_fill_b(b_limit_in); - limit = (v16u8)__msa_fill_b(limit_in); - thresh = (v16u8)__msa_fill_b(thresh_in); - temp_src = src - 4; - LD_UB8(temp_src, pitch, row0, row1, row2, row3, row4, row5, row6, row7); - temp_src += (8 * pitch); - LD_UB8(temp_src, pitch, - row8, row9, row10, row11, row12, row13, row14, row15); - TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, - row8, row9, row10, row11, row12, row13, row14, row15, - p3, p2, p1, p0, q0, q1, q2, q3); + b_limit = (v16u8)__msa_fill_b(b_limit_in); + limit = (v16u8)__msa_fill_b(limit_in); + thresh = (v16u8)__msa_fill_b(thresh_in); + temp_src = src - 4; + LD_UB8(temp_src, pitch, row0, row1, 
row2, row3, row4, row5, row6, row7); + temp_src += (8 * pitch); + LD_UB8(temp_src, pitch, row8, row9, row10, row11, row12, row13, row14, row15); + TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8, + row9, row10, row11, row12, row13, row14, row15, p3, p2, + p1, p0, q0, q1, q2, q3); - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); - VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); - ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1); - ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4); - ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1); - ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7); - ILVRL_B2_SH(q2, q1, tmp2, tmp5); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); + VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); + ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1); + ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4); + ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1); + ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7); + ILVRL_B2_SH(q2, q1, tmp2, tmp5); - temp_src = src - 3; - VP8_ST6x1_UB(tmp3, 0, tmp2, 0, temp_src, 4); - temp_src += pitch; - VP8_ST6x1_UB(tmp3, 1, tmp2, 1, temp_src, 4); - temp_src += pitch; - VP8_ST6x1_UB(tmp3, 2, tmp2, 2, temp_src, 4); - temp_src += pitch; - VP8_ST6x1_UB(tmp3, 3, tmp2, 3, temp_src, 4); - temp_src += pitch; - VP8_ST6x1_UB(tmp4, 0, tmp2, 4, temp_src, 4); - temp_src += pitch; - VP8_ST6x1_UB(tmp4, 1, tmp2, 5, temp_src, 4); - temp_src += pitch; - VP8_ST6x1_UB(tmp4, 2, tmp2, 6, temp_src, 4); - temp_src += pitch; - VP8_ST6x1_UB(tmp4, 3, tmp2, 7, temp_src, 4); - temp_src += pitch; - VP8_ST6x1_UB(tmp6, 0, tmp5, 0, temp_src, 4); - temp_src += pitch; - VP8_ST6x1_UB(tmp6, 1, tmp5, 1, temp_src, 4); - temp_src += pitch; - VP8_ST6x1_UB(tmp6, 2, tmp5, 2, temp_src, 4); - temp_src += pitch; - VP8_ST6x1_UB(tmp6, 3, tmp5, 3, temp_src, 4); - temp_src += pitch; - VP8_ST6x1_UB(tmp7, 0, tmp5, 4, temp_src, 4); - temp_src += pitch; - VP8_ST6x1_UB(tmp7, 1, tmp5, 5, temp_src, 4); - temp_src += pitch; - VP8_ST6x1_UB(tmp7, 2, tmp5, 6, 
temp_src, 4); - temp_src += pitch; - VP8_ST6x1_UB(tmp7, 3, tmp5, 7, temp_src, 4); + temp_src = src - 3; + VP8_ST6x1_UB(tmp3, 0, tmp2, 0, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_UB(tmp3, 1, tmp2, 1, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_UB(tmp3, 2, tmp2, 2, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_UB(tmp3, 3, tmp2, 3, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_UB(tmp4, 0, tmp2, 4, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_UB(tmp4, 1, tmp2, 5, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_UB(tmp4, 2, tmp2, 6, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_UB(tmp4, 3, tmp2, 7, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_UB(tmp6, 0, tmp5, 0, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_UB(tmp6, 1, tmp5, 1, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_UB(tmp6, 2, tmp5, 2, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_UB(tmp6, 3, tmp5, 3, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_UB(tmp7, 0, tmp5, 4, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_UB(tmp7, 1, tmp5, 5, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_UB(tmp7, 2, tmp5, 6, temp_src, 4); + temp_src += pitch; + VP8_ST6x1_UB(tmp7, 3, tmp5, 7, temp_src, 4); } static void mbloop_filter_vertical_edge_uv_msa(uint8_t *src_u, uint8_t *src_v, int32_t pitch, const uint8_t b_limit_in, const uint8_t limit_in, - const uint8_t thresh_in) -{ - v16u8 p3, p2, p1, p0, q3, q2, q1, q0; - v16u8 mask, hev, flat, thresh, limit, b_limit; - v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8; - v16u8 row9, row10, row11, row12, row13, row14, row15; - v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + const uint8_t thresh_in) { + v16u8 p3, p2, p1, p0, q3, q2, q1, q0; + v16u8 mask, hev, flat, thresh, limit, b_limit; + v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8; + v16u8 row9, row10, row11, row12, row13, row14, row15; + v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - b_limit = (v16u8)__msa_fill_b(b_limit_in); - limit = (v16u8)__msa_fill_b(limit_in); - thresh = 
(v16u8)__msa_fill_b(thresh_in); + b_limit = (v16u8)__msa_fill_b(b_limit_in); + limit = (v16u8)__msa_fill_b(limit_in); + thresh = (v16u8)__msa_fill_b(thresh_in); - LD_UB8(src_u - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7); - LD_UB8(src_v - 4, pitch, - row8, row9, row10, row11, row12, row13, row14, row15); - TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, - row8, row9, row10, row11, row12, row13, row14, row15, - p3, p2, p1, p0, q0, q1, q2, q3); + LD_UB8(src_u - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7); + LD_UB8(src_v - 4, pitch, row8, row9, row10, row11, row12, row13, row14, + row15); + TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8, + row9, row10, row11, row12, row13, row14, row15, p3, p2, + p1, p0, q0, q1, q2, q3); - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); - VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); + VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); - ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1); - ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4); - ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1); - ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7); - ILVRL_B2_SH(q2, q1, tmp2, tmp5); + ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1); + ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4); + ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1); + ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7); + ILVRL_B2_SH(q2, q1, tmp2, tmp5); - src_u -= 3; - VP8_ST6x1_UB(tmp3, 0, tmp2, 0, src_u, 4); - src_u += pitch; - VP8_ST6x1_UB(tmp3, 1, tmp2, 1, src_u, 4); - src_u += pitch; - VP8_ST6x1_UB(tmp3, 2, tmp2, 2, src_u, 4); - src_u += pitch; - VP8_ST6x1_UB(tmp3, 3, tmp2, 3, src_u, 4); - src_u += pitch; - VP8_ST6x1_UB(tmp4, 0, tmp2, 4, src_u, 4); - src_u += pitch; - VP8_ST6x1_UB(tmp4, 1, tmp2, 5, src_u, 4); - src_u += pitch; - VP8_ST6x1_UB(tmp4, 2, tmp2, 6, src_u, 4); - src_u += pitch; - VP8_ST6x1_UB(tmp4, 3, tmp2, 7, src_u, 4); + src_u -= 3; + 
VP8_ST6x1_UB(tmp3, 0, tmp2, 0, src_u, 4); + src_u += pitch; + VP8_ST6x1_UB(tmp3, 1, tmp2, 1, src_u, 4); + src_u += pitch; + VP8_ST6x1_UB(tmp3, 2, tmp2, 2, src_u, 4); + src_u += pitch; + VP8_ST6x1_UB(tmp3, 3, tmp2, 3, src_u, 4); + src_u += pitch; + VP8_ST6x1_UB(tmp4, 0, tmp2, 4, src_u, 4); + src_u += pitch; + VP8_ST6x1_UB(tmp4, 1, tmp2, 5, src_u, 4); + src_u += pitch; + VP8_ST6x1_UB(tmp4, 2, tmp2, 6, src_u, 4); + src_u += pitch; + VP8_ST6x1_UB(tmp4, 3, tmp2, 7, src_u, 4); - src_v -= 3; - VP8_ST6x1_UB(tmp6, 0, tmp5, 0, src_v, 4); - src_v += pitch; - VP8_ST6x1_UB(tmp6, 1, tmp5, 1, src_v, 4); - src_v += pitch; - VP8_ST6x1_UB(tmp6, 2, tmp5, 2, src_v, 4); - src_v += pitch; - VP8_ST6x1_UB(tmp6, 3, tmp5, 3, src_v, 4); - src_v += pitch; - VP8_ST6x1_UB(tmp7, 0, tmp5, 4, src_v, 4); - src_v += pitch; - VP8_ST6x1_UB(tmp7, 1, tmp5, 5, src_v, 4); - src_v += pitch; - VP8_ST6x1_UB(tmp7, 2, tmp5, 6, src_v, 4); - src_v += pitch; - VP8_ST6x1_UB(tmp7, 3, tmp5, 7, src_v, 4); + src_v -= 3; + VP8_ST6x1_UB(tmp6, 0, tmp5, 0, src_v, 4); + src_v += pitch; + VP8_ST6x1_UB(tmp6, 1, tmp5, 1, src_v, 4); + src_v += pitch; + VP8_ST6x1_UB(tmp6, 2, tmp5, 2, src_v, 4); + src_v += pitch; + VP8_ST6x1_UB(tmp6, 3, tmp5, 3, src_v, 4); + src_v += pitch; + VP8_ST6x1_UB(tmp7, 0, tmp5, 4, src_v, 4); + src_v += pitch; + VP8_ST6x1_UB(tmp7, 1, tmp5, 5, src_v, 4); + src_v += pitch; + VP8_ST6x1_UB(tmp7, 2, tmp5, 6, src_v, 4); + src_v += pitch; + VP8_ST6x1_UB(tmp7, 3, tmp5, 7, src_v, 4); } void vp8_loop_filter_simple_horizontal_edge_msa(uint8_t *src, int32_t pitch, - const uint8_t *b_limit_ptr) -{ - v16u8 p1, p0, q1, q0; - v16u8 mask, b_limit; + const uint8_t *b_limit_ptr) { + v16u8 p1, p0, q1, q0; + v16u8 mask, b_limit; - b_limit = (v16u8)__msa_fill_b(*b_limit_ptr); - LD_UB4(src - (pitch << 1), pitch, p1, p0, q0, q1); - VP8_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask); - VP8_SIMPLE_FILT(p1, p0, q0, q1, mask); - ST_UB2(p0, q0, (src - pitch), pitch); + b_limit = (v16u8)__msa_fill_b(*b_limit_ptr); + LD_UB4(src - (pitch 
<< 1), pitch, p1, p0, q0, q1); + VP8_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask); + VP8_SIMPLE_FILT(p1, p0, q0, q1, mask); + ST_UB2(p0, q0, (src - pitch), pitch); } void vp8_loop_filter_simple_vertical_edge_msa(uint8_t *src, int32_t pitch, - const uint8_t *b_limit_ptr) -{ - uint8_t *temp_src; - v16u8 p1, p0, q1, q0; - v16u8 mask, b_limit; - v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8; - v16u8 row9, row10, row11, row12, row13, row14, row15; - v8i16 tmp0, tmp1; + const uint8_t *b_limit_ptr) { + uint8_t *temp_src; + v16u8 p1, p0, q1, q0; + v16u8 mask, b_limit; + v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8; + v16u8 row9, row10, row11, row12, row13, row14, row15; + v8i16 tmp0, tmp1; - b_limit = (v16u8)__msa_fill_b(*b_limit_ptr); - temp_src = src - 2; - LD_UB8(temp_src, pitch, row0, row1, row2, row3, row4, row5, row6, row7); - temp_src += (8 * pitch); - LD_UB8(temp_src, pitch, - row8, row9, row10, row11, row12, row13, row14, row15); - TRANSPOSE16x4_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, - row8, row9, row10, row11, row12, row13, row14, row15, - p1, p0, q0, q1); - VP8_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask); - VP8_SIMPLE_FILT(p1, p0, q0, q1, mask); - ILVRL_B2_SH(q0, p0, tmp1, tmp0); + b_limit = (v16u8)__msa_fill_b(*b_limit_ptr); + temp_src = src - 2; + LD_UB8(temp_src, pitch, row0, row1, row2, row3, row4, row5, row6, row7); + temp_src += (8 * pitch); + LD_UB8(temp_src, pitch, row8, row9, row10, row11, row12, row13, row14, row15); + TRANSPOSE16x4_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8, + row9, row10, row11, row12, row13, row14, row15, p1, p0, + q0, q1); + VP8_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask); + VP8_SIMPLE_FILT(p1, p0, q0, q1, mask); + ILVRL_B2_SH(q0, p0, tmp1, tmp0); - src -= 1; - ST2x4_UB(tmp1, 0, src, pitch); - src += 4 * pitch; - ST2x4_UB(tmp1, 4, src, pitch); - src += 4 * pitch; - ST2x4_UB(tmp0, 0, src, pitch); - src += 4 * pitch; - ST2x4_UB(tmp0, 4, src, pitch); - src += 4 * pitch; + src -= 1; 
+ ST2x4_UB(tmp1, 0, src, pitch); + src += 4 * pitch; + ST2x4_UB(tmp1, 4, src, pitch); + src += 4 * pitch; + ST2x4_UB(tmp0, 0, src, pitch); + src += 4 * pitch; + ST2x4_UB(tmp0, 4, src, pitch); + src += 4 * pitch; } static void loop_filter_horizontal_edge_uv_msa(uint8_t *src_u, uint8_t *src_v, int32_t pitch, const uint8_t b_limit_in, const uint8_t limit_in, - const uint8_t thresh_in) -{ - uint64_t p1_d, p0_d, q0_d, q1_d; - v16u8 p3, p2, p1, p0, q3, q2, q1, q0; - v16u8 mask, hev, flat, thresh, limit, b_limit; - v16u8 p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u; - v16u8 p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v; + const uint8_t thresh_in) { + uint64_t p1_d, p0_d, q0_d, q1_d; + v16u8 p3, p2, p1, p0, q3, q2, q1, q0; + v16u8 mask, hev, flat, thresh, limit, b_limit; + v16u8 p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u; + v16u8 p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v; - thresh = (v16u8)__msa_fill_b(thresh_in); - limit = (v16u8)__msa_fill_b(limit_in); - b_limit = (v16u8)__msa_fill_b(b_limit_in); + thresh = (v16u8)__msa_fill_b(thresh_in); + limit = (v16u8)__msa_fill_b(limit_in); + b_limit = (v16u8)__msa_fill_b(b_limit_in); - src_u = src_u - (pitch << 2); - LD_UB8(src_u, pitch, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u); - src_u += (5 * pitch); - src_v = src_v - (pitch << 2); - LD_UB8(src_v, pitch, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v); - src_v += (5 * pitch); + src_u = src_u - (pitch << 2); + LD_UB8(src_u, pitch, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u); + src_u += (5 * pitch); + src_v = src_v - (pitch << 2); + LD_UB8(src_v, pitch, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v); + src_v += (5 * pitch); - /* right 8 element of p3 are u pixel and - left 8 element of p3 are v pixel */ - ILVR_D4_UB(p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0); - ILVR_D4_UB(q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3); - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); - 
VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev); + /* right 8 element of p3 are u pixel and + left 8 element of p3 are v pixel */ + ILVR_D4_UB(p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0); + ILVR_D4_UB(q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); + VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev); - p1_d = __msa_copy_u_d((v2i64)p1, 0); - p0_d = __msa_copy_u_d((v2i64)p0, 0); - q0_d = __msa_copy_u_d((v2i64)q0, 0); - q1_d = __msa_copy_u_d((v2i64)q1, 0); - SD4(q1_d, q0_d, p0_d, p1_d, src_u, (- pitch)); + p1_d = __msa_copy_u_d((v2i64)p1, 0); + p0_d = __msa_copy_u_d((v2i64)p0, 0); + q0_d = __msa_copy_u_d((v2i64)q0, 0); + q1_d = __msa_copy_u_d((v2i64)q1, 0); + SD4(q1_d, q0_d, p0_d, p1_d, src_u, (-pitch)); - p1_d = __msa_copy_u_d((v2i64)p1, 1); - p0_d = __msa_copy_u_d((v2i64)p0, 1); - q0_d = __msa_copy_u_d((v2i64)q0, 1); - q1_d = __msa_copy_u_d((v2i64)q1, 1); - SD4(q1_d, q0_d, p0_d, p1_d, src_v, (- pitch)); + p1_d = __msa_copy_u_d((v2i64)p1, 1); + p0_d = __msa_copy_u_d((v2i64)p0, 1); + q0_d = __msa_copy_u_d((v2i64)q0, 1); + q1_d = __msa_copy_u_d((v2i64)q1, 1); + SD4(q1_d, q0_d, p0_d, p1_d, src_v, (-pitch)); } static void loop_filter_vertical_edge_uv_msa(uint8_t *src_u, uint8_t *src_v, int32_t pitch, const uint8_t b_limit_in, const uint8_t limit_in, - const uint8_t thresh_in) -{ - uint8_t *temp_src_u, *temp_src_v; - v16u8 p3, p2, p1, p0, q3, q2, q1, q0; - v16u8 mask, hev, flat, thresh, limit, b_limit; - v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8; - v16u8 row9, row10, row11, row12, row13, row14, row15; - v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; + const uint8_t thresh_in) { + uint8_t *temp_src_u, *temp_src_v; + v16u8 p3, p2, p1, p0, q3, q2, q1, q0; + v16u8 mask, hev, flat, thresh, limit, b_limit; + v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8; + v16u8 row9, row10, row11, row12, row13, row14, row15; + v4i32 tmp0, tmp1, 
tmp2, tmp3, tmp4, tmp5; - thresh = (v16u8)__msa_fill_b(thresh_in); - limit = (v16u8)__msa_fill_b(limit_in); - b_limit = (v16u8)__msa_fill_b(b_limit_in); + thresh = (v16u8)__msa_fill_b(thresh_in); + limit = (v16u8)__msa_fill_b(limit_in); + b_limit = (v16u8)__msa_fill_b(b_limit_in); - LD_UB8(src_u - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7); - LD_UB8(src_v - 4, pitch, - row8, row9, row10, row11, row12, row13, row14, row15); - TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, - row8, row9, row10, row11, row12, row13, row14, row15, - p3, p2, p1, p0, q0, q1, q2, q3); + LD_UB8(src_u - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7); + LD_UB8(src_v - 4, pitch, row8, row9, row10, row11, row12, row13, row14, + row15); + TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8, + row9, row10, row11, row12, row13, row14, row15, p3, p2, + p1, p0, q0, q1, q2, q3); - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); - VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev); - ILVR_B2_SW(p0, p1, q1, q0, tmp0, tmp1); - ILVRL_H2_SW(tmp1, tmp0, tmp2, tmp3); - tmp0 = (v4i32)__msa_ilvl_b((v16i8)p0, (v16i8)p1); - tmp1 = (v4i32)__msa_ilvl_b((v16i8)q1, (v16i8)q0); - ILVRL_H2_SW(tmp1, tmp0, tmp4, tmp5); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); + VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev); + ILVR_B2_SW(p0, p1, q1, q0, tmp0, tmp1); + ILVRL_H2_SW(tmp1, tmp0, tmp2, tmp3); + tmp0 = (v4i32)__msa_ilvl_b((v16i8)p0, (v16i8)p1); + tmp1 = (v4i32)__msa_ilvl_b((v16i8)q1, (v16i8)q0); + ILVRL_H2_SW(tmp1, tmp0, tmp4, tmp5); - temp_src_u = src_u - 2; - ST4x4_UB(tmp2, tmp2, 0, 1, 2, 3, temp_src_u, pitch); - temp_src_u += 4 * pitch; - ST4x4_UB(tmp3, tmp3, 0, 1, 2, 3, temp_src_u, pitch); + temp_src_u = src_u - 2; + ST4x4_UB(tmp2, tmp2, 0, 1, 2, 3, temp_src_u, pitch); + temp_src_u += 4 * pitch; + ST4x4_UB(tmp3, tmp3, 0, 1, 2, 3, temp_src_u, pitch); - temp_src_v = src_v 
- 2; - ST4x4_UB(tmp4, tmp4, 0, 1, 2, 3, temp_src_v, pitch); - temp_src_v += 4 * pitch; - ST4x4_UB(tmp5, tmp5, 0, 1, 2, 3, temp_src_v, pitch); + temp_src_v = src_v - 2; + ST4x4_UB(tmp4, tmp4, 0, 1, 2, 3, temp_src_v, pitch); + temp_src_v += 4 * pitch; + ST4x4_UB(tmp5, tmp5, 0, 1, 2, 3, temp_src_v, pitch); } -void vp8_loop_filter_mbh_msa(uint8_t *src_y, uint8_t *src_u, - uint8_t *src_v, int32_t pitch_y, - int32_t pitch_u_v, - loop_filter_info *lpf_info_ptr) -{ - mbloop_filter_horizontal_edge_y_msa(src_y, pitch_y, - *lpf_info_ptr->mblim, - *lpf_info_ptr->lim, - *lpf_info_ptr->hev_thr); - if (src_u) - { - mbloop_filter_horizontal_edge_uv_msa(src_u, src_v, pitch_u_v, - *lpf_info_ptr->mblim, - *lpf_info_ptr->lim, - *lpf_info_ptr->hev_thr); - } -} - -void vp8_loop_filter_mbv_msa(uint8_t *src_y, uint8_t *src_u, - uint8_t *src_v, int32_t pitch_y, - int32_t pitch_u_v, - loop_filter_info *lpf_info_ptr) -{ - mbloop_filter_vertical_edge_y_msa(src_y, pitch_y, - *lpf_info_ptr->mblim, +void vp8_loop_filter_mbh_msa(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v, + int32_t pitch_y, int32_t pitch_u_v, + loop_filter_info *lpf_info_ptr) { + mbloop_filter_horizontal_edge_y_msa(src_y, pitch_y, *lpf_info_ptr->mblim, *lpf_info_ptr->lim, *lpf_info_ptr->hev_thr); - if (src_u) - { - mbloop_filter_vertical_edge_uv_msa(src_u, src_v, pitch_u_v, - *lpf_info_ptr->mblim, - *lpf_info_ptr->lim, - *lpf_info_ptr->hev_thr); - } + if (src_u) { + mbloop_filter_horizontal_edge_uv_msa( + src_u, src_v, pitch_u_v, *lpf_info_ptr->mblim, *lpf_info_ptr->lim, + *lpf_info_ptr->hev_thr); + } } -void vp8_loop_filter_bh_msa(uint8_t *src_y, uint8_t *src_u, - uint8_t *src_v, int32_t pitch_y, - int32_t pitch_u_v, - loop_filter_info *lpf_info_ptr) -{ - loop_filter_horizontal_4_dual_msa(src_y + 4 * pitch_y, pitch_y, - lpf_info_ptr->blim, - lpf_info_ptr->lim, - lpf_info_ptr->hev_thr, - lpf_info_ptr->blim, - lpf_info_ptr->lim, - lpf_info_ptr->hev_thr); - loop_filter_horizontal_4_dual_msa(src_y + 8 * pitch_y, pitch_y, - 
lpf_info_ptr->blim, - lpf_info_ptr->lim, - lpf_info_ptr->hev_thr, - lpf_info_ptr->blim, - lpf_info_ptr->lim, - lpf_info_ptr->hev_thr); - loop_filter_horizontal_4_dual_msa(src_y + 12 * pitch_y, pitch_y, - lpf_info_ptr->blim, - lpf_info_ptr->lim, - lpf_info_ptr->hev_thr, - lpf_info_ptr->blim, - lpf_info_ptr->lim, - lpf_info_ptr->hev_thr); - if (src_u) - { - loop_filter_horizontal_edge_uv_msa(src_u + (4 * pitch_u_v), - src_v + (4 * pitch_u_v), - pitch_u_v, - *lpf_info_ptr->blim, - *lpf_info_ptr->lim, - *lpf_info_ptr->hev_thr); - } +void vp8_loop_filter_mbv_msa(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v, + int32_t pitch_y, int32_t pitch_u_v, + loop_filter_info *lpf_info_ptr) { + mbloop_filter_vertical_edge_y_msa(src_y, pitch_y, *lpf_info_ptr->mblim, + *lpf_info_ptr->lim, *lpf_info_ptr->hev_thr); + if (src_u) { + mbloop_filter_vertical_edge_uv_msa(src_u, src_v, pitch_u_v, + *lpf_info_ptr->mblim, *lpf_info_ptr->lim, + *lpf_info_ptr->hev_thr); + } } -void vp8_loop_filter_bv_msa(uint8_t *src_y, uint8_t *src_u, - uint8_t *src_v, int32_t pitch_y, - int32_t pitch_u_v, - loop_filter_info *lpf_info_ptr) -{ - loop_filter_vertical_4_dual_msa(src_y + 4, pitch_y, lpf_info_ptr->blim, - lpf_info_ptr->lim, - lpf_info_ptr->hev_thr, - lpf_info_ptr->blim, - lpf_info_ptr->lim, - lpf_info_ptr->hev_thr); - loop_filter_vertical_4_dual_msa(src_y + 8, pitch_y, - lpf_info_ptr->blim, - lpf_info_ptr->lim, - lpf_info_ptr->hev_thr, - lpf_info_ptr->blim, - lpf_info_ptr->lim, - lpf_info_ptr->hev_thr); - loop_filter_vertical_4_dual_msa(src_y + 12, pitch_y, - lpf_info_ptr->blim, - lpf_info_ptr->lim, - lpf_info_ptr->hev_thr, - lpf_info_ptr->blim, - lpf_info_ptr->lim, - lpf_info_ptr->hev_thr); - if (src_u) - { - loop_filter_vertical_edge_uv_msa(src_u + 4, src_v + 4, pitch_u_v, - *lpf_info_ptr->blim, - *lpf_info_ptr->lim, - *lpf_info_ptr->hev_thr); - } +void vp8_loop_filter_bh_msa(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v, + int32_t pitch_y, int32_t pitch_u_v, + loop_filter_info *lpf_info_ptr) { 
+ loop_filter_horizontal_4_dual_msa(src_y + 4 * pitch_y, pitch_y, + lpf_info_ptr->blim, lpf_info_ptr->lim, + lpf_info_ptr->hev_thr, lpf_info_ptr->blim, + lpf_info_ptr->lim, lpf_info_ptr->hev_thr); + loop_filter_horizontal_4_dual_msa(src_y + 8 * pitch_y, pitch_y, + lpf_info_ptr->blim, lpf_info_ptr->lim, + lpf_info_ptr->hev_thr, lpf_info_ptr->blim, + lpf_info_ptr->lim, lpf_info_ptr->hev_thr); + loop_filter_horizontal_4_dual_msa(src_y + 12 * pitch_y, pitch_y, + lpf_info_ptr->blim, lpf_info_ptr->lim, + lpf_info_ptr->hev_thr, lpf_info_ptr->blim, + lpf_info_ptr->lim, lpf_info_ptr->hev_thr); + if (src_u) { + loop_filter_horizontal_edge_uv_msa( + src_u + (4 * pitch_u_v), src_v + (4 * pitch_u_v), pitch_u_v, + *lpf_info_ptr->blim, *lpf_info_ptr->lim, *lpf_info_ptr->hev_thr); + } +} + +void vp8_loop_filter_bv_msa(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v, + int32_t pitch_y, int32_t pitch_u_v, + loop_filter_info *lpf_info_ptr) { + loop_filter_vertical_4_dual_msa(src_y + 4, pitch_y, lpf_info_ptr->blim, + lpf_info_ptr->lim, lpf_info_ptr->hev_thr, + lpf_info_ptr->blim, lpf_info_ptr->lim, + lpf_info_ptr->hev_thr); + loop_filter_vertical_4_dual_msa(src_y + 8, pitch_y, lpf_info_ptr->blim, + lpf_info_ptr->lim, lpf_info_ptr->hev_thr, + lpf_info_ptr->blim, lpf_info_ptr->lim, + lpf_info_ptr->hev_thr); + loop_filter_vertical_4_dual_msa(src_y + 12, pitch_y, lpf_info_ptr->blim, + lpf_info_ptr->lim, lpf_info_ptr->hev_thr, + lpf_info_ptr->blim, lpf_info_ptr->lim, + lpf_info_ptr->hev_thr); + if (src_u) { + loop_filter_vertical_edge_uv_msa(src_u + 4, src_v + 4, pitch_u_v, + *lpf_info_ptr->blim, *lpf_info_ptr->lim, + *lpf_info_ptr->hev_thr); + } } void vp8_loop_filter_bhs_msa(uint8_t *src_y, int32_t pitch_y, - const uint8_t *b_limit_ptr) -{ - vp8_loop_filter_simple_horizontal_edge_msa(src_y + (4 * pitch_y), - pitch_y, b_limit_ptr); - vp8_loop_filter_simple_horizontal_edge_msa(src_y + (8 * pitch_y), - pitch_y, b_limit_ptr); - vp8_loop_filter_simple_horizontal_edge_msa(src_y + (12 * 
pitch_y), - pitch_y, b_limit_ptr); + const uint8_t *b_limit_ptr) { + vp8_loop_filter_simple_horizontal_edge_msa(src_y + (4 * pitch_y), pitch_y, + b_limit_ptr); + vp8_loop_filter_simple_horizontal_edge_msa(src_y + (8 * pitch_y), pitch_y, + b_limit_ptr); + vp8_loop_filter_simple_horizontal_edge_msa(src_y + (12 * pitch_y), pitch_y, + b_limit_ptr); } void vp8_loop_filter_bvs_msa(uint8_t *src_y, int32_t pitch_y, - const uint8_t *b_limit_ptr) -{ - vp8_loop_filter_simple_vertical_edge_msa(src_y + 4, pitch_y, b_limit_ptr); - vp8_loop_filter_simple_vertical_edge_msa(src_y + 8, pitch_y, b_limit_ptr); - vp8_loop_filter_simple_vertical_edge_msa(src_y + 12, pitch_y, b_limit_ptr); + const uint8_t *b_limit_ptr) { + vp8_loop_filter_simple_vertical_edge_msa(src_y + 4, pitch_y, b_limit_ptr); + vp8_loop_filter_simple_vertical_edge_msa(src_y + 8, pitch_y, b_limit_ptr); + vp8_loop_filter_simple_vertical_edge_msa(src_y + 12, pitch_y, b_limit_ptr); } diff --git a/libs/libvpx/vp8/common/mips/msa/mfqe_msa.c b/libs/libvpx/vp8/common/mips/msa/mfqe_msa.c index 3e7629f3a1..9aac95b2fa 100644 --- a/libs/libvpx/vp8/common/mips/msa/mfqe_msa.c +++ b/libs/libvpx/vp8/common/mips/msa/mfqe_msa.c @@ -14,133 +14,126 @@ static void filter_by_weight8x8_msa(uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr, int32_t dst_stride, - int32_t src_weight) -{ - int32_t dst_weight = (1 << MFQE_PRECISION) - src_weight; - int32_t row; - uint64_t src0_d, src1_d, dst0_d, dst1_d; - v16i8 src0 = { 0 }; - v16i8 src1 = { 0 }; - v16i8 dst0 = { 0 }; - v16i8 dst1 = { 0 }; - v8i16 src_wt, dst_wt, res_h_r, res_h_l, src_r, src_l, dst_r, dst_l; + int32_t src_weight) { + int32_t dst_weight = (1 << MFQE_PRECISION) - src_weight; + int32_t row; + uint64_t src0_d, src1_d, dst0_d, dst1_d; + v16i8 src0 = { 0 }; + v16i8 src1 = { 0 }; + v16i8 dst0 = { 0 }; + v16i8 dst1 = { 0 }; + v8i16 src_wt, dst_wt, res_h_r, res_h_l, src_r, src_l, dst_r, dst_l; - src_wt = __msa_fill_h(src_weight); - dst_wt = __msa_fill_h(dst_weight); + src_wt = 
__msa_fill_h(src_weight); + dst_wt = __msa_fill_h(dst_weight); - for (row = 2; row--;) - { - LD2(src_ptr, src_stride, src0_d, src1_d); - src_ptr += (2 * src_stride); - LD2(dst_ptr, dst_stride, dst0_d, dst1_d); - INSERT_D2_SB(src0_d, src1_d, src0); - INSERT_D2_SB(dst0_d, dst1_d, dst0); + for (row = 2; row--;) { + LD2(src_ptr, src_stride, src0_d, src1_d); + src_ptr += (2 * src_stride); + LD2(dst_ptr, dst_stride, dst0_d, dst1_d); + INSERT_D2_SB(src0_d, src1_d, src0); + INSERT_D2_SB(dst0_d, dst1_d, dst0); - LD2(src_ptr, src_stride, src0_d, src1_d); - src_ptr += (2 * src_stride); - LD2((dst_ptr + 2 * dst_stride), dst_stride, dst0_d, dst1_d); - INSERT_D2_SB(src0_d, src1_d, src1); - INSERT_D2_SB(dst0_d, dst1_d, dst1); + LD2(src_ptr, src_stride, src0_d, src1_d); + src_ptr += (2 * src_stride); + LD2((dst_ptr + 2 * dst_stride), dst_stride, dst0_d, dst1_d); + INSERT_D2_SB(src0_d, src1_d, src1); + INSERT_D2_SB(dst0_d, dst1_d, dst1); - UNPCK_UB_SH(src0, src_r, src_l); - UNPCK_UB_SH(dst0, dst_r, dst_l); - res_h_r = (src_r * src_wt); - res_h_r += (dst_r * dst_wt); - res_h_l = (src_l * src_wt); - res_h_l += (dst_l * dst_wt); - SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); - dst0 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r); - ST8x2_UB(dst0, dst_ptr, dst_stride); - dst_ptr += (2 * dst_stride); + UNPCK_UB_SH(src0, src_r, src_l); + UNPCK_UB_SH(dst0, dst_r, dst_l); + res_h_r = (src_r * src_wt); + res_h_r += (dst_r * dst_wt); + res_h_l = (src_l * src_wt); + res_h_l += (dst_l * dst_wt); + SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); + dst0 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r); + ST8x2_UB(dst0, dst_ptr, dst_stride); + dst_ptr += (2 * dst_stride); - UNPCK_UB_SH(src1, src_r, src_l); - UNPCK_UB_SH(dst1, dst_r, dst_l); - res_h_r = (src_r * src_wt); - res_h_r += (dst_r * dst_wt); - res_h_l = (src_l * src_wt); - res_h_l += (dst_l * dst_wt); - SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); - dst1 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r); - 
ST8x2_UB(dst1, dst_ptr, dst_stride); - dst_ptr += (2 * dst_stride); - } + UNPCK_UB_SH(src1, src_r, src_l); + UNPCK_UB_SH(dst1, dst_r, dst_l); + res_h_r = (src_r * src_wt); + res_h_r += (dst_r * dst_wt); + res_h_l = (src_l * src_wt); + res_h_l += (dst_l * dst_wt); + SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); + dst1 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r); + ST8x2_UB(dst1, dst_ptr, dst_stride); + dst_ptr += (2 * dst_stride); + } } static void filter_by_weight16x16_msa(uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr, int32_t dst_stride, - int32_t src_weight) -{ - int32_t dst_weight = (1 << MFQE_PRECISION) - src_weight; - int32_t row; - v16i8 src0, src1, src2, src3; - v16i8 dst0, dst1, dst2, dst3; - v8i16 src_wt, dst_wt; - v8i16 res_h_r, res_h_l; - v8i16 src_r, src_l, dst_r, dst_l; + int32_t src_weight) { + int32_t dst_weight = (1 << MFQE_PRECISION) - src_weight; + int32_t row; + v16i8 src0, src1, src2, src3; + v16i8 dst0, dst1, dst2, dst3; + v8i16 src_wt, dst_wt; + v8i16 res_h_r, res_h_l; + v8i16 src_r, src_l, dst_r, dst_l; - src_wt = __msa_fill_h(src_weight); - dst_wt = __msa_fill_h(dst_weight); + src_wt = __msa_fill_h(src_weight); + dst_wt = __msa_fill_h(dst_weight); - for (row = 4; row--;) - { - LD_SB4(src_ptr, src_stride, src0, src1, src2, src3); - src_ptr += (4 * src_stride); - LD_SB4(dst_ptr, dst_stride, dst0, dst1, dst2, dst3); + for (row = 4; row--;) { + LD_SB4(src_ptr, src_stride, src0, src1, src2, src3); + src_ptr += (4 * src_stride); + LD_SB4(dst_ptr, dst_stride, dst0, dst1, dst2, dst3); - UNPCK_UB_SH(src0, src_r, src_l); - UNPCK_UB_SH(dst0, dst_r, dst_l); - res_h_r = (src_r * src_wt); - res_h_r += (dst_r * dst_wt); - res_h_l = (src_l * src_wt); - res_h_l += (dst_l * dst_wt); - SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); - PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr); - dst_ptr += dst_stride; + UNPCK_UB_SH(src0, src_r, src_l); + UNPCK_UB_SH(dst0, dst_r, dst_l); + res_h_r = (src_r * src_wt); + res_h_r += (dst_r * dst_wt); + res_h_l = 
(src_l * src_wt); + res_h_l += (dst_l * dst_wt); + SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); + PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr); + dst_ptr += dst_stride; - UNPCK_UB_SH(src1, src_r, src_l); - UNPCK_UB_SH(dst1, dst_r, dst_l); - res_h_r = (src_r * src_wt); - res_h_r += (dst_r * dst_wt); - res_h_l = (src_l * src_wt); - res_h_l += (dst_l * dst_wt); - SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); - PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr); - dst_ptr += dst_stride; + UNPCK_UB_SH(src1, src_r, src_l); + UNPCK_UB_SH(dst1, dst_r, dst_l); + res_h_r = (src_r * src_wt); + res_h_r += (dst_r * dst_wt); + res_h_l = (src_l * src_wt); + res_h_l += (dst_l * dst_wt); + SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); + PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr); + dst_ptr += dst_stride; - UNPCK_UB_SH(src2, src_r, src_l); - UNPCK_UB_SH(dst2, dst_r, dst_l); - res_h_r = (src_r * src_wt); - res_h_r += (dst_r * dst_wt); - res_h_l = (src_l * src_wt); - res_h_l += (dst_l * dst_wt); - SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); - PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr); - dst_ptr += dst_stride; + UNPCK_UB_SH(src2, src_r, src_l); + UNPCK_UB_SH(dst2, dst_r, dst_l); + res_h_r = (src_r * src_wt); + res_h_r += (dst_r * dst_wt); + res_h_l = (src_l * src_wt); + res_h_l += (dst_l * dst_wt); + SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); + PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr); + dst_ptr += dst_stride; - UNPCK_UB_SH(src3, src_r, src_l); - UNPCK_UB_SH(dst3, dst_r, dst_l); - res_h_r = (src_r * src_wt); - res_h_r += (dst_r * dst_wt); - res_h_l = (src_l * src_wt); - res_h_l += (dst_l * dst_wt); - SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); - PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr); - dst_ptr += dst_stride; - } + UNPCK_UB_SH(src3, src_r, src_l); + UNPCK_UB_SH(dst3, dst_r, dst_l); + res_h_r = (src_r * src_wt); + res_h_r += (dst_r * dst_wt); + res_h_l = (src_l * src_wt); + res_h_l += (dst_l * dst_wt); + SRARI_H2_SH(res_h_r, res_h_l, MFQE_PRECISION); + PCKEV_ST_SB(res_h_r, res_h_l, dst_ptr); + 
dst_ptr += dst_stride; + } } void vp8_filter_by_weight16x16_msa(uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr, int32_t dst_stride, - int32_t src_weight) -{ - filter_by_weight16x16_msa(src_ptr, src_stride, dst_ptr, dst_stride, - src_weight); + int32_t src_weight) { + filter_by_weight16x16_msa(src_ptr, src_stride, dst_ptr, dst_stride, + src_weight); } void vp8_filter_by_weight8x8_msa(uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr, int32_t dst_stride, - int32_t src_weight) -{ - filter_by_weight8x8_msa(src_ptr, src_stride, dst_ptr, dst_stride, - src_weight); + int32_t src_weight) { + filter_by_weight8x8_msa(src_ptr, src_stride, dst_ptr, dst_stride, src_weight); } diff --git a/libs/libvpx/vp8/common/mips/msa/postproc_msa.c b/libs/libvpx/vp8/common/mips/msa/postproc_msa.c deleted file mode 100644 index c88f30238b..0000000000 --- a/libs/libvpx/vp8/common/mips/msa/postproc_msa.c +++ /dev/null @@ -1,851 +0,0 @@ -/* - * Copyright (c) 2015 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include -#include "./vp8_rtcd.h" -#include "vp8/common/mips/msa/vp8_macros_msa.h" - -static const int16_t vp8_rv_msa[] = -{ - 8, 5, 2, 2, 8, 12, 4, 9, 8, 3, - 0, 3, 9, 0, 0, 0, 8, 3, 14, 4, - 10, 1, 11, 14, 1, 14, 9, 6, 12, 11, - 8, 6, 10, 0, 0, 8, 9, 0, 3, 14, - 8, 11, 13, 4, 2, 9, 0, 3, 9, 6, - 1, 2, 3, 14, 13, 1, 8, 2, 9, 7, - 3, 3, 1, 13, 13, 6, 6, 5, 2, 7, - 11, 9, 11, 8, 7, 3, 2, 0, 13, 13, - 14, 4, 12, 5, 12, 10, 8, 10, 13, 10, - 4, 14, 4, 10, 0, 8, 11, 1, 13, 7, - 7, 14, 6, 14, 13, 2, 13, 5, 4, 4, - 0, 10, 0, 5, 13, 2, 12, 7, 11, 13, - 8, 0, 4, 10, 7, 2, 7, 2, 2, 5, - 3, 4, 7, 3, 3, 14, 14, 5, 9, 13, - 3, 14, 3, 6, 3, 0, 11, 8, 13, 1, - 13, 1, 12, 0, 10, 9, 7, 6, 2, 8, - 5, 2, 13, 7, 1, 13, 14, 7, 6, 7, - 9, 6, 10, 11, 7, 8, 7, 5, 14, 8, - 4, 4, 0, 8, 7, 10, 0, 8, 14, 11, - 3, 12, 5, 7, 14, 3, 14, 5, 2, 6, - 11, 12, 12, 8, 0, 11, 13, 1, 2, 0, - 5, 10, 14, 7, 8, 0, 4, 11, 0, 8, - 0, 3, 10, 5, 8, 0, 11, 6, 7, 8, - 10, 7, 13, 9, 2, 5, 1, 5, 10, 2, - 4, 3, 5, 6, 10, 8, 9, 4, 11, 14, - 0, 10, 0, 5, 13, 2, 12, 7, 11, 13, - 8, 0, 4, 10, 7, 2, 7, 2, 2, 5, - 3, 4, 7, 3, 3, 14, 14, 5, 9, 13, - 3, 14, 3, 6, 3, 0, 11, 8, 13, 1, - 13, 1, 12, 0, 10, 9, 7, 6, 2, 8, - 5, 2, 13, 7, 1, 13, 14, 7, 6, 7, - 9, 6, 10, 11, 7, 8, 7, 5, 14, 8, - 4, 4, 0, 8, 7, 10, 0, 8, 14, 11, - 3, 12, 5, 7, 14, 3, 14, 5, 2, 6, - 11, 12, 12, 8, 0, 11, 13, 1, 2, 0, - 5, 10, 14, 7, 8, 0, 4, 11, 0, 8, - 0, 3, 10, 5, 8, 0, 11, 6, 7, 8, - 10, 7, 13, 9, 2, 5, 1, 5, 10, 2, - 4, 3, 5, 6, 10, 8, 9, 4, 11, 14, - 3, 8, 3, 7, 8, 5, 11, 4, 12, 3, - 11, 9, 14, 8, 14, 13, 4, 3, 1, 2, - 14, 6, 5, 4, 4, 11, 4, 6, 2, 1, - 5, 8, 8, 12, 13, 5, 14, 10, 12, 13, - 0, 9, 5, 5, 11, 10, 13, 9, 10, 13, -}; - -#define VP8_TRANSPOSE8x16_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, \ - out4, out5, out6, out7, \ - out8, out9, out10, out11, \ - out12, out13, out14, out15) \ -{ \ - v8i16 temp0, temp1, temp2, temp3, temp4; \ - v8i16 temp5, temp6, temp7, temp8, temp9; \ - \ - ILVR_B4_SH(in1, 
in0, in3, in2, in5, in4, in7, in6, \ - temp0, temp1, temp2, temp3); \ - ILVR_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5); \ - ILVRL_W2_SH(temp5, temp4, temp6, temp7); \ - ILVL_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5); \ - ILVRL_W2_SH(temp5, temp4, temp8, temp9); \ - ILVL_B4_SH(in1, in0, in3, in2, in5, in4, in7, in6, \ - temp0, temp1, temp2, temp3); \ - ILVR_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5); \ - ILVRL_W2_UB(temp5, temp4, out8, out10); \ - ILVL_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5); \ - ILVRL_W2_UB(temp5, temp4, out12, out14); \ - out0 = (v16u8)temp6; \ - out2 = (v16u8)temp7; \ - out4 = (v16u8)temp8; \ - out6 = (v16u8)temp9; \ - out9 = (v16u8)__msa_ilvl_d((v2i64)out8, (v2i64)out8); \ - out11 = (v16u8)__msa_ilvl_d((v2i64)out10, (v2i64)out10); \ - out13 = (v16u8)__msa_ilvl_d((v2i64)out12, (v2i64)out12); \ - out15 = (v16u8)__msa_ilvl_d((v2i64)out14, (v2i64)out14); \ - out1 = (v16u8)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \ - out3 = (v16u8)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \ - out5 = (v16u8)__msa_ilvl_d((v2i64)out4, (v2i64)out4); \ - out7 = (v16u8)__msa_ilvl_d((v2i64)out6, (v2i64)out6); \ -} - -#define VP8_AVER_IF_RETAIN(above2_in, above1_in, src_in, \ - below1_in, below2_in, ref, out) \ -{ \ - v16u8 temp0, temp1; \ - \ - temp1 = __msa_aver_u_b(above2_in, above1_in); \ - temp0 = __msa_aver_u_b(below2_in, below1_in); \ - temp1 = __msa_aver_u_b(temp1, temp0); \ - out = __msa_aver_u_b(src_in, temp1); \ - temp0 = __msa_asub_u_b(src_in, above2_in); \ - temp1 = __msa_asub_u_b(src_in, above1_in); \ - temp0 = (temp0 < ref); \ - temp1 = (temp1 < ref); \ - temp0 = temp0 & temp1; \ - temp1 = __msa_asub_u_b(src_in, below1_in); \ - temp1 = (temp1 < ref); \ - temp0 = temp0 & temp1; \ - temp1 = __msa_asub_u_b(src_in, below2_in); \ - temp1 = (temp1 < ref); \ - temp0 = temp0 & temp1; \ - out = __msa_bmz_v(out, src_in, temp0); \ -} - -#define TRANSPOSE12x16_B(in0, in1, in2, in3, in4, in5, in6, in7, \ - in8, in9, in10, in11, in12, in13, in14, in15) 
\ -{ \ - v8i16 temp0, temp1, temp2, temp3, temp4; \ - v8i16 temp5, temp6, temp7, temp8, temp9; \ - \ - ILVR_B2_SH(in1, in0, in3, in2, temp0, temp1); \ - ILVRL_H2_SH(temp1, temp0, temp2, temp3); \ - ILVR_B2_SH(in5, in4, in7, in6, temp0, temp1); \ - ILVRL_H2_SH(temp1, temp0, temp4, temp5); \ - ILVRL_W2_SH(temp4, temp2, temp0, temp1); \ - ILVRL_W2_SH(temp5, temp3, temp2, temp3); \ - ILVR_B2_SH(in9, in8, in11, in10, temp4, temp5); \ - ILVR_B2_SH(in9, in8, in11, in10, temp4, temp5); \ - ILVRL_H2_SH(temp5, temp4, temp6, temp7); \ - ILVR_B2_SH(in13, in12, in15, in14, temp4, temp5); \ - ILVRL_H2_SH(temp5, temp4, temp8, temp9); \ - ILVRL_W2_SH(temp8, temp6, temp4, temp5); \ - ILVRL_W2_SH(temp9, temp7, temp6, temp7); \ - ILVL_B2_SH(in1, in0, in3, in2, temp8, temp9); \ - ILVR_D2_UB(temp4, temp0, temp5, temp1, in0, in2); \ - in1 = (v16u8)__msa_ilvl_d((v2i64)temp4, (v2i64)temp0); \ - in3 = (v16u8)__msa_ilvl_d((v2i64)temp5, (v2i64)temp1); \ - ILVL_B2_SH(in5, in4, in7, in6, temp0, temp1); \ - ILVR_D2_UB(temp6, temp2, temp7, temp3, in4, in6); \ - in5 = (v16u8)__msa_ilvl_d((v2i64)temp6, (v2i64)temp2); \ - in7 = (v16u8)__msa_ilvl_d((v2i64)temp7, (v2i64)temp3); \ - ILVL_B4_SH(in9, in8, in11, in10, in13, in12, in15, in14, \ - temp2, temp3, temp4, temp5); \ - ILVR_H4_SH(temp9, temp8, temp1, temp0, temp3, temp2, temp5, temp4, \ - temp6, temp7, temp8, temp9); \ - ILVR_W2_SH(temp7, temp6, temp9, temp8, temp0, temp1); \ - in8 = (v16u8)__msa_ilvr_d((v2i64)temp1, (v2i64)temp0); \ - in9 = (v16u8)__msa_ilvl_d((v2i64)temp1, (v2i64)temp0); \ - ILVL_W2_SH(temp7, temp6, temp9, temp8, temp2, temp3); \ - in10 = (v16u8)__msa_ilvr_d((v2i64)temp3, (v2i64)temp2); \ - in11 = (v16u8)__msa_ilvl_d((v2i64)temp3, (v2i64)temp2); \ -} - -#define VP8_TRANSPOSE12x8_UB_UB(in0, in1, in2, in3, in4, in5, \ - in6, in7, in8, in9, in10, in11) \ -{ \ - v8i16 temp0, temp1, temp2, temp3; \ - v8i16 temp4, temp5, temp6, temp7; \ - \ - ILVR_B2_SH(in1, in0, in3, in2, temp0, temp1); \ - ILVRL_H2_SH(temp1, temp0, temp2, temp3); 
\ - ILVR_B2_SH(in5, in4, in7, in6, temp0, temp1); \ - ILVRL_H2_SH(temp1, temp0, temp4, temp5); \ - ILVRL_W2_SH(temp4, temp2, temp0, temp1); \ - ILVRL_W2_SH(temp5, temp3, temp2, temp3); \ - ILVL_B2_SH(in1, in0, in3, in2, temp4, temp5); \ - temp4 = __msa_ilvr_h(temp5, temp4); \ - ILVL_B2_SH(in5, in4, in7, in6, temp6, temp7); \ - temp5 = __msa_ilvr_h(temp7, temp6); \ - ILVRL_W2_SH(temp5, temp4, temp6, temp7); \ - in0 = (v16u8)temp0; \ - in2 = (v16u8)temp1; \ - in4 = (v16u8)temp2; \ - in6 = (v16u8)temp3; \ - in8 = (v16u8)temp6; \ - in10 = (v16u8)temp7; \ - in1 = (v16u8)__msa_ilvl_d((v2i64)temp0, (v2i64)temp0); \ - in3 = (v16u8)__msa_ilvl_d((v2i64)temp1, (v2i64)temp1); \ - in5 = (v16u8)__msa_ilvl_d((v2i64)temp2, (v2i64)temp2); \ - in7 = (v16u8)__msa_ilvl_d((v2i64)temp3, (v2i64)temp3); \ - in9 = (v16u8)__msa_ilvl_d((v2i64)temp6, (v2i64)temp6); \ - in11 = (v16u8)__msa_ilvl_d((v2i64)temp7, (v2i64)temp7); \ -} - -static void postproc_down_across_chroma_msa(uint8_t *src_ptr, uint8_t *dst_ptr, - int32_t src_stride, - int32_t dst_stride, - int32_t cols, uint8_t *f) -{ - uint8_t *p_src = src_ptr; - uint8_t *p_dst = dst_ptr; - uint8_t *f_orig = f; - uint8_t *p_dst_st = dst_ptr; - uint16_t col; - uint64_t out0, out1, out2, out3; - v16u8 above2, above1, below2, below1, src, ref, ref_temp; - v16u8 inter0, inter1, inter2, inter3, inter4, inter5; - v16u8 inter6, inter7, inter8, inter9, inter10, inter11; - - for (col = (cols / 16); col--;) - { - ref = LD_UB(f); - LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1); - src = LD_UB(p_src); - LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2); - VP8_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0); - above2 = LD_UB(p_src + 3 * src_stride); - VP8_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1); - above1 = LD_UB(p_src + 4 * src_stride); - VP8_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2); - src = LD_UB(p_src + 5 * src_stride); - VP8_AVER_IF_RETAIN(below1, below2, above2, 
above1, src, ref, inter3); - below1 = LD_UB(p_src + 6 * src_stride); - VP8_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4); - below2 = LD_UB(p_src + 7 * src_stride); - VP8_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5); - above2 = LD_UB(p_src + 8 * src_stride); - VP8_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6); - above1 = LD_UB(p_src + 9 * src_stride); - VP8_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7); - ST_UB8(inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7, - p_dst, dst_stride); - - p_dst += 16; - p_src += 16; - f += 16; - } - - if (0 != (cols / 16)) - { - ref = LD_UB(f); - LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1); - src = LD_UB(p_src); - LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2); - VP8_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0); - above2 = LD_UB(p_src + 3 * src_stride); - VP8_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1); - above1 = LD_UB(p_src + 4 * src_stride); - VP8_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2); - src = LD_UB(p_src + 5 * src_stride); - VP8_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3); - below1 = LD_UB(p_src + 6 * src_stride); - VP8_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4); - below2 = LD_UB(p_src + 7 * src_stride); - VP8_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5); - above2 = LD_UB(p_src + 8 * src_stride); - VP8_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6); - above1 = LD_UB(p_src + 9 * src_stride); - VP8_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7); - out0 = __msa_copy_u_d((v2i64)inter0, 0); - out1 = __msa_copy_u_d((v2i64)inter1, 0); - out2 = __msa_copy_u_d((v2i64)inter2, 0); - out3 = __msa_copy_u_d((v2i64)inter3, 0); - SD4(out0, out1, out2, out3, p_dst, dst_stride); - - out0 = __msa_copy_u_d((v2i64)inter4, 0); - out1 = __msa_copy_u_d((v2i64)inter5, 0); - 
out2 = __msa_copy_u_d((v2i64)inter6, 0); - out3 = __msa_copy_u_d((v2i64)inter7, 0); - SD4(out0, out1, out2, out3, p_dst + 4 * dst_stride, dst_stride); - } - - f = f_orig; - p_dst = dst_ptr - 2; - LD_UB8(p_dst, dst_stride, - inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7); - - for (col = 0; col < (cols / 8); ++col) - { - ref = LD_UB(f); - f += 8; - VP8_TRANSPOSE12x8_UB_UB(inter0, inter1, inter2, inter3, - inter4, inter5, inter6, inter7, - inter8, inter9, inter10, inter11); - if (0 == col) - { - above2 = inter2; - above1 = inter2; - } - else - { - above2 = inter0; - above1 = inter1; - } - src = inter2; - below1 = inter3; - below2 = inter4; - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0); - VP8_AVER_IF_RETAIN(above2, above1, src, below1, below2, - ref_temp, inter2); - above2 = inter5; - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1); - VP8_AVER_IF_RETAIN(above1, src, below1, below2, above2, - ref_temp, inter3); - above1 = inter6; - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2); - VP8_AVER_IF_RETAIN(src, below1, below2, above2, above1, - ref_temp, inter4); - src = inter7; - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3); - VP8_AVER_IF_RETAIN(below1, below2, above2, above1, src, - ref_temp, inter5); - below1 = inter8; - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4); - VP8_AVER_IF_RETAIN(below2, above2, above1, src, below1, - ref_temp, inter6); - below2 = inter9; - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5); - VP8_AVER_IF_RETAIN(above2, above1, src, below1, below2, - ref_temp, inter7); - if (col == (cols / 8 - 1)) - { - above2 = inter9; - } - else - { - above2 = inter10; - } - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6); - VP8_AVER_IF_RETAIN(above1, src, below1, below2, above2, - ref_temp, inter8); - if (col == (cols / 8 - 1)) - { - above1 = inter9; - } - else - { - above1 = inter11; - } - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7); - VP8_AVER_IF_RETAIN(src, below1, below2, above2, above1, - ref_temp, inter9); - 
TRANSPOSE8x8_UB_UB(inter2, inter3, inter4, inter5, inter6, inter7, - inter8, inter9, inter2, inter3, inter4, inter5, - inter6, inter7, inter8, inter9); - p_dst += 8; - LD_UB2(p_dst, dst_stride, inter0, inter1); - ST8x1_UB(inter2, p_dst_st); - ST8x1_UB(inter3, (p_dst_st + 1 * dst_stride)); - LD_UB2(p_dst + 2 * dst_stride, dst_stride, inter2, inter3); - ST8x1_UB(inter4, (p_dst_st + 2 * dst_stride)); - ST8x1_UB(inter5, (p_dst_st + 3 * dst_stride)); - LD_UB2(p_dst + 4 * dst_stride, dst_stride, inter4, inter5); - ST8x1_UB(inter6, (p_dst_st + 4 * dst_stride)); - ST8x1_UB(inter7, (p_dst_st + 5 * dst_stride)); - LD_UB2(p_dst + 6 * dst_stride, dst_stride, inter6, inter7); - ST8x1_UB(inter8, (p_dst_st + 6 * dst_stride)); - ST8x1_UB(inter9, (p_dst_st + 7 * dst_stride)); - p_dst_st += 8; - } -} - -static void postproc_down_across_luma_msa(uint8_t *src_ptr, uint8_t *dst_ptr, - int32_t src_stride, - int32_t dst_stride, - int32_t cols, uint8_t *f) -{ - uint8_t *p_src = src_ptr; - uint8_t *p_dst = dst_ptr; - uint8_t *p_dst_st = dst_ptr; - uint8_t *f_orig = f; - uint16_t col; - v16u8 above2, above1, below2, below1; - v16u8 src, ref, ref_temp; - v16u8 inter0, inter1, inter2, inter3, inter4, inter5, inter6; - v16u8 inter7, inter8, inter9, inter10, inter11; - v16u8 inter12, inter13, inter14, inter15; - - for (col = (cols / 16); col--;) - { - ref = LD_UB(f); - LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1); - src = LD_UB(p_src); - LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2); - VP8_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0); - above2 = LD_UB(p_src + 3 * src_stride); - VP8_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1); - above1 = LD_UB(p_src + 4 * src_stride); - VP8_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2); - src = LD_UB(p_src + 5 * src_stride); - VP8_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3); - below1 = LD_UB(p_src + 6 * src_stride); - VP8_AVER_IF_RETAIN(below2, above2, 
above1, src, below1, ref, inter4); - below2 = LD_UB(p_src + 7 * src_stride); - VP8_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5); - above2 = LD_UB(p_src + 8 * src_stride); - VP8_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6); - above1 = LD_UB(p_src + 9 * src_stride); - VP8_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7); - src = LD_UB(p_src + 10 * src_stride); - VP8_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter8); - below1 = LD_UB(p_src + 11 * src_stride); - VP8_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter9); - below2 = LD_UB(p_src + 12 * src_stride); - VP8_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter10); - above2 = LD_UB(p_src + 13 * src_stride); - VP8_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter11); - above1 = LD_UB(p_src + 14 * src_stride); - VP8_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter12); - src = LD_UB(p_src + 15 * src_stride); - VP8_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter13); - below1 = LD_UB(p_src + 16 * src_stride); - VP8_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter14); - below2 = LD_UB(p_src + 17 * src_stride); - VP8_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter15); - ST_UB8(inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7, - p_dst, dst_stride); - ST_UB8(inter8, inter9, inter10, inter11, inter12, inter13, - inter14, inter15, p_dst + 8 * dst_stride, dst_stride); - p_src += 16; - p_dst += 16; - f += 16; - } - - f = f_orig; - p_dst = dst_ptr - 2; - LD_UB8(p_dst, dst_stride, - inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7); - LD_UB8(p_dst + 8 * dst_stride, dst_stride, - inter8, inter9, inter10, inter11, inter12, inter13, - inter14, inter15); - - for (col = 0; col < cols / 8; ++col) - { - ref = LD_UB(f); - f += 8; - TRANSPOSE12x16_B(inter0, inter1, inter2, inter3, inter4, inter5, - inter6, inter7, inter8, inter9, inter10, 
inter11, - inter12, inter13, inter14, inter15); - if (0 == col) - { - above2 = inter2; - above1 = inter2; - } - else - { - above2 = inter0; - above1 = inter1; - } - - src = inter2; - below1 = inter3; - below2 = inter4; - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0); - VP8_AVER_IF_RETAIN(above2, above1, src, below1, below2, - ref_temp, inter2); - above2 = inter5; - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1); - VP8_AVER_IF_RETAIN(above1, src, below1, below2, above2, - ref_temp, inter3); - above1 = inter6; - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2); - VP8_AVER_IF_RETAIN(src, below1, below2, above2, above1, - ref_temp, inter4); - src = inter7; - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3); - VP8_AVER_IF_RETAIN(below1, below2, above2, above1, src, - ref_temp, inter5); - below1 = inter8; - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4); - VP8_AVER_IF_RETAIN(below2, above2, above1, src, below1, - ref_temp, inter6); - below2 = inter9; - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5); - VP8_AVER_IF_RETAIN(above2, above1, src, below1, below2, - ref_temp, inter7); - if (col == (cols / 8 - 1)) - { - above2 = inter9; - } - else - { - above2 = inter10; - } - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6); - VP8_AVER_IF_RETAIN(above1, src, below1, below2, above2, - ref_temp, inter8); - if (col == (cols / 8 - 1)) - { - above1 = inter9; - } - else - { - above1 = inter11; - } - ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7); - VP8_AVER_IF_RETAIN(src, below1, below2, above2, above1, - ref_temp, inter9); - VP8_TRANSPOSE8x16_UB_UB(inter2, inter3, inter4, inter5, - inter6, inter7, inter8, inter9, - inter2, inter3, inter4, inter5, - inter6, inter7, inter8, inter9, - inter10, inter11, inter12, inter13, - inter14, inter15, above2, above1); - - p_dst += 8; - LD_UB2(p_dst, dst_stride, inter0, inter1); - ST8x1_UB(inter2, p_dst_st); - ST8x1_UB(inter3, (p_dst_st + 1 * dst_stride)); - LD_UB2(p_dst + 2 * dst_stride, dst_stride, inter2, inter3); - ST8x1_UB(inter4, (p_dst_st 
+ 2 * dst_stride)); - ST8x1_UB(inter5, (p_dst_st + 3 * dst_stride)); - LD_UB2(p_dst + 4 * dst_stride, dst_stride, inter4, inter5); - ST8x1_UB(inter6, (p_dst_st + 4 * dst_stride)); - ST8x1_UB(inter7, (p_dst_st + 5 * dst_stride)); - LD_UB2(p_dst + 6 * dst_stride, dst_stride, inter6, inter7); - ST8x1_UB(inter8, (p_dst_st + 6 * dst_stride)); - ST8x1_UB(inter9, (p_dst_st + 7 * dst_stride)); - LD_UB2(p_dst + 8 * dst_stride, dst_stride, inter8, inter9); - ST8x1_UB(inter10, (p_dst_st + 8 * dst_stride)); - ST8x1_UB(inter11, (p_dst_st + 9 * dst_stride)); - LD_UB2(p_dst + 10 * dst_stride, dst_stride, inter10, inter11); - ST8x1_UB(inter12, (p_dst_st + 10 * dst_stride)); - ST8x1_UB(inter13, (p_dst_st + 11 * dst_stride)); - LD_UB2(p_dst + 12 * dst_stride, dst_stride, inter12, inter13); - ST8x1_UB(inter14, (p_dst_st + 12 * dst_stride)); - ST8x1_UB(inter15, (p_dst_st + 13 * dst_stride)); - LD_UB2(p_dst + 14 * dst_stride, dst_stride, inter14, inter15); - ST8x1_UB(above2, (p_dst_st + 14 * dst_stride)); - ST8x1_UB(above1, (p_dst_st + 15 * dst_stride)); - p_dst_st += 8; - } -} - -void vp8_post_proc_down_and_across_mb_row_msa(uint8_t *src, uint8_t *dst, - int32_t src_stride, - int32_t dst_stride, - int32_t cols, uint8_t *f, - int32_t size) -{ - if (8 == size) - { - postproc_down_across_chroma_msa(src, dst, src_stride, dst_stride, - cols, f); - } - else if (16 == size) - { - postproc_down_across_luma_msa(src, dst, src_stride, dst_stride, - cols, f); - } -} - -void vp8_mbpost_proc_across_ip_msa(uint8_t *src_ptr, int32_t pitch, - int32_t rows, int32_t cols, int32_t flimit) -{ - int32_t row, col, cnt; - uint8_t *src_dup = src_ptr; - v16u8 src0, src, tmp_orig; - v16u8 tmp = { 0 }; - v16i8 zero = { 0 }; - v8u16 sum_h, src_r_h, src_l_h; - v4u32 src_r_w, src_l_w; - v4i32 flimit_vec; - - flimit_vec = __msa_fill_w(flimit); - for (row = rows; row--;) - { - int32_t sum_sq = 0; - int32_t sum = 0; - src0 = (v16u8)__msa_fill_b(src_dup[0]); - ST8x1_UB(src0, (src_dup - 8)); - - src0 = 
(v16u8)__msa_fill_b(src_dup[cols - 1]); - ST_UB(src0, src_dup + cols); - src_dup[cols + 16] = src_dup[cols - 1]; - tmp_orig = (v16u8)__msa_ldi_b(0); - tmp_orig[15] = tmp[15]; - src = LD_UB(src_dup - 8); - src[15] = 0; - ILVRL_B2_UH(zero, src, src_r_h, src_l_h); - src_r_w = __msa_dotp_u_w(src_r_h, src_r_h); - src_l_w = __msa_dotp_u_w(src_l_h, src_l_h); - sum_sq = HADD_SW_S32(src_r_w); - sum_sq += HADD_SW_S32(src_l_w); - sum_h = __msa_hadd_u_h(src, src); - sum = HADD_UH_U32(sum_h); - { - v16u8 src7, src8, src_r, src_l; - v16i8 mask; - v8u16 add_r, add_l; - v8i16 sub_r, sub_l, sum_r, sum_l, mask0, mask1; - v4i32 sum_sq0, sum_sq1, sum_sq2, sum_sq3; - v4i32 sub0, sub1, sub2, sub3; - v4i32 sum0_w, sum1_w, sum2_w, sum3_w; - v4i32 mul0, mul1, mul2, mul3; - v4i32 total0, total1, total2, total3; - v8i16 const8 = __msa_fill_h(8); - - src7 = LD_UB(src_dup + 7); - src8 = LD_UB(src_dup - 8); - for (col = 0; col < (cols >> 4); ++col) - { - ILVRL_B2_UB(src7, src8, src_r, src_l); - HSUB_UB2_SH(src_r, src_l, sub_r, sub_l); - - sum_r[0] = sum + sub_r[0]; - for (cnt = 0; cnt < 7; ++cnt) - { - sum_r[cnt + 1] = sum_r[cnt] + sub_r[cnt + 1]; - } - sum_l[0] = sum_r[7] + sub_l[0]; - for (cnt = 0; cnt < 7; ++cnt) - { - sum_l[cnt + 1] = sum_l[cnt] + sub_l[cnt + 1]; - } - sum = sum_l[7]; - src = LD_UB(src_dup + 16 * col); - ILVRL_B2_UH(zero, src, src_r_h, src_l_h); - src7 = (v16u8)((const8 + sum_r + (v8i16)src_r_h) >> 4); - src8 = (v16u8)((const8 + sum_l + (v8i16)src_l_h) >> 4); - tmp = (v16u8)__msa_pckev_b((v16i8)src8, (v16i8)src7); - - HADD_UB2_UH(src_r, src_l, add_r, add_l); - UNPCK_SH_SW(sub_r, sub0, sub1); - UNPCK_SH_SW(sub_l, sub2, sub3); - ILVR_H2_SW(zero, add_r, zero, add_l, sum0_w, sum2_w); - ILVL_H2_SW(zero, add_r, zero, add_l, sum1_w, sum3_w); - MUL4(sum0_w, sub0, sum1_w, sub1, sum2_w, sub2, sum3_w, sub3, - mul0, mul1, mul2, mul3); - sum_sq0[0] = sum_sq + mul0[0]; - for (cnt = 0; cnt < 3; ++cnt) - { - sum_sq0[cnt + 1] = sum_sq0[cnt] + mul0[cnt + 1]; - } - sum_sq1[0] = sum_sq0[3] + 
mul1[0]; - for (cnt = 0; cnt < 3; ++cnt) - { - sum_sq1[cnt + 1] = sum_sq1[cnt] + mul1[cnt + 1]; - } - sum_sq2[0] = sum_sq1[3] + mul2[0]; - for (cnt = 0; cnt < 3; ++cnt) - { - sum_sq2[cnt + 1] = sum_sq2[cnt] + mul2[cnt + 1]; - } - sum_sq3[0] = sum_sq2[3] + mul3[0]; - for (cnt = 0; cnt < 3; ++cnt) - { - sum_sq3[cnt + 1] = sum_sq3[cnt] + mul3[cnt + 1]; - } - sum_sq = sum_sq3[3]; - - UNPCK_SH_SW(sum_r, sum0_w, sum1_w); - UNPCK_SH_SW(sum_l, sum2_w, sum3_w); - total0 = sum_sq0 * __msa_ldi_w(15); - total0 -= sum0_w * sum0_w; - total1 = sum_sq1 * __msa_ldi_w(15); - total1 -= sum1_w * sum1_w; - total2 = sum_sq2 * __msa_ldi_w(15); - total2 -= sum2_w * sum2_w; - total3 = sum_sq3 * __msa_ldi_w(15); - total3 -= sum3_w * sum3_w; - total0 = (total0 < flimit_vec); - total1 = (total1 < flimit_vec); - total2 = (total2 < flimit_vec); - total3 = (total3 < flimit_vec); - PCKEV_H2_SH(total1, total0, total3, total2, mask0, mask1); - mask = __msa_pckev_b((v16i8)mask1, (v16i8)mask0); - tmp = __msa_bmz_v(tmp, src, (v16u8)mask); - - if (col == 0) - { - uint64_t src_d; - - src_d = __msa_copy_u_d((v2i64)tmp_orig, 1); - SD(src_d, (src_dup - 8)); - } - - src7 = LD_UB(src_dup + 16 * (col + 1) + 7); - src8 = LD_UB(src_dup + 16 * (col + 1) - 8); - ST_UB(tmp, (src_dup + (16 * col))); - } - - src_dup += pitch; - } - } -} - -void vp8_mbpost_proc_down_msa(uint8_t *dst_ptr, int32_t pitch, int32_t rows, - int32_t cols, int32_t flimit) -{ - int32_t row, col, cnt, i; - const int16_t *rv3 = &vp8_rv_msa[63 & rand()]; - v4i32 flimit_vec; - v16u8 dst7, dst8, dst_r_b, dst_l_b; - v16i8 mask; - v8u16 add_r, add_l; - v8i16 dst_r_h, dst_l_h, sub_r, sub_l, mask0, mask1; - v4i32 sub0, sub1, sub2, sub3, total0, total1, total2, total3; - - flimit_vec = __msa_fill_w(flimit); - - for (col = 0; col < (cols >> 4); ++col) - { - uint8_t *dst_tmp = &dst_ptr[col << 4]; - v16u8 dst; - v16i8 zero = { 0 }; - v16u8 tmp[16]; - v8i16 mult0, mult1, rv2_0, rv2_1; - v8i16 sum0_h = { 0 }; - v8i16 sum1_h = { 0 }; - v4i32 mul0 = { 0 }; - 
v4i32 mul1 = { 0 }; - v4i32 mul2 = { 0 }; - v4i32 mul3 = { 0 }; - v4i32 sum0_w, sum1_w, sum2_w, sum3_w; - v4i32 add0, add1, add2, add3; - const int16_t *rv2[16]; - - dst = LD_UB(dst_tmp); - for (cnt = (col << 4), i = 0; i < 16; ++cnt) - { - rv2[i] = rv3 + ((cnt * 17) & 127); - ++i; - } - for (cnt = -8; cnt < 0; ++cnt) - { - ST_UB(dst, dst_tmp + cnt * pitch); - } - - dst = LD_UB((dst_tmp + (rows - 1) * pitch)); - for (cnt = rows; cnt < rows + 17; ++cnt) - { - ST_UB(dst, dst_tmp + cnt * pitch); - } - for (cnt = -8; cnt <= 6; ++cnt) - { - dst = LD_UB(dst_tmp + (cnt * pitch)); - UNPCK_UB_SH(dst, dst_r_h, dst_l_h); - MUL2(dst_r_h, dst_r_h, dst_l_h, dst_l_h, mult0, mult1); - mul0 += (v4i32)__msa_ilvr_h((v8i16)zero, (v8i16)mult0); - mul1 += (v4i32)__msa_ilvl_h((v8i16)zero, (v8i16)mult0); - mul2 += (v4i32)__msa_ilvr_h((v8i16)zero, (v8i16)mult1); - mul3 += (v4i32)__msa_ilvl_h((v8i16)zero, (v8i16)mult1); - ADD2(sum0_h, dst_r_h, sum1_h, dst_l_h, sum0_h, sum1_h); - } - - for (row = 0; row < (rows + 8); ++row) - { - for (i = 0; i < 8; ++i) - { - rv2_0[i] = *(rv2[i] + (row & 127)); - rv2_1[i] = *(rv2[i + 8] + (row & 127)); - } - dst7 = LD_UB(dst_tmp + (7 * pitch)); - dst8 = LD_UB(dst_tmp - (8 * pitch)); - ILVRL_B2_UB(dst7, dst8, dst_r_b, dst_l_b); - - HSUB_UB2_SH(dst_r_b, dst_l_b, sub_r, sub_l); - UNPCK_SH_SW(sub_r, sub0, sub1); - UNPCK_SH_SW(sub_l, sub2, sub3); - sum0_h += sub_r; - sum1_h += sub_l; - - HADD_UB2_UH(dst_r_b, dst_l_b, add_r, add_l); - - ILVRL_H2_SW(zero, add_r, add0, add1); - ILVRL_H2_SW(zero, add_l, add2, add3); - mul0 += add0 * sub0; - mul1 += add1 * sub1; - mul2 += add2 * sub2; - mul3 += add3 * sub3; - dst = LD_UB(dst_tmp); - ILVRL_B2_SH(zero, dst, dst_r_h, dst_l_h); - dst7 = (v16u8)((rv2_0 + sum0_h + dst_r_h) >> 4); - dst8 = (v16u8)((rv2_1 + sum1_h + dst_l_h) >> 4); - tmp[row & 15] = (v16u8)__msa_pckev_b((v16i8)dst8, (v16i8)dst7); - - UNPCK_SH_SW(sum0_h, sum0_w, sum1_w); - UNPCK_SH_SW(sum1_h, sum2_w, sum3_w); - total0 = mul0 * __msa_ldi_w(15); - total0 -= 
sum0_w * sum0_w; - total1 = mul1 * __msa_ldi_w(15); - total1 -= sum1_w * sum1_w; - total2 = mul2 * __msa_ldi_w(15); - total2 -= sum2_w * sum2_w; - total3 = mul3 * __msa_ldi_w(15); - total3 -= sum3_w * sum3_w; - total0 = (total0 < flimit_vec); - total1 = (total1 < flimit_vec); - total2 = (total2 < flimit_vec); - total3 = (total3 < flimit_vec); - PCKEV_H2_SH(total1, total0, total3, total2, mask0, mask1); - mask = __msa_pckev_b((v16i8)mask1, (v16i8)mask0); - tmp[row & 15] = __msa_bmz_v(tmp[row & 15], dst, (v16u8)mask); - - if (row >= 8) - { - ST_UB(tmp[(row - 8) & 15], (dst_tmp - 8 * pitch)); - } - - dst_tmp += pitch; - } - } -} - -void vp8_plane_add_noise_msa(uint8_t *start_ptr, char *noise, - char blackclamp[16], char whiteclamp[16], - char bothclamp[16], - uint32_t width, uint32_t height, - int32_t pitch) -{ - uint32_t i, j; - - for (i = 0; i < height / 2; ++i) - { - uint8_t *pos0_ptr = start_ptr + (2 * i) * pitch; - int8_t *ref0_ptr = (int8_t *) (noise + (rand() & 0xff)); - uint8_t *pos1_ptr = start_ptr + (2 * i + 1) * pitch; - int8_t *ref1_ptr = (int8_t *) (noise + (rand() & 0xff)); - for (j = width / 16; j--;) - { - v16i8 temp00_s, temp01_s; - v16u8 temp00, temp01, black_clamp, white_clamp; - v16u8 pos0, ref0, pos1, ref1; - v16i8 const127 = __msa_ldi_b(127); - - pos0 = LD_UB(pos0_ptr); - ref0 = LD_UB(ref0_ptr); - pos1 = LD_UB(pos1_ptr); - ref1 = LD_UB(ref1_ptr); - black_clamp = (v16u8)__msa_fill_b(blackclamp[0]); - white_clamp = (v16u8)__msa_fill_b(whiteclamp[0]); - temp00 = (pos0 < black_clamp); - pos0 = __msa_bmnz_v(pos0, black_clamp, temp00); - temp01 = (pos1 < black_clamp); - pos1 = __msa_bmnz_v(pos1, black_clamp, temp01); - XORI_B2_128_UB(pos0, pos1); - temp00_s = __msa_adds_s_b((v16i8)white_clamp, const127); - temp00 = (v16u8)(temp00_s < pos0); - pos0 = (v16u8)__msa_bmnz_v((v16u8)pos0, (v16u8)temp00_s, temp00); - temp01_s = __msa_adds_s_b((v16i8)white_clamp, const127); - temp01 = (temp01_s < pos1); - pos1 = (v16u8)__msa_bmnz_v((v16u8)pos1, (v16u8)temp01_s, 
temp01); - XORI_B2_128_UB(pos0, pos1); - pos0 += ref0; - ST_UB(pos0, pos0_ptr); - pos1 += ref1; - ST_UB(pos1, pos1_ptr); - pos0_ptr += 16; - pos1_ptr += 16; - ref0_ptr += 16; - ref1_ptr += 16; - } - } -} diff --git a/libs/libvpx/vp8/common/mips/msa/sixtap_filter_msa.c b/libs/libvpx/vp8/common/mips/msa/sixtap_filter_msa.c index fb60fc1346..b0affcff01 100644 --- a/libs/libvpx/vp8/common/mips/msa/sixtap_filter_msa.c +++ b/libs/libvpx/vp8/common/mips/msa/sixtap_filter_msa.c @@ -13,226 +13,225 @@ #include "vp8/common/filter.h" #include "vp8/common/mips/msa/vp8_macros_msa.h" -DECLARE_ALIGNED(16, static const int8_t, vp8_subpel_filters_msa[7][8]) = -{ - { 0, -6, 123, 12, -1, 0, 0, 0 }, - { 2, -11, 108, 36, -8, 1, 0, 0 }, /* New 1/4 pel 6 tap filter */ - { 0, -9, 93, 50, -6, 0, 0, 0 }, - { 3, -16, 77, 77, -16, 3, 0, 0 }, /* New 1/2 pel 6 tap filter */ - { 0, -6, 50, 93, -9, 0, 0, 0 }, - { 1, -8, 36, 108, -11, 2, 0, 0 }, /* New 1/4 pel 6 tap filter */ - { 0, -1, 12, 123, -6, 0, 0, 0 }, +DECLARE_ALIGNED(16, static const int8_t, vp8_subpel_filters_msa[7][8]) = { + { 0, -6, 123, 12, -1, 0, 0, 0 }, + { 2, -11, 108, 36, -8, 1, 0, 0 }, /* New 1/4 pel 6 tap filter */ + { 0, -9, 93, 50, -6, 0, 0, 0 }, + { 3, -16, 77, 77, -16, 3, 0, 0 }, /* New 1/2 pel 6 tap filter */ + { 0, -6, 50, 93, -9, 0, 0, 0 }, + { 1, -8, 36, 108, -11, 2, 0, 0 }, /* New 1/4 pel 6 tap filter */ + { 0, -1, 12, 123, -6, 0, 0, 0 }, }; -static const uint8_t vp8_mc_filt_mask_arr[16 * 3] = -{ - /* 8 width cases */ - 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, - /* 4 width cases */ - 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20, - /* 4 width cases */ - 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28 +static const uint8_t vp8_mc_filt_mask_arr[16 * 3] = { + /* 8 width cases */ + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, + /* 4 width cases */ + 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20, + /* 4 width cases */ + 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28 }; 
-#define HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, \ - filt_h0, filt_h1, filt_h2) \ -({ \ - v16i8 vec0_m, vec1_m, vec2_m; \ - v8i16 hz_out_m; \ +#define HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, filt_h0, filt_h1, \ + filt_h2) \ + ({ \ + v16i8 vec0_m, vec1_m, vec2_m; \ + v8i16 hz_out_m; \ + \ + VSHF_B3_SB(src0, src1, src0, src1, src0, src1, mask0, mask1, mask2, \ + vec0_m, vec1_m, vec2_m); \ + hz_out_m = \ + DPADD_SH3_SH(vec0_m, vec1_m, vec2_m, filt_h0, filt_h1, filt_h2); \ + \ + hz_out_m = __msa_srari_h(hz_out_m, VP8_FILTER_SHIFT); \ + hz_out_m = __msa_sat_s_h(hz_out_m, 7); \ + \ + hz_out_m; \ + }) + +#define HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ + mask2, filt0, filt1, filt2, out0, out1) \ + { \ + v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m; \ + \ + VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \ + DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, out0, out1); \ + VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \ + DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, out0, out1); \ + VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m); \ + DPADD_SB2_SH(vec4_m, vec5_m, filt2, filt2, out0, out1); \ + } + +#define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ + mask2, filt0, filt1, filt2, out0, out1, \ + out2, out3) \ + { \ + v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \ + \ + VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, vec1_m); \ + VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \ + DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt0, filt0, filt0, filt0, \ + out0, out1, out2, out3); \ + VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec0_m, vec1_m); \ + VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2_m, vec3_m); \ + VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec4_m, vec5_m); \ + VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec6_m, vec7_m); \ + DPADD_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, 
filt1, filt1, filt1, filt1, \ + out0, out1, out2, out3); \ + DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt2, filt2, filt2, filt2, \ + out0, out1, out2, out3); \ + } + +#define FILT_4TAP_DPADD_S_H(vec0, vec1, filt0, filt1) \ + ({ \ + v8i16 tmp0; \ + \ + tmp0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0); \ + tmp0 = __msa_dpadd_s_h(tmp0, (v16i8)vec1, (v16i8)filt1); \ + \ + tmp0; \ + }) + +#define HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_h0, filt_h1) \ + ({ \ + v16i8 vec0_m, vec1_m; \ + v8i16 hz_out_m; \ + \ + VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0_m, vec1_m); \ + hz_out_m = FILT_4TAP_DPADD_S_H(vec0_m, vec1_m, filt_h0, filt_h1); \ + \ + hz_out_m = __msa_srari_h(hz_out_m, VP8_FILTER_SHIFT); \ + hz_out_m = __msa_sat_s_h(hz_out_m, 7); \ + \ + hz_out_m; \ + }) + +#define HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ + filt0, filt1, out0, out1) \ + { \ + v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \ \ - VSHF_B3_SB(src0, src1, src0, src1, src0, src1, mask0, mask1, mask2, \ - vec0_m, vec1_m, vec2_m); \ - hz_out_m = DPADD_SH3_SH(vec0_m, vec1_m, vec2_m, \ - filt_h0, filt_h1, filt_h2); \ - \ - hz_out_m = __msa_srari_h(hz_out_m, VP8_FILTER_SHIFT); \ - hz_out_m = __msa_sat_s_h(hz_out_m, 7); \ - \ - hz_out_m; \ -}) + VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \ + DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, out0, out1); \ + VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \ + DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, out0, out1); \ + } -#define HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, \ - mask0, mask1, mask2, \ - filt0, filt1, filt2, \ - out0, out1) \ -{ \ - v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m; \ - \ - VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \ - DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, out0, out1); \ - VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \ - DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, out0, out1); \ - VSHF_B2_SB(src0, src1, 
src2, src3, mask2, mask2, vec4_m, vec5_m); \ - DPADD_SB2_SH(vec4_m, vec5_m, filt2, filt2, out0, out1); \ -} - -#define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, \ - mask0, mask1, mask2, \ - filt0, filt1, filt2, \ - out0, out1, out2, out3) \ -{ \ - v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \ - \ - VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, vec1_m); \ - VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \ - DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt0, filt0, filt0, filt0, \ - out0, out1, out2, out3); \ - VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec0_m, vec1_m); \ - VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2_m, vec3_m); \ - VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec4_m, vec5_m); \ - VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec6_m, vec7_m); \ - DPADD_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt1, filt1, filt1, filt1, \ - out0, out1, out2, out3); \ - DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt2, filt2, filt2, filt2, \ - out0, out1, out2, out3); \ -} - -#define FILT_4TAP_DPADD_S_H(vec0, vec1, filt0, filt1) \ -({ \ - v8i16 tmp0; \ - \ - tmp0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0); \ - tmp0 = __msa_dpadd_s_h(tmp0, (v16i8)vec1, (v16i8)filt1); \ - \ - tmp0; \ -}) - -#define HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_h0, filt_h1) \ -({ \ - v16i8 vec0_m, vec1_m; \ - v8i16 hz_out_m; \ - \ - VSHF_B2_SB(src0, src1, src0, src1, mask0, mask1, vec0_m, vec1_m); \ - hz_out_m = FILT_4TAP_DPADD_S_H(vec0_m, vec1_m, filt_h0, filt_h1); \ - \ - hz_out_m = __msa_srari_h(hz_out_m, VP8_FILTER_SHIFT); \ - hz_out_m = __msa_sat_s_h(hz_out_m, 7); \ - \ - hz_out_m; \ -}) - -#define HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, \ - mask0, mask1, filt0, filt1, \ - out0, out1) \ -{ \ - v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \ - \ - VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \ - DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, out0, out1); \ - VSHF_B2_SB(src0, 
src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \ - DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, out0, out1); \ -} - -#define HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, \ - mask0, mask1, filt0, filt1, \ - out0, out1, out2, out3) \ -{ \ - v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \ - \ - VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, vec1_m); \ - VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \ - DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt0, filt0, filt0, filt0, \ - out0, out1, out2, out3); \ - VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec0_m, vec1_m); \ - VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2_m, vec3_m); \ - DPADD_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt1, filt1, filt1, filt1, \ - out0, out1, out2, out3); \ -} +#define HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ + filt0, filt1, out0, out1, out2, out3) \ + { \ + v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \ + \ + VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, vec1_m); \ + VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \ + DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt0, filt0, filt0, filt0, \ + out0, out1, out2, out3); \ + VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec0_m, vec1_m); \ + VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2_m, vec3_m); \ + DPADD_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt1, filt1, filt1, filt1, \ + out0, out1, out2, out3); \ + } static void common_hz_6t_4x4_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter) -{ - v16i8 src0, src1, src2, src3, filt0, filt1, filt2; - v16u8 mask0, mask1, mask2, out; - v8i16 filt, out0, out1; + const int8_t *filter) { + v16i8 src0, src1, src2, src3, filt0, filt1, filt2; + v16u8 mask0, mask1, mask2, out; + v8i16 filt, out0, out1; - mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]); - src -= 2; + mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]); + src -= 2; - filt = LD_SH(filter); - 
SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); + filt = LD_SH(filter); + SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); - mask1 = mask0 + 2; - mask2 = mask0 + 4; + mask1 = mask0 + 2; + mask2 = mask0 + 4; - LD_SB4(src, src_stride, src0, src1, src2, src3); - XORI_B4_128_SB(src0, src1, src2, src3); - HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - filt0, filt1, filt2, out0, out1); - SRARI_H2_SH(out0, out1, VP8_FILTER_SHIFT); - SAT_SH2_SH(out0, out1, 7); - out = PCKEV_XORI128_UB(out0, out1); - ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + LD_SB4(src, src_stride, src0, src1, src2, src3); + XORI_B4_128_SB(src0, src1, src2, src3); + HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0, + filt1, filt2, out0, out1); + SRARI_H2_SH(out0, out1, VP8_FILTER_SHIFT); + SAT_SH2_SH(out0, out1, 7); + out = PCKEV_XORI128_UB(out0, out1); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); } static void common_hz_6t_4x8_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter) -{ - v16i8 src0, src1, src2, src3, filt0, filt1, filt2; - v16u8 mask0, mask1, mask2, out; - v8i16 filt, out0, out1, out2, out3; + const int8_t *filter) { + v16i8 src0, src1, src2, src3, filt0, filt1, filt2; + v16u8 mask0, mask1, mask2, out; + v8i16 filt, out0, out1, out2, out3; - mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]); - src -= 2; + mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]); + src -= 2; - filt = LD_SH(filter); - SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); + filt = LD_SH(filter); + SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); - mask1 = mask0 + 2; - mask2 = mask0 + 4; + mask1 = mask0 + 2; + mask2 = mask0 + 4; - LD_SB4(src, src_stride, src0, src1, src2, src3); - XORI_B4_128_SB(src0, src1, src2, src3); - src += (4 * src_stride); - HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - filt0, filt1, filt2, out0, out1); - LD_SB4(src, src_stride, src0, src1, src2, src3); - 
XORI_B4_128_SB(src0, src1, src2, src3); - HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - filt0, filt1, filt2, out2, out3); - SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); - SAT_SH4_SH(out0, out1, out2, out3, 7); - out = PCKEV_XORI128_UB(out0, out1); - ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); - dst += (4 * dst_stride); - out = PCKEV_XORI128_UB(out2, out3); - ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + LD_SB4(src, src_stride, src0, src1, src2, src3); + XORI_B4_128_SB(src0, src1, src2, src3); + src += (4 * src_stride); + HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0, + filt1, filt2, out0, out1); + LD_SB4(src, src_stride, src0, src1, src2, src3); + XORI_B4_128_SB(src0, src1, src2, src3); + HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0, + filt1, filt2, out2, out3); + SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0, out1, out2, out3, 7); + out = PCKEV_XORI128_UB(out0, out1); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + dst += (4 * dst_stride); + out = PCKEV_XORI128_UB(out2, out3); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); } static void common_hz_6t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - if (4 == height) - { - common_hz_6t_4x4_msa(src, src_stride, dst, dst_stride, filter); - } - else if (8 == height) - { - common_hz_6t_4x8_msa(src, src_stride, dst, dst_stride, filter); - } + const int8_t *filter, int32_t height) { + if (4 == height) { + common_hz_6t_4x4_msa(src, src_stride, dst, dst_stride, filter); + } else if (8 == height) { + common_hz_6t_4x8_msa(src, src_stride, dst, dst_stride, filter); + } } static void common_hz_6t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, filt0, filt1, 
filt2; - v16u8 mask0, mask1, mask2, tmp0, tmp1; - v8i16 filt, out0, out1, out2, out3; + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, filt0, filt1, filt2; + v16u8 mask0, mask1, mask2, tmp0, tmp1; + v8i16 filt, out0, out1, out2, out3; - mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]); - src -= 2; + mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]); + src -= 2; - filt = LD_SH(filter); - SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); + filt = LD_SH(filter); + SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); - mask1 = mask0 + 2; - mask2 = mask0 + 4; + mask1 = mask0 + 2; + mask2 = mask0 + 4; + LD_SB4(src, src_stride, src0, src1, src2, src3); + XORI_B4_128_SB(src0, src1, src2, src3); + src += (4 * src_stride); + HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, filt0, + filt1, filt2, out0, out1, out2, out3); + SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0, out1, out2, out3, 7); + tmp0 = PCKEV_XORI128_UB(out0, out1); + tmp1 = PCKEV_XORI128_UB(out2, out3); + ST8x4_UB(tmp0, tmp1, dst, dst_stride); + dst += (4 * dst_stride); + + for (loop_cnt = (height >> 2) - 1; loop_cnt--;) { LD_SB4(src, src_stride, src0, src1, src2, src3); XORI_B4_128_SB(src0, src1, src2, src3); src += (4 * src_stride); @@ -244,1607 +243,1463 @@ static void common_hz_6t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, tmp1 = PCKEV_XORI128_UB(out2, out3); ST8x4_UB(tmp0, tmp1, dst, dst_stride); dst += (4 * dst_stride); - - for (loop_cnt = (height >> 2) - 1; loop_cnt--;) - { - LD_SB4(src, src_stride, src0, src1, src2, src3); - XORI_B4_128_SB(src0, src1, src2, src3); - src += (4 * src_stride); - HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - filt0, filt1, filt2, out0, out1, out2, out3); - SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); - SAT_SH4_SH(out0, out1, out2, out3, 7); - tmp0 = PCKEV_XORI128_UB(out0, out1); - tmp1 = PCKEV_XORI128_UB(out2, out3); - ST8x4_UB(tmp0, tmp1, dst, dst_stride); - 
dst += (4 * dst_stride); - } + } } static void common_hz_6t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, filt0, filt1, filt2; - v16u8 mask0, mask1, mask2, out; - v8i16 filt, out0, out1, out2, out3, out4, out5, out6, out7; + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, filt0, filt1, filt2; + v16u8 mask0, mask1, mask2, out; + v8i16 filt, out0, out1, out2, out3, out4, out5, out6, out7; - mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]); - src -= 2; + mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]); + src -= 2; - filt = LD_SH(filter); - SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); + filt = LD_SH(filter); + SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); - mask1 = mask0 + 2; - mask2 = mask0 + 4; + mask1 = mask0 + 2; + mask2 = mask0 + 4; - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src0, src2, src4, src6); - LD_SB4(src + 8, src_stride, src1, src3, src5, src7); - XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7); - src += (4 * src_stride); + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src0, src2, src4, src6); + LD_SB4(src + 8, src_stride, src1, src3, src5, src7); + XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7); + src += (4 * src_stride); - HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, - filt0, filt1, filt2, out0, out1, out2, out3); - HORIZ_6TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, mask2, - filt0, filt1, filt2, out4, out5, out6, out7); - SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); - SRARI_H4_SH(out4, out5, out6, out7, VP8_FILTER_SHIFT); - SAT_SH4_SH(out0, out1, out2, out3, 7); - SAT_SH4_SH(out4, out5, out6, out7, 7); - out = PCKEV_XORI128_UB(out0, out1); - ST_UB(out, dst); - dst += dst_stride; - out = 
PCKEV_XORI128_UB(out2, out3); - ST_UB(out, dst); - dst += dst_stride; - out = PCKEV_XORI128_UB(out4, out5); - ST_UB(out, dst); - dst += dst_stride; - out = PCKEV_XORI128_UB(out6, out7); - ST_UB(out, dst); - dst += dst_stride; - } + HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, + filt0, filt1, filt2, out0, out1, out2, out3); + HORIZ_6TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, mask2, + filt0, filt1, filt2, out4, out5, out6, out7); + SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); + SRARI_H4_SH(out4, out5, out6, out7, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0, out1, out2, out3, 7); + SAT_SH4_SH(out4, out5, out6, out7, 7); + out = PCKEV_XORI128_UB(out0, out1); + ST_UB(out, dst); + dst += dst_stride; + out = PCKEV_XORI128_UB(out2, out3); + ST_UB(out, dst); + dst += dst_stride; + out = PCKEV_XORI128_UB(out4, out5); + ST_UB(out, dst); + dst += dst_stride; + out = PCKEV_XORI128_UB(out6, out7); + ST_UB(out, dst); + dst += dst_stride; + } } static void common_vt_6t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; - v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r; - v16i8 src87_r, src2110, src4332, src6554, src8776, filt0, filt1, filt2; - v16u8 out; - v8i16 filt, out10, out32; + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; + v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r; + v16i8 src87_r, src2110, src4332, src6554, src8776, filt0, filt1, filt2; + v16u8 out; + v8i16 filt, out10, out32; - src -= (2 * src_stride); + src -= (2 * src_stride); - filt = LD_SH(filter); - SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); + filt = LD_SH(filter); + SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); - LD_SB5(src, src_stride, src0, src1, src2, src3, src4); - src 
+= (5 * src_stride); + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + src += (5 * src_stride); - ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r, - src32_r, src43_r); - ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332); - XORI_B2_128_SB(src2110, src4332); + ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r, + src32_r, src43_r); + ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332); + XORI_B2_128_SB(src2110, src4332); - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src5, src6, src7, src8); - src += (4 * src_stride); + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src5, src6, src7, src8); + src += (4 * src_stride); - ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, - src65_r, src76_r, src87_r); - ILVR_D2_SB(src65_r, src54_r, src87_r, src76_r, src6554, src8776); - XORI_B2_128_SB(src6554, src8776); - out10 = DPADD_SH3_SH(src2110, src4332, src6554, filt0, filt1, filt2); - out32 = DPADD_SH3_SH(src4332, src6554, src8776, filt0, filt1, filt2); - SRARI_H2_SH(out10, out32, VP8_FILTER_SHIFT); - SAT_SH2_SH(out10, out32, 7); - out = PCKEV_XORI128_UB(out10, out32); - ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); - dst += (4 * dst_stride); + ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r, + src76_r, src87_r); + ILVR_D2_SB(src65_r, src54_r, src87_r, src76_r, src6554, src8776); + XORI_B2_128_SB(src6554, src8776); + out10 = DPADD_SH3_SH(src2110, src4332, src6554, filt0, filt1, filt2); + out32 = DPADD_SH3_SH(src4332, src6554, src8776, filt0, filt1, filt2); + SRARI_H2_SH(out10, out32, VP8_FILTER_SHIFT); + SAT_SH2_SH(out10, out32, 7); + out = PCKEV_XORI128_UB(out10, out32); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + dst += (4 * dst_stride); - src2110 = src6554; - src4332 = src8776; - src4 = src8; - } + src2110 = src6554; + src4332 = src8776; + src4 = src8; + } } static void 
common_vt_6t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src7, src8, src9, src10; - v16i8 src10_r, src32_r, src76_r, src98_r, src21_r, src43_r, src87_r; - v16i8 src109_r, filt0, filt1, filt2; - v16u8 tmp0, tmp1; - v8i16 filt, out0_r, out1_r, out2_r, out3_r; + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src7, src8, src9, src10; + v16i8 src10_r, src32_r, src76_r, src98_r, src21_r, src43_r, src87_r; + v16i8 src109_r, filt0, filt1, filt2; + v16u8 tmp0, tmp1; + v8i16 filt, out0_r, out1_r, out2_r, out3_r; - src -= (2 * src_stride); + src -= (2 * src_stride); - filt = LD_SH(filter); - SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); + filt = LD_SH(filter); + SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); - LD_SB5(src, src_stride, src0, src1, src2, src3, src4); - src += (5 * src_stride); + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + src += (5 * src_stride); - XORI_B5_128_SB(src0, src1, src2, src3, src4); - ILVR_B4_SB(src1, src0, src3, src2, src2, src1, src4, src3, src10_r, src32_r, - src21_r, src43_r); + XORI_B5_128_SB(src0, src1, src2, src3, src4); + ILVR_B4_SB(src1, src0, src3, src2, src2, src1, src4, src3, src10_r, src32_r, + src21_r, src43_r); - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src7, src8, src9, src10); - XORI_B4_128_SB(src7, src8, src9, src10); - src += (4 * src_stride); + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src7, src8, src9, src10); + XORI_B4_128_SB(src7, src8, src9, src10); + src += (4 * src_stride); - ILVR_B4_SB(src7, src4, src8, src7, src9, src8, src10, src9, src76_r, - src87_r, src98_r, src109_r); - out0_r = DPADD_SH3_SH(src10_r, src32_r, src76_r, filt0, filt1, filt2); - out1_r = DPADD_SH3_SH(src21_r, src43_r, src87_r, filt0, filt1, filt2); - out2_r = 
DPADD_SH3_SH(src32_r, src76_r, src98_r, filt0, filt1, filt2); - out3_r = DPADD_SH3_SH(src43_r, src87_r, src109_r, filt0, filt1, filt2); - SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT); - SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); - tmp0 = PCKEV_XORI128_UB(out0_r, out1_r); - tmp1 = PCKEV_XORI128_UB(out2_r, out3_r); - ST8x4_UB(tmp0, tmp1, dst, dst_stride); - dst += (4 * dst_stride); + ILVR_B4_SB(src7, src4, src8, src7, src9, src8, src10, src9, src76_r, + src87_r, src98_r, src109_r); + out0_r = DPADD_SH3_SH(src10_r, src32_r, src76_r, filt0, filt1, filt2); + out1_r = DPADD_SH3_SH(src21_r, src43_r, src87_r, filt0, filt1, filt2); + out2_r = DPADD_SH3_SH(src32_r, src76_r, src98_r, filt0, filt1, filt2); + out3_r = DPADD_SH3_SH(src43_r, src87_r, src109_r, filt0, filt1, filt2); + SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); + tmp0 = PCKEV_XORI128_UB(out0_r, out1_r); + tmp1 = PCKEV_XORI128_UB(out2_r, out3_r); + ST8x4_UB(tmp0, tmp1, dst, dst_stride); + dst += (4 * dst_stride); - src10_r = src76_r; - src32_r = src98_r; - src21_r = src87_r; - src43_r = src109_r; - src4 = src10; - } + src10_r = src76_r; + src32_r = src98_r; + src21_r = src87_r; + src43_r = src109_r; + src4 = src10; + } } static void common_vt_6t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; - v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r; - v16i8 src87_r, src10_l, src32_l, src54_l, src76_l, src21_l, src43_l; - v16i8 src65_l, src87_l, filt0, filt1, filt2; - v16u8 tmp0, tmp1, tmp2, tmp3; - v8i16 out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l, filt; + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; + v16i8 src10_r, src32_r, src54_r, src76_r, 
src21_r, src43_r, src65_r; + v16i8 src87_r, src10_l, src32_l, src54_l, src76_l, src21_l, src43_l; + v16i8 src65_l, src87_l, filt0, filt1, filt2; + v16u8 tmp0, tmp1, tmp2, tmp3; + v8i16 out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l, filt; - src -= (2 * src_stride); + src -= (2 * src_stride); - filt = LD_SH(filter); - SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); + filt = LD_SH(filter); + SPLATI_H3_SB(filt, 0, 1, 2, filt0, filt1, filt2); - LD_SB5(src, src_stride, src0, src1, src2, src3, src4); - src += (5 * src_stride); + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + src += (5 * src_stride); - XORI_B5_128_SB(src0, src1, src2, src3, src4); - ILVR_B4_SB(src1, src0, src3, src2, src4, src3, src2, src1, src10_r, - src32_r, src43_r, src21_r); - ILVL_B4_SB(src1, src0, src3, src2, src4, src3, src2, src1, src10_l, - src32_l, src43_l, src21_l); + XORI_B5_128_SB(src0, src1, src2, src3, src4); + ILVR_B4_SB(src1, src0, src3, src2, src4, src3, src2, src1, src10_r, src32_r, + src43_r, src21_r); + ILVL_B4_SB(src1, src0, src3, src2, src4, src3, src2, src1, src10_l, src32_l, + src43_l, src21_l); - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src5, src6, src7, src8); - src += (4 * src_stride); + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src5, src6, src7, src8); + src += (4 * src_stride); - XORI_B4_128_SB(src5, src6, src7, src8); - ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, - src65_r, src76_r, src87_r); - ILVL_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_l, - src65_l, src76_l, src87_l); - out0_r = DPADD_SH3_SH(src10_r, src32_r, src54_r, filt0, filt1, filt2); - out1_r = DPADD_SH3_SH(src21_r, src43_r, src65_r, filt0, filt1, filt2); - out2_r = DPADD_SH3_SH(src32_r, src54_r, src76_r, filt0, filt1, filt2); - out3_r = DPADD_SH3_SH(src43_r, src65_r, src87_r, filt0, filt1, filt2); - out0_l = DPADD_SH3_SH(src10_l, src32_l, src54_l, filt0, filt1, filt2); - out1_l = 
DPADD_SH3_SH(src21_l, src43_l, src65_l, filt0, filt1, filt2); - out2_l = DPADD_SH3_SH(src32_l, src54_l, src76_l, filt0, filt1, filt2); - out3_l = DPADD_SH3_SH(src43_l, src65_l, src87_l, filt0, filt1, filt2); - SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT); - SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, VP8_FILTER_SHIFT); - SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); - SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7); - PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l, - out3_r, tmp0, tmp1, tmp2, tmp3); - XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3); - ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride); - dst += (4 * dst_stride); + XORI_B4_128_SB(src5, src6, src7, src8); + ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r, + src76_r, src87_r); + ILVL_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_l, src65_l, + src76_l, src87_l); + out0_r = DPADD_SH3_SH(src10_r, src32_r, src54_r, filt0, filt1, filt2); + out1_r = DPADD_SH3_SH(src21_r, src43_r, src65_r, filt0, filt1, filt2); + out2_r = DPADD_SH3_SH(src32_r, src54_r, src76_r, filt0, filt1, filt2); + out3_r = DPADD_SH3_SH(src43_r, src65_r, src87_r, filt0, filt1, filt2); + out0_l = DPADD_SH3_SH(src10_l, src32_l, src54_l, filt0, filt1, filt2); + out1_l = DPADD_SH3_SH(src21_l, src43_l, src65_l, filt0, filt1, filt2); + out2_l = DPADD_SH3_SH(src32_l, src54_l, src76_l, filt0, filt1, filt2); + out3_l = DPADD_SH3_SH(src43_l, src65_l, src87_l, filt0, filt1, filt2); + SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT); + SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); + SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7); + PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l, out3_r, + tmp0, tmp1, tmp2, tmp3); + XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3); + ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride); + dst += (4 * dst_stride); - src10_r = src54_r; - src32_r = src76_r; - src21_r = 
src65_r; - src43_r = src87_r; - src10_l = src54_l; - src32_l = src76_l; - src21_l = src65_l; - src43_l = src87_l; - src4 = src8; - } + src10_r = src54_r; + src32_r = src76_r; + src21_r = src65_r; + src43_r = src87_r; + src10_l = src54_l; + src32_l = src76_l; + src21_l = src65_l; + src43_l = src87_l; + src4 = src8; + } } static void common_hv_6ht_6vt_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; - v16i8 filt_hz0, filt_hz1, filt_hz2; - v16u8 mask0, mask1, mask2, out; - v8i16 tmp0, tmp1; - v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; - v8i16 hz_out7, filt, filt_vt0, filt_vt1, filt_vt2, out0, out1, out2, out3; + int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; + v16i8 filt_hz0, filt_hz1, filt_hz2; + v16u8 mask0, mask1, mask2, out; + v8i16 tmp0, tmp1; + v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; + v8i16 hz_out7, filt, filt_vt0, filt_vt1, filt_vt2, out0, out1, out2, out3; - mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]); - src -= (2 + 2 * src_stride); + mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]); + src -= (2 + 2 * src_stride); - filt = LD_SH(filter_horiz); - SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2); - filt = LD_SH(filter_vert); - SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2); + filt = LD_SH(filter_horiz); + SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2); + filt = LD_SH(filter_vert); + SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2); - mask1 = mask0 + 2; - mask2 = mask0 + 4; + mask1 = mask0 + 2; + mask2 = mask0 + 4; - LD_SB5(src, src_stride, src0, src1, src2, src3, src4); - src += (5 * src_stride); + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + src += (5 * src_stride); - XORI_B5_128_SB(src0, src1, src2, src3, src4); 
- hz_out0 = HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, filt_hz0, + XORI_B5_128_SB(src0, src1, src2, src3, src4); + hz_out0 = HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out2 = HORIZ_6TAP_FILT(src2, src3, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out1 = (v8i16)__msa_sldi_b((v16i8)hz_out2, (v16i8)hz_out0, 8); + hz_out3 = HORIZ_6TAP_FILT(src3, src4, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB2(src, src_stride, src5, src6); + src += (2 * src_stride); + + XORI_B2_128_SB(src5, src6); + hz_out5 = HORIZ_6TAP_FILT(src5, src6, mask0, mask1, mask2, filt_hz0, filt_hz1, filt_hz2); - hz_out2 = HORIZ_6TAP_FILT(src2, src3, mask0, mask1, mask2, filt_hz0, + hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8); + + LD_SB2(src, src_stride, src7, src8); + src += (2 * src_stride); + + XORI_B2_128_SB(src7, src8); + hz_out7 = HORIZ_6TAP_FILT(src7, src8, mask0, mask1, mask2, filt_hz0, filt_hz1, filt_hz2); - hz_out1 = (v8i16)__msa_sldi_b((v16i8)hz_out2, (v16i8)hz_out0, 8); - hz_out3 = HORIZ_6TAP_FILT(src3, src4, mask0, mask1, mask2, filt_hz0, - filt_hz1, filt_hz2); - ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1); + hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8); - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB2(src, src_stride, src5, src6); - src += (2 * src_stride); + out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); + tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); - XORI_B2_128_SB(src5, src6); - hz_out5 = HORIZ_6TAP_FILT(src5, src6, mask0, mask1, mask2, filt_hz0, - filt_hz1, filt_hz2); - hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8); + out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); + tmp1 = DPADD_SH3_SH(out1, out2, out3, filt_vt0, filt_vt1, filt_vt2); - LD_SB2(src, src_stride, 
src7, src8); - src += (2 * src_stride); + SRARI_H2_SH(tmp0, tmp1, 7); + SAT_SH2_SH(tmp0, tmp1, 7); + out = PCKEV_XORI128_UB(tmp0, tmp1); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + dst += (4 * dst_stride); - XORI_B2_128_SB(src7, src8); - hz_out7 = HORIZ_6TAP_FILT(src7, src8, mask0, mask1, mask2, filt_hz0, - filt_hz1, filt_hz2); - hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8); - - out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); - tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); - - out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); - tmp1 = DPADD_SH3_SH(out1, out2, out3, filt_vt0, filt_vt1, filt_vt2); - - SRARI_H2_SH(tmp0, tmp1, 7); - SAT_SH2_SH(tmp0, tmp1, 7); - out = PCKEV_XORI128_UB(tmp0, tmp1); - ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); - dst += (4 * dst_stride); - - hz_out3 = hz_out7; - out0 = out2; - out1 = out3; - } + hz_out3 = hz_out7; + out0 = out2; + out1 = out3; + } } static void common_hv_6ht_6vt_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; - v16i8 filt_hz0, filt_hz1, filt_hz2; - v16u8 mask0, mask1, mask2, vec0, vec1; - v8i16 filt, filt_vt0, filt_vt1, filt_vt2; - v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; - v8i16 hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7; - v8i16 tmp0, tmp1, tmp2, tmp3; + int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; + v16i8 filt_hz0, filt_hz1, filt_hz2; + v16u8 mask0, mask1, mask2, vec0, vec1; + v8i16 filt, filt_vt0, filt_vt1, filt_vt2; + v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; + v8i16 hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7; + v8i16 tmp0, tmp1, tmp2, tmp3; - mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]); - 
src -= (2 + 2 * src_stride); + mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]); + src -= (2 + 2 * src_stride); - filt = LD_SH(filter_horiz); - SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2); + filt = LD_SH(filter_horiz); + SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2); - mask1 = mask0 + 2; - mask2 = mask0 + 4; + mask1 = mask0 + 2; + mask2 = mask0 + 4; - LD_SB5(src, src_stride, src0, src1, src2, src3, src4); - src += (5 * src_stride); + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + src += (5 * src_stride); - XORI_B5_128_SB(src0, src1, src2, src3, src4); - hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0, + XORI_B5_128_SB(src0, src1, src2, src3, src4); + hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out2 = HORIZ_6TAP_FILT(src2, src2, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out4 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + + filt = LD_SH(filter_vert); + SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2); + + ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1); + ILVEV_B2_SH(hz_out1, hz_out2, hz_out3, hz_out4, out3, out4); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src5, src6, src7, src8); + src += (4 * src_stride); + + XORI_B4_128_SB(src5, src6, src7, src8); + hz_out5 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0, filt_hz1, filt_hz2); - hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0, + out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); + tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); + + hz_out6 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0, filt_hz1, filt_hz2); - hz_out2 = HORIZ_6TAP_FILT(src2, src2, 
mask0, mask1, mask2, filt_hz0, + out5 = (v8i16)__msa_ilvev_b((v16i8)hz_out6, (v16i8)hz_out5); + tmp1 = DPADD_SH3_SH(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2); + + hz_out7 = HORIZ_6TAP_FILT(src7, src7, mask0, mask1, mask2, filt_hz0, filt_hz1, filt_hz2); - hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0, - filt_hz1, filt_hz2); - hz_out4 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0, + out7 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); + tmp2 = DPADD_SH3_SH(out1, out2, out7, filt_vt0, filt_vt1, filt_vt2); + + hz_out8 = HORIZ_6TAP_FILT(src8, src8, mask0, mask1, mask2, filt_hz0, filt_hz1, filt_hz2); + out6 = (v8i16)__msa_ilvev_b((v16i8)hz_out8, (v16i8)hz_out7); + tmp3 = DPADD_SH3_SH(out4, out5, out6, filt_vt0, filt_vt1, filt_vt2); - filt = LD_SH(filter_vert); - SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2); + SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7); + SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7); + vec0 = PCKEV_XORI128_UB(tmp0, tmp1); + vec1 = PCKEV_XORI128_UB(tmp2, tmp3); + ST8x4_UB(vec0, vec1, dst, dst_stride); + dst += (4 * dst_stride); - ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1); - ILVEV_B2_SH(hz_out1, hz_out2, hz_out3, hz_out4, out3, out4); - - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src5, src6, src7, src8); - src += (4 * src_stride); - - XORI_B4_128_SB(src5, src6, src7, src8); - hz_out5 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0, - filt_hz1, filt_hz2); - out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); - tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); - - hz_out6 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0, - filt_hz1, filt_hz2); - out5 = (v8i16)__msa_ilvev_b((v16i8)hz_out6, (v16i8)hz_out5); - tmp1 = DPADD_SH3_SH(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2); - - hz_out7 = HORIZ_6TAP_FILT(src7, src7, mask0, mask1, mask2, filt_hz0, - filt_hz1, filt_hz2); - out7 = 
(v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); - tmp2 = DPADD_SH3_SH(out1, out2, out7, filt_vt0, filt_vt1, filt_vt2); - - hz_out8 = HORIZ_6TAP_FILT(src8, src8, mask0, mask1, mask2, filt_hz0, - filt_hz1, filt_hz2); - out6 = (v8i16)__msa_ilvev_b((v16i8)hz_out8, (v16i8)hz_out7); - tmp3 = DPADD_SH3_SH(out4, out5, out6, filt_vt0, filt_vt1, filt_vt2); - - SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7); - vec0 = PCKEV_XORI128_UB(tmp0, tmp1); - vec1 = PCKEV_XORI128_UB(tmp2, tmp3); - ST8x4_UB(vec0, vec1, dst, dst_stride); - dst += (4 * dst_stride); - - hz_out4 = hz_out8; - out0 = out2; - out1 = out7; - out3 = out5; - out4 = out6; - } + hz_out4 = hz_out8; + out0 = out2; + out1 = out7; + out3 = out5; + out4 = out6; + } } static void common_hv_6ht_6vt_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - int32_t multiple8_cnt; - for (multiple8_cnt = 2; multiple8_cnt--;) - { - common_hv_6ht_6vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz, - filter_vert, height); - src += 8; - dst += 8; - } + int32_t height) { + int32_t multiple8_cnt; + for (multiple8_cnt = 2; multiple8_cnt--;) { + common_hv_6ht_6vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz, + filter_vert, height); + src += 8; + dst += 8; + } } static void common_hz_4t_4x4_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter) -{ - v16i8 src0, src1, src2, src3, filt0, filt1, mask0, mask1; - v8i16 filt, out0, out1; - v16u8 out; + const int8_t *filter) { + v16i8 src0, src1, src2, src3, filt0, filt1, mask0, mask1; + v8i16 filt, out0, out1; + v16u8 out; - mask0 = LD_SB(&vp8_mc_filt_mask_arr[16]); - src -= 1; + mask0 = LD_SB(&vp8_mc_filt_mask_arr[16]); + src -= 1; - filt = LD_SH(filter); - SPLATI_H2_SB(filt, 0, 1, filt0, filt1); + filt = LD_SH(filter); + SPLATI_H2_SB(filt, 0, 1, filt0, filt1); - 
mask1 = mask0 + 2; + mask1 = mask0 + 2; - LD_SB4(src, src_stride, src0, src1, src2, src3); - XORI_B4_128_SB(src0, src1, src2, src3); - HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, - filt0, filt1, out0, out1); - SRARI_H2_SH(out0, out1, VP8_FILTER_SHIFT); - SAT_SH2_SH(out0, out1, 7); - out = PCKEV_XORI128_UB(out0, out1); - ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + LD_SB4(src, src_stride, src0, src1, src2, src3); + XORI_B4_128_SB(src0, src1, src2, src3); + HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, filt1, + out0, out1); + SRARI_H2_SH(out0, out1, VP8_FILTER_SHIFT); + SAT_SH2_SH(out0, out1, 7); + out = PCKEV_XORI128_UB(out0, out1); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); } static void common_hz_4t_4x8_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter) -{ - v16i8 src0, src1, src2, src3, filt0, filt1, mask0, mask1; - v16u8 out; - v8i16 filt, out0, out1, out2, out3; + const int8_t *filter) { + v16i8 src0, src1, src2, src3, filt0, filt1, mask0, mask1; + v16u8 out; + v8i16 filt, out0, out1, out2, out3; - mask0 = LD_SB(&vp8_mc_filt_mask_arr[16]); - src -= 1; + mask0 = LD_SB(&vp8_mc_filt_mask_arr[16]); + src -= 1; - filt = LD_SH(filter); - SPLATI_H2_SB(filt, 0, 1, filt0, filt1); + filt = LD_SH(filter); + SPLATI_H2_SB(filt, 0, 1, filt0, filt1); - mask1 = mask0 + 2; + mask1 = mask0 + 2; - LD_SB4(src, src_stride, src0, src1, src2, src3); - src += (4 * src_stride); + LD_SB4(src, src_stride, src0, src1, src2, src3); + src += (4 * src_stride); - XORI_B4_128_SB(src0, src1, src2, src3); - HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, - filt0, filt1, out0, out1); - LD_SB4(src, src_stride, src0, src1, src2, src3); - XORI_B4_128_SB(src0, src1, src2, src3); - HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, - filt0, filt1, out2, out3); - SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); - SAT_SH4_SH(out0, out1, out2, 
out3, 7); - out = PCKEV_XORI128_UB(out0, out1); - ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); - dst += (4 * dst_stride); - out = PCKEV_XORI128_UB(out2, out3); - ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + XORI_B4_128_SB(src0, src1, src2, src3); + HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, filt1, + out0, out1); + LD_SB4(src, src_stride, src0, src1, src2, src3); + XORI_B4_128_SB(src0, src1, src2, src3); + HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, filt1, + out2, out3); + SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0, out1, out2, out3, 7); + out = PCKEV_XORI128_UB(out0, out1); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + dst += (4 * dst_stride); + out = PCKEV_XORI128_UB(out2, out3); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); } static void common_hz_4t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - if (4 == height) - { - common_hz_4t_4x4_msa(src, src_stride, dst, dst_stride, filter); - } - else if (8 == height) - { - common_hz_4t_4x8_msa(src, src_stride, dst, dst_stride, filter); - } + const int8_t *filter, int32_t height) { + if (4 == height) { + common_hz_4t_4x4_msa(src, src_stride, dst, dst_stride, filter); + } else if (8 == height) { + common_hz_4t_4x8_msa(src, src_stride, dst, dst_stride, filter); + } } static void common_hz_4t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, filt0, filt1, mask0, mask1; - v16u8 tmp0, tmp1; - v8i16 filt, out0, out1, out2, out3; + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, filt0, filt1, mask0, mask1; + v16u8 tmp0, tmp1; + v8i16 filt, out0, out1, out2, out3; - mask0 = LD_SB(&vp8_mc_filt_mask_arr[0]); - src -= 1; + mask0 = 
LD_SB(&vp8_mc_filt_mask_arr[0]); + src -= 1; - filt = LD_SH(filter); - SPLATI_H2_SB(filt, 0, 1, filt0, filt1); + filt = LD_SH(filter); + SPLATI_H2_SB(filt, 0, 1, filt0, filt1); - mask1 = mask0 + 2; + mask1 = mask0 + 2; - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src0, src1, src2, src3); - src += (4 * src_stride); + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src0, src1, src2, src3); + src += (4 * src_stride); - XORI_B4_128_SB(src0, src1, src2, src3); - HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, - filt1, out0, out1, out2, out3); - SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); - SAT_SH4_SH(out0, out1, out2, out3, 7); - tmp0 = PCKEV_XORI128_UB(out0, out1); - tmp1 = PCKEV_XORI128_UB(out2, out3); - ST8x4_UB(tmp0, tmp1, dst, dst_stride); - dst += (4 * dst_stride); - } + XORI_B4_128_SB(src0, src1, src2, src3); + HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, + filt1, out0, out1, out2, out3); + SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0, out1, out2, out3, 7); + tmp0 = PCKEV_XORI128_UB(out0, out1); + tmp1 = PCKEV_XORI128_UB(out2, out3); + ST8x4_UB(tmp0, tmp1, dst, dst_stride); + dst += (4 * dst_stride); + } } static void common_hz_4t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7; - v16i8 filt0, filt1, mask0, mask1; - v8i16 filt, out0, out1, out2, out3, out4, out5, out6, out7; - v16u8 out; + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7; + v16i8 filt0, filt1, mask0, mask1; + v8i16 filt, out0, out1, out2, out3, out4, out5, out6, out7; + v16u8 out; - mask0 = LD_SB(&vp8_mc_filt_mask_arr[0]); - src -= 1; + mask0 = LD_SB(&vp8_mc_filt_mask_arr[0]); + src -= 1; - filt = LD_SH(filter); - 
SPLATI_H2_SB(filt, 0, 1, filt0, filt1); + filt = LD_SH(filter); + SPLATI_H2_SB(filt, 0, 1, filt0, filt1); - mask1 = mask0 + 2; + mask1 = mask0 + 2; - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src0, src2, src4, src6); - LD_SB4(src + 8, src_stride, src1, src3, src5, src7); - src += (4 * src_stride); + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src0, src2, src4, src6); + LD_SB4(src + 8, src_stride, src1, src3, src5, src7); + src += (4 * src_stride); - XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7); - HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, - filt1, out0, out1, out2, out3); - HORIZ_4TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, filt0, - filt1, out4, out5, out6, out7); - SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); - SRARI_H4_SH(out4, out5, out6, out7, VP8_FILTER_SHIFT); - SAT_SH4_SH(out0, out1, out2, out3, 7); - SAT_SH4_SH(out4, out5, out6, out7, 7); - out = PCKEV_XORI128_UB(out0, out1); - ST_UB(out, dst); - dst += dst_stride; - out = PCKEV_XORI128_UB(out2, out3); - ST_UB(out, dst); - dst += dst_stride; - out = PCKEV_XORI128_UB(out4, out5); - ST_UB(out, dst); - dst += dst_stride; - out = PCKEV_XORI128_UB(out6, out7); - ST_UB(out, dst); - dst += dst_stride; - } + XORI_B8_128_SB(src0, src1, src2, src3, src4, src5, src6, src7); + HORIZ_4TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, filt0, + filt1, out0, out1, out2, out3); + HORIZ_4TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, filt0, + filt1, out4, out5, out6, out7); + SRARI_H4_SH(out0, out1, out2, out3, VP8_FILTER_SHIFT); + SRARI_H4_SH(out4, out5, out6, out7, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0, out1, out2, out3, 7); + SAT_SH4_SH(out4, out5, out6, out7, 7); + out = PCKEV_XORI128_UB(out0, out1); + ST_UB(out, dst); + dst += dst_stride; + out = PCKEV_XORI128_UB(out2, out3); + ST_UB(out, dst); + dst += dst_stride; + out = PCKEV_XORI128_UB(out4, out5); + ST_UB(out, dst); + 
dst += dst_stride; + out = PCKEV_XORI128_UB(out6, out7); + ST_UB(out, dst); + dst += dst_stride; + } } static void common_vt_4t_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5; - v16i8 src10_r, src32_r, src54_r, src21_r, src43_r, src65_r; - v16i8 src2110, src4332, filt0, filt1; - v8i16 filt, out10, out32; - v16u8 out; + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5; + v16i8 src10_r, src32_r, src54_r, src21_r, src43_r, src65_r; + v16i8 src2110, src4332, filt0, filt1; + v8i16 filt, out10, out32; + v16u8 out; - src -= src_stride; + src -= src_stride; - filt = LD_SH(filter); - SPLATI_H2_SB(filt, 0, 1, filt0, filt1); + filt = LD_SH(filter); + SPLATI_H2_SB(filt, 0, 1, filt0, filt1); - LD_SB3(src, src_stride, src0, src1, src2); + LD_SB3(src, src_stride, src0, src1, src2); + src += (3 * src_stride); + + ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r); + + src2110 = (v16i8)__msa_ilvr_d((v2i64)src21_r, (v2i64)src10_r); + src2110 = (v16i8)__msa_xori_b((v16u8)src2110, 128); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB3(src, src_stride, src3, src4, src5); src += (3 * src_stride); + ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r); + src4332 = (v16i8)__msa_ilvr_d((v2i64)src43_r, (v2i64)src32_r); + src4332 = (v16i8)__msa_xori_b((v16u8)src4332, 128); + out10 = FILT_4TAP_DPADD_S_H(src2110, src4332, filt0, filt1); - ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r); - - src2110 = (v16i8)__msa_ilvr_d((v2i64)src21_r, (v2i64)src10_r); + src2 = LD_SB(src); + src += (src_stride); + ILVR_B2_SB(src5, src4, src2, src5, src54_r, src65_r); + src2110 = (v16i8)__msa_ilvr_d((v2i64)src65_r, (v2i64)src54_r); src2110 = (v16i8)__msa_xori_b((v16u8)src2110, 128); - - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB3(src, src_stride, src3, src4, src5); - src += (3 
* src_stride); - ILVR_B2_SB(src3, src2, src4, src3, src32_r, src43_r); - src4332 = (v16i8)__msa_ilvr_d((v2i64)src43_r, (v2i64)src32_r); - src4332 = (v16i8)__msa_xori_b((v16u8)src4332, 128); - out10 = FILT_4TAP_DPADD_S_H(src2110, src4332, filt0, filt1); - - src2 = LD_SB(src); - src += (src_stride); - ILVR_B2_SB(src5, src4, src2, src5, src54_r, src65_r); - src2110 = (v16i8)__msa_ilvr_d((v2i64)src65_r, (v2i64)src54_r); - src2110 = (v16i8)__msa_xori_b((v16u8)src2110, 128); - out32 = FILT_4TAP_DPADD_S_H(src4332, src2110, filt0, filt1); - SRARI_H2_SH(out10, out32, VP8_FILTER_SHIFT); - SAT_SH2_SH(out10, out32, 7); - out = PCKEV_XORI128_UB(out10, out32); - ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); - dst += (4 * dst_stride); - } + out32 = FILT_4TAP_DPADD_S_H(src4332, src2110, filt0, filt1); + SRARI_H2_SH(out10, out32, VP8_FILTER_SHIFT); + SAT_SH2_SH(out10, out32, 7); + out = PCKEV_XORI128_UB(out10, out32); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + dst += (4 * dst_stride); + } } static void common_vt_4t_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src7, src8, src9, src10; - v16i8 src10_r, src72_r, src98_r, src21_r, src87_r, src109_r, filt0, filt1; - v16u8 tmp0, tmp1; - v8i16 filt, out0_r, out1_r, out2_r, out3_r; + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src7, src8, src9, src10; + v16i8 src10_r, src72_r, src98_r, src21_r, src87_r, src109_r, filt0, filt1; + v16u8 tmp0, tmp1; + v8i16 filt, out0_r, out1_r, out2_r, out3_r; - src -= src_stride; + src -= src_stride; - filt = LD_SH(filter); - SPLATI_H2_SB(filt, 0, 1, filt0, filt1); + filt = LD_SH(filter); + SPLATI_H2_SB(filt, 0, 1, filt0, filt1); - LD_SB3(src, src_stride, src0, src1, src2); - src += (3 * src_stride); + LD_SB3(src, src_stride, src0, src1, src2); + src += (3 * src_stride); - XORI_B3_128_SB(src0, src1, src2); - 
ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r); + XORI_B3_128_SB(src0, src1, src2); + ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r); - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src7, src8, src9, src10); - src += (4 * src_stride); + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src7, src8, src9, src10); + src += (4 * src_stride); - XORI_B4_128_SB(src7, src8, src9, src10); - ILVR_B4_SB(src7, src2, src8, src7, src9, src8, src10, src9, - src72_r, src87_r, src98_r, src109_r); - out0_r = FILT_4TAP_DPADD_S_H(src10_r, src72_r, filt0, filt1); - out1_r = FILT_4TAP_DPADD_S_H(src21_r, src87_r, filt0, filt1); - out2_r = FILT_4TAP_DPADD_S_H(src72_r, src98_r, filt0, filt1); - out3_r = FILT_4TAP_DPADD_S_H(src87_r, src109_r, filt0, filt1); - SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT); - SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); - tmp0 = PCKEV_XORI128_UB(out0_r, out1_r); - tmp1 = PCKEV_XORI128_UB(out2_r, out3_r); - ST8x4_UB(tmp0, tmp1, dst, dst_stride); - dst += (4 * dst_stride); + XORI_B4_128_SB(src7, src8, src9, src10); + ILVR_B4_SB(src7, src2, src8, src7, src9, src8, src10, src9, src72_r, + src87_r, src98_r, src109_r); + out0_r = FILT_4TAP_DPADD_S_H(src10_r, src72_r, filt0, filt1); + out1_r = FILT_4TAP_DPADD_S_H(src21_r, src87_r, filt0, filt1); + out2_r = FILT_4TAP_DPADD_S_H(src72_r, src98_r, filt0, filt1); + out3_r = FILT_4TAP_DPADD_S_H(src87_r, src109_r, filt0, filt1); + SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); + tmp0 = PCKEV_XORI128_UB(out0_r, out1_r); + tmp1 = PCKEV_XORI128_UB(out2_r, out3_r); + ST8x4_UB(tmp0, tmp1, dst, dst_stride); + dst += (4 * dst_stride); - src10_r = src98_r; - src21_r = src109_r; - src2 = src10; - } + src10_r = src98_r; + src21_r = src109_r; + src2 = src10; + } } static void common_vt_4t_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, - const 
int8_t *filter, int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6; - v16i8 src10_r, src32_r, src54_r, src21_r, src43_r, src65_r, src10_l; - v16i8 src32_l, src54_l, src21_l, src43_l, src65_l, filt0, filt1; - v16u8 tmp0, tmp1, tmp2, tmp3; - v8i16 filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l; + const int8_t *filter, int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6; + v16i8 src10_r, src32_r, src54_r, src21_r, src43_r, src65_r, src10_l; + v16i8 src32_l, src54_l, src21_l, src43_l, src65_l, filt0, filt1; + v16u8 tmp0, tmp1, tmp2, tmp3; + v8i16 filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l; - src -= src_stride; + src -= src_stride; - filt = LD_SH(filter); - SPLATI_H2_SB(filt, 0, 1, filt0, filt1); + filt = LD_SH(filter); + SPLATI_H2_SB(filt, 0, 1, filt0, filt1); - LD_SB3(src, src_stride, src0, src1, src2); - src += (3 * src_stride); + LD_SB3(src, src_stride, src0, src1, src2); + src += (3 * src_stride); - XORI_B3_128_SB(src0, src1, src2); - ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r); - ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l); + XORI_B3_128_SB(src0, src1, src2); + ILVR_B2_SB(src1, src0, src2, src1, src10_r, src21_r); + ILVL_B2_SB(src1, src0, src2, src1, src10_l, src21_l); - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src3, src4, src5, src6); - src += (4 * src_stride); + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src3, src4, src5, src6); + src += (4 * src_stride); - XORI_B4_128_SB(src3, src4, src5, src6); - ILVR_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5, - src32_r, src43_r, src54_r, src65_r); - ILVL_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5, - src32_l, src43_l, src54_l, src65_l); - out0_r = FILT_4TAP_DPADD_S_H(src10_r, src32_r, filt0, filt1); - out1_r = FILT_4TAP_DPADD_S_H(src21_r, src43_r, filt0, filt1); - out2_r = FILT_4TAP_DPADD_S_H(src32_r, 
src54_r, filt0, filt1); - out3_r = FILT_4TAP_DPADD_S_H(src43_r, src65_r, filt0, filt1); - out0_l = FILT_4TAP_DPADD_S_H(src10_l, src32_l, filt0, filt1); - out1_l = FILT_4TAP_DPADD_S_H(src21_l, src43_l, filt0, filt1); - out2_l = FILT_4TAP_DPADD_S_H(src32_l, src54_l, filt0, filt1); - out3_l = FILT_4TAP_DPADD_S_H(src43_l, src65_l, filt0, filt1); - SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT); - SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, VP8_FILTER_SHIFT); - SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); - SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7); - PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l, - out3_r, tmp0, tmp1, tmp2, tmp3); - XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3); - ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride); - dst += (4 * dst_stride); + XORI_B4_128_SB(src3, src4, src5, src6); + ILVR_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5, src32_r, src43_r, + src54_r, src65_r); + ILVL_B4_SB(src3, src2, src4, src3, src5, src4, src6, src5, src32_l, src43_l, + src54_l, src65_l); + out0_r = FILT_4TAP_DPADD_S_H(src10_r, src32_r, filt0, filt1); + out1_r = FILT_4TAP_DPADD_S_H(src21_r, src43_r, filt0, filt1); + out2_r = FILT_4TAP_DPADD_S_H(src32_r, src54_r, filt0, filt1); + out3_r = FILT_4TAP_DPADD_S_H(src43_r, src65_r, filt0, filt1); + out0_l = FILT_4TAP_DPADD_S_H(src10_l, src32_l, filt0, filt1); + out1_l = FILT_4TAP_DPADD_S_H(src21_l, src43_l, filt0, filt1); + out2_l = FILT_4TAP_DPADD_S_H(src32_l, src54_l, filt0, filt1); + out3_l = FILT_4TAP_DPADD_S_H(src43_l, src65_l, filt0, filt1); + SRARI_H4_SH(out0_r, out1_r, out2_r, out3_r, VP8_FILTER_SHIFT); + SRARI_H4_SH(out0_l, out1_l, out2_l, out3_l, VP8_FILTER_SHIFT); + SAT_SH4_SH(out0_r, out1_r, out2_r, out3_r, 7); + SAT_SH4_SH(out0_l, out1_l, out2_l, out3_l, 7); + PCKEV_B4_UB(out0_l, out0_r, out1_l, out1_r, out2_l, out2_r, out3_l, out3_r, + tmp0, tmp1, tmp2, tmp3); + XORI_B4_128_UB(tmp0, tmp1, tmp2, tmp3); + ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride); + dst += (4 * 
dst_stride); - src10_r = src54_r; - src21_r = src65_r; - src10_l = src54_l; - src21_l = src65_l; - src2 = src6; - } + src10_r = src54_r; + src21_r = src65_r; + src10_l = src54_l; + src21_l = src65_l; + src2 = src6; + } } static void common_hv_4ht_4vt_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, filt_hz0, filt_hz1; - v16u8 mask0, mask1, out; - v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2; - v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5; + int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, filt_hz0, filt_hz1; + v16u8 mask0, mask1, out; + v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2; + v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5; - mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]); - src -= (1 + 1 * src_stride); + mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]); + src -= (1 + 1 * src_stride); - filt = LD_SH(filter_horiz); - SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1); + filt = LD_SH(filter_horiz); + SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1); - mask1 = mask0 + 2; + mask1 = mask0 + 2; - LD_SB3(src, src_stride, src0, src1, src2); - src += (3 * src_stride); + LD_SB3(src, src_stride, src0, src1, src2); + src += (3 * src_stride); - XORI_B3_128_SB(src0, src1, src2); - hz_out0 = HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_hz0, filt_hz1); - hz_out1 = HORIZ_4TAP_FILT(src1, src2, mask0, mask1, filt_hz0, filt_hz1); - vec0 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); + XORI_B3_128_SB(src0, src1, src2); + hz_out0 = HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_hz0, filt_hz1); + hz_out1 = HORIZ_4TAP_FILT(src1, src2, mask0, mask1, filt_hz0, filt_hz1); + vec0 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); - filt = LD_SH(filter_vert); - SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1); + 
filt = LD_SH(filter_vert); + SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1); - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src3, src4, src5, src6); - src += (4 * src_stride); + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src3, src4, src5, src6); + src += (4 * src_stride); - XORI_B2_128_SB(src3, src4); - hz_out3 = HORIZ_4TAP_FILT(src3, src4, mask0, mask1, filt_hz0, filt_hz1); - hz_out2 = (v8i16)__msa_sldi_b((v16i8)hz_out3, (v16i8)hz_out1, 8); - vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2); - tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); + XORI_B2_128_SB(src3, src4); + hz_out3 = HORIZ_4TAP_FILT(src3, src4, mask0, mask1, filt_hz0, filt_hz1); + hz_out2 = (v8i16)__msa_sldi_b((v16i8)hz_out3, (v16i8)hz_out1, 8); + vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2); + tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); - XORI_B2_128_SB(src5, src6); - hz_out5 = HORIZ_4TAP_FILT(src5, src6, mask0, mask1, filt_hz0, filt_hz1); - hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8); - vec2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); - tmp1 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1); + XORI_B2_128_SB(src5, src6); + hz_out5 = HORIZ_4TAP_FILT(src5, src6, mask0, mask1, filt_hz0, filt_hz1); + hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8); + vec2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); + tmp1 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1); - SRARI_H2_SH(tmp0, tmp1, 7); - SAT_SH2_SH(tmp0, tmp1, 7); - out = PCKEV_XORI128_UB(tmp0, tmp1); - ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); - dst += (4 * dst_stride); + SRARI_H2_SH(tmp0, tmp1, 7); + SAT_SH2_SH(tmp0, tmp1, 7); + out = PCKEV_XORI128_UB(tmp0, tmp1); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + dst += (4 * dst_stride); - hz_out1 = hz_out5; - vec0 = vec2; - } + hz_out1 = hz_out5; + vec0 = vec2; + } } static void common_hv_4ht_4vt_8w_msa(uint8_t 
*RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, filt_hz0, filt_hz1; - v16u8 mask0, mask1, out0, out1; - v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, tmp2, tmp3; - v8i16 hz_out0, hz_out1, hz_out2, hz_out3; - v8i16 vec0, vec1, vec2, vec3, vec4; + int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, filt_hz0, filt_hz1; + v16u8 mask0, mask1, out0, out1; + v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, tmp2, tmp3; + v8i16 hz_out0, hz_out1, hz_out2, hz_out3; + v8i16 vec0, vec1, vec2, vec3, vec4; - mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]); - src -= (1 + 1 * src_stride); + mask0 = LD_UB(&vp8_mc_filt_mask_arr[0]); + src -= (1 + 1 * src_stride); - filt = LD_SH(filter_horiz); - SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1); + filt = LD_SH(filter_horiz); + SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1); - mask1 = mask0 + 2; + mask1 = mask0 + 2; - LD_SB3(src, src_stride, src0, src1, src2); - src += (3 * src_stride); + LD_SB3(src, src_stride, src0, src1, src2); + src += (3 * src_stride); - XORI_B3_128_SB(src0, src1, src2); - hz_out0 = HORIZ_4TAP_FILT(src0, src0, mask0, mask1, filt_hz0, filt_hz1); - hz_out1 = HORIZ_4TAP_FILT(src1, src1, mask0, mask1, filt_hz0, filt_hz1); - hz_out2 = HORIZ_4TAP_FILT(src2, src2, mask0, mask1, filt_hz0, filt_hz1); - ILVEV_B2_SH(hz_out0, hz_out1, hz_out1, hz_out2, vec0, vec2); + XORI_B3_128_SB(src0, src1, src2); + hz_out0 = HORIZ_4TAP_FILT(src0, src0, mask0, mask1, filt_hz0, filt_hz1); + hz_out1 = HORIZ_4TAP_FILT(src1, src1, mask0, mask1, filt_hz0, filt_hz1); + hz_out2 = HORIZ_4TAP_FILT(src2, src2, mask0, mask1, filt_hz0, filt_hz1); + ILVEV_B2_SH(hz_out0, hz_out1, hz_out1, hz_out2, vec0, vec2); - filt = LD_SH(filter_vert); - SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1); + filt = LD_SH(filter_vert); + SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1); - 
for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src3, src4, src5, src6); - src += (4 * src_stride); + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src3, src4, src5, src6); + src += (4 * src_stride); - XORI_B4_128_SB(src3, src4, src5, src6); - hz_out3 = HORIZ_4TAP_FILT(src3, src3, mask0, mask1, filt_hz0, filt_hz1); - vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2); - tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); + XORI_B4_128_SB(src3, src4, src5, src6); + hz_out3 = HORIZ_4TAP_FILT(src3, src3, mask0, mask1, filt_hz0, filt_hz1); + vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2); + tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); - hz_out0 = HORIZ_4TAP_FILT(src4, src4, mask0, mask1, filt_hz0, filt_hz1); - vec3 = (v8i16)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out3); - tmp1 = FILT_4TAP_DPADD_S_H(vec2, vec3, filt_vt0, filt_vt1); + hz_out0 = HORIZ_4TAP_FILT(src4, src4, mask0, mask1, filt_hz0, filt_hz1); + vec3 = (v8i16)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out3); + tmp1 = FILT_4TAP_DPADD_S_H(vec2, vec3, filt_vt0, filt_vt1); - hz_out1 = HORIZ_4TAP_FILT(src5, src5, mask0, mask1, filt_hz0, filt_hz1); - vec4 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); - tmp2 = FILT_4TAP_DPADD_S_H(vec1, vec4, filt_vt0, filt_vt1); + hz_out1 = HORIZ_4TAP_FILT(src5, src5, mask0, mask1, filt_hz0, filt_hz1); + vec4 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); + tmp2 = FILT_4TAP_DPADD_S_H(vec1, vec4, filt_vt0, filt_vt1); - hz_out2 = HORIZ_4TAP_FILT(src6, src6, mask0, mask1, filt_hz0, filt_hz1); - ILVEV_B2_SH(hz_out3, hz_out0, hz_out1, hz_out2, vec0, vec1); - tmp3 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); + hz_out2 = HORIZ_4TAP_FILT(src6, src6, mask0, mask1, filt_hz0, filt_hz1); + ILVEV_B2_SH(hz_out3, hz_out0, hz_out1, hz_out2, vec0, vec1); + tmp3 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); - SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_SH4_SH(tmp0, 
tmp1, tmp2, tmp3, 7); - out0 = PCKEV_XORI128_UB(tmp0, tmp1); - out1 = PCKEV_XORI128_UB(tmp2, tmp3); - ST8x4_UB(out0, out1, dst, dst_stride); - dst += (4 * dst_stride); + SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7); + SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7); + out0 = PCKEV_XORI128_UB(tmp0, tmp1); + out1 = PCKEV_XORI128_UB(tmp2, tmp3); + ST8x4_UB(out0, out1, dst, dst_stride); + dst += (4 * dst_stride); - vec0 = vec4; - vec2 = vec1; - } + vec0 = vec4; + vec2 = vec1; + } } static void common_hv_4ht_4vt_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - int32_t multiple8_cnt; - for (multiple8_cnt = 2; multiple8_cnt--;) - { - common_hv_4ht_4vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz, - filter_vert, height); - src += 8; - dst += 8; - } + int32_t height) { + int32_t multiple8_cnt; + for (multiple8_cnt = 2; multiple8_cnt--;) { + common_hv_4ht_4vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz, + filter_vert, height); + src += 8; + dst += 8; + } } static void common_hv_6ht_4vt_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6; - v16i8 filt_hz0, filt_hz1, filt_hz2; - v16u8 res0, res1, mask0, mask1, mask2; - v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2; - v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5; + int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6; + v16i8 filt_hz0, filt_hz1, filt_hz2; + v16u8 res0, res1, mask0, mask1, mask2; + v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2; + v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5; - mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]); - src -= (2 + 1 * src_stride); + mask0 = LD_UB(&vp8_mc_filt_mask_arr[16]); + src -= (2 + 1 * 
src_stride); - filt = LD_SH(filter_horiz); - SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2); + filt = LD_SH(filter_horiz); + SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2); - mask1 = mask0 + 2; - mask2 = mask0 + 4; + mask1 = mask0 + 2; + mask2 = mask0 + 4; - LD_SB3(src, src_stride, src0, src1, src2); - src += (3 * src_stride); + LD_SB3(src, src_stride, src0, src1, src2); + src += (3 * src_stride); - XORI_B3_128_SB(src0, src1, src2); - hz_out0 = HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, filt_hz0, + XORI_B3_128_SB(src0, src1, src2); + hz_out0 = HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out1 = HORIZ_6TAP_FILT(src1, src2, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + vec0 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); + + filt = LD_SH(filter_vert); + SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src3, src4, src5, src6); + src += (4 * src_stride); + + XORI_B4_128_SB(src3, src4, src5, src6); + hz_out3 = HORIZ_6TAP_FILT(src3, src4, mask0, mask1, mask2, filt_hz0, filt_hz1, filt_hz2); - hz_out1 = HORIZ_6TAP_FILT(src1, src2, mask0, mask1, mask2, filt_hz0, + hz_out2 = (v8i16)__msa_sldi_b((v16i8)hz_out3, (v16i8)hz_out1, 8); + vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2); + tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); + + hz_out5 = HORIZ_6TAP_FILT(src5, src6, mask0, mask1, mask2, filt_hz0, filt_hz1, filt_hz2); - vec0 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); + hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8); + vec2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); + tmp1 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1); - filt = LD_SH(filter_vert); - SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1); + SRARI_H2_SH(tmp0, tmp1, 7); + SAT_SH2_SH(tmp0, tmp1, 7); + PCKEV_B2_UB(tmp0, tmp0, tmp1, tmp1, res0, res1); + XORI_B2_128_UB(res0, res1); + 
ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride); + dst += (4 * dst_stride); - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src3, src4, src5, src6); - src += (4 * src_stride); - - XORI_B4_128_SB(src3, src4, src5, src6); - hz_out3 = HORIZ_6TAP_FILT(src3, src4, mask0, mask1, mask2, filt_hz0, - filt_hz1, filt_hz2); - hz_out2 = (v8i16)__msa_sldi_b((v16i8)hz_out3, (v16i8)hz_out1, 8); - vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2); - tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); - - hz_out5 = HORIZ_6TAP_FILT(src5, src6, mask0, mask1, mask2, filt_hz0, - filt_hz1, filt_hz2); - hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8); - vec2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); - tmp1 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1); - - SRARI_H2_SH(tmp0, tmp1, 7); - SAT_SH2_SH(tmp0, tmp1, 7); - PCKEV_B2_UB(tmp0, tmp0, tmp1, tmp1, res0, res1); - XORI_B2_128_UB(res0, res1); - ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride); - dst += (4 * dst_stride); - - hz_out1 = hz_out5; - vec0 = vec2; - } + hz_out1 = hz_out5; + vec0 = vec2; + } } static void common_hv_6ht_4vt_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6; - v16i8 filt_hz0, filt_hz1, filt_hz2, mask0, mask1, mask2; - v8i16 filt, filt_vt0, filt_vt1, hz_out0, hz_out1, hz_out2, hz_out3; - v8i16 tmp0, tmp1, tmp2, tmp3, vec0, vec1, vec2, vec3; - v16u8 out0, out1; + int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6; + v16i8 filt_hz0, filt_hz1, filt_hz2, mask0, mask1, mask2; + v8i16 filt, filt_vt0, filt_vt1, hz_out0, hz_out1, hz_out2, hz_out3; + v8i16 tmp0, tmp1, tmp2, tmp3, vec0, vec1, vec2, vec3; + v16u8 out0, out1; - mask0 = LD_SB(&vp8_mc_filt_mask_arr[0]); - src -= (2 + src_stride); + mask0 = 
LD_SB(&vp8_mc_filt_mask_arr[0]); + src -= (2 + src_stride); - filt = LD_SH(filter_horiz); - SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2); + filt = LD_SH(filter_horiz); + SPLATI_H3_SB(filt, 0, 1, 2, filt_hz0, filt_hz1, filt_hz2); - mask1 = mask0 + 2; - mask2 = mask0 + 4; + mask1 = mask0 + 2; + mask2 = mask0 + 4; - LD_SB3(src, src_stride, src0, src1, src2); - src += (3 * src_stride); + LD_SB3(src, src_stride, src0, src1, src2); + src += (3 * src_stride); - XORI_B3_128_SB(src0, src1, src2); - hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0, + XORI_B3_128_SB(src0, src1, src2); + hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + hz_out2 = HORIZ_6TAP_FILT(src2, src2, mask0, mask1, mask2, filt_hz0, filt_hz1, + filt_hz2); + ILVEV_B2_SH(hz_out0, hz_out1, hz_out1, hz_out2, vec0, vec2); + + filt = LD_SH(filter_vert); + SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1); + + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src3, src4, src5, src6); + src += (4 * src_stride); + + XORI_B4_128_SB(src3, src4, src5, src6); + + hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0, filt_hz1, filt_hz2); - hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0, + vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2); + tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); + + hz_out0 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0, filt_hz1, filt_hz2); - hz_out2 = HORIZ_6TAP_FILT(src2, src2, mask0, mask1, mask2, filt_hz0, + vec3 = (v8i16)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out3); + tmp1 = FILT_4TAP_DPADD_S_H(vec2, vec3, filt_vt0, filt_vt1); + + hz_out1 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0, filt_hz1, filt_hz2); - ILVEV_B2_SH(hz_out0, hz_out1, hz_out1, hz_out2, vec0, vec2); + vec0 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, 
(v16i8)hz_out0); + tmp2 = FILT_4TAP_DPADD_S_H(vec1, vec0, filt_vt0, filt_vt1); - filt = LD_SH(filter_vert); - SPLATI_H2_SH(filt, 0, 1, filt_vt0, filt_vt1); + hz_out2 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0, + filt_hz1, filt_hz2); + ILVEV_B2_SH(hz_out3, hz_out0, hz_out1, hz_out2, vec1, vec2); + tmp3 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1); - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src3, src4, src5, src6); - src += (4 * src_stride); - - XORI_B4_128_SB(src3, src4, src5, src6); - - hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0, - filt_hz1, filt_hz2); - vec1 = (v8i16)__msa_ilvev_b((v16i8)hz_out3, (v16i8)hz_out2); - tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); - - hz_out0 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0, - filt_hz1, filt_hz2); - vec3 = (v8i16)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out3); - tmp1 = FILT_4TAP_DPADD_S_H(vec2, vec3, filt_vt0, filt_vt1); - - hz_out1 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0, - filt_hz1, filt_hz2); - vec0 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); - tmp2 = FILT_4TAP_DPADD_S_H(vec1, vec0, filt_vt0, filt_vt1); - - hz_out2 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0, - filt_hz1, filt_hz2); - ILVEV_B2_SH(hz_out3, hz_out0, hz_out1, hz_out2, vec1, vec2); - tmp3 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1); - - SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7); - out0 = PCKEV_XORI128_UB(tmp0, tmp1); - out1 = PCKEV_XORI128_UB(tmp2, tmp3); - ST8x4_UB(out0, out1, dst, dst_stride); - dst += (4 * dst_stride); - } + SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7); + SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7); + out0 = PCKEV_XORI128_UB(tmp0, tmp1); + out1 = PCKEV_XORI128_UB(tmp2, tmp3); + ST8x4_UB(out0, out1, dst, dst_stride); + dst += (4 * dst_stride); + } } static void common_hv_6ht_4vt_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT 
dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - int32_t multiple8_cnt; - for (multiple8_cnt = 2; multiple8_cnt--;) - { - common_hv_6ht_4vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz, - filter_vert, height); - src += 8; - dst += 8; - } + int32_t height) { + int32_t multiple8_cnt; + for (multiple8_cnt = 2; multiple8_cnt--;) { + common_hv_6ht_4vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz, + filter_vert, height); + src += 8; + dst += 8; + } } static void common_hv_4ht_6vt_4w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; - v16i8 filt_hz0, filt_hz1, mask0, mask1; - v16u8 out; - v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; - v8i16 hz_out7, tmp0, tmp1, out0, out1, out2, out3; - v8i16 filt, filt_vt0, filt_vt1, filt_vt2; + int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; + v16i8 filt_hz0, filt_hz1, mask0, mask1; + v16u8 out; + v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; + v8i16 hz_out7, tmp0, tmp1, out0, out1, out2, out3; + v8i16 filt, filt_vt0, filt_vt1, filt_vt2; - mask0 = LD_SB(&vp8_mc_filt_mask_arr[16]); + mask0 = LD_SB(&vp8_mc_filt_mask_arr[16]); - src -= (1 + 2 * src_stride); + src -= (1 + 2 * src_stride); - filt = LD_SH(filter_horiz); - SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1); + filt = LD_SH(filter_horiz); + SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1); - mask1 = mask0 + 2; + mask1 = mask0 + 2; - LD_SB5(src, src_stride, src0, src1, src2, src3, src4); - src += (5 * src_stride); + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + src += (5 * src_stride); - XORI_B5_128_SB(src0, src1, src2, src3, src4); - hz_out0 = HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_hz0, filt_hz1); - 
hz_out2 = HORIZ_4TAP_FILT(src2, src3, mask0, mask1, filt_hz0, filt_hz1); - hz_out3 = HORIZ_4TAP_FILT(src3, src4, mask0, mask1, filt_hz0, filt_hz1); - hz_out1 = (v8i16)__msa_sldi_b((v16i8)hz_out2, (v16i8)hz_out0, 8); - ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1); + XORI_B5_128_SB(src0, src1, src2, src3, src4); + hz_out0 = HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_hz0, filt_hz1); + hz_out2 = HORIZ_4TAP_FILT(src2, src3, mask0, mask1, filt_hz0, filt_hz1); + hz_out3 = HORIZ_4TAP_FILT(src3, src4, mask0, mask1, filt_hz0, filt_hz1); + hz_out1 = (v8i16)__msa_sldi_b((v16i8)hz_out2, (v16i8)hz_out0, 8); + ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1); - filt = LD_SH(filter_vert); - SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2); + filt = LD_SH(filter_vert); + SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2); - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src5, src6, src7, src8); - XORI_B4_128_SB(src5, src6, src7, src8); - src += (4 * src_stride); + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src5, src6, src7, src8); + XORI_B4_128_SB(src5, src6, src7, src8); + src += (4 * src_stride); - hz_out5 = HORIZ_4TAP_FILT(src5, src6, mask0, mask1, filt_hz0, filt_hz1); - hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8); - out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); - tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); + hz_out5 = HORIZ_4TAP_FILT(src5, src6, mask0, mask1, filt_hz0, filt_hz1); + hz_out4 = (v8i16)__msa_sldi_b((v16i8)hz_out5, (v16i8)hz_out3, 8); + out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); + tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); - hz_out7 = HORIZ_4TAP_FILT(src7, src8, mask0, mask1, filt_hz0, filt_hz1); - hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8); - out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); - tmp1 = DPADD_SH3_SH(out1, out2, 
out3, filt_vt0, filt_vt1, filt_vt2); + hz_out7 = HORIZ_4TAP_FILT(src7, src8, mask0, mask1, filt_hz0, filt_hz1); + hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8); + out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); + tmp1 = DPADD_SH3_SH(out1, out2, out3, filt_vt0, filt_vt1, filt_vt2); - SRARI_H2_SH(tmp0, tmp1, 7); - SAT_SH2_SH(tmp0, tmp1, 7); - out = PCKEV_XORI128_UB(tmp0, tmp1); - ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); - dst += (4 * dst_stride); + SRARI_H2_SH(tmp0, tmp1, 7); + SAT_SH2_SH(tmp0, tmp1, 7); + out = PCKEV_XORI128_UB(tmp0, tmp1); + ST4x4_UB(out, out, 0, 1, 2, 3, dst, dst_stride); + dst += (4 * dst_stride); - hz_out3 = hz_out7; - out0 = out2; - out1 = out3; - } + hz_out3 = hz_out7; + out0 = out2; + out1 = out3; + } } static void common_hv_4ht_6vt_8w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - uint32_t loop_cnt; - v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; - v16i8 filt_hz0, filt_hz1, mask0, mask1; - v8i16 filt, filt_vt0, filt_vt1, filt_vt2, tmp0, tmp1, tmp2, tmp3; - v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; - v8i16 hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7; - v16u8 vec0, vec1; + int32_t height) { + uint32_t loop_cnt; + v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; + v16i8 filt_hz0, filt_hz1, mask0, mask1; + v8i16 filt, filt_vt0, filt_vt1, filt_vt2, tmp0, tmp1, tmp2, tmp3; + v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; + v8i16 hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7; + v16u8 vec0, vec1; - mask0 = LD_SB(&vp8_mc_filt_mask_arr[0]); - src -= (1 + 2 * src_stride); + mask0 = LD_SB(&vp8_mc_filt_mask_arr[0]); + src -= (1 + 2 * src_stride); - filt = LD_SH(filter_horiz); - SPLATI_H2_SB(filt, 0, 1, filt_hz0, filt_hz1); + filt = LD_SH(filter_horiz); + SPLATI_H2_SB(filt, 0, 1, 
filt_hz0, filt_hz1); - mask1 = mask0 + 2; + mask1 = mask0 + 2; - LD_SB5(src, src_stride, src0, src1, src2, src3, src4); - src += (5 * src_stride); + LD_SB5(src, src_stride, src0, src1, src2, src3, src4); + src += (5 * src_stride); - XORI_B5_128_SB(src0, src1, src2, src3, src4); - hz_out0 = HORIZ_4TAP_FILT(src0, src0, mask0, mask1, filt_hz0, filt_hz1); - hz_out1 = HORIZ_4TAP_FILT(src1, src1, mask0, mask1, filt_hz0, filt_hz1); - hz_out2 = HORIZ_4TAP_FILT(src2, src2, mask0, mask1, filt_hz0, filt_hz1); - hz_out3 = HORIZ_4TAP_FILT(src3, src3, mask0, mask1, filt_hz0, filt_hz1); - hz_out4 = HORIZ_4TAP_FILT(src4, src4, mask0, mask1, filt_hz0, filt_hz1); - ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1); - ILVEV_B2_SH(hz_out1, hz_out2, hz_out3, hz_out4, out3, out4); + XORI_B5_128_SB(src0, src1, src2, src3, src4); + hz_out0 = HORIZ_4TAP_FILT(src0, src0, mask0, mask1, filt_hz0, filt_hz1); + hz_out1 = HORIZ_4TAP_FILT(src1, src1, mask0, mask1, filt_hz0, filt_hz1); + hz_out2 = HORIZ_4TAP_FILT(src2, src2, mask0, mask1, filt_hz0, filt_hz1); + hz_out3 = HORIZ_4TAP_FILT(src3, src3, mask0, mask1, filt_hz0, filt_hz1); + hz_out4 = HORIZ_4TAP_FILT(src4, src4, mask0, mask1, filt_hz0, filt_hz1); + ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, out0, out1); + ILVEV_B2_SH(hz_out1, hz_out2, hz_out3, hz_out4, out3, out4); - filt = LD_SH(filter_vert); - SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2); + filt = LD_SH(filter_vert); + SPLATI_H3_SH(filt, 0, 1, 2, filt_vt0, filt_vt1, filt_vt2); - for (loop_cnt = (height >> 2); loop_cnt--;) - { - LD_SB4(src, src_stride, src5, src6, src7, src8); - src += (4 * src_stride); + for (loop_cnt = (height >> 2); loop_cnt--;) { + LD_SB4(src, src_stride, src5, src6, src7, src8); + src += (4 * src_stride); - XORI_B4_128_SB(src5, src6, src7, src8); + XORI_B4_128_SB(src5, src6, src7, src8); - hz_out5 = HORIZ_4TAP_FILT(src5, src5, mask0, mask1, filt_hz0, filt_hz1); - out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); - tmp0 = 
DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); + hz_out5 = HORIZ_4TAP_FILT(src5, src5, mask0, mask1, filt_hz0, filt_hz1); + out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); + tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2); - hz_out6 = HORIZ_4TAP_FILT(src6, src6, mask0, mask1, filt_hz0, filt_hz1); - out5 = (v8i16)__msa_ilvev_b((v16i8)hz_out6, (v16i8)hz_out5); - tmp1 = DPADD_SH3_SH(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2); + hz_out6 = HORIZ_4TAP_FILT(src6, src6, mask0, mask1, filt_hz0, filt_hz1); + out5 = (v8i16)__msa_ilvev_b((v16i8)hz_out6, (v16i8)hz_out5); + tmp1 = DPADD_SH3_SH(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2); - hz_out7 = HORIZ_4TAP_FILT(src7, src7, mask0, mask1, filt_hz0, filt_hz1); - out6 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); - tmp2 = DPADD_SH3_SH(out1, out2, out6, filt_vt0, filt_vt1, filt_vt2); + hz_out7 = HORIZ_4TAP_FILT(src7, src7, mask0, mask1, filt_hz0, filt_hz1); + out6 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); + tmp2 = DPADD_SH3_SH(out1, out2, out6, filt_vt0, filt_vt1, filt_vt2); - hz_out8 = HORIZ_4TAP_FILT(src8, src8, mask0, mask1, filt_hz0, filt_hz1); - out7 = (v8i16)__msa_ilvev_b((v16i8)hz_out8, (v16i8)hz_out7); - tmp3 = DPADD_SH3_SH(out4, out5, out7, filt_vt0, filt_vt1, filt_vt2); + hz_out8 = HORIZ_4TAP_FILT(src8, src8, mask0, mask1, filt_hz0, filt_hz1); + out7 = (v8i16)__msa_ilvev_b((v16i8)hz_out8, (v16i8)hz_out7); + tmp3 = DPADD_SH3_SH(out4, out5, out7, filt_vt0, filt_vt1, filt_vt2); - SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7); - SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7); - vec0 = PCKEV_XORI128_UB(tmp0, tmp1); - vec1 = PCKEV_XORI128_UB(tmp2, tmp3); - ST8x4_UB(vec0, vec1, dst, dst_stride); - dst += (4 * dst_stride); + SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, 7); + SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7); + vec0 = PCKEV_XORI128_UB(tmp0, tmp1); + vec1 = PCKEV_XORI128_UB(tmp2, tmp3); + ST8x4_UB(vec0, vec1, dst, dst_stride); + dst += (4 * dst_stride); - hz_out4 = 
hz_out8; - out0 = out2; - out1 = out6; - out3 = out5; - out4 = out7; - } + hz_out4 = hz_out8; + out0 = out2; + out1 = out6; + out3 = out5; + out4 = out7; + } } static void common_hv_4ht_6vt_16w_msa(uint8_t *RESTRICT src, int32_t src_stride, uint8_t *RESTRICT dst, int32_t dst_stride, const int8_t *filter_horiz, const int8_t *filter_vert, - int32_t height) -{ - int32_t multiple8_cnt; - for (multiple8_cnt = 2; multiple8_cnt--;) - { - common_hv_4ht_6vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz, - filter_vert, height); - src += 8; - dst += 8; - } + int32_t height) { + int32_t multiple8_cnt; + for (multiple8_cnt = 2; multiple8_cnt--;) { + common_hv_4ht_6vt_8w_msa(src, src_stride, dst, dst_stride, filter_horiz, + filter_vert, height); + src += 8; + dst += 8; + } } void vp8_sixtap_predict4x4_msa(uint8_t *RESTRICT src, int32_t src_stride, int32_t xoffset, int32_t yoffset, - uint8_t *RESTRICT dst, int32_t dst_stride) -{ - const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1]; - const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1]; + uint8_t *RESTRICT dst, int32_t dst_stride) { + const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1]; + const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1]; - if (yoffset) - { - if (xoffset) - { - switch (xoffset) - { - case 2: - case 4: - case 6: - switch (yoffset) - { - case 2: - case 4: - case 6: - common_hv_6ht_6vt_4w_msa(src, src_stride, dst, - dst_stride, h_filter, - v_filter, 4); - break; - - case 1: - case 3: - case 5: - case 7: - common_hv_6ht_4vt_4w_msa(src, src_stride, dst, - dst_stride, h_filter, - v_filter + 1, 4); - break; - } - break; - - case 1: - case 3: - case 5: - case 7: - switch (yoffset) - { - case 2: - case 4: - case 6: - common_hv_4ht_6vt_4w_msa(src, src_stride, dst, - dst_stride, h_filter + 1, - v_filter, 4); - break; - - case 1: - case 3: - case 5: - case 7: - common_hv_4ht_4vt_4w_msa(src, src_stride, dst, - dst_stride, h_filter + 1, - v_filter + 1, 4); - break; - } - break; - } 
- } - else - { - switch (yoffset) - { - case 2: - case 4: - case 6: - common_vt_6t_4w_msa(src, src_stride, dst, dst_stride, - v_filter, 4); - break; - - case 1: - case 3: - case 5: - case 7: - common_vt_4t_4w_msa(src, src_stride, dst, dst_stride, - v_filter + 1, 4); - break; - } - } - } - else - { - switch (xoffset) - { - case 0: - { - uint32_t tp0, tp1, tp2, tp3; - - LW4(src, src_stride, tp0, tp1, tp2, tp3); - SW4(tp0, tp1, tp2, tp3, dst, dst_stride); - break; - } + if (yoffset) { + if (xoffset) { + switch (xoffset) { + case 2: + case 4: + case 6: + switch (yoffset) { case 2: case 4: case 6: - common_hz_6t_4w_msa(src, src_stride, dst, dst_stride, - h_filter, 4); - break; + common_hv_6ht_6vt_4w_msa(src, src_stride, dst, dst_stride, + h_filter, v_filter, 4); + break; case 1: case 3: case 5: case 7: - common_hz_4t_4w_msa(src, src_stride, dst, dst_stride, - h_filter + 1, 4); - break; - } + common_hv_6ht_4vt_4w_msa(src, src_stride, dst, dst_stride, + h_filter, v_filter + 1, 4); + break; + } + break; + + case 1: + case 3: + case 5: + case 7: + switch (yoffset) { + case 2: + case 4: + case 6: + common_hv_4ht_6vt_4w_msa(src, src_stride, dst, dst_stride, + h_filter + 1, v_filter, 4); + break; + + case 1: + case 3: + case 5: + case 7: + common_hv_4ht_4vt_4w_msa(src, src_stride, dst, dst_stride, + h_filter + 1, v_filter + 1, 4); + break; + } + break; + } + } else { + switch (yoffset) { + case 2: + case 4: + case 6: + common_vt_6t_4w_msa(src, src_stride, dst, dst_stride, v_filter, 4); + break; + + case 1: + case 3: + case 5: + case 7: + common_vt_4t_4w_msa(src, src_stride, dst, dst_stride, v_filter + 1, + 4); + break; + } } + } else { + switch (xoffset) { + case 0: { + uint32_t tp0, tp1, tp2, tp3; + + LW4(src, src_stride, tp0, tp1, tp2, tp3); + SW4(tp0, tp1, tp2, tp3, dst, dst_stride); + break; + } + case 2: + case 4: + case 6: + common_hz_6t_4w_msa(src, src_stride, dst, dst_stride, h_filter, 4); + break; + + case 1: + case 3: + case 5: + case 7: + common_hz_4t_4w_msa(src, 
src_stride, dst, dst_stride, h_filter + 1, 4); + break; + } + } } void vp8_sixtap_predict8x4_msa(uint8_t *RESTRICT src, int32_t src_stride, int32_t xoffset, int32_t yoffset, - uint8_t *RESTRICT dst, int32_t dst_stride) -{ - const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1]; - const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1]; + uint8_t *RESTRICT dst, int32_t dst_stride) { + const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1]; + const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1]; - if (yoffset) - { - if (xoffset) - { - switch (xoffset) - { - case 2: - case 4: - case 6: - switch (yoffset) - { - case 2: - case 4: - case 6: - common_hv_6ht_6vt_8w_msa(src, src_stride, dst, - dst_stride, h_filter, - v_filter, 4); - break; - - case 1: - case 3: - case 5: - case 7: - common_hv_6ht_4vt_8w_msa(src, src_stride, dst, - dst_stride, h_filter, - v_filter + 1, 4); - break; - } - break; - - case 1: - case 3: - case 5: - case 7: - switch (yoffset) - { - case 2: - case 4: - case 6: - common_hv_4ht_6vt_8w_msa(src, src_stride, dst, - dst_stride, h_filter + 1, - v_filter, 4); - break; - - case 1: - case 3: - case 5: - case 7: - common_hv_4ht_4vt_8w_msa(src, src_stride, dst, - dst_stride, h_filter + 1, - v_filter + 1, 4); - break; - } - break; - } - } - else - { - switch (yoffset) - { - case 2: - case 4: - case 6: - common_vt_6t_8w_msa(src, src_stride, dst, dst_stride, - v_filter, 4); - break; - - case 1: - case 3: - case 5: - case 7: - common_vt_4t_8w_msa(src, src_stride, dst, dst_stride, - v_filter + 1, 4); - break; - } - } - } - else - { - switch (xoffset) - { - case 0: - vp8_copy_mem8x4(src, src_stride, dst, dst_stride); - break; + if (yoffset) { + if (xoffset) { + switch (xoffset) { + case 2: + case 4: + case 6: + switch (yoffset) { case 2: case 4: case 6: - common_hz_6t_8w_msa(src, src_stride, dst, dst_stride, - h_filter, 4); - break; + common_hv_6ht_6vt_8w_msa(src, src_stride, dst, dst_stride, + h_filter, v_filter, 4); + break; case 1: case 
3: case 5: case 7: - common_hz_4t_8w_msa(src, src_stride, dst, dst_stride, - h_filter + 1, 4); - break; - } + common_hv_6ht_4vt_8w_msa(src, src_stride, dst, dst_stride, + h_filter, v_filter + 1, 4); + break; + } + break; + + case 1: + case 3: + case 5: + case 7: + switch (yoffset) { + case 2: + case 4: + case 6: + common_hv_4ht_6vt_8w_msa(src, src_stride, dst, dst_stride, + h_filter + 1, v_filter, 4); + break; + + case 1: + case 3: + case 5: + case 7: + common_hv_4ht_4vt_8w_msa(src, src_stride, dst, dst_stride, + h_filter + 1, v_filter + 1, 4); + break; + } + break; + } + } else { + switch (yoffset) { + case 2: + case 4: + case 6: + common_vt_6t_8w_msa(src, src_stride, dst, dst_stride, v_filter, 4); + break; + + case 1: + case 3: + case 5: + case 7: + common_vt_4t_8w_msa(src, src_stride, dst, dst_stride, v_filter + 1, + 4); + break; + } } + } else { + switch (xoffset) { + case 0: vp8_copy_mem8x4(src, src_stride, dst, dst_stride); break; + case 2: + case 4: + case 6: + common_hz_6t_8w_msa(src, src_stride, dst, dst_stride, h_filter, 4); + break; + + case 1: + case 3: + case 5: + case 7: + common_hz_4t_8w_msa(src, src_stride, dst, dst_stride, h_filter + 1, 4); + break; + } + } } void vp8_sixtap_predict8x8_msa(uint8_t *RESTRICT src, int32_t src_stride, int32_t xoffset, int32_t yoffset, - uint8_t *RESTRICT dst, int32_t dst_stride) -{ - const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1]; - const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1]; + uint8_t *RESTRICT dst, int32_t dst_stride) { + const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1]; + const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1]; - if (yoffset) - { - if (xoffset) - { - switch (xoffset) - { - case 2: - case 4: - case 6: - switch (yoffset) - { - case 2: - case 4: - case 6: - common_hv_6ht_6vt_8w_msa(src, src_stride, dst, - dst_stride, h_filter, - v_filter, 8); - break; - - case 1: - case 3: - case 5: - case 7: - common_hv_6ht_4vt_8w_msa(src, src_stride, dst, - 
dst_stride, h_filter, - v_filter + 1, 8); - break; - } - break; - - case 1: - case 3: - case 5: - case 7: - switch (yoffset) - { - case 2: - case 4: - case 6: - common_hv_4ht_6vt_8w_msa(src, src_stride, dst, - dst_stride, h_filter + 1, - v_filter, 8); - break; - - case 1: - case 3: - case 5: - case 7: - common_hv_4ht_4vt_8w_msa(src, src_stride, dst, - dst_stride, h_filter + 1, - v_filter + 1, 8); - break; - } - break; - } - } - else - { - switch (yoffset) - { - case 2: - case 4: - case 6: - common_vt_6t_8w_msa(src, src_stride, dst, dst_stride, - v_filter, 8); - break; - - case 1: - case 3: - case 5: - case 7: - common_vt_4t_8w_msa(src, src_stride, dst, dst_stride, - v_filter + 1, 8); - break; - } - } - } - else - { - switch (xoffset) - { - case 0: - vp8_copy_mem8x8(src, src_stride, dst, dst_stride); - break; + if (yoffset) { + if (xoffset) { + switch (xoffset) { + case 2: + case 4: + case 6: + switch (yoffset) { case 2: case 4: case 6: - common_hz_6t_8w_msa(src, src_stride, dst, dst_stride, h_filter, - 8); - break; + common_hv_6ht_6vt_8w_msa(src, src_stride, dst, dst_stride, + h_filter, v_filter, 8); + break; case 1: case 3: case 5: case 7: - common_hz_4t_8w_msa(src, src_stride, dst, dst_stride, - h_filter + 1, 8); - break; - } + common_hv_6ht_4vt_8w_msa(src, src_stride, dst, dst_stride, + h_filter, v_filter + 1, 8); + break; + } + break; + + case 1: + case 3: + case 5: + case 7: + switch (yoffset) { + case 2: + case 4: + case 6: + common_hv_4ht_6vt_8w_msa(src, src_stride, dst, dst_stride, + h_filter + 1, v_filter, 8); + break; + + case 1: + case 3: + case 5: + case 7: + common_hv_4ht_4vt_8w_msa(src, src_stride, dst, dst_stride, + h_filter + 1, v_filter + 1, 8); + break; + } + break; + } + } else { + switch (yoffset) { + case 2: + case 4: + case 6: + common_vt_6t_8w_msa(src, src_stride, dst, dst_stride, v_filter, 8); + break; + + case 1: + case 3: + case 5: + case 7: + common_vt_4t_8w_msa(src, src_stride, dst, dst_stride, v_filter + 1, + 8); + break; + } } + } else 
{ + switch (xoffset) { + case 0: vp8_copy_mem8x8(src, src_stride, dst, dst_stride); break; + case 2: + case 4: + case 6: + common_hz_6t_8w_msa(src, src_stride, dst, dst_stride, h_filter, 8); + break; + + case 1: + case 3: + case 5: + case 7: + common_hz_4t_8w_msa(src, src_stride, dst, dst_stride, h_filter + 1, 8); + break; + } + } } void vp8_sixtap_predict16x16_msa(uint8_t *RESTRICT src, int32_t src_stride, int32_t xoffset, int32_t yoffset, - uint8_t *RESTRICT dst, int32_t dst_stride) -{ - const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1]; - const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1]; + uint8_t *RESTRICT dst, int32_t dst_stride) { + const int8_t *h_filter = vp8_subpel_filters_msa[xoffset - 1]; + const int8_t *v_filter = vp8_subpel_filters_msa[yoffset - 1]; - if (yoffset) - { - if (xoffset) - { - switch (xoffset) - { - case 2: - case 4: - case 6: - switch (yoffset) - { - case 2: - case 4: - case 6: - common_hv_6ht_6vt_16w_msa(src, src_stride, dst, - dst_stride, h_filter, - v_filter, 16); - break; - - case 1: - case 3: - case 5: - case 7: - common_hv_6ht_4vt_16w_msa(src, src_stride, dst, - dst_stride, h_filter, - v_filter + 1, 16); - break; - } - break; - - case 1: - case 3: - case 5: - case 7: - switch (yoffset) - { - case 2: - case 4: - case 6: - common_hv_4ht_6vt_16w_msa(src, src_stride, dst, - dst_stride, h_filter + 1, - v_filter, 16); - break; - - case 1: - case 3: - case 5: - case 7: - common_hv_4ht_4vt_16w_msa(src, src_stride, dst, - dst_stride, h_filter + 1, - v_filter + 1, 16); - break; - } - break; - } - } - else - { - switch (yoffset) - { - case 2: - case 4: - case 6: - common_vt_6t_16w_msa(src, src_stride, dst, dst_stride, - v_filter, 16); - break; - - case 1: - case 3: - case 5: - case 7: - common_vt_4t_16w_msa(src, src_stride, dst, dst_stride, - v_filter + 1, 16); - break; - } - } - } - else - { - switch (xoffset) - { - case 0: - vp8_copy_mem16x16(src, src_stride, dst, dst_stride); - break; + if (yoffset) { + if (xoffset) { 
+ switch (xoffset) { + case 2: + case 4: + case 6: + switch (yoffset) { case 2: case 4: case 6: - common_hz_6t_16w_msa(src, src_stride, dst, dst_stride, - h_filter, 16); - break; + common_hv_6ht_6vt_16w_msa(src, src_stride, dst, dst_stride, + h_filter, v_filter, 16); + break; case 1: case 3: case 5: case 7: - common_hz_4t_16w_msa(src, src_stride, dst, dst_stride, - h_filter + 1, 16); - break; - } + common_hv_6ht_4vt_16w_msa(src, src_stride, dst, dst_stride, + h_filter, v_filter + 1, 16); + break; + } + break; + + case 1: + case 3: + case 5: + case 7: + switch (yoffset) { + case 2: + case 4: + case 6: + common_hv_4ht_6vt_16w_msa(src, src_stride, dst, dst_stride, + h_filter + 1, v_filter, 16); + break; + + case 1: + case 3: + case 5: + case 7: + common_hv_4ht_4vt_16w_msa(src, src_stride, dst, dst_stride, + h_filter + 1, v_filter + 1, 16); + break; + } + break; + } + } else { + switch (yoffset) { + case 2: + case 4: + case 6: + common_vt_6t_16w_msa(src, src_stride, dst, dst_stride, v_filter, 16); + break; + + case 1: + case 3: + case 5: + case 7: + common_vt_4t_16w_msa(src, src_stride, dst, dst_stride, v_filter + 1, + 16); + break; + } } + } else { + switch (xoffset) { + case 0: vp8_copy_mem16x16(src, src_stride, dst, dst_stride); break; + case 2: + case 4: + case 6: + common_hz_6t_16w_msa(src, src_stride, dst, dst_stride, h_filter, 16); + break; + + case 1: + case 3: + case 5: + case 7: + common_hz_4t_16w_msa(src, src_stride, dst, dst_stride, h_filter + 1, + 16); + break; + } + } } diff --git a/libs/libvpx/vp8/common/mips/msa/vp8_macros_msa.h b/libs/libvpx/vp8/common/mips/msa/vp8_macros_msa.h index 27d5929956..65905f6c02 100644 --- a/libs/libvpx/vp8/common/mips/msa/vp8_macros_msa.h +++ b/libs/libvpx/vp8/common/mips/msa/vp8_macros_msa.h @@ -40,177 +40,159 @@ #define ST_SW(...) 
ST_W(v4i32, __VA_ARGS__) #if (__mips_isa_rev >= 6) -#define LW(psrc) \ -({ \ - const uint8_t *psrc_m = (const uint8_t *)(psrc); \ - uint32_t val_m; \ - \ - asm volatile ( \ - "lw %[val_m], %[psrc_m] \n\t" \ - \ - : [val_m] "=r" (val_m) \ - : [psrc_m] "m" (*psrc_m) \ - ); \ - \ - val_m; \ -}) +#define LW(psrc) \ + ({ \ + const uint8_t *psrc_m = (const uint8_t *)(psrc); \ + uint32_t val_m; \ + \ + asm volatile("lw %[val_m], %[psrc_m] \n\t" \ + \ + : [val_m] "=r"(val_m) \ + : [psrc_m] "m"(*psrc_m)); \ + \ + val_m; \ + }) #if (__mips == 64) -#define LD(psrc) \ -({ \ - const uint8_t *psrc_m = (const uint8_t *)(psrc); \ - uint64_t val_m = 0; \ - \ - asm volatile ( \ - "ld %[val_m], %[psrc_m] \n\t" \ - \ - : [val_m] "=r" (val_m) \ - : [psrc_m] "m" (*psrc_m) \ - ); \ - \ - val_m; \ -}) +#define LD(psrc) \ + ({ \ + const uint8_t *psrc_m = (const uint8_t *)(psrc); \ + uint64_t val_m = 0; \ + \ + asm volatile("ld %[val_m], %[psrc_m] \n\t" \ + \ + : [val_m] "=r"(val_m) \ + : [psrc_m] "m"(*psrc_m)); \ + \ + val_m; \ + }) #else // !(__mips == 64) -#define LD(psrc) \ -({ \ - const uint8_t *psrc_m = (const uint8_t *)(psrc); \ - uint32_t val0_m, val1_m; \ - uint64_t val_m = 0; \ - \ - val0_m = LW(psrc_m); \ - val1_m = LW(psrc_m + 4); \ - \ - val_m = (uint64_t)(val1_m); \ - val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); \ - val_m = (uint64_t)(val_m | (uint64_t)val0_m); \ - \ - val_m; \ -}) +#define LD(psrc) \ + ({ \ + const uint8_t *psrc_m = (const uint8_t *)(psrc); \ + uint32_t val0_m, val1_m; \ + uint64_t val_m = 0; \ + \ + val0_m = LW(psrc_m); \ + val1_m = LW(psrc_m + 4); \ + \ + val_m = (uint64_t)(val1_m); \ + val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); \ + val_m = (uint64_t)(val_m | (uint64_t)val0_m); \ + \ + val_m; \ + }) #endif // (__mips == 64) -#define SH(val, pdst) \ -{ \ - uint8_t *pdst_m = (uint8_t *)(pdst); \ - const uint16_t val_m = (val); \ - \ - asm volatile ( \ - "sh %[val_m], %[pdst_m] \n\t" \ - \ - : [pdst_m] "=m" (*pdst_m) \ - : [val_m] "r" 
(val_m) \ - ); \ -} +#define SH(val, pdst) \ + { \ + uint8_t *pdst_m = (uint8_t *)(pdst); \ + const uint16_t val_m = (val); \ + \ + asm volatile("sh %[val_m], %[pdst_m] \n\t" \ + \ + : [pdst_m] "=m"(*pdst_m) \ + : [val_m] "r"(val_m)); \ + } -#define SW(val, pdst) \ -{ \ - uint8_t *pdst_m = (uint8_t *)(pdst); \ - const uint32_t val_m = (val); \ - \ - asm volatile ( \ - "sw %[val_m], %[pdst_m] \n\t" \ - \ - : [pdst_m] "=m" (*pdst_m) \ - : [val_m] "r" (val_m) \ - ); \ -} +#define SW(val, pdst) \ + { \ + uint8_t *pdst_m = (uint8_t *)(pdst); \ + const uint32_t val_m = (val); \ + \ + asm volatile("sw %[val_m], %[pdst_m] \n\t" \ + \ + : [pdst_m] "=m"(*pdst_m) \ + : [val_m] "r"(val_m)); \ + } -#define SD(val, pdst) \ -{ \ - uint8_t *pdst_m = (uint8_t *)(pdst); \ - const uint64_t val_m = (val); \ - \ - asm volatile ( \ - "sd %[val_m], %[pdst_m] \n\t" \ - \ - : [pdst_m] "=m" (*pdst_m) \ - : [val_m] "r" (val_m) \ - ); \ -} +#define SD(val, pdst) \ + { \ + uint8_t *pdst_m = (uint8_t *)(pdst); \ + const uint64_t val_m = (val); \ + \ + asm volatile("sd %[val_m], %[pdst_m] \n\t" \ + \ + : [pdst_m] "=m"(*pdst_m) \ + : [val_m] "r"(val_m)); \ + } #else // !(__mips_isa_rev >= 6) -#define LW(psrc) \ -({ \ - const uint8_t *psrc_m = (const uint8_t *)(psrc); \ - uint32_t val_m; \ - \ - asm volatile ( \ - "ulw %[val_m], %[psrc_m] \n\t" \ - \ - : [val_m] "=r" (val_m) \ - : [psrc_m] "m" (*psrc_m) \ - ); \ - \ - val_m; \ -}) +#define LW(psrc) \ + ({ \ + const uint8_t *psrc_m = (const uint8_t *)(psrc); \ + uint32_t val_m; \ + \ + asm volatile("ulw %[val_m], %[psrc_m] \n\t" \ + \ + : [val_m] "=r"(val_m) \ + : [psrc_m] "m"(*psrc_m)); \ + \ + val_m; \ + }) #if (__mips == 64) -#define LD(psrc) \ -({ \ - const uint8_t *psrc_m = (const uint8_t *)(psrc); \ - uint64_t val_m = 0; \ - \ - asm volatile ( \ - "uld %[val_m], %[psrc_m] \n\t" \ - \ - : [val_m] "=r" (val_m) \ - : [psrc_m] "m" (*psrc_m) \ - ); \ - \ - val_m; \ -}) +#define LD(psrc) \ + ({ \ + const uint8_t *psrc_m = (const uint8_t *)(psrc); \ 
+ uint64_t val_m = 0; \ + \ + asm volatile("uld %[val_m], %[psrc_m] \n\t" \ + \ + : [val_m] "=r"(val_m) \ + : [psrc_m] "m"(*psrc_m)); \ + \ + val_m; \ + }) #else // !(__mips == 64) -#define LD(psrc) \ -({ \ - const uint8_t *psrc_m1 = (const uint8_t *)(psrc); \ - uint32_t val0_m, val1_m; \ - uint64_t val_m = 0; \ - \ - val0_m = LW(psrc_m1); \ - val1_m = LW(psrc_m1 + 4); \ - \ - val_m = (uint64_t)(val1_m); \ - val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); \ - val_m = (uint64_t)(val_m | (uint64_t)val0_m); \ - \ - val_m; \ -}) +#define LD(psrc) \ + ({ \ + const uint8_t *psrc_m1 = (const uint8_t *)(psrc); \ + uint32_t val0_m, val1_m; \ + uint64_t val_m = 0; \ + \ + val0_m = LW(psrc_m1); \ + val1_m = LW(psrc_m1 + 4); \ + \ + val_m = (uint64_t)(val1_m); \ + val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); \ + val_m = (uint64_t)(val_m | (uint64_t)val0_m); \ + \ + val_m; \ + }) #endif // (__mips == 64) -#define SH(val, pdst) \ -{ \ - uint8_t *pdst_m = (uint8_t *)(pdst); \ - const uint16_t val_m = (val); \ - \ - asm volatile ( \ - "ush %[val_m], %[pdst_m] \n\t" \ - \ - : [pdst_m] "=m" (*pdst_m) \ - : [val_m] "r" (val_m) \ - ); \ -} +#define SH(val, pdst) \ + { \ + uint8_t *pdst_m = (uint8_t *)(pdst); \ + const uint16_t val_m = (val); \ + \ + asm volatile("ush %[val_m], %[pdst_m] \n\t" \ + \ + : [pdst_m] "=m"(*pdst_m) \ + : [val_m] "r"(val_m)); \ + } -#define SW(val, pdst) \ -{ \ - uint8_t *pdst_m = (uint8_t *)(pdst); \ - const uint32_t val_m = (val); \ - \ - asm volatile ( \ - "usw %[val_m], %[pdst_m] \n\t" \ - \ - : [pdst_m] "=m" (*pdst_m) \ - : [val_m] "r" (val_m) \ - ); \ -} +#define SW(val, pdst) \ + { \ + uint8_t *pdst_m = (uint8_t *)(pdst); \ + const uint32_t val_m = (val); \ + \ + asm volatile("usw %[val_m], %[pdst_m] \n\t" \ + \ + : [pdst_m] "=m"(*pdst_m) \ + : [val_m] "r"(val_m)); \ + } -#define SD(val, pdst) \ -{ \ - uint8_t *pdst_m1 = (uint8_t *)(pdst); \ - uint32_t val0_m, val1_m; \ - \ - val0_m = (uint32_t)((val) & 0x00000000FFFFFFFF); \ - 
val1_m = (uint32_t)(((val) >> 32) & 0x00000000FFFFFFFF); \ - \ - SW(val0_m, pdst_m1); \ - SW(val1_m, pdst_m1 + 4); \ -} +#define SD(val, pdst) \ + { \ + uint8_t *pdst_m1 = (uint8_t *)(pdst); \ + uint32_t val0_m, val1_m; \ + \ + val0_m = (uint32_t)((val)&0x00000000FFFFFFFF); \ + val1_m = (uint32_t)(((val) >> 32) & 0x00000000FFFFFFFF); \ + \ + SW(val0_m, pdst_m1); \ + SW(val1_m, pdst_m1 + 4); \ + } #endif // (__mips_isa_rev >= 6) /* Description : Load 4 words with stride @@ -221,13 +203,13 @@ Load word in 'out2' from (psrc + 2 * stride) Load word in 'out3' from (psrc + 3 * stride) */ -#define LW4(psrc, stride, out0, out1, out2, out3) \ -{ \ - out0 = LW((psrc)); \ - out1 = LW((psrc) + stride); \ - out2 = LW((psrc) + 2 * stride); \ - out3 = LW((psrc) + 3 * stride); \ -} +#define LW4(psrc, stride, out0, out1, out2, out3) \ + { \ + out0 = LW((psrc)); \ + out1 = LW((psrc) + stride); \ + out2 = LW((psrc) + 2 * stride); \ + out3 = LW((psrc) + 3 * stride); \ + } /* Description : Load double words with stride Arguments : Inputs - psrc, stride @@ -235,16 +217,16 @@ Details : Load double word in 'out0' from (psrc) Load double word in 'out1' from (psrc + stride) */ -#define LD2(psrc, stride, out0, out1) \ -{ \ - out0 = LD((psrc)); \ - out1 = LD((psrc) + stride); \ -} -#define LD4(psrc, stride, out0, out1, out2, out3) \ -{ \ - LD2((psrc), stride, out0, out1); \ - LD2((psrc) + 2 * stride, stride, out2, out3); \ -} +#define LD2(psrc, stride, out0, out1) \ + { \ + out0 = LD((psrc)); \ + out1 = LD((psrc) + stride); \ + } +#define LD4(psrc, stride, out0, out1, out2, out3) \ + { \ + LD2((psrc), stride, out0, out1); \ + LD2((psrc) + 2 * stride, stride, out2, out3); \ + } /* Description : Store 4 words with stride Arguments : Inputs - in0, in1, in2, in3, pdst, stride @@ -253,13 +235,13 @@ Store word from 'in2' to (pdst + 2 * stride) Store word from 'in3' to (pdst + 3 * stride) */ -#define SW4(in0, in1, in2, in3, pdst, stride) \ -{ \ - SW(in0, (pdst)); \ - SW(in1, (pdst) + stride); \ - 
SW(in2, (pdst) + 2 * stride); \ - SW(in3, (pdst) + 3 * stride); \ -} +#define SW4(in0, in1, in2, in3, pdst, stride) \ + { \ + SW(in0, (pdst)); \ + SW(in1, (pdst) + stride); \ + SW(in2, (pdst) + 2 * stride); \ + SW(in3, (pdst) + 3 * stride); \ + } /* Description : Store 4 double words with stride Arguments : Inputs - in0, in1, in2, in3, pdst, stride @@ -268,13 +250,13 @@ Store double word from 'in2' to (pdst + 2 * stride) Store double word from 'in3' to (pdst + 3 * stride) */ -#define SD4(in0, in1, in2, in3, pdst, stride) \ -{ \ - SD(in0, (pdst)); \ - SD(in1, (pdst) + stride); \ - SD(in2, (pdst) + 2 * stride); \ - SD(in3, (pdst) + 3 * stride); \ -} +#define SD4(in0, in1, in2, in3, pdst, stride) \ + { \ + SD(in0, (pdst)); \ + SD(in1, (pdst) + stride); \ + SD(in2, (pdst) + 2 * stride); \ + SD(in3, (pdst) + 3 * stride); \ + } /* Description : Load vectors with 16 byte elements with stride Arguments : Inputs - psrc, stride @@ -283,44 +265,44 @@ Details : Load 16 byte elements in 'out0' from (psrc) Load 16 byte elements in 'out1' from (psrc + stride) */ -#define LD_B2(RTYPE, psrc, stride, out0, out1) \ -{ \ - out0 = LD_B(RTYPE, (psrc)); \ - out1 = LD_B(RTYPE, (psrc) + stride); \ -} +#define LD_B2(RTYPE, psrc, stride, out0, out1) \ + { \ + out0 = LD_B(RTYPE, (psrc)); \ + out1 = LD_B(RTYPE, (psrc) + stride); \ + } #define LD_UB2(...) LD_B2(v16u8, __VA_ARGS__) #define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__) -#define LD_B3(RTYPE, psrc, stride, out0, out1, out2) \ -{ \ - LD_B2(RTYPE, (psrc), stride, out0, out1); \ - out2 = LD_B(RTYPE, (psrc) + 2 * stride); \ -} +#define LD_B3(RTYPE, psrc, stride, out0, out1, out2) \ + { \ + LD_B2(RTYPE, (psrc), stride, out0, out1); \ + out2 = LD_B(RTYPE, (psrc) + 2 * stride); \ + } #define LD_UB3(...) LD_B3(v16u8, __VA_ARGS__) #define LD_SB3(...) 
LD_B3(v16i8, __VA_ARGS__) -#define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) \ -{ \ - LD_B2(RTYPE, (psrc), stride, out0, out1); \ - LD_B2(RTYPE, (psrc) + 2 * stride , stride, out2, out3); \ -} +#define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) \ + { \ + LD_B2(RTYPE, (psrc), stride, out0, out1); \ + LD_B2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \ + } #define LD_UB4(...) LD_B4(v16u8, __VA_ARGS__) #define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__) -#define LD_B5(RTYPE, psrc, stride, out0, out1, out2, out3, out4) \ -{ \ - LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3); \ - out4 = LD_B(RTYPE, (psrc) + 4 * stride); \ -} +#define LD_B5(RTYPE, psrc, stride, out0, out1, out2, out3, out4) \ + { \ + LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3); \ + out4 = LD_B(RTYPE, (psrc) + 4 * stride); \ + } #define LD_UB5(...) LD_B5(v16u8, __VA_ARGS__) #define LD_SB5(...) LD_B5(v16i8, __VA_ARGS__) -#define LD_B8(RTYPE, psrc, stride, \ - out0, out1, out2, out3, out4, out5, out6, out7) \ -{ \ - LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3); \ - LD_B4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7); \ -} +#define LD_B8(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5, out6, \ + out7) \ + { \ + LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3); \ + LD_B4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7); \ + } #define LD_UB8(...) LD_B8(v16u8, __VA_ARGS__) #define LD_SB8(...) LD_B8(v16i8, __VA_ARGS__) @@ -330,18 +312,18 @@ Details : Load 8 halfword elements in 'out0' from (psrc) Load 8 halfword elements in 'out1' from (psrc + stride) */ -#define LD_H2(RTYPE, psrc, stride, out0, out1) \ -{ \ - out0 = LD_H(RTYPE, (psrc)); \ - out1 = LD_H(RTYPE, (psrc) + (stride)); \ -} +#define LD_H2(RTYPE, psrc, stride, out0, out1) \ + { \ + out0 = LD_H(RTYPE, (psrc)); \ + out1 = LD_H(RTYPE, (psrc) + (stride)); \ + } #define LD_SH2(...) 
LD_H2(v8i16, __VA_ARGS__) -#define LD_H4(RTYPE, psrc, stride, out0, out1, out2, out3) \ -{ \ - LD_H2(RTYPE, (psrc), stride, out0, out1); \ - LD_H2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \ -} +#define LD_H4(RTYPE, psrc, stride, out0, out1, out2, out3) \ + { \ + LD_H2(RTYPE, (psrc), stride, out0, out1); \ + LD_H2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \ + } #define LD_SH4(...) LD_H4(v8i16, __VA_ARGS__) /* Description : Load 2 vectors of signed word elements with stride @@ -349,38 +331,37 @@ Outputs - out0, out1 Return Type - signed word */ -#define LD_SW2(psrc, stride, out0, out1) \ -{ \ - out0 = LD_SW((psrc)); \ - out1 = LD_SW((psrc) + stride); \ -} +#define LD_SW2(psrc, stride, out0, out1) \ + { \ + out0 = LD_SW((psrc)); \ + out1 = LD_SW((psrc) + stride); \ + } /* Description : Store vectors of 16 byte elements with stride Arguments : Inputs - in0, in1, pdst, stride Details : Store 16 byte elements from 'in0' to (pdst) Store 16 byte elements from 'in1' to (pdst + stride) */ -#define ST_B2(RTYPE, in0, in1, pdst, stride) \ -{ \ - ST_B(RTYPE, in0, (pdst)); \ - ST_B(RTYPE, in1, (pdst) + stride); \ -} +#define ST_B2(RTYPE, in0, in1, pdst, stride) \ + { \ + ST_B(RTYPE, in0, (pdst)); \ + ST_B(RTYPE, in1, (pdst) + stride); \ + } #define ST_UB2(...) ST_B2(v16u8, __VA_ARGS__) -#define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \ -{ \ - ST_B2(RTYPE, in0, in1, (pdst), stride); \ - ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \ -} +#define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \ + { \ + ST_B2(RTYPE, in0, in1, (pdst), stride); \ + ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \ + } #define ST_UB4(...) ST_B4(v16u8, __VA_ARGS__) #define ST_SB4(...) 
ST_B4(v16i8, __VA_ARGS__) -#define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - pdst, stride) \ -{ \ - ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \ - ST_B4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \ -} +#define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ + { \ + ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \ + ST_B4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \ + } #define ST_UB8(...) ST_B8(v16u8, __VA_ARGS__) /* Description : Store vectors of 8 halfword elements with stride @@ -388,11 +369,11 @@ Details : Store 8 halfword elements from 'in0' to (pdst) Store 8 halfword elements from 'in1' to (pdst + stride) */ -#define ST_H2(RTYPE, in0, in1, pdst, stride) \ -{ \ - ST_H(RTYPE, in0, (pdst)); \ - ST_H(RTYPE, in1, (pdst) + stride); \ -} +#define ST_H2(RTYPE, in0, in1, pdst, stride) \ + { \ + ST_H(RTYPE, in0, (pdst)); \ + ST_H(RTYPE, in1, (pdst) + stride); \ + } #define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__) /* Description : Store vectors of word elements with stride @@ -400,11 +381,11 @@ Details : Store 4 word elements from 'in0' to (pdst) Store 4 word elements from 'in1' to (pdst + stride) */ -#define ST_SW2(in0, in1, pdst, stride) \ -{ \ - ST_SW(in0, (pdst)); \ - ST_SW(in1, (pdst) + stride); \ -} +#define ST_SW2(in0, in1, pdst, stride) \ + { \ + ST_SW(in0, (pdst)); \ + ST_SW(in1, (pdst) + stride); \ + } /* Description : Store 2x4 byte block to destination memory from input vector Arguments : Inputs - in, stidx, pdst, stride @@ -417,21 +398,21 @@ Index 'stidx+3' halfword element from 'in' vector is copied to the GP register and stored to (pdst + 3 * stride) */ -#define ST2x4_UB(in, stidx, pdst, stride) \ -{ \ - uint16_t out0_m, out1_m, out2_m, out3_m; \ - uint8_t *pblk_2x4_m = (uint8_t *)(pdst); \ - \ - out0_m = __msa_copy_u_h((v8i16)in, (stidx)); \ - out1_m = __msa_copy_u_h((v8i16)in, (stidx + 1)); \ - out2_m = __msa_copy_u_h((v8i16)in, (stidx + 2)); \ - out3_m = __msa_copy_u_h((v8i16)in, (stidx 
+ 3)); \ - \ - SH(out0_m, pblk_2x4_m); \ - SH(out1_m, pblk_2x4_m + stride); \ - SH(out2_m, pblk_2x4_m + 2 * stride); \ - SH(out3_m, pblk_2x4_m + 3 * stride); \ -} +#define ST2x4_UB(in, stidx, pdst, stride) \ + { \ + uint16_t out0_m, out1_m, out2_m, out3_m; \ + uint8_t *pblk_2x4_m = (uint8_t *)(pdst); \ + \ + out0_m = __msa_copy_u_h((v8i16)in, (stidx)); \ + out1_m = __msa_copy_u_h((v8i16)in, (stidx + 1)); \ + out2_m = __msa_copy_u_h((v8i16)in, (stidx + 2)); \ + out3_m = __msa_copy_u_h((v8i16)in, (stidx + 3)); \ + \ + SH(out0_m, pblk_2x4_m); \ + SH(out1_m, pblk_2x4_m + stride); \ + SH(out2_m, pblk_2x4_m + 2 * stride); \ + SH(out3_m, pblk_2x4_m + 3 * stride); \ + } /* Description : Store 4x4 byte block to destination memory from input vector Arguments : Inputs - in0, in1, pdst, stride @@ -444,38 +425,38 @@ 'Idx3' word element from input vector 'in0' is copied to the GP register and stored to (pdst + 3 * stride) */ -#define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) \ -{ \ - uint32_t out0_m, out1_m, out2_m, out3_m; \ - uint8_t *pblk_4x4_m = (uint8_t *)(pdst); \ - \ - out0_m = __msa_copy_u_w((v4i32)in0, idx0); \ - out1_m = __msa_copy_u_w((v4i32)in0, idx1); \ - out2_m = __msa_copy_u_w((v4i32)in1, idx2); \ - out3_m = __msa_copy_u_w((v4i32)in1, idx3); \ - \ - SW4(out0_m, out1_m, out2_m, out3_m, pblk_4x4_m, stride); \ -} -#define ST4x8_UB(in0, in1, pdst, stride) \ -{ \ - uint8_t *pblk_4x8 = (uint8_t *)(pdst); \ - \ - ST4x4_UB(in0, in0, 0, 1, 2, 3, pblk_4x8, stride); \ - ST4x4_UB(in1, in1, 0, 1, 2, 3, pblk_4x8 + 4 * stride, stride); \ -} +#define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) \ + { \ + uint32_t out0_m, out1_m, out2_m, out3_m; \ + uint8_t *pblk_4x4_m = (uint8_t *)(pdst); \ + \ + out0_m = __msa_copy_u_w((v4i32)in0, idx0); \ + out1_m = __msa_copy_u_w((v4i32)in0, idx1); \ + out2_m = __msa_copy_u_w((v4i32)in1, idx2); \ + out3_m = __msa_copy_u_w((v4i32)in1, idx3); \ + \ + SW4(out0_m, out1_m, out2_m, out3_m, pblk_4x4_m, stride); \ + } 
+#define ST4x8_UB(in0, in1, pdst, stride) \ + { \ + uint8_t *pblk_4x8 = (uint8_t *)(pdst); \ + \ + ST4x4_UB(in0, in0, 0, 1, 2, 3, pblk_4x8, stride); \ + ST4x4_UB(in1, in1, 0, 1, 2, 3, pblk_4x8 + 4 * stride, stride); \ + } /* Description : Store 8x1 byte block to destination memory from input vector Arguments : Inputs - in, pdst Details : Index 0 double word element from 'in' vector is copied to the GP register and stored to (pdst) */ -#define ST8x1_UB(in, pdst) \ -{ \ - uint64_t out0_m; \ - \ - out0_m = __msa_copy_u_d((v2i64)in, 0); \ - SD(out0_m, pdst); \ -} +#define ST8x1_UB(in, pdst) \ + { \ + uint64_t out0_m; \ + \ + out0_m = __msa_copy_u_d((v2i64)in, 0); \ + SD(out0_m, pdst); \ + } /* Description : Store 8x2 byte block to destination memory from input vector Arguments : Inputs - in, pdst, stride @@ -484,17 +465,17 @@ Index 1 double word element from 'in' vector is copied to the GP register and stored to (pdst + stride) */ -#define ST8x2_UB(in, pdst, stride) \ -{ \ - uint64_t out0_m, out1_m; \ - uint8_t *pblk_8x2_m = (uint8_t *)(pdst); \ - \ - out0_m = __msa_copy_u_d((v2i64)in, 0); \ - out1_m = __msa_copy_u_d((v2i64)in, 1); \ - \ - SD(out0_m, pblk_8x2_m); \ - SD(out1_m, pblk_8x2_m + stride); \ -} +#define ST8x2_UB(in, pdst, stride) \ + { \ + uint64_t out0_m, out1_m; \ + uint8_t *pblk_8x2_m = (uint8_t *)(pdst); \ + \ + out0_m = __msa_copy_u_d((v2i64)in, 0); \ + out1_m = __msa_copy_u_d((v2i64)in, 1); \ + \ + SD(out0_m, pblk_8x2_m); \ + SD(out1_m, pblk_8x2_m + stride); \ + } /* Description : Store 8x4 byte block to destination memory from input vectors @@ -508,18 +489,18 @@ Index 1 double word element from 'in1' vector is copied to the GP register and stored to (pdst + 3 * stride) */ -#define ST8x4_UB(in0, in1, pdst, stride) \ -{ \ - uint64_t out0_m, out1_m, out2_m, out3_m; \ - uint8_t *pblk_8x4_m = (uint8_t *)(pdst); \ - \ - out0_m = __msa_copy_u_d((v2i64)in0, 0); \ - out1_m = __msa_copy_u_d((v2i64)in0, 1); \ - out2_m = __msa_copy_u_d((v2i64)in1, 0); \ - out3_m = 
__msa_copy_u_d((v2i64)in1, 1); \ - \ - SD4(out0_m, out1_m, out2_m, out3_m, pblk_8x4_m, stride); \ -} +#define ST8x4_UB(in0, in1, pdst, stride) \ + { \ + uint64_t out0_m, out1_m, out2_m, out3_m; \ + uint8_t *pblk_8x4_m = (uint8_t *)(pdst); \ + \ + out0_m = __msa_copy_u_d((v2i64)in0, 0); \ + out1_m = __msa_copy_u_d((v2i64)in0, 1); \ + out2_m = __msa_copy_u_d((v2i64)in1, 0); \ + out3_m = __msa_copy_u_d((v2i64)in1, 1); \ + \ + SD4(out0_m, out1_m, out2_m, out3_m, pblk_8x4_m, stride); \ + } /* Description : Immediate number of elements to slide with zero Arguments : Inputs - in0, in1, slide_val @@ -528,13 +509,13 @@ Details : Byte elements from 'zero_m' vector are slid into 'in0' by value specified in the 'slide_val' */ -#define SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val) \ -{ \ - v16i8 zero_m = { 0 }; \ - \ - out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val); \ - out1 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in1, slide_val); \ -} +#define SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val) \ + { \ + v16i8 zero_m = { 0 }; \ + \ + out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val); \ + out1 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in1, slide_val); \ + } #define SLDI_B2_0_UB(...) 
SLDI_B2_0(v16u8, __VA_ARGS__) /* Description : Immediate number of elements to slide @@ -544,18 +525,18 @@ Details : Byte elements from 'in0_0' vector are slid into 'in1_0' by value specified in the 'slide_val' */ -#define SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val) \ -{ \ - out0 = (RTYPE)__msa_sldi_b((v16i8)in0_0, (v16i8)in1_0, slide_val); \ - out1 = (RTYPE)__msa_sldi_b((v16i8)in0_1, (v16i8)in1_1, slide_val); \ -} +#define SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val) \ + { \ + out0 = (RTYPE)__msa_sldi_b((v16i8)in0_0, (v16i8)in1_0, slide_val); \ + out1 = (RTYPE)__msa_sldi_b((v16i8)in0_1, (v16i8)in1_1, slide_val); \ + } -#define SLDI_B3(RTYPE, in0_0, in0_1, in0_2, in1_0, in1_1, in1_2, \ - out0, out1, out2, slide_val) \ -{ \ - SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val); \ - out2 = (RTYPE)__msa_sldi_b((v16i8)in0_2, (v16i8)in1_2, slide_val); \ -} +#define SLDI_B3(RTYPE, in0_0, in0_1, in0_2, in1_0, in1_1, in1_2, out0, out1, \ + out2, slide_val) \ + { \ + SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val); \ + out2 = (RTYPE)__msa_sldi_b((v16i8)in0_2, (v16i8)in1_2, slide_val); \ + } #define SLDI_B3_UH(...) SLDI_B3(v8u16, __VA_ARGS__) /* Description : Shuffle byte vector elements as per mask vector @@ -565,21 +546,21 @@ Details : Byte elements from 'in0' & 'in1' are copied selectively to 'out0' as per control vector 'mask0' */ -#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_vshf_b((v16i8)mask0, (v16i8)in1, (v16i8)in0); \ - out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \ -} +#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_vshf_b((v16i8)mask0, (v16i8)in1, (v16i8)in0); \ + out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \ + } #define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__) #define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__) #define VSHF_B2_UH(...) 
VSHF_B2(v8u16, __VA_ARGS__) -#define VSHF_B3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2, \ - out0, out1, out2) \ -{ \ - VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1); \ - out2 = (RTYPE)__msa_vshf_b((v16i8)mask2, (v16i8)in5, (v16i8)in4); \ -} +#define VSHF_B3(RTYPE, in0, in1, in2, in3, in4, in5, mask0, mask1, mask2, \ + out0, out1, out2) \ + { \ + VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1); \ + out2 = (RTYPE)__msa_vshf_b((v16i8)mask2, (v16i8)in5, (v16i8)in4); \ + } #define VSHF_B3_SB(...) VSHF_B3(v16i8, __VA_ARGS__) /* Description : Shuffle halfword vector elements as per mask vector @@ -589,11 +570,11 @@ Details : halfword elements from 'in0' & 'in1' are copied selectively to 'out0' as per control vector 'mask0' */ -#define VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_vshf_h((v8i16)mask0, (v8i16)in1, (v8i16)in0); \ - out1 = (RTYPE)__msa_vshf_h((v8i16)mask1, (v8i16)in3, (v8i16)in2); \ -} +#define VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_vshf_h((v8i16)mask0, (v8i16)in1, (v8i16)in0); \ + out1 = (RTYPE)__msa_vshf_h((v8i16)mask1, (v8i16)in3, (v8i16)in2); \ + } #define VSHF_H2_SH(...) VSHF_H2(v8i16, __VA_ARGS__) /* Description : Dot product of byte vector elements @@ -606,20 +587,19 @@ The multiplication result of adjacent odd-even elements are added together and written to the 'out0' vector */ -#define DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_dotp_u_h((v16u8)mult0, (v16u8)cnst0); \ - out1 = (RTYPE)__msa_dotp_u_h((v16u8)mult1, (v16u8)cnst1); \ -} +#define DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_dotp_u_h((v16u8)mult0, (v16u8)cnst0); \ + out1 = (RTYPE)__msa_dotp_u_h((v16u8)mult1, (v16u8)cnst1); \ + } #define DOTP_UB2_UH(...) 
DOTP_UB2(v8u16, __VA_ARGS__) -#define DOTP_UB4(RTYPE, mult0, mult1, mult2, mult3, \ - cnst0, cnst1, cnst2, cnst3, \ - out0, out1, out2, out3) \ -{ \ - DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ - DOTP_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ -} +#define DOTP_UB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \ + cnst3, out0, out1, out2, out3) \ + { \ + DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + DOTP_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ + } #define DOTP_UB4_UH(...) DOTP_UB4(v8u16, __VA_ARGS__) /* Description : Dot product of byte vector elements @@ -632,19 +612,19 @@ The multiplication result of adjacent odd-even elements are added together and written to the 'out0' vector */ -#define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_dotp_s_h((v16i8)mult0, (v16i8)cnst0); \ - out1 = (RTYPE)__msa_dotp_s_h((v16i8)mult1, (v16i8)cnst1); \ -} +#define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_dotp_s_h((v16i8)mult0, (v16i8)cnst0); \ + out1 = (RTYPE)__msa_dotp_s_h((v16i8)mult1, (v16i8)cnst1); \ + } #define DOTP_SB2_SH(...) DOTP_SB2(v8i16, __VA_ARGS__) -#define DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3, \ - cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) \ -{ \ - DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ - DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ -} +#define DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \ + cnst3, out0, out1, out2, out3) \ + { \ + DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ + } #define DOTP_SB4_SH(...) 
DOTP_SB4(v8i16, __VA_ARGS__) /* Description : Dot product of halfword vector elements @@ -657,19 +637,18 @@ The multiplication result of adjacent odd-even elements are added together and written to the 'out0' vector */ -#define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_dotp_s_w((v8i16)mult0, (v8i16)cnst0); \ - out1 = (RTYPE)__msa_dotp_s_w((v8i16)mult1, (v8i16)cnst1); \ -} +#define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_dotp_s_w((v8i16)mult0, (v8i16)cnst0); \ + out1 = (RTYPE)__msa_dotp_s_w((v8i16)mult1, (v8i16)cnst1); \ + } -#define DOTP_SH4(RTYPE, mult0, mult1, mult2, mult3, \ - cnst0, cnst1, cnst2, cnst3, \ - out0, out1, out2, out3) \ -{ \ - DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ - DOTP_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ -} +#define DOTP_SH4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \ + cnst3, out0, out1, out2, out3) \ + { \ + DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + DOTP_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ + } #define DOTP_SH4_SW(...) DOTP_SH4(v4i32, __VA_ARGS__) /* Description : Dot product of word vector elements @@ -682,11 +661,11 @@ The multiplication result of adjacent odd-even elements are added together and written to the 'out0' vector */ -#define DOTP_SW2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_dotp_s_d((v4i32)mult0, (v4i32)cnst0); \ - out1 = (RTYPE)__msa_dotp_s_d((v4i32)mult1, (v4i32)cnst1); \ -} +#define DOTP_SW2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_dotp_s_d((v4i32)mult0, (v4i32)cnst0); \ + out1 = (RTYPE)__msa_dotp_s_d((v4i32)mult1, (v4i32)cnst1); \ + } #define DOTP_SW2_SD(...) 
DOTP_SW2(v2i64, __VA_ARGS__) /* Description : Dot product & addition of byte vector elements @@ -699,19 +678,19 @@ The multiplication result of adjacent odd-even elements are added to the 'out0' vector */ -#define DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_dpadd_s_h((v8i16)out0, (v16i8)mult0, (v16i8)cnst0); \ - out1 = (RTYPE)__msa_dpadd_s_h((v8i16)out1, (v16i8)mult1, (v16i8)cnst1); \ -} +#define DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_dpadd_s_h((v8i16)out0, (v16i8)mult0, (v16i8)cnst0); \ + out1 = (RTYPE)__msa_dpadd_s_h((v8i16)out1, (v16i8)mult1, (v16i8)cnst1); \ + } #define DPADD_SB2_SH(...) DPADD_SB2(v8i16, __VA_ARGS__) -#define DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3, \ - cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) \ -{ \ - DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ - DPADD_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ -} +#define DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \ + cnst3, out0, out1, out2, out3) \ + { \ + DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + DPADD_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ + } #define DPADD_SB4_SH(...) DPADD_SB4(v8i16, __VA_ARGS__) /* Description : Dot product & addition of halfword vector elements @@ -724,19 +703,19 @@ The multiplication result of adjacent odd-even elements are added to the 'out0' vector */ -#define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_dpadd_s_w((v4i32)out0, (v8i16)mult0, (v8i16)cnst0); \ - out1 = (RTYPE)__msa_dpadd_s_w((v4i32)out1, (v8i16)mult1, (v8i16)cnst1); \ -} +#define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_dpadd_s_w((v4i32)out0, (v8i16)mult0, (v8i16)cnst0); \ + out1 = (RTYPE)__msa_dpadd_s_w((v4i32)out1, (v8i16)mult1, (v8i16)cnst1); \ + } #define DPADD_SH2_SW(...) 
DPADD_SH2(v4i32, __VA_ARGS__) -#define DPADD_SH4(RTYPE, mult0, mult1, mult2, mult3, \ - cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) \ -{ \ - DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ - DPADD_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ -} +#define DPADD_SH4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \ + cnst3, out0, out1, out2, out3) \ + { \ + DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + DPADD_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ + } #define DPADD_SH4_SW(...) DPADD_SH4(v4i32, __VA_ARGS__) /* Description : Dot product & addition of double word vector elements @@ -749,11 +728,11 @@ The multiplication result of adjacent odd-even elements are added to the 'out0' vector */ -#define DPADD_SD2(RTYPE, mult0, mult1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_dpadd_s_d((v2i64)out0, (v4i32)mult0, (v4i32)mult0); \ - out1 = (RTYPE)__msa_dpadd_s_d((v2i64)out1, (v4i32)mult1, (v4i32)mult1); \ -} +#define DPADD_SD2(RTYPE, mult0, mult1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_dpadd_s_d((v2i64)out0, (v4i32)mult0, (v4i32)mult0); \ + out1 = (RTYPE)__msa_dpadd_s_d((v2i64)out1, (v4i32)mult1, (v4i32)mult1); \ + } #define DPADD_SD2_SD(...) 
DPADD_SD2(v2i64, __VA_ARGS__) /* Description : Clips all signed halfword elements of input vector @@ -762,25 +741,25 @@ Output - out_m Return Type - signed halfword */ -#define CLIP_SH_0_255(in) \ -({ \ - v8i16 max_m = __msa_ldi_h(255); \ - v8i16 out_m; \ - \ - out_m = __msa_maxi_s_h((v8i16)in, 0); \ - out_m = __msa_min_s_h((v8i16)max_m, (v8i16)out_m); \ - out_m; \ -}) -#define CLIP_SH2_0_255(in0, in1) \ -{ \ - in0 = CLIP_SH_0_255(in0); \ - in1 = CLIP_SH_0_255(in1); \ -} -#define CLIP_SH4_0_255(in0, in1, in2, in3) \ -{ \ - CLIP_SH2_0_255(in0, in1); \ - CLIP_SH2_0_255(in2, in3); \ -} +#define CLIP_SH_0_255(in) \ + ({ \ + v8i16 max_m = __msa_ldi_h(255); \ + v8i16 out_m; \ + \ + out_m = __msa_maxi_s_h((v8i16)in, 0); \ + out_m = __msa_min_s_h((v8i16)max_m, (v8i16)out_m); \ + out_m; \ + }) +#define CLIP_SH2_0_255(in0, in1) \ + { \ + in0 = CLIP_SH_0_255(in0); \ + in1 = CLIP_SH_0_255(in1); \ + } +#define CLIP_SH4_0_255(in0, in1, in2, in3) \ + { \ + CLIP_SH2_0_255(in0, in1); \ + CLIP_SH2_0_255(in2, in3); \ + } /* Description : Clips all signed word elements of input vector between 0 & 255 @@ -788,15 +767,15 @@ Output - out_m Return Type - signed word */ -#define CLIP_SW_0_255(in) \ -({ \ - v4i32 max_m = __msa_ldi_w(255); \ - v4i32 out_m; \ - \ - out_m = __msa_maxi_s_w((v4i32)in, 0); \ - out_m = __msa_min_s_w((v4i32)max_m, (v4i32)out_m); \ - out_m; \ -}) +#define CLIP_SW_0_255(in) \ + ({ \ + v4i32 max_m = __msa_ldi_w(255); \ + v4i32 out_m; \ + \ + out_m = __msa_maxi_s_w((v4i32)in, 0); \ + out_m = __msa_min_s_w((v4i32)max_m, (v4i32)out_m); \ + out_m; \ + }) /* Description : Horizontal addition of 4 signed word elements of input vector Arguments : Input - in (signed word vector) @@ -805,17 +784,17 @@ Details : 4 signed word elements of 'in' vector are added together and the resulting integer sum is returned */ -#define HADD_SW_S32(in) \ -({ \ - v2i64 res0_m, res1_m; \ - int32_t sum_m; \ - \ - res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in); \ - res1_m = __msa_splati_d(res0_m, 
1); \ - res0_m = res0_m + res1_m; \ - sum_m = __msa_copy_s_w((v4i32)res0_m, 0); \ - sum_m; \ -}) +#define HADD_SW_S32(in) \ + ({ \ + v2i64 res0_m, res1_m; \ + int32_t sum_m; \ + \ + res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in); \ + res1_m = __msa_splati_d(res0_m, 1); \ + res0_m = res0_m + res1_m; \ + sum_m = __msa_copy_s_w((v4i32)res0_m, 0); \ + sum_m; \ + }) /* Description : Horizontal addition of 8 unsigned halfword elements Arguments : Inputs - in (unsigned halfword vector) @@ -824,19 +803,19 @@ Details : 8 unsigned halfword elements of input vector are added together and the resulting integer sum is returned */ -#define HADD_UH_U32(in) \ -({ \ - v4u32 res_m; \ - v2u64 res0_m, res1_m; \ - uint32_t sum_m; \ - \ - res_m = __msa_hadd_u_w((v8u16)in, (v8u16)in); \ - res0_m = __msa_hadd_u_d(res_m, res_m); \ - res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1); \ - res0_m = res0_m + res1_m; \ - sum_m = __msa_copy_u_w((v4i32)res0_m, 0); \ - sum_m; \ -}) +#define HADD_UH_U32(in) \ + ({ \ + v4u32 res_m; \ + v2u64 res0_m, res1_m; \ + uint32_t sum_m; \ + \ + res_m = __msa_hadd_u_w((v8u16)in, (v8u16)in); \ + res0_m = __msa_hadd_u_d(res_m, res_m); \ + res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1); \ + res0_m = res0_m + res1_m; \ + sum_m = __msa_copy_u_w((v4i32)res0_m, 0); \ + sum_m; \ + }) /* Description : Horizontal addition of unsigned byte vector elements Arguments : Inputs - in0, in1 @@ -846,11 +825,11 @@ even unsigned byte element from 'in0' (pairwise) and the halfword result is written to 'out0' */ -#define HADD_UB2(RTYPE, in0, in1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_hadd_u_h((v16u8)in0, (v16u8)in0); \ - out1 = (RTYPE)__msa_hadd_u_h((v16u8)in1, (v16u8)in1); \ -} +#define HADD_UB2(RTYPE, in0, in1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_hadd_u_h((v16u8)in0, (v16u8)in0); \ + out1 = (RTYPE)__msa_hadd_u_h((v16u8)in1, (v16u8)in1); \ + } #define HADD_UB2_UH(...) 
HADD_UB2(v8u16, __VA_ARGS__) /* Description : Horizontal subtraction of unsigned byte vector elements @@ -861,11 +840,11 @@ even unsigned byte element from 'in0' (pairwise) and the halfword result is written to 'out0' */ -#define HSUB_UB2(RTYPE, in0, in1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_hsub_u_h((v16u8)in0, (v16u8)in0); \ - out1 = (RTYPE)__msa_hsub_u_h((v16u8)in1, (v16u8)in1); \ -} +#define HSUB_UB2(RTYPE, in0, in1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_hsub_u_h((v16u8)in0, (v16u8)in0); \ + out1 = (RTYPE)__msa_hsub_u_h((v16u8)in1, (v16u8)in1); \ + } #define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__) /* Description : Horizontal subtraction of signed halfword vector elements @@ -876,11 +855,11 @@ even signed halfword element from 'in0' (pairwise) and the word result is written to 'out0' */ -#define HSUB_UH2(RTYPE, in0, in1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_hsub_s_w((v8i16)in0, (v8i16)in0); \ - out1 = (RTYPE)__msa_hsub_s_w((v8i16)in1, (v8i16)in1); \ -} +#define HSUB_UH2(RTYPE, in0, in1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_hsub_s_w((v8i16)in0, (v8i16)in0); \ + out1 = (RTYPE)__msa_hsub_s_w((v8i16)in1, (v8i16)in1); \ + } #define HSUB_UH2_SW(...) HSUB_UH2(v4i32, __VA_ARGS__) /* Description : Set element n input vector to GPR value @@ -889,11 +868,11 @@ Return Type - as per RTYPE Details : Set element 0 in vector 'out' to value specified in 'in0' */ -#define INSERT_D2(RTYPE, in0, in1, out) \ -{ \ - out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \ - out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \ -} +#define INSERT_D2(RTYPE, in0, in1, out) \ + { \ + out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \ + out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \ + } #define INSERT_D2_SB(...) 
INSERT_D2(v16i8, __VA_ARGS__) /* Description : Interleave even byte elements from vectors @@ -903,11 +882,11 @@ Details : Even byte elements of 'in0' and 'in1' are interleaved and written to 'out0' */ -#define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_ilvev_b((v16i8)in1, (v16i8)in0); \ - out1 = (RTYPE)__msa_ilvev_b((v16i8)in3, (v16i8)in2); \ -} +#define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvev_b((v16i8)in1, (v16i8)in0); \ + out1 = (RTYPE)__msa_ilvev_b((v16i8)in3, (v16i8)in2); \ + } #define ILVEV_B2_UB(...) ILVEV_B2(v16u8, __VA_ARGS__) #define ILVEV_B2_SH(...) ILVEV_B2(v8i16, __VA_ARGS__) #define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__) @@ -919,11 +898,11 @@ Details : Even halfword elements of 'in0' and 'in1' are interleaved and written to 'out0' */ -#define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0); \ - out1 = (RTYPE)__msa_ilvev_h((v8i16)in3, (v8i16)in2); \ -} +#define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0); \ + out1 = (RTYPE)__msa_ilvev_h((v8i16)in3, (v8i16)in2); \ + } #define ILVEV_H2_UB(...) ILVEV_H2(v16u8, __VA_ARGS__) #define ILVEV_H2_SH(...) ILVEV_H2(v8i16, __VA_ARGS__) @@ -934,11 +913,11 @@ Details : Even word elements of 'in0' and 'in1' are interleaved and written to 'out0' */ -#define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0); \ - out1 = (RTYPE)__msa_ilvev_w((v4i32)in3, (v4i32)in2); \ -} +#define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0); \ + out1 = (RTYPE)__msa_ilvev_w((v4i32)in3, (v4i32)in2); \ + } #define ILVEV_W2_SD(...) 
ILVEV_W2(v2i64, __VA_ARGS__) /* Description : Interleave even double word elements from vectors @@ -948,11 +927,11 @@ Details : Even double word elements of 'in0' and 'in1' are interleaved and written to 'out0' */ -#define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_ilvev_d((v2i64)in1, (v2i64)in0); \ - out1 = (RTYPE)__msa_ilvev_d((v2i64)in3, (v2i64)in2); \ -} +#define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvev_d((v2i64)in1, (v2i64)in0); \ + out1 = (RTYPE)__msa_ilvev_d((v2i64)in3, (v2i64)in2); \ + } #define ILVEV_D2_UB(...) ILVEV_D2(v16u8, __VA_ARGS__) /* Description : Interleave left half of byte elements from vectors @@ -962,21 +941,21 @@ Details : Left half of byte elements of 'in0' and 'in1' are interleaved and written to 'out0'. */ -#define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \ - out1 = (RTYPE)__msa_ilvl_b((v16i8)in2, (v16i8)in3); \ -} +#define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \ + out1 = (RTYPE)__msa_ilvl_b((v16i8)in2, (v16i8)in3); \ + } #define ILVL_B2_UB(...) ILVL_B2(v16u8, __VA_ARGS__) #define ILVL_B2_SB(...) ILVL_B2(v16i8, __VA_ARGS__) #define ILVL_B2_SH(...) ILVL_B2(v8i16, __VA_ARGS__) -#define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) \ -{ \ - ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ - ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ -} +#define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ + ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ + } #define ILVL_B4_SB(...) ILVL_B4(v16i8, __VA_ARGS__) #define ILVL_B4_SH(...) ILVL_B4(v8i16, __VA_ARGS__) @@ -987,11 +966,11 @@ Details : Left half of halfword elements of 'in0' and 'in1' are interleaved and written to 'out0'. 
*/ -#define ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \ - out1 = (RTYPE)__msa_ilvl_h((v8i16)in2, (v8i16)in3); \ -} +#define ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \ + out1 = (RTYPE)__msa_ilvl_h((v8i16)in2, (v8i16)in3); \ + } #define ILVL_H2_SH(...) ILVL_H2(v8i16, __VA_ARGS__) #define ILVL_H2_SW(...) ILVL_H2(v4i32, __VA_ARGS__) @@ -1002,11 +981,11 @@ Details : Left half of word elements of 'in0' and 'in1' are interleaved and written to 'out0'. */ -#define ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \ - out1 = (RTYPE)__msa_ilvl_w((v4i32)in2, (v4i32)in3); \ -} +#define ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \ + out1 = (RTYPE)__msa_ilvl_w((v4i32)in2, (v4i32)in3); \ + } #define ILVL_W2_SH(...) ILVL_W2(v8i16, __VA_ARGS__) /* Description : Interleave right half of byte elements from vectors @@ -1016,22 +995,22 @@ Details : Right half of byte elements of 'in0' and 'in1' are interleaved and written to out0. */ -#define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \ - out1 = (RTYPE)__msa_ilvr_b((v16i8)in2, (v16i8)in3); \ -} +#define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \ + out1 = (RTYPE)__msa_ilvr_b((v16i8)in2, (v16i8)in3); \ + } #define ILVR_B2_UB(...) ILVR_B2(v16u8, __VA_ARGS__) #define ILVR_B2_SB(...) ILVR_B2(v16i8, __VA_ARGS__) #define ILVR_B2_SH(...) ILVR_B2(v8i16, __VA_ARGS__) #define ILVR_B2_SW(...) 
ILVR_B2(v4i32, __VA_ARGS__) -#define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) \ -{ \ - ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ - ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ -} +#define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ + ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ + } #define ILVR_B4_UB(...) ILVR_B4(v16u8, __VA_ARGS__) #define ILVR_B4_SB(...) ILVR_B4(v16i8, __VA_ARGS__) #define ILVR_B4_UH(...) ILVR_B4(v8u16, __VA_ARGS__) @@ -1045,28 +1024,28 @@ Details : Right half of halfword elements of 'in0' and 'in1' are interleaved and written to 'out0'. */ -#define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \ - out1 = (RTYPE)__msa_ilvr_h((v8i16)in2, (v8i16)in3); \ -} +#define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \ + out1 = (RTYPE)__msa_ilvr_h((v8i16)in2, (v8i16)in3); \ + } #define ILVR_H2_SH(...) ILVR_H2(v8i16, __VA_ARGS__) #define ILVR_H2_SW(...) ILVR_H2(v4i32, __VA_ARGS__) -#define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) \ -{ \ - ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \ - ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \ -} +#define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \ + ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \ + } #define ILVR_H4_SH(...) ILVR_H4(v8i16, __VA_ARGS__) #define ILVR_H4_SW(...) 
ILVR_H4(v4i32, __VA_ARGS__) -#define ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \ - out1 = (RTYPE)__msa_ilvr_w((v4i32)in2, (v4i32)in3); \ -} +#define ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \ + out1 = (RTYPE)__msa_ilvr_w((v4i32)in2, (v4i32)in3); \ + } #define ILVR_W2_SH(...) ILVR_W2(v8i16, __VA_ARGS__) /* Description : Interleave right half of double word elements from vectors @@ -1076,21 +1055,21 @@ Details : Right half of double word elements of 'in0' and 'in1' are interleaved and written to 'out0'. */ -#define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_ilvr_d((v2i64)(in0), (v2i64)(in1)); \ - out1 = (RTYPE)__msa_ilvr_d((v2i64)(in2), (v2i64)(in3)); \ -} +#define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvr_d((v2i64)(in0), (v2i64)(in1)); \ + out1 = (RTYPE)__msa_ilvr_d((v2i64)(in2), (v2i64)(in3)); \ + } #define ILVR_D2_UB(...) ILVR_D2(v16u8, __VA_ARGS__) #define ILVR_D2_SB(...) ILVR_D2(v16i8, __VA_ARGS__) #define ILVR_D2_SH(...) ILVR_D2(v8i16, __VA_ARGS__) -#define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) \ -{ \ - ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \ - ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \ -} +#define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \ + ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \ + } #define ILVR_D4_SB(...) ILVR_D4(v16i8, __VA_ARGS__) #define ILVR_D4_UB(...) 
ILVR_D4(v16u8, __VA_ARGS__) @@ -1101,29 +1080,29 @@ Details : Right half of byte elements from 'in0' and 'in1' are interleaved and written to 'out0' */ -#define ILVRL_B2(RTYPE, in0, in1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \ - out1 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \ -} +#define ILVRL_B2(RTYPE, in0, in1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \ + out1 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \ + } #define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__) #define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__) #define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__) #define ILVRL_B2_SH(...) ILVRL_B2(v8i16, __VA_ARGS__) -#define ILVRL_H2(RTYPE, in0, in1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \ - out1 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \ -} +#define ILVRL_H2(RTYPE, in0, in1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \ + out1 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \ + } #define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__) #define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__) -#define ILVRL_W2(RTYPE, in0, in1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \ - out1 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \ -} +#define ILVRL_W2(RTYPE, in0, in1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \ + out1 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \ + } #define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__) #define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__) #define ILVRL_W2_SW(...) 
ILVRL_W2(v4i32, __VA_ARGS__) @@ -1136,11 +1115,11 @@ Details : Maximum of signed halfword element values from 'in0' and 'max_val' are written in place */ -#define MAXI_SH2(RTYPE, in0, in1, max_val) \ -{ \ - in0 = (RTYPE)__msa_maxi_s_h((v8i16)in0, (max_val)); \ - in1 = (RTYPE)__msa_maxi_s_h((v8i16)in1, (max_val)); \ -} +#define MAXI_SH2(RTYPE, in0, in1, max_val) \ + { \ + in0 = (RTYPE)__msa_maxi_s_h((v8i16)in0, (max_val)); \ + in1 = (RTYPE)__msa_maxi_s_h((v8i16)in1, (max_val)); \ + } #define MAXI_SH2_SH(...) MAXI_SH2(v8i16, __VA_ARGS__) /* Description : Saturate the halfword element values to the max @@ -1153,11 +1132,11 @@ value generated with (sat_val + 1) bit range. The results are written in place */ -#define SAT_UH2(RTYPE, in0, in1, sat_val) \ -{ \ - in0 = (RTYPE)__msa_sat_u_h((v8u16)in0, sat_val); \ - in1 = (RTYPE)__msa_sat_u_h((v8u16)in1, sat_val); \ -} +#define SAT_UH2(RTYPE, in0, in1, sat_val) \ + { \ + in0 = (RTYPE)__msa_sat_u_h((v8u16)in0, sat_val); \ + in1 = (RTYPE)__msa_sat_u_h((v8u16)in1, sat_val); \ + } #define SAT_UH2_SH(...) SAT_UH2(v8i16, __VA_ARGS__) /* Description : Saturate the halfword element values to the max @@ -1170,18 +1149,18 @@ value generated with (sat_val + 1) bit range The results are written in place */ -#define SAT_SH2(RTYPE, in0, in1, sat_val) \ -{ \ - in0 = (RTYPE)__msa_sat_s_h((v8i16)in0, sat_val); \ - in1 = (RTYPE)__msa_sat_s_h((v8i16)in1, sat_val); \ -} +#define SAT_SH2(RTYPE, in0, in1, sat_val) \ + { \ + in0 = (RTYPE)__msa_sat_s_h((v8i16)in0, sat_val); \ + in1 = (RTYPE)__msa_sat_s_h((v8i16)in1, sat_val); \ + } #define SAT_SH2_SH(...) SAT_SH2(v8i16, __VA_ARGS__) -#define SAT_SH4(RTYPE, in0, in1, in2, in3, sat_val) \ -{ \ - SAT_SH2(RTYPE, in0, in1, sat_val); \ - SAT_SH2(RTYPE, in2, in3, sat_val); \ -} +#define SAT_SH4(RTYPE, in0, in1, in2, in3, sat_val) \ + { \ + SAT_SH2(RTYPE, in0, in1, sat_val); \ + SAT_SH2(RTYPE, in2, in3, sat_val); \ + } #define SAT_SH4_SH(...) 
SAT_SH4(v8i16, __VA_ARGS__) /* Description : Indexed halfword element values are replicated to all @@ -1193,20 +1172,19 @@ elements in 'out0' vector Valid index range for halfword operation is 0-7 */ -#define SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_splati_h((v8i16)in, idx0); \ - out1 = (RTYPE)__msa_splati_h((v8i16)in, idx1); \ -} +#define SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_splati_h((v8i16)in, idx0); \ + out1 = (RTYPE)__msa_splati_h((v8i16)in, idx1); \ + } #define SPLATI_H2_SB(...) SPLATI_H2(v16i8, __VA_ARGS__) #define SPLATI_H2_SH(...) SPLATI_H2(v8i16, __VA_ARGS__) -#define SPLATI_H3(RTYPE, in, idx0, idx1, idx2, \ - out0, out1, out2) \ -{ \ - SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1); \ - out2 = (RTYPE)__msa_splati_h((v8i16)in, idx2); \ -} +#define SPLATI_H3(RTYPE, in, idx0, idx1, idx2, out0, out1, out2) \ + { \ + SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1); \ + out2 = (RTYPE)__msa_splati_h((v8i16)in, idx2); \ + } #define SPLATI_H3_SB(...) SPLATI_H3(v16i8, __VA_ARGS__) #define SPLATI_H3_SH(...) SPLATI_H3(v8i16, __VA_ARGS__) @@ -1221,11 +1199,11 @@ elements in 'out1' vector Valid index range for word operation is 0-3 */ -#define SPLATI_W2(RTYPE, in, stidx, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_splati_w((v4i32)in, stidx); \ - out1 = (RTYPE)__msa_splati_w((v4i32)in, (stidx+1)); \ -} +#define SPLATI_W2(RTYPE, in, stidx, out0, out1) \ + { \ + out0 = (RTYPE)__msa_splati_w((v4i32)in, stidx); \ + out1 = (RTYPE)__msa_splati_w((v4i32)in, (stidx + 1)); \ + } #define SPLATI_W2_SW(...) SPLATI_W2(v4i32, __VA_ARGS__) /* Description : Pack even byte elements of vector pairs @@ -1236,20 +1214,20 @@ 'out0' & even byte elements of 'in1' are copied to the right half of 'out0'. 
*/ -#define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_pckev_b((v16i8)in0, (v16i8)in1); \ - out1 = (RTYPE)__msa_pckev_b((v16i8)in2, (v16i8)in3); \ -} +#define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_pckev_b((v16i8)in0, (v16i8)in1); \ + out1 = (RTYPE)__msa_pckev_b((v16i8)in2, (v16i8)in3); \ + } #define PCKEV_B2_SB(...) PCKEV_B2(v16i8, __VA_ARGS__) #define PCKEV_B2_UB(...) PCKEV_B2(v16u8, __VA_ARGS__) -#define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) \ -{ \ - PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ - PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ -} +#define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ + PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ + } #define PCKEV_B4_SB(...) PCKEV_B4(v16i8, __VA_ARGS__) #define PCKEV_B4_UB(...) PCKEV_B4(v16u8, __VA_ARGS__) #define PCKEV_B4_SH(...) PCKEV_B4(v8i16, __VA_ARGS__) @@ -1262,19 +1240,19 @@ 'out0' & even halfword elements of 'in1' are copied to the right half of 'out0'. */ -#define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_pckev_h((v8i16)in0, (v8i16)in1); \ - out1 = (RTYPE)__msa_pckev_h((v8i16)in2, (v8i16)in3); \ -} +#define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_pckev_h((v8i16)in0, (v8i16)in1); \ + out1 = (RTYPE)__msa_pckev_h((v8i16)in2, (v8i16)in3); \ + } #define PCKEV_H2_SH(...) 
PCKEV_H2(v8i16, __VA_ARGS__) -#define PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) \ -{ \ - PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1); \ - PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3); \ -} +#define PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1); \ + PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3); \ + } #define PCKEV_H4_SH(...) PCKEV_H4(v8i16, __VA_ARGS__) /* Description : Pack even double word elements of vector pairs @@ -1285,11 +1263,11 @@ 'out0' & even double elements of 'in1' are copied to the right half of 'out0'. */ -#define PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_pckev_d((v2i64)in0, (v2i64)in1); \ - out1 = (RTYPE)__msa_pckev_d((v2i64)in2, (v2i64)in3); \ -} +#define PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_pckev_d((v2i64)in0, (v2i64)in1); \ + out1 = (RTYPE)__msa_pckev_d((v2i64)in2, (v2i64)in3); \ + } #define PCKEV_D2_UB(...) PCKEV_D2(v16u8, __VA_ARGS__) #define PCKEV_D2_SH(...) PCKEV_D2(v8i16, __VA_ARGS__) @@ -1301,11 +1279,11 @@ of 'out0' & odd double word elements of 'in1' are copied to the right half of 'out0'. */ -#define PCKOD_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = (RTYPE)__msa_pckod_d((v2i64)in0, (v2i64)in1); \ - out1 = (RTYPE)__msa_pckod_d((v2i64)in2, (v2i64)in3); \ -} +#define PCKOD_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_pckod_d((v2i64)in0, (v2i64)in1); \ + out1 = (RTYPE)__msa_pckod_d((v2i64)in2, (v2i64)in3); \ + } #define PCKOD_D2_UB(...) PCKOD_D2(v16u8, __VA_ARGS__) #define PCKOD_D2_SH(...) PCKOD_D2(v8i16, __VA_ARGS__) @@ -1316,41 +1294,41 @@ Details : Each unsigned byte element from input vector 'in0' is logically xor'ed with 128 and the result is stored in-place. 
*/ -#define XORI_B2_128(RTYPE, in0, in1) \ -{ \ - in0 = (RTYPE)__msa_xori_b((v16u8)in0, 128); \ - in1 = (RTYPE)__msa_xori_b((v16u8)in1, 128); \ -} +#define XORI_B2_128(RTYPE, in0, in1) \ + { \ + in0 = (RTYPE)__msa_xori_b((v16u8)in0, 128); \ + in1 = (RTYPE)__msa_xori_b((v16u8)in1, 128); \ + } #define XORI_B2_128_UB(...) XORI_B2_128(v16u8, __VA_ARGS__) #define XORI_B2_128_SB(...) XORI_B2_128(v16i8, __VA_ARGS__) -#define XORI_B3_128(RTYPE, in0, in1, in2) \ -{ \ - XORI_B2_128(RTYPE, in0, in1); \ - in2 = (RTYPE)__msa_xori_b((v16u8)in2, 128); \ -} +#define XORI_B3_128(RTYPE, in0, in1, in2) \ + { \ + XORI_B2_128(RTYPE, in0, in1); \ + in2 = (RTYPE)__msa_xori_b((v16u8)in2, 128); \ + } #define XORI_B3_128_SB(...) XORI_B3_128(v16i8, __VA_ARGS__) -#define XORI_B4_128(RTYPE, in0, in1, in2, in3) \ -{ \ - XORI_B2_128(RTYPE, in0, in1); \ - XORI_B2_128(RTYPE, in2, in3); \ -} +#define XORI_B4_128(RTYPE, in0, in1, in2, in3) \ + { \ + XORI_B2_128(RTYPE, in0, in1); \ + XORI_B2_128(RTYPE, in2, in3); \ + } #define XORI_B4_128_UB(...) XORI_B4_128(v16u8, __VA_ARGS__) #define XORI_B4_128_SB(...) XORI_B4_128(v16i8, __VA_ARGS__) -#define XORI_B5_128(RTYPE, in0, in1, in2, in3, in4) \ -{ \ - XORI_B3_128(RTYPE, in0, in1, in2); \ - XORI_B2_128(RTYPE, in3, in4); \ -} +#define XORI_B5_128(RTYPE, in0, in1, in2, in3, in4) \ + { \ + XORI_B3_128(RTYPE, in0, in1, in2); \ + XORI_B2_128(RTYPE, in3, in4); \ + } #define XORI_B5_128_SB(...) XORI_B5_128(v16i8, __VA_ARGS__) -#define XORI_B8_128(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7) \ -{ \ - XORI_B4_128(RTYPE, in0, in1, in2, in3); \ - XORI_B4_128(RTYPE, in4, in5, in6, in7); \ -} +#define XORI_B8_128(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7) \ + { \ + XORI_B4_128(RTYPE, in0, in1, in2, in3); \ + XORI_B4_128(RTYPE, in4, in5, in6, in7); \ + } #define XORI_B8_128_SB(...) 
XORI_B8_128(v16i8, __VA_ARGS__) /* Description : Shift left all elements of vector (generic for all data types) @@ -1360,13 +1338,13 @@ Details : Each element of vector 'in0' is left shifted by 'shift' and the result is written in-place. */ -#define SLLI_4V(in0, in1, in2, in3, shift) \ -{ \ - in0 = in0 << shift; \ - in1 = in1 << shift; \ - in2 = in2 << shift; \ - in3 = in3 << shift; \ -} +#define SLLI_4V(in0, in1, in2, in3, shift) \ + { \ + in0 = in0 << shift; \ + in1 = in1 << shift; \ + in2 = in2 << shift; \ + in3 = in3 << shift; \ + } /* Description : Arithmetic shift right all elements of vector (generic for all data types) @@ -1376,13 +1354,13 @@ Details : Each element of vector 'in0' is right shifted by 'shift' and the result is written in-place. 'shift' is a GP variable. */ -#define SRA_4V(in0, in1, in2, in3, shift) \ -{ \ - in0 = in0 >> shift; \ - in1 = in1 >> shift; \ - in2 = in2 >> shift; \ - in3 = in3 >> shift; \ -} +#define SRA_4V(in0, in1, in2, in3, shift) \ + { \ + in0 = in0 >> shift; \ + in1 = in1 >> shift; \ + in2 = in2 >> shift; \ + in3 = in3 >> shift; \ + } /* Description : Shift right arithmetic rounded words Arguments : Inputs - in0, in1, shift @@ -1394,17 +1372,17 @@ rounding and the result is written in-place. 'shift' is a vector. */ -#define SRAR_W2(RTYPE, in0, in1, shift) \ -{ \ - in0 = (RTYPE)__msa_srar_w((v4i32)in0, (v4i32)shift); \ - in1 = (RTYPE)__msa_srar_w((v4i32)in1, (v4i32)shift); \ -} +#define SRAR_W2(RTYPE, in0, in1, shift) \ + { \ + in0 = (RTYPE)__msa_srar_w((v4i32)in0, (v4i32)shift); \ + in1 = (RTYPE)__msa_srar_w((v4i32)in1, (v4i32)shift); \ + } -#define SRAR_W4(RTYPE, in0, in1, in2, in3, shift) \ -{ \ - SRAR_W2(RTYPE, in0, in1, shift); \ - SRAR_W2(RTYPE, in2, in3, shift); \ -} +#define SRAR_W4(RTYPE, in0, in1, in2, in3, shift) \ + { \ + SRAR_W2(RTYPE, in0, in1, shift); \ + SRAR_W2(RTYPE, in2, in3, shift); \ + } #define SRAR_W4_SW(...) 
SRAR_W4(v4i32, __VA_ARGS__) /* Description : Shift right arithmetic rounded (immediate) @@ -1416,33 +1394,33 @@ shifted value for rounding and the result is written in-place. 'shift' is an immediate value. */ -#define SRARI_H2(RTYPE, in0, in1, shift) \ -{ \ - in0 = (RTYPE)__msa_srari_h((v8i16)in0, shift); \ - in1 = (RTYPE)__msa_srari_h((v8i16)in1, shift); \ -} +#define SRARI_H2(RTYPE, in0, in1, shift) \ + { \ + in0 = (RTYPE)__msa_srari_h((v8i16)in0, shift); \ + in1 = (RTYPE)__msa_srari_h((v8i16)in1, shift); \ + } #define SRARI_H2_UH(...) SRARI_H2(v8u16, __VA_ARGS__) #define SRARI_H2_SH(...) SRARI_H2(v8i16, __VA_ARGS__) -#define SRARI_H4(RTYPE, in0, in1, in2, in3, shift) \ -{ \ - SRARI_H2(RTYPE, in0, in1, shift); \ - SRARI_H2(RTYPE, in2, in3, shift); \ -} +#define SRARI_H4(RTYPE, in0, in1, in2, in3, shift) \ + { \ + SRARI_H2(RTYPE, in0, in1, shift); \ + SRARI_H2(RTYPE, in2, in3, shift); \ + } #define SRARI_H4_UH(...) SRARI_H4(v8u16, __VA_ARGS__) #define SRARI_H4_SH(...) SRARI_H4(v8i16, __VA_ARGS__) -#define SRARI_W2(RTYPE, in0, in1, shift) \ -{ \ - in0 = (RTYPE)__msa_srari_w((v4i32)in0, shift); \ - in1 = (RTYPE)__msa_srari_w((v4i32)in1, shift); \ -} +#define SRARI_W2(RTYPE, in0, in1, shift) \ + { \ + in0 = (RTYPE)__msa_srari_w((v4i32)in0, shift); \ + in1 = (RTYPE)__msa_srari_w((v4i32)in1, shift); \ + } -#define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) \ -{ \ - SRARI_W2(RTYPE, in0, in1, shift); \ - SRARI_W2(RTYPE, in2, in3, shift); \ -} +#define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) \ + { \ + SRARI_W2(RTYPE, in0, in1, shift); \ + SRARI_W2(RTYPE, in2, in3, shift); \ + } #define SRARI_W4_SW(...) 
SRARI_W4(v4i32, __VA_ARGS__) /* Description : Multiplication of pairs of vectors @@ -1451,17 +1429,16 @@ Details : Each element from 'in0' is multiplied with elements from 'in1' and the result is written to 'out0' */ -#define MUL2(in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = in0 * in1; \ - out1 = in2 * in3; \ -} -#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) \ -{ \ - MUL2(in0, in1, in2, in3, out0, out1); \ - MUL2(in4, in5, in6, in7, out2, out3); \ -} +#define MUL2(in0, in1, in2, in3, out0, out1) \ + { \ + out0 = in0 * in1; \ + out1 = in2 * in3; \ + } +#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \ + { \ + MUL2(in0, in1, in2, in3, out0, out1); \ + MUL2(in4, in5, in6, in7, out2, out3); \ + } /* Description : Addition of 2 pairs of vectors Arguments : Inputs - in0, in1, in2, in3 @@ -1469,17 +1446,16 @@ Details : Each element in 'in0' is added to 'in1' and result is written to 'out0'. */ -#define ADD2(in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = in0 + in1; \ - out1 = in2 + in3; \ -} -#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) \ -{ \ - ADD2(in0, in1, in2, in3, out0, out1); \ - ADD2(in4, in5, in6, in7, out2, out3); \ -} +#define ADD2(in0, in1, in2, in3, out0, out1) \ + { \ + out0 = in0 + in1; \ + out1 = in2 + in3; \ + } +#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \ + { \ + ADD2(in0, in1, in2, in3, out0, out1); \ + ADD2(in4, in5, in6, in7, out2, out3); \ + } /* Description : Subtraction of 2 pairs of vectors Arguments : Inputs - in0, in1, in2, in3 @@ -1487,19 +1463,18 @@ Details : Each element in 'in1' is subtracted from 'in0' and result is written to 'out0'. 
*/ -#define SUB2(in0, in1, in2, in3, out0, out1) \ -{ \ - out0 = in0 - in1; \ - out1 = in2 - in3; \ -} -#define SUB4(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) \ -{ \ - out0 = in0 - in1; \ - out1 = in2 - in3; \ - out2 = in4 - in5; \ - out3 = in6 - in7; \ -} +#define SUB2(in0, in1, in2, in3, out0, out1) \ + { \ + out0 = in0 - in1; \ + out1 = in2 - in3; \ + } +#define SUB4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \ + { \ + out0 = in0 - in1; \ + out1 = in2 - in3; \ + out2 = in4 - in5; \ + out3 = in6 - in7; \ + } /* Description : Sign extend halfword elements from right half of the vector Arguments : Input - in (halfword vector) @@ -1509,13 +1484,13 @@ extracted and interleaved with same vector 'in0' to generate 4 word elements keeping sign intact */ -#define UNPCK_R_SH_SW(in, out) \ -{ \ - v8i16 sign_m; \ - \ - sign_m = __msa_clti_s_h((v8i16)in, 0); \ - out = (v4i32)__msa_ilvr_h(sign_m, (v8i16)in); \ -} +#define UNPCK_R_SH_SW(in, out) \ + { \ + v8i16 sign_m; \ + \ + sign_m = __msa_clti_s_h((v8i16)in, 0); \ + out = (v4i32)__msa_ilvr_h(sign_m, (v8i16)in); \ + } /* Description : Zero extend unsigned byte elements to halfword elements Arguments : Input - in (unsigned byte vector) @@ -1524,12 +1499,12 @@ Details : Zero extended right half of vector is returned in 'out0' Zero extended left half of vector is returned in 'out1' */ -#define UNPCK_UB_SH(in, out0, out1) \ -{ \ - v16i8 zero_m = { 0 }; \ - \ - ILVRL_B2_SH(zero_m, in, out0, out1); \ -} +#define UNPCK_UB_SH(in, out0, out1) \ + { \ + v16i8 zero_m = { 0 }; \ + \ + ILVRL_B2_SH(zero_m, in, out0, out1); \ + } /* Description : Sign extend halfword elements from input vector and return the result in pair of vectors @@ -1542,48 +1517,48 @@ Then interleaved left with same vector 'in0' to generate 4 signed word elements in 'out1' */ -#define UNPCK_SH_SW(in, out0, out1) \ -{ \ - v8i16 tmp_m; \ - \ - tmp_m = __msa_clti_s_h((v8i16)in, 0); \ - ILVRL_H2_SW(tmp_m, in, out0, out1); \ 
-} +#define UNPCK_SH_SW(in, out0, out1) \ + { \ + v8i16 tmp_m; \ + \ + tmp_m = __msa_clti_s_h((v8i16)in, 0); \ + ILVRL_H2_SW(tmp_m, in, out0, out1); \ + } /* Description : Butterfly of 4 input vectors Arguments : Inputs - in0, in1, in2, in3 Outputs - out0, out1, out2, out3 Details : Butterfly operation */ -#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) \ -{ \ - out0 = in0 + in3; \ - out1 = in1 + in2; \ - \ - out2 = in1 - in2; \ - out3 = in0 - in3; \ -} +#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + out0 = in0 + in3; \ + out1 = in1 + in2; \ + \ + out2 = in1 - in2; \ + out3 = in0 - in3; \ + } /* Description : Transpose input 8x8 byte block Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7 Outputs - out0, out1, out2, out3, out4, out5, out6, out7 Return Type - as per RTYPE */ -#define TRANSPOSE8x8_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) \ -{ \ - v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \ - \ - ILVR_B4_SB(in2, in0, in3, in1, in6, in4, in7, in5, \ - tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ - ILVRL_B2_SB(tmp1_m, tmp0_m, tmp4_m, tmp5_m); \ - ILVRL_B2_SB(tmp3_m, tmp2_m, tmp6_m, tmp7_m); \ - ILVRL_W2(RTYPE, tmp6_m, tmp4_m, out0, out2); \ - ILVRL_W2(RTYPE, tmp7_m, tmp5_m, out4, out6); \ - SLDI_B2_0(RTYPE, out0, out2, out1, out3, 8); \ - SLDI_B2_0(RTYPE, out4, out6, out5, out7, 8); \ -} +#define TRANSPOSE8x8_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, \ + out1, out2, out3, out4, out5, out6, out7) \ + { \ + v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \ + \ + ILVR_B4_SB(in2, in0, in3, in1, in6, in4, in7, in5, tmp0_m, tmp1_m, tmp2_m, \ + tmp3_m); \ + ILVRL_B2_SB(tmp1_m, tmp0_m, tmp4_m, tmp5_m); \ + ILVRL_B2_SB(tmp3_m, tmp2_m, tmp6_m, tmp7_m); \ + ILVRL_W2(RTYPE, tmp6_m, tmp4_m, out0, out2); \ + ILVRL_W2(RTYPE, tmp7_m, tmp5_m, out4, out6); \ + SLDI_B2_0(RTYPE, out0, out2, out1, out3, 
8); \ + SLDI_B2_0(RTYPE, out4, out6, out5, out7, 8); \ + } #define TRANSPOSE8x8_UB_UB(...) TRANSPOSE8x8_UB(v16u8, __VA_ARGS__) /* Description : Transpose 16x4 block into 4x16 with byte elements in vectors @@ -1592,33 +1567,33 @@ Outputs - out0, out1, out2, out3 Return Type - unsigned byte */ -#define TRANSPOSE16x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \ - in8, in9, in10, in11, in12, in13, in14, in15, \ - out0, out1, out2, out3) \ -{ \ - v2i64 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - \ - ILVEV_W2_SD(in0, in4, in8, in12, tmp0_m, tmp1_m); \ - out1 = (v16u8)__msa_ilvev_d(tmp1_m, tmp0_m); \ - \ - ILVEV_W2_SD(in1, in5, in9, in13, tmp0_m, tmp1_m); \ - out3 = (v16u8)__msa_ilvev_d(tmp1_m, tmp0_m); \ - \ - ILVEV_W2_SD(in2, in6, in10, in14, tmp0_m, tmp1_m); \ - \ - tmp2_m = __msa_ilvev_d(tmp1_m, tmp0_m); \ - ILVEV_W2_SD(in3, in7, in11, in15, tmp0_m, tmp1_m); \ - \ - tmp3_m = __msa_ilvev_d(tmp1_m, tmp0_m); \ - ILVEV_B2_SD(out1, out3, tmp2_m, tmp3_m, tmp0_m, tmp1_m); \ - out0 = (v16u8)__msa_ilvev_h((v8i16)tmp1_m, (v8i16)tmp0_m); \ - out2 = (v16u8)__msa_ilvod_h((v8i16)tmp1_m, (v8i16)tmp0_m); \ - \ - tmp0_m = (v2i64)__msa_ilvod_b((v16i8)out3, (v16i8)out1); \ - tmp1_m = (v2i64)__msa_ilvod_b((v16i8)tmp3_m, (v16i8)tmp2_m); \ - out1 = (v16u8)__msa_ilvev_h((v8i16)tmp1_m, (v8i16)tmp0_m); \ - out3 = (v16u8)__msa_ilvod_h((v8i16)tmp1_m, (v8i16)tmp0_m); \ -} +#define TRANSPOSE16x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \ + in10, in11, in12, in13, in14, in15, out0, out1, \ + out2, out3) \ + { \ + v2i64 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + \ + ILVEV_W2_SD(in0, in4, in8, in12, tmp0_m, tmp1_m); \ + out1 = (v16u8)__msa_ilvev_d(tmp1_m, tmp0_m); \ + \ + ILVEV_W2_SD(in1, in5, in9, in13, tmp0_m, tmp1_m); \ + out3 = (v16u8)__msa_ilvev_d(tmp1_m, tmp0_m); \ + \ + ILVEV_W2_SD(in2, in6, in10, in14, tmp0_m, tmp1_m); \ + \ + tmp2_m = __msa_ilvev_d(tmp1_m, tmp0_m); \ + ILVEV_W2_SD(in3, in7, in11, in15, tmp0_m, tmp1_m); \ + \ + tmp3_m = __msa_ilvev_d(tmp1_m, tmp0_m); \ + ILVEV_B2_SD(out1, 
out3, tmp2_m, tmp3_m, tmp0_m, tmp1_m); \ + out0 = (v16u8)__msa_ilvev_h((v8i16)tmp1_m, (v8i16)tmp0_m); \ + out2 = (v16u8)__msa_ilvod_h((v8i16)tmp1_m, (v8i16)tmp0_m); \ + \ + tmp0_m = (v2i64)__msa_ilvod_b((v16i8)out3, (v16i8)out1); \ + tmp1_m = (v2i64)__msa_ilvod_b((v16i8)tmp3_m, (v16i8)tmp2_m); \ + out1 = (v16u8)__msa_ilvev_h((v8i16)tmp1_m, (v8i16)tmp0_m); \ + out3 = (v16u8)__msa_ilvod_h((v8i16)tmp1_m, (v8i16)tmp0_m); \ + } /* Description : Transpose 16x8 block into 8x16 with byte elements in vectors Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7, @@ -1626,95 +1601,95 @@ Outputs - out0, out1, out2, out3, out4, out5, out6, out7 Return Type - unsigned byte */ -#define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \ - in8, in9, in10, in11, in12, in13, in14, in15, \ - out0, out1, out2, out3, out4, out5, out6, out7) \ -{ \ - v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - v16u8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \ - \ - ILVEV_D2_UB(in0, in8, in1, in9, out7, out6); \ - ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \ - ILVEV_D2_UB(in4, in12, in5, in13, out3, out2); \ - ILVEV_D2_UB(in6, in14, in7, in15, out1, out0); \ - \ - tmp0_m = (v16u8)__msa_ilvev_b((v16i8)out6, (v16i8)out7); \ - tmp4_m = (v16u8)__msa_ilvod_b((v16i8)out6, (v16i8)out7); \ - tmp1_m = (v16u8)__msa_ilvev_b((v16i8)out4, (v16i8)out5); \ - tmp5_m = (v16u8)__msa_ilvod_b((v16i8)out4, (v16i8)out5); \ - out5 = (v16u8)__msa_ilvev_b((v16i8)out2, (v16i8)out3); \ - tmp6_m = (v16u8)__msa_ilvod_b((v16i8)out2, (v16i8)out3); \ - out7 = (v16u8)__msa_ilvev_b((v16i8)out0, (v16i8)out1); \ - tmp7_m = (v16u8)__msa_ilvod_b((v16i8)out0, (v16i8)out1); \ - \ - ILVEV_H2_UB(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m); \ - out0 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ - out4 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ - \ - tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp1_m, (v8i16)tmp0_m); \ - tmp3_m = (v16u8)__msa_ilvod_h((v8i16)out7, (v8i16)out5); \ - out2 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, 
(v4i32)tmp2_m); \ - out6 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ - \ - ILVEV_H2_UB(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m); \ - out1 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ - out5 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ - \ - tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp5_m, (v8i16)tmp4_m); \ - tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp5_m, (v8i16)tmp4_m); \ - tmp3_m = (v16u8)__msa_ilvod_h((v8i16)tmp7_m, (v8i16)tmp6_m); \ - tmp3_m = (v16u8)__msa_ilvod_h((v8i16)tmp7_m, (v8i16)tmp6_m); \ - out3 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ - out7 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ -} +#define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \ + in10, in11, in12, in13, in14, in15, out0, out1, \ + out2, out3, out4, out5, out6, out7) \ + { \ + v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + v16u8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \ + \ + ILVEV_D2_UB(in0, in8, in1, in9, out7, out6); \ + ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \ + ILVEV_D2_UB(in4, in12, in5, in13, out3, out2); \ + ILVEV_D2_UB(in6, in14, in7, in15, out1, out0); \ + \ + tmp0_m = (v16u8)__msa_ilvev_b((v16i8)out6, (v16i8)out7); \ + tmp4_m = (v16u8)__msa_ilvod_b((v16i8)out6, (v16i8)out7); \ + tmp1_m = (v16u8)__msa_ilvev_b((v16i8)out4, (v16i8)out5); \ + tmp5_m = (v16u8)__msa_ilvod_b((v16i8)out4, (v16i8)out5); \ + out5 = (v16u8)__msa_ilvev_b((v16i8)out2, (v16i8)out3); \ + tmp6_m = (v16u8)__msa_ilvod_b((v16i8)out2, (v16i8)out3); \ + out7 = (v16u8)__msa_ilvev_b((v16i8)out0, (v16i8)out1); \ + tmp7_m = (v16u8)__msa_ilvod_b((v16i8)out0, (v16i8)out1); \ + \ + ILVEV_H2_UB(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m); \ + out0 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ + out4 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ + \ + tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp1_m, (v8i16)tmp0_m); \ + tmp3_m = (v16u8)__msa_ilvod_h((v8i16)out7, (v8i16)out5); \ + out2 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, 
(v4i32)tmp2_m); \ + out6 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ + \ + ILVEV_H2_UB(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m); \ + out1 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ + out5 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ + \ + tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp5_m, (v8i16)tmp4_m); \ + tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp5_m, (v8i16)tmp4_m); \ + tmp3_m = (v16u8)__msa_ilvod_h((v8i16)tmp7_m, (v8i16)tmp6_m); \ + tmp3_m = (v16u8)__msa_ilvod_h((v8i16)tmp7_m, (v8i16)tmp6_m); \ + out3 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ + out7 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ + } /* Description : Transpose 4x4 block with half word elements in vectors Arguments : Inputs - in0, in1, in2, in3 Outputs - out0, out1, out2, out3 Return Type - signed halfword */ -#define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \ -{ \ - v8i16 s0_m, s1_m; \ - \ - ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m); \ - ILVRL_W2_SH(s1_m, s0_m, out0, out2); \ - out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \ - out3 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out2); \ -} +#define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + v8i16 s0_m, s1_m; \ + \ + ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m); \ + ILVRL_W2_SH(s1_m, s0_m, out0, out2); \ + out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \ + out3 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out2); \ + } /* Description : Transpose 8x4 block with half word elements in vectors Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7 Outputs - out0, out1, out2, out3, out4, out5, out6, out7 Return Type - signed halfword */ -#define TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \ -{ \ - v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - \ - ILVR_H2_SH(in1, in0, in3, in2, tmp0_m, tmp1_m); \ - ILVL_H2_SH(in1, in0, in3, in2, tmp2_m, tmp3_m); \ - ILVR_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out2); \ - 
ILVL_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out1, out3); \ -} +#define TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + \ + ILVR_H2_SH(in1, in0, in3, in2, tmp0_m, tmp1_m); \ + ILVL_H2_SH(in1, in0, in3, in2, tmp2_m, tmp3_m); \ + ILVR_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out2); \ + ILVL_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out1, out3); \ + } /* Description : Transpose 4x4 block with word elements in vectors Arguments : Inputs - in0, in1, in2, in3 Outputs - out0, out1, out2, out3 Return Type - signed word */ -#define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3) \ -{ \ - v4i32 s0_m, s1_m, s2_m, s3_m; \ - \ - ILVRL_W2_SW(in1, in0, s0_m, s1_m); \ - ILVRL_W2_SW(in3, in2, s2_m, s3_m); \ - \ - out0 = (v4i32)__msa_ilvr_d((v2i64)s2_m, (v2i64)s0_m); \ - out1 = (v4i32)__msa_ilvl_d((v2i64)s2_m, (v2i64)s0_m); \ - out2 = (v4i32)__msa_ilvr_d((v2i64)s3_m, (v2i64)s1_m); \ - out3 = (v4i32)__msa_ilvl_d((v2i64)s3_m, (v2i64)s1_m); \ -} +#define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + v4i32 s0_m, s1_m, s2_m, s3_m; \ + \ + ILVRL_W2_SW(in1, in0, s0_m, s1_m); \ + ILVRL_W2_SW(in3, in2, s2_m, s3_m); \ + \ + out0 = (v4i32)__msa_ilvr_d((v2i64)s2_m, (v2i64)s0_m); \ + out1 = (v4i32)__msa_ilvl_d((v2i64)s2_m, (v2i64)s0_m); \ + out2 = (v4i32)__msa_ilvr_d((v2i64)s3_m, (v2i64)s1_m); \ + out3 = (v4i32)__msa_ilvl_d((v2i64)s3_m, (v2i64)s1_m); \ + } /* Description : Dot product and addition of 3 signed halfword input vectors Arguments : Inputs - in0, in1, in2, coeff0, coeff1, coeff2 @@ -1726,18 +1701,18 @@ Addition of all the 3 vector results out0_m = (in0 * coeff0) + (in1 * coeff1) + (in2 * coeff2) */ -#define DPADD_SH3_SH(in0, in1, in2, coeff0, coeff1, coeff2) \ -({ \ - v8i16 tmp1_m; \ - v8i16 out0_m; \ - \ - out0_m = __msa_dotp_s_h((v16i8)in0, (v16i8)coeff0); \ - out0_m = __msa_dpadd_s_h(out0_m, (v16i8)in1, (v16i8)coeff1); \ - tmp1_m = __msa_dotp_s_h((v16i8)in2, (v16i8)coeff2); \ - 
out0_m = __msa_adds_s_h(out0_m, tmp1_m); \ - \ - out0_m; \ -}) +#define DPADD_SH3_SH(in0, in1, in2, coeff0, coeff1, coeff2) \ + ({ \ + v8i16 tmp1_m; \ + v8i16 out0_m; \ + \ + out0_m = __msa_dotp_s_h((v16i8)in0, (v16i8)coeff0); \ + out0_m = __msa_dpadd_s_h(out0_m, (v16i8)in1, (v16i8)coeff1); \ + tmp1_m = __msa_dotp_s_h((v16i8)in2, (v16i8)coeff2); \ + out0_m = __msa_adds_s_h(out0_m, tmp1_m); \ + \ + out0_m; \ + }) /* Description : Pack even elements of input vectors & xor with 128 Arguments : Inputs - in0, in1 @@ -1747,37 +1722,37 @@ together in one vector and the resulting vector is xor'ed with 128 to shift the range from signed to unsigned byte */ -#define PCKEV_XORI128_UB(in0, in1) \ -({ \ - v16u8 out_m; \ - out_m = (v16u8)__msa_pckev_b((v16i8)in1, (v16i8)in0); \ - out_m = (v16u8)__msa_xori_b((v16u8)out_m, 128); \ - out_m; \ -}) +#define PCKEV_XORI128_UB(in0, in1) \ + ({ \ + v16u8 out_m; \ + out_m = (v16u8)__msa_pckev_b((v16i8)in1, (v16i8)in0); \ + out_m = (v16u8)__msa_xori_b((v16u8)out_m, 128); \ + out_m; \ + }) /* Description : Pack even byte elements and store byte vector in destination memory Arguments : Inputs - in0, in1, pdst */ -#define PCKEV_ST_SB(in0, in1, pdst) \ -{ \ - v16i8 tmp_m; \ - tmp_m = __msa_pckev_b((v16i8)in1, (v16i8)in0); \ - ST_SB(tmp_m, (pdst)); \ -} +#define PCKEV_ST_SB(in0, in1, pdst) \ + { \ + v16i8 tmp_m; \ + tmp_m = __msa_pckev_b((v16i8)in1, (v16i8)in0); \ + ST_SB(tmp_m, (pdst)); \ + } /* Description : Horizontal 2 tap filter kernel code Arguments : Inputs - in0, in1, mask, coeff, shift */ -#define HORIZ_2TAP_FILT_UH(in0, in1, mask, coeff, shift) \ -({ \ - v16i8 tmp0_m; \ - v8u16 tmp1_m; \ - \ - tmp0_m = __msa_vshf_b((v16i8)mask, (v16i8)in1, (v16i8)in0); \ - tmp1_m = __msa_dotp_u_h((v16u8)tmp0_m, (v16u8)coeff); \ - tmp1_m = (v8u16)__msa_srari_h((v8i16)tmp1_m, shift); \ - \ - tmp1_m; \ -}) -#endif /* VP8_COMMON_MIPS_MSA_VP8_MACROS_MSA_H_ */ +#define HORIZ_2TAP_FILT_UH(in0, in1, mask, coeff, shift) \ + ({ \ + v16i8 tmp0_m; \ + v8u16 
tmp1_m; \ + \ + tmp0_m = __msa_vshf_b((v16i8)mask, (v16i8)in1, (v16i8)in0); \ + tmp1_m = __msa_dotp_u_h((v16u8)tmp0_m, (v16u8)coeff); \ + tmp1_m = (v8u16)__msa_srari_h((v8i16)tmp1_m, shift); \ + \ + tmp1_m; \ + }) +#endif /* VP8_COMMON_MIPS_MSA_VP8_MACROS_MSA_H_ */ diff --git a/libs/libvpx/vp8/common/modecont.c b/libs/libvpx/vp8/common/modecont.c index 86a74bc0ff..d6ad9bb99a 100644 --- a/libs/libvpx/vp8/common/modecont.c +++ b/libs/libvpx/vp8/common/modecont.c @@ -8,33 +8,31 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include "entropy.h" -const int vp8_mode_contexts[6][4] = -{ - { - /* 0 */ - 7, 1, 1, 143, - }, - { - /* 1 */ - 14, 18, 14, 107, - }, - { - /* 2 */ - 135, 64, 57, 68, - }, - { - /* 3 */ - 60, 56, 128, 65, - }, - { - /* 4 */ - 159, 134, 128, 34, - }, - { - /* 5 */ - 234, 188, 128, 28, - }, +const int vp8_mode_contexts[6][4] = { + { + /* 0 */ + 7, 1, 1, 143, + }, + { + /* 1 */ + 14, 18, 14, 107, + }, + { + /* 2 */ + 135, 64, 57, 68, + }, + { + /* 3 */ + 60, 56, 128, 65, + }, + { + /* 4 */ + 159, 134, 128, 34, + }, + { + /* 5 */ + 234, 188, 128, 28, + }, }; diff --git a/libs/libvpx/vp8/common/modecont.h b/libs/libvpx/vp8/common/modecont.h index ff34c33c55..b58c7dc2d3 100644 --- a/libs/libvpx/vp8/common/modecont.h +++ b/libs/libvpx/vp8/common/modecont.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_COMMON_MODECONT_H_ #define VP8_COMMON_MODECONT_H_ diff --git a/libs/libvpx/vp8/common/mv.h b/libs/libvpx/vp8/common/mv.h index 111ccd63c7..b6d2147af8 100644 --- a/libs/libvpx/vp8/common/mv.h +++ b/libs/libvpx/vp8/common/mv.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_COMMON_MV_H_ #define VP8_COMMON_MV_H_ #include "vpx/vpx_integer.h" @@ -17,17 +16,15 @@ extern "C" { #endif -typedef struct -{ - short row; - short col; +typedef struct { + short row; + short col; } MV; -typedef union int_mv -{ - uint32_t as_int; - MV as_mv; -} int_mv; /* facilitates faster equality tests and copies */ +typedef union int_mv { + uint32_t as_int; + MV as_mv; +} int_mv; /* facilitates faster equality tests and copies */ #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/common/onyx.h b/libs/libvpx/vp8/common/onyx.h index febe81505a..43e3c29b50 100644 --- a/libs/libvpx/vp8/common/onyx.h +++ b/libs/libvpx/vp8/common/onyx.h @@ -8,13 +8,11 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_COMMON_ONYX_H_ #define VP8_COMMON_ONYX_H_ #ifdef __cplusplus -extern "C" -{ +extern "C" { #endif #include "vpx_config.h" @@ -24,256 +22,258 @@ extern "C" #include "vpx_scale/yv12config.h" #include "ppflags.h" - struct VP8_COMP; +struct VP8_COMP; - /* Create/destroy static data structures. */ +/* Create/destroy static data structures. 
*/ - typedef enum - { - NORMAL = 0, - FOURFIVE = 1, - THREEFIVE = 2, - ONETWO = 3 +typedef enum { + NORMAL = 0, + FOURFIVE = 1, + THREEFIVE = 2, + ONETWO = 3 +} VPX_SCALING; - } VPX_SCALING; +typedef enum { + USAGE_LOCAL_FILE_PLAYBACK = 0x0, + USAGE_STREAM_FROM_SERVER = 0x1, + USAGE_CONSTRAINED_QUALITY = 0x2, + USAGE_CONSTANT_QUALITY = 0x3 +} END_USAGE; - typedef enum - { - USAGE_LOCAL_FILE_PLAYBACK = 0x0, - USAGE_STREAM_FROM_SERVER = 0x1, - USAGE_CONSTRAINED_QUALITY = 0x2, - USAGE_CONSTANT_QUALITY = 0x3 - } END_USAGE; - - - typedef enum - { - MODE_REALTIME = 0x0, - MODE_GOODQUALITY = 0x1, - MODE_BESTQUALITY = 0x2, - MODE_FIRSTPASS = 0x3, - MODE_SECONDPASS = 0x4, - MODE_SECONDPASS_BEST = 0x5 - } MODE; - - typedef enum - { - FRAMEFLAGS_KEY = 1, - FRAMEFLAGS_GOLDEN = 2, - FRAMEFLAGS_ALTREF = 4 - } FRAMETYPE_FLAGS; +typedef enum { + MODE_REALTIME = 0x0, + MODE_GOODQUALITY = 0x1, + MODE_BESTQUALITY = 0x2, + MODE_FIRSTPASS = 0x3, + MODE_SECONDPASS = 0x4, + MODE_SECONDPASS_BEST = 0x5 +} MODE; +typedef enum { + FRAMEFLAGS_KEY = 1, + FRAMEFLAGS_GOLDEN = 2, + FRAMEFLAGS_ALTREF = 4 +} FRAMETYPE_FLAGS; #include - static INLINE void Scale2Ratio(int mode, int *hr, int *hs) - { - switch (mode) - { - case NORMAL: - *hr = 1; - *hs = 1; - break; - case FOURFIVE: - *hr = 4; - *hs = 5; - break; - case THREEFIVE: - *hr = 3; - *hs = 5; - break; - case ONETWO: - *hr = 1; - *hs = 2; - break; - default: - *hr = 1; - *hs = 1; - assert(0); - break; - } - } +static INLINE void Scale2Ratio(int mode, int *hr, int *hs) { + switch (mode) { + case NORMAL: + *hr = 1; + *hs = 1; + break; + case FOURFIVE: + *hr = 4; + *hs = 5; + break; + case THREEFIVE: + *hr = 3; + *hs = 5; + break; + case ONETWO: + *hr = 1; + *hs = 2; + break; + default: + *hr = 1; + *hs = 1; + assert(0); + break; + } +} - typedef struct - { - /* 4 versions of bitstream defined: - * 0 best quality/slowest decode, 3 lowest quality/fastest decode - */ - int Version; - int Width; - int Height; - struct vpx_rational timebase; - 
unsigned int target_bandwidth; /* kilobits per second */ +typedef struct { + /* 4 versions of bitstream defined: + * 0 best quality/slowest decode, 3 lowest quality/fastest decode + */ + int Version; + int Width; + int Height; + struct vpx_rational timebase; + unsigned int target_bandwidth; /* kilobits per second */ - /* Parameter used for applying denoiser. - * For temporal denoiser: noise_sensitivity = 0 means off, - * noise_sensitivity = 1 means temporal denoiser on for Y channel only, - * noise_sensitivity = 2 means temporal denoiser on for all channels. - * noise_sensitivity = 3 means aggressive denoising mode. - * noise_sensitivity >= 4 means adaptive denoising mode. - * Temporal denoiser is enabled via the configuration option: - * CONFIG_TEMPORAL_DENOISING. - * For spatial denoiser: noise_sensitivity controls the amount of - * pre-processing blur: noise_sensitivity = 0 means off. - * Spatial denoiser invoked under !CONFIG_TEMPORAL_DENOISING. - */ - int noise_sensitivity; + /* Parameter used for applying denoiser. + * For temporal denoiser: noise_sensitivity = 0 means off, + * noise_sensitivity = 1 means temporal denoiser on for Y channel only, + * noise_sensitivity = 2 means temporal denoiser on for all channels. + * noise_sensitivity = 3 means aggressive denoising mode. + * noise_sensitivity >= 4 means adaptive denoising mode. + * Temporal denoiser is enabled via the configuration option: + * CONFIG_TEMPORAL_DENOISING. + * For spatial denoiser: noise_sensitivity controls the amount of + * pre-processing blur: noise_sensitivity = 0 means off. + * Spatial denoiser invoked under !CONFIG_TEMPORAL_DENOISING. 
+ */ + int noise_sensitivity; - /* parameter used for sharpening output: recommendation 0: */ - int Sharpness; - int cpu_used; - unsigned int rc_max_intra_bitrate_pct; - unsigned int screen_content_mode; + /* parameter used for sharpening output: recommendation 0: */ + int Sharpness; + int cpu_used; + unsigned int rc_max_intra_bitrate_pct; + unsigned int screen_content_mode; - /* mode -> - *(0)=Realtime/Live Encoding. This mode is optimized for realtim - * encoding (for example, capturing a television signal or feed - * from a live camera). ( speed setting controls how fast ) - *(1)=Good Quality Fast Encoding. The encoder balances quality with - * the amount of time it takes to encode the output. ( speed - * setting controls how fast ) - *(2)=One Pass - Best Quality. The encoder places priority on the - * quality of the output over encoding speed. The output is - * compressed at the highest possible quality. This option takes - * the longest amount of time to encode. ( speed setting ignored - * ) - *(3)=Two Pass - First Pass. The encoder generates a file of - * statistics for use in the second encoding pass. ( speed - * setting controls how fast ) - *(4)=Two Pass - Second Pass. The encoder uses the statistics that - * were generated in the first encoding pass to create the - * compressed output. ( speed setting controls how fast ) - *(5)=Two Pass - Second Pass Best. The encoder uses the statistics - * that were generated in the first encoding pass to create the - * compressed output using the highest possible quality, and - * taking a longer amount of time to encode.. ( speed setting - * ignored ) - */ - int Mode; + /* mode -> + *(0)=Realtime/Live Encoding. This mode is optimized for realtim + * encoding (for example, capturing a television signal or feed + * from a live camera). ( speed setting controls how fast ) + *(1)=Good Quality Fast Encoding. The encoder balances quality with + * the amount of time it takes to encode the output. 
( speed + * setting controls how fast ) + *(2)=One Pass - Best Quality. The encoder places priority on the + * quality of the output over encoding speed. The output is + * compressed at the highest possible quality. This option takes + * the longest amount of time to encode. ( speed setting ignored + * ) + *(3)=Two Pass - First Pass. The encoder generates a file of + * statistics for use in the second encoding pass. ( speed + * setting controls how fast ) + *(4)=Two Pass - Second Pass. The encoder uses the statistics that + * were generated in the first encoding pass to create the + * compressed output. ( speed setting controls how fast ) + *(5)=Two Pass - Second Pass Best. The encoder uses the statistics + * that were generated in the first encoding pass to create the + * compressed output using the highest possible quality, and + * taking a longer amount of time to encode.. ( speed setting + * ignored ) + */ + int Mode; - /* Key Framing Operations */ - int auto_key; /* automatically detect cut scenes */ - int key_freq; /* maximum distance to key frame. */ + /* Key Framing Operations */ + int auto_key; /* automatically detect cut scenes */ + int key_freq; /* maximum distance to key frame. 
*/ - /* lagged compression (if allow_lag == 0 lag_in_frames is ignored) */ - int allow_lag; - int lag_in_frames; /* how many frames lag before we start encoding */ + /* lagged compression (if allow_lag == 0 lag_in_frames is ignored) */ + int allow_lag; + int lag_in_frames; /* how many frames lag before we start encoding */ - /* - * DATARATE CONTROL OPTIONS - */ + /* + * DATARATE CONTROL OPTIONS + */ - int end_usage; /* vbr or cbr */ + int end_usage; /* vbr or cbr */ - /* buffer targeting aggressiveness */ - int under_shoot_pct; - int over_shoot_pct; + /* buffer targeting aggressiveness */ + int under_shoot_pct; + int over_shoot_pct; - /* buffering parameters */ - int64_t starting_buffer_level; - int64_t optimal_buffer_level; - int64_t maximum_buffer_size; + /* buffering parameters */ + int64_t starting_buffer_level; + int64_t optimal_buffer_level; + int64_t maximum_buffer_size; - int64_t starting_buffer_level_in_ms; - int64_t optimal_buffer_level_in_ms; - int64_t maximum_buffer_size_in_ms; + int64_t starting_buffer_level_in_ms; + int64_t optimal_buffer_level_in_ms; + int64_t maximum_buffer_size_in_ms; - /* controlling quality */ - int fixed_q; - int worst_allowed_q; - int best_allowed_q; - int cq_level; + /* controlling quality */ + int fixed_q; + int worst_allowed_q; + int best_allowed_q; + int cq_level; - /* allow internal resizing */ - int allow_spatial_resampling; - int resample_down_water_mark; - int resample_up_water_mark; + /* allow internal resizing */ + int allow_spatial_resampling; + int resample_down_water_mark; + int resample_up_water_mark; - /* allow internal frame rate alterations */ - int allow_df; - int drop_frames_water_mark; + /* allow internal frame rate alterations */ + int allow_df; + int drop_frames_water_mark; - /* two pass datarate control */ - int two_pass_vbrbias; - int two_pass_vbrmin_section; - int two_pass_vbrmax_section; + /* two pass datarate control */ + int two_pass_vbrbias; + int two_pass_vbrmin_section; + int 
two_pass_vbrmax_section; - /* - * END DATARATE CONTROL OPTIONS - */ + /* + * END DATARATE CONTROL OPTIONS + */ - /* these parameters aren't to be used in final build don't use!!! */ - int play_alternate; - int alt_freq; - int alt_q; - int key_q; - int gold_q; + /* these parameters aren't to be used in final build don't use!!! */ + int play_alternate; + int alt_freq; + int alt_q; + int key_q; + int gold_q; + int multi_threaded; /* how many threads to run the encoder on */ + int token_partitions; /* how many token partitions to create */ - int multi_threaded; /* how many threads to run the encoder on */ - int token_partitions; /* how many token partitions to create */ + /* early breakout threshold: for video conf recommend 800 */ + int encode_breakout; - /* early breakout threshold: for video conf recommend 800 */ - int encode_breakout; + /* Bitfield defining the error resiliency features to enable. + * Can provide decodable frames after losses in previous + * frames and decodable partitions after losses in the same frame. + */ + unsigned int error_resilient_mode; - /* Bitfield defining the error resiliency features to enable. - * Can provide decodable frames after losses in previous - * frames and decodable partitions after losses in the same frame. 
- */ - unsigned int error_resilient_mode; + int arnr_max_frames; + int arnr_strength; + int arnr_type; - int arnr_max_frames; - int arnr_strength; - int arnr_type; + vpx_fixed_buf_t two_pass_stats_in; + struct vpx_codec_pkt_list *output_pkt_list; - vpx_fixed_buf_t two_pass_stats_in; - struct vpx_codec_pkt_list *output_pkt_list; + vp8e_tuning tuning; - vp8e_tuning tuning; - - /* Temporal scaling parameters */ - unsigned int number_of_layers; - unsigned int target_bitrate[VPX_TS_MAX_PERIODICITY]; - unsigned int rate_decimator[VPX_TS_MAX_PERIODICITY]; - unsigned int periodicity; - unsigned int layer_id[VPX_TS_MAX_PERIODICITY]; + /* Temporal scaling parameters */ + unsigned int number_of_layers; + unsigned int target_bitrate[VPX_TS_MAX_PERIODICITY]; + unsigned int rate_decimator[VPX_TS_MAX_PERIODICITY]; + unsigned int periodicity; + unsigned int layer_id[VPX_TS_MAX_PERIODICITY]; #if CONFIG_MULTI_RES_ENCODING - /* Number of total resolutions encoded */ - unsigned int mr_total_resolutions; + /* Number of total resolutions encoded */ + unsigned int mr_total_resolutions; - /* Current encoder ID */ - unsigned int mr_encoder_id; + /* Current encoder ID */ + unsigned int mr_encoder_id; - /* Down-sampling factor */ - vpx_rational_t mr_down_sampling_factor; + /* Down-sampling factor */ + vpx_rational_t mr_down_sampling_factor; - /* Memory location to store low-resolution encoder's mode info */ - void* mr_low_res_mode_info; + /* Memory location to store low-resolution encoder's mode info */ + void *mr_low_res_mode_info; #endif - } VP8_CONFIG; +} VP8_CONFIG; +void vp8_initialize(); - void vp8_initialize(); +struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf); +void vp8_remove_compressor(struct VP8_COMP **comp); - struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf); - void vp8_remove_compressor(struct VP8_COMP* *comp); +void vp8_init_config(struct VP8_COMP *onyx, VP8_CONFIG *oxcf); +void vp8_change_config(struct VP8_COMP *onyx, VP8_CONFIG *oxcf); - void 
vp8_init_config(struct VP8_COMP* onyx, VP8_CONFIG *oxcf); - void vp8_change_config(struct VP8_COMP* onyx, VP8_CONFIG *oxcf); +int vp8_receive_raw_frame(struct VP8_COMP *comp, unsigned int frame_flags, + YV12_BUFFER_CONFIG *sd, int64_t time_stamp, + int64_t end_time_stamp); +int vp8_get_compressed_data(struct VP8_COMP *comp, unsigned int *frame_flags, + size_t *size, unsigned char *dest, + unsigned char *dest_end, int64_t *time_stamp, + int64_t *time_end, int flush); +int vp8_get_preview_raw_frame(struct VP8_COMP *comp, YV12_BUFFER_CONFIG *dest, + vp8_ppflags_t *flags); - int vp8_receive_raw_frame(struct VP8_COMP* comp, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time_stamp); - int vp8_get_compressed_data(struct VP8_COMP* comp, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, unsigned char *dest_end, int64_t *time_stamp, int64_t *time_end, int flush); - int vp8_get_preview_raw_frame(struct VP8_COMP* comp, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags); - - int vp8_use_as_reference(struct VP8_COMP* comp, int ref_frame_flags); - int vp8_update_reference(struct VP8_COMP* comp, int ref_frame_flags); - int vp8_get_reference(struct VP8_COMP* comp, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd); - int vp8_set_reference(struct VP8_COMP* comp, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd); - int vp8_update_entropy(struct VP8_COMP* comp, int update); - int vp8_set_roimap(struct VP8_COMP* comp, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4]); - int vp8_set_active_map(struct VP8_COMP* comp, unsigned char *map, unsigned int rows, unsigned int cols); - int vp8_set_internal_size(struct VP8_COMP* comp, VPX_SCALING horiz_mode, VPX_SCALING vert_mode); - int vp8_get_quantizer(struct VP8_COMP* c); +int vp8_use_as_reference(struct VP8_COMP *comp, int ref_frame_flags); +int vp8_update_reference(struct VP8_COMP *comp, int 
ref_frame_flags); +int vp8_get_reference(struct VP8_COMP *comp, + enum vpx_ref_frame_type ref_frame_flag, + YV12_BUFFER_CONFIG *sd); +int vp8_set_reference(struct VP8_COMP *comp, + enum vpx_ref_frame_type ref_frame_flag, + YV12_BUFFER_CONFIG *sd); +int vp8_update_entropy(struct VP8_COMP *comp, int update); +int vp8_set_roimap(struct VP8_COMP *comp, unsigned char *map, unsigned int rows, + unsigned int cols, int delta_q[4], int delta_lf[4], + unsigned int threshold[4]); +int vp8_set_active_map(struct VP8_COMP *comp, unsigned char *map, + unsigned int rows, unsigned int cols); +int vp8_set_internal_size(struct VP8_COMP *comp, VPX_SCALING horiz_mode, + VPX_SCALING vert_mode); +int vp8_get_quantizer(struct VP8_COMP *c); #ifdef __cplusplus } diff --git a/libs/libvpx/vp8/common/onyxc_int.h b/libs/libvpx/vp8/common/onyxc_int.h index 6d89865c60..9a12c7fb67 100644 --- a/libs/libvpx/vp8/common/onyxc_int.h +++ b/libs/libvpx/vp8/common/onyxc_int.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_COMMON_ONYXC_INT_H_ #define VP8_COMMON_ONYXC_INT_H_ @@ -38,144 +37,137 @@ extern "C" { #define MAX_PARTITIONS 9 -typedef struct frame_contexts -{ - vp8_prob bmode_prob [VP8_BINTRAMODES-1]; - vp8_prob ymode_prob [VP8_YMODES-1]; /* interframe intra mode probs */ - vp8_prob uv_mode_prob [VP8_UV_MODES-1]; - vp8_prob sub_mv_ref_prob [VP8_SUBMVREFS-1]; - vp8_prob coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; - MV_CONTEXT mvc[2]; +typedef struct frame_contexts { + vp8_prob bmode_prob[VP8_BINTRAMODES - 1]; + vp8_prob ymode_prob[VP8_YMODES - 1]; /* interframe intra mode probs */ + vp8_prob uv_mode_prob[VP8_UV_MODES - 1]; + vp8_prob sub_mv_ref_prob[VP8_SUBMVREFS - 1]; + vp8_prob coef_probs[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] + [ENTROPY_NODES]; + MV_CONTEXT mvc[2]; } FRAME_CONTEXT; -typedef enum -{ - ONE_PARTITION = 0, - TWO_PARTITION = 1, - FOUR_PARTITION = 2, - EIGHT_PARTITION = 3 +typedef enum { + ONE_PARTITION = 0, + TWO_PARTITION = 1, + FOUR_PARTITION = 2, + EIGHT_PARTITION = 3 } TOKEN_PARTITION; -typedef enum -{ - RECON_CLAMP_REQUIRED = 0, - RECON_CLAMP_NOTREQUIRED = 1 +typedef enum { + RECON_CLAMP_REQUIRED = 0, + RECON_CLAMP_NOTREQUIRED = 1 } CLAMP_TYPE; -typedef struct VP8Common +typedef struct VP8Common { + struct vpx_internal_error_info error; -{ - struct vpx_internal_error_info error; + DECLARE_ALIGNED(16, short, Y1dequant[QINDEX_RANGE][2]); + DECLARE_ALIGNED(16, short, Y2dequant[QINDEX_RANGE][2]); + DECLARE_ALIGNED(16, short, UVdequant[QINDEX_RANGE][2]); - DECLARE_ALIGNED(16, short, Y1dequant[QINDEX_RANGE][2]); - DECLARE_ALIGNED(16, short, Y2dequant[QINDEX_RANGE][2]); - DECLARE_ALIGNED(16, short, UVdequant[QINDEX_RANGE][2]); + int Width; + int Height; + int horiz_scale; + int vert_scale; - int Width; - int Height; - int horiz_scale; - int vert_scale; + CLAMP_TYPE clamp_type; - CLAMP_TYPE clamp_type; + YV12_BUFFER_CONFIG *frame_to_show; - YV12_BUFFER_CONFIG *frame_to_show; + YV12_BUFFER_CONFIG 
yv12_fb[NUM_YV12_BUFFERS]; + int fb_idx_ref_cnt[NUM_YV12_BUFFERS]; + int new_fb_idx, lst_fb_idx, gld_fb_idx, alt_fb_idx; - YV12_BUFFER_CONFIG yv12_fb[NUM_YV12_BUFFERS]; - int fb_idx_ref_cnt[NUM_YV12_BUFFERS]; - int new_fb_idx, lst_fb_idx, gld_fb_idx, alt_fb_idx; - - YV12_BUFFER_CONFIG temp_scale_frame; + YV12_BUFFER_CONFIG temp_scale_frame; #if CONFIG_POSTPROC - YV12_BUFFER_CONFIG post_proc_buffer; - YV12_BUFFER_CONFIG post_proc_buffer_int; - int post_proc_buffer_int_used; - unsigned char *pp_limits_buffer; /* post-processing filter coefficients */ + YV12_BUFFER_CONFIG post_proc_buffer; + YV12_BUFFER_CONFIG post_proc_buffer_int; + int post_proc_buffer_int_used; + unsigned char *pp_limits_buffer; /* post-processing filter coefficients */ #endif - FRAME_TYPE last_frame_type; /* Save last frame's frame type for motion search. */ - FRAME_TYPE frame_type; + FRAME_TYPE + last_frame_type; /* Save last frame's frame type for motion search. */ + FRAME_TYPE frame_type; - int show_frame; + int show_frame; - int frame_flags; - int MBs; - int mb_rows; - int mb_cols; - int mode_info_stride; + int frame_flags; + int MBs; + int mb_rows; + int mb_cols; + int mode_info_stride; - /* profile settings */ - int mb_no_coeff_skip; - int no_lpf; - int use_bilinear_mc_filter; - int full_pixel; + /* profile settings */ + int mb_no_coeff_skip; + int no_lpf; + int use_bilinear_mc_filter; + int full_pixel; - int base_qindex; + int base_qindex; - int y1dc_delta_q; - int y2dc_delta_q; - int y2ac_delta_q; - int uvdc_delta_q; - int uvac_delta_q; + int y1dc_delta_q; + int y2dc_delta_q; + int y2ac_delta_q; + int uvdc_delta_q; + int uvac_delta_q; - /* We allocate a MODE_INFO struct for each macroblock, together with - an extra row on top and column on the left to simplify prediction. */ + /* We allocate a MODE_INFO struct for each macroblock, together with + an extra row on top and column on the left to simplify prediction. 
*/ - MODE_INFO *mip; /* Base of allocated array */ - MODE_INFO *mi; /* Corresponds to upper left visible macroblock */ + MODE_INFO *mip; /* Base of allocated array */ + MODE_INFO *mi; /* Corresponds to upper left visible macroblock */ #if CONFIG_ERROR_CONCEALMENT - MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */ - MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */ + MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */ + MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */ #endif - MODE_INFO *show_frame_mi; /* MODE_INFO for the last decoded frame - to show */ - LOOPFILTERTYPE filter_type; + /* MODE_INFO for the last decoded frame to show */ + MODE_INFO *show_frame_mi; + LOOPFILTERTYPE filter_type; - loop_filter_info_n lf_info; + loop_filter_info_n lf_info; - int filter_level; - int last_sharpness_level; - int sharpness_level; + int filter_level; + int last_sharpness_level; + int sharpness_level; - int refresh_last_frame; /* Two state 0 = NO, 1 = YES */ - int refresh_golden_frame; /* Two state 0 = NO, 1 = YES */ - int refresh_alt_ref_frame; /* Two state 0 = NO, 1 = YES */ + int refresh_last_frame; /* Two state 0 = NO, 1 = YES */ + int refresh_golden_frame; /* Two state 0 = NO, 1 = YES */ + int refresh_alt_ref_frame; /* Two state 0 = NO, 1 = YES */ - int copy_buffer_to_gf; /* 0 none, 1 Last to GF, 2 ARF to GF */ - int copy_buffer_to_arf; /* 0 none, 1 Last to ARF, 2 GF to ARF */ + int copy_buffer_to_gf; /* 0 none, 1 Last to GF, 2 ARF to GF */ + int copy_buffer_to_arf; /* 0 none, 1 Last to ARF, 2 GF to ARF */ - int refresh_entropy_probs; /* Two state 0 = NO, 1 = YES */ + int refresh_entropy_probs; /* Two state 0 = NO, 1 = YES */ - int ref_frame_sign_bias[MAX_REF_FRAMES]; /* Two state 0, 1 */ + int ref_frame_sign_bias[MAX_REF_FRAMES]; /* Two state 0, 1 */ - /* Y,U,V,Y2 */ - ENTROPY_CONTEXT_PLANES *above_context; /* row of context for each plane */ - ENTROPY_CONTEXT_PLANES left_context; /* 
(up to) 4 contexts "" */ + /* Y,U,V,Y2 */ + ENTROPY_CONTEXT_PLANES *above_context; /* row of context for each plane */ + ENTROPY_CONTEXT_PLANES left_context; /* (up to) 4 contexts "" */ - FRAME_CONTEXT lfc; /* last frame entropy */ - FRAME_CONTEXT fc; /* this frame entropy */ + FRAME_CONTEXT lfc; /* last frame entropy */ + FRAME_CONTEXT fc; /* this frame entropy */ - unsigned int current_video_frame; + unsigned int current_video_frame; - int version; + int version; - TOKEN_PARTITION multi_token_partition; + TOKEN_PARTITION multi_token_partition; #ifdef PACKET_TESTING - VP8_HEADER oh; -#endif -#if CONFIG_POSTPROC_VISUALIZER - double bitrate; - double framerate; + VP8_HEADER oh; #endif #if CONFIG_MULTITHREAD - int processor_core_count; + int processor_core_count; #endif #if CONFIG_POSTPROC - struct postproc_state postproc_state; + struct postproc_state postproc_state; #endif - int cpu_caps; + int cpu_caps; } VP8_COMMON; #ifdef __cplusplus diff --git a/libs/libvpx/vp8/common/onyxd.h b/libs/libvpx/vp8/common/onyxd.h index e37b29f32c..e05461aad0 100644 --- a/libs/libvpx/vp8/common/onyxd.h +++ b/libs/libvpx/vp8/common/onyxd.h @@ -8,15 +8,12 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_COMMON_ONYXD_H_ #define VP8_COMMON_ONYXD_H_ - /* Create/destroy static data structures. 
*/ #ifdef __cplusplus -extern "C" -{ +extern "C" { #endif #include "vpx_scale/yv12config.h" #include "ppflags.h" @@ -24,40 +21,40 @@ extern "C" #include "vpx/vpx_codec.h" #include "vpx/vp8.h" - struct VP8D_COMP; +struct VP8D_COMP; - typedef struct - { - int Width; - int Height; - int Version; - int postprocess; - int max_threads; - int error_concealment; - } VP8D_CONFIG; +typedef struct { + int Width; + int Height; + int Version; + int postprocess; + int max_threads; + int error_concealment; +} VP8D_CONFIG; - typedef enum - { - VP8D_OK = 0 - } VP8D_SETTING; +typedef enum { VP8D_OK = 0 } VP8D_SETTING; - void vp8dx_initialize(void); +void vp8dx_initialize(void); - void vp8dx_set_setting(struct VP8D_COMP* comp, VP8D_SETTING oxst, int x); +void vp8dx_set_setting(struct VP8D_COMP *comp, VP8D_SETTING oxst, int x); - int vp8dx_get_setting(struct VP8D_COMP* comp, VP8D_SETTING oxst); +int vp8dx_get_setting(struct VP8D_COMP *comp, VP8D_SETTING oxst); - int vp8dx_receive_compressed_data(struct VP8D_COMP* comp, - size_t size, const uint8_t *dest, - int64_t time_stamp); - int vp8dx_get_raw_frame(struct VP8D_COMP* comp, YV12_BUFFER_CONFIG *sd, int64_t *time_stamp, int64_t *time_end_stamp, vp8_ppflags_t *flags); +int vp8dx_receive_compressed_data(struct VP8D_COMP *comp, size_t size, + const uint8_t *dest, int64_t time_stamp); +int vp8dx_get_raw_frame(struct VP8D_COMP *comp, YV12_BUFFER_CONFIG *sd, + int64_t *time_stamp, int64_t *time_end_stamp, + vp8_ppflags_t *flags); - vpx_codec_err_t vp8dx_get_reference(struct VP8D_COMP* comp, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd); - vpx_codec_err_t vp8dx_set_reference(struct VP8D_COMP* comp, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd); +vpx_codec_err_t vp8dx_get_reference(struct VP8D_COMP *comp, + enum vpx_ref_frame_type ref_frame_flag, + YV12_BUFFER_CONFIG *sd); +vpx_codec_err_t vp8dx_set_reference(struct VP8D_COMP *comp, + enum vpx_ref_frame_type ref_frame_flag, + YV12_BUFFER_CONFIG *sd); #ifdef 
__cplusplus } #endif - #endif // VP8_COMMON_ONYXD_H_ diff --git a/libs/libvpx/vp8/common/postproc.c b/libs/libvpx/vp8/common/postproc.c index 322b61383b..8b8c1701a3 100644 --- a/libs/libvpx/vp8/common/postproc.c +++ b/libs/libvpx/vp8/common/postproc.c @@ -8,9 +8,10 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include "vpx_config.h" +#include "vpx_dsp_rtcd.h" #include "vp8_rtcd.h" +#include "vpx_dsp/postproc.h" #include "vpx_scale_rtcd.h" #include "vpx_scale/yv12config.h" #include "postproc.h" @@ -23,1186 +24,366 @@ #include #include -#define RGB_TO_YUV(t) \ - ( (0.257*(float)(t>>16)) + (0.504*(float)(t>>8&0xff)) + (0.098*(float)(t&0xff)) + 16), \ - (-(0.148*(float)(t>>16)) - (0.291*(float)(t>>8&0xff)) + (0.439*(float)(t&0xff)) + 128), \ - ( (0.439*(float)(t>>16)) - (0.368*(float)(t>>8&0xff)) - (0.071*(float)(t&0xff)) + 128) +/* clang-format off */ +#define RGB_TO_YUV(t) \ + (unsigned char)((0.257 * (float)(t >> 16)) + \ + (0.504 * (float)(t >> 8 & 0xff)) + \ + (0.098 * (float)(t & 0xff)) + 16), \ + (unsigned char)(-(0.148 * (float)(t >> 16)) - \ + (0.291 * (float)(t >> 8 & 0xff)) + \ + (0.439 * (float)(t & 0xff)) + 128), \ + (unsigned char)((0.439 * (float)(t >> 16)) - \ + (0.368 * (float)(t >> 8 & 0xff)) - \ + (0.071 * (float)(t & 0xff)) + 128) +/* clang-format on */ -/* global constants */ -#if CONFIG_POSTPROC_VISUALIZER -static const unsigned char MB_PREDICTION_MODE_colors[MB_MODE_COUNT][3] = -{ - { RGB_TO_YUV(0x98FB98) }, /* PaleGreen */ - { RGB_TO_YUV(0x00FF00) }, /* Green */ - { RGB_TO_YUV(0xADFF2F) }, /* GreenYellow */ - { RGB_TO_YUV(0x228B22) }, /* ForestGreen */ - { RGB_TO_YUV(0x006400) }, /* DarkGreen */ - { RGB_TO_YUV(0x98F5FF) }, /* Cadet Blue */ - { RGB_TO_YUV(0x6CA6CD) }, /* Sky Blue */ - { RGB_TO_YUV(0x00008B) }, /* Dark blue */ - { RGB_TO_YUV(0x551A8B) }, /* Purple */ - { RGB_TO_YUV(0xFF0000) } /* Red */ -}; - -static const unsigned char B_PREDICTION_MODE_colors[B_MODE_COUNT][3] = -{ - { RGB_TO_YUV(0x6633ff) }, /* Purple 
*/ - { RGB_TO_YUV(0xcc33ff) }, /* Magenta */ - { RGB_TO_YUV(0xff33cc) }, /* Pink */ - { RGB_TO_YUV(0xff3366) }, /* Coral */ - { RGB_TO_YUV(0x3366ff) }, /* Blue */ - { RGB_TO_YUV(0xed00f5) }, /* Dark Blue */ - { RGB_TO_YUV(0x2e00b8) }, /* Dark Purple */ - { RGB_TO_YUV(0xff6633) }, /* Orange */ - { RGB_TO_YUV(0x33ccff) }, /* Light Blue */ - { RGB_TO_YUV(0x8ab800) }, /* Green */ - { RGB_TO_YUV(0xffcc33) }, /* Light Orange */ - { RGB_TO_YUV(0x33ffcc) }, /* Aqua */ - { RGB_TO_YUV(0x66ff33) }, /* Light Green */ - { RGB_TO_YUV(0xccff33) }, /* Yellow */ -}; - -static const unsigned char MV_REFERENCE_FRAME_colors[MAX_REF_FRAMES][3] = -{ - { RGB_TO_YUV(0x00ff00) }, /* Blue */ - { RGB_TO_YUV(0x0000ff) }, /* Green */ - { RGB_TO_YUV(0xffff00) }, /* Yellow */ - { RGB_TO_YUV(0xff0000) }, /* Red */ -}; -#endif - -const short vp8_rv[] = -{ - 8, 5, 2, 2, 8, 12, 4, 9, 8, 3, - 0, 3, 9, 0, 0, 0, 8, 3, 14, 4, - 10, 1, 11, 14, 1, 14, 9, 6, 12, 11, - 8, 6, 10, 0, 0, 8, 9, 0, 3, 14, - 8, 11, 13, 4, 2, 9, 0, 3, 9, 6, - 1, 2, 3, 14, 13, 1, 8, 2, 9, 7, - 3, 3, 1, 13, 13, 6, 6, 5, 2, 7, - 11, 9, 11, 8, 7, 3, 2, 0, 13, 13, - 14, 4, 12, 5, 12, 10, 8, 10, 13, 10, - 4, 14, 4, 10, 0, 8, 11, 1, 13, 7, - 7, 14, 6, 14, 13, 2, 13, 5, 4, 4, - 0, 10, 0, 5, 13, 2, 12, 7, 11, 13, - 8, 0, 4, 10, 7, 2, 7, 2, 2, 5, - 3, 4, 7, 3, 3, 14, 14, 5, 9, 13, - 3, 14, 3, 6, 3, 0, 11, 8, 13, 1, - 13, 1, 12, 0, 10, 9, 7, 6, 2, 8, - 5, 2, 13, 7, 1, 13, 14, 7, 6, 7, - 9, 6, 10, 11, 7, 8, 7, 5, 14, 8, - 4, 4, 0, 8, 7, 10, 0, 8, 14, 11, - 3, 12, 5, 7, 14, 3, 14, 5, 2, 6, - 11, 12, 12, 8, 0, 11, 13, 1, 2, 0, - 5, 10, 14, 7, 8, 0, 4, 11, 0, 8, - 0, 3, 10, 5, 8, 0, 11, 6, 7, 8, - 10, 7, 13, 9, 2, 5, 1, 5, 10, 2, - 4, 3, 5, 6, 10, 8, 9, 4, 11, 14, - 0, 10, 0, 5, 13, 2, 12, 7, 11, 13, - 8, 0, 4, 10, 7, 2, 7, 2, 2, 5, - 3, 4, 7, 3, 3, 14, 14, 5, 9, 13, - 3, 14, 3, 6, 3, 0, 11, 8, 13, 1, - 13, 1, 12, 0, 10, 9, 7, 6, 2, 8, - 5, 2, 13, 7, 1, 13, 14, 7, 6, 7, - 9, 6, 10, 11, 7, 8, 7, 5, 14, 8, - 4, 4, 0, 8, 7, 10, 0, 8, 14, 11, - 3, 
12, 5, 7, 14, 3, 14, 5, 2, 6, - 11, 12, 12, 8, 0, 11, 13, 1, 2, 0, - 5, 10, 14, 7, 8, 0, 4, 11, 0, 8, - 0, 3, 10, 5, 8, 0, 11, 6, 7, 8, - 10, 7, 13, 9, 2, 5, 1, 5, 10, 2, - 4, 3, 5, 6, 10, 8, 9, 4, 11, 14, - 3, 8, 3, 7, 8, 5, 11, 4, 12, 3, - 11, 9, 14, 8, 14, 13, 4, 3, 1, 2, - 14, 6, 5, 4, 4, 11, 4, 6, 2, 1, - 5, 8, 8, 12, 13, 5, 14, 10, 12, 13, - 0, 9, 5, 5, 11, 10, 13, 9, 10, 13, -}; - -extern void vp8_blit_text(const char *msg, unsigned char *address, const int pitch); -extern void vp8_blit_line(int x0, int x1, int y0, int y1, unsigned char *image, const int pitch); +extern void vp8_blit_text(const char *msg, unsigned char *address, + const int pitch); +extern void vp8_blit_line(int x0, int x1, int y0, int y1, unsigned char *image, + const int pitch); /*********************************************************************************************************** */ -void vp8_post_proc_down_and_across_mb_row_c -( - unsigned char *src_ptr, - unsigned char *dst_ptr, - int src_pixels_per_line, - int dst_pixels_per_line, - int cols, - unsigned char *f, - int size -) -{ - unsigned char *p_src, *p_dst; - int row; - int col; - unsigned char v; - unsigned char d[4]; - - for (row = 0; row < size; row++) - { - /* post_proc_down for one row */ - p_src = src_ptr; - p_dst = dst_ptr; - - for (col = 0; col < cols; col++) - { - unsigned char p_above2 = p_src[col - 2 * src_pixels_per_line]; - unsigned char p_above1 = p_src[col - src_pixels_per_line]; - unsigned char p_below1 = p_src[col + src_pixels_per_line]; - unsigned char p_below2 = p_src[col + 2 * src_pixels_per_line]; - - v = p_src[col]; - - if ((abs(v - p_above2) < f[col]) && (abs(v - p_above1) < f[col]) - && (abs(v - p_below1) < f[col]) && (abs(v - p_below2) < f[col])) - { - unsigned char k1, k2, k3; - k1 = (p_above2 + p_above1 + 1) >> 1; - k2 = (p_below2 + p_below1 + 1) >> 1; - k3 = (k1 + k2 + 1) >> 1; - v = (k3 + v + 1) >> 1; - } - - p_dst[col] = v; - } - - /* now post_proc_across */ - p_src = dst_ptr; - p_dst = dst_ptr; - 
- p_src[-2] = p_src[-1] = p_src[0]; - p_src[cols] = p_src[cols + 1] = p_src[cols - 1]; - - for (col = 0; col < cols; col++) - { - v = p_src[col]; - - if ((abs(v - p_src[col - 2]) < f[col]) - && (abs(v - p_src[col - 1]) < f[col]) - && (abs(v - p_src[col + 1]) < f[col]) - && (abs(v - p_src[col + 2]) < f[col])) - { - unsigned char k1, k2, k3; - k1 = (p_src[col - 2] + p_src[col - 1] + 1) >> 1; - k2 = (p_src[col + 2] + p_src[col + 1] + 1) >> 1; - k3 = (k1 + k2 + 1) >> 1; - v = (k3 + v + 1) >> 1; - } - - d[col & 3] = v; - - if (col >= 2) - p_dst[col - 2] = d[(col - 2) & 3]; - } - - /* handle the last two pixels */ - p_dst[col - 2] = d[(col - 2) & 3]; - p_dst[col - 1] = d[(col - 1) & 3]; - - /* next row */ - src_ptr += src_pixels_per_line; - dst_ptr += dst_pixels_per_line; - } -} - -static int q2mbl(int x) -{ - if (x < 20) x = 20; - - x = 50 + (x - 50) * 10 / 8; - return x * x / 3; -} - -void vp8_mbpost_proc_across_ip_c(unsigned char *src, int pitch, int rows, int cols, int flimit) -{ - int r, c, i; - - unsigned char *s = src; - unsigned char d[16]; - - for (r = 0; r < rows; r++) - { - int sumsq = 0; - int sum = 0; - - for (i = -8; i < 0; i++) - s[i]=s[0]; - - /* 17 avoids valgrind warning - we buffer values in c in d - * and only write them when we've read 8 ahead... 
- */ - for (i = 0; i < 17; i++) - s[i+cols]=s[cols-1]; - - for (i = -8; i <= 6; i++) - { - sumsq += s[i] * s[i]; - sum += s[i]; - d[i+8] = 0; - } - - for (c = 0; c < cols + 8; c++) - { - int x = s[c+7] - s[c-8]; - int y = s[c+7] + s[c-8]; - - sum += x; - sumsq += x * y; - - d[c&15] = s[c]; - - if (sumsq * 15 - sum * sum < flimit) - { - d[c&15] = (8 + sum + s[c]) >> 4; - } - - s[c-8] = d[(c-8)&15]; - } - - s += pitch; - } -} - -void vp8_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols, int flimit) -{ - int r, c, i; - const short *rv3 = &vp8_rv[63&rand()]; - - for (c = 0; c < cols; c++ ) - { - unsigned char *s = &dst[c]; - int sumsq = 0; - int sum = 0; - unsigned char d[16]; - const short *rv2 = rv3 + ((c * 17) & 127); - - for (i = -8; i < 0; i++) - s[i*pitch]=s[0]; - - /* 17 avoids valgrind warning - we buffer values in c in d - * and only write them when we've read 8 ahead... - */ - for (i = 0; i < 17; i++) - s[(i+rows)*pitch]=s[(rows-1)*pitch]; - - for (i = -8; i <= 6; i++) - { - sumsq += s[i*pitch] * s[i*pitch]; - sum += s[i*pitch]; - } - - for (r = 0; r < rows + 8; r++) - { - sumsq += s[7*pitch] * s[ 7*pitch] - s[-8*pitch] * s[-8*pitch]; - sum += s[7*pitch] - s[-8*pitch]; - d[r&15] = s[0]; - - if (sumsq * 15 - sum * sum < flimit) - { - d[r&15] = (rv2[r&127] + sum + s[0]) >> 4; - } - if (r >= 8) - s[-8*pitch] = d[(r-8)&15]; - s += pitch; - } - } -} - #if CONFIG_POSTPROC -static void vp8_de_mblock(YV12_BUFFER_CONFIG *post, - int q) -{ - vp8_mbpost_proc_across_ip(post->y_buffer, post->y_stride, post->y_height, - post->y_width, q2mbl(q)); - vp8_mbpost_proc_down(post->y_buffer, post->y_stride, post->y_height, - post->y_width, q2mbl(q)); +static int q2mbl(int x) { + if (x < 20) x = 20; + + x = 50 + (x - 50) * 10 / 8; + return x * x / 3; } -void vp8_deblock(VP8_COMMON *cm, - YV12_BUFFER_CONFIG *source, - YV12_BUFFER_CONFIG *post, - int q, - int low_var_thresh, - int flag) -{ - double level = 6.0e-05 * q * q * q - .0067 * q * q + .306 * q + .0065; - 
int ppl = (int)(level + .5); - - const MODE_INFO *mode_info_context = cm->show_frame_mi; - int mbr, mbc; - - /* The pixel thresholds are adjusted according to if or not the macroblock - * is a skipped block. */ - unsigned char *ylimits = cm->pp_limits_buffer; - unsigned char *uvlimits = cm->pp_limits_buffer + 16 * cm->mb_cols; - (void) low_var_thresh; - (void) flag; - - if (ppl > 0) - { - for (mbr = 0; mbr < cm->mb_rows; mbr++) - { - unsigned char *ylptr = ylimits; - unsigned char *uvlptr = uvlimits; - for (mbc = 0; mbc < cm->mb_cols; mbc++) - { - unsigned char mb_ppl; - - if (mode_info_context->mbmi.mb_skip_coeff) - mb_ppl = (unsigned char)ppl >> 1; - else - mb_ppl = (unsigned char)ppl; - - memset(ylptr, mb_ppl, 16); - memset(uvlptr, mb_ppl, 8); - - ylptr += 16; - uvlptr += 8; - mode_info_context++; - } - mode_info_context++; - - vp8_post_proc_down_and_across_mb_row( - source->y_buffer + 16 * mbr * source->y_stride, - post->y_buffer + 16 * mbr * post->y_stride, source->y_stride, - post->y_stride, source->y_width, ylimits, 16); - - vp8_post_proc_down_and_across_mb_row( - source->u_buffer + 8 * mbr * source->uv_stride, - post->u_buffer + 8 * mbr * post->uv_stride, source->uv_stride, - post->uv_stride, source->uv_width, uvlimits, 8); - vp8_post_proc_down_and_across_mb_row( - source->v_buffer + 8 * mbr * source->uv_stride, - post->v_buffer + 8 * mbr * post->uv_stride, source->uv_stride, - post->uv_stride, source->uv_width, uvlimits, 8); - } - } else - { - vp8_yv12_copy_frame(source, post); - } -} -#endif - -void vp8_de_noise(VP8_COMMON *cm, - YV12_BUFFER_CONFIG *source, - YV12_BUFFER_CONFIG *post, - int q, - int low_var_thresh, - int flag, - int uvfilter) -{ - int mbr; - double level = 6.0e-05 * q * q * q - .0067 * q * q + .306 * q + .0065; - int ppl = (int)(level + .5); - int mb_rows = cm->mb_rows; - int mb_cols = cm->mb_cols; - unsigned char *limits = cm->pp_limits_buffer;; - (void) post; - (void) low_var_thresh; - (void) flag; - - memset(limits, (unsigned char)ppl, 
16 * mb_cols); - - /* TODO: The original code don't filter the 2 outer rows and columns. */ - for (mbr = 0; mbr < mb_rows; mbr++) - { - vp8_post_proc_down_and_across_mb_row( - source->y_buffer + 16 * mbr * source->y_stride, - source->y_buffer + 16 * mbr * source->y_stride, - source->y_stride, source->y_stride, source->y_width, limits, 16); - if (uvfilter == 1) { - vp8_post_proc_down_and_across_mb_row( - source->u_buffer + 8 * mbr * source->uv_stride, - source->u_buffer + 8 * mbr * source->uv_stride, - source->uv_stride, source->uv_stride, source->uv_width, limits, - 8); - vp8_post_proc_down_and_across_mb_row( - source->v_buffer + 8 * mbr * source->uv_stride, - source->v_buffer + 8 * mbr * source->uv_stride, - source->uv_stride, source->uv_stride, source->uv_width, limits, - 8); - } - } +static void vp8_de_mblock(YV12_BUFFER_CONFIG *post, int q) { + vpx_mbpost_proc_across_ip(post->y_buffer, post->y_stride, post->y_height, + post->y_width, q2mbl(q)); + vpx_mbpost_proc_down(post->y_buffer, post->y_stride, post->y_height, + post->y_width, q2mbl(q)); } -static double gaussian(double sigma, double mu, double x) -{ - return 1 / (sigma * sqrt(2.0 * 3.14159265)) * - (exp(-(x - mu) * (x - mu) / (2 * sigma * sigma))); -} +void vp8_deblock(VP8_COMMON *cm, YV12_BUFFER_CONFIG *source, + YV12_BUFFER_CONFIG *post, int q, int low_var_thresh, + int flag) { + double level = 6.0e-05 * q * q * q - .0067 * q * q + .306 * q + .0065; + int ppl = (int)(level + .5); -static void fillrd(struct postproc_state *state, int q, int a) -{ - char char_dist[300]; + const MODE_INFO *mode_info_context = cm->show_frame_mi; + int mbr, mbc; - double sigma; - int i; + /* The pixel thresholds are adjusted according to if or not the macroblock + * is a skipped block. 
*/ + unsigned char *ylimits = cm->pp_limits_buffer; + unsigned char *uvlimits = cm->pp_limits_buffer + 16 * cm->mb_cols; + (void)low_var_thresh; + (void)flag; - vp8_clear_system_state(); - - - sigma = a + .5 + .6 * (63 - q) / 63.0; - - /* set up a lookup table of 256 entries that matches - * a gaussian distribution with sigma determined by q. - */ - { - int next, j; - - next = 0; - - for (i = -32; i < 32; i++) - { - const int v = (int)(.5 + 256 * gaussian(sigma, 0, i)); - - if (v) - { - for (j = 0; j < v; j++) - { - char_dist[next+j] = (char) i; - } - - next = next + j; - } + if (ppl > 0) { + for (mbr = 0; mbr < cm->mb_rows; ++mbr) { + unsigned char *ylptr = ylimits; + unsigned char *uvlptr = uvlimits; + for (mbc = 0; mbc < cm->mb_cols; ++mbc) { + unsigned char mb_ppl; + if (mode_info_context->mbmi.mb_skip_coeff) { + mb_ppl = (unsigned char)ppl >> 1; + } else { + mb_ppl = (unsigned char)ppl; } - for (; next < 256; next++) - char_dist[next] = 0; + memset(ylptr, mb_ppl, 16); + memset(uvlptr, mb_ppl, 8); + ylptr += 16; + uvlptr += 8; + mode_info_context++; + } + mode_info_context++; + + vpx_post_proc_down_and_across_mb_row( + source->y_buffer + 16 * mbr * source->y_stride, + post->y_buffer + 16 * mbr * post->y_stride, source->y_stride, + post->y_stride, source->y_width, ylimits, 16); + + vpx_post_proc_down_and_across_mb_row( + source->u_buffer + 8 * mbr * source->uv_stride, + post->u_buffer + 8 * mbr * post->uv_stride, source->uv_stride, + post->uv_stride, source->uv_width, uvlimits, 8); + vpx_post_proc_down_and_across_mb_row( + source->v_buffer + 8 * mbr * source->uv_stride, + post->v_buffer + 8 * mbr * post->uv_stride, source->uv_stride, + post->uv_stride, source->uv_width, uvlimits, 8); } - - for (i = 0; i < 3072; i++) - { - state->noise[i] = char_dist[rand() & 0xff]; - } - - for (i = 0; i < 16; i++) - { - state->blackclamp[i] = -char_dist[0]; - state->whiteclamp[i] = -char_dist[0]; - state->bothclamp[i] = -2 * char_dist[0]; - } - - state->last_q = q; - 
state->last_noise = a; + } else { + vp8_yv12_copy_frame(source, post); + } } -/**************************************************************************** - * - * ROUTINE : plane_add_noise_c - * - * INPUTS : unsigned char *Start starting address of buffer to add gaussian - * noise to - * unsigned int Width width of plane - * unsigned int Height height of plane - * int Pitch distance between subsequent lines of frame - * int q quantizer used to determine amount of noise - * to add - * - * OUTPUTS : None. - * - * RETURNS : void. - * - * FUNCTION : adds gaussian noise to a plane of pixels - * - * SPECIAL NOTES : None. - * - ****************************************************************************/ -void vp8_plane_add_noise_c(unsigned char *Start, char *noise, - char blackclamp[16], - char whiteclamp[16], - char bothclamp[16], - unsigned int Width, unsigned int Height, int Pitch) -{ - unsigned int i, j; - (void)bothclamp; +void vp8_de_noise(VP8_COMMON *cm, YV12_BUFFER_CONFIG *source, + YV12_BUFFER_CONFIG *post, int q, int low_var_thresh, int flag, + int uvfilter) { + int mbr; + double level = 6.0e-05 * q * q * q - .0067 * q * q + .306 * q + .0065; + int ppl = (int)(level + .5); + int mb_rows = cm->mb_rows; + int mb_cols = cm->mb_cols; + unsigned char *limits = cm->pp_limits_buffer; + (void)post; + (void)low_var_thresh; + (void)flag; - for (i = 0; i < Height; i++) - { - unsigned char *Pos = Start + i * Pitch; - char *Ref = (char *)(noise + (rand() & 0xff)); + memset(limits, (unsigned char)ppl, 16 * mb_cols); - for (j = 0; j < Width; j++) - { - if (Pos[j] < blackclamp[0]) - Pos[j] = blackclamp[0]; - - if (Pos[j] > 255 + whiteclamp[0]) - Pos[j] = 255 + whiteclamp[0]; - - Pos[j] += Ref[j]; - } + /* TODO: The original code don't filter the 2 outer rows and columns. 
*/ + for (mbr = 0; mbr < mb_rows; ++mbr) { + vpx_post_proc_down_and_across_mb_row( + source->y_buffer + 16 * mbr * source->y_stride, + source->y_buffer + 16 * mbr * source->y_stride, source->y_stride, + source->y_stride, source->y_width, limits, 16); + if (uvfilter == 1) { + vpx_post_proc_down_and_across_mb_row( + source->u_buffer + 8 * mbr * source->uv_stride, + source->u_buffer + 8 * mbr * source->uv_stride, source->uv_stride, + source->uv_stride, source->uv_width, limits, 8); + vpx_post_proc_down_and_across_mb_row( + source->v_buffer + 8 * mbr * source->uv_stride, + source->v_buffer + 8 * mbr * source->uv_stride, source->uv_stride, + source->uv_stride, source->uv_width, limits, 8); } + } } +#endif // CONFIG_POSTPROC /* Blend the macro block with a solid colored square. Leave the * edges unblended to give distinction to macro blocks in areas * filled with the same color block. */ -void vp8_blend_mb_inner_c (unsigned char *y, unsigned char *u, unsigned char *v, - int y_1, int u_1, int v_1, int alpha, int stride) -{ - int i, j; - int y1_const = y_1*((1<<16)-alpha); - int u1_const = u_1*((1<<16)-alpha); - int v1_const = v_1*((1<<16)-alpha); +void vp8_blend_mb_inner_c(unsigned char *y, unsigned char *u, unsigned char *v, + int y_1, int u_1, int v_1, int alpha, int stride) { + int i, j; + int y1_const = y_1 * ((1 << 16) - alpha); + int u1_const = u_1 * ((1 << 16) - alpha); + int v1_const = v_1 * ((1 << 16) - alpha); - y += 2*stride + 2; - for (i = 0; i < 12; i++) - { - for (j = 0; j < 12; j++) - { - y[j] = (y[j]*alpha + y1_const)>>16; - } - y += stride; + y += 2 * stride + 2; + for (i = 0; i < 12; ++i) { + for (j = 0; j < 12; ++j) { + y[j] = (y[j] * alpha + y1_const) >> 16; } + y += stride; + } - stride >>= 1; + stride >>= 1; - u += stride + 1; - v += stride + 1; + u += stride + 1; + v += stride + 1; - for (i = 0; i < 6; i++) - { - for (j = 0; j < 6; j++) - { - u[j] = (u[j]*alpha + u1_const)>>16; - v[j] = (v[j]*alpha + v1_const)>>16; - } - u += stride; - v += stride; 
+ for (i = 0; i < 6; ++i) { + for (j = 0; j < 6; ++j) { + u[j] = (u[j] * alpha + u1_const) >> 16; + v[j] = (v[j] * alpha + v1_const) >> 16; } + u += stride; + v += stride; + } } /* Blend only the edge of the macro block. Leave center * unblended to allow for other visualizations to be layered. */ -void vp8_blend_mb_outer_c (unsigned char *y, unsigned char *u, unsigned char *v, - int y_1, int u_1, int v_1, int alpha, int stride) -{ - int i, j; - int y1_const = y_1*((1<<16)-alpha); - int u1_const = u_1*((1<<16)-alpha); - int v1_const = v_1*((1<<16)-alpha); +void vp8_blend_mb_outer_c(unsigned char *y, unsigned char *u, unsigned char *v, + int y_1, int u_1, int v_1, int alpha, int stride) { + int i, j; + int y1_const = y_1 * ((1 << 16) - alpha); + int u1_const = u_1 * ((1 << 16) - alpha); + int v1_const = v_1 * ((1 << 16) - alpha); - for (i = 0; i < 2; i++) - { - for (j = 0; j < 16; j++) - { - y[j] = (y[j]*alpha + y1_const)>>16; - } - y += stride; + for (i = 0; i < 2; ++i) { + for (j = 0; j < 16; ++j) { + y[j] = (y[j] * alpha + y1_const) >> 16; } + y += stride; + } - for (i = 0; i < 12; i++) - { - y[0] = (y[0]*alpha + y1_const)>>16; - y[1] = (y[1]*alpha + y1_const)>>16; - y[14] = (y[14]*alpha + y1_const)>>16; - y[15] = (y[15]*alpha + y1_const)>>16; - y += stride; + for (i = 0; i < 12; ++i) { + y[0] = (y[0] * alpha + y1_const) >> 16; + y[1] = (y[1] * alpha + y1_const) >> 16; + y[14] = (y[14] * alpha + y1_const) >> 16; + y[15] = (y[15] * alpha + y1_const) >> 16; + y += stride; + } + + for (i = 0; i < 2; ++i) { + for (j = 0; j < 16; ++j) { + y[j] = (y[j] * alpha + y1_const) >> 16; } + y += stride; + } - for (i = 0; i < 2; i++) - { - for (j = 0; j < 16; j++) - { - y[j] = (y[j]*alpha + y1_const)>>16; - } - y += stride; + stride >>= 1; + + for (j = 0; j < 8; ++j) { + u[j] = (u[j] * alpha + u1_const) >> 16; + v[j] = (v[j] * alpha + v1_const) >> 16; + } + u += stride; + v += stride; + + for (i = 0; i < 6; ++i) { + u[0] = (u[0] * alpha + u1_const) >> 16; + v[0] = (v[0] * alpha 
+ v1_const) >> 16; + + u[7] = (u[7] * alpha + u1_const) >> 16; + v[7] = (v[7] * alpha + v1_const) >> 16; + + u += stride; + v += stride; + } + + for (j = 0; j < 8; ++j) { + u[j] = (u[j] * alpha + u1_const) >> 16; + v[j] = (v[j] * alpha + v1_const) >> 16; + } +} + +void vp8_blend_b_c(unsigned char *y, unsigned char *u, unsigned char *v, + int y_1, int u_1, int v_1, int alpha, int stride) { + int i, j; + int y1_const = y_1 * ((1 << 16) - alpha); + int u1_const = u_1 * ((1 << 16) - alpha); + int v1_const = v_1 * ((1 << 16) - alpha); + + for (i = 0; i < 4; ++i) { + for (j = 0; j < 4; ++j) { + y[j] = (y[j] * alpha + y1_const) >> 16; } + y += stride; + } - stride >>= 1; + stride >>= 1; - for (j = 0; j < 8; j++) - { - u[j] = (u[j]*alpha + u1_const)>>16; - v[j] = (v[j]*alpha + v1_const)>>16; + for (i = 0; i < 2; ++i) { + for (j = 0; j < 2; ++j) { + u[j] = (u[j] * alpha + u1_const) >> 16; + v[j] = (v[j] * alpha + v1_const) >> 16; } u += stride; v += stride; - - for (i = 0; i < 6; i++) - { - u[0] = (u[0]*alpha + u1_const)>>16; - v[0] = (v[0]*alpha + v1_const)>>16; - - u[7] = (u[7]*alpha + u1_const)>>16; - v[7] = (v[7]*alpha + v1_const)>>16; - - u += stride; - v += stride; - } - - for (j = 0; j < 8; j++) - { - u[j] = (u[j]*alpha + u1_const)>>16; - v[j] = (v[j]*alpha + v1_const)>>16; - } + } } -void vp8_blend_b_c (unsigned char *y, unsigned char *u, unsigned char *v, - int y_1, int u_1, int v_1, int alpha, int stride) -{ - int i, j; - int y1_const = y_1*((1<<16)-alpha); - int u1_const = u_1*((1<<16)-alpha); - int v1_const = v_1*((1<<16)-alpha); - - for (i = 0; i < 4; i++) - { - for (j = 0; j < 4; j++) - { - y[j] = (y[j]*alpha + y1_const)>>16; - } - y += stride; - } - - stride >>= 1; - - for (i = 0; i < 2; i++) - { - for (j = 0; j < 2; j++) - { - u[j] = (u[j]*alpha + u1_const)>>16; - v[j] = (v[j]*alpha + v1_const)>>16; - } - u += stride; - v += stride; - } -} - -#if CONFIG_POSTPROC_VISUALIZER -static void constrain_line (int x_0, int *x_1, int y_0, int *y_1, int width, int 
height) -{ - int dx; - int dy; - - if (*x_1 > width) - { - dx = *x_1 - x_0; - dy = *y_1 - y_0; - - *x_1 = width; - if (dx) - *y_1 = ((width-x_0)*dy)/dx + y_0; - } - if (*x_1 < 0) - { - dx = *x_1 - x_0; - dy = *y_1 - y_0; - - *x_1 = 0; - if (dx) - *y_1 = ((0-x_0)*dy)/dx + y_0; - } - if (*y_1 > height) - { - dx = *x_1 - x_0; - dy = *y_1 - y_0; - - *y_1 = height; - if (dy) - *x_1 = ((height-y_0)*dx)/dy + x_0; - } - if (*y_1 < 0) - { - dx = *x_1 - x_0; - dy = *y_1 - y_0; - - *y_1 = 0; - if (dy) - *x_1 = ((0-y_0)*dx)/dy + x_0; - } -} -#endif // CONFIG_POSTPROC_VISUALIZER - #if CONFIG_POSTPROC -int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *ppflags) -{ - int q = oci->filter_level * 10 / 6; - int flags = ppflags->post_proc_flag; - int deblock_level = ppflags->deblocking_level; - int noise_level = ppflags->noise_level; +int vp8_post_proc_frame(VP8_COMMON *oci, YV12_BUFFER_CONFIG *dest, + vp8_ppflags_t *ppflags) { + int q = oci->filter_level * 10 / 6; + int flags = ppflags->post_proc_flag; + int deblock_level = ppflags->deblocking_level; + int noise_level = ppflags->noise_level; - if (!oci->frame_to_show) - return -1; + if (!oci->frame_to_show) return -1; - if (q > 63) - q = 63; + if (q > 63) q = 63; - if (!flags) - { - *dest = *oci->frame_to_show; - - /* handle problem with extending borders */ - dest->y_width = oci->Width; - dest->y_height = oci->Height; - dest->uv_height = dest->y_height / 2; - oci->postproc_state.last_base_qindex = oci->base_qindex; - oci->postproc_state.last_frame_valid = 1; - return 0; - } - - /* Allocate post_proc_buffer_int if needed */ - if ((flags & VP8D_MFQE) && !oci->post_proc_buffer_int_used) - { - if ((flags & VP8D_DEBLOCK) || (flags & VP8D_DEMACROBLOCK)) - { - int width = (oci->Width + 15) & ~15; - int height = (oci->Height + 15) & ~15; - - if (vp8_yv12_alloc_frame_buffer(&oci->post_proc_buffer_int, - width, height, VP8BORDERINPIXELS)) - vpx_internal_error(&oci->error, VPX_CODEC_MEM_ERROR, - "Failed to 
allocate MFQE framebuffer"); - - oci->post_proc_buffer_int_used = 1; - - /* insure that postproc is set to all 0's so that post proc - * doesn't pull random data in from edge - */ - memset((&oci->post_proc_buffer_int)->buffer_alloc,128,(&oci->post_proc_buffer)->frame_size); - - } - } - - vp8_clear_system_state(); - - if ((flags & VP8D_MFQE) && - oci->postproc_state.last_frame_valid && - oci->current_video_frame >= 2 && - oci->postproc_state.last_base_qindex < 60 && - oci->base_qindex - oci->postproc_state.last_base_qindex >= 20) - { - vp8_multiframe_quality_enhance(oci); - if (((flags & VP8D_DEBLOCK) || (flags & VP8D_DEMACROBLOCK)) && - oci->post_proc_buffer_int_used) - { - vp8_yv12_copy_frame(&oci->post_proc_buffer, &oci->post_proc_buffer_int); - if (flags & VP8D_DEMACROBLOCK) - { - vp8_deblock(oci, &oci->post_proc_buffer_int, &oci->post_proc_buffer, - q + (deblock_level - 5) * 10, 1, 0); - vp8_de_mblock(&oci->post_proc_buffer, - q + (deblock_level - 5) * 10); - } - else if (flags & VP8D_DEBLOCK) - { - vp8_deblock(oci, &oci->post_proc_buffer_int, &oci->post_proc_buffer, - q, 1, 0); - } - } - /* Move partially towards the base q of the previous frame */ - oci->postproc_state.last_base_qindex = (3*oci->postproc_state.last_base_qindex + oci->base_qindex)>>2; - } - else if (flags & VP8D_DEMACROBLOCK) - { - vp8_deblock(oci, oci->frame_to_show, &oci->post_proc_buffer, - q + (deblock_level - 5) * 10, 1, 0); - vp8_de_mblock(&oci->post_proc_buffer, q + (deblock_level - 5) * 10); - - oci->postproc_state.last_base_qindex = oci->base_qindex; - } - else if (flags & VP8D_DEBLOCK) - { - vp8_deblock(oci, oci->frame_to_show, &oci->post_proc_buffer, - q, 1, 0); - oci->postproc_state.last_base_qindex = oci->base_qindex; - } - else - { - vp8_yv12_copy_frame(oci->frame_to_show, &oci->post_proc_buffer); - oci->postproc_state.last_base_qindex = oci->base_qindex; - } - oci->postproc_state.last_frame_valid = 1; - - if (flags & VP8D_ADDNOISE) - { - if (oci->postproc_state.last_q != q - || 
oci->postproc_state.last_noise != noise_level) - { - fillrd(&oci->postproc_state, 63 - q, noise_level); - } - - vp8_plane_add_noise - (oci->post_proc_buffer.y_buffer, - oci->postproc_state.noise, - oci->postproc_state.blackclamp, - oci->postproc_state.whiteclamp, - oci->postproc_state.bothclamp, - oci->post_proc_buffer.y_width, oci->post_proc_buffer.y_height, - oci->post_proc_buffer.y_stride); - } - -#if CONFIG_POSTPROC_VISUALIZER - if (flags & VP8D_DEBUG_TXT_FRAME_INFO) - { - char message[512]; - sprintf(message, "F%1dG%1dQ%3dF%3dP%d_s%dx%d", - (oci->frame_type == KEY_FRAME), - oci->refresh_golden_frame, - oci->base_qindex, - oci->filter_level, - flags, - oci->mb_cols, oci->mb_rows); - vp8_blit_text(message, oci->post_proc_buffer.y_buffer, oci->post_proc_buffer.y_stride); - } - - if (flags & VP8D_DEBUG_TXT_MBLK_MODES) - { - int i, j; - unsigned char *y_ptr; - YV12_BUFFER_CONFIG *post = &oci->post_proc_buffer; - int mb_rows = post->y_height >> 4; - int mb_cols = post->y_width >> 4; - int mb_index = 0; - MODE_INFO *mi = oci->mi; - - y_ptr = post->y_buffer + 4 * post->y_stride + 4; - - /* vp8_filter each macro block */ - for (i = 0; i < mb_rows; i++) - { - for (j = 0; j < mb_cols; j++) - { - char zz[4]; - - sprintf(zz, "%c", mi[mb_index].mbmi.mode + 'a'); - - vp8_blit_text(zz, y_ptr, post->y_stride); - mb_index ++; - y_ptr += 16; - } - - mb_index ++; /* border */ - y_ptr += post->y_stride * 16 - post->y_width; - - } - } - - if (flags & VP8D_DEBUG_TXT_DC_DIFF) - { - int i, j; - unsigned char *y_ptr; - YV12_BUFFER_CONFIG *post = &oci->post_proc_buffer; - int mb_rows = post->y_height >> 4; - int mb_cols = post->y_width >> 4; - int mb_index = 0; - MODE_INFO *mi = oci->mi; - - y_ptr = post->y_buffer + 4 * post->y_stride + 4; - - /* vp8_filter each macro block */ - for (i = 0; i < mb_rows; i++) - { - for (j = 0; j < mb_cols; j++) - { - char zz[4]; - int dc_diff = !(mi[mb_index].mbmi.mode != B_PRED && - mi[mb_index].mbmi.mode != SPLITMV && - 
mi[mb_index].mbmi.mb_skip_coeff); - - if (oci->frame_type == KEY_FRAME) - sprintf(zz, "a"); - else - sprintf(zz, "%c", dc_diff + '0'); - - vp8_blit_text(zz, y_ptr, post->y_stride); - mb_index ++; - y_ptr += 16; - } - - mb_index ++; /* border */ - y_ptr += post->y_stride * 16 - post->y_width; - - } - } - - if (flags & VP8D_DEBUG_TXT_RATE_INFO) - { - char message[512]; - sprintf(message, "Bitrate: %10.2f framerate: %10.2f ", oci->bitrate, oci->framerate); - vp8_blit_text(message, oci->post_proc_buffer.y_buffer, oci->post_proc_buffer.y_stride); - } - - /* Draw motion vectors */ - if ((flags & VP8D_DEBUG_DRAW_MV) && ppflags->display_mv_flag) - { - YV12_BUFFER_CONFIG *post = &oci->post_proc_buffer; - int width = post->y_width; - int height = post->y_height; - unsigned char *y_buffer = oci->post_proc_buffer.y_buffer; - int y_stride = oci->post_proc_buffer.y_stride; - MODE_INFO *mi = oci->mi; - int x0, y0; - - for (y0 = 0; y0 < height; y0 += 16) - { - for (x0 = 0; x0 < width; x0 += 16) - { - int x1, y1; - - if (!(ppflags->display_mv_flag & (1<mbmi.mode))) - { - mi++; - continue; - } - - if (mi->mbmi.mode == SPLITMV) - { - switch (mi->mbmi.partitioning) - { - case 0 : /* mv_top_bottom */ - { - union b_mode_info *bmi = &mi->bmi[0]; - MV *mv = &bmi->mv.as_mv; - - x1 = x0 + 8 + (mv->col >> 3); - y1 = y0 + 4 + (mv->row >> 3); - - constrain_line (x0+8, &x1, y0+4, &y1, width, height); - vp8_blit_line (x0+8, x1, y0+4, y1, y_buffer, y_stride); - - bmi = &mi->bmi[8]; - - x1 = x0 + 8 + (mv->col >> 3); - y1 = y0 +12 + (mv->row >> 3); - - constrain_line (x0+8, &x1, y0+12, &y1, width, height); - vp8_blit_line (x0+8, x1, y0+12, y1, y_buffer, y_stride); - - break; - } - case 1 : /* mv_left_right */ - { - union b_mode_info *bmi = &mi->bmi[0]; - MV *mv = &bmi->mv.as_mv; - - x1 = x0 + 4 + (mv->col >> 3); - y1 = y0 + 8 + (mv->row >> 3); - - constrain_line (x0+4, &x1, y0+8, &y1, width, height); - vp8_blit_line (x0+4, x1, y0+8, y1, y_buffer, y_stride); - - bmi = &mi->bmi[2]; - - x1 = x0 +12 + 
(mv->col >> 3); - y1 = y0 + 8 + (mv->row >> 3); - - constrain_line (x0+12, &x1, y0+8, &y1, width, height); - vp8_blit_line (x0+12, x1, y0+8, y1, y_buffer, y_stride); - - break; - } - case 2 : /* mv_quarters */ - { - union b_mode_info *bmi = &mi->bmi[0]; - MV *mv = &bmi->mv.as_mv; - - x1 = x0 + 4 + (mv->col >> 3); - y1 = y0 + 4 + (mv->row >> 3); - - constrain_line (x0+4, &x1, y0+4, &y1, width, height); - vp8_blit_line (x0+4, x1, y0+4, y1, y_buffer, y_stride); - - bmi = &mi->bmi[2]; - - x1 = x0 +12 + (mv->col >> 3); - y1 = y0 + 4 + (mv->row >> 3); - - constrain_line (x0+12, &x1, y0+4, &y1, width, height); - vp8_blit_line (x0+12, x1, y0+4, y1, y_buffer, y_stride); - - bmi = &mi->bmi[8]; - - x1 = x0 + 4 + (mv->col >> 3); - y1 = y0 +12 + (mv->row >> 3); - - constrain_line (x0+4, &x1, y0+12, &y1, width, height); - vp8_blit_line (x0+4, x1, y0+12, y1, y_buffer, y_stride); - - bmi = &mi->bmi[10]; - - x1 = x0 +12 + (mv->col >> 3); - y1 = y0 +12 + (mv->row >> 3); - - constrain_line (x0+12, &x1, y0+12, &y1, width, height); - vp8_blit_line (x0+12, x1, y0+12, y1, y_buffer, y_stride); - break; - } - default : - { - union b_mode_info *bmi = mi->bmi; - int bx0, by0; - - for (by0 = y0; by0 < (y0+16); by0 += 4) - { - for (bx0 = x0; bx0 < (x0+16); bx0 += 4) - { - MV *mv = &bmi->mv.as_mv; - - x1 = bx0 + 2 + (mv->col >> 3); - y1 = by0 + 2 + (mv->row >> 3); - - constrain_line (bx0+2, &x1, by0+2, &y1, width, height); - vp8_blit_line (bx0+2, x1, by0+2, y1, y_buffer, y_stride); - - bmi++; - } - } - } - } - } - else if (mi->mbmi.mode >= NEARESTMV) - { - MV *mv = &mi->mbmi.mv.as_mv; - const int lx0 = x0 + 8; - const int ly0 = y0 + 8; - - x1 = lx0 + (mv->col >> 3); - y1 = ly0 + (mv->row >> 3); - - if (x1 != lx0 && y1 != ly0) - { - constrain_line (lx0, &x1, ly0-1, &y1, width, height); - vp8_blit_line (lx0, x1, ly0-1, y1, y_buffer, y_stride); - - constrain_line (lx0, &x1, ly0+1, &y1, width, height); - vp8_blit_line (lx0, x1, ly0+1, y1, y_buffer, y_stride); - } - else - vp8_blit_line (lx0, x1, 
ly0, y1, y_buffer, y_stride); - } - - mi++; - } - mi++; - } - } - - /* Color in block modes */ - if ((flags & VP8D_DEBUG_CLR_BLK_MODES) - && (ppflags->display_mb_modes_flag || ppflags->display_b_modes_flag)) - { - int y, x; - YV12_BUFFER_CONFIG *post = &oci->post_proc_buffer; - int width = post->y_width; - int height = post->y_height; - unsigned char *y_ptr = oci->post_proc_buffer.y_buffer; - unsigned char *u_ptr = oci->post_proc_buffer.u_buffer; - unsigned char *v_ptr = oci->post_proc_buffer.v_buffer; - int y_stride = oci->post_proc_buffer.y_stride; - MODE_INFO *mi = oci->mi; - - for (y = 0; y < height; y += 16) - { - for (x = 0; x < width; x += 16) - { - int Y = 0, U = 0, V = 0; - - if (mi->mbmi.mode == B_PRED && - ((ppflags->display_mb_modes_flag & B_PRED) || ppflags->display_b_modes_flag)) - { - int by, bx; - unsigned char *yl, *ul, *vl; - union b_mode_info *bmi = mi->bmi; - - yl = y_ptr + x; - ul = u_ptr + (x>>1); - vl = v_ptr + (x>>1); - - for (by = 0; by < 16; by += 4) - { - for (bx = 0; bx < 16; bx += 4) - { - if ((ppflags->display_b_modes_flag & (1<mbmi.mode)) - || (ppflags->display_mb_modes_flag & B_PRED)) - { - Y = B_PREDICTION_MODE_colors[bmi->as_mode][0]; - U = B_PREDICTION_MODE_colors[bmi->as_mode][1]; - V = B_PREDICTION_MODE_colors[bmi->as_mode][2]; - - vp8_blend_b - (yl+bx, ul+(bx>>1), vl+(bx>>1), Y, U, V, 0xc000, y_stride); - } - bmi++; - } - - yl += y_stride*4; - ul += y_stride*1; - vl += y_stride*1; - } - } - else if (ppflags->display_mb_modes_flag & (1<mbmi.mode)) - { - Y = MB_PREDICTION_MODE_colors[mi->mbmi.mode][0]; - U = MB_PREDICTION_MODE_colors[mi->mbmi.mode][1]; - V = MB_PREDICTION_MODE_colors[mi->mbmi.mode][2]; - - vp8_blend_mb_inner - (y_ptr+x, u_ptr+(x>>1), v_ptr+(x>>1), Y, U, V, 0xc000, y_stride); - } - - mi++; - } - y_ptr += y_stride*16; - u_ptr += y_stride*4; - v_ptr += y_stride*4; - - mi++; - } - } - - /* Color in frame reference blocks */ - if ((flags & VP8D_DEBUG_CLR_FRM_REF_BLKS) && ppflags->display_ref_frame_flag) - { - int y, 
x; - YV12_BUFFER_CONFIG *post = &oci->post_proc_buffer; - int width = post->y_width; - int height = post->y_height; - unsigned char *y_ptr = oci->post_proc_buffer.y_buffer; - unsigned char *u_ptr = oci->post_proc_buffer.u_buffer; - unsigned char *v_ptr = oci->post_proc_buffer.v_buffer; - int y_stride = oci->post_proc_buffer.y_stride; - MODE_INFO *mi = oci->mi; - - for (y = 0; y < height; y += 16) - { - for (x = 0; x < width; x +=16) - { - int Y = 0, U = 0, V = 0; - - if (ppflags->display_ref_frame_flag & (1<mbmi.ref_frame)) - { - Y = MV_REFERENCE_FRAME_colors[mi->mbmi.ref_frame][0]; - U = MV_REFERENCE_FRAME_colors[mi->mbmi.ref_frame][1]; - V = MV_REFERENCE_FRAME_colors[mi->mbmi.ref_frame][2]; - - vp8_blend_mb_outer - (y_ptr+x, u_ptr+(x>>1), v_ptr+(x>>1), Y, U, V, 0xc000, y_stride); - } - - mi++; - } - y_ptr += y_stride*16; - u_ptr += y_stride*4; - v_ptr += y_stride*4; - - mi++; - } - } -#endif - - *dest = oci->post_proc_buffer; + if (!flags) { + *dest = *oci->frame_to_show; /* handle problem with extending borders */ dest->y_width = oci->Width; dest->y_height = oci->Height; dest->uv_height = dest->y_height / 2; + oci->postproc_state.last_base_qindex = oci->base_qindex; + oci->postproc_state.last_frame_valid = 1; return 0; + } + if (flags & VP8D_ADDNOISE) { + if (!oci->postproc_state.generated_noise) { + oci->postproc_state.generated_noise = vpx_calloc( + oci->Width + 256, sizeof(*oci->postproc_state.generated_noise)); + if (!oci->postproc_state.generated_noise) return 1; + } + } + + /* Allocate post_proc_buffer_int if needed */ + if ((flags & VP8D_MFQE) && !oci->post_proc_buffer_int_used) { + if ((flags & VP8D_DEBLOCK) || (flags & VP8D_DEMACROBLOCK)) { + int width = (oci->Width + 15) & ~15; + int height = (oci->Height + 15) & ~15; + + if (vp8_yv12_alloc_frame_buffer(&oci->post_proc_buffer_int, width, height, + VP8BORDERINPIXELS)) { + vpx_internal_error(&oci->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate MFQE framebuffer"); + } + + oci->post_proc_buffer_int_used 
= 1; + + /* insure that postproc is set to all 0's so that post proc + * doesn't pull random data in from edge + */ + memset((&oci->post_proc_buffer_int)->buffer_alloc, 128, + (&oci->post_proc_buffer)->frame_size); + } + } + + vp8_clear_system_state(); + + if ((flags & VP8D_MFQE) && oci->postproc_state.last_frame_valid && + oci->current_video_frame >= 2 && + oci->postproc_state.last_base_qindex < 60 && + oci->base_qindex - oci->postproc_state.last_base_qindex >= 20) { + vp8_multiframe_quality_enhance(oci); + if (((flags & VP8D_DEBLOCK) || (flags & VP8D_DEMACROBLOCK)) && + oci->post_proc_buffer_int_used) { + vp8_yv12_copy_frame(&oci->post_proc_buffer, &oci->post_proc_buffer_int); + if (flags & VP8D_DEMACROBLOCK) { + vp8_deblock(oci, &oci->post_proc_buffer_int, &oci->post_proc_buffer, + q + (deblock_level - 5) * 10, 1, 0); + vp8_de_mblock(&oci->post_proc_buffer, q + (deblock_level - 5) * 10); + } else if (flags & VP8D_DEBLOCK) { + vp8_deblock(oci, &oci->post_proc_buffer_int, &oci->post_proc_buffer, q, + 1, 0); + } + } + /* Move partially towards the base q of the previous frame */ + oci->postproc_state.last_base_qindex = + (3 * oci->postproc_state.last_base_qindex + oci->base_qindex) >> 2; + } else if (flags & VP8D_DEMACROBLOCK) { + vp8_deblock(oci, oci->frame_to_show, &oci->post_proc_buffer, + q + (deblock_level - 5) * 10, 1, 0); + vp8_de_mblock(&oci->post_proc_buffer, q + (deblock_level - 5) * 10); + + oci->postproc_state.last_base_qindex = oci->base_qindex; + } else if (flags & VP8D_DEBLOCK) { + vp8_deblock(oci, oci->frame_to_show, &oci->post_proc_buffer, q, 1, 0); + oci->postproc_state.last_base_qindex = oci->base_qindex; + } else { + vp8_yv12_copy_frame(oci->frame_to_show, &oci->post_proc_buffer); + oci->postproc_state.last_base_qindex = oci->base_qindex; + } + oci->postproc_state.last_frame_valid = 1; + + if (flags & VP8D_ADDNOISE) { + if (oci->postproc_state.last_q != q || + oci->postproc_state.last_noise != noise_level) { + double sigma; + struct 
postproc_state *ppstate = &oci->postproc_state; + vp8_clear_system_state(); + sigma = noise_level + .5 + .6 * q / 63.0; + ppstate->clamp = + vpx_setup_noise(sigma, ppstate->generated_noise, oci->Width + 256); + ppstate->last_q = q; + ppstate->last_noise = noise_level; + } + + vpx_plane_add_noise( + oci->post_proc_buffer.y_buffer, oci->postproc_state.generated_noise, + oci->postproc_state.clamp, oci->postproc_state.clamp, + oci->post_proc_buffer.y_width, oci->post_proc_buffer.y_height, + oci->post_proc_buffer.y_stride); + } + + *dest = oci->post_proc_buffer; + + /* handle problem with extending borders */ + dest->y_width = oci->Width; + dest->y_height = oci->Height; + dest->uv_height = dest->y_height / 2; + return 0; } #endif diff --git a/libs/libvpx/vp8/common/postproc.h b/libs/libvpx/vp8/common/postproc.h index 0fa12a7c67..7be112b163 100644 --- a/libs/libvpx/vp8/common/postproc.h +++ b/libs/libvpx/vp8/common/postproc.h @@ -8,21 +8,17 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_COMMON_POSTPROC_H_ #define VP8_COMMON_POSTPROC_H_ #include "vpx_ports/mem.h" -struct postproc_state -{ - int last_q; - int last_noise; - char noise[3072]; - int last_base_qindex; - int last_frame_valid; - DECLARE_ALIGNED(16, char, blackclamp[16]); - DECLARE_ALIGNED(16, char, whiteclamp[16]); - DECLARE_ALIGNED(16, char, bothclamp[16]); +struct postproc_state { + int last_q; + int last_noise; + int last_base_qindex; + int last_frame_valid; + int clamp; + int8_t *generated_noise; }; #include "onyxc_int.h" #include "ppflags.h" @@ -33,21 +29,12 @@ extern "C" { int vp8_post_proc_frame(struct VP8Common *oci, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags); +void vp8_de_noise(struct VP8Common *oci, YV12_BUFFER_CONFIG *source, + YV12_BUFFER_CONFIG *post, int q, int low_var_thresh, int flag, + int uvfilter); -void vp8_de_noise(struct VP8Common *oci, - YV12_BUFFER_CONFIG *source, - YV12_BUFFER_CONFIG *post, - int q, - int low_var_thresh, - int flag, - int uvfilter); - -void vp8_deblock(struct VP8Common *oci, - YV12_BUFFER_CONFIG *source, - YV12_BUFFER_CONFIG *post, - int q, - int low_var_thresh, - int flag); +void vp8_deblock(struct VP8Common *oci, YV12_BUFFER_CONFIG *source, + YV12_BUFFER_CONFIG *post, int q, int low_var_thresh, int flag); #define MFQE_PRECISION 4 diff --git a/libs/libvpx/vp8/common/ppflags.h b/libs/libvpx/vp8/common/ppflags.h index 768224aad5..713f5dffe0 100644 --- a/libs/libvpx/vp8/common/ppflags.h +++ b/libs/libvpx/vp8/common/ppflags.h @@ -8,38 +8,35 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_COMMON_PPFLAGS_H_ #define VP8_COMMON_PPFLAGS_H_ #ifdef __cplusplus extern "C" { #endif -enum -{ - VP8D_NOFILTERING = 0, - VP8D_DEBLOCK = 1<<0, - VP8D_DEMACROBLOCK = 1<<1, - VP8D_ADDNOISE = 1<<2, - VP8D_DEBUG_TXT_FRAME_INFO = 1<<3, - VP8D_DEBUG_TXT_MBLK_MODES = 1<<4, - VP8D_DEBUG_TXT_DC_DIFF = 1<<5, - VP8D_DEBUG_TXT_RATE_INFO = 1<<6, - VP8D_DEBUG_DRAW_MV = 1<<7, - VP8D_DEBUG_CLR_BLK_MODES = 1<<8, - VP8D_DEBUG_CLR_FRM_REF_BLKS = 1<<9, - VP8D_MFQE = 1<<10 +enum { + VP8D_NOFILTERING = 0, + VP8D_DEBLOCK = 1 << 0, + VP8D_DEMACROBLOCK = 1 << 1, + VP8D_ADDNOISE = 1 << 2, + VP8D_DEBUG_TXT_FRAME_INFO = 1 << 3, + VP8D_DEBUG_TXT_MBLK_MODES = 1 << 4, + VP8D_DEBUG_TXT_DC_DIFF = 1 << 5, + VP8D_DEBUG_TXT_RATE_INFO = 1 << 6, + VP8D_DEBUG_DRAW_MV = 1 << 7, + VP8D_DEBUG_CLR_BLK_MODES = 1 << 8, + VP8D_DEBUG_CLR_FRM_REF_BLKS = 1 << 9, + VP8D_MFQE = 1 << 10 }; -typedef struct -{ - int post_proc_flag; - int deblocking_level; - int noise_level; - int display_ref_frame_flag; - int display_mb_modes_flag; - int display_b_modes_flag; - int display_mv_flag; +typedef struct { + int post_proc_flag; + int deblocking_level; + int noise_level; + int display_ref_frame_flag; + int display_mb_modes_flag; + int display_b_modes_flag; + int display_mv_flag; } vp8_ppflags_t; #ifdef __cplusplus diff --git a/libs/libvpx/vp8/common/quant_common.c b/libs/libvpx/vp8/common/quant_common.c index 05f9210702..e290eec92b 100644 --- a/libs/libvpx/vp8/common/quant_common.c +++ b/libs/libvpx/vp8/common/quant_common.c @@ -8,128 +8,123 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "quant_common.h" -static const int dc_qlookup[QINDEX_RANGE] = -{ - 4, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14, 15, 16, 17, 17, - 18, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 25, 25, 26, 27, 28, - 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 46, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, - 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, - 75, 76, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 91, 93, 95, 96, 98, 100, 101, 102, 104, 106, 108, 110, 112, 114, 116, 118, - 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 143, 145, 148, 151, 154, 157, +static const int dc_qlookup[QINDEX_RANGE] = { + 4, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14, 15, 16, 17, + 17, 18, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 25, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 91, 93, 95, 96, 98, 100, 101, 102, 104, + 106, 108, 110, 112, 114, 116, 118, 122, 124, 126, 128, 130, 132, 134, 136, + 138, 140, 143, 145, 148, 151, 154, 157, }; -static const int ac_qlookup[QINDEX_RANGE] = -{ - 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, - 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, - 110, 112, 114, 116, 119, 122, 125, 128, 131, 134, 137, 140, 143, 146, 149, 152, - 155, 158, 161, 164, 167, 170, 173, 177, 181, 185, 189, 193, 197, 201, 205, 209, - 213, 217, 221, 225, 229, 234, 239, 245, 249, 254, 259, 264, 269, 274, 279, 284, +static const int ac_qlookup[QINDEX_RANGE] = { + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 
29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 60, 62, 64, 66, 68, + 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, + 100, 102, 104, 106, 108, 110, 112, 114, 116, 119, 122, 125, 128, 131, 134, + 137, 140, 143, 146, 149, 152, 155, 158, 161, 164, 167, 170, 173, 177, 181, + 185, 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 234, 239, 245, + 249, 254, 259, 264, 269, 274, 279, 284, }; +int vp8_dc_quant(int QIndex, int Delta) { + int retval; -int vp8_dc_quant(int QIndex, int Delta) -{ - int retval; + QIndex = QIndex + Delta; - QIndex = QIndex + Delta; + if (QIndex > 127) { + QIndex = 127; + } else if (QIndex < 0) { + QIndex = 0; + } - if (QIndex > 127) - QIndex = 127; - else if (QIndex < 0) - QIndex = 0; - - retval = dc_qlookup[ QIndex ]; - return retval; + retval = dc_qlookup[QIndex]; + return retval; } -int vp8_dc2quant(int QIndex, int Delta) -{ - int retval; +int vp8_dc2quant(int QIndex, int Delta) { + int retval; - QIndex = QIndex + Delta; + QIndex = QIndex + Delta; - if (QIndex > 127) - QIndex = 127; - else if (QIndex < 0) - QIndex = 0; - - retval = dc_qlookup[ QIndex ] * 2; - return retval; + if (QIndex > 127) { + QIndex = 127; + } else if (QIndex < 0) { + QIndex = 0; + } + retval = dc_qlookup[QIndex] * 2; + return retval; } -int vp8_dc_uv_quant(int QIndex, int Delta) -{ - int retval; +int vp8_dc_uv_quant(int QIndex, int Delta) { + int retval; - QIndex = QIndex + Delta; + QIndex = QIndex + Delta; - if (QIndex > 127) - QIndex = 127; - else if (QIndex < 0) - QIndex = 0; + if (QIndex > 127) { + QIndex = 127; + } else if (QIndex < 0) { + QIndex = 0; + } - retval = dc_qlookup[ QIndex ]; + retval = dc_qlookup[QIndex]; - if (retval > 132) - retval = 132; + if (retval > 132) retval = 132; - return retval; + return retval; } -int vp8_ac_yquant(int QIndex) -{ - int retval; +int vp8_ac_yquant(int QIndex) { + int retval; - if (QIndex > 127) - QIndex = 127; - else if (QIndex < 0) 
- QIndex = 0; + if (QIndex > 127) { + QIndex = 127; + } else if (QIndex < 0) { + QIndex = 0; + } - retval = ac_qlookup[ QIndex ]; - return retval; + retval = ac_qlookup[QIndex]; + return retval; } -int vp8_ac2quant(int QIndex, int Delta) -{ - int retval; +int vp8_ac2quant(int QIndex, int Delta) { + int retval; - QIndex = QIndex + Delta; + QIndex = QIndex + Delta; - if (QIndex > 127) - QIndex = 127; - else if (QIndex < 0) - QIndex = 0; + if (QIndex > 127) { + QIndex = 127; + } else if (QIndex < 0) { + QIndex = 0; + } - /* For all x in [0..284], x*155/100 is bitwise equal to (x*101581) >> 16. - * The smallest precision for that is '(x*6349) >> 12' but 16 is a good - * word size. */ - retval = (ac_qlookup[ QIndex ] * 101581) >> 16; + /* For all x in [0..284], x*155/100 is bitwise equal to (x*101581) >> 16. + * The smallest precision for that is '(x*6349) >> 12' but 16 is a good + * word size. */ + retval = (ac_qlookup[QIndex] * 101581) >> 16; - if (retval < 8) - retval = 8; + if (retval < 8) retval = 8; - return retval; + return retval; } -int vp8_ac_uv_quant(int QIndex, int Delta) -{ - int retval; +int vp8_ac_uv_quant(int QIndex, int Delta) { + int retval; - QIndex = QIndex + Delta; + QIndex = QIndex + Delta; - if (QIndex > 127) - QIndex = 127; - else if (QIndex < 0) - QIndex = 0; + if (QIndex > 127) { + QIndex = 127; + } else if (QIndex < 0) { + QIndex = 0; + } - retval = ac_qlookup[ QIndex ]; - return retval; + retval = ac_qlookup[QIndex]; + return retval; } diff --git a/libs/libvpx/vp8/common/quant_common.h b/libs/libvpx/vp8/common/quant_common.h index 700b5e6d72..ff4203df87 100644 --- a/libs/libvpx/vp8/common/quant_common.h +++ b/libs/libvpx/vp8/common/quant_common.h @@ -11,7 +11,6 @@ #ifndef VP8_COMMON_QUANT_COMMON_H_ #define VP8_COMMON_QUANT_COMMON_H_ - #include "string.h" #include "blockd.h" #include "onyxc_int.h" diff --git a/libs/libvpx/vp8/common/reconinter.c b/libs/libvpx/vp8/common/reconinter.c index e302595587..48892c9b8e 100644 --- 
a/libs/libvpx/vp8/common/reconinter.c +++ b/libs/libvpx/vp8/common/reconinter.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include #include @@ -21,524 +20,477 @@ #include "onyxc_int.h" #endif -void vp8_copy_mem16x16_c( - unsigned char *src, - int src_stride, - unsigned char *dst, - int dst_stride) -{ +void vp8_copy_mem16x16_c(unsigned char *src, int src_stride, unsigned char *dst, + int dst_stride) { + int r; - int r; - - for (r = 0; r < 16; r++) - { - memcpy(dst, src, 16); - - src += src_stride; - dst += dst_stride; - - } + for (r = 0; r < 16; ++r) { + memcpy(dst, src, 16); + src += src_stride; + dst += dst_stride; + } } -void vp8_copy_mem8x8_c( - unsigned char *src, - int src_stride, - unsigned char *dst, - int dst_stride) -{ - int r; +void vp8_copy_mem8x8_c(unsigned char *src, int src_stride, unsigned char *dst, + int dst_stride) { + int r; - for (r = 0; r < 8; r++) - { - memcpy(dst, src, 8); - - src += src_stride; - dst += dst_stride; - - } + for (r = 0; r < 8; ++r) { + memcpy(dst, src, 8); + src += src_stride; + dst += dst_stride; + } } -void vp8_copy_mem8x4_c( - unsigned char *src, - int src_stride, - unsigned char *dst, - int dst_stride) -{ - int r; +void vp8_copy_mem8x4_c(unsigned char *src, int src_stride, unsigned char *dst, + int dst_stride) { + int r; - for (r = 0; r < 4; r++) - { - memcpy(dst, src, 8); - - src += src_stride; - dst += dst_stride; - - } + for (r = 0; r < 4; ++r) { + memcpy(dst, src, 8); + src += src_stride; + dst += dst_stride; + } } +void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre, + int pre_stride, vp8_subpix_fn_t sppf) { + int r; + unsigned char *pred_ptr = d->predictor; + unsigned char *ptr; + ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + + (d->bmi.mv.as_mv.col >> 3); -void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre, int pre_stride, vp8_subpix_fn_t sppf) -{ - int r; - unsigned char *pred_ptr = 
d->predictor; - unsigned char *ptr; - ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3); - - if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) - { - sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, pred_ptr, pitch); - } - else - { - for (r = 0; r < 4; r++) - { - pred_ptr[0] = ptr[0]; - pred_ptr[1] = ptr[1]; - pred_ptr[2] = ptr[2]; - pred_ptr[3] = ptr[3]; - pred_ptr += pitch; - ptr += pre_stride; - } + if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) { + sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, + pred_ptr, pitch); + } else { + for (r = 0; r < 4; ++r) { + pred_ptr[0] = ptr[0]; + pred_ptr[1] = ptr[1]; + pred_ptr[2] = ptr[2]; + pred_ptr[3] = ptr[3]; + pred_ptr += pitch; + ptr += pre_stride; } + } } -static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride) -{ - unsigned char *ptr; - ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3); +static void build_inter_predictors4b(MACROBLOCKD *x, BLOCKD *d, + unsigned char *dst, int dst_stride, + unsigned char *base_pre, int pre_stride) { + unsigned char *ptr; + ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + + (d->bmi.mv.as_mv.col >> 3); - if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) - { - x->subpixel_predict8x8(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride); - } - else - { - vp8_copy_mem8x8(ptr, pre_stride, dst, dst_stride); - } + if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) { + x->subpixel_predict8x8(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, + d->bmi.mv.as_mv.row & 7, dst, dst_stride); + } else { + vp8_copy_mem8x8(ptr, pre_stride, dst, dst_stride); + } } -static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride) -{ - 
unsigned char *ptr; - ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3); +static void build_inter_predictors2b(MACROBLOCKD *x, BLOCKD *d, + unsigned char *dst, int dst_stride, + unsigned char *base_pre, int pre_stride) { + unsigned char *ptr; + ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + + (d->bmi.mv.as_mv.col >> 3); - if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) - { - x->subpixel_predict8x4(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride); - } - else - { - vp8_copy_mem8x4(ptr, pre_stride, dst, dst_stride); - } + if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) { + x->subpixel_predict8x4(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, + d->bmi.mv.as_mv.row & 7, dst, dst_stride); + } else { + vp8_copy_mem8x4(ptr, pre_stride, dst, dst_stride); + } } -static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst, int dst_stride, unsigned char *base_pre, int pre_stride, vp8_subpix_fn_t sppf) -{ - int r; - unsigned char *ptr; - ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + (d->bmi.mv.as_mv.col >> 3); +static void build_inter_predictors_b(BLOCKD *d, unsigned char *dst, + int dst_stride, unsigned char *base_pre, + int pre_stride, vp8_subpix_fn_t sppf) { + int r; + unsigned char *ptr; + ptr = base_pre + d->offset + (d->bmi.mv.as_mv.row >> 3) * pre_stride + + (d->bmi.mv.as_mv.col >> 3); - if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) - { - sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, dst_stride); - } - else - { - for (r = 0; r < 4; r++) - { - dst[0] = ptr[0]; - dst[1] = ptr[1]; - dst[2] = ptr[2]; - dst[3] = ptr[3]; - dst += dst_stride; - ptr += pre_stride; - } - } -} - - -/*encoder only*/ -void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x) -{ - unsigned char *uptr, *vptr; - unsigned char *upred_ptr = &x->predictor[256]; - unsigned char *vpred_ptr = &x->predictor[320]; - - int 
mv_row = x->mode_info_context->mbmi.mv.as_mv.row; - int mv_col = x->mode_info_context->mbmi.mv.as_mv.col; - int offset; - int pre_stride = x->pre.uv_stride; - - /* calc uv motion vectors */ - mv_row += 1 | (mv_row >> (sizeof(int) * CHAR_BIT - 1)); - mv_col += 1 | (mv_col >> (sizeof(int) * CHAR_BIT - 1)); - mv_row /= 2; - mv_col /= 2; - mv_row &= x->fullpixel_mask; - mv_col &= x->fullpixel_mask; - - offset = (mv_row >> 3) * pre_stride + (mv_col >> 3); - uptr = x->pre.u_buffer + offset; - vptr = x->pre.v_buffer + offset; - - if ((mv_row | mv_col) & 7) - { - x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, 8); - x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, 8); - } - else - { - vp8_copy_mem8x8(uptr, pre_stride, upred_ptr, 8); - vp8_copy_mem8x8(vptr, pre_stride, vpred_ptr, 8); + if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7) { + sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst, + dst_stride); + } else { + for (r = 0; r < 4; ++r) { + dst[0] = ptr[0]; + dst[1] = ptr[1]; + dst[2] = ptr[2]; + dst[3] = ptr[3]; + dst += dst_stride; + ptr += pre_stride; } + } } /*encoder only*/ -void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x) -{ - int i, j; - int pre_stride = x->pre.uv_stride; - unsigned char *base_pre; +void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x) { + unsigned char *uptr, *vptr; + unsigned char *upred_ptr = &x->predictor[256]; + unsigned char *vpred_ptr = &x->predictor[320]; - /* build uv mvs */ - for (i = 0; i < 2; i++) - { - for (j = 0; j < 2; j++) - { - int yoffset = i * 8 + j * 2; - int uoffset = 16 + i * 2 + j; - int voffset = 20 + i * 2 + j; + int mv_row = x->mode_info_context->mbmi.mv.as_mv.row; + int mv_col = x->mode_info_context->mbmi.mv.as_mv.col; + int offset; + int pre_stride = x->pre.uv_stride; - int temp; + /* calc uv motion vectors */ + mv_row += 1 | (mv_row >> (sizeof(int) * CHAR_BIT - 1)); + mv_col += 1 | (mv_col >> (sizeof(int) * CHAR_BIT - 1)); 
+ mv_row /= 2; + mv_col /= 2; + mv_row &= x->fullpixel_mask; + mv_col &= x->fullpixel_mask; - temp = x->block[yoffset ].bmi.mv.as_mv.row - + x->block[yoffset+1].bmi.mv.as_mv.row - + x->block[yoffset+4].bmi.mv.as_mv.row - + x->block[yoffset+5].bmi.mv.as_mv.row; + offset = (mv_row >> 3) * pre_stride + (mv_col >> 3); + uptr = x->pre.u_buffer + offset; + vptr = x->pre.v_buffer + offset; - temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8); - - x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask; - - temp = x->block[yoffset ].bmi.mv.as_mv.col - + x->block[yoffset+1].bmi.mv.as_mv.col - + x->block[yoffset+4].bmi.mv.as_mv.col - + x->block[yoffset+5].bmi.mv.as_mv.col; - - temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8); - - x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask; - - x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int; - } - } - - base_pre = x->pre.u_buffer; - for (i = 16; i < 20; i += 2) - { - BLOCKD *d0 = &x->block[i]; - BLOCKD *d1 = &x->block[i+1]; - - if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) - build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride); - else - { - vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, x->subpixel_predict); - vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, x->subpixel_predict); - } - } - - base_pre = x->pre.v_buffer; - for (i = 20; i < 24; i += 2) - { - BLOCKD *d0 = &x->block[i]; - BLOCKD *d1 = &x->block[i+1]; - - if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) - build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride); - else - { - vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, x->subpixel_predict); - vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, x->subpixel_predict); - } - } + if ((mv_row | mv_col) & 7) { + x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, + 8); + x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, + 8); + } else { + 
vp8_copy_mem8x8(uptr, pre_stride, upred_ptr, 8); + vp8_copy_mem8x8(vptr, pre_stride, vpred_ptr, 8); + } } - /*encoder only*/ -void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x, - unsigned char *dst_y, - int dst_ystride) -{ - unsigned char *ptr_base; - unsigned char *ptr; - int mv_row = x->mode_info_context->mbmi.mv.as_mv.row; - int mv_col = x->mode_info_context->mbmi.mv.as_mv.col; - int pre_stride = x->pre.y_stride; +void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x) { + int i, j; + int pre_stride = x->pre.uv_stride; + unsigned char *base_pre; - ptr_base = x->pre.y_buffer; - ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3); + /* build uv mvs */ + for (i = 0; i < 2; ++i) { + for (j = 0; j < 2; ++j) { + int yoffset = i * 8 + j * 2; + int uoffset = 16 + i * 2 + j; + int voffset = 20 + i * 2 + j; - if ((mv_row | mv_col) & 7) - { - x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, - dst_y, dst_ystride); + int temp; + + temp = x->block[yoffset].bmi.mv.as_mv.row + + x->block[yoffset + 1].bmi.mv.as_mv.row + + x->block[yoffset + 4].bmi.mv.as_mv.row + + x->block[yoffset + 5].bmi.mv.as_mv.row; + + temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8); + + x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask; + + temp = x->block[yoffset].bmi.mv.as_mv.col + + x->block[yoffset + 1].bmi.mv.as_mv.col + + x->block[yoffset + 4].bmi.mv.as_mv.col + + x->block[yoffset + 5].bmi.mv.as_mv.col; + + temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8); + + x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask; + + x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int; } - else - { - vp8_copy_mem16x16(ptr, pre_stride, dst_y, - dst_ystride); + } + + base_pre = x->pre.u_buffer; + for (i = 16; i < 20; i += 2) { + BLOCKD *d0 = &x->block[i]; + BLOCKD *d1 = &x->block[i + 1]; + + if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) { + build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride); + } else { + 
vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, + x->subpixel_predict); + vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, + x->subpixel_predict); } + } + + base_pre = x->pre.v_buffer; + for (i = 20; i < 24; i += 2) { + BLOCKD *d0 = &x->block[i]; + BLOCKD *d1 = &x->block[i + 1]; + + if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) { + build_inter_predictors2b(x, d0, d0->predictor, 8, base_pre, pre_stride); + } else { + vp8_build_inter_predictors_b(d0, 8, base_pre, pre_stride, + x->subpixel_predict); + vp8_build_inter_predictors_b(d1, 8, base_pre, pre_stride, + x->subpixel_predict); + } + } } -static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd) -{ - /* If the MV points so far into the UMV border that no visible pixels - * are used for reconstruction, the subpel part of the MV can be - * discarded and the MV limited to 16 pixels with equivalent results. - * - * This limit kicks in at 19 pixels for the top and left edges, for - * the 16 pixels plus 3 taps right of the central pixel when subpel - * filtering. The bottom and right edges use 16 pixels plus 2 pixels - * left of the central pixel when filtering. 
- */ - if (mv->col < (xd->mb_to_left_edge - (19 << 3))) - mv->col = xd->mb_to_left_edge - (16 << 3); - else if (mv->col > xd->mb_to_right_edge + (18 << 3)) - mv->col = xd->mb_to_right_edge + (16 << 3); +/*encoder only*/ +void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x, unsigned char *dst_y, + int dst_ystride) { + unsigned char *ptr_base; + unsigned char *ptr; + int mv_row = x->mode_info_context->mbmi.mv.as_mv.row; + int mv_col = x->mode_info_context->mbmi.mv.as_mv.col; + int pre_stride = x->pre.y_stride; - if (mv->row < (xd->mb_to_top_edge - (19 << 3))) - mv->row = xd->mb_to_top_edge - (16 << 3); - else if (mv->row > xd->mb_to_bottom_edge + (18 << 3)) - mv->row = xd->mb_to_bottom_edge + (16 << 3); + ptr_base = x->pre.y_buffer; + ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3); + + if ((mv_row | mv_col) & 7) { + x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_y, + dst_ystride); + } else { + vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride); + } +} + +static void clamp_mv_to_umv_border(MV *mv, const MACROBLOCKD *xd) { + /* If the MV points so far into the UMV border that no visible pixels + * are used for reconstruction, the subpel part of the MV can be + * discarded and the MV limited to 16 pixels with equivalent results. + * + * This limit kicks in at 19 pixels for the top and left edges, for + * the 16 pixels plus 3 taps right of the central pixel when subpel + * filtering. The bottom and right edges use 16 pixels plus 2 pixels + * left of the central pixel when filtering. 
+ */ + if (mv->col < (xd->mb_to_left_edge - (19 << 3))) { + mv->col = xd->mb_to_left_edge - (16 << 3); + } else if (mv->col > xd->mb_to_right_edge + (18 << 3)) { + mv->col = xd->mb_to_right_edge + (16 << 3); + } + + if (mv->row < (xd->mb_to_top_edge - (19 << 3))) { + mv->row = xd->mb_to_top_edge - (16 << 3); + } else if (mv->row > xd->mb_to_bottom_edge + (18 << 3)) { + mv->row = xd->mb_to_bottom_edge + (16 << 3); + } } /* A version of the above function for chroma block MVs.*/ -static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd) -{ - mv->col = (2*mv->col < (xd->mb_to_left_edge - (19 << 3))) ? - (xd->mb_to_left_edge - (16 << 3)) >> 1 : mv->col; - mv->col = (2*mv->col > xd->mb_to_right_edge + (18 << 3)) ? - (xd->mb_to_right_edge + (16 << 3)) >> 1 : mv->col; +static void clamp_uvmv_to_umv_border(MV *mv, const MACROBLOCKD *xd) { + mv->col = (2 * mv->col < (xd->mb_to_left_edge - (19 << 3))) + ? (xd->mb_to_left_edge - (16 << 3)) >> 1 + : mv->col; + mv->col = (2 * mv->col > xd->mb_to_right_edge + (18 << 3)) + ? (xd->mb_to_right_edge + (16 << 3)) >> 1 + : mv->col; - mv->row = (2*mv->row < (xd->mb_to_top_edge - (19 << 3))) ? - (xd->mb_to_top_edge - (16 << 3)) >> 1 : mv->row; - mv->row = (2*mv->row > xd->mb_to_bottom_edge + (18 << 3)) ? - (xd->mb_to_bottom_edge + (16 << 3)) >> 1 : mv->row; + mv->row = (2 * mv->row < (xd->mb_to_top_edge - (19 << 3))) + ? (xd->mb_to_top_edge - (16 << 3)) >> 1 + : mv->row; + mv->row = (2 * mv->row > xd->mb_to_bottom_edge + (18 << 3)) + ? 
(xd->mb_to_bottom_edge + (16 << 3)) >> 1 + : mv->row; } -void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x, - unsigned char *dst_y, +void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x, unsigned char *dst_y, unsigned char *dst_u, - unsigned char *dst_v, - int dst_ystride, - int dst_uvstride) -{ - int offset; - unsigned char *ptr; - unsigned char *uptr, *vptr; + unsigned char *dst_v, int dst_ystride, + int dst_uvstride) { + int offset; + unsigned char *ptr; + unsigned char *uptr, *vptr; - int_mv _16x16mv; + int_mv _16x16mv; - unsigned char *ptr_base = x->pre.y_buffer; - int pre_stride = x->pre.y_stride; + unsigned char *ptr_base = x->pre.y_buffer; + int pre_stride = x->pre.y_stride; - _16x16mv.as_int = x->mode_info_context->mbmi.mv.as_int; + _16x16mv.as_int = x->mode_info_context->mbmi.mv.as_int; - if (x->mode_info_context->mbmi.need_to_clamp_mvs) - { - clamp_mv_to_umv_border(&_16x16mv.as_mv, x); - } + if (x->mode_info_context->mbmi.need_to_clamp_mvs) { + clamp_mv_to_umv_border(&_16x16mv.as_mv, x); + } - ptr = ptr_base + ( _16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3); + ptr = ptr_base + (_16x16mv.as_mv.row >> 3) * pre_stride + + (_16x16mv.as_mv.col >> 3); - if ( _16x16mv.as_int & 0x00070007) - { - x->subpixel_predict16x16(ptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_y, dst_ystride); - } - else - { - vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride); - } + if (_16x16mv.as_int & 0x00070007) { + x->subpixel_predict16x16(ptr, pre_stride, _16x16mv.as_mv.col & 7, + _16x16mv.as_mv.row & 7, dst_y, dst_ystride); + } else { + vp8_copy_mem16x16(ptr, pre_stride, dst_y, dst_ystride); + } - /* calc uv motion vectors */ - _16x16mv.as_mv.row += 1 | (_16x16mv.as_mv.row >> (sizeof(int) * CHAR_BIT - 1)); - _16x16mv.as_mv.col += 1 | (_16x16mv.as_mv.col >> (sizeof(int) * CHAR_BIT - 1)); - _16x16mv.as_mv.row /= 2; - _16x16mv.as_mv.col /= 2; - _16x16mv.as_mv.row &= x->fullpixel_mask; - _16x16mv.as_mv.col &= x->fullpixel_mask; + /* 
calc uv motion vectors */ + _16x16mv.as_mv.row += + 1 | (_16x16mv.as_mv.row >> (sizeof(int) * CHAR_BIT - 1)); + _16x16mv.as_mv.col += + 1 | (_16x16mv.as_mv.col >> (sizeof(int) * CHAR_BIT - 1)); + _16x16mv.as_mv.row /= 2; + _16x16mv.as_mv.col /= 2; + _16x16mv.as_mv.row &= x->fullpixel_mask; + _16x16mv.as_mv.col &= x->fullpixel_mask; - pre_stride >>= 1; - offset = ( _16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3); - uptr = x->pre.u_buffer + offset; - vptr = x->pre.v_buffer + offset; + pre_stride >>= 1; + offset = (_16x16mv.as_mv.row >> 3) * pre_stride + (_16x16mv.as_mv.col >> 3); + uptr = x->pre.u_buffer + offset; + vptr = x->pre.v_buffer + offset; - if ( _16x16mv.as_int & 0x00070007) - { - x->subpixel_predict8x8(uptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_u, dst_uvstride); - x->subpixel_predict8x8(vptr, pre_stride, _16x16mv.as_mv.col & 7, _16x16mv.as_mv.row & 7, dst_v, dst_uvstride); - } - else - { - vp8_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride); - vp8_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride); - } + if (_16x16mv.as_int & 0x00070007) { + x->subpixel_predict8x8(uptr, pre_stride, _16x16mv.as_mv.col & 7, + _16x16mv.as_mv.row & 7, dst_u, dst_uvstride); + x->subpixel_predict8x8(vptr, pre_stride, _16x16mv.as_mv.col & 7, + _16x16mv.as_mv.row & 7, dst_v, dst_uvstride); + } else { + vp8_copy_mem8x8(uptr, pre_stride, dst_u, dst_uvstride); + vp8_copy_mem8x8(vptr, pre_stride, dst_v, dst_uvstride); + } } -static void build_inter4x4_predictors_mb(MACROBLOCKD *x) -{ - int i; - unsigned char *base_dst = x->dst.y_buffer; - unsigned char *base_pre = x->pre.y_buffer; +static void build_inter4x4_predictors_mb(MACROBLOCKD *x) { + int i; + unsigned char *base_dst = x->dst.y_buffer; + unsigned char *base_pre = x->pre.y_buffer; - if (x->mode_info_context->mbmi.partitioning < 3) - { - BLOCKD *b; - int dst_stride = x->dst.y_stride; + if (x->mode_info_context->mbmi.partitioning < 3) { + BLOCKD *b; + int dst_stride = 
x->dst.y_stride; - x->block[ 0].bmi = x->mode_info_context->bmi[ 0]; - x->block[ 2].bmi = x->mode_info_context->bmi[ 2]; - x->block[ 8].bmi = x->mode_info_context->bmi[ 8]; - x->block[10].bmi = x->mode_info_context->bmi[10]; - if (x->mode_info_context->mbmi.need_to_clamp_mvs) - { - clamp_mv_to_umv_border(&x->block[ 0].bmi.mv.as_mv, x); - clamp_mv_to_umv_border(&x->block[ 2].bmi.mv.as_mv, x); - clamp_mv_to_umv_border(&x->block[ 8].bmi.mv.as_mv, x); - clamp_mv_to_umv_border(&x->block[10].bmi.mv.as_mv, x); - } - - b = &x->block[ 0]; - build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride); - b = &x->block[ 2]; - build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride); - b = &x->block[ 8]; - build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride); - b = &x->block[10]; - build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, dst_stride); - } - else - { - for (i = 0; i < 16; i += 2) - { - BLOCKD *d0 = &x->block[i]; - BLOCKD *d1 = &x->block[i+1]; - int dst_stride = x->dst.y_stride; - - x->block[i+0].bmi = x->mode_info_context->bmi[i+0]; - x->block[i+1].bmi = x->mode_info_context->bmi[i+1]; - if (x->mode_info_context->mbmi.need_to_clamp_mvs) - { - clamp_mv_to_umv_border(&x->block[i+0].bmi.mv.as_mv, x); - clamp_mv_to_umv_border(&x->block[i+1].bmi.mv.as_mv, x); - } - - if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) - build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride); - else - { - build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict); - build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict); - } - - } - - } - base_dst = x->dst.u_buffer; - base_pre = x->pre.u_buffer; - for (i = 16; i < 20; i += 2) - { - BLOCKD *d0 = &x->block[i]; - BLOCKD *d1 = &x->block[i+1]; - int dst_stride = x->dst.uv_stride; - - /* Note: uv mvs already 
clamped in build_4x4uvmvs() */ - - if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) - build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride); - else - { - build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict); - build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict); - } + x->block[0].bmi = x->mode_info_context->bmi[0]; + x->block[2].bmi = x->mode_info_context->bmi[2]; + x->block[8].bmi = x->mode_info_context->bmi[8]; + x->block[10].bmi = x->mode_info_context->bmi[10]; + if (x->mode_info_context->mbmi.need_to_clamp_mvs) { + clamp_mv_to_umv_border(&x->block[0].bmi.mv.as_mv, x); + clamp_mv_to_umv_border(&x->block[2].bmi.mv.as_mv, x); + clamp_mv_to_umv_border(&x->block[8].bmi.mv.as_mv, x); + clamp_mv_to_umv_border(&x->block[10].bmi.mv.as_mv, x); } - base_dst = x->dst.v_buffer; - base_pre = x->pre.v_buffer; - for (i = 20; i < 24; i += 2) - { - BLOCKD *d0 = &x->block[i]; - BLOCKD *d1 = &x->block[i+1]; - int dst_stride = x->dst.uv_stride; + b = &x->block[0]; + build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, + dst_stride); + b = &x->block[2]; + build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, + dst_stride); + b = &x->block[8]; + build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, + dst_stride); + b = &x->block[10]; + build_inter_predictors4b(x, b, base_dst + b->offset, dst_stride, base_pre, + dst_stride); + } else { + for (i = 0; i < 16; i += 2) { + BLOCKD *d0 = &x->block[i]; + BLOCKD *d1 = &x->block[i + 1]; + int dst_stride = x->dst.y_stride; - /* Note: uv mvs already clamped in build_4x4uvmvs() */ + x->block[i + 0].bmi = x->mode_info_context->bmi[i + 0]; + x->block[i + 1].bmi = x->mode_info_context->bmi[i + 1]; + if (x->mode_info_context->mbmi.need_to_clamp_mvs) { + clamp_mv_to_umv_border(&x->block[i + 0].bmi.mv.as_mv, x); + clamp_mv_to_umv_border(&x->block[i + 
1].bmi.mv.as_mv, x); + } - if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) - build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride); - else - { - build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict); - build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, dst_stride, x->subpixel_predict); - } + if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) { + build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, + base_pre, dst_stride); + } else { + build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, + base_pre, dst_stride, x->subpixel_predict); + build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, + base_pre, dst_stride, x->subpixel_predict); + } } + } + base_dst = x->dst.u_buffer; + base_pre = x->pre.u_buffer; + for (i = 16; i < 20; i += 2) { + BLOCKD *d0 = &x->block[i]; + BLOCKD *d1 = &x->block[i + 1]; + int dst_stride = x->dst.uv_stride; + + /* Note: uv mvs already clamped in build_4x4uvmvs() */ + + if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) { + build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, + base_pre, dst_stride); + } else { + build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, + dst_stride, x->subpixel_predict); + build_inter_predictors_b(d1, base_dst + d1->offset, dst_stride, base_pre, + dst_stride, x->subpixel_predict); + } + } + + base_dst = x->dst.v_buffer; + base_pre = x->pre.v_buffer; + for (i = 20; i < 24; i += 2) { + BLOCKD *d0 = &x->block[i]; + BLOCKD *d1 = &x->block[i + 1]; + int dst_stride = x->dst.uv_stride; + + /* Note: uv mvs already clamped in build_4x4uvmvs() */ + + if (d0->bmi.mv.as_int == d1->bmi.mv.as_int) { + build_inter_predictors2b(x, d0, base_dst + d0->offset, dst_stride, + base_pre, dst_stride); + } else { + build_inter_predictors_b(d0, base_dst + d0->offset, dst_stride, base_pre, + dst_stride, x->subpixel_predict); + build_inter_predictors_b(d1, base_dst + d1->offset, 
dst_stride, base_pre, + dst_stride, x->subpixel_predict); + } + } } -static -void build_4x4uvmvs(MACROBLOCKD *x) -{ - int i, j; +static void build_4x4uvmvs(MACROBLOCKD *x) { + int i, j; - for (i = 0; i < 2; i++) - { - for (j = 0; j < 2; j++) - { - int yoffset = i * 8 + j * 2; - int uoffset = 16 + i * 2 + j; - int voffset = 20 + i * 2 + j; + for (i = 0; i < 2; ++i) { + for (j = 0; j < 2; ++j) { + int yoffset = i * 8 + j * 2; + int uoffset = 16 + i * 2 + j; + int voffset = 20 + i * 2 + j; - int temp; + int temp; - temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.row - + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.row - + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.row - + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.row; + temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.row + + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.row + + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.row + + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.row; - temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8); + temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8); - x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask; + x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask; - temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.col - + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.col - + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.col - + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.col; + temp = x->mode_info_context->bmi[yoffset + 0].mv.as_mv.col + + x->mode_info_context->bmi[yoffset + 1].mv.as_mv.col + + x->mode_info_context->bmi[yoffset + 4].mv.as_mv.col + + x->mode_info_context->bmi[yoffset + 5].mv.as_mv.col; - temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8); + temp += 4 + ((temp >> (sizeof(temp) * CHAR_BIT - 1)) * 8); - x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask; + x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask; - if 
(x->mode_info_context->mbmi.need_to_clamp_mvs) - clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.mv.as_mv, x); + if (x->mode_info_context->mbmi.need_to_clamp_mvs) { + clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.mv.as_mv, x); + } - x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int; - } + x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int; } + } } -void vp8_build_inter_predictors_mb(MACROBLOCKD *xd) -{ - if (xd->mode_info_context->mbmi.mode != SPLITMV) - { - vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, - xd->dst.u_buffer, xd->dst.v_buffer, - xd->dst.y_stride, xd->dst.uv_stride); - } - else - { - build_4x4uvmvs(xd); - build_inter4x4_predictors_mb(xd); - } +void vp8_build_inter_predictors_mb(MACROBLOCKD *xd) { + if (xd->mode_info_context->mbmi.mode != SPLITMV) { + vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, xd->dst.u_buffer, + xd->dst.v_buffer, xd->dst.y_stride, + xd->dst.uv_stride); + } else { + build_4x4uvmvs(xd); + build_inter4x4_predictors_mb(xd); + } } diff --git a/libs/libvpx/vp8/common/reconinter.h b/libs/libvpx/vp8/common/reconinter.h index ba979b9664..4cdd4fee0f 100644 --- a/libs/libvpx/vp8/common/reconinter.h +++ b/libs/libvpx/vp8/common/reconinter.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_COMMON_RECONINTER_H_ #define VP8_COMMON_RECONINTER_H_ @@ -17,21 +16,16 @@ extern "C" { #endif extern void vp8_build_inter_predictors_mb(MACROBLOCKD *x); -extern void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x, - unsigned char *dst_y, - unsigned char *dst_u, - unsigned char *dst_v, - int dst_ystride, - int dst_uvstride); - +extern void vp8_build_inter16x16_predictors_mb( + MACROBLOCKD *x, unsigned char *dst_y, unsigned char *dst_u, + unsigned char *dst_v, int dst_ystride, int dst_uvstride); extern void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x, unsigned char *dst_y, int dst_ystride); extern void vp8_build_inter_predictors_b(BLOCKD *d, int pitch, unsigned char *base_pre, - int pre_stride, - vp8_subpix_fn_t sppf); + int pre_stride, vp8_subpix_fn_t sppf); extern void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x); extern void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x); diff --git a/libs/libvpx/vp8/common/reconintra.c b/libs/libvpx/vp8/common/reconintra.c index 356655dac7..986074ec7e 100644 --- a/libs/libvpx/vp8/common/reconintra.c +++ b/libs/libvpx/vp8/common/reconintra.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "./vpx_config.h" #include "./vpx_dsp_rtcd.h" #include "./vp8_rtcd.h" @@ -19,9 +18,9 @@ #include "vp8/common/reconintra4x4.h" enum { - SIZE_16, - SIZE_8, - NUM_SIZES, + SIZE_16, + SIZE_8, + NUM_SIZES, }; typedef void (*intra_pred_fn)(uint8_t *dst, ptrdiff_t stride, @@ -30,88 +29,68 @@ typedef void (*intra_pred_fn)(uint8_t *dst, ptrdiff_t stride, static intra_pred_fn pred[4][NUM_SIZES]; static intra_pred_fn dc_pred[2][2][NUM_SIZES]; -static void vp8_init_intra_predictors_internal(void) -{ -#define INIT_SIZE(sz) \ - pred[V_PRED][SIZE_##sz] = vpx_v_predictor_##sz##x##sz; \ - pred[H_PRED][SIZE_##sz] = vpx_h_predictor_##sz##x##sz; \ - pred[TM_PRED][SIZE_##sz] = vpx_tm_predictor_##sz##x##sz; \ - \ - dc_pred[0][0][SIZE_##sz] = vpx_dc_128_predictor_##sz##x##sz; \ - dc_pred[0][1][SIZE_##sz] = vpx_dc_top_predictor_##sz##x##sz; \ - dc_pred[1][0][SIZE_##sz] = vpx_dc_left_predictor_##sz##x##sz; \ - dc_pred[1][1][SIZE_##sz] = vpx_dc_predictor_##sz##x##sz +static void vp8_init_intra_predictors_internal(void) { +#define INIT_SIZE(sz) \ + pred[V_PRED][SIZE_##sz] = vpx_v_predictor_##sz##x##sz; \ + pred[H_PRED][SIZE_##sz] = vpx_h_predictor_##sz##x##sz; \ + pred[TM_PRED][SIZE_##sz] = vpx_tm_predictor_##sz##x##sz; \ + \ + dc_pred[0][0][SIZE_##sz] = vpx_dc_128_predictor_##sz##x##sz; \ + dc_pred[0][1][SIZE_##sz] = vpx_dc_top_predictor_##sz##x##sz; \ + dc_pred[1][0][SIZE_##sz] = vpx_dc_left_predictor_##sz##x##sz; \ + dc_pred[1][1][SIZE_##sz] = vpx_dc_predictor_##sz##x##sz - INIT_SIZE(16); - INIT_SIZE(8); - vp8_init_intra4x4_predictors_internal(); + INIT_SIZE(16); + INIT_SIZE(8); + vp8_init_intra4x4_predictors_internal(); } -void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x, - unsigned char * yabove_row, - unsigned char * yleft, - int left_stride, - unsigned char * ypred_ptr, - int y_stride) -{ - MB_PREDICTION_MODE mode = x->mode_info_context->mbmi.mode; - DECLARE_ALIGNED(16, uint8_t, yleft_col[16]); - int i; - intra_pred_fn fn; +void 
vp8_build_intra_predictors_mby_s(MACROBLOCKD *x, unsigned char *yabove_row, + unsigned char *yleft, int left_stride, + unsigned char *ypred_ptr, int y_stride) { + MB_PREDICTION_MODE mode = x->mode_info_context->mbmi.mode; + DECLARE_ALIGNED(16, uint8_t, yleft_col[16]); + int i; + intra_pred_fn fn; - for (i = 0; i < 16; i++) - { - yleft_col[i] = yleft[i* left_stride]; - } + for (i = 0; i < 16; ++i) { + yleft_col[i] = yleft[i * left_stride]; + } - if (mode == DC_PRED) - { - fn = dc_pred[x->left_available][x->up_available][SIZE_16]; - } - else - { - fn = pred[mode][SIZE_16]; - } + if (mode == DC_PRED) { + fn = dc_pred[x->left_available][x->up_available][SIZE_16]; + } else { + fn = pred[mode][SIZE_16]; + } - fn(ypred_ptr, y_stride, yabove_row, yleft_col); + fn(ypred_ptr, y_stride, yabove_row, yleft_col); } -void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x, - unsigned char * uabove_row, - unsigned char * vabove_row, - unsigned char * uleft, - unsigned char * vleft, - int left_stride, - unsigned char * upred_ptr, - unsigned char * vpred_ptr, - int pred_stride) -{ - MB_PREDICTION_MODE uvmode = x->mode_info_context->mbmi.uv_mode; - unsigned char uleft_col[8]; - unsigned char vleft_col[8]; - int i; - intra_pred_fn fn; +void vp8_build_intra_predictors_mbuv_s( + MACROBLOCKD *x, unsigned char *uabove_row, unsigned char *vabove_row, + unsigned char *uleft, unsigned char *vleft, int left_stride, + unsigned char *upred_ptr, unsigned char *vpred_ptr, int pred_stride) { + MB_PREDICTION_MODE uvmode = x->mode_info_context->mbmi.uv_mode; + unsigned char uleft_col[8]; + unsigned char vleft_col[8]; + int i; + intra_pred_fn fn; - for (i = 0; i < 8; i++) - { - uleft_col[i] = uleft[i * left_stride]; - vleft_col[i] = vleft[i * left_stride]; - } + for (i = 0; i < 8; ++i) { + uleft_col[i] = uleft[i * left_stride]; + vleft_col[i] = vleft[i * left_stride]; + } - if (uvmode == DC_PRED) - { - fn = dc_pred[x->left_available][x->up_available][SIZE_8]; - } - else - { - fn = pred[uvmode][SIZE_8]; 
- } + if (uvmode == DC_PRED) { + fn = dc_pred[x->left_available][x->up_available][SIZE_8]; + } else { + fn = pred[uvmode][SIZE_8]; + } - fn(upred_ptr, pred_stride, uabove_row, uleft_col); - fn(vpred_ptr, pred_stride, vabove_row, vleft_col); + fn(upred_ptr, pred_stride, uabove_row, uleft_col); + fn(vpred_ptr, pred_stride, vabove_row, vleft_col); } -void vp8_init_intra_predictors(void) -{ - once(vp8_init_intra_predictors_internal); +void vp8_init_intra_predictors(void) { + once(vp8_init_intra_predictors_internal); } diff --git a/libs/libvpx/vp8/common/reconintra.h b/libs/libvpx/vp8/common/reconintra.h index b6225a6637..fd7c725f35 100644 --- a/libs/libvpx/vp8/common/reconintra.h +++ b/libs/libvpx/vp8/common/reconintra.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_COMMON_RECONINTRA_H_ #define VP8_COMMON_RECONINTRA_H_ @@ -18,22 +17,14 @@ extern "C" { #endif -void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x, - unsigned char *yabove_row, - unsigned char *yleft, - int left_stride, - unsigned char *ypred_ptr, - int y_stride); +void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x, unsigned char *yabove_row, + unsigned char *yleft, int left_stride, + unsigned char *ypred_ptr, int y_stride); -void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x, - unsigned char * uabove_row, - unsigned char * vabove_row, - unsigned char * uleft, - unsigned char * vleft, - int left_stride, - unsigned char * upred_ptr, - unsigned char * vpred_ptr, - int pred_stride); +void vp8_build_intra_predictors_mbuv_s( + MACROBLOCKD *x, unsigned char *uabove_row, unsigned char *vabove_row, + unsigned char *uleft, unsigned char *vleft, int left_stride, + unsigned char *upred_ptr, unsigned char *vpred_ptr, int pred_stride); void vp8_init_intra_predictors(void); diff --git a/libs/libvpx/vp8/common/reconintra4x4.c b/libs/libvpx/vp8/common/reconintra4x4.c index 35ad891eff..07c9223331 100644 --- a/libs/libvpx/vp8/common/reconintra4x4.c +++ 
b/libs/libvpx/vp8/common/reconintra4x4.c @@ -14,41 +14,52 @@ #include "./vpx_dsp_rtcd.h" #include "vp8_rtcd.h" #include "blockd.h" +#include "reconintra4x4.h" +#include "vp8/common/common.h" +#include "vpx_ports/mem.h" typedef void (*intra_pred_fn)(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left); static intra_pred_fn pred[10]; -void vp8_init_intra4x4_predictors_internal(void) -{ - pred[B_DC_PRED] = vpx_dc_predictor_4x4; - pred[B_TM_PRED] = vpx_tm_predictor_4x4; - pred[B_VE_PRED] = vpx_ve_predictor_4x4; - pred[B_HE_PRED] = vpx_he_predictor_4x4; - pred[B_LD_PRED] = vpx_d45e_predictor_4x4; - pred[B_RD_PRED] = vpx_d135_predictor_4x4; - pred[B_VR_PRED] = vpx_d117_predictor_4x4; - pred[B_VL_PRED] = vpx_d63f_predictor_4x4; - pred[B_HD_PRED] = vpx_d153_predictor_4x4; - pred[B_HU_PRED] = vpx_d207_predictor_4x4; +void vp8_init_intra4x4_predictors_internal(void) { + pred[B_DC_PRED] = vpx_dc_predictor_4x4; + pred[B_TM_PRED] = vpx_tm_predictor_4x4; + pred[B_VE_PRED] = vpx_ve_predictor_4x4; + pred[B_HE_PRED] = vpx_he_predictor_4x4; + pred[B_LD_PRED] = vpx_d45e_predictor_4x4; + pred[B_RD_PRED] = vpx_d135_predictor_4x4; + pred[B_VR_PRED] = vpx_d117_predictor_4x4; + pred[B_VL_PRED] = vpx_d63f_predictor_4x4; + pred[B_HD_PRED] = vpx_d153_predictor_4x4; + pred[B_HU_PRED] = vpx_d207_predictor_4x4; } -void vp8_intra4x4_predict(unsigned char *above, - unsigned char *yleft, int left_stride, - B_PREDICTION_MODE b_mode, +void vp8_intra4x4_predict(unsigned char *above, unsigned char *yleft, + int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, - unsigned char top_left) -{ - unsigned char Left[4]; - unsigned char Aboveb[12], *Above = Aboveb + 4; + unsigned char top_left) { + unsigned char Aboveb[12], *Above = Aboveb + 4; +#if HAVE_NEON + // Neon intrinsics are unable to load 32 bits, or 4 8 bit values. Instead, it + // over reads but does not use the extra 4 values. 
+ unsigned char Left[8]; +#if VPX_WITH_ASAN + // Silence an 'uninitialized read' warning. Although uninitialized values are + // indeed read, they are not used. + vp8_zero_array(Left, 8); +#endif // VPX_WITH_ASAN +#else + unsigned char Left[4]; +#endif // HAVE_NEON - Left[0] = yleft[0]; - Left[1] = yleft[left_stride]; - Left[2] = yleft[2 * left_stride]; - Left[3] = yleft[3 * left_stride]; - memcpy(Above, above, 8); - Above[-1] = top_left; + Left[0] = yleft[0]; + Left[1] = yleft[left_stride]; + Left[2] = yleft[2 * left_stride]; + Left[3] = yleft[3 * left_stride]; + memcpy(Above, above, 8); + Above[-1] = top_left; - pred[b_mode](dst, dst_stride, Above, Left); + pred[b_mode](dst, dst_stride, Above, Left); } diff --git a/libs/libvpx/vp8/common/reconintra4x4.h b/libs/libvpx/vp8/common/reconintra4x4.h index 5dc5d13a5c..e17fc58c01 100644 --- a/libs/libvpx/vp8/common/reconintra4x4.h +++ b/libs/libvpx/vp8/common/reconintra4x4.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_COMMON_RECONINTRA4X4_H_ #define VP8_COMMON_RECONINTRA4X4_H_ #include "vp8/common/blockd.h" @@ -18,24 +17,22 @@ extern "C" { #endif static INLINE void intra_prediction_down_copy(MACROBLOCKD *xd, - unsigned char *above_right_src) -{ - int dst_stride = xd->dst.y_stride; - unsigned char *above_right_dst = xd->dst.y_buffer - dst_stride + 16; + unsigned char *above_right_src) { + int dst_stride = xd->dst.y_stride; + unsigned char *above_right_dst = xd->dst.y_buffer - dst_stride + 16; - unsigned int *src_ptr = (unsigned int *)above_right_src; - unsigned int *dst_ptr0 = (unsigned int *)(above_right_dst + 4 * dst_stride); - unsigned int *dst_ptr1 = (unsigned int *)(above_right_dst + 8 * dst_stride); - unsigned int *dst_ptr2 = (unsigned int *)(above_right_dst + 12 * dst_stride); + unsigned int *src_ptr = (unsigned int *)above_right_src; + unsigned int *dst_ptr0 = (unsigned int *)(above_right_dst + 4 * dst_stride); + unsigned int *dst_ptr1 = (unsigned int *)(above_right_dst + 8 * dst_stride); + unsigned int *dst_ptr2 = (unsigned int *)(above_right_dst + 12 * dst_stride); - *dst_ptr0 = *src_ptr; - *dst_ptr1 = *src_ptr; - *dst_ptr2 = *src_ptr; + *dst_ptr0 = *src_ptr; + *dst_ptr1 = *src_ptr; + *dst_ptr2 = *src_ptr; } -void vp8_intra4x4_predict(unsigned char *Above, - unsigned char *yleft, int left_stride, - B_PREDICTION_MODE b_mode, +void vp8_intra4x4_predict(unsigned char *Above, unsigned char *yleft, + int left_stride, B_PREDICTION_MODE b_mode, unsigned char *dst, int dst_stride, unsigned char top_left); diff --git a/libs/libvpx/vp8/common/rtcd.c b/libs/libvpx/vp8/common/rtcd.c index ab0e9b47fe..09a0e2b4b3 100644 --- a/libs/libvpx/vp8/common/rtcd.c +++ b/libs/libvpx/vp8/common/rtcd.c @@ -12,8 +12,4 @@ #include "./vp8_rtcd.h" #include "vpx_ports/vpx_once.h" - -void vp8_rtcd() -{ - once(setup_rtcd_internal); -} +void vp8_rtcd() { once(setup_rtcd_internal); } diff --git a/libs/libvpx/vp8/common/rtcd_defs.pl b/libs/libvpx/vp8/common/rtcd_defs.pl index 
6799c2787a..8dc36f7317 100644 --- a/libs/libvpx/vp8/common/rtcd_defs.pl +++ b/libs/libvpx/vp8/common/rtcd_defs.pl @@ -29,81 +29,69 @@ $vp8_clear_system_state_mmx=vpx_reset_mmx_state; # Dequant # add_proto qw/void vp8_dequantize_b/, "struct blockd*, short *dqc"; -specialize qw/vp8_dequantize_b mmx media neon msa/; -$vp8_dequantize_b_media=vp8_dequantize_b_v6; +specialize qw/vp8_dequantize_b mmx neon msa/; add_proto qw/void vp8_dequant_idct_add/, "short *input, short *dq, unsigned char *output, int stride"; -specialize qw/vp8_dequant_idct_add mmx media neon dspr2 msa/; -$vp8_dequant_idct_add_media=vp8_dequant_idct_add_v6; +specialize qw/vp8_dequant_idct_add mmx neon dspr2 msa/; $vp8_dequant_idct_add_dspr2=vp8_dequant_idct_add_dspr2; add_proto qw/void vp8_dequant_idct_add_y_block/, "short *q, short *dq, unsigned char *dst, int stride, char *eobs"; -specialize qw/vp8_dequant_idct_add_y_block mmx sse2 media neon dspr2 msa/; -$vp8_dequant_idct_add_y_block_media=vp8_dequant_idct_add_y_block_v6; +specialize qw/vp8_dequant_idct_add_y_block mmx sse2 neon dspr2 msa/; $vp8_dequant_idct_add_y_block_dspr2=vp8_dequant_idct_add_y_block_dspr2; add_proto qw/void vp8_dequant_idct_add_uv_block/, "short *q, short *dq, unsigned char *dst_u, unsigned char *dst_v, int stride, char *eobs"; -specialize qw/vp8_dequant_idct_add_uv_block mmx sse2 media neon dspr2 msa/; -$vp8_dequant_idct_add_uv_block_media=vp8_dequant_idct_add_uv_block_v6; +specialize qw/vp8_dequant_idct_add_uv_block mmx sse2 neon dspr2 msa/; $vp8_dequant_idct_add_y_block_dspr2=vp8_dequant_idct_add_y_block_dspr2; # # Loopfilter # add_proto qw/void vp8_loop_filter_mbv/, "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"; -specialize qw/vp8_loop_filter_mbv mmx sse2 media neon dspr2 msa/; -$vp8_loop_filter_mbv_media=vp8_loop_filter_mbv_armv6; +specialize qw/vp8_loop_filter_mbv mmx sse2 neon dspr2 msa/; $vp8_loop_filter_mbv_dspr2=vp8_loop_filter_mbv_dspr2; add_proto 
qw/void vp8_loop_filter_bv/, "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"; -specialize qw/vp8_loop_filter_bv mmx sse2 media neon dspr2 msa/; -$vp8_loop_filter_bv_media=vp8_loop_filter_bv_armv6; +specialize qw/vp8_loop_filter_bv mmx sse2 neon dspr2 msa/; $vp8_loop_filter_bv_dspr2=vp8_loop_filter_bv_dspr2; add_proto qw/void vp8_loop_filter_mbh/, "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"; -specialize qw/vp8_loop_filter_mbh mmx sse2 media neon dspr2 msa/; -$vp8_loop_filter_mbh_media=vp8_loop_filter_mbh_armv6; +specialize qw/vp8_loop_filter_mbh mmx sse2 neon dspr2 msa/; $vp8_loop_filter_mbh_dspr2=vp8_loop_filter_mbh_dspr2; add_proto qw/void vp8_loop_filter_bh/, "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"; -specialize qw/vp8_loop_filter_bh mmx sse2 media neon dspr2 msa/; -$vp8_loop_filter_bh_media=vp8_loop_filter_bh_armv6; +specialize qw/vp8_loop_filter_bh mmx sse2 neon dspr2 msa/; $vp8_loop_filter_bh_dspr2=vp8_loop_filter_bh_dspr2; add_proto qw/void vp8_loop_filter_simple_mbv/, "unsigned char *y, int ystride, const unsigned char *blimit"; -specialize qw/vp8_loop_filter_simple_mbv mmx sse2 media neon msa/; +specialize qw/vp8_loop_filter_simple_mbv mmx sse2 neon msa/; $vp8_loop_filter_simple_mbv_c=vp8_loop_filter_simple_vertical_edge_c; $vp8_loop_filter_simple_mbv_mmx=vp8_loop_filter_simple_vertical_edge_mmx; $vp8_loop_filter_simple_mbv_sse2=vp8_loop_filter_simple_vertical_edge_sse2; -$vp8_loop_filter_simple_mbv_media=vp8_loop_filter_simple_vertical_edge_armv6; $vp8_loop_filter_simple_mbv_neon=vp8_loop_filter_mbvs_neon; $vp8_loop_filter_simple_mbv_msa=vp8_loop_filter_simple_vertical_edge_msa; add_proto qw/void vp8_loop_filter_simple_mbh/, "unsigned char *y, int ystride, const unsigned char *blimit"; -specialize qw/vp8_loop_filter_simple_mbh mmx sse2 media neon msa/; 
+specialize qw/vp8_loop_filter_simple_mbh mmx sse2 neon msa/; $vp8_loop_filter_simple_mbh_c=vp8_loop_filter_simple_horizontal_edge_c; $vp8_loop_filter_simple_mbh_mmx=vp8_loop_filter_simple_horizontal_edge_mmx; $vp8_loop_filter_simple_mbh_sse2=vp8_loop_filter_simple_horizontal_edge_sse2; -$vp8_loop_filter_simple_mbh_media=vp8_loop_filter_simple_horizontal_edge_armv6; $vp8_loop_filter_simple_mbh_neon=vp8_loop_filter_mbhs_neon; $vp8_loop_filter_simple_mbh_msa=vp8_loop_filter_simple_horizontal_edge_msa; add_proto qw/void vp8_loop_filter_simple_bv/, "unsigned char *y, int ystride, const unsigned char *blimit"; -specialize qw/vp8_loop_filter_simple_bv mmx sse2 media neon msa/; +specialize qw/vp8_loop_filter_simple_bv mmx sse2 neon msa/; $vp8_loop_filter_simple_bv_c=vp8_loop_filter_bvs_c; $vp8_loop_filter_simple_bv_mmx=vp8_loop_filter_bvs_mmx; $vp8_loop_filter_simple_bv_sse2=vp8_loop_filter_bvs_sse2; -$vp8_loop_filter_simple_bv_media=vp8_loop_filter_bvs_armv6; $vp8_loop_filter_simple_bv_neon=vp8_loop_filter_bvs_neon; $vp8_loop_filter_simple_bv_msa=vp8_loop_filter_bvs_msa; add_proto qw/void vp8_loop_filter_simple_bh/, "unsigned char *y, int ystride, const unsigned char *blimit"; -specialize qw/vp8_loop_filter_simple_bh mmx sse2 media neon msa/; +specialize qw/vp8_loop_filter_simple_bh mmx sse2 neon msa/; $vp8_loop_filter_simple_bh_c=vp8_loop_filter_bhs_c; $vp8_loop_filter_simple_bh_mmx=vp8_loop_filter_bhs_mmx; $vp8_loop_filter_simple_bh_sse2=vp8_loop_filter_bhs_sse2; -$vp8_loop_filter_simple_bh_media=vp8_loop_filter_bhs_armv6; $vp8_loop_filter_simple_bh_neon=vp8_loop_filter_bhs_neon; $vp8_loop_filter_simple_bh_msa=vp8_loop_filter_bhs_msa; @@ -112,8 +100,7 @@ $vp8_loop_filter_simple_bh_msa=vp8_loop_filter_bhs_msa; # #idct16 add_proto qw/void vp8_short_idct4x4llm/, "short *input, unsigned char *pred, int pitch, unsigned char *dst, int dst_stride"; -specialize qw/vp8_short_idct4x4llm mmx media neon dspr2 msa/; -$vp8_short_idct4x4llm_media=vp8_short_idct4x4llm_v6_dual; 
+specialize qw/vp8_short_idct4x4llm mmx neon dspr2 msa/; $vp8_short_idct4x4llm_dspr2=vp8_short_idct4x4llm_dspr2; #iwalsh1 @@ -124,52 +111,33 @@ $vp8_short_inv_walsh4x4_1_dspr2=vp8_short_inv_walsh4x4_1_dspr2; #iwalsh16 add_proto qw/void vp8_short_inv_walsh4x4/, "short *input, short *output"; -specialize qw/vp8_short_inv_walsh4x4 mmx sse2 media neon dspr2 msa/; -$vp8_short_inv_walsh4x4_media=vp8_short_inv_walsh4x4_v6; +specialize qw/vp8_short_inv_walsh4x4 mmx sse2 neon dspr2 msa/; $vp8_short_inv_walsh4x4_dspr2=vp8_short_inv_walsh4x4_dspr2; #idct1_scalar_add add_proto qw/void vp8_dc_only_idct_add/, "short input, unsigned char *pred, int pred_stride, unsigned char *dst, int dst_stride"; -specialize qw/vp8_dc_only_idct_add mmx media neon dspr2 msa/; -$vp8_dc_only_idct_add_media=vp8_dc_only_idct_add_v6; +specialize qw/vp8_dc_only_idct_add mmx neon dspr2 msa/; $vp8_dc_only_idct_add_dspr2=vp8_dc_only_idct_add_dspr2; # # RECON # add_proto qw/void vp8_copy_mem16x16/, "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"; -specialize qw/vp8_copy_mem16x16 mmx sse2 media neon dspr2 msa/; -$vp8_copy_mem16x16_media=vp8_copy_mem16x16_v6; +specialize qw/vp8_copy_mem16x16 mmx sse2 neon dspr2 msa/; $vp8_copy_mem16x16_dspr2=vp8_copy_mem16x16_dspr2; add_proto qw/void vp8_copy_mem8x8/, "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"; -specialize qw/vp8_copy_mem8x8 mmx media neon dspr2 msa/; -$vp8_copy_mem8x8_media=vp8_copy_mem8x8_v6; +specialize qw/vp8_copy_mem8x8 mmx neon dspr2 msa/; $vp8_copy_mem8x8_dspr2=vp8_copy_mem8x8_dspr2; add_proto qw/void vp8_copy_mem8x4/, "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"; -specialize qw/vp8_copy_mem8x4 mmx media neon dspr2 msa/; -$vp8_copy_mem8x4_media=vp8_copy_mem8x4_v6; +specialize qw/vp8_copy_mem8x4 mmx neon dspr2 msa/; $vp8_copy_mem8x4_dspr2=vp8_copy_mem8x4_dspr2; # # Postproc # if (vpx_config("CONFIG_POSTPROC") eq "yes") { - add_proto qw/void vp8_mbpost_proc_down/, "unsigned 
char *dst, int pitch, int rows, int cols,int flimit"; - specialize qw/vp8_mbpost_proc_down mmx sse2 msa/; - $vp8_mbpost_proc_down_sse2=vp8_mbpost_proc_down_xmm; - - add_proto qw/void vp8_mbpost_proc_across_ip/, "unsigned char *dst, int pitch, int rows, int cols,int flimit"; - specialize qw/vp8_mbpost_proc_across_ip sse2 msa/; - $vp8_mbpost_proc_across_ip_sse2=vp8_mbpost_proc_across_ip_xmm; - - add_proto qw/void vp8_post_proc_down_and_across_mb_row/, "unsigned char *src, unsigned char *dst, int src_pitch, int dst_pitch, int cols, unsigned char *flimits, int size"; - specialize qw/vp8_post_proc_down_and_across_mb_row sse2 msa/; - - add_proto qw/void vp8_plane_add_noise/, "unsigned char *s, char *noise, char blackclamp[16], char whiteclamp[16], char bothclamp[16], unsigned int w, unsigned int h, int pitch"; - specialize qw/vp8_plane_add_noise mmx sse2 msa/; - $vp8_plane_add_noise_sse2=vp8_plane_add_noise_wmt; add_proto qw/void vp8_blend_mb_inner/, "unsigned char *y, unsigned char *u, unsigned char *v, int y1, int u1, int v1, int alpha, int stride"; # no asm yet @@ -194,42 +162,34 @@ if (vpx_config("CONFIG_POSTPROC") eq "yes") { # Subpixel # add_proto qw/void vp8_sixtap_predict16x16/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"; -specialize qw/vp8_sixtap_predict16x16 mmx sse2 ssse3 media neon dspr2 msa/; -$vp8_sixtap_predict16x16_media=vp8_sixtap_predict16x16_armv6; +specialize qw/vp8_sixtap_predict16x16 mmx sse2 ssse3 neon dspr2 msa/; $vp8_sixtap_predict16x16_dspr2=vp8_sixtap_predict16x16_dspr2; add_proto qw/void vp8_sixtap_predict8x8/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"; -specialize qw/vp8_sixtap_predict8x8 mmx sse2 ssse3 media neon dspr2 msa/; -$vp8_sixtap_predict8x8_media=vp8_sixtap_predict8x8_armv6; +specialize qw/vp8_sixtap_predict8x8 mmx sse2 ssse3 neon dspr2 msa/; $vp8_sixtap_predict8x8_dspr2=vp8_sixtap_predict8x8_dspr2; add_proto qw/void 
vp8_sixtap_predict8x4/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"; -specialize qw/vp8_sixtap_predict8x4 mmx sse2 ssse3 media neon dspr2 msa/; -$vp8_sixtap_predict8x4_media=vp8_sixtap_predict8x4_armv6; +specialize qw/vp8_sixtap_predict8x4 mmx sse2 ssse3 neon dspr2 msa/; $vp8_sixtap_predict8x4_dspr2=vp8_sixtap_predict8x4_dspr2; +# TODO(johannkoenig): Add neon implementation +# https://bugs.chromium.org/p/webm/issues/detail?id=1273 add_proto qw/void vp8_sixtap_predict4x4/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"; -#TODO(johannkoenig): fix the neon version https://code.google.com/p/webm/issues/detail?id=817 -specialize qw/vp8_sixtap_predict4x4 mmx ssse3 media dspr2 msa/; -$vp8_sixtap_predict4x4_media=vp8_sixtap_predict4x4_armv6; +specialize qw/vp8_sixtap_predict4x4 mmx ssse3 dspr2 msa/; $vp8_sixtap_predict4x4_dspr2=vp8_sixtap_predict4x4_dspr2; add_proto qw/void vp8_bilinear_predict16x16/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"; -specialize qw/vp8_bilinear_predict16x16 mmx sse2 ssse3 media neon msa/; -$vp8_bilinear_predict16x16_media=vp8_bilinear_predict16x16_armv6; +specialize qw/vp8_bilinear_predict16x16 mmx sse2 ssse3 neon msa/; add_proto qw/void vp8_bilinear_predict8x8/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"; -specialize qw/vp8_bilinear_predict8x8 mmx sse2 ssse3 media neon msa/; -$vp8_bilinear_predict8x8_media=vp8_bilinear_predict8x8_armv6; +specialize qw/vp8_bilinear_predict8x8 mmx sse2 ssse3 neon msa/; add_proto qw/void vp8_bilinear_predict8x4/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"; -specialize qw/vp8_bilinear_predict8x4 mmx media neon msa/; -$vp8_bilinear_predict8x4_media=vp8_bilinear_predict8x4_armv6; +specialize qw/vp8_bilinear_predict8x4 mmx neon msa/; add_proto qw/void vp8_bilinear_predict4x4/, 
"unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"; -#TODO(johannkoenig): fix the neon version https://code.google.com/p/webm/issues/detail?id=892 -specialize qw/vp8_bilinear_predict4x4 mmx media msa/; -$vp8_bilinear_predict4x4_media=vp8_bilinear_predict4x4_armv6; +specialize qw/vp8_bilinear_predict4x4 mmx neon msa/; # # Encoder functions below this point. @@ -248,16 +208,13 @@ if ($opts{arch} =~ /x86/) { # Forward DCT # add_proto qw/void vp8_short_fdct4x4/, "short *input, short *output, int pitch"; -specialize qw/vp8_short_fdct4x4 mmx sse2 media neon msa/; -$vp8_short_fdct4x4_media=vp8_short_fdct4x4_armv6; +specialize qw/vp8_short_fdct4x4 mmx sse2 neon msa/; add_proto qw/void vp8_short_fdct8x4/, "short *input, short *output, int pitch"; -specialize qw/vp8_short_fdct8x4 mmx sse2 media neon msa/; -$vp8_short_fdct8x4_media=vp8_short_fdct8x4_armv6; +specialize qw/vp8_short_fdct8x4 mmx sse2 neon msa/; add_proto qw/void vp8_short_walsh4x4/, "short *input, short *output, int pitch"; -specialize qw/vp8_short_walsh4x4 sse2 media neon msa/; -$vp8_short_walsh4x4_media=vp8_short_walsh4x4_armv6; +specialize qw/vp8_short_walsh4x4 sse2 neon msa/; # # Quantizer diff --git a/libs/libvpx/vp8/common/setupintrarecon.c b/libs/libvpx/vp8/common/setupintrarecon.c index 669564db42..dc8a8aae96 100644 --- a/libs/libvpx/vp8/common/setupintrarecon.c +++ b/libs/libvpx/vp8/common/setupintrarecon.c @@ -8,32 +8,31 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "setupintrarecon.h" #include "vpx_mem/vpx_mem.h" -void vp8_setup_intra_recon(YV12_BUFFER_CONFIG *ybf) -{ - int i; +void vp8_setup_intra_recon(YV12_BUFFER_CONFIG *ybf) { + int i; - /* set up frame new frame for intra coded blocks */ - memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5); - for (i = 0; i < ybf->y_height; i++) - ybf->y_buffer[ybf->y_stride *i - 1] = (unsigned char) 129; + /* set up frame new frame for intra coded blocks */ + memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5); + for (i = 0; i < ybf->y_height; ++i) { + ybf->y_buffer[ybf->y_stride * i - 1] = (unsigned char)129; + } - memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5); - for (i = 0; i < ybf->uv_height; i++) - ybf->u_buffer[ybf->uv_stride *i - 1] = (unsigned char) 129; - - memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5); - for (i = 0; i < ybf->uv_height; i++) - ybf->v_buffer[ybf->uv_stride *i - 1] = (unsigned char) 129; + memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5); + for (i = 0; i < ybf->uv_height; ++i) { + ybf->u_buffer[ybf->uv_stride * i - 1] = (unsigned char)129; + } + memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5); + for (i = 0; i < ybf->uv_height; ++i) { + ybf->v_buffer[ybf->uv_stride * i - 1] = (unsigned char)129; + } } -void vp8_setup_intra_recon_top_line(YV12_BUFFER_CONFIG *ybf) -{ - memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5); - memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5); - memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5); +void vp8_setup_intra_recon_top_line(YV12_BUFFER_CONFIG *ybf) { + memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5); + memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5); + memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5); } diff --git a/libs/libvpx/vp8/common/setupintrarecon.h b/libs/libvpx/vp8/common/setupintrarecon.h index 
1857c4e26a..f3ffa16607 100644 --- a/libs/libvpx/vp8/common/setupintrarecon.h +++ b/libs/libvpx/vp8/common/setupintrarecon.h @@ -22,20 +22,15 @@ extern void vp8_setup_intra_recon_top_line(YV12_BUFFER_CONFIG *ybf); static INLINE void setup_intra_recon_left(unsigned char *y_buffer, unsigned char *u_buffer, - unsigned char *v_buffer, - int y_stride, - int uv_stride) -{ - int i; + unsigned char *v_buffer, int y_stride, + int uv_stride) { + int i; - for (i = 0; i < 16; i++) - y_buffer[y_stride *i] = (unsigned char) 129; + for (i = 0; i < 16; ++i) y_buffer[y_stride * i] = (unsigned char)129; - for (i = 0; i < 8; i++) - u_buffer[uv_stride *i] = (unsigned char) 129; + for (i = 0; i < 8; ++i) u_buffer[uv_stride * i] = (unsigned char)129; - for (i = 0; i < 8; i++) - v_buffer[uv_stride *i] = (unsigned char) 129; + for (i = 0; i < 8; ++i) v_buffer[uv_stride * i] = (unsigned char)129; } #ifdef __cplusplus diff --git a/libs/libvpx/vp8/common/swapyv12buffer.c b/libs/libvpx/vp8/common/swapyv12buffer.c index 73656b3d72..5ff21e94a8 100644 --- a/libs/libvpx/vp8/common/swapyv12buffer.c +++ b/libs/libvpx/vp8/common/swapyv12buffer.c @@ -8,27 +8,25 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "swapyv12buffer.h" -void vp8_swap_yv12_buffer(YV12_BUFFER_CONFIG *new_frame, YV12_BUFFER_CONFIG *last_frame) -{ - unsigned char *temp; +void vp8_swap_yv12_buffer(YV12_BUFFER_CONFIG *new_frame, + YV12_BUFFER_CONFIG *last_frame) { + unsigned char *temp; - temp = last_frame->buffer_alloc; - last_frame->buffer_alloc = new_frame->buffer_alloc; - new_frame->buffer_alloc = temp; + temp = last_frame->buffer_alloc; + last_frame->buffer_alloc = new_frame->buffer_alloc; + new_frame->buffer_alloc = temp; - temp = last_frame->y_buffer; - last_frame->y_buffer = new_frame->y_buffer; - new_frame->y_buffer = temp; + temp = last_frame->y_buffer; + last_frame->y_buffer = new_frame->y_buffer; + new_frame->y_buffer = temp; - temp = last_frame->u_buffer; - last_frame->u_buffer = new_frame->u_buffer; - new_frame->u_buffer = temp; - - temp = last_frame->v_buffer; - last_frame->v_buffer = new_frame->v_buffer; - new_frame->v_buffer = temp; + temp = last_frame->u_buffer; + last_frame->u_buffer = new_frame->u_buffer; + new_frame->u_buffer = temp; + temp = last_frame->v_buffer; + last_frame->v_buffer = new_frame->v_buffer; + new_frame->v_buffer = temp; } diff --git a/libs/libvpx/vp8/common/swapyv12buffer.h b/libs/libvpx/vp8/common/swapyv12buffer.h index 1d66cd3d62..0ee9a52ceb 100644 --- a/libs/libvpx/vp8/common/swapyv12buffer.h +++ b/libs/libvpx/vp8/common/swapyv12buffer.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_COMMON_SWAPYV12BUFFER_H_ #define VP8_COMMON_SWAPYV12BUFFER_H_ @@ -18,7 +17,8 @@ extern "C" { #endif -void vp8_swap_yv12_buffer(YV12_BUFFER_CONFIG *new_frame, YV12_BUFFER_CONFIG *last_frame); +void vp8_swap_yv12_buffer(YV12_BUFFER_CONFIG *new_frame, + YV12_BUFFER_CONFIG *last_frame); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/common/textblit.c b/libs/libvpx/vp8/common/textblit.c deleted file mode 100644 index 1756100a7e..0000000000 --- a/libs/libvpx/vp8/common/textblit.c +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - - -void vp8_blit_text(const char *msg, unsigned char *address, const int pitch) -{ - int letter_bitmap; - unsigned char *output_pos = address; - int colpos; - const int font[] = - { - 0x0, 0x5C00, 0x8020, 0xAFABEA, 0xD7EC0, 0x1111111, 0x1855740, 0x18000, - 0x45C0, 0x74400, 0x51140, 0x23880, 0xC4000, 0x21080, 0x80000, 0x111110, - 0xE9D72E, 0x87E40, 0x12AD732, 0xAAD62A, 0x4F94C4, 0x4D6B7, 0x456AA, - 0x3E8423, 0xAAD6AA, 0xAAD6A2, 0x2800, 0x2A00, 0x8A880, 0x52940, 0x22A20, - 0x15422, 0x6AD62E, 0x1E4A53E, 0xAAD6BF, 0x8C62E, 0xE8C63F, 0x118D6BF, - 0x1094BF, 0xCAC62E, 0x1F2109F, 0x118FE31, 0xF8C628, 0x8A89F, 0x108421F, - 0x1F1105F, 0x1F4105F, 0xE8C62E, 0x2294BF, 0x164C62E, 0x12694BF, 0x8AD6A2, - 0x10FC21, 0x1F8421F, 0x744107, 0xF8220F, 0x1151151, 0x117041, 0x119D731, - 0x47E0, 0x1041041, 0xFC400, 0x10440, 0x1084210, 0x820 - }; - colpos = 0; - - while (msg[colpos] != 0) - { - char letter = msg[colpos]; - int fontcol, fontrow; - - if (letter <= 'Z' && letter >= ' ') - letter_bitmap = font[letter-' ']; - else if (letter 
<= 'z' && letter >= 'a') - letter_bitmap = font[letter-'a'+'A' - ' ']; - else - letter_bitmap = font[0]; - - for (fontcol = 6; fontcol >= 0 ; fontcol--) - for (fontrow = 0; fontrow < 5; fontrow++) - output_pos[fontrow *pitch + fontcol] = - ((letter_bitmap >> (fontcol * 5)) & (1 << fontrow) ? 255 : 0); - - output_pos += 7; - colpos++; - } -} - -static void plot (const int x, const int y, unsigned char *image, const int pitch) -{ - image [x+y*pitch] ^= 255; -} - -/* Bresenham line algorithm */ -void vp8_blit_line(int x0, int x1, int y0, int y1, unsigned char *image, const int pitch) -{ - int steep = abs(y1 - y0) > abs(x1 - x0); - int deltax, deltay; - int error, ystep, y, x; - - if (steep) - { - int t; - t = x0; - x0 = y0; - y0 = t; - - t = x1; - x1 = y1; - y1 = t; - } - - if (x0 > x1) - { - int t; - t = x0; - x0 = x1; - x1 = t; - - t = y0; - y0 = y1; - y1 = t; - } - - deltax = x1 - x0; - deltay = abs(y1 - y0); - error = deltax / 2; - - y = y0; - - if (y0 < y1) - ystep = 1; - else - ystep = -1; - - if (steep) - { - for (x = x0; x <= x1; x++) - { - plot(y,x, image, pitch); - - error = error - deltay; - if (error < 0) - { - y = y + ystep; - error = error + deltax; - } - } - } - else - { - for (x = x0; x <= x1; x++) - { - plot(x,y, image, pitch); - - error = error - deltay; - if (error < 0) - { - y = y + ystep; - error = error + deltax; - } - } - } -} diff --git a/libs/libvpx/vp8/common/threading.h b/libs/libvpx/vp8/common/threading.h index 069e26b992..f27b209c40 100644 --- a/libs/libvpx/vp8/common/threading.h +++ b/libs/libvpx/vp8/common/threading.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_COMMON_THREADING_H_ #define VP8_COMMON_THREADING_H_ @@ -25,15 +24,23 @@ extern "C" { /* Win32 */ #include #include +#if defined(__GNUC__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)) +#define THREAD_FUNCTION \ + __attribute__((force_align_arg_pointer)) unsigned int __stdcall +#else #define THREAD_FUNCTION unsigned int __stdcall +#endif #define THREAD_FUNCTION_RETURN DWORD #define THREAD_SPECIFIC_INDEX DWORD #define pthread_t HANDLE #define pthread_attr_t DWORD -#define pthread_detach(thread) if(thread!=NULL)CloseHandle(thread) +#define pthread_detach(thread) \ + if (thread != NULL) CloseHandle(thread) #define thread_sleep(nms) Sleep(nms) -#define pthread_cancel(thread) terminate_thread(thread,0) -#define ts_key_create(ts_key, destructor) {ts_key = TlsAlloc();}; +#define pthread_cancel(thread) terminate_thread(thread, 0) +#define ts_key_create(ts_key, destructor) \ + { ts_key = TlsAlloc(); }; #define pthread_getspecific(ts_key) TlsGetValue(ts_key) #define pthread_setspecific(ts_key, value) TlsSetValue(ts_key, (void *)value) #define pthread_self() GetCurrentThreadId() @@ -44,8 +51,8 @@ extern "C" { #include #include -#define THREAD_FUNCTION void -#define THREAD_FUNCTION_RETURN void +#define THREAD_FUNCTION void * +#define THREAD_FUNCTION_RETURN void * #define THREAD_SPECIFIC_INDEX PULONG #define pthread_t TID #define pthread_attr_t ULONG @@ -53,9 +60,9 @@ extern "C" { #define thread_sleep(nms) DosSleep(nms) #define pthread_cancel(thread) DosKillThread(thread) #define ts_key_create(ts_key, destructor) \ - DosAllocThreadLocalMemory(1, &(ts_key)); + DosAllocThreadLocalMemory(1, &(ts_key)); #define pthread_getspecific(ts_key) ((void *)(*(ts_key))) -#define pthread_setspecific(ts_key, value) (*(ts_key)=(ULONG)(value)) +#define pthread_setspecific(ts_key, value) (*(ts_key) = (ULONG)(value)) #define pthread_self() _gettid() #else #ifdef __APPLE__ @@ -75,85 +82,82 @@ extern "C" { #define THREAD_FUNCTION void * #define 
THREAD_FUNCTION_RETURN void * #define THREAD_SPECIFIC_INDEX pthread_key_t -#define ts_key_create(ts_key, destructor) pthread_key_create (&(ts_key), destructor); +#define ts_key_create(ts_key, destructor) \ + pthread_key_create(&(ts_key), destructor); #endif /* Synchronization macros: Win32 and Pthreads */ #if defined(_WIN32) && !HAVE_PTHREAD_H #define sem_t HANDLE #define pause(voidpara) __asm PAUSE -#define sem_init(sem, sem_attr1, sem_init_value) (int)((*sem = CreateSemaphore(NULL,0,32768,NULL))==NULL) -#define sem_wait(sem) (int)(WAIT_OBJECT_0 != WaitForSingleObject(*sem,INFINITE)) -#define sem_post(sem) ReleaseSemaphore(*sem,1,NULL) -#define sem_destroy(sem) if(*sem)((int)(CloseHandle(*sem))==TRUE) +#define sem_init(sem, sem_attr1, sem_init_value) \ + (int)((*sem = CreateSemaphore(NULL, 0, 32768, NULL)) == NULL) +#define sem_wait(sem) \ + (int)(WAIT_OBJECT_0 != WaitForSingleObject(*sem, INFINITE)) +#define sem_post(sem) ReleaseSemaphore(*sem, 1, NULL) +#define sem_destroy(sem) \ + if (*sem) ((int)(CloseHandle(*sem)) == TRUE) #define thread_sleep(nms) Sleep(nms) #elif defined(__OS2__) -typedef struct -{ - HEV event; - HMTX wait_mutex; - HMTX count_mutex; - int count; +typedef struct { + HEV event; + HMTX wait_mutex; + HMTX count_mutex; + int count; } sem_t; -static inline int sem_init(sem_t *sem, int pshared, unsigned int value) -{ - DosCreateEventSem(NULL, &sem->event, pshared ? DC_SEM_SHARED : 0, - value > 0 ? TRUE : FALSE); - DosCreateMutexSem(NULL, &sem->wait_mutex, 0, FALSE); - DosCreateMutexSem(NULL, &sem->count_mutex, 0, FALSE); +static inline int sem_init(sem_t *sem, int pshared, unsigned int value) { + DosCreateEventSem(NULL, &sem->event, pshared ? DC_SEM_SHARED : 0, + value > 0 ? 
TRUE : FALSE); + DosCreateMutexSem(NULL, &sem->wait_mutex, 0, FALSE); + DosCreateMutexSem(NULL, &sem->count_mutex, 0, FALSE); - sem->count = value; + sem->count = value; - return 0; + return 0; } -static inline int sem_wait(sem_t * sem) -{ - DosRequestMutexSem(sem->wait_mutex, -1); +static inline int sem_wait(sem_t *sem) { + DosRequestMutexSem(sem->wait_mutex, -1); - DosWaitEventSem(sem->event, -1); + DosWaitEventSem(sem->event, -1); - DosRequestMutexSem(sem->count_mutex, -1); + DosRequestMutexSem(sem->count_mutex, -1); - sem->count--; - if (sem->count == 0) - { - ULONG post_count; + sem->count--; + if (sem->count == 0) { + ULONG post_count; - DosResetEventSem(sem->event, &post_count); - } + DosResetEventSem(sem->event, &post_count); + } - DosReleaseMutexSem(sem->count_mutex); + DosReleaseMutexSem(sem->count_mutex); - DosReleaseMutexSem(sem->wait_mutex); + DosReleaseMutexSem(sem->wait_mutex); - return 0; + return 0; } -static inline int sem_post(sem_t * sem) -{ - DosRequestMutexSem(sem->count_mutex, -1); +static inline int sem_post(sem_t *sem) { + DosRequestMutexSem(sem->count_mutex, -1); - if (sem->count < 32768) - { - sem->count++; - DosPostEventSem(sem->event); - } + if (sem->count < 32768) { + sem->count++; + DosPostEventSem(sem->event); + } - DosReleaseMutexSem(sem->count_mutex); + DosReleaseMutexSem(sem->count_mutex); - return 0; + return 0; } -static inline int sem_destroy(sem_t * sem) -{ - DosCloseEventSem(sem->event); - DosCloseMutexSem(sem->wait_mutex); - DosCloseMutexSem(sem->count_mutex); +static inline int sem_destroy(sem_t *sem) { + DosCloseEventSem(sem->event); + DosCloseMutexSem(sem->wait_mutex); + DosCloseMutexSem(sem->count_mutex); - return 0; + return 0; } #define thread_sleep(nms) DosSleep(nms) @@ -162,15 +166,20 @@ static inline int sem_destroy(sem_t * sem) #ifdef __APPLE__ #define sem_t semaphore_t -#define sem_init(X,Y,Z) semaphore_create(mach_task_self(), X, SYNC_POLICY_FIFO, Z) -#define sem_wait(sem) (semaphore_wait(*sem) ) +#define 
sem_init(X, Y, Z) \ + semaphore_create(mach_task_self(), X, SYNC_POLICY_FIFO, Z) +#define sem_wait(sem) (semaphore_wait(*sem)) #define sem_post(sem) semaphore_signal(*sem) -#define sem_destroy(sem) semaphore_destroy(mach_task_self(),*sem) -#define thread_sleep(nms) { struct timespec ts;ts.tv_sec=0; ts.tv_nsec = 1000*nms;nanosleep(&ts, NULL);} +#define sem_destroy(sem) semaphore_destroy(mach_task_self(), *sem) +#define thread_sleep(nms) +/* { struct timespec ts;ts.tv_sec=0; ts.tv_nsec = + 1000*nms;nanosleep(&ts, NULL);} */ #else #include #include -#define thread_sleep(nms) {struct timespec ts;ts.tv_sec=0; ts.tv_nsec = 1000*nms;nanosleep(&ts, NULL);} +#define thread_sleep(nms) sched_yield(); +/* {struct timespec ts;ts.tv_sec=0; + ts.tv_nsec = 1000*nms;nanosleep(&ts, NULL);} */ #endif /* Not Windows. Assume pthreads */ @@ -184,45 +193,6 @@ static inline int sem_destroy(sem_t * sem) #include "vpx_util/vpx_thread.h" -static INLINE void mutex_lock(pthread_mutex_t *const mutex) { - const int kMaxTryLocks = 4000; - int locked = 0; - int i; - - for (i = 0; i < kMaxTryLocks; ++i) { - if (!pthread_mutex_trylock(mutex)) { - locked = 1; - break; - } - } - - if (!locked) - pthread_mutex_lock(mutex); -} - -static INLINE int protected_read(pthread_mutex_t *const mutex, const int *p) { - int ret; - mutex_lock(mutex); - ret = *p; - pthread_mutex_unlock(mutex); - return ret; -} - -static INLINE void sync_read(pthread_mutex_t *const mutex, int mb_col, - const int *last_row_current_mb_col, - const int nsync) { - while (mb_col > (protected_read(mutex, last_row_current_mb_col) - nsync)) { - x86_pause_hint(); - thread_sleep(1); - } -} - -static INLINE void protected_write(pthread_mutex_t *mutex, int *p, int v) { - mutex_lock(mutex); - *p = v; - pthread_mutex_unlock(mutex); -} - #endif /* CONFIG_OS_SUPPORT && CONFIG_MULTITHREAD */ #ifdef __cplusplus diff --git a/libs/libvpx/vp8/common/treecoder.c b/libs/libvpx/vp8/common/treecoder.c index d80c64bdfa..9feb40a5a7 100644 --- 
a/libs/libvpx/vp8/common/treecoder.c +++ b/libs/libvpx/vp8/common/treecoder.c @@ -8,136 +8,94 @@ * be found in the AUTHORS file in the root of the source tree. */ - -#if CONFIG_DEBUG #include -#endif #include -#include "treecoder.h" +#include "vp8/common/treecoder.h" -static void tree2tok( - struct vp8_token_struct *const p, - vp8_tree t, - int i, - int v, - int L -) -{ - v += v; - ++L; +static void tree2tok(struct vp8_token_struct *const p, vp8_tree t, int i, int v, + int L) { + v += v; + ++L; - do - { - const vp8_tree_index j = t[i++]; + do { + const vp8_tree_index j = t[i++]; - if (j <= 0) - { - p[-j].value = v; - p[-j].Len = L; - } - else - tree2tok(p, t, j, v, L); + if (j <= 0) { + p[-j].value = v; + p[-j].Len = L; + } else { + tree2tok(p, t, j, v, L); } - while (++v & 1); + } while (++v & 1); } -void vp8_tokens_from_tree(struct vp8_token_struct *p, vp8_tree t) -{ - tree2tok(p, t, 0, 0, 0); +void vp8_tokens_from_tree(struct vp8_token_struct *p, vp8_tree t) { + tree2tok(p, t, 0, 0, 0); } void vp8_tokens_from_tree_offset(struct vp8_token_struct *p, vp8_tree t, - int offset) -{ - tree2tok(p - offset, t, 0, 0, 0); + int offset) { + tree2tok(p - offset, t, 0, 0, 0); } -static void branch_counts( - int n, /* n = size of alphabet */ - vp8_token tok [ /* n */ ], - vp8_tree tree, - unsigned int branch_ct [ /* n-1 */ ] [2], - const unsigned int num_events[ /* n */ ] -) -{ - const int tree_len = n - 1; - int t = 0; +static void branch_counts(int n, /* n = size of alphabet */ + vp8_token tok[/* n */], vp8_tree tree, + unsigned int branch_ct[/* n-1 */][2], + const unsigned int num_events[/* n */]) { + const int tree_len = n - 1; + int t = 0; -#if CONFIG_DEBUG - assert(tree_len); -#endif + assert(tree_len); - do - { - branch_ct[t][0] = branch_ct[t][1] = 0; - } - while (++t < tree_len); + do { + branch_ct[t][0] = branch_ct[t][1] = 0; + } while (++t < tree_len); - t = 0; + t = 0; - do - { - int L = tok[t].Len; - const int enc = tok[t].value; - const unsigned int ct = 
num_events[t]; + do { + int L = tok[t].Len; + const int enc = tok[t].value; + const unsigned int ct = num_events[t]; - vp8_tree_index i = 0; + vp8_tree_index i = 0; - do - { - const int b = (enc >> --L) & 1; - const int j = i >> 1; -#if CONFIG_DEBUG - assert(j < tree_len && 0 <= L); -#endif + do { + const int b = (enc >> --L) & 1; + const int j = i >> 1; + assert(j < tree_len && 0 <= L); - branch_ct [j] [b] += ct; - i = tree[ i + b]; - } - while (i > 0); - -#if CONFIG_DEBUG - assert(!L); -#endif - } - while (++t < n); + branch_ct[j][b] += ct; + i = tree[i + b]; + } while (i > 0); + assert(!L); + } while (++t < n); } +void vp8_tree_probs_from_distribution(int n, /* n = size of alphabet */ + vp8_token tok[/* n */], vp8_tree tree, + vp8_prob probs[/* n-1 */], + unsigned int branch_ct[/* n-1 */][2], + const unsigned int num_events[/* n */], + unsigned int Pfac, int rd) { + const int tree_len = n - 1; + int t = 0; -void vp8_tree_probs_from_distribution( - int n, /* n = size of alphabet */ - vp8_token tok [ /* n */ ], - vp8_tree tree, - vp8_prob probs [ /* n-1 */ ], - unsigned int branch_ct [ /* n-1 */ ] [2], - const unsigned int num_events[ /* n */ ], - unsigned int Pfac, - int rd -) -{ - const int tree_len = n - 1; - int t = 0; + branch_counts(n, tok, tree, branch_ct, num_events); - branch_counts(n, tok, tree, branch_ct, num_events); + do { + const unsigned int *const c = branch_ct[t]; + const unsigned int tot = c[0] + c[1]; - do - { - const unsigned int *const c = branch_ct[t]; - const unsigned int tot = c[0] + c[1]; + assert(tot < (1 << 24)); /* no overflow below */ -#if CONFIG_DEBUG - assert(tot < (1 << 24)); /* no overflow below */ -#endif - - if (tot) - { - const unsigned int p = ((c[0] * Pfac) + (rd ? tot >> 1 : 0)) / tot; - probs[t] = p < 256 ? (p ? p : 1) : 255; /* agree w/old version for now */ - } - else - probs[t] = vp8_prob_half; + if (tot) { + const unsigned int p = ((c[0] * Pfac) + (rd ? tot >> 1 : 0)) / tot; + probs[t] = p < 256 ? (p ? 
p : 1) : 255; /* agree w/old version for now */ + } else { + probs[t] = vp8_prob_half; } - while (++t < tree_len); + } while (++t < tree_len); } diff --git a/libs/libvpx/vp8/common/treecoder.h b/libs/libvpx/vp8/common/treecoder.h index d22b7c570c..d8503cf3f8 100644 --- a/libs/libvpx/vp8/common/treecoder.h +++ b/libs/libvpx/vp8/common/treecoder.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_COMMON_TREECODER_H_ #define VP8_COMMON_TREECODER_H_ @@ -18,10 +17,9 @@ extern "C" { typedef unsigned char vp8bc_index_t; /* probability index */ - typedef unsigned char vp8_prob; -#define vp8_prob_half ( (vp8_prob) 128) +#define vp8_prob_half ((vp8_prob)128) typedef signed char vp8_tree_index; struct bool_coder_spec; @@ -34,10 +32,7 @@ typedef const bool_coder_spec c_bool_coder_spec; typedef const bool_writer c_bool_writer; typedef const bool_reader c_bool_reader; - - -# define vp8_complement( x) (255 - x) - +#define vp8_complement(x) (255 - x) /* We build coding trees compactly in arrays. Each node of the tree is a pair of vp8_tree_indices. @@ -48,11 +43,9 @@ typedef const bool_reader c_bool_reader; typedef const vp8_tree_index vp8_tree[], *vp8_tree_p; - -typedef const struct vp8_token_struct -{ - int value; - int Len; +typedef const struct vp8_token_struct { + int value; + int Len; } vp8_token; /* Construct encoding array from tree. */ @@ -61,35 +54,26 @@ void vp8_tokens_from_tree(struct vp8_token_struct *, vp8_tree); void vp8_tokens_from_tree_offset(struct vp8_token_struct *, vp8_tree, int offset); - /* Convert array of token occurrence counts into a table of probabilities for the associated binary encoding tree. Also writes count of branches taken for each node on the tree; this facilitiates decisions as to probability updates. 
*/ -void vp8_tree_probs_from_distribution( - int n, /* n = size of alphabet */ - vp8_token tok [ /* n */ ], - vp8_tree tree, - vp8_prob probs [ /* n-1 */ ], - unsigned int branch_ct [ /* n-1 */ ] [2], - const unsigned int num_events[ /* n */ ], - unsigned int Pfactor, - int Round -); +void vp8_tree_probs_from_distribution(int n, /* n = size of alphabet */ + vp8_token tok[/* n */], vp8_tree tree, + vp8_prob probs[/* n-1 */], + unsigned int branch_ct[/* n-1 */][2], + const unsigned int num_events[/* n */], + unsigned int Pfactor, int Round); /* Variant of above using coder spec rather than hardwired 8-bit probs. */ -void vp8bc_tree_probs_from_distribution( - int n, /* n = size of alphabet */ - vp8_token tok [ /* n */ ], - vp8_tree tree, - vp8_prob probs [ /* n-1 */ ], - unsigned int branch_ct [ /* n-1 */ ] [2], - const unsigned int num_events[ /* n */ ], - c_bool_coder_spec *s -); - +void vp8bc_tree_probs_from_distribution(int n, /* n = size of alphabet */ + vp8_token tok[/* n */], vp8_tree tree, + vp8_prob probs[/* n-1 */], + unsigned int branch_ct[/* n-1 */][2], + const unsigned int num_events[/* n */], + c_bool_coder_spec *s); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/common/vp8_entropymodedata.h b/libs/libvpx/vp8/common/vp8_entropymodedata.h index c4aed49897..9a81ebfe62 100644 --- a/libs/libvpx/vp8/common/vp8_entropymodedata.h +++ b/libs/libvpx/vp8/common/vp8_entropymodedata.h @@ -17,235 +17,153 @@ extern "C" { /*Generated file, included by entropymode.c*/ - -const struct vp8_token_struct vp8_bmode_encodings[VP8_BINTRAMODES] = -{ - { 0, 1 }, - { 2, 2 }, - { 6, 3 }, - { 28, 5 }, - { 30, 5 }, - { 58, 6 }, - { 59, 6 }, - { 62, 6 }, - { 126, 7 }, - { 127, 7 } +const struct vp8_token_struct vp8_bmode_encodings[VP8_BINTRAMODES] = { + { 0, 1 }, { 2, 2 }, { 6, 3 }, { 28, 5 }, { 30, 5 }, + { 58, 6 }, { 59, 6 }, { 62, 6 }, { 126, 7 }, { 127, 7 } }; -const struct vp8_token_struct vp8_ymode_encodings[VP8_YMODES] = -{ - { 0, 1 }, - { 4, 3 }, - { 5, 3 }, 
- { 6, 3 }, - { 7, 3 } +const struct vp8_token_struct vp8_ymode_encodings[VP8_YMODES] = { + { 0, 1 }, { 4, 3 }, { 5, 3 }, { 6, 3 }, { 7, 3 } }; -const struct vp8_token_struct vp8_kf_ymode_encodings[VP8_YMODES] = -{ - { 4, 3 }, - { 5, 3 }, - { 6, 3 }, - { 7, 3 }, - { 0, 1 } +const struct vp8_token_struct vp8_kf_ymode_encodings[VP8_YMODES] = { + { 4, 3 }, { 5, 3 }, { 6, 3 }, { 7, 3 }, { 0, 1 } }; -const struct vp8_token_struct vp8_uv_mode_encodings[VP8_UV_MODES] = -{ - { 0, 1 }, - { 2, 2 }, - { 6, 3 }, - { 7, 3 } +const struct vp8_token_struct vp8_uv_mode_encodings[VP8_UV_MODES] = { + { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 } }; -const struct vp8_token_struct vp8_mbsplit_encodings[VP8_NUMMBSPLITS] = -{ - { 6, 3 }, - { 7, 3 }, - { 2, 2 }, - { 0, 1 } +const struct vp8_token_struct vp8_mbsplit_encodings[VP8_NUMMBSPLITS] = { + { 6, 3 }, { 7, 3 }, { 2, 2 }, { 0, 1 } }; -const struct vp8_token_struct vp8_mv_ref_encoding_array[VP8_MVREFS] = -{ - { 2, 2 }, - { 6, 3 }, - { 0, 1 }, - { 14, 4 }, - { 15, 4 } +const struct vp8_token_struct vp8_mv_ref_encoding_array[VP8_MVREFS] = { + { 2, 2 }, { 6, 3 }, { 0, 1 }, { 14, 4 }, { 15, 4 } }; -const struct vp8_token_struct vp8_sub_mv_ref_encoding_array[VP8_SUBMVREFS] = -{ - { 0, 1 }, - { 2, 2 }, - { 6, 3 }, - { 7, 3 } +const struct vp8_token_struct vp8_sub_mv_ref_encoding_array[VP8_SUBMVREFS] = { + { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 } }; -const struct vp8_token_struct vp8_small_mvencodings[8] = -{ - { 0, 3 }, - { 1, 3 }, - { 2, 3 }, - { 3, 3 }, - { 4, 3 }, - { 5, 3 }, - { 6, 3 }, - { 7, 3 } +const struct vp8_token_struct vp8_small_mvencodings[8] = { + { 0, 3 }, { 1, 3 }, { 2, 3 }, { 3, 3 }, { 4, 3 }, { 5, 3 }, { 6, 3 }, { 7, 3 } }; -const vp8_prob vp8_ymode_prob[VP8_YMODES-1] = -{ - 112, 86, 140, 37 -}; +const vp8_prob vp8_ymode_prob[VP8_YMODES - 1] = { 112, 86, 140, 37 }; -const vp8_prob vp8_kf_ymode_prob[VP8_YMODES-1] = -{ - 145, 156, 163, 128 -}; +const vp8_prob vp8_kf_ymode_prob[VP8_YMODES - 1] = { 145, 156, 163, 128 }; -const 
vp8_prob vp8_uv_mode_prob[VP8_UV_MODES-1] = -{ - 162, 101, 204 -}; +const vp8_prob vp8_uv_mode_prob[VP8_UV_MODES - 1] = { 162, 101, 204 }; -const vp8_prob vp8_kf_uv_mode_prob[VP8_UV_MODES-1] = -{ - 142, 114, 183 -}; +const vp8_prob vp8_kf_uv_mode_prob[VP8_UV_MODES - 1] = { 142, 114, 183 }; -const vp8_prob vp8_bmode_prob[VP8_BINTRAMODES-1] = -{ - 120, 90, 79, 133, 87, 85, 80, 111, 151 -}; +const vp8_prob vp8_bmode_prob[VP8_BINTRAMODES - 1] = { 120, 90, 79, 133, 87, + 85, 80, 111, 151 }; - - -const vp8_prob vp8_kf_bmode_prob -[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES-1] = -{ - { - { 231, 120, 48, 89, 115, 113, 120, 152, 112 }, - { 152, 179, 64, 126, 170, 118, 46, 70, 95 }, - { 175, 69, 143, 80, 85, 82, 72, 155, 103 }, - { 56, 58, 10, 171, 218, 189, 17, 13, 152 }, - { 144, 71, 10, 38, 171, 213, 144, 34, 26 }, - { 114, 26, 17, 163, 44, 195, 21, 10, 173 }, - { 121, 24, 80, 195, 26, 62, 44, 64, 85 }, - { 170, 46, 55, 19, 136, 160, 33, 206, 71 }, - { 63, 20, 8, 114, 114, 208, 12, 9, 226 }, - { 81, 40, 11, 96, 182, 84, 29, 16, 36 } - }, - { - { 134, 183, 89, 137, 98, 101, 106, 165, 148 }, - { 72, 187, 100, 130, 157, 111, 32, 75, 80 }, - { 66, 102, 167, 99, 74, 62, 40, 234, 128 }, - { 41, 53, 9, 178, 241, 141, 26, 8, 107 }, - { 104, 79, 12, 27, 217, 255, 87, 17, 7 }, - { 74, 43, 26, 146, 73, 166, 49, 23, 157 }, - { 65, 38, 105, 160, 51, 52, 31, 115, 128 }, - { 87, 68, 71, 44, 114, 51, 15, 186, 23 }, - { 47, 41, 14, 110, 182, 183, 21, 17, 194 }, - { 66, 45, 25, 102, 197, 189, 23, 18, 22 } - }, - { - { 88, 88, 147, 150, 42, 46, 45, 196, 205 }, - { 43, 97, 183, 117, 85, 38, 35, 179, 61 }, - { 39, 53, 200, 87, 26, 21, 43, 232, 171 }, - { 56, 34, 51, 104, 114, 102, 29, 93, 77 }, - { 107, 54, 32, 26, 51, 1, 81, 43, 31 }, - { 39, 28, 85, 171, 58, 165, 90, 98, 64 }, - { 34, 22, 116, 206, 23, 34, 43, 166, 73 }, - { 68, 25, 106, 22, 64, 171, 36, 225, 114 }, - { 34, 19, 21, 102, 132, 188, 16, 76, 124 }, - { 62, 18, 78, 95, 85, 57, 50, 48, 51 } - }, - { - { 193, 101, 35, 
159, 215, 111, 89, 46, 111 }, - { 60, 148, 31, 172, 219, 228, 21, 18, 111 }, - { 112, 113, 77, 85, 179, 255, 38, 120, 114 }, - { 40, 42, 1, 196, 245, 209, 10, 25, 109 }, - { 100, 80, 8, 43, 154, 1, 51, 26, 71 }, - { 88, 43, 29, 140, 166, 213, 37, 43, 154 }, - { 61, 63, 30, 155, 67, 45, 68, 1, 209 }, - { 142, 78, 78, 16, 255, 128, 34, 197, 171 }, - { 41, 40, 5, 102, 211, 183, 4, 1, 221 }, - { 51, 50, 17, 168, 209, 192, 23, 25, 82 } - }, - { - { 125, 98, 42, 88, 104, 85, 117, 175, 82 }, - { 95, 84, 53, 89, 128, 100, 113, 101, 45 }, - { 75, 79, 123, 47, 51, 128, 81, 171, 1 }, - { 57, 17, 5, 71, 102, 57, 53, 41, 49 }, - { 115, 21, 2, 10, 102, 255, 166, 23, 6 }, - { 38, 33, 13, 121, 57, 73, 26, 1, 85 }, - { 41, 10, 67, 138, 77, 110, 90, 47, 114 }, - { 101, 29, 16, 10, 85, 128, 101, 196, 26 }, - { 57, 18, 10, 102, 102, 213, 34, 20, 43 }, - { 117, 20, 15, 36, 163, 128, 68, 1, 26 } - }, - { - { 138, 31, 36, 171, 27, 166, 38, 44, 229 }, - { 67, 87, 58, 169, 82, 115, 26, 59, 179 }, - { 63, 59, 90, 180, 59, 166, 93, 73, 154 }, - { 40, 40, 21, 116, 143, 209, 34, 39, 175 }, - { 57, 46, 22, 24, 128, 1, 54, 17, 37 }, - { 47, 15, 16, 183, 34, 223, 49, 45, 183 }, - { 46, 17, 33, 183, 6, 98, 15, 32, 183 }, - { 65, 32, 73, 115, 28, 128, 23, 128, 205 }, - { 40, 3, 9, 115, 51, 192, 18, 6, 223 }, - { 87, 37, 9, 115, 59, 77, 64, 21, 47 } - }, - { - { 104, 55, 44, 218, 9, 54, 53, 130, 226 }, - { 64, 90, 70, 205, 40, 41, 23, 26, 57 }, - { 54, 57, 112, 184, 5, 41, 38, 166, 213 }, - { 30, 34, 26, 133, 152, 116, 10, 32, 134 }, - { 75, 32, 12, 51, 192, 255, 160, 43, 51 }, - { 39, 19, 53, 221, 26, 114, 32, 73, 255 }, - { 31, 9, 65, 234, 2, 15, 1, 118, 73 }, - { 88, 31, 35, 67, 102, 85, 55, 186, 85 }, - { 56, 21, 23, 111, 59, 205, 45, 37, 192 }, - { 55, 38, 70, 124, 73, 102, 1, 34, 98 } - }, - { - { 102, 61, 71, 37, 34, 53, 31, 243, 192 }, - { 69, 60, 71, 38, 73, 119, 28, 222, 37 }, - { 68, 45, 128, 34, 1, 47, 11, 245, 171 }, - { 62, 17, 19, 70, 146, 85, 55, 62, 70 }, - { 75, 15, 9, 9, 64, 255, 
184, 119, 16 }, - { 37, 43, 37, 154, 100, 163, 85, 160, 1 }, - { 63, 9, 92, 136, 28, 64, 32, 201, 85 }, - { 86, 6, 28, 5, 64, 255, 25, 248, 1 }, - { 56, 8, 17, 132, 137, 255, 55, 116, 128 }, - { 58, 15, 20, 82, 135, 57, 26, 121, 40 } - }, - { - { 164, 50, 31, 137, 154, 133, 25, 35, 218 }, - { 51, 103, 44, 131, 131, 123, 31, 6, 158 }, - { 86, 40, 64, 135, 148, 224, 45, 183, 128 }, - { 22, 26, 17, 131, 240, 154, 14, 1, 209 }, - { 83, 12, 13, 54, 192, 255, 68, 47, 28 }, - { 45, 16, 21, 91, 64, 222, 7, 1, 197 }, - { 56, 21, 39, 155, 60, 138, 23, 102, 213 }, - { 85, 26, 85, 85, 128, 128, 32, 146, 171 }, - { 18, 11, 7, 63, 144, 171, 4, 4, 246 }, - { 35, 27, 10, 146, 174, 171, 12, 26, 128 } - }, - { - { 190, 80, 35, 99, 180, 80, 126, 54, 45 }, - { 85, 126, 47, 87, 176, 51, 41, 20, 32 }, - { 101, 75, 128, 139, 118, 146, 116, 128, 85 }, - { 56, 41, 15, 176, 236, 85, 37, 9, 62 }, - { 146, 36, 19, 30, 171, 255, 97, 27, 20 }, - { 71, 30, 17, 119, 118, 255, 17, 18, 138 }, - { 101, 38, 60, 138, 55, 70, 43, 26, 142 }, - { 138, 45, 61, 62, 219, 1, 81, 188, 64 }, - { 32, 41, 20, 117, 151, 142, 20, 21, 163 }, - { 112, 19, 12, 61, 195, 128, 48, 4, 24 } - } -}; +const vp8_prob + vp8_kf_bmode_prob[VP8_BINTRAMODES][VP8_BINTRAMODES][VP8_BINTRAMODES - 1] = { + { { 231, 120, 48, 89, 115, 113, 120, 152, 112 }, + { 152, 179, 64, 126, 170, 118, 46, 70, 95 }, + { 175, 69, 143, 80, 85, 82, 72, 155, 103 }, + { 56, 58, 10, 171, 218, 189, 17, 13, 152 }, + { 144, 71, 10, 38, 171, 213, 144, 34, 26 }, + { 114, 26, 17, 163, 44, 195, 21, 10, 173 }, + { 121, 24, 80, 195, 26, 62, 44, 64, 85 }, + { 170, 46, 55, 19, 136, 160, 33, 206, 71 }, + { 63, 20, 8, 114, 114, 208, 12, 9, 226 }, + { 81, 40, 11, 96, 182, 84, 29, 16, 36 } }, + { { 134, 183, 89, 137, 98, 101, 106, 165, 148 }, + { 72, 187, 100, 130, 157, 111, 32, 75, 80 }, + { 66, 102, 167, 99, 74, 62, 40, 234, 128 }, + { 41, 53, 9, 178, 241, 141, 26, 8, 107 }, + { 104, 79, 12, 27, 217, 255, 87, 17, 7 }, + { 74, 43, 26, 146, 73, 166, 49, 23, 157 }, + { 
65, 38, 105, 160, 51, 52, 31, 115, 128 }, + { 87, 68, 71, 44, 114, 51, 15, 186, 23 }, + { 47, 41, 14, 110, 182, 183, 21, 17, 194 }, + { 66, 45, 25, 102, 197, 189, 23, 18, 22 } }, + { { 88, 88, 147, 150, 42, 46, 45, 196, 205 }, + { 43, 97, 183, 117, 85, 38, 35, 179, 61 }, + { 39, 53, 200, 87, 26, 21, 43, 232, 171 }, + { 56, 34, 51, 104, 114, 102, 29, 93, 77 }, + { 107, 54, 32, 26, 51, 1, 81, 43, 31 }, + { 39, 28, 85, 171, 58, 165, 90, 98, 64 }, + { 34, 22, 116, 206, 23, 34, 43, 166, 73 }, + { 68, 25, 106, 22, 64, 171, 36, 225, 114 }, + { 34, 19, 21, 102, 132, 188, 16, 76, 124 }, + { 62, 18, 78, 95, 85, 57, 50, 48, 51 } }, + { { 193, 101, 35, 159, 215, 111, 89, 46, 111 }, + { 60, 148, 31, 172, 219, 228, 21, 18, 111 }, + { 112, 113, 77, 85, 179, 255, 38, 120, 114 }, + { 40, 42, 1, 196, 245, 209, 10, 25, 109 }, + { 100, 80, 8, 43, 154, 1, 51, 26, 71 }, + { 88, 43, 29, 140, 166, 213, 37, 43, 154 }, + { 61, 63, 30, 155, 67, 45, 68, 1, 209 }, + { 142, 78, 78, 16, 255, 128, 34, 197, 171 }, + { 41, 40, 5, 102, 211, 183, 4, 1, 221 }, + { 51, 50, 17, 168, 209, 192, 23, 25, 82 } }, + { { 125, 98, 42, 88, 104, 85, 117, 175, 82 }, + { 95, 84, 53, 89, 128, 100, 113, 101, 45 }, + { 75, 79, 123, 47, 51, 128, 81, 171, 1 }, + { 57, 17, 5, 71, 102, 57, 53, 41, 49 }, + { 115, 21, 2, 10, 102, 255, 166, 23, 6 }, + { 38, 33, 13, 121, 57, 73, 26, 1, 85 }, + { 41, 10, 67, 138, 77, 110, 90, 47, 114 }, + { 101, 29, 16, 10, 85, 128, 101, 196, 26 }, + { 57, 18, 10, 102, 102, 213, 34, 20, 43 }, + { 117, 20, 15, 36, 163, 128, 68, 1, 26 } }, + { { 138, 31, 36, 171, 27, 166, 38, 44, 229 }, + { 67, 87, 58, 169, 82, 115, 26, 59, 179 }, + { 63, 59, 90, 180, 59, 166, 93, 73, 154 }, + { 40, 40, 21, 116, 143, 209, 34, 39, 175 }, + { 57, 46, 22, 24, 128, 1, 54, 17, 37 }, + { 47, 15, 16, 183, 34, 223, 49, 45, 183 }, + { 46, 17, 33, 183, 6, 98, 15, 32, 183 }, + { 65, 32, 73, 115, 28, 128, 23, 128, 205 }, + { 40, 3, 9, 115, 51, 192, 18, 6, 223 }, + { 87, 37, 9, 115, 59, 77, 64, 21, 47 } }, + { { 104, 55, 44, 
218, 9, 54, 53, 130, 226 }, + { 64, 90, 70, 205, 40, 41, 23, 26, 57 }, + { 54, 57, 112, 184, 5, 41, 38, 166, 213 }, + { 30, 34, 26, 133, 152, 116, 10, 32, 134 }, + { 75, 32, 12, 51, 192, 255, 160, 43, 51 }, + { 39, 19, 53, 221, 26, 114, 32, 73, 255 }, + { 31, 9, 65, 234, 2, 15, 1, 118, 73 }, + { 88, 31, 35, 67, 102, 85, 55, 186, 85 }, + { 56, 21, 23, 111, 59, 205, 45, 37, 192 }, + { 55, 38, 70, 124, 73, 102, 1, 34, 98 } }, + { { 102, 61, 71, 37, 34, 53, 31, 243, 192 }, + { 69, 60, 71, 38, 73, 119, 28, 222, 37 }, + { 68, 45, 128, 34, 1, 47, 11, 245, 171 }, + { 62, 17, 19, 70, 146, 85, 55, 62, 70 }, + { 75, 15, 9, 9, 64, 255, 184, 119, 16 }, + { 37, 43, 37, 154, 100, 163, 85, 160, 1 }, + { 63, 9, 92, 136, 28, 64, 32, 201, 85 }, + { 86, 6, 28, 5, 64, 255, 25, 248, 1 }, + { 56, 8, 17, 132, 137, 255, 55, 116, 128 }, + { 58, 15, 20, 82, 135, 57, 26, 121, 40 } }, + { { 164, 50, 31, 137, 154, 133, 25, 35, 218 }, + { 51, 103, 44, 131, 131, 123, 31, 6, 158 }, + { 86, 40, 64, 135, 148, 224, 45, 183, 128 }, + { 22, 26, 17, 131, 240, 154, 14, 1, 209 }, + { 83, 12, 13, 54, 192, 255, 68, 47, 28 }, + { 45, 16, 21, 91, 64, 222, 7, 1, 197 }, + { 56, 21, 39, 155, 60, 138, 23, 102, 213 }, + { 85, 26, 85, 85, 128, 128, 32, 146, 171 }, + { 18, 11, 7, 63, 144, 171, 4, 4, 246 }, + { 35, 27, 10, 146, 174, 171, 12, 26, 128 } }, + { { 190, 80, 35, 99, 180, 80, 126, 54, 45 }, + { 85, 126, 47, 87, 176, 51, 41, 20, 32 }, + { 101, 75, 128, 139, 118, 146, 116, 128, 85 }, + { 56, 41, 15, 176, 236, 85, 37, 9, 62 }, + { 146, 36, 19, 30, 171, 255, 97, 27, 20 }, + { 71, 30, 17, 119, 118, 255, 17, 18, 138 }, + { 101, 38, 60, 138, 55, 70, 43, 26, 142 }, + { 138, 45, 61, 62, 219, 1, 81, 188, 64 }, + { 32, 41, 20, 117, 151, 142, 20, 21, 163 }, + { 112, 19, 12, 61, 195, 128, 48, 4, 24 } } + }; #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/common/vp8_loopfilter.c b/libs/libvpx/vp8/common/vp8_loopfilter.c index 756ad488f9..c6430be465 100644 --- a/libs/libvpx/vp8/common/vp8_loopfilter.c +++ 
b/libs/libvpx/vp8/common/vp8_loopfilter.c @@ -8,265 +8,298 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include "vpx_config.h" #include "vp8_rtcd.h" #include "loopfilter.h" #include "onyxc_int.h" #include "vpx_mem/vpx_mem.h" +static void lf_init_lut(loop_filter_info_n *lfi) { + int filt_lvl; -static void lf_init_lut(loop_filter_info_n *lfi) -{ - int filt_lvl; - - for (filt_lvl = 0; filt_lvl <= MAX_LOOP_FILTER; filt_lvl++) - { - if (filt_lvl >= 40) - { - lfi->hev_thr_lut[KEY_FRAME][filt_lvl] = 2; - lfi->hev_thr_lut[INTER_FRAME][filt_lvl] = 3; - } - else if (filt_lvl >= 20) - { - lfi->hev_thr_lut[KEY_FRAME][filt_lvl] = 1; - lfi->hev_thr_lut[INTER_FRAME][filt_lvl] = 2; - } - else if (filt_lvl >= 15) - { - lfi->hev_thr_lut[KEY_FRAME][filt_lvl] = 1; - lfi->hev_thr_lut[INTER_FRAME][filt_lvl] = 1; - } - else - { - lfi->hev_thr_lut[KEY_FRAME][filt_lvl] = 0; - lfi->hev_thr_lut[INTER_FRAME][filt_lvl] = 0; - } + for (filt_lvl = 0; filt_lvl <= MAX_LOOP_FILTER; ++filt_lvl) { + if (filt_lvl >= 40) { + lfi->hev_thr_lut[KEY_FRAME][filt_lvl] = 2; + lfi->hev_thr_lut[INTER_FRAME][filt_lvl] = 3; + } else if (filt_lvl >= 20) { + lfi->hev_thr_lut[KEY_FRAME][filt_lvl] = 1; + lfi->hev_thr_lut[INTER_FRAME][filt_lvl] = 2; + } else if (filt_lvl >= 15) { + lfi->hev_thr_lut[KEY_FRAME][filt_lvl] = 1; + lfi->hev_thr_lut[INTER_FRAME][filt_lvl] = 1; + } else { + lfi->hev_thr_lut[KEY_FRAME][filt_lvl] = 0; + lfi->hev_thr_lut[INTER_FRAME][filt_lvl] = 0; } + } - lfi->mode_lf_lut[DC_PRED] = 1; - lfi->mode_lf_lut[V_PRED] = 1; - lfi->mode_lf_lut[H_PRED] = 1; - lfi->mode_lf_lut[TM_PRED] = 1; - lfi->mode_lf_lut[B_PRED] = 0; - - lfi->mode_lf_lut[ZEROMV] = 1; - lfi->mode_lf_lut[NEARESTMV] = 2; - lfi->mode_lf_lut[NEARMV] = 2; - lfi->mode_lf_lut[NEWMV] = 2; - lfi->mode_lf_lut[SPLITMV] = 3; + lfi->mode_lf_lut[DC_PRED] = 1; + lfi->mode_lf_lut[V_PRED] = 1; + lfi->mode_lf_lut[H_PRED] = 1; + lfi->mode_lf_lut[TM_PRED] = 1; + lfi->mode_lf_lut[B_PRED] = 0; + lfi->mode_lf_lut[ZEROMV] = 1; + 
lfi->mode_lf_lut[NEARESTMV] = 2; + lfi->mode_lf_lut[NEARMV] = 2; + lfi->mode_lf_lut[NEWMV] = 2; + lfi->mode_lf_lut[SPLITMV] = 3; } void vp8_loop_filter_update_sharpness(loop_filter_info_n *lfi, - int sharpness_lvl) -{ - int i; + int sharpness_lvl) { + int i; - /* For each possible value for the loop filter fill out limits */ - for (i = 0; i <= MAX_LOOP_FILTER; i++) - { - int filt_lvl = i; - int block_inside_limit = 0; + /* For each possible value for the loop filter fill out limits */ + for (i = 0; i <= MAX_LOOP_FILTER; ++i) { + int filt_lvl = i; + int block_inside_limit = 0; - /* Set loop filter paramaeters that control sharpness. */ - block_inside_limit = filt_lvl >> (sharpness_lvl > 0); - block_inside_limit = block_inside_limit >> (sharpness_lvl > 4); + /* Set loop filter paramaeters that control sharpness. */ + block_inside_limit = filt_lvl >> (sharpness_lvl > 0); + block_inside_limit = block_inside_limit >> (sharpness_lvl > 4); - if (sharpness_lvl > 0) - { - if (block_inside_limit > (9 - sharpness_lvl)) - block_inside_limit = (9 - sharpness_lvl); - } - - if (block_inside_limit < 1) - block_inside_limit = 1; - - memset(lfi->lim[i], block_inside_limit, SIMD_WIDTH); - memset(lfi->blim[i], (2 * filt_lvl + block_inside_limit), SIMD_WIDTH); - memset(lfi->mblim[i], (2 * (filt_lvl + 2) + block_inside_limit), - SIMD_WIDTH); + if (sharpness_lvl > 0) { + if (block_inside_limit > (9 - sharpness_lvl)) { + block_inside_limit = (9 - sharpness_lvl); + } } + + if (block_inside_limit < 1) block_inside_limit = 1; + + memset(lfi->lim[i], block_inside_limit, SIMD_WIDTH); + memset(lfi->blim[i], (2 * filt_lvl + block_inside_limit), SIMD_WIDTH); + memset(lfi->mblim[i], (2 * (filt_lvl + 2) + block_inside_limit), + SIMD_WIDTH); + } } -void vp8_loop_filter_init(VP8_COMMON *cm) -{ - loop_filter_info_n *lfi = &cm->lf_info; - int i; +void vp8_loop_filter_init(VP8_COMMON *cm) { + loop_filter_info_n *lfi = &cm->lf_info; + int i; - /* init limits for given sharpness*/ + /* init limits for 
given sharpness*/ + vp8_loop_filter_update_sharpness(lfi, cm->sharpness_level); + cm->last_sharpness_level = cm->sharpness_level; + + /* init LUT for lvl and hev thr picking */ + lf_init_lut(lfi); + + /* init hev threshold const vectors */ + for (i = 0; i < 4; ++i) { + memset(lfi->hev_thr[i], i, SIMD_WIDTH); + } +} + +void vp8_loop_filter_frame_init(VP8_COMMON *cm, MACROBLOCKD *mbd, + int default_filt_lvl) { + int seg, /* segment number */ + ref, /* index in ref_lf_deltas */ + mode; /* index in mode_lf_deltas */ + + loop_filter_info_n *lfi = &cm->lf_info; + + /* update limits if sharpness has changed */ + if (cm->last_sharpness_level != cm->sharpness_level) { vp8_loop_filter_update_sharpness(lfi, cm->sharpness_level); cm->last_sharpness_level = cm->sharpness_level; + } - /* init LUT for lvl and hev thr picking */ - lf_init_lut(lfi); + for (seg = 0; seg < MAX_MB_SEGMENTS; ++seg) { + int lvl_seg = default_filt_lvl; + int lvl_ref, lvl_mode; - /* init hev threshold const vectors */ - for(i = 0; i < 4 ; i++) - { - memset(lfi->hev_thr[i], i, SIMD_WIDTH); - } -} - -void vp8_loop_filter_frame_init(VP8_COMMON *cm, - MACROBLOCKD *mbd, - int default_filt_lvl) -{ - int seg, /* segment number */ - ref, /* index in ref_lf_deltas */ - mode; /* index in mode_lf_deltas */ - - loop_filter_info_n *lfi = &cm->lf_info; - - /* update limits if sharpness has changed */ - if(cm->last_sharpness_level != cm->sharpness_level) - { - vp8_loop_filter_update_sharpness(lfi, cm->sharpness_level); - cm->last_sharpness_level = cm->sharpness_level; + /* Note the baseline filter values for each segment */ + if (mbd->segmentation_enabled) { + /* Abs value */ + if (mbd->mb_segement_abs_delta == SEGMENT_ABSDATA) { + lvl_seg = mbd->segment_feature_data[MB_LVL_ALT_LF][seg]; + } else /* Delta Value */ + { + lvl_seg += mbd->segment_feature_data[MB_LVL_ALT_LF][seg]; + } + lvl_seg = (lvl_seg > 0) ? ((lvl_seg > 63) ? 
63 : lvl_seg) : 0; } - for(seg = 0; seg < MAX_MB_SEGMENTS; seg++) - { - int lvl_seg = default_filt_lvl; - int lvl_ref, lvl_mode; + if (!mbd->mode_ref_lf_delta_enabled) { + /* we could get rid of this if we assume that deltas are set to + * zero when not in use; encoder always uses deltas + */ + memset(lfi->lvl[seg][0], lvl_seg, 4 * 4); + continue; + } - /* Note the baseline filter values for each segment */ - if (mbd->segmentation_enabled) - { - /* Abs value */ - if (mbd->mb_segement_abs_delta == SEGMENT_ABSDATA) - { - lvl_seg = mbd->segment_feature_data[MB_LVL_ALT_LF][seg]; - } - else /* Delta Value */ - { - lvl_seg += mbd->segment_feature_data[MB_LVL_ALT_LF][seg]; - } - lvl_seg = (lvl_seg > 0) ? ((lvl_seg > 63) ? 63: lvl_seg) : 0; - } + /* INTRA_FRAME */ + ref = INTRA_FRAME; - if (!mbd->mode_ref_lf_delta_enabled) - { - /* we could get rid of this if we assume that deltas are set to - * zero when not in use; encoder always uses deltas - */ - memset(lfi->lvl[seg][0], lvl_seg, 4 * 4 ); - continue; - } + /* Apply delta for reference frame */ + lvl_ref = lvl_seg + mbd->ref_lf_deltas[ref]; - /* INTRA_FRAME */ - ref = INTRA_FRAME; + /* Apply delta for Intra modes */ + mode = 0; /* B_PRED */ + /* Only the split mode BPRED has a further special case */ + lvl_mode = lvl_ref + mbd->mode_lf_deltas[mode]; + /* clamp */ + lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0; - /* Apply delta for reference frame */ - lvl_ref = lvl_seg + mbd->ref_lf_deltas[ref]; + lfi->lvl[seg][ref][mode] = lvl_mode; - /* Apply delta for Intra modes */ - mode = 0; /* B_PRED */ - /* Only the split mode BPRED has a further special case */ + mode = 1; /* all the rest of Intra modes */ + /* clamp */ + lvl_mode = (lvl_ref > 0) ? (lvl_ref > 63 ? 
63 : lvl_ref) : 0; + lfi->lvl[seg][ref][mode] = lvl_mode; + + /* LAST, GOLDEN, ALT */ + for (ref = 1; ref < MAX_REF_FRAMES; ++ref) { + /* Apply delta for reference frame */ + lvl_ref = lvl_seg + mbd->ref_lf_deltas[ref]; + + /* Apply delta for Inter modes */ + for (mode = 1; mode < 4; ++mode) { lvl_mode = lvl_ref + mbd->mode_lf_deltas[mode]; /* clamp */ lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0; lfi->lvl[seg][ref][mode] = lvl_mode; - - mode = 1; /* all the rest of Intra modes */ - /* clamp */ - lvl_mode = (lvl_ref > 0) ? (lvl_ref > 63 ? 63 : lvl_ref) : 0; - lfi->lvl[seg][ref][mode] = lvl_mode; - - /* LAST, GOLDEN, ALT */ - for(ref = 1; ref < MAX_REF_FRAMES; ref++) - { - /* Apply delta for reference frame */ - lvl_ref = lvl_seg + mbd->ref_lf_deltas[ref]; - - /* Apply delta for Inter modes */ - for (mode = 1; mode < 4; mode++) - { - lvl_mode = lvl_ref + mbd->mode_lf_deltas[mode]; - /* clamp */ - lvl_mode = (lvl_mode > 0) ? (lvl_mode > 63 ? 63 : lvl_mode) : 0; - - lfi->lvl[seg][ref][mode] = lvl_mode; - } - } + } } + } } - void vp8_loop_filter_row_normal(VP8_COMMON *cm, MODE_INFO *mode_info_context, - int mb_row, int post_ystride, int post_uvstride, - unsigned char *y_ptr, unsigned char *u_ptr, - unsigned char *v_ptr) -{ - int mb_col; - int filter_level; - loop_filter_info_n *lfi_n = &cm->lf_info; - loop_filter_info lfi; - FRAME_TYPE frame_type = cm->frame_type; + int mb_row, int post_ystride, int post_uvstride, + unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr) { + int mb_col; + int filter_level; + loop_filter_info_n *lfi_n = &cm->lf_info; + loop_filter_info lfi; + FRAME_TYPE frame_type = cm->frame_type; - for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) - { - int skip_lf = (mode_info_context->mbmi.mode != B_PRED && - mode_info_context->mbmi.mode != SPLITMV && - mode_info_context->mbmi.mb_skip_coeff); + for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { + int skip_lf = (mode_info_context->mbmi.mode != B_PRED && + 
mode_info_context->mbmi.mode != SPLITMV && + mode_info_context->mbmi.mb_skip_coeff); - const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode]; - const int seg = mode_info_context->mbmi.segment_id; - const int ref_frame = mode_info_context->mbmi.ref_frame; + const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode]; + const int seg = mode_info_context->mbmi.segment_id; + const int ref_frame = mode_info_context->mbmi.ref_frame; - filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; + filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; - if (filter_level) - { - const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level]; - lfi.mblim = lfi_n->mblim[filter_level]; - lfi.blim = lfi_n->blim[filter_level]; - lfi.lim = lfi_n->lim[filter_level]; - lfi.hev_thr = lfi_n->hev_thr[hev_index]; + if (filter_level) { + const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level]; + lfi.mblim = lfi_n->mblim[filter_level]; + lfi.blim = lfi_n->blim[filter_level]; + lfi.lim = lfi_n->lim[filter_level]; + lfi.hev_thr = lfi_n->hev_thr[hev_index]; - if (mb_col > 0) - vp8_loop_filter_mbv - (y_ptr, u_ptr, v_ptr, post_ystride, post_uvstride, &lfi); + if (mb_col > 0) + vp8_loop_filter_mbv(y_ptr, u_ptr, v_ptr, post_ystride, post_uvstride, + &lfi); - if (!skip_lf) - vp8_loop_filter_bv - (y_ptr, u_ptr, v_ptr, post_ystride, post_uvstride, &lfi); + if (!skip_lf) + vp8_loop_filter_bv(y_ptr, u_ptr, v_ptr, post_ystride, post_uvstride, + &lfi); - /* don't apply across umv border */ - if (mb_row > 0) - vp8_loop_filter_mbh - (y_ptr, u_ptr, v_ptr, post_ystride, post_uvstride, &lfi); + /* don't apply across umv border */ + if (mb_row > 0) + vp8_loop_filter_mbh(y_ptr, u_ptr, v_ptr, post_ystride, post_uvstride, + &lfi); - if (!skip_lf) - vp8_loop_filter_bh - (y_ptr, u_ptr, v_ptr, post_ystride, post_uvstride, &lfi); - } - - y_ptr += 16; - u_ptr += 8; - v_ptr += 8; - - mode_info_context++; /* step to next MB */ + if (!skip_lf) + vp8_loop_filter_bh(y_ptr, u_ptr, 
v_ptr, post_ystride, post_uvstride, + &lfi); } + y_ptr += 16; + u_ptr += 8; + v_ptr += 8; + + mode_info_context++; /* step to next MB */ + } } void vp8_loop_filter_row_simple(VP8_COMMON *cm, MODE_INFO *mode_info_context, - int mb_row, int post_ystride, int post_uvstride, - unsigned char *y_ptr, unsigned char *u_ptr, - unsigned char *v_ptr) -{ - int mb_col; - int filter_level; - loop_filter_info_n *lfi_n = &cm->lf_info; - (void)post_uvstride; + int mb_row, int post_ystride, int post_uvstride, + unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr) { + int mb_col; + int filter_level; + loop_filter_info_n *lfi_n = &cm->lf_info; + (void)post_uvstride; - for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) - { + for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { + int skip_lf = (mode_info_context->mbmi.mode != B_PRED && + mode_info_context->mbmi.mode != SPLITMV && + mode_info_context->mbmi.mb_skip_coeff); + + const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode]; + const int seg = mode_info_context->mbmi.segment_id; + const int ref_frame = mode_info_context->mbmi.ref_frame; + + filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; + + if (filter_level) { + if (mb_col > 0) + vp8_loop_filter_simple_mbv(y_ptr, post_ystride, + lfi_n->mblim[filter_level]); + + if (!skip_lf) + vp8_loop_filter_simple_bv(y_ptr, post_ystride, + lfi_n->blim[filter_level]); + + /* don't apply across umv border */ + if (mb_row > 0) + vp8_loop_filter_simple_mbh(y_ptr, post_ystride, + lfi_n->mblim[filter_level]); + + if (!skip_lf) + vp8_loop_filter_simple_bh(y_ptr, post_ystride, + lfi_n->blim[filter_level]); + } + + y_ptr += 16; + u_ptr += 8; + v_ptr += 8; + + mode_info_context++; /* step to next MB */ + } +} +void vp8_loop_filter_frame(VP8_COMMON *cm, MACROBLOCKD *mbd, int frame_type) { + YV12_BUFFER_CONFIG *post = cm->frame_to_show; + loop_filter_info_n *lfi_n = &cm->lf_info; + loop_filter_info lfi; + + int mb_row; + int mb_col; + int mb_rows = cm->mb_rows; + int 
mb_cols = cm->mb_cols; + + int filter_level; + + unsigned char *y_ptr, *u_ptr, *v_ptr; + + /* Point at base of Mb MODE_INFO list */ + const MODE_INFO *mode_info_context = cm->mi; + int post_y_stride = post->y_stride; + int post_uv_stride = post->uv_stride; + + /* Initialize the loop filter for this frame. */ + vp8_loop_filter_frame_init(cm, mbd, cm->filter_level); + + /* Set up the buffer pointers */ + y_ptr = post->y_buffer; + u_ptr = post->u_buffer; + v_ptr = post->v_buffer; + + /* vp8_filter each macro block */ + if (cm->filter_type == NORMAL_LOOPFILTER) { + for (mb_row = 0; mb_row < mb_rows; ++mb_row) { + for (mb_col = 0; mb_col < mb_cols; ++mb_col) { int skip_lf = (mode_info_context->mbmi.mode != B_PRED && - mode_info_context->mbmi.mode != SPLITMV && - mode_info_context->mbmi.mb_skip_coeff); + mode_info_context->mbmi.mode != SPLITMV && + mode_info_context->mbmi.mb_skip_coeff); const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode]; const int seg = mode_info_context->mbmi.segment_id; @@ -274,388 +307,267 @@ void vp8_loop_filter_row_simple(VP8_COMMON *cm, MODE_INFO *mode_info_context, filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; - if (filter_level) - { - if (mb_col > 0) - vp8_loop_filter_simple_mbv - (y_ptr, post_ystride, lfi_n->mblim[filter_level]); + if (filter_level) { + const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level]; + lfi.mblim = lfi_n->mblim[filter_level]; + lfi.blim = lfi_n->blim[filter_level]; + lfi.lim = lfi_n->lim[filter_level]; + lfi.hev_thr = lfi_n->hev_thr[hev_index]; - if (!skip_lf) - vp8_loop_filter_simple_bv - (y_ptr, post_ystride, lfi_n->blim[filter_level]); + if (mb_col > 0) + vp8_loop_filter_mbv(y_ptr, u_ptr, v_ptr, post_y_stride, + post_uv_stride, &lfi); - /* don't apply across umv border */ - if (mb_row > 0) - vp8_loop_filter_simple_mbh - (y_ptr, post_ystride, lfi_n->mblim[filter_level]); + if (!skip_lf) + vp8_loop_filter_bv(y_ptr, u_ptr, v_ptr, post_y_stride, + post_uv_stride, &lfi); - if 
(!skip_lf) - vp8_loop_filter_simple_bh - (y_ptr, post_ystride, lfi_n->blim[filter_level]); + /* don't apply across umv border */ + if (mb_row > 0) + vp8_loop_filter_mbh(y_ptr, u_ptr, v_ptr, post_y_stride, + post_uv_stride, &lfi); + + if (!skip_lf) + vp8_loop_filter_bh(y_ptr, u_ptr, v_ptr, post_y_stride, + post_uv_stride, &lfi); } y_ptr += 16; u_ptr += 8; v_ptr += 8; - mode_info_context++; /* step to next MB */ + mode_info_context++; /* step to next MB */ + } + y_ptr += post_y_stride * 16 - post->y_width; + u_ptr += post_uv_stride * 8 - post->uv_width; + v_ptr += post_uv_stride * 8 - post->uv_width; + + mode_info_context++; /* Skip border mb */ } + } else /* SIMPLE_LOOPFILTER */ + { + for (mb_row = 0; mb_row < mb_rows; ++mb_row) { + for (mb_col = 0; mb_col < mb_cols; ++mb_col) { + int skip_lf = (mode_info_context->mbmi.mode != B_PRED && + mode_info_context->mbmi.mode != SPLITMV && + mode_info_context->mbmi.mb_skip_coeff); -} -void vp8_loop_filter_frame(VP8_COMMON *cm, - MACROBLOCKD *mbd, - int frame_type) -{ - YV12_BUFFER_CONFIG *post = cm->frame_to_show; - loop_filter_info_n *lfi_n = &cm->lf_info; - loop_filter_info lfi; + const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode]; + const int seg = mode_info_context->mbmi.segment_id; + const int ref_frame = mode_info_context->mbmi.ref_frame; - int mb_row; - int mb_col; - int mb_rows = cm->mb_rows; - int mb_cols = cm->mb_cols; + filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; + if (filter_level) { + const unsigned char *mblim = lfi_n->mblim[filter_level]; + const unsigned char *blim = lfi_n->blim[filter_level]; - int filter_level; + if (mb_col > 0) + vp8_loop_filter_simple_mbv(y_ptr, post_y_stride, mblim); - unsigned char *y_ptr, *u_ptr, *v_ptr; + if (!skip_lf) vp8_loop_filter_simple_bv(y_ptr, post_y_stride, blim); - /* Point at base of Mb MODE_INFO list */ - const MODE_INFO *mode_info_context = cm->mi; - int post_y_stride = post->y_stride; - int post_uv_stride = post->uv_stride; - - /* 
Initialize the loop filter for this frame. */ - vp8_loop_filter_frame_init(cm, mbd, cm->filter_level); - - /* Set up the buffer pointers */ - y_ptr = post->y_buffer; - u_ptr = post->u_buffer; - v_ptr = post->v_buffer; - - /* vp8_filter each macro block */ - if (cm->filter_type == NORMAL_LOOPFILTER) - { - for (mb_row = 0; mb_row < mb_rows; mb_row++) - { - for (mb_col = 0; mb_col < mb_cols; mb_col++) - { - int skip_lf = (mode_info_context->mbmi.mode != B_PRED && - mode_info_context->mbmi.mode != SPLITMV && - mode_info_context->mbmi.mb_skip_coeff); - - const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode]; - const int seg = mode_info_context->mbmi.segment_id; - const int ref_frame = mode_info_context->mbmi.ref_frame; - - filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; - - if (filter_level) - { - const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level]; - lfi.mblim = lfi_n->mblim[filter_level]; - lfi.blim = lfi_n->blim[filter_level]; - lfi.lim = lfi_n->lim[filter_level]; - lfi.hev_thr = lfi_n->hev_thr[hev_index]; - - if (mb_col > 0) - vp8_loop_filter_mbv - (y_ptr, u_ptr, v_ptr, post_y_stride, post_uv_stride, &lfi); - - if (!skip_lf) - vp8_loop_filter_bv - (y_ptr, u_ptr, v_ptr, post_y_stride, post_uv_stride, &lfi); - - /* don't apply across umv border */ - if (mb_row > 0) - vp8_loop_filter_mbh - (y_ptr, u_ptr, v_ptr, post_y_stride, post_uv_stride, &lfi); - - if (!skip_lf) - vp8_loop_filter_bh - (y_ptr, u_ptr, v_ptr, post_y_stride, post_uv_stride, &lfi); - } - - y_ptr += 16; - u_ptr += 8; - v_ptr += 8; - - mode_info_context++; /* step to next MB */ - } - y_ptr += post_y_stride * 16 - post->y_width; - u_ptr += post_uv_stride * 8 - post->uv_width; - v_ptr += post_uv_stride * 8 - post->uv_width; - - mode_info_context++; /* Skip border mb */ - - } - } - else /* SIMPLE_LOOPFILTER */ - { - for (mb_row = 0; mb_row < mb_rows; mb_row++) - { - for (mb_col = 0; mb_col < mb_cols; mb_col++) - { - int skip_lf = (mode_info_context->mbmi.mode != B_PRED 
&& - mode_info_context->mbmi.mode != SPLITMV && - mode_info_context->mbmi.mb_skip_coeff); - - const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode]; - const int seg = mode_info_context->mbmi.segment_id; - const int ref_frame = mode_info_context->mbmi.ref_frame; - - filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; - if (filter_level) - { - const unsigned char * mblim = lfi_n->mblim[filter_level]; - const unsigned char * blim = lfi_n->blim[filter_level]; - - if (mb_col > 0) - vp8_loop_filter_simple_mbv - (y_ptr, post_y_stride, mblim); - - if (!skip_lf) - vp8_loop_filter_simple_bv - (y_ptr, post_y_stride, blim); - - /* don't apply across umv border */ - if (mb_row > 0) - vp8_loop_filter_simple_mbh - (y_ptr, post_y_stride, mblim); - - if (!skip_lf) - vp8_loop_filter_simple_bh - (y_ptr, post_y_stride, blim); - } - - y_ptr += 16; - u_ptr += 8; - v_ptr += 8; - - mode_info_context++; /* step to next MB */ - } - y_ptr += post_y_stride * 16 - post->y_width; - u_ptr += post_uv_stride * 8 - post->uv_width; - v_ptr += post_uv_stride * 8 - post->uv_width; - - mode_info_context++; /* Skip border mb */ + /* don't apply across umv border */ + if (mb_row > 0) + vp8_loop_filter_simple_mbh(y_ptr, post_y_stride, mblim); + if (!skip_lf) vp8_loop_filter_simple_bh(y_ptr, post_y_stride, blim); } + + y_ptr += 16; + u_ptr += 8; + v_ptr += 8; + + mode_info_context++; /* step to next MB */ + } + y_ptr += post_y_stride * 16 - post->y_width; + u_ptr += post_uv_stride * 8 - post->uv_width; + v_ptr += post_uv_stride * 8 - post->uv_width; + + mode_info_context++; /* Skip border mb */ } + } } -void vp8_loop_filter_frame_yonly -( - VP8_COMMON *cm, - MACROBLOCKD *mbd, - int default_filt_lvl -) -{ - YV12_BUFFER_CONFIG *post = cm->frame_to_show; +void vp8_loop_filter_frame_yonly(VP8_COMMON *cm, MACROBLOCKD *mbd, + int default_filt_lvl) { + YV12_BUFFER_CONFIG *post = cm->frame_to_show; - unsigned char *y_ptr; - int mb_row; - int mb_col; + unsigned char *y_ptr; + int mb_row; + int 
mb_col; - loop_filter_info_n *lfi_n = &cm->lf_info; - loop_filter_info lfi; + loop_filter_info_n *lfi_n = &cm->lf_info; + loop_filter_info lfi; - int filter_level; - FRAME_TYPE frame_type = cm->frame_type; + int filter_level; + FRAME_TYPE frame_type = cm->frame_type; - /* Point at base of Mb MODE_INFO list */ - const MODE_INFO *mode_info_context = cm->mi; + /* Point at base of Mb MODE_INFO list */ + const MODE_INFO *mode_info_context = cm->mi; #if 0 if(default_filt_lvl == 0) /* no filter applied */ return; #endif - /* Initialize the loop filter for this frame. */ - vp8_loop_filter_frame_init( cm, mbd, default_filt_lvl); + /* Initialize the loop filter for this frame. */ + vp8_loop_filter_frame_init(cm, mbd, default_filt_lvl); - /* Set up the buffer pointers */ - y_ptr = post->y_buffer; + /* Set up the buffer pointers */ + y_ptr = post->y_buffer; - /* vp8_filter each macro block */ - for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) - { - for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) - { - int skip_lf = (mode_info_context->mbmi.mode != B_PRED && - mode_info_context->mbmi.mode != SPLITMV && - mode_info_context->mbmi.mb_skip_coeff); + /* vp8_filter each macro block */ + for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { + for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { + int skip_lf = (mode_info_context->mbmi.mode != B_PRED && + mode_info_context->mbmi.mode != SPLITMV && + mode_info_context->mbmi.mb_skip_coeff); - const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode]; - const int seg = mode_info_context->mbmi.segment_id; - const int ref_frame = mode_info_context->mbmi.ref_frame; + const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode]; + const int seg = mode_info_context->mbmi.segment_id; + const int ref_frame = mode_info_context->mbmi.ref_frame; - filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; + filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; - if (filter_level) - { - if (cm->filter_type == NORMAL_LOOPFILTER) - { 
- const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level]; - lfi.mblim = lfi_n->mblim[filter_level]; - lfi.blim = lfi_n->blim[filter_level]; - lfi.lim = lfi_n->lim[filter_level]; - lfi.hev_thr = lfi_n->hev_thr[hev_index]; + if (filter_level) { + if (cm->filter_type == NORMAL_LOOPFILTER) { + const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level]; + lfi.mblim = lfi_n->mblim[filter_level]; + lfi.blim = lfi_n->blim[filter_level]; + lfi.lim = lfi_n->lim[filter_level]; + lfi.hev_thr = lfi_n->hev_thr[hev_index]; - if (mb_col > 0) - vp8_loop_filter_mbv - (y_ptr, 0, 0, post->y_stride, 0, &lfi); + if (mb_col > 0) + vp8_loop_filter_mbv(y_ptr, 0, 0, post->y_stride, 0, &lfi); - if (!skip_lf) - vp8_loop_filter_bv - (y_ptr, 0, 0, post->y_stride, 0, &lfi); + if (!skip_lf) + vp8_loop_filter_bv(y_ptr, 0, 0, post->y_stride, 0, &lfi); - /* don't apply across umv border */ - if (mb_row > 0) - vp8_loop_filter_mbh - (y_ptr, 0, 0, post->y_stride, 0, &lfi); + /* don't apply across umv border */ + if (mb_row > 0) + vp8_loop_filter_mbh(y_ptr, 0, 0, post->y_stride, 0, &lfi); - if (!skip_lf) - vp8_loop_filter_bh - (y_ptr, 0, 0, post->y_stride, 0, &lfi); - } - else - { - if (mb_col > 0) - vp8_loop_filter_simple_mbv - (y_ptr, post->y_stride, lfi_n->mblim[filter_level]); + if (!skip_lf) + vp8_loop_filter_bh(y_ptr, 0, 0, post->y_stride, 0, &lfi); + } else { + if (mb_col > 0) + vp8_loop_filter_simple_mbv(y_ptr, post->y_stride, + lfi_n->mblim[filter_level]); - if (!skip_lf) - vp8_loop_filter_simple_bv - (y_ptr, post->y_stride, lfi_n->blim[filter_level]); + if (!skip_lf) + vp8_loop_filter_simple_bv(y_ptr, post->y_stride, + lfi_n->blim[filter_level]); - /* don't apply across umv border */ - if (mb_row > 0) - vp8_loop_filter_simple_mbh - (y_ptr, post->y_stride, lfi_n->mblim[filter_level]); - - if (!skip_lf) - vp8_loop_filter_simple_bh - (y_ptr, post->y_stride, lfi_n->blim[filter_level]); - } - } - - y_ptr += 16; - mode_info_context ++; /* step to next MB */ + /* don't apply 
across umv border */ + if (mb_row > 0) + vp8_loop_filter_simple_mbh(y_ptr, post->y_stride, + lfi_n->mblim[filter_level]); + if (!skip_lf) + vp8_loop_filter_simple_bh(y_ptr, post->y_stride, + lfi_n->blim[filter_level]); } + } - y_ptr += post->y_stride * 16 - post->y_width; - mode_info_context ++; /* Skip border mb */ + y_ptr += 16; + mode_info_context++; /* step to next MB */ } + y_ptr += post->y_stride * 16 - post->y_width; + mode_info_context++; /* Skip border mb */ + } } -void vp8_loop_filter_partial_frame -( - VP8_COMMON *cm, - MACROBLOCKD *mbd, - int default_filt_lvl -) -{ - YV12_BUFFER_CONFIG *post = cm->frame_to_show; +void vp8_loop_filter_partial_frame(VP8_COMMON *cm, MACROBLOCKD *mbd, + int default_filt_lvl) { + YV12_BUFFER_CONFIG *post = cm->frame_to_show; - unsigned char *y_ptr; - int mb_row; - int mb_col; - int mb_cols = post->y_width >> 4; - int mb_rows = post->y_height >> 4; + unsigned char *y_ptr; + int mb_row; + int mb_col; + int mb_cols = post->y_width >> 4; + int mb_rows = post->y_height >> 4; - int linestocopy; + int linestocopy; - loop_filter_info_n *lfi_n = &cm->lf_info; - loop_filter_info lfi; + loop_filter_info_n *lfi_n = &cm->lf_info; + loop_filter_info lfi; - int filter_level; - FRAME_TYPE frame_type = cm->frame_type; + int filter_level; + FRAME_TYPE frame_type = cm->frame_type; - const MODE_INFO *mode_info_context; + const MODE_INFO *mode_info_context; #if 0 if(default_filt_lvl == 0) /* no filter applied */ return; #endif - /* Initialize the loop filter for this frame. */ - vp8_loop_filter_frame_init( cm, mbd, default_filt_lvl); + /* Initialize the loop filter for this frame. */ + vp8_loop_filter_frame_init(cm, mbd, default_filt_lvl); - /* number of MB rows to use in partial filtering */ - linestocopy = mb_rows / PARTIAL_FRAME_FRACTION; - linestocopy = linestocopy ? 
linestocopy << 4 : 16; /* 16 lines per MB */ + /* number of MB rows to use in partial filtering */ + linestocopy = mb_rows / PARTIAL_FRAME_FRACTION; + linestocopy = linestocopy ? linestocopy << 4 : 16; /* 16 lines per MB */ - /* Set up the buffer pointers; partial image starts at ~middle of frame */ - y_ptr = post->y_buffer + ((post->y_height >> 5) * 16) * post->y_stride; - mode_info_context = cm->mi + (post->y_height >> 5) * (mb_cols + 1); + /* Set up the buffer pointers; partial image starts at ~middle of frame */ + y_ptr = post->y_buffer + ((post->y_height >> 5) * 16) * post->y_stride; + mode_info_context = cm->mi + (post->y_height >> 5) * (mb_cols + 1); - /* vp8_filter each macro block */ - for (mb_row = 0; mb_row<(linestocopy >> 4); mb_row++) - { - for (mb_col = 0; mb_col < mb_cols; mb_col++) - { - int skip_lf = (mode_info_context->mbmi.mode != B_PRED && - mode_info_context->mbmi.mode != SPLITMV && - mode_info_context->mbmi.mb_skip_coeff); + /* vp8_filter each macro block */ + for (mb_row = 0; mb_row < (linestocopy >> 4); ++mb_row) { + for (mb_col = 0; mb_col < mb_cols; ++mb_col) { + int skip_lf = (mode_info_context->mbmi.mode != B_PRED && + mode_info_context->mbmi.mode != SPLITMV && + mode_info_context->mbmi.mb_skip_coeff); - const int mode_index = - lfi_n->mode_lf_lut[mode_info_context->mbmi.mode]; - const int seg = mode_info_context->mbmi.segment_id; - const int ref_frame = mode_info_context->mbmi.ref_frame; + const int mode_index = lfi_n->mode_lf_lut[mode_info_context->mbmi.mode]; + const int seg = mode_info_context->mbmi.segment_id; + const int ref_frame = mode_info_context->mbmi.ref_frame; - filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; + filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; - if (filter_level) - { - if (cm->filter_type == NORMAL_LOOPFILTER) - { - const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level]; - lfi.mblim = lfi_n->mblim[filter_level]; - lfi.blim = lfi_n->blim[filter_level]; - lfi.lim = 
lfi_n->lim[filter_level]; - lfi.hev_thr = lfi_n->hev_thr[hev_index]; + if (filter_level) { + if (cm->filter_type == NORMAL_LOOPFILTER) { + const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level]; + lfi.mblim = lfi_n->mblim[filter_level]; + lfi.blim = lfi_n->blim[filter_level]; + lfi.lim = lfi_n->lim[filter_level]; + lfi.hev_thr = lfi_n->hev_thr[hev_index]; - if (mb_col > 0) - vp8_loop_filter_mbv - (y_ptr, 0, 0, post->y_stride, 0, &lfi); + if (mb_col > 0) + vp8_loop_filter_mbv(y_ptr, 0, 0, post->y_stride, 0, &lfi); - if (!skip_lf) - vp8_loop_filter_bv - (y_ptr, 0, 0, post->y_stride, 0, &lfi); + if (!skip_lf) + vp8_loop_filter_bv(y_ptr, 0, 0, post->y_stride, 0, &lfi); - vp8_loop_filter_mbh - (y_ptr, 0, 0, post->y_stride, 0, &lfi); + vp8_loop_filter_mbh(y_ptr, 0, 0, post->y_stride, 0, &lfi); - if (!skip_lf) - vp8_loop_filter_bh - (y_ptr, 0, 0, post->y_stride, 0, &lfi); - } - else - { - if (mb_col > 0) - vp8_loop_filter_simple_mbv - (y_ptr, post->y_stride, lfi_n->mblim[filter_level]); + if (!skip_lf) + vp8_loop_filter_bh(y_ptr, 0, 0, post->y_stride, 0, &lfi); + } else { + if (mb_col > 0) + vp8_loop_filter_simple_mbv(y_ptr, post->y_stride, + lfi_n->mblim[filter_level]); - if (!skip_lf) - vp8_loop_filter_simple_bv - (y_ptr, post->y_stride, lfi_n->blim[filter_level]); + if (!skip_lf) + vp8_loop_filter_simple_bv(y_ptr, post->y_stride, + lfi_n->blim[filter_level]); - vp8_loop_filter_simple_mbh - (y_ptr, post->y_stride, lfi_n->mblim[filter_level]); + vp8_loop_filter_simple_mbh(y_ptr, post->y_stride, + lfi_n->mblim[filter_level]); - if (!skip_lf) - vp8_loop_filter_simple_bh - (y_ptr, post->y_stride, lfi_n->blim[filter_level]); - } - } - - y_ptr += 16; - mode_info_context += 1; /* step to next MB */ + if (!skip_lf) + vp8_loop_filter_simple_bh(y_ptr, post->y_stride, + lfi_n->blim[filter_level]); } + } - y_ptr += post->y_stride * 16 - post->y_width; - mode_info_context += 1; /* Skip border mb */ + y_ptr += 16; + mode_info_context += 1; /* step to next MB */ } + + 
y_ptr += post->y_stride * 16 - post->y_width; + mode_info_context += 1; /* Skip border mb */ + } } diff --git a/libs/libvpx/vp8/common/x86/filter_x86.c b/libs/libvpx/vp8/common/x86/filter_x86.c index 7f496ed7db..2405342f02 100644 --- a/libs/libvpx/vp8/common/x86/filter_x86.c +++ b/libs/libvpx/vp8/common/x86/filter_x86.c @@ -10,26 +10,20 @@ #include "vp8/common/x86/filter_x86.h" -DECLARE_ALIGNED(16, const short, vp8_bilinear_filters_x86_4[8][8]) = -{ - { 128, 128, 128, 128, 0, 0, 0, 0 }, - { 112, 112, 112, 112, 16, 16, 16, 16 }, - { 96, 96, 96, 96, 32, 32, 32, 32 }, - { 80, 80, 80, 80, 48, 48, 48, 48 }, - { 64, 64, 64, 64, 64, 64, 64, 64 }, - { 48, 48, 48, 48, 80, 80, 80, 80 }, - { 32, 32, 32, 32, 96, 96, 96, 96 }, - { 16, 16, 16, 16, 112, 112, 112, 112 } +DECLARE_ALIGNED(16, const short, vp8_bilinear_filters_x86_4[8][8]) = { + { 128, 128, 128, 128, 0, 0, 0, 0 }, { 112, 112, 112, 112, 16, 16, 16, 16 }, + { 96, 96, 96, 96, 32, 32, 32, 32 }, { 80, 80, 80, 80, 48, 48, 48, 48 }, + { 64, 64, 64, 64, 64, 64, 64, 64 }, { 48, 48, 48, 48, 80, 80, 80, 80 }, + { 32, 32, 32, 32, 96, 96, 96, 96 }, { 16, 16, 16, 16, 112, 112, 112, 112 } }; -DECLARE_ALIGNED(16, const short, vp8_bilinear_filters_x86_8[8][16]) = -{ - { 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0 }, - { 112, 112, 112, 112, 112, 112, 112, 112, 16, 16, 16, 16, 16, 16, 16, 16 }, - { 96, 96, 96, 96, 96, 96, 96, 96, 32, 32, 32, 32, 32, 32, 32, 32 }, - { 80, 80, 80, 80, 80, 80, 80, 80, 48, 48, 48, 48, 48, 48, 48, 48 }, - { 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64 }, - { 48, 48, 48, 48, 48, 48, 48, 48, 80, 80, 80, 80, 80, 80, 80, 80 }, - { 32, 32, 32, 32, 32, 32, 32, 32, 96, 96, 96, 96, 96, 96, 96, 96 }, - { 16, 16, 16, 16, 16, 16, 16, 16, 112, 112, 112, 112, 112, 112, 112, 112 } +DECLARE_ALIGNED(16, const short, vp8_bilinear_filters_x86_8[8][16]) = { + { 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 112, 112, 112, 112, 112, 112, 112, 112, 16, 16, 16, 16, 16, 
16, 16, 16 }, + { 96, 96, 96, 96, 96, 96, 96, 96, 32, 32, 32, 32, 32, 32, 32, 32 }, + { 80, 80, 80, 80, 80, 80, 80, 80, 48, 48, 48, 48, 48, 48, 48, 48 }, + { 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64 }, + { 48, 48, 48, 48, 48, 48, 48, 48, 80, 80, 80, 80, 80, 80, 80, 80 }, + { 32, 32, 32, 32, 32, 32, 32, 32, 96, 96, 96, 96, 96, 96, 96, 96 }, + { 16, 16, 16, 16, 16, 16, 16, 16, 112, 112, 112, 112, 112, 112, 112, 112 } }; diff --git a/libs/libvpx/vp8/common/x86/idct_blk_mmx.c b/libs/libvpx/vp8/common/x86/idct_blk_mmx.c index f2532b34da..6194cdf8fa 100644 --- a/libs/libvpx/vp8/common/x86/idct_blk_mmx.c +++ b/libs/libvpx/vp8/common/x86/idct_blk_mmx.c @@ -15,114 +15,97 @@ extern void vp8_dequantize_b_impl_mmx(short *sq, short *dq, short *q); -void vp8_dequantize_b_mmx(BLOCKD *d, short *DQC) -{ - short *sq = (short *) d->qcoeff; - short *dq = (short *) d->dqcoeff; +void vp8_dequantize_b_mmx(BLOCKD *d, short *DQC) { + short *sq = (short *)d->qcoeff; + short *dq = (short *)d->dqcoeff; - vp8_dequantize_b_impl_mmx(sq, dq, DQC); + vp8_dequantize_b_impl_mmx(sq, dq, DQC); } -void vp8_dequant_idct_add_y_block_mmx - (short *q, short *dq, - unsigned char *dst, int stride, char *eobs) -{ - int i; +void vp8_dequant_idct_add_y_block_mmx(short *q, short *dq, unsigned char *dst, + int stride, char *eobs) { + int i; - for (i = 0; i < 4; i++) - { - if (eobs[0] > 1) - vp8_dequant_idct_add_mmx (q, dq, dst, stride); - else if (eobs[0] == 1) - { - vp8_dc_only_idct_add_mmx (q[0]*dq[0], dst, stride, dst, stride); - memset(q, 0, 2 * sizeof(q[0])); - } - - if (eobs[1] > 1) - vp8_dequant_idct_add_mmx (q+16, dq, dst+4, stride); - else if (eobs[1] == 1) - { - vp8_dc_only_idct_add_mmx (q[16]*dq[0], dst+4, stride, - dst+4, stride); - memset(q + 16, 0, 2 * sizeof(q[0])); - } - - if (eobs[2] > 1) - vp8_dequant_idct_add_mmx (q+32, dq, dst+8, stride); - else if (eobs[2] == 1) - { - vp8_dc_only_idct_add_mmx (q[32]*dq[0], dst+8, stride, - dst+8, stride); - memset(q + 32, 0, 2 * 
sizeof(q[0])); - } - - if (eobs[3] > 1) - vp8_dequant_idct_add_mmx (q+48, dq, dst+12, stride); - else if (eobs[3] == 1) - { - vp8_dc_only_idct_add_mmx (q[48]*dq[0], dst+12, stride, - dst+12, stride); - memset(q + 48, 0, 2 * sizeof(q[0])); - } - - q += 64; - dst += 4*stride; - eobs += 4; + for (i = 0; i < 4; ++i) { + if (eobs[0] > 1) { + vp8_dequant_idct_add_mmx(q, dq, dst, stride); + } else if (eobs[0] == 1) { + vp8_dc_only_idct_add_mmx(q[0] * dq[0], dst, stride, dst, stride); + memset(q, 0, 2 * sizeof(q[0])); } + + if (eobs[1] > 1) { + vp8_dequant_idct_add_mmx(q + 16, dq, dst + 4, stride); + } else if (eobs[1] == 1) { + vp8_dc_only_idct_add_mmx(q[16] * dq[0], dst + 4, stride, dst + 4, stride); + memset(q + 16, 0, 2 * sizeof(q[0])); + } + + if (eobs[2] > 1) { + vp8_dequant_idct_add_mmx(q + 32, dq, dst + 8, stride); + } else if (eobs[2] == 1) { + vp8_dc_only_idct_add_mmx(q[32] * dq[0], dst + 8, stride, dst + 8, stride); + memset(q + 32, 0, 2 * sizeof(q[0])); + } + + if (eobs[3] > 1) { + vp8_dequant_idct_add_mmx(q + 48, dq, dst + 12, stride); + } else if (eobs[3] == 1) { + vp8_dc_only_idct_add_mmx(q[48] * dq[0], dst + 12, stride, dst + 12, + stride); + memset(q + 48, 0, 2 * sizeof(q[0])); + } + + q += 64; + dst += 4 * stride; + eobs += 4; + } } -void vp8_dequant_idct_add_uv_block_mmx - (short *q, short *dq, - unsigned char *dstu, unsigned char *dstv, int stride, char *eobs) -{ - int i; +void vp8_dequant_idct_add_uv_block_mmx(short *q, short *dq, unsigned char *dstu, + unsigned char *dstv, int stride, + char *eobs) { + int i; - for (i = 0; i < 2; i++) - { - if (eobs[0] > 1) - vp8_dequant_idct_add_mmx (q, dq, dstu, stride); - else if (eobs[0] == 1) - { - vp8_dc_only_idct_add_mmx (q[0]*dq[0], dstu, stride, dstu, stride); - memset(q, 0, 2 * sizeof(q[0])); - } - - if (eobs[1] > 1) - vp8_dequant_idct_add_mmx (q+16, dq, dstu+4, stride); - else if (eobs[1] == 1) - { - vp8_dc_only_idct_add_mmx (q[16]*dq[0], dstu+4, stride, - dstu+4, stride); - memset(q + 16, 0, 2 * 
sizeof(q[0])); - } - - q += 32; - dstu += 4*stride; - eobs += 2; + for (i = 0; i < 2; ++i) { + if (eobs[0] > 1) { + vp8_dequant_idct_add_mmx(q, dq, dstu, stride); + } else if (eobs[0] == 1) { + vp8_dc_only_idct_add_mmx(q[0] * dq[0], dstu, stride, dstu, stride); + memset(q, 0, 2 * sizeof(q[0])); } - for (i = 0; i < 2; i++) - { - if (eobs[0] > 1) - vp8_dequant_idct_add_mmx (q, dq, dstv, stride); - else if (eobs[0] == 1) - { - vp8_dc_only_idct_add_mmx (q[0]*dq[0], dstv, stride, dstv, stride); - memset(q, 0, 2 * sizeof(q[0])); - } - - if (eobs[1] > 1) - vp8_dequant_idct_add_mmx (q+16, dq, dstv+4, stride); - else if (eobs[1] == 1) - { - vp8_dc_only_idct_add_mmx (q[16]*dq[0], dstv+4, stride, - dstv+4, stride); - memset(q + 16, 0, 2 * sizeof(q[0])); - } - - q += 32; - dstv += 4*stride; - eobs += 2; + if (eobs[1] > 1) { + vp8_dequant_idct_add_mmx(q + 16, dq, dstu + 4, stride); + } else if (eobs[1] == 1) { + vp8_dc_only_idct_add_mmx(q[16] * dq[0], dstu + 4, stride, dstu + 4, + stride); + memset(q + 16, 0, 2 * sizeof(q[0])); } + + q += 32; + dstu += 4 * stride; + eobs += 2; + } + + for (i = 0; i < 2; ++i) { + if (eobs[0] > 1) { + vp8_dequant_idct_add_mmx(q, dq, dstv, stride); + } else if (eobs[0] == 1) { + vp8_dc_only_idct_add_mmx(q[0] * dq[0], dstv, stride, dstv, stride); + memset(q, 0, 2 * sizeof(q[0])); + } + + if (eobs[1] > 1) { + vp8_dequant_idct_add_mmx(q + 16, dq, dstv + 4, stride); + } else if (eobs[1] == 1) { + vp8_dc_only_idct_add_mmx(q[16] * dq[0], dstv + 4, stride, dstv + 4, + stride); + memset(q + 16, 0, 2 * sizeof(q[0])); + } + + q += 32; + dstv += 4 * stride; + eobs += 2; + } } diff --git a/libs/libvpx/vp8/common/x86/idct_blk_sse2.c b/libs/libvpx/vp8/common/x86/idct_blk_sse2.c index ae96ec858c..8aefb27997 100644 --- a/libs/libvpx/vp8/common/x86/idct_blk_sse2.c +++ b/libs/libvpx/vp8/common/x86/idct_blk_sse2.c @@ -11,79 +11,74 @@ #include "vpx_config.h" #include "vp8_rtcd.h" -void vp8_idct_dequant_0_2x_sse2 - (short *q, short *dq , - unsigned char *dst, int 
dst_stride); -void vp8_idct_dequant_full_2x_sse2 - (short *q, short *dq , - unsigned char *dst, int dst_stride); +void vp8_idct_dequant_0_2x_sse2(short *q, short *dq, unsigned char *dst, + int dst_stride); +void vp8_idct_dequant_full_2x_sse2(short *q, short *dq, unsigned char *dst, + int dst_stride); -void vp8_dequant_idct_add_y_block_sse2 - (short *q, short *dq, - unsigned char *dst, int stride, char *eobs) -{ - int i; +void vp8_dequant_idct_add_y_block_sse2(short *q, short *dq, unsigned char *dst, + int stride, char *eobs) { + int i; - for (i = 0; i < 4; i++) - { - if (((short *)(eobs))[0]) - { - if (((short *)(eobs))[0] & 0xfefe) - vp8_idct_dequant_full_2x_sse2 (q, dq, dst, stride); - else - vp8_idct_dequant_0_2x_sse2 (q, dq, dst, stride); - } - if (((short *)(eobs))[1]) - { - if (((short *)(eobs))[1] & 0xfefe) - vp8_idct_dequant_full_2x_sse2 (q+32, dq, dst+8, stride); - else - vp8_idct_dequant_0_2x_sse2 (q+32, dq, dst+8, stride); - } - q += 64; - dst += stride*4; - eobs += 4; + for (i = 0; i < 4; ++i) { + if (((short *)(eobs))[0]) { + if (((short *)(eobs))[0] & 0xfefe) { + vp8_idct_dequant_full_2x_sse2(q, dq, dst, stride); + } else { + vp8_idct_dequant_0_2x_sse2(q, dq, dst, stride); + } } + if (((short *)(eobs))[1]) { + if (((short *)(eobs))[1] & 0xfefe) { + vp8_idct_dequant_full_2x_sse2(q + 32, dq, dst + 8, stride); + } else { + vp8_idct_dequant_0_2x_sse2(q + 32, dq, dst + 8, stride); + } + } + q += 64; + dst += stride * 4; + eobs += 4; + } } -void vp8_dequant_idct_add_uv_block_sse2 - (short *q, short *dq, - unsigned char *dstu, unsigned char *dstv, int stride, char *eobs) -{ - if (((short *)(eobs))[0]) - { - if (((short *)(eobs))[0] & 0xfefe) - vp8_idct_dequant_full_2x_sse2 (q, dq, dstu, stride); - else - vp8_idct_dequant_0_2x_sse2 (q, dq, dstu, stride); +void vp8_dequant_idct_add_uv_block_sse2(short *q, short *dq, + unsigned char *dstu, + unsigned char *dstv, int stride, + char *eobs) { + if (((short *)(eobs))[0]) { + if (((short *)(eobs))[0] & 0xfefe) { + 
vp8_idct_dequant_full_2x_sse2(q, dq, dstu, stride); + } else { + vp8_idct_dequant_0_2x_sse2(q, dq, dstu, stride); } - q += 32; - dstu += stride*4; + } + q += 32; + dstu += stride * 4; - if (((short *)(eobs))[1]) - { - if (((short *)(eobs))[1] & 0xfefe) - vp8_idct_dequant_full_2x_sse2 (q, dq, dstu, stride); - else - vp8_idct_dequant_0_2x_sse2 (q, dq, dstu, stride); + if (((short *)(eobs))[1]) { + if (((short *)(eobs))[1] & 0xfefe) { + vp8_idct_dequant_full_2x_sse2(q, dq, dstu, stride); + } else { + vp8_idct_dequant_0_2x_sse2(q, dq, dstu, stride); } - q += 32; + } + q += 32; - if (((short *)(eobs))[2]) - { - if (((short *)(eobs))[2] & 0xfefe) - vp8_idct_dequant_full_2x_sse2 (q, dq, dstv, stride); - else - vp8_idct_dequant_0_2x_sse2 (q, dq, dstv, stride); + if (((short *)(eobs))[2]) { + if (((short *)(eobs))[2] & 0xfefe) { + vp8_idct_dequant_full_2x_sse2(q, dq, dstv, stride); + } else { + vp8_idct_dequant_0_2x_sse2(q, dq, dstv, stride); } - q += 32; - dstv += stride*4; + } + q += 32; + dstv += stride * 4; - if (((short *)(eobs))[3]) - { - if (((short *)(eobs))[3] & 0xfefe) - vp8_idct_dequant_full_2x_sse2 (q, dq, dstv, stride); - else - vp8_idct_dequant_0_2x_sse2 (q, dq, dstv, stride); + if (((short *)(eobs))[3]) { + if (((short *)(eobs))[3] & 0xfefe) { + vp8_idct_dequant_full_2x_sse2(q, dq, dstv, stride); + } else { + vp8_idct_dequant_0_2x_sse2(q, dq, dstv, stride); } + } } diff --git a/libs/libvpx/vp8/common/x86/loopfilter_x86.c b/libs/libvpx/vp8/common/x86/loopfilter_x86.c index 6586004600..aa2f8f1acf 100644 --- a/libs/libvpx/vp8/common/x86/loopfilter_x86.c +++ b/libs/libvpx/vp8/common/x86/loopfilter_x86.c @@ -8,20 +8,19 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "vpx_config.h" #include "vp8/common/loopfilter.h" -#define prototype_loopfilter(sym) \ - void sym(unsigned char *src, int pitch, const unsigned char *blimit,\ - const unsigned char *limit, const unsigned char *thresh, int count) +#define prototype_loopfilter(sym) \ + void sym(unsigned char *src, int pitch, const unsigned char *blimit, \ + const unsigned char *limit, const unsigned char *thresh, int count) -#define prototype_loopfilter_nc(sym) \ - void sym(unsigned char *src, int pitch, const unsigned char *blimit,\ - const unsigned char *limit, const unsigned char *thresh) +#define prototype_loopfilter_nc(sym) \ + void sym(unsigned char *src, int pitch, const unsigned char *blimit, \ + const unsigned char *limit, const unsigned char *thresh) #define prototype_simple_loopfilter(sym) \ - void sym(unsigned char *y, int ystride, const unsigned char *blimit) + void sym(unsigned char *y, int ystride, const unsigned char *blimit) prototype_loopfilter(vp8_mbloop_filter_vertical_edge_mmx); prototype_loopfilter(vp8_mbloop_filter_horizontal_edge_mmx); @@ -47,152 +46,190 @@ extern loop_filter_uvfunction vp8_mbloop_filter_vertical_edge_uv_sse2; #if HAVE_MMX /* Horizontal MB filtering */ -void vp8_loop_filter_mbh_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - vp8_mbloop_filter_horizontal_edge_mmx(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2); +void vp8_loop_filter_mbh_mmx(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, int uv_stride, + loop_filter_info *lfi) { + vp8_mbloop_filter_horizontal_edge_mmx(y_ptr, y_stride, lfi->mblim, lfi->lim, + lfi->hev_thr, 2); - if (u_ptr) - vp8_mbloop_filter_horizontal_edge_mmx(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + if (u_ptr) { + vp8_mbloop_filter_horizontal_edge_mmx(u_ptr, uv_stride, lfi->mblim, + lfi->lim, lfi->hev_thr, 1); + } - if (v_ptr) - vp8_mbloop_filter_horizontal_edge_mmx(v_ptr, 
uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + if (v_ptr) { + vp8_mbloop_filter_horizontal_edge_mmx(v_ptr, uv_stride, lfi->mblim, + lfi->lim, lfi->hev_thr, 1); + } } - /* Vertical MB Filtering */ -void vp8_loop_filter_mbv_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - vp8_mbloop_filter_vertical_edge_mmx(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2); +void vp8_loop_filter_mbv_mmx(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, int uv_stride, + loop_filter_info *lfi) { + vp8_mbloop_filter_vertical_edge_mmx(y_ptr, y_stride, lfi->mblim, lfi->lim, + lfi->hev_thr, 2); - if (u_ptr) - vp8_mbloop_filter_vertical_edge_mmx(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + if (u_ptr) { + vp8_mbloop_filter_vertical_edge_mmx(u_ptr, uv_stride, lfi->mblim, lfi->lim, + lfi->hev_thr, 1); + } - if (v_ptr) - vp8_mbloop_filter_vertical_edge_mmx(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + if (v_ptr) { + vp8_mbloop_filter_vertical_edge_mmx(v_ptr, uv_stride, lfi->mblim, lfi->lim, + lfi->hev_thr, 1); + } } - /* Horizontal B Filtering */ -void vp8_loop_filter_bh_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - vp8_loop_filter_horizontal_edge_mmx(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); - vp8_loop_filter_horizontal_edge_mmx(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); - vp8_loop_filter_horizontal_edge_mmx(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); +void vp8_loop_filter_bh_mmx(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, int uv_stride, + loop_filter_info *lfi) { + vp8_loop_filter_horizontal_edge_mmx(y_ptr + 4 * y_stride, y_stride, lfi->blim, + lfi->lim, lfi->hev_thr, 2); + vp8_loop_filter_horizontal_edge_mmx(y_ptr + 8 * y_stride, y_stride, 
lfi->blim, + lfi->lim, lfi->hev_thr, 2); + vp8_loop_filter_horizontal_edge_mmx(y_ptr + 12 * y_stride, y_stride, + lfi->blim, lfi->lim, lfi->hev_thr, 2); - if (u_ptr) - vp8_loop_filter_horizontal_edge_mmx(u_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1); + if (u_ptr) { + vp8_loop_filter_horizontal_edge_mmx(u_ptr + 4 * uv_stride, uv_stride, + lfi->blim, lfi->lim, lfi->hev_thr, 1); + } - if (v_ptr) - vp8_loop_filter_horizontal_edge_mmx(v_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1); + if (v_ptr) { + vp8_loop_filter_horizontal_edge_mmx(v_ptr + 4 * uv_stride, uv_stride, + lfi->blim, lfi->lim, lfi->hev_thr, 1); + } } - -void vp8_loop_filter_bhs_mmx(unsigned char *y_ptr, int y_stride, const unsigned char *blimit) -{ - vp8_loop_filter_simple_horizontal_edge_mmx(y_ptr + 4 * y_stride, y_stride, blimit); - vp8_loop_filter_simple_horizontal_edge_mmx(y_ptr + 8 * y_stride, y_stride, blimit); - vp8_loop_filter_simple_horizontal_edge_mmx(y_ptr + 12 * y_stride, y_stride, blimit); +void vp8_loop_filter_bhs_mmx(unsigned char *y_ptr, int y_stride, + const unsigned char *blimit) { + vp8_loop_filter_simple_horizontal_edge_mmx(y_ptr + 4 * y_stride, y_stride, + blimit); + vp8_loop_filter_simple_horizontal_edge_mmx(y_ptr + 8 * y_stride, y_stride, + blimit); + vp8_loop_filter_simple_horizontal_edge_mmx(y_ptr + 12 * y_stride, y_stride, + blimit); } - /* Vertical B Filtering */ -void vp8_loop_filter_bv_mmx(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - vp8_loop_filter_vertical_edge_mmx(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); - vp8_loop_filter_vertical_edge_mmx(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); - vp8_loop_filter_vertical_edge_mmx(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); +void vp8_loop_filter_bv_mmx(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, int uv_stride, + 
loop_filter_info *lfi) { + vp8_loop_filter_vertical_edge_mmx(y_ptr + 4, y_stride, lfi->blim, lfi->lim, + lfi->hev_thr, 2); + vp8_loop_filter_vertical_edge_mmx(y_ptr + 8, y_stride, lfi->blim, lfi->lim, + lfi->hev_thr, 2); + vp8_loop_filter_vertical_edge_mmx(y_ptr + 12, y_stride, lfi->blim, lfi->lim, + lfi->hev_thr, 2); - if (u_ptr) - vp8_loop_filter_vertical_edge_mmx(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1); + if (u_ptr) { + vp8_loop_filter_vertical_edge_mmx(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, + lfi->hev_thr, 1); + } - if (v_ptr) - vp8_loop_filter_vertical_edge_mmx(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1); + if (v_ptr) { + vp8_loop_filter_vertical_edge_mmx(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, + lfi->hev_thr, 1); + } } - -void vp8_loop_filter_bvs_mmx(unsigned char *y_ptr, int y_stride, const unsigned char *blimit) -{ - vp8_loop_filter_simple_vertical_edge_mmx(y_ptr + 4, y_stride, blimit); - vp8_loop_filter_simple_vertical_edge_mmx(y_ptr + 8, y_stride, blimit); - vp8_loop_filter_simple_vertical_edge_mmx(y_ptr + 12, y_stride, blimit); +void vp8_loop_filter_bvs_mmx(unsigned char *y_ptr, int y_stride, + const unsigned char *blimit) { + vp8_loop_filter_simple_vertical_edge_mmx(y_ptr + 4, y_stride, blimit); + vp8_loop_filter_simple_vertical_edge_mmx(y_ptr + 8, y_stride, blimit); + vp8_loop_filter_simple_vertical_edge_mmx(y_ptr + 12, y_stride, blimit); } #endif - /* Horizontal MB filtering */ #if HAVE_SSE2 -void vp8_loop_filter_mbh_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - vp8_mbloop_filter_horizontal_edge_sse2(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr); +void vp8_loop_filter_mbh_sse2(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, int uv_stride, + loop_filter_info *lfi) { + vp8_mbloop_filter_horizontal_edge_sse2(y_ptr, y_stride, lfi->mblim, lfi->lim, + lfi->hev_thr); - if (u_ptr) - 
vp8_mbloop_filter_horizontal_edge_uv_sse2(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, v_ptr); + if (u_ptr) { + vp8_mbloop_filter_horizontal_edge_uv_sse2(u_ptr, uv_stride, lfi->mblim, + lfi->lim, lfi->hev_thr, v_ptr); + } } - /* Vertical MB Filtering */ -void vp8_loop_filter_mbv_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ - vp8_mbloop_filter_vertical_edge_sse2(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr); +void vp8_loop_filter_mbv_sse2(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, int uv_stride, + loop_filter_info *lfi) { + vp8_mbloop_filter_vertical_edge_sse2(y_ptr, y_stride, lfi->mblim, lfi->lim, + lfi->hev_thr); - if (u_ptr) - vp8_mbloop_filter_vertical_edge_uv_sse2(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, v_ptr); + if (u_ptr) { + vp8_mbloop_filter_vertical_edge_uv_sse2(u_ptr, uv_stride, lfi->mblim, + lfi->lim, lfi->hev_thr, v_ptr); + } } - /* Horizontal B Filtering */ -void vp8_loop_filter_bh_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ +void vp8_loop_filter_bh_sse2(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, int uv_stride, + loop_filter_info *lfi) { #if ARCH_X86_64 - vp8_loop_filter_bh_y_sse2(y_ptr, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); + vp8_loop_filter_bh_y_sse2(y_ptr, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, + 2); #else - vp8_loop_filter_horizontal_edge_sse2(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr); - vp8_loop_filter_horizontal_edge_sse2(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr); - vp8_loop_filter_horizontal_edge_sse2(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr); + vp8_loop_filter_horizontal_edge_sse2(y_ptr + 4 * y_stride, y_stride, + lfi->blim, lfi->lim, lfi->hev_thr); + 
vp8_loop_filter_horizontal_edge_sse2(y_ptr + 8 * y_stride, y_stride, + lfi->blim, lfi->lim, lfi->hev_thr); + vp8_loop_filter_horizontal_edge_sse2(y_ptr + 12 * y_stride, y_stride, + lfi->blim, lfi->lim, lfi->hev_thr); #endif - if (u_ptr) - vp8_loop_filter_horizontal_edge_uv_sse2(u_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, v_ptr + 4 * uv_stride); + if (u_ptr) { + vp8_loop_filter_horizontal_edge_uv_sse2(u_ptr + 4 * uv_stride, uv_stride, + lfi->blim, lfi->lim, lfi->hev_thr, + v_ptr + 4 * uv_stride); + } } - -void vp8_loop_filter_bhs_sse2(unsigned char *y_ptr, int y_stride, const unsigned char *blimit) -{ - vp8_loop_filter_simple_horizontal_edge_sse2(y_ptr + 4 * y_stride, y_stride, blimit); - vp8_loop_filter_simple_horizontal_edge_sse2(y_ptr + 8 * y_stride, y_stride, blimit); - vp8_loop_filter_simple_horizontal_edge_sse2(y_ptr + 12 * y_stride, y_stride, blimit); +void vp8_loop_filter_bhs_sse2(unsigned char *y_ptr, int y_stride, + const unsigned char *blimit) { + vp8_loop_filter_simple_horizontal_edge_sse2(y_ptr + 4 * y_stride, y_stride, + blimit); + vp8_loop_filter_simple_horizontal_edge_sse2(y_ptr + 8 * y_stride, y_stride, + blimit); + vp8_loop_filter_simple_horizontal_edge_sse2(y_ptr + 12 * y_stride, y_stride, + blimit); } - /* Vertical B Filtering */ -void vp8_loop_filter_bv_sse2(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr, - int y_stride, int uv_stride, loop_filter_info *lfi) -{ +void vp8_loop_filter_bv_sse2(unsigned char *y_ptr, unsigned char *u_ptr, + unsigned char *v_ptr, int y_stride, int uv_stride, + loop_filter_info *lfi) { #if ARCH_X86_64 - vp8_loop_filter_bv_y_sse2(y_ptr, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2); + vp8_loop_filter_bv_y_sse2(y_ptr, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, + 2); #else - vp8_loop_filter_vertical_edge_sse2(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr); - vp8_loop_filter_vertical_edge_sse2(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr); - 
vp8_loop_filter_vertical_edge_sse2(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr); + vp8_loop_filter_vertical_edge_sse2(y_ptr + 4, y_stride, lfi->blim, lfi->lim, + lfi->hev_thr); + vp8_loop_filter_vertical_edge_sse2(y_ptr + 8, y_stride, lfi->blim, lfi->lim, + lfi->hev_thr); + vp8_loop_filter_vertical_edge_sse2(y_ptr + 12, y_stride, lfi->blim, lfi->lim, + lfi->hev_thr); #endif - if (u_ptr) - vp8_loop_filter_vertical_edge_uv_sse2(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, v_ptr + 4); + if (u_ptr) { + vp8_loop_filter_vertical_edge_uv_sse2(u_ptr + 4, uv_stride, lfi->blim, + lfi->lim, lfi->hev_thr, v_ptr + 4); + } } - -void vp8_loop_filter_bvs_sse2(unsigned char *y_ptr, int y_stride, const unsigned char *blimit) -{ - vp8_loop_filter_simple_vertical_edge_sse2(y_ptr + 4, y_stride, blimit); - vp8_loop_filter_simple_vertical_edge_sse2(y_ptr + 8, y_stride, blimit); - vp8_loop_filter_simple_vertical_edge_sse2(y_ptr + 12, y_stride, blimit); +void vp8_loop_filter_bvs_sse2(unsigned char *y_ptr, int y_stride, + const unsigned char *blimit) { + vp8_loop_filter_simple_vertical_edge_sse2(y_ptr + 4, y_stride, blimit); + vp8_loop_filter_simple_vertical_edge_sse2(y_ptr + 8, y_stride, blimit); + vp8_loop_filter_simple_vertical_edge_sse2(y_ptr + 12, y_stride, blimit); } #endif diff --git a/libs/libvpx/vp8/common/x86/mfqe_sse2.asm b/libs/libvpx/vp8/common/x86/mfqe_sse2.asm index a8a7f568dc..8177b79226 100644 --- a/libs/libvpx/vp8/common/x86/mfqe_sse2.asm +++ b/libs/libvpx/vp8/common/x86/mfqe_sse2.asm @@ -45,7 +45,7 @@ sym(vp8_filter_by_weight16x16_sse2): mov rcx, 16 ; loop count pxor xmm6, xmm6 -.combine +.combine: movdqa xmm2, [rax] movdqa xmm4, [rdx] add rax, rsi @@ -122,7 +122,7 @@ sym(vp8_filter_by_weight8x8_sse2): mov rcx, 8 ; loop count pxor xmm4, xmm4 -.combine +.combine: movq xmm2, [rax] movq xmm3, [rdx] add rax, rsi @@ -189,7 +189,7 @@ sym(vp8_variance_and_sad_16x16_sse2): ; Because we're working with the actual output frames ; we can't depend on any kind 
of data alignment. -.accumulate +.accumulate: movdqa xmm0, [rax] ; src1 movdqa xmm1, [rdx] ; src2 add rax, rcx ; src1 + stride1 diff --git a/libs/libvpx/vp8/common/x86/postproc_mmx.asm b/libs/libvpx/vp8/common/x86/postproc_mmx.asm deleted file mode 100644 index a2b16327f0..0000000000 --- a/libs/libvpx/vp8/common/x86/postproc_mmx.asm +++ /dev/null @@ -1,315 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - - -%include "vpx_ports/x86_abi_support.asm" - -%define VP8_FILTER_WEIGHT 128 -%define VP8_FILTER_SHIFT 7 - -;void vp8_mbpost_proc_down_mmx(unsigned char *dst, -; int pitch, int rows, int cols,int flimit) -extern sym(vp8_rv) -global sym(vp8_mbpost_proc_down_mmx) PRIVATE -sym(vp8_mbpost_proc_down_mmx): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 5 - GET_GOT rbx - push rsi - push rdi - ; end prolog - - ALIGN_STACK 16, rax - sub rsp, 136 - - ; unsigned char d[16][8] at [rsp] - ; create flimit2 at [rsp+128] - mov eax, dword ptr arg(4) ;flimit - mov [rsp+128], eax - mov [rsp+128+4], eax -%define flimit2 [rsp+128] - -%if ABI_IS_32BIT=0 - lea r8, [GLOBAL(sym(vp8_rv))] -%endif - - ;rows +=8; - add dword ptr arg(2), 8 - - ;for(c=0; cuser_buffer_end = source+source_sz; - br->user_buffer = source; - br->value = 0; - br->count = -8; - br->range = 255; - br->decrypt_cb = decrypt_cb; - br->decrypt_state = decrypt_state; +int vp8dx_start_decode(BOOL_DECODER *br, const unsigned char *source, + unsigned int source_sz, vpx_decrypt_cb decrypt_cb, + void *decrypt_state) { + br->user_buffer_end = source + source_sz; + br->user_buffer = source; + br->value = 0; + br->count = -8; + br->range = 255; + br->decrypt_cb = 
decrypt_cb; + br->decrypt_state = decrypt_state; - if (source_sz && !source) - return 1; + if (source_sz && !source) return 1; - /* Populate the buffer */ - vp8dx_bool_decoder_fill(br); + /* Populate the buffer */ + vp8dx_bool_decoder_fill(br); - return 0; + return 0; } -void vp8dx_bool_decoder_fill(BOOL_DECODER *br) -{ - const unsigned char *bufptr = br->user_buffer; - VP8_BD_VALUE value = br->value; - int count = br->count; - int shift = VP8_BD_VALUE_SIZE - CHAR_BIT - (count + CHAR_BIT); - size_t bytes_left = br->user_buffer_end - bufptr; - size_t bits_left = bytes_left * CHAR_BIT; - int x = (int)(shift + CHAR_BIT - bits_left); - int loop_end = 0; - unsigned char decrypted[sizeof(VP8_BD_VALUE) + 1]; +void vp8dx_bool_decoder_fill(BOOL_DECODER *br) { + const unsigned char *bufptr = br->user_buffer; + VP8_BD_VALUE value = br->value; + int count = br->count; + int shift = VP8_BD_VALUE_SIZE - CHAR_BIT - (count + CHAR_BIT); + size_t bytes_left = br->user_buffer_end - bufptr; + size_t bits_left = bytes_left * CHAR_BIT; + int x = shift + CHAR_BIT - (int)bits_left; + int loop_end = 0; + unsigned char decrypted[sizeof(VP8_BD_VALUE) + 1]; - if (br->decrypt_cb) { - size_t n = VPXMIN(sizeof(decrypted), bytes_left); - br->decrypt_cb(br->decrypt_state, bufptr, decrypted, (int)n); - bufptr = decrypted; + if (br->decrypt_cb) { + size_t n = VPXMIN(sizeof(decrypted), bytes_left); + br->decrypt_cb(br->decrypt_state, bufptr, decrypted, (int)n); + bufptr = decrypted; + } + + if (x >= 0) { + count += VP8_LOTS_OF_BITS; + loop_end = x; + } + + if (x < 0 || bits_left) { + while (shift >= loop_end) { + count += CHAR_BIT; + value |= (VP8_BD_VALUE)*bufptr << shift; + ++bufptr; + ++br->user_buffer; + shift -= CHAR_BIT; } + } - if(x >= 0) - { - count += VP8_LOTS_OF_BITS; - loop_end = x; - } - - if (x < 0 || bits_left) - { - while(shift >= loop_end) - { - count += CHAR_BIT; - value |= (VP8_BD_VALUE)*bufptr << shift; - ++bufptr; - ++br->user_buffer; - shift -= CHAR_BIT; - } - } - - br->value = 
value; - br->count = count; + br->value = value; + br->count = count; } diff --git a/libs/libvpx/vp8/decoder/dboolhuff.h b/libs/libvpx/vp8/decoder/dboolhuff.h index cc9eaaf439..04c027cd78 100644 --- a/libs/libvpx/vp8/decoder/dboolhuff.h +++ b/libs/libvpx/vp8/decoder/dboolhuff.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_DECODER_DBOOLHUFF_H_ #define VP8_DECODER_DBOOLHUFF_H_ @@ -26,112 +25,102 @@ extern "C" { typedef size_t VP8_BD_VALUE; -#define VP8_BD_VALUE_SIZE ((int)sizeof(VP8_BD_VALUE)*CHAR_BIT) +#define VP8_BD_VALUE_SIZE ((int)sizeof(VP8_BD_VALUE) * CHAR_BIT) /*This is meant to be a large, positive constant that can still be efficiently loaded as an immediate (on platforms like ARM, for example). Even relatively modest values like 100 would work fine.*/ #define VP8_LOTS_OF_BITS (0x40000000) -typedef struct -{ - const unsigned char *user_buffer_end; - const unsigned char *user_buffer; - VP8_BD_VALUE value; - int count; - unsigned int range; - vpx_decrypt_cb decrypt_cb; - void *decrypt_state; +typedef struct { + const unsigned char *user_buffer_end; + const unsigned char *user_buffer; + VP8_BD_VALUE value; + int count; + unsigned int range; + vpx_decrypt_cb decrypt_cb; + void *decrypt_state; } BOOL_DECODER; DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]); -int vp8dx_start_decode(BOOL_DECODER *br, - const unsigned char *source, - unsigned int source_sz, - vpx_decrypt_cb decrypt_cb, +int vp8dx_start_decode(BOOL_DECODER *br, const unsigned char *source, + unsigned int source_sz, vpx_decrypt_cb decrypt_cb, void *decrypt_state); void vp8dx_bool_decoder_fill(BOOL_DECODER *br); - static int vp8dx_decode_bool(BOOL_DECODER *br, int probability) { - unsigned int bit = 0; - VP8_BD_VALUE value; - unsigned int split; - VP8_BD_VALUE bigsplit; - int count; - unsigned int range; + unsigned int bit = 0; + VP8_BD_VALUE value; + unsigned int split; + VP8_BD_VALUE bigsplit; + int count; + unsigned int range; - split 
= 1 + (((br->range - 1) * probability) >> 8); + split = 1 + (((br->range - 1) * probability) >> 8); - if(br->count < 0) - vp8dx_bool_decoder_fill(br); + if (br->count < 0) vp8dx_bool_decoder_fill(br); - value = br->value; - count = br->count; + value = br->value; + count = br->count; - bigsplit = (VP8_BD_VALUE)split << (VP8_BD_VALUE_SIZE - 8); + bigsplit = (VP8_BD_VALUE)split << (VP8_BD_VALUE_SIZE - 8); - range = split; + range = split; - if (value >= bigsplit) - { - range = br->range - split; - value = value - bigsplit; - bit = 1; - } + if (value >= bigsplit) { + range = br->range - split; + value = value - bigsplit; + bit = 1; + } - { - register unsigned int shift = vp8_norm[range]; - range <<= shift; - value <<= shift; - count -= shift; - } - br->value = value; - br->count = count; - br->range = range; + { + register int shift = vp8_norm[range]; + range <<= shift; + value <<= shift; + count -= shift; + } + br->value = value; + br->count = count; + br->range = range; - return bit; + return bit; } -static INLINE int vp8_decode_value(BOOL_DECODER *br, int bits) -{ - int z = 0; - int bit; +static INLINE int vp8_decode_value(BOOL_DECODER *br, int bits) { + int z = 0; + int bit; - for (bit = bits - 1; bit >= 0; bit--) - { - z |= (vp8dx_decode_bool(br, 0x80) << bit); - } + for (bit = bits - 1; bit >= 0; bit--) { + z |= (vp8dx_decode_bool(br, 0x80) << bit); + } - return z; + return z; } -static INLINE int vp8dx_bool_error(BOOL_DECODER *br) -{ - /* Check if we have reached the end of the buffer. - * - * Variable 'count' stores the number of bits in the 'value' buffer, minus - * 8. The top byte is part of the algorithm, and the remainder is buffered - * to be shifted into it. So if count == 8, the top 16 bits of 'value' are - * occupied, 8 for the algorithm and 8 in the buffer. - * - * When reading a byte from the user's buffer, count is filled with 8 and - * one byte is filled into the value buffer. 
When we reach the end of the - * data, count is additionally filled with VP8_LOTS_OF_BITS. So when - * count == VP8_LOTS_OF_BITS - 1, the user's data has been exhausted. +static INLINE int vp8dx_bool_error(BOOL_DECODER *br) { + /* Check if we have reached the end of the buffer. + * + * Variable 'count' stores the number of bits in the 'value' buffer, minus + * 8. The top byte is part of the algorithm, and the remainder is buffered + * to be shifted into it. So if count == 8, the top 16 bits of 'value' are + * occupied, 8 for the algorithm and 8 in the buffer. + * + * When reading a byte from the user's buffer, count is filled with 8 and + * one byte is filled into the value buffer. When we reach the end of the + * data, count is additionally filled with VP8_LOTS_OF_BITS. So when + * count == VP8_LOTS_OF_BITS - 1, the user's data has been exhausted. + */ + if ((br->count > VP8_BD_VALUE_SIZE) && (br->count < VP8_LOTS_OF_BITS)) { + /* We have tried to decode bits after the end of + * stream was encountered. */ - if ((br->count > VP8_BD_VALUE_SIZE) && (br->count < VP8_LOTS_OF_BITS)) - { - /* We have tried to decode bits after the end of - * stream was encountered. - */ - return 1; - } + return 1; + } - /* No error. */ - return 0; + /* No error. */ + return 0; } #ifdef __cplusplus diff --git a/libs/libvpx/vp8/decoder/decodeframe.c b/libs/libvpx/vp8/decoder/decodeframe.c index 4bc87eb134..0aec2a01b0 100644 --- a/libs/libvpx/vp8/decoder/decodeframe.c +++ b/libs/libvpx/vp8/decoder/decodeframe.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "vpx_config.h" #include "vp8_rtcd.h" #include "./vpx_scale_rtcd.h" @@ -40,264 +39,212 @@ #include #include -void vp8cx_init_de_quantizer(VP8D_COMP *pbi) -{ - int Q; - VP8_COMMON *const pc = & pbi->common; +void vp8cx_init_de_quantizer(VP8D_COMP *pbi) { + int Q; + VP8_COMMON *const pc = &pbi->common; - for (Q = 0; Q < QINDEX_RANGE; Q++) - { - pc->Y1dequant[Q][0] = (short)vp8_dc_quant(Q, pc->y1dc_delta_q); - pc->Y2dequant[Q][0] = (short)vp8_dc2quant(Q, pc->y2dc_delta_q); - pc->UVdequant[Q][0] = (short)vp8_dc_uv_quant(Q, pc->uvdc_delta_q); + for (Q = 0; Q < QINDEX_RANGE; ++Q) { + pc->Y1dequant[Q][0] = (short)vp8_dc_quant(Q, pc->y1dc_delta_q); + pc->Y2dequant[Q][0] = (short)vp8_dc2quant(Q, pc->y2dc_delta_q); + pc->UVdequant[Q][0] = (short)vp8_dc_uv_quant(Q, pc->uvdc_delta_q); - pc->Y1dequant[Q][1] = (short)vp8_ac_yquant(Q); - pc->Y2dequant[Q][1] = (short)vp8_ac2quant(Q, pc->y2ac_delta_q); - pc->UVdequant[Q][1] = (short)vp8_ac_uv_quant(Q, pc->uvac_delta_q); - } + pc->Y1dequant[Q][1] = (short)vp8_ac_yquant(Q); + pc->Y2dequant[Q][1] = (short)vp8_ac2quant(Q, pc->y2ac_delta_q); + pc->UVdequant[Q][1] = (short)vp8_ac_uv_quant(Q, pc->uvac_delta_q); + } } -void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd) -{ - int i; - int QIndex; - MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; - VP8_COMMON *const pc = & pbi->common; +void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd) { + int i; + int QIndex; + MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; + VP8_COMMON *const pc = &pbi->common; - /* Decide whether to use the default or alternate baseline Q value. */ - if (xd->segmentation_enabled) - { - /* Abs Value */ - if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA) - QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id]; + /* Decide whether to use the default or alternate baseline Q value. 
*/ + if (xd->segmentation_enabled) { + /* Abs Value */ + if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA) { + QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id]; - /* Delta Value */ - else - QIndex = pc->base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id]; - - QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; /* Clamp to valid range */ + /* Delta Value */ + } else { + QIndex = pc->base_qindex + + xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id]; } - else - QIndex = pc->base_qindex; - /* Set up the macroblock dequant constants */ - xd->dequant_y1_dc[0] = 1; - xd->dequant_y1[0] = pc->Y1dequant[QIndex][0]; - xd->dequant_y2[0] = pc->Y2dequant[QIndex][0]; - xd->dequant_uv[0] = pc->UVdequant[QIndex][0]; + QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) + : 0; /* Clamp to valid range */ + } else { + QIndex = pc->base_qindex; + } - for (i = 1; i < 16; i++) - { - xd->dequant_y1_dc[i] = - xd->dequant_y1[i] = pc->Y1dequant[QIndex][1]; - xd->dequant_y2[i] = pc->Y2dequant[QIndex][1]; - xd->dequant_uv[i] = pc->UVdequant[QIndex][1]; - } + /* Set up the macroblock dequant constants */ + xd->dequant_y1_dc[0] = 1; + xd->dequant_y1[0] = pc->Y1dequant[QIndex][0]; + xd->dequant_y2[0] = pc->Y2dequant[QIndex][0]; + xd->dequant_uv[0] = pc->UVdequant[QIndex][0]; + + for (i = 1; i < 16; ++i) { + xd->dequant_y1_dc[i] = xd->dequant_y1[i] = pc->Y1dequant[QIndex][1]; + xd->dequant_y2[i] = pc->Y2dequant[QIndex][1]; + xd->dequant_uv[i] = pc->UVdequant[QIndex][1]; + } } static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, - unsigned int mb_idx) -{ - MB_PREDICTION_MODE mode; - int i; + unsigned int mb_idx) { + MB_PREDICTION_MODE mode; + int i; #if CONFIG_ERROR_CONCEALMENT - int corruption_detected = 0; + int corruption_detected = 0; #else - (void)mb_idx; + (void)mb_idx; #endif - if (xd->mode_info_context->mbmi.mb_skip_coeff) - { - vp8_reset_mb_tokens_context(xd); - } - else if (!vp8dx_bool_error(xd->current_bc)) - { - 
int eobtotal; - eobtotal = vp8_decode_mb_tokens(pbi, xd); + if (xd->mode_info_context->mbmi.mb_skip_coeff) { + vp8_reset_mb_tokens_context(xd); + } else if (!vp8dx_bool_error(xd->current_bc)) { + int eobtotal; + eobtotal = vp8_decode_mb_tokens(pbi, xd); - /* Special case: Force the loopfilter to skip when eobtotal is zero */ - xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal==0); - } + /* Special case: Force the loopfilter to skip when eobtotal is zero */ + xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal == 0); + } - mode = xd->mode_info_context->mbmi.mode; - - if (xd->segmentation_enabled) - vp8_mb_init_dequantizer(pbi, xd); + mode = xd->mode_info_context->mbmi.mode; + if (xd->segmentation_enabled) vp8_mb_init_dequantizer(pbi, xd); #if CONFIG_ERROR_CONCEALMENT - if(pbi->ec_active) - { - int throw_residual; - /* When we have independent partitions we can apply residual even - * though other partitions within the frame are corrupt. + if (pbi->ec_active) { + int throw_residual; + /* When we have independent partitions we can apply residual even + * though other partitions within the frame are corrupt. + */ + throw_residual = + (!pbi->independent_partitions && pbi->frame_corrupt_residual); + throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc)); + + if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual)) { + /* MB with corrupt residuals or corrupt mode/motion vectors. + * Better to use the predictor as reconstruction. 
+ */ + pbi->frame_corrupt_residual = 1; + memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); + + corruption_detected = 1; + + /* force idct to be skipped for B_PRED and use the + * prediction only for reconstruction + * */ + memset(xd->eobs, 0, 25); + } + } +#endif + + /* do prediction */ + if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { + vp8_build_intra_predictors_mbuv_s( + xd, xd->recon_above[1], xd->recon_above[2], xd->recon_left[1], + xd->recon_left[2], xd->recon_left_stride[1], xd->dst.u_buffer, + xd->dst.v_buffer, xd->dst.uv_stride); + + if (mode != B_PRED) { + vp8_build_intra_predictors_mby_s( + xd, xd->recon_above[0], xd->recon_left[0], xd->recon_left_stride[0], + xd->dst.y_buffer, xd->dst.y_stride); + } else { + short *DQC = xd->dequant_y1; + int dst_stride = xd->dst.y_stride; + + /* clear out residual eob info */ + if (xd->mode_info_context->mbmi.mb_skip_coeff) memset(xd->eobs, 0, 25); + + intra_prediction_down_copy(xd, xd->recon_above[0] + 16); + + for (i = 0; i < 16; ++i) { + BLOCKD *b = &xd->block[i]; + unsigned char *dst = xd->dst.y_buffer + b->offset; + B_PREDICTION_MODE b_mode = xd->mode_info_context->bmi[i].as_mode; + unsigned char *Above = dst - dst_stride; + unsigned char *yleft = dst - 1; + int left_stride = dst_stride; + unsigned char top_left = Above[-1]; + + vp8_intra4x4_predict(Above, yleft, left_stride, b_mode, dst, dst_stride, + top_left); + + if (xd->eobs[i]) { + if (xd->eobs[i] > 1) { + vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride); + } else { + vp8_dc_only_idct_add(b->qcoeff[0] * DQC[0], dst, dst_stride, dst, + dst_stride); + memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); + } + } + } + } + } else { + vp8_build_inter_predictors_mb(xd); + } + +#if CONFIG_ERROR_CONCEALMENT + if (corruption_detected) { + return; + } +#endif + + if (!xd->mode_info_context->mbmi.mb_skip_coeff) { + /* dequantization and idct */ + if (mode != B_PRED) { + short *DQC = xd->dequant_y1; + + if (mode != SPLITMV) { + BLOCKD *b = &xd->block[24]; + + /* 
do 2nd order transform on the dc block */ + if (xd->eobs[24] > 1) { + vp8_dequantize_b(b, xd->dequant_y2); + + vp8_short_inv_walsh4x4(&b->dqcoeff[0], xd->qcoeff); + memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0])); + } else { + b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0]; + vp8_short_inv_walsh4x4_1(&b->dqcoeff[0], xd->qcoeff); + memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); + } + + /* override the dc dequant constant in order to preserve the + * dc components */ - throw_residual = (!pbi->independent_partitions && - pbi->frame_corrupt_residual); - throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc)); + DQC = xd->dequant_y1_dc; + } - if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual)) - { - /* MB with corrupt residuals or corrupt mode/motion vectors. - * Better to use the predictor as reconstruction. - */ - pbi->frame_corrupt_residual = 1; - memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); - vp8_conceal_corrupt_mb(xd); - - - corruption_detected = 1; - - /* force idct to be skipped for B_PRED and use the - * prediction only for reconstruction - * */ - memset(xd->eobs, 0, 25); - } - } -#endif - - /* do prediction */ - if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) - { - vp8_build_intra_predictors_mbuv_s(xd, - xd->recon_above[1], - xd->recon_above[2], - xd->recon_left[1], - xd->recon_left[2], - xd->recon_left_stride[1], - xd->dst.u_buffer, xd->dst.v_buffer, - xd->dst.uv_stride); - - if (mode != B_PRED) - { - vp8_build_intra_predictors_mby_s(xd, - xd->recon_above[0], - xd->recon_left[0], - xd->recon_left_stride[0], - xd->dst.y_buffer, - xd->dst.y_stride); - } - else - { - short *DQC = xd->dequant_y1; - int dst_stride = xd->dst.y_stride; - - /* clear out residual eob info */ - if(xd->mode_info_context->mbmi.mb_skip_coeff) - memset(xd->eobs, 0, 25); - - intra_prediction_down_copy(xd, xd->recon_above[0] + 16); - - for (i = 0; i < 16; i++) - { - BLOCKD *b = &xd->block[i]; - unsigned char *dst = xd->dst.y_buffer + b->offset; - 
B_PREDICTION_MODE b_mode = - xd->mode_info_context->bmi[i].as_mode; - unsigned char *Above = dst - dst_stride; - unsigned char *yleft = dst - 1; - int left_stride = dst_stride; - unsigned char top_left = Above[-1]; - - vp8_intra4x4_predict(Above, yleft, left_stride, b_mode, - dst, dst_stride, top_left); - - if (xd->eobs[i]) - { - if (xd->eobs[i] > 1) - { - vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride); - } - else - { - vp8_dc_only_idct_add - (b->qcoeff[0] * DQC[0], - dst, dst_stride, - dst, dst_stride); - memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); - } - } - } - } - } - else - { - vp8_build_inter_predictors_mb(xd); + vp8_dequant_idct_add_y_block(xd->qcoeff, DQC, xd->dst.y_buffer, + xd->dst.y_stride, xd->eobs); } - -#if CONFIG_ERROR_CONCEALMENT - if (corruption_detected) - { - return; - } -#endif - - if(!xd->mode_info_context->mbmi.mb_skip_coeff) - { - /* dequantization and idct */ - if (mode != B_PRED) - { - short *DQC = xd->dequant_y1; - - if (mode != SPLITMV) - { - BLOCKD *b = &xd->block[24]; - - /* do 2nd order transform on the dc block */ - if (xd->eobs[24] > 1) - { - vp8_dequantize_b(b, xd->dequant_y2); - - vp8_short_inv_walsh4x4(&b->dqcoeff[0], - xd->qcoeff); - memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0])); - } - else - { - b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0]; - vp8_short_inv_walsh4x4_1(&b->dqcoeff[0], - xd->qcoeff); - memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); - } - - /* override the dc dequant constant in order to preserve the - * dc components - */ - DQC = xd->dequant_y1_dc; - } - - vp8_dequant_idct_add_y_block - (xd->qcoeff, DQC, - xd->dst.y_buffer, - xd->dst.y_stride, xd->eobs); - } - - vp8_dequant_idct_add_uv_block - (xd->qcoeff+16*16, xd->dequant_uv, - xd->dst.u_buffer, xd->dst.v_buffer, - xd->dst.uv_stride, xd->eobs+16); - } + vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv, + xd->dst.u_buffer, xd->dst.v_buffer, + xd->dst.uv_stride, xd->eobs + 16); + } } -static int get_delta_q(vp8_reader *bc, int 
prev, int *q_update) -{ - int ret_val = 0; +static int get_delta_q(vp8_reader *bc, int prev, int *q_update) { + int ret_val = 0; - if (vp8_read_bit(bc)) - { - ret_val = vp8_read_literal(bc, 4); + if (vp8_read_bit(bc)) { + ret_val = vp8_read_literal(bc, 4); - if (vp8_read_bit(bc)) - ret_val = -ret_val; - } + if (vp8_read_bit(bc)) ret_val = -ret_val; + } - /* Trigger a quantizer update if the delta-q value has changed */ - if (ret_val != prev) - *q_update = 1; + /* Trigger a quantizer update if the delta-q value has changed */ + if (ret_val != prev) *q_update = 1; - return ret_val; + return ret_val; } #ifdef PACKET_TESTING @@ -305,1095 +252,999 @@ static int get_delta_q(vp8_reader *bc, int prev, int *q_update) FILE *vpxlog = 0; #endif -static void yv12_extend_frame_top_c(YV12_BUFFER_CONFIG *ybf) -{ - int i; - unsigned char *src_ptr1; - unsigned char *dest_ptr1; +static void yv12_extend_frame_top_c(YV12_BUFFER_CONFIG *ybf) { + int i; + unsigned char *src_ptr1; + unsigned char *dest_ptr1; - unsigned int Border; - int plane_stride; + unsigned int Border; + int plane_stride; - /***********/ - /* Y Plane */ - /***********/ - Border = ybf->border; - plane_stride = ybf->y_stride; - src_ptr1 = ybf->y_buffer - Border; - dest_ptr1 = src_ptr1 - (Border * plane_stride); + /***********/ + /* Y Plane */ + /***********/ + Border = ybf->border; + plane_stride = ybf->y_stride; + src_ptr1 = ybf->y_buffer - Border; + dest_ptr1 = src_ptr1 - (Border * plane_stride); - for (i = 0; i < (int)Border; i++) - { - memcpy(dest_ptr1, src_ptr1, plane_stride); - dest_ptr1 += plane_stride; - } + for (i = 0; i < (int)Border; ++i) { + memcpy(dest_ptr1, src_ptr1, plane_stride); + dest_ptr1 += plane_stride; + } + /***********/ + /* U Plane */ + /***********/ + plane_stride = ybf->uv_stride; + Border /= 2; + src_ptr1 = ybf->u_buffer - Border; + dest_ptr1 = src_ptr1 - (Border * plane_stride); - /***********/ - /* U Plane */ - /***********/ - plane_stride = ybf->uv_stride; - Border /= 2; - src_ptr1 = 
ybf->u_buffer - Border; - dest_ptr1 = src_ptr1 - (Border * plane_stride); + for (i = 0; i < (int)(Border); ++i) { + memcpy(dest_ptr1, src_ptr1, plane_stride); + dest_ptr1 += plane_stride; + } - for (i = 0; i < (int)(Border); i++) - { - memcpy(dest_ptr1, src_ptr1, plane_stride); - dest_ptr1 += plane_stride; - } + /***********/ + /* V Plane */ + /***********/ - /***********/ - /* V Plane */ - /***********/ + src_ptr1 = ybf->v_buffer - Border; + dest_ptr1 = src_ptr1 - (Border * plane_stride); - src_ptr1 = ybf->v_buffer - Border; - dest_ptr1 = src_ptr1 - (Border * plane_stride); - - for (i = 0; i < (int)(Border); i++) - { - memcpy(dest_ptr1, src_ptr1, plane_stride); - dest_ptr1 += plane_stride; - } + for (i = 0; i < (int)(Border); ++i) { + memcpy(dest_ptr1, src_ptr1, plane_stride); + dest_ptr1 += plane_stride; + } } -static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf) -{ - int i; - unsigned char *src_ptr1, *src_ptr2; - unsigned char *dest_ptr2; +static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf) { + int i; + unsigned char *src_ptr1, *src_ptr2; + unsigned char *dest_ptr2; - unsigned int Border; - int plane_stride; - int plane_height; + unsigned int Border; + int plane_stride; + int plane_height; - /***********/ - /* Y Plane */ - /***********/ - Border = ybf->border; - plane_stride = ybf->y_stride; - plane_height = ybf->y_height; + /***********/ + /* Y Plane */ + /***********/ + Border = ybf->border; + plane_stride = ybf->y_stride; + plane_height = ybf->y_height; - src_ptr1 = ybf->y_buffer - Border; - src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride; - dest_ptr2 = src_ptr2 + plane_stride; + src_ptr1 = ybf->y_buffer - Border; + src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride; + dest_ptr2 = src_ptr2 + plane_stride; - for (i = 0; i < (int)Border; i++) - { - memcpy(dest_ptr2, src_ptr2, plane_stride); - dest_ptr2 += plane_stride; - } + for (i = 0; i < (int)Border; ++i) { + memcpy(dest_ptr2, src_ptr2, plane_stride); 
+ dest_ptr2 += plane_stride; + } + /***********/ + /* U Plane */ + /***********/ + plane_stride = ybf->uv_stride; + plane_height = ybf->uv_height; + Border /= 2; - /***********/ - /* U Plane */ - /***********/ - plane_stride = ybf->uv_stride; - plane_height = ybf->uv_height; - Border /= 2; + src_ptr1 = ybf->u_buffer - Border; + src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride; + dest_ptr2 = src_ptr2 + plane_stride; - src_ptr1 = ybf->u_buffer - Border; - src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride; - dest_ptr2 = src_ptr2 + plane_stride; + for (i = 0; i < (int)(Border); ++i) { + memcpy(dest_ptr2, src_ptr2, plane_stride); + dest_ptr2 += plane_stride; + } - for (i = 0; i < (int)(Border); i++) - { - memcpy(dest_ptr2, src_ptr2, plane_stride); - dest_ptr2 += plane_stride; - } + /***********/ + /* V Plane */ + /***********/ - /***********/ - /* V Plane */ - /***********/ + src_ptr1 = ybf->v_buffer - Border; + src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride; + dest_ptr2 = src_ptr2 + plane_stride; - src_ptr1 = ybf->v_buffer - Border; - src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride; - dest_ptr2 = src_ptr2 + plane_stride; - - for (i = 0; i < (int)(Border); i++) - { - memcpy(dest_ptr2, src_ptr2, plane_stride); - dest_ptr2 += plane_stride; - } + for (i = 0; i < (int)(Border); ++i) { + memcpy(dest_ptr2, src_ptr2, plane_stride); + dest_ptr2 += plane_stride; + } } static void yv12_extend_frame_left_right_c(YV12_BUFFER_CONFIG *ybf, unsigned char *y_src, unsigned char *u_src, - unsigned char *v_src) -{ - int i; - unsigned char *src_ptr1, *src_ptr2; - unsigned char *dest_ptr1, *dest_ptr2; + unsigned char *v_src) { + int i; + unsigned char *src_ptr1, *src_ptr2; + unsigned char *dest_ptr1, *dest_ptr2; - unsigned int Border; - int plane_stride; - int plane_height; - int plane_width; + unsigned int Border; + int plane_stride; + int plane_height; + int plane_width; - /***********/ - /* Y Plane */ - 
/***********/ - Border = ybf->border; - plane_stride = ybf->y_stride; - plane_height = 16; - plane_width = ybf->y_width; + /***********/ + /* Y Plane */ + /***********/ + Border = ybf->border; + plane_stride = ybf->y_stride; + plane_height = 16; + plane_width = ybf->y_width; - /* copy the left and right most columns out */ - src_ptr1 = y_src; - src_ptr2 = src_ptr1 + plane_width - 1; - dest_ptr1 = src_ptr1 - Border; - dest_ptr2 = src_ptr2 + 1; + /* copy the left and right most columns out */ + src_ptr1 = y_src; + src_ptr2 = src_ptr1 + plane_width - 1; + dest_ptr1 = src_ptr1 - Border; + dest_ptr2 = src_ptr2 + 1; - for (i = 0; i < plane_height; i++) - { - memset(dest_ptr1, src_ptr1[0], Border); - memset(dest_ptr2, src_ptr2[0], Border); - src_ptr1 += plane_stride; - src_ptr2 += plane_stride; - dest_ptr1 += plane_stride; - dest_ptr2 += plane_stride; - } + for (i = 0; i < plane_height; ++i) { + memset(dest_ptr1, src_ptr1[0], Border); + memset(dest_ptr2, src_ptr2[0], Border); + src_ptr1 += plane_stride; + src_ptr2 += plane_stride; + dest_ptr1 += plane_stride; + dest_ptr2 += plane_stride; + } - /***********/ - /* U Plane */ - /***********/ - plane_stride = ybf->uv_stride; - plane_height = 8; - plane_width = ybf->uv_width; - Border /= 2; + /***********/ + /* U Plane */ + /***********/ + plane_stride = ybf->uv_stride; + plane_height = 8; + plane_width = ybf->uv_width; + Border /= 2; - /* copy the left and right most columns out */ - src_ptr1 = u_src; - src_ptr2 = src_ptr1 + plane_width - 1; - dest_ptr1 = src_ptr1 - Border; - dest_ptr2 = src_ptr2 + 1; + /* copy the left and right most columns out */ + src_ptr1 = u_src; + src_ptr2 = src_ptr1 + plane_width - 1; + dest_ptr1 = src_ptr1 - Border; + dest_ptr2 = src_ptr2 + 1; - for (i = 0; i < plane_height; i++) - { - memset(dest_ptr1, src_ptr1[0], Border); - memset(dest_ptr2, src_ptr2[0], Border); - src_ptr1 += plane_stride; - src_ptr2 += plane_stride; - dest_ptr1 += plane_stride; - dest_ptr2 += plane_stride; - } + for (i = 0; i < 
plane_height; ++i) { + memset(dest_ptr1, src_ptr1[0], Border); + memset(dest_ptr2, src_ptr2[0], Border); + src_ptr1 += plane_stride; + src_ptr2 += plane_stride; + dest_ptr1 += plane_stride; + dest_ptr2 += plane_stride; + } - /***********/ - /* V Plane */ - /***********/ + /***********/ + /* V Plane */ + /***********/ - /* copy the left and right most columns out */ - src_ptr1 = v_src; - src_ptr2 = src_ptr1 + plane_width - 1; - dest_ptr1 = src_ptr1 - Border; - dest_ptr2 = src_ptr2 + 1; + /* copy the left and right most columns out */ + src_ptr1 = v_src; + src_ptr2 = src_ptr1 + plane_width - 1; + dest_ptr1 = src_ptr1 - Border; + dest_ptr2 = src_ptr2 + 1; - for (i = 0; i < plane_height; i++) - { - memset(dest_ptr1, src_ptr1[0], Border); - memset(dest_ptr2, src_ptr2[0], Border); - src_ptr1 += plane_stride; - src_ptr2 += plane_stride; - dest_ptr1 += plane_stride; - dest_ptr2 += plane_stride; - } + for (i = 0; i < plane_height; ++i) { + memset(dest_ptr1, src_ptr1[0], Border); + memset(dest_ptr2, src_ptr2[0], Border); + src_ptr1 += plane_stride; + src_ptr2 += plane_stride; + dest_ptr1 += plane_stride; + dest_ptr2 += plane_stride; + } } -static void decode_mb_rows(VP8D_COMP *pbi) -{ - VP8_COMMON *const pc = & pbi->common; - MACROBLOCKD *const xd = & pbi->mb; +static void decode_mb_rows(VP8D_COMP *pbi) { + VP8_COMMON *const pc = &pbi->common; + MACROBLOCKD *const xd = &pbi->mb; - MODE_INFO *lf_mic = xd->mode_info_context; + MODE_INFO *lf_mic = xd->mode_info_context; - int ibc = 0; - int num_part = 1 << pc->multi_token_partition; + int ibc = 0; + int num_part = 1 << pc->multi_token_partition; - int recon_yoffset, recon_uvoffset; - int mb_row, mb_col; - int mb_idx = 0; + int recon_yoffset, recon_uvoffset; + int mb_row, mb_col; + int mb_idx = 0; - YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME]; + YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME]; - int recon_y_stride = yv12_fb_new->y_stride; - int recon_uv_stride = yv12_fb_new->uv_stride; + int 
recon_y_stride = yv12_fb_new->y_stride; + int recon_uv_stride = yv12_fb_new->uv_stride; - unsigned char *ref_buffer[MAX_REF_FRAMES][3]; - unsigned char *dst_buffer[3]; - unsigned char *lf_dst[3]; - unsigned char *eb_dst[3]; - int i; - int ref_fb_corrupted[MAX_REF_FRAMES]; + unsigned char *ref_buffer[MAX_REF_FRAMES][3]; + unsigned char *dst_buffer[3]; + unsigned char *lf_dst[3]; + unsigned char *eb_dst[3]; + int i; + int ref_fb_corrupted[MAX_REF_FRAMES]; - ref_fb_corrupted[INTRA_FRAME] = 0; + ref_fb_corrupted[INTRA_FRAME] = 0; - for(i = 1; i < MAX_REF_FRAMES; i++) - { - YV12_BUFFER_CONFIG *this_fb = pbi->dec_fb_ref[i]; + for (i = 1; i < MAX_REF_FRAMES; ++i) { + YV12_BUFFER_CONFIG *this_fb = pbi->dec_fb_ref[i]; - ref_buffer[i][0] = this_fb->y_buffer; - ref_buffer[i][1] = this_fb->u_buffer; - ref_buffer[i][2] = this_fb->v_buffer; + ref_buffer[i][0] = this_fb->y_buffer; + ref_buffer[i][1] = this_fb->u_buffer; + ref_buffer[i][2] = this_fb->v_buffer; - ref_fb_corrupted[i] = this_fb->corrupted; + ref_fb_corrupted[i] = this_fb->corrupted; + } + + /* Set up the buffer pointers */ + eb_dst[0] = lf_dst[0] = dst_buffer[0] = yv12_fb_new->y_buffer; + eb_dst[1] = lf_dst[1] = dst_buffer[1] = yv12_fb_new->u_buffer; + eb_dst[2] = lf_dst[2] = dst_buffer[2] = yv12_fb_new->v_buffer; + + xd->up_available = 0; + + /* Initialize the loop filter for this frame. 
*/ + if (pc->filter_level) vp8_loop_filter_frame_init(pc, xd, pc->filter_level); + + vp8_setup_intra_recon_top_line(yv12_fb_new); + + /* Decode the individual macro block */ + for (mb_row = 0; mb_row < pc->mb_rows; ++mb_row) { + if (num_part > 1) { + xd->current_bc = &pbi->mbc[ibc]; + ibc++; + + if (ibc == num_part) ibc = 0; } - /* Set up the buffer pointers */ - eb_dst[0] = lf_dst[0] = dst_buffer[0] = yv12_fb_new->y_buffer; - eb_dst[1] = lf_dst[1] = dst_buffer[1] = yv12_fb_new->u_buffer; - eb_dst[2] = lf_dst[2] = dst_buffer[2] = yv12_fb_new->v_buffer; + recon_yoffset = mb_row * recon_y_stride * 16; + recon_uvoffset = mb_row * recon_uv_stride * 8; - xd->up_available = 0; + /* reset contexts */ + xd->above_context = pc->above_context; + memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); - /* Initialize the loop filter for this frame. */ - if(pc->filter_level) - vp8_loop_filter_frame_init(pc, xd, pc->filter_level); + xd->left_available = 0; - vp8_setup_intra_recon_top_line(yv12_fb_new); + xd->mb_to_top_edge = -((mb_row * 16) << 3); + xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3; - /* Decode the individual macro block */ - for (mb_row = 0; mb_row < pc->mb_rows; mb_row++) - { - if (num_part > 1) - { - xd->current_bc = & pbi->mbc[ibc]; - ibc++; + xd->recon_above[0] = dst_buffer[0] + recon_yoffset; + xd->recon_above[1] = dst_buffer[1] + recon_uvoffset; + xd->recon_above[2] = dst_buffer[2] + recon_uvoffset; - if (ibc == num_part) - ibc = 0; - } + xd->recon_left[0] = xd->recon_above[0] - 1; + xd->recon_left[1] = xd->recon_above[1] - 1; + xd->recon_left[2] = xd->recon_above[2] - 1; - recon_yoffset = mb_row * recon_y_stride * 16; - recon_uvoffset = mb_row * recon_uv_stride * 8; + xd->recon_above[0] -= xd->dst.y_stride; + xd->recon_above[1] -= xd->dst.uv_stride; + xd->recon_above[2] -= xd->dst.uv_stride; - /* reset contexts */ - xd->above_context = pc->above_context; - memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); + /* TODO: move to 
outside row loop */ + xd->recon_left_stride[0] = xd->dst.y_stride; + xd->recon_left_stride[1] = xd->dst.uv_stride; - xd->left_available = 0; + setup_intra_recon_left(xd->recon_left[0], xd->recon_left[1], + xd->recon_left[2], xd->dst.y_stride, + xd->dst.uv_stride); - xd->mb_to_top_edge = -((mb_row * 16) << 3); - xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3; - - xd->recon_above[0] = dst_buffer[0] + recon_yoffset; - xd->recon_above[1] = dst_buffer[1] + recon_uvoffset; - xd->recon_above[2] = dst_buffer[2] + recon_uvoffset; - - xd->recon_left[0] = xd->recon_above[0] - 1; - xd->recon_left[1] = xd->recon_above[1] - 1; - xd->recon_left[2] = xd->recon_above[2] - 1; - - xd->recon_above[0] -= xd->dst.y_stride; - xd->recon_above[1] -= xd->dst.uv_stride; - xd->recon_above[2] -= xd->dst.uv_stride; - - /* TODO: move to outside row loop */ - xd->recon_left_stride[0] = xd->dst.y_stride; - xd->recon_left_stride[1] = xd->dst.uv_stride; - - setup_intra_recon_left(xd->recon_left[0], xd->recon_left[1], - xd->recon_left[2], xd->dst.y_stride, - xd->dst.uv_stride); - - for (mb_col = 0; mb_col < pc->mb_cols; mb_col++) - { - /* Distance of Mb to the various image edges. - * These are specified to 8th pel as they are always compared to values - * that are in 1/8th pel units - */ - xd->mb_to_left_edge = -((mb_col * 16) << 3); - xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3; + for (mb_col = 0; mb_col < pc->mb_cols; ++mb_col) { + /* Distance of Mb to the various image edges. 
+ * These are specified to 8th pel as they are always compared to values + * that are in 1/8th pel units + */ + xd->mb_to_left_edge = -((mb_col * 16) << 3); + xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3; #if CONFIG_ERROR_CONCEALMENT - { - int corrupt_residual = (!pbi->independent_partitions && - pbi->frame_corrupt_residual) || - vp8dx_bool_error(xd->current_bc); - if (pbi->ec_active && - xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME && - corrupt_residual) - { - /* We have an intra block with corrupt coefficients, better to - * conceal with an inter block. Interpolate MVs from neighboring - * MBs. - * - * Note that for the first mb with corrupt residual in a frame, - * we might not discover that before decoding the residual. That - * happens after this check, and therefore no inter concealment - * will be done. - */ - vp8_interpolate_motion(xd, - mb_row, mb_col, - pc->mb_rows, pc->mb_cols, - pc->mode_info_stride); - } - } + { + int corrupt_residual = + (!pbi->independent_partitions && pbi->frame_corrupt_residual) || + vp8dx_bool_error(xd->current_bc); + if (pbi->ec_active && + xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME && + corrupt_residual) { + /* We have an intra block with corrupt coefficients, better to + * conceal with an inter block. Interpolate MVs from neighboring + * MBs. + * + * Note that for the first mb with corrupt residual in a frame, + * we might not discover that before decoding the residual. That + * happens after this check, and therefore no inter concealment + * will be done. 
+ */ + vp8_interpolate_motion(xd, mb_row, mb_col, pc->mb_rows, pc->mb_cols); + } + } #endif - xd->dst.y_buffer = dst_buffer[0] + recon_yoffset; - xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset; - xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset; + xd->dst.y_buffer = dst_buffer[0] + recon_yoffset; + xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset; + xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset; - if (xd->mode_info_context->mbmi.ref_frame >= LAST_FRAME) { - const MV_REFERENCE_FRAME ref = xd->mode_info_context->mbmi.ref_frame; - xd->pre.y_buffer = ref_buffer[ref][0] + recon_yoffset; - xd->pre.u_buffer = ref_buffer[ref][1] + recon_uvoffset; - xd->pre.v_buffer = ref_buffer[ref][2] + recon_uvoffset; - } else { - // ref_frame is INTRA_FRAME, pre buffer should not be used. - xd->pre.y_buffer = 0; - xd->pre.u_buffer = 0; - xd->pre.v_buffer = 0; - } + if (xd->mode_info_context->mbmi.ref_frame >= LAST_FRAME) { + const MV_REFERENCE_FRAME ref = xd->mode_info_context->mbmi.ref_frame; + xd->pre.y_buffer = ref_buffer[ref][0] + recon_yoffset; + xd->pre.u_buffer = ref_buffer[ref][1] + recon_uvoffset; + xd->pre.v_buffer = ref_buffer[ref][2] + recon_uvoffset; + } else { + // ref_frame is INTRA_FRAME, pre buffer should not be used. 
+ xd->pre.y_buffer = 0; + xd->pre.u_buffer = 0; + xd->pre.v_buffer = 0; + } - /* propagate errors from reference frames */ - xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame]; + /* propagate errors from reference frames */ + xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame]; - decode_macroblock(pbi, xd, mb_idx); + decode_macroblock(pbi, xd, mb_idx); - mb_idx++; - xd->left_available = 1; + mb_idx++; + xd->left_available = 1; - /* check if the boolean decoder has suffered an error */ - xd->corrupted |= vp8dx_bool_error(xd->current_bc); + /* check if the boolean decoder has suffered an error */ + xd->corrupted |= vp8dx_bool_error(xd->current_bc); - xd->recon_above[0] += 16; - xd->recon_above[1] += 8; - xd->recon_above[2] += 8; - xd->recon_left[0] += 16; - xd->recon_left[1] += 8; - xd->recon_left[2] += 8; + xd->recon_above[0] += 16; + xd->recon_above[1] += 8; + xd->recon_above[2] += 8; + xd->recon_left[0] += 16; + xd->recon_left[1] += 8; + xd->recon_left[2] += 8; - recon_yoffset += 16; - recon_uvoffset += 8; + recon_yoffset += 16; + recon_uvoffset += 8; - ++xd->mode_info_context; /* next mb */ + ++xd->mode_info_context; /* next mb */ - xd->above_context++; - } - - /* adjust to the next row of mbs */ - vp8_extend_mb_row(yv12_fb_new, xd->dst.y_buffer + 16, - xd->dst.u_buffer + 8, xd->dst.v_buffer + 8); - - ++xd->mode_info_context; /* skip prediction column */ - xd->up_available = 1; - - if(pc->filter_level) - { - if(mb_row > 0) - { - if (pc->filter_type == NORMAL_LOOPFILTER) - vp8_loop_filter_row_normal(pc, lf_mic, mb_row-1, - recon_y_stride, recon_uv_stride, - lf_dst[0], lf_dst[1], lf_dst[2]); - else - vp8_loop_filter_row_simple(pc, lf_mic, mb_row-1, - recon_y_stride, recon_uv_stride, - lf_dst[0], lf_dst[1], lf_dst[2]); - if(mb_row > 1) - { - yv12_extend_frame_left_right_c(yv12_fb_new, - eb_dst[0], - eb_dst[1], - eb_dst[2]); - - eb_dst[0] += recon_y_stride * 16; - eb_dst[1] += recon_uv_stride * 8; - eb_dst[2] += 
recon_uv_stride * 8; - } - - lf_dst[0] += recon_y_stride * 16; - lf_dst[1] += recon_uv_stride * 8; - lf_dst[2] += recon_uv_stride * 8; - lf_mic += pc->mb_cols; - lf_mic++; /* Skip border mb */ - } - } - else - { - if(mb_row > 0) - { - /**/ - yv12_extend_frame_left_right_c(yv12_fb_new, - eb_dst[0], - eb_dst[1], - eb_dst[2]); - eb_dst[0] += recon_y_stride * 16; - eb_dst[1] += recon_uv_stride * 8; - eb_dst[2] += recon_uv_stride * 8; - } - } + xd->above_context++; } - if(pc->filter_level) - { - if (pc->filter_type == NORMAL_LOOPFILTER) - vp8_loop_filter_row_normal(pc, lf_mic, mb_row-1, recon_y_stride, - recon_uv_stride, lf_dst[0], lf_dst[1], - lf_dst[2]); - else - vp8_loop_filter_row_simple(pc, lf_mic, mb_row-1, recon_y_stride, - recon_uv_stride, lf_dst[0], lf_dst[1], - lf_dst[2]); + /* adjust to the next row of mbs */ + vp8_extend_mb_row(yv12_fb_new, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, + xd->dst.v_buffer + 8); - yv12_extend_frame_left_right_c(yv12_fb_new, - eb_dst[0], - eb_dst[1], + ++xd->mode_info_context; /* skip prediction column */ + xd->up_available = 1; + + if (pc->filter_level) { + if (mb_row > 0) { + if (pc->filter_type == NORMAL_LOOPFILTER) { + vp8_loop_filter_row_normal(pc, lf_mic, mb_row - 1, recon_y_stride, + recon_uv_stride, lf_dst[0], lf_dst[1], + lf_dst[2]); + } else { + vp8_loop_filter_row_simple(pc, lf_mic, mb_row - 1, recon_y_stride, + recon_uv_stride, lf_dst[0], lf_dst[1], + lf_dst[2]); + } + if (mb_row > 1) { + yv12_extend_frame_left_right_c(yv12_fb_new, eb_dst[0], eb_dst[1], + eb_dst[2]); + + eb_dst[0] += recon_y_stride * 16; + eb_dst[1] += recon_uv_stride * 8; + eb_dst[2] += recon_uv_stride * 8; + } + + lf_dst[0] += recon_y_stride * 16; + lf_dst[1] += recon_uv_stride * 8; + lf_dst[2] += recon_uv_stride * 8; + lf_mic += pc->mb_cols; + lf_mic++; /* Skip border mb */ + } + } else { + if (mb_row > 0) { + /**/ + yv12_extend_frame_left_right_c(yv12_fb_new, eb_dst[0], eb_dst[1], eb_dst[2]); - eb_dst[0] += recon_y_stride * 16; - eb_dst[1] += 
recon_uv_stride * 8; - eb_dst[2] += recon_uv_stride * 8; + eb_dst[0] += recon_y_stride * 16; + eb_dst[1] += recon_uv_stride * 8; + eb_dst[2] += recon_uv_stride * 8; + } } - yv12_extend_frame_left_right_c(yv12_fb_new, - eb_dst[0], - eb_dst[1], - eb_dst[2]); - yv12_extend_frame_top_c(yv12_fb_new); - yv12_extend_frame_bottom_c(yv12_fb_new); + } + if (pc->filter_level) { + if (pc->filter_type == NORMAL_LOOPFILTER) { + vp8_loop_filter_row_normal(pc, lf_mic, mb_row - 1, recon_y_stride, + recon_uv_stride, lf_dst[0], lf_dst[1], + lf_dst[2]); + } else { + vp8_loop_filter_row_simple(pc, lf_mic, mb_row - 1, recon_y_stride, + recon_uv_stride, lf_dst[0], lf_dst[1], + lf_dst[2]); + } + + yv12_extend_frame_left_right_c(yv12_fb_new, eb_dst[0], eb_dst[1], + eb_dst[2]); + eb_dst[0] += recon_y_stride * 16; + eb_dst[1] += recon_uv_stride * 8; + eb_dst[2] += recon_uv_stride * 8; + } + yv12_extend_frame_left_right_c(yv12_fb_new, eb_dst[0], eb_dst[1], eb_dst[2]); + yv12_extend_frame_top_c(yv12_fb_new); + yv12_extend_frame_bottom_c(yv12_fb_new); } static unsigned int read_partition_size(VP8D_COMP *pbi, - const unsigned char *cx_size) -{ - unsigned char temp[3]; - if (pbi->decrypt_cb) - { - pbi->decrypt_cb(pbi->decrypt_state, cx_size, temp, 3); - cx_size = temp; - } - return cx_size[0] + (cx_size[1] << 8) + (cx_size[2] << 16); + const unsigned char *cx_size) { + unsigned char temp[3]; + if (pbi->decrypt_cb) { + pbi->decrypt_cb(pbi->decrypt_state, cx_size, temp, 3); + cx_size = temp; + } + return cx_size[0] + (cx_size[1] << 8) + (cx_size[2] << 16); } -static int read_is_valid(const unsigned char *start, - size_t len, - const unsigned char *end) -{ - return (start + len > start && start + len <= end); +static int read_is_valid(const unsigned char *start, size_t len, + const unsigned char *end) { + return (start + len > start && start + len <= end); } static unsigned int read_available_partition_size( - VP8D_COMP *pbi, - const unsigned char *token_part_sizes, - const unsigned char 
*fragment_start, - const unsigned char *first_fragment_end, - const unsigned char *fragment_end, - int i, - int num_part) -{ - VP8_COMMON* pc = &pbi->common; - const unsigned char *partition_size_ptr = token_part_sizes + i * 3; - unsigned int partition_size = 0; - ptrdiff_t bytes_left = fragment_end - fragment_start; - /* Calculate the length of this partition. The last partition - * size is implicit. If the partition size can't be read, then - * either use the remaining data in the buffer (for EC mode) - * or throw an error. - */ - if (i < num_part - 1) - { - if (read_is_valid(partition_size_ptr, 3, first_fragment_end)) - partition_size = read_partition_size(pbi, partition_size_ptr); - else if (pbi->ec_active) - partition_size = (unsigned int)bytes_left; - else - vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, - "Truncated partition size data"); + VP8D_COMP *pbi, const unsigned char *token_part_sizes, + const unsigned char *fragment_start, + const unsigned char *first_fragment_end, const unsigned char *fragment_end, + int i, int num_part) { + VP8_COMMON *pc = &pbi->common; + const unsigned char *partition_size_ptr = token_part_sizes + i * 3; + unsigned int partition_size = 0; + ptrdiff_t bytes_left = fragment_end - fragment_start; + /* Calculate the length of this partition. The last partition + * size is implicit. If the partition size can't be read, then + * either use the remaining data in the buffer (for EC mode) + * or throw an error. + */ + if (i < num_part - 1) { + if (read_is_valid(partition_size_ptr, 3, first_fragment_end)) { + partition_size = read_partition_size(pbi, partition_size_ptr); + } else if (pbi->ec_active) { + partition_size = (unsigned int)bytes_left; + } else { + vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, + "Truncated partition size data"); } - else - partition_size = (unsigned int)bytes_left; + } else { + partition_size = (unsigned int)bytes_left; + } - /* Validate the calculated partition length. 
If the buffer - * described by the partition can't be fully read, then restrict - * it to the portion that can be (for EC mode) or throw an error. - */ - if (!read_is_valid(fragment_start, partition_size, fragment_end)) - { - if (pbi->ec_active) - partition_size = (unsigned int)bytes_left; - else - vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, - "Truncated packet or corrupt partition " - "%d length", i + 1); + /* Validate the calculated partition length. If the buffer + * described by the partition can't be fully read, then restrict + * it to the portion that can be (for EC mode) or throw an error. + */ + if (!read_is_valid(fragment_start, partition_size, fragment_end)) { + if (pbi->ec_active) { + partition_size = (unsigned int)bytes_left; + } else { + vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, + "Truncated packet or corrupt partition " + "%d length", + i + 1); } - return partition_size; + } + return partition_size; } - static void setup_token_decoder(VP8D_COMP *pbi, - const unsigned char* token_part_sizes) -{ - vp8_reader *bool_decoder = &pbi->mbc[0]; - unsigned int partition_idx; - unsigned int fragment_idx; - unsigned int num_token_partitions; - const unsigned char *first_fragment_end = pbi->fragments.ptrs[0] + - pbi->fragments.sizes[0]; + const unsigned char *token_part_sizes) { + vp8_reader *bool_decoder = &pbi->mbc[0]; + unsigned int partition_idx; + unsigned int fragment_idx; + unsigned int num_token_partitions; + const unsigned char *first_fragment_end = + pbi->fragments.ptrs[0] + pbi->fragments.sizes[0]; - TOKEN_PARTITION multi_token_partition = - (TOKEN_PARTITION)vp8_read_literal(&pbi->mbc[8], 2); - if (!vp8dx_bool_error(&pbi->mbc[8])) - pbi->common.multi_token_partition = multi_token_partition; - num_token_partitions = 1 << pbi->common.multi_token_partition; + TOKEN_PARTITION multi_token_partition = + (TOKEN_PARTITION)vp8_read_literal(&pbi->mbc[8], 2); + if (!vp8dx_bool_error(&pbi->mbc[8])) { + pbi->common.multi_token_partition = 
multi_token_partition; + } + num_token_partitions = 1 << pbi->common.multi_token_partition; - /* Check for partitions within the fragments and unpack the fragments - * so that each fragment pointer points to its corresponding partition. */ - for (fragment_idx = 0; fragment_idx < pbi->fragments.count; ++fragment_idx) - { - unsigned int fragment_size = pbi->fragments.sizes[fragment_idx]; - const unsigned char *fragment_end = pbi->fragments.ptrs[fragment_idx] + - fragment_size; - /* Special case for handling the first partition since we have already - * read its size. */ - if (fragment_idx == 0) - { - /* Size of first partition + token partition sizes element */ - ptrdiff_t ext_first_part_size = token_part_sizes - - pbi->fragments.ptrs[0] + 3 * (num_token_partitions - 1); - fragment_size -= (unsigned int)ext_first_part_size; - if (fragment_size > 0) - { - pbi->fragments.sizes[0] = (unsigned int)ext_first_part_size; - /* The fragment contains an additional partition. Move to - * next. */ - fragment_idx++; - pbi->fragments.ptrs[fragment_idx] = pbi->fragments.ptrs[0] + - pbi->fragments.sizes[0]; - } - } - /* Split the chunk into partitions read from the bitstream */ - while (fragment_size > 0) - { - ptrdiff_t partition_size = read_available_partition_size( - pbi, - token_part_sizes, - pbi->fragments.ptrs[fragment_idx], - first_fragment_end, - fragment_end, - fragment_idx - 1, - num_token_partitions); - pbi->fragments.sizes[fragment_idx] = (unsigned int)partition_size; - fragment_size -= (unsigned int)partition_size; - assert(fragment_idx <= num_token_partitions); - if (fragment_size > 0) - { - /* The fragment contains an additional partition. - * Move to next. */ - fragment_idx++; - pbi->fragments.ptrs[fragment_idx] = - pbi->fragments.ptrs[fragment_idx - 1] + partition_size; - } - } + /* Check for partitions within the fragments and unpack the fragments + * so that each fragment pointer points to its corresponding partition. 
*/ + for (fragment_idx = 0; fragment_idx < pbi->fragments.count; ++fragment_idx) { + unsigned int fragment_size = pbi->fragments.sizes[fragment_idx]; + const unsigned char *fragment_end = + pbi->fragments.ptrs[fragment_idx] + fragment_size; + /* Special case for handling the first partition since we have already + * read its size. */ + if (fragment_idx == 0) { + /* Size of first partition + token partition sizes element */ + ptrdiff_t ext_first_part_size = token_part_sizes - + pbi->fragments.ptrs[0] + + 3 * (num_token_partitions - 1); + fragment_size -= (unsigned int)ext_first_part_size; + if (fragment_size > 0) { + pbi->fragments.sizes[0] = (unsigned int)ext_first_part_size; + /* The fragment contains an additional partition. Move to + * next. */ + fragment_idx++; + pbi->fragments.ptrs[fragment_idx] = + pbi->fragments.ptrs[0] + pbi->fragments.sizes[0]; + } + } + /* Split the chunk into partitions read from the bitstream */ + while (fragment_size > 0) { + ptrdiff_t partition_size = read_available_partition_size( + pbi, token_part_sizes, pbi->fragments.ptrs[fragment_idx], + first_fragment_end, fragment_end, fragment_idx - 1, + num_token_partitions); + pbi->fragments.sizes[fragment_idx] = (unsigned int)partition_size; + fragment_size -= (unsigned int)partition_size; + assert(fragment_idx <= num_token_partitions); + if (fragment_size > 0) { + /* The fragment contains an additional partition. + * Move to next. 
*/ + fragment_idx++; + pbi->fragments.ptrs[fragment_idx] = + pbi->fragments.ptrs[fragment_idx - 1] + partition_size; + } + } + } + + pbi->fragments.count = num_token_partitions + 1; + + for (partition_idx = 1; partition_idx < pbi->fragments.count; + ++partition_idx) { + if (vp8dx_start_decode(bool_decoder, pbi->fragments.ptrs[partition_idx], + pbi->fragments.sizes[partition_idx], pbi->decrypt_cb, + pbi->decrypt_state)) { + vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR, + "Failed to allocate bool decoder %d", partition_idx); } - pbi->fragments.count = num_token_partitions + 1; - - for (partition_idx = 1; partition_idx < pbi->fragments.count; ++partition_idx) - { - if (vp8dx_start_decode(bool_decoder, - pbi->fragments.ptrs[partition_idx], - pbi->fragments.sizes[partition_idx], - pbi->decrypt_cb, pbi->decrypt_state)) - vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR, - "Failed to allocate bool decoder %d", - partition_idx); - - bool_decoder++; - } + bool_decoder++; + } #if CONFIG_MULTITHREAD - /* Clamp number of decoder threads */ - if (pbi->decoding_thread_count > num_token_partitions - 1) - pbi->decoding_thread_count = num_token_partitions - 1; + /* Clamp number of decoder threads */ + if (pbi->decoding_thread_count > num_token_partitions - 1) { + pbi->decoding_thread_count = num_token_partitions - 1; + } + if ((int)pbi->decoding_thread_count > pbi->common.mb_rows - 1) { + assert(pbi->common.mb_rows > 0); + pbi->decoding_thread_count = pbi->common.mb_rows - 1; + } #endif } +static void init_frame(VP8D_COMP *pbi) { + VP8_COMMON *const pc = &pbi->common; + MACROBLOCKD *const xd = &pbi->mb; -static void init_frame(VP8D_COMP *pbi) -{ - VP8_COMMON *const pc = & pbi->common; - MACROBLOCKD *const xd = & pbi->mb; + if (pc->frame_type == KEY_FRAME) { + /* Various keyframe initializations */ + memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context)); - if (pc->frame_type == KEY_FRAME) - { - /* Various keyframe initializations */ - 
memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context)); + vp8_init_mbmode_probs(pc); - vp8_init_mbmode_probs(pc); + vp8_default_coef_probs(pc); - vp8_default_coef_probs(pc); + /* reset the segment feature data to 0 with delta coding (Default state). */ + memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data)); + xd->mb_segement_abs_delta = SEGMENT_DELTADATA; - /* reset the segment feature data to 0 with delta coding (Default state). */ - memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data)); - xd->mb_segement_abs_delta = SEGMENT_DELTADATA; + /* reset the mode ref deltasa for loop filter */ + memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas)); + memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas)); - /* reset the mode ref deltasa for loop filter */ - memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas)); - memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas)); + /* All buffers are implicitly updated on key frames. */ + pc->refresh_golden_frame = 1; + pc->refresh_alt_ref_frame = 1; + pc->copy_buffer_to_gf = 0; + pc->copy_buffer_to_arf = 0; - /* All buffers are implicitly updated on key frames. 
*/ - pc->refresh_golden_frame = 1; - pc->refresh_alt_ref_frame = 1; - pc->copy_buffer_to_gf = 0; - pc->copy_buffer_to_arf = 0; - - /* Note that Golden and Altref modes cannot be used on a key frame so - * ref_frame_sign_bias[] is undefined and meaningless - */ - pc->ref_frame_sign_bias[GOLDEN_FRAME] = 0; - pc->ref_frame_sign_bias[ALTREF_FRAME] = 0; - } - else - { - /* To enable choice of different interploation filters */ - if (!pc->use_bilinear_mc_filter) - { - xd->subpixel_predict = vp8_sixtap_predict4x4; - xd->subpixel_predict8x4 = vp8_sixtap_predict8x4; - xd->subpixel_predict8x8 = vp8_sixtap_predict8x8; - xd->subpixel_predict16x16 = vp8_sixtap_predict16x16; - } - else - { - xd->subpixel_predict = vp8_bilinear_predict4x4; - xd->subpixel_predict8x4 = vp8_bilinear_predict8x4; - xd->subpixel_predict8x8 = vp8_bilinear_predict8x8; - xd->subpixel_predict16x16 = vp8_bilinear_predict16x16; - } - - if (pbi->decoded_key_frame && pbi->ec_enabled && !pbi->ec_active) - pbi->ec_active = 1; - } - - xd->left_context = &pc->left_context; - xd->mode_info_context = pc->mi; - xd->frame_type = pc->frame_type; - xd->mode_info_context->mbmi.mode = DC_PRED; - xd->mode_info_stride = pc->mode_info_stride; - xd->corrupted = 0; /* init without corruption */ - - xd->fullpixel_mask = 0xffffffff; - if(pc->full_pixel) - xd->fullpixel_mask = 0xfffffff8; - -} - -int vp8_decode_frame(VP8D_COMP *pbi) -{ - vp8_reader *const bc = &pbi->mbc[8]; - VP8_COMMON *const pc = &pbi->common; - MACROBLOCKD *const xd = &pbi->mb; - const unsigned char *data = pbi->fragments.ptrs[0]; - const unsigned char *data_end = data + pbi->fragments.sizes[0]; - ptrdiff_t first_partition_length_in_bytes; - - int i, j, k, l; - const int *const mb_feature_data_bits = vp8_mb_feature_data_bits; - int corrupt_tokens = 0; - int prev_independent_partitions = pbi->independent_partitions; - - YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME]; - - /* start with no corruption of current frame */ - xd->corrupted = 0; - 
yv12_fb_new->corrupted = 0; - - if (data_end - data < 3) - { - if (!pbi->ec_active) - { - vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, - "Truncated packet"); - } - - /* Declare the missing frame as an inter frame since it will - be handled as an inter frame when we have estimated its - motion vectors. */ - pc->frame_type = INTER_FRAME; - pc->version = 0; - pc->show_frame = 1; - first_partition_length_in_bytes = 0; - } - else - { - unsigned char clear_buffer[10]; - const unsigned char *clear = data; - if (pbi->decrypt_cb) - { - int n = (int)VPXMIN(sizeof(clear_buffer), data_end - data); - pbi->decrypt_cb(pbi->decrypt_state, data, clear_buffer, n); - clear = clear_buffer; - } - - pc->frame_type = (FRAME_TYPE)(clear[0] & 1); - pc->version = (clear[0] >> 1) & 7; - pc->show_frame = (clear[0] >> 4) & 1; - first_partition_length_in_bytes = - (clear[0] | (clear[1] << 8) | (clear[2] << 16)) >> 5; - - if (!pbi->ec_active && - (data + first_partition_length_in_bytes > data_end - || data + first_partition_length_in_bytes < data)) - vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, - "Truncated packet or corrupt partition 0 length"); - - data += 3; - clear += 3; - - vp8_setup_version(pc); - - - if (pc->frame_type == KEY_FRAME) - { - /* vet via sync code */ - /* When error concealment is enabled we should only check the sync - * code if we have enough bits available - */ - if (!pbi->ec_active || data + 3 < data_end) - { - if (clear[0] != 0x9d || clear[1] != 0x01 || clear[2] != 0x2a) - vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM, - "Invalid frame sync code"); - } - - /* If error concealment is enabled we should only parse the new size - * if we have enough data. Otherwise we will end up with the wrong - * size. 
- */ - if (!pbi->ec_active || data + 6 < data_end) - { - pc->Width = (clear[3] | (clear[4] << 8)) & 0x3fff; - pc->horiz_scale = clear[4] >> 6; - pc->Height = (clear[5] | (clear[6] << 8)) & 0x3fff; - pc->vert_scale = clear[6] >> 6; - } - data += 7; - } - else - { - memcpy(&xd->pre, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG)); - memcpy(&xd->dst, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG)); - } - } - if ((!pbi->decoded_key_frame && pc->frame_type != KEY_FRAME)) - { - return -1; - } - - init_frame(pbi); - - if (vp8dx_start_decode(bc, data, (unsigned int)(data_end - data), - pbi->decrypt_cb, pbi->decrypt_state)) - vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate bool decoder 0"); - if (pc->frame_type == KEY_FRAME) { - (void)vp8_read_bit(bc); // colorspace - pc->clamp_type = (CLAMP_TYPE)vp8_read_bit(bc); - } - - /* Is segmentation enabled */ - xd->segmentation_enabled = (unsigned char)vp8_read_bit(bc); - - if (xd->segmentation_enabled) - { - /* Signal whether or not the segmentation map is being explicitly updated this frame. 
*/ - xd->update_mb_segmentation_map = (unsigned char)vp8_read_bit(bc); - xd->update_mb_segmentation_data = (unsigned char)vp8_read_bit(bc); - - if (xd->update_mb_segmentation_data) - { - xd->mb_segement_abs_delta = (unsigned char)vp8_read_bit(bc); - - memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data)); - - /* For each segmentation feature (Quant and loop filter level) */ - for (i = 0; i < MB_LVL_MAX; i++) - { - for (j = 0; j < MAX_MB_SEGMENTS; j++) - { - /* Frame level data */ - if (vp8_read_bit(bc)) - { - xd->segment_feature_data[i][j] = (signed char)vp8_read_literal(bc, mb_feature_data_bits[i]); - - if (vp8_read_bit(bc)) - xd->segment_feature_data[i][j] = -xd->segment_feature_data[i][j]; - } - else - xd->segment_feature_data[i][j] = 0; - } - } - } - - if (xd->update_mb_segmentation_map) - { - /* Which macro block level features are enabled */ - memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs)); - - /* Read the probs used to decode the segment id for each macro block. */ - for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) - { - /* If not explicitly set value is defaulted to 255 by memset above */ - if (vp8_read_bit(bc)) - xd->mb_segment_tree_probs[i] = (vp8_prob)vp8_read_literal(bc, 8); - } - } - } - else - { - /* No segmentation updates on this frame */ - xd->update_mb_segmentation_map = 0; - xd->update_mb_segmentation_data = 0; - } - - /* Read the loop filter level and type */ - pc->filter_type = (LOOPFILTERTYPE) vp8_read_bit(bc); - pc->filter_level = vp8_read_literal(bc, 6); - pc->sharpness_level = vp8_read_literal(bc, 3); - - /* Read in loop filter deltas applied at the MB level based on mode or ref frame. 
*/ - xd->mode_ref_lf_delta_update = 0; - xd->mode_ref_lf_delta_enabled = (unsigned char)vp8_read_bit(bc); - - if (xd->mode_ref_lf_delta_enabled) - { - /* Do the deltas need to be updated */ - xd->mode_ref_lf_delta_update = (unsigned char)vp8_read_bit(bc); - - if (xd->mode_ref_lf_delta_update) - { - /* Send update */ - for (i = 0; i < MAX_REF_LF_DELTAS; i++) - { - if (vp8_read_bit(bc)) - { - /*sign = vp8_read_bit( bc );*/ - xd->ref_lf_deltas[i] = (signed char)vp8_read_literal(bc, 6); - - if (vp8_read_bit(bc)) /* Apply sign */ - xd->ref_lf_deltas[i] = xd->ref_lf_deltas[i] * -1; - } - } - - /* Send update */ - for (i = 0; i < MAX_MODE_LF_DELTAS; i++) - { - if (vp8_read_bit(bc)) - { - /*sign = vp8_read_bit( bc );*/ - xd->mode_lf_deltas[i] = (signed char)vp8_read_literal(bc, 6); - - if (vp8_read_bit(bc)) /* Apply sign */ - xd->mode_lf_deltas[i] = xd->mode_lf_deltas[i] * -1; - } - } - } - } - - setup_token_decoder(pbi, data + first_partition_length_in_bytes); - - xd->current_bc = &pbi->mbc[0]; - - /* Read the default quantizers. */ - { - int Q, q_update; - - Q = vp8_read_literal(bc, 7); /* AC 1st order Q = default */ - pc->base_qindex = Q; - q_update = 0; - pc->y1dc_delta_q = get_delta_q(bc, pc->y1dc_delta_q, &q_update); - pc->y2dc_delta_q = get_delta_q(bc, pc->y2dc_delta_q, &q_update); - pc->y2ac_delta_q = get_delta_q(bc, pc->y2ac_delta_q, &q_update); - pc->uvdc_delta_q = get_delta_q(bc, pc->uvdc_delta_q, &q_update); - pc->uvac_delta_q = get_delta_q(bc, pc->uvac_delta_q, &q_update); - - if (q_update) - vp8cx_init_de_quantizer(pbi); - - /* MB level dequantizer setup */ - vp8_mb_init_dequantizer(pbi, &pbi->mb); - } - - /* Determine if the golden frame or ARF buffer should be updated and how. - * For all non key frames the GF and ARF refresh flags and sign bias - * flags must be set explicitly. 
+ /* Note that Golden and Altref modes cannot be used on a key frame so + * ref_frame_sign_bias[] is undefined and meaningless */ - if (pc->frame_type != KEY_FRAME) - { - /* Should the GF or ARF be updated from the current frame */ - pc->refresh_golden_frame = vp8_read_bit(bc); -#if CONFIG_ERROR_CONCEALMENT - /* Assume we shouldn't refresh golden if the bit is missing */ - xd->corrupted |= vp8dx_bool_error(bc); - if (pbi->ec_active && xd->corrupted) - pc->refresh_golden_frame = 0; -#endif - - pc->refresh_alt_ref_frame = vp8_read_bit(bc); -#if CONFIG_ERROR_CONCEALMENT - /* Assume we shouldn't refresh altref if the bit is missing */ - xd->corrupted |= vp8dx_bool_error(bc); - if (pbi->ec_active && xd->corrupted) - pc->refresh_alt_ref_frame = 0; -#endif - - /* Buffer to buffer copy flags. */ - pc->copy_buffer_to_gf = 0; - - if (!pc->refresh_golden_frame) - pc->copy_buffer_to_gf = vp8_read_literal(bc, 2); - -#if CONFIG_ERROR_CONCEALMENT - /* Assume we shouldn't copy to the golden if the bit is missing */ - xd->corrupted |= vp8dx_bool_error(bc); - if (pbi->ec_active && xd->corrupted) - pc->copy_buffer_to_gf = 0; -#endif - - pc->copy_buffer_to_arf = 0; - - if (!pc->refresh_alt_ref_frame) - pc->copy_buffer_to_arf = vp8_read_literal(bc, 2); - -#if CONFIG_ERROR_CONCEALMENT - /* Assume we shouldn't copy to the alt-ref if the bit is missing */ - xd->corrupted |= vp8dx_bool_error(bc); - if (pbi->ec_active && xd->corrupted) - pc->copy_buffer_to_arf = 0; -#endif - - - pc->ref_frame_sign_bias[GOLDEN_FRAME] = vp8_read_bit(bc); - pc->ref_frame_sign_bias[ALTREF_FRAME] = vp8_read_bit(bc); + pc->ref_frame_sign_bias[GOLDEN_FRAME] = 0; + pc->ref_frame_sign_bias[ALTREF_FRAME] = 0; + } else { + /* To enable choice of different interploation filters */ + if (!pc->use_bilinear_mc_filter) { + xd->subpixel_predict = vp8_sixtap_predict4x4; + xd->subpixel_predict8x4 = vp8_sixtap_predict8x4; + xd->subpixel_predict8x8 = vp8_sixtap_predict8x8; + xd->subpixel_predict16x16 = vp8_sixtap_predict16x16; 
+ } else { + xd->subpixel_predict = vp8_bilinear_predict4x4; + xd->subpixel_predict8x4 = vp8_bilinear_predict8x4; + xd->subpixel_predict8x8 = vp8_bilinear_predict8x8; + xd->subpixel_predict16x16 = vp8_bilinear_predict16x16; } - pc->refresh_entropy_probs = vp8_read_bit(bc); + if (pbi->decoded_key_frame && pbi->ec_enabled && !pbi->ec_active) { + pbi->ec_active = 1; + } + } + + xd->left_context = &pc->left_context; + xd->mode_info_context = pc->mi; + xd->frame_type = pc->frame_type; + xd->mode_info_context->mbmi.mode = DC_PRED; + xd->mode_info_stride = pc->mode_info_stride; + xd->corrupted = 0; /* init without corruption */ + + xd->fullpixel_mask = 0xffffffff; + if (pc->full_pixel) xd->fullpixel_mask = 0xfffffff8; +} + +int vp8_decode_frame(VP8D_COMP *pbi) { + vp8_reader *const bc = &pbi->mbc[8]; + VP8_COMMON *const pc = &pbi->common; + MACROBLOCKD *const xd = &pbi->mb; + const unsigned char *data = pbi->fragments.ptrs[0]; + const unsigned int data_sz = pbi->fragments.sizes[0]; + const unsigned char *data_end = data + data_sz; + ptrdiff_t first_partition_length_in_bytes; + + int i, j, k, l; + const int *const mb_feature_data_bits = vp8_mb_feature_data_bits; + int corrupt_tokens = 0; + int prev_independent_partitions = pbi->independent_partitions; + + YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME]; + + /* start with no corruption of current frame */ + xd->corrupted = 0; + yv12_fb_new->corrupted = 0; + + if (data_end - data < 3) { + if (!pbi->ec_active) { + vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, + "Truncated packet"); + } + + /* Declare the missing frame as an inter frame since it will + be handled as an inter frame when we have estimated its + motion vectors. 
*/ + pc->frame_type = INTER_FRAME; + pc->version = 0; + pc->show_frame = 1; + first_partition_length_in_bytes = 0; + } else { + unsigned char clear_buffer[10]; + const unsigned char *clear = data; + if (pbi->decrypt_cb) { + int n = (int)VPXMIN(sizeof(clear_buffer), data_sz); + pbi->decrypt_cb(pbi->decrypt_state, data, clear_buffer, n); + clear = clear_buffer; + } + + pc->frame_type = (FRAME_TYPE)(clear[0] & 1); + pc->version = (clear[0] >> 1) & 7; + pc->show_frame = (clear[0] >> 4) & 1; + first_partition_length_in_bytes = + (clear[0] | (clear[1] << 8) | (clear[2] << 16)) >> 5; + + if (!pbi->ec_active && (data + first_partition_length_in_bytes > data_end || + data + first_partition_length_in_bytes < data)) { + vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, + "Truncated packet or corrupt partition 0 length"); + } + + data += 3; + clear += 3; + + vp8_setup_version(pc); + + if (pc->frame_type == KEY_FRAME) { + /* vet via sync code */ + /* When error concealment is enabled we should only check the sync + * code if we have enough bits available + */ + if (!pbi->ec_active || data + 3 < data_end) { + if (clear[0] != 0x9d || clear[1] != 0x01 || clear[2] != 0x2a) { + vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM, + "Invalid frame sync code"); + } + } + + /* If error concealment is enabled we should only parse the new size + * if we have enough data. Otherwise we will end up with the wrong + * size. 
+ */ + if (!pbi->ec_active || data + 6 < data_end) { + pc->Width = (clear[3] | (clear[4] << 8)) & 0x3fff; + pc->horiz_scale = clear[4] >> 6; + pc->Height = (clear[5] | (clear[6] << 8)) & 0x3fff; + pc->vert_scale = clear[6] >> 6; + } + data += 7; + } else { + memcpy(&xd->pre, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG)); + memcpy(&xd->dst, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG)); + } + } + if ((!pbi->decoded_key_frame && pc->frame_type != KEY_FRAME)) { + return -1; + } + + init_frame(pbi); + + if (vp8dx_start_decode(bc, data, (unsigned int)(data_end - data), + pbi->decrypt_cb, pbi->decrypt_state)) { + vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate bool decoder 0"); + } + if (pc->frame_type == KEY_FRAME) { + (void)vp8_read_bit(bc); // colorspace + pc->clamp_type = (CLAMP_TYPE)vp8_read_bit(bc); + } + + /* Is segmentation enabled */ + xd->segmentation_enabled = (unsigned char)vp8_read_bit(bc); + + if (xd->segmentation_enabled) { + /* Signal whether or not the segmentation map is being explicitly updated + * this frame. 
*/ + xd->update_mb_segmentation_map = (unsigned char)vp8_read_bit(bc); + xd->update_mb_segmentation_data = (unsigned char)vp8_read_bit(bc); + + if (xd->update_mb_segmentation_data) { + xd->mb_segement_abs_delta = (unsigned char)vp8_read_bit(bc); + + memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data)); + + /* For each segmentation feature (Quant and loop filter level) */ + for (i = 0; i < MB_LVL_MAX; ++i) { + for (j = 0; j < MAX_MB_SEGMENTS; ++j) { + /* Frame level data */ + if (vp8_read_bit(bc)) { + xd->segment_feature_data[i][j] = + (signed char)vp8_read_literal(bc, mb_feature_data_bits[i]); + + if (vp8_read_bit(bc)) { + xd->segment_feature_data[i][j] = -xd->segment_feature_data[i][j]; + } + } else { + xd->segment_feature_data[i][j] = 0; + } + } + } + } + + if (xd->update_mb_segmentation_map) { + /* Which macro block level features are enabled */ + memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs)); + + /* Read the probs used to decode the segment id for each macro block. */ + for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) { + /* If not explicitly set value is defaulted to 255 by memset above */ + if (vp8_read_bit(bc)) { + xd->mb_segment_tree_probs[i] = (vp8_prob)vp8_read_literal(bc, 8); + } + } + } + } else { + /* No segmentation updates on this frame */ + xd->update_mb_segmentation_map = 0; + xd->update_mb_segmentation_data = 0; + } + + /* Read the loop filter level and type */ + pc->filter_type = (LOOPFILTERTYPE)vp8_read_bit(bc); + pc->filter_level = vp8_read_literal(bc, 6); + pc->sharpness_level = vp8_read_literal(bc, 3); + + /* Read in loop filter deltas applied at the MB level based on mode or ref + * frame. 
*/ + xd->mode_ref_lf_delta_update = 0; + xd->mode_ref_lf_delta_enabled = (unsigned char)vp8_read_bit(bc); + + if (xd->mode_ref_lf_delta_enabled) { + /* Do the deltas need to be updated */ + xd->mode_ref_lf_delta_update = (unsigned char)vp8_read_bit(bc); + + if (xd->mode_ref_lf_delta_update) { + /* Send update */ + for (i = 0; i < MAX_REF_LF_DELTAS; ++i) { + if (vp8_read_bit(bc)) { + /*sign = vp8_read_bit( bc );*/ + xd->ref_lf_deltas[i] = (signed char)vp8_read_literal(bc, 6); + + if (vp8_read_bit(bc)) { /* Apply sign */ + xd->ref_lf_deltas[i] = xd->ref_lf_deltas[i] * -1; + } + } + } + + /* Send update */ + for (i = 0; i < MAX_MODE_LF_DELTAS; ++i) { + if (vp8_read_bit(bc)) { + /*sign = vp8_read_bit( bc );*/ + xd->mode_lf_deltas[i] = (signed char)vp8_read_literal(bc, 6); + + if (vp8_read_bit(bc)) { /* Apply sign */ + xd->mode_lf_deltas[i] = xd->mode_lf_deltas[i] * -1; + } + } + } + } + } + + setup_token_decoder(pbi, data + first_partition_length_in_bytes); + + xd->current_bc = &pbi->mbc[0]; + + /* Read the default quantizers. */ + { + int Q, q_update; + + Q = vp8_read_literal(bc, 7); /* AC 1st order Q = default */ + pc->base_qindex = Q; + q_update = 0; + pc->y1dc_delta_q = get_delta_q(bc, pc->y1dc_delta_q, &q_update); + pc->y2dc_delta_q = get_delta_q(bc, pc->y2dc_delta_q, &q_update); + pc->y2ac_delta_q = get_delta_q(bc, pc->y2ac_delta_q, &q_update); + pc->uvdc_delta_q = get_delta_q(bc, pc->uvdc_delta_q, &q_update); + pc->uvac_delta_q = get_delta_q(bc, pc->uvac_delta_q, &q_update); + + if (q_update) vp8cx_init_de_quantizer(pbi); + + /* MB level dequantizer setup */ + vp8_mb_init_dequantizer(pbi, &pbi->mb); + } + + /* Determine if the golden frame or ARF buffer should be updated and how. + * For all non key frames the GF and ARF refresh flags and sign bias + * flags must be set explicitly. 
+ */ + if (pc->frame_type != KEY_FRAME) { + /* Should the GF or ARF be updated from the current frame */ + pc->refresh_golden_frame = vp8_read_bit(bc); #if CONFIG_ERROR_CONCEALMENT - /* Assume we shouldn't refresh the probabilities if the bit is - * missing */ + /* Assume we shouldn't refresh golden if the bit is missing */ xd->corrupted |= vp8dx_bool_error(bc); - if (pbi->ec_active && xd->corrupted) - pc->refresh_entropy_probs = 0; + if (pbi->ec_active && xd->corrupted) pc->refresh_golden_frame = 0; #endif - if (pc->refresh_entropy_probs == 0) - { - memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc)); - } - - pc->refresh_last_frame = pc->frame_type == KEY_FRAME || vp8_read_bit(bc); + pc->refresh_alt_ref_frame = vp8_read_bit(bc); #if CONFIG_ERROR_CONCEALMENT - /* Assume we should refresh the last frame if the bit is missing */ + /* Assume we shouldn't refresh altref if the bit is missing */ xd->corrupted |= vp8dx_bool_error(bc); - if (pbi->ec_active && xd->corrupted) - pc->refresh_last_frame = 1; + if (pbi->ec_active && xd->corrupted) pc->refresh_alt_ref_frame = 0; #endif - if (0) - { - FILE *z = fopen("decodestats.stt", "a"); - fprintf(z, "%6d F:%d,G:%d,A:%d,L:%d,Q:%d\n", - pc->current_video_frame, - pc->frame_type, - pc->refresh_golden_frame, - pc->refresh_alt_ref_frame, - pc->refresh_last_frame, - pc->base_qindex); - fclose(z); + /* Buffer to buffer copy flags. 
*/ + pc->copy_buffer_to_gf = 0; + + if (!pc->refresh_golden_frame) { + pc->copy_buffer_to_gf = vp8_read_literal(bc, 2); } - { - pbi->independent_partitions = 1; - - /* read coef probability tree */ - for (i = 0; i < BLOCK_TYPES; i++) - for (j = 0; j < COEF_BANDS; j++) - for (k = 0; k < PREV_COEF_CONTEXTS; k++) - for (l = 0; l < ENTROPY_NODES; l++) - { - - vp8_prob *const p = pc->fc.coef_probs [i][j][k] + l; - - if (vp8_read(bc, vp8_coef_update_probs [i][j][k][l])) - { - *p = (vp8_prob)vp8_read_literal(bc, 8); - - } - if (k > 0 && *p != pc->fc.coef_probs[i][j][k-1][l]) - pbi->independent_partitions = 0; - - } - } - - /* clear out the coeff buffer */ - memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); - - vp8_decode_mode_mvs(pbi); - #if CONFIG_ERROR_CONCEALMENT - if (pbi->ec_active && - pbi->mvs_corrupt_from_mb < (unsigned int)pc->mb_cols * pc->mb_rows) - { - /* Motion vectors are missing in this frame. We will try to estimate - * them and then continue decoding the frame as usual */ - vp8_estimate_missing_mvs(pbi); - } + /* Assume we shouldn't copy to the golden if the bit is missing */ + xd->corrupted |= vp8dx_bool_error(bc); + if (pbi->ec_active && xd->corrupted) pc->copy_buffer_to_gf = 0; #endif - memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols); - pbi->frame_corrupt_residual = 0; + pc->copy_buffer_to_arf = 0; + + if (!pc->refresh_alt_ref_frame) { + pc->copy_buffer_to_arf = vp8_read_literal(bc, 2); + } + +#if CONFIG_ERROR_CONCEALMENT + /* Assume we shouldn't copy to the alt-ref if the bit is missing */ + xd->corrupted |= vp8dx_bool_error(bc); + if (pbi->ec_active && xd->corrupted) pc->copy_buffer_to_arf = 0; +#endif + + pc->ref_frame_sign_bias[GOLDEN_FRAME] = vp8_read_bit(bc); + pc->ref_frame_sign_bias[ALTREF_FRAME] = vp8_read_bit(bc); + } + + pc->refresh_entropy_probs = vp8_read_bit(bc); +#if CONFIG_ERROR_CONCEALMENT + /* Assume we shouldn't refresh the probabilities if the bit is + * missing */ + xd->corrupted |= vp8dx_bool_error(bc); + if 
(pbi->ec_active && xd->corrupted) pc->refresh_entropy_probs = 0; +#endif + if (pc->refresh_entropy_probs == 0) { + memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc)); + } + + pc->refresh_last_frame = pc->frame_type == KEY_FRAME || vp8_read_bit(bc); + +#if CONFIG_ERROR_CONCEALMENT + /* Assume we should refresh the last frame if the bit is missing */ + xd->corrupted |= vp8dx_bool_error(bc); + if (pbi->ec_active && xd->corrupted) pc->refresh_last_frame = 1; +#endif + + if (0) { + FILE *z = fopen("decodestats.stt", "a"); + fprintf(z, "%6d F:%d,G:%d,A:%d,L:%d,Q:%d\n", pc->current_video_frame, + pc->frame_type, pc->refresh_golden_frame, pc->refresh_alt_ref_frame, + pc->refresh_last_frame, pc->base_qindex); + fclose(z); + } + + { + pbi->independent_partitions = 1; + + /* read coef probability tree */ + for (i = 0; i < BLOCK_TYPES; ++i) { + for (j = 0; j < COEF_BANDS; ++j) { + for (k = 0; k < PREV_COEF_CONTEXTS; ++k) { + for (l = 0; l < ENTROPY_NODES; ++l) { + vp8_prob *const p = pc->fc.coef_probs[i][j][k] + l; + + if (vp8_read(bc, vp8_coef_update_probs[i][j][k][l])) { + *p = (vp8_prob)vp8_read_literal(bc, 8); + } + if (k > 0 && *p != pc->fc.coef_probs[i][j][k - 1][l]) { + pbi->independent_partitions = 0; + } + } + } + } + } + } + + /* clear out the coeff buffer */ + memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); + + vp8_decode_mode_mvs(pbi); + +#if CONFIG_ERROR_CONCEALMENT + if (pbi->ec_active && + pbi->mvs_corrupt_from_mb < (unsigned int)pc->mb_cols * pc->mb_rows) { + /* Motion vectors are missing in this frame. 
We will try to estimate + * them and then continue decoding the frame as usual */ + vp8_estimate_missing_mvs(pbi); + } +#endif + + memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols); + pbi->frame_corrupt_residual = 0; #if CONFIG_MULTITHREAD - if (pbi->b_multithreaded_rd && pc->multi_token_partition != ONE_PARTITION) - { - unsigned int thread; - vp8mt_decode_mb_rows(pbi, xd); - vp8_yv12_extend_frame_borders(yv12_fb_new); - for (thread = 0; thread < pbi->decoding_thread_count; ++thread) - corrupt_tokens |= pbi->mb_row_di[thread].mbd.corrupted; + if (pbi->b_multithreaded_rd && pc->multi_token_partition != ONE_PARTITION) { + unsigned int thread; + vp8mt_decode_mb_rows(pbi, xd); + vp8_yv12_extend_frame_borders(yv12_fb_new); + for (thread = 0; thread < pbi->decoding_thread_count; ++thread) { + corrupt_tokens |= pbi->mb_row_di[thread].mbd.corrupted; } - else + } else #endif - { - decode_mb_rows(pbi); - corrupt_tokens |= xd->corrupted; + { + decode_mb_rows(pbi); + corrupt_tokens |= xd->corrupted; + } + + /* Collect information about decoder corruption. */ + /* 1. Check first boolean decoder for errors. */ + yv12_fb_new->corrupted = vp8dx_bool_error(bc); + /* 2. Check the macroblock information */ + yv12_fb_new->corrupted |= corrupt_tokens; + + if (!pbi->decoded_key_frame) { + if (pc->frame_type == KEY_FRAME && !yv12_fb_new->corrupted) { + pbi->decoded_key_frame = 1; + } else { + vpx_internal_error(&pbi->common.error, VPX_CODEC_CORRUPT_FRAME, + "A stream must start with a complete key frame"); } + } - /* Collect information about decoder corruption. */ - /* 1. Check first boolean decoder for errors. */ - yv12_fb_new->corrupted = vp8dx_bool_error(bc); - /* 2. 
Check the macroblock information */ - yv12_fb_new->corrupted |= corrupt_tokens; + /* vpx_log("Decoder: Frame Decoded, Size Roughly:%d bytes + * \n",bc->pos+pbi->bc2.pos); */ - if (!pbi->decoded_key_frame) - { - if (pc->frame_type == KEY_FRAME && - !yv12_fb_new->corrupted) - pbi->decoded_key_frame = 1; - else - vpx_internal_error(&pbi->common.error, VPX_CODEC_CORRUPT_FRAME, - "A stream must start with a complete key frame"); - } - - /* vpx_log("Decoder: Frame Decoded, Size Roughly:%d bytes \n",bc->pos+pbi->bc2.pos); */ - - if (pc->refresh_entropy_probs == 0) - { - memcpy(&pc->fc, &pc->lfc, sizeof(pc->fc)); - pbi->independent_partitions = prev_independent_partitions; - } + if (pc->refresh_entropy_probs == 0) { + memcpy(&pc->fc, &pc->lfc, sizeof(pc->fc)); + pbi->independent_partitions = prev_independent_partitions; + } #ifdef PACKET_TESTING - { - FILE *f = fopen("decompressor.VP8", "ab"); - unsigned int size = pbi->bc2.pos + pbi->bc.pos + 8; - fwrite((void *) &size, 4, 1, f); - fwrite((void *) pbi->Source, size, 1, f); - fclose(f); - } + { + FILE *f = fopen("decompressor.VP8", "ab"); + unsigned int size = pbi->bc2.pos + pbi->bc.pos + 8; + fwrite((void *)&size, 4, 1, f); + fwrite((void *)pbi->Source, size, 1, f); + fclose(f); + } #endif - return 0; + return 0; } diff --git a/libs/libvpx/vp8/decoder/decodemv.c b/libs/libvpx/vp8/decoder/decodemv.c index 1d155e7e16..b946ab73d0 100644 --- a/libs/libvpx/vp8/decoder/decodemv.c +++ b/libs/libvpx/vp8/decoder/decodemv.c @@ -8,663 +8,558 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "treereader.h" #include "vp8/common/entropymv.h" #include "vp8/common/entropymode.h" #include "onyxd_int.h" #include "vp8/common/findnearmv.h" -#if CONFIG_DEBUG -#include -#endif -static B_PREDICTION_MODE read_bmode(vp8_reader *bc, const vp8_prob *p) -{ - const int i = vp8_treed_read(bc, vp8_bmode_tree, p); +static B_PREDICTION_MODE read_bmode(vp8_reader *bc, const vp8_prob *p) { + const int i = vp8_treed_read(bc, vp8_bmode_tree, p); - return (B_PREDICTION_MODE)i; + return (B_PREDICTION_MODE)i; } -static MB_PREDICTION_MODE read_ymode(vp8_reader *bc, const vp8_prob *p) -{ - const int i = vp8_treed_read(bc, vp8_ymode_tree, p); +static MB_PREDICTION_MODE read_ymode(vp8_reader *bc, const vp8_prob *p) { + const int i = vp8_treed_read(bc, vp8_ymode_tree, p); - return (MB_PREDICTION_MODE)i; + return (MB_PREDICTION_MODE)i; } -static MB_PREDICTION_MODE read_kf_ymode(vp8_reader *bc, const vp8_prob *p) -{ - const int i = vp8_treed_read(bc, vp8_kf_ymode_tree, p); +static MB_PREDICTION_MODE read_kf_ymode(vp8_reader *bc, const vp8_prob *p) { + const int i = vp8_treed_read(bc, vp8_kf_ymode_tree, p); - return (MB_PREDICTION_MODE)i; + return (MB_PREDICTION_MODE)i; } -static MB_PREDICTION_MODE read_uv_mode(vp8_reader *bc, const vp8_prob *p) -{ - const int i = vp8_treed_read(bc, vp8_uv_mode_tree, p); +static MB_PREDICTION_MODE read_uv_mode(vp8_reader *bc, const vp8_prob *p) { + const int i = vp8_treed_read(bc, vp8_uv_mode_tree, p); - return (MB_PREDICTION_MODE)i; + return (MB_PREDICTION_MODE)i; } -static void read_kf_modes(VP8D_COMP *pbi, MODE_INFO *mi) -{ - vp8_reader *const bc = & pbi->mbc[8]; - const int mis = pbi->common.mode_info_stride; +static void read_kf_modes(VP8D_COMP *pbi, MODE_INFO *mi) { + vp8_reader *const bc = &pbi->mbc[8]; + const int mis = pbi->common.mode_info_stride; - mi->mbmi.ref_frame = INTRA_FRAME; - mi->mbmi.mode = read_kf_ymode(bc, vp8_kf_ymode_prob); + mi->mbmi.ref_frame = INTRA_FRAME; + mi->mbmi.mode = read_kf_ymode(bc, vp8_kf_ymode_prob); - 
if (mi->mbmi.mode == B_PRED) - { - int i = 0; - mi->mbmi.is_4x4 = 1; + if (mi->mbmi.mode == B_PRED) { + int i = 0; + mi->mbmi.is_4x4 = 1; - do - { - const B_PREDICTION_MODE A = above_block_mode(mi, i, mis); - const B_PREDICTION_MODE L = left_block_mode(mi, i); + do { + const B_PREDICTION_MODE A = above_block_mode(mi, i, mis); + const B_PREDICTION_MODE L = left_block_mode(mi, i); - mi->bmi[i].as_mode = - read_bmode(bc, vp8_kf_bmode_prob [A] [L]); - } - while (++i < 16); - } + mi->bmi[i].as_mode = read_bmode(bc, vp8_kf_bmode_prob[A][L]); + } while (++i < 16); + } - mi->mbmi.uv_mode = read_uv_mode(bc, vp8_kf_uv_mode_prob); + mi->mbmi.uv_mode = read_uv_mode(bc, vp8_kf_uv_mode_prob); } -static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc) -{ - const vp8_prob *const p = (const vp8_prob *) mvc; - int x = 0; +static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc) { + const vp8_prob *const p = (const vp8_prob *)mvc; + int x = 0; - if (vp8_read(r, p [mvpis_short])) /* Large */ - { - int i = 0; - - do - { - x += vp8_read(r, p [MVPbits + i]) << i; - } - while (++i < 3); - - i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */ - - do - { - x += vp8_read(r, p [MVPbits + i]) << i; - } - while (--i > 3); - - if (!(x & 0xFFF0) || vp8_read(r, p [MVPbits + 3])) - x += 8; - } - else /* small */ - x = vp8_treed_read(r, vp8_small_mvtree, p + MVPshort); - - if (x && vp8_read(r, p [MVPsign])) - x = -x; - - return x; -} - -static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc) -{ - mv->row = (short)(read_mvcomponent(r, mvc) * 2); - mv->col = (short)(read_mvcomponent(r, ++mvc) * 2); -} - - -static void read_mvcontexts(vp8_reader *bc, MV_CONTEXT *mvc) -{ + if (vp8_read(r, p[mvpis_short])) /* Large */ + { int i = 0; - do - { - const vp8_prob *up = vp8_mv_update_probs[i].prob; - vp8_prob *p = (vp8_prob *)(mvc + i); - vp8_prob *const pstop = p + MVPcount; + do { + x += vp8_read(r, p[MVPbits + i]) << i; + } while (++i < 3); - do - { - if 
(vp8_read(bc, *up++)) - { - const vp8_prob x = (vp8_prob)vp8_read_literal(bc, 7); + i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */ - *p = x ? x << 1 : 1; - } - } - while (++p < pstop); - } - while (++i < 2); + do { + x += vp8_read(r, p[MVPbits + i]) << i; + } while (--i > 3); + + if (!(x & 0xFFF0) || vp8_read(r, p[MVPbits + 3])) x += 8; + } else { /* small */ + x = vp8_treed_read(r, vp8_small_mvtree, p + MVPshort); + } + + if (x && vp8_read(r, p[MVPsign])) x = -x; + + return x; } -static const unsigned char mbsplit_fill_count[4] = {8, 8, 4, 1}; +static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc) { + mv->row = (short)(read_mvcomponent(r, mvc) * 2); + mv->col = (short)(read_mvcomponent(r, ++mvc) * 2); +} + +static void read_mvcontexts(vp8_reader *bc, MV_CONTEXT *mvc) { + int i = 0; + + do { + const vp8_prob *up = vp8_mv_update_probs[i].prob; + vp8_prob *p = (vp8_prob *)(mvc + i); + vp8_prob *const pstop = p + MVPcount; + + do { + if (vp8_read(bc, *up++)) { + const vp8_prob x = (vp8_prob)vp8_read_literal(bc, 7); + + *p = x ? 
x << 1 : 1; + } + } while (++p < pstop); + } while (++i < 2); +} + +static const unsigned char mbsplit_fill_count[4] = { 8, 8, 4, 1 }; static const unsigned char mbsplit_fill_offset[4][16] = { - { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15}, - { 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15}, - { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 }, + { 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15 }, + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } }; - -static void mb_mode_mv_init(VP8D_COMP *pbi) -{ - vp8_reader *const bc = & pbi->mbc[8]; - MV_CONTEXT *const mvc = pbi->common.fc.mvc; +static void mb_mode_mv_init(VP8D_COMP *pbi) { + vp8_reader *const bc = &pbi->mbc[8]; + MV_CONTEXT *const mvc = pbi->common.fc.mvc; #if CONFIG_ERROR_CONCEALMENT - /* Default is that no macroblock is corrupt, therefore we initialize - * mvs_corrupt_from_mb to something very big, which we can be sure is - * outside the frame. */ - pbi->mvs_corrupt_from_mb = UINT_MAX; + /* Default is that no macroblock is corrupt, therefore we initialize + * mvs_corrupt_from_mb to something very big, which we can be sure is + * outside the frame. 
*/ + pbi->mvs_corrupt_from_mb = UINT_MAX; #endif - /* Read the mb_no_coeff_skip flag */ - pbi->common.mb_no_coeff_skip = (int)vp8_read_bit(bc); + /* Read the mb_no_coeff_skip flag */ + pbi->common.mb_no_coeff_skip = (int)vp8_read_bit(bc); - pbi->prob_skip_false = 0; - if (pbi->common.mb_no_coeff_skip) - pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8); + pbi->prob_skip_false = 0; + if (pbi->common.mb_no_coeff_skip) { + pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8); + } - if(pbi->common.frame_type != KEY_FRAME) - { - pbi->prob_intra = (vp8_prob)vp8_read_literal(bc, 8); - pbi->prob_last = (vp8_prob)vp8_read_literal(bc, 8); - pbi->prob_gf = (vp8_prob)vp8_read_literal(bc, 8); + if (pbi->common.frame_type != KEY_FRAME) { + pbi->prob_intra = (vp8_prob)vp8_read_literal(bc, 8); + pbi->prob_last = (vp8_prob)vp8_read_literal(bc, 8); + pbi->prob_gf = (vp8_prob)vp8_read_literal(bc, 8); - if (vp8_read_bit(bc)) - { - int i = 0; + if (vp8_read_bit(bc)) { + int i = 0; - do - { - pbi->common.fc.ymode_prob[i] = - (vp8_prob) vp8_read_literal(bc, 8); - } - while (++i < 4); - } - - if (vp8_read_bit(bc)) - { - int i = 0; - - do - { - pbi->common.fc.uv_mode_prob[i] = - (vp8_prob) vp8_read_literal(bc, 8); - } - while (++i < 3); - } - - read_mvcontexts(bc, mvc); + do { + pbi->common.fc.ymode_prob[i] = (vp8_prob)vp8_read_literal(bc, 8); + } while (++i < 4); } + + if (vp8_read_bit(bc)) { + int i = 0; + + do { + pbi->common.fc.uv_mode_prob[i] = (vp8_prob)vp8_read_literal(bc, 8); + } while (++i < 3); + } + + read_mvcontexts(bc, mvc); + } } -const vp8_prob vp8_sub_mv_ref_prob3 [8][VP8_SUBMVREFS-1] = -{ - { 147, 136, 18 }, /* SUBMVREF_NORMAL */ - { 223, 1 , 34 }, /* SUBMVREF_LEFT_ABOVE_SAME */ - { 106, 145, 1 }, /* SUBMVREF_LEFT_ZED */ - { 208, 1 , 1 }, /* SUBMVREF_LEFT_ABOVE_ZED */ - { 179, 121, 1 }, /* SUBMVREF_ABOVE_ZED */ - { 223, 1 , 34 }, /* SUBMVREF_LEFT_ABOVE_SAME */ - { 179, 121, 1 }, /* SUBMVREF_ABOVE_ZED */ - { 208, 1 , 1 } /* SUBMVREF_LEFT_ABOVE_ZED */ +const 
vp8_prob vp8_sub_mv_ref_prob3[8][VP8_SUBMVREFS - 1] = { + { 147, 136, 18 }, /* SUBMVREF_NORMAL */ + { 223, 1, 34 }, /* SUBMVREF_LEFT_ABOVE_SAME */ + { 106, 145, 1 }, /* SUBMVREF_LEFT_ZED */ + { 208, 1, 1 }, /* SUBMVREF_LEFT_ABOVE_ZED */ + { 179, 121, 1 }, /* SUBMVREF_ABOVE_ZED */ + { 223, 1, 34 }, /* SUBMVREF_LEFT_ABOVE_SAME */ + { 179, 121, 1 }, /* SUBMVREF_ABOVE_ZED */ + { 208, 1, 1 } /* SUBMVREF_LEFT_ABOVE_ZED */ }; -static -const vp8_prob * get_sub_mv_ref_prob(const int left, const int above) -{ - int lez = (left == 0); - int aez = (above == 0); - int lea = (left == above); - const vp8_prob * prob; +static const vp8_prob *get_sub_mv_ref_prob(const int left, const int above) { + int lez = (left == 0); + int aez = (above == 0); + int lea = (left == above); + const vp8_prob *prob; - prob = vp8_sub_mv_ref_prob3[(aez << 2) | - (lez << 1) | - (lea)]; + prob = vp8_sub_mv_ref_prob3[(aez << 2) | (lez << 1) | (lea)]; - return prob; + return prob; } static void decode_split_mv(vp8_reader *const bc, MODE_INFO *mi, - const MODE_INFO *left_mb, const MODE_INFO *above_mb, - MB_MODE_INFO *mbmi, int_mv best_mv, - MV_CONTEXT *const mvc, int mb_to_left_edge, - int mb_to_right_edge, int mb_to_top_edge, - int mb_to_bottom_edge) -{ - int s; /* split configuration (16x8, 8x16, 8x8, 4x4) */ - int num_p; /* number of partitions in the split configuration - (see vp8_mbsplit_count) */ - int j = 0; + const MODE_INFO *left_mb, const MODE_INFO *above_mb, + MB_MODE_INFO *mbmi, int_mv best_mv, + MV_CONTEXT *const mvc, int mb_to_left_edge, + int mb_to_right_edge, int mb_to_top_edge, + int mb_to_bottom_edge) { + int s; /* split configuration (16x8, 8x16, 8x8, 4x4) */ + /* number of partitions in the split configuration (see vp8_mbsplit_count) */ + int num_p; + int j = 0; - s = 3; - num_p = 16; - if( vp8_read(bc, 110) ) - { - s = 2; - num_p = 4; - if( vp8_read(bc, 111) ) - { - s = vp8_read(bc, 150); - num_p = 2; - } + s = 3; + num_p = 16; + if (vp8_read(bc, 110)) { + s = 2; + num_p = 4; + if 
(vp8_read(bc, 111)) { + s = vp8_read(bc, 150); + num_p = 2; + } + } + + do /* for each subset j */ + { + int_mv leftmv, abovemv; + int_mv blockmv; + int k; /* first block in subset j */ + + const vp8_prob *prob; + k = vp8_mbsplit_offset[s][j]; + + if (!(k & 3)) { + /* On L edge, get from MB to left of us */ + if (left_mb->mbmi.mode != SPLITMV) { + leftmv.as_int = left_mb->mbmi.mv.as_int; + } else { + leftmv.as_int = (left_mb->bmi + k + 4 - 1)->mv.as_int; + } + } else { + leftmv.as_int = (mi->bmi + k - 1)->mv.as_int; } - do /* for each subset j */ - { - int_mv leftmv, abovemv; - int_mv blockmv; - int k; /* first block in subset j */ - - const vp8_prob *prob; - k = vp8_mbsplit_offset[s][j]; - - if (!(k & 3)) - { - /* On L edge, get from MB to left of us */ - if(left_mb->mbmi.mode != SPLITMV) - leftmv.as_int = left_mb->mbmi.mv.as_int; - else - leftmv.as_int = (left_mb->bmi + k + 4 - 1)->mv.as_int; - } - else - leftmv.as_int = (mi->bmi + k - 1)->mv.as_int; - - if (!(k >> 2)) - { - /* On top edge, get from MB above us */ - if(above_mb->mbmi.mode != SPLITMV) - abovemv.as_int = above_mb->mbmi.mv.as_int; - else - abovemv.as_int = (above_mb->bmi + k + 16 - 4)->mv.as_int; - } - else - abovemv.as_int = (mi->bmi + k - 4)->mv.as_int; - - prob = get_sub_mv_ref_prob(leftmv.as_int, abovemv.as_int); - - if( vp8_read(bc, prob[0]) ) - { - if( vp8_read(bc, prob[1]) ) - { - blockmv.as_int = 0; - if( vp8_read(bc, prob[2]) ) - { - blockmv.as_mv.row = read_mvcomponent(bc, &mvc[0]) * 2; - blockmv.as_mv.row += best_mv.as_mv.row; - blockmv.as_mv.col = read_mvcomponent(bc, &mvc[1]) * 2; - blockmv.as_mv.col += best_mv.as_mv.col; - } - } - else - { - blockmv.as_int = abovemv.as_int; - } - } - else - { - blockmv.as_int = leftmv.as_int; - } - - mbmi->need_to_clamp_mvs |= vp8_check_mv_bounds(&blockmv, - mb_to_left_edge, - mb_to_right_edge, - mb_to_top_edge, - mb_to_bottom_edge); - - { - /* Fill (uniform) modes, mvs of jth subset. 
- Must do it here because ensuing subsets can - refer back to us via "left" or "above". */ - const unsigned char *fill_offset; - unsigned int fill_count = mbsplit_fill_count[s]; - - fill_offset = &mbsplit_fill_offset[s] - [(unsigned char)j * mbsplit_fill_count[s]]; - - do { - mi->bmi[ *fill_offset].mv.as_int = blockmv.as_int; - fill_offset++; - }while (--fill_count); - } - + if (!(k >> 2)) { + /* On top edge, get from MB above us */ + if (above_mb->mbmi.mode != SPLITMV) { + abovemv.as_int = above_mb->mbmi.mv.as_int; + } else { + abovemv.as_int = (above_mb->bmi + k + 16 - 4)->mv.as_int; + } + } else { + abovemv.as_int = (mi->bmi + k - 4)->mv.as_int; } - while (++j < num_p); - mbmi->partitioning = s; + prob = get_sub_mv_ref_prob(leftmv.as_int, abovemv.as_int); + + if (vp8_read(bc, prob[0])) { + if (vp8_read(bc, prob[1])) { + blockmv.as_int = 0; + if (vp8_read(bc, prob[2])) { + blockmv.as_mv.row = read_mvcomponent(bc, &mvc[0]) * 2; + blockmv.as_mv.row += best_mv.as_mv.row; + blockmv.as_mv.col = read_mvcomponent(bc, &mvc[1]) * 2; + blockmv.as_mv.col += best_mv.as_mv.col; + } + } else { + blockmv.as_int = abovemv.as_int; + } + } else { + blockmv.as_int = leftmv.as_int; + } + + mbmi->need_to_clamp_mvs |= + vp8_check_mv_bounds(&blockmv, mb_to_left_edge, mb_to_right_edge, + mb_to_top_edge, mb_to_bottom_edge); + + { + /* Fill (uniform) modes, mvs of jth subset. + Must do it here because ensuing subsets can + refer back to us via "left" or "above". 
*/ + const unsigned char *fill_offset; + unsigned int fill_count = mbsplit_fill_count[s]; + + fill_offset = + &mbsplit_fill_offset[s][(unsigned char)j * mbsplit_fill_count[s]]; + + do { + mi->bmi[*fill_offset].mv.as_int = blockmv.as_int; + fill_offset++; + } while (--fill_count); + } + + } while (++j < num_p); + + mbmi->partitioning = s; } -static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi) -{ - vp8_reader *const bc = & pbi->mbc[8]; - mbmi->ref_frame = (MV_REFERENCE_FRAME) vp8_read(bc, pbi->prob_intra); - if (mbmi->ref_frame) /* inter MB */ - { - enum {CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV}; - int cnt[4]; - int *cntx = cnt; - int_mv near_mvs[4]; - int_mv *nmv = near_mvs; - const int mis = pbi->mb.mode_info_stride; - const MODE_INFO *above = mi - mis; - const MODE_INFO *left = mi - 1; - const MODE_INFO *aboveleft = above - 1; - int *ref_frame_sign_bias = pbi->common.ref_frame_sign_bias; +static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, + MB_MODE_INFO *mbmi) { + vp8_reader *const bc = &pbi->mbc[8]; + mbmi->ref_frame = (MV_REFERENCE_FRAME)vp8_read(bc, pbi->prob_intra); + if (mbmi->ref_frame) /* inter MB */ + { + enum { CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV }; + int cnt[4]; + int *cntx = cnt; + int_mv near_mvs[4]; + int_mv *nmv = near_mvs; + const int mis = pbi->mb.mode_info_stride; + const MODE_INFO *above = mi - mis; + const MODE_INFO *left = mi - 1; + const MODE_INFO *aboveleft = above - 1; + int *ref_frame_sign_bias = pbi->common.ref_frame_sign_bias; - mbmi->need_to_clamp_mvs = 0; + mbmi->need_to_clamp_mvs = 0; - if (vp8_read(bc, pbi->prob_last)) - { - mbmi->ref_frame = - (MV_REFERENCE_FRAME)((int)(2 + vp8_read(bc, pbi->prob_gf))); + if (vp8_read(bc, pbi->prob_last)) { + mbmi->ref_frame = + (MV_REFERENCE_FRAME)((int)(2 + vp8_read(bc, pbi->prob_gf))); + } + + /* Zero accumulators */ + nmv[0].as_int = nmv[1].as_int = nmv[2].as_int = 0; + cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0; + + /* Process above */ + if 
(above->mbmi.ref_frame != INTRA_FRAME) { + if (above->mbmi.mv.as_int) { + (++nmv)->as_int = above->mbmi.mv.as_int; + mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], mbmi->ref_frame, + nmv, ref_frame_sign_bias); + ++cntx; + } + + *cntx += 2; + } + + /* Process left */ + if (left->mbmi.ref_frame != INTRA_FRAME) { + if (left->mbmi.mv.as_int) { + int_mv this_mv; + + this_mv.as_int = left->mbmi.mv.as_int; + mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], mbmi->ref_frame, + &this_mv, ref_frame_sign_bias); + + if (this_mv.as_int != nmv->as_int) { + (++nmv)->as_int = this_mv.as_int; + ++cntx; } - /* Zero accumulators */ - nmv[0].as_int = nmv[1].as_int = nmv[2].as_int = 0; - cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0; + *cntx += 2; + } else { + cnt[CNT_INTRA] += 2; + } + } - /* Process above */ - if (above->mbmi.ref_frame != INTRA_FRAME) - { - if (above->mbmi.mv.as_int) - { - (++nmv)->as_int = above->mbmi.mv.as_int; - mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], - mbmi->ref_frame, nmv, ref_frame_sign_bias); - ++cntx; - } + /* Process above left */ + if (aboveleft->mbmi.ref_frame != INTRA_FRAME) { + if (aboveleft->mbmi.mv.as_int) { + int_mv this_mv; - *cntx += 2; + this_mv.as_int = aboveleft->mbmi.mv.as_int; + mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], mbmi->ref_frame, + &this_mv, ref_frame_sign_bias); + + if (this_mv.as_int != nmv->as_int) { + (++nmv)->as_int = this_mv.as_int; + ++cntx; } - /* Process left */ - if (left->mbmi.ref_frame != INTRA_FRAME) - { - if (left->mbmi.mv.as_int) - { - int_mv this_mv; + *cntx += 1; + } else { + cnt[CNT_INTRA] += 1; + } + } - this_mv.as_int = left->mbmi.mv.as_int; - mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], - mbmi->ref_frame, &this_mv, ref_frame_sign_bias); + if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_INTRA]][0])) { + /* If we have three distinct MV's ... 
*/ + /* See if above-left MV can be merged with NEAREST */ + cnt[CNT_NEAREST] += ((cnt[CNT_SPLITMV] > 0) & + (nmv->as_int == near_mvs[CNT_NEAREST].as_int)); - if (this_mv.as_int != nmv->as_int) - { - (++nmv)->as_int = this_mv.as_int; - ++cntx; - } + /* Swap near and nearest if necessary */ + if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) { + int tmp; + tmp = cnt[CNT_NEAREST]; + cnt[CNT_NEAREST] = cnt[CNT_NEAR]; + cnt[CNT_NEAR] = tmp; + tmp = near_mvs[CNT_NEAREST].as_int; + near_mvs[CNT_NEAREST].as_int = near_mvs[CNT_NEAR].as_int; + near_mvs[CNT_NEAR].as_int = tmp; + } - *cntx += 2; - } - else - cnt[CNT_INTRA] += 2; - } - - /* Process above left */ - if (aboveleft->mbmi.ref_frame != INTRA_FRAME) - { - if (aboveleft->mbmi.mv.as_int) - { - int_mv this_mv; - - this_mv.as_int = aboveleft->mbmi.mv.as_int; - mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], - mbmi->ref_frame, &this_mv, ref_frame_sign_bias); - - if (this_mv.as_int != nmv->as_int) - { - (++nmv)->as_int = this_mv.as_int; - ++cntx; - } - - *cntx += 1; - } - else - cnt[CNT_INTRA] += 1; - } - - if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_INTRA]] [0]) ) - { - - /* If we have three distinct MV's ... 
*/ - /* See if above-left MV can be merged with NEAREST */ - cnt[CNT_NEAREST] += ( (cnt[CNT_SPLITMV] > 0) & - (nmv->as_int == near_mvs[CNT_NEAREST].as_int)); - - /* Swap near and nearest if necessary */ - if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) - { - int tmp; - tmp = cnt[CNT_NEAREST]; - cnt[CNT_NEAREST] = cnt[CNT_NEAR]; - cnt[CNT_NEAR] = tmp; - tmp = near_mvs[CNT_NEAREST].as_int; - near_mvs[CNT_NEAREST].as_int = near_mvs[CNT_NEAR].as_int; - near_mvs[CNT_NEAR].as_int = tmp; - } - - if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_NEAREST]] [1]) ) - { - - if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_NEAR]] [2]) ) - { - int mb_to_top_edge; - int mb_to_bottom_edge; - int mb_to_left_edge; - int mb_to_right_edge; - MV_CONTEXT *const mvc = pbi->common.fc.mvc; - int near_index; - - mb_to_top_edge = pbi->mb.mb_to_top_edge; - mb_to_bottom_edge = pbi->mb.mb_to_bottom_edge; - mb_to_top_edge -= LEFT_TOP_MARGIN; - mb_to_bottom_edge += RIGHT_BOTTOM_MARGIN; - mb_to_right_edge = pbi->mb.mb_to_right_edge; - mb_to_right_edge += RIGHT_BOTTOM_MARGIN; - mb_to_left_edge = pbi->mb.mb_to_left_edge; - mb_to_left_edge -= LEFT_TOP_MARGIN; - - /* Use near_mvs[0] to store the "best" MV */ - near_index = CNT_INTRA + - (cnt[CNT_NEAREST] >= cnt[CNT_INTRA]); - - vp8_clamp_mv2(&near_mvs[near_index], &pbi->mb); - - cnt[CNT_SPLITMV] = ((above->mbmi.mode == SPLITMV) - + (left->mbmi.mode == SPLITMV)) * 2 - + (aboveleft->mbmi.mode == SPLITMV); - - if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_SPLITMV]] [3]) ) - { - decode_split_mv(bc, mi, left, above, - mbmi, - near_mvs[near_index], - mvc, mb_to_left_edge, - mb_to_right_edge, - mb_to_top_edge, - mb_to_bottom_edge); - mbmi->mv.as_int = mi->bmi[15].mv.as_int; - mbmi->mode = SPLITMV; - mbmi->is_4x4 = 1; - } - else - { - int_mv *const mbmi_mv = & mbmi->mv; - read_mv(bc, &mbmi_mv->as_mv, (const MV_CONTEXT *) mvc); - mbmi_mv->as_mv.row += near_mvs[near_index].as_mv.row; - mbmi_mv->as_mv.col += near_mvs[near_index].as_mv.col; - - /* Don't need to check this on NEARMV and 
NEARESTMV - * modes since those modes clamp the MV. The NEWMV mode - * does not, so signal to the prediction stage whether - * special handling may be required. - */ - mbmi->need_to_clamp_mvs = - vp8_check_mv_bounds(mbmi_mv, mb_to_left_edge, - mb_to_right_edge, - mb_to_top_edge, - mb_to_bottom_edge); - mbmi->mode = NEWMV; - } - } - else - { - mbmi->mode = NEARMV; - mbmi->mv.as_int = near_mvs[CNT_NEAR].as_int; - vp8_clamp_mv2(&mbmi->mv, &pbi->mb); - } - } - else - { - mbmi->mode = NEARESTMV; - mbmi->mv.as_int = near_mvs[CNT_NEAREST].as_int; - vp8_clamp_mv2(&mbmi->mv, &pbi->mb); - } - } - else - { - mbmi->mode = ZEROMV; - mbmi->mv.as_int = 0; + if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) { + if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_NEAR]][2])) { + int mb_to_top_edge; + int mb_to_bottom_edge; + int mb_to_left_edge; + int mb_to_right_edge; + MV_CONTEXT *const mvc = pbi->common.fc.mvc; + int near_index; + + mb_to_top_edge = pbi->mb.mb_to_top_edge; + mb_to_bottom_edge = pbi->mb.mb_to_bottom_edge; + mb_to_top_edge -= LEFT_TOP_MARGIN; + mb_to_bottom_edge += RIGHT_BOTTOM_MARGIN; + mb_to_right_edge = pbi->mb.mb_to_right_edge; + mb_to_right_edge += RIGHT_BOTTOM_MARGIN; + mb_to_left_edge = pbi->mb.mb_to_left_edge; + mb_to_left_edge -= LEFT_TOP_MARGIN; + + /* Use near_mvs[0] to store the "best" MV */ + near_index = CNT_INTRA + (cnt[CNT_NEAREST] >= cnt[CNT_INTRA]); + + vp8_clamp_mv2(&near_mvs[near_index], &pbi->mb); + + cnt[CNT_SPLITMV] = + ((above->mbmi.mode == SPLITMV) + (left->mbmi.mode == SPLITMV)) * + 2 + + (aboveleft->mbmi.mode == SPLITMV); + + if (vp8_read(bc, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) { + decode_split_mv(bc, mi, left, above, mbmi, near_mvs[near_index], + mvc, mb_to_left_edge, mb_to_right_edge, + mb_to_top_edge, mb_to_bottom_edge); + mbmi->mv.as_int = mi->bmi[15].mv.as_int; + mbmi->mode = SPLITMV; + mbmi->is_4x4 = 1; + } else { + int_mv *const mbmi_mv = &mbmi->mv; + read_mv(bc, &mbmi_mv->as_mv, (const MV_CONTEXT *)mvc); + mbmi_mv->as_mv.row 
+= near_mvs[near_index].as_mv.row; + mbmi_mv->as_mv.col += near_mvs[near_index].as_mv.col; + + /* Don't need to check this on NEARMV and NEARESTMV + * modes since those modes clamp the MV. The NEWMV mode + * does not, so signal to the prediction stage whether + * special handling may be required. + */ + mbmi->need_to_clamp_mvs = + vp8_check_mv_bounds(mbmi_mv, mb_to_left_edge, mb_to_right_edge, + mb_to_top_edge, mb_to_bottom_edge); + mbmi->mode = NEWMV; + } + } else { + mbmi->mode = NEARMV; + mbmi->mv.as_int = near_mvs[CNT_NEAR].as_int; + vp8_clamp_mv2(&mbmi->mv, &pbi->mb); } + } else { + mbmi->mode = NEARESTMV; + mbmi->mv.as_int = near_mvs[CNT_NEAREST].as_int; + vp8_clamp_mv2(&mbmi->mv, &pbi->mb); + } + } else { + mbmi->mode = ZEROMV; + mbmi->mv.as_int = 0; + } #if CONFIG_ERROR_CONCEALMENT - if(pbi->ec_enabled && (mbmi->mode != SPLITMV)) - { - mi->bmi[ 0].mv.as_int = - mi->bmi[ 1].mv.as_int = - mi->bmi[ 2].mv.as_int = - mi->bmi[ 3].mv.as_int = - mi->bmi[ 4].mv.as_int = - mi->bmi[ 5].mv.as_int = - mi->bmi[ 6].mv.as_int = - mi->bmi[ 7].mv.as_int = - mi->bmi[ 8].mv.as_int = - mi->bmi[ 9].mv.as_int = - mi->bmi[10].mv.as_int = - mi->bmi[11].mv.as_int = - mi->bmi[12].mv.as_int = - mi->bmi[13].mv.as_int = - mi->bmi[14].mv.as_int = - mi->bmi[15].mv.as_int = mbmi->mv.as_int; - } + if (pbi->ec_enabled && (mbmi->mode != SPLITMV)) { + mi->bmi[0].mv.as_int = mi->bmi[1].mv.as_int = mi->bmi[2].mv.as_int = + mi->bmi[3].mv.as_int = mi->bmi[4].mv.as_int = mi->bmi[5].mv.as_int = + mi->bmi[6].mv.as_int = mi->bmi[7].mv.as_int = + mi->bmi[8].mv.as_int = mi->bmi[9].mv.as_int = + mi->bmi[10].mv.as_int = mi->bmi[11].mv.as_int = + mi->bmi[12].mv.as_int = mi->bmi[13].mv.as_int = + mi->bmi[14].mv.as_int = mi->bmi[15].mv.as_int = + mbmi->mv.as_int; + } #endif - } - else - { - /* required for left and above block mv */ - mbmi->mv.as_int = 0; + } else { + /* required for left and above block mv */ + mbmi->mv.as_int = 0; - /* MB is intra coded */ - if ((mbmi->mode = read_ymode(bc, 
pbi->common.fc.ymode_prob)) == B_PRED) - { - int j = 0; - mbmi->is_4x4 = 1; - do - { - mi->bmi[j].as_mode = read_bmode(bc, pbi->common.fc.bmode_prob); - } - while (++j < 16); - } - - mbmi->uv_mode = read_uv_mode(bc, pbi->common.fc.uv_mode_prob); + /* MB is intra coded */ + if ((mbmi->mode = read_ymode(bc, pbi->common.fc.ymode_prob)) == B_PRED) { + int j = 0; + mbmi->is_4x4 = 1; + do { + mi->bmi[j].as_mode = read_bmode(bc, pbi->common.fc.bmode_prob); + } while (++j < 16); } + mbmi->uv_mode = read_uv_mode(bc, pbi->common.fc.uv_mode_prob); + } } -static void read_mb_features(vp8_reader *r, MB_MODE_INFO *mi, MACROBLOCKD *x) -{ - /* Is segmentation enabled */ - if (x->segmentation_enabled && x->update_mb_segmentation_map) - { - /* If so then read the segment id. */ - if (vp8_read(r, x->mb_segment_tree_probs[0])) - mi->segment_id = - (unsigned char)(2 + vp8_read(r, x->mb_segment_tree_probs[2])); - else - mi->segment_id = - (unsigned char)(vp8_read(r, x->mb_segment_tree_probs[1])); +static void read_mb_features(vp8_reader *r, MB_MODE_INFO *mi, MACROBLOCKD *x) { + /* Is segmentation enabled */ + if (x->segmentation_enabled && x->update_mb_segmentation_map) { + /* If so then read the segment id. 
*/ + if (vp8_read(r, x->mb_segment_tree_probs[0])) { + mi->segment_id = + (unsigned char)(2 + vp8_read(r, x->mb_segment_tree_probs[2])); + } else { + mi->segment_id = + (unsigned char)(vp8_read(r, x->mb_segment_tree_probs[1])); } + } } static void decode_mb_mode_mvs(VP8D_COMP *pbi, MODE_INFO *mi, - MB_MODE_INFO *mbmi) -{ - (void)mbmi; + MB_MODE_INFO *mbmi) { + (void)mbmi; - /* Read the Macroblock segmentation map if it is being updated explicitly - * this frame (reset to 0 above by default) - * By default on a key frame reset all MBs to segment 0 - */ - if (pbi->mb.update_mb_segmentation_map) - read_mb_features(&pbi->mbc[8], &mi->mbmi, &pbi->mb); - else if(pbi->common.frame_type == KEY_FRAME) - mi->mbmi.segment_id = 0; + /* Read the Macroblock segmentation map if it is being updated explicitly + * this frame (reset to 0 above by default) + * By default on a key frame reset all MBs to segment 0 + */ + if (pbi->mb.update_mb_segmentation_map) { + read_mb_features(&pbi->mbc[8], &mi->mbmi, &pbi->mb); + } else if (pbi->common.frame_type == KEY_FRAME) { + mi->mbmi.segment_id = 0; + } - /* Read the macroblock coeff skip flag if this feature is in use, - * else default to 0 */ - if (pbi->common.mb_no_coeff_skip) - mi->mbmi.mb_skip_coeff = vp8_read(&pbi->mbc[8], pbi->prob_skip_false); - else - mi->mbmi.mb_skip_coeff = 0; - - mi->mbmi.is_4x4 = 0; - if(pbi->common.frame_type == KEY_FRAME) - read_kf_modes(pbi, mi); - else - read_mb_modes_mv(pbi, mi, &mi->mbmi); + /* Read the macroblock coeff skip flag if this feature is in use, + * else default to 0 */ + if (pbi->common.mb_no_coeff_skip) { + mi->mbmi.mb_skip_coeff = vp8_read(&pbi->mbc[8], pbi->prob_skip_false); + } else { + mi->mbmi.mb_skip_coeff = 0; + } + mi->mbmi.is_4x4 = 0; + if (pbi->common.frame_type == KEY_FRAME) { + read_kf_modes(pbi, mi); + } else { + read_mb_modes_mv(pbi, mi, &mi->mbmi); + } } -void vp8_decode_mode_mvs(VP8D_COMP *pbi) -{ - MODE_INFO *mi = pbi->common.mi; - int mb_row = -1; - int 
mb_to_right_edge_start; +void vp8_decode_mode_mvs(VP8D_COMP *pbi) { + MODE_INFO *mi = pbi->common.mi; + int mb_row = -1; + int mb_to_right_edge_start; - mb_mode_mv_init(pbi); + mb_mode_mv_init(pbi); - pbi->mb.mb_to_top_edge = 0; - pbi->mb.mb_to_bottom_edge = ((pbi->common.mb_rows - 1) * 16) << 3; - mb_to_right_edge_start = ((pbi->common.mb_cols - 1) * 16) << 3; + pbi->mb.mb_to_top_edge = 0; + pbi->mb.mb_to_bottom_edge = ((pbi->common.mb_rows - 1) * 16) << 3; + mb_to_right_edge_start = ((pbi->common.mb_cols - 1) * 16) << 3; - while (++mb_row < pbi->common.mb_rows) - { - int mb_col = -1; + while (++mb_row < pbi->common.mb_rows) { + int mb_col = -1; - pbi->mb.mb_to_left_edge = 0; - pbi->mb.mb_to_right_edge = mb_to_right_edge_start; + pbi->mb.mb_to_left_edge = 0; + pbi->mb.mb_to_right_edge = mb_to_right_edge_start; - while (++mb_col < pbi->common.mb_cols) - { + while (++mb_col < pbi->common.mb_cols) { #if CONFIG_ERROR_CONCEALMENT - int mb_num = mb_row * pbi->common.mb_cols + mb_col; + int mb_num = mb_row * pbi->common.mb_cols + mb_col; #endif - decode_mb_mode_mvs(pbi, mi, &mi->mbmi); + decode_mb_mode_mvs(pbi, mi, &mi->mbmi); #if CONFIG_ERROR_CONCEALMENT - /* look for corruption. set mvs_corrupt_from_mb to the current - * mb_num if the frame is corrupt from this macroblock. */ - if (vp8dx_bool_error(&pbi->mbc[8]) && mb_num < - (int)pbi->mvs_corrupt_from_mb) - { - pbi->mvs_corrupt_from_mb = mb_num; - /* no need to continue since the partition is corrupt from - * here on. - */ - return; - } + /* look for corruption. set mvs_corrupt_from_mb to the current + * mb_num if the frame is corrupt from this macroblock. */ + if (vp8dx_bool_error(&pbi->mbc[8]) && + mb_num < (int)pbi->mvs_corrupt_from_mb) { + pbi->mvs_corrupt_from_mb = mb_num; + /* no need to continue since the partition is corrupt from + * here on. 
+ */ + return; + } #endif - pbi->mb.mb_to_left_edge -= (16 << 3); - pbi->mb.mb_to_right_edge -= (16 << 3); - mi++; /* next macroblock */ - } - pbi->mb.mb_to_top_edge -= (16 << 3); - pbi->mb.mb_to_bottom_edge -= (16 << 3); - - mi++; /* skip left predictor each row */ + pbi->mb.mb_to_left_edge -= (16 << 3); + pbi->mb.mb_to_right_edge -= (16 << 3); + mi++; /* next macroblock */ } + pbi->mb.mb_to_top_edge -= (16 << 3); + pbi->mb.mb_to_bottom_edge -= (16 << 3); + + mi++; /* skip left predictor each row */ + } } diff --git a/libs/libvpx/vp8/decoder/detokenize.c b/libs/libvpx/vp8/decoder/detokenize.c index fcc7533c50..b350bafbc5 100644 --- a/libs/libvpx/vp8/decoder/detokenize.c +++ b/libs/libvpx/vp8/decoder/detokenize.c @@ -8,26 +8,23 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include "vp8/common/blockd.h" #include "onyxd_int.h" #include "vpx_mem/vpx_mem.h" #include "vpx_ports/mem.h" #include "detokenize.h" -void vp8_reset_mb_tokens_context(MACROBLOCKD *x) -{ - ENTROPY_CONTEXT *a_ctx = ((ENTROPY_CONTEXT *)x->above_context); - ENTROPY_CONTEXT *l_ctx = ((ENTROPY_CONTEXT *)x->left_context); +void vp8_reset_mb_tokens_context(MACROBLOCKD *x) { + ENTROPY_CONTEXT *a_ctx = ((ENTROPY_CONTEXT *)x->above_context); + ENTROPY_CONTEXT *l_ctx = ((ENTROPY_CONTEXT *)x->left_context); - memset(a_ctx, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1); - memset(l_ctx, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1); + memset(a_ctx, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1); + memset(l_ctx, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1); - /* Clear entropy contexts for Y2 blocks */ - if (!x->mode_info_context->mbmi.is_4x4) - { - a_ctx[8] = l_ctx[8] = 0; - } + /* Clear entropy contexts for Y2 blocks */ + if (!x->mode_info_context->mbmi.is_4x4) { + a_ctx[8] = l_ctx[8] = 0; + } } /* @@ -35,211 +32,175 @@ void vp8_reset_mb_tokens_context(MACROBLOCKD *x) Residual decoding (Paragraph 13.2 / 13.3) */ static const uint8_t kBands[16 + 1] = { - 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, - 0 /* extra 
entry as sentinel */ + 0, 1, 2, 3, 6, 4, 5, 6, 6, + 6, 6, 6, 6, 6, 6, 7, 0 /* extra entry as sentinel */ }; static const uint8_t kCat3[] = { 173, 148, 140, 0 }; static const uint8_t kCat4[] = { 176, 155, 140, 135, 0 }; static const uint8_t kCat5[] = { 180, 157, 141, 134, 130, 0 }; -static const uint8_t kCat6[] = - { 254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0 }; -static const uint8_t* const kCat3456[] = { kCat3, kCat4, kCat5, kCat6 }; -static const uint8_t kZigzag[16] = { - 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15 -}; +static const uint8_t kCat6[] = { 254, 254, 243, 230, 196, 177, + 153, 140, 133, 130, 129, 0 }; +static const uint8_t *const kCat3456[] = { kCat3, kCat4, kCat5, kCat6 }; +static const uint8_t kZigzag[16] = { 0, 1, 4, 8, 5, 2, 3, 6, + 9, 12, 13, 10, 7, 11, 14, 15 }; #define VP8GetBit vp8dx_decode_bool -#define NUM_PROBAS 11 -#define NUM_CTX 3 +#define NUM_PROBAS 11 +#define NUM_CTX 3 /* for const-casting */ typedef const uint8_t (*ProbaArray)[NUM_CTX][NUM_PROBAS]; -static int GetSigned(BOOL_DECODER *br, int value_to_sign) -{ - int split = (br->range + 1) >> 1; - VP8_BD_VALUE bigsplit = (VP8_BD_VALUE)split << (VP8_BD_VALUE_SIZE - 8); - int v; +static int GetSigned(BOOL_DECODER *br, int value_to_sign) { + int split = (br->range + 1) >> 1; + VP8_BD_VALUE bigsplit = (VP8_BD_VALUE)split << (VP8_BD_VALUE_SIZE - 8); + int v; - if(br->count < 0) - vp8dx_bool_decoder_fill(br); + if (br->count < 0) vp8dx_bool_decoder_fill(br); - if ( br->value < bigsplit ) - { - br->range = split; - v= value_to_sign; - } - else - { - br->range = br->range-split; - br->value = br->value-bigsplit; - v = -value_to_sign; - } - br->range +=br->range; - br->value +=br->value; - br->count--; + if (br->value < bigsplit) { + br->range = split; + v = value_to_sign; + } else { + br->range = br->range - split; + br->value = br->value - bigsplit; + v = -value_to_sign; + } + br->range += br->range; + br->value += br->value; + br->count--; - return v; + return v; } /* 
Returns the position of the last non-zero coeff plus one (and 0 if there's no coeff at all) */ -static int GetCoeffs(BOOL_DECODER *br, ProbaArray prob, - int ctx, int n, int16_t* out) -{ - const uint8_t* p = prob[n][ctx]; - if (!VP8GetBit(br, p[0])) - { /* first EOB is more a 'CBP' bit. */ - return 0; - } - while (1) - { - ++n; - if (!VP8GetBit(br, p[1])) - { - p = prob[kBands[n]][0]; +static int GetCoeffs(BOOL_DECODER *br, ProbaArray prob, int ctx, int n, + int16_t *out) { + const uint8_t *p = prob[n][ctx]; + if (!VP8GetBit(br, p[0])) { /* first EOB is more a 'CBP' bit. */ + return 0; + } + while (1) { + ++n; + if (!VP8GetBit(br, p[1])) { + p = prob[kBands[n]][0]; + } else { /* non zero coeff */ + int v, j; + if (!VP8GetBit(br, p[2])) { + p = prob[kBands[n]][1]; + v = 1; + } else { + if (!VP8GetBit(br, p[3])) { + if (!VP8GetBit(br, p[4])) { + v = 2; + } else { + v = 3 + VP8GetBit(br, p[5]); + } + } else { + if (!VP8GetBit(br, p[6])) { + if (!VP8GetBit(br, p[7])) { + v = 5 + VP8GetBit(br, 159); + } else { + v = 7 + 2 * VP8GetBit(br, 165); + v += VP8GetBit(br, 145); + } + } else { + const uint8_t *tab; + const int bit1 = VP8GetBit(br, p[8]); + const int bit0 = VP8GetBit(br, p[9 + bit1]); + const int cat = 2 * bit1 + bit0; + v = 0; + for (tab = kCat3456[cat]; *tab; ++tab) { + v += v + VP8GetBit(br, *tab); + } + v += 3 + (8 << cat); + } } - else - { /* non zero coeff */ - int v, j; - if (!VP8GetBit(br, p[2])) - { - p = prob[kBands[n]][1]; - v = 1; - } - else - { - if (!VP8GetBit(br, p[3])) - { - if (!VP8GetBit(br, p[4])) - { - v = 2; - } - else - { - v = 3 + VP8GetBit(br, p[5]); - } - } - else - { - if (!VP8GetBit(br, p[6])) - { - if (!VP8GetBit(br, p[7])) - { - v = 5 + VP8GetBit(br, 159); - } else - { - v = 7 + 2 * VP8GetBit(br, 165); - v += VP8GetBit(br, 145); - } - } - else - { - const uint8_t* tab; - const int bit1 = VP8GetBit(br, p[8]); - const int bit0 = VP8GetBit(br, p[9 + bit1]); - const int cat = 2 * bit1 + bit0; - v = 0; - for (tab = kCat3456[cat]; *tab; 
++tab) - { - v += v + VP8GetBit(br, *tab); - } - v += 3 + (8 << cat); - } - } - p = prob[kBands[n]][2]; - } - j = kZigzag[n - 1]; + p = prob[kBands[n]][2]; + } + j = kZigzag[n - 1]; - out[j] = GetSigned(br, v); + out[j] = GetSigned(br, v); - if (n == 16 || !VP8GetBit(br, p[0])) - { /* EOB */ - return n; - } - } - if (n == 16) - { - return 16; - } + if (n == 16 || !VP8GetBit(br, p[0])) { /* EOB */ + return n; + } } + if (n == 16) { + return 16; + } + } } -int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *x) -{ - BOOL_DECODER *bc = x->current_bc; - const FRAME_CONTEXT * const fc = &dx->common.fc; - char *eobs = x->eobs; +int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *x) { + BOOL_DECODER *bc = x->current_bc; + const FRAME_CONTEXT *const fc = &dx->common.fc; + char *eobs = x->eobs; - int i; - int nonzeros; - int eobtotal = 0; + int i; + int nonzeros; + int eobtotal = 0; - short *qcoeff_ptr; - ProbaArray coef_probs; - ENTROPY_CONTEXT *a_ctx = ((ENTROPY_CONTEXT *)x->above_context); - ENTROPY_CONTEXT *l_ctx = ((ENTROPY_CONTEXT *)x->left_context); - ENTROPY_CONTEXT *a; - ENTROPY_CONTEXT *l; - int skip_dc = 0; + short *qcoeff_ptr; + ProbaArray coef_probs; + ENTROPY_CONTEXT *a_ctx = ((ENTROPY_CONTEXT *)x->above_context); + ENTROPY_CONTEXT *l_ctx = ((ENTROPY_CONTEXT *)x->left_context); + ENTROPY_CONTEXT *a; + ENTROPY_CONTEXT *l; + int skip_dc = 0; - qcoeff_ptr = &x->qcoeff[0]; + qcoeff_ptr = &x->qcoeff[0]; - if (!x->mode_info_context->mbmi.is_4x4) - { - a = a_ctx + 8; - l = l_ctx + 8; + if (!x->mode_info_context->mbmi.is_4x4) { + a = a_ctx + 8; + l = l_ctx + 8; - coef_probs = fc->coef_probs [1]; + coef_probs = fc->coef_probs[1]; - nonzeros = GetCoeffs(bc, coef_probs, (*a + *l), 0, qcoeff_ptr + 24 * 16); - *a = *l = (nonzeros > 0); + nonzeros = GetCoeffs(bc, coef_probs, (*a + *l), 0, qcoeff_ptr + 24 * 16); + *a = *l = (nonzeros > 0); - eobs[24] = nonzeros; - eobtotal += nonzeros - 16; + eobs[24] = nonzeros; + eobtotal += nonzeros - 16; - coef_probs = fc->coef_probs 
[0]; - skip_dc = 1; - } - else - { - coef_probs = fc->coef_probs [3]; - skip_dc = 0; - } + coef_probs = fc->coef_probs[0]; + skip_dc = 1; + } else { + coef_probs = fc->coef_probs[3]; + skip_dc = 0; + } - for (i = 0; i < 16; ++i) - { - a = a_ctx + (i&3); - l = l_ctx + ((i&0xc)>>2); + for (i = 0; i < 16; ++i) { + a = a_ctx + (i & 3); + l = l_ctx + ((i & 0xc) >> 2); - nonzeros = GetCoeffs(bc, coef_probs, (*a + *l), skip_dc, qcoeff_ptr); - *a = *l = (nonzeros > 0); + nonzeros = GetCoeffs(bc, coef_probs, (*a + *l), skip_dc, qcoeff_ptr); + *a = *l = (nonzeros > 0); - nonzeros += skip_dc; - eobs[i] = nonzeros; - eobtotal += nonzeros; - qcoeff_ptr += 16; - } + nonzeros += skip_dc; + eobs[i] = nonzeros; + eobtotal += nonzeros; + qcoeff_ptr += 16; + } - coef_probs = fc->coef_probs [2]; + coef_probs = fc->coef_probs[2]; - a_ctx += 4; - l_ctx += 4; - for (i = 16; i < 24; ++i) - { - a = a_ctx + ((i > 19)<<1) + (i&1); - l = l_ctx + ((i > 19)<<1) + ((i&3)>1); + a_ctx += 4; + l_ctx += 4; + for (i = 16; i < 24; ++i) { + a = a_ctx + ((i > 19) << 1) + (i & 1); + l = l_ctx + ((i > 19) << 1) + ((i & 3) > 1); - nonzeros = GetCoeffs(bc, coef_probs, (*a + *l), 0, qcoeff_ptr); - *a = *l = (nonzeros > 0); + nonzeros = GetCoeffs(bc, coef_probs, (*a + *l), 0, qcoeff_ptr); + *a = *l = (nonzeros > 0); - eobs[i] = nonzeros; - eobtotal += nonzeros; - qcoeff_ptr += 16; - } + eobs[i] = nonzeros; + eobtotal += nonzeros; + qcoeff_ptr += 16; + } - return eobtotal; + return eobtotal; } - diff --git a/libs/libvpx/vp8/decoder/ec_types.h b/libs/libvpx/vp8/decoder/ec_types.h index 3af5ca86b4..0ab08b649a 100644 --- a/libs/libvpx/vp8/decoder/ec_types.h +++ b/libs/libvpx/vp8/decoder/ec_types.h @@ -17,38 +17,31 @@ extern "C" { #define MAX_OVERLAPS 16 - /* The area (pixel area in Q6) the block pointed to by bmi overlaps * another block with. 
*/ -typedef struct -{ - int overlap; - union b_mode_info *bmi; +typedef struct { + int overlap; + union b_mode_info *bmi; } OVERLAP_NODE; /* Structure to keep track of overlapping blocks on a block level. */ -typedef struct -{ - /* TODO(holmer): This array should be exchanged for a linked list */ - OVERLAP_NODE overlaps[MAX_OVERLAPS]; +typedef struct { + /* TODO(holmer): This array should be exchanged for a linked list */ + OVERLAP_NODE overlaps[MAX_OVERLAPS]; } B_OVERLAP; /* Structure used to hold all the overlaps of a macroblock. The overlaps of a * macroblock is further divided into block overlaps. */ -typedef struct -{ - B_OVERLAP overlaps[16]; -} MB_OVERLAP; +typedef struct { B_OVERLAP overlaps[16]; } MB_OVERLAP; /* Structure for keeping track of motion vectors and which reference frame they * refer to. Used for motion vector interpolation. */ -typedef struct -{ - MV mv; - MV_REFERENCE_FRAME ref_frame; +typedef struct { + MV mv; + MV_REFERENCE_FRAME ref_frame; } EC_BLOCK; #ifdef __cplusplus diff --git a/libs/libvpx/vp8/decoder/error_concealment.c b/libs/libvpx/vp8/decoder/error_concealment.c index 0b846a08b4..e22141492c 100644 --- a/libs/libvpx/vp8/decoder/error_concealment.c +++ b/libs/libvpx/vp8/decoder/error_concealment.c @@ -18,14 +18,13 @@ #include "vp8/common/common.h" #include "vpx_dsp/vpx_dsp_common.h" -#define FLOOR(x,q) ((x) & -(1 << (q))) +#define FLOOR(x, q) ((x) & -(1 << (q))) #define NUM_NEIGHBORS 20 -typedef struct ec_position -{ - int row; - int col; +typedef struct ec_position { + int row; + int col; } EC_POS; /* @@ -35,56 +34,45 @@ typedef struct ec_position * W = round((1./(sqrt(x.^2 + y.^2))*2^7)); * W(1,1) = 0; */ -static const int weights_q7[5][5] = { - { 0, 128, 64, 43, 32 }, - {128, 91, 57, 40, 31 }, - { 64, 57, 45, 36, 29 }, - { 43, 40, 36, 30, 26 }, - { 32, 31, 29, 26, 23 } -}; +static const int weights_q7[5][5] = { { 0, 128, 64, 43, 32 }, + { 128, 91, 57, 40, 31 }, + { 64, 57, 45, 36, 29 }, + { 43, 40, 36, 30, 26 }, + { 32, 31, 29, 
26, 23 } }; -int vp8_alloc_overlap_lists(VP8D_COMP *pbi) -{ - if (pbi->overlaps != NULL) - { - vpx_free(pbi->overlaps); - pbi->overlaps = NULL; - } - - pbi->overlaps = vpx_calloc(pbi->common.mb_rows * pbi->common.mb_cols, - sizeof(MB_OVERLAP)); - - if (pbi->overlaps == NULL) - return -1; - - return 0; -} - -void vp8_de_alloc_overlap_lists(VP8D_COMP *pbi) -{ +int vp8_alloc_overlap_lists(VP8D_COMP *pbi) { + if (pbi->overlaps != NULL) { vpx_free(pbi->overlaps); pbi->overlaps = NULL; + } + + pbi->overlaps = + vpx_calloc(pbi->common.mb_rows * pbi->common.mb_cols, sizeof(MB_OVERLAP)); + + if (pbi->overlaps == NULL) return -1; + + return 0; +} + +void vp8_de_alloc_overlap_lists(VP8D_COMP *pbi) { + vpx_free(pbi->overlaps); + pbi->overlaps = NULL; } /* Inserts a new overlap area value to the list of overlaps of a block */ -static void assign_overlap(OVERLAP_NODE* overlaps, - union b_mode_info *bmi, - int overlap) -{ - int i; - if (overlap <= 0) - return; - /* Find and assign to the next empty overlap node in the list of overlaps. - * Empty is defined as bmi == NULL */ - for (i = 0; i < MAX_OVERLAPS; i++) - { - if (overlaps[i].bmi == NULL) - { - overlaps[i].bmi = bmi; - overlaps[i].overlap = overlap; - break; - } +static void assign_overlap(OVERLAP_NODE *overlaps, union b_mode_info *bmi, + int overlap) { + int i; + if (overlap <= 0) return; + /* Find and assign to the next empty overlap node in the list of overlaps. + * Empty is defined as bmi == NULL */ + for (i = 0; i < MAX_OVERLAPS; ++i) { + if (overlaps[i].bmi == NULL) { + overlaps[i].bmi = bmi; + overlaps[i].overlap = overlap; + break; } + } } /* Calculates the overlap area between two 4x4 squares, where the first @@ -92,16 +80,16 @@ static void assign_overlap(OVERLAP_NODE* overlaps, * square has its upper-left corner at (b2_row, b2_col). Doesn't * properly handle squares which do not overlap. 
*/ -static int block_overlap(int b1_row, int b1_col, int b2_row, int b2_col) -{ - const int int_top = VPXMAX(b1_row, b2_row); // top - const int int_left = VPXMAX(b1_col, b2_col); // left - /* Since each block is 4x4 pixels, adding 4 (Q3) to the left/top edge - * gives us the right/bottom edge. - */ - const int int_right = VPXMIN(b1_col + (4<<3), b2_col + (4<<3)); // right - const int int_bottom = VPXMIN(b1_row + (4<<3), b2_row + (4<<3)); // bottom - return (int_bottom - int_top) * (int_right - int_left); +static int block_overlap(int b1_row, int b1_col, int b2_row, int b2_col) { + const int int_top = VPXMAX(b1_row, b2_row); // top + const int int_left = VPXMAX(b1_col, b2_col); // left + /* Since each block is 4x4 pixels, adding 4 (Q3) to the left/top edge + * gives us the right/bottom edge. + */ + const int int_right = VPXMIN(b1_col + (4 << 3), b2_col + (4 << 3)); // right + const int int_bottom = + VPXMIN(b1_row + (4 << 3), b2_row + (4 << 3)); // bottom + return (int_bottom - int_top) * (int_right - int_left); } /* Calculates the overlap area for all blocks in a macroblock at position @@ -111,314 +99,254 @@ static int block_overlap(int b1_row, int b1_col, int b2_row, int b2_col) * first_blk_col) in blocks relative the upper-left corner of the image. */ static void calculate_overlaps_mb(B_OVERLAP *b_overlaps, union b_mode_info *bmi, - int new_row, int new_col, - int mb_row, int mb_col, - int first_blk_row, int first_blk_col) -{ - /* Find the blocks within this MB (defined by mb_row, mb_col) which are - * overlapped by bmi and calculate and assign overlap for each of those - * blocks. */ + int new_row, int new_col, int mb_row, + int mb_col, int first_blk_row, + int first_blk_col) { + /* Find the blocks within this MB (defined by mb_row, mb_col) which are + * overlapped by bmi and calculate and assign overlap for each of those + * blocks. 
*/ - /* Block coordinates relative the upper-left block */ - const int rel_ol_blk_row = first_blk_row - mb_row * 4; - const int rel_ol_blk_col = first_blk_col - mb_col * 4; - /* If the block partly overlaps any previous MB, these coordinates - * can be < 0. We don't want to access blocks in previous MBs. - */ - const int blk_idx = VPXMAX(rel_ol_blk_row,0) * 4 + VPXMAX(rel_ol_blk_col,0); - /* Upper left overlapping block */ - B_OVERLAP *b_ol_ul = &(b_overlaps[blk_idx]); + /* Block coordinates relative the upper-left block */ + const int rel_ol_blk_row = first_blk_row - mb_row * 4; + const int rel_ol_blk_col = first_blk_col - mb_col * 4; + /* If the block partly overlaps any previous MB, these coordinates + * can be < 0. We don't want to access blocks in previous MBs. + */ + const int blk_idx = VPXMAX(rel_ol_blk_row, 0) * 4 + VPXMAX(rel_ol_blk_col, 0); + /* Upper left overlapping block */ + B_OVERLAP *b_ol_ul = &(b_overlaps[blk_idx]); - /* Calculate and assign overlaps for all blocks in this MB - * which the motion compensated block overlaps - */ - /* Avoid calculating overlaps for blocks in later MBs */ - int end_row = VPXMIN(4 + mb_row * 4 - first_blk_row, 2); - int end_col = VPXMIN(4 + mb_col * 4 - first_blk_col, 2); - int row, col; + /* Calculate and assign overlaps for all blocks in this MB + * which the motion compensated block overlaps + */ + /* Avoid calculating overlaps for blocks in later MBs */ + int end_row = VPXMIN(4 + mb_row * 4 - first_blk_row, 2); + int end_col = VPXMIN(4 + mb_col * 4 - first_blk_col, 2); + int row, col; - /* Check if new_row and new_col are evenly divisible by 4 (Q3), - * and if so we shouldn't check neighboring blocks - */ - if (new_row >= 0 && (new_row & 0x1F) == 0) - end_row = 1; - if (new_col >= 0 && (new_col & 0x1F) == 0) - end_col = 1; + /* Check if new_row and new_col are evenly divisible by 4 (Q3), + * and if so we shouldn't check neighboring blocks + */ + if (new_row >= 0 && (new_row & 0x1F) == 0) end_row = 1; + if (new_col 
>= 0 && (new_col & 0x1F) == 0) end_col = 1; - /* Check if the overlapping block partly overlaps a previous MB - * and if so, we're overlapping fewer blocks in this MB. - */ - if (new_row < (mb_row*16)<<3) - end_row = 1; - if (new_col < (mb_col*16)<<3) - end_col = 1; + /* Check if the overlapping block partly overlaps a previous MB + * and if so, we're overlapping fewer blocks in this MB. + */ + if (new_row < (mb_row * 16) << 3) end_row = 1; + if (new_col < (mb_col * 16) << 3) end_col = 1; - for (row = 0; row < end_row; ++row) - { - for (col = 0; col < end_col; ++col) - { - /* input in Q3, result in Q6 */ - const int overlap = block_overlap(new_row, new_col, - (((first_blk_row + row) * - 4) << 3), - (((first_blk_col + col) * - 4) << 3)); - assign_overlap(b_ol_ul[row * 4 + col].overlaps, bmi, overlap); - } + for (row = 0; row < end_row; ++row) { + for (col = 0; col < end_col; ++col) { + /* input in Q3, result in Q6 */ + const int overlap = + block_overlap(new_row, new_col, (((first_blk_row + row) * 4) << 3), + (((first_blk_col + col) * 4) << 3)); + assign_overlap(b_ol_ul[row * 4 + col].overlaps, bmi, overlap); } + } } -void vp8_calculate_overlaps(MB_OVERLAP *overlap_ul, - int mb_rows, int mb_cols, - union b_mode_info *bmi, - int b_row, int b_col) -{ - MB_OVERLAP *mb_overlap; - int row, col, rel_row, rel_col; - int new_row, new_col; - int end_row, end_col; - int overlap_b_row, overlap_b_col; - int overlap_mb_row, overlap_mb_col; +void vp8_calculate_overlaps(MB_OVERLAP *overlap_ul, int mb_rows, int mb_cols, + union b_mode_info *bmi, int b_row, int b_col) { + MB_OVERLAP *mb_overlap; + int row, col, rel_row, rel_col; + int new_row, new_col; + int end_row, end_col; + int overlap_b_row, overlap_b_col; + int overlap_mb_row, overlap_mb_col; - /* mb subpixel position */ - row = (4 * b_row) << 3; /* Q3 */ - col = (4 * b_col) << 3; /* Q3 */ + /* mb subpixel position */ + row = (4 * b_row) << 3; /* Q3 */ + col = (4 * b_col) << 3; /* Q3 */ - /* reverse compensate for motion */ - 
new_row = row - bmi->mv.as_mv.row; - new_col = col - bmi->mv.as_mv.col; + /* reverse compensate for motion */ + new_row = row - bmi->mv.as_mv.row; + new_col = col - bmi->mv.as_mv.col; - if (new_row >= ((16*mb_rows) << 3) || new_col >= ((16*mb_cols) << 3)) - { - /* the new block ended up outside the frame */ - return; - } - - if (new_row <= (-4 << 3) || new_col <= (-4 << 3)) - { - /* outside the frame */ - return; - } - /* overlapping block's position in blocks */ - overlap_b_row = FLOOR(new_row / 4, 3) >> 3; - overlap_b_col = FLOOR(new_col / 4, 3) >> 3; - - /* overlapping block's MB position in MBs - * operations are done in Q3 - */ - overlap_mb_row = FLOOR((overlap_b_row << 3) / 4, 3) >> 3; - overlap_mb_col = FLOOR((overlap_b_col << 3) / 4, 3) >> 3; - - end_row = VPXMIN(mb_rows - overlap_mb_row, 2); - end_col = VPXMIN(mb_cols - overlap_mb_col, 2); - - /* Don't calculate overlap for MBs we don't overlap */ - /* Check if the new block row starts at the last block row of the MB */ - if (abs(new_row - ((16*overlap_mb_row) << 3)) < ((3*4) << 3)) - end_row = 1; - /* Check if the new block col starts at the last block col of the MB */ - if (abs(new_col - ((16*overlap_mb_col) << 3)) < ((3*4) << 3)) - end_col = 1; - - /* find the MB(s) this block is overlapping */ - for (rel_row = 0; rel_row < end_row; ++rel_row) - { - for (rel_col = 0; rel_col < end_col; ++rel_col) - { - if (overlap_mb_row + rel_row < 0 || - overlap_mb_col + rel_col < 0) - continue; - mb_overlap = overlap_ul + (overlap_mb_row + rel_row) * mb_cols + - overlap_mb_col + rel_col; - - calculate_overlaps_mb(mb_overlap->overlaps, bmi, - new_row, new_col, - overlap_mb_row + rel_row, - overlap_mb_col + rel_col, - overlap_b_row + rel_row, - overlap_b_col + rel_col); - } + if (new_row >= ((16 * mb_rows) << 3) || new_col >= ((16 * mb_cols) << 3)) { + /* the new block ended up outside the frame */ + return; + } + + if (new_row <= -32 || new_col <= -32) { + /* outside the frame */ + return; + } + /* overlapping block's 
position in blocks */ + overlap_b_row = FLOOR(new_row / 4, 3) >> 3; + overlap_b_col = FLOOR(new_col / 4, 3) >> 3; + + /* overlapping block's MB position in MBs + * operations are done in Q3 + */ + overlap_mb_row = FLOOR((overlap_b_row << 3) / 4, 3) >> 3; + overlap_mb_col = FLOOR((overlap_b_col << 3) / 4, 3) >> 3; + + end_row = VPXMIN(mb_rows - overlap_mb_row, 2); + end_col = VPXMIN(mb_cols - overlap_mb_col, 2); + + /* Don't calculate overlap for MBs we don't overlap */ + /* Check if the new block row starts at the last block row of the MB */ + if (abs(new_row - ((16 * overlap_mb_row) << 3)) < ((3 * 4) << 3)) end_row = 1; + /* Check if the new block col starts at the last block col of the MB */ + if (abs(new_col - ((16 * overlap_mb_col) << 3)) < ((3 * 4) << 3)) end_col = 1; + + /* find the MB(s) this block is overlapping */ + for (rel_row = 0; rel_row < end_row; ++rel_row) { + for (rel_col = 0; rel_col < end_col; ++rel_col) { + if (overlap_mb_row + rel_row < 0 || overlap_mb_col + rel_col < 0) + continue; + mb_overlap = overlap_ul + (overlap_mb_row + rel_row) * mb_cols + + overlap_mb_col + rel_col; + + calculate_overlaps_mb(mb_overlap->overlaps, bmi, new_row, new_col, + overlap_mb_row + rel_row, overlap_mb_col + rel_col, + overlap_b_row + rel_row, overlap_b_col + rel_col); } + } } /* Estimates a motion vector given the overlapping blocks' motion vectors. * Filters out all overlapping blocks which do not refer to the correct * reference frame type. 
*/ -static void estimate_mv(const OVERLAP_NODE *overlaps, union b_mode_info *bmi) -{ - int i; - int overlap_sum = 0; - int row_acc = 0; - int col_acc = 0; +static void estimate_mv(const OVERLAP_NODE *overlaps, union b_mode_info *bmi) { + int i; + int overlap_sum = 0; + int row_acc = 0; + int col_acc = 0; - bmi->mv.as_int = 0; - for (i=0; i < MAX_OVERLAPS; ++i) - { - if (overlaps[i].bmi == NULL) - break; - col_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.col; - row_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.row; - overlap_sum += overlaps[i].overlap; - } - if (overlap_sum > 0) - { - /* Q9 / Q6 = Q3 */ - bmi->mv.as_mv.col = col_acc / overlap_sum; - bmi->mv.as_mv.row = row_acc / overlap_sum; - } - else - { - bmi->mv.as_mv.col = 0; - bmi->mv.as_mv.row = 0; - } + bmi->mv.as_int = 0; + for (i = 0; i < MAX_OVERLAPS; ++i) { + if (overlaps[i].bmi == NULL) break; + col_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.col; + row_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.row; + overlap_sum += overlaps[i].overlap; + } + if (overlap_sum > 0) { + /* Q9 / Q6 = Q3 */ + bmi->mv.as_mv.col = col_acc / overlap_sum; + bmi->mv.as_mv.row = row_acc / overlap_sum; + } else { + bmi->mv.as_mv.col = 0; + bmi->mv.as_mv.row = 0; + } } /* Estimates all motion vectors for a macroblock given the lists of * overlaps for each block. Decides whether or not the MVs must be clamped. 
*/ -static void estimate_mb_mvs(const B_OVERLAP *block_overlaps, - MODE_INFO *mi, - int mb_to_left_edge, - int mb_to_right_edge, - int mb_to_top_edge, - int mb_to_bottom_edge) -{ - int row, col; - int non_zero_count = 0; - MV * const filtered_mv = &(mi->mbmi.mv.as_mv); - union b_mode_info * const bmi = mi->bmi; - filtered_mv->col = 0; - filtered_mv->row = 0; - mi->mbmi.need_to_clamp_mvs = 0; - for (row = 0; row < 4; ++row) - { - int this_b_to_top_edge = mb_to_top_edge + ((row*4)<<3); - int this_b_to_bottom_edge = mb_to_bottom_edge - ((row*4)<<3); - for (col = 0; col < 4; ++col) - { - int i = row * 4 + col; - int this_b_to_left_edge = mb_to_left_edge + ((col*4)<<3); - int this_b_to_right_edge = mb_to_right_edge - ((col*4)<<3); - /* Estimate vectors for all blocks which are overlapped by this */ - /* type. Interpolate/extrapolate the rest of the block's MVs */ - estimate_mv(block_overlaps[i].overlaps, &(bmi[i])); - mi->mbmi.need_to_clamp_mvs |= vp8_check_mv_bounds( - &bmi[i].mv, - this_b_to_left_edge, - this_b_to_right_edge, - this_b_to_top_edge, - this_b_to_bottom_edge); - if (bmi[i].mv.as_int != 0) - { - ++non_zero_count; - filtered_mv->col += bmi[i].mv.as_mv.col; - filtered_mv->row += bmi[i].mv.as_mv.row; - } - } - } - if (non_zero_count > 0) - { - filtered_mv->col /= non_zero_count; - filtered_mv->row /= non_zero_count; +static void estimate_mb_mvs(const B_OVERLAP *block_overlaps, MODE_INFO *mi, + int mb_to_left_edge, int mb_to_right_edge, + int mb_to_top_edge, int mb_to_bottom_edge) { + int row, col; + int non_zero_count = 0; + MV *const filtered_mv = &(mi->mbmi.mv.as_mv); + union b_mode_info *const bmi = mi->bmi; + filtered_mv->col = 0; + filtered_mv->row = 0; + mi->mbmi.need_to_clamp_mvs = 0; + for (row = 0; row < 4; ++row) { + int this_b_to_top_edge = mb_to_top_edge + ((row * 4) << 3); + int this_b_to_bottom_edge = mb_to_bottom_edge - ((row * 4) << 3); + for (col = 0; col < 4; ++col) { + int i = row * 4 + col; + int this_b_to_left_edge = mb_to_left_edge + 
((col * 4) << 3); + int this_b_to_right_edge = mb_to_right_edge - ((col * 4) << 3); + /* Estimate vectors for all blocks which are overlapped by this */ + /* type. Interpolate/extrapolate the rest of the block's MVs */ + estimate_mv(block_overlaps[i].overlaps, &(bmi[i])); + mi->mbmi.need_to_clamp_mvs |= vp8_check_mv_bounds( + &bmi[i].mv, this_b_to_left_edge, this_b_to_right_edge, + this_b_to_top_edge, this_b_to_bottom_edge); + if (bmi[i].mv.as_int != 0) { + ++non_zero_count; + filtered_mv->col += bmi[i].mv.as_mv.col; + filtered_mv->row += bmi[i].mv.as_mv.row; + } } + } + if (non_zero_count > 0) { + filtered_mv->col /= non_zero_count; + filtered_mv->row /= non_zero_count; + } } static void calc_prev_mb_overlaps(MB_OVERLAP *overlaps, MODE_INFO *prev_mi, - int mb_row, int mb_col, - int mb_rows, int mb_cols) -{ - int sub_row; - int sub_col; - for (sub_row = 0; sub_row < 4; ++sub_row) - { - for (sub_col = 0; sub_col < 4; ++sub_col) - { - vp8_calculate_overlaps( - overlaps, mb_rows, mb_cols, - &(prev_mi->bmi[sub_row * 4 + sub_col]), - 4 * mb_row + sub_row, - 4 * mb_col + sub_col); - } + int mb_row, int mb_col, int mb_rows, + int mb_cols) { + int sub_row; + int sub_col; + for (sub_row = 0; sub_row < 4; ++sub_row) { + for (sub_col = 0; sub_col < 4; ++sub_col) { + vp8_calculate_overlaps(overlaps, mb_rows, mb_cols, + &(prev_mi->bmi[sub_row * 4 + sub_col]), + 4 * mb_row + sub_row, 4 * mb_col + sub_col); } + } } /* Estimate all missing motion vectors. This function does the same as the one * above, but has different input arguments. 
*/ -static void estimate_missing_mvs(MB_OVERLAP *overlaps, - MODE_INFO *mi, MODE_INFO *prev_mi, - int mb_rows, int mb_cols, - unsigned int first_corrupt) -{ - int mb_row, mb_col; - memset(overlaps, 0, sizeof(MB_OVERLAP) * mb_rows * mb_cols); - /* First calculate the overlaps for all blocks */ - for (mb_row = 0; mb_row < mb_rows; ++mb_row) - { - for (mb_col = 0; mb_col < mb_cols; ++mb_col) - { - /* We're only able to use blocks referring to the last frame - * when extrapolating new vectors. - */ - if (prev_mi->mbmi.ref_frame == LAST_FRAME) - { - calc_prev_mb_overlaps(overlaps, prev_mi, - mb_row, mb_col, - mb_rows, mb_cols); - } - ++prev_mi; - } - ++prev_mi; +static void estimate_missing_mvs(MB_OVERLAP *overlaps, MODE_INFO *mi, + MODE_INFO *prev_mi, int mb_rows, int mb_cols, + unsigned int first_corrupt) { + int mb_row, mb_col; + memset(overlaps, 0, sizeof(MB_OVERLAP) * mb_rows * mb_cols); + /* First calculate the overlaps for all blocks */ + for (mb_row = 0; mb_row < mb_rows; ++mb_row) { + for (mb_col = 0; mb_col < mb_cols; ++mb_col) { + /* We're only able to use blocks referring to the last frame + * when extrapolating new vectors. + */ + if (prev_mi->mbmi.ref_frame == LAST_FRAME) { + calc_prev_mb_overlaps(overlaps, prev_mi, mb_row, mb_col, mb_rows, + mb_cols); + } + ++prev_mi; } + ++prev_mi; + } - mb_row = first_corrupt / mb_cols; - mb_col = first_corrupt - mb_row * mb_cols; - mi += mb_row*(mb_cols + 1) + mb_col; - /* Go through all macroblocks in the current image with missing MVs - * and calculate new MVs using the overlaps. 
- */ - for (; mb_row < mb_rows; ++mb_row) - { - int mb_to_top_edge = -((mb_row * 16)) << 3; - int mb_to_bottom_edge = ((mb_rows - 1 - mb_row) * 16) << 3; - for (; mb_col < mb_cols; ++mb_col) - { - int mb_to_left_edge = -((mb_col * 16) << 3); - int mb_to_right_edge = ((mb_cols - 1 - mb_col) * 16) << 3; - const B_OVERLAP *block_overlaps = - overlaps[mb_row*mb_cols + mb_col].overlaps; - mi->mbmi.ref_frame = LAST_FRAME; - mi->mbmi.mode = SPLITMV; - mi->mbmi.uv_mode = DC_PRED; - mi->mbmi.partitioning = 3; - mi->mbmi.segment_id = 0; - estimate_mb_mvs(block_overlaps, - mi, - mb_to_left_edge, - mb_to_right_edge, - mb_to_top_edge, - mb_to_bottom_edge); - ++mi; - } - mb_col = 0; - ++mi; + mb_row = first_corrupt / mb_cols; + mb_col = first_corrupt - mb_row * mb_cols; + mi += mb_row * (mb_cols + 1) + mb_col; + /* Go through all macroblocks in the current image with missing MVs + * and calculate new MVs using the overlaps. + */ + for (; mb_row < mb_rows; ++mb_row) { + int mb_to_top_edge = -((mb_row * 16)) << 3; + int mb_to_bottom_edge = ((mb_rows - 1 - mb_row) * 16) << 3; + for (; mb_col < mb_cols; ++mb_col) { + int mb_to_left_edge = -((mb_col * 16) << 3); + int mb_to_right_edge = ((mb_cols - 1 - mb_col) * 16) << 3; + const B_OVERLAP *block_overlaps = + overlaps[mb_row * mb_cols + mb_col].overlaps; + mi->mbmi.ref_frame = LAST_FRAME; + mi->mbmi.mode = SPLITMV; + mi->mbmi.uv_mode = DC_PRED; + mi->mbmi.partitioning = 3; + mi->mbmi.segment_id = 0; + estimate_mb_mvs(block_overlaps, mi, mb_to_left_edge, mb_to_right_edge, + mb_to_top_edge, mb_to_bottom_edge); + ++mi; } + mb_col = 0; + ++mi; + } } -void vp8_estimate_missing_mvs(VP8D_COMP *pbi) -{ - VP8_COMMON * const pc = &pbi->common; - estimate_missing_mvs(pbi->overlaps, - pc->mi, pc->prev_mi, - pc->mb_rows, pc->mb_cols, - pbi->mvs_corrupt_from_mb); +void vp8_estimate_missing_mvs(VP8D_COMP *pbi) { + VP8_COMMON *const pc = &pbi->common; + estimate_missing_mvs(pbi->overlaps, pc->mi, pc->prev_mi, pc->mb_rows, + pc->mb_cols, 
pbi->mvs_corrupt_from_mb); } -static void assign_neighbor(EC_BLOCK *neighbor, MODE_INFO *mi, int block_idx) -{ - assert(mi->mbmi.ref_frame < MAX_REF_FRAMES); - neighbor->ref_frame = mi->mbmi.ref_frame; - neighbor->mv = mi->bmi[block_idx].mv.as_mv; +static void assign_neighbor(EC_BLOCK *neighbor, MODE_INFO *mi, int block_idx) { + assert(mi->mbmi.ref_frame < MAX_REF_FRAMES); + neighbor->ref_frame = mi->mbmi.ref_frame; + neighbor->mv = mi->bmi[block_idx].mv.as_mv; } /* Finds the neighboring blocks of a macroblocks. In the general case @@ -429,169 +357,126 @@ static void assign_neighbor(EC_BLOCK *neighbor, MODE_INFO *mi, int block_idx) * neighbor, and so on. The last element refers to the neighbor below the first * neighbor. */ -static void find_neighboring_blocks(MODE_INFO *mi, - EC_BLOCK *neighbors, - int mb_row, int mb_col, - int mb_rows, int mb_cols, - int mi_stride) -{ - int i = 0; - int j; - if (mb_row > 0) - { - /* upper left */ - if (mb_col > 0) - assign_neighbor(&neighbors[i], mi - mi_stride - 1, 15); - ++i; - /* above */ - for (j = 12; j < 16; ++j, ++i) - assign_neighbor(&neighbors[i], mi - mi_stride, j); - } - else - i += 5; +static void find_neighboring_blocks(MODE_INFO *mi, EC_BLOCK *neighbors, + int mb_row, int mb_col, int mb_rows, + int mb_cols, int mi_stride) { + int i = 0; + int j; + if (mb_row > 0) { + /* upper left */ + if (mb_col > 0) assign_neighbor(&neighbors[i], mi - mi_stride - 1, 15); + ++i; + /* above */ + for (j = 12; j < 16; ++j, ++i) + assign_neighbor(&neighbors[i], mi - mi_stride, j); + } else + i += 5; + if (mb_col < mb_cols - 1) { + /* upper right */ + if (mb_row > 0) assign_neighbor(&neighbors[i], mi - mi_stride + 1, 12); + ++i; + /* right */ + for (j = 0; j <= 12; j += 4, ++i) assign_neighbor(&neighbors[i], mi + 1, j); + } else + i += 5; + if (mb_row < mb_rows - 1) { + /* lower right */ if (mb_col < mb_cols - 1) - { - /* upper right */ - if (mb_row > 0) - assign_neighbor(&neighbors[i], mi - mi_stride + 1, 12); - ++i; - /* right */ - 
for (j = 0; j <= 12; j += 4, ++i) - assign_neighbor(&neighbors[i], mi + 1, j); - } - else - i += 5; + assign_neighbor(&neighbors[i], mi + mi_stride + 1, 0); + ++i; + /* below */ + for (j = 0; j < 4; ++j, ++i) + assign_neighbor(&neighbors[i], mi + mi_stride, j); + } else + i += 5; + if (mb_col > 0) { + /* lower left */ if (mb_row < mb_rows - 1) - { - /* lower right */ - if (mb_col < mb_cols - 1) - assign_neighbor(&neighbors[i], mi + mi_stride + 1, 0); - ++i; - /* below */ - for (j = 0; j < 4; ++j, ++i) - assign_neighbor(&neighbors[i], mi + mi_stride, j); + assign_neighbor(&neighbors[i], mi + mi_stride - 1, 4); + ++i; + /* left */ + for (j = 3; j < 16; j += 4, ++i) { + assign_neighbor(&neighbors[i], mi - 1, j); } - else - i += 5; - if (mb_col > 0) - { - /* lower left */ - if (mb_row < mb_rows - 1) - assign_neighbor(&neighbors[i], mi + mi_stride - 1, 4); - ++i; - /* left */ - for (j = 3; j < 16; j += 4, ++i) - { - assign_neighbor(&neighbors[i], mi - 1, j); - } - } - else - i += 5; - assert(i == 20); + } else + i += 5; + assert(i == 20); } /* Interpolates all motion vectors for a macroblock from the neighboring blocks' * motion vectors. */ -static void interpolate_mvs(MACROBLOCKD *mb, - EC_BLOCK *neighbors, - MV_REFERENCE_FRAME dom_ref_frame) -{ - int row, col, i; - MODE_INFO * const mi = mb->mode_info_context; - /* Table with the position of the neighboring blocks relative the position - * of the upper left block of the current MB. Starting with the upper left - * neighbor and going to the right. 
- */ - const EC_POS neigh_pos[NUM_NEIGHBORS] = { - {-1,-1}, {-1,0}, {-1,1}, {-1,2}, {-1,3}, - {-1,4}, {0,4}, {1,4}, {2,4}, {3,4}, - {4,4}, {4,3}, {4,2}, {4,1}, {4,0}, - {4,-1}, {3,-1}, {2,-1}, {1,-1}, {0,-1} - }; - mi->mbmi.need_to_clamp_mvs = 0; - for (row = 0; row < 4; ++row) - { - int mb_to_top_edge = mb->mb_to_top_edge + ((row*4)<<3); - int mb_to_bottom_edge = mb->mb_to_bottom_edge - ((row*4)<<3); - for (col = 0; col < 4; ++col) - { - int mb_to_left_edge = mb->mb_to_left_edge + ((col*4)<<3); - int mb_to_right_edge = mb->mb_to_right_edge - ((col*4)<<3); - int w_sum = 0; - int mv_row_sum = 0; - int mv_col_sum = 0; - int_mv * const mv = &(mi->bmi[row*4 + col].mv); - mv->as_int = 0; - for (i = 0; i < NUM_NEIGHBORS; ++i) - { - /* Calculate the weighted sum of neighboring MVs referring - * to the dominant frame type. - */ - const int w = weights_q7[abs(row - neigh_pos[i].row)] - [abs(col - neigh_pos[i].col)]; - if (neighbors[i].ref_frame != dom_ref_frame) - continue; - w_sum += w; - /* Q7 * Q3 = Q10 */ - mv_row_sum += w*neighbors[i].mv.row; - mv_col_sum += w*neighbors[i].mv.col; - } - if (w_sum > 0) - { - /* Avoid division by zero. - * Normalize with the sum of the coefficients - * Q3 = Q10 / Q7 - */ - mv->as_mv.row = mv_row_sum / w_sum; - mv->as_mv.col = mv_col_sum / w_sum; - mi->mbmi.need_to_clamp_mvs |= vp8_check_mv_bounds( - mv, - mb_to_left_edge, - mb_to_right_edge, - mb_to_top_edge, - mb_to_bottom_edge); - } - } +static void interpolate_mvs(MACROBLOCKD *mb, EC_BLOCK *neighbors, + MV_REFERENCE_FRAME dom_ref_frame) { + int row, col, i; + MODE_INFO *const mi = mb->mode_info_context; + /* Table with the position of the neighboring blocks relative the position + * of the upper left block of the current MB. Starting with the upper left + * neighbor and going to the right. 
+ */ + const EC_POS neigh_pos[NUM_NEIGHBORS] = { + { -1, -1 }, { -1, 0 }, { -1, 1 }, { -1, 2 }, { -1, 3 }, { -1, 4 }, { 0, 4 }, + { 1, 4 }, { 2, 4 }, { 3, 4 }, { 4, 4 }, { 4, 3 }, { 4, 2 }, { 4, 1 }, + { 4, 0 }, { 4, -1 }, { 3, -1 }, { 2, -1 }, { 1, -1 }, { 0, -1 } + }; + mi->mbmi.need_to_clamp_mvs = 0; + for (row = 0; row < 4; ++row) { + int mb_to_top_edge = mb->mb_to_top_edge + ((row * 4) << 3); + int mb_to_bottom_edge = mb->mb_to_bottom_edge - ((row * 4) << 3); + for (col = 0; col < 4; ++col) { + int mb_to_left_edge = mb->mb_to_left_edge + ((col * 4) << 3); + int mb_to_right_edge = mb->mb_to_right_edge - ((col * 4) << 3); + int w_sum = 0; + int mv_row_sum = 0; + int mv_col_sum = 0; + int_mv *const mv = &(mi->bmi[row * 4 + col].mv); + mv->as_int = 0; + for (i = 0; i < NUM_NEIGHBORS; ++i) { + /* Calculate the weighted sum of neighboring MVs referring + * to the dominant frame type. + */ + const int w = weights_q7[abs(row - neigh_pos[i].row)] + [abs(col - neigh_pos[i].col)]; + if (neighbors[i].ref_frame != dom_ref_frame) continue; + w_sum += w; + /* Q7 * Q3 = Q10 */ + mv_row_sum += w * neighbors[i].mv.row; + mv_col_sum += w * neighbors[i].mv.col; + } + if (w_sum > 0) { + /* Avoid division by zero. + * Normalize with the sum of the coefficients + * Q3 = Q10 / Q7 + */ + mv->as_mv.row = mv_row_sum / w_sum; + mv->as_mv.col = mv_col_sum / w_sum; + mi->mbmi.need_to_clamp_mvs |= + vp8_check_mv_bounds(mv, mb_to_left_edge, mb_to_right_edge, + mb_to_top_edge, mb_to_bottom_edge); + } } + } } -void vp8_interpolate_motion(MACROBLOCKD *mb, - int mb_row, int mb_col, - int mb_rows, int mb_cols, - int mi_stride) -{ - /* Find relevant neighboring blocks */ - EC_BLOCK neighbors[NUM_NEIGHBORS]; - int i; - /* Initialize the array. 
MAX_REF_FRAMES is interpreted as "doesn't exist" */ - for (i = 0; i < NUM_NEIGHBORS; ++i) - { - neighbors[i].ref_frame = MAX_REF_FRAMES; - neighbors[i].mv.row = neighbors[i].mv.col = 0; - } - find_neighboring_blocks(mb->mode_info_context, - neighbors, - mb_row, mb_col, - mb_rows, mb_cols, - mb->mode_info_stride); - /* Interpolate MVs for the missing blocks from the surrounding - * blocks which refer to the last frame. */ - interpolate_mvs(mb, neighbors, LAST_FRAME); - - mb->mode_info_context->mbmi.ref_frame = LAST_FRAME; - mb->mode_info_context->mbmi.mode = SPLITMV; - mb->mode_info_context->mbmi.uv_mode = DC_PRED; - mb->mode_info_context->mbmi.partitioning = 3; - mb->mode_info_context->mbmi.segment_id = 0; -} - -void vp8_conceal_corrupt_mb(MACROBLOCKD *xd) -{ - /* This macroblock has corrupt residual, use the motion compensated - image (predictor) for concealment */ - - /* The build predictor functions now output directly into the dst buffer, - * so the copies are no longer necessary */ +void vp8_interpolate_motion(MACROBLOCKD *mb, int mb_row, int mb_col, + int mb_rows, int mb_cols) { + /* Find relevant neighboring blocks */ + EC_BLOCK neighbors[NUM_NEIGHBORS]; + int i; + /* Initialize the array. MAX_REF_FRAMES is interpreted as "doesn't exist" */ + for (i = 0; i < NUM_NEIGHBORS; ++i) { + neighbors[i].ref_frame = MAX_REF_FRAMES; + neighbors[i].mv.row = neighbors[i].mv.col = 0; + } + find_neighboring_blocks(mb->mode_info_context, neighbors, mb_row, mb_col, + mb_rows, mb_cols, mb->mode_info_stride); + /* Interpolate MVs for the missing blocks from the surrounding + * blocks which refer to the last frame. 
*/ + interpolate_mvs(mb, neighbors, LAST_FRAME); + mb->mode_info_context->mbmi.ref_frame = LAST_FRAME; + mb->mode_info_context->mbmi.mode = SPLITMV; + mb->mode_info_context->mbmi.uv_mode = DC_PRED; + mb->mode_info_context->mbmi.partitioning = 3; + mb->mode_info_context->mbmi.segment_id = 0; } diff --git a/libs/libvpx/vp8/decoder/error_concealment.h b/libs/libvpx/vp8/decoder/error_concealment.h index 9a1e024865..89c78c1442 100644 --- a/libs/libvpx/vp8/decoder/error_concealment.h +++ b/libs/libvpx/vp8/decoder/error_concealment.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_DECODER_ERROR_CONCEALMENT_H_ #define VP8_DECODER_ERROR_CONCEALMENT_H_ @@ -32,15 +31,8 @@ void vp8_estimate_missing_mvs(VP8D_COMP *pbi); /* Interpolates all motion vectors for a macroblock mb at position * (mb_row, mb_col). */ -void vp8_interpolate_motion(MACROBLOCKD *mb, - int mb_row, int mb_col, - int mb_rows, int mb_cols, - int mi_stride); - -/* Conceal a macroblock with corrupt residual. - * Copies the prediction signal to the reconstructed image. - */ -void vp8_conceal_corrupt_mb(MACROBLOCKD *xd); +void vp8_interpolate_motion(MACROBLOCKD *mb, int mb_row, int mb_col, + int mb_rows, int mb_cols); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/decoder/onyxd_if.c b/libs/libvpx/vp8/decoder/onyxd_if.c index 3468268a2a..c6b5660364 100644 --- a/libs/libvpx/vp8/decoder/onyxd_if.c +++ b/libs/libvpx/vp8/decoder/onyxd_if.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "vp8/common/onyxc_int.h" #if CONFIG_POSTPROC #include "vp8/common/postproc.h" @@ -42,480 +41,444 @@ extern void vp8_init_loop_filter(VP8_COMMON *cm); extern void vp8cx_init_de_quantizer(VP8D_COMP *pbi); -static int get_free_fb (VP8_COMMON *cm); -static void ref_cnt_fb (int *buf, int *idx, int new_idx); +static int get_free_fb(VP8_COMMON *cm); +static void ref_cnt_fb(int *buf, int *idx, int new_idx); static void initialize_dec(void) { - static volatile int init_done = 0; + static volatile int init_done = 0; - if (!init_done) - { - vpx_dsp_rtcd(); - vp8_init_intra_predictors(); - init_done = 1; - } + if (!init_done) { + vpx_dsp_rtcd(); + vp8_init_intra_predictors(); + init_done = 1; + } } -static void remove_decompressor(VP8D_COMP *pbi) -{ +static void remove_decompressor(VP8D_COMP *pbi) { #if CONFIG_ERROR_CONCEALMENT - vp8_de_alloc_overlap_lists(pbi); + vp8_de_alloc_overlap_lists(pbi); #endif - vp8_remove_common(&pbi->common); - vpx_free(pbi); + vp8_remove_common(&pbi->common); + vpx_free(pbi); } -static struct VP8D_COMP * create_decompressor(VP8D_CONFIG *oxcf) -{ - VP8D_COMP *pbi = vpx_memalign(32, sizeof(VP8D_COMP)); +static struct VP8D_COMP *create_decompressor(VP8D_CONFIG *oxcf) { + VP8D_COMP *pbi = vpx_memalign(32, sizeof(VP8D_COMP)); - if (!pbi) - return NULL; + if (!pbi) return NULL; - memset(pbi, 0, sizeof(VP8D_COMP)); - - if (setjmp(pbi->common.error.jmp)) - { - pbi->common.error.setjmp = 0; - remove_decompressor(pbi); - return 0; - } - - pbi->common.error.setjmp = 1; - - vp8_create_common(&pbi->common); - - pbi->common.current_video_frame = 0; - pbi->ready_for_new_data = 1; - - /* vp8cx_init_de_quantizer() is first called here. Add check in frame_init_dequantizer() to avoid - * unnecessary calling of vp8cx_init_de_quantizer() for every frame. 
- */ - vp8cx_init_de_quantizer(pbi); - - vp8_loop_filter_init(&pbi->common); + memset(pbi, 0, sizeof(VP8D_COMP)); + if (setjmp(pbi->common.error.jmp)) { pbi->common.error.setjmp = 0; + remove_decompressor(pbi); + return 0; + } + + pbi->common.error.setjmp = 1; + + vp8_create_common(&pbi->common); + + pbi->common.current_video_frame = 0; + pbi->ready_for_new_data = 1; + + /* vp8cx_init_de_quantizer() is first called here. Add check in + * frame_init_dequantizer() to avoid + * unnecessary calling of vp8cx_init_de_quantizer() for every frame. + */ + vp8cx_init_de_quantizer(pbi); + + vp8_loop_filter_init(&pbi->common); + + pbi->common.error.setjmp = 0; #if CONFIG_ERROR_CONCEALMENT - pbi->ec_enabled = oxcf->error_concealment; - pbi->overlaps = NULL; + pbi->ec_enabled = oxcf->error_concealment; + pbi->overlaps = NULL; #else - (void)oxcf; - pbi->ec_enabled = 0; + (void)oxcf; + pbi->ec_enabled = 0; #endif - /* Error concealment is activated after a key frame has been - * decoded without errors when error concealment is enabled. - */ - pbi->ec_active = 0; + /* Error concealment is activated after a key frame has been + * decoded without errors when error concealment is enabled. + */ + pbi->ec_active = 0; - pbi->decoded_key_frame = 0; + pbi->decoded_key_frame = 0; - /* Independent partitions is activated when a frame updates the - * token probability table to have equal probabilities over the - * PREV_COEF context. - */ - pbi->independent_partitions = 0; + /* Independent partitions is activated when a frame updates the + * token probability table to have equal probabilities over the + * PREV_COEF context. 
+ */ + pbi->independent_partitions = 0; - vp8_setup_block_dptrs(&pbi->mb); + vp8_setup_block_dptrs(&pbi->mb); - once(initialize_dec); + once(initialize_dec); - return pbi; + return pbi; } -vpx_codec_err_t vp8dx_get_reference(VP8D_COMP *pbi, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd) -{ - VP8_COMMON *cm = &pbi->common; - int ref_fb_idx; - - if (ref_frame_flag == VP8_LAST_FRAME) - ref_fb_idx = cm->lst_fb_idx; - else if (ref_frame_flag == VP8_GOLD_FRAME) - ref_fb_idx = cm->gld_fb_idx; - else if (ref_frame_flag == VP8_ALTR_FRAME) - ref_fb_idx = cm->alt_fb_idx; - else{ - vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR, - "Invalid reference frame"); - return pbi->common.error.error_code; - } - - if(cm->yv12_fb[ref_fb_idx].y_height != sd->y_height || - cm->yv12_fb[ref_fb_idx].y_width != sd->y_width || - cm->yv12_fb[ref_fb_idx].uv_height != sd->uv_height || - cm->yv12_fb[ref_fb_idx].uv_width != sd->uv_width){ - vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR, - "Incorrect buffer dimensions"); - } - else - vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd); +vpx_codec_err_t vp8dx_get_reference(VP8D_COMP *pbi, + enum vpx_ref_frame_type ref_frame_flag, + YV12_BUFFER_CONFIG *sd) { + VP8_COMMON *cm = &pbi->common; + int ref_fb_idx; + if (ref_frame_flag == VP8_LAST_FRAME) { + ref_fb_idx = cm->lst_fb_idx; + } else if (ref_frame_flag == VP8_GOLD_FRAME) { + ref_fb_idx = cm->gld_fb_idx; + } else if (ref_frame_flag == VP8_ALTR_FRAME) { + ref_fb_idx = cm->alt_fb_idx; + } else { + vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR, + "Invalid reference frame"); return pbi->common.error.error_code; + } + + if (cm->yv12_fb[ref_fb_idx].y_height != sd->y_height || + cm->yv12_fb[ref_fb_idx].y_width != sd->y_width || + cm->yv12_fb[ref_fb_idx].uv_height != sd->uv_height || + cm->yv12_fb[ref_fb_idx].uv_width != sd->uv_width) { + vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR, + "Incorrect buffer dimensions"); + } else + 
vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd); + + return pbi->common.error.error_code; } +vpx_codec_err_t vp8dx_set_reference(VP8D_COMP *pbi, + enum vpx_ref_frame_type ref_frame_flag, + YV12_BUFFER_CONFIG *sd) { + VP8_COMMON *cm = &pbi->common; + int *ref_fb_ptr = NULL; + int free_fb; -vpx_codec_err_t vp8dx_set_reference(VP8D_COMP *pbi, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd) -{ - VP8_COMMON *cm = &pbi->common; - int *ref_fb_ptr = NULL; - int free_fb; + if (ref_frame_flag == VP8_LAST_FRAME) { + ref_fb_ptr = &cm->lst_fb_idx; + } else if (ref_frame_flag == VP8_GOLD_FRAME) { + ref_fb_ptr = &cm->gld_fb_idx; + } else if (ref_frame_flag == VP8_ALTR_FRAME) { + ref_fb_ptr = &cm->alt_fb_idx; + } else { + vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR, + "Invalid reference frame"); + return pbi->common.error.error_code; + } - if (ref_frame_flag == VP8_LAST_FRAME) - ref_fb_ptr = &cm->lst_fb_idx; - else if (ref_frame_flag == VP8_GOLD_FRAME) - ref_fb_ptr = &cm->gld_fb_idx; - else if (ref_frame_flag == VP8_ALTR_FRAME) - ref_fb_ptr = &cm->alt_fb_idx; - else{ - vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR, - "Invalid reference frame"); - return pbi->common.error.error_code; - } + if (cm->yv12_fb[*ref_fb_ptr].y_height != sd->y_height || + cm->yv12_fb[*ref_fb_ptr].y_width != sd->y_width || + cm->yv12_fb[*ref_fb_ptr].uv_height != sd->uv_height || + cm->yv12_fb[*ref_fb_ptr].uv_width != sd->uv_width) { + vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR, + "Incorrect buffer dimensions"); + } else { + /* Find an empty frame buffer. */ + free_fb = get_free_fb(cm); + /* Decrease fb_idx_ref_cnt since it will be increased again in + * ref_cnt_fb() below. 
*/ + cm->fb_idx_ref_cnt[free_fb]--; - if(cm->yv12_fb[*ref_fb_ptr].y_height != sd->y_height || - cm->yv12_fb[*ref_fb_ptr].y_width != sd->y_width || - cm->yv12_fb[*ref_fb_ptr].uv_height != sd->uv_height || - cm->yv12_fb[*ref_fb_ptr].uv_width != sd->uv_width){ - vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR, - "Incorrect buffer dimensions"); - } - else{ - /* Find an empty frame buffer. */ - free_fb = get_free_fb(cm); - /* Decrease fb_idx_ref_cnt since it will be increased again in - * ref_cnt_fb() below. */ - cm->fb_idx_ref_cnt[free_fb]--; + /* Manage the reference counters and copy image. */ + ref_cnt_fb(cm->fb_idx_ref_cnt, ref_fb_ptr, free_fb); + vp8_yv12_copy_frame(sd, &cm->yv12_fb[*ref_fb_ptr]); + } - /* Manage the reference counters and copy image. */ - ref_cnt_fb (cm->fb_idx_ref_cnt, ref_fb_ptr, free_fb); - vp8_yv12_copy_frame(sd, &cm->yv12_fb[*ref_fb_ptr]); - } - - return pbi->common.error.error_code; + return pbi->common.error.error_code; } -static int get_free_fb (VP8_COMMON *cm) -{ - int i; - for (i = 0; i < NUM_YV12_BUFFERS; i++) - if (cm->fb_idx_ref_cnt[i] == 0) - break; +static int get_free_fb(VP8_COMMON *cm) { + int i; + for (i = 0; i < NUM_YV12_BUFFERS; ++i) { + if (cm->fb_idx_ref_cnt[i] == 0) break; + } - assert(i < NUM_YV12_BUFFERS); - cm->fb_idx_ref_cnt[i] = 1; - return i; + assert(i < NUM_YV12_BUFFERS); + cm->fb_idx_ref_cnt[i] = 1; + return i; } -static void ref_cnt_fb (int *buf, int *idx, int new_idx) -{ - if (buf[*idx] > 0) - buf[*idx]--; +static void ref_cnt_fb(int *buf, int *idx, int new_idx) { + if (buf[*idx] > 0) buf[*idx]--; - *idx = new_idx; + *idx = new_idx; - buf[new_idx]++; + buf[new_idx]++; } /* If any buffer copy / swapping is signalled it should be done here. */ -static int swap_frame_buffers (VP8_COMMON *cm) -{ - int err = 0; +static int swap_frame_buffers(VP8_COMMON *cm) { + int err = 0; - /* The alternate reference frame or golden frame can be updated - * using the new, last, or golden/alt ref frame. 
If it - * is updated using the newly decoded frame it is a refresh. - * An update using the last or golden/alt ref frame is a copy. - */ - if (cm->copy_buffer_to_arf) - { - int new_fb = 0; + /* The alternate reference frame or golden frame can be updated + * using the new, last, or golden/alt ref frame. If it + * is updated using the newly decoded frame it is a refresh. + * An update using the last or golden/alt ref frame is a copy. + */ + if (cm->copy_buffer_to_arf) { + int new_fb = 0; - if (cm->copy_buffer_to_arf == 1) - new_fb = cm->lst_fb_idx; - else if (cm->copy_buffer_to_arf == 2) - new_fb = cm->gld_fb_idx; - else - err = -1; - - ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->alt_fb_idx, new_fb); + if (cm->copy_buffer_to_arf == 1) { + new_fb = cm->lst_fb_idx; + } else if (cm->copy_buffer_to_arf == 2) { + new_fb = cm->gld_fb_idx; + } else { + err = -1; } - if (cm->copy_buffer_to_gf) - { - int new_fb = 0; + ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->alt_fb_idx, new_fb); + } - if (cm->copy_buffer_to_gf == 1) - new_fb = cm->lst_fb_idx; - else if (cm->copy_buffer_to_gf == 2) - new_fb = cm->alt_fb_idx; - else - err = -1; + if (cm->copy_buffer_to_gf) { + int new_fb = 0; - ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->gld_fb_idx, new_fb); + if (cm->copy_buffer_to_gf == 1) { + new_fb = cm->lst_fb_idx; + } else if (cm->copy_buffer_to_gf == 2) { + new_fb = cm->alt_fb_idx; + } else { + err = -1; } - if (cm->refresh_golden_frame) - ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->gld_fb_idx, cm->new_fb_idx); + ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->gld_fb_idx, new_fb); + } - if (cm->refresh_alt_ref_frame) - ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->alt_fb_idx, cm->new_fb_idx); + if (cm->refresh_golden_frame) { + ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->gld_fb_idx, cm->new_fb_idx); + } - if (cm->refresh_last_frame) - { - ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->lst_fb_idx, cm->new_fb_idx); + if (cm->refresh_alt_ref_frame) { + ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->alt_fb_idx, cm->new_fb_idx); + } - cm->frame_to_show = 
&cm->yv12_fb[cm->lst_fb_idx]; - } - else - cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx]; + if (cm->refresh_last_frame) { + ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->lst_fb_idx, cm->new_fb_idx); - cm->fb_idx_ref_cnt[cm->new_fb_idx]--; + cm->frame_to_show = &cm->yv12_fb[cm->lst_fb_idx]; + } else { + cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx]; + } - return err; + cm->fb_idx_ref_cnt[cm->new_fb_idx]--; + + return err; } -static int check_fragments_for_errors(VP8D_COMP *pbi) -{ - if (!pbi->ec_active && - pbi->fragments.count <= 1 && pbi->fragments.sizes[0] == 0) - { - VP8_COMMON *cm = &pbi->common; +static int check_fragments_for_errors(VP8D_COMP *pbi) { + if (!pbi->ec_active && pbi->fragments.count <= 1 && + pbi->fragments.sizes[0] == 0) { + VP8_COMMON *cm = &pbi->common; - /* If error concealment is disabled we won't signal missing frames - * to the decoder. - */ - if (cm->fb_idx_ref_cnt[cm->lst_fb_idx] > 1) - { - /* The last reference shares buffer with another reference - * buffer. Move it to its own buffer before setting it as - * corrupt, otherwise we will make multiple buffers corrupt. - */ - const int prev_idx = cm->lst_fb_idx; - cm->fb_idx_ref_cnt[prev_idx]--; - cm->lst_fb_idx = get_free_fb(cm); - vp8_yv12_copy_frame(&cm->yv12_fb[prev_idx], - &cm->yv12_fb[cm->lst_fb_idx]); - } - /* This is used to signal that we are missing frames. - * We do not know if the missing frame(s) was supposed to update - * any of the reference buffers, but we act conservative and - * mark only the last buffer as corrupted. - */ - cm->yv12_fb[cm->lst_fb_idx].corrupted = 1; - - /* Signal that we have no frame to show. */ - cm->show_frame = 0; - - /* Nothing more to do. */ - return 0; + /* If error concealment is disabled we won't signal missing frames + * to the decoder. + */ + if (cm->fb_idx_ref_cnt[cm->lst_fb_idx] > 1) { + /* The last reference shares buffer with another reference + * buffer. 
Move it to its own buffer before setting it as + * corrupt, otherwise we will make multiple buffers corrupt. + */ + const int prev_idx = cm->lst_fb_idx; + cm->fb_idx_ref_cnt[prev_idx]--; + cm->lst_fb_idx = get_free_fb(cm); + vp8_yv12_copy_frame(&cm->yv12_fb[prev_idx], &cm->yv12_fb[cm->lst_fb_idx]); } + /* This is used to signal that we are missing frames. + * We do not know if the missing frame(s) was supposed to update + * any of the reference buffers, but we act conservative and + * mark only the last buffer as corrupted. + */ + cm->yv12_fb[cm->lst_fb_idx].corrupted = 1; - return 1; + /* Signal that we have no frame to show. */ + cm->show_frame = 0; + + /* Nothing more to do. */ + return 0; + } + + return 1; } int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size, - const uint8_t *source, - int64_t time_stamp) -{ - VP8_COMMON *cm = &pbi->common; - int retcode = -1; - (void)size; - (void)source; + const uint8_t *source, int64_t time_stamp) { + VP8_COMMON *cm = &pbi->common; + int retcode = -1; + (void)size; + (void)source; - pbi->common.error.error_code = VPX_CODEC_OK; + pbi->common.error.error_code = VPX_CODEC_OK; - retcode = check_fragments_for_errors(pbi); - if(retcode <= 0) - return retcode; + retcode = check_fragments_for_errors(pbi); + if (retcode <= 0) return retcode; - cm->new_fb_idx = get_free_fb (cm); + cm->new_fb_idx = get_free_fb(cm); - /* setup reference frames for vp8_decode_frame */ - pbi->dec_fb_ref[INTRA_FRAME] = &cm->yv12_fb[cm->new_fb_idx]; - pbi->dec_fb_ref[LAST_FRAME] = &cm->yv12_fb[cm->lst_fb_idx]; - pbi->dec_fb_ref[GOLDEN_FRAME] = &cm->yv12_fb[cm->gld_fb_idx]; - pbi->dec_fb_ref[ALTREF_FRAME] = &cm->yv12_fb[cm->alt_fb_idx]; + /* setup reference frames for vp8_decode_frame */ + pbi->dec_fb_ref[INTRA_FRAME] = &cm->yv12_fb[cm->new_fb_idx]; + pbi->dec_fb_ref[LAST_FRAME] = &cm->yv12_fb[cm->lst_fb_idx]; + pbi->dec_fb_ref[GOLDEN_FRAME] = &cm->yv12_fb[cm->gld_fb_idx]; + pbi->dec_fb_ref[ALTREF_FRAME] = &cm->yv12_fb[cm->alt_fb_idx]; - if 
(setjmp(pbi->common.error.jmp)) - { - /* We do not know if the missing frame(s) was supposed to update - * any of the reference buffers, but we act conservative and - * mark only the last buffer as corrupted. - */ - cm->yv12_fb[cm->lst_fb_idx].corrupted = 1; + if (setjmp(pbi->common.error.jmp)) { + /* We do not know if the missing frame(s) was supposed to update + * any of the reference buffers, but we act conservative and + * mark only the last buffer as corrupted. + */ + cm->yv12_fb[cm->lst_fb_idx].corrupted = 1; - if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0) - cm->fb_idx_ref_cnt[cm->new_fb_idx]--; + if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0) { + cm->fb_idx_ref_cnt[cm->new_fb_idx]--; + } + goto decode_exit; + } - goto decode_exit; + pbi->common.error.setjmp = 1; + + retcode = vp8_decode_frame(pbi); + + if (retcode < 0) { + if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0) { + cm->fb_idx_ref_cnt[cm->new_fb_idx]--; } - pbi->common.error.setjmp = 1; + pbi->common.error.error_code = VPX_CODEC_ERROR; + goto decode_exit; + } - retcode = vp8_decode_frame(pbi); + if (swap_frame_buffers(cm)) { + pbi->common.error.error_code = VPX_CODEC_ERROR; + goto decode_exit; + } - if (retcode < 0) - { - if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0) - cm->fb_idx_ref_cnt[cm->new_fb_idx]--; + vp8_clear_system_state(); - pbi->common.error.error_code = VPX_CODEC_ERROR; - goto decode_exit; - } - - if (swap_frame_buffers (cm)) - { - pbi->common.error.error_code = VPX_CODEC_ERROR; - goto decode_exit; - } - - vp8_clear_system_state(); - - if (cm->show_frame) - { - cm->current_video_frame++; - cm->show_frame_mi = cm->mi; - } - - #if CONFIG_ERROR_CONCEALMENT - /* swap the mode infos to storage for future error concealment */ - if (pbi->ec_enabled && pbi->common.prev_mi) - { - MODE_INFO* tmp = pbi->common.prev_mi; - int row, col; - pbi->common.prev_mi = pbi->common.mi; - pbi->common.mi = tmp; - - /* Propagate the segment_ids to the next frame */ - for (row = 0; row < pbi->common.mb_rows; ++row) - { - for 
(col = 0; col < pbi->common.mb_cols; ++col) - { - const int i = row*pbi->common.mode_info_stride + col; - pbi->common.mi[i].mbmi.segment_id = - pbi->common.prev_mi[i].mbmi.segment_id; - } - } + if (cm->show_frame) { + cm->current_video_frame++; + cm->show_frame_mi = cm->mi; + } + +#if CONFIG_ERROR_CONCEALMENT + /* swap the mode infos to storage for future error concealment */ + if (pbi->ec_enabled && pbi->common.prev_mi) { + MODE_INFO *tmp = pbi->common.prev_mi; + int row, col; + pbi->common.prev_mi = pbi->common.mi; + pbi->common.mi = tmp; + + /* Propagate the segment_ids to the next frame */ + for (row = 0; row < pbi->common.mb_rows; ++row) { + for (col = 0; col < pbi->common.mb_cols; ++col) { + const int i = row * pbi->common.mode_info_stride + col; + pbi->common.mi[i].mbmi.segment_id = + pbi->common.prev_mi[i].mbmi.segment_id; + } } + } #endif - pbi->ready_for_new_data = 0; - pbi->last_time_stamp = time_stamp; + pbi->ready_for_new_data = 0; + pbi->last_time_stamp = time_stamp; decode_exit: - pbi->common.error.setjmp = 0; - vp8_clear_system_state(); - return retcode; + pbi->common.error.setjmp = 0; + vp8_clear_system_state(); + return retcode; } -int vp8dx_get_raw_frame(VP8D_COMP *pbi, YV12_BUFFER_CONFIG *sd, int64_t *time_stamp, int64_t *time_end_stamp, vp8_ppflags_t *flags) -{ - int ret = -1; +int vp8dx_get_raw_frame(VP8D_COMP *pbi, YV12_BUFFER_CONFIG *sd, + int64_t *time_stamp, int64_t *time_end_stamp, + vp8_ppflags_t *flags) { + int ret = -1; - if (pbi->ready_for_new_data == 1) - return ret; + if (pbi->ready_for_new_data == 1) return ret; - /* ie no raw frame to show!!! */ - if (pbi->common.show_frame == 0) - return ret; + /* ie no raw frame to show!!! 
*/ + if (pbi->common.show_frame == 0) return ret; - pbi->ready_for_new_data = 1; - *time_stamp = pbi->last_time_stamp; - *time_end_stamp = 0; + pbi->ready_for_new_data = 1; + *time_stamp = pbi->last_time_stamp; + *time_end_stamp = 0; #if CONFIG_POSTPROC - ret = vp8_post_proc_frame(&pbi->common, sd, flags); + ret = vp8_post_proc_frame(&pbi->common, sd, flags); #else - (void)flags; + (void)flags; - if (pbi->common.frame_to_show) - { - *sd = *pbi->common.frame_to_show; - sd->y_width = pbi->common.Width; - sd->y_height = pbi->common.Height; - sd->uv_height = pbi->common.Height / 2; - ret = 0; - } - else - { - ret = -1; - } + if (pbi->common.frame_to_show) { + *sd = *pbi->common.frame_to_show; + sd->y_width = pbi->common.Width; + sd->y_height = pbi->common.Height; + sd->uv_height = pbi->common.Height / 2; + ret = 0; + } else { + ret = -1; + } #endif /*!CONFIG_POSTPROC*/ - vp8_clear_system_state(); - return ret; + vp8_clear_system_state(); + return ret; } - /* This function as written isn't decoder specific, but the encoder has * much faster ways of computing this, so it's ok for it to live in a * decode specific file. 
*/ -int vp8dx_references_buffer( VP8_COMMON *oci, int ref_frame ) -{ - const MODE_INFO *mi = oci->mi; - int mb_row, mb_col; +int vp8dx_references_buffer(VP8_COMMON *oci, int ref_frame) { + const MODE_INFO *mi = oci->mi; + int mb_row, mb_col; - for (mb_row = 0; mb_row < oci->mb_rows; mb_row++) - { - for (mb_col = 0; mb_col < oci->mb_cols; mb_col++,mi++) - { - if( mi->mbmi.ref_frame == ref_frame) - return 1; - } - mi++; + for (mb_row = 0; mb_row < oci->mb_rows; ++mb_row) { + for (mb_col = 0; mb_col < oci->mb_cols; mb_col++, mi++) { + if (mi->mbmi.ref_frame == ref_frame) return 1; } - return 0; - + mi++; + } + return 0; } -int vp8_create_decoder_instances(struct frame_buffers *fb, VP8D_CONFIG *oxcf) -{ - if(!fb->use_frame_threads) - { - /* decoder instance for single thread mode */ - fb->pbi[0] = create_decompressor(oxcf); - if(!fb->pbi[0]) - return VPX_CODEC_ERROR; +int vp8_create_decoder_instances(struct frame_buffers *fb, VP8D_CONFIG *oxcf) { + if (!fb->use_frame_threads) { + /* decoder instance for single thread mode */ + fb->pbi[0] = create_decompressor(oxcf); + if (!fb->pbi[0]) return VPX_CODEC_ERROR; #if CONFIG_MULTITHREAD - /* enable row-based threading only when use_frame_threads - * is disabled */ - fb->pbi[0]->max_threads = oxcf->max_threads; - vp8_decoder_create_threads(fb->pbi[0]); -#endif - } - else - { - /* TODO : create frame threads and decoder instances for each - * thread here */ + if (setjmp(fb->pbi[0]->common.error.jmp)) { + vp8_remove_decoder_instances(fb); + memset(fb->pbi, 0, sizeof(fb->pbi) / sizeof(fb->pbi[0])); + vp8_clear_system_state(); + return VPX_CODEC_ERROR; } - return VPX_CODEC_OK; + fb->pbi[0]->common.error.setjmp = 1; + fb->pbi[0]->max_threads = oxcf->max_threads; + vp8_decoder_create_threads(fb->pbi[0]); + fb->pbi[0]->common.error.setjmp = 0; +#endif + } else { + /* TODO : create frame threads and decoder instances for each + * thread here */ + } + + return VPX_CODEC_OK; } -int vp8_remove_decoder_instances(struct frame_buffers *fb) 
-{ - if(!fb->use_frame_threads) - { - VP8D_COMP *pbi = fb->pbi[0]; +int vp8_remove_decoder_instances(struct frame_buffers *fb) { + if (!fb->use_frame_threads) { + VP8D_COMP *pbi = fb->pbi[0]; - if (!pbi) - return VPX_CODEC_ERROR; + if (!pbi) return VPX_CODEC_ERROR; #if CONFIG_MULTITHREAD - if (pbi->b_multithreaded_rd) - vp8mt_de_alloc_temp_buffers(pbi, pbi->common.mb_rows); - vp8_decoder_remove_threads(pbi); + vp8_decoder_remove_threads(pbi); #endif - /* decoder instance for single thread mode */ - remove_decompressor(pbi); - } - else - { - /* TODO : remove frame threads and decoder instances for each - * thread here */ - } + /* decoder instance for single thread mode */ + remove_decompressor(pbi); + } else { + /* TODO : remove frame threads and decoder instances for each + * thread here */ + } - return VPX_CODEC_OK; + return VPX_CODEC_OK; } diff --git a/libs/libvpx/vp8/decoder/onyxd_int.h b/libs/libvpx/vp8/decoder/onyxd_int.h index 313fe01c07..3a8a431465 100644 --- a/libs/libvpx/vp8/decoder/onyxd_int.h +++ b/libs/libvpx/vp8/decoder/onyxd_int.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_DECODER_ONYXD_INT_H_ #define VP8_DECODER_ONYXD_INT_H_ @@ -26,110 +25,98 @@ extern "C" { #endif -typedef struct -{ - int ithread; - void *ptr1; - void *ptr2; +typedef struct { + int ithread; + void *ptr1; + void *ptr2; } DECODETHREAD_DATA; -typedef struct -{ - MACROBLOCKD mbd; -} MB_ROW_DEC; +typedef struct { MACROBLOCKD mbd; } MB_ROW_DEC; - -typedef struct -{ - int enabled; - unsigned int count; - const unsigned char *ptrs[MAX_PARTITIONS]; - unsigned int sizes[MAX_PARTITIONS]; +typedef struct { + int enabled; + unsigned int count; + const unsigned char *ptrs[MAX_PARTITIONS]; + unsigned int sizes[MAX_PARTITIONS]; } FRAGMENT_DATA; #define MAX_FB_MT_DEC 32 -struct frame_buffers -{ - /* - * this struct will be populated with frame buffer management - * info in future commits. 
*/ +struct frame_buffers { + /* + * this struct will be populated with frame buffer management + * info in future commits. */ - /* enable/disable frame-based threading */ - int use_frame_threads; - - /* decoder instances */ - struct VP8D_COMP *pbi[MAX_FB_MT_DEC]; + /* enable/disable frame-based threading */ + int use_frame_threads; + /* decoder instances */ + struct VP8D_COMP *pbi[MAX_FB_MT_DEC]; }; -typedef struct VP8D_COMP -{ - DECLARE_ALIGNED(16, MACROBLOCKD, mb); +typedef struct VP8D_COMP { + DECLARE_ALIGNED(16, MACROBLOCKD, mb); - YV12_BUFFER_CONFIG *dec_fb_ref[NUM_YV12_BUFFERS]; + YV12_BUFFER_CONFIG *dec_fb_ref[NUM_YV12_BUFFERS]; - DECLARE_ALIGNED(16, VP8_COMMON, common); + DECLARE_ALIGNED(16, VP8_COMMON, common); - /* the last partition will be used for the modes/mvs */ - vp8_reader mbc[MAX_PARTITIONS]; + /* the last partition will be used for the modes/mvs */ + vp8_reader mbc[MAX_PARTITIONS]; - VP8D_CONFIG oxcf; + VP8D_CONFIG oxcf; - FRAGMENT_DATA fragments; + FRAGMENT_DATA fragments; #if CONFIG_MULTITHREAD - /* variable for threading */ + /* variable for threading */ + volatile int b_multithreaded_rd; + int max_threads; + int current_mb_col_main; + unsigned int decoding_thread_count; + int allocated_decoding_thread_count; - int b_multithreaded_rd; - int max_threads; - int current_mb_col_main; - unsigned int decoding_thread_count; - int allocated_decoding_thread_count; + int mt_baseline_filter_level[MAX_MB_SEGMENTS]; + int sync_range; + int *mt_current_mb_col; /* Each row remembers its already decoded column. */ - int mt_baseline_filter_level[MAX_MB_SEGMENTS]; - int sync_range; - int *mt_current_mb_col; /* Each row remembers its already decoded column. 
*/ - pthread_mutex_t *pmutex; - pthread_mutex_t mt_mutex; /* mutex for b_multithreaded_rd */ + unsigned char **mt_yabove_row; /* mb_rows x width */ + unsigned char **mt_uabove_row; + unsigned char **mt_vabove_row; + unsigned char **mt_yleft_col; /* mb_rows x 16 */ + unsigned char **mt_uleft_col; /* mb_rows x 8 */ + unsigned char **mt_vleft_col; /* mb_rows x 8 */ - unsigned char **mt_yabove_row; /* mb_rows x width */ - unsigned char **mt_uabove_row; - unsigned char **mt_vabove_row; - unsigned char **mt_yleft_col; /* mb_rows x 16 */ - unsigned char **mt_uleft_col; /* mb_rows x 8 */ - unsigned char **mt_vleft_col; /* mb_rows x 8 */ + MB_ROW_DEC *mb_row_di; + DECODETHREAD_DATA *de_thread_data; - MB_ROW_DEC *mb_row_di; - DECODETHREAD_DATA *de_thread_data; - - pthread_t *h_decoding_thread; - sem_t *h_event_start_decoding; - sem_t h_event_end_decoding; - /* end of threading data */ + pthread_t *h_decoding_thread; + sem_t *h_event_start_decoding; + sem_t h_event_end_decoding; +/* end of threading data */ #endif - int64_t last_time_stamp; - int ready_for_new_data; + int64_t last_time_stamp; + int ready_for_new_data; - vp8_prob prob_intra; - vp8_prob prob_last; - vp8_prob prob_gf; - vp8_prob prob_skip_false; + vp8_prob prob_intra; + vp8_prob prob_last; + vp8_prob prob_gf; + vp8_prob prob_skip_false; #if CONFIG_ERROR_CONCEALMENT - MB_OVERLAP *overlaps; - /* the mb num from which modes and mvs (first partition) are corrupt */ - unsigned int mvs_corrupt_from_mb; + MB_OVERLAP *overlaps; + /* the mb num from which modes and mvs (first partition) are corrupt */ + unsigned int mvs_corrupt_from_mb; #endif - int ec_enabled; - int ec_active; - int decoded_key_frame; - int independent_partitions; - int frame_corrupt_residual; + int ec_enabled; + int ec_active; + int decoded_key_frame; + int independent_partitions; + int frame_corrupt_residual; - vpx_decrypt_cb decrypt_cb; - void *decrypt_state; + vpx_decrypt_cb decrypt_cb; + void *decrypt_state; } VP8D_COMP; int 
vp8_decode_frame(VP8D_COMP *cpi); @@ -138,20 +125,22 @@ int vp8_create_decoder_instances(struct frame_buffers *fb, VP8D_CONFIG *oxcf); int vp8_remove_decoder_instances(struct frame_buffers *fb); #if CONFIG_DEBUG -#define CHECK_MEM_ERROR(lval,expr) do {\ - lval = (expr); \ - if(!lval) \ - vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,\ - "Failed to allocate "#lval" at %s:%d", \ - __FILE__,__LINE__);\ - } while(0) +#define CHECK_MEM_ERROR(lval, expr) \ + do { \ + lval = (expr); \ + if (!lval) \ + vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR, \ + "Failed to allocate " #lval " at %s:%d", __FILE__, \ + __LINE__); \ + } while (0) #else -#define CHECK_MEM_ERROR(lval,expr) do {\ - lval = (expr); \ - if(!lval) \ - vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,\ - "Failed to allocate "#lval);\ - } while(0) +#define CHECK_MEM_ERROR(lval, expr) \ + do { \ + lval = (expr); \ + if (!lval) \ + vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR, \ + "Failed to allocate " #lval); \ + } while (0) #endif #ifdef __cplusplus diff --git a/libs/libvpx/vp8/decoder/threading.c b/libs/libvpx/vp8/decoder/threading.c index 97979e3b2f..44ca16bfdd 100644 --- a/libs/libvpx/vp8/decoder/threading.c +++ b/libs/libvpx/vp8/decoder/threading.c @@ -8,11 +8,10 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "vpx_config.h" #include "vp8_rtcd.h" #if !defined(WIN32) && CONFIG_OS_SUPPORT == 1 -# include +#include #endif #include "onyxd_int.h" #include "vpx_mem/vpx_mem.h" @@ -31,901 +30,829 @@ #endif #define CALLOC_ARRAY(p, n) CHECK_MEM_ERROR((p), vpx_calloc(sizeof(*(p)), (n))) -#define CALLOC_ARRAY_ALIGNED(p, n, algn) do { \ - CHECK_MEM_ERROR((p), vpx_memalign((algn), sizeof(*(p)) * (n))); \ - memset((p), 0, (n) * sizeof(*(p))); \ -} while (0) - +#define CALLOC_ARRAY_ALIGNED(p, n, algn) \ + do { \ + CHECK_MEM_ERROR((p), vpx_memalign((algn), sizeof(*(p)) * (n))); \ + memset((p), 0, (n) * sizeof(*(p))); \ + } while (0) void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd); -static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd, MB_ROW_DEC *mbrd, int count) -{ - VP8_COMMON *const pc = & pbi->common; - int i; +static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd, + MB_ROW_DEC *mbrd, int count) { + VP8_COMMON *const pc = &pbi->common; + int i; - for (i = 0; i < count; i++) - { - MACROBLOCKD *mbd = &mbrd[i].mbd; - mbd->subpixel_predict = xd->subpixel_predict; - mbd->subpixel_predict8x4 = xd->subpixel_predict8x4; - mbd->subpixel_predict8x8 = xd->subpixel_predict8x8; - mbd->subpixel_predict16x16 = xd->subpixel_predict16x16; + for (i = 0; i < count; ++i) { + MACROBLOCKD *mbd = &mbrd[i].mbd; + mbd->subpixel_predict = xd->subpixel_predict; + mbd->subpixel_predict8x4 = xd->subpixel_predict8x4; + mbd->subpixel_predict8x8 = xd->subpixel_predict8x8; + mbd->subpixel_predict16x16 = xd->subpixel_predict16x16; - mbd->frame_type = pc->frame_type; - mbd->pre = xd->pre; - mbd->dst = xd->dst; + mbd->mode_info_context = pc->mi + pc->mode_info_stride * (i + 1); + mbd->mode_info_stride = pc->mode_info_stride; - mbd->segmentation_enabled = xd->segmentation_enabled; - mbd->mb_segement_abs_delta = xd->mb_segement_abs_delta; - memcpy(mbd->segment_feature_data, xd->segment_feature_data, sizeof(xd->segment_feature_data)); + mbd->frame_type = 
pc->frame_type; + mbd->pre = xd->pre; + mbd->dst = xd->dst; - /*signed char ref_lf_deltas[MAX_REF_LF_DELTAS];*/ - memcpy(mbd->ref_lf_deltas, xd->ref_lf_deltas, sizeof(xd->ref_lf_deltas)); - /*signed char mode_lf_deltas[MAX_MODE_LF_DELTAS];*/ - memcpy(mbd->mode_lf_deltas, xd->mode_lf_deltas, sizeof(xd->mode_lf_deltas)); - /*unsigned char mode_ref_lf_delta_enabled; - unsigned char mode_ref_lf_delta_update;*/ - mbd->mode_ref_lf_delta_enabled = xd->mode_ref_lf_delta_enabled; - mbd->mode_ref_lf_delta_update = xd->mode_ref_lf_delta_update; + mbd->segmentation_enabled = xd->segmentation_enabled; + mbd->mb_segement_abs_delta = xd->mb_segement_abs_delta; + memcpy(mbd->segment_feature_data, xd->segment_feature_data, + sizeof(xd->segment_feature_data)); - mbd->current_bc = &pbi->mbc[0]; + /*signed char ref_lf_deltas[MAX_REF_LF_DELTAS];*/ + memcpy(mbd->ref_lf_deltas, xd->ref_lf_deltas, sizeof(xd->ref_lf_deltas)); + /*signed char mode_lf_deltas[MAX_MODE_LF_DELTAS];*/ + memcpy(mbd->mode_lf_deltas, xd->mode_lf_deltas, sizeof(xd->mode_lf_deltas)); + /*unsigned char mode_ref_lf_delta_enabled; + unsigned char mode_ref_lf_delta_update;*/ + mbd->mode_ref_lf_delta_enabled = xd->mode_ref_lf_delta_enabled; + mbd->mode_ref_lf_delta_update = xd->mode_ref_lf_delta_update; - memcpy(mbd->dequant_y1_dc, xd->dequant_y1_dc, sizeof(xd->dequant_y1_dc)); - memcpy(mbd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1)); - memcpy(mbd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2)); - memcpy(mbd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv)); + mbd->current_bc = &pbi->mbc[0]; - mbd->fullpixel_mask = 0xffffffff; + memcpy(mbd->dequant_y1_dc, xd->dequant_y1_dc, sizeof(xd->dequant_y1_dc)); + memcpy(mbd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1)); + memcpy(mbd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2)); + memcpy(mbd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv)); - if (pc->full_pixel) - mbd->fullpixel_mask = 0xfffffff8; + mbd->fullpixel_mask = 0xffffffff; - } + if 
(pc->full_pixel) mbd->fullpixel_mask = 0xfffffff8; + } - for (i = 0; i < pc->mb_rows; i++) - pbi->mt_current_mb_col[i] = -1; + for (i = 0; i < pc->mb_rows; ++i) pbi->mt_current_mb_col[i] = -1; } static void mt_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, - unsigned int mb_idx) -{ - MB_PREDICTION_MODE mode; - int i; + unsigned int mb_idx) { + MB_PREDICTION_MODE mode; + int i; #if CONFIG_ERROR_CONCEALMENT - int corruption_detected = 0; + int corruption_detected = 0; #else - (void)mb_idx; + (void)mb_idx; #endif - if (xd->mode_info_context->mbmi.mb_skip_coeff) - { - vp8_reset_mb_tokens_context(xd); - } - else if (!vp8dx_bool_error(xd->current_bc)) - { - int eobtotal; - eobtotal = vp8_decode_mb_tokens(pbi, xd); + if (xd->mode_info_context->mbmi.mb_skip_coeff) { + vp8_reset_mb_tokens_context(xd); + } else if (!vp8dx_bool_error(xd->current_bc)) { + int eobtotal; + eobtotal = vp8_decode_mb_tokens(pbi, xd); - /* Special case: Force the loopfilter to skip when eobtotal is zero */ - xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal==0); - } + /* Special case: Force the loopfilter to skip when eobtotal is zero */ + xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal == 0); + } - mode = xd->mode_info_context->mbmi.mode; - - if (xd->segmentation_enabled) - vp8_mb_init_dequantizer(pbi, xd); + mode = xd->mode_info_context->mbmi.mode; + if (xd->segmentation_enabled) vp8_mb_init_dequantizer(pbi, xd); #if CONFIG_ERROR_CONCEALMENT - if(pbi->ec_active) - { - int throw_residual; - /* When we have independent partitions we can apply residual even - * though other partitions within the frame are corrupt. + if (pbi->ec_active) { + int throw_residual; + /* When we have independent partitions we can apply residual even + * though other partitions within the frame are corrupt. 
+ */ + throw_residual = + (!pbi->independent_partitions && pbi->frame_corrupt_residual); + throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc)); + + if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual)) { + /* MB with corrupt residuals or corrupt mode/motion vectors. + * Better to use the predictor as reconstruction. + */ + pbi->frame_corrupt_residual = 1; + memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); + + corruption_detected = 1; + + /* force idct to be skipped for B_PRED and use the + * prediction only for reconstruction + * */ + memset(xd->eobs, 0, 25); + } + } +#endif + + /* do prediction */ + if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { + vp8_build_intra_predictors_mbuv_s( + xd, xd->recon_above[1], xd->recon_above[2], xd->recon_left[1], + xd->recon_left[2], xd->recon_left_stride[1], xd->dst.u_buffer, + xd->dst.v_buffer, xd->dst.uv_stride); + + if (mode != B_PRED) { + vp8_build_intra_predictors_mby_s( + xd, xd->recon_above[0], xd->recon_left[0], xd->recon_left_stride[0], + xd->dst.y_buffer, xd->dst.y_stride); + } else { + short *DQC = xd->dequant_y1; + int dst_stride = xd->dst.y_stride; + + /* clear out residual eob info */ + if (xd->mode_info_context->mbmi.mb_skip_coeff) memset(xd->eobs, 0, 25); + + intra_prediction_down_copy(xd, xd->recon_above[0] + 16); + + for (i = 0; i < 16; ++i) { + BLOCKD *b = &xd->block[i]; + unsigned char *dst = xd->dst.y_buffer + b->offset; + B_PREDICTION_MODE b_mode = xd->mode_info_context->bmi[i].as_mode; + unsigned char *Above; + unsigned char *yleft; + int left_stride; + unsigned char top_left; + + /*Caution: For some b_mode, it needs 8 pixels (4 above + 4 + * above-right).*/ + if (i < 4 && pbi->common.filter_level) { + Above = xd->recon_above[0] + b->offset; + } else { + Above = dst - dst_stride; + } + + if (i % 4 == 0 && pbi->common.filter_level) { + yleft = xd->recon_left[0] + i; + left_stride = 1; + } else { + yleft = dst - 1; + left_stride = dst_stride; + } + + if ((i == 4 || i == 8 || i == 
12) && pbi->common.filter_level) { + top_left = *(xd->recon_left[0] + i - 1); + } else { + top_left = Above[-1]; + } + + vp8_intra4x4_predict(Above, yleft, left_stride, b_mode, dst, dst_stride, + top_left); + + if (xd->eobs[i]) { + if (xd->eobs[i] > 1) { + vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride); + } else { + vp8_dc_only_idct_add(b->qcoeff[0] * DQC[0], dst, dst_stride, dst, + dst_stride); + memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); + } + } + } + } + } else { + vp8_build_inter_predictors_mb(xd); + } + +#if CONFIG_ERROR_CONCEALMENT + if (corruption_detected) { + return; + } +#endif + + if (!xd->mode_info_context->mbmi.mb_skip_coeff) { + /* dequantization and idct */ + if (mode != B_PRED) { + short *DQC = xd->dequant_y1; + + if (mode != SPLITMV) { + BLOCKD *b = &xd->block[24]; + + /* do 2nd order transform on the dc block */ + if (xd->eobs[24] > 1) { + vp8_dequantize_b(b, xd->dequant_y2); + + vp8_short_inv_walsh4x4(&b->dqcoeff[0], xd->qcoeff); + memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0])); + } else { + b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0]; + vp8_short_inv_walsh4x4_1(&b->dqcoeff[0], xd->qcoeff); + memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); + } + + /* override the dc dequant constant in order to preserve the + * dc components */ - throw_residual = (!pbi->independent_partitions && - pbi->frame_corrupt_residual); - throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc)); + DQC = xd->dequant_y1_dc; + } - if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual)) - { - /* MB with corrupt residuals or corrupt mode/motion vectors. - * Better to use the predictor as reconstruction. 
- */ - pbi->frame_corrupt_residual = 1; - memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); - vp8_conceal_corrupt_mb(xd); - - - corruption_detected = 1; - - /* force idct to be skipped for B_PRED and use the - * prediction only for reconstruction - * */ - memset(xd->eobs, 0, 25); - } - } -#endif - - /* do prediction */ - if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) - { - vp8_build_intra_predictors_mbuv_s(xd, - xd->recon_above[1], - xd->recon_above[2], - xd->recon_left[1], - xd->recon_left[2], - xd->recon_left_stride[1], - xd->dst.u_buffer, xd->dst.v_buffer, - xd->dst.uv_stride); - - if (mode != B_PRED) - { - vp8_build_intra_predictors_mby_s(xd, - xd->recon_above[0], - xd->recon_left[0], - xd->recon_left_stride[0], - xd->dst.y_buffer, - xd->dst.y_stride); - } - else - { - short *DQC = xd->dequant_y1; - int dst_stride = xd->dst.y_stride; - - /* clear out residual eob info */ - if(xd->mode_info_context->mbmi.mb_skip_coeff) - memset(xd->eobs, 0, 25); - - intra_prediction_down_copy(xd, xd->recon_above[0] + 16); - - for (i = 0; i < 16; i++) - { - BLOCKD *b = &xd->block[i]; - unsigned char *dst = xd->dst.y_buffer + b->offset; - B_PREDICTION_MODE b_mode = - xd->mode_info_context->bmi[i].as_mode; - unsigned char *Above; - unsigned char *yleft; - int left_stride; - unsigned char top_left; - - /*Caution: For some b_mode, it needs 8 pixels (4 above + 4 above-right).*/ - if (i < 4 && pbi->common.filter_level) - Above = xd->recon_above[0] + b->offset; - else - Above = dst - dst_stride; - - if (i%4==0 && pbi->common.filter_level) - { - yleft = xd->recon_left[0] + i; - left_stride = 1; - } - else - { - yleft = dst - 1; - left_stride = dst_stride; - } - - if ((i==4 || i==8 || i==12) && pbi->common.filter_level) - top_left = *(xd->recon_left[0] + i - 1); - else - top_left = Above[-1]; - - vp8_intra4x4_predict(Above, yleft, left_stride, - b_mode, dst, dst_stride, top_left); - - if (xd->eobs[i] ) - { - if (xd->eobs[i] > 1) - { - vp8_dequant_idct_add(b->qcoeff, DQC, dst, 
dst_stride); - } - else - { - vp8_dc_only_idct_add(b->qcoeff[0] * DQC[0], - dst, dst_stride, dst, dst_stride); - memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); - } - } - } - } - } - else - { - vp8_build_inter_predictors_mb(xd); + vp8_dequant_idct_add_y_block(xd->qcoeff, DQC, xd->dst.y_buffer, + xd->dst.y_stride, xd->eobs); } + vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv, + xd->dst.u_buffer, xd->dst.v_buffer, + xd->dst.uv_stride, xd->eobs + 16); + } +} + +static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd, + int start_mb_row) { + volatile const int *last_row_current_mb_col; + volatile int *current_mb_col; + int mb_row; + VP8_COMMON *pc = &pbi->common; + const int nsync = pbi->sync_range; + const int first_row_no_sync_above = pc->mb_cols + nsync; + int num_part = 1 << pbi->common.multi_token_partition; + int last_mb_row = start_mb_row; + + YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME]; + YV12_BUFFER_CONFIG *yv12_fb_lst = pbi->dec_fb_ref[LAST_FRAME]; + + int recon_y_stride = yv12_fb_new->y_stride; + int recon_uv_stride = yv12_fb_new->uv_stride; + + unsigned char *ref_buffer[MAX_REF_FRAMES][3]; + unsigned char *dst_buffer[3]; + int i; + int ref_fb_corrupted[MAX_REF_FRAMES]; + + ref_fb_corrupted[INTRA_FRAME] = 0; + + for (i = 1; i < MAX_REF_FRAMES; ++i) { + YV12_BUFFER_CONFIG *this_fb = pbi->dec_fb_ref[i]; + + ref_buffer[i][0] = this_fb->y_buffer; + ref_buffer[i][1] = this_fb->u_buffer; + ref_buffer[i][2] = this_fb->v_buffer; + + ref_fb_corrupted[i] = this_fb->corrupted; + } + + dst_buffer[0] = yv12_fb_new->y_buffer; + dst_buffer[1] = yv12_fb_new->u_buffer; + dst_buffer[2] = yv12_fb_new->v_buffer; + + xd->up_available = (start_mb_row != 0); + + for (mb_row = start_mb_row; mb_row < pc->mb_rows; + mb_row += (pbi->decoding_thread_count + 1)) { + int recon_yoffset, recon_uvoffset; + int mb_col; + int filter_level; + loop_filter_info_n *lfi_n = &pc->lf_info; + + /* save last row processed by this thread */ + last_mb_row = 
mb_row; + /* select bool coder for current partition */ + xd->current_bc = &pbi->mbc[mb_row % num_part]; + + if (mb_row > 0) { + last_row_current_mb_col = &pbi->mt_current_mb_col[mb_row - 1]; + } else { + last_row_current_mb_col = &first_row_no_sync_above; + } + + current_mb_col = &pbi->mt_current_mb_col[mb_row]; + + recon_yoffset = mb_row * recon_y_stride * 16; + recon_uvoffset = mb_row * recon_uv_stride * 8; + + /* reset contexts */ + xd->above_context = pc->above_context; + memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); + + xd->left_available = 0; + + xd->mb_to_top_edge = -((mb_row * 16)) << 3; + xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3; + + if (pbi->common.filter_level) { + xd->recon_above[0] = pbi->mt_yabove_row[mb_row] + 0 * 16 + 32; + xd->recon_above[1] = pbi->mt_uabove_row[mb_row] + 0 * 8 + 16; + xd->recon_above[2] = pbi->mt_vabove_row[mb_row] + 0 * 8 + 16; + + xd->recon_left[0] = pbi->mt_yleft_col[mb_row]; + xd->recon_left[1] = pbi->mt_uleft_col[mb_row]; + xd->recon_left[2] = pbi->mt_vleft_col[mb_row]; + + /* TODO: move to outside row loop */ + xd->recon_left_stride[0] = 1; + xd->recon_left_stride[1] = 1; + } else { + xd->recon_above[0] = dst_buffer[0] + recon_yoffset; + xd->recon_above[1] = dst_buffer[1] + recon_uvoffset; + xd->recon_above[2] = dst_buffer[2] + recon_uvoffset; + + xd->recon_left[0] = xd->recon_above[0] - 1; + xd->recon_left[1] = xd->recon_above[1] - 1; + xd->recon_left[2] = xd->recon_above[2] - 1; + + xd->recon_above[0] -= xd->dst.y_stride; + xd->recon_above[1] -= xd->dst.uv_stride; + xd->recon_above[2] -= xd->dst.uv_stride; + + /* TODO: move to outside row loop */ + xd->recon_left_stride[0] = xd->dst.y_stride; + xd->recon_left_stride[1] = xd->dst.uv_stride; + + setup_intra_recon_left(xd->recon_left[0], xd->recon_left[1], + xd->recon_left[2], xd->dst.y_stride, + xd->dst.uv_stride); + } + + for (mb_col = 0; mb_col < pc->mb_cols; mb_col++) { + *current_mb_col = mb_col - 1; + + if ((mb_col & (nsync - 1)) == 
0) { + while (mb_col > (*last_row_current_mb_col - nsync)) { + x86_pause_hint(); + thread_sleep(0); + } + } + + /* Distance of MB to the various image edges. + * These are specified to 8th pel as they are always + * compared to values that are in 1/8th pel units. + */ + xd->mb_to_left_edge = -((mb_col * 16) << 3); + xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3; #if CONFIG_ERROR_CONCEALMENT - if (corruption_detected) - { - return; - } + { + int corrupt_residual = + (!pbi->independent_partitions && pbi->frame_corrupt_residual) || + vp8dx_bool_error(xd->current_bc); + if (pbi->ec_active && + (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) && + corrupt_residual) { + /* We have an intra block with corrupt + * coefficients, better to conceal with an inter + * block. + * Interpolate MVs from neighboring MBs + * + * Note that for the first mb with corrupt + * residual in a frame, we might not discover + * that before decoding the residual. That + * happens after this check, and therefore no + * inter concealment will be done. 
+ */ + vp8_interpolate_motion(xd, mb_row, mb_col, pc->mb_rows, pc->mb_cols); + } + } #endif - if(!xd->mode_info_context->mbmi.mb_skip_coeff) - { - /* dequantization and idct */ - if (mode != B_PRED) - { - short *DQC = xd->dequant_y1; + xd->dst.y_buffer = dst_buffer[0] + recon_yoffset; + xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset; + xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset; - if (mode != SPLITMV) - { - BLOCKD *b = &xd->block[24]; + xd->pre.y_buffer = + ref_buffer[xd->mode_info_context->mbmi.ref_frame][0] + recon_yoffset; + xd->pre.u_buffer = + ref_buffer[xd->mode_info_context->mbmi.ref_frame][1] + recon_uvoffset; + xd->pre.v_buffer = + ref_buffer[xd->mode_info_context->mbmi.ref_frame][2] + recon_uvoffset; - /* do 2nd order transform on the dc block */ - if (xd->eobs[24] > 1) - { - vp8_dequantize_b(b, xd->dequant_y2); + /* propagate errors from reference frames */ + xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame]; - vp8_short_inv_walsh4x4(&b->dqcoeff[0], - xd->qcoeff); - memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0])); - } - else - { - b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0]; - vp8_short_inv_walsh4x4_1(&b->dqcoeff[0], - xd->qcoeff); - memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); - } + mt_decode_macroblock(pbi, xd, 0); - /* override the dc dequant constant in order to preserve the - * dc components - */ - DQC = xd->dequant_y1_dc; - } + xd->left_available = 1; - vp8_dequant_idct_add_y_block - (xd->qcoeff, DQC, - xd->dst.y_buffer, - xd->dst.y_stride, xd->eobs); + /* check if the boolean decoder has suffered an error */ + xd->corrupted |= vp8dx_bool_error(xd->current_bc); + + xd->recon_above[0] += 16; + xd->recon_above[1] += 8; + xd->recon_above[2] += 8; + + if (!pbi->common.filter_level) { + xd->recon_left[0] += 16; + xd->recon_left[1] += 8; + xd->recon_left[2] += 8; + } + + if (pbi->common.filter_level) { + int skip_lf = (xd->mode_info_context->mbmi.mode != B_PRED && + xd->mode_info_context->mbmi.mode != SPLITMV && + 
xd->mode_info_context->mbmi.mb_skip_coeff); + + const int mode_index = + lfi_n->mode_lf_lut[xd->mode_info_context->mbmi.mode]; + const int seg = xd->mode_info_context->mbmi.segment_id; + const int ref_frame = xd->mode_info_context->mbmi.ref_frame; + + filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; + + if (mb_row != pc->mb_rows - 1) { + /* Save decoded MB last row data for next-row decoding */ + memcpy((pbi->mt_yabove_row[mb_row + 1] + 32 + mb_col * 16), + (xd->dst.y_buffer + 15 * recon_y_stride), 16); + memcpy((pbi->mt_uabove_row[mb_row + 1] + 16 + mb_col * 8), + (xd->dst.u_buffer + 7 * recon_uv_stride), 8); + memcpy((pbi->mt_vabove_row[mb_row + 1] + 16 + mb_col * 8), + (xd->dst.v_buffer + 7 * recon_uv_stride), 8); } - vp8_dequant_idct_add_uv_block - (xd->qcoeff+16*16, xd->dequant_uv, - xd->dst.u_buffer, xd->dst.v_buffer, - xd->dst.uv_stride, xd->eobs+16); + /* save left_col for next MB decoding */ + if (mb_col != pc->mb_cols - 1) { + MODE_INFO *next = xd->mode_info_context + 1; + + if (next->mbmi.ref_frame == INTRA_FRAME) { + for (i = 0; i < 16; ++i) { + pbi->mt_yleft_col[mb_row][i] = + xd->dst.y_buffer[i * recon_y_stride + 15]; + } + for (i = 0; i < 8; ++i) { + pbi->mt_uleft_col[mb_row][i] = + xd->dst.u_buffer[i * recon_uv_stride + 7]; + pbi->mt_vleft_col[mb_row][i] = + xd->dst.v_buffer[i * recon_uv_stride + 7]; + } + } + } + + /* loopfilter on this macroblock. 
*/ + if (filter_level) { + if (pc->filter_type == NORMAL_LOOPFILTER) { + loop_filter_info lfi; + FRAME_TYPE frame_type = pc->frame_type; + const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level]; + lfi.mblim = lfi_n->mblim[filter_level]; + lfi.blim = lfi_n->blim[filter_level]; + lfi.lim = lfi_n->lim[filter_level]; + lfi.hev_thr = lfi_n->hev_thr[hev_index]; + + if (mb_col > 0) + vp8_loop_filter_mbv(xd->dst.y_buffer, xd->dst.u_buffer, + xd->dst.v_buffer, recon_y_stride, + recon_uv_stride, &lfi); + + if (!skip_lf) + vp8_loop_filter_bv(xd->dst.y_buffer, xd->dst.u_buffer, + xd->dst.v_buffer, recon_y_stride, + recon_uv_stride, &lfi); + + /* don't apply across umv border */ + if (mb_row > 0) + vp8_loop_filter_mbh(xd->dst.y_buffer, xd->dst.u_buffer, + xd->dst.v_buffer, recon_y_stride, + recon_uv_stride, &lfi); + + if (!skip_lf) + vp8_loop_filter_bh(xd->dst.y_buffer, xd->dst.u_buffer, + xd->dst.v_buffer, recon_y_stride, + recon_uv_stride, &lfi); + } else { + if (mb_col > 0) + vp8_loop_filter_simple_mbv(xd->dst.y_buffer, recon_y_stride, + lfi_n->mblim[filter_level]); + + if (!skip_lf) + vp8_loop_filter_simple_bv(xd->dst.y_buffer, recon_y_stride, + lfi_n->blim[filter_level]); + + /* don't apply across umv border */ + if (mb_row > 0) + vp8_loop_filter_simple_mbh(xd->dst.y_buffer, recon_y_stride, + lfi_n->mblim[filter_level]); + + if (!skip_lf) + vp8_loop_filter_simple_bh(xd->dst.y_buffer, recon_y_stride, + lfi_n->blim[filter_level]); + } + } + } + + recon_yoffset += 16; + recon_uvoffset += 8; + + ++xd->mode_info_context; /* next mb */ + + xd->above_context++; } + + /* adjust to the next row of mbs */ + if (pbi->common.filter_level) { + if (mb_row != pc->mb_rows - 1) { + int lasty = yv12_fb_lst->y_width + VP8BORDERINPIXELS; + int lastuv = (yv12_fb_lst->y_width >> 1) + (VP8BORDERINPIXELS >> 1); + + for (i = 0; i < 4; ++i) { + pbi->mt_yabove_row[mb_row + 1][lasty + i] = + pbi->mt_yabove_row[mb_row + 1][lasty - 1]; + pbi->mt_uabove_row[mb_row + 1][lastuv + i] = + 
pbi->mt_uabove_row[mb_row + 1][lastuv - 1]; + pbi->mt_vabove_row[mb_row + 1][lastuv + i] = + pbi->mt_vabove_row[mb_row + 1][lastuv - 1]; + } + } + } else { + vp8_extend_mb_row(yv12_fb_new, xd->dst.y_buffer + 16, + xd->dst.u_buffer + 8, xd->dst.v_buffer + 8); + } + + /* last MB of row is ready just after extension is done */ + *current_mb_col = mb_col + nsync; + + ++xd->mode_info_context; /* skip prediction column */ + xd->up_available = 1; + + /* since we have multithread */ + xd->mode_info_context += xd->mode_info_stride * pbi->decoding_thread_count; + } + + /* signal end of frame decoding if this thread processed the last mb_row */ + if (last_mb_row == (pc->mb_rows - 1)) sem_post(&pbi->h_event_end_decoding); } -static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd, int start_mb_row) -{ - const int *last_row_current_mb_col; - int *current_mb_col; - int mb_row; - VP8_COMMON *pc = &pbi->common; - const int nsync = pbi->sync_range; - const int first_row_no_sync_above = pc->mb_cols + nsync; - int num_part = 1 << pbi->common.multi_token_partition; - int last_mb_row = start_mb_row; +static THREAD_FUNCTION thread_decoding_proc(void *p_data) { + int ithread = ((DECODETHREAD_DATA *)p_data)->ithread; + VP8D_COMP *pbi = (VP8D_COMP *)(((DECODETHREAD_DATA *)p_data)->ptr1); + MB_ROW_DEC *mbrd = (MB_ROW_DEC *)(((DECODETHREAD_DATA *)p_data)->ptr2); + ENTROPY_CONTEXT_PLANES mb_row_left_context; - YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME]; - YV12_BUFFER_CONFIG *yv12_fb_lst = pbi->dec_fb_ref[LAST_FRAME]; + while (1) { + if (pbi->b_multithreaded_rd == 0) break; - int recon_y_stride = yv12_fb_new->y_stride; - int recon_uv_stride = yv12_fb_new->uv_stride; + if (sem_wait(&pbi->h_event_start_decoding[ithread]) == 0) { + if (pbi->b_multithreaded_rd == 0) { + break; + } else { + MACROBLOCKD *xd = &mbrd->mbd; + xd->left_context = &mb_row_left_context; - unsigned char *ref_buffer[MAX_REF_FRAMES][3]; - unsigned char *dst_buffer[3]; - int i; - int 
ref_fb_corrupted[MAX_REF_FRAMES]; - - ref_fb_corrupted[INTRA_FRAME] = 0; - - for(i = 1; i < MAX_REF_FRAMES; i++) - { - YV12_BUFFER_CONFIG *this_fb = pbi->dec_fb_ref[i]; - - ref_buffer[i][0] = this_fb->y_buffer; - ref_buffer[i][1] = this_fb->u_buffer; - ref_buffer[i][2] = this_fb->v_buffer; - - ref_fb_corrupted[i] = this_fb->corrupted; + mt_decode_mb_rows(pbi, xd, ithread + 1); + } } + } - dst_buffer[0] = yv12_fb_new->y_buffer; - dst_buffer[1] = yv12_fb_new->u_buffer; - dst_buffer[2] = yv12_fb_new->v_buffer; - - xd->up_available = (start_mb_row != 0); - - xd->mode_info_context = pc->mi + pc->mode_info_stride * start_mb_row; - xd->mode_info_stride = pc->mode_info_stride; - - for (mb_row = start_mb_row; mb_row < pc->mb_rows; mb_row += (pbi->decoding_thread_count + 1)) - { - int recon_yoffset, recon_uvoffset; - int mb_col; - int filter_level; - loop_filter_info_n *lfi_n = &pc->lf_info; - - /* save last row processed by this thread */ - last_mb_row = mb_row; - /* select bool coder for current partition */ - xd->current_bc = &pbi->mbc[mb_row%num_part]; - - if (mb_row > 0) - last_row_current_mb_col = &pbi->mt_current_mb_col[mb_row -1]; - else - last_row_current_mb_col = &first_row_no_sync_above; - - current_mb_col = &pbi->mt_current_mb_col[mb_row]; - - recon_yoffset = mb_row * recon_y_stride * 16; - recon_uvoffset = mb_row * recon_uv_stride * 8; - - /* reset contexts */ - xd->above_context = pc->above_context; - memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); - - xd->left_available = 0; - - xd->mb_to_top_edge = -((mb_row * 16)) << 3; - xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3; - - if (pbi->common.filter_level) - { - xd->recon_above[0] = pbi->mt_yabove_row[mb_row] + 0*16 +32; - xd->recon_above[1] = pbi->mt_uabove_row[mb_row] + 0*8 +16; - xd->recon_above[2] = pbi->mt_vabove_row[mb_row] + 0*8 +16; - - xd->recon_left[0] = pbi->mt_yleft_col[mb_row]; - xd->recon_left[1] = pbi->mt_uleft_col[mb_row]; - xd->recon_left[2] = 
pbi->mt_vleft_col[mb_row]; - - /* TODO: move to outside row loop */ - xd->recon_left_stride[0] = 1; - xd->recon_left_stride[1] = 1; - } - else - { - xd->recon_above[0] = dst_buffer[0] + recon_yoffset; - xd->recon_above[1] = dst_buffer[1] + recon_uvoffset; - xd->recon_above[2] = dst_buffer[2] + recon_uvoffset; - - xd->recon_left[0] = xd->recon_above[0] - 1; - xd->recon_left[1] = xd->recon_above[1] - 1; - xd->recon_left[2] = xd->recon_above[2] - 1; - - xd->recon_above[0] -= xd->dst.y_stride; - xd->recon_above[1] -= xd->dst.uv_stride; - xd->recon_above[2] -= xd->dst.uv_stride; - - /* TODO: move to outside row loop */ - xd->recon_left_stride[0] = xd->dst.y_stride; - xd->recon_left_stride[1] = xd->dst.uv_stride; - - setup_intra_recon_left(xd->recon_left[0], xd->recon_left[1], - xd->recon_left[2], xd->dst.y_stride, - xd->dst.uv_stride); - } - - for (mb_col = 0; mb_col < pc->mb_cols; mb_col++) { - if (((mb_col - 1) % nsync) == 0) { - pthread_mutex_t *mutex = &pbi->pmutex[mb_row]; - protected_write(mutex, current_mb_col, mb_col - 1); - } - - if (mb_row && !(mb_col & (nsync - 1))) { - pthread_mutex_t *mutex = &pbi->pmutex[mb_row-1]; - sync_read(mutex, mb_col, last_row_current_mb_col, nsync); - } - - /* Distance of MB to the various image edges. - * These are specified to 8th pel as they are always - * compared to values that are in 1/8th pel units. - */ - xd->mb_to_left_edge = -((mb_col * 16) << 3); - xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3; - - #if CONFIG_ERROR_CONCEALMENT - { - int corrupt_residual = - (!pbi->independent_partitions && - pbi->frame_corrupt_residual) || - vp8dx_bool_error(xd->current_bc); - if (pbi->ec_active && - (xd->mode_info_context->mbmi.ref_frame == - INTRA_FRAME) && - corrupt_residual) - { - /* We have an intra block with corrupt - * coefficients, better to conceal with an inter - * block. 
- * Interpolate MVs from neighboring MBs - * - * Note that for the first mb with corrupt - * residual in a frame, we might not discover - * that before decoding the residual. That - * happens after this check, and therefore no - * inter concealment will be done. - */ - vp8_interpolate_motion(xd, - mb_row, mb_col, - pc->mb_rows, pc->mb_cols, - pc->mode_info_stride); - } - } - #endif - - - xd->dst.y_buffer = dst_buffer[0] + recon_yoffset; - xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset; - xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset; - - xd->pre.y_buffer = ref_buffer[xd->mode_info_context->mbmi.ref_frame][0] + recon_yoffset; - xd->pre.u_buffer = ref_buffer[xd->mode_info_context->mbmi.ref_frame][1] + recon_uvoffset; - xd->pre.v_buffer = ref_buffer[xd->mode_info_context->mbmi.ref_frame][2] + recon_uvoffset; - - /* propagate errors from reference frames */ - xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame]; - - mt_decode_macroblock(pbi, xd, 0); - - xd->left_available = 1; - - /* check if the boolean decoder has suffered an error */ - xd->corrupted |= vp8dx_bool_error(xd->current_bc); - - xd->recon_above[0] += 16; - xd->recon_above[1] += 8; - xd->recon_above[2] += 8; - - if (!pbi->common.filter_level) - { - xd->recon_left[0] += 16; - xd->recon_left[1] += 8; - xd->recon_left[2] += 8; - } - - if (pbi->common.filter_level) - { - int skip_lf = (xd->mode_info_context->mbmi.mode != B_PRED && - xd->mode_info_context->mbmi.mode != SPLITMV && - xd->mode_info_context->mbmi.mb_skip_coeff); - - const int mode_index = lfi_n->mode_lf_lut[xd->mode_info_context->mbmi.mode]; - const int seg = xd->mode_info_context->mbmi.segment_id; - const int ref_frame = xd->mode_info_context->mbmi.ref_frame; - - filter_level = lfi_n->lvl[seg][ref_frame][mode_index]; - - if( mb_row != pc->mb_rows-1 ) - { - /* Save decoded MB last row data for next-row decoding */ - memcpy((pbi->mt_yabove_row[mb_row + 1] + 32 + mb_col*16), (xd->dst.y_buffer + 15 * recon_y_stride), 16); - 
memcpy((pbi->mt_uabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.u_buffer + 7 * recon_uv_stride), 8); - memcpy((pbi->mt_vabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.v_buffer + 7 * recon_uv_stride), 8); - } - - /* save left_col for next MB decoding */ - if(mb_col != pc->mb_cols-1) - { - MODE_INFO *next = xd->mode_info_context +1; - - if (next->mbmi.ref_frame == INTRA_FRAME) - { - for (i = 0; i < 16; i++) - pbi->mt_yleft_col[mb_row][i] = xd->dst.y_buffer [i* recon_y_stride + 15]; - for (i = 0; i < 8; i++) - { - pbi->mt_uleft_col[mb_row][i] = xd->dst.u_buffer [i* recon_uv_stride + 7]; - pbi->mt_vleft_col[mb_row][i] = xd->dst.v_buffer [i* recon_uv_stride + 7]; - } - } - } - - /* loopfilter on this macroblock. */ - if (filter_level) - { - if(pc->filter_type == NORMAL_LOOPFILTER) - { - loop_filter_info lfi; - FRAME_TYPE frame_type = pc->frame_type; - const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level]; - lfi.mblim = lfi_n->mblim[filter_level]; - lfi.blim = lfi_n->blim[filter_level]; - lfi.lim = lfi_n->lim[filter_level]; - lfi.hev_thr = lfi_n->hev_thr[hev_index]; - - if (mb_col > 0) - vp8_loop_filter_mbv - (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi); - - if (!skip_lf) - vp8_loop_filter_bv - (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi); - - /* don't apply across umv border */ - if (mb_row > 0) - vp8_loop_filter_mbh - (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi); - - if (!skip_lf) - vp8_loop_filter_bh - (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi); - } - else - { - if (mb_col > 0) - vp8_loop_filter_simple_mbv - (xd->dst.y_buffer, recon_y_stride, lfi_n->mblim[filter_level]); - - if (!skip_lf) - vp8_loop_filter_simple_bv - (xd->dst.y_buffer, recon_y_stride, lfi_n->blim[filter_level]); - - /* don't apply across umv border */ - if (mb_row > 0) - 
vp8_loop_filter_simple_mbh - (xd->dst.y_buffer, recon_y_stride, lfi_n->mblim[filter_level]); - - if (!skip_lf) - vp8_loop_filter_simple_bh - (xd->dst.y_buffer, recon_y_stride, lfi_n->blim[filter_level]); - } - } - - } - - recon_yoffset += 16; - recon_uvoffset += 8; - - ++xd->mode_info_context; /* next mb */ - - xd->above_context++; - } - - /* adjust to the next row of mbs */ - if (pbi->common.filter_level) - { - if(mb_row != pc->mb_rows-1) - { - int lasty = yv12_fb_lst->y_width + VP8BORDERINPIXELS; - int lastuv = (yv12_fb_lst->y_width>>1) + (VP8BORDERINPIXELS>>1); - - for (i = 0; i < 4; i++) - { - pbi->mt_yabove_row[mb_row +1][lasty + i] = pbi->mt_yabove_row[mb_row +1][lasty -1]; - pbi->mt_uabove_row[mb_row +1][lastuv + i] = pbi->mt_uabove_row[mb_row +1][lastuv -1]; - pbi->mt_vabove_row[mb_row +1][lastuv + i] = pbi->mt_vabove_row[mb_row +1][lastuv -1]; - } - } - } - else - vp8_extend_mb_row(yv12_fb_new, xd->dst.y_buffer + 16, - xd->dst.u_buffer + 8, xd->dst.v_buffer + 8); - - /* last MB of row is ready just after extension is done */ - protected_write(&pbi->pmutex[mb_row], current_mb_col, mb_col + nsync); - - ++xd->mode_info_context; /* skip prediction column */ - xd->up_available = 1; - - /* since we have multithread */ - xd->mode_info_context += xd->mode_info_stride * pbi->decoding_thread_count; - } - - /* signal end of frame decoding if this thread processed the last mb_row */ - if (last_mb_row == (pc->mb_rows - 1)) - sem_post(&pbi->h_event_end_decoding); - + return 0; } +void vp8_decoder_create_threads(VP8D_COMP *pbi) { + int core_count = 0; + unsigned int ithread; -static THREAD_FUNCTION thread_decoding_proc(void *p_data) -{ - int ithread = ((DECODETHREAD_DATA *)p_data)->ithread; - VP8D_COMP *pbi = (VP8D_COMP *)(((DECODETHREAD_DATA *)p_data)->ptr1); - MB_ROW_DEC *mbrd = (MB_ROW_DEC *)(((DECODETHREAD_DATA *)p_data)->ptr2); - ENTROPY_CONTEXT_PLANES mb_row_left_context; + pbi->b_multithreaded_rd = 0; + pbi->allocated_decoding_thread_count = 0; - while (1) - { - 
if (protected_read(&pbi->mt_mutex, &pbi->b_multithreaded_rd) == 0) - break; + /* limit decoding threads to the max number of token partitions */ + core_count = (pbi->max_threads > 8) ? 8 : pbi->max_threads; - if (sem_wait(&pbi->h_event_start_decoding[ithread]) == 0) - { - if (protected_read(&pbi->mt_mutex, &pbi->b_multithreaded_rd) == 0) - break; - else - { - MACROBLOCKD *xd = &mbrd->mbd; - xd->left_context = &mb_row_left_context; + /* limit decoding threads to the available cores */ + if (core_count > pbi->common.processor_core_count) { + core_count = pbi->common.processor_core_count; + } - mt_decode_mb_rows(pbi, xd, ithread+1); - } - } + if (core_count > 1) { + pbi->b_multithreaded_rd = 1; + pbi->decoding_thread_count = core_count - 1; + + CALLOC_ARRAY(pbi->h_decoding_thread, pbi->decoding_thread_count); + CALLOC_ARRAY(pbi->h_event_start_decoding, pbi->decoding_thread_count); + CALLOC_ARRAY_ALIGNED(pbi->mb_row_di, pbi->decoding_thread_count, 32); + CALLOC_ARRAY(pbi->de_thread_data, pbi->decoding_thread_count); + + if (sem_init(&pbi->h_event_end_decoding, 0, 0)) { + vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR, + "Failed to initialize semaphore"); } - return 0 ; -} + for (ithread = 0; ithread < pbi->decoding_thread_count; ++ithread) { + if (sem_init(&pbi->h_event_start_decoding[ithread], 0, 0)) break; + vp8_setup_block_dptrs(&pbi->mb_row_di[ithread].mbd); -void vp8_decoder_create_threads(VP8D_COMP *pbi) -{ - int core_count = 0; - unsigned int ithread; + pbi->de_thread_data[ithread].ithread = ithread; + pbi->de_thread_data[ithread].ptr1 = (void *)pbi; + pbi->de_thread_data[ithread].ptr2 = (void *)&pbi->mb_row_di[ithread]; - pbi->b_multithreaded_rd = 0; - pbi->allocated_decoding_thread_count = 0; - pthread_mutex_init(&pbi->mt_mutex, NULL); - - /* limit decoding threads to the max number of token partitions */ - core_count = (pbi->max_threads > 8) ? 
8 : pbi->max_threads; - - /* limit decoding threads to the available cores */ - if (core_count > pbi->common.processor_core_count) - core_count = pbi->common.processor_core_count; - - if (core_count > 1) - { - pbi->b_multithreaded_rd = 1; - pbi->decoding_thread_count = core_count - 1; - - CALLOC_ARRAY(pbi->h_decoding_thread, pbi->decoding_thread_count); - CALLOC_ARRAY(pbi->h_event_start_decoding, pbi->decoding_thread_count); - CALLOC_ARRAY_ALIGNED(pbi->mb_row_di, pbi->decoding_thread_count, 32); - CALLOC_ARRAY(pbi->de_thread_data, pbi->decoding_thread_count); - - for (ithread = 0; ithread < pbi->decoding_thread_count; ithread++) - { - sem_init(&pbi->h_event_start_decoding[ithread], 0, 0); - - vp8_setup_block_dptrs(&pbi->mb_row_di[ithread].mbd); - - pbi->de_thread_data[ithread].ithread = ithread; - pbi->de_thread_data[ithread].ptr1 = (void *)pbi; - pbi->de_thread_data[ithread].ptr2 = (void *) &pbi->mb_row_di[ithread]; - - pthread_create(&pbi->h_decoding_thread[ithread], 0, thread_decoding_proc, (&pbi->de_thread_data[ithread])); - } - - sem_init(&pbi->h_event_end_decoding, 0, 0); - - pbi->allocated_decoding_thread_count = pbi->decoding_thread_count; + if (pthread_create(&pbi->h_decoding_thread[ithread], 0, + thread_decoding_proc, &pbi->de_thread_data[ithread])) { + sem_destroy(&pbi->h_event_start_decoding[ithread]); + break; + } } -} - - -void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows) -{ - int i; - - if (protected_read(&pbi->mt_mutex, &pbi->b_multithreaded_rd)) - { - /* De-allocate mutex */ - if (pbi->pmutex != NULL) { - for (i = 0; i < mb_rows; i++) { - pthread_mutex_destroy(&pbi->pmutex[i]); - } - vpx_free(pbi->pmutex); - pbi->pmutex = NULL; - } - - vpx_free(pbi->mt_current_mb_col); - pbi->mt_current_mb_col = NULL ; - - /* Free above_row buffers. 
*/ - if (pbi->mt_yabove_row) - { - for (i=0; i< mb_rows; i++) - { - vpx_free(pbi->mt_yabove_row[i]); - pbi->mt_yabove_row[i] = NULL ; - } - vpx_free(pbi->mt_yabove_row); - pbi->mt_yabove_row = NULL ; - } - - if (pbi->mt_uabove_row) - { - for (i=0; i< mb_rows; i++) - { - vpx_free(pbi->mt_uabove_row[i]); - pbi->mt_uabove_row[i] = NULL ; - } - vpx_free(pbi->mt_uabove_row); - pbi->mt_uabove_row = NULL ; - } - - if (pbi->mt_vabove_row) - { - for (i=0; i< mb_rows; i++) - { - vpx_free(pbi->mt_vabove_row[i]); - pbi->mt_vabove_row[i] = NULL ; - } - vpx_free(pbi->mt_vabove_row); - pbi->mt_vabove_row = NULL ; - } - - /* Free left_col buffers. */ - if (pbi->mt_yleft_col) - { - for (i=0; i< mb_rows; i++) - { - vpx_free(pbi->mt_yleft_col[i]); - pbi->mt_yleft_col[i] = NULL ; - } - vpx_free(pbi->mt_yleft_col); - pbi->mt_yleft_col = NULL ; - } - - if (pbi->mt_uleft_col) - { - for (i=0; i< mb_rows; i++) - { - vpx_free(pbi->mt_uleft_col[i]); - pbi->mt_uleft_col[i] = NULL ; - } - vpx_free(pbi->mt_uleft_col); - pbi->mt_uleft_col = NULL ; - } - - if (pbi->mt_vleft_col) - { - for (i=0; i< mb_rows; i++) - { - vpx_free(pbi->mt_vleft_col[i]); - pbi->mt_vleft_col[i] = NULL ; - } - vpx_free(pbi->mt_vleft_col); - pbi->mt_vleft_col = NULL ; - } - } -} - - -void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows) -{ - VP8_COMMON *const pc = & pbi->common; - int i; - int uv_width; - - if (protected_read(&pbi->mt_mutex, &pbi->b_multithreaded_rd)) - { - vp8mt_de_alloc_temp_buffers(pbi, prev_mb_rows); - - /* our internal buffers are always multiples of 16 */ - if ((width & 0xf) != 0) - width += 16 - (width & 0xf); - - if (width < 640) pbi->sync_range = 1; - else if (width <= 1280) pbi->sync_range = 8; - else if (width <= 2560) pbi->sync_range =16; - else pbi->sync_range = 32; - - uv_width = width >>1; - - /* Allocate mutex */ - CHECK_MEM_ERROR(pbi->pmutex, vpx_malloc(sizeof(*pbi->pmutex) * - pc->mb_rows)); - if (pbi->pmutex) { - for (i = 0; i < pc->mb_rows; i++) { - 
pthread_mutex_init(&pbi->pmutex[i], NULL); - } - } - - /* Allocate an int for each mb row. */ - CALLOC_ARRAY(pbi->mt_current_mb_col, pc->mb_rows); - - /* Allocate memory for above_row buffers. */ - CALLOC_ARRAY(pbi->mt_yabove_row, pc->mb_rows); - for (i = 0; i < pc->mb_rows; i++) - CHECK_MEM_ERROR(pbi->mt_yabove_row[i], vpx_memalign(16,sizeof(unsigned char) * (width + (VP8BORDERINPIXELS<<1)))); - - CALLOC_ARRAY(pbi->mt_uabove_row, pc->mb_rows); - for (i = 0; i < pc->mb_rows; i++) - CHECK_MEM_ERROR(pbi->mt_uabove_row[i], vpx_memalign(16,sizeof(unsigned char) * (uv_width + VP8BORDERINPIXELS))); - - CALLOC_ARRAY(pbi->mt_vabove_row, pc->mb_rows); - for (i = 0; i < pc->mb_rows; i++) - CHECK_MEM_ERROR(pbi->mt_vabove_row[i], vpx_memalign(16,sizeof(unsigned char) * (uv_width + VP8BORDERINPIXELS))); - - /* Allocate memory for left_col buffers. */ - CALLOC_ARRAY(pbi->mt_yleft_col, pc->mb_rows); - for (i = 0; i < pc->mb_rows; i++) - CHECK_MEM_ERROR(pbi->mt_yleft_col[i], vpx_calloc(sizeof(unsigned char) * 16, 1)); - - CALLOC_ARRAY(pbi->mt_uleft_col, pc->mb_rows); - for (i = 0; i < pc->mb_rows; i++) - CHECK_MEM_ERROR(pbi->mt_uleft_col[i], vpx_calloc(sizeof(unsigned char) * 8, 1)); - - CALLOC_ARRAY(pbi->mt_vleft_col, pc->mb_rows); - for (i = 0; i < pc->mb_rows; i++) - CHECK_MEM_ERROR(pbi->mt_vleft_col[i], vpx_calloc(sizeof(unsigned char) * 8, 1)); - } -} - - -void vp8_decoder_remove_threads(VP8D_COMP *pbi) -{ - /* shutdown MB Decoding thread; */ - if (protected_read(&pbi->mt_mutex, &pbi->b_multithreaded_rd)) - { - int i; - - protected_write(&pbi->mt_mutex, &pbi->b_multithreaded_rd, 0); - - /* allow all threads to exit */ - for (i = 0; i < pbi->allocated_decoding_thread_count; i++) - { - sem_post(&pbi->h_event_start_decoding[i]); - pthread_join(pbi->h_decoding_thread[i], NULL); - } - - for (i = 0; i < pbi->allocated_decoding_thread_count; i++) - { - sem_destroy(&pbi->h_event_start_decoding[i]); - } + pbi->allocated_decoding_thread_count = ithread; + if 
(pbi->allocated_decoding_thread_count != + (int)pbi->decoding_thread_count) { + /* the remainder of cleanup cases will be handled in + * vp8_decoder_remove_threads(). */ + if (pbi->allocated_decoding_thread_count == 0) { sem_destroy(&pbi->h_event_end_decoding); - - vpx_free(pbi->h_decoding_thread); - pbi->h_decoding_thread = NULL; - - vpx_free(pbi->h_event_start_decoding); - pbi->h_event_start_decoding = NULL; - - vpx_free(pbi->mb_row_di); - pbi->mb_row_di = NULL ; - - vpx_free(pbi->de_thread_data); - pbi->de_thread_data = NULL; + } + vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR, + "Failed to create threads"); } - pthread_mutex_destroy(&pbi->mt_mutex); + } } -void vp8mt_decode_mb_rows( VP8D_COMP *pbi, MACROBLOCKD *xd) -{ - VP8_COMMON *pc = &pbi->common; - unsigned int i; - int j; +void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows) { + int i; - int filter_level = pc->filter_level; - YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME]; + vpx_free(pbi->mt_current_mb_col); + pbi->mt_current_mb_col = NULL; - if (filter_level) - { - /* Set above_row buffer to 127 for decoding first MB row */ - memset(pbi->mt_yabove_row[0] + VP8BORDERINPIXELS-1, 127, yv12_fb_new->y_width + 5); - memset(pbi->mt_uabove_row[0] + (VP8BORDERINPIXELS>>1)-1, 127, (yv12_fb_new->y_width>>1) +5); - memset(pbi->mt_vabove_row[0] + (VP8BORDERINPIXELS>>1)-1, 127, (yv12_fb_new->y_width>>1) +5); - - for (j=1; jmb_rows; j++) - { - memset(pbi->mt_yabove_row[j] + VP8BORDERINPIXELS-1, (unsigned char)129, 1); - memset(pbi->mt_uabove_row[j] + (VP8BORDERINPIXELS>>1)-1, (unsigned char)129, 1); - memset(pbi->mt_vabove_row[j] + (VP8BORDERINPIXELS>>1)-1, (unsigned char)129, 1); - } - - /* Set left_col to 129 initially */ - for (j=0; jmb_rows; j++) - { - memset(pbi->mt_yleft_col[j], (unsigned char)129, 16); - memset(pbi->mt_uleft_col[j], (unsigned char)129, 8); - memset(pbi->mt_vleft_col[j], (unsigned char)129, 8); - } - - /* Initialize the loop filter for this frame. 
*/ - vp8_loop_filter_frame_init(pc, &pbi->mb, filter_level); + /* Free above_row buffers. */ + if (pbi->mt_yabove_row) { + for (i = 0; i < mb_rows; ++i) { + vpx_free(pbi->mt_yabove_row[i]); + pbi->mt_yabove_row[i] = NULL; } - else - vp8_setup_intra_recon_top_line(yv12_fb_new); + vpx_free(pbi->mt_yabove_row); + pbi->mt_yabove_row = NULL; + } - setup_decoding_thread_data(pbi, xd, pbi->mb_row_di, pbi->decoding_thread_count); + if (pbi->mt_uabove_row) { + for (i = 0; i < mb_rows; ++i) { + vpx_free(pbi->mt_uabove_row[i]); + pbi->mt_uabove_row[i] = NULL; + } + vpx_free(pbi->mt_uabove_row); + pbi->mt_uabove_row = NULL; + } - for (i = 0; i < pbi->decoding_thread_count; i++) - sem_post(&pbi->h_event_start_decoding[i]); + if (pbi->mt_vabove_row) { + for (i = 0; i < mb_rows; ++i) { + vpx_free(pbi->mt_vabove_row[i]); + pbi->mt_vabove_row[i] = NULL; + } + vpx_free(pbi->mt_vabove_row); + pbi->mt_vabove_row = NULL; + } - mt_decode_mb_rows(pbi, xd, 0); + /* Free left_col buffers. */ + if (pbi->mt_yleft_col) { + for (i = 0; i < mb_rows; ++i) { + vpx_free(pbi->mt_yleft_col[i]); + pbi->mt_yleft_col[i] = NULL; + } + vpx_free(pbi->mt_yleft_col); + pbi->mt_yleft_col = NULL; + } - sem_wait(&pbi->h_event_end_decoding); /* add back for each frame */ + if (pbi->mt_uleft_col) { + for (i = 0; i < mb_rows; ++i) { + vpx_free(pbi->mt_uleft_col[i]); + pbi->mt_uleft_col[i] = NULL; + } + vpx_free(pbi->mt_uleft_col); + pbi->mt_uleft_col = NULL; + } + + if (pbi->mt_vleft_col) { + for (i = 0; i < mb_rows; ++i) { + vpx_free(pbi->mt_vleft_col[i]); + pbi->mt_vleft_col[i] = NULL; + } + vpx_free(pbi->mt_vleft_col); + pbi->mt_vleft_col = NULL; + } +} + +void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows) { + VP8_COMMON *const pc = &pbi->common; + int i; + int uv_width; + + if (pbi->b_multithreaded_rd) { + vp8mt_de_alloc_temp_buffers(pbi, prev_mb_rows); + + /* our internal buffers are always multiples of 16 */ + if ((width & 0xf) != 0) width += 16 - (width & 0xf); + + if (width < 640) 
{ + pbi->sync_range = 1; + } else if (width <= 1280) { + pbi->sync_range = 8; + } else if (width <= 2560) { + pbi->sync_range = 16; + } else { + pbi->sync_range = 32; + } + + uv_width = width >> 1; + + /* Allocate an int for each mb row. */ + CALLOC_ARRAY(pbi->mt_current_mb_col, pc->mb_rows); + + /* Allocate memory for above_row buffers. */ + CALLOC_ARRAY(pbi->mt_yabove_row, pc->mb_rows); + for (i = 0; i < pc->mb_rows; ++i) + CHECK_MEM_ERROR(pbi->mt_yabove_row[i], + vpx_memalign(16, sizeof(unsigned char) * + (width + (VP8BORDERINPIXELS << 1)))); + + CALLOC_ARRAY(pbi->mt_uabove_row, pc->mb_rows); + for (i = 0; i < pc->mb_rows; ++i) + CHECK_MEM_ERROR(pbi->mt_uabove_row[i], + vpx_memalign(16, sizeof(unsigned char) * + (uv_width + VP8BORDERINPIXELS))); + + CALLOC_ARRAY(pbi->mt_vabove_row, pc->mb_rows); + for (i = 0; i < pc->mb_rows; ++i) + CHECK_MEM_ERROR(pbi->mt_vabove_row[i], + vpx_memalign(16, sizeof(unsigned char) * + (uv_width + VP8BORDERINPIXELS))); + + /* Allocate memory for left_col buffers. 
*/ + CALLOC_ARRAY(pbi->mt_yleft_col, pc->mb_rows); + for (i = 0; i < pc->mb_rows; ++i) + CHECK_MEM_ERROR(pbi->mt_yleft_col[i], + vpx_calloc(sizeof(unsigned char) * 16, 1)); + + CALLOC_ARRAY(pbi->mt_uleft_col, pc->mb_rows); + for (i = 0; i < pc->mb_rows; ++i) + CHECK_MEM_ERROR(pbi->mt_uleft_col[i], + vpx_calloc(sizeof(unsigned char) * 8, 1)); + + CALLOC_ARRAY(pbi->mt_vleft_col, pc->mb_rows); + for (i = 0; i < pc->mb_rows; ++i) + CHECK_MEM_ERROR(pbi->mt_vleft_col[i], + vpx_calloc(sizeof(unsigned char) * 8, 1)); + } +} + +void vp8_decoder_remove_threads(VP8D_COMP *pbi) { + /* shutdown MB Decoding thread; */ + if (pbi->b_multithreaded_rd) { + int i; + pbi->b_multithreaded_rd = 0; + + /* allow all threads to exit */ + for (i = 0; i < pbi->allocated_decoding_thread_count; ++i) { + sem_post(&pbi->h_event_start_decoding[i]); + pthread_join(pbi->h_decoding_thread[i], NULL); + } + + for (i = 0; i < pbi->allocated_decoding_thread_count; ++i) { + sem_destroy(&pbi->h_event_start_decoding[i]); + } + + if (pbi->allocated_decoding_thread_count) { + sem_destroy(&pbi->h_event_end_decoding); + } + + vpx_free(pbi->h_decoding_thread); + pbi->h_decoding_thread = NULL; + + vpx_free(pbi->h_event_start_decoding); + pbi->h_event_start_decoding = NULL; + + vpx_free(pbi->mb_row_di); + pbi->mb_row_di = NULL; + + vpx_free(pbi->de_thread_data); + pbi->de_thread_data = NULL; + + vp8mt_de_alloc_temp_buffers(pbi, pbi->common.mb_rows); + } +} + +void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd) { + VP8_COMMON *pc = &pbi->common; + unsigned int i; + int j; + + int filter_level = pc->filter_level; + YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME]; + + if (filter_level) { + /* Set above_row buffer to 127 for decoding first MB row */ + memset(pbi->mt_yabove_row[0] + VP8BORDERINPIXELS - 1, 127, + yv12_fb_new->y_width + 5); + memset(pbi->mt_uabove_row[0] + (VP8BORDERINPIXELS >> 1) - 1, 127, + (yv12_fb_new->y_width >> 1) + 5); + memset(pbi->mt_vabove_row[0] + (VP8BORDERINPIXELS >> 
1) - 1, 127, + (yv12_fb_new->y_width >> 1) + 5); + + for (j = 1; j < pc->mb_rows; ++j) { + memset(pbi->mt_yabove_row[j] + VP8BORDERINPIXELS - 1, (unsigned char)129, + 1); + memset(pbi->mt_uabove_row[j] + (VP8BORDERINPIXELS >> 1) - 1, + (unsigned char)129, 1); + memset(pbi->mt_vabove_row[j] + (VP8BORDERINPIXELS >> 1) - 1, + (unsigned char)129, 1); + } + + /* Set left_col to 129 initially */ + for (j = 0; j < pc->mb_rows; ++j) { + memset(pbi->mt_yleft_col[j], (unsigned char)129, 16); + memset(pbi->mt_uleft_col[j], (unsigned char)129, 8); + memset(pbi->mt_vleft_col[j], (unsigned char)129, 8); + } + + /* Initialize the loop filter for this frame. */ + vp8_loop_filter_frame_init(pc, &pbi->mb, filter_level); + } else { + vp8_setup_intra_recon_top_line(yv12_fb_new); + } + + setup_decoding_thread_data(pbi, xd, pbi->mb_row_di, + pbi->decoding_thread_count); + + for (i = 0; i < pbi->decoding_thread_count; ++i) { + sem_post(&pbi->h_event_start_decoding[i]); + } + + mt_decode_mb_rows(pbi, xd, 0); + + sem_wait(&pbi->h_event_end_decoding); /* add back for each frame */ } diff --git a/libs/libvpx/vp8/decoder/treereader.h b/libs/libvpx/vp8/decoder/treereader.h index f7d23c3698..dd0f0986e9 100644 --- a/libs/libvpx/vp8/decoder/treereader.h +++ b/libs/libvpx/vp8/decoder/treereader.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_DECODER_TREEREADER_H_ #define VP8_DECODER_TREEREADER_H_ @@ -26,20 +25,17 @@ typedef BOOL_DECODER vp8_reader; #define vp8_read_literal vp8_decode_value #define vp8_read_bit(R) vp8_read(R, vp8_prob_half) - /* Intent of tree data structure is to make decoding trivial. */ static INLINE int vp8_treed_read( - vp8_reader *const r, /* !!! must return a 0 or 1 !!! */ - vp8_tree t, - const vp8_prob *const p -) -{ - register vp8_tree_index i = 0; + vp8_reader *const r, /* !!! must return a 0 or 1 !!! 
*/ + vp8_tree t, const vp8_prob *const p) { + register vp8_tree_index i = 0; - while ((i = t[ i + vp8_read(r, p[i>>1])]) > 0) ; + while ((i = t[i + vp8_read(r, p[i >> 1])]) > 0) { + } - return -i; + return -i; } #ifdef __cplusplus diff --git a/libs/libvpx/vp8/encoder/arm/armv6/vp8_short_fdct4x4_armv6.asm b/libs/libvpx/vp8/encoder/arm/armv6/vp8_short_fdct4x4_armv6.asm deleted file mode 100644 index 8034c1db9a..0000000000 --- a/libs/libvpx/vp8/encoder/arm/armv6/vp8_short_fdct4x4_armv6.asm +++ /dev/null @@ -1,262 +0,0 @@ -; -; Copyright (c) 2011 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - - EXPORT |vp8_short_fdct4x4_armv6| - - ARM - REQUIRE8 - PRESERVE8 - - AREA |.text|, CODE, READONLY -; void vp8_short_fdct4x4_c(short *input, short *output, int pitch) -|vp8_short_fdct4x4_armv6| PROC - - stmfd sp!, {r4 - r12, lr} - - ; PART 1 - - ; coeffs 0-3 - ldrd r4, r5, [r0] ; [i1 | i0] [i3 | i2] - - ldr r10, c7500 - ldr r11, c14500 - ldr r12, c0x22a453a0 ; [2217*4 | 5352*4] - ldr lr, c0x00080008 - ror r5, r5, #16 ; [i2 | i3] - - qadd16 r6, r4, r5 ; [i1+i2 | i0+i3] = [b1 | a1] without shift - qsub16 r7, r4, r5 ; [i1-i2 | i0-i3] = [c1 | d1] without shift - - add r0, r0, r2 ; update input pointer - - qadd16 r7, r7, r7 ; 2*[c1|d1] --> we can use smlad and smlsd - ; with 2217*4 and 5352*4 without losing the - ; sign bit (overflow) - - smuad r4, r6, lr ; o0 = (i1+i2)*8 + (i0+i3)*8 - smusd r5, r6, lr ; o2 = (i1+i2)*8 - (i0+i3)*8 - - smlad r6, r7, r12, r11 ; o1 = (c1 * 2217 + d1 * 5352 + 14500) - smlsdx r7, r7, r12, r10 ; o3 = (d1 * 2217 - c1 * 5352 + 7500) - - ldrd r8, r9, [r0] ; [i5 | i4] [i7 | i6] - - pkhbt r3, r4, r6, lsl #4 ; [o1 | o0], keep in register 
for PART 2 - pkhbt r6, r5, r7, lsl #4 ; [o3 | o2] - - str r6, [r1, #4] - - ; coeffs 4-7 - ror r9, r9, #16 ; [i6 | i7] - - qadd16 r6, r8, r9 ; [i5+i6 | i4+i7] = [b1 | a1] without shift - qsub16 r7, r8, r9 ; [i5-i6 | i4-i7] = [c1 | d1] without shift - - add r0, r0, r2 ; update input pointer - - qadd16 r7, r7, r7 ; 2x[c1|d1] --> we can use smlad and smlsd - ; with 2217*4 and 5352*4 without losing the - ; sign bit (overflow) - - smuad r9, r6, lr ; o4 = (i5+i6)*8 + (i4+i7)*8 - smusd r8, r6, lr ; o6 = (i5+i6)*8 - (i4+i7)*8 - - smlad r6, r7, r12, r11 ; o5 = (c1 * 2217 + d1 * 5352 + 14500) - smlsdx r7, r7, r12, r10 ; o7 = (d1 * 2217 - c1 * 5352 + 7500) - - ldrd r4, r5, [r0] ; [i9 | i8] [i11 | i10] - - pkhbt r9, r9, r6, lsl #4 ; [o5 | o4], keep in register for PART 2 - pkhbt r6, r8, r7, lsl #4 ; [o7 | o6] - - str r6, [r1, #12] - - ; coeffs 8-11 - ror r5, r5, #16 ; [i10 | i11] - - qadd16 r6, r4, r5 ; [i9+i10 | i8+i11]=[b1 | a1] without shift - qsub16 r7, r4, r5 ; [i9-i10 | i8-i11]=[c1 | d1] without shift - - add r0, r0, r2 ; update input pointer - - qadd16 r7, r7, r7 ; 2x[c1|d1] --> we can use smlad and smlsd - ; with 2217*4 and 5352*4 without losing the - ; sign bit (overflow) - - smuad r2, r6, lr ; o8 = (i9+i10)*8 + (i8+i11)*8 - smusd r8, r6, lr ; o10 = (i9+i10)*8 - (i8+i11)*8 - - smlad r6, r7, r12, r11 ; o9 = (c1 * 2217 + d1 * 5352 + 14500) - smlsdx r7, r7, r12, r10 ; o11 = (d1 * 2217 - c1 * 5352 + 7500) - - ldrd r4, r5, [r0] ; [i13 | i12] [i15 | i14] - - pkhbt r2, r2, r6, lsl #4 ; [o9 | o8], keep in register for PART 2 - pkhbt r6, r8, r7, lsl #4 ; [o11 | o10] - - str r6, [r1, #20] - - ; coeffs 12-15 - ror r5, r5, #16 ; [i14 | i15] - - qadd16 r6, r4, r5 ; [i13+i14 | i12+i15]=[b1|a1] without shift - qsub16 r7, r4, r5 ; [i13-i14 | i12-i15]=[c1|d1] without shift - - qadd16 r7, r7, r7 ; 2x[c1|d1] --> we can use smlad and smlsd - ; with 2217*4 and 5352*4 without losing the - ; sign bit (overflow) - - smuad r4, r6, lr ; o12 = (i13+i14)*8 + (i12+i15)*8 - smusd r5, r6, lr ; o14 = 
(i13+i14)*8 - (i12+i15)*8 - - smlad r6, r7, r12, r11 ; o13 = (c1 * 2217 + d1 * 5352 + 14500) - smlsdx r7, r7, r12, r10 ; o15 = (d1 * 2217 - c1 * 5352 + 7500) - - pkhbt r0, r4, r6, lsl #4 ; [o13 | o12], keep in register for PART 2 - pkhbt r6, r5, r7, lsl #4 ; [o15 | o14] - - str r6, [r1, #28] - - - ; PART 2 ------------------------------------------------- - ldr r11, c12000 - ldr r10, c51000 - ldr lr, c0x00070007 - - qadd16 r4, r3, r0 ; a1 = [i1+i13 | i0+i12] - qadd16 r5, r9, r2 ; b1 = [i5+i9 | i4+i8] - qsub16 r6, r9, r2 ; c1 = [i5-i9 | i4-i8] - qsub16 r7, r3, r0 ; d1 = [i1-i13 | i0-i12] - - qadd16 r4, r4, lr ; a1 + 7 - - add r0, r11, #0x10000 ; add (d!=0) - - qadd16 r2, r4, r5 ; a1 + b1 + 7 - qsub16 r3, r4, r5 ; a1 - b1 + 7 - - ldr r12, c0x08a914e8 ; [2217 | 5352] - - lsl r8, r2, #16 ; prepare bottom halfword for scaling - asr r2, r2, #4 ; scale top halfword - lsl r9, r3, #16 ; prepare bottom halfword for scaling - asr r3, r3, #4 ; scale top halfword - pkhtb r4, r2, r8, asr #20 ; pack and scale bottom halfword - pkhtb r5, r3, r9, asr #20 ; pack and scale bottom halfword - - smulbt r2, r6, r12 ; [ ------ | c1*2217] - str r4, [r1, #0] ; [ o1 | o0] - smultt r3, r6, r12 ; [c1*2217 | ------ ] - str r5, [r1, #16] ; [ o9 | o8] - - smlabb r8, r7, r12, r2 ; [ ------ | d1*5352] - smlatb r9, r7, r12, r3 ; [d1*5352 | ------ ] - - smulbb r2, r6, r12 ; [ ------ | c1*5352] - smultb r3, r6, r12 ; [c1*5352 | ------ ] - - lsls r6, r7, #16 ; d1 != 0 ? 
- addeq r8, r8, r11 ; c1_b*2217+d1_b*5352+12000 + (d==0) - addne r8, r8, r0 ; c1_b*2217+d1_b*5352+12000 + (d!=0) - asrs r6, r7, #16 - addeq r9, r9, r11 ; c1_t*2217+d1_t*5352+12000 + (d==0) - addne r9, r9, r0 ; c1_t*2217+d1_t*5352+12000 + (d!=0) - - smlabt r4, r7, r12, r10 ; [ ------ | d1*2217] + 51000 - smlatt r5, r7, r12, r10 ; [d1*2217 | ------ ] + 51000 - - pkhtb r9, r9, r8, asr #16 - - sub r4, r4, r2 - sub r5, r5, r3 - - ldr r3, [r1, #4] ; [i3 | i2] - - pkhtb r5, r5, r4, asr #16 ; [o13|o12] - - str r9, [r1, #8] ; [o5 | 04] - - ldr r9, [r1, #12] ; [i7 | i6] - ldr r8, [r1, #28] ; [i15|i14] - ldr r2, [r1, #20] ; [i11|i10] - str r5, [r1, #24] ; [o13|o12] - - qadd16 r4, r3, r8 ; a1 = [i3+i15 | i2+i14] - qadd16 r5, r9, r2 ; b1 = [i7+i11 | i6+i10] - - qadd16 r4, r4, lr ; a1 + 7 - - qsub16 r6, r9, r2 ; c1 = [i7-i11 | i6-i10] - qadd16 r2, r4, r5 ; a1 + b1 + 7 - qsub16 r7, r3, r8 ; d1 = [i3-i15 | i2-i14] - qsub16 r3, r4, r5 ; a1 - b1 + 7 - - lsl r8, r2, #16 ; prepare bottom halfword for scaling - asr r2, r2, #4 ; scale top halfword - lsl r9, r3, #16 ; prepare bottom halfword for scaling - asr r3, r3, #4 ; scale top halfword - pkhtb r4, r2, r8, asr #20 ; pack and scale bottom halfword - pkhtb r5, r3, r9, asr #20 ; pack and scale bottom halfword - - smulbt r2, r6, r12 ; [ ------ | c1*2217] - str r4, [r1, #4] ; [ o3 | o2] - smultt r3, r6, r12 ; [c1*2217 | ------ ] - str r5, [r1, #20] ; [ o11 | o10] - - smlabb r8, r7, r12, r2 ; [ ------ | d1*5352] - smlatb r9, r7, r12, r3 ; [d1*5352 | ------ ] - - smulbb r2, r6, r12 ; [ ------ | c1*5352] - smultb r3, r6, r12 ; [c1*5352 | ------ ] - - lsls r6, r7, #16 ; d1 != 0 ? 
- addeq r8, r8, r11 ; c1_b*2217+d1_b*5352+12000 + (d==0) - addne r8, r8, r0 ; c1_b*2217+d1_b*5352+12000 + (d!=0) - - asrs r6, r7, #16 - addeq r9, r9, r11 ; c1_t*2217+d1_t*5352+12000 + (d==0) - addne r9, r9, r0 ; c1_t*2217+d1_t*5352+12000 + (d!=0) - - smlabt r4, r7, r12, r10 ; [ ------ | d1*2217] + 51000 - smlatt r5, r7, r12, r10 ; [d1*2217 | ------ ] + 51000 - - pkhtb r9, r9, r8, asr #16 - - sub r4, r4, r2 - sub r5, r5, r3 - - str r9, [r1, #12] ; [o7 | o6] - pkhtb r5, r5, r4, asr #16 ; [o15|o14] - - str r5, [r1, #28] ; [o15|o14] - - ldmfd sp!, {r4 - r12, pc} - - ENDP - -; Used constants -c7500 - DCD 7500 -c14500 - DCD 14500 -c0x22a453a0 - DCD 0x22a453a0 -c0x00080008 - DCD 0x00080008 -c12000 - DCD 12000 -c51000 - DCD 51000 -c0x00070007 - DCD 0x00070007 -c0x08a914e8 - DCD 0x08a914e8 - - END diff --git a/libs/libvpx/vp8/encoder/arm/armv6/walsh_v6.asm b/libs/libvpx/vp8/encoder/arm/armv6/walsh_v6.asm deleted file mode 100644 index 5eaf3f25a9..0000000000 --- a/libs/libvpx/vp8/encoder/arm/armv6/walsh_v6.asm +++ /dev/null @@ -1,212 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - - EXPORT |vp8_short_walsh4x4_armv6| - - ARM - REQUIRE8 - PRESERVE8 - - AREA |.text|, CODE, READONLY ; name this block of code - -;short vp8_short_walsh4x4_armv6(short *input, short *output, int pitch) -; r0 short *input, -; r1 short *output, -; r2 int pitch -|vp8_short_walsh4x4_armv6| PROC - - stmdb sp!, {r4 - r11, lr} - - ldrd r4, r5, [r0], r2 - ldr lr, c00040004 - ldrd r6, r7, [r0], r2 - - ; 0-3 - qadd16 r3, r4, r5 ; [d1|a1] [1+3 | 0+2] - qsub16 r4, r4, r5 ; [c1|b1] [1-3 | 0-2] - - ldrd r8, r9, [r0], r2 - ; 4-7 - qadd16 r5, r6, r7 ; [d1|a1] [5+7 | 4+6] - qsub16 r6, r6, r7 ; [c1|b1] [5-7 | 4-6] - - ldrd r10, r11, [r0] - ; 8-11 - qadd16 r7, r8, r9 ; [d1|a1] [9+11 | 8+10] - qsub16 r8, r8, r9 ; [c1|b1] [9-11 | 8-10] - - ; 12-15 - qadd16 r9, r10, r11 ; [d1|a1] [13+15 | 12+14] - qsub16 r10, r10, r11 ; [c1|b1] [13-15 | 12-14] - - - lsls r2, r3, #16 - smuad r11, r3, lr ; A0 = a1<<2 + d1<<2 - addne r11, r11, #1 ; A0 += (a1!=0) - - lsls r2, r7, #16 - smuad r12, r7, lr ; C0 = a1<<2 + d1<<2 - addne r12, r12, #1 ; C0 += (a1!=0) - - add r0, r11, r12 ; a1_0 = A0 + C0 - sub r11, r11, r12 ; b1_0 = A0 - C0 - - lsls r2, r5, #16 - smuad r12, r5, lr ; B0 = a1<<2 + d1<<2 - addne r12, r12, #1 ; B0 += (a1!=0) - - lsls r2, r9, #16 - smuad r2, r9, lr ; D0 = a1<<2 + d1<<2 - addne r2, r2, #1 ; D0 += (a1!=0) - - add lr, r12, r2 ; d1_0 = B0 + D0 - sub r12, r12, r2 ; c1_0 = B0 - D0 - - ; op[0,4,8,12] - adds r2, r0, lr ; a2 = a1_0 + d1_0 - addmi r2, r2, #1 ; += a2 < 0 - add r2, r2, #3 ; += 3 - subs r0, r0, lr ; d2 = a1_0 - d1_0 - mov r2, r2, asr #3 ; >> 3 - strh r2, [r1] ; op[0] - - addmi r0, r0, #1 ; += a2 < 0 - add r0, r0, #3 ; += 3 - ldr lr, c00040004 - mov r0, r0, asr #3 ; >> 3 - strh r0, [r1, #24] ; op[12] - - adds r2, r11, r12 ; b2 = b1_0 + c1_0 - addmi r2, r2, #1 ; += a2 < 0 - add r2, r2, #3 ; += 3 - subs r0, r11, r12 ; c2 = b1_0 - c1_0 - mov r2, r2, asr #3 ; >> 3 - strh r2, [r1, #8] ; op[4] - - addmi r0, r0, #1 ; += a2 < 0 - add r0, r0, #3 ; += 3 - smusd r3, r3, lr ; A3 = a1<<2 - 
d1<<2 - smusd r7, r7, lr ; C3 = a1<<2 - d1<<2 - mov r0, r0, asr #3 ; >> 3 - strh r0, [r1, #16] ; op[8] - - - ; op[3,7,11,15] - add r0, r3, r7 ; a1_3 = A3 + C3 - sub r3, r3, r7 ; b1_3 = A3 - C3 - - smusd r5, r5, lr ; B3 = a1<<2 - d1<<2 - smusd r9, r9, lr ; D3 = a1<<2 - d1<<2 - add r7, r5, r9 ; d1_3 = B3 + D3 - sub r5, r5, r9 ; c1_3 = B3 - D3 - - adds r2, r0, r7 ; a2 = a1_3 + d1_3 - addmi r2, r2, #1 ; += a2 < 0 - add r2, r2, #3 ; += 3 - adds r9, r3, r5 ; b2 = b1_3 + c1_3 - mov r2, r2, asr #3 ; >> 3 - strh r2, [r1, #6] ; op[3] - - addmi r9, r9, #1 ; += a2 < 0 - add r9, r9, #3 ; += 3 - subs r2, r3, r5 ; c2 = b1_3 - c1_3 - mov r9, r9, asr #3 ; >> 3 - strh r9, [r1, #14] ; op[7] - - addmi r2, r2, #1 ; += a2 < 0 - add r2, r2, #3 ; += 3 - subs r9, r0, r7 ; d2 = a1_3 - d1_3 - mov r2, r2, asr #3 ; >> 3 - strh r2, [r1, #22] ; op[11] - - addmi r9, r9, #1 ; += a2 < 0 - add r9, r9, #3 ; += 3 - smuad r3, r4, lr ; A1 = b1<<2 + c1<<2 - smuad r5, r8, lr ; C1 = b1<<2 + c1<<2 - mov r9, r9, asr #3 ; >> 3 - strh r9, [r1, #30] ; op[15] - - ; op[1,5,9,13] - add r0, r3, r5 ; a1_1 = A1 + C1 - sub r3, r3, r5 ; b1_1 = A1 - C1 - - smuad r7, r6, lr ; B1 = b1<<2 + c1<<2 - smuad r9, r10, lr ; D1 = b1<<2 + c1<<2 - add r5, r7, r9 ; d1_1 = B1 + D1 - sub r7, r7, r9 ; c1_1 = B1 - D1 - - adds r2, r0, r5 ; a2 = a1_1 + d1_1 - addmi r2, r2, #1 ; += a2 < 0 - add r2, r2, #3 ; += 3 - adds r9, r3, r7 ; b2 = b1_1 + c1_1 - mov r2, r2, asr #3 ; >> 3 - strh r2, [r1, #2] ; op[1] - - addmi r9, r9, #1 ; += a2 < 0 - add r9, r9, #3 ; += 3 - subs r2, r3, r7 ; c2 = b1_1 - c1_1 - mov r9, r9, asr #3 ; >> 3 - strh r9, [r1, #10] ; op[5] - - addmi r2, r2, #1 ; += a2 < 0 - add r2, r2, #3 ; += 3 - subs r9, r0, r5 ; d2 = a1_1 - d1_1 - mov r2, r2, asr #3 ; >> 3 - strh r2, [r1, #18] ; op[9] - - addmi r9, r9, #1 ; += a2 < 0 - add r9, r9, #3 ; += 3 - smusd r4, r4, lr ; A2 = b1<<2 - c1<<2 - smusd r8, r8, lr ; C2 = b1<<2 - c1<<2 - mov r9, r9, asr #3 ; >> 3 - strh r9, [r1, #26] ; op[13] - - - ; op[2,6,10,14] - add r11, r4, r8 ; a1_2 = 
A2 + C2 - sub r12, r4, r8 ; b1_2 = A2 - C2 - - smusd r6, r6, lr ; B2 = b1<<2 - c1<<2 - smusd r10, r10, lr ; D2 = b1<<2 - c1<<2 - add r4, r6, r10 ; d1_2 = B2 + D2 - sub r8, r6, r10 ; c1_2 = B2 - D2 - - adds r2, r11, r4 ; a2 = a1_2 + d1_2 - addmi r2, r2, #1 ; += a2 < 0 - add r2, r2, #3 ; += 3 - adds r9, r12, r8 ; b2 = b1_2 + c1_2 - mov r2, r2, asr #3 ; >> 3 - strh r2, [r1, #4] ; op[2] - - addmi r9, r9, #1 ; += a2 < 0 - add r9, r9, #3 ; += 3 - subs r2, r12, r8 ; c2 = b1_2 - c1_2 - mov r9, r9, asr #3 ; >> 3 - strh r9, [r1, #12] ; op[6] - - addmi r2, r2, #1 ; += a2 < 0 - add r2, r2, #3 ; += 3 - subs r9, r11, r4 ; d2 = a1_2 - d1_2 - mov r2, r2, asr #3 ; >> 3 - strh r2, [r1, #20] ; op[10] - - addmi r9, r9, #1 ; += a2 < 0 - add r9, r9, #3 ; += 3 - mov r9, r9, asr #3 ; >> 3 - strh r9, [r1, #28] ; op[14] - - - ldmia sp!, {r4 - r11, pc} - ENDP ; |vp8_short_walsh4x4_armv6| - -c00040004 - DCD 0x00040004 - - END diff --git a/libs/libvpx/vp8/encoder/arm/dct_arm.c b/libs/libvpx/vp8/encoder/arm/dct_arm.c deleted file mode 100644 index f71300d2c6..0000000000 --- a/libs/libvpx/vp8/encoder/arm/dct_arm.c +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (c) 2011 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "vpx_config.h" -#include "vp8_rtcd.h" - -#if HAVE_MEDIA - -void vp8_short_fdct8x4_armv6(short *input, short *output, int pitch) -{ - vp8_short_fdct4x4_armv6(input, output, pitch); - vp8_short_fdct4x4_armv6(input + 4, output + 16, pitch); -} - -#endif /* HAVE_MEDIA */ diff --git a/libs/libvpx/vp8/encoder/arm/neon/denoising_neon.c b/libs/libvpx/vp8/encoder/arm/neon/denoising_neon.c index 08be76e433..67267b8f3a 100644 --- a/libs/libvpx/vp8/encoder/arm/neon/denoising_neon.c +++ b/libs/libvpx/vp8/encoder/arm/neon/denoising_neon.c @@ -48,431 +48,413 @@ int vp8_denoiser_filter_neon(unsigned char *mc_running_avg_y, int mc_running_avg_y_stride, unsigned char *running_avg_y, - int running_avg_y_stride, - unsigned char *sig, int sig_stride, - unsigned int motion_magnitude, + int running_avg_y_stride, unsigned char *sig, + int sig_stride, unsigned int motion_magnitude, int increase_denoising) { - /* If motion_magnitude is small, making the denoiser more aggressive by - * increasing the adjustment for each level, level1 adjustment is - * increased, the deltas stay the same. + /* If motion_magnitude is small, making the denoiser more aggressive by + * increasing the adjustment for each level, level1 adjustment is + * increased, the deltas stay the same. + */ + int shift_inc = + (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) + ? 1 + : 0; + const uint8x16_t v_level1_adjustment = vmovq_n_u8( + (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 + shift_inc : 3); + const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1); + const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2); + const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc); + const uint8x16_t v_level2_threshold = vdupq_n_u8(8); + const uint8x16_t v_level3_threshold = vdupq_n_u8(16); + int64x2_t v_sum_diff_total = vdupq_n_s64(0); + + /* Go over lines. */ + int r; + for (r = 0; r < 16; ++r) { + /* Load inputs. 
*/ + const uint8x16_t v_sig = vld1q_u8(sig); + const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y); + + /* Calculate absolute difference and sign masks. */ + const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y); + const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y); + const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y); + + /* Figure out which level that put us in. */ + const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff); + const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff); + const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff); + + /* Calculate absolute adjustments for level 1, 2 and 3. */ + const uint8x16_t v_level2_adjustment = + vandq_u8(v_level2_mask, v_delta_level_1_and_2); + const uint8x16_t v_level3_adjustment = + vandq_u8(v_level3_mask, v_delta_level_2_and_3); + const uint8x16_t v_level1and2_adjustment = + vaddq_u8(v_level1_adjustment, v_level2_adjustment); + const uint8x16_t v_level1and2and3_adjustment = + vaddq_u8(v_level1and2_adjustment, v_level3_adjustment); + + /* Figure adjustment absolute value by selecting between the absolute + * difference if in level0 or the value for level 1, 2 and 3. */ - int shift_inc = (increase_denoising && - motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 1 : 0; - const uint8x16_t v_level1_adjustment = vmovq_n_u8( - (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 4 + shift_inc : 3); - const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1); - const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2); - const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc); - const uint8x16_t v_level2_threshold = vdupq_n_u8(8); - const uint8x16_t v_level3_threshold = vdupq_n_u8(16); - int64x2_t v_sum_diff_total = vdupq_n_s64(0); + const uint8x16_t v_abs_adjustment = + vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff); - /* Go over lines. 
*/ - int r; - for (r = 0; r < 16; ++r) { - /* Load inputs. */ - const uint8x16_t v_sig = vld1q_u8(sig); - const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y); + /* Calculate positive and negative adjustments. Apply them to the signal + * and accumulate them. Adjustments are less than eight and the maximum + * sum of them (7 * 16) can fit in a signed char. + */ + const uint8x16_t v_pos_adjustment = + vandq_u8(v_diff_pos_mask, v_abs_adjustment); + const uint8x16_t v_neg_adjustment = + vandq_u8(v_diff_neg_mask, v_abs_adjustment); - /* Calculate absolute difference and sign masks. */ - const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y); - const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg_y); - const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg_y); + uint8x16_t v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment); + v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment); - /* Figure out which level that put us in. */ - const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, - v_abs_diff); - const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, - v_abs_diff); - const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, - v_abs_diff); + /* Store results. */ + vst1q_u8(running_avg_y, v_running_avg_y); - /* Calculate absolute adjustments for level 1, 2 and 3. */ - const uint8x16_t v_level2_adjustment = vandq_u8(v_level2_mask, - v_delta_level_1_and_2); - const uint8x16_t v_level3_adjustment = vandq_u8(v_level3_mask, - v_delta_level_2_and_3); - const uint8x16_t v_level1and2_adjustment = vaddq_u8(v_level1_adjustment, - v_level2_adjustment); - const uint8x16_t v_level1and2and3_adjustment = vaddq_u8( - v_level1and2_adjustment, v_level3_adjustment); + /* Sum all the accumulators to have the sum of all pixel differences + * for this macroblock. 
+ */ + { + const int8x16_t v_sum_diff = + vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment), + vreinterpretq_s8_u8(v_neg_adjustment)); - /* Figure adjustment absolute value by selecting between the absolute - * difference if in level0 or the value for level 1, 2 and 3. - */ - const uint8x16_t v_abs_adjustment = vbslq_u8(v_level1_mask, - v_level1and2and3_adjustment, v_abs_diff); + const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff); - /* Calculate positive and negative adjustments. Apply them to the signal - * and accumulate them. Adjustments are less than eight and the maximum - * sum of them (7 * 16) can fit in a signed char. - */ - const uint8x16_t v_pos_adjustment = vandq_u8(v_diff_pos_mask, - v_abs_adjustment); - const uint8x16_t v_neg_adjustment = vandq_u8(v_diff_neg_mask, - v_abs_adjustment); + const int32x4_t fedc_ba98_7654_3210 = + vpaddlq_s16(fe_dc_ba_98_76_54_32_10); - uint8x16_t v_running_avg_y = vqaddq_u8(v_sig, v_pos_adjustment); - v_running_avg_y = vqsubq_u8(v_running_avg_y, v_neg_adjustment); + const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210); - /* Store results. */ - vst1q_u8(running_avg_y, v_running_avg_y); + v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210); + } - /* Sum all the accumulators to have the sum of all pixel differences - * for this macroblock. - */ - { + /* Update pointers for next iteration. */ + sig += sig_stride; + mc_running_avg_y += mc_running_avg_y_stride; + running_avg_y += running_avg_y_stride; + } + + /* Too much adjustments => copy block. 
*/ + { + int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total), + vget_low_s64(v_sum_diff_total)); + int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0); + int sum_diff_thresh = SUM_DIFF_THRESHOLD; + + if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH; + if (sum_diff > sum_diff_thresh) { + // Before returning to copy the block (i.e., apply no denoising), + // checK if we can still apply some (weaker) temporal filtering to + // this block, that would otherwise not be denoised at all. Simplest + // is to apply an additional adjustment to running_avg_y to bring it + // closer to sig. The adjustment is capped by a maximum delta, and + // chosen such that in most cases the resulting sum_diff will be + // within the accceptable range given by sum_diff_thresh. + + // The delta is set by the excess of absolute pixel diff over the + // threshold. + int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1; + // Only apply the adjustment for max delta up to 3. + if (delta < 4) { + const uint8x16_t k_delta = vmovq_n_u8(delta); + sig -= sig_stride * 16; + mc_running_avg_y -= mc_running_avg_y_stride * 16; + running_avg_y -= running_avg_y_stride * 16; + for (r = 0; r < 16; ++r) { + uint8x16_t v_running_avg_y = vld1q_u8(running_avg_y); + const uint8x16_t v_sig = vld1q_u8(sig); + const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y); + + /* Calculate absolute difference and sign masks. */ + const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg_y); + const uint8x16_t v_diff_pos_mask = + vcltq_u8(v_sig, v_mc_running_avg_y); + const uint8x16_t v_diff_neg_mask = + vcgtq_u8(v_sig, v_mc_running_avg_y); + // Clamp absolute difference to delta to get the adjustment. 
+ const uint8x16_t v_abs_adjustment = vminq_u8(v_abs_diff, (k_delta)); + + const uint8x16_t v_pos_adjustment = + vandq_u8(v_diff_pos_mask, v_abs_adjustment); + const uint8x16_t v_neg_adjustment = + vandq_u8(v_diff_neg_mask, v_abs_adjustment); + + v_running_avg_y = vqsubq_u8(v_running_avg_y, v_pos_adjustment); + v_running_avg_y = vqaddq_u8(v_running_avg_y, v_neg_adjustment); + + /* Store results. */ + vst1q_u8(running_avg_y, v_running_avg_y); + + { const int8x16_t v_sum_diff = - vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment), - vreinterpretq_s8_u8(v_neg_adjustment)); + vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment), + vreinterpretq_s8_u8(v_pos_adjustment)); const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff); - const int32x4_t fedc_ba98_7654_3210 = vpaddlq_s16(fe_dc_ba_98_76_54_32_10); - const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210); v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210); + } + /* Update pointers for next iteration. */ + sig += sig_stride; + mc_running_avg_y += mc_running_avg_y_stride; + running_avg_y += running_avg_y_stride; } + { + // Update the sum of all pixel differences of this MB. + x = vqadd_s64(vget_high_s64(v_sum_diff_total), + vget_low_s64(v_sum_diff_total)); + sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0); - /* Update pointers for next iteration. */ - sig += sig_stride; - mc_running_avg_y += mc_running_avg_y_stride; - running_avg_y += running_avg_y_stride; - } - - /* Too much adjustments => copy block. 
*/ - { - int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total), - vget_low_s64(v_sum_diff_total)); - int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0); - int sum_diff_thresh = SUM_DIFF_THRESHOLD; - - if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH; - if (sum_diff > sum_diff_thresh) { - // Before returning to copy the block (i.e., apply no denoising), - // checK if we can still apply some (weaker) temporal filtering to - // this block, that would otherwise not be denoised at all. Simplest - // is to apply an additional adjustment to running_avg_y to bring it - // closer to sig. The adjustment is capped by a maximum delta, and - // chosen such that in most cases the resulting sum_diff will be - // within the accceptable range given by sum_diff_thresh. - - // The delta is set by the excess of absolute pixel diff over the - // threshold. - int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1; - // Only apply the adjustment for max delta up to 3. - if (delta < 4) { - const uint8x16_t k_delta = vmovq_n_u8(delta); - sig -= sig_stride * 16; - mc_running_avg_y -= mc_running_avg_y_stride * 16; - running_avg_y -= running_avg_y_stride * 16; - for (r = 0; r < 16; ++r) { - uint8x16_t v_running_avg_y = vld1q_u8(running_avg_y); - const uint8x16_t v_sig = vld1q_u8(sig); - const uint8x16_t v_mc_running_avg_y = vld1q_u8(mc_running_avg_y); - - /* Calculate absolute difference and sign masks. */ - const uint8x16_t v_abs_diff = vabdq_u8(v_sig, - v_mc_running_avg_y); - const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, - v_mc_running_avg_y); - const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, - v_mc_running_avg_y); - // Clamp absolute difference to delta to get the adjustment. 
- const uint8x16_t v_abs_adjustment = - vminq_u8(v_abs_diff, (k_delta)); - - const uint8x16_t v_pos_adjustment = vandq_u8(v_diff_pos_mask, - v_abs_adjustment); - const uint8x16_t v_neg_adjustment = vandq_u8(v_diff_neg_mask, - v_abs_adjustment); - - v_running_avg_y = vqsubq_u8(v_running_avg_y, v_pos_adjustment); - v_running_avg_y = vqaddq_u8(v_running_avg_y, v_neg_adjustment); - - /* Store results. */ - vst1q_u8(running_avg_y, v_running_avg_y); - - { - const int8x16_t v_sum_diff = - vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment), - vreinterpretq_s8_u8(v_pos_adjustment)); - - const int16x8_t fe_dc_ba_98_76_54_32_10 = - vpaddlq_s8(v_sum_diff); - const int32x4_t fedc_ba98_7654_3210 = - vpaddlq_s16(fe_dc_ba_98_76_54_32_10); - const int64x2_t fedcba98_76543210 = - vpaddlq_s32(fedc_ba98_7654_3210); - - v_sum_diff_total = vqaddq_s64(v_sum_diff_total, - fedcba98_76543210); - } - /* Update pointers for next iteration. */ - sig += sig_stride; - mc_running_avg_y += mc_running_avg_y_stride; - running_avg_y += running_avg_y_stride; - } - { - // Update the sum of all pixel differences of this MB. - x = vqadd_s64(vget_high_s64(v_sum_diff_total), - vget_low_s64(v_sum_diff_total)); - sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0); - - if (sum_diff > sum_diff_thresh) { - return COPY_BLOCK; - } - } - } else { + if (sum_diff > sum_diff_thresh) { return COPY_BLOCK; } } + } else { + return COPY_BLOCK; + } } + } - /* Tell above level that block was filtered. */ - running_avg_y -= running_avg_y_stride * 16; - sig -= sig_stride * 16; + /* Tell above level that block was filtered. 
*/ + running_avg_y -= running_avg_y_stride * 16; + sig -= sig_stride * 16; - vp8_copy_mem16x16(running_avg_y, running_avg_y_stride, sig, sig_stride); + vp8_copy_mem16x16(running_avg_y, running_avg_y_stride, sig, sig_stride); - return FILTER_BLOCK; + return FILTER_BLOCK; } int vp8_denoiser_filter_uv_neon(unsigned char *mc_running_avg, - int mc_running_avg_stride, - unsigned char *running_avg, - int running_avg_stride, - unsigned char *sig, int sig_stride, - unsigned int motion_magnitude, - int increase_denoising) { - /* If motion_magnitude is small, making the denoiser more aggressive by - * increasing the adjustment for each level, level1 adjustment is - * increased, the deltas stay the same. - */ - int shift_inc = (increase_denoising && - motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) ? 1 : 0; - const uint8x16_t v_level1_adjustment = vmovq_n_u8( - (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) ? 4 + shift_inc : 3); + int mc_running_avg_stride, + unsigned char *running_avg, + int running_avg_stride, unsigned char *sig, + int sig_stride, unsigned int motion_magnitude, + int increase_denoising) { + /* If motion_magnitude is small, making the denoiser more aggressive by + * increasing the adjustment for each level, level1 adjustment is + * increased, the deltas stay the same. + */ + int shift_inc = + (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) + ? 1 + : 0; + const uint8x16_t v_level1_adjustment = vmovq_n_u8( + (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) ? 
4 + shift_inc : 3); - const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1); - const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2); - const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc); - const uint8x16_t v_level2_threshold = vdupq_n_u8(8); - const uint8x16_t v_level3_threshold = vdupq_n_u8(16); - int64x2_t v_sum_diff_total = vdupq_n_s64(0); - int r; + const uint8x16_t v_delta_level_1_and_2 = vdupq_n_u8(1); + const uint8x16_t v_delta_level_2_and_3 = vdupq_n_u8(2); + const uint8x16_t v_level1_threshold = vmovq_n_u8(4 + shift_inc); + const uint8x16_t v_level2_threshold = vdupq_n_u8(8); + const uint8x16_t v_level3_threshold = vdupq_n_u8(16); + int64x2_t v_sum_diff_total = vdupq_n_s64(0); + int r; + { + uint16x4_t v_sum_block = vdup_n_u16(0); + + // Avoid denoising color signal if its close to average level. + for (r = 0; r < 8; ++r) { + const uint8x8_t v_sig = vld1_u8(sig); + const uint16x4_t _76_54_32_10 = vpaddl_u8(v_sig); + v_sum_block = vqadd_u16(v_sum_block, _76_54_32_10); + sig += sig_stride; + } + sig -= sig_stride * 8; { - uint16x4_t v_sum_block = vdup_n_u16(0); - - // Avoid denoising color signal if its close to average level. - for (r = 0; r < 8; ++r) { - const uint8x8_t v_sig = vld1_u8(sig); - const uint16x4_t _76_54_32_10 = vpaddl_u8(v_sig); - v_sum_block = vqadd_u16(v_sum_block, _76_54_32_10); - sig += sig_stride; - } - sig -= sig_stride * 8; - { - const uint32x2_t _7654_3210 = vpaddl_u16(v_sum_block); - const uint64x1_t _76543210 = vpaddl_u32(_7654_3210); - const int sum_block = - vget_lane_s32(vreinterpret_s32_u64(_76543210), 0); - if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) { - return COPY_BLOCK; - } + const uint32x2_t _7654_3210 = vpaddl_u16(v_sum_block); + const uint64x1_t _76543210 = vpaddl_u32(_7654_3210); + const int sum_block = vget_lane_s32(vreinterpret_s32_u64(_76543210), 0); + if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) { + return COPY_BLOCK; } } + } - /* Go over lines. 
*/ - for (r = 0; r < 4; ++r) { - /* Load inputs. */ - const uint8x8_t v_sig_lo = vld1_u8(sig); - const uint8x8_t v_sig_hi = vld1_u8(&sig[sig_stride]); - const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi); - const uint8x8_t v_mc_running_avg_lo = vld1_u8(mc_running_avg); - const uint8x8_t v_mc_running_avg_hi = - vld1_u8(&mc_running_avg[mc_running_avg_stride]); - const uint8x16_t v_mc_running_avg = - vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi); - /* Calculate absolute difference and sign masks. */ - const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg); - const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg); - const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg); + /* Go over lines. */ + for (r = 0; r < 4; ++r) { + /* Load inputs. */ + const uint8x8_t v_sig_lo = vld1_u8(sig); + const uint8x8_t v_sig_hi = vld1_u8(&sig[sig_stride]); + const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi); + const uint8x8_t v_mc_running_avg_lo = vld1_u8(mc_running_avg); + const uint8x8_t v_mc_running_avg_hi = + vld1_u8(&mc_running_avg[mc_running_avg_stride]); + const uint8x16_t v_mc_running_avg = + vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi); + /* Calculate absolute difference and sign masks. */ + const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg); + const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg); + const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg); - /* Figure out which level that put us in. */ - const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, - v_abs_diff); - const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, - v_abs_diff); - const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, - v_abs_diff); + /* Figure out which level that put us in. 
*/ + const uint8x16_t v_level1_mask = vcleq_u8(v_level1_threshold, v_abs_diff); + const uint8x16_t v_level2_mask = vcleq_u8(v_level2_threshold, v_abs_diff); + const uint8x16_t v_level3_mask = vcleq_u8(v_level3_threshold, v_abs_diff); - /* Calculate absolute adjustments for level 1, 2 and 3. */ - const uint8x16_t v_level2_adjustment = vandq_u8(v_level2_mask, - v_delta_level_1_and_2); - const uint8x16_t v_level3_adjustment = vandq_u8(v_level3_mask, - v_delta_level_2_and_3); - const uint8x16_t v_level1and2_adjustment = vaddq_u8(v_level1_adjustment, - v_level2_adjustment); - const uint8x16_t v_level1and2and3_adjustment = vaddq_u8( - v_level1and2_adjustment, v_level3_adjustment); + /* Calculate absolute adjustments for level 1, 2 and 3. */ + const uint8x16_t v_level2_adjustment = + vandq_u8(v_level2_mask, v_delta_level_1_and_2); + const uint8x16_t v_level3_adjustment = + vandq_u8(v_level3_mask, v_delta_level_2_and_3); + const uint8x16_t v_level1and2_adjustment = + vaddq_u8(v_level1_adjustment, v_level2_adjustment); + const uint8x16_t v_level1and2and3_adjustment = + vaddq_u8(v_level1and2_adjustment, v_level3_adjustment); - /* Figure adjustment absolute value by selecting between the absolute - * difference if in level0 or the value for level 1, 2 and 3. - */ - const uint8x16_t v_abs_adjustment = vbslq_u8(v_level1_mask, - v_level1and2and3_adjustment, v_abs_diff); + /* Figure adjustment absolute value by selecting between the absolute + * difference if in level0 or the value for level 1, 2 and 3. + */ + const uint8x16_t v_abs_adjustment = + vbslq_u8(v_level1_mask, v_level1and2and3_adjustment, v_abs_diff); - /* Calculate positive and negative adjustments. Apply them to the signal - * and accumulate them. Adjustments are less than eight and the maximum - * sum of them (7 * 16) can fit in a signed char. 
- */ - const uint8x16_t v_pos_adjustment = vandq_u8(v_diff_pos_mask, - v_abs_adjustment); - const uint8x16_t v_neg_adjustment = vandq_u8(v_diff_neg_mask, - v_abs_adjustment); + /* Calculate positive and negative adjustments. Apply them to the signal + * and accumulate them. Adjustments are less than eight and the maximum + * sum of them (7 * 16) can fit in a signed char. + */ + const uint8x16_t v_pos_adjustment = + vandq_u8(v_diff_pos_mask, v_abs_adjustment); + const uint8x16_t v_neg_adjustment = + vandq_u8(v_diff_neg_mask, v_abs_adjustment); - uint8x16_t v_running_avg = vqaddq_u8(v_sig, v_pos_adjustment); - v_running_avg = vqsubq_u8(v_running_avg, v_neg_adjustment); + uint8x16_t v_running_avg = vqaddq_u8(v_sig, v_pos_adjustment); + v_running_avg = vqsubq_u8(v_running_avg, v_neg_adjustment); - /* Store results. */ - vst1_u8(running_avg, vget_low_u8(v_running_avg)); - vst1_u8(&running_avg[running_avg_stride], vget_high_u8(v_running_avg)); + /* Store results. */ + vst1_u8(running_avg, vget_low_u8(v_running_avg)); + vst1_u8(&running_avg[running_avg_stride], vget_high_u8(v_running_avg)); - /* Sum all the accumulators to have the sum of all pixel differences - * for this macroblock. - */ - { + /* Sum all the accumulators to have the sum of all pixel differences + * for this macroblock. + */ + { + const int8x16_t v_sum_diff = + vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment), + vreinterpretq_s8_u8(v_neg_adjustment)); + + const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff); + + const int32x4_t fedc_ba98_7654_3210 = + vpaddlq_s16(fe_dc_ba_98_76_54_32_10); + + const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210); + + v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210); + } + + /* Update pointers for next iteration. */ + sig += sig_stride * 2; + mc_running_avg += mc_running_avg_stride * 2; + running_avg += running_avg_stride * 2; + } + + /* Too much adjustments => copy block. 
*/ + { + int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total), + vget_low_s64(v_sum_diff_total)); + int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0); + int sum_diff_thresh = SUM_DIFF_THRESHOLD_UV; + if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV; + if (sum_diff > sum_diff_thresh) { + // Before returning to copy the block (i.e., apply no denoising), + // checK if we can still apply some (weaker) temporal filtering to + // this block, that would otherwise not be denoised at all. Simplest + // is to apply an additional adjustment to running_avg_y to bring it + // closer to sig. The adjustment is capped by a maximum delta, and + // chosen such that in most cases the resulting sum_diff will be + // within the accceptable range given by sum_diff_thresh. + + // The delta is set by the excess of absolute pixel diff over the + // threshold. + int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1; + // Only apply the adjustment for max delta up to 3. + if (delta < 4) { + const uint8x16_t k_delta = vmovq_n_u8(delta); + sig -= sig_stride * 8; + mc_running_avg -= mc_running_avg_stride * 8; + running_avg -= running_avg_stride * 8; + for (r = 0; r < 4; ++r) { + const uint8x8_t v_sig_lo = vld1_u8(sig); + const uint8x8_t v_sig_hi = vld1_u8(&sig[sig_stride]); + const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi); + const uint8x8_t v_mc_running_avg_lo = vld1_u8(mc_running_avg); + const uint8x8_t v_mc_running_avg_hi = + vld1_u8(&mc_running_avg[mc_running_avg_stride]); + const uint8x16_t v_mc_running_avg = + vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi); + /* Calculate absolute difference and sign masks. */ + const uint8x16_t v_abs_diff = vabdq_u8(v_sig, v_mc_running_avg); + const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, v_mc_running_avg); + const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, v_mc_running_avg); + // Clamp absolute difference to delta to get the adjustment. 
+ const uint8x16_t v_abs_adjustment = vminq_u8(v_abs_diff, (k_delta)); + + const uint8x16_t v_pos_adjustment = + vandq_u8(v_diff_pos_mask, v_abs_adjustment); + const uint8x16_t v_neg_adjustment = + vandq_u8(v_diff_neg_mask, v_abs_adjustment); + const uint8x8_t v_running_avg_lo = vld1_u8(running_avg); + const uint8x8_t v_running_avg_hi = + vld1_u8(&running_avg[running_avg_stride]); + uint8x16_t v_running_avg = + vcombine_u8(v_running_avg_lo, v_running_avg_hi); + + v_running_avg = vqsubq_u8(v_running_avg, v_pos_adjustment); + v_running_avg = vqaddq_u8(v_running_avg, v_neg_adjustment); + + /* Store results. */ + vst1_u8(running_avg, vget_low_u8(v_running_avg)); + vst1_u8(&running_avg[running_avg_stride], + vget_high_u8(v_running_avg)); + + { const int8x16_t v_sum_diff = - vqsubq_s8(vreinterpretq_s8_u8(v_pos_adjustment), - vreinterpretq_s8_u8(v_neg_adjustment)); + vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment), + vreinterpretq_s8_u8(v_pos_adjustment)); const int16x8_t fe_dc_ba_98_76_54_32_10 = vpaddlq_s8(v_sum_diff); - const int32x4_t fedc_ba98_7654_3210 = vpaddlq_s16(fe_dc_ba_98_76_54_32_10); - const int64x2_t fedcba98_76543210 = vpaddlq_s32(fedc_ba98_7654_3210); v_sum_diff_total = vqaddq_s64(v_sum_diff_total, fedcba98_76543210); + } + /* Update pointers for next iteration. */ + sig += sig_stride * 2; + mc_running_avg += mc_running_avg_stride * 2; + running_avg += running_avg_stride * 2; } + { + // Update the sum of all pixel differences of this MB. + x = vqadd_s64(vget_high_s64(v_sum_diff_total), + vget_low_s64(v_sum_diff_total)); + sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0); - /* Update pointers for next iteration. */ - sig += sig_stride * 2; - mc_running_avg += mc_running_avg_stride * 2; - running_avg += running_avg_stride * 2; - } - - - /* Too much adjustments => copy block. 
*/ - { - int64x1_t x = vqadd_s64(vget_high_s64(v_sum_diff_total), - vget_low_s64(v_sum_diff_total)); - int sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0); - int sum_diff_thresh = SUM_DIFF_THRESHOLD_UV; - if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV; - if (sum_diff > sum_diff_thresh) { - // Before returning to copy the block (i.e., apply no denoising), - // checK if we can still apply some (weaker) temporal filtering to - // this block, that would otherwise not be denoised at all. Simplest - // is to apply an additional adjustment to running_avg_y to bring it - // closer to sig. The adjustment is capped by a maximum delta, and - // chosen such that in most cases the resulting sum_diff will be - // within the accceptable range given by sum_diff_thresh. - - // The delta is set by the excess of absolute pixel diff over the - // threshold. - int delta = ((sum_diff - sum_diff_thresh) >> 8) + 1; - // Only apply the adjustment for max delta up to 3. - if (delta < 4) { - const uint8x16_t k_delta = vmovq_n_u8(delta); - sig -= sig_stride * 8; - mc_running_avg -= mc_running_avg_stride * 8; - running_avg -= running_avg_stride * 8; - for (r = 0; r < 4; ++r) { - const uint8x8_t v_sig_lo = vld1_u8(sig); - const uint8x8_t v_sig_hi = vld1_u8(&sig[sig_stride]); - const uint8x16_t v_sig = vcombine_u8(v_sig_lo, v_sig_hi); - const uint8x8_t v_mc_running_avg_lo = vld1_u8(mc_running_avg); - const uint8x8_t v_mc_running_avg_hi = - vld1_u8(&mc_running_avg[mc_running_avg_stride]); - const uint8x16_t v_mc_running_avg = - vcombine_u8(v_mc_running_avg_lo, v_mc_running_avg_hi); - /* Calculate absolute difference and sign masks. */ - const uint8x16_t v_abs_diff = vabdq_u8(v_sig, - v_mc_running_avg); - const uint8x16_t v_diff_pos_mask = vcltq_u8(v_sig, - v_mc_running_avg); - const uint8x16_t v_diff_neg_mask = vcgtq_u8(v_sig, - v_mc_running_avg); - // Clamp absolute difference to delta to get the adjustment. 
- const uint8x16_t v_abs_adjustment = - vminq_u8(v_abs_diff, (k_delta)); - - const uint8x16_t v_pos_adjustment = vandq_u8(v_diff_pos_mask, - v_abs_adjustment); - const uint8x16_t v_neg_adjustment = vandq_u8(v_diff_neg_mask, - v_abs_adjustment); - const uint8x8_t v_running_avg_lo = vld1_u8(running_avg); - const uint8x8_t v_running_avg_hi = - vld1_u8(&running_avg[running_avg_stride]); - uint8x16_t v_running_avg = - vcombine_u8(v_running_avg_lo, v_running_avg_hi); - - v_running_avg = vqsubq_u8(v_running_avg, v_pos_adjustment); - v_running_avg = vqaddq_u8(v_running_avg, v_neg_adjustment); - - /* Store results. */ - vst1_u8(running_avg, vget_low_u8(v_running_avg)); - vst1_u8(&running_avg[running_avg_stride], - vget_high_u8(v_running_avg)); - - { - const int8x16_t v_sum_diff = - vqsubq_s8(vreinterpretq_s8_u8(v_neg_adjustment), - vreinterpretq_s8_u8(v_pos_adjustment)); - - const int16x8_t fe_dc_ba_98_76_54_32_10 = - vpaddlq_s8(v_sum_diff); - const int32x4_t fedc_ba98_7654_3210 = - vpaddlq_s16(fe_dc_ba_98_76_54_32_10); - const int64x2_t fedcba98_76543210 = - vpaddlq_s32(fedc_ba98_7654_3210); - - v_sum_diff_total = vqaddq_s64(v_sum_diff_total, - fedcba98_76543210); - } - /* Update pointers for next iteration. */ - sig += sig_stride * 2; - mc_running_avg += mc_running_avg_stride * 2; - running_avg += running_avg_stride * 2; - } - { - // Update the sum of all pixel differences of this MB. - x = vqadd_s64(vget_high_s64(v_sum_diff_total), - vget_low_s64(v_sum_diff_total)); - sum_diff = vget_lane_s32(vabs_s32(vreinterpret_s32_s64(x)), 0); - - if (sum_diff > sum_diff_thresh) { - return COPY_BLOCK; - } - } - } else { + if (sum_diff > sum_diff_thresh) { return COPY_BLOCK; } } + } else { + return COPY_BLOCK; + } } + } - /* Tell above level that block was filtered. */ - running_avg -= running_avg_stride * 8; - sig -= sig_stride * 8; + /* Tell above level that block was filtered. 
*/ + running_avg -= running_avg_stride * 8; + sig -= sig_stride * 8; - vp8_copy_mem8x8(running_avg, running_avg_stride, sig, sig_stride); + vp8_copy_mem8x8(running_avg, running_avg_stride, sig, sig_stride); - return FILTER_BLOCK; + return FILTER_BLOCK; } diff --git a/libs/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.c b/libs/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.c index e5824bfb21..c42005df6c 100644 --- a/libs/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.c +++ b/libs/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.c @@ -11,79 +11,73 @@ #include #include "vp8/encoder/block.h" -static const uint16_t inv_zig_zag[16] = { - 1, 2, 6, 7, - 3, 5, 8, 13, - 4, 9, 12, 14, - 10, 11, 15, 16 -}; +static const uint16_t inv_zig_zag[16] = { 1, 2, 6, 7, 3, 5, 8, 13, + 4, 9, 12, 14, 10, 11, 15, 16 }; void vp8_fast_quantize_b_neon(BLOCK *b, BLOCKD *d) { - const int16x8_t one_q = vdupq_n_s16(-1), - z0 = vld1q_s16(b->coeff), - z1 = vld1q_s16(b->coeff + 8), - round0 = vld1q_s16(b->round), - round1 = vld1q_s16(b->round + 8), - quant0 = vld1q_s16(b->quant_fast), - quant1 = vld1q_s16(b->quant_fast + 8), - dequant0 = vld1q_s16(d->dequant), - dequant1 = vld1q_s16(d->dequant + 8); - const uint16x8_t zig_zag0 = vld1q_u16(inv_zig_zag), - zig_zag1 = vld1q_u16(inv_zig_zag + 8); - int16x8_t x0, x1, sz0, sz1, y0, y1; - uint16x8_t eob0, eob1; - uint16x4_t eob_d16; - uint32x2_t eob_d32; - uint32x4_t eob_q32; + const int16x8_t one_q = vdupq_n_s16(-1), z0 = vld1q_s16(b->coeff), + z1 = vld1q_s16(b->coeff + 8), round0 = vld1q_s16(b->round), + round1 = vld1q_s16(b->round + 8), + quant0 = vld1q_s16(b->quant_fast), + quant1 = vld1q_s16(b->quant_fast + 8), + dequant0 = vld1q_s16(d->dequant), + dequant1 = vld1q_s16(d->dequant + 8); + const uint16x8_t zig_zag0 = vld1q_u16(inv_zig_zag), + zig_zag1 = vld1q_u16(inv_zig_zag + 8); + int16x8_t x0, x1, sz0, sz1, y0, y1; + uint16x8_t eob0, eob1; + uint16x4_t eob_d16; + uint32x2_t eob_d32; + uint32x4_t eob_q32; - /* sign of z: z >> 15 */ - sz0 = 
vshrq_n_s16(z0, 15); - sz1 = vshrq_n_s16(z1, 15); + /* sign of z: z >> 15 */ + sz0 = vshrq_n_s16(z0, 15); + sz1 = vshrq_n_s16(z1, 15); - /* x = abs(z) */ - x0 = vabsq_s16(z0); - x1 = vabsq_s16(z1); + /* x = abs(z) */ + x0 = vabsq_s16(z0); + x1 = vabsq_s16(z1); - /* x += round */ - x0 = vaddq_s16(x0, round0); - x1 = vaddq_s16(x1, round1); + /* x += round */ + x0 = vaddq_s16(x0, round0); + x1 = vaddq_s16(x1, round1); - /* y = 2 * (x * quant) >> 16 */ - y0 = vqdmulhq_s16(x0, quant0); - y1 = vqdmulhq_s16(x1, quant1); + /* y = 2 * (x * quant) >> 16 */ + y0 = vqdmulhq_s16(x0, quant0); + y1 = vqdmulhq_s16(x1, quant1); - /* Compensate for doubling in vqdmulhq */ - y0 = vshrq_n_s16(y0, 1); - y1 = vshrq_n_s16(y1, 1); + /* Compensate for doubling in vqdmulhq */ + y0 = vshrq_n_s16(y0, 1); + y1 = vshrq_n_s16(y1, 1); - /* Restore sign bit */ - y0 = veorq_s16(y0, sz0); - y1 = veorq_s16(y1, sz1); - x0 = vsubq_s16(y0, sz0); - x1 = vsubq_s16(y1, sz1); + /* Restore sign bit */ + y0 = veorq_s16(y0, sz0); + y1 = veorq_s16(y1, sz1); + x0 = vsubq_s16(y0, sz0); + x1 = vsubq_s16(y1, sz1); - /* find non-zero elements */ - eob0 = vtstq_s16(x0, one_q); - eob1 = vtstq_s16(x1, one_q); + /* find non-zero elements */ + eob0 = vtstq_s16(x0, one_q); + eob1 = vtstq_s16(x1, one_q); - /* mask zig zag */ - eob0 = vandq_u16(eob0, zig_zag0); - eob1 = vandq_u16(eob1, zig_zag1); + /* mask zig zag */ + eob0 = vandq_u16(eob0, zig_zag0); + eob1 = vandq_u16(eob1, zig_zag1); - /* select the largest value */ - eob0 = vmaxq_u16(eob0, eob1); - eob_d16 = vmax_u16(vget_low_u16(eob0), vget_high_u16(eob0)); - eob_q32 = vmovl_u16(eob_d16); - eob_d32 = vmax_u32(vget_low_u32(eob_q32), vget_high_u32(eob_q32)); - eob_d32 = vpmax_u32(eob_d32, eob_d32); + /* select the largest value */ + eob0 = vmaxq_u16(eob0, eob1); + eob_d16 = vmax_u16(vget_low_u16(eob0), vget_high_u16(eob0)); + eob_q32 = vmovl_u16(eob_d16); + eob_d32 = vmax_u32(vget_low_u32(eob_q32), vget_high_u32(eob_q32)); + eob_d32 = vpmax_u32(eob_d32, eob_d32); - /* 
qcoeff = x */ - vst1q_s16(d->qcoeff, x0); - vst1q_s16(d->qcoeff + 8, x1); + /* qcoeff = x */ + vst1q_s16(d->qcoeff, x0); + vst1q_s16(d->qcoeff + 8, x1); - /* dqcoeff = x * dequant */ - vst1q_s16(d->dqcoeff, vmulq_s16(dequant0, x0)); - vst1q_s16(d->dqcoeff + 8, vmulq_s16(dequant1, x1)); + /* dqcoeff = x * dequant */ + vst1q_s16(d->dqcoeff, vmulq_s16(dequant0, x0)); + vst1q_s16(d->dqcoeff + 8, vmulq_s16(dequant1, x1)); - vst1_lane_s8((int8_t *)d->eob, vreinterpret_s8_u32(eob_d32), 0); + vst1_lane_s8((int8_t *)d->eob, vreinterpret_s8_u32(eob_d32), 0); } diff --git a/libs/libvpx/vp8/encoder/arm/neon/shortfdct_neon.c b/libs/libvpx/vp8/encoder/arm/neon/shortfdct_neon.c index 391e5f9907..76853e6524 100644 --- a/libs/libvpx/vp8/encoder/arm/neon/shortfdct_neon.c +++ b/libs/libvpx/vp8/encoder/arm/neon/shortfdct_neon.c @@ -10,260 +10,250 @@ #include -void vp8_short_fdct4x4_neon( - int16_t *input, - int16_t *output, - int pitch) { - int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16; - int16x4_t d16s16, d17s16, d26s16, dEmptys16; - uint16x4_t d4u16; - int16x8_t q0s16, q1s16; - int32x4_t q9s32, q10s32, q11s32, q12s32; - int16x4x2_t v2tmp0, v2tmp1; - int32x2x2_t v2tmp2, v2tmp3; +void vp8_short_fdct4x4_neon(int16_t *input, int16_t *output, int pitch) { + int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16; + int16x4_t d16s16, d17s16, d26s16, dEmptys16; + uint16x4_t d4u16; + int16x8_t q0s16, q1s16; + int32x4_t q9s32, q10s32, q11s32, q12s32; + int16x4x2_t v2tmp0, v2tmp1; + int32x2x2_t v2tmp2, v2tmp3; - d16s16 = vdup_n_s16(5352); - d17s16 = vdup_n_s16(2217); - q9s32 = vdupq_n_s32(14500); - q10s32 = vdupq_n_s32(7500); - q11s32 = vdupq_n_s32(12000); - q12s32 = vdupq_n_s32(51000); + d16s16 = vdup_n_s16(5352); + d17s16 = vdup_n_s16(2217); + q9s32 = vdupq_n_s32(14500); + q10s32 = vdupq_n_s32(7500); + q11s32 = vdupq_n_s32(12000); + q12s32 = vdupq_n_s32(51000); - // Part one - pitch >>= 1; - d0s16 = vld1_s16(input); - input += pitch; - d1s16 = vld1_s16(input); - 
input += pitch; - d2s16 = vld1_s16(input); - input += pitch; - d3s16 = vld1_s16(input); + // Part one + pitch >>= 1; + d0s16 = vld1_s16(input); + input += pitch; + d1s16 = vld1_s16(input); + input += pitch; + d2s16 = vld1_s16(input); + input += pitch; + d3s16 = vld1_s16(input); - v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d0s16), - vreinterpret_s32_s16(d2s16)); - v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16), - vreinterpret_s32_s16(d3s16)); - v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]), // d0 - vreinterpret_s16_s32(v2tmp3.val[0])); // d1 - v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]), // d2 - vreinterpret_s16_s32(v2tmp3.val[1])); // d3 + v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d0s16), vreinterpret_s32_s16(d2s16)); + v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16)); + v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]), // d0 + vreinterpret_s16_s32(v2tmp3.val[0])); // d1 + v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]), // d2 + vreinterpret_s16_s32(v2tmp3.val[1])); // d3 - d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]); - d5s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]); - d6s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]); - d7s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]); + d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]); + d5s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]); + d6s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]); + d7s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]); - d4s16 = vshl_n_s16(d4s16, 3); - d5s16 = vshl_n_s16(d5s16, 3); - d6s16 = vshl_n_s16(d6s16, 3); - d7s16 = vshl_n_s16(d7s16, 3); + d4s16 = vshl_n_s16(d4s16, 3); + d5s16 = vshl_n_s16(d5s16, 3); + d6s16 = vshl_n_s16(d6s16, 3); + d7s16 = vshl_n_s16(d7s16, 3); - d0s16 = vadd_s16(d4s16, d5s16); - d2s16 = vsub_s16(d4s16, d5s16); + d0s16 = vadd_s16(d4s16, d5s16); + d2s16 = vsub_s16(d4s16, d5s16); - q9s32 = vmlal_s16(q9s32, d7s16, d16s16); - q10s32 = vmlal_s16(q10s32, d7s16, d17s16); - q9s32 = vmlal_s16(q9s32, d6s16, d17s16); - q10s32 = vmlsl_s16(q10s32, d6s16, 
d16s16); + q9s32 = vmlal_s16(q9s32, d7s16, d16s16); + q10s32 = vmlal_s16(q10s32, d7s16, d17s16); + q9s32 = vmlal_s16(q9s32, d6s16, d17s16); + q10s32 = vmlsl_s16(q10s32, d6s16, d16s16); - d1s16 = vshrn_n_s32(q9s32, 12); - d3s16 = vshrn_n_s32(q10s32, 12); + d1s16 = vshrn_n_s32(q9s32, 12); + d3s16 = vshrn_n_s32(q10s32, 12); - // Part two - v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d0s16), - vreinterpret_s32_s16(d2s16)); - v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16), - vreinterpret_s32_s16(d3s16)); - v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]), // d0 - vreinterpret_s16_s32(v2tmp3.val[0])); // d1 - v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]), // d2 - vreinterpret_s16_s32(v2tmp3.val[1])); // d3 + // Part two + v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d0s16), vreinterpret_s32_s16(d2s16)); + v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16)); + v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]), // d0 + vreinterpret_s16_s32(v2tmp3.val[0])); // d1 + v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]), // d2 + vreinterpret_s16_s32(v2tmp3.val[1])); // d3 - d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]); - d5s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]); - d6s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]); - d7s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]); + d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]); + d5s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]); + d6s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]); + d7s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]); - d26s16 = vdup_n_s16(7); - d4s16 = vadd_s16(d4s16, d26s16); + d26s16 = vdup_n_s16(7); + d4s16 = vadd_s16(d4s16, d26s16); - d0s16 = vadd_s16(d4s16, d5s16); - d2s16 = vsub_s16(d4s16, d5s16); + d0s16 = vadd_s16(d4s16, d5s16); + d2s16 = vsub_s16(d4s16, d5s16); - q11s32 = vmlal_s16(q11s32, d7s16, d16s16); - q12s32 = vmlal_s16(q12s32, d7s16, d17s16); + q11s32 = vmlal_s16(q11s32, d7s16, d16s16); + q12s32 = vmlal_s16(q12s32, d7s16, d17s16); - dEmptys16 = vdup_n_s16(0); - d4u16 = vceq_s16(d7s16, 
dEmptys16); + dEmptys16 = vdup_n_s16(0); + d4u16 = vceq_s16(d7s16, dEmptys16); - d0s16 = vshr_n_s16(d0s16, 4); - d2s16 = vshr_n_s16(d2s16, 4); + d0s16 = vshr_n_s16(d0s16, 4); + d2s16 = vshr_n_s16(d2s16, 4); - q11s32 = vmlal_s16(q11s32, d6s16, d17s16); - q12s32 = vmlsl_s16(q12s32, d6s16, d16s16); + q11s32 = vmlal_s16(q11s32, d6s16, d17s16); + q12s32 = vmlsl_s16(q12s32, d6s16, d16s16); - d4u16 = vmvn_u16(d4u16); - d1s16 = vshrn_n_s32(q11s32, 16); - d1s16 = vsub_s16(d1s16, vreinterpret_s16_u16(d4u16)); - d3s16 = vshrn_n_s32(q12s32, 16); + d4u16 = vmvn_u16(d4u16); + d1s16 = vshrn_n_s32(q11s32, 16); + d1s16 = vsub_s16(d1s16, vreinterpret_s16_u16(d4u16)); + d3s16 = vshrn_n_s32(q12s32, 16); - q0s16 = vcombine_s16(d0s16, d1s16); - q1s16 = vcombine_s16(d2s16, d3s16); + q0s16 = vcombine_s16(d0s16, d1s16); + q1s16 = vcombine_s16(d2s16, d3s16); - vst1q_s16(output, q0s16); - vst1q_s16(output + 8, q1s16); - return; + vst1q_s16(output, q0s16); + vst1q_s16(output + 8, q1s16); + return; } -void vp8_short_fdct8x4_neon( - int16_t *input, - int16_t *output, - int pitch) { - int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16; - int16x4_t d16s16, d17s16, d26s16, d27s16, d28s16, d29s16; - uint16x4_t d28u16, d29u16; - uint16x8_t q14u16; - int16x8_t q0s16, q1s16, q2s16, q3s16; - int16x8_t q11s16, q12s16, q13s16, q14s16, q15s16, qEmptys16; - int32x4_t q9s32, q10s32, q11s32, q12s32; - int16x8x2_t v2tmp0, v2tmp1; - int32x4x2_t v2tmp2, v2tmp3; +void vp8_short_fdct8x4_neon(int16_t *input, int16_t *output, int pitch) { + int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16; + int16x4_t d16s16, d17s16, d26s16, d27s16, d28s16, d29s16; + uint16x4_t d28u16, d29u16; + uint16x8_t q14u16; + int16x8_t q0s16, q1s16, q2s16, q3s16; + int16x8_t q11s16, q12s16, q13s16, q14s16, q15s16, qEmptys16; + int32x4_t q9s32, q10s32, q11s32, q12s32; + int16x8x2_t v2tmp0, v2tmp1; + int32x4x2_t v2tmp2, v2tmp3; - d16s16 = vdup_n_s16(5352); - d17s16 = vdup_n_s16(2217); - q9s32 = vdupq_n_s32(14500); 
- q10s32 = vdupq_n_s32(7500); + d16s16 = vdup_n_s16(5352); + d17s16 = vdup_n_s16(2217); + q9s32 = vdupq_n_s32(14500); + q10s32 = vdupq_n_s32(7500); - // Part one - pitch >>= 1; - q0s16 = vld1q_s16(input); - input += pitch; - q1s16 = vld1q_s16(input); - input += pitch; - q2s16 = vld1q_s16(input); - input += pitch; - q3s16 = vld1q_s16(input); + // Part one + pitch >>= 1; + q0s16 = vld1q_s16(input); + input += pitch; + q1s16 = vld1q_s16(input); + input += pitch; + q2s16 = vld1q_s16(input); + input += pitch; + q3s16 = vld1q_s16(input); - v2tmp2 = vtrnq_s32(vreinterpretq_s32_s16(q0s16), - vreinterpretq_s32_s16(q2s16)); - v2tmp3 = vtrnq_s32(vreinterpretq_s32_s16(q1s16), - vreinterpretq_s32_s16(q3s16)); - v2tmp0 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]), // q0 - vreinterpretq_s16_s32(v2tmp3.val[0])); // q1 - v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]), // q2 - vreinterpretq_s16_s32(v2tmp3.val[1])); // q3 + v2tmp2 = + vtrnq_s32(vreinterpretq_s32_s16(q0s16), vreinterpretq_s32_s16(q2s16)); + v2tmp3 = + vtrnq_s32(vreinterpretq_s32_s16(q1s16), vreinterpretq_s32_s16(q3s16)); + v2tmp0 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]), // q0 + vreinterpretq_s16_s32(v2tmp3.val[0])); // q1 + v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]), // q2 + vreinterpretq_s16_s32(v2tmp3.val[1])); // q3 - q11s16 = vaddq_s16(v2tmp0.val[0], v2tmp1.val[1]); - q12s16 = vaddq_s16(v2tmp0.val[1], v2tmp1.val[0]); - q13s16 = vsubq_s16(v2tmp0.val[1], v2tmp1.val[0]); - q14s16 = vsubq_s16(v2tmp0.val[0], v2tmp1.val[1]); + q11s16 = vaddq_s16(v2tmp0.val[0], v2tmp1.val[1]); + q12s16 = vaddq_s16(v2tmp0.val[1], v2tmp1.val[0]); + q13s16 = vsubq_s16(v2tmp0.val[1], v2tmp1.val[0]); + q14s16 = vsubq_s16(v2tmp0.val[0], v2tmp1.val[1]); - q11s16 = vshlq_n_s16(q11s16, 3); - q12s16 = vshlq_n_s16(q12s16, 3); - q13s16 = vshlq_n_s16(q13s16, 3); - q14s16 = vshlq_n_s16(q14s16, 3); + q11s16 = vshlq_n_s16(q11s16, 3); + q12s16 = vshlq_n_s16(q12s16, 3); + q13s16 = vshlq_n_s16(q13s16, 3); + q14s16 = 
vshlq_n_s16(q14s16, 3); - q0s16 = vaddq_s16(q11s16, q12s16); - q2s16 = vsubq_s16(q11s16, q12s16); + q0s16 = vaddq_s16(q11s16, q12s16); + q2s16 = vsubq_s16(q11s16, q12s16); - q11s32 = q9s32; - q12s32 = q10s32; + q11s32 = q9s32; + q12s32 = q10s32; - d26s16 = vget_low_s16(q13s16); - d27s16 = vget_high_s16(q13s16); - d28s16 = vget_low_s16(q14s16); - d29s16 = vget_high_s16(q14s16); + d26s16 = vget_low_s16(q13s16); + d27s16 = vget_high_s16(q13s16); + d28s16 = vget_low_s16(q14s16); + d29s16 = vget_high_s16(q14s16); - q9s32 = vmlal_s16(q9s32, d28s16, d16s16); - q10s32 = vmlal_s16(q10s32, d28s16, d17s16); - q11s32 = vmlal_s16(q11s32, d29s16, d16s16); - q12s32 = vmlal_s16(q12s32, d29s16, d17s16); + q9s32 = vmlal_s16(q9s32, d28s16, d16s16); + q10s32 = vmlal_s16(q10s32, d28s16, d17s16); + q11s32 = vmlal_s16(q11s32, d29s16, d16s16); + q12s32 = vmlal_s16(q12s32, d29s16, d17s16); - q9s32 = vmlal_s16(q9s32, d26s16, d17s16); - q10s32 = vmlsl_s16(q10s32, d26s16, d16s16); - q11s32 = vmlal_s16(q11s32, d27s16, d17s16); - q12s32 = vmlsl_s16(q12s32, d27s16, d16s16); + q9s32 = vmlal_s16(q9s32, d26s16, d17s16); + q10s32 = vmlsl_s16(q10s32, d26s16, d16s16); + q11s32 = vmlal_s16(q11s32, d27s16, d17s16); + q12s32 = vmlsl_s16(q12s32, d27s16, d16s16); - d2s16 = vshrn_n_s32(q9s32, 12); - d6s16 = vshrn_n_s32(q10s32, 12); - d3s16 = vshrn_n_s32(q11s32, 12); - d7s16 = vshrn_n_s32(q12s32, 12); - q1s16 = vcombine_s16(d2s16, d3s16); - q3s16 = vcombine_s16(d6s16, d7s16); + d2s16 = vshrn_n_s32(q9s32, 12); + d6s16 = vshrn_n_s32(q10s32, 12); + d3s16 = vshrn_n_s32(q11s32, 12); + d7s16 = vshrn_n_s32(q12s32, 12); + q1s16 = vcombine_s16(d2s16, d3s16); + q3s16 = vcombine_s16(d6s16, d7s16); - // Part two - q9s32 = vdupq_n_s32(12000); - q10s32 = vdupq_n_s32(51000); + // Part two + q9s32 = vdupq_n_s32(12000); + q10s32 = vdupq_n_s32(51000); - v2tmp2 = vtrnq_s32(vreinterpretq_s32_s16(q0s16), - vreinterpretq_s32_s16(q2s16)); - v2tmp3 = vtrnq_s32(vreinterpretq_s32_s16(q1s16), - vreinterpretq_s32_s16(q3s16)); - v2tmp0 
= vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]), // q0 - vreinterpretq_s16_s32(v2tmp3.val[0])); // q1 - v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]), // q2 - vreinterpretq_s16_s32(v2tmp3.val[1])); // q3 + v2tmp2 = + vtrnq_s32(vreinterpretq_s32_s16(q0s16), vreinterpretq_s32_s16(q2s16)); + v2tmp3 = + vtrnq_s32(vreinterpretq_s32_s16(q1s16), vreinterpretq_s32_s16(q3s16)); + v2tmp0 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[0]), // q0 + vreinterpretq_s16_s32(v2tmp3.val[0])); // q1 + v2tmp1 = vtrnq_s16(vreinterpretq_s16_s32(v2tmp2.val[1]), // q2 + vreinterpretq_s16_s32(v2tmp3.val[1])); // q3 - q11s16 = vaddq_s16(v2tmp0.val[0], v2tmp1.val[1]); - q12s16 = vaddq_s16(v2tmp0.val[1], v2tmp1.val[0]); - q13s16 = vsubq_s16(v2tmp0.val[1], v2tmp1.val[0]); - q14s16 = vsubq_s16(v2tmp0.val[0], v2tmp1.val[1]); + q11s16 = vaddq_s16(v2tmp0.val[0], v2tmp1.val[1]); + q12s16 = vaddq_s16(v2tmp0.val[1], v2tmp1.val[0]); + q13s16 = vsubq_s16(v2tmp0.val[1], v2tmp1.val[0]); + q14s16 = vsubq_s16(v2tmp0.val[0], v2tmp1.val[1]); - q15s16 = vdupq_n_s16(7); - q11s16 = vaddq_s16(q11s16, q15s16); - q0s16 = vaddq_s16(q11s16, q12s16); - q1s16 = vsubq_s16(q11s16, q12s16); + q15s16 = vdupq_n_s16(7); + q11s16 = vaddq_s16(q11s16, q15s16); + q0s16 = vaddq_s16(q11s16, q12s16); + q1s16 = vsubq_s16(q11s16, q12s16); - q11s32 = q9s32; - q12s32 = q10s32; + q11s32 = q9s32; + q12s32 = q10s32; - d0s16 = vget_low_s16(q0s16); - d1s16 = vget_high_s16(q0s16); - d2s16 = vget_low_s16(q1s16); - d3s16 = vget_high_s16(q1s16); + d0s16 = vget_low_s16(q0s16); + d1s16 = vget_high_s16(q0s16); + d2s16 = vget_low_s16(q1s16); + d3s16 = vget_high_s16(q1s16); - d0s16 = vshr_n_s16(d0s16, 4); - d4s16 = vshr_n_s16(d1s16, 4); - d2s16 = vshr_n_s16(d2s16, 4); - d6s16 = vshr_n_s16(d3s16, 4); + d0s16 = vshr_n_s16(d0s16, 4); + d4s16 = vshr_n_s16(d1s16, 4); + d2s16 = vshr_n_s16(d2s16, 4); + d6s16 = vshr_n_s16(d3s16, 4); - d26s16 = vget_low_s16(q13s16); - d27s16 = vget_high_s16(q13s16); - d28s16 = vget_low_s16(q14s16); - d29s16 = 
vget_high_s16(q14s16); + d26s16 = vget_low_s16(q13s16); + d27s16 = vget_high_s16(q13s16); + d28s16 = vget_low_s16(q14s16); + d29s16 = vget_high_s16(q14s16); - q9s32 = vmlal_s16(q9s32, d28s16, d16s16); - q10s32 = vmlal_s16(q10s32, d28s16, d17s16); - q11s32 = vmlal_s16(q11s32, d29s16, d16s16); - q12s32 = vmlal_s16(q12s32, d29s16, d17s16); + q9s32 = vmlal_s16(q9s32, d28s16, d16s16); + q10s32 = vmlal_s16(q10s32, d28s16, d17s16); + q11s32 = vmlal_s16(q11s32, d29s16, d16s16); + q12s32 = vmlal_s16(q12s32, d29s16, d17s16); - q9s32 = vmlal_s16(q9s32, d26s16, d17s16); - q10s32 = vmlsl_s16(q10s32, d26s16, d16s16); - q11s32 = vmlal_s16(q11s32, d27s16, d17s16); - q12s32 = vmlsl_s16(q12s32, d27s16, d16s16); + q9s32 = vmlal_s16(q9s32, d26s16, d17s16); + q10s32 = vmlsl_s16(q10s32, d26s16, d16s16); + q11s32 = vmlal_s16(q11s32, d27s16, d17s16); + q12s32 = vmlsl_s16(q12s32, d27s16, d16s16); - d1s16 = vshrn_n_s32(q9s32, 16); - d3s16 = vshrn_n_s32(q10s32, 16); - d5s16 = vshrn_n_s32(q11s32, 16); - d7s16 = vshrn_n_s32(q12s32, 16); + d1s16 = vshrn_n_s32(q9s32, 16); + d3s16 = vshrn_n_s32(q10s32, 16); + d5s16 = vshrn_n_s32(q11s32, 16); + d7s16 = vshrn_n_s32(q12s32, 16); - qEmptys16 = vdupq_n_s16(0); - q14u16 = vceqq_s16(q14s16, qEmptys16); - q14u16 = vmvnq_u16(q14u16); + qEmptys16 = vdupq_n_s16(0); + q14u16 = vceqq_s16(q14s16, qEmptys16); + q14u16 = vmvnq_u16(q14u16); - d28u16 = vget_low_u16(q14u16); - d29u16 = vget_high_u16(q14u16); - d1s16 = vsub_s16(d1s16, vreinterpret_s16_u16(d28u16)); - d5s16 = vsub_s16(d5s16, vreinterpret_s16_u16(d29u16)); + d28u16 = vget_low_u16(q14u16); + d29u16 = vget_high_u16(q14u16); + d1s16 = vsub_s16(d1s16, vreinterpret_s16_u16(d28u16)); + d5s16 = vsub_s16(d5s16, vreinterpret_s16_u16(d29u16)); - q0s16 = vcombine_s16(d0s16, d1s16); - q1s16 = vcombine_s16(d2s16, d3s16); - q2s16 = vcombine_s16(d4s16, d5s16); - q3s16 = vcombine_s16(d6s16, d7s16); + q0s16 = vcombine_s16(d0s16, d1s16); + q1s16 = vcombine_s16(d2s16, d3s16); + q2s16 = vcombine_s16(d4s16, d5s16); + 
q3s16 = vcombine_s16(d6s16, d7s16); - vst1q_s16(output, q0s16); - vst1q_s16(output + 8, q1s16); - vst1q_s16(output + 16, q2s16); - vst1q_s16(output + 24, q3s16); - return; + vst1q_s16(output, q0s16); + vst1q_s16(output + 8, q1s16); + vst1q_s16(output + 16, q2s16); + vst1q_s16(output + 24, q3s16); + return; } diff --git a/libs/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.c b/libs/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.c index 5ad9465002..8d6ea4ccbe 100644 --- a/libs/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.c +++ b/libs/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.c @@ -13,117 +13,107 @@ #ifdef VPX_INCOMPATIBLE_GCC #include "./vp8_rtcd.h" -void vp8_short_walsh4x4_neon( - int16_t *input, - int16_t *output, - int pitch) { +void vp8_short_walsh4x4_neon(int16_t *input, int16_t *output, int pitch) { vp8_short_walsh4x4_c(input, output, pitch); } #else -void vp8_short_walsh4x4_neon( - int16_t *input, - int16_t *output, - int pitch) { - uint16x4_t d16u16; - int16x8_t q0s16, q1s16; - int16x4_t dEmptys16, d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16; - int32x4_t qEmptys32, q0s32, q1s32, q2s32, q3s32, q8s32; - int32x4_t q9s32, q10s32, q11s32, q15s32; - uint32x4_t q8u32, q9u32, q10u32, q11u32; - int16x4x2_t v2tmp0, v2tmp1; - int32x2x2_t v2tmp2, v2tmp3; +void vp8_short_walsh4x4_neon(int16_t *input, int16_t *output, int pitch) { + uint16x4_t d16u16; + int16x8_t q0s16, q1s16; + int16x4_t dEmptys16, d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16; + int32x4_t qEmptys32, q0s32, q1s32, q2s32, q3s32, q8s32; + int32x4_t q9s32, q10s32, q11s32, q15s32; + uint32x4_t q8u32, q9u32, q10u32, q11u32; + int16x4x2_t v2tmp0, v2tmp1; + int32x2x2_t v2tmp2, v2tmp3; - dEmptys16 = vdup_n_s16(0); - qEmptys32 = vdupq_n_s32(0); - q15s32 = vdupq_n_s32(3); + dEmptys16 = vdup_n_s16(0); + qEmptys32 = vdupq_n_s32(0); + q15s32 = vdupq_n_s32(3); - d0s16 = vld1_s16(input); - input += pitch/2; - d1s16 = vld1_s16(input); - input += pitch/2; - d2s16 = 
vld1_s16(input); - input += pitch/2; - d3s16 = vld1_s16(input); + d0s16 = vld1_s16(input); + input += pitch / 2; + d1s16 = vld1_s16(input); + input += pitch / 2; + d2s16 = vld1_s16(input); + input += pitch / 2; + d3s16 = vld1_s16(input); - v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d0s16), - vreinterpret_s32_s16(d2s16)); - v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16), - vreinterpret_s32_s16(d3s16)); - v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]), // d0 - vreinterpret_s16_s32(v2tmp3.val[0])); // d1 - v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]), // d2 - vreinterpret_s16_s32(v2tmp3.val[1])); // d3 + v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d0s16), vreinterpret_s32_s16(d2s16)); + v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16)); + v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]), // d0 + vreinterpret_s16_s32(v2tmp3.val[0])); // d1 + v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]), // d2 + vreinterpret_s16_s32(v2tmp3.val[1])); // d3 - d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[0]); - d5s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[1]); - d6s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[1]); - d7s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[0]); + d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[0]); + d5s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[1]); + d6s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[1]); + d7s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[0]); - d4s16 = vshl_n_s16(d4s16, 2); - d5s16 = vshl_n_s16(d5s16, 2); - d6s16 = vshl_n_s16(d6s16, 2); - d7s16 = vshl_n_s16(d7s16, 2); + d4s16 = vshl_n_s16(d4s16, 2); + d5s16 = vshl_n_s16(d5s16, 2); + d6s16 = vshl_n_s16(d6s16, 2); + d7s16 = vshl_n_s16(d7s16, 2); - d16u16 = vceq_s16(d4s16, dEmptys16); - d16u16 = vmvn_u16(d16u16); + d16u16 = vceq_s16(d4s16, dEmptys16); + d16u16 = vmvn_u16(d16u16); - d0s16 = vadd_s16(d4s16, d5s16); - d3s16 = vsub_s16(d4s16, d5s16); - d1s16 = vadd_s16(d7s16, d6s16); - d2s16 = vsub_s16(d7s16, d6s16); + d0s16 = vadd_s16(d4s16, d5s16); + d3s16 = vsub_s16(d4s16, d5s16); + d1s16 = 
vadd_s16(d7s16, d6s16); + d2s16 = vsub_s16(d7s16, d6s16); - d0s16 = vsub_s16(d0s16, vreinterpret_s16_u16(d16u16)); + d0s16 = vsub_s16(d0s16, vreinterpret_s16_u16(d16u16)); - // Second for-loop - v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d1s16), - vreinterpret_s32_s16(d3s16)); - v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d0s16), - vreinterpret_s32_s16(d2s16)); - v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp3.val[1]), // d2 - vreinterpret_s16_s32(v2tmp2.val[1])); // d3 - v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp3.val[0]), // d0 - vreinterpret_s16_s32(v2tmp2.val[0])); // d1 + // Second for-loop + v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16)); + v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d0s16), vreinterpret_s32_s16(d2s16)); + v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp3.val[1]), // d2 + vreinterpret_s16_s32(v2tmp2.val[1])); // d3 + v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp3.val[0]), // d0 + vreinterpret_s16_s32(v2tmp2.val[0])); // d1 - q8s32 = vaddl_s16(v2tmp1.val[0], v2tmp0.val[0]); - q9s32 = vaddl_s16(v2tmp1.val[1], v2tmp0.val[1]); - q10s32 = vsubl_s16(v2tmp1.val[1], v2tmp0.val[1]); - q11s32 = vsubl_s16(v2tmp1.val[0], v2tmp0.val[0]); + q8s32 = vaddl_s16(v2tmp1.val[0], v2tmp0.val[0]); + q9s32 = vaddl_s16(v2tmp1.val[1], v2tmp0.val[1]); + q10s32 = vsubl_s16(v2tmp1.val[1], v2tmp0.val[1]); + q11s32 = vsubl_s16(v2tmp1.val[0], v2tmp0.val[0]); - q0s32 = vaddq_s32(q8s32, q9s32); - q1s32 = vaddq_s32(q11s32, q10s32); - q2s32 = vsubq_s32(q11s32, q10s32); - q3s32 = vsubq_s32(q8s32, q9s32); + q0s32 = vaddq_s32(q8s32, q9s32); + q1s32 = vaddq_s32(q11s32, q10s32); + q2s32 = vsubq_s32(q11s32, q10s32); + q3s32 = vsubq_s32(q8s32, q9s32); - q8u32 = vcltq_s32(q0s32, qEmptys32); - q9u32 = vcltq_s32(q1s32, qEmptys32); - q10u32 = vcltq_s32(q2s32, qEmptys32); - q11u32 = vcltq_s32(q3s32, qEmptys32); + q8u32 = vcltq_s32(q0s32, qEmptys32); + q9u32 = vcltq_s32(q1s32, qEmptys32); + q10u32 = vcltq_s32(q2s32, qEmptys32); + q11u32 = vcltq_s32(q3s32, qEmptys32); - q8s32 = 
vreinterpretq_s32_u32(q8u32); - q9s32 = vreinterpretq_s32_u32(q9u32); - q10s32 = vreinterpretq_s32_u32(q10u32); - q11s32 = vreinterpretq_s32_u32(q11u32); + q8s32 = vreinterpretq_s32_u32(q8u32); + q9s32 = vreinterpretq_s32_u32(q9u32); + q10s32 = vreinterpretq_s32_u32(q10u32); + q11s32 = vreinterpretq_s32_u32(q11u32); - q0s32 = vsubq_s32(q0s32, q8s32); - q1s32 = vsubq_s32(q1s32, q9s32); - q2s32 = vsubq_s32(q2s32, q10s32); - q3s32 = vsubq_s32(q3s32, q11s32); + q0s32 = vsubq_s32(q0s32, q8s32); + q1s32 = vsubq_s32(q1s32, q9s32); + q2s32 = vsubq_s32(q2s32, q10s32); + q3s32 = vsubq_s32(q3s32, q11s32); - q8s32 = vaddq_s32(q0s32, q15s32); - q9s32 = vaddq_s32(q1s32, q15s32); - q10s32 = vaddq_s32(q2s32, q15s32); - q11s32 = vaddq_s32(q3s32, q15s32); + q8s32 = vaddq_s32(q0s32, q15s32); + q9s32 = vaddq_s32(q1s32, q15s32); + q10s32 = vaddq_s32(q2s32, q15s32); + q11s32 = vaddq_s32(q3s32, q15s32); - d0s16 = vshrn_n_s32(q8s32, 3); - d1s16 = vshrn_n_s32(q9s32, 3); - d2s16 = vshrn_n_s32(q10s32, 3); - d3s16 = vshrn_n_s32(q11s32, 3); + d0s16 = vshrn_n_s32(q8s32, 3); + d1s16 = vshrn_n_s32(q9s32, 3); + d2s16 = vshrn_n_s32(q10s32, 3); + d3s16 = vshrn_n_s32(q11s32, 3); - q0s16 = vcombine_s16(d0s16, d1s16); - q1s16 = vcombine_s16(d2s16, d3s16); + q0s16 = vcombine_s16(d0s16, d1s16); + q1s16 = vcombine_s16(d2s16, d3s16); - vst1q_s16(output, q0s16); - vst1q_s16(output + 8, q1s16); - return; + vst1q_s16(output, q0s16); + vst1q_s16(output + 8, q1s16); + return; } #endif // VPX_INCOMPATIBLE_GCC diff --git a/libs/libvpx/vp8/encoder/bitstream.c b/libs/libvpx/vp8/encoder/bitstream.c index f3d91b5528..1b100cfe8b 100644 --- a/libs/libvpx/vp8/encoder/bitstream.c +++ b/libs/libvpx/vp8/encoder/bitstream.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "vp8/common/header.h" #include "encodemv.h" #include "vp8/common/entropymode.h" @@ -25,24 +24,16 @@ #include "defaultcoefcounts.h" #include "vp8/common/common.h" -const int vp8cx_base_skip_false_prob[128] = -{ - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, - 251, 248, 244, 240, 236, 232, 229, 225, - 221, 217, 213, 208, 204, 199, 194, 190, - 187, 183, 179, 175, 172, 168, 164, 160, - 157, 153, 149, 145, 142, 138, 134, 130, - 127, 124, 120, 117, 114, 110, 107, 104, - 101, 98, 95, 92, 89, 86, 83, 80, - 77, 74, 71, 68, 65, 62, 59, 56, - 53, 50, 47, 44, 41, 38, 35, 32, - 30, 28, 26, 24, 22, 20, 18, 16, +const int vp8cx_base_skip_false_prob[128] = { + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 251, 248, 244, 240, + 236, 232, 229, 225, 221, 217, 213, 208, 204, 199, 194, 190, 187, 183, 179, + 175, 172, 168, 164, 160, 157, 153, 149, 145, 142, 138, 134, 130, 127, 124, + 120, 117, 114, 110, 107, 104, 101, 98, 95, 92, 89, 86, 83, 80, 77, + 74, 71, 68, 65, 62, 59, 56, 53, 50, 47, 44, 41, 38, 35, 32, + 30, 28, 26, 24, 22, 20, 18, 16, }; #if defined(SECTIONBITS_OUTPUT) @@ -51,7 +42,8 @@ unsigned __int64 Sectionbits[500]; #ifdef VP8_ENTROPY_STATS int intra_mode_stats[10][10][10]; -static unsigned int tree_update_hist [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] [2]; +static unsigned int tree_update_hist[BLOCK_TYPES][COEF_BANDS] + [PREV_COEF_CONTEXTS][ENTROPY_NODES][2]; extern unsigned int active_section; #endif @@ -59,731 +51,610 @@ extern unsigned int active_section; 
int count_mb_seg[4] = { 0, 0, 0, 0 }; #endif +static void update_mode(vp8_writer *const w, int n, vp8_token tok[/* n */], + vp8_tree tree, vp8_prob Pnew[/* n-1 */], + vp8_prob Pcur[/* n-1 */], + unsigned int bct[/* n-1 */][2], + const unsigned int num_events[/* n */]) { + unsigned int new_b = 0, old_b = 0; + int i = 0; -static void update_mode( - vp8_writer *const w, - int n, - vp8_token tok [/* n */], - vp8_tree tree, - vp8_prob Pnew [/* n-1 */], - vp8_prob Pcur [/* n-1 */], - unsigned int bct [/* n-1 */] [2], - const unsigned int num_events[/* n */] -) -{ - unsigned int new_b = 0, old_b = 0; + vp8_tree_probs_from_distribution(n--, tok, tree, Pnew, bct, num_events, 256, + 1); + + do { + new_b += vp8_cost_branch(bct[i], Pnew[i]); + old_b += vp8_cost_branch(bct[i], Pcur[i]); + } while (++i < n); + + if (new_b + (n << 8) < old_b) { + int j = 0; + + vp8_write_bit(w, 1); + + do { + const vp8_prob p = Pnew[j]; + + vp8_write_literal(w, Pcur[j] = p ? p : 1, 8); + } while (++j < n); + } else + vp8_write_bit(w, 0); +} + +static void update_mbintra_mode_probs(VP8_COMP *cpi) { + VP8_COMMON *const x = &cpi->common; + + vp8_writer *const w = cpi->bc; + + { + vp8_prob Pnew[VP8_YMODES - 1]; + unsigned int bct[VP8_YMODES - 1][2]; + + update_mode(w, VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree, Pnew, + x->fc.ymode_prob, bct, (unsigned int *)cpi->mb.ymode_count); + } + { + vp8_prob Pnew[VP8_UV_MODES - 1]; + unsigned int bct[VP8_UV_MODES - 1][2]; + + update_mode(w, VP8_UV_MODES, vp8_uv_mode_encodings, vp8_uv_mode_tree, Pnew, + x->fc.uv_mode_prob, bct, (unsigned int *)cpi->mb.uv_mode_count); + } +} + +static void write_ymode(vp8_writer *bc, int m, const vp8_prob *p) { + vp8_write_token(bc, vp8_ymode_tree, p, vp8_ymode_encodings + m); +} + +static void kfwrite_ymode(vp8_writer *bc, int m, const vp8_prob *p) { + vp8_write_token(bc, vp8_kf_ymode_tree, p, vp8_kf_ymode_encodings + m); +} + +static void write_uv_mode(vp8_writer *bc, int m, const vp8_prob *p) { + vp8_write_token(bc, 
vp8_uv_mode_tree, p, vp8_uv_mode_encodings + m); +} + +static void write_bmode(vp8_writer *bc, int m, const vp8_prob *p) { + vp8_write_token(bc, vp8_bmode_tree, p, vp8_bmode_encodings + m); +} + +static void write_split(vp8_writer *bc, int x) { + vp8_write_token(bc, vp8_mbsplit_tree, vp8_mbsplit_probs, + vp8_mbsplit_encodings + x); +} + +void vp8_pack_tokens(vp8_writer *w, const TOKENEXTRA *p, int xcount) { + const TOKENEXTRA *stop = p + xcount; + unsigned int split; + int shift; + int count = w->count; + unsigned int range = w->range; + unsigned int lowvalue = w->lowvalue; + + while (p < stop) { + const int t = p->Token; + vp8_token *a = vp8_coef_encodings + t; + const vp8_extra_bit_struct *b = vp8_extra_bits + t; int i = 0; + const unsigned char *pp = p->context_tree; + int v = a->value; + int n = a->Len; - vp8_tree_probs_from_distribution( - n--, tok, tree, - Pnew, bct, num_events, - 256, 1 - ); - - do - { - new_b += vp8_cost_branch(bct[i], Pnew[i]); - old_b += vp8_cost_branch(bct[i], Pcur[i]); + if (p->skip_eob_node) { + n--; + i = 2; } - while (++i < n); - if (new_b + (n << 8) < old_b) - { - int j = 0; + do { + const int bb = (v >> --n) & 1; + split = 1 + (((range - 1) * pp[i >> 1]) >> 8); + i = vp8_coef_tree[i + bb]; - vp8_write_bit(w, 1); + if (bb) { + lowvalue += split; + range = range - split; + } else { + range = split; + } - do - { - const vp8_prob p = Pnew[j]; + shift = vp8_norm[range]; + range <<= shift; + count += shift; - vp8_write_literal(w, Pcur[j] = p ? 
p : 1, 8); - } - while (++j < n); - } - else - vp8_write_bit(w, 0); -} + if (count >= 0) { + int offset = shift - count; -static void update_mbintra_mode_probs(VP8_COMP *cpi) -{ - VP8_COMMON *const x = & cpi->common; + if ((lowvalue << (offset - 1)) & 0x80000000) { + int x = w->pos - 1; - vp8_writer *const w = cpi->bc; + while (x >= 0 && w->buffer[x] == 0xff) { + w->buffer[x] = (unsigned char)0; + x--; + } - { - vp8_prob Pnew [VP8_YMODES-1]; - unsigned int bct [VP8_YMODES-1] [2]; - - update_mode( - w, VP8_YMODES, vp8_ymode_encodings, vp8_ymode_tree, - Pnew, x->fc.ymode_prob, bct, (unsigned int *)cpi->mb.ymode_count - ); - } - { - vp8_prob Pnew [VP8_UV_MODES-1]; - unsigned int bct [VP8_UV_MODES-1] [2]; - - update_mode( - w, VP8_UV_MODES, vp8_uv_mode_encodings, vp8_uv_mode_tree, - Pnew, x->fc.uv_mode_prob, bct, (unsigned int *)cpi->mb.uv_mode_count - ); - } -} - -static void write_ymode(vp8_writer *bc, int m, const vp8_prob *p) -{ - vp8_write_token(bc, vp8_ymode_tree, p, vp8_ymode_encodings + m); -} - -static void kfwrite_ymode(vp8_writer *bc, int m, const vp8_prob *p) -{ - vp8_write_token(bc, vp8_kf_ymode_tree, p, vp8_kf_ymode_encodings + m); -} - -static void write_uv_mode(vp8_writer *bc, int m, const vp8_prob *p) -{ - vp8_write_token(bc, vp8_uv_mode_tree, p, vp8_uv_mode_encodings + m); -} - - -static void write_bmode(vp8_writer *bc, int m, const vp8_prob *p) -{ - vp8_write_token(bc, vp8_bmode_tree, p, vp8_bmode_encodings + m); -} - -static void write_split(vp8_writer *bc, int x) -{ - vp8_write_token( - bc, vp8_mbsplit_tree, vp8_mbsplit_probs, vp8_mbsplit_encodings + x - ); -} - -void vp8_pack_tokens(vp8_writer *w, const TOKENEXTRA *p, int xcount) -{ - const TOKENEXTRA *stop = p + xcount; - unsigned int split; - unsigned int shift; - int count = w->count; - unsigned int range = w->range; - unsigned int lowvalue = w->lowvalue; - - while (p < stop) - { - const int t = p->Token; - vp8_token *a = vp8_coef_encodings + t; - const vp8_extra_bit_struct *b = vp8_extra_bits 
+ t; - int i = 0; - const unsigned char *pp = p->context_tree; - int v = a->value; - int n = a->Len; - - if (p->skip_eob_node) - { - n--; - i = 2; + w->buffer[x] += 1; } - do - { - const int bb = (v >> --n) & 1; - split = 1 + (((range - 1) * pp[i>>1]) >> 8); - i = vp8_coef_tree[i+bb]; + validate_buffer(w->buffer + w->pos, 1, w->buffer_end, w->error); - if (bb) - { - lowvalue += split; - range = range - split; - } - else - { - range = split; + w->buffer[w->pos++] = (lowvalue >> (24 - offset)); + lowvalue <<= offset; + shift = count; + lowvalue &= 0xffffff; + count -= 8; + } + + lowvalue <<= shift; + } while (n); + + if (b->base_val) { + const int e = p->Extra, L = b->Len; + + if (L) { + const unsigned char *proba = b->prob; + const int v2 = e >> 1; + int n2 = L; /* number of bits in v2, assumed nonzero */ + i = 0; + + do { + const int bb = (v2 >> --n2) & 1; + split = 1 + (((range - 1) * proba[i >> 1]) >> 8); + i = b->tree[i + bb]; + + if (bb) { + lowvalue += split; + range = range - split; + } else { + range = split; + } + + shift = vp8_norm[range]; + range <<= shift; + count += shift; + + if (count >= 0) { + int offset = shift - count; + + if ((lowvalue << (offset - 1)) & 0x80000000) { + int x = w->pos - 1; + + while (x >= 0 && w->buffer[x] == 0xff) { + w->buffer[x] = (unsigned char)0; + x--; + } + + w->buffer[x] += 1; } - shift = vp8_norm[range]; - range <<= shift; - count += shift; + validate_buffer(w->buffer + w->pos, 1, w->buffer_end, w->error); - if (count >= 0) - { - int offset = shift - count; + w->buffer[w->pos++] = (lowvalue >> (24 - offset)); + lowvalue <<= offset; + shift = count; + lowvalue &= 0xffffff; + count -= 8; + } - if ((lowvalue << (offset - 1)) & 0x80000000) - { - int x = w->pos - 1; + lowvalue <<= shift; + } while (n2); + } - while (x >= 0 && w->buffer[x] == 0xff) - { - w->buffer[x] = (unsigned char)0; - x--; - } - - w->buffer[x] += 1; - } - - validate_buffer(w->buffer + w->pos, - 1, - w->buffer_end, - w->error); - - w->buffer[w->pos++] = 
(lowvalue >> (24 - offset)); - lowvalue <<= offset; - shift = count; - lowvalue &= 0xffffff; - count -= 8 ; - } - - lowvalue <<= shift; - } - while (n); - - - if (b->base_val) - { - const int e = p->Extra, L = b->Len; - - if (L) - { - const unsigned char *proba = b->prob; - const int v2 = e >> 1; - int n2 = L; /* number of bits in v2, assumed nonzero */ - i = 0; - - do - { - const int bb = (v2 >> --n2) & 1; - split = 1 + (((range - 1) * proba[i>>1]) >> 8); - i = b->tree[i+bb]; - - if (bb) - { - lowvalue += split; - range = range - split; - } - else - { - range = split; - } - - shift = vp8_norm[range]; - range <<= shift; - count += shift; - - if (count >= 0) - { - int offset = shift - count; - - if ((lowvalue << (offset - 1)) & 0x80000000) - { - int x = w->pos - 1; - - while (x >= 0 && w->buffer[x] == 0xff) - { - w->buffer[x] = (unsigned char)0; - x--; - } - - w->buffer[x] += 1; - } - - validate_buffer(w->buffer + w->pos, - 1, - w->buffer_end, - w->error); - - w->buffer[w->pos++] = (lowvalue >> (24 - offset)); - lowvalue <<= offset; - shift = count; - lowvalue &= 0xffffff; - count -= 8 ; - } - - lowvalue <<= shift; - } - while (n2); - } - - - { - - split = (range + 1) >> 1; - - if (e & 1) - { - lowvalue += split; - range = range - split; - } - else - { - range = split; - } - - range <<= 1; - - if ((lowvalue & 0x80000000)) - { - int x = w->pos - 1; - - while (x >= 0 && w->buffer[x] == 0xff) - { - w->buffer[x] = (unsigned char)0; - x--; - } - - w->buffer[x] += 1; - - } - - lowvalue <<= 1; - - if (!++count) - { - count = -8; - - validate_buffer(w->buffer + w->pos, - 1, - w->buffer_end, - w->error); - - w->buffer[w->pos++] = (lowvalue >> 24); - lowvalue &= 0xffffff; - } - } + { + split = (range + 1) >> 1; + if (e & 1) { + lowvalue += split; + range = range - split; + } else { + range = split; } - ++p; + range <<= 1; + + if ((lowvalue & 0x80000000)) { + int x = w->pos - 1; + + while (x >= 0 && w->buffer[x] == 0xff) { + w->buffer[x] = (unsigned char)0; + x--; + } + + 
w->buffer[x] += 1; + } + + lowvalue <<= 1; + + if (!++count) { + count = -8; + + validate_buffer(w->buffer + w->pos, 1, w->buffer_end, w->error); + + w->buffer[w->pos++] = (lowvalue >> 24); + lowvalue &= 0xffffff; + } + } } - w->count = count; - w->lowvalue = lowvalue; - w->range = range; + ++p; + } + w->count = count; + w->lowvalue = lowvalue; + w->range = range; } -static void write_partition_size(unsigned char *cx_data, int size) -{ - signed char csize; - - csize = size & 0xff; - *cx_data = csize; - csize = (size >> 8) & 0xff; - *(cx_data + 1) = csize; - csize = (size >> 16) & 0xff; - *(cx_data + 2) = csize; +static void write_partition_size(unsigned char *cx_data, int size) { + signed char csize; + csize = size & 0xff; + *cx_data = csize; + csize = (size >> 8) & 0xff; + *(cx_data + 1) = csize; + csize = (size >> 16) & 0xff; + *(cx_data + 2) = csize; } static void pack_tokens_into_partitions(VP8_COMP *cpi, unsigned char *cx_data, - unsigned char * cx_data_end, - int num_part) -{ + unsigned char *cx_data_end, + int num_part) { + int i; + unsigned char *ptr = cx_data; + unsigned char *ptr_end = cx_data_end; + vp8_writer *w; - int i; - unsigned char *ptr = cx_data; - unsigned char *ptr_end = cx_data_end; - vp8_writer * w; - - for (i = 0; i < num_part; i++) - { - int mb_row; - - w = cpi->bc + i + 1; - - vp8_start_encode(w, ptr, ptr_end); - - for (mb_row = i; mb_row < cpi->common.mb_rows; mb_row += num_part) - { - const TOKENEXTRA *p = cpi->tplist[mb_row].start; - const TOKENEXTRA *stop = cpi->tplist[mb_row].stop; - int tokens = (int)(stop - p); - - vp8_pack_tokens(w, p, tokens); - } - - vp8_stop_encode(w); - ptr += w->pos; - } -} - - -#if CONFIG_MULTITHREAD -static void pack_mb_row_tokens(VP8_COMP *cpi, vp8_writer *w) -{ + for (i = 0; i < num_part; ++i) { int mb_row; - for (mb_row = 0; mb_row < cpi->common.mb_rows; mb_row++) - { - const TOKENEXTRA *p = cpi->tplist[mb_row].start; - const TOKENEXTRA *stop = cpi->tplist[mb_row].stop; - int tokens = (int)(stop - p); + w 
= cpi->bc + i + 1; - vp8_pack_tokens(w, p, tokens); + vp8_start_encode(w, ptr, ptr_end); + + for (mb_row = i; mb_row < cpi->common.mb_rows; mb_row += num_part) { + const TOKENEXTRA *p = cpi->tplist[mb_row].start; + const TOKENEXTRA *stop = cpi->tplist[mb_row].stop; + int tokens = (int)(stop - p); + + vp8_pack_tokens(w, p, tokens); } + vp8_stop_encode(w); + ptr += w->pos; + } +} + +#if CONFIG_MULTITHREAD +static void pack_mb_row_tokens(VP8_COMP *cpi, vp8_writer *w) { + int mb_row; + + for (mb_row = 0; mb_row < cpi->common.mb_rows; ++mb_row) { + const TOKENEXTRA *p = cpi->tplist[mb_row].start; + const TOKENEXTRA *stop = cpi->tplist[mb_row].stop; + int tokens = (int)(stop - p); + + vp8_pack_tokens(w, p, tokens); + } } #endif // CONFIG_MULTITHREAD -static void write_mv_ref -( - vp8_writer *w, MB_PREDICTION_MODE m, const vp8_prob *p -) -{ -#if CONFIG_DEBUG - assert(NEARESTMV <= m && m <= SPLITMV); +static void write_mv_ref(vp8_writer *w, MB_PREDICTION_MODE m, + const vp8_prob *p) { + assert(NEARESTMV <= m && m <= SPLITMV); + vp8_write_token(w, vp8_mv_ref_tree, p, + vp8_mv_ref_encoding_array + (m - NEARESTMV)); +} + +static void write_sub_mv_ref(vp8_writer *w, B_PREDICTION_MODE m, + const vp8_prob *p) { + assert(LEFT4X4 <= m && m <= NEW4X4); + vp8_write_token(w, vp8_sub_mv_ref_tree, p, + vp8_sub_mv_ref_encoding_array + (m - LEFT4X4)); +} + +static void write_mv(vp8_writer *w, const MV *mv, const int_mv *ref, + const MV_CONTEXT *mvc) { + MV e; + e.row = mv->row - ref->as_mv.row; + e.col = mv->col - ref->as_mv.col; + + vp8_encode_motion_vector(w, &e, mvc); +} + +static void write_mb_features(vp8_writer *w, const MB_MODE_INFO *mi, + const MACROBLOCKD *x) { + /* Encode the MB segment id. 
*/ + if (x->segmentation_enabled && x->update_mb_segmentation_map) { + switch (mi->segment_id) { + case 0: + vp8_write(w, 0, x->mb_segment_tree_probs[0]); + vp8_write(w, 0, x->mb_segment_tree_probs[1]); + break; + case 1: + vp8_write(w, 0, x->mb_segment_tree_probs[0]); + vp8_write(w, 1, x->mb_segment_tree_probs[1]); + break; + case 2: + vp8_write(w, 1, x->mb_segment_tree_probs[0]); + vp8_write(w, 0, x->mb_segment_tree_probs[2]); + break; + case 3: + vp8_write(w, 1, x->mb_segment_tree_probs[0]); + vp8_write(w, 1, x->mb_segment_tree_probs[2]); + break; + + /* TRAP.. This should not happen */ + default: + vp8_write(w, 0, x->mb_segment_tree_probs[0]); + vp8_write(w, 0, x->mb_segment_tree_probs[1]); + break; + } + } +} +void vp8_convert_rfct_to_prob(VP8_COMP *const cpi) { + const int *const rfct = cpi->mb.count_mb_ref_frame_usage; + const int rf_intra = rfct[INTRA_FRAME]; + const int rf_inter = + rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]; + + /* Calculate the probabilities used to code the ref frame based on usage */ + if (!(cpi->prob_intra_coded = rf_intra * 255 / (rf_intra + rf_inter))) { + cpi->prob_intra_coded = 1; + } + + cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128; + + if (!cpi->prob_last_coded) cpi->prob_last_coded = 1; + + cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) + ? 
(rfct[GOLDEN_FRAME] * 255) / + (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) + : 128; + + if (!cpi->prob_gf_coded) cpi->prob_gf_coded = 1; +} + +static void pack_inter_mode_mvs(VP8_COMP *const cpi) { + VP8_COMMON *const pc = &cpi->common; + vp8_writer *const w = cpi->bc; + const MV_CONTEXT *mvc = pc->fc.mvc; + + MODE_INFO *m = pc->mi; + const int mis = pc->mode_info_stride; + int mb_row = -1; + + int prob_skip_false = 0; + + cpi->mb.partition_info = cpi->mb.pi; + + vp8_convert_rfct_to_prob(cpi); + +#ifdef VP8_ENTROPY_STATS + active_section = 1; #endif - vp8_write_token(w, vp8_mv_ref_tree, p, - vp8_mv_ref_encoding_array + (m - NEARESTMV)); -} -static void write_sub_mv_ref -( - vp8_writer *w, B_PREDICTION_MODE m, const vp8_prob *p -) -{ -#if CONFIG_DEBUG - assert(LEFT4X4 <= m && m <= NEW4X4); + if (pc->mb_no_coeff_skip) { + int total_mbs = pc->mb_rows * pc->mb_cols; + + prob_skip_false = (total_mbs - cpi->mb.skip_true_count) * 256 / total_mbs; + + if (prob_skip_false <= 1) prob_skip_false = 1; + + if (prob_skip_false > 255) prob_skip_false = 255; + + cpi->prob_skip_false = prob_skip_false; + vp8_write_literal(w, prob_skip_false, 8); + } + + vp8_write_literal(w, cpi->prob_intra_coded, 8); + vp8_write_literal(w, cpi->prob_last_coded, 8); + vp8_write_literal(w, cpi->prob_gf_coded, 8); + + update_mbintra_mode_probs(cpi); + + vp8_write_mvprobs(cpi); + + while (++mb_row < pc->mb_rows) { + int mb_col = -1; + + while (++mb_col < pc->mb_cols) { + const MB_MODE_INFO *const mi = &m->mbmi; + const MV_REFERENCE_FRAME rf = mi->ref_frame; + const MB_PREDICTION_MODE mode = mi->mode; + + MACROBLOCKD *xd = &cpi->mb.e_mbd; + + /* Distance of Mb to the various image edges. 
+ * These specified to 8th pel as they are always compared to MV + * values that are in 1/8th pel units + */ + xd->mb_to_left_edge = -((mb_col * 16) << 3); + xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3; + xd->mb_to_top_edge = -((mb_row * 16) << 3); + xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3; + +#ifdef VP8_ENTROPY_STATS + active_section = 9; #endif - vp8_write_token(w, vp8_sub_mv_ref_tree, p, - vp8_sub_mv_ref_encoding_array + (m - LEFT4X4)); -} -static void write_mv -( - vp8_writer *w, const MV *mv, const int_mv *ref, const MV_CONTEXT *mvc -) -{ - MV e; - e.row = mv->row - ref->as_mv.row; - e.col = mv->col - ref->as_mv.col; + if (cpi->mb.e_mbd.update_mb_segmentation_map) { + write_mb_features(w, mi, &cpi->mb.e_mbd); + } - vp8_encode_motion_vector(w, &e, mvc); -} + if (pc->mb_no_coeff_skip) { + vp8_encode_bool(w, m->mbmi.mb_skip_coeff, prob_skip_false); + } -static void write_mb_features(vp8_writer *w, const MB_MODE_INFO *mi, const MACROBLOCKD *x) -{ - /* Encode the MB segment id. */ - if (x->segmentation_enabled && x->update_mb_segmentation_map) - { - switch (mi->segment_id) - { - case 0: - vp8_write(w, 0, x->mb_segment_tree_probs[0]); - vp8_write(w, 0, x->mb_segment_tree_probs[1]); - break; - case 1: - vp8_write(w, 0, x->mb_segment_tree_probs[0]); - vp8_write(w, 1, x->mb_segment_tree_probs[1]); - break; - case 2: - vp8_write(w, 1, x->mb_segment_tree_probs[0]); - vp8_write(w, 0, x->mb_segment_tree_probs[2]); - break; - case 3: - vp8_write(w, 1, x->mb_segment_tree_probs[0]); - vp8_write(w, 1, x->mb_segment_tree_probs[2]); - break; + if (rf == INTRA_FRAME) { + vp8_write(w, 0, cpi->prob_intra_coded); +#ifdef VP8_ENTROPY_STATS + active_section = 6; +#endif + write_ymode(w, mode, pc->fc.ymode_prob); - /* TRAP.. 
This should not happen */ - default: - vp8_write(w, 0, x->mb_segment_tree_probs[0]); - vp8_write(w, 0, x->mb_segment_tree_probs[1]); - break; + if (mode == B_PRED) { + int j = 0; + + do { + write_bmode(w, m->bmi[j].as_mode, pc->fc.bmode_prob); + } while (++j < 16); } - } -} -void vp8_convert_rfct_to_prob(VP8_COMP *const cpi) -{ - const int *const rfct = cpi->mb.count_mb_ref_frame_usage; - const int rf_intra = rfct[INTRA_FRAME]; - const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]; - /* Calculate the probabilities used to code the ref frame based on usage */ - if (!(cpi->prob_intra_coded = rf_intra * 255 / (rf_intra + rf_inter))) - cpi->prob_intra_coded = 1; + write_uv_mode(w, mi->uv_mode, pc->fc.uv_mode_prob); + } else /* inter coded */ + { + int_mv best_mv; + vp8_prob mv_ref_p[VP8_MVREFS - 1]; - cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128; + vp8_write(w, 1, cpi->prob_intra_coded); - if (!cpi->prob_last_coded) - cpi->prob_last_coded = 1; + if (rf == LAST_FRAME) + vp8_write(w, 0, cpi->prob_last_coded); + else { + vp8_write(w, 1, cpi->prob_last_coded); + vp8_write(w, (rf == GOLDEN_FRAME) ? 0 : 1, cpi->prob_gf_coded); + } - cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) - ? 
(rfct[GOLDEN_FRAME] * 255) / (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128; - - if (!cpi->prob_gf_coded) - cpi->prob_gf_coded = 1; - -} - -static void pack_inter_mode_mvs(VP8_COMP *const cpi) -{ - VP8_COMMON *const pc = & cpi->common; - vp8_writer *const w = cpi->bc; - const MV_CONTEXT *mvc = pc->fc.mvc; - - - MODE_INFO *m = pc->mi; - const int mis = pc->mode_info_stride; - int mb_row = -1; - - int prob_skip_false = 0; - - cpi->mb.partition_info = cpi->mb.pi; - - vp8_convert_rfct_to_prob(cpi); - -#ifdef VP8_ENTROPY_STATS - active_section = 1; -#endif - - if (pc->mb_no_coeff_skip) - { - int total_mbs = pc->mb_rows * pc->mb_cols; - - prob_skip_false = (total_mbs - cpi->mb.skip_true_count ) * 256 / total_mbs; - - if (prob_skip_false <= 1) - prob_skip_false = 1; - - if (prob_skip_false > 255) - prob_skip_false = 255; - - cpi->prob_skip_false = prob_skip_false; - vp8_write_literal(w, prob_skip_false, 8); - } - - vp8_write_literal(w, cpi->prob_intra_coded, 8); - vp8_write_literal(w, cpi->prob_last_coded, 8); - vp8_write_literal(w, cpi->prob_gf_coded, 8); - - update_mbintra_mode_probs(cpi); - - vp8_write_mvprobs(cpi); - - while (++mb_row < pc->mb_rows) - { - int mb_col = -1; - - while (++mb_col < pc->mb_cols) { - const MB_MODE_INFO *const mi = & m->mbmi; - const MV_REFERENCE_FRAME rf = mi->ref_frame; - const MB_PREDICTION_MODE mode = mi->mode; + int_mv n1, n2; + int ct[4]; - MACROBLOCKD *xd = &cpi->mb.e_mbd; + vp8_find_near_mvs(xd, m, &n1, &n2, &best_mv, ct, rf, + cpi->common.ref_frame_sign_bias); + vp8_clamp_mv2(&best_mv, xd); - /* Distance of Mb to the various image edges. 
- * These specified to 8th pel as they are always compared to MV - * values that are in 1/8th pel units - */ - xd->mb_to_left_edge = -((mb_col * 16) << 3); - xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3; - xd->mb_to_top_edge = -((mb_row * 16) << 3); - xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3; + vp8_mv_ref_probs(mv_ref_p, ct); #ifdef VP8_ENTROPY_STATS - active_section = 9; + accum_mv_refs(mode, ct); #endif - - if (cpi->mb.e_mbd.update_mb_segmentation_map) - write_mb_features(w, mi, &cpi->mb.e_mbd); - - if (pc->mb_no_coeff_skip) - vp8_encode_bool(w, m->mbmi.mb_skip_coeff, prob_skip_false); - - if (rf == INTRA_FRAME) - { - vp8_write(w, 0, cpi->prob_intra_coded); -#ifdef VP8_ENTROPY_STATS - active_section = 6; -#endif - write_ymode(w, mode, pc->fc.ymode_prob); - - if (mode == B_PRED) - { - int j = 0; - - do - write_bmode(w, m->bmi[j].as_mode, pc->fc.bmode_prob); - while (++j < 16); - } - - write_uv_mode(w, mi->uv_mode, pc->fc.uv_mode_prob); - } - else /* inter coded */ - { - int_mv best_mv; - vp8_prob mv_ref_p [VP8_MVREFS-1]; - - vp8_write(w, 1, cpi->prob_intra_coded); - - if (rf == LAST_FRAME) - vp8_write(w, 0, cpi->prob_last_coded); - else - { - vp8_write(w, 1, cpi->prob_last_coded); - vp8_write(w, (rf == GOLDEN_FRAME) ? 
0 : 1, cpi->prob_gf_coded); - } - - { - int_mv n1, n2; - int ct[4]; - - vp8_find_near_mvs(xd, m, &n1, &n2, &best_mv, ct, rf, cpi->common.ref_frame_sign_bias); - vp8_clamp_mv2(&best_mv, xd); - - vp8_mv_ref_probs(mv_ref_p, ct); + } #ifdef VP8_ENTROPY_STATS - accum_mv_refs(mode, ct); + active_section = 3; #endif - } + write_mv_ref(w, mode, mv_ref_p); + + switch (mode) /* new, split require MVs */ + { + case NEWMV: #ifdef VP8_ENTROPY_STATS - active_section = 3; + active_section = 5; #endif - write_mv_ref(w, mode, mv_ref_p); + write_mv(w, &mi->mv.as_mv, &best_mv, mvc); + break; - switch (mode) /* new, split require MVs */ - { - case NEWMV: - -#ifdef VP8_ENTROPY_STATS - active_section = 5; -#endif - - write_mv(w, &mi->mv.as_mv, &best_mv, mvc); - break; - - case SPLITMV: - { - int j = 0; + case SPLITMV: { + int j = 0; #ifdef MODE_STATS - ++count_mb_seg [mi->partitioning]; + ++count_mb_seg[mi->partitioning]; #endif - write_split(w, mi->partitioning); + write_split(w, mi->partitioning); - do - { - B_PREDICTION_MODE blockmode; - int_mv blockmv; - const int *const L = vp8_mbsplits [mi->partitioning]; - int k = -1; /* first block in subset j */ - int mv_contz; - int_mv leftmv, abovemv; + do { + B_PREDICTION_MODE blockmode; + int_mv blockmv; + const int *const L = vp8_mbsplits[mi->partitioning]; + int k = -1; /* first block in subset j */ + int mv_contz; + int_mv leftmv, abovemv; - blockmode = cpi->mb.partition_info->bmi[j].mode; - blockmv = cpi->mb.partition_info->bmi[j].mv; -#if CONFIG_DEBUG - while (j != L[++k]) - if (k >= 16) - assert(0); -#else - while (j != L[++k]); -#endif - leftmv.as_int = left_block_mv(m, k); - abovemv.as_int = above_block_mv(m, k, mis); - mv_contz = vp8_mv_cont(&leftmv, &abovemv); + blockmode = cpi->mb.partition_info->bmi[j].mode; + blockmv = cpi->mb.partition_info->bmi[j].mv; + while (j != L[++k]) { + assert(k < 16); + } + leftmv.as_int = left_block_mv(m, k); + abovemv.as_int = above_block_mv(m, k, mis); + mv_contz = vp8_mv_cont(&leftmv, &abovemv); - 
write_sub_mv_ref(w, blockmode, vp8_sub_mv_ref_prob2 [mv_contz]); + write_sub_mv_ref(w, blockmode, vp8_sub_mv_ref_prob2[mv_contz]); - if (blockmode == NEW4X4) - { + if (blockmode == NEW4X4) { #ifdef VP8_ENTROPY_STATS - active_section = 11; + active_section = 11; #endif - write_mv(w, &blockmv.as_mv, &best_mv, (const MV_CONTEXT *) mvc); - } - } - while (++j < cpi->mb.partition_info->count); - } - break; - default: - break; - } - } - - ++m; - cpi->mb.partition_info++; + write_mv(w, &blockmv.as_mv, &best_mv, (const MV_CONTEXT *)mvc); + } + } while (++j < cpi->mb.partition_info->count); + break; + } + default: break; } + } - ++m; /* skip L prediction border */ - cpi->mb.partition_info++; + ++m; + cpi->mb.partition_info++; } + + ++m; /* skip L prediction border */ + cpi->mb.partition_info++; + } } +static void write_kfmodes(VP8_COMP *cpi) { + vp8_writer *const bc = cpi->bc; + const VP8_COMMON *const c = &cpi->common; + /* const */ + MODE_INFO *m = c->mi; -static void write_kfmodes(VP8_COMP *cpi) -{ - vp8_writer *const bc = cpi->bc; - const VP8_COMMON *const c = & cpi->common; - /* const */ - MODE_INFO *m = c->mi; + int mb_row = -1; + int prob_skip_false = 0; - int mb_row = -1; - int prob_skip_false = 0; + if (c->mb_no_coeff_skip) { + int total_mbs = c->mb_rows * c->mb_cols; - if (c->mb_no_coeff_skip) - { - int total_mbs = c->mb_rows * c->mb_cols; + prob_skip_false = (total_mbs - cpi->mb.skip_true_count) * 256 / total_mbs; - prob_skip_false = (total_mbs - cpi->mb.skip_true_count ) * 256 / total_mbs; + if (prob_skip_false <= 1) prob_skip_false = 1; - if (prob_skip_false <= 1) - prob_skip_false = 1; + if (prob_skip_false >= 255) prob_skip_false = 255; - if (prob_skip_false >= 255) - prob_skip_false = 255; + cpi->prob_skip_false = prob_skip_false; + vp8_write_literal(bc, prob_skip_false, 8); + } - cpi->prob_skip_false = prob_skip_false; - vp8_write_literal(bc, prob_skip_false, 8); - } + while (++mb_row < c->mb_rows) { + int mb_col = -1; - while (++mb_row < c->mb_rows) - { - 
int mb_col = -1; + while (++mb_col < c->mb_cols) { + const int ym = m->mbmi.mode; - while (++mb_col < c->mb_cols) - { - const int ym = m->mbmi.mode; + if (cpi->mb.e_mbd.update_mb_segmentation_map) { + write_mb_features(bc, &m->mbmi, &cpi->mb.e_mbd); + } - if (cpi->mb.e_mbd.update_mb_segmentation_map) - write_mb_features(bc, &m->mbmi, &cpi->mb.e_mbd); + if (c->mb_no_coeff_skip) { + vp8_encode_bool(bc, m->mbmi.mb_skip_coeff, prob_skip_false); + } - if (c->mb_no_coeff_skip) - vp8_encode_bool(bc, m->mbmi.mb_skip_coeff, prob_skip_false); + kfwrite_ymode(bc, ym, vp8_kf_ymode_prob); - kfwrite_ymode(bc, ym, vp8_kf_ymode_prob); + if (ym == B_PRED) { + const int mis = c->mode_info_stride; + int i = 0; - if (ym == B_PRED) - { - const int mis = c->mode_info_stride; - int i = 0; - - do - { - const B_PREDICTION_MODE A = above_block_mode(m, i, mis); - const B_PREDICTION_MODE L = left_block_mode(m, i); - const int bm = m->bmi[i].as_mode; + do { + const B_PREDICTION_MODE A = above_block_mode(m, i, mis); + const B_PREDICTION_MODE L = left_block_mode(m, i); + const int bm = m->bmi[i].as_mode; #ifdef VP8_ENTROPY_STATS - ++intra_mode_stats [A] [L] [bm]; + ++intra_mode_stats[A][L][bm]; #endif - write_bmode(bc, bm, vp8_kf_bmode_prob [A] [L]); - } - while (++i < 16); - } + write_bmode(bc, bm, vp8_kf_bmode_prob[A][L]); + } while (++i < 16); + } - write_uv_mode(bc, (m++)->mbmi.uv_mode, vp8_kf_uv_mode_prob); - } - - m++; /* skip L prediction border */ + write_uv_mode(bc, (m++)->mbmi.uv_mode, vp8_kf_uv_mode_prob); } + + m++; /* skip L prediction border */ + } } #if 0 @@ -795,16 +666,16 @@ static void print_prob_tree(vp8_prob int i,j,k,l; FILE* f = fopen("enc_tree_probs.txt", "a"); fprintf(f, "{\n"); - for (i = 0; i < BLOCK_TYPES; i++) + for (i = 0; i < BLOCK_TYPES; ++i) { fprintf(f, " {\n"); - for (j = 0; j < COEF_BANDS; j++) + for (j = 0; j < COEF_BANDS; ++j) { fprintf(f, " {\n"); - for (k = 0; k < PREV_COEF_CONTEXTS; k++) + for (k = 0; k < PREV_COEF_CONTEXTS; ++k) { fprintf(f, " {"); - for 
(l = 0; l < ENTROPY_NODES; l++) + for (l = 0; l < ENTROPY_NODES; ++l) { fprintf(f, "%3u, ", (unsigned int)(coef_probs [i][j][k][l])); @@ -821,427 +692,349 @@ static void print_prob_tree(vp8_prob #endif static void sum_probs_over_prev_coef_context( - const unsigned int probs[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS], - unsigned int* out) -{ - int i, j; - for (i=0; i < MAX_ENTROPY_TOKENS; ++i) - { - for (j=0; j < PREV_COEF_CONTEXTS; ++j) - { - const unsigned int tmp = out[i]; - out[i] += probs[j][i]; - /* check for wrap */ - if (out[i] < tmp) - out[i] = UINT_MAX; + const unsigned int probs[PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS], + unsigned int *out) { + int i, j; + for (i = 0; i < MAX_ENTROPY_TOKENS; ++i) { + for (j = 0; j < PREV_COEF_CONTEXTS; ++j) { + const unsigned int tmp = out[i]; + out[i] += probs[j][i]; + /* check for wrap */ + if (out[i] < tmp) out[i] = UINT_MAX; + } + } +} + +static int prob_update_savings(const unsigned int *ct, const vp8_prob oldp, + const vp8_prob newp, const vp8_prob upd) { + const int old_b = vp8_cost_branch(ct, oldp); + const int new_b = vp8_cost_branch(ct, newp); + const int update_b = 8 + ((vp8_cost_one(upd) - vp8_cost_zero(upd)) >> 8); + + return old_b - new_b - update_b; +} + +static int independent_coef_context_savings(VP8_COMP *cpi) { + MACROBLOCK *const x = &cpi->mb; + int savings = 0; + int i = 0; + do { + int j = 0; + do { + int k = 0; + unsigned int prev_coef_count_sum[MAX_ENTROPY_TOKENS] = { 0 }; + int prev_coef_savings[MAX_ENTROPY_TOKENS] = { 0 }; + const unsigned int(*probs)[MAX_ENTROPY_TOKENS]; + /* Calculate new probabilities given the constraint that + * they must be equal over the prev coef contexts + */ + + probs = (const unsigned int(*)[MAX_ENTROPY_TOKENS])x->coef_counts[i][j]; + + /* Reset to default probabilities at key frames */ + if (cpi->common.frame_type == KEY_FRAME) { + probs = default_coef_counts[i][j]; + } + + sum_probs_over_prev_coef_context(probs, prev_coef_count_sum); + + do { + /* at every context */ + + 
/* calc probs and branch cts for this frame only */ + int t = 0; /* token/prob index */ + + vp8_tree_probs_from_distribution( + MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + cpi->frame_coef_probs[i][j][k], cpi->frame_branch_ct[i][j][k], + prev_coef_count_sum, 256, 1); + + do { + const unsigned int *ct = cpi->frame_branch_ct[i][j][k][t]; + const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t]; + const vp8_prob oldp = cpi->common.fc.coef_probs[i][j][k][t]; + const vp8_prob upd = vp8_coef_update_probs[i][j][k][t]; + const int s = prob_update_savings(ct, oldp, newp, upd); + + if (cpi->common.frame_type != KEY_FRAME || + (cpi->common.frame_type == KEY_FRAME && newp != oldp)) { + prev_coef_savings[t] += s; + } + } while (++t < ENTROPY_NODES); + } while (++k < PREV_COEF_CONTEXTS); + k = 0; + do { + /* We only update probabilities if we can save bits, except + * for key frames where we have to update all probabilities + * to get the equal probabilities across the prev coef + * contexts. 
+ */ + if (prev_coef_savings[k] > 0 || cpi->common.frame_type == KEY_FRAME) { + savings += prev_coef_savings[k]; } - } + } while (++k < ENTROPY_NODES); + } while (++j < COEF_BANDS); + } while (++i < BLOCK_TYPES); + return savings; } -static int prob_update_savings(const unsigned int *ct, - const vp8_prob oldp, const vp8_prob newp, - const vp8_prob upd) -{ - const int old_b = vp8_cost_branch(ct, oldp); - const int new_b = vp8_cost_branch(ct, newp); - const int update_b = 8 + - ((vp8_cost_one(upd) - vp8_cost_zero(upd)) >> 8); +static int default_coef_context_savings(VP8_COMP *cpi) { + MACROBLOCK *const x = &cpi->mb; + int savings = 0; + int i = 0; + do { + int j = 0; + do { + int k = 0; + do { + /* at every context */ - return old_b - new_b - update_b; + /* calc probs and branch cts for this frame only */ + int t = 0; /* token/prob index */ + + vp8_tree_probs_from_distribution( + MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, + cpi->frame_coef_probs[i][j][k], cpi->frame_branch_ct[i][j][k], + x->coef_counts[i][j][k], 256, 1); + + do { + const unsigned int *ct = cpi->frame_branch_ct[i][j][k][t]; + const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t]; + const vp8_prob oldp = cpi->common.fc.coef_probs[i][j][k][t]; + const vp8_prob upd = vp8_coef_update_probs[i][j][k][t]; + const int s = prob_update_savings(ct, oldp, newp, upd); + + if (s > 0) { + savings += s; + } + } while (++t < ENTROPY_NODES); + } while (++k < PREV_COEF_CONTEXTS); + } while (++j < COEF_BANDS); + } while (++i < BLOCK_TYPES); + return savings; } -static int independent_coef_context_savings(VP8_COMP *cpi) -{ - MACROBLOCK *const x = & cpi->mb; - int savings = 0; - int i = 0; - do - { - int j = 0; - do - { - int k = 0; - unsigned int prev_coef_count_sum[MAX_ENTROPY_TOKENS] = {0}; - int prev_coef_savings[MAX_ENTROPY_TOKENS] = {0}; - const unsigned int (*probs)[MAX_ENTROPY_TOKENS]; - /* Calculate new probabilities given the constraint that - * they must be equal over the prev coef contexts - */ - 
- probs = (const unsigned int (*)[MAX_ENTROPY_TOKENS]) - x->coef_counts[i][j]; - - /* Reset to default probabilities at key frames */ - if (cpi->common.frame_type == KEY_FRAME) - probs = default_coef_counts[i][j]; - - sum_probs_over_prev_coef_context(probs, prev_coef_count_sum); - - do - { - /* at every context */ - - /* calc probs and branch cts for this frame only */ - int t = 0; /* token/prob index */ - - vp8_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, - cpi->frame_coef_probs[i][j][k], - cpi->frame_branch_ct [i][j][k], - prev_coef_count_sum, - 256, 1); - - do - { - const unsigned int *ct = cpi->frame_branch_ct [i][j][k][t]; - const vp8_prob newp = cpi->frame_coef_probs [i][j][k][t]; - const vp8_prob oldp = cpi->common.fc.coef_probs [i][j][k][t]; - const vp8_prob upd = vp8_coef_update_probs [i][j][k][t]; - const int s = prob_update_savings(ct, oldp, newp, upd); - - if (cpi->common.frame_type != KEY_FRAME || - (cpi->common.frame_type == KEY_FRAME && newp != oldp)) - prev_coef_savings[t] += s; - } - while (++t < ENTROPY_NODES); - } - while (++k < PREV_COEF_CONTEXTS); - k = 0; - do - { - /* We only update probabilities if we can save bits, except - * for key frames where we have to update all probabilities - * to get the equal probabilities across the prev coef - * contexts. 
- */ - if (prev_coef_savings[k] > 0 || - cpi->common.frame_type == KEY_FRAME) - savings += prev_coef_savings[k]; - } - while (++k < ENTROPY_NODES); - } - while (++j < COEF_BANDS); - } - while (++i < BLOCK_TYPES); - return savings; +void vp8_calc_ref_frame_costs(int *ref_frame_cost, int prob_intra, + int prob_last, int prob_garf) { + assert(prob_intra >= 0); + assert(prob_intra <= 255); + assert(prob_last >= 0); + assert(prob_last <= 255); + assert(prob_garf >= 0); + assert(prob_garf <= 255); + ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(prob_intra); + ref_frame_cost[LAST_FRAME] = + vp8_cost_one(prob_intra) + vp8_cost_zero(prob_last); + ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(prob_intra) + + vp8_cost_one(prob_last) + + vp8_cost_zero(prob_garf); + ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(prob_intra) + + vp8_cost_one(prob_last) + + vp8_cost_one(prob_garf); } -static int default_coef_context_savings(VP8_COMP *cpi) -{ - MACROBLOCK *const x = & cpi->mb; - int savings = 0; - int i = 0; - do - { - int j = 0; - do - { - int k = 0; - do - { - /* at every context */ +int vp8_estimate_entropy_savings(VP8_COMP *cpi) { + int savings = 0; - /* calc probs and branch cts for this frame only */ - int t = 0; /* token/prob index */ + const int *const rfct = cpi->mb.count_mb_ref_frame_usage; + const int rf_intra = rfct[INTRA_FRAME]; + const int rf_inter = + rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]; + int new_intra, new_last, new_garf, oldtotal, newtotal; + int ref_frame_cost[MAX_REF_FRAMES]; - vp8_tree_probs_from_distribution( - MAX_ENTROPY_TOKENS, vp8_coef_encodings, vp8_coef_tree, - cpi->frame_coef_probs [i][j][k], - cpi->frame_branch_ct [i][j][k], - x->coef_counts [i][j][k], - 256, 1 - ); + vp8_clear_system_state(); - do - { - const unsigned int *ct = cpi->frame_branch_ct [i][j][k][t]; - const vp8_prob newp = cpi->frame_coef_probs [i][j][k][t]; - const vp8_prob oldp = cpi->common.fc.coef_probs [i][j][k][t]; - const vp8_prob upd = vp8_coef_update_probs 
[i][j][k][t]; - const int s = prob_update_savings(ct, oldp, newp, upd); + if (cpi->common.frame_type != KEY_FRAME) { + if (!(new_intra = rf_intra * 255 / (rf_intra + rf_inter))) new_intra = 1; - if (s > 0) - { - savings += s; - } - } - while (++t < ENTROPY_NODES); - } - while (++k < PREV_COEF_CONTEXTS); - } - while (++j < COEF_BANDS); - } - while (++i < BLOCK_TYPES); - return savings; -} + new_last = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128; -void vp8_calc_ref_frame_costs(int *ref_frame_cost, - int prob_intra, - int prob_last, - int prob_garf - ) -{ - assert(prob_intra >= 0); - assert(prob_intra <= 255); - assert(prob_last >= 0); - assert(prob_last <= 255); - assert(prob_garf >= 0); - assert(prob_garf <= 255); - ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(prob_intra); - ref_frame_cost[LAST_FRAME] = vp8_cost_one(prob_intra) - + vp8_cost_zero(prob_last); - ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(prob_intra) - + vp8_cost_one(prob_last) - + vp8_cost_zero(prob_garf); - ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(prob_intra) - + vp8_cost_one(prob_last) - + vp8_cost_one(prob_garf); + new_garf = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) + ? 
(rfct[GOLDEN_FRAME] * 255) / + (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) + : 128; -} + vp8_calc_ref_frame_costs(ref_frame_cost, new_intra, new_last, new_garf); -int vp8_estimate_entropy_savings(VP8_COMP *cpi) -{ - int savings = 0; + newtotal = rfct[INTRA_FRAME] * ref_frame_cost[INTRA_FRAME] + + rfct[LAST_FRAME] * ref_frame_cost[LAST_FRAME] + + rfct[GOLDEN_FRAME] * ref_frame_cost[GOLDEN_FRAME] + + rfct[ALTREF_FRAME] * ref_frame_cost[ALTREF_FRAME]; - const int *const rfct = cpi->mb.count_mb_ref_frame_usage; - const int rf_intra = rfct[INTRA_FRAME]; - const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]; - int new_intra, new_last, new_garf, oldtotal, newtotal; - int ref_frame_cost[MAX_REF_FRAMES]; + /* old costs */ + vp8_calc_ref_frame_costs(ref_frame_cost, cpi->prob_intra_coded, + cpi->prob_last_coded, cpi->prob_gf_coded); - vp8_clear_system_state(); + oldtotal = rfct[INTRA_FRAME] * ref_frame_cost[INTRA_FRAME] + + rfct[LAST_FRAME] * ref_frame_cost[LAST_FRAME] + + rfct[GOLDEN_FRAME] * ref_frame_cost[GOLDEN_FRAME] + + rfct[ALTREF_FRAME] * ref_frame_cost[ALTREF_FRAME]; - if (cpi->common.frame_type != KEY_FRAME) - { - if (!(new_intra = rf_intra * 255 / (rf_intra + rf_inter))) - new_intra = 1; + savings += (oldtotal - newtotal) / 256; + } - new_last = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128; + if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) { + savings += independent_coef_context_savings(cpi); + } else { + savings += default_coef_context_savings(cpi); + } - new_garf = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) - ? 
(rfct[GOLDEN_FRAME] * 255) / (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128; - - - vp8_calc_ref_frame_costs(ref_frame_cost,new_intra,new_last,new_garf); - - newtotal = - rfct[INTRA_FRAME] * ref_frame_cost[INTRA_FRAME] + - rfct[LAST_FRAME] * ref_frame_cost[LAST_FRAME] + - rfct[GOLDEN_FRAME] * ref_frame_cost[GOLDEN_FRAME] + - rfct[ALTREF_FRAME] * ref_frame_cost[ALTREF_FRAME]; - - - /* old costs */ - vp8_calc_ref_frame_costs(ref_frame_cost,cpi->prob_intra_coded, - cpi->prob_last_coded,cpi->prob_gf_coded); - - oldtotal = - rfct[INTRA_FRAME] * ref_frame_cost[INTRA_FRAME] + - rfct[LAST_FRAME] * ref_frame_cost[LAST_FRAME] + - rfct[GOLDEN_FRAME] * ref_frame_cost[GOLDEN_FRAME] + - rfct[ALTREF_FRAME] * ref_frame_cost[ALTREF_FRAME]; - - savings += (oldtotal - newtotal) / 256; - } - - - if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) - savings += independent_coef_context_savings(cpi); - else - savings += default_coef_context_savings(cpi); - - - return savings; + return savings; } #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING -int vp8_update_coef_context(VP8_COMP *cpi) -{ - int savings = 0; +int vp8_update_coef_context(VP8_COMP *cpi) { + int savings = 0; + if (cpi->common.frame_type == KEY_FRAME) { + /* Reset to default counts/probabilities at key frames */ + vp8_copy(cpi->mb.coef_counts, default_coef_counts); + } - if (cpi->common.frame_type == KEY_FRAME) - { - /* Reset to default counts/probabilities at key frames */ - vp8_copy(cpi->mb.coef_counts, default_coef_counts); - } + if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) + savings += independent_coef_context_savings(cpi); + else + savings += default_coef_context_savings(cpi); - if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) - savings += independent_coef_context_savings(cpi); - else - savings += default_coef_context_savings(cpi); - - return savings; + return savings; } #endif -void vp8_update_coef_probs(VP8_COMP *cpi) -{ - int i = 0; +void 
vp8_update_coef_probs(VP8_COMP *cpi) { + int i = 0; #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) - vp8_writer *const w = cpi->bc; + vp8_writer *const w = cpi->bc; #endif - int savings = 0; + int savings = 0; - vp8_clear_system_state(); + vp8_clear_system_state(); - do - { - int j = 0; + do { + int j = 0; - do - { - int k = 0; - int prev_coef_savings[ENTROPY_NODES] = {0}; - if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) - { - for (k = 0; k < PREV_COEF_CONTEXTS; ++k) - { - int t; /* token/prob index */ - for (t = 0; t < ENTROPY_NODES; ++t) - { - const unsigned int *ct = cpi->frame_branch_ct [i][j] - [k][t]; - const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t]; - const vp8_prob oldp = cpi->common.fc.coef_probs[i][j] - [k][t]; - const vp8_prob upd = vp8_coef_update_probs[i][j][k][t]; + do { + int k = 0; + int prev_coef_savings[ENTROPY_NODES] = { 0 }; + if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) { + for (k = 0; k < PREV_COEF_CONTEXTS; ++k) { + int t; /* token/prob index */ + for (t = 0; t < ENTROPY_NODES; ++t) { + const unsigned int *ct = cpi->frame_branch_ct[i][j][k][t]; + const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t]; + const vp8_prob oldp = cpi->common.fc.coef_probs[i][j][k][t]; + const vp8_prob upd = vp8_coef_update_probs[i][j][k][t]; - prev_coef_savings[t] += - prob_update_savings(ct, oldp, newp, upd); - } - } - k = 0; - } - do - { - /* note: use result from vp8_estimate_entropy_savings, so no - * need to call vp8_tree_probs_from_distribution here. - */ + prev_coef_savings[t] += prob_update_savings(ct, oldp, newp, upd); + } + } + k = 0; + } + do { + /* note: use result from vp8_estimate_entropy_savings, so no + * need to call vp8_tree_probs_from_distribution here. 
+ */ - /* at every context */ + /* at every context */ - /* calc probs and branch cts for this frame only */ - int t = 0; /* token/prob index */ + /* calc probs and branch cts for this frame only */ + int t = 0; /* token/prob index */ - do - { - const vp8_prob newp = cpi->frame_coef_probs [i][j][k][t]; + do { + const vp8_prob newp = cpi->frame_coef_probs[i][j][k][t]; - vp8_prob *Pold = cpi->common.fc.coef_probs [i][j][k] + t; - const vp8_prob upd = vp8_coef_update_probs [i][j][k][t]; + vp8_prob *Pold = cpi->common.fc.coef_probs[i][j][k] + t; + const vp8_prob upd = vp8_coef_update_probs[i][j][k][t]; - int s = prev_coef_savings[t]; - int u = 0; + int s = prev_coef_savings[t]; + int u = 0; - if (!(cpi->oxcf.error_resilient_mode & - VPX_ERROR_RESILIENT_PARTITIONS)) - { - s = prob_update_savings( - cpi->frame_branch_ct [i][j][k][t], - *Pold, newp, upd); - } + if (!(cpi->oxcf.error_resilient_mode & + VPX_ERROR_RESILIENT_PARTITIONS)) { + s = prob_update_savings(cpi->frame_branch_ct[i][j][k][t], *Pold, + newp, upd); + } - if (s > 0) - u = 1; + if (s > 0) u = 1; - /* Force updates on key frames if the new is different, - * so that we can be sure we end up with equal probabilities - * over the prev coef contexts. - */ - if ((cpi->oxcf.error_resilient_mode & - VPX_ERROR_RESILIENT_PARTITIONS) && - cpi->common.frame_type == KEY_FRAME && newp != *Pold) - u = 1; + /* Force updates on key frames if the new is different, + * so that we can be sure we end up with equal probabilities + * over the prev coef contexts. 
+ */ + if ((cpi->oxcf.error_resilient_mode & + VPX_ERROR_RESILIENT_PARTITIONS) && + cpi->common.frame_type == KEY_FRAME && newp != *Pold) { + u = 1; + } #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - cpi->update_probs[i][j][k][t] = u; + cpi->update_probs[i][j][k][t] = u; #else - vp8_write(w, u, upd); + vp8_write(w, u, upd); #endif - #ifdef VP8_ENTROPY_STATS - ++ tree_update_hist [i][j][k][t] [u]; + ++tree_update_hist[i][j][k][t][u]; #endif - if (u) - { - /* send/use new probability */ + if (u) { + /* send/use new probability */ - *Pold = newp; + *Pold = newp; #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) - vp8_write_literal(w, newp, 8); + vp8_write_literal(w, newp, 8); #endif - savings += s; + savings += s; + } - } + } while (++t < ENTROPY_NODES); - } - while (++t < ENTROPY_NODES); - - /* Accum token counts for generation of default statistics */ +/* Accum token counts for generation of default statistics */ #ifdef VP8_ENTROPY_STATS - t = 0; + t = 0; - do - { - context_counters [i][j][k][t] += cpi->coef_counts [i][j][k][t]; - } - while (++t < MAX_ENTROPY_TOKENS); + do { + context_counters[i][j][k][t] += cpi->coef_counts[i][j][k][t]; + } while (++t < MAX_ENTROPY_TOKENS); #endif - } - while (++k < PREV_COEF_CONTEXTS); - } - while (++j < COEF_BANDS); - } - while (++i < BLOCK_TYPES); - + } while (++k < PREV_COEF_CONTEXTS); + } while (++j < COEF_BANDS); + } while (++i < BLOCK_TYPES); } #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING -static void pack_coef_probs(VP8_COMP *cpi) -{ - int i = 0; - vp8_writer *const w = cpi->bc; +static void pack_coef_probs(VP8_COMP *cpi) { + int i = 0; + vp8_writer *const w = cpi->bc; - do - { - int j = 0; + do { + int j = 0; - do - { - int k = 0; + do { + int k = 0; - do - { - int t = 0; /* token/prob index */ + do { + int t = 0; /* token/prob index */ - do - { - const vp8_prob newp = cpi->common.fc.coef_probs [i][j][k][t]; - const vp8_prob upd = vp8_coef_update_probs [i][j][k][t]; + do { + const vp8_prob newp = 
cpi->common.fc.coef_probs[i][j][k][t]; + const vp8_prob upd = vp8_coef_update_probs[i][j][k][t]; - const char u = cpi->update_probs[i][j][k][t] ; + const char u = cpi->update_probs[i][j][k][t]; - vp8_write(w, u, upd); + vp8_write(w, u, upd); - if (u) - { - /* send/use new probability */ - vp8_write_literal(w, newp, 8); - } - } - while (++t < ENTROPY_NODES); - } - while (++k < PREV_COEF_CONTEXTS); - } - while (++j < COEF_BANDS); - } - while (++i < BLOCK_TYPES); + if (u) { + /* send/use new probability */ + vp8_write_literal(w, newp, 8); + } + } while (++t < ENTROPY_NODES); + } while (++k < PREV_COEF_CONTEXTS); + } while (++j < COEF_BANDS); + } while (++i < BLOCK_TYPES); } #endif @@ -1249,491 +1042,439 @@ static void pack_coef_probs(VP8_COMP *cpi) FILE *vpxlogc = 0; #endif -static void put_delta_q(vp8_writer *bc, int delta_q) -{ - if (delta_q != 0) - { - vp8_write_bit(bc, 1); - vp8_write_literal(bc, abs(delta_q), 4); +static void put_delta_q(vp8_writer *bc, int delta_q) { + if (delta_q != 0) { + vp8_write_bit(bc, 1); + vp8_write_literal(bc, abs(delta_q), 4); - if (delta_q < 0) - vp8_write_bit(bc, 1); - else - vp8_write_bit(bc, 0); - } + if (delta_q < 0) + vp8_write_bit(bc, 1); else - vp8_write_bit(bc, 0); + vp8_write_bit(bc, 0); + } else + vp8_write_bit(bc, 0); } -void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, unsigned char * dest_end, unsigned long *size) -{ - int i, j; - VP8_HEADER oh; - VP8_COMMON *const pc = & cpi->common; - vp8_writer *const bc = cpi->bc; - MACROBLOCKD *const xd = & cpi->mb.e_mbd; - int extra_bytes_packed = 0; +void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, + unsigned char *dest_end, size_t *size) { + int i, j; + VP8_HEADER oh; + VP8_COMMON *const pc = &cpi->common; + vp8_writer *const bc = cpi->bc; + MACROBLOCKD *const xd = &cpi->mb.e_mbd; + int extra_bytes_packed = 0; - unsigned char *cx_data = dest; - unsigned char *cx_data_end = dest_end; - const int *mb_feature_data_bits; + unsigned char *cx_data = dest; + unsigned 
char *cx_data_end = dest_end; + const int *mb_feature_data_bits; - oh.show_frame = (int) pc->show_frame; - oh.type = (int)pc->frame_type; - oh.version = pc->version; - oh.first_partition_length_in_bytes = 0; + oh.show_frame = (int)pc->show_frame; + oh.type = (int)pc->frame_type; + oh.version = pc->version; + oh.first_partition_length_in_bytes = 0; - mb_feature_data_bits = vp8_mb_feature_data_bits; + mb_feature_data_bits = vp8_mb_feature_data_bits; - bc[0].error = &pc->error; + bc[0].error = &pc->error; - validate_buffer(cx_data, 3, cx_data_end, &cpi->common.error); - cx_data += 3; + validate_buffer(cx_data, 3, cx_data_end, &cpi->common.error); + cx_data += 3; #if defined(SECTIONBITS_OUTPUT) - Sectionbits[active_section = 1] += sizeof(VP8_HEADER) * 8 * 256; + Sectionbits[active_section = 1] += sizeof(VP8_HEADER) * 8 * 256; #endif - /* every keyframe send startcode, width, height, scale factor, clamp - * and color type - */ - if (oh.type == KEY_FRAME) - { - int v; + /* every keyframe send startcode, width, height, scale factor, clamp + * and color type + */ + if (oh.type == KEY_FRAME) { + int v; - validate_buffer(cx_data, 7, cx_data_end, &cpi->common.error); + validate_buffer(cx_data, 7, cx_data_end, &cpi->common.error); - /* Start / synch code */ - cx_data[0] = 0x9D; - cx_data[1] = 0x01; - cx_data[2] = 0x2a; + /* Start / synch code */ + cx_data[0] = 0x9D; + cx_data[1] = 0x01; + cx_data[2] = 0x2a; - v = (pc->horiz_scale << 14) | pc->Width; - cx_data[3] = v; - cx_data[4] = v >> 8; + v = (pc->horiz_scale << 14) | pc->Width; + cx_data[3] = v; + cx_data[4] = v >> 8; - v = (pc->vert_scale << 14) | pc->Height; - cx_data[5] = v; - cx_data[6] = v >> 8; + v = (pc->vert_scale << 14) | pc->Height; + cx_data[5] = v; + cx_data[6] = v >> 8; + extra_bytes_packed = 7; + cx_data += extra_bytes_packed; - extra_bytes_packed = 7; - cx_data += extra_bytes_packed ; + vp8_start_encode(bc, cx_data, cx_data_end); - vp8_start_encode(bc, cx_data, cx_data_end); + /* signal clr type */ + 
vp8_write_bit(bc, 0); + vp8_write_bit(bc, pc->clamp_type); - /* signal clr type */ - vp8_write_bit(bc, 0); - vp8_write_bit(bc, pc->clamp_type); + } else { + vp8_start_encode(bc, cx_data, cx_data_end); + } - } - else - vp8_start_encode(bc, cx_data, cx_data_end); + /* Signal whether or not Segmentation is enabled */ + vp8_write_bit(bc, xd->segmentation_enabled); + /* Indicate which features are enabled */ + if (xd->segmentation_enabled) { + /* Signal whether or not the segmentation map is being updated. */ + vp8_write_bit(bc, xd->update_mb_segmentation_map); + vp8_write_bit(bc, xd->update_mb_segmentation_data); - /* Signal whether or not Segmentation is enabled */ - vp8_write_bit(bc, xd->segmentation_enabled); + if (xd->update_mb_segmentation_data) { + signed char Data; - /* Indicate which features are enabled */ - if (xd->segmentation_enabled) - { - /* Signal whether or not the segmentation map is being updated. */ - vp8_write_bit(bc, xd->update_mb_segmentation_map); - vp8_write_bit(bc, xd->update_mb_segmentation_data); + vp8_write_bit(bc, xd->mb_segement_abs_delta); - if (xd->update_mb_segmentation_data) - { - signed char Data; + /* For each segmentation feature (Quant and loop filter level) */ + for (i = 0; i < MB_LVL_MAX; ++i) { + /* For each of the segments */ + for (j = 0; j < MAX_MB_SEGMENTS; ++j) { + Data = xd->segment_feature_data[i][j]; - vp8_write_bit(bc, xd->mb_segement_abs_delta); + /* Frame level data */ + if (Data) { + vp8_write_bit(bc, 1); - /* For each segmentation feature (Quant and loop filter level) */ - for (i = 0; i < MB_LVL_MAX; i++) - { - /* For each of the segments */ - for (j = 0; j < MAX_MB_SEGMENTS; j++) - { - Data = xd->segment_feature_data[i][j]; - - /* Frame level data */ - if (Data) - { - vp8_write_bit(bc, 1); - - if (Data < 0) - { - Data = - Data; - vp8_write_literal(bc, Data, mb_feature_data_bits[i]); - vp8_write_bit(bc, 1); - } - else - { - vp8_write_literal(bc, Data, mb_feature_data_bits[i]); - vp8_write_bit(bc, 0); - } - } - else 
- vp8_write_bit(bc, 0); - } - } - } - - if (xd->update_mb_segmentation_map) - { - /* Write the probs used to decode the segment id for each mb */ - for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) - { - int Data = xd->mb_segment_tree_probs[i]; - - if (Data != 255) - { - vp8_write_bit(bc, 1); - vp8_write_literal(bc, Data, 8); - } - else - vp8_write_bit(bc, 0); + if (Data < 0) { + Data = -Data; + vp8_write_literal(bc, Data, mb_feature_data_bits[i]); + vp8_write_bit(bc, 1); + } else { + vp8_write_literal(bc, Data, mb_feature_data_bits[i]); + vp8_write_bit(bc, 0); } + } else + vp8_write_bit(bc, 0); } + } } - vp8_write_bit(bc, pc->filter_type); - vp8_write_literal(bc, pc->filter_level, 6); - vp8_write_literal(bc, pc->sharpness_level, 3); + if (xd->update_mb_segmentation_map) { + /* Write the probs used to decode the segment id for each mb */ + for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) { + int Data = xd->mb_segment_tree_probs[i]; - /* Write out loop filter deltas applied at the MB level based on mode - * or ref frame (if they are enabled). + if (Data != 255) { + vp8_write_bit(bc, 1); + vp8_write_literal(bc, Data, 8); + } else + vp8_write_bit(bc, 0); + } + } + } + + vp8_write_bit(bc, pc->filter_type); + vp8_write_literal(bc, pc->filter_level, 6); + vp8_write_literal(bc, pc->sharpness_level, 3); + + /* Write out loop filter deltas applied at the MB level based on mode + * or ref frame (if they are enabled). 
+ */ + vp8_write_bit(bc, xd->mode_ref_lf_delta_enabled); + + if (xd->mode_ref_lf_delta_enabled) { + /* Do the deltas need to be updated */ + int send_update = + xd->mode_ref_lf_delta_update || cpi->oxcf.error_resilient_mode; + + vp8_write_bit(bc, send_update); + if (send_update) { + int Data; + + /* Send update */ + for (i = 0; i < MAX_REF_LF_DELTAS; ++i) { + Data = xd->ref_lf_deltas[i]; + + /* Frame level data */ + if (xd->ref_lf_deltas[i] != xd->last_ref_lf_deltas[i] || + cpi->oxcf.error_resilient_mode) { + xd->last_ref_lf_deltas[i] = xd->ref_lf_deltas[i]; + vp8_write_bit(bc, 1); + + if (Data > 0) { + vp8_write_literal(bc, (Data & 0x3F), 6); + vp8_write_bit(bc, 0); /* sign */ + } else { + Data = -Data; + vp8_write_literal(bc, (Data & 0x3F), 6); + vp8_write_bit(bc, 1); /* sign */ + } + } else + vp8_write_bit(bc, 0); + } + + /* Send update */ + for (i = 0; i < MAX_MODE_LF_DELTAS; ++i) { + Data = xd->mode_lf_deltas[i]; + + if (xd->mode_lf_deltas[i] != xd->last_mode_lf_deltas[i] || + cpi->oxcf.error_resilient_mode) { + xd->last_mode_lf_deltas[i] = xd->mode_lf_deltas[i]; + vp8_write_bit(bc, 1); + + if (Data > 0) { + vp8_write_literal(bc, (Data & 0x3F), 6); + vp8_write_bit(bc, 0); /* sign */ + } else { + Data = -Data; + vp8_write_literal(bc, (Data & 0x3F), 6); + vp8_write_bit(bc, 1); /* sign */ + } + } else + vp8_write_bit(bc, 0); + } + } + } + + /* signal here is multi token partition is enabled */ + vp8_write_literal(bc, pc->multi_token_partition, 2); + + /* Frame Qbaseline quantizer index */ + vp8_write_literal(bc, pc->base_qindex, 7); + + /* Transmit Dc, Second order and Uv quantizer delta information */ + put_delta_q(bc, pc->y1dc_delta_q); + put_delta_q(bc, pc->y2dc_delta_q); + put_delta_q(bc, pc->y2ac_delta_q); + put_delta_q(bc, pc->uvdc_delta_q); + put_delta_q(bc, pc->uvac_delta_q); + + /* When there is a key frame all reference buffers are updated using + * the new key frame + */ + if (pc->frame_type != KEY_FRAME) { + /* Should the GF or ARF be updated using 
the transmitted frame + * or buffer */ - vp8_write_bit(bc, xd->mode_ref_lf_delta_enabled); + vp8_write_bit(bc, pc->refresh_golden_frame); + vp8_write_bit(bc, pc->refresh_alt_ref_frame); - if (xd->mode_ref_lf_delta_enabled) - { - /* Do the deltas need to be updated */ - int send_update = xd->mode_ref_lf_delta_update - || cpi->oxcf.error_resilient_mode; - - vp8_write_bit(bc, send_update); - if (send_update) - { - int Data; - - /* Send update */ - for (i = 0; i < MAX_REF_LF_DELTAS; i++) - { - Data = xd->ref_lf_deltas[i]; - - /* Frame level data */ - if (xd->ref_lf_deltas[i] != xd->last_ref_lf_deltas[i] - || cpi->oxcf.error_resilient_mode) - { - xd->last_ref_lf_deltas[i] = xd->ref_lf_deltas[i]; - vp8_write_bit(bc, 1); - - if (Data > 0) - { - vp8_write_literal(bc, (Data & 0x3F), 6); - vp8_write_bit(bc, 0); /* sign */ - } - else - { - Data = -Data; - vp8_write_literal(bc, (Data & 0x3F), 6); - vp8_write_bit(bc, 1); /* sign */ - } - } - else - vp8_write_bit(bc, 0); - } - - /* Send update */ - for (i = 0; i < MAX_MODE_LF_DELTAS; i++) - { - Data = xd->mode_lf_deltas[i]; - - if (xd->mode_lf_deltas[i] != xd->last_mode_lf_deltas[i] - || cpi->oxcf.error_resilient_mode) - { - xd->last_mode_lf_deltas[i] = xd->mode_lf_deltas[i]; - vp8_write_bit(bc, 1); - - if (Data > 0) - { - vp8_write_literal(bc, (Data & 0x3F), 6); - vp8_write_bit(bc, 0); /* sign */ - } - else - { - Data = -Data; - vp8_write_literal(bc, (Data & 0x3F), 6); - vp8_write_bit(bc, 1); /* sign */ - } - } - else - vp8_write_bit(bc, 0); - } - } - } - - /* signal here is multi token partition is enabled */ - vp8_write_literal(bc, pc->multi_token_partition, 2); - - /* Frame Qbaseline quantizer index */ - vp8_write_literal(bc, pc->base_qindex, 7); - - /* Transmit Dc, Second order and Uv quantizer delta information */ - put_delta_q(bc, pc->y1dc_delta_q); - put_delta_q(bc, pc->y2dc_delta_q); - put_delta_q(bc, pc->y2ac_delta_q); - put_delta_q(bc, pc->uvdc_delta_q); - put_delta_q(bc, pc->uvac_delta_q); - - /* When there is a key 
frame all reference buffers are updated using - * the new key frame + /* If not being updated from current frame should either GF or ARF + * be updated from another buffer */ - if (pc->frame_type != KEY_FRAME) - { - /* Should the GF or ARF be updated using the transmitted frame - * or buffer - */ - vp8_write_bit(bc, pc->refresh_golden_frame); - vp8_write_bit(bc, pc->refresh_alt_ref_frame); + if (!pc->refresh_golden_frame) + vp8_write_literal(bc, pc->copy_buffer_to_gf, 2); - /* If not being updated from current frame should either GF or ARF - * be updated from another buffer - */ - if (!pc->refresh_golden_frame) - vp8_write_literal(bc, pc->copy_buffer_to_gf, 2); + if (!pc->refresh_alt_ref_frame) + vp8_write_literal(bc, pc->copy_buffer_to_arf, 2); - if (!pc->refresh_alt_ref_frame) - vp8_write_literal(bc, pc->copy_buffer_to_arf, 2); - - /* Indicate reference frame sign bias for Golden and ARF frames - * (always 0 for last frame buffer) - */ - vp8_write_bit(bc, pc->ref_frame_sign_bias[GOLDEN_FRAME]); - vp8_write_bit(bc, pc->ref_frame_sign_bias[ALTREF_FRAME]); - } + /* Indicate reference frame sign bias for Golden and ARF frames + * (always 0 for last frame buffer) + */ + vp8_write_bit(bc, pc->ref_frame_sign_bias[GOLDEN_FRAME]); + vp8_write_bit(bc, pc->ref_frame_sign_bias[ALTREF_FRAME]); + } #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) - if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) - { - if (pc->frame_type == KEY_FRAME) - pc->refresh_entropy_probs = 1; - else - pc->refresh_entropy_probs = 0; + if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) { + if (pc->frame_type == KEY_FRAME) { + pc->refresh_entropy_probs = 1; + } else { + pc->refresh_entropy_probs = 0; } + } #endif - vp8_write_bit(bc, pc->refresh_entropy_probs); + vp8_write_bit(bc, pc->refresh_entropy_probs); - if (pc->frame_type != KEY_FRAME) - vp8_write_bit(bc, pc->refresh_last_frame); + if (pc->frame_type != KEY_FRAME) vp8_write_bit(bc, 
pc->refresh_last_frame); #ifdef VP8_ENTROPY_STATS - if (pc->frame_type == INTER_FRAME) - active_section = 0; - else - active_section = 7; + if (pc->frame_type == INTER_FRAME) + active_section = 0; + else + active_section = 7; #endif - vp8_clear_system_state(); + vp8_clear_system_state(); #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - pack_coef_probs(cpi); + pack_coef_probs(cpi); #else - if (pc->refresh_entropy_probs == 0) - { - /* save a copy for later refresh */ - memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc)); - } + if (pc->refresh_entropy_probs == 0) { + /* save a copy for later refresh */ + memcpy(&cpi->common.lfc, &cpi->common.fc, sizeof(cpi->common.fc)); + } - vp8_update_coef_probs(cpi); + vp8_update_coef_probs(cpi); #endif #ifdef VP8_ENTROPY_STATS - active_section = 2; + active_section = 2; #endif - /* Write out the mb_no_coeff_skip flag */ - vp8_write_bit(bc, pc->mb_no_coeff_skip); + /* Write out the mb_no_coeff_skip flag */ + vp8_write_bit(bc, pc->mb_no_coeff_skip); - if (pc->frame_type == KEY_FRAME) - { - write_kfmodes(cpi); + if (pc->frame_type == KEY_FRAME) { + write_kfmodes(cpi); #ifdef VP8_ENTROPY_STATS - active_section = 8; + active_section = 8; #endif - } - else - { - pack_inter_mode_mvs(cpi); + } else { + pack_inter_mode_mvs(cpi); #ifdef VP8_ENTROPY_STATS - active_section = 1; + active_section = 1; #endif - } + } - vp8_stop_encode(bc); + vp8_stop_encode(bc); - cx_data += bc->pos; + cx_data += bc->pos; - oh.first_partition_length_in_bytes = cpi->bc->pos; + oh.first_partition_length_in_bytes = cpi->bc->pos; - /* update frame tag */ - { - int v = (oh.first_partition_length_in_bytes << 5) | - (oh.show_frame << 4) | - (oh.version << 1) | - oh.type; + /* update frame tag */ + { + int v = (oh.first_partition_length_in_bytes << 5) | (oh.show_frame << 4) | + (oh.version << 1) | oh.type; - dest[0] = v; - dest[1] = v >> 8; - dest[2] = v >> 16; - } + dest[0] = v; + dest[1] = v >> 8; + dest[2] = v >> 16; + } - *size = VP8_HEADER_SIZE 
+ extra_bytes_packed + cpi->bc->pos; + *size = VP8_HEADER_SIZE + extra_bytes_packed + cpi->bc->pos; - cpi->partition_sz[0] = *size; + cpi->partition_sz[0] = (unsigned int)*size; #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - { - const int num_part = (1 << pc->multi_token_partition); - unsigned char * dp = cpi->partition_d[0] + cpi->partition_sz[0]; + { + const int num_part = (1 << pc->multi_token_partition); + unsigned char *dp = cpi->partition_d[0] + cpi->partition_sz[0]; - if (num_part > 1) - { - /* write token part sizes (all but last) if more than 1 */ - validate_buffer(dp, 3 * (num_part - 1), cpi->partition_d_end[0], - &pc->error); + if (num_part > 1) { + /* write token part sizes (all but last) if more than 1 */ + validate_buffer(dp, 3 * (num_part - 1), cpi->partition_d_end[0], + &pc->error); - cpi->partition_sz[0] += 3*(num_part-1); + cpi->partition_sz[0] += 3 * (num_part - 1); - for(i = 1; i < num_part; i++) - { - write_partition_size(dp, cpi->partition_sz[i]); - dp += 3; - } - } - - if (!cpi->output_partition) - { - /* concatenate partition buffers */ - for(i = 0; i < num_part; i++) - { - memmove(dp, cpi->partition_d[i+1], cpi->partition_sz[i+1]); - cpi->partition_d[i+1] = dp; - dp += cpi->partition_sz[i+1]; - } - } - - /* update total size */ - *size = 0; - for(i = 0; i < num_part+1; i++) - { - *size += cpi->partition_sz[i]; - } + for (i = 1; i < num_part; ++i) { + write_partition_size(dp, cpi->partition_sz[i]); + dp += 3; + } } + + if (!cpi->output_partition) { + /* concatenate partition buffers */ + for (i = 0; i < num_part; ++i) { + memmove(dp, cpi->partition_d[i + 1], cpi->partition_sz[i + 1]); + cpi->partition_d[i + 1] = dp; + dp += cpi->partition_sz[i + 1]; + } + } + + /* update total size */ + *size = 0; + for (i = 0; i < num_part + 1; ++i) { + *size += cpi->partition_sz[i]; + } + } #else - if (pc->multi_token_partition != ONE_PARTITION) - { - int num_part = 1 << pc->multi_token_partition; + if (pc->multi_token_partition != ONE_PARTITION) 
{ + int num_part = 1 << pc->multi_token_partition; - /* partition size table at the end of first partition */ - cpi->partition_sz[0] += 3 * (num_part - 1); - *size += 3 * (num_part - 1); + /* partition size table at the end of first partition */ + cpi->partition_sz[0] += 3 * (num_part - 1); + *size += 3 * (num_part - 1); - validate_buffer(cx_data, 3 * (num_part - 1), cx_data_end, - &pc->error); + validate_buffer(cx_data, 3 * (num_part - 1), cx_data_end, &pc->error); - for(i = 1; i < num_part + 1; i++) - { - cpi->bc[i].error = &pc->error; - } - - pack_tokens_into_partitions(cpi, cx_data + 3 * (num_part - 1), - cx_data_end, num_part); - - for(i = 1; i < num_part; i++) - { - cpi->partition_sz[i] = cpi->bc[i].pos; - write_partition_size(cx_data, cpi->partition_sz[i]); - cx_data += 3; - *size += cpi->partition_sz[i]; /* add to total */ - } - - /* add last partition to total size */ - cpi->partition_sz[i] = cpi->bc[i].pos; - *size += cpi->partition_sz[i]; + for (i = 1; i < num_part + 1; ++i) { + cpi->bc[i].error = &pc->error; } - else - { - bc[1].error = &pc->error; - vp8_start_encode(&cpi->bc[1], cx_data, cx_data_end); + pack_tokens_into_partitions(cpi, cx_data + 3 * (num_part - 1), cx_data_end, + num_part); + + for (i = 1; i < num_part; ++i) { + cpi->partition_sz[i] = cpi->bc[i].pos; + write_partition_size(cx_data, cpi->partition_sz[i]); + cx_data += 3; + *size += cpi->partition_sz[i]; /* add to total */ + } + + /* add last partition to total size */ + cpi->partition_sz[i] = cpi->bc[i].pos; + *size += cpi->partition_sz[i]; + } else { + bc[1].error = &pc->error; + + vp8_start_encode(&cpi->bc[1], cx_data, cx_data_end); #if CONFIG_MULTITHREAD - if (cpi->b_multi_threaded) - pack_mb_row_tokens(cpi, &cpi->bc[1]); - else -#endif // CONFIG_MULTITHREAD - vp8_pack_tokens(&cpi->bc[1], cpi->tok, cpi->tok_count); - - vp8_stop_encode(&cpi->bc[1]); - - *size += cpi->bc[1].pos; - cpi->partition_sz[1] = cpi->bc[1].pos; + if (cpi->b_multi_threaded) { + pack_mb_row_tokens(cpi, 
&cpi->bc[1]); + } else { + vp8_pack_tokens(&cpi->bc[1], cpi->tok, cpi->tok_count); } +#else + vp8_pack_tokens(&cpi->bc[1], cpi->tok, cpi->tok_count); +#endif // CONFIG_MULTITHREAD + + vp8_stop_encode(&cpi->bc[1]); + + *size += cpi->bc[1].pos; + cpi->partition_sz[1] = cpi->bc[1].pos; + } #endif } #ifdef VP8_ENTROPY_STATS -void print_tree_update_probs() -{ - int i, j, k, l; - FILE *f = fopen("context.c", "a"); - int Sum; - fprintf(f, "\n/* Update probabilities for token entropy tree. */\n\n"); - fprintf(f, "const vp8_prob tree_update_probs[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {\n"); +void print_tree_update_probs() { + int i, j, k, l; + FILE *f = fopen("context.c", "a"); + int Sum; + fprintf(f, "\n/* Update probabilities for token entropy tree. */\n\n"); + fprintf(f, + "const vp8_prob tree_update_probs[BLOCK_TYPES] [COEF_BANDS] " + "[PREV_COEF_CONTEXTS] [ENTROPY_NODES] = {\n"); - for (i = 0; i < BLOCK_TYPES; i++) - { - fprintf(f, " { \n"); + for (i = 0; i < BLOCK_TYPES; ++i) { + fprintf(f, " { \n"); - for (j = 0; j < COEF_BANDS; j++) - { - fprintf(f, " {\n"); + for (j = 0; j < COEF_BANDS; ++j) { + fprintf(f, " {\n"); - for (k = 0; k < PREV_COEF_CONTEXTS; k++) - { - fprintf(f, " {"); + for (k = 0; k < PREV_COEF_CONTEXTS; ++k) { + fprintf(f, " {"); - for (l = 0; l < ENTROPY_NODES; l++) - { - Sum = tree_update_hist[i][j][k][l][0] + tree_update_hist[i][j][k][l][1]; + for (l = 0; l < ENTROPY_NODES; ++l) { + Sum = + tree_update_hist[i][j][k][l][0] + tree_update_hist[i][j][k][l][1]; - if (Sum > 0) - { - if (((tree_update_hist[i][j][k][l][0] * 255) / Sum) > 0) - fprintf(f, "%3ld, ", (tree_update_hist[i][j][k][l][0] * 255) / Sum); - else - fprintf(f, "%3ld, ", 1); - } - else - fprintf(f, "%3ld, ", 128); - } - - fprintf(f, "},\n"); - } - - fprintf(f, " },\n"); + if (Sum > 0) { + if (((tree_update_hist[i][j][k][l][0] * 255) / Sum) > 0) + fprintf(f, "%3ld, ", + (tree_update_hist[i][j][k][l][0] * 255) / Sum); + else + fprintf(f, "%3ld, ", 1); + } else + 
fprintf(f, "%3ld, ", 128); } - fprintf(f, " },\n"); + fprintf(f, "},\n"); + } + + fprintf(f, " },\n"); } - fprintf(f, "};\n"); - fclose(f); + fprintf(f, " },\n"); + } + + fprintf(f, "};\n"); + fclose(f); } #endif diff --git a/libs/libvpx/vp8/encoder/bitstream.h b/libs/libvpx/vp8/encoder/bitstream.h index de69805513..2b196dcd27 100644 --- a/libs/libvpx/vp8/encoder/bitstream.h +++ b/libs/libvpx/vp8/encoder/bitstream.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_ENCODER_BITSTREAM_H_ #define VP8_ENCODER_BITSTREAM_H_ diff --git a/libs/libvpx/vp8/encoder/block.h b/libs/libvpx/vp8/encoder/block.h index 248e79549b..492af0e41f 100644 --- a/libs/libvpx/vp8/encoder/block.h +++ b/libs/libvpx/vp8/encoder/block.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_ENCODER_BLOCK_H_ #define VP8_ENCODER_BLOCK_H_ @@ -26,148 +25,142 @@ extern "C" { #define MAX_ERROR_BINS 1024 /* motion search site */ -typedef struct -{ - MV mv; - int offset; +typedef struct { + MV mv; + int offset; } search_site; -typedef struct block -{ - /* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */ - short *src_diff; - short *coeff; +typedef struct block { + /* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */ + short *src_diff; + short *coeff; - /* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */ - short *quant; - short *quant_fast; - short *quant_shift; - short *zbin; - short *zrun_zbin_boost; - short *round; + /* 16 Y blocks, 4 U blocks, 4 V blocks each with 16 entries */ + short *quant; + short *quant_fast; + short *quant_shift; + short *zbin; + short *zrun_zbin_boost; + short *round; - /* Zbin Over Quant value */ - short zbin_extra; + /* Zbin Over Quant value */ + short zbin_extra; - unsigned char **base_src; - int src; - int src_stride; + unsigned char **base_src; + int src; + int src_stride; } BLOCK; -typedef struct -{ - int count; - struct - { - B_PREDICTION_MODE mode; - 
int_mv mv; - } bmi[16]; +typedef struct { + int count; + struct { + B_PREDICTION_MODE mode; + int_mv mv; + } bmi[16]; } PARTITION_INFO; -typedef struct macroblock -{ - DECLARE_ALIGNED(16, short, src_diff[400]); /* 25 blocks Y,U,V,Y2 */ - DECLARE_ALIGNED(16, short, coeff[400]); /* 25 blocks Y,U,V,Y2 */ - DECLARE_ALIGNED(16, unsigned char, thismb[256]); +typedef struct macroblock { + DECLARE_ALIGNED(16, short, src_diff[400]); /* 25 blocks Y,U,V,Y2 */ + DECLARE_ALIGNED(16, short, coeff[400]); /* 25 blocks Y,U,V,Y2 */ + DECLARE_ALIGNED(16, unsigned char, thismb[256]); - unsigned char *thismb_ptr; - /* 16 Y, 4 U, 4 V, 1 DC 2nd order block */ - BLOCK block[25]; + unsigned char *thismb_ptr; + /* 16 Y, 4 U, 4 V, 1 DC 2nd order block */ + BLOCK block[25]; - YV12_BUFFER_CONFIG src; + YV12_BUFFER_CONFIG src; - MACROBLOCKD e_mbd; - PARTITION_INFO *partition_info; /* work pointer */ - PARTITION_INFO *pi; /* Corresponds to upper left visible macroblock */ - PARTITION_INFO *pip; /* Base of allocated array */ + MACROBLOCKD e_mbd; + PARTITION_INFO *partition_info; /* work pointer */ + PARTITION_INFO *pi; /* Corresponds to upper left visible macroblock */ + PARTITION_INFO *pip; /* Base of allocated array */ - int ref_frame_cost[MAX_REF_FRAMES]; + int ref_frame_cost[MAX_REF_FRAMES]; - search_site *ss; - int ss_count; - int searches_per_step; + search_site *ss; + int ss_count; + int searches_per_step; - int errorperbit; - int sadperbit16; - int sadperbit4; - int rddiv; - int rdmult; - unsigned int * mb_activity_ptr; - int * mb_norm_activity_ptr; - signed int act_zbin_adj; - signed int last_act_zbin_adj; + int errorperbit; + int sadperbit16; + int sadperbit4; + int rddiv; + int rdmult; + unsigned int *mb_activity_ptr; + int *mb_norm_activity_ptr; + signed int act_zbin_adj; + signed int last_act_zbin_adj; - int *mvcost[2]; - int *mvsadcost[2]; - int (*mbmode_cost)[MB_MODE_COUNT]; - int (*intra_uv_mode_cost)[MB_MODE_COUNT]; - int (*bmode_costs)[10][10]; - int *inter_bmode_costs; - int 
(*token_costs)[COEF_BANDS][PREV_COEF_CONTEXTS] - [MAX_ENTROPY_TOKENS]; + int *mvcost[2]; + int *mvsadcost[2]; + int (*mbmode_cost)[MB_MODE_COUNT]; + int (*intra_uv_mode_cost)[MB_MODE_COUNT]; + int (*bmode_costs)[10][10]; + int *inter_bmode_costs; + int (*token_costs)[COEF_BANDS][PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS]; - /* These define limits to motion vector components to prevent - * them from extending outside the UMV borders. - */ - int mv_col_min; - int mv_col_max; - int mv_row_min; - int mv_row_max; + /* These define limits to motion vector components to prevent + * them from extending outside the UMV borders. + */ + int mv_col_min; + int mv_col_max; + int mv_row_min; + int mv_row_max; - int skip; + int skip; - unsigned int encode_breakout; + unsigned int encode_breakout; - signed char *gf_active_ptr; + signed char *gf_active_ptr; - unsigned char *active_ptr; - MV_CONTEXT *mvc; + unsigned char *active_ptr; + MV_CONTEXT *mvc; - int optimize; - int q_index; - int is_skin; - int denoise_zeromv; + int optimize; + int q_index; + int is_skin; + int denoise_zeromv; #if CONFIG_TEMPORAL_DENOISING - int increase_denoising; - MB_PREDICTION_MODE best_sse_inter_mode; - int_mv best_sse_mv; - MV_REFERENCE_FRAME best_reference_frame; - MV_REFERENCE_FRAME best_zeromv_reference_frame; - unsigned char need_to_clamp_best_mvs; + int increase_denoising; + MB_PREDICTION_MODE best_sse_inter_mode; + int_mv best_sse_mv; + MV_REFERENCE_FRAME best_reference_frame; + MV_REFERENCE_FRAME best_zeromv_reference_frame; + unsigned char need_to_clamp_best_mvs; #endif - int skip_true_count; - unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; - unsigned int MVcount [2] [MVvals]; /* (row,col) MV cts this frame */ - int ymode_count [VP8_YMODES]; /* intra MB type cts this frame */ - int uv_mode_count[VP8_UV_MODES]; /* intra MB type cts this frame */ - int64_t prediction_error; - int64_t intra_error; - int count_mb_ref_frame_usage[MAX_REF_FRAMES]; + int 
skip_true_count; + unsigned int coef_counts[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] + [MAX_ENTROPY_TOKENS]; + unsigned int MVcount[2][MVvals]; /* (row,col) MV cts this frame */ + int ymode_count[VP8_YMODES]; /* intra MB type cts this frame */ + int uv_mode_count[VP8_UV_MODES]; /* intra MB type cts this frame */ + int64_t prediction_error; + int64_t intra_error; + int count_mb_ref_frame_usage[MAX_REF_FRAMES]; - int rd_thresh_mult[MAX_MODES]; - int rd_threshes[MAX_MODES]; - unsigned int mbs_tested_so_far; - unsigned int mode_test_hit_counts[MAX_MODES]; - int zbin_mode_boost_enabled; - int zbin_mode_boost; - int last_zbin_mode_boost; + int rd_thresh_mult[MAX_MODES]; + int rd_threshes[MAX_MODES]; + unsigned int mbs_tested_so_far; + unsigned int mode_test_hit_counts[MAX_MODES]; + int zbin_mode_boost_enabled; + int zbin_mode_boost; + int last_zbin_mode_boost; - int last_zbin_over_quant; - int zbin_over_quant; - int error_bins[MAX_ERROR_BINS]; + int last_zbin_over_quant; + int zbin_over_quant; + int error_bins[MAX_ERROR_BINS]; - void (*short_fdct4x4)(short *input, short *output, int pitch); - void (*short_fdct8x4)(short *input, short *output, int pitch); - void (*short_walsh4x4)(short *input, short *output, int pitch); - void (*quantize_b)(BLOCK *b, BLOCKD *d); + void (*short_fdct4x4)(short *input, short *output, int pitch); + void (*short_fdct8x4)(short *input, short *output, int pitch); + void (*short_walsh4x4)(short *input, short *output, int pitch); + void (*quantize_b)(BLOCK *b, BLOCKD *d); - unsigned int mbs_zero_last_dot_suppress; - int zero_last_dot_suppress; + unsigned int mbs_zero_last_dot_suppress; + int zero_last_dot_suppress; } MACROBLOCK; - #ifdef __cplusplus } // extern "C" #endif diff --git a/libs/libvpx/vp8/encoder/boolhuff.c b/libs/libvpx/vp8/encoder/boolhuff.c index 3b0c03a142..04f8db9331 100644 --- a/libs/libvpx/vp8/encoder/boolhuff.c +++ b/libs/libvpx/vp8/encoder/boolhuff.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source 
tree. */ - #include "boolhuff.h" #if defined(SECTIONBITS_OUTPUT) @@ -20,51 +19,49 @@ unsigned __int64 Sectionbits[500]; unsigned int active_section = 0; #endif -const unsigned int vp8_prob_cost[256] = -{ - 2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, 1129, 1099, 1072, 1046, - 1023, 1000, 979, 959, 940, 922, 905, 889, 873, 858, 843, 829, 816, 803, 790, 778, - 767, 755, 744, 733, 723, 713, 703, 693, 684, 675, 666, 657, 649, 641, 633, 625, - 617, 609, 602, 594, 587, 580, 573, 567, 560, 553, 547, 541, 534, 528, 522, 516, - 511, 505, 499, 494, 488, 483, 477, 472, 467, 462, 457, 452, 447, 442, 437, 433, - 428, 424, 419, 415, 410, 406, 401, 397, 393, 389, 385, 381, 377, 373, 369, 365, - 361, 357, 353, 349, 346, 342, 338, 335, 331, 328, 324, 321, 317, 314, 311, 307, - 304, 301, 297, 294, 291, 288, 285, 281, 278, 275, 272, 269, 266, 263, 260, 257, - 255, 252, 249, 246, 243, 240, 238, 235, 232, 229, 227, 224, 221, 219, 216, 214, - 211, 208, 206, 203, 201, 198, 196, 194, 191, 189, 186, 184, 181, 179, 177, 174, - 172, 170, 168, 165, 163, 161, 159, 156, 154, 152, 150, 148, 145, 143, 141, 139, - 137, 135, 133, 131, 129, 127, 125, 123, 121, 119, 117, 115, 113, 111, 109, 107, - 105, 103, 101, 99, 97, 95, 93, 92, 90, 88, 86, 84, 82, 81, 79, 77, - 75, 73, 72, 70, 68, 66, 65, 63, 61, 60, 58, 56, 55, 53, 51, 50, - 48, 46, 45, 43, 41, 40, 38, 37, 35, 33, 32, 30, 29, 27, 25, 24, - 22, 21, 19, 18, 16, 15, 13, 12, 10, 9, 7, 6, 4, 3, 1, 1 +const unsigned int vp8_prob_cost[256] = { + 2047, 2047, 1791, 1641, 1535, 1452, 1385, 1328, 1279, 1235, 1196, 1161, 1129, + 1099, 1072, 1046, 1023, 1000, 979, 959, 940, 922, 905, 889, 873, 858, + 843, 829, 816, 803, 790, 778, 767, 755, 744, 733, 723, 713, 703, + 693, 684, 675, 666, 657, 649, 641, 633, 625, 617, 609, 602, 594, + 587, 580, 573, 567, 560, 553, 547, 541, 534, 528, 522, 516, 511, + 505, 499, 494, 488, 483, 477, 472, 467, 462, 457, 452, 447, 442, + 437, 433, 428, 424, 419, 415, 410, 406, 401, 397, 393, 389, 385, + 
381, 377, 373, 369, 365, 361, 357, 353, 349, 346, 342, 338, 335, + 331, 328, 324, 321, 317, 314, 311, 307, 304, 301, 297, 294, 291, + 288, 285, 281, 278, 275, 272, 269, 266, 263, 260, 257, 255, 252, + 249, 246, 243, 240, 238, 235, 232, 229, 227, 224, 221, 219, 216, + 214, 211, 208, 206, 203, 201, 198, 196, 194, 191, 189, 186, 184, + 181, 179, 177, 174, 172, 170, 168, 165, 163, 161, 159, 156, 154, + 152, 150, 148, 145, 143, 141, 139, 137, 135, 133, 131, 129, 127, + 125, 123, 121, 119, 117, 115, 113, 111, 109, 107, 105, 103, 101, + 99, 97, 95, 93, 92, 90, 88, 86, 84, 82, 81, 79, 77, + 75, 73, 72, 70, 68, 66, 65, 63, 61, 60, 58, 56, 55, + 53, 51, 50, 48, 46, 45, 43, 41, 40, 38, 37, 35, 33, + 32, 30, 29, 27, 25, 24, 22, 21, 19, 18, 16, 15, 13, + 12, 10, 9, 7, 6, 4, 3, 1, 1 }; -void vp8_start_encode(BOOL_CODER *br, unsigned char *source, unsigned char *source_end) -{ - - br->lowvalue = 0; - br->range = 255; - br->count = -24; - br->buffer = source; - br->buffer_end = source_end; - br->pos = 0; +void vp8_start_encode(BOOL_CODER *br, unsigned char *source, + unsigned char *source_end) { + br->lowvalue = 0; + br->range = 255; + br->count = -24; + br->buffer = source; + br->buffer_end = source_end; + br->pos = 0; } -void vp8_stop_encode(BOOL_CODER *br) -{ - int i; +void vp8_stop_encode(BOOL_CODER *br) { + int i; - for (i = 0; i < 32; i++) - vp8_encode_bool(br, 0, 128); + for (i = 0; i < 32; ++i) vp8_encode_bool(br, 0, 128); } +void vp8_encode_value(BOOL_CODER *br, int data, int bits) { + int bit; -void vp8_encode_value(BOOL_CODER *br, int data, int bits) -{ - int bit; - - for (bit = bits - 1; bit >= 0; bit--) - vp8_encode_bool(br, (1 & (data >> bit)), 0x80); - + for (bit = bits - 1; bit >= 0; bit--) { + vp8_encode_bool(br, (1 & (data >> bit)), 0x80); + } } diff --git a/libs/libvpx/vp8/encoder/boolhuff.h b/libs/libvpx/vp8/encoder/boolhuff.h index 7c012a8296..d001eea9cd 100644 --- a/libs/libvpx/vp8/encoder/boolhuff.h +++ b/libs/libvpx/vp8/encoder/boolhuff.h @@ -8,7 +8,6 @@ * 
be found in the AUTHORS file in the root of the source tree. */ - /**************************************************************************** * * Module Title : boolhuff.h @@ -26,103 +25,96 @@ extern "C" { #endif -typedef struct -{ - unsigned int lowvalue; - unsigned int range; - int count; - unsigned int pos; - unsigned char *buffer; - unsigned char *buffer_end; - struct vpx_internal_error_info *error; +typedef struct { + unsigned int lowvalue; + unsigned int range; + int count; + unsigned int pos; + unsigned char *buffer; + unsigned char *buffer_end; + struct vpx_internal_error_info *error; } BOOL_CODER; -extern void vp8_start_encode(BOOL_CODER *bc, unsigned char *buffer, unsigned char *buffer_end); +extern void vp8_start_encode(BOOL_CODER *bc, unsigned char *buffer, + unsigned char *buffer_end); extern void vp8_encode_value(BOOL_CODER *br, int data, int bits); extern void vp8_stop_encode(BOOL_CODER *bc); extern const unsigned int vp8_prob_cost[256]; - DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]); -static int validate_buffer(const unsigned char *start, - size_t len, +static int validate_buffer(const unsigned char *start, size_t len, const unsigned char *end, - struct vpx_internal_error_info *error) -{ - if (start + len > start && start + len < end) - return 1; - else - vpx_internal_error(error, VPX_CODEC_CORRUPT_FRAME, - "Truncated packet or corrupt partition "); + struct vpx_internal_error_info *error) { + if (start + len > start && start + len < end) { + return 1; + } else { + vpx_internal_error(error, VPX_CODEC_CORRUPT_FRAME, + "Truncated packet or corrupt partition "); + } - return 0; + return 0; } -static void vp8_encode_bool(BOOL_CODER *br, int bit, int probability) -{ - unsigned int split; - int count = br->count; - unsigned int range = br->range; - unsigned int lowvalue = br->lowvalue; - register unsigned int shift; +static void vp8_encode_bool(BOOL_CODER *br, int bit, int probability) { + unsigned int split; + int count = br->count; + 
unsigned int range = br->range; + unsigned int lowvalue = br->lowvalue; + register int shift; #ifdef VP8_ENTROPY_STATS #if defined(SECTIONBITS_OUTPUT) - if (bit) - Sectionbits[active_section] += vp8_prob_cost[255-probability]; - else - Sectionbits[active_section] += vp8_prob_cost[probability]; + if (bit) + Sectionbits[active_section] += vp8_prob_cost[255 - probability]; + else + Sectionbits[active_section] += vp8_prob_cost[probability]; #endif #endif - split = 1 + (((range - 1) * probability) >> 8); + split = 1 + (((range - 1) * probability) >> 8); - range = split; + range = split; - if (bit) - { - lowvalue += split; - range = br->range - split; + if (bit) { + lowvalue += split; + range = br->range - split; + } + + shift = vp8_norm[range]; + + range <<= shift; + count += shift; + + if (count >= 0) { + int offset = shift - count; + + if ((lowvalue << (offset - 1)) & 0x80000000) { + int x = br->pos - 1; + + while (x >= 0 && br->buffer[x] == 0xff) { + br->buffer[x] = (unsigned char)0; + x--; + } + + br->buffer[x] += 1; } - shift = vp8_norm[range]; + validate_buffer(br->buffer + br->pos, 1, br->buffer_end, br->error); + br->buffer[br->pos++] = (lowvalue >> (24 - offset)); - range <<= shift; - count += shift; + lowvalue <<= offset; + shift = count; + lowvalue &= 0xffffff; + count -= 8; + } - if (count >= 0) - { - int offset = shift - count; - - if ((lowvalue << (offset - 1)) & 0x80000000) - { - int x = br->pos - 1; - - while (x >= 0 && br->buffer[x] == 0xff) - { - br->buffer[x] = (unsigned char)0; - x--; - } - - br->buffer[x] += 1; - } - - validate_buffer(br->buffer + br->pos, 1, br->buffer_end, br->error); - br->buffer[br->pos++] = (lowvalue >> (24 - offset)); - - lowvalue <<= offset; - shift = count; - lowvalue &= 0xffffff; - count -= 8 ; - } - - lowvalue <<= shift; - br->count = count; - br->lowvalue = lowvalue; - br->range = range; + lowvalue <<= shift; + br->count = count; + br->lowvalue = lowvalue; + br->range = range; } #ifdef __cplusplus diff --git 
a/libs/libvpx/vp8/encoder/dct.c b/libs/libvpx/vp8/encoder/dct.c index 0c7198d5d3..7d214eafb0 100644 --- a/libs/libvpx/vp8/encoder/dct.c +++ b/libs/libvpx/vp8/encoder/dct.c @@ -8,111 +8,101 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include #include "./vp8_rtcd.h" -void vp8_short_fdct4x4_c(short *input, short *output, int pitch) -{ - int i; - int a1, b1, c1, d1; - short *ip = input; - short *op = output; +void vp8_short_fdct4x4_c(short *input, short *output, int pitch) { + int i; + int a1, b1, c1, d1; + short *ip = input; + short *op = output; - for (i = 0; i < 4; i++) - { - a1 = ((ip[0] + ip[3]) * 8); - b1 = ((ip[1] + ip[2]) * 8); - c1 = ((ip[1] - ip[2]) * 8); - d1 = ((ip[0] - ip[3]) * 8); + for (i = 0; i < 4; ++i) { + a1 = ((ip[0] + ip[3]) * 8); + b1 = ((ip[1] + ip[2]) * 8); + c1 = ((ip[1] - ip[2]) * 8); + d1 = ((ip[0] - ip[3]) * 8); - op[0] = a1 + b1; - op[2] = a1 - b1; + op[0] = a1 + b1; + op[2] = a1 - b1; - op[1] = (c1 * 2217 + d1 * 5352 + 14500)>>12; - op[3] = (d1 * 2217 - c1 * 5352 + 7500)>>12; + op[1] = (c1 * 2217 + d1 * 5352 + 14500) >> 12; + op[3] = (d1 * 2217 - c1 * 5352 + 7500) >> 12; - ip += pitch / 2; - op += 4; + ip += pitch / 2; + op += 4; + } + ip = output; + op = output; + for (i = 0; i < 4; ++i) { + a1 = ip[0] + ip[12]; + b1 = ip[4] + ip[8]; + c1 = ip[4] - ip[8]; + d1 = ip[0] - ip[12]; - } - ip = output; - op = output; - for (i = 0; i < 4; i++) - { - a1 = ip[0] + ip[12]; - b1 = ip[4] + ip[8]; - c1 = ip[4] - ip[8]; - d1 = ip[0] - ip[12]; + op[0] = (a1 + b1 + 7) >> 4; + op[8] = (a1 - b1 + 7) >> 4; - op[0] = ( a1 + b1 + 7)>>4; - op[8] = ( a1 - b1 + 7)>>4; + op[4] = ((c1 * 2217 + d1 * 5352 + 12000) >> 16) + (d1 != 0); + op[12] = (d1 * 2217 - c1 * 5352 + 51000) >> 16; - op[4] =((c1 * 2217 + d1 * 5352 + 12000)>>16) + (d1!=0); - op[12] = (d1 * 2217 - c1 * 5352 + 51000)>>16; - - ip++; - op++; - } + ip++; + op++; + } } -void vp8_short_fdct8x4_c(short *input, short *output, int pitch) -{ - vp8_short_fdct4x4_c(input, output, 
pitch); - vp8_short_fdct4x4_c(input + 4, output + 16, pitch); +void vp8_short_fdct8x4_c(short *input, short *output, int pitch) { + vp8_short_fdct4x4_c(input, output, pitch); + vp8_short_fdct4x4_c(input + 4, output + 16, pitch); } -void vp8_short_walsh4x4_c(short *input, short *output, int pitch) -{ - int i; - int a1, b1, c1, d1; - int a2, b2, c2, d2; - short *ip = input; - short *op = output; +void vp8_short_walsh4x4_c(short *input, short *output, int pitch) { + int i; + int a1, b1, c1, d1; + int a2, b2, c2, d2; + short *ip = input; + short *op = output; + for (i = 0; i < 4; ++i) { + a1 = ((ip[0] + ip[2]) * 4); + d1 = ((ip[1] + ip[3]) * 4); + c1 = ((ip[1] - ip[3]) * 4); + b1 = ((ip[0] - ip[2]) * 4); - for (i = 0; i < 4; i++) - { - a1 = ((ip[0] + ip[2]) * 4); - d1 = ((ip[1] + ip[3]) * 4); - c1 = ((ip[1] - ip[3]) * 4); - b1 = ((ip[0] - ip[2]) * 4); + op[0] = a1 + d1 + (a1 != 0); + op[1] = b1 + c1; + op[2] = b1 - c1; + op[3] = a1 - d1; + ip += pitch / 2; + op += 4; + } - op[0] = a1 + d1 + (a1!=0); - op[1] = b1 + c1; - op[2] = b1 - c1; - op[3] = a1 - d1; - ip += pitch / 2; - op += 4; - } + ip = output; + op = output; - ip = output; - op = output; + for (i = 0; i < 4; ++i) { + a1 = ip[0] + ip[8]; + d1 = ip[4] + ip[12]; + c1 = ip[4] - ip[12]; + b1 = ip[0] - ip[8]; - for (i = 0; i < 4; i++) - { - a1 = ip[0] + ip[8]; - d1 = ip[4] + ip[12]; - c1 = ip[4] - ip[12]; - b1 = ip[0] - ip[8]; + a2 = a1 + d1; + b2 = b1 + c1; + c2 = b1 - c1; + d2 = a1 - d1; - a2 = a1 + d1; - b2 = b1 + c1; - c2 = b1 - c1; - d2 = a1 - d1; + a2 += a2 < 0; + b2 += b2 < 0; + c2 += c2 < 0; + d2 += d2 < 0; - a2 += a2<0; - b2 += b2<0; - c2 += c2<0; - d2 += d2<0; + op[0] = (a2 + 3) >> 3; + op[4] = (b2 + 3) >> 3; + op[8] = (c2 + 3) >> 3; + op[12] = (d2 + 3) >> 3; - op[0] = (a2+3) >> 3; - op[4] = (b2+3) >> 3; - op[8] = (c2+3) >> 3; - op[12]= (d2+3) >> 3; - - ip++; - op++; - } + ip++; + op++; + } } diff --git a/libs/libvpx/vp8/encoder/dct_value_cost.h b/libs/libvpx/vp8/encoder/dct_value_cost.h index 
1cd3eec84a..278dce73f4 100644 --- a/libs/libvpx/vp8/encoder/dct_value_cost.h +++ b/libs/libvpx/vp8/encoder/dct_value_cost.h @@ -18,350 +18,323 @@ extern "C" { /* Generated file, included by tokenize.c */ /* Values generated by fill_value_tokens() */ -static const short dct_value_cost[2048*2] = -{ - 8285, 8277, 8267, 8259, 8253, 8245, 8226, 8218, 8212, 8204, 8194, 8186, - 8180, 8172, 8150, 8142, 8136, 8128, 8118, 8110, 8104, 8096, 8077, 8069, - 8063, 8055, 8045, 8037, 8031, 8023, 7997, 7989, 7983, 7975, 7965, 7957, - 7951, 7943, 7924, 7916, 7910, 7902, 7892, 7884, 7878, 7870, 7848, 7840, - 7834, 7826, 7816, 7808, 7802, 7794, 7775, 7767, 7761, 7753, 7743, 7735, - 7729, 7721, 7923, 7915, 7909, 7901, 7891, 7883, 7877, 7869, 7850, 7842, - 7836, 7828, 7818, 7810, 7804, 7796, 7774, 7766, 7760, 7752, 7742, 7734, - 7728, 7720, 7701, 7693, 7687, 7679, 7669, 7661, 7655, 7647, 7621, 7613, - 7607, 7599, 7589, 7581, 7575, 7567, 7548, 7540, 7534, 7526, 7516, 7508, - 7502, 7494, 7472, 7464, 7458, 7450, 7440, 7432, 7426, 7418, 7399, 7391, - 7385, 7377, 7367, 7359, 7353, 7345, 7479, 7471, 7465, 7457, 7447, 7439, - 7433, 7425, 7406, 7398, 7392, 7384, 7374, 7366, 7360, 7352, 7330, 7322, - 7316, 7308, 7298, 7290, 7284, 7276, 7257, 7249, 7243, 7235, 7225, 7217, - 7211, 7203, 7177, 7169, 7163, 7155, 7145, 7137, 7131, 7123, 7104, 7096, - 7090, 7082, 7072, 7064, 7058, 7050, 7028, 7020, 7014, 7006, 6996, 6988, - 6982, 6974, 6955, 6947, 6941, 6933, 6923, 6915, 6909, 6901, 7632, 7624, - 7618, 7610, 7600, 7592, 7586, 7578, 7559, 7551, 7545, 7537, 7527, 7519, - 7513, 7505, 7483, 7475, 7469, 7461, 7451, 7443, 7437, 7429, 7410, 7402, - 7396, 7388, 7378, 7370, 7364, 7356, 7330, 7322, 7316, 7308, 7298, 7290, - 7284, 7276, 7257, 7249, 7243, 7235, 7225, 7217, 7211, 7203, 7181, 7173, - 7167, 7159, 7149, 7141, 7135, 7127, 7108, 7100, 7094, 7086, 7076, 7068, - 7062, 7054, 7188, 7180, 7174, 7166, 7156, 7148, 7142, 7134, 7115, 7107, - 7101, 7093, 7083, 7075, 7069, 7061, 7039, 7031, 7025, 7017, 7007, 6999, 
- 6993, 6985, 6966, 6958, 6952, 6944, 6934, 6926, 6920, 6912, 6886, 6878, - 6872, 6864, 6854, 6846, 6840, 6832, 6813, 6805, 6799, 6791, 6781, 6773, - 6767, 6759, 6737, 6729, 6723, 6715, 6705, 6697, 6691, 6683, 6664, 6656, - 6650, 6642, 6632, 6624, 6618, 6610, 6812, 6804, 6798, 6790, 6780, 6772, - 6766, 6758, 6739, 6731, 6725, 6717, 6707, 6699, 6693, 6685, 6663, 6655, - 6649, 6641, 6631, 6623, 6617, 6609, 6590, 6582, 6576, 6568, 6558, 6550, - 6544, 6536, 6510, 6502, 6496, 6488, 6478, 6470, 6464, 6456, 6437, 6429, - 6423, 6415, 6405, 6397, 6391, 6383, 6361, 6353, 6347, 6339, 6329, 6321, - 6315, 6307, 6288, 6280, 6274, 6266, 6256, 6248, 6242, 6234, 6368, 6360, - 6354, 6346, 6336, 6328, 6322, 6314, 6295, 6287, 6281, 6273, 6263, 6255, - 6249, 6241, 6219, 6211, 6205, 6197, 6187, 6179, 6173, 6165, 6146, 6138, - 6132, 6124, 6114, 6106, 6100, 6092, 6066, 6058, 6052, 6044, 6034, 6026, - 6020, 6012, 5993, 5985, 5979, 5971, 5961, 5953, 5947, 5939, 5917, 5909, - 5903, 5895, 5885, 5877, 5871, 5863, 5844, 5836, 5830, 5822, 5812, 5804, - 5798, 5790, 6697, 6689, 6683, 6675, 6665, 6657, 6651, 6643, 6624, 6616, - 6610, 6602, 6592, 6584, 6578, 6570, 6548, 6540, 6534, 6526, 6516, 6508, - 6502, 6494, 6475, 6467, 6461, 6453, 6443, 6435, 6429, 6421, 6395, 6387, - 6381, 6373, 6363, 6355, 6349, 6341, 6322, 6314, 6308, 6300, 6290, 6282, - 6276, 6268, 6246, 6238, 6232, 6224, 6214, 6206, 6200, 6192, 6173, 6165, - 6159, 6151, 6141, 6133, 6127, 6119, 6253, 6245, 6239, 6231, 6221, 6213, - 6207, 6199, 6180, 6172, 6166, 6158, 6148, 6140, 6134, 6126, 6104, 6096, - 6090, 6082, 6072, 6064, 6058, 6050, 6031, 6023, 6017, 6009, 5999, 5991, - 5985, 5977, 5951, 5943, 5937, 5929, 5919, 5911, 5905, 5897, 5878, 5870, - 5864, 5856, 5846, 5838, 5832, 5824, 5802, 5794, 5788, 5780, 5770, 5762, - 5756, 5748, 5729, 5721, 5715, 5707, 5697, 5689, 5683, 5675, 5877, 5869, - 5863, 5855, 5845, 5837, 5831, 5823, 5804, 5796, 5790, 5782, 5772, 5764, - 5758, 5750, 5728, 5720, 5714, 5706, 5696, 5688, 5682, 5674, 5655, 5647, - 
5641, 5633, 5623, 5615, 5609, 5601, 5575, 5567, 5561, 5553, 5543, 5535, - 5529, 5521, 5502, 5494, 5488, 5480, 5470, 5462, 5456, 5448, 5426, 5418, - 5412, 5404, 5394, 5386, 5380, 5372, 5353, 5345, 5339, 5331, 5321, 5313, - 5307, 5299, 5433, 5425, 5419, 5411, 5401, 5393, 5387, 5379, 5360, 5352, - 5346, 5338, 5328, 5320, 5314, 5306, 5284, 5276, 5270, 5262, 5252, 5244, - 5238, 5230, 5211, 5203, 5197, 5189, 5179, 5171, 5165, 5157, 5131, 5123, - 5117, 5109, 5099, 5091, 5085, 5077, 5058, 5050, 5044, 5036, 5026, 5018, - 5012, 5004, 4982, 4974, 4968, 4960, 4950, 4942, 4936, 4928, 4909, 4901, - 4895, 4887, 4877, 4869, 4863, 4855, 5586, 5578, 5572, 5564, 5554, 5546, - 5540, 5532, 5513, 5505, 5499, 5491, 5481, 5473, 5467, 5459, 5437, 5429, - 5423, 5415, 5405, 5397, 5391, 5383, 5364, 5356, 5350, 5342, 5332, 5324, - 5318, 5310, 5284, 5276, 5270, 5262, 5252, 5244, 5238, 5230, 5211, 5203, - 5197, 5189, 5179, 5171, 5165, 5157, 5135, 5127, 5121, 5113, 5103, 5095, - 5089, 5081, 5062, 5054, 5048, 5040, 5030, 5022, 5016, 5008, 5142, 5134, - 5128, 5120, 5110, 5102, 5096, 5088, 5069, 5061, 5055, 5047, 5037, 5029, - 5023, 5015, 4993, 4985, 4979, 4971, 4961, 4953, 4947, 4939, 4920, 4912, - 4906, 4898, 4888, 4880, 4874, 4866, 4840, 4832, 4826, 4818, 4808, 4800, - 4794, 4786, 4767, 4759, 4753, 4745, 4735, 4727, 4721, 4713, 4691, 4683, - 4677, 4669, 4659, 4651, 4645, 4637, 4618, 4610, 4604, 4596, 4586, 4578, - 4572, 4564, 4766, 4758, 4752, 4744, 4734, 4726, 4720, 4712, 4693, 4685, - 4679, 4671, 4661, 4653, 4647, 4639, 4617, 4609, 4603, 4595, 4585, 4577, - 4571, 4563, 4544, 4536, 4530, 4522, 4512, 4504, 4498, 4490, 4464, 4456, - 4450, 4442, 4432, 4424, 4418, 4410, 4391, 4383, 4377, 4369, 4359, 4351, - 4345, 4337, 4315, 4307, 4301, 4293, 4283, 4275, 4269, 4261, 4242, 4234, - 4228, 4220, 4210, 4202, 4196, 4188, 4322, 4314, 4308, 4300, 4290, 4282, - 4276, 4268, 4249, 4241, 4235, 4227, 4217, 4209, 4203, 4195, 4173, 4165, - 4159, 4151, 4141, 4133, 4127, 4119, 4100, 4092, 4086, 4078, 4068, 4060, - 
4054, 4046, 4020, 4012, 4006, 3998, 3988, 3980, 3974, 3966, 3947, 3939, - 3933, 3925, 3915, 3907, 3901, 3893, 3871, 3863, 3857, 3849, 3839, 3831, - 3825, 3817, 3798, 3790, 3784, 3776, 3766, 3758, 3752, 3744, 6697, 6689, - 6683, 6675, 6665, 6657, 6651, 6643, 6624, 6616, 6610, 6602, 6592, 6584, - 6578, 6570, 6548, 6540, 6534, 6526, 6516, 6508, 6502, 6494, 6475, 6467, - 6461, 6453, 6443, 6435, 6429, 6421, 6395, 6387, 6381, 6373, 6363, 6355, - 6349, 6341, 6322, 6314, 6308, 6300, 6290, 6282, 6276, 6268, 6246, 6238, - 6232, 6224, 6214, 6206, 6200, 6192, 6173, 6165, 6159, 6151, 6141, 6133, - 6127, 6119, 6253, 6245, 6239, 6231, 6221, 6213, 6207, 6199, 6180, 6172, - 6166, 6158, 6148, 6140, 6134, 6126, 6104, 6096, 6090, 6082, 6072, 6064, - 6058, 6050, 6031, 6023, 6017, 6009, 5999, 5991, 5985, 5977, 5951, 5943, - 5937, 5929, 5919, 5911, 5905, 5897, 5878, 5870, 5864, 5856, 5846, 5838, - 5832, 5824, 5802, 5794, 5788, 5780, 5770, 5762, 5756, 5748, 5729, 5721, - 5715, 5707, 5697, 5689, 5683, 5675, 5877, 5869, 5863, 5855, 5845, 5837, - 5831, 5823, 5804, 5796, 5790, 5782, 5772, 5764, 5758, 5750, 5728, 5720, - 5714, 5706, 5696, 5688, 5682, 5674, 5655, 5647, 5641, 5633, 5623, 5615, - 5609, 5601, 5575, 5567, 5561, 5553, 5543, 5535, 5529, 5521, 5502, 5494, - 5488, 5480, 5470, 5462, 5456, 5448, 5426, 5418, 5412, 5404, 5394, 5386, - 5380, 5372, 5353, 5345, 5339, 5331, 5321, 5313, 5307, 5299, 5433, 5425, - 5419, 5411, 5401, 5393, 5387, 5379, 5360, 5352, 5346, 5338, 5328, 5320, - 5314, 5306, 5284, 5276, 5270, 5262, 5252, 5244, 5238, 5230, 5211, 5203, - 5197, 5189, 5179, 5171, 5165, 5157, 5131, 5123, 5117, 5109, 5099, 5091, - 5085, 5077, 5058, 5050, 5044, 5036, 5026, 5018, 5012, 5004, 4982, 4974, - 4968, 4960, 4950, 4942, 4936, 4928, 4909, 4901, 4895, 4887, 4877, 4869, - 4863, 4855, 5586, 5578, 5572, 5564, 5554, 5546, 5540, 5532, 5513, 5505, - 5499, 5491, 5481, 5473, 5467, 5459, 5437, 5429, 5423, 5415, 5405, 5397, - 5391, 5383, 5364, 5356, 5350, 5342, 5332, 5324, 5318, 5310, 5284, 5276, - 
5270, 5262, 5252, 5244, 5238, 5230, 5211, 5203, 5197, 5189, 5179, 5171, - 5165, 5157, 5135, 5127, 5121, 5113, 5103, 5095, 5089, 5081, 5062, 5054, - 5048, 5040, 5030, 5022, 5016, 5008, 5142, 5134, 5128, 5120, 5110, 5102, - 5096, 5088, 5069, 5061, 5055, 5047, 5037, 5029, 5023, 5015, 4993, 4985, - 4979, 4971, 4961, 4953, 4947, 4939, 4920, 4912, 4906, 4898, 4888, 4880, - 4874, 4866, 4840, 4832, 4826, 4818, 4808, 4800, 4794, 4786, 4767, 4759, - 4753, 4745, 4735, 4727, 4721, 4713, 4691, 4683, 4677, 4669, 4659, 4651, - 4645, 4637, 4618, 4610, 4604, 4596, 4586, 4578, 4572, 4564, 4766, 4758, - 4752, 4744, 4734, 4726, 4720, 4712, 4693, 4685, 4679, 4671, 4661, 4653, - 4647, 4639, 4617, 4609, 4603, 4595, 4585, 4577, 4571, 4563, 4544, 4536, - 4530, 4522, 4512, 4504, 4498, 4490, 4464, 4456, 4450, 4442, 4432, 4424, - 4418, 4410, 4391, 4383, 4377, 4369, 4359, 4351, 4345, 4337, 4315, 4307, - 4301, 4293, 4283, 4275, 4269, 4261, 4242, 4234, 4228, 4220, 4210, 4202, - 4196, 4188, 4322, 4314, 4308, 4300, 4290, 4282, 4276, 4268, 4249, 4241, - 4235, 4227, 4217, 4209, 4203, 4195, 4173, 4165, 4159, 4151, 4141, 4133, - 4127, 4119, 4100, 4092, 4086, 4078, 4068, 4060, 4054, 4046, 4020, 4012, - 4006, 3998, 3988, 3980, 3974, 3966, 3947, 3939, 3933, 3925, 3915, 3907, - 3901, 3893, 3871, 3863, 3857, 3849, 3839, 3831, 3825, 3817, 3798, 3790, - 3784, 3776, 3766, 3758, 3752, 3744, 4651, 4643, 4637, 4629, 4619, 4611, - 4605, 4597, 4578, 4570, 4564, 4556, 4546, 4538, 4532, 4524, 4502, 4494, - 4488, 4480, 4470, 4462, 4456, 4448, 4429, 4421, 4415, 4407, 4397, 4389, - 4383, 4375, 4349, 4341, 4335, 4327, 4317, 4309, 4303, 4295, 4276, 4268, - 4262, 4254, 4244, 4236, 4230, 4222, 4200, 4192, 4186, 4178, 4168, 4160, - 4154, 4146, 4127, 4119, 4113, 4105, 4095, 4087, 4081, 4073, 4207, 4199, - 4193, 4185, 4175, 4167, 4161, 4153, 4134, 4126, 4120, 4112, 4102, 4094, - 4088, 4080, 4058, 4050, 4044, 4036, 4026, 4018, 4012, 4004, 3985, 3977, - 3971, 3963, 3953, 3945, 3939, 3931, 3905, 3897, 3891, 3883, 3873, 3865, - 
3859, 3851, 3832, 3824, 3818, 3810, 3800, 3792, 3786, 3778, 3756, 3748, - 3742, 3734, 3724, 3716, 3710, 3702, 3683, 3675, 3669, 3661, 3651, 3643, - 3637, 3629, 3831, 3823, 3817, 3809, 3799, 3791, 3785, 3777, 3758, 3750, - 3744, 3736, 3726, 3718, 3712, 3704, 3682, 3674, 3668, 3660, 3650, 3642, - 3636, 3628, 3609, 3601, 3595, 3587, 3577, 3569, 3563, 3555, 3529, 3521, - 3515, 3507, 3497, 3489, 3483, 3475, 3456, 3448, 3442, 3434, 3424, 3416, - 3410, 3402, 3380, 3372, 3366, 3358, 3348, 3340, 3334, 3326, 3307, 3299, - 3293, 3285, 3275, 3267, 3261, 3253, 3387, 3379, 3373, 3365, 3355, 3347, - 3341, 3333, 3314, 3306, 3300, 3292, 3282, 3274, 3268, 3260, 3238, 3230, - 3224, 3216, 3206, 3198, 3192, 3184, 3165, 3157, 3151, 3143, 3133, 3125, - 3119, 3111, 3085, 3077, 3071, 3063, 3053, 3045, 3039, 3031, 3012, 3004, - 2998, 2990, 2980, 2972, 2966, 2958, 2936, 2928, 2922, 2914, 2904, 2896, - 2890, 2882, 2863, 2855, 2849, 2841, 2831, 2823, 2817, 2809, 3540, 3532, - 3526, 3518, 3508, 3500, 3494, 3486, 3467, 3459, 3453, 3445, 3435, 3427, - 3421, 3413, 3391, 3383, 3377, 3369, 3359, 3351, 3345, 3337, 3318, 3310, - 3304, 3296, 3286, 3278, 3272, 3264, 3238, 3230, 3224, 3216, 3206, 3198, - 3192, 3184, 3165, 3157, 3151, 3143, 3133, 3125, 3119, 3111, 3089, 3081, - 3075, 3067, 3057, 3049, 3043, 3035, 3016, 3008, 3002, 2994, 2984, 2976, - 2970, 2962, 3096, 3088, 3082, 3074, 3064, 3056, 3050, 3042, 3023, 3015, - 3009, 3001, 2991, 2983, 2977, 2969, 2947, 2939, 2933, 2925, 2915, 2907, - 2901, 2893, 2874, 2866, 2860, 2852, 2842, 2834, 2828, 2820, 2794, 2786, - 2780, 2772, 2762, 2754, 2748, 2740, 2721, 2713, 2707, 2699, 2689, 2681, - 2675, 2667, 2645, 2637, 2631, 2623, 2613, 2605, 2599, 2591, 2572, 2564, - 2558, 2550, 2540, 2532, 2526, 2518, 2720, 2712, 2706, 2698, 2688, 2680, - 2674, 2666, 2647, 2639, 2633, 2625, 2615, 2607, 2601, 2593, 2571, 2563, - 2557, 2549, 2539, 2531, 2525, 2517, 2498, 2490, 2484, 2476, 2466, 2458, - 2452, 2444, 2418, 2410, 2404, 2396, 2386, 2378, 2372, 2364, 2345, 2337, - 
2331, 2323, 2313, 2305, 2299, 2291, 2269, 2261, 2255, 2247, 2237, 2229, - 2223, 2215, 2196, 2188, 2182, 2174, 2164, 2156, 2150, 2142, 2276, 2268, - 2262, 2254, 2244, 2236, 2230, 2222, 2203, 2195, 2189, 2181, 2171, 2163, - 2157, 2149, 2127, 2119, 2113, 2105, 2095, 2087, 2081, 2073, 2054, 2046, - 2040, 2032, 2022, 2014, 2008, 2000, 1974, 1966, 1960, 1952, 1942, 1934, - 1928, 1920, 1901, 1893, 1887, 1879, 1869, 1861, 1855, 1847, 1825, 1817, - 1811, 1803, 1793, 1785, 1779, 1771, 1752, 1744, 1738, 1730, 1720, 1712, - 1706, 1698, 1897, 1883, 1860, 1846, 1819, 1805, 1782, 1768, 1723, 1709, - 1686, 1672, 1645, 1631, 1608, 1594, 1574, 1560, 1537, 1523, 1496, 1482, - 1459, 1445, 1400, 1386, 1363, 1349, 1322, 1308, 1285, 1271, 1608, 1565, - 1535, 1492, 1446, 1403, 1373, 1330, 1312, 1269, 1239, 1196, 1150, 1107, - 1077, 1034, 1291, 1218, 1171, 1098, 1015, 942, 895, 822, 953, 850, - 729, 626, 618, 431, 257, 257, 257, 257, 0, 255, 255, 255, - 255, 429, 616, 624, 727, 848, 951, 820, 893, 940, 1013, 1096, - 1169, 1216, 1289, 1032, 1075, 1105, 1148, 1194, 1237, 1267, 1310, 1328, - 1371, 1401, 1444, 1490, 1533, 1563, 1606, 1269, 1283, 1306, 1320, 1347, - 1361, 1384, 1398, 1443, 1457, 1480, 1494, 1521, 1535, 1558, 1572, 1592, - 1606, 1629, 1643, 1670, 1684, 1707, 1721, 1766, 1780, 1803, 1817, 1844, - 1858, 1881, 1895, 1696, 1704, 1710, 1718, 1728, 1736, 1742, 1750, 1769, - 1777, 1783, 1791, 1801, 1809, 1815, 1823, 1845, 1853, 1859, 1867, 1877, - 1885, 1891, 1899, 1918, 1926, 1932, 1940, 1950, 1958, 1964, 1972, 1998, - 2006, 2012, 2020, 2030, 2038, 2044, 2052, 2071, 2079, 2085, 2093, 2103, - 2111, 2117, 2125, 2147, 2155, 2161, 2169, 2179, 2187, 2193, 2201, 2220, - 2228, 2234, 2242, 2252, 2260, 2266, 2274, 2140, 2148, 2154, 2162, 2172, - 2180, 2186, 2194, 2213, 2221, 2227, 2235, 2245, 2253, 2259, 2267, 2289, - 2297, 2303, 2311, 2321, 2329, 2335, 2343, 2362, 2370, 2376, 2384, 2394, - 2402, 2408, 2416, 2442, 2450, 2456, 2464, 2474, 2482, 2488, 2496, 2515, - 2523, 2529, 2537, 2547, 2555, 
2561, 2569, 2591, 2599, 2605, 2613, 2623, - 2631, 2637, 2645, 2664, 2672, 2678, 2686, 2696, 2704, 2710, 2718, 2516, - 2524, 2530, 2538, 2548, 2556, 2562, 2570, 2589, 2597, 2603, 2611, 2621, - 2629, 2635, 2643, 2665, 2673, 2679, 2687, 2697, 2705, 2711, 2719, 2738, - 2746, 2752, 2760, 2770, 2778, 2784, 2792, 2818, 2826, 2832, 2840, 2850, - 2858, 2864, 2872, 2891, 2899, 2905, 2913, 2923, 2931, 2937, 2945, 2967, - 2975, 2981, 2989, 2999, 3007, 3013, 3021, 3040, 3048, 3054, 3062, 3072, - 3080, 3086, 3094, 2960, 2968, 2974, 2982, 2992, 3000, 3006, 3014, 3033, - 3041, 3047, 3055, 3065, 3073, 3079, 3087, 3109, 3117, 3123, 3131, 3141, - 3149, 3155, 3163, 3182, 3190, 3196, 3204, 3214, 3222, 3228, 3236, 3262, - 3270, 3276, 3284, 3294, 3302, 3308, 3316, 3335, 3343, 3349, 3357, 3367, - 3375, 3381, 3389, 3411, 3419, 3425, 3433, 3443, 3451, 3457, 3465, 3484, - 3492, 3498, 3506, 3516, 3524, 3530, 3538, 2807, 2815, 2821, 2829, 2839, - 2847, 2853, 2861, 2880, 2888, 2894, 2902, 2912, 2920, 2926, 2934, 2956, - 2964, 2970, 2978, 2988, 2996, 3002, 3010, 3029, 3037, 3043, 3051, 3061, - 3069, 3075, 3083, 3109, 3117, 3123, 3131, 3141, 3149, 3155, 3163, 3182, - 3190, 3196, 3204, 3214, 3222, 3228, 3236, 3258, 3266, 3272, 3280, 3290, - 3298, 3304, 3312, 3331, 3339, 3345, 3353, 3363, 3371, 3377, 3385, 3251, - 3259, 3265, 3273, 3283, 3291, 3297, 3305, 3324, 3332, 3338, 3346, 3356, - 3364, 3370, 3378, 3400, 3408, 3414, 3422, 3432, 3440, 3446, 3454, 3473, - 3481, 3487, 3495, 3505, 3513, 3519, 3527, 3553, 3561, 3567, 3575, 3585, - 3593, 3599, 3607, 3626, 3634, 3640, 3648, 3658, 3666, 3672, 3680, 3702, - 3710, 3716, 3724, 3734, 3742, 3748, 3756, 3775, 3783, 3789, 3797, 3807, - 3815, 3821, 3829, 3627, 3635, 3641, 3649, 3659, 3667, 3673, 3681, 3700, - 3708, 3714, 3722, 3732, 3740, 3746, 3754, 3776, 3784, 3790, 3798, 3808, - 3816, 3822, 3830, 3849, 3857, 3863, 3871, 3881, 3889, 3895, 3903, 3929, - 3937, 3943, 3951, 3961, 3969, 3975, 3983, 4002, 4010, 4016, 4024, 4034, - 4042, 4048, 4056, 4078, 4086, 
4092, 4100, 4110, 4118, 4124, 4132, 4151, - 4159, 4165, 4173, 4183, 4191, 4197, 4205, 4071, 4079, 4085, 4093, 4103, - 4111, 4117, 4125, 4144, 4152, 4158, 4166, 4176, 4184, 4190, 4198, 4220, - 4228, 4234, 4242, 4252, 4260, 4266, 4274, 4293, 4301, 4307, 4315, 4325, - 4333, 4339, 4347, 4373, 4381, 4387, 4395, 4405, 4413, 4419, 4427, 4446, - 4454, 4460, 4468, 4478, 4486, 4492, 4500, 4522, 4530, 4536, 4544, 4554, - 4562, 4568, 4576, 4595, 4603, 4609, 4617, 4627, 4635, 4641, 4649, 3742, - 3750, 3756, 3764, 3774, 3782, 3788, 3796, 3815, 3823, 3829, 3837, 3847, - 3855, 3861, 3869, 3891, 3899, 3905, 3913, 3923, 3931, 3937, 3945, 3964, - 3972, 3978, 3986, 3996, 4004, 4010, 4018, 4044, 4052, 4058, 4066, 4076, - 4084, 4090, 4098, 4117, 4125, 4131, 4139, 4149, 4157, 4163, 4171, 4193, - 4201, 4207, 4215, 4225, 4233, 4239, 4247, 4266, 4274, 4280, 4288, 4298, - 4306, 4312, 4320, 4186, 4194, 4200, 4208, 4218, 4226, 4232, 4240, 4259, - 4267, 4273, 4281, 4291, 4299, 4305, 4313, 4335, 4343, 4349, 4357, 4367, - 4375, 4381, 4389, 4408, 4416, 4422, 4430, 4440, 4448, 4454, 4462, 4488, - 4496, 4502, 4510, 4520, 4528, 4534, 4542, 4561, 4569, 4575, 4583, 4593, - 4601, 4607, 4615, 4637, 4645, 4651, 4659, 4669, 4677, 4683, 4691, 4710, - 4718, 4724, 4732, 4742, 4750, 4756, 4764, 4562, 4570, 4576, 4584, 4594, - 4602, 4608, 4616, 4635, 4643, 4649, 4657, 4667, 4675, 4681, 4689, 4711, - 4719, 4725, 4733, 4743, 4751, 4757, 4765, 4784, 4792, 4798, 4806, 4816, - 4824, 4830, 4838, 4864, 4872, 4878, 4886, 4896, 4904, 4910, 4918, 4937, - 4945, 4951, 4959, 4969, 4977, 4983, 4991, 5013, 5021, 5027, 5035, 5045, - 5053, 5059, 5067, 5086, 5094, 5100, 5108, 5118, 5126, 5132, 5140, 5006, - 5014, 5020, 5028, 5038, 5046, 5052, 5060, 5079, 5087, 5093, 5101, 5111, - 5119, 5125, 5133, 5155, 5163, 5169, 5177, 5187, 5195, 5201, 5209, 5228, - 5236, 5242, 5250, 5260, 5268, 5274, 5282, 5308, 5316, 5322, 5330, 5340, - 5348, 5354, 5362, 5381, 5389, 5395, 5403, 5413, 5421, 5427, 5435, 5457, - 5465, 5471, 5479, 5489, 5497, 
5503, 5511, 5530, 5538, 5544, 5552, 5562, - 5570, 5576, 5584, 4853, 4861, 4867, 4875, 4885, 4893, 4899, 4907, 4926, - 4934, 4940, 4948, 4958, 4966, 4972, 4980, 5002, 5010, 5016, 5024, 5034, - 5042, 5048, 5056, 5075, 5083, 5089, 5097, 5107, 5115, 5121, 5129, 5155, - 5163, 5169, 5177, 5187, 5195, 5201, 5209, 5228, 5236, 5242, 5250, 5260, - 5268, 5274, 5282, 5304, 5312, 5318, 5326, 5336, 5344, 5350, 5358, 5377, - 5385, 5391, 5399, 5409, 5417, 5423, 5431, 5297, 5305, 5311, 5319, 5329, - 5337, 5343, 5351, 5370, 5378, 5384, 5392, 5402, 5410, 5416, 5424, 5446, - 5454, 5460, 5468, 5478, 5486, 5492, 5500, 5519, 5527, 5533, 5541, 5551, - 5559, 5565, 5573, 5599, 5607, 5613, 5621, 5631, 5639, 5645, 5653, 5672, - 5680, 5686, 5694, 5704, 5712, 5718, 5726, 5748, 5756, 5762, 5770, 5780, - 5788, 5794, 5802, 5821, 5829, 5835, 5843, 5853, 5861, 5867, 5875, 5673, - 5681, 5687, 5695, 5705, 5713, 5719, 5727, 5746, 5754, 5760, 5768, 5778, - 5786, 5792, 5800, 5822, 5830, 5836, 5844, 5854, 5862, 5868, 5876, 5895, - 5903, 5909, 5917, 5927, 5935, 5941, 5949, 5975, 5983, 5989, 5997, 6007, - 6015, 6021, 6029, 6048, 6056, 6062, 6070, 6080, 6088, 6094, 6102, 6124, - 6132, 6138, 6146, 6156, 6164, 6170, 6178, 6197, 6205, 6211, 6219, 6229, - 6237, 6243, 6251, 6117, 6125, 6131, 6139, 6149, 6157, 6163, 6171, 6190, - 6198, 6204, 6212, 6222, 6230, 6236, 6244, 6266, 6274, 6280, 6288, 6298, - 6306, 6312, 6320, 6339, 6347, 6353, 6361, 6371, 6379, 6385, 6393, 6419, - 6427, 6433, 6441, 6451, 6459, 6465, 6473, 6492, 6500, 6506, 6514, 6524, - 6532, 6538, 6546, 6568, 6576, 6582, 6590, 6600, 6608, 6614, 6622, 6641, - 6649, 6655, 6663, 6673, 6681, 6687, 6695, 3742, 3750, 3756, 3764, 3774, - 3782, 3788, 3796, 3815, 3823, 3829, 3837, 3847, 3855, 3861, 3869, 3891, - 3899, 3905, 3913, 3923, 3931, 3937, 3945, 3964, 3972, 3978, 3986, 3996, - 4004, 4010, 4018, 4044, 4052, 4058, 4066, 4076, 4084, 4090, 4098, 4117, - 4125, 4131, 4139, 4149, 4157, 4163, 4171, 4193, 4201, 4207, 4215, 4225, - 4233, 4239, 4247, 4266, 4274, 
4280, 4288, 4298, 4306, 4312, 4320, 4186, - 4194, 4200, 4208, 4218, 4226, 4232, 4240, 4259, 4267, 4273, 4281, 4291, - 4299, 4305, 4313, 4335, 4343, 4349, 4357, 4367, 4375, 4381, 4389, 4408, - 4416, 4422, 4430, 4440, 4448, 4454, 4462, 4488, 4496, 4502, 4510, 4520, - 4528, 4534, 4542, 4561, 4569, 4575, 4583, 4593, 4601, 4607, 4615, 4637, - 4645, 4651, 4659, 4669, 4677, 4683, 4691, 4710, 4718, 4724, 4732, 4742, - 4750, 4756, 4764, 4562, 4570, 4576, 4584, 4594, 4602, 4608, 4616, 4635, - 4643, 4649, 4657, 4667, 4675, 4681, 4689, 4711, 4719, 4725, 4733, 4743, - 4751, 4757, 4765, 4784, 4792, 4798, 4806, 4816, 4824, 4830, 4838, 4864, - 4872, 4878, 4886, 4896, 4904, 4910, 4918, 4937, 4945, 4951, 4959, 4969, - 4977, 4983, 4991, 5013, 5021, 5027, 5035, 5045, 5053, 5059, 5067, 5086, - 5094, 5100, 5108, 5118, 5126, 5132, 5140, 5006, 5014, 5020, 5028, 5038, - 5046, 5052, 5060, 5079, 5087, 5093, 5101, 5111, 5119, 5125, 5133, 5155, - 5163, 5169, 5177, 5187, 5195, 5201, 5209, 5228, 5236, 5242, 5250, 5260, - 5268, 5274, 5282, 5308, 5316, 5322, 5330, 5340, 5348, 5354, 5362, 5381, - 5389, 5395, 5403, 5413, 5421, 5427, 5435, 5457, 5465, 5471, 5479, 5489, - 5497, 5503, 5511, 5530, 5538, 5544, 5552, 5562, 5570, 5576, 5584, 4853, - 4861, 4867, 4875, 4885, 4893, 4899, 4907, 4926, 4934, 4940, 4948, 4958, - 4966, 4972, 4980, 5002, 5010, 5016, 5024, 5034, 5042, 5048, 5056, 5075, - 5083, 5089, 5097, 5107, 5115, 5121, 5129, 5155, 5163, 5169, 5177, 5187, - 5195, 5201, 5209, 5228, 5236, 5242, 5250, 5260, 5268, 5274, 5282, 5304, - 5312, 5318, 5326, 5336, 5344, 5350, 5358, 5377, 5385, 5391, 5399, 5409, - 5417, 5423, 5431, 5297, 5305, 5311, 5319, 5329, 5337, 5343, 5351, 5370, - 5378, 5384, 5392, 5402, 5410, 5416, 5424, 5446, 5454, 5460, 5468, 5478, - 5486, 5492, 5500, 5519, 5527, 5533, 5541, 5551, 5559, 5565, 5573, 5599, - 5607, 5613, 5621, 5631, 5639, 5645, 5653, 5672, 5680, 5686, 5694, 5704, - 5712, 5718, 5726, 5748, 5756, 5762, 5770, 5780, 5788, 5794, 5802, 5821, - 5829, 5835, 5843, 5853, 5861, 
5867, 5875, 5673, 5681, 5687, 5695, 5705, - 5713, 5719, 5727, 5746, 5754, 5760, 5768, 5778, 5786, 5792, 5800, 5822, - 5830, 5836, 5844, 5854, 5862, 5868, 5876, 5895, 5903, 5909, 5917, 5927, - 5935, 5941, 5949, 5975, 5983, 5989, 5997, 6007, 6015, 6021, 6029, 6048, - 6056, 6062, 6070, 6080, 6088, 6094, 6102, 6124, 6132, 6138, 6146, 6156, - 6164, 6170, 6178, 6197, 6205, 6211, 6219, 6229, 6237, 6243, 6251, 6117, - 6125, 6131, 6139, 6149, 6157, 6163, 6171, 6190, 6198, 6204, 6212, 6222, - 6230, 6236, 6244, 6266, 6274, 6280, 6288, 6298, 6306, 6312, 6320, 6339, - 6347, 6353, 6361, 6371, 6379, 6385, 6393, 6419, 6427, 6433, 6441, 6451, - 6459, 6465, 6473, 6492, 6500, 6506, 6514, 6524, 6532, 6538, 6546, 6568, - 6576, 6582, 6590, 6600, 6608, 6614, 6622, 6641, 6649, 6655, 6663, 6673, - 6681, 6687, 6695, 5788, 5796, 5802, 5810, 5820, 5828, 5834, 5842, 5861, - 5869, 5875, 5883, 5893, 5901, 5907, 5915, 5937, 5945, 5951, 5959, 5969, - 5977, 5983, 5991, 6010, 6018, 6024, 6032, 6042, 6050, 6056, 6064, 6090, - 6098, 6104, 6112, 6122, 6130, 6136, 6144, 6163, 6171, 6177, 6185, 6195, - 6203, 6209, 6217, 6239, 6247, 6253, 6261, 6271, 6279, 6285, 6293, 6312, - 6320, 6326, 6334, 6344, 6352, 6358, 6366, 6232, 6240, 6246, 6254, 6264, - 6272, 6278, 6286, 6305, 6313, 6319, 6327, 6337, 6345, 6351, 6359, 6381, - 6389, 6395, 6403, 6413, 6421, 6427, 6435, 6454, 6462, 6468, 6476, 6486, - 6494, 6500, 6508, 6534, 6542, 6548, 6556, 6566, 6574, 6580, 6588, 6607, - 6615, 6621, 6629, 6639, 6647, 6653, 6661, 6683, 6691, 6697, 6705, 6715, - 6723, 6729, 6737, 6756, 6764, 6770, 6778, 6788, 6796, 6802, 6810, 6608, - 6616, 6622, 6630, 6640, 6648, 6654, 6662, 6681, 6689, 6695, 6703, 6713, - 6721, 6727, 6735, 6757, 6765, 6771, 6779, 6789, 6797, 6803, 6811, 6830, - 6838, 6844, 6852, 6862, 6870, 6876, 6884, 6910, 6918, 6924, 6932, 6942, - 6950, 6956, 6964, 6983, 6991, 6997, 7005, 7015, 7023, 7029, 7037, 7059, - 7067, 7073, 7081, 7091, 7099, 7105, 7113, 7132, 7140, 7146, 7154, 7164, - 7172, 7178, 7186, 7052, 7060, 
7066, 7074, 7084, 7092, 7098, 7106, 7125, - 7133, 7139, 7147, 7157, 7165, 7171, 7179, 7201, 7209, 7215, 7223, 7233, - 7241, 7247, 7255, 7274, 7282, 7288, 7296, 7306, 7314, 7320, 7328, 7354, - 7362, 7368, 7376, 7386, 7394, 7400, 7408, 7427, 7435, 7441, 7449, 7459, - 7467, 7473, 7481, 7503, 7511, 7517, 7525, 7535, 7543, 7549, 7557, 7576, - 7584, 7590, 7598, 7608, 7616, 7622, 7630, 6899, 6907, 6913, 6921, 6931, - 6939, 6945, 6953, 6972, 6980, 6986, 6994, 7004, 7012, 7018, 7026, 7048, - 7056, 7062, 7070, 7080, 7088, 7094, 7102, 7121, 7129, 7135, 7143, 7153, - 7161, 7167, 7175, 7201, 7209, 7215, 7223, 7233, 7241, 7247, 7255, 7274, - 7282, 7288, 7296, 7306, 7314, 7320, 7328, 7350, 7358, 7364, 7372, 7382, - 7390, 7396, 7404, 7423, 7431, 7437, 7445, 7455, 7463, 7469, 7477, 7343, - 7351, 7357, 7365, 7375, 7383, 7389, 7397, 7416, 7424, 7430, 7438, 7448, - 7456, 7462, 7470, 7492, 7500, 7506, 7514, 7524, 7532, 7538, 7546, 7565, - 7573, 7579, 7587, 7597, 7605, 7611, 7619, 7645, 7653, 7659, 7667, 7677, - 7685, 7691, 7699, 7718, 7726, 7732, 7740, 7750, 7758, 7764, 7772, 7794, - 7802, 7808, 7816, 7826, 7834, 7840, 7848, 7867, 7875, 7881, 7889, 7899, - 7907, 7913, 7921, 7719, 7727, 7733, 7741, 7751, 7759, 7765, 7773, 7792, - 7800, 7806, 7814, 7824, 7832, 7838, 7846, 7868, 7876, 7882, 7890, 7900, - 7908, 7914, 7922, 7941, 7949, 7955, 7963, 7973, 7981, 7987, 7995, 8021, - 8029, 8035, 8043, 8053, 8061, 8067, 8075, 8094, 8102, 8108, 8116, 8126, - 8134, 8140, 8148, 8170, 8178, 8184, 8192, 8202, 8210, 8216, 8224, 8243, - 8251, 8257, 8265, 8275 +static const short dct_value_cost[2048 * 2] = { + 8285, 8277, 8267, 8259, 8253, 8245, 8226, 8218, 8212, 8204, 8194, 8186, 8180, + 8172, 8150, 8142, 8136, 8128, 8118, 8110, 8104, 8096, 8077, 8069, 8063, 8055, + 8045, 8037, 8031, 8023, 7997, 7989, 7983, 7975, 7965, 7957, 7951, 7943, 7924, + 7916, 7910, 7902, 7892, 7884, 7878, 7870, 7848, 7840, 7834, 7826, 7816, 7808, + 7802, 7794, 7775, 7767, 7761, 7753, 7743, 7735, 7729, 7721, 7923, 7915, 7909, + 
7901, 7891, 7883, 7877, 7869, 7850, 7842, 7836, 7828, 7818, 7810, 7804, 7796, + 7774, 7766, 7760, 7752, 7742, 7734, 7728, 7720, 7701, 7693, 7687, 7679, 7669, + 7661, 7655, 7647, 7621, 7613, 7607, 7599, 7589, 7581, 7575, 7567, 7548, 7540, + 7534, 7526, 7516, 7508, 7502, 7494, 7472, 7464, 7458, 7450, 7440, 7432, 7426, + 7418, 7399, 7391, 7385, 7377, 7367, 7359, 7353, 7345, 7479, 7471, 7465, 7457, + 7447, 7439, 7433, 7425, 7406, 7398, 7392, 7384, 7374, 7366, 7360, 7352, 7330, + 7322, 7316, 7308, 7298, 7290, 7284, 7276, 7257, 7249, 7243, 7235, 7225, 7217, + 7211, 7203, 7177, 7169, 7163, 7155, 7145, 7137, 7131, 7123, 7104, 7096, 7090, + 7082, 7072, 7064, 7058, 7050, 7028, 7020, 7014, 7006, 6996, 6988, 6982, 6974, + 6955, 6947, 6941, 6933, 6923, 6915, 6909, 6901, 7632, 7624, 7618, 7610, 7600, + 7592, 7586, 7578, 7559, 7551, 7545, 7537, 7527, 7519, 7513, 7505, 7483, 7475, + 7469, 7461, 7451, 7443, 7437, 7429, 7410, 7402, 7396, 7388, 7378, 7370, 7364, + 7356, 7330, 7322, 7316, 7308, 7298, 7290, 7284, 7276, 7257, 7249, 7243, 7235, + 7225, 7217, 7211, 7203, 7181, 7173, 7167, 7159, 7149, 7141, 7135, 7127, 7108, + 7100, 7094, 7086, 7076, 7068, 7062, 7054, 7188, 7180, 7174, 7166, 7156, 7148, + 7142, 7134, 7115, 7107, 7101, 7093, 7083, 7075, 7069, 7061, 7039, 7031, 7025, + 7017, 7007, 6999, 6993, 6985, 6966, 6958, 6952, 6944, 6934, 6926, 6920, 6912, + 6886, 6878, 6872, 6864, 6854, 6846, 6840, 6832, 6813, 6805, 6799, 6791, 6781, + 6773, 6767, 6759, 6737, 6729, 6723, 6715, 6705, 6697, 6691, 6683, 6664, 6656, + 6650, 6642, 6632, 6624, 6618, 6610, 6812, 6804, 6798, 6790, 6780, 6772, 6766, + 6758, 6739, 6731, 6725, 6717, 6707, 6699, 6693, 6685, 6663, 6655, 6649, 6641, + 6631, 6623, 6617, 6609, 6590, 6582, 6576, 6568, 6558, 6550, 6544, 6536, 6510, + 6502, 6496, 6488, 6478, 6470, 6464, 6456, 6437, 6429, 6423, 6415, 6405, 6397, + 6391, 6383, 6361, 6353, 6347, 6339, 6329, 6321, 6315, 6307, 6288, 6280, 6274, + 6266, 6256, 6248, 6242, 6234, 6368, 6360, 6354, 6346, 6336, 6328, 6322, 6314, + 
6295, 6287, 6281, 6273, 6263, 6255, 6249, 6241, 6219, 6211, 6205, 6197, 6187, + 6179, 6173, 6165, 6146, 6138, 6132, 6124, 6114, 6106, 6100, 6092, 6066, 6058, + 6052, 6044, 6034, 6026, 6020, 6012, 5993, 5985, 5979, 5971, 5961, 5953, 5947, + 5939, 5917, 5909, 5903, 5895, 5885, 5877, 5871, 5863, 5844, 5836, 5830, 5822, + 5812, 5804, 5798, 5790, 6697, 6689, 6683, 6675, 6665, 6657, 6651, 6643, 6624, + 6616, 6610, 6602, 6592, 6584, 6578, 6570, 6548, 6540, 6534, 6526, 6516, 6508, + 6502, 6494, 6475, 6467, 6461, 6453, 6443, 6435, 6429, 6421, 6395, 6387, 6381, + 6373, 6363, 6355, 6349, 6341, 6322, 6314, 6308, 6300, 6290, 6282, 6276, 6268, + 6246, 6238, 6232, 6224, 6214, 6206, 6200, 6192, 6173, 6165, 6159, 6151, 6141, + 6133, 6127, 6119, 6253, 6245, 6239, 6231, 6221, 6213, 6207, 6199, 6180, 6172, + 6166, 6158, 6148, 6140, 6134, 6126, 6104, 6096, 6090, 6082, 6072, 6064, 6058, + 6050, 6031, 6023, 6017, 6009, 5999, 5991, 5985, 5977, 5951, 5943, 5937, 5929, + 5919, 5911, 5905, 5897, 5878, 5870, 5864, 5856, 5846, 5838, 5832, 5824, 5802, + 5794, 5788, 5780, 5770, 5762, 5756, 5748, 5729, 5721, 5715, 5707, 5697, 5689, + 5683, 5675, 5877, 5869, 5863, 5855, 5845, 5837, 5831, 5823, 5804, 5796, 5790, + 5782, 5772, 5764, 5758, 5750, 5728, 5720, 5714, 5706, 5696, 5688, 5682, 5674, + 5655, 5647, 5641, 5633, 5623, 5615, 5609, 5601, 5575, 5567, 5561, 5553, 5543, + 5535, 5529, 5521, 5502, 5494, 5488, 5480, 5470, 5462, 5456, 5448, 5426, 5418, + 5412, 5404, 5394, 5386, 5380, 5372, 5353, 5345, 5339, 5331, 5321, 5313, 5307, + 5299, 5433, 5425, 5419, 5411, 5401, 5393, 5387, 5379, 5360, 5352, 5346, 5338, + 5328, 5320, 5314, 5306, 5284, 5276, 5270, 5262, 5252, 5244, 5238, 5230, 5211, + 5203, 5197, 5189, 5179, 5171, 5165, 5157, 5131, 5123, 5117, 5109, 5099, 5091, + 5085, 5077, 5058, 5050, 5044, 5036, 5026, 5018, 5012, 5004, 4982, 4974, 4968, + 4960, 4950, 4942, 4936, 4928, 4909, 4901, 4895, 4887, 4877, 4869, 4863, 4855, + 5586, 5578, 5572, 5564, 5554, 5546, 5540, 5532, 5513, 5505, 5499, 5491, 5481, + 
5473, 5467, 5459, 5437, 5429, 5423, 5415, 5405, 5397, 5391, 5383, 5364, 5356, + 5350, 5342, 5332, 5324, 5318, 5310, 5284, 5276, 5270, 5262, 5252, 5244, 5238, + 5230, 5211, 5203, 5197, 5189, 5179, 5171, 5165, 5157, 5135, 5127, 5121, 5113, + 5103, 5095, 5089, 5081, 5062, 5054, 5048, 5040, 5030, 5022, 5016, 5008, 5142, + 5134, 5128, 5120, 5110, 5102, 5096, 5088, 5069, 5061, 5055, 5047, 5037, 5029, + 5023, 5015, 4993, 4985, 4979, 4971, 4961, 4953, 4947, 4939, 4920, 4912, 4906, + 4898, 4888, 4880, 4874, 4866, 4840, 4832, 4826, 4818, 4808, 4800, 4794, 4786, + 4767, 4759, 4753, 4745, 4735, 4727, 4721, 4713, 4691, 4683, 4677, 4669, 4659, + 4651, 4645, 4637, 4618, 4610, 4604, 4596, 4586, 4578, 4572, 4564, 4766, 4758, + 4752, 4744, 4734, 4726, 4720, 4712, 4693, 4685, 4679, 4671, 4661, 4653, 4647, + 4639, 4617, 4609, 4603, 4595, 4585, 4577, 4571, 4563, 4544, 4536, 4530, 4522, + 4512, 4504, 4498, 4490, 4464, 4456, 4450, 4442, 4432, 4424, 4418, 4410, 4391, + 4383, 4377, 4369, 4359, 4351, 4345, 4337, 4315, 4307, 4301, 4293, 4283, 4275, + 4269, 4261, 4242, 4234, 4228, 4220, 4210, 4202, 4196, 4188, 4322, 4314, 4308, + 4300, 4290, 4282, 4276, 4268, 4249, 4241, 4235, 4227, 4217, 4209, 4203, 4195, + 4173, 4165, 4159, 4151, 4141, 4133, 4127, 4119, 4100, 4092, 4086, 4078, 4068, + 4060, 4054, 4046, 4020, 4012, 4006, 3998, 3988, 3980, 3974, 3966, 3947, 3939, + 3933, 3925, 3915, 3907, 3901, 3893, 3871, 3863, 3857, 3849, 3839, 3831, 3825, + 3817, 3798, 3790, 3784, 3776, 3766, 3758, 3752, 3744, 6697, 6689, 6683, 6675, + 6665, 6657, 6651, 6643, 6624, 6616, 6610, 6602, 6592, 6584, 6578, 6570, 6548, + 6540, 6534, 6526, 6516, 6508, 6502, 6494, 6475, 6467, 6461, 6453, 6443, 6435, + 6429, 6421, 6395, 6387, 6381, 6373, 6363, 6355, 6349, 6341, 6322, 6314, 6308, + 6300, 6290, 6282, 6276, 6268, 6246, 6238, 6232, 6224, 6214, 6206, 6200, 6192, + 6173, 6165, 6159, 6151, 6141, 6133, 6127, 6119, 6253, 6245, 6239, 6231, 6221, + 6213, 6207, 6199, 6180, 6172, 6166, 6158, 6148, 6140, 6134, 6126, 6104, 6096, + 
6090, 6082, 6072, 6064, 6058, 6050, 6031, 6023, 6017, 6009, 5999, 5991, 5985, + 5977, 5951, 5943, 5937, 5929, 5919, 5911, 5905, 5897, 5878, 5870, 5864, 5856, + 5846, 5838, 5832, 5824, 5802, 5794, 5788, 5780, 5770, 5762, 5756, 5748, 5729, + 5721, 5715, 5707, 5697, 5689, 5683, 5675, 5877, 5869, 5863, 5855, 5845, 5837, + 5831, 5823, 5804, 5796, 5790, 5782, 5772, 5764, 5758, 5750, 5728, 5720, 5714, + 5706, 5696, 5688, 5682, 5674, 5655, 5647, 5641, 5633, 5623, 5615, 5609, 5601, + 5575, 5567, 5561, 5553, 5543, 5535, 5529, 5521, 5502, 5494, 5488, 5480, 5470, + 5462, 5456, 5448, 5426, 5418, 5412, 5404, 5394, 5386, 5380, 5372, 5353, 5345, + 5339, 5331, 5321, 5313, 5307, 5299, 5433, 5425, 5419, 5411, 5401, 5393, 5387, + 5379, 5360, 5352, 5346, 5338, 5328, 5320, 5314, 5306, 5284, 5276, 5270, 5262, + 5252, 5244, 5238, 5230, 5211, 5203, 5197, 5189, 5179, 5171, 5165, 5157, 5131, + 5123, 5117, 5109, 5099, 5091, 5085, 5077, 5058, 5050, 5044, 5036, 5026, 5018, + 5012, 5004, 4982, 4974, 4968, 4960, 4950, 4942, 4936, 4928, 4909, 4901, 4895, + 4887, 4877, 4869, 4863, 4855, 5586, 5578, 5572, 5564, 5554, 5546, 5540, 5532, + 5513, 5505, 5499, 5491, 5481, 5473, 5467, 5459, 5437, 5429, 5423, 5415, 5405, + 5397, 5391, 5383, 5364, 5356, 5350, 5342, 5332, 5324, 5318, 5310, 5284, 5276, + 5270, 5262, 5252, 5244, 5238, 5230, 5211, 5203, 5197, 5189, 5179, 5171, 5165, + 5157, 5135, 5127, 5121, 5113, 5103, 5095, 5089, 5081, 5062, 5054, 5048, 5040, + 5030, 5022, 5016, 5008, 5142, 5134, 5128, 5120, 5110, 5102, 5096, 5088, 5069, + 5061, 5055, 5047, 5037, 5029, 5023, 5015, 4993, 4985, 4979, 4971, 4961, 4953, + 4947, 4939, 4920, 4912, 4906, 4898, 4888, 4880, 4874, 4866, 4840, 4832, 4826, + 4818, 4808, 4800, 4794, 4786, 4767, 4759, 4753, 4745, 4735, 4727, 4721, 4713, + 4691, 4683, 4677, 4669, 4659, 4651, 4645, 4637, 4618, 4610, 4604, 4596, 4586, + 4578, 4572, 4564, 4766, 4758, 4752, 4744, 4734, 4726, 4720, 4712, 4693, 4685, + 4679, 4671, 4661, 4653, 4647, 4639, 4617, 4609, 4603, 4595, 4585, 4577, 4571, + 
4563, 4544, 4536, 4530, 4522, 4512, 4504, 4498, 4490, 4464, 4456, 4450, 4442, + 4432, 4424, 4418, 4410, 4391, 4383, 4377, 4369, 4359, 4351, 4345, 4337, 4315, + 4307, 4301, 4293, 4283, 4275, 4269, 4261, 4242, 4234, 4228, 4220, 4210, 4202, + 4196, 4188, 4322, 4314, 4308, 4300, 4290, 4282, 4276, 4268, 4249, 4241, 4235, + 4227, 4217, 4209, 4203, 4195, 4173, 4165, 4159, 4151, 4141, 4133, 4127, 4119, + 4100, 4092, 4086, 4078, 4068, 4060, 4054, 4046, 4020, 4012, 4006, 3998, 3988, + 3980, 3974, 3966, 3947, 3939, 3933, 3925, 3915, 3907, 3901, 3893, 3871, 3863, + 3857, 3849, 3839, 3831, 3825, 3817, 3798, 3790, 3784, 3776, 3766, 3758, 3752, + 3744, 4651, 4643, 4637, 4629, 4619, 4611, 4605, 4597, 4578, 4570, 4564, 4556, + 4546, 4538, 4532, 4524, 4502, 4494, 4488, 4480, 4470, 4462, 4456, 4448, 4429, + 4421, 4415, 4407, 4397, 4389, 4383, 4375, 4349, 4341, 4335, 4327, 4317, 4309, + 4303, 4295, 4276, 4268, 4262, 4254, 4244, 4236, 4230, 4222, 4200, 4192, 4186, + 4178, 4168, 4160, 4154, 4146, 4127, 4119, 4113, 4105, 4095, 4087, 4081, 4073, + 4207, 4199, 4193, 4185, 4175, 4167, 4161, 4153, 4134, 4126, 4120, 4112, 4102, + 4094, 4088, 4080, 4058, 4050, 4044, 4036, 4026, 4018, 4012, 4004, 3985, 3977, + 3971, 3963, 3953, 3945, 3939, 3931, 3905, 3897, 3891, 3883, 3873, 3865, 3859, + 3851, 3832, 3824, 3818, 3810, 3800, 3792, 3786, 3778, 3756, 3748, 3742, 3734, + 3724, 3716, 3710, 3702, 3683, 3675, 3669, 3661, 3651, 3643, 3637, 3629, 3831, + 3823, 3817, 3809, 3799, 3791, 3785, 3777, 3758, 3750, 3744, 3736, 3726, 3718, + 3712, 3704, 3682, 3674, 3668, 3660, 3650, 3642, 3636, 3628, 3609, 3601, 3595, + 3587, 3577, 3569, 3563, 3555, 3529, 3521, 3515, 3507, 3497, 3489, 3483, 3475, + 3456, 3448, 3442, 3434, 3424, 3416, 3410, 3402, 3380, 3372, 3366, 3358, 3348, + 3340, 3334, 3326, 3307, 3299, 3293, 3285, 3275, 3267, 3261, 3253, 3387, 3379, + 3373, 3365, 3355, 3347, 3341, 3333, 3314, 3306, 3300, 3292, 3282, 3274, 3268, + 3260, 3238, 3230, 3224, 3216, 3206, 3198, 3192, 3184, 3165, 3157, 3151, 3143, + 
3133, 3125, 3119, 3111, 3085, 3077, 3071, 3063, 3053, 3045, 3039, 3031, 3012, + 3004, 2998, 2990, 2980, 2972, 2966, 2958, 2936, 2928, 2922, 2914, 2904, 2896, + 2890, 2882, 2863, 2855, 2849, 2841, 2831, 2823, 2817, 2809, 3540, 3532, 3526, + 3518, 3508, 3500, 3494, 3486, 3467, 3459, 3453, 3445, 3435, 3427, 3421, 3413, + 3391, 3383, 3377, 3369, 3359, 3351, 3345, 3337, 3318, 3310, 3304, 3296, 3286, + 3278, 3272, 3264, 3238, 3230, 3224, 3216, 3206, 3198, 3192, 3184, 3165, 3157, + 3151, 3143, 3133, 3125, 3119, 3111, 3089, 3081, 3075, 3067, 3057, 3049, 3043, + 3035, 3016, 3008, 3002, 2994, 2984, 2976, 2970, 2962, 3096, 3088, 3082, 3074, + 3064, 3056, 3050, 3042, 3023, 3015, 3009, 3001, 2991, 2983, 2977, 2969, 2947, + 2939, 2933, 2925, 2915, 2907, 2901, 2893, 2874, 2866, 2860, 2852, 2842, 2834, + 2828, 2820, 2794, 2786, 2780, 2772, 2762, 2754, 2748, 2740, 2721, 2713, 2707, + 2699, 2689, 2681, 2675, 2667, 2645, 2637, 2631, 2623, 2613, 2605, 2599, 2591, + 2572, 2564, 2558, 2550, 2540, 2532, 2526, 2518, 2720, 2712, 2706, 2698, 2688, + 2680, 2674, 2666, 2647, 2639, 2633, 2625, 2615, 2607, 2601, 2593, 2571, 2563, + 2557, 2549, 2539, 2531, 2525, 2517, 2498, 2490, 2484, 2476, 2466, 2458, 2452, + 2444, 2418, 2410, 2404, 2396, 2386, 2378, 2372, 2364, 2345, 2337, 2331, 2323, + 2313, 2305, 2299, 2291, 2269, 2261, 2255, 2247, 2237, 2229, 2223, 2215, 2196, + 2188, 2182, 2174, 2164, 2156, 2150, 2142, 2276, 2268, 2262, 2254, 2244, 2236, + 2230, 2222, 2203, 2195, 2189, 2181, 2171, 2163, 2157, 2149, 2127, 2119, 2113, + 2105, 2095, 2087, 2081, 2073, 2054, 2046, 2040, 2032, 2022, 2014, 2008, 2000, + 1974, 1966, 1960, 1952, 1942, 1934, 1928, 1920, 1901, 1893, 1887, 1879, 1869, + 1861, 1855, 1847, 1825, 1817, 1811, 1803, 1793, 1785, 1779, 1771, 1752, 1744, + 1738, 1730, 1720, 1712, 1706, 1698, 1897, 1883, 1860, 1846, 1819, 1805, 1782, + 1768, 1723, 1709, 1686, 1672, 1645, 1631, 1608, 1594, 1574, 1560, 1537, 1523, + 1496, 1482, 1459, 1445, 1400, 1386, 1363, 1349, 1322, 1308, 1285, 1271, 1608, + 
1565, 1535, 1492, 1446, 1403, 1373, 1330, 1312, 1269, 1239, 1196, 1150, 1107, + 1077, 1034, 1291, 1218, 1171, 1098, 1015, 942, 895, 822, 953, 850, 729, + 626, 618, 431, 257, 257, 257, 257, 0, 255, 255, 255, 255, 429, + 616, 624, 727, 848, 951, 820, 893, 940, 1013, 1096, 1169, 1216, 1289, + 1032, 1075, 1105, 1148, 1194, 1237, 1267, 1310, 1328, 1371, 1401, 1444, 1490, + 1533, 1563, 1606, 1269, 1283, 1306, 1320, 1347, 1361, 1384, 1398, 1443, 1457, + 1480, 1494, 1521, 1535, 1558, 1572, 1592, 1606, 1629, 1643, 1670, 1684, 1707, + 1721, 1766, 1780, 1803, 1817, 1844, 1858, 1881, 1895, 1696, 1704, 1710, 1718, + 1728, 1736, 1742, 1750, 1769, 1777, 1783, 1791, 1801, 1809, 1815, 1823, 1845, + 1853, 1859, 1867, 1877, 1885, 1891, 1899, 1918, 1926, 1932, 1940, 1950, 1958, + 1964, 1972, 1998, 2006, 2012, 2020, 2030, 2038, 2044, 2052, 2071, 2079, 2085, + 2093, 2103, 2111, 2117, 2125, 2147, 2155, 2161, 2169, 2179, 2187, 2193, 2201, + 2220, 2228, 2234, 2242, 2252, 2260, 2266, 2274, 2140, 2148, 2154, 2162, 2172, + 2180, 2186, 2194, 2213, 2221, 2227, 2235, 2245, 2253, 2259, 2267, 2289, 2297, + 2303, 2311, 2321, 2329, 2335, 2343, 2362, 2370, 2376, 2384, 2394, 2402, 2408, + 2416, 2442, 2450, 2456, 2464, 2474, 2482, 2488, 2496, 2515, 2523, 2529, 2537, + 2547, 2555, 2561, 2569, 2591, 2599, 2605, 2613, 2623, 2631, 2637, 2645, 2664, + 2672, 2678, 2686, 2696, 2704, 2710, 2718, 2516, 2524, 2530, 2538, 2548, 2556, + 2562, 2570, 2589, 2597, 2603, 2611, 2621, 2629, 2635, 2643, 2665, 2673, 2679, + 2687, 2697, 2705, 2711, 2719, 2738, 2746, 2752, 2760, 2770, 2778, 2784, 2792, + 2818, 2826, 2832, 2840, 2850, 2858, 2864, 2872, 2891, 2899, 2905, 2913, 2923, + 2931, 2937, 2945, 2967, 2975, 2981, 2989, 2999, 3007, 3013, 3021, 3040, 3048, + 3054, 3062, 3072, 3080, 3086, 3094, 2960, 2968, 2974, 2982, 2992, 3000, 3006, + 3014, 3033, 3041, 3047, 3055, 3065, 3073, 3079, 3087, 3109, 3117, 3123, 3131, + 3141, 3149, 3155, 3163, 3182, 3190, 3196, 3204, 3214, 3222, 3228, 3236, 3262, + 3270, 3276, 3284, 3294, 
3302, 3308, 3316, 3335, 3343, 3349, 3357, 3367, 3375, + 3381, 3389, 3411, 3419, 3425, 3433, 3443, 3451, 3457, 3465, 3484, 3492, 3498, + 3506, 3516, 3524, 3530, 3538, 2807, 2815, 2821, 2829, 2839, 2847, 2853, 2861, + 2880, 2888, 2894, 2902, 2912, 2920, 2926, 2934, 2956, 2964, 2970, 2978, 2988, + 2996, 3002, 3010, 3029, 3037, 3043, 3051, 3061, 3069, 3075, 3083, 3109, 3117, + 3123, 3131, 3141, 3149, 3155, 3163, 3182, 3190, 3196, 3204, 3214, 3222, 3228, + 3236, 3258, 3266, 3272, 3280, 3290, 3298, 3304, 3312, 3331, 3339, 3345, 3353, + 3363, 3371, 3377, 3385, 3251, 3259, 3265, 3273, 3283, 3291, 3297, 3305, 3324, + 3332, 3338, 3346, 3356, 3364, 3370, 3378, 3400, 3408, 3414, 3422, 3432, 3440, + 3446, 3454, 3473, 3481, 3487, 3495, 3505, 3513, 3519, 3527, 3553, 3561, 3567, + 3575, 3585, 3593, 3599, 3607, 3626, 3634, 3640, 3648, 3658, 3666, 3672, 3680, + 3702, 3710, 3716, 3724, 3734, 3742, 3748, 3756, 3775, 3783, 3789, 3797, 3807, + 3815, 3821, 3829, 3627, 3635, 3641, 3649, 3659, 3667, 3673, 3681, 3700, 3708, + 3714, 3722, 3732, 3740, 3746, 3754, 3776, 3784, 3790, 3798, 3808, 3816, 3822, + 3830, 3849, 3857, 3863, 3871, 3881, 3889, 3895, 3903, 3929, 3937, 3943, 3951, + 3961, 3969, 3975, 3983, 4002, 4010, 4016, 4024, 4034, 4042, 4048, 4056, 4078, + 4086, 4092, 4100, 4110, 4118, 4124, 4132, 4151, 4159, 4165, 4173, 4183, 4191, + 4197, 4205, 4071, 4079, 4085, 4093, 4103, 4111, 4117, 4125, 4144, 4152, 4158, + 4166, 4176, 4184, 4190, 4198, 4220, 4228, 4234, 4242, 4252, 4260, 4266, 4274, + 4293, 4301, 4307, 4315, 4325, 4333, 4339, 4347, 4373, 4381, 4387, 4395, 4405, + 4413, 4419, 4427, 4446, 4454, 4460, 4468, 4478, 4486, 4492, 4500, 4522, 4530, + 4536, 4544, 4554, 4562, 4568, 4576, 4595, 4603, 4609, 4617, 4627, 4635, 4641, + 4649, 3742, 3750, 3756, 3764, 3774, 3782, 3788, 3796, 3815, 3823, 3829, 3837, + 3847, 3855, 3861, 3869, 3891, 3899, 3905, 3913, 3923, 3931, 3937, 3945, 3964, + 3972, 3978, 3986, 3996, 4004, 4010, 4018, 4044, 4052, 4058, 4066, 4076, 4084, + 4090, 4098, 4117, 4125, 
4131, 4139, 4149, 4157, 4163, 4171, 4193, 4201, 4207, + 4215, 4225, 4233, 4239, 4247, 4266, 4274, 4280, 4288, 4298, 4306, 4312, 4320, + 4186, 4194, 4200, 4208, 4218, 4226, 4232, 4240, 4259, 4267, 4273, 4281, 4291, + 4299, 4305, 4313, 4335, 4343, 4349, 4357, 4367, 4375, 4381, 4389, 4408, 4416, + 4422, 4430, 4440, 4448, 4454, 4462, 4488, 4496, 4502, 4510, 4520, 4528, 4534, + 4542, 4561, 4569, 4575, 4583, 4593, 4601, 4607, 4615, 4637, 4645, 4651, 4659, + 4669, 4677, 4683, 4691, 4710, 4718, 4724, 4732, 4742, 4750, 4756, 4764, 4562, + 4570, 4576, 4584, 4594, 4602, 4608, 4616, 4635, 4643, 4649, 4657, 4667, 4675, + 4681, 4689, 4711, 4719, 4725, 4733, 4743, 4751, 4757, 4765, 4784, 4792, 4798, + 4806, 4816, 4824, 4830, 4838, 4864, 4872, 4878, 4886, 4896, 4904, 4910, 4918, + 4937, 4945, 4951, 4959, 4969, 4977, 4983, 4991, 5013, 5021, 5027, 5035, 5045, + 5053, 5059, 5067, 5086, 5094, 5100, 5108, 5118, 5126, 5132, 5140, 5006, 5014, + 5020, 5028, 5038, 5046, 5052, 5060, 5079, 5087, 5093, 5101, 5111, 5119, 5125, + 5133, 5155, 5163, 5169, 5177, 5187, 5195, 5201, 5209, 5228, 5236, 5242, 5250, + 5260, 5268, 5274, 5282, 5308, 5316, 5322, 5330, 5340, 5348, 5354, 5362, 5381, + 5389, 5395, 5403, 5413, 5421, 5427, 5435, 5457, 5465, 5471, 5479, 5489, 5497, + 5503, 5511, 5530, 5538, 5544, 5552, 5562, 5570, 5576, 5584, 4853, 4861, 4867, + 4875, 4885, 4893, 4899, 4907, 4926, 4934, 4940, 4948, 4958, 4966, 4972, 4980, + 5002, 5010, 5016, 5024, 5034, 5042, 5048, 5056, 5075, 5083, 5089, 5097, 5107, + 5115, 5121, 5129, 5155, 5163, 5169, 5177, 5187, 5195, 5201, 5209, 5228, 5236, + 5242, 5250, 5260, 5268, 5274, 5282, 5304, 5312, 5318, 5326, 5336, 5344, 5350, + 5358, 5377, 5385, 5391, 5399, 5409, 5417, 5423, 5431, 5297, 5305, 5311, 5319, + 5329, 5337, 5343, 5351, 5370, 5378, 5384, 5392, 5402, 5410, 5416, 5424, 5446, + 5454, 5460, 5468, 5478, 5486, 5492, 5500, 5519, 5527, 5533, 5541, 5551, 5559, + 5565, 5573, 5599, 5607, 5613, 5621, 5631, 5639, 5645, 5653, 5672, 5680, 5686, + 5694, 5704, 5712, 5718, 
5726, 5748, 5756, 5762, 5770, 5780, 5788, 5794, 5802, + 5821, 5829, 5835, 5843, 5853, 5861, 5867, 5875, 5673, 5681, 5687, 5695, 5705, + 5713, 5719, 5727, 5746, 5754, 5760, 5768, 5778, 5786, 5792, 5800, 5822, 5830, + 5836, 5844, 5854, 5862, 5868, 5876, 5895, 5903, 5909, 5917, 5927, 5935, 5941, + 5949, 5975, 5983, 5989, 5997, 6007, 6015, 6021, 6029, 6048, 6056, 6062, 6070, + 6080, 6088, 6094, 6102, 6124, 6132, 6138, 6146, 6156, 6164, 6170, 6178, 6197, + 6205, 6211, 6219, 6229, 6237, 6243, 6251, 6117, 6125, 6131, 6139, 6149, 6157, + 6163, 6171, 6190, 6198, 6204, 6212, 6222, 6230, 6236, 6244, 6266, 6274, 6280, + 6288, 6298, 6306, 6312, 6320, 6339, 6347, 6353, 6361, 6371, 6379, 6385, 6393, + 6419, 6427, 6433, 6441, 6451, 6459, 6465, 6473, 6492, 6500, 6506, 6514, 6524, + 6532, 6538, 6546, 6568, 6576, 6582, 6590, 6600, 6608, 6614, 6622, 6641, 6649, + 6655, 6663, 6673, 6681, 6687, 6695, 3742, 3750, 3756, 3764, 3774, 3782, 3788, + 3796, 3815, 3823, 3829, 3837, 3847, 3855, 3861, 3869, 3891, 3899, 3905, 3913, + 3923, 3931, 3937, 3945, 3964, 3972, 3978, 3986, 3996, 4004, 4010, 4018, 4044, + 4052, 4058, 4066, 4076, 4084, 4090, 4098, 4117, 4125, 4131, 4139, 4149, 4157, + 4163, 4171, 4193, 4201, 4207, 4215, 4225, 4233, 4239, 4247, 4266, 4274, 4280, + 4288, 4298, 4306, 4312, 4320, 4186, 4194, 4200, 4208, 4218, 4226, 4232, 4240, + 4259, 4267, 4273, 4281, 4291, 4299, 4305, 4313, 4335, 4343, 4349, 4357, 4367, + 4375, 4381, 4389, 4408, 4416, 4422, 4430, 4440, 4448, 4454, 4462, 4488, 4496, + 4502, 4510, 4520, 4528, 4534, 4542, 4561, 4569, 4575, 4583, 4593, 4601, 4607, + 4615, 4637, 4645, 4651, 4659, 4669, 4677, 4683, 4691, 4710, 4718, 4724, 4732, + 4742, 4750, 4756, 4764, 4562, 4570, 4576, 4584, 4594, 4602, 4608, 4616, 4635, + 4643, 4649, 4657, 4667, 4675, 4681, 4689, 4711, 4719, 4725, 4733, 4743, 4751, + 4757, 4765, 4784, 4792, 4798, 4806, 4816, 4824, 4830, 4838, 4864, 4872, 4878, + 4886, 4896, 4904, 4910, 4918, 4937, 4945, 4951, 4959, 4969, 4977, 4983, 4991, + 5013, 5021, 5027, 5035, 
5045, 5053, 5059, 5067, 5086, 5094, 5100, 5108, 5118, + 5126, 5132, 5140, 5006, 5014, 5020, 5028, 5038, 5046, 5052, 5060, 5079, 5087, + 5093, 5101, 5111, 5119, 5125, 5133, 5155, 5163, 5169, 5177, 5187, 5195, 5201, + 5209, 5228, 5236, 5242, 5250, 5260, 5268, 5274, 5282, 5308, 5316, 5322, 5330, + 5340, 5348, 5354, 5362, 5381, 5389, 5395, 5403, 5413, 5421, 5427, 5435, 5457, + 5465, 5471, 5479, 5489, 5497, 5503, 5511, 5530, 5538, 5544, 5552, 5562, 5570, + 5576, 5584, 4853, 4861, 4867, 4875, 4885, 4893, 4899, 4907, 4926, 4934, 4940, + 4948, 4958, 4966, 4972, 4980, 5002, 5010, 5016, 5024, 5034, 5042, 5048, 5056, + 5075, 5083, 5089, 5097, 5107, 5115, 5121, 5129, 5155, 5163, 5169, 5177, 5187, + 5195, 5201, 5209, 5228, 5236, 5242, 5250, 5260, 5268, 5274, 5282, 5304, 5312, + 5318, 5326, 5336, 5344, 5350, 5358, 5377, 5385, 5391, 5399, 5409, 5417, 5423, + 5431, 5297, 5305, 5311, 5319, 5329, 5337, 5343, 5351, 5370, 5378, 5384, 5392, + 5402, 5410, 5416, 5424, 5446, 5454, 5460, 5468, 5478, 5486, 5492, 5500, 5519, + 5527, 5533, 5541, 5551, 5559, 5565, 5573, 5599, 5607, 5613, 5621, 5631, 5639, + 5645, 5653, 5672, 5680, 5686, 5694, 5704, 5712, 5718, 5726, 5748, 5756, 5762, + 5770, 5780, 5788, 5794, 5802, 5821, 5829, 5835, 5843, 5853, 5861, 5867, 5875, + 5673, 5681, 5687, 5695, 5705, 5713, 5719, 5727, 5746, 5754, 5760, 5768, 5778, + 5786, 5792, 5800, 5822, 5830, 5836, 5844, 5854, 5862, 5868, 5876, 5895, 5903, + 5909, 5917, 5927, 5935, 5941, 5949, 5975, 5983, 5989, 5997, 6007, 6015, 6021, + 6029, 6048, 6056, 6062, 6070, 6080, 6088, 6094, 6102, 6124, 6132, 6138, 6146, + 6156, 6164, 6170, 6178, 6197, 6205, 6211, 6219, 6229, 6237, 6243, 6251, 6117, + 6125, 6131, 6139, 6149, 6157, 6163, 6171, 6190, 6198, 6204, 6212, 6222, 6230, + 6236, 6244, 6266, 6274, 6280, 6288, 6298, 6306, 6312, 6320, 6339, 6347, 6353, + 6361, 6371, 6379, 6385, 6393, 6419, 6427, 6433, 6441, 6451, 6459, 6465, 6473, + 6492, 6500, 6506, 6514, 6524, 6532, 6538, 6546, 6568, 6576, 6582, 6590, 6600, + 6608, 6614, 6622, 6641, 
6649, 6655, 6663, 6673, 6681, 6687, 6695, 5788, 5796, + 5802, 5810, 5820, 5828, 5834, 5842, 5861, 5869, 5875, 5883, 5893, 5901, 5907, + 5915, 5937, 5945, 5951, 5959, 5969, 5977, 5983, 5991, 6010, 6018, 6024, 6032, + 6042, 6050, 6056, 6064, 6090, 6098, 6104, 6112, 6122, 6130, 6136, 6144, 6163, + 6171, 6177, 6185, 6195, 6203, 6209, 6217, 6239, 6247, 6253, 6261, 6271, 6279, + 6285, 6293, 6312, 6320, 6326, 6334, 6344, 6352, 6358, 6366, 6232, 6240, 6246, + 6254, 6264, 6272, 6278, 6286, 6305, 6313, 6319, 6327, 6337, 6345, 6351, 6359, + 6381, 6389, 6395, 6403, 6413, 6421, 6427, 6435, 6454, 6462, 6468, 6476, 6486, + 6494, 6500, 6508, 6534, 6542, 6548, 6556, 6566, 6574, 6580, 6588, 6607, 6615, + 6621, 6629, 6639, 6647, 6653, 6661, 6683, 6691, 6697, 6705, 6715, 6723, 6729, + 6737, 6756, 6764, 6770, 6778, 6788, 6796, 6802, 6810, 6608, 6616, 6622, 6630, + 6640, 6648, 6654, 6662, 6681, 6689, 6695, 6703, 6713, 6721, 6727, 6735, 6757, + 6765, 6771, 6779, 6789, 6797, 6803, 6811, 6830, 6838, 6844, 6852, 6862, 6870, + 6876, 6884, 6910, 6918, 6924, 6932, 6942, 6950, 6956, 6964, 6983, 6991, 6997, + 7005, 7015, 7023, 7029, 7037, 7059, 7067, 7073, 7081, 7091, 7099, 7105, 7113, + 7132, 7140, 7146, 7154, 7164, 7172, 7178, 7186, 7052, 7060, 7066, 7074, 7084, + 7092, 7098, 7106, 7125, 7133, 7139, 7147, 7157, 7165, 7171, 7179, 7201, 7209, + 7215, 7223, 7233, 7241, 7247, 7255, 7274, 7282, 7288, 7296, 7306, 7314, 7320, + 7328, 7354, 7362, 7368, 7376, 7386, 7394, 7400, 7408, 7427, 7435, 7441, 7449, + 7459, 7467, 7473, 7481, 7503, 7511, 7517, 7525, 7535, 7543, 7549, 7557, 7576, + 7584, 7590, 7598, 7608, 7616, 7622, 7630, 6899, 6907, 6913, 6921, 6931, 6939, + 6945, 6953, 6972, 6980, 6986, 6994, 7004, 7012, 7018, 7026, 7048, 7056, 7062, + 7070, 7080, 7088, 7094, 7102, 7121, 7129, 7135, 7143, 7153, 7161, 7167, 7175, + 7201, 7209, 7215, 7223, 7233, 7241, 7247, 7255, 7274, 7282, 7288, 7296, 7306, + 7314, 7320, 7328, 7350, 7358, 7364, 7372, 7382, 7390, 7396, 7404, 7423, 7431, + 7437, 7445, 7455, 7463, 
7469, 7477, 7343, 7351, 7357, 7365, 7375, 7383, 7389, + 7397, 7416, 7424, 7430, 7438, 7448, 7456, 7462, 7470, 7492, 7500, 7506, 7514, + 7524, 7532, 7538, 7546, 7565, 7573, 7579, 7587, 7597, 7605, 7611, 7619, 7645, + 7653, 7659, 7667, 7677, 7685, 7691, 7699, 7718, 7726, 7732, 7740, 7750, 7758, + 7764, 7772, 7794, 7802, 7808, 7816, 7826, 7834, 7840, 7848, 7867, 7875, 7881, + 7889, 7899, 7907, 7913, 7921, 7719, 7727, 7733, 7741, 7751, 7759, 7765, 7773, + 7792, 7800, 7806, 7814, 7824, 7832, 7838, 7846, 7868, 7876, 7882, 7890, 7900, + 7908, 7914, 7922, 7941, 7949, 7955, 7963, 7973, 7981, 7987, 7995, 8021, 8029, + 8035, 8043, 8053, 8061, 8067, 8075, 8094, 8102, 8108, 8116, 8126, 8134, 8140, + 8148, 8170, 8178, 8184, 8192, 8202, 8210, 8216, 8224, 8243, 8251, 8257, 8265, + 8275 }; #ifdef __cplusplus diff --git a/libs/libvpx/vp8/encoder/dct_value_tokens.h b/libs/libvpx/vp8/encoder/dct_value_tokens.h index c2aadefca7..0597deab2d 100644 --- a/libs/libvpx/vp8/encoder/dct_value_tokens.h +++ b/libs/libvpx/vp8/encoder/dct_value_tokens.h @@ -18,691 +18,827 @@ extern "C" { /* Generated file, included by tokenize.c */ /* Values generated by fill_value_tokens() */ -static const TOKENVALUE dct_value_tokens[2048*2] = -{ - {10, 3963}, {10, 3961}, {10, 3959}, {10, 3957}, {10, 3955}, {10, 3953}, - {10, 3951}, {10, 3949}, {10, 3947}, {10, 3945}, {10, 3943}, {10, 3941}, - {10, 3939}, {10, 3937}, {10, 3935}, {10, 3933}, {10, 3931}, {10, 3929}, - {10, 3927}, {10, 3925}, {10, 3923}, {10, 3921}, {10, 3919}, {10, 3917}, - {10, 3915}, {10, 3913}, {10, 3911}, {10, 3909}, {10, 3907}, {10, 3905}, - {10, 3903}, {10, 3901}, {10, 3899}, {10, 3897}, {10, 3895}, {10, 3893}, - {10, 3891}, {10, 3889}, {10, 3887}, {10, 3885}, {10, 3883}, {10, 3881}, - {10, 3879}, {10, 3877}, {10, 3875}, {10, 3873}, {10, 3871}, {10, 3869}, - {10, 3867}, {10, 3865}, {10, 3863}, {10, 3861}, {10, 3859}, {10, 3857}, - {10, 3855}, {10, 3853}, {10, 3851}, {10, 3849}, {10, 3847}, {10, 3845}, - {10, 3843}, {10, 3841}, {10, 3839}, 
{10, 3837}, {10, 3835}, {10, 3833}, - {10, 3831}, {10, 3829}, {10, 3827}, {10, 3825}, {10, 3823}, {10, 3821}, - {10, 3819}, {10, 3817}, {10, 3815}, {10, 3813}, {10, 3811}, {10, 3809}, - {10, 3807}, {10, 3805}, {10, 3803}, {10, 3801}, {10, 3799}, {10, 3797}, - {10, 3795}, {10, 3793}, {10, 3791}, {10, 3789}, {10, 3787}, {10, 3785}, - {10, 3783}, {10, 3781}, {10, 3779}, {10, 3777}, {10, 3775}, {10, 3773}, - {10, 3771}, {10, 3769}, {10, 3767}, {10, 3765}, {10, 3763}, {10, 3761}, - {10, 3759}, {10, 3757}, {10, 3755}, {10, 3753}, {10, 3751}, {10, 3749}, - {10, 3747}, {10, 3745}, {10, 3743}, {10, 3741}, {10, 3739}, {10, 3737}, - {10, 3735}, {10, 3733}, {10, 3731}, {10, 3729}, {10, 3727}, {10, 3725}, - {10, 3723}, {10, 3721}, {10, 3719}, {10, 3717}, {10, 3715}, {10, 3713}, - {10, 3711}, {10, 3709}, {10, 3707}, {10, 3705}, {10, 3703}, {10, 3701}, - {10, 3699}, {10, 3697}, {10, 3695}, {10, 3693}, {10, 3691}, {10, 3689}, - {10, 3687}, {10, 3685}, {10, 3683}, {10, 3681}, {10, 3679}, {10, 3677}, - {10, 3675}, {10, 3673}, {10, 3671}, {10, 3669}, {10, 3667}, {10, 3665}, - {10, 3663}, {10, 3661}, {10, 3659}, {10, 3657}, {10, 3655}, {10, 3653}, - {10, 3651}, {10, 3649}, {10, 3647}, {10, 3645}, {10, 3643}, {10, 3641}, - {10, 3639}, {10, 3637}, {10, 3635}, {10, 3633}, {10, 3631}, {10, 3629}, - {10, 3627}, {10, 3625}, {10, 3623}, {10, 3621}, {10, 3619}, {10, 3617}, - {10, 3615}, {10, 3613}, {10, 3611}, {10, 3609}, {10, 3607}, {10, 3605}, - {10, 3603}, {10, 3601}, {10, 3599}, {10, 3597}, {10, 3595}, {10, 3593}, - {10, 3591}, {10, 3589}, {10, 3587}, {10, 3585}, {10, 3583}, {10, 3581}, - {10, 3579}, {10, 3577}, {10, 3575}, {10, 3573}, {10, 3571}, {10, 3569}, - {10, 3567}, {10, 3565}, {10, 3563}, {10, 3561}, {10, 3559}, {10, 3557}, - {10, 3555}, {10, 3553}, {10, 3551}, {10, 3549}, {10, 3547}, {10, 3545}, - {10, 3543}, {10, 3541}, {10, 3539}, {10, 3537}, {10, 3535}, {10, 3533}, - {10, 3531}, {10, 3529}, {10, 3527}, {10, 3525}, {10, 3523}, {10, 3521}, - {10, 3519}, {10, 3517}, {10, 3515}, 
{10, 3513}, {10, 3511}, {10, 3509}, - {10, 3507}, {10, 3505}, {10, 3503}, {10, 3501}, {10, 3499}, {10, 3497}, - {10, 3495}, {10, 3493}, {10, 3491}, {10, 3489}, {10, 3487}, {10, 3485}, - {10, 3483}, {10, 3481}, {10, 3479}, {10, 3477}, {10, 3475}, {10, 3473}, - {10, 3471}, {10, 3469}, {10, 3467}, {10, 3465}, {10, 3463}, {10, 3461}, - {10, 3459}, {10, 3457}, {10, 3455}, {10, 3453}, {10, 3451}, {10, 3449}, - {10, 3447}, {10, 3445}, {10, 3443}, {10, 3441}, {10, 3439}, {10, 3437}, - {10, 3435}, {10, 3433}, {10, 3431}, {10, 3429}, {10, 3427}, {10, 3425}, - {10, 3423}, {10, 3421}, {10, 3419}, {10, 3417}, {10, 3415}, {10, 3413}, - {10, 3411}, {10, 3409}, {10, 3407}, {10, 3405}, {10, 3403}, {10, 3401}, - {10, 3399}, {10, 3397}, {10, 3395}, {10, 3393}, {10, 3391}, {10, 3389}, - {10, 3387}, {10, 3385}, {10, 3383}, {10, 3381}, {10, 3379}, {10, 3377}, - {10, 3375}, {10, 3373}, {10, 3371}, {10, 3369}, {10, 3367}, {10, 3365}, - {10, 3363}, {10, 3361}, {10, 3359}, {10, 3357}, {10, 3355}, {10, 3353}, - {10, 3351}, {10, 3349}, {10, 3347}, {10, 3345}, {10, 3343}, {10, 3341}, - {10, 3339}, {10, 3337}, {10, 3335}, {10, 3333}, {10, 3331}, {10, 3329}, - {10, 3327}, {10, 3325}, {10, 3323}, {10, 3321}, {10, 3319}, {10, 3317}, - {10, 3315}, {10, 3313}, {10, 3311}, {10, 3309}, {10, 3307}, {10, 3305}, - {10, 3303}, {10, 3301}, {10, 3299}, {10, 3297}, {10, 3295}, {10, 3293}, - {10, 3291}, {10, 3289}, {10, 3287}, {10, 3285}, {10, 3283}, {10, 3281}, - {10, 3279}, {10, 3277}, {10, 3275}, {10, 3273}, {10, 3271}, {10, 3269}, - {10, 3267}, {10, 3265}, {10, 3263}, {10, 3261}, {10, 3259}, {10, 3257}, - {10, 3255}, {10, 3253}, {10, 3251}, {10, 3249}, {10, 3247}, {10, 3245}, - {10, 3243}, {10, 3241}, {10, 3239}, {10, 3237}, {10, 3235}, {10, 3233}, - {10, 3231}, {10, 3229}, {10, 3227}, {10, 3225}, {10, 3223}, {10, 3221}, - {10, 3219}, {10, 3217}, {10, 3215}, {10, 3213}, {10, 3211}, {10, 3209}, - {10, 3207}, {10, 3205}, {10, 3203}, {10, 3201}, {10, 3199}, {10, 3197}, - {10, 3195}, {10, 3193}, {10, 3191}, 
{10, 3189}, {10, 3187}, {10, 3185}, - {10, 3183}, {10, 3181}, {10, 3179}, {10, 3177}, {10, 3175}, {10, 3173}, - {10, 3171}, {10, 3169}, {10, 3167}, {10, 3165}, {10, 3163}, {10, 3161}, - {10, 3159}, {10, 3157}, {10, 3155}, {10, 3153}, {10, 3151}, {10, 3149}, - {10, 3147}, {10, 3145}, {10, 3143}, {10, 3141}, {10, 3139}, {10, 3137}, - {10, 3135}, {10, 3133}, {10, 3131}, {10, 3129}, {10, 3127}, {10, 3125}, - {10, 3123}, {10, 3121}, {10, 3119}, {10, 3117}, {10, 3115}, {10, 3113}, - {10, 3111}, {10, 3109}, {10, 3107}, {10, 3105}, {10, 3103}, {10, 3101}, - {10, 3099}, {10, 3097}, {10, 3095}, {10, 3093}, {10, 3091}, {10, 3089}, - {10, 3087}, {10, 3085}, {10, 3083}, {10, 3081}, {10, 3079}, {10, 3077}, - {10, 3075}, {10, 3073}, {10, 3071}, {10, 3069}, {10, 3067}, {10, 3065}, - {10, 3063}, {10, 3061}, {10, 3059}, {10, 3057}, {10, 3055}, {10, 3053}, - {10, 3051}, {10, 3049}, {10, 3047}, {10, 3045}, {10, 3043}, {10, 3041}, - {10, 3039}, {10, 3037}, {10, 3035}, {10, 3033}, {10, 3031}, {10, 3029}, - {10, 3027}, {10, 3025}, {10, 3023}, {10, 3021}, {10, 3019}, {10, 3017}, - {10, 3015}, {10, 3013}, {10, 3011}, {10, 3009}, {10, 3007}, {10, 3005}, - {10, 3003}, {10, 3001}, {10, 2999}, {10, 2997}, {10, 2995}, {10, 2993}, - {10, 2991}, {10, 2989}, {10, 2987}, {10, 2985}, {10, 2983}, {10, 2981}, - {10, 2979}, {10, 2977}, {10, 2975}, {10, 2973}, {10, 2971}, {10, 2969}, - {10, 2967}, {10, 2965}, {10, 2963}, {10, 2961}, {10, 2959}, {10, 2957}, - {10, 2955}, {10, 2953}, {10, 2951}, {10, 2949}, {10, 2947}, {10, 2945}, - {10, 2943}, {10, 2941}, {10, 2939}, {10, 2937}, {10, 2935}, {10, 2933}, - {10, 2931}, {10, 2929}, {10, 2927}, {10, 2925}, {10, 2923}, {10, 2921}, - {10, 2919}, {10, 2917}, {10, 2915}, {10, 2913}, {10, 2911}, {10, 2909}, - {10, 2907}, {10, 2905}, {10, 2903}, {10, 2901}, {10, 2899}, {10, 2897}, - {10, 2895}, {10, 2893}, {10, 2891}, {10, 2889}, {10, 2887}, {10, 2885}, - {10, 2883}, {10, 2881}, {10, 2879}, {10, 2877}, {10, 2875}, {10, 2873}, - {10, 2871}, {10, 2869}, {10, 2867}, 
{10, 2865}, {10, 2863}, {10, 2861}, - {10, 2859}, {10, 2857}, {10, 2855}, {10, 2853}, {10, 2851}, {10, 2849}, - {10, 2847}, {10, 2845}, {10, 2843}, {10, 2841}, {10, 2839}, {10, 2837}, - {10, 2835}, {10, 2833}, {10, 2831}, {10, 2829}, {10, 2827}, {10, 2825}, - {10, 2823}, {10, 2821}, {10, 2819}, {10, 2817}, {10, 2815}, {10, 2813}, - {10, 2811}, {10, 2809}, {10, 2807}, {10, 2805}, {10, 2803}, {10, 2801}, - {10, 2799}, {10, 2797}, {10, 2795}, {10, 2793}, {10, 2791}, {10, 2789}, - {10, 2787}, {10, 2785}, {10, 2783}, {10, 2781}, {10, 2779}, {10, 2777}, - {10, 2775}, {10, 2773}, {10, 2771}, {10, 2769}, {10, 2767}, {10, 2765}, - {10, 2763}, {10, 2761}, {10, 2759}, {10, 2757}, {10, 2755}, {10, 2753}, - {10, 2751}, {10, 2749}, {10, 2747}, {10, 2745}, {10, 2743}, {10, 2741}, - {10, 2739}, {10, 2737}, {10, 2735}, {10, 2733}, {10, 2731}, {10, 2729}, - {10, 2727}, {10, 2725}, {10, 2723}, {10, 2721}, {10, 2719}, {10, 2717}, - {10, 2715}, {10, 2713}, {10, 2711}, {10, 2709}, {10, 2707}, {10, 2705}, - {10, 2703}, {10, 2701}, {10, 2699}, {10, 2697}, {10, 2695}, {10, 2693}, - {10, 2691}, {10, 2689}, {10, 2687}, {10, 2685}, {10, 2683}, {10, 2681}, - {10, 2679}, {10, 2677}, {10, 2675}, {10, 2673}, {10, 2671}, {10, 2669}, - {10, 2667}, {10, 2665}, {10, 2663}, {10, 2661}, {10, 2659}, {10, 2657}, - {10, 2655}, {10, 2653}, {10, 2651}, {10, 2649}, {10, 2647}, {10, 2645}, - {10, 2643}, {10, 2641}, {10, 2639}, {10, 2637}, {10, 2635}, {10, 2633}, - {10, 2631}, {10, 2629}, {10, 2627}, {10, 2625}, {10, 2623}, {10, 2621}, - {10, 2619}, {10, 2617}, {10, 2615}, {10, 2613}, {10, 2611}, {10, 2609}, - {10, 2607}, {10, 2605}, {10, 2603}, {10, 2601}, {10, 2599}, {10, 2597}, - {10, 2595}, {10, 2593}, {10, 2591}, {10, 2589}, {10, 2587}, {10, 2585}, - {10, 2583}, {10, 2581}, {10, 2579}, {10, 2577}, {10, 2575}, {10, 2573}, - {10, 2571}, {10, 2569}, {10, 2567}, {10, 2565}, {10, 2563}, {10, 2561}, - {10, 2559}, {10, 2557}, {10, 2555}, {10, 2553}, {10, 2551}, {10, 2549}, - {10, 2547}, {10, 2545}, {10, 2543}, 
{10, 2541}, {10, 2539}, {10, 2537}, - {10, 2535}, {10, 2533}, {10, 2531}, {10, 2529}, {10, 2527}, {10, 2525}, - {10, 2523}, {10, 2521}, {10, 2519}, {10, 2517}, {10, 2515}, {10, 2513}, - {10, 2511}, {10, 2509}, {10, 2507}, {10, 2505}, {10, 2503}, {10, 2501}, - {10, 2499}, {10, 2497}, {10, 2495}, {10, 2493}, {10, 2491}, {10, 2489}, - {10, 2487}, {10, 2485}, {10, 2483}, {10, 2481}, {10, 2479}, {10, 2477}, - {10, 2475}, {10, 2473}, {10, 2471}, {10, 2469}, {10, 2467}, {10, 2465}, - {10, 2463}, {10, 2461}, {10, 2459}, {10, 2457}, {10, 2455}, {10, 2453}, - {10, 2451}, {10, 2449}, {10, 2447}, {10, 2445}, {10, 2443}, {10, 2441}, - {10, 2439}, {10, 2437}, {10, 2435}, {10, 2433}, {10, 2431}, {10, 2429}, - {10, 2427}, {10, 2425}, {10, 2423}, {10, 2421}, {10, 2419}, {10, 2417}, - {10, 2415}, {10, 2413}, {10, 2411}, {10, 2409}, {10, 2407}, {10, 2405}, - {10, 2403}, {10, 2401}, {10, 2399}, {10, 2397}, {10, 2395}, {10, 2393}, - {10, 2391}, {10, 2389}, {10, 2387}, {10, 2385}, {10, 2383}, {10, 2381}, - {10, 2379}, {10, 2377}, {10, 2375}, {10, 2373}, {10, 2371}, {10, 2369}, - {10, 2367}, {10, 2365}, {10, 2363}, {10, 2361}, {10, 2359}, {10, 2357}, - {10, 2355}, {10, 2353}, {10, 2351}, {10, 2349}, {10, 2347}, {10, 2345}, - {10, 2343}, {10, 2341}, {10, 2339}, {10, 2337}, {10, 2335}, {10, 2333}, - {10, 2331}, {10, 2329}, {10, 2327}, {10, 2325}, {10, 2323}, {10, 2321}, - {10, 2319}, {10, 2317}, {10, 2315}, {10, 2313}, {10, 2311}, {10, 2309}, - {10, 2307}, {10, 2305}, {10, 2303}, {10, 2301}, {10, 2299}, {10, 2297}, - {10, 2295}, {10, 2293}, {10, 2291}, {10, 2289}, {10, 2287}, {10, 2285}, - {10, 2283}, {10, 2281}, {10, 2279}, {10, 2277}, {10, 2275}, {10, 2273}, - {10, 2271}, {10, 2269}, {10, 2267}, {10, 2265}, {10, 2263}, {10, 2261}, - {10, 2259}, {10, 2257}, {10, 2255}, {10, 2253}, {10, 2251}, {10, 2249}, - {10, 2247}, {10, 2245}, {10, 2243}, {10, 2241}, {10, 2239}, {10, 2237}, - {10, 2235}, {10, 2233}, {10, 2231}, {10, 2229}, {10, 2227}, {10, 2225}, - {10, 2223}, {10, 2221}, {10, 2219}, 
{10, 2217}, {10, 2215}, {10, 2213}, - {10, 2211}, {10, 2209}, {10, 2207}, {10, 2205}, {10, 2203}, {10, 2201}, - {10, 2199}, {10, 2197}, {10, 2195}, {10, 2193}, {10, 2191}, {10, 2189}, - {10, 2187}, {10, 2185}, {10, 2183}, {10, 2181}, {10, 2179}, {10, 2177}, - {10, 2175}, {10, 2173}, {10, 2171}, {10, 2169}, {10, 2167}, {10, 2165}, - {10, 2163}, {10, 2161}, {10, 2159}, {10, 2157}, {10, 2155}, {10, 2153}, - {10, 2151}, {10, 2149}, {10, 2147}, {10, 2145}, {10, 2143}, {10, 2141}, - {10, 2139}, {10, 2137}, {10, 2135}, {10, 2133}, {10, 2131}, {10, 2129}, - {10, 2127}, {10, 2125}, {10, 2123}, {10, 2121}, {10, 2119}, {10, 2117}, - {10, 2115}, {10, 2113}, {10, 2111}, {10, 2109}, {10, 2107}, {10, 2105}, - {10, 2103}, {10, 2101}, {10, 2099}, {10, 2097}, {10, 2095}, {10, 2093}, - {10, 2091}, {10, 2089}, {10, 2087}, {10, 2085}, {10, 2083}, {10, 2081}, - {10, 2079}, {10, 2077}, {10, 2075}, {10, 2073}, {10, 2071}, {10, 2069}, - {10, 2067}, {10, 2065}, {10, 2063}, {10, 2061}, {10, 2059}, {10, 2057}, - {10, 2055}, {10, 2053}, {10, 2051}, {10, 2049}, {10, 2047}, {10, 2045}, - {10, 2043}, {10, 2041}, {10, 2039}, {10, 2037}, {10, 2035}, {10, 2033}, - {10, 2031}, {10, 2029}, {10, 2027}, {10, 2025}, {10, 2023}, {10, 2021}, - {10, 2019}, {10, 2017}, {10, 2015}, {10, 2013}, {10, 2011}, {10, 2009}, - {10, 2007}, {10, 2005}, {10, 2003}, {10, 2001}, {10, 1999}, {10, 1997}, - {10, 1995}, {10, 1993}, {10, 1991}, {10, 1989}, {10, 1987}, {10, 1985}, - {10, 1983}, {10, 1981}, {10, 1979}, {10, 1977}, {10, 1975}, {10, 1973}, - {10, 1971}, {10, 1969}, {10, 1967}, {10, 1965}, {10, 1963}, {10, 1961}, - {10, 1959}, {10, 1957}, {10, 1955}, {10, 1953}, {10, 1951}, {10, 1949}, - {10, 1947}, {10, 1945}, {10, 1943}, {10, 1941}, {10, 1939}, {10, 1937}, - {10, 1935}, {10, 1933}, {10, 1931}, {10, 1929}, {10, 1927}, {10, 1925}, - {10, 1923}, {10, 1921}, {10, 1919}, {10, 1917}, {10, 1915}, {10, 1913}, - {10, 1911}, {10, 1909}, {10, 1907}, {10, 1905}, {10, 1903}, {10, 1901}, - {10, 1899}, {10, 1897}, {10, 1895}, 
{10, 1893}, {10, 1891}, {10, 1889}, - {10, 1887}, {10, 1885}, {10, 1883}, {10, 1881}, {10, 1879}, {10, 1877}, - {10, 1875}, {10, 1873}, {10, 1871}, {10, 1869}, {10, 1867}, {10, 1865}, - {10, 1863}, {10, 1861}, {10, 1859}, {10, 1857}, {10, 1855}, {10, 1853}, - {10, 1851}, {10, 1849}, {10, 1847}, {10, 1845}, {10, 1843}, {10, 1841}, - {10, 1839}, {10, 1837}, {10, 1835}, {10, 1833}, {10, 1831}, {10, 1829}, - {10, 1827}, {10, 1825}, {10, 1823}, {10, 1821}, {10, 1819}, {10, 1817}, - {10, 1815}, {10, 1813}, {10, 1811}, {10, 1809}, {10, 1807}, {10, 1805}, - {10, 1803}, {10, 1801}, {10, 1799}, {10, 1797}, {10, 1795}, {10, 1793}, - {10, 1791}, {10, 1789}, {10, 1787}, {10, 1785}, {10, 1783}, {10, 1781}, - {10, 1779}, {10, 1777}, {10, 1775}, {10, 1773}, {10, 1771}, {10, 1769}, - {10, 1767}, {10, 1765}, {10, 1763}, {10, 1761}, {10, 1759}, {10, 1757}, - {10, 1755}, {10, 1753}, {10, 1751}, {10, 1749}, {10, 1747}, {10, 1745}, - {10, 1743}, {10, 1741}, {10, 1739}, {10, 1737}, {10, 1735}, {10, 1733}, - {10, 1731}, {10, 1729}, {10, 1727}, {10, 1725}, {10, 1723}, {10, 1721}, - {10, 1719}, {10, 1717}, {10, 1715}, {10, 1713}, {10, 1711}, {10, 1709}, - {10, 1707}, {10, 1705}, {10, 1703}, {10, 1701}, {10, 1699}, {10, 1697}, - {10, 1695}, {10, 1693}, {10, 1691}, {10, 1689}, {10, 1687}, {10, 1685}, - {10, 1683}, {10, 1681}, {10, 1679}, {10, 1677}, {10, 1675}, {10, 1673}, - {10, 1671}, {10, 1669}, {10, 1667}, {10, 1665}, {10, 1663}, {10, 1661}, - {10, 1659}, {10, 1657}, {10, 1655}, {10, 1653}, {10, 1651}, {10, 1649}, - {10, 1647}, {10, 1645}, {10, 1643}, {10, 1641}, {10, 1639}, {10, 1637}, - {10, 1635}, {10, 1633}, {10, 1631}, {10, 1629}, {10, 1627}, {10, 1625}, - {10, 1623}, {10, 1621}, {10, 1619}, {10, 1617}, {10, 1615}, {10, 1613}, - {10, 1611}, {10, 1609}, {10, 1607}, {10, 1605}, {10, 1603}, {10, 1601}, - {10, 1599}, {10, 1597}, {10, 1595}, {10, 1593}, {10, 1591}, {10, 1589}, - {10, 1587}, {10, 1585}, {10, 1583}, {10, 1581}, {10, 1579}, {10, 1577}, - {10, 1575}, {10, 1573}, {10, 1571}, 
{10, 1569}, {10, 1567}, {10, 1565}, - {10, 1563}, {10, 1561}, {10, 1559}, {10, 1557}, {10, 1555}, {10, 1553}, - {10, 1551}, {10, 1549}, {10, 1547}, {10, 1545}, {10, 1543}, {10, 1541}, - {10, 1539}, {10, 1537}, {10, 1535}, {10, 1533}, {10, 1531}, {10, 1529}, - {10, 1527}, {10, 1525}, {10, 1523}, {10, 1521}, {10, 1519}, {10, 1517}, - {10, 1515}, {10, 1513}, {10, 1511}, {10, 1509}, {10, 1507}, {10, 1505}, - {10, 1503}, {10, 1501}, {10, 1499}, {10, 1497}, {10, 1495}, {10, 1493}, - {10, 1491}, {10, 1489}, {10, 1487}, {10, 1485}, {10, 1483}, {10, 1481}, - {10, 1479}, {10, 1477}, {10, 1475}, {10, 1473}, {10, 1471}, {10, 1469}, - {10, 1467}, {10, 1465}, {10, 1463}, {10, 1461}, {10, 1459}, {10, 1457}, - {10, 1455}, {10, 1453}, {10, 1451}, {10, 1449}, {10, 1447}, {10, 1445}, - {10, 1443}, {10, 1441}, {10, 1439}, {10, 1437}, {10, 1435}, {10, 1433}, - {10, 1431}, {10, 1429}, {10, 1427}, {10, 1425}, {10, 1423}, {10, 1421}, - {10, 1419}, {10, 1417}, {10, 1415}, {10, 1413}, {10, 1411}, {10, 1409}, - {10, 1407}, {10, 1405}, {10, 1403}, {10, 1401}, {10, 1399}, {10, 1397}, - {10, 1395}, {10, 1393}, {10, 1391}, {10, 1389}, {10, 1387}, {10, 1385}, - {10, 1383}, {10, 1381}, {10, 1379}, {10, 1377}, {10, 1375}, {10, 1373}, - {10, 1371}, {10, 1369}, {10, 1367}, {10, 1365}, {10, 1363}, {10, 1361}, - {10, 1359}, {10, 1357}, {10, 1355}, {10, 1353}, {10, 1351}, {10, 1349}, - {10, 1347}, {10, 1345}, {10, 1343}, {10, 1341}, {10, 1339}, {10, 1337}, - {10, 1335}, {10, 1333}, {10, 1331}, {10, 1329}, {10, 1327}, {10, 1325}, - {10, 1323}, {10, 1321}, {10, 1319}, {10, 1317}, {10, 1315}, {10, 1313}, - {10, 1311}, {10, 1309}, {10, 1307}, {10, 1305}, {10, 1303}, {10, 1301}, - {10, 1299}, {10, 1297}, {10, 1295}, {10, 1293}, {10, 1291}, {10, 1289}, - {10, 1287}, {10, 1285}, {10, 1283}, {10, 1281}, {10, 1279}, {10, 1277}, - {10, 1275}, {10, 1273}, {10, 1271}, {10, 1269}, {10, 1267}, {10, 1265}, - {10, 1263}, {10, 1261}, {10, 1259}, {10, 1257}, {10, 1255}, {10, 1253}, - {10, 1251}, {10, 1249}, {10, 1247}, 
{10, 1245}, {10, 1243}, {10, 1241}, - {10, 1239}, {10, 1237}, {10, 1235}, {10, 1233}, {10, 1231}, {10, 1229}, - {10, 1227}, {10, 1225}, {10, 1223}, {10, 1221}, {10, 1219}, {10, 1217}, - {10, 1215}, {10, 1213}, {10, 1211}, {10, 1209}, {10, 1207}, {10, 1205}, - {10, 1203}, {10, 1201}, {10, 1199}, {10, 1197}, {10, 1195}, {10, 1193}, - {10, 1191}, {10, 1189}, {10, 1187}, {10, 1185}, {10, 1183}, {10, 1181}, - {10, 1179}, {10, 1177}, {10, 1175}, {10, 1173}, {10, 1171}, {10, 1169}, - {10, 1167}, {10, 1165}, {10, 1163}, {10, 1161}, {10, 1159}, {10, 1157}, - {10, 1155}, {10, 1153}, {10, 1151}, {10, 1149}, {10, 1147}, {10, 1145}, - {10, 1143}, {10, 1141}, {10, 1139}, {10, 1137}, {10, 1135}, {10, 1133}, - {10, 1131}, {10, 1129}, {10, 1127}, {10, 1125}, {10, 1123}, {10, 1121}, - {10, 1119}, {10, 1117}, {10, 1115}, {10, 1113}, {10, 1111}, {10, 1109}, - {10, 1107}, {10, 1105}, {10, 1103}, {10, 1101}, {10, 1099}, {10, 1097}, - {10, 1095}, {10, 1093}, {10, 1091}, {10, 1089}, {10, 1087}, {10, 1085}, - {10, 1083}, {10, 1081}, {10, 1079}, {10, 1077}, {10, 1075}, {10, 1073}, - {10, 1071}, {10, 1069}, {10, 1067}, {10, 1065}, {10, 1063}, {10, 1061}, - {10, 1059}, {10, 1057}, {10, 1055}, {10, 1053}, {10, 1051}, {10, 1049}, - {10, 1047}, {10, 1045}, {10, 1043}, {10, 1041}, {10, 1039}, {10, 1037}, - {10, 1035}, {10, 1033}, {10, 1031}, {10, 1029}, {10, 1027}, {10, 1025}, - {10, 1023}, {10, 1021}, {10, 1019}, {10, 1017}, {10, 1015}, {10, 1013}, - {10, 1011}, {10, 1009}, {10, 1007}, {10, 1005}, {10, 1003}, {10, 1001}, - {10, 999}, {10, 997}, {10, 995}, {10, 993}, {10, 991}, {10, 989}, - {10, 987}, {10, 985}, {10, 983}, {10, 981}, {10, 979}, {10, 977}, - {10, 975}, {10, 973}, {10, 971}, {10, 969}, {10, 967}, {10, 965}, - {10, 963}, {10, 961}, {10, 959}, {10, 957}, {10, 955}, {10, 953}, - {10, 951}, {10, 949}, {10, 947}, {10, 945}, {10, 943}, {10, 941}, - {10, 939}, {10, 937}, {10, 935}, {10, 933}, {10, 931}, {10, 929}, - {10, 927}, {10, 925}, {10, 923}, {10, 921}, {10, 919}, {10, 917}, - {10, 
915}, {10, 913}, {10, 911}, {10, 909}, {10, 907}, {10, 905}, - {10, 903}, {10, 901}, {10, 899}, {10, 897}, {10, 895}, {10, 893}, - {10, 891}, {10, 889}, {10, 887}, {10, 885}, {10, 883}, {10, 881}, - {10, 879}, {10, 877}, {10, 875}, {10, 873}, {10, 871}, {10, 869}, - {10, 867}, {10, 865}, {10, 863}, {10, 861}, {10, 859}, {10, 857}, - {10, 855}, {10, 853}, {10, 851}, {10, 849}, {10, 847}, {10, 845}, - {10, 843}, {10, 841}, {10, 839}, {10, 837}, {10, 835}, {10, 833}, - {10, 831}, {10, 829}, {10, 827}, {10, 825}, {10, 823}, {10, 821}, - {10, 819}, {10, 817}, {10, 815}, {10, 813}, {10, 811}, {10, 809}, - {10, 807}, {10, 805}, {10, 803}, {10, 801}, {10, 799}, {10, 797}, - {10, 795}, {10, 793}, {10, 791}, {10, 789}, {10, 787}, {10, 785}, - {10, 783}, {10, 781}, {10, 779}, {10, 777}, {10, 775}, {10, 773}, - {10, 771}, {10, 769}, {10, 767}, {10, 765}, {10, 763}, {10, 761}, - {10, 759}, {10, 757}, {10, 755}, {10, 753}, {10, 751}, {10, 749}, - {10, 747}, {10, 745}, {10, 743}, {10, 741}, {10, 739}, {10, 737}, - {10, 735}, {10, 733}, {10, 731}, {10, 729}, {10, 727}, {10, 725}, - {10, 723}, {10, 721}, {10, 719}, {10, 717}, {10, 715}, {10, 713}, - {10, 711}, {10, 709}, {10, 707}, {10, 705}, {10, 703}, {10, 701}, - {10, 699}, {10, 697}, {10, 695}, {10, 693}, {10, 691}, {10, 689}, - {10, 687}, {10, 685}, {10, 683}, {10, 681}, {10, 679}, {10, 677}, - {10, 675}, {10, 673}, {10, 671}, {10, 669}, {10, 667}, {10, 665}, - {10, 663}, {10, 661}, {10, 659}, {10, 657}, {10, 655}, {10, 653}, - {10, 651}, {10, 649}, {10, 647}, {10, 645}, {10, 643}, {10, 641}, - {10, 639}, {10, 637}, {10, 635}, {10, 633}, {10, 631}, {10, 629}, - {10, 627}, {10, 625}, {10, 623}, {10, 621}, {10, 619}, {10, 617}, - {10, 615}, {10, 613}, {10, 611}, {10, 609}, {10, 607}, {10, 605}, - {10, 603}, {10, 601}, {10, 599}, {10, 597}, {10, 595}, {10, 593}, - {10, 591}, {10, 589}, {10, 587}, {10, 585}, {10, 583}, {10, 581}, - {10, 579}, {10, 577}, {10, 575}, {10, 573}, {10, 571}, {10, 569}, - {10, 567}, {10, 565}, {10, 563}, 
{10, 561}, {10, 559}, {10, 557}, - {10, 555}, {10, 553}, {10, 551}, {10, 549}, {10, 547}, {10, 545}, - {10, 543}, {10, 541}, {10, 539}, {10, 537}, {10, 535}, {10, 533}, - {10, 531}, {10, 529}, {10, 527}, {10, 525}, {10, 523}, {10, 521}, - {10, 519}, {10, 517}, {10, 515}, {10, 513}, {10, 511}, {10, 509}, - {10, 507}, {10, 505}, {10, 503}, {10, 501}, {10, 499}, {10, 497}, - {10, 495}, {10, 493}, {10, 491}, {10, 489}, {10, 487}, {10, 485}, - {10, 483}, {10, 481}, {10, 479}, {10, 477}, {10, 475}, {10, 473}, - {10, 471}, {10, 469}, {10, 467}, {10, 465}, {10, 463}, {10, 461}, - {10, 459}, {10, 457}, {10, 455}, {10, 453}, {10, 451}, {10, 449}, - {10, 447}, {10, 445}, {10, 443}, {10, 441}, {10, 439}, {10, 437}, - {10, 435}, {10, 433}, {10, 431}, {10, 429}, {10, 427}, {10, 425}, - {10, 423}, {10, 421}, {10, 419}, {10, 417}, {10, 415}, {10, 413}, - {10, 411}, {10, 409}, {10, 407}, {10, 405}, {10, 403}, {10, 401}, - {10, 399}, {10, 397}, {10, 395}, {10, 393}, {10, 391}, {10, 389}, - {10, 387}, {10, 385}, {10, 383}, {10, 381}, {10, 379}, {10, 377}, - {10, 375}, {10, 373}, {10, 371}, {10, 369}, {10, 367}, {10, 365}, - {10, 363}, {10, 361}, {10, 359}, {10, 357}, {10, 355}, {10, 353}, - {10, 351}, {10, 349}, {10, 347}, {10, 345}, {10, 343}, {10, 341}, - {10, 339}, {10, 337}, {10, 335}, {10, 333}, {10, 331}, {10, 329}, - {10, 327}, {10, 325}, {10, 323}, {10, 321}, {10, 319}, {10, 317}, - {10, 315}, {10, 313}, {10, 311}, {10, 309}, {10, 307}, {10, 305}, - {10, 303}, {10, 301}, {10, 299}, {10, 297}, {10, 295}, {10, 293}, - {10, 291}, {10, 289}, {10, 287}, {10, 285}, {10, 283}, {10, 281}, - {10, 279}, {10, 277}, {10, 275}, {10, 273}, {10, 271}, {10, 269}, - {10, 267}, {10, 265}, {10, 263}, {10, 261}, {10, 259}, {10, 257}, - {10, 255}, {10, 253}, {10, 251}, {10, 249}, {10, 247}, {10, 245}, - {10, 243}, {10, 241}, {10, 239}, {10, 237}, {10, 235}, {10, 233}, - {10, 231}, {10, 229}, {10, 227}, {10, 225}, {10, 223}, {10, 221}, - {10, 219}, {10, 217}, {10, 215}, {10, 213}, {10, 211}, {10, 
209}, - {10, 207}, {10, 205}, {10, 203}, {10, 201}, {10, 199}, {10, 197}, - {10, 195}, {10, 193}, {10, 191}, {10, 189}, {10, 187}, {10, 185}, - {10, 183}, {10, 181}, {10, 179}, {10, 177}, {10, 175}, {10, 173}, - {10, 171}, {10, 169}, {10, 167}, {10, 165}, {10, 163}, {10, 161}, - {10, 159}, {10, 157}, {10, 155}, {10, 153}, {10, 151}, {10, 149}, - {10, 147}, {10, 145}, {10, 143}, {10, 141}, {10, 139}, {10, 137}, - {10, 135}, {10, 133}, {10, 131}, {10, 129}, {10, 127}, {10, 125}, - {10, 123}, {10, 121}, {10, 119}, {10, 117}, {10, 115}, {10, 113}, - {10, 111}, {10, 109}, {10, 107}, {10, 105}, {10, 103}, {10, 101}, - {10, 99}, {10, 97}, {10, 95}, {10, 93}, {10, 91}, {10, 89}, - {10, 87}, {10, 85}, {10, 83}, {10, 81}, {10, 79}, {10, 77}, - {10, 75}, {10, 73}, {10, 71}, {10, 69}, {10, 67}, {10, 65}, - {10, 63}, {10, 61}, {10, 59}, {10, 57}, {10, 55}, {10, 53}, - {10, 51}, {10, 49}, {10, 47}, {10, 45}, {10, 43}, {10, 41}, - {10, 39}, {10, 37}, {10, 35}, {10, 33}, {10, 31}, {10, 29}, - {10, 27}, {10, 25}, {10, 23}, {10, 21}, {10, 19}, {10, 17}, - {10, 15}, {10, 13}, {10, 11}, {10, 9}, {10, 7}, {10, 5}, - {10, 3}, {10, 1}, {9, 63}, {9, 61}, {9, 59}, {9, 57}, - {9, 55}, {9, 53}, {9, 51}, {9, 49}, {9, 47}, {9, 45}, - {9, 43}, {9, 41}, {9, 39}, {9, 37}, {9, 35}, {9, 33}, - {9, 31}, {9, 29}, {9, 27}, {9, 25}, {9, 23}, {9, 21}, - {9, 19}, {9, 17}, {9, 15}, {9, 13}, {9, 11}, {9, 9}, - {9, 7}, {9, 5}, {9, 3}, {9, 1}, {8, 31}, {8, 29}, - {8, 27}, {8, 25}, {8, 23}, {8, 21}, {8, 19}, {8, 17}, - {8, 15}, {8, 13}, {8, 11}, {8, 9}, {8, 7}, {8, 5}, - {8, 3}, {8, 1}, {7, 15}, {7, 13}, {7, 11}, {7, 9}, - {7, 7}, {7, 5}, {7, 3}, {7, 1}, {6, 7}, {6, 5}, - {6, 3}, {6, 1}, {5, 3}, {5, 1}, {4, 1}, {3, 1}, - {2, 1}, {1, 1}, {0, 0}, {1, 0}, {2, 0}, {3, 0}, - {4, 0}, {5, 0}, {5, 2}, {6, 0}, {6, 2}, {6, 4}, - {6, 6}, {7, 0}, {7, 2}, {7, 4}, {7, 6}, {7, 8}, - {7, 10}, {7, 12}, {7, 14}, {8, 0}, {8, 2}, {8, 4}, - {8, 6}, {8, 8}, {8, 10}, {8, 12}, {8, 14}, {8, 16}, - {8, 18}, {8, 20}, {8, 22}, {8, 24}, 
{8, 26}, {8, 28}, - {8, 30}, {9, 0}, {9, 2}, {9, 4}, {9, 6}, {9, 8}, - {9, 10}, {9, 12}, {9, 14}, {9, 16}, {9, 18}, {9, 20}, - {9, 22}, {9, 24}, {9, 26}, {9, 28}, {9, 30}, {9, 32}, - {9, 34}, {9, 36}, {9, 38}, {9, 40}, {9, 42}, {9, 44}, - {9, 46}, {9, 48}, {9, 50}, {9, 52}, {9, 54}, {9, 56}, - {9, 58}, {9, 60}, {9, 62}, {10, 0}, {10, 2}, {10, 4}, - {10, 6}, {10, 8}, {10, 10}, {10, 12}, {10, 14}, {10, 16}, - {10, 18}, {10, 20}, {10, 22}, {10, 24}, {10, 26}, {10, 28}, - {10, 30}, {10, 32}, {10, 34}, {10, 36}, {10, 38}, {10, 40}, - {10, 42}, {10, 44}, {10, 46}, {10, 48}, {10, 50}, {10, 52}, - {10, 54}, {10, 56}, {10, 58}, {10, 60}, {10, 62}, {10, 64}, - {10, 66}, {10, 68}, {10, 70}, {10, 72}, {10, 74}, {10, 76}, - {10, 78}, {10, 80}, {10, 82}, {10, 84}, {10, 86}, {10, 88}, - {10, 90}, {10, 92}, {10, 94}, {10, 96}, {10, 98}, {10, 100}, - {10, 102}, {10, 104}, {10, 106}, {10, 108}, {10, 110}, {10, 112}, - {10, 114}, {10, 116}, {10, 118}, {10, 120}, {10, 122}, {10, 124}, - {10, 126}, {10, 128}, {10, 130}, {10, 132}, {10, 134}, {10, 136}, - {10, 138}, {10, 140}, {10, 142}, {10, 144}, {10, 146}, {10, 148}, - {10, 150}, {10, 152}, {10, 154}, {10, 156}, {10, 158}, {10, 160}, - {10, 162}, {10, 164}, {10, 166}, {10, 168}, {10, 170}, {10, 172}, - {10, 174}, {10, 176}, {10, 178}, {10, 180}, {10, 182}, {10, 184}, - {10, 186}, {10, 188}, {10, 190}, {10, 192}, {10, 194}, {10, 196}, - {10, 198}, {10, 200}, {10, 202}, {10, 204}, {10, 206}, {10, 208}, - {10, 210}, {10, 212}, {10, 214}, {10, 216}, {10, 218}, {10, 220}, - {10, 222}, {10, 224}, {10, 226}, {10, 228}, {10, 230}, {10, 232}, - {10, 234}, {10, 236}, {10, 238}, {10, 240}, {10, 242}, {10, 244}, - {10, 246}, {10, 248}, {10, 250}, {10, 252}, {10, 254}, {10, 256}, - {10, 258}, {10, 260}, {10, 262}, {10, 264}, {10, 266}, {10, 268}, - {10, 270}, {10, 272}, {10, 274}, {10, 276}, {10, 278}, {10, 280}, - {10, 282}, {10, 284}, {10, 286}, {10, 288}, {10, 290}, {10, 292}, - {10, 294}, {10, 296}, {10, 298}, {10, 300}, {10, 302}, {10, 304}, 
- {10, 306}, {10, 308}, {10, 310}, {10, 312}, {10, 314}, {10, 316}, - {10, 318}, {10, 320}, {10, 322}, {10, 324}, {10, 326}, {10, 328}, - {10, 330}, {10, 332}, {10, 334}, {10, 336}, {10, 338}, {10, 340}, - {10, 342}, {10, 344}, {10, 346}, {10, 348}, {10, 350}, {10, 352}, - {10, 354}, {10, 356}, {10, 358}, {10, 360}, {10, 362}, {10, 364}, - {10, 366}, {10, 368}, {10, 370}, {10, 372}, {10, 374}, {10, 376}, - {10, 378}, {10, 380}, {10, 382}, {10, 384}, {10, 386}, {10, 388}, - {10, 390}, {10, 392}, {10, 394}, {10, 396}, {10, 398}, {10, 400}, - {10, 402}, {10, 404}, {10, 406}, {10, 408}, {10, 410}, {10, 412}, - {10, 414}, {10, 416}, {10, 418}, {10, 420}, {10, 422}, {10, 424}, - {10, 426}, {10, 428}, {10, 430}, {10, 432}, {10, 434}, {10, 436}, - {10, 438}, {10, 440}, {10, 442}, {10, 444}, {10, 446}, {10, 448}, - {10, 450}, {10, 452}, {10, 454}, {10, 456}, {10, 458}, {10, 460}, - {10, 462}, {10, 464}, {10, 466}, {10, 468}, {10, 470}, {10, 472}, - {10, 474}, {10, 476}, {10, 478}, {10, 480}, {10, 482}, {10, 484}, - {10, 486}, {10, 488}, {10, 490}, {10, 492}, {10, 494}, {10, 496}, - {10, 498}, {10, 500}, {10, 502}, {10, 504}, {10, 506}, {10, 508}, - {10, 510}, {10, 512}, {10, 514}, {10, 516}, {10, 518}, {10, 520}, - {10, 522}, {10, 524}, {10, 526}, {10, 528}, {10, 530}, {10, 532}, - {10, 534}, {10, 536}, {10, 538}, {10, 540}, {10, 542}, {10, 544}, - {10, 546}, {10, 548}, {10, 550}, {10, 552}, {10, 554}, {10, 556}, - {10, 558}, {10, 560}, {10, 562}, {10, 564}, {10, 566}, {10, 568}, - {10, 570}, {10, 572}, {10, 574}, {10, 576}, {10, 578}, {10, 580}, - {10, 582}, {10, 584}, {10, 586}, {10, 588}, {10, 590}, {10, 592}, - {10, 594}, {10, 596}, {10, 598}, {10, 600}, {10, 602}, {10, 604}, - {10, 606}, {10, 608}, {10, 610}, {10, 612}, {10, 614}, {10, 616}, - {10, 618}, {10, 620}, {10, 622}, {10, 624}, {10, 626}, {10, 628}, - {10, 630}, {10, 632}, {10, 634}, {10, 636}, {10, 638}, {10, 640}, - {10, 642}, {10, 644}, {10, 646}, {10, 648}, {10, 650}, {10, 652}, - {10, 654}, {10, 656}, 
{10, 658}, {10, 660}, {10, 662}, {10, 664}, - {10, 666}, {10, 668}, {10, 670}, {10, 672}, {10, 674}, {10, 676}, - {10, 678}, {10, 680}, {10, 682}, {10, 684}, {10, 686}, {10, 688}, - {10, 690}, {10, 692}, {10, 694}, {10, 696}, {10, 698}, {10, 700}, - {10, 702}, {10, 704}, {10, 706}, {10, 708}, {10, 710}, {10, 712}, - {10, 714}, {10, 716}, {10, 718}, {10, 720}, {10, 722}, {10, 724}, - {10, 726}, {10, 728}, {10, 730}, {10, 732}, {10, 734}, {10, 736}, - {10, 738}, {10, 740}, {10, 742}, {10, 744}, {10, 746}, {10, 748}, - {10, 750}, {10, 752}, {10, 754}, {10, 756}, {10, 758}, {10, 760}, - {10, 762}, {10, 764}, {10, 766}, {10, 768}, {10, 770}, {10, 772}, - {10, 774}, {10, 776}, {10, 778}, {10, 780}, {10, 782}, {10, 784}, - {10, 786}, {10, 788}, {10, 790}, {10, 792}, {10, 794}, {10, 796}, - {10, 798}, {10, 800}, {10, 802}, {10, 804}, {10, 806}, {10, 808}, - {10, 810}, {10, 812}, {10, 814}, {10, 816}, {10, 818}, {10, 820}, - {10, 822}, {10, 824}, {10, 826}, {10, 828}, {10, 830}, {10, 832}, - {10, 834}, {10, 836}, {10, 838}, {10, 840}, {10, 842}, {10, 844}, - {10, 846}, {10, 848}, {10, 850}, {10, 852}, {10, 854}, {10, 856}, - {10, 858}, {10, 860}, {10, 862}, {10, 864}, {10, 866}, {10, 868}, - {10, 870}, {10, 872}, {10, 874}, {10, 876}, {10, 878}, {10, 880}, - {10, 882}, {10, 884}, {10, 886}, {10, 888}, {10, 890}, {10, 892}, - {10, 894}, {10, 896}, {10, 898}, {10, 900}, {10, 902}, {10, 904}, - {10, 906}, {10, 908}, {10, 910}, {10, 912}, {10, 914}, {10, 916}, - {10, 918}, {10, 920}, {10, 922}, {10, 924}, {10, 926}, {10, 928}, - {10, 930}, {10, 932}, {10, 934}, {10, 936}, {10, 938}, {10, 940}, - {10, 942}, {10, 944}, {10, 946}, {10, 948}, {10, 950}, {10, 952}, - {10, 954}, {10, 956}, {10, 958}, {10, 960}, {10, 962}, {10, 964}, - {10, 966}, {10, 968}, {10, 970}, {10, 972}, {10, 974}, {10, 976}, - {10, 978}, {10, 980}, {10, 982}, {10, 984}, {10, 986}, {10, 988}, - {10, 990}, {10, 992}, {10, 994}, {10, 996}, {10, 998}, {10, 1000}, - {10, 1002}, {10, 1004}, {10, 1006}, {10, 1008}, 
{10, 1010}, {10, 1012}, - {10, 1014}, {10, 1016}, {10, 1018}, {10, 1020}, {10, 1022}, {10, 1024}, - {10, 1026}, {10, 1028}, {10, 1030}, {10, 1032}, {10, 1034}, {10, 1036}, - {10, 1038}, {10, 1040}, {10, 1042}, {10, 1044}, {10, 1046}, {10, 1048}, - {10, 1050}, {10, 1052}, {10, 1054}, {10, 1056}, {10, 1058}, {10, 1060}, - {10, 1062}, {10, 1064}, {10, 1066}, {10, 1068}, {10, 1070}, {10, 1072}, - {10, 1074}, {10, 1076}, {10, 1078}, {10, 1080}, {10, 1082}, {10, 1084}, - {10, 1086}, {10, 1088}, {10, 1090}, {10, 1092}, {10, 1094}, {10, 1096}, - {10, 1098}, {10, 1100}, {10, 1102}, {10, 1104}, {10, 1106}, {10, 1108}, - {10, 1110}, {10, 1112}, {10, 1114}, {10, 1116}, {10, 1118}, {10, 1120}, - {10, 1122}, {10, 1124}, {10, 1126}, {10, 1128}, {10, 1130}, {10, 1132}, - {10, 1134}, {10, 1136}, {10, 1138}, {10, 1140}, {10, 1142}, {10, 1144}, - {10, 1146}, {10, 1148}, {10, 1150}, {10, 1152}, {10, 1154}, {10, 1156}, - {10, 1158}, {10, 1160}, {10, 1162}, {10, 1164}, {10, 1166}, {10, 1168}, - {10, 1170}, {10, 1172}, {10, 1174}, {10, 1176}, {10, 1178}, {10, 1180}, - {10, 1182}, {10, 1184}, {10, 1186}, {10, 1188}, {10, 1190}, {10, 1192}, - {10, 1194}, {10, 1196}, {10, 1198}, {10, 1200}, {10, 1202}, {10, 1204}, - {10, 1206}, {10, 1208}, {10, 1210}, {10, 1212}, {10, 1214}, {10, 1216}, - {10, 1218}, {10, 1220}, {10, 1222}, {10, 1224}, {10, 1226}, {10, 1228}, - {10, 1230}, {10, 1232}, {10, 1234}, {10, 1236}, {10, 1238}, {10, 1240}, - {10, 1242}, {10, 1244}, {10, 1246}, {10, 1248}, {10, 1250}, {10, 1252}, - {10, 1254}, {10, 1256}, {10, 1258}, {10, 1260}, {10, 1262}, {10, 1264}, - {10, 1266}, {10, 1268}, {10, 1270}, {10, 1272}, {10, 1274}, {10, 1276}, - {10, 1278}, {10, 1280}, {10, 1282}, {10, 1284}, {10, 1286}, {10, 1288}, - {10, 1290}, {10, 1292}, {10, 1294}, {10, 1296}, {10, 1298}, {10, 1300}, - {10, 1302}, {10, 1304}, {10, 1306}, {10, 1308}, {10, 1310}, {10, 1312}, - {10, 1314}, {10, 1316}, {10, 1318}, {10, 1320}, {10, 1322}, {10, 1324}, - {10, 1326}, {10, 1328}, {10, 1330}, {10, 1332}, 
{10, 1334}, {10, 1336}, - {10, 1338}, {10, 1340}, {10, 1342}, {10, 1344}, {10, 1346}, {10, 1348}, - {10, 1350}, {10, 1352}, {10, 1354}, {10, 1356}, {10, 1358}, {10, 1360}, - {10, 1362}, {10, 1364}, {10, 1366}, {10, 1368}, {10, 1370}, {10, 1372}, - {10, 1374}, {10, 1376}, {10, 1378}, {10, 1380}, {10, 1382}, {10, 1384}, - {10, 1386}, {10, 1388}, {10, 1390}, {10, 1392}, {10, 1394}, {10, 1396}, - {10, 1398}, {10, 1400}, {10, 1402}, {10, 1404}, {10, 1406}, {10, 1408}, - {10, 1410}, {10, 1412}, {10, 1414}, {10, 1416}, {10, 1418}, {10, 1420}, - {10, 1422}, {10, 1424}, {10, 1426}, {10, 1428}, {10, 1430}, {10, 1432}, - {10, 1434}, {10, 1436}, {10, 1438}, {10, 1440}, {10, 1442}, {10, 1444}, - {10, 1446}, {10, 1448}, {10, 1450}, {10, 1452}, {10, 1454}, {10, 1456}, - {10, 1458}, {10, 1460}, {10, 1462}, {10, 1464}, {10, 1466}, {10, 1468}, - {10, 1470}, {10, 1472}, {10, 1474}, {10, 1476}, {10, 1478}, {10, 1480}, - {10, 1482}, {10, 1484}, {10, 1486}, {10, 1488}, {10, 1490}, {10, 1492}, - {10, 1494}, {10, 1496}, {10, 1498}, {10, 1500}, {10, 1502}, {10, 1504}, - {10, 1506}, {10, 1508}, {10, 1510}, {10, 1512}, {10, 1514}, {10, 1516}, - {10, 1518}, {10, 1520}, {10, 1522}, {10, 1524}, {10, 1526}, {10, 1528}, - {10, 1530}, {10, 1532}, {10, 1534}, {10, 1536}, {10, 1538}, {10, 1540}, - {10, 1542}, {10, 1544}, {10, 1546}, {10, 1548}, {10, 1550}, {10, 1552}, - {10, 1554}, {10, 1556}, {10, 1558}, {10, 1560}, {10, 1562}, {10, 1564}, - {10, 1566}, {10, 1568}, {10, 1570}, {10, 1572}, {10, 1574}, {10, 1576}, - {10, 1578}, {10, 1580}, {10, 1582}, {10, 1584}, {10, 1586}, {10, 1588}, - {10, 1590}, {10, 1592}, {10, 1594}, {10, 1596}, {10, 1598}, {10, 1600}, - {10, 1602}, {10, 1604}, {10, 1606}, {10, 1608}, {10, 1610}, {10, 1612}, - {10, 1614}, {10, 1616}, {10, 1618}, {10, 1620}, {10, 1622}, {10, 1624}, - {10, 1626}, {10, 1628}, {10, 1630}, {10, 1632}, {10, 1634}, {10, 1636}, - {10, 1638}, {10, 1640}, {10, 1642}, {10, 1644}, {10, 1646}, {10, 1648}, - {10, 1650}, {10, 1652}, {10, 1654}, {10, 1656}, 
{10, 1658}, {10, 1660}, - {10, 1662}, {10, 1664}, {10, 1666}, {10, 1668}, {10, 1670}, {10, 1672}, - {10, 1674}, {10, 1676}, {10, 1678}, {10, 1680}, {10, 1682}, {10, 1684}, - {10, 1686}, {10, 1688}, {10, 1690}, {10, 1692}, {10, 1694}, {10, 1696}, - {10, 1698}, {10, 1700}, {10, 1702}, {10, 1704}, {10, 1706}, {10, 1708}, - {10, 1710}, {10, 1712}, {10, 1714}, {10, 1716}, {10, 1718}, {10, 1720}, - {10, 1722}, {10, 1724}, {10, 1726}, {10, 1728}, {10, 1730}, {10, 1732}, - {10, 1734}, {10, 1736}, {10, 1738}, {10, 1740}, {10, 1742}, {10, 1744}, - {10, 1746}, {10, 1748}, {10, 1750}, {10, 1752}, {10, 1754}, {10, 1756}, - {10, 1758}, {10, 1760}, {10, 1762}, {10, 1764}, {10, 1766}, {10, 1768}, - {10, 1770}, {10, 1772}, {10, 1774}, {10, 1776}, {10, 1778}, {10, 1780}, - {10, 1782}, {10, 1784}, {10, 1786}, {10, 1788}, {10, 1790}, {10, 1792}, - {10, 1794}, {10, 1796}, {10, 1798}, {10, 1800}, {10, 1802}, {10, 1804}, - {10, 1806}, {10, 1808}, {10, 1810}, {10, 1812}, {10, 1814}, {10, 1816}, - {10, 1818}, {10, 1820}, {10, 1822}, {10, 1824}, {10, 1826}, {10, 1828}, - {10, 1830}, {10, 1832}, {10, 1834}, {10, 1836}, {10, 1838}, {10, 1840}, - {10, 1842}, {10, 1844}, {10, 1846}, {10, 1848}, {10, 1850}, {10, 1852}, - {10, 1854}, {10, 1856}, {10, 1858}, {10, 1860}, {10, 1862}, {10, 1864}, - {10, 1866}, {10, 1868}, {10, 1870}, {10, 1872}, {10, 1874}, {10, 1876}, - {10, 1878}, {10, 1880}, {10, 1882}, {10, 1884}, {10, 1886}, {10, 1888}, - {10, 1890}, {10, 1892}, {10, 1894}, {10, 1896}, {10, 1898}, {10, 1900}, - {10, 1902}, {10, 1904}, {10, 1906}, {10, 1908}, {10, 1910}, {10, 1912}, - {10, 1914}, {10, 1916}, {10, 1918}, {10, 1920}, {10, 1922}, {10, 1924}, - {10, 1926}, {10, 1928}, {10, 1930}, {10, 1932}, {10, 1934}, {10, 1936}, - {10, 1938}, {10, 1940}, {10, 1942}, {10, 1944}, {10, 1946}, {10, 1948}, - {10, 1950}, {10, 1952}, {10, 1954}, {10, 1956}, {10, 1958}, {10, 1960}, - {10, 1962}, {10, 1964}, {10, 1966}, {10, 1968}, {10, 1970}, {10, 1972}, - {10, 1974}, {10, 1976}, {10, 1978}, {10, 1980}, 
{10, 1982}, {10, 1984}, - {10, 1986}, {10, 1988}, {10, 1990}, {10, 1992}, {10, 1994}, {10, 1996}, - {10, 1998}, {10, 2000}, {10, 2002}, {10, 2004}, {10, 2006}, {10, 2008}, - {10, 2010}, {10, 2012}, {10, 2014}, {10, 2016}, {10, 2018}, {10, 2020}, - {10, 2022}, {10, 2024}, {10, 2026}, {10, 2028}, {10, 2030}, {10, 2032}, - {10, 2034}, {10, 2036}, {10, 2038}, {10, 2040}, {10, 2042}, {10, 2044}, - {10, 2046}, {10, 2048}, {10, 2050}, {10, 2052}, {10, 2054}, {10, 2056}, - {10, 2058}, {10, 2060}, {10, 2062}, {10, 2064}, {10, 2066}, {10, 2068}, - {10, 2070}, {10, 2072}, {10, 2074}, {10, 2076}, {10, 2078}, {10, 2080}, - {10, 2082}, {10, 2084}, {10, 2086}, {10, 2088}, {10, 2090}, {10, 2092}, - {10, 2094}, {10, 2096}, {10, 2098}, {10, 2100}, {10, 2102}, {10, 2104}, - {10, 2106}, {10, 2108}, {10, 2110}, {10, 2112}, {10, 2114}, {10, 2116}, - {10, 2118}, {10, 2120}, {10, 2122}, {10, 2124}, {10, 2126}, {10, 2128}, - {10, 2130}, {10, 2132}, {10, 2134}, {10, 2136}, {10, 2138}, {10, 2140}, - {10, 2142}, {10, 2144}, {10, 2146}, {10, 2148}, {10, 2150}, {10, 2152}, - {10, 2154}, {10, 2156}, {10, 2158}, {10, 2160}, {10, 2162}, {10, 2164}, - {10, 2166}, {10, 2168}, {10, 2170}, {10, 2172}, {10, 2174}, {10, 2176}, - {10, 2178}, {10, 2180}, {10, 2182}, {10, 2184}, {10, 2186}, {10, 2188}, - {10, 2190}, {10, 2192}, {10, 2194}, {10, 2196}, {10, 2198}, {10, 2200}, - {10, 2202}, {10, 2204}, {10, 2206}, {10, 2208}, {10, 2210}, {10, 2212}, - {10, 2214}, {10, 2216}, {10, 2218}, {10, 2220}, {10, 2222}, {10, 2224}, - {10, 2226}, {10, 2228}, {10, 2230}, {10, 2232}, {10, 2234}, {10, 2236}, - {10, 2238}, {10, 2240}, {10, 2242}, {10, 2244}, {10, 2246}, {10, 2248}, - {10, 2250}, {10, 2252}, {10, 2254}, {10, 2256}, {10, 2258}, {10, 2260}, - {10, 2262}, {10, 2264}, {10, 2266}, {10, 2268}, {10, 2270}, {10, 2272}, - {10, 2274}, {10, 2276}, {10, 2278}, {10, 2280}, {10, 2282}, {10, 2284}, - {10, 2286}, {10, 2288}, {10, 2290}, {10, 2292}, {10, 2294}, {10, 2296}, - {10, 2298}, {10, 2300}, {10, 2302}, {10, 2304}, 
{10, 2306}, {10, 2308}, - {10, 2310}, {10, 2312}, {10, 2314}, {10, 2316}, {10, 2318}, {10, 2320}, - {10, 2322}, {10, 2324}, {10, 2326}, {10, 2328}, {10, 2330}, {10, 2332}, - {10, 2334}, {10, 2336}, {10, 2338}, {10, 2340}, {10, 2342}, {10, 2344}, - {10, 2346}, {10, 2348}, {10, 2350}, {10, 2352}, {10, 2354}, {10, 2356}, - {10, 2358}, {10, 2360}, {10, 2362}, {10, 2364}, {10, 2366}, {10, 2368}, - {10, 2370}, {10, 2372}, {10, 2374}, {10, 2376}, {10, 2378}, {10, 2380}, - {10, 2382}, {10, 2384}, {10, 2386}, {10, 2388}, {10, 2390}, {10, 2392}, - {10, 2394}, {10, 2396}, {10, 2398}, {10, 2400}, {10, 2402}, {10, 2404}, - {10, 2406}, {10, 2408}, {10, 2410}, {10, 2412}, {10, 2414}, {10, 2416}, - {10, 2418}, {10, 2420}, {10, 2422}, {10, 2424}, {10, 2426}, {10, 2428}, - {10, 2430}, {10, 2432}, {10, 2434}, {10, 2436}, {10, 2438}, {10, 2440}, - {10, 2442}, {10, 2444}, {10, 2446}, {10, 2448}, {10, 2450}, {10, 2452}, - {10, 2454}, {10, 2456}, {10, 2458}, {10, 2460}, {10, 2462}, {10, 2464}, - {10, 2466}, {10, 2468}, {10, 2470}, {10, 2472}, {10, 2474}, {10, 2476}, - {10, 2478}, {10, 2480}, {10, 2482}, {10, 2484}, {10, 2486}, {10, 2488}, - {10, 2490}, {10, 2492}, {10, 2494}, {10, 2496}, {10, 2498}, {10, 2500}, - {10, 2502}, {10, 2504}, {10, 2506}, {10, 2508}, {10, 2510}, {10, 2512}, - {10, 2514}, {10, 2516}, {10, 2518}, {10, 2520}, {10, 2522}, {10, 2524}, - {10, 2526}, {10, 2528}, {10, 2530}, {10, 2532}, {10, 2534}, {10, 2536}, - {10, 2538}, {10, 2540}, {10, 2542}, {10, 2544}, {10, 2546}, {10, 2548}, - {10, 2550}, {10, 2552}, {10, 2554}, {10, 2556}, {10, 2558}, {10, 2560}, - {10, 2562}, {10, 2564}, {10, 2566}, {10, 2568}, {10, 2570}, {10, 2572}, - {10, 2574}, {10, 2576}, {10, 2578}, {10, 2580}, {10, 2582}, {10, 2584}, - {10, 2586}, {10, 2588}, {10, 2590}, {10, 2592}, {10, 2594}, {10, 2596}, - {10, 2598}, {10, 2600}, {10, 2602}, {10, 2604}, {10, 2606}, {10, 2608}, - {10, 2610}, {10, 2612}, {10, 2614}, {10, 2616}, {10, 2618}, {10, 2620}, - {10, 2622}, {10, 2624}, {10, 2626}, {10, 2628}, 
{10, 2630}, {10, 2632}, - {10, 2634}, {10, 2636}, {10, 2638}, {10, 2640}, {10, 2642}, {10, 2644}, - {10, 2646}, {10, 2648}, {10, 2650}, {10, 2652}, {10, 2654}, {10, 2656}, - {10, 2658}, {10, 2660}, {10, 2662}, {10, 2664}, {10, 2666}, {10, 2668}, - {10, 2670}, {10, 2672}, {10, 2674}, {10, 2676}, {10, 2678}, {10, 2680}, - {10, 2682}, {10, 2684}, {10, 2686}, {10, 2688}, {10, 2690}, {10, 2692}, - {10, 2694}, {10, 2696}, {10, 2698}, {10, 2700}, {10, 2702}, {10, 2704}, - {10, 2706}, {10, 2708}, {10, 2710}, {10, 2712}, {10, 2714}, {10, 2716}, - {10, 2718}, {10, 2720}, {10, 2722}, {10, 2724}, {10, 2726}, {10, 2728}, - {10, 2730}, {10, 2732}, {10, 2734}, {10, 2736}, {10, 2738}, {10, 2740}, - {10, 2742}, {10, 2744}, {10, 2746}, {10, 2748}, {10, 2750}, {10, 2752}, - {10, 2754}, {10, 2756}, {10, 2758}, {10, 2760}, {10, 2762}, {10, 2764}, - {10, 2766}, {10, 2768}, {10, 2770}, {10, 2772}, {10, 2774}, {10, 2776}, - {10, 2778}, {10, 2780}, {10, 2782}, {10, 2784}, {10, 2786}, {10, 2788}, - {10, 2790}, {10, 2792}, {10, 2794}, {10, 2796}, {10, 2798}, {10, 2800}, - {10, 2802}, {10, 2804}, {10, 2806}, {10, 2808}, {10, 2810}, {10, 2812}, - {10, 2814}, {10, 2816}, {10, 2818}, {10, 2820}, {10, 2822}, {10, 2824}, - {10, 2826}, {10, 2828}, {10, 2830}, {10, 2832}, {10, 2834}, {10, 2836}, - {10, 2838}, {10, 2840}, {10, 2842}, {10, 2844}, {10, 2846}, {10, 2848}, - {10, 2850}, {10, 2852}, {10, 2854}, {10, 2856}, {10, 2858}, {10, 2860}, - {10, 2862}, {10, 2864}, {10, 2866}, {10, 2868}, {10, 2870}, {10, 2872}, - {10, 2874}, {10, 2876}, {10, 2878}, {10, 2880}, {10, 2882}, {10, 2884}, - {10, 2886}, {10, 2888}, {10, 2890}, {10, 2892}, {10, 2894}, {10, 2896}, - {10, 2898}, {10, 2900}, {10, 2902}, {10, 2904}, {10, 2906}, {10, 2908}, - {10, 2910}, {10, 2912}, {10, 2914}, {10, 2916}, {10, 2918}, {10, 2920}, - {10, 2922}, {10, 2924}, {10, 2926}, {10, 2928}, {10, 2930}, {10, 2932}, - {10, 2934}, {10, 2936}, {10, 2938}, {10, 2940}, {10, 2942}, {10, 2944}, - {10, 2946}, {10, 2948}, {10, 2950}, {10, 2952}, 
{10, 2954}, {10, 2956}, - {10, 2958}, {10, 2960}, {10, 2962}, {10, 2964}, {10, 2966}, {10, 2968}, - {10, 2970}, {10, 2972}, {10, 2974}, {10, 2976}, {10, 2978}, {10, 2980}, - {10, 2982}, {10, 2984}, {10, 2986}, {10, 2988}, {10, 2990}, {10, 2992}, - {10, 2994}, {10, 2996}, {10, 2998}, {10, 3000}, {10, 3002}, {10, 3004}, - {10, 3006}, {10, 3008}, {10, 3010}, {10, 3012}, {10, 3014}, {10, 3016}, - {10, 3018}, {10, 3020}, {10, 3022}, {10, 3024}, {10, 3026}, {10, 3028}, - {10, 3030}, {10, 3032}, {10, 3034}, {10, 3036}, {10, 3038}, {10, 3040}, - {10, 3042}, {10, 3044}, {10, 3046}, {10, 3048}, {10, 3050}, {10, 3052}, - {10, 3054}, {10, 3056}, {10, 3058}, {10, 3060}, {10, 3062}, {10, 3064}, - {10, 3066}, {10, 3068}, {10, 3070}, {10, 3072}, {10, 3074}, {10, 3076}, - {10, 3078}, {10, 3080}, {10, 3082}, {10, 3084}, {10, 3086}, {10, 3088}, - {10, 3090}, {10, 3092}, {10, 3094}, {10, 3096}, {10, 3098}, {10, 3100}, - {10, 3102}, {10, 3104}, {10, 3106}, {10, 3108}, {10, 3110}, {10, 3112}, - {10, 3114}, {10, 3116}, {10, 3118}, {10, 3120}, {10, 3122}, {10, 3124}, - {10, 3126}, {10, 3128}, {10, 3130}, {10, 3132}, {10, 3134}, {10, 3136}, - {10, 3138}, {10, 3140}, {10, 3142}, {10, 3144}, {10, 3146}, {10, 3148}, - {10, 3150}, {10, 3152}, {10, 3154}, {10, 3156}, {10, 3158}, {10, 3160}, - {10, 3162}, {10, 3164}, {10, 3166}, {10, 3168}, {10, 3170}, {10, 3172}, - {10, 3174}, {10, 3176}, {10, 3178}, {10, 3180}, {10, 3182}, {10, 3184}, - {10, 3186}, {10, 3188}, {10, 3190}, {10, 3192}, {10, 3194}, {10, 3196}, - {10, 3198}, {10, 3200}, {10, 3202}, {10, 3204}, {10, 3206}, {10, 3208}, - {10, 3210}, {10, 3212}, {10, 3214}, {10, 3216}, {10, 3218}, {10, 3220}, - {10, 3222}, {10, 3224}, {10, 3226}, {10, 3228}, {10, 3230}, {10, 3232}, - {10, 3234}, {10, 3236}, {10, 3238}, {10, 3240}, {10, 3242}, {10, 3244}, - {10, 3246}, {10, 3248}, {10, 3250}, {10, 3252}, {10, 3254}, {10, 3256}, - {10, 3258}, {10, 3260}, {10, 3262}, {10, 3264}, {10, 3266}, {10, 3268}, - {10, 3270}, {10, 3272}, {10, 3274}, {10, 3276}, 
{10, 3278}, {10, 3280}, - {10, 3282}, {10, 3284}, {10, 3286}, {10, 3288}, {10, 3290}, {10, 3292}, - {10, 3294}, {10, 3296}, {10, 3298}, {10, 3300}, {10, 3302}, {10, 3304}, - {10, 3306}, {10, 3308}, {10, 3310}, {10, 3312}, {10, 3314}, {10, 3316}, - {10, 3318}, {10, 3320}, {10, 3322}, {10, 3324}, {10, 3326}, {10, 3328}, - {10, 3330}, {10, 3332}, {10, 3334}, {10, 3336}, {10, 3338}, {10, 3340}, - {10, 3342}, {10, 3344}, {10, 3346}, {10, 3348}, {10, 3350}, {10, 3352}, - {10, 3354}, {10, 3356}, {10, 3358}, {10, 3360}, {10, 3362}, {10, 3364}, - {10, 3366}, {10, 3368}, {10, 3370}, {10, 3372}, {10, 3374}, {10, 3376}, - {10, 3378}, {10, 3380}, {10, 3382}, {10, 3384}, {10, 3386}, {10, 3388}, - {10, 3390}, {10, 3392}, {10, 3394}, {10, 3396}, {10, 3398}, {10, 3400}, - {10, 3402}, {10, 3404}, {10, 3406}, {10, 3408}, {10, 3410}, {10, 3412}, - {10, 3414}, {10, 3416}, {10, 3418}, {10, 3420}, {10, 3422}, {10, 3424}, - {10, 3426}, {10, 3428}, {10, 3430}, {10, 3432}, {10, 3434}, {10, 3436}, - {10, 3438}, {10, 3440}, {10, 3442}, {10, 3444}, {10, 3446}, {10, 3448}, - {10, 3450}, {10, 3452}, {10, 3454}, {10, 3456}, {10, 3458}, {10, 3460}, - {10, 3462}, {10, 3464}, {10, 3466}, {10, 3468}, {10, 3470}, {10, 3472}, - {10, 3474}, {10, 3476}, {10, 3478}, {10, 3480}, {10, 3482}, {10, 3484}, - {10, 3486}, {10, 3488}, {10, 3490}, {10, 3492}, {10, 3494}, {10, 3496}, - {10, 3498}, {10, 3500}, {10, 3502}, {10, 3504}, {10, 3506}, {10, 3508}, - {10, 3510}, {10, 3512}, {10, 3514}, {10, 3516}, {10, 3518}, {10, 3520}, - {10, 3522}, {10, 3524}, {10, 3526}, {10, 3528}, {10, 3530}, {10, 3532}, - {10, 3534}, {10, 3536}, {10, 3538}, {10, 3540}, {10, 3542}, {10, 3544}, - {10, 3546}, {10, 3548}, {10, 3550}, {10, 3552}, {10, 3554}, {10, 3556}, - {10, 3558}, {10, 3560}, {10, 3562}, {10, 3564}, {10, 3566}, {10, 3568}, - {10, 3570}, {10, 3572}, {10, 3574}, {10, 3576}, {10, 3578}, {10, 3580}, - {10, 3582}, {10, 3584}, {10, 3586}, {10, 3588}, {10, 3590}, {10, 3592}, - {10, 3594}, {10, 3596}, {10, 3598}, {10, 3600}, 
{10, 3602}, {10, 3604}, - {10, 3606}, {10, 3608}, {10, 3610}, {10, 3612}, {10, 3614}, {10, 3616}, - {10, 3618}, {10, 3620}, {10, 3622}, {10, 3624}, {10, 3626}, {10, 3628}, - {10, 3630}, {10, 3632}, {10, 3634}, {10, 3636}, {10, 3638}, {10, 3640}, - {10, 3642}, {10, 3644}, {10, 3646}, {10, 3648}, {10, 3650}, {10, 3652}, - {10, 3654}, {10, 3656}, {10, 3658}, {10, 3660}, {10, 3662}, {10, 3664}, - {10, 3666}, {10, 3668}, {10, 3670}, {10, 3672}, {10, 3674}, {10, 3676}, - {10, 3678}, {10, 3680}, {10, 3682}, {10, 3684}, {10, 3686}, {10, 3688}, - {10, 3690}, {10, 3692}, {10, 3694}, {10, 3696}, {10, 3698}, {10, 3700}, - {10, 3702}, {10, 3704}, {10, 3706}, {10, 3708}, {10, 3710}, {10, 3712}, - {10, 3714}, {10, 3716}, {10, 3718}, {10, 3720}, {10, 3722}, {10, 3724}, - {10, 3726}, {10, 3728}, {10, 3730}, {10, 3732}, {10, 3734}, {10, 3736}, - {10, 3738}, {10, 3740}, {10, 3742}, {10, 3744}, {10, 3746}, {10, 3748}, - {10, 3750}, {10, 3752}, {10, 3754}, {10, 3756}, {10, 3758}, {10, 3760}, - {10, 3762}, {10, 3764}, {10, 3766}, {10, 3768}, {10, 3770}, {10, 3772}, - {10, 3774}, {10, 3776}, {10, 3778}, {10, 3780}, {10, 3782}, {10, 3784}, - {10, 3786}, {10, 3788}, {10, 3790}, {10, 3792}, {10, 3794}, {10, 3796}, - {10, 3798}, {10, 3800}, {10, 3802}, {10, 3804}, {10, 3806}, {10, 3808}, - {10, 3810}, {10, 3812}, {10, 3814}, {10, 3816}, {10, 3818}, {10, 3820}, - {10, 3822}, {10, 3824}, {10, 3826}, {10, 3828}, {10, 3830}, {10, 3832}, - {10, 3834}, {10, 3836}, {10, 3838}, {10, 3840}, {10, 3842}, {10, 3844}, - {10, 3846}, {10, 3848}, {10, 3850}, {10, 3852}, {10, 3854}, {10, 3856}, - {10, 3858}, {10, 3860}, {10, 3862}, {10, 3864}, {10, 3866}, {10, 3868}, - {10, 3870}, {10, 3872}, {10, 3874}, {10, 3876}, {10, 3878}, {10, 3880}, - {10, 3882}, {10, 3884}, {10, 3886}, {10, 3888}, {10, 3890}, {10, 3892}, - {10, 3894}, {10, 3896}, {10, 3898}, {10, 3900}, {10, 3902}, {10, 3904}, - {10, 3906}, {10, 3908}, {10, 3910}, {10, 3912}, {10, 3914}, {10, 3916}, - {10, 3918}, {10, 3920}, {10, 3922}, {10, 3924}, 
{10, 3926}, {10, 3928}, - {10, 3930}, {10, 3932}, {10, 3934}, {10, 3936}, {10, 3938}, {10, 3940}, - {10, 3942}, {10, 3944}, {10, 3946}, {10, 3948}, {10, 3950}, {10, 3952}, - {10, 3954}, {10, 3956}, {10, 3958}, {10, 3960} +static const TOKENVALUE dct_value_tokens[2048 * 2] = { + { 10, 3963 }, { 10, 3961 }, { 10, 3959 }, { 10, 3957 }, { 10, 3955 }, + { 10, 3953 }, { 10, 3951 }, { 10, 3949 }, { 10, 3947 }, { 10, 3945 }, + { 10, 3943 }, { 10, 3941 }, { 10, 3939 }, { 10, 3937 }, { 10, 3935 }, + { 10, 3933 }, { 10, 3931 }, { 10, 3929 }, { 10, 3927 }, { 10, 3925 }, + { 10, 3923 }, { 10, 3921 }, { 10, 3919 }, { 10, 3917 }, { 10, 3915 }, + { 10, 3913 }, { 10, 3911 }, { 10, 3909 }, { 10, 3907 }, { 10, 3905 }, + { 10, 3903 }, { 10, 3901 }, { 10, 3899 }, { 10, 3897 }, { 10, 3895 }, + { 10, 3893 }, { 10, 3891 }, { 10, 3889 }, { 10, 3887 }, { 10, 3885 }, + { 10, 3883 }, { 10, 3881 }, { 10, 3879 }, { 10, 3877 }, { 10, 3875 }, + { 10, 3873 }, { 10, 3871 }, { 10, 3869 }, { 10, 3867 }, { 10, 3865 }, + { 10, 3863 }, { 10, 3861 }, { 10, 3859 }, { 10, 3857 }, { 10, 3855 }, + { 10, 3853 }, { 10, 3851 }, { 10, 3849 }, { 10, 3847 }, { 10, 3845 }, + { 10, 3843 }, { 10, 3841 }, { 10, 3839 }, { 10, 3837 }, { 10, 3835 }, + { 10, 3833 }, { 10, 3831 }, { 10, 3829 }, { 10, 3827 }, { 10, 3825 }, + { 10, 3823 }, { 10, 3821 }, { 10, 3819 }, { 10, 3817 }, { 10, 3815 }, + { 10, 3813 }, { 10, 3811 }, { 10, 3809 }, { 10, 3807 }, { 10, 3805 }, + { 10, 3803 }, { 10, 3801 }, { 10, 3799 }, { 10, 3797 }, { 10, 3795 }, + { 10, 3793 }, { 10, 3791 }, { 10, 3789 }, { 10, 3787 }, { 10, 3785 }, + { 10, 3783 }, { 10, 3781 }, { 10, 3779 }, { 10, 3777 }, { 10, 3775 }, + { 10, 3773 }, { 10, 3771 }, { 10, 3769 }, { 10, 3767 }, { 10, 3765 }, + { 10, 3763 }, { 10, 3761 }, { 10, 3759 }, { 10, 3757 }, { 10, 3755 }, + { 10, 3753 }, { 10, 3751 }, { 10, 3749 }, { 10, 3747 }, { 10, 3745 }, + { 10, 3743 }, { 10, 3741 }, { 10, 3739 }, { 10, 3737 }, { 10, 3735 }, + { 10, 3733 }, { 10, 3731 }, { 10, 3729 }, { 10, 3727 }, { 10, 
3725 }, + { 10, 3723 }, { 10, 3721 }, { 10, 3719 }, { 10, 3717 }, { 10, 3715 }, + { 10, 3713 }, { 10, 3711 }, { 10, 3709 }, { 10, 3707 }, { 10, 3705 }, + { 10, 3703 }, { 10, 3701 }, { 10, 3699 }, { 10, 3697 }, { 10, 3695 }, + { 10, 3693 }, { 10, 3691 }, { 10, 3689 }, { 10, 3687 }, { 10, 3685 }, + { 10, 3683 }, { 10, 3681 }, { 10, 3679 }, { 10, 3677 }, { 10, 3675 }, + { 10, 3673 }, { 10, 3671 }, { 10, 3669 }, { 10, 3667 }, { 10, 3665 }, + { 10, 3663 }, { 10, 3661 }, { 10, 3659 }, { 10, 3657 }, { 10, 3655 }, + { 10, 3653 }, { 10, 3651 }, { 10, 3649 }, { 10, 3647 }, { 10, 3645 }, + { 10, 3643 }, { 10, 3641 }, { 10, 3639 }, { 10, 3637 }, { 10, 3635 }, + { 10, 3633 }, { 10, 3631 }, { 10, 3629 }, { 10, 3627 }, { 10, 3625 }, + { 10, 3623 }, { 10, 3621 }, { 10, 3619 }, { 10, 3617 }, { 10, 3615 }, + { 10, 3613 }, { 10, 3611 }, { 10, 3609 }, { 10, 3607 }, { 10, 3605 }, + { 10, 3603 }, { 10, 3601 }, { 10, 3599 }, { 10, 3597 }, { 10, 3595 }, + { 10, 3593 }, { 10, 3591 }, { 10, 3589 }, { 10, 3587 }, { 10, 3585 }, + { 10, 3583 }, { 10, 3581 }, { 10, 3579 }, { 10, 3577 }, { 10, 3575 }, + { 10, 3573 }, { 10, 3571 }, { 10, 3569 }, { 10, 3567 }, { 10, 3565 }, + { 10, 3563 }, { 10, 3561 }, { 10, 3559 }, { 10, 3557 }, { 10, 3555 }, + { 10, 3553 }, { 10, 3551 }, { 10, 3549 }, { 10, 3547 }, { 10, 3545 }, + { 10, 3543 }, { 10, 3541 }, { 10, 3539 }, { 10, 3537 }, { 10, 3535 }, + { 10, 3533 }, { 10, 3531 }, { 10, 3529 }, { 10, 3527 }, { 10, 3525 }, + { 10, 3523 }, { 10, 3521 }, { 10, 3519 }, { 10, 3517 }, { 10, 3515 }, + { 10, 3513 }, { 10, 3511 }, { 10, 3509 }, { 10, 3507 }, { 10, 3505 }, + { 10, 3503 }, { 10, 3501 }, { 10, 3499 }, { 10, 3497 }, { 10, 3495 }, + { 10, 3493 }, { 10, 3491 }, { 10, 3489 }, { 10, 3487 }, { 10, 3485 }, + { 10, 3483 }, { 10, 3481 }, { 10, 3479 }, { 10, 3477 }, { 10, 3475 }, + { 10, 3473 }, { 10, 3471 }, { 10, 3469 }, { 10, 3467 }, { 10, 3465 }, + { 10, 3463 }, { 10, 3461 }, { 10, 3459 }, { 10, 3457 }, { 10, 3455 }, + { 10, 3453 }, { 10, 3451 }, { 10, 3449 }, { 
10, 3447 }, { 10, 3445 }, + { 10, 3443 }, { 10, 3441 }, { 10, 3439 }, { 10, 3437 }, { 10, 3435 }, + { 10, 3433 }, { 10, 3431 }, { 10, 3429 }, { 10, 3427 }, { 10, 3425 }, + { 10, 3423 }, { 10, 3421 }, { 10, 3419 }, { 10, 3417 }, { 10, 3415 }, + { 10, 3413 }, { 10, 3411 }, { 10, 3409 }, { 10, 3407 }, { 10, 3405 }, + { 10, 3403 }, { 10, 3401 }, { 10, 3399 }, { 10, 3397 }, { 10, 3395 }, + { 10, 3393 }, { 10, 3391 }, { 10, 3389 }, { 10, 3387 }, { 10, 3385 }, + { 10, 3383 }, { 10, 3381 }, { 10, 3379 }, { 10, 3377 }, { 10, 3375 }, + { 10, 3373 }, { 10, 3371 }, { 10, 3369 }, { 10, 3367 }, { 10, 3365 }, + { 10, 3363 }, { 10, 3361 }, { 10, 3359 }, { 10, 3357 }, { 10, 3355 }, + { 10, 3353 }, { 10, 3351 }, { 10, 3349 }, { 10, 3347 }, { 10, 3345 }, + { 10, 3343 }, { 10, 3341 }, { 10, 3339 }, { 10, 3337 }, { 10, 3335 }, + { 10, 3333 }, { 10, 3331 }, { 10, 3329 }, { 10, 3327 }, { 10, 3325 }, + { 10, 3323 }, { 10, 3321 }, { 10, 3319 }, { 10, 3317 }, { 10, 3315 }, + { 10, 3313 }, { 10, 3311 }, { 10, 3309 }, { 10, 3307 }, { 10, 3305 }, + { 10, 3303 }, { 10, 3301 }, { 10, 3299 }, { 10, 3297 }, { 10, 3295 }, + { 10, 3293 }, { 10, 3291 }, { 10, 3289 }, { 10, 3287 }, { 10, 3285 }, + { 10, 3283 }, { 10, 3281 }, { 10, 3279 }, { 10, 3277 }, { 10, 3275 }, + { 10, 3273 }, { 10, 3271 }, { 10, 3269 }, { 10, 3267 }, { 10, 3265 }, + { 10, 3263 }, { 10, 3261 }, { 10, 3259 }, { 10, 3257 }, { 10, 3255 }, + { 10, 3253 }, { 10, 3251 }, { 10, 3249 }, { 10, 3247 }, { 10, 3245 }, + { 10, 3243 }, { 10, 3241 }, { 10, 3239 }, { 10, 3237 }, { 10, 3235 }, + { 10, 3233 }, { 10, 3231 }, { 10, 3229 }, { 10, 3227 }, { 10, 3225 }, + { 10, 3223 }, { 10, 3221 }, { 10, 3219 }, { 10, 3217 }, { 10, 3215 }, + { 10, 3213 }, { 10, 3211 }, { 10, 3209 }, { 10, 3207 }, { 10, 3205 }, + { 10, 3203 }, { 10, 3201 }, { 10, 3199 }, { 10, 3197 }, { 10, 3195 }, + { 10, 3193 }, { 10, 3191 }, { 10, 3189 }, { 10, 3187 }, { 10, 3185 }, + { 10, 3183 }, { 10, 3181 }, { 10, 3179 }, { 10, 3177 }, { 10, 3175 }, + { 10, 3173 }, { 10, 3171 }, 
{ 10, 3169 }, { 10, 3167 }, { 10, 3165 }, + { 10, 3163 }, { 10, 3161 }, { 10, 3159 }, { 10, 3157 }, { 10, 3155 }, + { 10, 3153 }, { 10, 3151 }, { 10, 3149 }, { 10, 3147 }, { 10, 3145 }, + { 10, 3143 }, { 10, 3141 }, { 10, 3139 }, { 10, 3137 }, { 10, 3135 }, + { 10, 3133 }, { 10, 3131 }, { 10, 3129 }, { 10, 3127 }, { 10, 3125 }, + { 10, 3123 }, { 10, 3121 }, { 10, 3119 }, { 10, 3117 }, { 10, 3115 }, + { 10, 3113 }, { 10, 3111 }, { 10, 3109 }, { 10, 3107 }, { 10, 3105 }, + { 10, 3103 }, { 10, 3101 }, { 10, 3099 }, { 10, 3097 }, { 10, 3095 }, + { 10, 3093 }, { 10, 3091 }, { 10, 3089 }, { 10, 3087 }, { 10, 3085 }, + { 10, 3083 }, { 10, 3081 }, { 10, 3079 }, { 10, 3077 }, { 10, 3075 }, + { 10, 3073 }, { 10, 3071 }, { 10, 3069 }, { 10, 3067 }, { 10, 3065 }, + { 10, 3063 }, { 10, 3061 }, { 10, 3059 }, { 10, 3057 }, { 10, 3055 }, + { 10, 3053 }, { 10, 3051 }, { 10, 3049 }, { 10, 3047 }, { 10, 3045 }, + { 10, 3043 }, { 10, 3041 }, { 10, 3039 }, { 10, 3037 }, { 10, 3035 }, + { 10, 3033 }, { 10, 3031 }, { 10, 3029 }, { 10, 3027 }, { 10, 3025 }, + { 10, 3023 }, { 10, 3021 }, { 10, 3019 }, { 10, 3017 }, { 10, 3015 }, + { 10, 3013 }, { 10, 3011 }, { 10, 3009 }, { 10, 3007 }, { 10, 3005 }, + { 10, 3003 }, { 10, 3001 }, { 10, 2999 }, { 10, 2997 }, { 10, 2995 }, + { 10, 2993 }, { 10, 2991 }, { 10, 2989 }, { 10, 2987 }, { 10, 2985 }, + { 10, 2983 }, { 10, 2981 }, { 10, 2979 }, { 10, 2977 }, { 10, 2975 }, + { 10, 2973 }, { 10, 2971 }, { 10, 2969 }, { 10, 2967 }, { 10, 2965 }, + { 10, 2963 }, { 10, 2961 }, { 10, 2959 }, { 10, 2957 }, { 10, 2955 }, + { 10, 2953 }, { 10, 2951 }, { 10, 2949 }, { 10, 2947 }, { 10, 2945 }, + { 10, 2943 }, { 10, 2941 }, { 10, 2939 }, { 10, 2937 }, { 10, 2935 }, + { 10, 2933 }, { 10, 2931 }, { 10, 2929 }, { 10, 2927 }, { 10, 2925 }, + { 10, 2923 }, { 10, 2921 }, { 10, 2919 }, { 10, 2917 }, { 10, 2915 }, + { 10, 2913 }, { 10, 2911 }, { 10, 2909 }, { 10, 2907 }, { 10, 2905 }, + { 10, 2903 }, { 10, 2901 }, { 10, 2899 }, { 10, 2897 }, { 10, 2895 }, + { 10, 2893 
}, { 10, 2891 }, { 10, 2889 }, { 10, 2887 }, { 10, 2885 }, + { 10, 2883 }, { 10, 2881 }, { 10, 2879 }, { 10, 2877 }, { 10, 2875 }, + { 10, 2873 }, { 10, 2871 }, { 10, 2869 }, { 10, 2867 }, { 10, 2865 }, + { 10, 2863 }, { 10, 2861 }, { 10, 2859 }, { 10, 2857 }, { 10, 2855 }, + { 10, 2853 }, { 10, 2851 }, { 10, 2849 }, { 10, 2847 }, { 10, 2845 }, + { 10, 2843 }, { 10, 2841 }, { 10, 2839 }, { 10, 2837 }, { 10, 2835 }, + { 10, 2833 }, { 10, 2831 }, { 10, 2829 }, { 10, 2827 }, { 10, 2825 }, + { 10, 2823 }, { 10, 2821 }, { 10, 2819 }, { 10, 2817 }, { 10, 2815 }, + { 10, 2813 }, { 10, 2811 }, { 10, 2809 }, { 10, 2807 }, { 10, 2805 }, + { 10, 2803 }, { 10, 2801 }, { 10, 2799 }, { 10, 2797 }, { 10, 2795 }, + { 10, 2793 }, { 10, 2791 }, { 10, 2789 }, { 10, 2787 }, { 10, 2785 }, + { 10, 2783 }, { 10, 2781 }, { 10, 2779 }, { 10, 2777 }, { 10, 2775 }, + { 10, 2773 }, { 10, 2771 }, { 10, 2769 }, { 10, 2767 }, { 10, 2765 }, + { 10, 2763 }, { 10, 2761 }, { 10, 2759 }, { 10, 2757 }, { 10, 2755 }, + { 10, 2753 }, { 10, 2751 }, { 10, 2749 }, { 10, 2747 }, { 10, 2745 }, + { 10, 2743 }, { 10, 2741 }, { 10, 2739 }, { 10, 2737 }, { 10, 2735 }, + { 10, 2733 }, { 10, 2731 }, { 10, 2729 }, { 10, 2727 }, { 10, 2725 }, + { 10, 2723 }, { 10, 2721 }, { 10, 2719 }, { 10, 2717 }, { 10, 2715 }, + { 10, 2713 }, { 10, 2711 }, { 10, 2709 }, { 10, 2707 }, { 10, 2705 }, + { 10, 2703 }, { 10, 2701 }, { 10, 2699 }, { 10, 2697 }, { 10, 2695 }, + { 10, 2693 }, { 10, 2691 }, { 10, 2689 }, { 10, 2687 }, { 10, 2685 }, + { 10, 2683 }, { 10, 2681 }, { 10, 2679 }, { 10, 2677 }, { 10, 2675 }, + { 10, 2673 }, { 10, 2671 }, { 10, 2669 }, { 10, 2667 }, { 10, 2665 }, + { 10, 2663 }, { 10, 2661 }, { 10, 2659 }, { 10, 2657 }, { 10, 2655 }, + { 10, 2653 }, { 10, 2651 }, { 10, 2649 }, { 10, 2647 }, { 10, 2645 }, + { 10, 2643 }, { 10, 2641 }, { 10, 2639 }, { 10, 2637 }, { 10, 2635 }, + { 10, 2633 }, { 10, 2631 }, { 10, 2629 }, { 10, 2627 }, { 10, 2625 }, + { 10, 2623 }, { 10, 2621 }, { 10, 2619 }, { 10, 2617 }, { 10, 2615 
}, + { 10, 2613 }, { 10, 2611 }, { 10, 2609 }, { 10, 2607 }, { 10, 2605 }, + { 10, 2603 }, { 10, 2601 }, { 10, 2599 }, { 10, 2597 }, { 10, 2595 }, + { 10, 2593 }, { 10, 2591 }, { 10, 2589 }, { 10, 2587 }, { 10, 2585 }, + { 10, 2583 }, { 10, 2581 }, { 10, 2579 }, { 10, 2577 }, { 10, 2575 }, + { 10, 2573 }, { 10, 2571 }, { 10, 2569 }, { 10, 2567 }, { 10, 2565 }, + { 10, 2563 }, { 10, 2561 }, { 10, 2559 }, { 10, 2557 }, { 10, 2555 }, + { 10, 2553 }, { 10, 2551 }, { 10, 2549 }, { 10, 2547 }, { 10, 2545 }, + { 10, 2543 }, { 10, 2541 }, { 10, 2539 }, { 10, 2537 }, { 10, 2535 }, + { 10, 2533 }, { 10, 2531 }, { 10, 2529 }, { 10, 2527 }, { 10, 2525 }, + { 10, 2523 }, { 10, 2521 }, { 10, 2519 }, { 10, 2517 }, { 10, 2515 }, + { 10, 2513 }, { 10, 2511 }, { 10, 2509 }, { 10, 2507 }, { 10, 2505 }, + { 10, 2503 }, { 10, 2501 }, { 10, 2499 }, { 10, 2497 }, { 10, 2495 }, + { 10, 2493 }, { 10, 2491 }, { 10, 2489 }, { 10, 2487 }, { 10, 2485 }, + { 10, 2483 }, { 10, 2481 }, { 10, 2479 }, { 10, 2477 }, { 10, 2475 }, + { 10, 2473 }, { 10, 2471 }, { 10, 2469 }, { 10, 2467 }, { 10, 2465 }, + { 10, 2463 }, { 10, 2461 }, { 10, 2459 }, { 10, 2457 }, { 10, 2455 }, + { 10, 2453 }, { 10, 2451 }, { 10, 2449 }, { 10, 2447 }, { 10, 2445 }, + { 10, 2443 }, { 10, 2441 }, { 10, 2439 }, { 10, 2437 }, { 10, 2435 }, + { 10, 2433 }, { 10, 2431 }, { 10, 2429 }, { 10, 2427 }, { 10, 2425 }, + { 10, 2423 }, { 10, 2421 }, { 10, 2419 }, { 10, 2417 }, { 10, 2415 }, + { 10, 2413 }, { 10, 2411 }, { 10, 2409 }, { 10, 2407 }, { 10, 2405 }, + { 10, 2403 }, { 10, 2401 }, { 10, 2399 }, { 10, 2397 }, { 10, 2395 }, + { 10, 2393 }, { 10, 2391 }, { 10, 2389 }, { 10, 2387 }, { 10, 2385 }, + { 10, 2383 }, { 10, 2381 }, { 10, 2379 }, { 10, 2377 }, { 10, 2375 }, + { 10, 2373 }, { 10, 2371 }, { 10, 2369 }, { 10, 2367 }, { 10, 2365 }, + { 10, 2363 }, { 10, 2361 }, { 10, 2359 }, { 10, 2357 }, { 10, 2355 }, + { 10, 2353 }, { 10, 2351 }, { 10, 2349 }, { 10, 2347 }, { 10, 2345 }, + { 10, 2343 }, { 10, 2341 }, { 10, 2339 }, { 10, 
2337 }, { 10, 2335 }, + { 10, 2333 }, { 10, 2331 }, { 10, 2329 }, { 10, 2327 }, { 10, 2325 }, + { 10, 2323 }, { 10, 2321 }, { 10, 2319 }, { 10, 2317 }, { 10, 2315 }, + { 10, 2313 }, { 10, 2311 }, { 10, 2309 }, { 10, 2307 }, { 10, 2305 }, + { 10, 2303 }, { 10, 2301 }, { 10, 2299 }, { 10, 2297 }, { 10, 2295 }, + { 10, 2293 }, { 10, 2291 }, { 10, 2289 }, { 10, 2287 }, { 10, 2285 }, + { 10, 2283 }, { 10, 2281 }, { 10, 2279 }, { 10, 2277 }, { 10, 2275 }, + { 10, 2273 }, { 10, 2271 }, { 10, 2269 }, { 10, 2267 }, { 10, 2265 }, + { 10, 2263 }, { 10, 2261 }, { 10, 2259 }, { 10, 2257 }, { 10, 2255 }, + { 10, 2253 }, { 10, 2251 }, { 10, 2249 }, { 10, 2247 }, { 10, 2245 }, + { 10, 2243 }, { 10, 2241 }, { 10, 2239 }, { 10, 2237 }, { 10, 2235 }, + { 10, 2233 }, { 10, 2231 }, { 10, 2229 }, { 10, 2227 }, { 10, 2225 }, + { 10, 2223 }, { 10, 2221 }, { 10, 2219 }, { 10, 2217 }, { 10, 2215 }, + { 10, 2213 }, { 10, 2211 }, { 10, 2209 }, { 10, 2207 }, { 10, 2205 }, + { 10, 2203 }, { 10, 2201 }, { 10, 2199 }, { 10, 2197 }, { 10, 2195 }, + { 10, 2193 }, { 10, 2191 }, { 10, 2189 }, { 10, 2187 }, { 10, 2185 }, + { 10, 2183 }, { 10, 2181 }, { 10, 2179 }, { 10, 2177 }, { 10, 2175 }, + { 10, 2173 }, { 10, 2171 }, { 10, 2169 }, { 10, 2167 }, { 10, 2165 }, + { 10, 2163 }, { 10, 2161 }, { 10, 2159 }, { 10, 2157 }, { 10, 2155 }, + { 10, 2153 }, { 10, 2151 }, { 10, 2149 }, { 10, 2147 }, { 10, 2145 }, + { 10, 2143 }, { 10, 2141 }, { 10, 2139 }, { 10, 2137 }, { 10, 2135 }, + { 10, 2133 }, { 10, 2131 }, { 10, 2129 }, { 10, 2127 }, { 10, 2125 }, + { 10, 2123 }, { 10, 2121 }, { 10, 2119 }, { 10, 2117 }, { 10, 2115 }, + { 10, 2113 }, { 10, 2111 }, { 10, 2109 }, { 10, 2107 }, { 10, 2105 }, + { 10, 2103 }, { 10, 2101 }, { 10, 2099 }, { 10, 2097 }, { 10, 2095 }, + { 10, 2093 }, { 10, 2091 }, { 10, 2089 }, { 10, 2087 }, { 10, 2085 }, + { 10, 2083 }, { 10, 2081 }, { 10, 2079 }, { 10, 2077 }, { 10, 2075 }, + { 10, 2073 }, { 10, 2071 }, { 10, 2069 }, { 10, 2067 }, { 10, 2065 }, + { 10, 2063 }, { 10, 2061 }, { 
10, 2059 }, { 10, 2057 }, { 10, 2055 }, + { 10, 2053 }, { 10, 2051 }, { 10, 2049 }, { 10, 2047 }, { 10, 2045 }, + { 10, 2043 }, { 10, 2041 }, { 10, 2039 }, { 10, 2037 }, { 10, 2035 }, + { 10, 2033 }, { 10, 2031 }, { 10, 2029 }, { 10, 2027 }, { 10, 2025 }, + { 10, 2023 }, { 10, 2021 }, { 10, 2019 }, { 10, 2017 }, { 10, 2015 }, + { 10, 2013 }, { 10, 2011 }, { 10, 2009 }, { 10, 2007 }, { 10, 2005 }, + { 10, 2003 }, { 10, 2001 }, { 10, 1999 }, { 10, 1997 }, { 10, 1995 }, + { 10, 1993 }, { 10, 1991 }, { 10, 1989 }, { 10, 1987 }, { 10, 1985 }, + { 10, 1983 }, { 10, 1981 }, { 10, 1979 }, { 10, 1977 }, { 10, 1975 }, + { 10, 1973 }, { 10, 1971 }, { 10, 1969 }, { 10, 1967 }, { 10, 1965 }, + { 10, 1963 }, { 10, 1961 }, { 10, 1959 }, { 10, 1957 }, { 10, 1955 }, + { 10, 1953 }, { 10, 1951 }, { 10, 1949 }, { 10, 1947 }, { 10, 1945 }, + { 10, 1943 }, { 10, 1941 }, { 10, 1939 }, { 10, 1937 }, { 10, 1935 }, + { 10, 1933 }, { 10, 1931 }, { 10, 1929 }, { 10, 1927 }, { 10, 1925 }, + { 10, 1923 }, { 10, 1921 }, { 10, 1919 }, { 10, 1917 }, { 10, 1915 }, + { 10, 1913 }, { 10, 1911 }, { 10, 1909 }, { 10, 1907 }, { 10, 1905 }, + { 10, 1903 }, { 10, 1901 }, { 10, 1899 }, { 10, 1897 }, { 10, 1895 }, + { 10, 1893 }, { 10, 1891 }, { 10, 1889 }, { 10, 1887 }, { 10, 1885 }, + { 10, 1883 }, { 10, 1881 }, { 10, 1879 }, { 10, 1877 }, { 10, 1875 }, + { 10, 1873 }, { 10, 1871 }, { 10, 1869 }, { 10, 1867 }, { 10, 1865 }, + { 10, 1863 }, { 10, 1861 }, { 10, 1859 }, { 10, 1857 }, { 10, 1855 }, + { 10, 1853 }, { 10, 1851 }, { 10, 1849 }, { 10, 1847 }, { 10, 1845 }, + { 10, 1843 }, { 10, 1841 }, { 10, 1839 }, { 10, 1837 }, { 10, 1835 }, + { 10, 1833 }, { 10, 1831 }, { 10, 1829 }, { 10, 1827 }, { 10, 1825 }, + { 10, 1823 }, { 10, 1821 }, { 10, 1819 }, { 10, 1817 }, { 10, 1815 }, + { 10, 1813 }, { 10, 1811 }, { 10, 1809 }, { 10, 1807 }, { 10, 1805 }, + { 10, 1803 }, { 10, 1801 }, { 10, 1799 }, { 10, 1797 }, { 10, 1795 }, + { 10, 1793 }, { 10, 1791 }, { 10, 1789 }, { 10, 1787 }, { 10, 1785 }, + { 10, 1783 }, 
{ 10, 1781 }, { 10, 1779 }, { 10, 1777 }, { 10, 1775 }, + { 10, 1773 }, { 10, 1771 }, { 10, 1769 }, { 10, 1767 }, { 10, 1765 }, + { 10, 1763 }, { 10, 1761 }, { 10, 1759 }, { 10, 1757 }, { 10, 1755 }, + { 10, 1753 }, { 10, 1751 }, { 10, 1749 }, { 10, 1747 }, { 10, 1745 }, + { 10, 1743 }, { 10, 1741 }, { 10, 1739 }, { 10, 1737 }, { 10, 1735 }, + { 10, 1733 }, { 10, 1731 }, { 10, 1729 }, { 10, 1727 }, { 10, 1725 }, + { 10, 1723 }, { 10, 1721 }, { 10, 1719 }, { 10, 1717 }, { 10, 1715 }, + { 10, 1713 }, { 10, 1711 }, { 10, 1709 }, { 10, 1707 }, { 10, 1705 }, + { 10, 1703 }, { 10, 1701 }, { 10, 1699 }, { 10, 1697 }, { 10, 1695 }, + { 10, 1693 }, { 10, 1691 }, { 10, 1689 }, { 10, 1687 }, { 10, 1685 }, + { 10, 1683 }, { 10, 1681 }, { 10, 1679 }, { 10, 1677 }, { 10, 1675 }, + { 10, 1673 }, { 10, 1671 }, { 10, 1669 }, { 10, 1667 }, { 10, 1665 }, + { 10, 1663 }, { 10, 1661 }, { 10, 1659 }, { 10, 1657 }, { 10, 1655 }, + { 10, 1653 }, { 10, 1651 }, { 10, 1649 }, { 10, 1647 }, { 10, 1645 }, + { 10, 1643 }, { 10, 1641 }, { 10, 1639 }, { 10, 1637 }, { 10, 1635 }, + { 10, 1633 }, { 10, 1631 }, { 10, 1629 }, { 10, 1627 }, { 10, 1625 }, + { 10, 1623 }, { 10, 1621 }, { 10, 1619 }, { 10, 1617 }, { 10, 1615 }, + { 10, 1613 }, { 10, 1611 }, { 10, 1609 }, { 10, 1607 }, { 10, 1605 }, + { 10, 1603 }, { 10, 1601 }, { 10, 1599 }, { 10, 1597 }, { 10, 1595 }, + { 10, 1593 }, { 10, 1591 }, { 10, 1589 }, { 10, 1587 }, { 10, 1585 }, + { 10, 1583 }, { 10, 1581 }, { 10, 1579 }, { 10, 1577 }, { 10, 1575 }, + { 10, 1573 }, { 10, 1571 }, { 10, 1569 }, { 10, 1567 }, { 10, 1565 }, + { 10, 1563 }, { 10, 1561 }, { 10, 1559 }, { 10, 1557 }, { 10, 1555 }, + { 10, 1553 }, { 10, 1551 }, { 10, 1549 }, { 10, 1547 }, { 10, 1545 }, + { 10, 1543 }, { 10, 1541 }, { 10, 1539 }, { 10, 1537 }, { 10, 1535 }, + { 10, 1533 }, { 10, 1531 }, { 10, 1529 }, { 10, 1527 }, { 10, 1525 }, + { 10, 1523 }, { 10, 1521 }, { 10, 1519 }, { 10, 1517 }, { 10, 1515 }, + { 10, 1513 }, { 10, 1511 }, { 10, 1509 }, { 10, 1507 }, { 10, 1505 }, 
+ { 10, 1503 }, { 10, 1501 }, { 10, 1499 }, { 10, 1497 }, { 10, 1495 }, + { 10, 1493 }, { 10, 1491 }, { 10, 1489 }, { 10, 1487 }, { 10, 1485 }, + { 10, 1483 }, { 10, 1481 }, { 10, 1479 }, { 10, 1477 }, { 10, 1475 }, + { 10, 1473 }, { 10, 1471 }, { 10, 1469 }, { 10, 1467 }, { 10, 1465 }, + { 10, 1463 }, { 10, 1461 }, { 10, 1459 }, { 10, 1457 }, { 10, 1455 }, + { 10, 1453 }, { 10, 1451 }, { 10, 1449 }, { 10, 1447 }, { 10, 1445 }, + { 10, 1443 }, { 10, 1441 }, { 10, 1439 }, { 10, 1437 }, { 10, 1435 }, + { 10, 1433 }, { 10, 1431 }, { 10, 1429 }, { 10, 1427 }, { 10, 1425 }, + { 10, 1423 }, { 10, 1421 }, { 10, 1419 }, { 10, 1417 }, { 10, 1415 }, + { 10, 1413 }, { 10, 1411 }, { 10, 1409 }, { 10, 1407 }, { 10, 1405 }, + { 10, 1403 }, { 10, 1401 }, { 10, 1399 }, { 10, 1397 }, { 10, 1395 }, + { 10, 1393 }, { 10, 1391 }, { 10, 1389 }, { 10, 1387 }, { 10, 1385 }, + { 10, 1383 }, { 10, 1381 }, { 10, 1379 }, { 10, 1377 }, { 10, 1375 }, + { 10, 1373 }, { 10, 1371 }, { 10, 1369 }, { 10, 1367 }, { 10, 1365 }, + { 10, 1363 }, { 10, 1361 }, { 10, 1359 }, { 10, 1357 }, { 10, 1355 }, + { 10, 1353 }, { 10, 1351 }, { 10, 1349 }, { 10, 1347 }, { 10, 1345 }, + { 10, 1343 }, { 10, 1341 }, { 10, 1339 }, { 10, 1337 }, { 10, 1335 }, + { 10, 1333 }, { 10, 1331 }, { 10, 1329 }, { 10, 1327 }, { 10, 1325 }, + { 10, 1323 }, { 10, 1321 }, { 10, 1319 }, { 10, 1317 }, { 10, 1315 }, + { 10, 1313 }, { 10, 1311 }, { 10, 1309 }, { 10, 1307 }, { 10, 1305 }, + { 10, 1303 }, { 10, 1301 }, { 10, 1299 }, { 10, 1297 }, { 10, 1295 }, + { 10, 1293 }, { 10, 1291 }, { 10, 1289 }, { 10, 1287 }, { 10, 1285 }, + { 10, 1283 }, { 10, 1281 }, { 10, 1279 }, { 10, 1277 }, { 10, 1275 }, + { 10, 1273 }, { 10, 1271 }, { 10, 1269 }, { 10, 1267 }, { 10, 1265 }, + { 10, 1263 }, { 10, 1261 }, { 10, 1259 }, { 10, 1257 }, { 10, 1255 }, + { 10, 1253 }, { 10, 1251 }, { 10, 1249 }, { 10, 1247 }, { 10, 1245 }, + { 10, 1243 }, { 10, 1241 }, { 10, 1239 }, { 10, 1237 }, { 10, 1235 }, + { 10, 1233 }, { 10, 1231 }, { 10, 1229 }, { 10, 1227 
}, { 10, 1225 }, + { 10, 1223 }, { 10, 1221 }, { 10, 1219 }, { 10, 1217 }, { 10, 1215 }, + { 10, 1213 }, { 10, 1211 }, { 10, 1209 }, { 10, 1207 }, { 10, 1205 }, + { 10, 1203 }, { 10, 1201 }, { 10, 1199 }, { 10, 1197 }, { 10, 1195 }, + { 10, 1193 }, { 10, 1191 }, { 10, 1189 }, { 10, 1187 }, { 10, 1185 }, + { 10, 1183 }, { 10, 1181 }, { 10, 1179 }, { 10, 1177 }, { 10, 1175 }, + { 10, 1173 }, { 10, 1171 }, { 10, 1169 }, { 10, 1167 }, { 10, 1165 }, + { 10, 1163 }, { 10, 1161 }, { 10, 1159 }, { 10, 1157 }, { 10, 1155 }, + { 10, 1153 }, { 10, 1151 }, { 10, 1149 }, { 10, 1147 }, { 10, 1145 }, + { 10, 1143 }, { 10, 1141 }, { 10, 1139 }, { 10, 1137 }, { 10, 1135 }, + { 10, 1133 }, { 10, 1131 }, { 10, 1129 }, { 10, 1127 }, { 10, 1125 }, + { 10, 1123 }, { 10, 1121 }, { 10, 1119 }, { 10, 1117 }, { 10, 1115 }, + { 10, 1113 }, { 10, 1111 }, { 10, 1109 }, { 10, 1107 }, { 10, 1105 }, + { 10, 1103 }, { 10, 1101 }, { 10, 1099 }, { 10, 1097 }, { 10, 1095 }, + { 10, 1093 }, { 10, 1091 }, { 10, 1089 }, { 10, 1087 }, { 10, 1085 }, + { 10, 1083 }, { 10, 1081 }, { 10, 1079 }, { 10, 1077 }, { 10, 1075 }, + { 10, 1073 }, { 10, 1071 }, { 10, 1069 }, { 10, 1067 }, { 10, 1065 }, + { 10, 1063 }, { 10, 1061 }, { 10, 1059 }, { 10, 1057 }, { 10, 1055 }, + { 10, 1053 }, { 10, 1051 }, { 10, 1049 }, { 10, 1047 }, { 10, 1045 }, + { 10, 1043 }, { 10, 1041 }, { 10, 1039 }, { 10, 1037 }, { 10, 1035 }, + { 10, 1033 }, { 10, 1031 }, { 10, 1029 }, { 10, 1027 }, { 10, 1025 }, + { 10, 1023 }, { 10, 1021 }, { 10, 1019 }, { 10, 1017 }, { 10, 1015 }, + { 10, 1013 }, { 10, 1011 }, { 10, 1009 }, { 10, 1007 }, { 10, 1005 }, + { 10, 1003 }, { 10, 1001 }, { 10, 999 }, { 10, 997 }, { 10, 995 }, + { 10, 993 }, { 10, 991 }, { 10, 989 }, { 10, 987 }, { 10, 985 }, + { 10, 983 }, { 10, 981 }, { 10, 979 }, { 10, 977 }, { 10, 975 }, + { 10, 973 }, { 10, 971 }, { 10, 969 }, { 10, 967 }, { 10, 965 }, + { 10, 963 }, { 10, 961 }, { 10, 959 }, { 10, 957 }, { 10, 955 }, + { 10, 953 }, { 10, 951 }, { 10, 949 }, { 10, 947 }, { 10, 
945 }, + { 10, 943 }, { 10, 941 }, { 10, 939 }, { 10, 937 }, { 10, 935 }, + { 10, 933 }, { 10, 931 }, { 10, 929 }, { 10, 927 }, { 10, 925 }, + { 10, 923 }, { 10, 921 }, { 10, 919 }, { 10, 917 }, { 10, 915 }, + { 10, 913 }, { 10, 911 }, { 10, 909 }, { 10, 907 }, { 10, 905 }, + { 10, 903 }, { 10, 901 }, { 10, 899 }, { 10, 897 }, { 10, 895 }, + { 10, 893 }, { 10, 891 }, { 10, 889 }, { 10, 887 }, { 10, 885 }, + { 10, 883 }, { 10, 881 }, { 10, 879 }, { 10, 877 }, { 10, 875 }, + { 10, 873 }, { 10, 871 }, { 10, 869 }, { 10, 867 }, { 10, 865 }, + { 10, 863 }, { 10, 861 }, { 10, 859 }, { 10, 857 }, { 10, 855 }, + { 10, 853 }, { 10, 851 }, { 10, 849 }, { 10, 847 }, { 10, 845 }, + { 10, 843 }, { 10, 841 }, { 10, 839 }, { 10, 837 }, { 10, 835 }, + { 10, 833 }, { 10, 831 }, { 10, 829 }, { 10, 827 }, { 10, 825 }, + { 10, 823 }, { 10, 821 }, { 10, 819 }, { 10, 817 }, { 10, 815 }, + { 10, 813 }, { 10, 811 }, { 10, 809 }, { 10, 807 }, { 10, 805 }, + { 10, 803 }, { 10, 801 }, { 10, 799 }, { 10, 797 }, { 10, 795 }, + { 10, 793 }, { 10, 791 }, { 10, 789 }, { 10, 787 }, { 10, 785 }, + { 10, 783 }, { 10, 781 }, { 10, 779 }, { 10, 777 }, { 10, 775 }, + { 10, 773 }, { 10, 771 }, { 10, 769 }, { 10, 767 }, { 10, 765 }, + { 10, 763 }, { 10, 761 }, { 10, 759 }, { 10, 757 }, { 10, 755 }, + { 10, 753 }, { 10, 751 }, { 10, 749 }, { 10, 747 }, { 10, 745 }, + { 10, 743 }, { 10, 741 }, { 10, 739 }, { 10, 737 }, { 10, 735 }, + { 10, 733 }, { 10, 731 }, { 10, 729 }, { 10, 727 }, { 10, 725 }, + { 10, 723 }, { 10, 721 }, { 10, 719 }, { 10, 717 }, { 10, 715 }, + { 10, 713 }, { 10, 711 }, { 10, 709 }, { 10, 707 }, { 10, 705 }, + { 10, 703 }, { 10, 701 }, { 10, 699 }, { 10, 697 }, { 10, 695 }, + { 10, 693 }, { 10, 691 }, { 10, 689 }, { 10, 687 }, { 10, 685 }, + { 10, 683 }, { 10, 681 }, { 10, 679 }, { 10, 677 }, { 10, 675 }, + { 10, 673 }, { 10, 671 }, { 10, 669 }, { 10, 667 }, { 10, 665 }, + { 10, 663 }, { 10, 661 }, { 10, 659 }, { 10, 657 }, { 10, 655 }, + { 10, 653 }, { 10, 651 }, { 10, 649 }, { 10, 
647 }, { 10, 645 }, + { 10, 643 }, { 10, 641 }, { 10, 639 }, { 10, 637 }, { 10, 635 }, + { 10, 633 }, { 10, 631 }, { 10, 629 }, { 10, 627 }, { 10, 625 }, + { 10, 623 }, { 10, 621 }, { 10, 619 }, { 10, 617 }, { 10, 615 }, + { 10, 613 }, { 10, 611 }, { 10, 609 }, { 10, 607 }, { 10, 605 }, + { 10, 603 }, { 10, 601 }, { 10, 599 }, { 10, 597 }, { 10, 595 }, + { 10, 593 }, { 10, 591 }, { 10, 589 }, { 10, 587 }, { 10, 585 }, + { 10, 583 }, { 10, 581 }, { 10, 579 }, { 10, 577 }, { 10, 575 }, + { 10, 573 }, { 10, 571 }, { 10, 569 }, { 10, 567 }, { 10, 565 }, + { 10, 563 }, { 10, 561 }, { 10, 559 }, { 10, 557 }, { 10, 555 }, + { 10, 553 }, { 10, 551 }, { 10, 549 }, { 10, 547 }, { 10, 545 }, + { 10, 543 }, { 10, 541 }, { 10, 539 }, { 10, 537 }, { 10, 535 }, + { 10, 533 }, { 10, 531 }, { 10, 529 }, { 10, 527 }, { 10, 525 }, + { 10, 523 }, { 10, 521 }, { 10, 519 }, { 10, 517 }, { 10, 515 }, + { 10, 513 }, { 10, 511 }, { 10, 509 }, { 10, 507 }, { 10, 505 }, + { 10, 503 }, { 10, 501 }, { 10, 499 }, { 10, 497 }, { 10, 495 }, + { 10, 493 }, { 10, 491 }, { 10, 489 }, { 10, 487 }, { 10, 485 }, + { 10, 483 }, { 10, 481 }, { 10, 479 }, { 10, 477 }, { 10, 475 }, + { 10, 473 }, { 10, 471 }, { 10, 469 }, { 10, 467 }, { 10, 465 }, + { 10, 463 }, { 10, 461 }, { 10, 459 }, { 10, 457 }, { 10, 455 }, + { 10, 453 }, { 10, 451 }, { 10, 449 }, { 10, 447 }, { 10, 445 }, + { 10, 443 }, { 10, 441 }, { 10, 439 }, { 10, 437 }, { 10, 435 }, + { 10, 433 }, { 10, 431 }, { 10, 429 }, { 10, 427 }, { 10, 425 }, + { 10, 423 }, { 10, 421 }, { 10, 419 }, { 10, 417 }, { 10, 415 }, + { 10, 413 }, { 10, 411 }, { 10, 409 }, { 10, 407 }, { 10, 405 }, + { 10, 403 }, { 10, 401 }, { 10, 399 }, { 10, 397 }, { 10, 395 }, + { 10, 393 }, { 10, 391 }, { 10, 389 }, { 10, 387 }, { 10, 385 }, + { 10, 383 }, { 10, 381 }, { 10, 379 }, { 10, 377 }, { 10, 375 }, + { 10, 373 }, { 10, 371 }, { 10, 369 }, { 10, 367 }, { 10, 365 }, + { 10, 363 }, { 10, 361 }, { 10, 359 }, { 10, 357 }, { 10, 355 }, + { 10, 353 }, { 10, 351 }, { 10, 
349 }, { 10, 347 }, { 10, 345 }, + { 10, 343 }, { 10, 341 }, { 10, 339 }, { 10, 337 }, { 10, 335 }, + { 10, 333 }, { 10, 331 }, { 10, 329 }, { 10, 327 }, { 10, 325 }, + { 10, 323 }, { 10, 321 }, { 10, 319 }, { 10, 317 }, { 10, 315 }, + { 10, 313 }, { 10, 311 }, { 10, 309 }, { 10, 307 }, { 10, 305 }, + { 10, 303 }, { 10, 301 }, { 10, 299 }, { 10, 297 }, { 10, 295 }, + { 10, 293 }, { 10, 291 }, { 10, 289 }, { 10, 287 }, { 10, 285 }, + { 10, 283 }, { 10, 281 }, { 10, 279 }, { 10, 277 }, { 10, 275 }, + { 10, 273 }, { 10, 271 }, { 10, 269 }, { 10, 267 }, { 10, 265 }, + { 10, 263 }, { 10, 261 }, { 10, 259 }, { 10, 257 }, { 10, 255 }, + { 10, 253 }, { 10, 251 }, { 10, 249 }, { 10, 247 }, { 10, 245 }, + { 10, 243 }, { 10, 241 }, { 10, 239 }, { 10, 237 }, { 10, 235 }, + { 10, 233 }, { 10, 231 }, { 10, 229 }, { 10, 227 }, { 10, 225 }, + { 10, 223 }, { 10, 221 }, { 10, 219 }, { 10, 217 }, { 10, 215 }, + { 10, 213 }, { 10, 211 }, { 10, 209 }, { 10, 207 }, { 10, 205 }, + { 10, 203 }, { 10, 201 }, { 10, 199 }, { 10, 197 }, { 10, 195 }, + { 10, 193 }, { 10, 191 }, { 10, 189 }, { 10, 187 }, { 10, 185 }, + { 10, 183 }, { 10, 181 }, { 10, 179 }, { 10, 177 }, { 10, 175 }, + { 10, 173 }, { 10, 171 }, { 10, 169 }, { 10, 167 }, { 10, 165 }, + { 10, 163 }, { 10, 161 }, { 10, 159 }, { 10, 157 }, { 10, 155 }, + { 10, 153 }, { 10, 151 }, { 10, 149 }, { 10, 147 }, { 10, 145 }, + { 10, 143 }, { 10, 141 }, { 10, 139 }, { 10, 137 }, { 10, 135 }, + { 10, 133 }, { 10, 131 }, { 10, 129 }, { 10, 127 }, { 10, 125 }, + { 10, 123 }, { 10, 121 }, { 10, 119 }, { 10, 117 }, { 10, 115 }, + { 10, 113 }, { 10, 111 }, { 10, 109 }, { 10, 107 }, { 10, 105 }, + { 10, 103 }, { 10, 101 }, { 10, 99 }, { 10, 97 }, { 10, 95 }, + { 10, 93 }, { 10, 91 }, { 10, 89 }, { 10, 87 }, { 10, 85 }, + { 10, 83 }, { 10, 81 }, { 10, 79 }, { 10, 77 }, { 10, 75 }, + { 10, 73 }, { 10, 71 }, { 10, 69 }, { 10, 67 }, { 10, 65 }, + { 10, 63 }, { 10, 61 }, { 10, 59 }, { 10, 57 }, { 10, 55 }, + { 10, 53 }, { 10, 51 }, { 10, 49 }, { 10, 47 
}, { 10, 45 }, + { 10, 43 }, { 10, 41 }, { 10, 39 }, { 10, 37 }, { 10, 35 }, + { 10, 33 }, { 10, 31 }, { 10, 29 }, { 10, 27 }, { 10, 25 }, + { 10, 23 }, { 10, 21 }, { 10, 19 }, { 10, 17 }, { 10, 15 }, + { 10, 13 }, { 10, 11 }, { 10, 9 }, { 10, 7 }, { 10, 5 }, + { 10, 3 }, { 10, 1 }, { 9, 63 }, { 9, 61 }, { 9, 59 }, + { 9, 57 }, { 9, 55 }, { 9, 53 }, { 9, 51 }, { 9, 49 }, + { 9, 47 }, { 9, 45 }, { 9, 43 }, { 9, 41 }, { 9, 39 }, + { 9, 37 }, { 9, 35 }, { 9, 33 }, { 9, 31 }, { 9, 29 }, + { 9, 27 }, { 9, 25 }, { 9, 23 }, { 9, 21 }, { 9, 19 }, + { 9, 17 }, { 9, 15 }, { 9, 13 }, { 9, 11 }, { 9, 9 }, + { 9, 7 }, { 9, 5 }, { 9, 3 }, { 9, 1 }, { 8, 31 }, + { 8, 29 }, { 8, 27 }, { 8, 25 }, { 8, 23 }, { 8, 21 }, + { 8, 19 }, { 8, 17 }, { 8, 15 }, { 8, 13 }, { 8, 11 }, + { 8, 9 }, { 8, 7 }, { 8, 5 }, { 8, 3 }, { 8, 1 }, + { 7, 15 }, { 7, 13 }, { 7, 11 }, { 7, 9 }, { 7, 7 }, + { 7, 5 }, { 7, 3 }, { 7, 1 }, { 6, 7 }, { 6, 5 }, + { 6, 3 }, { 6, 1 }, { 5, 3 }, { 5, 1 }, { 4, 1 }, + { 3, 1 }, { 2, 1 }, { 1, 1 }, { 0, 0 }, { 1, 0 }, + { 2, 0 }, { 3, 0 }, { 4, 0 }, { 5, 0 }, { 5, 2 }, + { 6, 0 }, { 6, 2 }, { 6, 4 }, { 6, 6 }, { 7, 0 }, + { 7, 2 }, { 7, 4 }, { 7, 6 }, { 7, 8 }, { 7, 10 }, + { 7, 12 }, { 7, 14 }, { 8, 0 }, { 8, 2 }, { 8, 4 }, + { 8, 6 }, { 8, 8 }, { 8, 10 }, { 8, 12 }, { 8, 14 }, + { 8, 16 }, { 8, 18 }, { 8, 20 }, { 8, 22 }, { 8, 24 }, + { 8, 26 }, { 8, 28 }, { 8, 30 }, { 9, 0 }, { 9, 2 }, + { 9, 4 }, { 9, 6 }, { 9, 8 }, { 9, 10 }, { 9, 12 }, + { 9, 14 }, { 9, 16 }, { 9, 18 }, { 9, 20 }, { 9, 22 }, + { 9, 24 }, { 9, 26 }, { 9, 28 }, { 9, 30 }, { 9, 32 }, + { 9, 34 }, { 9, 36 }, { 9, 38 }, { 9, 40 }, { 9, 42 }, + { 9, 44 }, { 9, 46 }, { 9, 48 }, { 9, 50 }, { 9, 52 }, + { 9, 54 }, { 9, 56 }, { 9, 58 }, { 9, 60 }, { 9, 62 }, + { 10, 0 }, { 10, 2 }, { 10, 4 }, { 10, 6 }, { 10, 8 }, + { 10, 10 }, { 10, 12 }, { 10, 14 }, { 10, 16 }, { 10, 18 }, + { 10, 20 }, { 10, 22 }, { 10, 24 }, { 10, 26 }, { 10, 28 }, + { 10, 30 }, { 10, 32 }, { 10, 34 }, { 10, 36 }, { 10, 38 }, + { 10, 
40 }, { 10, 42 }, { 10, 44 }, { 10, 46 }, { 10, 48 }, + { 10, 50 }, { 10, 52 }, { 10, 54 }, { 10, 56 }, { 10, 58 }, + { 10, 60 }, { 10, 62 }, { 10, 64 }, { 10, 66 }, { 10, 68 }, + { 10, 70 }, { 10, 72 }, { 10, 74 }, { 10, 76 }, { 10, 78 }, + { 10, 80 }, { 10, 82 }, { 10, 84 }, { 10, 86 }, { 10, 88 }, + { 10, 90 }, { 10, 92 }, { 10, 94 }, { 10, 96 }, { 10, 98 }, + { 10, 100 }, { 10, 102 }, { 10, 104 }, { 10, 106 }, { 10, 108 }, + { 10, 110 }, { 10, 112 }, { 10, 114 }, { 10, 116 }, { 10, 118 }, + { 10, 120 }, { 10, 122 }, { 10, 124 }, { 10, 126 }, { 10, 128 }, + { 10, 130 }, { 10, 132 }, { 10, 134 }, { 10, 136 }, { 10, 138 }, + { 10, 140 }, { 10, 142 }, { 10, 144 }, { 10, 146 }, { 10, 148 }, + { 10, 150 }, { 10, 152 }, { 10, 154 }, { 10, 156 }, { 10, 158 }, + { 10, 160 }, { 10, 162 }, { 10, 164 }, { 10, 166 }, { 10, 168 }, + { 10, 170 }, { 10, 172 }, { 10, 174 }, { 10, 176 }, { 10, 178 }, + { 10, 180 }, { 10, 182 }, { 10, 184 }, { 10, 186 }, { 10, 188 }, + { 10, 190 }, { 10, 192 }, { 10, 194 }, { 10, 196 }, { 10, 198 }, + { 10, 200 }, { 10, 202 }, { 10, 204 }, { 10, 206 }, { 10, 208 }, + { 10, 210 }, { 10, 212 }, { 10, 214 }, { 10, 216 }, { 10, 218 }, + { 10, 220 }, { 10, 222 }, { 10, 224 }, { 10, 226 }, { 10, 228 }, + { 10, 230 }, { 10, 232 }, { 10, 234 }, { 10, 236 }, { 10, 238 }, + { 10, 240 }, { 10, 242 }, { 10, 244 }, { 10, 246 }, { 10, 248 }, + { 10, 250 }, { 10, 252 }, { 10, 254 }, { 10, 256 }, { 10, 258 }, + { 10, 260 }, { 10, 262 }, { 10, 264 }, { 10, 266 }, { 10, 268 }, + { 10, 270 }, { 10, 272 }, { 10, 274 }, { 10, 276 }, { 10, 278 }, + { 10, 280 }, { 10, 282 }, { 10, 284 }, { 10, 286 }, { 10, 288 }, + { 10, 290 }, { 10, 292 }, { 10, 294 }, { 10, 296 }, { 10, 298 }, + { 10, 300 }, { 10, 302 }, { 10, 304 }, { 10, 306 }, { 10, 308 }, + { 10, 310 }, { 10, 312 }, { 10, 314 }, { 10, 316 }, { 10, 318 }, + { 10, 320 }, { 10, 322 }, { 10, 324 }, { 10, 326 }, { 10, 328 }, + { 10, 330 }, { 10, 332 }, { 10, 334 }, { 10, 336 }, { 10, 338 }, + { 10, 340 }, { 10, 342 }, 
{ 10, 344 }, { 10, 346 }, { 10, 348 }, + { 10, 350 }, { 10, 352 }, { 10, 354 }, { 10, 356 }, { 10, 358 }, + { 10, 360 }, { 10, 362 }, { 10, 364 }, { 10, 366 }, { 10, 368 }, + { 10, 370 }, { 10, 372 }, { 10, 374 }, { 10, 376 }, { 10, 378 }, + { 10, 380 }, { 10, 382 }, { 10, 384 }, { 10, 386 }, { 10, 388 }, + { 10, 390 }, { 10, 392 }, { 10, 394 }, { 10, 396 }, { 10, 398 }, + { 10, 400 }, { 10, 402 }, { 10, 404 }, { 10, 406 }, { 10, 408 }, + { 10, 410 }, { 10, 412 }, { 10, 414 }, { 10, 416 }, { 10, 418 }, + { 10, 420 }, { 10, 422 }, { 10, 424 }, { 10, 426 }, { 10, 428 }, + { 10, 430 }, { 10, 432 }, { 10, 434 }, { 10, 436 }, { 10, 438 }, + { 10, 440 }, { 10, 442 }, { 10, 444 }, { 10, 446 }, { 10, 448 }, + { 10, 450 }, { 10, 452 }, { 10, 454 }, { 10, 456 }, { 10, 458 }, + { 10, 460 }, { 10, 462 }, { 10, 464 }, { 10, 466 }, { 10, 468 }, + { 10, 470 }, { 10, 472 }, { 10, 474 }, { 10, 476 }, { 10, 478 }, + { 10, 480 }, { 10, 482 }, { 10, 484 }, { 10, 486 }, { 10, 488 }, + { 10, 490 }, { 10, 492 }, { 10, 494 }, { 10, 496 }, { 10, 498 }, + { 10, 500 }, { 10, 502 }, { 10, 504 }, { 10, 506 }, { 10, 508 }, + { 10, 510 }, { 10, 512 }, { 10, 514 }, { 10, 516 }, { 10, 518 }, + { 10, 520 }, { 10, 522 }, { 10, 524 }, { 10, 526 }, { 10, 528 }, + { 10, 530 }, { 10, 532 }, { 10, 534 }, { 10, 536 }, { 10, 538 }, + { 10, 540 }, { 10, 542 }, { 10, 544 }, { 10, 546 }, { 10, 548 }, + { 10, 550 }, { 10, 552 }, { 10, 554 }, { 10, 556 }, { 10, 558 }, + { 10, 560 }, { 10, 562 }, { 10, 564 }, { 10, 566 }, { 10, 568 }, + { 10, 570 }, { 10, 572 }, { 10, 574 }, { 10, 576 }, { 10, 578 }, + { 10, 580 }, { 10, 582 }, { 10, 584 }, { 10, 586 }, { 10, 588 }, + { 10, 590 }, { 10, 592 }, { 10, 594 }, { 10, 596 }, { 10, 598 }, + { 10, 600 }, { 10, 602 }, { 10, 604 }, { 10, 606 }, { 10, 608 }, + { 10, 610 }, { 10, 612 }, { 10, 614 }, { 10, 616 }, { 10, 618 }, + { 10, 620 }, { 10, 622 }, { 10, 624 }, { 10, 626 }, { 10, 628 }, + { 10, 630 }, { 10, 632 }, { 10, 634 }, { 10, 636 }, { 10, 638 }, + { 10, 640 }, { 
10, 642 }, { 10, 644 }, { 10, 646 }, { 10, 648 }, + { 10, 650 }, { 10, 652 }, { 10, 654 }, { 10, 656 }, { 10, 658 }, + { 10, 660 }, { 10, 662 }, { 10, 664 }, { 10, 666 }, { 10, 668 }, + { 10, 670 }, { 10, 672 }, { 10, 674 }, { 10, 676 }, { 10, 678 }, + { 10, 680 }, { 10, 682 }, { 10, 684 }, { 10, 686 }, { 10, 688 }, + { 10, 690 }, { 10, 692 }, { 10, 694 }, { 10, 696 }, { 10, 698 }, + { 10, 700 }, { 10, 702 }, { 10, 704 }, { 10, 706 }, { 10, 708 }, + { 10, 710 }, { 10, 712 }, { 10, 714 }, { 10, 716 }, { 10, 718 }, + { 10, 720 }, { 10, 722 }, { 10, 724 }, { 10, 726 }, { 10, 728 }, + { 10, 730 }, { 10, 732 }, { 10, 734 }, { 10, 736 }, { 10, 738 }, + { 10, 740 }, { 10, 742 }, { 10, 744 }, { 10, 746 }, { 10, 748 }, + { 10, 750 }, { 10, 752 }, { 10, 754 }, { 10, 756 }, { 10, 758 }, + { 10, 760 }, { 10, 762 }, { 10, 764 }, { 10, 766 }, { 10, 768 }, + { 10, 770 }, { 10, 772 }, { 10, 774 }, { 10, 776 }, { 10, 778 }, + { 10, 780 }, { 10, 782 }, { 10, 784 }, { 10, 786 }, { 10, 788 }, + { 10, 790 }, { 10, 792 }, { 10, 794 }, { 10, 796 }, { 10, 798 }, + { 10, 800 }, { 10, 802 }, { 10, 804 }, { 10, 806 }, { 10, 808 }, + { 10, 810 }, { 10, 812 }, { 10, 814 }, { 10, 816 }, { 10, 818 }, + { 10, 820 }, { 10, 822 }, { 10, 824 }, { 10, 826 }, { 10, 828 }, + { 10, 830 }, { 10, 832 }, { 10, 834 }, { 10, 836 }, { 10, 838 }, + { 10, 840 }, { 10, 842 }, { 10, 844 }, { 10, 846 }, { 10, 848 }, + { 10, 850 }, { 10, 852 }, { 10, 854 }, { 10, 856 }, { 10, 858 }, + { 10, 860 }, { 10, 862 }, { 10, 864 }, { 10, 866 }, { 10, 868 }, + { 10, 870 }, { 10, 872 }, { 10, 874 }, { 10, 876 }, { 10, 878 }, + { 10, 880 }, { 10, 882 }, { 10, 884 }, { 10, 886 }, { 10, 888 }, + { 10, 890 }, { 10, 892 }, { 10, 894 }, { 10, 896 }, { 10, 898 }, + { 10, 900 }, { 10, 902 }, { 10, 904 }, { 10, 906 }, { 10, 908 }, + { 10, 910 }, { 10, 912 }, { 10, 914 }, { 10, 916 }, { 10, 918 }, + { 10, 920 }, { 10, 922 }, { 10, 924 }, { 10, 926 }, { 10, 928 }, + { 10, 930 }, { 10, 932 }, { 10, 934 }, { 10, 936 }, { 10, 938 }, + { 
10, 940 }, { 10, 942 }, { 10, 944 }, { 10, 946 }, { 10, 948 }, + { 10, 950 }, { 10, 952 }, { 10, 954 }, { 10, 956 }, { 10, 958 }, + { 10, 960 }, { 10, 962 }, { 10, 964 }, { 10, 966 }, { 10, 968 }, + { 10, 970 }, { 10, 972 }, { 10, 974 }, { 10, 976 }, { 10, 978 }, + { 10, 980 }, { 10, 982 }, { 10, 984 }, { 10, 986 }, { 10, 988 }, + { 10, 990 }, { 10, 992 }, { 10, 994 }, { 10, 996 }, { 10, 998 }, + { 10, 1000 }, { 10, 1002 }, { 10, 1004 }, { 10, 1006 }, { 10, 1008 }, + { 10, 1010 }, { 10, 1012 }, { 10, 1014 }, { 10, 1016 }, { 10, 1018 }, + { 10, 1020 }, { 10, 1022 }, { 10, 1024 }, { 10, 1026 }, { 10, 1028 }, + { 10, 1030 }, { 10, 1032 }, { 10, 1034 }, { 10, 1036 }, { 10, 1038 }, + { 10, 1040 }, { 10, 1042 }, { 10, 1044 }, { 10, 1046 }, { 10, 1048 }, + { 10, 1050 }, { 10, 1052 }, { 10, 1054 }, { 10, 1056 }, { 10, 1058 }, + { 10, 1060 }, { 10, 1062 }, { 10, 1064 }, { 10, 1066 }, { 10, 1068 }, + { 10, 1070 }, { 10, 1072 }, { 10, 1074 }, { 10, 1076 }, { 10, 1078 }, + { 10, 1080 }, { 10, 1082 }, { 10, 1084 }, { 10, 1086 }, { 10, 1088 }, + { 10, 1090 }, { 10, 1092 }, { 10, 1094 }, { 10, 1096 }, { 10, 1098 }, + { 10, 1100 }, { 10, 1102 }, { 10, 1104 }, { 10, 1106 }, { 10, 1108 }, + { 10, 1110 }, { 10, 1112 }, { 10, 1114 }, { 10, 1116 }, { 10, 1118 }, + { 10, 1120 }, { 10, 1122 }, { 10, 1124 }, { 10, 1126 }, { 10, 1128 }, + { 10, 1130 }, { 10, 1132 }, { 10, 1134 }, { 10, 1136 }, { 10, 1138 }, + { 10, 1140 }, { 10, 1142 }, { 10, 1144 }, { 10, 1146 }, { 10, 1148 }, + { 10, 1150 }, { 10, 1152 }, { 10, 1154 }, { 10, 1156 }, { 10, 1158 }, + { 10, 1160 }, { 10, 1162 }, { 10, 1164 }, { 10, 1166 }, { 10, 1168 }, + { 10, 1170 }, { 10, 1172 }, { 10, 1174 }, { 10, 1176 }, { 10, 1178 }, + { 10, 1180 }, { 10, 1182 }, { 10, 1184 }, { 10, 1186 }, { 10, 1188 }, + { 10, 1190 }, { 10, 1192 }, { 10, 1194 }, { 10, 1196 }, { 10, 1198 }, + { 10, 1200 }, { 10, 1202 }, { 10, 1204 }, { 10, 1206 }, { 10, 1208 }, + { 10, 1210 }, { 10, 1212 }, { 10, 1214 }, { 10, 1216 }, { 10, 1218 }, + { 10, 1220 }, { 
10, 1222 }, { 10, 1224 }, { 10, 1226 }, { 10, 1228 }, + { 10, 1230 }, { 10, 1232 }, { 10, 1234 }, { 10, 1236 }, { 10, 1238 }, + { 10, 1240 }, { 10, 1242 }, { 10, 1244 }, { 10, 1246 }, { 10, 1248 }, + { 10, 1250 }, { 10, 1252 }, { 10, 1254 }, { 10, 1256 }, { 10, 1258 }, + { 10, 1260 }, { 10, 1262 }, { 10, 1264 }, { 10, 1266 }, { 10, 1268 }, + { 10, 1270 }, { 10, 1272 }, { 10, 1274 }, { 10, 1276 }, { 10, 1278 }, + { 10, 1280 }, { 10, 1282 }, { 10, 1284 }, { 10, 1286 }, { 10, 1288 }, + { 10, 1290 }, { 10, 1292 }, { 10, 1294 }, { 10, 1296 }, { 10, 1298 }, + { 10, 1300 }, { 10, 1302 }, { 10, 1304 }, { 10, 1306 }, { 10, 1308 }, + { 10, 1310 }, { 10, 1312 }, { 10, 1314 }, { 10, 1316 }, { 10, 1318 }, + { 10, 1320 }, { 10, 1322 }, { 10, 1324 }, { 10, 1326 }, { 10, 1328 }, + { 10, 1330 }, { 10, 1332 }, { 10, 1334 }, { 10, 1336 }, { 10, 1338 }, + { 10, 1340 }, { 10, 1342 }, { 10, 1344 }, { 10, 1346 }, { 10, 1348 }, + { 10, 1350 }, { 10, 1352 }, { 10, 1354 }, { 10, 1356 }, { 10, 1358 }, + { 10, 1360 }, { 10, 1362 }, { 10, 1364 }, { 10, 1366 }, { 10, 1368 }, + { 10, 1370 }, { 10, 1372 }, { 10, 1374 }, { 10, 1376 }, { 10, 1378 }, + { 10, 1380 }, { 10, 1382 }, { 10, 1384 }, { 10, 1386 }, { 10, 1388 }, + { 10, 1390 }, { 10, 1392 }, { 10, 1394 }, { 10, 1396 }, { 10, 1398 }, + { 10, 1400 }, { 10, 1402 }, { 10, 1404 }, { 10, 1406 }, { 10, 1408 }, + { 10, 1410 }, { 10, 1412 }, { 10, 1414 }, { 10, 1416 }, { 10, 1418 }, + { 10, 1420 }, { 10, 1422 }, { 10, 1424 }, { 10, 1426 }, { 10, 1428 }, + { 10, 1430 }, { 10, 1432 }, { 10, 1434 }, { 10, 1436 }, { 10, 1438 }, + { 10, 1440 }, { 10, 1442 }, { 10, 1444 }, { 10, 1446 }, { 10, 1448 }, + { 10, 1450 }, { 10, 1452 }, { 10, 1454 }, { 10, 1456 }, { 10, 1458 }, + { 10, 1460 }, { 10, 1462 }, { 10, 1464 }, { 10, 1466 }, { 10, 1468 }, + { 10, 1470 }, { 10, 1472 }, { 10, 1474 }, { 10, 1476 }, { 10, 1478 }, + { 10, 1480 }, { 10, 1482 }, { 10, 1484 }, { 10, 1486 }, { 10, 1488 }, + { 10, 1490 }, { 10, 1492 }, { 10, 1494 }, { 10, 1496 }, { 10, 1498 }, + 
{ 10, 1500 }, { 10, 1502 }, { 10, 1504 }, { 10, 1506 }, { 10, 1508 }, + { 10, 1510 }, { 10, 1512 }, { 10, 1514 }, { 10, 1516 }, { 10, 1518 }, + { 10, 1520 }, { 10, 1522 }, { 10, 1524 }, { 10, 1526 }, { 10, 1528 }, + { 10, 1530 }, { 10, 1532 }, { 10, 1534 }, { 10, 1536 }, { 10, 1538 }, + { 10, 1540 }, { 10, 1542 }, { 10, 1544 }, { 10, 1546 }, { 10, 1548 }, + { 10, 1550 }, { 10, 1552 }, { 10, 1554 }, { 10, 1556 }, { 10, 1558 }, + { 10, 1560 }, { 10, 1562 }, { 10, 1564 }, { 10, 1566 }, { 10, 1568 }, + { 10, 1570 }, { 10, 1572 }, { 10, 1574 }, { 10, 1576 }, { 10, 1578 }, + { 10, 1580 }, { 10, 1582 }, { 10, 1584 }, { 10, 1586 }, { 10, 1588 }, + { 10, 1590 }, { 10, 1592 }, { 10, 1594 }, { 10, 1596 }, { 10, 1598 }, + { 10, 1600 }, { 10, 1602 }, { 10, 1604 }, { 10, 1606 }, { 10, 1608 }, + { 10, 1610 }, { 10, 1612 }, { 10, 1614 }, { 10, 1616 }, { 10, 1618 }, + { 10, 1620 }, { 10, 1622 }, { 10, 1624 }, { 10, 1626 }, { 10, 1628 }, + { 10, 1630 }, { 10, 1632 }, { 10, 1634 }, { 10, 1636 }, { 10, 1638 }, + { 10, 1640 }, { 10, 1642 }, { 10, 1644 }, { 10, 1646 }, { 10, 1648 }, + { 10, 1650 }, { 10, 1652 }, { 10, 1654 }, { 10, 1656 }, { 10, 1658 }, + { 10, 1660 }, { 10, 1662 }, { 10, 1664 }, { 10, 1666 }, { 10, 1668 }, + { 10, 1670 }, { 10, 1672 }, { 10, 1674 }, { 10, 1676 }, { 10, 1678 }, + { 10, 1680 }, { 10, 1682 }, { 10, 1684 }, { 10, 1686 }, { 10, 1688 }, + { 10, 1690 }, { 10, 1692 }, { 10, 1694 }, { 10, 1696 }, { 10, 1698 }, + { 10, 1700 }, { 10, 1702 }, { 10, 1704 }, { 10, 1706 }, { 10, 1708 }, + { 10, 1710 }, { 10, 1712 }, { 10, 1714 }, { 10, 1716 }, { 10, 1718 }, + { 10, 1720 }, { 10, 1722 }, { 10, 1724 }, { 10, 1726 }, { 10, 1728 }, + { 10, 1730 }, { 10, 1732 }, { 10, 1734 }, { 10, 1736 }, { 10, 1738 }, + { 10, 1740 }, { 10, 1742 }, { 10, 1744 }, { 10, 1746 }, { 10, 1748 }, + { 10, 1750 }, { 10, 1752 }, { 10, 1754 }, { 10, 1756 }, { 10, 1758 }, + { 10, 1760 }, { 10, 1762 }, { 10, 1764 }, { 10, 1766 }, { 10, 1768 }, + { 10, 1770 }, { 10, 1772 }, { 10, 1774 }, { 10, 1776 }, 
{ 10, 1778 }, + { 10, 1780 }, { 10, 1782 }, { 10, 1784 }, { 10, 1786 }, { 10, 1788 }, + { 10, 1790 }, { 10, 1792 }, { 10, 1794 }, { 10, 1796 }, { 10, 1798 }, + { 10, 1800 }, { 10, 1802 }, { 10, 1804 }, { 10, 1806 }, { 10, 1808 }, + { 10, 1810 }, { 10, 1812 }, { 10, 1814 }, { 10, 1816 }, { 10, 1818 }, + { 10, 1820 }, { 10, 1822 }, { 10, 1824 }, { 10, 1826 }, { 10, 1828 }, + { 10, 1830 }, { 10, 1832 }, { 10, 1834 }, { 10, 1836 }, { 10, 1838 }, + { 10, 1840 }, { 10, 1842 }, { 10, 1844 }, { 10, 1846 }, { 10, 1848 }, + { 10, 1850 }, { 10, 1852 }, { 10, 1854 }, { 10, 1856 }, { 10, 1858 }, + { 10, 1860 }, { 10, 1862 }, { 10, 1864 }, { 10, 1866 }, { 10, 1868 }, + { 10, 1870 }, { 10, 1872 }, { 10, 1874 }, { 10, 1876 }, { 10, 1878 }, + { 10, 1880 }, { 10, 1882 }, { 10, 1884 }, { 10, 1886 }, { 10, 1888 }, + { 10, 1890 }, { 10, 1892 }, { 10, 1894 }, { 10, 1896 }, { 10, 1898 }, + { 10, 1900 }, { 10, 1902 }, { 10, 1904 }, { 10, 1906 }, { 10, 1908 }, + { 10, 1910 }, { 10, 1912 }, { 10, 1914 }, { 10, 1916 }, { 10, 1918 }, + { 10, 1920 }, { 10, 1922 }, { 10, 1924 }, { 10, 1926 }, { 10, 1928 }, + { 10, 1930 }, { 10, 1932 }, { 10, 1934 }, { 10, 1936 }, { 10, 1938 }, + { 10, 1940 }, { 10, 1942 }, { 10, 1944 }, { 10, 1946 }, { 10, 1948 }, + { 10, 1950 }, { 10, 1952 }, { 10, 1954 }, { 10, 1956 }, { 10, 1958 }, + { 10, 1960 }, { 10, 1962 }, { 10, 1964 }, { 10, 1966 }, { 10, 1968 }, + { 10, 1970 }, { 10, 1972 }, { 10, 1974 }, { 10, 1976 }, { 10, 1978 }, + { 10, 1980 }, { 10, 1982 }, { 10, 1984 }, { 10, 1986 }, { 10, 1988 }, + { 10, 1990 }, { 10, 1992 }, { 10, 1994 }, { 10, 1996 }, { 10, 1998 }, + { 10, 2000 }, { 10, 2002 }, { 10, 2004 }, { 10, 2006 }, { 10, 2008 }, + { 10, 2010 }, { 10, 2012 }, { 10, 2014 }, { 10, 2016 }, { 10, 2018 }, + { 10, 2020 }, { 10, 2022 }, { 10, 2024 }, { 10, 2026 }, { 10, 2028 }, + { 10, 2030 }, { 10, 2032 }, { 10, 2034 }, { 10, 2036 }, { 10, 2038 }, + { 10, 2040 }, { 10, 2042 }, { 10, 2044 }, { 10, 2046 }, { 10, 2048 }, + { 10, 2050 }, { 10, 2052 }, { 10, 2054 
}, { 10, 2056 }, { 10, 2058 }, + { 10, 2060 }, { 10, 2062 }, { 10, 2064 }, { 10, 2066 }, { 10, 2068 }, + { 10, 2070 }, { 10, 2072 }, { 10, 2074 }, { 10, 2076 }, { 10, 2078 }, + { 10, 2080 }, { 10, 2082 }, { 10, 2084 }, { 10, 2086 }, { 10, 2088 }, + { 10, 2090 }, { 10, 2092 }, { 10, 2094 }, { 10, 2096 }, { 10, 2098 }, + { 10, 2100 }, { 10, 2102 }, { 10, 2104 }, { 10, 2106 }, { 10, 2108 }, + { 10, 2110 }, { 10, 2112 }, { 10, 2114 }, { 10, 2116 }, { 10, 2118 }, + { 10, 2120 }, { 10, 2122 }, { 10, 2124 }, { 10, 2126 }, { 10, 2128 }, + { 10, 2130 }, { 10, 2132 }, { 10, 2134 }, { 10, 2136 }, { 10, 2138 }, + { 10, 2140 }, { 10, 2142 }, { 10, 2144 }, { 10, 2146 }, { 10, 2148 }, + { 10, 2150 }, { 10, 2152 }, { 10, 2154 }, { 10, 2156 }, { 10, 2158 }, + { 10, 2160 }, { 10, 2162 }, { 10, 2164 }, { 10, 2166 }, { 10, 2168 }, + { 10, 2170 }, { 10, 2172 }, { 10, 2174 }, { 10, 2176 }, { 10, 2178 }, + { 10, 2180 }, { 10, 2182 }, { 10, 2184 }, { 10, 2186 }, { 10, 2188 }, + { 10, 2190 }, { 10, 2192 }, { 10, 2194 }, { 10, 2196 }, { 10, 2198 }, + { 10, 2200 }, { 10, 2202 }, { 10, 2204 }, { 10, 2206 }, { 10, 2208 }, + { 10, 2210 }, { 10, 2212 }, { 10, 2214 }, { 10, 2216 }, { 10, 2218 }, + { 10, 2220 }, { 10, 2222 }, { 10, 2224 }, { 10, 2226 }, { 10, 2228 }, + { 10, 2230 }, { 10, 2232 }, { 10, 2234 }, { 10, 2236 }, { 10, 2238 }, + { 10, 2240 }, { 10, 2242 }, { 10, 2244 }, { 10, 2246 }, { 10, 2248 }, + { 10, 2250 }, { 10, 2252 }, { 10, 2254 }, { 10, 2256 }, { 10, 2258 }, + { 10, 2260 }, { 10, 2262 }, { 10, 2264 }, { 10, 2266 }, { 10, 2268 }, + { 10, 2270 }, { 10, 2272 }, { 10, 2274 }, { 10, 2276 }, { 10, 2278 }, + { 10, 2280 }, { 10, 2282 }, { 10, 2284 }, { 10, 2286 }, { 10, 2288 }, + { 10, 2290 }, { 10, 2292 }, { 10, 2294 }, { 10, 2296 }, { 10, 2298 }, + { 10, 2300 }, { 10, 2302 }, { 10, 2304 }, { 10, 2306 }, { 10, 2308 }, + { 10, 2310 }, { 10, 2312 }, { 10, 2314 }, { 10, 2316 }, { 10, 2318 }, + { 10, 2320 }, { 10, 2322 }, { 10, 2324 }, { 10, 2326 }, { 10, 2328 }, + { 10, 2330 }, { 10, 
2332 }, { 10, 2334 }, { 10, 2336 }, { 10, 2338 }, + { 10, 2340 }, { 10, 2342 }, { 10, 2344 }, { 10, 2346 }, { 10, 2348 }, + { 10, 2350 }, { 10, 2352 }, { 10, 2354 }, { 10, 2356 }, { 10, 2358 }, + { 10, 2360 }, { 10, 2362 }, { 10, 2364 }, { 10, 2366 }, { 10, 2368 }, + { 10, 2370 }, { 10, 2372 }, { 10, 2374 }, { 10, 2376 }, { 10, 2378 }, + { 10, 2380 }, { 10, 2382 }, { 10, 2384 }, { 10, 2386 }, { 10, 2388 }, + { 10, 2390 }, { 10, 2392 }, { 10, 2394 }, { 10, 2396 }, { 10, 2398 }, + { 10, 2400 }, { 10, 2402 }, { 10, 2404 }, { 10, 2406 }, { 10, 2408 }, + { 10, 2410 }, { 10, 2412 }, { 10, 2414 }, { 10, 2416 }, { 10, 2418 }, + { 10, 2420 }, { 10, 2422 }, { 10, 2424 }, { 10, 2426 }, { 10, 2428 }, + { 10, 2430 }, { 10, 2432 }, { 10, 2434 }, { 10, 2436 }, { 10, 2438 }, + { 10, 2440 }, { 10, 2442 }, { 10, 2444 }, { 10, 2446 }, { 10, 2448 }, + { 10, 2450 }, { 10, 2452 }, { 10, 2454 }, { 10, 2456 }, { 10, 2458 }, + { 10, 2460 }, { 10, 2462 }, { 10, 2464 }, { 10, 2466 }, { 10, 2468 }, + { 10, 2470 }, { 10, 2472 }, { 10, 2474 }, { 10, 2476 }, { 10, 2478 }, + { 10, 2480 }, { 10, 2482 }, { 10, 2484 }, { 10, 2486 }, { 10, 2488 }, + { 10, 2490 }, { 10, 2492 }, { 10, 2494 }, { 10, 2496 }, { 10, 2498 }, + { 10, 2500 }, { 10, 2502 }, { 10, 2504 }, { 10, 2506 }, { 10, 2508 }, + { 10, 2510 }, { 10, 2512 }, { 10, 2514 }, { 10, 2516 }, { 10, 2518 }, + { 10, 2520 }, { 10, 2522 }, { 10, 2524 }, { 10, 2526 }, { 10, 2528 }, + { 10, 2530 }, { 10, 2532 }, { 10, 2534 }, { 10, 2536 }, { 10, 2538 }, + { 10, 2540 }, { 10, 2542 }, { 10, 2544 }, { 10, 2546 }, { 10, 2548 }, + { 10, 2550 }, { 10, 2552 }, { 10, 2554 }, { 10, 2556 }, { 10, 2558 }, + { 10, 2560 }, { 10, 2562 }, { 10, 2564 }, { 10, 2566 }, { 10, 2568 }, + { 10, 2570 }, { 10, 2572 }, { 10, 2574 }, { 10, 2576 }, { 10, 2578 }, + { 10, 2580 }, { 10, 2582 }, { 10, 2584 }, { 10, 2586 }, { 10, 2588 }, + { 10, 2590 }, { 10, 2592 }, { 10, 2594 }, { 10, 2596 }, { 10, 2598 }, + { 10, 2600 }, { 10, 2602 }, { 10, 2604 }, { 10, 2606 }, { 10, 2608 }, + { 
10, 2610 }, { 10, 2612 }, { 10, 2614 }, { 10, 2616 }, { 10, 2618 }, + { 10, 2620 }, { 10, 2622 }, { 10, 2624 }, { 10, 2626 }, { 10, 2628 }, + { 10, 2630 }, { 10, 2632 }, { 10, 2634 }, { 10, 2636 }, { 10, 2638 }, + { 10, 2640 }, { 10, 2642 }, { 10, 2644 }, { 10, 2646 }, { 10, 2648 }, + { 10, 2650 }, { 10, 2652 }, { 10, 2654 }, { 10, 2656 }, { 10, 2658 }, + { 10, 2660 }, { 10, 2662 }, { 10, 2664 }, { 10, 2666 }, { 10, 2668 }, + { 10, 2670 }, { 10, 2672 }, { 10, 2674 }, { 10, 2676 }, { 10, 2678 }, + { 10, 2680 }, { 10, 2682 }, { 10, 2684 }, { 10, 2686 }, { 10, 2688 }, + { 10, 2690 }, { 10, 2692 }, { 10, 2694 }, { 10, 2696 }, { 10, 2698 }, + { 10, 2700 }, { 10, 2702 }, { 10, 2704 }, { 10, 2706 }, { 10, 2708 }, + { 10, 2710 }, { 10, 2712 }, { 10, 2714 }, { 10, 2716 }, { 10, 2718 }, + { 10, 2720 }, { 10, 2722 }, { 10, 2724 }, { 10, 2726 }, { 10, 2728 }, + { 10, 2730 }, { 10, 2732 }, { 10, 2734 }, { 10, 2736 }, { 10, 2738 }, + { 10, 2740 }, { 10, 2742 }, { 10, 2744 }, { 10, 2746 }, { 10, 2748 }, + { 10, 2750 }, { 10, 2752 }, { 10, 2754 }, { 10, 2756 }, { 10, 2758 }, + { 10, 2760 }, { 10, 2762 }, { 10, 2764 }, { 10, 2766 }, { 10, 2768 }, + { 10, 2770 }, { 10, 2772 }, { 10, 2774 }, { 10, 2776 }, { 10, 2778 }, + { 10, 2780 }, { 10, 2782 }, { 10, 2784 }, { 10, 2786 }, { 10, 2788 }, + { 10, 2790 }, { 10, 2792 }, { 10, 2794 }, { 10, 2796 }, { 10, 2798 }, + { 10, 2800 }, { 10, 2802 }, { 10, 2804 }, { 10, 2806 }, { 10, 2808 }, + { 10, 2810 }, { 10, 2812 }, { 10, 2814 }, { 10, 2816 }, { 10, 2818 }, + { 10, 2820 }, { 10, 2822 }, { 10, 2824 }, { 10, 2826 }, { 10, 2828 }, + { 10, 2830 }, { 10, 2832 }, { 10, 2834 }, { 10, 2836 }, { 10, 2838 }, + { 10, 2840 }, { 10, 2842 }, { 10, 2844 }, { 10, 2846 }, { 10, 2848 }, + { 10, 2850 }, { 10, 2852 }, { 10, 2854 }, { 10, 2856 }, { 10, 2858 }, + { 10, 2860 }, { 10, 2862 }, { 10, 2864 }, { 10, 2866 }, { 10, 2868 }, + { 10, 2870 }, { 10, 2872 }, { 10, 2874 }, { 10, 2876 }, { 10, 2878 }, + { 10, 2880 }, { 10, 2882 }, { 10, 2884 }, { 10, 2886 }, { 
10, 2888 }, + { 10, 2890 }, { 10, 2892 }, { 10, 2894 }, { 10, 2896 }, { 10, 2898 }, + { 10, 2900 }, { 10, 2902 }, { 10, 2904 }, { 10, 2906 }, { 10, 2908 }, + { 10, 2910 }, { 10, 2912 }, { 10, 2914 }, { 10, 2916 }, { 10, 2918 }, + { 10, 2920 }, { 10, 2922 }, { 10, 2924 }, { 10, 2926 }, { 10, 2928 }, + { 10, 2930 }, { 10, 2932 }, { 10, 2934 }, { 10, 2936 }, { 10, 2938 }, + { 10, 2940 }, { 10, 2942 }, { 10, 2944 }, { 10, 2946 }, { 10, 2948 }, + { 10, 2950 }, { 10, 2952 }, { 10, 2954 }, { 10, 2956 }, { 10, 2958 }, + { 10, 2960 }, { 10, 2962 }, { 10, 2964 }, { 10, 2966 }, { 10, 2968 }, + { 10, 2970 }, { 10, 2972 }, { 10, 2974 }, { 10, 2976 }, { 10, 2978 }, + { 10, 2980 }, { 10, 2982 }, { 10, 2984 }, { 10, 2986 }, { 10, 2988 }, + { 10, 2990 }, { 10, 2992 }, { 10, 2994 }, { 10, 2996 }, { 10, 2998 }, + { 10, 3000 }, { 10, 3002 }, { 10, 3004 }, { 10, 3006 }, { 10, 3008 }, + { 10, 3010 }, { 10, 3012 }, { 10, 3014 }, { 10, 3016 }, { 10, 3018 }, + { 10, 3020 }, { 10, 3022 }, { 10, 3024 }, { 10, 3026 }, { 10, 3028 }, + { 10, 3030 }, { 10, 3032 }, { 10, 3034 }, { 10, 3036 }, { 10, 3038 }, + { 10, 3040 }, { 10, 3042 }, { 10, 3044 }, { 10, 3046 }, { 10, 3048 }, + { 10, 3050 }, { 10, 3052 }, { 10, 3054 }, { 10, 3056 }, { 10, 3058 }, + { 10, 3060 }, { 10, 3062 }, { 10, 3064 }, { 10, 3066 }, { 10, 3068 }, + { 10, 3070 }, { 10, 3072 }, { 10, 3074 }, { 10, 3076 }, { 10, 3078 }, + { 10, 3080 }, { 10, 3082 }, { 10, 3084 }, { 10, 3086 }, { 10, 3088 }, + { 10, 3090 }, { 10, 3092 }, { 10, 3094 }, { 10, 3096 }, { 10, 3098 }, + { 10, 3100 }, { 10, 3102 }, { 10, 3104 }, { 10, 3106 }, { 10, 3108 }, + { 10, 3110 }, { 10, 3112 }, { 10, 3114 }, { 10, 3116 }, { 10, 3118 }, + { 10, 3120 }, { 10, 3122 }, { 10, 3124 }, { 10, 3126 }, { 10, 3128 }, + { 10, 3130 }, { 10, 3132 }, { 10, 3134 }, { 10, 3136 }, { 10, 3138 }, + { 10, 3140 }, { 10, 3142 }, { 10, 3144 }, { 10, 3146 }, { 10, 3148 }, + { 10, 3150 }, { 10, 3152 }, { 10, 3154 }, { 10, 3156 }, { 10, 3158 }, + { 10, 3160 }, { 10, 3162 }, { 10, 3164 }, 
{ 10, 3166 }, { 10, 3168 }, + { 10, 3170 }, { 10, 3172 }, { 10, 3174 }, { 10, 3176 }, { 10, 3178 }, + { 10, 3180 }, { 10, 3182 }, { 10, 3184 }, { 10, 3186 }, { 10, 3188 }, + { 10, 3190 }, { 10, 3192 }, { 10, 3194 }, { 10, 3196 }, { 10, 3198 }, + { 10, 3200 }, { 10, 3202 }, { 10, 3204 }, { 10, 3206 }, { 10, 3208 }, + { 10, 3210 }, { 10, 3212 }, { 10, 3214 }, { 10, 3216 }, { 10, 3218 }, + { 10, 3220 }, { 10, 3222 }, { 10, 3224 }, { 10, 3226 }, { 10, 3228 }, + { 10, 3230 }, { 10, 3232 }, { 10, 3234 }, { 10, 3236 }, { 10, 3238 }, + { 10, 3240 }, { 10, 3242 }, { 10, 3244 }, { 10, 3246 }, { 10, 3248 }, + { 10, 3250 }, { 10, 3252 }, { 10, 3254 }, { 10, 3256 }, { 10, 3258 }, + { 10, 3260 }, { 10, 3262 }, { 10, 3264 }, { 10, 3266 }, { 10, 3268 }, + { 10, 3270 }, { 10, 3272 }, { 10, 3274 }, { 10, 3276 }, { 10, 3278 }, + { 10, 3280 }, { 10, 3282 }, { 10, 3284 }, { 10, 3286 }, { 10, 3288 }, + { 10, 3290 }, { 10, 3292 }, { 10, 3294 }, { 10, 3296 }, { 10, 3298 }, + { 10, 3300 }, { 10, 3302 }, { 10, 3304 }, { 10, 3306 }, { 10, 3308 }, + { 10, 3310 }, { 10, 3312 }, { 10, 3314 }, { 10, 3316 }, { 10, 3318 }, + { 10, 3320 }, { 10, 3322 }, { 10, 3324 }, { 10, 3326 }, { 10, 3328 }, + { 10, 3330 }, { 10, 3332 }, { 10, 3334 }, { 10, 3336 }, { 10, 3338 }, + { 10, 3340 }, { 10, 3342 }, { 10, 3344 }, { 10, 3346 }, { 10, 3348 }, + { 10, 3350 }, { 10, 3352 }, { 10, 3354 }, { 10, 3356 }, { 10, 3358 }, + { 10, 3360 }, { 10, 3362 }, { 10, 3364 }, { 10, 3366 }, { 10, 3368 }, + { 10, 3370 }, { 10, 3372 }, { 10, 3374 }, { 10, 3376 }, { 10, 3378 }, + { 10, 3380 }, { 10, 3382 }, { 10, 3384 }, { 10, 3386 }, { 10, 3388 }, + { 10, 3390 }, { 10, 3392 }, { 10, 3394 }, { 10, 3396 }, { 10, 3398 }, + { 10, 3400 }, { 10, 3402 }, { 10, 3404 }, { 10, 3406 }, { 10, 3408 }, + { 10, 3410 }, { 10, 3412 }, { 10, 3414 }, { 10, 3416 }, { 10, 3418 }, + { 10, 3420 }, { 10, 3422 }, { 10, 3424 }, { 10, 3426 }, { 10, 3428 }, + { 10, 3430 }, { 10, 3432 }, { 10, 3434 }, { 10, 3436 }, { 10, 3438 }, + { 10, 3440 }, { 10, 3442 
}, { 10, 3444 }, { 10, 3446 }, { 10, 3448 }, + { 10, 3450 }, { 10, 3452 }, { 10, 3454 }, { 10, 3456 }, { 10, 3458 }, + { 10, 3460 }, { 10, 3462 }, { 10, 3464 }, { 10, 3466 }, { 10, 3468 }, + { 10, 3470 }, { 10, 3472 }, { 10, 3474 }, { 10, 3476 }, { 10, 3478 }, + { 10, 3480 }, { 10, 3482 }, { 10, 3484 }, { 10, 3486 }, { 10, 3488 }, + { 10, 3490 }, { 10, 3492 }, { 10, 3494 }, { 10, 3496 }, { 10, 3498 }, + { 10, 3500 }, { 10, 3502 }, { 10, 3504 }, { 10, 3506 }, { 10, 3508 }, + { 10, 3510 }, { 10, 3512 }, { 10, 3514 }, { 10, 3516 }, { 10, 3518 }, + { 10, 3520 }, { 10, 3522 }, { 10, 3524 }, { 10, 3526 }, { 10, 3528 }, + { 10, 3530 }, { 10, 3532 }, { 10, 3534 }, { 10, 3536 }, { 10, 3538 }, + { 10, 3540 }, { 10, 3542 }, { 10, 3544 }, { 10, 3546 }, { 10, 3548 }, + { 10, 3550 }, { 10, 3552 }, { 10, 3554 }, { 10, 3556 }, { 10, 3558 }, + { 10, 3560 }, { 10, 3562 }, { 10, 3564 }, { 10, 3566 }, { 10, 3568 }, + { 10, 3570 }, { 10, 3572 }, { 10, 3574 }, { 10, 3576 }, { 10, 3578 }, + { 10, 3580 }, { 10, 3582 }, { 10, 3584 }, { 10, 3586 }, { 10, 3588 }, + { 10, 3590 }, { 10, 3592 }, { 10, 3594 }, { 10, 3596 }, { 10, 3598 }, + { 10, 3600 }, { 10, 3602 }, { 10, 3604 }, { 10, 3606 }, { 10, 3608 }, + { 10, 3610 }, { 10, 3612 }, { 10, 3614 }, { 10, 3616 }, { 10, 3618 }, + { 10, 3620 }, { 10, 3622 }, { 10, 3624 }, { 10, 3626 }, { 10, 3628 }, + { 10, 3630 }, { 10, 3632 }, { 10, 3634 }, { 10, 3636 }, { 10, 3638 }, + { 10, 3640 }, { 10, 3642 }, { 10, 3644 }, { 10, 3646 }, { 10, 3648 }, + { 10, 3650 }, { 10, 3652 }, { 10, 3654 }, { 10, 3656 }, { 10, 3658 }, + { 10, 3660 }, { 10, 3662 }, { 10, 3664 }, { 10, 3666 }, { 10, 3668 }, + { 10, 3670 }, { 10, 3672 }, { 10, 3674 }, { 10, 3676 }, { 10, 3678 }, + { 10, 3680 }, { 10, 3682 }, { 10, 3684 }, { 10, 3686 }, { 10, 3688 }, + { 10, 3690 }, { 10, 3692 }, { 10, 3694 }, { 10, 3696 }, { 10, 3698 }, + { 10, 3700 }, { 10, 3702 }, { 10, 3704 }, { 10, 3706 }, { 10, 3708 }, + { 10, 3710 }, { 10, 3712 }, { 10, 3714 }, { 10, 3716 }, { 10, 3718 }, + { 10, 
3720 }, { 10, 3722 }, { 10, 3724 }, { 10, 3726 }, { 10, 3728 }, + { 10, 3730 }, { 10, 3732 }, { 10, 3734 }, { 10, 3736 }, { 10, 3738 }, + { 10, 3740 }, { 10, 3742 }, { 10, 3744 }, { 10, 3746 }, { 10, 3748 }, + { 10, 3750 }, { 10, 3752 }, { 10, 3754 }, { 10, 3756 }, { 10, 3758 }, + { 10, 3760 }, { 10, 3762 }, { 10, 3764 }, { 10, 3766 }, { 10, 3768 }, + { 10, 3770 }, { 10, 3772 }, { 10, 3774 }, { 10, 3776 }, { 10, 3778 }, + { 10, 3780 }, { 10, 3782 }, { 10, 3784 }, { 10, 3786 }, { 10, 3788 }, + { 10, 3790 }, { 10, 3792 }, { 10, 3794 }, { 10, 3796 }, { 10, 3798 }, + { 10, 3800 }, { 10, 3802 }, { 10, 3804 }, { 10, 3806 }, { 10, 3808 }, + { 10, 3810 }, { 10, 3812 }, { 10, 3814 }, { 10, 3816 }, { 10, 3818 }, + { 10, 3820 }, { 10, 3822 }, { 10, 3824 }, { 10, 3826 }, { 10, 3828 }, + { 10, 3830 }, { 10, 3832 }, { 10, 3834 }, { 10, 3836 }, { 10, 3838 }, + { 10, 3840 }, { 10, 3842 }, { 10, 3844 }, { 10, 3846 }, { 10, 3848 }, + { 10, 3850 }, { 10, 3852 }, { 10, 3854 }, { 10, 3856 }, { 10, 3858 }, + { 10, 3860 }, { 10, 3862 }, { 10, 3864 }, { 10, 3866 }, { 10, 3868 }, + { 10, 3870 }, { 10, 3872 }, { 10, 3874 }, { 10, 3876 }, { 10, 3878 }, + { 10, 3880 }, { 10, 3882 }, { 10, 3884 }, { 10, 3886 }, { 10, 3888 }, + { 10, 3890 }, { 10, 3892 }, { 10, 3894 }, { 10, 3896 }, { 10, 3898 }, + { 10, 3900 }, { 10, 3902 }, { 10, 3904 }, { 10, 3906 }, { 10, 3908 }, + { 10, 3910 }, { 10, 3912 }, { 10, 3914 }, { 10, 3916 }, { 10, 3918 }, + { 10, 3920 }, { 10, 3922 }, { 10, 3924 }, { 10, 3926 }, { 10, 3928 }, + { 10, 3930 }, { 10, 3932 }, { 10, 3934 }, { 10, 3936 }, { 10, 3938 }, + { 10, 3940 }, { 10, 3942 }, { 10, 3944 }, { 10, 3946 }, { 10, 3948 }, + { 10, 3950 }, { 10, 3952 }, { 10, 3954 }, { 10, 3956 }, { 10, 3958 }, + { 10, 3960 } }; #ifdef __cplusplus diff --git a/libs/libvpx/vp8/encoder/defaultcoefcounts.h b/libs/libvpx/vp8/encoder/defaultcoefcounts.h index 1e8e80484a..2976325dc5 100644 --- a/libs/libvpx/vp8/encoder/defaultcoefcounts.h +++ b/libs/libvpx/vp8/encoder/defaultcoefcounts.h @@ 
-17,217 +17,216 @@ extern "C" { /* Generated file, included by entropy.c */ -static const unsigned int default_coef_counts[BLOCK_TYPES] - [COEF_BANDS] - [PREV_COEF_CONTEXTS] - [MAX_ENTROPY_TOKENS] = -{ +static const unsigned int default_coef_counts + [BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS] = { - { - /* Block Type ( 0 ) */ - { - /* Coeff Band ( 0 ) */ - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, - }, - { - /* Coeff Band ( 1 ) */ - {30190, 26544, 225, 24, 4, 0, 0, 0, 0, 0, 0, 4171593,}, - {26846, 25157, 1241, 130, 26, 6, 1, 0, 0, 0, 0, 149987,}, - {10484, 9538, 1006, 160, 36, 18, 0, 0, 0, 0, 0, 15104,}, - }, - { - /* Coeff Band ( 2 ) */ - {25842, 40456, 1126, 83, 11, 2, 0, 0, 0, 0, 0, 0,}, - {9338, 8010, 512, 73, 7, 3, 2, 0, 0, 0, 0, 43294,}, - {1047, 751, 149, 31, 13, 6, 1, 0, 0, 0, 0, 879,}, - }, - { - /* Coeff Band ( 3 ) */ - {26136, 9826, 252, 13, 0, 0, 0, 0, 0, 0, 0, 0,}, - {8134, 5574, 191, 14, 2, 0, 0, 0, 0, 0, 0, 35302,}, - { 605, 677, 116, 9, 1, 0, 0, 0, 0, 0, 0, 611,}, - }, - { - /* Coeff Band ( 4 ) */ - {10263, 15463, 283, 17, 0, 0, 0, 0, 0, 0, 0, 0,}, - {2773, 2191, 128, 9, 2, 2, 0, 0, 0, 0, 0, 10073,}, - { 134, 125, 32, 4, 0, 2, 0, 0, 0, 0, 0, 50,}, - }, - { - /* Coeff Band ( 5 ) */ - {10483, 2663, 23, 1, 0, 0, 0, 0, 0, 0, 0, 0,}, - {2137, 1251, 27, 1, 1, 0, 0, 0, 0, 0, 0, 14362,}, - { 116, 156, 14, 2, 1, 0, 0, 0, 0, 0, 0, 190,}, - }, - { - /* Coeff Band ( 6 ) */ - {40977, 27614, 412, 28, 0, 0, 0, 0, 0, 0, 0, 0,}, - {6113, 5213, 261, 22, 3, 0, 0, 0, 0, 0, 0, 26164,}, - { 382, 312, 50, 14, 2, 0, 0, 0, 0, 0, 0, 345,}, - }, - { - /* Coeff Band ( 7 ) */ - { 0, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, - { 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 319,}, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8,}, - }, - }, - { - /* Block Type ( 1 ) */ - { - /* Coeff Band ( 0 ) */ - {3268, 19382, 1043, 250, 93, 82, 49, 26, 17, 8, 25, 82289,}, - {8758, 32110, 5436, 1832, 827, 668, 420, 
153, 24, 0, 3, 52914,}, - {9337, 23725, 8487, 3954, 2107, 1836, 1069, 399, 59, 0, 0, 18620,}, - }, - { - /* Coeff Band ( 1 ) */ - {12419, 8420, 452, 62, 9, 1, 0, 0, 0, 0, 0, 0,}, - {11715, 8705, 693, 92, 15, 7, 2, 0, 0, 0, 0, 53988,}, - {7603, 8585, 2306, 778, 270, 145, 39, 5, 0, 0, 0, 9136,}, - }, - { - /* Coeff Band ( 2 ) */ - {15938, 14335, 1207, 184, 55, 13, 4, 1, 0, 0, 0, 0,}, - {7415, 6829, 1138, 244, 71, 26, 7, 0, 0, 0, 0, 9980,}, - {1580, 1824, 655, 241, 89, 46, 10, 2, 0, 0, 0, 429,}, - }, - { - /* Coeff Band ( 3 ) */ - {19453, 5260, 201, 19, 0, 0, 0, 0, 0, 0, 0, 0,}, - {9173, 3758, 213, 22, 1, 1, 0, 0, 0, 0, 0, 9820,}, - {1689, 1277, 276, 51, 17, 4, 0, 0, 0, 0, 0, 679,}, - }, - { - /* Coeff Band ( 4 ) */ - {12076, 10667, 620, 85, 19, 9, 5, 0, 0, 0, 0, 0,}, - {4665, 3625, 423, 55, 19, 9, 0, 0, 0, 0, 0, 5127,}, - { 415, 440, 143, 34, 20, 7, 2, 0, 0, 0, 0, 101,}, - }, - { - /* Coeff Band ( 5 ) */ - {12183, 4846, 115, 11, 1, 0, 0, 0, 0, 0, 0, 0,}, - {4226, 3149, 177, 21, 2, 0, 0, 0, 0, 0, 0, 7157,}, - { 375, 621, 189, 51, 11, 4, 1, 0, 0, 0, 0, 198,}, - }, - { - /* Coeff Band ( 6 ) */ - {61658, 37743, 1203, 94, 10, 3, 0, 0, 0, 0, 0, 0,}, - {15514, 11563, 903, 111, 14, 5, 0, 0, 0, 0, 0, 25195,}, - { 929, 1077, 291, 78, 14, 7, 1, 0, 0, 0, 0, 507,}, - }, - { - /* Coeff Band ( 7 ) */ - { 0, 990, 15, 3, 0, 0, 0, 0, 0, 0, 0, 0,}, - { 0, 412, 13, 0, 0, 0, 0, 0, 0, 0, 0, 1641,}, - { 0, 18, 7, 1, 0, 0, 0, 0, 0, 0, 0, 30,}, - }, - }, - { - /* Block Type ( 2 ) */ - { - /* Coeff Band ( 0 ) */ - { 953, 24519, 628, 120, 28, 12, 4, 0, 0, 0, 0, 2248798,}, - {1525, 25654, 2647, 617, 239, 143, 42, 5, 0, 0, 0, 66837,}, - {1180, 11011, 3001, 1237, 532, 448, 239, 54, 5, 0, 0, 7122,}, - }, - { - /* Coeff Band ( 1 ) */ - {1356, 2220, 67, 10, 4, 1, 0, 0, 0, 0, 0, 0,}, - {1450, 2544, 102, 18, 4, 3, 0, 0, 0, 0, 0, 57063,}, - {1182, 2110, 470, 130, 41, 21, 0, 0, 0, 0, 0, 6047,}, - }, - { - /* Coeff Band ( 2 ) */ - { 370, 3378, 200, 30, 5, 4, 1, 0, 0, 0, 0, 0,}, - { 293, 1006, 131, 29, 
11, 0, 0, 0, 0, 0, 0, 5404,}, - { 114, 387, 98, 23, 4, 8, 1, 0, 0, 0, 0, 236,}, - }, - { - /* Coeff Band ( 3 ) */ - { 579, 194, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, - { 395, 213, 5, 1, 0, 0, 0, 0, 0, 0, 0, 4157,}, - { 119, 122, 4, 0, 0, 0, 0, 0, 0, 0, 0, 300,}, - }, - { - /* Coeff Band ( 4 ) */ - { 38, 557, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, - { 21, 114, 12, 1, 0, 0, 0, 0, 0, 0, 0, 427,}, - { 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7,}, - }, - { - /* Coeff Band ( 5 ) */ - { 52, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, - { 18, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 652,}, - { 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30,}, - }, - { - /* Coeff Band ( 6 ) */ - { 640, 569, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, - { 25, 77, 2, 0, 0, 0, 0, 0, 0, 0, 0, 517,}, - { 4, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,}, - }, - { - /* Coeff Band ( 7 ) */ - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, - }, - }, - { - /* Block Type ( 3 ) */ - { - /* Coeff Band ( 0 ) */ - {2506, 20161, 2707, 767, 261, 178, 107, 30, 14, 3, 0, 100694,}, - {8806, 36478, 8817, 3268, 1280, 850, 401, 114, 42, 0, 0, 58572,}, - {11003, 27214, 11798, 5716, 2482, 2072, 1048, 175, 32, 0, 0, 19284,}, - }, - { - /* Coeff Band ( 1 ) */ - {9738, 11313, 959, 205, 70, 18, 11, 1, 0, 0, 0, 0,}, - {12628, 15085, 1507, 273, 52, 19, 9, 0, 0, 0, 0, 54280,}, - {10701, 15846, 5561, 1926, 813, 570, 249, 36, 0, 0, 0, 6460,}, - }, - { - /* Coeff Band ( 2 ) */ - {6781, 22539, 2784, 634, 182, 123, 20, 4, 0, 0, 0, 0,}, - {6263, 11544, 2649, 790, 259, 168, 27, 5, 0, 0, 0, 20539,}, - {3109, 4075, 2031, 896, 457, 386, 158, 29, 0, 0, 0, 1138,}, - }, - { - /* Coeff Band ( 3 ) */ - {11515, 4079, 465, 73, 5, 14, 2, 0, 0, 0, 0, 0,}, - {9361, 5834, 650, 96, 24, 8, 4, 0, 0, 0, 0, 22181,}, - {4343, 3974, 1360, 415, 132, 96, 14, 1, 0, 0, 0, 1267,}, - }, - { - /* Coeff Band ( 4 ) */ - {4787, 9297, 823, 168, 44, 12, 4, 0, 0, 0, 0, 0,}, - {3619, 4472, 719, 198, 60, 31, 3, 0, 0, 0, 0, 8401,}, - {1157, 1175, 483, 182, 88, 
31, 8, 0, 0, 0, 0, 268,}, - }, - { - /* Coeff Band ( 5 ) */ - {8299, 1226, 32, 5, 1, 0, 0, 0, 0, 0, 0, 0,}, - {3502, 1568, 57, 4, 1, 1, 0, 0, 0, 0, 0, 9811,}, - {1055, 1070, 166, 29, 6, 1, 0, 0, 0, 0, 0, 527,}, - }, - { - /* Coeff Band ( 6 ) */ - {27414, 27927, 1989, 347, 69, 26, 0, 0, 0, 0, 0, 0,}, - {5876, 10074, 1574, 341, 91, 24, 4, 0, 0, 0, 0, 21954,}, - {1571, 2171, 778, 324, 124, 65, 16, 0, 0, 0, 0, 979,}, - }, - { - /* Coeff Band ( 7 ) */ - { 0, 29, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, - { 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 459,}, - { 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13,}, - }, - }, -}; + { + /* Block Type ( 0 ) */ + { + /* Coeff Band ( 0 ) */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + }, + { + /* Coeff Band ( 1 ) */ + { 30190, 26544, 225, 24, 4, 0, 0, 0, 0, 0, 0, 4171593 }, + { 26846, 25157, 1241, 130, 26, 6, 1, 0, 0, 0, 0, 149987 }, + { 10484, 9538, 1006, 160, 36, 18, 0, 0, 0, 0, 0, 15104 }, + }, + { + /* Coeff Band ( 2 ) */ + { 25842, 40456, 1126, 83, 11, 2, 0, 0, 0, 0, 0, 0 }, + { 9338, 8010, 512, 73, 7, 3, 2, 0, 0, 0, 0, 43294 }, + { 1047, 751, 149, 31, 13, 6, 1, 0, 0, 0, 0, 879 }, + }, + { + /* Coeff Band ( 3 ) */ + { 26136, 9826, 252, 13, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 8134, 5574, 191, 14, 2, 0, 0, 0, 0, 0, 0, 35302 }, + { 605, 677, 116, 9, 1, 0, 0, 0, 0, 0, 0, 611 }, + }, + { + /* Coeff Band ( 4 ) */ + { 10263, 15463, 283, 17, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 2773, 2191, 128, 9, 2, 2, 0, 0, 0, 0, 0, 10073 }, + { 134, 125, 32, 4, 0, 2, 0, 0, 0, 0, 0, 50 }, + }, + { + /* Coeff Band ( 5 ) */ + { 10483, 2663, 23, 1, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 2137, 1251, 27, 1, 1, 0, 0, 0, 0, 0, 0, 14362 }, + { 116, 156, 14, 2, 1, 0, 0, 0, 0, 0, 0, 190 }, + }, + { + /* Coeff Band ( 6 ) */ + { 40977, 27614, 412, 28, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 6113, 5213, 261, 22, 3, 0, 0, 0, 0, 0, 0, 26164 }, + { 382, 312, 50, 14, 2, 0, 0, 0, 0, 0, 0, 345 }, + }, + { + /* Coeff Band ( 7 ) */ + { 0, 26, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 319 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8 }, + }, + }, + { + /* Block Type ( 1 ) */ + { + /* Coeff Band ( 0 ) */ + { 3268, 19382, 1043, 250, 93, 82, 49, 26, 17, 8, 25, 82289 }, + { 8758, 32110, 5436, 1832, 827, 668, 420, 153, 24, 0, 3, 52914 }, + { 9337, 23725, 8487, 3954, 2107, 1836, 1069, 399, 59, 0, 0, + 18620 }, + }, + { + /* Coeff Band ( 1 ) */ + { 12419, 8420, 452, 62, 9, 1, 0, 0, 0, 0, 0, 0 }, + { 11715, 8705, 693, 92, 15, 7, 2, 0, 0, 0, 0, 53988 }, + { 7603, 8585, 2306, 778, 270, 145, 39, 5, 0, 0, 0, 9136 }, + }, + { + /* Coeff Band ( 2 ) */ + { 15938, 14335, 1207, 184, 55, 13, 4, 1, 0, 0, 0, 0 }, + { 7415, 6829, 1138, 244, 71, 26, 7, 0, 0, 0, 0, 9980 }, + { 1580, 1824, 655, 241, 89, 46, 10, 2, 0, 0, 0, 429 }, + }, + { + /* Coeff Band ( 3 ) */ + { 19453, 5260, 201, 19, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 9173, 3758, 213, 22, 1, 1, 0, 0, 0, 0, 0, 9820 }, + { 1689, 1277, 276, 51, 17, 4, 0, 0, 0, 0, 0, 679 }, + }, + { + /* Coeff Band ( 4 ) */ + { 12076, 10667, 620, 85, 19, 9, 5, 0, 0, 0, 0, 0 }, + { 4665, 3625, 423, 55, 19, 9, 0, 0, 0, 0, 0, 5127 }, + { 415, 440, 143, 34, 20, 7, 2, 0, 0, 0, 0, 101 }, + }, + { + /* Coeff Band ( 5 ) */ + { 12183, 4846, 115, 11, 1, 0, 0, 0, 0, 0, 0, 0 }, + { 4226, 3149, 177, 21, 2, 0, 0, 0, 0, 0, 0, 7157 }, + { 375, 621, 189, 51, 11, 4, 1, 0, 0, 0, 0, 198 }, + }, + { + /* Coeff Band ( 6 ) */ + { 61658, 37743, 1203, 94, 10, 3, 0, 0, 0, 0, 0, 0 }, + { 15514, 11563, 903, 111, 14, 5, 0, 0, 0, 0, 0, 25195 }, + { 929, 1077, 291, 78, 14, 7, 1, 0, 0, 0, 0, 507 }, + }, + { + /* Coeff Band ( 7 ) */ + { 0, 990, 15, 3, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 412, 13, 0, 0, 0, 0, 0, 0, 0, 0, 1641 }, + { 0, 18, 7, 1, 0, 0, 0, 0, 0, 0, 0, 30 }, + }, + }, + { + /* Block Type ( 2 ) */ + { + /* Coeff Band ( 0 ) */ + { 953, 24519, 628, 120, 28, 12, 4, 0, 0, 0, 0, 2248798 }, + { 1525, 25654, 2647, 617, 239, 143, 42, 5, 0, 0, 0, 66837 }, + { 1180, 11011, 3001, 1237, 532, 448, 239, 54, 5, 0, 0, 
7122 }, + }, + { + /* Coeff Band ( 1 ) */ + { 1356, 2220, 67, 10, 4, 1, 0, 0, 0, 0, 0, 0 }, + { 1450, 2544, 102, 18, 4, 3, 0, 0, 0, 0, 0, 57063 }, + { 1182, 2110, 470, 130, 41, 21, 0, 0, 0, 0, 0, 6047 }, + }, + { + /* Coeff Band ( 2 ) */ + { 370, 3378, 200, 30, 5, 4, 1, 0, 0, 0, 0, 0 }, + { 293, 1006, 131, 29, 11, 0, 0, 0, 0, 0, 0, 5404 }, + { 114, 387, 98, 23, 4, 8, 1, 0, 0, 0, 0, 236 }, + }, + { + /* Coeff Band ( 3 ) */ + { 579, 194, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 395, 213, 5, 1, 0, 0, 0, 0, 0, 0, 0, 4157 }, + { 119, 122, 4, 0, 0, 0, 0, 0, 0, 0, 0, 300 }, + }, + { + /* Coeff Band ( 4 ) */ + { 38, 557, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 21, 114, 12, 1, 0, 0, 0, 0, 0, 0, 0, 427 }, + { 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 }, + }, + { + /* Coeff Band ( 5 ) */ + { 52, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 18, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 652 }, + { 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, + }, + { + /* Coeff Band ( 6 ) */ + { 640, 569, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 25, 77, 2, 0, 0, 0, 0, 0, 0, 0, 0, 517 }, + { 4, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3 }, + }, + { + /* Coeff Band ( 7 ) */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + }, + }, + { + /* Block Type ( 3 ) */ + { + /* Coeff Band ( 0 ) */ + { 2506, 20161, 2707, 767, 261, 178, 107, 30, 14, 3, 0, 100694 }, + { 8806, 36478, 8817, 3268, 1280, 850, 401, 114, 42, 0, 0, 58572 }, + { 11003, 27214, 11798, 5716, 2482, 2072, 1048, 175, 32, 0, 0, + 19284 }, + }, + { + /* Coeff Band ( 1 ) */ + { 9738, 11313, 959, 205, 70, 18, 11, 1, 0, 0, 0, 0 }, + { 12628, 15085, 1507, 273, 52, 19, 9, 0, 0, 0, 0, 54280 }, + { 10701, 15846, 5561, 1926, 813, 570, 249, 36, 0, 0, 0, 6460 }, + }, + { + /* Coeff Band ( 2 ) */ + { 6781, 22539, 2784, 634, 182, 123, 20, 4, 0, 0, 0, 0 }, + { 6263, 11544, 2649, 790, 259, 168, 27, 5, 0, 0, 0, 20539 }, + { 3109, 4075, 2031, 896, 457, 386, 158, 29, 0, 0, 0, 1138 }, + }, + { + /* Coeff Band ( 3 ) */ + { 11515, 4079, 
465, 73, 5, 14, 2, 0, 0, 0, 0, 0 }, + { 9361, 5834, 650, 96, 24, 8, 4, 0, 0, 0, 0, 22181 }, + { 4343, 3974, 1360, 415, 132, 96, 14, 1, 0, 0, 0, 1267 }, + }, + { + /* Coeff Band ( 4 ) */ + { 4787, 9297, 823, 168, 44, 12, 4, 0, 0, 0, 0, 0 }, + { 3619, 4472, 719, 198, 60, 31, 3, 0, 0, 0, 0, 8401 }, + { 1157, 1175, 483, 182, 88, 31, 8, 0, 0, 0, 0, 268 }, + }, + { + /* Coeff Band ( 5 ) */ + { 8299, 1226, 32, 5, 1, 0, 0, 0, 0, 0, 0, 0 }, + { 3502, 1568, 57, 4, 1, 1, 0, 0, 0, 0, 0, 9811 }, + { 1055, 1070, 166, 29, 6, 1, 0, 0, 0, 0, 0, 527 }, + }, + { + /* Coeff Band ( 6 ) */ + { 27414, 27927, 1989, 347, 69, 26, 0, 0, 0, 0, 0, 0 }, + { 5876, 10074, 1574, 341, 91, 24, 4, 0, 0, 0, 0, 21954 }, + { 1571, 2171, 778, 324, 124, 65, 16, 0, 0, 0, 0, 979 }, + }, + { + /* Coeff Band ( 7 ) */ + { 0, 29, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 459 }, + { 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13 }, + }, + }, + }; #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/encoder/denoising.c b/libs/libvpx/vp8/encoder/denoising.c index 2a21943fe1..eb963b97e3 100644 --- a/libs/libvpx/vp8/encoder/denoising.c +++ b/libs/libvpx/vp8/encoder/denoising.c @@ -23,7 +23,7 @@ static const unsigned int NOISE_MOTION_THRESHOLD = 25 * 25; */ static const unsigned int SSE_DIFF_THRESHOLD = 16 * 16 * 20; static const unsigned int SSE_THRESHOLD = 16 * 16 * 40; -static const unsigned int SSE_THRESHOLD_HIGH = 16 * 16 * 60; +static const unsigned int SSE_THRESHOLD_HIGH = 16 * 16 * 80; /* * The filter function was modified to reduce the computational complexity. 
@@ -58,311 +58,301 @@ int vp8_denoiser_filter_c(unsigned char *mc_running_avg_y, int mc_avg_y_stride, unsigned char *running_avg_y, int avg_y_stride, unsigned char *sig, int sig_stride, unsigned int motion_magnitude, - int increase_denoising) -{ - unsigned char *running_avg_y_start = running_avg_y; - unsigned char *sig_start = sig; - int sum_diff_thresh; - int r, c; - int sum_diff = 0; - int adj_val[3] = {3, 4, 6}; - int shift_inc1 = 0; - int shift_inc2 = 1; - int col_sum[16] = {0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0}; - /* If motion_magnitude is small, making the denoiser more aggressive by - * increasing the adjustment for each level. Add another increment for - * blocks that are labeled for increase denoising. */ - if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) - { - if (increase_denoising) { - shift_inc1 = 1; - shift_inc2 = 2; - } - adj_val[0] += shift_inc2; - adj_val[1] += shift_inc2; - adj_val[2] += shift_inc2; + int increase_denoising) { + unsigned char *running_avg_y_start = running_avg_y; + unsigned char *sig_start = sig; + int sum_diff_thresh; + int r, c; + int sum_diff = 0; + int adj_val[3] = { 3, 4, 6 }; + int shift_inc1 = 0; + int shift_inc2 = 1; + int col_sum[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + /* If motion_magnitude is small, making the denoiser more aggressive by + * increasing the adjustment for each level. Add another increment for + * blocks that are labeled for increase denoising. 
*/ + if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) { + if (increase_denoising) { + shift_inc1 = 1; + shift_inc2 = 2; } + adj_val[0] += shift_inc2; + adj_val[1] += shift_inc2; + adj_val[2] += shift_inc2; + } - for (r = 0; r < 16; ++r) - { - for (c = 0; c < 16; ++c) - { - int diff = 0; - int adjustment = 0; - int absdiff = 0; + for (r = 0; r < 16; ++r) { + for (c = 0; c < 16; ++c) { + int diff = 0; + int adjustment = 0; + int absdiff = 0; - diff = mc_running_avg_y[c] - sig[c]; - absdiff = abs(diff); + diff = mc_running_avg_y[c] - sig[c]; + absdiff = abs(diff); - // When |diff| <= |3 + shift_inc1|, use pixel value from - // last denoised raw. - if (absdiff <= 3 + shift_inc1) - { - running_avg_y[c] = mc_running_avg_y[c]; - col_sum[c] += diff; - } - else - { - if (absdiff >= 4 + shift_inc1 && absdiff <= 7) - adjustment = adj_val[0]; - else if (absdiff >= 8 && absdiff <= 15) - adjustment = adj_val[1]; - else - adjustment = adj_val[2]; - - if (diff > 0) - { - if ((sig[c] + adjustment) > 255) - running_avg_y[c] = 255; - else - running_avg_y[c] = sig[c] + adjustment; - - col_sum[c] += adjustment; - } - else - { - if ((sig[c] - adjustment) < 0) - running_avg_y[c] = 0; - else - running_avg_y[c] = sig[c] - adjustment; - - col_sum[c] -= adjustment; - } - } + // When |diff| <= |3 + shift_inc1|, use pixel value from + // last denoised raw. + if (absdiff <= 3 + shift_inc1) { + running_avg_y[c] = mc_running_avg_y[c]; + col_sum[c] += diff; + } else { + if (absdiff >= 4 + shift_inc1 && absdiff <= 7) { + adjustment = adj_val[0]; + } else if (absdiff >= 8 && absdiff <= 15) { + adjustment = adj_val[1]; + } else { + adjustment = adj_val[2]; } - /* Update pointers for next iteration. 
*/ + if (diff > 0) { + if ((sig[c] + adjustment) > 255) { + running_avg_y[c] = 255; + } else { + running_avg_y[c] = sig[c] + adjustment; + } + + col_sum[c] += adjustment; + } else { + if ((sig[c] - adjustment) < 0) { + running_avg_y[c] = 0; + } else { + running_avg_y[c] = sig[c] - adjustment; + } + + col_sum[c] -= adjustment; + } + } + } + + /* Update pointers for next iteration. */ + sig += sig_stride; + mc_running_avg_y += mc_avg_y_stride; + running_avg_y += avg_y_stride; + } + + for (c = 0; c < 16; ++c) { + // Below we clip the value in the same way which SSE code use. + // When adopting aggressive denoiser, the adj_val for each pixel + // could be at most 8 (this is current max adjustment of the map). + // In SSE code, we calculate the sum of adj_val for + // the columns, so the sum could be upto 128(16 rows). However, + // the range of the value is -128 ~ 127 in SSE code, that's why + // we do this change in C code. + // We don't do this for UV denoiser, since there are only 8 rows, + // and max adjustments <= 8, so the sum of the columns will not + // exceed 64. + if (col_sum[c] >= 128) { + col_sum[c] = 127; + } + sum_diff += col_sum[c]; + } + + sum_diff_thresh = SUM_DIFF_THRESHOLD; + if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH; + if (abs(sum_diff) > sum_diff_thresh) { + // Before returning to copy the block (i.e., apply no denoising), check + // if we can still apply some (weaker) temporal filtering to this block, + // that would otherwise not be denoised at all. Simplest is to apply + // an additional adjustment to running_avg_y to bring it closer to sig. + // The adjustment is capped by a maximum delta, and chosen such that + // in most cases the resulting sum_diff will be within the + // accceptable range given by sum_diff_thresh. + + // The delta is set by the excess of absolute pixel diff over threshold. + int delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1; + // Only apply the adjustment for max delta up to 3. 
+ if (delta < 4) { + sig -= sig_stride * 16; + mc_running_avg_y -= mc_avg_y_stride * 16; + running_avg_y -= avg_y_stride * 16; + for (r = 0; r < 16; ++r) { + for (c = 0; c < 16; ++c) { + int diff = mc_running_avg_y[c] - sig[c]; + int adjustment = abs(diff); + if (adjustment > delta) adjustment = delta; + if (diff > 0) { + // Bring denoised signal down. + if (running_avg_y[c] - adjustment < 0) { + running_avg_y[c] = 0; + } else { + running_avg_y[c] = running_avg_y[c] - adjustment; + } + col_sum[c] -= adjustment; + } else if (diff < 0) { + // Bring denoised signal up. + if (running_avg_y[c] + adjustment > 255) { + running_avg_y[c] = 255; + } else { + running_avg_y[c] = running_avg_y[c] + adjustment; + } + col_sum[c] += adjustment; + } + } + // TODO(marpan): Check here if abs(sum_diff) has gone below the + // threshold sum_diff_thresh, and if so, we can exit the row loop. sig += sig_stride; mc_running_avg_y += mc_avg_y_stride; running_avg_y += avg_y_stride; - } - - for (c = 0; c < 16; ++c) { - // Below we clip the value in the same way which SSE code use. - // When adopting aggressive denoiser, the adj_val for each pixel - // could be at most 8 (this is current max adjustment of the map). - // In SSE code, we calculate the sum of adj_val for - // the columns, so the sum could be upto 128(16 rows). However, - // the range of the value is -128 ~ 127 in SSE code, that's why - // we do this change in C code. - // We don't do this for UV denoiser, since there are only 8 rows, - // and max adjustments <= 8, so the sum of the columns will not - // exceed 64. 
- if (col_sum[c] >= 128) { - col_sum[c] = 127; } - sum_diff += col_sum[c]; - } - sum_diff_thresh= SUM_DIFF_THRESHOLD; - if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH; - if (abs(sum_diff) > sum_diff_thresh) { - // Before returning to copy the block (i.e., apply no denoising), check - // if we can still apply some (weaker) temporal filtering to this block, - // that would otherwise not be denoised at all. Simplest is to apply - // an additional adjustment to running_avg_y to bring it closer to sig. - // The adjustment is capped by a maximum delta, and chosen such that - // in most cases the resulting sum_diff will be within the - // accceptable range given by sum_diff_thresh. - - // The delta is set by the excess of absolute pixel diff over threshold. - int delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1; - // Only apply the adjustment for max delta up to 3. - if (delta < 4) { - sig -= sig_stride * 16; - mc_running_avg_y -= mc_avg_y_stride * 16; - running_avg_y -= avg_y_stride * 16; - for (r = 0; r < 16; ++r) { - for (c = 0; c < 16; ++c) { - int diff = mc_running_avg_y[c] - sig[c]; - int adjustment = abs(diff); - if (adjustment > delta) - adjustment = delta; - if (diff > 0) { - // Bring denoised signal down. - if (running_avg_y[c] - adjustment < 0) - running_avg_y[c] = 0; - else - running_avg_y[c] = running_avg_y[c] - adjustment; - col_sum[c] -= adjustment; - } else if (diff < 0) { - // Bring denoised signal up. - if (running_avg_y[c] + adjustment > 255) - running_avg_y[c] = 255; - else - running_avg_y[c] = running_avg_y[c] + adjustment; - col_sum[c] += adjustment; - } - } - // TODO(marpan): Check here if abs(sum_diff) has gone below the - // threshold sum_diff_thresh, and if so, we can exit the row loop. 
- sig += sig_stride; - mc_running_avg_y += mc_avg_y_stride; - running_avg_y += avg_y_stride; + sum_diff = 0; + for (c = 0; c < 16; ++c) { + if (col_sum[c] >= 128) { + col_sum[c] = 127; } - - sum_diff = 0; - for (c = 0; c < 16; ++c) { - if (col_sum[c] >= 128) { - col_sum[c] = 127; - } - sum_diff += col_sum[c]; - } - - if (abs(sum_diff) > sum_diff_thresh) - return COPY_BLOCK; - } else { - return COPY_BLOCK; + sum_diff += col_sum[c]; } - } - vp8_copy_mem16x16(running_avg_y_start, avg_y_stride, sig_start, sig_stride); - return FILTER_BLOCK; + if (abs(sum_diff) > sum_diff_thresh) return COPY_BLOCK; + } else { + return COPY_BLOCK; + } + } + + vp8_copy_mem16x16(running_avg_y_start, avg_y_stride, sig_start, sig_stride); + return FILTER_BLOCK; } int vp8_denoiser_filter_uv_c(unsigned char *mc_running_avg_uv, int mc_avg_uv_stride, - unsigned char *running_avg_uv, - int avg_uv_stride, - unsigned char *sig, - int sig_stride, + unsigned char *running_avg_uv, int avg_uv_stride, + unsigned char *sig, int sig_stride, unsigned int motion_magnitude, int increase_denoising) { - unsigned char *running_avg_uv_start = running_avg_uv; - unsigned char *sig_start = sig; - int sum_diff_thresh; - int r, c; - int sum_diff = 0; - int sum_block = 0; - int adj_val[3] = {3, 4, 6}; - int shift_inc1 = 0; - int shift_inc2 = 1; - /* If motion_magnitude is small, making the denoiser more aggressive by - * increasing the adjustment for each level. Add another increment for - * blocks that are labeled for increase denoising. 
*/ - if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) { - if (increase_denoising) { - shift_inc1 = 1; - shift_inc2 = 2; - } - adj_val[0] += shift_inc2; - adj_val[1] += shift_inc2; - adj_val[2] += shift_inc2; + unsigned char *running_avg_uv_start = running_avg_uv; + unsigned char *sig_start = sig; + int sum_diff_thresh; + int r, c; + int sum_diff = 0; + int sum_block = 0; + int adj_val[3] = { 3, 4, 6 }; + int shift_inc1 = 0; + int shift_inc2 = 1; + /* If motion_magnitude is small, making the denoiser more aggressive by + * increasing the adjustment for each level. Add another increment for + * blocks that are labeled for increase denoising. */ + if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) { + if (increase_denoising) { + shift_inc1 = 1; + shift_inc2 = 2; } + adj_val[0] += shift_inc2; + adj_val[1] += shift_inc2; + adj_val[2] += shift_inc2; + } - // Avoid denoising color signal if its close to average level. - for (r = 0; r < 8; ++r) { - for (c = 0; c < 8; ++c) { - sum_block += sig[c]; - } - sig += sig_stride; + // Avoid denoising color signal if its close to average level. + for (r = 0; r < 8; ++r) { + for (c = 0; c < 8; ++c) { + sum_block += sig[c]; } - if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) { + sig += sig_stride; + } + if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) { + return COPY_BLOCK; + } + + sig -= sig_stride * 8; + for (r = 0; r < 8; ++r) { + for (c = 0; c < 8; ++c) { + int diff = 0; + int adjustment = 0; + int absdiff = 0; + + diff = mc_running_avg_uv[c] - sig[c]; + absdiff = abs(diff); + + // When |diff| <= |3 + shift_inc1|, use pixel value from + // last denoised raw. 
+ if (absdiff <= 3 + shift_inc1) { + running_avg_uv[c] = mc_running_avg_uv[c]; + sum_diff += diff; + } else { + if (absdiff >= 4 && absdiff <= 7) { + adjustment = adj_val[0]; + } else if (absdiff >= 8 && absdiff <= 15) { + adjustment = adj_val[1]; + } else { + adjustment = adj_val[2]; + } + if (diff > 0) { + if ((sig[c] + adjustment) > 255) { + running_avg_uv[c] = 255; + } else { + running_avg_uv[c] = sig[c] + adjustment; + } + sum_diff += adjustment; + } else { + if ((sig[c] - adjustment) < 0) { + running_avg_uv[c] = 0; + } else { + running_avg_uv[c] = sig[c] - adjustment; + } + sum_diff -= adjustment; + } + } + } + /* Update pointers for next iteration. */ + sig += sig_stride; + mc_running_avg_uv += mc_avg_uv_stride; + running_avg_uv += avg_uv_stride; + } + + sum_diff_thresh = SUM_DIFF_THRESHOLD_UV; + if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV; + if (abs(sum_diff) > sum_diff_thresh) { + // Before returning to copy the block (i.e., apply no denoising), check + // if we can still apply some (weaker) temporal filtering to this block, + // that would otherwise not be denoised at all. Simplest is to apply + // an additional adjustment to running_avg_y to bring it closer to sig. + // The adjustment is capped by a maximum delta, and chosen such that + // in most cases the resulting sum_diff will be within the + // accceptable range given by sum_diff_thresh. + + // The delta is set by the excess of absolute pixel diff over threshold. + int delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1; + // Only apply the adjustment for max delta up to 3. + if (delta < 4) { + sig -= sig_stride * 8; + mc_running_avg_uv -= mc_avg_uv_stride * 8; + running_avg_uv -= avg_uv_stride * 8; + for (r = 0; r < 8; ++r) { + for (c = 0; c < 8; ++c) { + int diff = mc_running_avg_uv[c] - sig[c]; + int adjustment = abs(diff); + if (adjustment > delta) adjustment = delta; + if (diff > 0) { + // Bring denoised signal down. 
+ if (running_avg_uv[c] - adjustment < 0) { + running_avg_uv[c] = 0; + } else { + running_avg_uv[c] = running_avg_uv[c] - adjustment; + } + sum_diff -= adjustment; + } else if (diff < 0) { + // Bring denoised signal up. + if (running_avg_uv[c] + adjustment > 255) { + running_avg_uv[c] = 255; + } else { + running_avg_uv[c] = running_avg_uv[c] + adjustment; + } + sum_diff += adjustment; + } + } + // TODO(marpan): Check here if abs(sum_diff) has gone below the + // threshold sum_diff_thresh, and if so, we can exit the row loop. + sig += sig_stride; + mc_running_avg_uv += mc_avg_uv_stride; + running_avg_uv += avg_uv_stride; + } + if (abs(sum_diff) > sum_diff_thresh) return COPY_BLOCK; + } else { return COPY_BLOCK; } + } - sig -= sig_stride * 8; - for (r = 0; r < 8; ++r) { - for (c = 0; c < 8; ++c) { - int diff = 0; - int adjustment = 0; - int absdiff = 0; - - diff = mc_running_avg_uv[c] - sig[c]; - absdiff = abs(diff); - - // When |diff| <= |3 + shift_inc1|, use pixel value from - // last denoised raw. - if (absdiff <= 3 + shift_inc1) { - running_avg_uv[c] = mc_running_avg_uv[c]; - sum_diff += diff; - } else { - if (absdiff >= 4 && absdiff <= 7) - adjustment = adj_val[0]; - else if (absdiff >= 8 && absdiff <= 15) - adjustment = adj_val[1]; - else - adjustment = adj_val[2]; - if (diff > 0) { - if ((sig[c] + adjustment) > 255) - running_avg_uv[c] = 255; - else - running_avg_uv[c] = sig[c] + adjustment; - sum_diff += adjustment; - } else { - if ((sig[c] - adjustment) < 0) - running_avg_uv[c] = 0; - else - running_avg_uv[c] = sig[c] - adjustment; - sum_diff -= adjustment; - } - } - } - /* Update pointers for next iteration. 
*/ - sig += sig_stride; - mc_running_avg_uv += mc_avg_uv_stride; - running_avg_uv += avg_uv_stride; - } - - sum_diff_thresh= SUM_DIFF_THRESHOLD_UV; - if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV; - if (abs(sum_diff) > sum_diff_thresh) { - // Before returning to copy the block (i.e., apply no denoising), check - // if we can still apply some (weaker) temporal filtering to this block, - // that would otherwise not be denoised at all. Simplest is to apply - // an additional adjustment to running_avg_y to bring it closer to sig. - // The adjustment is capped by a maximum delta, and chosen such that - // in most cases the resulting sum_diff will be within the - // accceptable range given by sum_diff_thresh. - - // The delta is set by the excess of absolute pixel diff over threshold. - int delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1; - // Only apply the adjustment for max delta up to 3. - if (delta < 4) { - sig -= sig_stride * 8; - mc_running_avg_uv -= mc_avg_uv_stride * 8; - running_avg_uv -= avg_uv_stride * 8; - for (r = 0; r < 8; ++r) { - for (c = 0; c < 8; ++c) { - int diff = mc_running_avg_uv[c] - sig[c]; - int adjustment = abs(diff); - if (adjustment > delta) - adjustment = delta; - if (diff > 0) { - // Bring denoised signal down. - if (running_avg_uv[c] - adjustment < 0) - running_avg_uv[c] = 0; - else - running_avg_uv[c] = running_avg_uv[c] - adjustment; - sum_diff -= adjustment; - } else if (diff < 0) { - // Bring denoised signal up. - if (running_avg_uv[c] + adjustment > 255) - running_avg_uv[c] = 255; - else - running_avg_uv[c] = running_avg_uv[c] + adjustment; - sum_diff += adjustment; - } - } - // TODO(marpan): Check here if abs(sum_diff) has gone below the - // threshold sum_diff_thresh, and if so, we can exit the row loop. 
- sig += sig_stride; - mc_running_avg_uv += mc_avg_uv_stride; - running_avg_uv += avg_uv_stride; - } - if (abs(sum_diff) > sum_diff_thresh) - return COPY_BLOCK; - } else { - return COPY_BLOCK; - } - } - - vp8_copy_mem8x8(running_avg_uv_start, avg_uv_stride, sig_start, - sig_stride); - return FILTER_BLOCK; + vp8_copy_mem8x8(running_avg_uv_start, avg_uv_stride, sig_start, sig_stride); + return FILTER_BLOCK; } void vp8_denoiser_set_parameters(VP8_DENOISER *denoiser, int mode) { @@ -398,347 +388,339 @@ void vp8_denoiser_set_parameters(VP8_DENOISER *denoiser, int mode) { } int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height, - int num_mb_rows, int num_mb_cols, int mode) -{ - int i; - assert(denoiser); - denoiser->num_mb_cols = num_mb_cols; + int num_mb_rows, int num_mb_cols, int mode) { + int i; + assert(denoiser); + denoiser->num_mb_cols = num_mb_cols; - for (i = 0; i < MAX_REF_FRAMES; i++) - { - denoiser->yv12_running_avg[i].flags = 0; + for (i = 0; i < MAX_REF_FRAMES; ++i) { + denoiser->yv12_running_avg[i].flags = 0; - if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_running_avg[i]), width, - height, VP8BORDERINPIXELS) - < 0) - { - vp8_denoiser_free(denoiser); - return 1; - } - memset(denoiser->yv12_running_avg[i].buffer_alloc, 0, - denoiser->yv12_running_avg[i].frame_size); - - } - denoiser->yv12_mc_running_avg.flags = 0; - - if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_mc_running_avg), width, - height, VP8BORDERINPIXELS) < 0) - { - vp8_denoiser_free(denoiser); - return 1; - } - - memset(denoiser->yv12_mc_running_avg.buffer_alloc, 0, - denoiser->yv12_mc_running_avg.frame_size); - - if (vp8_yv12_alloc_frame_buffer(&denoiser->yv12_last_source, width, + if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_running_avg[i]), width, height, VP8BORDERINPIXELS) < 0) { vp8_denoiser_free(denoiser); return 1; } - memset(denoiser->yv12_last_source.buffer_alloc, 0, - denoiser->yv12_last_source.frame_size); + memset(denoiser->yv12_running_avg[i].buffer_alloc, 
0, + denoiser->yv12_running_avg[i].frame_size); + } + denoiser->yv12_mc_running_avg.flags = 0; - denoiser->denoise_state = vpx_calloc((num_mb_rows * num_mb_cols), 1); - memset(denoiser->denoise_state, 0, (num_mb_rows * num_mb_cols)); - vp8_denoiser_set_parameters(denoiser, mode); - denoiser->nmse_source_diff = 0; - denoiser->nmse_source_diff_count = 0; - denoiser->qp_avg = 0; - // QP threshold below which we can go up to aggressive mode. - denoiser->qp_threshold_up = 80; - // QP threshold above which we can go back down to normal mode. - // For now keep this second threshold high, so not used currently. - denoiser->qp_threshold_down = 128; - // Bitrate thresholds and noise metric (nmse) thresholds for switching to - // aggressive mode. - // TODO(marpan): Adjust thresholds, including effect on resolution. - denoiser->bitrate_threshold = 400000; // (bits/sec). - denoiser->threshold_aggressive_mode = 80; - if (width * height > 1280 * 720) { - denoiser->bitrate_threshold = 3000000; - denoiser->threshold_aggressive_mode = 200; - } else if (width * height > 960 * 540) { - denoiser->bitrate_threshold = 1200000; - denoiser->threshold_aggressive_mode = 120; - } else if (width * height > 640 * 480) { - denoiser->bitrate_threshold = 600000; - denoiser->threshold_aggressive_mode = 100; - } - return 0; + if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_mc_running_avg), width, + height, VP8BORDERINPIXELS) < 0) { + vp8_denoiser_free(denoiser); + return 1; + } + + memset(denoiser->yv12_mc_running_avg.buffer_alloc, 0, + denoiser->yv12_mc_running_avg.frame_size); + + if (vp8_yv12_alloc_frame_buffer(&denoiser->yv12_last_source, width, height, + VP8BORDERINPIXELS) < 0) { + vp8_denoiser_free(denoiser); + return 1; + } + memset(denoiser->yv12_last_source.buffer_alloc, 0, + denoiser->yv12_last_source.frame_size); + + denoiser->denoise_state = vpx_calloc((num_mb_rows * num_mb_cols), 1); + if (!denoiser->denoise_state) { + vp8_denoiser_free(denoiser); + return 1; + } + 
memset(denoiser->denoise_state, 0, (num_mb_rows * num_mb_cols)); + vp8_denoiser_set_parameters(denoiser, mode); + denoiser->nmse_source_diff = 0; + denoiser->nmse_source_diff_count = 0; + denoiser->qp_avg = 0; + // QP threshold below which we can go up to aggressive mode. + denoiser->qp_threshold_up = 80; + // QP threshold above which we can go back down to normal mode. + // For now keep this second threshold high, so not used currently. + denoiser->qp_threshold_down = 128; + // Bitrate thresholds and noise metric (nmse) thresholds for switching to + // aggressive mode. + // TODO(marpan): Adjust thresholds, including effect on resolution. + denoiser->bitrate_threshold = 400000; // (bits/sec). + denoiser->threshold_aggressive_mode = 80; + if (width * height > 1280 * 720) { + denoiser->bitrate_threshold = 3000000; + denoiser->threshold_aggressive_mode = 200; + } else if (width * height > 960 * 540) { + denoiser->bitrate_threshold = 1200000; + denoiser->threshold_aggressive_mode = 120; + } else if (width * height > 640 * 480) { + denoiser->bitrate_threshold = 600000; + denoiser->threshold_aggressive_mode = 100; + } + return 0; } +void vp8_denoiser_free(VP8_DENOISER *denoiser) { + int i; + assert(denoiser); -void vp8_denoiser_free(VP8_DENOISER *denoiser) -{ - int i; - assert(denoiser); - - for (i = 0; i < MAX_REF_FRAMES ; i++) - { - vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_running_avg[i]); - } - vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_mc_running_avg); - vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_last_source); - vpx_free(denoiser->denoise_state); + for (i = 0; i < MAX_REF_FRAMES; ++i) { + vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_running_avg[i]); + } + vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_mc_running_avg); + vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_last_source); + vpx_free(denoiser->denoise_state); } -void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser, - MACROBLOCK *x, - unsigned int best_sse, - unsigned int zero_mv_sse, - int 
recon_yoffset, - int recon_uvoffset, - loop_filter_info_n *lfi_n, - int mb_row, - int mb_col, - int block_index) +void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser, MACROBLOCK *x, + unsigned int best_sse, unsigned int zero_mv_sse, + int recon_yoffset, int recon_uvoffset, + loop_filter_info_n *lfi_n, int mb_row, int mb_col, + int block_index, int consec_zero_last) { - int mv_row; - int mv_col; - unsigned int motion_threshold; - unsigned int motion_magnitude2; - unsigned int sse_thresh; - int sse_diff_thresh = 0; - // Spatial loop filter: only applied selectively based on - // temporal filter state of block relative to top/left neighbors. - int apply_spatial_loop_filter = 1; - MV_REFERENCE_FRAME frame = x->best_reference_frame; - MV_REFERENCE_FRAME zero_frame = x->best_zeromv_reference_frame; + int mv_row; + int mv_col; + unsigned int motion_threshold; + unsigned int motion_magnitude2; + unsigned int sse_thresh; + int sse_diff_thresh = 0; + // Spatial loop filter: only applied selectively based on + // temporal filter state of block relative to top/left neighbors. + int apply_spatial_loop_filter = 1; + MV_REFERENCE_FRAME frame = x->best_reference_frame; + MV_REFERENCE_FRAME zero_frame = x->best_zeromv_reference_frame; - enum vp8_denoiser_decision decision = FILTER_BLOCK; - enum vp8_denoiser_decision decision_u = COPY_BLOCK; - enum vp8_denoiser_decision decision_v = COPY_BLOCK; + enum vp8_denoiser_decision decision = FILTER_BLOCK; + enum vp8_denoiser_decision decision_u = COPY_BLOCK; + enum vp8_denoiser_decision decision_v = COPY_BLOCK; - if (zero_frame) - { - YV12_BUFFER_CONFIG *src = &denoiser->yv12_running_avg[frame]; - YV12_BUFFER_CONFIG *dst = &denoiser->yv12_mc_running_avg; - YV12_BUFFER_CONFIG saved_pre,saved_dst; - MB_MODE_INFO saved_mbmi; - MACROBLOCKD *filter_xd = &x->e_mbd; - MB_MODE_INFO *mbmi = &filter_xd->mode_info_context->mbmi; - int sse_diff = 0; - // Bias on zero motion vector sse. 
- const int zero_bias = denoiser->denoise_pars.denoise_mv_bias; - zero_mv_sse = (unsigned int)((int64_t)zero_mv_sse * zero_bias / 100); - sse_diff = zero_mv_sse - best_sse; + if (zero_frame) { + YV12_BUFFER_CONFIG *src = &denoiser->yv12_running_avg[frame]; + YV12_BUFFER_CONFIG *dst = &denoiser->yv12_mc_running_avg; + YV12_BUFFER_CONFIG saved_pre, saved_dst; + MB_MODE_INFO saved_mbmi; + MACROBLOCKD *filter_xd = &x->e_mbd; + MB_MODE_INFO *mbmi = &filter_xd->mode_info_context->mbmi; + int sse_diff = 0; + // Bias on zero motion vector sse. + const int zero_bias = denoiser->denoise_pars.denoise_mv_bias; + zero_mv_sse = (unsigned int)((int64_t)zero_mv_sse * zero_bias / 100); + sse_diff = (int)zero_mv_sse - (int)best_sse; - saved_mbmi = *mbmi; + saved_mbmi = *mbmi; - /* Use the best MV for the compensation. */ - mbmi->ref_frame = x->best_reference_frame; - mbmi->mode = x->best_sse_inter_mode; - mbmi->mv = x->best_sse_mv; - mbmi->need_to_clamp_mvs = x->need_to_clamp_best_mvs; - mv_col = x->best_sse_mv.as_mv.col; - mv_row = x->best_sse_mv.as_mv.row; - // Bias to zero_mv if small amount of motion. - // Note sse_diff_thresh is intialized to zero, so this ensures - // we will always choose zero_mv for denoising if - // zero_mv_see <= best_sse (i.e., sse_diff <= 0). - if ((unsigned int)(mv_row * mv_row + mv_col * mv_col) - <= NOISE_MOTION_THRESHOLD) - sse_diff_thresh = (int)SSE_DIFF_THRESHOLD; + /* Use the best MV for the compensation. */ + mbmi->ref_frame = x->best_reference_frame; + mbmi->mode = x->best_sse_inter_mode; + mbmi->mv = x->best_sse_mv; + mbmi->need_to_clamp_mvs = x->need_to_clamp_best_mvs; + mv_col = x->best_sse_mv.as_mv.col; + mv_row = x->best_sse_mv.as_mv.row; + // Bias to zero_mv if small amount of motion. + // Note sse_diff_thresh is intialized to zero, so this ensures + // we will always choose zero_mv for denoising if + // zero_mv_see <= best_sse (i.e., sse_diff <= 0). 
+ if ((unsigned int)(mv_row * mv_row + mv_col * mv_col) <= + NOISE_MOTION_THRESHOLD) { + sse_diff_thresh = (int)SSE_DIFF_THRESHOLD; + } - if (frame == INTRA_FRAME || - sse_diff <= sse_diff_thresh) - { - /* - * Handle intra blocks as referring to last frame with zero motion - * and let the absolute pixel difference affect the filter factor. - * Also consider small amount of motion as being random walk due - * to noise, if it doesn't mean that we get a much bigger error. - * Note that any changes to the mode info only affects the - * denoising. - */ - x->denoise_zeromv = 1; - mbmi->ref_frame = - x->best_zeromv_reference_frame; + if (frame == INTRA_FRAME || sse_diff <= sse_diff_thresh) { + /* + * Handle intra blocks as referring to last frame with zero motion + * and let the absolute pixel difference affect the filter factor. + * Also consider small amount of motion as being random walk due + * to noise, if it doesn't mean that we get a much bigger error. + * Note that any changes to the mode info only affects the + * denoising. + */ + x->denoise_zeromv = 1; + mbmi->ref_frame = x->best_zeromv_reference_frame; - src = &denoiser->yv12_running_avg[zero_frame]; - - mbmi->mode = ZEROMV; - mbmi->mv.as_int = 0; - x->best_sse_inter_mode = ZEROMV; - x->best_sse_mv.as_int = 0; - best_sse = zero_mv_sse; - } - - saved_pre = filter_xd->pre; - saved_dst = filter_xd->dst; - - /* Compensate the running average. */ - filter_xd->pre.y_buffer = src->y_buffer + recon_yoffset; - filter_xd->pre.u_buffer = src->u_buffer + recon_uvoffset; - filter_xd->pre.v_buffer = src->v_buffer + recon_uvoffset; - /* Write the compensated running average to the destination buffer. 
*/ - filter_xd->dst.y_buffer = dst->y_buffer + recon_yoffset; - filter_xd->dst.u_buffer = dst->u_buffer + recon_uvoffset; - filter_xd->dst.v_buffer = dst->v_buffer + recon_uvoffset; - - if (!x->skip) - { - vp8_build_inter_predictors_mb(filter_xd); - } - else - { - vp8_build_inter16x16_predictors_mb(filter_xd, - filter_xd->dst.y_buffer, - filter_xd->dst.u_buffer, - filter_xd->dst.v_buffer, - filter_xd->dst.y_stride, - filter_xd->dst.uv_stride); - } - filter_xd->pre = saved_pre; - filter_xd->dst = saved_dst; - *mbmi = saved_mbmi; + src = &denoiser->yv12_running_avg[zero_frame]; + mbmi->mode = ZEROMV; + mbmi->mv.as_int = 0; + x->best_sse_inter_mode = ZEROMV; + x->best_sse_mv.as_int = 0; + best_sse = zero_mv_sse; } mv_row = x->best_sse_mv.as_mv.row; mv_col = x->best_sse_mv.as_mv.col; motion_magnitude2 = mv_row * mv_row + mv_col * mv_col; - motion_threshold = denoiser->denoise_pars.scale_motion_thresh * - NOISE_MOTION_THRESHOLD; - - // If block is considered to be skin area, lower the motion threshold. - // In current version set threshold = 0, so only denoise zero mv on skin. 
- if (x->is_skin) - motion_threshold = 0; + motion_threshold = + denoiser->denoise_pars.scale_motion_thresh * NOISE_MOTION_THRESHOLD; if (motion_magnitude2 < - denoiser->denoise_pars.scale_increase_filter * NOISE_MOTION_THRESHOLD) + denoiser->denoise_pars.scale_increase_filter * NOISE_MOTION_THRESHOLD) { x->increase_denoising = 1; + } sse_thresh = denoiser->denoise_pars.scale_sse_thresh * SSE_THRESHOLD; - if (x->increase_denoising) + if (x->increase_denoising) { sse_thresh = denoiser->denoise_pars.scale_sse_thresh * SSE_THRESHOLD_HIGH; + } - if (best_sse > sse_thresh || motion_magnitude2 > motion_threshold) + if (best_sse > sse_thresh || motion_magnitude2 > motion_threshold) { decision = COPY_BLOCK; - - if (decision == FILTER_BLOCK) - { - unsigned char *mc_running_avg_y = - denoiser->yv12_mc_running_avg.y_buffer + recon_yoffset; - int mc_avg_y_stride = denoiser->yv12_mc_running_avg.y_stride; - unsigned char *running_avg_y = - denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset; - int avg_y_stride = denoiser->yv12_running_avg[INTRA_FRAME].y_stride; - - /* Filter. */ - decision = vp8_denoiser_filter(mc_running_avg_y, mc_avg_y_stride, - running_avg_y, avg_y_stride, - x->thismb, 16, motion_magnitude2, - x->increase_denoising); - denoiser->denoise_state[block_index] = motion_magnitude2 > 0 ? - kFilterNonZeroMV : kFilterZeroMV; - // Only denoise UV for zero motion, and if y channel was denoised. 
- if (denoiser->denoiser_mode != kDenoiserOnYOnly && - motion_magnitude2 == 0 && - decision == FILTER_BLOCK) { - unsigned char *mc_running_avg_u = - denoiser->yv12_mc_running_avg.u_buffer + recon_uvoffset; - unsigned char *running_avg_u = - denoiser->yv12_running_avg[INTRA_FRAME].u_buffer + recon_uvoffset; - unsigned char *mc_running_avg_v = - denoiser->yv12_mc_running_avg.v_buffer + recon_uvoffset; - unsigned char *running_avg_v = - denoiser->yv12_running_avg[INTRA_FRAME].v_buffer + recon_uvoffset; - int mc_avg_uv_stride = denoiser->yv12_mc_running_avg.uv_stride; - int avg_uv_stride = denoiser->yv12_running_avg[INTRA_FRAME].uv_stride; - int signal_stride = x->block[16].src_stride; - decision_u = - vp8_denoiser_filter_uv(mc_running_avg_u, mc_avg_uv_stride, - running_avg_u, avg_uv_stride, - x->block[16].src + *x->block[16].base_src, - signal_stride, motion_magnitude2, 0); - decision_v = - vp8_denoiser_filter_uv(mc_running_avg_v, mc_avg_uv_stride, - running_avg_v, avg_uv_stride, - x->block[20].src + *x->block[20].base_src, - signal_stride, motion_magnitude2, 0); - } } - if (decision == COPY_BLOCK) - { - /* No filtering of this block; it differs too much from the predictor, - * or the motion vector magnitude is considered too big. - */ - x->denoise_zeromv = 0; - vp8_copy_mem16x16( - x->thismb, 16, - denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset, - denoiser->yv12_running_avg[INTRA_FRAME].y_stride); - denoiser->denoise_state[block_index] = kNoFilter; + + // If block is considered skin, don't denoise if the block + // (1) is selected as non-zero motion for current frame, or + // (2) has not been selected as ZERO_LAST mode at least x past frames + // in a row. + // TODO(marpan): Parameter "x" should be varied with framerate. + // In particualar, should be reduced for layers (base layer/LAST). 
+ if (x->is_skin && (consec_zero_last < 2 || motion_magnitude2 > 0)) { + decision = COPY_BLOCK; } - if (denoiser->denoiser_mode != kDenoiserOnYOnly) { - if (decision_u == COPY_BLOCK) { - vp8_copy_mem8x8( - x->block[16].src + *x->block[16].base_src, x->block[16].src_stride, - denoiser->yv12_running_avg[INTRA_FRAME].u_buffer + recon_uvoffset, - denoiser->yv12_running_avg[INTRA_FRAME].uv_stride); - } - if (decision_v == COPY_BLOCK) { - vp8_copy_mem8x8( - x->block[20].src + *x->block[20].base_src, x->block[16].src_stride, - denoiser->yv12_running_avg[INTRA_FRAME].v_buffer + recon_uvoffset, - denoiser->yv12_running_avg[INTRA_FRAME].uv_stride); + + if (decision == FILTER_BLOCK) { + saved_pre = filter_xd->pre; + saved_dst = filter_xd->dst; + + /* Compensate the running average. */ + filter_xd->pre.y_buffer = src->y_buffer + recon_yoffset; + filter_xd->pre.u_buffer = src->u_buffer + recon_uvoffset; + filter_xd->pre.v_buffer = src->v_buffer + recon_uvoffset; + /* Write the compensated running average to the destination buffer. */ + filter_xd->dst.y_buffer = dst->y_buffer + recon_yoffset; + filter_xd->dst.u_buffer = dst->u_buffer + recon_uvoffset; + filter_xd->dst.v_buffer = dst->v_buffer + recon_uvoffset; + + if (!x->skip) { + vp8_build_inter_predictors_mb(filter_xd); + } else { + vp8_build_inter16x16_predictors_mb( + filter_xd, filter_xd->dst.y_buffer, filter_xd->dst.u_buffer, + filter_xd->dst.v_buffer, filter_xd->dst.y_stride, + filter_xd->dst.uv_stride); } + filter_xd->pre = saved_pre; + filter_xd->dst = saved_dst; + *mbmi = saved_mbmi; } - // Option to selectively deblock the denoised signal, for y channel only. 
- if (apply_spatial_loop_filter) { - loop_filter_info lfi; - int apply_filter_col = 0; - int apply_filter_row = 0; - int apply_filter = 0; - int y_stride = denoiser->yv12_running_avg[INTRA_FRAME].y_stride; - int uv_stride =denoiser->yv12_running_avg[INTRA_FRAME].uv_stride; + } else { + // zero_frame should always be 1 for real-time mode, as the + // ZEROMV mode is always checked, so we should never go into this branch. + // If case ZEROMV is not checked, then we will force no denoise (COPY). + decision = COPY_BLOCK; + } - // Fix filter level to some nominal value for now. - int filter_level = 48; + if (decision == FILTER_BLOCK) { + unsigned char *mc_running_avg_y = + denoiser->yv12_mc_running_avg.y_buffer + recon_yoffset; + int mc_avg_y_stride = denoiser->yv12_mc_running_avg.y_stride; + unsigned char *running_avg_y = + denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset; + int avg_y_stride = denoiser->yv12_running_avg[INTRA_FRAME].y_stride; - int hev_index = lfi_n->hev_thr_lut[INTER_FRAME][filter_level]; - lfi.mblim = lfi_n->mblim[filter_level]; - lfi.blim = lfi_n->blim[filter_level]; - lfi.lim = lfi_n->lim[filter_level]; - lfi.hev_thr = lfi_n->hev_thr[hev_index]; + /* Filter. */ + decision = vp8_denoiser_filter(mc_running_avg_y, mc_avg_y_stride, + running_avg_y, avg_y_stride, x->thismb, 16, + motion_magnitude2, x->increase_denoising); + denoiser->denoise_state[block_index] = + motion_magnitude2 > 0 ? kFilterNonZeroMV : kFilterZeroMV; + // Only denoise UV for zero motion, and if y channel was denoised. 
+ if (denoiser->denoiser_mode != kDenoiserOnYOnly && motion_magnitude2 == 0 && + decision == FILTER_BLOCK) { + unsigned char *mc_running_avg_u = + denoiser->yv12_mc_running_avg.u_buffer + recon_uvoffset; + unsigned char *running_avg_u = + denoiser->yv12_running_avg[INTRA_FRAME].u_buffer + recon_uvoffset; + unsigned char *mc_running_avg_v = + denoiser->yv12_mc_running_avg.v_buffer + recon_uvoffset; + unsigned char *running_avg_v = + denoiser->yv12_running_avg[INTRA_FRAME].v_buffer + recon_uvoffset; + int mc_avg_uv_stride = denoiser->yv12_mc_running_avg.uv_stride; + int avg_uv_stride = denoiser->yv12_running_avg[INTRA_FRAME].uv_stride; + int signal_stride = x->block[16].src_stride; + decision_u = vp8_denoiser_filter_uv( + mc_running_avg_u, mc_avg_uv_stride, running_avg_u, avg_uv_stride, + x->block[16].src + *x->block[16].base_src, signal_stride, + motion_magnitude2, 0); + decision_v = vp8_denoiser_filter_uv( + mc_running_avg_v, mc_avg_uv_stride, running_avg_v, avg_uv_stride, + x->block[20].src + *x->block[20].base_src, signal_stride, + motion_magnitude2, 0); + } + } + if (decision == COPY_BLOCK) { + /* No filtering of this block; it differs too much from the predictor, + * or the motion vector magnitude is considered too big. 
+ */ + x->denoise_zeromv = 0; + vp8_copy_mem16x16( + x->thismb, 16, + denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset, + denoiser->yv12_running_avg[INTRA_FRAME].y_stride); + denoiser->denoise_state[block_index] = kNoFilter; + } + if (denoiser->denoiser_mode != kDenoiserOnYOnly) { + if (decision_u == COPY_BLOCK) { + vp8_copy_mem8x8( + x->block[16].src + *x->block[16].base_src, x->block[16].src_stride, + denoiser->yv12_running_avg[INTRA_FRAME].u_buffer + recon_uvoffset, + denoiser->yv12_running_avg[INTRA_FRAME].uv_stride); + } + if (decision_v == COPY_BLOCK) { + vp8_copy_mem8x8( + x->block[20].src + *x->block[20].base_src, x->block[16].src_stride, + denoiser->yv12_running_avg[INTRA_FRAME].v_buffer + recon_uvoffset, + denoiser->yv12_running_avg[INTRA_FRAME].uv_stride); + } + } + // Option to selectively deblock the denoised signal, for y channel only. + if (apply_spatial_loop_filter) { + loop_filter_info lfi; + int apply_filter_col = 0; + int apply_filter_row = 0; + int apply_filter = 0; + int y_stride = denoiser->yv12_running_avg[INTRA_FRAME].y_stride; + int uv_stride = denoiser->yv12_running_avg[INTRA_FRAME].uv_stride; - // Apply filter if there is a difference in the denoiser filter state - // between the current and left/top block, or if non-zero motion vector - // is used for the motion-compensated filtering. - if (mb_col > 0) { - apply_filter_col = !((denoiser->denoise_state[block_index] == - denoiser->denoise_state[block_index - 1]) && + // Fix filter level to some nominal value for now. + int filter_level = 48; + + int hev_index = lfi_n->hev_thr_lut[INTER_FRAME][filter_level]; + lfi.mblim = lfi_n->mblim[filter_level]; + lfi.blim = lfi_n->blim[filter_level]; + lfi.lim = lfi_n->lim[filter_level]; + lfi.hev_thr = lfi_n->hev_thr[hev_index]; + + // Apply filter if there is a difference in the denoiser filter state + // between the current and left/top block, or if non-zero motion vector + // is used for the motion-compensated filtering. 
+ if (mb_col > 0) { + apply_filter_col = + !((denoiser->denoise_state[block_index] == + denoiser->denoise_state[block_index - 1]) && denoiser->denoise_state[block_index] != kFilterNonZeroMV); - if (apply_filter_col) { - // Filter left vertical edge. - apply_filter = 1; - vp8_loop_filter_mbv( - denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset, - NULL, NULL, y_stride, uv_stride, &lfi); - } - } - if (mb_row > 0) { - apply_filter_row = !((denoiser->denoise_state[block_index] == - denoiser->denoise_state[block_index - denoiser->num_mb_cols]) && - denoiser->denoise_state[block_index] != kFilterNonZeroMV); - if (apply_filter_row) { - // Filter top horizontal edge. - apply_filter = 1; - vp8_loop_filter_mbh( - denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset, - NULL, NULL, y_stride, uv_stride, &lfi); - } - } - if (apply_filter) { - // Update the signal block |x|. Pixel changes are only to top and/or - // left boundary pixels: can we avoid full block copy here. - vp8_copy_mem16x16( + if (apply_filter_col) { + // Filter left vertical edge. + apply_filter = 1; + vp8_loop_filter_mbv( denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset, - y_stride, x->thismb, 16); + NULL, NULL, y_stride, uv_stride, &lfi); } } + if (mb_row > 0) { + apply_filter_row = + !((denoiser->denoise_state[block_index] == + denoiser->denoise_state[block_index - denoiser->num_mb_cols]) && + denoiser->denoise_state[block_index] != kFilterNonZeroMV); + if (apply_filter_row) { + // Filter top horizontal edge. + apply_filter = 1; + vp8_loop_filter_mbh( + denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset, + NULL, NULL, y_stride, uv_stride, &lfi); + } + } + if (apply_filter) { + // Update the signal block |x|. Pixel changes are only to top and/or + // left boundary pixels: can we avoid full block copy here. 
+ vp8_copy_mem16x16( + denoiser->yv12_running_avg[INTRA_FRAME].y_buffer + recon_yoffset, + y_stride, x->thismb, 16); + } + } } diff --git a/libs/libvpx/vp8/encoder/denoising.h b/libs/libvpx/vp8/encoder/denoising.h index 9a379a6a16..91d87b3a1c 100644 --- a/libs/libvpx/vp8/encoder/denoising.h +++ b/libs/libvpx/vp8/encoder/denoising.h @@ -18,28 +18,20 @@ extern "C" { #endif -#define SUM_DIFF_THRESHOLD (16 * 16 * 2) -#define SUM_DIFF_THRESHOLD_HIGH (600) // ~(16 * 16 * 1.5) -#define MOTION_MAGNITUDE_THRESHOLD (8*3) +#define SUM_DIFF_THRESHOLD 512 +#define SUM_DIFF_THRESHOLD_HIGH 600 +#define MOTION_MAGNITUDE_THRESHOLD (8 * 3) -#define SUM_DIFF_THRESHOLD_UV (96) // (8 * 8 * 1.5) +#define SUM_DIFF_THRESHOLD_UV (96) // (8 * 8 * 1.5) #define SUM_DIFF_THRESHOLD_HIGH_UV (8 * 8 * 2) #define SUM_DIFF_FROM_AVG_THRESH_UV (8 * 8 * 8) -#define MOTION_MAGNITUDE_THRESHOLD_UV (8*3) +#define MOTION_MAGNITUDE_THRESHOLD_UV (8 * 3) #define MAX_GF_ARF_DENOISE_RANGE (8) -enum vp8_denoiser_decision -{ - COPY_BLOCK, - FILTER_BLOCK -}; +enum vp8_denoiser_decision { COPY_BLOCK, FILTER_BLOCK }; -enum vp8_denoiser_filter_state { - kNoFilter, - kFilterZeroMV, - kFilterNonZeroMV -}; +enum vp8_denoiser_filter_state { kNoFilter, kFilterZeroMV, kFilterNonZeroMV }; enum vp8_denoiser_mode { kDenoiserOff, @@ -73,23 +65,22 @@ typedef struct { unsigned int spatial_blur; } denoise_params; -typedef struct vp8_denoiser -{ - YV12_BUFFER_CONFIG yv12_running_avg[MAX_REF_FRAMES]; - YV12_BUFFER_CONFIG yv12_mc_running_avg; - // TODO(marpan): Should remove yv12_last_source and use vp8_lookahead_peak. 
- YV12_BUFFER_CONFIG yv12_last_source; - unsigned char* denoise_state; - int num_mb_cols; - int denoiser_mode; - int threshold_aggressive_mode; - int nmse_source_diff; - int nmse_source_diff_count; - int qp_avg; - int qp_threshold_up; - int qp_threshold_down; - int bitrate_threshold; - denoise_params denoise_pars; +typedef struct vp8_denoiser { + YV12_BUFFER_CONFIG yv12_running_avg[MAX_REF_FRAMES]; + YV12_BUFFER_CONFIG yv12_mc_running_avg; + // TODO(marpan): Should remove yv12_last_source and use vp8_lookahead_peak. + YV12_BUFFER_CONFIG yv12_last_source; + unsigned char *denoise_state; + int num_mb_cols; + int denoiser_mode; + int threshold_aggressive_mode; + int nmse_source_diff; + int nmse_source_diff_count; + int qp_avg; + int qp_threshold_up; + int qp_threshold_down; + int bitrate_threshold; + denoise_params denoise_pars; } VP8_DENOISER; int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height, @@ -99,16 +90,11 @@ void vp8_denoiser_free(VP8_DENOISER *denoiser); void vp8_denoiser_set_parameters(VP8_DENOISER *denoiser, int mode); -void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser, - MACROBLOCK *x, - unsigned int best_sse, - unsigned int zero_mv_sse, - int recon_yoffset, - int recon_uvoffset, - loop_filter_info_n *lfi_n, - int mb_row, - int mb_col, - int block_index); +void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser, MACROBLOCK *x, + unsigned int best_sse, unsigned int zero_mv_sse, + int recon_yoffset, int recon_uvoffset, + loop_filter_info_n *lfi_n, int mb_row, int mb_col, + int block_index, int consec_zero_last); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/encoder/encodeframe.c b/libs/libvpx/vp8/encoder/encodeframe.c index 9b05cd1fcd..e41d513c1b 100644 --- a/libs/libvpx/vp8/encoder/encodeframe.c +++ b/libs/libvpx/vp8/encoder/encodeframe.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "vpx_config.h" #include "vp8_rtcd.h" #include "./vpx_dsp_rtcd.h" @@ -35,31 +34,27 @@ #endif #include "encodeframe.h" -extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) ; -extern void vp8_calc_ref_frame_costs(int *ref_frame_cost, - int prob_intra, - int prob_last, - int prob_garf - ); +extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t); +extern void vp8_calc_ref_frame_costs(int *ref_frame_cost, int prob_intra, + int prob_last, int prob_garf); extern void vp8_convert_rfct_to_prob(VP8_COMP *const cpi); extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex); extern void vp8_auto_select_speed(VP8_COMP *cpi); -extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi, - MACROBLOCK *x, - MB_ROW_COMP *mbr_ei, - int count); -static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x ); +extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi, MACROBLOCK *x, + MB_ROW_COMP *mbr_ei, int count); +static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x); #ifdef MODE_STATS -unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; -unsigned int inter_uv_modes[4] = {0, 0, 0, 0}; -unsigned int inter_b_modes[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; -unsigned int y_modes[5] = {0, 0, 0, 0, 0}; -unsigned int uv_modes[4] = {0, 0, 0, 0}; -unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +unsigned int inter_y_modes[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; +unsigned int inter_uv_modes[4] = { 0, 0, 0, 0 }; +unsigned int inter_b_modes[15] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; +unsigned int y_modes[5] = { 0, 0, 0, 0, 0 }; +unsigned int uv_modes[4] = { 0, 0, 0, 0 }; +unsigned int b_modes[14] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; #endif - /* activity_avg must be positive, or flat regions could get a zero weight * (infinite lambda), which confounds analysis. 
* This also avoids the need for divide by zero checks in @@ -72,707 +67,642 @@ unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; * Eventually this should be replaced by custom no-reference routines, * which will be faster. */ -static const unsigned char VP8_VAR_OFFS[16]= -{ - 128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128 +static const unsigned char VP8_VAR_OFFS[16] = { + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }; - /* Original activity measure from Tim T's code. */ -static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x ) -{ - unsigned int act; - unsigned int sse; - (void)cpi; - /* TODO: This could also be done over smaller areas (8x8), but that would - * require extensive changes elsewhere, as lambda is assumed to be fixed - * over an entire MB in most of the code. - * Another option is to compute four 8x8 variances, and pick a single - * lambda using a non-linear combination (e.g., the smallest, or second - * smallest, etc.). - */ - act = vpx_variance16x16(x->src.y_buffer, - x->src.y_stride, VP8_VAR_OFFS, 0, &sse); - act = act<<4; +static unsigned int tt_activity_measure(VP8_COMP *cpi, MACROBLOCK *x) { + unsigned int act; + unsigned int sse; + (void)cpi; + /* TODO: This could also be done over smaller areas (8x8), but that would + * require extensive changes elsewhere, as lambda is assumed to be fixed + * over an entire MB in most of the code. + * Another option is to compute four 8x8 variances, and pick a single + * lambda using a non-linear combination (e.g., the smallest, or second + * smallest, etc.). + */ + act = vpx_variance16x16(x->src.y_buffer, x->src.y_stride, VP8_VAR_OFFS, 0, + &sse); + act = act << 4; - /* If the region is flat, lower the activity some more. */ - if (act < 8<<12) - act = act < 5<<12 ? act : 5<<12; + /* If the region is flat, lower the activity some more. */ + if (act < 8 << 12) act = act < 5 << 12 ? 
act : 5 << 12; - return act; + return act; } /* Stub for alternative experimental activity measures. */ -static unsigned int alt_activity_measure( VP8_COMP *cpi, - MACROBLOCK *x, int use_dc_pred ) -{ - return vp8_encode_intra(cpi,x, use_dc_pred); +static unsigned int alt_activity_measure(VP8_COMP *cpi, MACROBLOCK *x, + int use_dc_pred) { + return vp8_encode_intra(cpi, x, use_dc_pred); } - /* Measure the activity of the current macroblock * What we measure here is TBD so abstracted to this function */ #define ALT_ACT_MEASURE 1 -static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x, - int mb_row, int mb_col) -{ - unsigned int mb_activity; +static unsigned int mb_activity_measure(VP8_COMP *cpi, MACROBLOCK *x, + int mb_row, int mb_col) { + unsigned int mb_activity; - if ( ALT_ACT_MEASURE ) - { - int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row); + if (ALT_ACT_MEASURE) { + int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row); - /* Or use and alternative. */ - mb_activity = alt_activity_measure( cpi, x, use_dc_pred ); - } - else - { - /* Original activity measure from Tim T's code. */ - mb_activity = tt_activity_measure( cpi, x ); - } + /* Or use and alternative. */ + mb_activity = alt_activity_measure(cpi, x, use_dc_pred); + } else { + /* Original activity measure from Tim T's code. 
*/ + mb_activity = tt_activity_measure(cpi, x); + } - if ( mb_activity < VP8_ACTIVITY_AVG_MIN ) - mb_activity = VP8_ACTIVITY_AVG_MIN; + if (mb_activity < VP8_ACTIVITY_AVG_MIN) mb_activity = VP8_ACTIVITY_AVG_MIN; - return mb_activity; + return mb_activity; } /* Calculate an "average" mb activity value for the frame */ #define ACT_MEDIAN 0 -static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum ) -{ +static void calc_av_activity(VP8_COMP *cpi, int64_t activity_sum) { #if ACT_MEDIAN - /* Find median: Simple n^2 algorithm for experimentation */ - { - unsigned int median; - unsigned int i,j; - unsigned int * sortlist; - unsigned int tmp; + /* Find median: Simple n^2 algorithm for experimentation */ + { + unsigned int median; + unsigned int i, j; + unsigned int *sortlist; + unsigned int tmp; - /* Create a list to sort to */ - CHECK_MEM_ERROR(sortlist, - vpx_calloc(sizeof(unsigned int), - cpi->common.MBs)); + /* Create a list to sort to */ + CHECK_MEM_ERROR(sortlist, + vpx_calloc(sizeof(unsigned int), cpi->common.MBs)); - /* Copy map to sort list */ - memcpy( sortlist, cpi->mb_activity_map, - sizeof(unsigned int) * cpi->common.MBs ); + /* Copy map to sort list */ + memcpy(sortlist, cpi->mb_activity_map, + sizeof(unsigned int) * cpi->common.MBs); - - /* Ripple each value down to its correct position */ - for ( i = 1; i < cpi->common.MBs; i ++ ) - { - for ( j = i; j > 0; j -- ) - { - if ( sortlist[j] < sortlist[j-1] ) - { - /* Swap values */ - tmp = sortlist[j-1]; - sortlist[j-1] = sortlist[j]; - sortlist[j] = tmp; - } - else - break; - } - } - - /* Even number MBs so estimate median as mean of two either side. 
*/ - median = ( 1 + sortlist[cpi->common.MBs >> 1] + - sortlist[(cpi->common.MBs >> 1) + 1] ) >> 1; - - cpi->activity_avg = median; - - vpx_free(sortlist); + /* Ripple each value down to its correct position */ + for (i = 1; i < cpi->common.MBs; ++i) { + for (j = i; j > 0; j--) { + if (sortlist[j] < sortlist[j - 1]) { + /* Swap values */ + tmp = sortlist[j - 1]; + sortlist[j - 1] = sortlist[j]; + sortlist[j] = tmp; + } else + break; + } } + + /* Even number MBs so estimate median as mean of two either side. */ + median = (1 + sortlist[cpi->common.MBs >> 1] + + sortlist[(cpi->common.MBs >> 1) + 1]) >> + 1; + + cpi->activity_avg = median; + + vpx_free(sortlist); + } #else - /* Simple mean for now */ - cpi->activity_avg = (unsigned int)(activity_sum/cpi->common.MBs); + /* Simple mean for now */ + cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs); #endif - if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN) - cpi->activity_avg = VP8_ACTIVITY_AVG_MIN; + if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN) { + cpi->activity_avg = VP8_ACTIVITY_AVG_MIN; + } - /* Experimental code: return fixed value normalized for several clips */ - if ( ALT_ACT_MEASURE ) - cpi->activity_avg = 100000; + /* Experimental code: return fixed value normalized for several clips */ + if (ALT_ACT_MEASURE) cpi->activity_avg = 100000; } -#define USE_ACT_INDEX 0 -#define OUTPUT_NORM_ACT_STATS 0 +#define USE_ACT_INDEX 0 +#define OUTPUT_NORM_ACT_STATS 0 #if USE_ACT_INDEX /* Calculate and activity index for each mb */ -static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x ) -{ - VP8_COMMON *const cm = & cpi->common; - int mb_row, mb_col; +static void calc_activity_index(VP8_COMP *cpi, MACROBLOCK *x) { + VP8_COMMON *const cm = &cpi->common; + int mb_row, mb_col; - int64_t act; - int64_t a; - int64_t b; + int64_t act; + int64_t a; + int64_t b; #if OUTPUT_NORM_ACT_STATS - FILE *f = fopen("norm_act.stt", "a"); - fprintf(f, "\n%12d\n", cpi->activity_avg ); + FILE *f = fopen("norm_act.stt", "a"); 
+ fprintf(f, "\n%12d\n", cpi->activity_avg); #endif - /* Reset pointers to start of activity map */ - x->mb_activity_ptr = cpi->mb_activity_map; + /* Reset pointers to start of activity map */ + x->mb_activity_ptr = cpi->mb_activity_map; - /* Calculate normalized mb activity number. */ - for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) - { - /* for each macroblock col in image */ - for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) - { - /* Read activity from the map */ - act = *(x->mb_activity_ptr); + /* Calculate normalized mb activity number. */ + for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { + /* for each macroblock col in image */ + for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { + /* Read activity from the map */ + act = *(x->mb_activity_ptr); - /* Calculate a normalized activity number */ - a = act + 4*cpi->activity_avg; - b = 4*act + cpi->activity_avg; + /* Calculate a normalized activity number */ + a = act + 4 * cpi->activity_avg; + b = 4 * act + cpi->activity_avg; - if ( b >= a ) - *(x->activity_ptr) = (int)((b + (a>>1))/a) - 1; - else - *(x->activity_ptr) = 1 - (int)((a + (b>>1))/b); + if (b >= a) + *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1; + else + *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b); #if OUTPUT_NORM_ACT_STATS - fprintf(f, " %6d", *(x->mb_activity_ptr)); + fprintf(f, " %6d", *(x->mb_activity_ptr)); #endif - /* Increment activity map pointers */ - x->mb_activity_ptr++; - } - -#if OUTPUT_NORM_ACT_STATS - fprintf(f, "\n"); -#endif - + /* Increment activity map pointers */ + x->mb_activity_ptr++; } #if OUTPUT_NORM_ACT_STATS - fclose(f); + fprintf(f, "\n"); #endif + } +#if OUTPUT_NORM_ACT_STATS + fclose(f); +#endif } #endif /* Loop through all MBs. 
Note activity of each, average activity and * calculate a normalized activity for each */ -static void build_activity_map( VP8_COMP *cpi ) -{ - MACROBLOCK *const x = & cpi->mb; - MACROBLOCKD *xd = &x->e_mbd; - VP8_COMMON *const cm = & cpi->common; +static void build_activity_map(VP8_COMP *cpi) { + MACROBLOCK *const x = &cpi->mb; + MACROBLOCKD *xd = &x->e_mbd; + VP8_COMMON *const cm = &cpi->common; #if ALT_ACT_MEASURE - YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx]; - int recon_yoffset; - int recon_y_stride = new_yv12->y_stride; + YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx]; + int recon_yoffset; + int recon_y_stride = new_yv12->y_stride; #endif - int mb_row, mb_col; - unsigned int mb_activity; - int64_t activity_sum = 0; + int mb_row, mb_col; + unsigned int mb_activity; + int64_t activity_sum = 0; - /* for each macroblock row in image */ - for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) - { + /* for each macroblock row in image */ + for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { #if ALT_ACT_MEASURE - /* reset above block coeffs */ - xd->up_available = (mb_row != 0); - recon_yoffset = (mb_row * recon_y_stride * 16); + /* reset above block coeffs */ + xd->up_available = (mb_row != 0); + recon_yoffset = (mb_row * recon_y_stride * 16); #endif - /* for each macroblock col in image */ - for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) - { + /* for each macroblock col in image */ + for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { #if ALT_ACT_MEASURE - xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset; - xd->left_available = (mb_col != 0); - recon_yoffset += 16; + xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset; + xd->left_available = (mb_col != 0); + recon_yoffset += 16; #endif - /* Copy current mb to a buffer */ - vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); + /* Copy current mb to a buffer */ + vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); - /* measure activity */ - mb_activity = 
mb_activity_measure( cpi, x, mb_row, mb_col ); + /* measure activity */ + mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col); - /* Keep frame sum */ - activity_sum += mb_activity; + /* Keep frame sum */ + activity_sum += mb_activity; - /* Store MB level activity details. */ - *x->mb_activity_ptr = mb_activity; + /* Store MB level activity details. */ + *x->mb_activity_ptr = mb_activity; - /* Increment activity map pointer */ - x->mb_activity_ptr++; - - /* adjust to the next column of source macroblocks */ - x->src.y_buffer += 16; - } - - - /* adjust to the next row of mbs */ - x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols; - -#if ALT_ACT_MEASURE - /* extend the recon for intra prediction */ - vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, - xd->dst.u_buffer + 8, xd->dst.v_buffer + 8); -#endif + /* Increment activity map pointer */ + x->mb_activity_ptr++; + /* adjust to the next column of source macroblocks */ + x->src.y_buffer += 16; } - /* Calculate an "average" MB activity */ - calc_av_activity(cpi, activity_sum); + /* adjust to the next row of mbs */ + x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols; + +#if ALT_ACT_MEASURE + /* extend the recon for intra prediction */ + vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, + xd->dst.v_buffer + 8); +#endif + } + + /* Calculate an "average" MB activity */ + calc_av_activity(cpi, activity_sum); #if USE_ACT_INDEX - /* Calculate an activity index number of each mb */ - calc_activity_index( cpi, x ); + /* Calculate an activity index number of each mb */ + calc_activity_index(cpi, x); #endif - } /* Macroblock activity masking */ -void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x) -{ +void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x) { #if USE_ACT_INDEX - x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2); - x->errorperbit = x->rdmult * 100 /(110 * x->rddiv); - x->errorperbit += (x->errorperbit==0); + x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 
2); + x->errorperbit = x->rdmult * 100 / (110 * x->rddiv); + x->errorperbit += (x->errorperbit == 0); #else - int64_t a; - int64_t b; - int64_t act = *(x->mb_activity_ptr); + int64_t a; + int64_t b; + int64_t act = *(x->mb_activity_ptr); - /* Apply the masking to the RD multiplier. */ - a = act + (2*cpi->activity_avg); - b = (2*act) + cpi->activity_avg; + /* Apply the masking to the RD multiplier. */ + a = act + (2 * cpi->activity_avg); + b = (2 * act) + cpi->activity_avg; - x->rdmult = (unsigned int)(((int64_t)x->rdmult*b + (a>>1))/a); - x->errorperbit = x->rdmult * 100 /(110 * x->rddiv); - x->errorperbit += (x->errorperbit==0); + x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a); + x->errorperbit = x->rdmult * 100 / (110 * x->rddiv); + x->errorperbit += (x->errorperbit == 0); #endif - /* Activity based Zbin adjustment */ - adjust_act_zbin(cpi, x); + /* Activity based Zbin adjustment */ + adjust_act_zbin(cpi, x); } -static -void encode_mb_row(VP8_COMP *cpi, - VP8_COMMON *cm, - int mb_row, - MACROBLOCK *x, - MACROBLOCKD *xd, - TOKENEXTRA **tp, - int *segment_counts, - int *totalrate) -{ - int recon_yoffset, recon_uvoffset; - int mb_col; - int ref_fb_idx = cm->lst_fb_idx; - int dst_fb_idx = cm->new_fb_idx; - int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride; - int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride; - int map_index = (mb_row * cpi->common.mb_cols); +static void encode_mb_row(VP8_COMP *cpi, VP8_COMMON *cm, int mb_row, + MACROBLOCK *x, MACROBLOCKD *xd, TOKENEXTRA **tp, + int *segment_counts, int *totalrate) { + int recon_yoffset, recon_uvoffset; + int mb_col; + int ref_fb_idx = cm->lst_fb_idx; + int dst_fb_idx = cm->new_fb_idx; + int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride; + int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride; + int map_index = (mb_row * cpi->common.mb_cols); #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) - const int num_part = (1 << cm->multi_token_partition); - TOKENEXTRA * tp_start = 
cpi->tok; - vp8_writer *w; + const int num_part = (1 << cm->multi_token_partition); + TOKENEXTRA *tp_start = cpi->tok; + vp8_writer *w; #endif #if CONFIG_MULTITHREAD - const int nsync = cpi->mt_sync_range; - const int rightmost_col = cm->mb_cols + nsync; - const int *last_row_current_mb_col; - int *current_mb_col = &cpi->mt_current_mb_col[mb_row]; + const int nsync = cpi->mt_sync_range; + const int rightmost_col = cm->mb_cols + nsync; + volatile const int *last_row_current_mb_col; + volatile int *current_mb_col = &cpi->mt_current_mb_col[mb_row]; - if ((cpi->b_multi_threaded != 0) && (mb_row != 0)) - last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1]; - else - last_row_current_mb_col = &rightmost_col; + if ((cpi->b_multi_threaded != 0) && (mb_row != 0)) { + last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1]; + } else { + last_row_current_mb_col = &rightmost_col; + } #endif #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) - if(num_part > 1) - w= &cpi->bc[1 + (mb_row % num_part)]; - else - w = &cpi->bc[1]; + if (num_part > 1) + w = &cpi->bc[1 + (mb_row % num_part)]; + else + w = &cpi->bc[1]; #endif - /* reset above block coeffs */ - xd->above_context = cm->above_context; + /* reset above block coeffs */ + xd->above_context = cm->above_context; - xd->up_available = (mb_row != 0); - recon_yoffset = (mb_row * recon_y_stride * 16); - recon_uvoffset = (mb_row * recon_uv_stride * 8); + xd->up_available = (mb_row != 0); + recon_yoffset = (mb_row * recon_y_stride * 16); + recon_uvoffset = (mb_row * recon_uv_stride * 8); - cpi->tplist[mb_row].start = *tp; - /* printf("Main mb_row = %d\n", mb_row); */ + cpi->tplist[mb_row].start = *tp; + /* printf("Main mb_row = %d\n", mb_row); */ - /* Distance of Mb to the top & bottom edges, specified in 1/8th pel - * units as they are always compared to values that are in 1/8th pel + /* Distance of Mb to the top & bottom edges, specified in 1/8th pel + * units as they are always compared to values that are in 
1/8th pel + */ + xd->mb_to_top_edge = -((mb_row * 16) << 3); + xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3; + + /* Set up limit values for vertical motion vector components + * to prevent them extending beyond the UMV borders + */ + x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16)); + x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16); + + /* Set the mb activity pointer to the start of the row. */ + x->mb_activity_ptr = &cpi->mb_activity_map[map_index]; + + /* for each macroblock col in image */ + for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { +#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) + *tp = cpi->tok; +#endif + /* Distance of Mb to the left & right edges, specified in + * 1/8th pel units as they are always compared to values + * that are in 1/8th pel units */ - xd->mb_to_top_edge = -((mb_row * 16) << 3); - xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3; + xd->mb_to_left_edge = -((mb_col * 16) << 3); + xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3; - /* Set up limit values for vertical motion vector components + /* Set up limit values for horizontal motion vector components * to prevent them extending beyond the UMV borders */ - x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16)); - x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) - + (VP8BORDERINPIXELS - 16); + x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16)); + x->mv_col_max = + ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16); - /* Set the mb activity pointer to the start of the row. 
*/ - x->mb_activity_ptr = &cpi->mb_activity_map[map_index]; + xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset; + xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset; + xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset; + xd->left_available = (mb_col != 0); - /* for each macroblock col in image */ - for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) - { + x->rddiv = cpi->RDDIV; + x->rdmult = cpi->RDMULT; -#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) - *tp = cpi->tok; -#endif - /* Distance of Mb to the left & right edges, specified in - * 1/8th pel units as they are always compared to values - * that are in 1/8th pel units - */ - xd->mb_to_left_edge = -((mb_col * 16) << 3); - xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3; - - /* Set up limit values for horizontal motion vector components - * to prevent them extending beyond the UMV borders - */ - x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16)); - x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) - + (VP8BORDERINPIXELS - 16); - - xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset; - xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset; - xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset; - xd->left_available = (mb_col != 0); - - x->rddiv = cpi->RDDIV; - x->rdmult = cpi->RDMULT; - - /* Copy current mb to a buffer */ - vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); + /* Copy current mb to a buffer */ + vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); #if CONFIG_MULTITHREAD - if (cpi->b_multi_threaded != 0) { - if (((mb_col - 1) % nsync) == 0) { - pthread_mutex_t *mutex = &cpi->pmutex[mb_row]; - protected_write(mutex, current_mb_col, mb_col - 1); - } + if (cpi->b_multi_threaded != 0) { + *current_mb_col = mb_col - 1; /* set previous MB done */ - if (mb_row && !(mb_col & (nsync - 1))) { - pthread_mutex_t *mutex = &cpi->pmutex[mb_row-1]; - 
sync_read(mutex, mb_col, last_row_current_mb_col, nsync); - } + if ((mb_col & (nsync - 1)) == 0) { + while (mb_col > (*last_row_current_mb_col - nsync)) { + x86_pause_hint(); + thread_sleep(0); } + } + } #endif - if(cpi->oxcf.tuning == VP8_TUNE_SSIM) - vp8_activity_masking(cpi, x); + if (cpi->oxcf.tuning == VP8_TUNE_SSIM) vp8_activity_masking(cpi, x); - /* Is segmentation enabled */ - /* MB level adjustment to quantizer */ - if (xd->segmentation_enabled) - { - /* Code to set segment id in xd->mbmi.segment_id for current MB - * (with range checking) - */ - if (cpi->segmentation_map[map_index+mb_col] <= 3) - xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index+mb_col]; - else - xd->mode_info_context->mbmi.segment_id = 0; + /* Is segmentation enabled */ + /* MB level adjustment to quantizer */ + if (xd->segmentation_enabled) { + /* Code to set segment id in xd->mbmi.segment_id for current MB + * (with range checking) + */ + if (cpi->segmentation_map[map_index + mb_col] <= 3) { + xd->mode_info_context->mbmi.segment_id = + cpi->segmentation_map[map_index + mb_col]; + } else { + xd->mode_info_context->mbmi.segment_id = 0; + } - vp8cx_mb_init_quantizer(cpi, x, 1); - } - else - /* Set to Segment 0 by default */ - xd->mode_info_context->mbmi.segment_id = 0; + vp8cx_mb_init_quantizer(cpi, x, 1); + } else { + /* Set to Segment 0 by default */ + xd->mode_info_context->mbmi.segment_id = 0; + } - x->active_ptr = cpi->active_map + map_index + mb_col; + x->active_ptr = cpi->active_map + map_index + mb_col; - if (cm->frame_type == KEY_FRAME) - { - *totalrate += vp8cx_encode_intra_macroblock(cpi, x, tp); + if (cm->frame_type == KEY_FRAME) { + *totalrate += vp8cx_encode_intra_macroblock(cpi, x, tp); #ifdef MODE_STATS - y_modes[xd->mbmi.mode] ++; + y_modes[xd->mbmi.mode]++; #endif - } - else - { - *totalrate += vp8cx_encode_inter_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset, mb_row, mb_col); + } else { + *totalrate += vp8cx_encode_inter_macroblock( + cpi, 
x, tp, recon_yoffset, recon_uvoffset, mb_row, mb_col); #ifdef MODE_STATS - inter_y_modes[xd->mbmi.mode] ++; + inter_y_modes[xd->mbmi.mode]++; - if (xd->mbmi.mode == SPLITMV) - { - int b; + if (xd->mbmi.mode == SPLITMV) { + int b; - for (b = 0; b < xd->mbmi.partition_count; b++) - { - inter_b_modes[x->partition->bmi[b].mode] ++; - } - } + for (b = 0; b < xd->mbmi.partition_count; ++b) { + inter_b_modes[x->partition->bmi[b].mode]++; + } + } #endif - // Keep track of how many (consecutive) times a block is coded - // as ZEROMV_LASTREF, for base layer frames. - // Reset to 0 if its coded as anything else. - if (cpi->current_layer == 0) { - if (xd->mode_info_context->mbmi.mode == ZEROMV && - xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) { - // Increment, check for wrap-around. - if (cpi->consec_zero_last[map_index+mb_col] < 255) - cpi->consec_zero_last[map_index+mb_col] += 1; - if (cpi->consec_zero_last_mvbias[map_index+mb_col] < 255) - cpi->consec_zero_last_mvbias[map_index+mb_col] += 1; - } else { - cpi->consec_zero_last[map_index+mb_col] = 0; - cpi->consec_zero_last_mvbias[map_index+mb_col] = 0; - } - if (x->zero_last_dot_suppress) - cpi->consec_zero_last_mvbias[map_index+mb_col] = 0; - } - - /* Special case code for cyclic refresh - * If cyclic update enabled then copy xd->mbmi.segment_id; (which - * may have been updated based on mode during - * vp8cx_encode_inter_macroblock()) back into the global - * segmentation map - */ - if ((cpi->current_layer == 0) && - (cpi->cyclic_refresh_mode_enabled && - xd->segmentation_enabled)) - { - cpi->segmentation_map[map_index+mb_col] = xd->mode_info_context->mbmi.segment_id; - - /* If the block has been refreshed mark it as clean (the - * magnitude of the -ve influences how long it will be before - * we consider another refresh): - * Else if it was coded (last frame 0,0) and has not already - * been refreshed then mark it as a candidate for cleanup - * next time (marked 0) else mark it as dirty (1). 
- */ - if (xd->mode_info_context->mbmi.segment_id) - cpi->cyclic_refresh_map[map_index+mb_col] = -1; - else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)) - { - if (cpi->cyclic_refresh_map[map_index+mb_col] == 1) - cpi->cyclic_refresh_map[map_index+mb_col] = 0; - } - else - cpi->cyclic_refresh_map[map_index+mb_col] = 1; - - } + // Keep track of how many (consecutive) times a block is coded + // as ZEROMV_LASTREF, for base layer frames. + // Reset to 0 if its coded as anything else. + if (cpi->current_layer == 0) { + if (xd->mode_info_context->mbmi.mode == ZEROMV && + xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) { + // Increment, check for wrap-around. + if (cpi->consec_zero_last[map_index + mb_col] < 255) { + cpi->consec_zero_last[map_index + mb_col] += 1; + } + if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255) { + cpi->consec_zero_last_mvbias[map_index + mb_col] += 1; + } + } else { + cpi->consec_zero_last[map_index + mb_col] = 0; + cpi->consec_zero_last_mvbias[map_index + mb_col] = 0; } + if (x->zero_last_dot_suppress) { + cpi->consec_zero_last_mvbias[map_index + mb_col] = 0; + } + } - cpi->tplist[mb_row].stop = *tp; + /* Special case code for cyclic refresh + * If cyclic update enabled then copy xd->mbmi.segment_id; (which + * may have been updated based on mode during + * vp8cx_encode_inter_macroblock()) back into the global + * segmentation map + */ + if ((cpi->current_layer == 0) && + (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)) { + cpi->segmentation_map[map_index + mb_col] = + xd->mode_info_context->mbmi.segment_id; + + /* If the block has been refreshed mark it as clean (the + * magnitude of the -ve influences how long it will be before + * we consider another refresh): + * Else if it was coded (last frame 0,0) and has not already + * been refreshed then mark it as a candidate for cleanup + * next time (marked 0) else mark it as dirty (1). 
+ */ + if (xd->mode_info_context->mbmi.segment_id) { + cpi->cyclic_refresh_map[map_index + mb_col] = -1; + } else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && + (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)) { + if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) { + cpi->cyclic_refresh_map[map_index + mb_col] = 0; + } + } else { + cpi->cyclic_refresh_map[map_index + mb_col] = 1; + } + } + } + + cpi->tplist[mb_row].stop = *tp; #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - /* pack tokens for this MB */ - { - int tok_count = *tp - tp_start; - vp8_pack_tokens(w, tp_start, tok_count); - } -#endif - /* Increment pointer into gf usage flags structure. */ - x->gf_active_ptr++; - - /* Increment the activity mask pointers. */ - x->mb_activity_ptr++; - - /* adjust to the next column of macroblocks */ - x->src.y_buffer += 16; - x->src.u_buffer += 8; - x->src.v_buffer += 8; - - recon_yoffset += 16; - recon_uvoffset += 8; - - /* Keep track of segment usage */ - segment_counts[xd->mode_info_context->mbmi.segment_id] ++; - - /* skip to next mb */ - xd->mode_info_context++; - x->partition_info++; - xd->above_context++; + /* pack tokens for this MB */ + { + int tok_count = *tp - tp_start; + vp8_pack_tokens(w, tp_start, tok_count); } - - /* extend the recon for intra prediction */ - vp8_extend_mb_row( &cm->yv12_fb[dst_fb_idx], - xd->dst.y_buffer + 16, - xd->dst.u_buffer + 8, - xd->dst.v_buffer + 8); - -#if CONFIG_MULTITHREAD - if (cpi->b_multi_threaded != 0) - protected_write(&cpi->pmutex[mb_row], current_mb_col, rightmost_col); #endif + /* Increment pointer into gf usage flags structure. */ + x->gf_active_ptr++; - /* this is to account for the border */ + /* Increment the activity mask pointers. 
*/ + x->mb_activity_ptr++; + + /* adjust to the next column of macroblocks */ + x->src.y_buffer += 16; + x->src.u_buffer += 8; + x->src.v_buffer += 8; + + recon_yoffset += 16; + recon_uvoffset += 8; + + /* Keep track of segment usage */ + segment_counts[xd->mode_info_context->mbmi.segment_id]++; + + /* skip to next mb */ xd->mode_info_context++; x->partition_info++; + xd->above_context++; + } + + /* extend the recon for intra prediction */ + vp8_extend_mb_row(&cm->yv12_fb[dst_fb_idx], xd->dst.y_buffer + 16, + xd->dst.u_buffer + 8, xd->dst.v_buffer + 8); + +#if CONFIG_MULTITHREAD + if (cpi->b_multi_threaded != 0) *current_mb_col = rightmost_col; +#endif + + /* this is to account for the border */ + xd->mode_info_context++; + x->partition_info++; } -static void init_encode_frame_mb_context(VP8_COMP *cpi) -{ - MACROBLOCK *const x = & cpi->mb; - VP8_COMMON *const cm = & cpi->common; - MACROBLOCKD *const xd = & x->e_mbd; +static void init_encode_frame_mb_context(VP8_COMP *cpi) { + MACROBLOCK *const x = &cpi->mb; + VP8_COMMON *const cm = &cpi->common; + MACROBLOCKD *const xd = &x->e_mbd; - /* GF active flags data structure */ - x->gf_active_ptr = (signed char *)cpi->gf_active_flags; + /* GF active flags data structure */ + x->gf_active_ptr = (signed char *)cpi->gf_active_flags; - /* Activity map pointer */ - x->mb_activity_ptr = cpi->mb_activity_map; + /* Activity map pointer */ + x->mb_activity_ptr = cpi->mb_activity_map; - x->act_zbin_adj = 0; + x->act_zbin_adj = 0; - x->partition_info = x->pi; + x->partition_info = x->pi; - xd->mode_info_context = cm->mi; - xd->mode_info_stride = cm->mode_info_stride; + xd->mode_info_context = cm->mi; + xd->mode_info_stride = cm->mode_info_stride; - xd->frame_type = cm->frame_type; + xd->frame_type = cm->frame_type; - /* reset intra mode contexts */ - if (cm->frame_type == KEY_FRAME) - vp8_init_mbmode_probs(cm); + /* reset intra mode contexts */ + if (cm->frame_type == KEY_FRAME) vp8_init_mbmode_probs(cm); - /* Copy data over into 
macro block data structures. */ - x->src = * cpi->Source; - xd->pre = cm->yv12_fb[cm->lst_fb_idx]; - xd->dst = cm->yv12_fb[cm->new_fb_idx]; + /* Copy data over into macro block data structures. */ + x->src = *cpi->Source; + xd->pre = cm->yv12_fb[cm->lst_fb_idx]; + xd->dst = cm->yv12_fb[cm->new_fb_idx]; - /* set up frame for intra coded blocks */ - vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]); + /* set up frame for intra coded blocks */ + vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]); - vp8_build_block_offsets(x); + vp8_build_block_offsets(x); - xd->mode_info_context->mbmi.mode = DC_PRED; - xd->mode_info_context->mbmi.uv_mode = DC_PRED; + xd->mode_info_context->mbmi.mode = DC_PRED; + xd->mode_info_context->mbmi.uv_mode = DC_PRED; - xd->left_context = &cm->left_context; + xd->left_context = &cm->left_context; - x->mvc = cm->fc.mvc; + x->mvc = cm->fc.mvc; - memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols); + memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols); - /* Special case treatment when GF and ARF are not sensible options - * for reference - */ - if (cpi->ref_frame_flags == VP8_LAST_FRAME) - vp8_calc_ref_frame_costs(x->ref_frame_cost, - cpi->prob_intra_coded,255,128); - else if ((cpi->oxcf.number_of_layers > 1) && - (cpi->ref_frame_flags == VP8_GOLD_FRAME)) - vp8_calc_ref_frame_costs(x->ref_frame_cost, - cpi->prob_intra_coded,1,255); - else if ((cpi->oxcf.number_of_layers > 1) && - (cpi->ref_frame_flags == VP8_ALTR_FRAME)) - vp8_calc_ref_frame_costs(x->ref_frame_cost, - cpi->prob_intra_coded,1,1); - else - vp8_calc_ref_frame_costs(x->ref_frame_cost, - cpi->prob_intra_coded, - cpi->prob_last_coded, - cpi->prob_gf_coded); + /* Special case treatment when GF and ARF are not sensible options + * for reference + */ + if (cpi->ref_frame_flags == VP8_LAST_FRAME) { + vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 255, + 128); + } else if ((cpi->oxcf.number_of_layers > 1) && + 
(cpi->ref_frame_flags == VP8_GOLD_FRAME)) { + vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 255); + } else if ((cpi->oxcf.number_of_layers > 1) && + (cpi->ref_frame_flags == VP8_ALTR_FRAME)) { + vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 1); + } else { + vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, + cpi->prob_last_coded, cpi->prob_gf_coded); + } - xd->fullpixel_mask = 0xffffffff; - if(cm->full_pixel) - xd->fullpixel_mask = 0xfffffff8; + xd->fullpixel_mask = 0xffffffff; + if (cm->full_pixel) xd->fullpixel_mask = 0xfffffff8; - vp8_zero(x->coef_counts); - vp8_zero(x->ymode_count); - vp8_zero(x->uv_mode_count) - x->prediction_error = 0; - x->intra_error = 0; - vp8_zero(x->count_mb_ref_frame_usage); + vp8_zero(x->coef_counts); + vp8_zero(x->ymode_count); + vp8_zero(x->uv_mode_count) x->prediction_error = 0; + x->intra_error = 0; + vp8_zero(x->count_mb_ref_frame_usage); } #if CONFIG_MULTITHREAD -static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread) -{ - int i = 0; - do - { - int j = 0; - do - { - int k = 0; - do - { - /* at every context */ +static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread) { + int i = 0; + do { + int j = 0; + do { + int k = 0; + do { + /* at every context */ - /* calc probs and branch cts for this frame only */ - int t = 0; /* token/prob index */ + /* calc probs and branch cts for this frame only */ + int t = 0; /* token/prob index */ - do - { - x->coef_counts [i][j][k][t] += - x_thread->coef_counts [i][j][k][t]; - } - while (++t < ENTROPY_NODES); - } - while (++k < PREV_COEF_CONTEXTS); - } - while (++j < COEF_BANDS); - } - while (++i < BLOCK_TYPES); + do { + x->coef_counts[i][j][k][t] += x_thread->coef_counts[i][j][k][t]; + } while (++t < ENTROPY_NODES); + } while (++k < PREV_COEF_CONTEXTS); + } while (++j < COEF_BANDS); + } while (++i < BLOCK_TYPES); } #endif // CONFIG_MULTITHREAD -void vp8_encode_frame(VP8_COMP *cpi) -{ - int mb_row; - MACROBLOCK 
*const x = & cpi->mb; - VP8_COMMON *const cm = & cpi->common; - MACROBLOCKD *const xd = & x->e_mbd; - TOKENEXTRA *tp = cpi->tok; - int segment_counts[MAX_MB_SEGMENTS]; - int totalrate; +void vp8_encode_frame(VP8_COMP *cpi) { + int mb_row; + MACROBLOCK *const x = &cpi->mb; + VP8_COMMON *const cm = &cpi->common; + MACROBLOCKD *const xd = &x->e_mbd; + TOKENEXTRA *tp = cpi->tok; + int segment_counts[MAX_MB_SEGMENTS]; + int totalrate; #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - BOOL_CODER * bc = &cpi->bc[1]; /* bc[0] is for control partition */ - const int num_part = (1 << cm->multi_token_partition); + BOOL_CODER *bc = &cpi->bc[1]; /* bc[0] is for control partition */ + const int num_part = (1 << cm->multi_token_partition); #endif - memset(segment_counts, 0, sizeof(segment_counts)); - totalrate = 0; + memset(segment_counts, 0, sizeof(segment_counts)); + totalrate = 0; - if (cpi->compressor_speed == 2) - { - if (cpi->oxcf.cpu_used < 0) - cpi->Speed = -(cpi->oxcf.cpu_used); - else - vp8_auto_select_speed(cpi); + if (cpi->compressor_speed == 2) { + if (cpi->oxcf.cpu_used < 0) { + cpi->Speed = -(cpi->oxcf.cpu_used); + } else { + vp8_auto_select_speed(cpi); } + } - /* Functions setup for all frame types so we can use MC in AltRef */ - if(!cm->use_bilinear_mc_filter) - { - xd->subpixel_predict = vp8_sixtap_predict4x4; - xd->subpixel_predict8x4 = vp8_sixtap_predict8x4; - xd->subpixel_predict8x8 = vp8_sixtap_predict8x8; - xd->subpixel_predict16x16 = vp8_sixtap_predict16x16; - } - else - { - xd->subpixel_predict = vp8_bilinear_predict4x4; - xd->subpixel_predict8x4 = vp8_bilinear_predict8x4; - xd->subpixel_predict8x8 = vp8_bilinear_predict8x8; - xd->subpixel_predict16x16 = vp8_bilinear_predict16x16; - } + /* Functions setup for all frame types so we can use MC in AltRef */ + if (!cm->use_bilinear_mc_filter) { + xd->subpixel_predict = vp8_sixtap_predict4x4; + xd->subpixel_predict8x4 = vp8_sixtap_predict8x4; + xd->subpixel_predict8x8 = vp8_sixtap_predict8x8; + 
xd->subpixel_predict16x16 = vp8_sixtap_predict16x16; + } else { + xd->subpixel_predict = vp8_bilinear_predict4x4; + xd->subpixel_predict8x4 = vp8_bilinear_predict8x4; + xd->subpixel_predict8x8 = vp8_bilinear_predict8x8; + xd->subpixel_predict16x16 = vp8_bilinear_predict16x16; + } - cpi->mb.skip_true_count = 0; - cpi->tok_count = 0; + cpi->mb.skip_true_count = 0; + cpi->tok_count = 0; #if 0 /* Experimental code */ @@ -780,444 +710,405 @@ void vp8_encode_frame(VP8_COMP *cpi) cpi->last_mb_distortion = 0; #endif - xd->mode_info_context = cm->mi; + xd->mode_info_context = cm->mi; - vp8_zero(cpi->mb.MVcount); + vp8_zero(cpi->mb.MVcount); - vp8cx_frame_init_quantizer(cpi); + vp8cx_frame_init_quantizer(cpi); - vp8_initialize_rd_consts(cpi, x, - vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q)); + vp8_initialize_rd_consts(cpi, x, + vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q)); - vp8cx_initialize_me_consts(cpi, cm->base_qindex); + vp8cx_initialize_me_consts(cpi, cm->base_qindex); - if(cpi->oxcf.tuning == VP8_TUNE_SSIM) - { - /* Initialize encode frame context. */ - init_encode_frame_mb_context(cpi); - - /* Build a frame level activity map */ - build_activity_map(cpi); - } - - /* re-init encode frame context. */ + if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { + /* Initialize encode frame context. */ init_encode_frame_mb_context(cpi); + /* Build a frame level activity map */ + build_activity_map(cpi); + } + + /* re-init encode frame context. 
*/ + init_encode_frame_mb_context(cpi); + #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - { - int i; - for(i = 0; i < num_part; i++) - { - vp8_start_encode(&bc[i], cpi->partition_d[i + 1], - cpi->partition_d_end[i + 1]); - bc[i].error = &cm->error; - } + { + int i; + for (i = 0; i < num_part; ++i) { + vp8_start_encode(&bc[i], cpi->partition_d[i + 1], + cpi->partition_d_end[i + 1]); + bc[i].error = &cm->error; } + } #endif - { - struct vpx_usec_timer emr_timer; - vpx_usec_timer_start(&emr_timer); + { + struct vpx_usec_timer emr_timer; + vpx_usec_timer_start(&emr_timer); #if CONFIG_MULTITHREAD - if (cpi->b_multi_threaded) - { - int i; + if (cpi->b_multi_threaded) { + int i; - vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, - cpi->encoding_thread_count); + vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, + cpi->encoding_thread_count); - for (i = 0; i < cm->mb_rows; i++) - cpi->mt_current_mb_col[i] = -1; + for (i = 0; i < cm->mb_rows; ++i) cpi->mt_current_mb_col[i] = -1; - for (i = 0; i < cpi->encoding_thread_count; i++) - { - sem_post(&cpi->h_event_start_encoding[i]); - } + for (i = 0; i < cpi->encoding_thread_count; ++i) { + sem_post(&cpi->h_event_start_encoding[i]); + } - for (mb_row = 0; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1)) - { - vp8_zero(cm->left_context) + for (mb_row = 0; mb_row < cm->mb_rows; + mb_row += (cpi->encoding_thread_count + 1)) { + vp8_zero(cm->left_context) #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - tp = cpi->tok; + tp = cpi->tok; #else - tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24); + tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24); #endif - encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate); + encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate); - /* adjust to the next row of mbs */ - x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols; - x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) 
- 8 * cm->mb_cols; - x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols; + /* adjust to the next row of mbs */ + x->src.y_buffer += + 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - + 16 * cm->mb_cols; + x->src.u_buffer += + 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - + 8 * cm->mb_cols; + x->src.v_buffer += + 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - + 8 * cm->mb_cols; - xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count; - x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count; - x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count; + xd->mode_info_context += + xd->mode_info_stride * cpi->encoding_thread_count; + x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count; + x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count; + } + /* Wait for all the threads to finish. */ + for (i = 0; i < cpi->encoding_thread_count; ++i) { + sem_wait(&cpi->h_event_end_encoding[i]); + } - if(mb_row == cm->mb_rows - 1) - { - sem_post(&cpi->h_event_end_encoding); /* signal frame encoding end */ - } + for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { + cpi->tok_count += (unsigned int)(cpi->tplist[mb_row].stop - + cpi->tplist[mb_row].start); + } + + if (xd->segmentation_enabled) { + int j; + + if (xd->segmentation_enabled) { + for (i = 0; i < cpi->encoding_thread_count; ++i) { + for (j = 0; j < 4; ++j) { + segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j]; } - - sem_wait(&cpi->h_event_end_encoding); /* wait for other threads to finish */ - - for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++) - { - cpi->tok_count += (unsigned int) - (cpi->tplist[mb_row].stop - cpi->tplist[mb_row].start); - } - - if (xd->segmentation_enabled) - { - int j; - - if (xd->segmentation_enabled) - { - for (i = 0; i < cpi->encoding_thread_count; i++) - { - for (j = 0; j < 4; j++) - segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j]; - } - } - } - - 
for (i = 0; i < cpi->encoding_thread_count; i++) - { - int mode_count; - int c_idx; - totalrate += cpi->mb_row_ei[i].totalrate; - - cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count; - - for(mode_count = 0; mode_count < VP8_YMODES; mode_count++) - cpi->mb.ymode_count[mode_count] += - cpi->mb_row_ei[i].mb.ymode_count[mode_count]; - - for(mode_count = 0; mode_count < VP8_UV_MODES; mode_count++) - cpi->mb.uv_mode_count[mode_count] += - cpi->mb_row_ei[i].mb.uv_mode_count[mode_count]; - - for(c_idx = 0; c_idx < MVvals; c_idx++) - { - cpi->mb.MVcount[0][c_idx] += - cpi->mb_row_ei[i].mb.MVcount[0][c_idx]; - cpi->mb.MVcount[1][c_idx] += - cpi->mb_row_ei[i].mb.MVcount[1][c_idx]; - } - - cpi->mb.prediction_error += - cpi->mb_row_ei[i].mb.prediction_error; - cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error; - - for(c_idx = 0; c_idx < MAX_REF_FRAMES; c_idx++) - cpi->mb.count_mb_ref_frame_usage[c_idx] += - cpi->mb_row_ei[i].mb.count_mb_ref_frame_usage[c_idx]; - - for(c_idx = 0; c_idx < MAX_ERROR_BINS; c_idx++) - cpi->mb.error_bins[c_idx] += - cpi->mb_row_ei[i].mb.error_bins[c_idx]; - - /* add up counts for each thread */ - sum_coef_counts(x, &cpi->mb_row_ei[i].mb); - } - + } } - else + } + + for (i = 0; i < cpi->encoding_thread_count; ++i) { + int mode_count; + int c_idx; + totalrate += cpi->mb_row_ei[i].totalrate; + + cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count; + + for (mode_count = 0; mode_count < VP8_YMODES; ++mode_count) { + cpi->mb.ymode_count[mode_count] += + cpi->mb_row_ei[i].mb.ymode_count[mode_count]; + } + + for (mode_count = 0; mode_count < VP8_UV_MODES; ++mode_count) { + cpi->mb.uv_mode_count[mode_count] += + cpi->mb_row_ei[i].mb.uv_mode_count[mode_count]; + } + + for (c_idx = 0; c_idx < MVvals; ++c_idx) { + cpi->mb.MVcount[0][c_idx] += cpi->mb_row_ei[i].mb.MVcount[0][c_idx]; + cpi->mb.MVcount[1][c_idx] += cpi->mb_row_ei[i].mb.MVcount[1][c_idx]; + } + + cpi->mb.prediction_error += cpi->mb_row_ei[i].mb.prediction_error; + 
cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error; + + for (c_idx = 0; c_idx < MAX_REF_FRAMES; ++c_idx) { + cpi->mb.count_mb_ref_frame_usage[c_idx] += + cpi->mb_row_ei[i].mb.count_mb_ref_frame_usage[c_idx]; + } + + for (c_idx = 0; c_idx < MAX_ERROR_BINS; ++c_idx) { + cpi->mb.error_bins[c_idx] += cpi->mb_row_ei[i].mb.error_bins[c_idx]; + } + + /* add up counts for each thread */ + sum_coef_counts(x, &cpi->mb_row_ei[i].mb); + } + + } else #endif // CONFIG_MULTITHREAD - { + { - /* for each macroblock row in image */ - for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) - { - vp8_zero(cm->left_context) + /* for each macroblock row in image */ + for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { + vp8_zero(cm->left_context) #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - tp = cpi->tok; + tp = cpi->tok; #endif - encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate); + encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate); - /* adjust to the next row of mbs */ - x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols; - x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols; - x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols; - } + /* adjust to the next row of mbs */ + x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols; + x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols; + x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols; + } - cpi->tok_count = (unsigned int)(tp - cpi->tok); - } + cpi->tok_count = (unsigned int)(tp - cpi->tok); + } #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - { - int i; - for(i = 0; i < num_part; i++) - { - vp8_stop_encode(&bc[i]); - cpi->partition_sz[i+1] = bc[i].pos; - } - } + { + int i; + for (i = 0; i < num_part; ++i) { + vp8_stop_encode(&bc[i]); + cpi->partition_sz[i + 1] = bc[i].pos; + } + } #endif - vpx_usec_timer_mark(&emr_timer); - cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer); - } + vpx_usec_timer_mark(&emr_timer); + 
cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer); + } - - // Work out the segment probabilities if segmentation is enabled - // and needs to be updated - if (xd->segmentation_enabled && xd->update_mb_segmentation_map) - { - int tot_count; - int i; - - /* Set to defaults */ - memset(xd->mb_segment_tree_probs, 255 , sizeof(xd->mb_segment_tree_probs)); - - tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3]; - - if (tot_count) - { - xd->mb_segment_tree_probs[0] = ((segment_counts[0] + segment_counts[1]) * 255) / tot_count; - - tot_count = segment_counts[0] + segment_counts[1]; - - if (tot_count > 0) - { - xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count; - } - - tot_count = segment_counts[2] + segment_counts[3]; - - if (tot_count > 0) - xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count; - - /* Zero probabilities not allowed */ - for (i = 0; i < MB_FEATURE_TREE_PROBS; i ++) - { - if (xd->mb_segment_tree_probs[i] == 0) - xd->mb_segment_tree_probs[i] = 1; - } - } - } - - /* projected_frame_size in units of BYTES */ - cpi->projected_frame_size = totalrate >> 8; - - /* Make a note of the percentage MBs coded Intra. */ - if (cm->frame_type == KEY_FRAME) - { - cpi->this_frame_percent_intra = 100; - } - else - { - int tot_modes; - - tot_modes = cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] - + cpi->mb.count_mb_ref_frame_usage[LAST_FRAME] - + cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME] - + cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME]; - - if (tot_modes) - cpi->this_frame_percent_intra = - cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes; - - } - -#if ! CONFIG_REALTIME_ONLY - /* Adjust the projected reference frame usage probability numbers to - * reflect what we have just seen. This may be useful when we make - * multiple iterations of the recode loop rather than continuing to use - * values from the previous frame. 
- */ - if ((cm->frame_type != KEY_FRAME) && ((cpi->oxcf.number_of_layers > 1) || - (!cm->refresh_alt_ref_frame && !cm->refresh_golden_frame))) - { - vp8_convert_rfct_to_prob(cpi); - } -#endif -} -void vp8_setup_block_ptrs(MACROBLOCK *x) -{ - int r, c; + // Work out the segment probabilities if segmentation is enabled + // and needs to be updated + if (xd->segmentation_enabled && xd->update_mb_segmentation_map) { + int tot_count; int i; - for (r = 0; r < 4; r++) - { - for (c = 0; c < 4; c++) - { - x->block[r*4+c].src_diff = x->src_diff + r * 4 * 16 + c * 4; - } + /* Set to defaults */ + memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs)); + + tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + + segment_counts[3]; + + if (tot_count) { + xd->mb_segment_tree_probs[0] = + ((segment_counts[0] + segment_counts[1]) * 255) / tot_count; + + tot_count = segment_counts[0] + segment_counts[1]; + + if (tot_count > 0) { + xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count; + } + + tot_count = segment_counts[2] + segment_counts[3]; + + if (tot_count > 0) { + xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count; + } + + /* Zero probabilities not allowed */ + for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) { + if (xd->mb_segment_tree_probs[i] == 0) xd->mb_segment_tree_probs[i] = 1; + } } + } - for (r = 0; r < 2; r++) - { - for (c = 0; c < 2; c++) - { - x->block[16 + r*2+c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4; - } + /* projected_frame_size in units of BYTES */ + cpi->projected_frame_size = totalrate >> 8; + + /* Make a note of the percentage MBs coded Intra. 
*/ + if (cm->frame_type == KEY_FRAME) { + cpi->this_frame_percent_intra = 100; + } else { + int tot_modes; + + tot_modes = cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] + + cpi->mb.count_mb_ref_frame_usage[LAST_FRAME] + + cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME] + + cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME]; + + if (tot_modes) { + cpi->this_frame_percent_intra = + cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes; } + } +#if !CONFIG_REALTIME_ONLY + /* Adjust the projected reference frame usage probability numbers to + * reflect what we have just seen. This may be useful when we make + * multiple iterations of the recode loop rather than continuing to use + * values from the previous frame. + */ + if ((cm->frame_type != KEY_FRAME) && + ((cpi->oxcf.number_of_layers > 1) || + (!cm->refresh_alt_ref_frame && !cm->refresh_golden_frame))) { + vp8_convert_rfct_to_prob(cpi); + } +#endif +} +void vp8_setup_block_ptrs(MACROBLOCK *x) { + int r, c; + int i; - for (r = 0; r < 2; r++) - { - for (c = 0; c < 2; c++) - { - x->block[20 + r*2+c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4; - } + for (r = 0; r < 4; ++r) { + for (c = 0; c < 4; ++c) { + x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4; } + } - x->block[24].src_diff = x->src_diff + 384; - - - for (i = 0; i < 25; i++) - { - x->block[i].coeff = x->coeff + i * 16; + for (r = 0; r < 2; ++r) { + for (c = 0; c < 2; ++c) { + x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4; } + } + + for (r = 0; r < 2; ++r) { + for (c = 0; c < 2; ++c) { + x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4; + } + } + + x->block[24].src_diff = x->src_diff + 384; + + for (i = 0; i < 25; ++i) { + x->block[i].coeff = x->coeff + i * 16; + } } -void vp8_build_block_offsets(MACROBLOCK *x) -{ - int block = 0; - int br, bc; +void vp8_build_block_offsets(MACROBLOCK *x) { + int block = 0; + int br, bc; - vp8_build_block_doffsets(&x->e_mbd); + 
vp8_build_block_doffsets(&x->e_mbd); - /* y blocks */ - x->thismb_ptr = &x->thismb[0]; - for (br = 0; br < 4; br++) - { - for (bc = 0; bc < 4; bc++) - { - BLOCK *this_block = &x->block[block]; - this_block->base_src = &x->thismb_ptr; - this_block->src_stride = 16; - this_block->src = 4 * br * 16 + 4 * bc; - ++block; - } + /* y blocks */ + x->thismb_ptr = &x->thismb[0]; + for (br = 0; br < 4; ++br) { + for (bc = 0; bc < 4; ++bc) { + BLOCK *this_block = &x->block[block]; + this_block->base_src = &x->thismb_ptr; + this_block->src_stride = 16; + this_block->src = 4 * br * 16 + 4 * bc; + ++block; } + } - /* u blocks */ - for (br = 0; br < 2; br++) - { - for (bc = 0; bc < 2; bc++) - { - BLOCK *this_block = &x->block[block]; - this_block->base_src = &x->src.u_buffer; - this_block->src_stride = x->src.uv_stride; - this_block->src = 4 * br * this_block->src_stride + 4 * bc; - ++block; - } + /* u blocks */ + for (br = 0; br < 2; ++br) { + for (bc = 0; bc < 2; ++bc) { + BLOCK *this_block = &x->block[block]; + this_block->base_src = &x->src.u_buffer; + this_block->src_stride = x->src.uv_stride; + this_block->src = 4 * br * this_block->src_stride + 4 * bc; + ++block; } + } - /* v blocks */ - for (br = 0; br < 2; br++) - { - for (bc = 0; bc < 2; bc++) - { - BLOCK *this_block = &x->block[block]; - this_block->base_src = &x->src.v_buffer; - this_block->src_stride = x->src.uv_stride; - this_block->src = 4 * br * this_block->src_stride + 4 * bc; - ++block; - } + /* v blocks */ + for (br = 0; br < 2; ++br) { + for (bc = 0; bc < 2; ++bc) { + BLOCK *this_block = &x->block[block]; + this_block->base_src = &x->src.v_buffer; + this_block->src_stride = x->src.uv_stride; + this_block->src = 4 * br * this_block->src_stride + 4 * bc; + ++block; } + } } -static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x) -{ - const MACROBLOCKD *xd = & x->e_mbd; - const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode; - const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode; +static 
void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x) { + const MACROBLOCKD *xd = &x->e_mbd; + const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode; + const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode; #ifdef MODE_STATS - const int is_key = cpi->common.frame_type == KEY_FRAME; + const int is_key = cpi->common.frame_type == KEY_FRAME; - ++ (is_key ? uv_modes : inter_uv_modes)[uvm]; + ++(is_key ? uv_modes : inter_uv_modes)[uvm]; - if (m == B_PRED) - { - unsigned int *const bct = is_key ? b_modes : inter_b_modes; + if (m == B_PRED) { + unsigned int *const bct = is_key ? b_modes : inter_b_modes; - int b = 0; + int b = 0; - do - { - ++ bct[xd->block[b].bmi.mode]; - } - while (++b < 16); - } + do { + ++bct[xd->block[b].bmi.mode]; + } while (++b < 16); + } #else - (void)cpi; + (void)cpi; #endif - ++x->ymode_count[m]; - ++x->uv_mode_count[uvm]; - + ++x->ymode_count[m]; + ++x->uv_mode_count[uvm]; } /* Experimental stub function to create a per MB zbin adjustment based on * some previously calculated measure of MB activity. */ -static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x ) -{ +static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x) { #if USE_ACT_INDEX - x->act_zbin_adj = *(x->mb_activity_ptr); + x->act_zbin_adj = *(x->mb_activity_ptr); #else - int64_t a; - int64_t b; - int64_t act = *(x->mb_activity_ptr); + int64_t a; + int64_t b; + int64_t act = *(x->mb_activity_ptr); - /* Apply the masking to the RD multiplier. */ - a = act + 4*cpi->activity_avg; - b = 4*act + cpi->activity_avg; + /* Apply the masking to the RD multiplier. 
*/ + a = act + 4 * cpi->activity_avg; + b = 4 * act + cpi->activity_avg; - if ( act > cpi->activity_avg ) - x->act_zbin_adj = (int)(((int64_t)b + (a>>1))/a) - 1; - else - x->act_zbin_adj = 1 - (int)(((int64_t)a + (b>>1))/b); + if (act > cpi->activity_avg) { + x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1; + } else { + x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b); + } #endif } int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x, - TOKENEXTRA **t) -{ - MACROBLOCKD *xd = &x->e_mbd; - int rate; + TOKENEXTRA **t) { + MACROBLOCKD *xd = &x->e_mbd; + int rate; - if (cpi->sf.RD && cpi->compressor_speed != 2) - vp8_rd_pick_intra_mode(x, &rate); - else - vp8_pick_intra_mode(x, &rate); + if (cpi->sf.RD && cpi->compressor_speed != 2) { + vp8_rd_pick_intra_mode(x, &rate); + } else { + vp8_pick_intra_mode(x, &rate); + } - if(cpi->oxcf.tuning == VP8_TUNE_SSIM) - { - adjust_act_zbin( cpi, x ); - vp8_update_zbin_extra(cpi, x); - } + if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { + adjust_act_zbin(cpi, x); + vp8_update_zbin_extra(cpi, x); + } - if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED) - vp8_encode_intra4x4mby(x); - else - vp8_encode_intra16x16mby(x); + if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED) { + vp8_encode_intra4x4mby(x); + } else { + vp8_encode_intra16x16mby(x); + } - vp8_encode_intra16x16mbuv(x); + vp8_encode_intra16x16mbuv(x); - sum_intra_stats(cpi, x); + sum_intra_stats(cpi, x); - vp8_tokenize_mb(cpi, x, t); + vp8_tokenize_mb(cpi, x, t); - if (xd->mode_info_context->mbmi.mode != B_PRED) - vp8_inverse_transform_mby(xd); + if (xd->mode_info_context->mbmi.mode != B_PRED) vp8_inverse_transform_mby(xd); - vp8_dequant_idct_add_uv_block - (xd->qcoeff+16*16, xd->dequant_uv, - xd->dst.u_buffer, xd->dst.v_buffer, - xd->dst.uv_stride, xd->eobs+16); - return rate; + vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv, + xd->dst.u_buffer, xd->dst.v_buffer, + xd->dst.uv_stride, xd->eobs + 16); + return rate; } #ifdef 
SPEEDSTATS extern int cnt_pm; @@ -1225,74 +1116,66 @@ extern int cnt_pm; extern void vp8_fix_contexts(MACROBLOCKD *x); -int vp8cx_encode_inter_macroblock -( - VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, - int recon_yoffset, int recon_uvoffset, - int mb_row, int mb_col -) -{ - MACROBLOCKD *const xd = &x->e_mbd; - int intra_error = 0; - int rate; - int distortion; +int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, + int recon_yoffset, int recon_uvoffset, + int mb_row, int mb_col) { + MACROBLOCKD *const xd = &x->e_mbd; + int intra_error = 0; + int rate; + int distortion; - x->skip = 0; + x->skip = 0; - if (xd->segmentation_enabled) - x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id]; - else - x->encode_breakout = cpi->oxcf.encode_breakout; + if (xd->segmentation_enabled) { + x->encode_breakout = + cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id]; + } else { + x->encode_breakout = cpi->oxcf.encode_breakout; + } #if CONFIG_TEMPORAL_DENOISING - /* Reset the best sse mode/mv for each macroblock. */ - x->best_reference_frame = INTRA_FRAME; - x->best_zeromv_reference_frame = INTRA_FRAME; - x->best_sse_inter_mode = 0; - x->best_sse_mv.as_int = 0; - x->need_to_clamp_best_mvs = 0; + /* Reset the best sse mode/mv for each macroblock. */ + x->best_reference_frame = INTRA_FRAME; + x->best_zeromv_reference_frame = INTRA_FRAME; + x->best_sse_inter_mode = 0; + x->best_sse_mv.as_int = 0; + x->need_to_clamp_best_mvs = 0; #endif - if (cpi->sf.RD) - { - int zbin_mode_boost_enabled = x->zbin_mode_boost_enabled; + if (cpi->sf.RD) { + int zbin_mode_boost_enabled = x->zbin_mode_boost_enabled; - /* Are we using the fast quantizer for the mode selection? 
*/ - if(cpi->sf.use_fastquant_for_pick) - { - x->quantize_b = vp8_fast_quantize_b; - - /* the fast quantizer does not use zbin_extra, so - * do not recalculate */ - x->zbin_mode_boost_enabled = 0; - } - vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, - &distortion, &intra_error, mb_row, mb_col); - - /* switch back to the regular quantizer for the encode */ - if (cpi->sf.improved_quant) - { - x->quantize_b = vp8_regular_quantize_b; - } - - /* restore cpi->zbin_mode_boost_enabled */ - x->zbin_mode_boost_enabled = zbin_mode_boost_enabled; + /* Are we using the fast quantizer for the mode selection? */ + if (cpi->sf.use_fastquant_for_pick) { + x->quantize_b = vp8_fast_quantize_b; + /* the fast quantizer does not use zbin_extra, so + * do not recalculate */ + x->zbin_mode_boost_enabled = 0; } - else - { - vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, - &distortion, &intra_error, mb_row, mb_col); + vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, + &distortion, &intra_error, mb_row, mb_col); + + /* switch back to the regular quantizer for the encode */ + if (cpi->sf.improved_quant) { + x->quantize_b = vp8_regular_quantize_b; } - x->prediction_error += distortion; - x->intra_error += intra_error; + /* restore cpi->zbin_mode_boost_enabled */ + x->zbin_mode_boost_enabled = zbin_mode_boost_enabled; - if(cpi->oxcf.tuning == VP8_TUNE_SSIM) - { - /* Adjust the zbin based on this MB rate. */ - adjust_act_zbin( cpi, x ); - } + } else { + vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, + &distortion, &intra_error, mb_row, mb_col); + } + + x->prediction_error += distortion; + x->intra_error += intra_error; + + if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { + /* Adjust the zbin based on this MB rate. 
*/ + adjust_act_zbin(cpi, x); + } #if 0 /* Experimental RD code */ @@ -1300,125 +1183,109 @@ int vp8cx_encode_inter_macroblock cpi->last_mb_distortion = distortion; #endif - /* MB level adjutment to quantizer setup */ - if (xd->segmentation_enabled) - { - /* If cyclic update enabled */ - if (cpi->current_layer == 0 && cpi->cyclic_refresh_mode_enabled) - { - /* Clear segment_id back to 0 if not coded (last frame 0,0) */ - if ((xd->mode_info_context->mbmi.segment_id == 1) && - ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV))) - { - xd->mode_info_context->mbmi.segment_id = 0; + /* MB level adjutment to quantizer setup */ + if (xd->segmentation_enabled) { + /* If cyclic update enabled */ + if (cpi->current_layer == 0 && cpi->cyclic_refresh_mode_enabled) { + /* Clear segment_id back to 0 if not coded (last frame 0,0) */ + if ((xd->mode_info_context->mbmi.segment_id == 1) && + ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || + (xd->mode_info_context->mbmi.mode != ZEROMV))) { + xd->mode_info_context->mbmi.segment_id = 0; - /* segment_id changed, so update */ - vp8cx_mb_init_quantizer(cpi, x, 1); - } + /* segment_id changed, so update */ + vp8cx_mb_init_quantizer(cpi, x, 1); + } + } + } + + { + /* Experimental code. + * Special case for gf and arf zeromv modes, for 1 temporal layer. + * Increase zbin size to supress noise. + */ + x->zbin_mode_boost = 0; + if (x->zbin_mode_boost_enabled) { + if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) { + if (xd->mode_info_context->mbmi.mode == ZEROMV) { + if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME && + cpi->oxcf.number_of_layers == 1) { + x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST; + } else { + x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST; + } + } else if (xd->mode_info_context->mbmi.mode == SPLITMV) { + x->zbin_mode_boost = 0; + } else { + x->zbin_mode_boost = MV_ZBIN_BOOST; } + } } - { - /* Experimental code. 
- * Special case for gf and arf zeromv modes, for 1 temporal layer. - * Increase zbin size to supress noise. - */ - x->zbin_mode_boost = 0; - if (x->zbin_mode_boost_enabled) - { - if ( xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME ) - { - if (xd->mode_info_context->mbmi.mode == ZEROMV) - { - if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME && - cpi->oxcf.number_of_layers == 1) - x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST; - else - x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST; - } - else if (xd->mode_info_context->mbmi.mode == SPLITMV) - x->zbin_mode_boost = 0; - else - x->zbin_mode_boost = MV_ZBIN_BOOST; - } - } + /* The fast quantizer doesn't use zbin_extra, only do so with + * the regular quantizer. */ + if (cpi->sf.improved_quant) vp8_update_zbin_extra(cpi, x); + } - /* The fast quantizer doesn't use zbin_extra, only do so with - * the regular quantizer. */ - if (cpi->sf.improved_quant) - vp8_update_zbin_extra(cpi, x); + x->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame]++; + + if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { + vp8_encode_intra16x16mbuv(x); + + if (xd->mode_info_context->mbmi.mode == B_PRED) { + vp8_encode_intra4x4mby(x); + } else { + vp8_encode_intra16x16mby(x); } - x->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++; - - if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) - { - vp8_encode_intra16x16mbuv(x); - - if (xd->mode_info_context->mbmi.mode == B_PRED) - { - vp8_encode_intra4x4mby(x); - } - else - { - vp8_encode_intra16x16mby(x); - } - - sum_intra_stats(cpi, x); - } - else - { - int ref_fb_idx; - - if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) - ref_fb_idx = cpi->common.lst_fb_idx; - else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) - ref_fb_idx = cpi->common.gld_fb_idx; - else - ref_fb_idx = cpi->common.alt_fb_idx; - - xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset; - xd->pre.u_buffer = 
cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset; - xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset; - - if (!x->skip) - { - vp8_encode_inter16x16(x); - } - else - vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, - xd->dst.u_buffer, xd->dst.v_buffer, - xd->dst.y_stride, xd->dst.uv_stride); + sum_intra_stats(cpi, x); + } else { + int ref_fb_idx; + if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) { + ref_fb_idx = cpi->common.lst_fb_idx; + } else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) { + ref_fb_idx = cpi->common.gld_fb_idx; + } else { + ref_fb_idx = cpi->common.alt_fb_idx; } - if (!x->skip) - { - vp8_tokenize_mb(cpi, x, t); + xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset; + xd->pre.u_buffer = + cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset; + xd->pre.v_buffer = + cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset; - if (xd->mode_info_context->mbmi.mode != B_PRED) - vp8_inverse_transform_mby(xd); - - vp8_dequant_idct_add_uv_block - (xd->qcoeff+16*16, xd->dequant_uv, - xd->dst.u_buffer, xd->dst.v_buffer, - xd->dst.uv_stride, xd->eobs+16); + if (!x->skip) { + vp8_encode_inter16x16(x); + } else { + vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, xd->dst.u_buffer, + xd->dst.v_buffer, xd->dst.y_stride, + xd->dst.uv_stride); } - else - { - /* always set mb_skip_coeff as it is needed by the loopfilter */ - xd->mode_info_context->mbmi.mb_skip_coeff = 1; + } - if (cpi->common.mb_no_coeff_skip) - { - x->skip_true_count ++; - vp8_fix_contexts(xd); - } - else - { - vp8_stuff_mb(cpi, x, t); - } + if (!x->skip) { + vp8_tokenize_mb(cpi, x, t); + + if (xd->mode_info_context->mbmi.mode != B_PRED) { + vp8_inverse_transform_mby(xd); } - return rate; + vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv, + xd->dst.u_buffer, xd->dst.v_buffer, + xd->dst.uv_stride, xd->eobs + 16); + } else { + /* always set mb_skip_coeff as it is needed by the 
loopfilter */ + xd->mode_info_context->mbmi.mb_skip_coeff = 1; + + if (cpi->common.mb_no_coeff_skip) { + x->skip_true_count++; + vp8_fix_contexts(xd); + } else { + vp8_stuff_mb(cpi, x, t); + } + } + + return rate; } diff --git a/libs/libvpx/vp8/encoder/encodeframe.h b/libs/libvpx/vp8/encoder/encodeframe.h index e185c1035c..c1d8634927 100644 --- a/libs/libvpx/vp8/encoder/encodeframe.h +++ b/libs/libvpx/vp8/encoder/encodeframe.h @@ -22,12 +22,12 @@ extern void vp8_setup_block_ptrs(MACROBLOCK *x); extern void vp8_encode_frame(VP8_COMP *cpi); extern int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, - TOKENEXTRA **t, - int recon_yoffset, int recon_uvoffset, - int mb_row, int mb_col); + TOKENEXTRA **t, int recon_yoffset, + int recon_uvoffset, int mb_row, + int mb_col); extern int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x, - TOKENEXTRA **t); + TOKENEXTRA **t); #ifdef __cplusplus } // extern "C" #endif diff --git a/libs/libvpx/vp8/encoder/encodeintra.c b/libs/libvpx/vp8/encoder/encodeintra.c index 44be959c96..f89e7cb1fa 100644 --- a/libs/libvpx/vp8/encoder/encodeintra.c +++ b/libs/libvpx/vp8/encoder/encodeintra.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "vpx_config.h" #include "vp8_rtcd.h" #include "./vpx_dsp_rtcd.h" @@ -19,122 +18,100 @@ #include "vp8/common/invtrans.h" #include "encodeintra.h" +int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred) { + int i; + int intra_pred_var = 0; + (void)cpi; -int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred) -{ + if (use_dc_pred) { + x->e_mbd.mode_info_context->mbmi.mode = DC_PRED; + x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED; + x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME; - int i; - int intra_pred_var = 0; - (void) cpi; + vp8_encode_intra16x16mby(x); - if (use_dc_pred) - { - x->e_mbd.mode_info_context->mbmi.mode = DC_PRED; - x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED; - x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME; - - vp8_encode_intra16x16mby(x); - - vp8_inverse_transform_mby(&x->e_mbd); - } - else - { - for (i = 0; i < 16; i++) - { - x->e_mbd.block[i].bmi.as_mode = B_DC_PRED; - vp8_encode_intra4x4block(x, i); - } + vp8_inverse_transform_mby(&x->e_mbd); + } else { + for (i = 0; i < 16; ++i) { + x->e_mbd.block[i].bmi.as_mode = B_DC_PRED; + vp8_encode_intra4x4block(x, i); } + } - intra_pred_var = vpx_get_mb_ss(x->src_diff); + intra_pred_var = vpx_get_mb_ss(x->src_diff); - return intra_pred_var; + return intra_pred_var; } -void vp8_encode_intra4x4block(MACROBLOCK *x, int ib) -{ - BLOCKD *b = &x->e_mbd.block[ib]; - BLOCK *be = &x->block[ib]; - int dst_stride = x->e_mbd.dst.y_stride; - unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset; - unsigned char *Above = dst - dst_stride; - unsigned char *yleft = dst - 1; - unsigned char top_left = Above[-1]; +void vp8_encode_intra4x4block(MACROBLOCK *x, int ib) { + BLOCKD *b = &x->e_mbd.block[ib]; + BLOCK *be = &x->block[ib]; + int dst_stride = x->e_mbd.dst.y_stride; + unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset; + unsigned char *Above = dst - dst_stride; + unsigned char *yleft = dst - 1; + unsigned char top_left = Above[-1]; - 
vp8_intra4x4_predict(Above, yleft, dst_stride, b->bmi.as_mode, - b->predictor, 16, top_left); + vp8_intra4x4_predict(Above, yleft, dst_stride, b->bmi.as_mode, b->predictor, + 16, top_left); - vp8_subtract_b(be, b, 16); + vp8_subtract_b(be, b, 16); - x->short_fdct4x4(be->src_diff, be->coeff, 32); + x->short_fdct4x4(be->src_diff, be->coeff, 32); - x->quantize_b(be, b); + x->quantize_b(be, b); - if (*b->eob > 1) - { - vp8_short_idct4x4llm(b->dqcoeff, b->predictor, 16, dst, dst_stride); - } - else - { - vp8_dc_only_idct_add(b->dqcoeff[0], b->predictor, 16, dst, dst_stride); - } + if (*b->eob > 1) { + vp8_short_idct4x4llm(b->dqcoeff, b->predictor, 16, dst, dst_stride); + } else { + vp8_dc_only_idct_add(b->dqcoeff[0], b->predictor, 16, dst, dst_stride); + } } -void vp8_encode_intra4x4mby(MACROBLOCK *mb) -{ - int i; +void vp8_encode_intra4x4mby(MACROBLOCK *mb) { + int i; - MACROBLOCKD *xd = &mb->e_mbd; - intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16); + MACROBLOCKD *xd = &mb->e_mbd; + intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16); - for (i = 0; i < 16; i++) - vp8_encode_intra4x4block(mb, i); - return; + for (i = 0; i < 16; ++i) vp8_encode_intra4x4block(mb, i); + return; } -void vp8_encode_intra16x16mby(MACROBLOCK *x) -{ - BLOCK *b = &x->block[0]; - MACROBLOCKD *xd = &x->e_mbd; +void vp8_encode_intra16x16mby(MACROBLOCK *x) { + BLOCK *b = &x->block[0]; + MACROBLOCKD *xd = &x->e_mbd; - vp8_build_intra_predictors_mby_s(xd, - xd->dst.y_buffer - xd->dst.y_stride, - xd->dst.y_buffer - 1, - xd->dst.y_stride, - xd->dst.y_buffer, - xd->dst.y_stride); + vp8_build_intra_predictors_mby_s(xd, xd->dst.y_buffer - xd->dst.y_stride, + xd->dst.y_buffer - 1, xd->dst.y_stride, + xd->dst.y_buffer, xd->dst.y_stride); - vp8_subtract_mby(x->src_diff, *(b->base_src), - b->src_stride, xd->dst.y_buffer, xd->dst.y_stride); + vp8_subtract_mby(x->src_diff, *(b->base_src), b->src_stride, xd->dst.y_buffer, + xd->dst.y_stride); - 
vp8_transform_intra_mby(x); + vp8_transform_intra_mby(x); - vp8_quantize_mby(x); + vp8_quantize_mby(x); - if (x->optimize) - vp8_optimize_mby(x); + if (x->optimize) vp8_optimize_mby(x); } -void vp8_encode_intra16x16mbuv(MACROBLOCK *x) -{ - MACROBLOCKD *xd = &x->e_mbd; +void vp8_encode_intra16x16mbuv(MACROBLOCK *x) { + MACROBLOCKD *xd = &x->e_mbd; - vp8_build_intra_predictors_mbuv_s(xd, xd->dst.u_buffer - xd->dst.uv_stride, - xd->dst.v_buffer - xd->dst.uv_stride, - xd->dst.u_buffer - 1, - xd->dst.v_buffer - 1, - xd->dst.uv_stride, - xd->dst.u_buffer, xd->dst.v_buffer, - xd->dst.uv_stride); + vp8_build_intra_predictors_mbuv_s(xd, xd->dst.u_buffer - xd->dst.uv_stride, + xd->dst.v_buffer - xd->dst.uv_stride, + xd->dst.u_buffer - 1, xd->dst.v_buffer - 1, + xd->dst.uv_stride, xd->dst.u_buffer, + xd->dst.v_buffer, xd->dst.uv_stride); - vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, - x->src.v_buffer, x->src.uv_stride, xd->dst.u_buffer, - xd->dst.v_buffer, xd->dst.uv_stride); + vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer, + x->src.uv_stride, xd->dst.u_buffer, xd->dst.v_buffer, + xd->dst.uv_stride); - vp8_transform_mbuv(x); + vp8_transform_mbuv(x); - vp8_quantize_mbuv(x); + vp8_quantize_mbuv(x); - if (x->optimize) - vp8_optimize_mbuv(x); + if (x->optimize) vp8_optimize_mbuv(x); } diff --git a/libs/libvpx/vp8/encoder/encodeintra.h b/libs/libvpx/vp8/encoder/encodeintra.h index a8d0284d29..3956cf5fb1 100644 --- a/libs/libvpx/vp8/encoder/encodeintra.h +++ b/libs/libvpx/vp8/encoder/encodeintra.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_ENCODER_ENCODEINTRA_H_ #define VP8_ENCODER_ENCODEINTRA_H_ #include "onyx_int.h" diff --git a/libs/libvpx/vp8/encoder/encodemb.c b/libs/libvpx/vp8/encoder/encodemb.c index 932a157ada..3fd8d5fabe 100644 --- a/libs/libvpx/vp8/encoder/encodemb.c +++ b/libs/libvpx/vp8/encoder/encodemb.c @@ -26,13 +26,13 @@ void vp8_subtract_b(BLOCK *be, BLOCKD *bd, int pitch) { unsigned char *pred_ptr = bd->predictor; int src_stride = be->src_stride; - vpx_subtract_block(4, 4, diff_ptr, pitch, src_ptr, src_stride, - pred_ptr, pitch); + vpx_subtract_block(4, 4, diff_ptr, pitch, src_ptr, src_stride, pred_ptr, + pitch); } void vp8_subtract_mbuv(short *diff, unsigned char *usrc, unsigned char *vsrc, - int src_stride, unsigned char *upred, - unsigned char *vpred, int pred_stride) { + int src_stride, unsigned char *upred, + unsigned char *vpred, int pred_stride) { short *udiff = diff + 256; short *vdiff = diff + 320; @@ -45,119 +45,91 @@ void vp8_subtract_mby(short *diff, unsigned char *src, int src_stride, vpx_subtract_block(16, 16, diff, 16, src, src_stride, pred, pred_stride); } -static void vp8_subtract_mb(MACROBLOCK *x) -{ - BLOCK *b = &x->block[0]; +static void vp8_subtract_mb(MACROBLOCK *x) { + BLOCK *b = &x->block[0]; - vp8_subtract_mby(x->src_diff, *(b->base_src), - b->src_stride, x->e_mbd.dst.y_buffer, x->e_mbd.dst.y_stride); - vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, - x->src.v_buffer, x->src.uv_stride, x->e_mbd.dst.u_buffer, - x->e_mbd.dst.v_buffer, x->e_mbd.dst.uv_stride); + vp8_subtract_mby(x->src_diff, *(b->base_src), b->src_stride, + x->e_mbd.dst.y_buffer, x->e_mbd.dst.y_stride); + vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer, + x->src.uv_stride, x->e_mbd.dst.u_buffer, + x->e_mbd.dst.v_buffer, x->e_mbd.dst.uv_stride); } -static void build_dcblock(MACROBLOCK *x) -{ - short *src_diff_ptr = &x->src_diff[384]; - int i; +static void build_dcblock(MACROBLOCK *x) { + short *src_diff_ptr = &x->src_diff[384]; + int i; - for (i = 0; i < 16; 
i++) - { - src_diff_ptr[i] = x->coeff[i * 16]; - } + for (i = 0; i < 16; ++i) { + src_diff_ptr[i] = x->coeff[i * 16]; + } } -void vp8_transform_mbuv(MACROBLOCK *x) -{ - int i; +void vp8_transform_mbuv(MACROBLOCK *x) { + int i; - for (i = 16; i < 24; i += 2) - { - x->short_fdct8x4(&x->block[i].src_diff[0], - &x->block[i].coeff[0], 16); - } + for (i = 16; i < 24; i += 2) { + x->short_fdct8x4(&x->block[i].src_diff[0], &x->block[i].coeff[0], 16); + } } +void vp8_transform_intra_mby(MACROBLOCK *x) { + int i; -void vp8_transform_intra_mby(MACROBLOCK *x) -{ - int i; + for (i = 0; i < 16; i += 2) { + x->short_fdct8x4(&x->block[i].src_diff[0], &x->block[i].coeff[0], 32); + } - for (i = 0; i < 16; i += 2) - { - x->short_fdct8x4(&x->block[i].src_diff[0], - &x->block[i].coeff[0], 32); - } + /* build dc block from 16 y dc values */ + build_dcblock(x); - /* build dc block from 16 y dc values */ + /* do 2nd order transform on the dc block */ + x->short_walsh4x4(&x->block[24].src_diff[0], &x->block[24].coeff[0], 8); +} + +static void transform_mb(MACROBLOCK *x) { + int i; + + for (i = 0; i < 16; i += 2) { + x->short_fdct8x4(&x->block[i].src_diff[0], &x->block[i].coeff[0], 32); + } + + /* build dc block from 16 y dc values */ + if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) build_dcblock(x); + + for (i = 16; i < 24; i += 2) { + x->short_fdct8x4(&x->block[i].src_diff[0], &x->block[i].coeff[0], 16); + } + + /* do 2nd order transform on the dc block */ + if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) { + x->short_walsh4x4(&x->block[24].src_diff[0], &x->block[24].coeff[0], 8); + } +} + +static void transform_mby(MACROBLOCK *x) { + int i; + + for (i = 0; i < 16; i += 2) { + x->short_fdct8x4(&x->block[i].src_diff[0], &x->block[i].coeff[0], 32); + } + + /* build dc block from 16 y dc values */ + if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) { build_dcblock(x); - - /* do 2nd order transform on the dc block */ - x->short_walsh4x4(&x->block[24].src_diff[0], - 
&x->block[24].coeff[0], 8); - + x->short_walsh4x4(&x->block[24].src_diff[0], &x->block[24].coeff[0], 8); + } } - -static void transform_mb(MACROBLOCK *x) -{ - int i; - - for (i = 0; i < 16; i += 2) - { - x->short_fdct8x4(&x->block[i].src_diff[0], - &x->block[i].coeff[0], 32); - } - - /* build dc block from 16 y dc values */ - if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) - build_dcblock(x); - - for (i = 16; i < 24; i += 2) - { - x->short_fdct8x4(&x->block[i].src_diff[0], - &x->block[i].coeff[0], 16); - } - - /* do 2nd order transform on the dc block */ - if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) - x->short_walsh4x4(&x->block[24].src_diff[0], - &x->block[24].coeff[0], 8); - -} - - -static void transform_mby(MACROBLOCK *x) -{ - int i; - - for (i = 0; i < 16; i += 2) - { - x->short_fdct8x4(&x->block[i].src_diff[0], - &x->block[i].coeff[0], 32); - } - - /* build dc block from 16 y dc values */ - if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) - { - build_dcblock(x); - x->short_walsh4x4(&x->block[24].src_diff[0], - &x->block[24].coeff[0], 8); - } -} - - - -#define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF ) +#define RDTRUNC(RM, DM, R, D) ((128 + (R) * (RM)) & 0xFF) typedef struct vp8_token_state vp8_token_state; -struct vp8_token_state{ - int rate; - int error; - signed char next; - signed char token; - short qc; +struct vp8_token_state { + int rate; + int error; + signed char next; + signed char token; + short qc; }; /* TODO: experiments to find optimal multiple numbers */ @@ -165,429 +137,376 @@ struct vp8_token_state{ #define UV_RD_MULT 2 #define Y2_RD_MULT 16 -static const int plane_rd_mult[4]= -{ - Y1_RD_MULT, - Y2_RD_MULT, - UV_RD_MULT, - Y1_RD_MULT -}; +static const int plane_rd_mult[4] = { Y1_RD_MULT, Y2_RD_MULT, UV_RD_MULT, + Y1_RD_MULT }; -static void optimize_b(MACROBLOCK *mb, int ib, int type, - ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) -{ - BLOCK *b; - BLOCKD *d; - vp8_token_state tokens[17][2]; - unsigned best_mask[2]; - const 
short *dequant_ptr; - const short *coeff_ptr; - short *qcoeff_ptr; - short *dqcoeff_ptr; - int eob; - int i0; - int rc; - int x; - int sz = 0; - int next; - int rdmult; - int rddiv; - int final_eob; - int rd_cost0; - int rd_cost1; - int rate0; - int rate1; - int error0; - int error1; - int t0; - int t1; - int best; - int band; - int pt; - int i; - int err_mult = plane_rd_mult[type]; +static void optimize_b(MACROBLOCK *mb, int ib, int type, ENTROPY_CONTEXT *a, + ENTROPY_CONTEXT *l) { + BLOCK *b; + BLOCKD *d; + vp8_token_state tokens[17][2]; + unsigned best_mask[2]; + const short *dequant_ptr; + const short *coeff_ptr; + short *qcoeff_ptr; + short *dqcoeff_ptr; + int eob; + int i0; + int rc; + int x; + int sz = 0; + int next; + int rdmult; + int rddiv; + int final_eob; + int rd_cost0; + int rd_cost1; + int rate0; + int rate1; + int error0; + int error1; + int t0; + int t1; + int best; + int band; + int pt; + int i; + int err_mult = plane_rd_mult[type]; - b = &mb->block[ib]; - d = &mb->e_mbd.block[ib]; + b = &mb->block[ib]; + d = &mb->e_mbd.block[ib]; - dequant_ptr = d->dequant; - coeff_ptr = b->coeff; - qcoeff_ptr = d->qcoeff; - dqcoeff_ptr = d->dqcoeff; - i0 = !type; - eob = *d->eob; + dequant_ptr = d->dequant; + coeff_ptr = b->coeff; + qcoeff_ptr = d->qcoeff; + dqcoeff_ptr = d->dqcoeff; + i0 = !type; + eob = *d->eob; - /* Now set up a Viterbi trellis to evaluate alternative roundings. */ - rdmult = mb->rdmult * err_mult; - if(mb->e_mbd.mode_info_context->mbmi.ref_frame==INTRA_FRAME) - rdmult = (rdmult * 9)>>4; + /* Now set up a Viterbi trellis to evaluate alternative roundings. */ + rdmult = mb->rdmult * err_mult; + if (mb->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) { + rdmult = (rdmult * 9) >> 4; + } - rddiv = mb->rddiv; - best_mask[0] = best_mask[1] = 0; - /* Initialize the sentinel node of the trellis. 
*/ - tokens[eob][0].rate = 0; - tokens[eob][0].error = 0; - tokens[eob][0].next = 16; - tokens[eob][0].token = DCT_EOB_TOKEN; - tokens[eob][0].qc = 0; - *(tokens[eob] + 1) = *(tokens[eob] + 0); - next = eob; - for (i = eob; i-- > i0;) - { - int base_bits; - int d2; - int dx; + rddiv = mb->rddiv; + best_mask[0] = best_mask[1] = 0; + /* Initialize the sentinel node of the trellis. */ + tokens[eob][0].rate = 0; + tokens[eob][0].error = 0; + tokens[eob][0].next = 16; + tokens[eob][0].token = DCT_EOB_TOKEN; + tokens[eob][0].qc = 0; + *(tokens[eob] + 1) = *(tokens[eob] + 0); + next = eob; + for (i = eob; i-- > i0;) { + int base_bits; + int d2; + int dx; - rc = vp8_default_zig_zag1d[i]; - x = qcoeff_ptr[rc]; - /* Only add a trellis state for non-zero coefficients. */ - if (x) - { - int shortcut=0; - error0 = tokens[next][0].error; - error1 = tokens[next][1].error; - /* Evaluate the first possibility for this state. */ - rate0 = tokens[next][0].rate; - rate1 = tokens[next][1].rate; - t0 = (vp8_dct_value_tokens_ptr + x)->Token; - /* Consider both possible successor states. */ - if (next < 16) - { - band = vp8_coef_bands[i + 1]; - pt = vp8_prev_token_class[t0]; - rate0 += - mb->token_costs[type][band][pt][tokens[next][0].token]; - rate1 += - mb->token_costs[type][band][pt][tokens[next][1].token]; - } - rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0); - rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); - if (rd_cost0 == rd_cost1) - { - rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0); - rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1); - } - /* And pick the best. */ - best = rd_cost1 < rd_cost0; - base_bits = *(vp8_dct_value_cost_ptr + x); - dx = dqcoeff_ptr[rc] - coeff_ptr[rc]; - d2 = dx*dx; - tokens[i][0].rate = base_bits + (best ? rate1 : rate0); - tokens[i][0].error = d2 + (best ? error1 : error0); - tokens[i][0].next = next; - tokens[i][0].token = t0; - tokens[i][0].qc = x; - best_mask[0] |= best << i; - /* Evaluate the second possibility for this state. 
*/ - rate0 = tokens[next][0].rate; - rate1 = tokens[next][1].rate; - - if((abs(x)*dequant_ptr[rc]>abs(coeff_ptr[rc])) && - (abs(x)*dequant_ptr[rc]Token; - } - if (next < 16) - { - band = vp8_coef_bands[i + 1]; - if(t0!=DCT_EOB_TOKEN) - { - pt = vp8_prev_token_class[t0]; - rate0 += mb->token_costs[type][band][pt][ - tokens[next][0].token]; - } - if(t1!=DCT_EOB_TOKEN) - { - pt = vp8_prev_token_class[t1]; - rate1 += mb->token_costs[type][band][pt][ - tokens[next][1].token]; - } - } - - rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0); - rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); - if (rd_cost0 == rd_cost1) - { - rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0); - rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1); - } - /* And pick the best. */ - best = rd_cost1 < rd_cost0; - base_bits = *(vp8_dct_value_cost_ptr + x); - - if(shortcut) - { - dx -= (dequant_ptr[rc] + sz) ^ sz; - d2 = dx*dx; - } - tokens[i][1].rate = base_bits + (best ? rate1 : rate0); - tokens[i][1].error = d2 + (best ? error1 : error0); - tokens[i][1].next = next; - tokens[i][1].token =best?t1:t0; - tokens[i][1].qc = x; - best_mask[1] |= best << i; - /* Finally, make this the new head of the trellis. */ - next = i; - } - /* There's no choice to make for a zero coefficient, so we don't - * add a new trellis node, but we do need to update the costs. - */ - else - { - band = vp8_coef_bands[i + 1]; - t0 = tokens[next][0].token; - t1 = tokens[next][1].token; - /* Update the cost of each path if we're past the EOB token. */ - if (t0 != DCT_EOB_TOKEN) - { - tokens[next][0].rate += mb->token_costs[type][band][0][t0]; - tokens[next][0].token = ZERO_TOKEN; - } - if (t1 != DCT_EOB_TOKEN) - { - tokens[next][1].rate += mb->token_costs[type][band][0][t1]; - tokens[next][1].token = ZERO_TOKEN; - } - /* Don't update next, because we didn't add a new node. */ - } - } - - /* Now pick the best path through the whole trellis. 
*/ - band = vp8_coef_bands[i + 1]; - VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); - rate0 = tokens[next][0].rate; - rate1 = tokens[next][1].rate; - error0 = tokens[next][0].error; - error1 = tokens[next][1].error; - t0 = tokens[next][0].token; - t1 = tokens[next][1].token; - rate0 += mb->token_costs[type][band][pt][t0]; - rate1 += mb->token_costs[type][band][pt][t1]; - rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0); - rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); - if (rd_cost0 == rd_cost1) - { + rc = vp8_default_zig_zag1d[i]; + x = qcoeff_ptr[rc]; + /* Only add a trellis state for non-zero coefficients. */ + if (x) { + int shortcut = 0; + error0 = tokens[next][0].error; + error1 = tokens[next][1].error; + /* Evaluate the first possibility for this state. */ + rate0 = tokens[next][0].rate; + rate1 = tokens[next][1].rate; + t0 = (vp8_dct_value_tokens_ptr + x)->Token; + /* Consider both possible successor states. */ + if (next < 16) { + band = vp8_coef_bands[i + 1]; + pt = vp8_prev_token_class[t0]; + rate0 += mb->token_costs[type][band][pt][tokens[next][0].token]; + rate1 += mb->token_costs[type][band][pt][tokens[next][1].token]; + } + rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0); + rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); + if (rd_cost0 == rd_cost1) { rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0); rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1); - } - best = rd_cost1 < rd_cost0; - final_eob = i0 - 1; - for (i = next; i < eob; i = next) - { - x = tokens[i][best].qc; - if (x) - final_eob = i; - rc = vp8_default_zig_zag1d[i]; - qcoeff_ptr[rc] = x; - dqcoeff_ptr[rc] = x * dequant_ptr[rc]; - next = tokens[i][best].next; - best = (best_mask[best] >> i) & 1; - } - final_eob++; + } + /* And pick the best. */ + best = rd_cost1 < rd_cost0; + base_bits = *(vp8_dct_value_cost_ptr + x); + dx = dqcoeff_ptr[rc] - coeff_ptr[rc]; + d2 = dx * dx; + tokens[i][0].rate = base_bits + (best ? rate1 : rate0); + tokens[i][0].error = d2 + (best ? 
error1 : error0); + tokens[i][0].next = next; + tokens[i][0].token = t0; + tokens[i][0].qc = x; + best_mask[0] |= best << i; + /* Evaluate the second possibility for this state. */ + rate0 = tokens[next][0].rate; + rate1 = tokens[next][1].rate; - *a = *l = (final_eob != !type); - *d->eob = (char)final_eob; -} -static void check_reset_2nd_coeffs(MACROBLOCKD *x, int type, - ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) -{ - int sum=0; - int i; - BLOCKD *bd = &x->block[24]; + if ((abs(x) * dequant_ptr[rc] > abs(coeff_ptr[rc])) && + (abs(x) * dequant_ptr[rc] < abs(coeff_ptr[rc]) + dequant_ptr[rc])) { + shortcut = 1; + } else { + shortcut = 0; + } - if(bd->dequant[0]>=35 && bd->dequant[1]>=35) - return; + if (shortcut) { + sz = -(x < 0); + x -= 2 * sz + 1; + } - for(i=0;i<(*bd->eob);i++) - { - int coef = bd->dqcoeff[vp8_default_zig_zag1d[i]]; - sum+= (coef>=0)?coef:-coef; - if(sum>=35) - return; - } - /************************************************************************** - our inverse hadamard transform effectively is weighted sum of all 16 inputs - with weight either 1 or -1. It has a last stage scaling of (sum+3)>>3. And - dc only idct is (dc+4)>>3. So if all the sums are between -35 and 29, the - output after inverse wht and idct will be all zero. A sum of absolute value - smaller than 35 guarantees all 16 different (+1/-1) weighted sums in wht - fall between -35 and +35. - **************************************************************************/ - if(sum < 35) - { - for(i=0;i<(*bd->eob);i++) - { - int rc = vp8_default_zig_zag1d[i]; - bd->qcoeff[rc]=0; - bd->dqcoeff[rc]=0; + /* Consider both possible successor states. */ + if (!x) { + /* If we reduced this coefficient to zero, check to see if + * we need to move the EOB back here. + */ + t0 = + tokens[next][0].token == DCT_EOB_TOKEN ? DCT_EOB_TOKEN : ZERO_TOKEN; + t1 = + tokens[next][1].token == DCT_EOB_TOKEN ? 
DCT_EOB_TOKEN : ZERO_TOKEN; + } else { + t0 = t1 = (vp8_dct_value_tokens_ptr + x)->Token; + } + if (next < 16) { + band = vp8_coef_bands[i + 1]; + if (t0 != DCT_EOB_TOKEN) { + pt = vp8_prev_token_class[t0]; + rate0 += mb->token_costs[type][band][pt][tokens[next][0].token]; } - *bd->eob = 0; - *a = *l = (*bd->eob != !type); + if (t1 != DCT_EOB_TOKEN) { + pt = vp8_prev_token_class[t1]; + rate1 += mb->token_costs[type][band][pt][tokens[next][1].token]; + } + } + + rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0); + rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); + if (rd_cost0 == rd_cost1) { + rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0); + rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1); + } + /* And pick the best. */ + best = rd_cost1 < rd_cost0; + base_bits = *(vp8_dct_value_cost_ptr + x); + + if (shortcut) { + dx -= (dequant_ptr[rc] + sz) ^ sz; + d2 = dx * dx; + } + tokens[i][1].rate = base_bits + (best ? rate1 : rate0); + tokens[i][1].error = d2 + (best ? error1 : error0); + tokens[i][1].next = next; + tokens[i][1].token = best ? t1 : t0; + tokens[i][1].qc = x; + best_mask[1] |= best << i; + /* Finally, make this the new head of the trellis. */ + next = i; } + /* There's no choice to make for a zero coefficient, so we don't + * add a new trellis node, but we do need to update the costs. + */ + else { + band = vp8_coef_bands[i + 1]; + t0 = tokens[next][0].token; + t1 = tokens[next][1].token; + /* Update the cost of each path if we're past the EOB token. */ + if (t0 != DCT_EOB_TOKEN) { + tokens[next][0].rate += mb->token_costs[type][band][0][t0]; + tokens[next][0].token = ZERO_TOKEN; + } + if (t1 != DCT_EOB_TOKEN) { + tokens[next][1].rate += mb->token_costs[type][band][0][t1]; + tokens[next][1].token = ZERO_TOKEN; + } + /* Don't update next, because we didn't add a new node. */ + } + } + + /* Now pick the best path through the whole trellis. 
*/ + band = vp8_coef_bands[i + 1]; + VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); + rate0 = tokens[next][0].rate; + rate1 = tokens[next][1].rate; + error0 = tokens[next][0].error; + error1 = tokens[next][1].error; + t0 = tokens[next][0].token; + t1 = tokens[next][1].token; + rate0 += mb->token_costs[type][band][pt][t0]; + rate1 += mb->token_costs[type][band][pt][t1]; + rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0); + rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); + if (rd_cost0 == rd_cost1) { + rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0); + rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1); + } + best = rd_cost1 < rd_cost0; + final_eob = i0 - 1; + for (i = next; i < eob; i = next) { + x = tokens[i][best].qc; + if (x) final_eob = i; + rc = vp8_default_zig_zag1d[i]; + qcoeff_ptr[rc] = x; + dqcoeff_ptr[rc] = x * dequant_ptr[rc]; + next = tokens[i][best].next; + best = (best_mask[best] >> i) & 1; + } + final_eob++; + + *a = *l = (final_eob != !type); + *d->eob = (char)final_eob; +} +static void check_reset_2nd_coeffs(MACROBLOCKD *x, int type, ENTROPY_CONTEXT *a, + ENTROPY_CONTEXT *l) { + int sum = 0; + int i; + BLOCKD *bd = &x->block[24]; + + if (bd->dequant[0] >= 35 && bd->dequant[1] >= 35) return; + + for (i = 0; i < (*bd->eob); ++i) { + int coef = bd->dqcoeff[vp8_default_zig_zag1d[i]]; + sum += (coef >= 0) ? coef : -coef; + if (sum >= 35) return; + } + /************************************************************************** + our inverse hadamard transform effectively is weighted sum of all 16 inputs + with weight either 1 or -1. It has a last stage scaling of (sum+3)>>3. And + dc only idct is (dc+4)>>3. So if all the sums are between -35 and 29, the + output after inverse wht and idct will be all zero. A sum of absolute value + smaller than 35 guarantees all 16 different (+1/-1) weighted sums in wht + fall between -35 and +35. 
+ **************************************************************************/ + if (sum < 35) { + for (i = 0; i < (*bd->eob); ++i) { + int rc = vp8_default_zig_zag1d[i]; + bd->qcoeff[rc] = 0; + bd->dqcoeff[rc] = 0; + } + *bd->eob = 0; + *a = *l = (*bd->eob != !type); + } } -static void optimize_mb(MACROBLOCK *x) -{ - int b; - int type; - int has_2nd_order; +static void optimize_mb(MACROBLOCK *x) { + int b; + int type; + int has_2nd_order; - ENTROPY_CONTEXT_PLANES t_above, t_left; - ENTROPY_CONTEXT *ta; - ENTROPY_CONTEXT *tl; + ENTROPY_CONTEXT_PLANES t_above, t_left; + ENTROPY_CONTEXT *ta; + ENTROPY_CONTEXT *tl; - memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES)); - memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES)); + memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES)); + memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES)); - ta = (ENTROPY_CONTEXT *)&t_above; - tl = (ENTROPY_CONTEXT *)&t_left; + ta = (ENTROPY_CONTEXT *)&t_above; + tl = (ENTROPY_CONTEXT *)&t_left; - has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED - && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV); - type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC; + has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED && + x->e_mbd.mode_info_context->mbmi.mode != SPLITMV); + type = has_2nd_order ? 
PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC; - for (b = 0; b < 16; b++) - { - optimize_b(x, b, type, - ta + vp8_block2above[b], tl + vp8_block2left[b]); - } + for (b = 0; b < 16; ++b) { + optimize_b(x, b, type, ta + vp8_block2above[b], tl + vp8_block2left[b]); + } - for (b = 16; b < 24; b++) - { - optimize_b(x, b, PLANE_TYPE_UV, - ta + vp8_block2above[b], tl + vp8_block2left[b]); - } + for (b = 16; b < 24; ++b) { + optimize_b(x, b, PLANE_TYPE_UV, ta + vp8_block2above[b], + tl + vp8_block2left[b]); + } - if (has_2nd_order) - { - b=24; - optimize_b(x, b, PLANE_TYPE_Y2, - ta + vp8_block2above[b], tl + vp8_block2left[b]); - check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2, - ta + vp8_block2above[b], tl + vp8_block2left[b]); - } + if (has_2nd_order) { + b = 24; + optimize_b(x, b, PLANE_TYPE_Y2, ta + vp8_block2above[b], + tl + vp8_block2left[b]); + check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2, ta + vp8_block2above[b], + tl + vp8_block2left[b]); + } } +void vp8_optimize_mby(MACROBLOCK *x) { + int b; + int type; + int has_2nd_order; -void vp8_optimize_mby(MACROBLOCK *x) -{ - int b; - int type; - int has_2nd_order; + ENTROPY_CONTEXT_PLANES t_above, t_left; + ENTROPY_CONTEXT *ta; + ENTROPY_CONTEXT *tl; - ENTROPY_CONTEXT_PLANES t_above, t_left; - ENTROPY_CONTEXT *ta; - ENTROPY_CONTEXT *tl; + if (!x->e_mbd.above_context) return; - if (!x->e_mbd.above_context) - return; + if (!x->e_mbd.left_context) return; - if (!x->e_mbd.left_context) - return; + memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES)); + memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES)); - memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES)); - memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES)); + ta = (ENTROPY_CONTEXT *)&t_above; + tl = (ENTROPY_CONTEXT *)&t_left; - ta = (ENTROPY_CONTEXT *)&t_above; - tl = (ENTROPY_CONTEXT *)&t_left; + has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED && + 
x->e_mbd.mode_info_context->mbmi.mode != SPLITMV); + type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC; - has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED - && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV); - type = has_2nd_order ? PLANE_TYPE_Y_NO_DC : PLANE_TYPE_Y_WITH_DC; + for (b = 0; b < 16; ++b) { + optimize_b(x, b, type, ta + vp8_block2above[b], tl + vp8_block2left[b]); + } - for (b = 0; b < 16; b++) - { - optimize_b(x, b, type, - ta + vp8_block2above[b], tl + vp8_block2left[b]); - } - - - if (has_2nd_order) - { - b=24; - optimize_b(x, b, PLANE_TYPE_Y2, - ta + vp8_block2above[b], tl + vp8_block2left[b]); - check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2, - ta + vp8_block2above[b], tl + vp8_block2left[b]); - } + if (has_2nd_order) { + b = 24; + optimize_b(x, b, PLANE_TYPE_Y2, ta + vp8_block2above[b], + tl + vp8_block2left[b]); + check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2, ta + vp8_block2above[b], + tl + vp8_block2left[b]); + } } -void vp8_optimize_mbuv(MACROBLOCK *x) -{ - int b; - ENTROPY_CONTEXT_PLANES t_above, t_left; - ENTROPY_CONTEXT *ta; - ENTROPY_CONTEXT *tl; +void vp8_optimize_mbuv(MACROBLOCK *x) { + int b; + ENTROPY_CONTEXT_PLANES t_above, t_left; + ENTROPY_CONTEXT *ta; + ENTROPY_CONTEXT *tl; - if (!x->e_mbd.above_context) - return; + if (!x->e_mbd.above_context) return; - if (!x->e_mbd.left_context) - return; + if (!x->e_mbd.left_context) return; - memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES)); - memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES)); + memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES)); + memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES)); - ta = (ENTROPY_CONTEXT *)&t_above; - tl = (ENTROPY_CONTEXT *)&t_left; + ta = (ENTROPY_CONTEXT *)&t_above; + tl = (ENTROPY_CONTEXT *)&t_left; - for (b = 16; b < 24; b++) - { - optimize_b(x, b, PLANE_TYPE_UV, - ta + vp8_block2above[b], tl + vp8_block2left[b]); - } + for (b = 
16; b < 24; ++b) { + optimize_b(x, b, PLANE_TYPE_UV, ta + vp8_block2above[b], + tl + vp8_block2left[b]); + } } -void vp8_encode_inter16x16(MACROBLOCK *x) -{ - vp8_build_inter_predictors_mb(&x->e_mbd); +void vp8_encode_inter16x16(MACROBLOCK *x) { + vp8_build_inter_predictors_mb(&x->e_mbd); - vp8_subtract_mb(x); + vp8_subtract_mb(x); - transform_mb(x); + transform_mb(x); - vp8_quantize_mb(x); + vp8_quantize_mb(x); - if (x->optimize) - optimize_mb(x); + if (x->optimize) optimize_mb(x); } /* this funciton is used by first pass only */ -void vp8_encode_inter16x16y(MACROBLOCK *x) -{ - BLOCK *b = &x->block[0]; +void vp8_encode_inter16x16y(MACROBLOCK *x) { + BLOCK *b = &x->block[0]; - vp8_build_inter16x16_predictors_mby(&x->e_mbd, x->e_mbd.dst.y_buffer, - x->e_mbd.dst.y_stride); + vp8_build_inter16x16_predictors_mby(&x->e_mbd, x->e_mbd.dst.y_buffer, + x->e_mbd.dst.y_stride); - vp8_subtract_mby(x->src_diff, *(b->base_src), - b->src_stride, x->e_mbd.dst.y_buffer, x->e_mbd.dst.y_stride); + vp8_subtract_mby(x->src_diff, *(b->base_src), b->src_stride, + x->e_mbd.dst.y_buffer, x->e_mbd.dst.y_stride); - transform_mby(x); + transform_mby(x); - vp8_quantize_mby(x); + vp8_quantize_mby(x); - vp8_inverse_transform_mby(&x->e_mbd); + vp8_inverse_transform_mby(&x->e_mbd); } diff --git a/libs/libvpx/vp8/encoder/encodemb.h b/libs/libvpx/vp8/encoder/encodemb.h index 10b3d8651a..b55ba3ac3f 100644 --- a/libs/libvpx/vp8/encoder/encodemb.h +++ b/libs/libvpx/vp8/encoder/encodemb.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_ENCODER_ENCODEMB_H_ #define VP8_ENCODER_ENCODEMB_H_ diff --git a/libs/libvpx/vp8/encoder/encodemv.c b/libs/libvpx/vp8/encoder/encodemv.c index 2a74ff4ae3..cfc7af663f 100644 --- a/libs/libvpx/vp8/encoder/encodemv.c +++ b/libs/libvpx/vp8/encoder/encodemv.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "vp8/common/common.h" #include "encodemv.h" #include "vp8/common/entropymode.h" @@ -20,54 +19,46 @@ extern unsigned int active_section; #endif -static void encode_mvcomponent( - vp8_writer *const w, - const int v, - const struct mv_context *mvc -) -{ - const vp8_prob *p = mvc->prob; - const int x = v < 0 ? -v : v; +static void encode_mvcomponent(vp8_writer *const w, const int v, + const struct mv_context *mvc) { + const vp8_prob *p = mvc->prob; + const int x = v < 0 ? -v : v; - if (x < mvnum_short) /* Small */ - { - vp8_write(w, 0, p [mvpis_short]); - vp8_treed_write(w, vp8_small_mvtree, p + MVPshort, x, 3); + if (x < mvnum_short) /* Small */ + { + vp8_write(w, 0, p[mvpis_short]); + vp8_treed_write(w, vp8_small_mvtree, p + MVPshort, x, 3); - if (!x) - return; /* no sign bit */ - } - else /* Large */ - { - int i = 0; + if (!x) return; /* no sign bit */ + } else /* Large */ + { + int i = 0; - vp8_write(w, 1, p [mvpis_short]); + vp8_write(w, 1, p[mvpis_short]); - do - vp8_write(w, (x >> i) & 1, p [MVPbits + i]); + do + vp8_write(w, (x >> i) & 1, p[MVPbits + i]); - while (++i < 3); + while (++i < 3); - i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */ + i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */ - do - vp8_write(w, (x >> i) & 1, p [MVPbits + i]); + do + vp8_write(w, (x >> i) & 1, p[MVPbits + i]); - while (--i > 3); + while (--i > 3); - if (x & 0xFFF0) - vp8_write(w, (x >> 3) & 1, p [MVPbits + 3]); - } + if (x & 0xFFF0) vp8_write(w, (x >> 3) & 1, p[MVPbits + 3]); + } - vp8_write(w, v < 0, p [MVPsign]); + vp8_write(w, v < 0, p[MVPsign]); } #if 0 static int max_mv_r = 0; static int max_mv_c = 0; #endif -void vp8_encode_motion_vector(vp8_writer *w, const MV *mv, const MV_CONTEXT *mvc) -{ - +void vp8_encode_motion_vector(vp8_writer *w, const MV *mv, + const MV_CONTEXT *mvc) { #if 0 { if (abs(mv->row >> 1) > max_mv_r) @@ -92,289 +83,249 @@ void vp8_encode_motion_vector(vp8_writer *w, const MV *mv, const MV_CONTEXT *mvc 
} #endif - encode_mvcomponent(w, mv->row >> 1, &mvc[0]); - encode_mvcomponent(w, mv->col >> 1, &mvc[1]); + encode_mvcomponent(w, mv->row >> 1, &mvc[0]); + encode_mvcomponent(w, mv->col >> 1, &mvc[1]); } +static unsigned int cost_mvcomponent(const int v, + const struct mv_context *mvc) { + const vp8_prob *p = mvc->prob; + const int x = v; + unsigned int cost; -static unsigned int cost_mvcomponent(const int v, const struct mv_context *mvc) -{ - const vp8_prob *p = mvc->prob; - const int x = v; - unsigned int cost; + if (x < mvnum_short) { + cost = vp8_cost_zero(p[mvpis_short]) + + vp8_treed_cost(vp8_small_mvtree, p + MVPshort, x, 3); - if (x < mvnum_short) - { - cost = vp8_cost_zero(p [mvpis_short]) - + vp8_treed_cost(vp8_small_mvtree, p + MVPshort, x, 3); + if (!x) return cost; + } else { + int i = 0; + cost = vp8_cost_one(p[mvpis_short]); - if (!x) - return cost; - } - else - { - int i = 0; - cost = vp8_cost_one(p [mvpis_short]); + do { + cost += vp8_cost_bit(p[MVPbits + i], (x >> i) & 1); - do - cost += vp8_cost_bit(p [MVPbits + i], (x >> i) & 1); + } while (++i < 3); - while (++i < 3); + i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */ - i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */ + do { + cost += vp8_cost_bit(p[MVPbits + i], (x >> i) & 1); - do - cost += vp8_cost_bit(p [MVPbits + i], (x >> i) & 1); + } while (--i > 3); - while (--i > 3); + if (x & 0xFFF0) cost += vp8_cost_bit(p[MVPbits + 3], (x >> 3) & 1); + } - if (x & 0xFFF0) - cost += vp8_cost_bit(p [MVPbits + 3], (x >> 3) & 1); - } - - return cost; /* + vp8_cost_bit( p [MVPsign], v < 0); */ + return cost; /* + vp8_cost_bit( p [MVPsign], v < 0); */ } -void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int mvc_flag[2]) -{ - int i = 1; - unsigned int cost0 = 0; - unsigned int cost1 = 0; +void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, + int mvc_flag[2]) { + int i = 1; + unsigned int cost0 = 0; + unsigned int cost1 = 0; 
- vp8_clear_system_state(); + vp8_clear_system_state(); - i = 1; + i = 1; - if (mvc_flag[0]) - { - mvcost [0] [0] = cost_mvcomponent(0, &mvc[0]); + if (mvc_flag[0]) { + mvcost[0][0] = cost_mvcomponent(0, &mvc[0]); - do - { - cost0 = cost_mvcomponent(i, &mvc[0]); + do { + cost0 = cost_mvcomponent(i, &mvc[0]); - mvcost [0] [i] = cost0 + vp8_cost_zero(mvc[0].prob[MVPsign]); - mvcost [0] [-i] = cost0 + vp8_cost_one(mvc[0].prob[MVPsign]); - } - while (++i <= mv_max); - } + mvcost[0][i] = cost0 + vp8_cost_zero(mvc[0].prob[MVPsign]); + mvcost[0][-i] = cost0 + vp8_cost_one(mvc[0].prob[MVPsign]); + } while (++i <= mv_max); + } - i = 1; + i = 1; - if (mvc_flag[1]) - { - mvcost [1] [0] = cost_mvcomponent(0, &mvc[1]); + if (mvc_flag[1]) { + mvcost[1][0] = cost_mvcomponent(0, &mvc[1]); - do - { - cost1 = cost_mvcomponent(i, &mvc[1]); + do { + cost1 = cost_mvcomponent(i, &mvc[1]); - mvcost [1] [i] = cost1 + vp8_cost_zero(mvc[1].prob[MVPsign]); - mvcost [1] [-i] = cost1 + vp8_cost_one(mvc[1].prob[MVPsign]); - } - while (++i <= mv_max); - } + mvcost[1][i] = cost1 + vp8_cost_zero(mvc[1].prob[MVPsign]); + mvcost[1][-i] = cost1 + vp8_cost_one(mvc[1].prob[MVPsign]); + } while (++i <= mv_max); + } } - /* Motion vector probability table update depends on benefit. * Small correction allows for the fact that an update to an MV probability * may have benefit in subsequent frames as well as the current one. */ -#define MV_PROB_UPDATE_CORRECTION -1 +#define MV_PROB_UPDATE_CORRECTION -1 +static void calc_prob(vp8_prob *p, const unsigned int ct[2]) { + const unsigned int tot = ct[0] + ct[1]; -static void calc_prob(vp8_prob *p, const unsigned int ct[2]) -{ - const unsigned int tot = ct[0] + ct[1]; - - if (tot) - { - const vp8_prob x = ((ct[0] * 255) / tot) & -2; - *p = x ? x : 1; - } + if (tot) { + const vp8_prob x = ((ct[0] * 255) / tot) & -2; + *p = x ? 
x : 1; + } } -static void update( - vp8_writer *const w, - const unsigned int ct[2], - vp8_prob *const cur_p, - const vp8_prob new_p, - const vp8_prob update_p, - int *updated -) -{ - const int cur_b = vp8_cost_branch(ct, *cur_p); - const int new_b = vp8_cost_branch(ct, new_p); - const int cost = 7 + MV_PROB_UPDATE_CORRECTION + ((vp8_cost_one(update_p) - vp8_cost_zero(update_p) + 128) >> 8); +static void update(vp8_writer *const w, const unsigned int ct[2], + vp8_prob *const cur_p, const vp8_prob new_p, + const vp8_prob update_p, int *updated) { + const int cur_b = vp8_cost_branch(ct, *cur_p); + const int new_b = vp8_cost_branch(ct, new_p); + const int cost = + 7 + MV_PROB_UPDATE_CORRECTION + + ((vp8_cost_one(update_p) - vp8_cost_zero(update_p) + 128) >> 8); - if (cur_b - new_b > cost) - { - *cur_p = new_p; - vp8_write(w, 1, update_p); - vp8_write_literal(w, new_p >> 1, 7); - *updated = 1; + if (cur_b - new_b > cost) { + *cur_p = new_p; + vp8_write(w, 1, update_p); + vp8_write_literal(w, new_p >> 1, 7); + *updated = 1; - } - else - vp8_write(w, 0, update_p); + } else + vp8_write(w, 0, update_p); } -static void write_component_probs( - vp8_writer *const w, - struct mv_context *cur_mvc, - const struct mv_context *default_mvc_, - const struct mv_context *update_mvc, - const unsigned int events [MVvals], - unsigned int rc, - int *updated -) -{ - vp8_prob *Pcur = cur_mvc->prob; - const vp8_prob *default_mvc = default_mvc_->prob; - const vp8_prob *Pupdate = update_mvc->prob; - unsigned int is_short_ct[2], sign_ct[2]; +static void write_component_probs(vp8_writer *const w, + struct mv_context *cur_mvc, + const struct mv_context *default_mvc_, + const struct mv_context *update_mvc, + const unsigned int events[MVvals], + unsigned int rc, int *updated) { + vp8_prob *Pcur = cur_mvc->prob; + const vp8_prob *default_mvc = default_mvc_->prob; + const vp8_prob *Pupdate = update_mvc->prob; + unsigned int is_short_ct[2], sign_ct[2]; - unsigned int bit_ct [mvlong_width] [2]; + 
unsigned int bit_ct[mvlong_width][2]; - unsigned int short_ct [mvnum_short]; - unsigned int short_bct [mvnum_short-1] [2]; + unsigned int short_ct[mvnum_short]; + unsigned int short_bct[mvnum_short - 1][2]; - vp8_prob Pnew [MVPcount]; + vp8_prob Pnew[MVPcount]; - (void) rc; - vp8_copy_array(Pnew, default_mvc, MVPcount); + (void)rc; + vp8_copy_array(Pnew, default_mvc, MVPcount); - vp8_zero(is_short_ct) - vp8_zero(sign_ct) - vp8_zero(bit_ct) - vp8_zero(short_ct) - vp8_zero(short_bct) + vp8_zero(is_short_ct) vp8_zero(sign_ct) vp8_zero(bit_ct) vp8_zero(short_ct) + vp8_zero(short_bct) + /* j=0 */ + { + const int c = events[mv_max]; - /* j=0 */ - { - const int c = events [mv_max]; + is_short_ct[0] += c; /* Short vector */ + short_ct[0] += c; /* Magnitude distribution */ + } - is_short_ct [0] += c; /* Short vector */ - short_ct [0] += c; /* Magnitude distribution */ - } + /* j: 1 ~ mv_max (1023) */ + { + int j = 1; - /* j: 1 ~ mv_max (1023) */ - { - int j = 1; + do { + const int c1 = events[mv_max + j]; /* positive */ + const int c2 = events[mv_max - j]; /* negative */ + const int c = c1 + c2; + int a = j; - do - { - const int c1 = events [mv_max + j]; /* positive */ - const int c2 = events [mv_max - j]; /* negative */ - const int c = c1 + c2; - int a = j; + sign_ct[0] += c1; + sign_ct[1] += c2; - sign_ct [0] += c1; - sign_ct [1] += c2; + if (a < mvnum_short) { + is_short_ct[0] += c; /* Short vector */ + short_ct[a] += c; /* Magnitude distribution */ + } else { + int k = mvlong_width - 1; + is_short_ct[1] += c; /* Long vector */ - if (a < mvnum_short) - { - is_short_ct [0] += c; /* Short vector */ - short_ct [a] += c; /* Magnitude distribution */ - } - else - { - int k = mvlong_width - 1; - is_short_ct [1] += c; /* Long vector */ + /* bit 3 not always encoded. */ + do { + bit_ct[k][(a >> k) & 1] += c; - /* bit 3 not always encoded. 
*/ - do - bit_ct [k] [(a >> k) & 1] += c; + } while (--k >= 0); + } + } while (++j <= mv_max); + } - while (--k >= 0); - } - } - while (++j <= mv_max); - } + calc_prob(Pnew + mvpis_short, is_short_ct); - calc_prob(Pnew + mvpis_short, is_short_ct); + calc_prob(Pnew + MVPsign, sign_ct); - calc_prob(Pnew + MVPsign, sign_ct); + { + vp8_prob p[mvnum_short - 1]; /* actually only need branch ct */ + int j = 0; - { - vp8_prob p [mvnum_short - 1]; /* actually only need branch ct */ - int j = 0; + vp8_tree_probs_from_distribution(8, vp8_small_mvencodings, vp8_small_mvtree, + p, short_bct, short_ct, 256, 1); - vp8_tree_probs_from_distribution( - 8, vp8_small_mvencodings, vp8_small_mvtree, - p, short_bct, short_ct, - 256, 1 - ); + do { + calc_prob(Pnew + MVPshort + j, short_bct[j]); - do - calc_prob(Pnew + MVPshort + j, short_bct[j]); + } while (++j < mvnum_short - 1); + } - while (++j < mvnum_short - 1); - } + { + int j = 0; - { - int j = 0; + do { + calc_prob(Pnew + MVPbits + j, bit_ct[j]); - do - calc_prob(Pnew + MVPbits + j, bit_ct[j]); + } while (++j < mvlong_width); + } - while (++j < mvlong_width); - } + update(w, is_short_ct, Pcur + mvpis_short, Pnew[mvpis_short], *Pupdate++, + updated); - update(w, is_short_ct, Pcur + mvpis_short, Pnew[mvpis_short], *Pupdate++, updated); + update(w, sign_ct, Pcur + MVPsign, Pnew[MVPsign], *Pupdate++, updated); - update(w, sign_ct, Pcur + MVPsign, Pnew[MVPsign], *Pupdate++, updated); + { + const vp8_prob *const new_p = Pnew + MVPshort; + vp8_prob *const cur_p = Pcur + MVPshort; - { - const vp8_prob *const new_p = Pnew + MVPshort; - vp8_prob *const cur_p = Pcur + MVPshort; + int j = 0; - int j = 0; + do { + update(w, short_bct[j], cur_p + j, new_p[j], *Pupdate++, updated); - do + } while (++j < mvnum_short - 1); + } - update(w, short_bct[j], cur_p + j, new_p[j], *Pupdate++, updated); + { + const vp8_prob *const new_p = Pnew + MVPbits; + vp8_prob *const cur_p = Pcur + MVPbits; - while (++j < mvnum_short - 1); - } + int j = 0; - { - const 
vp8_prob *const new_p = Pnew + MVPbits; - vp8_prob *const cur_p = Pcur + MVPbits; + do { + update(w, bit_ct[j], cur_p + j, new_p[j], *Pupdate++, updated); - int j = 0; - - do - - update(w, bit_ct[j], cur_p + j, new_p[j], *Pupdate++, updated); - - while (++j < mvlong_width); - } + } while (++j < mvlong_width); + } } -void vp8_write_mvprobs(VP8_COMP *cpi) -{ - vp8_writer *const w = cpi->bc; - MV_CONTEXT *mvc = cpi->common.fc.mvc; - int flags[2] = {0, 0}; +void vp8_write_mvprobs(VP8_COMP *cpi) { + vp8_writer *const w = cpi->bc; + MV_CONTEXT *mvc = cpi->common.fc.mvc; + int flags[2] = { 0, 0 }; #ifdef VP8_ENTROPY_STATS - active_section = 4; + active_section = 4; #endif - write_component_probs( - w, &mvc[0], &vp8_default_mv_context[0], &vp8_mv_update_probs[0], - cpi->mb.MVcount[0], 0, &flags[0] - ); - write_component_probs( - w, &mvc[1], &vp8_default_mv_context[1], &vp8_mv_update_probs[1], - cpi->mb.MVcount[1], 1, &flags[1] - ); + write_component_probs(w, &mvc[0], &vp8_default_mv_context[0], + &vp8_mv_update_probs[0], cpi->mb.MVcount[0], 0, + &flags[0]); + write_component_probs(w, &mvc[1], &vp8_default_mv_context[1], + &vp8_mv_update_probs[1], cpi->mb.MVcount[1], 1, + &flags[1]); - if (flags[0] || flags[1]) - vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flags); + if (flags[0] || flags[1]) { + vp8_build_component_cost_table( + cpi->mb.mvcost, (const MV_CONTEXT *)cpi->common.fc.mvc, flags); + } #ifdef VP8_ENTROPY_STATS - active_section = 5; + active_section = 5; #endif } diff --git a/libs/libvpx/vp8/encoder/encodemv.h b/libs/libvpx/vp8/encoder/encodemv.h index 722162ba21..87db30f310 100644 --- a/libs/libvpx/vp8/encoder/encodemv.h +++ b/libs/libvpx/vp8/encoder/encodemv.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_ENCODER_ENCODEMV_H_ #define VP8_ENCODER_ENCODEMV_H_ @@ -20,7 +19,8 @@ extern "C" { void vp8_write_mvprobs(VP8_COMP *); void vp8_encode_motion_vector(vp8_writer *, const MV *, const MV_CONTEXT *); -void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, int mvc_flag[2]); +void vp8_build_component_cost_table(int *mvcost[2], const MV_CONTEXT *mvc, + int mvc_flag[2]); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/encoder/ethreading.c b/libs/libvpx/vp8/encoder/ethreading.c index 4f689c4bc7..708002b1e6 100644 --- a/libs/libvpx/vp8/encoder/ethreading.c +++ b/libs/libvpx/vp8/encoder/ethreading.c @@ -17,664 +17,623 @@ #if CONFIG_MULTITHREAD -extern void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip); +extern void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, + int ok_to_skip); -static THREAD_FUNCTION thread_loopfilter(void *p_data) -{ - VP8_COMP *cpi = (VP8_COMP *)(((LPFTHREAD_DATA *)p_data)->ptr1); - VP8_COMMON *cm = &cpi->common; +static THREAD_FUNCTION thread_loopfilter(void *p_data) { + VP8_COMP *cpi = (VP8_COMP *)(((LPFTHREAD_DATA *)p_data)->ptr1); + VP8_COMMON *cm = &cpi->common; - while (1) - { - if (protected_read(&cpi->mt_mutex, &cpi->b_multi_threaded) == 0) - break; + while (1) { + if (cpi->b_multi_threaded == 0) break; - if (sem_wait(&cpi->h_event_start_lpf) == 0) - { - /* we're shutting down */ - if (protected_read(&cpi->mt_mutex, &cpi->b_multi_threaded) == 0) - break; + if (sem_wait(&cpi->h_event_start_lpf) == 0) { + /* we're shutting down */ + if (cpi->b_multi_threaded == 0) break; - vp8_loopfilter_frame(cpi, cm); + vp8_loopfilter_frame(cpi, cm); - sem_post(&cpi->h_event_end_lpf); - } + sem_post(&cpi->h_event_end_lpf); } + } - return 0; + return 0; } -static -THREAD_FUNCTION thread_encoding_proc(void *p_data) -{ - int ithread = ((ENCODETHREAD_DATA *)p_data)->ithread; - VP8_COMP *cpi = (VP8_COMP *)(((ENCODETHREAD_DATA *)p_data)->ptr1); - MB_ROW_COMP *mbri = (MB_ROW_COMP 
*)(((ENCODETHREAD_DATA *)p_data)->ptr2); - ENTROPY_CONTEXT_PLANES mb_row_left_context; +static THREAD_FUNCTION thread_encoding_proc(void *p_data) { + int ithread = ((ENCODETHREAD_DATA *)p_data)->ithread; + VP8_COMP *cpi = (VP8_COMP *)(((ENCODETHREAD_DATA *)p_data)->ptr1); + MB_ROW_COMP *mbri = (MB_ROW_COMP *)(((ENCODETHREAD_DATA *)p_data)->ptr2); + ENTROPY_CONTEXT_PLANES mb_row_left_context; - while (1) - { - if (protected_read(&cpi->mt_mutex, &cpi->b_multi_threaded) == 0) - break; + while (1) { + if (cpi->b_multi_threaded == 0) break; - if (sem_wait(&cpi->h_event_start_encoding[ithread]) == 0) - { - const int nsync = cpi->mt_sync_range; - VP8_COMMON *cm = &cpi->common; - int mb_row; - MACROBLOCK *x = &mbri->mb; - MACROBLOCKD *xd = &x->e_mbd; - TOKENEXTRA *tp ; + if (sem_wait(&cpi->h_event_start_encoding[ithread]) == 0) { + const int nsync = cpi->mt_sync_range; + VP8_COMMON *cm = &cpi->common; + int mb_row; + MACROBLOCK *x = &mbri->mb; + MACROBLOCKD *xd = &x->e_mbd; + TOKENEXTRA *tp; #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - TOKENEXTRA *tp_start = cpi->tok + (1 + ithread) * (16 * 24); - const int num_part = (1 << cm->multi_token_partition); + TOKENEXTRA *tp_start = cpi->tok + (1 + ithread) * (16 * 24); + const int num_part = (1 << cm->multi_token_partition); #endif - int *segment_counts = mbri->segment_counts; - int *totalrate = &mbri->totalrate; + int *segment_counts = mbri->segment_counts; + int *totalrate = &mbri->totalrate; - /* we're shutting down */ - if (protected_read(&cpi->mt_mutex, &cpi->b_multi_threaded) == 0) - break; + /* we're shutting down */ + if (cpi->b_multi_threaded == 0) break; - xd->mode_info_context = cm->mi + cm->mode_info_stride * - (ithread + 1); - xd->mode_info_stride = cm->mode_info_stride; + for (mb_row = ithread + 1; mb_row < cm->mb_rows; + mb_row += (cpi->encoding_thread_count + 1)) { + int recon_yoffset, recon_uvoffset; + int mb_col; + int ref_fb_idx = cm->lst_fb_idx; + int dst_fb_idx = cm->new_fb_idx; + int 
recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride; + int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride; + int map_index = (mb_row * cm->mb_cols); + volatile const int *last_row_current_mb_col; + volatile int *current_mb_col = &cpi->mt_current_mb_col[mb_row]; - for (mb_row = ithread + 1; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1)) - { - - int recon_yoffset, recon_uvoffset; - int mb_col; - int ref_fb_idx = cm->lst_fb_idx; - int dst_fb_idx = cm->new_fb_idx; - int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride; - int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride; - int map_index = (mb_row * cm->mb_cols); - const int *last_row_current_mb_col; - int *current_mb_col = &cpi->mt_current_mb_col[mb_row]; - -#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) - vp8_writer *w = &cpi->bc[1 + (mb_row % num_part)]; +#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) + vp8_writer *w = &cpi->bc[1 + (mb_row % num_part)]; #else - tp = cpi->tok + (mb_row * (cm->mb_cols * 16 * 24)); - cpi->tplist[mb_row].start = tp; + tp = cpi->tok + (mb_row * (cm->mb_cols * 16 * 24)); + cpi->tplist[mb_row].start = tp; #endif - last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1]; + last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1]; - /* reset above block coeffs */ - xd->above_context = cm->above_context; - xd->left_context = &mb_row_left_context; + /* reset above block coeffs */ + xd->above_context = cm->above_context; + xd->left_context = &mb_row_left_context; - vp8_zero(mb_row_left_context); + vp8_zero(mb_row_left_context); - xd->up_available = (mb_row != 0); - recon_yoffset = (mb_row * recon_y_stride * 16); - recon_uvoffset = (mb_row * recon_uv_stride * 8); + xd->up_available = (mb_row != 0); + recon_yoffset = (mb_row * recon_y_stride * 16); + recon_uvoffset = (mb_row * recon_uv_stride * 8); - /* Set the mb activity pointer to the start of the row. 
*/ - x->mb_activity_ptr = &cpi->mb_activity_map[map_index]; + /* Set the mb activity pointer to the start of the row. */ + x->mb_activity_ptr = &cpi->mb_activity_map[map_index]; - /* for each macroblock col in image */ - for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) - { - if (((mb_col - 1) % nsync) == 0) { - pthread_mutex_t *mutex = &cpi->pmutex[mb_row]; - protected_write(mutex, current_mb_col, mb_col - 1); - } + /* for each macroblock col in image */ + for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { + *current_mb_col = mb_col - 1; - if (mb_row && !(mb_col & (nsync - 1))) { - pthread_mutex_t *mutex = &cpi->pmutex[mb_row-1]; - sync_read(mutex, mb_col, last_row_current_mb_col, nsync); - } - -#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - tp = tp_start; -#endif - - /* Distance of Mb to the various image edges. - * These specified to 8th pel as they are always compared - * to values that are in 1/8th pel units - */ - xd->mb_to_left_edge = -((mb_col * 16) << 3); - xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3; - xd->mb_to_top_edge = -((mb_row * 16) << 3); - xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3; - - /* Set up limit values for motion vectors used to prevent - * them extending outside the UMV borders - */ - x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16)); - x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16); - x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16)); - x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16); - - xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset; - xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset; - xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset; - xd->left_available = (mb_col != 0); - - x->rddiv = cpi->RDDIV; - x->rdmult = cpi->RDMULT; - - /* Copy current mb to a buffer */ - vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); - - if 
(cpi->oxcf.tuning == VP8_TUNE_SSIM) - vp8_activity_masking(cpi, x); - - /* Is segmentation enabled */ - /* MB level adjustment to quantizer */ - if (xd->segmentation_enabled) - { - /* Code to set segment id in xd->mbmi.segment_id for - * current MB (with range checking) - */ - if (cpi->segmentation_map[map_index + mb_col] <= 3) - xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index + mb_col]; - else - xd->mode_info_context->mbmi.segment_id = 0; - - vp8cx_mb_init_quantizer(cpi, x, 1); - } - else - /* Set to Segment 0 by default */ - xd->mode_info_context->mbmi.segment_id = 0; - - x->active_ptr = cpi->active_map + map_index + mb_col; - - if (cm->frame_type == KEY_FRAME) - { - *totalrate += vp8cx_encode_intra_macroblock(cpi, x, &tp); -#ifdef MODE_STATS - y_modes[xd->mbmi.mode] ++; -#endif - } - else - { - *totalrate += vp8cx_encode_inter_macroblock(cpi, x, &tp, recon_yoffset, recon_uvoffset, mb_row, mb_col); - -#ifdef MODE_STATS - inter_y_modes[xd->mbmi.mode] ++; - - if (xd->mbmi.mode == SPLITMV) - { - int b; - - for (b = 0; b < xd->mbmi.partition_count; b++) - { - inter_b_modes[x->partition->bmi[b].mode] ++; - } - } - -#endif - // Keep track of how many (consecutive) times a block - // is coded as ZEROMV_LASTREF, for base layer frames. - // Reset to 0 if its coded as anything else. - if (cpi->current_layer == 0) { - if (xd->mode_info_context->mbmi.mode == ZEROMV && - xd->mode_info_context->mbmi.ref_frame == - LAST_FRAME) { - // Increment, check for wrap-around. 
- if (cpi->consec_zero_last[map_index+mb_col] < 255) - cpi->consec_zero_last[map_index+mb_col] += 1; - if (cpi->consec_zero_last_mvbias[map_index+mb_col] < 255) - cpi->consec_zero_last_mvbias[map_index+mb_col] += 1; - } else { - cpi->consec_zero_last[map_index+mb_col] = 0; - cpi->consec_zero_last_mvbias[map_index+mb_col] = 0; - } - if (x->zero_last_dot_suppress) - cpi->consec_zero_last_mvbias[map_index+mb_col] = 0; - } - - /* Special case code for cyclic refresh - * If cyclic update enabled then copy - * xd->mbmi.segment_id; (which may have been updated - * based on mode during - * vp8cx_encode_inter_macroblock()) back into the - * global segmentation map - */ - if ((cpi->current_layer == 0) && - (cpi->cyclic_refresh_mode_enabled && - xd->segmentation_enabled)) - { - const MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi; - cpi->segmentation_map[map_index + mb_col] = mbmi->segment_id; - - /* If the block has been refreshed mark it as clean - * (the magnitude of the -ve influences how long it - * will be before we consider another refresh): - * Else if it was coded (last frame 0,0) and has - * not already been refreshed then mark it as a - * candidate for cleanup next time (marked 0) else - * mark it as dirty (1). - */ - if (mbmi->segment_id) - cpi->cyclic_refresh_map[map_index + mb_col] = -1; - else if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame == LAST_FRAME)) - { - if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) - cpi->cyclic_refresh_map[map_index + mb_col] = 0; - } - else - cpi->cyclic_refresh_map[map_index + mb_col] = 1; - - } - } - -#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - /* pack tokens for this MB */ - { - int tok_count = tp - tp_start; - vp8_pack_tokens(w, tp_start, tok_count); - } -#else - cpi->tplist[mb_row].stop = tp; -#endif - /* Increment pointer into gf usage flags structure. */ - x->gf_active_ptr++; - - /* Increment the activity mask pointers. 
*/ - x->mb_activity_ptr++; - - /* adjust to the next column of macroblocks */ - x->src.y_buffer += 16; - x->src.u_buffer += 8; - x->src.v_buffer += 8; - - recon_yoffset += 16; - recon_uvoffset += 8; - - /* Keep track of segment usage */ - segment_counts[xd->mode_info_context->mbmi.segment_id]++; - - /* skip to next mb */ - xd->mode_info_context++; - x->partition_info++; - xd->above_context++; - } - - vp8_extend_mb_row( &cm->yv12_fb[dst_fb_idx], - xd->dst.y_buffer + 16, - xd->dst.u_buffer + 8, - xd->dst.v_buffer + 8); - - protected_write(&cpi->pmutex[mb_row], current_mb_col, - mb_col + nsync); - - /* this is to account for the border */ - xd->mode_info_context++; - x->partition_info++; - - x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols; - x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols; - x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols; - - xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count; - x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count; - x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count; - - if (mb_row == cm->mb_rows - 1) - { - sem_post(&cpi->h_event_end_encoding); /* signal frame encoding end */ - } + if ((mb_col & (nsync - 1)) == 0) { + while (mb_col > (*last_row_current_mb_col - nsync)) { + x86_pause_hint(); + thread_sleep(0); } - } - } + } - /* printf("exit thread %d\n", ithread); */ - return 0; +#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING + tp = tp_start; +#endif + + /* Distance of Mb to the various image edges. 
+ * These specified to 8th pel as they are always compared + * to values that are in 1/8th pel units + */ + xd->mb_to_left_edge = -((mb_col * 16) << 3); + xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3; + xd->mb_to_top_edge = -((mb_row * 16) << 3); + xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3; + + /* Set up limit values for motion vectors used to prevent + * them extending outside the UMV borders + */ + x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16)); + x->mv_col_max = + ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16); + x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16)); + x->mv_row_max = + ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16); + + xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset; + xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset; + xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset; + xd->left_available = (mb_col != 0); + + x->rddiv = cpi->RDDIV; + x->rdmult = cpi->RDMULT; + + /* Copy current mb to a buffer */ + vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); + + if (cpi->oxcf.tuning == VP8_TUNE_SSIM) vp8_activity_masking(cpi, x); + + /* Is segmentation enabled */ + /* MB level adjustment to quantizer */ + if (xd->segmentation_enabled) { + /* Code to set segment id in xd->mbmi.segment_id for + * current MB (with range checking) + */ + if (cpi->segmentation_map[map_index + mb_col] <= 3) { + xd->mode_info_context->mbmi.segment_id = + cpi->segmentation_map[map_index + mb_col]; + } else { + xd->mode_info_context->mbmi.segment_id = 0; + } + + vp8cx_mb_init_quantizer(cpi, x, 1); + } else { + /* Set to Segment 0 by default */ + xd->mode_info_context->mbmi.segment_id = 0; + } + + x->active_ptr = cpi->active_map + map_index + mb_col; + + if (cm->frame_type == KEY_FRAME) { + *totalrate += vp8cx_encode_intra_macroblock(cpi, x, &tp); +#ifdef MODE_STATS + y_modes[xd->mbmi.mode]++; +#endif + } else { + 
*totalrate += vp8cx_encode_inter_macroblock( + cpi, x, &tp, recon_yoffset, recon_uvoffset, mb_row, mb_col); + +#ifdef MODE_STATS + inter_y_modes[xd->mbmi.mode]++; + + if (xd->mbmi.mode == SPLITMV) { + int b; + + for (b = 0; b < xd->mbmi.partition_count; ++b) { + inter_b_modes[x->partition->bmi[b].mode]++; + } + } + +#endif + // Keep track of how many (consecutive) times a block + // is coded as ZEROMV_LASTREF, for base layer frames. + // Reset to 0 if its coded as anything else. + if (cpi->current_layer == 0) { + if (xd->mode_info_context->mbmi.mode == ZEROMV && + xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) { + // Increment, check for wrap-around. + if (cpi->consec_zero_last[map_index + mb_col] < 255) { + cpi->consec_zero_last[map_index + mb_col] += 1; + } + if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255) { + cpi->consec_zero_last_mvbias[map_index + mb_col] += 1; + } + } else { + cpi->consec_zero_last[map_index + mb_col] = 0; + cpi->consec_zero_last_mvbias[map_index + mb_col] = 0; + } + if (x->zero_last_dot_suppress) { + cpi->consec_zero_last_mvbias[map_index + mb_col] = 0; + } + } + + /* Special case code for cyclic refresh + * If cyclic update enabled then copy + * xd->mbmi.segment_id; (which may have been updated + * based on mode during + * vp8cx_encode_inter_macroblock()) back into the + * global segmentation map + */ + if ((cpi->current_layer == 0) && + (cpi->cyclic_refresh_mode_enabled && + xd->segmentation_enabled)) { + const MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; + cpi->segmentation_map[map_index + mb_col] = mbmi->segment_id; + + /* If the block has been refreshed mark it as clean + * (the magnitude of the -ve influences how long it + * will be before we consider another refresh): + * Else if it was coded (last frame 0,0) and has + * not already been refreshed then mark it as a + * candidate for cleanup next time (marked 0) else + * mark it as dirty (1). 
+ */ + if (mbmi->segment_id) { + cpi->cyclic_refresh_map[map_index + mb_col] = -1; + } else if ((mbmi->mode == ZEROMV) && + (mbmi->ref_frame == LAST_FRAME)) { + if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) { + cpi->cyclic_refresh_map[map_index + mb_col] = 0; + } + } else { + cpi->cyclic_refresh_map[map_index + mb_col] = 1; + } + } + } + +#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING + /* pack tokens for this MB */ + { + int tok_count = tp - tp_start; + vp8_pack_tokens(w, tp_start, tok_count); + } +#else + cpi->tplist[mb_row].stop = tp; +#endif + /* Increment pointer into gf usage flags structure. */ + x->gf_active_ptr++; + + /* Increment the activity mask pointers. */ + x->mb_activity_ptr++; + + /* adjust to the next column of macroblocks */ + x->src.y_buffer += 16; + x->src.u_buffer += 8; + x->src.v_buffer += 8; + + recon_yoffset += 16; + recon_uvoffset += 8; + + /* Keep track of segment usage */ + segment_counts[xd->mode_info_context->mbmi.segment_id]++; + + /* skip to next mb */ + xd->mode_info_context++; + x->partition_info++; + xd->above_context++; + } + + vp8_extend_mb_row(&cm->yv12_fb[dst_fb_idx], xd->dst.y_buffer + 16, + xd->dst.u_buffer + 8, xd->dst.v_buffer + 8); + + *current_mb_col = mb_col + nsync; + + /* this is to account for the border */ + xd->mode_info_context++; + x->partition_info++; + + x->src.y_buffer += + 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - + 16 * cm->mb_cols; + x->src.u_buffer += + 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - + 8 * cm->mb_cols; + x->src.v_buffer += + 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - + 8 * cm->mb_cols; + + xd->mode_info_context += + xd->mode_info_stride * cpi->encoding_thread_count; + x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count; + x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count; + } + /* Signal that this thread has completed processing its rows. 
*/ + sem_post(&cpi->h_event_end_encoding[ithread]); + } + } + + /* printf("exit thread %d\n", ithread); */ + return 0; } -static void setup_mbby_copy(MACROBLOCK *mbdst, MACROBLOCK *mbsrc) -{ +static void setup_mbby_copy(MACROBLOCK *mbdst, MACROBLOCK *mbsrc) { + MACROBLOCK *x = mbsrc; + MACROBLOCK *z = mbdst; + int i; - MACROBLOCK *x = mbsrc; - MACROBLOCK *z = mbdst; - int i; + z->ss = x->ss; + z->ss_count = x->ss_count; + z->searches_per_step = x->searches_per_step; + z->errorperbit = x->errorperbit; - z->ss = x->ss; - z->ss_count = x->ss_count; - z->searches_per_step = x->searches_per_step; - z->errorperbit = x->errorperbit; + z->sadperbit16 = x->sadperbit16; + z->sadperbit4 = x->sadperbit4; - z->sadperbit16 = x->sadperbit16; - z->sadperbit4 = x->sadperbit4; + /* + z->mv_col_min = x->mv_col_min; + z->mv_col_max = x->mv_col_max; + z->mv_row_min = x->mv_row_min; + z->mv_row_max = x->mv_row_max; + */ + + z->short_fdct4x4 = x->short_fdct4x4; + z->short_fdct8x4 = x->short_fdct8x4; + z->short_walsh4x4 = x->short_walsh4x4; + z->quantize_b = x->quantize_b; + z->optimize = x->optimize; + + /* + z->mvc = x->mvc; + z->src.y_buffer = x->src.y_buffer; + z->src.u_buffer = x->src.u_buffer; + z->src.v_buffer = x->src.v_buffer; + */ + + z->mvcost[0] = x->mvcost[0]; + z->mvcost[1] = x->mvcost[1]; + z->mvsadcost[0] = x->mvsadcost[0]; + z->mvsadcost[1] = x->mvsadcost[1]; + + z->token_costs = x->token_costs; + z->inter_bmode_costs = x->inter_bmode_costs; + z->mbmode_cost = x->mbmode_cost; + z->intra_uv_mode_cost = x->intra_uv_mode_cost; + z->bmode_costs = x->bmode_costs; + + for (i = 0; i < 25; ++i) { + z->block[i].quant = x->block[i].quant; + z->block[i].quant_fast = x->block[i].quant_fast; + z->block[i].quant_shift = x->block[i].quant_shift; + z->block[i].zbin = x->block[i].zbin; + z->block[i].zrun_zbin_boost = x->block[i].zrun_zbin_boost; + z->block[i].round = x->block[i].round; + z->block[i].src_stride = x->block[i].src_stride; + } + + z->q_index = x->q_index; + z->act_zbin_adj = 
x->act_zbin_adj; + z->last_act_zbin_adj = x->last_act_zbin_adj; + + { + MACROBLOCKD *xd = &x->e_mbd; + MACROBLOCKD *zd = &z->e_mbd; /* - z->mv_col_min = x->mv_col_min; - z->mv_col_max = x->mv_col_max; - z->mv_row_min = x->mv_row_min; - z->mv_row_max = x->mv_row_max; + zd->mode_info_context = xd->mode_info_context; + zd->mode_info = xd->mode_info; + + zd->mode_info_stride = xd->mode_info_stride; + zd->frame_type = xd->frame_type; + zd->up_available = xd->up_available ; + zd->left_available = xd->left_available; + zd->left_context = xd->left_context; + zd->last_frame_dc = xd->last_frame_dc; + zd->last_frame_dccons = xd->last_frame_dccons; + zd->gold_frame_dc = xd->gold_frame_dc; + zd->gold_frame_dccons = xd->gold_frame_dccons; + zd->mb_to_left_edge = xd->mb_to_left_edge; + zd->mb_to_right_edge = xd->mb_to_right_edge; + zd->mb_to_top_edge = xd->mb_to_top_edge ; + zd->mb_to_bottom_edge = xd->mb_to_bottom_edge; + zd->gf_active_ptr = xd->gf_active_ptr; + zd->frames_since_golden = xd->frames_since_golden; + zd->frames_till_alt_ref_frame = xd->frames_till_alt_ref_frame; */ + zd->subpixel_predict = xd->subpixel_predict; + zd->subpixel_predict8x4 = xd->subpixel_predict8x4; + zd->subpixel_predict8x8 = xd->subpixel_predict8x8; + zd->subpixel_predict16x16 = xd->subpixel_predict16x16; + zd->segmentation_enabled = xd->segmentation_enabled; + zd->mb_segement_abs_delta = xd->mb_segement_abs_delta; + memcpy(zd->segment_feature_data, xd->segment_feature_data, + sizeof(xd->segment_feature_data)); - z->short_fdct4x4 = x->short_fdct4x4; - z->short_fdct8x4 = x->short_fdct8x4; - z->short_walsh4x4 = x->short_walsh4x4; - z->quantize_b = x->quantize_b; - z->optimize = x->optimize; - - /* - z->mvc = x->mvc; - z->src.y_buffer = x->src.y_buffer; - z->src.u_buffer = x->src.u_buffer; - z->src.v_buffer = x->src.v_buffer; - */ - - z->mvcost[0] = x->mvcost[0]; - z->mvcost[1] = x->mvcost[1]; - z->mvsadcost[0] = x->mvsadcost[0]; - z->mvsadcost[1] = x->mvsadcost[1]; - - z->token_costs = x->token_costs; 
- z->inter_bmode_costs = x->inter_bmode_costs; - z->mbmode_cost = x->mbmode_cost; - z->intra_uv_mode_cost = x->intra_uv_mode_cost; - z->bmode_costs = x->bmode_costs; - - for (i = 0; i < 25; i++) - { - z->block[i].quant = x->block[i].quant; - z->block[i].quant_fast = x->block[i].quant_fast; - z->block[i].quant_shift = x->block[i].quant_shift; - z->block[i].zbin = x->block[i].zbin; - z->block[i].zrun_zbin_boost = x->block[i].zrun_zbin_boost; - z->block[i].round = x->block[i].round; - z->block[i].src_stride = x->block[i].src_stride; - } - - z->q_index = x->q_index; - z->act_zbin_adj = x->act_zbin_adj; - z->last_act_zbin_adj = x->last_act_zbin_adj; - - { - MACROBLOCKD *xd = &x->e_mbd; - MACROBLOCKD *zd = &z->e_mbd; - - /* - zd->mode_info_context = xd->mode_info_context; - zd->mode_info = xd->mode_info; - - zd->mode_info_stride = xd->mode_info_stride; - zd->frame_type = xd->frame_type; - zd->up_available = xd->up_available ; - zd->left_available = xd->left_available; - zd->left_context = xd->left_context; - zd->last_frame_dc = xd->last_frame_dc; - zd->last_frame_dccons = xd->last_frame_dccons; - zd->gold_frame_dc = xd->gold_frame_dc; - zd->gold_frame_dccons = xd->gold_frame_dccons; - zd->mb_to_left_edge = xd->mb_to_left_edge; - zd->mb_to_right_edge = xd->mb_to_right_edge; - zd->mb_to_top_edge = xd->mb_to_top_edge ; - zd->mb_to_bottom_edge = xd->mb_to_bottom_edge; - zd->gf_active_ptr = xd->gf_active_ptr; - zd->frames_since_golden = xd->frames_since_golden; - zd->frames_till_alt_ref_frame = xd->frames_till_alt_ref_frame; - */ - zd->subpixel_predict = xd->subpixel_predict; - zd->subpixel_predict8x4 = xd->subpixel_predict8x4; - zd->subpixel_predict8x8 = xd->subpixel_predict8x8; - zd->subpixel_predict16x16 = xd->subpixel_predict16x16; - zd->segmentation_enabled = xd->segmentation_enabled; - zd->mb_segement_abs_delta = xd->mb_segement_abs_delta; - memcpy(zd->segment_feature_data, xd->segment_feature_data, - sizeof(xd->segment_feature_data)); - - memcpy(zd->dequant_y1_dc, 
xd->dequant_y1_dc, sizeof(xd->dequant_y1_dc)); - memcpy(zd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1)); - memcpy(zd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2)); - memcpy(zd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv)); + memcpy(zd->dequant_y1_dc, xd->dequant_y1_dc, sizeof(xd->dequant_y1_dc)); + memcpy(zd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1)); + memcpy(zd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2)); + memcpy(zd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv)); #if 1 - /*TODO: Remove dequant from BLOCKD. This is a temporary solution until - * the quantizer code uses a passed in pointer to the dequant constants. - * This will also require modifications to the x86 and neon assembly. - * */ - for (i = 0; i < 16; i++) - zd->block[i].dequant = zd->dequant_y1; - for (i = 16; i < 24; i++) - zd->block[i].dequant = zd->dequant_uv; - zd->block[24].dequant = zd->dequant_y2; + /*TODO: Remove dequant from BLOCKD. This is a temporary solution until + * the quantizer code uses a passed in pointer to the dequant constants. + * This will also require modifications to the x86 and neon assembly. 
+ * */ + for (i = 0; i < 16; ++i) zd->block[i].dequant = zd->dequant_y1; + for (i = 16; i < 24; ++i) zd->block[i].dequant = zd->dequant_uv; + zd->block[24].dequant = zd->dequant_y2; #endif + memcpy(z->rd_threshes, x->rd_threshes, sizeof(x->rd_threshes)); + memcpy(z->rd_thresh_mult, x->rd_thresh_mult, sizeof(x->rd_thresh_mult)); - memcpy(z->rd_threshes, x->rd_threshes, sizeof(x->rd_threshes)); - memcpy(z->rd_thresh_mult, x->rd_thresh_mult, sizeof(x->rd_thresh_mult)); + z->zbin_over_quant = x->zbin_over_quant; + z->zbin_mode_boost_enabled = x->zbin_mode_boost_enabled; + z->zbin_mode_boost = x->zbin_mode_boost; - z->zbin_over_quant = x->zbin_over_quant; - z->zbin_mode_boost_enabled = x->zbin_mode_boost_enabled; - z->zbin_mode_boost = x->zbin_mode_boost; - - memset(z->error_bins, 0, sizeof(z->error_bins)); - } + memset(z->error_bins, 0, sizeof(z->error_bins)); + } } -void vp8cx_init_mbrthread_data(VP8_COMP *cpi, - MACROBLOCK *x, - MB_ROW_COMP *mbr_ei, - int count - ) -{ +void vp8cx_init_mbrthread_data(VP8_COMP *cpi, MACROBLOCK *x, + MB_ROW_COMP *mbr_ei, int count) { + VP8_COMMON *const cm = &cpi->common; + MACROBLOCKD *const xd = &x->e_mbd; + int i; - VP8_COMMON *const cm = & cpi->common; - MACROBLOCKD *const xd = & x->e_mbd; - int i; + for (i = 0; i < count; ++i) { + MACROBLOCK *mb = &mbr_ei[i].mb; + MACROBLOCKD *mbd = &mb->e_mbd; - for (i = 0; i < count; i++) - { - MACROBLOCK *mb = & mbr_ei[i].mb; - MACROBLOCKD *mbd = &mb->e_mbd; + mbd->subpixel_predict = xd->subpixel_predict; + mbd->subpixel_predict8x4 = xd->subpixel_predict8x4; + mbd->subpixel_predict8x8 = xd->subpixel_predict8x8; + mbd->subpixel_predict16x16 = xd->subpixel_predict16x16; + mb->gf_active_ptr = x->gf_active_ptr; - mbd->subpixel_predict = xd->subpixel_predict; - mbd->subpixel_predict8x4 = xd->subpixel_predict8x4; - mbd->subpixel_predict8x8 = xd->subpixel_predict8x8; - mbd->subpixel_predict16x16 = xd->subpixel_predict16x16; - mb->gf_active_ptr = x->gf_active_ptr; + memset(mbr_ei[i].segment_counts, 0, 
sizeof(mbr_ei[i].segment_counts)); + mbr_ei[i].totalrate = 0; - memset(mbr_ei[i].segment_counts, 0, sizeof(mbr_ei[i].segment_counts)); - mbr_ei[i].totalrate = 0; + mb->partition_info = x->pi + x->e_mbd.mode_info_stride * (i + 1); - mb->partition_info = x->pi + x->e_mbd.mode_info_stride * (i + 1); + mbd->mode_info_context = cm->mi + x->e_mbd.mode_info_stride * (i + 1); + mbd->mode_info_stride = cm->mode_info_stride; - mbd->frame_type = cm->frame_type; + mbd->frame_type = cm->frame_type; - mb->src = * cpi->Source; - mbd->pre = cm->yv12_fb[cm->lst_fb_idx]; - mbd->dst = cm->yv12_fb[cm->new_fb_idx]; + mb->src = *cpi->Source; + mbd->pre = cm->yv12_fb[cm->lst_fb_idx]; + mbd->dst = cm->yv12_fb[cm->new_fb_idx]; - mb->src.y_buffer += 16 * x->src.y_stride * (i + 1); - mb->src.u_buffer += 8 * x->src.uv_stride * (i + 1); - mb->src.v_buffer += 8 * x->src.uv_stride * (i + 1); + mb->src.y_buffer += 16 * x->src.y_stride * (i + 1); + mb->src.u_buffer += 8 * x->src.uv_stride * (i + 1); + mb->src.v_buffer += 8 * x->src.uv_stride * (i + 1); - vp8_build_block_offsets(mb); + vp8_build_block_offsets(mb); - mbd->left_context = &cm->left_context; - mb->mvc = cm->fc.mvc; + mbd->left_context = &cm->left_context; + mb->mvc = cm->fc.mvc; - setup_mbby_copy(&mbr_ei[i].mb, x); + setup_mbby_copy(&mbr_ei[i].mb, x); - mbd->fullpixel_mask = 0xffffffff; - if(cm->full_pixel) - mbd->fullpixel_mask = 0xfffffff8; + mbd->fullpixel_mask = 0xffffffff; + if (cm->full_pixel) mbd->fullpixel_mask = 0xfffffff8; - vp8_zero(mb->coef_counts); - vp8_zero(x->ymode_count); - mb->skip_true_count = 0; - vp8_zero(mb->MVcount); - mb->prediction_error = 0; - mb->intra_error = 0; - vp8_zero(mb->count_mb_ref_frame_usage); - mb->mbs_tested_so_far = 0; - mb->mbs_zero_last_dot_suppress = 0; - } + vp8_zero(mb->coef_counts); + vp8_zero(x->ymode_count); + mb->skip_true_count = 0; + vp8_zero(mb->MVcount); + mb->prediction_error = 0; + mb->intra_error = 0; + vp8_zero(mb->count_mb_ref_frame_usage); + mb->mbs_tested_so_far = 0; + 
mb->mbs_zero_last_dot_suppress = 0; + } } -int vp8cx_create_encoder_threads(VP8_COMP *cpi) -{ - const VP8_COMMON * cm = &cpi->common; +int vp8cx_create_encoder_threads(VP8_COMP *cpi) { + const VP8_COMMON *cm = &cpi->common; - cpi->b_multi_threaded = 0; - cpi->encoding_thread_count = 0; - cpi->b_lpf_running = 0; + cpi->b_multi_threaded = 0; + cpi->encoding_thread_count = 0; + cpi->b_lpf_running = 0; - pthread_mutex_init(&cpi->mt_mutex, NULL); + if (cm->processor_core_count > 1 && cpi->oxcf.multi_threaded > 1) { + int ithread; + int th_count = cpi->oxcf.multi_threaded - 1; + int rc = 0; - if (cm->processor_core_count > 1 && cpi->oxcf.multi_threaded > 1) - { - int ithread; - int th_count = cpi->oxcf.multi_threaded - 1; - int rc = 0; - - /* don't allocate more threads than cores available */ - if (cpi->oxcf.multi_threaded > cm->processor_core_count) - th_count = cm->processor_core_count - 1; - - /* we have th_count + 1 (main) threads processing one row each */ - /* no point to have more threads than the sync range allows */ - if(th_count > ((cm->mb_cols / cpi->mt_sync_range) - 1)) - { - th_count = (cm->mb_cols / cpi->mt_sync_range) - 1; - } - - if(th_count == 0) - return 0; - - CHECK_MEM_ERROR(cpi->h_encoding_thread, - vpx_malloc(sizeof(pthread_t) * th_count)); - CHECK_MEM_ERROR(cpi->h_event_start_encoding, - vpx_malloc(sizeof(sem_t) * th_count)); - CHECK_MEM_ERROR(cpi->mb_row_ei, - vpx_memalign(32, sizeof(MB_ROW_COMP) * th_count)); - memset(cpi->mb_row_ei, 0, sizeof(MB_ROW_COMP) * th_count); - CHECK_MEM_ERROR(cpi->en_thread_data, - vpx_malloc(sizeof(ENCODETHREAD_DATA) * th_count)); - - sem_init(&cpi->h_event_end_encoding, 0, 0); - - cpi->b_multi_threaded = 1; - cpi->encoding_thread_count = th_count; - - /* - printf("[VP8:] multi_threaded encoding is enabled with %d threads\n\n", - (cpi->encoding_thread_count +1)); - */ - - for (ithread = 0; ithread < th_count; ithread++) - { - ENCODETHREAD_DATA *ethd = &cpi->en_thread_data[ithread]; - - /* Setup block ptrs and offsets 
*/ - vp8_setup_block_ptrs(&cpi->mb_row_ei[ithread].mb); - vp8_setup_block_dptrs(&cpi->mb_row_ei[ithread].mb.e_mbd); - - sem_init(&cpi->h_event_start_encoding[ithread], 0, 0); - - ethd->ithread = ithread; - ethd->ptr1 = (void *)cpi; - ethd->ptr2 = (void *)&cpi->mb_row_ei[ithread]; - - rc = pthread_create(&cpi->h_encoding_thread[ithread], 0, - thread_encoding_proc, ethd); - if(rc) - break; - } - - if(rc) - { - /* shutdown other threads */ - protected_write(&cpi->mt_mutex, &cpi->b_multi_threaded, 0); - for(--ithread; ithread >= 0; ithread--) - { - pthread_join(cpi->h_encoding_thread[ithread], 0); - sem_destroy(&cpi->h_event_start_encoding[ithread]); - } - sem_destroy(&cpi->h_event_end_encoding); - - /* free thread related resources */ - vpx_free(cpi->h_event_start_encoding); - vpx_free(cpi->h_encoding_thread); - vpx_free(cpi->mb_row_ei); - vpx_free(cpi->en_thread_data); - - pthread_mutex_destroy(&cpi->mt_mutex); - - return -1; - } - - - { - LPFTHREAD_DATA * lpfthd = &cpi->lpf_thread_data; - - sem_init(&cpi->h_event_start_lpf, 0, 0); - sem_init(&cpi->h_event_end_lpf, 0, 0); - - lpfthd->ptr1 = (void *)cpi; - rc = pthread_create(&cpi->h_filter_thread, 0, thread_loopfilter, - lpfthd); - - if(rc) - { - /* shutdown other threads */ - protected_write(&cpi->mt_mutex, &cpi->b_multi_threaded, 0); - for(--ithread; ithread >= 0; ithread--) - { - sem_post(&cpi->h_event_start_encoding[ithread]); - pthread_join(cpi->h_encoding_thread[ithread], 0); - sem_destroy(&cpi->h_event_start_encoding[ithread]); - } - sem_destroy(&cpi->h_event_end_encoding); - sem_destroy(&cpi->h_event_end_lpf); - sem_destroy(&cpi->h_event_start_lpf); - - /* free thread related resources */ - vpx_free(cpi->h_event_start_encoding); - vpx_free(cpi->h_encoding_thread); - vpx_free(cpi->mb_row_ei); - vpx_free(cpi->en_thread_data); - - pthread_mutex_destroy(&cpi->mt_mutex); - - return -2; - } - } + /* don't allocate more threads than cores available */ + if (cpi->oxcf.multi_threaded > cm->processor_core_count) { + 
th_count = cm->processor_core_count - 1; + } + + /* we have th_count + 1 (main) threads processing one row each */ + /* no point to have more threads than the sync range allows */ + if (th_count > ((cm->mb_cols / cpi->mt_sync_range) - 1)) { + th_count = (cm->mb_cols / cpi->mt_sync_range) - 1; + } + + if (th_count == 0) return 0; + + CHECK_MEM_ERROR(cpi->h_encoding_thread, + vpx_malloc(sizeof(pthread_t) * th_count)); + CHECK_MEM_ERROR(cpi->h_event_start_encoding, + vpx_malloc(sizeof(sem_t) * th_count)); + CHECK_MEM_ERROR(cpi->h_event_end_encoding, + vpx_malloc(sizeof(sem_t) * th_count)); + CHECK_MEM_ERROR(cpi->mb_row_ei, + vpx_memalign(32, sizeof(MB_ROW_COMP) * th_count)); + memset(cpi->mb_row_ei, 0, sizeof(MB_ROW_COMP) * th_count); + CHECK_MEM_ERROR(cpi->en_thread_data, + vpx_malloc(sizeof(ENCODETHREAD_DATA) * th_count)); + + cpi->b_multi_threaded = 1; + cpi->encoding_thread_count = th_count; + + /* + printf("[VP8:] multi_threaded encoding is enabled with %d threads\n\n", + (cpi->encoding_thread_count +1)); + */ + + for (ithread = 0; ithread < th_count; ++ithread) { + ENCODETHREAD_DATA *ethd = &cpi->en_thread_data[ithread]; + + /* Setup block ptrs and offsets */ + vp8_setup_block_ptrs(&cpi->mb_row_ei[ithread].mb); + vp8_setup_block_dptrs(&cpi->mb_row_ei[ithread].mb.e_mbd); + + sem_init(&cpi->h_event_start_encoding[ithread], 0, 0); + sem_init(&cpi->h_event_end_encoding[ithread], 0, 0); + + ethd->ithread = ithread; + ethd->ptr1 = (void *)cpi; + ethd->ptr2 = (void *)&cpi->mb_row_ei[ithread]; + + rc = pthread_create(&cpi->h_encoding_thread[ithread], 0, + thread_encoding_proc, ethd); + if (rc) break; + } + + if (rc) { + /* shutdown other threads */ + cpi->b_multi_threaded = 0; + for (--ithread; ithread >= 0; ithread--) { + pthread_join(cpi->h_encoding_thread[ithread], 0); + sem_destroy(&cpi->h_event_start_encoding[ithread]); + sem_destroy(&cpi->h_event_end_encoding[ithread]); + } + + /* free thread related resources */ + vpx_free(cpi->h_event_start_encoding); + 
vpx_free(cpi->h_event_end_encoding); + vpx_free(cpi->h_encoding_thread); + vpx_free(cpi->mb_row_ei); + vpx_free(cpi->en_thread_data); + + return -1; } - return 0; -} -void vp8cx_remove_encoder_threads(VP8_COMP *cpi) -{ - if (protected_read(&cpi->mt_mutex, &cpi->b_multi_threaded)) { + LPFTHREAD_DATA *lpfthd = &cpi->lpf_thread_data; + + sem_init(&cpi->h_event_start_lpf, 0, 0); + sem_init(&cpi->h_event_end_lpf, 0, 0); + + lpfthd->ptr1 = (void *)cpi; + rc = pthread_create(&cpi->h_filter_thread, 0, thread_loopfilter, lpfthd); + + if (rc) { /* shutdown other threads */ - protected_write(&cpi->mt_mutex, &cpi->b_multi_threaded, 0); - { - int i; - - for (i = 0; i < cpi->encoding_thread_count; i++) - { - sem_post(&cpi->h_event_start_encoding[i]); - pthread_join(cpi->h_encoding_thread[i], 0); - - sem_destroy(&cpi->h_event_start_encoding[i]); - } - - sem_post(&cpi->h_event_start_lpf); - pthread_join(cpi->h_filter_thread, 0); + cpi->b_multi_threaded = 0; + for (--ithread; ithread >= 0; ithread--) { + sem_post(&cpi->h_event_start_encoding[ithread]); + sem_post(&cpi->h_event_end_encoding[ithread]); + pthread_join(cpi->h_encoding_thread[ithread], 0); + sem_destroy(&cpi->h_event_start_encoding[ithread]); + sem_destroy(&cpi->h_event_end_encoding[ithread]); } - - sem_destroy(&cpi->h_event_end_encoding); sem_destroy(&cpi->h_event_end_lpf); sem_destroy(&cpi->h_event_start_lpf); /* free thread related resources */ vpx_free(cpi->h_event_start_encoding); + vpx_free(cpi->h_event_end_encoding); vpx_free(cpi->h_encoding_thread); vpx_free(cpi->mb_row_ei); vpx_free(cpi->en_thread_data); + + return -2; + } } - pthread_mutex_destroy(&cpi->mt_mutex); + } + return 0; +} + +void vp8cx_remove_encoder_threads(VP8_COMP *cpi) { + if (cpi->b_multi_threaded) { + /* shutdown other threads */ + cpi->b_multi_threaded = 0; + { + int i; + + for (i = 0; i < cpi->encoding_thread_count; ++i) { + sem_post(&cpi->h_event_start_encoding[i]); + sem_post(&cpi->h_event_end_encoding[i]); + + 
pthread_join(cpi->h_encoding_thread[i], 0); + + sem_destroy(&cpi->h_event_start_encoding[i]); + sem_destroy(&cpi->h_event_end_encoding[i]); + } + + sem_post(&cpi->h_event_start_lpf); + pthread_join(cpi->h_filter_thread, 0); + } + + sem_destroy(&cpi->h_event_end_lpf); + sem_destroy(&cpi->h_event_start_lpf); + + /* free thread related resources */ + vpx_free(cpi->h_event_start_encoding); + vpx_free(cpi->h_event_end_encoding); + vpx_free(cpi->h_encoding_thread); + vpx_free(cpi->mb_row_ei); + vpx_free(cpi->en_thread_data); + } } #endif diff --git a/libs/libvpx/vp8/encoder/firstpass.c b/libs/libvpx/vp8/encoder/firstpass.c index 4c2acc7745..cd34e33fb8 100644 --- a/libs/libvpx/vp8/encoder/firstpass.c +++ b/libs/libvpx/vp8/encoder/firstpass.c @@ -18,6 +18,7 @@ #include "onyx_int.h" #include "vpx_dsp/variance.h" #include "encodeintra.h" +#include "vp8/common/common.h" #include "vp8/common/setupintrarecon.h" #include "vp8/common/systemdependent.h" #include "mcomp.h" @@ -32,7 +33,7 @@ #include "encodemv.h" #include "encodeframe.h" -/* #define OUTPUT_FPF 1 */ +#define OUTPUT_FPF 0 extern void vp8cx_frame_init_quantizer(VP8_COMP *cpi); @@ -41,36 +42,33 @@ extern int vp8_kf_boost_qadjustment[QINDEX_RANGE]; extern const int vp8_gf_boost_qadjustment[QINDEX_RANGE]; -#define IIFACTOR 1.5 +#define IIFACTOR 1.5 #define IIKFACTOR1 1.40 #define IIKFACTOR2 1.5 -#define RMAX 14.0 -#define GF_RMAX 48.0 +#define RMAX 14.0 +#define GF_RMAX 48.0 #define KF_MB_INTRA_MIN 300 #define GF_MB_INTRA_MIN 200 -#define DOUBLE_DIVIDE_CHECK(X) ((X)<0?(X)-.000001:(X)+.000001) +#define DOUBLE_DIVIDE_CHECK(X) ((X) < 0 ? 
(X)-.000001 : (X) + .000001) -#define POW1 (double)cpi->oxcf.two_pass_vbrbias/100.0 -#define POW2 (double)cpi->oxcf.two_pass_vbrbias/100.0 +#define POW1 (double)cpi->oxcf.two_pass_vbrbias / 100.0 +#define POW2 (double)cpi->oxcf.two_pass_vbrbias / 100.0 #define NEW_BOOST 1 -static int vscale_lookup[7] = {0, 1, 1, 2, 2, 3, 3}; -static int hscale_lookup[7] = {0, 0, 1, 1, 2, 2, 3}; +static int vscale_lookup[7] = { 0, 1, 1, 2, 2, 3, 3 }; +static int hscale_lookup[7] = { 0, 0, 1, 1, 2, 2, 3 }; - -static const int cq_level[QINDEX_RANGE] = -{ - 0,0,1,1,2,3,3,4,4,5,6,6,7,8,8,9, - 9,10,11,11,12,13,13,14,15,15,16,17,17,18,19,20, - 20,21,22,22,23,24,24,25,26,27,27,28,29,30,30,31, - 32,33,33,34,35,36,36,37,38,39,39,40,41,42,42,43, - 44,45,46,46,47,48,49,50,50,51,52,53,54,55,55,56, - 57,58,59,60,60,61,62,63,64,65,66,67,67,68,69,70, - 71,72,73,74,75,75,76,77,78,79,80,81,82,83,84,85, - 86,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100 +static const int cq_level[QINDEX_RANGE] = { + 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11, + 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24, + 24, 25, 26, 27, 27, 28, 29, 30, 30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38, + 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53, + 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86, + 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100 }; static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame); @@ -78,828 +76,772 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame); /* Resets the first pass file to the given position using a relative seek * from the current position */ -static void reset_fpf_position(VP8_COMP *cpi, FIRSTPASS_STATS *Position) -{ - cpi->twopass.stats_in = Position; +static void reset_fpf_position(VP8_COMP *cpi, FIRSTPASS_STATS *Position) { + cpi->twopass.stats_in = Position; } 
-static int lookup_next_frame_stats(VP8_COMP *cpi, FIRSTPASS_STATS *next_frame) -{ - if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) - return EOF; +static int lookup_next_frame_stats(VP8_COMP *cpi, FIRSTPASS_STATS *next_frame) { + if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) return EOF; - *next_frame = *cpi->twopass.stats_in; - return 1; + *next_frame = *cpi->twopass.stats_in; + return 1; } /* Read frame stats at an offset from the current position */ -static int read_frame_stats( VP8_COMP *cpi, - FIRSTPASS_STATS *frame_stats, - int offset ) -{ - FIRSTPASS_STATS * fps_ptr = cpi->twopass.stats_in; +static int read_frame_stats(VP8_COMP *cpi, FIRSTPASS_STATS *frame_stats, + int offset) { + FIRSTPASS_STATS *fps_ptr = cpi->twopass.stats_in; - /* Check legality of offset */ - if ( offset >= 0 ) - { - if ( &fps_ptr[offset] >= cpi->twopass.stats_in_end ) - return EOF; - } - else if ( offset < 0 ) - { - if ( &fps_ptr[offset] < cpi->twopass.stats_in_start ) - return EOF; - } + /* Check legality of offset */ + if (offset >= 0) { + if (&fps_ptr[offset] >= cpi->twopass.stats_in_end) return EOF; + } else if (offset < 0) { + if (&fps_ptr[offset] < cpi->twopass.stats_in_start) return EOF; + } - *frame_stats = fps_ptr[offset]; - return 1; + *frame_stats = fps_ptr[offset]; + return 1; } -static int input_stats(VP8_COMP *cpi, FIRSTPASS_STATS *fps) -{ - if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) - return EOF; +static int input_stats(VP8_COMP *cpi, FIRSTPASS_STATS *fps) { + if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) return EOF; - *fps = *cpi->twopass.stats_in; - cpi->twopass.stats_in = - (void*)((char *)cpi->twopass.stats_in + sizeof(FIRSTPASS_STATS)); - return 1; + *fps = *cpi->twopass.stats_in; + cpi->twopass.stats_in = + (void *)((char *)cpi->twopass.stats_in + sizeof(FIRSTPASS_STATS)); + return 1; } -static void output_stats(const VP8_COMP *cpi, +static void output_stats(const VP8_COMP *cpi, struct vpx_codec_pkt_list *pktlist, - 
FIRSTPASS_STATS *stats) -{ - struct vpx_codec_cx_pkt pkt; - (void)cpi; - pkt.kind = VPX_CODEC_STATS_PKT; - pkt.data.twopass_stats.buf = stats; - pkt.data.twopass_stats.sz = sizeof(FIRSTPASS_STATS); - vpx_codec_pkt_list_add(pktlist, &pkt); + FIRSTPASS_STATS *stats) { + struct vpx_codec_cx_pkt pkt; + (void)cpi; + pkt.kind = VPX_CODEC_STATS_PKT; + pkt.data.twopass_stats.buf = stats; + pkt.data.twopass_stats.sz = sizeof(FIRSTPASS_STATS); + vpx_codec_pkt_list_add(pktlist, &pkt); /* TEMP debug code */ #if OUTPUT_FPF - { - FILE *fpfile; - fpfile = fopen("firstpass.stt", "a"); + { + FILE *fpfile; + fpfile = fopen("firstpass.stt", "a"); - fprintf(fpfile, "%12.0f %12.0f %12.0f %12.4f %12.4f %12.4f %12.4f" - " %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f" - " %12.0f %12.0f %12.4f\n", - stats->frame, - stats->intra_error, - stats->coded_error, - stats->ssim_weighted_pred_err, - stats->pcnt_inter, - stats->pcnt_motion, - stats->pcnt_second_ref, - stats->pcnt_neutral, - stats->MVr, - stats->mvr_abs, - stats->MVc, - stats->mvc_abs, - stats->MVrv, - stats->MVcv, - stats->mv_in_out_count, - stats->new_mv_count, - stats->count, - stats->duration); - fclose(fpfile); - } + fprintf(fpfile, + "%12.0f %12.0f %12.0f %12.4f %12.4f %12.4f %12.4f" + " %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f %12.4f" + " %12.0f %12.0f %12.4f\n", + stats->frame, stats->intra_error, stats->coded_error, + stats->ssim_weighted_pred_err, stats->pcnt_inter, + stats->pcnt_motion, stats->pcnt_second_ref, stats->pcnt_neutral, + stats->MVr, stats->mvr_abs, stats->MVc, stats->mvc_abs, stats->MVrv, + stats->MVcv, stats->mv_in_out_count, stats->new_mv_count, + stats->count, stats->duration); + fclose(fpfile); + } #endif } -static void zero_stats(FIRSTPASS_STATS *section) -{ - section->frame = 0.0; - section->intra_error = 0.0; - section->coded_error = 0.0; - section->ssim_weighted_pred_err = 0.0; - section->pcnt_inter = 0.0; - section->pcnt_motion = 0.0; - section->pcnt_second_ref = 0.0; - 
section->pcnt_neutral = 0.0; - section->MVr = 0.0; - section->mvr_abs = 0.0; - section->MVc = 0.0; - section->mvc_abs = 0.0; - section->MVrv = 0.0; - section->MVcv = 0.0; - section->mv_in_out_count = 0.0; - section->new_mv_count = 0.0; - section->count = 0.0; - section->duration = 1.0; +static void zero_stats(FIRSTPASS_STATS *section) { + section->frame = 0.0; + section->intra_error = 0.0; + section->coded_error = 0.0; + section->ssim_weighted_pred_err = 0.0; + section->pcnt_inter = 0.0; + section->pcnt_motion = 0.0; + section->pcnt_second_ref = 0.0; + section->pcnt_neutral = 0.0; + section->MVr = 0.0; + section->mvr_abs = 0.0; + section->MVc = 0.0; + section->mvc_abs = 0.0; + section->MVrv = 0.0; + section->MVcv = 0.0; + section->mv_in_out_count = 0.0; + section->new_mv_count = 0.0; + section->count = 0.0; + section->duration = 1.0; } -static void accumulate_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame) -{ - section->frame += frame->frame; - section->intra_error += frame->intra_error; - section->coded_error += frame->coded_error; - section->ssim_weighted_pred_err += frame->ssim_weighted_pred_err; - section->pcnt_inter += frame->pcnt_inter; - section->pcnt_motion += frame->pcnt_motion; - section->pcnt_second_ref += frame->pcnt_second_ref; - section->pcnt_neutral += frame->pcnt_neutral; - section->MVr += frame->MVr; - section->mvr_abs += frame->mvr_abs; - section->MVc += frame->MVc; - section->mvc_abs += frame->mvc_abs; - section->MVrv += frame->MVrv; - section->MVcv += frame->MVcv; - section->mv_in_out_count += frame->mv_in_out_count; - section->new_mv_count += frame->new_mv_count; - section->count += frame->count; - section->duration += frame->duration; +static void accumulate_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame) { + section->frame += frame->frame; + section->intra_error += frame->intra_error; + section->coded_error += frame->coded_error; + section->ssim_weighted_pred_err += frame->ssim_weighted_pred_err; + section->pcnt_inter += 
frame->pcnt_inter; + section->pcnt_motion += frame->pcnt_motion; + section->pcnt_second_ref += frame->pcnt_second_ref; + section->pcnt_neutral += frame->pcnt_neutral; + section->MVr += frame->MVr; + section->mvr_abs += frame->mvr_abs; + section->MVc += frame->MVc; + section->mvc_abs += frame->mvc_abs; + section->MVrv += frame->MVrv; + section->MVcv += frame->MVcv; + section->mv_in_out_count += frame->mv_in_out_count; + section->new_mv_count += frame->new_mv_count; + section->count += frame->count; + section->duration += frame->duration; } -static void subtract_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame) -{ - section->frame -= frame->frame; - section->intra_error -= frame->intra_error; - section->coded_error -= frame->coded_error; - section->ssim_weighted_pred_err -= frame->ssim_weighted_pred_err; - section->pcnt_inter -= frame->pcnt_inter; - section->pcnt_motion -= frame->pcnt_motion; - section->pcnt_second_ref -= frame->pcnt_second_ref; - section->pcnt_neutral -= frame->pcnt_neutral; - section->MVr -= frame->MVr; - section->mvr_abs -= frame->mvr_abs; - section->MVc -= frame->MVc; - section->mvc_abs -= frame->mvc_abs; - section->MVrv -= frame->MVrv; - section->MVcv -= frame->MVcv; - section->mv_in_out_count -= frame->mv_in_out_count; - section->new_mv_count -= frame->new_mv_count; - section->count -= frame->count; - section->duration -= frame->duration; +static void subtract_stats(FIRSTPASS_STATS *section, FIRSTPASS_STATS *frame) { + section->frame -= frame->frame; + section->intra_error -= frame->intra_error; + section->coded_error -= frame->coded_error; + section->ssim_weighted_pred_err -= frame->ssim_weighted_pred_err; + section->pcnt_inter -= frame->pcnt_inter; + section->pcnt_motion -= frame->pcnt_motion; + section->pcnt_second_ref -= frame->pcnt_second_ref; + section->pcnt_neutral -= frame->pcnt_neutral; + section->MVr -= frame->MVr; + section->mvr_abs -= frame->mvr_abs; + section->MVc -= frame->MVc; + section->mvc_abs -= frame->mvc_abs; + 
section->MVrv -= frame->MVrv; + section->MVcv -= frame->MVcv; + section->mv_in_out_count -= frame->mv_in_out_count; + section->new_mv_count -= frame->new_mv_count; + section->count -= frame->count; + section->duration -= frame->duration; } -static void avg_stats(FIRSTPASS_STATS *section) -{ - if (section->count < 1.0) - return; +static void avg_stats(FIRSTPASS_STATS *section) { + if (section->count < 1.0) return; - section->intra_error /= section->count; - section->coded_error /= section->count; - section->ssim_weighted_pred_err /= section->count; - section->pcnt_inter /= section->count; - section->pcnt_second_ref /= section->count; - section->pcnt_neutral /= section->count; - section->pcnt_motion /= section->count; - section->MVr /= section->count; - section->mvr_abs /= section->count; - section->MVc /= section->count; - section->mvc_abs /= section->count; - section->MVrv /= section->count; - section->MVcv /= section->count; - section->mv_in_out_count /= section->count; - section->duration /= section->count; + section->intra_error /= section->count; + section->coded_error /= section->count; + section->ssim_weighted_pred_err /= section->count; + section->pcnt_inter /= section->count; + section->pcnt_second_ref /= section->count; + section->pcnt_neutral /= section->count; + section->pcnt_motion /= section->count; + section->MVr /= section->count; + section->mvr_abs /= section->count; + section->MVc /= section->count; + section->mvc_abs /= section->count; + section->MVrv /= section->count; + section->MVcv /= section->count; + section->mv_in_out_count /= section->count; + section->duration /= section->count; } /* Calculate a modified Error used in distributing bits between easier * and harder frames */ -static double calculate_modified_err(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) -{ - double av_err = ( cpi->twopass.total_stats.ssim_weighted_pred_err / - cpi->twopass.total_stats.count ); - double this_err = this_frame->ssim_weighted_pred_err; - double modified_err; 
+static double calculate_modified_err(VP8_COMP *cpi, + FIRSTPASS_STATS *this_frame) { + double av_err = (cpi->twopass.total_stats.ssim_weighted_pred_err / + cpi->twopass.total_stats.count); + double this_err = this_frame->ssim_weighted_pred_err; + double modified_err; - if (this_err > av_err) - modified_err = av_err * pow((this_err / DOUBLE_DIVIDE_CHECK(av_err)), POW1); - else - modified_err = av_err * pow((this_err / DOUBLE_DIVIDE_CHECK(av_err)), POW2); + if (this_err > av_err) { + modified_err = av_err * pow((this_err / DOUBLE_DIVIDE_CHECK(av_err)), POW1); + } else { + modified_err = av_err * pow((this_err / DOUBLE_DIVIDE_CHECK(av_err)), POW2); + } - return modified_err; + return modified_err; } static const double weight_table[256] = { -0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, -0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, -0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, -0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, -0.020000, 0.031250, 0.062500, 0.093750, 0.125000, 0.156250, 0.187500, 0.218750, -0.250000, 0.281250, 0.312500, 0.343750, 0.375000, 0.406250, 0.437500, 0.468750, -0.500000, 0.531250, 0.562500, 0.593750, 0.625000, 0.656250, 0.687500, 0.718750, -0.750000, 0.781250, 0.812500, 0.843750, 0.875000, 0.906250, 0.937500, 0.968750, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 
1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, -1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000 + 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, + 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, + 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, + 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, + 0.020000, 0.020000, 0.020000, 0.020000, 0.020000, 0.031250, 0.062500, + 0.093750, 0.125000, 0.156250, 0.187500, 0.218750, 0.250000, 0.281250, + 0.312500, 0.343750, 0.375000, 0.406250, 0.437500, 0.468750, 0.500000, + 0.531250, 0.562500, 0.593750, 0.625000, 0.656250, 0.687500, 0.718750, + 0.750000, 0.781250, 0.812500, 0.843750, 0.875000, 0.906250, 0.937500, + 
0.968750, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, 1.000000, + 1.000000, 1.000000, 1.000000, 1.000000 }; -static 
double simple_weight(YV12_BUFFER_CONFIG *source) -{ - int i, j; +static double simple_weight(YV12_BUFFER_CONFIG *source) { + int i, j; - unsigned char *src = source->y_buffer; - double sum_weights = 0.0; + unsigned char *src = source->y_buffer; + double sum_weights = 0.0; - /* Loop throught the Y plane raw examining levels and creating a weight - * for the image - */ - i = source->y_height; - do - { - j = source->y_width; - do - { - sum_weights += weight_table[ *src]; - src++; - }while(--j); - src -= source->y_width; - src += source->y_stride; - }while(--i); + /* Loop throught the Y plane raw examining levels and creating a weight + * for the image + */ + i = source->y_height; + do { + j = source->y_width; + do { + sum_weights += weight_table[*src]; + src++; + } while (--j); + src -= source->y_width; + src += source->y_stride; + } while (--i); - sum_weights /= (source->y_height * source->y_width); + sum_weights /= (source->y_height * source->y_width); - return sum_weights; + return sum_weights; } - /* This function returns the current per frame maximum bitrate target */ -static int frame_max_bits(VP8_COMP *cpi) -{ - /* Max allocation for a single frame based on the max section guidelines - * passed in and how many bits are left +static int frame_max_bits(VP8_COMP *cpi) { + /* Max allocation for a single frame based on the max section guidelines + * passed in and how many bits are left + */ + int max_bits; + + /* For CBR we need to also consider buffer fullness. + * If we are running below the optimal level then we need to gradually + * tighten up on max_bits. 
+ */ + if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + double buffer_fullness_ratio = + (double)cpi->buffer_level / + DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.optimal_buffer_level); + + /* For CBR base this on the target average bits per frame plus the + * maximum sedction rate passed in by the user */ - int max_bits; + max_bits = (int)(cpi->av_per_frame_bandwidth * + ((double)cpi->oxcf.two_pass_vbrmax_section / 100.0)); - /* For CBR we need to also consider buffer fullness. - * If we are running below the optimal level then we need to gradually - * tighten up on max_bits. + /* If our buffer is below the optimum level */ + if (buffer_fullness_ratio < 1.0) { + /* The lower of max_bits / 4 or cpi->av_per_frame_bandwidth / 4. */ + int min_max_bits = ((cpi->av_per_frame_bandwidth >> 2) < (max_bits >> 2)) + ? cpi->av_per_frame_bandwidth >> 2 + : max_bits >> 2; + + max_bits = (int)(max_bits * buffer_fullness_ratio); + + /* Lowest value we will set ... which should allow the buffer to + * refill. 
+ */ + if (max_bits < min_max_bits) max_bits = min_max_bits; + } + } + /* VBR */ + else { + /* For VBR base this on the bits and frames left plus the + * two_pass_vbrmax_section rate passed in by the user */ - if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) - { - double buffer_fullness_ratio = (double)cpi->buffer_level / DOUBLE_DIVIDE_CHECK((double)cpi->oxcf.optimal_buffer_level); + max_bits = (int)(((double)cpi->twopass.bits_left / + (cpi->twopass.total_stats.count - + (double)cpi->common.current_video_frame)) * + ((double)cpi->oxcf.two_pass_vbrmax_section / 100.0)); + } - /* For CBR base this on the target average bits per frame plus the - * maximum sedction rate passed in by the user - */ - max_bits = (int)(cpi->av_per_frame_bandwidth * ((double)cpi->oxcf.two_pass_vbrmax_section / 100.0)); + /* Trap case where we are out of bits */ + if (max_bits < 0) max_bits = 0; - /* If our buffer is below the optimum level */ - if (buffer_fullness_ratio < 1.0) - { - /* The lower of max_bits / 4 or cpi->av_per_frame_bandwidth / 4. */ - int min_max_bits = ((cpi->av_per_frame_bandwidth >> 2) < (max_bits >> 2)) ? cpi->av_per_frame_bandwidth >> 2 : max_bits >> 2; - - max_bits = (int)(max_bits * buffer_fullness_ratio); - - /* Lowest value we will set ... which should allow the buffer to - * refill. 
- */ - if (max_bits < min_max_bits) - max_bits = min_max_bits; - } - } - /* VBR */ - else - { - /* For VBR base this on the bits and frames left plus the - * two_pass_vbrmax_section rate passed in by the user - */ - max_bits = (int)(((double)cpi->twopass.bits_left / (cpi->twopass.total_stats.count - (double)cpi->common.current_video_frame)) * ((double)cpi->oxcf.two_pass_vbrmax_section / 100.0)); - } - - /* Trap case where we are out of bits */ - if (max_bits < 0) - max_bits = 0; - - return max_bits; + return max_bits; } -void vp8_init_first_pass(VP8_COMP *cpi) -{ - zero_stats(&cpi->twopass.total_stats); +void vp8_init_first_pass(VP8_COMP *cpi) { + zero_stats(&cpi->twopass.total_stats); } -void vp8_end_first_pass(VP8_COMP *cpi) -{ - output_stats(cpi, cpi->output_pkt_list, &cpi->twopass.total_stats); +void vp8_end_first_pass(VP8_COMP *cpi) { + output_stats(cpi, cpi->output_pkt_list, &cpi->twopass.total_stats); } -static void zz_motion_search( VP8_COMP *cpi, MACROBLOCK * x, - YV12_BUFFER_CONFIG * raw_buffer, - int * raw_motion_err, - YV12_BUFFER_CONFIG * recon_buffer, - int * best_motion_err, int recon_yoffset) -{ - MACROBLOCKD * const xd = & x->e_mbd; - BLOCK *b = &x->block[0]; - BLOCKD *d = &x->e_mbd.block[0]; +static void zz_motion_search(VP8_COMP *cpi, MACROBLOCK *x, + YV12_BUFFER_CONFIG *raw_buffer, + int *raw_motion_err, + YV12_BUFFER_CONFIG *recon_buffer, + int *best_motion_err, int recon_yoffset) { + MACROBLOCKD *const xd = &x->e_mbd; + BLOCK *b = &x->block[0]; + BLOCKD *d = &x->e_mbd.block[0]; - unsigned char *src_ptr = (*(b->base_src) + b->src); - int src_stride = b->src_stride; - unsigned char *raw_ptr; - int raw_stride = raw_buffer->y_stride; - unsigned char *ref_ptr; - int ref_stride = x->e_mbd.pre.y_stride; - (void)cpi; + unsigned char *src_ptr = (*(b->base_src) + b->src); + int src_stride = b->src_stride; + unsigned char *raw_ptr; + int raw_stride = raw_buffer->y_stride; + unsigned char *ref_ptr; + int ref_stride = x->e_mbd.pre.y_stride; + (void)cpi; - 
/* Set up pointers for this macro block raw buffer */ - raw_ptr = (unsigned char *)(raw_buffer->y_buffer + recon_yoffset - + d->offset); - vpx_mse16x16(src_ptr, src_stride, raw_ptr, raw_stride, - (unsigned int *)(raw_motion_err)); + /* Set up pointers for this macro block raw buffer */ + raw_ptr = (unsigned char *)(raw_buffer->y_buffer + recon_yoffset + d->offset); + vpx_mse16x16(src_ptr, src_stride, raw_ptr, raw_stride, + (unsigned int *)(raw_motion_err)); - /* Set up pointers for this macro block recon buffer */ - xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset; - ref_ptr = (unsigned char *)(xd->pre.y_buffer + d->offset ); - vpx_mse16x16(src_ptr, src_stride, ref_ptr, ref_stride, - (unsigned int *)(best_motion_err)); + /* Set up pointers for this macro block recon buffer */ + xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset; + ref_ptr = (unsigned char *)(xd->pre.y_buffer + d->offset); + vpx_mse16x16(src_ptr, src_stride, ref_ptr, ref_stride, + (unsigned int *)(best_motion_err)); } static void first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x, int_mv *ref_mv, MV *best_mv, YV12_BUFFER_CONFIG *recon_buffer, - int *best_motion_err, int recon_yoffset ) -{ - MACROBLOCKD *const xd = & x->e_mbd; - BLOCK *b = &x->block[0]; - BLOCKD *d = &x->e_mbd.block[0]; - int num00; + int *best_motion_err, int recon_yoffset) { + MACROBLOCKD *const xd = &x->e_mbd; + BLOCK *b = &x->block[0]; + BLOCKD *d = &x->e_mbd.block[0]; + int num00; - int_mv tmp_mv; - int_mv ref_mv_full; + int_mv tmp_mv; + int_mv ref_mv_full; - int tmp_err; - int step_param = 3; /* Dont search over full range for first pass */ - int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param; - int n; - vp8_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16]; - int new_mv_mode_penalty = 256; + int tmp_err; + int step_param = 3; /* Dont search over full range for first pass */ + int further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param; + int n; + vp8_variance_fn_ptr_t v_fn_ptr = 
cpi->fn_ptr[BLOCK_16X16]; + int new_mv_mode_penalty = 256; - /* override the default variance function to use MSE */ - v_fn_ptr.vf = vpx_mse16x16; + /* override the default variance function to use MSE */ + v_fn_ptr.vf = vpx_mse16x16; - /* Set up pointers for this macro block recon buffer */ - xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset; + /* Set up pointers for this macro block recon buffer */ + xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset; - /* Initial step/diamond search centred on best mv */ - tmp_mv.as_int = 0; - ref_mv_full.as_mv.col = ref_mv->as_mv.col>>3; - ref_mv_full.as_mv.row = ref_mv->as_mv.row>>3; - tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv, step_param, - x->sadperbit16, &num00, &v_fn_ptr, - x->mvcost, ref_mv); - if ( tmp_err < INT_MAX-new_mv_mode_penalty ) + /* Initial step/diamond search centred on best mv */ + tmp_mv.as_int = 0; + ref_mv_full.as_mv.col = ref_mv->as_mv.col >> 3; + ref_mv_full.as_mv.row = ref_mv->as_mv.row >> 3; + tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv, step_param, + x->sadperbit16, &num00, &v_fn_ptr, + x->mvcost, ref_mv); + if (tmp_err < INT_MAX - new_mv_mode_penalty) tmp_err += new_mv_mode_penalty; + + if (tmp_err < *best_motion_err) { + *best_motion_err = tmp_err; + best_mv->row = tmp_mv.as_mv.row; + best_mv->col = tmp_mv.as_mv.col; + } + + /* Further step/diamond searches as necessary */ + n = num00; + num00 = 0; + + while (n < further_steps) { + n++; + + if (num00) { + num00--; + } else { + tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv, + step_param + n, x->sadperbit16, &num00, + &v_fn_ptr, x->mvcost, ref_mv); + if (tmp_err < INT_MAX - new_mv_mode_penalty) { tmp_err += new_mv_mode_penalty; + } - if (tmp_err < *best_motion_err) - { + if (tmp_err < *best_motion_err) { *best_motion_err = tmp_err; best_mv->row = tmp_mv.as_mv.row; best_mv->col = tmp_mv.as_mv.col; + } } - - /* Further step/diamond searches as necessary */ - n = num00; - num00 
= 0; - - while (n < further_steps) - { - n++; - - if (num00) - num00--; - else - { - tmp_err = cpi->diamond_search_sad(x, b, d, &ref_mv_full, &tmp_mv, - step_param + n, x->sadperbit16, - &num00, &v_fn_ptr, x->mvcost, - ref_mv); - if ( tmp_err < INT_MAX-new_mv_mode_penalty ) - tmp_err += new_mv_mode_penalty; - - if (tmp_err < *best_motion_err) - { - *best_motion_err = tmp_err; - best_mv->row = tmp_mv.as_mv.row; - best_mv->col = tmp_mv.as_mv.col; - } - } - } + } } -void vp8_first_pass(VP8_COMP *cpi) -{ - int mb_row, mb_col; - MACROBLOCK *const x = & cpi->mb; - VP8_COMMON *const cm = & cpi->common; - MACROBLOCKD *const xd = & x->e_mbd; +void vp8_first_pass(VP8_COMP *cpi) { + int mb_row, mb_col; + MACROBLOCK *const x = &cpi->mb; + VP8_COMMON *const cm = &cpi->common; + MACROBLOCKD *const xd = &x->e_mbd; - int recon_yoffset, recon_uvoffset; - YV12_BUFFER_CONFIG *lst_yv12 = &cm->yv12_fb[cm->lst_fb_idx]; - YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx]; - YV12_BUFFER_CONFIG *gld_yv12 = &cm->yv12_fb[cm->gld_fb_idx]; - int recon_y_stride = lst_yv12->y_stride; - int recon_uv_stride = lst_yv12->uv_stride; - int64_t intra_error = 0; - int64_t coded_error = 0; + int recon_yoffset, recon_uvoffset; + YV12_BUFFER_CONFIG *lst_yv12 = &cm->yv12_fb[cm->lst_fb_idx]; + YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx]; + YV12_BUFFER_CONFIG *gld_yv12 = &cm->yv12_fb[cm->gld_fb_idx]; + int recon_y_stride = lst_yv12->y_stride; + int recon_uv_stride = lst_yv12->uv_stride; + int64_t intra_error = 0; + int64_t coded_error = 0; - int sum_mvr = 0, sum_mvc = 0; - int sum_mvr_abs = 0, sum_mvc_abs = 0; - int sum_mvrs = 0, sum_mvcs = 0; - int mvcount = 0; - int intercount = 0; - int second_ref_count = 0; - int intrapenalty = 256; - int neutral_count = 0; - int new_mv_count = 0; - int sum_in_vectors = 0; - uint32_t lastmv_as_int = 0; + int sum_mvr = 0, sum_mvc = 0; + int sum_mvr_abs = 0, sum_mvc_abs = 0; + int sum_mvrs = 0, sum_mvcs = 0; + int mvcount = 0; + int intercount = 0; 
+ int second_ref_count = 0; + int intrapenalty = 256; + int neutral_count = 0; + int new_mv_count = 0; + int sum_in_vectors = 0; + uint32_t lastmv_as_int = 0; - int_mv zero_ref_mv; + int_mv zero_ref_mv; - zero_ref_mv.as_int = 0; + zero_ref_mv.as_int = 0; - vp8_clear_system_state(); + vp8_clear_system_state(); - x->src = * cpi->Source; - xd->pre = *lst_yv12; - xd->dst = *new_yv12; + x->src = *cpi->Source; + xd->pre = *lst_yv12; + xd->dst = *new_yv12; - x->partition_info = x->pi; + x->partition_info = x->pi; - xd->mode_info_context = cm->mi; + xd->mode_info_context = cm->mi; - if(!cm->use_bilinear_mc_filter) - { - xd->subpixel_predict = vp8_sixtap_predict4x4; - xd->subpixel_predict8x4 = vp8_sixtap_predict8x4; - xd->subpixel_predict8x8 = vp8_sixtap_predict8x8; - xd->subpixel_predict16x16 = vp8_sixtap_predict16x16; - } - else - { - xd->subpixel_predict = vp8_bilinear_predict4x4; - xd->subpixel_predict8x4 = vp8_bilinear_predict8x4; - xd->subpixel_predict8x8 = vp8_bilinear_predict8x8; - xd->subpixel_predict16x16 = vp8_bilinear_predict16x16; - } + if (!cm->use_bilinear_mc_filter) { + xd->subpixel_predict = vp8_sixtap_predict4x4; + xd->subpixel_predict8x4 = vp8_sixtap_predict8x4; + xd->subpixel_predict8x8 = vp8_sixtap_predict8x8; + xd->subpixel_predict16x16 = vp8_sixtap_predict16x16; + } else { + xd->subpixel_predict = vp8_bilinear_predict4x4; + xd->subpixel_predict8x4 = vp8_bilinear_predict8x4; + xd->subpixel_predict8x8 = vp8_bilinear_predict8x8; + xd->subpixel_predict16x16 = vp8_bilinear_predict16x16; + } - vp8_build_block_offsets(x); + vp8_build_block_offsets(x); - /* set up frame new frame for intra coded blocks */ - vp8_setup_intra_recon(new_yv12); - vp8cx_frame_init_quantizer(cpi); + /* set up frame new frame for intra coded blocks */ + vp8_setup_intra_recon(new_yv12); + vp8cx_frame_init_quantizer(cpi); - /* Initialise the MV cost table to the defaults */ - { - int flag[2] = {1, 1}; - vp8_initialize_rd_consts(cpi, x, vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q)); 
- memcpy(cm->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context)); - vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag); - } + /* Initialise the MV cost table to the defaults */ + { + int flag[2] = { 1, 1 }; + vp8_initialize_rd_consts(cpi, x, + vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q)); + memcpy(cm->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context)); + vp8_build_component_cost_table(cpi->mb.mvcost, + (const MV_CONTEXT *)cm->fc.mvc, flag); + } - /* for each macroblock row in image */ - for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) - { - int_mv best_ref_mv; + /* for each macroblock row in image */ + for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { + int_mv best_ref_mv; + best_ref_mv.as_int = 0; + + /* reset above block coeffs */ + xd->up_available = (mb_row != 0); + recon_yoffset = (mb_row * recon_y_stride * 16); + recon_uvoffset = (mb_row * recon_uv_stride * 8); + + /* Set up limit values for motion vectors to prevent them extending + * outside the UMV borders + */ + x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16)); + x->mv_row_max = + ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16); + + /* for each macroblock col in image */ + for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { + int this_error; + int gf_motion_error = INT_MAX; + int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row); + + xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset; + xd->dst.u_buffer = new_yv12->u_buffer + recon_uvoffset; + xd->dst.v_buffer = new_yv12->v_buffer + recon_uvoffset; + xd->left_available = (mb_col != 0); + + /* Copy current mb to a buffer */ + vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); + + /* do intra 16x16 prediction */ + this_error = vp8_encode_intra(cpi, x, use_dc_pred); + + /* "intrapenalty" below deals with situations where the intra + * and inter error scores are very low (eg a plain black frame) + * We do not have special cases in first pass for 
0,0 and + * nearest etc so all inter modes carry an overhead cost + * estimate fot the mv. When the error score is very low this + * causes us to pick all or lots of INTRA modes and throw lots + * of key frames. This penalty adds a cost matching that of a + * 0,0 mv to the intra case. + */ + this_error += intrapenalty; + + /* Cumulative intra error total */ + intra_error += (int64_t)this_error; + + /* Set up limit values for motion vectors to prevent them + * extending outside the UMV borders + */ + x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16)); + x->mv_col_max = + ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16); + + /* Other than for the first frame do a motion search */ + if (cm->current_video_frame > 0) { + BLOCKD *d = &x->e_mbd.block[0]; + MV tmp_mv = { 0, 0 }; + int tmp_err; + int motion_error = INT_MAX; + int raw_motion_error = INT_MAX; + + /* Simple 0,0 motion with no mv overhead */ + zz_motion_search(cpi, x, cpi->last_frame_unscaled_source, + &raw_motion_error, lst_yv12, &motion_error, + recon_yoffset); + d->bmi.mv.as_mv.row = 0; + d->bmi.mv.as_mv.col = 0; + + if (raw_motion_error < cpi->oxcf.encode_breakout) { + goto skip_motion_search; + } + + /* Test last reference frame using the previous best mv as the + * starting point (best reference) for the search + */ + first_pass_motion_search(cpi, x, &best_ref_mv, &d->bmi.mv.as_mv, + lst_yv12, &motion_error, recon_yoffset); + + /* If the current best reference mv is not centred on 0,0 + * then do a 0,0 based search as well + */ + if (best_ref_mv.as_int) { + tmp_err = INT_MAX; + first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv, lst_yv12, + &tmp_err, recon_yoffset); + + if (tmp_err < motion_error) { + motion_error = tmp_err; + d->bmi.mv.as_mv.row = tmp_mv.row; + d->bmi.mv.as_mv.col = tmp_mv.col; + } + } + + /* Experimental search in a second reference frame ((0,0) + * based only) + */ + if (cm->current_video_frame > 1) { + first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv, 
gld_yv12, + &gf_motion_error, recon_yoffset); + + if ((gf_motion_error < motion_error) && + (gf_motion_error < this_error)) { + second_ref_count++; + } + + /* Reset to last frame as reference buffer */ + xd->pre.y_buffer = lst_yv12->y_buffer + recon_yoffset; + xd->pre.u_buffer = lst_yv12->u_buffer + recon_uvoffset; + xd->pre.v_buffer = lst_yv12->v_buffer + recon_uvoffset; + } + + skip_motion_search: + /* Intra assumed best */ best_ref_mv.as_int = 0; - /* reset above block coeffs */ - xd->up_available = (mb_row != 0); - recon_yoffset = (mb_row * recon_y_stride * 16); - recon_uvoffset = (mb_row * recon_uv_stride * 8); + if (motion_error <= this_error) { + /* Keep a count of cases where the inter and intra were + * very close and very low. This helps with scene cut + * detection for example in cropped clips with black bars + * at the sides or top and bottom. + */ + if ((((this_error - intrapenalty) * 9) <= (motion_error * 10)) && + (this_error < (2 * intrapenalty))) { + neutral_count++; + } - /* Set up limit values for motion vectors to prevent them extending - * outside the UMV borders - */ - x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16)); - x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16); + d->bmi.mv.as_mv.row *= 8; + d->bmi.mv.as_mv.col *= 8; + this_error = motion_error; + vp8_set_mbmode_and_mvs(x, NEWMV, &d->bmi.mv); + vp8_encode_inter16x16y(x); + sum_mvr += d->bmi.mv.as_mv.row; + sum_mvr_abs += abs(d->bmi.mv.as_mv.row); + sum_mvc += d->bmi.mv.as_mv.col; + sum_mvc_abs += abs(d->bmi.mv.as_mv.col); + sum_mvrs += d->bmi.mv.as_mv.row * d->bmi.mv.as_mv.row; + sum_mvcs += d->bmi.mv.as_mv.col * d->bmi.mv.as_mv.col; + intercount++; + best_ref_mv.as_int = d->bmi.mv.as_int; - /* for each macroblock col in image */ - for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) - { - int this_error; - int gf_motion_error = INT_MAX; - int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row); + /* Was the vector non-zero */ + if 
(d->bmi.mv.as_int) { + mvcount++; - xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset; - xd->dst.u_buffer = new_yv12->u_buffer + recon_uvoffset; - xd->dst.v_buffer = new_yv12->v_buffer + recon_uvoffset; - xd->left_available = (mb_col != 0); + /* Was it different from the last non zero vector */ + if (d->bmi.mv.as_int != lastmv_as_int) new_mv_count++; + lastmv_as_int = d->bmi.mv.as_int; - /* Copy current mb to a buffer */ - vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); - - /* do intra 16x16 prediction */ - this_error = vp8_encode_intra(cpi, x, use_dc_pred); - - /* "intrapenalty" below deals with situations where the intra - * and inter error scores are very low (eg a plain black frame) - * We do not have special cases in first pass for 0,0 and - * nearest etc so all inter modes carry an overhead cost - * estimate fot the mv. When the error score is very low this - * causes us to pick all or lots of INTRA modes and throw lots - * of key frames. This penalty adds a cost matching that of a - * 0,0 mv to the intra case. 
- */ - this_error += intrapenalty; - - /* Cumulative intra error total */ - intra_error += (int64_t)this_error; - - /* Set up limit values for motion vectors to prevent them - * extending outside the UMV borders - */ - x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16)); - x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16); - - /* Other than for the first frame do a motion search */ - if (cm->current_video_frame > 0) - { - BLOCKD *d = &x->e_mbd.block[0]; - MV tmp_mv = {0, 0}; - int tmp_err; - int motion_error = INT_MAX; - int raw_motion_error = INT_MAX; - - /* Simple 0,0 motion with no mv overhead */ - zz_motion_search( cpi, x, cpi->last_frame_unscaled_source, - &raw_motion_error, lst_yv12, &motion_error, - recon_yoffset ); - d->bmi.mv.as_mv.row = 0; - d->bmi.mv.as_mv.col = 0; - - if (raw_motion_error < cpi->oxcf.encode_breakout) - goto skip_motion_search; - - /* Test last reference frame using the previous best mv as the - * starting point (best reference) for the search - */ - first_pass_motion_search(cpi, x, &best_ref_mv, - &d->bmi.mv.as_mv, lst_yv12, - &motion_error, recon_yoffset); - - /* If the current best reference mv is not centred on 0,0 - * then do a 0,0 based search as well - */ - if (best_ref_mv.as_int) - { - tmp_err = INT_MAX; - first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv, - lst_yv12, &tmp_err, recon_yoffset); - - if ( tmp_err < motion_error ) - { - motion_error = tmp_err; - d->bmi.mv.as_mv.row = tmp_mv.row; - d->bmi.mv.as_mv.col = tmp_mv.col; - } - } - - /* Experimental search in a second reference frame ((0,0) - * based only) - */ - if (cm->current_video_frame > 1) - { - first_pass_motion_search(cpi, x, &zero_ref_mv, &tmp_mv, gld_yv12, &gf_motion_error, recon_yoffset); - - if ((gf_motion_error < motion_error) && (gf_motion_error < this_error)) - { - second_ref_count++; - } - - /* Reset to last frame as reference buffer */ - xd->pre.y_buffer = lst_yv12->y_buffer + recon_yoffset; - xd->pre.u_buffer = 
lst_yv12->u_buffer + recon_uvoffset; - xd->pre.v_buffer = lst_yv12->v_buffer + recon_uvoffset; - } - -skip_motion_search: - /* Intra assumed best */ - best_ref_mv.as_int = 0; - - if (motion_error <= this_error) - { - /* Keep a count of cases where the inter and intra were - * very close and very low. This helps with scene cut - * detection for example in cropped clips with black bars - * at the sides or top and bottom. - */ - if( (((this_error-intrapenalty) * 9) <= - (motion_error*10)) && - (this_error < (2*intrapenalty)) ) - { - neutral_count++; - } - - d->bmi.mv.as_mv.row *= 8; - d->bmi.mv.as_mv.col *= 8; - this_error = motion_error; - vp8_set_mbmode_and_mvs(x, NEWMV, &d->bmi.mv); - vp8_encode_inter16x16y(x); - sum_mvr += d->bmi.mv.as_mv.row; - sum_mvr_abs += abs(d->bmi.mv.as_mv.row); - sum_mvc += d->bmi.mv.as_mv.col; - sum_mvc_abs += abs(d->bmi.mv.as_mv.col); - sum_mvrs += d->bmi.mv.as_mv.row * d->bmi.mv.as_mv.row; - sum_mvcs += d->bmi.mv.as_mv.col * d->bmi.mv.as_mv.col; - intercount++; - - best_ref_mv.as_int = d->bmi.mv.as_int; - - /* Was the vector non-zero */ - if (d->bmi.mv.as_int) - { - mvcount++; - - /* Was it different from the last non zero vector */ - if ( d->bmi.mv.as_int != lastmv_as_int ) - new_mv_count++; - lastmv_as_int = d->bmi.mv.as_int; - - /* Does the Row vector point inwards or outwards */ - if (mb_row < cm->mb_rows / 2) - { - if (d->bmi.mv.as_mv.row > 0) - sum_in_vectors--; - else if (d->bmi.mv.as_mv.row < 0) - sum_in_vectors++; - } - else if (mb_row > cm->mb_rows / 2) - { - if (d->bmi.mv.as_mv.row > 0) - sum_in_vectors++; - else if (d->bmi.mv.as_mv.row < 0) - sum_in_vectors--; - } - - /* Does the Row vector point inwards or outwards */ - if (mb_col < cm->mb_cols / 2) - { - if (d->bmi.mv.as_mv.col > 0) - sum_in_vectors--; - else if (d->bmi.mv.as_mv.col < 0) - sum_in_vectors++; - } - else if (mb_col > cm->mb_cols / 2) - { - if (d->bmi.mv.as_mv.col > 0) - sum_in_vectors++; - else if (d->bmi.mv.as_mv.col < 0) - sum_in_vectors--; - } - } - } + /* 
Does the Row vector point inwards or outwards */ + if (mb_row < cm->mb_rows / 2) { + if (d->bmi.mv.as_mv.row > 0) { + sum_in_vectors--; + } else if (d->bmi.mv.as_mv.row < 0) { + sum_in_vectors++; + } + } else if (mb_row > cm->mb_rows / 2) { + if (d->bmi.mv.as_mv.row > 0) { + sum_in_vectors++; + } else if (d->bmi.mv.as_mv.row < 0) { + sum_in_vectors--; + } } - coded_error += (int64_t)this_error; - - /* adjust to the next column of macroblocks */ - x->src.y_buffer += 16; - x->src.u_buffer += 8; - x->src.v_buffer += 8; - - recon_yoffset += 16; - recon_uvoffset += 8; + /* Does the Row vector point inwards or outwards */ + if (mb_col < cm->mb_cols / 2) { + if (d->bmi.mv.as_mv.col > 0) { + sum_in_vectors--; + } else if (d->bmi.mv.as_mv.col < 0) { + sum_in_vectors++; + } + } else if (mb_col > cm->mb_cols / 2) { + if (d->bmi.mv.as_mv.col > 0) { + sum_in_vectors++; + } else if (d->bmi.mv.as_mv.col < 0) { + sum_in_vectors--; + } + } + } } + } - /* adjust to the next row of mbs */ - x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols; - x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols; - x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols; + coded_error += (int64_t)this_error; - /* extend the recon for intra prediction */ - vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, xd->dst.v_buffer + 8); - vp8_clear_system_state(); + /* adjust to the next column of macroblocks */ + x->src.y_buffer += 16; + x->src.u_buffer += 8; + x->src.v_buffer += 8; + + recon_yoffset += 16; + recon_uvoffset += 8; } + /* adjust to the next row of mbs */ + x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols; + x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols; + x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols; + + /* extend the recon for intra prediction */ + vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, + xd->dst.v_buffer + 8); vp8_clear_system_state(); - { - double weight = 0.0; + } - FIRSTPASS_STATS 
fps; + vp8_clear_system_state(); + { + double weight = 0.0; - fps.frame = cm->current_video_frame ; - fps.intra_error = (double)(intra_error >> 8); - fps.coded_error = (double)(coded_error >> 8); - weight = simple_weight(cpi->Source); + FIRSTPASS_STATS fps; + fps.frame = cm->current_video_frame; + fps.intra_error = (double)(intra_error >> 8); + fps.coded_error = (double)(coded_error >> 8); + weight = simple_weight(cpi->Source); - if (weight < 0.1) - weight = 0.1; + if (weight < 0.1) weight = 0.1; - fps.ssim_weighted_pred_err = fps.coded_error * weight; + fps.ssim_weighted_pred_err = fps.coded_error * weight; - fps.pcnt_inter = 0.0; - fps.pcnt_motion = 0.0; - fps.MVr = 0.0; - fps.mvr_abs = 0.0; - fps.MVc = 0.0; - fps.mvc_abs = 0.0; - fps.MVrv = 0.0; - fps.MVcv = 0.0; - fps.mv_in_out_count = 0.0; - fps.new_mv_count = 0.0; - fps.count = 1.0; + fps.pcnt_inter = 0.0; + fps.pcnt_motion = 0.0; + fps.MVr = 0.0; + fps.mvr_abs = 0.0; + fps.MVc = 0.0; + fps.mvc_abs = 0.0; + fps.MVrv = 0.0; + fps.MVcv = 0.0; + fps.mv_in_out_count = 0.0; + fps.new_mv_count = 0.0; + fps.count = 1.0; - fps.pcnt_inter = 1.0 * (double)intercount / cm->MBs; - fps.pcnt_second_ref = 1.0 * (double)second_ref_count / cm->MBs; - fps.pcnt_neutral = 1.0 * (double)neutral_count / cm->MBs; + fps.pcnt_inter = 1.0 * (double)intercount / cm->MBs; + fps.pcnt_second_ref = 1.0 * (double)second_ref_count / cm->MBs; + fps.pcnt_neutral = 1.0 * (double)neutral_count / cm->MBs; - if (mvcount > 0) - { - fps.MVr = (double)sum_mvr / (double)mvcount; - fps.mvr_abs = (double)sum_mvr_abs / (double)mvcount; - fps.MVc = (double)sum_mvc / (double)mvcount; - fps.mvc_abs = (double)sum_mvc_abs / (double)mvcount; - fps.MVrv = ((double)sum_mvrs - (fps.MVr * fps.MVr / (double)mvcount)) / (double)mvcount; - fps.MVcv = ((double)sum_mvcs - (fps.MVc * fps.MVc / (double)mvcount)) / (double)mvcount; - fps.mv_in_out_count = (double)sum_in_vectors / (double)(mvcount * 2); - fps.new_mv_count = new_mv_count; + if (mvcount > 0) { + fps.MVr = 
(double)sum_mvr / (double)mvcount; + fps.mvr_abs = (double)sum_mvr_abs / (double)mvcount; + fps.MVc = (double)sum_mvc / (double)mvcount; + fps.mvc_abs = (double)sum_mvc_abs / (double)mvcount; + fps.MVrv = ((double)sum_mvrs - (fps.MVr * fps.MVr / (double)mvcount)) / + (double)mvcount; + fps.MVcv = ((double)sum_mvcs - (fps.MVc * fps.MVc / (double)mvcount)) / + (double)mvcount; + fps.mv_in_out_count = (double)sum_in_vectors / (double)(mvcount * 2); + fps.new_mv_count = new_mv_count; - fps.pcnt_motion = 1.0 * (double)mvcount / cpi->common.MBs; - } - - /* TODO: handle the case when duration is set to 0, or something less - * than the full time between subsequent cpi->source_time_stamps - */ - fps.duration = (double)(cpi->source->ts_end - - cpi->source->ts_start); - - /* don't want to do output stats with a stack variable! */ - memcpy(&cpi->twopass.this_frame_stats, - &fps, - sizeof(FIRSTPASS_STATS)); - output_stats(cpi, cpi->output_pkt_list, &cpi->twopass.this_frame_stats); - accumulate_stats(&cpi->twopass.total_stats, &fps); + fps.pcnt_motion = 1.0 * (double)mvcount / cpi->common.MBs; } - /* Copy the previous Last Frame into the GF buffer if specific - * conditions for doing so are met + /* TODO: handle the case when duration is set to 0, or something less + * than the full time between subsequent cpi->source_time_stamps */ - if ((cm->current_video_frame > 0) && - (cpi->twopass.this_frame_stats.pcnt_inter > 0.20) && - ((cpi->twopass.this_frame_stats.intra_error / - DOUBLE_DIVIDE_CHECK(cpi->twopass.this_frame_stats.coded_error)) > - 2.0)) - { - vp8_yv12_copy_frame(lst_yv12, gld_yv12); + fps.duration = (double)(cpi->source->ts_end - cpi->source->ts_start); + + /* don't want to do output stats with a stack variable! 
*/ + memcpy(&cpi->twopass.this_frame_stats, &fps, sizeof(FIRSTPASS_STATS)); + output_stats(cpi, cpi->output_pkt_list, &cpi->twopass.this_frame_stats); + accumulate_stats(&cpi->twopass.total_stats, &fps); + } + + /* Copy the previous Last Frame into the GF buffer if specific + * conditions for doing so are met + */ + if ((cm->current_video_frame > 0) && + (cpi->twopass.this_frame_stats.pcnt_inter > 0.20) && + ((cpi->twopass.this_frame_stats.intra_error / + DOUBLE_DIVIDE_CHECK(cpi->twopass.this_frame_stats.coded_error)) > + 2.0)) { + vp8_yv12_copy_frame(lst_yv12, gld_yv12); + } + + /* swap frame pointers so last frame refers to the frame we just + * compressed + */ + vp8_swap_yv12_buffer(lst_yv12, new_yv12); + vp8_yv12_extend_frame_borders(lst_yv12); + + /* Special case for the first frame. Copy into the GF buffer as a + * second reference. + */ + if (cm->current_video_frame == 0) { + vp8_yv12_copy_frame(lst_yv12, gld_yv12); + } + + /* use this to see what the first pass reconstruction looks like */ + if (0) { + char filename[512]; + FILE *recon_file; + sprintf(filename, "enc%04d.yuv", (int)cm->current_video_frame); + + if (cm->current_video_frame == 0) { + recon_file = fopen(filename, "wb"); + } else { + recon_file = fopen(filename, "ab"); } - /* swap frame pointers so last frame refers to the frame we just - * compressed - */ - vp8_swap_yv12_buffer(lst_yv12, new_yv12); - vp8_yv12_extend_frame_borders(lst_yv12); - - /* Special case for the first frame. Copy into the GF buffer as a - * second reference. 
- */ - if (cm->current_video_frame == 0) - { - vp8_yv12_copy_frame(lst_yv12, gld_yv12); - } - - - /* use this to see what the first pass reconstruction looks like */ - if (0) - { - char filename[512]; - FILE *recon_file; - sprintf(filename, "enc%04d.yuv", (int) cm->current_video_frame); - - if (cm->current_video_frame == 0) - recon_file = fopen(filename, "wb"); - else - recon_file = fopen(filename, "ab"); - - (void) fwrite(lst_yv12->buffer_alloc, lst_yv12->frame_size, 1, - recon_file); - fclose(recon_file); - } - - cm->current_video_frame++; + (void)fwrite(lst_yv12->buffer_alloc, lst_yv12->frame_size, 1, recon_file); + fclose(recon_file); + } + cm->current_video_frame++; } extern const int vp8_bits_per_mb[2][QINDEX_RANGE]; @@ -908,2166 +850,2012 @@ extern const int vp8_bits_per_mb[2][QINDEX_RANGE]; * Currently simplistic in its assumptions for testing. */ -static double bitcost( double prob ) -{ - if (prob > 0.000122) +static double bitcost(double prob) { + if (prob > 0.000122) { return -log(prob) / log(2.0); - else + } else { return 13.0; + } } -static int64_t estimate_modemvcost(VP8_COMP *cpi, - FIRSTPASS_STATS * fpstats) -{ - int mv_cost; - int64_t mode_cost; +static int64_t estimate_modemvcost(VP8_COMP *cpi, FIRSTPASS_STATS *fpstats) { + int mv_cost; + int64_t mode_cost; - double av_pct_inter = fpstats->pcnt_inter / fpstats->count; - double av_pct_motion = fpstats->pcnt_motion / fpstats->count; - double av_intra = (1.0 - av_pct_inter); + double av_pct_inter = fpstats->pcnt_inter / fpstats->count; + double av_pct_motion = fpstats->pcnt_motion / fpstats->count; + double av_intra = (1.0 - av_pct_inter); - double zz_cost; - double motion_cost; - double intra_cost; + double zz_cost; + double motion_cost; + double intra_cost; - zz_cost = bitcost(av_pct_inter - av_pct_motion); - motion_cost = bitcost(av_pct_motion); - intra_cost = bitcost(av_intra); + zz_cost = bitcost(av_pct_inter - av_pct_motion); + motion_cost = bitcost(av_pct_motion); + intra_cost = 
bitcost(av_intra); - /* Estimate of extra bits per mv overhead for mbs - * << 9 is the normalization to the (bits * 512) used in vp8_bits_per_mb - */ - mv_cost = ((int)(fpstats->new_mv_count / fpstats->count) * 8) << 9; + /* Estimate of extra bits per mv overhead for mbs + * << 9 is the normalization to the (bits * 512) used in vp8_bits_per_mb + */ + mv_cost = ((int)(fpstats->new_mv_count / fpstats->count) * 8) << 9; - /* Crude estimate of overhead cost from modes - * << 9 is the normalization to (bits * 512) used in vp8_bits_per_mb - */ - mode_cost = (int64_t)((((av_pct_inter - av_pct_motion) * zz_cost) + - (av_pct_motion * motion_cost) + - (av_intra * intra_cost)) * cpi->common.MBs) * 512; + /* Crude estimate of overhead cost from modes + * << 9 is the normalization to (bits * 512) used in vp8_bits_per_mb + */ + mode_cost = + (int64_t)((((av_pct_inter - av_pct_motion) * zz_cost) + + (av_pct_motion * motion_cost) + (av_intra * intra_cost)) * + cpi->common.MBs) * + 512; - return mv_cost + mode_cost; + return mv_cost + mode_cost; } -static double calc_correction_factor( double err_per_mb, - double err_devisor, - double pt_low, - double pt_high, - int Q ) -{ - double power_term; - double error_term = err_per_mb / err_devisor; - double correction_factor; +static double calc_correction_factor(double err_per_mb, double err_devisor, + double pt_low, double pt_high, int Q) { + double power_term; + double error_term = err_per_mb / err_devisor; + double correction_factor; - /* Adjustment based on Q to power term. */ - power_term = pt_low + (Q * 0.01); - power_term = (power_term > pt_high) ? pt_high : power_term; + /* Adjustment based on Q to power term. */ + power_term = pt_low + (Q * 0.01); + power_term = (power_term > pt_high) ? 
pt_high : power_term; - /* Adjustments to error term */ - /* TBD */ + /* Adjustments to error term */ + /* TBD */ - /* Calculate correction factor */ - correction_factor = pow(error_term, power_term); + /* Calculate correction factor */ + correction_factor = pow(error_term, power_term); - /* Clip range */ - correction_factor = - (correction_factor < 0.05) - ? 0.05 : (correction_factor > 5.0) ? 5.0 : correction_factor; + /* Clip range */ + correction_factor = (correction_factor < 0.05) + ? 0.05 + : (correction_factor > 5.0) ? 5.0 : correction_factor; - return correction_factor; + return correction_factor; } -static int estimate_max_q(VP8_COMP *cpi, - FIRSTPASS_STATS * fpstats, - int section_target_bandwitdh, - int overhead_bits ) -{ - int Q; - int num_mbs = cpi->common.MBs; - int target_norm_bits_per_mb; +static int estimate_max_q(VP8_COMP *cpi, FIRSTPASS_STATS *fpstats, + int section_target_bandwitdh, int overhead_bits) { + int Q; + int num_mbs = cpi->common.MBs; + int target_norm_bits_per_mb; - double section_err = (fpstats->coded_error / fpstats->count); - double err_per_mb = section_err / num_mbs; - double err_correction_factor; - double speed_correction = 1.0; - int overhead_bits_per_mb; + double section_err = (fpstats->coded_error / fpstats->count); + double err_per_mb = section_err / num_mbs; + double err_correction_factor; + double speed_correction = 1.0; + int overhead_bits_per_mb; - if (section_target_bandwitdh <= 0) - return cpi->twopass.maxq_max_limit; /* Highest value allowed */ + if (section_target_bandwitdh <= 0) { + return cpi->twopass.maxq_max_limit; /* Highest value allowed */ + } - target_norm_bits_per_mb = - (section_target_bandwitdh < (1 << 20)) - ? (512 * section_target_bandwitdh) / num_mbs - : 512 * (section_target_bandwitdh / num_mbs); + target_norm_bits_per_mb = (section_target_bandwitdh < (1 << 20)) + ? 
(512 * section_target_bandwitdh) / num_mbs + : 512 * (section_target_bandwitdh / num_mbs); - /* Calculate a corrective factor based on a rolling ratio of bits spent - * vs target bits + /* Calculate a corrective factor based on a rolling ratio of bits spent + * vs target bits + */ + if ((cpi->rolling_target_bits > 0) && + (cpi->active_worst_quality < cpi->worst_quality)) { + double rolling_ratio; + + rolling_ratio = + (double)cpi->rolling_actual_bits / (double)cpi->rolling_target_bits; + + if (rolling_ratio < 0.95) { + cpi->twopass.est_max_qcorrection_factor -= 0.005; + } else if (rolling_ratio > 1.05) { + cpi->twopass.est_max_qcorrection_factor += 0.005; + } + + cpi->twopass.est_max_qcorrection_factor = + (cpi->twopass.est_max_qcorrection_factor < 0.1) + ? 0.1 + : (cpi->twopass.est_max_qcorrection_factor > 10.0) + ? 10.0 + : cpi->twopass.est_max_qcorrection_factor; + } + + /* Corrections for higher compression speed settings + * (reduced compression expected) + */ + if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) { + if (cpi->oxcf.cpu_used <= 5) { + speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04); + } else { + speed_correction = 1.25; + } + } + + /* Estimate of overhead bits per mb */ + /* Correction to overhead bits for min allowed Q. */ + overhead_bits_per_mb = overhead_bits / num_mbs; + overhead_bits_per_mb = (int)(overhead_bits_per_mb * + pow(0.98, (double)cpi->twopass.maxq_min_limit)); + + /* Try and pick a max Q that will be high enough to encode the + * content at the given rate. 
+ */ + for (Q = cpi->twopass.maxq_min_limit; Q < cpi->twopass.maxq_max_limit; ++Q) { + int bits_per_mb_at_this_q; + + /* Error per MB based correction factor */ + err_correction_factor = + calc_correction_factor(err_per_mb, 150.0, 0.40, 0.90, Q); + + bits_per_mb_at_this_q = + vp8_bits_per_mb[INTER_FRAME][Q] + overhead_bits_per_mb; + + bits_per_mb_at_this_q = (int)(.5 + + err_correction_factor * speed_correction * + cpi->twopass.est_max_qcorrection_factor * + cpi->twopass.section_max_qfactor * + (double)bits_per_mb_at_this_q); + + /* Mode and motion overhead */ + /* As Q rises in real encode loop rd code will force overhead down + * We make a crude adjustment for this here as *.98 per Q step. */ - if ((cpi->rolling_target_bits > 0) && - (cpi->active_worst_quality < cpi->worst_quality)) - { - double rolling_ratio; + overhead_bits_per_mb = (int)((double)overhead_bits_per_mb * 0.98); - rolling_ratio = (double)cpi->rolling_actual_bits / - (double)cpi->rolling_target_bits; + if (bits_per_mb_at_this_q <= target_norm_bits_per_mb) break; + } - if (rolling_ratio < 0.95) - cpi->twopass.est_max_qcorrection_factor -= 0.005; - else if (rolling_ratio > 1.05) - cpi->twopass.est_max_qcorrection_factor += 0.005; + /* Restriction on active max q for constrained quality mode. */ + if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) && + (Q < cpi->cq_target_quality)) { + Q = cpi->cq_target_quality; + } - cpi->twopass.est_max_qcorrection_factor = - (cpi->twopass.est_max_qcorrection_factor < 0.1) - ? 0.1 - : (cpi->twopass.est_max_qcorrection_factor > 10.0) - ? 10.0 : cpi->twopass.est_max_qcorrection_factor; - } + /* Adjust maxq_min_limit and maxq_max_limit limits based on + * average q observed in clip for non kf/gf.arf frames + * Give average a chance to settle though. + */ + if ((cpi->ni_frames > ((int)cpi->twopass.total_stats.count >> 8)) && + (cpi->ni_frames > 150)) { + cpi->twopass.maxq_max_limit = ((cpi->ni_av_qi + 32) < cpi->worst_quality) + ? 
(cpi->ni_av_qi + 32) + : cpi->worst_quality; + cpi->twopass.maxq_min_limit = ((cpi->ni_av_qi - 32) > cpi->best_quality) + ? (cpi->ni_av_qi - 32) + : cpi->best_quality; + } - /* Corrections for higher compression speed settings - * (reduced compression expected) - */ - if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) - { - if (cpi->oxcf.cpu_used <= 5) - speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04); - else - speed_correction = 1.25; - } - - /* Estimate of overhead bits per mb */ - /* Correction to overhead bits for min allowed Q. */ - overhead_bits_per_mb = overhead_bits / num_mbs; - overhead_bits_per_mb = (int)(overhead_bits_per_mb * - pow( 0.98, (double)cpi->twopass.maxq_min_limit )); - - /* Try and pick a max Q that will be high enough to encode the - * content at the given rate. - */ - for (Q = cpi->twopass.maxq_min_limit; Q < cpi->twopass.maxq_max_limit; Q++) - { - int bits_per_mb_at_this_q; - - /* Error per MB based correction factor */ - err_correction_factor = - calc_correction_factor(err_per_mb, 150.0, 0.40, 0.90, Q); - - bits_per_mb_at_this_q = - vp8_bits_per_mb[INTER_FRAME][Q] + overhead_bits_per_mb; - - bits_per_mb_at_this_q = (int)(.5 + err_correction_factor - * speed_correction * cpi->twopass.est_max_qcorrection_factor - * cpi->twopass.section_max_qfactor - * (double)bits_per_mb_at_this_q); - - /* Mode and motion overhead */ - /* As Q rises in real encode loop rd code will force overhead down - * We make a crude adjustment for this here as *.98 per Q step. - */ - overhead_bits_per_mb = (int)((double)overhead_bits_per_mb * 0.98); - - if (bits_per_mb_at_this_q <= target_norm_bits_per_mb) - break; - } - - /* Restriction on active max q for constrained quality mode. 
*/ - if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) && - (Q < cpi->cq_target_quality) ) - { - Q = cpi->cq_target_quality; - } - - /* Adjust maxq_min_limit and maxq_max_limit limits based on - * average q observed in clip for non kf/gf.arf frames - * Give average a chance to settle though. - */ - if ( (cpi->ni_frames > - ((int)cpi->twopass.total_stats.count >> 8)) && - (cpi->ni_frames > 150) ) - { - cpi->twopass.maxq_max_limit = ((cpi->ni_av_qi + 32) < cpi->worst_quality) - ? (cpi->ni_av_qi + 32) : cpi->worst_quality; - cpi->twopass.maxq_min_limit = ((cpi->ni_av_qi - 32) > cpi->best_quality) - ? (cpi->ni_av_qi - 32) : cpi->best_quality; - } - - return Q; + return Q; } /* For cq mode estimate a cq level that matches the observed * complexity and data rate. */ -static int estimate_cq( VP8_COMP *cpi, - FIRSTPASS_STATS * fpstats, - int section_target_bandwitdh, - int overhead_bits ) -{ - int Q; - int num_mbs = cpi->common.MBs; - int target_norm_bits_per_mb; +static int estimate_cq(VP8_COMP *cpi, FIRSTPASS_STATS *fpstats, + int section_target_bandwitdh, int overhead_bits) { + int Q; + int num_mbs = cpi->common.MBs; + int target_norm_bits_per_mb; - double section_err = (fpstats->coded_error / fpstats->count); - double err_per_mb = section_err / num_mbs; - double err_correction_factor; - double speed_correction = 1.0; - double clip_iiratio; - double clip_iifactor; - int overhead_bits_per_mb; + double section_err = (fpstats->coded_error / fpstats->count); + double err_per_mb = section_err / num_mbs; + double err_correction_factor; + double speed_correction = 1.0; + double clip_iiratio; + double clip_iifactor; + int overhead_bits_per_mb; - if (0) - { - FILE *f = fopen("epmp.stt", "a"); - fprintf(f, "%10.2f\n", err_per_mb ); - fclose(f); + if (0) { + FILE *f = fopen("epmp.stt", "a"); + fprintf(f, "%10.2f\n", err_per_mb); + fclose(f); + } + + target_norm_bits_per_mb = (section_target_bandwitdh < (1 << 20)) + ? 
(512 * section_target_bandwitdh) / num_mbs + : 512 * (section_target_bandwitdh / num_mbs); + + /* Estimate of overhead bits per mb */ + overhead_bits_per_mb = overhead_bits / num_mbs; + + /* Corrections for higher compression speed settings + * (reduced compression expected) + */ + if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) { + if (cpi->oxcf.cpu_used <= 5) { + speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04); + } else { + speed_correction = 1.25; } + } - target_norm_bits_per_mb = (section_target_bandwitdh < (1 << 20)) - ? (512 * section_target_bandwitdh) / num_mbs - : 512 * (section_target_bandwitdh / num_mbs); + /* II ratio correction factor for clip as a whole */ + clip_iiratio = cpi->twopass.total_stats.intra_error / + DOUBLE_DIVIDE_CHECK(cpi->twopass.total_stats.coded_error); + clip_iifactor = 1.0 - ((clip_iiratio - 10.0) * 0.025); + if (clip_iifactor < 0.80) clip_iifactor = 0.80; - /* Estimate of overhead bits per mb */ - overhead_bits_per_mb = overhead_bits / num_mbs; + /* Try and pick a Q that can encode the content at the given rate. */ + for (Q = 0; Q < MAXQ; ++Q) { + int bits_per_mb_at_this_q; - /* Corrections for higher compression speed settings - * (reduced compression expected) + /* Error per MB based correction factor */ + err_correction_factor = + calc_correction_factor(err_per_mb, 100.0, 0.40, 0.90, Q); + + bits_per_mb_at_this_q = + vp8_bits_per_mb[INTER_FRAME][Q] + overhead_bits_per_mb; + + bits_per_mb_at_this_q = + (int)(.5 + + err_correction_factor * speed_correction * clip_iifactor * + (double)bits_per_mb_at_this_q); + + /* Mode and motion overhead */ + /* As Q rises in real encode loop rd code will force overhead down + * We make a crude adjustment for this here as *.98 per Q step. 
*/ - if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) - { - if (cpi->oxcf.cpu_used <= 5) - speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04); - else - speed_correction = 1.25; - } + overhead_bits_per_mb = (int)((double)overhead_bits_per_mb * 0.98); - /* II ratio correction factor for clip as a whole */ - clip_iiratio = cpi->twopass.total_stats.intra_error / - DOUBLE_DIVIDE_CHECK(cpi->twopass.total_stats.coded_error); - clip_iifactor = 1.0 - ((clip_iiratio - 10.0) * 0.025); - if (clip_iifactor < 0.80) - clip_iifactor = 0.80; + if (bits_per_mb_at_this_q <= target_norm_bits_per_mb) break; + } - /* Try and pick a Q that can encode the content at the given rate. */ - for (Q = 0; Q < MAXQ; Q++) - { - int bits_per_mb_at_this_q; + /* Clip value to range "best allowed to (worst allowed - 1)" */ + Q = cq_level[Q]; + if (Q >= cpi->worst_quality) Q = cpi->worst_quality - 1; + if (Q < cpi->best_quality) Q = cpi->best_quality; - /* Error per MB based correction factor */ - err_correction_factor = - calc_correction_factor(err_per_mb, 100.0, 0.40, 0.90, Q); - - bits_per_mb_at_this_q = - vp8_bits_per_mb[INTER_FRAME][Q] + overhead_bits_per_mb; - - bits_per_mb_at_this_q = - (int)( .5 + err_correction_factor * - speed_correction * - clip_iifactor * - (double)bits_per_mb_at_this_q); - - /* Mode and motion overhead */ - /* As Q rises in real encode loop rd code will force overhead down - * We make a crude adjustment for this here as *.98 per Q step. 
- */ - overhead_bits_per_mb = (int)((double)overhead_bits_per_mb * 0.98); - - if (bits_per_mb_at_this_q <= target_norm_bits_per_mb) - break; - } - - /* Clip value to range "best allowed to (worst allowed - 1)" */ - Q = cq_level[Q]; - if ( Q >= cpi->worst_quality ) - Q = cpi->worst_quality - 1; - if ( Q < cpi->best_quality ) - Q = cpi->best_quality; - - return Q; + return Q; } -static int estimate_q(VP8_COMP *cpi, double section_err, int section_target_bandwitdh) -{ - int Q; - int num_mbs = cpi->common.MBs; - int target_norm_bits_per_mb; +static int estimate_q(VP8_COMP *cpi, double section_err, + int section_target_bandwitdh) { + int Q; + int num_mbs = cpi->common.MBs; + int target_norm_bits_per_mb; - double err_per_mb = section_err / num_mbs; - double err_correction_factor; - double speed_correction = 1.0; + double err_per_mb = section_err / num_mbs; + double err_correction_factor; + double speed_correction = 1.0; - target_norm_bits_per_mb = (section_target_bandwitdh < (1 << 20)) ? (512 * section_target_bandwitdh) / num_mbs : 512 * (section_target_bandwitdh / num_mbs); + target_norm_bits_per_mb = (section_target_bandwitdh < (1 << 20)) + ? (512 * section_target_bandwitdh) / num_mbs + : 512 * (section_target_bandwitdh / num_mbs); - /* Corrections for higher compression speed settings - * (reduced compression expected) - */ - if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) - { - if (cpi->oxcf.cpu_used <= 5) - speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04); - else - speed_correction = 1.25; + /* Corrections for higher compression speed settings + * (reduced compression expected) + */ + if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) { + if (cpi->oxcf.cpu_used <= 5) { + speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04); + } else { + speed_correction = 1.25; } + } - /* Try and pick a Q that can encode the content at the given rate. 
*/ - for (Q = 0; Q < MAXQ; Q++) - { - int bits_per_mb_at_this_q; + /* Try and pick a Q that can encode the content at the given rate. */ + for (Q = 0; Q < MAXQ; ++Q) { + int bits_per_mb_at_this_q; - /* Error per MB based correction factor */ - err_correction_factor = - calc_correction_factor(err_per_mb, 150.0, 0.40, 0.90, Q); + /* Error per MB based correction factor */ + err_correction_factor = + calc_correction_factor(err_per_mb, 150.0, 0.40, 0.90, Q); - bits_per_mb_at_this_q = - (int)( .5 + ( err_correction_factor * - speed_correction * - cpi->twopass.est_max_qcorrection_factor * - (double)vp8_bits_per_mb[INTER_FRAME][Q] / 1.0 ) ); + bits_per_mb_at_this_q = + (int)(.5 + (err_correction_factor * speed_correction * + cpi->twopass.est_max_qcorrection_factor * + (double)vp8_bits_per_mb[INTER_FRAME][Q] / 1.0)); - if (bits_per_mb_at_this_q <= target_norm_bits_per_mb) - break; - } + if (bits_per_mb_at_this_q <= target_norm_bits_per_mb) break; + } - return Q; + return Q; } /* Estimate a worst case Q for a KF group */ -static int estimate_kf_group_q(VP8_COMP *cpi, double section_err, int section_target_bandwitdh, double group_iiratio) -{ - int Q; - int num_mbs = cpi->common.MBs; - int target_norm_bits_per_mb = (512 * section_target_bandwitdh) / num_mbs; - int bits_per_mb_at_this_q; +static int estimate_kf_group_q(VP8_COMP *cpi, double section_err, + int section_target_bandwitdh, + double group_iiratio) { + int Q; + int num_mbs = cpi->common.MBs; + int target_norm_bits_per_mb = (512 * section_target_bandwitdh) / num_mbs; + int bits_per_mb_at_this_q; - double err_per_mb = section_err / num_mbs; - double err_correction_factor; - double speed_correction = 1.0; - double current_spend_ratio = 1.0; + double err_per_mb = section_err / num_mbs; + double err_correction_factor; + double speed_correction = 1.0; + double current_spend_ratio = 1.0; - double pow_highq = (POW1 < 0.6) ? POW1 + 0.3 : 0.90; - double pow_lowq = (POW1 < 0.7) ? 
POW1 + 0.1 : 0.80; + double pow_highq = (POW1 < 0.6) ? POW1 + 0.3 : 0.90; + double pow_lowq = (POW1 < 0.7) ? POW1 + 0.1 : 0.80; - double iiratio_correction_factor = 1.0; + double iiratio_correction_factor = 1.0; - double combined_correction_factor; + double combined_correction_factor; - /* Trap special case where the target is <= 0 */ - if (target_norm_bits_per_mb <= 0) - return MAXQ * 2; + /* Trap special case where the target is <= 0 */ + if (target_norm_bits_per_mb <= 0) return MAXQ * 2; - /* Calculate a corrective factor based on a rolling ratio of bits spent - * vs target bits - * This is clamped to the range 0.1 to 10.0 - */ - if (cpi->long_rolling_target_bits <= 0) - current_spend_ratio = 10.0; - else - { - current_spend_ratio = (double)cpi->long_rolling_actual_bits / (double)cpi->long_rolling_target_bits; - current_spend_ratio = (current_spend_ratio > 10.0) ? 10.0 : (current_spend_ratio < 0.1) ? 0.1 : current_spend_ratio; + /* Calculate a corrective factor based on a rolling ratio of bits spent + * vs target bits + * This is clamped to the range 0.1 to 10.0 + */ + if (cpi->long_rolling_target_bits <= 0) { + current_spend_ratio = 10.0; + } else { + current_spend_ratio = (double)cpi->long_rolling_actual_bits / + (double)cpi->long_rolling_target_bits; + current_spend_ratio = + (current_spend_ratio > 10.0) ? 10.0 : (current_spend_ratio < 0.1) + ? 0.1 + : current_spend_ratio; + } + + /* Calculate a correction factor based on the quality of prediction in + * the sequence as indicated by intra_inter error score ratio (IIRatio) + * The idea here is to favour subsampling in the hardest sections vs + * the easyest. 
+ */ + iiratio_correction_factor = 1.0 - ((group_iiratio - 6.0) * 0.1); + + if (iiratio_correction_factor < 0.5) iiratio_correction_factor = 0.5; + + /* Corrections for higher compression speed settings + * (reduced compression expected) + */ + if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) { + if (cpi->oxcf.cpu_used <= 5) { + speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04); + } else { + speed_correction = 1.25; } + } - /* Calculate a correction factor based on the quality of prediction in - * the sequence as indicated by intra_inter error score ratio (IIRatio) - * The idea here is to favour subsampling in the hardest sections vs - * the easyest. - */ - iiratio_correction_factor = 1.0 - ((group_iiratio - 6.0) * 0.1); + /* Combine the various factors calculated above */ + combined_correction_factor = + speed_correction * iiratio_correction_factor * current_spend_ratio; - if (iiratio_correction_factor < 0.5) - iiratio_correction_factor = 0.5; + /* Try and pick a Q that should be high enough to encode the content at + * the given rate. + */ + for (Q = 0; Q < MAXQ; ++Q) { + /* Error per MB based correction factor */ + err_correction_factor = + calc_correction_factor(err_per_mb, 150.0, pow_lowq, pow_highq, Q); - /* Corrections for higher compression speed settings - * (reduced compression expected) - */ - if ((cpi->compressor_speed == 3) || (cpi->compressor_speed == 1)) - { - if (cpi->oxcf.cpu_used <= 5) - speed_correction = 1.04 + (cpi->oxcf.cpu_used * 0.04); - else - speed_correction = 1.25; - } + bits_per_mb_at_this_q = + (int)(.5 + (err_correction_factor * combined_correction_factor * + (double)vp8_bits_per_mb[INTER_FRAME][Q])); - /* Combine the various factors calculated above */ - combined_correction_factor = speed_correction * iiratio_correction_factor * current_spend_ratio; + if (bits_per_mb_at_this_q <= target_norm_bits_per_mb) break; + } - /* Try and pick a Q that should be high enough to encode the content at - * the given rate. 
- */ - for (Q = 0; Q < MAXQ; Q++) - { - /* Error per MB based correction factor */ - err_correction_factor = - calc_correction_factor(err_per_mb, 150.0, pow_lowq, pow_highq, Q); + /* If we could not hit the target even at Max Q then estimate what Q + * would have been required + */ + while ((bits_per_mb_at_this_q > target_norm_bits_per_mb) && + (Q < (MAXQ * 2))) { + bits_per_mb_at_this_q = (int)(0.96 * bits_per_mb_at_this_q); + Q++; + } - bits_per_mb_at_this_q = - (int)(.5 + ( err_correction_factor * - combined_correction_factor * - (double)vp8_bits_per_mb[INTER_FRAME][Q]) ); + if (0) { + FILE *f = fopen("estkf_q.stt", "a"); + fprintf(f, "%8d %8d %8d %8.2f %8.3f %8.2f %8.3f %8.3f %8.3f %8d\n", + cpi->common.current_video_frame, bits_per_mb_at_this_q, + target_norm_bits_per_mb, err_per_mb, err_correction_factor, + current_spend_ratio, group_iiratio, iiratio_correction_factor, + (double)cpi->buffer_level / (double)cpi->oxcf.optimal_buffer_level, + Q); + fclose(f); + } - if (bits_per_mb_at_this_q <= target_norm_bits_per_mb) - break; - } - - /* If we could not hit the target even at Max Q then estimate what Q - * would have been required - */ - while ((bits_per_mb_at_this_q > target_norm_bits_per_mb) && (Q < (MAXQ * 2))) - { - - bits_per_mb_at_this_q = (int)(0.96 * bits_per_mb_at_this_q); - Q++; - } - - if (0) - { - FILE *f = fopen("estkf_q.stt", "a"); - fprintf(f, "%8d %8d %8d %8.2f %8.3f %8.2f %8.3f %8.3f %8.3f %8d\n", cpi->common.current_video_frame, bits_per_mb_at_this_q, - target_norm_bits_per_mb, err_per_mb, err_correction_factor, - current_spend_ratio, group_iiratio, iiratio_correction_factor, - (double)cpi->buffer_level / (double)cpi->oxcf.optimal_buffer_level, Q); - fclose(f); - } - - return Q; + return Q; } -void vp8_init_second_pass(VP8_COMP *cpi) -{ - FIRSTPASS_STATS this_frame; - FIRSTPASS_STATS *start_pos; +void vp8_init_second_pass(VP8_COMP *cpi) { + FIRSTPASS_STATS this_frame; + FIRSTPASS_STATS *start_pos; - double two_pass_min_rate = 
(double)(cpi->oxcf.target_bandwidth * cpi->oxcf.two_pass_vbrmin_section / 100); + double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth * + cpi->oxcf.two_pass_vbrmin_section / 100); - zero_stats(&cpi->twopass.total_stats); - zero_stats(&cpi->twopass.total_left_stats); + zero_stats(&cpi->twopass.total_stats); + zero_stats(&cpi->twopass.total_left_stats); - if (!cpi->twopass.stats_in_end) - return; + if (!cpi->twopass.stats_in_end) return; - cpi->twopass.total_stats = *cpi->twopass.stats_in_end; - cpi->twopass.total_left_stats = cpi->twopass.total_stats; + cpi->twopass.total_stats = *cpi->twopass.stats_in_end; + cpi->twopass.total_left_stats = cpi->twopass.total_stats; - /* each frame can have a different duration, as the frame rate in the - * source isn't guaranteed to be constant. The frame rate prior to - * the first frame encoded in the second pass is a guess. However the - * sum duration is not. Its calculated based on the actual durations of - * all frames from the first pass. - */ - vp8_new_framerate(cpi, 10000000.0 * cpi->twopass.total_stats.count / cpi->twopass.total_stats.duration); + /* each frame can have a different duration, as the frame rate in the + * source isn't guaranteed to be constant. The frame rate prior to + * the first frame encoded in the second pass is a guess. However the + * sum duration is not. Its calculated based on the actual durations of + * all frames from the first pass. 
+ */ + vp8_new_framerate(cpi, 10000000.0 * cpi->twopass.total_stats.count / + cpi->twopass.total_stats.duration); - cpi->output_framerate = cpi->framerate; - cpi->twopass.bits_left = (int64_t)(cpi->twopass.total_stats.duration * cpi->oxcf.target_bandwidth / 10000000.0) ; - cpi->twopass.bits_left -= (int64_t)(cpi->twopass.total_stats.duration * two_pass_min_rate / 10000000.0); + cpi->output_framerate = cpi->framerate; + cpi->twopass.bits_left = (int64_t)(cpi->twopass.total_stats.duration * + cpi->oxcf.target_bandwidth / 10000000.0); + cpi->twopass.bits_left -= (int64_t)(cpi->twopass.total_stats.duration * + two_pass_min_rate / 10000000.0); - /* Calculate a minimum intra value to be used in determining the IIratio - * scores used in the second pass. We have this minimum to make sure - * that clips that are static but "low complexity" in the intra domain - * are still boosted appropriately for KF/GF/ARF - */ - cpi->twopass.kf_intra_err_min = KF_MB_INTRA_MIN * cpi->common.MBs; - cpi->twopass.gf_intra_err_min = GF_MB_INTRA_MIN * cpi->common.MBs; + /* Calculate a minimum intra value to be used in determining the IIratio + * scores used in the second pass. 
We have this minimum to make sure + * that clips that are static but "low complexity" in the intra domain + * are still boosted appropriately for KF/GF/ARF + */ + cpi->twopass.kf_intra_err_min = KF_MB_INTRA_MIN * cpi->common.MBs; + cpi->twopass.gf_intra_err_min = GF_MB_INTRA_MIN * cpi->common.MBs; - /* Scan the first pass file and calculate an average Intra / Inter error - * score ratio for the sequence - */ - { - double sum_iiratio = 0.0; - double IIRatio; + /* Scan the first pass file and calculate an average Intra / Inter error + * score ratio for the sequence + */ + { + double sum_iiratio = 0.0; + double IIRatio; - start_pos = cpi->twopass.stats_in; /* Note starting "file" position */ + start_pos = cpi->twopass.stats_in; /* Note starting "file" position */ - while (input_stats(cpi, &this_frame) != EOF) - { - IIRatio = this_frame.intra_error / DOUBLE_DIVIDE_CHECK(this_frame.coded_error); - IIRatio = (IIRatio < 1.0) ? 1.0 : (IIRatio > 20.0) ? 20.0 : IIRatio; - sum_iiratio += IIRatio; - } - - cpi->twopass.avg_iiratio = sum_iiratio / DOUBLE_DIVIDE_CHECK((double)cpi->twopass.total_stats.count); - - /* Reset file position */ - reset_fpf_position(cpi, start_pos); + while (input_stats(cpi, &this_frame) != EOF) { + IIRatio = + this_frame.intra_error / DOUBLE_DIVIDE_CHECK(this_frame.coded_error); + IIRatio = (IIRatio < 1.0) ? 1.0 : (IIRatio > 20.0) ? 
20.0 : IIRatio; + sum_iiratio += IIRatio; } - /* Scan the first pass file and calculate a modified total error based - * upon the bias/power function used to allocate bits - */ - { - start_pos = cpi->twopass.stats_in; /* Note starting "file" position */ + cpi->twopass.avg_iiratio = + sum_iiratio / + DOUBLE_DIVIDE_CHECK((double)cpi->twopass.total_stats.count); - cpi->twopass.modified_error_total = 0.0; - cpi->twopass.modified_error_used = 0.0; + /* Reset file position */ + reset_fpf_position(cpi, start_pos); + } - while (input_stats(cpi, &this_frame) != EOF) - { - cpi->twopass.modified_error_total += calculate_modified_err(cpi, &this_frame); - } - cpi->twopass.modified_error_left = cpi->twopass.modified_error_total; + /* Scan the first pass file and calculate a modified total error based + * upon the bias/power function used to allocate bits + */ + { + start_pos = cpi->twopass.stats_in; /* Note starting "file" position */ - reset_fpf_position(cpi, start_pos); /* Reset file position */ + cpi->twopass.modified_error_total = 0.0; + cpi->twopass.modified_error_used = 0.0; + while (input_stats(cpi, &this_frame) != EOF) { + cpi->twopass.modified_error_total += + calculate_modified_err(cpi, &this_frame); } + cpi->twopass.modified_error_left = cpi->twopass.modified_error_total; + + reset_fpf_position(cpi, start_pos); /* Reset file position */ + } } -void vp8_end_second_pass(VP8_COMP *cpi) -{ - (void)cpi; -} +void vp8_end_second_pass(VP8_COMP *cpi) { (void)cpi; } /* This function gives and estimate of how badly we believe the prediction * quality is decaying from frame to frame. 
*/ -static double get_prediction_decay_rate(VP8_COMP *cpi, FIRSTPASS_STATS *next_frame) -{ - double prediction_decay_rate; - double motion_decay; - double motion_pct = next_frame->pcnt_motion; - (void)cpi; +static double get_prediction_decay_rate(VP8_COMP *cpi, + FIRSTPASS_STATS *next_frame) { + double prediction_decay_rate; + double motion_decay; + double motion_pct = next_frame->pcnt_motion; + (void)cpi; - /* Initial basis is the % mbs inter coded */ - prediction_decay_rate = next_frame->pcnt_inter; + /* Initial basis is the % mbs inter coded */ + prediction_decay_rate = next_frame->pcnt_inter; - /* High % motion -> somewhat higher decay rate */ - motion_decay = (1.0 - (motion_pct / 20.0)); - if (motion_decay < prediction_decay_rate) - prediction_decay_rate = motion_decay; + /* High % motion -> somewhat higher decay rate */ + motion_decay = (1.0 - (motion_pct / 20.0)); + if (motion_decay < prediction_decay_rate) { + prediction_decay_rate = motion_decay; + } - /* Adjustment to decay rate based on speed of motion */ - { - double this_mv_rabs; - double this_mv_cabs; - double distance_factor; + /* Adjustment to decay rate based on speed of motion */ + { + double this_mv_rabs; + double this_mv_cabs; + double distance_factor; - this_mv_rabs = fabs(next_frame->mvr_abs * motion_pct); - this_mv_cabs = fabs(next_frame->mvc_abs * motion_pct); + this_mv_rabs = fabs(next_frame->mvr_abs * motion_pct); + this_mv_cabs = fabs(next_frame->mvc_abs * motion_pct); - distance_factor = sqrt((this_mv_rabs * this_mv_rabs) + - (this_mv_cabs * this_mv_cabs)) / 250.0; - distance_factor = ((distance_factor > 1.0) - ? 0.0 : (1.0 - distance_factor)); - if (distance_factor < prediction_decay_rate) - prediction_decay_rate = distance_factor; + distance_factor = + sqrt((this_mv_rabs * this_mv_rabs) + (this_mv_cabs * this_mv_cabs)) / + 250.0; + distance_factor = ((distance_factor > 1.0) ? 
0.0 : (1.0 - distance_factor)); + if (distance_factor < prediction_decay_rate) { + prediction_decay_rate = distance_factor; } + } - return prediction_decay_rate; + return prediction_decay_rate; } /* Function to test for a condition where a complex transition is followed * by a static section. For example in slide shows where there is a fade * between slides. This is to help with more optimal kf and gf positioning. */ -static int detect_transition_to_still( - VP8_COMP *cpi, - int frame_interval, - int still_interval, - double loop_decay_rate, - double decay_accumulator ) -{ - int trans_to_still = 0; +static int detect_transition_to_still(VP8_COMP *cpi, int frame_interval, + int still_interval, + double loop_decay_rate, + double decay_accumulator) { + int trans_to_still = 0; - /* Break clause to detect very still sections after motion - * For example a static image after a fade or other transition - * instead of a clean scene cut. - */ - if ( (frame_interval > MIN_GF_INTERVAL) && - (loop_decay_rate >= 0.999) && - (decay_accumulator < 0.9) ) - { - int j; - FIRSTPASS_STATS * position = cpi->twopass.stats_in; - FIRSTPASS_STATS tmp_next_frame; - double decay_rate; + /* Break clause to detect very still sections after motion + * For example a static image after a fade or other transition + * instead of a clean scene cut. + */ + if ((frame_interval > MIN_GF_INTERVAL) && (loop_decay_rate >= 0.999) && + (decay_accumulator < 0.9)) { + int j; + FIRSTPASS_STATS *position = cpi->twopass.stats_in; + FIRSTPASS_STATS tmp_next_frame; + double decay_rate; - /* Look ahead a few frames to see if static condition persists... */ - for ( j = 0; j < still_interval; j++ ) - { - if (EOF == input_stats(cpi, &tmp_next_frame)) - break; + /* Look ahead a few frames to see if static condition persists... 
*/ + for (j = 0; j < still_interval; ++j) { + if (EOF == input_stats(cpi, &tmp_next_frame)) break; - decay_rate = get_prediction_decay_rate(cpi, &tmp_next_frame); - if ( decay_rate < 0.999 ) - break; - } - /* Reset file position */ - reset_fpf_position(cpi, position); - - /* Only if it does do we signal a transition to still */ - if ( j == still_interval ) - trans_to_still = 1; + decay_rate = get_prediction_decay_rate(cpi, &tmp_next_frame); + if (decay_rate < 0.999) break; } + /* Reset file position */ + reset_fpf_position(cpi, position); - return trans_to_still; + /* Only if it does do we signal a transition to still */ + if (j == still_interval) trans_to_still = 1; + } + + return trans_to_still; } /* This function detects a flash through the high relative pcnt_second_ref * score in the frame following a flash frame. The offset passed in should * reflect this */ -static int detect_flash( VP8_COMP *cpi, int offset ) -{ - FIRSTPASS_STATS next_frame; +static int detect_flash(VP8_COMP *cpi, int offset) { + FIRSTPASS_STATS next_frame; - int flash_detected = 0; + int flash_detected = 0; - /* Read the frame data. */ - /* The return is 0 (no flash detected) if not a valid frame */ - if ( read_frame_stats(cpi, &next_frame, offset) != EOF ) - { - /* What we are looking for here is a situation where there is a - * brief break in prediction (such as a flash) but subsequent frames - * are reasonably well predicted by an earlier (pre flash) frame. - * The recovery after a flash is indicated by a high pcnt_second_ref - * comapred to pcnt_inter. - */ - if ( (next_frame.pcnt_second_ref > next_frame.pcnt_inter) && - (next_frame.pcnt_second_ref >= 0.5 ) ) - { - flash_detected = 1; + /* Read the frame data. 
*/ + /* The return is 0 (no flash detected) if not a valid frame */ + if (read_frame_stats(cpi, &next_frame, offset) != EOF) { + /* What we are looking for here is a situation where there is a + * brief break in prediction (such as a flash) but subsequent frames + * are reasonably well predicted by an earlier (pre flash) frame. + * The recovery after a flash is indicated by a high pcnt_second_ref + * comapred to pcnt_inter. + */ + if ((next_frame.pcnt_second_ref > next_frame.pcnt_inter) && + (next_frame.pcnt_second_ref >= 0.5)) { + flash_detected = 1; - /*if (1) - { - FILE *f = fopen("flash.stt", "a"); - fprintf(f, "%8.0f %6.2f %6.2f\n", - next_frame.frame, - next_frame.pcnt_inter, - next_frame.pcnt_second_ref); - fclose(f); - }*/ - } + /*if (1) + { + FILE *f = fopen("flash.stt", "a"); + fprintf(f, "%8.0f %6.2f %6.2f\n", + next_frame.frame, + next_frame.pcnt_inter, + next_frame.pcnt_second_ref); + fclose(f); + }*/ } + } - return flash_detected; + return flash_detected; } /* Update the motion related elements to the GF arf boost calculation */ -static void accumulate_frame_motion_stats( - VP8_COMP *cpi, - FIRSTPASS_STATS * this_frame, - double * this_frame_mv_in_out, - double * mv_in_out_accumulator, - double * abs_mv_in_out_accumulator, - double * mv_ratio_accumulator ) -{ - double this_frame_mvr_ratio; - double this_frame_mvc_ratio; - double motion_pct; - (void)cpi; +static void accumulate_frame_motion_stats(VP8_COMP *cpi, + FIRSTPASS_STATS *this_frame, + double *this_frame_mv_in_out, + double *mv_in_out_accumulator, + double *abs_mv_in_out_accumulator, + double *mv_ratio_accumulator) { + double this_frame_mvr_ratio; + double this_frame_mvc_ratio; + double motion_pct; + (void)cpi; - /* Accumulate motion stats. */ - motion_pct = this_frame->pcnt_motion; + /* Accumulate motion stats. 
*/ + motion_pct = this_frame->pcnt_motion; - /* Accumulate Motion In/Out of frame stats */ - *this_frame_mv_in_out = this_frame->mv_in_out_count * motion_pct; - *mv_in_out_accumulator += this_frame->mv_in_out_count * motion_pct; - *abs_mv_in_out_accumulator += - fabs(this_frame->mv_in_out_count * motion_pct); + /* Accumulate Motion In/Out of frame stats */ + *this_frame_mv_in_out = this_frame->mv_in_out_count * motion_pct; + *mv_in_out_accumulator += this_frame->mv_in_out_count * motion_pct; + *abs_mv_in_out_accumulator += fabs(this_frame->mv_in_out_count * motion_pct); - /* Accumulate a measure of how uniform (or conversely how random) - * the motion field is. (A ratio of absmv / mv) - */ - if (motion_pct > 0.05) - { - this_frame_mvr_ratio = fabs(this_frame->mvr_abs) / - DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVr)); + /* Accumulate a measure of how uniform (or conversely how random) + * the motion field is. (A ratio of absmv / mv) + */ + if (motion_pct > 0.05) { + this_frame_mvr_ratio = + fabs(this_frame->mvr_abs) / DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVr)); - this_frame_mvc_ratio = fabs(this_frame->mvc_abs) / - DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVc)); + this_frame_mvc_ratio = + fabs(this_frame->mvc_abs) / DOUBLE_DIVIDE_CHECK(fabs(this_frame->MVc)); - *mv_ratio_accumulator += - (this_frame_mvr_ratio < this_frame->mvr_abs) - ? (this_frame_mvr_ratio * motion_pct) - : this_frame->mvr_abs * motion_pct; + *mv_ratio_accumulator += (this_frame_mvr_ratio < this_frame->mvr_abs) + ? (this_frame_mvr_ratio * motion_pct) + : this_frame->mvr_abs * motion_pct; - *mv_ratio_accumulator += - (this_frame_mvc_ratio < this_frame->mvc_abs) - ? (this_frame_mvc_ratio * motion_pct) - : this_frame->mvc_abs * motion_pct; - - } + *mv_ratio_accumulator += (this_frame_mvc_ratio < this_frame->mvc_abs) + ? (this_frame_mvc_ratio * motion_pct) + : this_frame->mvc_abs * motion_pct; + } } /* Calculate a baseline boost number for the current frame. 
*/ -static double calc_frame_boost( - VP8_COMP *cpi, - FIRSTPASS_STATS * this_frame, - double this_frame_mv_in_out ) -{ - double frame_boost; +static double calc_frame_boost(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame, + double this_frame_mv_in_out) { + double frame_boost; - /* Underlying boost factor is based on inter intra error ratio */ - if (this_frame->intra_error > cpi->twopass.gf_intra_err_min) - frame_boost = (IIFACTOR * this_frame->intra_error / - DOUBLE_DIVIDE_CHECK(this_frame->coded_error)); - else - frame_boost = (IIFACTOR * cpi->twopass.gf_intra_err_min / - DOUBLE_DIVIDE_CHECK(this_frame->coded_error)); + /* Underlying boost factor is based on inter intra error ratio */ + if (this_frame->intra_error > cpi->twopass.gf_intra_err_min) { + frame_boost = (IIFACTOR * this_frame->intra_error / + DOUBLE_DIVIDE_CHECK(this_frame->coded_error)); + } else { + frame_boost = (IIFACTOR * cpi->twopass.gf_intra_err_min / + DOUBLE_DIVIDE_CHECK(this_frame->coded_error)); + } - /* Increase boost for frames where new data coming into frame - * (eg zoom out). Slightly reduce boost if there is a net balance - * of motion out of the frame (zoom in). - * The range for this_frame_mv_in_out is -1.0 to +1.0 - */ - if (this_frame_mv_in_out > 0.0) - frame_boost += frame_boost * (this_frame_mv_in_out * 2.0); + /* Increase boost for frames where new data coming into frame + * (eg zoom out). Slightly reduce boost if there is a net balance + * of motion out of the frame (zoom in). 
+ * The range for this_frame_mv_in_out is -1.0 to +1.0 + */ + if (this_frame_mv_in_out > 0.0) { + frame_boost += frame_boost * (this_frame_mv_in_out * 2.0); /* In extreme case boost is halved */ - else - frame_boost += frame_boost * (this_frame_mv_in_out / 2.0); + } else { + frame_boost += frame_boost * (this_frame_mv_in_out / 2.0); + } - /* Clip to maximum */ - if (frame_boost > GF_RMAX) - frame_boost = GF_RMAX; + /* Clip to maximum */ + if (frame_boost > GF_RMAX) frame_boost = GF_RMAX; - return frame_boost; + return frame_boost; } #if NEW_BOOST -static int calc_arf_boost( - VP8_COMP *cpi, - int offset, - int f_frames, - int b_frames, - int *f_boost, - int *b_boost ) -{ - FIRSTPASS_STATS this_frame; +static int calc_arf_boost(VP8_COMP *cpi, int offset, int f_frames, int b_frames, + int *f_boost, int *b_boost) { + FIRSTPASS_STATS this_frame; - int i; - double boost_score = 0.0; - double mv_ratio_accumulator = 0.0; - double decay_accumulator = 1.0; - double this_frame_mv_in_out = 0.0; - double mv_in_out_accumulator = 0.0; - double abs_mv_in_out_accumulator = 0.0; - double r; - int flash_detected = 0; + int i; + double boost_score = 0.0; + double mv_ratio_accumulator = 0.0; + double decay_accumulator = 1.0; + double this_frame_mv_in_out = 0.0; + double mv_in_out_accumulator = 0.0; + double abs_mv_in_out_accumulator = 0.0; + double r; + int flash_detected = 0; - /* Search forward from the proposed arf/next gf position */ - for ( i = 0; i < f_frames; i++ ) - { - if ( read_frame_stats(cpi, &this_frame, (i+offset)) == EOF ) - break; + /* Search forward from the proposed arf/next gf position */ + for (i = 0; i < f_frames; ++i) { + if (read_frame_stats(cpi, &this_frame, (i + offset)) == EOF) break; - /* Update the motion related elements to the boost calculation */ - accumulate_frame_motion_stats( cpi, &this_frame, - &this_frame_mv_in_out, &mv_in_out_accumulator, - &abs_mv_in_out_accumulator, &mv_ratio_accumulator ); + /* Update the motion related elements to the boost 
calculation */ + accumulate_frame_motion_stats( + cpi, &this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator, + &abs_mv_in_out_accumulator, &mv_ratio_accumulator); - /* Calculate the baseline boost number for this frame */ - r = calc_frame_boost( cpi, &this_frame, this_frame_mv_in_out ); + /* Calculate the baseline boost number for this frame */ + r = calc_frame_boost(cpi, &this_frame, this_frame_mv_in_out); - /* We want to discount the the flash frame itself and the recovery - * frame that follows as both will have poor scores. - */ - flash_detected = detect_flash(cpi, (i+offset)) || - detect_flash(cpi, (i+offset+1)); + /* We want to discount the the flash frame itself and the recovery + * frame that follows as both will have poor scores. + */ + flash_detected = + detect_flash(cpi, (i + offset)) || detect_flash(cpi, (i + offset + 1)); - /* Cumulative effect of prediction quality decay */ - if ( !flash_detected ) - { - decay_accumulator = - decay_accumulator * - get_prediction_decay_rate(cpi, &this_frame); - decay_accumulator = - decay_accumulator < 0.1 ? 0.1 : decay_accumulator; - } - boost_score += (decay_accumulator * r); + /* Cumulative effect of prediction quality decay */ + if (!flash_detected) { + decay_accumulator = + decay_accumulator * get_prediction_decay_rate(cpi, &this_frame); + decay_accumulator = decay_accumulator < 0.1 ? 0.1 : decay_accumulator; + } + boost_score += (decay_accumulator * r); - /* Break out conditions. */ - if ( (!flash_detected) && - ((mv_ratio_accumulator > 100.0) || - (abs_mv_in_out_accumulator > 3.0) || - (mv_in_out_accumulator < -2.0) ) ) - { - break; - } + /* Break out conditions. 
*/ + if ((!flash_detected) && + ((mv_ratio_accumulator > 100.0) || (abs_mv_in_out_accumulator > 3.0) || + (mv_in_out_accumulator < -2.0))) { + break; + } + } + + *f_boost = (int)(boost_score * 100.0) >> 4; + + /* Reset for backward looking loop */ + boost_score = 0.0; + mv_ratio_accumulator = 0.0; + decay_accumulator = 1.0; + this_frame_mv_in_out = 0.0; + mv_in_out_accumulator = 0.0; + abs_mv_in_out_accumulator = 0.0; + + /* Search forward from the proposed arf/next gf position */ + for (i = -1; i >= -b_frames; i--) { + if (read_frame_stats(cpi, &this_frame, (i + offset)) == EOF) break; + + /* Update the motion related elements to the boost calculation */ + accumulate_frame_motion_stats( + cpi, &this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator, + &abs_mv_in_out_accumulator, &mv_ratio_accumulator); + + /* Calculate the baseline boost number for this frame */ + r = calc_frame_boost(cpi, &this_frame, this_frame_mv_in_out); + + /* We want to discount the the flash frame itself and the recovery + * frame that follows as both will have poor scores. + */ + flash_detected = + detect_flash(cpi, (i + offset)) || detect_flash(cpi, (i + offset + 1)); + + /* Cumulative effect of prediction quality decay */ + if (!flash_detected) { + decay_accumulator = + decay_accumulator * get_prediction_decay_rate(cpi, &this_frame); + decay_accumulator = decay_accumulator < 0.1 ? 
0.1 : decay_accumulator; } - *f_boost = (int)(boost_score * 100.0) >> 4; + boost_score += (decay_accumulator * r); - /* Reset for backward looking loop */ - boost_score = 0.0; - mv_ratio_accumulator = 0.0; - decay_accumulator = 1.0; - this_frame_mv_in_out = 0.0; - mv_in_out_accumulator = 0.0; - abs_mv_in_out_accumulator = 0.0; - - /* Search forward from the proposed arf/next gf position */ - for ( i = -1; i >= -b_frames; i-- ) - { - if ( read_frame_stats(cpi, &this_frame, (i+offset)) == EOF ) - break; - - /* Update the motion related elements to the boost calculation */ - accumulate_frame_motion_stats( cpi, &this_frame, - &this_frame_mv_in_out, &mv_in_out_accumulator, - &abs_mv_in_out_accumulator, &mv_ratio_accumulator ); - - /* Calculate the baseline boost number for this frame */ - r = calc_frame_boost( cpi, &this_frame, this_frame_mv_in_out ); - - /* We want to discount the the flash frame itself and the recovery - * frame that follows as both will have poor scores. - */ - flash_detected = detect_flash(cpi, (i+offset)) || - detect_flash(cpi, (i+offset+1)); - - /* Cumulative effect of prediction quality decay */ - if ( !flash_detected ) - { - decay_accumulator = - decay_accumulator * - get_prediction_decay_rate(cpi, &this_frame); - decay_accumulator = - decay_accumulator < 0.1 ? 0.1 : decay_accumulator; - } - - boost_score += (decay_accumulator * r); - - /* Break out conditions. */ - if ( (!flash_detected) && - ((mv_ratio_accumulator > 100.0) || - (abs_mv_in_out_accumulator > 3.0) || - (mv_in_out_accumulator < -2.0) ) ) - { - break; - } + /* Break out conditions. */ + if ((!flash_detected) && + ((mv_ratio_accumulator > 100.0) || (abs_mv_in_out_accumulator > 3.0) || + (mv_in_out_accumulator < -2.0))) { + break; } - *b_boost = (int)(boost_score * 100.0) >> 4; + } + *b_boost = (int)(boost_score * 100.0) >> 4; - return (*f_boost + *b_boost); + return (*f_boost + *b_boost); } #endif /* Analyse and define a gf/arf group . 
*/ -static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) -{ - FIRSTPASS_STATS next_frame; - FIRSTPASS_STATS *start_pos; - int i; - double r; - double boost_score = 0.0; - double old_boost_score = 0.0; - double gf_group_err = 0.0; - double gf_first_frame_err = 0.0; - double mod_frame_err = 0.0; +static void define_gf_group(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) { + FIRSTPASS_STATS next_frame; + FIRSTPASS_STATS *start_pos; + int i; + double r; + double boost_score = 0.0; + double old_boost_score = 0.0; + double gf_group_err = 0.0; + double gf_first_frame_err = 0.0; + double mod_frame_err = 0.0; - double mv_ratio_accumulator = 0.0; - double decay_accumulator = 1.0; + double mv_ratio_accumulator = 0.0; + double decay_accumulator = 1.0; - double loop_decay_rate = 1.00; /* Starting decay rate */ + double loop_decay_rate = 1.00; /* Starting decay rate */ - double this_frame_mv_in_out = 0.0; - double mv_in_out_accumulator = 0.0; - double abs_mv_in_out_accumulator = 0.0; - double mod_err_per_mb_accumulator = 0.0; + double this_frame_mv_in_out = 0.0; + double mv_in_out_accumulator = 0.0; + double abs_mv_in_out_accumulator = 0.0; + double mod_err_per_mb_accumulator = 0.0; - int max_bits = frame_max_bits(cpi); /* Max for a single frame */ + int max_bits = frame_max_bits(cpi); /* Max for a single frame */ - unsigned int allow_alt_ref = - cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames; + unsigned int allow_alt_ref = + cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames; - int alt_boost = 0; - int f_boost = 0; - int b_boost = 0; - int flash_detected; + int alt_boost = 0; + int f_boost = 0; + int b_boost = 0; + int flash_detected; - cpi->twopass.gf_group_bits = 0; - cpi->twopass.gf_decay_rate = 0; + cpi->twopass.gf_group_bits = 0; + cpi->twopass.gf_decay_rate = 0; - vp8_clear_system_state(); + vp8_clear_system_state(); - start_pos = cpi->twopass.stats_in; + start_pos = cpi->twopass.stats_in; - memset(&next_frame, 0, sizeof(next_frame)); /* assure clean 
*/ + memset(&next_frame, 0, sizeof(next_frame)); /* assure clean */ - /* Load stats for the current frame. */ + /* Load stats for the current frame. */ + mod_frame_err = calculate_modified_err(cpi, this_frame); + + /* Note the error of the frame at the start of the group (this will be + * the GF frame error if we code a normal gf + */ + gf_first_frame_err = mod_frame_err; + + /* Special treatment if the current frame is a key frame (which is also + * a gf). If it is then its error score (and hence bit allocation) need + * to be subtracted out from the calculation for the GF group + */ + if (cpi->common.frame_type == KEY_FRAME) gf_group_err -= gf_first_frame_err; + + /* Scan forward to try and work out how many frames the next gf group + * should contain and what level of boost is appropriate for the GF + * or ARF that will be coded with the group + */ + i = 0; + + while (((i < cpi->twopass.static_scene_max_gf_interval) || + ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL)) && + (i < cpi->twopass.frames_to_key)) { + i++; + + /* Accumulate error score of frames in this gf group */ mod_frame_err = calculate_modified_err(cpi, this_frame); - /* Note the error of the frame at the start of the group (this will be - * the GF frame error if we code a normal gf + gf_group_err += mod_frame_err; + + mod_err_per_mb_accumulator += + mod_frame_err / DOUBLE_DIVIDE_CHECK((double)cpi->common.MBs); + + if (EOF == input_stats(cpi, &next_frame)) break; + + /* Test for the case where there is a brief flash but the prediction + * quality back to an earlier frame is then restored. */ - gf_first_frame_err = mod_frame_err; + flash_detected = detect_flash(cpi, 0); - /* Special treatment if the current frame is a key frame (which is also - * a gf). 
If it is then its error score (and hence bit allocation) need - * to be subtracted out from the calculation for the GF group + /* Update the motion related elements to the boost calculation */ + accumulate_frame_motion_stats( + cpi, &next_frame, &this_frame_mv_in_out, &mv_in_out_accumulator, + &abs_mv_in_out_accumulator, &mv_ratio_accumulator); + + /* Calculate a baseline boost number for this frame */ + r = calc_frame_boost(cpi, &next_frame, this_frame_mv_in_out); + + /* Cumulative effect of prediction quality decay */ + if (!flash_detected) { + loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame); + decay_accumulator = decay_accumulator * loop_decay_rate; + decay_accumulator = decay_accumulator < 0.1 ? 0.1 : decay_accumulator; + } + boost_score += (decay_accumulator * r); + + /* Break clause to detect very still sections after motion + * For example a staic image after a fade or other transition. */ - if (cpi->common.frame_type == KEY_FRAME) - gf_group_err -= gf_first_frame_err; + if (detect_transition_to_still(cpi, i, 5, loop_decay_rate, + decay_accumulator)) { + allow_alt_ref = 0; + boost_score = old_boost_score; + break; + } - /* Scan forward to try and work out how many frames the next gf group - * should contain and what level of boost is appropriate for the GF - * or ARF that will be coded with the group - */ - i = 0; + /* Break out conditions. 
*/ + if ( + /* Break at cpi->max_gf_interval unless almost totally static */ + (i >= cpi->max_gf_interval && (decay_accumulator < 0.995)) || + ( + /* Dont break out with a very short interval */ + (i > MIN_GF_INTERVAL) && + /* Dont break out very close to a key frame */ + ((cpi->twopass.frames_to_key - i) >= MIN_GF_INTERVAL) && + ((boost_score > 20.0) || (next_frame.pcnt_inter < 0.75)) && + (!flash_detected) && ((mv_ratio_accumulator > 100.0) || + (abs_mv_in_out_accumulator > 3.0) || + (mv_in_out_accumulator < -2.0) || + ((boost_score - old_boost_score) < 2.0)))) { + boost_score = old_boost_score; + break; + } - while (((i < cpi->twopass.static_scene_max_gf_interval) || - ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL)) && - (i < cpi->twopass.frames_to_key)) - { - i++; + memcpy(this_frame, &next_frame, sizeof(*this_frame)); - /* Accumulate error score of frames in this gf group */ + old_boost_score = boost_score; + } + + cpi->twopass.gf_decay_rate = + (i > 0) ? (int)(100.0 * (1.0 - decay_accumulator)) / i : 0; + + /* When using CBR apply additional buffer related upper limits */ + if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + double max_boost; + + /* For cbr apply buffer related limits */ + if (cpi->drop_frames_allowed) { + int64_t df_buffer_level = cpi->oxcf.drop_frames_water_mark * + (cpi->oxcf.optimal_buffer_level / 100); + + if (cpi->buffer_level > df_buffer_level) { + max_boost = + ((double)((cpi->buffer_level - df_buffer_level) * 2 / 3) * 16.0) / + DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth); + } else { + max_boost = 0.0; + } + } else if (cpi->buffer_level > 0) { + max_boost = ((double)(cpi->buffer_level * 2 / 3) * 16.0) / + DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth); + } else { + max_boost = 0.0; + } + + if (boost_score > max_boost) boost_score = max_boost; + } + + /* Dont allow conventional gf too near the next kf */ + if ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL) { + while (i < 
cpi->twopass.frames_to_key) { + i++; + + if (EOF == input_stats(cpi, this_frame)) break; + + if (i < cpi->twopass.frames_to_key) { mod_frame_err = calculate_modified_err(cpi, this_frame); - gf_group_err += mod_frame_err; - - mod_err_per_mb_accumulator += - mod_frame_err / DOUBLE_DIVIDE_CHECK((double)cpi->common.MBs); - - if (EOF == input_stats(cpi, &next_frame)) - break; - - /* Test for the case where there is a brief flash but the prediction - * quality back to an earlier frame is then restored. - */ - flash_detected = detect_flash(cpi, 0); - - /* Update the motion related elements to the boost calculation */ - accumulate_frame_motion_stats( cpi, &next_frame, - &this_frame_mv_in_out, &mv_in_out_accumulator, - &abs_mv_in_out_accumulator, &mv_ratio_accumulator ); - - /* Calculate a baseline boost number for this frame */ - r = calc_frame_boost( cpi, &next_frame, this_frame_mv_in_out ); - - /* Cumulative effect of prediction quality decay */ - if ( !flash_detected ) - { - loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame); - decay_accumulator = decay_accumulator * loop_decay_rate; - decay_accumulator = - decay_accumulator < 0.1 ? 0.1 : decay_accumulator; - } - boost_score += (decay_accumulator * r); - - /* Break clause to detect very still sections after motion - * For example a staic image after a fade or other transition. - */ - if ( detect_transition_to_still( cpi, i, 5, - loop_decay_rate, - decay_accumulator ) ) - { - allow_alt_ref = 0; - boost_score = old_boost_score; - break; - } - - /* Break out conditions. 
*/ - if ( - /* Break at cpi->max_gf_interval unless almost totally static */ - (i >= cpi->max_gf_interval && (decay_accumulator < 0.995)) || - ( - /* Dont break out with a very short interval */ - (i > MIN_GF_INTERVAL) && - /* Dont break out very close to a key frame */ - ((cpi->twopass.frames_to_key - i) >= MIN_GF_INTERVAL) && - ((boost_score > 20.0) || (next_frame.pcnt_inter < 0.75)) && - (!flash_detected) && - ((mv_ratio_accumulator > 100.0) || - (abs_mv_in_out_accumulator > 3.0) || - (mv_in_out_accumulator < -2.0) || - ((boost_score - old_boost_score) < 2.0)) - ) ) - { - boost_score = old_boost_score; - break; - } - - memcpy(this_frame, &next_frame, sizeof(*this_frame)); - - old_boost_score = boost_score; + } } + } - cpi->twopass.gf_decay_rate = - (i > 0) ? (int)(100.0 * (1.0 - decay_accumulator)) / i : 0; - - /* When using CBR apply additional buffer related upper limits */ - if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) - { - double max_boost; - - /* For cbr apply buffer related limits */ - if (cpi->drop_frames_allowed) - { - int64_t df_buffer_level = cpi->oxcf.drop_frames_water_mark * - (cpi->oxcf.optimal_buffer_level / 100); - - if (cpi->buffer_level > df_buffer_level) - max_boost = ((double)((cpi->buffer_level - df_buffer_level) * 2 / 3) * 16.0) / DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth); - else - max_boost = 0.0; - } - else if (cpi->buffer_level > 0) - { - max_boost = ((double)(cpi->buffer_level * 2 / 3) * 16.0) / DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth); - } - else - { - max_boost = 0.0; - } - - if (boost_score > max_boost) - boost_score = max_boost; - } - - /* Dont allow conventional gf too near the next kf */ - if ((cpi->twopass.frames_to_key - i) < MIN_GF_INTERVAL) - { - while (i < cpi->twopass.frames_to_key) - { - i++; - - if (EOF == input_stats(cpi, this_frame)) - break; - - if (i < cpi->twopass.frames_to_key) - { - mod_frame_err = calculate_modified_err(cpi, this_frame); - gf_group_err += mod_frame_err; - } - 
} - } - - cpi->gfu_boost = (int)(boost_score * 100.0) >> 4; + cpi->gfu_boost = (int)(boost_score * 100.0) >> 4; #if NEW_BOOST - /* Alterrnative boost calculation for alt ref */ - alt_boost = calc_arf_boost( cpi, 0, (i-1), (i-1), &f_boost, &b_boost ); + /* Alterrnative boost calculation for alt ref */ + alt_boost = calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, &b_boost); #endif - /* Should we use the alternate refernce frame */ - if (allow_alt_ref && - (i >= MIN_GF_INTERVAL) && - /* dont use ARF very near next kf */ - (i <= (cpi->twopass.frames_to_key - MIN_GF_INTERVAL)) && + /* Should we use the alternate refernce frame */ + if (allow_alt_ref && (i >= MIN_GF_INTERVAL) && + /* dont use ARF very near next kf */ + (i <= (cpi->twopass.frames_to_key - MIN_GF_INTERVAL)) && #if NEW_BOOST - ((next_frame.pcnt_inter > 0.75) || - (next_frame.pcnt_second_ref > 0.5)) && - ((mv_in_out_accumulator / (double)i > -0.2) || - (mv_in_out_accumulator > -2.0)) && - (b_boost > 100) && - (f_boost > 100) ) + ((next_frame.pcnt_inter > 0.75) || (next_frame.pcnt_second_ref > 0.5)) && + ((mv_in_out_accumulator / (double)i > -0.2) || + (mv_in_out_accumulator > -2.0)) && + (b_boost > 100) && (f_boost > 100)) #else - (next_frame.pcnt_inter > 0.75) && - ((mv_in_out_accumulator / (double)i > -0.2) || - (mv_in_out_accumulator > -2.0)) && - (cpi->gfu_boost > 100) && - (cpi->twopass.gf_decay_rate <= - (ARF_DECAY_THRESH + (cpi->gfu_boost / 200))) ) + (next_frame.pcnt_inter > 0.75) && + ((mv_in_out_accumulator / (double)i > -0.2) || + (mv_in_out_accumulator > -2.0)) && + (cpi->gfu_boost > 100) && (cpi->twopass.gf_decay_rate <= + (ARF_DECAY_THRESH + (cpi->gfu_boost / 200)))) #endif - { - int Boost; - int allocation_chunks; - int Q = (cpi->oxcf.fixed_q < 0) - ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q; - int tmp_q; - int arf_frame_bits = 0; - int group_bits; + { + int Boost; + int allocation_chunks; + int Q = + (cpi->oxcf.fixed_q < 0) ? 
cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q; + int tmp_q; + int arf_frame_bits = 0; + int group_bits; #if NEW_BOOST - cpi->gfu_boost = alt_boost; + cpi->gfu_boost = alt_boost; #endif - /* Estimate the bits to be allocated to the group as a whole */ - if ((cpi->twopass.kf_group_bits > 0) && - (cpi->twopass.kf_group_error_left > 0)) - { - group_bits = (int)((double)cpi->twopass.kf_group_bits * - (gf_group_err / (double)cpi->twopass.kf_group_error_left)); - } - else - group_bits = 0; - - /* Boost for arf frame */ -#if NEW_BOOST - Boost = (alt_boost * GFQ_ADJUSTMENT) / 100; -#else - Boost = (cpi->gfu_boost * 3 * GFQ_ADJUSTMENT) / (2 * 100); -#endif - Boost += (i * 50); - - /* Set max and minimum boost and hence minimum allocation */ - if (Boost > ((cpi->baseline_gf_interval + 1) * 200)) - Boost = ((cpi->baseline_gf_interval + 1) * 200); - else if (Boost < 125) - Boost = 125; - - allocation_chunks = (i * 100) + Boost; - - /* Normalize Altboost and allocations chunck down to prevent overflow */ - while (Boost > 1000) - { - Boost /= 2; - allocation_chunks /= 2; - } - - /* Calculate the number of bits to be spent on the arf based on the - * boost number - */ - arf_frame_bits = (int)((double)Boost * (group_bits / - (double)allocation_chunks)); - - /* Estimate if there are enough bits available to make worthwhile use - * of an arf. - */ - tmp_q = estimate_q(cpi, mod_frame_err, (int)arf_frame_bits); - - /* Only use an arf if it is likely we will be able to code - * it at a lower Q than the surrounding frames. - */ - if (tmp_q < cpi->worst_quality) - { - int half_gf_int; - int frames_after_arf; - int frames_bwd = cpi->oxcf.arnr_max_frames - 1; - int frames_fwd = cpi->oxcf.arnr_max_frames - 1; - - cpi->source_alt_ref_pending = 1; - - /* - * For alt ref frames the error score for the end frame of the - * group (the alt ref frame) should not contribute to the group - * total and hence the number of bit allocated to the group. 
- * Rather it forms part of the next group (it is the GF at the - * start of the next group) - * gf_group_err -= mod_frame_err; - * - * For alt ref frames alt ref frame is technically part of the - * GF frame for the next group but we always base the error - * calculation and bit allocation on the current group of frames. - * - * Set the interval till the next gf or arf. - * For ARFs this is the number of frames to be coded before the - * future frame that is coded as an ARF. - * The future frame itself is part of the next group - */ - cpi->baseline_gf_interval = i; - - /* - * Define the arnr filter width for this group of frames: - * We only filter frames that lie within a distance of half - * the GF interval from the ARF frame. We also have to trap - * cases where the filter extends beyond the end of clip. - * Note: this_frame->frame has been updated in the loop - * so it now points at the ARF frame. - */ - half_gf_int = cpi->baseline_gf_interval >> 1; - frames_after_arf = (int)(cpi->twopass.total_stats.count - - this_frame->frame - 1); - - switch (cpi->oxcf.arnr_type) - { - case 1: /* Backward filter */ - frames_fwd = 0; - if (frames_bwd > half_gf_int) - frames_bwd = half_gf_int; - break; - - case 2: /* Forward filter */ - if (frames_fwd > half_gf_int) - frames_fwd = half_gf_int; - if (frames_fwd > frames_after_arf) - frames_fwd = frames_after_arf; - frames_bwd = 0; - break; - - case 3: /* Centered filter */ - default: - frames_fwd >>= 1; - if (frames_fwd > frames_after_arf) - frames_fwd = frames_after_arf; - if (frames_fwd > half_gf_int) - frames_fwd = half_gf_int; - - frames_bwd = frames_fwd; - - /* For even length filter there is one more frame backward - * than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff. 
- */ - if (frames_bwd < half_gf_int) - frames_bwd += (cpi->oxcf.arnr_max_frames+1) & 0x1; - break; - } - - cpi->active_arnr_frames = frames_bwd + 1 + frames_fwd; - } - else - { - cpi->source_alt_ref_pending = 0; - cpi->baseline_gf_interval = i; - } - } - else - { - cpi->source_alt_ref_pending = 0; - cpi->baseline_gf_interval = i; - } - - /* - * Now decide how many bits should be allocated to the GF group as a - * proportion of those remaining in the kf group. - * The final key frame group in the clip is treated as a special case - * where cpi->twopass.kf_group_bits is tied to cpi->twopass.bits_left. - * This is also important for short clips where there may only be one - * key frame. - */ - if (cpi->twopass.frames_to_key >= (int)(cpi->twopass.total_stats.count - - cpi->common.current_video_frame)) - { - cpi->twopass.kf_group_bits = - (cpi->twopass.bits_left > 0) ? cpi->twopass.bits_left : 0; - } - - /* Calculate the bits to be allocated to the group as a whole */ + /* Estimate the bits to be allocated to the group as a whole */ if ((cpi->twopass.kf_group_bits > 0) && - (cpi->twopass.kf_group_error_left > 0)) - { - cpi->twopass.gf_group_bits = - (int64_t)(cpi->twopass.kf_group_bits * - (gf_group_err / cpi->twopass.kf_group_error_left)); + (cpi->twopass.kf_group_error_left > 0)) { + group_bits = + (int)((double)cpi->twopass.kf_group_bits * + (gf_group_err / (double)cpi->twopass.kf_group_error_left)); + } else { + group_bits = 0; } - else - cpi->twopass.gf_group_bits = 0; - cpi->twopass.gf_group_bits = - (cpi->twopass.gf_group_bits < 0) - ? 0 - : (cpi->twopass.gf_group_bits > cpi->twopass.kf_group_bits) - ? 
cpi->twopass.kf_group_bits : cpi->twopass.gf_group_bits; +/* Boost for arf frame */ +#if NEW_BOOST + Boost = (alt_boost * GFQ_ADJUSTMENT) / 100; +#else + Boost = (cpi->gfu_boost * 3 * GFQ_ADJUSTMENT) / (2 * 100); +#endif + Boost += (i * 50); - /* Clip cpi->twopass.gf_group_bits based on user supplied data rate - * variability limit (cpi->oxcf.two_pass_vbrmax_section) + /* Set max and minimum boost and hence minimum allocation */ + if (Boost > ((cpi->baseline_gf_interval + 1) * 200)) { + Boost = ((cpi->baseline_gf_interval + 1) * 200); + } else if (Boost < 125) { + Boost = 125; + } + + allocation_chunks = (i * 100) + Boost; + + /* Normalize Altboost and allocations chunck down to prevent overflow */ + while (Boost > 1000) { + Boost /= 2; + allocation_chunks /= 2; + } + + /* Calculate the number of bits to be spent on the arf based on the + * boost number */ - if (cpi->twopass.gf_group_bits > - (int64_t)max_bits * cpi->baseline_gf_interval) - cpi->twopass.gf_group_bits = - (int64_t)max_bits * cpi->baseline_gf_interval; + arf_frame_bits = + (int)((double)Boost * (group_bits / (double)allocation_chunks)); - /* Reset the file position */ + /* Estimate if there are enough bits available to make worthwhile use + * of an arf. + */ + tmp_q = estimate_q(cpi, mod_frame_err, (int)arf_frame_bits); + + /* Only use an arf if it is likely we will be able to code + * it at a lower Q than the surrounding frames. + */ + if (tmp_q < cpi->worst_quality) { + int half_gf_int; + int frames_after_arf; + int frames_bwd = cpi->oxcf.arnr_max_frames - 1; + int frames_fwd = cpi->oxcf.arnr_max_frames - 1; + + cpi->source_alt_ref_pending = 1; + + /* + * For alt ref frames the error score for the end frame of the + * group (the alt ref frame) should not contribute to the group + * total and hence the number of bit allocated to the group. 
+ * Rather it forms part of the next group (it is the GF at the + * start of the next group) + * gf_group_err -= mod_frame_err; + * + * For alt ref frames alt ref frame is technically part of the + * GF frame for the next group but we always base the error + * calculation and bit allocation on the current group of frames. + * + * Set the interval till the next gf or arf. + * For ARFs this is the number of frames to be coded before the + * future frame that is coded as an ARF. + * The future frame itself is part of the next group + */ + cpi->baseline_gf_interval = i; + + /* + * Define the arnr filter width for this group of frames: + * We only filter frames that lie within a distance of half + * the GF interval from the ARF frame. We also have to trap + * cases where the filter extends beyond the end of clip. + * Note: this_frame->frame has been updated in the loop + * so it now points at the ARF frame. + */ + half_gf_int = cpi->baseline_gf_interval >> 1; + frames_after_arf = + (int)(cpi->twopass.total_stats.count - this_frame->frame - 1); + + switch (cpi->oxcf.arnr_type) { + case 1: /* Backward filter */ + frames_fwd = 0; + if (frames_bwd > half_gf_int) frames_bwd = half_gf_int; + break; + + case 2: /* Forward filter */ + if (frames_fwd > half_gf_int) frames_fwd = half_gf_int; + if (frames_fwd > frames_after_arf) frames_fwd = frames_after_arf; + frames_bwd = 0; + break; + + case 3: /* Centered filter */ + default: + frames_fwd >>= 1; + if (frames_fwd > frames_after_arf) frames_fwd = frames_after_arf; + if (frames_fwd > half_gf_int) frames_fwd = half_gf_int; + + frames_bwd = frames_fwd; + + /* For even length filter there is one more frame backward + * than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff. 
+ */ + if (frames_bwd < half_gf_int) { + frames_bwd += (cpi->oxcf.arnr_max_frames + 1) & 0x1; + } + break; + } + + cpi->active_arnr_frames = frames_bwd + 1 + frames_fwd; + } else { + cpi->source_alt_ref_pending = 0; + cpi->baseline_gf_interval = i; + } + } else { + cpi->source_alt_ref_pending = 0; + cpi->baseline_gf_interval = i; + } + + /* + * Now decide how many bits should be allocated to the GF group as a + * proportion of those remaining in the kf group. + * The final key frame group in the clip is treated as a special case + * where cpi->twopass.kf_group_bits is tied to cpi->twopass.bits_left. + * This is also important for short clips where there may only be one + * key frame. + */ + if (cpi->twopass.frames_to_key >= + (int)(cpi->twopass.total_stats.count - cpi->common.current_video_frame)) { + cpi->twopass.kf_group_bits = + (cpi->twopass.bits_left > 0) ? cpi->twopass.bits_left : 0; + } + + /* Calculate the bits to be allocated to the group as a whole */ + if ((cpi->twopass.kf_group_bits > 0) && + (cpi->twopass.kf_group_error_left > 0)) { + cpi->twopass.gf_group_bits = + (int64_t)(cpi->twopass.kf_group_bits * + (gf_group_err / cpi->twopass.kf_group_error_left)); + } else { + cpi->twopass.gf_group_bits = 0; + } + + cpi->twopass.gf_group_bits = + (cpi->twopass.gf_group_bits < 0) + ? 0 + : (cpi->twopass.gf_group_bits > cpi->twopass.kf_group_bits) + ? cpi->twopass.kf_group_bits + : cpi->twopass.gf_group_bits; + + /* Clip cpi->twopass.gf_group_bits based on user supplied data rate + * variability limit (cpi->oxcf.two_pass_vbrmax_section) + */ + if (cpi->twopass.gf_group_bits > + (int64_t)max_bits * cpi->baseline_gf_interval) { + cpi->twopass.gf_group_bits = (int64_t)max_bits * cpi->baseline_gf_interval; + } + + /* Reset the file position */ + reset_fpf_position(cpi, start_pos); + + /* Update the record of error used so far (only done once per gf group) */ + cpi->twopass.modified_error_used += gf_group_err; + + /* Assign bits to the arf or gf. 
*/ + for (i = 0; i <= (cpi->source_alt_ref_pending && + cpi->common.frame_type != KEY_FRAME); + i++) { + int Boost; + int allocation_chunks; + int Q = + (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q; + int gf_bits; + + /* For ARF frames */ + if (cpi->source_alt_ref_pending && i == 0) { +#if NEW_BOOST + Boost = (alt_boost * GFQ_ADJUSTMENT) / 100; +#else + Boost = (cpi->gfu_boost * 3 * GFQ_ADJUSTMENT) / (2 * 100); +#endif + Boost += (cpi->baseline_gf_interval * 50); + + /* Set max and minimum boost and hence minimum allocation */ + if (Boost > ((cpi->baseline_gf_interval + 1) * 200)) { + Boost = ((cpi->baseline_gf_interval + 1) * 200); + } else if (Boost < 125) { + Boost = 125; + } + + allocation_chunks = ((cpi->baseline_gf_interval + 1) * 100) + Boost; + } + /* Else for standard golden frames */ + else { + /* boost based on inter / intra ratio of subsequent frames */ + Boost = (cpi->gfu_boost * GFQ_ADJUSTMENT) / 100; + + /* Set max and minimum boost and hence minimum allocation */ + if (Boost > (cpi->baseline_gf_interval * 150)) { + Boost = (cpi->baseline_gf_interval * 150); + } else if (Boost < 125) { + Boost = 125; + } + + allocation_chunks = (cpi->baseline_gf_interval * 100) + (Boost - 100); + } + + /* Normalize Altboost and allocations chunck down to prevent overflow */ + while (Boost > 1000) { + Boost /= 2; + allocation_chunks /= 2; + } + + /* Calculate the number of bits to be spent on the gf or arf based on + * the boost number + */ + gf_bits = (int)((double)Boost * + (cpi->twopass.gf_group_bits / (double)allocation_chunks)); + + /* If the frame that is to be boosted is simpler than the average for + * the gf/arf group then use an alternative calculation + * based on the error score of the frame itself + */ + if (mod_frame_err < gf_group_err / (double)cpi->baseline_gf_interval) { + double alt_gf_grp_bits; + int alt_gf_bits; + + alt_gf_grp_bits = + (double)cpi->twopass.kf_group_bits * + (mod_frame_err * 
(double)cpi->baseline_gf_interval) / + DOUBLE_DIVIDE_CHECK((double)cpi->twopass.kf_group_error_left); + + alt_gf_bits = + (int)((double)Boost * (alt_gf_grp_bits / (double)allocation_chunks)); + + if (gf_bits > alt_gf_bits) { + gf_bits = alt_gf_bits; + } + } + /* Else if it is harder than other frames in the group make sure it at + * least receives an allocation in keeping with its relative error + * score, otherwise it may be worse off than an "un-boosted" frame + */ + else { + int alt_gf_bits = + (int)((double)cpi->twopass.kf_group_bits * mod_frame_err / + DOUBLE_DIVIDE_CHECK((double)cpi->twopass.kf_group_error_left)); + + if (alt_gf_bits > gf_bits) { + gf_bits = alt_gf_bits; + } + } + + /* Apply an additional limit for CBR */ + if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + if (cpi->twopass.gf_bits > (int)(cpi->buffer_level >> 1)) { + cpi->twopass.gf_bits = (int)(cpi->buffer_level >> 1); + } + } + + /* Dont allow a negative value for gf_bits */ + if (gf_bits < 0) gf_bits = 0; + + /* Add in minimum for a frame */ + gf_bits += cpi->min_frame_bandwidth; + + if (i == 0) { + cpi->twopass.gf_bits = gf_bits; + } + if (i == 1 || (!cpi->source_alt_ref_pending && + (cpi->common.frame_type != KEY_FRAME))) { + /* Per frame bit target for this frame */ + cpi->per_frame_bandwidth = gf_bits; + } + } + + { + /* Adjust KF group bits and error remainin */ + cpi->twopass.kf_group_error_left -= (int64_t)gf_group_err; + cpi->twopass.kf_group_bits -= cpi->twopass.gf_group_bits; + + if (cpi->twopass.kf_group_bits < 0) cpi->twopass.kf_group_bits = 0; + + /* Note the error score left in the remaining frames of the group. 
+ * For normal GFs we want to remove the error score for the first + * frame of the group (except in Key frame case where this has + * already happened) + */ + if (!cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME) { + cpi->twopass.gf_group_error_left = + (int)(gf_group_err - gf_first_frame_err); + } else { + cpi->twopass.gf_group_error_left = (int)gf_group_err; + } + + cpi->twopass.gf_group_bits -= + cpi->twopass.gf_bits - cpi->min_frame_bandwidth; + + if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0; + + /* This condition could fail if there are two kfs very close together + * despite (MIN_GF_INTERVAL) and would cause a devide by 0 in the + * calculation of cpi->twopass.alt_extra_bits. + */ + if (cpi->baseline_gf_interval >= 3) { +#if NEW_BOOST + int boost = (cpi->source_alt_ref_pending) ? b_boost : cpi->gfu_boost; +#else + int boost = cpi->gfu_boost; +#endif + if (boost >= 150) { + int pct_extra; + + pct_extra = (boost - 100) / 50; + pct_extra = (pct_extra > 20) ? 20 : pct_extra; + + cpi->twopass.alt_extra_bits = + (int)(cpi->twopass.gf_group_bits * pct_extra) / 100; + cpi->twopass.gf_group_bits -= cpi->twopass.alt_extra_bits; + cpi->twopass.alt_extra_bits /= ((cpi->baseline_gf_interval - 1) >> 1); + } else { + cpi->twopass.alt_extra_bits = 0; + } + } else { + cpi->twopass.alt_extra_bits = 0; + } + } + + /* Adjustments based on a measure of complexity of the section */ + if (cpi->common.frame_type != KEY_FRAME) { + FIRSTPASS_STATS sectionstats; + double Ratio; + + zero_stats(§ionstats); reset_fpf_position(cpi, start_pos); - /* Update the record of error used so far (only done once per gf group) */ - cpi->twopass.modified_error_used += gf_group_err; - - /* Assign bits to the arf or gf. */ - for (i = 0; i <= (cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME); i++) { - int Boost; - int allocation_chunks; - int Q = (cpi->oxcf.fixed_q < 0) ? 
cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q; - int gf_bits; - - /* For ARF frames */ - if (cpi->source_alt_ref_pending && i == 0) - { -#if NEW_BOOST - Boost = (alt_boost * GFQ_ADJUSTMENT) / 100; -#else - Boost = (cpi->gfu_boost * 3 * GFQ_ADJUSTMENT) / (2 * 100); -#endif - Boost += (cpi->baseline_gf_interval * 50); - - /* Set max and minimum boost and hence minimum allocation */ - if (Boost > ((cpi->baseline_gf_interval + 1) * 200)) - Boost = ((cpi->baseline_gf_interval + 1) * 200); - else if (Boost < 125) - Boost = 125; - - allocation_chunks = - ((cpi->baseline_gf_interval + 1) * 100) + Boost; - } - /* Else for standard golden frames */ - else - { - /* boost based on inter / intra ratio of subsequent frames */ - Boost = (cpi->gfu_boost * GFQ_ADJUSTMENT) / 100; - - /* Set max and minimum boost and hence minimum allocation */ - if (Boost > (cpi->baseline_gf_interval * 150)) - Boost = (cpi->baseline_gf_interval * 150); - else if (Boost < 125) - Boost = 125; - - allocation_chunks = - (cpi->baseline_gf_interval * 100) + (Boost - 100); - } - - /* Normalize Altboost and allocations chunck down to prevent overflow */ - while (Boost > 1000) - { - Boost /= 2; - allocation_chunks /= 2; - } - - /* Calculate the number of bits to be spent on the gf or arf based on - * the boost number - */ - gf_bits = (int)((double)Boost * - (cpi->twopass.gf_group_bits / - (double)allocation_chunks)); - - /* If the frame that is to be boosted is simpler than the average for - * the gf/arf group then use an alternative calculation - * based on the error score of the frame itself - */ - if (mod_frame_err < gf_group_err / (double)cpi->baseline_gf_interval) - { - double alt_gf_grp_bits; - int alt_gf_bits; - - alt_gf_grp_bits = - (double)cpi->twopass.kf_group_bits * - (mod_frame_err * (double)cpi->baseline_gf_interval) / - DOUBLE_DIVIDE_CHECK((double)cpi->twopass.kf_group_error_left); - - alt_gf_bits = (int)((double)Boost * (alt_gf_grp_bits / - (double)allocation_chunks)); - - if (gf_bits > 
alt_gf_bits) - { - gf_bits = alt_gf_bits; - } - } - /* Else if it is harder than other frames in the group make sure it at - * least receives an allocation in keeping with its relative error - * score, otherwise it may be worse off than an "un-boosted" frame - */ - else - { - int alt_gf_bits = - (int)((double)cpi->twopass.kf_group_bits * - mod_frame_err / - DOUBLE_DIVIDE_CHECK((double)cpi->twopass.kf_group_error_left)); - - if (alt_gf_bits > gf_bits) - { - gf_bits = alt_gf_bits; - } - } - - /* Apply an additional limit for CBR */ - if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) - { - if (cpi->twopass.gf_bits > (int)(cpi->buffer_level >> 1)) - cpi->twopass.gf_bits = (int)(cpi->buffer_level >> 1); - } - - /* Dont allow a negative value for gf_bits */ - if (gf_bits < 0) - gf_bits = 0; - - /* Add in minimum for a frame */ - gf_bits += cpi->min_frame_bandwidth; - - if (i == 0) - { - cpi->twopass.gf_bits = gf_bits; - } - if (i == 1 || (!cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME))) - { - /* Per frame bit target for this frame */ - cpi->per_frame_bandwidth = gf_bits; - } + for (i = 0; i < cpi->baseline_gf_interval; ++i) { + input_stats(cpi, &next_frame); + accumulate_stats(§ionstats, &next_frame); } - { - /* Adjust KF group bits and error remainin */ - cpi->twopass.kf_group_error_left -= (int64_t)gf_group_err; - cpi->twopass.kf_group_bits -= cpi->twopass.gf_group_bits; + avg_stats(§ionstats); - if (cpi->twopass.kf_group_bits < 0) - cpi->twopass.kf_group_bits = 0; + cpi->twopass.section_intra_rating = + (unsigned int)(sectionstats.intra_error / + DOUBLE_DIVIDE_CHECK(sectionstats.coded_error)); - /* Note the error score left in the remaining frames of the group. 
- * For normal GFs we want to remove the error score for the first - * frame of the group (except in Key frame case where this has - * already happened) - */ - if (!cpi->source_alt_ref_pending && cpi->common.frame_type != KEY_FRAME) - cpi->twopass.gf_group_error_left = (int)(gf_group_err - - gf_first_frame_err); - else - cpi->twopass.gf_group_error_left = (int) gf_group_err; + Ratio = sectionstats.intra_error / + DOUBLE_DIVIDE_CHECK(sectionstats.coded_error); + cpi->twopass.section_max_qfactor = 1.0 - ((Ratio - 10.0) * 0.025); - cpi->twopass.gf_group_bits -= cpi->twopass.gf_bits - cpi->min_frame_bandwidth; - - if (cpi->twopass.gf_group_bits < 0) - cpi->twopass.gf_group_bits = 0; - - /* This condition could fail if there are two kfs very close together - * despite (MIN_GF_INTERVAL) and would cause a devide by 0 in the - * calculation of cpi->twopass.alt_extra_bits. - */ - if ( cpi->baseline_gf_interval >= 3 ) - { -#if NEW_BOOST - int boost = (cpi->source_alt_ref_pending) - ? b_boost : cpi->gfu_boost; -#else - int boost = cpi->gfu_boost; -#endif - if ( boost >= 150 ) - { - int pct_extra; - - pct_extra = (boost - 100) / 50; - pct_extra = (pct_extra > 20) ? 
20 : pct_extra; - - cpi->twopass.alt_extra_bits = - (int)(cpi->twopass.gf_group_bits * pct_extra) / 100; - cpi->twopass.gf_group_bits -= cpi->twopass.alt_extra_bits; - cpi->twopass.alt_extra_bits /= - ((cpi->baseline_gf_interval-1)>>1); - } - else - cpi->twopass.alt_extra_bits = 0; - } - else - cpi->twopass.alt_extra_bits = 0; + if (cpi->twopass.section_max_qfactor < 0.80) { + cpi->twopass.section_max_qfactor = 0.80; } - /* Adjustments based on a measure of complexity of the section */ - if (cpi->common.frame_type != KEY_FRAME) - { - FIRSTPASS_STATS sectionstats; - double Ratio; - - zero_stats(§ionstats); - reset_fpf_position(cpi, start_pos); - - for (i = 0 ; i < cpi->baseline_gf_interval ; i++) - { - input_stats(cpi, &next_frame); - accumulate_stats(§ionstats, &next_frame); - } - - avg_stats(§ionstats); - - cpi->twopass.section_intra_rating = (unsigned int) - (sectionstats.intra_error / - DOUBLE_DIVIDE_CHECK(sectionstats.coded_error)); - - Ratio = sectionstats.intra_error / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error); - cpi->twopass.section_max_qfactor = 1.0 - ((Ratio - 10.0) * 0.025); - - if (cpi->twopass.section_max_qfactor < 0.80) - cpi->twopass.section_max_qfactor = 0.80; - - reset_fpf_position(cpi, start_pos); - } + reset_fpf_position(cpi, start_pos); + } } -/* Allocate bits to a normal frame that is neither a gf an arf or a key frame. */ -static void assign_std_frame_bits(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) -{ - int target_frame_size; +/* Allocate bits to a normal frame that is neither a gf an arf or a key frame. 
+ */ +static void assign_std_frame_bits(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) { + int target_frame_size; - double modified_err; - double err_fraction; + double modified_err; + double err_fraction; - int max_bits = frame_max_bits(cpi); /* Max for a single frame */ + int max_bits = frame_max_bits(cpi); /* Max for a single frame */ - /* Calculate modified prediction error used in bit allocation */ - modified_err = calculate_modified_err(cpi, this_frame); + /* Calculate modified prediction error used in bit allocation */ + modified_err = calculate_modified_err(cpi, this_frame); - /* What portion of the remaining GF group error is used by this frame */ - if (cpi->twopass.gf_group_error_left > 0) - err_fraction = modified_err / cpi->twopass.gf_group_error_left; - else - err_fraction = 0.0; + /* What portion of the remaining GF group error is used by this frame */ + if (cpi->twopass.gf_group_error_left > 0) { + err_fraction = modified_err / cpi->twopass.gf_group_error_left; + } else { + err_fraction = 0.0; + } - /* How many of those bits available for allocation should we give it? */ - target_frame_size = (int)((double)cpi->twopass.gf_group_bits * err_fraction); + /* How many of those bits available for allocation should we give it? */ + target_frame_size = (int)((double)cpi->twopass.gf_group_bits * err_fraction); - /* Clip to target size to 0 - max_bits (or cpi->twopass.gf_group_bits) - * at the top end. + /* Clip to target size to 0 - max_bits (or cpi->twopass.gf_group_bits) + * at the top end. 
+ */ + if (target_frame_size < 0) { + target_frame_size = 0; + } else { + if (target_frame_size > max_bits) target_frame_size = max_bits; + + if (target_frame_size > cpi->twopass.gf_group_bits) { + target_frame_size = (int)cpi->twopass.gf_group_bits; + } + } + + /* Adjust error and bits remaining */ + cpi->twopass.gf_group_error_left -= (int)modified_err; + cpi->twopass.gf_group_bits -= target_frame_size; + + if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0; + + /* Add in the minimum number of bits that is set aside for every frame. */ + target_frame_size += cpi->min_frame_bandwidth; + + /* Every other frame gets a few extra bits */ + if ((cpi->frames_since_golden & 0x01) && + (cpi->frames_till_gf_update_due > 0)) { + target_frame_size += cpi->twopass.alt_extra_bits; + } + + /* Per frame bit target for this frame */ + cpi->per_frame_bandwidth = target_frame_size; +} + +void vp8_second_pass(VP8_COMP *cpi) { + int tmp_q; + int frames_left = + (int)(cpi->twopass.total_stats.count - cpi->common.current_video_frame); + + FIRSTPASS_STATS this_frame; + FIRSTPASS_STATS this_frame_copy; + + double this_frame_intra_error; + double this_frame_coded_error; + + int overhead_bits; + + vp8_zero(this_frame); + + if (!cpi->twopass.stats_in) { + return; + } + + vp8_clear_system_state(); + + if (EOF == input_stats(cpi, &this_frame)) return; + + this_frame_intra_error = this_frame.intra_error; + this_frame_coded_error = this_frame.coded_error; + + /* keyframe and section processing ! */ + if (cpi->twopass.frames_to_key == 0) { + /* Define next KF group and assign bits to it */ + memcpy(&this_frame_copy, &this_frame, sizeof(this_frame)); + find_next_key_frame(cpi, &this_frame_copy); + + /* Special case: Error error_resilient_mode mode does not make much + * sense for two pass but with its current meaning this code is + * designed to stop outlandish behaviour if someone does set it when + * using two pass. It effectively disables GF groups. 
This is + * temporary code until we decide what should really happen in this + * case. */ - if (target_frame_size < 0) - target_frame_size = 0; - else - { - if (target_frame_size > max_bits) - target_frame_size = max_bits; - - if (target_frame_size > cpi->twopass.gf_group_bits) - target_frame_size = (int)cpi->twopass.gf_group_bits; + if (cpi->oxcf.error_resilient_mode) { + cpi->twopass.gf_group_bits = cpi->twopass.kf_group_bits; + cpi->twopass.gf_group_error_left = (int)cpi->twopass.kf_group_error_left; + cpi->baseline_gf_interval = cpi->twopass.frames_to_key; + cpi->frames_till_gf_update_due = cpi->baseline_gf_interval; + cpi->source_alt_ref_pending = 0; } + } - /* Adjust error and bits remaining */ - cpi->twopass.gf_group_error_left -= (int)modified_err; - cpi->twopass.gf_group_bits -= target_frame_size; + /* Is this a GF / ARF (Note that a KF is always also a GF) */ + if (cpi->frames_till_gf_update_due == 0) { + /* Define next gf group and assign bits to it */ + memcpy(&this_frame_copy, &this_frame, sizeof(this_frame)); + define_gf_group(cpi, &this_frame_copy); - if (cpi->twopass.gf_group_bits < 0) - cpi->twopass.gf_group_bits = 0; - - /* Add in the minimum number of bits that is set aside for every frame. */ - target_frame_size += cpi->min_frame_bandwidth; - - /* Every other frame gets a few extra bits */ - if ( (cpi->frames_since_golden & 0x01) && - (cpi->frames_till_gf_update_due > 0) ) - { - target_frame_size += cpi->twopass.alt_extra_bits; + /* If we are going to code an altref frame at the end of the group + * and the current frame is not a key frame.... 
If the previous + * group used an arf this frame has already benefited from that arf + * boost and it should not be given extra bits If the previous + * group was NOT coded using arf we may want to apply some boost to + * this GF as well + */ + if (cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME)) { + /* Assign a standard frames worth of bits from those allocated + * to the GF group + */ + int bak = cpi->per_frame_bandwidth; + memcpy(&this_frame_copy, &this_frame, sizeof(this_frame)); + assign_std_frame_bits(cpi, &this_frame_copy); + cpi->per_frame_bandwidth = bak; } + } - /* Per frame bit target for this frame */ - cpi->per_frame_bandwidth = target_frame_size; -} + /* Otherwise this is an ordinary frame */ + else { + /* Special case: Error error_resilient_mode mode does not make much + * sense for two pass but with its current meaning but this code is + * designed to stop outlandish behaviour if someone does set it + * when using two pass. It effectively disables GF groups. This is + * temporary code till we decide what should really happen in this + * case. + */ + if (cpi->oxcf.error_resilient_mode) { + cpi->frames_till_gf_update_due = cpi->twopass.frames_to_key; -void vp8_second_pass(VP8_COMP *cpi) -{ - int tmp_q; - int frames_left = (int)(cpi->twopass.total_stats.count - cpi->common.current_video_frame); - - FIRSTPASS_STATS this_frame = {0}; - FIRSTPASS_STATS this_frame_copy; - - double this_frame_intra_error; - double this_frame_coded_error; - - int overhead_bits; - - if (!cpi->twopass.stats_in) - { - return ; - } - - vp8_clear_system_state(); - - if (EOF == input_stats(cpi, &this_frame)) - return; - - this_frame_intra_error = this_frame.intra_error; - this_frame_coded_error = this_frame.coded_error; - - /* keyframe and section processing ! 
*/ - if (cpi->twopass.frames_to_key == 0) - { - /* Define next KF group and assign bits to it */ + if (cpi->common.frame_type != KEY_FRAME) { + /* Assign bits from those allocated to the GF group */ memcpy(&this_frame_copy, &this_frame, sizeof(this_frame)); - find_next_key_frame(cpi, &this_frame_copy); - - /* Special case: Error error_resilient_mode mode does not make much - * sense for two pass but with its current meaning this code is - * designed to stop outlandish behaviour if someone does set it when - * using two pass. It effectively disables GF groups. This is - * temporary code until we decide what should really happen in this - * case. - */ - if (cpi->oxcf.error_resilient_mode) - { - cpi->twopass.gf_group_bits = cpi->twopass.kf_group_bits; - cpi->twopass.gf_group_error_left = - (int)cpi->twopass.kf_group_error_left; - cpi->baseline_gf_interval = cpi->twopass.frames_to_key; - cpi->frames_till_gf_update_due = cpi->baseline_gf_interval; - cpi->source_alt_ref_pending = 0; - } - + assign_std_frame_bits(cpi, &this_frame_copy); + } + } else { + /* Assign bits from those allocated to the GF group */ + memcpy(&this_frame_copy, &this_frame, sizeof(this_frame)); + assign_std_frame_bits(cpi, &this_frame_copy); } + } - /* Is this a GF / ARF (Note that a KF is always also a GF) */ - if (cpi->frames_till_gf_update_due == 0) - { - /* Define next gf group and assign bits to it */ - memcpy(&this_frame_copy, &this_frame, sizeof(this_frame)); - define_gf_group(cpi, &this_frame_copy); - - /* If we are going to code an altref frame at the end of the group - * and the current frame is not a key frame.... 
If the previous - * group used an arf this frame has already benefited from that arf - * boost and it should not be given extra bits If the previous - * group was NOT coded using arf we may want to apply some boost to - * this GF as well - */ - if (cpi->source_alt_ref_pending && (cpi->common.frame_type != KEY_FRAME)) - { - /* Assign a standard frames worth of bits from those allocated - * to the GF group - */ - int bak = cpi->per_frame_bandwidth; - memcpy(&this_frame_copy, &this_frame, sizeof(this_frame)); - assign_std_frame_bits(cpi, &this_frame_copy); - cpi->per_frame_bandwidth = bak; - } - } - - /* Otherwise this is an ordinary frame */ - else - { - /* Special case: Error error_resilient_mode mode does not make much - * sense for two pass but with its current meaning but this code is - * designed to stop outlandish behaviour if someone does set it - * when using two pass. It effectively disables GF groups. This is - * temporary code till we decide what should really happen in this - * case. - */ - if (cpi->oxcf.error_resilient_mode) - { - cpi->frames_till_gf_update_due = cpi->twopass.frames_to_key; - - if (cpi->common.frame_type != KEY_FRAME) - { - /* Assign bits from those allocated to the GF group */ - memcpy(&this_frame_copy, &this_frame, sizeof(this_frame)); - assign_std_frame_bits(cpi, &this_frame_copy); - } - } - else - { - /* Assign bits from those allocated to the GF group */ - memcpy(&this_frame_copy, &this_frame, sizeof(this_frame)); - assign_std_frame_bits(cpi, &this_frame_copy); - } - } - - /* Keep a globally available copy of this and the next frame's iiratio. 
*/ - cpi->twopass.this_iiratio = (unsigned int)(this_frame_intra_error / - DOUBLE_DIVIDE_CHECK(this_frame_coded_error)); - { - FIRSTPASS_STATS next_frame; - if ( lookup_next_frame_stats(cpi, &next_frame) != EOF ) - { - cpi->twopass.next_iiratio = (unsigned int)(next_frame.intra_error / - DOUBLE_DIVIDE_CHECK(next_frame.coded_error)); - } - } - - /* Set nominal per second bandwidth for this frame */ - cpi->target_bandwidth = (int) - (cpi->per_frame_bandwidth * cpi->output_framerate); - if (cpi->target_bandwidth < 0) - cpi->target_bandwidth = 0; - - - /* Account for mv, mode and other overheads. */ - overhead_bits = (int)estimate_modemvcost( - cpi, &cpi->twopass.total_left_stats ); - - /* Special case code for first frame. */ - if (cpi->common.current_video_frame == 0) - { - cpi->twopass.est_max_qcorrection_factor = 1.0; - - /* Set a cq_level in constrained quality mode. */ - if ( cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY ) - { - int est_cq; - - est_cq = - estimate_cq( cpi, - &cpi->twopass.total_left_stats, - (int)(cpi->twopass.bits_left / frames_left), - overhead_bits ); - - cpi->cq_target_quality = cpi->oxcf.cq_level; - if ( est_cq > cpi->cq_target_quality ) - cpi->cq_target_quality = est_cq; - } - - /* guess at maxq needed in 2nd pass */ - cpi->twopass.maxq_max_limit = cpi->worst_quality; - cpi->twopass.maxq_min_limit = cpi->best_quality; - - tmp_q = estimate_max_q( - cpi, - &cpi->twopass.total_left_stats, - (int)(cpi->twopass.bits_left / frames_left), - overhead_bits ); - - /* Limit the maxq value returned subsequently. - * This increases the risk of overspend or underspend if the initial - * estimate for the clip is bad, but helps prevent excessive - * variation in Q, especially near the end of a clip - * where for example a small overspend may cause Q to crash - */ - cpi->twopass.maxq_max_limit = ((tmp_q + 32) < cpi->worst_quality) - ? (tmp_q + 32) : cpi->worst_quality; - cpi->twopass.maxq_min_limit = ((tmp_q - 32) > cpi->best_quality) - ? 
(tmp_q - 32) : cpi->best_quality; - - cpi->active_worst_quality = tmp_q; - cpi->ni_av_qi = tmp_q; - } - - /* The last few frames of a clip almost always have to few or too many - * bits and for the sake of over exact rate control we dont want to make - * radical adjustments to the allowed quantizer range just to use up a - * few surplus bits or get beneath the target rate. - */ - else if ( (cpi->common.current_video_frame < - (((unsigned int)cpi->twopass.total_stats.count * 255)>>8)) && - ((cpi->common.current_video_frame + cpi->baseline_gf_interval) < - (unsigned int)cpi->twopass.total_stats.count) ) - { - if (frames_left < 1) - frames_left = 1; - - tmp_q = estimate_max_q( - cpi, - &cpi->twopass.total_left_stats, - (int)(cpi->twopass.bits_left / frames_left), - overhead_bits ); - - /* Move active_worst_quality but in a damped way */ - if (tmp_q > cpi->active_worst_quality) - cpi->active_worst_quality ++; - else if (tmp_q < cpi->active_worst_quality) - cpi->active_worst_quality --; - - cpi->active_worst_quality = - ((cpi->active_worst_quality * 3) + tmp_q + 2) / 4; - } - - cpi->twopass.frames_to_key --; - - /* Update the total stats remaining sturcture */ - subtract_stats(&cpi->twopass.total_left_stats, &this_frame ); -} - - -static int test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame, FIRSTPASS_STATS *this_frame, FIRSTPASS_STATS *next_frame) -{ - int is_viable_kf = 0; - - /* Does the frame satisfy the primary criteria of a key frame - * If so, then examine how well it predicts subsequent frames - */ - if ((this_frame->pcnt_second_ref < 0.10) && - (next_frame->pcnt_second_ref < 0.10) && - ((this_frame->pcnt_inter < 0.05) || - ( - ((this_frame->pcnt_inter - this_frame->pcnt_neutral) < .25) && - ((this_frame->intra_error / DOUBLE_DIVIDE_CHECK(this_frame->coded_error)) < 2.5) && - ((fabs(last_frame->coded_error - this_frame->coded_error) / DOUBLE_DIVIDE_CHECK(this_frame->coded_error) > .40) || - (fabs(last_frame->intra_error - this_frame->intra_error) / 
DOUBLE_DIVIDE_CHECK(this_frame->intra_error) > .40) || - ((next_frame->intra_error / DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) > 3.5) - ) - ) - ) - ) - { - int i; - FIRSTPASS_STATS *start_pos; - - FIRSTPASS_STATS local_next_frame; - - double boost_score = 0.0; - double old_boost_score = 0.0; - double decay_accumulator = 1.0; - double next_iiratio; - - memcpy(&local_next_frame, next_frame, sizeof(*next_frame)); - - /* Note the starting file position so we can reset to it */ - start_pos = cpi->twopass.stats_in; - - /* Examine how well the key frame predicts subsequent frames */ - for (i = 0 ; i < 16; i++) - { - next_iiratio = (IIKFACTOR1 * local_next_frame.intra_error / DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error)) ; - - if (next_iiratio > RMAX) - next_iiratio = RMAX; - - /* Cumulative effect of decay in prediction quality */ - if (local_next_frame.pcnt_inter > 0.85) - decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter; - else - decay_accumulator = decay_accumulator * ((0.85 + local_next_frame.pcnt_inter) / 2.0); - - /* Keep a running total */ - boost_score += (decay_accumulator * next_iiratio); - - /* Test various breakout clauses */ - if ((local_next_frame.pcnt_inter < 0.05) || - (next_iiratio < 1.5) || - (((local_next_frame.pcnt_inter - - local_next_frame.pcnt_neutral) < 0.20) && - (next_iiratio < 3.0)) || - ((boost_score - old_boost_score) < 0.5) || - (local_next_frame.intra_error < 200) - ) - { - break; - } - - old_boost_score = boost_score; - - /* Get the next frame details */ - if (EOF == input_stats(cpi, &local_next_frame)) - break; - } - - /* If there is tolerable prediction for at least the next 3 frames - * then break out else discard this pottential key frame and move on - */ - if (boost_score > 5.0 && (i > 3)) - is_viable_kf = 1; - else - { - /* Reset the file position */ - reset_fpf_position(cpi, start_pos); - - is_viable_kf = 0; - } - } - - return is_viable_kf; -} -static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS 
*this_frame) -{ - int i,j; - FIRSTPASS_STATS last_frame; - FIRSTPASS_STATS first_frame; + /* Keep a globally available copy of this and the next frame's iiratio. */ + cpi->twopass.this_iiratio = + (unsigned int)(this_frame_intra_error / + DOUBLE_DIVIDE_CHECK(this_frame_coded_error)); + { FIRSTPASS_STATS next_frame; - FIRSTPASS_STATS *start_position; + if (lookup_next_frame_stats(cpi, &next_frame) != EOF) { + cpi->twopass.next_iiratio = + (unsigned int)(next_frame.intra_error / + DOUBLE_DIVIDE_CHECK(next_frame.coded_error)); + } + } - double decay_accumulator = 1.0; - double boost_score = 0; + /* Set nominal per second bandwidth for this frame */ + cpi->target_bandwidth = + (int)(cpi->per_frame_bandwidth * cpi->output_framerate); + if (cpi->target_bandwidth < 0) cpi->target_bandwidth = 0; + + /* Account for mv, mode and other overheads. */ + overhead_bits = (int)estimate_modemvcost(cpi, &cpi->twopass.total_left_stats); + + /* Special case code for first frame. */ + if (cpi->common.current_video_frame == 0) { + cpi->twopass.est_max_qcorrection_factor = 1.0; + + /* Set a cq_level in constrained quality mode. */ + if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) { + int est_cq; + + est_cq = estimate_cq(cpi, &cpi->twopass.total_left_stats, + (int)(cpi->twopass.bits_left / frames_left), + overhead_bits); + + cpi->cq_target_quality = cpi->oxcf.cq_level; + if (est_cq > cpi->cq_target_quality) cpi->cq_target_quality = est_cq; + } + + /* guess at maxq needed in 2nd pass */ + cpi->twopass.maxq_max_limit = cpi->worst_quality; + cpi->twopass.maxq_min_limit = cpi->best_quality; + + tmp_q = estimate_max_q(cpi, &cpi->twopass.total_left_stats, + (int)(cpi->twopass.bits_left / frames_left), + overhead_bits); + + /* Limit the maxq value returned subsequently. 
+ * This increases the risk of overspend or underspend if the initial + * estimate for the clip is bad, but helps prevent excessive + * variation in Q, especially near the end of a clip + * where for example a small overspend may cause Q to crash + */ + cpi->twopass.maxq_max_limit = + ((tmp_q + 32) < cpi->worst_quality) ? (tmp_q + 32) : cpi->worst_quality; + cpi->twopass.maxq_min_limit = + ((tmp_q - 32) > cpi->best_quality) ? (tmp_q - 32) : cpi->best_quality; + + cpi->active_worst_quality = tmp_q; + cpi->ni_av_qi = tmp_q; + } + + /* The last few frames of a clip almost always have to few or too many + * bits and for the sake of over exact rate control we dont want to make + * radical adjustments to the allowed quantizer range just to use up a + * few surplus bits or get beneath the target rate. + */ + else if ((cpi->common.current_video_frame < + (((unsigned int)cpi->twopass.total_stats.count * 255) >> 8)) && + ((cpi->common.current_video_frame + cpi->baseline_gf_interval) < + (unsigned int)cpi->twopass.total_stats.count)) { + if (frames_left < 1) frames_left = 1; + + tmp_q = estimate_max_q(cpi, &cpi->twopass.total_left_stats, + (int)(cpi->twopass.bits_left / frames_left), + overhead_bits); + + /* Move active_worst_quality but in a damped way */ + if (tmp_q > cpi->active_worst_quality) { + cpi->active_worst_quality++; + } else if (tmp_q < cpi->active_worst_quality) { + cpi->active_worst_quality--; + } + + cpi->active_worst_quality = + ((cpi->active_worst_quality * 3) + tmp_q + 2) / 4; + } + + cpi->twopass.frames_to_key--; + + /* Update the total stats remaining sturcture */ + subtract_stats(&cpi->twopass.total_left_stats, &this_frame); +} + +static int test_candidate_kf(VP8_COMP *cpi, FIRSTPASS_STATS *last_frame, + FIRSTPASS_STATS *this_frame, + FIRSTPASS_STATS *next_frame) { + int is_viable_kf = 0; + + /* Does the frame satisfy the primary criteria of a key frame + * If so, then examine how well it predicts subsequent frames + */ + if ((this_frame->pcnt_second_ref 
< 0.10) && + (next_frame->pcnt_second_ref < 0.10) && + ((this_frame->pcnt_inter < 0.05) || + (((this_frame->pcnt_inter - this_frame->pcnt_neutral) < .25) && + ((this_frame->intra_error / + DOUBLE_DIVIDE_CHECK(this_frame->coded_error)) < 2.5) && + ((fabs(last_frame->coded_error - this_frame->coded_error) / + DOUBLE_DIVIDE_CHECK(this_frame->coded_error) > + .40) || + (fabs(last_frame->intra_error - this_frame->intra_error) / + DOUBLE_DIVIDE_CHECK(this_frame->intra_error) > + .40) || + ((next_frame->intra_error / + DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) > 3.5))))) { + int i; + FIRSTPASS_STATS *start_pos; + + FIRSTPASS_STATS local_next_frame; + + double boost_score = 0.0; double old_boost_score = 0.0; - double loop_decay_rate; + double decay_accumulator = 1.0; + double next_iiratio; - double kf_mod_err = 0.0; - double kf_group_err = 0.0; - double kf_group_intra_err = 0.0; - double kf_group_coded_err = 0.0; - double recent_loop_decay[8] = {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0}; + memcpy(&local_next_frame, next_frame, sizeof(*next_frame)); - memset(&next_frame, 0, sizeof(next_frame)); + /* Note the starting file position so we can reset to it */ + start_pos = cpi->twopass.stats_in; - vp8_clear_system_state(); - start_position = cpi->twopass.stats_in; + /* Examine how well the key frame predicts subsequent frames */ + for (i = 0; i < 16; ++i) { + next_iiratio = (IIKFACTOR1 * local_next_frame.intra_error / + DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error)); - cpi->common.frame_type = KEY_FRAME; + if (next_iiratio > RMAX) next_iiratio = RMAX; - /* is this a forced key frame by interval */ - cpi->this_key_frame_forced = cpi->next_key_frame_forced; + /* Cumulative effect of decay in prediction quality */ + if (local_next_frame.pcnt_inter > 0.85) { + decay_accumulator = decay_accumulator * local_next_frame.pcnt_inter; + } else { + decay_accumulator = + decay_accumulator * ((0.85 + local_next_frame.pcnt_inter) / 2.0); + } - /* Clear the alt ref active flag as this can never 
be active on a key - * frame + /* Keep a running total */ + boost_score += (decay_accumulator * next_iiratio); + + /* Test various breakout clauses */ + if ((local_next_frame.pcnt_inter < 0.05) || (next_iiratio < 1.5) || + (((local_next_frame.pcnt_inter - local_next_frame.pcnt_neutral) < + 0.20) && + (next_iiratio < 3.0)) || + ((boost_score - old_boost_score) < 0.5) || + (local_next_frame.intra_error < 200)) { + break; + } + + old_boost_score = boost_score; + + /* Get the next frame details */ + if (EOF == input_stats(cpi, &local_next_frame)) break; + } + + /* If there is tolerable prediction for at least the next 3 frames + * then break out else discard this pottential key frame and move on */ - cpi->source_alt_ref_active = 0; + if (boost_score > 5.0 && (i > 3)) { + is_viable_kf = 1; + } else { + /* Reset the file position */ + reset_fpf_position(cpi, start_pos); - /* Kf is always a gf so clear frames till next gf counter */ - cpi->frames_till_gf_update_due = 0; + is_viable_kf = 0; + } + } - cpi->twopass.frames_to_key = 1; + return is_viable_kf; +} +static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) { + int i, j; + FIRSTPASS_STATS last_frame; + FIRSTPASS_STATS first_frame; + FIRSTPASS_STATS next_frame; + FIRSTPASS_STATS *start_position; - /* Take a copy of the initial frame details */ - memcpy(&first_frame, this_frame, sizeof(*this_frame)); + double decay_accumulator = 1.0; + double boost_score = 0; + double old_boost_score = 0.0; + double loop_decay_rate; + double kf_mod_err = 0.0; + double kf_group_err = 0.0; + double kf_group_intra_err = 0.0; + double kf_group_coded_err = 0.0; + double recent_loop_decay[8] = { 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 }; + + memset(&next_frame, 0, sizeof(next_frame)); + + vp8_clear_system_state(); + start_position = cpi->twopass.stats_in; + + cpi->common.frame_type = KEY_FRAME; + + /* is this a forced key frame by interval */ + cpi->this_key_frame_forced = cpi->next_key_frame_forced; + + /* Clear the alt 
ref active flag as this can never be active on a key + * frame + */ + cpi->source_alt_ref_active = 0; + + /* Kf is always a gf so clear frames till next gf counter */ + cpi->frames_till_gf_update_due = 0; + + cpi->twopass.frames_to_key = 1; + + /* Take a copy of the initial frame details */ + memcpy(&first_frame, this_frame, sizeof(*this_frame)); + + cpi->twopass.kf_group_bits = 0; + cpi->twopass.kf_group_error_left = 0; + + kf_mod_err = calculate_modified_err(cpi, this_frame); + + /* find the next keyframe */ + i = 0; + while (cpi->twopass.stats_in < cpi->twopass.stats_in_end) { + /* Accumulate kf group error */ + kf_group_err += calculate_modified_err(cpi, this_frame); + + /* These figures keep intra and coded error counts for all frames + * including key frames in the group. The effect of the key frame + * itself can be subtracted out using the first_frame data + * collected above + */ + kf_group_intra_err += this_frame->intra_error; + kf_group_coded_err += this_frame->coded_error; + + /* Load the next frame's stats. */ + memcpy(&last_frame, this_frame, sizeof(*this_frame)); + input_stats(cpi, this_frame); + + /* Provided that we are not at the end of the file... */ + if (cpi->oxcf.auto_key && + lookup_next_frame_stats(cpi, &next_frame) != EOF) { + /* Normal scene cut check */ + if ((i >= MIN_GF_INTERVAL) && + test_candidate_kf(cpi, &last_frame, this_frame, &next_frame)) { + break; + } + + /* How fast is prediction quality decaying */ + loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame); + + /* We want to know something about the recent past... rather than + * as used elsewhere where we are concened with decay in prediction + * quality since the last GF or KF. + */ + recent_loop_decay[i % 8] = loop_decay_rate; + decay_accumulator = 1.0; + for (j = 0; j < 8; ++j) { + decay_accumulator = decay_accumulator * recent_loop_decay[j]; + } + + /* Special check for transition or high motion followed by a + * static scene. 
+ */ + if (detect_transition_to_still(cpi, i, + ((int)(cpi->key_frame_frequency) - (int)i), + loop_decay_rate, decay_accumulator)) { + break; + } + + /* Step on to the next frame */ + cpi->twopass.frames_to_key++; + + /* If we don't have a real key frame within the next two + * forcekeyframeevery intervals then break out of the loop. + */ + if (cpi->twopass.frames_to_key >= 2 * (int)cpi->key_frame_frequency) { + break; + } + } else { + cpi->twopass.frames_to_key++; + } + + i++; + } + + /* If there is a max kf interval set by the user we must obey it. + * We already breakout of the loop above at 2x max. + * This code centers the extra kf if the actual natural + * interval is between 1x and 2x + */ + if (cpi->oxcf.auto_key && + cpi->twopass.frames_to_key > (int)cpi->key_frame_frequency) { + FIRSTPASS_STATS *current_pos = cpi->twopass.stats_in; + FIRSTPASS_STATS tmp_frame; + + cpi->twopass.frames_to_key /= 2; + + /* Copy first frame details */ + memcpy(&tmp_frame, &first_frame, sizeof(first_frame)); + + /* Reset to the start of the group */ + reset_fpf_position(cpi, start_position); + + kf_group_err = 0; + kf_group_intra_err = 0; + kf_group_coded_err = 0; + + /* Rescan to get the correct error data for the forced kf group */ + for (i = 0; i < cpi->twopass.frames_to_key; ++i) { + /* Accumulate kf group errors */ + kf_group_err += calculate_modified_err(cpi, &tmp_frame); + kf_group_intra_err += tmp_frame.intra_error; + kf_group_coded_err += tmp_frame.coded_error; + + /* Load a the next frame's stats */ + input_stats(cpi, &tmp_frame); + } + + /* Reset to the start of the group */ + reset_fpf_position(cpi, current_pos); + + cpi->next_key_frame_forced = 1; + } else { + cpi->next_key_frame_forced = 0; + } + + /* Special case for the last frame of the file */ + if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) { + /* Accumulate kf group error */ + kf_group_err += calculate_modified_err(cpi, this_frame); + + /* These figures keep intra and coded error counts for all 
frames + * including key frames in the group. The effect of the key frame + * itself can be subtracted out using the first_frame data + * collected above + */ + kf_group_intra_err += this_frame->intra_error; + kf_group_coded_err += this_frame->coded_error; + } + + /* Calculate the number of bits that should be assigned to the kf group. */ + if ((cpi->twopass.bits_left > 0) && + (cpi->twopass.modified_error_left > 0.0)) { + /* Max for a single normal frame (not key frame) */ + int max_bits = frame_max_bits(cpi); + + /* Maximum bits for the kf group */ + int64_t max_grp_bits; + + /* Default allocation based on bits left and relative + * complexity of the section + */ + cpi->twopass.kf_group_bits = + (int64_t)(cpi->twopass.bits_left * + (kf_group_err / cpi->twopass.modified_error_left)); + + /* Clip based on maximum per frame rate defined by the user. */ + max_grp_bits = (int64_t)max_bits * (int64_t)cpi->twopass.frames_to_key; + if (cpi->twopass.kf_group_bits > max_grp_bits) { + cpi->twopass.kf_group_bits = max_grp_bits; + } + + /* Additional special case for CBR if buffer is getting full. */ + if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + int64_t opt_buffer_lvl = cpi->oxcf.optimal_buffer_level; + int64_t buffer_lvl = cpi->buffer_level; + + /* If the buffer is near or above the optimal and this kf group is + * not being allocated much then increase the allocation a bit. + */ + if (buffer_lvl >= opt_buffer_lvl) { + int64_t high_water_mark = + (opt_buffer_lvl + cpi->oxcf.maximum_buffer_size) >> 1; + + int64_t av_group_bits; + + /* Av bits per frame * number of frames */ + av_group_bits = (int64_t)cpi->av_per_frame_bandwidth * + (int64_t)cpi->twopass.frames_to_key; + + /* We are at or above the maximum. 
*/ + if (cpi->buffer_level >= high_water_mark) { + int64_t min_group_bits; + + min_group_bits = + av_group_bits + (int64_t)(buffer_lvl - high_water_mark); + + if (cpi->twopass.kf_group_bits < min_group_bits) { + cpi->twopass.kf_group_bits = min_group_bits; + } + } + /* We are above optimal but below the maximum */ + else if (cpi->twopass.kf_group_bits < av_group_bits) { + int64_t bits_below_av = av_group_bits - cpi->twopass.kf_group_bits; + + cpi->twopass.kf_group_bits += (int64_t)( + (double)bits_below_av * (double)(buffer_lvl - opt_buffer_lvl) / + (double)(high_water_mark - opt_buffer_lvl)); + } + } + } + } else { cpi->twopass.kf_group_bits = 0; - cpi->twopass.kf_group_error_left = 0; + } - kf_mod_err = calculate_modified_err(cpi, this_frame); + /* Reset the first pass file position */ + reset_fpf_position(cpi, start_position); - /* find the next keyframe */ - i = 0; - while (cpi->twopass.stats_in < cpi->twopass.stats_in_end) - { - /* Accumulate kf group error */ - kf_group_err += calculate_modified_err(cpi, this_frame); + /* determine how big to make this keyframe based on how well the + * subsequent frames use inter blocks + */ + decay_accumulator = 1.0; + boost_score = 0.0; - /* These figures keep intra and coded error counts for all frames - * including key frames in the group. The effect of the key frame - * itself can be subtracted out using the first_frame data - * collected above - */ - kf_group_intra_err += this_frame->intra_error; - kf_group_coded_err += this_frame->coded_error; + for (i = 0; i < cpi->twopass.frames_to_key; ++i) { + double r; - /* Load the next frame's stats. */ - memcpy(&last_frame, this_frame, sizeof(*this_frame)); - input_stats(cpi, this_frame); + if (EOF == input_stats(cpi, &next_frame)) break; - /* Provided that we are not at the end of the file... 
*/ - if (cpi->oxcf.auto_key - && lookup_next_frame_stats(cpi, &next_frame) != EOF) - { - /* Normal scene cut check */ - if ( ( i >= MIN_GF_INTERVAL ) && - test_candidate_kf(cpi, &last_frame, this_frame, &next_frame) ) - { - break; - } - - /* How fast is prediction quality decaying */ - loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame); - - /* We want to know something about the recent past... rather than - * as used elsewhere where we are concened with decay in prediction - * quality since the last GF or KF. - */ - recent_loop_decay[i%8] = loop_decay_rate; - decay_accumulator = 1.0; - for (j = 0; j < 8; j++) - { - decay_accumulator = decay_accumulator * recent_loop_decay[j]; - } - - /* Special check for transition or high motion followed by a - * static scene. - */ - if ( detect_transition_to_still( cpi, i, - (cpi->key_frame_frequency-i), - loop_decay_rate, - decay_accumulator ) ) - { - break; - } - - - /* Step on to the next frame */ - cpi->twopass.frames_to_key ++; - - /* If we don't have a real key frame within the next two - * forcekeyframeevery intervals then break out of the loop. - */ - if (cpi->twopass.frames_to_key >= 2 *(int)cpi->key_frame_frequency) - break; - } else - cpi->twopass.frames_to_key ++; - - i++; + if (next_frame.intra_error > cpi->twopass.kf_intra_err_min) { + r = (IIKFACTOR2 * next_frame.intra_error / + DOUBLE_DIVIDE_CHECK(next_frame.coded_error)); + } else { + r = (IIKFACTOR2 * cpi->twopass.kf_intra_err_min / + DOUBLE_DIVIDE_CHECK(next_frame.coded_error)); } - /* If there is a max kf interval set by the user we must obey it. - * We already breakout of the loop above at 2x max. 
- * This code centers the extra kf if the actual natural - * interval is between 1x and 2x - */ - if (cpi->oxcf.auto_key - && cpi->twopass.frames_to_key > (int)cpi->key_frame_frequency ) - { - FIRSTPASS_STATS *current_pos = cpi->twopass.stats_in; - FIRSTPASS_STATS tmp_frame; + if (r > RMAX) r = RMAX; - cpi->twopass.frames_to_key /= 2; + /* How fast is prediction quality decaying */ + loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame); - /* Copy first frame details */ - memcpy(&tmp_frame, &first_frame, sizeof(first_frame)); + decay_accumulator = decay_accumulator * loop_decay_rate; + decay_accumulator = decay_accumulator < 0.1 ? 0.1 : decay_accumulator; - /* Reset to the start of the group */ - reset_fpf_position(cpi, start_position); + boost_score += (decay_accumulator * r); - kf_group_err = 0; - kf_group_intra_err = 0; - kf_group_coded_err = 0; - - /* Rescan to get the correct error data for the forced kf group */ - for( i = 0; i < cpi->twopass.frames_to_key; i++ ) - { - /* Accumulate kf group errors */ - kf_group_err += calculate_modified_err(cpi, &tmp_frame); - kf_group_intra_err += tmp_frame.intra_error; - kf_group_coded_err += tmp_frame.coded_error; - - /* Load a the next frame's stats */ - input_stats(cpi, &tmp_frame); - } - - /* Reset to the start of the group */ - reset_fpf_position(cpi, current_pos); - - cpi->next_key_frame_forced = 1; - } - else - cpi->next_key_frame_forced = 0; - - /* Special case for the last frame of the file */ - if (cpi->twopass.stats_in >= cpi->twopass.stats_in_end) - { - /* Accumulate kf group error */ - kf_group_err += calculate_modified_err(cpi, this_frame); - - /* These figures keep intra and coded error counts for all frames - * including key frames in the group. 
The effect of the key frame - * itself can be subtracted out using the first_frame data - * collected above - */ - kf_group_intra_err += this_frame->intra_error; - kf_group_coded_err += this_frame->coded_error; + if ((i > MIN_GF_INTERVAL) && ((boost_score - old_boost_score) < 1.0)) { + break; } - /* Calculate the number of bits that should be assigned to the kf group. */ - if ((cpi->twopass.bits_left > 0) && (cpi->twopass.modified_error_left > 0.0)) - { - /* Max for a single normal frame (not key frame) */ - int max_bits = frame_max_bits(cpi); + old_boost_score = boost_score; + } - /* Maximum bits for the kf group */ - int64_t max_grp_bits; + if (1) { + FIRSTPASS_STATS sectionstats; + double Ratio; - /* Default allocation based on bits left and relative - * complexity of the section - */ - cpi->twopass.kf_group_bits = (int64_t)( cpi->twopass.bits_left * - ( kf_group_err / - cpi->twopass.modified_error_left )); - - /* Clip based on maximum per frame rate defined by the user. */ - max_grp_bits = (int64_t)max_bits * (int64_t)cpi->twopass.frames_to_key; - if (cpi->twopass.kf_group_bits > max_grp_bits) - cpi->twopass.kf_group_bits = max_grp_bits; - - /* Additional special case for CBR if buffer is getting full. */ - if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) - { - int64_t opt_buffer_lvl = cpi->oxcf.optimal_buffer_level; - int64_t buffer_lvl = cpi->buffer_level; - - /* If the buffer is near or above the optimal and this kf group is - * not being allocated much then increase the allocation a bit. - */ - if (buffer_lvl >= opt_buffer_lvl) - { - int64_t high_water_mark = (opt_buffer_lvl + - cpi->oxcf.maximum_buffer_size) >> 1; - - int64_t av_group_bits; - - /* Av bits per frame * number of frames */ - av_group_bits = (int64_t)cpi->av_per_frame_bandwidth * - (int64_t)cpi->twopass.frames_to_key; - - /* We are at or above the maximum. 
*/ - if (cpi->buffer_level >= high_water_mark) - { - int64_t min_group_bits; - - min_group_bits = av_group_bits + - (int64_t)(buffer_lvl - - high_water_mark); - - if (cpi->twopass.kf_group_bits < min_group_bits) - cpi->twopass.kf_group_bits = min_group_bits; - } - /* We are above optimal but below the maximum */ - else if (cpi->twopass.kf_group_bits < av_group_bits) - { - int64_t bits_below_av = av_group_bits - - cpi->twopass.kf_group_bits; - - cpi->twopass.kf_group_bits += - (int64_t)((double)bits_below_av * - (double)(buffer_lvl - opt_buffer_lvl) / - (double)(high_water_mark - opt_buffer_lvl)); - } - } - } - } - else - cpi->twopass.kf_group_bits = 0; - - /* Reset the first pass file position */ + zero_stats(§ionstats); reset_fpf_position(cpi, start_position); - /* determine how big to make this keyframe based on how well the - * subsequent frames use inter blocks - */ - decay_accumulator = 1.0; - boost_score = 0.0; - - for (i = 0 ; i < cpi->twopass.frames_to_key ; i++) - { - double r; - - if (EOF == input_stats(cpi, &next_frame)) - break; - - if (next_frame.intra_error > cpi->twopass.kf_intra_err_min) - r = (IIKFACTOR2 * next_frame.intra_error / - DOUBLE_DIVIDE_CHECK(next_frame.coded_error)); - else - r = (IIKFACTOR2 * cpi->twopass.kf_intra_err_min / - DOUBLE_DIVIDE_CHECK(next_frame.coded_error)); - - if (r > RMAX) - r = RMAX; - - /* How fast is prediction quality decaying */ - loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame); - - decay_accumulator = decay_accumulator * loop_decay_rate; - decay_accumulator = decay_accumulator < 0.1 ? 
0.1 : decay_accumulator; - - boost_score += (decay_accumulator * r); - - if ((i > MIN_GF_INTERVAL) && - ((boost_score - old_boost_score) < 1.0)) - { - break; - } - - old_boost_score = boost_score; + for (i = 0; i < cpi->twopass.frames_to_key; ++i) { + input_stats(cpi, &next_frame); + accumulate_stats(§ionstats, &next_frame); } - if (1) - { - FIRSTPASS_STATS sectionstats; - double Ratio; + avg_stats(§ionstats); - zero_stats(§ionstats); - reset_fpf_position(cpi, start_position); + cpi->twopass.section_intra_rating = + (unsigned int)(sectionstats.intra_error / + DOUBLE_DIVIDE_CHECK(sectionstats.coded_error)); - for (i = 0 ; i < cpi->twopass.frames_to_key ; i++) - { - input_stats(cpi, &next_frame); - accumulate_stats(§ionstats, &next_frame); - } + Ratio = sectionstats.intra_error / + DOUBLE_DIVIDE_CHECK(sectionstats.coded_error); + cpi->twopass.section_max_qfactor = 1.0 - ((Ratio - 10.0) * 0.025); - avg_stats(§ionstats); + if (cpi->twopass.section_max_qfactor < 0.80) { + cpi->twopass.section_max_qfactor = 0.80; + } + } - cpi->twopass.section_intra_rating = (unsigned int) - (sectionstats.intra_error - / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error)); + /* When using CBR apply additional buffer fullness related upper limits */ + if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + double max_boost; - Ratio = sectionstats.intra_error / DOUBLE_DIVIDE_CHECK(sectionstats.coded_error); - cpi->twopass.section_max_qfactor = 1.0 - ((Ratio - 10.0) * 0.025); + if (cpi->drop_frames_allowed) { + int df_buffer_level = (int)(cpi->oxcf.drop_frames_water_mark * + (cpi->oxcf.optimal_buffer_level / 100)); - if (cpi->twopass.section_max_qfactor < 0.80) - cpi->twopass.section_max_qfactor = 0.80; + if (cpi->buffer_level > df_buffer_level) { + max_boost = + ((double)((cpi->buffer_level - df_buffer_level) * 2 / 3) * 16.0) / + DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth); + } else { + max_boost = 0.0; + } + } else if (cpi->buffer_level > 0) { + max_boost = 
((double)(cpi->buffer_level * 2 / 3) * 16.0) / + DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth); + } else { + max_boost = 0.0; } - /* When using CBR apply additional buffer fullness related upper limits */ - if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) - { - double max_boost; + if (boost_score > max_boost) boost_score = max_boost; + } - if (cpi->drop_frames_allowed) - { - int df_buffer_level = (int)(cpi->oxcf.drop_frames_water_mark - * (cpi->oxcf.optimal_buffer_level / 100)); + /* Reset the first pass file position */ + reset_fpf_position(cpi, start_position); - if (cpi->buffer_level > df_buffer_level) - max_boost = ((double)((cpi->buffer_level - df_buffer_level) * 2 / 3) * 16.0) / DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth); - else - max_boost = 0.0; - } - else if (cpi->buffer_level > 0) - { - max_boost = ((double)(cpi->buffer_level * 2 / 3) * 16.0) / DOUBLE_DIVIDE_CHECK((double)cpi->av_per_frame_bandwidth); - } - else - { - max_boost = 0.0; - } - - if (boost_score > max_boost) - boost_score = max_boost; - } - - /* Reset the first pass file position */ - reset_fpf_position(cpi, start_position); - - /* Work out how many bits to allocate for the key frame itself */ - if (1) - { - int kf_boost = (int)boost_score; - int allocation_chunks; - int Counter = cpi->twopass.frames_to_key; - int alt_kf_bits; - YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx]; - /* Min boost based on kf interval */ + /* Work out how many bits to allocate for the key frame itself */ + if (1) { + int kf_boost = (int)boost_score; + int allocation_chunks; + int Counter = cpi->twopass.frames_to_key; + int alt_kf_bits; + YV12_BUFFER_CONFIG *lst_yv12 = &cpi->common.yv12_fb[cpi->common.lst_fb_idx]; +/* Min boost based on kf interval */ #if 0 while ((kf_boost < 48) && (Counter > 0)) @@ -3078,291 +2866,292 @@ static void find_next_key_frame(VP8_COMP *cpi, FIRSTPASS_STATS *this_frame) #endif - if (kf_boost < 48) - { - kf_boost += ((Counter + 1) >> 
1); + if (kf_boost < 48) { + kf_boost += ((Counter + 1) >> 1); - if (kf_boost > 48) kf_boost = 48; - } - - /* bigger frame sizes need larger kf boosts, smaller frames smaller - * boosts... - */ - if ((lst_yv12->y_width * lst_yv12->y_height) > (320 * 240)) - kf_boost += 2 * (lst_yv12->y_width * lst_yv12->y_height) / (320 * 240); - else if ((lst_yv12->y_width * lst_yv12->y_height) < (320 * 240)) - kf_boost -= 4 * (320 * 240) / (lst_yv12->y_width * lst_yv12->y_height); - - /* Min KF boost */ - kf_boost = (int)((double)kf_boost * 100.0) >> 4; /* Scale 16 to 100 */ - if (kf_boost < 250) - kf_boost = 250; - - /* - * We do three calculations for kf size. - * The first is based on the error score for the whole kf group. - * The second (optionaly) on the key frames own error if this is - * smaller than the average for the group. - * The final one insures that the frame receives at least the - * allocation it would have received based on its own error score vs - * the error score remaining - * Special case if the sequence appears almost totaly static - * as measured by the decay accumulator. In this case we want to - * spend almost all of the bits on the key frame. - * cpi->twopass.frames_to_key-1 because key frame itself is taken - * care of by kf_boost. - */ - if ( decay_accumulator >= 0.99 ) - { - allocation_chunks = - ((cpi->twopass.frames_to_key - 1) * 10) + kf_boost; - } - else - { - allocation_chunks = - ((cpi->twopass.frames_to_key - 1) * 100) + kf_boost; - } - - /* Normalize Altboost and allocations chunck down to prevent overflow */ - while (kf_boost > 1000) - { - kf_boost /= 2; - allocation_chunks /= 2; - } - - cpi->twopass.kf_group_bits = (cpi->twopass.kf_group_bits < 0) ? 
0 : cpi->twopass.kf_group_bits; - - /* Calculate the number of bits to be spent on the key frame */ - cpi->twopass.kf_bits = (int)((double)kf_boost * ((double)cpi->twopass.kf_group_bits / (double)allocation_chunks)); - - /* Apply an additional limit for CBR */ - if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) - { - if (cpi->twopass.kf_bits > (int)((3 * cpi->buffer_level) >> 2)) - cpi->twopass.kf_bits = (int)((3 * cpi->buffer_level) >> 2); - } - - /* If the key frame is actually easier than the average for the - * kf group (which does sometimes happen... eg a blank intro frame) - * Then use an alternate calculation based on the kf error score - * which should give a smaller key frame. - */ - if (kf_mod_err < kf_group_err / cpi->twopass.frames_to_key) - { - double alt_kf_grp_bits = - ((double)cpi->twopass.bits_left * - (kf_mod_err * (double)cpi->twopass.frames_to_key) / - DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left)); - - alt_kf_bits = (int)((double)kf_boost * - (alt_kf_grp_bits / (double)allocation_chunks)); - - if (cpi->twopass.kf_bits > alt_kf_bits) - { - cpi->twopass.kf_bits = alt_kf_bits; - } - } - /* Else if it is much harder than other frames in the group make sure - * it at least receives an allocation in keeping with its relative - * error score - */ - else - { - alt_kf_bits = - (int)((double)cpi->twopass.bits_left * - (kf_mod_err / - DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left))); - - if (alt_kf_bits > cpi->twopass.kf_bits) - { - cpi->twopass.kf_bits = alt_kf_bits; - } - } - - cpi->twopass.kf_group_bits -= cpi->twopass.kf_bits; - /* Add in the minimum frame allowance */ - cpi->twopass.kf_bits += cpi->min_frame_bandwidth; - - /* Peer frame bit target for this frame */ - cpi->per_frame_bandwidth = cpi->twopass.kf_bits; - - /* Convert to a per second bitrate */ - cpi->target_bandwidth = (int)(cpi->twopass.kf_bits * - cpi->output_framerate); + if (kf_boost > 48) kf_boost = 48; } - /* Note the total error score of the kf group minus the 
key frame itself */ - cpi->twopass.kf_group_error_left = (int)(kf_group_err - kf_mod_err); - - /* Adjust the count of total modified error left. The count of bits left - * is adjusted elsewhere based on real coded frame sizes + /* bigger frame sizes need larger kf boosts, smaller frames smaller + * boosts... */ - cpi->twopass.modified_error_left -= kf_group_err; + if ((lst_yv12->y_width * lst_yv12->y_height) > (320 * 240)) { + kf_boost += 2 * (lst_yv12->y_width * lst_yv12->y_height) / (320 * 240); + } else if ((lst_yv12->y_width * lst_yv12->y_height) < (320 * 240)) { + kf_boost -= 4 * (320 * 240) / (lst_yv12->y_width * lst_yv12->y_height); + } - if (cpi->oxcf.allow_spatial_resampling) - { - int resample_trigger = 0; - int last_kf_resampled = 0; - int kf_q; - int scale_val = 0; - int hr, hs, vr, vs; - int new_width = cpi->oxcf.Width; - int new_height = cpi->oxcf.Height; + /* Min KF boost */ + kf_boost = (int)((double)kf_boost * 100.0) >> 4; /* Scale 16 to 100 */ + if (kf_boost < 250) kf_boost = 250; - int projected_buffer_level; - int tmp_q; + /* + * We do three calculations for kf size. + * The first is based on the error score for the whole kf group. + * The second (optionaly) on the key frames own error if this is + * smaller than the average for the group. + * The final one insures that the frame receives at least the + * allocation it would have received based on its own error score vs + * the error score remaining + * Special case if the sequence appears almost totaly static + * as measured by the decay accumulator. In this case we want to + * spend almost all of the bits on the key frame. + * cpi->twopass.frames_to_key-1 because key frame itself is taken + * care of by kf_boost. 
+ */ + if (decay_accumulator >= 0.99) { + allocation_chunks = ((cpi->twopass.frames_to_key - 1) * 10) + kf_boost; + } else { + allocation_chunks = ((cpi->twopass.frames_to_key - 1) * 100) + kf_boost; + } - double projected_bits_perframe; - double group_iiratio = (kf_group_intra_err - first_frame.intra_error) / (kf_group_coded_err - first_frame.coded_error); - double err_per_frame = kf_group_err / cpi->twopass.frames_to_key; - double bits_per_frame; - double av_bits_per_frame; - double effective_size_ratio; + /* Normalize Altboost and allocations chunck down to prevent overflow */ + while (kf_boost > 1000) { + kf_boost /= 2; + allocation_chunks /= 2; + } - if ((cpi->common.Width != cpi->oxcf.Width) || (cpi->common.Height != cpi->oxcf.Height)) - last_kf_resampled = 1; + cpi->twopass.kf_group_bits = + (cpi->twopass.kf_group_bits < 0) ? 0 : cpi->twopass.kf_group_bits; - /* Set back to unscaled by defaults */ - cpi->common.horiz_scale = NORMAL; - cpi->common.vert_scale = NORMAL; + /* Calculate the number of bits to be spent on the key frame */ + cpi->twopass.kf_bits = + (int)((double)kf_boost * + ((double)cpi->twopass.kf_group_bits / (double)allocation_chunks)); - /* Calculate Average bits per frame. */ - av_bits_per_frame = cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->framerate); + /* Apply an additional limit for CBR */ + if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + if (cpi->twopass.kf_bits > (int)((3 * cpi->buffer_level) >> 2)) { + cpi->twopass.kf_bits = (int)((3 * cpi->buffer_level) >> 2); + } + } - /* CBR... Use the clip average as the target for deciding resample */ - if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) - { - bits_per_frame = av_bits_per_frame; - } + /* If the key frame is actually easier than the average for the + * kf group (which does sometimes happen... eg a blank intro frame) + * Then use an alternate calculation based on the kf error score + * which should give a smaller key frame. 
+ */ + if (kf_mod_err < kf_group_err / cpi->twopass.frames_to_key) { + double alt_kf_grp_bits = + ((double)cpi->twopass.bits_left * + (kf_mod_err * (double)cpi->twopass.frames_to_key) / + DOUBLE_DIVIDE_CHECK(cpi->twopass.modified_error_left)); - /* In VBR we want to avoid downsampling in easy section unless we - * are under extreme pressure So use the larger of target bitrate - * for this section or average bitrate for sequence - */ - else - { - /* This accounts for how hard the section is... */ - bits_per_frame = (double) - (cpi->twopass.kf_group_bits / cpi->twopass.frames_to_key); + alt_kf_bits = (int)((double)kf_boost * + (alt_kf_grp_bits / (double)allocation_chunks)); - /* Dont turn to resampling in easy sections just because they - * have been assigned a small number of bits - */ - if (bits_per_frame < av_bits_per_frame) - bits_per_frame = av_bits_per_frame; - } + if (cpi->twopass.kf_bits > alt_kf_bits) { + cpi->twopass.kf_bits = alt_kf_bits; + } + } + /* Else if it is much harder than other frames in the group make sure + * it at least receives an allocation in keeping with its relative + * error score + */ + else { + alt_kf_bits = (int)((double)cpi->twopass.bits_left * + (kf_mod_err / DOUBLE_DIVIDE_CHECK( + cpi->twopass.modified_error_left))); - /* bits_per_frame should comply with our minimum */ - if (bits_per_frame < (cpi->oxcf.target_bandwidth * cpi->oxcf.two_pass_vbrmin_section / 100)) - bits_per_frame = (cpi->oxcf.target_bandwidth * cpi->oxcf.two_pass_vbrmin_section / 100); + if (alt_kf_bits > cpi->twopass.kf_bits) { + cpi->twopass.kf_bits = alt_kf_bits; + } + } - /* Work out if spatial resampling is necessary */ - kf_q = estimate_kf_group_q(cpi, err_per_frame, - (int)bits_per_frame, group_iiratio); + cpi->twopass.kf_group_bits -= cpi->twopass.kf_bits; + /* Add in the minimum frame allowance */ + cpi->twopass.kf_bits += cpi->min_frame_bandwidth; - /* If we project a required Q higher than the maximum allowed Q then - * make a guess at the actual size of 
frames in this section - */ - projected_bits_perframe = bits_per_frame; - tmp_q = kf_q; + /* Peer frame bit target for this frame */ + cpi->per_frame_bandwidth = cpi->twopass.kf_bits; - while (tmp_q > cpi->worst_quality) - { - projected_bits_perframe *= 1.04; - tmp_q--; - } + /* Convert to a per second bitrate */ + cpi->target_bandwidth = (int)(cpi->twopass.kf_bits * cpi->output_framerate); + } - /* Guess at buffer level at the end of the section */ - projected_buffer_level = (int) - (cpi->buffer_level - (int) - ((projected_bits_perframe - av_bits_per_frame) * + /* Note the total error score of the kf group minus the key frame itself */ + cpi->twopass.kf_group_error_left = (int)(kf_group_err - kf_mod_err); + + /* Adjust the count of total modified error left. The count of bits left + * is adjusted elsewhere based on real coded frame sizes + */ + cpi->twopass.modified_error_left -= kf_group_err; + + if (cpi->oxcf.allow_spatial_resampling) { + int resample_trigger = 0; + int last_kf_resampled = 0; + int kf_q; + int scale_val = 0; + int hr, hs, vr, vs; + int new_width = cpi->oxcf.Width; + int new_height = cpi->oxcf.Height; + + int projected_buffer_level; + int tmp_q; + + double projected_bits_perframe; + double group_iiratio = (kf_group_intra_err - first_frame.intra_error) / + (kf_group_coded_err - first_frame.coded_error); + double err_per_frame = kf_group_err / cpi->twopass.frames_to_key; + double bits_per_frame; + double av_bits_per_frame; + double effective_size_ratio; + + if ((cpi->common.Width != cpi->oxcf.Width) || + (cpi->common.Height != cpi->oxcf.Height)) { + last_kf_resampled = 1; + } + + /* Set back to unscaled by defaults */ + cpi->common.horiz_scale = NORMAL; + cpi->common.vert_scale = NORMAL; + + /* Calculate Average bits per frame. */ + av_bits_per_frame = cpi->oxcf.target_bandwidth / + DOUBLE_DIVIDE_CHECK((double)cpi->framerate); + + /* CBR... 
Use the clip average as the target for deciding resample */ + if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + bits_per_frame = av_bits_per_frame; + } + + /* In VBR we want to avoid downsampling in easy section unless we + * are under extreme pressure So use the larger of target bitrate + * for this section or average bitrate for sequence + */ + else { + /* This accounts for how hard the section is... */ + bits_per_frame = + (double)(cpi->twopass.kf_group_bits / cpi->twopass.frames_to_key); + + /* Dont turn to resampling in easy sections just because they + * have been assigned a small number of bits + */ + if (bits_per_frame < av_bits_per_frame) { + bits_per_frame = av_bits_per_frame; + } + } + + /* bits_per_frame should comply with our minimum */ + if (bits_per_frame < (cpi->oxcf.target_bandwidth * + cpi->oxcf.two_pass_vbrmin_section / 100)) { + bits_per_frame = (cpi->oxcf.target_bandwidth * + cpi->oxcf.two_pass_vbrmin_section / 100); + } + + /* Work out if spatial resampling is necessary */ + kf_q = estimate_kf_group_q(cpi, err_per_frame, (int)bits_per_frame, + group_iiratio); + + /* If we project a required Q higher than the maximum allowed Q then + * make a guess at the actual size of frames in this section + */ + projected_bits_perframe = bits_per_frame; + tmp_q = kf_q; + + while (tmp_q > cpi->worst_quality) { + projected_bits_perframe *= 1.04; + tmp_q--; + } + + /* Guess at buffer level at the end of the section */ + projected_buffer_level = + (int)(cpi->buffer_level - + (int)((projected_bits_perframe - av_bits_per_frame) * cpi->twopass.frames_to_key)); - if (0) - { - FILE *f = fopen("Subsamle.stt", "a"); - fprintf(f, " %8d %8d %8d %8d %12.0f %8d %8d %8d\n", cpi->common.current_video_frame, kf_q, cpi->common.horiz_scale, cpi->common.vert_scale, kf_group_err / cpi->twopass.frames_to_key, (int)(cpi->twopass.kf_group_bits / cpi->twopass.frames_to_key), new_height, new_width); - fclose(f); - } - - /* The trigger for spatial resampling depends on the 
various - * parameters such as whether we are streaming (CBR) or VBR. - */ - if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) - { - /* Trigger resample if we are projected to fall below down - * sample level or resampled last time and are projected to - * remain below the up sample level - */ - if ((projected_buffer_level < (cpi->oxcf.resample_down_water_mark * cpi->oxcf.optimal_buffer_level / 100)) || - (last_kf_resampled && (projected_buffer_level < (cpi->oxcf.resample_up_water_mark * cpi->oxcf.optimal_buffer_level / 100)))) - resample_trigger = 1; - else - resample_trigger = 0; - } - else - { - int64_t clip_bits = (int64_t)(cpi->twopass.total_stats.count * cpi->oxcf.target_bandwidth / DOUBLE_DIVIDE_CHECK((double)cpi->framerate)); - int64_t over_spend = cpi->oxcf.starting_buffer_level - cpi->buffer_level; - - /* If triggered last time the threshold for triggering again is - * reduced: - * - * Projected Q higher than allowed and Overspend > 5% of total - * bits - */ - if ((last_kf_resampled && (kf_q > cpi->worst_quality)) || - ((kf_q > cpi->worst_quality) && - (over_spend > clip_bits / 20))) - resample_trigger = 1; - else - resample_trigger = 0; - - } - - if (resample_trigger) - { - while ((kf_q >= cpi->worst_quality) && (scale_val < 6)) - { - scale_val ++; - - cpi->common.vert_scale = vscale_lookup[scale_val]; - cpi->common.horiz_scale = hscale_lookup[scale_val]; - - Scale2Ratio(cpi->common.horiz_scale, &hr, &hs); - Scale2Ratio(cpi->common.vert_scale, &vr, &vs); - - new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs; - new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs; - - /* Reducing the area to 1/4 does not reduce the complexity - * (err_per_frame) to 1/4... 
effective_sizeratio attempts - * to provide a crude correction for this - */ - effective_size_ratio = (double)(new_width * new_height) / (double)(cpi->oxcf.Width * cpi->oxcf.Height); - effective_size_ratio = (1.0 + (3.0 * effective_size_ratio)) / 4.0; - - /* Now try again and see what Q we get with the smaller - * image size - */ - kf_q = estimate_kf_group_q(cpi, - err_per_frame * effective_size_ratio, - (int)bits_per_frame, group_iiratio); - - if (0) - { - FILE *f = fopen("Subsamle.stt", "a"); - fprintf(f, "******** %8d %8d %8d %12.0f %8d %8d %8d\n", kf_q, cpi->common.horiz_scale, cpi->common.vert_scale, kf_group_err / cpi->twopass.frames_to_key, (int)(cpi->twopass.kf_group_bits / cpi->twopass.frames_to_key), new_height, new_width); - fclose(f); - } - } - } - - if ((cpi->common.Width != new_width) || (cpi->common.Height != new_height)) - { - cpi->common.Width = new_width; - cpi->common.Height = new_height; - vp8_alloc_compressor_data(cpi); - } + if (0) { + FILE *f = fopen("Subsamle.stt", "a"); + fprintf(f, " %8d %8d %8d %8d %12.0f %8d %8d %8d\n", + cpi->common.current_video_frame, kf_q, cpi->common.horiz_scale, + cpi->common.vert_scale, kf_group_err / cpi->twopass.frames_to_key, + (int)(cpi->twopass.kf_group_bits / cpi->twopass.frames_to_key), + new_height, new_width); + fclose(f); } + + /* The trigger for spatial resampling depends on the various + * parameters such as whether we are streaming (CBR) or VBR. 
+ */ + if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + /* Trigger resample if we are projected to fall below down + * sample level or resampled last time and are projected to + * remain below the up sample level + */ + if ((projected_buffer_level < (cpi->oxcf.resample_down_water_mark * + cpi->oxcf.optimal_buffer_level / 100)) || + (last_kf_resampled && + (projected_buffer_level < (cpi->oxcf.resample_up_water_mark * + cpi->oxcf.optimal_buffer_level / 100)))) { + resample_trigger = 1; + } else { + resample_trigger = 0; + } + } else { + int64_t clip_bits = (int64_t)( + cpi->twopass.total_stats.count * cpi->oxcf.target_bandwidth / + DOUBLE_DIVIDE_CHECK((double)cpi->framerate)); + int64_t over_spend = cpi->oxcf.starting_buffer_level - cpi->buffer_level; + + /* If triggered last time the threshold for triggering again is + * reduced: + * + * Projected Q higher than allowed and Overspend > 5% of total + * bits + */ + if ((last_kf_resampled && (kf_q > cpi->worst_quality)) || + ((kf_q > cpi->worst_quality) && (over_spend > clip_bits / 20))) { + resample_trigger = 1; + } else { + resample_trigger = 0; + } + } + + if (resample_trigger) { + while ((kf_q >= cpi->worst_quality) && (scale_val < 6)) { + scale_val++; + + cpi->common.vert_scale = vscale_lookup[scale_val]; + cpi->common.horiz_scale = hscale_lookup[scale_val]; + + Scale2Ratio(cpi->common.horiz_scale, &hr, &hs); + Scale2Ratio(cpi->common.vert_scale, &vr, &vs); + + new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs; + new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs; + + /* Reducing the area to 1/4 does not reduce the complexity + * (err_per_frame) to 1/4... 
effective_sizeratio attempts + * to provide a crude correction for this + */ + effective_size_ratio = (double)(new_width * new_height) / + (double)(cpi->oxcf.Width * cpi->oxcf.Height); + effective_size_ratio = (1.0 + (3.0 * effective_size_ratio)) / 4.0; + + /* Now try again and see what Q we get with the smaller + * image size + */ + kf_q = estimate_kf_group_q(cpi, err_per_frame * effective_size_ratio, + (int)bits_per_frame, group_iiratio); + + if (0) { + FILE *f = fopen("Subsamle.stt", "a"); + fprintf( + f, "******** %8d %8d %8d %12.0f %8d %8d %8d\n", kf_q, + cpi->common.horiz_scale, cpi->common.vert_scale, + kf_group_err / cpi->twopass.frames_to_key, + (int)(cpi->twopass.kf_group_bits / cpi->twopass.frames_to_key), + new_height, new_width); + fclose(f); + } + } + } + + if ((cpi->common.Width != new_width) || + (cpi->common.Height != new_height)) { + cpi->common.Width = new_width; + cpi->common.Height = new_height; + vp8_alloc_compressor_data(cpi); + } + } } diff --git a/libs/libvpx/vp8/encoder/firstpass.h b/libs/libvpx/vp8/encoder/firstpass.h index c409ebca8f..ac8a7b1bfb 100644 --- a/libs/libvpx/vp8/encoder/firstpass.h +++ b/libs/libvpx/vp8/encoder/firstpass.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_ENCODER_FIRSTPASS_H_ #define VP8_ENCODER_FIRSTPASS_H_ diff --git a/libs/libvpx/vp8/encoder/lookahead.c b/libs/libvpx/vp8/encoder/lookahead.c index 6623385743..37aa9eee84 100644 --- a/libs/libvpx/vp8/encoder/lookahead.c +++ b/libs/libvpx/vp8/encoder/lookahead.c @@ -13,219 +13,172 @@ #include "lookahead.h" #include "vp8/common/extend.h" -#define MAX_LAG_BUFFERS (CONFIG_REALTIME_ONLY? 1 : 25) +#define MAX_LAG_BUFFERS (CONFIG_REALTIME_ONLY ? 
1 : 25) -struct lookahead_ctx -{ - unsigned int max_sz; /* Absolute size of the queue */ - unsigned int sz; /* Number of buffers currently in the queue */ - unsigned int read_idx; /* Read index */ - unsigned int write_idx; /* Write index */ - struct lookahead_entry *buf; /* Buffer list */ +struct lookahead_ctx { + unsigned int max_sz; /* Absolute size of the queue */ + unsigned int sz; /* Number of buffers currently in the queue */ + unsigned int read_idx; /* Read index */ + unsigned int write_idx; /* Write index */ + struct lookahead_entry *buf; /* Buffer list */ }; - /* Return the buffer at the given absolute index and increment the index */ -static struct lookahead_entry * -pop(struct lookahead_ctx *ctx, - unsigned int *idx) -{ - unsigned int index = *idx; - struct lookahead_entry *buf = ctx->buf + index; +static struct lookahead_entry *pop(struct lookahead_ctx *ctx, + unsigned int *idx) { + unsigned int index = *idx; + struct lookahead_entry *buf = ctx->buf + index; - assert(index < ctx->max_sz); - if(++index >= ctx->max_sz) - index -= ctx->max_sz; - *idx = index; - return buf; + assert(index < ctx->max_sz); + if (++index >= ctx->max_sz) index -= ctx->max_sz; + *idx = index; + return buf; } +void vp8_lookahead_destroy(struct lookahead_ctx *ctx) { + if (ctx) { + if (ctx->buf) { + unsigned int i; -void -vp8_lookahead_destroy(struct lookahead_ctx *ctx) -{ - if(ctx) - { - if(ctx->buf) - { - unsigned int i; - - for(i = 0; i < ctx->max_sz; i++) - vp8_yv12_de_alloc_frame_buffer(&ctx->buf[i].img); - free(ctx->buf); - } - free(ctx); + for (i = 0; i < ctx->max_sz; ++i) { + vp8_yv12_de_alloc_frame_buffer(&ctx->buf[i].img); + } + free(ctx->buf); } + free(ctx); + } } +struct lookahead_ctx *vp8_lookahead_init(unsigned int width, + unsigned int height, + unsigned int depth) { + struct lookahead_ctx *ctx = NULL; + unsigned int i; -struct lookahead_ctx* -vp8_lookahead_init(unsigned int width, - unsigned int height, - unsigned int depth) -{ - struct lookahead_ctx *ctx = NULL; - 
unsigned int i; + /* Clamp the lookahead queue depth */ + if (depth < 1) { + depth = 1; + } else if (depth > MAX_LAG_BUFFERS) { + depth = MAX_LAG_BUFFERS; + } - /* Clamp the lookahead queue depth */ - if(depth < 1) - depth = 1; - else if(depth > MAX_LAG_BUFFERS) - depth = MAX_LAG_BUFFERS; + /* Keep last frame in lookahead buffer by increasing depth by 1.*/ + depth += 1; - /* Keep last frame in lookahead buffer by increasing depth by 1.*/ - depth += 1; + /* Align the buffer dimensions */ + width = (width + 15) & ~15; + height = (height + 15) & ~15; - /* Align the buffer dimensions */ - width = (width + 15) & ~15; - height = (height + 15) & ~15; - - /* Allocate the lookahead structures */ - ctx = calloc(1, sizeof(*ctx)); - if(ctx) - { - ctx->max_sz = depth; - ctx->buf = calloc(depth, sizeof(*ctx->buf)); - if(!ctx->buf) - goto bail; - for(i=0; ibuf[i].img, - width, height, VP8BORDERINPIXELS)) - goto bail; + /* Allocate the lookahead structures */ + ctx = calloc(1, sizeof(*ctx)); + if (ctx) { + ctx->max_sz = depth; + ctx->buf = calloc(depth, sizeof(*ctx->buf)); + if (!ctx->buf) goto bail; + for (i = 0; i < depth; ++i) { + if (vp8_yv12_alloc_frame_buffer(&ctx->buf[i].img, width, height, + VP8BORDERINPIXELS)) { + goto bail; + } } - return ctx; + } + return ctx; bail: - vp8_lookahead_destroy(ctx); - return NULL; + vp8_lookahead_destroy(ctx); + return NULL; } +int vp8_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src, + int64_t ts_start, int64_t ts_end, unsigned int flags, + unsigned char *active_map) { + struct lookahead_entry *buf; + int row, col, active_end; + int mb_rows = (src->y_height + 15) >> 4; + int mb_cols = (src->y_width + 15) >> 4; -int -vp8_lookahead_push(struct lookahead_ctx *ctx, - YV12_BUFFER_CONFIG *src, - int64_t ts_start, - int64_t ts_end, - unsigned int flags, - unsigned char *active_map) -{ - struct lookahead_entry* buf; - int row, col, active_end; - int mb_rows = (src->y_height + 15) >> 4; - int mb_cols = (src->y_width + 15) >> 4; + 
if (ctx->sz + 2 > ctx->max_sz) return 1; + ctx->sz++; + buf = pop(ctx, &ctx->write_idx); - if(ctx->sz + 2 > ctx->max_sz) - return 1; - ctx->sz++; - buf = pop(ctx, &ctx->write_idx); + /* Only do this partial copy if the following conditions are all met: + * 1. Lookahead queue has has size of 1. + * 2. Active map is provided. + * 3. This is not a key frame, golden nor altref frame. + */ + if (ctx->max_sz == 1 && active_map && !flags) { + for (row = 0; row < mb_rows; ++row) { + col = 0; - /* Only do this partial copy if the following conditions are all met: - * 1. Lookahead queue has has size of 1. - * 2. Active map is provided. - * 3. This is not a key frame, golden nor altref frame. - */ - if (ctx->max_sz == 1 && active_map && !flags) - { - for (row = 0; row < mb_rows; ++row) - { - col = 0; - - while (1) - { - /* Find the first active macroblock in this row. */ - for (; col < mb_cols; ++col) - { - if (active_map[col]) - break; - } - - /* No more active macroblock in this row. */ - if (col == mb_cols) - break; - - /* Find the end of active region in this row. */ - active_end = col; - - for (; active_end < mb_cols; ++active_end) - { - if (!active_map[active_end]) - break; - } - - /* Only copy this active region. */ - vp8_copy_and_extend_frame_with_rect(src, &buf->img, - row << 4, - col << 4, 16, - (active_end - col) << 4); - - /* Start again from the end of this active region. */ - col = active_end; - } - - active_map += mb_cols; + while (1) { + /* Find the first active macroblock in this row. */ + for (; col < mb_cols; ++col) { + if (active_map[col]) break; } - } - else - { - vp8_copy_and_extend_frame(src, &buf->img); - } - buf->ts_start = ts_start; - buf->ts_end = ts_end; - buf->flags = flags; - return 0; -} + /* No more active macroblock in this row. */ + if (col == mb_cols) break; -struct lookahead_entry* -vp8_lookahead_pop(struct lookahead_ctx *ctx, - int drain) -{ - struct lookahead_entry* buf = NULL; + /* Find the end of active region in this row. 
*/ + active_end = col; - assert(ctx != NULL); - if(ctx->sz && (drain || ctx->sz == ctx->max_sz - 1)) - { - buf = pop(ctx, &ctx->read_idx); - ctx->sz--; - } - return buf; -} - - -struct lookahead_entry* -vp8_lookahead_peek(struct lookahead_ctx *ctx, - unsigned int index, - int direction) -{ - struct lookahead_entry* buf = NULL; - - if (direction == PEEK_FORWARD) - { - assert(index < ctx->max_sz - 1); - if(index < ctx->sz) - { - index += ctx->read_idx; - if(index >= ctx->max_sz) - index -= ctx->max_sz; - buf = ctx->buf + index; + for (; active_end < mb_cols; ++active_end) { + if (!active_map[active_end]) break; } - } - else if (direction == PEEK_BACKWARD) - { - assert(index == 1); - if(ctx->read_idx == 0) - index = ctx->max_sz - 1; - else - index = ctx->read_idx - index; - buf = ctx->buf + index; - } + /* Only copy this active region. */ + vp8_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4, + 16, (active_end - col) << 4); - return buf; + /* Start again from the end of this active region. 
*/ + col = active_end; + } + + active_map += mb_cols; + } + } else { + vp8_copy_and_extend_frame(src, &buf->img); + } + buf->ts_start = ts_start; + buf->ts_end = ts_end; + buf->flags = flags; + return 0; } +struct lookahead_entry *vp8_lookahead_pop(struct lookahead_ctx *ctx, + int drain) { + struct lookahead_entry *buf = NULL; -unsigned int -vp8_lookahead_depth(struct lookahead_ctx *ctx) -{ - return ctx->sz; + assert(ctx != NULL); + if (ctx->sz && (drain || ctx->sz == ctx->max_sz - 1)) { + buf = pop(ctx, &ctx->read_idx); + ctx->sz--; + } + return buf; } + +struct lookahead_entry *vp8_lookahead_peek(struct lookahead_ctx *ctx, + unsigned int index, int direction) { + struct lookahead_entry *buf = NULL; + + if (direction == PEEK_FORWARD) { + assert(index < ctx->max_sz - 1); + if (index < ctx->sz) { + index += ctx->read_idx; + if (index >= ctx->max_sz) index -= ctx->max_sz; + buf = ctx->buf + index; + } + } else if (direction == PEEK_BACKWARD) { + assert(index == 1); + + if (ctx->read_idx == 0) { + index = ctx->max_sz - 1; + } else { + index = ctx->read_idx - index; + } + buf = ctx->buf + index; + } + + return buf; +} + +unsigned int vp8_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; } diff --git a/libs/libvpx/vp8/encoder/lookahead.h b/libs/libvpx/vp8/encoder/lookahead.h index cad68e639f..a67f226946 100644 --- a/libs/libvpx/vp8/encoder/lookahead.h +++ b/libs/libvpx/vp8/encoder/lookahead.h @@ -16,15 +16,13 @@ extern "C" { #endif -struct lookahead_entry -{ - YV12_BUFFER_CONFIG img; - int64_t ts_start; - int64_t ts_end; - unsigned int flags; +struct lookahead_entry { + YV12_BUFFER_CONFIG img; + int64_t ts_start; + int64_t ts_end; + unsigned int flags; }; - struct lookahead_ctx; /**\brief Initializes the lookahead stage @@ -34,18 +32,15 @@ struct lookahead_ctx; * * */ -struct lookahead_ctx* vp8_lookahead_init(unsigned int width, +struct lookahead_ctx *vp8_lookahead_init(unsigned int width, unsigned int height, - unsigned int depth - ); - + unsigned int 
depth); /**\brief Destroys the lookahead stage * */ void vp8_lookahead_destroy(struct lookahead_ctx *ctx); - /**\brief Enqueue a source buffer * * This function will copy the source image into a new framebuffer with @@ -61,14 +56,9 @@ void vp8_lookahead_destroy(struct lookahead_ctx *ctx); * \param[in] flags Flags set on this frame * \param[in] active_map Map that specifies which macroblock is active */ -int -vp8_lookahead_push(struct lookahead_ctx *ctx, - YV12_BUFFER_CONFIG *src, - int64_t ts_start, - int64_t ts_end, - unsigned int flags, - unsigned char *active_map); - +int vp8_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src, + int64_t ts_start, int64_t ts_end, unsigned int flags, + unsigned char *active_map); /**\brief Get the next source buffer to encode * @@ -81,12 +71,9 @@ vp8_lookahead_push(struct lookahead_ctx *ctx, * \retval NULL, if drain not set and queue not of the configured depth * */ -struct lookahead_entry* -vp8_lookahead_pop(struct lookahead_ctx *ctx, - int drain); +struct lookahead_entry *vp8_lookahead_pop(struct lookahead_ctx *ctx, int drain); - -#define PEEK_FORWARD 1 +#define PEEK_FORWARD 1 #define PEEK_BACKWARD -1 /**\brief Get a future source buffer to encode * @@ -96,19 +83,14 @@ vp8_lookahead_pop(struct lookahead_ctx *ctx, * \retval NULL, if no buffer exists at the specified index * */ -struct lookahead_entry* -vp8_lookahead_peek(struct lookahead_ctx *ctx, - unsigned int index, - int direction); - +struct lookahead_entry *vp8_lookahead_peek(struct lookahead_ctx *ctx, + unsigned int index, int direction); /**\brief Get the number of frames currently in the lookahead queue * * \param[in] ctx Pointer to the lookahead context */ -unsigned int -vp8_lookahead_depth(struct lookahead_ctx *ctx); - +unsigned int vp8_lookahead_depth(struct lookahead_ctx *ctx); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/encoder/mcomp.c b/libs/libvpx/vp8/encoder/mcomp.c index 768c764ceb..970120f3b2 100644 --- 
a/libs/libvpx/vp8/encoder/mcomp.c +++ b/libs/libvpx/vp8/encoder/mcomp.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include "./vp8_rtcd.h" #include "./vpx_dsp_rtcd.h" #include "onyx_int.h" @@ -23,161 +22,161 @@ #include "vpx_dsp/vpx_dsp_common.h" #ifdef VP8_ENTROPY_STATS -static int mv_ref_ct [31] [4] [2]; -static int mv_mode_cts [4] [2]; +static int mv_ref_ct[31][4][2]; +static int mv_mode_cts[4][2]; #endif -int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight) -{ - /* MV costing is based on the distribution of vectors in the previous - * frame and as such will tend to over state the cost of vectors. In - * addition coding a new vector can have a knock on effect on the cost - * of subsequent vectors and the quality of prediction from NEAR and - * NEAREST for subsequent blocks. The "Weight" parameter allows, to a - * limited extent, for some account to be taken of these factors. - */ - return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7; +int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight) { + /* MV costing is based on the distribution of vectors in the previous + * frame and as such will tend to over state the cost of vectors. In + * addition coding a new vector can have a knock on effect on the cost + * of subsequent vectors and the quality of prediction from NEAR and + * NEAREST for subsequent blocks. The "Weight" parameter allows, to a + * limited extent, for some account to be taken of these factors. 
+ */ + return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * + Weight) >> + 7; } -static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit) -{ - /* Ignore mv costing if mvcost is NULL */ - if (mvcost) - return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + - mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) - * error_per_bit + 128) >> 8; - return 0; +static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], + int error_per_bit) { + /* Ignore mv costing if mvcost is NULL */ + if (mvcost) { + return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * + error_per_bit + + 128) >> + 8; + } + return 0; } -static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2], int error_per_bit) -{ - /* Calculate sad error cost on full pixel basis. */ - /* Ignore mv costing if mvsadcost is NULL */ - if (mvsadcost) - return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] + - mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)]) - * error_per_bit + 128) >> 8; - return 0; +static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2], + int error_per_bit) { + /* Calculate sad error cost on full pixel basis. */ + /* Ignore mv costing if mvsadcost is NULL */ + if (mvsadcost) { + return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] + + mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)]) * + error_per_bit + + 128) >> + 8; + } + return 0; } -void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride) -{ - int Len; - int search_site_count = 0; +void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride) { + int Len; + int search_site_count = 0; + /* Generate offsets for 4 search sites per step. */ + Len = MAX_FIRST_STEP; + x->ss[search_site_count].mv.col = 0; + x->ss[search_site_count].mv.row = 0; + x->ss[search_site_count].offset = 0; + search_site_count++; - /* Generate offsets for 4 search sites per step. 
*/ - Len = MAX_FIRST_STEP; + while (Len > 0) { + /* Compute offsets for search sites. */ x->ss[search_site_count].mv.col = 0; - x->ss[search_site_count].mv.row = 0; - x->ss[search_site_count].offset = 0; + x->ss[search_site_count].mv.row = -Len; + x->ss[search_site_count].offset = -Len * stride; search_site_count++; - while (Len > 0) - { - - /* Compute offsets for search sites. */ - x->ss[search_site_count].mv.col = 0; - x->ss[search_site_count].mv.row = -Len; - x->ss[search_site_count].offset = -Len * stride; - search_site_count++; - - /* Compute offsets for search sites. */ - x->ss[search_site_count].mv.col = 0; - x->ss[search_site_count].mv.row = Len; - x->ss[search_site_count].offset = Len * stride; - search_site_count++; - - /* Compute offsets for search sites. */ - x->ss[search_site_count].mv.col = -Len; - x->ss[search_site_count].mv.row = 0; - x->ss[search_site_count].offset = -Len; - search_site_count++; - - /* Compute offsets for search sites. */ - x->ss[search_site_count].mv.col = Len; - x->ss[search_site_count].mv.row = 0; - x->ss[search_site_count].offset = Len; - search_site_count++; - - /* Contract. */ - Len /= 2; - } - - x->ss_count = search_site_count; - x->searches_per_step = 4; -} - -void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) -{ - int Len; - int search_site_count = 0; - - /* Generate offsets for 8 search sites per step. */ - Len = MAX_FIRST_STEP; + /* Compute offsets for search sites. */ x->ss[search_site_count].mv.col = 0; - x->ss[search_site_count].mv.row = 0; - x->ss[search_site_count].offset = 0; + x->ss[search_site_count].mv.row = Len; + x->ss[search_site_count].offset = Len * stride; search_site_count++; - while (Len > 0) - { + /* Compute offsets for search sites. */ + x->ss[search_site_count].mv.col = -Len; + x->ss[search_site_count].mv.row = 0; + x->ss[search_site_count].offset = -Len; + search_site_count++; - /* Compute offsets for search sites. 
*/ - x->ss[search_site_count].mv.col = 0; - x->ss[search_site_count].mv.row = -Len; - x->ss[search_site_count].offset = -Len * stride; - search_site_count++; + /* Compute offsets for search sites. */ + x->ss[search_site_count].mv.col = Len; + x->ss[search_site_count].mv.row = 0; + x->ss[search_site_count].offset = Len; + search_site_count++; - /* Compute offsets for search sites. */ - x->ss[search_site_count].mv.col = 0; - x->ss[search_site_count].mv.row = Len; - x->ss[search_site_count].offset = Len * stride; - search_site_count++; + /* Contract. */ + Len /= 2; + } - /* Compute offsets for search sites. */ - x->ss[search_site_count].mv.col = -Len; - x->ss[search_site_count].mv.row = 0; - x->ss[search_site_count].offset = -Len; - search_site_count++; + x->ss_count = search_site_count; + x->searches_per_step = 4; +} - /* Compute offsets for search sites. */ - x->ss[search_site_count].mv.col = Len; - x->ss[search_site_count].mv.row = 0; - x->ss[search_site_count].offset = Len; - search_site_count++; +void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) { + int Len; + int search_site_count = 0; - /* Compute offsets for search sites. */ - x->ss[search_site_count].mv.col = -Len; - x->ss[search_site_count].mv.row = -Len; - x->ss[search_site_count].offset = -Len * stride - Len; - search_site_count++; + /* Generate offsets for 8 search sites per step. */ + Len = MAX_FIRST_STEP; + x->ss[search_site_count].mv.col = 0; + x->ss[search_site_count].mv.row = 0; + x->ss[search_site_count].offset = 0; + search_site_count++; - /* Compute offsets for search sites. */ - x->ss[search_site_count].mv.col = Len; - x->ss[search_site_count].mv.row = -Len; - x->ss[search_site_count].offset = -Len * stride + Len; - search_site_count++; + while (Len > 0) { + /* Compute offsets for search sites. 
*/ + x->ss[search_site_count].mv.col = 0; + x->ss[search_site_count].mv.row = -Len; + x->ss[search_site_count].offset = -Len * stride; + search_site_count++; - /* Compute offsets for search sites. */ - x->ss[search_site_count].mv.col = -Len; - x->ss[search_site_count].mv.row = Len; - x->ss[search_site_count].offset = Len * stride - Len; - search_site_count++; + /* Compute offsets for search sites. */ + x->ss[search_site_count].mv.col = 0; + x->ss[search_site_count].mv.row = Len; + x->ss[search_site_count].offset = Len * stride; + search_site_count++; - /* Compute offsets for search sites. */ - x->ss[search_site_count].mv.col = Len; - x->ss[search_site_count].mv.row = Len; - x->ss[search_site_count].offset = Len * stride + Len; - search_site_count++; + /* Compute offsets for search sites. */ + x->ss[search_site_count].mv.col = -Len; + x->ss[search_site_count].mv.row = 0; + x->ss[search_site_count].offset = -Len; + search_site_count++; + /* Compute offsets for search sites. */ + x->ss[search_site_count].mv.col = Len; + x->ss[search_site_count].mv.row = 0; + x->ss[search_site_count].offset = Len; + search_site_count++; - /* Contract. */ - Len /= 2; - } + /* Compute offsets for search sites. */ + x->ss[search_site_count].mv.col = -Len; + x->ss[search_site_count].mv.row = -Len; + x->ss[search_site_count].offset = -Len * stride - Len; + search_site_count++; - x->ss_count = search_site_count; - x->searches_per_step = 8; + /* Compute offsets for search sites. */ + x->ss[search_site_count].mv.col = Len; + x->ss[search_site_count].mv.row = -Len; + x->ss[search_site_count].offset = -Len * stride + Len; + search_site_count++; + + /* Compute offsets for search sites. */ + x->ss[search_site_count].mv.col = -Len; + x->ss[search_site_count].mv.row = Len; + x->ss[search_site_count].offset = Len * stride - Len; + search_site_count++; + + /* Compute offsets for search sites. 
*/ + x->ss[search_site_count].mv.col = Len; + x->ss[search_site_count].mv.row = Len; + x->ss[search_site_count].offset = Len * stride + Len; + search_site_count++; + + /* Contract. */ + Len /= 2; + } + + x->ss_count = search_site_count; + x->searches_per_step = 8; } /* @@ -191,168 +190,171 @@ void vp8_init3smotion_compensation(MACROBLOCK *x, int stride) */ /* estimated cost of a motion vector (r,c) */ -#define MVC(r,c) (mvcost ? ((mvcost[0][(r)-rr] + mvcost[1][(c) - rc]) * error_per_bit + 128 )>>8 : 0) +#define MVC(r, c) \ + (mvcost \ + ? ((mvcost[0][(r)-rr] + mvcost[1][(c)-rc]) * error_per_bit + 128) >> 8 \ + : 0) /* pointer to predictor base of a motionvector */ -#define PRE(r,c) (y + (((r)>>2) * y_stride + ((c)>>2) -(offset))) +#define PRE(r, c) (y + (((r) >> 2) * y_stride + ((c) >> 2) - (offset))) /* convert motion vector component to offset for svf calc */ -#define SP(x) (((x)&3)<<1) +#define SP(x) (((x)&3) << 1) /* returns subpixel variance error function. */ -#define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse) -#define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e; +#define DIST(r, c) \ + vfp->svf(PRE(r, c), y_stride, SP(c), SP(r), z, b->src_stride, &sse) +#define IFMVCV(r, c, s, e) \ + if (c >= minc && c <= maxc && r >= minr && r <= maxr) s else e; /* returns distortion + motion vector cost */ -#define ERR(r,c) (MVC(r,c)+DIST(r,c)) +#define ERR(r, c) (MVC(r, c) + DIST(r, c)) /* checks if (r,c) has better score than previous best */ -#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = DIST(r,c); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=UINT_MAX;) +#define CHECK_BETTER(v, r, c) \ + IFMVCV(r, c, \ + { \ + thismse = DIST(r, c); \ + if ((v = (MVC(r, c) + thismse)) < besterr) { \ + besterr = v; \ + br = r; \ + bc = c; \ + *distortion = thismse; \ + *sse1 = sse; \ + } \ + }, \ + v = UINT_MAX;) int 
vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *bestmv, int_mv *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion, - unsigned int *sse1) -{ - unsigned char *z = (*(b->base_src) + b->src); + unsigned int *sse1) { + unsigned char *z = (*(b->base_src) + b->src); - int rr = ref_mv->as_mv.row >> 1, rc = ref_mv->as_mv.col >> 1; - int br = bestmv->as_mv.row * 4, bc = bestmv->as_mv.col * 4; - int tr = br, tc = bc; - unsigned int besterr; - unsigned int left, right, up, down, diag; - unsigned int sse; - unsigned int whichdir; - unsigned int halfiters = 4; - unsigned int quarteriters = 4; - int thismse; + int rr = ref_mv->as_mv.row >> 1, rc = ref_mv->as_mv.col >> 1; + int br = bestmv->as_mv.row * 4, bc = bestmv->as_mv.col * 4; + int tr = br, tc = bc; + unsigned int besterr; + unsigned int left, right, up, down, diag; + unsigned int sse; + unsigned int whichdir; + unsigned int halfiters = 4; + unsigned int quarteriters = 4; + int thismse; - int minc = VPXMAX(x->mv_col_min * 4, - (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1)); - int maxc = VPXMIN(x->mv_col_max * 4, - (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1)); - int minr = VPXMAX(x->mv_row_min * 4, - (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1)); - int maxr = VPXMIN(x->mv_row_max * 4, - (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1)); - - int y_stride; - int offset; - int pre_stride = x->e_mbd.pre.y_stride; - unsigned char *base_pre = x->e_mbd.pre.y_buffer; + int minc = VPXMAX(x->mv_col_min * 4, + (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1)); + int maxc = VPXMIN(x->mv_col_max * 4, + (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1)); + int minr = VPXMAX(x->mv_row_min * 4, + (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1)); + int maxr = VPXMIN(x->mv_row_max * 4, + (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1)); + int y_stride; + int offset; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned 
char *base_pre = x->e_mbd.pre.y_buffer; #if ARCH_X86 || ARCH_X86_64 - MACROBLOCKD *xd = &x->e_mbd; - unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col; - unsigned char *y; - int buf_r1, buf_r2, buf_c1; + MACROBLOCKD *xd = &x->e_mbd; + unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + + bestmv->as_mv.col; + unsigned char *y; + int buf_r1, buf_r2, buf_c1; - /* Clamping to avoid out-of-range data access */ - buf_r1 = ((bestmv->as_mv.row - 3) < x->mv_row_min)?(bestmv->as_mv.row - x->mv_row_min):3; - buf_r2 = ((bestmv->as_mv.row + 3) > x->mv_row_max)?(x->mv_row_max - bestmv->as_mv.row):3; - buf_c1 = ((bestmv->as_mv.col - 3) < x->mv_col_min)?(bestmv->as_mv.col - x->mv_col_min):3; - y_stride = 32; + /* Clamping to avoid out-of-range data access */ + buf_r1 = ((bestmv->as_mv.row - 3) < x->mv_row_min) + ? (bestmv->as_mv.row - x->mv_row_min) + : 3; + buf_r2 = ((bestmv->as_mv.row + 3) > x->mv_row_max) + ? (x->mv_row_max - bestmv->as_mv.row) + : 3; + buf_c1 = ((bestmv->as_mv.col - 3) < x->mv_col_min) + ? (bestmv->as_mv.col - x->mv_col_min) + : 3; + y_stride = 32; - /* Copy to intermediate buffer before searching. */ - vfp->copymem(y_0 - buf_c1 - pre_stride*buf_r1, pre_stride, xd->y_buf, y_stride, 16+buf_r1+buf_r2); - y = xd->y_buf + y_stride*buf_r1 +buf_c1; + /* Copy to intermediate buffer before searching. 
*/ + vfp->copymem(y_0 - buf_c1 - pre_stride * buf_r1, pre_stride, xd->y_buf, + y_stride, 16 + buf_r1 + buf_r2); + y = xd->y_buf + y_stride * buf_r1 + buf_c1; #else - unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col; - y_stride = pre_stride; + unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + + bestmv->as_mv.col; + y_stride = pre_stride; #endif - offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col; + offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col; - /* central mv */ - bestmv->as_mv.row *= 8; - bestmv->as_mv.col *= 8; + /* central mv */ + bestmv->as_mv.row *= 8; + bestmv->as_mv.col *= 8; - /* calculate central point error */ - besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1); - *distortion = besterr; - besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit); + /* calculate central point error */ + besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1); + *distortion = besterr; + besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit); - /* TODO: Each subsequent iteration checks at least one point in common - * with the last iteration could be 2 ( if diag selected) - */ - while (--halfiters) - { - /* 1/2 pel */ - CHECK_BETTER(left, tr, tc - 2); - CHECK_BETTER(right, tr, tc + 2); - CHECK_BETTER(up, tr - 2, tc); - CHECK_BETTER(down, tr + 2, tc); + /* TODO: Each subsequent iteration checks at least one point in common + * with the last iteration could be 2 ( if diag selected) + */ + while (--halfiters) { + /* 1/2 pel */ + CHECK_BETTER(left, tr, tc - 2); + CHECK_BETTER(right, tr, tc + 2); + CHECK_BETTER(up, tr - 2, tc); + CHECK_BETTER(down, tr + 2, tc); - whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2); + whichdir = (left < right ? 0 : 1) + (up < down ? 
0 : 2); - switch (whichdir) - { - case 0: - CHECK_BETTER(diag, tr - 2, tc - 2); - break; - case 1: - CHECK_BETTER(diag, tr - 2, tc + 2); - break; - case 2: - CHECK_BETTER(diag, tr + 2, tc - 2); - break; - case 3: - CHECK_BETTER(diag, tr + 2, tc + 2); - break; - } - - /* no reason to check the same one again. */ - if (tr == br && tc == bc) - break; - - tr = br; - tc = bc; + switch (whichdir) { + case 0: CHECK_BETTER(diag, tr - 2, tc - 2); break; + case 1: CHECK_BETTER(diag, tr - 2, tc + 2); break; + case 2: CHECK_BETTER(diag, tr + 2, tc - 2); break; + case 3: CHECK_BETTER(diag, tr + 2, tc + 2); break; } - /* TODO: Each subsequent iteration checks at least one point in common - * with the last iteration could be 2 ( if diag selected) - */ + /* no reason to check the same one again. */ + if (tr == br && tc == bc) break; - /* 1/4 pel */ - while (--quarteriters) - { - CHECK_BETTER(left, tr, tc - 1); - CHECK_BETTER(right, tr, tc + 1); - CHECK_BETTER(up, tr - 1, tc); - CHECK_BETTER(down, tr + 1, tc); + tr = br; + tc = bc; + } - whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2); + /* TODO: Each subsequent iteration checks at least one point in common + * with the last iteration could be 2 ( if diag selected) + */ - switch (whichdir) - { - case 0: - CHECK_BETTER(diag, tr - 1, tc - 1); - break; - case 1: - CHECK_BETTER(diag, tr - 1, tc + 1); - break; - case 2: - CHECK_BETTER(diag, tr + 1, tc - 1); - break; - case 3: - CHECK_BETTER(diag, tr + 1, tc + 1); - break; - } + /* 1/4 pel */ + while (--quarteriters) { + CHECK_BETTER(left, tr, tc - 1); + CHECK_BETTER(right, tr, tc + 1); + CHECK_BETTER(up, tr - 1, tc); + CHECK_BETTER(down, tr + 1, tc); - /* no reason to check the same one again. */ - if (tr == br && tc == bc) - break; + whichdir = (left < right ? 0 : 1) + (up < down ? 
0 : 2); - tr = br; - tc = bc; + switch (whichdir) { + case 0: CHECK_BETTER(diag, tr - 1, tc - 1); break; + case 1: CHECK_BETTER(diag, tr - 1, tc + 1); break; + case 2: CHECK_BETTER(diag, tr + 1, tc - 1); break; + case 3: CHECK_BETTER(diag, tr + 1, tc + 1); break; } - bestmv->as_mv.row = br * 2; - bestmv->as_mv.col = bc * 2; + /* no reason to check the same one again. */ + if (tr == br && tc == bc) break; - if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL<<3)) || - (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL<<3))) - return INT_MAX; + tr = br; + tc = bc; + } - return besterr; + bestmv->as_mv.row = br * 2; + bestmv->as_mv.col = bc * 2; + + if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL << 3)) || + (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL << 3))) { + return INT_MAX; + } + + return besterr; } #undef MVC #undef PRE @@ -367,310 +369,295 @@ int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion, - unsigned int *sse1) -{ - int bestmse = INT_MAX; - int_mv startmv; - int_mv this_mv; - unsigned char *z = (*(b->base_src) + b->src); - int left, right, up, down, diag; - unsigned int sse; - int whichdir ; - int thismse; - int y_stride; - int pre_stride = x->e_mbd.pre.y_stride; - unsigned char *base_pre = x->e_mbd.pre.y_buffer; + unsigned int *sse1) { + int bestmse = INT_MAX; + int_mv startmv; + int_mv this_mv; + unsigned char *z = (*(b->base_src) + b->src); + int left, right, up, down, diag; + unsigned int sse; + int whichdir; + int thismse; + int y_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; #if ARCH_X86 || ARCH_X86_64 - MACROBLOCKD *xd = &x->e_mbd; - unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col; - unsigned char *y; + MACROBLOCKD *xd = &x->e_mbd; + unsigned char *y_0 = base_pre + d->offset + 
(bestmv->as_mv.row) * pre_stride + + bestmv->as_mv.col; + unsigned char *y; - y_stride = 32; - /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */ - vfp->copymem(y_0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18); - y = xd->y_buf + y_stride + 1; + y_stride = 32; + /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */ + vfp->copymem(y_0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18); + y = xd->y_buf + y_stride + 1; #else - unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col; - y_stride = pre_stride; + unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + + bestmv->as_mv.col; + y_stride = pre_stride; #endif - /* central mv */ - bestmv->as_mv.row *= 8; - bestmv->as_mv.col *= 8; - startmv = *bestmv; + /* central mv */ + bestmv->as_mv.row *= 8; + bestmv->as_mv.col *= 8; + startmv = *bestmv; - /* calculate central point error */ - bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1); - *distortion = bestmse; - bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit); + /* calculate central point error */ + bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1); + *distortion = bestmse; + bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit); - /* go left then right and check error */ - this_mv.as_mv.row = startmv.as_mv.row; - this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4); - thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse); - left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); + /* go left then right and check error */ + this_mv.as_mv.row = startmv.as_mv.row; + this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4); + /* "halfpix" horizontal variance */ + thismse = vfp->svf(y - 1, y_stride, 4, 0, z, b->src_stride, &sse); + left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); - if (left < bestmse) - { - *bestmv = this_mv; - bestmse = left; - *distortion = thismse; - *sse1 = sse; 
- } + if (left < bestmse) { + *bestmv = this_mv; + bestmse = left; + *distortion = thismse; + *sse1 = sse; + } - this_mv.as_mv.col += 8; - thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse); - right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); + this_mv.as_mv.col += 8; + /* "halfpix" horizontal variance */ + thismse = vfp->svf(y, y_stride, 4, 0, z, b->src_stride, &sse); + right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); - if (right < bestmse) - { - *bestmv = this_mv; - bestmse = right; - *distortion = thismse; - *sse1 = sse; - } + if (right < bestmse) { + *bestmv = this_mv; + bestmse = right; + *distortion = thismse; + *sse1 = sse; + } - /* go up then down and check error */ - this_mv.as_mv.col = startmv.as_mv.col; - this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4); - thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse); - up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); + /* go up then down and check error */ + this_mv.as_mv.col = startmv.as_mv.col; + this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4); + /* "halfpix" vertical variance */ + thismse = vfp->svf(y - y_stride, y_stride, 0, 4, z, b->src_stride, &sse); + up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); - if (up < bestmse) - { - *bestmv = this_mv; - bestmse = up; - *distortion = thismse; - *sse1 = sse; - } + if (up < bestmse) { + *bestmv = this_mv; + bestmse = up; + *distortion = thismse; + *sse1 = sse; + } - this_mv.as_mv.row += 8; - thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse); - down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); + this_mv.as_mv.row += 8; + /* "halfpix" vertical variance */ + thismse = vfp->svf(y, y_stride, 0, 4, z, b->src_stride, &sse); + down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); - if (down < bestmse) - { - *bestmv = this_mv; - bestmse = down; - *distortion = thismse; - *sse1 = sse; - } + if 
(down < bestmse) { + *bestmv = this_mv; + bestmse = down; + *distortion = thismse; + *sse1 = sse; + } + /* now check 1 more diagonal */ + whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2); + this_mv = startmv; - /* now check 1 more diagonal */ - whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2); - this_mv = startmv; - - switch (whichdir) - { + switch (whichdir) { case 0: - this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4; - this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4; - thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse); - break; + this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4; + this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4; + /* "halfpix" horizontal/vertical variance */ + thismse = + vfp->svf(y - 1 - y_stride, y_stride, 4, 4, z, b->src_stride, &sse); + break; case 1: - this_mv.as_mv.col += 4; - this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4; - thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse); - break; + this_mv.as_mv.col += 4; + this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4; + /* "halfpix" horizontal/vertical variance */ + thismse = vfp->svf(y - y_stride, y_stride, 4, 4, z, b->src_stride, &sse); + break; case 2: - this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4; - this_mv.as_mv.row += 4; - thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse); - break; + this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4; + this_mv.as_mv.row += 4; + /* "halfpix" horizontal/vertical variance */ + thismse = vfp->svf(y - 1, y_stride, 4, 4, z, b->src_stride, &sse); + break; case 3: default: - this_mv.as_mv.col += 4; - this_mv.as_mv.row += 4; - thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse); - break; - } + this_mv.as_mv.col += 4; + this_mv.as_mv.row += 4; + /* "halfpix" horizontal/vertical variance */ + thismse = vfp->svf(y, y_stride, 4, 4, z, b->src_stride, &sse); + break; + } - diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); + diag = thismse + 
mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); - if (diag < bestmse) - { - *bestmv = this_mv; - bestmse = diag; - *distortion = thismse; - *sse1 = sse; - } + if (diag < bestmse) { + *bestmv = this_mv; + bestmse = diag; + *distortion = thismse; + *sse1 = sse; + } + /* time to check quarter pels. */ + if (bestmv->as_mv.row < startmv.as_mv.row) y -= y_stride; - /* time to check quarter pels. */ - if (bestmv->as_mv.row < startmv.as_mv.row) - y -= y_stride; + if (bestmv->as_mv.col < startmv.as_mv.col) y--; - if (bestmv->as_mv.col < startmv.as_mv.col) - y--; + startmv = *bestmv; - startmv = *bestmv; + /* go left then right and check error */ + this_mv.as_mv.row = startmv.as_mv.row; + if (startmv.as_mv.col & 7) { + this_mv.as_mv.col = startmv.as_mv.col - 2; + thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, + this_mv.as_mv.row & 7, z, b->src_stride, &sse); + } else { + this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6; + thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, + b->src_stride, &sse); + } + left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); - /* go left then right and check error */ - this_mv.as_mv.row = startmv.as_mv.row; + if (left < bestmse) { + *bestmv = this_mv; + bestmse = left; + *distortion = thismse; + *sse1 = sse; + } - if (startmv.as_mv.col & 7) - { - this_mv.as_mv.col = startmv.as_mv.col - 2; - thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse); - } - else - { - this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6; - thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse); - } + this_mv.as_mv.col += 4; + thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, + z, b->src_stride, &sse); + right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); - left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); + if (right < bestmse) { + *bestmv = this_mv; + bestmse = right; + *distortion = 
thismse; + *sse1 = sse; + } - if (left < bestmse) - { - *bestmv = this_mv; - bestmse = left; - *distortion = thismse; - *sse1 = sse; - } + /* go up then down and check error */ + this_mv.as_mv.col = startmv.as_mv.col; - this_mv.as_mv.col += 4; - thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse); - right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); + if (startmv.as_mv.row & 7) { + this_mv.as_mv.row = startmv.as_mv.row - 2; + thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, + this_mv.as_mv.row & 7, z, b->src_stride, &sse); + } else { + this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6; + thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, + b->src_stride, &sse); + } - if (right < bestmse) - { - *bestmv = this_mv; - bestmse = right; - *distortion = thismse; - *sse1 = sse; - } + up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); - /* go up then down and check error */ - this_mv.as_mv.col = startmv.as_mv.col; + if (up < bestmse) { + *bestmv = this_mv; + bestmse = up; + *distortion = thismse; + *sse1 = sse; + } - if (startmv.as_mv.row & 7) - { - this_mv.as_mv.row = startmv.as_mv.row - 2; - thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse); - } - else - { - this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6; - thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse); - } + this_mv.as_mv.row += 4; + thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, + z, b->src_stride, &sse); + down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); - up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); + if (down < bestmse) { + *bestmv = this_mv; + bestmse = down; + *distortion = thismse; + *sse1 = sse; + } - if (up < bestmse) - { - *bestmv = this_mv; - bestmse = up; - *distortion = thismse; - *sse1 = sse; - } + /* now check 1 more 
diagonal */ + whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2); - this_mv.as_mv.row += 4; - thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse); - down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); + this_mv = startmv; - if (down < bestmse) - { - *bestmv = this_mv; - bestmse = down; - *distortion = thismse; - *sse1 = sse; - } - - - /* now check 1 more diagonal */ - whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2); - - this_mv = startmv; - - switch (whichdir) - { + switch (whichdir) { case 0: - if (startmv.as_mv.row & 7) - { - this_mv.as_mv.row -= 2; + if (startmv.as_mv.row & 7) { + this_mv.as_mv.row -= 2; - if (startmv.as_mv.col & 7) - { - this_mv.as_mv.col -= 2; - thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse); - } - else - { - this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6; - thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);; - } + if (startmv.as_mv.col & 7) { + this_mv.as_mv.col -= 2; + thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, + this_mv.as_mv.row & 7, z, b->src_stride, &sse); + } else { + this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6; + thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, + b->src_stride, &sse); } - else - { - this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6; + } else { + this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6; - if (startmv.as_mv.col & 7) - { - this_mv.as_mv.col -= 2; - thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse); - } - else - { - this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6; - thismse = vfp->svf(y - y_stride - 1, y_stride, 6, 6, z, b->src_stride, &sse); - } + if (startmv.as_mv.col & 7) { + this_mv.as_mv.col -= 2; + thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, + z, b->src_stride, &sse); + } else { + this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6; + thismse = 
vfp->svf(y - y_stride - 1, y_stride, 6, 6, z, b->src_stride, + &sse); } + } - break; + break; case 1: - this_mv.as_mv.col += 2; + this_mv.as_mv.col += 2; - if (startmv.as_mv.row & 7) - { - this_mv.as_mv.row -= 2; - thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse); - } - else - { - this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6; - thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse); - } + if (startmv.as_mv.row & 7) { + this_mv.as_mv.row -= 2; + thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, + this_mv.as_mv.row & 7, z, b->src_stride, &sse); + } else { + this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6; + thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, + b->src_stride, &sse); + } - break; + break; case 2: - this_mv.as_mv.row += 2; + this_mv.as_mv.row += 2; - if (startmv.as_mv.col & 7) - { - this_mv.as_mv.col -= 2; - thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse); - } - else - { - this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6; - thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse); - } + if (startmv.as_mv.col & 7) { + this_mv.as_mv.col -= 2; + thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, + this_mv.as_mv.row & 7, z, b->src_stride, &sse); + } else { + this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6; + thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, + b->src_stride, &sse); + } - break; + break; case 3: - this_mv.as_mv.col += 2; - this_mv.as_mv.row += 2; - thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse); - break; - } + this_mv.as_mv.col += 2; + this_mv.as_mv.row += 2; + thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, + this_mv.as_mv.row & 7, z, b->src_stride, &sse); + break; + } - diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); + diag = thismse + 
mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); - if (diag < bestmse) - { - *bestmv = this_mv; - bestmse = diag; - *distortion = thismse; - *sse1 = sse; - } + if (diag < bestmse) { + *bestmv = this_mv; + bestmse = diag; + *distortion = thismse; + *sse1 = sse; + } - return bestmse; + return bestmse; } int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, @@ -678,1356 +665,1249 @@ int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion, - unsigned int *sse1) -{ - int bestmse = INT_MAX; - int_mv startmv; - int_mv this_mv; - unsigned char *z = (*(b->base_src) + b->src); - int left, right, up, down, diag; - unsigned int sse; - int whichdir ; - int thismse; - int y_stride; - int pre_stride = x->e_mbd.pre.y_stride; - unsigned char *base_pre = x->e_mbd.pre.y_buffer; + unsigned int *sse1) { + int bestmse = INT_MAX; + int_mv startmv; + int_mv this_mv; + unsigned char *z = (*(b->base_src) + b->src); + int left, right, up, down, diag; + unsigned int sse; + int whichdir; + int thismse; + int y_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; #if ARCH_X86 || ARCH_X86_64 - MACROBLOCKD *xd = &x->e_mbd; - unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col; - unsigned char *y; + MACROBLOCKD *xd = &x->e_mbd; + unsigned char *y_0 = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + + bestmv->as_mv.col; + unsigned char *y; - y_stride = 32; - /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */ - vfp->copymem(y_0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18); - y = xd->y_buf + y_stride + 1; + y_stride = 32; + /* Copy 18 rows x 32 cols area to intermediate buffer before searching. 
*/ + vfp->copymem(y_0 - 1 - pre_stride, pre_stride, xd->y_buf, y_stride, 18); + y = xd->y_buf + y_stride + 1; #else - unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + bestmv->as_mv.col; - y_stride = pre_stride; + unsigned char *y = base_pre + d->offset + (bestmv->as_mv.row) * pre_stride + + bestmv->as_mv.col; + y_stride = pre_stride; #endif - /* central mv */ - bestmv->as_mv.row *= 8; - bestmv->as_mv.col *= 8; - startmv = *bestmv; + /* central mv */ + bestmv->as_mv.row *= 8; + bestmv->as_mv.col *= 8; + startmv = *bestmv; - /* calculate central point error */ - bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1); - *distortion = bestmse; - bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit); + /* calculate central point error */ + bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1); + *distortion = bestmse; + bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit); - /* go left then right and check error */ - this_mv.as_mv.row = startmv.as_mv.row; - this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4); - thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse); - left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); + /* go left then right and check error */ + this_mv.as_mv.row = startmv.as_mv.row; + this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4); + /* "halfpix" horizontal variance */ + thismse = vfp->svf(y - 1, y_stride, 4, 0, z, b->src_stride, &sse); + left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); - if (left < bestmse) - { - *bestmv = this_mv; - bestmse = left; - *distortion = thismse; - *sse1 = sse; - } + if (left < bestmse) { + *bestmv = this_mv; + bestmse = left; + *distortion = thismse; + *sse1 = sse; + } - this_mv.as_mv.col += 8; - thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse); - right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); + this_mv.as_mv.col += 8; + /* "halfpix" horizontal variance */ + thismse = vfp->svf(y, 
y_stride, 4, 0, z, b->src_stride, &sse); + right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); - if (right < bestmse) - { - *bestmv = this_mv; - bestmse = right; - *distortion = thismse; - *sse1 = sse; - } + if (right < bestmse) { + *bestmv = this_mv; + bestmse = right; + *distortion = thismse; + *sse1 = sse; + } - /* go up then down and check error */ - this_mv.as_mv.col = startmv.as_mv.col; - this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4); - thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse); - up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); + /* go up then down and check error */ + this_mv.as_mv.col = startmv.as_mv.col; + this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4); + /* "halfpix" vertical variance */ + thismse = vfp->svf(y - y_stride, y_stride, 0, 4, z, b->src_stride, &sse); + up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); - if (up < bestmse) - { - *bestmv = this_mv; - bestmse = up; - *distortion = thismse; - *sse1 = sse; - } + if (up < bestmse) { + *bestmv = this_mv; + bestmse = up; + *distortion = thismse; + *sse1 = sse; + } - this_mv.as_mv.row += 8; - thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse); - down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); + this_mv.as_mv.row += 8; + /* "halfpix" vertical variance */ + thismse = vfp->svf(y, y_stride, 0, 4, z, b->src_stride, &sse); + down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); - if (down < bestmse) - { - *bestmv = this_mv; - bestmse = down; - *distortion = thismse; - *sse1 = sse; - } + if (down < bestmse) { + *bestmv = this_mv; + bestmse = down; + *distortion = thismse; + *sse1 = sse; + } - /* now check 1 more diagonal - */ - whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2); - this_mv = startmv; + /* now check 1 more diagonal - */ + whichdir = (left < right ? 0 : 1) + (up < down ? 
0 : 2); + this_mv = startmv; - switch (whichdir) - { + switch (whichdir) { case 0: - this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4; - this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4; - thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse); - break; + this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4; + this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4; + /* "halfpix" horizontal/vertical variance */ + thismse = + vfp->svf(y - 1 - y_stride, y_stride, 4, 4, z, b->src_stride, &sse); + break; case 1: - this_mv.as_mv.col += 4; - this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4; - thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse); - break; + this_mv.as_mv.col += 4; + this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4; + /* "halfpix" horizontal/vertical variance */ + thismse = vfp->svf(y - y_stride, y_stride, 4, 4, z, b->src_stride, &sse); + break; case 2: - this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4; - this_mv.as_mv.row += 4; - thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse); - break; + this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4; + this_mv.as_mv.row += 4; + /* "halfpix" horizontal/vertical variance */ + thismse = vfp->svf(y - 1, y_stride, 4, 4, z, b->src_stride, &sse); + break; case 3: default: - this_mv.as_mv.col += 4; - this_mv.as_mv.row += 4; - thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse); - break; - } + this_mv.as_mv.col += 4; + this_mv.as_mv.row += 4; + /* "halfpix" horizontal/vertical variance */ + thismse = vfp->svf(y, y_stride, 4, 4, z, b->src_stride, &sse); + break; + } - diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); + diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit); - if (diag < bestmse) - { - *bestmv = this_mv; - bestmse = diag; - *distortion = thismse; - *sse1 = sse; - } + if (diag < bestmse) { + *bestmv = this_mv; + bestmse = diag; + *distortion = thismse; + *sse1 = sse; + } - return bestmse; + return bestmse; 
} -#define CHECK_BOUNDS(range) \ -{\ - all_in = 1;\ - all_in &= ((br-range) >= x->mv_row_min);\ - all_in &= ((br+range) <= x->mv_row_max);\ - all_in &= ((bc-range) >= x->mv_col_min);\ - all_in &= ((bc+range) <= x->mv_col_max);\ -} +#define CHECK_BOUNDS(range) \ + { \ + all_in = 1; \ + all_in &= ((br - range) >= x->mv_row_min); \ + all_in &= ((br + range) <= x->mv_row_max); \ + all_in &= ((bc - range) >= x->mv_col_min); \ + all_in &= ((bc + range) <= x->mv_col_max); \ + } -#define CHECK_POINT \ -{\ - if (this_mv.as_mv.col < x->mv_col_min) continue;\ - if (this_mv.as_mv.col > x->mv_col_max) continue;\ - if (this_mv.as_mv.row < x->mv_row_min) continue;\ - if (this_mv.as_mv.row > x->mv_row_max) continue;\ -} +#define CHECK_POINT \ + { \ + if (this_mv.as_mv.col < x->mv_col_min) continue; \ + if (this_mv.as_mv.col > x->mv_col_max) continue; \ + if (this_mv.as_mv.row < x->mv_row_min) continue; \ + if (this_mv.as_mv.row > x->mv_row_max) continue; \ + } -#define CHECK_BETTER \ -{\ - if (thissad < bestsad)\ - {\ - thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);\ - if (thissad < bestsad)\ - {\ - bestsad = thissad;\ - best_site = i;\ - }\ - }\ -} +#define CHECK_BETTER \ + { \ + if (thissad < bestsad) { \ + thissad += \ + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); \ + if (thissad < bestsad) { \ + bestsad = thissad; \ + best_site = i; \ + } \ + } \ + } -static const MV next_chkpts[6][3] = -{ - {{ -2, 0}, { -1, -2}, {1, -2}}, - {{ -1, -2}, {1, -2}, {2, 0}}, - {{1, -2}, {2, 0}, {1, 2}}, - {{2, 0}, {1, 2}, { -1, 2}}, - {{1, 2}, { -1, 2}, { -2, 0}}, - {{ -1, 2}, { -2, 0}, { -1, -2}} +static const MV next_chkpts[6][3] = { + { { -2, 0 }, { -1, -2 }, { 1, -2 } }, { { -1, -2 }, { 1, -2 }, { 2, 0 } }, + { { 1, -2 }, { 2, 0 }, { 1, 2 } }, { { 2, 0 }, { 1, 2 }, { -1, 2 } }, + { { 1, 2 }, { -1, 2 }, { -2, 0 } }, { { -1, 2 }, { -2, 0 }, { -1, -2 } } }; -int vp8_hex_search -( - MACROBLOCK *x, - BLOCK *b, - BLOCKD *d, - int_mv *ref_mv, - int_mv 
*best_mv, - int search_param, - int sad_per_bit, - const vp8_variance_fn_ptr_t *vfp, - int *mvsadcost[2], - int *mvcost[2], - int_mv *center_mv -) -{ - MV hex[6] = { { -1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0} } ; - MV neighbors[4] = {{0, -1}, { -1, 0}, {1, 0}, {0, 1}} ; - int i, j; +int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv, + int_mv *best_mv, int search_param, int sad_per_bit, + const vp8_variance_fn_ptr_t *vfp, int *mvsadcost[2], + int *mvcost[2], int_mv *center_mv) { + MV hex[6] = { + { -1, -2 }, { 1, -2 }, { 2, 0 }, { 1, 2 }, { -1, 2 }, { -2, 0 } + }; + MV neighbors[4] = { { 0, -1 }, { -1, 0 }, { 1, 0 }, { 0, 1 } }; + int i, j; - unsigned char *what = (*(b->base_src) + b->src); - int what_stride = b->src_stride; - int pre_stride = x->e_mbd.pre.y_stride; - unsigned char *base_pre = x->e_mbd.pre.y_buffer; + unsigned char *what = (*(b->base_src) + b->src); + int what_stride = b->src_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; - int in_what_stride = pre_stride; - int br, bc; - int_mv this_mv; - unsigned int bestsad; - unsigned int thissad; - unsigned char *base_offset; - unsigned char *this_offset; - int k = -1; - int all_in; - int best_site = -1; - int hex_range = 127; - int dia_range = 8; + int in_what_stride = pre_stride; + int br, bc; + int_mv this_mv; + unsigned int bestsad; + unsigned int thissad; + unsigned char *base_offset; + unsigned char *this_offset; + int k = -1; + int all_in; + int best_site = -1; + int hex_range = 127; + int dia_range = 8; - int_mv fcenter_mv; - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; + int_mv fcenter_mv; + fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; + fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; - (void)mvcost; + (void)mvcost; - /* adjust ref_mv to make sure it is within MV range */ - vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); 
- br = ref_mv->as_mv.row; - bc = ref_mv->as_mv.col; + /* adjust ref_mv to make sure it is within MV range */ + vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, + x->mv_row_max); + br = ref_mv->as_mv.row; + bc = ref_mv->as_mv.col; - /* Work out the start point for the search */ - base_offset = (unsigned char *)(base_pre + d->offset); - this_offset = base_offset + (br * (pre_stride)) + bc; - this_mv.as_mv.row = br; - this_mv.as_mv.col = bc; - bestsad = vfp->sdf(what, what_stride, this_offset, in_what_stride) - + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); + /* Work out the start point for the search */ + base_offset = (unsigned char *)(base_pre + d->offset); + this_offset = base_offset + (br * (pre_stride)) + bc; + this_mv.as_mv.row = br; + this_mv.as_mv.col = bc; + bestsad = vfp->sdf(what, what_stride, this_offset, in_what_stride) + + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); #if CONFIG_MULTI_RES_ENCODING - /* Lower search range based on prediction info */ - if (search_param >= 6) goto cal_neighbors; - else if (search_param >= 5) hex_range = 4; - else if (search_param >= 4) hex_range = 6; - else if (search_param >= 3) hex_range = 15; - else if (search_param >= 2) hex_range = 31; - else if (search_param >= 1) hex_range = 63; + /* Lower search range based on prediction info */ + if (search_param >= 6) + goto cal_neighbors; + else if (search_param >= 5) + hex_range = 4; + else if (search_param >= 4) + hex_range = 6; + else if (search_param >= 3) + hex_range = 15; + else if (search_param >= 2) + hex_range = 31; + else if (search_param >= 1) + hex_range = 63; - dia_range = 8; + dia_range = 8; #else - (void)search_param; + (void)search_param; #endif - /* hex search */ + /* hex search */ + CHECK_BOUNDS(2) + + if (all_in) { + for (i = 0; i < 6; ++i) { + this_mv.as_mv.row = br + hex[i].row; + this_mv.as_mv.col = bc + hex[i].col; + this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + + this_mv.as_mv.col; + 
thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride); + CHECK_BETTER + } + } else { + for (i = 0; i < 6; ++i) { + this_mv.as_mv.row = br + hex[i].row; + this_mv.as_mv.col = bc + hex[i].col; + CHECK_POINT + this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + + this_mv.as_mv.col; + thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride); + CHECK_BETTER + } + } + + if (best_site == -1) { + goto cal_neighbors; + } else { + br += hex[best_site].row; + bc += hex[best_site].col; + k = best_site; + } + + for (j = 1; j < hex_range; ++j) { + best_site = -1; CHECK_BOUNDS(2) - if(all_in) - { - for (i = 0; i < 6; i++) - { - this_mv.as_mv.row = br + hex[i].row; - this_mv.as_mv.col = bc + hex[i].col; - this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col; - thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride); - CHECK_BETTER - } - }else - { - for (i = 0; i < 6; i++) - { - this_mv.as_mv.row = br + hex[i].row; - this_mv.as_mv.col = bc + hex[i].col; - CHECK_POINT - this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col; - thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride); - CHECK_BETTER - } + if (all_in) { + for (i = 0; i < 3; ++i) { + this_mv.as_mv.row = br + next_chkpts[k][i].row; + this_mv.as_mv.col = bc + next_chkpts[k][i].col; + this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + + this_mv.as_mv.col; + thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride); + CHECK_BETTER + } + } else { + for (i = 0; i < 3; ++i) { + this_mv.as_mv.row = br + next_chkpts[k][i].row; + this_mv.as_mv.col = bc + next_chkpts[k][i].col; + CHECK_POINT + this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + + this_mv.as_mv.col; + thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride); + CHECK_BETTER + } } - if (best_site == -1) - goto cal_neighbors; - else - { - br += hex[best_site].row; - bc += hex[best_site].col; - k 
= best_site; + if (best_site == -1) { + break; + } else { + br += next_chkpts[k][best_site].row; + bc += next_chkpts[k][best_site].col; + k += 5 + best_site; + if (k >= 12) { + k -= 12; + } else if (k >= 6) { + k -= 6; + } } + } - for (j = 1; j < hex_range; j++) - { - best_site = -1; - CHECK_BOUNDS(2) - - if(all_in) - { - for (i = 0; i < 3; i++) - { - this_mv.as_mv.row = br + next_chkpts[k][i].row; - this_mv.as_mv.col = bc + next_chkpts[k][i].col; - this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col; - thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride); - CHECK_BETTER - } - }else - { - for (i = 0; i < 3; i++) - { - this_mv.as_mv.row = br + next_chkpts[k][i].row; - this_mv.as_mv.col = bc + next_chkpts[k][i].col; - CHECK_POINT - this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col; - thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride); - CHECK_BETTER - } - } - - if (best_site == -1) - break; - else - { - br += next_chkpts[k][best_site].row; - bc += next_chkpts[k][best_site].col; - k += 5 + best_site; - if (k >= 12) k -= 12; - else if (k >= 6) k -= 6; - } - } - - /* check 4 1-away neighbors */ +/* check 4 1-away neighbors */ cal_neighbors: - for (j = 0; j < dia_range; j++) - { - best_site = -1; - CHECK_BOUNDS(1) + for (j = 0; j < dia_range; ++j) { + best_site = -1; + CHECK_BOUNDS(1) - if(all_in) - { - for (i = 0; i < 4; i++) - { - this_mv.as_mv.row = br + neighbors[i].row; - this_mv.as_mv.col = bc + neighbors[i].col; - this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col; - thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride); - CHECK_BETTER - } - }else - { - for (i = 0; i < 4; i++) - { - this_mv.as_mv.row = br + neighbors[i].row; - this_mv.as_mv.col = bc + neighbors[i].col; - CHECK_POINT - this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col; - thissad = vfp->sdf(what, what_stride, 
this_offset, in_what_stride); - CHECK_BETTER - } - } - - if (best_site == -1) - break; - else - { - br += neighbors[best_site].row; - bc += neighbors[best_site].col; - } + if (all_in) { + for (i = 0; i < 4; ++i) { + this_mv.as_mv.row = br + neighbors[i].row; + this_mv.as_mv.col = bc + neighbors[i].col; + this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + + this_mv.as_mv.col; + thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride); + CHECK_BETTER + } + } else { + for (i = 0; i < 4; ++i) { + this_mv.as_mv.row = br + neighbors[i].row; + this_mv.as_mv.col = bc + neighbors[i].col; + CHECK_POINT + this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + + this_mv.as_mv.col; + thissad = vfp->sdf(what, what_stride, this_offset, in_what_stride); + CHECK_BETTER + } } - best_mv->as_mv.row = br; - best_mv->as_mv.col = bc; + if (best_site == -1) { + break; + } else { + br += neighbors[best_site].row; + bc += neighbors[best_site].col; + } + } - return bestsad; + best_mv->as_mv.row = br; + best_mv->as_mv.col = bc; + + return bestsad; } #undef CHECK_BOUNDS #undef CHECK_POINT #undef CHECK_BETTER -int vp8_diamond_search_sad_c -( - MACROBLOCK *x, - BLOCK *b, - BLOCKD *d, - int_mv *ref_mv, - int_mv *best_mv, - int search_param, - int sad_per_bit, - int *num00, - vp8_variance_fn_ptr_t *fn_ptr, - int *mvcost[2], - int_mv *center_mv -) -{ - int i, j, step; +int vp8_diamond_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv, + int_mv *best_mv, int search_param, int sad_per_bit, + int *num00, vp8_variance_fn_ptr_t *fn_ptr, + int *mvcost[2], int_mv *center_mv) { + int i, j, step; - unsigned char *what = (*(b->base_src) + b->src); - int what_stride = b->src_stride; - unsigned char *in_what; - int pre_stride = x->e_mbd.pre.y_stride; - unsigned char *base_pre = x->e_mbd.pre.y_buffer; - int in_what_stride = pre_stride; - unsigned char *best_address; + unsigned char *what = (*(b->base_src) + b->src); + int what_stride = b->src_stride; + 
unsigned char *in_what; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + int in_what_stride = pre_stride; + unsigned char *best_address; - int tot_steps; - int_mv this_mv; + int tot_steps; + int_mv this_mv; - unsigned int bestsad; - unsigned int thissad; - int best_site = 0; - int last_site = 0; + unsigned int bestsad; + unsigned int thissad; + int best_site = 0; + int last_site = 0; - int ref_row; - int ref_col; - int this_row_offset; - int this_col_offset; - search_site *ss; + int ref_row; + int ref_col; + int this_row_offset; + int this_col_offset; + search_site *ss; - unsigned char *check_here; + unsigned char *check_here; - int *mvsadcost[2]; - int_mv fcenter_mv; + int *mvsadcost[2]; + int_mv fcenter_mv; - mvsadcost[0] = x->mvsadcost[0]; - mvsadcost[1] = x->mvsadcost[1]; - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; + mvsadcost[0] = x->mvsadcost[0]; + mvsadcost[1] = x->mvsadcost[1]; + fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; + fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; - vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); - ref_row = ref_mv->as_mv.row; - ref_col = ref_mv->as_mv.col; - *num00 = 0; - best_mv->as_mv.row = ref_row; - best_mv->as_mv.col = ref_col; + vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, + x->mv_row_max); + ref_row = ref_mv->as_mv.row; + ref_col = ref_mv->as_mv.col; + *num00 = 0; + best_mv->as_mv.row = ref_row; + best_mv->as_mv.col = ref_col; - /* Work out the start point for the search */ - in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) + ref_col); - best_address = in_what; + /* Work out the start point for the search */ + in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) + + ref_col); + best_address = in_what; - /* Check the starting position */ - bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) - + 
mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit); + /* Check the starting position */ + bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) + + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit); - /* search_param determines the length of the initial step and hence - * the number of iterations 0 = initial step (MAX_FIRST_STEP) pel : - * 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc. - */ - ss = &x->ss[search_param * x->searches_per_step]; - tot_steps = (x->ss_count / x->searches_per_step) - search_param; + /* search_param determines the length of the initial step and hence + * the number of iterations 0 = initial step (MAX_FIRST_STEP) pel : + * 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc. + */ + ss = &x->ss[search_param * x->searches_per_step]; + tot_steps = (x->ss_count / x->searches_per_step) - search_param; - i = 1; + i = 1; - for (step = 0; step < tot_steps ; step++) - { - for (j = 0 ; j < x->searches_per_step ; j++) - { - /* Trap illegal vectors */ - this_row_offset = best_mv->as_mv.row + ss[i].mv.row; - this_col_offset = best_mv->as_mv.col + ss[i].mv.col; + for (step = 0; step < tot_steps; ++step) { + for (j = 0; j < x->searches_per_step; ++j) { + /* Trap illegal vectors */ + this_row_offset = best_mv->as_mv.row + ss[i].mv.row; + this_col_offset = best_mv->as_mv.col + ss[i].mv.col; - if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) && - (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max)) + if ((this_col_offset > x->mv_col_min) && + (this_col_offset < x->mv_col_max) && + (this_row_offset > x->mv_row_min) && + (this_row_offset < x->mv_row_max)) - { - check_here = ss[i].offset + best_address; - thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride); + { + check_here = ss[i].offset + best_address; + thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride); - if (thissad < bestsad) - { - this_mv.as_mv.row = this_row_offset; 
- this_mv.as_mv.col = this_col_offset; - thissad += mvsad_err_cost(&this_mv, &fcenter_mv, - mvsadcost, sad_per_bit); + if (thissad < bestsad) { + this_mv.as_mv.row = this_row_offset; + this_mv.as_mv.col = this_col_offset; + thissad += + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); - if (thissad < bestsad) - { - bestsad = thissad; - best_site = i; - } - } - } - - i++; + if (thissad < bestsad) { + bestsad = thissad; + best_site = i; + } } + } - if (best_site != last_site) - { - best_mv->as_mv.row += ss[best_site].mv.row; - best_mv->as_mv.col += ss[best_site].mv.col; - best_address += ss[best_site].offset; - last_site = best_site; - } - else if (best_address == in_what) - (*num00)++; + i++; } - this_mv.as_mv.row = best_mv->as_mv.row << 3; - this_mv.as_mv.col = best_mv->as_mv.col << 3; + if (best_site != last_site) { + best_mv->as_mv.row += ss[best_site].mv.row; + best_mv->as_mv.col += ss[best_site].mv.col; + best_address += ss[best_site].offset; + last_site = best_site; + } else if (best_address == in_what) { + (*num00)++; + } + } - return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) - + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit); + this_mv.as_mv.row = best_mv->as_mv.row << 3; + this_mv.as_mv.col = best_mv->as_mv.col << 3; + + return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) + + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit); } -int vp8_diamond_search_sadx4 -( - MACROBLOCK *x, - BLOCK *b, - BLOCKD *d, - int_mv *ref_mv, - int_mv *best_mv, - int search_param, - int sad_per_bit, - int *num00, - vp8_variance_fn_ptr_t *fn_ptr, - int *mvcost[2], - int_mv *center_mv -) -{ - int i, j, step; +int vp8_diamond_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv, + int_mv *best_mv, int search_param, int sad_per_bit, + int *num00, vp8_variance_fn_ptr_t *fn_ptr, + int *mvcost[2], int_mv *center_mv) { + int i, j, step; - unsigned char *what = (*(b->base_src) + b->src); - int 
what_stride = b->src_stride; - unsigned char *in_what; - int pre_stride = x->e_mbd.pre.y_stride; - unsigned char *base_pre = x->e_mbd.pre.y_buffer; - int in_what_stride = pre_stride; - unsigned char *best_address; + unsigned char *what = (*(b->base_src) + b->src); + int what_stride = b->src_stride; + unsigned char *in_what; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + int in_what_stride = pre_stride; + unsigned char *best_address; - int tot_steps; - int_mv this_mv; + int tot_steps; + int_mv this_mv; - unsigned int bestsad; - unsigned int thissad; - int best_site = 0; - int last_site = 0; + unsigned int bestsad; + unsigned int thissad; + int best_site = 0; + int last_site = 0; - int ref_row; - int ref_col; - int this_row_offset; - int this_col_offset; - search_site *ss; + int ref_row; + int ref_col; + int this_row_offset; + int this_col_offset; + search_site *ss; - unsigned char *check_here; + unsigned char *check_here; - int *mvsadcost[2]; - int_mv fcenter_mv; + int *mvsadcost[2]; + int_mv fcenter_mv; - mvsadcost[0] = x->mvsadcost[0]; - mvsadcost[1] = x->mvsadcost[1]; - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; + mvsadcost[0] = x->mvsadcost[0]; + mvsadcost[1] = x->mvsadcost[1]; + fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; + fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; - vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); - ref_row = ref_mv->as_mv.row; - ref_col = ref_mv->as_mv.col; - *num00 = 0; - best_mv->as_mv.row = ref_row; - best_mv->as_mv.col = ref_col; + vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, + x->mv_row_max); + ref_row = ref_mv->as_mv.row; + ref_col = ref_mv->as_mv.col; + *num00 = 0; + best_mv->as_mv.row = ref_row; + best_mv->as_mv.col = ref_col; - /* Work out the start point for the search */ - in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) + ref_col); - 
best_address = in_what; + /* Work out the start point for the search */ + in_what = (unsigned char *)(base_pre + d->offset + (ref_row * pre_stride) + + ref_col); + best_address = in_what; - /* Check the starting position */ - bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) - + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit); + /* Check the starting position */ + bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) + + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit); - /* search_param determines the length of the initial step and hence the - * number of iterations 0 = initial step (MAX_FIRST_STEP) pel : 1 = - * (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc. + /* search_param determines the length of the initial step and hence the + * number of iterations 0 = initial step (MAX_FIRST_STEP) pel : 1 = + * (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc. + */ + ss = &x->ss[search_param * x->searches_per_step]; + tot_steps = (x->ss_count / x->searches_per_step) - search_param; + + i = 1; + + for (step = 0; step < tot_steps; ++step) { + int all_in = 1, t; + + /* To know if all neighbor points are within the bounds, 4 bounds + * checking are enough instead of checking 4 bounds for each + * points. 
*/ - ss = &x->ss[search_param * x->searches_per_step]; - tot_steps = (x->ss_count / x->searches_per_step) - search_param; + all_in &= ((best_mv->as_mv.row + ss[i].mv.row) > x->mv_row_min); + all_in &= ((best_mv->as_mv.row + ss[i + 1].mv.row) < x->mv_row_max); + all_in &= ((best_mv->as_mv.col + ss[i + 2].mv.col) > x->mv_col_min); + all_in &= ((best_mv->as_mv.col + ss[i + 3].mv.col) < x->mv_col_max); - i = 1; + if (all_in) { + unsigned int sad_array[4]; - for (step = 0; step < tot_steps ; step++) - { - int all_in = 1, t; + for (j = 0; j < x->searches_per_step; j += 4) { + const unsigned char *block_offset[4]; - /* To know if all neighbor points are within the bounds, 4 bounds - * checking are enough instead of checking 4 bounds for each - * points. - */ - all_in &= ((best_mv->as_mv.row + ss[i].mv.row)> x->mv_row_min); - all_in &= ((best_mv->as_mv.row + ss[i+1].mv.row) < x->mv_row_max); - all_in &= ((best_mv->as_mv.col + ss[i+2].mv.col) > x->mv_col_min); - all_in &= ((best_mv->as_mv.col + ss[i+3].mv.col) < x->mv_col_max); + for (t = 0; t < 4; ++t) { + block_offset[t] = ss[i + t].offset + best_address; + } - if (all_in) - { - unsigned int sad_array[4]; + fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, + sad_array); - for (j = 0 ; j < x->searches_per_step ; j += 4) - { - const unsigned char *block_offset[4]; + for (t = 0; t < 4; t++, i++) { + if (sad_array[t] < bestsad) { + this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row; + this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col; + sad_array[t] += + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); - for (t = 0; t < 4; t++) - block_offset[t] = ss[i+t].offset + best_address; - - fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array); - - for (t = 0; t < 4; t++, i++) - { - if (sad_array[t] < bestsad) - { - this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row; - this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col; - sad_array[t] += mvsad_err_cost(&this_mv, 
&fcenter_mv, - mvsadcost, sad_per_bit); - - if (sad_array[t] < bestsad) - { - bestsad = sad_array[t]; - best_site = i; - } - } - } + if (sad_array[t] < bestsad) { + bestsad = sad_array[t]; + best_site = i; } + } } - else - { - for (j = 0 ; j < x->searches_per_step ; j++) - { - /* Trap illegal vectors */ - this_row_offset = best_mv->as_mv.row + ss[i].mv.row; - this_col_offset = best_mv->as_mv.col + ss[i].mv.col; + } + } else { + for (j = 0; j < x->searches_per_step; ++j) { + /* Trap illegal vectors */ + this_row_offset = best_mv->as_mv.row + ss[i].mv.row; + this_col_offset = best_mv->as_mv.col + ss[i].mv.col; - if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) && - (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max)) - { - check_here = ss[i].offset + best_address; - thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride); + if ((this_col_offset > x->mv_col_min) && + (this_col_offset < x->mv_col_max) && + (this_row_offset > x->mv_row_min) && + (this_row_offset < x->mv_row_max)) { + check_here = ss[i].offset + best_address; + thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride); - if (thissad < bestsad) - { - this_mv.as_mv.row = this_row_offset; - this_mv.as_mv.col = this_col_offset; - thissad += mvsad_err_cost(&this_mv, &fcenter_mv, - mvsadcost, sad_per_bit); + if (thissad < bestsad) { + this_mv.as_mv.row = this_row_offset; + this_mv.as_mv.col = this_col_offset; + thissad += + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); - if (thissad < bestsad) - { - bestsad = thissad; - best_site = i; - } - } - } - i++; + if (thissad < bestsad) { + bestsad = thissad; + best_site = i; } + } } - - if (best_site != last_site) - { - best_mv->as_mv.row += ss[best_site].mv.row; - best_mv->as_mv.col += ss[best_site].mv.col; - best_address += ss[best_site].offset; - last_site = best_site; - } - else if (best_address == in_what) - (*num00)++; + i++; + } } - this_mv.as_mv.row = best_mv->as_mv.row * 8; 
- this_mv.as_mv.col = best_mv->as_mv.col * 8; + if (best_site != last_site) { + best_mv->as_mv.row += ss[best_site].mv.row; + best_mv->as_mv.col += ss[best_site].mv.col; + best_address += ss[best_site].offset; + last_site = best_site; + } else if (best_address == in_what) { + (*num00)++; + } + } - return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) - + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit); + this_mv.as_mv.row = best_mv->as_mv.row * 8; + this_mv.as_mv.col = best_mv->as_mv.col * 8; + + return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) + + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit); } int vp8_full_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv, - int sad_per_bit, int distance, - vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], - int_mv *center_mv) -{ - unsigned char *what = (*(b->base_src) + b->src); - int what_stride = b->src_stride; - unsigned char *in_what; - int pre_stride = x->e_mbd.pre.y_stride; - unsigned char *base_pre = x->e_mbd.pre.y_buffer; - int in_what_stride = pre_stride; - int mv_stride = pre_stride; - unsigned char *bestaddress; - int_mv *best_mv = &d->bmi.mv; - int_mv this_mv; - unsigned int bestsad; - unsigned int thissad; - int r, c; + int sad_per_bit, int distance, + vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], + int_mv *center_mv) { + unsigned char *what = (*(b->base_src) + b->src); + int what_stride = b->src_stride; + unsigned char *in_what; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + int in_what_stride = pre_stride; + int mv_stride = pre_stride; + unsigned char *bestaddress; + int_mv *best_mv = &d->bmi.mv; + int_mv this_mv; + unsigned int bestsad; + unsigned int thissad; + int r, c; - unsigned char *check_here; + unsigned char *check_here; - int ref_row = ref_mv->as_mv.row; - int ref_col = ref_mv->as_mv.col; + int ref_row = ref_mv->as_mv.row; + int ref_col = ref_mv->as_mv.col; - int row_min = 
ref_row - distance; - int row_max = ref_row + distance; - int col_min = ref_col - distance; - int col_max = ref_col + distance; + int row_min = ref_row - distance; + int row_max = ref_row + distance; + int col_min = ref_col - distance; + int col_max = ref_col + distance; - int *mvsadcost[2]; - int_mv fcenter_mv; + int *mvsadcost[2]; + int_mv fcenter_mv; - mvsadcost[0] = x->mvsadcost[0]; - mvsadcost[1] = x->mvsadcost[1]; - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; + mvsadcost[0] = x->mvsadcost[0]; + mvsadcost[1] = x->mvsadcost[1]; + fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; + fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; - /* Work out the mid point for the search */ - in_what = base_pre + d->offset; - bestaddress = in_what + (ref_row * pre_stride) + ref_col; + /* Work out the mid point for the search */ + in_what = base_pre + d->offset; + bestaddress = in_what + (ref_row * pre_stride) + ref_col; - best_mv->as_mv.row = ref_row; - best_mv->as_mv.col = ref_col; + best_mv->as_mv.row = ref_row; + best_mv->as_mv.col = ref_col; - /* Baseline value at the centre */ - bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride) - + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit); + /* Baseline value at the centre */ + bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride) + + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit); - /* Apply further limits to prevent us looking using vectors that - * stretch beyiond the UMV border - */ - if (col_min < x->mv_col_min) - col_min = x->mv_col_min; + /* Apply further limits to prevent us looking using vectors that + * stretch beyiond the UMV border + */ + if (col_min < x->mv_col_min) col_min = x->mv_col_min; - if (col_max > x->mv_col_max) - col_max = x->mv_col_max; + if (col_max > x->mv_col_max) col_max = x->mv_col_max; - if (row_min < x->mv_row_min) - row_min = x->mv_row_min; + if (row_min < x->mv_row_min) row_min = 
x->mv_row_min; - if (row_max > x->mv_row_max) - row_max = x->mv_row_max; + if (row_max > x->mv_row_max) row_max = x->mv_row_max; - for (r = row_min; r < row_max ; r++) - { - this_mv.as_mv.row = r; - check_here = r * mv_stride + in_what + col_min; + for (r = row_min; r < row_max; ++r) { + this_mv.as_mv.row = r; + check_here = r * mv_stride + in_what + col_min; - for (c = col_min; c < col_max; c++) - { - thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride); + for (c = col_min; c < col_max; ++c) { + thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride); - this_mv.as_mv.col = c; - thissad += mvsad_err_cost(&this_mv, &fcenter_mv, - mvsadcost, sad_per_bit); + this_mv.as_mv.col = c; + thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); - if (thissad < bestsad) - { - bestsad = thissad; - best_mv->as_mv.row = r; - best_mv->as_mv.col = c; - bestaddress = check_here; - } + if (thissad < bestsad) { + bestsad = thissad; + best_mv->as_mv.row = r; + best_mv->as_mv.col = c; + bestaddress = check_here; + } - check_here++; - } + check_here++; } + } - this_mv.as_mv.row = best_mv->as_mv.row << 3; - this_mv.as_mv.col = best_mv->as_mv.col << 3; + this_mv.as_mv.row = best_mv->as_mv.row << 3; + this_mv.as_mv.col = best_mv->as_mv.col << 3; - return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, &thissad) - + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit); + return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, &thissad) + + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit); } int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv, int sad_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], - int_mv *center_mv) -{ - unsigned char *what = (*(b->base_src) + b->src); - int what_stride = b->src_stride; - unsigned char *in_what; - int pre_stride = x->e_mbd.pre.y_stride; - unsigned char *base_pre = x->e_mbd.pre.y_buffer; - int in_what_stride = pre_stride; - int 
mv_stride = pre_stride; - unsigned char *bestaddress; - int_mv *best_mv = &d->bmi.mv; - int_mv this_mv; - unsigned int bestsad; - unsigned int thissad; - int r, c; + int_mv *center_mv) { + unsigned char *what = (*(b->base_src) + b->src); + int what_stride = b->src_stride; + unsigned char *in_what; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + int in_what_stride = pre_stride; + int mv_stride = pre_stride; + unsigned char *bestaddress; + int_mv *best_mv = &d->bmi.mv; + int_mv this_mv; + unsigned int bestsad; + unsigned int thissad; + int r, c; - unsigned char *check_here; + unsigned char *check_here; - int ref_row = ref_mv->as_mv.row; - int ref_col = ref_mv->as_mv.col; + int ref_row = ref_mv->as_mv.row; + int ref_col = ref_mv->as_mv.col; - int row_min = ref_row - distance; - int row_max = ref_row + distance; - int col_min = ref_col - distance; - int col_max = ref_col + distance; + int row_min = ref_row - distance; + int row_max = ref_row + distance; + int col_min = ref_col - distance; + int col_max = ref_col + distance; - unsigned int sad_array[3]; + unsigned int sad_array[3]; - int *mvsadcost[2]; - int_mv fcenter_mv; + int *mvsadcost[2]; + int_mv fcenter_mv; - mvsadcost[0] = x->mvsadcost[0]; - mvsadcost[1] = x->mvsadcost[1]; - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; + mvsadcost[0] = x->mvsadcost[0]; + mvsadcost[1] = x->mvsadcost[1]; + fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; + fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; - /* Work out the mid point for the search */ - in_what = base_pre + d->offset; - bestaddress = in_what + (ref_row * pre_stride) + ref_col; + /* Work out the mid point for the search */ + in_what = base_pre + d->offset; + bestaddress = in_what + (ref_row * pre_stride) + ref_col; - best_mv->as_mv.row = ref_row; - best_mv->as_mv.col = ref_col; + best_mv->as_mv.row = ref_row; + best_mv->as_mv.col = ref_col; - /* Baseline value at 
the centre */ - bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride) - + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit); + /* Baseline value at the centre */ + bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride) + + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit); - /* Apply further limits to prevent us looking using vectors that stretch - * beyond the UMV border - */ - if (col_min < x->mv_col_min) - col_min = x->mv_col_min; + /* Apply further limits to prevent us looking using vectors that stretch + * beyond the UMV border + */ + if (col_min < x->mv_col_min) col_min = x->mv_col_min; - if (col_max > x->mv_col_max) - col_max = x->mv_col_max; + if (col_max > x->mv_col_max) col_max = x->mv_col_max; - if (row_min < x->mv_row_min) - row_min = x->mv_row_min; + if (row_min < x->mv_row_min) row_min = x->mv_row_min; - if (row_max > x->mv_row_max) - row_max = x->mv_row_max; + if (row_max > x->mv_row_max) row_max = x->mv_row_max; - for (r = row_min; r < row_max ; r++) - { - this_mv.as_mv.row = r; - check_here = r * mv_stride + in_what + col_min; - c = col_min; + for (r = row_min; r < row_max; ++r) { + this_mv.as_mv.row = r; + check_here = r * mv_stride + in_what + col_min; + c = col_min; - while ((c + 2) < col_max) - { - int i; + while ((c + 2) < col_max) { + int i; - fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array); + fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array); - for (i = 0; i < 3; i++) - { - thissad = sad_array[i]; + for (i = 0; i < 3; ++i) { + thissad = sad_array[i]; - if (thissad < bestsad) - { - this_mv.as_mv.col = c; - thissad += mvsad_err_cost(&this_mv, &fcenter_mv, - mvsadcost, sad_per_bit); + if (thissad < bestsad) { + this_mv.as_mv.col = c; + thissad += + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); - if (thissad < bestsad) - { - bestsad = thissad; - best_mv->as_mv.row = r; - best_mv->as_mv.col = c; - bestaddress = check_here; - } - } - 
- check_here++; - c++; - } - } - - while (c < col_max) - { - thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride); - - if (thissad < bestsad) - { - this_mv.as_mv.col = c; - thissad += mvsad_err_cost(&this_mv, &fcenter_mv, - mvsadcost, sad_per_bit); - - if (thissad < bestsad) - { - bestsad = thissad; - best_mv->as_mv.row = r; - best_mv->as_mv.col = c; - bestaddress = check_here; - } - } - - check_here ++; - c ++; + if (thissad < bestsad) { + bestsad = thissad; + best_mv->as_mv.row = r; + best_mv->as_mv.col = c; + bestaddress = check_here; + } } + check_here++; + c++; + } } - this_mv.as_mv.row = best_mv->as_mv.row << 3; - this_mv.as_mv.col = best_mv->as_mv.col << 3; + while (c < col_max) { + thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride); - return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, &thissad) - + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit); + if (thissad < bestsad) { + this_mv.as_mv.col = c; + thissad += + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); + + if (thissad < bestsad) { + bestsad = thissad; + best_mv->as_mv.row = r; + best_mv->as_mv.col = c; + bestaddress = check_here; + } + } + + check_here++; + c++; + } + } + + this_mv.as_mv.row = best_mv->as_mv.row << 3; + this_mv.as_mv.col = best_mv->as_mv.col << 3; + + return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, &thissad) + + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit); } int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv, int sad_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], - int_mv *center_mv) -{ - unsigned char *what = (*(b->base_src) + b->src); - int what_stride = b->src_stride; - int pre_stride = x->e_mbd.pre.y_stride; - unsigned char *base_pre = x->e_mbd.pre.y_buffer; - unsigned char *in_what; - int in_what_stride = pre_stride; - int mv_stride = pre_stride; - unsigned char *bestaddress; - int_mv *best_mv = &d->bmi.mv; - int_mv this_mv; - 
unsigned int bestsad; - unsigned int thissad; - int r, c; + int_mv *center_mv) { + unsigned char *what = (*(b->base_src) + b->src); + int what_stride = b->src_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + unsigned char *in_what; + int in_what_stride = pre_stride; + int mv_stride = pre_stride; + unsigned char *bestaddress; + int_mv *best_mv = &d->bmi.mv; + int_mv this_mv; + unsigned int bestsad; + unsigned int thissad; + int r, c; - unsigned char *check_here; + unsigned char *check_here; - int ref_row = ref_mv->as_mv.row; - int ref_col = ref_mv->as_mv.col; + int ref_row = ref_mv->as_mv.row; + int ref_col = ref_mv->as_mv.col; - int row_min = ref_row - distance; - int row_max = ref_row + distance; - int col_min = ref_col - distance; - int col_max = ref_col + distance; + int row_min = ref_row - distance; + int row_max = ref_row + distance; + int col_min = ref_col - distance; + int col_max = ref_col + distance; - // TODO(johannkoenig): check if this alignment is necessary. 
- DECLARE_ALIGNED(16, unsigned int, sad_array8[8]); - unsigned int sad_array[3]; + DECLARE_ALIGNED(16, unsigned int, sad_array8[8]); + unsigned int sad_array[3]; - int *mvsadcost[2]; - int_mv fcenter_mv; + int *mvsadcost[2]; + int_mv fcenter_mv; - mvsadcost[0] = x->mvsadcost[0]; - mvsadcost[1] = x->mvsadcost[1]; - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; + mvsadcost[0] = x->mvsadcost[0]; + mvsadcost[1] = x->mvsadcost[1]; + fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; + fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; - /* Work out the mid point for the search */ - in_what = base_pre + d->offset; - bestaddress = in_what + (ref_row * pre_stride) + ref_col; + /* Work out the mid point for the search */ + in_what = base_pre + d->offset; + bestaddress = in_what + (ref_row * pre_stride) + ref_col; - best_mv->as_mv.row = ref_row; - best_mv->as_mv.col = ref_col; + best_mv->as_mv.row = ref_row; + best_mv->as_mv.col = ref_col; - /* Baseline value at the centre */ - bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride) - + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit); + /* Baseline value at the centre */ + bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride) + + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit); - /* Apply further limits to prevent us looking using vectors that stretch - * beyond the UMV border - */ - if (col_min < x->mv_col_min) - col_min = x->mv_col_min; + /* Apply further limits to prevent us looking using vectors that stretch + * beyond the UMV border + */ + if (col_min < x->mv_col_min) col_min = x->mv_col_min; - if (col_max > x->mv_col_max) - col_max = x->mv_col_max; + if (col_max > x->mv_col_max) col_max = x->mv_col_max; - if (row_min < x->mv_row_min) - row_min = x->mv_row_min; + if (row_min < x->mv_row_min) row_min = x->mv_row_min; - if (row_max > x->mv_row_max) - row_max = x->mv_row_max; + if (row_max > x->mv_row_max) row_max 
= x->mv_row_max; - for (r = row_min; r < row_max ; r++) - { - this_mv.as_mv.row = r; - check_here = r * mv_stride + in_what + col_min; - c = col_min; + for (r = row_min; r < row_max; ++r) { + this_mv.as_mv.row = r; + check_here = r * mv_stride + in_what + col_min; + c = col_min; - while ((c + 7) < col_max) - { - int i; + while ((c + 7) < col_max) { + int i; - fn_ptr->sdx8f(what, what_stride, check_here, in_what_stride, sad_array8); + fn_ptr->sdx8f(what, what_stride, check_here, in_what_stride, sad_array8); - for (i = 0; i < 8; i++) - { - thissad = sad_array8[i]; + for (i = 0; i < 8; ++i) { + thissad = sad_array8[i]; - if (thissad < bestsad) - { - this_mv.as_mv.col = c; - thissad += mvsad_err_cost(&this_mv, &fcenter_mv, - mvsadcost, sad_per_bit); + if (thissad < bestsad) { + this_mv.as_mv.col = c; + thissad += + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); - if (thissad < bestsad) - { - bestsad = thissad; - best_mv->as_mv.row = r; - best_mv->as_mv.col = c; - bestaddress = check_here; - } - } - - check_here++; - c++; - } + if (thissad < bestsad) { + bestsad = thissad; + best_mv->as_mv.row = r; + best_mv->as_mv.col = c; + bestaddress = check_here; + } } - while ((c + 2) < col_max) - { - int i; - - fn_ptr->sdx3f(what, what_stride, check_here , in_what_stride, sad_array); - - for (i = 0; i < 3; i++) - { - thissad = sad_array[i]; - - if (thissad < bestsad) - { - this_mv.as_mv.col = c; - thissad += mvsad_err_cost(&this_mv, &fcenter_mv, - mvsadcost, sad_per_bit); - - if (thissad < bestsad) - { - bestsad = thissad; - best_mv->as_mv.row = r; - best_mv->as_mv.col = c; - bestaddress = check_here; - } - } - - check_here++; - c++; - } - } - - while (c < col_max) - { - thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride); - - if (thissad < bestsad) - { - this_mv.as_mv.col = c; - thissad += mvsad_err_cost(&this_mv, &fcenter_mv, - mvsadcost, sad_per_bit); - - if (thissad < bestsad) - { - bestsad = thissad; - best_mv->as_mv.row = r; - 
best_mv->as_mv.col = c; - bestaddress = check_here; - } - } - - check_here ++; - c ++; - } + check_here++; + c++; + } } - this_mv.as_mv.row = best_mv->as_mv.row * 8; - this_mv.as_mv.col = best_mv->as_mv.col * 8; + while ((c + 2) < col_max) { + int i; - return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, &thissad) - + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit); + fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array); + + for (i = 0; i < 3; ++i) { + thissad = sad_array[i]; + + if (thissad < bestsad) { + this_mv.as_mv.col = c; + thissad += + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); + + if (thissad < bestsad) { + bestsad = thissad; + best_mv->as_mv.row = r; + best_mv->as_mv.col = c; + bestaddress = check_here; + } + } + + check_here++; + c++; + } + } + + while (c < col_max) { + thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride); + + if (thissad < bestsad) { + this_mv.as_mv.col = c; + thissad += + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit); + + if (thissad < bestsad) { + bestsad = thissad; + best_mv->as_mv.row = r; + best_mv->as_mv.col = c; + bestaddress = check_here; + } + } + + check_here++; + c++; + } + } + + this_mv.as_mv.row = best_mv->as_mv.row * 8; + this_mv.as_mv.col = best_mv->as_mv.col * 8; + + return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, &thissad) + + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit); } -int vp8_refining_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv, - int error_per_bit, int search_range, - vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], - int_mv *center_mv) -{ - MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}}; - int i, j; - short this_row_offset, this_col_offset; +int vp8_refining_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, + int_mv *ref_mv, int error_per_bit, + int search_range, vp8_variance_fn_ptr_t *fn_ptr, + int *mvcost[2], int_mv *center_mv) { + MV neighbors[4] = { { -1, 0 }, { 
0, -1 }, { 0, 1 }, { 1, 0 } }; + int i, j; + short this_row_offset, this_col_offset; - int what_stride = b->src_stride; - int pre_stride = x->e_mbd.pre.y_stride; - unsigned char *base_pre = x->e_mbd.pre.y_buffer; - int in_what_stride = pre_stride; - unsigned char *what = (*(b->base_src) + b->src); - unsigned char *best_address = (unsigned char *)(base_pre + d->offset + - (ref_mv->as_mv.row * pre_stride) + ref_mv->as_mv.col); - unsigned char *check_here; - int_mv this_mv; - unsigned int bestsad; - unsigned int thissad; + int what_stride = b->src_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + int in_what_stride = pre_stride; + unsigned char *what = (*(b->base_src) + b->src); + unsigned char *best_address = + (unsigned char *)(base_pre + d->offset + + (ref_mv->as_mv.row * pre_stride) + ref_mv->as_mv.col); + unsigned char *check_here; + int_mv this_mv; + unsigned int bestsad; + unsigned int thissad; - int *mvsadcost[2]; - int_mv fcenter_mv; + int *mvsadcost[2]; + int_mv fcenter_mv; - mvsadcost[0] = x->mvsadcost[0]; - mvsadcost[1] = x->mvsadcost[1]; - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; + mvsadcost[0] = x->mvsadcost[0]; + mvsadcost[1] = x->mvsadcost[1]; + fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; + fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; - bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride) - + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit); + bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride) + + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit); - for (i=0; ias_mv.row + neighbors[j].row; - this_col_offset = ref_mv->as_mv.col + neighbors[j].col; + for (j = 0; j < 4; ++j) { + this_row_offset = ref_mv->as_mv.row + neighbors[j].row; + this_col_offset = ref_mv->as_mv.col + neighbors[j].col; - if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) && - 
(this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max)) - { - check_here = (neighbors[j].row)*in_what_stride + neighbors[j].col + best_address; - thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride); + if ((this_col_offset > x->mv_col_min) && + (this_col_offset < x->mv_col_max) && + (this_row_offset > x->mv_row_min) && + (this_row_offset < x->mv_row_max)) { + check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col + + best_address; + thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride); - if (thissad < bestsad) - { - this_mv.as_mv.row = this_row_offset; - this_mv.as_mv.col = this_col_offset; - thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit); + if (thissad < bestsad) { + this_mv.as_mv.row = this_row_offset; + this_mv.as_mv.col = this_col_offset; + thissad += + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit); - if (thissad < bestsad) - { - bestsad = thissad; - best_site = j; - } - } - } - } - - if (best_site == -1) - break; - else - { - ref_mv->as_mv.row += neighbors[best_site].row; - ref_mv->as_mv.col += neighbors[best_site].col; - best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col; + if (thissad < bestsad) { + bestsad = thissad; + best_site = j; + } } + } } - this_mv.as_mv.row = ref_mv->as_mv.row << 3; - this_mv.as_mv.col = ref_mv->as_mv.col << 3; + if (best_site == -1) { + break; + } else { + ref_mv->as_mv.row += neighbors[best_site].row; + ref_mv->as_mv.col += neighbors[best_site].col; + best_address += (neighbors[best_site].row) * in_what_stride + + neighbors[best_site].col; + } + } - return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) - + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit); + this_mv.as_mv.row = ref_mv->as_mv.row << 3; + this_mv.as_mv.col = ref_mv->as_mv.col << 3; + + return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) + + mv_err_cost(&this_mv, center_mv, 
mvcost, x->errorperbit); } int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv, int error_per_bit, int search_range, vp8_variance_fn_ptr_t *fn_ptr, - int *mvcost[2], int_mv *center_mv) -{ - MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}}; - int i, j; - short this_row_offset, this_col_offset; + int *mvcost[2], int_mv *center_mv) { + MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 } }; + int i, j; + short this_row_offset, this_col_offset; - int what_stride = b->src_stride; - int pre_stride = x->e_mbd.pre.y_stride; - unsigned char *base_pre = x->e_mbd.pre.y_buffer; - int in_what_stride = pre_stride; - unsigned char *what = (*(b->base_src) + b->src); - unsigned char *best_address = (unsigned char *)(base_pre + d->offset + - (ref_mv->as_mv.row * pre_stride) + ref_mv->as_mv.col); - unsigned char *check_here; - int_mv this_mv; - unsigned int bestsad; - unsigned int thissad; + int what_stride = b->src_stride; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + int in_what_stride = pre_stride; + unsigned char *what = (*(b->base_src) + b->src); + unsigned char *best_address = + (unsigned char *)(base_pre + d->offset + + (ref_mv->as_mv.row * pre_stride) + ref_mv->as_mv.col); + unsigned char *check_here; + int_mv this_mv; + unsigned int bestsad; + unsigned int thissad; - int *mvsadcost[2]; - int_mv fcenter_mv; + int *mvsadcost[2]; + int_mv fcenter_mv; - mvsadcost[0] = x->mvsadcost[0]; - mvsadcost[1] = x->mvsadcost[1]; - fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; - fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; + mvsadcost[0] = x->mvsadcost[0]; + mvsadcost[1] = x->mvsadcost[1]; + fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3; + fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3; - bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride) - + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit); + bestsad = fn_ptr->sdf(what, what_stride, best_address, 
in_what_stride) + + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit); - for (i=0; ias_mv.row - 1) > x->mv_row_min); - all_in &= ((ref_mv->as_mv.row + 1) < x->mv_row_max); - all_in &= ((ref_mv->as_mv.col - 1) > x->mv_col_min); - all_in &= ((ref_mv->as_mv.col + 1) < x->mv_col_max); + all_in &= ((ref_mv->as_mv.row - 1) > x->mv_row_min); + all_in &= ((ref_mv->as_mv.row + 1) < x->mv_row_max); + all_in &= ((ref_mv->as_mv.col - 1) > x->mv_col_min); + all_in &= ((ref_mv->as_mv.col + 1) < x->mv_col_max); - if(all_in) - { - unsigned int sad_array[4]; - const unsigned char *block_offset[4]; - block_offset[0] = best_address - in_what_stride; - block_offset[1] = best_address - 1; - block_offset[2] = best_address + 1; - block_offset[3] = best_address + in_what_stride; + if (all_in) { + unsigned int sad_array[4]; + const unsigned char *block_offset[4]; + block_offset[0] = best_address - in_what_stride; + block_offset[1] = best_address - 1; + block_offset[2] = best_address + 1; + block_offset[3] = best_address + in_what_stride; - fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array); + fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, + sad_array); - for (j = 0; j < 4; j++) - { - if (sad_array[j] < bestsad) - { - this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row; - this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col; - sad_array[j] += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit); + for (j = 0; j < 4; ++j) { + if (sad_array[j] < bestsad) { + this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row; + this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col; + sad_array[j] += + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit); - if (sad_array[j] < bestsad) - { - bestsad = sad_array[j]; - best_site = j; - } - } + if (sad_array[j] < bestsad) { + bestsad = sad_array[j]; + best_site = j; + } + } + } + } else { + for (j = 0; j < 4; ++j) { + this_row_offset = ref_mv->as_mv.row + neighbors[j].row; 
+ this_col_offset = ref_mv->as_mv.col + neighbors[j].col; + + if ((this_col_offset > x->mv_col_min) && + (this_col_offset < x->mv_col_max) && + (this_row_offset > x->mv_row_min) && + (this_row_offset < x->mv_row_max)) { + check_here = (neighbors[j].row) * in_what_stride + neighbors[j].col + + best_address; + thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride); + + if (thissad < bestsad) { + this_mv.as_mv.row = this_row_offset; + this_mv.as_mv.col = this_col_offset; + thissad += + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit); + + if (thissad < bestsad) { + bestsad = thissad; + best_site = j; } + } } - else - { - for (j = 0 ; j < 4 ; j++) - { - this_row_offset = ref_mv->as_mv.row + neighbors[j].row; - this_col_offset = ref_mv->as_mv.col + neighbors[j].col; - - if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) && - (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max)) - { - check_here = (neighbors[j].row)*in_what_stride + neighbors[j].col + best_address; - thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride); - - if (thissad < bestsad) - { - this_mv.as_mv.row = this_row_offset; - this_mv.as_mv.col = this_col_offset; - thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit); - - if (thissad < bestsad) - { - bestsad = thissad; - best_site = j; - } - } - } - } - } - - if (best_site == -1) - break; - else - { - ref_mv->as_mv.row += neighbors[best_site].row; - ref_mv->as_mv.col += neighbors[best_site].col; - best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col; - } + } } - this_mv.as_mv.row = ref_mv->as_mv.row * 8; - this_mv.as_mv.col = ref_mv->as_mv.col * 8; + if (best_site == -1) { + break; + } else { + ref_mv->as_mv.row += neighbors[best_site].row; + ref_mv->as_mv.col += neighbors[best_site].col; + best_address += (neighbors[best_site].row) * in_what_stride + + neighbors[best_site].col; + } + } - return fn_ptr->vf(what, 
what_stride, best_address, in_what_stride, &thissad) - + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit); + this_mv.as_mv.row = ref_mv->as_mv.row * 8; + this_mv.as_mv.col = ref_mv->as_mv.col * 8; + + return fn_ptr->vf(what, what_stride, best_address, in_what_stride, &thissad) + + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit); } #ifdef VP8_ENTROPY_STATS -void print_mode_context(void) -{ - FILE *f = fopen("modecont.c", "w"); - int i, j; +void print_mode_context(void) { + FILE *f = fopen("modecont.c", "w"); + int i, j; - fprintf(f, "#include \"entropy.h\"\n"); - fprintf(f, "const int vp8_mode_contexts[6][4] =\n"); - fprintf(f, "{\n"); + fprintf(f, "#include \"entropy.h\"\n"); + fprintf(f, "const int vp8_mode_contexts[6][4] =\n"); + fprintf(f, "{\n"); - for (j = 0; j < 6; j++) - { - fprintf(f, " { /* %d */\n", j); - fprintf(f, " "); + for (j = 0; j < 6; ++j) { + fprintf(f, " { /* %d */\n", j); + fprintf(f, " "); - for (i = 0; i < 4; i++) - { - int overal_prob; - int this_prob; - int count; + for (i = 0; i < 4; ++i) { + int overal_prob; + int this_prob; + int count; - /* Overall probs */ - count = mv_mode_cts[i][0] + mv_mode_cts[i][1]; + /* Overall probs */ + count = mv_mode_cts[i][0] + mv_mode_cts[i][1]; - if (count) - overal_prob = 256 * mv_mode_cts[i][0] / count; - else - overal_prob = 128; + if (count) + overal_prob = 256 * mv_mode_cts[i][0] / count; + else + overal_prob = 128; - if (overal_prob == 0) - overal_prob = 1; + if (overal_prob == 0) overal_prob = 1; - /* context probs */ - count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1]; + /* context probs */ + count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1]; - if (count) - this_prob = 256 * mv_ref_ct[j][i][0] / count; - else - this_prob = 128; + if (count) + this_prob = 256 * mv_ref_ct[j][i][0] / count; + else + this_prob = 128; - if (this_prob == 0) - this_prob = 1; + if (this_prob == 0) this_prob = 1; - fprintf(f, "%5d, ", this_prob); - } - - fprintf(f, " },\n"); + fprintf(f, "%5d, ", this_prob); } 
- fprintf(f, "};\n"); - fclose(f); + fprintf(f, " },\n"); + } + + fprintf(f, "};\n"); + fclose(f); } /* MV ref count VP8_ENTROPY_STATS stats code */ #ifdef VP8_ENTROPY_STATS -void init_mv_ref_counts() -{ - memset(mv_ref_ct, 0, sizeof(mv_ref_ct)); - memset(mv_mode_cts, 0, sizeof(mv_mode_cts)); +void init_mv_ref_counts() { + memset(mv_ref_ct, 0, sizeof(mv_ref_ct)); + memset(mv_mode_cts, 0, sizeof(mv_mode_cts)); } -void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4]) -{ - if (m == ZEROMV) - { - ++mv_ref_ct [ct[0]] [0] [0]; - ++mv_mode_cts[0][0]; - } - else - { - ++mv_ref_ct [ct[0]] [0] [1]; - ++mv_mode_cts[0][1]; +void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4]) { + if (m == ZEROMV) { + ++mv_ref_ct[ct[0]][0][0]; + ++mv_mode_cts[0][0]; + } else { + ++mv_ref_ct[ct[0]][0][1]; + ++mv_mode_cts[0][1]; - if (m == NEARESTMV) - { - ++mv_ref_ct [ct[1]] [1] [0]; - ++mv_mode_cts[1][0]; - } - else - { - ++mv_ref_ct [ct[1]] [1] [1]; - ++mv_mode_cts[1][1]; - - if (m == NEARMV) - { - ++mv_ref_ct [ct[2]] [2] [0]; - ++mv_mode_cts[2][0]; - } - else - { - ++mv_ref_ct [ct[2]] [2] [1]; - ++mv_mode_cts[2][1]; - - if (m == NEWMV) - { - ++mv_ref_ct [ct[3]] [3] [0]; - ++mv_mode_cts[3][0]; - } - else - { - ++mv_ref_ct [ct[3]] [3] [1]; - ++mv_mode_cts[3][1]; - } - } + if (m == NEARESTMV) { + ++mv_ref_ct[ct[1]][1][0]; + ++mv_mode_cts[1][0]; + } else { + ++mv_ref_ct[ct[1]][1][1]; + ++mv_mode_cts[1][1]; + + if (m == NEARMV) { + ++mv_ref_ct[ct[2]][2][0]; + ++mv_mode_cts[2][0]; + } else { + ++mv_ref_ct[ct[2]][2][1]; + ++mv_mode_cts[2][1]; + + if (m == NEWMV) { + ++mv_ref_ct[ct[3]][3][0]; + ++mv_mode_cts[3][0]; + } else { + ++mv_ref_ct[ct[3]][3][1]; + ++mv_mode_cts[3][1]; } + } } + } } -#endif/* END MV ref count VP8_ENTROPY_STATS stats code */ +#endif /* END MV ref count VP8_ENTROPY_STATS stats code */ #endif diff --git a/libs/libvpx/vp8/encoder/mcomp.h b/libs/libvpx/vp8/encoder/mcomp.h index 1694af819e..b6228798ff 100644 --- a/libs/libvpx/vp8/encoder/mcomp.h +++ 
b/libs/libvpx/vp8/encoder/mcomp.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_ENCODER_MCOMP_H_ #define VP8_ENCODER_MCOMP_H_ @@ -24,7 +23,6 @@ extern void init_mv_ref_counts(); extern void accum_mv_refs(MB_PREDICTION_MODE, const int near_mv_ref_cts[4]); #endif - /* The maximum number of steps in a step search given the largest allowed * initial step */ @@ -34,79 +32,47 @@ extern void accum_mv_refs(MB_PREDICTION_MODE, const int near_mv_ref_cts[4]); #define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS)) - 1) /* Maximum size of the first step in full pel units */ -#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1)) +#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS - 1)) extern void print_mode_context(void); extern int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight); extern void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride); -extern void vp8_init3smotion_compensation(MACROBLOCK *x, int stride); +extern void vp8_init3smotion_compensation(MACROBLOCK *x, int stride); +extern int vp8_hex_search(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv, + int_mv *best_mv, int search_param, int error_per_bit, + const vp8_variance_fn_ptr_t *vf, int *mvsadcost[2], + int *mvcost[2], int_mv *center_mv); -extern int vp8_hex_search -( - MACROBLOCK *x, - BLOCK *b, - BLOCKD *d, - int_mv *ref_mv, - int_mv *best_mv, - int search_param, - int error_per_bit, - const vp8_variance_fn_ptr_t *vf, - int *mvsadcost[2], - int *mvcost[2], - int_mv *center_mv -); - -typedef int (fractional_mv_step_fp) - (MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *bestmv, int_mv *ref_mv, - int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2], - int *distortion, unsigned int *sse); +typedef int(fractional_mv_step_fp)(MACROBLOCK *x, BLOCK *b, BLOCKD *d, + int_mv *bestmv, int_mv *ref_mv, + int error_per_bit, + const vp8_variance_fn_ptr_t *vfp, + int *mvcost[2], int *distortion, + unsigned int *sse); extern 
fractional_mv_step_fp vp8_find_best_sub_pixel_step_iteratively; extern fractional_mv_step_fp vp8_find_best_sub_pixel_step; extern fractional_mv_step_fp vp8_find_best_half_pixel_step; extern fractional_mv_step_fp vp8_skip_fractional_mv_step; -typedef int (*vp8_full_search_fn_t) - ( - MACROBLOCK *x, - BLOCK *b, - BLOCKD *d, - int_mv *ref_mv, - int sad_per_bit, - int distance, - vp8_variance_fn_ptr_t *fn_ptr, - int *mvcost[2], - int_mv *center_mv - ); +typedef int (*vp8_full_search_fn_t)(MACROBLOCK *x, BLOCK *b, BLOCKD *d, + int_mv *ref_mv, int sad_per_bit, + int distance, vp8_variance_fn_ptr_t *fn_ptr, + int *mvcost[2], int_mv *center_mv); -typedef int (*vp8_refining_search_fn_t) - ( - MACROBLOCK *x, - BLOCK *b, - BLOCKD *d, - int_mv *ref_mv, - int sad_per_bit, - int distance, - vp8_variance_fn_ptr_t *fn_ptr, - int *mvcost[2], - int_mv *center_mv - ); +typedef int (*vp8_refining_search_fn_t)(MACROBLOCK *x, BLOCK *b, BLOCKD *d, + int_mv *ref_mv, int sad_per_bit, + int distance, + vp8_variance_fn_ptr_t *fn_ptr, + int *mvcost[2], int_mv *center_mv); -typedef int (*vp8_diamond_search_fn_t) - ( - MACROBLOCK *x, - BLOCK *b, - BLOCKD *d, - int_mv *ref_mv, - int_mv *best_mv, - int search_param, - int sad_per_bit, - int *num00, - vp8_variance_fn_ptr_t *fn_ptr, - int *mvcost[2], - int_mv *center_mv - ); +typedef int (*vp8_diamond_search_fn_t)(MACROBLOCK *x, BLOCK *b, BLOCKD *d, + int_mv *ref_mv, int_mv *best_mv, + int search_param, int sad_per_bit, + int *num00, + vp8_variance_fn_ptr_t *fn_ptr, + int *mvcost[2], int_mv *center_mv); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/encoder/mips/msa/dct_msa.c b/libs/libvpx/vp8/encoder/mips/msa/dct_msa.c index be61ffa0db..3084667552 100644 --- a/libs/libvpx/vp8/encoder/mips/msa/dct_msa.c +++ b/libs/libvpx/vp8/encoder/mips/msa/dct_msa.c @@ -11,189 +11,186 @@ #include "./vp8_rtcd.h" #include "vp8/common/mips/msa/vp8_macros_msa.h" -#define TRANSPOSE4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \ -{ \ - v8i16 s0_m, 
s1_m, tp0_m, tp1_m, tp2_m, tp3_m; \ - \ - ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \ - ILVRL_H2_SH(s1_m, s0_m, tp0_m, tp1_m); \ - ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \ - ILVRL_H2_SH(s1_m, s0_m, tp2_m, tp3_m); \ - PCKEV_D2_SH(tp2_m, tp0_m, tp3_m, tp1_m, out0, out2); \ - PCKOD_D2_SH(tp2_m, tp0_m, tp3_m, tp1_m, out1, out3); \ +#define TRANSPOSE4x4_H(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + v8i16 s0_m, s1_m, tp0_m, tp1_m, tp2_m, tp3_m; \ + \ + ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \ + ILVRL_H2_SH(s1_m, s0_m, tp0_m, tp1_m); \ + ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \ + ILVRL_H2_SH(s1_m, s0_m, tp2_m, tp3_m); \ + PCKEV_D2_SH(tp2_m, tp0_m, tp3_m, tp1_m, out0, out2); \ + PCKOD_D2_SH(tp2_m, tp0_m, tp3_m, tp1_m, out1, out3); \ + } + +#define SET_DOTP_VALUES(coeff, val0, val1, val2, const1, const2) \ + { \ + v8i16 tmp0_m; \ + \ + SPLATI_H3_SH(coeff, val0, val1, val2, tmp0_m, const1, const2); \ + ILVEV_H2_SH(tmp0_m, const1, const2, tmp0_m, const1, const2); \ + } + +#define RET_1_IF_NZERO_H(in0) \ + ({ \ + v8i16 tmp0_m; \ + v8i16 one_m = __msa_ldi_h(1); \ + \ + tmp0_m = __msa_ceqi_h(in0, 0); \ + tmp0_m = tmp0_m ^ 255; \ + tmp0_m = one_m & tmp0_m; \ + \ + tmp0_m; \ + }) + +#define RET_1_IF_NZERO_W(in0) \ + ({ \ + v4i32 tmp0_m; \ + v4i32 one_m = __msa_ldi_w(1); \ + \ + tmp0_m = __msa_ceqi_w(in0, 0); \ + tmp0_m = tmp0_m ^ 255; \ + tmp0_m = one_m & tmp0_m; \ + \ + tmp0_m; \ + }) + +#define RET_1_IF_NEG_W(in0) \ + ({ \ + v4i32 tmp0_m; \ + \ + v4i32 one_m = __msa_ldi_w(1); \ + tmp0_m = __msa_clti_s_w(in0, 0); \ + tmp0_m = one_m & tmp0_m; \ + \ + tmp0_m; \ + }) + +void vp8_short_fdct4x4_msa(int16_t *input, int16_t *output, int32_t pitch) { + v8i16 in0, in1, in2, in3; + v8i16 temp0, temp1; + v8i16 const0, const1; + v8i16 coeff = { 2217, 5352, -5352, 14500, 7500, 12000, 25000, 26000 }; + v4i32 out0, out1, out2, out3; + v8i16 zero = { 0 }; + + LD_SH4(input, pitch / 2, in0, in1, in2, in3); + TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); + + 
BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3); + SLLI_4V(temp0, temp1, in1, in3, 3); + in0 = temp0 + temp1; + in2 = temp0 - temp1; + SET_DOTP_VALUES(coeff, 0, 1, 2, const0, const1); + temp0 = __msa_ilvr_h(in3, in1); + in1 = __msa_splati_h(coeff, 3); + out0 = (v4i32)__msa_ilvev_h(zero, in1); + coeff = __msa_ilvl_h(zero, coeff); + out1 = __msa_splati_w((v4i32)coeff, 0); + DPADD_SH2_SW(temp0, temp0, const0, const1, out0, out1); + out0 >>= 12; + out1 >>= 12; + PCKEV_H2_SH(out0, out0, out1, out1, in1, in3); + TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); + + BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3); + in0 = temp0 + temp1 + 7; + in2 = temp0 - temp1 + 7; + in0 >>= 4; + in2 >>= 4; + ILVR_H2_SW(zero, in0, zero, in2, out0, out2); + temp1 = RET_1_IF_NZERO_H(in3); + ILVR_H2_SH(zero, temp1, in3, in1, temp1, temp0); + SPLATI_W2_SW(coeff, 2, out3, out1); + out3 += out1; + out1 = __msa_splati_w((v4i32)coeff, 1); + DPADD_SH2_SW(temp0, temp0, const0, const1, out1, out3); + out1 >>= 16; + out3 >>= 16; + out1 += (v4i32)temp1; + PCKEV_H2_SH(out1, out0, out3, out2, in0, in2); + ST_SH2(in0, in2, output, 8); } -#define SET_DOTP_VALUES(coeff, val0, val1, val2, const1, const2) \ -{ \ - v8i16 tmp0_m; \ - \ - SPLATI_H3_SH(coeff, val0, val1, val2, tmp0_m, const1, const2); \ - ILVEV_H2_SH(tmp0_m, const1, const2, tmp0_m, const1, const2); \ +void vp8_short_fdct8x4_msa(int16_t *input, int16_t *output, int32_t pitch) { + v8i16 in0, in1, in2, in3; + v8i16 temp0, temp1, tmp0, tmp1; + v8i16 const0, const1, const2; + v8i16 coeff = { 2217, 5352, -5352, 14500, 7500, 12000, 25000, 26000 }; + v8i16 zero = { 0 }; + v4i32 vec0_w, vec1_w, vec2_w, vec3_w; + + LD_SH4(input, pitch / 2, in0, in1, in2, in3); + TRANSPOSE4x4_H(in0, in1, in2, in3, in0, in1, in2, in3); + + BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3); + SLLI_4V(temp0, temp1, in1, in3, 3); + in0 = temp0 + temp1; + in2 = temp0 - temp1; + SET_DOTP_VALUES(coeff, 0, 1, 2, const1, const2); + temp0 = 
__msa_splati_h(coeff, 3); + vec1_w = (v4i32)__msa_ilvev_h(zero, temp0); + coeff = __msa_ilvl_h(zero, coeff); + vec3_w = __msa_splati_w((v4i32)coeff, 0); + ILVRL_H2_SH(in3, in1, tmp1, tmp0); + vec0_w = vec1_w; + vec2_w = vec3_w; + DPADD_SH4_SW(tmp1, tmp0, tmp1, tmp0, const1, const1, const2, const2, vec0_w, + vec1_w, vec2_w, vec3_w); + SRA_4V(vec1_w, vec0_w, vec3_w, vec2_w, 12); + PCKEV_H2_SH(vec1_w, vec0_w, vec3_w, vec2_w, in1, in3); + TRANSPOSE4x4_H(in0, in1, in2, in3, in0, in1, in2, in3); + + BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3); + in0 = temp0 + temp1 + 7; + in2 = temp0 - temp1 + 7; + in0 >>= 4; + in2 >>= 4; + SPLATI_W2_SW(coeff, 2, vec3_w, vec1_w); + vec3_w += vec1_w; + vec1_w = __msa_splati_w((v4i32)coeff, 1); + const0 = RET_1_IF_NZERO_H(in3); + ILVRL_H2_SH(in3, in1, tmp1, tmp0); + vec0_w = vec1_w; + vec2_w = vec3_w; + DPADD_SH4_SW(tmp1, tmp0, tmp1, tmp0, const1, const1, const2, const2, vec0_w, + vec1_w, vec2_w, vec3_w); + SRA_4V(vec1_w, vec0_w, vec3_w, vec2_w, 16); + PCKEV_H2_SH(vec1_w, vec0_w, vec3_w, vec2_w, in1, in3); + in1 += const0; + PCKEV_D2_SH(in1, in0, in3, in2, temp0, temp1); + ST_SH2(temp0, temp1, output, 8); + + PCKOD_D2_SH(in1, in0, in3, in2, in0, in2); + ST_SH2(in0, in2, output + 16, 8); } -#define RET_1_IF_NZERO_H(in0) \ -({ \ - v8i16 tmp0_m; \ - v8i16 one_m = __msa_ldi_h(1); \ - \ - tmp0_m = __msa_ceqi_h(in0, 0); \ - tmp0_m = tmp0_m ^ 255; \ - tmp0_m = one_m & tmp0_m; \ - \ - tmp0_m; \ -}) +void vp8_short_walsh4x4_msa(int16_t *input, int16_t *output, int32_t pitch) { + v8i16 in0_h, in1_h, in2_h, in3_h; + v4i32 in0_w, in1_w, in2_w, in3_w, temp0, temp1, temp2, temp3; -#define RET_1_IF_NZERO_W(in0) \ -({ \ - v4i32 tmp0_m; \ - v4i32 one_m = __msa_ldi_w(1); \ - \ - tmp0_m = __msa_ceqi_w(in0, 0); \ - tmp0_m = tmp0_m ^ 255; \ - tmp0_m = one_m & tmp0_m; \ - \ - tmp0_m; \ -}) + LD_SH4(input, pitch / 2, in0_h, in1_h, in2_h, in3_h); + TRANSPOSE4x4_SH_SH(in0_h, in1_h, in2_h, in3_h, in0_h, in1_h, in2_h, in3_h); -#define RET_1_IF_NEG_W(in0) 
\ -({ \ - v4i32 tmp0_m; \ - \ - v4i32 one_m = __msa_ldi_w(1); \ - tmp0_m = __msa_clti_s_w(in0, 0); \ - tmp0_m = one_m & tmp0_m; \ - \ - tmp0_m; \ -}) + UNPCK_R_SH_SW(in0_h, in0_w); + UNPCK_R_SH_SW(in1_h, in1_w); + UNPCK_R_SH_SW(in2_h, in2_w); + UNPCK_R_SH_SW(in3_h, in3_w); + BUTTERFLY_4(in0_w, in1_w, in3_w, in2_w, temp0, temp3, temp2, temp1); + SLLI_4V(temp0, temp1, temp2, temp3, 2); + BUTTERFLY_4(temp0, temp1, temp2, temp3, in0_w, in1_w, in2_w, in3_w); + temp0 = RET_1_IF_NZERO_W(temp0); + in0_w += temp0; + TRANSPOSE4x4_SW_SW(in0_w, in1_w, in2_w, in3_w, in0_w, in1_w, in2_w, in3_w); -void vp8_short_fdct4x4_msa(int16_t *input, int16_t *output, int32_t pitch) -{ - v8i16 in0, in1, in2, in3; - v8i16 temp0, temp1; - v8i16 const0, const1; - v8i16 coeff = { 2217, 5352, -5352, 14500, 7500, 12000, 25000, 26000 }; - v4i32 out0, out1, out2, out3; - v8i16 zero = { 0 }; - - LD_SH4(input, pitch / 2, in0, in1, in2, in3); - TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); - - BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3); - SLLI_4V(temp0, temp1, in1, in3, 3); - in0 = temp0 + temp1; - in2 = temp0 - temp1; - SET_DOTP_VALUES(coeff, 0, 1, 2, const0, const1); - temp0 = __msa_ilvr_h(in3, in1); - in1 = __msa_splati_h(coeff, 3); - out0 = (v4i32)__msa_ilvev_h(zero, in1); - coeff = __msa_ilvl_h(zero, coeff); - out1 = __msa_splati_w((v4i32)coeff, 0); - DPADD_SH2_SW(temp0, temp0, const0, const1, out0, out1); - out0 >>= 12; - out1 >>= 12; - PCKEV_H2_SH(out0, out0, out1, out1, in1, in3); - TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); - - BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3); - in0 = temp0 + temp1 + 7; - in2 = temp0 - temp1 + 7; - in0 >>= 4; - in2 >>= 4; - ILVR_H2_SW(zero, in0, zero, in2, out0, out2); - temp1 = RET_1_IF_NZERO_H(in3); - ILVR_H2_SH(zero, temp1, in3, in1, temp1, temp0); - SPLATI_W2_SW(coeff, 2, out3, out1); - out3 += out1; - out1 = __msa_splati_w((v4i32)coeff, 1); - DPADD_SH2_SW(temp0, temp0, const0, const1, out1, out3); - out1 
>>= 16; - out3 >>= 16; - out1 += (v4i32)temp1; - PCKEV_H2_SH(out1, out0, out3, out2, in0, in2); - ST_SH2(in0, in2, output, 8); -} - -void vp8_short_fdct8x4_msa(int16_t *input, int16_t *output, int32_t pitch) -{ - v8i16 in0, in1, in2, in3; - v8i16 temp0, temp1, tmp0, tmp1; - v8i16 const0, const1, const2; - v8i16 coeff = { 2217, 5352, -5352, 14500, 7500, 12000, 25000, 26000 }; - v8i16 zero = { 0 }; - v4i32 vec0_w, vec1_w, vec2_w, vec3_w; - - LD_SH4(input, pitch / 2, in0, in1, in2, in3); - TRANSPOSE4x4_H(in0, in1, in2, in3, in0, in1, in2, in3); - - BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3); - SLLI_4V(temp0, temp1, in1, in3, 3); - in0 = temp0 + temp1; - in2 = temp0 - temp1; - SET_DOTP_VALUES(coeff, 0, 1, 2, const1, const2); - temp0 = __msa_splati_h(coeff, 3); - vec1_w = (v4i32)__msa_ilvev_h(zero, temp0); - coeff = __msa_ilvl_h(zero, coeff); - vec3_w = __msa_splati_w((v4i32)coeff, 0); - ILVRL_H2_SH(in3, in1, tmp1, tmp0); - vec0_w = vec1_w; - vec2_w = vec3_w; - DPADD_SH4_SW(tmp1, tmp0, tmp1, tmp0, const1, const1, const2, const2, - vec0_w, vec1_w, vec2_w, vec3_w); - SRA_4V(vec1_w, vec0_w, vec3_w, vec2_w, 12); - PCKEV_H2_SH(vec1_w, vec0_w, vec3_w, vec2_w, in1, in3); - TRANSPOSE4x4_H(in0, in1, in2, in3, in0, in1, in2, in3); - - BUTTERFLY_4(in0, in1, in2, in3, temp0, temp1, in1, in3); - in0 = temp0 + temp1 + 7; - in2 = temp0 - temp1 + 7; - in0 >>= 4; - in2 >>= 4; - SPLATI_W2_SW(coeff, 2, vec3_w, vec1_w); - vec3_w += vec1_w; - vec1_w = __msa_splati_w((v4i32)coeff, 1); - const0 = RET_1_IF_NZERO_H(in3); - ILVRL_H2_SH(in3, in1, tmp1, tmp0); - vec0_w = vec1_w; - vec2_w = vec3_w; - DPADD_SH4_SW(tmp1, tmp0, tmp1, tmp0, const1, const1, const2, const2, - vec0_w, vec1_w, vec2_w, vec3_w); - SRA_4V(vec1_w, vec0_w, vec3_w, vec2_w, 16); - PCKEV_H2_SH(vec1_w, vec0_w, vec3_w, vec2_w, in1, in3); - in1 += const0; - PCKEV_D2_SH(in1, in0, in3, in2, temp0, temp1); - ST_SH2(temp0, temp1, output, 8); - - PCKOD_D2_SH(in1, in0, in3, in2, in0, in2); - ST_SH2(in0, in2, output + 16, 8); 
-} - -void vp8_short_walsh4x4_msa(int16_t *input, int16_t *output, int32_t pitch) -{ - v8i16 in0_h, in1_h, in2_h, in3_h; - v4i32 in0_w, in1_w, in2_w, in3_w, temp0, temp1, temp2, temp3; - - LD_SH4(input, pitch / 2, in0_h, in1_h, in2_h, in3_h); - TRANSPOSE4x4_SH_SH(in0_h, in1_h, in2_h, in3_h, in0_h, in1_h, in2_h, in3_h); - - UNPCK_R_SH_SW(in0_h, in0_w); - UNPCK_R_SH_SW(in1_h, in1_w); - UNPCK_R_SH_SW(in2_h, in2_w); - UNPCK_R_SH_SW(in3_h, in3_w); - BUTTERFLY_4(in0_w, in1_w, in3_w, in2_w, temp0, temp3, temp2, temp1); - SLLI_4V(temp0, temp1, temp2, temp3, 2); - BUTTERFLY_4(temp0, temp1, temp2, temp3, in0_w, in1_w, in2_w, in3_w); - temp0 = RET_1_IF_NZERO_W(temp0); - in0_w += temp0; - TRANSPOSE4x4_SW_SW(in0_w, in1_w, in2_w, in3_w, in0_w, in1_w, in2_w, in3_w); - - BUTTERFLY_4(in0_w, in1_w, in3_w, in2_w, temp0, temp3, temp2, temp1); - BUTTERFLY_4(temp0, temp1, temp2, temp3, in0_w, in1_w, in2_w, in3_w); - in0_w += RET_1_IF_NEG_W(in0_w); - in1_w += RET_1_IF_NEG_W(in1_w); - in2_w += RET_1_IF_NEG_W(in2_w); - in3_w += RET_1_IF_NEG_W(in3_w); - ADD4(in0_w, 3, in1_w, 3, in2_w, 3, in3_w, 3, in0_w, in1_w, in2_w, in3_w); - SRA_4V(in0_w, in1_w, in2_w, in3_w, 3); - PCKEV_H2_SH(in1_w, in0_w, in3_w, in2_w, in0_h, in1_h); - ST_SH2(in0_h, in1_h, output, 8); + BUTTERFLY_4(in0_w, in1_w, in3_w, in2_w, temp0, temp3, temp2, temp1); + BUTTERFLY_4(temp0, temp1, temp2, temp3, in0_w, in1_w, in2_w, in3_w); + in0_w += RET_1_IF_NEG_W(in0_w); + in1_w += RET_1_IF_NEG_W(in1_w); + in2_w += RET_1_IF_NEG_W(in2_w); + in3_w += RET_1_IF_NEG_W(in3_w); + ADD4(in0_w, 3, in1_w, 3, in2_w, 3, in3_w, 3, in0_w, in1_w, in2_w, in3_w); + SRA_4V(in0_w, in1_w, in2_w, in3_w, 3); + PCKEV_H2_SH(in1_w, in0_w, in3_w, in2_w, in0_h, in1_h); + ST_SH2(in0_h, in1_h, output, 8); } diff --git a/libs/libvpx/vp8/encoder/mips/msa/denoising_msa.c b/libs/libvpx/vp8/encoder/mips/msa/denoising_msa.c index 66965c6685..f8b653a9a7 100644 --- a/libs/libvpx/vp8/encoder/mips/msa/denoising_msa.c +++ b/libs/libvpx/vp8/encoder/mips/msa/denoising_msa.c 
@@ -16,609 +16,553 @@ int32_t vp8_denoiser_filter_msa(uint8_t *mc_running_avg_y_ptr, int32_t mc_avg_y_stride, uint8_t *running_avg_y_ptr, - int32_t avg_y_stride, - uint8_t *sig_ptr, int32_t sig_stride, - uint32_t motion_magnitude, - int32_t increase_denoising) -{ - uint8_t *running_avg_y_start = running_avg_y_ptr; - uint8_t *sig_start = sig_ptr; - int32_t cnt = 0; - int32_t sum_diff = 0; - int32_t shift_inc1 = 3; - int32_t delta = 0; - int32_t sum_diff_thresh; - v16u8 src0, src1, src2, src3, src4, src5, src6, src7; - v16u8 src8, src9, src10, src11, src12, src13, src14, src15; - v16u8 mc_running_avg_y0, running_avg_y, sig0; - v16u8 mc_running_avg_y1, running_avg_y1, sig1; - v16u8 coeff0, coeff1; - v8i16 diff0, diff1, abs_diff0, abs_diff1, abs_diff_neg0, abs_diff_neg1; - v8i16 adjust0, adjust1, adjust2, adjust3; - v8i16 shift_inc1_vec = { 0 }; - v8i16 col_sum0 = { 0 }; - v8i16 col_sum1 = { 0 }; - v8i16 col_sum2 = { 0 }; - v8i16 col_sum3 = { 0 }; - v8i16 temp0_h, temp1_h, temp2_h, temp3_h, cmp, delta_vec; - v4i32 temp0_w; - v2i64 temp0_d, temp1_d; - v8i16 zero = { 0 }; - v8i16 one = __msa_ldi_h(1); - v8i16 four = __msa_ldi_h(4); - v8i16 val_127 = __msa_ldi_h(127); - v8i16 adj_val = { 6, 4, 3, 0, -6, -4, -3, 0 }; + int32_t avg_y_stride, uint8_t *sig_ptr, + int32_t sig_stride, uint32_t motion_magnitude, + int32_t increase_denoising) { + uint8_t *running_avg_y_start = running_avg_y_ptr; + uint8_t *sig_start = sig_ptr; + int32_t cnt = 0; + int32_t sum_diff = 0; + int32_t shift_inc1 = 3; + int32_t delta = 0; + int32_t sum_diff_thresh; + v16u8 src0, src1, src2, src3, src4, src5, src6, src7; + v16u8 src8, src9, src10, src11, src12, src13, src14, src15; + v16u8 mc_running_avg_y0, running_avg_y, sig0; + v16u8 mc_running_avg_y1, running_avg_y1, sig1; + v16u8 coeff0, coeff1; + v8i16 diff0, diff1, abs_diff0, abs_diff1, abs_diff_neg0, abs_diff_neg1; + v8i16 adjust0, adjust1, adjust2, adjust3; + v8i16 shift_inc1_vec = { 0 }; + v8i16 col_sum0 = { 0 }; + v8i16 col_sum1 = { 0 }; + 
v8i16 col_sum2 = { 0 }; + v8i16 col_sum3 = { 0 }; + v8i16 temp0_h, temp1_h, temp2_h, temp3_h, cmp, delta_vec; + v4i32 temp0_w; + v2i64 temp0_d, temp1_d; + v8i16 zero = { 0 }; + v8i16 one = __msa_ldi_h(1); + v8i16 four = __msa_ldi_h(4); + v8i16 val_127 = __msa_ldi_h(127); + v8i16 adj_val = { 6, 4, 3, 0, -6, -4, -3, 0 }; - if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) - { - adj_val = __msa_add_a_h(adj_val, one); - if (increase_denoising) - { - adj_val = __msa_add_a_h(adj_val, one); - shift_inc1 = 4; - } - - temp0_h = zero - adj_val; - adj_val = (v8i16)__msa_ilvev_d((v2i64)temp0_h, (v2i64)adj_val); + if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) { + adj_val = __msa_add_a_h(adj_val, one); + if (increase_denoising) { + adj_val = __msa_add_a_h(adj_val, one); + shift_inc1 = 4; } - adj_val = __msa_insert_h(adj_val, 3, cnt); - adj_val = __msa_insert_h(adj_val, 7, cnt); - shift_inc1_vec = __msa_fill_h(shift_inc1); + temp0_h = zero - adj_val; + adj_val = (v8i16)__msa_ilvev_d((v2i64)temp0_h, (v2i64)adj_val); + } - for (cnt = 8; cnt--;) - { - v8i16 mask0 = { 0 }; - v8i16 mask1 = { 0 }; + adj_val = __msa_insert_h(adj_val, 3, cnt); + adj_val = __msa_insert_h(adj_val, 7, cnt); + shift_inc1_vec = __msa_fill_h(shift_inc1); + for (cnt = 8; cnt--;) { + v8i16 mask0 = { 0 }; + v8i16 mask1 = { 0 }; + + mc_running_avg_y0 = LD_UB(mc_running_avg_y_ptr); + sig0 = LD_UB(sig_ptr); + sig_ptr += sig_stride; + mc_running_avg_y_ptr += mc_avg_y_stride; + + mc_running_avg_y1 = LD_UB(mc_running_avg_y_ptr); + sig1 = LD_UB(sig_ptr); + + ILVRL_B2_UB(mc_running_avg_y0, sig0, coeff0, coeff1); + HSUB_UB2_SH(coeff0, coeff1, diff0, diff1); + abs_diff0 = __msa_add_a_h(diff0, zero); + abs_diff1 = __msa_add_a_h(diff1, zero); + cmp = __msa_clei_s_h(abs_diff0, 15); + cmp = cmp & one; + mask0 += cmp; + cmp = __msa_clei_s_h(abs_diff0, 7); + cmp = cmp & one; + mask0 += cmp; + cmp = abs_diff0 < shift_inc1_vec; + cmp = cmp & one; + mask0 += cmp; + cmp = __msa_clei_s_h(abs_diff1, 15); + cmp = cmp & one; + 
mask1 += cmp; + cmp = __msa_clei_s_h(abs_diff1, 7); + cmp = cmp & one; + mask1 += cmp; + cmp = abs_diff1 < shift_inc1_vec; + cmp = cmp & one; + mask1 += cmp; + temp0_h = __msa_clei_s_h(diff0, 0); + temp0_h = temp0_h & four; + mask0 += temp0_h; + temp1_h = __msa_clei_s_h(diff1, 0); + temp1_h = temp1_h & four; + mask1 += temp1_h; + VSHF_H2_SH(adj_val, adj_val, adj_val, adj_val, mask0, mask1, adjust0, + adjust1); + temp2_h = __msa_ceqi_h(adjust0, 0); + temp3_h = __msa_ceqi_h(adjust1, 0); + adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, (v16u8)temp2_h); + adjust1 = (v8i16)__msa_bmnz_v((v16u8)adjust1, (v16u8)diff1, (v16u8)temp3_h); + ADD2(col_sum0, adjust0, col_sum1, adjust1, col_sum0, col_sum1); + UNPCK_UB_SH(sig0, temp0_h, temp1_h); + ADD2(temp0_h, adjust0, temp1_h, adjust1, temp0_h, temp1_h); + MAXI_SH2_SH(temp0_h, temp1_h, 0); + SAT_UH2_SH(temp0_h, temp1_h, 7); + temp2_h = (v8i16)__msa_pckev_b((v16i8)temp3_h, (v16i8)temp2_h); + running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp1_h, (v16i8)temp0_h); + running_avg_y = + __msa_bmnz_v(running_avg_y, mc_running_avg_y0, (v16u8)temp2_h); + ST_UB(running_avg_y, running_avg_y_ptr); + running_avg_y_ptr += avg_y_stride; + + mask0 = zero; + mask1 = zero; + ILVRL_B2_UB(mc_running_avg_y1, sig1, coeff0, coeff1); + HSUB_UB2_SH(coeff0, coeff1, diff0, diff1); + abs_diff0 = __msa_add_a_h(diff0, zero); + abs_diff1 = __msa_add_a_h(diff1, zero); + cmp = __msa_clei_s_h(abs_diff0, 15); + cmp = cmp & one; + mask0 += cmp; + cmp = __msa_clei_s_h(abs_diff0, 7); + cmp = cmp & one; + mask0 += cmp; + cmp = abs_diff0 < shift_inc1_vec; + cmp = cmp & one; + mask0 += cmp; + cmp = __msa_clei_s_h(abs_diff1, 15); + cmp = cmp & one; + mask1 += cmp; + cmp = __msa_clei_s_h(abs_diff1, 7); + cmp = cmp & one; + mask1 += cmp; + cmp = abs_diff1 < shift_inc1_vec; + cmp = cmp & one; + mask1 += cmp; + temp0_h = __msa_clei_s_h(diff0, 0); + temp0_h = temp0_h & four; + mask0 += temp0_h; + temp1_h = __msa_clei_s_h(diff1, 0); + temp1_h = temp1_h & four; + 
mask1 += temp1_h; + VSHF_H2_SH(adj_val, adj_val, adj_val, adj_val, mask0, mask1, adjust0, + adjust1); + temp2_h = __msa_ceqi_h(adjust0, 0); + temp3_h = __msa_ceqi_h(adjust1, 0); + adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, (v16u8)temp2_h); + adjust1 = (v8i16)__msa_bmnz_v((v16u8)adjust1, (v16u8)diff1, (v16u8)temp3_h); + ADD2(col_sum0, adjust0, col_sum1, adjust1, col_sum0, col_sum1); + UNPCK_UB_SH(sig1, temp0_h, temp1_h); + ADD2(temp0_h, adjust0, temp1_h, adjust1, temp0_h, temp1_h); + MAXI_SH2_SH(temp0_h, temp1_h, 0); + SAT_UH2_SH(temp0_h, temp1_h, 7); + temp2_h = (v8i16)__msa_pckev_b((v16i8)temp3_h, (v16i8)temp2_h); + running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp1_h, (v16i8)temp0_h); + running_avg_y = + __msa_bmnz_v(running_avg_y, mc_running_avg_y1, (v16u8)temp2_h); + ST_UB(running_avg_y, running_avg_y_ptr); + sig_ptr += sig_stride; + mc_running_avg_y_ptr += mc_avg_y_stride; + running_avg_y_ptr += avg_y_stride; + } + + col_sum0 = __msa_min_s_h(col_sum0, val_127); + col_sum1 = __msa_min_s_h(col_sum1, val_127); + temp0_h = col_sum0 + col_sum1; + temp0_w = __msa_hadd_s_w(temp0_h, temp0_h); + temp0_d = __msa_hadd_s_d(temp0_w, temp0_w); + temp1_d = __msa_splati_d(temp0_d, 1); + temp0_d += temp1_d; + sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0); + sig_ptr -= sig_stride * 16; + mc_running_avg_y_ptr -= mc_avg_y_stride * 16; + running_avg_y_ptr -= avg_y_stride * 16; + + if (increase_denoising) { + sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH; + } + + if (abs(sum_diff) > sum_diff_thresh) { + delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1; + delta_vec = __msa_fill_h(delta); + if (delta < 4) { + for (cnt = 8; cnt--;) { + running_avg_y = LD_UB(running_avg_y_ptr); mc_running_avg_y0 = LD_UB(mc_running_avg_y_ptr); sig0 = LD_UB(sig_ptr); sig_ptr += sig_stride; mc_running_avg_y_ptr += mc_avg_y_stride; - + running_avg_y_ptr += avg_y_stride; mc_running_avg_y1 = LD_UB(mc_running_avg_y_ptr); sig1 = LD_UB(sig_ptr); - + running_avg_y1 = LD_UB(running_avg_y_ptr); 
ILVRL_B2_UB(mc_running_avg_y0, sig0, coeff0, coeff1); HSUB_UB2_SH(coeff0, coeff1, diff0, diff1); abs_diff0 = __msa_add_a_h(diff0, zero); abs_diff1 = __msa_add_a_h(diff1, zero); - cmp = __msa_clei_s_h(abs_diff0, 15); - cmp = cmp & one; - mask0 += cmp; - cmp = __msa_clei_s_h(abs_diff0, 7); - cmp = cmp & one; - mask0 += cmp; - cmp = abs_diff0 < shift_inc1_vec; - cmp = cmp & one; - mask0 += cmp; - cmp = __msa_clei_s_h(abs_diff1, 15); - cmp = cmp & one; - mask1 += cmp; - cmp = __msa_clei_s_h(abs_diff1, 7); - cmp = cmp & one; - mask1 += cmp; - cmp = abs_diff1 < shift_inc1_vec; - cmp = cmp & one; - mask1 += cmp; + temp0_h = abs_diff0 < delta_vec; + temp1_h = abs_diff1 < delta_vec; + abs_diff0 = (v8i16)__msa_bmz_v((v16u8)abs_diff0, (v16u8)delta_vec, + (v16u8)temp0_h); + abs_diff1 = (v8i16)__msa_bmz_v((v16u8)abs_diff1, (v16u8)delta_vec, + (v16u8)temp1_h); + SUB2(zero, abs_diff0, zero, abs_diff1, abs_diff_neg0, abs_diff_neg1); + abs_diff_neg0 = zero - abs_diff0; + abs_diff_neg1 = zero - abs_diff1; temp0_h = __msa_clei_s_h(diff0, 0); - temp0_h = temp0_h & four; - mask0 += temp0_h; temp1_h = __msa_clei_s_h(diff1, 0); - temp1_h = temp1_h & four; - mask1 += temp1_h; - VSHF_H2_SH(adj_val, adj_val, adj_val, adj_val, mask0, mask1, adjust0, - adjust1); - temp2_h = __msa_ceqi_h(adjust0, 0); - temp3_h = __msa_ceqi_h(adjust1, 0); - adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, - (v16u8)temp2_h); - adjust1 = (v8i16)__msa_bmnz_v((v16u8)adjust1, (v16u8)diff1, - (v16u8)temp3_h); - ADD2(col_sum0, adjust0, col_sum1, adjust1, col_sum0, col_sum1); - UNPCK_UB_SH(sig0, temp0_h, temp1_h); - ADD2(temp0_h, adjust0, temp1_h, adjust1, temp0_h, temp1_h); - MAXI_SH2_SH(temp0_h, temp1_h, 0); - SAT_UH2_SH(temp0_h, temp1_h, 7); - temp2_h = (v8i16)__msa_pckev_b((v16i8)temp3_h, (v16i8)temp2_h); - running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp1_h, (v16i8)temp0_h); - running_avg_y = __msa_bmnz_v(running_avg_y, mc_running_avg_y0, - (v16u8)temp2_h); - ST_UB(running_avg_y, running_avg_y_ptr); - 
running_avg_y_ptr += avg_y_stride; - - mask0 = zero; - mask1 = zero; + adjust0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, (v16u8)abs_diff_neg0, + (v16u8)temp0_h); + adjust1 = (v8i16)__msa_bmnz_v((v16u8)abs_diff1, (v16u8)abs_diff_neg1, + (v16u8)temp1_h); + ILVRL_B2_SH(zero, running_avg_y, temp2_h, temp3_h); + ADD2(temp2_h, adjust0, temp3_h, adjust1, adjust2, adjust3); + MAXI_SH2_SH(adjust2, adjust3, 0); + SAT_UH2_SH(adjust2, adjust3, 7); + temp0_h = __msa_ceqi_h(diff0, 0); + temp1_h = __msa_ceqi_h(diff1, 0); + adjust2 = + (v8i16)__msa_bmz_v((v16u8)adjust2, (v16u8)temp2_h, (v16u8)temp0_h); + adjust3 = + (v8i16)__msa_bmz_v((v16u8)adjust3, (v16u8)temp3_h, (v16u8)temp1_h); + adjust0 = + (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)zero, (v16u8)temp0_h); + adjust1 = + (v8i16)__msa_bmnz_v((v16u8)adjust1, (v16u8)zero, (v16u8)temp1_h); + ADD2(col_sum2, adjust0, col_sum3, adjust1, col_sum2, col_sum3); + running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust3, (v16i8)adjust2); + ST_UB(running_avg_y, running_avg_y_ptr - avg_y_stride); ILVRL_B2_UB(mc_running_avg_y1, sig1, coeff0, coeff1); HSUB_UB2_SH(coeff0, coeff1, diff0, diff1); abs_diff0 = __msa_add_a_h(diff0, zero); abs_diff1 = __msa_add_a_h(diff1, zero); - cmp = __msa_clei_s_h(abs_diff0, 15); - cmp = cmp & one; - mask0 += cmp; - cmp = __msa_clei_s_h(abs_diff0, 7); - cmp = cmp & one; - mask0 += cmp; - cmp = abs_diff0 < shift_inc1_vec; - cmp = cmp & one; - mask0 += cmp; - cmp = __msa_clei_s_h(abs_diff1, 15); - cmp = cmp & one; - mask1 += cmp; - cmp = __msa_clei_s_h(abs_diff1, 7); - cmp = cmp & one; - mask1 += cmp; - cmp = abs_diff1 < shift_inc1_vec; - cmp = cmp & one; - mask1 += cmp; + temp0_h = abs_diff0 < delta_vec; + temp1_h = abs_diff1 < delta_vec; + abs_diff0 = (v8i16)__msa_bmz_v((v16u8)abs_diff0, (v16u8)delta_vec, + (v16u8)temp0_h); + abs_diff1 = (v8i16)__msa_bmz_v((v16u8)abs_diff1, (v16u8)delta_vec, + (v16u8)temp1_h); + SUB2(zero, abs_diff0, zero, abs_diff1, abs_diff_neg0, abs_diff_neg1); temp0_h = __msa_clei_s_h(diff0, 0); - 
temp0_h = temp0_h & four; - mask0 += temp0_h; temp1_h = __msa_clei_s_h(diff1, 0); - temp1_h = temp1_h & four; - mask1 += temp1_h; - VSHF_H2_SH(adj_val, adj_val, adj_val, adj_val, mask0, mask1, adjust0, - adjust1); - temp2_h = __msa_ceqi_h(adjust0, 0); - temp3_h = __msa_ceqi_h(adjust1, 0); - adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, - (v16u8)temp2_h); - adjust1 = (v8i16)__msa_bmnz_v((v16u8)adjust1, (v16u8)diff1, - (v16u8)temp3_h); - ADD2(col_sum0, adjust0, col_sum1, adjust1, col_sum0, col_sum1); - UNPCK_UB_SH(sig1, temp0_h, temp1_h); - ADD2(temp0_h, adjust0, temp1_h, adjust1, temp0_h, temp1_h); - MAXI_SH2_SH(temp0_h, temp1_h, 0); - SAT_UH2_SH(temp0_h, temp1_h, 7); - temp2_h = (v8i16)__msa_pckev_b((v16i8)temp3_h, (v16i8)temp2_h); - running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp1_h, (v16i8)temp0_h); - running_avg_y = __msa_bmnz_v(running_avg_y, mc_running_avg_y1, - (v16u8)temp2_h); + adjust0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, (v16u8)abs_diff_neg0, + (v16u8)temp0_h); + adjust1 = (v8i16)__msa_bmnz_v((v16u8)abs_diff1, (v16u8)abs_diff_neg1, + (v16u8)temp1_h); + ILVRL_H2_SH(zero, running_avg_y1, temp2_h, temp3_h); + ADD2(temp2_h, adjust0, temp3_h, adjust1, adjust2, adjust3); + MAXI_SH2_SH(adjust2, adjust3, 0); + SAT_UH2_SH(adjust2, adjust3, 7); + temp0_h = __msa_ceqi_h(diff0, 0); + temp1_h = __msa_ceqi_h(diff1, 0); + adjust2 = + (v8i16)__msa_bmz_v((v16u8)adjust2, (v16u8)temp2_h, (v16u8)temp0_h); + adjust3 = + (v8i16)__msa_bmz_v((v16u8)adjust3, (v16u8)temp3_h, (v16u8)temp1_h); + adjust0 = + (v8i16)__msa_bmz_v((v16u8)adjust0, (v16u8)zero, (v16u8)temp0_h); + adjust1 = + (v8i16)__msa_bmz_v((v16u8)adjust1, (v16u8)zero, (v16u8)temp1_h); + ADD2(col_sum2, adjust0, col_sum3, adjust1, col_sum2, col_sum3); + running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust3, (v16i8)adjust2); ST_UB(running_avg_y, running_avg_y_ptr); + running_avg_y_ptr += avg_y_stride; + } + + col_sum2 = __msa_min_s_h(col_sum2, val_127); + col_sum3 = __msa_min_s_h(col_sum3, val_127); + 
temp0_h = col_sum2 + col_sum3; + temp0_w = __msa_hadd_s_w(temp0_h, temp0_h); + temp0_d = __msa_hadd_s_d(temp0_w, temp0_w); + temp1_d = __msa_splati_d(temp0_d, 1); + temp0_d += (v2i64)temp1_d; + sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0); + if (abs(sum_diff) > SUM_DIFF_THRESHOLD) { + return COPY_BLOCK; + } + } else { + return COPY_BLOCK; + } + } + + LD_UB8(sig_start, sig_stride, src0, src1, src2, src3, src4, src5, src6, src7); + sig_start += (8 * sig_stride); + LD_UB8(sig_start, sig_stride, src8, src9, src10, src11, src12, src13, src14, + src15); + + ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, running_avg_y_start, + avg_y_stride); + running_avg_y_start += (8 * avg_y_stride); + ST_UB8(src8, src9, src10, src11, src12, src13, src14, src15, + running_avg_y_start, avg_y_stride); + + return FILTER_BLOCK; +} + +int32_t vp8_denoiser_filter_uv_msa( + uint8_t *mc_running_avg_y_ptr, int32_t mc_avg_y_stride, + uint8_t *running_avg_y_ptr, int32_t avg_y_stride, uint8_t *sig_ptr, + int32_t sig_stride, uint32_t motion_magnitude, int32_t increase_denoising) { + uint8_t *running_avg_y_start = running_avg_y_ptr; + uint8_t *sig_start = sig_ptr; + int32_t cnt = 0; + int32_t sum_diff = 0; + int32_t shift_inc1 = 3; + int32_t delta = 0; + int32_t sum_block = 0; + int32_t sum_diff_thresh; + int64_t dst0, dst1, src0, src1, src2, src3; + v16u8 mc_running_avg_y0, running_avg_y, sig0; + v16u8 mc_running_avg_y1, running_avg_y1, sig1; + v16u8 sig2, sig3, sig4, sig5, sig6, sig7; + v16u8 coeff0; + v8i16 diff0, abs_diff0, abs_diff_neg0; + v8i16 adjust0, adjust2; + v8i16 shift_inc1_vec = { 0 }; + v8i16 col_sum0 = { 0 }; + v8i16 temp0_h, temp2_h, cmp, delta_vec; + v4i32 temp0_w; + v2i64 temp0_d, temp1_d; + v16i8 zero = { 0 }; + v8i16 one = __msa_ldi_h(1); + v8i16 four = __msa_ldi_h(4); + v8i16 adj_val = { 6, 4, 3, 0, -6, -4, -3, 0 }; + + sig0 = LD_UB(sig_ptr); + sig_ptr += sig_stride; + temp0_h = (v8i16)__msa_ilvr_b(zero, (v16i8)sig0); + sig1 = LD_UB(sig_ptr); + sig_ptr += sig_stride; + 
temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig1); + sig2 = LD_UB(sig_ptr); + sig_ptr += sig_stride; + temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig2); + sig3 = LD_UB(sig_ptr); + sig_ptr += sig_stride; + temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig3); + sig4 = LD_UB(sig_ptr); + sig_ptr += sig_stride; + temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig4); + sig5 = LD_UB(sig_ptr); + sig_ptr += sig_stride; + temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig5); + sig6 = LD_UB(sig_ptr); + sig_ptr += sig_stride; + temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig6); + sig7 = LD_UB(sig_ptr); + sig_ptr += sig_stride; + temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig7); + temp0_w = __msa_hadd_s_w(temp0_h, temp0_h); + temp0_d = __msa_hadd_s_d(temp0_w, temp0_w); + temp1_d = __msa_splati_d(temp0_d, 1); + temp0_d += temp1_d; + sum_block = __msa_copy_s_w((v4i32)temp0_d, 0); + sig_ptr -= sig_stride * 8; + + if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) { + return COPY_BLOCK; + } + + if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) { + adj_val = __msa_add_a_h(adj_val, one); + + if (increase_denoising) { + adj_val = __msa_add_a_h(adj_val, one); + shift_inc1 = 4; + } + + temp0_h = (v8i16)zero - adj_val; + adj_val = (v8i16)__msa_ilvev_d((v2i64)temp0_h, (v2i64)adj_val); + } + + adj_val = __msa_insert_h(adj_val, 3, cnt); + adj_val = __msa_insert_h(adj_val, 7, cnt); + shift_inc1_vec = __msa_fill_h(shift_inc1); + for (cnt = 4; cnt--;) { + v8i16 mask0 = { 0 }; + mc_running_avg_y0 = LD_UB(mc_running_avg_y_ptr); + sig0 = LD_UB(sig_ptr); + sig_ptr += sig_stride; + mc_running_avg_y_ptr += mc_avg_y_stride; + mc_running_avg_y1 = LD_UB(mc_running_avg_y_ptr); + sig1 = LD_UB(sig_ptr); + coeff0 = (v16u8)__msa_ilvr_b((v16i8)mc_running_avg_y0, (v16i8)sig0); + diff0 = __msa_hsub_u_h(coeff0, coeff0); + abs_diff0 = __msa_add_a_h(diff0, (v8i16)zero); + cmp = __msa_clei_s_h(abs_diff0, 15); + cmp = cmp & one; + mask0 += cmp; + cmp = __msa_clei_s_h(abs_diff0, 7); + cmp = cmp & one; + 
mask0 += cmp; + cmp = abs_diff0 < shift_inc1_vec; + cmp = cmp & one; + mask0 += cmp; + temp0_h = __msa_clei_s_h(diff0, 0); + temp0_h = temp0_h & four; + mask0 += temp0_h; + adjust0 = __msa_vshf_h(mask0, adj_val, adj_val); + temp2_h = __msa_ceqi_h(adjust0, 0); + adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, (v16u8)temp2_h); + col_sum0 += adjust0; + temp0_h = (v8i16)__msa_ilvr_b(zero, (v16i8)sig0); + temp0_h += adjust0; + temp0_h = __msa_maxi_s_h(temp0_h, 0); + temp0_h = (v8i16)__msa_sat_u_h((v8u16)temp0_h, 7); + temp2_h = (v8i16)__msa_pckev_b((v16i8)temp2_h, (v16i8)temp2_h); + running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp0_h, (v16i8)temp0_h); + running_avg_y = + __msa_bmnz_v(running_avg_y, mc_running_avg_y0, (v16u8)temp2_h); + dst0 = __msa_copy_s_d((v2i64)running_avg_y, 0); + SD(dst0, running_avg_y_ptr); + running_avg_y_ptr += avg_y_stride; + + mask0 = __msa_ldi_h(0); + coeff0 = (v16u8)__msa_ilvr_b((v16i8)mc_running_avg_y1, (v16i8)sig1); + diff0 = __msa_hsub_u_h(coeff0, coeff0); + abs_diff0 = __msa_add_a_h(diff0, (v8i16)zero); + cmp = __msa_clei_s_h(abs_diff0, 15); + cmp = cmp & one; + mask0 += cmp; + cmp = __msa_clei_s_h(abs_diff0, 7); + cmp = cmp & one; + mask0 += cmp; + cmp = abs_diff0 < shift_inc1_vec; + cmp = cmp & one; + mask0 += cmp; + temp0_h = __msa_clei_s_h(diff0, 0); + temp0_h = temp0_h & four; + mask0 += temp0_h; + adjust0 = __msa_vshf_h(mask0, adj_val, adj_val); + temp2_h = __msa_ceqi_h(adjust0, 0); + adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, (v16u8)temp2_h); + col_sum0 += adjust0; + temp0_h = (v8i16)__msa_ilvr_b(zero, (v16i8)sig1); + temp0_h += adjust0; + temp0_h = __msa_maxi_s_h(temp0_h, 0); + temp0_h = (v8i16)__msa_sat_u_h((v8u16)temp0_h, 7); + + temp2_h = (v8i16)__msa_pckev_b((v16i8)temp2_h, (v16i8)temp2_h); + running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp0_h, (v16i8)temp0_h); + running_avg_y = + __msa_bmnz_v(running_avg_y, mc_running_avg_y1, (v16u8)temp2_h); + dst1 = __msa_copy_s_d((v2i64)running_avg_y, 0); + 
SD(dst1, running_avg_y_ptr); + + sig_ptr += sig_stride; + mc_running_avg_y_ptr += mc_avg_y_stride; + running_avg_y_ptr += avg_y_stride; + } + + temp0_h = col_sum0; + temp0_w = __msa_hadd_s_w(temp0_h, temp0_h); + temp0_d = __msa_hadd_s_d(temp0_w, temp0_w); + temp1_d = __msa_splati_d(temp0_d, 1); + temp0_d += temp1_d; + sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0); + sig_ptr -= sig_stride * 8; + mc_running_avg_y_ptr -= mc_avg_y_stride * 8; + running_avg_y_ptr -= avg_y_stride * 8; + sum_diff_thresh = SUM_DIFF_THRESHOLD_UV; + + if (increase_denoising) { + sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV; + } + + if (abs(sum_diff) > sum_diff_thresh) { + delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1; + delta_vec = __msa_fill_h(delta); + if (delta < 4) { + for (cnt = 4; cnt--;) { + running_avg_y = LD_UB(running_avg_y_ptr); + mc_running_avg_y0 = LD_UB(mc_running_avg_y_ptr); + sig0 = LD_UB(sig_ptr); + /* Update pointers for next iteration. */ sig_ptr += sig_stride; mc_running_avg_y_ptr += mc_avg_y_stride; running_avg_y_ptr += avg_y_stride; - } - col_sum0 = __msa_min_s_h(col_sum0, val_127); - col_sum1 = __msa_min_s_h(col_sum1, val_127); - temp0_h = col_sum0 + col_sum1; - temp0_w = __msa_hadd_s_w(temp0_h, temp0_h); - temp0_d = __msa_hadd_s_d(temp0_w, temp0_w); - temp1_d = __msa_splati_d(temp0_d, 1); - temp0_d += temp1_d; - sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0); - sig_ptr -= sig_stride * 16; - mc_running_avg_y_ptr -= mc_avg_y_stride * 16; - running_avg_y_ptr -= avg_y_stride * 16; - - if (increase_denoising) - { - sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH; - } - - if (abs(sum_diff) > sum_diff_thresh) - { - delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1; - delta_vec = __msa_fill_h(delta); - if (delta < 4) - { - for (cnt = 8; cnt--;) - { - running_avg_y = LD_UB(running_avg_y_ptr); - mc_running_avg_y0 = LD_UB(mc_running_avg_y_ptr); - sig0 = LD_UB(sig_ptr); - sig_ptr += sig_stride; - mc_running_avg_y_ptr += mc_avg_y_stride; - running_avg_y_ptr += avg_y_stride; - 
mc_running_avg_y1 = LD_UB(mc_running_avg_y_ptr); - sig1 = LD_UB(sig_ptr); - running_avg_y1 = LD_UB(running_avg_y_ptr); - ILVRL_B2_UB(mc_running_avg_y0, sig0, coeff0, coeff1); - HSUB_UB2_SH(coeff0, coeff1, diff0, diff1); - abs_diff0 = __msa_add_a_h(diff0, zero); - abs_diff1 = __msa_add_a_h(diff1, zero); - temp0_h = abs_diff0 < delta_vec; - temp1_h = abs_diff1 < delta_vec; - abs_diff0 = (v8i16)__msa_bmz_v((v16u8)abs_diff0, - (v16u8)delta_vec, - (v16u8)temp0_h); - abs_diff1 = (v8i16)__msa_bmz_v((v16u8)abs_diff1, - (v16u8)delta_vec, - (v16u8)temp1_h); - SUB2(zero, abs_diff0, zero, abs_diff1, abs_diff_neg0, - abs_diff_neg1); - abs_diff_neg0 = zero - abs_diff0; - abs_diff_neg1 = zero - abs_diff1; - temp0_h = __msa_clei_s_h(diff0, 0); - temp1_h = __msa_clei_s_h(diff1, 0); - adjust0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, - (v16u8)abs_diff_neg0, - (v16u8)temp0_h); - adjust1 = (v8i16)__msa_bmnz_v((v16u8)abs_diff1, - (v16u8)abs_diff_neg1, - (v16u8)temp1_h); - ILVRL_B2_SH(zero, running_avg_y, temp2_h, temp3_h); - ADD2(temp2_h, adjust0, temp3_h, adjust1, adjust2, adjust3); - MAXI_SH2_SH(adjust2, adjust3, 0); - SAT_UH2_SH(adjust2, adjust3, 7); - temp0_h = __msa_ceqi_h(diff0, 0); - temp1_h = __msa_ceqi_h(diff1, 0); - adjust2 = (v8i16)__msa_bmz_v((v16u8)adjust2, (v16u8)temp2_h, - (v16u8)temp0_h); - adjust3 = (v8i16)__msa_bmz_v((v16u8)adjust3, (v16u8)temp3_h, - (v16u8)temp1_h); - adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)zero, - (v16u8)temp0_h); - adjust1 = (v8i16)__msa_bmnz_v((v16u8)adjust1, (v16u8)zero, - (v16u8)temp1_h); - ADD2(col_sum2, adjust0, col_sum3, adjust1, col_sum2, col_sum3); - running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust3, - (v16i8)adjust2); - ST_UB(running_avg_y, running_avg_y_ptr - avg_y_stride); - ILVRL_B2_UB(mc_running_avg_y1, sig1, coeff0, coeff1); - HSUB_UB2_SH(coeff0, coeff1, diff0, diff1); - abs_diff0 = __msa_add_a_h(diff0, zero); - abs_diff1 = __msa_add_a_h(diff1, zero); - temp0_h = abs_diff0 < delta_vec; - temp1_h = abs_diff1 < delta_vec; - 
abs_diff0 = (v8i16)__msa_bmz_v((v16u8)abs_diff0, - (v16u8)delta_vec, - (v16u8)temp0_h); - abs_diff1 = (v8i16)__msa_bmz_v((v16u8)abs_diff1, - (v16u8)delta_vec, - (v16u8)temp1_h); - SUB2(zero, abs_diff0, zero, abs_diff1, abs_diff_neg0, - abs_diff_neg1); - temp0_h = __msa_clei_s_h(diff0, 0); - temp1_h = __msa_clei_s_h(diff1, 0); - adjust0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, - (v16u8)abs_diff_neg0, - (v16u8)temp0_h); - adjust1 = (v8i16)__msa_bmnz_v((v16u8)abs_diff1, - (v16u8)abs_diff_neg1, - (v16u8)temp1_h); - ILVRL_H2_SH(zero, running_avg_y1, temp2_h, temp3_h); - ADD2(temp2_h, adjust0, temp3_h, adjust1, adjust2, adjust3); - MAXI_SH2_SH(adjust2, adjust3, 0); - SAT_UH2_SH(adjust2, adjust3, 7); - temp0_h = __msa_ceqi_h(diff0, 0); - temp1_h = __msa_ceqi_h(diff1, 0); - adjust2 = (v8i16)__msa_bmz_v((v16u8)adjust2, (v16u8)temp2_h, - (v16u8)temp0_h); - adjust3 = (v8i16)__msa_bmz_v((v16u8)adjust3, (v16u8)temp3_h, - (v16u8)temp1_h); - adjust0 = (v8i16)__msa_bmz_v((v16u8)adjust0, (v16u8)zero, - (v16u8)temp0_h); - adjust1 = (v8i16)__msa_bmz_v((v16u8)adjust1, (v16u8)zero, - (v16u8)temp1_h); - ADD2(col_sum2, adjust0, col_sum3, adjust1, col_sum2, col_sum3); - running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust3, - (v16i8)adjust2); - ST_UB(running_avg_y, running_avg_y_ptr); - running_avg_y_ptr += avg_y_stride; - } - - col_sum2 = __msa_min_s_h(col_sum2, val_127); - col_sum3 = __msa_min_s_h(col_sum3, val_127); - temp0_h = col_sum2 + col_sum3; - temp0_w = __msa_hadd_s_w(temp0_h, temp0_h); - temp0_d = __msa_hadd_s_d(temp0_w, temp0_w); - temp1_d = __msa_splati_d(temp0_d, 1); - temp0_d += (v2i64)temp1_d; - sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0); - if (abs(sum_diff) > SUM_DIFF_THRESHOLD) - { - return COPY_BLOCK; - } - } - else - { - return COPY_BLOCK; - } - } - - LD_UB8(sig_start, sig_stride, src0, src1, src2, src3, src4, src5, src6, - src7); - sig_start += (8 * sig_stride); - LD_UB8(sig_start, sig_stride, src8, src9, src10, src11, src12, src13, - src14, src15); - - ST_UB8(src0, 
src1, src2, src3, src4, src5, src6, src7, running_avg_y_start, - avg_y_stride); - running_avg_y_start += (8 * avg_y_stride); - ST_UB8(src8, src9, src10, src11, src12, src13, src14, src15, - running_avg_y_start, avg_y_stride); - - return FILTER_BLOCK; -} - -int32_t vp8_denoiser_filter_uv_msa(uint8_t *mc_running_avg_y_ptr, - int32_t mc_avg_y_stride, - uint8_t *running_avg_y_ptr, - int32_t avg_y_stride, - uint8_t *sig_ptr, - int32_t sig_stride, - uint32_t motion_magnitude, - int32_t increase_denoising) -{ - uint8_t *running_avg_y_start = running_avg_y_ptr; - uint8_t *sig_start = sig_ptr; - int32_t cnt = 0; - int32_t sum_diff = 0; - int32_t shift_inc1 = 3; - int32_t delta = 0; - int32_t sum_block = 0; - int32_t sum_diff_thresh; - int64_t dst0, dst1, src0, src1, src2, src3; - v16u8 mc_running_avg_y0, running_avg_y, sig0; - v16u8 mc_running_avg_y1, running_avg_y1, sig1; - v16u8 sig2, sig3, sig4, sig5, sig6, sig7; - v16u8 coeff0; - v8i16 diff0, abs_diff0, abs_diff_neg0; - v8i16 adjust0, adjust2; - v8i16 shift_inc1_vec = { 0 }; - v8i16 col_sum0 = { 0 }; - v8i16 temp0_h, temp2_h, cmp, delta_vec; - v4i32 temp0_w; - v2i64 temp0_d, temp1_d; - v16i8 zero = { 0 }; - v8i16 one = __msa_ldi_h(1); - v8i16 four = __msa_ldi_h(4); - v8i16 adj_val = { 6, 4, 3, 0, -6, -4, -3, 0 }; - - - sig0 = LD_UB(sig_ptr); - sig_ptr += sig_stride; - temp0_h = (v8i16)__msa_ilvr_b(zero, (v16i8)sig0); - sig1 = LD_UB(sig_ptr); - sig_ptr += sig_stride; - temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig1); - sig2 = LD_UB(sig_ptr); - sig_ptr += sig_stride; - temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig2); - sig3 = LD_UB(sig_ptr); - sig_ptr += sig_stride; - temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig3); - sig4 = LD_UB(sig_ptr); - sig_ptr += sig_stride; - temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig4); - sig5 = LD_UB(sig_ptr); - sig_ptr += sig_stride; - temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig5); - sig6 = LD_UB(sig_ptr); - sig_ptr += sig_stride; - temp0_h += (v8i16)__msa_ilvr_b(zero, 
(v16i8)sig6); - sig7 = LD_UB(sig_ptr); - sig_ptr += sig_stride; - temp0_h += (v8i16)__msa_ilvr_b(zero, (v16i8)sig7); - temp0_w = __msa_hadd_s_w(temp0_h, temp0_h); - temp0_d = __msa_hadd_s_d(temp0_w, temp0_w); - temp1_d = __msa_splati_d(temp0_d, 1); - temp0_d += temp1_d; - sum_block = __msa_copy_s_w((v4i32)temp0_d, 0); - sig_ptr -= sig_stride * 8; - - if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) - { - return COPY_BLOCK; - } - - if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) - { - adj_val = __msa_add_a_h(adj_val, one); - - if (increase_denoising) - { - adj_val = __msa_add_a_h(adj_val, one); - shift_inc1 = 4; - } - - temp0_h = (v8i16)zero - adj_val; - adj_val = (v8i16)__msa_ilvev_d((v2i64)temp0_h, (v2i64)adj_val); - } - - adj_val = __msa_insert_h(adj_val, 3, cnt); - adj_val = __msa_insert_h(adj_val, 7, cnt); - shift_inc1_vec = __msa_fill_h(shift_inc1); - for (cnt = 4; cnt--;) - { - v8i16 mask0 = { 0 }; - mc_running_avg_y0 = LD_UB(mc_running_avg_y_ptr); - sig0 = LD_UB(sig_ptr); - sig_ptr += sig_stride; - mc_running_avg_y_ptr += mc_avg_y_stride; mc_running_avg_y1 = LD_UB(mc_running_avg_y_ptr); sig1 = LD_UB(sig_ptr); + running_avg_y1 = LD_UB(running_avg_y_ptr); + coeff0 = (v16u8)__msa_ilvr_b((v16i8)mc_running_avg_y0, (v16i8)sig0); diff0 = __msa_hsub_u_h(coeff0, coeff0); abs_diff0 = __msa_add_a_h(diff0, (v8i16)zero); - cmp = __msa_clei_s_h(abs_diff0, 15); - cmp = cmp & one; - mask0 += cmp; - cmp = __msa_clei_s_h(abs_diff0, 7); - cmp = cmp & one; - mask0 += cmp; - cmp = abs_diff0 < shift_inc1_vec; - cmp = cmp & one; - mask0 += cmp; + temp0_h = delta_vec < abs_diff0; + abs_diff0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, (v16u8)delta_vec, + (v16u8)temp0_h); + abs_diff_neg0 = (v8i16)zero - abs_diff0; temp0_h = __msa_clei_s_h(diff0, 0); - temp0_h = temp0_h & four; - mask0 += temp0_h; - adjust0 = __msa_vshf_h(mask0, adj_val, adj_val); - temp2_h = __msa_ceqi_h(adjust0, 0); - adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, - (v16u8)temp2_h); + 
adjust0 = (v8i16)__msa_bmz_v((v16u8)abs_diff0, (v16u8)abs_diff_neg0, + (v16u8)temp0_h); + temp2_h = (v8i16)__msa_ilvr_b(zero, (v16i8)running_avg_y); + adjust2 = temp2_h + adjust0; + adjust2 = __msa_maxi_s_h(adjust2, 0); + adjust2 = (v8i16)__msa_sat_u_h((v8u16)adjust2, 7); + temp0_h = __msa_ceqi_h(diff0, 0); + adjust2 = + (v8i16)__msa_bmnz_v((v16u8)adjust2, (v16u8)temp2_h, (v16u8)temp0_h); + adjust0 = + (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)zero, (v16u8)temp0_h); col_sum0 += adjust0; - temp0_h = (v8i16)__msa_ilvr_b(zero, (v16i8)sig0); - temp0_h += adjust0; - temp0_h = __msa_maxi_s_h(temp0_h, 0); - temp0_h = (v8i16)__msa_sat_u_h((v8u16)temp0_h, 7); - temp2_h = (v8i16)__msa_pckev_b((v16i8)temp2_h, (v16i8)temp2_h); - running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp0_h, (v16i8)temp0_h); - running_avg_y = __msa_bmnz_v(running_avg_y, mc_running_avg_y0, - (v16u8)temp2_h); - dst0 = __msa_copy_s_d((v2i64)running_avg_y, 0); - SD(dst0, running_avg_y_ptr); - running_avg_y_ptr += avg_y_stride; + running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust2, (v16i8)adjust2); + dst0 = __msa_copy_s_d((v2i64)running_avg_y, 0); + SD(dst0, running_avg_y_ptr - avg_y_stride); - mask0 = __msa_ldi_h(0); coeff0 = (v16u8)__msa_ilvr_b((v16i8)mc_running_avg_y1, (v16i8)sig1); diff0 = __msa_hsub_u_h(coeff0, coeff0); abs_diff0 = __msa_add_a_h(diff0, (v8i16)zero); - cmp = __msa_clei_s_h(abs_diff0, 15); - cmp = cmp & one; - mask0 += cmp; - cmp = __msa_clei_s_h(abs_diff0, 7); - cmp = cmp & one; - mask0 += cmp; - cmp = abs_diff0 < shift_inc1_vec; - cmp = cmp & one; - mask0 += cmp; + temp0_h = delta_vec < abs_diff0; + abs_diff0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, (v16u8)delta_vec, + (v16u8)temp0_h); + abs_diff_neg0 = (v8i16)zero - abs_diff0; temp0_h = __msa_clei_s_h(diff0, 0); - temp0_h = temp0_h & four; - mask0 += temp0_h; - adjust0 = __msa_vshf_h(mask0, adj_val, adj_val); - temp2_h = __msa_ceqi_h(adjust0, 0); - adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, - (v16u8)temp2_h); + adjust0 
= (v8i16)__msa_bmz_v((v16u8)abs_diff0, (v16u8)abs_diff_neg0, + (v16u8)temp0_h); + temp2_h = (v8i16)__msa_ilvr_b(zero, (v16i8)running_avg_y1); + adjust2 = temp2_h + adjust0; + adjust2 = __msa_maxi_s_h(adjust2, 0); + adjust2 = (v8i16)__msa_sat_u_h((v8u16)adjust2, 7); + temp0_h = __msa_ceqi_h(diff0, 0); + adjust2 = + (v8i16)__msa_bmnz_v((v16u8)adjust2, (v16u8)temp2_h, (v16u8)temp0_h); + adjust0 = + (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)zero, (v16u8)temp0_h); col_sum0 += adjust0; - temp0_h = (v8i16)__msa_ilvr_b(zero, (v16i8)sig1); - temp0_h += adjust0; - temp0_h = __msa_maxi_s_h(temp0_h, 0); - temp0_h = (v8i16)__msa_sat_u_h((v8u16)temp0_h, 7); - - temp2_h = (v8i16)__msa_pckev_b((v16i8)temp2_h, (v16i8)temp2_h); - running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp0_h, (v16i8)temp0_h); - running_avg_y = __msa_bmnz_v(running_avg_y, mc_running_avg_y1, - (v16u8)temp2_h); + running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust2, (v16i8)adjust2); dst1 = __msa_copy_s_d((v2i64)running_avg_y, 0); SD(dst1, running_avg_y_ptr); - - sig_ptr += sig_stride; - mc_running_avg_y_ptr += mc_avg_y_stride; running_avg_y_ptr += avg_y_stride; + } + + temp0_h = col_sum0; + temp0_w = __msa_hadd_s_w(temp0_h, temp0_h); + temp0_d = __msa_hadd_s_d(temp0_w, temp0_w); + temp1_d = __msa_splati_d(temp0_d, 1); + temp0_d += temp1_d; + sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0); + + if (abs(sum_diff) > sum_diff_thresh) { + return COPY_BLOCK; + } + } else { + return COPY_BLOCK; } + } - temp0_h = col_sum0; - temp0_w = __msa_hadd_s_w(temp0_h, temp0_h); - temp0_d = __msa_hadd_s_d(temp0_w, temp0_w); - temp1_d = __msa_splati_d(temp0_d, 1); - temp0_d += temp1_d; - sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0); - sig_ptr -= sig_stride * 8; - mc_running_avg_y_ptr -= mc_avg_y_stride * 8; - running_avg_y_ptr -= avg_y_stride * 8; - sum_diff_thresh = SUM_DIFF_THRESHOLD_UV; + LD4(sig_start, sig_stride, src0, src1, src2, src3); + sig_start += (4 * sig_stride); + SD4(src0, src1, src2, src3, running_avg_y_start, 
avg_y_stride); + running_avg_y_start += (4 * avg_y_stride); - if (increase_denoising) - { - sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV; - } + LD4(sig_start, sig_stride, src0, src1, src2, src3); + SD4(src0, src1, src2, src3, running_avg_y_start, avg_y_stride); - if (abs(sum_diff) > sum_diff_thresh) - { - delta = ((abs(sum_diff) - sum_diff_thresh) >> 8) + 1; - delta_vec = __msa_fill_h(delta); - if (delta < 4) - { - for (cnt = 4; cnt--;) - { - running_avg_y = LD_UB(running_avg_y_ptr); - mc_running_avg_y0 = LD_UB(mc_running_avg_y_ptr); - sig0 = LD_UB(sig_ptr); - /* Update pointers for next iteration. */ - sig_ptr += sig_stride; - mc_running_avg_y_ptr += mc_avg_y_stride; - running_avg_y_ptr += avg_y_stride; - - mc_running_avg_y1 = LD_UB(mc_running_avg_y_ptr); - sig1 = LD_UB(sig_ptr); - running_avg_y1 = LD_UB(running_avg_y_ptr); - - coeff0 = (v16u8)__msa_ilvr_b((v16i8)mc_running_avg_y0, - (v16i8)sig0); - diff0 = __msa_hsub_u_h(coeff0, coeff0); - abs_diff0 = __msa_add_a_h(diff0, (v8i16)zero); - temp0_h = delta_vec < abs_diff0; - abs_diff0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, - (v16u8)delta_vec, - (v16u8)temp0_h); - abs_diff_neg0 = (v8i16)zero - abs_diff0; - temp0_h = __msa_clei_s_h(diff0, 0); - adjust0 = (v8i16)__msa_bmz_v((v16u8)abs_diff0, - (v16u8)abs_diff_neg0, - (v16u8)temp0_h); - temp2_h = (v8i16)__msa_ilvr_b(zero, (v16i8)running_avg_y); - adjust2 = temp2_h + adjust0; - adjust2 = __msa_maxi_s_h(adjust2, 0); - adjust2 = (v8i16)__msa_sat_u_h((v8u16)adjust2, 7); - temp0_h = __msa_ceqi_h(diff0, 0); - adjust2 = (v8i16)__msa_bmnz_v((v16u8)adjust2, (v16u8)temp2_h, - (v16u8)temp0_h); - adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)zero, - (v16u8)temp0_h); - col_sum0 += adjust0; - running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust2, - (v16i8)adjust2); - dst0 = __msa_copy_s_d((v2i64)running_avg_y, 0); - SD(dst0, running_avg_y_ptr - avg_y_stride); - - coeff0 = (v16u8)__msa_ilvr_b((v16i8)mc_running_avg_y1, - (v16i8)sig1); - diff0 = __msa_hsub_u_h(coeff0, coeff0); - 
abs_diff0 = __msa_add_a_h(diff0, (v8i16)zero); - temp0_h = delta_vec < abs_diff0; - abs_diff0 = (v8i16)__msa_bmnz_v((v16u8)abs_diff0, - (v16u8)delta_vec, - (v16u8)temp0_h); - abs_diff_neg0 = (v8i16)zero - abs_diff0; - temp0_h = __msa_clei_s_h(diff0, 0); - adjust0 = (v8i16)__msa_bmz_v((v16u8)abs_diff0, - (v16u8)abs_diff_neg0, - (v16u8)temp0_h); - temp2_h = (v8i16)__msa_ilvr_b(zero, (v16i8)running_avg_y1); - adjust2 = temp2_h + adjust0; - adjust2 = __msa_maxi_s_h(adjust2, 0); - adjust2 = (v8i16)__msa_sat_u_h((v8u16)adjust2, 7); - temp0_h = __msa_ceqi_h(diff0, 0); - adjust2 = (v8i16)__msa_bmnz_v((v16u8)adjust2, (v16u8)temp2_h, - (v16u8)temp0_h); - adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)zero, - (v16u8)temp0_h); - col_sum0 += adjust0; - running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust2, - (v16i8)adjust2); - dst1 = __msa_copy_s_d((v2i64)running_avg_y, 0); - SD(dst1, running_avg_y_ptr); - running_avg_y_ptr += avg_y_stride; - } - - temp0_h = col_sum0; - temp0_w = __msa_hadd_s_w(temp0_h, temp0_h); - temp0_d = __msa_hadd_s_d(temp0_w, temp0_w); - temp1_d = __msa_splati_d(temp0_d, 1); - temp0_d += temp1_d; - sum_diff = __msa_copy_s_w((v4i32)temp0_d, 0); - - if (abs(sum_diff) > sum_diff_thresh) - { - return COPY_BLOCK; - } - } - else - { - return COPY_BLOCK; - } - } - - LD4(sig_start, sig_stride, src0, src1, src2, src3); - sig_start += (4 * sig_stride); - SD4(src0, src1, src2, src3, running_avg_y_start, avg_y_stride); - running_avg_y_start += (4 * avg_y_stride); - - LD4(sig_start, sig_stride, src0, src1, src2, src3); - SD4(src0, src1, src2, src3, running_avg_y_start, avg_y_stride); - - return FILTER_BLOCK; + return FILTER_BLOCK; } diff --git a/libs/libvpx/vp8/encoder/mips/msa/encodeopt_msa.c b/libs/libvpx/vp8/encoder/mips/msa/encodeopt_msa.c index ea794a8a8e..2bcddb6235 100644 --- a/libs/libvpx/vp8/encoder/mips/msa/encodeopt_msa.c +++ b/libs/libvpx/vp8/encoder/mips/msa/encodeopt_msa.c @@ -12,163 +12,156 @@ #include "vp8/common/mips/msa/vp8_macros_msa.h" #include 
"vp8/encoder/block.h" -int32_t vp8_block_error_msa(int16_t *coeff_ptr, int16_t *dq_coeff_ptr) -{ - int32_t err = 0; - uint32_t loop_cnt; - v8i16 coeff, dq_coeff, coeff0, coeff1; - v4i32 diff0, diff1; - v2i64 err0 = { 0 }; - v2i64 err1 = { 0 }; +int32_t vp8_block_error_msa(int16_t *coeff_ptr, int16_t *dq_coeff_ptr) { + int32_t err = 0; + uint32_t loop_cnt; + v8i16 coeff, dq_coeff, coeff0, coeff1; + v4i32 diff0, diff1; + v2i64 err0 = { 0 }; + v2i64 err1 = { 0 }; - for (loop_cnt = 2; loop_cnt--;) - { - coeff = LD_SH(coeff_ptr); - dq_coeff = LD_SH(dq_coeff_ptr); - ILVRL_H2_SH(coeff, dq_coeff, coeff0, coeff1); - HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); - DPADD_SD2_SD(diff0, diff1, err0, err1); - coeff_ptr += 8; - dq_coeff_ptr += 8; - } + for (loop_cnt = 2; loop_cnt--;) { + coeff = LD_SH(coeff_ptr); + dq_coeff = LD_SH(dq_coeff_ptr); + ILVRL_H2_SH(coeff, dq_coeff, coeff0, coeff1); + HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); + DPADD_SD2_SD(diff0, diff1, err0, err1); + coeff_ptr += 8; + dq_coeff_ptr += 8; + } + err0 += __msa_splati_d(err0, 1); + err1 += __msa_splati_d(err1, 1); + err = __msa_copy_s_d(err0, 0); + err += __msa_copy_s_d(err1, 0); + + return err; +} + +int32_t vp8_mbblock_error_msa(MACROBLOCK *mb, int32_t dc) { + BLOCK *be; + BLOCKD *bd; + int16_t *coeff_ptr, *dq_coeff_ptr; + int32_t err = 0; + uint32_t loop_cnt; + v8i16 coeff, coeff0, coeff1, coeff2, coeff3, coeff4; + v8i16 dq_coeff, dq_coeff2, dq_coeff3, dq_coeff4; + v4i32 diff0, diff1; + v2i64 err0, err1; + v16u8 zero = { 0 }; + v16u8 mask0 = (v16u8)__msa_ldi_b(255); + + if (1 == dc) { + mask0 = (v16u8)__msa_insve_w((v4i32)mask0, 0, (v4i32)zero); + } + + for (loop_cnt = 0; loop_cnt < 8; ++loop_cnt) { + be = &mb->block[2 * loop_cnt]; + bd = &mb->e_mbd.block[2 * loop_cnt]; + coeff_ptr = be->coeff; + dq_coeff_ptr = bd->dqcoeff; + coeff = LD_SH(coeff_ptr); + dq_coeff = LD_SH(dq_coeff_ptr); + coeff_ptr += 8; + dq_coeff_ptr += 8; + coeff2 = LD_SH(coeff_ptr); + dq_coeff2 = LD_SH(dq_coeff_ptr); + be = 
&mb->block[2 * loop_cnt + 1]; + bd = &mb->e_mbd.block[2 * loop_cnt + 1]; + coeff_ptr = be->coeff; + dq_coeff_ptr = bd->dqcoeff; + coeff3 = LD_SH(coeff_ptr); + dq_coeff3 = LD_SH(dq_coeff_ptr); + coeff_ptr += 8; + dq_coeff_ptr += 8; + coeff4 = LD_SH(coeff_ptr); + dq_coeff4 = LD_SH(dq_coeff_ptr); + ILVRL_H2_SH(coeff, dq_coeff, coeff0, coeff1); + HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); + diff0 = (v4i32)__msa_bmnz_v(zero, (v16u8)diff0, mask0); + DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1); + ILVRL_H2_SH(coeff2, dq_coeff2, coeff0, coeff1); + HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); + DPADD_SD2_SD(diff0, diff1, err0, err1); err0 += __msa_splati_d(err0, 1); err1 += __msa_splati_d(err1, 1); - err = __msa_copy_s_d(err0, 0); + err += __msa_copy_s_d(err0, 0); err += __msa_copy_s_d(err1, 0); - return err; + ILVRL_H2_SH(coeff3, dq_coeff3, coeff0, coeff1); + HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); + diff0 = (v4i32)__msa_bmnz_v(zero, (v16u8)diff0, mask0); + DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1); + ILVRL_H2_SH(coeff4, dq_coeff4, coeff0, coeff1); + HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); + DPADD_SD2_SD(diff0, diff1, err0, err1); + err0 += __msa_splati_d(err0, 1); + err1 += __msa_splati_d(err1, 1); + err += __msa_copy_s_d(err0, 0); + err += __msa_copy_s_d(err1, 0); + } + + return err; } -int32_t vp8_mbblock_error_msa(MACROBLOCK *mb, int32_t dc) -{ - BLOCK *be; - BLOCKD *bd; - int16_t *coeff_ptr, *dq_coeff_ptr; - int32_t err = 0; - uint32_t loop_cnt; - v8i16 coeff, coeff0, coeff1, coeff2, coeff3, coeff4; - v8i16 dq_coeff, dq_coeff2, dq_coeff3, dq_coeff4; - v4i32 diff0, diff1; - v2i64 err0, err1; - v16u8 zero = { 0 }; - v16u8 mask0 = (v16u8)__msa_ldi_b(255); +int32_t vp8_mbuverror_msa(MACROBLOCK *mb) { + BLOCK *be; + BLOCKD *bd; + int16_t *coeff_ptr, *dq_coeff_ptr; + int32_t err = 0; + uint32_t loop_cnt; + v8i16 coeff, coeff0, coeff1, coeff2, coeff3, coeff4; + v8i16 dq_coeff, dq_coeff2, dq_coeff3, dq_coeff4; + v4i32 diff0, diff1; + v2i64 err0, err1, 
err_dup0, err_dup1; - if (1 == dc) - { - mask0 = (v16u8)__msa_insve_w((v4i32)mask0, 0, (v4i32)zero); - } + for (loop_cnt = 16; loop_cnt < 24; loop_cnt += 2) { + be = &mb->block[loop_cnt]; + bd = &mb->e_mbd.block[loop_cnt]; + coeff_ptr = be->coeff; + dq_coeff_ptr = bd->dqcoeff; + coeff = LD_SH(coeff_ptr); + dq_coeff = LD_SH(dq_coeff_ptr); + coeff_ptr += 8; + dq_coeff_ptr += 8; + coeff2 = LD_SH(coeff_ptr); + dq_coeff2 = LD_SH(dq_coeff_ptr); + be = &mb->block[loop_cnt + 1]; + bd = &mb->e_mbd.block[loop_cnt + 1]; + coeff_ptr = be->coeff; + dq_coeff_ptr = bd->dqcoeff; + coeff3 = LD_SH(coeff_ptr); + dq_coeff3 = LD_SH(dq_coeff_ptr); + coeff_ptr += 8; + dq_coeff_ptr += 8; + coeff4 = LD_SH(coeff_ptr); + dq_coeff4 = LD_SH(dq_coeff_ptr); - for (loop_cnt = 0; loop_cnt < 8; loop_cnt++) - { - be = &mb->block[2 * loop_cnt]; - bd = &mb->e_mbd.block[2 * loop_cnt]; - coeff_ptr = be->coeff; - dq_coeff_ptr = bd->dqcoeff; - coeff = LD_SH(coeff_ptr); - dq_coeff = LD_SH(dq_coeff_ptr); - coeff_ptr += 8; - dq_coeff_ptr += 8; - coeff2 = LD_SH(coeff_ptr); - dq_coeff2 = LD_SH(dq_coeff_ptr); - be = &mb->block[2 * loop_cnt + 1]; - bd = &mb->e_mbd.block[2 * loop_cnt + 1]; - coeff_ptr = be->coeff; - dq_coeff_ptr = bd->dqcoeff; - coeff3 = LD_SH(coeff_ptr); - dq_coeff3 = LD_SH(dq_coeff_ptr); - coeff_ptr += 8; - dq_coeff_ptr += 8; - coeff4 = LD_SH(coeff_ptr); - dq_coeff4 = LD_SH(dq_coeff_ptr); - ILVRL_H2_SH(coeff, dq_coeff, coeff0, coeff1); - HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); - diff0 = (v4i32)__msa_bmnz_v(zero, (v16u8)diff0, mask0); - DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1); - ILVRL_H2_SH(coeff2, dq_coeff2, coeff0, coeff1); - HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); - DPADD_SD2_SD(diff0, diff1, err0, err1); - err0 += __msa_splati_d(err0, 1); - err1 += __msa_splati_d(err1, 1); - err += __msa_copy_s_d(err0, 0); - err += __msa_copy_s_d(err1, 0); + ILVRL_H2_SH(coeff, dq_coeff, coeff0, coeff1); + HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); + DOTP_SW2_SD(diff0, diff1, diff0, diff1, 
err0, err1); - ILVRL_H2_SH(coeff3, dq_coeff3, coeff0, coeff1); - HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); - diff0 = (v4i32)__msa_bmnz_v(zero, (v16u8)diff0, mask0); - DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1); - ILVRL_H2_SH(coeff4, dq_coeff4, coeff0, coeff1); - HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); - DPADD_SD2_SD(diff0, diff1, err0, err1); - err0 += __msa_splati_d(err0, 1); - err1 += __msa_splati_d(err1, 1); - err += __msa_copy_s_d(err0, 0); - err += __msa_copy_s_d(err1, 0); - } + ILVRL_H2_SH(coeff2, dq_coeff2, coeff0, coeff1); + HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); + DPADD_SD2_SD(diff0, diff1, err0, err1); + err_dup0 = __msa_splati_d(err0, 1); + err_dup1 = __msa_splati_d(err1, 1); + ADD2(err0, err_dup0, err1, err_dup1, err0, err1); + err += __msa_copy_s_d(err0, 0); + err += __msa_copy_s_d(err1, 0); - return err; -} - -int32_t vp8_mbuverror_msa(MACROBLOCK *mb) -{ - BLOCK *be; - BLOCKD *bd; - int16_t *coeff_ptr, *dq_coeff_ptr; - int32_t err = 0; - uint32_t loop_cnt; - v8i16 coeff, coeff0, coeff1, coeff2, coeff3, coeff4; - v8i16 dq_coeff, dq_coeff2, dq_coeff3, dq_coeff4; - v4i32 diff0, diff1; - v2i64 err0, err1, err_dup0, err_dup1; - - for (loop_cnt = 16; loop_cnt < 24; loop_cnt += 2) - { - be = &mb->block[loop_cnt]; - bd = &mb->e_mbd.block[loop_cnt]; - coeff_ptr = be->coeff; - dq_coeff_ptr = bd->dqcoeff; - coeff = LD_SH(coeff_ptr); - dq_coeff = LD_SH(dq_coeff_ptr); - coeff_ptr += 8; - dq_coeff_ptr += 8; - coeff2 = LD_SH(coeff_ptr); - dq_coeff2 = LD_SH(dq_coeff_ptr); - be = &mb->block[loop_cnt + 1]; - bd = &mb->e_mbd.block[loop_cnt + 1]; - coeff_ptr = be->coeff; - dq_coeff_ptr = bd->dqcoeff; - coeff3 = LD_SH(coeff_ptr); - dq_coeff3 = LD_SH(dq_coeff_ptr); - coeff_ptr += 8; - dq_coeff_ptr += 8; - coeff4 = LD_SH(coeff_ptr); - dq_coeff4 = LD_SH(dq_coeff_ptr); - - ILVRL_H2_SH(coeff, dq_coeff, coeff0, coeff1); - HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); - DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1); - - ILVRL_H2_SH(coeff2, dq_coeff2, coeff0, 
coeff1); - HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); - DPADD_SD2_SD(diff0, diff1, err0, err1); - err_dup0 = __msa_splati_d(err0, 1); - err_dup1 = __msa_splati_d(err1, 1); - ADD2(err0, err_dup0, err1, err_dup1, err0, err1); - err += __msa_copy_s_d(err0, 0); - err += __msa_copy_s_d(err1, 0); - - ILVRL_H2_SH(coeff3, dq_coeff3, coeff0, coeff1); - HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); - DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1); - ILVRL_H2_SH(coeff4, dq_coeff4, coeff0, coeff1); - HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); - DPADD_SD2_SD(diff0, diff1, err0, err1); - err_dup0 = __msa_splati_d(err0, 1); - err_dup1 = __msa_splati_d(err1, 1); - ADD2(err0, err_dup0, err1, err_dup1, err0, err1); - err += __msa_copy_s_d(err0, 0); - err += __msa_copy_s_d(err1, 0); - } - - return err; + ILVRL_H2_SH(coeff3, dq_coeff3, coeff0, coeff1); + HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); + DOTP_SW2_SD(diff0, diff1, diff0, diff1, err0, err1); + ILVRL_H2_SH(coeff4, dq_coeff4, coeff0, coeff1); + HSUB_UH2_SW(coeff0, coeff1, diff0, diff1); + DPADD_SD2_SD(diff0, diff1, err0, err1); + err_dup0 = __msa_splati_d(err0, 1); + err_dup1 = __msa_splati_d(err1, 1); + ADD2(err0, err_dup0, err1, err_dup1, err0, err1); + err += __msa_copy_s_d(err0, 0); + err += __msa_copy_s_d(err1, 0); + } + + return err; } diff --git a/libs/libvpx/vp8/encoder/mips/msa/quantize_msa.c b/libs/libvpx/vp8/encoder/mips/msa/quantize_msa.c index 0f97646b56..11f70ae82e 100644 --- a/libs/libvpx/vp8/encoder/mips/msa/quantize_msa.c +++ b/libs/libvpx/vp8/encoder/mips/msa/quantize_msa.c @@ -15,232 +15,199 @@ static int8_t fast_quantize_b_msa(int16_t *coeff_ptr, int16_t *zbin, int16_t *round, int16_t *quant, int16_t *de_quant, int16_t *q_coeff, - int16_t *dq_coeff) -{ - int32_t cnt, eob; - v16i8 inv_zig_zag = { 0, 1, 5, 6, 2, 4, 7, 12, - 3, 8, 11, 13, 9, 10, 14, 15 }; - v8i16 round0, round1; - v8i16 sign_z0, sign_z1; - v8i16 q_coeff0, q_coeff1; - v8i16 x0, x1, de_quant0, de_quant1; - v8i16 coeff0, coeff1, z0, z1; - v8i16 
quant0, quant1, quant2, quant3; - v8i16 zero = { 0 }; - v8i16 inv_zig_zag0, inv_zig_zag1; - v8i16 zigzag_mask0 = { 0, 1, 4, 8, 5, 2, 3, 6 }; - v8i16 zigzag_mask1 = { 9, 12, 13, 10, 7, 11, 14, 15 }; - v8i16 temp0_h, temp1_h, temp2_h, temp3_h; - v4i32 temp0_w, temp1_w, temp2_w, temp3_w; + int16_t *dq_coeff) { + int32_t cnt, eob; + v16i8 inv_zig_zag = { 0, 1, 5, 6, 2, 4, 7, 12, 3, 8, 11, 13, 9, 10, 14, 15 }; + v8i16 round0, round1; + v8i16 sign_z0, sign_z1; + v8i16 q_coeff0, q_coeff1; + v8i16 x0, x1, de_quant0, de_quant1; + v8i16 coeff0, coeff1, z0, z1; + v8i16 quant0, quant1, quant2, quant3; + v8i16 zero = { 0 }; + v8i16 inv_zig_zag0, inv_zig_zag1; + v8i16 zigzag_mask0 = { 0, 1, 4, 8, 5, 2, 3, 6 }; + v8i16 zigzag_mask1 = { 9, 12, 13, 10, 7, 11, 14, 15 }; + v8i16 temp0_h, temp1_h, temp2_h, temp3_h; + v4i32 temp0_w, temp1_w, temp2_w, temp3_w; - ILVRL_B2_SH(zero, inv_zig_zag, inv_zig_zag0, inv_zig_zag1); - eob = -1; - LD_SH2(coeff_ptr, 8, coeff0, coeff1); - VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, - z0, z1); - LD_SH2(round, 8, coeff0, coeff1); - VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, - round0, round1); - LD_SH2(quant, 8, coeff0, coeff1); - VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, - quant0, quant2); - sign_z0 = z0 >> 15; - sign_z1 = z1 >> 15; - x0 = __msa_add_a_h(z0, zero); - x1 = __msa_add_a_h(z1, zero); - ILVL_H2_SH(quant0, quant0, quant2, quant2, quant1, quant3); - ILVR_H2_SH(quant0, quant0, quant2, quant2, quant0, quant2); - ILVL_H2_SH(round0, x0, round1, x1, temp1_h, temp3_h); - ILVR_H2_SH(round0, x0, round1, x1, temp0_h, temp2_h); - DOTP_SH4_SW(temp0_h, temp1_h, temp2_h, temp3_h, quant0, quant1, quant2, - quant3, temp0_w, temp1_w, temp2_w, temp3_w); - SRA_4V(temp0_w, temp1_w, temp2_w, temp3_w, 16); - PCKEV_H2_SH(temp1_w, temp0_w, temp3_w, temp2_w, x0, x1); - x0 = x0 ^ sign_z0; - x1 = x1 ^ sign_z1; - SUB2(x0, sign_z0, x1, sign_z1, x0, x1); - VSHF_H2_SH(x0, x1, x0, x1, 
inv_zig_zag0, inv_zig_zag1, q_coeff0, q_coeff1); - ST_SH2(q_coeff0, q_coeff1, q_coeff, 8); - LD_SH2(de_quant, 8, de_quant0, de_quant1); - q_coeff0 *= de_quant0; - q_coeff1 *= de_quant1; - ST_SH2(q_coeff0, q_coeff1, dq_coeff, 8); + ILVRL_B2_SH(zero, inv_zig_zag, inv_zig_zag0, inv_zig_zag1); + eob = -1; + LD_SH2(coeff_ptr, 8, coeff0, coeff1); + VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, z0, + z1); + LD_SH2(round, 8, coeff0, coeff1); + VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, round0, + round1); + LD_SH2(quant, 8, coeff0, coeff1); + VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, quant0, + quant2); + sign_z0 = z0 >> 15; + sign_z1 = z1 >> 15; + x0 = __msa_add_a_h(z0, zero); + x1 = __msa_add_a_h(z1, zero); + ILVL_H2_SH(quant0, quant0, quant2, quant2, quant1, quant3); + ILVR_H2_SH(quant0, quant0, quant2, quant2, quant0, quant2); + ILVL_H2_SH(round0, x0, round1, x1, temp1_h, temp3_h); + ILVR_H2_SH(round0, x0, round1, x1, temp0_h, temp2_h); + DOTP_SH4_SW(temp0_h, temp1_h, temp2_h, temp3_h, quant0, quant1, quant2, + quant3, temp0_w, temp1_w, temp2_w, temp3_w); + SRA_4V(temp0_w, temp1_w, temp2_w, temp3_w, 16); + PCKEV_H2_SH(temp1_w, temp0_w, temp3_w, temp2_w, x0, x1); + x0 = x0 ^ sign_z0; + x1 = x1 ^ sign_z1; + SUB2(x0, sign_z0, x1, sign_z1, x0, x1); + VSHF_H2_SH(x0, x1, x0, x1, inv_zig_zag0, inv_zig_zag1, q_coeff0, q_coeff1); + ST_SH2(q_coeff0, q_coeff1, q_coeff, 8); + LD_SH2(de_quant, 8, de_quant0, de_quant1); + q_coeff0 *= de_quant0; + q_coeff1 *= de_quant1; + ST_SH2(q_coeff0, q_coeff1, dq_coeff, 8); - for (cnt = 0; cnt < 16; ++cnt) - { - if ((cnt <= 7) && (x1[7 - cnt] != 0)) - { - eob = (15 - cnt); - break; - } - - if ((cnt > 7) && (x0[7 - (cnt - 8)] != 0)) - { - eob = (7 - (cnt - 8)); - break; - } + for (cnt = 0; cnt < 16; ++cnt) { + if ((cnt <= 7) && (x1[7 - cnt] != 0)) { + eob = (15 - cnt); + break; } - return (int8_t)(eob + 1); -} - -static int8_t exact_regular_quantize_b_msa(int16_t 
*zbin_boost, - int16_t *coeff_ptr, - int16_t *zbin, - int16_t *round, - int16_t *quant, - int16_t *quant_shift, - int16_t *de_quant, - int16_t zbin_oq_in, - int16_t *q_coeff, - int16_t *dq_coeff) -{ - int32_t cnt, eob; - int16_t *boost_temp = zbin_boost; - v16i8 inv_zig_zag = { 0, 1, 5, 6, 2, 4, 7, 12, - 3, 8, 11, 13, 9, 10, 14, 15 }; - v8i16 round0, round1; - v8i16 sign_z0, sign_z1; - v8i16 q_coeff0, q_coeff1; - v8i16 z_bin0, z_bin1, zbin_o_q; - v8i16 x0, x1, sign_x0, sign_x1, de_quant0, de_quant1; - v8i16 coeff0, coeff1, z0, z1; - v8i16 quant0, quant1, quant2, quant3; - v8i16 zero = { 0 }; - v8i16 inv_zig_zag0, inv_zig_zag1; - v8i16 zigzag_mask0 = { 0, 1, 4, 8, 5, 2, 3, 6 }; - v8i16 zigzag_mask1 = { 9, 12, 13, 10, 7, 11, 14, 15 }; - v8i16 temp0_h, temp1_h, temp2_h, temp3_h; - v4i32 temp0_w, temp1_w, temp2_w, temp3_w; - - ILVRL_B2_SH(zero, inv_zig_zag, inv_zig_zag0, inv_zig_zag1); - zbin_o_q = __msa_fill_h(zbin_oq_in); - eob = -1; - LD_SH2(coeff_ptr, 8, coeff0, coeff1); - VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, - z0, z1); - LD_SH2(round, 8, coeff0, coeff1); - VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, - round0, round1); - LD_SH2(quant, 8, coeff0, coeff1); - VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, - quant0, quant2); - LD_SH2(zbin, 8, coeff0, coeff1); - VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, - z_bin0, z_bin1); - sign_z0 = z0 >> 15; - sign_z1 = z1 >> 15; - x0 = __msa_add_a_h(z0, zero); - x1 = __msa_add_a_h(z1, zero); - SUB2(x0, z_bin0, x1, z_bin1, z_bin0, z_bin1); - SUB2(z_bin0, zbin_o_q, z_bin1, zbin_o_q, z_bin0, z_bin1); - ILVL_H2_SH(quant0, quant0, quant2, quant2, quant1, quant3); - ILVR_H2_SH(quant0, quant0, quant2, quant2, quant0, quant2); - ILVL_H2_SH(round0, x0, round1, x1, temp1_h, temp3_h); - ILVR_H2_SH(round0, x0, round1, x1, temp0_h, temp2_h); - DOTP_SH4_SW(temp0_h, temp1_h, temp2_h, temp3_h, quant0, quant1, quant2, - quant3, temp0_w, 
temp1_w, temp2_w, temp3_w); - SRA_4V(temp0_w, temp1_w, temp2_w, temp3_w, 16); - PCKEV_H2_SH(temp1_w, temp0_w, temp3_w, temp2_w, temp0_h, temp2_h); - LD_SH2(quant_shift, 8, coeff0, coeff1); - VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, - quant0, quant2); - ILVL_H2_SH(quant0, quant0, quant2, quant2, quant1, quant3); - ILVR_H2_SH(quant0, quant0, quant2, quant2, quant0, quant2); - ADD2(x0, round0, x1, round1, x0, x1); - ILVL_H2_SH(temp0_h, x0, temp2_h, x1, temp1_h, temp3_h); - ILVR_H2_SH(temp0_h, x0, temp2_h, x1, temp0_h, temp2_h); - DOTP_SH4_SW(temp0_h, temp1_h, temp2_h, temp3_h, quant0, quant1, quant2, - quant3, temp0_w, temp1_w, temp2_w, temp3_w); - SRA_4V(temp0_w, temp1_w, temp2_w, temp3_w, 16); - PCKEV_H2_SH(temp1_w, temp0_w, temp3_w, temp2_w, x0, x1); - sign_x0 = x0 ^ sign_z0; - sign_x1 = x1 ^ sign_z1; - SUB2(sign_x0, sign_z0, sign_x1, sign_z1, sign_x0, sign_x1); - for (cnt = 0; cnt < 16; ++cnt) - { - if (cnt <= 7) - { - if (boost_temp[0] <= z_bin0[cnt]) - { - if (x0[cnt]) - { - eob = cnt; - boost_temp = zbin_boost; - } - else - { - boost_temp++; - } - } - else - { - sign_x0[cnt] = 0; - boost_temp++; - } - } - else - { - if (boost_temp[0] <= z_bin1[cnt - 8]) - { - if (x1[cnt - 8]) - { - eob = cnt; - boost_temp = zbin_boost; - } - else - { - boost_temp++; - } - } - else - { - sign_x1[cnt - 8] = 0; - boost_temp++; - } - } + if ((cnt > 7) && (x0[7 - (cnt - 8)] != 0)) { + eob = (7 - (cnt - 8)); + break; } + } - VSHF_H2_SH(sign_x0, sign_x1, sign_x0, sign_x1, inv_zig_zag0, inv_zig_zag1, - q_coeff0, q_coeff1); - ST_SH2(q_coeff0, q_coeff1, q_coeff, 8); - LD_SH2(de_quant, 8, de_quant0, de_quant1); - MUL2(de_quant0, q_coeff0, de_quant1, q_coeff1, de_quant0, de_quant1); - ST_SH2(de_quant0, de_quant1, dq_coeff, 8); - - return (int8_t)(eob + 1); + return (int8_t)(eob + 1); } -void vp8_fast_quantize_b_msa(BLOCK *b, BLOCKD *d) -{ - int16_t *coeff_ptr = b->coeff; - int16_t *zbin_ptr = b->zbin; - int16_t *round_ptr = b->round; - int16_t *quant_ptr = 
b->quant_fast; - int16_t *qcoeff_ptr = d->qcoeff; - int16_t *dqcoeff_ptr = d->dqcoeff; - int16_t *dequant_ptr = d->dequant; +static int8_t exact_regular_quantize_b_msa( + int16_t *zbin_boost, int16_t *coeff_ptr, int16_t *zbin, int16_t *round, + int16_t *quant, int16_t *quant_shift, int16_t *de_quant, int16_t zbin_oq_in, + int16_t *q_coeff, int16_t *dq_coeff) { + int32_t cnt, eob; + int16_t *boost_temp = zbin_boost; + v16i8 inv_zig_zag = { 0, 1, 5, 6, 2, 4, 7, 12, 3, 8, 11, 13, 9, 10, 14, 15 }; + v8i16 round0, round1; + v8i16 sign_z0, sign_z1; + v8i16 q_coeff0, q_coeff1; + v8i16 z_bin0, z_bin1, zbin_o_q; + v8i16 x0, x1, sign_x0, sign_x1, de_quant0, de_quant1; + v8i16 coeff0, coeff1, z0, z1; + v8i16 quant0, quant1, quant2, quant3; + v8i16 zero = { 0 }; + v8i16 inv_zig_zag0, inv_zig_zag1; + v8i16 zigzag_mask0 = { 0, 1, 4, 8, 5, 2, 3, 6 }; + v8i16 zigzag_mask1 = { 9, 12, 13, 10, 7, 11, 14, 15 }; + v8i16 temp0_h, temp1_h, temp2_h, temp3_h; + v4i32 temp0_w, temp1_w, temp2_w, temp3_w; - *d->eob = fast_quantize_b_msa(coeff_ptr, zbin_ptr, round_ptr, quant_ptr, - dequant_ptr, qcoeff_ptr, dqcoeff_ptr); + ILVRL_B2_SH(zero, inv_zig_zag, inv_zig_zag0, inv_zig_zag1); + zbin_o_q = __msa_fill_h(zbin_oq_in); + eob = -1; + LD_SH2(coeff_ptr, 8, coeff0, coeff1); + VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, z0, + z1); + LD_SH2(round, 8, coeff0, coeff1); + VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, round0, + round1); + LD_SH2(quant, 8, coeff0, coeff1); + VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, quant0, + quant2); + LD_SH2(zbin, 8, coeff0, coeff1); + VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, z_bin0, + z_bin1); + sign_z0 = z0 >> 15; + sign_z1 = z1 >> 15; + x0 = __msa_add_a_h(z0, zero); + x1 = __msa_add_a_h(z1, zero); + SUB2(x0, z_bin0, x1, z_bin1, z_bin0, z_bin1); + SUB2(z_bin0, zbin_o_q, z_bin1, zbin_o_q, z_bin0, z_bin1); + ILVL_H2_SH(quant0, quant0, quant2, quant2, quant1, 
quant3); + ILVR_H2_SH(quant0, quant0, quant2, quant2, quant0, quant2); + ILVL_H2_SH(round0, x0, round1, x1, temp1_h, temp3_h); + ILVR_H2_SH(round0, x0, round1, x1, temp0_h, temp2_h); + DOTP_SH4_SW(temp0_h, temp1_h, temp2_h, temp3_h, quant0, quant1, quant2, + quant3, temp0_w, temp1_w, temp2_w, temp3_w); + SRA_4V(temp0_w, temp1_w, temp2_w, temp3_w, 16); + PCKEV_H2_SH(temp1_w, temp0_w, temp3_w, temp2_w, temp0_h, temp2_h); + LD_SH2(quant_shift, 8, coeff0, coeff1); + VSHF_H2_SH(coeff0, coeff1, coeff0, coeff1, zigzag_mask0, zigzag_mask1, quant0, + quant2); + ILVL_H2_SH(quant0, quant0, quant2, quant2, quant1, quant3); + ILVR_H2_SH(quant0, quant0, quant2, quant2, quant0, quant2); + ADD2(x0, round0, x1, round1, x0, x1); + ILVL_H2_SH(temp0_h, x0, temp2_h, x1, temp1_h, temp3_h); + ILVR_H2_SH(temp0_h, x0, temp2_h, x1, temp0_h, temp2_h); + DOTP_SH4_SW(temp0_h, temp1_h, temp2_h, temp3_h, quant0, quant1, quant2, + quant3, temp0_w, temp1_w, temp2_w, temp3_w); + SRA_4V(temp0_w, temp1_w, temp2_w, temp3_w, 16); + PCKEV_H2_SH(temp1_w, temp0_w, temp3_w, temp2_w, x0, x1); + sign_x0 = x0 ^ sign_z0; + sign_x1 = x1 ^ sign_z1; + SUB2(sign_x0, sign_z0, sign_x1, sign_z1, sign_x0, sign_x1); + for (cnt = 0; cnt < 16; ++cnt) { + if (cnt <= 7) { + if (boost_temp[0] <= z_bin0[cnt]) { + if (x0[cnt]) { + eob = cnt; + boost_temp = zbin_boost; + } else { + boost_temp++; + } + } else { + sign_x0[cnt] = 0; + boost_temp++; + } + } else { + if (boost_temp[0] <= z_bin1[cnt - 8]) { + if (x1[cnt - 8]) { + eob = cnt; + boost_temp = zbin_boost; + } else { + boost_temp++; + } + } else { + sign_x1[cnt - 8] = 0; + boost_temp++; + } + } + } + + VSHF_H2_SH(sign_x0, sign_x1, sign_x0, sign_x1, inv_zig_zag0, inv_zig_zag1, + q_coeff0, q_coeff1); + ST_SH2(q_coeff0, q_coeff1, q_coeff, 8); + LD_SH2(de_quant, 8, de_quant0, de_quant1); + MUL2(de_quant0, q_coeff0, de_quant1, q_coeff1, de_quant0, de_quant1); + ST_SH2(de_quant0, de_quant1, dq_coeff, 8); + + return (int8_t)(eob + 1); } -void vp8_regular_quantize_b_msa(BLOCK *b, 
BLOCKD *d) -{ - int16_t *zbin_boost_ptr = b->zrun_zbin_boost; - int16_t *coeff_ptr = b->coeff; - int16_t *zbin_ptr = b->zbin; - int16_t *round_ptr = b->round; - int16_t *quant_ptr = b->quant; - int16_t *quant_shift_ptr = b->quant_shift; - int16_t *qcoeff_ptr = d->qcoeff; - int16_t *dqcoeff_ptr = d->dqcoeff; - int16_t *dequant_ptr = d->dequant; - int16_t zbin_oq_value = b->zbin_extra; +void vp8_fast_quantize_b_msa(BLOCK *b, BLOCKD *d) { + int16_t *coeff_ptr = b->coeff; + int16_t *zbin_ptr = b->zbin; + int16_t *round_ptr = b->round; + int16_t *quant_ptr = b->quant_fast; + int16_t *qcoeff_ptr = d->qcoeff; + int16_t *dqcoeff_ptr = d->dqcoeff; + int16_t *dequant_ptr = d->dequant; - *d->eob = exact_regular_quantize_b_msa(zbin_boost_ptr, coeff_ptr, - zbin_ptr, round_ptr, - quant_ptr, quant_shift_ptr, - dequant_ptr, zbin_oq_value, - qcoeff_ptr, dqcoeff_ptr); + *d->eob = fast_quantize_b_msa(coeff_ptr, zbin_ptr, round_ptr, quant_ptr, + dequant_ptr, qcoeff_ptr, dqcoeff_ptr); +} + +void vp8_regular_quantize_b_msa(BLOCK *b, BLOCKD *d) { + int16_t *zbin_boost_ptr = b->zrun_zbin_boost; + int16_t *coeff_ptr = b->coeff; + int16_t *zbin_ptr = b->zbin; + int16_t *round_ptr = b->round; + int16_t *quant_ptr = b->quant; + int16_t *quant_shift_ptr = b->quant_shift; + int16_t *qcoeff_ptr = d->qcoeff; + int16_t *dqcoeff_ptr = d->dqcoeff; + int16_t *dequant_ptr = d->dequant; + int16_t zbin_oq_value = b->zbin_extra; + + *d->eob = exact_regular_quantize_b_msa( + zbin_boost_ptr, coeff_ptr, zbin_ptr, round_ptr, quant_ptr, + quant_shift_ptr, dequant_ptr, zbin_oq_value, qcoeff_ptr, dqcoeff_ptr); } diff --git a/libs/libvpx/vp8/encoder/mips/msa/temporal_filter_msa.c b/libs/libvpx/vp8/encoder/mips/msa/temporal_filter_msa.c index 5cca5e0872..fb83f07bd2 100644 --- a/libs/libvpx/vp8/encoder/mips/msa/temporal_filter_msa.c +++ b/libs/libvpx/vp8/encoder/mips/msa/temporal_filter_msa.c @@ -11,293 +11,274 @@ #include "./vp8_rtcd.h" #include "vp8/common/mips/msa/vp8_macros_msa.h" -static void 
temporal_filter_apply_16size_msa(uint8_t *frame1_ptr, - uint32_t stride, - uint8_t *frame2_ptr, - int32_t strength_in, - int32_t filter_wt_in, - uint32_t *acc, uint16_t *cnt) -{ - uint32_t row; - v16i8 frame1_0_b, frame1_1_b, frame2_0_b, frame2_1_b; - v16u8 frame_l, frame_h; - v16i8 zero = { 0 }; - v8i16 frame2_0_h, frame2_1_h, mod0_h, mod1_h; - v8i16 diff0, diff1, cnt0, cnt1; - v4i32 const3, const16, filter_wt, strength; - v4i32 mod0_w, mod1_w, mod2_w, mod3_w; - v4i32 diff0_r, diff0_l, diff1_r, diff1_l; - v4i32 frame2_0, frame2_1, frame2_2, frame2_3; - v4i32 acc0, acc1, acc2, acc3; +static void temporal_filter_apply_16size_msa( + uint8_t *frame1_ptr, uint32_t stride, uint8_t *frame2_ptr, + int32_t strength_in, int32_t filter_wt_in, uint32_t *acc, uint16_t *cnt) { + uint32_t row; + v16i8 frame1_0_b, frame1_1_b, frame2_0_b, frame2_1_b; + v16u8 frame_l, frame_h; + v16i8 zero = { 0 }; + v8i16 frame2_0_h, frame2_1_h, mod0_h, mod1_h; + v8i16 diff0, diff1, cnt0, cnt1; + v4i32 const3, const16, filter_wt, strength; + v4i32 mod0_w, mod1_w, mod2_w, mod3_w; + v4i32 diff0_r, diff0_l, diff1_r, diff1_l; + v4i32 frame2_0, frame2_1, frame2_2, frame2_3; + v4i32 acc0, acc1, acc2, acc3; - filter_wt = __msa_fill_w(filter_wt_in); - strength = __msa_fill_w(strength_in); - const3 = __msa_ldi_w(3); - const16 = __msa_ldi_w(16); + filter_wt = __msa_fill_w(filter_wt_in); + strength = __msa_fill_w(strength_in); + const3 = __msa_ldi_w(3); + const16 = __msa_ldi_w(16); - for (row = 8; row--;) - { - frame1_0_b = LD_SB(frame1_ptr); - frame2_0_b = LD_SB(frame2_ptr); - frame1_ptr += stride; - frame2_ptr += 16; - frame1_1_b = LD_SB(frame1_ptr); - frame2_1_b = LD_SB(frame2_ptr); - LD_SW2(acc, 4, acc0, acc1); - LD_SW2(acc + 8, 4, acc2, acc3); - LD_SH2(cnt, 8, cnt0, cnt1); - ILVRL_B2_UB(frame1_0_b, frame2_0_b, frame_l, frame_h); - HSUB_UB2_SH(frame_l, frame_h, diff0, diff1); - UNPCK_SH_SW(diff0, diff0_r, diff0_l); - UNPCK_SH_SW(diff1, diff1_r, diff1_l); - MUL4(diff0_r, diff0_r, diff0_l, diff0_l, 
diff1_r, diff1_r, diff1_l, - diff1_l, mod0_w, mod1_w, mod2_w, mod3_w); - MUL4(mod0_w, const3, mod1_w, const3, mod2_w, const3, mod3_w, const3, - mod0_w, mod1_w, mod2_w, mod3_w); - SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); - diff0_r = (mod0_w < const16); - diff0_l = (mod1_w < const16); - diff1_r = (mod2_w < const16); - diff1_l = (mod3_w < const16); - SUB4(const16, mod0_w, const16, mod1_w, const16, mod2_w, const16, mod3_w, - mod0_w, mod1_w, mod2_w, mod3_w); - mod0_w = diff0_r & mod0_w; - mod1_w = diff0_l & mod1_w; - mod2_w = diff1_r & mod2_w; - mod3_w = diff1_l & mod3_w; - MUL4(mod0_w, filter_wt, mod1_w, filter_wt, mod2_w, filter_wt, mod3_w, - filter_wt, mod0_w, mod1_w, mod2_w, mod3_w); - PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h) - ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h); - ST_SH2(mod0_h, mod1_h, cnt, 8); - cnt += 16; - ILVRL_B2_SH(zero, frame2_0_b, frame2_0_h, frame2_1_h); - UNPCK_SH_SW(frame2_0_h, frame2_0, frame2_1); - UNPCK_SH_SW(frame2_1_h, frame2_2, frame2_3); - MUL4(mod0_w, frame2_0, mod1_w, frame2_1, mod2_w, frame2_2, mod3_w, - frame2_3, mod0_w, mod1_w, mod2_w, mod3_w); - ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, - mod0_w, mod1_w, mod2_w, mod3_w); - ST_SW2(mod0_w, mod1_w, acc, 4); - ST_SW2(mod2_w, mod3_w, acc + 8, 4); - acc += 16; - LD_SW2(acc, 4, acc0, acc1); - LD_SW2(acc + 8, 4, acc2, acc3); - LD_SH2(cnt, 8, cnt0, cnt1); - ILVRL_B2_UB(frame1_1_b, frame2_1_b, frame_l, frame_h); - HSUB_UB2_SH(frame_l, frame_h, diff0, diff1); - UNPCK_SH_SW(diff0, diff0_r, diff0_l); - UNPCK_SH_SW(diff1, diff1_r, diff1_l); - MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, - diff1_l, mod0_w, mod1_w, mod2_w, mod3_w); - MUL4(mod0_w, const3, mod1_w, const3, mod2_w, const3, mod3_w, const3, - mod0_w, mod1_w, mod2_w, mod3_w); - SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); - diff0_r = (mod0_w < const16); - diff0_l = (mod1_w < const16); - diff1_r = (mod2_w < const16); - diff1_l = (mod3_w < const16); - 
SUB4(const16, mod0_w, const16, mod1_w, const16, mod2_w, const16, mod3_w, - mod0_w, mod1_w, mod2_w, mod3_w); - mod0_w = diff0_r & mod0_w; - mod1_w = diff0_l & mod1_w; - mod2_w = diff1_r & mod2_w; - mod3_w = diff1_l & mod3_w; - MUL4(mod0_w, filter_wt, mod1_w, filter_wt, mod2_w, filter_wt, mod3_w, - filter_wt, mod0_w, mod1_w, mod2_w, mod3_w); - PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h); - ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h); - ST_SH2(mod0_h, mod1_h, cnt, 8); - cnt += 16; + for (row = 8; row--;) { + frame1_0_b = LD_SB(frame1_ptr); + frame2_0_b = LD_SB(frame2_ptr); + frame1_ptr += stride; + frame2_ptr += 16; + frame1_1_b = LD_SB(frame1_ptr); + frame2_1_b = LD_SB(frame2_ptr); + LD_SW2(acc, 4, acc0, acc1); + LD_SW2(acc + 8, 4, acc2, acc3); + LD_SH2(cnt, 8, cnt0, cnt1); + ILVRL_B2_UB(frame1_0_b, frame2_0_b, frame_l, frame_h); + HSUB_UB2_SH(frame_l, frame_h, diff0, diff1); + UNPCK_SH_SW(diff0, diff0_r, diff0_l); + UNPCK_SH_SW(diff1, diff1_r, diff1_l); + MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l, + mod0_w, mod1_w, mod2_w, mod3_w); + MUL4(mod0_w, const3, mod1_w, const3, mod2_w, const3, mod3_w, const3, mod0_w, + mod1_w, mod2_w, mod3_w); + SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); + diff0_r = (mod0_w < const16); + diff0_l = (mod1_w < const16); + diff1_r = (mod2_w < const16); + diff1_l = (mod3_w < const16); + SUB4(const16, mod0_w, const16, mod1_w, const16, mod2_w, const16, mod3_w, + mod0_w, mod1_w, mod2_w, mod3_w); + mod0_w = diff0_r & mod0_w; + mod1_w = diff0_l & mod1_w; + mod2_w = diff1_r & mod2_w; + mod3_w = diff1_l & mod3_w; + MUL4(mod0_w, filter_wt, mod1_w, filter_wt, mod2_w, filter_wt, mod3_w, + filter_wt, mod0_w, mod1_w, mod2_w, mod3_w); + PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h) + ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h); + ST_SH2(mod0_h, mod1_h, cnt, 8); + cnt += 16; + ILVRL_B2_SH(zero, frame2_0_b, frame2_0_h, frame2_1_h); + UNPCK_SH_SW(frame2_0_h, frame2_0, 
frame2_1); + UNPCK_SH_SW(frame2_1_h, frame2_2, frame2_3); + MUL4(mod0_w, frame2_0, mod1_w, frame2_1, mod2_w, frame2_2, mod3_w, frame2_3, + mod0_w, mod1_w, mod2_w, mod3_w); + ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w, + mod2_w, mod3_w); + ST_SW2(mod0_w, mod1_w, acc, 4); + ST_SW2(mod2_w, mod3_w, acc + 8, 4); + acc += 16; + LD_SW2(acc, 4, acc0, acc1); + LD_SW2(acc + 8, 4, acc2, acc3); + LD_SH2(cnt, 8, cnt0, cnt1); + ILVRL_B2_UB(frame1_1_b, frame2_1_b, frame_l, frame_h); + HSUB_UB2_SH(frame_l, frame_h, diff0, diff1); + UNPCK_SH_SW(diff0, diff0_r, diff0_l); + UNPCK_SH_SW(diff1, diff1_r, diff1_l); + MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l, + mod0_w, mod1_w, mod2_w, mod3_w); + MUL4(mod0_w, const3, mod1_w, const3, mod2_w, const3, mod3_w, const3, mod0_w, + mod1_w, mod2_w, mod3_w); + SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); + diff0_r = (mod0_w < const16); + diff0_l = (mod1_w < const16); + diff1_r = (mod2_w < const16); + diff1_l = (mod3_w < const16); + SUB4(const16, mod0_w, const16, mod1_w, const16, mod2_w, const16, mod3_w, + mod0_w, mod1_w, mod2_w, mod3_w); + mod0_w = diff0_r & mod0_w; + mod1_w = diff0_l & mod1_w; + mod2_w = diff1_r & mod2_w; + mod3_w = diff1_l & mod3_w; + MUL4(mod0_w, filter_wt, mod1_w, filter_wt, mod2_w, filter_wt, mod3_w, + filter_wt, mod0_w, mod1_w, mod2_w, mod3_w); + PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h); + ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h); + ST_SH2(mod0_h, mod1_h, cnt, 8); + cnt += 16; - UNPCK_UB_SH(frame2_1_b, frame2_0_h, frame2_1_h); - UNPCK_SH_SW(frame2_0_h, frame2_0, frame2_1); - UNPCK_SH_SW(frame2_1_h, frame2_2, frame2_3); - MUL4(mod0_w, frame2_0, mod1_w, frame2_1, mod2_w, frame2_2, mod3_w, - frame2_3, mod0_w, mod1_w, mod2_w, mod3_w); - ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, - mod0_w, mod1_w, mod2_w, mod3_w); - ST_SW2(mod0_w, mod1_w, acc, 4); - ST_SW2(mod2_w, mod3_w, acc + 8, 4); - acc += 16; - frame1_ptr 
+= stride; - frame2_ptr += 16; - } + UNPCK_UB_SH(frame2_1_b, frame2_0_h, frame2_1_h); + UNPCK_SH_SW(frame2_0_h, frame2_0, frame2_1); + UNPCK_SH_SW(frame2_1_h, frame2_2, frame2_3); + MUL4(mod0_w, frame2_0, mod1_w, frame2_1, mod2_w, frame2_2, mod3_w, frame2_3, + mod0_w, mod1_w, mod2_w, mod3_w); + ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w, + mod2_w, mod3_w); + ST_SW2(mod0_w, mod1_w, acc, 4); + ST_SW2(mod2_w, mod3_w, acc + 8, 4); + acc += 16; + frame1_ptr += stride; + frame2_ptr += 16; + } } -static void temporal_filter_apply_8size_msa(uint8_t *frame1_ptr, - uint32_t stride, - uint8_t *frame2_ptr, - int32_t strength_in, - int32_t filter_wt_in, - uint32_t *acc, uint16_t *cnt) -{ - uint32_t row; - uint64_t f0, f1, f2, f3, f4, f5, f6, f7; - v16i8 frame1 = { 0 }; - v16i8 frame2 = { 0 }; - v16i8 frame3 = { 0 }; - v16i8 frame4 = { 0 }; - v16u8 frame_l, frame_h; - v8i16 frame2_0_h, frame2_1_h, mod0_h, mod1_h; - v8i16 diff0, diff1, cnt0, cnt1; - v4i32 const3, const16; - v4i32 filter_wt, strength; - v4i32 mod0_w, mod1_w, mod2_w, mod3_w; - v4i32 diff0_r, diff0_l, diff1_r, diff1_l; - v4i32 frame2_0, frame2_1, frame2_2, frame2_3; - v4i32 acc0, acc1, acc2, acc3; +static void temporal_filter_apply_8size_msa( + uint8_t *frame1_ptr, uint32_t stride, uint8_t *frame2_ptr, + int32_t strength_in, int32_t filter_wt_in, uint32_t *acc, uint16_t *cnt) { + uint32_t row; + uint64_t f0, f1, f2, f3, f4, f5, f6, f7; + v16i8 frame1 = { 0 }; + v16i8 frame2 = { 0 }; + v16i8 frame3 = { 0 }; + v16i8 frame4 = { 0 }; + v16u8 frame_l, frame_h; + v8i16 frame2_0_h, frame2_1_h, mod0_h, mod1_h; + v8i16 diff0, diff1, cnt0, cnt1; + v4i32 const3, const16; + v4i32 filter_wt, strength; + v4i32 mod0_w, mod1_w, mod2_w, mod3_w; + v4i32 diff0_r, diff0_l, diff1_r, diff1_l; + v4i32 frame2_0, frame2_1, frame2_2, frame2_3; + v4i32 acc0, acc1, acc2, acc3; - filter_wt = __msa_fill_w(filter_wt_in); - strength = __msa_fill_w(strength_in); - const3 = __msa_ldi_w(3); - const16 = 
__msa_ldi_w(16); + filter_wt = __msa_fill_w(filter_wt_in); + strength = __msa_fill_w(strength_in); + const3 = __msa_ldi_w(3); + const16 = __msa_ldi_w(16); - for (row = 2; row--;) - { - LD2(frame1_ptr, stride, f0, f1); - frame1_ptr += (2 * stride); - LD2(frame2_ptr, 8, f2, f3); - frame2_ptr += 16; - LD2(frame1_ptr, stride, f4, f5); - frame1_ptr += (2 * stride); - LD2(frame2_ptr, 8, f6, f7); - frame2_ptr += 16; + for (row = 2; row--;) { + LD2(frame1_ptr, stride, f0, f1); + frame1_ptr += (2 * stride); + LD2(frame2_ptr, 8, f2, f3); + frame2_ptr += 16; + LD2(frame1_ptr, stride, f4, f5); + frame1_ptr += (2 * stride); + LD2(frame2_ptr, 8, f6, f7); + frame2_ptr += 16; - LD_SW2(acc, 4, acc0, acc1); - LD_SW2(acc + 8, 4, acc2, acc3); - LD_SH2(cnt, 8, cnt0, cnt1); - INSERT_D2_SB(f0, f1, frame1); - INSERT_D2_SB(f2, f3, frame2); - INSERT_D2_SB(f4, f5, frame3); - INSERT_D2_SB(f6, f7, frame4); - ILVRL_B2_UB(frame1, frame2, frame_l, frame_h); - HSUB_UB2_SH(frame_l, frame_h, diff0, diff1); - UNPCK_SH_SW(diff0, diff0_r, diff0_l); - UNPCK_SH_SW(diff1, diff1_r, diff1_l); - MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, - diff1_l, mod0_w, mod1_w, mod2_w, mod3_w); - MUL4(mod0_w, const3, mod1_w, const3, mod2_w, const3, mod3_w, const3, - mod0_w, mod1_w, mod2_w, mod3_w); - SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); - diff0_r = (mod0_w < const16); - diff0_l = (mod1_w < const16); - diff1_r = (mod2_w < const16); - diff1_l = (mod3_w < const16); - SUB4(const16, mod0_w, const16, mod1_w, const16, mod2_w, const16, mod3_w, - mod0_w, mod1_w, mod2_w, mod3_w); - mod0_w = diff0_r & mod0_w; - mod1_w = diff0_l & mod1_w; - mod2_w = diff1_r & mod2_w; - mod3_w = diff1_l & mod3_w; - MUL4(mod0_w, filter_wt, mod1_w, filter_wt, mod2_w, filter_wt, mod3_w, - filter_wt, mod0_w, mod1_w, mod2_w, mod3_w); - PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h); - ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h); - ST_SH2(mod0_h, mod1_h, cnt, 8); - cnt += 16; + LD_SW2(acc, 4, acc0, 
acc1); + LD_SW2(acc + 8, 4, acc2, acc3); + LD_SH2(cnt, 8, cnt0, cnt1); + INSERT_D2_SB(f0, f1, frame1); + INSERT_D2_SB(f2, f3, frame2); + INSERT_D2_SB(f4, f5, frame3); + INSERT_D2_SB(f6, f7, frame4); + ILVRL_B2_UB(frame1, frame2, frame_l, frame_h); + HSUB_UB2_SH(frame_l, frame_h, diff0, diff1); + UNPCK_SH_SW(diff0, diff0_r, diff0_l); + UNPCK_SH_SW(diff1, diff1_r, diff1_l); + MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l, + mod0_w, mod1_w, mod2_w, mod3_w); + MUL4(mod0_w, const3, mod1_w, const3, mod2_w, const3, mod3_w, const3, mod0_w, + mod1_w, mod2_w, mod3_w); + SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); + diff0_r = (mod0_w < const16); + diff0_l = (mod1_w < const16); + diff1_r = (mod2_w < const16); + diff1_l = (mod3_w < const16); + SUB4(const16, mod0_w, const16, mod1_w, const16, mod2_w, const16, mod3_w, + mod0_w, mod1_w, mod2_w, mod3_w); + mod0_w = diff0_r & mod0_w; + mod1_w = diff0_l & mod1_w; + mod2_w = diff1_r & mod2_w; + mod3_w = diff1_l & mod3_w; + MUL4(mod0_w, filter_wt, mod1_w, filter_wt, mod2_w, filter_wt, mod3_w, + filter_wt, mod0_w, mod1_w, mod2_w, mod3_w); + PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h); + ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h); + ST_SH2(mod0_h, mod1_h, cnt, 8); + cnt += 16; - UNPCK_UB_SH(frame2, frame2_0_h, frame2_1_h); - UNPCK_SH_SW(frame2_0_h, frame2_0, frame2_1); - UNPCK_SH_SW(frame2_1_h, frame2_2, frame2_3); - MUL4(mod0_w, frame2_0, mod1_w, frame2_1, mod2_w, frame2_2, mod3_w, - frame2_3, mod0_w, mod1_w, mod2_w, mod3_w); - ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, - mod0_w, mod1_w, mod2_w, mod3_w); - ST_SW2(mod0_w, mod1_w, acc, 4); - ST_SW2(mod2_w, mod3_w, acc + 8, 4); - acc += 16; + UNPCK_UB_SH(frame2, frame2_0_h, frame2_1_h); + UNPCK_SH_SW(frame2_0_h, frame2_0, frame2_1); + UNPCK_SH_SW(frame2_1_h, frame2_2, frame2_3); + MUL4(mod0_w, frame2_0, mod1_w, frame2_1, mod2_w, frame2_2, mod3_w, frame2_3, + mod0_w, mod1_w, mod2_w, mod3_w); + ADD4(mod0_w, acc0, 
mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w, + mod2_w, mod3_w); + ST_SW2(mod0_w, mod1_w, acc, 4); + ST_SW2(mod2_w, mod3_w, acc + 8, 4); + acc += 16; - LD_SW2(acc, 4, acc0, acc1); - LD_SW2(acc + 8, 4, acc2, acc3); - LD_SH2(cnt, 8, cnt0, cnt1); - ILVRL_B2_UB(frame3, frame4, frame_l, frame_h); - HSUB_UB2_SH(frame_l, frame_h, diff0, diff1); - UNPCK_SH_SW(diff0, diff0_r, diff0_l); - UNPCK_SH_SW(diff1, diff1_r, diff1_l); - MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, - diff1_l, mod0_w, mod1_w, mod2_w, mod3_w); - MUL4(mod0_w, const3, mod1_w, const3, mod2_w, const3, mod3_w, const3, - mod0_w, mod1_w, mod2_w, mod3_w); - SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); - diff0_r = (mod0_w < const16); - diff0_l = (mod1_w < const16); - diff1_r = (mod2_w < const16); - diff1_l = (mod3_w < const16); - SUB4(const16, mod0_w, const16, mod1_w, const16, mod2_w, const16, mod3_w, - mod0_w, mod1_w, mod2_w, mod3_w); - mod0_w = diff0_r & mod0_w; - mod1_w = diff0_l & mod1_w; - mod2_w = diff1_r & mod2_w; - mod3_w = diff1_l & mod3_w; - MUL4(mod0_w, filter_wt, mod1_w, filter_wt, mod2_w, filter_wt, mod3_w, - filter_wt, mod0_w, mod1_w, mod2_w, mod3_w); - PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h); - ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h); - ST_SH2(mod0_h, mod1_h, cnt, 8); - cnt += 16; + LD_SW2(acc, 4, acc0, acc1); + LD_SW2(acc + 8, 4, acc2, acc3); + LD_SH2(cnt, 8, cnt0, cnt1); + ILVRL_B2_UB(frame3, frame4, frame_l, frame_h); + HSUB_UB2_SH(frame_l, frame_h, diff0, diff1); + UNPCK_SH_SW(diff0, diff0_r, diff0_l); + UNPCK_SH_SW(diff1, diff1_r, diff1_l); + MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l, + mod0_w, mod1_w, mod2_w, mod3_w); + MUL4(mod0_w, const3, mod1_w, const3, mod2_w, const3, mod3_w, const3, mod0_w, + mod1_w, mod2_w, mod3_w); + SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); + diff0_r = (mod0_w < const16); + diff0_l = (mod1_w < const16); + diff1_r = (mod2_w < const16); + diff1_l = 
(mod3_w < const16); + SUB4(const16, mod0_w, const16, mod1_w, const16, mod2_w, const16, mod3_w, + mod0_w, mod1_w, mod2_w, mod3_w); + mod0_w = diff0_r & mod0_w; + mod1_w = diff0_l & mod1_w; + mod2_w = diff1_r & mod2_w; + mod3_w = diff1_l & mod3_w; + MUL4(mod0_w, filter_wt, mod1_w, filter_wt, mod2_w, filter_wt, mod3_w, + filter_wt, mod0_w, mod1_w, mod2_w, mod3_w); + PCKEV_H2_SH(mod1_w, mod0_w, mod3_w, mod2_w, mod0_h, mod1_h); + ADD2(mod0_h, cnt0, mod1_h, cnt1, mod0_h, mod1_h); + ST_SH2(mod0_h, mod1_h, cnt, 8); + cnt += 16; - UNPCK_UB_SH(frame4, frame2_0_h, frame2_1_h); - UNPCK_SH_SW(frame2_0_h, frame2_0, frame2_1); - UNPCK_SH_SW(frame2_1_h, frame2_2, frame2_3); - MUL4(mod0_w, frame2_0, mod1_w, frame2_1, mod2_w, frame2_2, mod3_w, - frame2_3, mod0_w, mod1_w, mod2_w, mod3_w); - ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, - mod0_w, mod1_w, mod2_w, mod3_w); - ST_SW2(mod0_w, mod1_w, acc, 4); - ST_SW2(mod2_w, mod3_w, acc + 8, 4); - acc += 16; - } + UNPCK_UB_SH(frame4, frame2_0_h, frame2_1_h); + UNPCK_SH_SW(frame2_0_h, frame2_0, frame2_1); + UNPCK_SH_SW(frame2_1_h, frame2_2, frame2_3); + MUL4(mod0_w, frame2_0, mod1_w, frame2_1, mod2_w, frame2_2, mod3_w, frame2_3, + mod0_w, mod1_w, mod2_w, mod3_w); + ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w, + mod2_w, mod3_w); + ST_SW2(mod0_w, mod1_w, acc, 4); + ST_SW2(mod2_w, mod3_w, acc + 8, 4); + acc += 16; + } } void vp8_temporal_filter_apply_msa(uint8_t *frame1, uint32_t stride, uint8_t *frame2, uint32_t block_size, - int32_t strength, int32_t filter_weight, - uint32_t *accumulator, uint16_t *count) -{ - if (8 == block_size) - { - temporal_filter_apply_8size_msa(frame1, stride, frame2, strength, - filter_weight, accumulator, count); - } - else if (16 == block_size) - { - temporal_filter_apply_16size_msa(frame1, stride, frame2, strength, - filter_weight, accumulator, count); - } - else - { - uint32_t i, j, k; - int32_t modifier; - int32_t byte = 0; - const int32_t rounding = strength > 0 
? 1 << (strength - 1) : 0; - - for (i = 0, k = 0; i < block_size; ++i) - { - for (j = 0; j < block_size; ++j, ++k) - { - int src_byte = frame1[byte]; - int pixel_value = *frame2++; - - modifier = src_byte - pixel_value; - modifier *= modifier; - modifier *= 3; - modifier += rounding; - modifier >>= strength; - - if (modifier > 16) - modifier = 16; - - modifier = 16 - modifier; - modifier *= filter_weight; - - count[k] += modifier; - accumulator[k] += modifier * pixel_value; - - byte++; - } - - byte += stride - block_size; - } + int32_t strength, int32_t filter_weight, + uint32_t *accumulator, uint16_t *count) { + if (8 == block_size) { + temporal_filter_apply_8size_msa(frame1, stride, frame2, strength, + filter_weight, accumulator, count); + } else if (16 == block_size) { + temporal_filter_apply_16size_msa(frame1, stride, frame2, strength, + filter_weight, accumulator, count); + } else { + uint32_t i, j, k; + int32_t modifier; + int32_t byte = 0; + const int32_t rounding = strength > 0 ? 1 << (strength - 1) : 0; + + for (i = 0, k = 0; i < block_size; ++i) { + for (j = 0; j < block_size; ++j, ++k) { + int src_byte = frame1[byte]; + int pixel_value = *frame2++; + + modifier = src_byte - pixel_value; + modifier *= modifier; + modifier *= 3; + modifier += rounding; + modifier >>= strength; + + if (modifier > 16) modifier = 16; + + modifier = 16 - modifier; + modifier *= filter_weight; + + count[k] += modifier; + accumulator[k] += modifier * pixel_value; + + byte++; + } + + byte += stride - block_size; } + } } diff --git a/libs/libvpx/vp8/encoder/modecosts.c b/libs/libvpx/vp8/encoder/modecosts.c index ad0e9308dc..b1c3120a92 100644 --- a/libs/libvpx/vp8/encoder/modecosts.c +++ b/libs/libvpx/vp8/encoder/modecosts.c @@ -8,48 +8,41 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "vp8/common/blockd.h" #include "modecosts.h" #include "onyx_int.h" #include "treewriter.h" #include "vp8/common/entropymode.h" +void vp8_init_mode_costs(VP8_COMP *c) { + VP8_COMMON *x = &c->common; + struct rd_costs_struct *rd_costs = &c->rd_costs; -void vp8_init_mode_costs(VP8_COMP *c) -{ - VP8_COMMON *x = &c->common; - struct rd_costs_struct *rd_costs = &c->rd_costs; + { + const vp8_tree_p T = vp8_bmode_tree; - { - const vp8_tree_p T = vp8_bmode_tree; + int i = 0; - int i = 0; + do { + int j = 0; - do - { - int j = 0; + do { + vp8_cost_tokens(rd_costs->bmode_costs[i][j], vp8_kf_bmode_prob[i][j], + T); + } while (++j < VP8_BINTRAMODES); + } while (++i < VP8_BINTRAMODES); - do - { - vp8_cost_tokens(rd_costs->bmode_costs[i][j], - vp8_kf_bmode_prob[i][j], T); - } - while (++j < VP8_BINTRAMODES); - } - while (++i < VP8_BINTRAMODES); + vp8_cost_tokens(rd_costs->inter_bmode_costs, x->fc.bmode_prob, T); + } + vp8_cost_tokens(rd_costs->inter_bmode_costs, x->fc.sub_mv_ref_prob, + vp8_sub_mv_ref_tree); - vp8_cost_tokens(rd_costs->inter_bmode_costs, x->fc.bmode_prob, T); - } - vp8_cost_tokens(rd_costs->inter_bmode_costs, x->fc.sub_mv_ref_prob, - vp8_sub_mv_ref_tree); + vp8_cost_tokens(rd_costs->mbmode_cost[1], x->fc.ymode_prob, vp8_ymode_tree); + vp8_cost_tokens(rd_costs->mbmode_cost[0], vp8_kf_ymode_prob, + vp8_kf_ymode_tree); - vp8_cost_tokens(rd_costs->mbmode_cost[1], x->fc.ymode_prob, vp8_ymode_tree); - vp8_cost_tokens(rd_costs->mbmode_cost[0], vp8_kf_ymode_prob, - vp8_kf_ymode_tree); - - vp8_cost_tokens(rd_costs->intra_uv_mode_cost[1], x->fc.uv_mode_prob, - vp8_uv_mode_tree); - vp8_cost_tokens(rd_costs->intra_uv_mode_cost[0], vp8_kf_uv_mode_prob, - vp8_uv_mode_tree); + vp8_cost_tokens(rd_costs->intra_uv_mode_cost[1], x->fc.uv_mode_prob, + vp8_uv_mode_tree); + vp8_cost_tokens(rd_costs->intra_uv_mode_cost[0], vp8_kf_uv_mode_prob, + vp8_uv_mode_tree); } diff --git a/libs/libvpx/vp8/encoder/modecosts.h b/libs/libvpx/vp8/encoder/modecosts.h index 
9871bfffdf..dfb8989f7f 100644 --- a/libs/libvpx/vp8/encoder/modecosts.h +++ b/libs/libvpx/vp8/encoder/modecosts.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_ENCODER_MODECOSTS_H_ #define VP8_ENCODER_MODECOSTS_H_ diff --git a/libs/libvpx/vp8/encoder/mr_dissim.c b/libs/libvpx/vp8/encoder/mr_dissim.c index 886cba2fd5..011b62a08f 100644 --- a/libs/libvpx/vp8/encoder/mr_dissim.c +++ b/libs/libvpx/vp8/encoder/mr_dissim.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include #include "vpx_config.h" #include "onyx_int.h" @@ -18,223 +17,199 @@ #include "rdopt.h" #include "vp8/common/common.h" -void vp8_cal_low_res_mb_cols(VP8_COMP *cpi) -{ - int low_res_w; +void vp8_cal_low_res_mb_cols(VP8_COMP *cpi) { + int low_res_w; - /* Support arbitrary down-sampling factor */ - unsigned int iw = cpi->oxcf.Width*cpi->oxcf.mr_down_sampling_factor.den - + cpi->oxcf.mr_down_sampling_factor.num - 1; + /* Support arbitrary down-sampling factor */ + unsigned int iw = cpi->oxcf.Width * cpi->oxcf.mr_down_sampling_factor.den + + cpi->oxcf.mr_down_sampling_factor.num - 1; - low_res_w = iw/cpi->oxcf.mr_down_sampling_factor.num; - cpi->mr_low_res_mb_cols = ((low_res_w + 15) >> 4); + low_res_w = iw / cpi->oxcf.mr_down_sampling_factor.num; + cpi->mr_low_res_mb_cols = ((low_res_w + 15) >> 4); } -#define GET_MV(x) \ -if(x->mbmi.ref_frame !=INTRA_FRAME) \ -{ \ - mvx[cnt] = x->mbmi.mv.as_mv.row; \ - mvy[cnt] = x->mbmi.mv.as_mv.col; \ - cnt++; \ -} +#define GET_MV(x) \ + if (x->mbmi.ref_frame != INTRA_FRAME) { \ + mvx[cnt] = x->mbmi.mv.as_mv.row; \ + mvy[cnt] = x->mbmi.mv.as_mv.col; \ + cnt++; \ + } -#define GET_MV_SIGN(x) \ -if(x->mbmi.ref_frame !=INTRA_FRAME) \ -{ \ - mvx[cnt] = x->mbmi.mv.as_mv.row; \ - mvy[cnt] = x->mbmi.mv.as_mv.col; \ - if (cm->ref_frame_sign_bias[x->mbmi.ref_frame] \ - != cm->ref_frame_sign_bias[tmp->mbmi.ref_frame]) \ - { \ - mvx[cnt] *= -1; \ - mvy[cnt] *= -1; \ - } \ - cnt++; \ -} 
+#define GET_MV_SIGN(x) \ + if (x->mbmi.ref_frame != INTRA_FRAME) { \ + mvx[cnt] = x->mbmi.mv.as_mv.row; \ + mvy[cnt] = x->mbmi.mv.as_mv.col; \ + if (cm->ref_frame_sign_bias[x->mbmi.ref_frame] != \ + cm->ref_frame_sign_bias[tmp->mbmi.ref_frame]) { \ + mvx[cnt] *= -1; \ + mvy[cnt] *= -1; \ + } \ + cnt++; \ + } -void vp8_cal_dissimilarity(VP8_COMP *cpi) -{ - VP8_COMMON *cm = &cpi->common; - int i; +void vp8_cal_dissimilarity(VP8_COMP *cpi) { + VP8_COMMON *cm = &cpi->common; + int i; - /* Note: The first row & first column in mip are outside the frame, which - * were initialized to all 0.(ref_frame, mode, mv...) - * Their ref_frame = 0 means they won't be counted in the following - * calculation. + /* Note: The first row & first column in mip are outside the frame, which + * were initialized to all 0.(ref_frame, mode, mv...) + * Their ref_frame = 0 means they won't be counted in the following + * calculation. + */ + if (cpi->oxcf.mr_total_resolutions > 1 && + cpi->oxcf.mr_encoder_id < (cpi->oxcf.mr_total_resolutions - 1)) { + /* Store info for show/no-show frames for supporting alt_ref. + * If parent frame is alt_ref, child has one too. */ - if (cpi->oxcf.mr_total_resolutions >1 - && cpi->oxcf.mr_encoder_id < (cpi->oxcf.mr_total_resolutions - 1)) - { - /* Store info for show/no-show frames for supporting alt_ref. - * If parent frame is alt_ref, child has one too. - */ - LOWER_RES_FRAME_INFO* store_info - = (LOWER_RES_FRAME_INFO*)cpi->oxcf.mr_low_res_mode_info; + LOWER_RES_FRAME_INFO *store_info = + (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info; - store_info->frame_type = cm->frame_type; + store_info->frame_type = cm->frame_type; - if(cm->frame_type != KEY_FRAME) - { - store_info->is_frame_dropped = 0; - for (i = 1; i < MAX_REF_FRAMES; i++) - store_info->low_res_ref_frames[i] = cpi->current_ref_frames[i]; - } - - if(cm->frame_type != KEY_FRAME) - { - int mb_row; - int mb_col; - /* Point to beginning of allocated MODE_INFO arrays. 
*/ - MODE_INFO *tmp = cm->mip + cm->mode_info_stride; - LOWER_RES_MB_INFO* store_mode_info = store_info->mb_info; - - for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++) - { - tmp++; - for (mb_col = 0; mb_col < cm->mb_cols; mb_col ++) - { - int dissim = INT_MAX; - - if(tmp->mbmi.ref_frame !=INTRA_FRAME) - { - int mvx[8]; - int mvy[8]; - int mmvx; - int mmvy; - int cnt=0; - const MODE_INFO *here = tmp; - const MODE_INFO *above = here - cm->mode_info_stride; - const MODE_INFO *left = here - 1; - const MODE_INFO *aboveleft = above - 1; - const MODE_INFO *aboveright = NULL; - const MODE_INFO *right = NULL; - const MODE_INFO *belowleft = NULL; - const MODE_INFO *below = NULL; - const MODE_INFO *belowright = NULL; - - /* If alternate reference frame is used, we have to - * check sign of MV. */ - if(cpi->oxcf.play_alternate) - { - /* Gather mv of neighboring MBs */ - GET_MV_SIGN(above) - GET_MV_SIGN(left) - GET_MV_SIGN(aboveleft) - - if(mb_col < (cm->mb_cols-1)) - { - right = here + 1; - aboveright = above + 1; - GET_MV_SIGN(right) - GET_MV_SIGN(aboveright) - } - - if(mb_row < (cm->mb_rows-1)) - { - below = here + cm->mode_info_stride; - belowleft = below - 1; - GET_MV_SIGN(below) - GET_MV_SIGN(belowleft) - } - - if(mb_col < (cm->mb_cols-1) - && mb_row < (cm->mb_rows-1)) - { - belowright = below + 1; - GET_MV_SIGN(belowright) - } - }else - { - /* No alt_ref and gather mv of neighboring MBs */ - GET_MV(above) - GET_MV(left) - GET_MV(aboveleft) - - if(mb_col < (cm->mb_cols-1)) - { - right = here + 1; - aboveright = above + 1; - GET_MV(right) - GET_MV(aboveright) - } - - if(mb_row < (cm->mb_rows-1)) - { - below = here + cm->mode_info_stride; - belowleft = below - 1; - GET_MV(below) - GET_MV(belowleft) - } - - if(mb_col < (cm->mb_cols-1) - && mb_row < (cm->mb_rows-1)) - { - belowright = below + 1; - GET_MV(belowright) - } - } - - if (cnt > 0) - { - int max_mvx = mvx[0]; - int min_mvx = mvx[0]; - int max_mvy = mvy[0]; - int min_mvy = mvy[0]; - int i; - - if (cnt > 1) - { - for 
(i=1; i< cnt; i++) - { - if (mvx[i] > max_mvx) max_mvx = mvx[i]; - else if (mvx[i] < min_mvx) min_mvx = mvx[i]; - if (mvy[i] > max_mvy) max_mvy = mvy[i]; - else if (mvy[i] < min_mvy) min_mvy = mvy[i]; - } - } - - mmvx = VPXMAX( - abs(min_mvx - here->mbmi.mv.as_mv.row), - abs(max_mvx - here->mbmi.mv.as_mv.row)); - mmvy = VPXMAX( - abs(min_mvy - here->mbmi.mv.as_mv.col), - abs(max_mvy - here->mbmi.mv.as_mv.col)); - dissim = VPXMAX(mmvx, mmvy); - } - } - - /* Store mode info for next resolution encoding */ - store_mode_info->mode = tmp->mbmi.mode; - store_mode_info->ref_frame = tmp->mbmi.ref_frame; - store_mode_info->mv.as_int = tmp->mbmi.mv.as_int; - store_mode_info->dissim = dissim; - tmp++; - store_mode_info++; - } - } - } + if (cm->frame_type != KEY_FRAME) { + store_info->is_frame_dropped = 0; + for (i = 1; i < MAX_REF_FRAMES; ++i) + store_info->low_res_ref_frames[i] = cpi->current_ref_frames[i]; } + + if (cm->frame_type != KEY_FRAME) { + int mb_row; + int mb_col; + /* Point to beginning of allocated MODE_INFO arrays. */ + MODE_INFO *tmp = cm->mip + cm->mode_info_stride; + LOWER_RES_MB_INFO *store_mode_info = store_info->mb_info; + + for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { + tmp++; + for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { + int dissim = INT_MAX; + + if (tmp->mbmi.ref_frame != INTRA_FRAME) { + int mvx[8]; + int mvy[8]; + int mmvx; + int mmvy; + int cnt = 0; + const MODE_INFO *here = tmp; + const MODE_INFO *above = here - cm->mode_info_stride; + const MODE_INFO *left = here - 1; + const MODE_INFO *aboveleft = above - 1; + const MODE_INFO *aboveright = NULL; + const MODE_INFO *right = NULL; + const MODE_INFO *belowleft = NULL; + const MODE_INFO *below = NULL; + const MODE_INFO *belowright = NULL; + + /* If alternate reference frame is used, we have to + * check sign of MV. 
*/ + if (cpi->oxcf.play_alternate) { + /* Gather mv of neighboring MBs */ + GET_MV_SIGN(above) + GET_MV_SIGN(left) + GET_MV_SIGN(aboveleft) + + if (mb_col < (cm->mb_cols - 1)) { + right = here + 1; + aboveright = above + 1; + GET_MV_SIGN(right) + GET_MV_SIGN(aboveright) + } + + if (mb_row < (cm->mb_rows - 1)) { + below = here + cm->mode_info_stride; + belowleft = below - 1; + GET_MV_SIGN(below) + GET_MV_SIGN(belowleft) + } + + if (mb_col < (cm->mb_cols - 1) && mb_row < (cm->mb_rows - 1)) { + belowright = below + 1; + GET_MV_SIGN(belowright) + } + } else { + /* No alt_ref and gather mv of neighboring MBs */ + GET_MV(above) + GET_MV(left) + GET_MV(aboveleft) + + if (mb_col < (cm->mb_cols - 1)) { + right = here + 1; + aboveright = above + 1; + GET_MV(right) + GET_MV(aboveright) + } + + if (mb_row < (cm->mb_rows - 1)) { + below = here + cm->mode_info_stride; + belowleft = below - 1; + GET_MV(below) + GET_MV(belowleft) + } + + if (mb_col < (cm->mb_cols - 1) && mb_row < (cm->mb_rows - 1)) { + belowright = below + 1; + GET_MV(belowright) + } + } + + if (cnt > 0) { + int max_mvx = mvx[0]; + int min_mvx = mvx[0]; + int max_mvy = mvy[0]; + int min_mvy = mvy[0]; + int i; + + if (cnt > 1) { + for (i = 1; i < cnt; ++i) { + if (mvx[i] > max_mvx) + max_mvx = mvx[i]; + else if (mvx[i] < min_mvx) + min_mvx = mvx[i]; + if (mvy[i] > max_mvy) + max_mvy = mvy[i]; + else if (mvy[i] < min_mvy) + min_mvy = mvy[i]; + } + } + + mmvx = VPXMAX(abs(min_mvx - here->mbmi.mv.as_mv.row), + abs(max_mvx - here->mbmi.mv.as_mv.row)); + mmvy = VPXMAX(abs(min_mvy - here->mbmi.mv.as_mv.col), + abs(max_mvy - here->mbmi.mv.as_mv.col)); + dissim = VPXMAX(mmvx, mmvy); + } + } + + /* Store mode info for next resolution encoding */ + store_mode_info->mode = tmp->mbmi.mode; + store_mode_info->ref_frame = tmp->mbmi.ref_frame; + store_mode_info->mv.as_int = tmp->mbmi.mv.as_int; + store_mode_info->dissim = dissim; + tmp++; + store_mode_info++; + } + } + } + } } /* This function is called only when this frame is 
dropped at current resolution level. */ -void vp8_store_drop_frame_info(VP8_COMP *cpi) -{ - /* If the frame is dropped in lower-resolution encoding, this information - is passed to higher resolution level so that the encoder knows there - is no mode & motion info available. +void vp8_store_drop_frame_info(VP8_COMP *cpi) { + /* If the frame is dropped in lower-resolution encoding, this information + is passed to higher resolution level so that the encoder knows there + is no mode & motion info available. + */ + if (cpi->oxcf.mr_total_resolutions > 1 && + cpi->oxcf.mr_encoder_id < (cpi->oxcf.mr_total_resolutions - 1)) { + /* Store info for show/no-show frames for supporting alt_ref. + * If parent frame is alt_ref, child has one too. */ - if (cpi->oxcf.mr_total_resolutions >1 - && cpi->oxcf.mr_encoder_id < (cpi->oxcf.mr_total_resolutions - 1)) - { - /* Store info for show/no-show frames for supporting alt_ref. - * If parent frame is alt_ref, child has one too. - */ - LOWER_RES_FRAME_INFO* store_info - = (LOWER_RES_FRAME_INFO*)cpi->oxcf.mr_low_res_mode_info; + LOWER_RES_FRAME_INFO *store_info = + (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info; - /* Set frame_type to be INTER_FRAME since we won't drop key frame. */ - store_info->frame_type = INTER_FRAME; - store_info->is_frame_dropped = 1; - } + /* Set frame_type to be INTER_FRAME since we won't drop key frame. */ + store_info->frame_type = INTER_FRAME; + store_info->is_frame_dropped = 1; + } } diff --git a/libs/libvpx/vp8/encoder/mr_dissim.h b/libs/libvpx/vp8/encoder/mr_dissim.h index 5a59ce62a6..da36628afa 100644 --- a/libs/libvpx/vp8/encoder/mr_dissim.h +++ b/libs/libvpx/vp8/encoder/mr_dissim.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_ENCODER_MR_DISSIM_H_ #define VP8_ENCODER_MR_DISSIM_H_ #include "vpx_config.h" diff --git a/libs/libvpx/vp8/encoder/onyx_if.c b/libs/libvpx/vp8/encoder/onyx_if.c index 5a4b37dcff..6ebf233edf 100644 --- a/libs/libvpx/vp8/encoder/onyx_if.c +++ b/libs/libvpx/vp8/encoder/onyx_if.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include "vpx_config.h" #include "./vpx_scale_rtcd.h" #include "./vpx_dsp_rtcd.h" @@ -21,7 +20,7 @@ #include "vp8/common/alloccommon.h" #include "mcomp.h" #include "firstpass.h" -#include "vpx/internal/vpx_psnr.h" +#include "vpx_dsp/psnr.h" #include "vpx_scale/vpx_scale.h" #include "vp8/common/extend.h" #include "ratectrl.h" @@ -56,7 +55,9 @@ extern void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi); extern void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val); extern void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi); -extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *post, int filt_lvl, int low_var_thresh, int flag); +extern void vp8_deblock_frame(YV12_BUFFER_CONFIG *source, + YV12_BUFFER_CONFIG *post, int filt_lvl, + int low_var_thresh, int flag); extern void print_parms(VP8_CONFIG *ocf, char *filenam); extern unsigned int vp8_get_processor_freq(); extern void print_tree_update_probs(); @@ -78,7 +79,6 @@ extern const int vp8_gf_interval_table[101]; #include "vpx_dsp/ssim.h" #endif - #ifdef OUTPUT_YUV_SRC FILE *yuv_file; #endif @@ -97,13 +97,13 @@ extern int skip_true_count; extern int skip_false_count; #endif - #ifdef VP8_ENTROPY_STATS extern int intra_mode_stats[10][10][10]; #endif #ifdef SPEEDSTATS -unsigned int frames_at_speed[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +unsigned int frames_at_speed[16] = { 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 }; unsigned int tot_pm = 0; unsigned int cnt_pm = 0; unsigned int tot_ef = 0; @@ -112,12 +112,12 @@ unsigned int cnt_ef = 0; #ifdef MODE_STATS extern 
unsigned __int64 Sectionbits[50]; -extern int y_modes[5] ; -extern int uv_modes[4] ; -extern int b_modes[10] ; +extern int y_modes[5]; +extern int uv_modes[4]; +extern int b_modes[10]; -extern int inter_y_modes[10] ; -extern int inter_uv_modes[4] ; +extern int inter_y_modes[10]; +extern int inter_uv_modes[4]; extern unsigned int inter_b_modes[15]; #endif @@ -129,395 +129,350 @@ extern void vp8cx_init_quantizer(VP8_COMP *cpi); extern const int vp8cx_base_skip_false_prob[128]; /* Tables relating active max Q to active min Q */ -static const unsigned char kf_low_motion_minq[QINDEX_RANGE] = -{ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2, - 3,3,3,3,3,3,4,4,4,5,5,5,5,5,6,6, - 6,6,7,7,8,8,8,8,9,9,10,10,10,10,11,11, - 11,11,12,12,13,13,13,13,14,14,15,15,15,15,16,16, - 16,16,17,17,18,18,18,18,19,20,20,21,21,22,23,23 +static const unsigned char kf_low_motion_minq[QINDEX_RANGE] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, + 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 10, 10, 11, + 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, + 17, 17, 18, 18, 18, 18, 19, 20, 20, 21, 21, 22, 23, 23 }; -static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = -{ - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 1,1,1,1,1,1,1,1,2,2,2,2,3,3,3,3, - 3,3,3,3,4,4,4,4,5,5,5,5,5,5,6,6, - 6,6,7,7,8,8,8,8,9,9,10,10,10,10,11,11, - 11,11,12,12,13,13,13,13,14,14,15,15,15,15,16,16, - 16,16,17,17,18,18,18,18,19,19,20,20,20,20,21,21, - 21,21,22,22,23,23,24,25,25,26,26,27,28,28,29,30 +static const unsigned char kf_high_motion_minq[QINDEX_RANGE] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 
1, + 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, + 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, + 10, 10, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15, 15, 15, 16, + 16, 16, 16, 17, 17, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29, 30 }; -static const unsigned char gf_low_motion_minq[QINDEX_RANGE] = -{ - 0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2, - 3,3,3,3,4,4,4,4,5,5,5,5,6,6,6,6, - 7,7,7,7,8,8,8,8,9,9,9,9,10,10,10,10, - 11,11,12,12,13,13,14,14,15,15,16,16,17,17,18,18, - 19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26, - 27,27,28,28,29,29,30,30,31,31,32,32,33,33,34,34, - 35,35,36,36,37,37,38,38,39,39,40,40,41,41,42,42, - 43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58 +static const unsigned char gf_low_motion_minq[QINDEX_RANGE] = { + 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, + 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, + 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, + 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, + 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, + 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58 }; -static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] = -{ - 0,0,0,0,1,1,1,1,1,1,2,2,3,3,3,4, - 4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9, - 9,10,10,10,10,11,11,11,12,12,12,12,13,13,13,14, - 14,14,15,15,16,16,17,17,18,18,19,19,20,20,21,21, - 22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29, - 30,30,31,31,32,32,33,33,34,34,35,35,36,36,37,37, - 38,39,39,40,40,41,41,42,42,43,43,44,45,46,47,48, - 49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64 +static const unsigned char gf_mid_motion_minq[QINDEX_RANGE] = { + 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, + 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, + 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 
15, 16, 16, 17, 17, 18, + 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, + 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, + 37, 38, 39, 39, 40, 40, 41, 41, 42, 42, 43, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64 }; -static const unsigned char gf_high_motion_minq[QINDEX_RANGE] = -{ - 0,0,0,0,1,1,1,1,1,2,2,2,3,3,3,4, - 4,4,5,5,5,6,6,6,7,7,7,8,8,8,9,9, - 9,10,10,10,11,11,12,12,13,13,14,14,15,15,16,16, - 17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24, - 25,25,26,26,27,27,28,28,29,29,30,30,31,31,32,32, - 33,33,34,34,35,35,36,36,37,37,38,38,39,39,40,40, - 41,41,42,42,43,44,45,46,47,48,49,50,51,52,53,54, - 55,56,57,58,59,60,62,64,66,68,70,72,74,76,78,80 +static const unsigned char gf_high_motion_minq[QINDEX_RANGE] = { + 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, + 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, + 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, + 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 30, 30, + 31, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, + 40, 41, 41, 42, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80 }; -static const unsigned char inter_minq[QINDEX_RANGE] = -{ - 0,0,1,1,2,3,3,4,4,5,6,6,7,8,8,9, - 9,10,11,11,12,13,13,14,15,15,16,17,17,18,19,20, - 20,21,22,22,23,24,24,25,26,27,27,28,29,30,30,31, - 32,33,33,34,35,36,36,37,38,39,39,40,41,42,42,43, - 44,45,46,46,47,48,49,50,50,51,52,53,54,55,55,56, - 57,58,59,60,60,61,62,63,64,65,66,67,67,68,69,70, - 71,72,73,74,75,75,76,77,78,79,80,81,82,83,84,85, - 86,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100 +static const unsigned char inter_minq[QINDEX_RANGE] = { + 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11, + 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23, 24, + 24, 25, 26, 27, 27, 28, 29, 30, 
30, 31, 32, 33, 33, 34, 35, 36, 36, 37, 38, + 39, 39, 40, 41, 42, 42, 43, 44, 45, 46, 46, 47, 48, 49, 50, 50, 51, 52, 53, + 54, 55, 55, 56, 57, 58, 59, 60, 60, 61, 62, 63, 64, 65, 66, 67, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 86, + 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100 }; #ifdef PACKET_TESTING extern FILE *vpxlogc; #endif -static void save_layer_context(VP8_COMP *cpi) -{ - LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer]; +static void save_layer_context(VP8_COMP *cpi) { + LAYER_CONTEXT *lc = &cpi->layer_context[cpi->current_layer]; - /* Save layer dependent coding state */ - lc->target_bandwidth = cpi->target_bandwidth; - lc->starting_buffer_level = cpi->oxcf.starting_buffer_level; - lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level; - lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size; - lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms; - lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms; - lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms; - lc->buffer_level = cpi->buffer_level; - lc->bits_off_target = cpi->bits_off_target; - lc->total_actual_bits = cpi->total_actual_bits; - lc->worst_quality = cpi->worst_quality; - lc->active_worst_quality = cpi->active_worst_quality; - lc->best_quality = cpi->best_quality; - lc->active_best_quality = cpi->active_best_quality; - lc->ni_av_qi = cpi->ni_av_qi; - lc->ni_tot_qi = cpi->ni_tot_qi; - lc->ni_frames = cpi->ni_frames; - lc->avg_frame_qindex = cpi->avg_frame_qindex; - lc->rate_correction_factor = cpi->rate_correction_factor; - lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor; - lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor; - lc->zbin_over_quant = cpi->mb.zbin_over_quant; - lc->inter_frame_target = cpi->inter_frame_target; - lc->total_byte_count = cpi->total_byte_count; - lc->filter_level = cpi->common.filter_level; + /* Save 
layer dependent coding state */ + lc->target_bandwidth = cpi->target_bandwidth; + lc->starting_buffer_level = cpi->oxcf.starting_buffer_level; + lc->optimal_buffer_level = cpi->oxcf.optimal_buffer_level; + lc->maximum_buffer_size = cpi->oxcf.maximum_buffer_size; + lc->starting_buffer_level_in_ms = cpi->oxcf.starting_buffer_level_in_ms; + lc->optimal_buffer_level_in_ms = cpi->oxcf.optimal_buffer_level_in_ms; + lc->maximum_buffer_size_in_ms = cpi->oxcf.maximum_buffer_size_in_ms; + lc->buffer_level = cpi->buffer_level; + lc->bits_off_target = cpi->bits_off_target; + lc->total_actual_bits = cpi->total_actual_bits; + lc->worst_quality = cpi->worst_quality; + lc->active_worst_quality = cpi->active_worst_quality; + lc->best_quality = cpi->best_quality; + lc->active_best_quality = cpi->active_best_quality; + lc->ni_av_qi = cpi->ni_av_qi; + lc->ni_tot_qi = cpi->ni_tot_qi; + lc->ni_frames = cpi->ni_frames; + lc->avg_frame_qindex = cpi->avg_frame_qindex; + lc->rate_correction_factor = cpi->rate_correction_factor; + lc->key_frame_rate_correction_factor = cpi->key_frame_rate_correction_factor; + lc->gf_rate_correction_factor = cpi->gf_rate_correction_factor; + lc->zbin_over_quant = cpi->mb.zbin_over_quant; + lc->inter_frame_target = cpi->inter_frame_target; + lc->total_byte_count = cpi->total_byte_count; + lc->filter_level = cpi->common.filter_level; - lc->last_frame_percent_intra = cpi->last_frame_percent_intra; + lc->last_frame_percent_intra = cpi->last_frame_percent_intra; - memcpy (lc->count_mb_ref_frame_usage, - cpi->mb.count_mb_ref_frame_usage, - sizeof(cpi->mb.count_mb_ref_frame_usage)); + memcpy(lc->count_mb_ref_frame_usage, cpi->mb.count_mb_ref_frame_usage, + sizeof(cpi->mb.count_mb_ref_frame_usage)); } -static void restore_layer_context(VP8_COMP *cpi, const int layer) -{ - LAYER_CONTEXT *lc = &cpi->layer_context[layer]; +static void restore_layer_context(VP8_COMP *cpi, const int layer) { + LAYER_CONTEXT *lc = &cpi->layer_context[layer]; - /* Restore layer dependent 
coding state */ - cpi->current_layer = layer; - cpi->target_bandwidth = lc->target_bandwidth; - cpi->oxcf.target_bandwidth = lc->target_bandwidth; - cpi->oxcf.starting_buffer_level = lc->starting_buffer_level; - cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level; - cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size; - cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms; - cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms; - cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms; - cpi->buffer_level = lc->buffer_level; - cpi->bits_off_target = lc->bits_off_target; - cpi->total_actual_bits = lc->total_actual_bits; - cpi->active_worst_quality = lc->active_worst_quality; - cpi->active_best_quality = lc->active_best_quality; - cpi->ni_av_qi = lc->ni_av_qi; - cpi->ni_tot_qi = lc->ni_tot_qi; - cpi->ni_frames = lc->ni_frames; - cpi->avg_frame_qindex = lc->avg_frame_qindex; - cpi->rate_correction_factor = lc->rate_correction_factor; - cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor; - cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor; - cpi->mb.zbin_over_quant = lc->zbin_over_quant; - cpi->inter_frame_target = lc->inter_frame_target; - cpi->total_byte_count = lc->total_byte_count; - cpi->common.filter_level = lc->filter_level; + /* Restore layer dependent coding state */ + cpi->current_layer = layer; + cpi->target_bandwidth = lc->target_bandwidth; + cpi->oxcf.target_bandwidth = lc->target_bandwidth; + cpi->oxcf.starting_buffer_level = lc->starting_buffer_level; + cpi->oxcf.optimal_buffer_level = lc->optimal_buffer_level; + cpi->oxcf.maximum_buffer_size = lc->maximum_buffer_size; + cpi->oxcf.starting_buffer_level_in_ms = lc->starting_buffer_level_in_ms; + cpi->oxcf.optimal_buffer_level_in_ms = lc->optimal_buffer_level_in_ms; + cpi->oxcf.maximum_buffer_size_in_ms = lc->maximum_buffer_size_in_ms; + cpi->buffer_level = lc->buffer_level; + cpi->bits_off_target = 
lc->bits_off_target; + cpi->total_actual_bits = lc->total_actual_bits; + cpi->active_worst_quality = lc->active_worst_quality; + cpi->active_best_quality = lc->active_best_quality; + cpi->ni_av_qi = lc->ni_av_qi; + cpi->ni_tot_qi = lc->ni_tot_qi; + cpi->ni_frames = lc->ni_frames; + cpi->avg_frame_qindex = lc->avg_frame_qindex; + cpi->rate_correction_factor = lc->rate_correction_factor; + cpi->key_frame_rate_correction_factor = lc->key_frame_rate_correction_factor; + cpi->gf_rate_correction_factor = lc->gf_rate_correction_factor; + cpi->mb.zbin_over_quant = lc->zbin_over_quant; + cpi->inter_frame_target = lc->inter_frame_target; + cpi->total_byte_count = lc->total_byte_count; + cpi->common.filter_level = lc->filter_level; - cpi->last_frame_percent_intra = lc->last_frame_percent_intra; + cpi->last_frame_percent_intra = lc->last_frame_percent_intra; - memcpy (cpi->mb.count_mb_ref_frame_usage, - lc->count_mb_ref_frame_usage, - sizeof(cpi->mb.count_mb_ref_frame_usage)); + memcpy(cpi->mb.count_mb_ref_frame_usage, lc->count_mb_ref_frame_usage, + sizeof(cpi->mb.count_mb_ref_frame_usage)); } -static int rescale(int val, int num, int denom) -{ - int64_t llnum = num; - int64_t llden = denom; - int64_t llval = val; +static int rescale(int val, int num, int denom) { + int64_t llnum = num; + int64_t llden = denom; + int64_t llval = val; - return (int)(llval * llnum / llden); + return (int)(llval * llnum / llden); } -static void init_temporal_layer_context(VP8_COMP *cpi, - VP8_CONFIG *oxcf, +static void init_temporal_layer_context(VP8_COMP *cpi, VP8_CONFIG *oxcf, const int layer, - double prev_layer_framerate) -{ - LAYER_CONTEXT *lc = &cpi->layer_context[layer]; + double prev_layer_framerate) { + LAYER_CONTEXT *lc = &cpi->layer_context[layer]; - lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer]; - lc->target_bandwidth = cpi->oxcf.target_bitrate[layer] * 1000; + lc->framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[layer]; + lc->target_bandwidth = 
cpi->oxcf.target_bitrate[layer] * 1000; - lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level; - lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level; - lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size; + lc->starting_buffer_level_in_ms = oxcf->starting_buffer_level; + lc->optimal_buffer_level_in_ms = oxcf->optimal_buffer_level; + lc->maximum_buffer_size_in_ms = oxcf->maximum_buffer_size; - lc->starting_buffer_level = - rescale((int)(oxcf->starting_buffer_level), - lc->target_bandwidth, 1000); + lc->starting_buffer_level = + rescale((int)(oxcf->starting_buffer_level), lc->target_bandwidth, 1000); - if (oxcf->optimal_buffer_level == 0) - lc->optimal_buffer_level = lc->target_bandwidth / 8; - else - lc->optimal_buffer_level = - rescale((int)(oxcf->optimal_buffer_level), - lc->target_bandwidth, 1000); + if (oxcf->optimal_buffer_level == 0) { + lc->optimal_buffer_level = lc->target_bandwidth / 8; + } else { + lc->optimal_buffer_level = + rescale((int)(oxcf->optimal_buffer_level), lc->target_bandwidth, 1000); + } - if (oxcf->maximum_buffer_size == 0) - lc->maximum_buffer_size = lc->target_bandwidth / 8; - else - lc->maximum_buffer_size = - rescale((int)(oxcf->maximum_buffer_size), - lc->target_bandwidth, 1000); + if (oxcf->maximum_buffer_size == 0) { + lc->maximum_buffer_size = lc->target_bandwidth / 8; + } else { + lc->maximum_buffer_size = + rescale((int)(oxcf->maximum_buffer_size), lc->target_bandwidth, 1000); + } - /* Work out the average size of a frame within this layer */ - if (layer > 0) - lc->avg_frame_size_for_layer = - (int)((cpi->oxcf.target_bitrate[layer] - - cpi->oxcf.target_bitrate[layer-1]) * 1000 / - (lc->framerate - prev_layer_framerate)); + /* Work out the average size of a frame within this layer */ + if (layer > 0) { + lc->avg_frame_size_for_layer = + (int)((cpi->oxcf.target_bitrate[layer] - + cpi->oxcf.target_bitrate[layer - 1]) * + 1000 / (lc->framerate - prev_layer_framerate)); + } - lc->active_worst_quality = 
cpi->oxcf.worst_allowed_q; - lc->active_best_quality = cpi->oxcf.best_allowed_q; - lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q; + lc->active_worst_quality = cpi->oxcf.worst_allowed_q; + lc->active_best_quality = cpi->oxcf.best_allowed_q; + lc->avg_frame_qindex = cpi->oxcf.worst_allowed_q; - lc->buffer_level = lc->starting_buffer_level; - lc->bits_off_target = lc->starting_buffer_level; + lc->buffer_level = lc->starting_buffer_level; + lc->bits_off_target = lc->starting_buffer_level; - lc->total_actual_bits = 0; - lc->ni_av_qi = 0; - lc->ni_tot_qi = 0; - lc->ni_frames = 0; - lc->rate_correction_factor = 1.0; - lc->key_frame_rate_correction_factor = 1.0; - lc->gf_rate_correction_factor = 1.0; - lc->inter_frame_target = 0; + lc->total_actual_bits = 0; + lc->ni_av_qi = 0; + lc->ni_tot_qi = 0; + lc->ni_frames = 0; + lc->rate_correction_factor = 1.0; + lc->key_frame_rate_correction_factor = 1.0; + lc->gf_rate_correction_factor = 1.0; + lc->inter_frame_target = 0; } // Upon a run-time change in temporal layers, reset the layer context parameters // for any "new" layers. For "existing" layers, let them inherit the parameters // from the previous layer state (at the same layer #). In future we may want // to better map the previous layer state(s) to the "new" ones. -static void reset_temporal_layer_change(VP8_COMP *cpi, - VP8_CONFIG *oxcf, - const int prev_num_layers) -{ - int i; - double prev_layer_framerate = 0; - const int curr_num_layers = cpi->oxcf.number_of_layers; - // If the previous state was 1 layer, get current layer context from cpi. - // We need this to set the layer context for the new layers below. 
- if (prev_num_layers == 1) - { - cpi->current_layer = 0; - save_layer_context(cpi); +static void reset_temporal_layer_change(VP8_COMP *cpi, VP8_CONFIG *oxcf, + const int prev_num_layers) { + int i; + double prev_layer_framerate = 0; + const int curr_num_layers = cpi->oxcf.number_of_layers; + // If the previous state was 1 layer, get current layer context from cpi. + // We need this to set the layer context for the new layers below. + if (prev_num_layers == 1) { + cpi->current_layer = 0; + save_layer_context(cpi); + } + for (i = 0; i < curr_num_layers; ++i) { + LAYER_CONTEXT *lc = &cpi->layer_context[i]; + if (i >= prev_num_layers) { + init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate); } - for (i = 0; i < curr_num_layers; i++) - { - LAYER_CONTEXT *lc = &cpi->layer_context[i]; - if (i >= prev_num_layers) - { - init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate); - } - // The initial buffer levels are set based on their starting levels. - // We could set the buffer levels based on the previous state (normalized - // properly by the layer bandwidths) but we would need to keep track of - // the previous set of layer bandwidths (i.e., target_bitrate[i]) - // before the layer change. For now, reset to the starting levels. - lc->buffer_level = cpi->oxcf.starting_buffer_level_in_ms * - cpi->oxcf.target_bitrate[i]; - lc->bits_off_target = lc->buffer_level; - // TDOD(marpan): Should we set the rate_correction_factor and - // active_worst/best_quality to values derived from the previous layer - // state (to smooth-out quality dips/rate fluctuation at transition)? + // The initial buffer levels are set based on their starting levels. + // We could set the buffer levels based on the previous state (normalized + // properly by the layer bandwidths) but we would need to keep track of + // the previous set of layer bandwidths (i.e., target_bitrate[i]) + // before the layer change. For now, reset to the starting levels. 
+ lc->buffer_level = + cpi->oxcf.starting_buffer_level_in_ms * cpi->oxcf.target_bitrate[i]; + lc->bits_off_target = lc->buffer_level; + // TDOD(marpan): Should we set the rate_correction_factor and + // active_worst/best_quality to values derived from the previous layer + // state (to smooth-out quality dips/rate fluctuation at transition)? - // We need to treat the 1 layer case separately: oxcf.target_bitrate[i] - // is not set for 1 layer, and the restore_layer_context/save_context() - // are not called in the encoding loop, so we need to call it here to - // pass the layer context state to |cpi|. - if (curr_num_layers == 1) - { - lc->target_bandwidth = cpi->oxcf.target_bandwidth; - lc->buffer_level = cpi->oxcf.starting_buffer_level_in_ms * - lc->target_bandwidth / 1000; - lc->bits_off_target = lc->buffer_level; - restore_layer_context(cpi, 0); - } - prev_layer_framerate = cpi->output_framerate / - cpi->oxcf.rate_decimator[i]; + // We need to treat the 1 layer case separately: oxcf.target_bitrate[i] + // is not set for 1 layer, and the restore_layer_context/save_context() + // are not called in the encoding loop, so we need to call it here to + // pass the layer context state to |cpi|. 
+ if (curr_num_layers == 1) { + lc->target_bandwidth = cpi->oxcf.target_bandwidth; + lc->buffer_level = + cpi->oxcf.starting_buffer_level_in_ms * lc->target_bandwidth / 1000; + lc->bits_off_target = lc->buffer_level; + restore_layer_context(cpi, 0); } + prev_layer_framerate = cpi->output_framerate / cpi->oxcf.rate_decimator[i]; + } } -static void setup_features(VP8_COMP *cpi) -{ - // If segmentation enabled set the update flags - if ( cpi->mb.e_mbd.segmentation_enabled ) - { - cpi->mb.e_mbd.update_mb_segmentation_map = 1; - cpi->mb.e_mbd.update_mb_segmentation_data = 1; - } - else - { - cpi->mb.e_mbd.update_mb_segmentation_map = 0; - cpi->mb.e_mbd.update_mb_segmentation_data = 0; - } +static void setup_features(VP8_COMP *cpi) { + // If segmentation enabled set the update flags + if (cpi->mb.e_mbd.segmentation_enabled) { + cpi->mb.e_mbd.update_mb_segmentation_map = 1; + cpi->mb.e_mbd.update_mb_segmentation_data = 1; + } else { + cpi->mb.e_mbd.update_mb_segmentation_map = 0; + cpi->mb.e_mbd.update_mb_segmentation_data = 0; + } - cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0; - cpi->mb.e_mbd.mode_ref_lf_delta_update = 0; - memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas)); - memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas)); - memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas)); - memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas)); - - set_default_lf_deltas(cpi); + cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 0; + cpi->mb.e_mbd.mode_ref_lf_delta_update = 0; + memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas)); + memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas)); + memset(cpi->mb.e_mbd.last_ref_lf_deltas, 0, + sizeof(cpi->mb.e_mbd.ref_lf_deltas)); + memset(cpi->mb.e_mbd.last_mode_lf_deltas, 0, + sizeof(cpi->mb.e_mbd.mode_lf_deltas)); + set_default_lf_deltas(cpi); } - static void 
dealloc_raw_frame_buffers(VP8_COMP *cpi); -void vp8_initialize_enc(void) -{ - static volatile int init_done = 0; +void vp8_initialize_enc(void) { + static volatile int init_done = 0; - if (!init_done) { - vpx_dsp_rtcd(); - vp8_init_intra_predictors(); - init_done = 1; - } + if (!init_done) { + vpx_dsp_rtcd(); + vp8_init_intra_predictors(); + init_done = 1; + } } -static void dealloc_compressor_data(VP8_COMP *cpi) -{ - vpx_free(cpi->tplist); - cpi->tplist = NULL; +static void dealloc_compressor_data(VP8_COMP *cpi) { + vpx_free(cpi->tplist); + cpi->tplist = NULL; - /* Delete last frame MV storage buffers */ - vpx_free(cpi->lfmv); - cpi->lfmv = 0; + /* Delete last frame MV storage buffers */ + vpx_free(cpi->lfmv); + cpi->lfmv = 0; - vpx_free(cpi->lf_ref_frame_sign_bias); - cpi->lf_ref_frame_sign_bias = 0; + vpx_free(cpi->lf_ref_frame_sign_bias); + cpi->lf_ref_frame_sign_bias = 0; - vpx_free(cpi->lf_ref_frame); - cpi->lf_ref_frame = 0; + vpx_free(cpi->lf_ref_frame); + cpi->lf_ref_frame = 0; - /* Delete sementation map */ - vpx_free(cpi->segmentation_map); - cpi->segmentation_map = 0; + /* Delete sementation map */ + vpx_free(cpi->segmentation_map); + cpi->segmentation_map = 0; - vpx_free(cpi->active_map); - cpi->active_map = 0; + vpx_free(cpi->active_map); + cpi->active_map = 0; - vp8_de_alloc_frame_buffers(&cpi->common); + vp8_de_alloc_frame_buffers(&cpi->common); - vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame); - vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source); - dealloc_raw_frame_buffers(cpi); + vp8_yv12_de_alloc_frame_buffer(&cpi->pick_lf_lvl_frame); + vp8_yv12_de_alloc_frame_buffer(&cpi->scaled_source); + dealloc_raw_frame_buffers(cpi); - vpx_free(cpi->tok); - cpi->tok = 0; + vpx_free(cpi->tok); + cpi->tok = 0; - /* Structure used to monitor GF usage */ - vpx_free(cpi->gf_active_flags); - cpi->gf_active_flags = 0; + /* Structure used to monitor GF usage */ + vpx_free(cpi->gf_active_flags); + cpi->gf_active_flags = 0; - /* Activity mask based per mb 
zbin adjustments */ - vpx_free(cpi->mb_activity_map); - cpi->mb_activity_map = 0; + /* Activity mask based per mb zbin adjustments */ + vpx_free(cpi->mb_activity_map); + cpi->mb_activity_map = 0; - vpx_free(cpi->mb.pip); - cpi->mb.pip = 0; + vpx_free(cpi->mb.pip); + cpi->mb.pip = 0; #if CONFIG_MULTITHREAD - /* De-allocate mutex */ - if (cpi->pmutex != NULL) { - VP8_COMMON *const pc = &cpi->common; - int i; - - for (i = 0; i < pc->mb_rows; i++) { - pthread_mutex_destroy(&cpi->pmutex[i]); - } - vpx_free(cpi->pmutex); - cpi->pmutex = NULL; - } - - vpx_free(cpi->mt_current_mb_col); - cpi->mt_current_mb_col = NULL; + vpx_free(cpi->mt_current_mb_col); + cpi->mt_current_mb_col = NULL; #endif } -static void enable_segmentation(VP8_COMP *cpi) -{ - /* Set the appropriate feature bit */ - cpi->mb.e_mbd.segmentation_enabled = 1; - cpi->mb.e_mbd.update_mb_segmentation_map = 1; - cpi->mb.e_mbd.update_mb_segmentation_data = 1; +static void enable_segmentation(VP8_COMP *cpi) { + /* Set the appropriate feature bit */ + cpi->mb.e_mbd.segmentation_enabled = 1; + cpi->mb.e_mbd.update_mb_segmentation_map = 1; + cpi->mb.e_mbd.update_mb_segmentation_data = 1; } -static void disable_segmentation(VP8_COMP *cpi) -{ - /* Clear the appropriate feature bit */ - cpi->mb.e_mbd.segmentation_enabled = 0; +static void disable_segmentation(VP8_COMP *cpi) { + /* Clear the appropriate feature bit */ + cpi->mb.e_mbd.segmentation_enabled = 0; } /* Valid values for a segment are 0 to 3 * Segmentation map is arrange as [Rows][Columns] */ -static void set_segmentation_map(VP8_COMP *cpi, unsigned char *segmentation_map) -{ - /* Copy in the new segmentation map */ - memcpy(cpi->segmentation_map, segmentation_map, (cpi->common.mb_rows * cpi->common.mb_cols)); +static void set_segmentation_map(VP8_COMP *cpi, + unsigned char *segmentation_map) { + /* Copy in the new segmentation map */ + memcpy(cpi->segmentation_map, segmentation_map, + (cpi->common.mb_rows * cpi->common.mb_cols)); - /* Signal that the map 
should be updated. */ - cpi->mb.e_mbd.update_mb_segmentation_map = 1; - cpi->mb.e_mbd.update_mb_segmentation_data = 1; + /* Signal that the map should be updated. */ + cpi->mb.e_mbd.update_mb_segmentation_map = 1; + cpi->mb.e_mbd.update_mb_segmentation_data = 1; } /* The values given for each segment can be either deltas (from the default @@ -532,1311 +487,1166 @@ static void set_segmentation_map(VP8_COMP *cpi, unsigned char *segmentation_map) * abs_delta = SEGMENT_ABSDATA (use the absolute values given). * */ -static void set_segment_data(VP8_COMP *cpi, signed char *feature_data, unsigned char abs_delta) -{ - cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta; - memcpy(cpi->segment_feature_data, feature_data, sizeof(cpi->segment_feature_data)); +static void set_segment_data(VP8_COMP *cpi, signed char *feature_data, + unsigned char abs_delta) { + cpi->mb.e_mbd.mb_segement_abs_delta = abs_delta; + memcpy(cpi->segment_feature_data, feature_data, + sizeof(cpi->segment_feature_data)); } - /* A simple function to cyclically refresh the background at a lower Q */ -static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) -{ - unsigned char *seg_map = cpi->segmentation_map; - signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; - int i; - int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe; - int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols; +static void cyclic_background_refresh(VP8_COMP *cpi, int Q, int lf_adjustment) { + unsigned char *seg_map = cpi->segmentation_map; + signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; + int i; + int block_count = cpi->cyclic_refresh_mode_max_mbs_perframe; + int mbs_in_frame = cpi->common.mb_rows * cpi->common.mb_cols; - cpi->cyclic_refresh_q = Q / 2; + cpi->cyclic_refresh_q = Q / 2; - if (cpi->oxcf.screen_content_mode) { - // Modify quality ramp-up based on Q. Above some Q level, increase the - // number of blocks to be refreshed, and reduce it below the thredhold. 
- // Turn-off under certain conditions (i.e., away from key frame, and if - // we are at good quality (low Q) and most of the blocks were skipped-encoded - // in previous frame. - int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100; - if (Q >= qp_thresh) { - cpi->cyclic_refresh_mode_max_mbs_perframe = - (cpi->common.mb_rows * cpi->common.mb_cols) / 10; - } else if (cpi->frames_since_key > 250 && - Q < 20 && - cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) { - cpi->cyclic_refresh_mode_max_mbs_perframe = 0; - } else { - cpi->cyclic_refresh_mode_max_mbs_perframe = - (cpi->common.mb_rows * cpi->common.mb_cols) / 20; - } - block_count = cpi->cyclic_refresh_mode_max_mbs_perframe; + if (cpi->oxcf.screen_content_mode) { + // Modify quality ramp-up based on Q. Above some Q level, increase the + // number of blocks to be refreshed, and reduce it below the thredhold. + // Turn-off under certain conditions (i.e., away from key frame, and if + // we are at good quality (low Q) and most of the blocks were + // skipped-encoded + // in previous frame. + int qp_thresh = (cpi->oxcf.screen_content_mode == 2) ? 80 : 100; + if (Q >= qp_thresh) { + cpi->cyclic_refresh_mode_max_mbs_perframe = + (cpi->common.mb_rows * cpi->common.mb_cols) / 10; + } else if (cpi->frames_since_key > 250 && Q < 20 && + cpi->mb.skip_true_count > (int)(0.95 * mbs_in_frame)) { + cpi->cyclic_refresh_mode_max_mbs_perframe = 0; + } else { + cpi->cyclic_refresh_mode_max_mbs_perframe = + (cpi->common.mb_rows * cpi->common.mb_cols) / 20; } + block_count = cpi->cyclic_refresh_mode_max_mbs_perframe; + } - // Set every macroblock to be eligible for update. - // For key frame this will reset seg map to 0. - memset(cpi->segmentation_map, 0, mbs_in_frame); + // Set every macroblock to be eligible for update. + // For key frame this will reset seg map to 0. 
+ memset(cpi->segmentation_map, 0, mbs_in_frame); - if (cpi->common.frame_type != KEY_FRAME && block_count > 0) - { - /* Cycle through the macro_block rows */ - /* MB loop to set local segmentation map */ - i = cpi->cyclic_refresh_mode_index; - assert(i < mbs_in_frame); - do - { - /* If the MB is as a candidate for clean up then mark it for - * possible boost/refresh (segment 1) The segment id may get - * reset to 0 later if the MB gets coded anything other than - * last frame 0,0 as only (last frame 0,0) MBs are eligable for - * refresh : that is to say Mbs likely to be background blocks. - */ - if (cpi->cyclic_refresh_map[i] == 0) - { - seg_map[i] = 1; - block_count --; - } - else if (cpi->cyclic_refresh_map[i] < 0) - cpi->cyclic_refresh_map[i]++; + if (cpi->common.frame_type != KEY_FRAME && block_count > 0) { + /* Cycle through the macro_block rows */ + /* MB loop to set local segmentation map */ + i = cpi->cyclic_refresh_mode_index; + assert(i < mbs_in_frame); + do { + /* If the MB is as a candidate for clean up then mark it for + * possible boost/refresh (segment 1) The segment id may get + * reset to 0 later if the MB gets coded anything other than + * last frame 0,0 as only (last frame 0,0) MBs are eligable for + * refresh : that is to say Mbs likely to be background blocks. 
+ */ + if (cpi->cyclic_refresh_map[i] == 0) { + seg_map[i] = 1; + block_count--; + } else if (cpi->cyclic_refresh_map[i] < 0) { + cpi->cyclic_refresh_map[i]++; + } - i++; - if (i == mbs_in_frame) - i = 0; + i++; + if (i == mbs_in_frame) i = 0; - } - while(block_count && i != cpi->cyclic_refresh_mode_index); + } while (block_count && i != cpi->cyclic_refresh_mode_index); - cpi->cyclic_refresh_mode_index = i; + cpi->cyclic_refresh_mode_index = i; #if CONFIG_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity > 0) { - if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive && - Q < (int)cpi->denoiser.denoise_pars.qp_thresh && - (cpi->frames_since_key > - 2 * cpi->denoiser.denoise_pars.consec_zerolast)) { - // Under aggressive denoising, use segmentation to turn off loop - // filter below some qp thresh. The filter is reduced for all - // blocks that have been encoded as ZEROMV LAST x frames in a row, - // where x is set by cpi->denoiser.denoise_pars.consec_zerolast. - // This is to avoid "dot" artifacts that can occur from repeated - // loop filtering on noisy input source. - cpi->cyclic_refresh_q = Q; - // lf_adjustment = -MAX_LOOP_FILTER; - lf_adjustment = -40; - for (i = 0; i < mbs_in_frame; ++i) { - seg_map[i] = (cpi->consec_zero_last[i] > - cpi->denoiser.denoise_pars.consec_zerolast) ? 1 : 0; - } - } + if (cpi->oxcf.noise_sensitivity > 0) { + if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive && + Q < (int)cpi->denoiser.denoise_pars.qp_thresh && + (cpi->frames_since_key > + 2 * cpi->denoiser.denoise_pars.consec_zerolast)) { + // Under aggressive denoising, use segmentation to turn off loop + // filter below some qp thresh. The filter is reduced for all + // blocks that have been encoded as ZEROMV LAST x frames in a row, + // where x is set by cpi->denoiser.denoise_pars.consec_zerolast. + // This is to avoid "dot" artifacts that can occur from repeated + // loop filtering on noisy input source. 
+ cpi->cyclic_refresh_q = Q; + // lf_adjustment = -MAX_LOOP_FILTER; + lf_adjustment = -40; + for (i = 0; i < mbs_in_frame; ++i) { + seg_map[i] = (cpi->consec_zero_last[i] > + cpi->denoiser.denoise_pars.consec_zerolast) + ? 1 + : 0; } -#endif + } } +#endif + } - /* Activate segmentation. */ - cpi->mb.e_mbd.update_mb_segmentation_map = 1; - cpi->mb.e_mbd.update_mb_segmentation_data = 1; - enable_segmentation(cpi); + /* Activate segmentation. */ + cpi->mb.e_mbd.update_mb_segmentation_map = 1; + cpi->mb.e_mbd.update_mb_segmentation_data = 1; + enable_segmentation(cpi); - /* Set up the quant segment data */ - feature_data[MB_LVL_ALT_Q][0] = 0; - feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q); - feature_data[MB_LVL_ALT_Q][2] = 0; - feature_data[MB_LVL_ALT_Q][3] = 0; + /* Set up the quant segment data */ + feature_data[MB_LVL_ALT_Q][0] = 0; + feature_data[MB_LVL_ALT_Q][1] = (cpi->cyclic_refresh_q - Q); + feature_data[MB_LVL_ALT_Q][2] = 0; + feature_data[MB_LVL_ALT_Q][3] = 0; - /* Set up the loop segment data */ - feature_data[MB_LVL_ALT_LF][0] = 0; - feature_data[MB_LVL_ALT_LF][1] = lf_adjustment; - feature_data[MB_LVL_ALT_LF][2] = 0; - feature_data[MB_LVL_ALT_LF][3] = 0; - - /* Initialise the feature data structure */ - set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA); + /* Set up the loop segment data */ + feature_data[MB_LVL_ALT_LF][0] = 0; + feature_data[MB_LVL_ALT_LF][1] = lf_adjustment; + feature_data[MB_LVL_ALT_LF][2] = 0; + feature_data[MB_LVL_ALT_LF][3] = 0; + /* Initialise the feature data structure */ + set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA); } -static void set_default_lf_deltas(VP8_COMP *cpi) -{ - cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1; - cpi->mb.e_mbd.mode_ref_lf_delta_update = 1; +static void set_default_lf_deltas(VP8_COMP *cpi) { + cpi->mb.e_mbd.mode_ref_lf_delta_enabled = 1; + cpi->mb.e_mbd.mode_ref_lf_delta_update = 1; - memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas)); - 
memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas)); + memset(cpi->mb.e_mbd.ref_lf_deltas, 0, sizeof(cpi->mb.e_mbd.ref_lf_deltas)); + memset(cpi->mb.e_mbd.mode_lf_deltas, 0, sizeof(cpi->mb.e_mbd.mode_lf_deltas)); - /* Test of ref frame deltas */ - cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2; - cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0; - cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2; - cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2; + /* Test of ref frame deltas */ + cpi->mb.e_mbd.ref_lf_deltas[INTRA_FRAME] = 2; + cpi->mb.e_mbd.ref_lf_deltas[LAST_FRAME] = 0; + cpi->mb.e_mbd.ref_lf_deltas[GOLDEN_FRAME] = -2; + cpi->mb.e_mbd.ref_lf_deltas[ALTREF_FRAME] = -2; - cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */ + cpi->mb.e_mbd.mode_lf_deltas[0] = 4; /* BPRED */ - if(cpi->oxcf.Mode == MODE_REALTIME) - cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */ - else - cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */ + if (cpi->oxcf.Mode == MODE_REALTIME) { + cpi->mb.e_mbd.mode_lf_deltas[1] = -12; /* Zero */ + } else { + cpi->mb.e_mbd.mode_lf_deltas[1] = -2; /* Zero */ + } - cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */ - cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */ + cpi->mb.e_mbd.mode_lf_deltas[2] = 2; /* New mv */ + cpi->mb.e_mbd.mode_lf_deltas[3] = 4; /* Split mv */ } /* Convenience macros for mapping speed and mode into a continuous * range */ -#define GOOD(x) (x+1) -#define RT(x) (x+7) +#define GOOD(x) (x + 1) +#define RT(x) (x + 7) -static int speed_map(int speed, const int *map) -{ - int res; +static int speed_map(int speed, const int *map) { + int res; - do - { - res = *map++; - } while(speed >= *map++); - return res; + do { + res = *map++; + } while (speed >= *map++); + return res; } static const int thresh_mult_map_znn[] = { - /* map common to zero, nearest, and near */ - 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(2), 2000, INT_MAX + /* map common to zero, nearest, and near */ + 0, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, 
RT(2), 2000, INT_MAX }; -static const int thresh_mult_map_vhpred[] = { - 1000, GOOD(2), 1500, GOOD(3), 2000, RT(0), 1000, RT(1), 2000, - RT(7), INT_MAX, INT_MAX -}; +static const int thresh_mult_map_vhpred[] = { 1000, GOOD(2), 1500, GOOD(3), + 2000, RT(0), 1000, RT(1), + 2000, RT(7), INT_MAX, INT_MAX }; -static const int thresh_mult_map_bpred[] = { - 2000, GOOD(0), 2500, GOOD(2), 5000, GOOD(3), 7500, RT(0), 2500, RT(1), 5000, - RT(6), INT_MAX, INT_MAX -}; +static const int thresh_mult_map_bpred[] = { 2000, GOOD(0), 2500, GOOD(2), + 5000, GOOD(3), 7500, RT(0), + 2500, RT(1), 5000, RT(6), + INT_MAX, INT_MAX }; -static const int thresh_mult_map_tm[] = { - 1000, GOOD(2), 1500, GOOD(3), 2000, RT(0), 0, RT(1), 1000, RT(2), 2000, - RT(7), INT_MAX, INT_MAX -}; +static const int thresh_mult_map_tm[] = { 1000, GOOD(2), 1500, GOOD(3), + 2000, RT(0), 0, RT(1), + 1000, RT(2), 2000, RT(7), + INT_MAX, INT_MAX }; -static const int thresh_mult_map_new1[] = { - 1000, GOOD(2), 2000, RT(0), 2000, INT_MAX -}; +static const int thresh_mult_map_new1[] = { 1000, GOOD(2), 2000, + RT(0), 2000, INT_MAX }; -static const int thresh_mult_map_new2[] = { - 1000, GOOD(2), 2000, GOOD(3), 2500, GOOD(5), 4000, RT(0), 2000, RT(2), 2500, - RT(5), 4000, INT_MAX -}; +static const int thresh_mult_map_new2[] = { 1000, GOOD(2), 2000, GOOD(3), + 2500, GOOD(5), 4000, RT(0), + 2000, RT(2), 2500, RT(5), + 4000, INT_MAX }; static const int thresh_mult_map_split1[] = { - 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX, - RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX + 2500, GOOD(0), 1700, GOOD(2), 10000, GOOD(3), 25000, GOOD(4), INT_MAX, + RT(0), 5000, RT(1), 10000, RT(2), 25000, RT(3), INT_MAX, INT_MAX }; static const int thresh_mult_map_split2[] = { - 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX, - RT(0), 10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX + 5000, GOOD(0), 4500, GOOD(2), 20000, GOOD(3), 50000, GOOD(4), INT_MAX, + RT(0), 
10000, RT(1), 20000, RT(2), 50000, RT(3), INT_MAX, INT_MAX }; static const int mode_check_freq_map_zn2[] = { - /* {zero,nearest}{2,3} */ - 0, RT(10), 1<<1, RT(11), 1<<2, RT(12), 1<<3, INT_MAX + /* {zero,nearest}{2,3} */ + 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX }; static const int mode_check_freq_map_vhbpred[] = { - 0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(5), 4, INT_MAX + 0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(5), 4, INT_MAX }; static const int mode_check_freq_map_near2[] = { - 0, GOOD(5), 2, RT(0), 0, RT(3), 2, RT(10), 1<<2, RT(11), 1<<3, RT(12), 1<<4, - INT_MAX + 0, GOOD(5), 2, RT(0), 0, RT(3), 2, + RT(10), 1 << 2, RT(11), 1 << 3, RT(12), 1 << 4, INT_MAX }; static const int mode_check_freq_map_new1[] = { - 0, RT(10), 1<<1, RT(11), 1<<2, RT(12), 1<<3, INT_MAX + 0, RT(10), 1 << 1, RT(11), 1 << 2, RT(12), 1 << 3, INT_MAX }; -static const int mode_check_freq_map_new2[] = { - 0, GOOD(5), 4, RT(0), 0, RT(3), 4, RT(10), 1<<3, RT(11), 1<<4, RT(12), 1<<5, - INT_MAX -}; +static const int mode_check_freq_map_new2[] = { 0, GOOD(5), 4, RT(0), + 0, RT(3), 4, RT(10), + 1 << 3, RT(11), 1 << 4, RT(12), + 1 << 5, INT_MAX }; static const int mode_check_freq_map_split1[] = { - 0, GOOD(2), 2, GOOD(3), 7, RT(1), 2, RT(2), 7, INT_MAX + 0, GOOD(2), 2, GOOD(3), 7, RT(1), 2, RT(2), 7, INT_MAX }; static const int mode_check_freq_map_split2[] = { - 0, GOOD(1), 2, GOOD(2), 4, GOOD(3), 15, RT(1), 4, RT(2), 15, INT_MAX + 0, GOOD(1), 2, GOOD(2), 4, GOOD(3), 15, RT(1), 4, RT(2), 15, INT_MAX }; -void vp8_set_speed_features(VP8_COMP *cpi) -{ - SPEED_FEATURES *sf = &cpi->sf; - int Mode = cpi->compressor_speed; - int Speed = cpi->Speed; - int i; - VP8_COMMON *cm = &cpi->common; - int last_improved_quant = sf->improved_quant; - int ref_frames; +void vp8_set_speed_features(VP8_COMP *cpi) { + SPEED_FEATURES *sf = &cpi->sf; + int Mode = cpi->compressor_speed; + int Speed = cpi->Speed; + int i; + VP8_COMMON *cm = &cpi->common; + int last_improved_quant = sf->improved_quant; + int 
ref_frames; - /* Initialise default mode frequency sampling variables */ - for (i = 0; i < MAX_MODES; i ++) - { - cpi->mode_check_freq[i] = 0; + /* Initialise default mode frequency sampling variables */ + for (i = 0; i < MAX_MODES; ++i) { + cpi->mode_check_freq[i] = 0; + } + + cpi->mb.mbs_tested_so_far = 0; + cpi->mb.mbs_zero_last_dot_suppress = 0; + + /* best quality defaults */ + sf->RD = 1; + sf->search_method = NSTEP; + sf->improved_quant = 1; + sf->improved_dct = 1; + sf->auto_filter = 1; + sf->recode_loop = 1; + sf->quarter_pixel_search = 1; + sf->half_pixel_search = 1; + sf->iterative_sub_pixel = 1; + sf->optimize_coefficients = 1; + sf->use_fastquant_for_pick = 0; + sf->no_skip_block4x4_search = 1; + + sf->first_step = 0; + sf->max_step_search_steps = MAX_MVSEARCH_STEPS; + sf->improved_mv_pred = 1; + + /* default thresholds to 0 */ + for (i = 0; i < MAX_MODES; ++i) sf->thresh_mult[i] = 0; + + /* Count enabled references */ + ref_frames = 1; + if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frames++; + if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frames++; + if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frames++; + + /* Convert speed to continuous range, with clamping */ + if (Mode == 0) { + Speed = 0; + } else if (Mode == 2) { + Speed = RT(Speed); + } else { + if (Speed > 5) Speed = 5; + Speed = GOOD(Speed); + } + + sf->thresh_mult[THR_ZERO1] = sf->thresh_mult[THR_NEAREST1] = + sf->thresh_mult[THR_NEAR1] = sf->thresh_mult[THR_DC] = 0; /* always */ + + sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO3] = + sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST3] = + sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR3] = + speed_map(Speed, thresh_mult_map_znn); + + sf->thresh_mult[THR_V_PRED] = sf->thresh_mult[THR_H_PRED] = + speed_map(Speed, thresh_mult_map_vhpred); + sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred); + sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm); + sf->thresh_mult[THR_NEW1] = 
speed_map(Speed, thresh_mult_map_new1); + sf->thresh_mult[THR_NEW2] = sf->thresh_mult[THR_NEW3] = + speed_map(Speed, thresh_mult_map_new2); + sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1); + sf->thresh_mult[THR_SPLIT2] = sf->thresh_mult[THR_SPLIT3] = + speed_map(Speed, thresh_mult_map_split2); + + // Special case for temporal layers. + // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is + // used as second reference. We don't modify thresholds for ALTREF case + // since ALTREF is usually used as long-term reference in temporal layers. + if ((cpi->Speed <= 6) && (cpi->oxcf.number_of_layers > 1) && + (cpi->ref_frame_flags & VP8_LAST_FRAME) && + (cpi->ref_frame_flags & VP8_GOLD_FRAME)) { + if (cpi->closest_reference_frame == GOLDEN_FRAME) { + sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3; + sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3; + sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3; + } else { + sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1; + sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1; + sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1; } + } - cpi->mb.mbs_tested_so_far = 0; - cpi->mb.mbs_zero_last_dot_suppress = 0; + cpi->mode_check_freq[THR_ZERO1] = cpi->mode_check_freq[THR_NEAREST1] = + cpi->mode_check_freq[THR_NEAR1] = cpi->mode_check_freq[THR_TM] = + cpi->mode_check_freq[THR_DC] = 0; /* always */ - /* best quality defaults */ - sf->RD = 1; - sf->search_method = NSTEP; - sf->improved_quant = 1; - sf->improved_dct = 1; - sf->auto_filter = 1; - sf->recode_loop = 1; - sf->quarter_pixel_search = 1; - sf->half_pixel_search = 1; - sf->iterative_sub_pixel = 1; - sf->optimize_coefficients = 1; - sf->use_fastquant_for_pick = 0; - sf->no_skip_block4x4_search = 1; + cpi->mode_check_freq[THR_ZERO2] = cpi->mode_check_freq[THR_ZERO3] = + cpi->mode_check_freq[THR_NEAREST2] = cpi->mode_check_freq[THR_NEAREST3] = + speed_map(Speed, 
mode_check_freq_map_zn2); - sf->first_step = 0; - sf->max_step_search_steps = MAX_MVSEARCH_STEPS; - sf->improved_mv_pred = 1; + cpi->mode_check_freq[THR_NEAR2] = cpi->mode_check_freq[THR_NEAR3] = + speed_map(Speed, mode_check_freq_map_near2); - /* default thresholds to 0 */ - for (i = 0; i < MAX_MODES; i++) - sf->thresh_mult[i] = 0; - - /* Count enabled references */ - ref_frames = 1; - if (cpi->ref_frame_flags & VP8_LAST_FRAME) - ref_frames++; - if (cpi->ref_frame_flags & VP8_GOLD_FRAME) - ref_frames++; - if (cpi->ref_frame_flags & VP8_ALTR_FRAME) - ref_frames++; - - /* Convert speed to continuous range, with clamping */ - if (Mode == 0) - Speed = 0; - else if (Mode == 2) - Speed = RT(Speed); - else - { - if (Speed > 5) - Speed = 5; - Speed = GOOD(Speed); - } - - sf->thresh_mult[THR_ZERO1] = - sf->thresh_mult[THR_NEAREST1] = - sf->thresh_mult[THR_NEAR1] = - sf->thresh_mult[THR_DC] = 0; /* always */ - - sf->thresh_mult[THR_ZERO2] = - sf->thresh_mult[THR_ZERO3] = - sf->thresh_mult[THR_NEAREST2] = - sf->thresh_mult[THR_NEAREST3] = - sf->thresh_mult[THR_NEAR2] = - sf->thresh_mult[THR_NEAR3] = speed_map(Speed, thresh_mult_map_znn); - - sf->thresh_mult[THR_V_PRED] = - sf->thresh_mult[THR_H_PRED] = speed_map(Speed, thresh_mult_map_vhpred); - sf->thresh_mult[THR_B_PRED] = speed_map(Speed, thresh_mult_map_bpred); - sf->thresh_mult[THR_TM] = speed_map(Speed, thresh_mult_map_tm); - sf->thresh_mult[THR_NEW1] = speed_map(Speed, thresh_mult_map_new1); - sf->thresh_mult[THR_NEW2] = - sf->thresh_mult[THR_NEW3] = speed_map(Speed, thresh_mult_map_new2); - sf->thresh_mult[THR_SPLIT1] = speed_map(Speed, thresh_mult_map_split1); - sf->thresh_mult[THR_SPLIT2] = - sf->thresh_mult[THR_SPLIT3] = speed_map(Speed, thresh_mult_map_split2); - - // Special case for temporal layers. - // Reduce the thresholds for zero/nearest/near for GOLDEN, if GOLDEN is - // used as second reference. 
We don't modify thresholds for ALTREF case - // since ALTREF is usually used as long-term reference in temporal layers. - if ((cpi->Speed <= 6) && - (cpi->oxcf.number_of_layers > 1) && - (cpi->ref_frame_flags & VP8_LAST_FRAME) && - (cpi->ref_frame_flags & VP8_GOLD_FRAME)) { - if (cpi->closest_reference_frame == GOLDEN_FRAME) { - sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 3; - sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 3; - sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 3; - } else { - sf->thresh_mult[THR_ZERO2] = sf->thresh_mult[THR_ZERO2] >> 1; - sf->thresh_mult[THR_NEAREST2] = sf->thresh_mult[THR_NEAREST2] >> 1; - sf->thresh_mult[THR_NEAR2] = sf->thresh_mult[THR_NEAR2] >> 1; - } - } - - cpi->mode_check_freq[THR_ZERO1] = - cpi->mode_check_freq[THR_NEAREST1] = - cpi->mode_check_freq[THR_NEAR1] = - cpi->mode_check_freq[THR_TM] = - cpi->mode_check_freq[THR_DC] = 0; /* always */ - - cpi->mode_check_freq[THR_ZERO2] = - cpi->mode_check_freq[THR_ZERO3] = - cpi->mode_check_freq[THR_NEAREST2] = - cpi->mode_check_freq[THR_NEAREST3] = speed_map(Speed, - mode_check_freq_map_zn2); - - cpi->mode_check_freq[THR_NEAR2] = - cpi->mode_check_freq[THR_NEAR3] = speed_map(Speed, - mode_check_freq_map_near2); - - cpi->mode_check_freq[THR_V_PRED] = - cpi->mode_check_freq[THR_H_PRED] = - cpi->mode_check_freq[THR_B_PRED] = speed_map(Speed, - mode_check_freq_map_vhbpred); - cpi->mode_check_freq[THR_NEW1] = speed_map(Speed, - mode_check_freq_map_new1); - cpi->mode_check_freq[THR_NEW2] = - cpi->mode_check_freq[THR_NEW3] = speed_map(Speed, - mode_check_freq_map_new2); - cpi->mode_check_freq[THR_SPLIT1] = speed_map(Speed, - mode_check_freq_map_split1); - cpi->mode_check_freq[THR_SPLIT2] = - cpi->mode_check_freq[THR_SPLIT3] = speed_map(Speed, - mode_check_freq_map_split2); - Speed = cpi->Speed; - switch (Mode) - { + cpi->mode_check_freq[THR_V_PRED] = cpi->mode_check_freq[THR_H_PRED] = + cpi->mode_check_freq[THR_B_PRED] = + speed_map(Speed, 
mode_check_freq_map_vhbpred); + cpi->mode_check_freq[THR_NEW1] = speed_map(Speed, mode_check_freq_map_new1); + cpi->mode_check_freq[THR_NEW2] = cpi->mode_check_freq[THR_NEW3] = + speed_map(Speed, mode_check_freq_map_new2); + cpi->mode_check_freq[THR_SPLIT1] = + speed_map(Speed, mode_check_freq_map_split1); + cpi->mode_check_freq[THR_SPLIT2] = cpi->mode_check_freq[THR_SPLIT3] = + speed_map(Speed, mode_check_freq_map_split2); + Speed = cpi->Speed; + switch (Mode) { #if !CONFIG_REALTIME_ONLY case 0: /* best quality mode */ - sf->first_step = 0; - sf->max_step_search_steps = MAX_MVSEARCH_STEPS; - break; + sf->first_step = 0; + sf->max_step_search_steps = MAX_MVSEARCH_STEPS; + break; case 1: case 3: - if (Speed > 0) - { - /* Disable coefficient optimization above speed 0 */ - sf->optimize_coefficients = 0; - sf->use_fastquant_for_pick = 1; - sf->no_skip_block4x4_search = 0; + if (Speed > 0) { + /* Disable coefficient optimization above speed 0 */ + sf->optimize_coefficients = 0; + sf->use_fastquant_for_pick = 1; + sf->no_skip_block4x4_search = 0; - sf->first_step = 1; - } + sf->first_step = 1; + } - if (Speed > 2) - { - sf->improved_quant = 0; - sf->improved_dct = 0; + if (Speed > 2) { + sf->improved_quant = 0; + sf->improved_dct = 0; - /* Only do recode loop on key frames, golden frames and - * alt ref frames - */ - sf->recode_loop = 2; + /* Only do recode loop on key frames, golden frames and + * alt ref frames + */ + sf->recode_loop = 2; + } - } + if (Speed > 3) { + sf->auto_filter = 1; + sf->recode_loop = 0; /* recode loop off */ + sf->RD = 0; /* Turn rd off */ + } - if (Speed > 3) - { - sf->auto_filter = 1; - sf->recode_loop = 0; /* recode loop off */ - sf->RD = 0; /* Turn rd off */ + if (Speed > 4) { + sf->auto_filter = 0; /* Faster selection of loop filter */ + } - } - - if (Speed > 4) - { - sf->auto_filter = 0; /* Faster selection of loop filter */ - } - - break; + break; #endif case 2: - sf->optimize_coefficients = 0; - sf->recode_loop = 0; - sf->auto_filter = 
1; - sf->iterative_sub_pixel = 1; - sf->search_method = NSTEP; + sf->optimize_coefficients = 0; + sf->recode_loop = 0; + sf->auto_filter = 1; + sf->iterative_sub_pixel = 1; + sf->search_method = NSTEP; - if (Speed > 0) - { - sf->improved_quant = 0; - sf->improved_dct = 0; - - sf->use_fastquant_for_pick = 1; - sf->no_skip_block4x4_search = 0; - sf->first_step = 1; - } - - if (Speed > 2) - sf->auto_filter = 0; /* Faster selection of loop filter */ - - if (Speed > 3) - { - sf->RD = 0; - sf->auto_filter = 1; - } - - if (Speed > 4) - { - sf->auto_filter = 0; /* Faster selection of loop filter */ - sf->search_method = HEX; - sf->iterative_sub_pixel = 0; - } - - if (Speed > 6) - { - unsigned int sum = 0; - unsigned int total_mbs = cm->MBs; - int thresh; - unsigned int total_skip; - - int min = 2000; - - if (cpi->oxcf.encode_breakout > 2000) - min = cpi->oxcf.encode_breakout; - - min >>= 7; - - for (i = 0; i < min; i++) - { - sum += cpi->mb.error_bins[i]; - } - - total_skip = sum; - sum = 0; - - /* i starts from 2 to make sure thresh started from 2048 */ - for (; i < 1024; i++) - { - sum += cpi->mb.error_bins[i]; - - if (10 * sum >= (unsigned int)(cpi->Speed - 6)*(total_mbs - total_skip)) - break; - } - - i--; - thresh = (i << 7); - - if (thresh < 2000) - thresh = 2000; - - if (ref_frames > 1) - { - sf->thresh_mult[THR_NEW1 ] = thresh; - sf->thresh_mult[THR_NEAREST1 ] = thresh >> 1; - sf->thresh_mult[THR_NEAR1 ] = thresh >> 1; - } - - if (ref_frames > 2) - { - sf->thresh_mult[THR_NEW2] = thresh << 1; - sf->thresh_mult[THR_NEAREST2 ] = thresh; - sf->thresh_mult[THR_NEAR2 ] = thresh; - } - - if (ref_frames > 3) - { - sf->thresh_mult[THR_NEW3] = thresh << 1; - sf->thresh_mult[THR_NEAREST3 ] = thresh; - sf->thresh_mult[THR_NEAR3 ] = thresh; - } - - sf->improved_mv_pred = 0; - } - - if (Speed > 8) - sf->quarter_pixel_search = 0; - - if(cm->version == 0) - { - cm->filter_type = NORMAL_LOOPFILTER; - - if (Speed >= 14) - cm->filter_type = SIMPLE_LOOPFILTER; - } - else - { - 
cm->filter_type = SIMPLE_LOOPFILTER; - } - - /* This has a big hit on quality. Last resort */ - if (Speed >= 15) - sf->half_pixel_search = 0; - - memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins)); - - }; /* switch */ - - /* Slow quant, dct and trellis not worthwhile for first pass - * so make sure they are always turned off. - */ - if ( cpi->pass == 1 ) - { + if (Speed > 0) { sf->improved_quant = 0; - sf->optimize_coefficients = 0; sf->improved_dct = 0; - } - if (cpi->sf.search_method == NSTEP) - { - vp8_init3smotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride); - } - else if (cpi->sf.search_method == DIAMOND) - { - vp8_init_dsmotion_compensation(&cpi->mb, cm->yv12_fb[cm->lst_fb_idx].y_stride); - } + sf->use_fastquant_for_pick = 1; + sf->no_skip_block4x4_search = 0; + sf->first_step = 1; + } - if (cpi->sf.improved_dct) - { - cpi->mb.short_fdct8x4 = vp8_short_fdct8x4; - cpi->mb.short_fdct4x4 = vp8_short_fdct4x4; - } - else - { - /* No fast FDCT defined for any platform at this time. 
*/ - cpi->mb.short_fdct8x4 = vp8_short_fdct8x4; - cpi->mb.short_fdct4x4 = vp8_short_fdct4x4; - } + if (Speed > 2) sf->auto_filter = 0; /* Faster selection of loop filter */ - cpi->mb.short_walsh4x4 = vp8_short_walsh4x4; + if (Speed > 3) { + sf->RD = 0; + sf->auto_filter = 1; + } - if (cpi->sf.improved_quant) - { - cpi->mb.quantize_b = vp8_regular_quantize_b; - } - else - { - cpi->mb.quantize_b = vp8_fast_quantize_b; - } - if (cpi->sf.improved_quant != last_improved_quant) - vp8cx_init_quantizer(cpi); + if (Speed > 4) { + sf->auto_filter = 0; /* Faster selection of loop filter */ + sf->search_method = HEX; + sf->iterative_sub_pixel = 0; + } - if (cpi->sf.iterative_sub_pixel == 1) - { - cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively; - } - else if (cpi->sf.quarter_pixel_search) - { - cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step; - } - else if (cpi->sf.half_pixel_search) - { - cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step; - } - else - { - cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step; - } + if (Speed > 6) { + unsigned int sum = 0; + unsigned int total_mbs = cm->MBs; + int thresh; + unsigned int total_skip; - if (cpi->sf.optimize_coefficients == 1 && cpi->pass!=1) - cpi->mb.optimize = 1; - else - cpi->mb.optimize = 0; + int min = 2000; - if (cpi->common.full_pixel) - cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step; + if (cpi->oxcf.encode_breakout > 2000) min = cpi->oxcf.encode_breakout; + + min >>= 7; + + for (i = 0; i < min; ++i) { + sum += cpi->mb.error_bins[i]; + } + + total_skip = sum; + sum = 0; + + /* i starts from 2 to make sure thresh started from 2048 */ + for (; i < 1024; ++i) { + sum += cpi->mb.error_bins[i]; + + if (10 * sum >= + (unsigned int)(cpi->Speed - 6) * (total_mbs - total_skip)) { + break; + } + } + + i--; + thresh = (i << 7); + + if (thresh < 2000) thresh = 2000; + + if (ref_frames > 1) { + sf->thresh_mult[THR_NEW1] = thresh; + sf->thresh_mult[THR_NEAREST1] = thresh >> 
1; + sf->thresh_mult[THR_NEAR1] = thresh >> 1; + } + + if (ref_frames > 2) { + sf->thresh_mult[THR_NEW2] = thresh << 1; + sf->thresh_mult[THR_NEAREST2] = thresh; + sf->thresh_mult[THR_NEAR2] = thresh; + } + + if (ref_frames > 3) { + sf->thresh_mult[THR_NEW3] = thresh << 1; + sf->thresh_mult[THR_NEAREST3] = thresh; + sf->thresh_mult[THR_NEAR3] = thresh; + } + + sf->improved_mv_pred = 0; + } + + if (Speed > 8) sf->quarter_pixel_search = 0; + + if (cm->version == 0) { + cm->filter_type = NORMAL_LOOPFILTER; + + if (Speed >= 14) cm->filter_type = SIMPLE_LOOPFILTER; + } else { + cm->filter_type = SIMPLE_LOOPFILTER; + } + + /* This has a big hit on quality. Last resort */ + if (Speed >= 15) sf->half_pixel_search = 0; + + memset(cpi->mb.error_bins, 0, sizeof(cpi->mb.error_bins)); + + }; /* switch */ + + /* Slow quant, dct and trellis not worthwhile for first pass + * so make sure they are always turned off. + */ + if (cpi->pass == 1) { + sf->improved_quant = 0; + sf->optimize_coefficients = 0; + sf->improved_dct = 0; + } + + if (cpi->sf.search_method == NSTEP) { + vp8_init3smotion_compensation(&cpi->mb, + cm->yv12_fb[cm->lst_fb_idx].y_stride); + } else if (cpi->sf.search_method == DIAMOND) { + vp8_init_dsmotion_compensation(&cpi->mb, + cm->yv12_fb[cm->lst_fb_idx].y_stride); + } + + if (cpi->sf.improved_dct) { + cpi->mb.short_fdct8x4 = vp8_short_fdct8x4; + cpi->mb.short_fdct4x4 = vp8_short_fdct4x4; + } else { + /* No fast FDCT defined for any platform at this time. 
*/ + cpi->mb.short_fdct8x4 = vp8_short_fdct8x4; + cpi->mb.short_fdct4x4 = vp8_short_fdct4x4; + } + + cpi->mb.short_walsh4x4 = vp8_short_walsh4x4; + + if (cpi->sf.improved_quant) { + cpi->mb.quantize_b = vp8_regular_quantize_b; + } else { + cpi->mb.quantize_b = vp8_fast_quantize_b; + } + if (cpi->sf.improved_quant != last_improved_quant) vp8cx_init_quantizer(cpi); + + if (cpi->sf.iterative_sub_pixel == 1) { + cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step_iteratively; + } else if (cpi->sf.quarter_pixel_search) { + cpi->find_fractional_mv_step = vp8_find_best_sub_pixel_step; + } else if (cpi->sf.half_pixel_search) { + cpi->find_fractional_mv_step = vp8_find_best_half_pixel_step; + } else { + cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step; + } + + if (cpi->sf.optimize_coefficients == 1 && cpi->pass != 1) { + cpi->mb.optimize = 1; + } else { + cpi->mb.optimize = 0; + } + + if (cpi->common.full_pixel) { + cpi->find_fractional_mv_step = vp8_skip_fractional_mv_step; + } #ifdef SPEEDSTATS - frames_at_speed[cpi->Speed]++; + frames_at_speed[cpi->Speed]++; #endif } #undef GOOD #undef RT -static void alloc_raw_frame_buffers(VP8_COMP *cpi) -{ +static void alloc_raw_frame_buffers(VP8_COMP *cpi) { #if VP8_TEMPORAL_ALT_REF - int width = (cpi->oxcf.Width + 15) & ~15; - int height = (cpi->oxcf.Height + 15) & ~15; + int width = (cpi->oxcf.Width + 15) & ~15; + int height = (cpi->oxcf.Height + 15) & ~15; #endif - cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height, - cpi->oxcf.lag_in_frames); - if(!cpi->lookahead) - vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, - "Failed to allocate lag buffers"); + cpi->lookahead = vp8_lookahead_init(cpi->oxcf.Width, cpi->oxcf.Height, + cpi->oxcf.lag_in_frames); + if (!cpi->lookahead) { + vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, + "Failed to allocate lag buffers"); + } #if VP8_TEMPORAL_ALT_REF - if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, - width, height, 
VP8BORDERINPIXELS)) - vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, - "Failed to allocate altref buffer"); + if (vp8_yv12_alloc_frame_buffer(&cpi->alt_ref_buffer, width, height, + VP8BORDERINPIXELS)) { + vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, + "Failed to allocate altref buffer"); + } #endif } - -static void dealloc_raw_frame_buffers(VP8_COMP *cpi) -{ +static void dealloc_raw_frame_buffers(VP8_COMP *cpi) { #if VP8_TEMPORAL_ALT_REF - vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer); + vp8_yv12_de_alloc_frame_buffer(&cpi->alt_ref_buffer); #endif - vp8_lookahead_destroy(cpi->lookahead); + vp8_lookahead_destroy(cpi->lookahead); } +static int vp8_alloc_partition_data(VP8_COMP *cpi) { + vpx_free(cpi->mb.pip); -static int vp8_alloc_partition_data(VP8_COMP *cpi) -{ - vpx_free(cpi->mb.pip); + cpi->mb.pip = + vpx_calloc((cpi->common.mb_cols + 1) * (cpi->common.mb_rows + 1), + sizeof(PARTITION_INFO)); + if (!cpi->mb.pip) return 1; - cpi->mb.pip = vpx_calloc((cpi->common.mb_cols + 1) * - (cpi->common.mb_rows + 1), - sizeof(PARTITION_INFO)); - if(!cpi->mb.pip) - return 1; + cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1; - cpi->mb.pi = cpi->mb.pip + cpi->common.mode_info_stride + 1; - - return 0; + return 0; } -void vp8_alloc_compressor_data(VP8_COMP *cpi) -{ - VP8_COMMON *cm = & cpi->common; +void vp8_alloc_compressor_data(VP8_COMP *cpi) { + VP8_COMMON *cm = &cpi->common; - int width = cm->Width; - int height = cm->Height; -#if CONFIG_MULTITHREAD - int prev_mb_rows = cm->mb_rows; -#endif + int width = cm->Width; + int height = cm->Height; - if (vp8_alloc_frame_buffers(cm, width, height)) - vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, - "Failed to allocate frame buffers"); + if (vp8_alloc_frame_buffers(cm, width, height)) { + vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, + "Failed to allocate frame buffers"); + } - if (vp8_alloc_partition_data(cpi)) - vpx_internal_error(&cpi->common.error, 
VPX_CODEC_MEM_ERROR, - "Failed to allocate partition data"); + if (vp8_alloc_partition_data(cpi)) { + vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, + "Failed to allocate partition data"); + } + if ((width & 0xf) != 0) width += 16 - (width & 0xf); - if ((width & 0xf) != 0) - width += 16 - (width & 0xf); + if ((height & 0xf) != 0) height += 16 - (height & 0xf); - if ((height & 0xf) != 0) - height += 16 - (height & 0xf); + if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame, width, height, + VP8BORDERINPIXELS)) { + vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, + "Failed to allocate last frame buffer"); + } + if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, width, height, + VP8BORDERINPIXELS)) { + vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, + "Failed to allocate scaled source buffer"); + } - if (vp8_yv12_alloc_frame_buffer(&cpi->pick_lf_lvl_frame, - width, height, VP8BORDERINPIXELS)) - vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, - "Failed to allocate last frame buffer"); + vpx_free(cpi->tok); - if (vp8_yv12_alloc_frame_buffer(&cpi->scaled_source, - width, height, VP8BORDERINPIXELS)) - vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, - "Failed to allocate scaled source buffer"); - - vpx_free(cpi->tok); - - { + { #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */ + unsigned int tokens = 8 * 24 * 16; /* one MB for each thread */ #else - unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16; + unsigned int tokens = cm->mb_rows * cm->mb_cols * 24 * 16; #endif - CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok))); - } + CHECK_MEM_ERROR(cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok))); + } - /* Data used for real time vc mode to see if gf needs refreshing */ - cpi->zeromv_count = 0; + /* Data used for real time vc mode to see if gf needs refreshing */ + cpi->zeromv_count = 0; + /* Structures used to monitor GF 
usage */ + vpx_free(cpi->gf_active_flags); + CHECK_MEM_ERROR( + cpi->gf_active_flags, + vpx_calloc(sizeof(*cpi->gf_active_flags), cm->mb_rows * cm->mb_cols)); + cpi->gf_active_count = cm->mb_rows * cm->mb_cols; - /* Structures used to monitor GF usage */ - vpx_free(cpi->gf_active_flags); - CHECK_MEM_ERROR(cpi->gf_active_flags, - vpx_calloc(sizeof(*cpi->gf_active_flags), - cm->mb_rows * cm->mb_cols)); - cpi->gf_active_count = cm->mb_rows * cm->mb_cols; + vpx_free(cpi->mb_activity_map); + CHECK_MEM_ERROR( + cpi->mb_activity_map, + vpx_calloc(sizeof(*cpi->mb_activity_map), cm->mb_rows * cm->mb_cols)); - vpx_free(cpi->mb_activity_map); - CHECK_MEM_ERROR(cpi->mb_activity_map, - vpx_calloc(sizeof(*cpi->mb_activity_map), - cm->mb_rows * cm->mb_cols)); + /* allocate memory for storing last frame's MVs for MV prediction. */ + vpx_free(cpi->lfmv); + CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2), + sizeof(*cpi->lfmv))); + vpx_free(cpi->lf_ref_frame_sign_bias); + CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias, + vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2), + sizeof(*cpi->lf_ref_frame_sign_bias))); + vpx_free(cpi->lf_ref_frame); + CHECK_MEM_ERROR(cpi->lf_ref_frame, + vpx_calloc((cm->mb_rows + 2) * (cm->mb_cols + 2), + sizeof(*cpi->lf_ref_frame))); - /* allocate memory for storing last frame's MVs for MV prediction. 
*/ - vpx_free(cpi->lfmv); - CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2), - sizeof(*cpi->lfmv))); - vpx_free(cpi->lf_ref_frame_sign_bias); - CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias, - vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2), - sizeof(*cpi->lf_ref_frame_sign_bias))); - vpx_free(cpi->lf_ref_frame); - CHECK_MEM_ERROR(cpi->lf_ref_frame, - vpx_calloc((cm->mb_rows+2) * (cm->mb_cols+2), - sizeof(*cpi->lf_ref_frame))); - - /* Create the encoder segmentation map and set all entries to 0 */ - vpx_free(cpi->segmentation_map); - CHECK_MEM_ERROR(cpi->segmentation_map, - vpx_calloc(cm->mb_rows * cm->mb_cols, - sizeof(*cpi->segmentation_map))); - cpi->cyclic_refresh_mode_index = 0; - vpx_free(cpi->active_map); - CHECK_MEM_ERROR(cpi->active_map, - vpx_calloc(cm->mb_rows * cm->mb_cols, - sizeof(*cpi->active_map))); - memset(cpi->active_map , 1, (cm->mb_rows * cm->mb_cols)); + /* Create the encoder segmentation map and set all entries to 0 */ + vpx_free(cpi->segmentation_map); + CHECK_MEM_ERROR( + cpi->segmentation_map, + vpx_calloc(cm->mb_rows * cm->mb_cols, sizeof(*cpi->segmentation_map))); + cpi->cyclic_refresh_mode_index = 0; + vpx_free(cpi->active_map); + CHECK_MEM_ERROR(cpi->active_map, vpx_calloc(cm->mb_rows * cm->mb_cols, + sizeof(*cpi->active_map))); + memset(cpi->active_map, 1, (cm->mb_rows * cm->mb_cols)); #if CONFIG_MULTITHREAD - if (width < 640) - cpi->mt_sync_range = 1; - else if (width <= 1280) - cpi->mt_sync_range = 4; - else if (width <= 2560) - cpi->mt_sync_range = 8; - else - cpi->mt_sync_range = 16; + if (width < 640) { + cpi->mt_sync_range = 1; + } else if (width <= 1280) { + cpi->mt_sync_range = 4; + } else if (width <= 2560) { + cpi->mt_sync_range = 8; + } else { + cpi->mt_sync_range = 16; + } - if (cpi->oxcf.multi_threaded > 1) - { - int i; - - /* De-allocate and re-allocate mutex */ - if (cpi->pmutex != NULL) { - for (i = 0; i < prev_mb_rows; i++) { - pthread_mutex_destroy(&cpi->pmutex[i]); - } - vpx_free(cpi->pmutex); - 
cpi->pmutex = NULL; - } - - CHECK_MEM_ERROR(cpi->pmutex, vpx_malloc(sizeof(*cpi->pmutex) * - cm->mb_rows)); - if (cpi->pmutex) { - for (i = 0; i < cm->mb_rows; i++) { - pthread_mutex_init(&cpi->pmutex[i], NULL); - } - } - - vpx_free(cpi->mt_current_mb_col); - CHECK_MEM_ERROR(cpi->mt_current_mb_col, + if (cpi->oxcf.multi_threaded > 1) { + vpx_free(cpi->mt_current_mb_col); + CHECK_MEM_ERROR(cpi->mt_current_mb_col, vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows)); - } + } #endif - vpx_free(cpi->tplist); - CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows)); + vpx_free(cpi->tplist); + CHECK_MEM_ERROR(cpi->tplist, vpx_malloc(sizeof(TOKENLIST) * cm->mb_rows)); #if CONFIG_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity > 0) { - vp8_denoiser_free(&cpi->denoiser); - vp8_denoiser_allocate(&cpi->denoiser, width, height, - cm->mb_rows, cm->mb_cols, - cpi->oxcf.noise_sensitivity); + if (cpi->oxcf.noise_sensitivity > 0) { + vp8_denoiser_free(&cpi->denoiser); + if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows, + cm->mb_cols, cpi->oxcf.noise_sensitivity)) { + vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, + "Failed to allocate denoiser"); } + } #endif } - /* Quant MOD */ -static const int q_trans[] = -{ - 0, 1, 2, 3, 4, 5, 7, 8, - 9, 10, 12, 13, 15, 17, 18, 19, - 20, 21, 23, 24, 25, 26, 27, 28, - 29, 30, 31, 33, 35, 37, 39, 41, - 43, 45, 47, 49, 51, 53, 55, 57, - 59, 61, 64, 67, 70, 73, 76, 79, - 82, 85, 88, 91, 94, 97, 100, 103, - 106, 109, 112, 115, 118, 121, 124, 127, +static const int q_trans[] = { + 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 15, 17, 18, 19, + 20, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33, 35, 37, 39, 41, + 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 64, 67, 70, 73, 76, 79, + 82, 85, 88, 91, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127, }; -int vp8_reverse_trans(int x) -{ - int i; +int vp8_reverse_trans(int x) { + int i; - for (i = 0; i < 64; i++) - if (q_trans[i] >= x) - return i; + 
for (i = 0; i < 64; ++i) { + if (q_trans[i] >= x) return i; + } - return 63; + return 63; } -void vp8_new_framerate(VP8_COMP *cpi, double framerate) -{ - if(framerate < .1) - framerate = 30; +void vp8_new_framerate(VP8_COMP *cpi, double framerate) { + if (framerate < .1) framerate = 30; - cpi->framerate = framerate; - cpi->output_framerate = framerate; - cpi->per_frame_bandwidth = (int)(cpi->oxcf.target_bandwidth / - cpi->output_framerate); - cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth; - cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth * - cpi->oxcf.two_pass_vbrmin_section / 100); + cpi->framerate = framerate; + cpi->output_framerate = framerate; + cpi->per_frame_bandwidth = + (int)(cpi->oxcf.target_bandwidth / cpi->output_framerate); + cpi->av_per_frame_bandwidth = cpi->per_frame_bandwidth; + cpi->min_frame_bandwidth = (int)(cpi->av_per_frame_bandwidth * + cpi->oxcf.two_pass_vbrmin_section / 100); - /* Set Maximum gf/arf interval */ - cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2); + /* Set Maximum gf/arf interval */ + cpi->max_gf_interval = ((int)(cpi->output_framerate / 2.0) + 2); - if(cpi->max_gf_interval < 12) - cpi->max_gf_interval = 12; + if (cpi->max_gf_interval < 12) cpi->max_gf_interval = 12; - /* Extended interval for genuinely static scenes */ - cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1; + /* Extended interval for genuinely static scenes */ + cpi->twopass.static_scene_max_gf_interval = cpi->key_frame_frequency >> 1; - /* Special conditions when altr ref frame enabled in lagged compress mode */ - if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) - { - if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) - cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1; - - if (cpi->twopass.static_scene_max_gf_interval > cpi->oxcf.lag_in_frames - 1) - cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1; + /* Special conditions when altr ref frame enabled in lagged 
compress mode */ + if (cpi->oxcf.play_alternate && cpi->oxcf.lag_in_frames) { + if (cpi->max_gf_interval > cpi->oxcf.lag_in_frames - 1) { + cpi->max_gf_interval = cpi->oxcf.lag_in_frames - 1; } - if ( cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval ) - cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval; + if (cpi->twopass.static_scene_max_gf_interval > + cpi->oxcf.lag_in_frames - 1) { + cpi->twopass.static_scene_max_gf_interval = cpi->oxcf.lag_in_frames - 1; + } + } + + if (cpi->max_gf_interval > cpi->twopass.static_scene_max_gf_interval) { + cpi->max_gf_interval = cpi->twopass.static_scene_max_gf_interval; + } } +static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) { + VP8_COMMON *cm = &cpi->common; -static void init_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) -{ - VP8_COMMON *cm = &cpi->common; + cpi->oxcf = *oxcf; - cpi->oxcf = *oxcf; + cpi->auto_gold = 1; + cpi->auto_adjust_gold_quantizer = 1; - cpi->auto_gold = 1; - cpi->auto_adjust_gold_quantizer = 1; + cm->version = oxcf->Version; + vp8_setup_version(cm); - cm->version = oxcf->Version; - vp8_setup_version(cm); + /* Frame rate is not available on the first frame, as it's derived from + * the observed timestamps. The actual value used here doesn't matter + * too much, as it will adapt quickly. + */ + if (oxcf->timebase.num > 0) { + cpi->framerate = + (double)(oxcf->timebase.den) / (double)(oxcf->timebase.num); + } else { + cpi->framerate = 30; + } - /* Frame rate is not available on the first frame, as it's derived from - * the observed timestamps. The actual value used here doesn't matter - * too much, as it will adapt quickly. - */ - if (oxcf->timebase.num > 0) { - cpi->framerate = (double)(oxcf->timebase.den) / - (double)(oxcf->timebase.num); - } else { - cpi->framerate = 30; - } - - /* If the reciprocal of the timebase seems like a reasonable framerate, - * then use that as a guess, otherwise use 30. 
- */ - if (cpi->framerate > 180) - cpi->framerate = 30; - - cpi->ref_framerate = cpi->framerate; - - cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME; - - cm->refresh_golden_frame = 0; - cm->refresh_last_frame = 1; - cm->refresh_entropy_probs = 1; - - /* change includes all joint functionality */ - vp8_change_config(cpi, oxcf); - - /* Initialize active best and worst q and average q values. */ - cpi->active_worst_quality = cpi->oxcf.worst_allowed_q; - cpi->active_best_quality = cpi->oxcf.best_allowed_q; - cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q; - - /* Initialise the starting buffer levels */ - cpi->buffer_level = cpi->oxcf.starting_buffer_level; - cpi->bits_off_target = cpi->oxcf.starting_buffer_level; - - cpi->rolling_target_bits = cpi->av_per_frame_bandwidth; - cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth; - cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth; - cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth; - - cpi->total_actual_bits = 0; - cpi->total_target_vs_actual = 0; - - /* Temporal scalabilty */ - if (cpi->oxcf.number_of_layers > 1) - { - unsigned int i; - double prev_layer_framerate=0; - - for (i=0; ioxcf.number_of_layers; i++) - { - init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate); - prev_layer_framerate = cpi->output_framerate / - cpi->oxcf.rate_decimator[i]; - } + /* If the reciprocal of the timebase seems like a reasonable framerate, + * then use that as a guess, otherwise use 30. + */ + if (cpi->framerate > 180) cpi->framerate = 30; + + cpi->ref_framerate = cpi->framerate; + + cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME; + + cm->refresh_golden_frame = 0; + cm->refresh_last_frame = 1; + cm->refresh_entropy_probs = 1; + + /* change includes all joint functionality */ + vp8_change_config(cpi, oxcf); + + /* Initialize active best and worst q and average q values. 
*/ + cpi->active_worst_quality = cpi->oxcf.worst_allowed_q; + cpi->active_best_quality = cpi->oxcf.best_allowed_q; + cpi->avg_frame_qindex = cpi->oxcf.worst_allowed_q; + + /* Initialise the starting buffer levels */ + cpi->buffer_level = cpi->oxcf.starting_buffer_level; + cpi->bits_off_target = cpi->oxcf.starting_buffer_level; + + cpi->rolling_target_bits = cpi->av_per_frame_bandwidth; + cpi->rolling_actual_bits = cpi->av_per_frame_bandwidth; + cpi->long_rolling_target_bits = cpi->av_per_frame_bandwidth; + cpi->long_rolling_actual_bits = cpi->av_per_frame_bandwidth; + + cpi->total_actual_bits = 0; + cpi->total_target_vs_actual = 0; + + /* Temporal scalabilty */ + if (cpi->oxcf.number_of_layers > 1) { + unsigned int i; + double prev_layer_framerate = 0; + + for (i = 0; i < cpi->oxcf.number_of_layers; ++i) { + init_temporal_layer_context(cpi, oxcf, i, prev_layer_framerate); + prev_layer_framerate = + cpi->output_framerate / cpi->oxcf.rate_decimator[i]; } + } #if VP8_TEMPORAL_ALT_REF - { - int i; + { + int i; - cpi->fixed_divide[0] = 0; + cpi->fixed_divide[0] = 0; - for (i = 1; i < 512; i++) - cpi->fixed_divide[i] = 0x80000 / i; - } + for (i = 1; i < 512; ++i) cpi->fixed_divide[i] = 0x80000 / i; + } #endif } -static void update_layer_contexts (VP8_COMP *cpi) -{ - VP8_CONFIG *oxcf = &cpi->oxcf; +static void update_layer_contexts(VP8_COMP *cpi) { + VP8_CONFIG *oxcf = &cpi->oxcf; - /* Update snapshots of the layer contexts to reflect new parameters */ - if (oxcf->number_of_layers > 1) - { - unsigned int i; - double prev_layer_framerate=0; + /* Update snapshots of the layer contexts to reflect new parameters */ + if (oxcf->number_of_layers > 1) { + unsigned int i; + double prev_layer_framerate = 0; - assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS); - for (i = 0; i < oxcf->number_of_layers && i < VPX_TS_MAX_LAYERS; ++i) - { - LAYER_CONTEXT *lc = &cpi->layer_context[i]; + assert(oxcf->number_of_layers <= VPX_TS_MAX_LAYERS); + for (i = 0; i < oxcf->number_of_layers && i 
< VPX_TS_MAX_LAYERS; ++i) { + LAYER_CONTEXT *lc = &cpi->layer_context[i]; - lc->framerate = - cpi->ref_framerate / oxcf->rate_decimator[i]; - lc->target_bandwidth = oxcf->target_bitrate[i] * 1000; + lc->framerate = cpi->ref_framerate / oxcf->rate_decimator[i]; + lc->target_bandwidth = oxcf->target_bitrate[i] * 1000; - lc->starting_buffer_level = rescale( - (int)oxcf->starting_buffer_level_in_ms, - lc->target_bandwidth, 1000); + lc->starting_buffer_level = rescale( + (int)oxcf->starting_buffer_level_in_ms, lc->target_bandwidth, 1000); - if (oxcf->optimal_buffer_level == 0) - lc->optimal_buffer_level = lc->target_bandwidth / 8; - else - lc->optimal_buffer_level = rescale( - (int)oxcf->optimal_buffer_level_in_ms, - lc->target_bandwidth, 1000); + if (oxcf->optimal_buffer_level == 0) { + lc->optimal_buffer_level = lc->target_bandwidth / 8; + } else { + lc->optimal_buffer_level = rescale( + (int)oxcf->optimal_buffer_level_in_ms, lc->target_bandwidth, 1000); + } - if (oxcf->maximum_buffer_size == 0) - lc->maximum_buffer_size = lc->target_bandwidth / 8; - else - lc->maximum_buffer_size = rescale( - (int)oxcf->maximum_buffer_size_in_ms, - lc->target_bandwidth, 1000); + if (oxcf->maximum_buffer_size == 0) { + lc->maximum_buffer_size = lc->target_bandwidth / 8; + } else { + lc->maximum_buffer_size = rescale((int)oxcf->maximum_buffer_size_in_ms, + lc->target_bandwidth, 1000); + } - /* Work out the average size of a frame within this layer */ - if (i > 0) - lc->avg_frame_size_for_layer = - (int)((oxcf->target_bitrate[i] - - oxcf->target_bitrate[i-1]) * 1000 / - (lc->framerate - prev_layer_framerate)); + /* Work out the average size of a frame within this layer */ + if (i > 0) { + lc->avg_frame_size_for_layer = + (int)((oxcf->target_bitrate[i] - oxcf->target_bitrate[i - 1]) * + 1000 / (lc->framerate - prev_layer_framerate)); + } - prev_layer_framerate = lc->framerate; - } + prev_layer_framerate = lc->framerate; } + } } -void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) -{ 
- VP8_COMMON *cm = &cpi->common; - int last_w, last_h, prev_number_of_layers; +void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) { + VP8_COMMON *cm = &cpi->common; + int last_w, last_h; + unsigned int prev_number_of_layers; - if (!cpi) - return; + if (!cpi) return; - if (!oxcf) - return; + if (!oxcf) return; -#if CONFIG_MULTITHREAD - /* wait for the last picture loopfilter thread done */ - if (cpi->b_lpf_running) - { - sem_wait(&cpi->h_event_end_lpf); - cpi->b_lpf_running = 0; - } -#endif + if (cm->version != oxcf->Version) { + cm->version = oxcf->Version; + vp8_setup_version(cm); + } - if (cm->version != oxcf->Version) - { - cm->version = oxcf->Version; - vp8_setup_version(cm); - } + last_w = cpi->oxcf.Width; + last_h = cpi->oxcf.Height; + prev_number_of_layers = cpi->oxcf.number_of_layers; - last_w = cpi->oxcf.Width; - last_h = cpi->oxcf.Height; - prev_number_of_layers = cpi->oxcf.number_of_layers; - - cpi->oxcf = *oxcf; - - switch (cpi->oxcf.Mode) - { + cpi->oxcf = *oxcf; + switch (cpi->oxcf.Mode) { case MODE_REALTIME: - cpi->pass = 0; - cpi->compressor_speed = 2; + cpi->pass = 0; + cpi->compressor_speed = 2; - if (cpi->oxcf.cpu_used < -16) - { - cpi->oxcf.cpu_used = -16; - } + if (cpi->oxcf.cpu_used < -16) { + cpi->oxcf.cpu_used = -16; + } - if (cpi->oxcf.cpu_used > 16) - cpi->oxcf.cpu_used = 16; + if (cpi->oxcf.cpu_used > 16) cpi->oxcf.cpu_used = 16; - break; + break; case MODE_GOODQUALITY: - cpi->pass = 0; - cpi->compressor_speed = 1; + cpi->pass = 0; + cpi->compressor_speed = 1; - if (cpi->oxcf.cpu_used < -5) - { - cpi->oxcf.cpu_used = -5; - } + if (cpi->oxcf.cpu_used < -5) { + cpi->oxcf.cpu_used = -5; + } - if (cpi->oxcf.cpu_used > 5) - cpi->oxcf.cpu_used = 5; + if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5; - break; + break; case MODE_BESTQUALITY: - cpi->pass = 0; - cpi->compressor_speed = 0; - break; + cpi->pass = 0; + cpi->compressor_speed = 0; + break; case MODE_FIRSTPASS: - cpi->pass = 1; - cpi->compressor_speed = 1; - break; + cpi->pass = 
1; + cpi->compressor_speed = 1; + break; case MODE_SECONDPASS: - cpi->pass = 2; - cpi->compressor_speed = 1; + cpi->pass = 2; + cpi->compressor_speed = 1; - if (cpi->oxcf.cpu_used < -5) - { - cpi->oxcf.cpu_used = -5; - } + if (cpi->oxcf.cpu_used < -5) { + cpi->oxcf.cpu_used = -5; + } - if (cpi->oxcf.cpu_used > 5) - cpi->oxcf.cpu_used = 5; + if (cpi->oxcf.cpu_used > 5) cpi->oxcf.cpu_used = 5; - break; + break; case MODE_SECONDPASS_BEST: - cpi->pass = 2; - cpi->compressor_speed = 0; - break; + cpi->pass = 2; + cpi->compressor_speed = 0; + break; + } + + if (cpi->pass == 0) cpi->auto_worst_q = 1; + + cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q]; + cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q]; + cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level]; + + if (oxcf->fixed_q >= 0) { + if (oxcf->worst_allowed_q < 0) { + cpi->oxcf.fixed_q = q_trans[0]; + } else { + cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q]; } - if (cpi->pass == 0) - cpi->auto_worst_q = 1; - - cpi->oxcf.worst_allowed_q = q_trans[oxcf->worst_allowed_q]; - cpi->oxcf.best_allowed_q = q_trans[oxcf->best_allowed_q]; - cpi->oxcf.cq_level = q_trans[cpi->oxcf.cq_level]; - - if (oxcf->fixed_q >= 0) - { - if (oxcf->worst_allowed_q < 0) - cpi->oxcf.fixed_q = q_trans[0]; - else - cpi->oxcf.fixed_q = q_trans[oxcf->worst_allowed_q]; - - if (oxcf->alt_q < 0) - cpi->oxcf.alt_q = q_trans[0]; - else - cpi->oxcf.alt_q = q_trans[oxcf->alt_q]; - - if (oxcf->key_q < 0) - cpi->oxcf.key_q = q_trans[0]; - else - cpi->oxcf.key_q = q_trans[oxcf->key_q]; - - if (oxcf->gold_q < 0) - cpi->oxcf.gold_q = q_trans[0]; - else - cpi->oxcf.gold_q = q_trans[oxcf->gold_q]; - + if (oxcf->alt_q < 0) { + cpi->oxcf.alt_q = q_trans[0]; + } else { + cpi->oxcf.alt_q = q_trans[oxcf->alt_q]; } - cpi->baseline_gf_interval = - cpi->oxcf.alt_freq ? 
cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL; + if (oxcf->key_q < 0) { + cpi->oxcf.key_q = q_trans[0]; + } else { + cpi->oxcf.key_q = q_trans[oxcf->key_q]; + } + + if (oxcf->gold_q < 0) { + cpi->oxcf.gold_q = q_trans[0]; + } else { + cpi->oxcf.gold_q = q_trans[oxcf->gold_q]; + } + } + + cpi->baseline_gf_interval = + cpi->oxcf.alt_freq ? cpi->oxcf.alt_freq : DEFAULT_GF_INTERVAL; #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) - cpi->oxcf.token_partitions = 3; + cpi->oxcf.token_partitions = 3; #endif - if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) - cm->multi_token_partition = - (TOKEN_PARTITION) cpi->oxcf.token_partitions; + if (cpi->oxcf.token_partitions >= 0 && cpi->oxcf.token_partitions <= 3) { + cm->multi_token_partition = (TOKEN_PARTITION)cpi->oxcf.token_partitions; + } - setup_features(cpi); + setup_features(cpi); - { - int i; + { + int i; - for (i = 0; i < MAX_MB_SEGMENTS; i++) - cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout; + for (i = 0; i < MAX_MB_SEGMENTS; ++i) { + cpi->segment_encode_breakout[i] = cpi->oxcf.encode_breakout; } + } - /* At the moment the first order values may not be > MAXQ */ - if (cpi->oxcf.fixed_q > MAXQ) - cpi->oxcf.fixed_q = MAXQ; + /* At the moment the first order values may not be > MAXQ */ + if (cpi->oxcf.fixed_q > MAXQ) cpi->oxcf.fixed_q = MAXQ; - /* local file playback mode == really big buffer */ - if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) - { - cpi->oxcf.starting_buffer_level = 60000; - cpi->oxcf.optimal_buffer_level = 60000; - cpi->oxcf.maximum_buffer_size = 240000; - cpi->oxcf.starting_buffer_level_in_ms = 60000; - cpi->oxcf.optimal_buffer_level_in_ms = 60000; - cpi->oxcf.maximum_buffer_size_in_ms = 240000; + /* local file playback mode == really big buffer */ + if (cpi->oxcf.end_usage == USAGE_LOCAL_FILE_PLAYBACK) { + cpi->oxcf.starting_buffer_level = 60000; + cpi->oxcf.optimal_buffer_level = 60000; + cpi->oxcf.maximum_buffer_size = 240000; + 
cpi->oxcf.starting_buffer_level_in_ms = 60000; + cpi->oxcf.optimal_buffer_level_in_ms = 60000; + cpi->oxcf.maximum_buffer_size_in_ms = 240000; + } + + /* Convert target bandwidth from Kbit/s to Bit/s */ + cpi->oxcf.target_bandwidth *= 1000; + + cpi->oxcf.starting_buffer_level = rescale( + (int)cpi->oxcf.starting_buffer_level, cpi->oxcf.target_bandwidth, 1000); + + /* Set or reset optimal and maximum buffer levels. */ + if (cpi->oxcf.optimal_buffer_level == 0) { + cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8; + } else { + cpi->oxcf.optimal_buffer_level = rescale( + (int)cpi->oxcf.optimal_buffer_level, cpi->oxcf.target_bandwidth, 1000); + } + + if (cpi->oxcf.maximum_buffer_size == 0) { + cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8; + } else { + cpi->oxcf.maximum_buffer_size = rescale((int)cpi->oxcf.maximum_buffer_size, + cpi->oxcf.target_bandwidth, 1000); + } + // Under a configuration change, where maximum_buffer_size may change, + // keep buffer level clipped to the maximum allowed buffer size. + if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) { + cpi->bits_off_target = cpi->oxcf.maximum_buffer_size; + cpi->buffer_level = cpi->bits_off_target; + } + + /* Set up frame rate and related parameters rate control values. 
*/ + vp8_new_framerate(cpi, cpi->framerate); + + /* Set absolute upper and lower quality limits */ + cpi->worst_quality = cpi->oxcf.worst_allowed_q; + cpi->best_quality = cpi->oxcf.best_allowed_q; + + /* active values should only be modified if out of new range */ + if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) { + cpi->active_worst_quality = cpi->oxcf.worst_allowed_q; + } + /* less likely */ + else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) { + cpi->active_worst_quality = cpi->oxcf.best_allowed_q; + } + if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) { + cpi->active_best_quality = cpi->oxcf.best_allowed_q; + } + /* less likely */ + else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) { + cpi->active_best_quality = cpi->oxcf.worst_allowed_q; + } + + cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0; + + cpi->cq_target_quality = cpi->oxcf.cq_level; + + /* Only allow dropped frames in buffered mode */ + cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode; + + cpi->target_bandwidth = cpi->oxcf.target_bandwidth; + + // Check if the number of temporal layers has changed, and if so reset the + // pattern counter and set/initialize the temporal layer context for the + // new layer configuration. + if (cpi->oxcf.number_of_layers != prev_number_of_layers) { + // If the number of temporal layers are changed we must start at the + // base of the pattern cycle, so set the layer id to 0 and reset + // the temporal pattern counter. 
+ if (cpi->temporal_layer_id > 0) { + cpi->temporal_layer_id = 0; } + cpi->temporal_pattern_counter = 0; + reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers); + } - /* Convert target bandwidth from Kbit/s to Bit/s */ - cpi->oxcf.target_bandwidth *= 1000; + if (!cpi->initial_width) { + cpi->initial_width = cpi->oxcf.Width; + cpi->initial_height = cpi->oxcf.Height; + } - cpi->oxcf.starting_buffer_level = - rescale((int)cpi->oxcf.starting_buffer_level, - cpi->oxcf.target_bandwidth, 1000); + cm->Width = cpi->oxcf.Width; + cm->Height = cpi->oxcf.Height; + assert(cm->Width <= cpi->initial_width); + assert(cm->Height <= cpi->initial_height); - /* Set or reset optimal and maximum buffer levels. */ - if (cpi->oxcf.optimal_buffer_level == 0) - cpi->oxcf.optimal_buffer_level = cpi->oxcf.target_bandwidth / 8; - else - cpi->oxcf.optimal_buffer_level = - rescale((int)cpi->oxcf.optimal_buffer_level, - cpi->oxcf.target_bandwidth, 1000); + /* TODO(jkoleszar): if an internal spatial resampling is active, + * and we downsize the input image, maybe we should clear the + * internal scale immediately rather than waiting for it to + * correct. + */ - if (cpi->oxcf.maximum_buffer_size == 0) - cpi->oxcf.maximum_buffer_size = cpi->oxcf.target_bandwidth / 8; - else - cpi->oxcf.maximum_buffer_size = - rescale((int)cpi->oxcf.maximum_buffer_size, - cpi->oxcf.target_bandwidth, 1000); - // Under a configuration change, where maximum_buffer_size may change, - // keep buffer level clipped to the maximum allowed buffer size. - if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) { - cpi->bits_off_target = cpi->oxcf.maximum_buffer_size; - cpi->buffer_level = cpi->bits_off_target; - } + /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */ + if (cpi->oxcf.Sharpness > 7) cpi->oxcf.Sharpness = 7; - /* Set up frame rate and related parameters rate control values. 
*/ - vp8_new_framerate(cpi, cpi->framerate); + cm->sharpness_level = cpi->oxcf.Sharpness; - /* Set absolute upper and lower quality limits */ - cpi->worst_quality = cpi->oxcf.worst_allowed_q; - cpi->best_quality = cpi->oxcf.best_allowed_q; + if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL) { + int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs); + int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs); - /* active values should only be modified if out of new range */ - if (cpi->active_worst_quality > cpi->oxcf.worst_allowed_q) - { - cpi->active_worst_quality = cpi->oxcf.worst_allowed_q; - } - /* less likely */ - else if (cpi->active_worst_quality < cpi->oxcf.best_allowed_q) - { - cpi->active_worst_quality = cpi->oxcf.best_allowed_q; - } - if (cpi->active_best_quality < cpi->oxcf.best_allowed_q) - { - cpi->active_best_quality = cpi->oxcf.best_allowed_q; - } - /* less likely */ - else if (cpi->active_best_quality > cpi->oxcf.worst_allowed_q) - { - cpi->active_best_quality = cpi->oxcf.worst_allowed_q; - } + Scale2Ratio(cm->horiz_scale, &hr, &hs); + Scale2Ratio(cm->vert_scale, &vr, &vs); - cpi->buffered_mode = cpi->oxcf.optimal_buffer_level > 0; + /* always go to the next whole number */ + cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs; + cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs; + } - cpi->cq_target_quality = cpi->oxcf.cq_level; + if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height) { + cpi->force_next_frame_intra = 1; + } - /* Only allow dropped frames in buffered mode */ - cpi->drop_frames_allowed = cpi->oxcf.allow_df && cpi->buffered_mode; + if (((cm->Width + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_width || + ((cm->Height + 15) & ~15) != cm->yv12_fb[cm->lst_fb_idx].y_height || + cm->yv12_fb[cm->lst_fb_idx].y_width == 0) { + dealloc_raw_frame_buffers(cpi); + alloc_raw_frame_buffers(cpi); + vp8_alloc_compressor_data(cpi); + } - cpi->target_bandwidth = cpi->oxcf.target_bandwidth; + if (cpi->oxcf.fixed_q >= 0) { + cpi->last_q[0] 
= cpi->oxcf.fixed_q; + cpi->last_q[1] = cpi->oxcf.fixed_q; + } - // Check if the number of temporal layers has changed, and if so reset the - // pattern counter and set/initialize the temporal layer context for the - // new layer configuration. - if (cpi->oxcf.number_of_layers != prev_number_of_layers) - { - // If the number of temporal layers are changed we must start at the - // base of the pattern cycle, so set the layer id to 0 and reset - // the temporal pattern counter. - if (cpi->temporal_layer_id > 0) { - cpi->temporal_layer_id = 0; - } - cpi->temporal_pattern_counter = 0; - reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers); - } + cpi->Speed = cpi->oxcf.cpu_used; - if (!cpi->initial_width) - { - cpi->initial_width = cpi->oxcf.Width; - cpi->initial_height = cpi->oxcf.Height; - } + /* force to allowlag to 0 if lag_in_frames is 0; */ + if (cpi->oxcf.lag_in_frames == 0) { + cpi->oxcf.allow_lag = 0; + } + /* Limit on lag buffers as these are not currently dynamically allocated */ + else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) { + cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS; + } - cm->Width = cpi->oxcf.Width; - cm->Height = cpi->oxcf.Height; - assert(cm->Width <= cpi->initial_width); - assert(cm->Height <= cpi->initial_height); - - /* TODO(jkoleszar): if an internal spatial resampling is active, - * and we downsize the input image, maybe we should clear the - * internal scale immediately rather than waiting for it to - * correct. 
- */ - - /* VP8 sharpness level mapping 0-7 (vs 0-10 in general VPx dialogs) */ - if (cpi->oxcf.Sharpness > 7) - cpi->oxcf.Sharpness = 7; - - cm->sharpness_level = cpi->oxcf.Sharpness; - - if (cm->horiz_scale != NORMAL || cm->vert_scale != NORMAL) - { - int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs); - int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs); - - Scale2Ratio(cm->horiz_scale, &hr, &hs); - Scale2Ratio(cm->vert_scale, &vr, &vs); - - /* always go to the next whole number */ - cm->Width = (hs - 1 + cpi->oxcf.Width * hr) / hs; - cm->Height = (vs - 1 + cpi->oxcf.Height * vr) / vs; - } - - if (last_w != cpi->oxcf.Width || last_h != cpi->oxcf.Height) - cpi->force_next_frame_intra = 1; - - if (((cm->Width + 15) & 0xfffffff0) != - cm->yv12_fb[cm->lst_fb_idx].y_width || - ((cm->Height + 15) & 0xfffffff0) != - cm->yv12_fb[cm->lst_fb_idx].y_height || - cm->yv12_fb[cm->lst_fb_idx].y_width == 0) - { - dealloc_raw_frame_buffers(cpi); - alloc_raw_frame_buffers(cpi); - vp8_alloc_compressor_data(cpi); - } - - if (cpi->oxcf.fixed_q >= 0) - { - cpi->last_q[0] = cpi->oxcf.fixed_q; - cpi->last_q[1] = cpi->oxcf.fixed_q; - } - - cpi->Speed = cpi->oxcf.cpu_used; - - /* force to allowlag to 0 if lag_in_frames is 0; */ - if (cpi->oxcf.lag_in_frames == 0) - { - cpi->oxcf.allow_lag = 0; - } - /* Limit on lag buffers as these are not currently dynamically allocated */ - else if (cpi->oxcf.lag_in_frames > MAX_LAG_BUFFERS) - cpi->oxcf.lag_in_frames = MAX_LAG_BUFFERS; - - /* YX Temp */ - cpi->alt_ref_source = NULL; - cpi->is_src_frame_alt_ref = 0; + /* YX Temp */ + cpi->alt_ref_source = NULL; + cpi->is_src_frame_alt_ref = 0; #if CONFIG_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity) - { - if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc) - { - int width = (cpi->oxcf.Width + 15) & ~15; - int height = (cpi->oxcf.Height + 15) & ~15; - vp8_denoiser_allocate(&cpi->denoiser, width, height, - cm->mb_rows, cm->mb_cols, - cpi->oxcf.noise_sensitivity); + if 
(cpi->oxcf.noise_sensitivity) { + if (!cpi->denoiser.yv12_mc_running_avg.buffer_alloc) { + int width = (cpi->oxcf.Width + 15) & ~15; + int height = (cpi->oxcf.Height + 15) & ~15; + if (vp8_denoiser_allocate(&cpi->denoiser, width, height, cm->mb_rows, + cm->mb_cols, cpi->oxcf.noise_sensitivity)) { + vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, + "Failed to allocate denoiser"); } } + } #endif #if 0 @@ -1844,95 +1654,90 @@ void vp8_change_config(VP8_COMP *cpi, VP8_CONFIG *oxcf) cpi->frame_distortion = 0; cpi->last_frame_distortion = 0; #endif - } #ifndef M_LOG2_E #define M_LOG2_E 0.693147180559945309417 #endif -#define log2f(x) (log (x) / (float) M_LOG2_E) +#define log2f(x) (log(x) / (float)M_LOG2_E) -static void cal_mvsadcosts(int *mvsadcost[2]) -{ - int i = 1; +static void cal_mvsadcosts(int *mvsadcost[2]) { + int i = 1; - mvsadcost [0] [0] = 300; - mvsadcost [1] [0] = 300; + mvsadcost[0][0] = 300; + mvsadcost[1][0] = 300; - do - { - double z = 256 * (2 * (log2f(8 * i) + .6)); - mvsadcost [0][i] = (int) z; - mvsadcost [1][i] = (int) z; - mvsadcost [0][-i] = (int) z; - mvsadcost [1][-i] = (int) z; - } - while (++i <= mvfp_max); + do { + double z = 256 * (2 * (log2f(8 * i) + .6)); + mvsadcost[0][i] = (int)z; + mvsadcost[1][i] = (int)z; + mvsadcost[0][-i] = (int)z; + mvsadcost[1][-i] = (int)z; + } while (++i <= mvfp_max); } -struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf) -{ - int i; +struct VP8_COMP *vp8_create_compressor(VP8_CONFIG *oxcf) { + int i; - VP8_COMP *cpi; - VP8_COMMON *cm; + VP8_COMP *cpi; + VP8_COMMON *cm; - cpi = vpx_memalign(32, sizeof(VP8_COMP)); - /* Check that the CPI instance is valid */ - if (!cpi) - return 0; + cpi = vpx_memalign(32, sizeof(VP8_COMP)); + /* Check that the CPI instance is valid */ + if (!cpi) return 0; - cm = &cpi->common; + cm = &cpi->common; - memset(cpi, 0, sizeof(VP8_COMP)); + memset(cpi, 0, sizeof(VP8_COMP)); - if (setjmp(cm->error.jmp)) - { - cpi->common.error.setjmp = 0; - vp8_remove_compressor(&cpi); 
- return 0; - } + if (setjmp(cm->error.jmp)) { + cpi->common.error.setjmp = 0; + vp8_remove_compressor(&cpi); + return 0; + } - cpi->common.error.setjmp = 1; + cpi->common.error.setjmp = 1; - CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site), (MAX_MVSEARCH_STEPS * 8) + 1)); + CHECK_MEM_ERROR(cpi->mb.ss, vpx_calloc(sizeof(search_site), + (MAX_MVSEARCH_STEPS * 8) + 1)); - vp8_create_common(&cpi->common); + vp8_create_common(&cpi->common); - init_config(cpi, oxcf); + init_config(cpi, oxcf); - memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, sizeof(vp8cx_base_skip_false_prob)); - cpi->common.current_video_frame = 0; - cpi->temporal_pattern_counter = 0; - cpi->temporal_layer_id = -1; - cpi->kf_overspend_bits = 0; - cpi->kf_bitrate_adjustment = 0; - cpi->frames_till_gf_update_due = 0; - cpi->gf_overspend_bits = 0; - cpi->non_gf_bitrate_adjustment = 0; - cpi->prob_last_coded = 128; - cpi->prob_gf_coded = 128; - cpi->prob_intra_coded = 63; + memcpy(cpi->base_skip_false_prob, vp8cx_base_skip_false_prob, + sizeof(vp8cx_base_skip_false_prob)); + cpi->common.current_video_frame = 0; + cpi->temporal_pattern_counter = 0; + cpi->temporal_layer_id = -1; + cpi->kf_overspend_bits = 0; + cpi->kf_bitrate_adjustment = 0; + cpi->frames_till_gf_update_due = 0; + cpi->gf_overspend_bits = 0; + cpi->non_gf_bitrate_adjustment = 0; + cpi->prob_last_coded = 128; + cpi->prob_gf_coded = 128; + cpi->prob_intra_coded = 63; - /* Prime the recent reference frame usage counters. - * Hereafter they will be maintained as a sort of moving average - */ - cpi->recent_ref_frame_usage[INTRA_FRAME] = 1; - cpi->recent_ref_frame_usage[LAST_FRAME] = 1; - cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1; - cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1; + /* Prime the recent reference frame usage counters. 
+ * Hereafter they will be maintained as a sort of moving average + */ + cpi->recent_ref_frame_usage[INTRA_FRAME] = 1; + cpi->recent_ref_frame_usage[LAST_FRAME] = 1; + cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1; + cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1; - /* Set reference frame sign bias for ALTREF frame to 1 (for now) */ - cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1; + /* Set reference frame sign bias for ALTREF frame to 1 (for now) */ + cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1; - cpi->twopass.gf_decay_rate = 0; - cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL; + cpi->twopass.gf_decay_rate = 0; + cpi->baseline_gf_interval = DEFAULT_GF_INTERVAL; - cpi->gold_is_last = 0 ; - cpi->alt_is_last = 0 ; - cpi->gold_is_alt = 0 ; + cpi->gold_is_last = 0; + cpi->alt_is_last = 0; + cpi->gold_is_alt = 0; - cpi->active_map_enabled = 0; + cpi->active_map_enabled = 0; #if 0 /* Experimental code for lagged and one pass */ @@ -1942,7 +1747,7 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf) { cpi->one_pass_frame_index = 0; - for (i = 0; i < MAX_LAG_BUFFERS; i++) + for (i = 0; i < MAX_LAG_BUFFERS; ++i) { cpi->one_pass_frame_stats[i].frames_so_far = 0; cpi->one_pass_frame_stats[i].frame_intra_error = 0.0; @@ -1957,115 +1762,106 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf) } #endif - cpi->mse_source_denoised = 0; + cpi->mse_source_denoised = 0; - /* Should we use the cyclic refresh method. 
- * Currently this is tied to error resilliant mode - */ - cpi->cyclic_refresh_mode_enabled = cpi->oxcf.error_resilient_mode; - cpi->cyclic_refresh_mode_max_mbs_perframe = (cpi->common.mb_rows * cpi->common.mb_cols) / 7; - if (cpi->oxcf.number_of_layers == 1) { - cpi->cyclic_refresh_mode_max_mbs_perframe = - (cpi->common.mb_rows * cpi->common.mb_cols) / 20; - } else if (cpi->oxcf.number_of_layers == 2) { - cpi->cyclic_refresh_mode_max_mbs_perframe = - (cpi->common.mb_rows * cpi->common.mb_cols) / 10; - } - cpi->cyclic_refresh_mode_index = 0; - cpi->cyclic_refresh_q = 32; + /* Should we use the cyclic refresh method. + * Currently this is tied to error resilliant mode + */ + cpi->cyclic_refresh_mode_enabled = cpi->oxcf.error_resilient_mode; + cpi->cyclic_refresh_mode_max_mbs_perframe = + (cpi->common.mb_rows * cpi->common.mb_cols) / 7; + if (cpi->oxcf.number_of_layers == 1) { + cpi->cyclic_refresh_mode_max_mbs_perframe = + (cpi->common.mb_rows * cpi->common.mb_cols) / 20; + } else if (cpi->oxcf.number_of_layers == 2) { + cpi->cyclic_refresh_mode_max_mbs_perframe = + (cpi->common.mb_rows * cpi->common.mb_cols) / 10; + } + cpi->cyclic_refresh_mode_index = 0; + cpi->cyclic_refresh_q = 32; - if (cpi->cyclic_refresh_mode_enabled) - { - CHECK_MEM_ERROR(cpi->cyclic_refresh_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1)); - } - else - cpi->cyclic_refresh_map = (signed char *) NULL; - - CHECK_MEM_ERROR(cpi->consec_zero_last, - vpx_calloc(cm->mb_rows * cm->mb_cols, 1)); - CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias, + if (cpi->cyclic_refresh_mode_enabled) { + CHECK_MEM_ERROR(cpi->cyclic_refresh_map, vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1)); + } else { + cpi->cyclic_refresh_map = (signed char *)NULL; + } + + CHECK_MEM_ERROR(cpi->consec_zero_last, + vpx_calloc(cm->mb_rows * cm->mb_cols, 1)); + CHECK_MEM_ERROR(cpi->consec_zero_last_mvbias, + vpx_calloc((cpi->common.mb_rows * cpi->common.mb_cols), 1)); #ifdef VP8_ENTROPY_STATS - 
init_context_counters(); + init_context_counters(); #endif - /*Initialize the feed-forward activity masking.*/ - cpi->activity_avg = 90<<12; + /*Initialize the feed-forward activity masking.*/ + cpi->activity_avg = 90 << 12; - /* Give a sensible default for the first frame. */ - cpi->frames_since_key = 8; - cpi->key_frame_frequency = cpi->oxcf.key_freq; - cpi->this_key_frame_forced = 0; - cpi->next_key_frame_forced = 0; + /* Give a sensible default for the first frame. */ + cpi->frames_since_key = 8; + cpi->key_frame_frequency = cpi->oxcf.key_freq; + cpi->this_key_frame_forced = 0; + cpi->next_key_frame_forced = 0; - cpi->source_alt_ref_pending = 0; - cpi->source_alt_ref_active = 0; - cpi->common.refresh_alt_ref_frame = 0; + cpi->source_alt_ref_pending = 0; + cpi->source_alt_ref_active = 0; + cpi->common.refresh_alt_ref_frame = 0; - cpi->force_maxqp = 0; + cpi->force_maxqp = 0; - cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS; + cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS; #if CONFIG_INTERNAL_STATS - cpi->b_calculate_ssimg = 0; + cpi->b_calculate_ssimg = 0; - cpi->count = 0; - cpi->bytes = 0; + cpi->count = 0; + cpi->bytes = 0; - if (cpi->b_calculate_psnr) - { - cpi->total_sq_error = 0.0; - cpi->total_sq_error2 = 0.0; - cpi->total_y = 0.0; - cpi->total_u = 0.0; - cpi->total_v = 0.0; - cpi->total = 0.0; - cpi->totalp_y = 0.0; - cpi->totalp_u = 0.0; - cpi->totalp_v = 0.0; - cpi->totalp = 0.0; - cpi->tot_recode_hits = 0; - cpi->summed_quality = 0; - cpi->summed_weights = 0; - } - - if (cpi->b_calculate_ssimg) - { - cpi->total_ssimg_y = 0; - cpi->total_ssimg_u = 0; - cpi->total_ssimg_v = 0; - cpi->total_ssimg_all = 0; - } + if (cpi->b_calculate_psnr) { + cpi->total_sq_error = 0.0; + cpi->total_sq_error2 = 0.0; + cpi->total_y = 0.0; + cpi->total_u = 0.0; + cpi->total_v = 0.0; + cpi->total = 0.0; + cpi->totalp_y = 0.0; + cpi->totalp_u = 0.0; + cpi->totalp_v = 0.0; + cpi->totalp = 0.0; + cpi->tot_recode_hits = 0; + cpi->summed_quality = 0; + cpi->summed_weights = 0; + } 
#endif - cpi->first_time_stamp_ever = 0x7FFFFFFF; + cpi->first_time_stamp_ever = 0x7FFFFFFF; - cpi->frames_till_gf_update_due = 0; - cpi->key_frame_count = 1; + cpi->frames_till_gf_update_due = 0; + cpi->key_frame_count = 1; - cpi->ni_av_qi = cpi->oxcf.worst_allowed_q; - cpi->ni_tot_qi = 0; - cpi->ni_frames = 0; - cpi->total_byte_count = 0; + cpi->ni_av_qi = cpi->oxcf.worst_allowed_q; + cpi->ni_tot_qi = 0; + cpi->ni_frames = 0; + cpi->total_byte_count = 0; - cpi->drop_frame = 0; + cpi->drop_frame = 0; - cpi->rate_correction_factor = 1.0; - cpi->key_frame_rate_correction_factor = 1.0; - cpi->gf_rate_correction_factor = 1.0; - cpi->twopass.est_max_qcorrection_factor = 1.0; + cpi->rate_correction_factor = 1.0; + cpi->key_frame_rate_correction_factor = 1.0; + cpi->gf_rate_correction_factor = 1.0; + cpi->twopass.est_max_qcorrection_factor = 1.0; - for (i = 0; i < KEY_FRAME_CONTEXT; i++) - { - cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate; - } + for (i = 0; i < KEY_FRAME_CONTEXT; ++i) { + cpi->prior_key_frame_distance[i] = (int)cpi->output_framerate; + } #ifdef OUTPUT_YUV_SRC - yuv_file = fopen("bd.yuv", "ab"); + yuv_file = fopen("bd.yuv", "ab"); #endif #ifdef OUTPUT_YUV_DENOISED - yuv_denoised_file = fopen("denoised.yuv", "ab"); + yuv_denoised_file = fopen("denoised.yuv", "ab"); #endif #if 0 @@ -2073,414 +1869,335 @@ struct VP8_COMP* vp8_create_compressor(VP8_CONFIG *oxcf) kf_list = fopen("kf_list.stt", "w"); #endif - cpi->output_pkt_list = oxcf->output_pkt_list; + cpi->output_pkt_list = oxcf->output_pkt_list; #if !CONFIG_REALTIME_ONLY - if (cpi->pass == 1) - { - vp8_init_first_pass(cpi); - } - else if (cpi->pass == 2) - { - size_t packet_sz = sizeof(FIRSTPASS_STATS); - int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz); + if (cpi->pass == 1) { + vp8_init_first_pass(cpi); + } else if (cpi->pass == 2) { + size_t packet_sz = sizeof(FIRSTPASS_STATS); + int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz); - cpi->twopass.stats_in_start = 
oxcf->two_pass_stats_in.buf; - cpi->twopass.stats_in = cpi->twopass.stats_in_start; - cpi->twopass.stats_in_end = (void*)((char *)cpi->twopass.stats_in - + (packets - 1) * packet_sz); - vp8_init_second_pass(cpi); - } + cpi->twopass.stats_in_start = oxcf->two_pass_stats_in.buf; + cpi->twopass.stats_in = cpi->twopass.stats_in_start; + cpi->twopass.stats_in_end = + (void *)((char *)cpi->twopass.stats_in + (packets - 1) * packet_sz); + vp8_init_second_pass(cpi); + } #endif - if (cpi->compressor_speed == 2) - { - cpi->avg_encode_time = 0; - cpi->avg_pick_mode_time = 0; - } + if (cpi->compressor_speed == 2) { + cpi->avg_encode_time = 0; + cpi->avg_pick_mode_time = 0; + } - vp8_set_speed_features(cpi); + vp8_set_speed_features(cpi); - /* Set starting values of RD threshold multipliers (128 = *1) */ - for (i = 0; i < MAX_MODES; i++) - { - cpi->mb.rd_thresh_mult[i] = 128; - } + /* Set starting values of RD threshold multipliers (128 = *1) */ + for (i = 0; i < MAX_MODES; ++i) { + cpi->mb.rd_thresh_mult[i] = 128; + } #ifdef VP8_ENTROPY_STATS - init_mv_ref_counts(); + init_mv_ref_counts(); #endif #if CONFIG_MULTITHREAD - if(vp8cx_create_encoder_threads(cpi)) - { - vp8_remove_compressor(&cpi); - return 0; - } + if (vp8cx_create_encoder_threads(cpi)) { + vp8_remove_compressor(&cpi); + return 0; + } #endif - cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16; - cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16; - cpi->fn_ptr[BLOCK_16X16].svf = vpx_sub_pixel_variance16x16; - cpi->fn_ptr[BLOCK_16X16].svf_halfpix_h = vpx_variance_halfpixvar16x16_h; - cpi->fn_ptr[BLOCK_16X16].svf_halfpix_v = vpx_variance_halfpixvar16x16_v; - cpi->fn_ptr[BLOCK_16X16].svf_halfpix_hv = vpx_variance_halfpixvar16x16_hv; - cpi->fn_ptr[BLOCK_16X16].sdx3f = vpx_sad16x16x3; - cpi->fn_ptr[BLOCK_16X16].sdx8f = vpx_sad16x16x8; - cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d; + cpi->fn_ptr[BLOCK_16X16].sdf = vpx_sad16x16; + cpi->fn_ptr[BLOCK_16X16].vf = vpx_variance16x16; + cpi->fn_ptr[BLOCK_16X16].svf = 
vpx_sub_pixel_variance16x16; + cpi->fn_ptr[BLOCK_16X16].sdx3f = vpx_sad16x16x3; + cpi->fn_ptr[BLOCK_16X16].sdx8f = vpx_sad16x16x8; + cpi->fn_ptr[BLOCK_16X16].sdx4df = vpx_sad16x16x4d; - cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8; - cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8; - cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8; - cpi->fn_ptr[BLOCK_16X8].svf_halfpix_h = NULL; - cpi->fn_ptr[BLOCK_16X8].svf_halfpix_v = NULL; - cpi->fn_ptr[BLOCK_16X8].svf_halfpix_hv = NULL; - cpi->fn_ptr[BLOCK_16X8].sdx3f = vpx_sad16x8x3; - cpi->fn_ptr[BLOCK_16X8].sdx8f = vpx_sad16x8x8; - cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d; + cpi->fn_ptr[BLOCK_16X8].sdf = vpx_sad16x8; + cpi->fn_ptr[BLOCK_16X8].vf = vpx_variance16x8; + cpi->fn_ptr[BLOCK_16X8].svf = vpx_sub_pixel_variance16x8; + cpi->fn_ptr[BLOCK_16X8].sdx3f = vpx_sad16x8x3; + cpi->fn_ptr[BLOCK_16X8].sdx8f = vpx_sad16x8x8; + cpi->fn_ptr[BLOCK_16X8].sdx4df = vpx_sad16x8x4d; - cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16; - cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16; - cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16; - cpi->fn_ptr[BLOCK_8X16].svf_halfpix_h = NULL; - cpi->fn_ptr[BLOCK_8X16].svf_halfpix_v = NULL; - cpi->fn_ptr[BLOCK_8X16].svf_halfpix_hv = NULL; - cpi->fn_ptr[BLOCK_8X16].sdx3f = vpx_sad8x16x3; - cpi->fn_ptr[BLOCK_8X16].sdx8f = vpx_sad8x16x8; - cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d; + cpi->fn_ptr[BLOCK_8X16].sdf = vpx_sad8x16; + cpi->fn_ptr[BLOCK_8X16].vf = vpx_variance8x16; + cpi->fn_ptr[BLOCK_8X16].svf = vpx_sub_pixel_variance8x16; + cpi->fn_ptr[BLOCK_8X16].sdx3f = vpx_sad8x16x3; + cpi->fn_ptr[BLOCK_8X16].sdx8f = vpx_sad8x16x8; + cpi->fn_ptr[BLOCK_8X16].sdx4df = vpx_sad8x16x4d; - cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8; - cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8; - cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8; - cpi->fn_ptr[BLOCK_8X8].svf_halfpix_h = NULL; - cpi->fn_ptr[BLOCK_8X8].svf_halfpix_v = NULL; - cpi->fn_ptr[BLOCK_8X8].svf_halfpix_hv = NULL; - 
cpi->fn_ptr[BLOCK_8X8].sdx3f = vpx_sad8x8x3; - cpi->fn_ptr[BLOCK_8X8].sdx8f = vpx_sad8x8x8; - cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d; + cpi->fn_ptr[BLOCK_8X8].sdf = vpx_sad8x8; + cpi->fn_ptr[BLOCK_8X8].vf = vpx_variance8x8; + cpi->fn_ptr[BLOCK_8X8].svf = vpx_sub_pixel_variance8x8; + cpi->fn_ptr[BLOCK_8X8].sdx3f = vpx_sad8x8x3; + cpi->fn_ptr[BLOCK_8X8].sdx8f = vpx_sad8x8x8; + cpi->fn_ptr[BLOCK_8X8].sdx4df = vpx_sad8x8x4d; - cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4; - cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4; - cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4; - cpi->fn_ptr[BLOCK_4X4].svf_halfpix_h = NULL; - cpi->fn_ptr[BLOCK_4X4].svf_halfpix_v = NULL; - cpi->fn_ptr[BLOCK_4X4].svf_halfpix_hv = NULL; - cpi->fn_ptr[BLOCK_4X4].sdx3f = vpx_sad4x4x3; - cpi->fn_ptr[BLOCK_4X4].sdx8f = vpx_sad4x4x8; - cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d; + cpi->fn_ptr[BLOCK_4X4].sdf = vpx_sad4x4; + cpi->fn_ptr[BLOCK_4X4].vf = vpx_variance4x4; + cpi->fn_ptr[BLOCK_4X4].svf = vpx_sub_pixel_variance4x4; + cpi->fn_ptr[BLOCK_4X4].sdx3f = vpx_sad4x4x3; + cpi->fn_ptr[BLOCK_4X4].sdx8f = vpx_sad4x4x8; + cpi->fn_ptr[BLOCK_4X4].sdx4df = vpx_sad4x4x4d; #if ARCH_X86 || ARCH_X86_64 - cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn; - cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn; - cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn; - cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn; - cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn; + cpi->fn_ptr[BLOCK_16X16].copymem = vp8_copy32xn; + cpi->fn_ptr[BLOCK_16X8].copymem = vp8_copy32xn; + cpi->fn_ptr[BLOCK_8X16].copymem = vp8_copy32xn; + cpi->fn_ptr[BLOCK_8X8].copymem = vp8_copy32xn; + cpi->fn_ptr[BLOCK_4X4].copymem = vp8_copy32xn; #endif - cpi->full_search_sad = vp8_full_search_sad; - cpi->diamond_search_sad = vp8_diamond_search_sad; - cpi->refining_search_sad = vp8_refining_search_sad; + cpi->full_search_sad = vp8_full_search_sad; + cpi->diamond_search_sad = vp8_diamond_search_sad; + cpi->refining_search_sad = vp8_refining_search_sad; - /* 
make sure frame 1 is okay */ - cpi->mb.error_bins[0] = cpi->common.MBs; + /* make sure frame 1 is okay */ + cpi->mb.error_bins[0] = cpi->common.MBs; - /* vp8cx_init_quantizer() is first called here. Add check in - * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only - * called later when needed. This will avoid unnecessary calls of - * vp8cx_init_quantizer() for every frame. - */ - vp8cx_init_quantizer(cpi); + /* vp8cx_init_quantizer() is first called here. Add check in + * vp8cx_frame_init_quantizer() so that vp8cx_init_quantizer is only + * called later when needed. This will avoid unnecessary calls of + * vp8cx_init_quantizer() for every frame. + */ + vp8cx_init_quantizer(cpi); - vp8_loop_filter_init(cm); + vp8_loop_filter_init(cm); - cpi->common.error.setjmp = 0; + cpi->common.error.setjmp = 0; #if CONFIG_MULTI_RES_ENCODING - /* Calculate # of MBs in a row in lower-resolution level image. */ - if (cpi->oxcf.mr_encoder_id > 0) - vp8_cal_low_res_mb_cols(cpi); + /* Calculate # of MBs in a row in lower-resolution level image. 
*/ + if (cpi->oxcf.mr_encoder_id > 0) vp8_cal_low_res_mb_cols(cpi); #endif - /* setup RD costs to MACROBLOCK struct */ + /* setup RD costs to MACROBLOCK struct */ - cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max+1]; - cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max+1]; - cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max+1]; - cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max+1]; + cpi->mb.mvcost[0] = &cpi->rd_costs.mvcosts[0][mv_max + 1]; + cpi->mb.mvcost[1] = &cpi->rd_costs.mvcosts[1][mv_max + 1]; + cpi->mb.mvsadcost[0] = &cpi->rd_costs.mvsadcosts[0][mvfp_max + 1]; + cpi->mb.mvsadcost[1] = &cpi->rd_costs.mvsadcosts[1][mvfp_max + 1]; - cal_mvsadcosts(cpi->mb.mvsadcost); + cal_mvsadcosts(cpi->mb.mvsadcost); - cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost; - cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost; - cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs; - cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs; - cpi->mb.token_costs = cpi->rd_costs.token_costs; + cpi->mb.mbmode_cost = cpi->rd_costs.mbmode_cost; + cpi->mb.intra_uv_mode_cost = cpi->rd_costs.intra_uv_mode_cost; + cpi->mb.bmode_costs = cpi->rd_costs.bmode_costs; + cpi->mb.inter_bmode_costs = cpi->rd_costs.inter_bmode_costs; + cpi->mb.token_costs = cpi->rd_costs.token_costs; - /* setup block ptrs & offsets */ - vp8_setup_block_ptrs(&cpi->mb); - vp8_setup_block_dptrs(&cpi->mb.e_mbd); + /* setup block ptrs & offsets */ + vp8_setup_block_ptrs(&cpi->mb); + vp8_setup_block_dptrs(&cpi->mb.e_mbd); - return cpi; + return cpi; } +void vp8_remove_compressor(VP8_COMP **ptr) { + VP8_COMP *cpi = *ptr; -void vp8_remove_compressor(VP8_COMP **ptr) -{ - VP8_COMP *cpi = *ptr; + if (!cpi) return; - if (!cpi) - return; - - if (cpi && (cpi->common.current_video_frame > 0)) - { + if (cpi && (cpi->common.current_video_frame > 0)) { #if !CONFIG_REALTIME_ONLY - if (cpi->pass == 2) - { - vp8_end_second_pass(cpi); - } + if (cpi->pass == 2) { + vp8_end_second_pass(cpi); + } 
#endif #ifdef VP8_ENTROPY_STATS - print_context_counters(); - print_tree_update_probs(); - print_mode_context(); + print_context_counters(); + print_tree_update_probs(); + print_mode_context(); #endif #if CONFIG_INTERNAL_STATS - if (cpi->pass != 1) - { - FILE *f = fopen("opsnr.stt", "a"); - double time_encoded = (cpi->last_end_time_stamp_seen - - cpi->first_time_stamp_ever) / 10000000.000; - double total_encode_time = (cpi->time_receive_data + - cpi->time_compress_data) / 1000.000; - double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded; + if (cpi->pass != 1) { + FILE *f = fopen("opsnr.stt", "a"); + double time_encoded = + (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) / + 10000000.000; + double total_encode_time = + (cpi->time_receive_data + cpi->time_compress_data) / 1000.000; + double dr = (double)cpi->bytes * 8.0 / 1000.0 / time_encoded; + const double target_rate = (double)cpi->oxcf.target_bandwidth / 1000; + const double rate_err = ((100.0 * (dr - target_rate)) / target_rate); - if (cpi->b_calculate_psnr) - { - if (cpi->oxcf.number_of_layers > 1) - { - int i; + if (cpi->b_calculate_psnr) { + if (cpi->oxcf.number_of_layers > 1) { + int i; - fprintf(f, "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t" - "GLPsnrP\tVPXSSIM\t\n"); - for (i=0; i<(int)cpi->oxcf.number_of_layers; i++) - { - double dr = (double)cpi->bytes_in_layer[i] * - 8.0 / 1000.0 / time_encoded; - double samples = 3.0 / 2 * cpi->frames_in_layer[i] * - cpi->common.Width * cpi->common.Height; - double total_psnr = - vpx_sse_to_psnr(samples, 255.0, - cpi->total_error2[i]); - double total_psnr2 = - vpx_sse_to_psnr(samples, 255.0, - cpi->total_error2_p[i]); - double total_ssim = 100 * pow(cpi->sum_ssim[i] / - cpi->sum_weights[i], 8.0); + fprintf(f, + "Layer\tBitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t" + "GLPsnrP\tVPXSSIM\t\n"); + for (i = 0; i < (int)cpi->oxcf.number_of_layers; ++i) { + double dr = + (double)cpi->bytes_in_layer[i] * 8.0 / 1000.0 / time_encoded; + double samples = 3.0 / 2 * 
cpi->frames_in_layer[i] * + cpi->common.Width * cpi->common.Height; + double total_psnr = + vpx_sse_to_psnr(samples, 255.0, cpi->total_error2[i]); + double total_psnr2 = + vpx_sse_to_psnr(samples, 255.0, cpi->total_error2_p[i]); + double total_ssim = + 100 * pow(cpi->sum_ssim[i] / cpi->sum_weights[i], 8.0); - fprintf(f, "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t" - "%7.3f\t%7.3f\n", - i, dr, - cpi->sum_psnr[i] / cpi->frames_in_layer[i], - total_psnr, - cpi->sum_psnr_p[i] / cpi->frames_in_layer[i], - total_psnr2, total_ssim); - } - } - else - { - double samples = 3.0 / 2 * cpi->count * - cpi->common.Width * cpi->common.Height; - double total_psnr = vpx_sse_to_psnr(samples, 255.0, - cpi->total_sq_error); - double total_psnr2 = vpx_sse_to_psnr(samples, 255.0, - cpi->total_sq_error2); - double total_ssim = 100 * pow(cpi->summed_quality / - cpi->summed_weights, 8.0); + fprintf(f, + "%5d\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t" + "%7.3f\t%7.3f\n", + i, dr, cpi->sum_psnr[i] / cpi->frames_in_layer[i], + total_psnr, cpi->sum_psnr_p[i] / cpi->frames_in_layer[i], + total_psnr2, total_ssim); + } + } else { + double samples = + 3.0 / 2 * cpi->count * cpi->common.Width * cpi->common.Height; + double total_psnr = + vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error); + double total_psnr2 = + vpx_sse_to_psnr(samples, 255.0, cpi->total_sq_error2); + double total_ssim = + 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0); - fprintf(f, "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t" - "GLPsnrP\tVPXSSIM\t Time(us)\n"); - fprintf(f, "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t" - "%7.3f\t%8.0f\n", - dr, cpi->total / cpi->count, total_psnr, - cpi->totalp / cpi->count, total_psnr2, - total_ssim, total_encode_time); - } - } - - if (cpi->b_calculate_ssimg) - { - if (cpi->oxcf.number_of_layers > 1) - { - int i; - - fprintf(f, "Layer\tBitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t" - "Time(us)\n"); - for (i=0; i<(int)cpi->oxcf.number_of_layers; i++) - { - double dr = (double)cpi->bytes_in_layer[i] * - 8.0 / 1000.0 / 
time_encoded; - fprintf(f, "%5d\t%7.3f\t%6.4f\t" - "%6.4f\t%6.4f\t%6.4f\t%8.0f\n", - i, dr, - cpi->total_ssimg_y_in_layer[i] / - cpi->frames_in_layer[i], - cpi->total_ssimg_u_in_layer[i] / - cpi->frames_in_layer[i], - cpi->total_ssimg_v_in_layer[i] / - cpi->frames_in_layer[i], - cpi->total_ssimg_all_in_layer[i] / - cpi->frames_in_layer[i], - total_encode_time); - } - } - else - { - fprintf(f, "BitRate\tSSIM_Y\tSSIM_U\tSSIM_V\tSSIM_A\t" - "Time(us)\n"); - fprintf(f, "%7.3f\t%6.4f\t%6.4f\t%6.4f\t%6.4f\t%8.0f\n", dr, - cpi->total_ssimg_y / cpi->count, - cpi->total_ssimg_u / cpi->count, - cpi->total_ssimg_v / cpi->count, - cpi->total_ssimg_all / cpi->count, total_encode_time); - } - } - - fclose(f); + fprintf(f, + "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\t" + "GLPsnrP\tVPXSSIM\tTime(us)\tRc-Err\t" + "Abs Err\n"); + fprintf(f, + "%7.3f\t%7.3f\t%7.3f\t%7.3f\t%7.3f\t" + "%7.3f\t%8.0f\t%7.2f\t%7.2f\n", + dr, cpi->total / cpi->count, total_psnr, + cpi->totalp / cpi->count, total_psnr2, total_ssim, + total_encode_time, rate_err, fabs(rate_err)); + } + } + fclose(f); #if 0 f = fopen("qskip.stt", "a"); fprintf(f, "minq:%d -maxq:%d skiptrue:skipfalse = %d:%d\n", cpi->oxcf.best_allowed_q, cpi->oxcf.worst_allowed_q, skiptruecount, skipfalsecount); fclose(f); #endif - - } + } #endif - #ifdef SPEEDSTATS - if (cpi->compressor_speed == 2) - { - int i; - FILE *f = fopen("cxspeed.stt", "a"); - cnt_pm /= cpi->common.MBs; + if (cpi->compressor_speed == 2) { + int i; + FILE *f = fopen("cxspeed.stt", "a"); + cnt_pm /= cpi->common.MBs; - for (i = 0; i < 16; i++) - fprintf(f, "%5d", frames_at_speed[i]); + for (i = 0; i < 16; ++i) fprintf(f, "%5d", frames_at_speed[i]); - fprintf(f, "\n"); - fclose(f); - } + fprintf(f, "\n"); + fclose(f); + } #endif - #ifdef MODE_STATS - { - extern int count_mb_seg[4]; - FILE *f = fopen("modes.stt", "a"); - double dr = (double)cpi->framerate * (double)bytes * (double)8 / (double)count / (double)1000 ; - fprintf(f, "intra_mode in Intra Frames:\n"); - fprintf(f, "Y: 
%8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1], y_modes[2], y_modes[3], y_modes[4]); - fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1], uv_modes[2], uv_modes[3]); - fprintf(f, "B: "); - { - int i; + { + extern int count_mb_seg[4]; + FILE *f = fopen("modes.stt", "a"); + double dr = (double)cpi->framerate * (double)bytes * (double)8 / + (double)count / (double)1000; + fprintf(f, "intra_mode in Intra Frames:\n"); + fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d\n", y_modes[0], y_modes[1], + y_modes[2], y_modes[3], y_modes[4]); + fprintf(f, "UV:%8d, %8d, %8d, %8d\n", uv_modes[0], uv_modes[1], + uv_modes[2], uv_modes[3]); + fprintf(f, "B: "); + { + int i; - for (i = 0; i < 10; i++) - fprintf(f, "%8d, ", b_modes[i]); + for (i = 0; i < 10; ++i) fprintf(f, "%8d, ", b_modes[i]); - fprintf(f, "\n"); + fprintf(f, "\n"); + } - } + fprintf(f, "Modes in Inter Frames:\n"); + fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n", + inter_y_modes[0], inter_y_modes[1], inter_y_modes[2], + inter_y_modes[3], inter_y_modes[4], inter_y_modes[5], + inter_y_modes[6], inter_y_modes[7], inter_y_modes[8], + inter_y_modes[9]); + fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0], + inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]); + fprintf(f, "B: "); + { + int i; - fprintf(f, "Modes in Inter Frames:\n"); - fprintf(f, "Y: %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d, %8d\n", - inter_y_modes[0], inter_y_modes[1], inter_y_modes[2], inter_y_modes[3], inter_y_modes[4], - inter_y_modes[5], inter_y_modes[6], inter_y_modes[7], inter_y_modes[8], inter_y_modes[9]); - fprintf(f, "UV:%8d, %8d, %8d, %8d\n", inter_uv_modes[0], inter_uv_modes[1], inter_uv_modes[2], inter_uv_modes[3]); - fprintf(f, "B: "); - { - int i; + for (i = 0; i < 15; ++i) fprintf(f, "%8d, ", inter_b_modes[i]); - for (i = 0; i < 15; i++) - fprintf(f, "%8d, ", inter_b_modes[i]); + fprintf(f, "\n"); + } + fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1], + count_mb_seg[2], 
count_mb_seg[3]); + fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4], + inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4], + inter_b_modes[NEW4X4]); - fprintf(f, "\n"); - - } - fprintf(f, "P:%8d, %8d, %8d, %8d\n", count_mb_seg[0], count_mb_seg[1], count_mb_seg[2], count_mb_seg[3]); - fprintf(f, "PB:%8d, %8d, %8d, %8d\n", inter_b_modes[LEFT4X4], inter_b_modes[ABOVE4X4], inter_b_modes[ZERO4X4], inter_b_modes[NEW4X4]); - - - - fclose(f); - } + fclose(f); + } #endif #ifdef VP8_ENTROPY_STATS - { - int i, j, k; - FILE *fmode = fopen("modecontext.c", "w"); + { + int i, j, k; + FILE *fmode = fopen("modecontext.c", "w"); - fprintf(fmode, "\n#include \"entropymode.h\"\n\n"); - fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts "); - fprintf(fmode, "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n"); + fprintf(fmode, "\n#include \"entropymode.h\"\n\n"); + fprintf(fmode, "const unsigned int vp8_kf_default_bmode_counts "); + fprintf(fmode, + "[VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES] =\n{\n"); - for (i = 0; i < 10; i++) - { + for (i = 0; i < 10; ++i) { + fprintf(fmode, " { /* Above Mode : %d */\n", i); - fprintf(fmode, " { /* Above Mode : %d */\n", i); + for (j = 0; j < 10; ++j) { + fprintf(fmode, " {"); - for (j = 0; j < 10; j++) - { + for (k = 0; k < 10; ++k) { + if (!intra_mode_stats[i][j][k]) + fprintf(fmode, " %5d, ", 1); + else + fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]); + } - fprintf(fmode, " {"); - - for (k = 0; k < 10; k++) - { - if (!intra_mode_stats[i][j][k]) - fprintf(fmode, " %5d, ", 1); - else - fprintf(fmode, " %5d, ", intra_mode_stats[i][j][k]); - } - - fprintf(fmode, "}, /* left_mode %d */\n", j); - - } - - fprintf(fmode, " },\n"); - - } - - fprintf(fmode, "};\n"); - fclose(fmode); + fprintf(fmode, "}, /* left_mode %d */\n", j); } -#endif + fprintf(fmode, " },\n"); + } + + fprintf(fmode, "};\n"); + fclose(fmode); + } +#endif #if defined(SECTIONBITS_OUTPUT) - if (0) - { - int i; - FILE *f = 
fopen("tokenbits.stt", "a"); + if (0) { + int i; + FILE *f = fopen("tokenbits.stt", "a"); - for (i = 0; i < 28; i++) - fprintf(f, "%8d", (int)(Sectionbits[i] / 256)); + for (i = 0; i < 28; ++i) fprintf(f, "%8d", (int)(Sectionbits[i] / 256)); - fprintf(f, "\n"); - fclose(f); - } + fprintf(f, "\n"); + fclose(f); + } #endif @@ -2491,32 +2208,31 @@ void vp8_remove_compressor(VP8_COMP **ptr) printf("%6d %10ld %10ld %10ld %10ld\n", cpi->common.current_video_frame, cpi->time_receive_data / 1000, cpi->time_encode_mb_row / 1000, cpi->time_compress_data / 1000, (cpi->time_receive_data + cpi->time_compress_data) / 1000); } #endif - - } + } #if CONFIG_MULTITHREAD - vp8cx_remove_encoder_threads(cpi); + vp8cx_remove_encoder_threads(cpi); #endif #if CONFIG_TEMPORAL_DENOISING - vp8_denoiser_free(&cpi->denoiser); + vp8_denoiser_free(&cpi->denoiser); #endif - dealloc_compressor_data(cpi); - vpx_free(cpi->mb.ss); - vpx_free(cpi->tok); - vpx_free(cpi->cyclic_refresh_map); - vpx_free(cpi->consec_zero_last); - vpx_free(cpi->consec_zero_last_mvbias); + dealloc_compressor_data(cpi); + vpx_free(cpi->mb.ss); + vpx_free(cpi->tok); + vpx_free(cpi->cyclic_refresh_map); + vpx_free(cpi->consec_zero_last); + vpx_free(cpi->consec_zero_last_mvbias); - vp8_remove_common(&cpi->common); - vpx_free(cpi); - *ptr = 0; + vp8_remove_common(&cpi->common); + vpx_free(cpi); + *ptr = 0; #ifdef OUTPUT_YUV_SRC - fclose(yuv_file); + fclose(yuv_file); #endif #ifdef OUTPUT_YUV_DENOISED - fclose(yuv_denoised_file); + fclose(yuv_denoised_file); #endif #if 0 @@ -2531,579 +2247,513 @@ void vp8_remove_compressor(VP8_COMP **ptr) fclose(kf_list); #endif - } - static uint64_t calc_plane_error(unsigned char *orig, int orig_stride, unsigned char *recon, int recon_stride, - unsigned int cols, unsigned int rows) -{ - unsigned int row, col; - uint64_t total_sse = 0; - int diff; + unsigned int cols, unsigned int rows) { + unsigned int row, col; + uint64_t total_sse = 0; + int diff; - for (row = 0; row + 16 <= rows; row += 16) - 
{ - for (col = 0; col + 16 <= cols; col += 16) - { - unsigned int sse; + for (row = 0; row + 16 <= rows; row += 16) { + for (col = 0; col + 16 <= cols; col += 16) { + unsigned int sse; - vpx_mse16x16(orig + col, orig_stride, - recon + col, recon_stride, - &sse); - total_sse += sse; - } - - /* Handle odd-sized width */ - if (col < cols) - { - unsigned int border_row, border_col; - unsigned char *border_orig = orig; - unsigned char *border_recon = recon; - - for (border_row = 0; border_row < 16; border_row++) - { - for (border_col = col; border_col < cols; border_col++) - { - diff = border_orig[border_col] - border_recon[border_col]; - total_sse += diff * diff; - } - - border_orig += orig_stride; - border_recon += recon_stride; - } - } - - orig += orig_stride * 16; - recon += recon_stride * 16; + vpx_mse16x16(orig + col, orig_stride, recon + col, recon_stride, &sse); + total_sse += sse; } - /* Handle odd-sized height */ - for (; row < rows; row++) - { - for (col = 0; col < cols; col++) - { - diff = orig[col] - recon[col]; - total_sse += diff * diff; + /* Handle odd-sized width */ + if (col < cols) { + unsigned int border_row, border_col; + unsigned char *border_orig = orig; + unsigned char *border_recon = recon; + + for (border_row = 0; border_row < 16; ++border_row) { + for (border_col = col; border_col < cols; ++border_col) { + diff = border_orig[border_col] - border_recon[border_col]; + total_sse += diff * diff; } - orig += orig_stride; - recon += recon_stride; + border_orig += orig_stride; + border_recon += recon_stride; + } } - vp8_clear_system_state(); - return total_sse; + orig += orig_stride * 16; + recon += recon_stride * 16; + } + + /* Handle odd-sized height */ + for (; row < rows; ++row) { + for (col = 0; col < cols; ++col) { + diff = orig[col] - recon[col]; + total_sse += diff * diff; + } + + orig += orig_stride; + recon += recon_stride; + } + + vp8_clear_system_state(); + return total_sse; } +static void generate_psnr_packet(VP8_COMP *cpi) { + 
YV12_BUFFER_CONFIG *orig = cpi->Source; + YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show; + struct vpx_codec_cx_pkt pkt; + uint64_t sse; + int i; + unsigned int width = cpi->common.Width; + unsigned int height = cpi->common.Height; -static void generate_psnr_packet(VP8_COMP *cpi) -{ - YV12_BUFFER_CONFIG *orig = cpi->Source; - YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show; - struct vpx_codec_cx_pkt pkt; - uint64_t sse; - int i; - unsigned int width = cpi->common.Width; - unsigned int height = cpi->common.Height; + pkt.kind = VPX_CODEC_PSNR_PKT; + sse = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer, + recon->y_stride, width, height); + pkt.data.psnr.sse[0] = sse; + pkt.data.psnr.sse[1] = sse; + pkt.data.psnr.samples[0] = width * height; + pkt.data.psnr.samples[1] = width * height; - pkt.kind = VPX_CODEC_PSNR_PKT; - sse = calc_plane_error(orig->y_buffer, orig->y_stride, - recon->y_buffer, recon->y_stride, - width, height); - pkt.data.psnr.sse[0] = sse; - pkt.data.psnr.sse[1] = sse; - pkt.data.psnr.samples[0] = width * height; - pkt.data.psnr.samples[1] = width * height; + width = (width + 1) / 2; + height = (height + 1) / 2; - width = (width + 1) / 2; - height = (height + 1) / 2; + sse = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer, + recon->uv_stride, width, height); + pkt.data.psnr.sse[0] += sse; + pkt.data.psnr.sse[2] = sse; + pkt.data.psnr.samples[0] += width * height; + pkt.data.psnr.samples[2] = width * height; - sse = calc_plane_error(orig->u_buffer, orig->uv_stride, - recon->u_buffer, recon->uv_stride, - width, height); - pkt.data.psnr.sse[0] += sse; - pkt.data.psnr.sse[2] = sse; - pkt.data.psnr.samples[0] += width * height; - pkt.data.psnr.samples[2] = width * height; + sse = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer, + recon->uv_stride, width, height); + pkt.data.psnr.sse[0] += sse; + pkt.data.psnr.sse[3] = sse; + pkt.data.psnr.samples[0] += width * height; + 
pkt.data.psnr.samples[3] = width * height; - sse = calc_plane_error(orig->v_buffer, orig->uv_stride, - recon->v_buffer, recon->uv_stride, - width, height); - pkt.data.psnr.sse[0] += sse; - pkt.data.psnr.sse[3] = sse; - pkt.data.psnr.samples[0] += width * height; - pkt.data.psnr.samples[3] = width * height; + for (i = 0; i < 4; ++i) { + pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0, + (double)(pkt.data.psnr.sse[i])); + } - for (i = 0; i < 4; i++) - pkt.data.psnr.psnr[i] = vpx_sse_to_psnr(pkt.data.psnr.samples[i], 255.0, - (double)(pkt.data.psnr.sse[i])); - - vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt); + vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt); } +int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags) { + if (ref_frame_flags > 7) return -1; -int vp8_use_as_reference(VP8_COMP *cpi, int ref_frame_flags) -{ - if (ref_frame_flags > 7) - return -1 ; - - cpi->ref_frame_flags = ref_frame_flags; - return 0; + cpi->ref_frame_flags = ref_frame_flags; + return 0; } -int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) -{ - if (ref_frame_flags > 7) - return -1 ; +int vp8_update_reference(VP8_COMP *cpi, int ref_frame_flags) { + if (ref_frame_flags > 7) return -1; - cpi->common.refresh_golden_frame = 0; - cpi->common.refresh_alt_ref_frame = 0; - cpi->common.refresh_last_frame = 0; + cpi->common.refresh_golden_frame = 0; + cpi->common.refresh_alt_ref_frame = 0; + cpi->common.refresh_last_frame = 0; - if (ref_frame_flags & VP8_LAST_FRAME) - cpi->common.refresh_last_frame = 1; + if (ref_frame_flags & VP8_LAST_FRAME) cpi->common.refresh_last_frame = 1; - if (ref_frame_flags & VP8_GOLD_FRAME) - cpi->common.refresh_golden_frame = 1; + if (ref_frame_flags & VP8_GOLD_FRAME) cpi->common.refresh_golden_frame = 1; - if (ref_frame_flags & VP8_ALTR_FRAME) - cpi->common.refresh_alt_ref_frame = 1; + if (ref_frame_flags & VP8_ALTR_FRAME) cpi->common.refresh_alt_ref_frame = 1; - return 0; + return 0; } -int vp8_get_reference(VP8_COMP *cpi, 
enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd) -{ - VP8_COMMON *cm = &cpi->common; - int ref_fb_idx; +int vp8_get_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag, + YV12_BUFFER_CONFIG *sd) { + VP8_COMMON *cm = &cpi->common; + int ref_fb_idx; - if (ref_frame_flag == VP8_LAST_FRAME) - ref_fb_idx = cm->lst_fb_idx; - else if (ref_frame_flag == VP8_GOLD_FRAME) - ref_fb_idx = cm->gld_fb_idx; - else if (ref_frame_flag == VP8_ALTR_FRAME) - ref_fb_idx = cm->alt_fb_idx; - else - return -1; + if (ref_frame_flag == VP8_LAST_FRAME) { + ref_fb_idx = cm->lst_fb_idx; + } else if (ref_frame_flag == VP8_GOLD_FRAME) { + ref_fb_idx = cm->gld_fb_idx; + } else if (ref_frame_flag == VP8_ALTR_FRAME) { + ref_fb_idx = cm->alt_fb_idx; + } else { + return -1; + } - vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd); + vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd); - return 0; + return 0; } -int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd) -{ - VP8_COMMON *cm = &cpi->common; +int vp8_set_reference(VP8_COMP *cpi, enum vpx_ref_frame_type ref_frame_flag, + YV12_BUFFER_CONFIG *sd) { + VP8_COMMON *cm = &cpi->common; - int ref_fb_idx; + int ref_fb_idx; - if (ref_frame_flag == VP8_LAST_FRAME) - ref_fb_idx = cm->lst_fb_idx; - else if (ref_frame_flag == VP8_GOLD_FRAME) - ref_fb_idx = cm->gld_fb_idx; - else if (ref_frame_flag == VP8_ALTR_FRAME) - ref_fb_idx = cm->alt_fb_idx; - else - return -1; + if (ref_frame_flag == VP8_LAST_FRAME) { + ref_fb_idx = cm->lst_fb_idx; + } else if (ref_frame_flag == VP8_GOLD_FRAME) { + ref_fb_idx = cm->gld_fb_idx; + } else if (ref_frame_flag == VP8_ALTR_FRAME) { + ref_fb_idx = cm->alt_fb_idx; + } else { + return -1; + } - vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]); + vp8_yv12_copy_frame(sd, &cm->yv12_fb[ref_fb_idx]); - return 0; + return 0; } -int vp8_update_entropy(VP8_COMP *cpi, int update) -{ - VP8_COMMON *cm = &cpi->common; - cm->refresh_entropy_probs = update; +int 
vp8_update_entropy(VP8_COMP *cpi, int update) { + VP8_COMMON *cm = &cpi->common; + cm->refresh_entropy_probs = update; - return 0; + return 0; } - #if defined(OUTPUT_YUV_SRC) || defined(OUTPUT_YUV_DENOISED) -void vp8_write_yuv_frame(FILE *yuv_file, YV12_BUFFER_CONFIG *s) -{ - unsigned char *src = s->y_buffer; - int h = s->y_height; +void vp8_write_yuv_frame(FILE *yuv_file, YV12_BUFFER_CONFIG *s) { + unsigned char *src = s->y_buffer; + int h = s->y_height; - do - { - fwrite(src, s->y_width, 1, yuv_file); - src += s->y_stride; - } - while (--h); + do { + fwrite(src, s->y_width, 1, yuv_file); + src += s->y_stride; + } while (--h); - src = s->u_buffer; - h = s->uv_height; + src = s->u_buffer; + h = s->uv_height; - do - { - fwrite(src, s->uv_width, 1, yuv_file); - src += s->uv_stride; - } - while (--h); + do { + fwrite(src, s->uv_width, 1, yuv_file); + src += s->uv_stride; + } while (--h); - src = s->v_buffer; - h = s->uv_height; + src = s->v_buffer; + h = s->uv_height; - do - { - fwrite(src, s->uv_width, 1, yuv_file); - src += s->uv_stride; - } - while (--h); + do { + fwrite(src, s->uv_width, 1, yuv_file); + src += s->uv_stride; + } while (--h); } #endif -static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) -{ - VP8_COMMON *cm = &cpi->common; +static void scale_and_extend_source(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) { + VP8_COMMON *cm = &cpi->common; - /* are we resizing the image */ - if (cm->horiz_scale != 0 || cm->vert_scale != 0) - { + /* are we resizing the image */ + if (cm->horiz_scale != 0 || cm->vert_scale != 0) { #if CONFIG_SPATIAL_RESAMPLING - int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs); - int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs); - int tmp_height; + int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs); + int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs); + int tmp_height; - if (cm->vert_scale == 3) - tmp_height = 9; - else - tmp_height = 11; - - Scale2Ratio(cm->horiz_scale, &hr, &hs); - 
Scale2Ratio(cm->vert_scale, &vr, &vs); - - vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer, - tmp_height, hs, hr, vs, vr, 0); - - vp8_yv12_extend_frame_borders(&cpi->scaled_source); - cpi->Source = &cpi->scaled_source; -#endif + if (cm->vert_scale == 3) { + tmp_height = 9; + } else { + tmp_height = 11; } - else - cpi->Source = sd; + + Scale2Ratio(cm->horiz_scale, &hr, &hs); + Scale2Ratio(cm->vert_scale, &vr, &vs); + + vpx_scale_frame(sd, &cpi->scaled_source, cm->temp_scale_frame.y_buffer, + tmp_height, hs, hr, vs, vr, 0); + + vp8_yv12_extend_frame_borders(&cpi->scaled_source); + cpi->Source = &cpi->scaled_source; +#endif + } else { + cpi->Source = sd; + } } - -static int resize_key_frame(VP8_COMP *cpi) -{ +static int resize_key_frame(VP8_COMP *cpi) { #if CONFIG_SPATIAL_RESAMPLING - VP8_COMMON *cm = &cpi->common; + VP8_COMMON *cm = &cpi->common; - /* Do we need to apply resampling for one pass cbr. - * In one pass this is more limited than in two pass cbr. - * The test and any change is only made once per key frame sequence. + /* Do we need to apply resampling for one pass cbr. + * In one pass this is more limited than in two pass cbr. + * The test and any change is only made once per key frame sequence. + */ + if (cpi->oxcf.allow_spatial_resampling && + (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) { + int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs); + int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs); + int new_width, new_height; + + /* If we are below the resample DOWN watermark then scale down a + * notch. */ - if (cpi->oxcf.allow_spatial_resampling && (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) - { - int UNINITIALIZED_IS_SAFE(hr), UNINITIALIZED_IS_SAFE(hs); - int UNINITIALIZED_IS_SAFE(vr), UNINITIALIZED_IS_SAFE(vs); - int new_width, new_height; - - /* If we are below the resample DOWN watermark then scale down a - * notch. 
- */ - if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark * cpi->oxcf.optimal_buffer_level / 100)) - { - cm->horiz_scale = (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO; - cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO; - } - /* Should we now start scaling back up */ - else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark * cpi->oxcf.optimal_buffer_level / 100)) - { - cm->horiz_scale = (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL; - cm->vert_scale = (cm->vert_scale > NORMAL) ? cm->vert_scale - 1 : NORMAL; - } - - /* Get the new height and width */ - Scale2Ratio(cm->horiz_scale, &hr, &hs); - Scale2Ratio(cm->vert_scale, &vr, &vs); - new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs; - new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs; - - /* If the image size has changed we need to reallocate the buffers - * and resample the source image - */ - if ((cm->Width != new_width) || (cm->Height != new_height)) - { - cm->Width = new_width; - cm->Height = new_height; - vp8_alloc_compressor_data(cpi); - scale_and_extend_source(cpi->un_scaled_source, cpi); - return 1; - } + if (cpi->buffer_level < (cpi->oxcf.resample_down_water_mark * + cpi->oxcf.optimal_buffer_level / 100)) { + cm->horiz_scale = + (cm->horiz_scale < ONETWO) ? cm->horiz_scale + 1 : ONETWO; + cm->vert_scale = (cm->vert_scale < ONETWO) ? cm->vert_scale + 1 : ONETWO; + } + /* Should we now start scaling back up */ + else if (cpi->buffer_level > (cpi->oxcf.resample_up_water_mark * + cpi->oxcf.optimal_buffer_level / 100)) { + cm->horiz_scale = + (cm->horiz_scale > NORMAL) ? cm->horiz_scale - 1 : NORMAL; + cm->vert_scale = (cm->vert_scale > NORMAL) ? 
cm->vert_scale - 1 : NORMAL; } + /* Get the new height and width */ + Scale2Ratio(cm->horiz_scale, &hr, &hs); + Scale2Ratio(cm->vert_scale, &vr, &vs); + new_width = ((hs - 1) + (cpi->oxcf.Width * hr)) / hs; + new_height = ((vs - 1) + (cpi->oxcf.Height * vr)) / vs; + + /* If the image size has changed we need to reallocate the buffers + * and resample the source image + */ + if ((cm->Width != new_width) || (cm->Height != new_height)) { + cm->Width = new_width; + cm->Height = new_height; + vp8_alloc_compressor_data(cpi); + scale_and_extend_source(cpi->un_scaled_source, cpi); + return 1; + } + } + #endif - return 0; + return 0; } +static void update_alt_ref_frame_stats(VP8_COMP *cpi) { + VP8_COMMON *cm = &cpi->common; -static void update_alt_ref_frame_stats(VP8_COMP *cpi) -{ - VP8_COMMON *cm = &cpi->common; + /* Select an interval before next GF or altref */ + if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL; - /* Select an interval before next GF or altref */ - if (!cpi->auto_gold) - cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL; + if ((cpi->pass != 2) && cpi->frames_till_gf_update_due) { + cpi->current_gf_interval = cpi->frames_till_gf_update_due; - if ((cpi->pass != 2) && cpi->frames_till_gf_update_due) - { - cpi->current_gf_interval = cpi->frames_till_gf_update_due; + /* Set the bits per frame that we should try and recover in + * subsequent inter frames to account for the extra GF spend... + * note that his does not apply for GF updates that occur + * coincident with a key frame as the extra cost of key frames is + * dealt with elsewhere. + */ + cpi->gf_overspend_bits += cpi->projected_frame_size; + cpi->non_gf_bitrate_adjustment = + cpi->gf_overspend_bits / cpi->frames_till_gf_update_due; + } - /* Set the bits per frame that we should try and recover in - * subsequent inter frames to account for the extra GF spend... 
- * note that his does not apply for GF updates that occur - * coincident with a key frame as the extra cost of key frames is - * dealt with elsewhere. + /* Update data structure that monitors level of reference to last GF */ + memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols)); + cpi->gf_active_count = cm->mb_rows * cm->mb_cols; + + /* this frame refreshes means next frames don't unless specified by user */ + cpi->frames_since_golden = 0; + + /* Clear the alternate reference update pending flag. */ + cpi->source_alt_ref_pending = 0; + + /* Set the alternate reference frame active flag */ + cpi->source_alt_ref_active = 1; +} +static void update_golden_frame_stats(VP8_COMP *cpi) { + VP8_COMMON *cm = &cpi->common; + + /* Update the Golden frame usage counts. */ + if (cm->refresh_golden_frame) { + /* Select an interval before next GF */ + if (!cpi->auto_gold) cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL; + + if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0)) { + cpi->current_gf_interval = cpi->frames_till_gf_update_due; + + /* Set the bits per frame that we should try and recover in + * subsequent inter frames to account for the extra GF spend... + * note that his does not apply for GF updates that occur + * coincident with a key frame as the extra cost of key frames + * is dealt with elsewhere. 
+ */ + if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active) { + /* Calcluate GF bits to be recovered + * Projected size - av frame bits available for inter + * frames for clip as a whole */ - cpi->gf_overspend_bits += cpi->projected_frame_size; - cpi->non_gf_bitrate_adjustment = cpi->gf_overspend_bits / cpi->frames_till_gf_update_due; + cpi->gf_overspend_bits += + (cpi->projected_frame_size - cpi->inter_frame_target); + } + + cpi->non_gf_bitrate_adjustment = + cpi->gf_overspend_bits / cpi->frames_till_gf_update_due; } /* Update data structure that monitors level of reference to last GF */ memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols)); cpi->gf_active_count = cm->mb_rows * cm->mb_cols; - /* this frame refreshes means next frames don't unless specified by user */ + /* this frame refreshes means next frames don't unless specified by + * user + */ + cm->refresh_golden_frame = 0; cpi->frames_since_golden = 0; - /* Clear the alternate reference update pending flag. */ - cpi->source_alt_ref_pending = 0; - - /* Set the alternate reference frame active flag */ - cpi->source_alt_ref_active = 1; - - -} -static void update_golden_frame_stats(VP8_COMP *cpi) -{ - VP8_COMMON *cm = &cpi->common; - - /* Update the Golden frame usage counts. */ - if (cm->refresh_golden_frame) - { - /* Select an interval before next GF */ - if (!cpi->auto_gold) - cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL; - - if ((cpi->pass != 2) && (cpi->frames_till_gf_update_due > 0)) - { - cpi->current_gf_interval = cpi->frames_till_gf_update_due; - - /* Set the bits per frame that we should try and recover in - * subsequent inter frames to account for the extra GF spend... - * note that his does not apply for GF updates that occur - * coincident with a key frame as the extra cost of key frames - * is dealt with elsewhere. 
- */ - if ((cm->frame_type != KEY_FRAME) && !cpi->source_alt_ref_active) - { - /* Calcluate GF bits to be recovered - * Projected size - av frame bits available for inter - * frames for clip as a whole - */ - cpi->gf_overspend_bits += (cpi->projected_frame_size - cpi->inter_frame_target); - } - - cpi->non_gf_bitrate_adjustment = cpi->gf_overspend_bits / cpi->frames_till_gf_update_due; - - } - - /* Update data structure that monitors level of reference to last GF */ - memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols)); - cpi->gf_active_count = cm->mb_rows * cm->mb_cols; - - /* this frame refreshes means next frames don't unless specified by - * user - */ - cm->refresh_golden_frame = 0; - cpi->frames_since_golden = 0; - - cpi->recent_ref_frame_usage[INTRA_FRAME] = 1; - cpi->recent_ref_frame_usage[LAST_FRAME] = 1; - cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1; - cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1; - - /* ******** Fixed Q test code only ************ */ - /* If we are going to use the ALT reference for the next group of - * frames set a flag to say so. - */ - if (cpi->oxcf.fixed_q >= 0 && - cpi->oxcf.play_alternate && !cpi->common.refresh_alt_ref_frame) - { - cpi->source_alt_ref_pending = 1; - cpi->frames_till_gf_update_due = cpi->baseline_gf_interval; - } - - if (!cpi->source_alt_ref_pending) - cpi->source_alt_ref_active = 0; - - /* Decrement count down till next gf */ - if (cpi->frames_till_gf_update_due > 0) - cpi->frames_till_gf_update_due--; + cpi->recent_ref_frame_usage[INTRA_FRAME] = 1; + cpi->recent_ref_frame_usage[LAST_FRAME] = 1; + cpi->recent_ref_frame_usage[GOLDEN_FRAME] = 1; + cpi->recent_ref_frame_usage[ALTREF_FRAME] = 1; + /* ******** Fixed Q test code only ************ */ + /* If we are going to use the ALT reference for the next group of + * frames set a flag to say so. 
+ */ + if (cpi->oxcf.fixed_q >= 0 && cpi->oxcf.play_alternate && + !cpi->common.refresh_alt_ref_frame) { + cpi->source_alt_ref_pending = 1; + cpi->frames_till_gf_update_due = cpi->baseline_gf_interval; } - else if (!cpi->common.refresh_alt_ref_frame) - { - /* Decrement count down till next gf */ - if (cpi->frames_till_gf_update_due > 0) - cpi->frames_till_gf_update_due--; - if (cpi->frames_till_alt_ref_frame) - cpi->frames_till_alt_ref_frame --; + if (!cpi->source_alt_ref_pending) cpi->source_alt_ref_active = 0; - cpi->frames_since_golden ++; + /* Decrement count down till next gf */ + if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--; - if (cpi->frames_since_golden > 1) - { - cpi->recent_ref_frame_usage[INTRA_FRAME] += - cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME]; - cpi->recent_ref_frame_usage[LAST_FRAME] += - cpi->mb.count_mb_ref_frame_usage[LAST_FRAME]; - cpi->recent_ref_frame_usage[GOLDEN_FRAME] += - cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME]; - cpi->recent_ref_frame_usage[ALTREF_FRAME] += - cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME]; - } + } else if (!cpi->common.refresh_alt_ref_frame) { + /* Decrement count down till next gf */ + if (cpi->frames_till_gf_update_due > 0) cpi->frames_till_gf_update_due--; + + if (cpi->frames_till_alt_ref_frame) cpi->frames_till_alt_ref_frame--; + + cpi->frames_since_golden++; + + if (cpi->frames_since_golden > 1) { + cpi->recent_ref_frame_usage[INTRA_FRAME] += + cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME]; + cpi->recent_ref_frame_usage[LAST_FRAME] += + cpi->mb.count_mb_ref_frame_usage[LAST_FRAME]; + cpi->recent_ref_frame_usage[GOLDEN_FRAME] += + cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME]; + cpi->recent_ref_frame_usage[ALTREF_FRAME] += + cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME]; } + } } /* This function updates the reference frame probability estimates that * will be used during mode selection */ -static void update_rd_ref_frame_probs(VP8_COMP *cpi) -{ - VP8_COMMON *cm = &cpi->common; 
+static void update_rd_ref_frame_probs(VP8_COMP *cpi) { + VP8_COMMON *cm = &cpi->common; - const int *const rfct = cpi->mb.count_mb_ref_frame_usage; - const int rf_intra = rfct[INTRA_FRAME]; - const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]; + const int *const rfct = cpi->mb.count_mb_ref_frame_usage; + const int rf_intra = rfct[INTRA_FRAME]; + const int rf_inter = + rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]; - if (cm->frame_type == KEY_FRAME) - { - cpi->prob_intra_coded = 255; - cpi->prob_last_coded = 128; - cpi->prob_gf_coded = 128; - } - else if (!(rf_intra + rf_inter)) - { - cpi->prob_intra_coded = 63; - cpi->prob_last_coded = 128; - cpi->prob_gf_coded = 128; - } - - /* update reference frame costs since we can do better than what we got - * last frame. - */ - if (cpi->oxcf.number_of_layers == 1) - { - if (cpi->common.refresh_alt_ref_frame) - { - cpi->prob_intra_coded += 40; - if (cpi->prob_intra_coded > 255) - cpi->prob_intra_coded = 255; - cpi->prob_last_coded = 200; - cpi->prob_gf_coded = 1; - } - else if (cpi->frames_since_golden == 0) - { - cpi->prob_last_coded = 214; - } - else if (cpi->frames_since_golden == 1) - { - cpi->prob_last_coded = 192; - cpi->prob_gf_coded = 220; - } - else if (cpi->source_alt_ref_active) - { - cpi->prob_gf_coded -= 20; - - if (cpi->prob_gf_coded < 10) - cpi->prob_gf_coded = 10; - } - if (!cpi->source_alt_ref_active) - cpi->prob_gf_coded = 255; + if (cm->frame_type == KEY_FRAME) { + cpi->prob_intra_coded = 255; + cpi->prob_last_coded = 128; + cpi->prob_gf_coded = 128; + } else if (!(rf_intra + rf_inter)) { + cpi->prob_intra_coded = 63; + cpi->prob_last_coded = 128; + cpi->prob_gf_coded = 128; + } + + /* update reference frame costs since we can do better than what we got + * last frame. 
+ */ + if (cpi->oxcf.number_of_layers == 1) { + if (cpi->common.refresh_alt_ref_frame) { + cpi->prob_intra_coded += 40; + if (cpi->prob_intra_coded > 255) cpi->prob_intra_coded = 255; + cpi->prob_last_coded = 200; + cpi->prob_gf_coded = 1; + } else if (cpi->frames_since_golden == 0) { + cpi->prob_last_coded = 214; + } else if (cpi->frames_since_golden == 1) { + cpi->prob_last_coded = 192; + cpi->prob_gf_coded = 220; + } else if (cpi->source_alt_ref_active) { + cpi->prob_gf_coded -= 20; + + if (cpi->prob_gf_coded < 10) cpi->prob_gf_coded = 10; } + if (!cpi->source_alt_ref_active) cpi->prob_gf_coded = 255; + } } - #if !CONFIG_REALTIME_ONLY /* 1 = key, 0 = inter */ -static int decide_key_frame(VP8_COMP *cpi) -{ - VP8_COMMON *cm = &cpi->common; +static int decide_key_frame(VP8_COMP *cpi) { + VP8_COMMON *cm = &cpi->common; - int code_key_frame = 0; + int code_key_frame = 0; - cpi->kf_boost = 0; + cpi->kf_boost = 0; - if (cpi->Speed > 11) - return 0; + if (cpi->Speed > 11) return 0; - /* Clear down mmx registers */ - vp8_clear_system_state(); + /* Clear down mmx registers */ + vp8_clear_system_state(); - if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0)) - { - double change = 1.0 * abs((int)(cpi->mb.intra_error - - cpi->last_intra_error)) / (1 + cpi->last_intra_error); - double change2 = 1.0 * abs((int)(cpi->mb.prediction_error - - cpi->last_prediction_error)) / (1 + cpi->last_prediction_error); - double minerror = cm->MBs * 256; + if ((cpi->compressor_speed == 2) && (cpi->Speed >= 5) && (cpi->sf.RD == 0)) { + double change = 1.0 * + abs((int)(cpi->mb.intra_error - cpi->last_intra_error)) / + (1 + cpi->last_intra_error); + double change2 = + 1.0 * + abs((int)(cpi->mb.prediction_error - cpi->last_prediction_error)) / + (1 + cpi->last_prediction_error); + double minerror = cm->MBs * 256; - cpi->last_intra_error = cpi->mb.intra_error; - cpi->last_prediction_error = cpi->mb.prediction_error; - - if (10 * cpi->mb.intra_error / (1 + 
cpi->mb.prediction_error) < 15 - && cpi->mb.prediction_error > minerror - && (change > .25 || change2 > .25)) - { - /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra > cpi->last_frame_percent_intra + 3*/ - return 1; - } - - return 0; + cpi->last_intra_error = cpi->mb.intra_error; + cpi->last_prediction_error = cpi->mb.prediction_error; + if (10 * cpi->mb.intra_error / (1 + cpi->mb.prediction_error) < 15 && + cpi->mb.prediction_error > minerror && + (change > .25 || change2 > .25)) { + /*(change > 1.4 || change < .75)&& cpi->this_frame_percent_intra > + * cpi->last_frame_percent_intra + 3*/ + return 1; } - /* If the following are true we might as well code a key frame */ - if (((cpi->this_frame_percent_intra == 100) && - (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) || - ((cpi->this_frame_percent_intra > 95) && - (cpi->this_frame_percent_intra >= (cpi->last_frame_percent_intra + 5)))) - { - code_key_frame = 1; - } - /* in addition if the following are true and this is not a golden frame - * then code a key frame Note that on golden frames there often seems - * to be a pop in intra useage anyway hence this restriction is - * designed to prevent spurious key frames. The Intra pop needs to be - * investigated. 
- */ - else if (((cpi->this_frame_percent_intra > 60) && - (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra * 2))) || - ((cpi->this_frame_percent_intra > 75) && - (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra * 3 / 2))) || - ((cpi->this_frame_percent_intra > 90) && - (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 10)))) - { - if (!cm->refresh_golden_frame) - code_key_frame = 1; - } + return 0; + } - return code_key_frame; + /* If the following are true we might as well code a key frame */ + if (((cpi->this_frame_percent_intra == 100) && + (cpi->this_frame_percent_intra > (cpi->last_frame_percent_intra + 2))) || + ((cpi->this_frame_percent_intra > 95) && + (cpi->this_frame_percent_intra >= + (cpi->last_frame_percent_intra + 5)))) { + code_key_frame = 1; + } + /* in addition if the following are true and this is not a golden frame + * then code a key frame Note that on golden frames there often seems + * to be a pop in intra useage anyway hence this restriction is + * designed to prevent spurious key frames. The Intra pop needs to be + * investigated. 
+ */ + else if (((cpi->this_frame_percent_intra > 60) && + (cpi->this_frame_percent_intra > + (cpi->last_frame_percent_intra * 2))) || + ((cpi->this_frame_percent_intra > 75) && + (cpi->this_frame_percent_intra > + (cpi->last_frame_percent_intra * 3 / 2))) || + ((cpi->this_frame_percent_intra > 90) && + (cpi->this_frame_percent_intra > + (cpi->last_frame_percent_intra + 10)))) { + if (!cm->refresh_golden_frame) code_key_frame = 1; + } + return code_key_frame; } -static void Pass1Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned int *frame_flags) -{ - (void) size; - (void) dest; - (void) frame_flags; - vp8_set_quantizer(cpi, 26); +static void Pass1Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest, + unsigned int *frame_flags) { + (void)size; + (void)dest; + (void)frame_flags; + vp8_set_quantizer(cpi, 26); - vp8_first_pass(cpi); + vp8_first_pass(cpi); } #endif @@ -3119,21 +2769,21 @@ void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame) sprintf(filename, "cx\\y%04d.raw", this_frame); yframe = fopen(filename, "wb"); - for (i = 0; i < frame->y_height; i++) + for (i = 0; i < frame->y_height; ++i) fwrite(frame->y_buffer + i * frame->y_stride, frame->y_width, 1, yframe); fclose(yframe); sprintf(filename, "cx\\u%04d.raw", this_frame); yframe = fopen(filename, "wb"); - for (i = 0; i < frame->uv_height; i++) + for (i = 0; i < frame->uv_height; ++i) fwrite(frame->u_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe); fclose(yframe); sprintf(filename, "cx\\v%04d.raw", this_frame); yframe = fopen(filename, "wb"); - for (i = 0; i < frame->uv_height; i++) + for (i = 0; i < frame->uv_height; ++i) fwrite(frame->v_buffer + i * frame->uv_stride, frame->uv_width, 1, yframe); fclose(yframe); @@ -3145,255 +2795,211 @@ void write_cx_frame_to_file(YV12_BUFFER_CONFIG *frame, int this_frame) /* Function to test for conditions that indeicate we should loop * back and recode a frame. 
*/ -static int recode_loop_test( VP8_COMP *cpi, - int high_limit, int low_limit, - int q, int maxq, int minq ) -{ - int force_recode = 0; - VP8_COMMON *cm = &cpi->common; +static int recode_loop_test(VP8_COMP *cpi, int high_limit, int low_limit, int q, + int maxq, int minq) { + int force_recode = 0; + VP8_COMMON *cm = &cpi->common; - /* Is frame recode allowed at all - * Yes if either recode mode 1 is selected or mode two is selcted - * and the frame is a key frame. golden frame or alt_ref_frame - */ - if ( (cpi->sf.recode_loop == 1) || - ( (cpi->sf.recode_loop == 2) && - ( (cm->frame_type == KEY_FRAME) || - cm->refresh_golden_frame || - cm->refresh_alt_ref_frame ) ) ) - { - /* General over and under shoot tests */ - if ( ((cpi->projected_frame_size > high_limit) && (q < maxq)) || - ((cpi->projected_frame_size < low_limit) && (q > minq)) ) - { - force_recode = 1; - } - /* Special Constrained quality tests */ - else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) - { - /* Undershoot and below auto cq level */ - if ( (q > cpi->cq_target_quality) && - (cpi->projected_frame_size < - ((cpi->this_frame_target * 7) >> 3))) - { - force_recode = 1; - } - /* Severe undershoot and between auto and user cq level */ - else if ( (q > cpi->oxcf.cq_level) && - (cpi->projected_frame_size < cpi->min_frame_bandwidth) && - (cpi->active_best_quality > cpi->oxcf.cq_level)) - { - force_recode = 1; - cpi->active_best_quality = cpi->oxcf.cq_level; - } - } + /* Is frame recode allowed at all + * Yes if either recode mode 1 is selected or mode two is selcted + * and the frame is a key frame. 
golden frame or alt_ref_frame + */ + if ((cpi->sf.recode_loop == 1) || + ((cpi->sf.recode_loop == 2) && + ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame || + cm->refresh_alt_ref_frame))) { + /* General over and under shoot tests */ + if (((cpi->projected_frame_size > high_limit) && (q < maxq)) || + ((cpi->projected_frame_size < low_limit) && (q > minq))) { + force_recode = 1; } + /* Special Constrained quality tests */ + else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) { + /* Undershoot and below auto cq level */ + if ((q > cpi->cq_target_quality) && + (cpi->projected_frame_size < ((cpi->this_frame_target * 7) >> 3))) { + force_recode = 1; + } + /* Severe undershoot and between auto and user cq level */ + else if ((q > cpi->oxcf.cq_level) && + (cpi->projected_frame_size < cpi->min_frame_bandwidth) && + (cpi->active_best_quality > cpi->oxcf.cq_level)) { + force_recode = 1; + cpi->active_best_quality = cpi->oxcf.cq_level; + } + } + } - return force_recode; + return force_recode; } #endif // !CONFIG_REALTIME_ONLY -static void update_reference_frames(VP8_COMP *cpi) -{ - VP8_COMMON *cm = &cpi->common; - YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb; +static void update_reference_frames(VP8_COMP *cpi) { + VP8_COMMON *cm = &cpi->common; + YV12_BUFFER_CONFIG *yv12_fb = cm->yv12_fb; - /* At this point the new frame has been encoded. - * If any buffer copy / swapping is signaled it should be done here. - */ + /* At this point the new frame has been encoded. + * If any buffer copy / swapping is signaled it should be done here. 
+ */ - if (cm->frame_type == KEY_FRAME) - { - yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME ; + if (cm->frame_type == KEY_FRAME) { + yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME | VP8_ALTR_FRAME; - yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME; - yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME; + yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME; + yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME; - cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx; + cm->alt_fb_idx = cm->gld_fb_idx = cm->new_fb_idx; - cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame; - cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame; - } - else /* For non key frames */ - { - if (cm->refresh_alt_ref_frame) - { - assert(!cm->copy_buffer_to_arf); + cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame; + cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame; + } else /* For non key frames */ + { + if (cm->refresh_alt_ref_frame) { + assert(!cm->copy_buffer_to_arf); - cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME; - cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME; - cm->alt_fb_idx = cm->new_fb_idx; + cm->yv12_fb[cm->new_fb_idx].flags |= VP8_ALTR_FRAME; + cm->yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME; + cm->alt_fb_idx = cm->new_fb_idx; - cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame; + cpi->current_ref_frames[ALTREF_FRAME] = cm->current_video_frame; + } else if (cm->copy_buffer_to_arf) { + assert(!(cm->copy_buffer_to_arf & ~0x3)); + + if (cm->copy_buffer_to_arf == 1) { + if (cm->alt_fb_idx != cm->lst_fb_idx) { + yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME; + yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME; + cm->alt_fb_idx = cm->lst_fb_idx; + + cpi->current_ref_frames[ALTREF_FRAME] = + cpi->current_ref_frames[LAST_FRAME]; } - else if (cm->copy_buffer_to_arf) - { - assert(!(cm->copy_buffer_to_arf & ~0x3)); + } else /* if (cm->copy_buffer_to_arf == 2) */ + { + if (cm->alt_fb_idx != 
cm->gld_fb_idx) { + yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME; + yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME; + cm->alt_fb_idx = cm->gld_fb_idx; - if (cm->copy_buffer_to_arf == 1) - { - if(cm->alt_fb_idx != cm->lst_fb_idx) - { - yv12_fb[cm->lst_fb_idx].flags |= VP8_ALTR_FRAME; - yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME; - cm->alt_fb_idx = cm->lst_fb_idx; - - cpi->current_ref_frames[ALTREF_FRAME] = - cpi->current_ref_frames[LAST_FRAME]; - } - } - else /* if (cm->copy_buffer_to_arf == 2) */ - { - if(cm->alt_fb_idx != cm->gld_fb_idx) - { - yv12_fb[cm->gld_fb_idx].flags |= VP8_ALTR_FRAME; - yv12_fb[cm->alt_fb_idx].flags &= ~VP8_ALTR_FRAME; - cm->alt_fb_idx = cm->gld_fb_idx; - - cpi->current_ref_frames[ALTREF_FRAME] = - cpi->current_ref_frames[GOLDEN_FRAME]; - } - } - } - - if (cm->refresh_golden_frame) - { - assert(!cm->copy_buffer_to_gf); - - cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME; - cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME; - cm->gld_fb_idx = cm->new_fb_idx; - - cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame; - } - else if (cm->copy_buffer_to_gf) - { - assert(!(cm->copy_buffer_to_arf & ~0x3)); - - if (cm->copy_buffer_to_gf == 1) - { - if(cm->gld_fb_idx != cm->lst_fb_idx) - { - yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME; - yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME; - cm->gld_fb_idx = cm->lst_fb_idx; - - cpi->current_ref_frames[GOLDEN_FRAME] = - cpi->current_ref_frames[LAST_FRAME]; - } - } - else /* if (cm->copy_buffer_to_gf == 2) */ - { - if(cm->alt_fb_idx != cm->gld_fb_idx) - { - yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME; - yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME; - cm->gld_fb_idx = cm->alt_fb_idx; - - cpi->current_ref_frames[GOLDEN_FRAME] = - cpi->current_ref_frames[ALTREF_FRAME]; - } - } + cpi->current_ref_frames[ALTREF_FRAME] = + cpi->current_ref_frames[GOLDEN_FRAME]; } + } } - if (cm->refresh_last_frame) - { - cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME; - 
cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME; - cm->lst_fb_idx = cm->new_fb_idx; + if (cm->refresh_golden_frame) { + assert(!cm->copy_buffer_to_gf); - cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame; + cm->yv12_fb[cm->new_fb_idx].flags |= VP8_GOLD_FRAME; + cm->yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME; + cm->gld_fb_idx = cm->new_fb_idx; + + cpi->current_ref_frames[GOLDEN_FRAME] = cm->current_video_frame; + } else if (cm->copy_buffer_to_gf) { + assert(!(cm->copy_buffer_to_arf & ~0x3)); + + if (cm->copy_buffer_to_gf == 1) { + if (cm->gld_fb_idx != cm->lst_fb_idx) { + yv12_fb[cm->lst_fb_idx].flags |= VP8_GOLD_FRAME; + yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME; + cm->gld_fb_idx = cm->lst_fb_idx; + + cpi->current_ref_frames[GOLDEN_FRAME] = + cpi->current_ref_frames[LAST_FRAME]; + } + } else /* if (cm->copy_buffer_to_gf == 2) */ + { + if (cm->alt_fb_idx != cm->gld_fb_idx) { + yv12_fb[cm->alt_fb_idx].flags |= VP8_GOLD_FRAME; + yv12_fb[cm->gld_fb_idx].flags &= ~VP8_GOLD_FRAME; + cm->gld_fb_idx = cm->alt_fb_idx; + + cpi->current_ref_frames[GOLDEN_FRAME] = + cpi->current_ref_frames[ALTREF_FRAME]; + } + } } + } + + if (cm->refresh_last_frame) { + cm->yv12_fb[cm->new_fb_idx].flags |= VP8_LAST_FRAME; + cm->yv12_fb[cm->lst_fb_idx].flags &= ~VP8_LAST_FRAME; + cm->lst_fb_idx = cm->new_fb_idx; + + cpi->current_ref_frames[LAST_FRAME] = cm->current_video_frame; + } #if CONFIG_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity) + if (cpi->oxcf.noise_sensitivity) { + /* we shouldn't have to keep multiple copies as we know in advance which + * buffer we should start - for now to get something up and running + * I've chosen to copy the buffers + */ + if (cm->frame_type == KEY_FRAME) { + int i; + for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i) + vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_running_avg[i]); + } else /* For non key frames */ { - /* we shouldn't have to keep multiple copies as we know in advance which - * buffer we should start - 
for now to get something up and running - * I've chosen to copy the buffers - */ - if (cm->frame_type == KEY_FRAME) - { - int i; - for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i) - vp8_yv12_copy_frame(cpi->Source, - &cpi->denoiser.yv12_running_avg[i]); - } - else /* For non key frames */ - { - vp8_yv12_extend_frame_borders( - &cpi->denoiser.yv12_running_avg[INTRA_FRAME]); - - if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf) - { - vp8_yv12_copy_frame( - &cpi->denoiser.yv12_running_avg[INTRA_FRAME], - &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]); - } - if (cm->refresh_golden_frame || cm->copy_buffer_to_gf) - { - vp8_yv12_copy_frame( - &cpi->denoiser.yv12_running_avg[INTRA_FRAME], - &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]); - } - if(cm->refresh_last_frame) - { - vp8_yv12_copy_frame( - &cpi->denoiser.yv12_running_avg[INTRA_FRAME], - &cpi->denoiser.yv12_running_avg[LAST_FRAME]); - } - } - if (cpi->oxcf.noise_sensitivity == 4) - vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source); + vp8_yv12_extend_frame_borders( + &cpi->denoiser.yv12_running_avg[INTRA_FRAME]); + if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf) { + vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME], + &cpi->denoiser.yv12_running_avg[ALTREF_FRAME]); + } + if (cm->refresh_golden_frame || cm->copy_buffer_to_gf) { + vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME], + &cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]); + } + if (cm->refresh_last_frame) { + vp8_yv12_copy_frame(&cpi->denoiser.yv12_running_avg[INTRA_FRAME], + &cpi->denoiser.yv12_running_avg[LAST_FRAME]); + } } + if (cpi->oxcf.noise_sensitivity == 4) + vp8_yv12_copy_frame(cpi->Source, &cpi->denoiser.yv12_last_source); + } #endif - } static int measure_square_diff_partial(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest, - VP8_COMP *cpi) - { - int i, j; - int Total = 0; - int num_blocks = 0; - int skip = 2; - int min_consec_zero_last = 10; - int tot_num_blocks = (source->y_height * 
source->y_width) >> 8; - unsigned char *src = source->y_buffer; - unsigned char *dst = dest->y_buffer; + VP8_COMP *cpi) { + int i, j; + int Total = 0; + int num_blocks = 0; + int skip = 2; + int min_consec_zero_last = 10; + int tot_num_blocks = (source->y_height * source->y_width) >> 8; + unsigned char *src = source->y_buffer; + unsigned char *dst = dest->y_buffer; - /* Loop through the Y plane, every |skip| blocks along rows and colmumns, - * summing the square differences, and only for blocks that have been - * zero_last mode at least |x| frames in a row. - */ - for (i = 0; i < source->y_height; i += 16 * skip) - { - int block_index_row = (i >> 4) * cpi->common.mb_cols; - for (j = 0; j < source->y_width; j += 16 * skip) - { - int index = block_index_row + (j >> 4); - if (cpi->consec_zero_last[index] >= min_consec_zero_last) { - unsigned int sse; - Total += vpx_mse16x16(src + j, - source->y_stride, - dst + j, dest->y_stride, - &sse); - num_blocks++; - } - } - src += 16 * skip * source->y_stride; - dst += 16 * skip * dest->y_stride; - } - // Only return non-zero if we have at least ~1/16 samples for estimate. - if (num_blocks > (tot_num_blocks >> 4)) { - return (Total / num_blocks); - } else { - return 0; - } + /* Loop through the Y plane, every |skip| blocks along rows and colmumns, + * summing the square differences, and only for blocks that have been + * zero_last mode at least |x| frames in a row. + */ + for (i = 0; i < source->y_height; i += 16 * skip) { + int block_index_row = (i >> 4) * cpi->common.mb_cols; + for (j = 0; j < source->y_width; j += 16 * skip) { + int index = block_index_row + (j >> 4); + if (cpi->consec_zero_last[index] >= min_consec_zero_last) { + unsigned int sse; + Total += vpx_mse16x16(src + j, source->y_stride, dst + j, + dest->y_stride, &sse); + num_blocks++; + } } + src += 16 * skip * source->y_stride; + dst += 16 * skip * dest->y_stride; + } + // Only return non-zero if we have at least ~1/16 samples for estimate. 
+ if (num_blocks > (tot_num_blocks >> 4)) { + return (Total / num_blocks); + } else { + return 0; + } +} #if CONFIG_TEMPORAL_DENOISING static void process_denoiser_mode_change(VP8_COMP *cpi) { @@ -3421,9 +3027,9 @@ static void process_denoiser_mode_change(VP8_COMP *cpi) { int ystride = cpi->Source->y_stride; unsigned char *src = cpi->Source->y_buffer; unsigned char *dst = cpi->denoiser.yv12_last_source.y_buffer; - static const unsigned char const_source[16] = { - 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128}; + static const unsigned char const_source[16] = { 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128 }; int bandwidth = (int)(cpi->target_bandwidth); // For temporal layers, use full bandwidth (top layer). if (cpi->oxcf.number_of_layers > 1) { @@ -3441,23 +3047,16 @@ static void process_denoiser_mode_change(VP8_COMP *cpi) { int index = block_index_row + (j >> 4); if (cpi->consec_zero_last[index] >= min_consec_zero_last) { unsigned int sse; - const unsigned int var = vpx_variance16x16(src + j, - ystride, - dst + j, - ystride, - &sse); + const unsigned int var = + vpx_variance16x16(src + j, ystride, dst + j, ystride, &sse); // Only consider this block as valid for noise measurement // if the sum_diff average of the current and previous frame // is small (to avoid effects from lighting change). if ((sse - var) < 128) { unsigned int sse2; - const unsigned int act = vpx_variance16x16(src + j, - ystride, - const_source, - 0, - &sse2); - if (act > 0) - total += sse / act; + const unsigned int act = + vpx_variance16x16(src + j, ystride, const_source, 0, &sse2); + if (act > 0) total += sse / act; num_blocks++; } } @@ -3470,8 +3069,7 @@ static void process_denoiser_mode_change(VP8_COMP *cpi) { // Only consider this frame as valid sample if we have computed nmse over // at least ~1/16 blocks, and Total > 0 (Total == 0 can happen if the // application inputs duplicate frames, or contrast is all zero). 
- if (total > 0 && - (num_blocks > (tot_num_blocks >> 4))) { + if (total > 0 && (num_blocks > (tot_num_blocks >> 4))) { // Update the recursive mean square source_diff. total = (total << 8) / num_blocks; if (cpi->denoiser.nmse_source_diff_count == 0) { @@ -3480,10 +3078,10 @@ static void process_denoiser_mode_change(VP8_COMP *cpi) { cpi->denoiser.qp_avg = cm->base_qindex; } else { // For subsequent samples, use average with weight ~1/4 for new sample. - cpi->denoiser.nmse_source_diff = (int)((total + - 3 * cpi->denoiser.nmse_source_diff) >> 2); - cpi->denoiser.qp_avg = (int)((cm->base_qindex + - 3 * cpi->denoiser.qp_avg) >> 2); + cpi->denoiser.nmse_source_diff = + (int)((total + 3 * cpi->denoiser.nmse_source_diff) >> 2); + cpi->denoiser.qp_avg = + (int)((cm->base_qindex + 3 * cpi->denoiser.qp_avg) >> 2); } cpi->denoiser.nmse_source_diff_count++; } @@ -3493,18 +3091,18 @@ static void process_denoiser_mode_change(VP8_COMP *cpi) { // Check for going up: from normal to aggressive mode. if ((cpi->denoiser.denoiser_mode == kDenoiserOnYUV) && (cpi->denoiser.nmse_source_diff > - cpi->denoiser.threshold_aggressive_mode) && + cpi->denoiser.threshold_aggressive_mode) && (cpi->denoiser.qp_avg < cpi->denoiser.qp_threshold_up && bandwidth > cpi->denoiser.bitrate_threshold)) { vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUVAggressive); } else { // Check for going down: from aggressive to normal mode. 
if (((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) && - (cpi->denoiser.nmse_source_diff < - cpi->denoiser.threshold_aggressive_mode)) || + (cpi->denoiser.nmse_source_diff < + cpi->denoiser.threshold_aggressive_mode)) || ((cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) && - (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down || - bandwidth < cpi->denoiser.bitrate_threshold))) { + (cpi->denoiser.qp_avg > cpi->denoiser.qp_threshold_down || + bandwidth < cpi->denoiser.bitrate_threshold))) { vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV); } } @@ -3516,321 +3114,294 @@ static void process_denoiser_mode_change(VP8_COMP *cpi) { } #endif -void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) -{ - const FRAME_TYPE frame_type = cm->frame_type; +void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) { + const FRAME_TYPE frame_type = cm->frame_type; - int update_any_ref_buffers = 1; - if (cpi->common.refresh_last_frame == 0 && - cpi->common.refresh_golden_frame == 0 && - cpi->common.refresh_alt_ref_frame == 0) { - update_any_ref_buffers = 0; - } + int update_any_ref_buffers = 1; + if (cpi->common.refresh_last_frame == 0 && + cpi->common.refresh_golden_frame == 0 && + cpi->common.refresh_alt_ref_frame == 0) { + update_any_ref_buffers = 0; + } - if (cm->no_lpf) - { - cm->filter_level = 0; - } - else - { - struct vpx_usec_timer timer; + if (cm->no_lpf) { + cm->filter_level = 0; + } else { + struct vpx_usec_timer timer; - vp8_clear_system_state(); - - vpx_usec_timer_start(&timer); - if (cpi->sf.auto_filter == 0) { -#if CONFIG_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) { - // Use the denoised buffer for selecting base loop filter level. - // Denoised signal for current frame is stored in INTRA_FRAME. - // No denoising on key frames. 
- vp8cx_pick_filter_level_fast( - &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi); - } else { - vp8cx_pick_filter_level_fast(cpi->Source, cpi); - } -#else - vp8cx_pick_filter_level_fast(cpi->Source, cpi); -#endif - } else { -#if CONFIG_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) { - // Use the denoised buffer for selecting base loop filter level. - // Denoised signal for current frame is stored in INTRA_FRAME. - // No denoising on key frames. - vp8cx_pick_filter_level( - &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi); - } else { - vp8cx_pick_filter_level(cpi->Source, cpi); - } -#else - vp8cx_pick_filter_level(cpi->Source, cpi); -#endif - } - - - if (cm->filter_level > 0) - { - vp8cx_set_alt_lf_level(cpi, cm->filter_level); - } - - vpx_usec_timer_mark(&timer); - cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer); - } - -#if CONFIG_MULTITHREAD - if (cpi->b_multi_threaded) - sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */ -#endif - - // No need to apply loop-filter if the encoded frame does not update - // any reference buffers. 
- if (cm->filter_level > 0 && update_any_ref_buffers) - { - vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type); - } - - vp8_yv12_extend_frame_borders(cm->frame_to_show); - -} - -static void encode_frame_to_data_rate -( - VP8_COMP *cpi, - unsigned long *size, - unsigned char *dest, - unsigned char* dest_end, - unsigned int *frame_flags -) -{ - int Q; - int frame_over_shoot_limit; - int frame_under_shoot_limit; - - int Loop = 0; - int loop_count; - - VP8_COMMON *cm = &cpi->common; - int active_worst_qchanged = 0; - -#if !CONFIG_REALTIME_ONLY - int q_low; - int q_high; - int zbin_oq_high; - int zbin_oq_low = 0; - int top_index; - int bottom_index; - int overshoot_seen = 0; - int undershoot_seen = 0; -#endif - - int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark * - cpi->oxcf.optimal_buffer_level / 100); - int drop_mark75 = drop_mark * 2 / 3; - int drop_mark50 = drop_mark / 4; - int drop_mark25 = drop_mark / 8; - - - /* Clear down mmx registers to allow floating point in what follows */ vp8_clear_system_state(); -#if CONFIG_MULTITHREAD - /* wait for the last picture loopfilter thread done */ - if (cpi->b_lpf_running) - { - sem_wait(&cpi->h_event_end_lpf); - cpi->b_lpf_running = 0; - } -#endif - - if(cpi->force_next_frame_intra) - { - cm->frame_type = KEY_FRAME; /* delayed intra frame */ - cpi->force_next_frame_intra = 0; - } - - /* For an alt ref frame in 2 pass we skip the call to the second pass - * function that sets the target bandwidth - */ -#if !CONFIG_REALTIME_ONLY - - if (cpi->pass == 2) - { - if (cpi->common.refresh_alt_ref_frame) - { - /* Per frame bit target for the alt ref frame */ - cpi->per_frame_bandwidth = cpi->twopass.gf_bits; - /* per second target bitrate */ - cpi->target_bandwidth = (int)(cpi->twopass.gf_bits * - cpi->output_framerate); - } - } - else -#endif - cpi->per_frame_bandwidth = (int)(cpi->target_bandwidth / cpi->output_framerate); - - /* Default turn off buffer to buffer copying */ - cm->copy_buffer_to_gf = 0; - 
cm->copy_buffer_to_arf = 0; - - /* Clear zbin over-quant value and mode boost values. */ - cpi->mb.zbin_over_quant = 0; - cpi->mb.zbin_mode_boost = 0; - - /* Enable or disable mode based tweaking of the zbin - * For 2 Pass Only used where GF/ARF prediction quality - * is above a threshold - */ - cpi->mb.zbin_mode_boost_enabled = 1; - if (cpi->pass == 2) - { - if ( cpi->gfu_boost <= 400 ) - { - cpi->mb.zbin_mode_boost_enabled = 0; - } - } - - /* Current default encoder behaviour for the altref sign bias */ - if (cpi->source_alt_ref_active) - cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1; - else - cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0; - - /* Check to see if a key frame is signaled - * For two pass with auto key frame enabled cm->frame_type may already - * be set, but not for one pass. - */ - if ((cm->current_video_frame == 0) || - (cm->frame_flags & FRAMEFLAGS_KEY) || - (cpi->oxcf.auto_key && (cpi->frames_since_key % cpi->key_frame_frequency == 0))) - { - /* Key frame from VFW/auto-keyframe/first frame */ - cm->frame_type = KEY_FRAME; + vpx_usec_timer_start(&timer); + if (cpi->sf.auto_filter == 0) { #if CONFIG_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity == 4) { - // For adaptive mode, reset denoiser to normal mode on key frame. - vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV); - } + if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) { + // Use the denoised buffer for selecting base loop filter level. + // Denoised signal for current frame is stored in INTRA_FRAME. + // No denoising on key frames. + vp8cx_pick_filter_level_fast( + &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi); + } else { + vp8cx_pick_filter_level_fast(cpi->Source, cpi); + } +#else + vp8cx_pick_filter_level_fast(cpi->Source, cpi); +#endif + } else { +#if CONFIG_TEMPORAL_DENOISING + if (cpi->oxcf.noise_sensitivity && cm->frame_type != KEY_FRAME) { + // Use the denoised buffer for selecting base loop filter level. 
+ // Denoised signal for current frame is stored in INTRA_FRAME. + // No denoising on key frames. + vp8cx_pick_filter_level(&cpi->denoiser.yv12_running_avg[INTRA_FRAME], + cpi); + } else { + vp8cx_pick_filter_level(cpi->Source, cpi); + } +#else + vp8cx_pick_filter_level(cpi->Source, cpi); #endif } + if (cm->filter_level > 0) { + vp8cx_set_alt_lf_level(cpi, cm->filter_level); + } + + vpx_usec_timer_mark(&timer); + cpi->time_pick_lpf += vpx_usec_timer_elapsed(&timer); + } + +#if CONFIG_MULTITHREAD + if (cpi->b_multi_threaded) { + sem_post(&cpi->h_event_end_lpf); /* signal that we have set filter_level */ + } +#endif + + // No need to apply loop-filter if the encoded frame does not update + // any reference buffers. + if (cm->filter_level > 0 && update_any_ref_buffers) { + vp8_loop_filter_frame(cm, &cpi->mb.e_mbd, frame_type); + } + + vp8_yv12_extend_frame_borders(cm->frame_to_show); +} + +static void encode_frame_to_data_rate(VP8_COMP *cpi, size_t *size, + unsigned char *dest, + unsigned char *dest_end, + unsigned int *frame_flags) { + int Q; + int frame_over_shoot_limit; + int frame_under_shoot_limit; + + int Loop = 0; + int loop_count; + + VP8_COMMON *cm = &cpi->common; + int active_worst_qchanged = 0; + +#if !CONFIG_REALTIME_ONLY + int q_low; + int q_high; + int zbin_oq_high; + int zbin_oq_low = 0; + int top_index; + int bottom_index; + int overshoot_seen = 0; + int undershoot_seen = 0; +#endif + + int drop_mark = (int)(cpi->oxcf.drop_frames_water_mark * + cpi->oxcf.optimal_buffer_level / 100); + int drop_mark75 = drop_mark * 2 / 3; + int drop_mark50 = drop_mark / 4; + int drop_mark25 = drop_mark / 8; + + /* Clear down mmx registers to allow floating point in what follows */ + vp8_clear_system_state(); + + if (cpi->force_next_frame_intra) { + cm->frame_type = KEY_FRAME; /* delayed intra frame */ + cpi->force_next_frame_intra = 0; + } + + /* For an alt ref frame in 2 pass we skip the call to the second pass + * function that sets the target bandwidth + */ + switch 
(cpi->pass) { +#if !CONFIG_REALTIME_ONLY + case 2: + if (cpi->common.refresh_alt_ref_frame) { + /* Per frame bit target for the alt ref frame */ + cpi->per_frame_bandwidth = cpi->twopass.gf_bits; + /* per second target bitrate */ + cpi->target_bandwidth = + (int)(cpi->twopass.gf_bits * cpi->output_framerate); + } + break; +#endif // !CONFIG_REALTIME_ONLY + default: + cpi->per_frame_bandwidth = + (int)(cpi->target_bandwidth / cpi->output_framerate); + break; + } + + /* Default turn off buffer to buffer copying */ + cm->copy_buffer_to_gf = 0; + cm->copy_buffer_to_arf = 0; + + /* Clear zbin over-quant value and mode boost values. */ + cpi->mb.zbin_over_quant = 0; + cpi->mb.zbin_mode_boost = 0; + + /* Enable or disable mode based tweaking of the zbin + * For 2 Pass Only used where GF/ARF prediction quality + * is above a threshold + */ + cpi->mb.zbin_mode_boost_enabled = 1; + if (cpi->pass == 2) { + if (cpi->gfu_boost <= 400) { + cpi->mb.zbin_mode_boost_enabled = 0; + } + } + + /* Current default encoder behaviour for the altref sign bias */ + if (cpi->source_alt_ref_active) { + cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 1; + } else { + cpi->common.ref_frame_sign_bias[ALTREF_FRAME] = 0; + } + + /* Check to see if a key frame is signaled + * For two pass with auto key frame enabled cm->frame_type may already + * be set, but not for one pass. + */ + if ((cm->current_video_frame == 0) || (cm->frame_flags & FRAMEFLAGS_KEY) || + (cpi->oxcf.auto_key && + (cpi->frames_since_key % cpi->key_frame_frequency == 0))) { + /* Key frame from VFW/auto-keyframe/first frame */ + cm->frame_type = KEY_FRAME; +#if CONFIG_TEMPORAL_DENOISING + if (cpi->oxcf.noise_sensitivity == 4) { + // For adaptive mode, reset denoiser to normal mode on key frame. 
+ vp8_denoiser_set_parameters(&cpi->denoiser, kDenoiserOnYUV); + } +#endif + } + #if CONFIG_MULTI_RES_ENCODING - if (cpi->oxcf.mr_total_resolutions > 1) { - LOWER_RES_FRAME_INFO* low_res_frame_info - = (LOWER_RES_FRAME_INFO*)cpi->oxcf.mr_low_res_mode_info; + if (cpi->oxcf.mr_total_resolutions > 1) { + LOWER_RES_FRAME_INFO *low_res_frame_info = + (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info; - if (cpi->oxcf.mr_encoder_id) { + if (cpi->oxcf.mr_encoder_id) { + // TODO(marpan): This constraint shouldn't be needed, as we would like + // to allow for key frame setting (forced or periodic) defined per + // spatial layer. For now, keep this in. + cm->frame_type = low_res_frame_info->frame_type; - // TODO(marpan): This constraint shouldn't be needed, as we would like - // to allow for key frame setting (forced or periodic) defined per - // spatial layer. For now, keep this in. - cm->frame_type = low_res_frame_info->frame_type; + // Check if lower resolution is available for motion vector reuse. + if (cm->frame_type != KEY_FRAME) { + cpi->mr_low_res_mv_avail = 1; + cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped); - // Check if lower resolution is available for motion vector reuse. 
- if(cm->frame_type != KEY_FRAME) - { - cpi->mr_low_res_mv_avail = 1; - cpi->mr_low_res_mv_avail &= !(low_res_frame_info->is_frame_dropped); + if (cpi->ref_frame_flags & VP8_LAST_FRAME) + cpi->mr_low_res_mv_avail &= + (cpi->current_ref_frames[LAST_FRAME] == + low_res_frame_info->low_res_ref_frames[LAST_FRAME]); - if (cpi->ref_frame_flags & VP8_LAST_FRAME) - cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[LAST_FRAME] - == low_res_frame_info->low_res_ref_frames[LAST_FRAME]); + if (cpi->ref_frame_flags & VP8_GOLD_FRAME) + cpi->mr_low_res_mv_avail &= + (cpi->current_ref_frames[GOLDEN_FRAME] == + low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]); - if (cpi->ref_frame_flags & VP8_GOLD_FRAME) - cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[GOLDEN_FRAME] - == low_res_frame_info->low_res_ref_frames[GOLDEN_FRAME]); - - // Don't use altref to determine whether low res is available. - // TODO (marpan): Should we make this type of condition on a - // per-reference frame basis? - /* - if (cpi->ref_frame_flags & VP8_ALTR_FRAME) - cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME] - == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]); - */ - } + // Don't use altref to determine whether low res is available. + // TODO (marpan): Should we make this type of condition on a + // per-reference frame basis? + /* + if (cpi->ref_frame_flags & VP8_ALTR_FRAME) + cpi->mr_low_res_mv_avail &= (cpi->current_ref_frames[ALTREF_FRAME] + == low_res_frame_info->low_res_ref_frames[ALTREF_FRAME]); + */ } - - // On a key frame: For the lowest resolution, keep track of the key frame - // counter value. For the higher resolutions, reset the current video - // frame counter to that of the lowest resolution. - // This is done to the handle the case where we may stop/start encoding - // higher layer(s). The restart-encoding of higher layer is only signaled - // by a key frame for now. - // TODO (marpan): Add flag to indicate restart-encoding of higher layer. 
- if (cm->frame_type == KEY_FRAME) { - if (cpi->oxcf.mr_encoder_id) { - // If the initial starting value of the buffer level is zero (this can - // happen because we may have not started encoding this higher stream), - // then reset it to non-zero value based on |starting_buffer_level|. - if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) { - unsigned int i; - cpi->bits_off_target = cpi->oxcf.starting_buffer_level; - cpi->buffer_level = cpi->oxcf.starting_buffer_level; - for (i = 0; i < cpi->oxcf.number_of_layers; i++) { - LAYER_CONTEXT *lc = &cpi->layer_context[i]; - lc->bits_off_target = lc->starting_buffer_level; - lc->buffer_level = lc->starting_buffer_level; - } - } - cpi->common.current_video_frame = - low_res_frame_info->key_frame_counter_value; - } else { - low_res_frame_info->key_frame_counter_value = - cpi->common.current_video_frame; - } - } - } + + // On a key frame: For the lowest resolution, keep track of the key frame + // counter value. For the higher resolutions, reset the current video + // frame counter to that of the lowest resolution. + // This is done to the handle the case where we may stop/start encoding + // higher layer(s). The restart-encoding of higher layer is only signaled + // by a key frame for now. + // TODO (marpan): Add flag to indicate restart-encoding of higher layer. + if (cm->frame_type == KEY_FRAME) { + if (cpi->oxcf.mr_encoder_id) { + // If the initial starting value of the buffer level is zero (this can + // happen because we may have not started encoding this higher stream), + // then reset it to non-zero value based on |starting_buffer_level|. 
+ if (cpi->common.current_video_frame == 0 && cpi->buffer_level == 0) { + unsigned int i; + cpi->bits_off_target = cpi->oxcf.starting_buffer_level; + cpi->buffer_level = cpi->oxcf.starting_buffer_level; + for (i = 0; i < cpi->oxcf.number_of_layers; ++i) { + LAYER_CONTEXT *lc = &cpi->layer_context[i]; + lc->bits_off_target = lc->starting_buffer_level; + lc->buffer_level = lc->starting_buffer_level; + } + } + cpi->common.current_video_frame = + low_res_frame_info->key_frame_counter_value; + } else { + low_res_frame_info->key_frame_counter_value = + cpi->common.current_video_frame; + } + } + } #endif - // Find the reference frame closest to the current frame. - cpi->closest_reference_frame = LAST_FRAME; - if(cm->frame_type != KEY_FRAME) { - int i; - MV_REFERENCE_FRAME closest_ref = INTRA_FRAME; - if (cpi->ref_frame_flags & VP8_LAST_FRAME) { - closest_ref = LAST_FRAME; - } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) { - closest_ref = GOLDEN_FRAME; - } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) { - closest_ref = ALTREF_FRAME; - } - for(i = 1; i <= 3; i++) { - vpx_ref_frame_type_t ref_frame_type = (vpx_ref_frame_type_t) - ((i == 3) ? 4 : i); - if (cpi->ref_frame_flags & ref_frame_type) { - if ((cm->current_video_frame - cpi->current_ref_frames[i]) < - (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) { - closest_ref = i; - } + // Find the reference frame closest to the current frame. + cpi->closest_reference_frame = LAST_FRAME; + if (cm->frame_type != KEY_FRAME) { + int i; + MV_REFERENCE_FRAME closest_ref = INTRA_FRAME; + if (cpi->ref_frame_flags & VP8_LAST_FRAME) { + closest_ref = LAST_FRAME; + } else if (cpi->ref_frame_flags & VP8_GOLD_FRAME) { + closest_ref = GOLDEN_FRAME; + } else if (cpi->ref_frame_flags & VP8_ALTR_FRAME) { + closest_ref = ALTREF_FRAME; + } + for (i = 1; i <= 3; ++i) { + vpx_ref_frame_type_t ref_frame_type = + (vpx_ref_frame_type_t)((i == 3) ? 
4 : i); + if (cpi->ref_frame_flags & ref_frame_type) { + if ((cm->current_video_frame - cpi->current_ref_frames[i]) < + (cm->current_video_frame - cpi->current_ref_frames[closest_ref])) { + closest_ref = i; } } - cpi->closest_reference_frame = closest_ref; + } + cpi->closest_reference_frame = closest_ref; + } + + /* Set various flags etc to special state if it is a key frame */ + if (cm->frame_type == KEY_FRAME) { + int i; + + // Set the loop filter deltas and segmentation map update + setup_features(cpi); + + /* The alternate reference frame cannot be active for a key frame */ + cpi->source_alt_ref_active = 0; + + /* Reset the RD threshold multipliers to default of * 1 (128) */ + for (i = 0; i < MAX_MODES; ++i) { + cpi->mb.rd_thresh_mult[i] = 128; } - /* Set various flags etc to special state if it is a key frame */ - if (cm->frame_type == KEY_FRAME) - { - int i; - - // Set the loop filter deltas and segmentation map update - setup_features(cpi); - - /* The alternate reference frame cannot be active for a key frame */ - cpi->source_alt_ref_active = 0; - - /* Reset the RD threshold multipliers to default of * 1 (128) */ - for (i = 0; i < MAX_MODES; i++) - { - cpi->mb.rd_thresh_mult[i] = 128; - } - - // Reset the zero_last counter to 0 on key frame. - memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols); - memset(cpi->consec_zero_last_mvbias, 0, - (cpi->common.mb_rows * cpi->common.mb_cols)); - } + // Reset the zero_last counter to 0 on key frame. + memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols); + memset(cpi->consec_zero_last_mvbias, 0, + (cpi->common.mb_rows * cpi->common.mb_cols)); + } #if 0 /* Experimental code for lagged compress and one pass @@ -3852,486 +3423,446 @@ static void encode_frame_to_data_rate } #endif - update_rd_ref_frame_probs(cpi); + update_rd_ref_frame_probs(cpi); - if (cpi->drop_frames_allowed) - { - /* The reset to decimation 0 is only done here for one pass. 
- * Once it is set two pass leaves decimation on till the next kf. - */ - if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0)) - cpi->decimation_factor --; - - if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0) - cpi->decimation_factor = 1; - - else if (cpi->buffer_level < drop_mark25 && (cpi->decimation_factor == 2 || cpi->decimation_factor == 3)) - { - cpi->decimation_factor = 3; - } - else if (cpi->buffer_level < drop_mark50 && (cpi->decimation_factor == 1 || cpi->decimation_factor == 2)) - { - cpi->decimation_factor = 2; - } - else if (cpi->buffer_level < drop_mark75 && (cpi->decimation_factor == 0 || cpi->decimation_factor == 1)) - { - cpi->decimation_factor = 1; - } + if (cpi->drop_frames_allowed) { + /* The reset to decimation 0 is only done here for one pass. + * Once it is set two pass leaves decimation on till the next kf. + */ + if ((cpi->buffer_level > drop_mark) && (cpi->decimation_factor > 0)) { + cpi->decimation_factor--; } - /* The following decimates the frame rate according to a regular - * pattern (i.e. to 1/2 or 2/3 frame rate) This can be used to help - * prevent buffer under-run in CBR mode. Alternatively it might be - * desirable in some situations to drop frame rate but throw more bits - * at each frame. 
- * - * Note that dropping a key frame can be problematic if spatial - * resampling is also active + if (cpi->buffer_level > drop_mark75 && cpi->decimation_factor > 0) { + cpi->decimation_factor = 1; + + } else if (cpi->buffer_level < drop_mark25 && + (cpi->decimation_factor == 2 || cpi->decimation_factor == 3)) { + cpi->decimation_factor = 3; + } else if (cpi->buffer_level < drop_mark50 && + (cpi->decimation_factor == 1 || cpi->decimation_factor == 2)) { + cpi->decimation_factor = 2; + } else if (cpi->buffer_level < drop_mark75 && + (cpi->decimation_factor == 0 || cpi->decimation_factor == 1)) { + cpi->decimation_factor = 1; + } + } + + /* The following decimates the frame rate according to a regular + * pattern (i.e. to 1/2 or 2/3 frame rate) This can be used to help + * prevent buffer under-run in CBR mode. Alternatively it might be + * desirable in some situations to drop frame rate but throw more bits + * at each frame. + * + * Note that dropping a key frame can be problematic if spatial + * resampling is also active + */ + if (cpi->decimation_factor > 0) { + switch (cpi->decimation_factor) { + case 1: + cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2; + break; + case 2: + cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4; + break; + case 3: + cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4; + break; + } + + /* Note that we should not throw out a key frame (especially when + * spatial resampling is enabled). 
*/ - if (cpi->decimation_factor > 0) - { - switch (cpi->decimation_factor) - { - case 1: - cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 3 / 2; - break; - case 2: - cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4; - break; - case 3: - cpi->per_frame_bandwidth = cpi->per_frame_bandwidth * 5 / 4; - break; - } + if (cm->frame_type == KEY_FRAME) { + cpi->decimation_count = cpi->decimation_factor; + } else if (cpi->decimation_count > 0) { + cpi->decimation_count--; - /* Note that we should not throw out a key frame (especially when - * spatial resampling is enabled). - */ - if (cm->frame_type == KEY_FRAME) - { - cpi->decimation_count = cpi->decimation_factor; - } - else if (cpi->decimation_count > 0) - { - cpi->decimation_count --; - - cpi->bits_off_target += cpi->av_per_frame_bandwidth; - if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) - cpi->bits_off_target = cpi->oxcf.maximum_buffer_size; + cpi->bits_off_target += cpi->av_per_frame_bandwidth; + if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) { + cpi->bits_off_target = cpi->oxcf.maximum_buffer_size; + } #if CONFIG_MULTI_RES_ENCODING - vp8_store_drop_frame_info(cpi); + vp8_store_drop_frame_info(cpi); #endif - cm->current_video_frame++; - cpi->frames_since_key++; - // We advance the temporal pattern for dropped frames. - cpi->temporal_pattern_counter++; + cm->current_video_frame++; + cpi->frames_since_key++; + // We advance the temporal pattern for dropped frames. 
+ cpi->temporal_pattern_counter++; #if CONFIG_INTERNAL_STATS - cpi->count ++; + cpi->count++; #endif - cpi->buffer_level = cpi->bits_off_target; + cpi->buffer_level = cpi->bits_off_target; - if (cpi->oxcf.number_of_layers > 1) - { - unsigned int i; + if (cpi->oxcf.number_of_layers > 1) { + unsigned int i; - /* Propagate bits saved by dropping the frame to higher - * layers - */ - for (i=cpi->current_layer+1; ioxcf.number_of_layers; i++) - { - LAYER_CONTEXT *lc = &cpi->layer_context[i]; - lc->bits_off_target += (int)(lc->target_bandwidth / - lc->framerate); - if (lc->bits_off_target > lc->maximum_buffer_size) - lc->bits_off_target = lc->maximum_buffer_size; - lc->buffer_level = lc->bits_off_target; - } - } - - return; - } - else - cpi->decimation_count = cpi->decimation_factor; - } - else - cpi->decimation_count = 0; - - /* Decide how big to make the frame */ - if (!vp8_pick_frame_size(cpi)) - { - /*TODO: 2 drop_frame and return code could be put together. */ -#if CONFIG_MULTI_RES_ENCODING - vp8_store_drop_frame_info(cpi); -#endif - cm->current_video_frame++; - cpi->frames_since_key++; - // We advance the temporal pattern for dropped frames. - cpi->temporal_pattern_counter++; - return; - } - - /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full. - * This has a knock on effect on active best quality as well. - * For CBR if the buffer reaches its maximum level then we can no longer - * save up bits for later frames so we might as well use them up - * on the current frame. 
- */ - if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) && - (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) && cpi->buffered_mode) - { - /* Max adjustment is 1/4 */ - int Adjustment = cpi->active_worst_quality / 4; - - if (Adjustment) - { - int buff_lvl_step; - - if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size) - { - buff_lvl_step = (int) - ((cpi->oxcf.maximum_buffer_size - - cpi->oxcf.optimal_buffer_level) / - Adjustment); - - if (buff_lvl_step) - Adjustment = (int) - ((cpi->buffer_level - - cpi->oxcf.optimal_buffer_level) / - buff_lvl_step); - else - Adjustment = 0; - } - - cpi->active_worst_quality -= Adjustment; - - if(cpi->active_worst_quality < cpi->active_best_quality) - cpi->active_worst_quality = cpi->active_best_quality; - } - } - - /* Set an active best quality and if necessary active worst quality - * There is some odd behavior for one pass here that needs attention. - */ - if ( (cpi->pass == 2) || (cpi->ni_frames > 150)) - { - vp8_clear_system_state(); - - Q = cpi->active_worst_quality; - - if ( cm->frame_type == KEY_FRAME ) - { - if ( cpi->pass == 2 ) - { - if (cpi->gfu_boost > 600) - cpi->active_best_quality = kf_low_motion_minq[Q]; - else - cpi->active_best_quality = kf_high_motion_minq[Q]; - - /* Special case for key frames forced because we have reached - * the maximum key frame interval. 
Here force the Q to a range - * based on the ambient Q to reduce the risk of popping - */ - if ( cpi->this_key_frame_forced ) - { - if ( cpi->active_best_quality > cpi->avg_frame_qindex * 7/8) - cpi->active_best_quality = cpi->avg_frame_qindex * 7/8; - else if ( cpi->active_best_quality < cpi->avg_frame_qindex >> 2 ) - cpi->active_best_quality = cpi->avg_frame_qindex >> 2; - } - } - /* One pass more conservative */ - else - cpi->active_best_quality = kf_high_motion_minq[Q]; - } - - else if (cpi->oxcf.number_of_layers==1 && - (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)) - { - /* Use the lower of cpi->active_worst_quality and recent - * average Q as basis for GF/ARF Q limit unless last frame was - * a key frame. - */ - if ( (cpi->frames_since_key > 1) && - (cpi->avg_frame_qindex < cpi->active_worst_quality) ) - { - Q = cpi->avg_frame_qindex; - } - - /* For constrained quality dont allow Q less than the cq level */ - if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) && - (Q < cpi->cq_target_quality) ) - { - Q = cpi->cq_target_quality; - } - - if ( cpi->pass == 2 ) - { - if ( cpi->gfu_boost > 1000 ) - cpi->active_best_quality = gf_low_motion_minq[Q]; - else if ( cpi->gfu_boost < 400 ) - cpi->active_best_quality = gf_high_motion_minq[Q]; - else - cpi->active_best_quality = gf_mid_motion_minq[Q]; - - /* Constrained quality use slightly lower active best. */ - if ( cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY ) - { - cpi->active_best_quality = - cpi->active_best_quality * 15/16; - } - } - /* One pass more conservative */ - else - cpi->active_best_quality = gf_high_motion_minq[Q]; - } - else - { - cpi->active_best_quality = inter_minq[Q]; - - /* For the constant/constrained quality mode we dont want - * q to fall below the cq level. 
- */ - if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) && - (cpi->active_best_quality < cpi->cq_target_quality) ) - { - /* If we are strongly undershooting the target rate in the last - * frames then use the user passed in cq value not the auto - * cq value. - */ - if ( cpi->rolling_actual_bits < cpi->min_frame_bandwidth ) - cpi->active_best_quality = cpi->oxcf.cq_level; - else - cpi->active_best_quality = cpi->cq_target_quality; - } - } - - /* If CBR and the buffer is as full then it is reasonable to allow - * higher quality on the frames to prevent bits just going to waste. + /* Propagate bits saved by dropping the frame to higher + * layers */ - if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) - { - /* Note that the use of >= here elliminates the risk of a devide - * by 0 error in the else if clause - */ - if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) - cpi->active_best_quality = cpi->best_quality; - - else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level) - { - int Fraction = (int) - (((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) - / (cpi->oxcf.maximum_buffer_size - - cpi->oxcf.optimal_buffer_level)); - int min_qadjustment = ((cpi->active_best_quality - - cpi->best_quality) * Fraction) / 128; - - cpi->active_best_quality -= min_qadjustment; - } + for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) { + LAYER_CONTEXT *lc = &cpi->layer_context[i]; + lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate); + if (lc->bits_off_target > lc->maximum_buffer_size) { + lc->bits_off_target = lc->maximum_buffer_size; + } + lc->buffer_level = lc->bits_off_target; } + } + + return; + } else { + cpi->decimation_count = cpi->decimation_factor; } - /* Make sure constrained quality mode limits are adhered to for the first - * few frames of one pass encodes - */ - else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) - { - if ( (cm->frame_type == KEY_FRAME) || - cm->refresh_golden_frame || 
cpi->common.refresh_alt_ref_frame ) - { - cpi->active_best_quality = cpi->best_quality; + } else { + cpi->decimation_count = 0; + } + + /* Decide how big to make the frame */ + if (!vp8_pick_frame_size(cpi)) { +/*TODO: 2 drop_frame and return code could be put together. */ +#if CONFIG_MULTI_RES_ENCODING + vp8_store_drop_frame_info(cpi); +#endif + cm->current_video_frame++; + cpi->frames_since_key++; + // We advance the temporal pattern for dropped frames. + cpi->temporal_pattern_counter++; + return; + } + + /* Reduce active_worst_allowed_q for CBR if our buffer is getting too full. + * This has a knock on effect on active best quality as well. + * For CBR if the buffer reaches its maximum level then we can no longer + * save up bits for later frames so we might as well use them up + * on the current frame. + */ + if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) && + (cpi->buffer_level >= cpi->oxcf.optimal_buffer_level) && + cpi->buffered_mode) { + /* Max adjustment is 1/4 */ + int Adjustment = cpi->active_worst_quality / 4; + + if (Adjustment) { + int buff_lvl_step; + + if (cpi->buffer_level < cpi->oxcf.maximum_buffer_size) { + buff_lvl_step = (int)((cpi->oxcf.maximum_buffer_size - + cpi->oxcf.optimal_buffer_level) / + Adjustment); + + if (buff_lvl_step) { + Adjustment = + (int)((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) / + buff_lvl_step); + } else { + Adjustment = 0; } - else if (cpi->active_best_quality < cpi->cq_target_quality) - { - cpi->active_best_quality = cpi->cq_target_quality; - } - } + } - /* Clip the active best and worst quality values to limits */ - if (cpi->active_worst_quality > cpi->worst_quality) - cpi->active_worst_quality = cpi->worst_quality; + cpi->active_worst_quality -= Adjustment; - if (cpi->active_best_quality < cpi->best_quality) - cpi->active_best_quality = cpi->best_quality; - - if ( cpi->active_worst_quality < cpi->active_best_quality ) + if (cpi->active_worst_quality < cpi->active_best_quality) { 
cpi->active_worst_quality = cpi->active_best_quality; - - /* Determine initial Q to try */ - Q = vp8_regulate_q(cpi, cpi->this_frame_target); - -#if !CONFIG_REALTIME_ONLY - - /* Set highest allowed value for Zbin over quant */ - if (cm->frame_type == KEY_FRAME) - zbin_oq_high = 0; - else if ((cpi->oxcf.number_of_layers == 1) && ((cm->refresh_alt_ref_frame || - (cm->refresh_golden_frame && !cpi->source_alt_ref_active)))) - { - zbin_oq_high = 16; - } - else - zbin_oq_high = ZBIN_OQ_MAX; -#endif - - /* Setup background Q adjustment for error resilient mode. - * For multi-layer encodes only enable this for the base layer. - */ - if (cpi->cyclic_refresh_mode_enabled) - { - // Special case for screen_content_mode with golden frame updates. - int disable_cr_gf = (cpi->oxcf.screen_content_mode == 2 && - cm->refresh_golden_frame); - if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf) - cyclic_background_refresh(cpi, Q, 0); - else - disable_segmentation(cpi); - } - - vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit); - -#if !CONFIG_REALTIME_ONLY - /* Limit Q range for the adaptive loop. */ - bottom_index = cpi->active_best_quality; - top_index = cpi->active_worst_quality; - q_low = cpi->active_best_quality; - q_high = cpi->active_worst_quality; -#endif - - vp8_save_coding_context(cpi); - - loop_count = 0; - - scale_and_extend_source(cpi->un_scaled_source, cpi); - -#if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC - // Option to apply spatial blur under the aggressive or adaptive - // (temporal denoising) mode. - if (cpi->oxcf.noise_sensitivity >= 3) { - if (cpi->denoiser.denoise_pars.spatial_blur != 0) { - vp8_de_noise(cm, cpi->Source, cpi->Source, - cpi->denoiser.denoise_pars.spatial_blur, 1, 0, 0); } } + } + + /* Set an active best quality and if necessary active worst quality + * There is some odd behavior for one pass here that needs attention. 
+ */ + if ((cpi->pass == 2) || (cpi->ni_frames > 150)) { + vp8_clear_system_state(); + + Q = cpi->active_worst_quality; + + if (cm->frame_type == KEY_FRAME) { + if (cpi->pass == 2) { + if (cpi->gfu_boost > 600) { + cpi->active_best_quality = kf_low_motion_minq[Q]; + } else { + cpi->active_best_quality = kf_high_motion_minq[Q]; + } + + /* Special case for key frames forced because we have reached + * the maximum key frame interval. Here force the Q to a range + * based on the ambient Q to reduce the risk of popping + */ + if (cpi->this_key_frame_forced) { + if (cpi->active_best_quality > cpi->avg_frame_qindex * 7 / 8) { + cpi->active_best_quality = cpi->avg_frame_qindex * 7 / 8; + } else if (cpi->active_best_qualityavg_frame_qindex>> 2) { + cpi->active_best_quality = cpi->avg_frame_qindex >> 2; + } + } + } + /* One pass more conservative */ + else { + cpi->active_best_quality = kf_high_motion_minq[Q]; + } + } + + else if (cpi->oxcf.number_of_layers == 1 && + (cm->refresh_golden_frame || cpi->common.refresh_alt_ref_frame)) { + /* Use the lower of cpi->active_worst_quality and recent + * average Q as basis for GF/ARF Q limit unless last frame was + * a key frame. + */ + if ((cpi->frames_since_key > 1) && + (cpi->avg_frame_qindex < cpi->active_worst_quality)) { + Q = cpi->avg_frame_qindex; + } + + /* For constrained quality dont allow Q less than the cq level */ + if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) && + (Q < cpi->cq_target_quality)) { + Q = cpi->cq_target_quality; + } + + if (cpi->pass == 2) { + if (cpi->gfu_boost > 1000) { + cpi->active_best_quality = gf_low_motion_minq[Q]; + } else if (cpi->gfu_boost < 400) { + cpi->active_best_quality = gf_high_motion_minq[Q]; + } else { + cpi->active_best_quality = gf_mid_motion_minq[Q]; + } + + /* Constrained quality use slightly lower active best. 
*/ + if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) { + cpi->active_best_quality = cpi->active_best_quality * 15 / 16; + } + } + /* One pass more conservative */ + else { + cpi->active_best_quality = gf_high_motion_minq[Q]; + } + } else { + cpi->active_best_quality = inter_minq[Q]; + + /* For the constant/constrained quality mode we dont want + * q to fall below the cq level. + */ + if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) && + (cpi->active_best_quality < cpi->cq_target_quality)) { + /* If we are strongly undershooting the target rate in the last + * frames then use the user passed in cq value not the auto + * cq value. + */ + if (cpi->rolling_actual_bits < cpi->min_frame_bandwidth) { + cpi->active_best_quality = cpi->oxcf.cq_level; + } else { + cpi->active_best_quality = cpi->cq_target_quality; + } + } + } + + /* If CBR and the buffer is as full then it is reasonable to allow + * higher quality on the frames to prevent bits just going to waste. + */ + if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + /* Note that the use of >= here elliminates the risk of a devide + * by 0 error in the else if clause + */ + if (cpi->buffer_level >= cpi->oxcf.maximum_buffer_size) { + cpi->active_best_quality = cpi->best_quality; + + } else if (cpi->buffer_level > cpi->oxcf.optimal_buffer_level) { + int Fraction = + (int)(((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) * 128) / + (cpi->oxcf.maximum_buffer_size - + cpi->oxcf.optimal_buffer_level)); + int min_qadjustment = + ((cpi->active_best_quality - cpi->best_quality) * Fraction) / 128; + + cpi->active_best_quality -= min_qadjustment; + } + } + } + /* Make sure constrained quality mode limits are adhered to for the first + * few frames of one pass encodes + */ + else if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) { + if ((cm->frame_type == KEY_FRAME) || cm->refresh_golden_frame || + cpi->common.refresh_alt_ref_frame) { + cpi->active_best_quality = cpi->best_quality; + } else if 
(cpi->active_best_quality < cpi->cq_target_quality) { + cpi->active_best_quality = cpi->cq_target_quality; + } + } + + /* Clip the active best and worst quality values to limits */ + if (cpi->active_worst_quality > cpi->worst_quality) { + cpi->active_worst_quality = cpi->worst_quality; + } + + if (cpi->active_best_quality < cpi->best_quality) { + cpi->active_best_quality = cpi->best_quality; + } + + if (cpi->active_worst_quality < cpi->active_best_quality) { + cpi->active_worst_quality = cpi->active_best_quality; + } + + /* Determine initial Q to try */ + Q = vp8_regulate_q(cpi, cpi->this_frame_target); + +#if !CONFIG_REALTIME_ONLY + + /* Set highest allowed value for Zbin over quant */ + if (cm->frame_type == KEY_FRAME) { + zbin_oq_high = 0; + } else if ((cpi->oxcf.number_of_layers == 1) && + ((cm->refresh_alt_ref_frame || + (cm->refresh_golden_frame && !cpi->source_alt_ref_active)))) { + zbin_oq_high = 16; + } else { + zbin_oq_high = ZBIN_OQ_MAX; + } +#endif + + /* Setup background Q adjustment for error resilient mode. + * For multi-layer encodes only enable this for the base layer. + */ + if (cpi->cyclic_refresh_mode_enabled) { + // Special case for screen_content_mode with golden frame updates. + int disable_cr_gf = + (cpi->oxcf.screen_content_mode == 2 && cm->refresh_golden_frame); + if (cpi->current_layer == 0 && cpi->force_maxqp == 0 && !disable_cr_gf) { + cyclic_background_refresh(cpi, Q, 0); + } else { + disable_segmentation(cpi); + } + } + + vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, + &frame_over_shoot_limit); + +#if !CONFIG_REALTIME_ONLY + /* Limit Q range for the adaptive loop. 
*/ + bottom_index = cpi->active_best_quality; + top_index = cpi->active_worst_quality; + q_low = cpi->active_best_quality; + q_high = cpi->active_worst_quality; +#endif + + vp8_save_coding_context(cpi); + + loop_count = 0; + + scale_and_extend_source(cpi->un_scaled_source, cpi); + +#if CONFIG_TEMPORAL_DENOISING && CONFIG_POSTPROC + // Option to apply spatial blur under the aggressive or adaptive + // (temporal denoising) mode. + if (cpi->oxcf.noise_sensitivity >= 3) { + if (cpi->denoiser.denoise_pars.spatial_blur != 0) { + vp8_de_noise(cm, cpi->Source, cpi->Source, + cpi->denoiser.denoise_pars.spatial_blur, 1, 0, 0); + } + } #endif #if !(CONFIG_REALTIME_ONLY) && CONFIG_POSTPROC && !(CONFIG_TEMPORAL_DENOISING) - if (cpi->oxcf.noise_sensitivity > 0) - { - unsigned char *src; - int l = 0; + if (cpi->oxcf.noise_sensitivity > 0) { + unsigned char *src; + int l = 0; - switch (cpi->oxcf.noise_sensitivity) - { - case 1: - l = 20; - break; - case 2: - l = 40; - break; - case 3: - l = 60; - break; - case 4: - l = 80; - break; - case 5: - l = 100; - break; - case 6: - l = 150; - break; - } - - - if (cm->frame_type == KEY_FRAME) - { - vp8_de_noise(cm, cpi->Source, cpi->Source, l , 1, 0, 1); - } - else - { - vp8_de_noise(cm, cpi->Source, cpi->Source, l , 1, 0, 1); - - src = cpi->Source->y_buffer; - - if (cpi->Source->y_stride < 0) - { - src += cpi->Source->y_stride * (cpi->Source->y_height - 1); - } - } + switch (cpi->oxcf.noise_sensitivity) { + case 1: l = 20; break; + case 2: l = 40; break; + case 3: l = 60; break; + case 4: l = 80; break; + case 5: l = 100; break; + case 6: l = 150; break; } -#endif + if (cm->frame_type == KEY_FRAME) { + vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1); + } else { + vp8_de_noise(cm, cpi->Source, cpi->Source, l, 1, 0, 1); + src = cpi->Source->y_buffer; + + if (cpi->Source->y_stride < 0) { + src += cpi->Source->y_stride * (cpi->Source->y_height - 1); + } + } + } + +#endif #ifdef OUTPUT_YUV_SRC - vp8_write_yuv_frame(yuv_file, 
cpi->Source); + vp8_write_yuv_frame(yuv_file, cpi->Source); #endif - do - { - vp8_clear_system_state(); + do { + vp8_clear_system_state(); - vp8_set_quantizer(cpi, Q); + vp8_set_quantizer(cpi, Q); - /* setup skip prob for costing in mode/mv decision */ - if (cpi->common.mb_no_coeff_skip) - { - cpi->prob_skip_false = cpi->base_skip_false_prob[Q]; + /* setup skip prob for costing in mode/mv decision */ + if (cpi->common.mb_no_coeff_skip) { + cpi->prob_skip_false = cpi->base_skip_false_prob[Q]; - if (cm->frame_type != KEY_FRAME) - { - if (cpi->common.refresh_alt_ref_frame) - { - if (cpi->last_skip_false_probs[2] != 0) - cpi->prob_skip_false = cpi->last_skip_false_probs[2]; + if (cm->frame_type != KEY_FRAME) { + if (cpi->common.refresh_alt_ref_frame) { + if (cpi->last_skip_false_probs[2] != 0) { + cpi->prob_skip_false = cpi->last_skip_false_probs[2]; + } - /* - if(cpi->last_skip_false_probs[2]!=0 && abs(Q- cpi->last_skip_probs_q[2])<=16 ) - cpi->prob_skip_false = cpi->last_skip_false_probs[2]; - else if (cpi->last_skip_false_probs[2]!=0) - cpi->prob_skip_false = (cpi->last_skip_false_probs[2] + cpi->prob_skip_false ) / 2; - */ - } - else if (cpi->common.refresh_golden_frame) - { - if (cpi->last_skip_false_probs[1] != 0) - cpi->prob_skip_false = cpi->last_skip_false_probs[1]; + /* + if(cpi->last_skip_false_probs[2]!=0 && abs(Q- + cpi->last_skip_probs_q[2])<=16 ) + cpi->prob_skip_false = cpi->last_skip_false_probs[2]; + else if (cpi->last_skip_false_probs[2]!=0) + cpi->prob_skip_false = (cpi->last_skip_false_probs[2] + + cpi->prob_skip_false ) / 2; + */ + } else if (cpi->common.refresh_golden_frame) { + if (cpi->last_skip_false_probs[1] != 0) { + cpi->prob_skip_false = cpi->last_skip_false_probs[1]; + } - /* - if(cpi->last_skip_false_probs[1]!=0 && abs(Q- cpi->last_skip_probs_q[1])<=16 ) - cpi->prob_skip_false = cpi->last_skip_false_probs[1]; - else if (cpi->last_skip_false_probs[1]!=0) - cpi->prob_skip_false = (cpi->last_skip_false_probs[1] + cpi->prob_skip_false ) / 2; 
- */ - } - else - { - if (cpi->last_skip_false_probs[0] != 0) - cpi->prob_skip_false = cpi->last_skip_false_probs[0]; + /* + if(cpi->last_skip_false_probs[1]!=0 && abs(Q- + cpi->last_skip_probs_q[1])<=16 ) + cpi->prob_skip_false = cpi->last_skip_false_probs[1]; + else if (cpi->last_skip_false_probs[1]!=0) + cpi->prob_skip_false = (cpi->last_skip_false_probs[1] + + cpi->prob_skip_false ) / 2; + */ + } else { + if (cpi->last_skip_false_probs[0] != 0) { + cpi->prob_skip_false = cpi->last_skip_false_probs[0]; + } - /* - if(cpi->last_skip_false_probs[0]!=0 && abs(Q- cpi->last_skip_probs_q[0])<=16 ) - cpi->prob_skip_false = cpi->last_skip_false_probs[0]; - else if(cpi->last_skip_false_probs[0]!=0) - cpi->prob_skip_false = (cpi->last_skip_false_probs[0] + cpi->prob_skip_false ) / 2; - */ - } + /* + if(cpi->last_skip_false_probs[0]!=0 && abs(Q- + cpi->last_skip_probs_q[0])<=16 ) + cpi->prob_skip_false = cpi->last_skip_false_probs[0]; + else if(cpi->last_skip_false_probs[0]!=0) + cpi->prob_skip_false = (cpi->last_skip_false_probs[0] + + cpi->prob_skip_false ) / 2; + */ + } - /* as this is for cost estimate, let's make sure it does not - * go extreme eitehr way - */ - if (cpi->prob_skip_false < 5) - cpi->prob_skip_false = 5; + /* as this is for cost estimate, let's make sure it does not + * go extreme eitehr way + */ + if (cpi->prob_skip_false < 5) cpi->prob_skip_false = 5; - if (cpi->prob_skip_false > 250) - cpi->prob_skip_false = 250; + if (cpi->prob_skip_false > 250) cpi->prob_skip_false = 250; - if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref) - cpi->prob_skip_false = 1; - } + if (cpi->oxcf.number_of_layers == 1 && cpi->is_src_frame_alt_ref) { + cpi->prob_skip_false = 1; + } + } #if 0 @@ -4343,352 +3874,335 @@ static void encode_frame_to_data_rate } #endif + } + if (cm->frame_type == KEY_FRAME) { + if (resize_key_frame(cpi)) { + /* If the frame size has changed, need to reset Q, quantizer, + * and background refresh. 
+ */ + Q = vp8_regulate_q(cpi, cpi->this_frame_target); + if (cpi->cyclic_refresh_mode_enabled) { + if (cpi->current_layer == 0) { + cyclic_background_refresh(cpi, Q, 0); + } else { + disable_segmentation(cpi); + } } + // Reset the zero_last counter to 0 on key frame. + memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols); + memset(cpi->consec_zero_last_mvbias, 0, + (cpi->common.mb_rows * cpi->common.mb_cols)); + vp8_set_quantizer(cpi, Q); + } - if (cm->frame_type == KEY_FRAME) - { - if(resize_key_frame(cpi)) - { - /* If the frame size has changed, need to reset Q, quantizer, - * and background refresh. - */ - Q = vp8_regulate_q(cpi, cpi->this_frame_target); - if (cpi->cyclic_refresh_mode_enabled) - { - if (cpi->current_layer==0) - cyclic_background_refresh(cpi, Q, 0); - else - disable_segmentation(cpi); - } - // Reset the zero_last counter to 0 on key frame. - memset(cpi->consec_zero_last, 0, cm->mb_rows * cm->mb_cols); - memset(cpi->consec_zero_last_mvbias, 0, - (cpi->common.mb_rows * cpi->common.mb_cols)); - vp8_set_quantizer(cpi, Q); - } - - vp8_setup_key_frame(cpi); - } - - + vp8_setup_key_frame(cpi); + } #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - { - if(cpi->oxcf.error_resilient_mode) - cm->refresh_entropy_probs = 0; + { + if (cpi->oxcf.error_resilient_mode) cm->refresh_entropy_probs = 0; - if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) - { - if (cm->frame_type == KEY_FRAME) - cm->refresh_entropy_probs = 1; - } + if (cpi->oxcf.error_resilient_mode & VPX_ERROR_RESILIENT_PARTITIONS) { + if (cm->frame_type == KEY_FRAME) cm->refresh_entropy_probs = 1; + } - if (cm->refresh_entropy_probs == 0) - { - /* save a copy for later refresh */ - memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc)); - } + if (cm->refresh_entropy_probs == 0) { + /* save a copy for later refresh */ + memcpy(&cm->lfc, &cm->fc, sizeof(cm->fc)); + } - vp8_update_coef_context(cpi); + vp8_update_coef_context(cpi); - vp8_update_coef_probs(cpi); + 
vp8_update_coef_probs(cpi); - /* transform / motion compensation build reconstruction frame - * +pack coef partitions - */ - vp8_encode_frame(cpi); + /* transform / motion compensation build reconstruction frame + * +pack coef partitions + */ + vp8_encode_frame(cpi); - /* cpi->projected_frame_size is not needed for RT mode */ - } -#else - /* transform / motion compensation build reconstruction frame */ - vp8_encode_frame(cpi); - - if (cpi->oxcf.screen_content_mode == 2) { - if (vp8_drop_encodedframe_overshoot(cpi, Q)) - return; - } - - cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi); - cpi->projected_frame_size = (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0; -#endif - vp8_clear_system_state(); - - /* Test to see if the stats generated for this frame indicate that - * we should have coded a key frame (assuming that we didn't)! - */ - - if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME - && cpi->compressor_speed != 2) - { -#if !CONFIG_REALTIME_ONLY - if (decide_key_frame(cpi)) - { - /* Reset all our sizing numbers and recode */ - cm->frame_type = KEY_FRAME; - - vp8_pick_frame_size(cpi); - - /* Clear the Alt reference frame active flag when we have - * a key frame - */ - cpi->source_alt_ref_active = 0; - - // Set the loop filter deltas and segmentation map update - setup_features(cpi); - - vp8_restore_coding_context(cpi); - - Q = vp8_regulate_q(cpi, cpi->this_frame_target); - - vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, &frame_over_shoot_limit); - - /* Limit Q range for the adaptive loop. */ - bottom_index = cpi->active_best_quality; - top_index = cpi->active_worst_quality; - q_low = cpi->active_best_quality; - q_high = cpi->active_worst_quality; - - loop_count++; - Loop = 1; - - continue; - } -#endif - } - - vp8_clear_system_state(); - - if (frame_over_shoot_limit == 0) - frame_over_shoot_limit = 1; - - /* Are we are overshooting and up against the limit of active max Q. 
*/ - if (((cpi->pass != 2) || (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) && - (Q == cpi->active_worst_quality) && - (cpi->active_worst_quality < cpi->worst_quality) && - (cpi->projected_frame_size > frame_over_shoot_limit)) - { - int over_size_percent = ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) / frame_over_shoot_limit; - - /* If so is there any scope for relaxing it */ - while ((cpi->active_worst_quality < cpi->worst_quality) && (over_size_percent > 0)) - { - cpi->active_worst_quality++; - /* Assume 1 qstep = about 4% on frame size. */ - over_size_percent = (int)(over_size_percent * 0.96); - } -#if !CONFIG_REALTIME_ONLY - top_index = cpi->active_worst_quality; -#endif // !CONFIG_REALTIME_ONLY - /* If we have updated the active max Q do not call - * vp8_update_rate_correction_factors() this loop. - */ - active_worst_qchanged = 1; - } - else - active_worst_qchanged = 0; - -#if !CONFIG_REALTIME_ONLY - /* Special case handling for forced key frames */ - if ( (cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced ) - { - int last_q = Q; - int kf_err = vp8_calc_ss_err(cpi->Source, - &cm->yv12_fb[cm->new_fb_idx]); - - /* The key frame is not good enough */ - if ( kf_err > ((cpi->ambient_err * 7) >> 3) ) - { - /* Lower q_high */ - q_high = (Q > q_low) ? (Q - 1) : q_low; - - /* Adjust Q */ - Q = (q_high + q_low) >> 1; - } - /* The key frame is much better than the previous frame */ - else if ( kf_err < (cpi->ambient_err >> 1) ) - { - /* Raise q_low */ - q_low = (Q < q_high) ? (Q + 1) : q_high; - - /* Adjust Q */ - Q = (q_high + q_low + 1) >> 1; - } - - /* Clamp Q to upper and lower limits: */ - if (Q > q_high) - Q = q_high; - else if (Q < q_low) - Q = q_low; - - Loop = Q != last_q; - } - - /* Is the projected frame size out of range and are we allowed - * to attempt to recode. 
- */ - else if ( recode_loop_test( cpi, - frame_over_shoot_limit, frame_under_shoot_limit, - Q, top_index, bottom_index ) ) - { - int last_q = Q; - int Retries = 0; - - /* Frame size out of permitted range. Update correction factor - * & compute new Q to try... - */ - - /* Frame is too large */ - if (cpi->projected_frame_size > cpi->this_frame_target) - { - /* Raise Qlow as to at least the current value */ - q_low = (Q < q_high) ? (Q + 1) : q_high; - - /* If we are using over quant do the same for zbin_oq_low */ - if (cpi->mb.zbin_over_quant > 0) - zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) ? - (cpi->mb.zbin_over_quant + 1) : zbin_oq_high; - - if (undershoot_seen) - { - /* Update rate_correction_factor unless - * cpi->active_worst_quality has changed. - */ - if (!active_worst_qchanged) - vp8_update_rate_correction_factors(cpi, 1); - - Q = (q_high + q_low + 1) / 2; - - /* Adjust cpi->zbin_over_quant (only allowed when Q - * is max) - */ - if (Q < MAXQ) - cpi->mb.zbin_over_quant = 0; - else - { - zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) ? - (cpi->mb.zbin_over_quant + 1) : zbin_oq_high; - cpi->mb.zbin_over_quant = - (zbin_oq_high + zbin_oq_low) / 2; - } - } - else - { - /* Update rate_correction_factor unless - * cpi->active_worst_quality has changed. - */ - if (!active_worst_qchanged) - vp8_update_rate_correction_factors(cpi, 0); - - Q = vp8_regulate_q(cpi, cpi->this_frame_target); - - while (((Q < q_low) || - (cpi->mb.zbin_over_quant < zbin_oq_low)) && - (Retries < 10)) - { - vp8_update_rate_correction_factors(cpi, 0); - Q = vp8_regulate_q(cpi, cpi->this_frame_target); - Retries ++; - } - } - - overshoot_seen = 1; - } - /* Frame is too small */ - else - { - if (cpi->mb.zbin_over_quant == 0) - /* Lower q_high if not using over quant */ - q_high = (Q > q_low) ? (Q - 1) : q_low; - else - /* else lower zbin_oq_high */ - zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low) ? 
- (cpi->mb.zbin_over_quant - 1) : zbin_oq_low; - - if (overshoot_seen) - { - /* Update rate_correction_factor unless - * cpi->active_worst_quality has changed. - */ - if (!active_worst_qchanged) - vp8_update_rate_correction_factors(cpi, 1); - - Q = (q_high + q_low) / 2; - - /* Adjust cpi->zbin_over_quant (only allowed when Q - * is max) - */ - if (Q < MAXQ) - cpi->mb.zbin_over_quant = 0; - else - cpi->mb.zbin_over_quant = - (zbin_oq_high + zbin_oq_low) / 2; - } - else - { - /* Update rate_correction_factor unless - * cpi->active_worst_quality has changed. - */ - if (!active_worst_qchanged) - vp8_update_rate_correction_factors(cpi, 0); - - Q = vp8_regulate_q(cpi, cpi->this_frame_target); - - /* Special case reset for qlow for constrained quality. - * This should only trigger where there is very substantial - * undershoot on a frame and the auto cq level is above - * the user passsed in value. - */ - if ( (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) && - (Q < q_low) ) - { - q_low = Q; - } - - while (((Q > q_high) || - (cpi->mb.zbin_over_quant > zbin_oq_high)) && - (Retries < 10)) - { - vp8_update_rate_correction_factors(cpi, 0); - Q = vp8_regulate_q(cpi, cpi->this_frame_target); - Retries ++; - } - } - - undershoot_seen = 1; - } - - /* Clamp Q to upper and lower limits: */ - if (Q > q_high) - Q = q_high; - else if (Q < q_low) - Q = q_low; - - /* Clamp cpi->zbin_over_quant */ - cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low) ? - zbin_oq_low : (cpi->mb.zbin_over_quant > zbin_oq_high) ? 
- zbin_oq_high : cpi->mb.zbin_over_quant; - - Loop = Q != last_q; - } - else -#endif - Loop = 0; - - if (cpi->is_src_frame_alt_ref) - Loop = 0; - - if (Loop == 1) - { - vp8_restore_coding_context(cpi); - loop_count++; -#if CONFIG_INTERNAL_STATS - cpi->tot_recode_hits++; -#endif - } + /* cpi->projected_frame_size is not needed for RT mode */ } - while (Loop == 1); +#else + /* transform / motion compensation build reconstruction frame */ + vp8_encode_frame(cpi); + + if (cpi->oxcf.screen_content_mode == 2) { + if (vp8_drop_encodedframe_overshoot(cpi, Q)) return; + } + + cpi->projected_frame_size -= vp8_estimate_entropy_savings(cpi); + cpi->projected_frame_size = + (cpi->projected_frame_size > 0) ? cpi->projected_frame_size : 0; +#endif + vp8_clear_system_state(); + + /* Test to see if the stats generated for this frame indicate that + * we should have coded a key frame (assuming that we didn't)! + */ + + if (cpi->pass != 2 && cpi->oxcf.auto_key && cm->frame_type != KEY_FRAME && + cpi->compressor_speed != 2) { +#if !CONFIG_REALTIME_ONLY + if (decide_key_frame(cpi)) { + /* Reset all our sizing numbers and recode */ + cm->frame_type = KEY_FRAME; + + vp8_pick_frame_size(cpi); + + /* Clear the Alt reference frame active flag when we have + * a key frame + */ + cpi->source_alt_ref_active = 0; + + // Set the loop filter deltas and segmentation map update + setup_features(cpi); + + vp8_restore_coding_context(cpi); + + Q = vp8_regulate_q(cpi, cpi->this_frame_target); + + vp8_compute_frame_size_bounds(cpi, &frame_under_shoot_limit, + &frame_over_shoot_limit); + + /* Limit Q range for the adaptive loop. 
*/ + bottom_index = cpi->active_best_quality; + top_index = cpi->active_worst_quality; + q_low = cpi->active_best_quality; + q_high = cpi->active_worst_quality; + + loop_count++; + Loop = 1; + + continue; + } +#endif + } + + vp8_clear_system_state(); + + if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1; + + /* Are we are overshooting and up against the limit of active max Q. */ + if (((cpi->pass != 2) || + (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER)) && + (Q == cpi->active_worst_quality) && + (cpi->active_worst_quality < cpi->worst_quality) && + (cpi->projected_frame_size > frame_over_shoot_limit)) { + int over_size_percent = + ((cpi->projected_frame_size - frame_over_shoot_limit) * 100) / + frame_over_shoot_limit; + + /* If so is there any scope for relaxing it */ + while ((cpi->active_worst_quality < cpi->worst_quality) && + (over_size_percent > 0)) { + cpi->active_worst_quality++; + /* Assume 1 qstep = about 4% on frame size. */ + over_size_percent = (int)(over_size_percent * 0.96); + } +#if !CONFIG_REALTIME_ONLY + top_index = cpi->active_worst_quality; +#endif // !CONFIG_REALTIME_ONLY + /* If we have updated the active max Q do not call + * vp8_update_rate_correction_factors() this loop. + */ + active_worst_qchanged = 1; + } else { + active_worst_qchanged = 0; + } + +#if CONFIG_REALTIME_ONLY + Loop = 0; +#else + /* Special case handling for forced key frames */ + if ((cm->frame_type == KEY_FRAME) && cpi->this_key_frame_forced) { + int last_q = Q; + int kf_err = vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]); + + /* The key frame is not good enough */ + if (kf_err > ((cpi->ambient_err * 7) >> 3)) { + /* Lower q_high */ + q_high = (Q > q_low) ? (Q - 1) : q_low; + + /* Adjust Q */ + Q = (q_high + q_low) >> 1; + } + /* The key frame is much better than the previous frame */ + else if (kf_err < (cpi->ambient_err >> 1)) { + /* Raise q_low */ + q_low = (Q < q_high) ? 
(Q + 1) : q_high; + + /* Adjust Q */ + Q = (q_high + q_low + 1) >> 1; + } + + /* Clamp Q to upper and lower limits: */ + if (Q > q_high) { + Q = q_high; + } else if (Q < q_low) { + Q = q_low; + } + + Loop = Q != last_q; + } + + /* Is the projected frame size out of range and are we allowed + * to attempt to recode. + */ + else if (recode_loop_test(cpi, frame_over_shoot_limit, + frame_under_shoot_limit, Q, top_index, + bottom_index)) { + int last_q = Q; + int Retries = 0; + + /* Frame size out of permitted range. Update correction factor + * & compute new Q to try... + */ + + /* Frame is too large */ + if (cpi->projected_frame_size > cpi->this_frame_target) { + /* Raise Qlow as to at least the current value */ + q_low = (Q < q_high) ? (Q + 1) : q_high; + + /* If we are using over quant do the same for zbin_oq_low */ + if (cpi->mb.zbin_over_quant > 0) { + zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) + ? (cpi->mb.zbin_over_quant + 1) + : zbin_oq_high; + } + + if (undershoot_seen) { + /* Update rate_correction_factor unless + * cpi->active_worst_quality has changed. + */ + if (!active_worst_qchanged) { + vp8_update_rate_correction_factors(cpi, 1); + } + + Q = (q_high + q_low + 1) / 2; + + /* Adjust cpi->zbin_over_quant (only allowed when Q + * is max) + */ + if (Q < MAXQ) { + cpi->mb.zbin_over_quant = 0; + } else { + zbin_oq_low = (cpi->mb.zbin_over_quant < zbin_oq_high) + ? (cpi->mb.zbin_over_quant + 1) + : zbin_oq_high; + cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2; + } + } else { + /* Update rate_correction_factor unless + * cpi->active_worst_quality has changed. 
+ */ + if (!active_worst_qchanged) { + vp8_update_rate_correction_factors(cpi, 0); + } + + Q = vp8_regulate_q(cpi, cpi->this_frame_target); + + while (((Q < q_low) || (cpi->mb.zbin_over_quant < zbin_oq_low)) && + (Retries < 10)) { + vp8_update_rate_correction_factors(cpi, 0); + Q = vp8_regulate_q(cpi, cpi->this_frame_target); + Retries++; + } + } + + overshoot_seen = 1; + } + /* Frame is too small */ + else { + if (cpi->mb.zbin_over_quant == 0) { + /* Lower q_high if not using over quant */ + q_high = (Q > q_low) ? (Q - 1) : q_low; + } else { + /* else lower zbin_oq_high */ + zbin_oq_high = (cpi->mb.zbin_over_quant > zbin_oq_low) + ? (cpi->mb.zbin_over_quant - 1) + : zbin_oq_low; + } + + if (overshoot_seen) { + /* Update rate_correction_factor unless + * cpi->active_worst_quality has changed. + */ + if (!active_worst_qchanged) { + vp8_update_rate_correction_factors(cpi, 1); + } + + Q = (q_high + q_low) / 2; + + /* Adjust cpi->zbin_over_quant (only allowed when Q + * is max) + */ + if (Q < MAXQ) { + cpi->mb.zbin_over_quant = 0; + } else { + cpi->mb.zbin_over_quant = (zbin_oq_high + zbin_oq_low) / 2; + } + } else { + /* Update rate_correction_factor unless + * cpi->active_worst_quality has changed. + */ + if (!active_worst_qchanged) { + vp8_update_rate_correction_factors(cpi, 0); + } + + Q = vp8_regulate_q(cpi, cpi->this_frame_target); + + /* Special case reset for qlow for constrained quality. + * This should only trigger where there is very substantial + * undershoot on a frame and the auto cq level is above + * the user passsed in value. 
+ */ + if ((cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) && + (Q < q_low)) { + q_low = Q; + } + + while (((Q > q_high) || (cpi->mb.zbin_over_quant > zbin_oq_high)) && + (Retries < 10)) { + vp8_update_rate_correction_factors(cpi, 0); + Q = vp8_regulate_q(cpi, cpi->this_frame_target); + Retries++; + } + } + + undershoot_seen = 1; + } + + /* Clamp Q to upper and lower limits: */ + if (Q > q_high) { + Q = q_high; + } else if (Q < q_low) { + Q = q_low; + } + + /* Clamp cpi->zbin_over_quant */ + cpi->mb.zbin_over_quant = (cpi->mb.zbin_over_quant < zbin_oq_low) + ? zbin_oq_low + : (cpi->mb.zbin_over_quant > zbin_oq_high) + ? zbin_oq_high + : cpi->mb.zbin_over_quant; + + Loop = Q != last_q; + } else { + Loop = 0; + } +#endif // CONFIG_REALTIME_ONLY + + if (cpi->is_src_frame_alt_ref) Loop = 0; + + if (Loop == 1) { + vp8_restore_coding_context(cpi); + loop_count++; +#if CONFIG_INTERNAL_STATS + cpi->tot_recode_hits++; +#endif + } + } while (Loop == 1); #if 0 /* Experimental code for lagged and one pass @@ -4701,86 +4215,84 @@ static void encode_frame_to_data_rate } #endif - /* Special case code to reduce pulsing when key frames are forced at a - * fixed interval. Note the reconstruction error if it is the frame before - * the force key frame - */ - if ( cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0) ) - { - cpi->ambient_err = vp8_calc_ss_err(cpi->Source, - &cm->yv12_fb[cm->new_fb_idx]); - } + /* Special case code to reduce pulsing when key frames are forced at a + * fixed interval. Note the reconstruction error if it is the frame before + * the force key frame + */ + if (cpi->next_key_frame_forced && (cpi->twopass.frames_to_key == 0)) { + cpi->ambient_err = + vp8_calc_ss_err(cpi->Source, &cm->yv12_fb[cm->new_fb_idx]); + } - /* This frame's MVs are saved and will be used in next frame's MV predictor. - * Last frame has one more line(add to bottom) and one more column(add to - * right) than cm->mip. The edge elements are initialized to 0. 
- */ +/* This frame's MVs are saved and will be used in next frame's MV predictor. + * Last frame has one more line(add to bottom) and one more column(add to + * right) than cm->mip. The edge elements are initialized to 0. + */ #if CONFIG_MULTI_RES_ENCODING - if(!cpi->oxcf.mr_encoder_id && cm->show_frame) + if (!cpi->oxcf.mr_encoder_id && cm->show_frame) #else - if(cm->show_frame) /* do not save for altref frame */ + if (cm->show_frame) /* do not save for altref frame */ #endif - { - int mb_row; - int mb_col; - /* Point to beginning of allocated MODE_INFO arrays. */ - MODE_INFO *tmp = cm->mip; + { + int mb_row; + int mb_col; + /* Point to beginning of allocated MODE_INFO arrays. */ + MODE_INFO *tmp = cm->mip; - if(cm->frame_type != KEY_FRAME) - { - for (mb_row = 0; mb_row < cm->mb_rows+1; mb_row ++) - { - for (mb_col = 0; mb_col < cm->mb_cols+1; mb_col ++) - { - if(tmp->mbmi.ref_frame != INTRA_FRAME) - cpi->lfmv[mb_col + mb_row*(cm->mode_info_stride+1)].as_int = tmp->mbmi.mv.as_int; + if (cm->frame_type != KEY_FRAME) { + for (mb_row = 0; mb_row < cm->mb_rows + 1; ++mb_row) { + for (mb_col = 0; mb_col < cm->mb_cols + 1; ++mb_col) { + if (tmp->mbmi.ref_frame != INTRA_FRAME) { + cpi->lfmv[mb_col + mb_row * (cm->mode_info_stride + 1)].as_int = + tmp->mbmi.mv.as_int; + } - cpi->lf_ref_frame_sign_bias[mb_col + mb_row*(cm->mode_info_stride+1)] = cm->ref_frame_sign_bias[tmp->mbmi.ref_frame]; - cpi->lf_ref_frame[mb_col + mb_row*(cm->mode_info_stride+1)] = tmp->mbmi.ref_frame; - tmp++; - } - } + cpi->lf_ref_frame_sign_bias[mb_col + + mb_row * (cm->mode_info_stride + 1)] = + cm->ref_frame_sign_bias[tmp->mbmi.ref_frame]; + cpi->lf_ref_frame[mb_col + mb_row * (cm->mode_info_stride + 1)] = + tmp->mbmi.ref_frame; + tmp++; } + } } + } - /* Count last ref frame 0,0 usage on current encoded frame. */ - { - int mb_row; - int mb_col; - /* Point to beginning of MODE_INFO arrays. */ - MODE_INFO *tmp = cm->mi; + /* Count last ref frame 0,0 usage on current encoded frame. 
*/ + { + int mb_row; + int mb_col; + /* Point to beginning of MODE_INFO arrays. */ + MODE_INFO *tmp = cm->mi; - cpi->zeromv_count = 0; + cpi->zeromv_count = 0; - if(cm->frame_type != KEY_FRAME) - { - for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++) - { - for (mb_col = 0; mb_col < cm->mb_cols; mb_col ++) - { - if (tmp->mbmi.mode == ZEROMV && - tmp->mbmi.ref_frame == LAST_FRAME) - cpi->zeromv_count++; - tmp++; - } - tmp++; - } + if (cm->frame_type != KEY_FRAME) { + for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { + for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { + if (tmp->mbmi.mode == ZEROMV && tmp->mbmi.ref_frame == LAST_FRAME) { + cpi->zeromv_count++; + } + tmp++; } + tmp++; + } } + } #if CONFIG_MULTI_RES_ENCODING - vp8_cal_dissimilarity(cpi); + vp8_cal_dissimilarity(cpi); #endif - /* Update the GF useage maps. - * This is done after completing the compression of a frame when all - * modes etc. are finalized but before loop filter - */ - if (cpi->oxcf.number_of_layers == 1) - vp8_update_gf_useage_maps(cpi, cm, &cpi->mb); + /* Update the GF useage maps. + * This is done after completing the compression of a frame when all + * modes etc. are finalized but before loop filter + */ + if (cpi->oxcf.number_of_layers == 1) { + vp8_update_gf_useage_maps(cpi, cm, &cpi->mb); + } - if (cm->frame_type == KEY_FRAME) - cm->refresh_last_frame = 1; + if (cm->frame_type == KEY_FRAME) cm->refresh_last_frame = 1; #if 0 { @@ -4790,267 +4302,248 @@ static void encode_frame_to_data_rate } #endif - /* For inter frames the current default behavior is that when - * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer - * This is purely an encoder decision at present. 
- */ - if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame) - cm->copy_buffer_to_arf = 2; - else - cm->copy_buffer_to_arf = 0; + /* For inter frames the current default behavior is that when + * cm->refresh_golden_frame is set we copy the old GF over to the ARF buffer + * This is purely an encoder decision at present. + */ + if (!cpi->oxcf.error_resilient_mode && cm->refresh_golden_frame) { + cm->copy_buffer_to_arf = 2; + } else { + cm->copy_buffer_to_arf = 0; + } - cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx]; + cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx]; #if CONFIG_TEMPORAL_DENOISING - // Get some measure of the amount of noise, by measuring the (partial) mse - // between source and denoised buffer, for y channel. Partial refers to - // computing the sse for a sub-sample of the frame (i.e., skip x blocks along row/column), - // and only for blocks in that set that are consecutive ZEROMV_LAST mode. - // Do this every ~8 frames, to further reduce complexity. - // TODO(marpan): Keep this for now for the case cpi->oxcf.noise_sensitivity < 4, - // should be removed in favor of the process_denoiser_mode_change() function below. - if (cpi->oxcf.noise_sensitivity > 0 && - cpi->oxcf.noise_sensitivity < 4 && - !cpi->oxcf.screen_content_mode && - cpi->frames_since_key%8 == 0 && - cm->frame_type != KEY_FRAME) { - cpi->mse_source_denoised = measure_square_diff_partial( - &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi); - } + // Get some measure of the amount of noise, by measuring the (partial) mse + // between source and denoised buffer, for y channel. Partial refers to + // computing the sse for a sub-sample of the frame (i.e., skip x blocks along + // row/column), + // and only for blocks in that set that are consecutive ZEROMV_LAST mode. + // Do this every ~8 frames, to further reduce complexity. 
+ // TODO(marpan): Keep this for now for the case cpi->oxcf.noise_sensitivity < + // 4, + // should be removed in favor of the process_denoiser_mode_change() function + // below. + if (cpi->oxcf.noise_sensitivity > 0 && cpi->oxcf.noise_sensitivity < 4 && + !cpi->oxcf.screen_content_mode && cpi->frames_since_key % 8 == 0 && + cm->frame_type != KEY_FRAME) { + cpi->mse_source_denoised = measure_square_diff_partial( + &cpi->denoiser.yv12_running_avg[INTRA_FRAME], cpi->Source, cpi); + } - // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse - // of source diff (between current and previous frame), and determine if we - // should switch the denoiser mode. Sampling refers to computing the mse for - // a sub-sample of the frame (i.e., skip x blocks along row/column), and - // only for blocks in that set that have used ZEROMV LAST, along with some - // constraint on the sum diff between blocks. This process is called every - // ~8 frames, to further reduce complexity. - if (cpi->oxcf.noise_sensitivity == 4 && - !cpi->oxcf.screen_content_mode && - cpi->frames_since_key % 8 == 0 && - cm->frame_type != KEY_FRAME) { - process_denoiser_mode_change(cpi); - } + // For the adaptive denoising mode (noise_sensitivity == 4), sample the mse + // of source diff (between current and previous frame), and determine if we + // should switch the denoiser mode. Sampling refers to computing the mse for + // a sub-sample of the frame (i.e., skip x blocks along row/column), and + // only for blocks in that set that have used ZEROMV LAST, along with some + // constraint on the sum diff between blocks. This process is called every + // ~8 frames, to further reduce complexity. 
+ if (cpi->oxcf.noise_sensitivity == 4 && !cpi->oxcf.screen_content_mode && + cpi->frames_since_key % 8 == 0 && cm->frame_type != KEY_FRAME) { + process_denoiser_mode_change(cpi); + } #endif #if CONFIG_MULTITHREAD - if (cpi->b_multi_threaded) - { - /* start loopfilter in separate thread */ - sem_post(&cpi->h_event_start_lpf); - cpi->b_lpf_running = 1; - } - else + if (cpi->b_multi_threaded) { + /* start loopfilter in separate thread */ + sem_post(&cpi->h_event_start_lpf); + cpi->b_lpf_running = 1; + } else #endif - { - vp8_loopfilter_frame(cpi, cm); - } + { + vp8_loopfilter_frame(cpi, cm); + } - update_reference_frames(cpi); + update_reference_frames(cpi); #ifdef OUTPUT_YUV_DENOISED - vp8_write_yuv_frame(yuv_denoised_file, - &cpi->denoiser.yv12_running_avg[INTRA_FRAME]); + vp8_write_yuv_frame(yuv_denoised_file, + &cpi->denoiser.yv12_running_avg[INTRA_FRAME]); #endif #if !(CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) - if (cpi->oxcf.error_resilient_mode) - { - cm->refresh_entropy_probs = 0; - } + if (cpi->oxcf.error_resilient_mode) { + cm->refresh_entropy_probs = 0; + } #endif #if CONFIG_MULTITHREAD - /* wait that filter_level is picked so that we can continue with stream packing */ - if (cpi->b_multi_threaded) - sem_wait(&cpi->h_event_end_lpf); + /* wait that filter_level is picked so that we can continue with stream + * packing */ + if (cpi->b_multi_threaded) sem_wait(&cpi->h_event_end_lpf); #endif - /* build the bitstream */ - vp8_pack_bitstream(cpi, dest, dest_end, size); + /* build the bitstream */ + vp8_pack_bitstream(cpi, dest, dest_end, size); -#if CONFIG_MULTITHREAD - /* if PSNR packets are generated we have to wait for the lpf */ - if (cpi->b_lpf_running && cpi->b_calculate_psnr) - { - sem_wait(&cpi->h_event_end_lpf); - cpi->b_lpf_running = 0; + /* Move storing frame_type out of the above loop since it is also + * needed in motion search besides loopfilter */ + cm->last_frame_type = cm->frame_type; + + /* Update rate control heuristics */ + 
cpi->total_byte_count += (*size); + cpi->projected_frame_size = (int)(*size) << 3; + + if (cpi->oxcf.number_of_layers > 1) { + unsigned int i; + for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) { + cpi->layer_context[i].total_byte_count += (*size); } -#endif + } - /* Move storing frame_type out of the above loop since it is also - * needed in motion search besides loopfilter */ - cm->last_frame_type = cm->frame_type; + if (!active_worst_qchanged) vp8_update_rate_correction_factors(cpi, 2); - /* Update rate control heuristics */ - cpi->total_byte_count += (*size); - cpi->projected_frame_size = (*size) << 3; + cpi->last_q[cm->frame_type] = cm->base_qindex; - if (cpi->oxcf.number_of_layers > 1) - { - unsigned int i; - for (i=cpi->current_layer+1; ioxcf.number_of_layers; i++) - cpi->layer_context[i].total_byte_count += (*size); - } + if (cm->frame_type == KEY_FRAME) { + vp8_adjust_key_frame_context(cpi); + } - if (!active_worst_qchanged) - vp8_update_rate_correction_factors(cpi, 2); + /* Keep a record of ambient average Q. */ + if (cm->frame_type != KEY_FRAME) { + cpi->avg_frame_qindex = + (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2; + } - cpi->last_q[cm->frame_type] = cm->base_qindex; + /* Keep a record from which we can calculate the average Q excluding + * GF updates and key frames + */ + if ((cm->frame_type != KEY_FRAME) && + ((cpi->oxcf.number_of_layers > 1) || + (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame))) { + cpi->ni_frames++; - if (cm->frame_type == KEY_FRAME) - { - vp8_adjust_key_frame_context(cpi); - } - - /* Keep a record of ambient average Q. */ - if (cm->frame_type != KEY_FRAME) - cpi->avg_frame_qindex = (2 + 3 * cpi->avg_frame_qindex + cm->base_qindex) >> 2; - - /* Keep a record from which we can calculate the average Q excluding - * GF updates and key frames + /* Calculate the average Q for normal inter frames (not key or GFU + * frames). 
*/ - if ((cm->frame_type != KEY_FRAME) && ((cpi->oxcf.number_of_layers > 1) || - (!cm->refresh_golden_frame && !cm->refresh_alt_ref_frame))) - { - cpi->ni_frames++; + if (cpi->pass == 2) { + cpi->ni_tot_qi += Q; + cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames); + } else { + /* Damp value for first few frames */ + if (cpi->ni_frames > 150) { + cpi->ni_tot_qi += Q; + cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames); + } + /* For one pass, early in the clip ... average the current frame Q + * value with the worstq entered by the user as a dampening measure + */ + else { + cpi->ni_tot_qi += Q; + cpi->ni_av_qi = + ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2; + } - /* Calculate the average Q for normal inter frames (not key or GFU - * frames). - */ - if ( cpi->pass == 2 ) - { - cpi->ni_tot_qi += Q; - cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames); - } - else - { - /* Damp value for first few frames */ - if (cpi->ni_frames > 150 ) - { - cpi->ni_tot_qi += Q; - cpi->ni_av_qi = (cpi->ni_tot_qi / cpi->ni_frames); - } - /* For one pass, early in the clip ... average the current frame Q - * value with the worstq entered by the user as a dampening measure - */ - else - { - cpi->ni_tot_qi += Q; - cpi->ni_av_qi = ((cpi->ni_tot_qi / cpi->ni_frames) + cpi->worst_quality + 1) / 2; - } - - /* If the average Q is higher than what was used in the last - * frame (after going through the recode loop to keep the frame - * size within range) then use the last frame value - 1. The -1 - * is designed to stop Q and hence the data rate, from - * progressively falling away during difficult sections, but at - * the same time reduce the number of itterations around the - * recode loop. - */ - if (Q > cpi->ni_av_qi) - cpi->ni_av_qi = Q - 1; - } + /* If the average Q is higher than what was used in the last + * frame (after going through the recode loop to keep the frame + * size within range) then use the last frame value - 1. 
The -1 + * is designed to stop Q and hence the data rate, from + * progressively falling away during difficult sections, but at + * the same time reduce the number of itterations around the + * recode loop. + */ + if (Q > cpi->ni_av_qi) cpi->ni_av_qi = Q - 1; } + } - /* Update the buffer level variable. */ - /* Non-viewable frames are a special case and are treated as pure overhead. */ - if ( !cm->show_frame ) - cpi->bits_off_target -= cpi->projected_frame_size; - else - cpi->bits_off_target += cpi->av_per_frame_bandwidth - cpi->projected_frame_size; + /* Update the buffer level variable. */ + /* Non-viewable frames are a special case and are treated as pure overhead. */ + if (!cm->show_frame) { + cpi->bits_off_target -= cpi->projected_frame_size; + } else { + cpi->bits_off_target += + cpi->av_per_frame_bandwidth - cpi->projected_frame_size; + } - /* Clip the buffer level to the maximum specified buffer size */ - if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) - cpi->bits_off_target = cpi->oxcf.maximum_buffer_size; + /* Clip the buffer level to the maximum specified buffer size */ + if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) { + cpi->bits_off_target = cpi->oxcf.maximum_buffer_size; + } - // If the frame dropper is not enabled, don't let the buffer level go below - // some threshold, given here by -|maximum_buffer_size|. For now we only do - // this for screen content input. - if (cpi->drop_frames_allowed == 0 && cpi->oxcf.screen_content_mode && - cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size) - cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size; + // If the frame dropper is not enabled, don't let the buffer level go below + // some threshold, given here by -|maximum_buffer_size|. For now we only do + // this for screen content input. 
+ if (cpi->drop_frames_allowed == 0 && cpi->oxcf.screen_content_mode && + cpi->bits_off_target < -cpi->oxcf.maximum_buffer_size) { + cpi->bits_off_target = -cpi->oxcf.maximum_buffer_size; + } - /* Rolling monitors of whether we are over or underspending used to - * help regulate min and Max Q in two pass. - */ - cpi->rolling_target_bits = ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4; - cpi->rolling_actual_bits = ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4; - cpi->long_rolling_target_bits = ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32; - cpi->long_rolling_actual_bits = ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) / 32; + /* Rolling monitors of whether we are over or underspending used to + * help regulate min and Max Q in two pass. + */ + cpi->rolling_target_bits = + ((cpi->rolling_target_bits * 3) + cpi->this_frame_target + 2) / 4; + cpi->rolling_actual_bits = + ((cpi->rolling_actual_bits * 3) + cpi->projected_frame_size + 2) / 4; + cpi->long_rolling_target_bits = + ((cpi->long_rolling_target_bits * 31) + cpi->this_frame_target + 16) / 32; + cpi->long_rolling_actual_bits = + ((cpi->long_rolling_actual_bits * 31) + cpi->projected_frame_size + 16) / + 32; - /* Actual bits spent */ - cpi->total_actual_bits += cpi->projected_frame_size; + /* Actual bits spent */ + cpi->total_actual_bits += cpi->projected_frame_size; - /* Debug stats */ - cpi->total_target_vs_actual += (cpi->this_frame_target - cpi->projected_frame_size); + /* Debug stats */ + cpi->total_target_vs_actual += + (cpi->this_frame_target - cpi->projected_frame_size); - cpi->buffer_level = cpi->bits_off_target; + cpi->buffer_level = cpi->bits_off_target; - /* Propagate values to higher temporal layers */ - if (cpi->oxcf.number_of_layers > 1) - { - unsigned int i; + /* Propagate values to higher temporal layers */ + if (cpi->oxcf.number_of_layers > 1) { + unsigned int i; - for (i=cpi->current_layer+1; 
ioxcf.number_of_layers; i++) - { - LAYER_CONTEXT *lc = &cpi->layer_context[i]; - int bits_off_for_this_layer = - (int)(lc->target_bandwidth / lc->framerate - - cpi->projected_frame_size); + for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) { + LAYER_CONTEXT *lc = &cpi->layer_context[i]; + int bits_off_for_this_layer = (int)(lc->target_bandwidth / lc->framerate - + cpi->projected_frame_size); - lc->bits_off_target += bits_off_for_this_layer; + lc->bits_off_target += bits_off_for_this_layer; - /* Clip buffer level to maximum buffer size for the layer */ - if (lc->bits_off_target > lc->maximum_buffer_size) - lc->bits_off_target = lc->maximum_buffer_size; + /* Clip buffer level to maximum buffer size for the layer */ + if (lc->bits_off_target > lc->maximum_buffer_size) { + lc->bits_off_target = lc->maximum_buffer_size; + } - lc->total_actual_bits += cpi->projected_frame_size; - lc->total_target_vs_actual += bits_off_for_this_layer; - lc->buffer_level = lc->bits_off_target; - } + lc->total_actual_bits += cpi->projected_frame_size; + lc->total_target_vs_actual += bits_off_for_this_layer; + lc->buffer_level = lc->bits_off_target; } + } - /* Update bits left to the kf and gf groups to account for overshoot - * or undershoot on these frames - */ - if (cm->frame_type == KEY_FRAME) - { - cpi->twopass.kf_group_bits += cpi->this_frame_target - cpi->projected_frame_size; + /* Update bits left to the kf and gf groups to account for overshoot + * or undershoot on these frames + */ + if (cm->frame_type == KEY_FRAME) { + cpi->twopass.kf_group_bits += + cpi->this_frame_target - cpi->projected_frame_size; - if (cpi->twopass.kf_group_bits < 0) - cpi->twopass.kf_group_bits = 0 ; - } - else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) - { - cpi->twopass.gf_group_bits += cpi->this_frame_target - cpi->projected_frame_size; - - if (cpi->twopass.gf_group_bits < 0) - cpi->twopass.gf_group_bits = 0 ; - } - - if (cm->frame_type != KEY_FRAME) - { - if 
(cpi->common.refresh_alt_ref_frame) - { - cpi->last_skip_false_probs[2] = cpi->prob_skip_false; - cpi->last_skip_probs_q[2] = cm->base_qindex; - } - else if (cpi->common.refresh_golden_frame) - { - cpi->last_skip_false_probs[1] = cpi->prob_skip_false; - cpi->last_skip_probs_q[1] = cm->base_qindex; - } - else - { - cpi->last_skip_false_probs[0] = cpi->prob_skip_false; - cpi->last_skip_probs_q[0] = cm->base_qindex; - - /* update the baseline */ - cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false; - - } + if (cpi->twopass.kf_group_bits < 0) cpi->twopass.kf_group_bits = 0; + } else if (cm->refresh_golden_frame || cm->refresh_alt_ref_frame) { + cpi->twopass.gf_group_bits += + cpi->this_frame_target - cpi->projected_frame_size; + + if (cpi->twopass.gf_group_bits < 0) cpi->twopass.gf_group_bits = 0; + } + + if (cm->frame_type != KEY_FRAME) { + if (cpi->common.refresh_alt_ref_frame) { + cpi->last_skip_false_probs[2] = cpi->prob_skip_false; + cpi->last_skip_probs_q[2] = cm->base_qindex; + } else if (cpi->common.refresh_golden_frame) { + cpi->last_skip_false_probs[1] = cpi->prob_skip_false; + cpi->last_skip_probs_q[1] = cm->base_qindex; + } else { + cpi->last_skip_false_probs[0] = cpi->prob_skip_false; + cpi->last_skip_probs_q[0] = cm->base_qindex; + + /* update the baseline */ + cpi->base_skip_false_prob[cm->base_qindex] = cpi->prob_skip_false; } + } #if 0 && CONFIG_INTERNAL_STATS { @@ -5117,98 +4610,90 @@ static void encode_frame_to_data_rate #endif - if (cm->refresh_golden_frame == 1) - cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN; - else - cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_GOLDEN; + if (cm->refresh_golden_frame == 1) { + cm->frame_flags = cm->frame_flags | FRAMEFLAGS_GOLDEN; + } else { + cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_GOLDEN; + } - if (cm->refresh_alt_ref_frame == 1) - cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF; - else - cm->frame_flags = cm->frame_flags&~FRAMEFLAGS_ALTREF; + if (cm->refresh_alt_ref_frame 
== 1) { + cm->frame_flags = cm->frame_flags | FRAMEFLAGS_ALTREF; + } else { + cm->frame_flags = cm->frame_flags & ~FRAMEFLAGS_ALTREF; + } + if (cm->refresh_last_frame & cm->refresh_golden_frame) { /* both refreshed */ + cpi->gold_is_last = 1; + } else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) { + /* 1 refreshed but not the other */ + cpi->gold_is_last = 0; + } - if (cm->refresh_last_frame & cm->refresh_golden_frame) - /* both refreshed */ - cpi->gold_is_last = 1; - else if (cm->refresh_last_frame ^ cm->refresh_golden_frame) - /* 1 refreshed but not the other */ - cpi->gold_is_last = 0; + if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) { /* both refreshed */ + cpi->alt_is_last = 1; + } else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) { + /* 1 refreshed but not the other */ + cpi->alt_is_last = 0; + } - if (cm->refresh_last_frame & cm->refresh_alt_ref_frame) - /* both refreshed */ - cpi->alt_is_last = 1; - else if (cm->refresh_last_frame ^ cm->refresh_alt_ref_frame) - /* 1 refreshed but not the other */ - cpi->alt_is_last = 0; + if (cm->refresh_alt_ref_frame & + cm->refresh_golden_frame) { /* both refreshed */ + cpi->gold_is_alt = 1; + } else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) { + /* 1 refreshed but not the other */ + cpi->gold_is_alt = 0; + } - if (cm->refresh_alt_ref_frame & cm->refresh_golden_frame) - /* both refreshed */ - cpi->gold_is_alt = 1; - else if (cm->refresh_alt_ref_frame ^ cm->refresh_golden_frame) - /* 1 refreshed but not the other */ - cpi->gold_is_alt = 0; + cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME; - cpi->ref_frame_flags = VP8_ALTR_FRAME | VP8_GOLD_FRAME | VP8_LAST_FRAME; + if (cpi->gold_is_last) cpi->ref_frame_flags &= ~VP8_GOLD_FRAME; - if (cpi->gold_is_last) - cpi->ref_frame_flags &= ~VP8_GOLD_FRAME; + if (cpi->alt_is_last) cpi->ref_frame_flags &= ~VP8_ALTR_FRAME; - if (cpi->alt_is_last) - cpi->ref_frame_flags &= ~VP8_ALTR_FRAME; + if (cpi->gold_is_alt) 
cpi->ref_frame_flags &= ~VP8_ALTR_FRAME; - if (cpi->gold_is_alt) - cpi->ref_frame_flags &= ~VP8_ALTR_FRAME; - - - if (!cpi->oxcf.error_resilient_mode) - { - if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame && (cm->frame_type != KEY_FRAME)) - /* Update the alternate reference frame stats as appropriate. */ - update_alt_ref_frame_stats(cpi); - else - /* Update the Golden frame stats as appropriate. */ - update_golden_frame_stats(cpi); + if (!cpi->oxcf.error_resilient_mode) { + if (cpi->oxcf.play_alternate && cm->refresh_alt_ref_frame && + (cm->frame_type != KEY_FRAME)) { + /* Update the alternate reference frame stats as appropriate. */ + update_alt_ref_frame_stats(cpi); + } else { + /* Update the Golden frame stats as appropriate. */ + update_golden_frame_stats(cpi); } + } - if (cm->frame_type == KEY_FRAME) - { - /* Tell the caller that the frame was coded as a key frame */ - *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY; + if (cm->frame_type == KEY_FRAME) { + /* Tell the caller that the frame was coded as a key frame */ + *frame_flags = cm->frame_flags | FRAMEFLAGS_KEY; - /* As this frame is a key frame the next defaults to an inter frame. */ - cm->frame_type = INTER_FRAME; + /* As this frame is a key frame the next defaults to an inter frame. */ + cm->frame_type = INTER_FRAME; - cpi->last_frame_percent_intra = 100; - } - else - { - *frame_flags = cm->frame_flags&~FRAMEFLAGS_KEY; + cpi->last_frame_percent_intra = 100; + } else { + *frame_flags = cm->frame_flags & ~FRAMEFLAGS_KEY; - cpi->last_frame_percent_intra = cpi->this_frame_percent_intra; - } + cpi->last_frame_percent_intra = cpi->this_frame_percent_intra; + } - /* Clear the one shot update flags for segmentation map and mode/ref - * loop filter deltas. 
- */ - cpi->mb.e_mbd.update_mb_segmentation_map = 0; - cpi->mb.e_mbd.update_mb_segmentation_data = 0; - cpi->mb.e_mbd.mode_ref_lf_delta_update = 0; - - - /* Dont increment frame counters if this was an altref buffer update - * not a real frame - */ - if (cm->show_frame) - { - cm->current_video_frame++; - cpi->frames_since_key++; - cpi->temporal_pattern_counter++; - } - - /* reset to normal state now that we are done. */ + /* Clear the one shot update flags for segmentation map and mode/ref + * loop filter deltas. + */ + cpi->mb.e_mbd.update_mb_segmentation_map = 0; + cpi->mb.e_mbd.update_mb_segmentation_data = 0; + cpi->mb.e_mbd.mode_ref_lf_delta_update = 0; + /* Dont increment frame counters if this was an altref buffer update + * not a real frame + */ + if (cm->show_frame) { + cm->current_video_frame++; + cpi->frames_since_key++; + cpi->temporal_pattern_counter++; + } +/* reset to normal state now that we are done. */ #if 0 { @@ -5222,327 +4707,289 @@ static void encode_frame_to_data_rate } #endif - /* DEBUG */ - /* vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */ - - + /* DEBUG */ + /* vp8_write_yuv_frame("encoder_recon.yuv", cm->frame_to_show); */ } #if !CONFIG_REALTIME_ONLY -static void Pass2Encode(VP8_COMP *cpi, unsigned long *size, unsigned char *dest, unsigned char * dest_end, unsigned int *frame_flags) -{ +static void Pass2Encode(VP8_COMP *cpi, size_t *size, unsigned char *dest, + unsigned char *dest_end, unsigned int *frame_flags) { + if (!cpi->common.refresh_alt_ref_frame) vp8_second_pass(cpi); - if (!cpi->common.refresh_alt_ref_frame) - vp8_second_pass(cpi); + encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags); + cpi->twopass.bits_left -= 8 * (int)(*size); - encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags); - cpi->twopass.bits_left -= 8 * *size; - - if (!cpi->common.refresh_alt_ref_frame) - { - double two_pass_min_rate = (double)(cpi->oxcf.target_bandwidth - *cpi->oxcf.two_pass_vbrmin_section / 100); - 
cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate); - } + if (!cpi->common.refresh_alt_ref_frame) { + double two_pass_min_rate = + (double)(cpi->oxcf.target_bandwidth * + cpi->oxcf.two_pass_vbrmin_section / 100); + cpi->twopass.bits_left += (int64_t)(two_pass_min_rate / cpi->framerate); + } } #endif -int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time) -{ - struct vpx_usec_timer timer; - int res = 0; +int vp8_receive_raw_frame(VP8_COMP *cpi, unsigned int frame_flags, + YV12_BUFFER_CONFIG *sd, int64_t time_stamp, + int64_t end_time) { + struct vpx_usec_timer timer; + int res = 0; - vpx_usec_timer_start(&timer); + vpx_usec_timer_start(&timer); - /* Reinit the lookahead buffer if the frame size changes */ - if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height) - { - assert(cpi->oxcf.lag_in_frames < 2); - dealloc_raw_frame_buffers(cpi); - alloc_raw_frame_buffers(cpi); + /* Reinit the lookahead buffer if the frame size changes */ + if (sd->y_width != cpi->oxcf.Width || sd->y_height != cpi->oxcf.Height) { + assert(cpi->oxcf.lag_in_frames < 2); + dealloc_raw_frame_buffers(cpi); + alloc_raw_frame_buffers(cpi); + } + + if (vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags, + cpi->active_map_enabled ? 
cpi->active_map : NULL)) { + res = -1; + } + vpx_usec_timer_mark(&timer); + cpi->time_receive_data += vpx_usec_timer_elapsed(&timer); + + return res; +} + +static int frame_is_reference(const VP8_COMP *cpi) { + const VP8_COMMON *cm = &cpi->common; + const MACROBLOCKD *xd = &cpi->mb.e_mbd; + + return cm->frame_type == KEY_FRAME || cm->refresh_last_frame || + cm->refresh_golden_frame || cm->refresh_alt_ref_frame || + cm->copy_buffer_to_gf || cm->copy_buffer_to_arf || + cm->refresh_entropy_probs || xd->mode_ref_lf_delta_update || + xd->update_mb_segmentation_map || xd->update_mb_segmentation_data; +} + +int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, + size_t *size, unsigned char *dest, + unsigned char *dest_end, int64_t *time_stamp, + int64_t *time_end, int flush) { + VP8_COMMON *cm; + struct vpx_usec_timer tsctimer; + struct vpx_usec_timer ticktimer; + struct vpx_usec_timer cmptimer; + YV12_BUFFER_CONFIG *force_src_buffer = NULL; + + if (!cpi) return -1; + + cm = &cpi->common; + + if (setjmp(cpi->common.error.jmp)) { + cpi->common.error.setjmp = 0; + vp8_clear_system_state(); + return VPX_CODEC_CORRUPT_FRAME; + } + + cpi->common.error.setjmp = 1; + + vpx_usec_timer_start(&cmptimer); + + cpi->source = NULL; + +#if !CONFIG_REALTIME_ONLY + /* Should we code an alternate reference frame */ + if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.play_alternate && + cpi->source_alt_ref_pending) { + if ((cpi->source = vp8_lookahead_peek( + cpi->lookahead, cpi->frames_till_gf_update_due, PEEK_FORWARD))) { + cpi->alt_ref_source = cpi->source; + if (cpi->oxcf.arnr_max_frames > 0) { + vp8_temporal_filter_prepare_c(cpi, cpi->frames_till_gf_update_due); + force_src_buffer = &cpi->alt_ref_buffer; + } + cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due; + cm->refresh_alt_ref_frame = 1; + cm->refresh_golden_frame = 0; + cm->refresh_last_frame = 0; + cm->show_frame = 0; + /* Clear Pending alt Ref flag. 
*/ + cpi->source_alt_ref_pending = 0; + cpi->is_src_frame_alt_ref = 0; } + } +#endif - if(vp8_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, - frame_flags, cpi->active_map_enabled ? cpi->active_map : NULL)) - res = -1; - vpx_usec_timer_mark(&timer); - cpi->time_receive_data += vpx_usec_timer_elapsed(&timer); - - return res; -} - - -static int frame_is_reference(const VP8_COMP *cpi) -{ - const VP8_COMMON *cm = &cpi->common; - const MACROBLOCKD *xd = &cpi->mb.e_mbd; - - return cm->frame_type == KEY_FRAME || cm->refresh_last_frame - || cm->refresh_golden_frame || cm->refresh_alt_ref_frame - || cm->copy_buffer_to_gf || cm->copy_buffer_to_arf - || cm->refresh_entropy_probs - || xd->mode_ref_lf_delta_update - || xd->update_mb_segmentation_map || xd->update_mb_segmentation_data; -} - - -int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned long *size, unsigned char *dest, unsigned char *dest_end, int64_t *time_stamp, int64_t *time_end, int flush) -{ - VP8_COMMON *cm; - struct vpx_usec_timer tsctimer; - struct vpx_usec_timer ticktimer; - struct vpx_usec_timer cmptimer; - YV12_BUFFER_CONFIG *force_src_buffer = NULL; - - if (!cpi) + if (!cpi->source) { + /* Read last frame source if we are encoding first pass. 
*/ + if (cpi->pass == 1 && cm->current_video_frame > 0) { + if ((cpi->last_source = + vp8_lookahead_peek(cpi->lookahead, 1, PEEK_BACKWARD)) == NULL) { return -1; - - cm = &cpi->common; - - if (setjmp(cpi->common.error.jmp)) - { - cpi->common.error.setjmp = 0; - vp8_clear_system_state(); - return VPX_CODEC_CORRUPT_FRAME; + } } - cpi->common.error.setjmp = 1; + if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) { + cm->show_frame = 1; - vpx_usec_timer_start(&cmptimer); + cpi->is_src_frame_alt_ref = + cpi->alt_ref_source && (cpi->source == cpi->alt_ref_source); - cpi->source = NULL; - -#if !CONFIG_REALTIME_ONLY - /* Should we code an alternate reference frame */ - if (cpi->oxcf.error_resilient_mode == 0 && - cpi->oxcf.play_alternate && - cpi->source_alt_ref_pending) - { - if ((cpi->source = vp8_lookahead_peek(cpi->lookahead, - cpi->frames_till_gf_update_due, - PEEK_FORWARD))) - { - cpi->alt_ref_source = cpi->source; - if (cpi->oxcf.arnr_max_frames > 0) - { - vp8_temporal_filter_prepare_c(cpi, - cpi->frames_till_gf_update_due); - force_src_buffer = &cpi->alt_ref_buffer; - } - cpi->frames_till_alt_ref_frame = cpi->frames_till_gf_update_due; - cm->refresh_alt_ref_frame = 1; - cm->refresh_golden_frame = 0; - cm->refresh_last_frame = 0; - cm->show_frame = 0; - /* Clear Pending alt Ref flag. */ - cpi->source_alt_ref_pending = 0; - cpi->is_src_frame_alt_ref = 0; - } + if (cpi->is_src_frame_alt_ref) cpi->alt_ref_source = NULL; } -#endif + } - if (!cpi->source) - { - /* Read last frame source if we are encoding first pass. */ - if (cpi->pass == 1 && cm->current_video_frame > 0) - { - if((cpi->last_source = vp8_lookahead_peek(cpi->lookahead, 1, - PEEK_BACKWARD)) == NULL) - return -1; - } + if (cpi->source) { + cpi->Source = force_src_buffer ? 
force_src_buffer : &cpi->source->img; + cpi->un_scaled_source = cpi->Source; + *time_stamp = cpi->source->ts_start; + *time_end = cpi->source->ts_end; + *frame_flags = cpi->source->flags; - - if ((cpi->source = vp8_lookahead_pop(cpi->lookahead, flush))) - { - cm->show_frame = 1; - - cpi->is_src_frame_alt_ref = cpi->alt_ref_source - && (cpi->source == cpi->alt_ref_source); - - if(cpi->is_src_frame_alt_ref) - cpi->alt_ref_source = NULL; - } + if (cpi->pass == 1 && cm->current_video_frame > 0) { + cpi->last_frame_unscaled_source = &cpi->last_source->img; } - - if (cpi->source) - { - cpi->Source = force_src_buffer ? force_src_buffer : &cpi->source->img; - cpi->un_scaled_source = cpi->Source; - *time_stamp = cpi->source->ts_start; - *time_end = cpi->source->ts_end; - *frame_flags = cpi->source->flags; - - if (cpi->pass == 1 && cm->current_video_frame > 0) - { - cpi->last_frame_unscaled_source = &cpi->last_source->img; - } - } - else - { - *size = 0; + } else { + *size = 0; #if !CONFIG_REALTIME_ONLY - if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) - { - vp8_end_first_pass(cpi); /* get last stats packet */ - cpi->twopass.first_pass_done = 1; - } + if (flush && cpi->pass == 1 && !cpi->twopass.first_pass_done) { + vp8_end_first_pass(cpi); /* get last stats packet */ + cpi->twopass.first_pass_done = 1; + } #endif - return -1; + return -1; + } + + if (cpi->source->ts_start < cpi->first_time_stamp_ever) { + cpi->first_time_stamp_ever = cpi->source->ts_start; + cpi->last_end_time_stamp_seen = cpi->source->ts_start; + } + + /* adjust frame rates based on timestamps given */ + if (cm->show_frame) { + int64_t this_duration; + int step = 0; + + if (cpi->source->ts_start == cpi->first_time_stamp_ever) { + this_duration = cpi->source->ts_end - cpi->source->ts_start; + step = 1; + } else { + int64_t last_duration; + + this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen; + last_duration = cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen; + /* do 
a step update if the duration changes by 10% */ + if (last_duration) { + step = (int)(((this_duration - last_duration) * 10 / last_duration)); + } } - if (cpi->source->ts_start < cpi->first_time_stamp_ever) - { - cpi->first_time_stamp_ever = cpi->source->ts_start; - cpi->last_end_time_stamp_seen = cpi->source->ts_start; - } + if (this_duration) { + if (step) { + cpi->ref_framerate = 10000000.0 / this_duration; + } else { + double avg_duration, interval; - /* adjust frame rates based on timestamps given */ - if (cm->show_frame) - { - int64_t this_duration; - int step = 0; + /* Average this frame's rate into the last second's average + * frame rate. If we haven't seen 1 second yet, then average + * over the whole interval seen. + */ + interval = (double)(cpi->source->ts_end - cpi->first_time_stamp_ever); + if (interval > 10000000.0) interval = 10000000; - if (cpi->source->ts_start == cpi->first_time_stamp_ever) - { - this_duration = cpi->source->ts_end - cpi->source->ts_start; - step = 1; - } - else - { - int64_t last_duration; + avg_duration = 10000000.0 / cpi->ref_framerate; + avg_duration *= (interval - avg_duration + this_duration); + avg_duration /= interval; - this_duration = cpi->source->ts_end - cpi->last_end_time_stamp_seen; - last_duration = cpi->last_end_time_stamp_seen - - cpi->last_time_stamp_seen; - /* do a step update if the duration changes by 10% */ - if (last_duration) - step = (int)(((this_duration - last_duration) * - 10 / last_duration)); - } - - if (this_duration) - { - if (step) - cpi->ref_framerate = 10000000.0 / this_duration; - else - { - double avg_duration, interval; - - /* Average this frame's rate into the last second's average - * frame rate. If we haven't seen 1 second yet, then average - * over the whole interval seen. 
- */ - interval = (double)(cpi->source->ts_end - - cpi->first_time_stamp_ever); - if(interval > 10000000.0) - interval = 10000000; - - avg_duration = 10000000.0 / cpi->ref_framerate; - avg_duration *= (interval - avg_duration + this_duration); - avg_duration /= interval; - - cpi->ref_framerate = 10000000.0 / avg_duration; - } + cpi->ref_framerate = 10000000.0 / avg_duration; + } #if CONFIG_MULTI_RES_ENCODING - if (cpi->oxcf.mr_total_resolutions > 1) { - LOWER_RES_FRAME_INFO* low_res_frame_info = (LOWER_RES_FRAME_INFO*) - cpi->oxcf.mr_low_res_mode_info; - // Frame rate should be the same for all spatial layers in - // multi-res-encoding (simulcast), so we constrain the frame for - // higher layers to be that of lowest resolution. This is needed - // as he application may decide to skip encoding a high layer and - // then start again, in which case a big jump in time-stamps will - // be received for that high layer, which will yield an incorrect - // frame rate (from time-stamp adjustment in above calculation). - if (cpi->oxcf.mr_encoder_id) { - cpi->ref_framerate = low_res_frame_info->low_res_framerate; - } - else { - // Keep track of frame rate for lowest resolution. 
- low_res_frame_info->low_res_framerate = cpi->ref_framerate; - } - } -#endif - if (cpi->oxcf.number_of_layers > 1) - { - unsigned int i; - - /* Update frame rates for each layer */ - assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS); - for (i = 0; i < cpi->oxcf.number_of_layers && - i < VPX_TS_MAX_LAYERS; ++i) - { - LAYER_CONTEXT *lc = &cpi->layer_context[i]; - lc->framerate = cpi->ref_framerate / - cpi->oxcf.rate_decimator[i]; - } - } - else - vp8_new_framerate(cpi, cpi->ref_framerate); - } - - cpi->last_time_stamp_seen = cpi->source->ts_start; - cpi->last_end_time_stamp_seen = cpi->source->ts_end; - } - - if (cpi->oxcf.number_of_layers > 1) - { - int layer; - - update_layer_contexts (cpi); - - /* Restore layer specific context & set frame rate */ - if (cpi->temporal_layer_id >= 0) { - layer = cpi->temporal_layer_id; + if (cpi->oxcf.mr_total_resolutions > 1) { + LOWER_RES_FRAME_INFO *low_res_frame_info = + (LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info; + // Frame rate should be the same for all spatial layers in + // multi-res-encoding (simulcast), so we constrain the frame for + // higher layers to be that of lowest resolution. This is needed + // as he application may decide to skip encoding a high layer and + // then start again, in which case a big jump in time-stamps will + // be received for that high layer, which will yield an incorrect + // frame rate (from time-stamp adjustment in above calculation). + if (cpi->oxcf.mr_encoder_id) { + cpi->ref_framerate = low_res_frame_info->low_res_framerate; } else { - layer = cpi->oxcf.layer_id[ - cpi->temporal_pattern_counter % cpi->oxcf.periodicity]; + // Keep track of frame rate for lowest resolution. 
+ low_res_frame_info->low_res_framerate = cpi->ref_framerate; } - restore_layer_context (cpi, layer); - vp8_new_framerate(cpi, cpi->layer_context[layer].framerate); + } +#endif + if (cpi->oxcf.number_of_layers > 1) { + unsigned int i; + + /* Update frame rates for each layer */ + assert(cpi->oxcf.number_of_layers <= VPX_TS_MAX_LAYERS); + for (i = 0; i < cpi->oxcf.number_of_layers && i < VPX_TS_MAX_LAYERS; + ++i) { + LAYER_CONTEXT *lc = &cpi->layer_context[i]; + lc->framerate = cpi->ref_framerate / cpi->oxcf.rate_decimator[i]; + } + } else { + vp8_new_framerate(cpi, cpi->ref_framerate); + } } - if (cpi->compressor_speed == 2) - { - vpx_usec_timer_start(&tsctimer); - vpx_usec_timer_start(&ticktimer); - } + cpi->last_time_stamp_seen = cpi->source->ts_start; + cpi->last_end_time_stamp_seen = cpi->source->ts_end; + } - cpi->lf_zeromv_pct = (cpi->zeromv_count * 100)/cm->MBs; + if (cpi->oxcf.number_of_layers > 1) { + int layer; + + update_layer_contexts(cpi); + + /* Restore layer specific context & set frame rate */ + if (cpi->temporal_layer_id >= 0) { + layer = cpi->temporal_layer_id; + } else { + layer = + cpi->oxcf + .layer_id[cpi->temporal_pattern_counter % cpi->oxcf.periodicity]; + } + restore_layer_context(cpi, layer); + vp8_new_framerate(cpi, cpi->layer_context[layer].framerate); + } + + if (cpi->compressor_speed == 2) { + vpx_usec_timer_start(&tsctimer); + vpx_usec_timer_start(&ticktimer); + } + + cpi->lf_zeromv_pct = (cpi->zeromv_count * 100) / cm->MBs; #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - { - int i; - const int num_part = (1 << cm->multi_token_partition); - /* the available bytes in dest */ - const unsigned long dest_size = dest_end - dest; - const int tok_part_buff_size = (dest_size * 9) / (10 * num_part); + { + int i; + const int num_part = (1 << cm->multi_token_partition); + /* the available bytes in dest */ + const unsigned long dest_size = dest_end - dest; + const int tok_part_buff_size = (dest_size * 9) / (10 * num_part); - unsigned char 
*dp = dest; + unsigned char *dp = dest; - cpi->partition_d[0] = dp; - dp += dest_size/10; /* reserve 1/10 for control partition */ - cpi->partition_d_end[0] = dp; + cpi->partition_d[0] = dp; + dp += dest_size / 10; /* reserve 1/10 for control partition */ + cpi->partition_d_end[0] = dp; - for(i = 0; i < num_part; i++) - { - cpi->partition_d[i + 1] = dp; - dp += tok_part_buff_size; - cpi->partition_d_end[i + 1] = dp; - } + for (i = 0; i < num_part; ++i) { + cpi->partition_d[i + 1] = dp; + dp += tok_part_buff_size; + cpi->partition_d_end[i + 1] = dp; } + } #endif - /* start with a 0 size frame */ - *size = 0; + /* start with a 0 size frame */ + *size = 0; - /* Clear down mmx registers */ - vp8_clear_system_state(); + /* Clear down mmx registers */ + vp8_clear_system_state(); - cm->frame_type = INTER_FRAME; - cm->frame_flags = *frame_flags; + cm->frame_type = INTER_FRAME; + cm->frame_flags = *frame_flags; #if 0 @@ -5558,243 +5005,187 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l } #endif - /* find a free buffer for the new frame */ - { - int i = 0; - for(; i < NUM_YV12_BUFFERS; i++) - { - if(!cm->yv12_fb[i].flags) - { - cm->new_fb_idx = i; - break; - } - } - - assert(i < NUM_YV12_BUFFERS ); + /* find a free buffer for the new frame */ + { + int i = 0; + for (; i < NUM_YV12_BUFFERS; ++i) { + if (!cm->yv12_fb[i].flags) { + cm->new_fb_idx = i; + break; + } } + + assert(i < NUM_YV12_BUFFERS); + } + switch (cpi->pass) { #if !CONFIG_REALTIME_ONLY + case 1: Pass1Encode(cpi, size, dest, frame_flags); break; + case 2: Pass2Encode(cpi, size, dest, dest_end, frame_flags); break; +#endif // !CONFIG_REALTIME_ONLY + default: + encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags); + break; + } - if (cpi->pass == 1) - { - Pass1Encode(cpi, size, dest, frame_flags); + if (cpi->compressor_speed == 2) { + unsigned int duration, duration2; + vpx_usec_timer_mark(&tsctimer); + vpx_usec_timer_mark(&ticktimer); + + duration = 
(int)(vpx_usec_timer_elapsed(&ticktimer)); + duration2 = (unsigned int)((double)duration / 2); + + if (cm->frame_type != KEY_FRAME) { + if (cpi->avg_encode_time == 0) { + cpi->avg_encode_time = duration; + } else { + cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3; + } } - else if (cpi->pass == 2) - { - Pass2Encode(cpi, size, dest, dest_end, frame_flags); - } - else -#endif - encode_frame_to_data_rate(cpi, size, dest, dest_end, frame_flags); - if (cpi->compressor_speed == 2) - { - unsigned int duration, duration2; - vpx_usec_timer_mark(&tsctimer); - vpx_usec_timer_mark(&ticktimer); - - duration = (int)(vpx_usec_timer_elapsed(&ticktimer)); - duration2 = (unsigned int)((double)duration / 2); - - if (cm->frame_type != KEY_FRAME) - { - if (cpi->avg_encode_time == 0) - cpi->avg_encode_time = duration; - else - cpi->avg_encode_time = (7 * cpi->avg_encode_time + duration) >> 3; + if (duration2) { + { + if (cpi->avg_pick_mode_time == 0) { + cpi->avg_pick_mode_time = duration2; + } else { + cpi->avg_pick_mode_time = + (7 * cpi->avg_pick_mode_time + duration2) >> 3; } - - if (duration2) - { - { - - if (cpi->avg_pick_mode_time == 0) - cpi->avg_pick_mode_time = duration2; - else - cpi->avg_pick_mode_time = (7 * cpi->avg_pick_mode_time + duration2) >> 3; - } - } - + } } + } - if (cm->refresh_entropy_probs == 0) - { - memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc)); - } + if (cm->refresh_entropy_probs == 0) { + memcpy(&cm->fc, &cm->lfc, sizeof(cm->fc)); + } - /* Save the contexts separately for alt ref, gold and last. */ - /* (TODO jbb -> Optimize this with pointers to avoid extra copies. ) */ - if(cm->refresh_alt_ref_frame) - memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc)); + /* Save the contexts separately for alt ref, gold and last. */ + /* (TODO jbb -> Optimize this with pointers to avoid extra copies. 
) */ + if (cm->refresh_alt_ref_frame) memcpy(&cpi->lfc_a, &cm->fc, sizeof(cm->fc)); - if(cm->refresh_golden_frame) - memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc)); + if (cm->refresh_golden_frame) memcpy(&cpi->lfc_g, &cm->fc, sizeof(cm->fc)); - if(cm->refresh_last_frame) - memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc)); + if (cm->refresh_last_frame) memcpy(&cpi->lfc_n, &cm->fc, sizeof(cm->fc)); - /* if its a dropped frame honor the requests on subsequent frames */ - if (*size > 0) - { - cpi->droppable = !frame_is_reference(cpi); + /* if its a dropped frame honor the requests on subsequent frames */ + if (*size > 0) { + cpi->droppable = !frame_is_reference(cpi); - /* return to normal state */ - cm->refresh_entropy_probs = 1; - cm->refresh_alt_ref_frame = 0; - cm->refresh_golden_frame = 0; - cm->refresh_last_frame = 1; - cm->frame_type = INTER_FRAME; + /* return to normal state */ + cm->refresh_entropy_probs = 1; + cm->refresh_alt_ref_frame = 0; + cm->refresh_golden_frame = 0; + cm->refresh_last_frame = 1; + cm->frame_type = INTER_FRAME; + } - } + /* Save layer specific state */ + if (cpi->oxcf.number_of_layers > 1) save_layer_context(cpi); - /* Save layer specific state */ - if (cpi->oxcf.number_of_layers > 1) - save_layer_context (cpi); + vpx_usec_timer_mark(&cmptimer); + cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer); - vpx_usec_timer_mark(&cmptimer); - cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer); - - if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) - { - generate_psnr_packet(cpi); - } + if (cpi->b_calculate_psnr && cpi->pass != 1 && cm->show_frame) { + generate_psnr_packet(cpi); + } #if CONFIG_INTERNAL_STATS - if (cpi->pass != 1) - { - cpi->bytes += *size; + if (cpi->pass != 1) { + cpi->bytes += *size; - if (cm->show_frame) - { - cpi->common.show_frame_mi = cpi->common.mi; - cpi->count ++; + if (cm->show_frame) { + cpi->common.show_frame_mi = cpi->common.mi; + cpi->count++; - if (cpi->b_calculate_psnr) - { - uint64_t 
ye,ue,ve; - double frame_psnr; - YV12_BUFFER_CONFIG *orig = cpi->Source; - YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show; - unsigned int y_width = cpi->common.Width; - unsigned int y_height = cpi->common.Height; - unsigned int uv_width = (y_width + 1) / 2; - unsigned int uv_height = (y_height + 1) / 2; - int y_samples = y_height * y_width; - int uv_samples = uv_height * uv_width; - int t_samples = y_samples + 2 * uv_samples; - double sq_error; + if (cpi->b_calculate_psnr) { + uint64_t ye, ue, ve; + double frame_psnr; + YV12_BUFFER_CONFIG *orig = cpi->Source; + YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show; + unsigned int y_width = cpi->common.Width; + unsigned int y_height = cpi->common.Height; + unsigned int uv_width = (y_width + 1) / 2; + unsigned int uv_height = (y_height + 1) / 2; + int y_samples = y_height * y_width; + int uv_samples = uv_height * uv_width; + int t_samples = y_samples + 2 * uv_samples; + double sq_error; - ye = calc_plane_error(orig->y_buffer, orig->y_stride, - recon->y_buffer, recon->y_stride, y_width, y_height); + ye = calc_plane_error(orig->y_buffer, orig->y_stride, recon->y_buffer, + recon->y_stride, y_width, y_height); - ue = calc_plane_error(orig->u_buffer, orig->uv_stride, - recon->u_buffer, recon->uv_stride, uv_width, uv_height); + ue = calc_plane_error(orig->u_buffer, orig->uv_stride, recon->u_buffer, + recon->uv_stride, uv_width, uv_height); - ve = calc_plane_error(orig->v_buffer, orig->uv_stride, - recon->v_buffer, recon->uv_stride, uv_width, uv_height); + ve = calc_plane_error(orig->v_buffer, orig->uv_stride, recon->v_buffer, + recon->uv_stride, uv_width, uv_height); - sq_error = (double)(ye + ue + ve); + sq_error = (double)(ye + ue + ve); - frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error); + frame_psnr = vpx_sse_to_psnr(t_samples, 255.0, sq_error); - cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye); - cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue); - cpi->total_v += 
vpx_sse_to_psnr(uv_samples, 255.0, (double)ve); - cpi->total_sq_error += sq_error; - cpi->total += frame_psnr; + cpi->total_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye); + cpi->total_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue); + cpi->total_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve); + cpi->total_sq_error += sq_error; + cpi->total += frame_psnr; #if CONFIG_POSTPROC - { - YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer; - double sq_error2; - double frame_psnr2, frame_ssim2 = 0; - double weight = 0; + { + YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer; + double sq_error2; + double frame_psnr2, frame_ssim2 = 0; + double weight = 0; - vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer, cm->filter_level * 10 / 6, 1, 0); - vp8_clear_system_state(); + vp8_deblock(cm, cm->frame_to_show, &cm->post_proc_buffer, + cm->filter_level * 10 / 6, 1, 0); + vp8_clear_system_state(); - ye = calc_plane_error(orig->y_buffer, orig->y_stride, - pp->y_buffer, pp->y_stride, y_width, y_height); + ye = calc_plane_error(orig->y_buffer, orig->y_stride, pp->y_buffer, + pp->y_stride, y_width, y_height); - ue = calc_plane_error(orig->u_buffer, orig->uv_stride, - pp->u_buffer, pp->uv_stride, uv_width, uv_height); + ue = calc_plane_error(orig->u_buffer, orig->uv_stride, pp->u_buffer, + pp->uv_stride, uv_width, uv_height); - ve = calc_plane_error(orig->v_buffer, orig->uv_stride, - pp->v_buffer, pp->uv_stride, uv_width, uv_height); + ve = calc_plane_error(orig->v_buffer, orig->uv_stride, pp->v_buffer, + pp->uv_stride, uv_width, uv_height); - sq_error2 = (double)(ye + ue + ve); + sq_error2 = (double)(ye + ue + ve); - frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2); + frame_psnr2 = vpx_sse_to_psnr(t_samples, 255.0, sq_error2); - cpi->totalp_y += vpx_sse_to_psnr(y_samples, - 255.0, (double)ye); - cpi->totalp_u += vpx_sse_to_psnr(uv_samples, - 255.0, (double)ue); - cpi->totalp_v += vpx_sse_to_psnr(uv_samples, - 255.0, (double)ve); - cpi->total_sq_error2 += sq_error2; 
- cpi->totalp += frame_psnr2; + cpi->totalp_y += vpx_sse_to_psnr(y_samples, 255.0, (double)ye); + cpi->totalp_u += vpx_sse_to_psnr(uv_samples, 255.0, (double)ue); + cpi->totalp_v += vpx_sse_to_psnr(uv_samples, 255.0, (double)ve); + cpi->total_sq_error2 += sq_error2; + cpi->totalp += frame_psnr2; - frame_ssim2 = vpx_calc_ssim(cpi->Source, - &cm->post_proc_buffer, &weight); + frame_ssim2 = + vpx_calc_ssim(cpi->Source, &cm->post_proc_buffer, &weight); - cpi->summed_quality += frame_ssim2 * weight; - cpi->summed_weights += weight; + cpi->summed_quality += frame_ssim2 * weight; + cpi->summed_weights += weight; - if (cpi->oxcf.number_of_layers > 1) - { - unsigned int i; + if (cpi->oxcf.number_of_layers > 1) { + unsigned int i; - for (i=cpi->current_layer; - ioxcf.number_of_layers; i++) - { - cpi->frames_in_layer[i]++; + for (i = cpi->current_layer; i < cpi->oxcf.number_of_layers; ++i) { + cpi->frames_in_layer[i]++; - cpi->bytes_in_layer[i] += *size; - cpi->sum_psnr[i] += frame_psnr; - cpi->sum_psnr_p[i] += frame_psnr2; - cpi->total_error2[i] += sq_error; - cpi->total_error2_p[i] += sq_error2; - cpi->sum_ssim[i] += frame_ssim2 * weight; - cpi->sum_weights[i] += weight; - } - } - } -#endif + cpi->bytes_in_layer[i] += *size; + cpi->sum_psnr[i] += frame_psnr; + cpi->sum_psnr_p[i] += frame_psnr2; + cpi->total_error2[i] += sq_error; + cpi->total_error2_p[i] += sq_error2; + cpi->sum_ssim[i] += frame_ssim2 * weight; + cpi->sum_weights[i] += weight; } - - if (cpi->b_calculate_ssimg) - { - double y, u, v, frame_all; - frame_all = vpx_calc_ssimg(cpi->Source, cm->frame_to_show, - &y, &u, &v); - - if (cpi->oxcf.number_of_layers > 1) - { - unsigned int i; - - for (i=cpi->current_layer; - ioxcf.number_of_layers; i++) - { - if (!cpi->b_calculate_psnr) - cpi->frames_in_layer[i]++; - - cpi->total_ssimg_y_in_layer[i] += y; - cpi->total_ssimg_u_in_layer[i] += u; - cpi->total_ssimg_v_in_layer[i] += v; - cpi->total_ssimg_all_in_layer[i] += frame_all; - } - } - else - { - cpi->total_ssimg_y += 
y; - cpi->total_ssimg_u += u; - cpi->total_ssimg_v += v; - cpi->total_ssimg_all += frame_all; - } - } - + } } +#endif + } } + } #if 0 @@ -5821,184 +5212,172 @@ int vp8_get_compressed_data(VP8_COMP *cpi, unsigned int *frame_flags, unsigned l #endif #endif - cpi->common.error.setjmp = 0; - - return 0; -} - -int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest, vp8_ppflags_t *flags) -{ - if (cpi->common.refresh_alt_ref_frame) - return -1; - else - { - int ret; + cpi->common.error.setjmp = 0; #if CONFIG_MULTITHREAD - if(cpi->b_lpf_running) - { - sem_wait(&cpi->h_event_end_lpf); - cpi->b_lpf_running = 0; - } + /* wait for the lpf thread done */ + if (cpi->b_multi_threaded && cpi->b_lpf_running) { + sem_wait(&cpi->h_event_end_lpf); + cpi->b_lpf_running = 0; + } #endif + return 0; +} + +int vp8_get_preview_raw_frame(VP8_COMP *cpi, YV12_BUFFER_CONFIG *dest, + vp8_ppflags_t *flags) { + if (cpi->common.refresh_alt_ref_frame) { + return -1; + } else { + int ret; + #if CONFIG_POSTPROC - cpi->common.show_frame_mi = cpi->common.mi; - ret = vp8_post_proc_frame(&cpi->common, dest, flags); + cpi->common.show_frame_mi = cpi->common.mi; + ret = vp8_post_proc_frame(&cpi->common, dest, flags); #else - (void)flags; + (void)flags; - if (cpi->common.frame_to_show) - { - *dest = *cpi->common.frame_to_show; - dest->y_width = cpi->common.Width; - dest->y_height = cpi->common.Height; - dest->uv_height = cpi->common.Height / 2; - ret = 0; - } - else - { - ret = -1; - } + if (cpi->common.frame_to_show) { + *dest = *cpi->common.frame_to_show; + dest->y_width = cpi->common.Width; + dest->y_height = cpi->common.Height; + dest->uv_height = cpi->common.Height / 2; + ret = 0; + } else { + ret = -1; + } #endif - vp8_clear_system_state(); - return ret; - } + vp8_clear_system_state(); + return ret; + } } -int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows, unsigned int cols, int delta_q[4], int delta_lf[4], unsigned int threshold[4]) -{ - signed char 
feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; - int internal_delta_q[MAX_MB_SEGMENTS]; - const int range = 63; - int i; +int vp8_set_roimap(VP8_COMP *cpi, unsigned char *map, unsigned int rows, + unsigned int cols, int delta_q[4], int delta_lf[4], + unsigned int threshold[4]) { + signed char feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; + int internal_delta_q[MAX_MB_SEGMENTS]; + const int range = 63; + int i; - // This method is currently incompatible with the cyclic refresh method - if ( cpi->cyclic_refresh_mode_enabled ) - return -1; + // This method is currently incompatible with the cyclic refresh method + if (cpi->cyclic_refresh_mode_enabled) return -1; - // Check number of rows and columns match - if (cpi->common.mb_rows != rows || cpi->common.mb_cols != cols) - return -1; + // Check number of rows and columns match + if (cpi->common.mb_rows != (int)rows || cpi->common.mb_cols != (int)cols) { + return -1; + } - // Range check the delta Q values and convert the external Q range values - // to internal ones. - if ( (abs(delta_q[0]) > range) || (abs(delta_q[1]) > range) || - (abs(delta_q[2]) > range) || (abs(delta_q[3]) > range) ) - return -1; + // Range check the delta Q values and convert the external Q range values + // to internal ones. + if ((abs(delta_q[0]) > range) || (abs(delta_q[1]) > range) || + (abs(delta_q[2]) > range) || (abs(delta_q[3]) > range)) { + return -1; + } - // Range check the delta lf values - if ( (abs(delta_lf[0]) > range) || (abs(delta_lf[1]) > range) || - (abs(delta_lf[2]) > range) || (abs(delta_lf[3]) > range) ) - return -1; + // Range check the delta lf values + if ((abs(delta_lf[0]) > range) || (abs(delta_lf[1]) > range) || + (abs(delta_lf[2]) > range) || (abs(delta_lf[3]) > range)) { + return -1; + } - if (!map) - { - disable_segmentation(cpi); - return 0; + if (!map) { + disable_segmentation(cpi); + return 0; + } + + // Translate the external delta q values to internal values. 
+ for (i = 0; i < MAX_MB_SEGMENTS; ++i) { + internal_delta_q[i] = + (delta_q[i] >= 0) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]]; + } + + /* Set the segmentation Map */ + set_segmentation_map(cpi, map); + + /* Activate segmentation. */ + enable_segmentation(cpi); + + /* Set up the quant segment data */ + feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0]; + feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1]; + feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2]; + feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3]; + + /* Set up the loop segment data s */ + feature_data[MB_LVL_ALT_LF][0] = delta_lf[0]; + feature_data[MB_LVL_ALT_LF][1] = delta_lf[1]; + feature_data[MB_LVL_ALT_LF][2] = delta_lf[2]; + feature_data[MB_LVL_ALT_LF][3] = delta_lf[3]; + + cpi->segment_encode_breakout[0] = threshold[0]; + cpi->segment_encode_breakout[1] = threshold[1]; + cpi->segment_encode_breakout[2] = threshold[2]; + cpi->segment_encode_breakout[3] = threshold[3]; + + /* Initialise the feature data structure */ + set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA); + + return 0; +} + +int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows, + unsigned int cols) { + if ((int)rows == cpi->common.mb_rows && (int)cols == cpi->common.mb_cols) { + if (map) { + memcpy(cpi->active_map, map, rows * cols); + cpi->active_map_enabled = 1; + } else { + cpi->active_map_enabled = 0; } - // Translate the external delta q values to internal values. - for ( i = 0; i < MAX_MB_SEGMENTS; i++ ) - internal_delta_q[i] = - ( delta_q[i] >= 0 ) ? q_trans[delta_q[i]] : -q_trans[-delta_q[i]]; - - /* Set the segmentation Map */ - set_segmentation_map(cpi, map); - - /* Activate segmentation. 
*/ - enable_segmentation(cpi); - - /* Set up the quant segment data */ - feature_data[MB_LVL_ALT_Q][0] = internal_delta_q[0]; - feature_data[MB_LVL_ALT_Q][1] = internal_delta_q[1]; - feature_data[MB_LVL_ALT_Q][2] = internal_delta_q[2]; - feature_data[MB_LVL_ALT_Q][3] = internal_delta_q[3]; - - /* Set up the loop segment data s */ - feature_data[MB_LVL_ALT_LF][0] = delta_lf[0]; - feature_data[MB_LVL_ALT_LF][1] = delta_lf[1]; - feature_data[MB_LVL_ALT_LF][2] = delta_lf[2]; - feature_data[MB_LVL_ALT_LF][3] = delta_lf[3]; - - cpi->segment_encode_breakout[0] = threshold[0]; - cpi->segment_encode_breakout[1] = threshold[1]; - cpi->segment_encode_breakout[2] = threshold[2]; - cpi->segment_encode_breakout[3] = threshold[3]; - - /* Initialise the feature data structure */ - set_segment_data(cpi, &feature_data[0][0], SEGMENT_DELTADATA); - return 0; + } else { + return -1; + } } -int vp8_set_active_map(VP8_COMP *cpi, unsigned char *map, unsigned int rows, unsigned int cols) -{ - if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) - { - if (map) - { - memcpy(cpi->active_map, map, rows * cols); - cpi->active_map_enabled = 1; - } - else - cpi->active_map_enabled = 0; +int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode, + VPX_SCALING vert_mode) { + if (horiz_mode <= ONETWO) { + cpi->common.horiz_scale = horiz_mode; + } else { + return -1; + } - return 0; - } - else - { - return -1 ; - } + if (vert_mode <= ONETWO) { + cpi->common.vert_scale = vert_mode; + } else { + return -1; + } + + return 0; } -int vp8_set_internal_size(VP8_COMP *cpi, VPX_SCALING horiz_mode, VPX_SCALING vert_mode) -{ - if (horiz_mode <= ONETWO) - cpi->common.horiz_scale = horiz_mode; - else - return -1; +int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) { + int i, j; + int Total = 0; - if (vert_mode <= ONETWO) - cpi->common.vert_scale = vert_mode; - else - return -1; + unsigned char *src = source->y_buffer; + unsigned char *dst = dest->y_buffer; - return 0; -} - 
- - -int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest) -{ - int i, j; - int Total = 0; - - unsigned char *src = source->y_buffer; - unsigned char *dst = dest->y_buffer; - - /* Loop through the Y plane raw and reconstruction data summing - * (square differences) - */ - for (i = 0; i < source->y_height; i += 16) - { - for (j = 0; j < source->y_width; j += 16) - { - unsigned int sse; - Total += vpx_mse16x16(src + j, source->y_stride, - dst + j, dest->y_stride, &sse); - } - - src += 16 * source->y_stride; - dst += 16 * dest->y_stride; + /* Loop through the Y plane raw and reconstruction data summing + * (square differences) + */ + for (i = 0; i < source->y_height; i += 16) { + for (j = 0; j < source->y_width; j += 16) { + unsigned int sse; + Total += vpx_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride, + &sse); } - return Total; + src += 16 * source->y_stride; + dst += 16 * dest->y_stride; + } + + return Total; } - -int vp8_get_quantizer(VP8_COMP *cpi) -{ - return cpi->common.base_qindex; -} +int vp8_get_quantizer(VP8_COMP *cpi) { return cpi->common.base_qindex; } diff --git a/libs/libvpx/vp8/encoder/onyx_int.h b/libs/libvpx/vp8/encoder/onyx_int.h index 2b2f7a0a9a..59ad5773a6 100644 --- a/libs/libvpx/vp8/encoder/onyx_int.h +++ b/libs/libvpx/vp8/encoder/onyx_int.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_ENCODER_ONYX_INT_H_ #define VP8_ENCODER_ONYX_INT_H_ @@ -37,422 +36,397 @@ extern "C" { #endif -#define MIN_GF_INTERVAL 4 -#define DEFAULT_GF_INTERVAL 7 +#define MIN_GF_INTERVAL 4 +#define DEFAULT_GF_INTERVAL 7 #define KEY_FRAME_CONTEXT 5 -#define MAX_LAG_BUFFERS (CONFIG_REALTIME_ONLY? 1 : 25) +#define MAX_LAG_BUFFERS (CONFIG_REALTIME_ONLY ? 
1 : 25) -#define AF_THRESH 25 -#define AF_THRESH2 100 +#define AF_THRESH 25 +#define AF_THRESH2 100 #define ARF_DECAY_THRESH 12 - -#define MIN_THRESHMULT 32 -#define MAX_THRESHMULT 512 +#define MIN_THRESHMULT 32 +#define MAX_THRESHMULT 512 #define GF_ZEROMV_ZBIN_BOOST 12 #define LF_ZEROMV_ZBIN_BOOST 6 -#define MV_ZBIN_BOOST 4 +#define MV_ZBIN_BOOST 4 #define ZBIN_OQ_MAX 192 -#if !(CONFIG_REALTIME_ONLY) -#define VP8_TEMPORAL_ALT_REF 1 -#endif +#define VP8_TEMPORAL_ALT_REF !CONFIG_REALTIME_ONLY -typedef struct -{ - int kf_indicated; - unsigned int frames_since_key; - unsigned int frames_since_golden; - int filter_level; - int frames_till_gf_update_due; - int recent_ref_frame_usage[MAX_REF_FRAMES]; +typedef struct { + int kf_indicated; + unsigned int frames_since_key; + unsigned int frames_since_golden; + int filter_level; + int frames_till_gf_update_due; + int recent_ref_frame_usage[MAX_REF_FRAMES]; - MV_CONTEXT mvc[2]; - int mvcosts[2][MVvals+1]; + MV_CONTEXT mvc[2]; + int mvcosts[2][MVvals + 1]; #ifdef MODE_STATS - int y_modes[5]; - int uv_modes[4]; - int b_modes[10]; - int inter_y_modes[10]; - int inter_uv_modes[4]; - int inter_b_modes[10]; + int y_modes[5]; + int uv_modes[4]; + int b_modes[10]; + int inter_y_modes[10]; + int inter_uv_modes[4]; + int inter_b_modes[10]; #endif - vp8_prob ymode_prob[4], uv_mode_prob[3]; /* interframe intra mode probs */ - vp8_prob kf_ymode_prob[4], kf_uv_mode_prob[3]; /* keyframe "" */ + vp8_prob ymode_prob[4], uv_mode_prob[3]; /* interframe intra mode probs */ + vp8_prob kf_ymode_prob[4], kf_uv_mode_prob[3]; /* keyframe "" */ - int ymode_count[5], uv_mode_count[4]; /* intra MB type cts this frame */ + int ymode_count[5], uv_mode_count[4]; /* intra MB type cts this frame */ - int count_mb_ref_frame_usage[MAX_REF_FRAMES]; - - int this_frame_percent_intra; - int last_frame_percent_intra; + int count_mb_ref_frame_usage[MAX_REF_FRAMES]; + int this_frame_percent_intra; + int last_frame_percent_intra; } CODING_CONTEXT; -typedef struct -{ 
- double frame; - double intra_error; - double coded_error; - double ssim_weighted_pred_err; - double pcnt_inter; - double pcnt_motion; - double pcnt_second_ref; - double pcnt_neutral; - double MVr; - double mvr_abs; - double MVc; - double mvc_abs; - double MVrv; - double MVcv; - double mv_in_out_count; - double new_mv_count; - double duration; - double count; -} -FIRSTPASS_STATS; +typedef struct { + double frame; + double intra_error; + double coded_error; + double ssim_weighted_pred_err; + double pcnt_inter; + double pcnt_motion; + double pcnt_second_ref; + double pcnt_neutral; + double MVr; + double mvr_abs; + double MVc; + double mvc_abs; + double MVrv; + double MVcv; + double mv_in_out_count; + double new_mv_count; + double duration; + double count; +} FIRSTPASS_STATS; -typedef struct -{ - int frames_so_far; - double frame_intra_error; - double frame_coded_error; - double frame_pcnt_inter; - double frame_pcnt_motion; - double frame_mvr; - double frame_mvr_abs; - double frame_mvc; - double frame_mvc_abs; +typedef struct { + int frames_so_far; + double frame_intra_error; + double frame_coded_error; + double frame_pcnt_inter; + double frame_pcnt_motion; + double frame_mvr; + double frame_mvr_abs; + double frame_mvc; + double frame_mvc_abs; } ONEPASS_FRAMESTATS; +typedef enum { + THR_ZERO1 = 0, + THR_DC = 1, -typedef enum -{ - THR_ZERO1 = 0, - THR_DC = 1, + THR_NEAREST1 = 2, + THR_NEAR1 = 3, - THR_NEAREST1 = 2, - THR_NEAR1 = 3, + THR_ZERO2 = 4, + THR_NEAREST2 = 5, - THR_ZERO2 = 4, - THR_NEAREST2 = 5, + THR_ZERO3 = 6, + THR_NEAREST3 = 7, - THR_ZERO3 = 6, - THR_NEAREST3 = 7, + THR_NEAR2 = 8, + THR_NEAR3 = 9, - THR_NEAR2 = 8, - THR_NEAR3 = 9, + THR_V_PRED = 10, + THR_H_PRED = 11, + THR_TM = 12, - THR_V_PRED = 10, - THR_H_PRED = 11, - THR_TM = 12, + THR_NEW1 = 13, + THR_NEW2 = 14, + THR_NEW3 = 15, - THR_NEW1 = 13, - THR_NEW2 = 14, - THR_NEW3 = 15, + THR_SPLIT1 = 16, + THR_SPLIT2 = 17, + THR_SPLIT3 = 18, - THR_SPLIT1 = 16, - THR_SPLIT2 = 17, - THR_SPLIT3 = 18, + 
THR_B_PRED = 19 +} THR_MODES; - THR_B_PRED = 19 -} -THR_MODES; +typedef enum { DIAMOND = 0, NSTEP = 1, HEX = 2 } SEARCH_METHODS; -typedef enum -{ - DIAMOND = 0, - NSTEP = 1, - HEX = 2 -} SEARCH_METHODS; +typedef struct { + int RD; + SEARCH_METHODS search_method; + int improved_quant; + int improved_dct; + int auto_filter; + int recode_loop; + int iterative_sub_pixel; + int half_pixel_search; + int quarter_pixel_search; + int thresh_mult[MAX_MODES]; + int max_step_search_steps; + int first_step; + int optimize_coefficients; -typedef struct -{ - int RD; - SEARCH_METHODS search_method; - int improved_quant; - int improved_dct; - int auto_filter; - int recode_loop; - int iterative_sub_pixel; - int half_pixel_search; - int quarter_pixel_search; - int thresh_mult[MAX_MODES]; - int max_step_search_steps; - int first_step; - int optimize_coefficients; - - int use_fastquant_for_pick; - int no_skip_block4x4_search; - int improved_mv_pred; + int use_fastquant_for_pick; + int no_skip_block4x4_search; + int improved_mv_pred; } SPEED_FEATURES; -typedef struct -{ - MACROBLOCK mb; - int segment_counts[MAX_MB_SEGMENTS]; - int totalrate; +typedef struct { + MACROBLOCK mb; + int segment_counts[MAX_MB_SEGMENTS]; + int totalrate; } MB_ROW_COMP; -typedef struct -{ - TOKENEXTRA *start; - TOKENEXTRA *stop; +typedef struct { + TOKENEXTRA *start; + TOKENEXTRA *stop; } TOKENLIST; -typedef struct -{ - int ithread; - void *ptr1; - void *ptr2; +typedef struct { + int ithread; + void *ptr1; + void *ptr2; } ENCODETHREAD_DATA; -typedef struct -{ - int ithread; - void *ptr1; +typedef struct { + int ithread; + void *ptr1; } LPFTHREAD_DATA; -enum -{ - BLOCK_16X8, - BLOCK_8X16, - BLOCK_8X8, - BLOCK_4X4, - BLOCK_16X16, - BLOCK_MAX_SEGMENTS +enum { + BLOCK_16X8, + BLOCK_8X16, + BLOCK_8X8, + BLOCK_4X4, + BLOCK_16X16, + BLOCK_MAX_SEGMENTS }; -typedef struct -{ - /* Layer configuration */ - double framerate; - int target_bandwidth; +typedef struct { + /* Layer configuration */ + double framerate; + int 
target_bandwidth; - /* Layer specific coding parameters */ - int64_t starting_buffer_level; - int64_t optimal_buffer_level; - int64_t maximum_buffer_size; - int64_t starting_buffer_level_in_ms; - int64_t optimal_buffer_level_in_ms; - int64_t maximum_buffer_size_in_ms; + /* Layer specific coding parameters */ + int64_t starting_buffer_level; + int64_t optimal_buffer_level; + int64_t maximum_buffer_size; + int64_t starting_buffer_level_in_ms; + int64_t optimal_buffer_level_in_ms; + int64_t maximum_buffer_size_in_ms; - int avg_frame_size_for_layer; + int avg_frame_size_for_layer; - int64_t buffer_level; - int64_t bits_off_target; + int64_t buffer_level; + int64_t bits_off_target; - int64_t total_actual_bits; - int total_target_vs_actual; + int64_t total_actual_bits; + int total_target_vs_actual; - int worst_quality; - int active_worst_quality; - int best_quality; - int active_best_quality; + int worst_quality; + int active_worst_quality; + int best_quality; + int active_best_quality; - int ni_av_qi; - int ni_tot_qi; - int ni_frames; - int avg_frame_qindex; + int ni_av_qi; + int ni_tot_qi; + int ni_frames; + int avg_frame_qindex; - double rate_correction_factor; - double key_frame_rate_correction_factor; - double gf_rate_correction_factor; + double rate_correction_factor; + double key_frame_rate_correction_factor; + double gf_rate_correction_factor; - int zbin_over_quant; + int zbin_over_quant; - int inter_frame_target; - int64_t total_byte_count; + int inter_frame_target; + int64_t total_byte_count; - int filter_level; + int filter_level; - int last_frame_percent_intra; + int last_frame_percent_intra; - int count_mb_ref_frame_usage[MAX_REF_FRAMES]; + int count_mb_ref_frame_usage[MAX_REF_FRAMES]; } LAYER_CONTEXT; -typedef struct VP8_COMP -{ +typedef struct VP8_COMP { + DECLARE_ALIGNED(16, short, Y1quant[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, Y1quant_shift[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, Y1zbin[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, 
Y1round[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, Y1quant[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, Y1quant_shift[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, Y1zbin[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, Y1round[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, Y2quant[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, Y2quant_shift[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, Y2zbin[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, Y2round[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, Y2quant[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, Y2quant_shift[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, Y2zbin[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, Y2round[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, UVquant[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, UVquant_shift[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, UVzbin[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, UVround[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, UVquant[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, UVquant_shift[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, UVzbin[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, UVround[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, zrun_zbin_boost_y1[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, zrun_zbin_boost_y2[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, zrun_zbin_boost_uv[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, Y1quant_fast[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, Y2quant_fast[QINDEX_RANGE][16]); + DECLARE_ALIGNED(16, short, UVquant_fast[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, zrun_zbin_boost_y1[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, zrun_zbin_boost_y2[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, zrun_zbin_boost_uv[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, Y1quant_fast[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, Y2quant_fast[QINDEX_RANGE][16]); - DECLARE_ALIGNED(16, short, UVquant_fast[QINDEX_RANGE][16]); + MACROBLOCK mb; + 
VP8_COMMON common; + vp8_writer bc[9]; /* one boolcoder for each partition */ + VP8_CONFIG oxcf; - MACROBLOCK mb; - VP8_COMMON common; - vp8_writer bc[9]; /* one boolcoder for each partition */ + struct lookahead_ctx *lookahead; + struct lookahead_entry *source; + struct lookahead_entry *alt_ref_source; + struct lookahead_entry *last_source; - VP8_CONFIG oxcf; + YV12_BUFFER_CONFIG *Source; + YV12_BUFFER_CONFIG *un_scaled_source; + YV12_BUFFER_CONFIG scaled_source; + YV12_BUFFER_CONFIG *last_frame_unscaled_source; - struct lookahead_ctx *lookahead; - struct lookahead_entry *source; - struct lookahead_entry *alt_ref_source; - struct lookahead_entry *last_source; + unsigned int frames_till_alt_ref_frame; + /* frame in src_buffers has been identified to be encoded as an alt ref */ + int source_alt_ref_pending; + /* an alt ref frame has been encoded and is usable */ + int source_alt_ref_active; + /* source of frame to encode is an exact copy of an alt ref frame */ + int is_src_frame_alt_ref; - YV12_BUFFER_CONFIG *Source; - YV12_BUFFER_CONFIG *un_scaled_source; - YV12_BUFFER_CONFIG scaled_source; - YV12_BUFFER_CONFIG *last_frame_unscaled_source; + /* golden frame same as last frame ( short circuit gold searches) */ + int gold_is_last; + /* Alt reference frame same as last ( short circuit altref search) */ + int alt_is_last; + /* don't do both alt and gold search ( just do gold). 
*/ + int gold_is_alt; - unsigned int frames_till_alt_ref_frame; - /* frame in src_buffers has been identified to be encoded as an alt ref */ - int source_alt_ref_pending; - /* an alt ref frame has been encoded and is usable */ - int source_alt_ref_active; - /* source of frame to encode is an exact copy of an alt ref frame */ - int is_src_frame_alt_ref; + YV12_BUFFER_CONFIG pick_lf_lvl_frame; - /* golden frame same as last frame ( short circuit gold searches) */ - int gold_is_last; - /* Alt reference frame same as last ( short circuit altref search) */ - int alt_is_last; - /* don't do both alt and gold search ( just do gold). */ - int gold_is_alt; + TOKENEXTRA *tok; + unsigned int tok_count; - YV12_BUFFER_CONFIG pick_lf_lvl_frame; + unsigned int frames_since_key; + unsigned int key_frame_frequency; + unsigned int this_key_frame_forced; + unsigned int next_key_frame_forced; - TOKENEXTRA *tok; - unsigned int tok_count; + /* Ambient reconstruction err target for force key frames */ + int ambient_err; + unsigned int mode_check_freq[MAX_MODES]; - unsigned int frames_since_key; - unsigned int key_frame_frequency; - unsigned int this_key_frame_forced; - unsigned int next_key_frame_forced; + int rd_baseline_thresh[MAX_MODES]; - /* Ambient reconstruction err target for force key frames */ - int ambient_err; + int RDMULT; + int RDDIV; - unsigned int mode_check_freq[MAX_MODES]; + CODING_CONTEXT coding_context; - int rd_baseline_thresh[MAX_MODES]; + /* Rate targetting variables */ + int64_t last_prediction_error; + int64_t last_intra_error; - int RDMULT; - int RDDIV ; + int this_frame_target; + int projected_frame_size; + int last_q[2]; /* Separate values for Intra/Inter */ - CODING_CONTEXT coding_context; + double rate_correction_factor; + double key_frame_rate_correction_factor; + double gf_rate_correction_factor; - /* Rate targetting variables */ - int64_t last_prediction_error; - int64_t last_intra_error; + int frames_since_golden; + /* Count down till next GF */ + int 
frames_till_gf_update_due; - int this_frame_target; - int projected_frame_size; - int last_q[2]; /* Separate values for Intra/Inter */ + /* GF interval chosen when we coded the last GF */ + int current_gf_interval; - double rate_correction_factor; - double key_frame_rate_correction_factor; - double gf_rate_correction_factor; + /* Total bits overspent becasue of GF boost (cumulative) */ + int gf_overspend_bits; - unsigned int frames_since_golden; - /* Count down till next GF */ - int frames_till_gf_update_due; + /* Used in the few frames following a GF to recover the extra bits + * spent in that GF + */ + int non_gf_bitrate_adjustment; - /* GF interval chosen when we coded the last GF */ - int current_gf_interval; + /* Extra bits spent on key frames that need to be recovered */ + int kf_overspend_bits; - /* Total bits overspent becasue of GF boost (cumulative) */ - int gf_overspend_bits; + /* Current number of bit s to try and recover on each inter frame. */ + int kf_bitrate_adjustment; + int max_gf_interval; + int baseline_gf_interval; + int active_arnr_frames; - /* Used in the few frames following a GF to recover the extra bits - * spent in that GF - */ - int non_gf_bitrate_adjustment; + int64_t key_frame_count; + int prior_key_frame_distance[KEY_FRAME_CONTEXT]; + /* Current section per frame bandwidth target */ + int per_frame_bandwidth; + /* Average frame size target for clip */ + int av_per_frame_bandwidth; + /* Minimum allocation that should be used for any frame */ + int min_frame_bandwidth; + int inter_frame_target; + double output_framerate; + int64_t last_time_stamp_seen; + int64_t last_end_time_stamp_seen; + int64_t first_time_stamp_ever; - /* Extra bits spent on key frames that need to be recovered */ - int kf_overspend_bits; + int ni_av_qi; + int ni_tot_qi; + int ni_frames; + int avg_frame_qindex; - /* Current number of bit s to try and recover on each inter frame. 
*/ - int kf_bitrate_adjustment; - int max_gf_interval; - int baseline_gf_interval; - int active_arnr_frames; + int64_t total_byte_count; - int64_t key_frame_count; - int prior_key_frame_distance[KEY_FRAME_CONTEXT]; - /* Current section per frame bandwidth target */ - int per_frame_bandwidth; - /* Average frame size target for clip */ - int av_per_frame_bandwidth; - /* Minimum allocation that should be used for any frame */ - int min_frame_bandwidth; - int inter_frame_target; - double output_framerate; - int64_t last_time_stamp_seen; - int64_t last_end_time_stamp_seen; - int64_t first_time_stamp_ever; + int buffered_mode; - int ni_av_qi; - int ni_tot_qi; - int ni_frames; - int avg_frame_qindex; + double framerate; + double ref_framerate; + int64_t buffer_level; + int64_t bits_off_target; - int64_t total_byte_count; + int rolling_target_bits; + int rolling_actual_bits; - int buffered_mode; + int long_rolling_target_bits; + int long_rolling_actual_bits; - double framerate; - double ref_framerate; - int64_t buffer_level; - int64_t bits_off_target; + int64_t total_actual_bits; + int total_target_vs_actual; /* debug stats */ - int rolling_target_bits; - int rolling_actual_bits; + int worst_quality; + int active_worst_quality; + int best_quality; + int active_best_quality; - int long_rolling_target_bits; - int long_rolling_actual_bits; + int cq_target_quality; - int64_t total_actual_bits; - int total_target_vs_actual; /* debug stats */ + int drop_frames_allowed; /* Are we permitted to drop frames? */ + int drop_frame; /* Drop this frame? 
*/ - int worst_quality; - int active_worst_quality; - int best_quality; - int active_best_quality; + vp8_prob frame_coef_probs[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] + [ENTROPY_NODES]; + char update_probs[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES]; - int cq_target_quality; + unsigned int frame_branch_ct[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] + [ENTROPY_NODES][2]; - int drop_frames_allowed; /* Are we permitted to drop frames? */ - int drop_frame; /* Drop this frame? */ + int gfu_boost; + int kf_boost; + int last_boost; - vp8_prob frame_coef_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; - char update_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]; - - unsigned int frame_branch_ct [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES][2]; - - int gfu_boost; - int kf_boost; - int last_boost; - - int target_bandwidth; - struct vpx_codec_pkt_list *output_pkt_list; + int target_bandwidth; + struct vpx_codec_pkt_list *output_pkt_list; #if 0 /* Experimental code for lagged and one pass */ @@ -460,262 +434,249 @@ typedef struct VP8_COMP int one_pass_frame_index; #endif - int decimation_factor; - int decimation_count; + int decimation_factor; + int decimation_count; - /* for real time encoding */ - int avg_encode_time; /* microsecond */ - int avg_pick_mode_time; /* microsecond */ - int Speed; - int compressor_speed; + /* for real time encoding */ + int avg_encode_time; /* microsecond */ + int avg_pick_mode_time; /* microsecond */ + int Speed; + int compressor_speed; - int auto_gold; - int auto_adjust_gold_quantizer; - int auto_worst_q; - int cpu_used; - int pass; + int auto_gold; + int auto_adjust_gold_quantizer; + int auto_worst_q; + int cpu_used; + int pass; + int prob_intra_coded; + int prob_last_coded; + int prob_gf_coded; + int prob_skip_false; + int last_skip_false_probs[3]; + int last_skip_probs_q[3]; + int recent_ref_frame_usage[MAX_REF_FRAMES]; - int prob_intra_coded; - int 
prob_last_coded; - int prob_gf_coded; - int prob_skip_false; - int last_skip_false_probs[3]; - int last_skip_probs_q[3]; - int recent_ref_frame_usage[MAX_REF_FRAMES]; + int this_frame_percent_intra; + int last_frame_percent_intra; - int this_frame_percent_intra; - int last_frame_percent_intra; + int ref_frame_flags; - int ref_frame_flags; + SPEED_FEATURES sf; - SPEED_FEATURES sf; + /* Count ZEROMV on all reference frames. */ + int zeromv_count; + int lf_zeromv_pct; - /* Count ZEROMV on all reference frames. */ - int zeromv_count; - int lf_zeromv_pct; + unsigned char *segmentation_map; + signed char segment_feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; + int segment_encode_breakout[MAX_MB_SEGMENTS]; - unsigned char *segmentation_map; - signed char segment_feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; - int segment_encode_breakout[MAX_MB_SEGMENTS]; + unsigned char *active_map; + unsigned int active_map_enabled; - unsigned char *active_map; - unsigned int active_map_enabled; + /* Video conferencing cyclic refresh mode flags. This is a mode + * designed to clean up the background over time in live encoding + * scenarious. It uses segmentation. + */ + int cyclic_refresh_mode_enabled; + int cyclic_refresh_mode_max_mbs_perframe; + int cyclic_refresh_mode_index; + int cyclic_refresh_q; + signed char *cyclic_refresh_map; + // Count on how many (consecutive) times a macroblock uses ZER0MV_LAST. + unsigned char *consec_zero_last; + // Counter that is reset when a block is checked for a mode-bias against + // ZEROMV_LASTREF. + unsigned char *consec_zero_last_mvbias; - /* Video conferencing cyclic refresh mode flags. This is a mode - * designed to clean up the background over time in live encoding - * scenarious. It uses segmentation. - */ - int cyclic_refresh_mode_enabled; - int cyclic_refresh_mode_max_mbs_perframe; - int cyclic_refresh_mode_index; - int cyclic_refresh_q; - signed char *cyclic_refresh_map; - // Count on how many (consecutive) times a macroblock uses ZER0MV_LAST. 
- unsigned char *consec_zero_last; - // Counter that is reset when a block is checked for a mode-bias against - // ZEROMV_LASTREF. - unsigned char *consec_zero_last_mvbias; + // Frame counter for the temporal pattern. Counter is rest when the temporal + // layers are changed dynamically (run-time change). + unsigned int temporal_pattern_counter; + // Temporal layer id. + int temporal_layer_id; - // Frame counter for the temporal pattern. Counter is rest when the temporal - // layers are changed dynamically (run-time change). - unsigned int temporal_pattern_counter; - // Temporal layer id. - int temporal_layer_id; + // Measure of average squared difference between source and denoised signal. + int mse_source_denoised; - // Measure of average squared difference between source and denoised signal. - int mse_source_denoised; - - int force_maxqp; + int force_maxqp; #if CONFIG_MULTITHREAD - /* multithread data */ - pthread_mutex_t *pmutex; - pthread_mutex_t mt_mutex; /* mutex for b_multi_threaded */ - int * mt_current_mb_col; - int mt_sync_range; - int b_multi_threaded; - int encoding_thread_count; - int b_lpf_running; + /* multithread data */ + int *mt_current_mb_col; + int mt_sync_range; + int b_multi_threaded; + int encoding_thread_count; + int b_lpf_running; - pthread_t *h_encoding_thread; - pthread_t h_filter_thread; + pthread_t *h_encoding_thread; + pthread_t h_filter_thread; - MB_ROW_COMP *mb_row_ei; - ENCODETHREAD_DATA *en_thread_data; - LPFTHREAD_DATA lpf_thread_data; + MB_ROW_COMP *mb_row_ei; + ENCODETHREAD_DATA *en_thread_data; + LPFTHREAD_DATA lpf_thread_data; - /* events */ - sem_t *h_event_start_encoding; - sem_t h_event_end_encoding; - sem_t h_event_start_lpf; - sem_t h_event_end_lpf; + /* events */ + sem_t *h_event_start_encoding; + sem_t *h_event_end_encoding; + sem_t h_event_start_lpf; + sem_t h_event_end_lpf; #endif - TOKENLIST *tplist; - unsigned int partition_sz[MAX_PARTITIONS]; - unsigned char *partition_d[MAX_PARTITIONS]; - unsigned char 
*partition_d_end[MAX_PARTITIONS]; + TOKENLIST *tplist; + unsigned int partition_sz[MAX_PARTITIONS]; + unsigned char *partition_d[MAX_PARTITIONS]; + unsigned char *partition_d_end[MAX_PARTITIONS]; + fractional_mv_step_fp *find_fractional_mv_step; + vp8_full_search_fn_t full_search_sad; + vp8_refining_search_fn_t refining_search_sad; + vp8_diamond_search_fn_t diamond_search_sad; + vp8_variance_fn_ptr_t fn_ptr[BLOCK_MAX_SEGMENTS]; + uint64_t time_receive_data; + uint64_t time_compress_data; + uint64_t time_pick_lpf; + uint64_t time_encode_mb_row; - fractional_mv_step_fp *find_fractional_mv_step; - vp8_full_search_fn_t full_search_sad; - vp8_refining_search_fn_t refining_search_sad; - vp8_diamond_search_fn_t diamond_search_sad; - vp8_variance_fn_ptr_t fn_ptr[BLOCK_MAX_SEGMENTS]; - uint64_t time_receive_data; - uint64_t time_compress_data; - uint64_t time_pick_lpf; - uint64_t time_encode_mb_row; + int base_skip_false_prob[128]; - int base_skip_false_prob[128]; + FRAME_CONTEXT lfc_n; /* last frame entropy */ + FRAME_CONTEXT lfc_a; /* last alt ref entropy */ + FRAME_CONTEXT lfc_g; /* last gold ref entropy */ - FRAME_CONTEXT lfc_n; /* last frame entropy */ - FRAME_CONTEXT lfc_a; /* last alt ref entropy */ - FRAME_CONTEXT lfc_g; /* last gold ref entropy */ - - - struct twopass_rc - { - unsigned int section_intra_rating; - double section_max_qfactor; - unsigned int next_iiratio; - unsigned int this_iiratio; - FIRSTPASS_STATS total_stats; - FIRSTPASS_STATS this_frame_stats; - FIRSTPASS_STATS *stats_in, *stats_in_end, *stats_in_start; - FIRSTPASS_STATS total_left_stats; - int first_pass_done; - int64_t bits_left; - int64_t clip_bits_total; - double avg_iiratio; - double modified_error_total; - double modified_error_used; - double modified_error_left; - double kf_intra_err_min; - double gf_intra_err_min; - int frames_to_key; - int maxq_max_limit; - int maxq_min_limit; - int gf_decay_rate; - int static_scene_max_gf_interval; - int kf_bits; - /* Remaining error from uncoded 
frames in a gf group. */ - int gf_group_error_left; - /* Projected total bits available for a key frame group of frames */ - int64_t kf_group_bits; - /* Error score of frames still to be coded in kf group */ - int64_t kf_group_error_left; - /* Projected Bits available for a group including 1 GF or ARF */ - int64_t gf_group_bits; - /* Bits for the golden frame or ARF */ - int gf_bits; - int alt_extra_bits; - double est_max_qcorrection_factor; - } twopass; + struct twopass_rc { + unsigned int section_intra_rating; + double section_max_qfactor; + unsigned int next_iiratio; + unsigned int this_iiratio; + FIRSTPASS_STATS total_stats; + FIRSTPASS_STATS this_frame_stats; + FIRSTPASS_STATS *stats_in, *stats_in_end, *stats_in_start; + FIRSTPASS_STATS total_left_stats; + int first_pass_done; + int64_t bits_left; + int64_t clip_bits_total; + double avg_iiratio; + double modified_error_total; + double modified_error_used; + double modified_error_left; + double kf_intra_err_min; + double gf_intra_err_min; + int frames_to_key; + int maxq_max_limit; + int maxq_min_limit; + int gf_decay_rate; + int static_scene_max_gf_interval; + int kf_bits; + /* Remaining error from uncoded frames in a gf group. 
*/ + int gf_group_error_left; + /* Projected total bits available for a key frame group of frames */ + int64_t kf_group_bits; + /* Error score of frames still to be coded in kf group */ + int64_t kf_group_error_left; + /* Projected Bits available for a group including 1 GF or ARF */ + int64_t gf_group_bits; + /* Bits for the golden frame or ARF */ + int gf_bits; + int alt_extra_bits; + double est_max_qcorrection_factor; + } twopass; #if VP8_TEMPORAL_ALT_REF - YV12_BUFFER_CONFIG alt_ref_buffer; - YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS]; - int fixed_divide[512]; + YV12_BUFFER_CONFIG alt_ref_buffer; + YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS]; + int fixed_divide[512]; #endif #if CONFIG_INTERNAL_STATS - int count; - double total_y; - double total_u; - double total_v; - double total ; - double total_sq_error; - double totalp_y; - double totalp_u; - double totalp_v; - double totalp; - double total_sq_error2; - int bytes; - double summed_quality; - double summed_weights; - unsigned int tot_recode_hits; + int count; + double total_y; + double total_u; + double total_v; + double total; + double total_sq_error; + double totalp_y; + double totalp_u; + double totalp_v; + double totalp; + double total_sq_error2; + int bytes; + double summed_quality; + double summed_weights; + unsigned int tot_recode_hits; - - double total_ssimg_y; - double total_ssimg_u; - double total_ssimg_v; - double total_ssimg_all; - - int b_calculate_ssimg; + int b_calculate_ssimg; #endif - int b_calculate_psnr; + int b_calculate_psnr; - /* Per MB activity measurement */ - unsigned int activity_avg; - unsigned int * mb_activity_map; + /* Per MB activity measurement */ + unsigned int activity_avg; + unsigned int *mb_activity_map; - /* Record of which MBs still refer to last golden frame either - * directly or through 0,0 - */ - unsigned char *gf_active_flags; - int gf_active_count; + /* Record of which MBs still refer to last golden frame either + * directly or through 0,0 + */ + unsigned char 
*gf_active_flags; + int gf_active_count; - int output_partition; + int output_partition; - /* Store last frame's MV info for next frame MV prediction */ - int_mv *lfmv; - int *lf_ref_frame_sign_bias; - int *lf_ref_frame; + /* Store last frame's MV info for next frame MV prediction */ + int_mv *lfmv; + int *lf_ref_frame_sign_bias; + int *lf_ref_frame; - /* force next frame to intra when kf_auto says so */ - int force_next_frame_intra; + /* force next frame to intra when kf_auto says so */ + int force_next_frame_intra; - int droppable; + int droppable; - int initial_width; - int initial_height; + int initial_width; + int initial_height; #if CONFIG_TEMPORAL_DENOISING - VP8_DENOISER denoiser; + VP8_DENOISER denoiser; #endif - /* Coding layer state variables */ - unsigned int current_layer; - LAYER_CONTEXT layer_context[VPX_TS_MAX_LAYERS]; + /* Coding layer state variables */ + unsigned int current_layer; + LAYER_CONTEXT layer_context[VPX_TS_MAX_LAYERS]; - int64_t frames_in_layer[VPX_TS_MAX_LAYERS]; - int64_t bytes_in_layer[VPX_TS_MAX_LAYERS]; - double sum_psnr[VPX_TS_MAX_LAYERS]; - double sum_psnr_p[VPX_TS_MAX_LAYERS]; - double total_error2[VPX_TS_MAX_LAYERS]; - double total_error2_p[VPX_TS_MAX_LAYERS]; - double sum_ssim[VPX_TS_MAX_LAYERS]; - double sum_weights[VPX_TS_MAX_LAYERS]; + int64_t frames_in_layer[VPX_TS_MAX_LAYERS]; + int64_t bytes_in_layer[VPX_TS_MAX_LAYERS]; + double sum_psnr[VPX_TS_MAX_LAYERS]; + double sum_psnr_p[VPX_TS_MAX_LAYERS]; + double total_error2[VPX_TS_MAX_LAYERS]; + double total_error2_p[VPX_TS_MAX_LAYERS]; + double sum_ssim[VPX_TS_MAX_LAYERS]; + double sum_weights[VPX_TS_MAX_LAYERS]; - double total_ssimg_y_in_layer[VPX_TS_MAX_LAYERS]; - double total_ssimg_u_in_layer[VPX_TS_MAX_LAYERS]; - double total_ssimg_v_in_layer[VPX_TS_MAX_LAYERS]; - double total_ssimg_all_in_layer[VPX_TS_MAX_LAYERS]; + double total_ssimg_y_in_layer[VPX_TS_MAX_LAYERS]; + double total_ssimg_u_in_layer[VPX_TS_MAX_LAYERS]; + double total_ssimg_v_in_layer[VPX_TS_MAX_LAYERS]; + 
double total_ssimg_all_in_layer[VPX_TS_MAX_LAYERS]; #if CONFIG_MULTI_RES_ENCODING - /* Number of MBs per row at lower-resolution level */ - int mr_low_res_mb_cols; - /* Indicate if lower-res mv info is available */ - unsigned char mr_low_res_mv_avail; + /* Number of MBs per row at lower-resolution level */ + int mr_low_res_mb_cols; + /* Indicate if lower-res mv info is available */ + unsigned char mr_low_res_mv_avail; #endif - /* The frame number of each reference frames */ - unsigned int current_ref_frames[MAX_REF_FRAMES]; - // Closest reference frame to current frame. - MV_REFERENCE_FRAME closest_reference_frame; + /* The frame number of each reference frames */ + unsigned int current_ref_frames[MAX_REF_FRAMES]; + // Closest reference frame to current frame. + MV_REFERENCE_FRAME closest_reference_frame; - struct rd_costs_struct - { - int mvcosts[2][MVvals+1]; - int mvsadcosts[2][MVfpvals+1]; - int mbmode_cost[2][MB_MODE_COUNT]; - int intra_uv_mode_cost[2][MB_MODE_COUNT]; - int bmode_costs[10][10][10]; - int inter_bmode_costs[B_MODE_COUNT]; - int token_costs[BLOCK_TYPES][COEF_BANDS] - [PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS]; - } rd_costs; + struct rd_costs_struct { + int mvcosts[2][MVvals + 1]; + int mvsadcosts[2][MVfpvals + 1]; + int mbmode_cost[2][MB_MODE_COUNT]; + int intra_uv_mode_cost[2][MB_MODE_COUNT]; + int bmode_costs[10][10][10]; + int inter_bmode_costs[B_MODE_COUNT]; + int token_costs[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] + [MAX_ENTROPY_TOKENS]; + } rd_costs; } VP8_COMP; void vp8_initialize_enc(void); @@ -726,27 +687,29 @@ void vp8_new_framerate(VP8_COMP *cpi, double framerate); void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm); void vp8_pack_bitstream(VP8_COMP *cpi, unsigned char *dest, - unsigned char *dest_end, unsigned long *size); + unsigned char *dest_end, size_t *size); void vp8_tokenize_mb(VP8_COMP *, MACROBLOCK *, TOKENEXTRA **); void vp8_set_speed_features(VP8_COMP *cpi); #if CONFIG_DEBUG -#define CHECK_MEM_ERROR(lval,expr) do {\ 
- lval = (expr); \ - if(!lval) \ - vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,\ - "Failed to allocate "#lval" at %s:%d", \ - __FILE__,__LINE__);\ - } while(0) +#define CHECK_MEM_ERROR(lval, expr) \ + do { \ + lval = (expr); \ + if (!lval) \ + vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, \ + "Failed to allocate " #lval " at %s:%d", __FILE__, \ + __LINE__); \ + } while (0) #else -#define CHECK_MEM_ERROR(lval,expr) do {\ - lval = (expr); \ - if(!lval) \ - vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR,\ - "Failed to allocate "#lval);\ - } while(0) +#define CHECK_MEM_ERROR(lval, expr) \ + do { \ + lval = (expr); \ + if (!lval) \ + vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, \ + "Failed to allocate " #lval); \ + } while (0) #endif #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/encoder/pickinter.c b/libs/libvpx/vp8/encoder/pickinter.c index 0ea0632918..7b68d35f50 100644 --- a/libs/libvpx/vp8/encoder/pickinter.c +++ b/libs/libvpx/vp8/encoder/pickinter.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include #include "vpx_config.h" #include "./vpx_dsp_rtcd.h" @@ -36,7 +35,7 @@ extern unsigned int cnt_pm; #endif -#define MODEL_MODE 0 +#define MODEL_MODE 1 extern const int vp8_ref_frame_order[MAX_MODES]; extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES]; @@ -47,10 +46,14 @@ extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES]; // skin color classifier is defined. // Fixed-point skin color model parameters. 
-static const int skin_mean[5][2] = - {{7463, 9614}, {6400, 10240}, {7040, 10240}, {8320, 9280}, {6800, 9614}}; -static const int skin_inv_cov[4] = {4107, 1663, 1663, 2157}; // q16 -static const int skin_threshold[2] = {1570636, 800000}; // q18 +static const int skin_mean[5][2] = { { 7463, 9614 }, + { 6400, 10240 }, + { 7040, 10240 }, + { 8320, 9280 }, + { 6800, 9614 } }; +static const int skin_inv_cov[4] = { 4107, 1663, 1663, 2157 }; // q16 +static const int skin_threshold[6] = { 1570636, 1400000, 800000, + 800000, 800000, 800000 }; // q18 // Evaluates the Mahalanobis distance measure for the input CbCr values. static int evaluate_skin_color_difference(int cb, int cr, int idx) { @@ -65,34 +68,42 @@ static int evaluate_skin_color_difference(int cb, int cr, int idx) { const int cb_diff_q2 = (cb_diff_q12 + (1 << 9)) >> 10; const int cbcr_diff_q2 = (cbcr_diff_q12 + (1 << 9)) >> 10; const int cr_diff_q2 = (cr_diff_q12 + (1 << 9)) >> 10; - const int skin_diff = skin_inv_cov[0] * cb_diff_q2 + - skin_inv_cov[1] * cbcr_diff_q2 + - skin_inv_cov[2] * cbcr_diff_q2 + - skin_inv_cov[3] * cr_diff_q2; + const int skin_diff = + skin_inv_cov[0] * cb_diff_q2 + skin_inv_cov[1] * cbcr_diff_q2 + + skin_inv_cov[2] * cbcr_diff_q2 + skin_inv_cov[3] * cr_diff_q2; return skin_diff; } // Checks if the input yCbCr values corresponds to skin color. -static int is_skin_color(int y, int cb, int cr) -{ - if (y < 40 || y > 220) - { +static int is_skin_color(int y, int cb, int cr, int consec_zeromv) { + if (y < 40 || y > 220) { return 0; - } - else - { - if (MODEL_MODE == 0) - { + } else { + if (MODEL_MODE == 0) { return (evaluate_skin_color_difference(cb, cr, 0) < skin_threshold[0]); - } - else - { + } else { int i = 0; - for (; i < 5; i++) - { - if (evaluate_skin_color_difference(cb, cr, i) < skin_threshold[1]) - { - return 1; + // No skin if block has been zero motion for long consecutive time. + if (consec_zeromv > 60) return 0; + // Exit on grey. 
+ if (cb == 128 && cr == 128) return 0; + // Exit on very strong cb. + if (cb > 150 && cr < 110) return 0; + for (; i < 5; ++i) { + int skin_color_diff = evaluate_skin_color_difference(cb, cr, i); + if (skin_color_diff < skin_threshold[i + 1]) { + if (y < 60 && skin_color_diff > 3 * (skin_threshold[i + 1] >> 2)) { + return 0; + } else if (consec_zeromv > 25 && + skin_color_diff > (skin_threshold[i + 1] >> 1)) { + return 0; + } else { + return 1; + } + } + // Exit if difference is much large than the threshold. + if (skin_color_diff > (skin_threshold[i + 1] << 3)) { + return 0; } } return 0; @@ -100,9 +111,9 @@ static int is_skin_color(int y, int cb, int cr) } } -static int macroblock_corner_grad(unsigned char* signal, int stride, - int offsetx, int offsety, int sgnx, int sgny) -{ +static int macroblock_corner_grad(unsigned char *signal, int stride, + int offsetx, int offsety, int sgnx, + int sgny) { int y1 = signal[offsetx * stride + offsety]; int y2 = signal[offsetx * stride + offsety + sgny]; int y3 = signal[(offsetx + sgnx) * stride + offsety]; @@ -110,15 +121,10 @@ static int macroblock_corner_grad(unsigned char* signal, int stride, return VPXMAX(VPXMAX(abs(y1 - y2), abs(y1 - y3)), abs(y1 - y4)); } -static int check_dot_artifact_candidate(VP8_COMP *cpi, - MACROBLOCK *x, - unsigned char *target_last, - int stride, - unsigned char* last_ref, - int mb_row, - int mb_col, - int channel) -{ +static int check_dot_artifact_candidate(VP8_COMP *cpi, MACROBLOCK *x, + unsigned char *target_last, int stride, + unsigned char *last_ref, int mb_row, + int mb_col, int channel) { int threshold1 = 6; int threshold2 = 3; unsigned int max_num = (cpi->common.MBs) / 10; @@ -131,8 +137,7 @@ static int check_dot_artifact_candidate(VP8_COMP *cpi, if (channel > 0) { shift = 7; } - if (cpi->oxcf.number_of_layers > 1) - { + if (cpi->oxcf.number_of_layers > 1) { num_frames = 20; } x->zero_last_dot_suppress = 0; @@ -144,8 +149,7 @@ static int check_dot_artifact_candidate(VP8_COMP *cpi, if 
(cpi->current_layer == 0 && cpi->consec_zero_last_mvbias[index] > num_frames && x->mbs_zero_last_dot_suppress < max_num && - !cpi->oxcf.screen_content_mode) - { + !cpi->oxcf.screen_content_mode) { // If this block is checked here, label it so we don't check it again until // ~|x| framaes later. x->zero_last_dot_suppress = 1; @@ -156,32 +160,29 @@ static int check_dot_artifact_candidate(VP8_COMP *cpi, // Top-left: grad_last = macroblock_corner_grad(last_ref, stride, 0, 0, 1, 1); grad_source = macroblock_corner_grad(target_last, stride, 0, 0, 1, 1); - if (grad_last >= threshold1 && grad_source <= threshold2) - { - x->mbs_zero_last_dot_suppress++; - return 1; + if (grad_last >= threshold1 && grad_source <= threshold2) { + x->mbs_zero_last_dot_suppress++; + return 1; } // Top-right: grad_last = macroblock_corner_grad(last_ref, stride, 0, shift, 1, -1); grad_source = macroblock_corner_grad(target_last, stride, 0, shift, 1, -1); - if (grad_last >= threshold1 && grad_source <= threshold2) - { + if (grad_last >= threshold1 && grad_source <= threshold2) { x->mbs_zero_last_dot_suppress++; return 1; } // Bottom-left: grad_last = macroblock_corner_grad(last_ref, stride, shift, 0, -1, 1); grad_source = macroblock_corner_grad(target_last, stride, shift, 0, -1, 1); - if (grad_last >= threshold1 && grad_source <= threshold2) - { + if (grad_last >= threshold1 && grad_source <= threshold2) { x->mbs_zero_last_dot_suppress++; return 1; } // Bottom-right: grad_last = macroblock_corner_grad(last_ref, stride, shift, shift, -1, -1); - grad_source = macroblock_corner_grad(target_last, stride, shift, shift, -1, -1); - if (grad_last >= threshold1 && grad_source <= threshold2) - { + grad_source = + macroblock_corner_grad(target_last, stride, shift, shift, -1, -1); + if (grad_last >= threshold1 && grad_source <= threshold2) { x->mbs_zero_last_dot_suppress++; return 1; } @@ -195,1391 +196,1234 @@ int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, int error_per_bit, const 
vp8_variance_fn_ptr_t *vfp, int *mvcost[2], int *distortion, - unsigned int *sse) -{ - (void) b; - (void) d; - (void) ref_mv; - (void) error_per_bit; - (void) vfp; - (void) mb; - (void) mvcost; - (void) distortion; - (void) sse; - bestmv->as_mv.row <<= 3; - bestmv->as_mv.col <<= 3; - return 0; + unsigned int *sse) { + (void)b; + (void)d; + (void)ref_mv; + (void)error_per_bit; + (void)vfp; + (void)mb; + (void)mvcost; + (void)distortion; + (void)sse; + bestmv->as_mv.row <<= 3; + bestmv->as_mv.col <<= 3; + return 0; } +int vp8_get_inter_mbpred_error(MACROBLOCK *mb, const vp8_variance_fn_ptr_t *vfp, + unsigned int *sse, int_mv this_mv) { + BLOCK *b = &mb->block[0]; + BLOCKD *d = &mb->e_mbd.block[0]; + unsigned char *what = (*(b->base_src) + b->src); + int what_stride = b->src_stride; + int pre_stride = mb->e_mbd.pre.y_stride; + unsigned char *in_what = mb->e_mbd.pre.y_buffer + d->offset; + int in_what_stride = pre_stride; + int xoffset = this_mv.as_mv.col & 7; + int yoffset = this_mv.as_mv.row & 7; -int vp8_get_inter_mbpred_error(MACROBLOCK *mb, - const vp8_variance_fn_ptr_t *vfp, - unsigned int *sse, - int_mv this_mv) -{ - - BLOCK *b = &mb->block[0]; - BLOCKD *d = &mb->e_mbd.block[0]; - unsigned char *what = (*(b->base_src) + b->src); - int what_stride = b->src_stride; - int pre_stride = mb->e_mbd.pre.y_stride; - unsigned char *in_what = mb->e_mbd.pre.y_buffer + d->offset ; - int in_what_stride = pre_stride; - int xoffset = this_mv.as_mv.col & 7; - int yoffset = this_mv.as_mv.row & 7; - - in_what += (this_mv.as_mv.row >> 3) * pre_stride + (this_mv.as_mv.col >> 3); - - if (xoffset | yoffset) - { - return vfp->svf(in_what, in_what_stride, xoffset, yoffset, what, what_stride, sse); - } - else - { - return vfp->vf(what, what_stride, in_what, in_what_stride, sse); - } + in_what += (this_mv.as_mv.row >> 3) * pre_stride + (this_mv.as_mv.col >> 3); + if (xoffset | yoffset) { + return vfp->svf(in_what, in_what_stride, xoffset, yoffset, what, + what_stride, sse); + } else { + 
return vfp->vf(what, what_stride, in_what, in_what_stride, sse); + } } -static int get_prediction_error(BLOCK *be, BLOCKD *b) -{ - unsigned char *sptr; - unsigned char *dptr; - sptr = (*(be->base_src) + be->src); - dptr = b->predictor; - - return vpx_get4x4sse_cs(sptr, be->src_stride, dptr, 16); +static int get_prediction_error(BLOCK *be, BLOCKD *b) { + unsigned char *sptr; + unsigned char *dptr; + sptr = (*(be->base_src) + be->src); + dptr = b->predictor; + return vpx_get4x4sse_cs(sptr, be->src_stride, dptr, 16); } -static int pick_intra4x4block( - MACROBLOCK *x, - int ib, - B_PREDICTION_MODE *best_mode, - const int *mode_costs, +static int pick_intra4x4block(MACROBLOCK *x, int ib, + B_PREDICTION_MODE *best_mode, + const int *mode_costs, - int *bestrate, - int *bestdistortion) -{ + int *bestrate, int *bestdistortion) { + BLOCKD *b = &x->e_mbd.block[ib]; + BLOCK *be = &x->block[ib]; + int dst_stride = x->e_mbd.dst.y_stride; + unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset; + B_PREDICTION_MODE mode; + int best_rd = INT_MAX; + int rate; + int distortion; - BLOCKD *b = &x->e_mbd.block[ib]; - BLOCK *be = &x->block[ib]; - int dst_stride = x->e_mbd.dst.y_stride; - unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset; - B_PREDICTION_MODE mode; - int best_rd = INT_MAX; - int rate; - int distortion; + unsigned char *Above = dst - dst_stride; + unsigned char *yleft = dst - 1; + unsigned char top_left = Above[-1]; - unsigned char *Above = dst - dst_stride; - unsigned char *yleft = dst - 1; - unsigned char top_left = Above[-1]; + for (mode = B_DC_PRED; mode <= B_HE_PRED; ++mode) { + int this_rd; - for (mode = B_DC_PRED; mode <= B_HE_PRED; mode++) - { - int this_rd; + rate = mode_costs[mode]; - rate = mode_costs[mode]; + vp8_intra4x4_predict(Above, yleft, dst_stride, mode, b->predictor, 16, + top_left); + distortion = get_prediction_error(be, b); + this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion); - vp8_intra4x4_predict(Above, yleft, dst_stride, mode, - 
b->predictor, 16, top_left); - distortion = get_prediction_error(be, b); - this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion); - - if (this_rd < best_rd) - { - *bestrate = rate; - *bestdistortion = distortion; - best_rd = this_rd; - *best_mode = mode; - } + if (this_rd < best_rd) { + *bestrate = rate; + *bestdistortion = distortion; + best_rd = this_rd; + *best_mode = mode; } + } - b->bmi.as_mode = *best_mode; - vp8_encode_intra4x4block(x, ib); - return best_rd; + b->bmi.as_mode = *best_mode; + vp8_encode_intra4x4block(x, ib); + return best_rd; } +static int pick_intra4x4mby_modes(MACROBLOCK *mb, int *Rate, int *best_dist) { + MACROBLOCKD *const xd = &mb->e_mbd; + int i; + int cost = mb->mbmode_cost[xd->frame_type][B_PRED]; + int error; + int distortion = 0; + const int *bmode_costs; -static int pick_intra4x4mby_modes -( - MACROBLOCK *mb, - int *Rate, - int *best_dist -) -{ - MACROBLOCKD *const xd = &mb->e_mbd; - int i; - int cost = mb->mbmode_cost [xd->frame_type] [B_PRED]; - int error; - int distortion = 0; - const int *bmode_costs; + intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16); - intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16); + bmode_costs = mb->inter_bmode_costs; - bmode_costs = mb->inter_bmode_costs; + for (i = 0; i < 16; ++i) { + MODE_INFO *const mic = xd->mode_info_context; + const int mis = xd->mode_info_stride; - for (i = 0; i < 16; i++) - { - MODE_INFO *const mic = xd->mode_info_context; - const int mis = xd->mode_info_stride; + B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode); + int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(d); - B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode); - int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(d); + if (mb->e_mbd.frame_type == KEY_FRAME) { + const B_PREDICTION_MODE A = above_block_mode(mic, i, mis); + const B_PREDICTION_MODE L = left_block_mode(mic, i); - if (mb->e_mbd.frame_type == KEY_FRAME) - { - const B_PREDICTION_MODE A = 
above_block_mode(mic, i, mis); - const B_PREDICTION_MODE L = left_block_mode(mic, i); - - bmode_costs = mb->bmode_costs[A][L]; - } - - - pick_intra4x4block(mb, i, &best_mode, bmode_costs, &r, &d); - - cost += r; - distortion += d; - mic->bmi[i].as_mode = best_mode; - - /* Break out case where we have already exceeded best so far value - * that was passed in - */ - if (distortion > *best_dist) - break; + bmode_costs = mb->bmode_costs[A][L]; } - *Rate = cost; + pick_intra4x4block(mb, i, &best_mode, bmode_costs, &r, &d); - if (i == 16) - { - *best_dist = distortion; - error = RDCOST(mb->rdmult, mb->rddiv, cost, distortion); - } - else - { - *best_dist = INT_MAX; - error = INT_MAX; - } + cost += r; + distortion += d; + mic->bmi[i].as_mode = best_mode; - return error; + /* Break out case where we have already exceeded best so far value + * that was passed in + */ + if (distortion > *best_dist) break; + } + + *Rate = cost; + + if (i == 16) { + *best_dist = distortion; + error = RDCOST(mb->rdmult, mb->rddiv, cost, distortion); + } else { + *best_dist = INT_MAX; + error = INT_MAX; + } + + return error; } -static void pick_intra_mbuv_mode(MACROBLOCK *mb) -{ +static void pick_intra_mbuv_mode(MACROBLOCK *mb) { + MACROBLOCKD *x = &mb->e_mbd; + unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride; + unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride; + unsigned char *usrc_ptr = (mb->block[16].src + *mb->block[16].base_src); + unsigned char *vsrc_ptr = (mb->block[20].src + *mb->block[20].base_src); + int uvsrc_stride = mb->block[16].src_stride; + unsigned char uleft_col[8]; + unsigned char vleft_col[8]; + unsigned char utop_left = uabove_row[-1]; + unsigned char vtop_left = vabove_row[-1]; + int i, j; + int expected_udc; + int expected_vdc; + int shift; + int Uaverage = 0; + int Vaverage = 0; + int diff; + int pred_error[4] = { 0, 0, 0, 0 }, best_error = INT_MAX; + MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode); - MACROBLOCKD *x = &mb->e_mbd; - 
unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride; - unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride; - unsigned char *usrc_ptr = (mb->block[16].src + *mb->block[16].base_src); - unsigned char *vsrc_ptr = (mb->block[20].src + *mb->block[20].base_src); - int uvsrc_stride = mb->block[16].src_stride; - unsigned char uleft_col[8]; - unsigned char vleft_col[8]; - unsigned char utop_left = uabove_row[-1]; - unsigned char vtop_left = vabove_row[-1]; - int i, j; - int expected_udc; - int expected_vdc; - int shift; - int Uaverage = 0; - int Vaverage = 0; - int diff; - int pred_error[4] = {0, 0, 0, 0}, best_error = INT_MAX; - MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode); + for (i = 0; i < 8; ++i) { + uleft_col[i] = x->dst.u_buffer[i * x->dst.uv_stride - 1]; + vleft_col[i] = x->dst.v_buffer[i * x->dst.uv_stride - 1]; + } + if (!x->up_available && !x->left_available) { + expected_udc = 128; + expected_vdc = 128; + } else { + shift = 2; - for (i = 0; i < 8; i++) - { - uleft_col[i] = x->dst.u_buffer [i* x->dst.uv_stride -1]; - vleft_col[i] = x->dst.v_buffer [i* x->dst.uv_stride -1]; + if (x->up_available) { + for (i = 0; i < 8; ++i) { + Uaverage += uabove_row[i]; + Vaverage += vabove_row[i]; + } + + shift++; } - if (!x->up_available && !x->left_available) - { - expected_udc = 128; - expected_vdc = 128; - } - else - { - shift = 2; + if (x->left_available) { + for (i = 0; i < 8; ++i) { + Uaverage += uleft_col[i]; + Vaverage += vleft_col[i]; + } - if (x->up_available) - { - - for (i = 0; i < 8; i++) - { - Uaverage += uabove_row[i]; - Vaverage += vabove_row[i]; - } - - shift ++; - - } - - if (x->left_available) - { - for (i = 0; i < 8; i++) - { - Uaverage += uleft_col[i]; - Vaverage += vleft_col[i]; - } - - shift ++; - - } - - expected_udc = (Uaverage + (1 << (shift - 1))) >> shift; - expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift; + shift++; } + expected_udc = (Uaverage + (1 << (shift - 1))) >> shift; + expected_vdc = (Vaverage + (1 << 
(shift - 1))) >> shift; + } - for (i = 0; i < 8; i++) - { - for (j = 0; j < 8; j++) - { + for (i = 0; i < 8; ++i) { + for (j = 0; j < 8; ++j) { + int predu = uleft_col[i] + uabove_row[j] - utop_left; + int predv = vleft_col[i] + vabove_row[j] - vtop_left; + int u_p, v_p; - int predu = uleft_col[i] + uabove_row[j] - utop_left; - int predv = vleft_col[i] + vabove_row[j] - vtop_left; - int u_p, v_p; + u_p = usrc_ptr[j]; + v_p = vsrc_ptr[j]; - u_p = usrc_ptr[j]; - v_p = vsrc_ptr[j]; + if (predu < 0) predu = 0; - if (predu < 0) - predu = 0; + if (predu > 255) predu = 255; - if (predu > 255) - predu = 255; + if (predv < 0) predv = 0; - if (predv < 0) - predv = 0; + if (predv > 255) predv = 255; - if (predv > 255) - predv = 255; - - - diff = u_p - expected_udc; - pred_error[DC_PRED] += diff * diff; - diff = v_p - expected_vdc; - pred_error[DC_PRED] += diff * diff; - - - diff = u_p - uabove_row[j]; - pred_error[V_PRED] += diff * diff; - diff = v_p - vabove_row[j]; - pred_error[V_PRED] += diff * diff; - - - diff = u_p - uleft_col[i]; - pred_error[H_PRED] += diff * diff; - diff = v_p - vleft_col[i]; - pred_error[H_PRED] += diff * diff; - - - diff = u_p - predu; - pred_error[TM_PRED] += diff * diff; - diff = v_p - predv; - pred_error[TM_PRED] += diff * diff; - - - } - - usrc_ptr += uvsrc_stride; - vsrc_ptr += uvsrc_stride; - - if (i == 3) - { - usrc_ptr = (mb->block[18].src + *mb->block[18].base_src); - vsrc_ptr = (mb->block[22].src + *mb->block[22].base_src); - } + diff = u_p - expected_udc; + pred_error[DC_PRED] += diff * diff; + diff = v_p - expected_vdc; + pred_error[DC_PRED] += diff * diff; + diff = u_p - uabove_row[j]; + pred_error[V_PRED] += diff * diff; + diff = v_p - vabove_row[j]; + pred_error[V_PRED] += diff * diff; + diff = u_p - uleft_col[i]; + pred_error[H_PRED] += diff * diff; + diff = v_p - vleft_col[i]; + pred_error[H_PRED] += diff * diff; + diff = u_p - predu; + pred_error[TM_PRED] += diff * diff; + diff = v_p - predv; + pred_error[TM_PRED] += diff * diff; } 
+ usrc_ptr += uvsrc_stride; + vsrc_ptr += uvsrc_stride; - for (i = DC_PRED; i <= TM_PRED; i++) - { - if (best_error > pred_error[i]) - { - best_error = pred_error[i]; - best_mode = (MB_PREDICTION_MODE)i; - } + if (i == 3) { + usrc_ptr = (mb->block[18].src + *mb->block[18].base_src); + vsrc_ptr = (mb->block[22].src + *mb->block[22].base_src); } + } + for (i = DC_PRED; i <= TM_PRED; ++i) { + if (best_error > pred_error[i]) { + best_error = pred_error[i]; + best_mode = (MB_PREDICTION_MODE)i; + } + } - mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode; - + mb->e_mbd.mode_info_context->mbmi.uv_mode = best_mode; } -static void update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv) -{ - MACROBLOCKD *xd = &x->e_mbd; - /* Split MV modes currently not supported when RD is nopt enabled, - * therefore, only need to modify MVcount in NEWMV mode. */ - if (xd->mode_info_context->mbmi.mode == NEWMV) - { - x->MVcount[0][mv_max+((xd->mode_info_context->mbmi.mv.as_mv.row - - best_ref_mv->as_mv.row) >> 1)]++; - x->MVcount[1][mv_max+((xd->mode_info_context->mbmi.mv.as_mv.col - - best_ref_mv->as_mv.col) >> 1)]++; - } +static void update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv) { + MACROBLOCKD *xd = &x->e_mbd; + /* Split MV modes currently not supported when RD is nopt enabled, + * therefore, only need to modify MVcount in NEWMV mode. 
*/ + if (xd->mode_info_context->mbmi.mode == NEWMV) { + x->MVcount[0][mv_max + ((xd->mode_info_context->mbmi.mv.as_mv.row - + best_ref_mv->as_mv.row) >> + 1)]++; + x->MVcount[1][mv_max + ((xd->mode_info_context->mbmi.mv.as_mv.col - + best_ref_mv->as_mv.col) >> + 1)]++; + } } - #if CONFIG_MULTI_RES_ENCODING -static -void get_lower_res_motion_info(VP8_COMP *cpi, MACROBLOCKD *xd, int *dissim, - int *parent_ref_frame, - MB_PREDICTION_MODE *parent_mode, - int_mv *parent_ref_mv, int mb_row, int mb_col) -{ - LOWER_RES_MB_INFO* store_mode_info - = ((LOWER_RES_FRAME_INFO*)cpi->oxcf.mr_low_res_mode_info)->mb_info; - unsigned int parent_mb_index; +static void get_lower_res_motion_info(VP8_COMP *cpi, MACROBLOCKD *xd, + int *dissim, int *parent_ref_frame, + MB_PREDICTION_MODE *parent_mode, + int_mv *parent_ref_mv, int mb_row, + int mb_col) { + LOWER_RES_MB_INFO *store_mode_info = + ((LOWER_RES_FRAME_INFO *)cpi->oxcf.mr_low_res_mode_info)->mb_info; + unsigned int parent_mb_index; - /* Consider different down_sampling_factor. */ - { - /* TODO: Removed the loop that supports special down_sampling_factor - * such as 2, 4, 8. Will revisit it if needed. - * Should also try using a look-up table to see if it helps - * performance. */ - int parent_mb_row, parent_mb_col; + /* Consider different down_sampling_factor. */ + { + /* TODO: Removed the loop that supports special down_sampling_factor + * such as 2, 4, 8. Will revisit it if needed. + * Should also try using a look-up table to see if it helps + * performance. 
*/ + int parent_mb_row, parent_mb_col; - parent_mb_row = mb_row*cpi->oxcf.mr_down_sampling_factor.den - /cpi->oxcf.mr_down_sampling_factor.num; - parent_mb_col = mb_col*cpi->oxcf.mr_down_sampling_factor.den - /cpi->oxcf.mr_down_sampling_factor.num; - parent_mb_index = parent_mb_row*cpi->mr_low_res_mb_cols + parent_mb_col; - } + parent_mb_row = mb_row * cpi->oxcf.mr_down_sampling_factor.den / + cpi->oxcf.mr_down_sampling_factor.num; + parent_mb_col = mb_col * cpi->oxcf.mr_down_sampling_factor.den / + cpi->oxcf.mr_down_sampling_factor.num; + parent_mb_index = parent_mb_row * cpi->mr_low_res_mb_cols + parent_mb_col; + } - /* Read lower-resolution mode & motion result from memory.*/ - *parent_ref_frame = store_mode_info[parent_mb_index].ref_frame; - *parent_mode = store_mode_info[parent_mb_index].mode; - *dissim = store_mode_info[parent_mb_index].dissim; + /* Read lower-resolution mode & motion result from memory.*/ + *parent_ref_frame = store_mode_info[parent_mb_index].ref_frame; + *parent_mode = store_mode_info[parent_mb_index].mode; + *dissim = store_mode_info[parent_mb_index].dissim; - /* For highest-resolution encoder, adjust dissim value. Lower its quality - * for good performance. */ - if (cpi->oxcf.mr_encoder_id == (cpi->oxcf.mr_total_resolutions - 1)) - *dissim>>=1; + /* For highest-resolution encoder, adjust dissim value. Lower its quality + * for good performance. */ + if (cpi->oxcf.mr_encoder_id == (cpi->oxcf.mr_total_resolutions - 1)) + *dissim >>= 1; - if(*parent_ref_frame != INTRA_FRAME) - { - /* Consider different down_sampling_factor. - * The result can be rounded to be more precise, but it takes more time. 
- */ - (*parent_ref_mv).as_mv.row = store_mode_info[parent_mb_index].mv.as_mv.row - *cpi->oxcf.mr_down_sampling_factor.num - /cpi->oxcf.mr_down_sampling_factor.den; - (*parent_ref_mv).as_mv.col = store_mode_info[parent_mb_index].mv.as_mv.col - *cpi->oxcf.mr_down_sampling_factor.num - /cpi->oxcf.mr_down_sampling_factor.den; + if (*parent_ref_frame != INTRA_FRAME) { + /* Consider different down_sampling_factor. + * The result can be rounded to be more precise, but it takes more time. + */ + (*parent_ref_mv).as_mv.row = store_mode_info[parent_mb_index].mv.as_mv.row * + cpi->oxcf.mr_down_sampling_factor.num / + cpi->oxcf.mr_down_sampling_factor.den; + (*parent_ref_mv).as_mv.col = store_mode_info[parent_mb_index].mv.as_mv.col * + cpi->oxcf.mr_down_sampling_factor.num / + cpi->oxcf.mr_down_sampling_factor.den; - vp8_clamp_mv2(parent_ref_mv, xd); - } + vp8_clamp_mv2(parent_ref_mv, xd); + } } #endif -static void check_for_encode_breakout(unsigned int sse, MACROBLOCK* x) -{ - MACROBLOCKD *xd = &x->e_mbd; +static void check_for_encode_breakout(unsigned int sse, MACROBLOCK *x) { + MACROBLOCKD *xd = &x->e_mbd; - unsigned int threshold = (xd->block[0].dequant[1] - * xd->block[0].dequant[1] >>4); + unsigned int threshold = + (xd->block[0].dequant[1] * xd->block[0].dequant[1] >> 4); - if(threshold < x->encode_breakout) - threshold = x->encode_breakout; + if (threshold < x->encode_breakout) threshold = x->encode_breakout; - if (sse < threshold ) - { - /* Check u and v to make sure skip is ok */ - unsigned int sse2 = 0; + if (sse < threshold) { + /* Check u and v to make sure skip is ok */ + unsigned int sse2 = 0; - sse2 = VP8_UVSSE(x); + sse2 = VP8_UVSSE(x); - if (sse2 * 2 < x->encode_breakout) - x->skip = 1; - else - x->skip = 0; + if (sse2 * 2 < x->encode_breakout) { + x->skip = 1; + } else { + x->skip = 0; } + } } -static int evaluate_inter_mode(unsigned int* sse, int rate2, int* distortion2, - VP8_COMP *cpi, MACROBLOCK *x, int rd_adj) -{ - MB_PREDICTION_MODE this_mode = 
x->e_mbd.mode_info_context->mbmi.mode; - int_mv mv = x->e_mbd.mode_info_context->mbmi.mv; - int this_rd; - int denoise_aggressive = 0; - /* Exit early and don't compute the distortion if this macroblock - * is marked inactive. */ - if (cpi->active_map_enabled && x->active_ptr[0] == 0) - { - *sse = 0; - *distortion2 = 0; - x->skip = 1; - return INT_MAX; - } +static int evaluate_inter_mode(unsigned int *sse, int rate2, int *distortion2, + VP8_COMP *cpi, MACROBLOCK *x, int rd_adj) { + MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode; + int_mv mv = x->e_mbd.mode_info_context->mbmi.mv; + int this_rd; + int denoise_aggressive = 0; + /* Exit early and don't compute the distortion if this macroblock + * is marked inactive. */ + if (cpi->active_map_enabled && x->active_ptr[0] == 0) { + *sse = 0; + *distortion2 = 0; + x->skip = 1; + return INT_MAX; + } - if((this_mode != NEWMV) || - !(cpi->sf.half_pixel_search) || cpi->common.full_pixel==1) - *distortion2 = vp8_get_inter_mbpred_error(x, - &cpi->fn_ptr[BLOCK_16X16], - sse, mv); + if ((this_mode != NEWMV) || !(cpi->sf.half_pixel_search) || + cpi->common.full_pixel == 1) { + *distortion2 = + vp8_get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], sse, mv); + } - this_rd = RDCOST(x->rdmult, x->rddiv, rate2, *distortion2); + this_rd = RDCOST(x->rdmult, x->rddiv, rate2, *distortion2); #if CONFIG_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity > 0) { - denoise_aggressive = + if (cpi->oxcf.noise_sensitivity > 0) { + denoise_aggressive = (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) ? 1 : 0; - } + } #endif - // Adjust rd for ZEROMV and LAST, if LAST is the closest reference frame. - // TODO: We should also add condition on distance of closest to current. 
- if(!cpi->oxcf.screen_content_mode && - this_mode == ZEROMV && - x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME && - (denoise_aggressive || (cpi->closest_reference_frame == LAST_FRAME))) - { - // No adjustment if block is considered to be skin area. - if(x->is_skin) - rd_adj = 100; + // Adjust rd for ZEROMV and LAST, if LAST is the closest reference frame. + // TODO: We should also add condition on distance of closest to current. + if (!cpi->oxcf.screen_content_mode && this_mode == ZEROMV && + x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME && + (denoise_aggressive || (cpi->closest_reference_frame == LAST_FRAME))) { + // No adjustment if block is considered to be skin area. + if (x->is_skin) rd_adj = 100; - this_rd = ((int64_t)this_rd) * rd_adj / 100; - } + this_rd = (int)(((int64_t)this_rd) * rd_adj / 100); + } - check_for_encode_breakout(*sse, x); - return this_rd; + check_for_encode_breakout(*sse, x); + return this_rd; } static void calculate_zeromv_rd_adjustment(VP8_COMP *cpi, MACROBLOCK *x, - int *rd_adjustment) -{ - MODE_INFO *mic = x->e_mbd.mode_info_context; - int_mv mv_l, mv_a, mv_al; - int local_motion_check = 0; + int *rd_adjustment) { + MODE_INFO *mic = x->e_mbd.mode_info_context; + int_mv mv_l, mv_a, mv_al; + int local_motion_check = 0; - if (cpi->lf_zeromv_pct > 40) - { - /* left mb */ - mic -= 1; - mv_l = mic->mbmi.mv; + if (cpi->lf_zeromv_pct > 40) { + /* left mb */ + mic -= 1; + mv_l = mic->mbmi.mv; - if (mic->mbmi.ref_frame != INTRA_FRAME) - if( abs(mv_l.as_mv.row) < 8 && abs(mv_l.as_mv.col) < 8) - local_motion_check++; - - /* above-left mb */ - mic -= x->e_mbd.mode_info_stride; - mv_al = mic->mbmi.mv; - - if (mic->mbmi.ref_frame != INTRA_FRAME) - if( abs(mv_al.as_mv.row) < 8 && abs(mv_al.as_mv.col) < 8) - local_motion_check++; - - /* above mb */ - mic += 1; - mv_a = mic->mbmi.mv; - - if (mic->mbmi.ref_frame != INTRA_FRAME) - if( abs(mv_a.as_mv.row) < 8 && abs(mv_a.as_mv.col) < 8) - local_motion_check++; - - if 
(((!x->e_mbd.mb_to_top_edge || !x->e_mbd.mb_to_left_edge) - && local_motion_check >0) || local_motion_check >2 ) - *rd_adjustment = 80; - else if (local_motion_check > 0) - *rd_adjustment = 90; + if (mic->mbmi.ref_frame != INTRA_FRAME) { + if (abs(mv_l.as_mv.row) < 8 && abs(mv_l.as_mv.col) < 8) { + local_motion_check++; + } } + + /* above-left mb */ + mic -= x->e_mbd.mode_info_stride; + mv_al = mic->mbmi.mv; + + if (mic->mbmi.ref_frame != INTRA_FRAME) { + if (abs(mv_al.as_mv.row) < 8 && abs(mv_al.as_mv.col) < 8) { + local_motion_check++; + } + } + + /* above mb */ + mic += 1; + mv_a = mic->mbmi.mv; + + if (mic->mbmi.ref_frame != INTRA_FRAME) { + if (abs(mv_a.as_mv.row) < 8 && abs(mv_a.as_mv.col) < 8) { + local_motion_check++; + } + } + + if (((!x->e_mbd.mb_to_top_edge || !x->e_mbd.mb_to_left_edge) && + local_motion_check > 0) || + local_motion_check > 2) { + *rd_adjustment = 80; + } else if (local_motion_check > 0) { + *rd_adjustment = 90; + } + } } void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, int *returndistortion, int *returnintra, int mb_row, - int mb_col) -{ - BLOCK *b = &x->block[0]; - BLOCKD *d = &x->e_mbd.block[0]; - MACROBLOCKD *xd = &x->e_mbd; - MB_MODE_INFO best_mbmode; + int mb_col) { + BLOCK *b = &x->block[0]; + BLOCKD *d = &x->e_mbd.block[0]; + MACROBLOCKD *xd = &x->e_mbd; + MB_MODE_INFO best_mbmode; - int_mv best_ref_mv_sb[2]; - int_mv mode_mv_sb[2][MB_MODE_COUNT]; - int_mv best_ref_mv; - int_mv *mode_mv; - MB_PREDICTION_MODE this_mode; - int num00; - int mdcounts[4]; - int best_rd = INT_MAX; - int rd_adjustment = 100; - int best_intra_rd = INT_MAX; - int mode_index; - int rate; - int rate2; - int distortion2; - int bestsme = INT_MAX; - int best_mode_index = 0; - unsigned int sse = UINT_MAX, best_rd_sse = UINT_MAX; + int_mv best_ref_mv_sb[2]; + int_mv mode_mv_sb[2][MB_MODE_COUNT]; + int_mv best_ref_mv; + int_mv *mode_mv; + MB_PREDICTION_MODE this_mode; + int num00; + int mdcounts[4]; + 
int best_rd = INT_MAX; + int rd_adjustment = 100; + int best_intra_rd = INT_MAX; + int mode_index; + int rate; + int rate2; + int distortion2; + int bestsme = INT_MAX; + int best_mode_index = 0; + unsigned int sse = UINT_MAX, best_rd_sse = UINT_MAX; #if CONFIG_TEMPORAL_DENOISING - unsigned int zero_mv_sse = UINT_MAX, best_sse = UINT_MAX; + unsigned int zero_mv_sse = UINT_MAX, best_sse = UINT_MAX; #endif - int sf_improved_mv_pred = cpi->sf.improved_mv_pred; + int sf_improved_mv_pred = cpi->sf.improved_mv_pred; #if CONFIG_MULTI_RES_ENCODING - int dissim = INT_MAX; - int parent_ref_frame = 0; - int_mv parent_ref_mv; - MB_PREDICTION_MODE parent_mode = 0; - int parent_ref_valid = 0; + int dissim = INT_MAX; + int parent_ref_frame = 0; + int_mv parent_ref_mv; + MB_PREDICTION_MODE parent_mode = 0; + int parent_ref_valid = 0; #endif - int_mv mvp; + int_mv mvp; - int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7}; - int saddone=0; - /* search range got from mv_pred(). It uses step_param levels. (0-7) */ - int sr=0; + int near_sadidx[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; + int saddone = 0; + /* search range got from mv_pred(). It uses step_param levels. (0-7) */ + int sr = 0; - unsigned char *plane[4][3]; - int ref_frame_map[4]; - int sign_bias = 0; - int dot_artifact_candidate = 0; - get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset); + unsigned char *plane[4][3]; + int ref_frame_map[4]; + int sign_bias = 0; + int dot_artifact_candidate = 0; + get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset); - // If the current frame is using LAST as a reference, check for - // biasing the mode selection for dot artifacts. 
- if (cpi->ref_frame_flags & VP8_LAST_FRAME) { - unsigned char* target_y = x->src.y_buffer; - unsigned char* target_u = x->block[16].src + *x->block[16].base_src; - unsigned char* target_v = x->block[20].src + *x->block[20].base_src; - int stride = x->src.y_stride; - int stride_uv = x->block[16].src_stride; -#if CONFIG_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity) { - const int uv_denoise = (cpi->oxcf.noise_sensitivity >= 2) ? 1 : 0; - target_y = - cpi->denoiser.yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset; - stride = cpi->denoiser.yv12_running_avg[LAST_FRAME].y_stride; - if (uv_denoise) { - target_u = - cpi->denoiser.yv12_running_avg[LAST_FRAME].u_buffer + - recon_uvoffset; - target_v = - cpi->denoiser.yv12_running_avg[LAST_FRAME].v_buffer + - recon_uvoffset; - stride_uv = cpi->denoiser.yv12_running_avg[LAST_FRAME].uv_stride; - } - } -#endif - dot_artifact_candidate = - check_dot_artifact_candidate(cpi, x, target_y, stride, - plane[LAST_FRAME][0], mb_row, mb_col, 0); - // If not found in Y channel, check UV channel. - if (!dot_artifact_candidate) { - dot_artifact_candidate = - check_dot_artifact_candidate(cpi, x, target_u, stride_uv, - plane[LAST_FRAME][1], mb_row, mb_col, 1); - if (!dot_artifact_candidate) { - dot_artifact_candidate = - check_dot_artifact_candidate(cpi, x, target_v, stride_uv, - plane[LAST_FRAME][2], mb_row, mb_col, 2); - } - } - } - -#if CONFIG_MULTI_RES_ENCODING - // |parent_ref_valid| will be set here if potentially we can do mv resue for - // this higher resol (|cpi->oxcf.mr_encoder_id| > 0) frame. - // |parent_ref_valid| may be reset depending on |parent_ref_frame| for - // the current macroblock below. 
- parent_ref_valid = cpi->oxcf.mr_encoder_id && cpi->mr_low_res_mv_avail; - if (parent_ref_valid) - { - int parent_ref_flag; - - get_lower_res_motion_info(cpi, xd, &dissim, &parent_ref_frame, - &parent_mode, &parent_ref_mv, mb_row, mb_col); - - /* TODO(jkoleszar): The references available (ref_frame_flags) to the - * lower res encoder should match those available to this encoder, but - * there seems to be a situation where this mismatch can happen in the - * case of frame dropping and temporal layers. For example, - * GOLD being disallowed in ref_frame_flags, but being returned as - * parent_ref_frame. - * - * In this event, take the conservative approach of disabling the - * lower res info for this MB. - */ - - parent_ref_flag = 0; - // Note availability for mv reuse is only based on last and golden. - if (parent_ref_frame == LAST_FRAME) - parent_ref_flag = (cpi->ref_frame_flags & VP8_LAST_FRAME); - else if (parent_ref_frame == GOLDEN_FRAME) - parent_ref_flag = (cpi->ref_frame_flags & VP8_GOLD_FRAME); - - //assert(!parent_ref_frame || parent_ref_flag); - - // If |parent_ref_frame| did not match either last or golden then - // shut off mv reuse. - if (parent_ref_frame && !parent_ref_flag) - parent_ref_valid = 0; - - // Don't do mv reuse since we want to allow for another mode besides - // ZEROMV_LAST to remove dot artifact. - if (dot_artifact_candidate) - parent_ref_valid = 0; - } -#endif - - // Check if current macroblock is in skin area. 
- { - const int y = (x->src.y_buffer[7 * x->src.y_stride + 7] + - x->src.y_buffer[7 * x->src.y_stride + 8] + - x->src.y_buffer[8 * x->src.y_stride + 7] + - x->src.y_buffer[8 * x->src.y_stride + 8]) >> 2; - const int cb = (x->src.u_buffer[3 * x->src.uv_stride + 3] + - x->src.u_buffer[3 * x->src.uv_stride + 4] + - x->src.u_buffer[4 * x->src.uv_stride + 3] + - x->src.u_buffer[4 * x->src.uv_stride + 4]) >> 2; - const int cr = (x->src.v_buffer[3 * x->src.uv_stride + 3] + - x->src.v_buffer[3 * x->src.uv_stride + 4] + - x->src.v_buffer[4 * x->src.uv_stride + 3] + - x->src.v_buffer[4 * x->src.uv_stride + 4]) >> 2; - x->is_skin = 0; - if (!cpi->oxcf.screen_content_mode) - x->is_skin = is_skin_color(y, cb, cr); - } + // If the current frame is using LAST as a reference, check for + // biasing the mode selection for dot artifacts. + if (cpi->ref_frame_flags & VP8_LAST_FRAME) { + unsigned char *target_y = x->src.y_buffer; + unsigned char *target_u = x->block[16].src + *x->block[16].base_src; + unsigned char *target_v = x->block[20].src + *x->block[20].base_src; + int stride = x->src.y_stride; + int stride_uv = x->block[16].src_stride; #if CONFIG_TEMPORAL_DENOISING if (cpi->oxcf.noise_sensitivity) { - // Under aggressive denoising mode, should we use skin map to reduce denoiser - // and ZEROMV bias? Will need to revisit the accuracy of this detection for - // very noisy input. For now keep this as is (i.e., don't turn it off). - // if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) - // x->is_skin = 0; + const int uv_denoise = (cpi->oxcf.noise_sensitivity >= 2) ? 
1 : 0; + target_y = + cpi->denoiser.yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset; + stride = cpi->denoiser.yv12_running_avg[LAST_FRAME].y_stride; + if (uv_denoise) { + target_u = cpi->denoiser.yv12_running_avg[LAST_FRAME].u_buffer + + recon_uvoffset; + target_v = cpi->denoiser.yv12_running_avg[LAST_FRAME].v_buffer + + recon_uvoffset; + stride_uv = cpi->denoiser.yv12_running_avg[LAST_FRAME].uv_stride; + } } #endif + dot_artifact_candidate = check_dot_artifact_candidate( + cpi, x, target_y, stride, plane[LAST_FRAME][0], mb_row, mb_col, 0); + // If not found in Y channel, check UV channel. + if (!dot_artifact_candidate) { + dot_artifact_candidate = check_dot_artifact_candidate( + cpi, x, target_u, stride_uv, plane[LAST_FRAME][1], mb_row, mb_col, 1); + if (!dot_artifact_candidate) { + dot_artifact_candidate = check_dot_artifact_candidate( + cpi, x, target_v, stride_uv, plane[LAST_FRAME][2], mb_row, mb_col, + 2); + } + } + } - mode_mv = mode_mv_sb[sign_bias]; - best_ref_mv.as_int = 0; - memset(mode_mv_sb, 0, sizeof(mode_mv_sb)); - memset(&best_mbmode, 0, sizeof(best_mbmode)); - - /* Setup search priorities */ #if CONFIG_MULTI_RES_ENCODING - if (parent_ref_valid && parent_ref_frame && dissim < 8) - { - ref_frame_map[0] = -1; - ref_frame_map[1] = parent_ref_frame; - ref_frame_map[2] = -1; - ref_frame_map[3] = -1; - } else + // |parent_ref_valid| will be set here if potentially we can do mv resue for + // this higher resol (|cpi->oxcf.mr_encoder_id| > 0) frame. + // |parent_ref_valid| may be reset depending on |parent_ref_frame| for + // the current macroblock below. 
+ parent_ref_valid = cpi->oxcf.mr_encoder_id && cpi->mr_low_res_mv_avail; + if (parent_ref_valid) { + int parent_ref_flag; + + get_lower_res_motion_info(cpi, xd, &dissim, &parent_ref_frame, &parent_mode, + &parent_ref_mv, mb_row, mb_col); + + /* TODO(jkoleszar): The references available (ref_frame_flags) to the + * lower res encoder should match those available to this encoder, but + * there seems to be a situation where this mismatch can happen in the + * case of frame dropping and temporal layers. For example, + * GOLD being disallowed in ref_frame_flags, but being returned as + * parent_ref_frame. + * + * In this event, take the conservative approach of disabling the + * lower res info for this MB. + */ + + parent_ref_flag = 0; + // Note availability for mv reuse is only based on last and golden. + if (parent_ref_frame == LAST_FRAME) + parent_ref_flag = (cpi->ref_frame_flags & VP8_LAST_FRAME); + else if (parent_ref_frame == GOLDEN_FRAME) + parent_ref_flag = (cpi->ref_frame_flags & VP8_GOLD_FRAME); + + // assert(!parent_ref_frame || parent_ref_flag); + + // If |parent_ref_frame| did not match either last or golden then + // shut off mv reuse. + if (parent_ref_frame && !parent_ref_flag) parent_ref_valid = 0; + + // Don't do mv reuse since we want to allow for another mode besides + // ZEROMV_LAST to remove dot artifact. + if (dot_artifact_candidate) parent_ref_valid = 0; + } +#endif + + // Check if current macroblock is in skin area. 
+ { + const int y = (x->src.y_buffer[7 * x->src.y_stride + 7] + + x->src.y_buffer[7 * x->src.y_stride + 8] + + x->src.y_buffer[8 * x->src.y_stride + 7] + + x->src.y_buffer[8 * x->src.y_stride + 8]) >> + 2; + const int cb = (x->src.u_buffer[3 * x->src.uv_stride + 3] + + x->src.u_buffer[3 * x->src.uv_stride + 4] + + x->src.u_buffer[4 * x->src.uv_stride + 3] + + x->src.u_buffer[4 * x->src.uv_stride + 4]) >> + 2; + const int cr = (x->src.v_buffer[3 * x->src.uv_stride + 3] + + x->src.v_buffer[3 * x->src.uv_stride + 4] + + x->src.v_buffer[4 * x->src.uv_stride + 3] + + x->src.v_buffer[4 * x->src.uv_stride + 4]) >> + 2; + x->is_skin = 0; + if (!cpi->oxcf.screen_content_mode) { + int block_index = mb_row * cpi->common.mb_cols + mb_col; + x->is_skin = is_skin_color(y, cb, cr, cpi->consec_zero_last[block_index]); + } + } +#if CONFIG_TEMPORAL_DENOISING + if (cpi->oxcf.noise_sensitivity) { + // Under aggressive denoising mode, should we use skin map to reduce + // denoiser + // and ZEROMV bias? Will need to revisit the accuracy of this detection for + // very noisy input. For now keep this as is (i.e., don't turn it off). + // if (cpi->denoiser.denoiser_mode == kDenoiserOnYUVAggressive) + // x->is_skin = 0; + } +#endif + + mode_mv = mode_mv_sb[sign_bias]; + best_ref_mv.as_int = 0; + memset(mode_mv_sb, 0, sizeof(mode_mv_sb)); + memset(&best_mbmode, 0, sizeof(best_mbmode)); + +/* Setup search priorities */ +#if CONFIG_MULTI_RES_ENCODING + if (parent_ref_valid && parent_ref_frame && dissim < 8) { + ref_frame_map[0] = -1; + ref_frame_map[1] = parent_ref_frame; + ref_frame_map[2] = -1; + ref_frame_map[3] = -1; + } else #endif get_reference_search_order(cpi, ref_frame_map); - /* Check to see if there is at least 1 valid reference frame that we need - * to calculate near_mvs. 
- */ - if (ref_frame_map[1] > 0) - { - sign_bias = vp8_find_near_mvs_bias(&x->e_mbd, - x->e_mbd.mode_info_context, - mode_mv_sb, - best_ref_mv_sb, - mdcounts, - ref_frame_map[1], - cpi->common.ref_frame_sign_bias); + /* Check to see if there is at least 1 valid reference frame that we need + * to calculate near_mvs. + */ + if (ref_frame_map[1] > 0) { + sign_bias = vp8_find_near_mvs_bias( + &x->e_mbd, x->e_mbd.mode_info_context, mode_mv_sb, best_ref_mv_sb, + mdcounts, ref_frame_map[1], cpi->common.ref_frame_sign_bias); + mode_mv = mode_mv_sb[sign_bias]; + best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int; + } + + /* Count of the number of MBs tested so far this frame */ + x->mbs_tested_so_far++; + + *returnintra = INT_MAX; + x->skip = 0; + + x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME; + + /* If the frame has big static background and current MB is in low + * motion area, its mode decision is biased to ZEROMV mode. + * No adjustment if cpu_used is <= -12 (i.e., cpi->Speed >= 12). + * At such speed settings, ZEROMV is already heavily favored. + */ + if (cpi->Speed < 12) { + calculate_zeromv_rd_adjustment(cpi, x, &rd_adjustment); + } + +#if CONFIG_TEMPORAL_DENOISING + if (cpi->oxcf.noise_sensitivity) { + rd_adjustment = (int)(rd_adjustment * + cpi->denoiser.denoise_pars.pickmode_mv_bias / 100); + } +#endif + + if (dot_artifact_candidate) { + // Bias against ZEROMV_LAST mode. 
+ rd_adjustment = 150; + } + + /* if we encode a new mv this is important + * find the best new motion vector + */ + for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) { + int frame_cost; + int this_rd = INT_MAX; + int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]]; + + if (best_rd <= x->rd_threshes[mode_index]) continue; + + if (this_ref_frame < 0) continue; + + x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame; + + /* everything but intra */ + if (x->e_mbd.mode_info_context->mbmi.ref_frame) { + x->e_mbd.pre.y_buffer = plane[this_ref_frame][0]; + x->e_mbd.pre.u_buffer = plane[this_ref_frame][1]; + x->e_mbd.pre.v_buffer = plane[this_ref_frame][2]; + + if (sign_bias != cpi->common.ref_frame_sign_bias[this_ref_frame]) { + sign_bias = cpi->common.ref_frame_sign_bias[this_ref_frame]; mode_mv = mode_mv_sb[sign_bias]; best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int; + } + +#if CONFIG_MULTI_RES_ENCODING + if (parent_ref_valid) { + if (vp8_mode_order[mode_index] == NEARESTMV && + mode_mv[NEARESTMV].as_int == 0) + continue; + if (vp8_mode_order[mode_index] == NEARMV && mode_mv[NEARMV].as_int == 0) + continue; + + if (vp8_mode_order[mode_index] == NEWMV && parent_mode == ZEROMV && + best_ref_mv.as_int == 0) + continue; + else if (vp8_mode_order[mode_index] == NEWMV && dissim == 0 && + best_ref_mv.as_int == parent_ref_mv.as_int) + continue; + } +#endif } - /* Count of the number of MBs tested so far this frame */ - x->mbs_tested_so_far++; + /* Check to see if the testing frequency for this mode is at its max + * If so then prevent it from being tested and increase the threshold + * for its testing */ + if (x->mode_test_hit_counts[mode_index] && + (cpi->mode_check_freq[mode_index] > 1)) { + if (x->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] * + x->mode_test_hit_counts[mode_index])) { + /* Increase the threshold for coding this mode to make it less + * likely to be chosen */ + x->rd_thresh_mult[mode_index] += 4; - *returnintra 
= INT_MAX; - x->skip = 0; + if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) { + x->rd_thresh_mult[mode_index] = MAX_THRESHMULT; + } - x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME; + x->rd_threshes[mode_index] = + (cpi->rd_baseline_thresh[mode_index] >> 7) * + x->rd_thresh_mult[mode_index]; + continue; + } + } - /* If the frame has big static background and current MB is in low - * motion area, its mode decision is biased to ZEROMV mode. - * No adjustment if cpu_used is <= -12 (i.e., cpi->Speed >= 12). - * At such speed settings, ZEROMV is already heavily favored. - */ - if (cpi->Speed < 12) { - calculate_zeromv_rd_adjustment(cpi, x, &rd_adjustment); + /* We have now reached the point where we are going to test the current + * mode so increment the counter for the number of times it has been + * tested */ + x->mode_test_hit_counts[mode_index]++; + + rate2 = 0; + distortion2 = 0; + + this_mode = vp8_mode_order[mode_index]; + + x->e_mbd.mode_info_context->mbmi.mode = this_mode; + x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED; + + /* Work out the cost assosciated with selecting the reference frame */ + frame_cost = x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame]; + rate2 += frame_cost; + + /* Only consider ZEROMV/ALTREF_FRAME for alt ref frame, + * unless ARNR filtering is enabled in which case we want + * an unfiltered alternative */ + if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) { + if (this_mode != ZEROMV || + x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) { + continue; + } + } + + switch (this_mode) { + case B_PRED: + /* Pass best so far to pick_intra4x4mby_modes to use as breakout */ + distortion2 = best_rd_sse; + pick_intra4x4mby_modes(x, &rate, &distortion2); + + if (distortion2 == INT_MAX) { + this_rd = INT_MAX; + } else { + rate2 += rate; + distortion2 = vpx_variance16x16(*(b->base_src), b->src_stride, + x->e_mbd.predictor, 16, &sse); + this_rd = RDCOST(x->rdmult, x->rddiv, rate2, 
distortion2); + + if (this_rd < best_intra_rd) { + best_intra_rd = this_rd; + *returnintra = distortion2; + } + } + + break; + + case SPLITMV: + + /* Split MV modes currently not supported when RD is not enabled. */ + break; + + case DC_PRED: + case V_PRED: + case H_PRED: + case TM_PRED: + vp8_build_intra_predictors_mby_s( + xd, xd->dst.y_buffer - xd->dst.y_stride, xd->dst.y_buffer - 1, + xd->dst.y_stride, xd->predictor, 16); + distortion2 = vpx_variance16x16(*(b->base_src), b->src_stride, + x->e_mbd.predictor, 16, &sse); + rate2 += x->mbmode_cost[x->e_mbd.frame_type] + [x->e_mbd.mode_info_context->mbmi.mode]; + this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2); + + if (this_rd < best_intra_rd) { + best_intra_rd = this_rd; + *returnintra = distortion2; + } + break; + + case NEWMV: { + int thissme; + int step_param; + int further_steps; + int n = 0; + int sadpb = x->sadperbit16; + int_mv mvp_full; + + int col_min = ((best_ref_mv.as_mv.col + 7) >> 3) - MAX_FULL_PEL_VAL; + int row_min = ((best_ref_mv.as_mv.row + 7) >> 3) - MAX_FULL_PEL_VAL; + int col_max = (best_ref_mv.as_mv.col >> 3) + MAX_FULL_PEL_VAL; + int row_max = (best_ref_mv.as_mv.row >> 3) + MAX_FULL_PEL_VAL; + + int tmp_col_min = x->mv_col_min; + int tmp_col_max = x->mv_col_max; + int tmp_row_min = x->mv_row_min; + int tmp_row_max = x->mv_row_max; + + int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8) ? 3 : 2) : 1; + + /* Further step/diamond searches as necessary */ + step_param = cpi->sf.first_step + speed_adjust; + +#if CONFIG_MULTI_RES_ENCODING + /* If lower-res frame is not available for mv reuse (because of + frame dropping or different temporal layer pattern), then higher + resol encoder does motion search without any previous knowledge. + Also, since last frame motion info is not stored, then we can not + use improved_mv_pred. 
*/ + if (cpi->oxcf.mr_encoder_id) sf_improved_mv_pred = 0; + + // Only use parent MV as predictor if this candidate reference frame + // (|this_ref_frame|) is equal to |parent_ref_frame|. + if (parent_ref_valid && (parent_ref_frame == this_ref_frame)) { + /* Use parent MV as predictor. Adjust search range + * accordingly. + */ + mvp.as_int = parent_ref_mv.as_int; + mvp_full.as_mv.col = parent_ref_mv.as_mv.col >> 3; + mvp_full.as_mv.row = parent_ref_mv.as_mv.row >> 3; + + if (dissim <= 32) + step_param += 3; + else if (dissim <= 128) + step_param += 2; + else + step_param += 1; + } else +#endif + { + if (sf_improved_mv_pred) { + if (!saddone) { + vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]); + saddone = 1; + } + + vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp, + x->e_mbd.mode_info_context->mbmi.ref_frame, + cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]); + + sr += speed_adjust; + /* adjust search range according to sr from mv prediction */ + if (sr > step_param) step_param = sr; + + mvp_full.as_mv.col = mvp.as_mv.col >> 3; + mvp_full.as_mv.row = mvp.as_mv.row >> 3; + } else { + mvp.as_int = best_ref_mv.as_int; + mvp_full.as_mv.col = best_ref_mv.as_mv.col >> 3; + mvp_full.as_mv.row = best_ref_mv.as_mv.row >> 3; + } + } + +#if CONFIG_MULTI_RES_ENCODING + if (parent_ref_valid && (parent_ref_frame == this_ref_frame) && + dissim <= 2 && + VPXMAX(abs(best_ref_mv.as_mv.row - parent_ref_mv.as_mv.row), + abs(best_ref_mv.as_mv.col - parent_ref_mv.as_mv.col)) <= 4) { + d->bmi.mv.as_int = mvp_full.as_int; + mode_mv[NEWMV].as_int = mvp_full.as_int; + + cpi->find_fractional_mv_step( + x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit, + &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse); + } else +#endif + { + /* Get intersection of UMV window and valid MV window to + * reduce # of checks in diamond search. 
*/ + if (x->mv_col_min < col_min) x->mv_col_min = col_min; + if (x->mv_col_max > col_max) x->mv_col_max = col_max; + if (x->mv_row_min < row_min) x->mv_row_min = row_min; + if (x->mv_row_max > row_max) x->mv_row_max = row_max; + + further_steps = + (cpi->Speed >= 8) + ? 0 + : (cpi->sf.max_step_search_steps - 1 - step_param); + + if (cpi->sf.search_method == HEX) { +#if CONFIG_MULTI_RES_ENCODING + /* TODO: In higher-res pick_inter_mode, step_param is used to + * modify hex search range. Here, set step_param to 0 not to + * change the behavior in lowest-resolution encoder. + * Will improve it later. + */ + /* Set step_param to 0 to ensure large-range motion search + * when mv reuse if not valid (i.e. |parent_ref_valid| = 0), + * or if this candidate reference frame (|this_ref_frame|) is + * not equal to |parent_ref_frame|. + */ + if (!parent_ref_valid || (parent_ref_frame != this_ref_frame)) + step_param = 0; +#endif + bestsme = vp8_hex_search(x, b, d, &mvp_full, &d->bmi.mv, step_param, + sadpb, &cpi->fn_ptr[BLOCK_16X16], + x->mvsadcost, x->mvcost, &best_ref_mv); + mode_mv[NEWMV].as_int = d->bmi.mv.as_int; + } else { + bestsme = cpi->diamond_search_sad( + x, b, d, &mvp_full, &d->bmi.mv, step_param, sadpb, &num00, + &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv); + mode_mv[NEWMV].as_int = d->bmi.mv.as_int; + + /* Further step/diamond searches as necessary */ + n = num00; + num00 = 0; + + while (n < further_steps) { + n++; + + if (num00) { + num00--; + } else { + thissme = cpi->diamond_search_sad( + x, b, d, &mvp_full, &d->bmi.mv, step_param + n, sadpb, + &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv); + if (thissme < bestsme) { + bestsme = thissme; + mode_mv[NEWMV].as_int = d->bmi.mv.as_int; + } else { + d->bmi.mv.as_int = mode_mv[NEWMV].as_int; + } + } + } + } + + x->mv_col_min = tmp_col_min; + x->mv_col_max = tmp_col_max; + x->mv_row_min = tmp_row_min; + x->mv_row_max = tmp_row_max; + + if (bestsme < INT_MAX) { + cpi->find_fractional_mv_step( + x, 
b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit, + &cpi->fn_ptr[BLOCK_16X16], cpi->mb.mvcost, &distortion2, &sse); + } + } + + mode_mv[NEWMV].as_int = d->bmi.mv.as_int; + // The clamp below is not necessary from the perspective + // of VP8 bitstream, but is added to improve ChromeCast + // mirroring's robustness. Please do not remove. + vp8_clamp_mv2(&mode_mv[this_mode], xd); + /* mv cost; */ + rate2 += + vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, cpi->mb.mvcost, 128); + } + + case NEARESTMV: + case NEARMV: + if (mode_mv[this_mode].as_int == 0) continue; + + case ZEROMV: + + /* Trap vectors that reach beyond the UMV borders + * Note that ALL New MV, Nearest MV Near MV and Zero MV code drops + * through to this point because of the lack of break statements + * in the previous two cases. + */ + if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || + ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) || + ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || + ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) { + continue; + } + + rate2 += vp8_cost_mv_ref(this_mode, mdcounts); + x->e_mbd.mode_info_context->mbmi.mv.as_int = mode_mv[this_mode].as_int; + this_rd = evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x, + rd_adjustment); + + break; + default: break; } #if CONFIG_TEMPORAL_DENOISING if (cpi->oxcf.noise_sensitivity) { - rd_adjustment = (int)(rd_adjustment * - cpi->denoiser.denoise_pars.pickmode_mv_bias / 100); + /* Store for later use by denoiser. */ + // Dont' denoise with GOLDEN OR ALTREF is they are old reference + // frames (greater than MAX_GF_ARF_DENOISE_RANGE frames in past). + int skip_old_reference = ((this_ref_frame != LAST_FRAME) && + (cpi->common.current_video_frame - + cpi->current_ref_frames[this_ref_frame] > + MAX_GF_ARF_DENOISE_RANGE)) + ? 
1 + : 0; + if (this_mode == ZEROMV && sse < zero_mv_sse && !skip_old_reference) { + zero_mv_sse = sse; + x->best_zeromv_reference_frame = + x->e_mbd.mode_info_context->mbmi.ref_frame; + } + + // Store the best NEWMV in x for later use in the denoiser. + if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV && sse < best_sse && + !skip_old_reference) { + best_sse = sse; + x->best_sse_inter_mode = NEWMV; + x->best_sse_mv = x->e_mbd.mode_info_context->mbmi.mv; + x->need_to_clamp_best_mvs = + x->e_mbd.mode_info_context->mbmi.need_to_clamp_mvs; + x->best_reference_frame = x->e_mbd.mode_info_context->mbmi.ref_frame; + } } #endif - if (dot_artifact_candidate) - { - // Bias against ZEROMV_LAST mode. - rd_adjustment = 150; - } + if (this_rd < best_rd || x->skip) { + /* Note index of best mode */ + best_mode_index = mode_index; + *returnrate = rate2; + *returndistortion = distortion2; + best_rd_sse = sse; + best_rd = this_rd; + memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, + sizeof(MB_MODE_INFO)); - /* if we encode a new mv this is important - * find the best new motion vector - */ - for (mode_index = 0; mode_index < MAX_MODES; mode_index++) - { - int frame_cost; - int this_rd = INT_MAX; - int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]]; - - if (best_rd <= x->rd_threshes[mode_index]) - continue; - - if (this_ref_frame < 0) - continue; - - x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame; - - /* everything but intra */ - if (x->e_mbd.mode_info_context->mbmi.ref_frame) - { - x->e_mbd.pre.y_buffer = plane[this_ref_frame][0]; - x->e_mbd.pre.u_buffer = plane[this_ref_frame][1]; - x->e_mbd.pre.v_buffer = plane[this_ref_frame][2]; - - if (sign_bias != cpi->common.ref_frame_sign_bias[this_ref_frame]) - { - sign_bias = cpi->common.ref_frame_sign_bias[this_ref_frame]; - mode_mv = mode_mv_sb[sign_bias]; - best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int; - } - -#if CONFIG_MULTI_RES_ENCODING - if (parent_ref_valid) - { - if 
(vp8_mode_order[mode_index] == NEARESTMV && - mode_mv[NEARESTMV].as_int ==0) - continue; - if (vp8_mode_order[mode_index] == NEARMV && - mode_mv[NEARMV].as_int ==0) - continue; - - if (vp8_mode_order[mode_index] == NEWMV && parent_mode == ZEROMV - && best_ref_mv.as_int==0) - continue; - else if(vp8_mode_order[mode_index] == NEWMV && dissim==0 - && best_ref_mv.as_int==parent_ref_mv.as_int) - continue; - } -#endif - } - - /* Check to see if the testing frequency for this mode is at its max - * If so then prevent it from being tested and increase the threshold - * for its testing */ - if (x->mode_test_hit_counts[mode_index] && - (cpi->mode_check_freq[mode_index] > 1)) - { - if (x->mbs_tested_so_far <= (cpi->mode_check_freq[mode_index] * - x->mode_test_hit_counts[mode_index])) - { - /* Increase the threshold for coding this mode to make it less - * likely to be chosen */ - x->rd_thresh_mult[mode_index] += 4; - - if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) - x->rd_thresh_mult[mode_index] = MAX_THRESHMULT; - - x->rd_threshes[mode_index] = - (cpi->rd_baseline_thresh[mode_index] >> 7) * - x->rd_thresh_mult[mode_index]; - continue; - } - } - - /* We have now reached the point where we are going to test the current - * mode so increment the counter for the number of times it has been - * tested */ - x->mode_test_hit_counts[mode_index] ++; - - rate2 = 0; - distortion2 = 0; - - this_mode = vp8_mode_order[mode_index]; - - x->e_mbd.mode_info_context->mbmi.mode = this_mode; - x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED; - - /* Work out the cost assosciated with selecting the reference frame */ - frame_cost = - x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame]; - rate2 += frame_cost; - - /* Only consider ZEROMV/ALTREF_FRAME for alt ref frame, - * unless ARNR filtering is enabled in which case we want - * an unfiltered alternative */ - if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) - { - if (this_mode != ZEROMV || - 
x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) - continue; - } - - switch (this_mode) - { - case B_PRED: - /* Pass best so far to pick_intra4x4mby_modes to use as breakout */ - distortion2 = best_rd_sse; - pick_intra4x4mby_modes(x, &rate, &distortion2); - - if (distortion2 == INT_MAX) - { - this_rd = INT_MAX; - } - else - { - rate2 += rate; - distortion2 = vpx_variance16x16( - *(b->base_src), b->src_stride, - x->e_mbd.predictor, 16, &sse); - this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2); - - if (this_rd < best_intra_rd) - { - best_intra_rd = this_rd; - *returnintra = distortion2; - } - } - - break; - - case SPLITMV: - - /* Split MV modes currently not supported when RD is not enabled. */ - break; - - case DC_PRED: - case V_PRED: - case H_PRED: - case TM_PRED: - vp8_build_intra_predictors_mby_s(xd, - xd->dst.y_buffer - xd->dst.y_stride, - xd->dst.y_buffer - 1, - xd->dst.y_stride, - xd->predictor, - 16); - distortion2 = vpx_variance16x16 - (*(b->base_src), b->src_stride, - x->e_mbd.predictor, 16, &sse); - rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode]; - this_rd = RDCOST(x->rdmult, x->rddiv, rate2, distortion2); - - if (this_rd < best_intra_rd) - { - best_intra_rd = this_rd; - *returnintra = distortion2; - } - break; - - case NEWMV: - { - int thissme; - int step_param; - int further_steps; - int n = 0; - int sadpb = x->sadperbit16; - int_mv mvp_full; - - int col_min = ((best_ref_mv.as_mv.col+7)>>3) - MAX_FULL_PEL_VAL; - int row_min = ((best_ref_mv.as_mv.row+7)>>3) - MAX_FULL_PEL_VAL; - int col_max = (best_ref_mv.as_mv.col>>3) - + MAX_FULL_PEL_VAL; - int row_max = (best_ref_mv.as_mv.row>>3) - + MAX_FULL_PEL_VAL; - - int tmp_col_min = x->mv_col_min; - int tmp_col_max = x->mv_col_max; - int tmp_row_min = x->mv_row_min; - int tmp_row_max = x->mv_row_max; - - int speed_adjust = (cpi->Speed > 5) ? ((cpi->Speed >= 8)? 
3 : 2) : 1; - - /* Further step/diamond searches as necessary */ - step_param = cpi->sf.first_step + speed_adjust; - -#if CONFIG_MULTI_RES_ENCODING - /* If lower-res frame is not available for mv reuse (because of - frame dropping or different temporal layer pattern), then higher - resol encoder does motion search without any previous knowledge. - Also, since last frame motion info is not stored, then we can not - use improved_mv_pred. */ - if (cpi->oxcf.mr_encoder_id) - sf_improved_mv_pred = 0; - - // Only use parent MV as predictor if this candidate reference frame - // (|this_ref_frame|) is equal to |parent_ref_frame|. - if (parent_ref_valid && (parent_ref_frame == this_ref_frame)) - { - /* Use parent MV as predictor. Adjust search range - * accordingly. - */ - mvp.as_int = parent_ref_mv.as_int; - mvp_full.as_mv.col = parent_ref_mv.as_mv.col>>3; - mvp_full.as_mv.row = parent_ref_mv.as_mv.row>>3; - - if(dissim <=32) step_param += 3; - else if(dissim <=128) step_param += 2; - else step_param += 1; - }else -#endif - { - if(sf_improved_mv_pred) - { - if(!saddone) - { - vp8_cal_sad(cpi,xd,x, recon_yoffset ,&near_sadidx[0] ); - saddone = 1; - } - - vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, - &mvp,x->e_mbd.mode_info_context->mbmi.ref_frame, - cpi->common.ref_frame_sign_bias, &sr, - &near_sadidx[0]); - - sr += speed_adjust; - /* adjust search range according to sr from mv prediction */ - if(sr > step_param) - step_param = sr; - - mvp_full.as_mv.col = mvp.as_mv.col>>3; - mvp_full.as_mv.row = mvp.as_mv.row>>3; - }else - { - mvp.as_int = best_ref_mv.as_int; - mvp_full.as_mv.col = best_ref_mv.as_mv.col>>3; - mvp_full.as_mv.row = best_ref_mv.as_mv.row>>3; - } - } - -#if CONFIG_MULTI_RES_ENCODING - if (parent_ref_valid && (parent_ref_frame == this_ref_frame) && - dissim <= 2 && - VPXMAX(abs(best_ref_mv.as_mv.row - parent_ref_mv.as_mv.row), - abs(best_ref_mv.as_mv.col - parent_ref_mv.as_mv.col)) <= - 4) - { - d->bmi.mv.as_int = mvp_full.as_int; - 
mode_mv[NEWMV].as_int = mvp_full.as_int; - - cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv, &best_ref_mv, - x->errorperbit, - &cpi->fn_ptr[BLOCK_16X16], - cpi->mb.mvcost, - &distortion2,&sse); - }else -#endif - { - /* Get intersection of UMV window and valid MV window to - * reduce # of checks in diamond search. */ - if (x->mv_col_min < col_min ) - x->mv_col_min = col_min; - if (x->mv_col_max > col_max ) - x->mv_col_max = col_max; - if (x->mv_row_min < row_min ) - x->mv_row_min = row_min; - if (x->mv_row_max > row_max ) - x->mv_row_max = row_max; - - further_steps = (cpi->Speed >= 8)? - 0: (cpi->sf.max_step_search_steps - 1 - step_param); - - if (cpi->sf.search_method == HEX) - { -#if CONFIG_MULTI_RES_ENCODING - /* TODO: In higher-res pick_inter_mode, step_param is used to - * modify hex search range. Here, set step_param to 0 not to - * change the behavior in lowest-resolution encoder. - * Will improve it later. - */ - /* Set step_param to 0 to ensure large-range motion search - * when mv reuse if not valid (i.e. |parent_ref_valid| = 0), - * or if this candidate reference frame (|this_ref_frame|) is - * not equal to |parent_ref_frame|. 
- */ - if (!parent_ref_valid || (parent_ref_frame != this_ref_frame)) - step_param = 0; -#endif - bestsme = vp8_hex_search(x, b, d, &mvp_full, &d->bmi.mv, - step_param, sadpb, - &cpi->fn_ptr[BLOCK_16X16], - x->mvsadcost, x->mvcost, &best_ref_mv); - mode_mv[NEWMV].as_int = d->bmi.mv.as_int; - } - else - { - bestsme = cpi->diamond_search_sad(x, b, d, &mvp_full, - &d->bmi.mv, step_param, sadpb, &num00, - &cpi->fn_ptr[BLOCK_16X16], - x->mvcost, &best_ref_mv); - mode_mv[NEWMV].as_int = d->bmi.mv.as_int; - - /* Further step/diamond searches as necessary */ - n = num00; - num00 = 0; - - while (n < further_steps) - { - n++; - - if (num00) - num00--; - else - { - thissme = - cpi->diamond_search_sad(x, b, d, &mvp_full, - &d->bmi.mv, - step_param + n, - sadpb, &num00, - &cpi->fn_ptr[BLOCK_16X16], - x->mvcost, &best_ref_mv); - if (thissme < bestsme) - { - bestsme = thissme; - mode_mv[NEWMV].as_int = d->bmi.mv.as_int; - } - else - { - d->bmi.mv.as_int = mode_mv[NEWMV].as_int; - } - } - } - } - - x->mv_col_min = tmp_col_min; - x->mv_col_max = tmp_col_max; - x->mv_row_min = tmp_row_min; - x->mv_row_max = tmp_row_max; - - if (bestsme < INT_MAX) - cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv, - &best_ref_mv, x->errorperbit, - &cpi->fn_ptr[BLOCK_16X16], - cpi->mb.mvcost, - &distortion2,&sse); - } - - mode_mv[NEWMV].as_int = d->bmi.mv.as_int; - // The clamp below is not necessary from the perspective - // of VP8 bitstream, but is added to improve ChromeCast - // mirroring's robustness. Please do not remove. - vp8_clamp_mv2(&mode_mv[this_mode], xd); - /* mv cost; */ - rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, - cpi->mb.mvcost, 128); - } - - case NEARESTMV: - case NEARMV: - if (mode_mv[this_mode].as_int == 0) - continue; - - case ZEROMV: - - /* Trap vectors that reach beyond the UMV borders - * Note that ALL New MV, Nearest MV Near MV and Zero MV code drops - * through to this point because of the lack of break statements - * in the previous two cases. 
- */ - if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || - ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) || - ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || - ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) - continue; - - rate2 += vp8_cost_mv_ref(this_mode, mdcounts); - x->e_mbd.mode_info_context->mbmi.mv.as_int = - mode_mv[this_mode].as_int; - this_rd = evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x, - rd_adjustment); - - break; - default: - break; - } - -#if CONFIG_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity) - { - /* Store for later use by denoiser. */ - // Dont' denoise with GOLDEN OR ALTREF is they are old reference - // frames (greater than MAX_GF_ARF_DENOISE_RANGE frames in past). - int skip_old_reference = ((this_ref_frame != LAST_FRAME) && - (cpi->common.current_video_frame - - cpi->current_ref_frames[this_ref_frame] > - MAX_GF_ARF_DENOISE_RANGE)) ? 1 : 0; - if (this_mode == ZEROMV && sse < zero_mv_sse && - !skip_old_reference) - { - zero_mv_sse = sse; - x->best_zeromv_reference_frame = - x->e_mbd.mode_info_context->mbmi.ref_frame; - } - - // Store the best NEWMV in x for later use in the denoiser. - if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV && - sse < best_sse && !skip_old_reference) - { - best_sse = sse; - x->best_sse_inter_mode = NEWMV; - x->best_sse_mv = x->e_mbd.mode_info_context->mbmi.mv; - x->need_to_clamp_best_mvs = - x->e_mbd.mode_info_context->mbmi.need_to_clamp_mvs; - x->best_reference_frame = - x->e_mbd.mode_info_context->mbmi.ref_frame; - } - } -#endif - - if (this_rd < best_rd || x->skip) - { - /* Note index of best mode */ - best_mode_index = mode_index; - - *returnrate = rate2; - *returndistortion = distortion2; - best_rd_sse = sse; - best_rd = this_rd; - memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, - sizeof(MB_MODE_INFO)); - - /* Testing this mode gave rise to an improvement in best error - * score. 
Lower threshold a bit for next time - */ - x->rd_thresh_mult[mode_index] = - (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? - x->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT; - x->rd_threshes[mode_index] = - (cpi->rd_baseline_thresh[mode_index] >> 7) * + /* Testing this mode gave rise to an improvement in best error + * score. Lower threshold a bit for next time + */ + x->rd_thresh_mult[mode_index] = + (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) + ? x->rd_thresh_mult[mode_index] - 2 + : MIN_THRESHMULT; + x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * x->rd_thresh_mult[mode_index]; - } - - /* If the mode did not help improve the best error case then raise the - * threshold for testing that mode next time around. - */ - else - { - x->rd_thresh_mult[mode_index] += 4; - - if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) - x->rd_thresh_mult[mode_index] = MAX_THRESHMULT; - - x->rd_threshes[mode_index] = - (cpi->rd_baseline_thresh[mode_index] >> 7) * - x->rd_thresh_mult[mode_index]; - } - - if (x->skip) - break; } - /* Reduce the activation RD thresholds for the best choice mode */ - if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) - { - int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 3); + /* If the mode did not help improve the best error case then raise the + * threshold for testing that mode next time around. + */ + else { + x->rd_thresh_mult[mode_index] += 4; - x->rd_thresh_mult[best_mode_index] = - (x->rd_thresh_mult[best_mode_index] - >= (MIN_THRESHMULT + best_adjustment)) ? 
- x->rd_thresh_mult[best_mode_index] - best_adjustment : - MIN_THRESHMULT; - x->rd_threshes[best_mode_index] = - (cpi->rd_baseline_thresh[best_mode_index] >> 7) * - x->rd_thresh_mult[best_mode_index]; + if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) { + x->rd_thresh_mult[mode_index] = MAX_THRESHMULT; + } + + x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * + x->rd_thresh_mult[mode_index]; } + if (x->skip) break; + } - { - int this_rdbin = (*returndistortion >> 7); + /* Reduce the activation RD thresholds for the best choice mode */ + if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && + (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) { + int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 3); - if (this_rdbin >= 1024) - { - this_rdbin = 1023; - } + x->rd_thresh_mult[best_mode_index] = + (x->rd_thresh_mult[best_mode_index] >= + (MIN_THRESHMULT + best_adjustment)) + ? x->rd_thresh_mult[best_mode_index] - best_adjustment + : MIN_THRESHMULT; + x->rd_threshes[best_mode_index] = + (cpi->rd_baseline_thresh[best_mode_index] >> 7) * + x->rd_thresh_mult[best_mode_index]; + } - x->error_bins[this_rdbin] ++; + { + int this_rdbin = (*returndistortion >> 7); + + if (this_rdbin >= 1024) { + this_rdbin = 1023; } + x->error_bins[this_rdbin]++; + } + #if CONFIG_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity) - { - int block_index = mb_row * cpi->common.mb_cols + mb_col; - int reevaluate = 0; - int is_noisy = 0; - if (x->best_sse_inter_mode == DC_PRED) - { - /* No best MV found. 
*/ - x->best_sse_inter_mode = best_mbmode.mode; - x->best_sse_mv = best_mbmode.mv; - x->need_to_clamp_best_mvs = best_mbmode.need_to_clamp_mvs; - x->best_reference_frame = best_mbmode.ref_frame; - best_sse = best_rd_sse; - } - // For non-skin blocks that have selected ZEROMV for this current frame, - // and have been selecting ZEROMV_LAST (on the base layer frame) at - // least |x~20| consecutive past frames in a row, label the block for - // possible increase in denoising strength. We also condition this - // labeling on there being significant denoising in the scene - if (cpi->oxcf.noise_sensitivity == 4) { - if (cpi->denoiser.nmse_source_diff > - 70 * cpi->denoiser.threshold_aggressive_mode / 100) - is_noisy = 1; - } else { - if (cpi->mse_source_denoised > 1000) - is_noisy = 1; - } - x->increase_denoising = 0; - if (!x->is_skin && - x->best_sse_inter_mode == ZEROMV && - (x->best_reference_frame == LAST_FRAME || - x->best_reference_frame == cpi->closest_reference_frame) && - cpi->consec_zero_last[block_index] >= 20 && - is_noisy) { - x->increase_denoising = 1; - } - x->denoise_zeromv = 0; - vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse, - recon_yoffset, recon_uvoffset, - &cpi->common.lf_info, mb_row, mb_col, - block_index); - - // Reevaluate ZEROMV after denoising: for large noise content - // (i.e., cpi->mse_source_denoised is above threshold), do this for all - // blocks that did not pick ZEROMV as best mode but are using ZEROMV - // for denoising. Otherwise, always re-evaluate for blocks that picked - // INTRA mode as best mode. - // Avoid blocks that have been biased against ZERO_LAST - // (i.e., dot artifact candidate blocks). 
- reevaluate = (best_mbmode.ref_frame == INTRA_FRAME) || - (best_mbmode.mode != ZEROMV && - x->denoise_zeromv && - cpi->mse_source_denoised > 2000); - if (!dot_artifact_candidate && - reevaluate && - x->best_zeromv_reference_frame != INTRA_FRAME) - { - int this_rd = 0; - int this_ref_frame = x->best_zeromv_reference_frame; - rd_adjustment = 100; - rate2 = x->ref_frame_cost[this_ref_frame] + - vp8_cost_mv_ref(ZEROMV, mdcounts); - distortion2 = 0; - - /* set up the proper prediction buffers for the frame */ - x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame; - x->e_mbd.pre.y_buffer = plane[this_ref_frame][0]; - x->e_mbd.pre.u_buffer = plane[this_ref_frame][1]; - x->e_mbd.pre.v_buffer = plane[this_ref_frame][2]; - - x->e_mbd.mode_info_context->mbmi.mode = ZEROMV; - x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED; - x->e_mbd.mode_info_context->mbmi.mv.as_int = 0; - this_rd = evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x, - rd_adjustment); - - if (this_rd < best_rd) - { - memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, - sizeof(MB_MODE_INFO)); - } - } - + if (cpi->oxcf.noise_sensitivity) { + int block_index = mb_row * cpi->common.mb_cols + mb_col; + int reevaluate = 0; + int is_noisy = 0; + if (x->best_sse_inter_mode == DC_PRED) { + /* No best MV found. */ + x->best_sse_inter_mode = best_mbmode.mode; + x->best_sse_mv = best_mbmode.mv; + x->need_to_clamp_best_mvs = best_mbmode.need_to_clamp_mvs; + x->best_reference_frame = best_mbmode.ref_frame; + best_sse = best_rd_sse; } + // For non-skin blocks that have selected ZEROMV for this current frame, + // and have been selecting ZEROMV_LAST (on the base layer frame) at + // least |x~20| consecutive past frames in a row, label the block for + // possible increase in denoising strength. 
We also condition this + // labeling on there being significant denoising in the scene + if (cpi->oxcf.noise_sensitivity == 4) { + if (cpi->denoiser.nmse_source_diff > + 70 * cpi->denoiser.threshold_aggressive_mode / 100) { + is_noisy = 1; + } + } else { + if (cpi->mse_source_denoised > 1000) is_noisy = 1; + } + x->increase_denoising = 0; + if (!x->is_skin && x->best_sse_inter_mode == ZEROMV && + (x->best_reference_frame == LAST_FRAME || + x->best_reference_frame == cpi->closest_reference_frame) && + cpi->consec_zero_last[block_index] >= 20 && is_noisy) { + x->increase_denoising = 1; + } + x->denoise_zeromv = 0; + vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse, + recon_yoffset, recon_uvoffset, &cpi->common.lf_info, + mb_row, mb_col, block_index, + cpi->consec_zero_last_mvbias[block_index]); + + // Reevaluate ZEROMV after denoising: for large noise content + // (i.e., cpi->mse_source_denoised is above threshold), do this for all + // blocks that did not pick ZEROMV as best mode but are using ZEROMV + // for denoising. Otherwise, always re-evaluate for blocks that picked + // INTRA mode as best mode. + // Avoid blocks that have been biased against ZERO_LAST + // (i.e., dot artifact candidate blocks). 
+ reevaluate = (best_mbmode.ref_frame == INTRA_FRAME) || + (best_mbmode.mode != ZEROMV && x->denoise_zeromv && + cpi->mse_source_denoised > 2000); + if (!dot_artifact_candidate && reevaluate && + x->best_zeromv_reference_frame != INTRA_FRAME) { + int this_rd = 0; + int this_ref_frame = x->best_zeromv_reference_frame; + rd_adjustment = 100; + rate2 = + x->ref_frame_cost[this_ref_frame] + vp8_cost_mv_ref(ZEROMV, mdcounts); + distortion2 = 0; + + /* set up the proper prediction buffers for the frame */ + x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame; + x->e_mbd.pre.y_buffer = plane[this_ref_frame][0]; + x->e_mbd.pre.u_buffer = plane[this_ref_frame][1]; + x->e_mbd.pre.v_buffer = plane[this_ref_frame][2]; + + x->e_mbd.mode_info_context->mbmi.mode = ZEROMV; + x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED; + x->e_mbd.mode_info_context->mbmi.mv.as_int = 0; + this_rd = + evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x, rd_adjustment); + + if (this_rd < best_rd) { + memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, + sizeof(MB_MODE_INFO)); + } + } + } #endif - if (cpi->is_src_frame_alt_ref && - (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) - { - x->e_mbd.mode_info_context->mbmi.mode = ZEROMV; - x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME; - x->e_mbd.mode_info_context->mbmi.mv.as_int = 0; - x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED; - x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = - (cpi->common.mb_no_coeff_skip); - x->e_mbd.mode_info_context->mbmi.partitioning = 0; + if (cpi->is_src_frame_alt_ref && + (best_mbmode.mode != ZEROMV || best_mbmode.ref_frame != ALTREF_FRAME)) { + x->e_mbd.mode_info_context->mbmi.mode = ZEROMV; + x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME; + x->e_mbd.mode_info_context->mbmi.mv.as_int = 0; + x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED; + x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = + (cpi->common.mb_no_coeff_skip); + 
x->e_mbd.mode_info_context->mbmi.partitioning = 0; - return; - } + return; + } - /* set to the best mb mode, this copy can be skip if x->skip since it - * already has the right content */ - if (!x->skip) - memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, - sizeof(MB_MODE_INFO)); - - if (best_mbmode.mode <= B_PRED) - { - /* set mode_info_context->mbmi.uv_mode */ - pick_intra_mbuv_mode(x); - } - - if (sign_bias - != cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame]) - best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int; - - update_mvcount(x, &best_ref_mv); -} - -void vp8_pick_intra_mode(MACROBLOCK *x, int *rate_) -{ - int error4x4, error16x16 = INT_MAX; - int rate, best_rate = 0, distortion, best_sse; - MB_PREDICTION_MODE mode, best_mode = DC_PRED; - int this_rd; - unsigned int sse; - BLOCK *b = &x->block[0]; - MACROBLOCKD *xd = &x->e_mbd; - - xd->mode_info_context->mbmi.ref_frame = INTRA_FRAME; + /* set to the best mb mode, this copy can be skip if x->skip since it + * already has the right content */ + if (!x->skip) { + memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mbmode, + sizeof(MB_MODE_INFO)); + } + if (best_mbmode.mode <= B_PRED) { + /* set mode_info_context->mbmi.uv_mode */ pick_intra_mbuv_mode(x); + } - for (mode = DC_PRED; mode <= TM_PRED; mode ++) - { - xd->mode_info_context->mbmi.mode = mode; - vp8_build_intra_predictors_mby_s(xd, - xd->dst.y_buffer - xd->dst.y_stride, - xd->dst.y_buffer - 1, - xd->dst.y_stride, - xd->predictor, - 16); - distortion = vpx_variance16x16 - (*(b->base_src), b->src_stride, xd->predictor, 16, &sse); - rate = x->mbmode_cost[xd->frame_type][mode]; - this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion); + if (sign_bias != + cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame]) { + best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int; + } - if (error16x16 > this_rd) - { - error16x16 = this_rd; - best_mode = mode; - best_sse = sse; - best_rate = rate; - } - } - 
xd->mode_info_context->mbmi.mode = best_mode; - - error4x4 = pick_intra4x4mby_modes(x, &rate, - &best_sse); - if (error4x4 < error16x16) - { - xd->mode_info_context->mbmi.mode = B_PRED; - best_rate = rate; - } - - *rate_ = best_rate; + update_mvcount(x, &best_ref_mv); +} + +void vp8_pick_intra_mode(MACROBLOCK *x, int *rate_) { + int error4x4, error16x16 = INT_MAX; + int rate, best_rate = 0, distortion, best_sse; + MB_PREDICTION_MODE mode, best_mode = DC_PRED; + int this_rd; + unsigned int sse; + BLOCK *b = &x->block[0]; + MACROBLOCKD *xd = &x->e_mbd; + + xd->mode_info_context->mbmi.ref_frame = INTRA_FRAME; + + pick_intra_mbuv_mode(x); + + for (mode = DC_PRED; mode <= TM_PRED; ++mode) { + xd->mode_info_context->mbmi.mode = mode; + vp8_build_intra_predictors_mby_s(xd, xd->dst.y_buffer - xd->dst.y_stride, + xd->dst.y_buffer - 1, xd->dst.y_stride, + xd->predictor, 16); + distortion = vpx_variance16x16(*(b->base_src), b->src_stride, xd->predictor, + 16, &sse); + rate = x->mbmode_cost[xd->frame_type][mode]; + this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion); + + if (error16x16 > this_rd) { + error16x16 = this_rd; + best_mode = mode; + best_sse = sse; + best_rate = rate; + } + } + xd->mode_info_context->mbmi.mode = best_mode; + + error4x4 = pick_intra4x4mby_modes(x, &rate, &best_sse); + if (error4x4 < error16x16) { + xd->mode_info_context->mbmi.mode = B_PRED; + best_rate = rate; + } + + *rate_ = best_rate; } diff --git a/libs/libvpx/vp8/encoder/pickinter.h b/libs/libvpx/vp8/encoder/pickinter.h index cf3b1f8d49..bf1d0c9749 100644 --- a/libs/libvpx/vp8/encoder/pickinter.h +++ b/libs/libvpx/vp8/encoder/pickinter.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_ENCODER_PICKINTER_H_ #define VP8_ENCODER_PICKINTER_H_ #include "vpx_config.h" @@ -26,8 +25,7 @@ extern void vp8_pick_intra_mode(MACROBLOCK *x, int *rate); extern int vp8_get_inter_mbpred_error(MACROBLOCK *mb, const vp8_variance_fn_ptr_t *vfp, - unsigned int *sse, - int_mv this_mv); + unsigned int *sse, int_mv this_mv); #ifdef __cplusplus } // extern "C" #endif diff --git a/libs/libvpx/vp8/encoder/picklpf.c b/libs/libvpx/vp8/encoder/picklpf.c index debd304130..6f287322ec 100644 --- a/libs/libvpx/vp8/encoder/picklpf.c +++ b/libs/libvpx/vp8/encoder/picklpf.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include "./vpx_dsp_rtcd.h" #include "./vpx_scale_rtcd.h" #include "vp8/common/onyxc_int.h" @@ -22,386 +21,371 @@ #include "vpx_ports/arm.h" #endif -extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, YV12_BUFFER_CONFIG *dest); +extern int vp8_calc_ss_err(YV12_BUFFER_CONFIG *source, + YV12_BUFFER_CONFIG *dest); static void yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc, - YV12_BUFFER_CONFIG *dst_ybc) -{ - unsigned char *src_y, *dst_y; - int yheight; - int ystride; - int yoffset; - int linestocopy; + YV12_BUFFER_CONFIG *dst_ybc) { + unsigned char *src_y, *dst_y; + int yheight; + int ystride; + int yoffset; + int linestocopy; - yheight = src_ybc->y_height; - ystride = src_ybc->y_stride; + yheight = src_ybc->y_height; + ystride = src_ybc->y_stride; - /* number of MB rows to use in partial filtering */ - linestocopy = (yheight >> 4) / PARTIAL_FRAME_FRACTION; - linestocopy = linestocopy ? linestocopy << 4 : 16; /* 16 lines per MB */ + /* number of MB rows to use in partial filtering */ + linestocopy = (yheight >> 4) / PARTIAL_FRAME_FRACTION; + linestocopy = linestocopy ? linestocopy << 4 : 16; /* 16 lines per MB */ - /* Copy extra 4 so that full filter context is available if filtering done - * on the copied partial frame and not original. 
Partial filter does mb - * filtering for top row also, which can modify3 pixels above. - */ - linestocopy += 4; - /* partial image starts at ~middle of frame (macroblock border)*/ - yoffset = ystride * (((yheight >> 5) * 16) - 4); - src_y = src_ybc->y_buffer + yoffset; - dst_y = dst_ybc->y_buffer + yoffset; + /* Copy extra 4 so that full filter context is available if filtering done + * on the copied partial frame and not original. Partial filter does mb + * filtering for top row also, which can modify3 pixels above. + */ + linestocopy += 4; + /* partial image starts at ~middle of frame (macroblock border)*/ + yoffset = ystride * (((yheight >> 5) * 16) - 4); + src_y = src_ybc->y_buffer + yoffset; + dst_y = dst_ybc->y_buffer + yoffset; - memcpy(dst_y, src_y, ystride * linestocopy); + memcpy(dst_y, src_y, ystride * linestocopy); } static int calc_partial_ssl_err(YV12_BUFFER_CONFIG *source, - YV12_BUFFER_CONFIG *dest) -{ - int i, j; - int Total = 0; - int srcoffset, dstoffset; - unsigned char *src = source->y_buffer; - unsigned char *dst = dest->y_buffer; + YV12_BUFFER_CONFIG *dest) { + int i, j; + int Total = 0; + int srcoffset, dstoffset; + unsigned char *src = source->y_buffer; + unsigned char *dst = dest->y_buffer; - int linestocopy; + int linestocopy; - /* number of MB rows to use in partial filtering */ - linestocopy = (source->y_height >> 4) / PARTIAL_FRAME_FRACTION; - linestocopy = linestocopy ? linestocopy << 4 : 16; /* 16 lines per MB */ + /* number of MB rows to use in partial filtering */ + linestocopy = (source->y_height >> 4) / PARTIAL_FRAME_FRACTION; + linestocopy = linestocopy ? 
linestocopy << 4 : 16; /* 16 lines per MB */ + /* partial image starts at ~middle of frame (macroblock border)*/ + srcoffset = source->y_stride * ((dest->y_height >> 5) * 16); + dstoffset = dest->y_stride * ((dest->y_height >> 5) * 16); - /* partial image starts at ~middle of frame (macroblock border)*/ - srcoffset = source->y_stride * ((dest->y_height >> 5) * 16); - dstoffset = dest->y_stride * ((dest->y_height >> 5) * 16); + src += srcoffset; + dst += dstoffset; - src += srcoffset; - dst += dstoffset; - - /* Loop through the Y plane raw and reconstruction data summing - * (square differences) - */ - for (i = 0; i < linestocopy; i += 16) - { - for (j = 0; j < source->y_width; j += 16) - { - unsigned int sse; - Total += vpx_mse16x16(src + j, source->y_stride, - dst + j, dest->y_stride, - &sse); - } - - src += 16 * source->y_stride; - dst += 16 * dest->y_stride; + /* Loop through the Y plane raw and reconstruction data summing + * (square differences) + */ + for (i = 0; i < linestocopy; i += 16) { + for (j = 0; j < source->y_width; j += 16) { + unsigned int sse; + Total += vpx_mse16x16(src + j, source->y_stride, dst + j, dest->y_stride, + &sse); } - return Total; + src += 16 * source->y_stride; + dst += 16 * dest->y_stride; + } + + return Total; } /* Enforce a minimum filter level based upon baseline Q */ -static int get_min_filter_level(VP8_COMP *cpi, int base_qindex) -{ - int min_filter_level; +static int get_min_filter_level(VP8_COMP *cpi, int base_qindex) { + int min_filter_level; - if (cpi->source_alt_ref_active && cpi->common.refresh_golden_frame && - !cpi->common.refresh_alt_ref_frame) - min_filter_level = 0; - else - { - if (base_qindex <= 6) - min_filter_level = 0; - else if (base_qindex <= 16) - min_filter_level = 1; - else - min_filter_level = (base_qindex / 8); + if (cpi->source_alt_ref_active && cpi->common.refresh_golden_frame && + !cpi->common.refresh_alt_ref_frame) { + min_filter_level = 0; + } else { + if (base_qindex <= 6) { + min_filter_level = 0; 
+ } else if (base_qindex <= 16) { + min_filter_level = 1; + } else { + min_filter_level = (base_qindex / 8); } + } - return min_filter_level; + return min_filter_level; } /* Enforce a maximum filter level based upon baseline Q */ -static int get_max_filter_level(VP8_COMP *cpi, int base_qindex) -{ - /* PGW August 2006: Highest filter values almost always a bad idea */ +static int get_max_filter_level(VP8_COMP *cpi, int base_qindex) { + /* PGW August 2006: Highest filter values almost always a bad idea */ - /* jbb chg: 20100118 - not so any more with this overquant stuff allow - * high values with lots of intra coming in. - */ - int max_filter_level = MAX_LOOP_FILTER; - (void)base_qindex; + /* jbb chg: 20100118 - not so any more with this overquant stuff allow + * high values with lots of intra coming in. + */ + int max_filter_level = MAX_LOOP_FILTER; + (void)base_qindex; - if (cpi->twopass.section_intra_rating > 8) - max_filter_level = MAX_LOOP_FILTER * 3 / 4; + if (cpi->twopass.section_intra_rating > 8) { + max_filter_level = MAX_LOOP_FILTER * 3 / 4; + } - return max_filter_level; + return max_filter_level; } -void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) -{ - VP8_COMMON *cm = &cpi->common; +void vp8cx_pick_filter_level_fast(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) { + VP8_COMMON *cm = &cpi->common; - int best_err = 0; - int filt_err = 0; - int min_filter_level = get_min_filter_level(cpi, cm->base_qindex); - int max_filter_level = get_max_filter_level(cpi, cm->base_qindex); - int filt_val; - int best_filt_val; - YV12_BUFFER_CONFIG * saved_frame = cm->frame_to_show; + int best_err = 0; + int filt_err = 0; + int min_filter_level = get_min_filter_level(cpi, cm->base_qindex); + int max_filter_level = get_max_filter_level(cpi, cm->base_qindex); + int filt_val; + int best_filt_val; + YV12_BUFFER_CONFIG *saved_frame = cm->frame_to_show; - /* Replace unfiltered frame buffer with a new one */ - cm->frame_to_show = &cpi->pick_lf_lvl_frame; + /* 
Replace unfiltered frame buffer with a new one */ + cm->frame_to_show = &cpi->pick_lf_lvl_frame; - if (cm->frame_type == KEY_FRAME) - cm->sharpness_level = 0; - else - cm->sharpness_level = cpi->oxcf.Sharpness; + if (cm->frame_type == KEY_FRAME) { + cm->sharpness_level = 0; + } else { + cm->sharpness_level = cpi->oxcf.Sharpness; + } - if (cm->sharpness_level != cm->last_sharpness_level) - { - vp8_loop_filter_update_sharpness(&cm->lf_info, cm->sharpness_level); - cm->last_sharpness_level = cm->sharpness_level; - } + if (cm->sharpness_level != cm->last_sharpness_level) { + vp8_loop_filter_update_sharpness(&cm->lf_info, cm->sharpness_level); + cm->last_sharpness_level = cm->sharpness_level; + } - /* Start the search at the previous frame filter level unless it is - * now out of range. - */ - if (cm->filter_level < min_filter_level) - cm->filter_level = min_filter_level; - else if (cm->filter_level > max_filter_level) - cm->filter_level = max_filter_level; + /* Start the search at the previous frame filter level unless it is + * now out of range. + */ + if (cm->filter_level < min_filter_level) { + cm->filter_level = min_filter_level; + } else if (cm->filter_level > max_filter_level) { + cm->filter_level = max_filter_level; + } - filt_val = cm->filter_level; - best_filt_val = filt_val; + filt_val = cm->filter_level; + best_filt_val = filt_val; - /* Get the err using the previous frame's filter value. */ + /* Get the err using the previous frame's filter value. 
*/ - /* Copy the unfiltered / processed recon buffer to the new buffer */ + /* Copy the unfiltered / processed recon buffer to the new buffer */ + yv12_copy_partial_frame(saved_frame, cm->frame_to_show); + vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val); + + best_err = calc_partial_ssl_err(sd, cm->frame_to_show); + + filt_val -= 1 + (filt_val > 10); + + /* Search lower filter levels */ + while (filt_val >= min_filter_level) { + /* Apply the loop filter */ yv12_copy_partial_frame(saved_frame, cm->frame_to_show); vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val); - best_err = calc_partial_ssl_err(sd, cm->frame_to_show); + /* Get the err for filtered frame */ + filt_err = calc_partial_ssl_err(sd, cm->frame_to_show); + /* Update the best case record or exit loop. */ + if (filt_err < best_err) { + best_err = filt_err; + best_filt_val = filt_val; + } else { + break; + } + + /* Adjust filter level */ filt_val -= 1 + (filt_val > 10); + } - /* Search lower filter levels */ - while (filt_val >= min_filter_level) - { - /* Apply the loop filter */ - yv12_copy_partial_frame(saved_frame, cm->frame_to_show); - vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val); + /* Search up (note that we have already done filt_val = cm->filter_level) */ + filt_val = cm->filter_level + 1 + (filt_val > 10); - /* Get the err for filtered frame */ - filt_err = calc_partial_ssl_err(sd, cm->frame_to_show); + if (best_filt_val == cm->filter_level) { + /* Resist raising filter level for very small gains */ + best_err -= (best_err >> 10); - /* Update the best case record or exit loop. 
*/ - if (filt_err < best_err) - { - best_err = filt_err; - best_filt_val = filt_val; - } - else - break; + while (filt_val < max_filter_level) { + /* Apply the loop filter */ + yv12_copy_partial_frame(saved_frame, cm->frame_to_show); - /* Adjust filter level */ - filt_val -= 1 + (filt_val > 10); + vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val); + + /* Get the err for filtered frame */ + filt_err = calc_partial_ssl_err(sd, cm->frame_to_show); + + /* Update the best case record or exit loop. */ + if (filt_err < best_err) { + /* Do not raise filter level if improvement is < 1 part + * in 4096 + */ + best_err = filt_err - (filt_err >> 10); + + best_filt_val = filt_val; + } else { + break; + } + + /* Adjust filter level */ + filt_val += 1 + (filt_val > 10); } + } - /* Search up (note that we have already done filt_val = cm->filter_level) */ - filt_val = cm->filter_level + 1 + (filt_val > 10); + cm->filter_level = best_filt_val; - if (best_filt_val == cm->filter_level) - { - /* Resist raising filter level for very small gains */ - best_err -= (best_err >> 10); + if (cm->filter_level < min_filter_level) cm->filter_level = min_filter_level; - while (filt_val < max_filter_level) - { - /* Apply the loop filter */ - yv12_copy_partial_frame(saved_frame, cm->frame_to_show); + if (cm->filter_level > max_filter_level) cm->filter_level = max_filter_level; - vp8_loop_filter_partial_frame(cm, &cpi->mb.e_mbd, filt_val); - - /* Get the err for filtered frame */ - filt_err = calc_partial_ssl_err(sd, cm->frame_to_show); - - /* Update the best case record or exit loop. 
*/ - if (filt_err < best_err) - { - /* Do not raise filter level if improvement is < 1 part - * in 4096 - */ - best_err = filt_err - (filt_err >> 10); - - best_filt_val = filt_val; - } - else - break; - - /* Adjust filter level */ - filt_val += 1 + (filt_val > 10); - } - } - - cm->filter_level = best_filt_val; - - if (cm->filter_level < min_filter_level) - cm->filter_level = min_filter_level; - - if (cm->filter_level > max_filter_level) - cm->filter_level = max_filter_level; - - /* restore unfiltered frame pointer */ - cm->frame_to_show = saved_frame; + /* restore unfiltered frame pointer */ + cm->frame_to_show = saved_frame; } /* Stub function for now Alt LF not used */ -void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val) -{ - MACROBLOCKD *mbd = &cpi->mb.e_mbd; - (void) filt_val; +void vp8cx_set_alt_lf_level(VP8_COMP *cpi, int filt_val) { + MACROBLOCKD *mbd = &cpi->mb.e_mbd; + (void)filt_val; - mbd->segment_feature_data[MB_LVL_ALT_LF][0] = cpi->segment_feature_data[MB_LVL_ALT_LF][0]; - mbd->segment_feature_data[MB_LVL_ALT_LF][1] = cpi->segment_feature_data[MB_LVL_ALT_LF][1]; - mbd->segment_feature_data[MB_LVL_ALT_LF][2] = cpi->segment_feature_data[MB_LVL_ALT_LF][2]; - mbd->segment_feature_data[MB_LVL_ALT_LF][3] = cpi->segment_feature_data[MB_LVL_ALT_LF][3]; + mbd->segment_feature_data[MB_LVL_ALT_LF][0] = + cpi->segment_feature_data[MB_LVL_ALT_LF][0]; + mbd->segment_feature_data[MB_LVL_ALT_LF][1] = + cpi->segment_feature_data[MB_LVL_ALT_LF][1]; + mbd->segment_feature_data[MB_LVL_ALT_LF][2] = + cpi->segment_feature_data[MB_LVL_ALT_LF][2]; + mbd->segment_feature_data[MB_LVL_ALT_LF][3] = + cpi->segment_feature_data[MB_LVL_ALT_LF][3]; } -void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) -{ - VP8_COMMON *cm = &cpi->common; +void vp8cx_pick_filter_level(YV12_BUFFER_CONFIG *sd, VP8_COMP *cpi) { + VP8_COMMON *cm = &cpi->common; - int best_err = 0; - int filt_err = 0; - int min_filter_level = get_min_filter_level(cpi, cm->base_qindex); - int 
max_filter_level = get_max_filter_level(cpi, cm->base_qindex); + int best_err = 0; + int filt_err = 0; + int min_filter_level = get_min_filter_level(cpi, cm->base_qindex); + int max_filter_level = get_max_filter_level(cpi, cm->base_qindex); - int filter_step; - int filt_high = 0; - int filt_mid; - int filt_low = 0; - int filt_best; - int filt_direction = 0; + int filter_step; + int filt_high = 0; + int filt_mid; + int filt_low = 0; + int filt_best; + int filt_direction = 0; - /* Bias against raising loop filter and in favor of lowering it */ - int Bias = 0; + /* Bias against raising loop filter and in favor of lowering it */ + int Bias = 0; - int ss_err[MAX_LOOP_FILTER + 1]; + int ss_err[MAX_LOOP_FILTER + 1]; - YV12_BUFFER_CONFIG * saved_frame = cm->frame_to_show; + YV12_BUFFER_CONFIG *saved_frame = cm->frame_to_show; - memset(ss_err, 0, sizeof(ss_err)); + memset(ss_err, 0, sizeof(ss_err)); - /* Replace unfiltered frame buffer with a new one */ - cm->frame_to_show = &cpi->pick_lf_lvl_frame; + /* Replace unfiltered frame buffer with a new one */ + cm->frame_to_show = &cpi->pick_lf_lvl_frame; - if (cm->frame_type == KEY_FRAME) - cm->sharpness_level = 0; - else - cm->sharpness_level = cpi->oxcf.Sharpness; + if (cm->frame_type == KEY_FRAME) { + cm->sharpness_level = 0; + } else { + cm->sharpness_level = cpi->oxcf.Sharpness; + } - /* Start the search at the previous frame filter level unless it is - * now out of range. - */ - filt_mid = cm->filter_level; + /* Start the search at the previous frame filter level unless it is + * now out of range. + */ + filt_mid = cm->filter_level; - if (filt_mid < min_filter_level) - filt_mid = min_filter_level; - else if (filt_mid > max_filter_level) - filt_mid = max_filter_level; + if (filt_mid < min_filter_level) { + filt_mid = min_filter_level; + } else if (filt_mid > max_filter_level) { + filt_mid = max_filter_level; + } - /* Define the initial step size */ - filter_step = (filt_mid < 16) ? 
4 : filt_mid / 4; + /* Define the initial step size */ + filter_step = (filt_mid < 16) ? 4 : filt_mid / 4; - /* Get baseline error score */ + /* Get baseline error score */ - /* Copy the unfiltered / processed recon buffer to the new buffer */ - vpx_yv12_copy_y(saved_frame, cm->frame_to_show); + /* Copy the unfiltered / processed recon buffer to the new buffer */ + vpx_yv12_copy_y(saved_frame, cm->frame_to_show); - vp8cx_set_alt_lf_level(cpi, filt_mid); - vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid); + vp8cx_set_alt_lf_level(cpi, filt_mid); + vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_mid); - best_err = vp8_calc_ss_err(sd, cm->frame_to_show); + best_err = vp8_calc_ss_err(sd, cm->frame_to_show); - ss_err[filt_mid] = best_err; + ss_err[filt_mid] = best_err; - filt_best = filt_mid; + filt_best = filt_mid; - while (filter_step > 0) - { - Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; + while (filter_step > 0) { + Bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; - if (cpi->twopass.section_intra_rating < 20) - Bias = Bias * cpi->twopass.section_intra_rating / 20; - - filt_high = ((filt_mid + filter_step) > max_filter_level) ? max_filter_level : (filt_mid + filter_step); - filt_low = ((filt_mid - filter_step) < min_filter_level) ? min_filter_level : (filt_mid - filter_step); - - if ((filt_direction <= 0) && (filt_low != filt_mid)) - { - if(ss_err[filt_low] == 0) - { - /* Get Low filter error score */ - vpx_yv12_copy_y(saved_frame, cm->frame_to_show); - vp8cx_set_alt_lf_level(cpi, filt_low); - vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low); - - filt_err = vp8_calc_ss_err(sd, cm->frame_to_show); - ss_err[filt_low] = filt_err; - } - else - filt_err = ss_err[filt_low]; - - /* If value is close to the best so far then bias towards a - * lower loop filter value. - */ - if ((filt_err - Bias) < best_err) - { - /* Was it actually better than the previous best? 
*/ - if (filt_err < best_err) - best_err = filt_err; - - filt_best = filt_low; - } - } - - /* Now look at filt_high */ - if ((filt_direction >= 0) && (filt_high != filt_mid)) - { - if(ss_err[filt_high] == 0) - { - vpx_yv12_copy_y(saved_frame, cm->frame_to_show); - vp8cx_set_alt_lf_level(cpi, filt_high); - vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high); - - filt_err = vp8_calc_ss_err(sd, cm->frame_to_show); - ss_err[filt_high] = filt_err; - } - else - filt_err = ss_err[filt_high]; - - /* Was it better than the previous best? */ - if (filt_err < (best_err - Bias)) - { - best_err = filt_err; - filt_best = filt_high; - } - } - - /* Half the step distance if the best filter value was the same - * as last time - */ - if (filt_best == filt_mid) - { - filter_step = filter_step / 2; - filt_direction = 0; - } - else - { - filt_direction = (filt_best < filt_mid) ? -1 : 1; - filt_mid = filt_best; - } + if (cpi->twopass.section_intra_rating < 20) { + Bias = Bias * cpi->twopass.section_intra_rating / 20; } - cm->filter_level = filt_best; + filt_high = ((filt_mid + filter_step) > max_filter_level) + ? max_filter_level + : (filt_mid + filter_step); + filt_low = ((filt_mid - filter_step) < min_filter_level) + ? min_filter_level + : (filt_mid - filter_step); - /* restore unfiltered frame pointer */ - cm->frame_to_show = saved_frame; + if ((filt_direction <= 0) && (filt_low != filt_mid)) { + if (ss_err[filt_low] == 0) { + /* Get Low filter error score */ + vpx_yv12_copy_y(saved_frame, cm->frame_to_show); + vp8cx_set_alt_lf_level(cpi, filt_low); + vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_low); + + filt_err = vp8_calc_ss_err(sd, cm->frame_to_show); + ss_err[filt_low] = filt_err; + } else { + filt_err = ss_err[filt_low]; + } + + /* If value is close to the best so far then bias towards a + * lower loop filter value. + */ + if ((filt_err - Bias) < best_err) { + /* Was it actually better than the previous best? 
*/ + if (filt_err < best_err) best_err = filt_err; + + filt_best = filt_low; + } + } + + /* Now look at filt_high */ + if ((filt_direction >= 0) && (filt_high != filt_mid)) { + if (ss_err[filt_high] == 0) { + vpx_yv12_copy_y(saved_frame, cm->frame_to_show); + vp8cx_set_alt_lf_level(cpi, filt_high); + vp8_loop_filter_frame_yonly(cm, &cpi->mb.e_mbd, filt_high); + + filt_err = vp8_calc_ss_err(sd, cm->frame_to_show); + ss_err[filt_high] = filt_err; + } else { + filt_err = ss_err[filt_high]; + } + + /* Was it better than the previous best? */ + if (filt_err < (best_err - Bias)) { + best_err = filt_err; + filt_best = filt_high; + } + } + + /* Half the step distance if the best filter value was the same + * as last time + */ + if (filt_best == filt_mid) { + filter_step = filter_step / 2; + filt_direction = 0; + } else { + filt_direction = (filt_best < filt_mid) ? -1 : 1; + filt_mid = filt_best; + } + } + + cm->filter_level = filt_best; + + /* restore unfiltered frame pointer */ + cm->frame_to_show = saved_frame; } diff --git a/libs/libvpx/vp8/encoder/quantize.h b/libs/libvpx/vp8/encoder/quantize.h index 7d36c2b45f..267150f99f 100644 --- a/libs/libvpx/vp8/encoder/quantize.h +++ b/libs/libvpx/vp8/encoder/quantize.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_ENCODER_QUANTIZE_H_ #define VP8_ENCODER_QUANTIZE_H_ @@ -24,7 +23,8 @@ extern void vp8_quantize_mbuv(struct macroblock *x); extern void vp8_set_quantizer(struct VP8_COMP *cpi, int Q); extern void vp8cx_frame_init_quantizer(struct VP8_COMP *cpi); extern void vp8_update_zbin_extra(struct VP8_COMP *cpi, struct macroblock *x); -extern void vp8cx_mb_init_quantizer(struct VP8_COMP *cpi, struct macroblock *x, int ok_to_skip); +extern void vp8cx_mb_init_quantizer(struct VP8_COMP *cpi, struct macroblock *x, + int ok_to_skip); extern void vp8cx_init_quantizer(struct VP8_COMP *cpi); #ifdef __cplusplus diff --git a/libs/libvpx/vp8/encoder/ratectrl.c b/libs/libvpx/vp8/encoder/ratectrl.c index 7da3d71adc..649f696dc7 100644 --- a/libs/libvpx/vp8/encoder/ratectrl.c +++ b/libs/libvpx/vp8/encoder/ratectrl.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include #include #include @@ -24,14 +23,11 @@ #include "encodemv.h" #include "vpx_dsp/vpx_dsp_common.h" - -#define MIN_BPB_FACTOR 0.01 -#define MAX_BPB_FACTOR 50 +#define MIN_BPB_FACTOR 0.01 +#define MAX_BPB_FACTOR 50 extern const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES]; - - #ifdef MODE_STATS extern int y_modes[5]; extern int uv_modes[4]; @@ -43,94 +39,75 @@ extern int inter_b_modes[10]; #endif /* Bits Per MB at different Q (Multiplied by 512) */ -#define BPER_MB_NORMBITS 9 +#define BPER_MB_NORMBITS 9 /* Work in progress recalibration of baseline rate tables based on * the assumption that bits per mb is inversely proportional to the * quantizer value. 
*/ -const int vp8_bits_per_mb[2][QINDEX_RANGE] = -{ - /* Intra case 450000/Qintra */ - { - 1125000,900000, 750000, 642857, 562500, 500000, 450000, 450000, - 409090, 375000, 346153, 321428, 300000, 281250, 264705, 264705, - 250000, 236842, 225000, 225000, 214285, 214285, 204545, 204545, - 195652, 195652, 187500, 180000, 180000, 173076, 166666, 160714, - 155172, 150000, 145161, 140625, 136363, 132352, 128571, 125000, - 121621, 121621, 118421, 115384, 112500, 109756, 107142, 104651, - 102272, 100000, 97826, 97826, 95744, 93750, 91836, 90000, - 88235, 86538, 84905, 83333, 81818, 80357, 78947, 77586, - 76271, 75000, 73770, 72580, 71428, 70312, 69230, 68181, - 67164, 66176, 65217, 64285, 63380, 62500, 61643, 60810, - 60000, 59210, 59210, 58441, 57692, 56962, 56250, 55555, - 54878, 54216, 53571, 52941, 52325, 51724, 51136, 50561, - 49450, 48387, 47368, 46875, 45918, 45000, 44554, 44117, - 43269, 42452, 41666, 40909, 40178, 39473, 38793, 38135, - 36885, 36290, 35714, 35156, 34615, 34090, 33582, 33088, - 32608, 32142, 31468, 31034, 30405, 29801, 29220, 28662, - }, - /* Inter case 285000/Qinter */ - { - 712500, 570000, 475000, 407142, 356250, 316666, 285000, 259090, - 237500, 219230, 203571, 190000, 178125, 167647, 158333, 150000, - 142500, 135714, 129545, 123913, 118750, 114000, 109615, 105555, - 101785, 98275, 95000, 91935, 89062, 86363, 83823, 81428, - 79166, 77027, 75000, 73076, 71250, 69512, 67857, 66279, - 64772, 63333, 61956, 60638, 59375, 58163, 57000, 55882, - 54807, 53773, 52777, 51818, 50892, 50000, 49137, 47500, - 45967, 44531, 43181, 41911, 40714, 39583, 38513, 37500, - 36538, 35625, 34756, 33928, 33139, 32386, 31666, 30978, - 30319, 29687, 29081, 28500, 27941, 27403, 26886, 26388, - 25909, 25446, 25000, 24568, 23949, 23360, 22800, 22265, - 21755, 21268, 20802, 20357, 19930, 19520, 19127, 18750, - 18387, 18037, 17701, 17378, 17065, 16764, 16473, 16101, - 15745, 15405, 15079, 14766, 14467, 14179, 13902, 13636, - 13380, 13133, 12895, 12666, 12445, 12179, 11924, 
11632, - 11445, 11220, 11003, 10795, 10594, 10401, 10215, 10035, - } +const int vp8_bits_per_mb[2][QINDEX_RANGE] = { + /* Intra case 450000/Qintra */ + { + 1125000, 900000, 750000, 642857, 562500, 500000, 450000, 450000, 409090, + 375000, 346153, 321428, 300000, 281250, 264705, 264705, 250000, 236842, + 225000, 225000, 214285, 214285, 204545, 204545, 195652, 195652, 187500, + 180000, 180000, 173076, 166666, 160714, 155172, 150000, 145161, 140625, + 136363, 132352, 128571, 125000, 121621, 121621, 118421, 115384, 112500, + 109756, 107142, 104651, 102272, 100000, 97826, 97826, 95744, 93750, + 91836, 90000, 88235, 86538, 84905, 83333, 81818, 80357, 78947, + 77586, 76271, 75000, 73770, 72580, 71428, 70312, 69230, 68181, + 67164, 66176, 65217, 64285, 63380, 62500, 61643, 60810, 60000, + 59210, 59210, 58441, 57692, 56962, 56250, 55555, 54878, 54216, + 53571, 52941, 52325, 51724, 51136, 50561, 49450, 48387, 47368, + 46875, 45918, 45000, 44554, 44117, 43269, 42452, 41666, 40909, + 40178, 39473, 38793, 38135, 36885, 36290, 35714, 35156, 34615, + 34090, 33582, 33088, 32608, 32142, 31468, 31034, 30405, 29801, + 29220, 28662, + }, + /* Inter case 285000/Qinter */ + { + 712500, 570000, 475000, 407142, 356250, 316666, 285000, 259090, 237500, + 219230, 203571, 190000, 178125, 167647, 158333, 150000, 142500, 135714, + 129545, 123913, 118750, 114000, 109615, 105555, 101785, 98275, 95000, + 91935, 89062, 86363, 83823, 81428, 79166, 77027, 75000, 73076, + 71250, 69512, 67857, 66279, 64772, 63333, 61956, 60638, 59375, + 58163, 57000, 55882, 54807, 53773, 52777, 51818, 50892, 50000, + 49137, 47500, 45967, 44531, 43181, 41911, 40714, 39583, 38513, + 37500, 36538, 35625, 34756, 33928, 33139, 32386, 31666, 30978, + 30319, 29687, 29081, 28500, 27941, 27403, 26886, 26388, 25909, + 25446, 25000, 24568, 23949, 23360, 22800, 22265, 21755, 21268, + 20802, 20357, 19930, 19520, 19127, 18750, 18387, 18037, 17701, + 17378, 17065, 16764, 16473, 16101, 15745, 15405, 15079, 14766, + 14467, 14179, 
13902, 13636, 13380, 13133, 12895, 12666, 12445, + 12179, 11924, 11632, 11445, 11220, 11003, 10795, 10594, 10401, + 10215, 10035, + } }; -static const int kf_boost_qadjustment[QINDEX_RANGE] = -{ - 128, 129, 130, 131, 132, 133, 134, 135, - 136, 137, 138, 139, 140, 141, 142, 143, - 144, 145, 146, 147, 148, 149, 150, 151, - 152, 153, 154, 155, 156, 157, 158, 159, - 160, 161, 162, 163, 164, 165, 166, 167, - 168, 169, 170, 171, 172, 173, 174, 175, - 176, 177, 178, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, - 192, 193, 194, 195, 196, 197, 198, 199, - 200, 200, 201, 201, 202, 203, 203, 203, - 204, 204, 205, 205, 206, 206, 207, 207, - 208, 208, 209, 209, 210, 210, 211, 211, - 212, 212, 213, 213, 214, 214, 215, 215, - 216, 216, 217, 217, 218, 218, 219, 219, - 220, 220, 220, 220, 220, 220, 220, 220, - 220, 220, 220, 220, 220, 220, 220, 220, +static const int kf_boost_qadjustment[QINDEX_RANGE] = { + 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 200, 201, + 201, 202, 203, 203, 203, 204, 204, 205, 205, 206, 206, 207, 207, 208, 208, + 209, 209, 210, 210, 211, 211, 212, 212, 213, 213, 214, 214, 215, 215, 216, + 216, 217, 217, 218, 218, 219, 219, 220, 220, 220, 220, 220, 220, 220, 220, + 220, 220, 220, 220, 220, 220, 220, 220, }; /* #define GFQ_ADJUSTMENT (Q+100) */ #define GFQ_ADJUSTMENT vp8_gf_boost_qadjustment[Q] -const int vp8_gf_boost_qadjustment[QINDEX_RANGE] = -{ - 80, 82, 84, 86, 88, 90, 92, 94, - 96, 97, 98, 99, 100, 101, 102, 103, - 104, 105, 106, 107, 108, 109, 110, 111, - 112, 113, 114, 115, 116, 117, 118, 119, - 120, 121, 122, 123, 124, 125, 126, 127, - 128, 129, 130, 131, 132, 133, 134, 135, - 136, 137, 138, 139, 140, 141, 
142, 143, - 144, 145, 146, 147, 148, 149, 150, 151, - 152, 153, 154, 155, 156, 157, 158, 159, - 160, 161, 162, 163, 164, 165, 166, 167, - 168, 169, 170, 171, 172, 173, 174, 175, - 176, 177, 178, 179, 180, 181, 182, 183, - 184, 184, 185, 185, 186, 186, 187, 187, - 188, 188, 189, 189, 190, 190, 191, 191, - 192, 192, 193, 193, 194, 194, 194, 194, - 195, 195, 196, 196, 197, 197, 198, 198 +const int vp8_gf_boost_qadjustment[QINDEX_RANGE] = { + 80, 82, 84, 86, 88, 90, 92, 94, 96, 97, 98, 99, 100, 101, 102, + 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, + 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 184, 185, 185, 186, 186, 187, 187, 188, + 188, 189, 189, 190, 190, 191, 191, 192, 192, 193, 193, 194, 194, 194, 194, + 195, 195, 196, 196, 197, 197, 198, 198 }; /* @@ -155,272 +132,234 @@ const int vp8_gf_boost_qadjustment[QINDEX_RANGE] = }; */ -static const int kf_gf_boost_qlimits[QINDEX_RANGE] = -{ - 150, 155, 160, 165, 170, 175, 180, 185, - 190, 195, 200, 205, 210, 215, 220, 225, - 230, 235, 240, 245, 250, 255, 260, 265, - 270, 275, 280, 285, 290, 295, 300, 305, - 310, 320, 330, 340, 350, 360, 370, 380, - 390, 400, 410, 420, 430, 440, 450, 460, - 470, 480, 490, 500, 510, 520, 530, 540, - 550, 560, 570, 580, 590, 600, 600, 600, - 600, 600, 600, 600, 600, 600, 600, 600, - 600, 600, 600, 600, 600, 600, 600, 600, - 600, 600, 600, 600, 600, 600, 600, 600, - 600, 600, 600, 600, 600, 600, 600, 600, - 600, 600, 600, 600, 600, 600, 600, 600, - 600, 600, 600, 600, 600, 600, 600, 600, - 600, 600, 600, 600, 600, 600, 600, 600, - 600, 600, 600, 600, 600, 600, 600, 600, +static const int kf_gf_boost_qlimits[QINDEX_RANGE] = { + 150, 155, 160, 165, 170, 175, 180, 185, 
190, 195, 200, 205, 210, 215, 220, + 225, 230, 235, 240, 245, 250, 255, 260, 265, 270, 275, 280, 285, 290, 295, + 300, 305, 310, 320, 330, 340, 350, 360, 370, 380, 390, 400, 410, 420, 430, + 440, 450, 460, 470, 480, 490, 500, 510, 520, 530, 540, 550, 560, 570, 580, + 590, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, + 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, + 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, + 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, + 600, 600, 600, 600, 600, 600, 600, 600, }; -static const int gf_adjust_table[101] = -{ - 100, - 115, 130, 145, 160, 175, 190, 200, 210, 220, 230, - 240, 260, 270, 280, 290, 300, 310, 320, 330, 340, - 350, 360, 370, 380, 390, 400, 400, 400, 400, 400, - 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, - 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, - 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, - 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, - 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, - 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, - 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, +static const int gf_adjust_table[101] = { + 100, 115, 130, 145, 160, 175, 190, 200, 210, 220, 230, 240, 260, 270, 280, + 290, 300, 310, 320, 330, 340, 350, 360, 370, 380, 390, 400, 400, 400, 400, + 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, + 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, + 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, + 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, + 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, 400, }; -static const int gf_intra_usage_adjustment[20] = -{ - 125, 120, 115, 110, 105, 100, 95, 85, 80, 75, - 70, 65, 60, 55, 50, 50, 50, 50, 50, 50, +static const int gf_intra_usage_adjustment[20] = { + 125, 120, 115, 110, 105, 100, 95, 85, 80, 75, + 
70, 65, 60, 55, 50, 50, 50, 50, 50, 50, }; -static const int gf_interval_table[101] = -{ - 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, +static const int gf_interval_table[101] = { + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, }; -static const unsigned int prior_key_frame_weight[KEY_FRAME_CONTEXT] = { 1, 2, 3, 4, 5 }; +static const unsigned int prior_key_frame_weight[KEY_FRAME_CONTEXT] = { 1, 2, 3, + 4, 5 }; +void vp8_save_coding_context(VP8_COMP *cpi) { + CODING_CONTEXT *const cc = &cpi->coding_context; -void vp8_save_coding_context(VP8_COMP *cpi) -{ - CODING_CONTEXT *const cc = & cpi->coding_context; + /* Stores a snapshot of key state variables which can subsequently be + * restored with a call to vp8_restore_coding_context. These functions are + * intended for use in a re-code loop in vp8_compress_frame where the + * quantizer value is adjusted between loop iterations. + */ - /* Stores a snapshot of key state variables which can subsequently be - * restored with a call to vp8_restore_coding_context. These functions are - * intended for use in a re-code loop in vp8_compress_frame where the - * quantizer value is adjusted between loop iterations. 
- */ + cc->frames_since_key = cpi->frames_since_key; + cc->filter_level = cpi->common.filter_level; + cc->frames_till_gf_update_due = cpi->frames_till_gf_update_due; + cc->frames_since_golden = cpi->frames_since_golden; - cc->frames_since_key = cpi->frames_since_key; - cc->filter_level = cpi->common.filter_level; - cc->frames_till_gf_update_due = cpi->frames_till_gf_update_due; - cc->frames_since_golden = cpi->frames_since_golden; + vp8_copy(cc->mvc, cpi->common.fc.mvc); + vp8_copy(cc->mvcosts, cpi->rd_costs.mvcosts); - vp8_copy(cc->mvc, cpi->common.fc.mvc); - vp8_copy(cc->mvcosts, cpi->rd_costs.mvcosts); + vp8_copy(cc->ymode_prob, cpi->common.fc.ymode_prob); + vp8_copy(cc->uv_mode_prob, cpi->common.fc.uv_mode_prob); - vp8_copy(cc->ymode_prob, cpi->common.fc.ymode_prob); - vp8_copy(cc->uv_mode_prob, cpi->common.fc.uv_mode_prob); + vp8_copy(cc->ymode_count, cpi->mb.ymode_count); + vp8_copy(cc->uv_mode_count, cpi->mb.uv_mode_count); - vp8_copy(cc->ymode_count, cpi->mb.ymode_count); - vp8_copy(cc->uv_mode_count, cpi->mb.uv_mode_count); - - - /* Stats */ +/* Stats */ #ifdef MODE_STATS - vp8_copy(cc->y_modes, y_modes); - vp8_copy(cc->uv_modes, uv_modes); - vp8_copy(cc->b_modes, b_modes); - vp8_copy(cc->inter_y_modes, inter_y_modes); - vp8_copy(cc->inter_uv_modes, inter_uv_modes); - vp8_copy(cc->inter_b_modes, inter_b_modes); + vp8_copy(cc->y_modes, y_modes); + vp8_copy(cc->uv_modes, uv_modes); + vp8_copy(cc->b_modes, b_modes); + vp8_copy(cc->inter_y_modes, inter_y_modes); + vp8_copy(cc->inter_uv_modes, inter_uv_modes); + vp8_copy(cc->inter_b_modes, inter_b_modes); #endif - cc->this_frame_percent_intra = cpi->this_frame_percent_intra; + cc->this_frame_percent_intra = cpi->this_frame_percent_intra; } +void vp8_restore_coding_context(VP8_COMP *cpi) { + CODING_CONTEXT *const cc = &cpi->coding_context; -void vp8_restore_coding_context(VP8_COMP *cpi) -{ - CODING_CONTEXT *const cc = & cpi->coding_context; + /* Restore key state variables to the snapshot state stored in the + * 
previous call to vp8_save_coding_context. + */ - /* Restore key state variables to the snapshot state stored in the - * previous call to vp8_save_coding_context. - */ + cpi->frames_since_key = cc->frames_since_key; + cpi->common.filter_level = cc->filter_level; + cpi->frames_till_gf_update_due = cc->frames_till_gf_update_due; + cpi->frames_since_golden = cc->frames_since_golden; - cpi->frames_since_key = cc->frames_since_key; - cpi->common.filter_level = cc->filter_level; - cpi->frames_till_gf_update_due = cc->frames_till_gf_update_due; - cpi->frames_since_golden = cc->frames_since_golden; + vp8_copy(cpi->common.fc.mvc, cc->mvc); - vp8_copy(cpi->common.fc.mvc, cc->mvc); + vp8_copy(cpi->rd_costs.mvcosts, cc->mvcosts); - vp8_copy(cpi->rd_costs.mvcosts, cc->mvcosts); + vp8_copy(cpi->common.fc.ymode_prob, cc->ymode_prob); + vp8_copy(cpi->common.fc.uv_mode_prob, cc->uv_mode_prob); - vp8_copy(cpi->common.fc.ymode_prob, cc->ymode_prob); - vp8_copy(cpi->common.fc.uv_mode_prob, cc->uv_mode_prob); + vp8_copy(cpi->mb.ymode_count, cc->ymode_count); + vp8_copy(cpi->mb.uv_mode_count, cc->uv_mode_count); - vp8_copy(cpi->mb.ymode_count, cc->ymode_count); - vp8_copy(cpi->mb.uv_mode_count, cc->uv_mode_count); - - /* Stats */ +/* Stats */ #ifdef MODE_STATS - vp8_copy(y_modes, cc->y_modes); - vp8_copy(uv_modes, cc->uv_modes); - vp8_copy(b_modes, cc->b_modes); - vp8_copy(inter_y_modes, cc->inter_y_modes); - vp8_copy(inter_uv_modes, cc->inter_uv_modes); - vp8_copy(inter_b_modes, cc->inter_b_modes); + vp8_copy(y_modes, cc->y_modes); + vp8_copy(uv_modes, cc->uv_modes); + vp8_copy(b_modes, cc->b_modes); + vp8_copy(inter_y_modes, cc->inter_y_modes); + vp8_copy(inter_uv_modes, cc->inter_uv_modes); + vp8_copy(inter_b_modes, cc->inter_b_modes); #endif - - cpi->this_frame_percent_intra = cc->this_frame_percent_intra; + cpi->this_frame_percent_intra = cc->this_frame_percent_intra; } +void vp8_setup_key_frame(VP8_COMP *cpi) { + /* Setup for Key frame: */ -void vp8_setup_key_frame(VP8_COMP *cpi) 
-{ - /* Setup for Key frame: */ + vp8_default_coef_probs(&cpi->common); - vp8_default_coef_probs(& cpi->common); + memcpy(cpi->common.fc.mvc, vp8_default_mv_context, + sizeof(vp8_default_mv_context)); + { + int flag[2] = { 1, 1 }; + vp8_build_component_cost_table( + cpi->mb.mvcost, (const MV_CONTEXT *)cpi->common.fc.mvc, flag); + } - memcpy(cpi->common.fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context)); - { - int flag[2] = {1, 1}; - vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cpi->common.fc.mvc, flag); - } + /* Make sure we initialize separate contexts for altref,gold, and normal. + * TODO shouldn't need 3 different copies of structure to do this! + */ + memcpy(&cpi->lfc_a, &cpi->common.fc, sizeof(cpi->common.fc)); + memcpy(&cpi->lfc_g, &cpi->common.fc, sizeof(cpi->common.fc)); + memcpy(&cpi->lfc_n, &cpi->common.fc, sizeof(cpi->common.fc)); - /* Make sure we initialize separate contexts for altref,gold, and normal. - * TODO shouldn't need 3 different copies of structure to do this! 
- */ - memcpy(&cpi->lfc_a, &cpi->common.fc, sizeof(cpi->common.fc)); - memcpy(&cpi->lfc_g, &cpi->common.fc, sizeof(cpi->common.fc)); - memcpy(&cpi->lfc_n, &cpi->common.fc, sizeof(cpi->common.fc)); + cpi->common.filter_level = cpi->common.base_qindex * 3 / 8; - cpi->common.filter_level = cpi->common.base_qindex * 3 / 8 ; + /* Provisional interval before next GF */ + if (cpi->auto_gold) { + cpi->frames_till_gf_update_due = cpi->baseline_gf_interval; + } else { + cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL; + } - /* Provisional interval before next GF */ - if (cpi->auto_gold) - cpi->frames_till_gf_update_due = cpi->baseline_gf_interval; - else - cpi->frames_till_gf_update_due = DEFAULT_GF_INTERVAL; - - cpi->common.refresh_golden_frame = 1; - cpi->common.refresh_alt_ref_frame = 1; + cpi->common.refresh_golden_frame = 1; + cpi->common.refresh_alt_ref_frame = 1; } - static int estimate_bits_at_q(int frame_kind, int Q, int MBs, - double correction_factor) -{ - int Bpm = (int)(.5 + correction_factor * vp8_bits_per_mb[frame_kind][Q]); + double correction_factor) { + int Bpm = (int)(.5 + correction_factor * vp8_bits_per_mb[frame_kind][Q]); - /* Attempt to retain reasonable accuracy without overflow. The cutoff is - * chosen such that the maximum product of Bpm and MBs fits 31 bits. The - * largest Bpm takes 20 bits. - */ - if (MBs > (1 << 11)) - return (Bpm >> BPER_MB_NORMBITS) * MBs; - else - return (Bpm * MBs) >> BPER_MB_NORMBITS; + /* Attempt to retain reasonable accuracy without overflow. The cutoff is + * chosen such that the maximum product of Bpm and MBs fits 31 bits. The + * largest Bpm takes 20 bits. 
+ */ + if (MBs > (1 << 11)) { + return (Bpm >> BPER_MB_NORMBITS) * MBs; + } else { + return (Bpm * MBs) >> BPER_MB_NORMBITS; + } } +static void calc_iframe_target_size(VP8_COMP *cpi) { + /* boost defaults to half second */ + int kf_boost; + uint64_t target; -static void calc_iframe_target_size(VP8_COMP *cpi) -{ - /* boost defaults to half second */ - int kf_boost; - uint64_t target; + /* Clear down mmx registers to allow floating point in what follows */ + vp8_clear_system_state(); - /* Clear down mmx registers to allow floating point in what follows */ - vp8_clear_system_state(); + if (cpi->oxcf.fixed_q >= 0) { + int Q = cpi->oxcf.key_q; - if (cpi->oxcf.fixed_q >= 0) - { - int Q = cpi->oxcf.key_q; - - target = estimate_bits_at_q(INTRA_FRAME, Q, cpi->common.MBs, - cpi->key_frame_rate_correction_factor); - } - else if (cpi->pass == 2) - { - /* New Two pass RC */ - target = cpi->per_frame_bandwidth; - } - /* First Frame is a special case */ - else if (cpi->common.current_video_frame == 0) - { - /* 1 Pass there is no information on which to base size so use - * bandwidth per second * fraction of the initial buffer - * level - */ - target = cpi->oxcf.starting_buffer_level / 2; - - if(target > cpi->oxcf.target_bandwidth * 3 / 2) - target = cpi->oxcf.target_bandwidth * 3 / 2; - } - else - { - /* if this keyframe was forced, use a more recent Q estimate */ - int Q = (cpi->common.frame_flags & FRAMEFLAGS_KEY) - ? cpi->avg_frame_qindex : cpi->ni_av_qi; - - int initial_boost = 32; /* |3.0 * per_frame_bandwidth| */ - /* Boost depends somewhat on frame rate: only used for 1 layer case. */ - if (cpi->oxcf.number_of_layers == 1) { - kf_boost = VPXMAX(initial_boost, - (int)(2 * cpi->output_framerate - 16)); - } - else { - /* Initial factor: set target size to: |3.0 * per_frame_bandwidth|. */ - kf_boost = initial_boost; - } - - /* adjustment up based on q: this factor ranges from ~1.2 to 2.2. 
*/ - kf_boost = kf_boost * kf_boost_qadjustment[Q] / 100; - - /* frame separation adjustment ( down) */ - if (cpi->frames_since_key < cpi->output_framerate / 2) - kf_boost = (int)(kf_boost - * cpi->frames_since_key / (cpi->output_framerate / 2)); - - /* Minimal target size is |2* per_frame_bandwidth|. */ - if (kf_boost < 16) - kf_boost = 16; - - target = ((16 + kf_boost) * cpi->per_frame_bandwidth) >> 4; - } - - - if (cpi->oxcf.rc_max_intra_bitrate_pct) - { - unsigned int max_rate = cpi->per_frame_bandwidth - * cpi->oxcf.rc_max_intra_bitrate_pct / 100; - - if (target > max_rate) - target = max_rate; - } - - cpi->this_frame_target = (int)target; - - /* TODO: if we separate rate targeting from Q targetting, move this. - * Reset the active worst quality to the baseline value for key frames. + target = estimate_bits_at_q(INTRA_FRAME, Q, cpi->common.MBs, + cpi->key_frame_rate_correction_factor); + } else if (cpi->pass == 2) { + /* New Two pass RC */ + target = cpi->per_frame_bandwidth; + } + /* First Frame is a special case */ + else if (cpi->common.current_video_frame == 0) { + /* 1 Pass there is no information on which to base size so use + * bandwidth per second * fraction of the initial buffer + * level */ - if (cpi->pass != 2) - cpi->active_worst_quality = cpi->worst_quality; + target = cpi->oxcf.starting_buffer_level / 2; + + if (target > cpi->oxcf.target_bandwidth * 3 / 2) { + target = cpi->oxcf.target_bandwidth * 3 / 2; + } + } else { + /* if this keyframe was forced, use a more recent Q estimate */ + int Q = (cpi->common.frame_flags & FRAMEFLAGS_KEY) ? cpi->avg_frame_qindex + : cpi->ni_av_qi; + + int initial_boost = 32; /* |3.0 * per_frame_bandwidth| */ + /* Boost depends somewhat on frame rate: only used for 1 layer case. */ + if (cpi->oxcf.number_of_layers == 1) { + kf_boost = VPXMAX(initial_boost, (int)(2 * cpi->output_framerate - 16)); + } else { + /* Initial factor: set target size to: |3.0 * per_frame_bandwidth|. 
*/ + kf_boost = initial_boost; + } + + /* adjustment up based on q: this factor ranges from ~1.2 to 2.2. */ + kf_boost = kf_boost * kf_boost_qadjustment[Q] / 100; + + /* frame separation adjustment ( down) */ + if (cpi->frames_since_key < cpi->output_framerate / 2) { + kf_boost = + (int)(kf_boost * cpi->frames_since_key / (cpi->output_framerate / 2)); + } + + /* Minimal target size is |2* per_frame_bandwidth|. */ + if (kf_boost < 16) kf_boost = 16; + + target = ((16 + kf_boost) * cpi->per_frame_bandwidth) >> 4; + } + + if (cpi->oxcf.rc_max_intra_bitrate_pct) { + unsigned int max_rate = + cpi->per_frame_bandwidth * cpi->oxcf.rc_max_intra_bitrate_pct / 100; + + if (target > max_rate) target = max_rate; + } + + cpi->this_frame_target = (int)target; + + /* TODO: if we separate rate targeting from Q targetting, move this. + * Reset the active worst quality to the baseline value for key frames. + */ + if (cpi->pass != 2) cpi->active_worst_quality = cpi->worst_quality; #if 0 { @@ -435,40 +374,39 @@ static void calc_iframe_target_size(VP8_COMP *cpi) #endif } - /* Do the best we can to define the parameters for the next GF based on what * information we have available. */ -static void calc_gf_params(VP8_COMP *cpi) -{ - int Q = (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q; - int Boost = 0; +static void calc_gf_params(VP8_COMP *cpi) { + int Q = + (cpi->oxcf.fixed_q < 0) ? 
cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q; + int Boost = 0; - int gf_frame_useage = 0; /* Golden frame useage since last GF */ - int tot_mbs = cpi->recent_ref_frame_usage[INTRA_FRAME] + - cpi->recent_ref_frame_usage[LAST_FRAME] + - cpi->recent_ref_frame_usage[GOLDEN_FRAME] + - cpi->recent_ref_frame_usage[ALTREF_FRAME]; + int gf_frame_useage = 0; /* Golden frame useage since last GF */ + int tot_mbs = cpi->recent_ref_frame_usage[INTRA_FRAME] + + cpi->recent_ref_frame_usage[LAST_FRAME] + + cpi->recent_ref_frame_usage[GOLDEN_FRAME] + + cpi->recent_ref_frame_usage[ALTREF_FRAME]; - int pct_gf_active = (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols); + int pct_gf_active = (100 * cpi->gf_active_count) / + (cpi->common.mb_rows * cpi->common.mb_cols); - if (tot_mbs) - gf_frame_useage = (cpi->recent_ref_frame_usage[GOLDEN_FRAME] + cpi->recent_ref_frame_usage[ALTREF_FRAME]) * 100 / tot_mbs; + if (tot_mbs) { + gf_frame_useage = (cpi->recent_ref_frame_usage[GOLDEN_FRAME] + + cpi->recent_ref_frame_usage[ALTREF_FRAME]) * + 100 / tot_mbs; + } - if (pct_gf_active > gf_frame_useage) - gf_frame_useage = pct_gf_active; + if (pct_gf_active > gf_frame_useage) gf_frame_useage = pct_gf_active; - /* Not two pass */ - if (cpi->pass != 2) - { - /* Single Pass lagged mode: TBD */ - if (0) - { - } + /* Not two pass */ + if (cpi->pass != 2) { + /* Single Pass lagged mode: TBD */ + if (0) { + } - /* Single Pass compression: Has to use current and historical data */ - else - { + /* Single Pass compression: Has to use current and historical data */ + else { #if 0 /* Experimental code */ int index = cpi->one_pass_frame_index; @@ -517,430 +455,399 @@ static void calc_gf_params(VP8_COMP *cpi) */ #else - /*************************************************************/ - /* OLD code */ + /*************************************************************/ + /* OLD code */ - /* Adjust boost based upon ambient Q */ - Boost = GFQ_ADJUSTMENT; + /* Adjust boost based upon ambient Q 
*/ + Boost = GFQ_ADJUSTMENT; - /* Adjust based upon most recently measure intra useage */ - Boost = Boost * gf_intra_usage_adjustment[(cpi->this_frame_percent_intra < 15) ? cpi->this_frame_percent_intra : 14] / 100; + /* Adjust based upon most recently measure intra useage */ + Boost = Boost * + gf_intra_usage_adjustment[(cpi->this_frame_percent_intra < 15) + ? cpi->this_frame_percent_intra + : 14] / + 100; - /* Adjust gf boost based upon GF usage since last GF */ - Boost = Boost * gf_adjust_table[gf_frame_useage] / 100; + /* Adjust gf boost based upon GF usage since last GF */ + Boost = Boost * gf_adjust_table[gf_frame_useage] / 100; #endif - } - - /* golden frame boost without recode loop often goes awry. be - * safe by keeping numbers down. - */ - if (!cpi->sf.recode_loop) - { - if (cpi->compressor_speed == 2) - Boost = Boost / 2; - } - - /* Apply an upper limit based on Q for 1 pass encodes */ - if (Boost > kf_gf_boost_qlimits[Q] && (cpi->pass == 0)) - Boost = kf_gf_boost_qlimits[Q]; - - /* Apply lower limits to boost. */ - else if (Boost < 110) - Boost = 110; - - /* Note the boost used */ - cpi->last_boost = Boost; - } - /* Estimate next interval - * This is updated once the real frame size/boost is known. + /* golden frame boost without recode loop often goes awry. be + * safe by keeping numbers down. 
*/ - if (cpi->oxcf.fixed_q == -1) - { - if (cpi->pass == 2) /* 2 Pass */ - { - cpi->frames_till_gf_update_due = cpi->baseline_gf_interval; - } - else /* 1 Pass */ - { - cpi->frames_till_gf_update_due = cpi->baseline_gf_interval; - - if (cpi->last_boost > 750) - cpi->frames_till_gf_update_due++; - - if (cpi->last_boost > 1000) - cpi->frames_till_gf_update_due++; - - if (cpi->last_boost > 1250) - cpi->frames_till_gf_update_due++; - - if (cpi->last_boost >= 1500) - cpi->frames_till_gf_update_due ++; - - if (gf_interval_table[gf_frame_useage] > cpi->frames_till_gf_update_due) - cpi->frames_till_gf_update_due = gf_interval_table[gf_frame_useage]; - - if (cpi->frames_till_gf_update_due > cpi->max_gf_interval) - cpi->frames_till_gf_update_due = cpi->max_gf_interval; - } + if (!cpi->sf.recode_loop) { + if (cpi->compressor_speed == 2) Boost = Boost / 2; } - else - cpi->frames_till_gf_update_due = cpi->baseline_gf_interval; - /* ARF on or off */ - if (cpi->pass != 2) - { - /* For now Alt ref is not allowed except in 2 pass modes. */ - cpi->source_alt_ref_pending = 0; + /* Apply an upper limit based on Q for 1 pass encodes */ + if (Boost > kf_gf_boost_qlimits[Q] && (cpi->pass == 0)) { + Boost = kf_gf_boost_qlimits[Q]; - /*if ( cpi->oxcf.fixed_q == -1) - { - if ( cpi->oxcf.play_alternate && (cpi->last_boost > (100 + (AF_THRESH*cpi->frames_till_gf_update_due)) ) ) - cpi->source_alt_ref_pending = 1; - else - cpi->source_alt_ref_pending = 0; - }*/ + /* Apply lower limits to boost. */ + } else if (Boost < 110) { + Boost = 110; } + + /* Note the boost used */ + cpi->last_boost = Boost; + } + + /* Estimate next interval + * This is updated once the real frame size/boost is known. 
+ */ + if (cpi->oxcf.fixed_q == -1) { + if (cpi->pass == 2) /* 2 Pass */ + { + cpi->frames_till_gf_update_due = cpi->baseline_gf_interval; + } else /* 1 Pass */ + { + cpi->frames_till_gf_update_due = cpi->baseline_gf_interval; + + if (cpi->last_boost > 750) cpi->frames_till_gf_update_due++; + + if (cpi->last_boost > 1000) cpi->frames_till_gf_update_due++; + + if (cpi->last_boost > 1250) cpi->frames_till_gf_update_due++; + + if (cpi->last_boost >= 1500) cpi->frames_till_gf_update_due++; + + if (gf_interval_table[gf_frame_useage] > cpi->frames_till_gf_update_due) { + cpi->frames_till_gf_update_due = gf_interval_table[gf_frame_useage]; + } + + if (cpi->frames_till_gf_update_due > cpi->max_gf_interval) { + cpi->frames_till_gf_update_due = cpi->max_gf_interval; + } + } + } else { + cpi->frames_till_gf_update_due = cpi->baseline_gf_interval; + } + + /* ARF on or off */ + if (cpi->pass != 2) { + /* For now Alt ref is not allowed except in 2 pass modes. */ + cpi->source_alt_ref_pending = 0; + + /*if ( cpi->oxcf.fixed_q == -1) + { + if ( cpi->oxcf.play_alternate && (cpi->last_boost > (100 + + (AF_THRESH*cpi->frames_till_gf_update_due)) ) ) + cpi->source_alt_ref_pending = 1; + else + cpi->source_alt_ref_pending = 0; + }*/ + } } +static void calc_pframe_target_size(VP8_COMP *cpi) { + int min_frame_target; + int old_per_frame_bandwidth = cpi->per_frame_bandwidth; -static void calc_pframe_target_size(VP8_COMP *cpi) -{ - int min_frame_target; - int old_per_frame_bandwidth = cpi->per_frame_bandwidth; + if (cpi->current_layer > 0) { + cpi->per_frame_bandwidth = + cpi->layer_context[cpi->current_layer].avg_frame_size_for_layer; + } - if ( cpi->current_layer > 0) - cpi->per_frame_bandwidth = - cpi->layer_context[cpi->current_layer].avg_frame_size_for_layer; + min_frame_target = 0; - min_frame_target = 0; + if (cpi->pass == 2) { + min_frame_target = cpi->min_frame_bandwidth; - if (cpi->pass == 2) - { - min_frame_target = cpi->min_frame_bandwidth; - - if (min_frame_target < 
(cpi->av_per_frame_bandwidth >> 5)) - min_frame_target = cpi->av_per_frame_bandwidth >> 5; + if (min_frame_target < (cpi->av_per_frame_bandwidth >> 5)) { + min_frame_target = cpi->av_per_frame_bandwidth >> 5; } - else if (min_frame_target < cpi->per_frame_bandwidth / 4) - min_frame_target = cpi->per_frame_bandwidth / 4; + } else if (min_frame_target < cpi->per_frame_bandwidth / 4) { + min_frame_target = cpi->per_frame_bandwidth / 4; + } - - /* Special alt reference frame case */ - if((cpi->common.refresh_alt_ref_frame) && (cpi->oxcf.number_of_layers == 1)) - { - if (cpi->pass == 2) - { - /* Per frame bit target for the alt ref frame */ - cpi->per_frame_bandwidth = cpi->twopass.gf_bits; - cpi->this_frame_target = cpi->per_frame_bandwidth; - } - - /* One Pass ??? TBD */ + /* Special alt reference frame case */ + if ((cpi->common.refresh_alt_ref_frame) && + (cpi->oxcf.number_of_layers == 1)) { + if (cpi->pass == 2) { + /* Per frame bit target for the alt ref frame */ + cpi->per_frame_bandwidth = cpi->twopass.gf_bits; + cpi->this_frame_target = cpi->per_frame_bandwidth; } - /* Normal frames (gf,and inter) */ - else - { - /* 2 pass */ - if (cpi->pass == 2) - { - cpi->this_frame_target = cpi->per_frame_bandwidth; - } - /* 1 pass */ - else - { - int Adjustment; - /* Make rate adjustment to recover bits spent in key frame - * Test to see if the key frame inter data rate correction - * should still be in force - */ - if (cpi->kf_overspend_bits > 0) - { - Adjustment = (cpi->kf_bitrate_adjustment <= cpi->kf_overspend_bits) ? cpi->kf_bitrate_adjustment : cpi->kf_overspend_bits; + /* One Pass ??? TBD */ + } - if (Adjustment > (cpi->per_frame_bandwidth - min_frame_target)) - Adjustment = (cpi->per_frame_bandwidth - min_frame_target); - - cpi->kf_overspend_bits -= Adjustment; - - /* Calculate an inter frame bandwidth target for the next - * few frames designed to recover any extra bits spent on - * the key frame. 
- */ - cpi->this_frame_target = cpi->per_frame_bandwidth - Adjustment; - - if (cpi->this_frame_target < min_frame_target) - cpi->this_frame_target = min_frame_target; - } - else - cpi->this_frame_target = cpi->per_frame_bandwidth; - - /* If appropriate make an adjustment to recover bits spent on a - * recent GF - */ - if ((cpi->gf_overspend_bits > 0) && (cpi->this_frame_target > min_frame_target)) - { - Adjustment = (cpi->non_gf_bitrate_adjustment <= cpi->gf_overspend_bits) ? cpi->non_gf_bitrate_adjustment : cpi->gf_overspend_bits; - - if (Adjustment > (cpi->this_frame_target - min_frame_target)) - Adjustment = (cpi->this_frame_target - min_frame_target); - - cpi->gf_overspend_bits -= Adjustment; - cpi->this_frame_target -= Adjustment; - } - - /* Apply small + and - boosts for non gf frames */ - if ((cpi->last_boost > 150) && (cpi->frames_till_gf_update_due > 0) && - (cpi->current_gf_interval >= (MIN_GF_INTERVAL << 1))) - { - /* % Adjustment limited to the range 1% to 10% */ - Adjustment = (cpi->last_boost - 100) >> 5; - - if (Adjustment < 1) - Adjustment = 1; - else if (Adjustment > 10) - Adjustment = 10; - - /* Convert to bits */ - Adjustment = (cpi->this_frame_target * Adjustment) / 100; - - if (Adjustment > (cpi->this_frame_target - min_frame_target)) - Adjustment = (cpi->this_frame_target - min_frame_target); - - if (cpi->frames_since_golden == (cpi->current_gf_interval >> 1)) - { - Adjustment = (cpi->current_gf_interval - 1) * Adjustment; - // Limit adjustment to 10% of current target. 
- if (Adjustment > (10 * cpi->this_frame_target) / 100) - Adjustment = (10 * cpi->this_frame_target) / 100; - cpi->this_frame_target += Adjustment; - } - else - cpi->this_frame_target -= Adjustment; - } - } + /* Normal frames (gf,and inter) */ + else { + /* 2 pass */ + if (cpi->pass == 2) { + cpi->this_frame_target = cpi->per_frame_bandwidth; } + /* 1 pass */ + else { + int Adjustment; + /* Make rate adjustment to recover bits spent in key frame + * Test to see if the key frame inter data rate correction + * should still be in force + */ + if (cpi->kf_overspend_bits > 0) { + Adjustment = (cpi->kf_bitrate_adjustment <= cpi->kf_overspend_bits) + ? cpi->kf_bitrate_adjustment + : cpi->kf_overspend_bits; - /* Sanity check that the total sum of adjustments is not above the - * maximum allowed That is that having allowed for KF and GF penalties - * we have not pushed the current interframe target to low. If the - * adjustment we apply here is not capable of recovering all the extra - * bits we have spent in the KF or GF then the remainder will have to - * be recovered over a longer time span via other buffer / rate control - * mechanisms. - */ - if (cpi->this_frame_target < min_frame_target) - cpi->this_frame_target = min_frame_target; - - if (!cpi->common.refresh_alt_ref_frame) - /* Note the baseline target data rate for this inter frame. */ - cpi->inter_frame_target = cpi->this_frame_target; - - /* One Pass specific code */ - if (cpi->pass == 0) - { - /* Adapt target frame size with respect to any buffering constraints: */ - if (cpi->buffered_mode) - { - int one_percent_bits = (int) - (1 + cpi->oxcf.optimal_buffer_level / 100); - - if ((cpi->buffer_level < cpi->oxcf.optimal_buffer_level) || - (cpi->bits_off_target < cpi->oxcf.optimal_buffer_level)) - { - int percent_low = 0; - - /* Decide whether or not we need to adjust the frame data - * rate target. 
- * - * If we are are below the optimal buffer fullness level - * and adherence to buffering constraints is important to - * the end usage then adjust the per frame target. - */ - if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) && - (cpi->buffer_level < cpi->oxcf.optimal_buffer_level)) - { - percent_low = (int) - ((cpi->oxcf.optimal_buffer_level - cpi->buffer_level) / - one_percent_bits); - } - /* Are we overshooting the long term clip data rate... */ - else if (cpi->bits_off_target < 0) - { - /* Adjust per frame data target downwards to compensate. */ - percent_low = (int)(100 * -cpi->bits_off_target / - (cpi->total_byte_count * 8)); - } - - if (percent_low > cpi->oxcf.under_shoot_pct) - percent_low = cpi->oxcf.under_shoot_pct; - else if (percent_low < 0) - percent_low = 0; - - /* lower the target bandwidth for this frame. */ - cpi->this_frame_target -= - (cpi->this_frame_target * percent_low) / 200; - - /* Are we using allowing control of active_worst_allowed_q - * according to buffer level. - */ - if (cpi->auto_worst_q && cpi->ni_frames > 150) - { - int64_t critical_buffer_level; - - /* For streaming applications the most important factor is - * cpi->buffer_level as this takes into account the - * specified short term buffering constraints. However, - * hitting the long term clip data rate target is also - * important. - */ - if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) - { - /* Take the smaller of cpi->buffer_level and - * cpi->bits_off_target - */ - critical_buffer_level = - (cpi->buffer_level < cpi->bits_off_target) - ? cpi->buffer_level : cpi->bits_off_target; - } - /* For local file playback short term buffering constraints - * are less of an issue - */ - else - { - /* Consider only how we are doing for the clip as a - * whole - */ - critical_buffer_level = cpi->bits_off_target; - } - - /* Set the active worst quality based upon the selected - * buffer fullness number. 
- */ - if (critical_buffer_level < cpi->oxcf.optimal_buffer_level) - { - if ( critical_buffer_level > - (cpi->oxcf.optimal_buffer_level >> 2) ) - { - int64_t qadjustment_range = - cpi->worst_quality - cpi->ni_av_qi; - int64_t above_base = - (critical_buffer_level - - (cpi->oxcf.optimal_buffer_level >> 2)); - - /* Step active worst quality down from - * cpi->ni_av_qi when (critical_buffer_level == - * cpi->optimal_buffer_level) to - * cpi->worst_quality when - * (critical_buffer_level == - * cpi->optimal_buffer_level >> 2) - */ - cpi->active_worst_quality = - cpi->worst_quality - - (int)((qadjustment_range * above_base) / - (cpi->oxcf.optimal_buffer_level*3>>2)); - } - else - { - cpi->active_worst_quality = cpi->worst_quality; - } - } - else - { - cpi->active_worst_quality = cpi->ni_av_qi; - } - } - else - { - cpi->active_worst_quality = cpi->worst_quality; - } - } - else - { - int percent_high = 0; - - if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) - && (cpi->buffer_level > cpi->oxcf.optimal_buffer_level)) - { - percent_high = (int)((cpi->buffer_level - - cpi->oxcf.optimal_buffer_level) - / one_percent_bits); - } - else if (cpi->bits_off_target > cpi->oxcf.optimal_buffer_level) - { - percent_high = (int)((100 * cpi->bits_off_target) - / (cpi->total_byte_count * 8)); - } - - if (percent_high > cpi->oxcf.over_shoot_pct) - percent_high = cpi->oxcf.over_shoot_pct; - else if (percent_high < 0) - percent_high = 0; - - cpi->this_frame_target += (cpi->this_frame_target * - percent_high) / 200; - - /* Are we allowing control of active_worst_allowed_q according - * to buffer level. 
- */ - if (cpi->auto_worst_q && cpi->ni_frames > 150) - { - /* When using the relaxed buffer model stick to the - * user specified value - */ - cpi->active_worst_quality = cpi->ni_av_qi; - } - else - { - cpi->active_worst_quality = cpi->worst_quality; - } - } - - /* Set active_best_quality to prevent quality rising too high */ - cpi->active_best_quality = cpi->best_quality; - - /* Worst quality obviously must not be better than best quality */ - if (cpi->active_worst_quality <= cpi->active_best_quality) - cpi->active_worst_quality = cpi->active_best_quality + 1; - - if(cpi->active_worst_quality > 127) - cpi->active_worst_quality = 127; - } - /* Unbuffered mode (eg. video conferencing) */ - else - { - /* Set the active worst quality */ - cpi->active_worst_quality = cpi->worst_quality; + if (Adjustment > (cpi->per_frame_bandwidth - min_frame_target)) { + Adjustment = (cpi->per_frame_bandwidth - min_frame_target); } - /* Special trap for constrained quality mode - * "active_worst_quality" may never drop below cq level - * for any frame type. + cpi->kf_overspend_bits -= Adjustment; + + /* Calculate an inter frame bandwidth target for the next + * few frames designed to recover any extra bits spent on + * the key frame. */ - if ( cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY && - cpi->active_worst_quality < cpi->cq_target_quality) - { - cpi->active_worst_quality = cpi->cq_target_quality; + cpi->this_frame_target = cpi->per_frame_bandwidth - Adjustment; + + if (cpi->this_frame_target < min_frame_target) { + cpi->this_frame_target = min_frame_target; } + } else { + cpi->this_frame_target = cpi->per_frame_bandwidth; + } + + /* If appropriate make an adjustment to recover bits spent on a + * recent GF + */ + if ((cpi->gf_overspend_bits > 0) && + (cpi->this_frame_target > min_frame_target)) { + Adjustment = (cpi->non_gf_bitrate_adjustment <= cpi->gf_overspend_bits) + ? 
cpi->non_gf_bitrate_adjustment + : cpi->gf_overspend_bits; + + if (Adjustment > (cpi->this_frame_target - min_frame_target)) { + Adjustment = (cpi->this_frame_target - min_frame_target); + } + + cpi->gf_overspend_bits -= Adjustment; + cpi->this_frame_target -= Adjustment; + } + + /* Apply small + and - boosts for non gf frames */ + if ((cpi->last_boost > 150) && (cpi->frames_till_gf_update_due > 0) && + (cpi->current_gf_interval >= (MIN_GF_INTERVAL << 1))) { + /* % Adjustment limited to the range 1% to 10% */ + Adjustment = (cpi->last_boost - 100) >> 5; + + if (Adjustment < 1) { + Adjustment = 1; + } else if (Adjustment > 10) { + Adjustment = 10; + } + + /* Convert to bits */ + Adjustment = (cpi->this_frame_target * Adjustment) / 100; + + if (Adjustment > (cpi->this_frame_target - min_frame_target)) { + Adjustment = (cpi->this_frame_target - min_frame_target); + } + + if (cpi->frames_since_golden == (cpi->current_gf_interval >> 1)) { + Adjustment = (cpi->current_gf_interval - 1) * Adjustment; + // Limit adjustment to 10% of current target. + if (Adjustment > (10 * cpi->this_frame_target) / 100) { + Adjustment = (10 * cpi->this_frame_target) / 100; + } + cpi->this_frame_target += Adjustment; + } else { + cpi->this_frame_target -= Adjustment; + } + } + } + } + + /* Sanity check that the total sum of adjustments is not above the + * maximum allowed That is that having allowed for KF and GF penalties + * we have not pushed the current interframe target to low. If the + * adjustment we apply here is not capable of recovering all the extra + * bits we have spent in the KF or GF then the remainder will have to + * be recovered over a longer time span via other buffer / rate control + * mechanisms. + */ + if (cpi->this_frame_target < min_frame_target) { + cpi->this_frame_target = min_frame_target; + } + + if (!cpi->common.refresh_alt_ref_frame) { + /* Note the baseline target data rate for this inter frame. 
*/ + cpi->inter_frame_target = cpi->this_frame_target; + } + + /* One Pass specific code */ + if (cpi->pass == 0) { + /* Adapt target frame size with respect to any buffering constraints: */ + if (cpi->buffered_mode) { + int one_percent_bits = (int)(1 + cpi->oxcf.optimal_buffer_level / 100); + + if ((cpi->buffer_level < cpi->oxcf.optimal_buffer_level) || + (cpi->bits_off_target < cpi->oxcf.optimal_buffer_level)) { + int percent_low = 0; + + /* Decide whether or not we need to adjust the frame data + * rate target. + * + * If we are are below the optimal buffer fullness level + * and adherence to buffering constraints is important to + * the end usage then adjust the per frame target. + */ + if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) && + (cpi->buffer_level < cpi->oxcf.optimal_buffer_level)) { + percent_low = + (int)((cpi->oxcf.optimal_buffer_level - cpi->buffer_level) / + one_percent_bits); + } + /* Are we overshooting the long term clip data rate... */ + else if (cpi->bits_off_target < 0) { + /* Adjust per frame data target downwards to compensate. */ + percent_low = + (int)(100 * -cpi->bits_off_target / (cpi->total_byte_count * 8)); + } + + if (percent_low > cpi->oxcf.under_shoot_pct) { + percent_low = cpi->oxcf.under_shoot_pct; + } else if (percent_low < 0) { + percent_low = 0; + } + + /* lower the target bandwidth for this frame. */ + cpi->this_frame_target -= (cpi->this_frame_target * percent_low) / 200; + + /* Are we using allowing control of active_worst_allowed_q + * according to buffer level. + */ + if (cpi->auto_worst_q && cpi->ni_frames > 150) { + int64_t critical_buffer_level; + + /* For streaming applications the most important factor is + * cpi->buffer_level as this takes into account the + * specified short term buffering constraints. However, + * hitting the long term clip data rate target is also + * important. 
+ */ + if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + /* Take the smaller of cpi->buffer_level and + * cpi->bits_off_target + */ + critical_buffer_level = (cpi->buffer_level < cpi->bits_off_target) + ? cpi->buffer_level + : cpi->bits_off_target; + } + /* For local file playback short term buffering constraints + * are less of an issue + */ + else { + /* Consider only how we are doing for the clip as a + * whole + */ + critical_buffer_level = cpi->bits_off_target; + } + + /* Set the active worst quality based upon the selected + * buffer fullness number. + */ + if (critical_buffer_level < cpi->oxcf.optimal_buffer_level) { + if (critical_buffer_level > (cpi->oxcf.optimal_buffer_level >> 2)) { + int64_t qadjustment_range = cpi->worst_quality - cpi->ni_av_qi; + int64_t above_base = (critical_buffer_level - + (cpi->oxcf.optimal_buffer_level >> 2)); + + /* Step active worst quality down from + * cpi->ni_av_qi when (critical_buffer_level == + * cpi->optimal_buffer_level) to + * cpi->worst_quality when + * (critical_buffer_level == + * cpi->optimal_buffer_level >> 2) + */ + cpi->active_worst_quality = + cpi->worst_quality - + (int)((qadjustment_range * above_base) / + (cpi->oxcf.optimal_buffer_level * 3 >> 2)); + } else { + cpi->active_worst_quality = cpi->worst_quality; + } + } else { + cpi->active_worst_quality = cpi->ni_av_qi; + } + } else { + cpi->active_worst_quality = cpi->worst_quality; + } + } else { + int percent_high = 0; + + if ((cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) && + (cpi->buffer_level > cpi->oxcf.optimal_buffer_level)) { + percent_high = + (int)((cpi->buffer_level - cpi->oxcf.optimal_buffer_level) / + one_percent_bits); + } else if (cpi->bits_off_target > cpi->oxcf.optimal_buffer_level) { + percent_high = + (int)((100 * cpi->bits_off_target) / (cpi->total_byte_count * 8)); + } + + if (percent_high > cpi->oxcf.over_shoot_pct) { + percent_high = cpi->oxcf.over_shoot_pct; + } else if (percent_high < 0) { + percent_high = 0; + } + + 
cpi->this_frame_target += (cpi->this_frame_target * percent_high) / 200; + + /* Are we allowing control of active_worst_allowed_q according + * to buffer level. + */ + if (cpi->auto_worst_q && cpi->ni_frames > 150) { + /* When using the relaxed buffer model stick to the + * user specified value + */ + cpi->active_worst_quality = cpi->ni_av_qi; + } else { + cpi->active_worst_quality = cpi->worst_quality; + } + } + + /* Set active_best_quality to prevent quality rising too high */ + cpi->active_best_quality = cpi->best_quality; + + /* Worst quality obviously must not be better than best quality */ + if (cpi->active_worst_quality <= cpi->active_best_quality) { + cpi->active_worst_quality = cpi->active_best_quality + 1; + } + + if (cpi->active_worst_quality > 127) cpi->active_worst_quality = 127; + } + /* Unbuffered mode (eg. video conferencing) */ + else { + /* Set the active worst quality */ + cpi->active_worst_quality = cpi->worst_quality; } - /* Test to see if we have to drop a frame - * The auto-drop frame code is only used in buffered mode. - * In unbufferd mode (eg vide conferencing) the descision to - * code or drop a frame is made outside the codec in response to real - * world comms or buffer considerations. + /* Special trap for constrained quality mode + * "active_worst_quality" may never drop below cq level + * for any frame type. */ - if (cpi->drop_frames_allowed && - (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) && - ((cpi->common.frame_type != KEY_FRAME))) - { - /* Check for a buffer underun-crisis in which case we have to drop - * a frame - */ - if ((cpi->buffer_level < 0)) - { + if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY && + cpi->active_worst_quality < cpi->cq_target_quality) { + cpi->active_worst_quality = cpi->cq_target_quality; + } + } + + /* Test to see if we have to drop a frame + * The auto-drop frame code is only used in buffered mode. 
+ * In unbufferd mode (eg vide conferencing) the descision to + * code or drop a frame is made outside the codec in response to real + * world comms or buffer considerations. + */ + if (cpi->drop_frames_allowed && + (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) && + ((cpi->common.frame_type != KEY_FRAME))) { + /* Check for a buffer underun-crisis in which case we have to drop + * a frame + */ + if ((cpi->buffer_level < 0)) { #if 0 FILE *f = fopen("dec.stt", "a"); fprintf(f, "%10d %10d %10d %10d ***** BUFFER EMPTY\n", @@ -949,64 +856,68 @@ static void calc_pframe_target_size(VP8_COMP *cpi) (cpi->buffer_level * 100) / cpi->oxcf.optimal_buffer_level); fclose(f); #endif - cpi->drop_frame = 1; + cpi->drop_frame = 1; - /* Update the buffer level variable. */ - cpi->bits_off_target += cpi->av_per_frame_bandwidth; - if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) - cpi->bits_off_target = (int)cpi->oxcf.maximum_buffer_size; - cpi->buffer_level = cpi->bits_off_target; + /* Update the buffer level variable. */ + cpi->bits_off_target += cpi->av_per_frame_bandwidth; + if (cpi->bits_off_target > cpi->oxcf.maximum_buffer_size) { + cpi->bits_off_target = (int)cpi->oxcf.maximum_buffer_size; + } + cpi->buffer_level = cpi->bits_off_target; - if (cpi->oxcf.number_of_layers > 1) { - unsigned int i; + if (cpi->oxcf.number_of_layers > 1) { + unsigned int i; - // Propagate bits saved by dropping the frame to higher layers. - for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; - i++) { - LAYER_CONTEXT *lc = &cpi->layer_context[i]; - lc->bits_off_target += (int)(lc->target_bandwidth / - lc->framerate); - if (lc->bits_off_target > lc->maximum_buffer_size) - lc->bits_off_target = lc->maximum_buffer_size; - lc->buffer_level = lc->bits_off_target; - } - } + // Propagate bits saved by dropping the frame to higher layers. 
+ for (i = cpi->current_layer + 1; i < cpi->oxcf.number_of_layers; ++i) { + LAYER_CONTEXT *lc = &cpi->layer_context[i]; + lc->bits_off_target += (int)(lc->target_bandwidth / lc->framerate); + if (lc->bits_off_target > lc->maximum_buffer_size) { + lc->bits_off_target = lc->maximum_buffer_size; + } + lc->buffer_level = lc->bits_off_target; } + } + } + } + + /* Adjust target frame size for Golden Frames: */ + if (cpi->oxcf.error_resilient_mode == 0 && + (cpi->frames_till_gf_update_due == 0) && !cpi->drop_frame) { + int Q = + (cpi->oxcf.fixed_q < 0) ? cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q; + + int gf_frame_useage = 0; /* Golden frame useage since last GF */ + int tot_mbs = cpi->recent_ref_frame_usage[INTRA_FRAME] + + cpi->recent_ref_frame_usage[LAST_FRAME] + + cpi->recent_ref_frame_usage[GOLDEN_FRAME] + + cpi->recent_ref_frame_usage[ALTREF_FRAME]; + + int pct_gf_active = (100 * cpi->gf_active_count) / + (cpi->common.mb_rows * cpi->common.mb_cols); + + if (tot_mbs) { + gf_frame_useage = (cpi->recent_ref_frame_usage[GOLDEN_FRAME] + + cpi->recent_ref_frame_usage[ALTREF_FRAME]) * + 100 / tot_mbs; } - /* Adjust target frame size for Golden Frames: */ - if (cpi->oxcf.error_resilient_mode == 0 && - (cpi->frames_till_gf_update_due == 0) && !cpi->drop_frame) - { - int Q = (cpi->oxcf.fixed_q < 0) ? 
cpi->last_q[INTER_FRAME] : cpi->oxcf.fixed_q; + if (pct_gf_active > gf_frame_useage) gf_frame_useage = pct_gf_active; - int gf_frame_useage = 0; /* Golden frame useage since last GF */ - int tot_mbs = cpi->recent_ref_frame_usage[INTRA_FRAME] + - cpi->recent_ref_frame_usage[LAST_FRAME] + - cpi->recent_ref_frame_usage[GOLDEN_FRAME] + - cpi->recent_ref_frame_usage[ALTREF_FRAME]; + /* Is a fixed manual GF frequency being used */ + if (cpi->auto_gold) { + /* For one pass throw a GF if recent frame intra useage is + * low or the GF useage is high + */ + if ((cpi->pass == 0) && + (cpi->this_frame_percent_intra < 15 || gf_frame_useage >= 5)) { + cpi->common.refresh_golden_frame = 1; - int pct_gf_active = (100 * cpi->gf_active_count) / (cpi->common.mb_rows * cpi->common.mb_cols); - - if (tot_mbs) - gf_frame_useage = (cpi->recent_ref_frame_usage[GOLDEN_FRAME] + cpi->recent_ref_frame_usage[ALTREF_FRAME]) * 100 / tot_mbs; - - if (pct_gf_active > gf_frame_useage) - gf_frame_useage = pct_gf_active; - - /* Is a fixed manual GF frequency being used */ - if (cpi->auto_gold) - { - /* For one pass throw a GF if recent frame intra useage is - * low or the GF useage is high - */ - if ((cpi->pass == 0) && (cpi->this_frame_percent_intra < 15 || gf_frame_useage >= 5)) - cpi->common.refresh_golden_frame = 1; - - /* Two pass GF descision */ - else if (cpi->pass == 2) - cpi->common.refresh_golden_frame = 1; - } + /* Two pass GF descision */ + } else if (cpi->pass == 2) { + cpi->common.refresh_golden_frame = 1; + } + } #if 0 @@ -1023,8 +934,7 @@ static void calc_pframe_target_size(VP8_COMP *cpi) #endif - if (cpi->common.refresh_golden_frame == 1) - { + if (cpi->common.refresh_golden_frame == 1) { #if 0 if (0) @@ -1038,533 +948,476 @@ static void calc_pframe_target_size(VP8_COMP *cpi) #endif - if (cpi->auto_adjust_gold_quantizer) - { - calc_gf_params(cpi); - } + if (cpi->auto_adjust_gold_quantizer) { + calc_gf_params(cpi); + } - /* If we are using alternate ref instead of gf then do not apply 
the - * boost It will instead be applied to the altref update Jims - * modified boost + /* If we are using alternate ref instead of gf then do not apply the + * boost It will instead be applied to the altref update Jims + * modified boost + */ + if (!cpi->source_alt_ref_active) { + if (cpi->oxcf.fixed_q < 0) { + if (cpi->pass == 2) { + /* The spend on the GF is defined in the two pass + * code for two pass encodes */ - if (!cpi->source_alt_ref_active) - { - if (cpi->oxcf.fixed_q < 0) - { - if (cpi->pass == 2) - { - /* The spend on the GF is defined in the two pass - * code for two pass encodes - */ - cpi->this_frame_target = cpi->per_frame_bandwidth; - } - else - { - int Boost = cpi->last_boost; - int frames_in_section = cpi->frames_till_gf_update_due + 1; - int allocation_chunks = (frames_in_section * 100) + (Boost - 100); - int bits_in_section = cpi->inter_frame_target * frames_in_section; + cpi->this_frame_target = cpi->per_frame_bandwidth; + } else { + int Boost = cpi->last_boost; + int frames_in_section = cpi->frames_till_gf_update_due + 1; + int allocation_chunks = (frames_in_section * 100) + (Boost - 100); + int bits_in_section = cpi->inter_frame_target * frames_in_section; - /* Normalize Altboost and allocations chunck down to - * prevent overflow - */ - while (Boost > 1000) - { - Boost /= 2; - allocation_chunks /= 2; - } - - /* Avoid loss of precision but avoid overflow */ - if ((bits_in_section >> 7) > allocation_chunks) - cpi->this_frame_target = Boost * (bits_in_section / allocation_chunks); - else - cpi->this_frame_target = (Boost * bits_in_section) / allocation_chunks; - } - } - else - cpi->this_frame_target = - (estimate_bits_at_q(1, Q, cpi->common.MBs, 1.0) - * cpi->last_boost) / 100; - - } - /* If there is an active ARF at this location use the minimum - * bits on this frame even if it is a contructed arf. - * The active maximum quantizer insures that an appropriate - * number of bits will be spent if needed for contstructed ARFs. 
+ /* Normalize Altboost and allocations chunck down to + * prevent overflow */ - else - { - cpi->this_frame_target = 0; + while (Boost > 1000) { + Boost /= 2; + allocation_chunks /= 2; } - cpi->current_gf_interval = cpi->frames_till_gf_update_due; - + /* Avoid loss of precision but avoid overflow */ + if ((bits_in_section >> 7) > allocation_chunks) { + cpi->this_frame_target = + Boost * (bits_in_section / allocation_chunks); + } else { + cpi->this_frame_target = + (Boost * bits_in_section) / allocation_chunks; + } + } + } else { + cpi->this_frame_target = + (estimate_bits_at_q(1, Q, cpi->common.MBs, 1.0) * + cpi->last_boost) / + 100; } - } - cpi->per_frame_bandwidth = old_per_frame_bandwidth; + } + /* If there is an active ARF at this location use the minimum + * bits on this frame even if it is a contructed arf. + * The active maximum quantizer insures that an appropriate + * number of bits will be spent if needed for contstructed ARFs. + */ + else { + cpi->this_frame_target = 0; + } + + cpi->current_gf_interval = cpi->frames_till_gf_update_due; + } + } + + cpi->per_frame_bandwidth = old_per_frame_bandwidth; } +void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var) { + int Q = cpi->common.base_qindex; + int correction_factor = 100; + double rate_correction_factor; + double adjustment_limit; -void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var) -{ - int Q = cpi->common.base_qindex; - int correction_factor = 100; - double rate_correction_factor; - double adjustment_limit; + int projected_size_based_on_q = 0; - int projected_size_based_on_q = 0; + /* Clear down mmx registers to allow floating point in what follows */ + vp8_clear_system_state(); - /* Clear down mmx registers to allow floating point in what follows */ - vp8_clear_system_state(); - - if (cpi->common.frame_type == KEY_FRAME) - { - rate_correction_factor = cpi->key_frame_rate_correction_factor; + if (cpi->common.frame_type == KEY_FRAME) { + rate_correction_factor = 
cpi->key_frame_rate_correction_factor; + } else { + if (cpi->oxcf.number_of_layers == 1 && (cpi->common.refresh_alt_ref_frame || + cpi->common.refresh_golden_frame)) { + rate_correction_factor = cpi->gf_rate_correction_factor; + } else { + rate_correction_factor = cpi->rate_correction_factor; } - else - { - if (cpi->oxcf.number_of_layers == 1 && - (cpi->common.refresh_alt_ref_frame || - cpi->common.refresh_golden_frame)) - rate_correction_factor = cpi->gf_rate_correction_factor; - else - rate_correction_factor = cpi->rate_correction_factor; + } + + /* Work out how big we would have expected the frame to be at this Q + * given the current correction factor. Stay in double to avoid int + * overflow when values are large + */ + projected_size_based_on_q = + (int)(((.5 + + rate_correction_factor * + vp8_bits_per_mb[cpi->common.frame_type][Q]) * + cpi->common.MBs) / + (1 << BPER_MB_NORMBITS)); + + /* Make some allowance for cpi->zbin_over_quant */ + if (cpi->mb.zbin_over_quant > 0) { + int Z = cpi->mb.zbin_over_quant; + double Factor = 0.99; + double factor_adjustment = 0.01 / 256.0; + + while (Z > 0) { + Z--; + projected_size_based_on_q = (int)(Factor * projected_size_based_on_q); + Factor += factor_adjustment; + + if (Factor >= 0.999) Factor = 0.999; } + } - /* Work out how big we would have expected the frame to be at this Q - * given the current correction factor. Stay in double to avoid int - * overflow when values are large - */ - projected_size_based_on_q = (int)(((.5 + rate_correction_factor * vp8_bits_per_mb[cpi->common.frame_type][Q]) * cpi->common.MBs) / (1 << BPER_MB_NORMBITS)); + /* Work out a size correction factor. 
*/ + if (projected_size_based_on_q > 0) { + correction_factor = + (100 * cpi->projected_frame_size) / projected_size_based_on_q; + } - /* Make some allowance for cpi->zbin_over_quant */ - if (cpi->mb.zbin_over_quant > 0) - { - int Z = cpi->mb.zbin_over_quant; - double Factor = 0.99; - double factor_adjustment = 0.01 / 256.0; - - while (Z > 0) - { - Z --; - projected_size_based_on_q = - (int)(Factor * projected_size_based_on_q); - Factor += factor_adjustment; - - if (Factor >= 0.999) - Factor = 0.999; - } - } - - /* Work out a size correction factor. */ - if (projected_size_based_on_q > 0) - correction_factor = (100 * cpi->projected_frame_size) / projected_size_based_on_q; - - /* More heavily damped adjustment used if we have been oscillating - * either side of target - */ - switch (damp_var) - { - case 0: - adjustment_limit = 0.75; - break; - case 1: - adjustment_limit = 0.375; - break; + /* More heavily damped adjustment used if we have been oscillating + * either side of target + */ + switch (damp_var) { + case 0: adjustment_limit = 0.75; break; + case 1: adjustment_limit = 0.375; break; case 2: - default: - adjustment_limit = 0.25; - break; - } + default: adjustment_limit = 0.25; break; + } - if (correction_factor > 102) - { - /* We are not already at the worst allowable quality */ - correction_factor = (int)(100.5 + ((correction_factor - 100) * adjustment_limit)); - rate_correction_factor = ((rate_correction_factor * correction_factor) / 100); + if (correction_factor > 102) { + /* We are not already at the worst allowable quality */ + correction_factor = + (int)(100.5 + ((correction_factor - 100) * adjustment_limit)); + rate_correction_factor = + ((rate_correction_factor * correction_factor) / 100); - /* Keep rate_correction_factor within limits */ - if (rate_correction_factor > MAX_BPB_FACTOR) - rate_correction_factor = MAX_BPB_FACTOR; + /* Keep rate_correction_factor within limits */ + if (rate_correction_factor > MAX_BPB_FACTOR) { + rate_correction_factor = 
MAX_BPB_FACTOR; } - else if (correction_factor < 99) - { - /* We are not already at the best allowable quality */ - correction_factor = (int)(100.5 - ((100 - correction_factor) * adjustment_limit)); - rate_correction_factor = ((rate_correction_factor * correction_factor) / 100); + } else if (correction_factor < 99) { + /* We are not already at the best allowable quality */ + correction_factor = + (int)(100.5 - ((100 - correction_factor) * adjustment_limit)); + rate_correction_factor = + ((rate_correction_factor * correction_factor) / 100); - /* Keep rate_correction_factor within limits */ - if (rate_correction_factor < MIN_BPB_FACTOR) - rate_correction_factor = MIN_BPB_FACTOR; + /* Keep rate_correction_factor within limits */ + if (rate_correction_factor < MIN_BPB_FACTOR) { + rate_correction_factor = MIN_BPB_FACTOR; } + } - if (cpi->common.frame_type == KEY_FRAME) - cpi->key_frame_rate_correction_factor = rate_correction_factor; - else - { - if (cpi->oxcf.number_of_layers == 1 && - (cpi->common.refresh_alt_ref_frame || - cpi->common.refresh_golden_frame)) - cpi->gf_rate_correction_factor = rate_correction_factor; - else - cpi->rate_correction_factor = rate_correction_factor; + if (cpi->common.frame_type == KEY_FRAME) { + cpi->key_frame_rate_correction_factor = rate_correction_factor; + } else { + if (cpi->oxcf.number_of_layers == 1 && (cpi->common.refresh_alt_ref_frame || + cpi->common.refresh_golden_frame)) { + cpi->gf_rate_correction_factor = rate_correction_factor; + } else { + cpi->rate_correction_factor = rate_correction_factor; } + } } +int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame) { + int Q = cpi->active_worst_quality; -int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame) -{ - int Q = cpi->active_worst_quality; + if (cpi->force_maxqp == 1) { + cpi->active_worst_quality = cpi->worst_quality; + return cpi->worst_quality; + } - if (cpi->force_maxqp == 1) { - cpi->active_worst_quality = cpi->worst_quality; - return cpi->worst_quality; + /* 
Reset Zbin OQ value */ + cpi->mb.zbin_over_quant = 0; + + if (cpi->oxcf.fixed_q >= 0) { + Q = cpi->oxcf.fixed_q; + + if (cpi->common.frame_type == KEY_FRAME) { + Q = cpi->oxcf.key_q; + } else if (cpi->oxcf.number_of_layers == 1 && + cpi->common.refresh_alt_ref_frame) { + Q = cpi->oxcf.alt_q; + } else if (cpi->oxcf.number_of_layers == 1 && + cpi->common.refresh_golden_frame) { + Q = cpi->oxcf.gold_q; } - - /* Reset Zbin OQ value */ - cpi->mb.zbin_over_quant = 0; - - if (cpi->oxcf.fixed_q >= 0) - { - Q = cpi->oxcf.fixed_q; - - if (cpi->common.frame_type == KEY_FRAME) - { - Q = cpi->oxcf.key_q; - } - else if (cpi->oxcf.number_of_layers == 1 && - cpi->common.refresh_alt_ref_frame) - { - Q = cpi->oxcf.alt_q; - } - else if (cpi->oxcf.number_of_layers == 1 && - cpi->common.refresh_golden_frame) - { - Q = cpi->oxcf.gold_q; - } - } - else - { - int i; - int last_error = INT_MAX; - int target_bits_per_mb; - int bits_per_mb_at_this_q; - double correction_factor; - - /* Select the appropriate correction factor based upon type of frame. 
*/ - if (cpi->common.frame_type == KEY_FRAME) - correction_factor = cpi->key_frame_rate_correction_factor; - else - { - if (cpi->oxcf.number_of_layers == 1 && - (cpi->common.refresh_alt_ref_frame || - cpi->common.refresh_golden_frame)) - correction_factor = cpi->gf_rate_correction_factor; - else - correction_factor = cpi->rate_correction_factor; - } - - /* Calculate required scaling factor based on target frame size and - * size of frame produced using previous Q - */ - if (target_bits_per_frame >= (INT_MAX >> BPER_MB_NORMBITS)) - /* Case where we would overflow int */ - target_bits_per_mb = (target_bits_per_frame / cpi->common.MBs) << BPER_MB_NORMBITS; - else - target_bits_per_mb = (target_bits_per_frame << BPER_MB_NORMBITS) / cpi->common.MBs; - - i = cpi->active_best_quality; - - do - { - bits_per_mb_at_this_q = (int)(.5 + correction_factor * vp8_bits_per_mb[cpi->common.frame_type][i]); - - if (bits_per_mb_at_this_q <= target_bits_per_mb) - { - if ((target_bits_per_mb - bits_per_mb_at_this_q) <= last_error) - Q = i; - else - Q = i - 1; - - break; - } - else - last_error = bits_per_mb_at_this_q - target_bits_per_mb; - } - while (++i <= cpi->active_worst_quality); - - - /* If we are at MAXQ then enable Q over-run which seeks to claw - * back additional bits through things like the RD multiplier - * and zero bin size. 
- */ - if (Q >= MAXQ) - { - int zbin_oqmax; - - double Factor = 0.99; - double factor_adjustment = 0.01 / 256.0; - - if (cpi->common.frame_type == KEY_FRAME) - zbin_oqmax = 0; - else if (cpi->oxcf.number_of_layers == 1 && - (cpi->common.refresh_alt_ref_frame || - (cpi->common.refresh_golden_frame && - !cpi->source_alt_ref_active))) - zbin_oqmax = 16; - else - zbin_oqmax = ZBIN_OQ_MAX; - - /*{ - double Factor = (double)target_bits_per_mb/(double)bits_per_mb_at_this_q; - double Oq; - - Factor = Factor/1.2683; - - Oq = pow( Factor, (1.0/-0.165) ); - - if ( Oq > zbin_oqmax ) - Oq = zbin_oqmax; - - cpi->zbin_over_quant = (int)Oq; - }*/ - - /* Each incrment in the zbin is assumed to have a fixed effect - * on bitrate. This is not of course true. The effect will be - * highly clip dependent and may well have sudden steps. The - * idea here is to acheive higher effective quantizers than the - * normal maximum by expanding the zero bin and hence - * decreasing the number of low magnitude non zero coefficients. - */ - while (cpi->mb.zbin_over_quant < zbin_oqmax) - { - cpi->mb.zbin_over_quant ++; - - if (cpi->mb.zbin_over_quant > zbin_oqmax) - cpi->mb.zbin_over_quant = zbin_oqmax; - - /* Adjust bits_per_mb_at_this_q estimate */ - bits_per_mb_at_this_q = (int)(Factor * bits_per_mb_at_this_q); - Factor += factor_adjustment; - - if (Factor >= 0.999) - Factor = 0.999; - - /* Break out if we get down to the target rate */ - if (bits_per_mb_at_this_q <= target_bits_per_mb) - break; - } - - } - } - - return Q; -} - - -static int estimate_keyframe_frequency(VP8_COMP *cpi) -{ + } else { int i; + int last_error = INT_MAX; + int target_bits_per_mb; + int bits_per_mb_at_this_q; + double correction_factor; - /* Average key frame frequency */ - int av_key_frame_frequency = 0; + /* Select the appropriate correction factor based upon type of frame. 
*/ + if (cpi->common.frame_type == KEY_FRAME) { + correction_factor = cpi->key_frame_rate_correction_factor; + } else { + if (cpi->oxcf.number_of_layers == 1 && + (cpi->common.refresh_alt_ref_frame || + cpi->common.refresh_golden_frame)) { + correction_factor = cpi->gf_rate_correction_factor; + } else { + correction_factor = cpi->rate_correction_factor; + } + } - /* First key frame at start of sequence is a special case. We have no - * frequency data. + /* Calculate required scaling factor based on target frame size and + * size of frame produced using previous Q */ - if (cpi->key_frame_count == 1) - { - /* Assume a default of 1 kf every 2 seconds, or the max kf interval, - * whichever is smaller. - */ - int key_freq = cpi->oxcf.key_freq>0 ? cpi->oxcf.key_freq : 1; - av_key_frame_frequency = 1 + (int)cpi->output_framerate * 2; - - if (cpi->oxcf.auto_key && av_key_frame_frequency > key_freq) - av_key_frame_frequency = key_freq; - - cpi->prior_key_frame_distance[KEY_FRAME_CONTEXT - 1] - = av_key_frame_frequency; + if (target_bits_per_frame >= (INT_MAX >> BPER_MB_NORMBITS)) { + /* Case where we would overflow int */ + target_bits_per_mb = (target_bits_per_frame / cpi->common.MBs) + << BPER_MB_NORMBITS; + } else { + target_bits_per_mb = + (target_bits_per_frame << BPER_MB_NORMBITS) / cpi->common.MBs; } - else - { - unsigned int total_weight = 0; - int last_kf_interval = - (cpi->frames_since_key > 0) ? 
cpi->frames_since_key : 1; - /* reset keyframe context and calculate weighted average of last - * KEY_FRAME_CONTEXT keyframes - */ - for (i = 0; i < KEY_FRAME_CONTEXT; i++) - { - if (i < KEY_FRAME_CONTEXT - 1) - cpi->prior_key_frame_distance[i] - = cpi->prior_key_frame_distance[i+1]; - else - cpi->prior_key_frame_distance[i] = last_kf_interval; + i = cpi->active_best_quality; - av_key_frame_frequency += prior_key_frame_weight[i] - * cpi->prior_key_frame_distance[i]; - total_weight += prior_key_frame_weight[i]; + do { + bits_per_mb_at_this_q = + (int)(.5 + + correction_factor * vp8_bits_per_mb[cpi->common.frame_type][i]); + + if (bits_per_mb_at_this_q <= target_bits_per_mb) { + if ((target_bits_per_mb - bits_per_mb_at_this_q) <= last_error) { + Q = i; + } else { + Q = i - 1; } - av_key_frame_frequency /= total_weight; + break; + } else { + last_error = bits_per_mb_at_this_q - target_bits_per_mb; + } + } while (++i <= cpi->active_worst_quality); + /* If we are at MAXQ then enable Q over-run which seeks to claw + * back additional bits through things like the RD multiplier + * and zero bin size. + */ + if (Q >= MAXQ) { + int zbin_oqmax; + + double Factor = 0.99; + double factor_adjustment = 0.01 / 256.0; + + if (cpi->common.frame_type == KEY_FRAME) { + zbin_oqmax = 0; + } else if (cpi->oxcf.number_of_layers == 1 && + (cpi->common.refresh_alt_ref_frame || + (cpi->common.refresh_golden_frame && + !cpi->source_alt_ref_active))) { + zbin_oqmax = 16; + } else { + zbin_oqmax = ZBIN_OQ_MAX; + } + + /*{ + double Factor = + (double)target_bits_per_mb/(double)bits_per_mb_at_this_q; + double Oq; + + Factor = Factor/1.2683; + + Oq = pow( Factor, (1.0/-0.165) ); + + if ( Oq > zbin_oqmax ) + Oq = zbin_oqmax; + + cpi->zbin_over_quant = (int)Oq; + }*/ + + /* Each incrment in the zbin is assumed to have a fixed effect + * on bitrate. This is not of course true. The effect will be + * highly clip dependent and may well have sudden steps. 
The + * idea here is to acheive higher effective quantizers than the + * normal maximum by expanding the zero bin and hence + * decreasing the number of low magnitude non zero coefficients. + */ + while (cpi->mb.zbin_over_quant < zbin_oqmax) { + cpi->mb.zbin_over_quant++; + + if (cpi->mb.zbin_over_quant > zbin_oqmax) { + cpi->mb.zbin_over_quant = zbin_oqmax; + } + + /* Adjust bits_per_mb_at_this_q estimate */ + bits_per_mb_at_this_q = (int)(Factor * bits_per_mb_at_this_q); + Factor += factor_adjustment; + + if (Factor >= 0.999) Factor = 0.999; + + /* Break out if we get down to the target rate */ + if (bits_per_mb_at_this_q <= target_bits_per_mb) break; + } } - // TODO (marpan): Given the checks above, |av_key_frame_frequency| - // should always be above 0. But for now we keep the sanity check in. - if (av_key_frame_frequency == 0) - av_key_frame_frequency = 1; - return av_key_frame_frequency; + } + + return Q; } +static int estimate_keyframe_frequency(VP8_COMP *cpi) { + int i; -void vp8_adjust_key_frame_context(VP8_COMP *cpi) -{ - /* Clear down mmx registers to allow floating point in what follows */ - vp8_clear_system_state(); + /* Average key frame frequency */ + int av_key_frame_frequency = 0; - /* Do we have any key frame overspend to recover? */ - /* Two-pass overspend handled elsewhere. */ - if ((cpi->pass != 2) - && (cpi->projected_frame_size > cpi->per_frame_bandwidth)) - { - int overspend; + /* First key frame at start of sequence is a special case. We have no + * frequency data. + */ + if (cpi->key_frame_count == 1) { + /* Assume a default of 1 kf every 2 seconds, or the max kf interval, + * whichever is smaller. + */ + int key_freq = cpi->oxcf.key_freq > 0 ? cpi->oxcf.key_freq : 1; + av_key_frame_frequency = 1 + (int)cpi->output_framerate * 2; - /* Update the count of key frame overspend to be recovered in - * subsequent frames. A portion of the KF overspend is treated as gf - * overspend (and hence recovered more quickly) as the kf is also a - * gf. 
Otherwise the few frames following each kf tend to get more - * bits allocated than those following other gfs. - */ - overspend = (cpi->projected_frame_size - cpi->per_frame_bandwidth); - - if (cpi->oxcf.number_of_layers > 1) - cpi->kf_overspend_bits += overspend; - else - { - cpi->kf_overspend_bits += overspend * 7 / 8; - cpi->gf_overspend_bits += overspend * 1 / 8; - } - - /* Work out how much to try and recover per frame. */ - cpi->kf_bitrate_adjustment = cpi->kf_overspend_bits - / estimate_keyframe_frequency(cpi); + if (cpi->oxcf.auto_key && av_key_frame_frequency > key_freq) { + av_key_frame_frequency = key_freq; } - cpi->frames_since_key = 0; - cpi->key_frame_count++; + cpi->prior_key_frame_distance[KEY_FRAME_CONTEXT - 1] = + av_key_frame_frequency; + } else { + unsigned int total_weight = 0; + int last_kf_interval = + (cpi->frames_since_key > 0) ? cpi->frames_since_key : 1; + + /* reset keyframe context and calculate weighted average of last + * KEY_FRAME_CONTEXT keyframes + */ + for (i = 0; i < KEY_FRAME_CONTEXT; ++i) { + if (i < KEY_FRAME_CONTEXT - 1) { + cpi->prior_key_frame_distance[i] = cpi->prior_key_frame_distance[i + 1]; + } else { + cpi->prior_key_frame_distance[i] = last_kf_interval; + } + + av_key_frame_frequency += + prior_key_frame_weight[i] * cpi->prior_key_frame_distance[i]; + total_weight += prior_key_frame_weight[i]; + } + + av_key_frame_frequency /= total_weight; + } + // TODO (marpan): Given the checks above, |av_key_frame_frequency| + // should always be above 0. But for now we keep the sanity check in. 
+ if (av_key_frame_frequency == 0) av_key_frame_frequency = 1; + return av_key_frame_frequency; } +void vp8_adjust_key_frame_context(VP8_COMP *cpi) { + /* Clear down mmx registers to allow floating point in what follows */ + vp8_clear_system_state(); -void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit, int *frame_over_shoot_limit) -{ - /* Set-up bounds on acceptable frame size: */ - if (cpi->oxcf.fixed_q >= 0) - { - /* Fixed Q scenario: frame size never outranges target - * (there is no target!) - */ - *frame_under_shoot_limit = 0; - *frame_over_shoot_limit = INT_MAX; + /* Do we have any key frame overspend to recover? */ + /* Two-pass overspend handled elsewhere. */ + if ((cpi->pass != 2) && + (cpi->projected_frame_size > cpi->per_frame_bandwidth)) { + int overspend; + + /* Update the count of key frame overspend to be recovered in + * subsequent frames. A portion of the KF overspend is treated as gf + * overspend (and hence recovered more quickly) as the kf is also a + * gf. Otherwise the few frames following each kf tend to get more + * bits allocated than those following other gfs. 
+ */ + overspend = (cpi->projected_frame_size - cpi->per_frame_bandwidth); + + if (cpi->oxcf.number_of_layers > 1) { + cpi->kf_overspend_bits += overspend; + } else { + cpi->kf_overspend_bits += overspend * 7 / 8; + cpi->gf_overspend_bits += overspend * 1 / 8; } - else - { - if (cpi->common.frame_type == KEY_FRAME) - { - *frame_over_shoot_limit = cpi->this_frame_target * 9 / 8; - *frame_under_shoot_limit = cpi->this_frame_target * 7 / 8; - } - else - { - if (cpi->oxcf.number_of_layers > 1 || - cpi->common.refresh_alt_ref_frame || - cpi->common.refresh_golden_frame) - { - *frame_over_shoot_limit = cpi->this_frame_target * 9 / 8; - *frame_under_shoot_limit = cpi->this_frame_target * 7 / 8; - } - else - { - /* For CBR take buffer fullness into account */ - if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) - { - if (cpi->buffer_level >= ((cpi->oxcf.optimal_buffer_level + cpi->oxcf.maximum_buffer_size) >> 1)) - { - /* Buffer is too full so relax overshoot and tighten - * undershoot - */ - *frame_over_shoot_limit = cpi->this_frame_target * 12 / 8; - *frame_under_shoot_limit = cpi->this_frame_target * 6 / 8; - } - else if (cpi->buffer_level <= (cpi->oxcf.optimal_buffer_level >> 1)) - { - /* Buffer is too low so relax undershoot and tighten - * overshoot - */ - *frame_over_shoot_limit = cpi->this_frame_target * 10 / 8; - *frame_under_shoot_limit = cpi->this_frame_target * 4 / 8; - } - else - { - *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8; - *frame_under_shoot_limit = cpi->this_frame_target * 5 / 8; - } - } - /* VBR and CQ mode */ - /* Note that tighter restrictions here can help quality - * but hurt encode speed - */ - else - { - /* Stron overshoot limit for constrained quality */ - if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) - { - *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8; - *frame_under_shoot_limit = cpi->this_frame_target * 2 / 8; - } - else - { - *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8; - 
*frame_under_shoot_limit = cpi->this_frame_target * 5 / 8; - } - } - } - } - /* For very small rate targets where the fractional adjustment - * (eg * 7/8) may be tiny make sure there is at least a minimum - * range. - */ - *frame_over_shoot_limit += 200; - *frame_under_shoot_limit -= 200; - if ( *frame_under_shoot_limit < 0 ) - *frame_under_shoot_limit = 0; + /* Work out how much to try and recover per frame. */ + cpi->kf_bitrate_adjustment = + cpi->kf_overspend_bits / estimate_keyframe_frequency(cpi); + } - } + cpi->frames_since_key = 0; + cpi->key_frame_count++; } +void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit, + int *frame_over_shoot_limit) { + /* Set-up bounds on acceptable frame size: */ + if (cpi->oxcf.fixed_q >= 0) { + /* Fixed Q scenario: frame size never outranges target + * (there is no target!) + */ + *frame_under_shoot_limit = 0; + *frame_over_shoot_limit = INT_MAX; + } else { + if (cpi->common.frame_type == KEY_FRAME) { + *frame_over_shoot_limit = cpi->this_frame_target * 9 / 8; + *frame_under_shoot_limit = cpi->this_frame_target * 7 / 8; + } else { + if (cpi->oxcf.number_of_layers > 1 || cpi->common.refresh_alt_ref_frame || + cpi->common.refresh_golden_frame) { + *frame_over_shoot_limit = cpi->this_frame_target * 9 / 8; + *frame_under_shoot_limit = cpi->this_frame_target * 7 / 8; + } else { + /* For CBR take buffer fullness into account */ + if (cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER) { + if (cpi->buffer_level >= ((cpi->oxcf.optimal_buffer_level + + cpi->oxcf.maximum_buffer_size) >> + 1)) { + /* Buffer is too full so relax overshoot and tighten + * undershoot + */ + *frame_over_shoot_limit = cpi->this_frame_target * 12 / 8; + *frame_under_shoot_limit = cpi->this_frame_target * 6 / 8; + } else if (cpi->buffer_level <= + (cpi->oxcf.optimal_buffer_level >> 1)) { + /* Buffer is too low so relax undershoot and tighten + * overshoot + */ + *frame_over_shoot_limit = cpi->this_frame_target * 10 / 8; + 
*frame_under_shoot_limit = cpi->this_frame_target * 4 / 8; + } else { + *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8; + *frame_under_shoot_limit = cpi->this_frame_target * 5 / 8; + } + } + /* VBR and CQ mode */ + /* Note that tighter restrictions here can help quality + * but hurt encode speed + */ + else { + /* Stron overshoot limit for constrained quality */ + if (cpi->oxcf.end_usage == USAGE_CONSTRAINED_QUALITY) { + *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8; + *frame_under_shoot_limit = cpi->this_frame_target * 2 / 8; + } else { + *frame_over_shoot_limit = cpi->this_frame_target * 11 / 8; + *frame_under_shoot_limit = cpi->this_frame_target * 5 / 8; + } + } + } + } + + /* For very small rate targets where the fractional adjustment + * (eg * 7/8) may be tiny make sure there is at least a minimum + * range. + */ + *frame_over_shoot_limit += 200; + *frame_under_shoot_limit -= 200; + if (*frame_under_shoot_limit < 0) *frame_under_shoot_limit = 0; + } +} /* return of 0 means drop frame */ -int vp8_pick_frame_size(VP8_COMP *cpi) -{ - VP8_COMMON *cm = &cpi->common; +int vp8_pick_frame_size(VP8_COMP *cpi) { + VP8_COMMON *cm = &cpi->common; - if (cm->frame_type == KEY_FRAME) - calc_iframe_target_size(cpi); - else - { - calc_pframe_target_size(cpi); + if (cm->frame_type == KEY_FRAME) { + calc_iframe_target_size(cpi); + } else { + calc_pframe_target_size(cpi); - /* Check if we're dropping the frame: */ - if (cpi->drop_frame) - { - cpi->drop_frame = 0; - return 0; - } + /* Check if we're dropping the frame: */ + if (cpi->drop_frame) { + cpi->drop_frame = 0; + return 0; } - return 1; + } + return 1; } // If this just encoded frame (mcomp/transform/quant, but before loopfilter and // pack_bitstream) has large overshoot, and was not being encoded close to the @@ -1573,10 +1426,8 @@ int vp8_pick_frame_size(VP8_COMP *cpi) // TODO(marpan): Should do this exit condition during the encode_frame // (i.e., halfway during the encoding of the frame) to save 
cycles. int vp8_drop_encodedframe_overshoot(VP8_COMP *cpi, int Q) { - if (cpi->pass == 0 && - cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER && - cpi->drop_frames_allowed == 0 && - cpi->common.frame_type != KEY_FRAME) { + if (cpi->pass == 0 && cpi->oxcf.end_usage == USAGE_STREAM_FROM_SERVER && + cpi->drop_frames_allowed == 0 && cpi->common.frame_type != KEY_FRAME) { // Note: the "projected_frame_size" from encode_frame() only gives estimate // of mode/motion vector rate (in non-rd mode): so below we only require // that projected_frame_size is somewhat greater than per-frame-bandwidth, @@ -1590,10 +1441,9 @@ int vp8_drop_encodedframe_overshoot(VP8_COMP *cpi, int Q) { // residual error over 16x16 block. Should add QP dependence on threshold? int thresh_pred_err_mb = (256 << 4); int pred_err_mb = (int)(cpi->mb.prediction_error / cpi->common.MBs); - if (Q < thresh_qp && - cpi->projected_frame_size > thresh_rate && + if (Q < thresh_qp && cpi->projected_frame_size > thresh_rate && pred_err_mb > thresh_pred_err_mb) { - double new_correction_factor = cpi->rate_correction_factor; + double new_correction_factor; const int target_size = cpi->av_per_frame_bandwidth; int target_bits_per_mb; // Drop this frame: advance frame counters, and set force_maxqp flag. @@ -1611,20 +1461,24 @@ int vp8_drop_encodedframe_overshoot(VP8_COMP *cpi, int Q) { // undershoots significantly, and then we end up dropping every other // frame because the QP/rate_correction_factor may have been too low // before the drop and then takes too long to come up. - if (target_size >= (INT_MAX >> BPER_MB_NORMBITS)) - target_bits_per_mb = - (target_size / cpi->common.MBs) << BPER_MB_NORMBITS; - else + if (target_size >= (INT_MAX >> BPER_MB_NORMBITS)) { + target_bits_per_mb = (target_size / cpi->common.MBs) + << BPER_MB_NORMBITS; + } else { target_bits_per_mb = (target_size << BPER_MB_NORMBITS) / cpi->common.MBs; + } // Rate correction factor based on target_size_per_mb and max_QP. 
- new_correction_factor = (double)target_bits_per_mb / + new_correction_factor = + (double)target_bits_per_mb / (double)vp8_bits_per_mb[INTER_FRAME][cpi->worst_quality]; - if (new_correction_factor > cpi->rate_correction_factor) + if (new_correction_factor > cpi->rate_correction_factor) { cpi->rate_correction_factor = VPXMIN(2.0 * cpi->rate_correction_factor, new_correction_factor); - if (cpi->rate_correction_factor > MAX_BPB_FACTOR) + } + if (cpi->rate_correction_factor > MAX_BPB_FACTOR) { cpi->rate_correction_factor = MAX_BPB_FACTOR; + } return 1; } else { cpi->force_maxqp = 0; diff --git a/libs/libvpx/vp8/encoder/ratectrl.h b/libs/libvpx/vp8/encoder/ratectrl.h index 703de9ff55..249de4e706 100644 --- a/libs/libvpx/vp8/encoder/ratectrl.h +++ b/libs/libvpx/vp8/encoder/ratectrl.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_ENCODER_RATECTRL_H_ #define VP8_ENCODER_RATECTRL_H_ @@ -25,7 +24,9 @@ extern void vp8_setup_key_frame(VP8_COMP *cpi); extern void vp8_update_rate_correction_factors(VP8_COMP *cpi, int damp_var); extern int vp8_regulate_q(VP8_COMP *cpi, int target_bits_per_frame); extern void vp8_adjust_key_frame_context(VP8_COMP *cpi); -extern void vp8_compute_frame_size_bounds(VP8_COMP *cpi, int *frame_under_shoot_limit, int *frame_over_shoot_limit); +extern void vp8_compute_frame_size_bounds(VP8_COMP *cpi, + int *frame_under_shoot_limit, + int *frame_over_shoot_limit); /* return of 0 means drop frame */ extern int vp8_pick_frame_size(VP8_COMP *cpi); diff --git a/libs/libvpx/vp8/encoder/rdopt.c b/libs/libvpx/vp8/encoder/rdopt.c index ab0ad15990..9ba301e080 100644 --- a/libs/libvpx/vp8/encoder/rdopt.c +++ b/libs/libvpx/vp8/encoder/rdopt.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include #include #include @@ -40,19 +39,17 @@ #endif extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x); -#define MAXF(a,b) (((a) > (b)) ? 
(a) : (b)) +#define MAXF(a, b) (((a) > (b)) ? (a) : (b)) -typedef struct rate_distortion_struct -{ - int rate2; - int rate_y; - int rate_uv; - int distortion2; - int distortion_uv; +typedef struct rate_distortion_struct { + int rate2; + int rate_y; + int rate_uv; + int distortion2; + int distortion_uv; } RATE_DISTORTION; -typedef struct best_mode_struct -{ +typedef struct best_mode_struct { int yrd; int rd; int intra_rd; @@ -61,290 +58,208 @@ typedef struct best_mode_struct PARTITION_INFO partition; } BEST_MODE; -static const int auto_speed_thresh[17] = -{ - 1000, - 200, - 150, - 130, - 150, - 125, - 120, - 115, - 115, - 115, - 115, - 115, - 115, - 115, - 115, - 115, - 105 -}; +static const int auto_speed_thresh[17] = { 1000, 200, 150, 130, 150, 125, + 120, 115, 115, 115, 115, 115, + 115, 115, 115, 115, 105 }; -const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES] = -{ - ZEROMV, - DC_PRED, +const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES] = { + ZEROMV, DC_PRED, - NEARESTMV, - NEARMV, + NEARESTMV, NEARMV, - ZEROMV, - NEARESTMV, + ZEROMV, NEARESTMV, - ZEROMV, - NEARESTMV, + ZEROMV, NEARESTMV, - NEARMV, - NEARMV, + NEARMV, NEARMV, - V_PRED, - H_PRED, - TM_PRED, + V_PRED, H_PRED, TM_PRED, - NEWMV, - NEWMV, - NEWMV, + NEWMV, NEWMV, NEWMV, - SPLITMV, - SPLITMV, - SPLITMV, + SPLITMV, SPLITMV, SPLITMV, - B_PRED, + B_PRED, }; /* This table determines the search order in reference frame priority order, * which may not necessarily match INTRA,LAST,GOLDEN,ARF */ -const int vp8_ref_frame_order[MAX_MODES] = -{ - 1, - 0, +const int vp8_ref_frame_order[MAX_MODES] = { + 1, 0, - 1, - 1, + 1, 1, - 2, - 2, + 2, 2, - 3, - 3, + 3, 3, - 2, - 3, + 2, 3, - 0, - 0, - 0, + 0, 0, 0, - 1, - 2, - 3, + 1, 2, 3, - 1, - 2, - 3, + 1, 2, 3, - 0, + 0, }; -static void fill_token_costs( - int c[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][MAX_ENTROPY_TOKENS], - const vp8_prob p[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES] -) -{ - int i, j, k; +static void fill_token_costs(int 
c[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] + [MAX_ENTROPY_TOKENS], + const vp8_prob p[BLOCK_TYPES][COEF_BANDS] + [PREV_COEF_CONTEXTS] + [ENTROPY_NODES]) { + int i, j, k; - - for (i = 0; i < BLOCK_TYPES; i++) - for (j = 0; j < COEF_BANDS; j++) - for (k = 0; k < PREV_COEF_CONTEXTS; k++) - - /* check for pt=0 and band > 1 if block type 0 - * and 0 if blocktype 1 - */ - if (k == 0 && j > (i == 0)) - vp8_cost_tokens2(c[i][j][k], p [i][j][k], vp8_coef_tree, 2); - else - vp8_cost_tokens(c[i][j][k], p [i][j][k], vp8_coef_tree); + for (i = 0; i < BLOCK_TYPES; ++i) { + for (j = 0; j < COEF_BANDS; ++j) { + for (k = 0; k < PREV_COEF_CONTEXTS; ++k) { + /* check for pt=0 and band > 1 if block type 0 + * and 0 if blocktype 1 + */ + if (k == 0 && j > (i == 0)) { + vp8_cost_tokens2(c[i][j][k], p[i][j][k], vp8_coef_tree, 2); + } else { + vp8_cost_tokens(c[i][j][k], p[i][j][k], vp8_coef_tree); + } + } + } + } } -static const int rd_iifactor[32] = -{ - 4, 4, 3, 2, 1, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0 -}; +static const int rd_iifactor[32] = { 4, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; /* values are now correlated to quantizer */ -static const int sad_per_bit16lut[QINDEX_RANGE] = -{ - 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 10, 10, - 10, 10, 10, 10, 10, 10, 11, 11, - 11, 11, 11, 11, 12, 12, 12, 12, - 12, 12, 13, 13, 13, 13, 14, 14 +static const int sad_per_bit16lut[QINDEX_RANGE] = { + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 
7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, + 11, 11, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14 }; -static const int sad_per_bit4lut[QINDEX_RANGE] = -{ - 2, 2, 2, 2, 2, 2, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 5, 5, - 5, 5, 5, 5, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, - 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 8, 8, 8, - 8, 8, 9, 9, 9, 9, 9, 9, - 10, 10, 10, 10, 10, 10, 10, 10, - 11, 11, 11, 11, 11, 11, 11, 11, - 12, 12, 12, 12, 12, 12, 12, 12, - 13, 13, 13, 13, 13, 13, 13, 14, - 14, 14, 14, 14, 15, 15, 15, 15, - 16, 16, 16, 16, 17, 17, 17, 18, - 18, 18, 19, 19, 19, 20, 20, 20, +static const int sad_per_bit4lut[QINDEX_RANGE] = { + 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, + 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, + 12, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, + 16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 20, 20, 20, }; -void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex) -{ - cpi->mb.sadperbit16 = sad_per_bit16lut[QIndex]; - cpi->mb.sadperbit4 = sad_per_bit4lut[QIndex]; +void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex) { + cpi->mb.sadperbit16 = sad_per_bit16lut[QIndex]; + cpi->mb.sadperbit4 = sad_per_bit4lut[QIndex]; } -void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue) -{ - int q; - int i; - double capped_q = (Qvalue < 160) ? (double)Qvalue : 160.0; - double rdconst = 2.80; +void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue) { + int q; + int i; + double capped_q = (Qvalue < 160) ? 
(double)Qvalue : 160.0; + double rdconst = 2.80; - vp8_clear_system_state(); + vp8_clear_system_state(); - /* Further tests required to see if optimum is different - * for key frames, golden frames and arf frames. + /* Further tests required to see if optimum is different + * for key frames, golden frames and arf frames. + */ + cpi->RDMULT = (int)(rdconst * (capped_q * capped_q)); + + /* Extend rate multiplier along side quantizer zbin increases */ + if (cpi->mb.zbin_over_quant > 0) { + double oq_factor; + double modq; + + /* Experimental code using the same basic equation as used for Q above + * The units of cpi->mb.zbin_over_quant are 1/128 of Q bin size */ - cpi->RDMULT = (int)(rdconst * (capped_q * capped_q)); + oq_factor = 1.0 + ((double)0.0015625 * cpi->mb.zbin_over_quant); + modq = (int)((double)capped_q * oq_factor); + cpi->RDMULT = (int)(rdconst * (modq * modq)); + } - /* Extend rate multiplier along side quantizer zbin increases */ - if (cpi->mb.zbin_over_quant > 0) - { - double oq_factor; - double modq; + if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME)) { + if (cpi->twopass.next_iiratio > 31) { + cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4; + } else { + cpi->RDMULT += + (cpi->RDMULT * rd_iifactor[cpi->twopass.next_iiratio]) >> 4; + } + } - /* Experimental code using the same basic equation as used for Q above - * The units of cpi->mb.zbin_over_quant are 1/128 of Q bin size - */ - oq_factor = 1.0 + ((double)0.0015625 * cpi->mb.zbin_over_quant); - modq = (int)((double)capped_q * oq_factor); - cpi->RDMULT = (int)(rdconst * (modq * modq)); + cpi->mb.errorperbit = (cpi->RDMULT / 110); + cpi->mb.errorperbit += (cpi->mb.errorperbit == 0); + + vp8_set_speed_features(cpi); + + for (i = 0; i < MAX_MODES; ++i) { + x->mode_test_hit_counts[i] = 0; + } + + q = (int)pow(Qvalue, 1.25); + + if (q < 8) q = 8; + + if (cpi->RDMULT > 1000) { + cpi->RDDIV = 1; + cpi->RDMULT /= 100; + + for (i = 0; i < MAX_MODES; ++i) { + if (cpi->sf.thresh_mult[i] < 
INT_MAX) { + x->rd_threshes[i] = cpi->sf.thresh_mult[i] * q / 100; + } else { + x->rd_threshes[i] = INT_MAX; + } + + cpi->rd_baseline_thresh[i] = x->rd_threshes[i]; + } + } else { + cpi->RDDIV = 100; + + for (i = 0; i < MAX_MODES; ++i) { + if (cpi->sf.thresh_mult[i] < (INT_MAX / q)) { + x->rd_threshes[i] = cpi->sf.thresh_mult[i] * q; + } else { + x->rd_threshes[i] = INT_MAX; + } + + cpi->rd_baseline_thresh[i] = x->rd_threshes[i]; + } + } + + { + /* build token cost array for the type of frame we have now */ + FRAME_CONTEXT *l = &cpi->lfc_n; + + if (cpi->common.refresh_alt_ref_frame) { + l = &cpi->lfc_a; + } else if (cpi->common.refresh_golden_frame) { + l = &cpi->lfc_g; } - if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME)) - { - if (cpi->twopass.next_iiratio > 31) - cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4; - else - cpi->RDMULT += - (cpi->RDMULT * rd_iifactor[cpi->twopass.next_iiratio]) >> 4; - } - - cpi->mb.errorperbit = (cpi->RDMULT / 110); - cpi->mb.errorperbit += (cpi->mb.errorperbit==0); - - vp8_set_speed_features(cpi); - - for (i = 0; i < MAX_MODES; i++) - { - x->mode_test_hit_counts[i] = 0; - } - - q = (int)pow(Qvalue, 1.25); - - if (q < 8) - q = 8; - - if (cpi->RDMULT > 1000) - { - cpi->RDDIV = 1; - cpi->RDMULT /= 100; - - for (i = 0; i < MAX_MODES; i++) - { - if (cpi->sf.thresh_mult[i] < INT_MAX) - { - x->rd_threshes[i] = cpi->sf.thresh_mult[i] * q / 100; - } - else - { - x->rd_threshes[i] = INT_MAX; - } - - cpi->rd_baseline_thresh[i] = x->rd_threshes[i]; - } - } - else - { - cpi->RDDIV = 100; - - for (i = 0; i < MAX_MODES; i++) - { - if (cpi->sf.thresh_mult[i] < (INT_MAX / q)) - { - x->rd_threshes[i] = cpi->sf.thresh_mult[i] * q; - } - else - { - x->rd_threshes[i] = INT_MAX; - } - - cpi->rd_baseline_thresh[i] = x->rd_threshes[i]; - } - } - - { - /* build token cost array for the type of frame we have now */ - FRAME_CONTEXT *l = &cpi->lfc_n; - - if(cpi->common.refresh_alt_ref_frame) - l = &cpi->lfc_a; - else 
if(cpi->common.refresh_golden_frame) - l = &cpi->lfc_g; - - fill_token_costs( - cpi->mb.token_costs, - (const vp8_prob( *)[8][3][11]) l->coef_probs - ); - /* - fill_token_costs( - cpi->mb.token_costs, - (const vp8_prob( *)[8][3][11]) cpi->common.fc.coef_probs); - */ - - - /* TODO make these mode costs depend on last,alt or gold too. (jbb) */ - vp8_init_mode_costs(cpi); - } + fill_token_costs(cpi->mb.token_costs, + (const vp8_prob(*)[8][3][11])l->coef_probs); + /* + fill_token_costs( + cpi->mb.token_costs, + (const vp8_prob( *)[8][3][11]) cpi->common.fc.coef_probs); + */ + /* TODO make these mode costs depend on last,alt or gold too. (jbb) */ + vp8_init_mode_costs(cpi); + } } -void vp8_auto_select_speed(VP8_COMP *cpi) -{ - int milliseconds_for_compress = (int)(1000000 / cpi->framerate); +void vp8_auto_select_speed(VP8_COMP *cpi) { + int milliseconds_for_compress = (int)(1000000 / cpi->framerate); - milliseconds_for_compress = milliseconds_for_compress * (16 - cpi->oxcf.cpu_used) / 16; + milliseconds_for_compress = + milliseconds_for_compress * (16 - cpi->oxcf.cpu_used) / 16; #if 0 @@ -360,728 +275,648 @@ void vp8_auto_select_speed(VP8_COMP *cpi) #endif - if (cpi->avg_pick_mode_time < milliseconds_for_compress && (cpi->avg_encode_time - cpi->avg_pick_mode_time) < milliseconds_for_compress) - { - if (cpi->avg_pick_mode_time == 0) - { - cpi->Speed = 4; - } - else - { - if (milliseconds_for_compress * 100 < cpi->avg_encode_time * 95) - { - cpi->Speed += 2; - cpi->avg_pick_mode_time = 0; - cpi->avg_encode_time = 0; - - if (cpi->Speed > 16) - { - cpi->Speed = 16; - } - } - - if (milliseconds_for_compress * 100 > cpi->avg_encode_time * auto_speed_thresh[cpi->Speed]) - { - cpi->Speed -= 1; - cpi->avg_pick_mode_time = 0; - cpi->avg_encode_time = 0; - - /* In real-time mode, cpi->speed is in [4, 16]. 
*/ - if (cpi->Speed < 4) - { - cpi->Speed = 4; - } - } - } - } - else - { - cpi->Speed += 4; - - if (cpi->Speed > 16) - cpi->Speed = 16; - - + if (cpi->avg_pick_mode_time < milliseconds_for_compress && + (cpi->avg_encode_time - cpi->avg_pick_mode_time) < + milliseconds_for_compress) { + if (cpi->avg_pick_mode_time == 0) { + cpi->Speed = 4; + } else { + if (milliseconds_for_compress * 100 < cpi->avg_encode_time * 95) { + cpi->Speed += 2; cpi->avg_pick_mode_time = 0; cpi->avg_encode_time = 0; - } -} -int vp8_block_error_c(short *coeff, short *dqcoeff) -{ - int i; - int error = 0; - - for (i = 0; i < 16; i++) - { - int this_diff = coeff[i] - dqcoeff[i]; - error += this_diff * this_diff; - } - - return error; -} - -int vp8_mbblock_error_c(MACROBLOCK *mb, int dc) -{ - BLOCK *be; - BLOCKD *bd; - int i, j; - int berror, error = 0; - - for (i = 0; i < 16; i++) - { - be = &mb->block[i]; - bd = &mb->e_mbd.block[i]; - - berror = 0; - - for (j = dc; j < 16; j++) - { - int this_diff = be->coeff[j] - bd->dqcoeff[j]; - berror += this_diff * this_diff; + if (cpi->Speed > 16) { + cpi->Speed = 16; } + } - error += berror; - } + if (milliseconds_for_compress * 100 > + cpi->avg_encode_time * auto_speed_thresh[cpi->Speed]) { + cpi->Speed -= 1; + cpi->avg_pick_mode_time = 0; + cpi->avg_encode_time = 0; - return error; -} - -int vp8_mbuverror_c(MACROBLOCK *mb) -{ - - BLOCK *be; - BLOCKD *bd; - - - int i; - int error = 0; - - for (i = 16; i < 24; i++) - { - be = &mb->block[i]; - bd = &mb->e_mbd.block[i]; - - error += vp8_block_error_c(be->coeff, bd->dqcoeff); - } - - return error; -} - -int VP8_UVSSE(MACROBLOCK *x) -{ - unsigned char *uptr, *vptr; - unsigned char *upred_ptr = (*(x->block[16].base_src) + x->block[16].src); - unsigned char *vpred_ptr = (*(x->block[20].base_src) + x->block[20].src); - int uv_stride = x->block[16].src_stride; - - unsigned int sse1 = 0; - unsigned int sse2 = 0; - int mv_row = x->e_mbd.mode_info_context->mbmi.mv.as_mv.row; - int mv_col = 
x->e_mbd.mode_info_context->mbmi.mv.as_mv.col; - int offset; - int pre_stride = x->e_mbd.pre.uv_stride; - - if (mv_row < 0) - mv_row -= 1; - else - mv_row += 1; - - if (mv_col < 0) - mv_col -= 1; - else - mv_col += 1; - - mv_row /= 2; - mv_col /= 2; - - offset = (mv_row >> 3) * pre_stride + (mv_col >> 3); - uptr = x->e_mbd.pre.u_buffer + offset; - vptr = x->e_mbd.pre.v_buffer + offset; - - if ((mv_row | mv_col) & 7) - { - vpx_sub_pixel_variance8x8(uptr, pre_stride, - mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2); - vpx_sub_pixel_variance8x8(vptr, pre_stride, - mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1); - sse2 += sse1; - } - else - { - vpx_variance8x8(uptr, pre_stride, - upred_ptr, uv_stride, &sse2); - vpx_variance8x8(vptr, pre_stride, - vpred_ptr, uv_stride, &sse1); - sse2 += sse1; - } - return sse2; - -} - -static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type, ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) -{ - int c = !type; /* start at coef 0, unless Y with Y2 */ - int eob = (int)(*b->eob); - int pt ; /* surrounding block/prev coef predictor */ - int cost = 0; - short *qcoeff_ptr = b->qcoeff; - - VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); - - assert(eob <= 16); - for (; c < eob; c++) - { - const int v = qcoeff_ptr[vp8_default_zig_zag1d[c]]; - const int t = vp8_dct_value_tokens_ptr[v].Token; - cost += mb->token_costs [type] [vp8_coef_bands[c]] [pt] [t]; - cost += vp8_dct_value_cost_ptr[v]; - pt = vp8_prev_token_class[t]; - } - - if (c < 16) - cost += mb->token_costs [type] [vp8_coef_bands[c]] [pt] [DCT_EOB_TOKEN]; - - pt = (c != !type); /* is eob first coefficient; */ - *a = *l = pt; - - return cost; -} - -static int vp8_rdcost_mby(MACROBLOCK *mb) -{ - int cost = 0; - int b; - MACROBLOCKD *x = &mb->e_mbd; - ENTROPY_CONTEXT_PLANES t_above, t_left; - ENTROPY_CONTEXT *ta; - ENTROPY_CONTEXT *tl; - - memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES)); - memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES)); - - ta 
= (ENTROPY_CONTEXT *)&t_above; - tl = (ENTROPY_CONTEXT *)&t_left; - - for (b = 0; b < 16; b++) - cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_NO_DC, - ta + vp8_block2above[b], tl + vp8_block2left[b]); - - cost += cost_coeffs(mb, x->block + 24, PLANE_TYPE_Y2, - ta + vp8_block2above[24], tl + vp8_block2left[24]); - - return cost; -} - -static void macro_block_yrd( MACROBLOCK *mb, - int *Rate, - int *Distortion) -{ - int b; - MACROBLOCKD *const x = &mb->e_mbd; - BLOCK *const mb_y2 = mb->block + 24; - BLOCKD *const x_y2 = x->block + 24; - short *Y2DCPtr = mb_y2->src_diff; - BLOCK *beptr; - int d; - - vp8_subtract_mby( mb->src_diff, *(mb->block[0].base_src), - mb->block[0].src_stride, mb->e_mbd.predictor, 16); - - /* Fdct and building the 2nd order block */ - for (beptr = mb->block; beptr < mb->block + 16; beptr += 2) - { - mb->short_fdct8x4(beptr->src_diff, beptr->coeff, 32); - *Y2DCPtr++ = beptr->coeff[0]; - *Y2DCPtr++ = beptr->coeff[16]; - } - - /* 2nd order fdct */ - mb->short_walsh4x4(mb_y2->src_diff, mb_y2->coeff, 8); - - /* Quantization */ - for (b = 0; b < 16; b++) - { - mb->quantize_b(&mb->block[b], &mb->e_mbd.block[b]); - } - - /* DC predication and Quantization of 2nd Order block */ - mb->quantize_b(mb_y2, x_y2); - - /* Distortion */ - d = vp8_mbblock_error(mb, 1) << 2; - d += vp8_block_error(mb_y2->coeff, x_y2->dqcoeff); - - *Distortion = (d >> 4); - - /* rate */ - *Rate = vp8_rdcost_mby(mb); -} - -static void copy_predictor(unsigned char *dst, const unsigned char *predictor) -{ - const unsigned int *p = (const unsigned int *)predictor; - unsigned int *d = (unsigned int *)dst; - d[0] = p[0]; - d[4] = p[4]; - d[8] = p[8]; - d[12] = p[12]; -} -static int rd_pick_intra4x4block( - MACROBLOCK *x, - BLOCK *be, - BLOCKD *b, - B_PREDICTION_MODE *best_mode, - const int *bmode_costs, - ENTROPY_CONTEXT *a, - ENTROPY_CONTEXT *l, - - int *bestrate, - int *bestratey, - int *bestdistortion) -{ - B_PREDICTION_MODE mode; - int best_rd = INT_MAX; - int rate = 0; - int 
distortion; - - ENTROPY_CONTEXT ta = *a, tempa = *a; - ENTROPY_CONTEXT tl = *l, templ = *l; - /* - * The predictor buffer is a 2d buffer with a stride of 16. Create - * a temp buffer that meets the stride requirements, but we are only - * interested in the left 4x4 block - * */ - DECLARE_ALIGNED(16, unsigned char, best_predictor[16*4]); - DECLARE_ALIGNED(16, short, best_dqcoeff[16]); - int dst_stride = x->e_mbd.dst.y_stride; - unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset; - - unsigned char *Above = dst - dst_stride; - unsigned char *yleft = dst - 1; - unsigned char top_left = Above[-1]; - - for (mode = B_DC_PRED; mode <= B_HU_PRED; mode++) - { - int this_rd; - int ratey; - - rate = bmode_costs[mode]; - - vp8_intra4x4_predict(Above, yleft, dst_stride, mode, - b->predictor, 16, top_left); - vp8_subtract_b(be, b, 16); - x->short_fdct4x4(be->src_diff, be->coeff, 32); - x->quantize_b(be, b); - - tempa = ta; - templ = tl; - - ratey = cost_coeffs(x, b, PLANE_TYPE_Y_WITH_DC, &tempa, &templ); - rate += ratey; - distortion = vp8_block_error(be->coeff, b->dqcoeff) >> 2; - - this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion); - - if (this_rd < best_rd) - { - *bestrate = rate; - *bestratey = ratey; - *bestdistortion = distortion; - best_rd = this_rd; - *best_mode = mode; - *a = tempa; - *l = templ; - copy_predictor(best_predictor, b->predictor); - memcpy(best_dqcoeff, b->dqcoeff, 32); + /* In real-time mode, cpi->speed is in [4, 16]. 
*/ + if (cpi->Speed < 4) { + cpi->Speed = 4; } + } } - b->bmi.as_mode = *best_mode; + } else { + cpi->Speed += 4; - vp8_short_idct4x4llm(best_dqcoeff, best_predictor, 16, dst, dst_stride); + if (cpi->Speed > 16) cpi->Speed = 16; - return best_rd; + cpi->avg_pick_mode_time = 0; + cpi->avg_encode_time = 0; + } } -static int rd_pick_intra4x4mby_modes(MACROBLOCK *mb, int *Rate, - int *rate_y, int *Distortion, int best_rd) -{ - MACROBLOCKD *const xd = &mb->e_mbd; - int i; - int cost = mb->mbmode_cost [xd->frame_type] [B_PRED]; - int distortion = 0; - int tot_rate_y = 0; - int64_t total_rd = 0; - ENTROPY_CONTEXT_PLANES t_above, t_left; - ENTROPY_CONTEXT *ta; - ENTROPY_CONTEXT *tl; - const int *bmode_costs; +int vp8_block_error_c(short *coeff, short *dqcoeff) { + int i; + int error = 0; - memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES)); - memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES)); + for (i = 0; i < 16; ++i) { + int this_diff = coeff[i] - dqcoeff[i]; + error += this_diff * this_diff; + } - ta = (ENTROPY_CONTEXT *)&t_above; - tl = (ENTROPY_CONTEXT *)&t_left; - - intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16); - - bmode_costs = mb->inter_bmode_costs; - - for (i = 0; i < 16; i++) - { - MODE_INFO *const mic = xd->mode_info_context; - const int mis = xd->mode_info_stride; - B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode); - int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry), UNINITIALIZED_IS_SAFE(d); - - if (mb->e_mbd.frame_type == KEY_FRAME) - { - const B_PREDICTION_MODE A = above_block_mode(mic, i, mis); - const B_PREDICTION_MODE L = left_block_mode(mic, i); - - bmode_costs = mb->bmode_costs[A][L]; - } - - total_rd += rd_pick_intra4x4block( - mb, mb->block + i, xd->block + i, &best_mode, bmode_costs, - ta + vp8_block2above[i], - tl + vp8_block2left[i], &r, &ry, &d); - - cost += r; - distortion += d; - tot_rate_y += ry; - - mic->bmi[i].as_mode = best_mode; - - if(total_rd >= 
(int64_t)best_rd) - break; - } - - if(total_rd >= (int64_t)best_rd) - return INT_MAX; - - *Rate = cost; - *rate_y = tot_rate_y; - *Distortion = distortion; - - return RDCOST(mb->rdmult, mb->rddiv, cost, distortion); + return error; } +int vp8_mbblock_error_c(MACROBLOCK *mb, int dc) { + BLOCK *be; + BLOCKD *bd; + int i, j; + int berror, error = 0; -static int rd_pick_intra16x16mby_mode(MACROBLOCK *x, - int *Rate, - int *rate_y, - int *Distortion) -{ - MB_PREDICTION_MODE mode; - MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected); - int rate, ratey; - int distortion; - int best_rd = INT_MAX; + for (i = 0; i < 16; ++i) { + be = &mb->block[i]; + bd = &mb->e_mbd.block[i]; + + berror = 0; + + for (j = dc; j < 16; ++j) { + int this_diff = be->coeff[j] - bd->dqcoeff[j]; + berror += this_diff * this_diff; + } + + error += berror; + } + + return error; +} + +int vp8_mbuverror_c(MACROBLOCK *mb) { + BLOCK *be; + BLOCKD *bd; + + int i; + int error = 0; + + for (i = 16; i < 24; ++i) { + be = &mb->block[i]; + bd = &mb->e_mbd.block[i]; + + error += vp8_block_error_c(be->coeff, bd->dqcoeff); + } + + return error; +} + +int VP8_UVSSE(MACROBLOCK *x) { + unsigned char *uptr, *vptr; + unsigned char *upred_ptr = (*(x->block[16].base_src) + x->block[16].src); + unsigned char *vpred_ptr = (*(x->block[20].base_src) + x->block[20].src); + int uv_stride = x->block[16].src_stride; + + unsigned int sse1 = 0; + unsigned int sse2 = 0; + int mv_row = x->e_mbd.mode_info_context->mbmi.mv.as_mv.row; + int mv_col = x->e_mbd.mode_info_context->mbmi.mv.as_mv.col; + int offset; + int pre_stride = x->e_mbd.pre.uv_stride; + + if (mv_row < 0) { + mv_row -= 1; + } else { + mv_row += 1; + } + + if (mv_col < 0) { + mv_col -= 1; + } else { + mv_col += 1; + } + + mv_row /= 2; + mv_col /= 2; + + offset = (mv_row >> 3) * pre_stride + (mv_col >> 3); + uptr = x->e_mbd.pre.u_buffer + offset; + vptr = x->e_mbd.pre.v_buffer + offset; + + if ((mv_row | mv_col) & 7) { + vpx_sub_pixel_variance8x8(uptr, pre_stride, 
mv_col & 7, mv_row & 7, + upred_ptr, uv_stride, &sse2); + vpx_sub_pixel_variance8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, + vpred_ptr, uv_stride, &sse1); + sse2 += sse1; + } else { + vpx_variance8x8(uptr, pre_stride, upred_ptr, uv_stride, &sse2); + vpx_variance8x8(vptr, pre_stride, vpred_ptr, uv_stride, &sse1); + sse2 += sse1; + } + return sse2; +} + +static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type, ENTROPY_CONTEXT *a, + ENTROPY_CONTEXT *l) { + int c = !type; /* start at coef 0, unless Y with Y2 */ + int eob = (int)(*b->eob); + int pt; /* surrounding block/prev coef predictor */ + int cost = 0; + short *qcoeff_ptr = b->qcoeff; + + VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); + + assert(eob <= 16); + for (; c < eob; ++c) { + const int v = qcoeff_ptr[vp8_default_zig_zag1d[c]]; + const int t = vp8_dct_value_tokens_ptr[v].Token; + cost += mb->token_costs[type][vp8_coef_bands[c]][pt][t]; + cost += vp8_dct_value_cost_ptr[v]; + pt = vp8_prev_token_class[t]; + } + + if (c < 16) { + cost += mb->token_costs[type][vp8_coef_bands[c]][pt][DCT_EOB_TOKEN]; + } + + pt = (c != !type); /* is eob first coefficient; */ + *a = *l = pt; + + return cost; +} + +static int vp8_rdcost_mby(MACROBLOCK *mb) { + int cost = 0; + int b; + MACROBLOCKD *x = &mb->e_mbd; + ENTROPY_CONTEXT_PLANES t_above, t_left; + ENTROPY_CONTEXT *ta; + ENTROPY_CONTEXT *tl; + + memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES)); + memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES)); + + ta = (ENTROPY_CONTEXT *)&t_above; + tl = (ENTROPY_CONTEXT *)&t_left; + + for (b = 0; b < 16; ++b) { + cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_NO_DC, + ta + vp8_block2above[b], tl + vp8_block2left[b]); + } + + cost += cost_coeffs(mb, x->block + 24, PLANE_TYPE_Y2, + ta + vp8_block2above[24], tl + vp8_block2left[24]); + + return cost; +} + +static void macro_block_yrd(MACROBLOCK *mb, int *Rate, int *Distortion) { + int b; + MACROBLOCKD *const x = &mb->e_mbd; + BLOCK *const 
mb_y2 = mb->block + 24; + BLOCKD *const x_y2 = x->block + 24; + short *Y2DCPtr = mb_y2->src_diff; + BLOCK *beptr; + int d; + + vp8_subtract_mby(mb->src_diff, *(mb->block[0].base_src), + mb->block[0].src_stride, mb->e_mbd.predictor, 16); + + /* Fdct and building the 2nd order block */ + for (beptr = mb->block; beptr < mb->block + 16; beptr += 2) { + mb->short_fdct8x4(beptr->src_diff, beptr->coeff, 32); + *Y2DCPtr++ = beptr->coeff[0]; + *Y2DCPtr++ = beptr->coeff[16]; + } + + /* 2nd order fdct */ + mb->short_walsh4x4(mb_y2->src_diff, mb_y2->coeff, 8); + + /* Quantization */ + for (b = 0; b < 16; ++b) { + mb->quantize_b(&mb->block[b], &mb->e_mbd.block[b]); + } + + /* DC predication and Quantization of 2nd Order block */ + mb->quantize_b(mb_y2, x_y2); + + /* Distortion */ + d = vp8_mbblock_error(mb, 1) << 2; + d += vp8_block_error(mb_y2->coeff, x_y2->dqcoeff); + + *Distortion = (d >> 4); + + /* rate */ + *Rate = vp8_rdcost_mby(mb); +} + +static void copy_predictor(unsigned char *dst, const unsigned char *predictor) { + const unsigned int *p = (const unsigned int *)predictor; + unsigned int *d = (unsigned int *)dst; + d[0] = p[0]; + d[4] = p[4]; + d[8] = p[8]; + d[12] = p[12]; +} +static int rd_pick_intra4x4block(MACROBLOCK *x, BLOCK *be, BLOCKD *b, + B_PREDICTION_MODE *best_mode, + const int *bmode_costs, ENTROPY_CONTEXT *a, + ENTROPY_CONTEXT *l, + + int *bestrate, int *bestratey, + int *bestdistortion) { + B_PREDICTION_MODE mode; + int best_rd = INT_MAX; + int rate = 0; + int distortion; + + ENTROPY_CONTEXT ta = *a, tempa = *a; + ENTROPY_CONTEXT tl = *l, templ = *l; + /* + * The predictor buffer is a 2d buffer with a stride of 16. 
Create + * a temp buffer that meets the stride requirements, but we are only + * interested in the left 4x4 block + * */ + DECLARE_ALIGNED(16, unsigned char, best_predictor[16 * 4]); + DECLARE_ALIGNED(16, short, best_dqcoeff[16]); + int dst_stride = x->e_mbd.dst.y_stride; + unsigned char *dst = x->e_mbd.dst.y_buffer + b->offset; + + unsigned char *Above = dst - dst_stride; + unsigned char *yleft = dst - 1; + unsigned char top_left = Above[-1]; + + for (mode = B_DC_PRED; mode <= B_HU_PRED; ++mode) { int this_rd; - MACROBLOCKD *xd = &x->e_mbd; + int ratey; - /* Y Search for 16x16 intra prediction mode */ - for (mode = DC_PRED; mode <= TM_PRED; mode++) - { - xd->mode_info_context->mbmi.mode = mode; + rate = bmode_costs[mode]; - vp8_build_intra_predictors_mby_s(xd, - xd->dst.y_buffer - xd->dst.y_stride, - xd->dst.y_buffer - 1, - xd->dst.y_stride, - xd->predictor, - 16); + vp8_intra4x4_predict(Above, yleft, dst_stride, mode, b->predictor, 16, + top_left); + vp8_subtract_b(be, b, 16); + x->short_fdct4x4(be->src_diff, be->coeff, 32); + x->quantize_b(be, b); - macro_block_yrd(x, &ratey, &distortion); - rate = ratey + x->mbmode_cost[xd->frame_type] - [xd->mode_info_context->mbmi.mode]; + tempa = ta; + templ = tl; - this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion); + ratey = cost_coeffs(x, b, PLANE_TYPE_Y_WITH_DC, &tempa, &templ); + rate += ratey; + distortion = vp8_block_error(be->coeff, b->dqcoeff) >> 2; - if (this_rd < best_rd) - { - mode_selected = mode; - best_rd = this_rd; - *Rate = rate; - *rate_y = ratey; - *Distortion = distortion; - } + this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion); + + if (this_rd < best_rd) { + *bestrate = rate; + *bestratey = ratey; + *bestdistortion = distortion; + best_rd = this_rd; + *best_mode = mode; + *a = tempa; + *l = templ; + copy_predictor(best_predictor, b->predictor); + memcpy(best_dqcoeff, b->dqcoeff, 32); + } + } + b->bmi.as_mode = *best_mode; + + vp8_short_idct4x4llm(best_dqcoeff, best_predictor, 16, dst, 
dst_stride); + + return best_rd; +} + +static int rd_pick_intra4x4mby_modes(MACROBLOCK *mb, int *Rate, int *rate_y, + int *Distortion, int best_rd) { + MACROBLOCKD *const xd = &mb->e_mbd; + int i; + int cost = mb->mbmode_cost[xd->frame_type][B_PRED]; + int distortion = 0; + int tot_rate_y = 0; + int64_t total_rd = 0; + ENTROPY_CONTEXT_PLANES t_above, t_left; + ENTROPY_CONTEXT *ta; + ENTROPY_CONTEXT *tl; + const int *bmode_costs; + + memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES)); + memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES)); + + ta = (ENTROPY_CONTEXT *)&t_above; + tl = (ENTROPY_CONTEXT *)&t_left; + + intra_prediction_down_copy(xd, xd->dst.y_buffer - xd->dst.y_stride + 16); + + bmode_costs = mb->inter_bmode_costs; + + for (i = 0; i < 16; ++i) { + MODE_INFO *const mic = xd->mode_info_context; + const int mis = xd->mode_info_stride; + B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode); + int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry), + UNINITIALIZED_IS_SAFE(d); + + if (mb->e_mbd.frame_type == KEY_FRAME) { + const B_PREDICTION_MODE A = above_block_mode(mic, i, mis); + const B_PREDICTION_MODE L = left_block_mode(mic, i); + + bmode_costs = mb->bmode_costs[A][L]; } - xd->mode_info_context->mbmi.mode = mode_selected; - return best_rd; + total_rd += rd_pick_intra4x4block( + mb, mb->block + i, xd->block + i, &best_mode, bmode_costs, + ta + vp8_block2above[i], tl + vp8_block2left[i], &r, &ry, &d); + + cost += r; + distortion += d; + tot_rate_y += ry; + + mic->bmi[i].as_mode = best_mode; + + if (total_rd >= (int64_t)best_rd) break; + } + + if (total_rd >= (int64_t)best_rd) return INT_MAX; + + *Rate = cost; + *rate_y = tot_rate_y; + *Distortion = distortion; + + return RDCOST(mb->rdmult, mb->rddiv, cost, distortion); } -static int rd_cost_mbuv(MACROBLOCK *mb) -{ - int b; - int cost = 0; - MACROBLOCKD *x = &mb->e_mbd; - ENTROPY_CONTEXT_PLANES t_above, t_left; - ENTROPY_CONTEXT *ta; - ENTROPY_CONTEXT *tl; +static 
int rd_pick_intra16x16mby_mode(MACROBLOCK *x, int *Rate, int *rate_y, + int *Distortion) { + MB_PREDICTION_MODE mode; + MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected); + int rate, ratey; + int distortion; + int best_rd = INT_MAX; + int this_rd; + MACROBLOCKD *xd = &x->e_mbd; - memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES)); - memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES)); + /* Y Search for 16x16 intra prediction mode */ + for (mode = DC_PRED; mode <= TM_PRED; ++mode) { + xd->mode_info_context->mbmi.mode = mode; - ta = (ENTROPY_CONTEXT *)&t_above; - tl = (ENTROPY_CONTEXT *)&t_left; + vp8_build_intra_predictors_mby_s(xd, xd->dst.y_buffer - xd->dst.y_stride, + xd->dst.y_buffer - 1, xd->dst.y_stride, + xd->predictor, 16); - for (b = 16; b < 24; b++) - cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_UV, - ta + vp8_block2above[b], tl + vp8_block2left[b]); + macro_block_yrd(x, &ratey, &distortion); + rate = ratey + + x->mbmode_cost[xd->frame_type][xd->mode_info_context->mbmi.mode]; - return cost; + this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion); + + if (this_rd < best_rd) { + mode_selected = mode; + best_rd = this_rd; + *Rate = rate; + *rate_y = ratey; + *Distortion = distortion; + } + } + + xd->mode_info_context->mbmi.mode = mode_selected; + return best_rd; } +static int rd_cost_mbuv(MACROBLOCK *mb) { + int b; + int cost = 0; + MACROBLOCKD *x = &mb->e_mbd; + ENTROPY_CONTEXT_PLANES t_above, t_left; + ENTROPY_CONTEXT *ta; + ENTROPY_CONTEXT *tl; + + memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES)); + memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES)); + + ta = (ENTROPY_CONTEXT *)&t_above; + tl = (ENTROPY_CONTEXT *)&t_left; + + for (b = 16; b < 24; ++b) { + cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_UV, + ta + vp8_block2above[b], tl + vp8_block2left[b]); + } + + return cost; +} static int rd_inter16x16_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate, - int 
*distortion, int fullpixel) -{ - (void)cpi; - (void)fullpixel; + int *distortion, int fullpixel) { + (void)cpi; + (void)fullpixel; - vp8_build_inter16x16_predictors_mbuv(&x->e_mbd); - vp8_subtract_mbuv(x->src_diff, - x->src.u_buffer, x->src.v_buffer, x->src.uv_stride, - &x->e_mbd.predictor[256], &x->e_mbd.predictor[320], 8); + vp8_build_inter16x16_predictors_mbuv(&x->e_mbd); + vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer, + x->src.uv_stride, &x->e_mbd.predictor[256], + &x->e_mbd.predictor[320], 8); - vp8_transform_mbuv(x); - vp8_quantize_mbuv(x); + vp8_transform_mbuv(x); + vp8_quantize_mbuv(x); - *rate = rd_cost_mbuv(x); - *distortion = vp8_mbuverror(x) / 4; + *rate = rd_cost_mbuv(x); + *distortion = vp8_mbuverror(x) / 4; - return RDCOST(x->rdmult, x->rddiv, *rate, *distortion); + return RDCOST(x->rdmult, x->rddiv, *rate, *distortion); } static int rd_inter4x4_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate, - int *distortion, int fullpixel) -{ - (void)cpi; - (void)fullpixel; + int *distortion, int fullpixel) { + (void)cpi; + (void)fullpixel; - vp8_build_inter4x4_predictors_mbuv(&x->e_mbd); - vp8_subtract_mbuv(x->src_diff, - x->src.u_buffer, x->src.v_buffer, x->src.uv_stride, - &x->e_mbd.predictor[256], &x->e_mbd.predictor[320], 8); + vp8_build_inter4x4_predictors_mbuv(&x->e_mbd); + vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer, + x->src.uv_stride, &x->e_mbd.predictor[256], + &x->e_mbd.predictor[320], 8); - vp8_transform_mbuv(x); - vp8_quantize_mbuv(x); + vp8_transform_mbuv(x); + vp8_quantize_mbuv(x); - *rate = rd_cost_mbuv(x); - *distortion = vp8_mbuverror(x) / 4; + *rate = rd_cost_mbuv(x); + *distortion = vp8_mbuverror(x) / 4; - return RDCOST(x->rdmult, x->rddiv, *rate, *distortion); + return RDCOST(x->rdmult, x->rddiv, *rate, *distortion); } static void rd_pick_intra_mbuv_mode(MACROBLOCK *x, int *rate, - int *rate_tokenonly, int *distortion) -{ - MB_PREDICTION_MODE mode; - MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected); - 
int best_rd = INT_MAX; - int UNINITIALIZED_IS_SAFE(d), UNINITIALIZED_IS_SAFE(r); - int rate_to; - MACROBLOCKD *xd = &x->e_mbd; + int *rate_tokenonly, int *distortion) { + MB_PREDICTION_MODE mode; + MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected); + int best_rd = INT_MAX; + int UNINITIALIZED_IS_SAFE(d), UNINITIALIZED_IS_SAFE(r); + int rate_to; + MACROBLOCKD *xd = &x->e_mbd; - for (mode = DC_PRED; mode <= TM_PRED; mode++) - { - int this_rate; - int this_distortion; - int this_rd; + for (mode = DC_PRED; mode <= TM_PRED; ++mode) { + int this_rate; + int this_distortion; + int this_rd; - xd->mode_info_context->mbmi.uv_mode = mode; + xd->mode_info_context->mbmi.uv_mode = mode; - vp8_build_intra_predictors_mbuv_s(xd, - xd->dst.u_buffer - xd->dst.uv_stride, - xd->dst.v_buffer - xd->dst.uv_stride, - xd->dst.u_buffer - 1, - xd->dst.v_buffer - 1, - xd->dst.uv_stride, - &xd->predictor[256], &xd->predictor[320], - 8); + vp8_build_intra_predictors_mbuv_s( + xd, xd->dst.u_buffer - xd->dst.uv_stride, + xd->dst.v_buffer - xd->dst.uv_stride, xd->dst.u_buffer - 1, + xd->dst.v_buffer - 1, xd->dst.uv_stride, &xd->predictor[256], + &xd->predictor[320], 8); + vp8_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer, + x->src.uv_stride, &xd->predictor[256], + &xd->predictor[320], 8); + vp8_transform_mbuv(x); + vp8_quantize_mbuv(x); - vp8_subtract_mbuv(x->src_diff, - x->src.u_buffer, x->src.v_buffer, x->src.uv_stride, - &xd->predictor[256], &xd->predictor[320], 8); - vp8_transform_mbuv(x); - vp8_quantize_mbuv(x); + rate_to = rd_cost_mbuv(x); + this_rate = rate_to + + x->intra_uv_mode_cost[xd->frame_type] + [xd->mode_info_context->mbmi.uv_mode]; - rate_to = rd_cost_mbuv(x); - this_rate = rate_to + x->intra_uv_mode_cost[xd->frame_type][xd->mode_info_context->mbmi.uv_mode]; + this_distortion = vp8_mbuverror(x) / 4; - this_distortion = vp8_mbuverror(x) / 4; + this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion); - this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, 
this_distortion); + if (this_rd < best_rd) { + best_rd = this_rd; + d = this_distortion; + r = this_rate; + *rate_tokenonly = rate_to; + mode_selected = mode; + } + } - if (this_rd < best_rd) - { - best_rd = this_rd; - d = this_distortion; - r = this_rate; - *rate_tokenonly = rate_to; - mode_selected = mode; - } + *rate = r; + *distortion = d; + + xd->mode_info_context->mbmi.uv_mode = mode_selected; +} + +int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]) { + vp8_prob p[VP8_MVREFS - 1]; + assert(NEARESTMV <= m && m <= SPLITMV); + vp8_mv_ref_probs(p, near_mv_ref_ct); + return vp8_cost_token(vp8_mv_ref_tree, p, + vp8_mv_ref_encoding_array + (m - NEARESTMV)); +} + +void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) { + x->e_mbd.mode_info_context->mbmi.mode = mb; + x->e_mbd.mode_info_context->mbmi.mv.as_int = mv->as_int; +} + +static int labels2mode(MACROBLOCK *x, int const *labelings, int which_label, + B_PREDICTION_MODE this_mode, int_mv *this_mv, + int_mv *best_ref_mv, int *mvcost[2]) { + MACROBLOCKD *const xd = &x->e_mbd; + MODE_INFO *const mic = xd->mode_info_context; + const int mis = xd->mode_info_stride; + + int cost = 0; + int thismvcost = 0; + + /* We have to be careful retrieving previously-encoded motion vectors. + Ones from this macroblock have to be pulled from the BLOCKD array + as they have not yet made it to the bmi array in our MB_MODE_INFO. 
*/ + + int i = 0; + + do { + BLOCKD *const d = xd->block + i; + const int row = i >> 2, col = i & 3; + + B_PREDICTION_MODE m; + + if (labelings[i] != which_label) continue; + + if (col && labelings[i] == labelings[i - 1]) { + m = LEFT4X4; + } else if (row && labelings[i] == labelings[i - 4]) { + m = ABOVE4X4; + } else { + /* the only time we should do costing for new motion vector + * or mode is when we are on a new label (jbb May 08, 2007) + */ + switch (m = this_mode) { + case NEW4X4: + thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost, 102); + break; + case LEFT4X4: + this_mv->as_int = col ? d[-1].bmi.mv.as_int : left_block_mv(mic, i); + break; + case ABOVE4X4: + this_mv->as_int = + row ? d[-4].bmi.mv.as_int : above_block_mv(mic, i, mis); + break; + case ZERO4X4: this_mv->as_int = 0; break; + default: break; + } + + if (m == ABOVE4X4) /* replace above with left if same */ + { + int_mv left_mv; + + left_mv.as_int = col ? d[-1].bmi.mv.as_int : left_block_mv(mic, i); + + if (left_mv.as_int == this_mv->as_int) m = LEFT4X4; + } + + cost = x->inter_bmode_costs[m]; } - *rate = r; - *distortion = d; + d->bmi.mv.as_int = this_mv->as_int; - xd->mode_info_context->mbmi.uv_mode = mode_selected; -} + x->partition_info->bmi[i].mode = m; + x->partition_info->bmi[i].mv.as_int = this_mv->as_int; -int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]) -{ - vp8_prob p [VP8_MVREFS-1]; - assert(NEARESTMV <= m && m <= SPLITMV); - vp8_mv_ref_probs(p, near_mv_ref_ct); - return vp8_cost_token(vp8_mv_ref_tree, p, - vp8_mv_ref_encoding_array + (m - NEARESTMV)); -} + } while (++i < 16); -void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv) -{ - x->e_mbd.mode_info_context->mbmi.mode = mb; - x->e_mbd.mode_info_context->mbmi.mv.as_int = mv->as_int; -} - -static int labels2mode( - MACROBLOCK *x, - int const *labelings, int which_label, - B_PREDICTION_MODE this_mode, - int_mv *this_mv, int_mv *best_ref_mv, - int *mvcost[2] -) -{ - MACROBLOCKD 
*const xd = & x->e_mbd; - MODE_INFO *const mic = xd->mode_info_context; - const int mis = xd->mode_info_stride; - - int cost = 0; - int thismvcost = 0; - - /* We have to be careful retrieving previously-encoded motion vectors. - Ones from this macroblock have to be pulled from the BLOCKD array - as they have not yet made it to the bmi array in our MB_MODE_INFO. */ - - int i = 0; - - do - { - BLOCKD *const d = xd->block + i; - const int row = i >> 2, col = i & 3; - - B_PREDICTION_MODE m; - - if (labelings[i] != which_label) - continue; - - if (col && labelings[i] == labelings[i-1]) - m = LEFT4X4; - else if (row && labelings[i] == labelings[i-4]) - m = ABOVE4X4; - else - { - /* the only time we should do costing for new motion vector - * or mode is when we are on a new label (jbb May 08, 2007) - */ - switch (m = this_mode) - { - case NEW4X4 : - thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost, 102); - break; - case LEFT4X4: - this_mv->as_int = col ? d[-1].bmi.mv.as_int : left_block_mv(mic, i); - break; - case ABOVE4X4: - this_mv->as_int = row ? d[-4].bmi.mv.as_int : above_block_mv(mic, i, mis); - break; - case ZERO4X4: - this_mv->as_int = 0; - break; - default: - break; - } - - if (m == ABOVE4X4) /* replace above with left if same */ - { - int_mv left_mv; - - left_mv.as_int = col ? 
d[-1].bmi.mv.as_int : - left_block_mv(mic, i); - - if (left_mv.as_int == this_mv->as_int) - m = LEFT4X4; - } - - cost = x->inter_bmode_costs[ m]; - } - - d->bmi.mv.as_int = this_mv->as_int; - - x->partition_info->bmi[i].mode = m; - x->partition_info->bmi[i].mv.as_int = this_mv->as_int; - - } - while (++i < 16); - - cost += thismvcost ; - return cost; + cost += thismvcost; + return cost; } static int rdcost_mbsegment_y(MACROBLOCK *mb, const int *labels, int which_label, ENTROPY_CONTEXT *ta, - ENTROPY_CONTEXT *tl) -{ - int cost = 0; - int b; - MACROBLOCKD *x = &mb->e_mbd; + ENTROPY_CONTEXT *tl) { + int cost = 0; + int b; + MACROBLOCKD *x = &mb->e_mbd; - for (b = 0; b < 16; b++) - if (labels[ b] == which_label) - cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_WITH_DC, - ta + vp8_block2above[b], - tl + vp8_block2left[b]); - - return cost; - -} -static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x, int const *labels, int which_label) -{ - int i; - unsigned int distortion = 0; - int pre_stride = x->e_mbd.pre.y_stride; - unsigned char *base_pre = x->e_mbd.pre.y_buffer; - - - for (i = 0; i < 16; i++) - { - if (labels[i] == which_label) - { - BLOCKD *bd = &x->e_mbd.block[i]; - BLOCK *be = &x->block[i]; - - vp8_build_inter_predictors_b(bd, 16, base_pre, pre_stride, x->e_mbd.subpixel_predict); - vp8_subtract_b(be, bd, 16); - x->short_fdct4x4(be->src_diff, be->coeff, 32); - x->quantize_b(be, bd); - - distortion += vp8_block_error(be->coeff, bd->dqcoeff); - } + for (b = 0; b < 16; ++b) { + if (labels[b] == which_label) { + cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_WITH_DC, + ta + vp8_block2above[b], tl + vp8_block2left[b]); } + } - return distortion; + return cost; +} +static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x, + int const *labels, + int which_label) { + int i; + unsigned int distortion = 0; + int pre_stride = x->e_mbd.pre.y_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + + for (i = 0; i < 16; ++i) { + if (labels[i] == 
which_label) { + BLOCKD *bd = &x->e_mbd.block[i]; + BLOCK *be = &x->block[i]; + + vp8_build_inter_predictors_b(bd, 16, base_pre, pre_stride, + x->e_mbd.subpixel_predict); + vp8_subtract_b(be, bd, 16); + x->short_fdct4x4(be->src_diff, be->coeff, 32); + x->quantize_b(be, bd); + + distortion += vp8_block_error(be->coeff, bd->dqcoeff); + } + } + + return distortion; } +static const unsigned int segmentation_to_sseshift[4] = { 3, 3, 2, 0 }; -static const unsigned int segmentation_to_sseshift[4] = {3, 3, 2, 0}; - - -typedef struct -{ +typedef struct { int_mv *ref_mv; int_mv mvp; @@ -1102,1544 +937,1449 @@ typedef struct } BEST_SEG_INFO; +static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x, BEST_SEG_INFO *bsi, + unsigned int segmentation) { + int i; + int const *labels; + int br = 0; + int bd = 0; + B_PREDICTION_MODE this_mode; -static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x, - BEST_SEG_INFO *bsi, unsigned int segmentation) -{ - int i; - int const *labels; - int br = 0; - int bd = 0; - B_PREDICTION_MODE this_mode; + int label_count; + int this_segment_rd = 0; + int label_mv_thresh; + int rate = 0; + int sbr = 0; + int sbd = 0; + int segmentyrate = 0; + vp8_variance_fn_ptr_t *v_fn_ptr; - int label_count; - int this_segment_rd = 0; - int label_mv_thresh; - int rate = 0; - int sbr = 0; - int sbd = 0; - int segmentyrate = 0; + ENTROPY_CONTEXT_PLANES t_above, t_left; + ENTROPY_CONTEXT *ta; + ENTROPY_CONTEXT *tl; + ENTROPY_CONTEXT_PLANES t_above_b, t_left_b; + ENTROPY_CONTEXT *ta_b; + ENTROPY_CONTEXT *tl_b; - vp8_variance_fn_ptr_t *v_fn_ptr; + memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES)); + memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES)); - ENTROPY_CONTEXT_PLANES t_above, t_left; - ENTROPY_CONTEXT *ta; - ENTROPY_CONTEXT *tl; - ENTROPY_CONTEXT_PLANES t_above_b, t_left_b; - ENTROPY_CONTEXT *ta_b; - ENTROPY_CONTEXT *tl_b; + ta = (ENTROPY_CONTEXT *)&t_above; + tl = (ENTROPY_CONTEXT *)&t_left; + ta_b = 
(ENTROPY_CONTEXT *)&t_above_b; + tl_b = (ENTROPY_CONTEXT *)&t_left_b; - memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES)); - memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES)); + br = 0; + bd = 0; - ta = (ENTROPY_CONTEXT *)&t_above; - tl = (ENTROPY_CONTEXT *)&t_left; - ta_b = (ENTROPY_CONTEXT *)&t_above_b; - tl_b = (ENTROPY_CONTEXT *)&t_left_b; + v_fn_ptr = &cpi->fn_ptr[segmentation]; + labels = vp8_mbsplits[segmentation]; + label_count = vp8_mbsplit_count[segmentation]; - br = 0; - bd = 0; + /* 64 makes this threshold really big effectively making it so that we + * very rarely check mvs on segments. setting this to 1 would make mv + * thresh roughly equal to what it is for macroblocks + */ + label_mv_thresh = 1 * bsi->mvthresh / label_count; - v_fn_ptr = &cpi->fn_ptr[segmentation]; - labels = vp8_mbsplits[segmentation]; - label_count = vp8_mbsplit_count[segmentation]; + /* Segmentation method overheads */ + rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs, + vp8_mbsplit_encodings + segmentation); + rate += vp8_cost_mv_ref(SPLITMV, bsi->mdcounts); + this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0); + br += rate; - /* 64 makes this threshold really big effectively making it so that we - * very rarely check mvs on segments. 
setting this to 1 would make mv - * thresh roughly equal to what it is for macroblocks - */ - label_mv_thresh = 1 * bsi->mvthresh / label_count ; + for (i = 0; i < label_count; ++i) { + int_mv mode_mv[B_MODE_COUNT]; + int best_label_rd = INT_MAX; + B_PREDICTION_MODE mode_selected = ZERO4X4; + int bestlabelyrate = 0; - /* Segmentation method overheads */ - rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs, vp8_mbsplit_encodings + segmentation); - rate += vp8_cost_mv_ref(SPLITMV, bsi->mdcounts); - this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0); - br += rate; + /* search for the best motion vector on this segment */ + for (this_mode = LEFT4X4; this_mode <= NEW4X4; ++this_mode) { + int this_rd; + int distortion; + int labelyrate; + ENTROPY_CONTEXT_PLANES t_above_s, t_left_s; + ENTROPY_CONTEXT *ta_s; + ENTROPY_CONTEXT *tl_s; - for (i = 0; i < label_count; i++) - { - int_mv mode_mv[B_MODE_COUNT]; - int best_label_rd = INT_MAX; - B_PREDICTION_MODE mode_selected = ZERO4X4; - int bestlabelyrate = 0; + memcpy(&t_above_s, &t_above, sizeof(ENTROPY_CONTEXT_PLANES)); + memcpy(&t_left_s, &t_left, sizeof(ENTROPY_CONTEXT_PLANES)); - /* search for the best motion vector on this segment */ - for (this_mode = LEFT4X4; this_mode <= NEW4X4 ; this_mode ++) - { - int this_rd; - int distortion; - int labelyrate; - ENTROPY_CONTEXT_PLANES t_above_s, t_left_s; - ENTROPY_CONTEXT *ta_s; - ENTROPY_CONTEXT *tl_s; + ta_s = (ENTROPY_CONTEXT *)&t_above_s; + tl_s = (ENTROPY_CONTEXT *)&t_left_s; - memcpy(&t_above_s, &t_above, sizeof(ENTROPY_CONTEXT_PLANES)); - memcpy(&t_left_s, &t_left, sizeof(ENTROPY_CONTEXT_PLANES)); + if (this_mode == NEW4X4) { + int sseshift; + int num00; + int step_param = 0; + int further_steps; + int n; + int thissme; + int bestsme = INT_MAX; + int_mv temp_mv; + BLOCK *c; + BLOCKD *e; - ta_s = (ENTROPY_CONTEXT *)&t_above_s; - tl_s = (ENTROPY_CONTEXT *)&t_left_s; + /* Is the best so far sufficiently good that we cant justify + * doing a new motion search. 
+ */ + if (best_label_rd < label_mv_thresh) break; - if (this_mode == NEW4X4) - { - int sseshift; - int num00; - int step_param = 0; - int further_steps; - int n; - int thissme; - int bestsme = INT_MAX; - int_mv temp_mv; - BLOCK *c; - BLOCKD *e; - - /* Is the best so far sufficiently good that we cant justify - * doing a new motion search. - */ - if (best_label_rd < label_mv_thresh) - break; - - if(cpi->compressor_speed) - { - if (segmentation == BLOCK_8X16 || segmentation == BLOCK_16X8) - { - bsi->mvp.as_int = bsi->sv_mvp[i].as_int; - if (i==1 && segmentation == BLOCK_16X8) - bsi->mvp.as_int = bsi->sv_mvp[2].as_int; - - step_param = bsi->sv_istep[i]; - } - - /* use previous block's result as next block's MV - * predictor. - */ - if (segmentation == BLOCK_4X4 && i>0) - { - bsi->mvp.as_int = x->e_mbd.block[i-1].bmi.mv.as_int; - if (i==4 || i==8 || i==12) - bsi->mvp.as_int = x->e_mbd.block[i-4].bmi.mv.as_int; - step_param = 2; - } - } - - further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param; - - { - int sadpb = x->sadperbit4; - int_mv mvp_full; - - mvp_full.as_mv.row = bsi->mvp.as_mv.row >>3; - mvp_full.as_mv.col = bsi->mvp.as_mv.col >>3; - - /* find first label */ - n = vp8_mbsplit_offset[segmentation][i]; - - c = &x->block[n]; - e = &x->e_mbd.block[n]; - - { - bestsme = cpi->diamond_search_sad(x, c, e, &mvp_full, - &mode_mv[NEW4X4], step_param, - sadpb, &num00, v_fn_ptr, - x->mvcost, bsi->ref_mv); - - n = num00; - num00 = 0; - - while (n < further_steps) - { - n++; - - if (num00) - num00--; - else - { - thissme = cpi->diamond_search_sad(x, c, e, - &mvp_full, &temp_mv, - step_param + n, sadpb, - &num00, v_fn_ptr, - x->mvcost, bsi->ref_mv); - - if (thissme < bestsme) - { - bestsme = thissme; - mode_mv[NEW4X4].as_int = temp_mv.as_int; - } - } - } - } - - sseshift = segmentation_to_sseshift[segmentation]; - - /* Should we do a full search (best quality only) */ - if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000) - { - /* Check if mvp_full is within the 
range. */ - vp8_clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); - - thissme = cpi->full_search_sad(x, c, e, &mvp_full, - sadpb, 16, v_fn_ptr, - x->mvcost, bsi->ref_mv); - - if (thissme < bestsme) - { - bestsme = thissme; - mode_mv[NEW4X4].as_int = e->bmi.mv.as_int; - } - else - { - /* The full search result is actually worse so - * re-instate the previous best vector - */ - e->bmi.mv.as_int = mode_mv[NEW4X4].as_int; - } - } - } - - if (bestsme < INT_MAX) - { - int disto; - unsigned int sse; - cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4], - bsi->ref_mv, x->errorperbit, v_fn_ptr, x->mvcost, - &disto, &sse); - } - } /* NEW4X4 */ - - rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode], - bsi->ref_mv, x->mvcost); - - /* Trap vectors that reach beyond the UMV borders */ - if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) || - ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) - { - continue; + if (cpi->compressor_speed) { + if (segmentation == BLOCK_8X16 || segmentation == BLOCK_16X8) { + bsi->mvp.as_int = bsi->sv_mvp[i].as_int; + if (i == 1 && segmentation == BLOCK_16X8) { + bsi->mvp.as_int = bsi->sv_mvp[2].as_int; } - distortion = vp8_encode_inter_mb_segment(x, labels, i) / 4; - - labelyrate = rdcost_mbsegment_y(x, labels, i, ta_s, tl_s); - rate += labelyrate; - - this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion); - - if (this_rd < best_label_rd) - { - sbr = rate; - sbd = distortion; - bestlabelyrate = labelyrate; - mode_selected = this_mode; - best_label_rd = this_rd; - - memcpy(ta_b, ta_s, sizeof(ENTROPY_CONTEXT_PLANES)); - memcpy(tl_b, tl_s, sizeof(ENTROPY_CONTEXT_PLANES)); + step_param = bsi->sv_istep[i]; + } + /* use previous block's result as next block's MV + * predictor. 
+ */ + if (segmentation == BLOCK_4X4 && i > 0) { + bsi->mvp.as_int = x->e_mbd.block[i - 1].bmi.mv.as_int; + if (i == 4 || i == 8 || i == 12) { + bsi->mvp.as_int = x->e_mbd.block[i - 4].bmi.mv.as_int; } - } /*for each 4x4 mode*/ - - memcpy(ta, ta_b, sizeof(ENTROPY_CONTEXT_PLANES)); - memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES)); - - labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected], - bsi->ref_mv, x->mvcost); - - br += sbr; - bd += sbd; - segmentyrate += bestlabelyrate; - this_segment_rd += best_label_rd; - - if (this_segment_rd >= bsi->segment_rd) - break; - - } /* for each label */ - - if (this_segment_rd < bsi->segment_rd) - { - bsi->r = br; - bsi->d = bd; - bsi->segment_yrate = segmentyrate; - bsi->segment_rd = this_segment_rd; - bsi->segment_num = segmentation; - - /* store everything needed to come back to this!! */ - for (i = 0; i < 16; i++) - { - bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv; - bsi->modes[i] = x->partition_info->bmi[i].mode; - bsi->eobs[i] = x->e_mbd.eobs[i]; + step_param = 2; + } } + + further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param; + + { + int sadpb = x->sadperbit4; + int_mv mvp_full; + + mvp_full.as_mv.row = bsi->mvp.as_mv.row >> 3; + mvp_full.as_mv.col = bsi->mvp.as_mv.col >> 3; + + /* find first label */ + n = vp8_mbsplit_offset[segmentation][i]; + + c = &x->block[n]; + e = &x->e_mbd.block[n]; + + { + bestsme = cpi->diamond_search_sad( + x, c, e, &mvp_full, &mode_mv[NEW4X4], step_param, sadpb, &num00, + v_fn_ptr, x->mvcost, bsi->ref_mv); + + n = num00; + num00 = 0; + + while (n < further_steps) { + n++; + + if (num00) { + num00--; + } else { + thissme = cpi->diamond_search_sad( + x, c, e, &mvp_full, &temp_mv, step_param + n, sadpb, &num00, + v_fn_ptr, x->mvcost, bsi->ref_mv); + + if (thissme < bestsme) { + bestsme = thissme; + mode_mv[NEW4X4].as_int = temp_mv.as_int; + } + } + } + } + + sseshift = segmentation_to_sseshift[segmentation]; + + /* Should we do a full search (best quality only) */ + if 
((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000) { + /* Check if mvp_full is within the range. */ + vp8_clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max, x->mv_row_min, + x->mv_row_max); + + thissme = cpi->full_search_sad(x, c, e, &mvp_full, sadpb, 16, + v_fn_ptr, x->mvcost, bsi->ref_mv); + + if (thissme < bestsme) { + bestsme = thissme; + mode_mv[NEW4X4].as_int = e->bmi.mv.as_int; + } else { + /* The full search result is actually worse so + * re-instate the previous best vector + */ + e->bmi.mv.as_int = mode_mv[NEW4X4].as_int; + } + } + } + + if (bestsme < INT_MAX) { + int disto; + unsigned int sse; + cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4], bsi->ref_mv, + x->errorperbit, v_fn_ptr, x->mvcost, + &disto, &sse); + } + } /* NEW4X4 */ + + rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode], + bsi->ref_mv, x->mvcost); + + /* Trap vectors that reach beyond the UMV borders */ + if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || + ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) || + ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || + ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) { + continue; + } + + distortion = vp8_encode_inter_mb_segment(x, labels, i) / 4; + + labelyrate = rdcost_mbsegment_y(x, labels, i, ta_s, tl_s); + rate += labelyrate; + + this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion); + + if (this_rd < best_label_rd) { + sbr = rate; + sbd = distortion; + bestlabelyrate = labelyrate; + mode_selected = this_mode; + best_label_rd = this_rd; + + memcpy(ta_b, ta_s, sizeof(ENTROPY_CONTEXT_PLANES)); + memcpy(tl_b, tl_s, sizeof(ENTROPY_CONTEXT_PLANES)); + } + } /*for each 4x4 mode*/ + + memcpy(ta, ta_b, sizeof(ENTROPY_CONTEXT_PLANES)); + memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES)); + + labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected], + bsi->ref_mv, x->mvcost); + + br += sbr; + bd += sbd; + segmentyrate += bestlabelyrate; + this_segment_rd += best_label_rd; + + if 
(this_segment_rd >= bsi->segment_rd) break; + + } /* for each label */ + + if (this_segment_rd < bsi->segment_rd) { + bsi->r = br; + bsi->d = bd; + bsi->segment_yrate = segmentyrate; + bsi->segment_rd = this_segment_rd; + bsi->segment_num = segmentation; + + /* store everything needed to come back to this!! */ + for (i = 0; i < 16; ++i) { + bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv; + bsi->modes[i] = x->partition_info->bmi[i].mode; + bsi->eobs[i] = x->e_mbd.eobs[i]; } + } } -static -void vp8_cal_step_param(int sr, int *sp) -{ - int step = 0; +static void vp8_cal_step_param(int sr, int *sp) { + int step = 0; - if (sr > MAX_FIRST_STEP) sr = MAX_FIRST_STEP; - else if (sr < 1) sr = 1; + if (sr > MAX_FIRST_STEP) { + sr = MAX_FIRST_STEP; + } else if (sr < 1) { + sr = 1; + } - while (sr>>=1) - step++; + while (sr >>= 1) step++; - *sp = MAX_MVSEARCH_STEPS - 1 - step; + *sp = MAX_MVSEARCH_STEPS - 1 - step; } static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv, int best_rd, int *mdcounts, int *returntotrate, - int *returnyrate, int *returndistortion, - int mvthresh) -{ - int i; - BEST_SEG_INFO bsi; + int *returnyrate, + int *returndistortion, + int mvthresh) { + int i; + BEST_SEG_INFO bsi; - memset(&bsi, 0, sizeof(bsi)); + memset(&bsi, 0, sizeof(bsi)); - bsi.segment_rd = best_rd; - bsi.ref_mv = best_ref_mv; - bsi.mvp.as_int = best_ref_mv->as_int; - bsi.mvthresh = mvthresh; - bsi.mdcounts = mdcounts; + bsi.segment_rd = best_rd; + bsi.ref_mv = best_ref_mv; + bsi.mvp.as_int = best_ref_mv->as_int; + bsi.mvthresh = mvthresh; + bsi.mdcounts = mdcounts; - for(i = 0; i < 16; i++) - { - bsi.modes[i] = ZERO4X4; - } + for (i = 0; i < 16; ++i) { + bsi.modes[i] = ZERO4X4; + } + + if (cpi->compressor_speed == 0) { + /* for now, we will keep the original segmentation order + when in best quality mode */ + rd_check_segment(cpi, x, &bsi, BLOCK_16X8); + rd_check_segment(cpi, x, &bsi, BLOCK_8X16); + rd_check_segment(cpi, x, &bsi, 
BLOCK_8X8); + rd_check_segment(cpi, x, &bsi, BLOCK_4X4); + } else { + int sr; + + rd_check_segment(cpi, x, &bsi, BLOCK_8X8); + + if (bsi.segment_rd < best_rd) { + int col_min = ((best_ref_mv->as_mv.col + 7) >> 3) - MAX_FULL_PEL_VAL; + int row_min = ((best_ref_mv->as_mv.row + 7) >> 3) - MAX_FULL_PEL_VAL; + int col_max = (best_ref_mv->as_mv.col >> 3) + MAX_FULL_PEL_VAL; + int row_max = (best_ref_mv->as_mv.row >> 3) + MAX_FULL_PEL_VAL; + + int tmp_col_min = x->mv_col_min; + int tmp_col_max = x->mv_col_max; + int tmp_row_min = x->mv_row_min; + int tmp_row_max = x->mv_row_max; + + /* Get intersection of UMV window and valid MV window to reduce # of + * checks in diamond search. */ + if (x->mv_col_min < col_min) x->mv_col_min = col_min; + if (x->mv_col_max > col_max) x->mv_col_max = col_max; + if (x->mv_row_min < row_min) x->mv_row_min = row_min; + if (x->mv_row_max > row_max) x->mv_row_max = row_max; + + /* Get 8x8 result */ + bsi.sv_mvp[0].as_int = bsi.mvs[0].as_int; + bsi.sv_mvp[1].as_int = bsi.mvs[2].as_int; + bsi.sv_mvp[2].as_int = bsi.mvs[8].as_int; + bsi.sv_mvp[3].as_int = bsi.mvs[10].as_int; + + /* Use 8x8 result as 16x8/8x16's predictor MV. Adjust search range + * according to the closeness of 2 MV. 
*/ + /* block 8X16 */ + { + sr = + MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[2].as_mv.row)) >> 3, + (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[2].as_mv.col)) >> 3); + vp8_cal_step_param(sr, &bsi.sv_istep[0]); + + sr = + MAXF((abs(bsi.sv_mvp[1].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3, + (abs(bsi.sv_mvp[1].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3); + vp8_cal_step_param(sr, &bsi.sv_istep[1]); - if(cpi->compressor_speed == 0) - { - /* for now, we will keep the original segmentation order - when in best quality mode */ - rd_check_segment(cpi, x, &bsi, BLOCK_16X8); rd_check_segment(cpi, x, &bsi, BLOCK_8X16); - rd_check_segment(cpi, x, &bsi, BLOCK_8X8); + } + + /* block 16X8 */ + { + sr = + MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[1].as_mv.row)) >> 3, + (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[1].as_mv.col)) >> 3); + vp8_cal_step_param(sr, &bsi.sv_istep[0]); + + sr = + MAXF((abs(bsi.sv_mvp[2].as_mv.row - bsi.sv_mvp[3].as_mv.row)) >> 3, + (abs(bsi.sv_mvp[2].as_mv.col - bsi.sv_mvp[3].as_mv.col)) >> 3); + vp8_cal_step_param(sr, &bsi.sv_istep[1]); + + rd_check_segment(cpi, x, &bsi, BLOCK_16X8); + } + + /* If 8x8 is better than 16x8/8x16, then do 4x4 search */ + /* Not skip 4x4 if speed=0 (good quality) */ + if (cpi->sf.no_skip_block4x4_search || bsi.segment_num == BLOCK_8X8) + /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */ + { + bsi.mvp.as_int = bsi.sv_mvp[0].as_int; rd_check_segment(cpi, x, &bsi, BLOCK_4X4); + } + + /* restore UMV window */ + x->mv_col_min = tmp_col_min; + x->mv_col_max = tmp_col_max; + x->mv_row_min = tmp_row_min; + x->mv_row_max = tmp_row_max; } - else - { - int sr; + } - rd_check_segment(cpi, x, &bsi, BLOCK_8X8); + /* set it to the best */ + for (i = 0; i < 16; ++i) { + BLOCKD *bd = &x->e_mbd.block[i]; - if (bsi.segment_rd < best_rd) - { - int col_min = ((best_ref_mv->as_mv.col+7)>>3) - MAX_FULL_PEL_VAL; - int row_min = ((best_ref_mv->as_mv.row+7)>>3) - MAX_FULL_PEL_VAL; - int col_max = (best_ref_mv->as_mv.col>>3) + 
MAX_FULL_PEL_VAL; - int row_max = (best_ref_mv->as_mv.row>>3) + MAX_FULL_PEL_VAL; + bd->bmi.mv.as_int = bsi.mvs[i].as_int; + *bd->eob = bsi.eobs[i]; + } - int tmp_col_min = x->mv_col_min; - int tmp_col_max = x->mv_col_max; - int tmp_row_min = x->mv_row_min; - int tmp_row_max = x->mv_row_max; + *returntotrate = bsi.r; + *returndistortion = bsi.d; + *returnyrate = bsi.segment_yrate; - /* Get intersection of UMV window and valid MV window to reduce # of checks in diamond search. */ - if (x->mv_col_min < col_min ) - x->mv_col_min = col_min; - if (x->mv_col_max > col_max ) - x->mv_col_max = col_max; - if (x->mv_row_min < row_min ) - x->mv_row_min = row_min; - if (x->mv_row_max > row_max ) - x->mv_row_max = row_max; + /* save partitions */ + x->e_mbd.mode_info_context->mbmi.partitioning = bsi.segment_num; + x->partition_info->count = vp8_mbsplit_count[bsi.segment_num]; - /* Get 8x8 result */ - bsi.sv_mvp[0].as_int = bsi.mvs[0].as_int; - bsi.sv_mvp[1].as_int = bsi.mvs[2].as_int; - bsi.sv_mvp[2].as_int = bsi.mvs[8].as_int; - bsi.sv_mvp[3].as_int = bsi.mvs[10].as_int; + for (i = 0; i < x->partition_info->count; ++i) { + int j; - /* Use 8x8 result as 16x8/8x16's predictor MV. Adjust search range according to the closeness of 2 MV. 
*/ - /* block 8X16 */ - { - sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[2].as_mv.row))>>3, (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[2].as_mv.col))>>3); - vp8_cal_step_param(sr, &bsi.sv_istep[0]); + j = vp8_mbsplit_offset[bsi.segment_num][i]; - sr = MAXF((abs(bsi.sv_mvp[1].as_mv.row - bsi.sv_mvp[3].as_mv.row))>>3, (abs(bsi.sv_mvp[1].as_mv.col - bsi.sv_mvp[3].as_mv.col))>>3); - vp8_cal_step_param(sr, &bsi.sv_istep[1]); + x->partition_info->bmi[i].mode = bsi.modes[j]; + x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv; + } + /* + * used to set x->e_mbd.mode_info_context->mbmi.mv.as_int + */ + x->partition_info->bmi[15].mv.as_int = bsi.mvs[15].as_int; - rd_check_segment(cpi, x, &bsi, BLOCK_8X16); - } - - /* block 16X8 */ - { - sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[1].as_mv.row))>>3, (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[1].as_mv.col))>>3); - vp8_cal_step_param(sr, &bsi.sv_istep[0]); - - sr = MAXF((abs(bsi.sv_mvp[2].as_mv.row - bsi.sv_mvp[3].as_mv.row))>>3, (abs(bsi.sv_mvp[2].as_mv.col - bsi.sv_mvp[3].as_mv.col))>>3); - vp8_cal_step_param(sr, &bsi.sv_istep[1]); - - rd_check_segment(cpi, x, &bsi, BLOCK_16X8); - } - - /* If 8x8 is better than 16x8/8x16, then do 4x4 search */ - /* Not skip 4x4 if speed=0 (good quality) */ - if (cpi->sf.no_skip_block4x4_search || bsi.segment_num == BLOCK_8X8) /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */ - { - bsi.mvp.as_int = bsi.sv_mvp[0].as_int; - rd_check_segment(cpi, x, &bsi, BLOCK_4X4); - } - - /* restore UMV window */ - x->mv_col_min = tmp_col_min; - x->mv_col_max = tmp_col_max; - x->mv_row_min = tmp_row_min; - x->mv_row_max = tmp_row_max; - } - } - - /* set it to the best */ - for (i = 0; i < 16; i++) - { - BLOCKD *bd = &x->e_mbd.block[i]; - - bd->bmi.mv.as_int = bsi.mvs[i].as_int; - *bd->eob = bsi.eobs[i]; - } - - *returntotrate = bsi.r; - *returndistortion = bsi.d; - *returnyrate = bsi.segment_yrate; - - /* save partitions */ - x->e_mbd.mode_info_context->mbmi.partitioning = 
bsi.segment_num; - x->partition_info->count = vp8_mbsplit_count[bsi.segment_num]; - - for (i = 0; i < x->partition_info->count; i++) - { - int j; - - j = vp8_mbsplit_offset[bsi.segment_num][i]; - - x->partition_info->bmi[i].mode = bsi.modes[j]; - x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv; - } - /* - * used to set x->e_mbd.mode_info_context->mbmi.mv.as_int - */ - x->partition_info->bmi[15].mv.as_int = bsi.mvs[15].as_int; - - return bsi.segment_rd; + return bsi.segment_rd; } /* The improved MV prediction */ -void vp8_mv_pred -( - VP8_COMP *cpi, - MACROBLOCKD *xd, - const MODE_INFO *here, - int_mv *mvp, - int refframe, - int *ref_frame_sign_bias, - int *sr, - int near_sadidx[] -) -{ - const MODE_INFO *above = here - xd->mode_info_stride; - const MODE_INFO *left = here - 1; - const MODE_INFO *aboveleft = above - 1; - int_mv near_mvs[8]; - int near_ref[8]; - int_mv mv; - int vcnt=0; - int find=0; - int mb_offset; +void vp8_mv_pred(VP8_COMP *cpi, MACROBLOCKD *xd, const MODE_INFO *here, + int_mv *mvp, int refframe, int *ref_frame_sign_bias, int *sr, + int near_sadidx[]) { + const MODE_INFO *above = here - xd->mode_info_stride; + const MODE_INFO *left = here - 1; + const MODE_INFO *aboveleft = above - 1; + int_mv near_mvs[8]; + int near_ref[8]; + int_mv mv; + int vcnt = 0; + int find = 0; + int mb_offset; - int mvx[8]; - int mvy[8]; - int i; + int mvx[8]; + int mvy[8]; + int i; - mv.as_int = 0; + mv.as_int = 0; - if(here->mbmi.ref_frame != INTRA_FRAME) - { - near_mvs[0].as_int = near_mvs[1].as_int = near_mvs[2].as_int = near_mvs[3].as_int = near_mvs[4].as_int = near_mvs[5].as_int = near_mvs[6].as_int = near_mvs[7].as_int = 0; - near_ref[0] = near_ref[1] = near_ref[2] = near_ref[3] = near_ref[4] = near_ref[5] = near_ref[6] = near_ref[7] = 0; + if (here->mbmi.ref_frame != INTRA_FRAME) { + near_mvs[0].as_int = near_mvs[1].as_int = near_mvs[2].as_int = + near_mvs[3].as_int = near_mvs[4].as_int = near_mvs[5].as_int = + near_mvs[6].as_int = near_mvs[7].as_int = 0; + 
near_ref[0] = near_ref[1] = near_ref[2] = near_ref[3] = near_ref[4] = + near_ref[5] = near_ref[6] = near_ref[7] = 0; - /* read in 3 nearby block's MVs from current frame as prediction - * candidates. - */ - if (above->mbmi.ref_frame != INTRA_FRAME) - { - near_mvs[vcnt].as_int = above->mbmi.mv.as_int; - mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias); - near_ref[vcnt] = above->mbmi.ref_frame; - } - vcnt++; - if (left->mbmi.ref_frame != INTRA_FRAME) - { - near_mvs[vcnt].as_int = left->mbmi.mv.as_int; - mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias); - near_ref[vcnt] = left->mbmi.ref_frame; - } - vcnt++; - if (aboveleft->mbmi.ref_frame != INTRA_FRAME) - { - near_mvs[vcnt].as_int = aboveleft->mbmi.mv.as_int; - mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias); - near_ref[vcnt] = aboveleft->mbmi.ref_frame; - } - vcnt++; - - /* read in 5 nearby block's MVs from last frame. 
*/ - if(cpi->common.last_frame_type != KEY_FRAME) - { - mb_offset = (-xd->mb_to_top_edge/128 + 1) * (xd->mode_info_stride +1) + (-xd->mb_to_left_edge/128 +1) ; - - /* current in last frame */ - if (cpi->lf_ref_frame[mb_offset] != INTRA_FRAME) - { - near_mvs[vcnt].as_int = cpi->lfmv[mb_offset].as_int; - mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset], refframe, &near_mvs[vcnt], ref_frame_sign_bias); - near_ref[vcnt] = cpi->lf_ref_frame[mb_offset]; - } - vcnt++; - - /* above in last frame */ - if (cpi->lf_ref_frame[mb_offset - xd->mode_info_stride-1] != INTRA_FRAME) - { - near_mvs[vcnt].as_int = cpi->lfmv[mb_offset - xd->mode_info_stride-1].as_int; - mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset - xd->mode_info_stride-1], refframe, &near_mvs[vcnt], ref_frame_sign_bias); - near_ref[vcnt] = cpi->lf_ref_frame[mb_offset - xd->mode_info_stride-1]; - } - vcnt++; - - /* left in last frame */ - if (cpi->lf_ref_frame[mb_offset-1] != INTRA_FRAME) - { - near_mvs[vcnt].as_int = cpi->lfmv[mb_offset -1].as_int; - mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset -1], refframe, &near_mvs[vcnt], ref_frame_sign_bias); - near_ref[vcnt] = cpi->lf_ref_frame[mb_offset - 1]; - } - vcnt++; - - /* right in last frame */ - if (cpi->lf_ref_frame[mb_offset +1] != INTRA_FRAME) - { - near_mvs[vcnt].as_int = cpi->lfmv[mb_offset +1].as_int; - mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset +1], refframe, &near_mvs[vcnt], ref_frame_sign_bias); - near_ref[vcnt] = cpi->lf_ref_frame[mb_offset +1]; - } - vcnt++; - - /* below in last frame */ - if (cpi->lf_ref_frame[mb_offset + xd->mode_info_stride +1] != INTRA_FRAME) - { - near_mvs[vcnt].as_int = cpi->lfmv[mb_offset + xd->mode_info_stride +1].as_int; - mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset + xd->mode_info_stride +1], refframe, &near_mvs[vcnt], ref_frame_sign_bias); - near_ref[vcnt] = cpi->lf_ref_frame[mb_offset + xd->mode_info_stride +1]; - } - vcnt++; - } - - for(i=0; i< vcnt; i++) - { - if(near_ref[near_sadidx[i]] != INTRA_FRAME) - { - 
if(here->mbmi.ref_frame == near_ref[near_sadidx[i]]) - { - mv.as_int = near_mvs[near_sadidx[i]].as_int; - find = 1; - if (i < 3) - *sr = 3; - else - *sr = 2; - break; - } - } - } - - if(!find) - { - for(i=0; ias_int = mv.as_int; - vp8_clamp_mv2(mvp, xd); -} - -void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffset, int near_sadidx[]) -{ - /* near_sad indexes: - * 0-cf above, 1-cf left, 2-cf aboveleft, - * 3-lf current, 4-lf above, 5-lf left, 6-lf right, 7-lf below + /* read in 3 nearby block's MVs from current frame as prediction + * candidates. */ - int near_sad[8] = {0}; - BLOCK *b = &x->block[0]; - unsigned char *src_y_ptr = *(b->base_src); + if (above->mbmi.ref_frame != INTRA_FRAME) { + near_mvs[vcnt].as_int = above->mbmi.mv.as_int; + mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], refframe, + &near_mvs[vcnt], ref_frame_sign_bias); + near_ref[vcnt] = above->mbmi.ref_frame; + } + vcnt++; + if (left->mbmi.ref_frame != INTRA_FRAME) { + near_mvs[vcnt].as_int = left->mbmi.mv.as_int; + mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], refframe, + &near_mvs[vcnt], ref_frame_sign_bias); + near_ref[vcnt] = left->mbmi.ref_frame; + } + vcnt++; + if (aboveleft->mbmi.ref_frame != INTRA_FRAME) { + near_mvs[vcnt].as_int = aboveleft->mbmi.mv.as_int; + mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], refframe, + &near_mvs[vcnt], ref_frame_sign_bias); + near_ref[vcnt] = aboveleft->mbmi.ref_frame; + } + vcnt++; - /* calculate sad for current frame 3 nearby MBs. */ - if( xd->mb_to_top_edge==0 && xd->mb_to_left_edge ==0) - { - near_sad[0] = near_sad[1] = near_sad[2] = INT_MAX; - }else if(xd->mb_to_top_edge==0) - { /* only has left MB for sad calculation. */ - near_sad[0] = near_sad[2] = INT_MAX; - near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - 16,xd->dst.y_stride); - }else if(xd->mb_to_left_edge ==0) - { /* only has left MB for sad calculation. 
*/ - near_sad[1] = near_sad[2] = INT_MAX; - near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride *16,xd->dst.y_stride); - }else - { - near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride *16,xd->dst.y_stride); - near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - 16,xd->dst.y_stride); - near_sad[2] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride *16 -16,xd->dst.y_stride); + /* read in 5 nearby block's MVs from last frame. */ + if (cpi->common.last_frame_type != KEY_FRAME) { + mb_offset = (-xd->mb_to_top_edge / 128 + 1) * (xd->mode_info_stride + 1) + + (-xd->mb_to_left_edge / 128 + 1); + + /* current in last frame */ + if (cpi->lf_ref_frame[mb_offset] != INTRA_FRAME) { + near_mvs[vcnt].as_int = cpi->lfmv[mb_offset].as_int; + mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset], refframe, + &near_mvs[vcnt], ref_frame_sign_bias); + near_ref[vcnt] = cpi->lf_ref_frame[mb_offset]; + } + vcnt++; + + /* above in last frame */ + if (cpi->lf_ref_frame[mb_offset - xd->mode_info_stride - 1] != + INTRA_FRAME) { + near_mvs[vcnt].as_int = + cpi->lfmv[mb_offset - xd->mode_info_stride - 1].as_int; + mv_bias( + cpi->lf_ref_frame_sign_bias[mb_offset - xd->mode_info_stride - 1], + refframe, &near_mvs[vcnt], ref_frame_sign_bias); + near_ref[vcnt] = + cpi->lf_ref_frame[mb_offset - xd->mode_info_stride - 1]; + } + vcnt++; + + /* left in last frame */ + if (cpi->lf_ref_frame[mb_offset - 1] != INTRA_FRAME) { + near_mvs[vcnt].as_int = cpi->lfmv[mb_offset - 1].as_int; + mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset - 1], refframe, + &near_mvs[vcnt], ref_frame_sign_bias); + near_ref[vcnt] = cpi->lf_ref_frame[mb_offset - 1]; + } + vcnt++; + + /* right in last frame */ + if (cpi->lf_ref_frame[mb_offset + 1] != INTRA_FRAME) { + near_mvs[vcnt].as_int = cpi->lfmv[mb_offset + 1].as_int; + 
mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset + 1], refframe, + &near_mvs[vcnt], ref_frame_sign_bias); + near_ref[vcnt] = cpi->lf_ref_frame[mb_offset + 1]; + } + vcnt++; + + /* below in last frame */ + if (cpi->lf_ref_frame[mb_offset + xd->mode_info_stride + 1] != + INTRA_FRAME) { + near_mvs[vcnt].as_int = + cpi->lfmv[mb_offset + xd->mode_info_stride + 1].as_int; + mv_bias( + cpi->lf_ref_frame_sign_bias[mb_offset + xd->mode_info_stride + 1], + refframe, &near_mvs[vcnt], ref_frame_sign_bias); + near_ref[vcnt] = + cpi->lf_ref_frame[mb_offset + xd->mode_info_stride + 1]; + } + vcnt++; } - if(cpi->common.last_frame_type != KEY_FRAME) - { - /* calculate sad for last frame 5 nearby MBs. */ - unsigned char *pre_y_buffer = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_buffer + recon_yoffset; - int pre_y_stride = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_stride; - - if(xd->mb_to_top_edge==0) near_sad[4] = INT_MAX; - if(xd->mb_to_left_edge ==0) near_sad[5] = INT_MAX; - if(xd->mb_to_right_edge ==0) near_sad[6] = INT_MAX; - if(xd->mb_to_bottom_edge==0) near_sad[7] = INT_MAX; - - if(near_sad[4] != INT_MAX) - near_sad[4] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, pre_y_buffer - pre_y_stride *16, pre_y_stride); - if(near_sad[5] != INT_MAX) - near_sad[5] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, pre_y_buffer - 16, pre_y_stride); - near_sad[3] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, pre_y_buffer, pre_y_stride); - if(near_sad[6] != INT_MAX) - near_sad[6] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, pre_y_buffer + 16, pre_y_stride); - if(near_sad[7] != INT_MAX) - near_sad[7] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, pre_y_buffer + pre_y_stride *16, pre_y_stride); - } - - if(cpi->common.last_frame_type != KEY_FRAME) - { - insertsortsad(near_sad, near_sadidx, 8); - }else - { - insertsortsad(near_sad, near_sadidx, 3); - } -} - -static void rd_update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv) -{ - if 
(x->e_mbd.mode_info_context->mbmi.mode == SPLITMV) - { - int i; - - for (i = 0; i < x->partition_info->count; i++) - { - if (x->partition_info->bmi[i].mode == NEW4X4) - { - x->MVcount[0][mv_max+((x->partition_info->bmi[i].mv.as_mv.row - - best_ref_mv->as_mv.row) >> 1)]++; - x->MVcount[1][mv_max+((x->partition_info->bmi[i].mv.as_mv.col - - best_ref_mv->as_mv.col) >> 1)]++; - } + for (i = 0; i < vcnt; ++i) { + if (near_ref[near_sadidx[i]] != INTRA_FRAME) { + if (here->mbmi.ref_frame == near_ref[near_sadidx[i]]) { + mv.as_int = near_mvs[near_sadidx[i]].as_int; + find = 1; + if (i < 3) { + *sr = 3; + } else { + *sr = 2; + } + break; } + } } - else if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV) - { - x->MVcount[0][mv_max+((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row - - best_ref_mv->as_mv.row) >> 1)]++; - x->MVcount[1][mv_max+((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col - - best_ref_mv->as_mv.col) >> 1)]++; + + if (!find) { + for (i = 0; i < vcnt; ++i) { + mvx[i] = near_mvs[i].as_mv.row; + mvy[i] = near_mvs[i].as_mv.col; + } + + insertsortmv(mvx, vcnt); + insertsortmv(mvy, vcnt); + mv.as_mv.row = mvx[vcnt / 2]; + mv.as_mv.col = mvy[vcnt / 2]; + + /* sr is set to 0 to allow calling function to decide the search + * range. 
+ */ + *sr = 0; } + } + + /* Set up return values */ + mvp->as_int = mv.as_int; + vp8_clamp_mv2(mvp, xd); } -static int evaluate_inter_mode_rd(int mdcounts[4], - RATE_DISTORTION* rd, - int* disable_skip, - VP8_COMP *cpi, MACROBLOCK *x) -{ - MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode; - BLOCK *b = &x->block[0]; - MACROBLOCKD *xd = &x->e_mbd; - int distortion; - vp8_build_inter16x16_predictors_mby(&x->e_mbd, x->e_mbd.predictor, 16); +void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, + int recon_yoffset, int near_sadidx[]) { + /* near_sad indexes: + * 0-cf above, 1-cf left, 2-cf aboveleft, + * 3-lf current, 4-lf above, 5-lf left, 6-lf right, 7-lf below + */ + int near_sad[8] = { 0 }; + BLOCK *b = &x->block[0]; + unsigned char *src_y_ptr = *(b->base_src); - if (cpi->active_map_enabled && x->active_ptr[0] == 0) { - x->skip = 1; + /* calculate sad for current frame 3 nearby MBs. */ + if (xd->mb_to_top_edge == 0 && xd->mb_to_left_edge == 0) { + near_sad[0] = near_sad[1] = near_sad[2] = INT_MAX; + } else if (xd->mb_to_top_edge == + 0) { /* only has left MB for sad calculation. */ + near_sad[0] = near_sad[2] = INT_MAX; + near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf( + src_y_ptr, b->src_stride, xd->dst.y_buffer - 16, xd->dst.y_stride); + } else if (xd->mb_to_left_edge == + 0) { /* only has left MB for sad calculation. 
*/ + near_sad[1] = near_sad[2] = INT_MAX; + near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf( + src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride * 16, + xd->dst.y_stride); + } else { + near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf( + src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride * 16, + xd->dst.y_stride); + near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf( + src_y_ptr, b->src_stride, xd->dst.y_buffer - 16, xd->dst.y_stride); + near_sad[2] = cpi->fn_ptr[BLOCK_16X16].sdf( + src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride * 16 - 16, + xd->dst.y_stride); + } + + if (cpi->common.last_frame_type != KEY_FRAME) { + /* calculate sad for last frame 5 nearby MBs. */ + unsigned char *pre_y_buffer = + cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_buffer + recon_yoffset; + int pre_y_stride = cpi->common.yv12_fb[cpi->common.lst_fb_idx].y_stride; + + if (xd->mb_to_top_edge == 0) near_sad[4] = INT_MAX; + if (xd->mb_to_left_edge == 0) near_sad[5] = INT_MAX; + if (xd->mb_to_right_edge == 0) near_sad[6] = INT_MAX; + if (xd->mb_to_bottom_edge == 0) near_sad[7] = INT_MAX; + + if (near_sad[4] != INT_MAX) { + near_sad[4] = cpi->fn_ptr[BLOCK_16X16].sdf( + src_y_ptr, b->src_stride, pre_y_buffer - pre_y_stride * 16, + pre_y_stride); } - else if (x->encode_breakout) - { - unsigned int sse; - unsigned int var; - unsigned int threshold = (xd->block[0].dequant[1] - * xd->block[0].dequant[1] >>4); + if (near_sad[5] != INT_MAX) { + near_sad[5] = cpi->fn_ptr[BLOCK_16X16].sdf( + src_y_ptr, b->src_stride, pre_y_buffer - 16, pre_y_stride); + } + near_sad[3] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, + pre_y_buffer, pre_y_stride); + if (near_sad[6] != INT_MAX) { + near_sad[6] = cpi->fn_ptr[BLOCK_16X16].sdf( + src_y_ptr, b->src_stride, pre_y_buffer + 16, pre_y_stride); + } + if (near_sad[7] != INT_MAX) { + near_sad[7] = cpi->fn_ptr[BLOCK_16X16].sdf( + src_y_ptr, b->src_stride, pre_y_buffer + pre_y_stride * 16, + pre_y_stride); + } + } - if(threshold < 
x->encode_breakout) - threshold = x->encode_breakout; + if (cpi->common.last_frame_type != KEY_FRAME) { + insertsortsad(near_sad, near_sadidx, 8); + } else { + insertsortsad(near_sad, near_sadidx, 3); + } +} - var = vpx_variance16x16 - (*(b->base_src), b->src_stride, - x->e_mbd.predictor, 16, &sse); +static void rd_update_mvcount(MACROBLOCK *x, int_mv *best_ref_mv) { + if (x->e_mbd.mode_info_context->mbmi.mode == SPLITMV) { + int i; - if (sse < threshold) - { - unsigned int q2dc = xd->block[24].dequant[0]; - /* If theres is no codeable 2nd order dc - or a very small uniform pixel change change */ - if ((sse - var < q2dc * q2dc >>4) || - (sse /2 > var && sse-var < 64)) - { - /* Check u and v to make sure skip is ok */ - unsigned int sse2 = VP8_UVSSE(x); - if (sse2 * 2 < threshold) - { - x->skip = 1; - rd->distortion2 = sse + sse2; - rd->rate2 = 500; + for (i = 0; i < x->partition_info->count; ++i) { + if (x->partition_info->bmi[i].mode == NEW4X4) { + x->MVcount[0][mv_max + ((x->partition_info->bmi[i].mv.as_mv.row - + best_ref_mv->as_mv.row) >> + 1)]++; + x->MVcount[1][mv_max + ((x->partition_info->bmi[i].mv.as_mv.col - + best_ref_mv->as_mv.col) >> + 1)]++; + } + } + } else if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV) { + x->MVcount[0][mv_max + ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.row - + best_ref_mv->as_mv.row) >> + 1)]++; + x->MVcount[1][mv_max + ((x->e_mbd.mode_info_context->mbmi.mv.as_mv.col - + best_ref_mv->as_mv.col) >> + 1)]++; + } +} - /* for best_yrd calculation */ - rd->rate_uv = 0; - rd->distortion_uv = sse2; +static int evaluate_inter_mode_rd(int mdcounts[4], RATE_DISTORTION *rd, + int *disable_skip, VP8_COMP *cpi, + MACROBLOCK *x) { + MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode; + BLOCK *b = &x->block[0]; + MACROBLOCKD *xd = &x->e_mbd; + int distortion; + vp8_build_inter16x16_predictors_mby(&x->e_mbd, x->e_mbd.predictor, 16); - *disable_skip = 1; - return RDCOST(x->rdmult, x->rddiv, rd->rate2, - rd->distortion2); - 
} - } + if (cpi->active_map_enabled && x->active_ptr[0] == 0) { + x->skip = 1; + } else if (x->encode_breakout) { + unsigned int sse; + unsigned int var; + unsigned int threshold = + (xd->block[0].dequant[1] * xd->block[0].dequant[1] >> 4); + + if (threshold < x->encode_breakout) threshold = x->encode_breakout; + + var = vpx_variance16x16(*(b->base_src), b->src_stride, x->e_mbd.predictor, + 16, &sse); + + if (sse < threshold) { + unsigned int q2dc = xd->block[24].dequant[0]; + /* If theres is no codeable 2nd order dc + or a very small uniform pixel change change */ + if ((sse - var> 4) || (sse / 2 > var && sse - var < 64)) { + /* Check u and v to make sure skip is ok */ + unsigned int sse2 = VP8_UVSSE(x); + if (sse2 * 2 < threshold) { + x->skip = 1; + rd->distortion2 = sse + sse2; + rd->rate2 = 500; + + /* for best_yrd calculation */ + rd->rate_uv = 0; + rd->distortion_uv = sse2; + + *disable_skip = 1; + return RDCOST(x->rdmult, x->rddiv, rd->rate2, rd->distortion2); } + } } + } + /* Add in the Mv/mode cost */ + rd->rate2 += vp8_cost_mv_ref(this_mode, mdcounts); - /* Add in the Mv/mode cost */ - rd->rate2 += vp8_cost_mv_ref(this_mode, mdcounts); + /* Y cost and distortion */ + macro_block_yrd(x, &rd->rate_y, &distortion); + rd->rate2 += rd->rate_y; + rd->distortion2 += distortion; - /* Y cost and distortion */ - macro_block_yrd(x, &rd->rate_y, &distortion); - rd->rate2 += rd->rate_y; - rd->distortion2 += distortion; - - /* UV cost and distortion */ - rd_inter16x16_uv(cpi, x, &rd->rate_uv, &rd->distortion_uv, - cpi->common.full_pixel); - rd->rate2 += rd->rate_uv; - rd->distortion2 += rd->distortion_uv; - return INT_MAX; + /* UV cost and distortion */ + rd_inter16x16_uv(cpi, x, &rd->rate_uv, &rd->distortion_uv, + cpi->common.full_pixel); + rd->rate2 += rd->rate_uv; + rd->distortion2 += rd->distortion_uv; + return INT_MAX; } -static int calculate_final_rd_costs(int this_rd, - RATE_DISTORTION* rd, - int* other_cost, - int disable_skip, - int uv_intra_tteob, - int 
intra_rd_penalty, - VP8_COMP *cpi, MACROBLOCK *x) -{ - MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode; +static int calculate_final_rd_costs(int this_rd, RATE_DISTORTION *rd, + int *other_cost, int disable_skip, + int uv_intra_tteob, int intra_rd_penalty, + VP8_COMP *cpi, MACROBLOCK *x) { + MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode; - /* Where skip is allowable add in the default per mb cost for the no - * skip case. where we then decide to skip we have to delete this and - * replace it with the cost of signalling a skip + /* Where skip is allowable add in the default per mb cost for the no + * skip case. where we then decide to skip we have to delete this and + * replace it with the cost of signalling a skip + */ + if (cpi->common.mb_no_coeff_skip) { + *other_cost += vp8_cost_bit(cpi->prob_skip_false, 0); + rd->rate2 += *other_cost; + } + + /* Estimate the reference frame signaling cost and add it + * to the rolling cost variable. + */ + rd->rate2 += x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame]; + + if (!disable_skip) { + /* Test for the condition where skip block will be activated + * because there are no non zero coefficients and make any + * necessary adjustment for rate */ - if (cpi->common.mb_no_coeff_skip) - { - *other_cost += vp8_cost_bit(cpi->prob_skip_false, 0); - rd->rate2 += *other_cost; - } + if (cpi->common.mb_no_coeff_skip) { + int i; + int tteob; + int has_y2_block = (this_mode != SPLITMV && this_mode != B_PRED); - /* Estimate the reference frame signaling cost and add it - * to the rolling cost variable. 
- */ - rd->rate2 += - x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame]; + tteob = 0; + if (has_y2_block) tteob += x->e_mbd.eobs[24]; - if (!disable_skip) - { - /* Test for the condition where skip block will be activated - * because there are no non zero coefficients and make any - * necessary adjustment for rate - */ - if (cpi->common.mb_no_coeff_skip) - { - int i; - int tteob; - int has_y2_block = (this_mode!=SPLITMV && this_mode!=B_PRED); + for (i = 0; i < 16; ++i) tteob += (x->e_mbd.eobs[i] > has_y2_block); - tteob = 0; - if(has_y2_block) - tteob += x->e_mbd.eobs[24]; + if (x->e_mbd.mode_info_context->mbmi.ref_frame) { + for (i = 16; i < 24; ++i) tteob += x->e_mbd.eobs[i]; + } else { + tteob += uv_intra_tteob; + } - for (i = 0; i < 16; i++) - tteob += (x->e_mbd.eobs[i] > has_y2_block); + if (tteob == 0) { + rd->rate2 -= (rd->rate_y + rd->rate_uv); + /* for best_yrd calculation */ + rd->rate_uv = 0; - if (x->e_mbd.mode_info_context->mbmi.ref_frame) - { - for (i = 16; i < 24; i++) - tteob += x->e_mbd.eobs[i]; - } - else - tteob += uv_intra_tteob; + /* Back out no skip flag costing and add in skip flag costing */ + if (cpi->prob_skip_false) { + int prob_skip_cost; - if (tteob == 0) - { - rd->rate2 -= (rd->rate_y + rd->rate_uv); - /* for best_yrd calculation */ - rd->rate_uv = 0; - - /* Back out no skip flag costing and add in skip flag costing */ - if (cpi->prob_skip_false) - { - int prob_skip_cost; - - prob_skip_cost = vp8_cost_bit(cpi->prob_skip_false, 1); - prob_skip_cost -= vp8_cost_bit(cpi->prob_skip_false, 0); - rd->rate2 += prob_skip_cost; - *other_cost += prob_skip_cost; - } - } + prob_skip_cost = vp8_cost_bit(cpi->prob_skip_false, 1); + prob_skip_cost -= (int)vp8_cost_bit(cpi->prob_skip_false, 0); + rd->rate2 += prob_skip_cost; + *other_cost += prob_skip_cost; } - /* Calculate the final RD estimate for this mode */ - this_rd = RDCOST(x->rdmult, x->rddiv, rd->rate2, rd->distortion2); - if (this_rd < INT_MAX && 
x->e_mbd.mode_info_context->mbmi.ref_frame - == INTRA_FRAME) - this_rd += intra_rd_penalty; + } } - return this_rd; + /* Calculate the final RD estimate for this mode */ + this_rd = RDCOST(x->rdmult, x->rddiv, rd->rate2, rd->distortion2); + if (this_rd < INT_MAX && + x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) { + this_rd += intra_rd_penalty; + } + } + return this_rd; } -static void update_best_mode(BEST_MODE* best_mode, int this_rd, - RATE_DISTORTION* rd, int other_cost, MACROBLOCK *x) -{ - MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode; +static void update_best_mode(BEST_MODE *best_mode, int this_rd, + RATE_DISTORTION *rd, int other_cost, + MACROBLOCK *x) { + MB_PREDICTION_MODE this_mode = x->e_mbd.mode_info_context->mbmi.mode; - other_cost += - x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame]; + other_cost += x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame]; - /* Calculate the final y RD estimate for this mode */ - best_mode->yrd = RDCOST(x->rdmult, x->rddiv, (rd->rate2-rd->rate_uv-other_cost), - (rd->distortion2-rd->distortion_uv)); + /* Calculate the final y RD estimate for this mode */ + best_mode->yrd = + RDCOST(x->rdmult, x->rddiv, (rd->rate2 - rd->rate_uv - other_cost), + (rd->distortion2 - rd->distortion_uv)); - best_mode->rd = this_rd; - memcpy(&best_mode->mbmode, &x->e_mbd.mode_info_context->mbmi, sizeof(MB_MODE_INFO)); - memcpy(&best_mode->partition, x->partition_info, sizeof(PARTITION_INFO)); + best_mode->rd = this_rd; + memcpy(&best_mode->mbmode, &x->e_mbd.mode_info_context->mbmi, + sizeof(MB_MODE_INFO)); + memcpy(&best_mode->partition, x->partition_info, sizeof(PARTITION_INFO)); - if ((this_mode == B_PRED) || (this_mode == SPLITMV)) - { - int i; - for (i = 0; i < 16; i++) - { - best_mode->bmodes[i] = x->e_mbd.block[i].bmi; - } + if ((this_mode == B_PRED) || (this_mode == SPLITMV)) { + int i; + for (i = 0; i < 16; ++i) { + best_mode->bmodes[i] = x->e_mbd.block[i].bmi; } + } } void 
vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int recon_uvoffset, int *returnrate, - int *returndistortion, int *returnintra, - int mb_row, int mb_col) -{ - BLOCK *b = &x->block[0]; - BLOCKD *d = &x->e_mbd.block[0]; - MACROBLOCKD *xd = &x->e_mbd; - int_mv best_ref_mv_sb[2]; - int_mv mode_mv_sb[2][MB_MODE_COUNT]; - int_mv best_ref_mv; - int_mv *mode_mv; - MB_PREDICTION_MODE this_mode; - int num00; - int best_mode_index = 0; - BEST_MODE best_mode; + int *returndistortion, int *returnintra, int mb_row, + int mb_col) { + BLOCK *b = &x->block[0]; + BLOCKD *d = &x->e_mbd.block[0]; + MACROBLOCKD *xd = &x->e_mbd; + int_mv best_ref_mv_sb[2]; + int_mv mode_mv_sb[2][MB_MODE_COUNT]; + int_mv best_ref_mv; + int_mv *mode_mv; + MB_PREDICTION_MODE this_mode; + int num00; + int best_mode_index = 0; + BEST_MODE best_mode; - int i; - int mode_index; - int mdcounts[4]; - int rate; - RATE_DISTORTION rd; - int uv_intra_rate, uv_intra_distortion, uv_intra_rate_tokenonly; - int uv_intra_tteob = 0; - int uv_intra_done = 0; + int i; + int mode_index; + int mdcounts[4]; + int rate; + RATE_DISTORTION rd; + int uv_intra_rate, uv_intra_distortion, uv_intra_rate_tokenonly; + int uv_intra_tteob = 0; + int uv_intra_done = 0; - MB_PREDICTION_MODE uv_intra_mode = 0; - int_mv mvp; - int near_sadidx[8] = {0, 1, 2, 3, 4, 5, 6, 7}; - int saddone=0; - /* search range got from mv_pred(). It uses step_param levels. (0-7) */ - int sr=0; + MB_PREDICTION_MODE uv_intra_mode = 0; + int_mv mvp; + int near_sadidx[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; + int saddone = 0; + /* search range got from mv_pred(). It uses step_param levels. 
(0-7) */ + int sr = 0; - unsigned char *plane[4][3]; - int ref_frame_map[4]; - int sign_bias = 0; + unsigned char *plane[4][3]; + int ref_frame_map[4]; + int sign_bias = 0; - int intra_rd_penalty = 10* vp8_dc_quant(cpi->common.base_qindex, - cpi->common.y1dc_delta_q); + int intra_rd_penalty = + 10 * vp8_dc_quant(cpi->common.base_qindex, cpi->common.y1dc_delta_q); #if CONFIG_TEMPORAL_DENOISING - unsigned int zero_mv_sse = UINT_MAX, best_sse = UINT_MAX, - best_rd_sse = UINT_MAX; + unsigned int zero_mv_sse = UINT_MAX, best_sse = UINT_MAX, + best_rd_sse = UINT_MAX; #endif + mode_mv = mode_mv_sb[sign_bias]; + best_ref_mv.as_int = 0; + best_mode.rd = INT_MAX; + best_mode.yrd = INT_MAX; + best_mode.intra_rd = INT_MAX; + memset(mode_mv_sb, 0, sizeof(mode_mv_sb)); + memset(&best_mode.mbmode, 0, sizeof(best_mode.mbmode)); + memset(&best_mode.bmodes, 0, sizeof(best_mode.bmodes)); + + /* Setup search priorities */ + get_reference_search_order(cpi, ref_frame_map); + + /* Check to see if there is at least 1 valid reference frame that we need + * to calculate near_mvs. + */ + if (ref_frame_map[1] > 0) { + sign_bias = vp8_find_near_mvs_bias( + &x->e_mbd, x->e_mbd.mode_info_context, mode_mv_sb, best_ref_mv_sb, + mdcounts, ref_frame_map[1], cpi->common.ref_frame_sign_bias); + mode_mv = mode_mv_sb[sign_bias]; - best_ref_mv.as_int = 0; - best_mode.rd = INT_MAX; - best_mode.yrd = INT_MAX; - best_mode.intra_rd = INT_MAX; - memset(mode_mv_sb, 0, sizeof(mode_mv_sb)); - memset(&best_mode.mbmode, 0, sizeof(best_mode.mbmode)); - memset(&best_mode.bmodes, 0, sizeof(best_mode.bmodes)); + best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int; + } - /* Setup search priorities */ - get_reference_search_order(cpi, ref_frame_map); + get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset); - /* Check to see if there is at least 1 valid reference frame that we need - * to calculate near_mvs. 
+ *returnintra = INT_MAX; + /* Count of the number of MBs tested so far this frame */ + x->mbs_tested_so_far++; + + x->skip = 0; + + for (mode_index = 0; mode_index < MAX_MODES; ++mode_index) { + int this_rd = INT_MAX; + int disable_skip = 0; + int other_cost = 0; + int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]]; + + /* Test best rd so far against threshold for trying this mode. */ + if (best_mode.rd <= x->rd_threshes[mode_index]) continue; + + if (this_ref_frame < 0) continue; + + /* These variables hold are rolling total cost and distortion for + * this mode */ - if (ref_frame_map[1] > 0) - { - sign_bias = vp8_find_near_mvs_bias(&x->e_mbd, - x->e_mbd.mode_info_context, - mode_mv_sb, - best_ref_mv_sb, - mdcounts, - ref_frame_map[1], - cpi->common.ref_frame_sign_bias); + rd.rate2 = 0; + rd.distortion2 = 0; + this_mode = vp8_mode_order[mode_index]; + + x->e_mbd.mode_info_context->mbmi.mode = this_mode; + x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame; + + /* Only consider ZEROMV/ALTREF_FRAME for alt ref frame, + * unless ARNR filtering is enabled in which case we want + * an unfiltered alternative + */ + if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) { + if (this_mode != ZEROMV || + x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) { + continue; + } + } + + /* everything but intra */ + if (x->e_mbd.mode_info_context->mbmi.ref_frame) { + x->e_mbd.pre.y_buffer = plane[this_ref_frame][0]; + x->e_mbd.pre.u_buffer = plane[this_ref_frame][1]; + x->e_mbd.pre.v_buffer = plane[this_ref_frame][2]; + + if (sign_bias != cpi->common.ref_frame_sign_bias[this_ref_frame]) { + sign_bias = cpi->common.ref_frame_sign_bias[this_ref_frame]; mode_mv = mode_mv_sb[sign_bias]; best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int; + } } - get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset); - - *returnintra = INT_MAX; - /* Count of the number of MBs tested so far this frame */ - x->mbs_tested_so_far++; - - x->skip 
= 0; - - for (mode_index = 0; mode_index < MAX_MODES; mode_index++) - { - int this_rd = INT_MAX; - int disable_skip = 0; - int other_cost = 0; - int this_ref_frame = ref_frame_map[vp8_ref_frame_order[mode_index]]; - - /* Test best rd so far against threshold for trying this mode. */ - if (best_mode.rd <= x->rd_threshes[mode_index]) - continue; - - if (this_ref_frame < 0) - continue; - - /* These variables hold are rolling total cost and distortion for - * this mode + /* Check to see if the testing frequency for this mode is at its + * max If so then prevent it from being tested and increase the + * threshold for its testing + */ + if (x->mode_test_hit_counts[mode_index] && + (cpi->mode_check_freq[mode_index] > 1)) { + if (x->mbs_tested_so_far <= cpi->mode_check_freq[mode_index] * + x->mode_test_hit_counts[mode_index]) { + /* Increase the threshold for coding this mode to make it + * less likely to be chosen */ - rd.rate2 = 0; - rd.distortion2 = 0; + x->rd_thresh_mult[mode_index] += 4; - this_mode = vp8_mode_order[mode_index]; - - x->e_mbd.mode_info_context->mbmi.mode = this_mode; - x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame; - - /* Only consider ZEROMV/ALTREF_FRAME for alt ref frame, - * unless ARNR filtering is enabled in which case we want - * an unfiltered alternative - */ - if (cpi->is_src_frame_alt_ref && (cpi->oxcf.arnr_max_frames == 0)) - { - if (this_mode != ZEROMV || x->e_mbd.mode_info_context->mbmi.ref_frame != ALTREF_FRAME) - continue; + if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) { + x->rd_thresh_mult[mode_index] = MAX_THRESHMULT; } - /* everything but intra */ - if (x->e_mbd.mode_info_context->mbmi.ref_frame) - { - x->e_mbd.pre.y_buffer = plane[this_ref_frame][0]; - x->e_mbd.pre.u_buffer = plane[this_ref_frame][1]; - x->e_mbd.pre.v_buffer = plane[this_ref_frame][2]; - - if (sign_bias != cpi->common.ref_frame_sign_bias[this_ref_frame]) - { - sign_bias = cpi->common.ref_frame_sign_bias[this_ref_frame]; - mode_mv = 
mode_mv_sb[sign_bias]; - best_ref_mv.as_int = best_ref_mv_sb[sign_bias].as_int; - } - } - - /* Check to see if the testing frequency for this mode is at its - * max If so then prevent it from being tested and increase the - * threshold for its testing - */ - if (x->mode_test_hit_counts[mode_index] && (cpi->mode_check_freq[mode_index] > 1)) - { - if (x->mbs_tested_so_far <= cpi->mode_check_freq[mode_index] * x->mode_test_hit_counts[mode_index]) - { - /* Increase the threshold for coding this mode to make it - * less likely to be chosen - */ - x->rd_thresh_mult[mode_index] += 4; - - if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) - x->rd_thresh_mult[mode_index] = MAX_THRESHMULT; - - x->rd_threshes[mode_index] = - (cpi->rd_baseline_thresh[mode_index] >> 7) * - x->rd_thresh_mult[mode_index]; - - continue; - } - } - - /* We have now reached the point where we are going to test the - * current mode so increment the counter for the number of times - * it has been tested - */ - x->mode_test_hit_counts[mode_index] ++; - - /* Experimental code. Special case for gf and arf zeromv modes. - * Increase zbin size to supress noise - */ - if (x->zbin_mode_boost_enabled) - { - if ( this_ref_frame == INTRA_FRAME ) - x->zbin_mode_boost = 0; - else - { - if (vp8_mode_order[mode_index] == ZEROMV) - { - if (this_ref_frame != LAST_FRAME) - x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST; - else - x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST; - } - else if (vp8_mode_order[mode_index] == SPLITMV) - x->zbin_mode_boost = 0; - else - x->zbin_mode_boost = MV_ZBIN_BOOST; - } - - vp8_update_zbin_extra(cpi, x); - } - - if(!uv_intra_done && this_ref_frame == INTRA_FRAME) - { - rd_pick_intra_mbuv_mode(x, &uv_intra_rate, - &uv_intra_rate_tokenonly, - &uv_intra_distortion); - uv_intra_mode = x->e_mbd.mode_info_context->mbmi.uv_mode; - - /* - * Total of the eobs is used later to further adjust rate2. 
Since uv - * block's intra eobs will be overwritten when we check inter modes, - * we need to save uv_intra_tteob here. - */ - for (i = 16; i < 24; i++) - uv_intra_tteob += x->e_mbd.eobs[i]; - - uv_intra_done = 1; - } - - switch (this_mode) - { - case B_PRED: - { - int tmp_rd; - - /* Note the rate value returned here includes the cost of - * coding the BPRED mode: x->mbmode_cost[x->e_mbd.frame_type][BPRED] - */ - int distortion; - tmp_rd = rd_pick_intra4x4mby_modes(x, &rate, &rd.rate_y, &distortion, best_mode.yrd); - rd.rate2 += rate; - rd.distortion2 += distortion; - - if(tmp_rd < best_mode.yrd) - { - rd.rate2 += uv_intra_rate; - rd.rate_uv = uv_intra_rate_tokenonly; - rd.distortion2 += uv_intra_distortion; - rd.distortion_uv = uv_intra_distortion; - } - else - { - this_rd = INT_MAX; - disable_skip = 1; - } - } - break; - - case SPLITMV: - { - int tmp_rd; - int this_rd_thresh; - int distortion; - - this_rd_thresh = (vp8_ref_frame_order[mode_index] == 1) ? - x->rd_threshes[THR_NEW1] : x->rd_threshes[THR_NEW3]; - this_rd_thresh = (vp8_ref_frame_order[mode_index] == 2) ? 
- x->rd_threshes[THR_NEW2] : this_rd_thresh; - - tmp_rd = vp8_rd_pick_best_mbsegmentation(cpi, x, &best_ref_mv, - best_mode.yrd, mdcounts, - &rate, &rd.rate_y, &distortion, this_rd_thresh) ; - - rd.rate2 += rate; - rd.distortion2 += distortion; - - /* If even the 'Y' rd value of split is higher than best so far - * then dont bother looking at UV - */ - if (tmp_rd < best_mode.yrd) - { - /* Now work out UV cost and add it in */ - rd_inter4x4_uv(cpi, x, &rd.rate_uv, &rd.distortion_uv, cpi->common.full_pixel); - rd.rate2 += rd.rate_uv; - rd.distortion2 += rd.distortion_uv; - } - else - { - this_rd = INT_MAX; - disable_skip = 1; - } - } - break; - case DC_PRED: - case V_PRED: - case H_PRED: - case TM_PRED: - { - int distortion; - x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME; - - vp8_build_intra_predictors_mby_s(xd, - xd->dst.y_buffer - xd->dst.y_stride, - xd->dst.y_buffer - 1, - xd->dst.y_stride, - xd->predictor, - 16); - macro_block_yrd(x, &rd.rate_y, &distortion) ; - rd.rate2 += rd.rate_y; - rd.distortion2 += distortion; - rd.rate2 += x->mbmode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.mode]; - rd.rate2 += uv_intra_rate; - rd.rate_uv = uv_intra_rate_tokenonly; - rd.distortion2 += uv_intra_distortion; - rd.distortion_uv = uv_intra_distortion; - } - break; - - case NEWMV: - { - int thissme; - int bestsme = INT_MAX; - int step_param = cpi->sf.first_step; - int further_steps; - int n; - int do_refine=1; /* If last step (1-away) of n-step search doesn't pick the center point as the best match, - we will do a final 1-away diamond refining search */ - - int sadpb = x->sadperbit16; - int_mv mvp_full; - - int col_min = ((best_ref_mv.as_mv.col+7)>>3) - MAX_FULL_PEL_VAL; - int row_min = ((best_ref_mv.as_mv.row+7)>>3) - MAX_FULL_PEL_VAL; - int col_max = (best_ref_mv.as_mv.col>>3) + MAX_FULL_PEL_VAL; - int row_max = (best_ref_mv.as_mv.row>>3) + MAX_FULL_PEL_VAL; - - int tmp_col_min = x->mv_col_min; - int tmp_col_max = x->mv_col_max; - int tmp_row_min = 
x->mv_row_min; - int tmp_row_max = x->mv_row_max; - - if(!saddone) - { - vp8_cal_sad(cpi,xd,x, recon_yoffset ,&near_sadidx[0] ); - saddone = 1; - } - - vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp, - x->e_mbd.mode_info_context->mbmi.ref_frame, cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]); - - mvp_full.as_mv.col = mvp.as_mv.col>>3; - mvp_full.as_mv.row = mvp.as_mv.row>>3; - - /* Get intersection of UMV window and valid MV window to - * reduce # of checks in diamond search. - */ - if (x->mv_col_min < col_min ) - x->mv_col_min = col_min; - if (x->mv_col_max > col_max ) - x->mv_col_max = col_max; - if (x->mv_row_min < row_min ) - x->mv_row_min = row_min; - if (x->mv_row_max > row_max ) - x->mv_row_max = row_max; - - /* adjust search range according to sr from mv prediction */ - if(sr > step_param) - step_param = sr; - - /* Initial step/diamond search */ - { - bestsme = cpi->diamond_search_sad(x, b, d, &mvp_full, &d->bmi.mv, - step_param, sadpb, &num00, - &cpi->fn_ptr[BLOCK_16X16], - x->mvcost, &best_ref_mv); - mode_mv[NEWMV].as_int = d->bmi.mv.as_int; - - /* Further step/diamond searches as necessary */ - further_steps = (cpi->sf.max_step_search_steps - 1) - step_param; - - n = num00; - num00 = 0; - - /* If there won't be more n-step search, check to see if refining search is needed. */ - if (n > further_steps) - do_refine = 0; - - while (n < further_steps) - { - n++; - - if (num00) - num00--; - else - { - thissme = cpi->diamond_search_sad(x, b, d, &mvp_full, - &d->bmi.mv, step_param + n, sadpb, &num00, - &cpi->fn_ptr[BLOCK_16X16], x->mvcost, - &best_ref_mv); - - /* check to see if refining search is needed. 
*/ - if (num00 > (further_steps-n)) - do_refine = 0; - - if (thissme < bestsme) - { - bestsme = thissme; - mode_mv[NEWMV].as_int = d->bmi.mv.as_int; - } - else - { - d->bmi.mv.as_int = mode_mv[NEWMV].as_int; - } - } - } - } - - /* final 1-away diamond refining search */ - if (do_refine == 1) - { - int search_range; - - search_range = 8; - - thissme = cpi->refining_search_sad(x, b, d, &d->bmi.mv, sadpb, - search_range, &cpi->fn_ptr[BLOCK_16X16], - x->mvcost, &best_ref_mv); - - if (thissme < bestsme) - { - bestsme = thissme; - mode_mv[NEWMV].as_int = d->bmi.mv.as_int; - } - else - { - d->bmi.mv.as_int = mode_mv[NEWMV].as_int; - } - } - - x->mv_col_min = tmp_col_min; - x->mv_col_max = tmp_col_max; - x->mv_row_min = tmp_row_min; - x->mv_row_max = tmp_row_max; - - if (bestsme < INT_MAX) - { - int dis; /* TODO: use dis in distortion calculation later. */ - unsigned int sse; - cpi->find_fractional_mv_step(x, b, d, &d->bmi.mv, &best_ref_mv, - x->errorperbit, - &cpi->fn_ptr[BLOCK_16X16], - x->mvcost, &dis, &sse); - } - - mode_mv[NEWMV].as_int = d->bmi.mv.as_int; - - /* Add the new motion vector cost to our rolling cost variable */ - rd.rate2 += vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, x->mvcost, 96); - } - - case NEARESTMV: - case NEARMV: - /* Clip "next_nearest" so that it does not extend to far out - * of image - */ - vp8_clamp_mv2(&mode_mv[this_mode], xd); - - /* Do not bother proceeding if the vector (from newmv, nearest - * or near) is 0,0 as this should then be coded using the zeromv - * mode. - */ - if (((this_mode == NEARMV) || (this_mode == NEARESTMV)) && (mode_mv[this_mode].as_int == 0)) - continue; - - case ZEROMV: - - /* Trap vectors that reach beyond the UMV borders - * Note that ALL New MV, Nearest MV Near MV and Zero MV code - * drops through to this point because of the lack of break - * statements in the previous two cases. 
- */ - if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) || - ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) - continue; - - vp8_set_mbmode_and_mvs(x, this_mode, &mode_mv[this_mode]); - this_rd = evaluate_inter_mode_rd(mdcounts, &rd, - &disable_skip, cpi, x); - break; - - default: - break; - } - - this_rd = calculate_final_rd_costs(this_rd, &rd, &other_cost, - disable_skip, uv_intra_tteob, - intra_rd_penalty, cpi, x); - - /* Keep record of best intra distortion */ - if ((x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) && - (this_rd < best_mode.intra_rd) ) - { - best_mode.intra_rd = this_rd; - *returnintra = rd.distortion2 ; - } -#if CONFIG_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity) - { - unsigned int sse; - vp8_get_inter_mbpred_error(x,&cpi->fn_ptr[BLOCK_16X16],&sse, - mode_mv[this_mode]); - - if (sse < best_rd_sse) - best_rd_sse = sse; - - /* Store for later use by denoiser. */ - if (this_mode == ZEROMV && sse < zero_mv_sse ) - { - zero_mv_sse = sse; - x->best_zeromv_reference_frame = - x->e_mbd.mode_info_context->mbmi.ref_frame; - } - - /* Store the best NEWMV in x for later use in the denoiser. */ - if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV && - sse < best_sse) - { - best_sse = sse; - vp8_get_inter_mbpred_error(x,&cpi->fn_ptr[BLOCK_16X16],&best_sse, - mode_mv[this_mode]); - x->best_sse_inter_mode = NEWMV; - x->best_sse_mv = x->e_mbd.mode_info_context->mbmi.mv; - x->need_to_clamp_best_mvs = - x->e_mbd.mode_info_context->mbmi.need_to_clamp_mvs; - x->best_reference_frame = - x->e_mbd.mode_info_context->mbmi.ref_frame; - } - } -#endif - - /* Did this mode help.. 
i.i is it the new best mode */ - if (this_rd < best_mode.rd || x->skip) - { - /* Note index of best mode so far */ - best_mode_index = mode_index; - *returnrate = rd.rate2; - *returndistortion = rd.distortion2; - if (this_mode <= B_PRED) - { - x->e_mbd.mode_info_context->mbmi.uv_mode = uv_intra_mode; - /* required for left and above block mv */ - x->e_mbd.mode_info_context->mbmi.mv.as_int = 0; - } - update_best_mode(&best_mode, this_rd, &rd, other_cost, x); - - - /* Testing this mode gave rise to an improvement in best error - * score. Lower threshold a bit for next time - */ - x->rd_thresh_mult[mode_index] = - (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) ? - x->rd_thresh_mult[mode_index] - 2 : MIN_THRESHMULT; - } - - /* If the mode did not help improve the best error case then raise - * the threshold for testing that mode next time around. - */ - else - { - x->rd_thresh_mult[mode_index] += 4; - - if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) - x->rd_thresh_mult[mode_index] = MAX_THRESHMULT; - } x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * - x->rd_thresh_mult[mode_index]; - - if (x->skip) - break; + x->rd_thresh_mult[mode_index]; + continue; + } } - /* Reduce the activation RD thresholds for the best choice mode */ - if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) - { - int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 2); + /* We have now reached the point where we are going to test the + * current mode so increment the counter for the number of times + * it has been tested + */ + x->mode_test_hit_counts[mode_index]++; - x->rd_thresh_mult[best_mode_index] = - (x->rd_thresh_mult[best_mode_index] >= - (MIN_THRESHMULT + best_adjustment)) ? 
- x->rd_thresh_mult[best_mode_index] - best_adjustment : - MIN_THRESHMULT; - x->rd_threshes[best_mode_index] = - (cpi->rd_baseline_thresh[best_mode_index] >> 7) * - x->rd_thresh_mult[best_mode_index]; - } - -#if CONFIG_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity) - { - int block_index = mb_row * cpi->common.mb_cols + mb_col; - if (x->best_sse_inter_mode == DC_PRED) - { - /* No best MV found. */ - x->best_sse_inter_mode = best_mode.mbmode.mode; - x->best_sse_mv = best_mode.mbmode.mv; - x->need_to_clamp_best_mvs = best_mode.mbmode.need_to_clamp_mvs; - x->best_reference_frame = best_mode.mbmode.ref_frame; - best_sse = best_rd_sse; + /* Experimental code. Special case for gf and arf zeromv modes. + * Increase zbin size to supress noise + */ + if (x->zbin_mode_boost_enabled) { + if (this_ref_frame == INTRA_FRAME) { + x->zbin_mode_boost = 0; + } else { + if (vp8_mode_order[mode_index] == ZEROMV) { + if (this_ref_frame != LAST_FRAME) { + x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST; + } else { + x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST; + } + } else if (vp8_mode_order[mode_index] == SPLITMV) { + x->zbin_mode_boost = 0; + } else { + x->zbin_mode_boost = MV_ZBIN_BOOST; } - vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse, - recon_yoffset, recon_uvoffset, - &cpi->common.lf_info, mb_row, mb_col, - block_index); + } - /* Reevaluate ZEROMV after denoising. */ - if (best_mode.mbmode.ref_frame == INTRA_FRAME && - x->best_zeromv_reference_frame != INTRA_FRAME) + vp8_update_zbin_extra(cpi, x); + } + + if (!uv_intra_done && this_ref_frame == INTRA_FRAME) { + rd_pick_intra_mbuv_mode(x, &uv_intra_rate, &uv_intra_rate_tokenonly, + &uv_intra_distortion); + uv_intra_mode = x->e_mbd.mode_info_context->mbmi.uv_mode; + + /* + * Total of the eobs is used later to further adjust rate2. Since uv + * block's intra eobs will be overwritten when we check inter modes, + * we need to save uv_intra_tteob here. 
+ */ + for (i = 16; i < 24; ++i) uv_intra_tteob += x->e_mbd.eobs[i]; + + uv_intra_done = 1; + } + + switch (this_mode) { + case B_PRED: { + int tmp_rd; + + /* Note the rate value returned here includes the cost of + * coding the BPRED mode: x->mbmode_cost[x->e_mbd.frame_type][BPRED] + */ + int distortion; + tmp_rd = rd_pick_intra4x4mby_modes(x, &rate, &rd.rate_y, &distortion, + best_mode.yrd); + rd.rate2 += rate; + rd.distortion2 += distortion; + + if (tmp_rd < best_mode.yrd) { + rd.rate2 += uv_intra_rate; + rd.rate_uv = uv_intra_rate_tokenonly; + rd.distortion2 += uv_intra_distortion; + rd.distortion_uv = uv_intra_distortion; + } else { + this_rd = INT_MAX; + disable_skip = 1; + } + break; + } + + case SPLITMV: { + int tmp_rd; + int this_rd_thresh; + int distortion; + + this_rd_thresh = (vp8_ref_frame_order[mode_index] == 1) + ? x->rd_threshes[THR_NEW1] + : x->rd_threshes[THR_NEW3]; + this_rd_thresh = (vp8_ref_frame_order[mode_index] == 2) + ? x->rd_threshes[THR_NEW2] + : this_rd_thresh; + + tmp_rd = vp8_rd_pick_best_mbsegmentation( + cpi, x, &best_ref_mv, best_mode.yrd, mdcounts, &rate, &rd.rate_y, + &distortion, this_rd_thresh); + + rd.rate2 += rate; + rd.distortion2 += distortion; + + /* If even the 'Y' rd value of split is higher than best so far + * then dont bother looking at UV + */ + if (tmp_rd < best_mode.yrd) { + /* Now work out UV cost and add it in */ + rd_inter4x4_uv(cpi, x, &rd.rate_uv, &rd.distortion_uv, + cpi->common.full_pixel); + rd.rate2 += rd.rate_uv; + rd.distortion2 += rd.distortion_uv; + } else { + this_rd = INT_MAX; + disable_skip = 1; + } + break; + } + case DC_PRED: + case V_PRED: + case H_PRED: + case TM_PRED: { + int distortion; + x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME; + + vp8_build_intra_predictors_mby_s( + xd, xd->dst.y_buffer - xd->dst.y_stride, xd->dst.y_buffer - 1, + xd->dst.y_stride, xd->predictor, 16); + macro_block_yrd(x, &rd.rate_y, &distortion); + rd.rate2 += rd.rate_y; + rd.distortion2 += distortion; + 
rd.rate2 += x->mbmode_cost[x->e_mbd.frame_type] + [x->e_mbd.mode_info_context->mbmi.mode]; + rd.rate2 += uv_intra_rate; + rd.rate_uv = uv_intra_rate_tokenonly; + rd.distortion2 += uv_intra_distortion; + rd.distortion_uv = uv_intra_distortion; + break; + } + + case NEWMV: { + int thissme; + int bestsme = INT_MAX; + int step_param = cpi->sf.first_step; + int further_steps; + int n; + /* If last step (1-away) of n-step search doesn't pick the center point + as the best match, we will do a final 1-away diamond refining search + */ + int do_refine = 1; + + int sadpb = x->sadperbit16; + int_mv mvp_full; + + int col_min = ((best_ref_mv.as_mv.col + 7) >> 3) - MAX_FULL_PEL_VAL; + int row_min = ((best_ref_mv.as_mv.row + 7) >> 3) - MAX_FULL_PEL_VAL; + int col_max = (best_ref_mv.as_mv.col >> 3) + MAX_FULL_PEL_VAL; + int row_max = (best_ref_mv.as_mv.row >> 3) + MAX_FULL_PEL_VAL; + + int tmp_col_min = x->mv_col_min; + int tmp_col_max = x->mv_col_max; + int tmp_row_min = x->mv_row_min; + int tmp_row_max = x->mv_row_max; + + if (!saddone) { + vp8_cal_sad(cpi, xd, x, recon_yoffset, &near_sadidx[0]); + saddone = 1; + } + + vp8_mv_pred(cpi, &x->e_mbd, x->e_mbd.mode_info_context, &mvp, + x->e_mbd.mode_info_context->mbmi.ref_frame, + cpi->common.ref_frame_sign_bias, &sr, &near_sadidx[0]); + + mvp_full.as_mv.col = mvp.as_mv.col >> 3; + mvp_full.as_mv.row = mvp.as_mv.row >> 3; + + /* Get intersection of UMV window and valid MV window to + * reduce # of checks in diamond search. 
+ */ + if (x->mv_col_min < col_min) x->mv_col_min = col_min; + if (x->mv_col_max > col_max) x->mv_col_max = col_max; + if (x->mv_row_min < row_min) x->mv_row_min = row_min; + if (x->mv_row_max > row_max) x->mv_row_max = row_max; + + /* adjust search range according to sr from mv prediction */ + if (sr > step_param) step_param = sr; + + /* Initial step/diamond search */ { - int this_rd = INT_MAX; - int disable_skip = 0; - int other_cost = 0; - int this_ref_frame = x->best_zeromv_reference_frame; - rd.rate2 = x->ref_frame_cost[this_ref_frame] + - vp8_cost_mv_ref(ZEROMV, mdcounts); - rd.distortion2 = 0; + bestsme = cpi->diamond_search_sad( + x, b, d, &mvp_full, &d->bmi.mv, step_param, sadpb, &num00, + &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv); + mode_mv[NEWMV].as_int = d->bmi.mv.as_int; - /* set up the proper prediction buffers for the frame */ - x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame; - x->e_mbd.pre.y_buffer = plane[this_ref_frame][0]; - x->e_mbd.pre.u_buffer = plane[this_ref_frame][1]; - x->e_mbd.pre.v_buffer = plane[this_ref_frame][2]; + /* Further step/diamond searches as necessary */ + further_steps = (cpi->sf.max_step_search_steps - 1) - step_param; - x->e_mbd.mode_info_context->mbmi.mode = ZEROMV; - x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED; - x->e_mbd.mode_info_context->mbmi.mv.as_int = 0; + n = num00; + num00 = 0; - this_rd = evaluate_inter_mode_rd(mdcounts, &rd, &disable_skip, cpi, x); - this_rd = calculate_final_rd_costs(this_rd, &rd, &other_cost, - disable_skip, uv_intra_tteob, - intra_rd_penalty, cpi, x); - if (this_rd < best_mode.rd || x->skip) - { - *returnrate = rd.rate2; - *returndistortion = rd.distortion2; - update_best_mode(&best_mode, this_rd, &rd, other_cost, x); + /* If there won't be more n-step search, check to see if refining + * search is needed. 
*/ + if (n > further_steps) do_refine = 0; + + while (n < further_steps) { + n++; + + if (num00) { + num00--; + } else { + thissme = cpi->diamond_search_sad( + x, b, d, &mvp_full, &d->bmi.mv, step_param + n, sadpb, &num00, + &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv); + + /* check to see if refining search is needed. */ + if (num00 > (further_steps - n)) do_refine = 0; + + if (thissme < bestsme) { + bestsme = thissme; + mode_mv[NEWMV].as_int = d->bmi.mv.as_int; + } else { + d->bmi.mv.as_int = mode_mv[NEWMV].as_int; + } } + } } + /* final 1-away diamond refining search */ + if (do_refine == 1) { + int search_range; + + search_range = 8; + + thissme = cpi->refining_search_sad( + x, b, d, &d->bmi.mv, sadpb, search_range, + &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &best_ref_mv); + + if (thissme < bestsme) { + bestsme = thissme; + mode_mv[NEWMV].as_int = d->bmi.mv.as_int; + } else { + d->bmi.mv.as_int = mode_mv[NEWMV].as_int; + } + } + + x->mv_col_min = tmp_col_min; + x->mv_col_max = tmp_col_max; + x->mv_row_min = tmp_row_min; + x->mv_row_max = tmp_row_max; + + if (bestsme < INT_MAX) { + int dis; /* TODO: use dis in distortion calculation later. */ + unsigned int sse; + cpi->find_fractional_mv_step( + x, b, d, &d->bmi.mv, &best_ref_mv, x->errorperbit, + &cpi->fn_ptr[BLOCK_16X16], x->mvcost, &dis, &sse); + } + + mode_mv[NEWMV].as_int = d->bmi.mv.as_int; + + /* Add the new motion vector cost to our rolling cost variable */ + rd.rate2 += + vp8_mv_bit_cost(&mode_mv[NEWMV], &best_ref_mv, x->mvcost, 96); + } + + case NEARESTMV: + case NEARMV: + /* Clip "next_nearest" so that it does not extend to far out + * of image + */ + vp8_clamp_mv2(&mode_mv[this_mode], xd); + + /* Do not bother proceeding if the vector (from newmv, nearest + * or near) is 0,0 as this should then be coded using the zeromv + * mode. 
+ */ + if (((this_mode == NEARMV) || (this_mode == NEARESTMV)) && + (mode_mv[this_mode].as_int == 0)) { + continue; + } + + case ZEROMV: + + /* Trap vectors that reach beyond the UMV borders + * Note that ALL New MV, Nearest MV Near MV and Zero MV code + * drops through to this point because of the lack of break + * statements in the previous two cases. + */ + if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || + ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) || + ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || + ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max)) { + continue; + } + + vp8_set_mbmode_and_mvs(x, this_mode, &mode_mv[this_mode]); + this_rd = evaluate_inter_mode_rd(mdcounts, &rd, &disable_skip, cpi, x); + break; + + default: break; + } + + this_rd = + calculate_final_rd_costs(this_rd, &rd, &other_cost, disable_skip, + uv_intra_tteob, intra_rd_penalty, cpi, x); + + /* Keep record of best intra distortion */ + if ((x->e_mbd.mode_info_context->mbmi.ref_frame == INTRA_FRAME) && + (this_rd < best_mode.intra_rd)) { + best_mode.intra_rd = this_rd; + *returnintra = rd.distortion2; + } +#if CONFIG_TEMPORAL_DENOISING + if (cpi->oxcf.noise_sensitivity) { + unsigned int sse; + vp8_get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], &sse, + mode_mv[this_mode]); + + if (sse < best_rd_sse) best_rd_sse = sse; + + /* Store for later use by denoiser. */ + if (this_mode == ZEROMV && sse < zero_mv_sse) { + zero_mv_sse = sse; + x->best_zeromv_reference_frame = + x->e_mbd.mode_info_context->mbmi.ref_frame; + } + + /* Store the best NEWMV in x for later use in the denoiser. 
*/ + if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV && sse < best_sse) { + best_sse = sse; + vp8_get_inter_mbpred_error(x, &cpi->fn_ptr[BLOCK_16X16], &best_sse, + mode_mv[this_mode]); + x->best_sse_inter_mode = NEWMV; + x->best_sse_mv = x->e_mbd.mode_info_context->mbmi.mv; + x->need_to_clamp_best_mvs = + x->e_mbd.mode_info_context->mbmi.need_to_clamp_mvs; + x->best_reference_frame = x->e_mbd.mode_info_context->mbmi.ref_frame; + } } #endif - if (cpi->is_src_frame_alt_ref && - (best_mode.mbmode.mode != ZEROMV || best_mode.mbmode.ref_frame != ALTREF_FRAME)) - { - x->e_mbd.mode_info_context->mbmi.mode = ZEROMV; - x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME; + /* Did this mode help.. i.i is it the new best mode */ + if (this_rd < best_mode.rd || x->skip) { + /* Note index of best mode so far */ + best_mode_index = mode_index; + *returnrate = rd.rate2; + *returndistortion = rd.distortion2; + if (this_mode <= B_PRED) { + x->e_mbd.mode_info_context->mbmi.uv_mode = uv_intra_mode; + /* required for left and above block mv */ x->e_mbd.mode_info_context->mbmi.mv.as_int = 0; - x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED; - x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = - (cpi->common.mb_no_coeff_skip); - x->e_mbd.mode_info_context->mbmi.partitioning = 0; - return; + } + update_best_mode(&best_mode, this_rd, &rd, other_cost, x); + + /* Testing this mode gave rise to an improvement in best error + * score. Lower threshold a bit for next time + */ + x->rd_thresh_mult[mode_index] = + (x->rd_thresh_mult[mode_index] >= (MIN_THRESHMULT + 2)) + ? x->rd_thresh_mult[mode_index] - 2 + : MIN_THRESHMULT; } + /* If the mode did not help improve the best error case then raise + * the threshold for testing that mode next time around. 
+ */ + else { + x->rd_thresh_mult[mode_index] += 4; - /* macroblock modes */ - memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mode.mbmode, sizeof(MB_MODE_INFO)); + if (x->rd_thresh_mult[mode_index] > MAX_THRESHMULT) { + x->rd_thresh_mult[mode_index] = MAX_THRESHMULT; + } + } + x->rd_threshes[mode_index] = (cpi->rd_baseline_thresh[mode_index] >> 7) * + x->rd_thresh_mult[mode_index]; - if (best_mode.mbmode.mode == B_PRED) - { - for (i = 0; i < 16; i++) - xd->mode_info_context->bmi[i].as_mode = best_mode.bmodes[i].as_mode; + if (x->skip) break; + } + + /* Reduce the activation RD thresholds for the best choice mode */ + if ((cpi->rd_baseline_thresh[best_mode_index] > 0) && + (cpi->rd_baseline_thresh[best_mode_index] < (INT_MAX >> 2))) { + int best_adjustment = (x->rd_thresh_mult[best_mode_index] >> 2); + + x->rd_thresh_mult[best_mode_index] = + (x->rd_thresh_mult[best_mode_index] >= + (MIN_THRESHMULT + best_adjustment)) + ? x->rd_thresh_mult[best_mode_index] - best_adjustment + : MIN_THRESHMULT; + x->rd_threshes[best_mode_index] = + (cpi->rd_baseline_thresh[best_mode_index] >> 7) * + x->rd_thresh_mult[best_mode_index]; + } + +#if CONFIG_TEMPORAL_DENOISING + if (cpi->oxcf.noise_sensitivity) { + int block_index = mb_row * cpi->common.mb_cols + mb_col; + if (x->best_sse_inter_mode == DC_PRED) { + /* No best MV found. */ + x->best_sse_inter_mode = best_mode.mbmode.mode; + x->best_sse_mv = best_mode.mbmode.mv; + x->need_to_clamp_best_mvs = best_mode.mbmode.need_to_clamp_mvs; + x->best_reference_frame = best_mode.mbmode.ref_frame; + best_sse = best_rd_sse; + } + vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse, + recon_yoffset, recon_uvoffset, &cpi->common.lf_info, + mb_row, mb_col, block_index, 0); + + /* Reevaluate ZEROMV after denoising. 
*/ + if (best_mode.mbmode.ref_frame == INTRA_FRAME && + x->best_zeromv_reference_frame != INTRA_FRAME) { + int this_rd = INT_MAX; + int disable_skip = 0; + int other_cost = 0; + int this_ref_frame = x->best_zeromv_reference_frame; + rd.rate2 = + x->ref_frame_cost[this_ref_frame] + vp8_cost_mv_ref(ZEROMV, mdcounts); + rd.distortion2 = 0; + + /* set up the proper prediction buffers for the frame */ + x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame; + x->e_mbd.pre.y_buffer = plane[this_ref_frame][0]; + x->e_mbd.pre.u_buffer = plane[this_ref_frame][1]; + x->e_mbd.pre.v_buffer = plane[this_ref_frame][2]; + + x->e_mbd.mode_info_context->mbmi.mode = ZEROMV; + x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED; + x->e_mbd.mode_info_context->mbmi.mv.as_int = 0; + + this_rd = evaluate_inter_mode_rd(mdcounts, &rd, &disable_skip, cpi, x); + this_rd = + calculate_final_rd_costs(this_rd, &rd, &other_cost, disable_skip, + uv_intra_tteob, intra_rd_penalty, cpi, x); + if (this_rd < best_mode.rd || x->skip) { + *returnrate = rd.rate2; + *returndistortion = rd.distortion2; + update_best_mode(&best_mode, this_rd, &rd, other_cost, x); + } + } + } +#endif + + if (cpi->is_src_frame_alt_ref && + (best_mode.mbmode.mode != ZEROMV || + best_mode.mbmode.ref_frame != ALTREF_FRAME)) { + x->e_mbd.mode_info_context->mbmi.mode = ZEROMV; + x->e_mbd.mode_info_context->mbmi.ref_frame = ALTREF_FRAME; + x->e_mbd.mode_info_context->mbmi.mv.as_int = 0; + x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED; + x->e_mbd.mode_info_context->mbmi.mb_skip_coeff = + (cpi->common.mb_no_coeff_skip); + x->e_mbd.mode_info_context->mbmi.partitioning = 0; + return; + } + + /* macroblock modes */ + memcpy(&x->e_mbd.mode_info_context->mbmi, &best_mode.mbmode, + sizeof(MB_MODE_INFO)); + + if (best_mode.mbmode.mode == B_PRED) { + for (i = 0; i < 16; ++i) { + xd->mode_info_context->bmi[i].as_mode = best_mode.bmodes[i].as_mode; + } + } + + if (best_mode.mbmode.mode == SPLITMV) { + for (i = 0; i < 16; ++i) { + 
xd->mode_info_context->bmi[i].mv.as_int = best_mode.bmodes[i].mv.as_int; } - if (best_mode.mbmode.mode == SPLITMV) - { - for (i = 0; i < 16; i++) - xd->mode_info_context->bmi[i].mv.as_int = best_mode.bmodes[i].mv.as_int; + memcpy(x->partition_info, &best_mode.partition, sizeof(PARTITION_INFO)); - memcpy(x->partition_info, &best_mode.partition, sizeof(PARTITION_INFO)); + x->e_mbd.mode_info_context->mbmi.mv.as_int = + x->partition_info->bmi[15].mv.as_int; + } - x->e_mbd.mode_info_context->mbmi.mv.as_int = - x->partition_info->bmi[15].mv.as_int; - } + if (sign_bias != + cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame]) { + best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int; + } - if (sign_bias - != cpi->common.ref_frame_sign_bias[xd->mode_info_context->mbmi.ref_frame]) - best_ref_mv.as_int = best_ref_mv_sb[!sign_bias].as_int; - - rd_update_mvcount(x, &best_ref_mv); + rd_update_mvcount(x, &best_ref_mv); } -void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate_) -{ - int error4x4, error16x16; - int rate4x4, rate16x16 = 0, rateuv; - int dist4x4, dist16x16, distuv; - int rate; - int rate4x4_tokenonly = 0; - int rate16x16_tokenonly = 0; - int rateuv_tokenonly = 0; +void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate_) { + int error4x4, error16x16; + int rate4x4, rate16x16 = 0, rateuv; + int dist4x4, dist16x16, distuv; + int rate; + int rate4x4_tokenonly = 0; + int rate16x16_tokenonly = 0; + int rateuv_tokenonly = 0; - x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME; + x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME; - rd_pick_intra_mbuv_mode(x, &rateuv, &rateuv_tokenonly, &distuv); - rate = rateuv; + rd_pick_intra_mbuv_mode(x, &rateuv, &rateuv_tokenonly, &distuv); + rate = rateuv; - error16x16 = rd_pick_intra16x16mby_mode(x, &rate16x16, &rate16x16_tokenonly, - &dist16x16); + error16x16 = rd_pick_intra16x16mby_mode(x, &rate16x16, &rate16x16_tokenonly, + &dist16x16); - error4x4 = rd_pick_intra4x4mby_modes(x, &rate4x4, 
&rate4x4_tokenonly, - &dist4x4, error16x16); + error4x4 = rd_pick_intra4x4mby_modes(x, &rate4x4, &rate4x4_tokenonly, + &dist4x4, error16x16); - if (error4x4 < error16x16) - { - x->e_mbd.mode_info_context->mbmi.mode = B_PRED; - rate += rate4x4; - } - else - { - rate += rate16x16; - } + if (error4x4 < error16x16) { + x->e_mbd.mode_info_context->mbmi.mode = B_PRED; + rate += rate4x4; + } else { + rate += rate16x16; + } - *rate_ = rate; + *rate_ = rate; } diff --git a/libs/libvpx/vp8/encoder/rdopt.h b/libs/libvpx/vp8/encoder/rdopt.h index 1cb1a07266..8186ff1051 100644 --- a/libs/libvpx/vp8/encoder/rdopt.h +++ b/libs/libvpx/vp8/encoder/rdopt.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP8_ENCODER_RDOPT_H_ #define VP8_ENCODER_RDOPT_H_ @@ -18,57 +17,47 @@ extern "C" { #endif -#define RDCOST(RM,DM,R,D) ( ((128+(R)*(RM)) >> 8) + (DM)*(D) ) +#define RDCOST(RM, DM, R, D) (((128 + (R) * (RM)) >> 8) + (DM) * (D)) -static INLINE void insertsortmv(int arr[], int len) -{ - int i, j, k; +static INLINE void insertsortmv(int arr[], int len) { + int i, j, k; - for ( i = 1 ; i <= len-1 ; i++ ) - { - for ( j = 0 ; j < i ; j++ ) - { - if ( arr[j] > arr[i] ) - { - int temp; + for (i = 1; i <= len - 1; ++i) { + for (j = 0; j < i; ++j) { + if (arr[j] > arr[i]) { + int temp; - temp = arr[i]; + temp = arr[i]; - for ( k = i; k >j; k--) - arr[k] = arr[k - 1] ; + for (k = i; k > j; k--) arr[k] = arr[k - 1]; - arr[j] = temp ; - } - } + arr[j] = temp; + } } + } } -static INLINE void insertsortsad(int arr[],int idx[], int len) -{ - int i, j, k; +static INLINE void insertsortsad(int arr[], int idx[], int len) { + int i, j, k; - for ( i = 1 ; i <= len-1 ; i++ ) - { - for ( j = 0 ; j < i ; j++ ) - { - if ( arr[j] > arr[i] ) - { - int temp, tempi; + for (i = 1; i <= len - 1; ++i) { + for (j = 0; j < i; ++j) { + if (arr[j] > arr[i]) { + int temp, tempi; - temp = arr[i]; - tempi = idx[i]; + temp = arr[i]; + tempi = idx[i]; - for ( k = i; k >j; k--) - { - 
arr[k] = arr[k - 1] ; - idx[k] = idx[k - 1]; - } - - arr[j] = temp ; - idx[j] = tempi; - } + for (k = i; k > j; k--) { + arr[k] = arr[k - 1]; + idx[k] = idx[k - 1]; } + + arr[j] = temp; + idx[j] = tempi; + } } + } } extern void vp8_initialize_rd_consts(VP8_COMP *cpi, MACROBLOCK *x, int Qvalue); @@ -78,66 +67,51 @@ extern void vp8_rd_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int *returnintra, int mb_row, int mb_col); extern void vp8_rd_pick_intra_mode(MACROBLOCK *x, int *rate); - static INLINE void get_plane_pointers(const YV12_BUFFER_CONFIG *fb, unsigned char *plane[3], unsigned int recon_yoffset, - unsigned int recon_uvoffset) -{ - plane[0] = fb->y_buffer + recon_yoffset; - plane[1] = fb->u_buffer + recon_uvoffset; - plane[2] = fb->v_buffer + recon_uvoffset; + unsigned int recon_uvoffset) { + plane[0] = fb->y_buffer + recon_yoffset; + plane[1] = fb->u_buffer + recon_uvoffset; + plane[2] = fb->v_buffer + recon_uvoffset; } - static INLINE void get_predictor_pointers(const VP8_COMP *cpi, unsigned char *plane[4][3], unsigned int recon_yoffset, - unsigned int recon_uvoffset) -{ - if (cpi->ref_frame_flags & VP8_LAST_FRAME) - get_plane_pointers(&cpi->common.yv12_fb[cpi->common.lst_fb_idx], - plane[LAST_FRAME], recon_yoffset, recon_uvoffset); + unsigned int recon_uvoffset) { + if (cpi->ref_frame_flags & VP8_LAST_FRAME) { + get_plane_pointers(&cpi->common.yv12_fb[cpi->common.lst_fb_idx], + plane[LAST_FRAME], recon_yoffset, recon_uvoffset); + } - if (cpi->ref_frame_flags & VP8_GOLD_FRAME) - get_plane_pointers(&cpi->common.yv12_fb[cpi->common.gld_fb_idx], - plane[GOLDEN_FRAME], recon_yoffset, recon_uvoffset); + if (cpi->ref_frame_flags & VP8_GOLD_FRAME) { + get_plane_pointers(&cpi->common.yv12_fb[cpi->common.gld_fb_idx], + plane[GOLDEN_FRAME], recon_yoffset, recon_uvoffset); + } - if (cpi->ref_frame_flags & VP8_ALTR_FRAME) - get_plane_pointers(&cpi->common.yv12_fb[cpi->common.alt_fb_idx], - plane[ALTREF_FRAME], recon_yoffset, recon_uvoffset); + if (cpi->ref_frame_flags & 
VP8_ALTR_FRAME) { + get_plane_pointers(&cpi->common.yv12_fb[cpi->common.alt_fb_idx], + plane[ALTREF_FRAME], recon_yoffset, recon_uvoffset); + } } - static INLINE void get_reference_search_order(const VP8_COMP *cpi, - int ref_frame_map[4]) -{ - int i=0; + int ref_frame_map[4]) { + int i = 0; - ref_frame_map[i++] = INTRA_FRAME; - if (cpi->ref_frame_flags & VP8_LAST_FRAME) - ref_frame_map[i++] = LAST_FRAME; - if (cpi->ref_frame_flags & VP8_GOLD_FRAME) - ref_frame_map[i++] = GOLDEN_FRAME; - if (cpi->ref_frame_flags & VP8_ALTR_FRAME) - ref_frame_map[i++] = ALTREF_FRAME; - for(; i<4; i++) - ref_frame_map[i] = -1; + ref_frame_map[i++] = INTRA_FRAME; + if (cpi->ref_frame_flags & VP8_LAST_FRAME) ref_frame_map[i++] = LAST_FRAME; + if (cpi->ref_frame_flags & VP8_GOLD_FRAME) ref_frame_map[i++] = GOLDEN_FRAME; + if (cpi->ref_frame_flags & VP8_ALTR_FRAME) ref_frame_map[i++] = ALTREF_FRAME; + for (; i < 4; ++i) ref_frame_map[i] = -1; } - -extern void vp8_mv_pred -( - VP8_COMP *cpi, - MACROBLOCKD *xd, - const MODE_INFO *here, - int_mv *mvp, - int refframe, - int *ref_frame_sign_bias, - int *sr, - int near_sadidx[] -); -void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffset, int near_sadidx[]); +extern void vp8_mv_pred(VP8_COMP *cpi, MACROBLOCKD *xd, const MODE_INFO *here, + int_mv *mvp, int refframe, int *ref_frame_sign_bias, + int *sr, int near_sadidx[]); +void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, + int recon_yoffset, int near_sadidx[]); int VP8_UVSSE(MACROBLOCK *x); int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4]); void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv); diff --git a/libs/libvpx/vp8/encoder/segmentation.c b/libs/libvpx/vp8/encoder/segmentation.c index fdd22fceb6..dcb68119e1 100644 --- a/libs/libvpx/vp8/encoder/segmentation.c +++ b/libs/libvpx/vp8/encoder/segmentation.c @@ -8,59 +8,48 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "segmentation.h" #include "vpx_mem/vpx_mem.h" -void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x) -{ - int mb_row, mb_col; +void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x) { + int mb_row, mb_col; - MODE_INFO *this_mb_mode_info = cm->mi; + MODE_INFO *this_mb_mode_info = cm->mi; - x->gf_active_ptr = (signed char *)cpi->gf_active_flags; + x->gf_active_ptr = (signed char *)cpi->gf_active_flags; - if ((cm->frame_type == KEY_FRAME) || (cm->refresh_golden_frame)) - { - /* Reset Gf useage monitors */ - memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols)); - cpi->gf_active_count = cm->mb_rows * cm->mb_cols; - } - else - { - /* for each macroblock row in image */ - for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) - { - /* for each macroblock col in image */ - for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) - { - - /* If using golden then set GF active flag if not already set. - * If using last frame 0,0 mode then leave flag as it is - * else if using non 0,0 motion or intra modes then clear - * flag if it is currently set - */ - if ((this_mb_mode_info->mbmi.ref_frame == GOLDEN_FRAME) || (this_mb_mode_info->mbmi.ref_frame == ALTREF_FRAME)) - { - if (*(x->gf_active_ptr) == 0) - { - *(x->gf_active_ptr) = 1; - cpi->gf_active_count ++; - } - } - else if ((this_mb_mode_info->mbmi.mode != ZEROMV) && *(x->gf_active_ptr)) - { - *(x->gf_active_ptr) = 0; - cpi->gf_active_count--; - } - - x->gf_active_ptr++; /* Step onto next entry */ - this_mb_mode_info++; /* skip to next mb */ - - } - - /* this is to account for the border */ - this_mb_mode_info++; + if ((cm->frame_type == KEY_FRAME) || (cm->refresh_golden_frame)) { + /* Reset Gf useage monitors */ + memset(cpi->gf_active_flags, 1, (cm->mb_rows * cm->mb_cols)); + cpi->gf_active_count = cm->mb_rows * cm->mb_cols; + } else { + /* for each macroblock row in image */ + for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { + /* for each macroblock col in image */ + 
for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { + /* If using golden then set GF active flag if not already set. + * If using last frame 0,0 mode then leave flag as it is + * else if using non 0,0 motion or intra modes then clear + * flag if it is currently set + */ + if ((this_mb_mode_info->mbmi.ref_frame == GOLDEN_FRAME) || + (this_mb_mode_info->mbmi.ref_frame == ALTREF_FRAME)) { + if (*(x->gf_active_ptr) == 0) { + *(x->gf_active_ptr) = 1; + cpi->gf_active_count++; + } + } else if ((this_mb_mode_info->mbmi.mode != ZEROMV) && + *(x->gf_active_ptr)) { + *(x->gf_active_ptr) = 0; + cpi->gf_active_count--; } + + x->gf_active_ptr++; /* Step onto next entry */ + this_mb_mode_info++; /* skip to next mb */ + } + + /* this is to account for the border */ + this_mb_mode_info++; } + } } diff --git a/libs/libvpx/vp8/encoder/segmentation.h b/libs/libvpx/vp8/encoder/segmentation.h index 6b5500594e..1395a34118 100644 --- a/libs/libvpx/vp8/encoder/segmentation.h +++ b/libs/libvpx/vp8/encoder/segmentation.h @@ -19,7 +19,8 @@ extern "C" { #endif -extern void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, MACROBLOCK *x); +extern void vp8_update_gf_useage_maps(VP8_COMP *cpi, VP8_COMMON *cm, + MACROBLOCK *x); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/encoder/temporal_filter.c b/libs/libvpx/vp8/encoder/temporal_filter.c index 85d26c20f4..1b2f46bb69 100644 --- a/libs/libvpx/vp8/encoder/temporal_filter.c +++ b/libs/libvpx/vp8/encoder/temporal_filter.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "vp8/common/onyxc_int.h" #include "onyx_int.h" #include "vp8/common/systemdependent.h" @@ -29,493 +28,405 @@ #include #include -#define ALT_REF_MC_ENABLED 1 /* dis/enable MC in AltRef filtering */ -#define ALT_REF_SUBPEL_ENABLED 1 /* dis/enable subpel in MC AltRef filtering */ +#define ALT_REF_MC_ENABLED 1 /* toggle MC in AltRef filtering */ +#define ALT_REF_SUBPEL_ENABLED 1 /* toggle subpel in MC AltRef filtering */ #if VP8_TEMPORAL_ALT_REF -static void vp8_temporal_filter_predictors_mb_c -( - MACROBLOCKD *x, - unsigned char *y_mb_ptr, - unsigned char *u_mb_ptr, - unsigned char *v_mb_ptr, - int stride, - int mv_row, - int mv_col, - unsigned char *pred -) -{ - int offset; - unsigned char *yptr, *uptr, *vptr; +static void vp8_temporal_filter_predictors_mb_c( + MACROBLOCKD *x, unsigned char *y_mb_ptr, unsigned char *u_mb_ptr, + unsigned char *v_mb_ptr, int stride, int mv_row, int mv_col, + unsigned char *pred) { + int offset; + unsigned char *yptr, *uptr, *vptr; - /* Y */ - yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3); + /* Y */ + yptr = y_mb_ptr + (mv_row >> 3) * stride + (mv_col >> 3); - if ((mv_row | mv_col) & 7) - { - x->subpixel_predict16x16(yptr, stride, - mv_col & 7, mv_row & 7, &pred[0], 16); - } - else - { - vp8_copy_mem16x16(yptr, stride, &pred[0], 16); - } + if ((mv_row | mv_col) & 7) { + x->subpixel_predict16x16(yptr, stride, mv_col & 7, mv_row & 7, &pred[0], + 16); + } else { + vp8_copy_mem16x16(yptr, stride, &pred[0], 16); + } - /* U & V */ - mv_row >>= 1; - mv_col >>= 1; - stride = (stride + 1) >> 1; - offset = (mv_row >> 3) * stride + (mv_col >> 3); - uptr = u_mb_ptr + offset; - vptr = v_mb_ptr + offset; + /* U & V */ + mv_row >>= 1; + mv_col >>= 1; + stride = (stride + 1) >> 1; + offset = (mv_row >> 3) * stride + (mv_col >> 3); + uptr = u_mb_ptr + offset; + vptr = v_mb_ptr + offset; - if ((mv_row | mv_col) & 7) - { - x->subpixel_predict8x8(uptr, stride, - mv_col & 7, mv_row & 7, &pred[256], 8); - x->subpixel_predict8x8(vptr, 
stride, - mv_col & 7, mv_row & 7, &pred[320], 8); - } - else - { - vp8_copy_mem8x8(uptr, stride, &pred[256], 8); - vp8_copy_mem8x8(vptr, stride, &pred[320], 8); - } + if ((mv_row | mv_col) & 7) { + x->subpixel_predict8x8(uptr, stride, mv_col & 7, mv_row & 7, &pred[256], 8); + x->subpixel_predict8x8(vptr, stride, mv_col & 7, mv_row & 7, &pred[320], 8); + } else { + vp8_copy_mem8x8(uptr, stride, &pred[256], 8); + vp8_copy_mem8x8(vptr, stride, &pred[320], 8); + } } -void vp8_temporal_filter_apply_c -( - unsigned char *frame1, - unsigned int stride, - unsigned char *frame2, - unsigned int block_size, - int strength, - int filter_weight, - unsigned int *accumulator, - unsigned short *count -) -{ - unsigned int i, j, k; - int modifier; - int byte = 0; - const int rounding = strength > 0 ? 1 << (strength - 1) : 0; +void vp8_temporal_filter_apply_c(unsigned char *frame1, unsigned int stride, + unsigned char *frame2, unsigned int block_size, + int strength, int filter_weight, + unsigned int *accumulator, + unsigned short *count) { + unsigned int i, j, k; + int modifier; + int byte = 0; + const int rounding = strength > 0 ? 1 << (strength - 1) : 0; - for (i = 0,k = 0; i < block_size; i++) - { - for (j = 0; j < block_size; j++, k++) - { + for (i = 0, k = 0; i < block_size; ++i) { + for (j = 0; j < block_size; j++, k++) { + int src_byte = frame1[byte]; + int pixel_value = *frame2++; - int src_byte = frame1[byte]; - int pixel_value = *frame2++; + modifier = src_byte - pixel_value; + /* This is an integer approximation of: + * float coeff = (3.0 * modifer * modifier) / pow(2, strength); + * modifier = (int)roundf(coeff > 16 ? 0 : 16-coeff); + */ + modifier *= modifier; + modifier *= 3; + modifier += rounding; + modifier >>= strength; - modifier = src_byte - pixel_value; - /* This is an integer approximation of: - * float coeff = (3.0 * modifer * modifier) / pow(2, strength); - * modifier = (int)roundf(coeff > 16 ? 
0 : 16-coeff); - */ - modifier *= modifier; - modifier *= 3; - modifier += rounding; - modifier >>= strength; + if (modifier > 16) modifier = 16; - if (modifier > 16) - modifier = 16; + modifier = 16 - modifier; + modifier *= filter_weight; - modifier = 16 - modifier; - modifier *= filter_weight; + count[k] += modifier; + accumulator[k] += modifier * pixel_value; - count[k] += modifier; - accumulator[k] += modifier * pixel_value; - - byte++; - } - - byte += stride - block_size; + byte++; } + + byte += stride - block_size; + } } #if ALT_REF_MC_ENABLED -static int vp8_temporal_filter_find_matching_mb_c -( - VP8_COMP *cpi, - YV12_BUFFER_CONFIG *arf_frame, - YV12_BUFFER_CONFIG *frame_ptr, - int mb_offset, - int error_thresh -) -{ - MACROBLOCK *x = &cpi->mb; - int step_param; - int sadpb = x->sadperbit16; - int bestsme = INT_MAX; +static int vp8_temporal_filter_find_matching_mb_c(VP8_COMP *cpi, + YV12_BUFFER_CONFIG *arf_frame, + YV12_BUFFER_CONFIG *frame_ptr, + int mb_offset, + int error_thresh) { + MACROBLOCK *x = &cpi->mb; + int step_param; + int sadpb = x->sadperbit16; + int bestsme = INT_MAX; - BLOCK *b = &x->block[0]; - BLOCKD *d = &x->e_mbd.block[0]; - int_mv best_ref_mv1; - int_mv best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */ + BLOCK *b = &x->block[0]; + BLOCKD *d = &x->e_mbd.block[0]; + int_mv best_ref_mv1; + int_mv best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */ - /* Save input state */ - unsigned char **base_src = b->base_src; - int src = b->src; - int src_stride = b->src_stride; - unsigned char *base_pre = x->e_mbd.pre.y_buffer; - int pre = d->offset; - int pre_stride = x->e_mbd.pre.y_stride; + /* Save input state */ + unsigned char **base_src = b->base_src; + int src = b->src; + int src_stride = b->src_stride; + unsigned char *base_pre = x->e_mbd.pre.y_buffer; + int pre = d->offset; + int pre_stride = x->e_mbd.pre.y_stride; - (void)error_thresh; + (void)error_thresh; - best_ref_mv1.as_int = 0; - best_ref_mv1_full.as_mv.col = 
best_ref_mv1.as_mv.col >>3; - best_ref_mv1_full.as_mv.row = best_ref_mv1.as_mv.row >>3; + best_ref_mv1.as_int = 0; + best_ref_mv1_full.as_mv.col = best_ref_mv1.as_mv.col >> 3; + best_ref_mv1_full.as_mv.row = best_ref_mv1.as_mv.row >> 3; - /* Setup frame pointers */ - b->base_src = &arf_frame->y_buffer; - b->src_stride = arf_frame->y_stride; - b->src = mb_offset; + /* Setup frame pointers */ + b->base_src = &arf_frame->y_buffer; + b->src_stride = arf_frame->y_stride; + b->src = mb_offset; - x->e_mbd.pre.y_buffer = frame_ptr->y_buffer; - x->e_mbd.pre.y_stride = frame_ptr->y_stride; - d->offset = mb_offset; + x->e_mbd.pre.y_buffer = frame_ptr->y_buffer; + x->e_mbd.pre.y_stride = frame_ptr->y_stride; + d->offset = mb_offset; - /* Further step/diamond searches as necessary */ - if (cpi->Speed < 8) - { - step_param = cpi->sf.first_step + (cpi->Speed > 5); - } - else - { - step_param = cpi->sf.first_step + 2; - } + /* Further step/diamond searches as necessary */ + if (cpi->Speed < 8) { + step_param = cpi->sf.first_step + (cpi->Speed > 5); + } else { + step_param = cpi->sf.first_step + 2; + } - /* TODO Check that the 16x16 vf & sdf are selected here */ - /* Ignore mv costing by sending NULL cost arrays */ - bestsme = vp8_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.mv, - step_param, sadpb, - &cpi->fn_ptr[BLOCK_16X16], - NULL, NULL, &best_ref_mv1); + /* TODO Check that the 16x16 vf & sdf are selected here */ + /* Ignore mv costing by sending NULL cost arrays */ + bestsme = + vp8_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.mv, step_param, sadpb, + &cpi->fn_ptr[BLOCK_16X16], NULL, NULL, &best_ref_mv1); #if ALT_REF_SUBPEL_ENABLED - /* Try sub-pixel MC? */ - { - int distortion; - unsigned int sse; - /* Ignore mv costing by sending NULL cost array */ - bestsme = cpi->find_fractional_mv_step(x, b, d, - &d->bmi.mv, - &best_ref_mv1, - x->errorperbit, - &cpi->fn_ptr[BLOCK_16X16], - NULL, &distortion, &sse); - } + /* Try sub-pixel MC? 
*/ + { + int distortion; + unsigned int sse; + /* Ignore mv costing by sending NULL cost array */ + bestsme = cpi->find_fractional_mv_step( + x, b, d, &d->bmi.mv, &best_ref_mv1, x->errorperbit, + &cpi->fn_ptr[BLOCK_16X16], NULL, &distortion, &sse); + } #endif - /* Save input state */ - b->base_src = base_src; - b->src = src; - b->src_stride = src_stride; - x->e_mbd.pre.y_buffer = base_pre; - d->offset = pre; - x->e_mbd.pre.y_stride = pre_stride; + /* Save input state */ + b->base_src = base_src; + b->src = src; + b->src_stride = src_stride; + x->e_mbd.pre.y_buffer = base_pre; + d->offset = pre; + x->e_mbd.pre.y_stride = pre_stride; - return bestsme; + return bestsme; } #endif -static void vp8_temporal_filter_iterate_c -( - VP8_COMP *cpi, - int frame_count, - int alt_ref_index, - int strength -) -{ - int byte; - int frame; - int mb_col, mb_row; - unsigned int filter_weight; - int mb_cols = cpi->common.mb_cols; - int mb_rows = cpi->common.mb_rows; - int mb_y_offset = 0; - int mb_uv_offset = 0; - DECLARE_ALIGNED(16, unsigned int, accumulator[16*16 + 8*8 + 8*8]); - DECLARE_ALIGNED(16, unsigned short, count[16*16 + 8*8 + 8*8]); - MACROBLOCKD *mbd = &cpi->mb.e_mbd; - YV12_BUFFER_CONFIG *f = cpi->frames[alt_ref_index]; - unsigned char *dst1, *dst2; - DECLARE_ALIGNED(16, unsigned char, predictor[16*16 + 8*8 + 8*8]); +static void vp8_temporal_filter_iterate_c(VP8_COMP *cpi, int frame_count, + int alt_ref_index, int strength) { + int byte; + int frame; + int mb_col, mb_row; + unsigned int filter_weight; + int mb_cols = cpi->common.mb_cols; + int mb_rows = cpi->common.mb_rows; + int mb_y_offset = 0; + int mb_uv_offset = 0; + DECLARE_ALIGNED(16, unsigned int, accumulator[16 * 16 + 8 * 8 + 8 * 8]); + DECLARE_ALIGNED(16, unsigned short, count[16 * 16 + 8 * 8 + 8 * 8]); + MACROBLOCKD *mbd = &cpi->mb.e_mbd; + YV12_BUFFER_CONFIG *f = cpi->frames[alt_ref_index]; + unsigned char *dst1, *dst2; + DECLARE_ALIGNED(16, unsigned char, predictor[16 * 16 + 8 * 8 + 8 * 8]); - /* Save input 
state */ - unsigned char *y_buffer = mbd->pre.y_buffer; - unsigned char *u_buffer = mbd->pre.u_buffer; - unsigned char *v_buffer = mbd->pre.v_buffer; + /* Save input state */ + unsigned char *y_buffer = mbd->pre.y_buffer; + unsigned char *u_buffer = mbd->pre.u_buffer; + unsigned char *v_buffer = mbd->pre.v_buffer; - for (mb_row = 0; mb_row < mb_rows; mb_row++) - { + for (mb_row = 0; mb_row < mb_rows; ++mb_row) { #if ALT_REF_MC_ENABLED - /* Source frames are extended to 16 pixels. This is different than - * L/A/G reference frames that have a border of 32 (VP8BORDERINPIXELS) - * A 6 tap filter is used for motion search. This requires 2 pixels - * before and 3 pixels after. So the largest Y mv on a border would - * then be 16 - 3. The UV blocks are half the size of the Y and - * therefore only extended by 8. The largest mv that a UV block - * can support is 8 - 3. A UV mv is half of a Y mv. - * (16 - 3) >> 1 == 6 which is greater than 8 - 3. - * To keep the mv in play for both Y and UV planes the max that it - * can be on a border is therefore 16 - 5. - */ - cpi->mb.mv_row_min = -((mb_row * 16) + (16 - 5)); - cpi->mb.mv_row_max = ((cpi->common.mb_rows - 1 - mb_row) * 16) - + (16 - 5); + /* Source frames are extended to 16 pixels. This is different than + * L/A/G reference frames that have a border of 32 (VP8BORDERINPIXELS) + * A 6 tap filter is used for motion search. This requires 2 pixels + * before and 3 pixels after. So the largest Y mv on a border would + * then be 16 - 3. The UV blocks are half the size of the Y and + * therefore only extended by 8. The largest mv that a UV block + * can support is 8 - 3. A UV mv is half of a Y mv. + * (16 - 3) >> 1 == 6 which is greater than 8 - 3. + * To keep the mv in play for both Y and UV planes the max that it + * can be on a border is therefore 16 - 5. 
+ */ + cpi->mb.mv_row_min = -((mb_row * 16) + (16 - 5)); + cpi->mb.mv_row_max = ((cpi->common.mb_rows - 1 - mb_row) * 16) + (16 - 5); #endif - for (mb_col = 0; mb_col < mb_cols; mb_col++) - { - int i, j, k; - int stride; + for (mb_col = 0; mb_col < mb_cols; ++mb_col) { + int i, j, k; + int stride; - memset(accumulator, 0, 384*sizeof(unsigned int)); - memset(count, 0, 384*sizeof(unsigned short)); + memset(accumulator, 0, 384 * sizeof(unsigned int)); + memset(count, 0, 384 * sizeof(unsigned short)); #if ALT_REF_MC_ENABLED - cpi->mb.mv_col_min = -((mb_col * 16) + (16 - 5)); - cpi->mb.mv_col_max = ((cpi->common.mb_cols - 1 - mb_col) * 16) - + (16 - 5); + cpi->mb.mv_col_min = -((mb_col * 16) + (16 - 5)); + cpi->mb.mv_col_max = ((cpi->common.mb_cols - 1 - mb_col) * 16) + (16 - 5); #endif - for (frame = 0; frame < frame_count; frame++) - { - if (cpi->frames[frame] == NULL) - continue; + for (frame = 0; frame < frame_count; ++frame) { + if (cpi->frames[frame] == NULL) continue; - mbd->block[0].bmi.mv.as_mv.row = 0; - mbd->block[0].bmi.mv.as_mv.col = 0; + mbd->block[0].bmi.mv.as_mv.row = 0; + mbd->block[0].bmi.mv.as_mv.col = 0; - if (frame == alt_ref_index) - { - filter_weight = 2; - } - else - { - int err = 0; + if (frame == alt_ref_index) { + filter_weight = 2; + } else { + int err = 0; #if ALT_REF_MC_ENABLED -#define THRESH_LOW 10000 -#define THRESH_HIGH 20000 - /* Find best match in this frame by MC */ - err = vp8_temporal_filter_find_matching_mb_c - (cpi, - cpi->frames[alt_ref_index], - cpi->frames[frame], - mb_y_offset, - THRESH_LOW); +#define THRESH_LOW 10000 +#define THRESH_HIGH 20000 + /* Find best match in this frame by MC */ + err = vp8_temporal_filter_find_matching_mb_c( + cpi, cpi->frames[alt_ref_index], cpi->frames[frame], mb_y_offset, + THRESH_LOW); #endif - /* Assign higher weight to matching MB if it's error - * score is lower. If not applying MC default behavior - * is to weight all MBs equal. 
- */ - filter_weight = errframes[frame]->y_buffer + mb_y_offset, - cpi->frames[frame]->u_buffer + mb_uv_offset, - cpi->frames[frame]->v_buffer + mb_uv_offset, - cpi->frames[frame]->y_stride, - mbd->block[0].bmi.mv.as_mv.row, - mbd->block[0].bmi.mv.as_mv.col, - predictor); - - /* Apply the filter (YUV) */ - vp8_temporal_filter_apply - (f->y_buffer + mb_y_offset, - f->y_stride, - predictor, - 16, - strength, - filter_weight, - accumulator, - count); - - vp8_temporal_filter_apply - (f->u_buffer + mb_uv_offset, - f->uv_stride, - predictor + 256, - 8, - strength, - filter_weight, - accumulator + 256, - count + 256); - - vp8_temporal_filter_apply - (f->v_buffer + mb_uv_offset, - f->uv_stride, - predictor + 320, - 8, - strength, - filter_weight, - accumulator + 320, - count + 320); - } - } - - /* Normalize filter output to produce AltRef frame */ - dst1 = cpi->alt_ref_buffer.y_buffer; - stride = cpi->alt_ref_buffer.y_stride; - byte = mb_y_offset; - for (i = 0,k = 0; i < 16; i++) - { - for (j = 0; j < 16; j++, k++) - { - unsigned int pval = accumulator[k] + (count[k] >> 1); - pval *= cpi->fixed_divide[count[k]]; - pval >>= 19; - - dst1[byte] = (unsigned char)pval; - - /* move to next pixel */ - byte++; - } - - byte += stride - 16; - } - - dst1 = cpi->alt_ref_buffer.u_buffer; - dst2 = cpi->alt_ref_buffer.v_buffer; - stride = cpi->alt_ref_buffer.uv_stride; - byte = mb_uv_offset; - for (i = 0,k = 256; i < 8; i++) - { - for (j = 0; j < 8; j++, k++) - { - int m=k+64; - - /* U */ - unsigned int pval = accumulator[k] + (count[k] >> 1); - pval *= cpi->fixed_divide[count[k]]; - pval >>= 19; - dst1[byte] = (unsigned char)pval; - - /* V */ - pval = accumulator[m] + (count[m] >> 1); - pval *= cpi->fixed_divide[count[m]]; - pval >>= 19; - dst2[byte] = (unsigned char)pval; - - /* move to next pixel */ - byte++; - } - - byte += stride - 8; - } - - mb_y_offset += 16; - mb_uv_offset += 8; + /* Assign higher weight to matching MB if it's error + * score is lower. 
If not applying MC default behavior + * is to weight all MBs equal. + */ + filter_weight = err < THRESH_LOW ? 2 : err < THRESH_HIGH ? 1 : 0; } - mb_y_offset += 16*(f->y_stride-mb_cols); - mb_uv_offset += 8*(f->uv_stride-mb_cols); + if (filter_weight != 0) { + /* Construct the predictors */ + vp8_temporal_filter_predictors_mb_c( + mbd, cpi->frames[frame]->y_buffer + mb_y_offset, + cpi->frames[frame]->u_buffer + mb_uv_offset, + cpi->frames[frame]->v_buffer + mb_uv_offset, + cpi->frames[frame]->y_stride, mbd->block[0].bmi.mv.as_mv.row, + mbd->block[0].bmi.mv.as_mv.col, predictor); + + /* Apply the filter (YUV) */ + vp8_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride, + predictor, 16, strength, filter_weight, + accumulator, count); + + vp8_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride, + predictor + 256, 8, strength, filter_weight, + accumulator + 256, count + 256); + + vp8_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride, + predictor + 320, 8, strength, filter_weight, + accumulator + 320, count + 320); + } + } + + /* Normalize filter output to produce AltRef frame */ + dst1 = cpi->alt_ref_buffer.y_buffer; + stride = cpi->alt_ref_buffer.y_stride; + byte = mb_y_offset; + for (i = 0, k = 0; i < 16; ++i) { + for (j = 0; j < 16; j++, k++) { + unsigned int pval = accumulator[k] + (count[k] >> 1); + pval *= cpi->fixed_divide[count[k]]; + pval >>= 19; + + dst1[byte] = (unsigned char)pval; + + /* move to next pixel */ + byte++; + } + + byte += stride - 16; + } + + dst1 = cpi->alt_ref_buffer.u_buffer; + dst2 = cpi->alt_ref_buffer.v_buffer; + stride = cpi->alt_ref_buffer.uv_stride; + byte = mb_uv_offset; + for (i = 0, k = 256; i < 8; ++i) { + for (j = 0; j < 8; j++, k++) { + int m = k + 64; + + /* U */ + unsigned int pval = accumulator[k] + (count[k] >> 1); + pval *= cpi->fixed_divide[count[k]]; + pval >>= 19; + dst1[byte] = (unsigned char)pval; + + /* V */ + pval = accumulator[m] + (count[m] >> 1); + pval *= 
cpi->fixed_divide[count[m]]; + pval >>= 19; + dst2[byte] = (unsigned char)pval; + + /* move to next pixel */ + byte++; + } + + byte += stride - 8; + } + + mb_y_offset += 16; + mb_uv_offset += 8; } - /* Restore input state */ - mbd->pre.y_buffer = y_buffer; - mbd->pre.u_buffer = u_buffer; - mbd->pre.v_buffer = v_buffer; + mb_y_offset += 16 * (f->y_stride - mb_cols); + mb_uv_offset += 8 * (f->uv_stride - mb_cols); + } + + /* Restore input state */ + mbd->pre.y_buffer = y_buffer; + mbd->pre.u_buffer = u_buffer; + mbd->pre.v_buffer = v_buffer; } -void vp8_temporal_filter_prepare_c -( - VP8_COMP *cpi, - int distance -) -{ - int frame = 0; +void vp8_temporal_filter_prepare_c(VP8_COMP *cpi, int distance) { + int frame = 0; - int num_frames_backward = 0; - int num_frames_forward = 0; - int frames_to_blur_backward = 0; - int frames_to_blur_forward = 0; - int frames_to_blur = 0; - int start_frame = 0; + int num_frames_backward = 0; + int num_frames_forward = 0; + int frames_to_blur_backward = 0; + int frames_to_blur_forward = 0; + int frames_to_blur = 0; + int start_frame = 0; - int strength = cpi->oxcf.arnr_strength; + int strength = cpi->oxcf.arnr_strength; - int blur_type = cpi->oxcf.arnr_type; + int blur_type = cpi->oxcf.arnr_type; - int max_frames = cpi->active_arnr_frames; + int max_frames = cpi->active_arnr_frames; - num_frames_backward = distance; - num_frames_forward = vp8_lookahead_depth(cpi->lookahead) - - (num_frames_backward + 1); + num_frames_backward = distance; + num_frames_forward = + vp8_lookahead_depth(cpi->lookahead) - (num_frames_backward + 1); - switch (blur_type) - { + switch (blur_type) { case 1: - /* Backward Blur */ + /* Backward Blur */ - frames_to_blur_backward = num_frames_backward; + frames_to_blur_backward = num_frames_backward; - if (frames_to_blur_backward >= max_frames) - frames_to_blur_backward = max_frames - 1; + if (frames_to_blur_backward >= max_frames) { + frames_to_blur_backward = max_frames - 1; + } - frames_to_blur = 
frames_to_blur_backward + 1; - break; + frames_to_blur = frames_to_blur_backward + 1; + break; case 2: - /* Forward Blur */ + /* Forward Blur */ - frames_to_blur_forward = num_frames_forward; + frames_to_blur_forward = num_frames_forward; - if (frames_to_blur_forward >= max_frames) - frames_to_blur_forward = max_frames - 1; + if (frames_to_blur_forward >= max_frames) { + frames_to_blur_forward = max_frames - 1; + } - frames_to_blur = frames_to_blur_forward + 1; - break; + frames_to_blur = frames_to_blur_forward + 1; + break; case 3: default: - /* Center Blur */ - frames_to_blur_forward = num_frames_forward; - frames_to_blur_backward = num_frames_backward; + /* Center Blur */ + frames_to_blur_forward = num_frames_forward; + frames_to_blur_backward = num_frames_backward; - if (frames_to_blur_forward > frames_to_blur_backward) - frames_to_blur_forward = frames_to_blur_backward; + if (frames_to_blur_forward > frames_to_blur_backward) { + frames_to_blur_forward = frames_to_blur_backward; + } - if (frames_to_blur_backward > frames_to_blur_forward) - frames_to_blur_backward = frames_to_blur_forward; + if (frames_to_blur_backward > frames_to_blur_forward) { + frames_to_blur_backward = frames_to_blur_forward; + } - /* When max_frames is even we have 1 more frame backward than forward */ - if (frames_to_blur_forward > (max_frames - 1) / 2) - frames_to_blur_forward = ((max_frames - 1) / 2); + /* When max_frames is even we have 1 more frame backward than forward */ + if (frames_to_blur_forward > (max_frames - 1) / 2) { + frames_to_blur_forward = ((max_frames - 1) / 2); + } - if (frames_to_blur_backward > (max_frames / 2)) - frames_to_blur_backward = (max_frames / 2); + if (frames_to_blur_backward > (max_frames / 2)) { + frames_to_blur_backward = (max_frames / 2); + } - frames_to_blur = frames_to_blur_backward + frames_to_blur_forward + 1; - break; - } + frames_to_blur = frames_to_blur_backward + frames_to_blur_forward + 1; + break; + } - start_frame = distance + 
frames_to_blur_forward; + start_frame = distance + frames_to_blur_forward; - /* Setup frame pointers, NULL indicates frame not included in filter */ - memset(cpi->frames, 0, max_frames*sizeof(YV12_BUFFER_CONFIG *)); - for (frame = 0; frame < frames_to_blur; frame++) - { - int which_buffer = start_frame - frame; - struct lookahead_entry* buf = vp8_lookahead_peek(cpi->lookahead, - which_buffer, - PEEK_FORWARD); - cpi->frames[frames_to_blur-1-frame] = &buf->img; - } + /* Setup frame pointers, NULL indicates frame not included in filter */ + memset(cpi->frames, 0, max_frames * sizeof(YV12_BUFFER_CONFIG *)); + for (frame = 0; frame < frames_to_blur; ++frame) { + int which_buffer = start_frame - frame; + struct lookahead_entry *buf = + vp8_lookahead_peek(cpi->lookahead, which_buffer, PEEK_FORWARD); + cpi->frames[frames_to_blur - 1 - frame] = &buf->img; + } - vp8_temporal_filter_iterate_c ( - cpi, - frames_to_blur, - frames_to_blur_backward, - strength ); + vp8_temporal_filter_iterate_c(cpi, frames_to_blur, frames_to_blur_backward, + strength); } #endif diff --git a/libs/libvpx/vp8/encoder/tokenize.c b/libs/libvpx/vp8/encoder/tokenize.c index afd46fb219..ca5f0e3d89 100644 --- a/libs/libvpx/vp8/encoder/tokenize.c +++ b/libs/libvpx/vp8/encoder/tokenize.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include #include #include @@ -21,16 +20,17 @@ compressions, then generating context.c = initial stats. 
*/ #ifdef VP8_ENTROPY_STATS -_int64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; +_int64 context_counters[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] + [MAX_ENTROPY_TOKENS]; #endif -void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) ; +void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t); void vp8_fix_contexts(MACROBLOCKD *x); #include "dct_value_tokens.h" #include "dct_value_cost.h" -const TOKENVALUE *const vp8_dct_value_tokens_ptr = dct_value_tokens + - DCT_MAX_VALUE; +const TOKENVALUE *const vp8_dct_value_tokens_ptr = + dct_value_tokens + DCT_MAX_VALUE; const short *const vp8_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE; #if 0 @@ -73,7 +73,8 @@ static void fill_value_tokens() t[i].Extra = eb; } - // initialize the cost for extra bits for all possible coefficient value. + // initialize the cost for extra bits for all possible coefficient +value. { int cost = 0; const vp8_extra_bit_struct *p = vp8_extra_bits + t[i].Token; @@ -84,7 +85,8 @@ static void fill_value_tokens() const int Length = p->Len; if (Length) - cost += vp8_treed_cost(p->tree, p->prob, extra >> 1, Length); + cost += vp8_treed_cost(p->tree, p->prob, extra >> 1, +Length); cost += vp8_cost_bit(vp8_prob_half, extra & 1); // sign dct_value_cost[i + DCT_MAX_VALUE] = cost; @@ -100,509 +102,437 @@ static void fill_value_tokens() } */ -static void tokenize2nd_order_b -( - MACROBLOCK *x, - TOKENEXTRA **tp, - VP8_COMP *cpi -) -{ - MACROBLOCKD *xd = &x->e_mbd; - int pt; /* near block/prev token context index */ - int c; /* start at DC */ - TOKENEXTRA *t = *tp;/* store tokens starting here */ - const BLOCKD *b; - const short *qcoeff_ptr; - ENTROPY_CONTEXT * a; - ENTROPY_CONTEXT * l; - int band, rc, v, token; - int eob; +static void tokenize2nd_order_b(MACROBLOCK *x, TOKENEXTRA **tp, VP8_COMP *cpi) { + MACROBLOCKD *xd = &x->e_mbd; + int pt; /* near block/prev token context index */ + int c; /* start at DC */ + TOKENEXTRA *t = *tp; /* store 
tokens starting here */ + const BLOCKD *b; + const short *qcoeff_ptr; + ENTROPY_CONTEXT *a; + ENTROPY_CONTEXT *l; + int band, rc, v, token; + int eob; - b = xd->block + 24; + b = xd->block + 24; + qcoeff_ptr = b->qcoeff; + a = (ENTROPY_CONTEXT *)xd->above_context + 8; + l = (ENTROPY_CONTEXT *)xd->left_context + 8; + eob = xd->eobs[24]; + VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); + + if (!eob) { + /* c = band for this case */ + t->Token = DCT_EOB_TOKEN; + t->context_tree = cpi->common.fc.coef_probs[1][0][pt]; + t->skip_eob_node = 0; + + ++x->coef_counts[1][0][pt][DCT_EOB_TOKEN]; + t++; + *tp = t; + *a = *l = 0; + return; + } + + v = qcoeff_ptr[0]; + t->Extra = vp8_dct_value_tokens_ptr[v].Extra; + token = vp8_dct_value_tokens_ptr[v].Token; + t->Token = token; + + t->context_tree = cpi->common.fc.coef_probs[1][0][pt]; + t->skip_eob_node = 0; + ++x->coef_counts[1][0][pt][token]; + pt = vp8_prev_token_class[token]; + t++; + c = 1; + + for (; c < eob; ++c) { + rc = vp8_default_zig_zag1d[c]; + band = vp8_coef_bands[c]; + v = qcoeff_ptr[rc]; + + t->Extra = vp8_dct_value_tokens_ptr[v].Extra; + token = vp8_dct_value_tokens_ptr[v].Token; + + t->Token = token; + t->context_tree = cpi->common.fc.coef_probs[1][band][pt]; + + t->skip_eob_node = ((pt == 0)); + + ++x->coef_counts[1][band][pt][token]; + + pt = vp8_prev_token_class[token]; + t++; + } + if (c < 16) { + band = vp8_coef_bands[c]; + t->Token = DCT_EOB_TOKEN; + t->context_tree = cpi->common.fc.coef_probs[1][band][pt]; + + t->skip_eob_node = 0; + + ++x->coef_counts[1][band][pt][DCT_EOB_TOKEN]; + + t++; + } + + *tp = t; + *a = *l = 1; +} + +static void tokenize1st_order_b( + MACROBLOCK *x, TOKENEXTRA **tp, + int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */ + VP8_COMP *cpi) { + MACROBLOCKD *xd = &x->e_mbd; + unsigned int block; + const BLOCKD *b; + int pt; /* near block/prev token context index */ + int c; + int token; + TOKENEXTRA *t = *tp; /* store tokens starting here */ + const short *qcoeff_ptr; + 
ENTROPY_CONTEXT *a; + ENTROPY_CONTEXT *l; + int band, rc, v; + int tmp1, tmp2; + + b = xd->block; + /* Luma */ + for (block = 0; block < 16; block++, b++) { + const int eob = *b->eob; + tmp1 = vp8_block2above[block]; + tmp2 = vp8_block2left[block]; qcoeff_ptr = b->qcoeff; - a = (ENTROPY_CONTEXT *)xd->above_context + 8; - l = (ENTROPY_CONTEXT *)xd->left_context + 8; - eob = xd->eobs[24]; + a = (ENTROPY_CONTEXT *)xd->above_context + tmp1; + l = (ENTROPY_CONTEXT *)xd->left_context + tmp2; + VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); - if(!eob) - { - /* c = band for this case */ - t->Token = DCT_EOB_TOKEN; - t->context_tree = cpi->common.fc.coef_probs [1] [0] [pt]; - t->skip_eob_node = 0; + c = type ? 0 : 1; - ++x->coef_counts [1] [0] [pt] [DCT_EOB_TOKEN]; - t++; - *tp = t; - *a = *l = 0; - return; + if (c >= eob) { + /* c = band for this case */ + t->Token = DCT_EOB_TOKEN; + t->context_tree = cpi->common.fc.coef_probs[type][c][pt]; + t->skip_eob_node = 0; + + ++x->coef_counts[type][c][pt][DCT_EOB_TOKEN]; + t++; + *tp = t; + *a = *l = 0; + continue; + } + + v = qcoeff_ptr[c]; + + t->Extra = vp8_dct_value_tokens_ptr[v].Extra; + token = vp8_dct_value_tokens_ptr[v].Token; + t->Token = token; + + t->context_tree = cpi->common.fc.coef_probs[type][c][pt]; + t->skip_eob_node = 0; + ++x->coef_counts[type][c][pt][token]; + pt = vp8_prev_token_class[token]; + t++; + c++; + + assert(eob <= 16); + for (; c < eob; ++c) { + rc = vp8_default_zig_zag1d[c]; + band = vp8_coef_bands[c]; + v = qcoeff_ptr[rc]; + + t->Extra = vp8_dct_value_tokens_ptr[v].Extra; + token = vp8_dct_value_tokens_ptr[v].Token; + + t->Token = token; + t->context_tree = cpi->common.fc.coef_probs[type][band][pt]; + + t->skip_eob_node = (pt == 0); + ++x->coef_counts[type][band][pt][token]; + + pt = vp8_prev_token_class[token]; + t++; + } + if (c < 16) { + band = vp8_coef_bands[c]; + t->Token = DCT_EOB_TOKEN; + t->context_tree = cpi->common.fc.coef_probs[type][band][pt]; + + t->skip_eob_node = 0; + 
++x->coef_counts[type][band][pt][DCT_EOB_TOKEN]; + + t++; + } + *tp = t; + *a = *l = 1; + } + + /* Chroma */ + for (block = 16; block < 24; block++, b++) { + const int eob = *b->eob; + tmp1 = vp8_block2above[block]; + tmp2 = vp8_block2left[block]; + qcoeff_ptr = b->qcoeff; + a = (ENTROPY_CONTEXT *)xd->above_context + tmp1; + l = (ENTROPY_CONTEXT *)xd->left_context + tmp2; + + VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); + + if (!eob) { + /* c = band for this case */ + t->Token = DCT_EOB_TOKEN; + t->context_tree = cpi->common.fc.coef_probs[2][0][pt]; + t->skip_eob_node = 0; + + ++x->coef_counts[2][0][pt][DCT_EOB_TOKEN]; + t++; + *tp = t; + *a = *l = 0; + continue; } v = qcoeff_ptr[0]; + t->Extra = vp8_dct_value_tokens_ptr[v].Extra; - token = vp8_dct_value_tokens_ptr[v].Token; + token = vp8_dct_value_tokens_ptr[v].Token; t->Token = token; - t->context_tree = cpi->common.fc.coef_probs [1] [0] [pt]; + t->context_tree = cpi->common.fc.coef_probs[2][0][pt]; t->skip_eob_node = 0; - ++x->coef_counts [1] [0] [pt] [token]; + ++x->coef_counts[2][0][pt][token]; pt = vp8_prev_token_class[token]; t++; c = 1; - for (; c < eob; c++) - { - rc = vp8_default_zig_zag1d[c]; - band = vp8_coef_bands[c]; - v = qcoeff_ptr[rc]; + assert(eob <= 16); + for (; c < eob; ++c) { + rc = vp8_default_zig_zag1d[c]; + band = vp8_coef_bands[c]; + v = qcoeff_ptr[rc]; - t->Extra = vp8_dct_value_tokens_ptr[v].Extra; - token = vp8_dct_value_tokens_ptr[v].Token; + t->Extra = vp8_dct_value_tokens_ptr[v].Extra; + token = vp8_dct_value_tokens_ptr[v].Token; - t->Token = token; - t->context_tree = cpi->common.fc.coef_probs [1] [band] [pt]; + t->Token = token; + t->context_tree = cpi->common.fc.coef_probs[2][band][pt]; - t->skip_eob_node = ((pt == 0)); + t->skip_eob_node = (pt == 0); - ++x->coef_counts [1] [band] [pt] [token]; + ++x->coef_counts[2][band][pt][token]; - pt = vp8_prev_token_class[token]; - t++; + pt = vp8_prev_token_class[token]; + t++; } - if (c < 16) - { - band = vp8_coef_bands[c]; - t->Token = 
DCT_EOB_TOKEN; - t->context_tree = cpi->common.fc.coef_probs [1] [band] [pt]; + if (c < 16) { + band = vp8_coef_bands[c]; + t->Token = DCT_EOB_TOKEN; + t->context_tree = cpi->common.fc.coef_probs[2][band][pt]; - t->skip_eob_node = 0; + t->skip_eob_node = 0; - ++x->coef_counts [1] [band] [pt] [DCT_EOB_TOKEN]; + ++x->coef_counts[2][band][pt][DCT_EOB_TOKEN]; - t++; + t++; } - *tp = t; *a = *l = 1; - + } } -static void tokenize1st_order_b -( - MACROBLOCK *x, - TOKENEXTRA **tp, - int type, /* which plane: 0=Y no DC, 1=Y2, 2=UV, 3=Y with DC */ - VP8_COMP *cpi -) -{ - MACROBLOCKD *xd = &x->e_mbd; - unsigned int block; - const BLOCKD *b; - int pt; /* near block/prev token context index */ - int c; - int token; - TOKENEXTRA *t = *tp;/* store tokens starting here */ - const short *qcoeff_ptr; - ENTROPY_CONTEXT * a; - ENTROPY_CONTEXT * l; - int band, rc, v; - int tmp1, tmp2; +static int mb_is_skippable(MACROBLOCKD *x, int has_y2_block) { + int skip = 1; + int i = 0; - b = xd->block; - /* Luma */ - for (block = 0; block < 16; block++, b++) - { - const int eob = *b->eob; - tmp1 = vp8_block2above[block]; - tmp2 = vp8_block2left[block]; - qcoeff_ptr = b->qcoeff; - a = (ENTROPY_CONTEXT *)xd->above_context + tmp1; - l = (ENTROPY_CONTEXT *)xd->left_context + tmp2; + if (has_y2_block) { + for (i = 0; i < 16; ++i) skip &= (x->eobs[i] < 2); + } - VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); + for (; i < 24 + has_y2_block; ++i) skip &= (!x->eobs[i]); - c = type ? 
0 : 1; - - if(c >= eob) - { - /* c = band for this case */ - t->Token = DCT_EOB_TOKEN; - t->context_tree = cpi->common.fc.coef_probs [type] [c] [pt]; - t->skip_eob_node = 0; - - ++x->coef_counts [type] [c] [pt] [DCT_EOB_TOKEN]; - t++; - *tp = t; - *a = *l = 0; - continue; - } - - v = qcoeff_ptr[c]; - - t->Extra = vp8_dct_value_tokens_ptr[v].Extra; - token = vp8_dct_value_tokens_ptr[v].Token; - t->Token = token; - - t->context_tree = cpi->common.fc.coef_probs [type] [c] [pt]; - t->skip_eob_node = 0; - ++x->coef_counts [type] [c] [pt] [token]; - pt = vp8_prev_token_class[token]; - t++; - c++; - - assert(eob <= 16); - for (; c < eob; c++) - { - rc = vp8_default_zig_zag1d[c]; - band = vp8_coef_bands[c]; - v = qcoeff_ptr[rc]; - - t->Extra = vp8_dct_value_tokens_ptr[v].Extra; - token = vp8_dct_value_tokens_ptr[v].Token; - - t->Token = token; - t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt]; - - t->skip_eob_node = (pt == 0); - ++x->coef_counts [type] [band] [pt] [token]; - - pt = vp8_prev_token_class[token]; - t++; - } - if (c < 16) - { - band = vp8_coef_bands[c]; - t->Token = DCT_EOB_TOKEN; - t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt]; - - t->skip_eob_node = 0; - ++x->coef_counts [type] [band] [pt] [DCT_EOB_TOKEN]; - - t++; - } - *tp = t; - *a = *l = 1; - } - - /* Chroma */ - for (block = 16; block < 24; block++, b++) - { - const int eob = *b->eob; - tmp1 = vp8_block2above[block]; - tmp2 = vp8_block2left[block]; - qcoeff_ptr = b->qcoeff; - a = (ENTROPY_CONTEXT *)xd->above_context + tmp1; - l = (ENTROPY_CONTEXT *)xd->left_context + tmp2; - - VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); - - if(!eob) - { - /* c = band for this case */ - t->Token = DCT_EOB_TOKEN; - t->context_tree = cpi->common.fc.coef_probs [2] [0] [pt]; - t->skip_eob_node = 0; - - ++x->coef_counts [2] [0] [pt] [DCT_EOB_TOKEN]; - t++; - *tp = t; - *a = *l = 0; - continue; - } - - v = qcoeff_ptr[0]; - - t->Extra = vp8_dct_value_tokens_ptr[v].Extra; - token = 
vp8_dct_value_tokens_ptr[v].Token; - t->Token = token; - - t->context_tree = cpi->common.fc.coef_probs [2] [0] [pt]; - t->skip_eob_node = 0; - ++x->coef_counts [2] [0] [pt] [token]; - pt = vp8_prev_token_class[token]; - t++; - c = 1; - - assert(eob <= 16); - for (; c < eob; c++) - { - rc = vp8_default_zig_zag1d[c]; - band = vp8_coef_bands[c]; - v = qcoeff_ptr[rc]; - - t->Extra = vp8_dct_value_tokens_ptr[v].Extra; - token = vp8_dct_value_tokens_ptr[v].Token; - - t->Token = token; - t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt]; - - t->skip_eob_node = (pt == 0); - - ++x->coef_counts [2] [band] [pt] [token]; - - pt = vp8_prev_token_class[token]; - t++; - } - if (c < 16) - { - band = vp8_coef_bands[c]; - t->Token = DCT_EOB_TOKEN; - t->context_tree = cpi->common.fc.coef_probs [2] [band] [pt]; - - t->skip_eob_node = 0; - - ++x->coef_counts [2] [band] [pt] [DCT_EOB_TOKEN]; - - t++; - } - *tp = t; - *a = *l = 1; - } + return skip; } +void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) { + MACROBLOCKD *xd = &x->e_mbd; + int plane_type; + int has_y2_block; -static int mb_is_skippable(MACROBLOCKD *x, int has_y2_block) -{ - int skip = 1; - int i = 0; + has_y2_block = (xd->mode_info_context->mbmi.mode != B_PRED && + xd->mode_info_context->mbmi.mode != SPLITMV); - if (has_y2_block) - { - for (i = 0; i < 16; i++) - skip &= (x->eobs[i] < 2); + xd->mode_info_context->mbmi.mb_skip_coeff = mb_is_skippable(xd, has_y2_block); + if (xd->mode_info_context->mbmi.mb_skip_coeff) { + if (!cpi->common.mb_no_coeff_skip) { + vp8_stuff_mb(cpi, x, t); + } else { + vp8_fix_contexts(xd); + x->skip_true_count++; } - for (; i < 24 + has_y2_block; i++) - skip &= (!x->eobs[i]); + return; + } - return skip; + plane_type = 3; + if (has_y2_block) { + tokenize2nd_order_b(x, t, cpi); + plane_type = 0; + } + + tokenize1st_order_b(x, t, plane_type, cpi); } - -void vp8_tokenize_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) -{ - MACROBLOCKD *xd = &x->e_mbd; - int plane_type; - 
int has_y2_block; - - has_y2_block = (xd->mode_info_context->mbmi.mode != B_PRED - && xd->mode_info_context->mbmi.mode != SPLITMV); - - xd->mode_info_context->mbmi.mb_skip_coeff = - mb_is_skippable(xd, has_y2_block); - if (xd->mode_info_context->mbmi.mb_skip_coeff) - { - if (!cpi->common.mb_no_coeff_skip) - { - vp8_stuff_mb(cpi, x, t); - } - else - { - vp8_fix_contexts(xd); - x->skip_true_count++; - } - - return; - } - - plane_type = 3; - if(has_y2_block) - { - tokenize2nd_order_b(x, t, cpi); - plane_type = 0; - } - - tokenize1st_order_b(x, t, plane_type, cpi); -} - - #ifdef VP8_ENTROPY_STATS -void init_context_counters(void) -{ - memset(context_counters, 0, sizeof(context_counters)); +void init_context_counters(void) { + memset(context_counters, 0, sizeof(context_counters)); } -void print_context_counters() -{ +void print_context_counters() { + int type, band, pt, t; - int type, band, pt, t; + FILE *const f = fopen("context.c", "w"); - FILE *const f = fopen("context.c", "w"); + fprintf(f, "#include \"entropy.h\"\n"); - fprintf(f, "#include \"entropy.h\"\n"); + fprintf(f, "\n/* *** GENERATED FILE: DO NOT EDIT *** */\n\n"); - fprintf(f, "\n/* *** GENERATED FILE: DO NOT EDIT *** */\n\n"); + fprintf(f, + "int Contexts[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] " + "[MAX_ENTROPY_TOKENS];\n\n"); - fprintf(f, "int Contexts[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];\n\n"); + fprintf(f, + "const int default_contexts[BLOCK_TYPES] [COEF_BANDS] " + "[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS] = {"); - fprintf(f, "const int default_contexts[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS] = {"); +#define Comma(X) (X ? "," : "") -# define Comma( X) (X? 
",":"") + type = 0; - type = 0; + do { + fprintf(f, "%s\n { /* block Type %d */", Comma(type), type); - do - { - fprintf(f, "%s\n { /* block Type %d */", Comma(type), type); + band = 0; - band = 0; + do { + fprintf(f, "%s\n { /* Coeff Band %d */", Comma(band), band); - do - { - fprintf(f, "%s\n { /* Coeff Band %d */", Comma(band), band); + pt = 0; - pt = 0; + do { + fprintf(f, "%s\n {", Comma(pt)); - do - { - fprintf(f, "%s\n {", Comma(pt)); + t = 0; - t = 0; + do { + const _int64 x = context_counters[type][band][pt][t]; + const int y = (int)x; - do - { - const _int64 x = context_counters [type] [band] [pt] [t]; - const int y = (int) x; + assert(x == (_int64)y); /* no overflow handling yet */ + fprintf(f, "%s %d", Comma(t), y); - assert(x == (_int64) y); /* no overflow handling yet */ - fprintf(f, "%s %d", Comma(t), y); + } while (++t < MAX_ENTROPY_TOKENS); - } - while (++t < MAX_ENTROPY_TOKENS); + fprintf(f, "}"); + } while (++pt < PREV_COEF_CONTEXTS); - fprintf(f, "}"); - } - while (++pt < PREV_COEF_CONTEXTS); + fprintf(f, "\n }"); - fprintf(f, "\n }"); + } while (++band < COEF_BANDS); - } - while (++band < COEF_BANDS); + fprintf(f, "\n }"); + } while (++type < BLOCK_TYPES); - fprintf(f, "\n }"); - } - while (++type < BLOCK_TYPES); - - fprintf(f, "\n};\n"); - fclose(f); + fprintf(f, "\n};\n"); + fclose(f); } #endif +static void stuff2nd_order_b(TOKENEXTRA **tp, ENTROPY_CONTEXT *a, + ENTROPY_CONTEXT *l, VP8_COMP *cpi, MACROBLOCK *x) { + int pt; /* near block/prev token context index */ + TOKENEXTRA *t = *tp; /* store tokens starting here */ + VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); -static void stuff2nd_order_b -( - TOKENEXTRA **tp, - ENTROPY_CONTEXT *a, - ENTROPY_CONTEXT *l, - VP8_COMP *cpi, - MACROBLOCK *x -) -{ - int pt; /* near block/prev token context index */ - TOKENEXTRA *t = *tp; /* store tokens starting here */ - VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); + t->Token = DCT_EOB_TOKEN; + t->context_tree = cpi->common.fc.coef_probs[1][0][pt]; + t->skip_eob_node 
= 0; + ++x->coef_counts[1][0][pt][DCT_EOB_TOKEN]; + ++t; - t->Token = DCT_EOB_TOKEN; - t->context_tree = cpi->common.fc.coef_probs [1] [0] [pt]; - t->skip_eob_node = 0; - ++x->coef_counts [1] [0] [pt] [DCT_EOB_TOKEN]; - ++t; - - *tp = t; - pt = 0; - *a = *l = pt; + *tp = t; + pt = 0; + *a = *l = pt; } -static void stuff1st_order_b -( - TOKENEXTRA **tp, - ENTROPY_CONTEXT *a, - ENTROPY_CONTEXT *l, - int type, - VP8_COMP *cpi, - MACROBLOCK *x -) -{ - int pt; /* near block/prev token context index */ - int band; - TOKENEXTRA *t = *tp; /* store tokens starting here */ - VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); - band = type ? 0 : 1; - t->Token = DCT_EOB_TOKEN; - t->context_tree = cpi->common.fc.coef_probs [type] [band] [pt]; - t->skip_eob_node = 0; - ++x->coef_counts [type] [band] [pt] [DCT_EOB_TOKEN]; - ++t; - *tp = t; - pt = 0; /* 0 <-> all coeff data is zero */ - *a = *l = pt; +static void stuff1st_order_b(TOKENEXTRA **tp, ENTROPY_CONTEXT *a, + ENTROPY_CONTEXT *l, int type, VP8_COMP *cpi, + MACROBLOCK *x) { + int pt; /* near block/prev token context index */ + int band; + TOKENEXTRA *t = *tp; /* store tokens starting here */ + VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); + band = type ? 
0 : 1; + t->Token = DCT_EOB_TOKEN; + t->context_tree = cpi->common.fc.coef_probs[type][band][pt]; + t->skip_eob_node = 0; + ++x->coef_counts[type][band][pt][DCT_EOB_TOKEN]; + ++t; + *tp = t; + pt = 0; /* 0 <-> all coeff data is zero */ + *a = *l = pt; } -static -void stuff1st_order_buv -( - TOKENEXTRA **tp, - ENTROPY_CONTEXT *a, - ENTROPY_CONTEXT *l, - VP8_COMP *cpi, - MACROBLOCK *x -) -{ - int pt; /* near block/prev token context index */ - TOKENEXTRA *t = *tp; /* store tokens starting here */ - VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); +static void stuff1st_order_buv(TOKENEXTRA **tp, ENTROPY_CONTEXT *a, + ENTROPY_CONTEXT *l, VP8_COMP *cpi, + MACROBLOCK *x) { + int pt; /* near block/prev token context index */ + TOKENEXTRA *t = *tp; /* store tokens starting here */ + VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l); - t->Token = DCT_EOB_TOKEN; - t->context_tree = cpi->common.fc.coef_probs [2] [0] [pt]; - t->skip_eob_node = 0; - ++x->coef_counts[2] [0] [pt] [DCT_EOB_TOKEN]; - ++t; - *tp = t; - pt = 0; /* 0 <-> all coeff data is zero */ - *a = *l = pt; + t->Token = DCT_EOB_TOKEN; + t->context_tree = cpi->common.fc.coef_probs[2][0][pt]; + t->skip_eob_node = 0; + ++x->coef_counts[2][0][pt][DCT_EOB_TOKEN]; + ++t; + *tp = t; + pt = 0; /* 0 <-> all coeff data is zero */ + *a = *l = pt; } -void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) -{ - MACROBLOCKD *xd = &x->e_mbd; - ENTROPY_CONTEXT * A = (ENTROPY_CONTEXT *)xd->above_context; - ENTROPY_CONTEXT * L = (ENTROPY_CONTEXT *)xd->left_context; - int plane_type; - int b; - plane_type = 3; - if((xd->mode_info_context->mbmi.mode != B_PRED - && xd->mode_info_context->mbmi.mode != SPLITMV)) - { - stuff2nd_order_b(t, - A + vp8_block2above[24], L + vp8_block2left[24], cpi, x); - plane_type = 0; - } +void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t) { + MACROBLOCKD *xd = &x->e_mbd; + ENTROPY_CONTEXT *A = (ENTROPY_CONTEXT *)xd->above_context; + ENTROPY_CONTEXT *L = (ENTROPY_CONTEXT *)xd->left_context; + int 
plane_type; + int b; + plane_type = 3; + if ((xd->mode_info_context->mbmi.mode != B_PRED && + xd->mode_info_context->mbmi.mode != SPLITMV)) { + stuff2nd_order_b(t, A + vp8_block2above[24], L + vp8_block2left[24], cpi, + x); + plane_type = 0; + } - for (b = 0; b < 16; b++) - stuff1st_order_b(t, - A + vp8_block2above[b], - L + vp8_block2left[b], plane_type, cpi, x); - - for (b = 16; b < 24; b++) - stuff1st_order_buv(t, - A + vp8_block2above[b], - L + vp8_block2left[b], cpi, x); + for (b = 0; b < 16; ++b) { + stuff1st_order_b(t, A + vp8_block2above[b], L + vp8_block2left[b], + plane_type, cpi, x); + } + for (b = 16; b < 24; ++b) { + stuff1st_order_buv(t, A + vp8_block2above[b], L + vp8_block2left[b], cpi, + x); + } } -void vp8_fix_contexts(MACROBLOCKD *x) -{ - /* Clear entropy contexts for Y2 blocks */ - if (x->mode_info_context->mbmi.mode != B_PRED && x->mode_info_context->mbmi.mode != SPLITMV) - { - memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); - memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); - } - else - { - memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1); - memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1); - } - +void vp8_fix_contexts(MACROBLOCKD *x) { + /* Clear entropy contexts for Y2 blocks */ + if (x->mode_info_context->mbmi.mode != B_PRED && + x->mode_info_context->mbmi.mode != SPLITMV) { + memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); + memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); + } else { + memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1); + memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) - 1); + } } diff --git a/libs/libvpx/vp8/encoder/tokenize.h b/libs/libvpx/vp8/encoder/tokenize.h index b73a9ee1c8..e5dbdfc5af 100644 --- a/libs/libvpx/vp8/encoder/tokenize.h +++ b/libs/libvpx/vp8/encoder/tokenize.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_ENCODER_TOKENIZE_H_ #define VP8_ENCODER_TOKENIZE_H_ @@ -21,18 +20,16 @@ extern "C" { void vp8_tokenize_initialize(); -typedef struct -{ - short Token; - short Extra; +typedef struct { + short Token; + short Extra; } TOKENVALUE; -typedef struct -{ - const vp8_prob *context_tree; - short Extra; - unsigned char Token; - unsigned char skip_eob_node; +typedef struct { + const vp8_prob *context_tree; + short Extra; + unsigned char Token; + unsigned char skip_eob_node; } TOKENEXTRA; int rd_cost_mby(MACROBLOCKD *); @@ -41,7 +38,8 @@ int rd_cost_mby(MACROBLOCKD *); void init_context_counters(); void print_context_counters(); -extern _int64 context_counters[BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS]; +extern _int64 context_counters[BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS] + [MAX_ENTROPY_TOKENS]; #endif extern const short *const vp8_dct_value_cost_ptr; diff --git a/libs/libvpx/vp8/encoder/treewriter.c b/libs/libvpx/vp8/encoder/treewriter.c index ef25f670b3..f055f05229 100644 --- a/libs/libvpx/vp8/encoder/treewriter.c +++ b/libs/libvpx/vp8/encoder/treewriter.c @@ -8,36 +8,26 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "treewriter.h" -static void cost( - int *const C, - vp8_tree T, - const vp8_prob *const P, - int i, - int c -) -{ - const vp8_prob p = P [i>>1]; +static void cost(int *const C, vp8_tree T, const vp8_prob *const P, int i, + int c) { + const vp8_prob p = P[i >> 1]; - do - { - const vp8_tree_index j = T[i]; - const int d = c + vp8_cost_bit(p, i & 1); + do { + const vp8_tree_index j = T[i]; + const int d = c + vp8_cost_bit(p, i & 1); - if (j <= 0) - C[-j] = d; - else - cost(C, T, P, j, d); + if (j <= 0) { + C[-j] = d; + } else { + cost(C, T, P, j, d); } - while (++i & 1); + } while (++i & 1); } -void vp8_cost_tokens(int *c, const vp8_prob *p, vp8_tree t) -{ - cost(c, t, p, 0, 0); +void vp8_cost_tokens(int *c, const vp8_prob *p, vp8_tree t) { + cost(c, t, p, 0, 0); } -void vp8_cost_tokens2(int *c, const vp8_prob *p, vp8_tree t,int start) -{ - cost(c, t, p, start, 0); +void vp8_cost_tokens2(int *c, const vp8_prob *p, vp8_tree t, int start) { + cost(c, t, p, start, 0); } diff --git a/libs/libvpx/vp8/encoder/treewriter.h b/libs/libvpx/vp8/encoder/treewriter.h index 2debf9276c..dadbbe3f80 100644 --- a/libs/libvpx/vp8/encoder/treewriter.h +++ b/libs/libvpx/vp8/encoder/treewriter.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP8_ENCODER_TREEWRITER_H_ #define VP8_ENCODER_TREEWRITER_H_ @@ -18,7 +17,7 @@ #include "./vpx_config.h" #include "vp8/common/treecoder.h" -#include "boolhuff.h" /* for now */ +#include "boolhuff.h" /* for now */ #ifdef __cplusplus extern "C" { @@ -28,105 +27,75 @@ typedef BOOL_CODER vp8_writer; #define vp8_write vp8_encode_bool #define vp8_write_literal vp8_encode_value -#define vp8_write_bit( W, V) vp8_write( W, V, vp8_prob_half) +#define vp8_write_bit(W, V) vp8_write(W, V, vp8_prob_half) #define vp8bc_write vp8bc_write_bool #define vp8bc_write_literal vp8bc_write_bits -#define vp8bc_write_bit( W, V) vp8bc_write_bits( W, V, 1) - +#define vp8bc_write_bit(W, V) vp8bc_write_bits(W, V, 1) /* Approximate length of an encoded bool in 256ths of a bit at given prob */ -#define vp8_cost_zero( x) ( vp8_prob_cost[x]) -#define vp8_cost_one( x) vp8_cost_zero( vp8_complement(x)) +#define vp8_cost_zero(x) (vp8_prob_cost[x]) +#define vp8_cost_one(x) vp8_cost_zero(vp8_complement(x)) -#define vp8_cost_bit( x, b) vp8_cost_zero( (b)? vp8_complement(x) : (x) ) +#define vp8_cost_bit(x, b) vp8_cost_zero((b) ? vp8_complement(x) : (x)) /* VP8BC version is scaled by 2^20 rather than 2^8; see bool_coder.h */ - /* Both of these return bits, not scaled bits. */ -static INLINE unsigned int vp8_cost_branch(const unsigned int ct[2], vp8_prob p) -{ - /* Imitate existing calculation */ +static INLINE unsigned int vp8_cost_branch(const unsigned int ct[2], + vp8_prob p) { + /* Imitate existing calculation */ - return ((ct[0] * vp8_cost_zero(p)) - + (ct[1] * vp8_cost_one(p))) >> 8; + return ((ct[0] * vp8_cost_zero(p)) + (ct[1] * vp8_cost_one(p))) >> 8; } /* Small functions to write explicit values and tokens, as well as estimate their lengths. 
*/ -static void vp8_treed_write -( - vp8_writer *const w, - vp8_tree t, - const vp8_prob *const p, - int v, - int n /* number of bits in v, assumed nonzero */ -) -{ - vp8_tree_index i = 0; +static void vp8_treed_write(vp8_writer *const w, vp8_tree t, + const vp8_prob *const p, int v, + int n /* number of bits in v, assumed nonzero */ + ) { + vp8_tree_index i = 0; - do - { - const int b = (v >> --n) & 1; - vp8_write(w, b, p[i>>1]); - i = t[i+b]; - } - while (n); + do { + const int b = (v >> --n) & 1; + vp8_write(w, b, p[i >> 1]); + i = t[i + b]; + } while (n); } -static INLINE void vp8_write_token -( - vp8_writer *const w, - vp8_tree t, - const vp8_prob *const p, - vp8_token *const x -) -{ - vp8_treed_write(w, t, p, x->value, x->Len); +static INLINE void vp8_write_token(vp8_writer *const w, vp8_tree t, + const vp8_prob *const p, + vp8_token *const x) { + vp8_treed_write(w, t, p, x->value, x->Len); } -static int vp8_treed_cost( - vp8_tree t, - const vp8_prob *const p, - int v, - int n /* number of bits in v, assumed nonzero */ -) -{ - int c = 0; - vp8_tree_index i = 0; +static int vp8_treed_cost(vp8_tree t, const vp8_prob *const p, int v, + int n /* number of bits in v, assumed nonzero */ + ) { + int c = 0; + vp8_tree_index i = 0; - do - { - const int b = (v >> --n) & 1; - c += vp8_cost_bit(p[i>>1], b); - i = t[i+b]; - } - while (n); + do { + const int b = (v >> --n) & 1; + c += vp8_cost_bit(p[i >> 1], b); + i = t[i + b]; + } while (n); - return c; + return c; } -static INLINE int vp8_cost_token -( - vp8_tree t, - const vp8_prob *const p, - vp8_token *const x -) -{ - return vp8_treed_cost(t, p, x->value, x->Len); +static INLINE int vp8_cost_token(vp8_tree t, const vp8_prob *const p, + vp8_token *const x) { + return vp8_treed_cost(t, p, x->value, x->Len); } /* Fill array of costs for all possible token values. 
*/ -void vp8_cost_tokens( - int *Costs, const vp8_prob *, vp8_tree -); +void vp8_cost_tokens(int *Costs, const vp8_prob *, vp8_tree); -void vp8_cost_tokens2( - int *Costs, const vp8_prob *, vp8_tree, int -); +void vp8_cost_tokens2(int *Costs, const vp8_prob *, vp8_tree, int); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp8/encoder/vp8_quantize.c b/libs/libvpx/vp8/encoder/vp8_quantize.c index ee922c9d69..ff6e04eaad 100644 --- a/libs/libvpx/vp8/encoder/vp8_quantize.c +++ b/libs/libvpx/vp8/encoder/vp8_quantize.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include #include "vpx_mem/vpx_mem.h" @@ -16,568 +15,477 @@ #include "vp8/encoder/quantize.h" #include "vp8/common/quant_common.h" -void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d) -{ - int i, rc, eob; - int x, y, z, sz; - short *coeff_ptr = b->coeff; - short *round_ptr = b->round; - short *quant_ptr = b->quant_fast; - short *qcoeff_ptr = d->qcoeff; - short *dqcoeff_ptr = d->dqcoeff; - short *dequant_ptr = d->dequant; +void vp8_fast_quantize_b_c(BLOCK *b, BLOCKD *d) { + int i, rc, eob; + int x, y, z, sz; + short *coeff_ptr = b->coeff; + short *round_ptr = b->round; + short *quant_ptr = b->quant_fast; + short *qcoeff_ptr = d->qcoeff; + short *dqcoeff_ptr = d->dqcoeff; + short *dequant_ptr = d->dequant; - eob = -1; - for (i = 0; i < 16; i++) - { - rc = vp8_default_zig_zag1d[i]; - z = coeff_ptr[rc]; + eob = -1; + for (i = 0; i < 16; ++i) { + rc = vp8_default_zig_zag1d[i]; + z = coeff_ptr[rc]; - sz = (z >> 31); /* sign of z */ - x = (z ^ sz) - sz; /* x = abs(z) */ + sz = (z >> 31); /* sign of z */ + x = (z ^ sz) - sz; /* x = abs(z) */ - y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16; /* quantize (x) */ - x = (y ^ sz) - sz; /* get the sign back */ - qcoeff_ptr[rc] = x; /* write to destination */ - dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */ + y = ((x + round_ptr[rc]) * quant_ptr[rc]) >> 16; /* quantize (x) */ + x = (y ^ sz) - sz; /* get the sign 
back */ + qcoeff_ptr[rc] = x; /* write to destination */ + dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */ - if (y) - { - eob = i; /* last nonzero coeffs */ - } + if (y) { + eob = i; /* last nonzero coeffs */ } - *d->eob = (char)(eob + 1); + } + *d->eob = (char)(eob + 1); } -void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d) -{ - int i, rc, eob; - int zbin; - int x, y, z, sz; - short *zbin_boost_ptr = b->zrun_zbin_boost; - short *coeff_ptr = b->coeff; - short *zbin_ptr = b->zbin; - short *round_ptr = b->round; - short *quant_ptr = b->quant; - short *quant_shift_ptr = b->quant_shift; - short *qcoeff_ptr = d->qcoeff; - short *dqcoeff_ptr = d->dqcoeff; - short *dequant_ptr = d->dequant; - short zbin_oq_value = b->zbin_extra; +void vp8_regular_quantize_b_c(BLOCK *b, BLOCKD *d) { + int i, rc, eob; + int zbin; + int x, y, z, sz; + short *zbin_boost_ptr = b->zrun_zbin_boost; + short *coeff_ptr = b->coeff; + short *zbin_ptr = b->zbin; + short *round_ptr = b->round; + short *quant_ptr = b->quant; + short *quant_shift_ptr = b->quant_shift; + short *qcoeff_ptr = d->qcoeff; + short *dqcoeff_ptr = d->dqcoeff; + short *dequant_ptr = d->dequant; + short zbin_oq_value = b->zbin_extra; - memset(qcoeff_ptr, 0, 32); - memset(dqcoeff_ptr, 0, 32); + memset(qcoeff_ptr, 0, 32); + memset(dqcoeff_ptr, 0, 32); - eob = -1; + eob = -1; - for (i = 0; i < 16; i++) - { - rc = vp8_default_zig_zag1d[i]; - z = coeff_ptr[rc]; + for (i = 0; i < 16; ++i) { + rc = vp8_default_zig_zag1d[i]; + z = coeff_ptr[rc]; - zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value; + zbin = zbin_ptr[rc] + *zbin_boost_ptr + zbin_oq_value; - zbin_boost_ptr ++; - sz = (z >> 31); /* sign of z */ - x = (z ^ sz) - sz; /* x = abs(z) */ + zbin_boost_ptr++; + sz = (z >> 31); /* sign of z */ + x = (z ^ sz) - sz; /* x = abs(z) */ - if (x >= zbin) - { - x += round_ptr[rc]; - y = ((((x * quant_ptr[rc]) >> 16) + x) - * quant_shift_ptr[rc]) >> 16; /* quantize (x) */ - x = (y ^ sz) - sz; /* get the sign back */ - 
qcoeff_ptr[rc] = x; /* write to destination */ - dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */ + if (x >= zbin) { + x += round_ptr[rc]; + y = ((((x * quant_ptr[rc]) >> 16) + x) * quant_shift_ptr[rc]) >> + 16; /* quantize (x) */ + x = (y ^ sz) - sz; /* get the sign back */ + qcoeff_ptr[rc] = x; /* write to destination */ + dqcoeff_ptr[rc] = x * dequant_ptr[rc]; /* dequantized value */ - if (y) - { - eob = i; /* last nonzero coeffs */ - zbin_boost_ptr = b->zrun_zbin_boost; /* reset zero runlength */ - } - } + if (y) { + eob = i; /* last nonzero coeffs */ + zbin_boost_ptr = b->zrun_zbin_boost; /* reset zero runlength */ + } } + } - *d->eob = (char)(eob + 1); + *d->eob = (char)(eob + 1); } -void vp8_quantize_mby(MACROBLOCK *x) -{ - int i; - int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED - && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV); +void vp8_quantize_mby(MACROBLOCK *x) { + int i; + int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED && + x->e_mbd.mode_info_context->mbmi.mode != SPLITMV); - for (i = 0; i < 16; i++) - x->quantize_b(&x->block[i], &x->e_mbd.block[i]); + for (i = 0; i < 16; ++i) x->quantize_b(&x->block[i], &x->e_mbd.block[i]); - if(has_2nd_order) - x->quantize_b(&x->block[24], &x->e_mbd.block[24]); + if (has_2nd_order) x->quantize_b(&x->block[24], &x->e_mbd.block[24]); } -void vp8_quantize_mb(MACROBLOCK *x) -{ - int i; - int has_2nd_order=(x->e_mbd.mode_info_context->mbmi.mode != B_PRED - && x->e_mbd.mode_info_context->mbmi.mode != SPLITMV); +void vp8_quantize_mb(MACROBLOCK *x) { + int i; + int has_2nd_order = (x->e_mbd.mode_info_context->mbmi.mode != B_PRED && + x->e_mbd.mode_info_context->mbmi.mode != SPLITMV); - for (i = 0; i < 24+has_2nd_order; i++) - x->quantize_b(&x->block[i], &x->e_mbd.block[i]); + for (i = 0; i < 24 + has_2nd_order; ++i) { + x->quantize_b(&x->block[i], &x->e_mbd.block[i]); + } } +void vp8_quantize_mbuv(MACROBLOCK *x) { + int i; -void vp8_quantize_mbuv(MACROBLOCK *x) 
-{ - int i; - - for (i = 16; i < 24; i++) - x->quantize_b(&x->block[i], &x->e_mbd.block[i]); + for (i = 16; i < 24; ++i) x->quantize_b(&x->block[i], &x->e_mbd.block[i]); } -static const int qrounding_factors[129] = -{ - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48 +static const int qrounding_factors[129] = { + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48 }; - -static const int qzbin_factors[129] = -{ - 84, 84, 84, 84, 84, 84, 84, 84, - 84, 84, 84, 84, 84, 84, 84, 84, - 84, 84, 84, 84, 84, 84, 84, 84, - 84, 84, 84, 84, 84, 84, 84, 84, - 84, 84, 84, 84, 84, 84, 84, 84, - 84, 84, 84, 84, 84, 84, 84, 84, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80 +static const int qzbin_factors[129] = { + 84, 84, 84, 84, 84, 
84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, + 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, + 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 80, 80, 80, 80, 80, 80, 80, 80, 80, + 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, + 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, + 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, + 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80 }; - -static const int qrounding_factors_y2[129] = -{ - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48 +static const int qrounding_factors_y2[129] = { + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48 }; - -static const int qzbin_factors_y2[129] = -{ - 84, 84, 84, 84, 84, 84, 84, 84, - 84, 84, 84, 84, 84, 84, 84, 84, - 84, 84, 84, 84, 84, 84, 84, 84, - 84, 84, 84, 84, 84, 84, 84, 84, - 84, 84, 84, 84, 84, 84, 84, 84, - 84, 84, 84, 84, 84, 84, 84, 84, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 
80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, - 80 +static const int qzbin_factors_y2[129] = { + 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, + 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, + 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 80, 80, 80, 80, 80, 80, 80, 80, 80, + 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, + 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, + 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, + 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80 }; - -static void invert_quant(int improved_quant, short *quant, - short *shift, short d) -{ - if(improved_quant) - { - unsigned t; - int l; - t = d; - for(l = 0; t > 1; l++) - t>>=1; - t = 1 + (1<<(16+l))/d; - *quant = (short)(t - (1<<16)); - *shift = l; - /* use multiplication and constant shift by 16 */ - *shift = 1 << (16 - *shift); - } - else - { - *quant = (1 << 16) / d; - *shift = 0; - /* use multiplication and constant shift by 16 */ - *shift = 1 << (16 - *shift); - } +static void invert_quant(int improved_quant, short *quant, short *shift, + short d) { + if (improved_quant) { + unsigned t; + int l, m; + t = d; + for (l = 0; t > 1; ++l) t >>= 1; + m = 1 + (1 << (16 + l)) / d; + *quant = (short)(m - (1 << 16)); + *shift = l; + /* use multiplication and constant shift by 16 */ + *shift = 1 << (16 - *shift); + } else { + *quant = (1 << 16) / d; + *shift = 0; + /* use multiplication and constant shift by 16 */ + *shift = 1 << (16 - *shift); + } } +void vp8cx_init_quantizer(VP8_COMP *cpi) { + int i; + int quant_val; + int Q; -void vp8cx_init_quantizer(VP8_COMP *cpi) -{ - int i; - int quant_val; - int Q; + int 
zbin_boost[16] = { 0, 0, 8, 10, 12, 14, 16, 20, + 24, 28, 32, 36, 40, 44, 44, 44 }; - int zbin_boost[16] = {0, 0, 8, 10, 12, 14, 16, 20, 24, 28, 32, 36, 40, 44, - 44, 44}; + for (Q = 0; Q < QINDEX_RANGE; ++Q) { + /* dc values */ + quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q); + cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val; + invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0, + cpi->Y1quant_shift[Q] + 0, quant_val); + cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7; + cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7; + cpi->common.Y1dequant[Q][0] = quant_val; + cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7; - for (Q = 0; Q < QINDEX_RANGE; Q++) - { - /* dc values */ - quant_val = vp8_dc_quant(Q, cpi->common.y1dc_delta_q); - cpi->Y1quant_fast[Q][0] = (1 << 16) / quant_val; - invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 0, - cpi->Y1quant_shift[Q] + 0, quant_val); - cpi->Y1zbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7; - cpi->Y1round[Q][0] = (qrounding_factors[Q] * quant_val) >> 7; - cpi->common.Y1dequant[Q][0] = quant_val; - cpi->zrun_zbin_boost_y1[Q][0] = (quant_val * zbin_boost[0]) >> 7; + quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q); + cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val; + invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0, + cpi->Y2quant_shift[Q] + 0, quant_val); + cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7; + cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7; + cpi->common.Y2dequant[Q][0] = quant_val; + cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7; - quant_val = vp8_dc2quant(Q, cpi->common.y2dc_delta_q); - cpi->Y2quant_fast[Q][0] = (1 << 16) / quant_val; - invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 0, - cpi->Y2quant_shift[Q] + 0, quant_val); - cpi->Y2zbin[Q][0] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7; - cpi->Y2round[Q][0] = (qrounding_factors_y2[Q] * quant_val) >> 7; - 
cpi->common.Y2dequant[Q][0] = quant_val; - cpi->zrun_zbin_boost_y2[Q][0] = (quant_val * zbin_boost[0]) >> 7; + quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q); + cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val; + invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0, + cpi->UVquant_shift[Q] + 0, quant_val); + cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7; + cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7; + cpi->common.UVdequant[Q][0] = quant_val; + cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7; - quant_val = vp8_dc_uv_quant(Q, cpi->common.uvdc_delta_q); - cpi->UVquant_fast[Q][0] = (1 << 16) / quant_val; - invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 0, - cpi->UVquant_shift[Q] + 0, quant_val); - cpi->UVzbin[Q][0] = ((qzbin_factors[Q] * quant_val) + 64) >> 7;; - cpi->UVround[Q][0] = (qrounding_factors[Q] * quant_val) >> 7; - cpi->common.UVdequant[Q][0] = quant_val; - cpi->zrun_zbin_boost_uv[Q][0] = (quant_val * zbin_boost[0]) >> 7; + /* all the ac values = ; */ + quant_val = vp8_ac_yquant(Q); + cpi->Y1quant_fast[Q][1] = (1 << 16) / quant_val; + invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 1, + cpi->Y1quant_shift[Q] + 1, quant_val); + cpi->Y1zbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7; + cpi->Y1round[Q][1] = (qrounding_factors[Q] * quant_val) >> 7; + cpi->common.Y1dequant[Q][1] = quant_val; + cpi->zrun_zbin_boost_y1[Q][1] = (quant_val * zbin_boost[1]) >> 7; - /* all the ac values = ; */ - quant_val = vp8_ac_yquant(Q); - cpi->Y1quant_fast[Q][1] = (1 << 16) / quant_val; - invert_quant(cpi->sf.improved_quant, cpi->Y1quant[Q] + 1, - cpi->Y1quant_shift[Q] + 1, quant_val); - cpi->Y1zbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7; - cpi->Y1round[Q][1] = (qrounding_factors[Q] * quant_val) >> 7; - cpi->common.Y1dequant[Q][1] = quant_val; - cpi->zrun_zbin_boost_y1[Q][1] = (quant_val * zbin_boost[1]) >> 7; + quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q); + 
cpi->Y2quant_fast[Q][1] = (1 << 16) / quant_val; + invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 1, + cpi->Y2quant_shift[Q] + 1, quant_val); + cpi->Y2zbin[Q][1] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7; + cpi->Y2round[Q][1] = (qrounding_factors_y2[Q] * quant_val) >> 7; + cpi->common.Y2dequant[Q][1] = quant_val; + cpi->zrun_zbin_boost_y2[Q][1] = (quant_val * zbin_boost[1]) >> 7; - quant_val = vp8_ac2quant(Q, cpi->common.y2ac_delta_q); - cpi->Y2quant_fast[Q][1] = (1 << 16) / quant_val; - invert_quant(cpi->sf.improved_quant, cpi->Y2quant[Q] + 1, - cpi->Y2quant_shift[Q] + 1, quant_val); - cpi->Y2zbin[Q][1] = ((qzbin_factors_y2[Q] * quant_val) + 64) >> 7; - cpi->Y2round[Q][1] = (qrounding_factors_y2[Q] * quant_val) >> 7; - cpi->common.Y2dequant[Q][1] = quant_val; - cpi->zrun_zbin_boost_y2[Q][1] = (quant_val * zbin_boost[1]) >> 7; + quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q); + cpi->UVquant_fast[Q][1] = (1 << 16) / quant_val; + invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 1, + cpi->UVquant_shift[Q] + 1, quant_val); + cpi->UVzbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7; + cpi->UVround[Q][1] = (qrounding_factors[Q] * quant_val) >> 7; + cpi->common.UVdequant[Q][1] = quant_val; + cpi->zrun_zbin_boost_uv[Q][1] = (quant_val * zbin_boost[1]) >> 7; - quant_val = vp8_ac_uv_quant(Q, cpi->common.uvac_delta_q); - cpi->UVquant_fast[Q][1] = (1 << 16) / quant_val; - invert_quant(cpi->sf.improved_quant, cpi->UVquant[Q] + 1, - cpi->UVquant_shift[Q] + 1, quant_val); - cpi->UVzbin[Q][1] = ((qzbin_factors[Q] * quant_val) + 64) >> 7; - cpi->UVround[Q][1] = (qrounding_factors[Q] * quant_val) >> 7; - cpi->common.UVdequant[Q][1] = quant_val; - cpi->zrun_zbin_boost_uv[Q][1] = (quant_val * zbin_boost[1]) >> 7; + for (i = 2; i < 16; ++i) { + cpi->Y1quant_fast[Q][i] = cpi->Y1quant_fast[Q][1]; + cpi->Y1quant[Q][i] = cpi->Y1quant[Q][1]; + cpi->Y1quant_shift[Q][i] = cpi->Y1quant_shift[Q][1]; + cpi->Y1zbin[Q][i] = cpi->Y1zbin[Q][1]; + 
cpi->Y1round[Q][i] = cpi->Y1round[Q][1]; + cpi->zrun_zbin_boost_y1[Q][i] = + (cpi->common.Y1dequant[Q][1] * zbin_boost[i]) >> 7; - for (i = 2; i < 16; i++) - { - cpi->Y1quant_fast[Q][i] = cpi->Y1quant_fast[Q][1]; - cpi->Y1quant[Q][i] = cpi->Y1quant[Q][1]; - cpi->Y1quant_shift[Q][i] = cpi->Y1quant_shift[Q][1]; - cpi->Y1zbin[Q][i] = cpi->Y1zbin[Q][1]; - cpi->Y1round[Q][i] = cpi->Y1round[Q][1]; - cpi->zrun_zbin_boost_y1[Q][i] = (cpi->common.Y1dequant[Q][1] * - zbin_boost[i]) >> 7; + cpi->Y2quant_fast[Q][i] = cpi->Y2quant_fast[Q][1]; + cpi->Y2quant[Q][i] = cpi->Y2quant[Q][1]; + cpi->Y2quant_shift[Q][i] = cpi->Y2quant_shift[Q][1]; + cpi->Y2zbin[Q][i] = cpi->Y2zbin[Q][1]; + cpi->Y2round[Q][i] = cpi->Y2round[Q][1]; + cpi->zrun_zbin_boost_y2[Q][i] = + (cpi->common.Y2dequant[Q][1] * zbin_boost[i]) >> 7; - cpi->Y2quant_fast[Q][i] = cpi->Y2quant_fast[Q][1]; - cpi->Y2quant[Q][i] = cpi->Y2quant[Q][1]; - cpi->Y2quant_shift[Q][i] = cpi->Y2quant_shift[Q][1]; - cpi->Y2zbin[Q][i] = cpi->Y2zbin[Q][1]; - cpi->Y2round[Q][i] = cpi->Y2round[Q][1]; - cpi->zrun_zbin_boost_y2[Q][i] = (cpi->common.Y2dequant[Q][1] * - zbin_boost[i]) >> 7; - - cpi->UVquant_fast[Q][i] = cpi->UVquant_fast[Q][1]; - cpi->UVquant[Q][i] = cpi->UVquant[Q][1]; - cpi->UVquant_shift[Q][i] = cpi->UVquant_shift[Q][1]; - cpi->UVzbin[Q][i] = cpi->UVzbin[Q][1]; - cpi->UVround[Q][i] = cpi->UVround[Q][1]; - cpi->zrun_zbin_boost_uv[Q][i] = (cpi->common.UVdequant[Q][1] * - zbin_boost[i]) >> 7; - } + cpi->UVquant_fast[Q][i] = cpi->UVquant_fast[Q][1]; + cpi->UVquant[Q][i] = cpi->UVquant[Q][1]; + cpi->UVquant_shift[Q][i] = cpi->UVquant_shift[Q][1]; + cpi->UVzbin[Q][i] = cpi->UVzbin[Q][1]; + cpi->UVround[Q][i] = cpi->UVround[Q][1]; + cpi->zrun_zbin_boost_uv[Q][i] = + (cpi->common.UVdequant[Q][1] * zbin_boost[i]) >> 7; } + } } -#define ZBIN_EXTRA_Y \ - (( cpi->common.Y1dequant[QIndex][1] * \ - ( x->zbin_over_quant + \ - x->zbin_mode_boost + \ - x->act_zbin_adj ) ) >> 7) +#define ZBIN_EXTRA_Y \ + ((cpi->common.Y1dequant[QIndex][1] * \ 
+ (x->zbin_over_quant + x->zbin_mode_boost + x->act_zbin_adj)) >> \ + 7) -#define ZBIN_EXTRA_UV \ - (( cpi->common.UVdequant[QIndex][1] * \ - ( x->zbin_over_quant + \ - x->zbin_mode_boost + \ - x->act_zbin_adj ) ) >> 7) +#define ZBIN_EXTRA_UV \ + ((cpi->common.UVdequant[QIndex][1] * \ + (x->zbin_over_quant + x->zbin_mode_boost + x->act_zbin_adj)) >> \ + 7) -#define ZBIN_EXTRA_Y2 \ - (( cpi->common.Y2dequant[QIndex][1] * \ - ( (x->zbin_over_quant / 2) + \ - x->zbin_mode_boost + \ - x->act_zbin_adj ) ) >> 7) +#define ZBIN_EXTRA_Y2 \ + ((cpi->common.Y2dequant[QIndex][1] * \ + ((x->zbin_over_quant / 2) + x->zbin_mode_boost + x->act_zbin_adj)) >> \ + 7) -void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip) -{ - int i; - int QIndex; - MACROBLOCKD *xd = &x->e_mbd; - int zbin_extra; +void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x, int ok_to_skip) { + int i; + int QIndex; + MACROBLOCKD *xd = &x->e_mbd; + int zbin_extra; - /* Select the baseline MB Q index. */ - if (xd->segmentation_enabled) - { - /* Abs Value */ - if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA) - QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id]; - /* Delta Value */ - else - { - QIndex = cpi->common.base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][xd->mode_info_context->mbmi.segment_id]; - /* Clamp to valid range */ - QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; - } + /* Select the baseline MB Q index. */ + if (xd->segmentation_enabled) { + /* Abs Value */ + if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA) { + QIndex = xd->segment_feature_data[MB_LVL_ALT_Q] + [xd->mode_info_context->mbmi.segment_id]; + /* Delta Value */ + } else { + QIndex = cpi->common.base_qindex + + xd->segment_feature_data[MB_LVL_ALT_Q] + [xd->mode_info_context->mbmi.segment_id]; + /* Clamp to valid range */ + QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? 
QIndex : MAXQ) : 0; } - else - QIndex = cpi->common.base_qindex; + } else { + QIndex = cpi->common.base_qindex; + } - /* This initialization should be called at least once. Use ok_to_skip to - * decide if it is ok to skip. - * Before encoding a frame, this function is always called with ok_to_skip - * =0, which means no skiping of calculations. The "last" values are - * initialized at that time. - */ - if (!ok_to_skip || QIndex != x->q_index) - { + /* This initialization should be called at least once. Use ok_to_skip to + * decide if it is ok to skip. + * Before encoding a frame, this function is always called with ok_to_skip + * =0, which means no skiping of calculations. The "last" values are + * initialized at that time. + */ + if (!ok_to_skip || QIndex != x->q_index) { + xd->dequant_y1_dc[0] = 1; + xd->dequant_y1[0] = cpi->common.Y1dequant[QIndex][0]; + xd->dequant_y2[0] = cpi->common.Y2dequant[QIndex][0]; + xd->dequant_uv[0] = cpi->common.UVdequant[QIndex][0]; - xd->dequant_y1_dc[0] = 1; - xd->dequant_y1[0] = cpi->common.Y1dequant[QIndex][0]; - xd->dequant_y2[0] = cpi->common.Y2dequant[QIndex][0]; - xd->dequant_uv[0] = cpi->common.UVdequant[QIndex][0]; - - for (i = 1; i < 16; i++) - { - xd->dequant_y1_dc[i] = - xd->dequant_y1[i] = cpi->common.Y1dequant[QIndex][1]; - xd->dequant_y2[i] = cpi->common.Y2dequant[QIndex][1]; - xd->dequant_uv[i] = cpi->common.UVdequant[QIndex][1]; - } + for (i = 1; i < 16; ++i) { + xd->dequant_y1_dc[i] = xd->dequant_y1[i] = + cpi->common.Y1dequant[QIndex][1]; + xd->dequant_y2[i] = cpi->common.Y2dequant[QIndex][1]; + xd->dequant_uv[i] = cpi->common.UVdequant[QIndex][1]; + } #if 1 - /*TODO: Remove dequant from BLOCKD. This is a temporary solution until - * the quantizer code uses a passed in pointer to the dequant constants. - * This will also require modifications to the x86 and neon assembly. 
- * */ - for (i = 0; i < 16; i++) - x->e_mbd.block[i].dequant = xd->dequant_y1; - for (i = 16; i < 24; i++) - x->e_mbd.block[i].dequant = xd->dequant_uv; - x->e_mbd.block[24].dequant = xd->dequant_y2; + /*TODO: Remove dequant from BLOCKD. This is a temporary solution until + * the quantizer code uses a passed in pointer to the dequant constants. + * This will also require modifications to the x86 and neon assembly. + * */ + for (i = 0; i < 16; ++i) x->e_mbd.block[i].dequant = xd->dequant_y1; + for (i = 16; i < 24; ++i) x->e_mbd.block[i].dequant = xd->dequant_uv; + x->e_mbd.block[24].dequant = xd->dequant_y2; #endif - /* Y */ - zbin_extra = ZBIN_EXTRA_Y; - - for (i = 0; i < 16; i++) - { - x->block[i].quant = cpi->Y1quant[QIndex]; - x->block[i].quant_fast = cpi->Y1quant_fast[QIndex]; - x->block[i].quant_shift = cpi->Y1quant_shift[QIndex]; - x->block[i].zbin = cpi->Y1zbin[QIndex]; - x->block[i].round = cpi->Y1round[QIndex]; - x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex]; - x->block[i].zbin_extra = (short)zbin_extra; - } - - /* UV */ - zbin_extra = ZBIN_EXTRA_UV; - - for (i = 16; i < 24; i++) - { - x->block[i].quant = cpi->UVquant[QIndex]; - x->block[i].quant_fast = cpi->UVquant_fast[QIndex]; - x->block[i].quant_shift = cpi->UVquant_shift[QIndex]; - x->block[i].zbin = cpi->UVzbin[QIndex]; - x->block[i].round = cpi->UVround[QIndex]; - x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex]; - x->block[i].zbin_extra = (short)zbin_extra; - } - - /* Y2 */ - zbin_extra = ZBIN_EXTRA_Y2; - - x->block[24].quant_fast = cpi->Y2quant_fast[QIndex]; - x->block[24].quant = cpi->Y2quant[QIndex]; - x->block[24].quant_shift = cpi->Y2quant_shift[QIndex]; - x->block[24].zbin = cpi->Y2zbin[QIndex]; - x->block[24].round = cpi->Y2round[QIndex]; - x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex]; - x->block[24].zbin_extra = (short)zbin_extra; - - /* save this macroblock QIndex for vp8_update_zbin_extra() */ - x->q_index = QIndex; - - 
x->last_zbin_over_quant = x->zbin_over_quant; - x->last_zbin_mode_boost = x->zbin_mode_boost; - x->last_act_zbin_adj = x->act_zbin_adj; - - - - } - else if(x->last_zbin_over_quant != x->zbin_over_quant - || x->last_zbin_mode_boost != x->zbin_mode_boost - || x->last_act_zbin_adj != x->act_zbin_adj) - { - /* Y */ - zbin_extra = ZBIN_EXTRA_Y; - - for (i = 0; i < 16; i++) - x->block[i].zbin_extra = (short)zbin_extra; - - /* UV */ - zbin_extra = ZBIN_EXTRA_UV; - - for (i = 16; i < 24; i++) - x->block[i].zbin_extra = (short)zbin_extra; - - /* Y2 */ - zbin_extra = ZBIN_EXTRA_Y2; - x->block[24].zbin_extra = (short)zbin_extra; - - x->last_zbin_over_quant = x->zbin_over_quant; - x->last_zbin_mode_boost = x->zbin_mode_boost; - x->last_act_zbin_adj = x->act_zbin_adj; - } -} - -void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) -{ - int i; - int QIndex = x->q_index; - int zbin_extra; - /* Y */ zbin_extra = ZBIN_EXTRA_Y; - for (i = 0; i < 16; i++) - x->block[i].zbin_extra = (short)zbin_extra; + for (i = 0; i < 16; ++i) { + x->block[i].quant = cpi->Y1quant[QIndex]; + x->block[i].quant_fast = cpi->Y1quant_fast[QIndex]; + x->block[i].quant_shift = cpi->Y1quant_shift[QIndex]; + x->block[i].zbin = cpi->Y1zbin[QIndex]; + x->block[i].round = cpi->Y1round[QIndex]; + x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_y1[QIndex]; + x->block[i].zbin_extra = (short)zbin_extra; + } /* UV */ zbin_extra = ZBIN_EXTRA_UV; - for (i = 16; i < 24; i++) - x->block[i].zbin_extra = (short)zbin_extra; + for (i = 16; i < 24; ++i) { + x->block[i].quant = cpi->UVquant[QIndex]; + x->block[i].quant_fast = cpi->UVquant_fast[QIndex]; + x->block[i].quant_shift = cpi->UVquant_shift[QIndex]; + x->block[i].zbin = cpi->UVzbin[QIndex]; + x->block[i].round = cpi->UVround[QIndex]; + x->block[i].zrun_zbin_boost = cpi->zrun_zbin_boost_uv[QIndex]; + x->block[i].zbin_extra = (short)zbin_extra; + } + + /* Y2 */ + zbin_extra = ZBIN_EXTRA_Y2; + + x->block[24].quant_fast = cpi->Y2quant_fast[QIndex]; + 
x->block[24].quant = cpi->Y2quant[QIndex]; + x->block[24].quant_shift = cpi->Y2quant_shift[QIndex]; + x->block[24].zbin = cpi->Y2zbin[QIndex]; + x->block[24].round = cpi->Y2round[QIndex]; + x->block[24].zrun_zbin_boost = cpi->zrun_zbin_boost_y2[QIndex]; + x->block[24].zbin_extra = (short)zbin_extra; + + /* save this macroblock QIndex for vp8_update_zbin_extra() */ + x->q_index = QIndex; + + x->last_zbin_over_quant = x->zbin_over_quant; + x->last_zbin_mode_boost = x->zbin_mode_boost; + x->last_act_zbin_adj = x->act_zbin_adj; + + } else if (x->last_zbin_over_quant != x->zbin_over_quant || + x->last_zbin_mode_boost != x->zbin_mode_boost || + x->last_act_zbin_adj != x->act_zbin_adj) { + /* Y */ + zbin_extra = ZBIN_EXTRA_Y; + + for (i = 0; i < 16; ++i) x->block[i].zbin_extra = (short)zbin_extra; + + /* UV */ + zbin_extra = ZBIN_EXTRA_UV; + + for (i = 16; i < 24; ++i) x->block[i].zbin_extra = (short)zbin_extra; /* Y2 */ zbin_extra = ZBIN_EXTRA_Y2; x->block[24].zbin_extra = (short)zbin_extra; + + x->last_zbin_over_quant = x->zbin_over_quant; + x->last_zbin_mode_boost = x->zbin_mode_boost; + x->last_act_zbin_adj = x->act_zbin_adj; + } +} + +void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x) { + int i; + int QIndex = x->q_index; + int zbin_extra; + + /* Y */ + zbin_extra = ZBIN_EXTRA_Y; + + for (i = 0; i < 16; ++i) x->block[i].zbin_extra = (short)zbin_extra; + + /* UV */ + zbin_extra = ZBIN_EXTRA_UV; + + for (i = 16; i < 24; ++i) x->block[i].zbin_extra = (short)zbin_extra; + + /* Y2 */ + zbin_extra = ZBIN_EXTRA_Y2; + x->block[24].zbin_extra = (short)zbin_extra; } #undef ZBIN_EXTRA_Y #undef ZBIN_EXTRA_UV #undef ZBIN_EXTRA_Y2 -void vp8cx_frame_init_quantizer(VP8_COMP *cpi) -{ - /* Clear Zbin mode boost for default case */ - cpi->mb.zbin_mode_boost = 0; +void vp8cx_frame_init_quantizer(VP8_COMP *cpi) { + /* Clear Zbin mode boost for default case */ + cpi->mb.zbin_mode_boost = 0; - /* MB level quantizer setup */ - vp8cx_mb_init_quantizer(cpi, &cpi->mb, 0); + /* MB level 
quantizer setup */ + vp8cx_mb_init_quantizer(cpi, &cpi->mb, 0); } +void vp8_set_quantizer(struct VP8_COMP *cpi, int Q) { + VP8_COMMON *cm = &cpi->common; + MACROBLOCKD *mbd = &cpi->mb.e_mbd; + int update = 0; + int new_delta_q; + int new_uv_delta_q; + cm->base_qindex = Q; -void vp8_set_quantizer(struct VP8_COMP *cpi, int Q) -{ - VP8_COMMON *cm = &cpi->common; - MACROBLOCKD *mbd = &cpi->mb.e_mbd; - int update = 0; - int new_delta_q; - int new_uv_delta_q; - cm->base_qindex = Q; + /* if any of the delta_q values are changing update flag has to be set */ + /* currently only y2dc_delta_q may change */ - /* if any of the delta_q values are changing update flag has to be set */ - /* currently only y2dc_delta_q may change */ + cm->y1dc_delta_q = 0; + cm->y2ac_delta_q = 0; - cm->y1dc_delta_q = 0; - cm->y2ac_delta_q = 0; + if (Q < 4) { + new_delta_q = 4 - Q; + } else { + new_delta_q = 0; + } - if (Q < 4) - { - new_delta_q = 4-Q; + update |= cm->y2dc_delta_q != new_delta_q; + cm->y2dc_delta_q = new_delta_q; + + new_uv_delta_q = 0; + // For screen content, lower the q value for UV channel. For now, select + // conservative delta; same delta for dc and ac, and decrease it with lower + // Q, and set to 0 below some threshold. May want to condition this in + // future on the variance/energy in UV channel. + if (cpi->oxcf.screen_content_mode && Q > 40) { + new_uv_delta_q = -(int)(0.15 * Q); + // Check range: magnitude of delta is 4 bits. + if (new_uv_delta_q < -15) { + new_uv_delta_q = -15; } - else - new_delta_q = 0; + } + update |= cm->uvdc_delta_q != new_uv_delta_q; + cm->uvdc_delta_q = new_uv_delta_q; + cm->uvac_delta_q = new_uv_delta_q; - update |= cm->y2dc_delta_q != new_delta_q; - cm->y2dc_delta_q = new_delta_q; - - new_uv_delta_q = 0; - // For screen content, lower the q value for UV channel. For now, select - // conservative delta; same delta for dc and ac, and decrease it with lower - // Q, and set to 0 below some threshold. 
May want to condition this in - // future on the variance/energy in UV channel. - if (cpi->oxcf.screen_content_mode && Q > 40) { - new_uv_delta_q = -(int)(0.15 * Q); - // Check range: magnitude of delta is 4 bits. - if (new_uv_delta_q < -15) { - new_uv_delta_q = -15; - } - } - update |= cm->uvdc_delta_q != new_uv_delta_q; - cm->uvdc_delta_q = new_uv_delta_q; - cm->uvac_delta_q = new_uv_delta_q; - - /* Set Segment specific quatizers */ - mbd->segment_feature_data[MB_LVL_ALT_Q][0] = cpi->segment_feature_data[MB_LVL_ALT_Q][0]; - mbd->segment_feature_data[MB_LVL_ALT_Q][1] = cpi->segment_feature_data[MB_LVL_ALT_Q][1]; - mbd->segment_feature_data[MB_LVL_ALT_Q][2] = cpi->segment_feature_data[MB_LVL_ALT_Q][2]; - mbd->segment_feature_data[MB_LVL_ALT_Q][3] = cpi->segment_feature_data[MB_LVL_ALT_Q][3]; - - /* quantizer has to be reinitialized for any delta_q changes */ - if(update) - vp8cx_init_quantizer(cpi); + /* Set Segment specific quatizers */ + mbd->segment_feature_data[MB_LVL_ALT_Q][0] = + cpi->segment_feature_data[MB_LVL_ALT_Q][0]; + mbd->segment_feature_data[MB_LVL_ALT_Q][1] = + cpi->segment_feature_data[MB_LVL_ALT_Q][1]; + mbd->segment_feature_data[MB_LVL_ALT_Q][2] = + cpi->segment_feature_data[MB_LVL_ALT_Q][2]; + mbd->segment_feature_data[MB_LVL_ALT_Q][3] = + cpi->segment_feature_data[MB_LVL_ALT_Q][3]; + /* quantizer has to be reinitialized for any delta_q changes */ + if (update) vp8cx_init_quantizer(cpi); } diff --git a/libs/libvpx/vp8/encoder/x86/denoising_sse2.c b/libs/libvpx/vp8/encoder/x86/denoising_sse2.c index 101d646ef4..89cad53356 100644 --- a/libs/libvpx/vp8/encoder/x86/denoising_sse2.c +++ b/libs/libvpx/vp8/encoder/x86/denoising_sse2.c @@ -20,360 +20,353 @@ /* Compute the sum of all pixel differences of this MB. 
*/ static INLINE unsigned int abs_sum_diff_16x1(__m128i acc_diff) { const __m128i k_1 = _mm_set1_epi16(1); - const __m128i acc_diff_lo = _mm_srai_epi16( - _mm_unpacklo_epi8(acc_diff, acc_diff), 8); - const __m128i acc_diff_hi = _mm_srai_epi16( - _mm_unpackhi_epi8(acc_diff, acc_diff), 8); + const __m128i acc_diff_lo = + _mm_srai_epi16(_mm_unpacklo_epi8(acc_diff, acc_diff), 8); + const __m128i acc_diff_hi = + _mm_srai_epi16(_mm_unpackhi_epi8(acc_diff, acc_diff), 8); const __m128i acc_diff_16 = _mm_add_epi16(acc_diff_lo, acc_diff_hi); const __m128i hg_fe_dc_ba = _mm_madd_epi16(acc_diff_16, k_1); - const __m128i hgfe_dcba = _mm_add_epi32(hg_fe_dc_ba, - _mm_srli_si128(hg_fe_dc_ba, 8)); - const __m128i hgfedcba = _mm_add_epi32(hgfe_dcba, - _mm_srli_si128(hgfe_dcba, 4)); + const __m128i hgfe_dcba = + _mm_add_epi32(hg_fe_dc_ba, _mm_srli_si128(hg_fe_dc_ba, 8)); + const __m128i hgfedcba = + _mm_add_epi32(hgfe_dcba, _mm_srli_si128(hgfe_dcba, 4)); unsigned int sum_diff = abs(_mm_cvtsi128_si32(hgfedcba)); return sum_diff; } int vp8_denoiser_filter_sse2(unsigned char *mc_running_avg_y, - int mc_avg_y_stride, - unsigned char *running_avg_y, int avg_y_stride, - unsigned char *sig, int sig_stride, - unsigned int motion_magnitude, - int increase_denoising) -{ - unsigned char *running_avg_y_start = running_avg_y; - unsigned char *sig_start = sig; - unsigned int sum_diff_thresh; - int r; - int shift_inc = (increase_denoising && - motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 1 : 0; - __m128i acc_diff = _mm_setzero_si128(); - const __m128i k_0 = _mm_setzero_si128(); - const __m128i k_4 = _mm_set1_epi8(4 + shift_inc); - const __m128i k_8 = _mm_set1_epi8(8); - const __m128i k_16 = _mm_set1_epi8(16); - /* Modify each level's adjustment according to motion_magnitude. */ - const __m128i l3 = _mm_set1_epi8( - (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? - 7 + shift_inc : 6); - /* Difference between level 3 and level 2 is 2. 
*/ - const __m128i l32 = _mm_set1_epi8(2); - /* Difference between level 2 and level 1 is 1. */ - const __m128i l21 = _mm_set1_epi8(1); + int mc_avg_y_stride, unsigned char *running_avg_y, + int avg_y_stride, unsigned char *sig, + int sig_stride, unsigned int motion_magnitude, + int increase_denoising) { + unsigned char *running_avg_y_start = running_avg_y; + unsigned char *sig_start = sig; + unsigned int sum_diff_thresh; + int r; + int shift_inc = + (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) + ? 1 + : 0; + __m128i acc_diff = _mm_setzero_si128(); + const __m128i k_0 = _mm_setzero_si128(); + const __m128i k_4 = _mm_set1_epi8(4 + shift_inc); + const __m128i k_8 = _mm_set1_epi8(8); + const __m128i k_16 = _mm_set1_epi8(16); + /* Modify each level's adjustment according to motion_magnitude. */ + const __m128i l3 = _mm_set1_epi8( + (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 + shift_inc : 6); + /* Difference between level 3 and level 2 is 2. */ + const __m128i l32 = _mm_set1_epi8(2); + /* Difference between level 2 and level 1 is 1. */ + const __m128i l21 = _mm_set1_epi8(1); - for (r = 0; r < 16; ++r) - { - /* Calculate differences */ - const __m128i v_sig = _mm_loadu_si128((__m128i *)(&sig[0])); - const __m128i v_mc_running_avg_y = _mm_loadu_si128( - (__m128i *)(&mc_running_avg_y[0])); - __m128i v_running_avg_y; - const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig); - const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y); - /* Obtain the sign. FF if diff is negative. */ - const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0); - /* Clamp absolute difference to 16 to be used to get mask. Doing this - * allows us to use _mm_cmpgt_epi8, which operates on signed byte. 
*/ - const __m128i clamped_absdiff = _mm_min_epu8( - _mm_or_si128(pdiff, ndiff), k_16); - /* Get masks for l2 l1 and l0 adjustments */ - const __m128i mask2 = _mm_cmpgt_epi8(k_16, clamped_absdiff); - const __m128i mask1 = _mm_cmpgt_epi8(k_8, clamped_absdiff); - const __m128i mask0 = _mm_cmpgt_epi8(k_4, clamped_absdiff); - /* Get adjustments for l2, l1, and l0 */ - __m128i adj2 = _mm_and_si128(mask2, l32); - const __m128i adj1 = _mm_and_si128(mask1, l21); - const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff); - __m128i adj, padj, nadj; + for (r = 0; r < 16; ++r) { + /* Calculate differences */ + const __m128i v_sig = _mm_loadu_si128((__m128i *)(&sig[0])); + const __m128i v_mc_running_avg_y = + _mm_loadu_si128((__m128i *)(&mc_running_avg_y[0])); + __m128i v_running_avg_y; + const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig); + const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y); + /* Obtain the sign. FF if diff is negative. */ + const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0); + /* Clamp absolute difference to 16 to be used to get mask. Doing this + * allows us to use _mm_cmpgt_epi8, which operates on signed byte. */ + const __m128i clamped_absdiff = + _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_16); + /* Get masks for l2 l1 and l0 adjustments */ + const __m128i mask2 = _mm_cmpgt_epi8(k_16, clamped_absdiff); + const __m128i mask1 = _mm_cmpgt_epi8(k_8, clamped_absdiff); + const __m128i mask0 = _mm_cmpgt_epi8(k_4, clamped_absdiff); + /* Get adjustments for l2, l1, and l0 */ + __m128i adj2 = _mm_and_si128(mask2, l32); + const __m128i adj1 = _mm_and_si128(mask1, l21); + const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff); + __m128i adj, padj, nadj; - /* Combine the adjustments and get absolute adjustments. */ - adj2 = _mm_add_epi8(adj2, adj1); - adj = _mm_sub_epi8(l3, adj2); - adj = _mm_andnot_si128(mask0, adj); - adj = _mm_or_si128(adj, adj0); + /* Combine the adjustments and get absolute adjustments. 
*/ + adj2 = _mm_add_epi8(adj2, adj1); + adj = _mm_sub_epi8(l3, adj2); + adj = _mm_andnot_si128(mask0, adj); + adj = _mm_or_si128(adj, adj0); - /* Restore the sign and get positive and negative adjustments. */ - padj = _mm_andnot_si128(diff_sign, adj); - nadj = _mm_and_si128(diff_sign, adj); + /* Restore the sign and get positive and negative adjustments. */ + padj = _mm_andnot_si128(diff_sign, adj); + nadj = _mm_and_si128(diff_sign, adj); - /* Calculate filtered value. */ - v_running_avg_y = _mm_adds_epu8(v_sig, padj); - v_running_avg_y = _mm_subs_epu8(v_running_avg_y, nadj); - _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y); + /* Calculate filtered value. */ + v_running_avg_y = _mm_adds_epu8(v_sig, padj); + v_running_avg_y = _mm_subs_epu8(v_running_avg_y, nadj); + _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y); - /* Adjustments <=7, and each element in acc_diff can fit in signed - * char. - */ - acc_diff = _mm_adds_epi8(acc_diff, padj); - acc_diff = _mm_subs_epi8(acc_diff, nadj); + /* Adjustments <=7, and each element in acc_diff can fit in signed + * char. + */ + acc_diff = _mm_adds_epi8(acc_diff, padj); + acc_diff = _mm_subs_epi8(acc_diff, nadj); - /* Update pointers for next iteration. */ - sig += sig_stride; - mc_running_avg_y += mc_avg_y_stride; - running_avg_y += avg_y_stride; - } + /* Update pointers for next iteration. */ + sig += sig_stride; + mc_running_avg_y += mc_avg_y_stride; + running_avg_y += avg_y_stride; + } - { - /* Compute the sum of all pixel differences of this MB. */ - unsigned int abs_sum_diff = abs_sum_diff_16x1(acc_diff); - sum_diff_thresh = SUM_DIFF_THRESHOLD; - if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH; - if (abs_sum_diff > sum_diff_thresh) { - // Before returning to copy the block (i.e., apply no denoising), - // check if we can still apply some (weaker) temporal filtering to - // this block, that would otherwise not be denoised at all. 
Simplest - // is to apply an additional adjustment to running_avg_y to bring it - // closer to sig. The adjustment is capped by a maximum delta, and - // chosen such that in most cases the resulting sum_diff will be - // within the acceptable range given by sum_diff_thresh. + { + /* Compute the sum of all pixel differences of this MB. */ + unsigned int abs_sum_diff = abs_sum_diff_16x1(acc_diff); + sum_diff_thresh = SUM_DIFF_THRESHOLD; + if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH; + if (abs_sum_diff > sum_diff_thresh) { + // Before returning to copy the block (i.e., apply no denoising), + // check if we can still apply some (weaker) temporal filtering to + // this block, that would otherwise not be denoised at all. Simplest + // is to apply an additional adjustment to running_avg_y to bring it + // closer to sig. The adjustment is capped by a maximum delta, and + // chosen such that in most cases the resulting sum_diff will be + // within the acceptable range given by sum_diff_thresh. - // The delta is set by the excess of absolute pixel diff over the - // threshold. - int delta = ((abs_sum_diff - sum_diff_thresh) >> 8) + 1; - // Only apply the adjustment for max delta up to 3. - if (delta < 4) { - const __m128i k_delta = _mm_set1_epi8(delta); - sig -= sig_stride * 16; - mc_running_avg_y -= mc_avg_y_stride * 16; - running_avg_y -= avg_y_stride * 16; - for (r = 0; r < 16; ++r) { - __m128i v_running_avg_y = - _mm_loadu_si128((__m128i *)(&running_avg_y[0])); - // Calculate differences. - const __m128i v_sig = _mm_loadu_si128((__m128i *)(&sig[0])); - const __m128i v_mc_running_avg_y = - _mm_loadu_si128((__m128i *)(&mc_running_avg_y[0])); - const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig); - const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y); - // Obtain the sign. FF if diff is negative. - const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0); - // Clamp absolute difference to delta to get the adjustment. 
- const __m128i adj = - _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta); - // Restore the sign and get positive and negative adjustments. - __m128i padj, nadj; - padj = _mm_andnot_si128(diff_sign, adj); - nadj = _mm_and_si128(diff_sign, adj); - // Calculate filtered value. - v_running_avg_y = _mm_subs_epu8(v_running_avg_y, padj); - v_running_avg_y = _mm_adds_epu8(v_running_avg_y, nadj); - _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y); + // The delta is set by the excess of absolute pixel diff over the + // threshold. + int delta = ((abs_sum_diff - sum_diff_thresh) >> 8) + 1; + // Only apply the adjustment for max delta up to 3. + if (delta < 4) { + const __m128i k_delta = _mm_set1_epi8(delta); + sig -= sig_stride * 16; + mc_running_avg_y -= mc_avg_y_stride * 16; + running_avg_y -= avg_y_stride * 16; + for (r = 0; r < 16; ++r) { + __m128i v_running_avg_y = + _mm_loadu_si128((__m128i *)(&running_avg_y[0])); + // Calculate differences. + const __m128i v_sig = _mm_loadu_si128((__m128i *)(&sig[0])); + const __m128i v_mc_running_avg_y = + _mm_loadu_si128((__m128i *)(&mc_running_avg_y[0])); + const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig); + const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y); + // Obtain the sign. FF if diff is negative. + const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0); + // Clamp absolute difference to delta to get the adjustment. + const __m128i adj = _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta); + // Restore the sign and get positive and negative adjustments. + __m128i padj, nadj; + padj = _mm_andnot_si128(diff_sign, adj); + nadj = _mm_and_si128(diff_sign, adj); + // Calculate filtered value. + v_running_avg_y = _mm_subs_epu8(v_running_avg_y, padj); + v_running_avg_y = _mm_adds_epu8(v_running_avg_y, nadj); + _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y); - // Accumulate the adjustments. 
- acc_diff = _mm_subs_epi8(acc_diff, padj); - acc_diff = _mm_adds_epi8(acc_diff, nadj); + // Accumulate the adjustments. + acc_diff = _mm_subs_epi8(acc_diff, padj); + acc_diff = _mm_adds_epi8(acc_diff, nadj); - // Update pointers for next iteration. - sig += sig_stride; - mc_running_avg_y += mc_avg_y_stride; - running_avg_y += avg_y_stride; - } - abs_sum_diff = abs_sum_diff_16x1(acc_diff); - if (abs_sum_diff > sum_diff_thresh) { - return COPY_BLOCK; - } - } else { - return COPY_BLOCK; - } + // Update pointers for next iteration. + sig += sig_stride; + mc_running_avg_y += mc_avg_y_stride; + running_avg_y += avg_y_stride; } + abs_sum_diff = abs_sum_diff_16x1(acc_diff); + if (abs_sum_diff > sum_diff_thresh) { + return COPY_BLOCK; + } + } else { + return COPY_BLOCK; + } } + } - vp8_copy_mem16x16(running_avg_y_start, avg_y_stride, sig_start, sig_stride); - return FILTER_BLOCK; + vp8_copy_mem16x16(running_avg_y_start, avg_y_stride, sig_start, sig_stride); + return FILTER_BLOCK; } int vp8_denoiser_filter_uv_sse2(unsigned char *mc_running_avg, - int mc_avg_stride, - unsigned char *running_avg, int avg_stride, - unsigned char *sig, int sig_stride, - unsigned int motion_magnitude, - int increase_denoising) { - unsigned char *running_avg_start = running_avg; - unsigned char *sig_start = sig; - unsigned int sum_diff_thresh; - int r; - int shift_inc = (increase_denoising && - motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) ? 1 : 0; - __m128i acc_diff = _mm_setzero_si128(); - const __m128i k_0 = _mm_setzero_si128(); - const __m128i k_4 = _mm_set1_epi8(4 + shift_inc); - const __m128i k_8 = _mm_set1_epi8(8); - const __m128i k_16 = _mm_set1_epi8(16); - /* Modify each level's adjustment according to motion_magnitude. */ - const __m128i l3 = _mm_set1_epi8( - (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) ? - 7 + shift_inc : 6); - /* Difference between level 3 and level 2 is 2. */ - const __m128i l32 = _mm_set1_epi8(2); - /* Difference between level 2 and level 1 is 1. 
*/ - const __m128i l21 = _mm_set1_epi8(1); + int mc_avg_stride, unsigned char *running_avg, + int avg_stride, unsigned char *sig, + int sig_stride, unsigned int motion_magnitude, + int increase_denoising) { + unsigned char *running_avg_start = running_avg; + unsigned char *sig_start = sig; + unsigned int sum_diff_thresh; + int r; + int shift_inc = + (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) + ? 1 + : 0; + __m128i acc_diff = _mm_setzero_si128(); + const __m128i k_0 = _mm_setzero_si128(); + const __m128i k_4 = _mm_set1_epi8(4 + shift_inc); + const __m128i k_8 = _mm_set1_epi8(8); + const __m128i k_16 = _mm_set1_epi8(16); + /* Modify each level's adjustment according to motion_magnitude. */ + const __m128i l3 = _mm_set1_epi8( + (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD_UV) ? 7 + shift_inc : 6); + /* Difference between level 3 and level 2 is 2. */ + const __m128i l32 = _mm_set1_epi8(2); + /* Difference between level 2 and level 1 is 1. */ + const __m128i l21 = _mm_set1_epi8(1); + { + const __m128i k_1 = _mm_set1_epi16(1); + __m128i vec_sum_block = _mm_setzero_si128(); + + // Avoid denoising color signal if its close to average level. + for (r = 0; r < 8; ++r) { + const __m128i v_sig = _mm_loadl_epi64((__m128i *)(&sig[0])); + const __m128i v_sig_unpack = _mm_unpacklo_epi8(v_sig, k_0); + vec_sum_block = _mm_add_epi16(vec_sum_block, v_sig_unpack); + sig += sig_stride; + } + sig -= sig_stride * 8; { - const __m128i k_1 = _mm_set1_epi16(1); - __m128i vec_sum_block = _mm_setzero_si128(); - - // Avoid denoising color signal if its close to average level. 
- for (r = 0; r < 8; ++r) { - const __m128i v_sig = _mm_loadl_epi64((__m128i *)(&sig[0])); - const __m128i v_sig_unpack = _mm_unpacklo_epi8(v_sig, k_0); - vec_sum_block = _mm_add_epi16(vec_sum_block, v_sig_unpack); - sig += sig_stride; + const __m128i hg_fe_dc_ba = _mm_madd_epi16(vec_sum_block, k_1); + const __m128i hgfe_dcba = + _mm_add_epi32(hg_fe_dc_ba, _mm_srli_si128(hg_fe_dc_ba, 8)); + const __m128i hgfedcba = + _mm_add_epi32(hgfe_dcba, _mm_srli_si128(hgfe_dcba, 4)); + const int sum_block = _mm_cvtsi128_si32(hgfedcba); + if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) { + return COPY_BLOCK; } - sig -= sig_stride * 8; - { - const __m128i hg_fe_dc_ba = _mm_madd_epi16(vec_sum_block, k_1); - const __m128i hgfe_dcba = _mm_add_epi32(hg_fe_dc_ba, - _mm_srli_si128(hg_fe_dc_ba, 8)); - const __m128i hgfedcba = _mm_add_epi32(hgfe_dcba, - _mm_srli_si128(hgfe_dcba, 4)); - const int sum_block = _mm_cvtsi128_si32(hgfedcba); - if (abs(sum_block - (128 * 8 * 8)) < SUM_DIFF_FROM_AVG_THRESH_UV) { + } + } + + for (r = 0; r < 4; ++r) { + /* Calculate differences */ + const __m128i v_sig_low = + _mm_castpd_si128(_mm_load_sd((double *)(&sig[0]))); + const __m128i v_sig = _mm_castpd_si128(_mm_loadh_pd( + _mm_castsi128_pd(v_sig_low), (double *)(&sig[sig_stride]))); + const __m128i v_mc_running_avg_low = + _mm_castpd_si128(_mm_load_sd((double *)(&mc_running_avg[0]))); + const __m128i v_mc_running_avg = _mm_castpd_si128( + _mm_loadh_pd(_mm_castsi128_pd(v_mc_running_avg_low), + (double *)(&mc_running_avg[mc_avg_stride]))); + const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg, v_sig); + const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg); + /* Obtain the sign. FF if diff is negative. */ + const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0); + /* Clamp absolute difference to 16 to be used to get mask. Doing this + * allows us to use _mm_cmpgt_epi8, which operates on signed byte. 
*/ + const __m128i clamped_absdiff = + _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_16); + /* Get masks for l2 l1 and l0 adjustments */ + const __m128i mask2 = _mm_cmpgt_epi8(k_16, clamped_absdiff); + const __m128i mask1 = _mm_cmpgt_epi8(k_8, clamped_absdiff); + const __m128i mask0 = _mm_cmpgt_epi8(k_4, clamped_absdiff); + /* Get adjustments for l2, l1, and l0 */ + __m128i adj2 = _mm_and_si128(mask2, l32); + const __m128i adj1 = _mm_and_si128(mask1, l21); + const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff); + __m128i adj, padj, nadj; + __m128i v_running_avg; + + /* Combine the adjustments and get absolute adjustments. */ + adj2 = _mm_add_epi8(adj2, adj1); + adj = _mm_sub_epi8(l3, adj2); + adj = _mm_andnot_si128(mask0, adj); + adj = _mm_or_si128(adj, adj0); + + /* Restore the sign and get positive and negative adjustments. */ + padj = _mm_andnot_si128(diff_sign, adj); + nadj = _mm_and_si128(diff_sign, adj); + + /* Calculate filtered value. */ + v_running_avg = _mm_adds_epu8(v_sig, padj); + v_running_avg = _mm_subs_epu8(v_running_avg, nadj); + + _mm_storel_pd((double *)&running_avg[0], _mm_castsi128_pd(v_running_avg)); + _mm_storeh_pd((double *)&running_avg[avg_stride], + _mm_castsi128_pd(v_running_avg)); + + /* Adjustments <=7, and each element in acc_diff can fit in signed + * char. + */ + acc_diff = _mm_adds_epi8(acc_diff, padj); + acc_diff = _mm_subs_epi8(acc_diff, nadj); + + /* Update pointers for next iteration. */ + sig += sig_stride * 2; + mc_running_avg += mc_avg_stride * 2; + running_avg += avg_stride * 2; + } + + { + unsigned int abs_sum_diff = abs_sum_diff_16x1(acc_diff); + sum_diff_thresh = SUM_DIFF_THRESHOLD_UV; + if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV; + if (abs_sum_diff > sum_diff_thresh) { + // Before returning to copy the block (i.e., apply no denoising), + // check if we can still apply some (weaker) temporal filtering to + // this block, that would otherwise not be denoised at all. 
Simplest + // is to apply an additional adjustment to running_avg_y to bring it + // closer to sig. The adjustment is capped by a maximum delta, and + // chosen such that in most cases the resulting sum_diff will be + // within the acceptable range given by sum_diff_thresh. + + // The delta is set by the excess of absolute pixel diff over the + // threshold. + int delta = ((abs_sum_diff - sum_diff_thresh) >> 8) + 1; + // Only apply the adjustment for max delta up to 3. + if (delta < 4) { + const __m128i k_delta = _mm_set1_epi8(delta); + sig -= sig_stride * 8; + mc_running_avg -= mc_avg_stride * 8; + running_avg -= avg_stride * 8; + for (r = 0; r < 4; ++r) { + // Calculate differences. + const __m128i v_sig_low = + _mm_castpd_si128(_mm_load_sd((double *)(&sig[0]))); + const __m128i v_sig = _mm_castpd_si128(_mm_loadh_pd( + _mm_castsi128_pd(v_sig_low), (double *)(&sig[sig_stride]))); + const __m128i v_mc_running_avg_low = + _mm_castpd_si128(_mm_load_sd((double *)(&mc_running_avg[0]))); + const __m128i v_mc_running_avg = _mm_castpd_si128( + _mm_loadh_pd(_mm_castsi128_pd(v_mc_running_avg_low), + (double *)(&mc_running_avg[mc_avg_stride]))); + const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg, v_sig); + const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg); + // Obtain the sign. FF if diff is negative. + const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0); + // Clamp absolute difference to delta to get the adjustment. + const __m128i adj = _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta); + // Restore the sign and get positive and negative adjustments. + __m128i padj, nadj; + const __m128i v_running_avg_low = + _mm_castpd_si128(_mm_load_sd((double *)(&running_avg[0]))); + __m128i v_running_avg = _mm_castpd_si128( + _mm_loadh_pd(_mm_castsi128_pd(v_running_avg_low), + (double *)(&running_avg[avg_stride]))); + padj = _mm_andnot_si128(diff_sign, adj); + nadj = _mm_and_si128(diff_sign, adj); + // Calculate filtered value. 
+ v_running_avg = _mm_subs_epu8(v_running_avg, padj); + v_running_avg = _mm_adds_epu8(v_running_avg, nadj); + + _mm_storel_pd((double *)&running_avg[0], + _mm_castsi128_pd(v_running_avg)); + _mm_storeh_pd((double *)&running_avg[avg_stride], + _mm_castsi128_pd(v_running_avg)); + + // Accumulate the adjustments. + acc_diff = _mm_subs_epi8(acc_diff, padj); + acc_diff = _mm_adds_epi8(acc_diff, nadj); + + // Update pointers for next iteration. + sig += sig_stride * 2; + mc_running_avg += mc_avg_stride * 2; + running_avg += avg_stride * 2; + } + abs_sum_diff = abs_sum_diff_16x1(acc_diff); + if (abs_sum_diff > sum_diff_thresh) { return COPY_BLOCK; } + } else { + return COPY_BLOCK; } } + } - for (r = 0; r < 4; ++r) { - /* Calculate differences */ - const __m128i v_sig_low = _mm_castpd_si128( - _mm_load_sd((double *)(&sig[0]))); - const __m128i v_sig = _mm_castpd_si128( - _mm_loadh_pd(_mm_castsi128_pd(v_sig_low), - (double *)(&sig[sig_stride]))); - const __m128i v_mc_running_avg_low = _mm_castpd_si128( - _mm_load_sd((double *)(&mc_running_avg[0]))); - const __m128i v_mc_running_avg = _mm_castpd_si128( - _mm_loadh_pd(_mm_castsi128_pd(v_mc_running_avg_low), - (double *)(&mc_running_avg[mc_avg_stride]))); - const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg, v_sig); - const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg); - /* Obtain the sign. FF if diff is negative. */ - const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0); - /* Clamp absolute difference to 16 to be used to get mask. Doing this - * allows us to use _mm_cmpgt_epi8, which operates on signed byte. 
*/ - const __m128i clamped_absdiff = _mm_min_epu8( - _mm_or_si128(pdiff, ndiff), k_16); - /* Get masks for l2 l1 and l0 adjustments */ - const __m128i mask2 = _mm_cmpgt_epi8(k_16, clamped_absdiff); - const __m128i mask1 = _mm_cmpgt_epi8(k_8, clamped_absdiff); - const __m128i mask0 = _mm_cmpgt_epi8(k_4, clamped_absdiff); - /* Get adjustments for l2, l1, and l0 */ - __m128i adj2 = _mm_and_si128(mask2, l32); - const __m128i adj1 = _mm_and_si128(mask1, l21); - const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff); - __m128i adj, padj, nadj; - __m128i v_running_avg; - - /* Combine the adjustments and get absolute adjustments. */ - adj2 = _mm_add_epi8(adj2, adj1); - adj = _mm_sub_epi8(l3, adj2); - adj = _mm_andnot_si128(mask0, adj); - adj = _mm_or_si128(adj, adj0); - - /* Restore the sign and get positive and negative adjustments. */ - padj = _mm_andnot_si128(diff_sign, adj); - nadj = _mm_and_si128(diff_sign, adj); - - /* Calculate filtered value. */ - v_running_avg = _mm_adds_epu8(v_sig, padj); - v_running_avg = _mm_subs_epu8(v_running_avg, nadj); - - _mm_storel_pd((double *)&running_avg[0], - _mm_castsi128_pd(v_running_avg)); - _mm_storeh_pd((double *)&running_avg[avg_stride], - _mm_castsi128_pd(v_running_avg)); - - /* Adjustments <=7, and each element in acc_diff can fit in signed - * char. - */ - acc_diff = _mm_adds_epi8(acc_diff, padj); - acc_diff = _mm_subs_epi8(acc_diff, nadj); - - /* Update pointers for next iteration. */ - sig += sig_stride * 2; - mc_running_avg += mc_avg_stride * 2; - running_avg += avg_stride * 2; - } - - { - unsigned int abs_sum_diff = abs_sum_diff_16x1(acc_diff); - sum_diff_thresh = SUM_DIFF_THRESHOLD_UV; - if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH_UV; - if (abs_sum_diff > sum_diff_thresh) { - // Before returning to copy the block (i.e., apply no denoising), - // check if we can still apply some (weaker) temporal filtering to - // this block, that would otherwise not be denoised at all. 
Simplest - // is to apply an additional adjustment to running_avg_y to bring it - // closer to sig. The adjustment is capped by a maximum delta, and - // chosen such that in most cases the resulting sum_diff will be - // within the acceptable range given by sum_diff_thresh. - - // The delta is set by the excess of absolute pixel diff over the - // threshold. - int delta = ((abs_sum_diff - sum_diff_thresh) >> 8) + 1; - // Only apply the adjustment for max delta up to 3. - if (delta < 4) { - const __m128i k_delta = _mm_set1_epi8(delta); - sig -= sig_stride * 8; - mc_running_avg -= mc_avg_stride * 8; - running_avg -= avg_stride * 8; - for (r = 0; r < 4; ++r) { - // Calculate differences. - const __m128i v_sig_low = _mm_castpd_si128( - _mm_load_sd((double *)(&sig[0]))); - const __m128i v_sig = _mm_castpd_si128( - _mm_loadh_pd(_mm_castsi128_pd(v_sig_low), - (double *)(&sig[sig_stride]))); - const __m128i v_mc_running_avg_low = _mm_castpd_si128( - _mm_load_sd((double *)(&mc_running_avg[0]))); - const __m128i v_mc_running_avg = _mm_castpd_si128( - _mm_loadh_pd(_mm_castsi128_pd(v_mc_running_avg_low), - (double *)(&mc_running_avg[mc_avg_stride]))); - const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg, v_sig); - const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg); - // Obtain the sign. FF if diff is negative. - const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0); - // Clamp absolute difference to delta to get the adjustment. - const __m128i adj = - _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta); - // Restore the sign and get positive and negative adjustments. - __m128i padj, nadj; - const __m128i v_running_avg_low = _mm_castpd_si128( - _mm_load_sd((double *)(&running_avg[0]))); - __m128i v_running_avg = _mm_castpd_si128( - _mm_loadh_pd(_mm_castsi128_pd(v_running_avg_low), - (double *)(&running_avg[avg_stride]))); - padj = _mm_andnot_si128(diff_sign, adj); - nadj = _mm_and_si128(diff_sign, adj); - // Calculate filtered value. 
- v_running_avg = _mm_subs_epu8(v_running_avg, padj); - v_running_avg = _mm_adds_epu8(v_running_avg, nadj); - - _mm_storel_pd((double *)&running_avg[0], - _mm_castsi128_pd(v_running_avg)); - _mm_storeh_pd((double *)&running_avg[avg_stride], - _mm_castsi128_pd(v_running_avg)); - - // Accumulate the adjustments. - acc_diff = _mm_subs_epi8(acc_diff, padj); - acc_diff = _mm_adds_epi8(acc_diff, nadj); - - // Update pointers for next iteration. - sig += sig_stride * 2; - mc_running_avg += mc_avg_stride * 2; - running_avg += avg_stride * 2; - } - abs_sum_diff = abs_sum_diff_16x1(acc_diff); - if (abs_sum_diff > sum_diff_thresh) { - return COPY_BLOCK; - } - } else { - return COPY_BLOCK; - } - } - } - - vp8_copy_mem8x8(running_avg_start, avg_stride, sig_start, sig_stride); - return FILTER_BLOCK; + vp8_copy_mem8x8(running_avg_start, avg_stride, sig_start, sig_stride); + return FILTER_BLOCK; } diff --git a/libs/libvpx/vp8/encoder/x86/quantize_sse4.c b/libs/libvpx/vp8/encoder/x86/quantize_sse4.c index 601dd23a2f..6f2c163492 100644 --- a/libs/libvpx/vp8/encoder/x86/quantize_sse4.c +++ b/libs/libvpx/vp8/encoder/x86/quantize_sse4.c @@ -8,121 +8,119 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include /* SSE4.1 */ #include "./vp8_rtcd.h" #include "vp8/encoder/block.h" #include "vp8/common/entropy.h" /* vp8_default_inv_zig_zag */ -#define SELECT_EOB(i, z, x, y, q) \ - do { \ - short boost = *zbin_boost_ptr; \ - short x_z = _mm_extract_epi16(x, z); \ - short y_z = _mm_extract_epi16(y, z); \ - int cmp = (x_z < boost) | (y_z == 0); \ - zbin_boost_ptr++; \ - if (cmp) \ - break; \ - q = _mm_insert_epi16(q, y_z, z); \ - eob = i; \ - zbin_boost_ptr = b->zrun_zbin_boost; \ - } while (0) +#define SELECT_EOB(i, z, x, y, q) \ + do { \ + short boost = *zbin_boost_ptr; \ + short x_z = _mm_extract_epi16(x, z); \ + short y_z = _mm_extract_epi16(y, z); \ + int cmp = (x_z < boost) | (y_z == 0); \ + zbin_boost_ptr++; \ + if (cmp) break; \ + q = _mm_insert_epi16(q, y_z, z); \ + eob = i; \ + zbin_boost_ptr = b->zrun_zbin_boost; \ + } while (0) void vp8_regular_quantize_b_sse4_1(BLOCK *b, BLOCKD *d) { - char eob = 0; - short *zbin_boost_ptr = b->zrun_zbin_boost; + char eob = 0; + short *zbin_boost_ptr = b->zrun_zbin_boost; - __m128i sz0, x0, sz1, x1, y0, y1, x_minus_zbin0, x_minus_zbin1, - dqcoeff0, dqcoeff1; - __m128i quant_shift0 = _mm_load_si128((__m128i *)(b->quant_shift)); - __m128i quant_shift1 = _mm_load_si128((__m128i *)(b->quant_shift + 8)); - __m128i z0 = _mm_load_si128((__m128i *)(b->coeff)); - __m128i z1 = _mm_load_si128((__m128i *)(b->coeff+8)); - __m128i zbin_extra = _mm_cvtsi32_si128(b->zbin_extra); - __m128i zbin0 = _mm_load_si128((__m128i *)(b->zbin)); - __m128i zbin1 = _mm_load_si128((__m128i *)(b->zbin + 8)); - __m128i round0 = _mm_load_si128((__m128i *)(b->round)); - __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8)); - __m128i quant0 = _mm_load_si128((__m128i *)(b->quant)); - __m128i quant1 = _mm_load_si128((__m128i *)(b->quant + 8)); - __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant)); - __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8)); - __m128i qcoeff0 = _mm_setzero_si128(); - __m128i qcoeff1 = _mm_setzero_si128(); 
+ __m128i sz0, x0, sz1, x1, y0, y1, x_minus_zbin0, x_minus_zbin1, dqcoeff0, + dqcoeff1; + __m128i quant_shift0 = _mm_load_si128((__m128i *)(b->quant_shift)); + __m128i quant_shift1 = _mm_load_si128((__m128i *)(b->quant_shift + 8)); + __m128i z0 = _mm_load_si128((__m128i *)(b->coeff)); + __m128i z1 = _mm_load_si128((__m128i *)(b->coeff + 8)); + __m128i zbin_extra = _mm_cvtsi32_si128(b->zbin_extra); + __m128i zbin0 = _mm_load_si128((__m128i *)(b->zbin)); + __m128i zbin1 = _mm_load_si128((__m128i *)(b->zbin + 8)); + __m128i round0 = _mm_load_si128((__m128i *)(b->round)); + __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8)); + __m128i quant0 = _mm_load_si128((__m128i *)(b->quant)); + __m128i quant1 = _mm_load_si128((__m128i *)(b->quant + 8)); + __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant)); + __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8)); + __m128i qcoeff0 = _mm_setzero_si128(); + __m128i qcoeff1 = _mm_setzero_si128(); - /* Duplicate to all lanes. */ - zbin_extra = _mm_shufflelo_epi16(zbin_extra, 0); - zbin_extra = _mm_unpacklo_epi16(zbin_extra, zbin_extra); + /* Duplicate to all lanes. 
*/ + zbin_extra = _mm_shufflelo_epi16(zbin_extra, 0); + zbin_extra = _mm_unpacklo_epi16(zbin_extra, zbin_extra); - /* Sign of z: z >> 15 */ - sz0 = _mm_srai_epi16(z0, 15); - sz1 = _mm_srai_epi16(z1, 15); + /* Sign of z: z >> 15 */ + sz0 = _mm_srai_epi16(z0, 15); + sz1 = _mm_srai_epi16(z1, 15); - /* x = abs(z): (z ^ sz) - sz */ - x0 = _mm_xor_si128(z0, sz0); - x1 = _mm_xor_si128(z1, sz1); - x0 = _mm_sub_epi16(x0, sz0); - x1 = _mm_sub_epi16(x1, sz1); + /* x = abs(z): (z ^ sz) - sz */ + x0 = _mm_xor_si128(z0, sz0); + x1 = _mm_xor_si128(z1, sz1); + x0 = _mm_sub_epi16(x0, sz0); + x1 = _mm_sub_epi16(x1, sz1); - /* zbin[] + zbin_extra */ - zbin0 = _mm_add_epi16(zbin0, zbin_extra); - zbin1 = _mm_add_epi16(zbin1, zbin_extra); + /* zbin[] + zbin_extra */ + zbin0 = _mm_add_epi16(zbin0, zbin_extra); + zbin1 = _mm_add_epi16(zbin1, zbin_extra); - /* In C x is compared to zbin where zbin = zbin[] + boost + extra. Rebalance - * the equation because boost is the only value which can change: - * x - (zbin[] + extra) >= boost */ - x_minus_zbin0 = _mm_sub_epi16(x0, zbin0); - x_minus_zbin1 = _mm_sub_epi16(x1, zbin1); + /* In C x is compared to zbin where zbin = zbin[] + boost + extra. Rebalance + * the equation because boost is the only value which can change: + * x - (zbin[] + extra) >= boost */ + x_minus_zbin0 = _mm_sub_epi16(x0, zbin0); + x_minus_zbin1 = _mm_sub_epi16(x1, zbin1); - /* All the remaining calculations are valid whether they are done now with - * simd or later inside the loop one at a time. */ - x0 = _mm_add_epi16(x0, round0); - x1 = _mm_add_epi16(x1, round1); + /* All the remaining calculations are valid whether they are done now with + * simd or later inside the loop one at a time. 
*/ + x0 = _mm_add_epi16(x0, round0); + x1 = _mm_add_epi16(x1, round1); - y0 = _mm_mulhi_epi16(x0, quant0); - y1 = _mm_mulhi_epi16(x1, quant1); + y0 = _mm_mulhi_epi16(x0, quant0); + y1 = _mm_mulhi_epi16(x1, quant1); - y0 = _mm_add_epi16(y0, x0); - y1 = _mm_add_epi16(y1, x1); + y0 = _mm_add_epi16(y0, x0); + y1 = _mm_add_epi16(y1, x1); - /* Instead of shifting each value independently we convert the scaling - * factor with 1 << (16 - shift) so we can use multiply/return high half. */ - y0 = _mm_mulhi_epi16(y0, quant_shift0); - y1 = _mm_mulhi_epi16(y1, quant_shift1); + /* Instead of shifting each value independently we convert the scaling + * factor with 1 << (16 - shift) so we can use multiply/return high half. */ + y0 = _mm_mulhi_epi16(y0, quant_shift0); + y1 = _mm_mulhi_epi16(y1, quant_shift1); - /* Return the sign: (y ^ sz) - sz */ - y0 = _mm_xor_si128(y0, sz0); - y1 = _mm_xor_si128(y1, sz1); - y0 = _mm_sub_epi16(y0, sz0); - y1 = _mm_sub_epi16(y1, sz1); + /* Return the sign: (y ^ sz) - sz */ + y0 = _mm_xor_si128(y0, sz0); + y1 = _mm_xor_si128(y1, sz1); + y0 = _mm_sub_epi16(y0, sz0); + y1 = _mm_sub_epi16(y1, sz1); - /* The loop gets unrolled anyway. Avoid the vp8_default_zig_zag1d lookup. 
*/ - SELECT_EOB(1, 0, x_minus_zbin0, y0, qcoeff0); - SELECT_EOB(2, 1, x_minus_zbin0, y0, qcoeff0); - SELECT_EOB(3, 4, x_minus_zbin0, y0, qcoeff0); - SELECT_EOB(4, 0, x_minus_zbin1, y1, qcoeff1); - SELECT_EOB(5, 5, x_minus_zbin0, y0, qcoeff0); - SELECT_EOB(6, 2, x_minus_zbin0, y0, qcoeff0); - SELECT_EOB(7, 3, x_minus_zbin0, y0, qcoeff0); - SELECT_EOB(8, 6, x_minus_zbin0, y0, qcoeff0); - SELECT_EOB(9, 1, x_minus_zbin1, y1, qcoeff1); - SELECT_EOB(10, 4, x_minus_zbin1, y1, qcoeff1); - SELECT_EOB(11, 5, x_minus_zbin1, y1, qcoeff1); - SELECT_EOB(12, 2, x_minus_zbin1, y1, qcoeff1); - SELECT_EOB(13, 7, x_minus_zbin0, y0, qcoeff0); - SELECT_EOB(14, 3, x_minus_zbin1, y1, qcoeff1); - SELECT_EOB(15, 6, x_minus_zbin1, y1, qcoeff1); - SELECT_EOB(16, 7, x_minus_zbin1, y1, qcoeff1); + /* The loop gets unrolled anyway. Avoid the vp8_default_zig_zag1d lookup. */ + SELECT_EOB(1, 0, x_minus_zbin0, y0, qcoeff0); + SELECT_EOB(2, 1, x_minus_zbin0, y0, qcoeff0); + SELECT_EOB(3, 4, x_minus_zbin0, y0, qcoeff0); + SELECT_EOB(4, 0, x_minus_zbin1, y1, qcoeff1); + SELECT_EOB(5, 5, x_minus_zbin0, y0, qcoeff0); + SELECT_EOB(6, 2, x_minus_zbin0, y0, qcoeff0); + SELECT_EOB(7, 3, x_minus_zbin0, y0, qcoeff0); + SELECT_EOB(8, 6, x_minus_zbin0, y0, qcoeff0); + SELECT_EOB(9, 1, x_minus_zbin1, y1, qcoeff1); + SELECT_EOB(10, 4, x_minus_zbin1, y1, qcoeff1); + SELECT_EOB(11, 5, x_minus_zbin1, y1, qcoeff1); + SELECT_EOB(12, 2, x_minus_zbin1, y1, qcoeff1); + SELECT_EOB(13, 7, x_minus_zbin0, y0, qcoeff0); + SELECT_EOB(14, 3, x_minus_zbin1, y1, qcoeff1); + SELECT_EOB(15, 6, x_minus_zbin1, y1, qcoeff1); + SELECT_EOB(16, 7, x_minus_zbin1, y1, qcoeff1); - _mm_store_si128((__m128i *)(d->qcoeff), qcoeff0); - _mm_store_si128((__m128i *)(d->qcoeff + 8), qcoeff1); + _mm_store_si128((__m128i *)(d->qcoeff), qcoeff0); + _mm_store_si128((__m128i *)(d->qcoeff + 8), qcoeff1); - dqcoeff0 = _mm_mullo_epi16(qcoeff0, dequant0); - dqcoeff1 = _mm_mullo_epi16(qcoeff1, dequant1); + dqcoeff0 = _mm_mullo_epi16(qcoeff0, dequant0); + 
dqcoeff1 = _mm_mullo_epi16(qcoeff1, dequant1); - _mm_store_si128((__m128i *)(d->dqcoeff), dqcoeff0); - _mm_store_si128((__m128i *)(d->dqcoeff + 8), dqcoeff1); + _mm_store_si128((__m128i *)(d->dqcoeff), dqcoeff0); + _mm_store_si128((__m128i *)(d->dqcoeff + 8), dqcoeff1); - *d->eob = eob; + *d->eob = eob; } diff --git a/libs/libvpx/vp8/encoder/x86/quantize_ssse3.c b/libs/libvpx/vp8/encoder/x86/quantize_ssse3.c index 14282db801..322f0a151f 100644 --- a/libs/libvpx/vp8/encoder/x86/quantize_ssse3.c +++ b/libs/libvpx/vp8/encoder/x86/quantize_ssse3.c @@ -20,21 +20,19 @@ static int bsr(int mask) { unsigned long eob; _BitScanReverse(&eob, mask); eob++; - if (mask == 0) - eob = 0; + if (mask == 0) eob = 0; return eob; } #else static int bsr(int mask) { int eob; #if defined(__GNUC__) && __GNUC__ - __asm__ __volatile__("bsr %1, %0" : "=r" (eob) : "r" (mask) : "flags"); + __asm__ __volatile__("bsr %1, %0" : "=r"(eob) : "r"(mask) : "flags"); #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) - asm volatile("bsr %1, %0" : "=r" (eob) : "r" (mask) : "flags"); + asm volatile("bsr %1, %0" : "=r"(eob) : "r"(mask) : "flags"); #endif eob++; - if (mask == 0) - eob = 0; + if (mask == 0) eob = 0; return eob; } #endif @@ -53,8 +51,9 @@ void vp8_fast_quantize_b_ssse3(BLOCK *b, BLOCKD *d) { __m128i sz0, sz1, x, x0, x1, y0, y1, zeros, abs0, abs1; - DECLARE_ALIGNED(16, const uint8_t, pshufb_zig_zag_mask[16]) = - { 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15 }; + DECLARE_ALIGNED(16, const uint8_t, pshufb_zig_zag_mask[16]) = { + 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15 + }; __m128i zig_zag = _mm_load_si128((const __m128i *)pshufb_zig_zag_mask); /* sign of z: z >> 15 */ diff --git a/libs/libvpx/vp8/encoder/x86/vp8_enc_stubs_mmx.c b/libs/libvpx/vp8/encoder/x86/vp8_enc_stubs_mmx.c index 7bf5155c95..d00abd4d08 100644 --- a/libs/libvpx/vp8/encoder/x86/vp8_enc_stubs_mmx.c +++ b/libs/libvpx/vp8/encoder/x86/vp8_enc_stubs_mmx.c @@ -8,60 +8,47 @@ * be found in the AUTHORS file in the 
root of the source tree. */ - #include "vpx_config.h" #include "vp8_rtcd.h" #include "vpx_ports/x86.h" #include "vp8/encoder/block.h" void vp8_short_fdct4x4_mmx(short *input, short *output, int pitch); -void vp8_short_fdct8x4_mmx(short *input, short *output, int pitch) -{ - vp8_short_fdct4x4_mmx(input, output, pitch); - vp8_short_fdct4x4_mmx(input + 4, output + 16, pitch); +void vp8_short_fdct8x4_mmx(short *input, short *output, int pitch) { + vp8_short_fdct4x4_mmx(input, output, pitch); + vp8_short_fdct4x4_mmx(input + 4, output + 16, pitch); } int vp8_fast_quantize_b_impl_mmx(short *coeff_ptr, short *zbin_ptr, short *qcoeff_ptr, short *dequant_ptr, const short *scan_mask, short *round_ptr, short *quant_ptr, short *dqcoeff_ptr); -void vp8_fast_quantize_b_mmx(BLOCK *b, BLOCKD *d) -{ - const short *scan_mask = vp8_default_zig_zag_mask; - short *coeff_ptr = b->coeff; - short *zbin_ptr = b->zbin; - short *round_ptr = b->round; - short *quant_ptr = b->quant_fast; - short *qcoeff_ptr = d->qcoeff; - short *dqcoeff_ptr = d->dqcoeff; - short *dequant_ptr = d->dequant; +void vp8_fast_quantize_b_mmx(BLOCK *b, BLOCKD *d) { + const short *scan_mask = vp8_default_zig_zag_mask; + short *coeff_ptr = b->coeff; + short *zbin_ptr = b->zbin; + short *round_ptr = b->round; + short *quant_ptr = b->quant_fast; + short *qcoeff_ptr = d->qcoeff; + short *dqcoeff_ptr = d->dqcoeff; + short *dequant_ptr = d->dequant; - *d->eob = (char)vp8_fast_quantize_b_impl_mmx( - coeff_ptr, - zbin_ptr, - qcoeff_ptr, - dequant_ptr, - scan_mask, + *d->eob = (char)vp8_fast_quantize_b_impl_mmx( + coeff_ptr, zbin_ptr, qcoeff_ptr, dequant_ptr, scan_mask, - round_ptr, - quant_ptr, - dqcoeff_ptr - ); + round_ptr, quant_ptr, dqcoeff_ptr); } int vp8_mbblock_error_mmx_impl(short *coeff_ptr, short *dcoef_ptr, int dc); -int vp8_mbblock_error_mmx(MACROBLOCK *mb, int dc) -{ - short *coeff_ptr = mb->block[0].coeff; - short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff; - return vp8_mbblock_error_mmx_impl(coeff_ptr, dcoef_ptr, 
dc); +int vp8_mbblock_error_mmx(MACROBLOCK *mb, int dc) { + short *coeff_ptr = mb->block[0].coeff; + short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff; + return vp8_mbblock_error_mmx_impl(coeff_ptr, dcoef_ptr, dc); } int vp8_mbuverror_mmx_impl(short *s_ptr, short *d_ptr); -int vp8_mbuverror_mmx(MACROBLOCK *mb) -{ - short *s_ptr = &mb->coeff[256]; - short *d_ptr = &mb->e_mbd.dqcoeff[256]; - return vp8_mbuverror_mmx_impl(s_ptr, d_ptr); +int vp8_mbuverror_mmx(MACROBLOCK *mb) { + short *s_ptr = &mb->coeff[256]; + short *d_ptr = &mb->e_mbd.dqcoeff[256]; + return vp8_mbuverror_mmx_impl(s_ptr, d_ptr); } - diff --git a/libs/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c b/libs/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c index be9aaf3c96..29eb1f1b45 100644 --- a/libs/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c +++ b/libs/libvpx/vp8/encoder/x86/vp8_enc_stubs_sse2.c @@ -8,25 +8,21 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include "vpx_config.h" #include "vp8_rtcd.h" #include "vpx_ports/x86.h" #include "vp8/encoder/block.h" int vp8_mbblock_error_xmm_impl(short *coeff_ptr, short *dcoef_ptr, int dc); -int vp8_mbblock_error_xmm(MACROBLOCK *mb, int dc) -{ - short *coeff_ptr = mb->block[0].coeff; - short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff; - return vp8_mbblock_error_xmm_impl(coeff_ptr, dcoef_ptr, dc); +int vp8_mbblock_error_xmm(MACROBLOCK *mb, int dc) { + short *coeff_ptr = mb->block[0].coeff; + short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff; + return vp8_mbblock_error_xmm_impl(coeff_ptr, dcoef_ptr, dc); } int vp8_mbuverror_xmm_impl(short *s_ptr, short *d_ptr); -int vp8_mbuverror_xmm(MACROBLOCK *mb) -{ - short *s_ptr = &mb->coeff[256]; - short *d_ptr = &mb->e_mbd.dqcoeff[256]; - return vp8_mbuverror_xmm_impl(s_ptr, d_ptr); +int vp8_mbuverror_xmm(MACROBLOCK *mb) { + short *s_ptr = &mb->coeff[256]; + short *d_ptr = &mb->e_mbd.dqcoeff[256]; + return vp8_mbuverror_xmm_impl(s_ptr, d_ptr); } - diff --git a/libs/libvpx/vp8/encoder/x86/vp8_quantize_sse2.c 
b/libs/libvpx/vp8/encoder/x86/vp8_quantize_sse2.c index b4e92e04b2..581d2565ee 100644 --- a/libs/libvpx/vp8/encoder/x86/vp8_quantize_sse2.c +++ b/libs/libvpx/vp8/encoder/x86/vp8_quantize_sse2.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include "vpx_config.h" #include "vp8_rtcd.h" #include "vpx_ports/x86.h" @@ -16,134 +15,131 @@ #include "vp8/encoder/block.h" #include "vp8/common/entropy.h" /* vp8_default_inv_zig_zag */ -#include /* MMX */ +#include /* MMX */ #include /* SSE */ #include /* SSE2 */ -#define SELECT_EOB(i, z) \ - do { \ - short boost = *zbin_boost_ptr; \ - int cmp = (x[z] < boost) | (y[z] == 0); \ - zbin_boost_ptr++; \ - if (cmp) \ - break; \ - qcoeff_ptr[z] = y[z]; \ - eob = i; \ - zbin_boost_ptr = b->zrun_zbin_boost; \ - } while (0) +#define SELECT_EOB(i, z) \ + do { \ + short boost = *zbin_boost_ptr; \ + int cmp = (x[z] < boost) | (y[z] == 0); \ + zbin_boost_ptr++; \ + if (cmp) break; \ + qcoeff_ptr[z] = y[z]; \ + eob = i; \ + zbin_boost_ptr = b->zrun_zbin_boost; \ + } while (0) -void vp8_regular_quantize_b_sse2(BLOCK *b, BLOCKD *d) -{ - char eob = 0; - short *zbin_boost_ptr; - short *qcoeff_ptr = d->qcoeff; - DECLARE_ALIGNED(16, short, x[16]); - DECLARE_ALIGNED(16, short, y[16]); +void vp8_regular_quantize_b_sse2(BLOCK *b, BLOCKD *d) { + char eob = 0; + short *zbin_boost_ptr; + short *qcoeff_ptr = d->qcoeff; + DECLARE_ALIGNED(16, short, x[16]); + DECLARE_ALIGNED(16, short, y[16]); - __m128i sz0, x0, sz1, x1, y0, y1, x_minus_zbin0, x_minus_zbin1; - __m128i quant_shift0 = _mm_load_si128((__m128i *)(b->quant_shift)); - __m128i quant_shift1 = _mm_load_si128((__m128i *)(b->quant_shift + 8)); - __m128i z0 = _mm_load_si128((__m128i *)(b->coeff)); - __m128i z1 = _mm_load_si128((__m128i *)(b->coeff+8)); - __m128i zbin_extra = _mm_cvtsi32_si128(b->zbin_extra); - __m128i zbin0 = _mm_load_si128((__m128i *)(b->zbin)); - __m128i zbin1 = _mm_load_si128((__m128i *)(b->zbin + 8)); - __m128i round0 = _mm_load_si128((__m128i 
*)(b->round)); - __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8)); - __m128i quant0 = _mm_load_si128((__m128i *)(b->quant)); - __m128i quant1 = _mm_load_si128((__m128i *)(b->quant + 8)); - __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant)); - __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8)); + __m128i sz0, x0, sz1, x1, y0, y1, x_minus_zbin0, x_minus_zbin1; + __m128i quant_shift0 = _mm_load_si128((__m128i *)(b->quant_shift)); + __m128i quant_shift1 = _mm_load_si128((__m128i *)(b->quant_shift + 8)); + __m128i z0 = _mm_load_si128((__m128i *)(b->coeff)); + __m128i z1 = _mm_load_si128((__m128i *)(b->coeff + 8)); + __m128i zbin_extra = _mm_cvtsi32_si128(b->zbin_extra); + __m128i zbin0 = _mm_load_si128((__m128i *)(b->zbin)); + __m128i zbin1 = _mm_load_si128((__m128i *)(b->zbin + 8)); + __m128i round0 = _mm_load_si128((__m128i *)(b->round)); + __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8)); + __m128i quant0 = _mm_load_si128((__m128i *)(b->quant)); + __m128i quant1 = _mm_load_si128((__m128i *)(b->quant + 8)); + __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant)); + __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8)); - memset(qcoeff_ptr, 0, 32); + memset(qcoeff_ptr, 0, 32); - /* Duplicate to all lanes. */ - zbin_extra = _mm_shufflelo_epi16(zbin_extra, 0); - zbin_extra = _mm_unpacklo_epi16(zbin_extra, zbin_extra); + /* Duplicate to all lanes. 
*/ + zbin_extra = _mm_shufflelo_epi16(zbin_extra, 0); + zbin_extra = _mm_unpacklo_epi16(zbin_extra, zbin_extra); - /* Sign of z: z >> 15 */ - sz0 = _mm_srai_epi16(z0, 15); - sz1 = _mm_srai_epi16(z1, 15); + /* Sign of z: z >> 15 */ + sz0 = _mm_srai_epi16(z0, 15); + sz1 = _mm_srai_epi16(z1, 15); - /* x = abs(z): (z ^ sz) - sz */ - x0 = _mm_xor_si128(z0, sz0); - x1 = _mm_xor_si128(z1, sz1); - x0 = _mm_sub_epi16(x0, sz0); - x1 = _mm_sub_epi16(x1, sz1); + /* x = abs(z): (z ^ sz) - sz */ + x0 = _mm_xor_si128(z0, sz0); + x1 = _mm_xor_si128(z1, sz1); + x0 = _mm_sub_epi16(x0, sz0); + x1 = _mm_sub_epi16(x1, sz1); - /* zbin[] + zbin_extra */ - zbin0 = _mm_add_epi16(zbin0, zbin_extra); - zbin1 = _mm_add_epi16(zbin1, zbin_extra); + /* zbin[] + zbin_extra */ + zbin0 = _mm_add_epi16(zbin0, zbin_extra); + zbin1 = _mm_add_epi16(zbin1, zbin_extra); - /* In C x is compared to zbin where zbin = zbin[] + boost + extra. Rebalance - * the equation because boost is the only value which can change: - * x - (zbin[] + extra) >= boost */ - x_minus_zbin0 = _mm_sub_epi16(x0, zbin0); - x_minus_zbin1 = _mm_sub_epi16(x1, zbin1); + /* In C x is compared to zbin where zbin = zbin[] + boost + extra. Rebalance + * the equation because boost is the only value which can change: + * x - (zbin[] + extra) >= boost */ + x_minus_zbin0 = _mm_sub_epi16(x0, zbin0); + x_minus_zbin1 = _mm_sub_epi16(x1, zbin1); - _mm_store_si128((__m128i *)(x), x_minus_zbin0); - _mm_store_si128((__m128i *)(x + 8), x_minus_zbin1); + _mm_store_si128((__m128i *)(x), x_minus_zbin0); + _mm_store_si128((__m128i *)(x + 8), x_minus_zbin1); - /* All the remaining calculations are valid whether they are done now with - * simd or later inside the loop one at a time. */ - x0 = _mm_add_epi16(x0, round0); - x1 = _mm_add_epi16(x1, round1); + /* All the remaining calculations are valid whether they are done now with + * simd or later inside the loop one at a time. 
*/ + x0 = _mm_add_epi16(x0, round0); + x1 = _mm_add_epi16(x1, round1); - y0 = _mm_mulhi_epi16(x0, quant0); - y1 = _mm_mulhi_epi16(x1, quant1); + y0 = _mm_mulhi_epi16(x0, quant0); + y1 = _mm_mulhi_epi16(x1, quant1); - y0 = _mm_add_epi16(y0, x0); - y1 = _mm_add_epi16(y1, x1); + y0 = _mm_add_epi16(y0, x0); + y1 = _mm_add_epi16(y1, x1); - /* Instead of shifting each value independently we convert the scaling - * factor with 1 << (16 - shift) so we can use multiply/return high half. */ - y0 = _mm_mulhi_epi16(y0, quant_shift0); - y1 = _mm_mulhi_epi16(y1, quant_shift1); + /* Instead of shifting each value independently we convert the scaling + * factor with 1 << (16 - shift) so we can use multiply/return high half. */ + y0 = _mm_mulhi_epi16(y0, quant_shift0); + y1 = _mm_mulhi_epi16(y1, quant_shift1); - /* Return the sign: (y ^ sz) - sz */ - y0 = _mm_xor_si128(y0, sz0); - y1 = _mm_xor_si128(y1, sz1); - y0 = _mm_sub_epi16(y0, sz0); - y1 = _mm_sub_epi16(y1, sz1); + /* Return the sign: (y ^ sz) - sz */ + y0 = _mm_xor_si128(y0, sz0); + y1 = _mm_xor_si128(y1, sz1); + y0 = _mm_sub_epi16(y0, sz0); + y1 = _mm_sub_epi16(y1, sz1); - _mm_store_si128((__m128i *)(y), y0); - _mm_store_si128((__m128i *)(y + 8), y1); + _mm_store_si128((__m128i *)(y), y0); + _mm_store_si128((__m128i *)(y + 8), y1); - zbin_boost_ptr = b->zrun_zbin_boost; + zbin_boost_ptr = b->zrun_zbin_boost; - /* The loop gets unrolled anyway. Avoid the vp8_default_zig_zag1d lookup. */ - SELECT_EOB(1, 0); - SELECT_EOB(2, 1); - SELECT_EOB(3, 4); - SELECT_EOB(4, 8); - SELECT_EOB(5, 5); - SELECT_EOB(6, 2); - SELECT_EOB(7, 3); - SELECT_EOB(8, 6); - SELECT_EOB(9, 9); - SELECT_EOB(10, 12); - SELECT_EOB(11, 13); - SELECT_EOB(12, 10); - SELECT_EOB(13, 7); - SELECT_EOB(14, 11); - SELECT_EOB(15, 14); - SELECT_EOB(16, 15); + /* The loop gets unrolled anyway. Avoid the vp8_default_zig_zag1d lookup. 
*/ + SELECT_EOB(1, 0); + SELECT_EOB(2, 1); + SELECT_EOB(3, 4); + SELECT_EOB(4, 8); + SELECT_EOB(5, 5); + SELECT_EOB(6, 2); + SELECT_EOB(7, 3); + SELECT_EOB(8, 6); + SELECT_EOB(9, 9); + SELECT_EOB(10, 12); + SELECT_EOB(11, 13); + SELECT_EOB(12, 10); + SELECT_EOB(13, 7); + SELECT_EOB(14, 11); + SELECT_EOB(15, 14); + SELECT_EOB(16, 15); - y0 = _mm_load_si128((__m128i *)(d->qcoeff)); - y1 = _mm_load_si128((__m128i *)(d->qcoeff + 8)); + y0 = _mm_load_si128((__m128i *)(d->qcoeff)); + y1 = _mm_load_si128((__m128i *)(d->qcoeff + 8)); - /* dqcoeff = qcoeff * dequant */ - y0 = _mm_mullo_epi16(y0, dequant0); - y1 = _mm_mullo_epi16(y1, dequant1); + /* dqcoeff = qcoeff * dequant */ + y0 = _mm_mullo_epi16(y0, dequant0); + y1 = _mm_mullo_epi16(y1, dequant1); - _mm_store_si128((__m128i *)(d->dqcoeff), y0); - _mm_store_si128((__m128i *)(d->dqcoeff + 8), y1); + _mm_store_si128((__m128i *)(d->dqcoeff), y0); + _mm_store_si128((__m128i *)(d->dqcoeff + 8), y1); - *d->eob = eob; + *d->eob = eob; } -void vp8_fast_quantize_b_sse2(BLOCK *b, BLOCKD *d) -{ +void vp8_fast_quantize_b_sse2(BLOCK *b, BLOCKD *d) { __m128i z0 = _mm_load_si128((__m128i *)(b->coeff)); __m128i z1 = _mm_load_si128((__m128i *)(b->coeff + 8)); __m128i round0 = _mm_load_si128((__m128i *)(b->round)); @@ -152,8 +148,10 @@ void vp8_fast_quantize_b_sse2(BLOCK *b, BLOCKD *d) __m128i quant_fast1 = _mm_load_si128((__m128i *)(b->quant_fast + 8)); __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant)); __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8)); - __m128i inv_zig_zag0 = _mm_load_si128((const __m128i *)(vp8_default_inv_zig_zag)); - __m128i inv_zig_zag1 = _mm_load_si128((const __m128i *)(vp8_default_inv_zig_zag + 8)); + __m128i inv_zig_zag0 = + _mm_load_si128((const __m128i *)(vp8_default_inv_zig_zag)); + __m128i inv_zig_zag1 = + _mm_load_si128((const __m128i *)(vp8_default_inv_zig_zag + 8)); __m128i sz0, sz1, x0, x1, y0, y1, xdq0, xdq1, zeros, ones; @@ -210,17 +208,17 @@ void vp8_fast_quantize_b_sse2(BLOCK 
*b, BLOCKD *d) x0 = _mm_max_epi16(x0, x1); /* now down to 8 */ - x1 = _mm_shuffle_epi32(x0, 0xE); // 0b00001110 + x1 = _mm_shuffle_epi32(x0, 0xE); // 0b00001110 x0 = _mm_max_epi16(x0, x1); /* only 4 left */ - x1 = _mm_shufflelo_epi16(x0, 0xE); // 0b00001110 + x1 = _mm_shufflelo_epi16(x0, 0xE); // 0b00001110 x0 = _mm_max_epi16(x0, x1); /* okay, just 2! */ - x1 = _mm_shufflelo_epi16(x0, 0x1); // 0b00000001 + x1 = _mm_shufflelo_epi16(x0, 0x1); // 0b00000001 x0 = _mm_max_epi16(x0, x1); diff --git a/libs/libvpx/vp8/vp8_common.mk b/libs/libvpx/vp8/vp8_common.mk index 4c4e856272..886b127d6a 100644 --- a/libs/libvpx/vp8/vp8_common.mk +++ b/libs/libvpx/vp8/vp8_common.mk @@ -16,7 +16,7 @@ VP8_COMMON_SRCS-yes += common/alloccommon.c VP8_COMMON_SRCS-yes += common/blockd.c VP8_COMMON_SRCS-yes += common/coefupdateprobs.h VP8_COMMON_SRCS-yes += common/copy_c.c -VP8_COMMON_SRCS-yes += common/debugmodes.c +# VP8_COMMON_SRCS-yes += common/debugmodes.c VP8_COMMON_SRCS-yes += common/default_coef_probs.h VP8_COMMON_SRCS-yes += common/dequantize.c VP8_COMMON_SRCS-yes += common/entropy.c @@ -68,7 +68,6 @@ VP8_COMMON_SRCS-yes += common/vp8_entropymodedata.h -VP8_COMMON_SRCS-$(CONFIG_POSTPROC_VISUALIZER) += common/textblit.c VP8_COMMON_SRCS-yes += common/treecoder.c VP8_COMMON_SRCS-$(ARCH_X86)$(ARCH_X86_64) += common/x86/filter_x86.c @@ -96,9 +95,7 @@ VP8_COMMON_SRCS-$(HAVE_SSE3) += common/x86/copy_sse3.asm VP8_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/subpixel_ssse3.asm ifeq ($(CONFIG_POSTPROC),yes) -VP8_COMMON_SRCS-$(HAVE_MMX) += common/x86/postproc_mmx.asm VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/mfqe_sse2.asm -VP8_COMMON_SRCS-$(HAVE_SSE2) += common/x86/postproc_sse2.asm endif ifeq ($(ARCH_X86_64),yes) @@ -123,33 +120,10 @@ VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/vp8_macros_msa.h ifeq ($(CONFIG_POSTPROC),yes) VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/mfqe_msa.c -VP8_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/postproc_msa.c endif -# common (c) -VP8_COMMON_SRCS-$(ARCH_ARM) += 
common/arm/filter_arm.c -VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/loopfilter_arm.c -VP8_COMMON_SRCS-$(ARCH_ARM) += common/arm/dequantize_arm.c - -# common (media) -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/bilinearfilter_arm.c -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/bilinearfilter_arm.h -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/armv6/bilinearfilter_v6$(ASM) -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/armv6/copymem8x4_v6$(ASM) -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/armv6/copymem8x8_v6$(ASM) -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/armv6/copymem16x16_v6$(ASM) -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/armv6/dc_only_idct_add_v6$(ASM) -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/armv6/iwalsh_v6$(ASM) -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/armv6/filter_v6$(ASM) -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/armv6/idct_v6$(ASM) -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/armv6/loopfilter_v6$(ASM) -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/armv6/simpleloopfilter_v6$(ASM) -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/armv6/sixtappredict8x4_v6$(ASM) -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/armv6/dequant_idct_v6$(ASM) -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/armv6/dequantize_v6$(ASM) -VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/armv6/idct_blk_v6.c - # common (neon intrinsics) +VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/loopfilter_arm.c VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/bilinearpredict_neon.c VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/copymem_neon.c VP8_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/dc_only_idct_add_neon.c diff --git a/libs/libvpx/vp8/vp8_cx_iface.c b/libs/libvpx/vp8/vp8_cx_iface.c index 257d2a0c4e..fac237eec0 100644 --- a/libs/libvpx/vp8/vp8_cx_iface.c +++ b/libs/libvpx/vp8/vp8_cx_iface.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include "./vpx_config.h" #include "./vp8_rtcd.h" #include "./vpx_dsp_rtcd.h" @@ -22,484 +21,454 @@ #include "vpx/vp8cx.h" #include "vp8/encoder/firstpass.h" #include "vp8/common/onyx.h" +#include "vp8/common/common.h" #include #include -struct vp8_extracfg -{ - struct vpx_codec_pkt_list *pkt_list; - int cpu_used; /** available cpu percentage in 1/16*/ - unsigned int enable_auto_alt_ref; /** if encoder decides to uses alternate reference frame */ - unsigned int noise_sensitivity; - unsigned int Sharpness; - unsigned int static_thresh; - unsigned int token_partitions; - unsigned int arnr_max_frames; /* alt_ref Noise Reduction Max Frame Count */ - unsigned int arnr_strength; /* alt_ref Noise Reduction Strength */ - unsigned int arnr_type; /* alt_ref filter type */ - vp8e_tuning tuning; - unsigned int cq_level; /* constrained quality level */ - unsigned int rc_max_intra_bitrate_pct; - unsigned int screen_content_mode; - +struct vp8_extracfg { + struct vpx_codec_pkt_list *pkt_list; + int cpu_used; /** available cpu percentage in 1/16*/ + /** if encoder decides to uses alternate reference frame */ + unsigned int enable_auto_alt_ref; + unsigned int noise_sensitivity; + unsigned int Sharpness; + unsigned int static_thresh; + unsigned int token_partitions; + unsigned int arnr_max_frames; /* alt_ref Noise Reduction Max Frame Count */ + unsigned int arnr_strength; /* alt_ref Noise Reduction Strength */ + unsigned int arnr_type; /* alt_ref filter type */ + vp8e_tuning tuning; + unsigned int cq_level; /* constrained quality level */ + unsigned int rc_max_intra_bitrate_pct; + unsigned int screen_content_mode; }; static struct vp8_extracfg default_extracfg = { NULL, #if !(CONFIG_REALTIME_ONLY) - 0, /* cpu_used */ + 0, /* cpu_used */ #else - 4, /* cpu_used */ + 4, /* cpu_used */ #endif - 0, /* enable_auto_alt_ref */ - 0, /* noise_sensitivity */ - 0, /* Sharpness */ - 0, /* static_thresh */ + 0, /* enable_auto_alt_ref */ + 0, /* noise_sensitivity */ + 0, /* Sharpness */ + 0, 
/* static_thresh */ #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) VP8_EIGHT_TOKENPARTITION, #else - VP8_ONE_TOKENPARTITION, /* token_partitions */ + VP8_ONE_TOKENPARTITION, /* token_partitions */ #endif - 0, /* arnr_max_frames */ - 3, /* arnr_strength */ - 3, /* arnr_type*/ - 0, /* tuning*/ - 10, /* cq_level */ - 0, /* rc_max_intra_bitrate_pct */ - 0, /* screen_content_mode */ + 0, /* arnr_max_frames */ + 3, /* arnr_strength */ + 3, /* arnr_type*/ + 0, /* tuning*/ + 10, /* cq_level */ + 0, /* rc_max_intra_bitrate_pct */ + 0, /* screen_content_mode */ }; -struct vpx_codec_alg_priv -{ - vpx_codec_priv_t base; - vpx_codec_enc_cfg_t cfg; - struct vp8_extracfg vp8_cfg; - VP8_CONFIG oxcf; - struct VP8_COMP *cpi; - unsigned char *cx_data; - unsigned int cx_data_sz; - vpx_image_t preview_img; - unsigned int next_frame_flag; - vp8_postproc_cfg_t preview_ppcfg; - /* pkt_list size depends on the maximum number of lagged frames allowed. */ - vpx_codec_pkt_list_decl(64) pkt_list; - unsigned int fixed_kf_cntr; - vpx_enc_frame_flags_t control_frame_flags; +struct vpx_codec_alg_priv { + vpx_codec_priv_t base; + vpx_codec_enc_cfg_t cfg; + struct vp8_extracfg vp8_cfg; + VP8_CONFIG oxcf; + struct VP8_COMP *cpi; + unsigned char *cx_data; + unsigned int cx_data_sz; + vpx_image_t preview_img; + unsigned int next_frame_flag; + vp8_postproc_cfg_t preview_ppcfg; + /* pkt_list size depends on the maximum number of lagged frames allowed. */ + vpx_codec_pkt_list_decl(64) pkt_list; + unsigned int fixed_kf_cntr; + vpx_enc_frame_flags_t control_frame_flags; }; +static vpx_codec_err_t update_error_state( + vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) { + vpx_codec_err_t res; -static vpx_codec_err_t -update_error_state(vpx_codec_alg_priv_t *ctx, - const struct vpx_internal_error_info *error) -{ - vpx_codec_err_t res; + if ((res = error->error_code)) { + ctx->base.err_detail = error->has_detail ? 
error->detail : NULL; + } - if ((res = error->error_code)) - ctx->base.err_detail = error->has_detail - ? error->detail - : NULL; - - return res; + return res; } - #undef ERROR -#define ERROR(str) do {\ - ctx->base.err_detail = str;\ - return VPX_CODEC_INVALID_PARAM;\ - } while(0) +#define ERROR(str) \ + do { \ + ctx->base.err_detail = str; \ + return VPX_CODEC_INVALID_PARAM; \ + } while (0) -#define RANGE_CHECK(p,memb,lo,hi) do {\ - if(!(((p)->memb == lo || (p)->memb > (lo)) && (p)->memb <= hi)) \ - ERROR(#memb " out of range ["#lo".."#hi"]");\ - } while(0) +#define RANGE_CHECK(p, memb, lo, hi) \ + do { \ + if (!(((p)->memb == lo || (p)->memb > (lo)) && (p)->memb <= hi)) \ + ERROR(#memb " out of range [" #lo ".." #hi "]"); \ + } while (0) -#define RANGE_CHECK_HI(p,memb,hi) do {\ - if(!((p)->memb <= (hi))) \ - ERROR(#memb " out of range [.."#hi"]");\ - } while(0) +#define RANGE_CHECK_HI(p, memb, hi) \ + do { \ + if (!((p)->memb <= (hi))) ERROR(#memb " out of range [.." #hi "]"); \ + } while (0) -#define RANGE_CHECK_LO(p,memb,lo) do {\ - if(!((p)->memb >= (lo))) \ - ERROR(#memb " out of range ["#lo"..]");\ - } while(0) +#define RANGE_CHECK_LO(p, memb, lo) \ + do { \ + if (!((p)->memb >= (lo))) ERROR(#memb " out of range [" #lo "..]"); \ + } while (0) -#define RANGE_CHECK_BOOL(p,memb) do {\ - if(!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean");\ - } while(0) +#define RANGE_CHECK_BOOL(p, memb) \ + do { \ + if (!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean"); \ + } while (0) -static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx, +static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx, const vpx_codec_enc_cfg_t *cfg, const struct vp8_extracfg *vp8_cfg, - int finalize) -{ - RANGE_CHECK(cfg, g_w, 1, 16383); /* 14 bits available */ - RANGE_CHECK(cfg, g_h, 1, 16383); /* 14 bits available */ - RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000); - RANGE_CHECK(cfg, g_timebase.num, 1, 1000000000); - RANGE_CHECK_HI(cfg, g_profile, 3); 
- RANGE_CHECK_HI(cfg, rc_max_quantizer, 63); - RANGE_CHECK_HI(cfg, rc_min_quantizer, cfg->rc_max_quantizer); - RANGE_CHECK_HI(cfg, g_threads, 64); + int finalize) { + RANGE_CHECK(cfg, g_w, 1, 16383); /* 14 bits available */ + RANGE_CHECK(cfg, g_h, 1, 16383); /* 14 bits available */ + RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000); + RANGE_CHECK(cfg, g_timebase.num, 1, 1000000000); + RANGE_CHECK_HI(cfg, g_profile, 3); + RANGE_CHECK_HI(cfg, rc_max_quantizer, 63); + RANGE_CHECK_HI(cfg, rc_min_quantizer, cfg->rc_max_quantizer); + RANGE_CHECK_HI(cfg, g_threads, 64); #if CONFIG_REALTIME_ONLY - RANGE_CHECK_HI(cfg, g_lag_in_frames, 0); + RANGE_CHECK_HI(cfg, g_lag_in_frames, 0); #elif CONFIG_MULTI_RES_ENCODING - if (ctx->base.enc.total_encoders > 1) - RANGE_CHECK_HI(cfg, g_lag_in_frames, 0); + if (ctx->base.enc.total_encoders > 1) RANGE_CHECK_HI(cfg, g_lag_in_frames, 0); #else - RANGE_CHECK_HI(cfg, g_lag_in_frames, 25); + RANGE_CHECK_HI(cfg, g_lag_in_frames, 25); #endif - RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_Q); - RANGE_CHECK_HI(cfg, rc_undershoot_pct, 1000); - RANGE_CHECK_HI(cfg, rc_overshoot_pct, 1000); - RANGE_CHECK_HI(cfg, rc_2pass_vbr_bias_pct, 100); - RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO); + RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_Q); + RANGE_CHECK_HI(cfg, rc_undershoot_pct, 1000); + RANGE_CHECK_HI(cfg, rc_overshoot_pct, 1000); + RANGE_CHECK_HI(cfg, rc_2pass_vbr_bias_pct, 100); + RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO); /* TODO: add spatial re-sampling support and frame dropping in * multi-res-encoder.*/ #if CONFIG_MULTI_RES_ENCODING - if (ctx->base.enc.total_encoders > 1) - RANGE_CHECK_HI(cfg, rc_resize_allowed, 0); + if (ctx->base.enc.total_encoders > 1) + RANGE_CHECK_HI(cfg, rc_resize_allowed, 0); #else - RANGE_CHECK_BOOL(cfg, rc_resize_allowed); + RANGE_CHECK_BOOL(cfg, rc_resize_allowed); #endif - RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100); - RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100); - RANGE_CHECK_HI(cfg, 
rc_resize_down_thresh, 100); + RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100); + RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100); + RANGE_CHECK_HI(cfg, rc_resize_down_thresh, 100); #if CONFIG_REALTIME_ONLY - RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_ONE_PASS); + RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_ONE_PASS); #elif CONFIG_MULTI_RES_ENCODING - if (ctx->base.enc.total_encoders > 1) - RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_ONE_PASS); + if (ctx->base.enc.total_encoders > 1) + RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_ONE_PASS); #else - RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_LAST_PASS); + RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_LAST_PASS); #endif - /* VP8 does not support a lower bound on the keyframe interval in - * automatic keyframe placement mode. - */ - if (cfg->kf_mode != VPX_KF_DISABLED && cfg->kf_min_dist != cfg->kf_max_dist - && cfg->kf_min_dist > 0) - ERROR("kf_min_dist not supported in auto mode, use 0 " - "or kf_max_dist instead."); + /* VP8 does not support a lower bound on the keyframe interval in + * automatic keyframe placement mode. 
+ */ + if (cfg->kf_mode != VPX_KF_DISABLED && cfg->kf_min_dist != cfg->kf_max_dist && + cfg->kf_min_dist > 0) + ERROR( + "kf_min_dist not supported in auto mode, use 0 " + "or kf_max_dist instead."); - RANGE_CHECK_BOOL(vp8_cfg, enable_auto_alt_ref); - RANGE_CHECK(vp8_cfg, cpu_used, -16, 16); + RANGE_CHECK_BOOL(vp8_cfg, enable_auto_alt_ref); + RANGE_CHECK(vp8_cfg, cpu_used, -16, 16); #if CONFIG_REALTIME_ONLY && !CONFIG_TEMPORAL_DENOISING - RANGE_CHECK(vp8_cfg, noise_sensitivity, 0, 0); + RANGE_CHECK(vp8_cfg, noise_sensitivity, 0, 0); #else - RANGE_CHECK_HI(vp8_cfg, noise_sensitivity, 6); + RANGE_CHECK_HI(vp8_cfg, noise_sensitivity, 6); #endif - RANGE_CHECK(vp8_cfg, token_partitions, VP8_ONE_TOKENPARTITION, - VP8_EIGHT_TOKENPARTITION); - RANGE_CHECK_HI(vp8_cfg, Sharpness, 7); - RANGE_CHECK(vp8_cfg, arnr_max_frames, 0, 15); - RANGE_CHECK_HI(vp8_cfg, arnr_strength, 6); - RANGE_CHECK(vp8_cfg, arnr_type, 1, 3); - RANGE_CHECK(vp8_cfg, cq_level, 0, 63); - RANGE_CHECK_HI(vp8_cfg, screen_content_mode, 2); - if (finalize && (cfg->rc_end_usage == VPX_CQ || cfg->rc_end_usage == VPX_Q)) - RANGE_CHECK(vp8_cfg, cq_level, - cfg->rc_min_quantizer, cfg->rc_max_quantizer); + RANGE_CHECK(vp8_cfg, token_partitions, VP8_ONE_TOKENPARTITION, + VP8_EIGHT_TOKENPARTITION); + RANGE_CHECK_HI(vp8_cfg, Sharpness, 7); + RANGE_CHECK(vp8_cfg, arnr_max_frames, 0, 15); + RANGE_CHECK_HI(vp8_cfg, arnr_strength, 6); + RANGE_CHECK(vp8_cfg, arnr_type, 1, 3); + RANGE_CHECK(vp8_cfg, cq_level, 0, 63); + RANGE_CHECK_HI(vp8_cfg, screen_content_mode, 2); + if (finalize && (cfg->rc_end_usage == VPX_CQ || cfg->rc_end_usage == VPX_Q)) + RANGE_CHECK(vp8_cfg, cq_level, cfg->rc_min_quantizer, + cfg->rc_max_quantizer); #if !(CONFIG_REALTIME_ONLY) - if (cfg->g_pass == VPX_RC_LAST_PASS) - { - size_t packet_sz = sizeof(FIRSTPASS_STATS); - int n_packets = (int)(cfg->rc_twopass_stats_in.sz / - packet_sz); - FIRSTPASS_STATS *stats; + if (cfg->g_pass == VPX_RC_LAST_PASS) { + size_t packet_sz = sizeof(FIRSTPASS_STATS); + int 
n_packets = (int)(cfg->rc_twopass_stats_in.sz / packet_sz); + FIRSTPASS_STATS *stats; - if (!cfg->rc_twopass_stats_in.buf) - ERROR("rc_twopass_stats_in.buf not set."); + if (!cfg->rc_twopass_stats_in.buf) + ERROR("rc_twopass_stats_in.buf not set."); - if (cfg->rc_twopass_stats_in.sz % packet_sz) - ERROR("rc_twopass_stats_in.sz indicates truncated packet."); + if (cfg->rc_twopass_stats_in.sz % packet_sz) + ERROR("rc_twopass_stats_in.sz indicates truncated packet."); - if (cfg->rc_twopass_stats_in.sz < 2 * packet_sz) - ERROR("rc_twopass_stats_in requires at least two packets."); + if (cfg->rc_twopass_stats_in.sz < 2 * packet_sz) + ERROR("rc_twopass_stats_in requires at least two packets."); - stats = (void*)((char *)cfg->rc_twopass_stats_in.buf - + (n_packets - 1) * packet_sz); + stats = (void *)((char *)cfg->rc_twopass_stats_in.buf + + (n_packets - 1) * packet_sz); - if ((int)(stats->count + 0.5) != n_packets - 1) - ERROR("rc_twopass_stats_in missing EOS stats packet"); - } + if ((int)(stats->count + 0.5) != n_packets - 1) + ERROR("rc_twopass_stats_in missing EOS stats packet"); + } #endif - RANGE_CHECK(cfg, ts_number_layers, 1, 5); + RANGE_CHECK(cfg, ts_number_layers, 1, 5); - if (cfg->ts_number_layers > 1) - { - unsigned int i; - RANGE_CHECK_HI(cfg, ts_periodicity, 16); + if (cfg->ts_number_layers > 1) { + unsigned int i; + RANGE_CHECK_HI(cfg, ts_periodicity, 16); - for (i=1; its_number_layers; i++) - if (cfg->ts_target_bitrate[i] <= cfg->ts_target_bitrate[i-1] && - cfg->rc_target_bitrate > 0) - ERROR("ts_target_bitrate entries are not strictly increasing"); - - RANGE_CHECK(cfg, ts_rate_decimator[cfg->ts_number_layers-1], 1, 1); - for (i=cfg->ts_number_layers-2; i>0; i--) - if (cfg->ts_rate_decimator[i-1] != 2*cfg->ts_rate_decimator[i]) - ERROR("ts_rate_decimator factors are not powers of 2"); - - RANGE_CHECK_HI(cfg, ts_layer_id[i], cfg->ts_number_layers-1); + for (i = 1; i < cfg->ts_number_layers; ++i) { + if (cfg->ts_target_bitrate[i] <= cfg->ts_target_bitrate[i 
- 1] && + cfg->rc_target_bitrate > 0) + ERROR("ts_target_bitrate entries are not strictly increasing"); } + RANGE_CHECK(cfg, ts_rate_decimator[cfg->ts_number_layers - 1], 1, 1); + for (i = cfg->ts_number_layers - 2; i > 0; i--) { + if (cfg->ts_rate_decimator[i - 1] != 2 * cfg->ts_rate_decimator[i]) + ERROR("ts_rate_decimator factors are not powers of 2"); + } + + RANGE_CHECK_HI(cfg, ts_layer_id[i], cfg->ts_number_layers - 1); + } + #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) - if(cfg->g_threads > (1 << vp8_cfg->token_partitions)) - ERROR("g_threads cannot be bigger than number of token partitions"); + if (cfg->g_threads > (1 << vp8_cfg->token_partitions)) + ERROR("g_threads cannot be bigger than number of token partitions"); #endif - return VPX_CODEC_OK; + return VPX_CODEC_OK; } - static vpx_codec_err_t validate_img(vpx_codec_alg_priv_t *ctx, - const vpx_image_t *img) -{ - switch (img->fmt) - { + const vpx_image_t *img) { + switch (img->fmt) { case VPX_IMG_FMT_YV12: case VPX_IMG_FMT_I420: case VPX_IMG_FMT_VPXI420: - case VPX_IMG_FMT_VPXYV12: - break; + case VPX_IMG_FMT_VPXYV12: break; default: - ERROR("Invalid image format. Only YV12 and I420 images are supported"); - } + ERROR("Invalid image format. 
Only YV12 and I420 images are supported"); + } - if ((img->d_w != ctx->cfg.g_w) || (img->d_h != ctx->cfg.g_h)) - ERROR("Image size must match encoder init configuration size"); + if ((img->d_w != ctx->cfg.g_w) || (img->d_h != ctx->cfg.g_h)) + ERROR("Image size must match encoder init configuration size"); - return VPX_CODEC_OK; + return VPX_CODEC_OK; } - static vpx_codec_err_t set_vp8e_config(VP8_CONFIG *oxcf, vpx_codec_enc_cfg_t cfg, struct vp8_extracfg vp8_cfg, - vpx_codec_priv_enc_mr_cfg_t *mr_cfg) -{ - oxcf->multi_threaded = cfg.g_threads; - oxcf->Version = cfg.g_profile; + vpx_codec_priv_enc_mr_cfg_t *mr_cfg) { + oxcf->multi_threaded = cfg.g_threads; + oxcf->Version = cfg.g_profile; - oxcf->Width = cfg.g_w; - oxcf->Height = cfg.g_h; - oxcf->timebase = cfg.g_timebase; + oxcf->Width = cfg.g_w; + oxcf->Height = cfg.g_h; + oxcf->timebase = cfg.g_timebase; - oxcf->error_resilient_mode = cfg.g_error_resilient; + oxcf->error_resilient_mode = cfg.g_error_resilient; - switch (cfg.g_pass) - { - case VPX_RC_ONE_PASS: - oxcf->Mode = MODE_BESTQUALITY; - break; - case VPX_RC_FIRST_PASS: - oxcf->Mode = MODE_FIRSTPASS; - break; - case VPX_RC_LAST_PASS: - oxcf->Mode = MODE_SECONDPASS_BEST; - break; - } + switch (cfg.g_pass) { + case VPX_RC_ONE_PASS: oxcf->Mode = MODE_BESTQUALITY; break; + case VPX_RC_FIRST_PASS: oxcf->Mode = MODE_FIRSTPASS; break; + case VPX_RC_LAST_PASS: oxcf->Mode = MODE_SECONDPASS_BEST; break; + } - if (cfg.g_pass == VPX_RC_FIRST_PASS || cfg.g_pass == VPX_RC_ONE_PASS) - { - oxcf->allow_lag = 0; - oxcf->lag_in_frames = 0; - } - else - { - oxcf->allow_lag = (cfg.g_lag_in_frames) > 0; - oxcf->lag_in_frames = cfg.g_lag_in_frames; - } + if (cfg.g_pass == VPX_RC_FIRST_PASS || cfg.g_pass == VPX_RC_ONE_PASS) { + oxcf->allow_lag = 0; + oxcf->lag_in_frames = 0; + } else { + oxcf->allow_lag = (cfg.g_lag_in_frames) > 0; + oxcf->lag_in_frames = cfg.g_lag_in_frames; + } - oxcf->allow_df = (cfg.rc_dropframe_thresh > 0); - oxcf->drop_frames_water_mark = 
cfg.rc_dropframe_thresh; + oxcf->allow_df = (cfg.rc_dropframe_thresh > 0); + oxcf->drop_frames_water_mark = cfg.rc_dropframe_thresh; - oxcf->allow_spatial_resampling = cfg.rc_resize_allowed; - oxcf->resample_up_water_mark = cfg.rc_resize_up_thresh; - oxcf->resample_down_water_mark = cfg.rc_resize_down_thresh; + oxcf->allow_spatial_resampling = cfg.rc_resize_allowed; + oxcf->resample_up_water_mark = cfg.rc_resize_up_thresh; + oxcf->resample_down_water_mark = cfg.rc_resize_down_thresh; - if (cfg.rc_end_usage == VPX_VBR) { - oxcf->end_usage = USAGE_LOCAL_FILE_PLAYBACK; - } else if (cfg.rc_end_usage == VPX_CBR) { - oxcf->end_usage = USAGE_STREAM_FROM_SERVER; - } else if (cfg.rc_end_usage == VPX_CQ) { - oxcf->end_usage = USAGE_CONSTRAINED_QUALITY; - } else if (cfg.rc_end_usage == VPX_Q) { - oxcf->end_usage = USAGE_CONSTANT_QUALITY; - } + if (cfg.rc_end_usage == VPX_VBR) { + oxcf->end_usage = USAGE_LOCAL_FILE_PLAYBACK; + } else if (cfg.rc_end_usage == VPX_CBR) { + oxcf->end_usage = USAGE_STREAM_FROM_SERVER; + } else if (cfg.rc_end_usage == VPX_CQ) { + oxcf->end_usage = USAGE_CONSTRAINED_QUALITY; + } else if (cfg.rc_end_usage == VPX_Q) { + oxcf->end_usage = USAGE_CONSTANT_QUALITY; + } - oxcf->target_bandwidth = cfg.rc_target_bitrate; - oxcf->rc_max_intra_bitrate_pct = vp8_cfg.rc_max_intra_bitrate_pct; + oxcf->target_bandwidth = cfg.rc_target_bitrate; + oxcf->rc_max_intra_bitrate_pct = vp8_cfg.rc_max_intra_bitrate_pct; - oxcf->best_allowed_q = cfg.rc_min_quantizer; - oxcf->worst_allowed_q = cfg.rc_max_quantizer; - oxcf->cq_level = vp8_cfg.cq_level; - oxcf->fixed_q = -1; + oxcf->best_allowed_q = cfg.rc_min_quantizer; + oxcf->worst_allowed_q = cfg.rc_max_quantizer; + oxcf->cq_level = vp8_cfg.cq_level; + oxcf->fixed_q = -1; - oxcf->under_shoot_pct = cfg.rc_undershoot_pct; - oxcf->over_shoot_pct = cfg.rc_overshoot_pct; + oxcf->under_shoot_pct = cfg.rc_undershoot_pct; + oxcf->over_shoot_pct = cfg.rc_overshoot_pct; - oxcf->maximum_buffer_size_in_ms = cfg.rc_buf_sz; - 
oxcf->starting_buffer_level_in_ms = cfg.rc_buf_initial_sz; - oxcf->optimal_buffer_level_in_ms = cfg.rc_buf_optimal_sz; + oxcf->maximum_buffer_size_in_ms = cfg.rc_buf_sz; + oxcf->starting_buffer_level_in_ms = cfg.rc_buf_initial_sz; + oxcf->optimal_buffer_level_in_ms = cfg.rc_buf_optimal_sz; - oxcf->maximum_buffer_size = cfg.rc_buf_sz; - oxcf->starting_buffer_level = cfg.rc_buf_initial_sz; - oxcf->optimal_buffer_level = cfg.rc_buf_optimal_sz; + oxcf->maximum_buffer_size = cfg.rc_buf_sz; + oxcf->starting_buffer_level = cfg.rc_buf_initial_sz; + oxcf->optimal_buffer_level = cfg.rc_buf_optimal_sz; - oxcf->two_pass_vbrbias = cfg.rc_2pass_vbr_bias_pct; - oxcf->two_pass_vbrmin_section = cfg.rc_2pass_vbr_minsection_pct; - oxcf->two_pass_vbrmax_section = cfg.rc_2pass_vbr_maxsection_pct; + oxcf->two_pass_vbrbias = cfg.rc_2pass_vbr_bias_pct; + oxcf->two_pass_vbrmin_section = cfg.rc_2pass_vbr_minsection_pct; + oxcf->two_pass_vbrmax_section = cfg.rc_2pass_vbr_maxsection_pct; - oxcf->auto_key = cfg.kf_mode == VPX_KF_AUTO - && cfg.kf_min_dist != cfg.kf_max_dist; - oxcf->key_freq = cfg.kf_max_dist; + oxcf->auto_key = + cfg.kf_mode == VPX_KF_AUTO && cfg.kf_min_dist != cfg.kf_max_dist; + oxcf->key_freq = cfg.kf_max_dist; - oxcf->number_of_layers = cfg.ts_number_layers; - oxcf->periodicity = cfg.ts_periodicity; + oxcf->number_of_layers = cfg.ts_number_layers; + oxcf->periodicity = cfg.ts_periodicity; - if (oxcf->number_of_layers > 1) - { - memcpy (oxcf->target_bitrate, cfg.ts_target_bitrate, - sizeof(cfg.ts_target_bitrate)); - memcpy (oxcf->rate_decimator, cfg.ts_rate_decimator, - sizeof(cfg.ts_rate_decimator)); - memcpy (oxcf->layer_id, cfg.ts_layer_id, sizeof(cfg.ts_layer_id)); - } + if (oxcf->number_of_layers > 1) { + memcpy(oxcf->target_bitrate, cfg.ts_target_bitrate, + sizeof(cfg.ts_target_bitrate)); + memcpy(oxcf->rate_decimator, cfg.ts_rate_decimator, + sizeof(cfg.ts_rate_decimator)); + memcpy(oxcf->layer_id, cfg.ts_layer_id, sizeof(cfg.ts_layer_id)); + } #if 
CONFIG_MULTI_RES_ENCODING - /* When mr_cfg is NULL, oxcf->mr_total_resolutions and oxcf->mr_encoder_id - * are both memset to 0, which ensures the correct logic under this - * situation. - */ - if(mr_cfg) - { - oxcf->mr_total_resolutions = mr_cfg->mr_total_resolutions; - oxcf->mr_encoder_id = mr_cfg->mr_encoder_id; - oxcf->mr_down_sampling_factor.num = mr_cfg->mr_down_sampling_factor.num; - oxcf->mr_down_sampling_factor.den = mr_cfg->mr_down_sampling_factor.den; - oxcf->mr_low_res_mode_info = mr_cfg->mr_low_res_mode_info; - } + /* When mr_cfg is NULL, oxcf->mr_total_resolutions and oxcf->mr_encoder_id + * are both memset to 0, which ensures the correct logic under this + * situation. + */ + if (mr_cfg) { + oxcf->mr_total_resolutions = mr_cfg->mr_total_resolutions; + oxcf->mr_encoder_id = mr_cfg->mr_encoder_id; + oxcf->mr_down_sampling_factor.num = mr_cfg->mr_down_sampling_factor.num; + oxcf->mr_down_sampling_factor.den = mr_cfg->mr_down_sampling_factor.den; + oxcf->mr_low_res_mode_info = mr_cfg->mr_low_res_mode_info; + } #else - (void)mr_cfg; + (void)mr_cfg; #endif - oxcf->cpu_used = vp8_cfg.cpu_used; - oxcf->encode_breakout = vp8_cfg.static_thresh; - oxcf->play_alternate = vp8_cfg.enable_auto_alt_ref; - oxcf->noise_sensitivity = vp8_cfg.noise_sensitivity; - oxcf->Sharpness = vp8_cfg.Sharpness; - oxcf->token_partitions = vp8_cfg.token_partitions; + oxcf->cpu_used = vp8_cfg.cpu_used; + oxcf->encode_breakout = vp8_cfg.static_thresh; + oxcf->play_alternate = vp8_cfg.enable_auto_alt_ref; + oxcf->noise_sensitivity = vp8_cfg.noise_sensitivity; + oxcf->Sharpness = vp8_cfg.Sharpness; + oxcf->token_partitions = vp8_cfg.token_partitions; - oxcf->two_pass_stats_in = cfg.rc_twopass_stats_in; - oxcf->output_pkt_list = vp8_cfg.pkt_list; + oxcf->two_pass_stats_in = cfg.rc_twopass_stats_in; + oxcf->output_pkt_list = vp8_cfg.pkt_list; - oxcf->arnr_max_frames = vp8_cfg.arnr_max_frames; - oxcf->arnr_strength = vp8_cfg.arnr_strength; - oxcf->arnr_type = vp8_cfg.arnr_type; + 
oxcf->arnr_max_frames = vp8_cfg.arnr_max_frames; + oxcf->arnr_strength = vp8_cfg.arnr_strength; + oxcf->arnr_type = vp8_cfg.arnr_type; - oxcf->tuning = vp8_cfg.tuning; + oxcf->tuning = vp8_cfg.tuning; - oxcf->screen_content_mode = vp8_cfg.screen_content_mode; + oxcf->screen_content_mode = vp8_cfg.screen_content_mode; - /* - printf("Current VP8 Settings: \n"); - printf("target_bandwidth: %d\n", oxcf->target_bandwidth); - printf("noise_sensitivity: %d\n", oxcf->noise_sensitivity); - printf("Sharpness: %d\n", oxcf->Sharpness); - printf("cpu_used: %d\n", oxcf->cpu_used); - printf("Mode: %d\n", oxcf->Mode); - printf("auto_key: %d\n", oxcf->auto_key); - printf("key_freq: %d\n", oxcf->key_freq); - printf("end_usage: %d\n", oxcf->end_usage); - printf("under_shoot_pct: %d\n", oxcf->under_shoot_pct); - printf("over_shoot_pct: %d\n", oxcf->over_shoot_pct); - printf("starting_buffer_level: %d\n", oxcf->starting_buffer_level); - printf("optimal_buffer_level: %d\n", oxcf->optimal_buffer_level); - printf("maximum_buffer_size: %d\n", oxcf->maximum_buffer_size); - printf("fixed_q: %d\n", oxcf->fixed_q); - printf("worst_allowed_q: %d\n", oxcf->worst_allowed_q); - printf("best_allowed_q: %d\n", oxcf->best_allowed_q); - printf("allow_spatial_resampling: %d\n", oxcf->allow_spatial_resampling); - printf("resample_down_water_mark: %d\n", oxcf->resample_down_water_mark); - printf("resample_up_water_mark: %d\n", oxcf->resample_up_water_mark); - printf("allow_df: %d\n", oxcf->allow_df); - printf("drop_frames_water_mark: %d\n", oxcf->drop_frames_water_mark); - printf("two_pass_vbrbias: %d\n", oxcf->two_pass_vbrbias); - printf("two_pass_vbrmin_section: %d\n", oxcf->two_pass_vbrmin_section); - printf("two_pass_vbrmax_section: %d\n", oxcf->two_pass_vbrmax_section); - printf("allow_lag: %d\n", oxcf->allow_lag); - printf("lag_in_frames: %d\n", oxcf->lag_in_frames); - printf("play_alternate: %d\n", oxcf->play_alternate); - printf("Version: %d\n", oxcf->Version); - printf("multi_threaded: %d\n", 
oxcf->multi_threaded); - printf("encode_breakout: %d\n", oxcf->encode_breakout); - */ - return VPX_CODEC_OK; + /* + printf("Current VP8 Settings: \n"); + printf("target_bandwidth: %d\n", oxcf->target_bandwidth); + printf("noise_sensitivity: %d\n", oxcf->noise_sensitivity); + printf("Sharpness: %d\n", oxcf->Sharpness); + printf("cpu_used: %d\n", oxcf->cpu_used); + printf("Mode: %d\n", oxcf->Mode); + printf("auto_key: %d\n", oxcf->auto_key); + printf("key_freq: %d\n", oxcf->key_freq); + printf("end_usage: %d\n", oxcf->end_usage); + printf("under_shoot_pct: %d\n", oxcf->under_shoot_pct); + printf("over_shoot_pct: %d\n", oxcf->over_shoot_pct); + printf("starting_buffer_level: %d\n", oxcf->starting_buffer_level); + printf("optimal_buffer_level: %d\n", oxcf->optimal_buffer_level); + printf("maximum_buffer_size: %d\n", oxcf->maximum_buffer_size); + printf("fixed_q: %d\n", oxcf->fixed_q); + printf("worst_allowed_q: %d\n", oxcf->worst_allowed_q); + printf("best_allowed_q: %d\n", oxcf->best_allowed_q); + printf("allow_spatial_resampling: %d\n", oxcf->allow_spatial_resampling); + printf("resample_down_water_mark: %d\n", oxcf->resample_down_water_mark); + printf("resample_up_water_mark: %d\n", oxcf->resample_up_water_mark); + printf("allow_df: %d\n", oxcf->allow_df); + printf("drop_frames_water_mark: %d\n", oxcf->drop_frames_water_mark); + printf("two_pass_vbrbias: %d\n", oxcf->two_pass_vbrbias); + printf("two_pass_vbrmin_section: %d\n", oxcf->two_pass_vbrmin_section); + printf("two_pass_vbrmax_section: %d\n", oxcf->two_pass_vbrmax_section); + printf("allow_lag: %d\n", oxcf->allow_lag); + printf("lag_in_frames: %d\n", oxcf->lag_in_frames); + printf("play_alternate: %d\n", oxcf->play_alternate); + printf("Version: %d\n", oxcf->Version); + printf("multi_threaded: %d\n", oxcf->multi_threaded); + printf("encode_breakout: %d\n", oxcf->encode_breakout); + */ + return VPX_CODEC_OK; } -static vpx_codec_err_t vp8e_set_config(vpx_codec_alg_priv_t *ctx, - const vpx_codec_enc_cfg_t *cfg) 
-{ - vpx_codec_err_t res; +static vpx_codec_err_t vp8e_set_config(vpx_codec_alg_priv_t *ctx, + const vpx_codec_enc_cfg_t *cfg) { + vpx_codec_err_t res; - if (cfg->g_w != ctx->cfg.g_w || cfg->g_h != ctx->cfg.g_h) - { - if (cfg->g_lag_in_frames > 1 || cfg->g_pass != VPX_RC_ONE_PASS) - ERROR("Cannot change width or height after initialization"); - if ((ctx->cpi->initial_width && (int)cfg->g_w > ctx->cpi->initial_width) || - (ctx->cpi->initial_height && (int)cfg->g_h > ctx->cpi->initial_height)) - ERROR("Cannot increase width or height larger than their initial values"); - } + if (cfg->g_w != ctx->cfg.g_w || cfg->g_h != ctx->cfg.g_h) { + if (cfg->g_lag_in_frames > 1 || cfg->g_pass != VPX_RC_ONE_PASS) + ERROR("Cannot change width or height after initialization"); + if ((ctx->cpi->initial_width && (int)cfg->g_w > ctx->cpi->initial_width) || + (ctx->cpi->initial_height && (int)cfg->g_h > ctx->cpi->initial_height)) + ERROR("Cannot increase width or height larger than their initial values"); + } - /* Prevent increasing lag_in_frames. This check is stricter than it needs - * to be -- the limit is not increasing past the first lag_in_frames - * value, but we don't track the initial config, only the last successful - * config. - */ - if ((cfg->g_lag_in_frames > ctx->cfg.g_lag_in_frames)) - ERROR("Cannot increase lag_in_frames"); + /* Prevent increasing lag_in_frames. This check is stricter than it needs + * to be -- the limit is not increasing past the first lag_in_frames + * value, but we don't track the initial config, only the last successful + * config. 
+ */ + if ((cfg->g_lag_in_frames > ctx->cfg.g_lag_in_frames)) + ERROR("Cannot increase lag_in_frames"); - res = validate_config(ctx, cfg, &ctx->vp8_cfg, 0); + res = validate_config(ctx, cfg, &ctx->vp8_cfg, 0); - if (!res) - { - ctx->cfg = *cfg; - set_vp8e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg, NULL); - vp8_change_config(ctx->cpi, &ctx->oxcf); - } + if (!res) { + ctx->cfg = *cfg; + set_vp8e_config(&ctx->oxcf, ctx->cfg, ctx->vp8_cfg, NULL); + vp8_change_config(ctx->cpi, &ctx->oxcf); + } - return res; + return res; } -static vpx_codec_err_t get_quantizer(vpx_codec_alg_priv_t *ctx, va_list args) -{ +static vpx_codec_err_t get_quantizer(vpx_codec_alg_priv_t *ctx, va_list args) { int *const arg = va_arg(args, int *); - if (arg == NULL) - return VPX_CODEC_INVALID_PARAM; + if (arg == NULL) return VPX_CODEC_INVALID_PARAM; *arg = vp8_get_quantizer(ctx->cpi); return VPX_CODEC_OK; } -static vpx_codec_err_t get_quantizer64(vpx_codec_alg_priv_t *ctx, va_list args) -{ +static vpx_codec_err_t get_quantizer64(vpx_codec_alg_priv_t *ctx, + va_list args) { int *const arg = va_arg(args, int *); - if (arg == NULL) - return VPX_CODEC_INVALID_PARAM; + if (arg == NULL) return VPX_CODEC_INVALID_PARAM; *arg = vp8_reverse_trans(vp8_get_quantizer(ctx->cpi)); return VPX_CODEC_OK; } static vpx_codec_err_t update_extracfg(vpx_codec_alg_priv_t *ctx, - const struct vp8_extracfg *extra_cfg) -{ + const struct vp8_extracfg *extra_cfg) { const vpx_codec_err_t res = validate_config(ctx, &ctx->cfg, extra_cfg, 0); if (res == VPX_CODEC_OK) { ctx->vp8_cfg = *extra_cfg; @@ -509,92 +478,80 @@ static vpx_codec_err_t update_extracfg(vpx_codec_alg_priv_t *ctx, return res; } -static vpx_codec_err_t set_cpu_used(vpx_codec_alg_priv_t *ctx, va_list args) -{ +static vpx_codec_err_t set_cpu_used(vpx_codec_alg_priv_t *ctx, va_list args) { struct vp8_extracfg extra_cfg = ctx->vp8_cfg; extra_cfg.cpu_used = CAST(VP8E_SET_CPUUSED, args); return update_extracfg(ctx, &extra_cfg); } static vpx_codec_err_t 
set_enable_auto_alt_ref(vpx_codec_alg_priv_t *ctx, - va_list args) -{ + va_list args) { struct vp8_extracfg extra_cfg = ctx->vp8_cfg; extra_cfg.enable_auto_alt_ref = CAST(VP8E_SET_ENABLEAUTOALTREF, args); return update_extracfg(ctx, &extra_cfg); } static vpx_codec_err_t set_noise_sensitivity(vpx_codec_alg_priv_t *ctx, - va_list args) -{ + va_list args) { struct vp8_extracfg extra_cfg = ctx->vp8_cfg; extra_cfg.noise_sensitivity = CAST(VP8E_SET_NOISE_SENSITIVITY, args); return update_extracfg(ctx, &extra_cfg); } -static vpx_codec_err_t set_sharpness(vpx_codec_alg_priv_t *ctx, va_list args) -{ +static vpx_codec_err_t set_sharpness(vpx_codec_alg_priv_t *ctx, va_list args) { struct vp8_extracfg extra_cfg = ctx->vp8_cfg; extra_cfg.Sharpness = CAST(VP8E_SET_SHARPNESS, args); return update_extracfg(ctx, &extra_cfg); } static vpx_codec_err_t set_static_thresh(vpx_codec_alg_priv_t *ctx, - va_list args) -{ + va_list args) { struct vp8_extracfg extra_cfg = ctx->vp8_cfg; extra_cfg.static_thresh = CAST(VP8E_SET_STATIC_THRESHOLD, args); return update_extracfg(ctx, &extra_cfg); } static vpx_codec_err_t set_token_partitions(vpx_codec_alg_priv_t *ctx, - va_list args) -{ + va_list args) { struct vp8_extracfg extra_cfg = ctx->vp8_cfg; extra_cfg.token_partitions = CAST(VP8E_SET_TOKEN_PARTITIONS, args); return update_extracfg(ctx, &extra_cfg); } static vpx_codec_err_t set_arnr_max_frames(vpx_codec_alg_priv_t *ctx, - va_list args) -{ + va_list args) { struct vp8_extracfg extra_cfg = ctx->vp8_cfg; extra_cfg.arnr_max_frames = CAST(VP8E_SET_ARNR_MAXFRAMES, args); return update_extracfg(ctx, &extra_cfg); } static vpx_codec_err_t set_arnr_strength(vpx_codec_alg_priv_t *ctx, - va_list args) -{ + va_list args) { struct vp8_extracfg extra_cfg = ctx->vp8_cfg; extra_cfg.arnr_strength = CAST(VP8E_SET_ARNR_STRENGTH, args); return update_extracfg(ctx, &extra_cfg); } -static vpx_codec_err_t set_arnr_type(vpx_codec_alg_priv_t *ctx, va_list args) -{ +static vpx_codec_err_t 
set_arnr_type(vpx_codec_alg_priv_t *ctx, va_list args) { struct vp8_extracfg extra_cfg = ctx->vp8_cfg; extra_cfg.arnr_type = CAST(VP8E_SET_ARNR_TYPE, args); return update_extracfg(ctx, &extra_cfg); } -static vpx_codec_err_t set_tuning(vpx_codec_alg_priv_t *ctx, va_list args) -{ +static vpx_codec_err_t set_tuning(vpx_codec_alg_priv_t *ctx, va_list args) { struct vp8_extracfg extra_cfg = ctx->vp8_cfg; extra_cfg.tuning = CAST(VP8E_SET_TUNING, args); return update_extracfg(ctx, &extra_cfg); } -static vpx_codec_err_t set_cq_level(vpx_codec_alg_priv_t *ctx, va_list args) -{ +static vpx_codec_err_t set_cq_level(vpx_codec_alg_priv_t *ctx, va_list args) { struct vp8_extracfg extra_cfg = ctx->vp8_cfg; extra_cfg.cq_level = CAST(VP8E_SET_CQ_LEVEL, args); return update_extracfg(ctx, &extra_cfg); } static vpx_codec_err_t set_rc_max_intra_bitrate_pct(vpx_codec_alg_priv_t *ctx, - va_list args) -{ + va_list args) { struct vp8_extracfg extra_cfg = ctx->vp8_cfg; extra_cfg.rc_max_intra_bitrate_pct = CAST(VP8E_SET_MAX_INTRA_BITRATE_PCT, args); @@ -602,770 +559,694 @@ static vpx_codec_err_t set_rc_max_intra_bitrate_pct(vpx_codec_alg_priv_t *ctx, } static vpx_codec_err_t set_screen_content_mode(vpx_codec_alg_priv_t *ctx, - va_list args) -{ + va_list args) { struct vp8_extracfg extra_cfg = ctx->vp8_cfg; - extra_cfg.screen_content_mode = - CAST(VP8E_SET_SCREEN_CONTENT_MODE, args); + extra_cfg.screen_content_mode = CAST(VP8E_SET_SCREEN_CONTENT_MODE, args); return update_extracfg(ctx, &extra_cfg); } static vpx_codec_err_t vp8e_mr_alloc_mem(const vpx_codec_enc_cfg_t *cfg, - void **mem_loc) -{ - vpx_codec_err_t res = 0; + void **mem_loc) { + vpx_codec_err_t res = 0; #if CONFIG_MULTI_RES_ENCODING - LOWER_RES_FRAME_INFO *shared_mem_loc; - int mb_rows = ((cfg->g_w + 15) >>4); - int mb_cols = ((cfg->g_h + 15) >>4); + LOWER_RES_FRAME_INFO *shared_mem_loc; + int mb_rows = ((cfg->g_w + 15) >> 4); + int mb_cols = ((cfg->g_h + 15) >> 4); - shared_mem_loc = calloc(1, sizeof(LOWER_RES_FRAME_INFO)); - 
if(!shared_mem_loc) - { - res = VPX_CODEC_MEM_ERROR; - } + shared_mem_loc = calloc(1, sizeof(LOWER_RES_FRAME_INFO)); + if (!shared_mem_loc) { + res = VPX_CODEC_MEM_ERROR; + } - shared_mem_loc->mb_info = calloc(mb_rows*mb_cols, sizeof(LOWER_RES_MB_INFO)); - if(!(shared_mem_loc->mb_info)) - { - res = VPX_CODEC_MEM_ERROR; - } - else - { - *mem_loc = (void *)shared_mem_loc; - res = VPX_CODEC_OK; - } + shared_mem_loc->mb_info = + calloc(mb_rows * mb_cols, sizeof(LOWER_RES_MB_INFO)); + if (!(shared_mem_loc->mb_info)) { + res = VPX_CODEC_MEM_ERROR; + } else { + *mem_loc = (void *)shared_mem_loc; + res = VPX_CODEC_OK; + } #else - (void)cfg; - (void)mem_loc; + (void)cfg; + (void)mem_loc; #endif - return res; + return res; } static vpx_codec_err_t vp8e_init(vpx_codec_ctx_t *ctx, - vpx_codec_priv_enc_mr_cfg_t *mr_cfg) -{ - vpx_codec_err_t res = VPX_CODEC_OK; + vpx_codec_priv_enc_mr_cfg_t *mr_cfg) { + vpx_codec_err_t res = VPX_CODEC_OK; + vp8_rtcd(); + vpx_dsp_rtcd(); + vpx_scale_rtcd(); - vp8_rtcd(); - vpx_dsp_rtcd(); - vpx_scale_rtcd(); + if (!ctx->priv) { + struct vpx_codec_alg_priv *priv = + (struct vpx_codec_alg_priv *)vpx_calloc(1, sizeof(*priv)); - if (!ctx->priv) - { - struct vpx_codec_alg_priv *priv = - (struct vpx_codec_alg_priv *)vpx_calloc(1, sizeof(*priv)); - - if (!priv) - { - return VPX_CODEC_MEM_ERROR; - } - - ctx->priv = (vpx_codec_priv_t *)priv; - ctx->priv->init_flags = ctx->init_flags; - - if (ctx->config.enc) - { - /* Update the reference to the config structure to an - * internal copy. 
- */ - priv->cfg = *ctx->config.enc; - ctx->config.enc = &priv->cfg; - } - - priv->vp8_cfg = default_extracfg; - priv->vp8_cfg.pkt_list = &priv->pkt_list.head; - - priv->cx_data_sz = priv->cfg.g_w * priv->cfg.g_h * 3 / 2 * 2; - - if (priv->cx_data_sz < 32768) priv->cx_data_sz = 32768; - - priv->cx_data = malloc(priv->cx_data_sz); - - if (!priv->cx_data) - { - return VPX_CODEC_MEM_ERROR; - } - - if(mr_cfg) - ctx->priv->enc.total_encoders = mr_cfg->mr_total_resolutions; - else - ctx->priv->enc.total_encoders = 1; - - once(vp8_initialize_enc); - - res = validate_config(priv, &priv->cfg, &priv->vp8_cfg, 0); - - if (!res) - { - set_vp8e_config(&priv->oxcf, priv->cfg, priv->vp8_cfg, mr_cfg); - priv->cpi = vp8_create_compressor(&priv->oxcf); - if (!priv->cpi) - res = VPX_CODEC_MEM_ERROR; - } + if (!priv) { + return VPX_CODEC_MEM_ERROR; } - return res; + ctx->priv = (vpx_codec_priv_t *)priv; + ctx->priv->init_flags = ctx->init_flags; + + if (ctx->config.enc) { + /* Update the reference to the config structure to an + * internal copy. 
+ */ + priv->cfg = *ctx->config.enc; + ctx->config.enc = &priv->cfg; + } + + priv->vp8_cfg = default_extracfg; + priv->vp8_cfg.pkt_list = &priv->pkt_list.head; + + priv->cx_data_sz = priv->cfg.g_w * priv->cfg.g_h * 3 / 2 * 2; + + if (priv->cx_data_sz < 32768) priv->cx_data_sz = 32768; + + priv->cx_data = malloc(priv->cx_data_sz); + + if (!priv->cx_data) { + return VPX_CODEC_MEM_ERROR; + } + + if (mr_cfg) { + ctx->priv->enc.total_encoders = mr_cfg->mr_total_resolutions; + } else { + ctx->priv->enc.total_encoders = 1; + } + + once(vp8_initialize_enc); + + res = validate_config(priv, &priv->cfg, &priv->vp8_cfg, 0); + + if (!res) { + set_vp8e_config(&priv->oxcf, priv->cfg, priv->vp8_cfg, mr_cfg); + priv->cpi = vp8_create_compressor(&priv->oxcf); + if (!priv->cpi) res = VPX_CODEC_MEM_ERROR; + } + } + + return res; } -static vpx_codec_err_t vp8e_destroy(vpx_codec_alg_priv_t *ctx) -{ +static vpx_codec_err_t vp8e_destroy(vpx_codec_alg_priv_t *ctx) { #if CONFIG_MULTI_RES_ENCODING - /* Free multi-encoder shared memory */ - if (ctx->oxcf.mr_total_resolutions > 0 && (ctx->oxcf.mr_encoder_id == ctx->oxcf.mr_total_resolutions-1)) - { - LOWER_RES_FRAME_INFO *shared_mem_loc = (LOWER_RES_FRAME_INFO *)ctx->oxcf.mr_low_res_mode_info; - free(shared_mem_loc->mb_info); - free(ctx->oxcf.mr_low_res_mode_info); - } + /* Free multi-encoder shared memory */ + if (ctx->oxcf.mr_total_resolutions > 0 && + (ctx->oxcf.mr_encoder_id == ctx->oxcf.mr_total_resolutions - 1)) { + LOWER_RES_FRAME_INFO *shared_mem_loc = + (LOWER_RES_FRAME_INFO *)ctx->oxcf.mr_low_res_mode_info; + free(shared_mem_loc->mb_info); + free(ctx->oxcf.mr_low_res_mode_info); + } #endif - free(ctx->cx_data); - vp8_remove_compressor(&ctx->cpi); - vpx_free(ctx); - return VPX_CODEC_OK; + free(ctx->cx_data); + vp8_remove_compressor(&ctx->cpi); + vpx_free(ctx); + return VPX_CODEC_OK; } -static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img, - YV12_BUFFER_CONFIG *yv12) -{ - const int y_w = img->d_w; - const int y_h = img->d_h; - 
const int uv_w = (img->d_w + 1) / 2; - const int uv_h = (img->d_h + 1) / 2; - vpx_codec_err_t res = VPX_CODEC_OK; - yv12->y_buffer = img->planes[VPX_PLANE_Y]; - yv12->u_buffer = img->planes[VPX_PLANE_U]; - yv12->v_buffer = img->planes[VPX_PLANE_V]; +static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img, + YV12_BUFFER_CONFIG *yv12) { + const int y_w = img->d_w; + const int y_h = img->d_h; + const int uv_w = (img->d_w + 1) / 2; + const int uv_h = (img->d_h + 1) / 2; + vpx_codec_err_t res = VPX_CODEC_OK; + yv12->y_buffer = img->planes[VPX_PLANE_Y]; + yv12->u_buffer = img->planes[VPX_PLANE_U]; + yv12->v_buffer = img->planes[VPX_PLANE_V]; - yv12->y_crop_width = y_w; - yv12->y_crop_height = y_h; - yv12->y_width = y_w; - yv12->y_height = y_h; - yv12->uv_crop_width = uv_w; - yv12->uv_crop_height = uv_h; - yv12->uv_width = uv_w; - yv12->uv_height = uv_h; + yv12->y_crop_width = y_w; + yv12->y_crop_height = y_h; + yv12->y_width = y_w; + yv12->y_height = y_h; + yv12->uv_crop_width = uv_w; + yv12->uv_crop_height = uv_h; + yv12->uv_width = uv_w; + yv12->uv_height = uv_h; - yv12->y_stride = img->stride[VPX_PLANE_Y]; - yv12->uv_stride = img->stride[VPX_PLANE_U]; + yv12->y_stride = img->stride[VPX_PLANE_Y]; + yv12->uv_stride = img->stride[VPX_PLANE_U]; - yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2; - return res; + yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2; + return res; } -static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx, - unsigned long duration, - unsigned long deadline) -{ - unsigned int new_qc; +static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx, + unsigned long duration, + unsigned long deadline) { + int new_qc; #if !(CONFIG_REALTIME_ONLY) - /* Use best quality mode if no deadline is given. */ - new_qc = MODE_BESTQUALITY; + /* Use best quality mode if no deadline is given. 
*/ + new_qc = MODE_BESTQUALITY; - if (deadline) - { - uint64_t duration_us; + if (deadline) { + uint64_t duration_us; - /* Convert duration parameter from stream timebase to microseconds */ - duration_us = (uint64_t)duration * 1000000 - * (uint64_t)ctx->cfg.g_timebase.num - / (uint64_t)ctx->cfg.g_timebase.den; + /* Convert duration parameter from stream timebase to microseconds */ + duration_us = (uint64_t)duration * 1000000 * + (uint64_t)ctx->cfg.g_timebase.num / + (uint64_t)ctx->cfg.g_timebase.den; - /* If the deadline is more that the duration this frame is to be shown, - * use good quality mode. Otherwise use realtime mode. - */ - new_qc = (deadline > duration_us) ? MODE_GOODQUALITY : MODE_REALTIME; - } + /* If the deadline is more that the duration this frame is to be shown, + * use good quality mode. Otherwise use realtime mode. + */ + new_qc = (deadline > duration_us) ? MODE_GOODQUALITY : MODE_REALTIME; + } #else - new_qc = MODE_REALTIME; + (void)duration; + new_qc = MODE_REALTIME; #endif - if (ctx->cfg.g_pass == VPX_RC_FIRST_PASS) - new_qc = MODE_FIRSTPASS; - else if (ctx->cfg.g_pass == VPX_RC_LAST_PASS) - new_qc = (new_qc == MODE_BESTQUALITY) - ? MODE_SECONDPASS_BEST - : MODE_SECONDPASS; + if (deadline == VPX_DL_REALTIME) { + new_qc = MODE_REALTIME; + } else if (ctx->cfg.g_pass == VPX_RC_FIRST_PASS) { + new_qc = MODE_FIRSTPASS; + } else if (ctx->cfg.g_pass == VPX_RC_LAST_PASS) { + new_qc = + (new_qc == MODE_BESTQUALITY) ? 
MODE_SECONDPASS_BEST : MODE_SECONDPASS; + } - if (ctx->oxcf.Mode != new_qc) - { - ctx->oxcf.Mode = new_qc; - vp8_change_config(ctx->cpi, &ctx->oxcf); - } + if (ctx->oxcf.Mode != new_qc) { + ctx->oxcf.Mode = new_qc; + vp8_change_config(ctx->cpi, &ctx->oxcf); + } } static vpx_codec_err_t set_reference_and_update(vpx_codec_alg_priv_t *ctx, - int flags) -{ + vpx_enc_frame_flags_t flags) { + /* Handle Flags */ + if (((flags & VP8_EFLAG_NO_UPD_GF) && (flags & VP8_EFLAG_FORCE_GF)) || + ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) { + ctx->base.err_detail = "Conflicting flags."; + return VPX_CODEC_INVALID_PARAM; + } - /* Handle Flags */ - if (((flags & VP8_EFLAG_NO_UPD_GF) && (flags & VP8_EFLAG_FORCE_GF)) - || ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) - { - ctx->base.err_detail = "Conflicting flags."; - return VPX_CODEC_INVALID_PARAM; - } + if (flags & + (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF)) { + int ref = 7; - if (flags & (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF - | VP8_EFLAG_NO_REF_ARF)) - { - int ref = 7; + if (flags & VP8_EFLAG_NO_REF_LAST) ref ^= VP8_LAST_FRAME; - if (flags & VP8_EFLAG_NO_REF_LAST) - ref ^= VP8_LAST_FRAME; + if (flags & VP8_EFLAG_NO_REF_GF) ref ^= VP8_GOLD_FRAME; - if (flags & VP8_EFLAG_NO_REF_GF) - ref ^= VP8_GOLD_FRAME; + if (flags & VP8_EFLAG_NO_REF_ARF) ref ^= VP8_ALTR_FRAME; - if (flags & VP8_EFLAG_NO_REF_ARF) - ref ^= VP8_ALTR_FRAME; + vp8_use_as_reference(ctx->cpi, ref); + } - vp8_use_as_reference(ctx->cpi, ref); - } + if (flags & + (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | + VP8_EFLAG_FORCE_GF | VP8_EFLAG_FORCE_ARF)) { + int upd = 7; - if (flags & (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF - | VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_FORCE_GF - | VP8_EFLAG_FORCE_ARF)) - { - int upd = 7; + if (flags & VP8_EFLAG_NO_UPD_LAST) upd ^= VP8_LAST_FRAME; - if (flags & VP8_EFLAG_NO_UPD_LAST) - upd ^= VP8_LAST_FRAME; + if (flags & VP8_EFLAG_NO_UPD_GF) 
upd ^= VP8_GOLD_FRAME; - if (flags & VP8_EFLAG_NO_UPD_GF) - upd ^= VP8_GOLD_FRAME; + if (flags & VP8_EFLAG_NO_UPD_ARF) upd ^= VP8_ALTR_FRAME; - if (flags & VP8_EFLAG_NO_UPD_ARF) - upd ^= VP8_ALTR_FRAME; + vp8_update_reference(ctx->cpi, upd); + } - vp8_update_reference(ctx->cpi, upd); - } + if (flags & VP8_EFLAG_NO_UPD_ENTROPY) { + vp8_update_entropy(ctx->cpi, 0); + } - if (flags & VP8_EFLAG_NO_UPD_ENTROPY) - { - vp8_update_entropy(ctx->cpi, 0); - } - - return VPX_CODEC_OK; + return VPX_CODEC_OK; } -static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx, - const vpx_image_t *img, - vpx_codec_pts_t pts, - unsigned long duration, - vpx_enc_frame_flags_t flags, - unsigned long deadline) -{ - vpx_codec_err_t res = VPX_CODEC_OK; +static vpx_codec_err_t vp8e_encode(vpx_codec_alg_priv_t *ctx, + const vpx_image_t *img, vpx_codec_pts_t pts, + unsigned long duration, + vpx_enc_frame_flags_t flags, + unsigned long deadline) { + vpx_codec_err_t res = VPX_CODEC_OK; - if (!ctx->cfg.rc_target_bitrate) - return res; + if (!ctx->cfg.rc_target_bitrate) return res; - if (img) - res = validate_img(ctx, img); + if (img) res = validate_img(ctx, img); - if (!res) - res = validate_config(ctx, &ctx->cfg, &ctx->vp8_cfg, 1); + if (!res) res = validate_config(ctx, &ctx->cfg, &ctx->vp8_cfg, 1); - pick_quickcompress_mode(ctx, duration, deadline); - vpx_codec_pkt_list_init(&ctx->pkt_list); + pick_quickcompress_mode(ctx, duration, deadline); + vpx_codec_pkt_list_init(&ctx->pkt_list); - // If no flags are set in the encode call, then use the frame flags as - // defined via the control function: vp8e_set_frame_flags. - if (!flags) { - flags = ctx->control_frame_flags; + // If no flags are set in the encode call, then use the frame flags as + // defined via the control function: vp8e_set_frame_flags. 
+ if (!flags) { + flags = ctx->control_frame_flags; + } + ctx->control_frame_flags = 0; + + if (!res) res = set_reference_and_update(ctx, flags); + + /* Handle fixed keyframe intervals */ + if (ctx->cfg.kf_mode == VPX_KF_AUTO && + ctx->cfg.kf_min_dist == ctx->cfg.kf_max_dist) { + if (++ctx->fixed_kf_cntr > ctx->cfg.kf_min_dist) { + flags |= VPX_EFLAG_FORCE_KF; + ctx->fixed_kf_cntr = 1; } - ctx->control_frame_flags = 0; + } - if (!res) - res = set_reference_and_update(ctx, flags); + /* Initialize the encoder instance on the first frame*/ + if (!res && ctx->cpi) { + unsigned int lib_flags; + YV12_BUFFER_CONFIG sd; + int64_t dst_time_stamp, dst_end_time_stamp; + size_t size, cx_data_sz; + unsigned char *cx_data; + unsigned char *cx_data_end; + int comp_data_state = 0; - /* Handle fixed keyframe intervals */ - if (ctx->cfg.kf_mode == VPX_KF_AUTO - && ctx->cfg.kf_min_dist == ctx->cfg.kf_max_dist) - { - if (++ctx->fixed_kf_cntr > ctx->cfg.kf_min_dist) - { - flags |= VPX_EFLAG_FORCE_KF; - ctx->fixed_kf_cntr = 1; - } + /* Set up internal flags */ + if (ctx->base.init_flags & VPX_CODEC_USE_PSNR) { + ((VP8_COMP *)ctx->cpi)->b_calculate_psnr = 1; } - /* Initialize the encoder instance on the first frame*/ - if (!res && ctx->cpi) - { - unsigned int lib_flags; - YV12_BUFFER_CONFIG sd; - int64_t dst_time_stamp, dst_end_time_stamp; - unsigned long size, cx_data_sz; - unsigned char *cx_data; - unsigned char *cx_data_end; - int comp_data_state = 0; + if (ctx->base.init_flags & VPX_CODEC_USE_OUTPUT_PARTITION) { + ((VP8_COMP *)ctx->cpi)->output_partition = 1; + } - /* Set up internal flags */ - if (ctx->base.init_flags & VPX_CODEC_USE_PSNR) - ((VP8_COMP *)ctx->cpi)->b_calculate_psnr = 1; + /* Convert API flags to internal codec lib flags */ + lib_flags = (flags & VPX_EFLAG_FORCE_KF) ? 
FRAMEFLAGS_KEY : 0; - if (ctx->base.init_flags & VPX_CODEC_USE_OUTPUT_PARTITION) - ((VP8_COMP *)ctx->cpi)->output_partition = 1; + /* vp8 use 10,000,000 ticks/second as time stamp */ + dst_time_stamp = + pts * 10000000 * ctx->cfg.g_timebase.num / ctx->cfg.g_timebase.den; + dst_end_time_stamp = (pts + duration) * 10000000 * ctx->cfg.g_timebase.num / + ctx->cfg.g_timebase.den; - /* Convert API flags to internal codec lib flags */ - lib_flags = (flags & VPX_EFLAG_FORCE_KF) ? FRAMEFLAGS_KEY : 0; + if (img != NULL) { + res = image2yuvconfig(img, &sd); - /* vp8 use 10,000,000 ticks/second as time stamp */ - dst_time_stamp = pts * 10000000 * ctx->cfg.g_timebase.num / ctx->cfg.g_timebase.den; - dst_end_time_stamp = (pts + duration) * 10000000 * ctx->cfg.g_timebase.num / ctx->cfg.g_timebase.den; + if (vp8_receive_raw_frame(ctx->cpi, ctx->next_frame_flag | lib_flags, &sd, + dst_time_stamp, dst_end_time_stamp)) { + VP8_COMP *cpi = (VP8_COMP *)ctx->cpi; + res = update_error_state(ctx, &cpi->common.error); + } - if (img != NULL) - { - res = image2yuvconfig(img, &sd); + /* reset for next frame */ + ctx->next_frame_flag = 0; + } - if (vp8_receive_raw_frame(ctx->cpi, ctx->next_frame_flag | lib_flags, - &sd, dst_time_stamp, dst_end_time_stamp)) - { - VP8_COMP *cpi = (VP8_COMP *)ctx->cpi; - res = update_error_state(ctx, &cpi->common.error); - } + cx_data = ctx->cx_data; + cx_data_sz = ctx->cx_data_sz; + cx_data_end = ctx->cx_data + cx_data_sz; + lib_flags = 0; - /* reset for next frame */ - ctx->next_frame_flag = 0; + while (cx_data_sz >= ctx->cx_data_sz / 2) { + comp_data_state = vp8_get_compressed_data( + ctx->cpi, &lib_flags, &size, cx_data, cx_data_end, &dst_time_stamp, + &dst_end_time_stamp, !img); + + if (comp_data_state == VPX_CODEC_CORRUPT_FRAME) { + return VPX_CODEC_CORRUPT_FRAME; + } else if (comp_data_state == -1) { + break; + } + + if (size) { + vpx_codec_pts_t round, delta; + vpx_codec_cx_pkt_t pkt; + VP8_COMP *cpi = (VP8_COMP *)ctx->cpi; + + /* Add the frame packet to 
the list of returned packets. */ + round = (vpx_codec_pts_t)10000000 * ctx->cfg.g_timebase.num / 2 - 1; + delta = (dst_end_time_stamp - dst_time_stamp); + pkt.kind = VPX_CODEC_CX_FRAME_PKT; + pkt.data.frame.pts = + (dst_time_stamp * ctx->cfg.g_timebase.den + round) / + ctx->cfg.g_timebase.num / 10000000; + pkt.data.frame.duration = + (unsigned long)((delta * ctx->cfg.g_timebase.den + round) / + ctx->cfg.g_timebase.num / 10000000); + pkt.data.frame.flags = lib_flags << 16; + + if (lib_flags & FRAMEFLAGS_KEY) { + pkt.data.frame.flags |= VPX_FRAME_IS_KEY; } - cx_data = ctx->cx_data; - cx_data_sz = ctx->cx_data_sz; - cx_data_end = ctx->cx_data + cx_data_sz; - lib_flags = 0; + if (!cpi->common.show_frame) { + pkt.data.frame.flags |= VPX_FRAME_IS_INVISIBLE; - while (cx_data_sz >= ctx->cx_data_sz / 2) - { - comp_data_state = vp8_get_compressed_data(ctx->cpi, - &lib_flags, - &size, - cx_data, - cx_data_end, - &dst_time_stamp, - &dst_end_time_stamp, - !img); + /* This timestamp should be as close as possible to the + * prior PTS so that if a decoder uses pts to schedule when + * to do this, we start right after last frame was decoded. + * Invisible frames have no duration. + */ + pkt.data.frame.pts = + ((cpi->last_time_stamp_seen * ctx->cfg.g_timebase.den + round) / + ctx->cfg.g_timebase.num / 10000000) + + 1; + pkt.data.frame.duration = 0; + } - if(comp_data_state == VPX_CODEC_CORRUPT_FRAME) - return VPX_CODEC_CORRUPT_FRAME; - else if(comp_data_state == -1) - break; + if (cpi->droppable) pkt.data.frame.flags |= VPX_FRAME_IS_DROPPABLE; - if (size) - { - vpx_codec_pts_t round, delta; - vpx_codec_cx_pkt_t pkt; - VP8_COMP *cpi = (VP8_COMP *)ctx->cpi; + if (cpi->output_partition) { + int i; + const int num_partitions = + (1 << cpi->common.multi_token_partition) + 1; - /* Add the frame packet to the list of returned packets. 
*/ - round = (vpx_codec_pts_t)10000000 - * ctx->cfg.g_timebase.num / 2 - 1; - delta = (dst_end_time_stamp - dst_time_stamp); - pkt.kind = VPX_CODEC_CX_FRAME_PKT; - pkt.data.frame.pts = - (dst_time_stamp * ctx->cfg.g_timebase.den + round) - / ctx->cfg.g_timebase.num / 10000000; - pkt.data.frame.duration = (unsigned long) - ((delta * ctx->cfg.g_timebase.den + round) - / ctx->cfg.g_timebase.num / 10000000); - pkt.data.frame.flags = lib_flags << 16; + pkt.data.frame.flags |= VPX_FRAME_IS_FRAGMENT; - if (lib_flags & FRAMEFLAGS_KEY) - pkt.data.frame.flags |= VPX_FRAME_IS_KEY; - - if (!cpi->common.show_frame) - { - pkt.data.frame.flags |= VPX_FRAME_IS_INVISIBLE; - - /* This timestamp should be as close as possible to the - * prior PTS so that if a decoder uses pts to schedule when - * to do this, we start right after last frame was decoded. - * Invisible frames have no duration. - */ - pkt.data.frame.pts = ((cpi->last_time_stamp_seen - * ctx->cfg.g_timebase.den + round) - / ctx->cfg.g_timebase.num / 10000000) + 1; - pkt.data.frame.duration = 0; - } - - if (cpi->droppable) - pkt.data.frame.flags |= VPX_FRAME_IS_DROPPABLE; - - if (cpi->output_partition) - { - int i; - const int num_partitions = - (1 << cpi->common.multi_token_partition) + 1; - - pkt.data.frame.flags |= VPX_FRAME_IS_FRAGMENT; - - for (i = 0; i < num_partitions; ++i) - { + for (i = 0; i < num_partitions; ++i) { #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - pkt.data.frame.buf = cpi->partition_d[i]; + pkt.data.frame.buf = cpi->partition_d[i]; #else - pkt.data.frame.buf = cx_data; - cx_data += cpi->partition_sz[i]; - cx_data_sz -= cpi->partition_sz[i]; + pkt.data.frame.buf = cx_data; + cx_data += cpi->partition_sz[i]; + cx_data_sz -= cpi->partition_sz[i]; #endif - pkt.data.frame.sz = cpi->partition_sz[i]; - pkt.data.frame.partition_id = i; - /* don't set the fragment bit for the last partition */ - if (i == (num_partitions - 1)) - pkt.data.frame.flags &= ~VPX_FRAME_IS_FRAGMENT; - 
vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt); - } -#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING - /* In lagged mode the encoder can buffer multiple frames. - * We don't want this in partitioned output because - * partitions are spread all over the output buffer. - * So, force an exit! - */ - cx_data_sz -= ctx->cx_data_sz / 2; -#endif - } - else - { - pkt.data.frame.buf = cx_data; - pkt.data.frame.sz = size; - pkt.data.frame.partition_id = -1; - vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt); - cx_data += size; - cx_data_sz -= size; - } + pkt.data.frame.sz = cpi->partition_sz[i]; + pkt.data.frame.partition_id = i; + /* don't set the fragment bit for the last partition */ + if (i == (num_partitions - 1)) { + pkt.data.frame.flags &= ~VPX_FRAME_IS_FRAGMENT; } + vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt); + } +#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING + /* In lagged mode the encoder can buffer multiple frames. + * We don't want this in partitioned output because + * partitions are spread all over the output buffer. + * So, force an exit! 
+ */ + cx_data_sz -= ctx->cx_data_sz / 2; +#endif + } else { + pkt.data.frame.buf = cx_data; + pkt.data.frame.sz = size; + pkt.data.frame.partition_id = -1; + vpx_codec_pkt_list_add(&ctx->pkt_list.head, &pkt); + cx_data += size; + cx_data_sz -= size; } + } } + } - return res; + return res; } - -static const vpx_codec_cx_pkt_t *vp8e_get_cxdata(vpx_codec_alg_priv_t *ctx, - vpx_codec_iter_t *iter) -{ - return vpx_codec_pkt_list_get(&ctx->pkt_list.head, iter); +static const vpx_codec_cx_pkt_t *vp8e_get_cxdata(vpx_codec_alg_priv_t *ctx, + vpx_codec_iter_t *iter) { + return vpx_codec_pkt_list_get(&ctx->pkt_list.head, iter); } static vpx_codec_err_t vp8e_set_reference(vpx_codec_alg_priv_t *ctx, - va_list args) -{ - vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *); + va_list args) { + vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *); - if (data) - { - vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data; - YV12_BUFFER_CONFIG sd; - - image2yuvconfig(&frame->img, &sd); - vp8_set_reference(ctx->cpi, frame->frame_type, &sd); - return VPX_CODEC_OK; - } - else - return VPX_CODEC_INVALID_PARAM; + if (data) { + vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data; + YV12_BUFFER_CONFIG sd; + image2yuvconfig(&frame->img, &sd); + vp8_set_reference(ctx->cpi, frame->frame_type, &sd); + return VPX_CODEC_OK; + } else { + return VPX_CODEC_INVALID_PARAM; + } } static vpx_codec_err_t vp8e_get_reference(vpx_codec_alg_priv_t *ctx, - va_list args) -{ + va_list args) { + vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *); - vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *); + if (data) { + vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data; + YV12_BUFFER_CONFIG sd; - if (data) - { - vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data; - YV12_BUFFER_CONFIG sd; - - image2yuvconfig(&frame->img, &sd); - vp8_get_reference(ctx->cpi, frame->frame_type, &sd); - return VPX_CODEC_OK; - } - else - return VPX_CODEC_INVALID_PARAM; + image2yuvconfig(&frame->img, &sd); + 
vp8_get_reference(ctx->cpi, frame->frame_type, &sd); + return VPX_CODEC_OK; + } else { + return VPX_CODEC_INVALID_PARAM; + } } static vpx_codec_err_t vp8e_set_previewpp(vpx_codec_alg_priv_t *ctx, - va_list args) -{ + va_list args) { #if CONFIG_POSTPROC - vp8_postproc_cfg_t *data = va_arg(args, vp8_postproc_cfg_t *); + vp8_postproc_cfg_t *data = va_arg(args, vp8_postproc_cfg_t *); - if (data) - { - ctx->preview_ppcfg = *((vp8_postproc_cfg_t *)data); - return VPX_CODEC_OK; - } - else - return VPX_CODEC_INVALID_PARAM; + if (data) { + ctx->preview_ppcfg = *((vp8_postproc_cfg_t *)data); + return VPX_CODEC_OK; + } else { + return VPX_CODEC_INVALID_PARAM; + } #else - (void)ctx; - (void)args; - return VPX_CODEC_INCAPABLE; + (void)ctx; + (void)args; + return VPX_CODEC_INCAPABLE; #endif } +static vpx_image_t *vp8e_get_preview(vpx_codec_alg_priv_t *ctx) { + YV12_BUFFER_CONFIG sd; + vp8_ppflags_t flags; + vp8_zero(flags); -static vpx_image_t *vp8e_get_preview(vpx_codec_alg_priv_t *ctx) -{ + if (ctx->preview_ppcfg.post_proc_flag) { + flags.post_proc_flag = ctx->preview_ppcfg.post_proc_flag; + flags.deblocking_level = ctx->preview_ppcfg.deblocking_level; + flags.noise_level = ctx->preview_ppcfg.noise_level; + } - YV12_BUFFER_CONFIG sd; - vp8_ppflags_t flags = {0}; + if (0 == vp8_get_preview_raw_frame(ctx->cpi, &sd, &flags)) { + /* + vpx_img_wrap(&ctx->preview_img, VPX_IMG_FMT_YV12, + sd.y_width + 2*VP8BORDERINPIXELS, + sd.y_height + 2*VP8BORDERINPIXELS, + 1, + sd.buffer_alloc); + vpx_img_set_rect(&ctx->preview_img, + VP8BORDERINPIXELS, VP8BORDERINPIXELS, + sd.y_width, sd.y_height); + */ - if (ctx->preview_ppcfg.post_proc_flag) - { - flags.post_proc_flag = ctx->preview_ppcfg.post_proc_flag; - flags.deblocking_level = ctx->preview_ppcfg.deblocking_level; - flags.noise_level = ctx->preview_ppcfg.noise_level; - } + ctx->preview_img.bps = 12; + ctx->preview_img.planes[VPX_PLANE_Y] = sd.y_buffer; + ctx->preview_img.planes[VPX_PLANE_U] = sd.u_buffer; + 
ctx->preview_img.planes[VPX_PLANE_V] = sd.v_buffer; - if (0 == vp8_get_preview_raw_frame(ctx->cpi, &sd, &flags)) - { + ctx->preview_img.fmt = VPX_IMG_FMT_I420; + ctx->preview_img.x_chroma_shift = 1; + ctx->preview_img.y_chroma_shift = 1; - /* - vpx_img_wrap(&ctx->preview_img, VPX_IMG_FMT_YV12, - sd.y_width + 2*VP8BORDERINPIXELS, - sd.y_height + 2*VP8BORDERINPIXELS, - 1, - sd.buffer_alloc); - vpx_img_set_rect(&ctx->preview_img, - VP8BORDERINPIXELS, VP8BORDERINPIXELS, - sd.y_width, sd.y_height); - */ + ctx->preview_img.d_w = sd.y_width; + ctx->preview_img.d_h = sd.y_height; + ctx->preview_img.stride[VPX_PLANE_Y] = sd.y_stride; + ctx->preview_img.stride[VPX_PLANE_U] = sd.uv_stride; + ctx->preview_img.stride[VPX_PLANE_V] = sd.uv_stride; + ctx->preview_img.w = sd.y_width; + ctx->preview_img.h = sd.y_height; - ctx->preview_img.bps = 12; - ctx->preview_img.planes[VPX_PLANE_Y] = sd.y_buffer; - ctx->preview_img.planes[VPX_PLANE_U] = sd.u_buffer; - ctx->preview_img.planes[VPX_PLANE_V] = sd.v_buffer; - - ctx->preview_img.fmt = VPX_IMG_FMT_I420; - ctx->preview_img.x_chroma_shift = 1; - ctx->preview_img.y_chroma_shift = 1; - - ctx->preview_img.d_w = sd.y_width; - ctx->preview_img.d_h = sd.y_height; - ctx->preview_img.stride[VPX_PLANE_Y] = sd.y_stride; - ctx->preview_img.stride[VPX_PLANE_U] = sd.uv_stride; - ctx->preview_img.stride[VPX_PLANE_V] = sd.uv_stride; - ctx->preview_img.w = sd.y_width; - ctx->preview_img.h = sd.y_height; - - return &ctx->preview_img; - } - else - return NULL; + return &ctx->preview_img; + } else { + return NULL; + } } static vpx_codec_err_t vp8e_set_frame_flags(vpx_codec_alg_priv_t *ctx, - va_list args) -{ - int frame_flags = va_arg(args, int); - ctx->control_frame_flags = frame_flags; - return set_reference_and_update(ctx, frame_flags); + va_list args) { + int frame_flags = va_arg(args, int); + ctx->control_frame_flags = frame_flags; + return set_reference_and_update(ctx, frame_flags); } static vpx_codec_err_t 
vp8e_set_temporal_layer_id(vpx_codec_alg_priv_t *ctx, - va_list args) -{ - int layer_id = va_arg(args, int); - if (layer_id < 0 || layer_id >= (int)ctx->cfg.ts_number_layers) { - return VPX_CODEC_INVALID_PARAM; - } - ctx->cpi->temporal_layer_id = layer_id; - return VPX_CODEC_OK; + va_list args) { + int layer_id = va_arg(args, int); + if (layer_id < 0 || layer_id >= (int)ctx->cfg.ts_number_layers) { + return VPX_CODEC_INVALID_PARAM; + } + ctx->cpi->temporal_layer_id = layer_id; + return VPX_CODEC_OK; } static vpx_codec_err_t vp8e_set_roi_map(vpx_codec_alg_priv_t *ctx, - va_list args) -{ - vpx_roi_map_t *data = va_arg(args, vpx_roi_map_t *); + va_list args) { + vpx_roi_map_t *data = va_arg(args, vpx_roi_map_t *); - if (data) - { - vpx_roi_map_t *roi = (vpx_roi_map_t *)data; + if (data) { + vpx_roi_map_t *roi = (vpx_roi_map_t *)data; - if (!vp8_set_roimap(ctx->cpi, roi->roi_map, roi->rows, roi->cols, roi->delta_q, roi->delta_lf, roi->static_threshold)) - return VPX_CODEC_OK; - else - return VPX_CODEC_INVALID_PARAM; + if (!vp8_set_roimap(ctx->cpi, roi->roi_map, roi->rows, roi->cols, + roi->delta_q, roi->delta_lf, roi->static_threshold)) { + return VPX_CODEC_OK; + } else { + return VPX_CODEC_INVALID_PARAM; } - else - return VPX_CODEC_INVALID_PARAM; + } else { + return VPX_CODEC_INVALID_PARAM; + } } - static vpx_codec_err_t vp8e_set_activemap(vpx_codec_alg_priv_t *ctx, - va_list args) -{ - vpx_active_map_t *data = va_arg(args, vpx_active_map_t *); + va_list args) { + vpx_active_map_t *data = va_arg(args, vpx_active_map_t *); - if (data) - { + if (data) { + vpx_active_map_t *map = (vpx_active_map_t *)data; - vpx_active_map_t *map = (vpx_active_map_t *)data; - - if (!vp8_set_active_map(ctx->cpi, map->active_map, map->rows, map->cols)) - return VPX_CODEC_OK; - else - return VPX_CODEC_INVALID_PARAM; + if (!vp8_set_active_map(ctx->cpi, map->active_map, map->rows, map->cols)) { + return VPX_CODEC_OK; + } else { + return VPX_CODEC_INVALID_PARAM; } - else - return 
VPX_CODEC_INVALID_PARAM; + } else { + return VPX_CODEC_INVALID_PARAM; + } } static vpx_codec_err_t vp8e_set_scalemode(vpx_codec_alg_priv_t *ctx, - va_list args) -{ + va_list args) { + vpx_scaling_mode_t *data = va_arg(args, vpx_scaling_mode_t *); - vpx_scaling_mode_t *data = va_arg(args, vpx_scaling_mode_t *); + if (data) { + int res; + vpx_scaling_mode_t scalemode = *(vpx_scaling_mode_t *)data; + res = vp8_set_internal_size(ctx->cpi, (VPX_SCALING)scalemode.h_scaling_mode, + (VPX_SCALING)scalemode.v_scaling_mode); - if (data) - { - int res; - vpx_scaling_mode_t scalemode = *(vpx_scaling_mode_t *)data ; - res = vp8_set_internal_size(ctx->cpi, - (VPX_SCALING)scalemode.h_scaling_mode, - (VPX_SCALING)scalemode.v_scaling_mode); - - if (!res) - { - /*force next frame a key frame to effect scaling mode */ - ctx->next_frame_flag |= FRAMEFLAGS_KEY; - return VPX_CODEC_OK; - } - else - return VPX_CODEC_INVALID_PARAM; + if (!res) { + /*force next frame a key frame to effect scaling mode */ + ctx->next_frame_flag |= FRAMEFLAGS_KEY; + return VPX_CODEC_OK; + } else { + return VPX_CODEC_INVALID_PARAM; } - else - return VPX_CODEC_INVALID_PARAM; + } else { + return VPX_CODEC_INVALID_PARAM; + } } - -static vpx_codec_ctrl_fn_map_t vp8e_ctf_maps[] = -{ - {VP8_SET_REFERENCE, vp8e_set_reference}, - {VP8_COPY_REFERENCE, vp8e_get_reference}, - {VP8_SET_POSTPROC, vp8e_set_previewpp}, - {VP8E_SET_FRAME_FLAGS, vp8e_set_frame_flags}, - {VP8E_SET_TEMPORAL_LAYER_ID, vp8e_set_temporal_layer_id}, - {VP8E_SET_ROI_MAP, vp8e_set_roi_map}, - {VP8E_SET_ACTIVEMAP, vp8e_set_activemap}, - {VP8E_SET_SCALEMODE, vp8e_set_scalemode}, - {VP8E_SET_CPUUSED, set_cpu_used}, - {VP8E_SET_NOISE_SENSITIVITY, set_noise_sensitivity}, - {VP8E_SET_ENABLEAUTOALTREF, set_enable_auto_alt_ref}, - {VP8E_SET_SHARPNESS, set_sharpness}, - {VP8E_SET_STATIC_THRESHOLD, set_static_thresh}, - {VP8E_SET_TOKEN_PARTITIONS, set_token_partitions}, - {VP8E_GET_LAST_QUANTIZER, get_quantizer}, - {VP8E_GET_LAST_QUANTIZER_64, get_quantizer64}, 
- {VP8E_SET_ARNR_MAXFRAMES, set_arnr_max_frames}, - {VP8E_SET_ARNR_STRENGTH , set_arnr_strength}, - {VP8E_SET_ARNR_TYPE , set_arnr_type}, - {VP8E_SET_TUNING, set_tuning}, - {VP8E_SET_CQ_LEVEL, set_cq_level}, - {VP8E_SET_MAX_INTRA_BITRATE_PCT, set_rc_max_intra_bitrate_pct}, - {VP8E_SET_SCREEN_CONTENT_MODE, set_screen_content_mode}, - { -1, NULL}, +static vpx_codec_ctrl_fn_map_t vp8e_ctf_maps[] = { + { VP8_SET_REFERENCE, vp8e_set_reference }, + { VP8_COPY_REFERENCE, vp8e_get_reference }, + { VP8_SET_POSTPROC, vp8e_set_previewpp }, + { VP8E_SET_FRAME_FLAGS, vp8e_set_frame_flags }, + { VP8E_SET_TEMPORAL_LAYER_ID, vp8e_set_temporal_layer_id }, + { VP8E_SET_ROI_MAP, vp8e_set_roi_map }, + { VP8E_SET_ACTIVEMAP, vp8e_set_activemap }, + { VP8E_SET_SCALEMODE, vp8e_set_scalemode }, + { VP8E_SET_CPUUSED, set_cpu_used }, + { VP8E_SET_NOISE_SENSITIVITY, set_noise_sensitivity }, + { VP8E_SET_ENABLEAUTOALTREF, set_enable_auto_alt_ref }, + { VP8E_SET_SHARPNESS, set_sharpness }, + { VP8E_SET_STATIC_THRESHOLD, set_static_thresh }, + { VP8E_SET_TOKEN_PARTITIONS, set_token_partitions }, + { VP8E_GET_LAST_QUANTIZER, get_quantizer }, + { VP8E_GET_LAST_QUANTIZER_64, get_quantizer64 }, + { VP8E_SET_ARNR_MAXFRAMES, set_arnr_max_frames }, + { VP8E_SET_ARNR_STRENGTH, set_arnr_strength }, + { VP8E_SET_ARNR_TYPE, set_arnr_type }, + { VP8E_SET_TUNING, set_tuning }, + { VP8E_SET_CQ_LEVEL, set_cq_level }, + { VP8E_SET_MAX_INTRA_BITRATE_PCT, set_rc_max_intra_bitrate_pct }, + { VP8E_SET_SCREEN_CONTENT_MODE, set_screen_content_mode }, + { -1, NULL }, }; -static vpx_codec_enc_cfg_map_t vp8e_usage_cfg_map[] = -{ +static vpx_codec_enc_cfg_map_t vp8e_usage_cfg_map[] = { + { 0, { - 0, - { - 0, /* g_usage */ - 0, /* g_threads */ - 0, /* g_profile */ + 0, /* g_usage */ + 0, /* g_threads */ + 0, /* g_profile */ - 320, /* g_width */ - 240, /* g_height */ - VPX_BITS_8, /* g_bit_depth */ - 8, /* g_input_bit_depth */ + 320, /* g_width */ + 240, /* g_height */ + VPX_BITS_8, /* g_bit_depth */ + 8, /* 
g_input_bit_depth */ - {1, 30}, /* g_timebase */ + { 1, 30 }, /* g_timebase */ - 0, /* g_error_resilient */ + 0, /* g_error_resilient */ - VPX_RC_ONE_PASS, /* g_pass */ + VPX_RC_ONE_PASS, /* g_pass */ - 0, /* g_lag_in_frames */ + 0, /* g_lag_in_frames */ - 0, /* rc_dropframe_thresh */ - 0, /* rc_resize_allowed */ - 1, /* rc_scaled_width */ - 1, /* rc_scaled_height */ - 60, /* rc_resize_down_thresold */ - 30, /* rc_resize_up_thresold */ + 0, /* rc_dropframe_thresh */ + 0, /* rc_resize_allowed */ + 1, /* rc_scaled_width */ + 1, /* rc_scaled_height */ + 60, /* rc_resize_down_thresold */ + 30, /* rc_resize_up_thresold */ - VPX_VBR, /* rc_end_usage */ - {0}, /* rc_twopass_stats_in */ - {0}, /* rc_firstpass_mb_stats_in */ - 256, /* rc_target_bandwidth */ - 4, /* rc_min_quantizer */ - 63, /* rc_max_quantizer */ - 100, /* rc_undershoot_pct */ - 100, /* rc_overshoot_pct */ + VPX_VBR, /* rc_end_usage */ + { NULL, 0 }, /* rc_twopass_stats_in */ + { NULL, 0 }, /* rc_firstpass_mb_stats_in */ + 256, /* rc_target_bandwidth */ + 4, /* rc_min_quantizer */ + 63, /* rc_max_quantizer */ + 100, /* rc_undershoot_pct */ + 100, /* rc_overshoot_pct */ - 6000, /* rc_max_buffer_size */ - 4000, /* rc_buffer_initial_size; */ - 5000, /* rc_buffer_optimal_size; */ + 6000, /* rc_max_buffer_size */ + 4000, /* rc_buffer_initial_size; */ + 5000, /* rc_buffer_optimal_size; */ - 50, /* rc_two_pass_vbrbias */ - 0, /* rc_two_pass_vbrmin_section */ - 400, /* rc_two_pass_vbrmax_section */ + 50, /* rc_two_pass_vbrbias */ + 0, /* rc_two_pass_vbrmin_section */ + 400, /* rc_two_pass_vbrmax_section */ /* keyframing settings (kf) */ - VPX_KF_AUTO, /* g_kfmode*/ - 0, /* kf_min_dist */ - 128, /* kf_max_dist */ + VPX_KF_AUTO, /* g_kfmode*/ + 0, /* kf_min_dist */ + 128, /* kf_max_dist */ VPX_SS_DEFAULT_LAYERS, /* ss_number_layers */ - {0}, - {0}, /* ss_target_bitrate */ - 1, /* ts_number_layers */ - {0}, /* ts_target_bitrate */ - {0}, /* ts_rate_decimator */ - 0, /* ts_periodicity */ - {0}, /* ts_layer_id */ - }}, 
+ { 0 }, + { 0 }, /* ss_target_bitrate */ + 1, /* ts_number_layers */ + { 0 }, /* ts_target_bitrate */ + { 0 }, /* ts_rate_decimator */ + 0, /* ts_periodicity */ + { 0 }, /* ts_layer_id */ + { 0 }, /* layer_target_bitrate */ + 0 /* temporal_layering_mode */ + } }, }; - #ifndef VERSION_STRING #define VERSION_STRING #endif -CODEC_INTERFACE(vpx_codec_vp8_cx) = -{ - "WebM Project VP8 Encoder" VERSION_STRING, - VPX_CODEC_INTERNAL_ABI_VERSION, - VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR | - VPX_CODEC_CAP_OUTPUT_PARTITION, - /* vpx_codec_caps_t caps; */ - vp8e_init, /* vpx_codec_init_fn_t init; */ - vp8e_destroy, /* vpx_codec_destroy_fn_t destroy; */ - vp8e_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */ - { - NULL, /* vpx_codec_peek_si_fn_t peek_si; */ - NULL, /* vpx_codec_get_si_fn_t get_si; */ - NULL, /* vpx_codec_decode_fn_t decode; */ - NULL, /* vpx_codec_frame_get_fn_t frame_get; */ - NULL, /* vpx_codec_set_fb_fn_t set_fb_fn; */ - }, - { - 1, /* 1 cfg map */ - vp8e_usage_cfg_map, /* vpx_codec_enc_cfg_map_t cfg_maps; */ - vp8e_encode, /* vpx_codec_encode_fn_t encode; */ - vp8e_get_cxdata, /* vpx_codec_get_cx_data_fn_t get_cx_data; */ - vp8e_set_config, - NULL, - vp8e_get_preview, - vp8e_mr_alloc_mem, - } /* encoder functions */ +CODEC_INTERFACE(vpx_codec_vp8_cx) = { + "WebM Project VP8 Encoder" VERSION_STRING, + VPX_CODEC_INTERNAL_ABI_VERSION, + VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR | VPX_CODEC_CAP_OUTPUT_PARTITION, + /* vpx_codec_caps_t caps; */ + vp8e_init, /* vpx_codec_init_fn_t init; */ + vp8e_destroy, /* vpx_codec_destroy_fn_t destroy; */ + vp8e_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */ + { + NULL, /* vpx_codec_peek_si_fn_t peek_si; */ + NULL, /* vpx_codec_get_si_fn_t get_si; */ + NULL, /* vpx_codec_decode_fn_t decode; */ + NULL, /* vpx_codec_frame_get_fn_t frame_get; */ + NULL, /* vpx_codec_set_fb_fn_t set_fb_fn; */ + }, + { + 1, /* 1 cfg map */ + vp8e_usage_cfg_map, /* vpx_codec_enc_cfg_map_t cfg_maps; */ + vp8e_encode, /* 
vpx_codec_encode_fn_t encode; */ + vp8e_get_cxdata, /* vpx_codec_get_cx_data_fn_t get_cx_data; */ + vp8e_set_config, NULL, vp8e_get_preview, vp8e_mr_alloc_mem, + } /* encoder functions */ }; diff --git a/libs/libvpx/vp8/vp8_dx_iface.c b/libs/libvpx/vp8/vp8_dx_iface.c index a12a2ad0e1..b1f8340d67 100644 --- a/libs/libvpx/vp8/vp8_dx_iface.c +++ b/libs/libvpx/vp8/vp8_dx_iface.c @@ -8,7 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. */ - +#include #include #include #include "./vp8_rtcd.h" @@ -30,663 +30,537 @@ #include "decoder/decoderthreading.h" #define VP8_CAP_POSTPROC (CONFIG_POSTPROC ? VPX_CODEC_CAP_POSTPROC : 0) -#define VP8_CAP_ERROR_CONCEALMENT (CONFIG_ERROR_CONCEALMENT ? \ - VPX_CODEC_CAP_ERROR_CONCEALMENT : 0) +#define VP8_CAP_ERROR_CONCEALMENT \ + (CONFIG_ERROR_CONCEALMENT ? VPX_CODEC_CAP_ERROR_CONCEALMENT : 0) -typedef vpx_codec_stream_info_t vp8_stream_info_t; +typedef vpx_codec_stream_info_t vp8_stream_info_t; /* Structures for handling memory allocations */ -typedef enum -{ - VP8_SEG_ALG_PRIV = 256, - VP8_SEG_MAX -} mem_seg_id_t; -#define NELEMENTS(x) ((int)(sizeof(x)/sizeof(x[0]))) +typedef enum { VP8_SEG_ALG_PRIV = 256, VP8_SEG_MAX } mem_seg_id_t; +#define NELEMENTS(x) ((int)(sizeof(x) / sizeof(x[0]))) -struct vpx_codec_alg_priv -{ - vpx_codec_priv_t base; - vpx_codec_dec_cfg_t cfg; - vp8_stream_info_t si; - int decoder_init; - int postproc_cfg_set; - vp8_postproc_cfg_t postproc_cfg; -#if CONFIG_POSTPROC_VISUALIZER - unsigned int dbg_postproc_flag; - int dbg_color_ref_frame_flag; - int dbg_color_mb_modes_flag; - int dbg_color_b_modes_flag; - int dbg_display_mv_flag; -#endif - vpx_decrypt_cb decrypt_cb; - void *decrypt_state; - vpx_image_t img; - int img_setup; - struct frame_buffers yv12_frame_buffers; - void *user_priv; - FRAGMENT_DATA fragments; +struct vpx_codec_alg_priv { + vpx_codec_priv_t base; + vpx_codec_dec_cfg_t cfg; + vp8_stream_info_t si; + int decoder_init; + int postproc_cfg_set; + vp8_postproc_cfg_t postproc_cfg; 
+ vpx_decrypt_cb decrypt_cb; + void *decrypt_state; + vpx_image_t img; + int img_setup; + struct frame_buffers yv12_frame_buffers; + void *user_priv; + FRAGMENT_DATA fragments; }; -static void vp8_init_ctx(vpx_codec_ctx_t *ctx) -{ - vpx_codec_alg_priv_t *priv = - (vpx_codec_alg_priv_t *)vpx_calloc(1, sizeof(*priv)); +static int vp8_init_ctx(vpx_codec_ctx_t *ctx) { + vpx_codec_alg_priv_t *priv = + (vpx_codec_alg_priv_t *)vpx_calloc(1, sizeof(*priv)); + if (!priv) return 1; - ctx->priv = (vpx_codec_priv_t *)priv; - ctx->priv->init_flags = ctx->init_flags; + ctx->priv = (vpx_codec_priv_t *)priv; + ctx->priv->init_flags = ctx->init_flags; - priv->si.sz = sizeof(priv->si); - priv->decrypt_cb = NULL; - priv->decrypt_state = NULL; + priv->si.sz = sizeof(priv->si); + priv->decrypt_cb = NULL; + priv->decrypt_state = NULL; - if (ctx->config.dec) - { - /* Update the reference to the config structure to an internal copy. */ - priv->cfg = *ctx->config.dec; - ctx->config.dec = &priv->cfg; - } + if (ctx->config.dec) { + /* Update the reference to the config structure to an internal copy. */ + priv->cfg = *ctx->config.dec; + ctx->config.dec = &priv->cfg; + } + + return 0; } static vpx_codec_err_t vp8_init(vpx_codec_ctx_t *ctx, - vpx_codec_priv_enc_mr_cfg_t *data) -{ - vpx_codec_err_t res = VPX_CODEC_OK; - vpx_codec_alg_priv_t *priv = NULL; - (void) data; + vpx_codec_priv_enc_mr_cfg_t *data) { + vpx_codec_err_t res = VPX_CODEC_OK; + vpx_codec_alg_priv_t *priv = NULL; + (void)data; - vp8_rtcd(); - vpx_dsp_rtcd(); - vpx_scale_rtcd(); + vp8_rtcd(); + vpx_dsp_rtcd(); + vpx_scale_rtcd(); - /* This function only allocates space for the vpx_codec_alg_priv_t - * structure. More memory may be required at the time the stream - * information becomes known. - */ - if (!ctx->priv) { - vp8_init_ctx(ctx); - priv = (vpx_codec_alg_priv_t *)ctx->priv; + /* This function only allocates space for the vpx_codec_alg_priv_t + * structure. 
More memory may be required at the time the stream + * information becomes known. + */ + if (!ctx->priv) { + if (vp8_init_ctx(ctx)) return VPX_CODEC_MEM_ERROR; + priv = (vpx_codec_alg_priv_t *)ctx->priv; - /* initialize number of fragments to zero */ - priv->fragments.count = 0; - /* is input fragments enabled? */ - priv->fragments.enabled = - (priv->base.init_flags & VPX_CODEC_USE_INPUT_FRAGMENTS); + /* initialize number of fragments to zero */ + priv->fragments.count = 0; + /* is input fragments enabled? */ + priv->fragments.enabled = + (priv->base.init_flags & VPX_CODEC_USE_INPUT_FRAGMENTS); - /*post processing level initialized to do nothing */ - } else { - priv = (vpx_codec_alg_priv_t *)ctx->priv; - } + /*post processing level initialized to do nothing */ + } else { + priv = (vpx_codec_alg_priv_t *)ctx->priv; + } - priv->yv12_frame_buffers.use_frame_threads = - (ctx->priv->init_flags & VPX_CODEC_USE_FRAME_THREADING); + priv->yv12_frame_buffers.use_frame_threads = + (ctx->priv->init_flags & VPX_CODEC_USE_FRAME_THREADING); - /* for now, disable frame threading */ - priv->yv12_frame_buffers.use_frame_threads = 0; + /* for now, disable frame threading */ + priv->yv12_frame_buffers.use_frame_threads = 0; - if (priv->yv12_frame_buffers.use_frame_threads && - ((ctx->priv->init_flags & VPX_CODEC_USE_ERROR_CONCEALMENT) || - (ctx->priv->init_flags & VPX_CODEC_USE_INPUT_FRAGMENTS))) { - /* row-based threading, error concealment, and input fragments will - * not be supported when using frame-based threading */ - res = VPX_CODEC_INVALID_PARAM; - } + if (priv->yv12_frame_buffers.use_frame_threads && + ((ctx->priv->init_flags & VPX_CODEC_USE_ERROR_CONCEALMENT) || + (ctx->priv->init_flags & VPX_CODEC_USE_INPUT_FRAGMENTS))) { + /* row-based threading, error concealment, and input fragments will + * not be supported when using frame-based threading */ + res = VPX_CODEC_INVALID_PARAM; + } - return res; + return res; } -static vpx_codec_err_t vp8_destroy(vpx_codec_alg_priv_t 
*ctx) -{ - vp8_remove_decoder_instances(&ctx->yv12_frame_buffers); +static vpx_codec_err_t vp8_destroy(vpx_codec_alg_priv_t *ctx) { + vp8_remove_decoder_instances(&ctx->yv12_frame_buffers); - vpx_free(ctx); + vpx_free(ctx); - return VPX_CODEC_OK; + return VPX_CODEC_OK; } static vpx_codec_err_t vp8_peek_si_internal(const uint8_t *data, unsigned int data_sz, vpx_codec_stream_info_t *si, vpx_decrypt_cb decrypt_cb, - void *decrypt_state) -{ - vpx_codec_err_t res = VPX_CODEC_OK; + void *decrypt_state) { + vpx_codec_err_t res = VPX_CODEC_OK; - if(data + data_sz <= data) - { - res = VPX_CODEC_INVALID_PARAM; + assert(data != NULL); + + if (data + data_sz <= data) { + res = VPX_CODEC_INVALID_PARAM; + } else { + /* Parse uncompresssed part of key frame header. + * 3 bytes:- including version, frame type and an offset + * 3 bytes:- sync code (0x9d, 0x01, 0x2a) + * 4 bytes:- including image width and height in the lowest 14 bits + * of each 2-byte value. + */ + uint8_t clear_buffer[10]; + const uint8_t *clear = data; + if (decrypt_cb) { + int n = VPXMIN(sizeof(clear_buffer), data_sz); + decrypt_cb(decrypt_state, data, clear_buffer, n); + clear = clear_buffer; } - else + si->is_kf = 0; + + if (data_sz >= 10 && !(clear[0] & 0x01)) /* I-Frame */ { - /* Parse uncompresssed part of key frame header. - * 3 bytes:- including version, frame type and an offset - * 3 bytes:- sync code (0x9d, 0x01, 0x2a) - * 4 bytes:- including image width and height in the lowest 14 bits - * of each 2-byte value. 
- */ - uint8_t clear_buffer[10]; - const uint8_t *clear = data; - if (decrypt_cb) - { - int n = VPXMIN(sizeof(clear_buffer), data_sz); - decrypt_cb(decrypt_state, data, clear_buffer, n); - clear = clear_buffer; - } - si->is_kf = 0; + si->is_kf = 1; - if (data_sz >= 10 && !(clear[0] & 0x01)) /* I-Frame */ - { - si->is_kf = 1; + /* vet via sync code */ + if (clear[3] != 0x9d || clear[4] != 0x01 || clear[5] != 0x2a) { + return VPX_CODEC_UNSUP_BITSTREAM; + } - /* vet via sync code */ - if (clear[3] != 0x9d || clear[4] != 0x01 || clear[5] != 0x2a) - return VPX_CODEC_UNSUP_BITSTREAM; + si->w = (clear[6] | (clear[7] << 8)) & 0x3fff; + si->h = (clear[8] | (clear[9] << 8)) & 0x3fff; - si->w = (clear[6] | (clear[7] << 8)) & 0x3fff; - si->h = (clear[8] | (clear[9] << 8)) & 0x3fff; - - /*printf("w=%d, h=%d\n", si->w, si->h);*/ - if (!(si->h | si->w)) - res = VPX_CODEC_UNSUP_BITSTREAM; - } - else - { - res = VPX_CODEC_UNSUP_BITSTREAM; - } + /*printf("w=%d, h=%d\n", si->w, si->h);*/ + if (!(si->h && si->w)) res = VPX_CODEC_CORRUPT_FRAME; + } else { + res = VPX_CODEC_UNSUP_BITSTREAM; } + } - return res; + return res; } -static vpx_codec_err_t vp8_peek_si(const uint8_t *data, - unsigned int data_sz, +static vpx_codec_err_t vp8_peek_si(const uint8_t *data, unsigned int data_sz, vpx_codec_stream_info_t *si) { - return vp8_peek_si_internal(data, data_sz, si, NULL, NULL); + return vp8_peek_si_internal(data, data_sz, si, NULL, NULL); } -static vpx_codec_err_t vp8_get_si(vpx_codec_alg_priv_t *ctx, - vpx_codec_stream_info_t *si) -{ +static vpx_codec_err_t vp8_get_si(vpx_codec_alg_priv_t *ctx, + vpx_codec_stream_info_t *si) { + unsigned int sz; - unsigned int sz; + if (si->sz >= sizeof(vp8_stream_info_t)) { + sz = sizeof(vp8_stream_info_t); + } else { + sz = sizeof(vpx_codec_stream_info_t); + } - if (si->sz >= sizeof(vp8_stream_info_t)) - sz = sizeof(vp8_stream_info_t); - else - sz = sizeof(vpx_codec_stream_info_t); + memcpy(si, &ctx->si, sz); + si->sz = sz; - memcpy(si, &ctx->si, sz); - 
si->sz = sz; - - return VPX_CODEC_OK; + return VPX_CODEC_OK; } +static vpx_codec_err_t update_error_state( + vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) { + vpx_codec_err_t res; -static vpx_codec_err_t -update_error_state(vpx_codec_alg_priv_t *ctx, - const struct vpx_internal_error_info *error) -{ - vpx_codec_err_t res; + if ((res = error->error_code)) { + ctx->base.err_detail = error->has_detail ? error->detail : NULL; + } - if ((res = error->error_code)) - ctx->base.err_detail = error->has_detail - ? error->detail - : NULL; - - return res; + return res; } -static void yuvconfig2image(vpx_image_t *img, - const YV12_BUFFER_CONFIG *yv12, - void *user_priv) -{ - /** vpx_img_wrap() doesn't allow specifying independent strides for - * the Y, U, and V planes, nor other alignment adjustments that - * might be representable by a YV12_BUFFER_CONFIG, so we just - * initialize all the fields.*/ - img->fmt = VPX_IMG_FMT_I420; - img->w = yv12->y_stride; - img->h = (yv12->y_height + 2 * VP8BORDERINPIXELS + 15) & ~15; - img->d_w = img->r_w = yv12->y_width; - img->d_h = img->r_h = yv12->y_height; - img->x_chroma_shift = 1; - img->y_chroma_shift = 1; - img->planes[VPX_PLANE_Y] = yv12->y_buffer; - img->planes[VPX_PLANE_U] = yv12->u_buffer; - img->planes[VPX_PLANE_V] = yv12->v_buffer; - img->planes[VPX_PLANE_ALPHA] = NULL; - img->stride[VPX_PLANE_Y] = yv12->y_stride; - img->stride[VPX_PLANE_U] = yv12->uv_stride; - img->stride[VPX_PLANE_V] = yv12->uv_stride; - img->stride[VPX_PLANE_ALPHA] = yv12->y_stride; - img->bit_depth = 8; - img->bps = 12; - img->user_priv = user_priv; - img->img_data = yv12->buffer_alloc; - img->img_data_owner = 0; - img->self_allocd = 0; +static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12, + void *user_priv) { + /** vpx_img_wrap() doesn't allow specifying independent strides for + * the Y, U, and V planes, nor other alignment adjustments that + * might be representable by a YV12_BUFFER_CONFIG, so we just + 
* initialize all the fields.*/ + img->fmt = VPX_IMG_FMT_I420; + img->w = yv12->y_stride; + img->h = (yv12->y_height + 2 * VP8BORDERINPIXELS + 15) & ~15; + img->d_w = img->r_w = yv12->y_width; + img->d_h = img->r_h = yv12->y_height; + img->x_chroma_shift = 1; + img->y_chroma_shift = 1; + img->planes[VPX_PLANE_Y] = yv12->y_buffer; + img->planes[VPX_PLANE_U] = yv12->u_buffer; + img->planes[VPX_PLANE_V] = yv12->v_buffer; + img->planes[VPX_PLANE_ALPHA] = NULL; + img->stride[VPX_PLANE_Y] = yv12->y_stride; + img->stride[VPX_PLANE_U] = yv12->uv_stride; + img->stride[VPX_PLANE_V] = yv12->uv_stride; + img->stride[VPX_PLANE_ALPHA] = yv12->y_stride; + img->bit_depth = 8; + img->bps = 12; + img->user_priv = user_priv; + img->img_data = yv12->buffer_alloc; + img->img_data_owner = 0; + img->self_allocd = 0; } -static int -update_fragments(vpx_codec_alg_priv_t *ctx, - const uint8_t *data, - unsigned int data_sz, - vpx_codec_err_t *res) -{ - *res = VPX_CODEC_OK; +static int update_fragments(vpx_codec_alg_priv_t *ctx, const uint8_t *data, + unsigned int data_sz, vpx_codec_err_t *res) { + *res = VPX_CODEC_OK; - if (ctx->fragments.count == 0) - { - /* New frame, reset fragment pointers and sizes */ - memset((void*)ctx->fragments.ptrs, 0, sizeof(ctx->fragments.ptrs)); - memset(ctx->fragments.sizes, 0, sizeof(ctx->fragments.sizes)); + if (ctx->fragments.count == 0) { + /* New frame, reset fragment pointers and sizes */ + memset((void *)ctx->fragments.ptrs, 0, sizeof(ctx->fragments.ptrs)); + memset(ctx->fragments.sizes, 0, sizeof(ctx->fragments.sizes)); + } + if (ctx->fragments.enabled && !(data == NULL && data_sz == 0)) { + /* Store a pointer to this fragment and return. We haven't + * received the complete frame yet, so we will wait with decoding. 
+ */ + ctx->fragments.ptrs[ctx->fragments.count] = data; + ctx->fragments.sizes[ctx->fragments.count] = data_sz; + ctx->fragments.count++; + if (ctx->fragments.count > (1 << EIGHT_PARTITION) + 1) { + ctx->fragments.count = 0; + *res = VPX_CODEC_INVALID_PARAM; + return -1; } - if (ctx->fragments.enabled && !(data == NULL && data_sz == 0)) - { - /* Store a pointer to this fragment and return. We haven't - * received the complete frame yet, so we will wait with decoding. - */ - ctx->fragments.ptrs[ctx->fragments.count] = data; - ctx->fragments.sizes[ctx->fragments.count] = data_sz; - ctx->fragments.count++; - if (ctx->fragments.count > (1 << EIGHT_PARTITION) + 1) - { - ctx->fragments.count = 0; - *res = VPX_CODEC_INVALID_PARAM; - return -1; + return 0; + } + + if (!ctx->fragments.enabled && (data == NULL && data_sz == 0)) { + return 0; + } + + if (!ctx->fragments.enabled) { + ctx->fragments.ptrs[0] = data; + ctx->fragments.sizes[0] = data_sz; + ctx->fragments.count = 1; + } + + return 1; +} + +static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx, + const uint8_t *data, unsigned int data_sz, + void *user_priv, long deadline) { + vpx_codec_err_t res = VPX_CODEC_OK; + unsigned int resolution_change = 0; + unsigned int w, h; + + if (!ctx->fragments.enabled && (data == NULL && data_sz == 0)) { + return 0; + } + + /* Update the input fragment data */ + if (update_fragments(ctx, data, data_sz, &res) <= 0) return res; + + /* Determine the stream parameters. Note that we rely on peek_si to + * validate that we have a buffer that does not wrap around the top + * of the heap. 
+ */ + w = ctx->si.w; + h = ctx->si.h; + + res = vp8_peek_si_internal(ctx->fragments.ptrs[0], ctx->fragments.sizes[0], + &ctx->si, ctx->decrypt_cb, ctx->decrypt_state); + + if ((res == VPX_CODEC_UNSUP_BITSTREAM) && !ctx->si.is_kf) { + /* the peek function returns an error for non keyframes, however for + * this case, it is not an error */ + res = VPX_CODEC_OK; + } + + if (!ctx->decoder_init && !ctx->si.is_kf) res = VPX_CODEC_UNSUP_BITSTREAM; + + if ((ctx->si.h != h) || (ctx->si.w != w)) resolution_change = 1; + + /* Initialize the decoder instance on the first frame*/ + if (!res && !ctx->decoder_init) { + VP8D_CONFIG oxcf; + + oxcf.Width = ctx->si.w; + oxcf.Height = ctx->si.h; + oxcf.Version = 9; + oxcf.postprocess = 0; + oxcf.max_threads = ctx->cfg.threads; + oxcf.error_concealment = + (ctx->base.init_flags & VPX_CODEC_USE_ERROR_CONCEALMENT); + + /* If postprocessing was enabled by the application and a + * configuration has not been provided, default it. + */ + if (!ctx->postproc_cfg_set && + (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)) { + ctx->postproc_cfg.post_proc_flag = + VP8_DEBLOCK | VP8_DEMACROBLOCK | VP8_MFQE; + ctx->postproc_cfg.deblocking_level = 4; + ctx->postproc_cfg.noise_level = 0; + } + + res = vp8_create_decoder_instances(&ctx->yv12_frame_buffers, &oxcf); + if (res == VPX_CODEC_OK) ctx->decoder_init = 1; + } + + /* Set these even if already initialized. The caller may have changed the + * decrypt config between frames. 
+ */ + if (ctx->decoder_init) { + ctx->yv12_frame_buffers.pbi[0]->decrypt_cb = ctx->decrypt_cb; + ctx->yv12_frame_buffers.pbi[0]->decrypt_state = ctx->decrypt_state; + } + + if (!res) { + VP8D_COMP *pbi = ctx->yv12_frame_buffers.pbi[0]; + if (resolution_change) { + VP8_COMMON *const pc = &pbi->common; + MACROBLOCKD *const xd = &pbi->mb; +#if CONFIG_MULTITHREAD + int i; +#endif + pc->Width = ctx->si.w; + pc->Height = ctx->si.h; + { + int prev_mb_rows = pc->mb_rows; + + if (setjmp(pbi->common.error.jmp)) { + pbi->common.error.setjmp = 0; + /* on failure clear the cached resolution to ensure a full + * reallocation is attempted on resync. */ + ctx->si.w = 0; + ctx->si.h = 0; + vp8_clear_system_state(); + /* same return value as used in vp8dx_receive_compressed_data */ + return -1; } - return 0; - } - if (!ctx->fragments.enabled && (data == NULL && data_sz == 0)) - { - return 0; - } + pbi->common.error.setjmp = 1; - if (!ctx->fragments.enabled) - { - ctx->fragments.ptrs[0] = data; - ctx->fragments.sizes[0] = data_sz; - ctx->fragments.count = 1; - } + if (pc->Width <= 0) { + pc->Width = w; + vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, + "Invalid frame width"); + } - return 1; -} + if (pc->Height <= 0) { + pc->Height = h; + vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, + "Invalid frame height"); + } -static vpx_codec_err_t vp8_decode(vpx_codec_alg_priv_t *ctx, - const uint8_t *data, - unsigned int data_sz, - void *user_priv, - long deadline) -{ - vpx_codec_err_t res = VPX_CODEC_OK; - unsigned int resolution_change = 0; - unsigned int w, h; + if (vp8_alloc_frame_buffers(pc, pc->Width, pc->Height)) { + vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate frame buffers"); + } - if (!ctx->fragments.enabled && (data == NULL && data_sz == 0)) - { - return 0; - } - - /* Update the input fragment data */ - if(update_fragments(ctx, data, data_sz, &res) <= 0) - return res; - - /* Determine the stream parameters. 
Note that we rely on peek_si to - * validate that we have a buffer that does not wrap around the top - * of the heap. - */ - w = ctx->si.w; - h = ctx->si.h; - - res = vp8_peek_si_internal(ctx->fragments.ptrs[0], ctx->fragments.sizes[0], - &ctx->si, ctx->decrypt_cb, ctx->decrypt_state); - - if((res == VPX_CODEC_UNSUP_BITSTREAM) && !ctx->si.is_kf) - { - /* the peek function returns an error for non keyframes, however for - * this case, it is not an error */ - res = VPX_CODEC_OK; - } - - if(!ctx->decoder_init && !ctx->si.is_kf) - res = VPX_CODEC_UNSUP_BITSTREAM; - - if ((ctx->si.h != h) || (ctx->si.w != w)) - resolution_change = 1; - - /* Initialize the decoder instance on the first frame*/ - if (!res && !ctx->decoder_init) - { - VP8D_CONFIG oxcf; - - oxcf.Width = ctx->si.w; - oxcf.Height = ctx->si.h; - oxcf.Version = 9; - oxcf.postprocess = 0; - oxcf.max_threads = ctx->cfg.threads; - oxcf.error_concealment = - (ctx->base.init_flags & VPX_CODEC_USE_ERROR_CONCEALMENT); - - /* If postprocessing was enabled by the application and a - * configuration has not been provided, default it. - */ - if (!ctx->postproc_cfg_set - && (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)) { - ctx->postproc_cfg.post_proc_flag = - VP8_DEBLOCK | VP8_DEMACROBLOCK | VP8_MFQE; - ctx->postproc_cfg.deblocking_level = 4; - ctx->postproc_cfg.noise_level = 0; - } - - res = vp8_create_decoder_instances(&ctx->yv12_frame_buffers, &oxcf); - ctx->decoder_init = 1; - } - - /* Set these even if already initialized. The caller may have changed the - * decrypt config between frames. 
- */ - if (ctx->decoder_init) { - ctx->yv12_frame_buffers.pbi[0]->decrypt_cb = ctx->decrypt_cb; - ctx->yv12_frame_buffers.pbi[0]->decrypt_state = ctx->decrypt_state; - } - - if (!res) - { - VP8D_COMP *pbi = ctx->yv12_frame_buffers.pbi[0]; - if (resolution_change) - { - VP8_COMMON *const pc = & pbi->common; - MACROBLOCKD *const xd = & pbi->mb; -#if CONFIG_MULTITHREAD - int i; -#endif - pc->Width = ctx->si.w; - pc->Height = ctx->si.h; - { - int prev_mb_rows = pc->mb_rows; - - if (setjmp(pbi->common.error.jmp)) - { - pbi->common.error.setjmp = 0; - vp8_clear_system_state(); - /* same return value as used in vp8dx_receive_compressed_data */ - return -1; - } - - pbi->common.error.setjmp = 1; - - if (pc->Width <= 0) - { - pc->Width = w; - vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, - "Invalid frame width"); - } - - if (pc->Height <= 0) - { - pc->Height = h; - vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, - "Invalid frame height"); - } - - if (vp8_alloc_frame_buffers(pc, pc->Width, pc->Height)) - vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate frame buffers"); - - xd->pre = pc->yv12_fb[pc->lst_fb_idx]; - xd->dst = pc->yv12_fb[pc->new_fb_idx]; + xd->pre = pc->yv12_fb[pc->lst_fb_idx]; + xd->dst = pc->yv12_fb[pc->new_fb_idx]; #if CONFIG_MULTITHREAD - for (i = 0; i < pbi->allocated_decoding_thread_count; i++) - { - pbi->mb_row_di[i].mbd.dst = pc->yv12_fb[pc->new_fb_idx]; - vp8_build_block_doffsets(&pbi->mb_row_di[i].mbd); - } + for (i = 0; i < pbi->allocated_decoding_thread_count; ++i) { + pbi->mb_row_di[i].mbd.dst = pc->yv12_fb[pc->new_fb_idx]; + vp8_build_block_doffsets(&pbi->mb_row_di[i].mbd); + } #endif - vp8_build_block_doffsets(&pbi->mb); + vp8_build_block_doffsets(&pbi->mb); - /* allocate memory for last frame MODE_INFO array */ +/* allocate memory for last frame MODE_INFO array */ #if CONFIG_ERROR_CONCEALMENT - if (pbi->ec_enabled) - { - /* old prev_mip was released by vp8_de_alloc_frame_buffers() - * called in 
vp8_alloc_frame_buffers() */ - pc->prev_mip = vpx_calloc( - (pc->mb_cols + 1) * (pc->mb_rows + 1), - sizeof(MODE_INFO)); + if (pbi->ec_enabled) { + /* old prev_mip was released by vp8_de_alloc_frame_buffers() + * called in vp8_alloc_frame_buffers() */ + pc->prev_mip = vpx_calloc((pc->mb_cols + 1) * (pc->mb_rows + 1), + sizeof(MODE_INFO)); - if (!pc->prev_mip) - { - vp8_de_alloc_frame_buffers(pc); - vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate" - "last frame MODE_INFO array"); - } + if (!pc->prev_mip) { + vp8_de_alloc_frame_buffers(pc); + vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate" + "last frame MODE_INFO array"); + } - pc->prev_mi = pc->prev_mip + pc->mode_info_stride + 1; + pc->prev_mi = pc->prev_mip + pc->mode_info_stride + 1; - if (vp8_alloc_overlap_lists(pbi)) - vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR, - "Failed to allocate overlap lists " - "for error concealment"); - } + if (vp8_alloc_overlap_lists(pbi)) + vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate overlap lists " + "for error concealment"); + } #endif #if CONFIG_MULTITHREAD - if (pbi->b_multithreaded_rd) - vp8mt_alloc_temp_buffers(pbi, pc->Width, prev_mb_rows); + if (pbi->b_multithreaded_rd) { + vp8mt_alloc_temp_buffers(pbi, pc->Width, prev_mb_rows); + } #else - (void)prev_mb_rows; + (void)prev_mb_rows; #endif - } + } - pbi->common.error.setjmp = 0; + pbi->common.error.setjmp = 0; - /* required to get past the first get_free_fb() call */ - pbi->common.fb_idx_ref_cnt[0] = 0; - } - - /* update the pbi fragment data */ - pbi->fragments = ctx->fragments; - - ctx->user_priv = user_priv; - if (vp8dx_receive_compressed_data(pbi, data_sz, data, deadline)) - { - res = update_error_state(ctx, &pbi->common.error); - } - - /* get ready for the next series of fragments */ - ctx->fragments.count = 0; + /* required to get past the first get_free_fb() call */ + pbi->common.fb_idx_ref_cnt[0] = 0; } - return res; -} + /* 
update the pbi fragment data */ + pbi->fragments = ctx->fragments; -static vpx_image_t *vp8_get_frame(vpx_codec_alg_priv_t *ctx, - vpx_codec_iter_t *iter) -{ - vpx_image_t *img = NULL; - - /* iter acts as a flip flop, so an image is only returned on the first - * call to get_frame. - */ - if (!(*iter) && ctx->yv12_frame_buffers.pbi[0]) - { - YV12_BUFFER_CONFIG sd; - int64_t time_stamp = 0, time_end_stamp = 0; - vp8_ppflags_t flags = {0}; - - if (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC) - { - flags.post_proc_flag= ctx->postproc_cfg.post_proc_flag -#if CONFIG_POSTPROC_VISUALIZER - - | ((ctx->dbg_color_ref_frame_flag != 0) ? VP8D_DEBUG_CLR_FRM_REF_BLKS : 0) - | ((ctx->dbg_color_mb_modes_flag != 0) ? VP8D_DEBUG_CLR_BLK_MODES : 0) - | ((ctx->dbg_color_b_modes_flag != 0) ? VP8D_DEBUG_CLR_BLK_MODES : 0) - | ((ctx->dbg_display_mv_flag != 0) ? VP8D_DEBUG_DRAW_MV : 0) -#endif - ; - flags.deblocking_level = ctx->postproc_cfg.deblocking_level; - flags.noise_level = ctx->postproc_cfg.noise_level; -#if CONFIG_POSTPROC_VISUALIZER - flags.display_ref_frame_flag= ctx->dbg_color_ref_frame_flag; - flags.display_mb_modes_flag = ctx->dbg_color_mb_modes_flag; - flags.display_b_modes_flag = ctx->dbg_color_b_modes_flag; - flags.display_mv_flag = ctx->dbg_display_mv_flag; -#endif - } - - if (0 == vp8dx_get_raw_frame(ctx->yv12_frame_buffers.pbi[0], &sd, - &time_stamp, &time_end_stamp, &flags)) - { - yuvconfig2image(&ctx->img, &sd, ctx->user_priv); - - img = &ctx->img; - *iter = img; - } + ctx->user_priv = user_priv; + if (vp8dx_receive_compressed_data(pbi, data_sz, data, deadline)) { + res = update_error_state(ctx, &pbi->common.error); } - return img; + /* get ready for the next series of fragments */ + ctx->fragments.count = 0; + } + + return res; } -static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img, - YV12_BUFFER_CONFIG *yv12) -{ - const int y_w = img->d_w; - const int y_h = img->d_h; - const int uv_w = (img->d_w + 1) / 2; - const int uv_h = (img->d_h + 1) / 2; - 
vpx_codec_err_t res = VPX_CODEC_OK; - yv12->y_buffer = img->planes[VPX_PLANE_Y]; - yv12->u_buffer = img->planes[VPX_PLANE_U]; - yv12->v_buffer = img->planes[VPX_PLANE_V]; +static vpx_image_t *vp8_get_frame(vpx_codec_alg_priv_t *ctx, + vpx_codec_iter_t *iter) { + vpx_image_t *img = NULL; - yv12->y_crop_width = y_w; - yv12->y_crop_height = y_h; - yv12->y_width = y_w; - yv12->y_height = y_h; - yv12->uv_crop_width = uv_w; - yv12->uv_crop_height = uv_h; - yv12->uv_width = uv_w; - yv12->uv_height = uv_h; + /* iter acts as a flip flop, so an image is only returned on the first + * call to get_frame. + */ + if (!(*iter) && ctx->yv12_frame_buffers.pbi[0]) { + YV12_BUFFER_CONFIG sd; + int64_t time_stamp = 0, time_end_stamp = 0; + vp8_ppflags_t flags; + vp8_zero(flags); - yv12->y_stride = img->stride[VPX_PLANE_Y]; - yv12->uv_stride = img->stride[VPX_PLANE_U]; + if (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC) { + flags.post_proc_flag = ctx->postproc_cfg.post_proc_flag; + flags.deblocking_level = ctx->postproc_cfg.deblocking_level; + flags.noise_level = ctx->postproc_cfg.noise_level; + } - yv12->border = (img->stride[VPX_PLANE_Y] - img->d_w) / 2; - return res; + if (0 == vp8dx_get_raw_frame(ctx->yv12_frame_buffers.pbi[0], &sd, + &time_stamp, &time_end_stamp, &flags)) { + yuvconfig2image(&ctx->img, &sd, ctx->user_priv); + + img = &ctx->img; + *iter = img; + } + } + + return img; } +static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img, + YV12_BUFFER_CONFIG *yv12) { + const int y_w = img->d_w; + const int y_h = img->d_h; + const int uv_w = (img->d_w + 1) / 2; + const int uv_h = (img->d_h + 1) / 2; + vpx_codec_err_t res = VPX_CODEC_OK; + yv12->y_buffer = img->planes[VPX_PLANE_Y]; + yv12->u_buffer = img->planes[VPX_PLANE_U]; + yv12->v_buffer = img->planes[VPX_PLANE_V]; + + yv12->y_crop_width = y_w; + yv12->y_crop_height = y_h; + yv12->y_width = y_w; + yv12->y_height = y_h; + yv12->uv_crop_width = uv_w; + yv12->uv_crop_height = uv_h; + yv12->uv_width = uv_w; + 
yv12->uv_height = uv_h; + + yv12->y_stride = img->stride[VPX_PLANE_Y]; + yv12->uv_stride = img->stride[VPX_PLANE_U]; + + yv12->border = (img->stride[VPX_PLANE_Y] - img->d_w) / 2; + return res; +} static vpx_codec_err_t vp8_set_reference(vpx_codec_alg_priv_t *ctx, - va_list args) -{ + va_list args) { + vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *); - vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *); + if (data && !ctx->yv12_frame_buffers.use_frame_threads) { + vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data; + YV12_BUFFER_CONFIG sd; - if (data && !ctx->yv12_frame_buffers.use_frame_threads) - { - vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data; - YV12_BUFFER_CONFIG sd; - - image2yuvconfig(&frame->img, &sd); - - return vp8dx_set_reference(ctx->yv12_frame_buffers.pbi[0], - frame->frame_type, &sd); - } - else - return VPX_CODEC_INVALID_PARAM; + image2yuvconfig(&frame->img, &sd); + return vp8dx_set_reference(ctx->yv12_frame_buffers.pbi[0], + frame->frame_type, &sd); + } else { + return VPX_CODEC_INVALID_PARAM; + } } static vpx_codec_err_t vp8_get_reference(vpx_codec_alg_priv_t *ctx, - va_list args) -{ + va_list args) { + vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *); - vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *); + if (data && !ctx->yv12_frame_buffers.use_frame_threads) { + vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data; + YV12_BUFFER_CONFIG sd; - if (data && !ctx->yv12_frame_buffers.use_frame_threads) - { - vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data; - YV12_BUFFER_CONFIG sd; - - image2yuvconfig(&frame->img, &sd); - - return vp8dx_get_reference(ctx->yv12_frame_buffers.pbi[0], - frame->frame_type, &sd); - } - else - return VPX_CODEC_INVALID_PARAM; + image2yuvconfig(&frame->img, &sd); + return vp8dx_get_reference(ctx->yv12_frame_buffers.pbi[0], + frame->frame_type, &sd); + } else { + return VPX_CODEC_INVALID_PARAM; + } } static vpx_codec_err_t vp8_set_postproc(vpx_codec_alg_priv_t *ctx, - va_list args) -{ + va_list args) { 
#if CONFIG_POSTPROC - vp8_postproc_cfg_t *data = va_arg(args, vp8_postproc_cfg_t *); + vp8_postproc_cfg_t *data = va_arg(args, vp8_postproc_cfg_t *); - if (data) - { - ctx->postproc_cfg_set = 1; - ctx->postproc_cfg = *((vp8_postproc_cfg_t *)data); - return VPX_CODEC_OK; - } - else - return VPX_CODEC_INVALID_PARAM; + if (data) { + ctx->postproc_cfg_set = 1; + ctx->postproc_cfg = *((vp8_postproc_cfg_t *)data); + return VPX_CODEC_OK; + } else { + return VPX_CODEC_INVALID_PARAM; + } -#else - (void)ctx; - (void)args; - return VPX_CODEC_INCAPABLE; -#endif -} - - -static vpx_codec_err_t vp8_set_dbg_color_ref_frame(vpx_codec_alg_priv_t *ctx, - va_list args) { -#if CONFIG_POSTPROC_VISUALIZER && CONFIG_POSTPROC - ctx->dbg_color_ref_frame_flag = va_arg(args, int); - return VPX_CODEC_OK; -#else - (void)ctx; - (void)args; - return VPX_CODEC_INCAPABLE; -#endif -} - -static vpx_codec_err_t vp8_set_dbg_color_mb_modes(vpx_codec_alg_priv_t *ctx, - va_list args) { -#if CONFIG_POSTPROC_VISUALIZER && CONFIG_POSTPROC - ctx->dbg_color_mb_modes_flag = va_arg(args, int); - return VPX_CODEC_OK; -#else - (void)ctx; - (void)args; - return VPX_CODEC_INCAPABLE; -#endif -} - -static vpx_codec_err_t vp8_set_dbg_color_b_modes(vpx_codec_alg_priv_t *ctx, - va_list args) { -#if CONFIG_POSTPROC_VISUALIZER && CONFIG_POSTPROC - ctx->dbg_color_b_modes_flag = va_arg(args, int); - return VPX_CODEC_OK; -#else - (void)ctx; - (void)args; - return VPX_CODEC_INCAPABLE; -#endif -} - -static vpx_codec_err_t vp8_set_dbg_display_mv(vpx_codec_alg_priv_t *ctx, - va_list args) { -#if CONFIG_POSTPROC_VISUALIZER && CONFIG_POSTPROC - ctx->dbg_display_mv_flag = va_arg(args, int); - return VPX_CODEC_OK; #else (void)ctx; (void)args; @@ -695,126 +569,108 @@ static vpx_codec_err_t vp8_set_dbg_display_mv(vpx_codec_alg_priv_t *ctx, } static vpx_codec_err_t vp8_get_last_ref_updates(vpx_codec_alg_priv_t *ctx, - va_list args) -{ - int *update_info = va_arg(args, int *); + va_list args) { + int *update_info = va_arg(args, int *); - 
if (update_info && !ctx->yv12_frame_buffers.use_frame_threads) - { - VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0]; + if (update_info && !ctx->yv12_frame_buffers.use_frame_threads) { + VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0]; - *update_info = pbi->common.refresh_alt_ref_frame * (int) VP8_ALTR_FRAME - + pbi->common.refresh_golden_frame * (int) VP8_GOLD_FRAME - + pbi->common.refresh_last_frame * (int) VP8_LAST_FRAME; + *update_info = pbi->common.refresh_alt_ref_frame * (int)VP8_ALTR_FRAME + + pbi->common.refresh_golden_frame * (int)VP8_GOLD_FRAME + + pbi->common.refresh_last_frame * (int)VP8_LAST_FRAME; - return VPX_CODEC_OK; - } - else - return VPX_CODEC_INVALID_PARAM; + return VPX_CODEC_OK; + } else { + return VPX_CODEC_INVALID_PARAM; + } } -extern int vp8dx_references_buffer( VP8_COMMON *oci, int ref_frame ); +extern int vp8dx_references_buffer(VP8_COMMON *oci, int ref_frame); static vpx_codec_err_t vp8_get_last_ref_frame(vpx_codec_alg_priv_t *ctx, - va_list args) -{ - int *ref_info = va_arg(args, int *); + va_list args) { + int *ref_info = va_arg(args, int *); - if (ref_info && !ctx->yv12_frame_buffers.use_frame_threads) - { - VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0]; - VP8_COMMON *oci = &pbi->common; - *ref_info = - (vp8dx_references_buffer( oci, ALTREF_FRAME )?VP8_ALTR_FRAME:0) | - (vp8dx_references_buffer( oci, GOLDEN_FRAME )?VP8_GOLD_FRAME:0) | - (vp8dx_references_buffer( oci, LAST_FRAME )?VP8_LAST_FRAME:0); + if (ref_info && !ctx->yv12_frame_buffers.use_frame_threads) { + VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0]; + VP8_COMMON *oci = &pbi->common; + *ref_info = + (vp8dx_references_buffer(oci, ALTREF_FRAME) ? VP8_ALTR_FRAME : 0) | + (vp8dx_references_buffer(oci, GOLDEN_FRAME) ? VP8_GOLD_FRAME : 0) | + (vp8dx_references_buffer(oci, LAST_FRAME) ? 
VP8_LAST_FRAME : 0); - return VPX_CODEC_OK; - } - else - return VPX_CODEC_INVALID_PARAM; + return VPX_CODEC_OK; + } else { + return VPX_CODEC_INVALID_PARAM; + } } static vpx_codec_err_t vp8_get_frame_corrupted(vpx_codec_alg_priv_t *ctx, - va_list args) -{ - - int *corrupted = va_arg(args, int *); - VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0]; - - if (corrupted && pbi) - { - const YV12_BUFFER_CONFIG *const frame = pbi->common.frame_to_show; - if (frame == NULL) return VPX_CODEC_ERROR; - *corrupted = frame->corrupted; - return VPX_CODEC_OK; - } - else - return VPX_CODEC_INVALID_PARAM; + va_list args) { + int *corrupted = va_arg(args, int *); + VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0]; + if (corrupted && pbi) { + const YV12_BUFFER_CONFIG *const frame = pbi->common.frame_to_show; + if (frame == NULL) return VPX_CODEC_ERROR; + *corrupted = frame->corrupted; + return VPX_CODEC_OK; + } else { + return VPX_CODEC_INVALID_PARAM; + } } static vpx_codec_err_t vp8_set_decryptor(vpx_codec_alg_priv_t *ctx, - va_list args) -{ - vpx_decrypt_init *init = va_arg(args, vpx_decrypt_init *); + va_list args) { + vpx_decrypt_init *init = va_arg(args, vpx_decrypt_init *); - if (init) - { - ctx->decrypt_cb = init->decrypt_cb; - ctx->decrypt_state = init->decrypt_state; - } - else - { - ctx->decrypt_cb = NULL; - ctx->decrypt_state = NULL; - } - return VPX_CODEC_OK; + if (init) { + ctx->decrypt_cb = init->decrypt_cb; + ctx->decrypt_state = init->decrypt_state; + } else { + ctx->decrypt_cb = NULL; + ctx->decrypt_state = NULL; + } + return VPX_CODEC_OK; } -vpx_codec_ctrl_fn_map_t vp8_ctf_maps[] = -{ - {VP8_SET_REFERENCE, vp8_set_reference}, - {VP8_COPY_REFERENCE, vp8_get_reference}, - {VP8_SET_POSTPROC, vp8_set_postproc}, - {VP8_SET_DBG_COLOR_REF_FRAME, vp8_set_dbg_color_ref_frame}, - {VP8_SET_DBG_COLOR_MB_MODES, vp8_set_dbg_color_mb_modes}, - {VP8_SET_DBG_COLOR_B_MODES, vp8_set_dbg_color_b_modes}, - {VP8_SET_DBG_DISPLAY_MV, vp8_set_dbg_display_mv}, - 
{VP8D_GET_LAST_REF_UPDATES, vp8_get_last_ref_updates}, - {VP8D_GET_FRAME_CORRUPTED, vp8_get_frame_corrupted}, - {VP8D_GET_LAST_REF_USED, vp8_get_last_ref_frame}, - {VPXD_SET_DECRYPTOR, vp8_set_decryptor}, - { -1, NULL}, +vpx_codec_ctrl_fn_map_t vp8_ctf_maps[] = { + { VP8_SET_REFERENCE, vp8_set_reference }, + { VP8_COPY_REFERENCE, vp8_get_reference }, + { VP8_SET_POSTPROC, vp8_set_postproc }, + { VP8D_GET_LAST_REF_UPDATES, vp8_get_last_ref_updates }, + { VP8D_GET_FRAME_CORRUPTED, vp8_get_frame_corrupted }, + { VP8D_GET_LAST_REF_USED, vp8_get_last_ref_frame }, + { VPXD_SET_DECRYPTOR, vp8_set_decryptor }, + { -1, NULL }, }; - #ifndef VERSION_STRING #define VERSION_STRING #endif -CODEC_INTERFACE(vpx_codec_vp8_dx) = -{ - "WebM Project VP8 Decoder" VERSION_STRING, - VPX_CODEC_INTERNAL_ABI_VERSION, - VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC | VP8_CAP_ERROR_CONCEALMENT | - VPX_CODEC_CAP_INPUT_FRAGMENTS, - /* vpx_codec_caps_t caps; */ - vp8_init, /* vpx_codec_init_fn_t init; */ - vp8_destroy, /* vpx_codec_destroy_fn_t destroy; */ - vp8_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */ - { - vp8_peek_si, /* vpx_codec_peek_si_fn_t peek_si; */ - vp8_get_si, /* vpx_codec_get_si_fn_t get_si; */ - vp8_decode, /* vpx_codec_decode_fn_t decode; */ - vp8_get_frame, /* vpx_codec_frame_get_fn_t frame_get; */ - NULL, - }, - { /* encoder functions */ - 0, - NULL, - NULL, - NULL, - NULL, - NULL, - NULL - } +CODEC_INTERFACE(vpx_codec_vp8_dx) = { + "WebM Project VP8 Decoder" VERSION_STRING, + VPX_CODEC_INTERNAL_ABI_VERSION, + VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC | VP8_CAP_ERROR_CONCEALMENT | + VPX_CODEC_CAP_INPUT_FRAGMENTS, + /* vpx_codec_caps_t caps; */ + vp8_init, /* vpx_codec_init_fn_t init; */ + vp8_destroy, /* vpx_codec_destroy_fn_t destroy; */ + vp8_ctf_maps, /* vpx_codec_ctrl_fn_map_t *ctrl_maps; */ + { + vp8_peek_si, /* vpx_codec_peek_si_fn_t peek_si; */ + vp8_get_si, /* vpx_codec_get_si_fn_t get_si; */ + vp8_decode, /* vpx_codec_decode_fn_t decode; */ + vp8_get_frame, /* 
vpx_codec_frame_get_fn_t frame_get; */ + NULL, + }, + { + /* encoder functions */ + 0, NULL, /* vpx_codec_enc_cfg_map_t */ + NULL, /* vpx_codec_encode_fn_t */ + NULL, /* vpx_codec_get_cx_data_fn_t */ + NULL, /* vpx_codec_enc_config_set_fn_t */ + NULL, /* vpx_codec_get_global_headers_fn_t */ + NULL, /* vpx_codec_get_preview_frame_fn_t */ + NULL /* vpx_codec_enc_mr_get_mem_loc_fn_t */ + } }; diff --git a/libs/libvpx/vp8/vp8cx.mk b/libs/libvpx/vp8/vp8cx.mk index 857a631bff..4e3d31e3b7 100644 --- a/libs/libvpx/vp8/vp8cx.mk +++ b/libs/libvpx/vp8/vp8cx.mk @@ -16,10 +16,6 @@ VP8_CX_SRCS-no += $(VP8_COMMON_SRCS-no) VP8_CX_SRCS_REMOVE-yes += $(VP8_COMMON_SRCS_REMOVE-yes) VP8_CX_SRCS_REMOVE-no += $(VP8_COMMON_SRCS_REMOVE-no) -ifeq ($(ARCH_ARM),yes) - include $(SRC_PATH_BARE)/$(VP8_PREFIX)vp8cx_arm.mk -endif - VP8_CX_SRCS-yes += vp8cx.mk VP8_CX_SRCS-yes += vp8_cx_iface.c @@ -101,6 +97,11 @@ ifeq ($(CONFIG_REALTIME_ONLY),yes) VP8_CX_SRCS_REMOVE-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm endif +VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/denoising_neon.c +VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/fastquantizeb_neon.c +VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/shortfdct_neon.c +VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp8_shortwalsh4x4_neon.c + VP8_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/dct_msa.c VP8_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/encodeopt_msa.c VP8_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/quantize_msa.c diff --git a/libs/libvpx/vp8/vp8cx_arm.mk b/libs/libvpx/vp8/vp8cx_arm.mk deleted file mode 100644 index 838b53d84c..0000000000 --- a/libs/libvpx/vp8/vp8cx_arm.mk +++ /dev/null @@ -1,28 +0,0 @@ -## -## Copyright (c) 2010 The WebM project authors. All Rights Reserved. -## -## Use of this source code is governed by a BSD-style license -## that can be found in the LICENSE file in the root of the source -## tree. An additional intellectual property rights grant can be found -## in the file PATENTS. 
All contributing project authors may -## be found in the AUTHORS file in the root of the source tree. -## - - -VP8_CX_SRCS-$(ARCH_ARM) += vp8cx_arm.mk - -#File list for arm -# encoder -VP8_CX_SRCS-$(ARCH_ARM) += encoder/arm/dct_arm.c - -#File list for media -# encoder -VP8_CX_SRCS-$(HAVE_MEDIA) += encoder/arm/armv6/vp8_short_fdct4x4_armv6$(ASM) -VP8_CX_SRCS-$(HAVE_MEDIA) += encoder/arm/armv6/walsh_v6$(ASM) - -#File list for neon -# encoder -VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/denoising_neon.c -VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/fastquantizeb_neon.c -VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/shortfdct_neon.c -VP8_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp8_shortwalsh4x4_neon.c diff --git a/libs/libvpx/vp9/common/arm/neon/vp9_iht4x4_add_neon.c b/libs/libvpx/vp9/common/arm/neon/vp9_iht4x4_add_neon.c index 1761fada2f..eca655e728 100644 --- a/libs/libvpx/vp9/common/arm/neon/vp9_iht4x4_add_neon.c +++ b/libs/libvpx/vp9/common/arm/neon/vp9_iht4x4_add_neon.c @@ -23,226 +23,211 @@ static int16_t cospi_8_64 = 0x3b21; static int16_t cospi_16_64 = 0x2d41; static int16_t cospi_24_64 = 0x187e; -static INLINE void TRANSPOSE4X4( - int16x8_t *q8s16, - int16x8_t *q9s16) { - int32x4_t q8s32, q9s32; - int16x4x2_t d0x2s16, d1x2s16; - int32x4x2_t q0x2s32; +static INLINE void TRANSPOSE4X4(int16x8_t *q8s16, int16x8_t *q9s16) { + int32x4_t q8s32, q9s32; + int16x4x2_t d0x2s16, d1x2s16; + int32x4x2_t q0x2s32; - d0x2s16 = vtrn_s16(vget_low_s16(*q8s16), vget_high_s16(*q8s16)); - d1x2s16 = vtrn_s16(vget_low_s16(*q9s16), vget_high_s16(*q9s16)); + d0x2s16 = vtrn_s16(vget_low_s16(*q8s16), vget_high_s16(*q8s16)); + d1x2s16 = vtrn_s16(vget_low_s16(*q9s16), vget_high_s16(*q9s16)); - q8s32 = vreinterpretq_s32_s16(vcombine_s16(d0x2s16.val[0], d0x2s16.val[1])); - q9s32 = vreinterpretq_s32_s16(vcombine_s16(d1x2s16.val[0], d1x2s16.val[1])); - q0x2s32 = vtrnq_s32(q8s32, q9s32); + q8s32 = vreinterpretq_s32_s16(vcombine_s16(d0x2s16.val[0], d0x2s16.val[1])); + q9s32 = 
vreinterpretq_s32_s16(vcombine_s16(d1x2s16.val[0], d1x2s16.val[1])); + q0x2s32 = vtrnq_s32(q8s32, q9s32); - *q8s16 = vreinterpretq_s16_s32(q0x2s32.val[0]); - *q9s16 = vreinterpretq_s16_s32(q0x2s32.val[1]); - return; + *q8s16 = vreinterpretq_s16_s32(q0x2s32.val[0]); + *q9s16 = vreinterpretq_s16_s32(q0x2s32.val[1]); + return; } -static INLINE void GENERATE_COSINE_CONSTANTS( - int16x4_t *d0s16, - int16x4_t *d1s16, - int16x4_t *d2s16) { - *d0s16 = vdup_n_s16(cospi_8_64); - *d1s16 = vdup_n_s16(cospi_16_64); - *d2s16 = vdup_n_s16(cospi_24_64); - return; +static INLINE void GENERATE_COSINE_CONSTANTS(int16x4_t *d0s16, int16x4_t *d1s16, + int16x4_t *d2s16) { + *d0s16 = vdup_n_s16(cospi_8_64); + *d1s16 = vdup_n_s16(cospi_16_64); + *d2s16 = vdup_n_s16(cospi_24_64); + return; } -static INLINE void GENERATE_SINE_CONSTANTS( - int16x4_t *d3s16, - int16x4_t *d4s16, - int16x4_t *d5s16, - int16x8_t *q3s16) { - *d3s16 = vdup_n_s16(sinpi_1_9); - *d4s16 = vdup_n_s16(sinpi_2_9); - *q3s16 = vdupq_n_s16(sinpi_3_9); - *d5s16 = vdup_n_s16(sinpi_4_9); - return; +static INLINE void GENERATE_SINE_CONSTANTS(int16x4_t *d3s16, int16x4_t *d4s16, + int16x4_t *d5s16, int16x8_t *q3s16) { + *d3s16 = vdup_n_s16(sinpi_1_9); + *d4s16 = vdup_n_s16(sinpi_2_9); + *q3s16 = vdupq_n_s16(sinpi_3_9); + *d5s16 = vdup_n_s16(sinpi_4_9); + return; } -static INLINE void IDCT4x4_1D( - int16x4_t *d0s16, - int16x4_t *d1s16, - int16x4_t *d2s16, - int16x8_t *q8s16, - int16x8_t *q9s16) { - int16x4_t d16s16, d17s16, d18s16, d19s16, d23s16, d24s16; - int16x4_t d26s16, d27s16, d28s16, d29s16; - int32x4_t q10s32, q13s32, q14s32, q15s32; - int16x8_t q13s16, q14s16; +static INLINE void IDCT4x4_1D(int16x4_t *d0s16, int16x4_t *d1s16, + int16x4_t *d2s16, int16x8_t *q8s16, + int16x8_t *q9s16) { + int16x4_t d16s16, d17s16, d18s16, d19s16, d23s16, d24s16; + int16x4_t d26s16, d27s16, d28s16, d29s16; + int32x4_t q10s32, q13s32, q14s32, q15s32; + int16x8_t q13s16, q14s16; - d16s16 = vget_low_s16(*q8s16); - d17s16 = vget_high_s16(*q8s16); 
- d18s16 = vget_low_s16(*q9s16); - d19s16 = vget_high_s16(*q9s16); + d16s16 = vget_low_s16(*q8s16); + d17s16 = vget_high_s16(*q8s16); + d18s16 = vget_low_s16(*q9s16); + d19s16 = vget_high_s16(*q9s16); - d23s16 = vadd_s16(d16s16, d18s16); - d24s16 = vsub_s16(d16s16, d18s16); + d23s16 = vadd_s16(d16s16, d18s16); + d24s16 = vsub_s16(d16s16, d18s16); - q15s32 = vmull_s16(d17s16, *d2s16); - q10s32 = vmull_s16(d17s16, *d0s16); - q13s32 = vmull_s16(d23s16, *d1s16); - q14s32 = vmull_s16(d24s16, *d1s16); - q15s32 = vmlsl_s16(q15s32, d19s16, *d0s16); - q10s32 = vmlal_s16(q10s32, d19s16, *d2s16); + q15s32 = vmull_s16(d17s16, *d2s16); + q10s32 = vmull_s16(d17s16, *d0s16); + q13s32 = vmull_s16(d23s16, *d1s16); + q14s32 = vmull_s16(d24s16, *d1s16); + q15s32 = vmlsl_s16(q15s32, d19s16, *d0s16); + q10s32 = vmlal_s16(q10s32, d19s16, *d2s16); - d26s16 = vqrshrn_n_s32(q13s32, 14); - d27s16 = vqrshrn_n_s32(q14s32, 14); - d29s16 = vqrshrn_n_s32(q15s32, 14); - d28s16 = vqrshrn_n_s32(q10s32, 14); + d26s16 = vqrshrn_n_s32(q13s32, 14); + d27s16 = vqrshrn_n_s32(q14s32, 14); + d29s16 = vqrshrn_n_s32(q15s32, 14); + d28s16 = vqrshrn_n_s32(q10s32, 14); - q13s16 = vcombine_s16(d26s16, d27s16); - q14s16 = vcombine_s16(d28s16, d29s16); - *q8s16 = vaddq_s16(q13s16, q14s16); - *q9s16 = vsubq_s16(q13s16, q14s16); - *q9s16 = vcombine_s16(vget_high_s16(*q9s16), - vget_low_s16(*q9s16)); // vswp - return; + q13s16 = vcombine_s16(d26s16, d27s16); + q14s16 = vcombine_s16(d28s16, d29s16); + *q8s16 = vaddq_s16(q13s16, q14s16); + *q9s16 = vsubq_s16(q13s16, q14s16); + *q9s16 = vcombine_s16(vget_high_s16(*q9s16), vget_low_s16(*q9s16)); // vswp + return; } -static INLINE void IADST4x4_1D( - int16x4_t *d3s16, - int16x4_t *d4s16, - int16x4_t *d5s16, - int16x8_t *q3s16, - int16x8_t *q8s16, - int16x8_t *q9s16) { - int16x4_t d6s16, d16s16, d17s16, d18s16, d19s16; - int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32; +static INLINE void IADST4x4_1D(int16x4_t *d3s16, int16x4_t *d4s16, + int16x4_t 
*d5s16, int16x8_t *q3s16, + int16x8_t *q8s16, int16x8_t *q9s16) { + int16x4_t d6s16, d16s16, d17s16, d18s16, d19s16; + int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32; - d6s16 = vget_low_s16(*q3s16); + d6s16 = vget_low_s16(*q3s16); - d16s16 = vget_low_s16(*q8s16); - d17s16 = vget_high_s16(*q8s16); - d18s16 = vget_low_s16(*q9s16); - d19s16 = vget_high_s16(*q9s16); + d16s16 = vget_low_s16(*q8s16); + d17s16 = vget_high_s16(*q8s16); + d18s16 = vget_low_s16(*q9s16); + d19s16 = vget_high_s16(*q9s16); - q10s32 = vmull_s16(*d3s16, d16s16); - q11s32 = vmull_s16(*d4s16, d16s16); - q12s32 = vmull_s16(d6s16, d17s16); - q13s32 = vmull_s16(*d5s16, d18s16); - q14s32 = vmull_s16(*d3s16, d18s16); - q15s32 = vmovl_s16(d16s16); - q15s32 = vaddw_s16(q15s32, d19s16); - q8s32 = vmull_s16(*d4s16, d19s16); - q15s32 = vsubw_s16(q15s32, d18s16); - q9s32 = vmull_s16(*d5s16, d19s16); + q10s32 = vmull_s16(*d3s16, d16s16); + q11s32 = vmull_s16(*d4s16, d16s16); + q12s32 = vmull_s16(d6s16, d17s16); + q13s32 = vmull_s16(*d5s16, d18s16); + q14s32 = vmull_s16(*d3s16, d18s16); + q15s32 = vmovl_s16(d16s16); + q15s32 = vaddw_s16(q15s32, d19s16); + q8s32 = vmull_s16(*d4s16, d19s16); + q15s32 = vsubw_s16(q15s32, d18s16); + q9s32 = vmull_s16(*d5s16, d19s16); - q10s32 = vaddq_s32(q10s32, q13s32); - q10s32 = vaddq_s32(q10s32, q8s32); - q11s32 = vsubq_s32(q11s32, q14s32); - q8s32 = vdupq_n_s32(sinpi_3_9); - q11s32 = vsubq_s32(q11s32, q9s32); - q15s32 = vmulq_s32(q15s32, q8s32); + q10s32 = vaddq_s32(q10s32, q13s32); + q10s32 = vaddq_s32(q10s32, q8s32); + q11s32 = vsubq_s32(q11s32, q14s32); + q8s32 = vdupq_n_s32(sinpi_3_9); + q11s32 = vsubq_s32(q11s32, q9s32); + q15s32 = vmulq_s32(q15s32, q8s32); - q13s32 = vaddq_s32(q10s32, q12s32); - q10s32 = vaddq_s32(q10s32, q11s32); - q14s32 = vaddq_s32(q11s32, q12s32); - q10s32 = vsubq_s32(q10s32, q12s32); + q13s32 = vaddq_s32(q10s32, q12s32); + q10s32 = vaddq_s32(q10s32, q11s32); + q14s32 = vaddq_s32(q11s32, q12s32); + q10s32 = vsubq_s32(q10s32, 
q12s32); - d16s16 = vqrshrn_n_s32(q13s32, 14); - d17s16 = vqrshrn_n_s32(q14s32, 14); - d18s16 = vqrshrn_n_s32(q15s32, 14); - d19s16 = vqrshrn_n_s32(q10s32, 14); + d16s16 = vqrshrn_n_s32(q13s32, 14); + d17s16 = vqrshrn_n_s32(q14s32, 14); + d18s16 = vqrshrn_n_s32(q15s32, 14); + d19s16 = vqrshrn_n_s32(q10s32, 14); - *q8s16 = vcombine_s16(d16s16, d17s16); - *q9s16 = vcombine_s16(d18s16, d19s16); - return; + *q8s16 = vcombine_s16(d16s16, d17s16); + *q9s16 = vcombine_s16(d18s16, d19s16); + return; } void vp9_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type) { - uint8x8_t d26u8, d27u8; - int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16; - uint32x2_t d26u32, d27u32; - int16x8_t q3s16, q8s16, q9s16; - uint16x8_t q8u16, q9u16; + uint8x8_t d26u8, d27u8; + int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16; + uint32x2_t d26u32, d27u32; + int16x8_t q3s16, q8s16, q9s16; + uint16x8_t q8u16, q9u16; - d26u32 = d27u32 = vdup_n_u32(0); + d26u32 = d27u32 = vdup_n_u32(0); - q8s16 = vld1q_s16(input); - q9s16 = vld1q_s16(input + 8); + q8s16 = vld1q_s16(input); + q9s16 = vld1q_s16(input + 8); - TRANSPOSE4X4(&q8s16, &q9s16); + TRANSPOSE4X4(&q8s16, &q9s16); - switch (tx_type) { - case 0: // idct_idct is not supported. Fall back to C - vp9_iht4x4_16_add_c(input, dest, dest_stride, tx_type); - return; - break; - case 1: // iadst_idct - // generate constants - GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16); - GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16); + switch (tx_type) { + case 0: // idct_idct is not supported. 
Fall back to C + vp9_iht4x4_16_add_c(input, dest, dest_stride, tx_type); + return; + break; + case 1: // iadst_idct + // generate constants + GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16); + GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16); - // first transform rows - IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16); + // first transform rows + IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16); - // transpose the matrix - TRANSPOSE4X4(&q8s16, &q9s16); + // transpose the matrix + TRANSPOSE4X4(&q8s16, &q9s16); - // then transform columns - IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16); - break; - case 2: // idct_iadst - // generate constantsyy - GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16); - GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16); + // then transform columns + IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16); + break; + case 2: // idct_iadst + // generate constantsyy + GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16); + GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16); - // first transform rows - IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16); + // first transform rows + IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16); - // transpose the matrix - TRANSPOSE4X4(&q8s16, &q9s16); + // transpose the matrix + TRANSPOSE4X4(&q8s16, &q9s16); - // then transform columns - IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16); - break; - case 3: // iadst_iadst - // generate constants - GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16); + // then transform columns + IDCT4x4_1D(&d0s16, &d1s16, &d2s16, &q8s16, &q9s16); + break; + case 3: // iadst_iadst + // generate constants + GENERATE_SINE_CONSTANTS(&d3s16, &d4s16, &d5s16, &q3s16); - // first transform rows - IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16); + // first transform rows + IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16); - // transpose the matrix - TRANSPOSE4X4(&q8s16, &q9s16); + // transpose the 
matrix + TRANSPOSE4X4(&q8s16, &q9s16); - // then transform columns - IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16); - break; - default: // iadst_idct - assert(0); - break; - } + // then transform columns + IADST4x4_1D(&d3s16, &d4s16, &d5s16, &q3s16, &q8s16, &q9s16); + break; + default: // iadst_idct + assert(0); + break; + } - q8s16 = vrshrq_n_s16(q8s16, 4); - q9s16 = vrshrq_n_s16(q9s16, 4); + q8s16 = vrshrq_n_s16(q8s16, 4); + q9s16 = vrshrq_n_s16(q9s16, 4); - d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 0); - dest += dest_stride; - d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 1); - dest += dest_stride; - d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 0); - dest += dest_stride; - d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 1); + d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 0); + dest += dest_stride; + d26u32 = vld1_lane_u32((const uint32_t *)dest, d26u32, 1); + dest += dest_stride; + d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 0); + dest += dest_stride; + d27u32 = vld1_lane_u32((const uint32_t *)dest, d27u32, 1); - q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u32(d26u32)); - q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u32(d27u32)); + q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u32(d26u32)); + q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u32(d27u32)); - d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); - d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); + d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); + d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); - vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d27u8), 1); - dest -= dest_stride; - vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d27u8), 0); - dest -= dest_stride; - vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 1); - dest -= dest_stride; - vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 0); - return; + vst1_lane_u32((uint32_t 
*)dest, vreinterpret_u32_u8(d27u8), 1); + dest -= dest_stride; + vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d27u8), 0); + dest -= dest_stride; + vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 1); + dest -= dest_stride; + vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 0); + return; } diff --git a/libs/libvpx/vp9/common/arm/neon/vp9_iht8x8_add_neon.c b/libs/libvpx/vp9/common/arm/neon/vp9_iht8x8_add_neon.c index 04b342c3d3..0e3febf159 100644 --- a/libs/libvpx/vp9/common/arm/neon/vp9_iht8x8_add_neon.c +++ b/libs/libvpx/vp9/common/arm/neon/vp9_iht8x8_add_neon.c @@ -14,6 +14,7 @@ #include "./vp9_rtcd.h" #include "./vpx_config.h" #include "vp9/common/vp9_common.h" +#include "vpx_dsp/arm/transpose_neon.h" static int16_t cospi_2_64 = 16305; static int16_t cospi_4_64 = 16069; @@ -31,594 +32,513 @@ static int16_t cospi_26_64 = 4756; static int16_t cospi_28_64 = 3196; static int16_t cospi_30_64 = 1606; -static INLINE void TRANSPOSE8X8( - int16x8_t *q8s16, - int16x8_t *q9s16, - int16x8_t *q10s16, - int16x8_t *q11s16, - int16x8_t *q12s16, - int16x8_t *q13s16, - int16x8_t *q14s16, - int16x8_t *q15s16) { - int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; - int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; - int32x4x2_t q0x2s32, q1x2s32, q2x2s32, q3x2s32; - int16x8x2_t q0x2s16, q1x2s16, q2x2s16, q3x2s16; +static INLINE void IDCT8x8_1D(int16x8_t *q8s16, int16x8_t *q9s16, + int16x8_t *q10s16, int16x8_t *q11s16, + int16x8_t *q12s16, int16x8_t *q13s16, + int16x8_t *q14s16, int16x8_t *q15s16) { + int16x4_t d0s16, d1s16, d2s16, d3s16; + int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; + int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; + int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; + int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; + int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32; + int32x4_t q10s32, q11s32, 
q12s32, q13s32, q15s32; - d16s16 = vget_low_s16(*q8s16); - d17s16 = vget_high_s16(*q8s16); - d18s16 = vget_low_s16(*q9s16); - d19s16 = vget_high_s16(*q9s16); - d20s16 = vget_low_s16(*q10s16); - d21s16 = vget_high_s16(*q10s16); - d22s16 = vget_low_s16(*q11s16); - d23s16 = vget_high_s16(*q11s16); - d24s16 = vget_low_s16(*q12s16); - d25s16 = vget_high_s16(*q12s16); - d26s16 = vget_low_s16(*q13s16); - d27s16 = vget_high_s16(*q13s16); - d28s16 = vget_low_s16(*q14s16); - d29s16 = vget_high_s16(*q14s16); - d30s16 = vget_low_s16(*q15s16); - d31s16 = vget_high_s16(*q15s16); + d0s16 = vdup_n_s16(cospi_28_64); + d1s16 = vdup_n_s16(cospi_4_64); + d2s16 = vdup_n_s16(cospi_12_64); + d3s16 = vdup_n_s16(cospi_20_64); - *q8s16 = vcombine_s16(d16s16, d24s16); // vswp d17, d24 - *q9s16 = vcombine_s16(d18s16, d26s16); // vswp d19, d26 - *q10s16 = vcombine_s16(d20s16, d28s16); // vswp d21, d28 - *q11s16 = vcombine_s16(d22s16, d30s16); // vswp d23, d30 - *q12s16 = vcombine_s16(d17s16, d25s16); - *q13s16 = vcombine_s16(d19s16, d27s16); - *q14s16 = vcombine_s16(d21s16, d29s16); - *q15s16 = vcombine_s16(d23s16, d31s16); + d16s16 = vget_low_s16(*q8s16); + d17s16 = vget_high_s16(*q8s16); + d18s16 = vget_low_s16(*q9s16); + d19s16 = vget_high_s16(*q9s16); + d20s16 = vget_low_s16(*q10s16); + d21s16 = vget_high_s16(*q10s16); + d22s16 = vget_low_s16(*q11s16); + d23s16 = vget_high_s16(*q11s16); + d24s16 = vget_low_s16(*q12s16); + d25s16 = vget_high_s16(*q12s16); + d26s16 = vget_low_s16(*q13s16); + d27s16 = vget_high_s16(*q13s16); + d28s16 = vget_low_s16(*q14s16); + d29s16 = vget_high_s16(*q14s16); + d30s16 = vget_low_s16(*q15s16); + d31s16 = vget_high_s16(*q15s16); - q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q8s16), - vreinterpretq_s32_s16(*q10s16)); - q1x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q9s16), - vreinterpretq_s32_s16(*q11s16)); - q2x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q12s16), - vreinterpretq_s32_s16(*q14s16)); - q3x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q13s16), - 
vreinterpretq_s32_s16(*q15s16)); + q2s32 = vmull_s16(d18s16, d0s16); + q3s32 = vmull_s16(d19s16, d0s16); + q5s32 = vmull_s16(d26s16, d2s16); + q6s32 = vmull_s16(d27s16, d2s16); - q0x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[0]), // q8 - vreinterpretq_s16_s32(q1x2s32.val[0])); // q9 - q1x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[1]), // q10 - vreinterpretq_s16_s32(q1x2s32.val[1])); // q11 - q2x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[0]), // q12 - vreinterpretq_s16_s32(q3x2s32.val[0])); // q13 - q3x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[1]), // q14 - vreinterpretq_s16_s32(q3x2s32.val[1])); // q15 + q2s32 = vmlsl_s16(q2s32, d30s16, d1s16); + q3s32 = vmlsl_s16(q3s32, d31s16, d1s16); + q5s32 = vmlsl_s16(q5s32, d22s16, d3s16); + q6s32 = vmlsl_s16(q6s32, d23s16, d3s16); - *q8s16 = q0x2s16.val[0]; - *q9s16 = q0x2s16.val[1]; - *q10s16 = q1x2s16.val[0]; - *q11s16 = q1x2s16.val[1]; - *q12s16 = q2x2s16.val[0]; - *q13s16 = q2x2s16.val[1]; - *q14s16 = q3x2s16.val[0]; - *q15s16 = q3x2s16.val[1]; - return; + d8s16 = vqrshrn_n_s32(q2s32, 14); + d9s16 = vqrshrn_n_s32(q3s32, 14); + d10s16 = vqrshrn_n_s32(q5s32, 14); + d11s16 = vqrshrn_n_s32(q6s32, 14); + q4s16 = vcombine_s16(d8s16, d9s16); + q5s16 = vcombine_s16(d10s16, d11s16); + + q2s32 = vmull_s16(d18s16, d1s16); + q3s32 = vmull_s16(d19s16, d1s16); + q9s32 = vmull_s16(d26s16, d3s16); + q13s32 = vmull_s16(d27s16, d3s16); + + q2s32 = vmlal_s16(q2s32, d30s16, d0s16); + q3s32 = vmlal_s16(q3s32, d31s16, d0s16); + q9s32 = vmlal_s16(q9s32, d22s16, d2s16); + q13s32 = vmlal_s16(q13s32, d23s16, d2s16); + + d14s16 = vqrshrn_n_s32(q2s32, 14); + d15s16 = vqrshrn_n_s32(q3s32, 14); + d12s16 = vqrshrn_n_s32(q9s32, 14); + d13s16 = vqrshrn_n_s32(q13s32, 14); + q6s16 = vcombine_s16(d12s16, d13s16); + q7s16 = vcombine_s16(d14s16, d15s16); + + d0s16 = vdup_n_s16(cospi_16_64); + + q2s32 = vmull_s16(d16s16, d0s16); + q3s32 = vmull_s16(d17s16, d0s16); + q13s32 = vmull_s16(d16s16, d0s16); + q15s32 = 
vmull_s16(d17s16, d0s16); + + q2s32 = vmlal_s16(q2s32, d24s16, d0s16); + q3s32 = vmlal_s16(q3s32, d25s16, d0s16); + q13s32 = vmlsl_s16(q13s32, d24s16, d0s16); + q15s32 = vmlsl_s16(q15s32, d25s16, d0s16); + + d0s16 = vdup_n_s16(cospi_24_64); + d1s16 = vdup_n_s16(cospi_8_64); + + d18s16 = vqrshrn_n_s32(q2s32, 14); + d19s16 = vqrshrn_n_s32(q3s32, 14); + d22s16 = vqrshrn_n_s32(q13s32, 14); + d23s16 = vqrshrn_n_s32(q15s32, 14); + *q9s16 = vcombine_s16(d18s16, d19s16); + *q11s16 = vcombine_s16(d22s16, d23s16); + + q2s32 = vmull_s16(d20s16, d0s16); + q3s32 = vmull_s16(d21s16, d0s16); + q8s32 = vmull_s16(d20s16, d1s16); + q12s32 = vmull_s16(d21s16, d1s16); + + q2s32 = vmlsl_s16(q2s32, d28s16, d1s16); + q3s32 = vmlsl_s16(q3s32, d29s16, d1s16); + q8s32 = vmlal_s16(q8s32, d28s16, d0s16); + q12s32 = vmlal_s16(q12s32, d29s16, d0s16); + + d26s16 = vqrshrn_n_s32(q2s32, 14); + d27s16 = vqrshrn_n_s32(q3s32, 14); + d30s16 = vqrshrn_n_s32(q8s32, 14); + d31s16 = vqrshrn_n_s32(q12s32, 14); + *q13s16 = vcombine_s16(d26s16, d27s16); + *q15s16 = vcombine_s16(d30s16, d31s16); + + q0s16 = vaddq_s16(*q9s16, *q15s16); + q1s16 = vaddq_s16(*q11s16, *q13s16); + q2s16 = vsubq_s16(*q11s16, *q13s16); + q3s16 = vsubq_s16(*q9s16, *q15s16); + + *q13s16 = vsubq_s16(q4s16, q5s16); + q4s16 = vaddq_s16(q4s16, q5s16); + *q14s16 = vsubq_s16(q7s16, q6s16); + q7s16 = vaddq_s16(q7s16, q6s16); + d26s16 = vget_low_s16(*q13s16); + d27s16 = vget_high_s16(*q13s16); + d28s16 = vget_low_s16(*q14s16); + d29s16 = vget_high_s16(*q14s16); + + d16s16 = vdup_n_s16(cospi_16_64); + + q9s32 = vmull_s16(d28s16, d16s16); + q10s32 = vmull_s16(d29s16, d16s16); + q11s32 = vmull_s16(d28s16, d16s16); + q12s32 = vmull_s16(d29s16, d16s16); + + q9s32 = vmlsl_s16(q9s32, d26s16, d16s16); + q10s32 = vmlsl_s16(q10s32, d27s16, d16s16); + q11s32 = vmlal_s16(q11s32, d26s16, d16s16); + q12s32 = vmlal_s16(q12s32, d27s16, d16s16); + + d10s16 = vqrshrn_n_s32(q9s32, 14); + d11s16 = vqrshrn_n_s32(q10s32, 14); + d12s16 = vqrshrn_n_s32(q11s32, 14); + 
d13s16 = vqrshrn_n_s32(q12s32, 14); + q5s16 = vcombine_s16(d10s16, d11s16); + q6s16 = vcombine_s16(d12s16, d13s16); + + *q8s16 = vaddq_s16(q0s16, q7s16); + *q9s16 = vaddq_s16(q1s16, q6s16); + *q10s16 = vaddq_s16(q2s16, q5s16); + *q11s16 = vaddq_s16(q3s16, q4s16); + *q12s16 = vsubq_s16(q3s16, q4s16); + *q13s16 = vsubq_s16(q2s16, q5s16); + *q14s16 = vsubq_s16(q1s16, q6s16); + *q15s16 = vsubq_s16(q0s16, q7s16); + return; } -static INLINE void IDCT8x8_1D( - int16x8_t *q8s16, - int16x8_t *q9s16, - int16x8_t *q10s16, - int16x8_t *q11s16, - int16x8_t *q12s16, - int16x8_t *q13s16, - int16x8_t *q14s16, - int16x8_t *q15s16) { - int16x4_t d0s16, d1s16, d2s16, d3s16; - int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; - int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; - int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; - int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; - int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32; - int32x4_t q10s32, q11s32, q12s32, q13s32, q15s32; - - d0s16 = vdup_n_s16(cospi_28_64); - d1s16 = vdup_n_s16(cospi_4_64); - d2s16 = vdup_n_s16(cospi_12_64); - d3s16 = vdup_n_s16(cospi_20_64); - - d16s16 = vget_low_s16(*q8s16); - d17s16 = vget_high_s16(*q8s16); - d18s16 = vget_low_s16(*q9s16); - d19s16 = vget_high_s16(*q9s16); - d20s16 = vget_low_s16(*q10s16); - d21s16 = vget_high_s16(*q10s16); - d22s16 = vget_low_s16(*q11s16); - d23s16 = vget_high_s16(*q11s16); - d24s16 = vget_low_s16(*q12s16); - d25s16 = vget_high_s16(*q12s16); - d26s16 = vget_low_s16(*q13s16); - d27s16 = vget_high_s16(*q13s16); - d28s16 = vget_low_s16(*q14s16); - d29s16 = vget_high_s16(*q14s16); - d30s16 = vget_low_s16(*q15s16); - d31s16 = vget_high_s16(*q15s16); - - q2s32 = vmull_s16(d18s16, d0s16); - q3s32 = vmull_s16(d19s16, d0s16); - q5s32 = vmull_s16(d26s16, d2s16); - q6s32 = vmull_s16(d27s16, d2s16); - - q2s32 = vmlsl_s16(q2s32, d30s16, d1s16); - q3s32 = vmlsl_s16(q3s32, d31s16, d1s16); - q5s32 = 
vmlsl_s16(q5s32, d22s16, d3s16); - q6s32 = vmlsl_s16(q6s32, d23s16, d3s16); - - d8s16 = vqrshrn_n_s32(q2s32, 14); - d9s16 = vqrshrn_n_s32(q3s32, 14); - d10s16 = vqrshrn_n_s32(q5s32, 14); - d11s16 = vqrshrn_n_s32(q6s32, 14); - q4s16 = vcombine_s16(d8s16, d9s16); - q5s16 = vcombine_s16(d10s16, d11s16); - - q2s32 = vmull_s16(d18s16, d1s16); - q3s32 = vmull_s16(d19s16, d1s16); - q9s32 = vmull_s16(d26s16, d3s16); - q13s32 = vmull_s16(d27s16, d3s16); - - q2s32 = vmlal_s16(q2s32, d30s16, d0s16); - q3s32 = vmlal_s16(q3s32, d31s16, d0s16); - q9s32 = vmlal_s16(q9s32, d22s16, d2s16); - q13s32 = vmlal_s16(q13s32, d23s16, d2s16); - - d14s16 = vqrshrn_n_s32(q2s32, 14); - d15s16 = vqrshrn_n_s32(q3s32, 14); - d12s16 = vqrshrn_n_s32(q9s32, 14); - d13s16 = vqrshrn_n_s32(q13s32, 14); - q6s16 = vcombine_s16(d12s16, d13s16); - q7s16 = vcombine_s16(d14s16, d15s16); - - d0s16 = vdup_n_s16(cospi_16_64); - - q2s32 = vmull_s16(d16s16, d0s16); - q3s32 = vmull_s16(d17s16, d0s16); - q13s32 = vmull_s16(d16s16, d0s16); - q15s32 = vmull_s16(d17s16, d0s16); - - q2s32 = vmlal_s16(q2s32, d24s16, d0s16); - q3s32 = vmlal_s16(q3s32, d25s16, d0s16); - q13s32 = vmlsl_s16(q13s32, d24s16, d0s16); - q15s32 = vmlsl_s16(q15s32, d25s16, d0s16); - - d0s16 = vdup_n_s16(cospi_24_64); - d1s16 = vdup_n_s16(cospi_8_64); - - d18s16 = vqrshrn_n_s32(q2s32, 14); - d19s16 = vqrshrn_n_s32(q3s32, 14); - d22s16 = vqrshrn_n_s32(q13s32, 14); - d23s16 = vqrshrn_n_s32(q15s32, 14); - *q9s16 = vcombine_s16(d18s16, d19s16); - *q11s16 = vcombine_s16(d22s16, d23s16); - - q2s32 = vmull_s16(d20s16, d0s16); - q3s32 = vmull_s16(d21s16, d0s16); - q8s32 = vmull_s16(d20s16, d1s16); - q12s32 = vmull_s16(d21s16, d1s16); - - q2s32 = vmlsl_s16(q2s32, d28s16, d1s16); - q3s32 = vmlsl_s16(q3s32, d29s16, d1s16); - q8s32 = vmlal_s16(q8s32, d28s16, d0s16); - q12s32 = vmlal_s16(q12s32, d29s16, d0s16); - - d26s16 = vqrshrn_n_s32(q2s32, 14); - d27s16 = vqrshrn_n_s32(q3s32, 14); - d30s16 = vqrshrn_n_s32(q8s32, 14); - d31s16 = vqrshrn_n_s32(q12s32, 14); 
- *q13s16 = vcombine_s16(d26s16, d27s16); - *q15s16 = vcombine_s16(d30s16, d31s16); - - q0s16 = vaddq_s16(*q9s16, *q15s16); - q1s16 = vaddq_s16(*q11s16, *q13s16); - q2s16 = vsubq_s16(*q11s16, *q13s16); - q3s16 = vsubq_s16(*q9s16, *q15s16); - - *q13s16 = vsubq_s16(q4s16, q5s16); - q4s16 = vaddq_s16(q4s16, q5s16); - *q14s16 = vsubq_s16(q7s16, q6s16); - q7s16 = vaddq_s16(q7s16, q6s16); - d26s16 = vget_low_s16(*q13s16); - d27s16 = vget_high_s16(*q13s16); - d28s16 = vget_low_s16(*q14s16); - d29s16 = vget_high_s16(*q14s16); - - d16s16 = vdup_n_s16(cospi_16_64); - - q9s32 = vmull_s16(d28s16, d16s16); - q10s32 = vmull_s16(d29s16, d16s16); - q11s32 = vmull_s16(d28s16, d16s16); - q12s32 = vmull_s16(d29s16, d16s16); - - q9s32 = vmlsl_s16(q9s32, d26s16, d16s16); - q10s32 = vmlsl_s16(q10s32, d27s16, d16s16); - q11s32 = vmlal_s16(q11s32, d26s16, d16s16); - q12s32 = vmlal_s16(q12s32, d27s16, d16s16); - - d10s16 = vqrshrn_n_s32(q9s32, 14); - d11s16 = vqrshrn_n_s32(q10s32, 14); - d12s16 = vqrshrn_n_s32(q11s32, 14); - d13s16 = vqrshrn_n_s32(q12s32, 14); - q5s16 = vcombine_s16(d10s16, d11s16); - q6s16 = vcombine_s16(d12s16, d13s16); - - *q8s16 = vaddq_s16(q0s16, q7s16); - *q9s16 = vaddq_s16(q1s16, q6s16); - *q10s16 = vaddq_s16(q2s16, q5s16); - *q11s16 = vaddq_s16(q3s16, q4s16); - *q12s16 = vsubq_s16(q3s16, q4s16); - *q13s16 = vsubq_s16(q2s16, q5s16); - *q14s16 = vsubq_s16(q1s16, q6s16); - *q15s16 = vsubq_s16(q0s16, q7s16); - return; -} - -static INLINE void IADST8X8_1D( - int16x8_t *q8s16, - int16x8_t *q9s16, - int16x8_t *q10s16, - int16x8_t *q11s16, - int16x8_t *q12s16, - int16x8_t *q13s16, - int16x8_t *q14s16, - int16x8_t *q15s16) { - int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16; - int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; - int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; - int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; - int16x8_t q2s16, q4s16, q5s16, q6s16; - int32x4_t q0s32, 
q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q7s32, q8s32; - int32x4_t q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32; - - d16s16 = vget_low_s16(*q8s16); - d17s16 = vget_high_s16(*q8s16); - d18s16 = vget_low_s16(*q9s16); - d19s16 = vget_high_s16(*q9s16); - d20s16 = vget_low_s16(*q10s16); - d21s16 = vget_high_s16(*q10s16); - d22s16 = vget_low_s16(*q11s16); - d23s16 = vget_high_s16(*q11s16); - d24s16 = vget_low_s16(*q12s16); - d25s16 = vget_high_s16(*q12s16); - d26s16 = vget_low_s16(*q13s16); - d27s16 = vget_high_s16(*q13s16); - d28s16 = vget_low_s16(*q14s16); - d29s16 = vget_high_s16(*q14s16); - d30s16 = vget_low_s16(*q15s16); - d31s16 = vget_high_s16(*q15s16); - - d14s16 = vdup_n_s16(cospi_2_64); - d15s16 = vdup_n_s16(cospi_30_64); - - q1s32 = vmull_s16(d30s16, d14s16); - q2s32 = vmull_s16(d31s16, d14s16); - q3s32 = vmull_s16(d30s16, d15s16); - q4s32 = vmull_s16(d31s16, d15s16); - - d30s16 = vdup_n_s16(cospi_18_64); - d31s16 = vdup_n_s16(cospi_14_64); - - q1s32 = vmlal_s16(q1s32, d16s16, d15s16); - q2s32 = vmlal_s16(q2s32, d17s16, d15s16); - q3s32 = vmlsl_s16(q3s32, d16s16, d14s16); - q4s32 = vmlsl_s16(q4s32, d17s16, d14s16); - - q5s32 = vmull_s16(d22s16, d30s16); - q6s32 = vmull_s16(d23s16, d30s16); - q7s32 = vmull_s16(d22s16, d31s16); - q8s32 = vmull_s16(d23s16, d31s16); - - q5s32 = vmlal_s16(q5s32, d24s16, d31s16); - q6s32 = vmlal_s16(q6s32, d25s16, d31s16); - q7s32 = vmlsl_s16(q7s32, d24s16, d30s16); - q8s32 = vmlsl_s16(q8s32, d25s16, d30s16); - - q11s32 = vaddq_s32(q1s32, q5s32); - q12s32 = vaddq_s32(q2s32, q6s32); - q1s32 = vsubq_s32(q1s32, q5s32); - q2s32 = vsubq_s32(q2s32, q6s32); - - d22s16 = vqrshrn_n_s32(q11s32, 14); - d23s16 = vqrshrn_n_s32(q12s32, 14); - *q11s16 = vcombine_s16(d22s16, d23s16); - - q12s32 = vaddq_s32(q3s32, q7s32); - q15s32 = vaddq_s32(q4s32, q8s32); - q3s32 = vsubq_s32(q3s32, q7s32); - q4s32 = vsubq_s32(q4s32, q8s32); - - d2s16 = vqrshrn_n_s32(q1s32, 14); - d3s16 = vqrshrn_n_s32(q2s32, 14); - d24s16 = vqrshrn_n_s32(q12s32, 14); - 
d25s16 = vqrshrn_n_s32(q15s32, 14); - d6s16 = vqrshrn_n_s32(q3s32, 14); - d7s16 = vqrshrn_n_s32(q4s32, 14); - *q12s16 = vcombine_s16(d24s16, d25s16); - - d0s16 = vdup_n_s16(cospi_10_64); - d1s16 = vdup_n_s16(cospi_22_64); - q4s32 = vmull_s16(d26s16, d0s16); - q5s32 = vmull_s16(d27s16, d0s16); - q2s32 = vmull_s16(d26s16, d1s16); - q6s32 = vmull_s16(d27s16, d1s16); - - d30s16 = vdup_n_s16(cospi_26_64); - d31s16 = vdup_n_s16(cospi_6_64); - - q4s32 = vmlal_s16(q4s32, d20s16, d1s16); - q5s32 = vmlal_s16(q5s32, d21s16, d1s16); - q2s32 = vmlsl_s16(q2s32, d20s16, d0s16); - q6s32 = vmlsl_s16(q6s32, d21s16, d0s16); - - q0s32 = vmull_s16(d18s16, d30s16); - q13s32 = vmull_s16(d19s16, d30s16); - - q0s32 = vmlal_s16(q0s32, d28s16, d31s16); - q13s32 = vmlal_s16(q13s32, d29s16, d31s16); - - q10s32 = vmull_s16(d18s16, d31s16); - q9s32 = vmull_s16(d19s16, d31s16); - - q10s32 = vmlsl_s16(q10s32, d28s16, d30s16); - q9s32 = vmlsl_s16(q9s32, d29s16, d30s16); - - q14s32 = vaddq_s32(q2s32, q10s32); - q15s32 = vaddq_s32(q6s32, q9s32); - q2s32 = vsubq_s32(q2s32, q10s32); - q6s32 = vsubq_s32(q6s32, q9s32); - - d28s16 = vqrshrn_n_s32(q14s32, 14); - d29s16 = vqrshrn_n_s32(q15s32, 14); - d4s16 = vqrshrn_n_s32(q2s32, 14); - d5s16 = vqrshrn_n_s32(q6s32, 14); - *q14s16 = vcombine_s16(d28s16, d29s16); - - q9s32 = vaddq_s32(q4s32, q0s32); - q10s32 = vaddq_s32(q5s32, q13s32); - q4s32 = vsubq_s32(q4s32, q0s32); - q5s32 = vsubq_s32(q5s32, q13s32); - - d30s16 = vdup_n_s16(cospi_8_64); - d31s16 = vdup_n_s16(cospi_24_64); - - d18s16 = vqrshrn_n_s32(q9s32, 14); - d19s16 = vqrshrn_n_s32(q10s32, 14); - d8s16 = vqrshrn_n_s32(q4s32, 14); - d9s16 = vqrshrn_n_s32(q5s32, 14); - *q9s16 = vcombine_s16(d18s16, d19s16); - - q5s32 = vmull_s16(d2s16, d30s16); - q6s32 = vmull_s16(d3s16, d30s16); - q7s32 = vmull_s16(d2s16, d31s16); - q0s32 = vmull_s16(d3s16, d31s16); - - q5s32 = vmlal_s16(q5s32, d6s16, d31s16); - q6s32 = vmlal_s16(q6s32, d7s16, d31s16); - q7s32 = vmlsl_s16(q7s32, d6s16, d30s16); - q0s32 = 
vmlsl_s16(q0s32, d7s16, d30s16); - - q1s32 = vmull_s16(d4s16, d30s16); - q3s32 = vmull_s16(d5s16, d30s16); - q10s32 = vmull_s16(d4s16, d31s16); - q2s32 = vmull_s16(d5s16, d31s16); - - q1s32 = vmlsl_s16(q1s32, d8s16, d31s16); - q3s32 = vmlsl_s16(q3s32, d9s16, d31s16); - q10s32 = vmlal_s16(q10s32, d8s16, d30s16); - q2s32 = vmlal_s16(q2s32, d9s16, d30s16); - - *q8s16 = vaddq_s16(*q11s16, *q9s16); - *q11s16 = vsubq_s16(*q11s16, *q9s16); - q4s16 = vaddq_s16(*q12s16, *q14s16); - *q12s16 = vsubq_s16(*q12s16, *q14s16); - - q14s32 = vaddq_s32(q5s32, q1s32); - q15s32 = vaddq_s32(q6s32, q3s32); - q5s32 = vsubq_s32(q5s32, q1s32); - q6s32 = vsubq_s32(q6s32, q3s32); - - d18s16 = vqrshrn_n_s32(q14s32, 14); - d19s16 = vqrshrn_n_s32(q15s32, 14); - d10s16 = vqrshrn_n_s32(q5s32, 14); - d11s16 = vqrshrn_n_s32(q6s32, 14); - *q9s16 = vcombine_s16(d18s16, d19s16); - - q1s32 = vaddq_s32(q7s32, q10s32); - q3s32 = vaddq_s32(q0s32, q2s32); - q7s32 = vsubq_s32(q7s32, q10s32); - q0s32 = vsubq_s32(q0s32, q2s32); - - d28s16 = vqrshrn_n_s32(q1s32, 14); - d29s16 = vqrshrn_n_s32(q3s32, 14); - d14s16 = vqrshrn_n_s32(q7s32, 14); - d15s16 = vqrshrn_n_s32(q0s32, 14); - *q14s16 = vcombine_s16(d28s16, d29s16); - - d30s16 = vdup_n_s16(cospi_16_64); - - d22s16 = vget_low_s16(*q11s16); - d23s16 = vget_high_s16(*q11s16); - q2s32 = vmull_s16(d22s16, d30s16); - q3s32 = vmull_s16(d23s16, d30s16); - q13s32 = vmull_s16(d22s16, d30s16); - q1s32 = vmull_s16(d23s16, d30s16); - - d24s16 = vget_low_s16(*q12s16); - d25s16 = vget_high_s16(*q12s16); - q2s32 = vmlal_s16(q2s32, d24s16, d30s16); - q3s32 = vmlal_s16(q3s32, d25s16, d30s16); - q13s32 = vmlsl_s16(q13s32, d24s16, d30s16); - q1s32 = vmlsl_s16(q1s32, d25s16, d30s16); - - d4s16 = vqrshrn_n_s32(q2s32, 14); - d5s16 = vqrshrn_n_s32(q3s32, 14); - d24s16 = vqrshrn_n_s32(q13s32, 14); - d25s16 = vqrshrn_n_s32(q1s32, 14); - q2s16 = vcombine_s16(d4s16, d5s16); - *q12s16 = vcombine_s16(d24s16, d25s16); - - q13s32 = vmull_s16(d10s16, d30s16); - q1s32 = vmull_s16(d11s16, 
d30s16); - q11s32 = vmull_s16(d10s16, d30s16); - q0s32 = vmull_s16(d11s16, d30s16); - - q13s32 = vmlal_s16(q13s32, d14s16, d30s16); - q1s32 = vmlal_s16(q1s32, d15s16, d30s16); - q11s32 = vmlsl_s16(q11s32, d14s16, d30s16); - q0s32 = vmlsl_s16(q0s32, d15s16, d30s16); - - d20s16 = vqrshrn_n_s32(q13s32, 14); - d21s16 = vqrshrn_n_s32(q1s32, 14); - d12s16 = vqrshrn_n_s32(q11s32, 14); - d13s16 = vqrshrn_n_s32(q0s32, 14); - *q10s16 = vcombine_s16(d20s16, d21s16); - q6s16 = vcombine_s16(d12s16, d13s16); - - q5s16 = vdupq_n_s16(0); - - *q9s16 = vsubq_s16(q5s16, *q9s16); - *q11s16 = vsubq_s16(q5s16, q2s16); - *q13s16 = vsubq_s16(q5s16, q6s16); - *q15s16 = vsubq_s16(q5s16, q4s16); - return; +static INLINE void IADST8X8_1D(int16x8_t *q8s16, int16x8_t *q9s16, + int16x8_t *q10s16, int16x8_t *q11s16, + int16x8_t *q12s16, int16x8_t *q13s16, + int16x8_t *q14s16, int16x8_t *q15s16) { + int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16; + int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; + int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; + int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; + int16x8_t q2s16, q4s16, q5s16, q6s16; + int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q7s32, q8s32; + int32x4_t q9s32, q10s32, q11s32, q12s32, q13s32, q14s32, q15s32; + + d16s16 = vget_low_s16(*q8s16); + d17s16 = vget_high_s16(*q8s16); + d18s16 = vget_low_s16(*q9s16); + d19s16 = vget_high_s16(*q9s16); + d20s16 = vget_low_s16(*q10s16); + d21s16 = vget_high_s16(*q10s16); + d22s16 = vget_low_s16(*q11s16); + d23s16 = vget_high_s16(*q11s16); + d24s16 = vget_low_s16(*q12s16); + d25s16 = vget_high_s16(*q12s16); + d26s16 = vget_low_s16(*q13s16); + d27s16 = vget_high_s16(*q13s16); + d28s16 = vget_low_s16(*q14s16); + d29s16 = vget_high_s16(*q14s16); + d30s16 = vget_low_s16(*q15s16); + d31s16 = vget_high_s16(*q15s16); + + d14s16 = vdup_n_s16(cospi_2_64); + d15s16 = vdup_n_s16(cospi_30_64); + + q1s32 = 
vmull_s16(d30s16, d14s16); + q2s32 = vmull_s16(d31s16, d14s16); + q3s32 = vmull_s16(d30s16, d15s16); + q4s32 = vmull_s16(d31s16, d15s16); + + d30s16 = vdup_n_s16(cospi_18_64); + d31s16 = vdup_n_s16(cospi_14_64); + + q1s32 = vmlal_s16(q1s32, d16s16, d15s16); + q2s32 = vmlal_s16(q2s32, d17s16, d15s16); + q3s32 = vmlsl_s16(q3s32, d16s16, d14s16); + q4s32 = vmlsl_s16(q4s32, d17s16, d14s16); + + q5s32 = vmull_s16(d22s16, d30s16); + q6s32 = vmull_s16(d23s16, d30s16); + q7s32 = vmull_s16(d22s16, d31s16); + q8s32 = vmull_s16(d23s16, d31s16); + + q5s32 = vmlal_s16(q5s32, d24s16, d31s16); + q6s32 = vmlal_s16(q6s32, d25s16, d31s16); + q7s32 = vmlsl_s16(q7s32, d24s16, d30s16); + q8s32 = vmlsl_s16(q8s32, d25s16, d30s16); + + q11s32 = vaddq_s32(q1s32, q5s32); + q12s32 = vaddq_s32(q2s32, q6s32); + q1s32 = vsubq_s32(q1s32, q5s32); + q2s32 = vsubq_s32(q2s32, q6s32); + + d22s16 = vqrshrn_n_s32(q11s32, 14); + d23s16 = vqrshrn_n_s32(q12s32, 14); + *q11s16 = vcombine_s16(d22s16, d23s16); + + q12s32 = vaddq_s32(q3s32, q7s32); + q15s32 = vaddq_s32(q4s32, q8s32); + q3s32 = vsubq_s32(q3s32, q7s32); + q4s32 = vsubq_s32(q4s32, q8s32); + + d2s16 = vqrshrn_n_s32(q1s32, 14); + d3s16 = vqrshrn_n_s32(q2s32, 14); + d24s16 = vqrshrn_n_s32(q12s32, 14); + d25s16 = vqrshrn_n_s32(q15s32, 14); + d6s16 = vqrshrn_n_s32(q3s32, 14); + d7s16 = vqrshrn_n_s32(q4s32, 14); + *q12s16 = vcombine_s16(d24s16, d25s16); + + d0s16 = vdup_n_s16(cospi_10_64); + d1s16 = vdup_n_s16(cospi_22_64); + q4s32 = vmull_s16(d26s16, d0s16); + q5s32 = vmull_s16(d27s16, d0s16); + q2s32 = vmull_s16(d26s16, d1s16); + q6s32 = vmull_s16(d27s16, d1s16); + + d30s16 = vdup_n_s16(cospi_26_64); + d31s16 = vdup_n_s16(cospi_6_64); + + q4s32 = vmlal_s16(q4s32, d20s16, d1s16); + q5s32 = vmlal_s16(q5s32, d21s16, d1s16); + q2s32 = vmlsl_s16(q2s32, d20s16, d0s16); + q6s32 = vmlsl_s16(q6s32, d21s16, d0s16); + + q0s32 = vmull_s16(d18s16, d30s16); + q13s32 = vmull_s16(d19s16, d30s16); + + q0s32 = vmlal_s16(q0s32, d28s16, d31s16); + q13s32 = 
vmlal_s16(q13s32, d29s16, d31s16); + + q10s32 = vmull_s16(d18s16, d31s16); + q9s32 = vmull_s16(d19s16, d31s16); + + q10s32 = vmlsl_s16(q10s32, d28s16, d30s16); + q9s32 = vmlsl_s16(q9s32, d29s16, d30s16); + + q14s32 = vaddq_s32(q2s32, q10s32); + q15s32 = vaddq_s32(q6s32, q9s32); + q2s32 = vsubq_s32(q2s32, q10s32); + q6s32 = vsubq_s32(q6s32, q9s32); + + d28s16 = vqrshrn_n_s32(q14s32, 14); + d29s16 = vqrshrn_n_s32(q15s32, 14); + d4s16 = vqrshrn_n_s32(q2s32, 14); + d5s16 = vqrshrn_n_s32(q6s32, 14); + *q14s16 = vcombine_s16(d28s16, d29s16); + + q9s32 = vaddq_s32(q4s32, q0s32); + q10s32 = vaddq_s32(q5s32, q13s32); + q4s32 = vsubq_s32(q4s32, q0s32); + q5s32 = vsubq_s32(q5s32, q13s32); + + d30s16 = vdup_n_s16(cospi_8_64); + d31s16 = vdup_n_s16(cospi_24_64); + + d18s16 = vqrshrn_n_s32(q9s32, 14); + d19s16 = vqrshrn_n_s32(q10s32, 14); + d8s16 = vqrshrn_n_s32(q4s32, 14); + d9s16 = vqrshrn_n_s32(q5s32, 14); + *q9s16 = vcombine_s16(d18s16, d19s16); + + q5s32 = vmull_s16(d2s16, d30s16); + q6s32 = vmull_s16(d3s16, d30s16); + q7s32 = vmull_s16(d2s16, d31s16); + q0s32 = vmull_s16(d3s16, d31s16); + + q5s32 = vmlal_s16(q5s32, d6s16, d31s16); + q6s32 = vmlal_s16(q6s32, d7s16, d31s16); + q7s32 = vmlsl_s16(q7s32, d6s16, d30s16); + q0s32 = vmlsl_s16(q0s32, d7s16, d30s16); + + q1s32 = vmull_s16(d4s16, d30s16); + q3s32 = vmull_s16(d5s16, d30s16); + q10s32 = vmull_s16(d4s16, d31s16); + q2s32 = vmull_s16(d5s16, d31s16); + + q1s32 = vmlsl_s16(q1s32, d8s16, d31s16); + q3s32 = vmlsl_s16(q3s32, d9s16, d31s16); + q10s32 = vmlal_s16(q10s32, d8s16, d30s16); + q2s32 = vmlal_s16(q2s32, d9s16, d30s16); + + *q8s16 = vaddq_s16(*q11s16, *q9s16); + *q11s16 = vsubq_s16(*q11s16, *q9s16); + q4s16 = vaddq_s16(*q12s16, *q14s16); + *q12s16 = vsubq_s16(*q12s16, *q14s16); + + q14s32 = vaddq_s32(q5s32, q1s32); + q15s32 = vaddq_s32(q6s32, q3s32); + q5s32 = vsubq_s32(q5s32, q1s32); + q6s32 = vsubq_s32(q6s32, q3s32); + + d18s16 = vqrshrn_n_s32(q14s32, 14); + d19s16 = vqrshrn_n_s32(q15s32, 14); + d10s16 = 
vqrshrn_n_s32(q5s32, 14); + d11s16 = vqrshrn_n_s32(q6s32, 14); + *q9s16 = vcombine_s16(d18s16, d19s16); + + q1s32 = vaddq_s32(q7s32, q10s32); + q3s32 = vaddq_s32(q0s32, q2s32); + q7s32 = vsubq_s32(q7s32, q10s32); + q0s32 = vsubq_s32(q0s32, q2s32); + + d28s16 = vqrshrn_n_s32(q1s32, 14); + d29s16 = vqrshrn_n_s32(q3s32, 14); + d14s16 = vqrshrn_n_s32(q7s32, 14); + d15s16 = vqrshrn_n_s32(q0s32, 14); + *q14s16 = vcombine_s16(d28s16, d29s16); + + d30s16 = vdup_n_s16(cospi_16_64); + + d22s16 = vget_low_s16(*q11s16); + d23s16 = vget_high_s16(*q11s16); + q2s32 = vmull_s16(d22s16, d30s16); + q3s32 = vmull_s16(d23s16, d30s16); + q13s32 = vmull_s16(d22s16, d30s16); + q1s32 = vmull_s16(d23s16, d30s16); + + d24s16 = vget_low_s16(*q12s16); + d25s16 = vget_high_s16(*q12s16); + q2s32 = vmlal_s16(q2s32, d24s16, d30s16); + q3s32 = vmlal_s16(q3s32, d25s16, d30s16); + q13s32 = vmlsl_s16(q13s32, d24s16, d30s16); + q1s32 = vmlsl_s16(q1s32, d25s16, d30s16); + + d4s16 = vqrshrn_n_s32(q2s32, 14); + d5s16 = vqrshrn_n_s32(q3s32, 14); + d24s16 = vqrshrn_n_s32(q13s32, 14); + d25s16 = vqrshrn_n_s32(q1s32, 14); + q2s16 = vcombine_s16(d4s16, d5s16); + *q12s16 = vcombine_s16(d24s16, d25s16); + + q13s32 = vmull_s16(d10s16, d30s16); + q1s32 = vmull_s16(d11s16, d30s16); + q11s32 = vmull_s16(d10s16, d30s16); + q0s32 = vmull_s16(d11s16, d30s16); + + q13s32 = vmlal_s16(q13s32, d14s16, d30s16); + q1s32 = vmlal_s16(q1s32, d15s16, d30s16); + q11s32 = vmlsl_s16(q11s32, d14s16, d30s16); + q0s32 = vmlsl_s16(q0s32, d15s16, d30s16); + + d20s16 = vqrshrn_n_s32(q13s32, 14); + d21s16 = vqrshrn_n_s32(q1s32, 14); + d12s16 = vqrshrn_n_s32(q11s32, 14); + d13s16 = vqrshrn_n_s32(q0s32, 14); + *q10s16 = vcombine_s16(d20s16, d21s16); + q6s16 = vcombine_s16(d12s16, d13s16); + + q5s16 = vdupq_n_s16(0); + + *q9s16 = vsubq_s16(q5s16, *q9s16); + *q11s16 = vsubq_s16(q5s16, q2s16); + *q13s16 = vsubq_s16(q5s16, q6s16); + *q15s16 = vsubq_s16(q5s16, q4s16); + return; } void vp9_iht8x8_64_add_neon(const tran_low_t *input, uint8_t 
*dest, int dest_stride, int tx_type) { - int i; - uint8_t *d1, *d2; - uint8x8_t d0u8, d1u8, d2u8, d3u8; - uint64x1_t d0u64, d1u64, d2u64, d3u64; - int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; - uint16x8_t q8u16, q9u16, q10u16, q11u16; + int i; + uint8_t *d1, *d2; + uint8x8_t d0u8, d1u8, d2u8, d3u8; + uint64x1_t d0u64, d1u64, d2u64, d3u64; + int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; + uint16x8_t q8u16, q9u16, q10u16, q11u16; - q8s16 = vld1q_s16(input); - q9s16 = vld1q_s16(input + 8); - q10s16 = vld1q_s16(input + 8 * 2); - q11s16 = vld1q_s16(input + 8 * 3); - q12s16 = vld1q_s16(input + 8 * 4); - q13s16 = vld1q_s16(input + 8 * 5); - q14s16 = vld1q_s16(input + 8 * 6); - q15s16 = vld1q_s16(input + 8 * 7); + q8s16 = vld1q_s16(input); + q9s16 = vld1q_s16(input + 8); + q10s16 = vld1q_s16(input + 8 * 2); + q11s16 = vld1q_s16(input + 8 * 3); + q12s16 = vld1q_s16(input + 8 * 4); + q13s16 = vld1q_s16(input + 8 * 5); + q14s16 = vld1q_s16(input + 8 * 6); + q15s16 = vld1q_s16(input + 8 * 7); - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); + transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); - switch (tx_type) { - case 0: // idct_idct is not supported. Fall back to C - vp9_iht8x8_64_add_c(input, dest, dest_stride, tx_type); - return; - break; - case 1: // iadst_idct - // generate IDCT constants - // GENERATE_IDCT_CONSTANTS + switch (tx_type) { + case 0: // idct_idct is not supported. 
Fall back to C + vp9_iht8x8_64_add_c(input, dest, dest_stride, tx_type); + return; + break; + case 1: // iadst_idct + // generate IDCT constants + // GENERATE_IDCT_CONSTANTS - // first transform rows - IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); + // first transform rows + IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); - // transpose the matrix - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); + // transpose the matrix + transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, + &q14s16, &q15s16); - // generate IADST constants - // GENERATE_IADST_CONSTANTS + // generate IADST constants + // GENERATE_IADST_CONSTANTS - // then transform columns - IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - break; - case 2: // idct_iadst - // generate IADST constants - // GENERATE_IADST_CONSTANTS + // then transform columns + IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); + break; + case 2: // idct_iadst + // generate IADST constants + // GENERATE_IADST_CONSTANTS - // first transform rows - IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); + // first transform rows + IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); - // transpose the matrix - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); + // transpose the matrix + transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, + &q14s16, &q15s16); - // generate IDCT constants - // GENERATE_IDCT_CONSTANTS + // generate IDCT constants + // GENERATE_IDCT_CONSTANTS - // then transform columns - IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - break; - case 3: // iadst_iadst - // generate IADST constants - // GENERATE_IADST_CONSTANTS + // then transform columns + IDCT8x8_1D(&q8s16, 
&q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); + break; + case 3: // iadst_iadst + // generate IADST constants + // GENERATE_IADST_CONSTANTS - // first transform rows - IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); + // first transform rows + IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); - // transpose the matrix - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); + // transpose the matrix + transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, + &q14s16, &q15s16); - // then transform columns - IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - break; - default: // iadst_idct - assert(0); - break; + // then transform columns + IADST8X8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); + break; + default: // iadst_idct + assert(0); + break; + } + + q8s16 = vrshrq_n_s16(q8s16, 5); + q9s16 = vrshrq_n_s16(q9s16, 5); + q10s16 = vrshrq_n_s16(q10s16, 5); + q11s16 = vrshrq_n_s16(q11s16, 5); + q12s16 = vrshrq_n_s16(q12s16, 5); + q13s16 = vrshrq_n_s16(q13s16, 5); + q14s16 = vrshrq_n_s16(q14s16, 5); + q15s16 = vrshrq_n_s16(q15s16, 5); + + for (d1 = d2 = dest, i = 0; i < 2; i++) { + if (i != 0) { + q8s16 = q12s16; + q9s16 = q13s16; + q10s16 = q14s16; + q11s16 = q15s16; } - q8s16 = vrshrq_n_s16(q8s16, 5); - q9s16 = vrshrq_n_s16(q9s16, 5); - q10s16 = vrshrq_n_s16(q10s16, 5); - q11s16 = vrshrq_n_s16(q11s16, 5); - q12s16 = vrshrq_n_s16(q12s16, 5); - q13s16 = vrshrq_n_s16(q13s16, 5); - q14s16 = vrshrq_n_s16(q14s16, 5); - q15s16 = vrshrq_n_s16(q15s16, 5); + d0u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + d1u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + d2u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + d3u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; - for (d1 = d2 = dest, i = 0; i < 2; i++) { - if (i != 0) { - q8s16 = q12s16; - q9s16 = q13s16; - 
q10s16 = q14s16; - q11s16 = q15s16; - } + q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u64(d0u64)); + q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u64(d1u64)); + q10u16 = + vaddw_u8(vreinterpretq_u16_s16(q10s16), vreinterpret_u8_u64(d2u64)); + q11u16 = + vaddw_u8(vreinterpretq_u16_s16(q11s16), vreinterpret_u8_u64(d3u64)); - d0u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d1u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d2u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d3u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; + d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); + d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); + d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); + d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); - q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), - vreinterpret_u8_u64(d0u64)); - q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), - vreinterpret_u8_u64(d1u64)); - q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), - vreinterpret_u8_u64(d2u64)); - q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), - vreinterpret_u8_u64(d3u64)); - - d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); - d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); - d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); - d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); - - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); - d2 += dest_stride; - } - return; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); + d2 += dest_stride; + } + return; } diff --git 
a/libs/libvpx/vp9/common/mips/dspr2/vp9_itrans16_dspr2.c b/libs/libvpx/vp9/common/mips/dspr2/vp9_itrans16_dspr2.c index 6ca83a00c5..e68d01e9fd 100644 --- a/libs/libvpx/vp9/common/mips/dspr2/vp9_itrans16_dspr2.c +++ b/libs/libvpx/vp9/common/mips/dspr2/vp9_itrans16_dspr2.c @@ -21,27 +21,23 @@ #include "vpx_ports/mem.h" #if HAVE_DSPR2 -void vp9_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, - int pitch, int tx_type) { +void vp9_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, int pitch, + int tx_type) { int i, j; - DECLARE_ALIGNED(32, int16_t, out[16 * 16]); + DECLARE_ALIGNED(32, int16_t, out[16 * 16]); int16_t *outptr = out; int16_t temp_out[16]; uint32_t pos = 45; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" : : [pos] "r"(pos)); switch (tx_type) { - case DCT_DCT: // DCT in both horizontal and vertical + case DCT_DCT: // DCT in both horizontal and vertical idct16_rows_dspr2(input, outptr, 16); idct16_cols_add_blk_dspr2(out, dest, pitch); break; - case ADST_DCT: // ADST in vertical, DCT in horizontal + case ADST_DCT: // ADST in vertical, DCT in horizontal idct16_rows_dspr2(input, outptr, 16); outptr = out; @@ -50,13 +46,12 @@ void vp9_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, iadst16_dspr2(outptr, temp_out); for (j = 0; j < 16; ++j) - dest[j * pitch + i] = - clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6) - + dest[j * pitch + i]); + dest[j * pitch + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6) + + dest[j * pitch + i]); outptr += 16; } break; - case DCT_ADST: // DCT in vertical, ADST in horizontal + case DCT_ADST: // DCT in vertical, ADST in horizontal { int16_t temp_in[16 * 16]; @@ -70,13 +65,12 @@ void vp9_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, } for (i = 0; i < 16; ++i) - for (j = 0; j < 16; ++j) - temp_in[j * 16 + i] = out[i * 16 + j]; + for (j = 0; j < 16; ++j) temp_in[j * 16 + i] 
= out[i * 16 + j]; idct16_cols_add_blk_dspr2(temp_in, dest, pitch); + break; } - break; - case ADST_ADST: // ADST in both directions + case ADST_ADST: // ADST in both directions { int16_t temp_in[16]; @@ -90,19 +84,15 @@ void vp9_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, } for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j * 16 + i]; + for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i]; iadst16_dspr2(temp_in, temp_out); for (j = 0; j < 16; ++j) - dest[j * pitch + i] = - clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6) - + dest[j * pitch + i]); + dest[j * pitch + i] = clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 6) + + dest[j * pitch + i]); } - } - break; - default: - printf("vp9_short_iht16x16_add_dspr2 : Invalid tx_type\n"); break; + } + default: printf("vp9_short_iht16x16_add_dspr2 : Invalid tx_type\n"); break; } } #endif // #if HAVE_DSPR2 diff --git a/libs/libvpx/vp9/common/mips/dspr2/vp9_itrans4_dspr2.c b/libs/libvpx/vp9/common/mips/dspr2/vp9_itrans4_dspr2.c index c10979b645..2d4839174d 100644 --- a/libs/libvpx/vp9/common/mips/dspr2/vp9_itrans4_dspr2.c +++ b/libs/libvpx/vp9/common/mips/dspr2/vp9_itrans4_dspr2.c @@ -30,14 +30,12 @@ void vp9_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest, uint32_t pos = 45; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); switch (tx_type) { - case DCT_DCT: // DCT in both horizontal and vertical + case DCT_DCT: // DCT in both horizontal and vertical vpx_idct4_rows_dspr2(input, outptr); vpx_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride); break; @@ -50,9 +48,8 @@ void vp9_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest, iadst4_dspr2(outptr, temp_out); for (j = 0; j < 4; ++j) - dest[j * dest_stride + i] = - clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 4) - + dest[j * dest_stride + i]); + dest[j * dest_stride + i] = clip_pixel( + 
ROUND_POWER_OF_TWO(temp_out[j], 4) + dest[j * dest_stride + i]); outptr += 4; } @@ -60,7 +57,7 @@ void vp9_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest, case DCT_ADST: // DCT in vertical, ADST in horizontal for (i = 0; i < 4; ++i) { iadst4_dspr2(input, outptr); - input += 4; + input += 4; outptr += 4; } @@ -74,24 +71,20 @@ void vp9_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest, case ADST_ADST: // ADST in both directions for (i = 0; i < 4; ++i) { iadst4_dspr2(input, outptr); - input += 4; + input += 4; outptr += 4; } for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = out[j * 4 + i]; + for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i]; iadst4_dspr2(temp_in, temp_out); for (j = 0; j < 4; ++j) - dest[j * dest_stride + i] = - clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 4) - + dest[j * dest_stride + i]); + dest[j * dest_stride + i] = clip_pixel( + ROUND_POWER_OF_TWO(temp_out[j], 4) + dest[j * dest_stride + i]); } break; - default: - printf("vp9_short_iht4x4_add_dspr2 : Invalid tx_type\n"); - break; + default: printf("vp9_short_iht4x4_add_dspr2 : Invalid tx_type\n"); break; } } #endif // #if HAVE_DSPR2 diff --git a/libs/libvpx/vp9/common/mips/dspr2/vp9_itrans8_dspr2.c b/libs/libvpx/vp9/common/mips/dspr2/vp9_itrans8_dspr2.c index 37f3ca9fcb..86896f04ca 100644 --- a/libs/libvpx/vp9/common/mips/dspr2/vp9_itrans8_dspr2.c +++ b/libs/libvpx/vp9/common/mips/dspr2/vp9_itrans8_dspr2.c @@ -29,30 +29,25 @@ void vp9_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest, uint32_t pos = 45; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" : : [pos] "r"(pos)); switch (tx_type) { - case DCT_DCT: // DCT in both horizontal and vertical + case DCT_DCT: // DCT in both horizontal and vertical idct8_rows_dspr2(input, outptr, 8); idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride); break; - case ADST_DCT: // ADST in vertical, DCT in 
horizontal + case ADST_DCT: // ADST in vertical, DCT in horizontal idct8_rows_dspr2(input, outptr, 8); for (i = 0; i < 8; ++i) { iadst8_dspr2(&out[i * 8], temp_out); for (j = 0; j < 8; ++j) - dest[j * dest_stride + i] = - clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 5) - + dest[j * dest_stride + i]); + dest[j * dest_stride + i] = clip_pixel( + ROUND_POWER_OF_TWO(temp_out[j], 5) + dest[j * dest_stride + i]); } break; - case DCT_ADST: // DCT in vertical, ADST in horizontal + case DCT_ADST: // DCT in vertical, ADST in horizontal for (i = 0; i < 8; ++i) { iadst8_dspr2(input, outptr); input += 8; @@ -66,7 +61,7 @@ void vp9_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest, } idct8_columns_add_blk_dspr2(&temp_in[0], dest, dest_stride); break; - case ADST_ADST: // ADST in both directions + case ADST_ADST: // ADST in both directions for (i = 0; i < 8; ++i) { iadst8_dspr2(input, outptr); input += 8; @@ -74,20 +69,16 @@ void vp9_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest, } for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; + for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i]; iadst8_dspr2(temp_in, temp_out); for (j = 0; j < 8; ++j) - dest[j * dest_stride + i] = - clip_pixel(ROUND_POWER_OF_TWO(temp_out[j], 5) - + dest[j * dest_stride + i]); + dest[j * dest_stride + i] = clip_pixel( + ROUND_POWER_OF_TWO(temp_out[j], 5) + dest[j * dest_stride + i]); } break; - default: - printf("vp9_short_iht8x8_add_dspr2 : Invalid tx_type\n"); - break; + default: printf("vp9_short_iht8x8_add_dspr2 : Invalid tx_type\n"); break; } } #endif // #if HAVE_DSPR2 diff --git a/libs/libvpx/vp9/common/mips/msa/vp9_idct16x16_msa.c b/libs/libvpx/vp9/common/mips/msa/vp9_idct16x16_msa.c index 5adf0aaac1..3e3530116d 100644 --- a/libs/libvpx/vp9/common/mips/msa/vp9_idct16x16_msa.c +++ b/libs/libvpx/vp9/common/mips/msa/vp9_idct16x16_msa.c @@ -74,8 +74,6 @@ void vp9_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst, (dst + (i << 3)), dst_stride); } break; 
- default: - assert(0); - break; + default: assert(0); break; } } diff --git a/libs/libvpx/vp9/common/mips/msa/vp9_idct4x4_msa.c b/libs/libvpx/vp9/common/mips/msa/vp9_idct4x4_msa.c index 75977b11fa..786fbdb794 100644 --- a/libs/libvpx/vp9/common/mips/msa/vp9_idct4x4_msa.c +++ b/libs/libvpx/vp9/common/mips/msa/vp9_idct4x4_msa.c @@ -50,9 +50,7 @@ void vp9_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst, TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3); break; - default: - assert(0); - break; + default: assert(0); break; } /* final rounding (add 2^3, divide by 2^4) and shift */ diff --git a/libs/libvpx/vp9/common/mips/msa/vp9_idct8x8_msa.c b/libs/libvpx/vp9/common/mips/msa/vp9_idct8x8_msa.c index 65d2993e8a..e4166775da 100644 --- a/libs/libvpx/vp9/common/mips/msa/vp9_idct8x8_msa.c +++ b/libs/libvpx/vp9/common/mips/msa/vp9_idct8x8_msa.c @@ -20,53 +20,51 @@ void vp9_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst, /* load vector elements of 8x8 block */ LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); switch (tx_type) { case DCT_DCT: /* DCT in horizontal */ - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); /* DCT in vertical */ - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, + in3, in4, in5, in6, in7); + VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); break; 
case ADST_DCT: /* DCT in horizontal */ - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); /* ADST in vertical */ - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, + in3, in4, in5, in6, in7); + VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, + in5, in6, in7); break; case DCT_ADST: /* ADST in horizontal */ - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, + in5, in6, in7); /* DCT in vertical */ - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, + in3, in4, in5, in6, in7); + VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); break; case ADST_ADST: /* ADST in horizontal */ - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, + in5, in6, in7); /* ADST in vertical */ - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - break; - default: - assert(0); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, + in3, in4, in5, in6, in7); + VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, 
in4, + in5, in6, in7); break; + default: assert(0); break; } /* final rounding (add 2^4, divide by 2^5) and shift */ diff --git a/libs/libvpx/vp9/common/mips/msa/vp9_mfqe_msa.c b/libs/libvpx/vp9/common/mips/msa/vp9_mfqe_msa.c index 7257cd629d..2c3840958e 100644 --- a/libs/libvpx/vp9/common/mips/msa/vp9_mfqe_msa.c +++ b/libs/libvpx/vp9/common/mips/msa/vp9_mfqe_msa.c @@ -65,10 +65,8 @@ static void filter_by_weight8x8_msa(const uint8_t *src_ptr, int32_t src_stride, } static void filter_by_weight16x16_msa(const uint8_t *src_ptr, - int32_t src_stride, - uint8_t *dst_ptr, - int32_t dst_stride, - int32_t src_weight) { + int32_t src_stride, uint8_t *dst_ptr, + int32_t dst_stride, int32_t src_weight) { int32_t dst_weight = (1 << MFQE_PRECISION) - src_weight; int32_t row; v16i8 src0, src1, src2, src3, dst0, dst1, dst2, dst3; @@ -125,8 +123,7 @@ static void filter_by_weight16x16_msa(const uint8_t *src_ptr, } void vp9_filter_by_weight8x8_msa(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - int src_weight) { + uint8_t *dst, int dst_stride, int src_weight) { filter_by_weight8x8_msa(src, src_stride, dst, dst_stride, src_weight); } diff --git a/libs/libvpx/vp9/common/vp9_alloccommon.c b/libs/libvpx/vp9/common/vp9_alloccommon.c index 7dd1005d3f..66aa733b9e 100644 --- a/libs/libvpx/vp9/common/vp9_alloccommon.c +++ b/libs/libvpx/vp9/common/vp9_alloccommon.c @@ -53,8 +53,7 @@ static int alloc_seg_map(VP9_COMMON *cm, int seg_map_size) { for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) { cm->seg_map_array[i] = (uint8_t *)vpx_calloc(seg_map_size, 1); - if (cm->seg_map_array[i] == NULL) - return 1; + if (cm->seg_map_array[i] == NULL) return 1; } cm->seg_map_alloc_size = seg_map_size; @@ -103,6 +102,10 @@ void vp9_free_postproc_buffers(VP9_COMMON *cm) { #if CONFIG_VP9_POSTPROC vpx_free_frame_buffer(&cm->post_proc_buffer); vpx_free_frame_buffer(&cm->post_proc_buffer_int); + vpx_free(cm->postproc_state.limits); + cm->postproc_state.limits = NULL; + 
vpx_free(cm->postproc_state.generated_noise); + cm->postproc_state.generated_noise = NULL; #else (void)cm; #endif @@ -119,7 +122,6 @@ void vp9_free_context_buffers(VP9_COMMON *cm) { cm->lf.lfm = NULL; } - int vp9_alloc_loop_filter(VP9_COMMON *cm) { vpx_free(cm->lf.lfm); // Each lfm holds bit masks for all the 8x8 blocks in a 64x64 region. The @@ -128,8 +130,7 @@ int vp9_alloc_loop_filter(VP9_COMMON *cm) { cm->lf.lfm = (LOOP_FILTER_MASK *)vpx_calloc( ((cm->mi_rows + (MI_BLOCK_SIZE - 1)) >> 3) * cm->lf.lfm_stride, sizeof(*cm->lf.lfm)); - if (!cm->lf.lfm) - return 1; + if (!cm->lf.lfm) return 1; return 0; } @@ -140,15 +141,13 @@ int vp9_alloc_context_buffers(VP9_COMMON *cm, int width, int height) { new_mi_size = cm->mi_stride * calc_mi_size(cm->mi_rows); if (cm->mi_alloc_size < new_mi_size) { cm->free_mi(cm); - if (cm->alloc_mi(cm, new_mi_size)) - goto fail; + if (cm->alloc_mi(cm, new_mi_size)) goto fail; } if (cm->seg_map_alloc_size < cm->mi_rows * cm->mi_cols) { // Create the segmentation map structure and set to 0. free_seg_map(cm); - if (alloc_seg_map(cm, cm->mi_rows * cm->mi_cols)) - goto fail; + if (alloc_seg_map(cm, cm->mi_rows * cm->mi_cols)) goto fail; } if (cm->above_context_alloc_cols < cm->mi_cols) { @@ -165,12 +164,13 @@ int vp9_alloc_context_buffers(VP9_COMMON *cm, int width, int height) { cm->above_context_alloc_cols = cm->mi_cols; } - if (vp9_alloc_loop_filter(cm)) - goto fail; + if (vp9_alloc_loop_filter(cm)) goto fail; return 0; - fail: +fail: + // clear the mi_* values to force a realloc on resync + vp9_set_mb_mi(cm, 0, 0); vp9_free_context_buffers(cm); return 1; } diff --git a/libs/libvpx/vp9/common/vp9_alloccommon.h b/libs/libvpx/vp9/common/vp9_alloccommon.h index e53955b998..a3a1638572 100644 --- a/libs/libvpx/vp9/common/vp9_alloccommon.h +++ b/libs/libvpx/vp9/common/vp9_alloccommon.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP9_COMMON_VP9_ALLOCCOMMON_H_ #define VP9_COMMON_VP9_ALLOCCOMMON_H_ diff --git a/libs/libvpx/vp9/common/vp9_blockd.c b/libs/libvpx/vp9/common/vp9_blockd.c index 7bab27d4fd..b0249687fd 100644 --- a/libs/libvpx/vp9/common/vp9_blockd.c +++ b/libs/libvpx/vp9/common/vp9_blockd.c @@ -13,8 +13,7 @@ PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi, const MODE_INFO *left_mi, int b) { if (b == 0 || b == 2) { - if (!left_mi || is_inter_block(left_mi)) - return DC_PRED; + if (!left_mi || is_inter_block(left_mi)) return DC_PRED; return get_y_mode(left_mi, b + 1); } else { @@ -26,8 +25,7 @@ PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi, PREDICTION_MODE vp9_above_block_mode(const MODE_INFO *cur_mi, const MODE_INFO *above_mi, int b) { if (b == 0 || b == 1) { - if (!above_mi || is_inter_block(above_mi)) - return DC_PRED; + if (!above_mi || is_inter_block(above_mi)) return DC_PRED; return get_y_mode(above_mi, b + 2); } else { @@ -40,12 +38,11 @@ void vp9_foreach_transformed_block_in_plane( const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane, foreach_transformed_block_visitor visit, void *arg) { const struct macroblockd_plane *const pd = &xd->plane[plane]; - const MODE_INFO* mi = xd->mi[0]; + const MODE_INFO *mi = xd->mi[0]; // block and transform sizes, in number of 4x4 blocks log 2 ("*_b") // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8 // transform size varies per plane, look it up in a common way. - const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd) - : mi->tx_size; + const TX_SIZE tx_size = plane ? 
get_uv_tx_size(mi, pd) : mi->tx_size; const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd); const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; @@ -55,10 +52,13 @@ void vp9_foreach_transformed_block_in_plane( // If mb_to_right_edge is < 0 we are in a situation in which // the current block size extends into the UMV and we won't // visit the sub blocks that are wholly within the UMV. - const int max_blocks_wide = num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 : - xd->mb_to_right_edge >> (5 + pd->subsampling_x)); - const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 0 : - xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); + const int max_blocks_wide = + num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 : xd->mb_to_right_edge >> + (5 + pd->subsampling_x)); + const int max_blocks_high = + num_4x4_h + (xd->mb_to_bottom_edge >= 0 + ? 0 + : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); const int extra_step = ((num_4x4_w - max_blocks_wide) >> tx_size) * step; // Keep track of the row and column of the blocks we use so that we know @@ -66,14 +66,14 @@ void vp9_foreach_transformed_block_in_plane( for (r = 0; r < max_blocks_high; r += (1 << tx_size)) { // Skip visiting the sub blocks that are wholly within the UMV. 
for (c = 0; c < max_blocks_wide; c += (1 << tx_size)) { - visit(plane, i, plane_bsize, tx_size, arg); + visit(plane, i, r, c, plane_bsize, tx_size, arg); i += step; } i += extra_step; } } -void vp9_foreach_transformed_block(const MACROBLOCKD* const xd, +void vp9_foreach_transformed_block(const MACROBLOCKD *const xd, BLOCK_SIZE bsize, foreach_transformed_block_visitor visit, void *arg) { @@ -99,10 +99,8 @@ void vp9_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd, if (above_contexts + aoff > blocks_wide) above_contexts = blocks_wide - aoff; - for (i = 0; i < above_contexts; ++i) - a[i] = has_eob; - for (i = above_contexts; i < tx_size_in_blocks; ++i) - a[i] = 0; + for (i = 0; i < above_contexts; ++i) a[i] = has_eob; + for (i = above_contexts; i < tx_size_in_blocks; ++i) a[i] = 0; } else { memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); } @@ -113,13 +111,10 @@ void vp9_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd, const int blocks_high = num_4x4_blocks_high_lookup[plane_bsize] + (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); int left_contexts = tx_size_in_blocks; - if (left_contexts + loff > blocks_high) - left_contexts = blocks_high - loff; + if (left_contexts + loff > blocks_high) left_contexts = blocks_high - loff; - for (i = 0; i < left_contexts; ++i) - l[i] = has_eob; - for (i = left_contexts; i < tx_size_in_blocks; ++i) - l[i] = 0; + for (i = 0; i < left_contexts; ++i) l[i] = has_eob; + for (i = left_contexts; i < tx_size_in_blocks; ++i) l[i] = 0; } else { memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); } diff --git a/libs/libvpx/vp9/common/vp9_blockd.h b/libs/libvpx/vp9/common/vp9_blockd.h index ae2f66a4a7..780b29208b 100644 --- a/libs/libvpx/vp9/common/vp9_blockd.h +++ b/libs/libvpx/vp9/common/vp9_blockd.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP9_COMMON_VP9_BLOCKD_H_ #define VP9_COMMON_VP9_BLOCKD_H_ @@ -55,12 +54,12 @@ typedef struct { // decoder implementation modules critically rely on the defined entry values // specified herein. They should be refactored concurrently. -#define NONE -1 -#define INTRA_FRAME 0 -#define LAST_FRAME 1 -#define GOLDEN_FRAME 2 -#define ALTREF_FRAME 3 -#define MAX_REF_FRAMES 4 +#define NONE -1 +#define INTRA_FRAME 0 +#define LAST_FRAME 1 +#define GOLDEN_FRAME 2 +#define ALTREF_FRAME 3 +#define MAX_REF_FRAMES 4 typedef int8_t MV_REFERENCE_FRAME; // This structure now relates to 8x8 block regions. @@ -78,6 +77,9 @@ typedef struct MODE_INFO { // Only for INTER blocks INTERP_FILTER interp_filter; + + // if ref_frame[idx] is equal to ALTREF_FRAME then + // MACROBLOCKD::block_ref[idx] is an altref MV_REFERENCE_FRAME ref_frame[2]; // TODO(slavarnway): Delete and use bmi[3].as_mv[] instead. @@ -87,8 +89,7 @@ typedef struct MODE_INFO { } MODE_INFO; static INLINE PREDICTION_MODE get_y_mode(const MODE_INFO *mi, int block) { - return mi->sb_type < BLOCK_8X8 ? mi->bmi[block].as_mode - : mi->mode; + return mi->sb_type < BLOCK_8X8 ? mi->bmi[block].as_mode : mi->mode; } static INLINE int is_inter_block(const MODE_INFO *mi) { @@ -105,10 +106,7 @@ PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi, PREDICTION_MODE vp9_above_block_mode(const MODE_INFO *cur_mi, const MODE_INFO *above_mi, int b); -enum mv_precision { - MV_PRECISION_Q3, - MV_PRECISION_Q4 -}; +enum mv_precision { MV_PRECISION_Q3, MV_PRECISION_Q4 }; struct buf_2d { uint8_t *buf; @@ -134,7 +132,7 @@ struct macroblockd_plane { const int16_t *dequant; }; -#define BLOCK_OFFSET(x, i) ((x) + (i) * 16) +#define BLOCK_OFFSET(x, i) ((x) + (i)*16) typedef struct RefBuffer { // TODO(dkovalev): idx is not really required and should be removed, now it @@ -154,12 +152,15 @@ typedef struct macroblockd { int mi_stride; + // Grid of 8x8 cells is placed over the block. 
+ // If some of them belong to the same mbtree-block + // they will just have same mi[i][j] value MODE_INFO **mi; MODE_INFO *left_mi; MODE_INFO *above_mi; - int up_available; - int left_available; + unsigned int max_blocks_wide; + unsigned int max_blocks_high; const vpx_prob (*partition_probs)[PARTITION_TYPES - 1]; @@ -227,24 +228,17 @@ static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type, void vp9_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y); -static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize, - int xss, int yss) { - if (bsize < BLOCK_8X8) { - return TX_4X4; - } else { - const BLOCK_SIZE plane_bsize = ss_size_lookup[bsize][xss][yss]; - return VPXMIN(y_tx_size, max_txsize_lookup[plane_bsize]); - } -} - static INLINE TX_SIZE get_uv_tx_size(const MODE_INFO *mi, const struct macroblockd_plane *pd) { - return get_uv_tx_size_impl(mi->tx_size, mi->sb_type, pd->subsampling_x, - pd->subsampling_y); + assert(mi->sb_type < BLOCK_8X8 || + ss_size_lookup[mi->sb_type][pd->subsampling_x][pd->subsampling_y] != + BLOCK_INVALID); + return uv_txsize_lookup[mi->sb_type][mi->tx_size][pd->subsampling_x] + [pd->subsampling_y]; } -static INLINE BLOCK_SIZE get_plane_block_size(BLOCK_SIZE bsize, - const struct macroblockd_plane *pd) { +static INLINE BLOCK_SIZE +get_plane_block_size(BLOCK_SIZE bsize, const struct macroblockd_plane *pd) { return ss_size_lookup[bsize][pd->subsampling_x][pd->subsampling_y]; } @@ -269,30 +263,19 @@ static INLINE const vpx_prob *get_y_mode_probs(const MODE_INFO *mi, return vp9_kf_y_mode_prob[above][left]; } -typedef void (*foreach_transformed_block_visitor)(int plane, int block, +typedef void (*foreach_transformed_block_visitor)(int plane, int block, int row, + int col, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, - void *arg); + TX_SIZE tx_size, void *arg); void vp9_foreach_transformed_block_in_plane( const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane, foreach_transformed_block_visitor visit, void *arg); - -void 
vp9_foreach_transformed_block( - const MACROBLOCKD* const xd, BLOCK_SIZE bsize, - foreach_transformed_block_visitor visit, void *arg); - -static INLINE void txfrm_block_to_raster_xy(BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, int block, - int *x, int *y) { - const int bwl = b_width_log2_lookup[plane_bsize]; - const int tx_cols_log2 = bwl - tx_size; - const int tx_cols = 1 << tx_cols_log2; - const int raster_mb = block >> (tx_size << 1); - *x = (raster_mb & (tx_cols - 1)) << tx_size; - *y = (raster_mb >> tx_cols_log2) << tx_size; -} +void vp9_foreach_transformed_block(const MACROBLOCKD *const xd, + BLOCK_SIZE bsize, + foreach_transformed_block_visitor visit, + void *arg); void vp9_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd, BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob, diff --git a/libs/libvpx/vp9/common/vp9_common.h b/libs/libvpx/vp9/common/vp9_common.h index 76e7cd440b..666c3beaf0 100644 --- a/libs/libvpx/vp9/common/vp9_common.h +++ b/libs/libvpx/vp9/common/vp9_common.h @@ -26,15 +26,17 @@ extern "C" { #endif // Only need this for fixed-size arrays, for structs just assign. -#define vp9_copy(dest, src) { \ +#define vp9_copy(dest, src) \ + { \ assert(sizeof(dest) == sizeof(src)); \ - memcpy(dest, src, sizeof(src)); \ + memcpy(dest, src, sizeof(src)); \ } // Use this for variably-sized arrays. 
-#define vp9_copy_array(dest, src, n) { \ - assert(sizeof(*dest) == sizeof(*src)); \ - memcpy(dest, src, n * sizeof(*src)); \ +#define vp9_copy_array(dest, src, n) \ + { \ + assert(sizeof(*dest) == sizeof(*src)); \ + memcpy(dest, src, n * sizeof(*src)); \ } #define vp9_zero(dest) memset(&(dest), 0, sizeof(dest)) @@ -45,19 +47,21 @@ static INLINE int get_unsigned_bits(unsigned int num_values) { } #if CONFIG_DEBUG -#define CHECK_MEM_ERROR(cm, lval, expr) do { \ - lval = (expr); \ - if (!lval) \ - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, \ - "Failed to allocate "#lval" at %s:%d", \ - __FILE__, __LINE__); \ +#define CHECK_MEM_ERROR(cm, lval, expr) \ + do { \ + lval = (expr); \ + if (!lval) \ + vpx_internal_error(&(cm)->error, VPX_CODEC_MEM_ERROR, \ + "Failed to allocate " #lval " at %s:%d", __FILE__, \ + __LINE__); \ } while (0) #else -#define CHECK_MEM_ERROR(cm, lval, expr) do { \ - lval = (expr); \ - if (!lval) \ - vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, \ - "Failed to allocate "#lval); \ +#define CHECK_MEM_ERROR(cm, lval, expr) \ + do { \ + lval = (expr); \ + if (!lval) \ + vpx_internal_error(&(cm)->error, VPX_CODEC_MEM_ERROR, \ + "Failed to allocate " #lval); \ } while (0) #endif @@ -67,7 +71,6 @@ static INLINE int get_unsigned_bits(unsigned int num_values) { #define VP9_FRAME_MARKER 0x2 - #ifdef __cplusplus } // extern "C" #endif diff --git a/libs/libvpx/vp9/common/vp9_common_data.c b/libs/libvpx/vp9/common/vp9_common_data.c index a6dae6a1c8..4a10833229 100644 --- a/libs/libvpx/vp9/common/vp9_common_data.c +++ b/libs/libvpx/vp9/common/vp9_common_data.c @@ -12,130 +12,213 @@ #include "vpx_dsp/vpx_dsp_common.h" // Log 2 conversion lookup tables for block width and height -const uint8_t b_width_log2_lookup[BLOCK_SIZES] = - {0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4}; -const uint8_t b_height_log2_lookup[BLOCK_SIZES] = - {0, 1, 0, 1, 2, 1, 2, 3, 2, 3, 4, 3, 4}; -const uint8_t num_4x4_blocks_wide_lookup[BLOCK_SIZES] = - {1, 1, 2, 2, 2, 4, 4, 4, 8, 8, 
8, 16, 16}; -const uint8_t num_4x4_blocks_high_lookup[BLOCK_SIZES] = - {1, 2, 1, 2, 4, 2, 4, 8, 4, 8, 16, 8, 16}; +const uint8_t b_width_log2_lookup[BLOCK_SIZES] = { 0, 0, 1, 1, 1, 2, 2, + 2, 3, 3, 3, 4, 4 }; +const uint8_t b_height_log2_lookup[BLOCK_SIZES] = { 0, 1, 0, 1, 2, 1, 2, + 3, 2, 3, 4, 3, 4 }; +const uint8_t num_4x4_blocks_wide_lookup[BLOCK_SIZES] = { 1, 1, 2, 2, 2, 4, 4, + 4, 8, 8, 8, 16, 16 }; +const uint8_t num_4x4_blocks_high_lookup[BLOCK_SIZES] = { 1, 2, 1, 2, 4, 2, 4, + 8, 4, 8, 16, 8, 16 }; // Log 2 conversion lookup tables for modeinfo width and height -const uint8_t mi_width_log2_lookup[BLOCK_SIZES] = - {0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3}; -const uint8_t num_8x8_blocks_wide_lookup[BLOCK_SIZES] = - {1, 1, 1, 1, 1, 2, 2, 2, 4, 4, 4, 8, 8}; -const uint8_t num_8x8_blocks_high_lookup[BLOCK_SIZES] = - {1, 1, 1, 1, 2, 1, 2, 4, 2, 4, 8, 4, 8}; +const uint8_t mi_width_log2_lookup[BLOCK_SIZES] = { 0, 0, 0, 0, 0, 1, 1, + 1, 2, 2, 2, 3, 3 }; +const uint8_t num_8x8_blocks_wide_lookup[BLOCK_SIZES] = { 1, 1, 1, 1, 1, 2, 2, + 2, 4, 4, 4, 8, 8 }; +const uint8_t num_8x8_blocks_high_lookup[BLOCK_SIZES] = { 1, 1, 1, 1, 2, 1, 2, + 4, 2, 4, 8, 4, 8 }; // VPXMIN(3, VPXMIN(b_width_log2(bsize), b_height_log2(bsize))) -const uint8_t size_group_lookup[BLOCK_SIZES] = - {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3}; +const uint8_t size_group_lookup[BLOCK_SIZES] = { 0, 0, 0, 1, 1, 1, 2, + 2, 2, 3, 3, 3, 3 }; -const uint8_t num_pels_log2_lookup[BLOCK_SIZES] = - {4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12}; +const uint8_t num_pels_log2_lookup[BLOCK_SIZES] = { 4, 5, 5, 6, 7, 7, 8, + 9, 9, 10, 11, 11, 12 }; const PARTITION_TYPE partition_lookup[][BLOCK_SIZES] = { - { // 4X4 + { // 4X4 // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64 - PARTITION_NONE, PARTITION_INVALID, PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID, 
PARTITION_INVALID, - PARTITION_INVALID - }, { // 8X8 + PARTITION_NONE, PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, + PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, + PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, + PARTITION_INVALID }, + { // 8X8 // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64 PARTITION_SPLIT, PARTITION_VERT, PARTITION_HORZ, PARTITION_NONE, - PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID - }, { // 16X16 + PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, + PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, + PARTITION_INVALID }, + { // 16X16 // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64 PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_VERT, PARTITION_HORZ, PARTITION_NONE, PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID - }, { // 32X32 + PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID, + PARTITION_INVALID }, + { // 32X32 // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64 PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_VERT, - PARTITION_HORZ, PARTITION_NONE, PARTITION_INVALID, - PARTITION_INVALID, PARTITION_INVALID - }, { // 64X64 + PARTITION_HORZ, PARTITION_NONE, PARTITION_INVALID, PARTITION_INVALID, + PARTITION_INVALID }, + { // 64X64 // 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64 PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_VERT, PARTITION_HORZ, - PARTITION_NONE - } + 
PARTITION_NONE } }; const BLOCK_SIZE subsize_lookup[PARTITION_TYPES][BLOCK_SIZES] = { - { // PARTITION_NONE - BLOCK_4X4, BLOCK_4X8, BLOCK_8X4, - BLOCK_8X8, BLOCK_8X16, BLOCK_16X8, - BLOCK_16X16, BLOCK_16X32, BLOCK_32X16, - BLOCK_32X32, BLOCK_32X64, BLOCK_64X32, - BLOCK_64X64, - }, { // PARTITION_HORZ - BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_8X4, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_16X8, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_32X16, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_64X32, - }, { // PARTITION_VERT - BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_4X8, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_8X16, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_16X32, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_32X64, - }, { // PARTITION_SPLIT - BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_4X4, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_8X8, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_16X16, BLOCK_INVALID, BLOCK_INVALID, - BLOCK_32X32, - } + { // PARTITION_NONE + BLOCK_4X4, BLOCK_4X8, BLOCK_8X4, BLOCK_8X8, BLOCK_8X16, BLOCK_16X8, + BLOCK_16X16, BLOCK_16X32, BLOCK_32X16, BLOCK_32X32, BLOCK_32X64, + BLOCK_64X32, BLOCK_64X64 }, + { // PARTITION_HORZ + BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, BLOCK_8X4, BLOCK_INVALID, + BLOCK_INVALID, BLOCK_16X8, BLOCK_INVALID, BLOCK_INVALID, BLOCK_32X16, + BLOCK_INVALID, BLOCK_INVALID, BLOCK_64X32 }, + { // PARTITION_VERT + BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, BLOCK_4X8, BLOCK_INVALID, + BLOCK_INVALID, BLOCK_8X16, BLOCK_INVALID, BLOCK_INVALID, BLOCK_16X32, + BLOCK_INVALID, BLOCK_INVALID, BLOCK_32X64 }, + { // PARTITION_SPLIT + BLOCK_INVALID, BLOCK_INVALID, BLOCK_INVALID, BLOCK_4X4, BLOCK_INVALID, + BLOCK_INVALID, BLOCK_8X8, BLOCK_INVALID, BLOCK_INVALID, BLOCK_16X16, + BLOCK_INVALID, BLOCK_INVALID, BLOCK_32X32 } }; const TX_SIZE max_txsize_lookup[BLOCK_SIZES] = { - TX_4X4, TX_4X4, TX_4X4, - TX_8X8, TX_8X8, TX_8X8, - TX_16X16, TX_16X16, TX_16X16, - TX_32X32, TX_32X32, TX_32X32, TX_32X32 + TX_4X4, TX_4X4, TX_4X4, TX_8X8, TX_8X8, TX_8X8, 
TX_16X16, + TX_16X16, TX_16X16, TX_32X32, TX_32X32, TX_32X32, TX_32X32 }; const BLOCK_SIZE txsize_to_bsize[TX_SIZES] = { - BLOCK_4X4, // TX_4X4 - BLOCK_8X8, // TX_8X8 - BLOCK_16X16, // TX_16X16 - BLOCK_32X32, // TX_32X32 + BLOCK_4X4, // TX_4X4 + BLOCK_8X8, // TX_8X8 + BLOCK_16X16, // TX_16X16 + BLOCK_32X32, // TX_32X32 }; const TX_SIZE tx_mode_to_biggest_tx_size[TX_MODES] = { - TX_4X4, // ONLY_4X4 - TX_8X8, // ALLOW_8X8 + TX_4X4, // ONLY_4X4 + TX_8X8, // ALLOW_8X8 TX_16X16, // ALLOW_16X16 TX_32X32, // ALLOW_32X32 TX_32X32, // TX_MODE_SELECT }; const BLOCK_SIZE ss_size_lookup[BLOCK_SIZES][2][2] = { -// ss_x == 0 ss_x == 0 ss_x == 1 ss_x == 1 -// ss_y == 0 ss_y == 1 ss_y == 0 ss_y == 1 - {{BLOCK_4X4, BLOCK_INVALID}, {BLOCK_INVALID, BLOCK_INVALID}}, - {{BLOCK_4X8, BLOCK_4X4}, {BLOCK_INVALID, BLOCK_INVALID}}, - {{BLOCK_8X4, BLOCK_INVALID}, {BLOCK_4X4, BLOCK_INVALID}}, - {{BLOCK_8X8, BLOCK_8X4}, {BLOCK_4X8, BLOCK_4X4}}, - {{BLOCK_8X16, BLOCK_8X8}, {BLOCK_INVALID, BLOCK_4X8}}, - {{BLOCK_16X8, BLOCK_INVALID}, {BLOCK_8X8, BLOCK_8X4}}, - {{BLOCK_16X16, BLOCK_16X8}, {BLOCK_8X16, BLOCK_8X8}}, - {{BLOCK_16X32, BLOCK_16X16}, {BLOCK_INVALID, BLOCK_8X16}}, - {{BLOCK_32X16, BLOCK_INVALID}, {BLOCK_16X16, BLOCK_16X8}}, - {{BLOCK_32X32, BLOCK_32X16}, {BLOCK_16X32, BLOCK_16X16}}, - {{BLOCK_32X64, BLOCK_32X32}, {BLOCK_INVALID, BLOCK_16X32}}, - {{BLOCK_64X32, BLOCK_INVALID}, {BLOCK_32X32, BLOCK_32X16}}, - {{BLOCK_64X64, BLOCK_64X32}, {BLOCK_32X64, BLOCK_32X32}}, + // ss_x == 0 ss_x == 0 ss_x == 1 ss_x == 1 + // ss_y == 0 ss_y == 1 ss_y == 0 ss_y == 1 + { { BLOCK_4X4, BLOCK_INVALID }, { BLOCK_INVALID, BLOCK_INVALID } }, + { { BLOCK_4X8, BLOCK_4X4 }, { BLOCK_INVALID, BLOCK_INVALID } }, + { { BLOCK_8X4, BLOCK_INVALID }, { BLOCK_4X4, BLOCK_INVALID } }, + { { BLOCK_8X8, BLOCK_8X4 }, { BLOCK_4X8, BLOCK_4X4 } }, + { { BLOCK_8X16, BLOCK_8X8 }, { BLOCK_INVALID, BLOCK_4X8 } }, + { { BLOCK_16X8, BLOCK_INVALID }, { BLOCK_8X8, BLOCK_8X4 } }, + { { BLOCK_16X16, BLOCK_16X8 }, { BLOCK_8X16, BLOCK_8X8 } 
}, + { { BLOCK_16X32, BLOCK_16X16 }, { BLOCK_INVALID, BLOCK_8X16 } }, + { { BLOCK_32X16, BLOCK_INVALID }, { BLOCK_16X16, BLOCK_16X8 } }, + { { BLOCK_32X32, BLOCK_32X16 }, { BLOCK_16X32, BLOCK_16X16 } }, + { { BLOCK_32X64, BLOCK_32X32 }, { BLOCK_INVALID, BLOCK_16X32 } }, + { { BLOCK_64X32, BLOCK_INVALID }, { BLOCK_32X32, BLOCK_32X16 } }, + { { BLOCK_64X64, BLOCK_64X32 }, { BLOCK_32X64, BLOCK_32X32 } }, +}; + +const TX_SIZE uv_txsize_lookup[BLOCK_SIZES][TX_SIZES][2][2] = { + // ss_x == 0 ss_x == 0 ss_x == 1 ss_x == 1 + // ss_y == 0 ss_y == 1 ss_y == 0 ss_y == 1 + { + // BLOCK_4X4 + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + }, + { + // BLOCK_4X8 + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + }, + { + // BLOCK_8X4 + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + }, + { + // BLOCK_8X8 + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_8X8, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_8X8, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_8X8, TX_4X4 }, { TX_4X4, TX_4X4 } }, + }, + { + // BLOCK_8X16 + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_8X8, TX_8X8 }, { TX_4X4, TX_4X4 } }, + { { TX_8X8, TX_8X8 }, { TX_4X4, TX_4X4 } }, + { { TX_8X8, TX_8X8 }, { TX_4X4, TX_4X4 } }, + }, + { + // BLOCK_16X8 + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_8X8, TX_4X4 }, { TX_8X8, TX_4X4 } }, + { { TX_8X8, TX_4X4 }, { TX_8X8, TX_8X8 } }, + { { TX_8X8, TX_4X4 }, { TX_8X8, TX_8X8 } }, + }, + { + // BLOCK_16X16 + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_8X8, TX_8X8 }, { TX_8X8, TX_8X8 } }, + { { TX_16X16, TX_8X8 }, { TX_8X8, TX_8X8 } }, + { { TX_16X16, TX_8X8 
}, { TX_8X8, TX_8X8 } }, + }, + { + // BLOCK_16X32 + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_8X8, TX_8X8 }, { TX_8X8, TX_8X8 } }, + { { TX_16X16, TX_16X16 }, { TX_8X8, TX_8X8 } }, + { { TX_16X16, TX_16X16 }, { TX_8X8, TX_8X8 } }, + }, + { + // BLOCK_32X16 + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_8X8, TX_8X8 }, { TX_8X8, TX_8X8 } }, + { { TX_16X16, TX_8X8 }, { TX_16X16, TX_8X8 } }, + { { TX_16X16, TX_8X8 }, { TX_16X16, TX_8X8 } }, + }, + { + // BLOCK_32X32 + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_8X8, TX_8X8 }, { TX_8X8, TX_8X8 } }, + { { TX_16X16, TX_16X16 }, { TX_16X16, TX_16X16 } }, + { { TX_32X32, TX_16X16 }, { TX_16X16, TX_16X16 } }, + }, + { + // BLOCK_32X64 + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_8X8, TX_8X8 }, { TX_8X8, TX_8X8 } }, + { { TX_16X16, TX_16X16 }, { TX_16X16, TX_16X16 } }, + { { TX_32X32, TX_32X32 }, { TX_16X16, TX_16X16 } }, + }, + { + // BLOCK_64X32 + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_8X8, TX_8X8 }, { TX_8X8, TX_8X8 } }, + { { TX_16X16, TX_16X16 }, { TX_16X16, TX_16X16 } }, + { { TX_32X32, TX_16X16 }, { TX_32X32, TX_16X16 } }, + }, + { + // BLOCK_64X64 + { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, + { { TX_8X8, TX_8X8 }, { TX_8X8, TX_8X8 } }, + { { TX_16X16, TX_16X16 }, { TX_16X16, TX_16X16 } }, + { { TX_32X32, TX_32X32 }, { TX_32X32, TX_32X32 } }, + }, }; // Generates 4 bit field in which each bit set to 1 represents @@ -144,18 +227,33 @@ const BLOCK_SIZE ss_size_lookup[BLOCK_SIZES][2][2] = { const struct { PARTITION_CONTEXT above; PARTITION_CONTEXT left; -} partition_context_lookup[BLOCK_SIZES]= { - {15, 15}, // 4X4 - {0b1111, 0b1111} - {15, 14}, // 4X8 - {0b1111, 0b1110} - {14, 15}, // 8X4 - {0b1110, 0b1111} - {14, 14}, // 8X8 - {0b1110, 0b1110} - {14, 12}, // 8X16 - {0b1110, 0b1100} - {12, 14}, // 16X8 - {0b1100, 0b1110} - {12, 12}, // 16X16 - {0b1100, 0b1100} - {12, 8 }, // 16X32 - {0b1100, 0b1000} - {8, 12}, // 32X16 - {0b1000, 0b1100} - {8, 8 }, // 32X32 - 
{0b1000, 0b1000} - {8, 0 }, // 32X64 - {0b1000, 0b0000} - {0, 8 }, // 64X32 - {0b0000, 0b1000} - {0, 0 }, // 64X64 - {0b0000, 0b0000} +} partition_context_lookup[BLOCK_SIZES] = { + { 15, 15 }, // 4X4 - {0b1111, 0b1111} + { 15, 14 }, // 4X8 - {0b1111, 0b1110} + { 14, 15 }, // 8X4 - {0b1110, 0b1111} + { 14, 14 }, // 8X8 - {0b1110, 0b1110} + { 14, 12 }, // 8X16 - {0b1110, 0b1100} + { 12, 14 }, // 16X8 - {0b1100, 0b1110} + { 12, 12 }, // 16X16 - {0b1100, 0b1100} + { 12, 8 }, // 16X32 - {0b1100, 0b1000} + { 8, 12 }, // 32X16 - {0b1000, 0b1100} + { 8, 8 }, // 32X32 - {0b1000, 0b1000} + { 8, 0 }, // 32X64 - {0b1000, 0b0000} + { 0, 8 }, // 64X32 - {0b0000, 0b1000} + { 0, 0 }, // 64X64 - {0b0000, 0b0000} }; + +#if CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH +const uint8_t need_top_left[INTRA_MODES] = { + 0, // DC_PRED + 0, // V_PRED + 0, // H_PRED + 0, // D45_PRED + 1, // D135_PRED + 1, // D117_PRED + 1, // D153_PRED + 0, // D207_PRED + 0, // D63_PRED + 1, // TM_PRED +}; +#endif // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vp9/common/vp9_common_data.h b/libs/libvpx/vp9/common/vp9_common_data.h index 95a1179617..5c6a7e8ff3 100644 --- a/libs/libvpx/vp9/common/vp9_common_data.h +++ b/libs/libvpx/vp9/common/vp9_common_data.h @@ -33,6 +33,10 @@ extern const TX_SIZE max_txsize_lookup[BLOCK_SIZES]; extern const BLOCK_SIZE txsize_to_bsize[TX_SIZES]; extern const TX_SIZE tx_mode_to_biggest_tx_size[TX_MODES]; extern const BLOCK_SIZE ss_size_lookup[BLOCK_SIZES][2][2]; +extern const TX_SIZE uv_txsize_lookup[BLOCK_SIZES][TX_SIZES][2][2]; +#if CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH +extern const uint8_t need_top_left[INTRA_MODES]; +#endif // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp9/common/vp9_debugmodes.c b/libs/libvpx/vp9/common/vp9_debugmodes.c index d9c1fd9686..7d128c9f7f 100644 --- a/libs/libvpx/vp9/common/vp9_debugmodes.c +++ 
b/libs/libvpx/vp9/common/vp9_debugmodes.c @@ -34,9 +34,7 @@ static void print_mi_data(VP9_COMMON *cm, FILE *file, const char *descriptor, for (mi_row = 0; mi_row < rows; mi_row++) { fprintf(file, "%c ", prefix); for (mi_col = 0; mi_col < cols; mi_col++) { - fprintf(file, "%2d ", - *((int*) ((char *) (mi[0]) + - member_offset))); + fprintf(file, "%2d ", *((int *)((char *)(mi[0]) + member_offset))); mi++; } fprintf(file, "\n"); @@ -78,8 +76,7 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, const char *file) { for (mi_row = 0; mi_row < rows; mi_row++) { fprintf(mvs, "V "); for (mi_col = 0; mi_col < cols; mi_col++) { - fprintf(mvs, "%4d:%4d ", mi[0]->mv[0].as_mv.row, - mi[0]->mv[0].as_mv.col); + fprintf(mvs, "%4d:%4d ", mi[0]->mv[0].as_mv.row, mi[0]->mv[0].as_mv.col); mi++; } fprintf(mvs, "\n"); diff --git a/libs/libvpx/vp9/common/vp9_entropy.c b/libs/libvpx/vp9/common/vp9_entropy.c index fc022093c7..a575bda729 100644 --- a/libs/libvpx/vp9/common/vp9_entropy.c +++ b/libs/libvpx/vp9/common/vp9_entropy.c @@ -16,6 +16,7 @@ #include "vpx/vpx_integer.h" // Unconstrained Node Tree +/* clang-format off */ const vpx_tree_index vp9_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = { 2, 6, // 0 = LOW_VAL -TWO_TOKEN, 4, // 1 = TWO @@ -26,98 +27,71 @@ const vpx_tree_index vp9_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = { -CATEGORY3_TOKEN, -CATEGORY4_TOKEN, // 6 = CAT_THREE -CATEGORY5_TOKEN, -CATEGORY6_TOKEN // 7 = CAT_FIVE }; +/* clang-format on */ const vpx_prob vp9_cat1_prob[] = { 159 }; const vpx_prob vp9_cat2_prob[] = { 165, 145 }; const vpx_prob vp9_cat3_prob[] = { 173, 148, 140 }; const vpx_prob vp9_cat4_prob[] = { 176, 155, 140, 135 }; const vpx_prob vp9_cat5_prob[] = { 180, 157, 141, 134, 130 }; -const vpx_prob vp9_cat6_prob[] = { - 254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129 -}; +const vpx_prob vp9_cat6_prob[] = { 254, 254, 254, 252, 249, 243, 230, + 196, 177, 153, 140, 133, 130, 129 }; #if CONFIG_VP9_HIGHBITDEPTH -const vpx_prob 
vp9_cat6_prob_high12[] = { - 255, 255, 255, 255, 254, 254, 254, 252, 249, - 243, 230, 196, 177, 153, 140, 133, 130, 129 -}; +const vpx_prob vp9_cat6_prob_high12[] = { 255, 255, 255, 255, 254, 254, + 254, 252, 249, 243, 230, 196, + 177, 153, 140, 133, 130, 129 }; #endif const uint8_t vp9_coefband_trans_8x8plus[1024] = { - 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 5, + 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, // beyond MAXBAND_INDEX+1 all values are filled as 5 - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, }; const uint8_t vp9_coefband_trans_4x4[16] = { 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, }; -const uint8_t vp9_pt_energy_class[ENTROPY_TOKENS] = { - 0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 5 -}; +const uint8_t vp9_pt_energy_class[ENTROPY_TOKENS] = { 0, 1, 2, 3, 3, 4, + 4, 5, 5, 5, 5, 5 }; // Model obtained from a 2-sided zero-centerd distribuition derived // from a Pareto distribution. 
The cdf of the distribution is: @@ -134,604 +108,926 @@ const uint8_t vp9_pt_energy_class[ENTROPY_TOKENS] = { // vp9_pareto8_full[l][node] = (vp9_pareto8_full[l-1][node] + // vp9_pareto8_full[l+1][node] ) >> 1; const vpx_prob vp9_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES] = { - { 3, 86, 128, 6, 86, 23, 88, 29}, - { 6, 86, 128, 11, 87, 42, 91, 52}, - { 9, 86, 129, 17, 88, 61, 94, 76}, - { 12, 86, 129, 22, 88, 77, 97, 93}, - { 15, 87, 129, 28, 89, 93, 100, 110}, - { 17, 87, 129, 33, 90, 105, 103, 123}, - { 20, 88, 130, 38, 91, 118, 106, 136}, - { 23, 88, 130, 43, 91, 128, 108, 146}, - { 26, 89, 131, 48, 92, 139, 111, 156}, - { 28, 89, 131, 53, 93, 147, 114, 163}, - { 31, 90, 131, 58, 94, 156, 117, 171}, - { 34, 90, 131, 62, 94, 163, 119, 177}, - { 37, 90, 132, 66, 95, 171, 122, 184}, - { 39, 90, 132, 70, 96, 177, 124, 189}, - { 42, 91, 132, 75, 97, 183, 127, 194}, - { 44, 91, 132, 79, 97, 188, 129, 198}, - { 47, 92, 133, 83, 98, 193, 132, 202}, - { 49, 92, 133, 86, 99, 197, 134, 205}, - { 52, 93, 133, 90, 100, 201, 137, 208}, - { 54, 93, 133, 94, 100, 204, 139, 211}, - { 57, 94, 134, 98, 101, 208, 142, 214}, - { 59, 94, 134, 101, 102, 211, 144, 216}, - { 62, 94, 135, 105, 103, 214, 146, 218}, - { 64, 94, 135, 108, 103, 216, 148, 220}, - { 66, 95, 135, 111, 104, 219, 151, 222}, - { 68, 95, 135, 114, 105, 221, 153, 223}, - { 71, 96, 136, 117, 106, 224, 155, 225}, - { 73, 96, 136, 120, 106, 225, 157, 226}, - { 76, 97, 136, 123, 107, 227, 159, 228}, - { 78, 97, 136, 126, 108, 229, 160, 229}, - { 80, 98, 137, 129, 109, 231, 162, 231}, - { 82, 98, 137, 131, 109, 232, 164, 232}, - { 84, 98, 138, 134, 110, 234, 166, 233}, - { 86, 98, 138, 137, 111, 235, 168, 234}, - { 89, 99, 138, 140, 112, 236, 170, 235}, - { 91, 99, 138, 142, 112, 237, 171, 235}, - { 93, 100, 139, 145, 113, 238, 173, 236}, - { 95, 100, 139, 147, 114, 239, 174, 237}, - { 97, 101, 140, 149, 115, 240, 176, 238}, - { 99, 101, 140, 151, 115, 241, 177, 238}, - {101, 102, 140, 154, 116, 242, 179, 239}, - {103, 
102, 140, 156, 117, 242, 180, 239}, - {105, 103, 141, 158, 118, 243, 182, 240}, - {107, 103, 141, 160, 118, 243, 183, 240}, - {109, 104, 141, 162, 119, 244, 185, 241}, - {111, 104, 141, 164, 119, 244, 186, 241}, - {113, 104, 142, 166, 120, 245, 187, 242}, - {114, 104, 142, 168, 121, 245, 188, 242}, - {116, 105, 143, 170, 122, 246, 190, 243}, - {118, 105, 143, 171, 122, 246, 191, 243}, - {120, 106, 143, 173, 123, 247, 192, 244}, - {121, 106, 143, 175, 124, 247, 193, 244}, - {123, 107, 144, 177, 125, 248, 195, 244}, - {125, 107, 144, 178, 125, 248, 196, 244}, - {127, 108, 145, 180, 126, 249, 197, 245}, - {128, 108, 145, 181, 127, 249, 198, 245}, - {130, 109, 145, 183, 128, 249, 199, 245}, - {132, 109, 145, 184, 128, 249, 200, 245}, - {134, 110, 146, 186, 129, 250, 201, 246}, - {135, 110, 146, 187, 130, 250, 202, 246}, - {137, 111, 147, 189, 131, 251, 203, 246}, - {138, 111, 147, 190, 131, 251, 204, 246}, - {140, 112, 147, 192, 132, 251, 205, 247}, - {141, 112, 147, 193, 132, 251, 206, 247}, - {143, 113, 148, 194, 133, 251, 207, 247}, - {144, 113, 148, 195, 134, 251, 207, 247}, - {146, 114, 149, 197, 135, 252, 208, 248}, - {147, 114, 149, 198, 135, 252, 209, 248}, - {149, 115, 149, 199, 136, 252, 210, 248}, - {150, 115, 149, 200, 137, 252, 210, 248}, - {152, 115, 150, 201, 138, 252, 211, 248}, - {153, 115, 150, 202, 138, 252, 212, 248}, - {155, 116, 151, 204, 139, 253, 213, 249}, - {156, 116, 151, 205, 139, 253, 213, 249}, - {158, 117, 151, 206, 140, 253, 214, 249}, - {159, 117, 151, 207, 141, 253, 215, 249}, - {161, 118, 152, 208, 142, 253, 216, 249}, - {162, 118, 152, 209, 142, 253, 216, 249}, - {163, 119, 153, 210, 143, 253, 217, 249}, - {164, 119, 153, 211, 143, 253, 217, 249}, - {166, 120, 153, 212, 144, 254, 218, 250}, - {167, 120, 153, 212, 145, 254, 219, 250}, - {168, 121, 154, 213, 146, 254, 220, 250}, - {169, 121, 154, 214, 146, 254, 220, 250}, - {171, 122, 155, 215, 147, 254, 221, 250}, - {172, 122, 155, 216, 147, 254, 221, 250}, - {173, 123, 155, 217, 148, 
254, 222, 250}, - {174, 123, 155, 217, 149, 254, 222, 250}, - {176, 124, 156, 218, 150, 254, 223, 250}, - {177, 124, 156, 219, 150, 254, 223, 250}, - {178, 125, 157, 220, 151, 254, 224, 251}, - {179, 125, 157, 220, 151, 254, 224, 251}, - {180, 126, 157, 221, 152, 254, 225, 251}, - {181, 126, 157, 221, 152, 254, 225, 251}, - {183, 127, 158, 222, 153, 254, 226, 251}, - {184, 127, 158, 223, 154, 254, 226, 251}, - {185, 128, 159, 224, 155, 255, 227, 251}, - {186, 128, 159, 224, 155, 255, 227, 251}, - {187, 129, 160, 225, 156, 255, 228, 251}, - {188, 130, 160, 225, 156, 255, 228, 251}, - {189, 131, 160, 226, 157, 255, 228, 251}, - {190, 131, 160, 226, 158, 255, 228, 251}, - {191, 132, 161, 227, 159, 255, 229, 251}, - {192, 132, 161, 227, 159, 255, 229, 251}, - {193, 133, 162, 228, 160, 255, 230, 252}, - {194, 133, 162, 229, 160, 255, 230, 252}, - {195, 134, 163, 230, 161, 255, 231, 252}, - {196, 134, 163, 230, 161, 255, 231, 252}, - {197, 135, 163, 231, 162, 255, 231, 252}, - {198, 135, 163, 231, 162, 255, 231, 252}, - {199, 136, 164, 232, 163, 255, 232, 252}, - {200, 136, 164, 232, 164, 255, 232, 252}, - {201, 137, 165, 233, 165, 255, 233, 252}, - {201, 137, 165, 233, 165, 255, 233, 252}, - {202, 138, 166, 233, 166, 255, 233, 252}, - {203, 138, 166, 233, 166, 255, 233, 252}, - {204, 139, 166, 234, 167, 255, 234, 252}, - {205, 139, 166, 234, 167, 255, 234, 252}, - {206, 140, 167, 235, 168, 255, 235, 252}, - {206, 140, 167, 235, 168, 255, 235, 252}, - {207, 141, 168, 236, 169, 255, 235, 252}, - {208, 141, 168, 236, 170, 255, 235, 252}, - {209, 142, 169, 237, 171, 255, 236, 252}, - {209, 143, 169, 237, 171, 255, 236, 252}, - {210, 144, 169, 237, 172, 255, 236, 252}, - {211, 144, 169, 237, 172, 255, 236, 252}, - {212, 145, 170, 238, 173, 255, 237, 252}, - {213, 145, 170, 238, 173, 255, 237, 252}, - {214, 146, 171, 239, 174, 255, 237, 253}, - {214, 146, 171, 239, 174, 255, 237, 253}, - {215, 147, 172, 240, 175, 255, 238, 253}, - {215, 147, 172, 240, 175, 255, 238, 253}, - 
{216, 148, 173, 240, 176, 255, 238, 253}, - {217, 148, 173, 240, 176, 255, 238, 253}, - {218, 149, 173, 241, 177, 255, 239, 253}, - {218, 149, 173, 241, 178, 255, 239, 253}, - {219, 150, 174, 241, 179, 255, 239, 253}, - {219, 151, 174, 241, 179, 255, 239, 253}, - {220, 152, 175, 242, 180, 255, 240, 253}, - {221, 152, 175, 242, 180, 255, 240, 253}, - {222, 153, 176, 242, 181, 255, 240, 253}, - {222, 153, 176, 242, 181, 255, 240, 253}, - {223, 154, 177, 243, 182, 255, 240, 253}, - {223, 154, 177, 243, 182, 255, 240, 253}, - {224, 155, 178, 244, 183, 255, 241, 253}, - {224, 155, 178, 244, 183, 255, 241, 253}, - {225, 156, 178, 244, 184, 255, 241, 253}, - {225, 157, 178, 244, 184, 255, 241, 253}, - {226, 158, 179, 244, 185, 255, 242, 253}, - {227, 158, 179, 244, 185, 255, 242, 253}, - {228, 159, 180, 245, 186, 255, 242, 253}, - {228, 159, 180, 245, 186, 255, 242, 253}, - {229, 160, 181, 245, 187, 255, 242, 253}, - {229, 160, 181, 245, 187, 255, 242, 253}, - {230, 161, 182, 246, 188, 255, 243, 253}, - {230, 162, 182, 246, 188, 255, 243, 253}, - {231, 163, 183, 246, 189, 255, 243, 253}, - {231, 163, 183, 246, 189, 255, 243, 253}, - {232, 164, 184, 247, 190, 255, 243, 253}, - {232, 164, 184, 247, 190, 255, 243, 253}, - {233, 165, 185, 247, 191, 255, 244, 253}, - {233, 165, 185, 247, 191, 255, 244, 253}, - {234, 166, 185, 247, 192, 255, 244, 253}, - {234, 167, 185, 247, 192, 255, 244, 253}, - {235, 168, 186, 248, 193, 255, 244, 253}, - {235, 168, 186, 248, 193, 255, 244, 253}, - {236, 169, 187, 248, 194, 255, 244, 253}, - {236, 169, 187, 248, 194, 255, 244, 253}, - {236, 170, 188, 248, 195, 255, 245, 253}, - {236, 170, 188, 248, 195, 255, 245, 253}, - {237, 171, 189, 249, 196, 255, 245, 254}, - {237, 172, 189, 249, 196, 255, 245, 254}, - {238, 173, 190, 249, 197, 255, 245, 254}, - {238, 173, 190, 249, 197, 255, 245, 254}, - {239, 174, 191, 249, 198, 255, 245, 254}, - {239, 174, 191, 249, 198, 255, 245, 254}, - {240, 175, 192, 249, 199, 255, 246, 254}, - {240, 176, 192, 
249, 199, 255, 246, 254}, - {240, 177, 193, 250, 200, 255, 246, 254}, - {240, 177, 193, 250, 200, 255, 246, 254}, - {241, 178, 194, 250, 201, 255, 246, 254}, - {241, 178, 194, 250, 201, 255, 246, 254}, - {242, 179, 195, 250, 202, 255, 246, 254}, - {242, 180, 195, 250, 202, 255, 246, 254}, - {242, 181, 196, 250, 203, 255, 247, 254}, - {242, 181, 196, 250, 203, 255, 247, 254}, - {243, 182, 197, 251, 204, 255, 247, 254}, - {243, 183, 197, 251, 204, 255, 247, 254}, - {244, 184, 198, 251, 205, 255, 247, 254}, - {244, 184, 198, 251, 205, 255, 247, 254}, - {244, 185, 199, 251, 206, 255, 247, 254}, - {244, 185, 199, 251, 206, 255, 247, 254}, - {245, 186, 200, 251, 207, 255, 247, 254}, - {245, 187, 200, 251, 207, 255, 247, 254}, - {246, 188, 201, 252, 207, 255, 248, 254}, - {246, 188, 201, 252, 207, 255, 248, 254}, - {246, 189, 202, 252, 208, 255, 248, 254}, - {246, 190, 202, 252, 208, 255, 248, 254}, - {247, 191, 203, 252, 209, 255, 248, 254}, - {247, 191, 203, 252, 209, 255, 248, 254}, - {247, 192, 204, 252, 210, 255, 248, 254}, - {247, 193, 204, 252, 210, 255, 248, 254}, - {248, 194, 205, 252, 211, 255, 248, 254}, - {248, 194, 205, 252, 211, 255, 248, 254}, - {248, 195, 206, 252, 212, 255, 249, 254}, - {248, 196, 206, 252, 212, 255, 249, 254}, - {249, 197, 207, 253, 213, 255, 249, 254}, - {249, 197, 207, 253, 213, 255, 249, 254}, - {249, 198, 208, 253, 214, 255, 249, 254}, - {249, 199, 209, 253, 214, 255, 249, 254}, - {250, 200, 210, 253, 215, 255, 249, 254}, - {250, 200, 210, 253, 215, 255, 249, 254}, - {250, 201, 211, 253, 215, 255, 249, 254}, - {250, 202, 211, 253, 215, 255, 249, 254}, - {250, 203, 212, 253, 216, 255, 249, 254}, - {250, 203, 212, 253, 216, 255, 249, 254}, - {251, 204, 213, 253, 217, 255, 250, 254}, - {251, 205, 213, 253, 217, 255, 250, 254}, - {251, 206, 214, 254, 218, 255, 250, 254}, - {251, 206, 215, 254, 218, 255, 250, 254}, - {252, 207, 216, 254, 219, 255, 250, 254}, - {252, 208, 216, 254, 219, 255, 250, 254}, - {252, 209, 217, 254, 220, 255, 250, 
254}, - {252, 210, 217, 254, 220, 255, 250, 254}, - {252, 211, 218, 254, 221, 255, 250, 254}, - {252, 212, 218, 254, 221, 255, 250, 254}, - {253, 213, 219, 254, 222, 255, 250, 254}, - {253, 213, 220, 254, 222, 255, 250, 254}, - {253, 214, 221, 254, 223, 255, 250, 254}, - {253, 215, 221, 254, 223, 255, 250, 254}, - {253, 216, 222, 254, 224, 255, 251, 254}, - {253, 217, 223, 254, 224, 255, 251, 254}, - {253, 218, 224, 254, 225, 255, 251, 254}, - {253, 219, 224, 254, 225, 255, 251, 254}, - {254, 220, 225, 254, 225, 255, 251, 254}, - {254, 221, 226, 254, 225, 255, 251, 254}, - {254, 222, 227, 255, 226, 255, 251, 254}, - {254, 223, 227, 255, 226, 255, 251, 254}, - {254, 224, 228, 255, 227, 255, 251, 254}, - {254, 225, 229, 255, 227, 255, 251, 254}, - {254, 226, 230, 255, 228, 255, 251, 254}, - {254, 227, 230, 255, 229, 255, 251, 254}, - {255, 228, 231, 255, 230, 255, 251, 254}, - {255, 229, 232, 255, 230, 255, 251, 254}, - {255, 230, 233, 255, 231, 255, 252, 254}, - {255, 231, 234, 255, 231, 255, 252, 254}, - {255, 232, 235, 255, 232, 255, 252, 254}, - {255, 233, 236, 255, 232, 255, 252, 254}, - {255, 235, 237, 255, 233, 255, 252, 254}, - {255, 236, 238, 255, 234, 255, 252, 254}, - {255, 238, 240, 255, 235, 255, 252, 255}, - {255, 239, 241, 255, 235, 255, 252, 254}, - {255, 241, 243, 255, 236, 255, 252, 254}, - {255, 243, 245, 255, 237, 255, 252, 254}, - {255, 246, 247, 255, 239, 255, 253, 255}, + { 3, 86, 128, 6, 86, 23, 88, 29 }, + { 6, 86, 128, 11, 87, 42, 91, 52 }, + { 9, 86, 129, 17, 88, 61, 94, 76 }, + { 12, 86, 129, 22, 88, 77, 97, 93 }, + { 15, 87, 129, 28, 89, 93, 100, 110 }, + { 17, 87, 129, 33, 90, 105, 103, 123 }, + { 20, 88, 130, 38, 91, 118, 106, 136 }, + { 23, 88, 130, 43, 91, 128, 108, 146 }, + { 26, 89, 131, 48, 92, 139, 111, 156 }, + { 28, 89, 131, 53, 93, 147, 114, 163 }, + { 31, 90, 131, 58, 94, 156, 117, 171 }, + { 34, 90, 131, 62, 94, 163, 119, 177 }, + { 37, 90, 132, 66, 95, 171, 122, 184 }, + { 39, 90, 132, 70, 96, 177, 124, 189 }, + { 42, 91, 
132, 75, 97, 183, 127, 194 }, + { 44, 91, 132, 79, 97, 188, 129, 198 }, + { 47, 92, 133, 83, 98, 193, 132, 202 }, + { 49, 92, 133, 86, 99, 197, 134, 205 }, + { 52, 93, 133, 90, 100, 201, 137, 208 }, + { 54, 93, 133, 94, 100, 204, 139, 211 }, + { 57, 94, 134, 98, 101, 208, 142, 214 }, + { 59, 94, 134, 101, 102, 211, 144, 216 }, + { 62, 94, 135, 105, 103, 214, 146, 218 }, + { 64, 94, 135, 108, 103, 216, 148, 220 }, + { 66, 95, 135, 111, 104, 219, 151, 222 }, + { 68, 95, 135, 114, 105, 221, 153, 223 }, + { 71, 96, 136, 117, 106, 224, 155, 225 }, + { 73, 96, 136, 120, 106, 225, 157, 226 }, + { 76, 97, 136, 123, 107, 227, 159, 228 }, + { 78, 97, 136, 126, 108, 229, 160, 229 }, + { 80, 98, 137, 129, 109, 231, 162, 231 }, + { 82, 98, 137, 131, 109, 232, 164, 232 }, + { 84, 98, 138, 134, 110, 234, 166, 233 }, + { 86, 98, 138, 137, 111, 235, 168, 234 }, + { 89, 99, 138, 140, 112, 236, 170, 235 }, + { 91, 99, 138, 142, 112, 237, 171, 235 }, + { 93, 100, 139, 145, 113, 238, 173, 236 }, + { 95, 100, 139, 147, 114, 239, 174, 237 }, + { 97, 101, 140, 149, 115, 240, 176, 238 }, + { 99, 101, 140, 151, 115, 241, 177, 238 }, + { 101, 102, 140, 154, 116, 242, 179, 239 }, + { 103, 102, 140, 156, 117, 242, 180, 239 }, + { 105, 103, 141, 158, 118, 243, 182, 240 }, + { 107, 103, 141, 160, 118, 243, 183, 240 }, + { 109, 104, 141, 162, 119, 244, 185, 241 }, + { 111, 104, 141, 164, 119, 244, 186, 241 }, + { 113, 104, 142, 166, 120, 245, 187, 242 }, + { 114, 104, 142, 168, 121, 245, 188, 242 }, + { 116, 105, 143, 170, 122, 246, 190, 243 }, + { 118, 105, 143, 171, 122, 246, 191, 243 }, + { 120, 106, 143, 173, 123, 247, 192, 244 }, + { 121, 106, 143, 175, 124, 247, 193, 244 }, + { 123, 107, 144, 177, 125, 248, 195, 244 }, + { 125, 107, 144, 178, 125, 248, 196, 244 }, + { 127, 108, 145, 180, 126, 249, 197, 245 }, + { 128, 108, 145, 181, 127, 249, 198, 245 }, + { 130, 109, 145, 183, 128, 249, 199, 245 }, + { 132, 109, 145, 184, 128, 249, 200, 245 }, + { 134, 110, 146, 186, 129, 250, 201, 246 }, 
+ { 135, 110, 146, 187, 130, 250, 202, 246 }, + { 137, 111, 147, 189, 131, 251, 203, 246 }, + { 138, 111, 147, 190, 131, 251, 204, 246 }, + { 140, 112, 147, 192, 132, 251, 205, 247 }, + { 141, 112, 147, 193, 132, 251, 206, 247 }, + { 143, 113, 148, 194, 133, 251, 207, 247 }, + { 144, 113, 148, 195, 134, 251, 207, 247 }, + { 146, 114, 149, 197, 135, 252, 208, 248 }, + { 147, 114, 149, 198, 135, 252, 209, 248 }, + { 149, 115, 149, 199, 136, 252, 210, 248 }, + { 150, 115, 149, 200, 137, 252, 210, 248 }, + { 152, 115, 150, 201, 138, 252, 211, 248 }, + { 153, 115, 150, 202, 138, 252, 212, 248 }, + { 155, 116, 151, 204, 139, 253, 213, 249 }, + { 156, 116, 151, 205, 139, 253, 213, 249 }, + { 158, 117, 151, 206, 140, 253, 214, 249 }, + { 159, 117, 151, 207, 141, 253, 215, 249 }, + { 161, 118, 152, 208, 142, 253, 216, 249 }, + { 162, 118, 152, 209, 142, 253, 216, 249 }, + { 163, 119, 153, 210, 143, 253, 217, 249 }, + { 164, 119, 153, 211, 143, 253, 217, 249 }, + { 166, 120, 153, 212, 144, 254, 218, 250 }, + { 167, 120, 153, 212, 145, 254, 219, 250 }, + { 168, 121, 154, 213, 146, 254, 220, 250 }, + { 169, 121, 154, 214, 146, 254, 220, 250 }, + { 171, 122, 155, 215, 147, 254, 221, 250 }, + { 172, 122, 155, 216, 147, 254, 221, 250 }, + { 173, 123, 155, 217, 148, 254, 222, 250 }, + { 174, 123, 155, 217, 149, 254, 222, 250 }, + { 176, 124, 156, 218, 150, 254, 223, 250 }, + { 177, 124, 156, 219, 150, 254, 223, 250 }, + { 178, 125, 157, 220, 151, 254, 224, 251 }, + { 179, 125, 157, 220, 151, 254, 224, 251 }, + { 180, 126, 157, 221, 152, 254, 225, 251 }, + { 181, 126, 157, 221, 152, 254, 225, 251 }, + { 183, 127, 158, 222, 153, 254, 226, 251 }, + { 184, 127, 158, 223, 154, 254, 226, 251 }, + { 185, 128, 159, 224, 155, 255, 227, 251 }, + { 186, 128, 159, 224, 155, 255, 227, 251 }, + { 187, 129, 160, 225, 156, 255, 228, 251 }, + { 188, 130, 160, 225, 156, 255, 228, 251 }, + { 189, 131, 160, 226, 157, 255, 228, 251 }, + { 190, 131, 160, 226, 158, 255, 228, 251 }, + { 191, 132, 161, 
227, 159, 255, 229, 251 }, + { 192, 132, 161, 227, 159, 255, 229, 251 }, + { 193, 133, 162, 228, 160, 255, 230, 252 }, + { 194, 133, 162, 229, 160, 255, 230, 252 }, + { 195, 134, 163, 230, 161, 255, 231, 252 }, + { 196, 134, 163, 230, 161, 255, 231, 252 }, + { 197, 135, 163, 231, 162, 255, 231, 252 }, + { 198, 135, 163, 231, 162, 255, 231, 252 }, + { 199, 136, 164, 232, 163, 255, 232, 252 }, + { 200, 136, 164, 232, 164, 255, 232, 252 }, + { 201, 137, 165, 233, 165, 255, 233, 252 }, + { 201, 137, 165, 233, 165, 255, 233, 252 }, + { 202, 138, 166, 233, 166, 255, 233, 252 }, + { 203, 138, 166, 233, 166, 255, 233, 252 }, + { 204, 139, 166, 234, 167, 255, 234, 252 }, + { 205, 139, 166, 234, 167, 255, 234, 252 }, + { 206, 140, 167, 235, 168, 255, 235, 252 }, + { 206, 140, 167, 235, 168, 255, 235, 252 }, + { 207, 141, 168, 236, 169, 255, 235, 252 }, + { 208, 141, 168, 236, 170, 255, 235, 252 }, + { 209, 142, 169, 237, 171, 255, 236, 252 }, + { 209, 143, 169, 237, 171, 255, 236, 252 }, + { 210, 144, 169, 237, 172, 255, 236, 252 }, + { 211, 144, 169, 237, 172, 255, 236, 252 }, + { 212, 145, 170, 238, 173, 255, 237, 252 }, + { 213, 145, 170, 238, 173, 255, 237, 252 }, + { 214, 146, 171, 239, 174, 255, 237, 253 }, + { 214, 146, 171, 239, 174, 255, 237, 253 }, + { 215, 147, 172, 240, 175, 255, 238, 253 }, + { 215, 147, 172, 240, 175, 255, 238, 253 }, + { 216, 148, 173, 240, 176, 255, 238, 253 }, + { 217, 148, 173, 240, 176, 255, 238, 253 }, + { 218, 149, 173, 241, 177, 255, 239, 253 }, + { 218, 149, 173, 241, 178, 255, 239, 253 }, + { 219, 150, 174, 241, 179, 255, 239, 253 }, + { 219, 151, 174, 241, 179, 255, 239, 253 }, + { 220, 152, 175, 242, 180, 255, 240, 253 }, + { 221, 152, 175, 242, 180, 255, 240, 253 }, + { 222, 153, 176, 242, 181, 255, 240, 253 }, + { 222, 153, 176, 242, 181, 255, 240, 253 }, + { 223, 154, 177, 243, 182, 255, 240, 253 }, + { 223, 154, 177, 243, 182, 255, 240, 253 }, + { 224, 155, 178, 244, 183, 255, 241, 253 }, + { 224, 155, 178, 244, 183, 255, 241, 
253 }, + { 225, 156, 178, 244, 184, 255, 241, 253 }, + { 225, 157, 178, 244, 184, 255, 241, 253 }, + { 226, 158, 179, 244, 185, 255, 242, 253 }, + { 227, 158, 179, 244, 185, 255, 242, 253 }, + { 228, 159, 180, 245, 186, 255, 242, 253 }, + { 228, 159, 180, 245, 186, 255, 242, 253 }, + { 229, 160, 181, 245, 187, 255, 242, 253 }, + { 229, 160, 181, 245, 187, 255, 242, 253 }, + { 230, 161, 182, 246, 188, 255, 243, 253 }, + { 230, 162, 182, 246, 188, 255, 243, 253 }, + { 231, 163, 183, 246, 189, 255, 243, 253 }, + { 231, 163, 183, 246, 189, 255, 243, 253 }, + { 232, 164, 184, 247, 190, 255, 243, 253 }, + { 232, 164, 184, 247, 190, 255, 243, 253 }, + { 233, 165, 185, 247, 191, 255, 244, 253 }, + { 233, 165, 185, 247, 191, 255, 244, 253 }, + { 234, 166, 185, 247, 192, 255, 244, 253 }, + { 234, 167, 185, 247, 192, 255, 244, 253 }, + { 235, 168, 186, 248, 193, 255, 244, 253 }, + { 235, 168, 186, 248, 193, 255, 244, 253 }, + { 236, 169, 187, 248, 194, 255, 244, 253 }, + { 236, 169, 187, 248, 194, 255, 244, 253 }, + { 236, 170, 188, 248, 195, 255, 245, 253 }, + { 236, 170, 188, 248, 195, 255, 245, 253 }, + { 237, 171, 189, 249, 196, 255, 245, 254 }, + { 237, 172, 189, 249, 196, 255, 245, 254 }, + { 238, 173, 190, 249, 197, 255, 245, 254 }, + { 238, 173, 190, 249, 197, 255, 245, 254 }, + { 239, 174, 191, 249, 198, 255, 245, 254 }, + { 239, 174, 191, 249, 198, 255, 245, 254 }, + { 240, 175, 192, 249, 199, 255, 246, 254 }, + { 240, 176, 192, 249, 199, 255, 246, 254 }, + { 240, 177, 193, 250, 200, 255, 246, 254 }, + { 240, 177, 193, 250, 200, 255, 246, 254 }, + { 241, 178, 194, 250, 201, 255, 246, 254 }, + { 241, 178, 194, 250, 201, 255, 246, 254 }, + { 242, 179, 195, 250, 202, 255, 246, 254 }, + { 242, 180, 195, 250, 202, 255, 246, 254 }, + { 242, 181, 196, 250, 203, 255, 247, 254 }, + { 242, 181, 196, 250, 203, 255, 247, 254 }, + { 243, 182, 197, 251, 204, 255, 247, 254 }, + { 243, 183, 197, 251, 204, 255, 247, 254 }, + { 244, 184, 198, 251, 205, 255, 247, 254 }, + { 244, 184, 
198, 251, 205, 255, 247, 254 }, + { 244, 185, 199, 251, 206, 255, 247, 254 }, + { 244, 185, 199, 251, 206, 255, 247, 254 }, + { 245, 186, 200, 251, 207, 255, 247, 254 }, + { 245, 187, 200, 251, 207, 255, 247, 254 }, + { 246, 188, 201, 252, 207, 255, 248, 254 }, + { 246, 188, 201, 252, 207, 255, 248, 254 }, + { 246, 189, 202, 252, 208, 255, 248, 254 }, + { 246, 190, 202, 252, 208, 255, 248, 254 }, + { 247, 191, 203, 252, 209, 255, 248, 254 }, + { 247, 191, 203, 252, 209, 255, 248, 254 }, + { 247, 192, 204, 252, 210, 255, 248, 254 }, + { 247, 193, 204, 252, 210, 255, 248, 254 }, + { 248, 194, 205, 252, 211, 255, 248, 254 }, + { 248, 194, 205, 252, 211, 255, 248, 254 }, + { 248, 195, 206, 252, 212, 255, 249, 254 }, + { 248, 196, 206, 252, 212, 255, 249, 254 }, + { 249, 197, 207, 253, 213, 255, 249, 254 }, + { 249, 197, 207, 253, 213, 255, 249, 254 }, + { 249, 198, 208, 253, 214, 255, 249, 254 }, + { 249, 199, 209, 253, 214, 255, 249, 254 }, + { 250, 200, 210, 253, 215, 255, 249, 254 }, + { 250, 200, 210, 253, 215, 255, 249, 254 }, + { 250, 201, 211, 253, 215, 255, 249, 254 }, + { 250, 202, 211, 253, 215, 255, 249, 254 }, + { 250, 203, 212, 253, 216, 255, 249, 254 }, + { 250, 203, 212, 253, 216, 255, 249, 254 }, + { 251, 204, 213, 253, 217, 255, 250, 254 }, + { 251, 205, 213, 253, 217, 255, 250, 254 }, + { 251, 206, 214, 254, 218, 255, 250, 254 }, + { 251, 206, 215, 254, 218, 255, 250, 254 }, + { 252, 207, 216, 254, 219, 255, 250, 254 }, + { 252, 208, 216, 254, 219, 255, 250, 254 }, + { 252, 209, 217, 254, 220, 255, 250, 254 }, + { 252, 210, 217, 254, 220, 255, 250, 254 }, + { 252, 211, 218, 254, 221, 255, 250, 254 }, + { 252, 212, 218, 254, 221, 255, 250, 254 }, + { 253, 213, 219, 254, 222, 255, 250, 254 }, + { 253, 213, 220, 254, 222, 255, 250, 254 }, + { 253, 214, 221, 254, 223, 255, 250, 254 }, + { 253, 215, 221, 254, 223, 255, 250, 254 }, + { 253, 216, 222, 254, 224, 255, 251, 254 }, + { 253, 217, 223, 254, 224, 255, 251, 254 }, + { 253, 218, 224, 254, 225, 255, 
251, 254 }, + { 253, 219, 224, 254, 225, 255, 251, 254 }, + { 254, 220, 225, 254, 225, 255, 251, 254 }, + { 254, 221, 226, 254, 225, 255, 251, 254 }, + { 254, 222, 227, 255, 226, 255, 251, 254 }, + { 254, 223, 227, 255, 226, 255, 251, 254 }, + { 254, 224, 228, 255, 227, 255, 251, 254 }, + { 254, 225, 229, 255, 227, 255, 251, 254 }, + { 254, 226, 230, 255, 228, 255, 251, 254 }, + { 254, 227, 230, 255, 229, 255, 251, 254 }, + { 255, 228, 231, 255, 230, 255, 251, 254 }, + { 255, 229, 232, 255, 230, 255, 251, 254 }, + { 255, 230, 233, 255, 231, 255, 252, 254 }, + { 255, 231, 234, 255, 231, 255, 252, 254 }, + { 255, 232, 235, 255, 232, 255, 252, 254 }, + { 255, 233, 236, 255, 232, 255, 252, 254 }, + { 255, 235, 237, 255, 233, 255, 252, 254 }, + { 255, 236, 238, 255, 234, 255, 252, 254 }, + { 255, 238, 240, 255, 235, 255, 252, 255 }, + { 255, 239, 241, 255, 235, 255, 252, 254 }, + { 255, 241, 243, 255, 236, 255, 252, 254 }, + { 255, 243, 245, 255, 237, 255, 252, 254 }, + { 255, 246, 247, 255, 239, 255, 253, 255 }, }; static const vp9_coeff_probs_model default_coef_probs_4x4[PLANE_TYPES] = { - { // Y plane - { // Intra - { // Band 0 - { 195, 29, 183 }, { 84, 49, 136 }, { 8, 42, 71 } - }, { // Band 1 - { 31, 107, 169 }, { 35, 99, 159 }, { 17, 82, 140 }, - { 8, 66, 114 }, { 2, 44, 76 }, { 1, 19, 32 } - }, { // Band 2 - { 40, 132, 201 }, { 29, 114, 187 }, { 13, 91, 157 }, - { 7, 75, 127 }, { 3, 58, 95 }, { 1, 28, 47 } - }, { // Band 3 - { 69, 142, 221 }, { 42, 122, 201 }, { 15, 91, 159 }, - { 6, 67, 121 }, { 1, 42, 77 }, { 1, 17, 31 } - }, { // Band 4 - { 102, 148, 228 }, { 67, 117, 204 }, { 17, 82, 154 }, - { 6, 59, 114 }, { 2, 39, 75 }, { 1, 15, 29 } - }, { // Band 5 - { 156, 57, 233 }, { 119, 57, 212 }, { 58, 48, 163 }, - { 29, 40, 124 }, { 12, 30, 81 }, { 3, 12, 31 } - } - }, { // Inter - { // Band 0 - { 191, 107, 226 }, { 124, 117, 204 }, { 25, 99, 155 } - }, { // Band 1 - { 29, 148, 210 }, { 37, 126, 194 }, { 8, 93, 157 }, - { 2, 68, 118 }, { 1, 39, 69 }, { 1, 17, 33 } 
- }, { // Band 2 - { 41, 151, 213 }, { 27, 123, 193 }, { 3, 82, 144 }, - { 1, 58, 105 }, { 1, 32, 60 }, { 1, 13, 26 } - }, { // Band 3 - { 59, 159, 220 }, { 23, 126, 198 }, { 4, 88, 151 }, - { 1, 66, 114 }, { 1, 38, 71 }, { 1, 18, 34 } - }, { // Band 4 - { 114, 136, 232 }, { 51, 114, 207 }, { 11, 83, 155 }, - { 3, 56, 105 }, { 1, 33, 65 }, { 1, 17, 34 } - }, { // Band 5 - { 149, 65, 234 }, { 121, 57, 215 }, { 61, 49, 166 }, - { 28, 36, 114 }, { 12, 25, 76 }, { 3, 16, 42 } - } - } - }, { // UV plane - { // Intra - { // Band 0 - { 214, 49, 220 }, { 132, 63, 188 }, { 42, 65, 137 } - }, { // Band 1 - { 85, 137, 221 }, { 104, 131, 216 }, { 49, 111, 192 }, - { 21, 87, 155 }, { 2, 49, 87 }, { 1, 16, 28 } - }, { // Band 2 - { 89, 163, 230 }, { 90, 137, 220 }, { 29, 100, 183 }, - { 10, 70, 135 }, { 2, 42, 81 }, { 1, 17, 33 } - }, { // Band 3 - { 108, 167, 237 }, { 55, 133, 222 }, { 15, 97, 179 }, - { 4, 72, 135 }, { 1, 45, 85 }, { 1, 19, 38 } - }, { // Band 4 - { 124, 146, 240 }, { 66, 124, 224 }, { 17, 88, 175 }, - { 4, 58, 122 }, { 1, 36, 75 }, { 1, 18, 37 } - }, { // Band 5 - { 141, 79, 241 }, { 126, 70, 227 }, { 66, 58, 182 }, - { 30, 44, 136 }, { 12, 34, 96 }, { 2, 20, 47 } - } - }, { // Inter - { // Band 0 - { 229, 99, 249 }, { 143, 111, 235 }, { 46, 109, 192 } - }, { // Band 1 - { 82, 158, 236 }, { 94, 146, 224 }, { 25, 117, 191 }, - { 9, 87, 149 }, { 3, 56, 99 }, { 1, 33, 57 } - }, { // Band 2 - { 83, 167, 237 }, { 68, 145, 222 }, { 10, 103, 177 }, - { 2, 72, 131 }, { 1, 41, 79 }, { 1, 20, 39 } - }, { // Band 3 - { 99, 167, 239 }, { 47, 141, 224 }, { 10, 104, 178 }, - { 2, 73, 133 }, { 1, 44, 85 }, { 1, 22, 47 } - }, { // Band 4 - { 127, 145, 243 }, { 71, 129, 228 }, { 17, 93, 177 }, - { 3, 61, 124 }, { 1, 41, 84 }, { 1, 21, 52 } - }, { // Band 5 - { 157, 78, 244 }, { 140, 72, 231 }, { 69, 58, 184 }, - { 31, 44, 137 }, { 14, 38, 105 }, { 8, 23, 61 } - } - } - } + { // Y plane + { // Intra + { // Band 0 + { 195, 29, 183 }, + { 84, 49, 136 }, + { 8, 42, 71 } }, + { // 
Band 1 + { 31, 107, 169 }, + { 35, 99, 159 }, + { 17, 82, 140 }, + { 8, 66, 114 }, + { 2, 44, 76 }, + { 1, 19, 32 } }, + { // Band 2 + { 40, 132, 201 }, + { 29, 114, 187 }, + { 13, 91, 157 }, + { 7, 75, 127 }, + { 3, 58, 95 }, + { 1, 28, 47 } }, + { // Band 3 + { 69, 142, 221 }, + { 42, 122, 201 }, + { 15, 91, 159 }, + { 6, 67, 121 }, + { 1, 42, 77 }, + { 1, 17, 31 } }, + { // Band 4 + { 102, 148, 228 }, + { 67, 117, 204 }, + { 17, 82, 154 }, + { 6, 59, 114 }, + { 2, 39, 75 }, + { 1, 15, 29 } }, + { // Band 5 + { 156, 57, 233 }, + { 119, 57, 212 }, + { 58, 48, 163 }, + { 29, 40, 124 }, + { 12, 30, 81 }, + { 3, 12, 31 } } }, + { // Inter + { // Band 0 + { 191, 107, 226 }, + { 124, 117, 204 }, + { 25, 99, 155 } }, + { // Band 1 + { 29, 148, 210 }, + { 37, 126, 194 }, + { 8, 93, 157 }, + { 2, 68, 118 }, + { 1, 39, 69 }, + { 1, 17, 33 } }, + { // Band 2 + { 41, 151, 213 }, + { 27, 123, 193 }, + { 3, 82, 144 }, + { 1, 58, 105 }, + { 1, 32, 60 }, + { 1, 13, 26 } }, + { // Band 3 + { 59, 159, 220 }, + { 23, 126, 198 }, + { 4, 88, 151 }, + { 1, 66, 114 }, + { 1, 38, 71 }, + { 1, 18, 34 } }, + { // Band 4 + { 114, 136, 232 }, + { 51, 114, 207 }, + { 11, 83, 155 }, + { 3, 56, 105 }, + { 1, 33, 65 }, + { 1, 17, 34 } }, + { // Band 5 + { 149, 65, 234 }, + { 121, 57, 215 }, + { 61, 49, 166 }, + { 28, 36, 114 }, + { 12, 25, 76 }, + { 3, 16, 42 } } } }, + { // UV plane + { // Intra + { // Band 0 + { 214, 49, 220 }, + { 132, 63, 188 }, + { 42, 65, 137 } }, + { // Band 1 + { 85, 137, 221 }, + { 104, 131, 216 }, + { 49, 111, 192 }, + { 21, 87, 155 }, + { 2, 49, 87 }, + { 1, 16, 28 } }, + { // Band 2 + { 89, 163, 230 }, + { 90, 137, 220 }, + { 29, 100, 183 }, + { 10, 70, 135 }, + { 2, 42, 81 }, + { 1, 17, 33 } }, + { // Band 3 + { 108, 167, 237 }, + { 55, 133, 222 }, + { 15, 97, 179 }, + { 4, 72, 135 }, + { 1, 45, 85 }, + { 1, 19, 38 } }, + { // Band 4 + { 124, 146, 240 }, + { 66, 124, 224 }, + { 17, 88, 175 }, + { 4, 58, 122 }, + { 1, 36, 75 }, + { 1, 18, 37 } }, + { // Band 5 + { 
141, 79, 241 }, + { 126, 70, 227 }, + { 66, 58, 182 }, + { 30, 44, 136 }, + { 12, 34, 96 }, + { 2, 20, 47 } } }, + { // Inter + { // Band 0 + { 229, 99, 249 }, + { 143, 111, 235 }, + { 46, 109, 192 } }, + { // Band 1 + { 82, 158, 236 }, + { 94, 146, 224 }, + { 25, 117, 191 }, + { 9, 87, 149 }, + { 3, 56, 99 }, + { 1, 33, 57 } }, + { // Band 2 + { 83, 167, 237 }, + { 68, 145, 222 }, + { 10, 103, 177 }, + { 2, 72, 131 }, + { 1, 41, 79 }, + { 1, 20, 39 } }, + { // Band 3 + { 99, 167, 239 }, + { 47, 141, 224 }, + { 10, 104, 178 }, + { 2, 73, 133 }, + { 1, 44, 85 }, + { 1, 22, 47 } }, + { // Band 4 + { 127, 145, 243 }, + { 71, 129, 228 }, + { 17, 93, 177 }, + { 3, 61, 124 }, + { 1, 41, 84 }, + { 1, 21, 52 } }, + { // Band 5 + { 157, 78, 244 }, + { 140, 72, 231 }, + { 69, 58, 184 }, + { 31, 44, 137 }, + { 14, 38, 105 }, + { 8, 23, 61 } } } } }; static const vp9_coeff_probs_model default_coef_probs_8x8[PLANE_TYPES] = { - { // Y plane - { // Intra - { // Band 0 - { 125, 34, 187 }, { 52, 41, 133 }, { 6, 31, 56 } - }, { // Band 1 - { 37, 109, 153 }, { 51, 102, 147 }, { 23, 87, 128 }, - { 8, 67, 101 }, { 1, 41, 63 }, { 1, 19, 29 } - }, { // Band 2 - { 31, 154, 185 }, { 17, 127, 175 }, { 6, 96, 145 }, - { 2, 73, 114 }, { 1, 51, 82 }, { 1, 28, 45 } - }, { // Band 3 - { 23, 163, 200 }, { 10, 131, 185 }, { 2, 93, 148 }, - { 1, 67, 111 }, { 1, 41, 69 }, { 1, 14, 24 } - }, { // Band 4 - { 29, 176, 217 }, { 12, 145, 201 }, { 3, 101, 156 }, - { 1, 69, 111 }, { 1, 39, 63 }, { 1, 14, 23 } - }, { // Band 5 - { 57, 192, 233 }, { 25, 154, 215 }, { 6, 109, 167 }, - { 3, 78, 118 }, { 1, 48, 69 }, { 1, 21, 29 } - } - }, { // Inter - { // Band 0 - { 202, 105, 245 }, { 108, 106, 216 }, { 18, 90, 144 } - }, { // Band 1 - { 33, 172, 219 }, { 64, 149, 206 }, { 14, 117, 177 }, - { 5, 90, 141 }, { 2, 61, 95 }, { 1, 37, 57 } - }, { // Band 2 - { 33, 179, 220 }, { 11, 140, 198 }, { 1, 89, 148 }, - { 1, 60, 104 }, { 1, 33, 57 }, { 1, 12, 21 } - }, { // Band 3 - { 30, 181, 221 }, { 8, 141, 198 }, { 1, 
87, 145 }, - { 1, 58, 100 }, { 1, 31, 55 }, { 1, 12, 20 } - }, { // Band 4 - { 32, 186, 224 }, { 7, 142, 198 }, { 1, 86, 143 }, - { 1, 58, 100 }, { 1, 31, 55 }, { 1, 12, 22 } - }, { // Band 5 - { 57, 192, 227 }, { 20, 143, 204 }, { 3, 96, 154 }, - { 1, 68, 112 }, { 1, 42, 69 }, { 1, 19, 32 } - } - } - }, { // UV plane - { // Intra - { // Band 0 - { 212, 35, 215 }, { 113, 47, 169 }, { 29, 48, 105 } - }, { // Band 1 - { 74, 129, 203 }, { 106, 120, 203 }, { 49, 107, 178 }, - { 19, 84, 144 }, { 4, 50, 84 }, { 1, 15, 25 } - }, { // Band 2 - { 71, 172, 217 }, { 44, 141, 209 }, { 15, 102, 173 }, - { 6, 76, 133 }, { 2, 51, 89 }, { 1, 24, 42 } - }, { // Band 3 - { 64, 185, 231 }, { 31, 148, 216 }, { 8, 103, 175 }, - { 3, 74, 131 }, { 1, 46, 81 }, { 1, 18, 30 } - }, { // Band 4 - { 65, 196, 235 }, { 25, 157, 221 }, { 5, 105, 174 }, - { 1, 67, 120 }, { 1, 38, 69 }, { 1, 15, 30 } - }, { // Band 5 - { 65, 204, 238 }, { 30, 156, 224 }, { 7, 107, 177 }, - { 2, 70, 124 }, { 1, 42, 73 }, { 1, 18, 34 } - } - }, { // Inter - { // Band 0 - { 225, 86, 251 }, { 144, 104, 235 }, { 42, 99, 181 } - }, { // Band 1 - { 85, 175, 239 }, { 112, 165, 229 }, { 29, 136, 200 }, - { 12, 103, 162 }, { 6, 77, 123 }, { 2, 53, 84 } - }, { // Band 2 - { 75, 183, 239 }, { 30, 155, 221 }, { 3, 106, 171 }, - { 1, 74, 128 }, { 1, 44, 76 }, { 1, 17, 28 } - }, { // Band 3 - { 73, 185, 240 }, { 27, 159, 222 }, { 2, 107, 172 }, - { 1, 75, 127 }, { 1, 42, 73 }, { 1, 17, 29 } - }, { // Band 4 - { 62, 190, 238 }, { 21, 159, 222 }, { 2, 107, 172 }, - { 1, 72, 122 }, { 1, 40, 71 }, { 1, 18, 32 } - }, { // Band 5 - { 61, 199, 240 }, { 27, 161, 226 }, { 4, 113, 180 }, - { 1, 76, 129 }, { 1, 46, 80 }, { 1, 23, 41 } - } - } - } + { // Y plane + { // Intra + { // Band 0 + { 125, 34, 187 }, + { 52, 41, 133 }, + { 6, 31, 56 } }, + { // Band 1 + { 37, 109, 153 }, + { 51, 102, 147 }, + { 23, 87, 128 }, + { 8, 67, 101 }, + { 1, 41, 63 }, + { 1, 19, 29 } }, + { // Band 2 + { 31, 154, 185 }, + { 17, 127, 175 }, + { 6, 96, 145 }, 
+ { 2, 73, 114 }, + { 1, 51, 82 }, + { 1, 28, 45 } }, + { // Band 3 + { 23, 163, 200 }, + { 10, 131, 185 }, + { 2, 93, 148 }, + { 1, 67, 111 }, + { 1, 41, 69 }, + { 1, 14, 24 } }, + { // Band 4 + { 29, 176, 217 }, + { 12, 145, 201 }, + { 3, 101, 156 }, + { 1, 69, 111 }, + { 1, 39, 63 }, + { 1, 14, 23 } }, + { // Band 5 + { 57, 192, 233 }, + { 25, 154, 215 }, + { 6, 109, 167 }, + { 3, 78, 118 }, + { 1, 48, 69 }, + { 1, 21, 29 } } }, + { // Inter + { // Band 0 + { 202, 105, 245 }, + { 108, 106, 216 }, + { 18, 90, 144 } }, + { // Band 1 + { 33, 172, 219 }, + { 64, 149, 206 }, + { 14, 117, 177 }, + { 5, 90, 141 }, + { 2, 61, 95 }, + { 1, 37, 57 } }, + { // Band 2 + { 33, 179, 220 }, + { 11, 140, 198 }, + { 1, 89, 148 }, + { 1, 60, 104 }, + { 1, 33, 57 }, + { 1, 12, 21 } }, + { // Band 3 + { 30, 181, 221 }, + { 8, 141, 198 }, + { 1, 87, 145 }, + { 1, 58, 100 }, + { 1, 31, 55 }, + { 1, 12, 20 } }, + { // Band 4 + { 32, 186, 224 }, + { 7, 142, 198 }, + { 1, 86, 143 }, + { 1, 58, 100 }, + { 1, 31, 55 }, + { 1, 12, 22 } }, + { // Band 5 + { 57, 192, 227 }, + { 20, 143, 204 }, + { 3, 96, 154 }, + { 1, 68, 112 }, + { 1, 42, 69 }, + { 1, 19, 32 } } } }, + { // UV plane + { // Intra + { // Band 0 + { 212, 35, 215 }, + { 113, 47, 169 }, + { 29, 48, 105 } }, + { // Band 1 + { 74, 129, 203 }, + { 106, 120, 203 }, + { 49, 107, 178 }, + { 19, 84, 144 }, + { 4, 50, 84 }, + { 1, 15, 25 } }, + { // Band 2 + { 71, 172, 217 }, + { 44, 141, 209 }, + { 15, 102, 173 }, + { 6, 76, 133 }, + { 2, 51, 89 }, + { 1, 24, 42 } }, + { // Band 3 + { 64, 185, 231 }, + { 31, 148, 216 }, + { 8, 103, 175 }, + { 3, 74, 131 }, + { 1, 46, 81 }, + { 1, 18, 30 } }, + { // Band 4 + { 65, 196, 235 }, + { 25, 157, 221 }, + { 5, 105, 174 }, + { 1, 67, 120 }, + { 1, 38, 69 }, + { 1, 15, 30 } }, + { // Band 5 + { 65, 204, 238 }, + { 30, 156, 224 }, + { 7, 107, 177 }, + { 2, 70, 124 }, + { 1, 42, 73 }, + { 1, 18, 34 } } }, + { // Inter + { // Band 0 + { 225, 86, 251 }, + { 144, 104, 235 }, + { 42, 99, 181 } }, + { 
// Band 1 + { 85, 175, 239 }, + { 112, 165, 229 }, + { 29, 136, 200 }, + { 12, 103, 162 }, + { 6, 77, 123 }, + { 2, 53, 84 } }, + { // Band 2 + { 75, 183, 239 }, + { 30, 155, 221 }, + { 3, 106, 171 }, + { 1, 74, 128 }, + { 1, 44, 76 }, + { 1, 17, 28 } }, + { // Band 3 + { 73, 185, 240 }, + { 27, 159, 222 }, + { 2, 107, 172 }, + { 1, 75, 127 }, + { 1, 42, 73 }, + { 1, 17, 29 } }, + { // Band 4 + { 62, 190, 238 }, + { 21, 159, 222 }, + { 2, 107, 172 }, + { 1, 72, 122 }, + { 1, 40, 71 }, + { 1, 18, 32 } }, + { // Band 5 + { 61, 199, 240 }, + { 27, 161, 226 }, + { 4, 113, 180 }, + { 1, 76, 129 }, + { 1, 46, 80 }, + { 1, 23, 41 } } } } }; static const vp9_coeff_probs_model default_coef_probs_16x16[PLANE_TYPES] = { - { // Y plane - { // Intra - { // Band 0 - { 7, 27, 153 }, { 5, 30, 95 }, { 1, 16, 30 } - }, { // Band 1 - { 50, 75, 127 }, { 57, 75, 124 }, { 27, 67, 108 }, - { 10, 54, 86 }, { 1, 33, 52 }, { 1, 12, 18 } - }, { // Band 2 - { 43, 125, 151 }, { 26, 108, 148 }, { 7, 83, 122 }, - { 2, 59, 89 }, { 1, 38, 60 }, { 1, 17, 27 } - }, { // Band 3 - { 23, 144, 163 }, { 13, 112, 154 }, { 2, 75, 117 }, - { 1, 50, 81 }, { 1, 31, 51 }, { 1, 14, 23 } - }, { // Band 4 - { 18, 162, 185 }, { 6, 123, 171 }, { 1, 78, 125 }, - { 1, 51, 86 }, { 1, 31, 54 }, { 1, 14, 23 } - }, { // Band 5 - { 15, 199, 227 }, { 3, 150, 204 }, { 1, 91, 146 }, - { 1, 55, 95 }, { 1, 30, 53 }, { 1, 11, 20 } - } - }, { // Inter - { // Band 0 - { 19, 55, 240 }, { 19, 59, 196 }, { 3, 52, 105 } - }, { // Band 1 - { 41, 166, 207 }, { 104, 153, 199 }, { 31, 123, 181 }, - { 14, 101, 152 }, { 5, 72, 106 }, { 1, 36, 52 } - }, { // Band 2 - { 35, 176, 211 }, { 12, 131, 190 }, { 2, 88, 144 }, - { 1, 60, 101 }, { 1, 36, 60 }, { 1, 16, 28 } - }, { // Band 3 - { 28, 183, 213 }, { 8, 134, 191 }, { 1, 86, 142 }, - { 1, 56, 96 }, { 1, 30, 53 }, { 1, 12, 20 } - }, { // Band 4 - { 20, 190, 215 }, { 4, 135, 192 }, { 1, 84, 139 }, - { 1, 53, 91 }, { 1, 28, 49 }, { 1, 11, 20 } - }, { // Band 5 - { 13, 196, 216 }, { 2, 137, 
192 }, { 1, 86, 143 }, - { 1, 57, 99 }, { 1, 32, 56 }, { 1, 13, 24 } - } - } - }, { // UV plane - { // Intra - { // Band 0 - { 211, 29, 217 }, { 96, 47, 156 }, { 22, 43, 87 } - }, { // Band 1 - { 78, 120, 193 }, { 111, 116, 186 }, { 46, 102, 164 }, - { 15, 80, 128 }, { 2, 49, 76 }, { 1, 18, 28 } - }, { // Band 2 - { 71, 161, 203 }, { 42, 132, 192 }, { 10, 98, 150 }, - { 3, 69, 109 }, { 1, 44, 70 }, { 1, 18, 29 } - }, { // Band 3 - { 57, 186, 211 }, { 30, 140, 196 }, { 4, 93, 146 }, - { 1, 62, 102 }, { 1, 38, 65 }, { 1, 16, 27 } - }, { // Band 4 - { 47, 199, 217 }, { 14, 145, 196 }, { 1, 88, 142 }, - { 1, 57, 98 }, { 1, 36, 62 }, { 1, 15, 26 } - }, { // Band 5 - { 26, 219, 229 }, { 5, 155, 207 }, { 1, 94, 151 }, - { 1, 60, 104 }, { 1, 36, 62 }, { 1, 16, 28 } - } - }, { // Inter - { // Band 0 - { 233, 29, 248 }, { 146, 47, 220 }, { 43, 52, 140 } - }, { // Band 1 - { 100, 163, 232 }, { 179, 161, 222 }, { 63, 142, 204 }, - { 37, 113, 174 }, { 26, 89, 137 }, { 18, 68, 97 } - }, { // Band 2 - { 85, 181, 230 }, { 32, 146, 209 }, { 7, 100, 164 }, - { 3, 71, 121 }, { 1, 45, 77 }, { 1, 18, 30 } - }, { // Band 3 - { 65, 187, 230 }, { 20, 148, 207 }, { 2, 97, 159 }, - { 1, 68, 116 }, { 1, 40, 70 }, { 1, 14, 29 } - }, { // Band 4 - { 40, 194, 227 }, { 8, 147, 204 }, { 1, 94, 155 }, - { 1, 65, 112 }, { 1, 39, 66 }, { 1, 14, 26 } - }, { // Band 5 - { 16, 208, 228 }, { 3, 151, 207 }, { 1, 98, 160 }, - { 1, 67, 117 }, { 1, 41, 74 }, { 1, 17, 31 } - } - } - } + { // Y plane + { // Intra + { // Band 0 + { 7, 27, 153 }, + { 5, 30, 95 }, + { 1, 16, 30 } }, + { // Band 1 + { 50, 75, 127 }, + { 57, 75, 124 }, + { 27, 67, 108 }, + { 10, 54, 86 }, + { 1, 33, 52 }, + { 1, 12, 18 } }, + { // Band 2 + { 43, 125, 151 }, + { 26, 108, 148 }, + { 7, 83, 122 }, + { 2, 59, 89 }, + { 1, 38, 60 }, + { 1, 17, 27 } }, + { // Band 3 + { 23, 144, 163 }, + { 13, 112, 154 }, + { 2, 75, 117 }, + { 1, 50, 81 }, + { 1, 31, 51 }, + { 1, 14, 23 } }, + { // Band 4 + { 18, 162, 185 }, + { 6, 123, 171 }, + { 1, 
78, 125 }, + { 1, 51, 86 }, + { 1, 31, 54 }, + { 1, 14, 23 } }, + { // Band 5 + { 15, 199, 227 }, + { 3, 150, 204 }, + { 1, 91, 146 }, + { 1, 55, 95 }, + { 1, 30, 53 }, + { 1, 11, 20 } } }, + { // Inter + { // Band 0 + { 19, 55, 240 }, + { 19, 59, 196 }, + { 3, 52, 105 } }, + { // Band 1 + { 41, 166, 207 }, + { 104, 153, 199 }, + { 31, 123, 181 }, + { 14, 101, 152 }, + { 5, 72, 106 }, + { 1, 36, 52 } }, + { // Band 2 + { 35, 176, 211 }, + { 12, 131, 190 }, + { 2, 88, 144 }, + { 1, 60, 101 }, + { 1, 36, 60 }, + { 1, 16, 28 } }, + { // Band 3 + { 28, 183, 213 }, + { 8, 134, 191 }, + { 1, 86, 142 }, + { 1, 56, 96 }, + { 1, 30, 53 }, + { 1, 12, 20 } }, + { // Band 4 + { 20, 190, 215 }, + { 4, 135, 192 }, + { 1, 84, 139 }, + { 1, 53, 91 }, + { 1, 28, 49 }, + { 1, 11, 20 } }, + { // Band 5 + { 13, 196, 216 }, + { 2, 137, 192 }, + { 1, 86, 143 }, + { 1, 57, 99 }, + { 1, 32, 56 }, + { 1, 13, 24 } } } }, + { // UV plane + { // Intra + { // Band 0 + { 211, 29, 217 }, + { 96, 47, 156 }, + { 22, 43, 87 } }, + { // Band 1 + { 78, 120, 193 }, + { 111, 116, 186 }, + { 46, 102, 164 }, + { 15, 80, 128 }, + { 2, 49, 76 }, + { 1, 18, 28 } }, + { // Band 2 + { 71, 161, 203 }, + { 42, 132, 192 }, + { 10, 98, 150 }, + { 3, 69, 109 }, + { 1, 44, 70 }, + { 1, 18, 29 } }, + { // Band 3 + { 57, 186, 211 }, + { 30, 140, 196 }, + { 4, 93, 146 }, + { 1, 62, 102 }, + { 1, 38, 65 }, + { 1, 16, 27 } }, + { // Band 4 + { 47, 199, 217 }, + { 14, 145, 196 }, + { 1, 88, 142 }, + { 1, 57, 98 }, + { 1, 36, 62 }, + { 1, 15, 26 } }, + { // Band 5 + { 26, 219, 229 }, + { 5, 155, 207 }, + { 1, 94, 151 }, + { 1, 60, 104 }, + { 1, 36, 62 }, + { 1, 16, 28 } } }, + { // Inter + { // Band 0 + { 233, 29, 248 }, + { 146, 47, 220 }, + { 43, 52, 140 } }, + { // Band 1 + { 100, 163, 232 }, + { 179, 161, 222 }, + { 63, 142, 204 }, + { 37, 113, 174 }, + { 26, 89, 137 }, + { 18, 68, 97 } }, + { // Band 2 + { 85, 181, 230 }, + { 32, 146, 209 }, + { 7, 100, 164 }, + { 3, 71, 121 }, + { 1, 45, 77 }, + { 1, 18, 30 } }, + { 
// Band 3 + { 65, 187, 230 }, + { 20, 148, 207 }, + { 2, 97, 159 }, + { 1, 68, 116 }, + { 1, 40, 70 }, + { 1, 14, 29 } }, + { // Band 4 + { 40, 194, 227 }, + { 8, 147, 204 }, + { 1, 94, 155 }, + { 1, 65, 112 }, + { 1, 39, 66 }, + { 1, 14, 26 } }, + { // Band 5 + { 16, 208, 228 }, + { 3, 151, 207 }, + { 1, 98, 160 }, + { 1, 67, 117 }, + { 1, 41, 74 }, + { 1, 17, 31 } } } } }; static const vp9_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = { - { // Y plane - { // Intra - { // Band 0 - { 17, 38, 140 }, { 7, 34, 80 }, { 1, 17, 29 } - }, { // Band 1 - { 37, 75, 128 }, { 41, 76, 128 }, { 26, 66, 116 }, - { 12, 52, 94 }, { 2, 32, 55 }, { 1, 10, 16 } - }, { // Band 2 - { 50, 127, 154 }, { 37, 109, 152 }, { 16, 82, 121 }, - { 5, 59, 85 }, { 1, 35, 54 }, { 1, 13, 20 } - }, { // Band 3 - { 40, 142, 167 }, { 17, 110, 157 }, { 2, 71, 112 }, - { 1, 44, 72 }, { 1, 27, 45 }, { 1, 11, 17 } - }, { // Band 4 - { 30, 175, 188 }, { 9, 124, 169 }, { 1, 74, 116 }, - { 1, 48, 78 }, { 1, 30, 49 }, { 1, 11, 18 } - }, { // Band 5 - { 10, 222, 223 }, { 2, 150, 194 }, { 1, 83, 128 }, - { 1, 48, 79 }, { 1, 27, 45 }, { 1, 11, 17 } - } - }, { // Inter - { // Band 0 - { 36, 41, 235 }, { 29, 36, 193 }, { 10, 27, 111 } - }, { // Band 1 - { 85, 165, 222 }, { 177, 162, 215 }, { 110, 135, 195 }, - { 57, 113, 168 }, { 23, 83, 120 }, { 10, 49, 61 } - }, { // Band 2 - { 85, 190, 223 }, { 36, 139, 200 }, { 5, 90, 146 }, - { 1, 60, 103 }, { 1, 38, 65 }, { 1, 18, 30 } - }, { // Band 3 - { 72, 202, 223 }, { 23, 141, 199 }, { 2, 86, 140 }, - { 1, 56, 97 }, { 1, 36, 61 }, { 1, 16, 27 } - }, { // Band 4 - { 55, 218, 225 }, { 13, 145, 200 }, { 1, 86, 141 }, - { 1, 57, 99 }, { 1, 35, 61 }, { 1, 13, 22 } - }, { // Band 5 - { 15, 235, 212 }, { 1, 132, 184 }, { 1, 84, 139 }, - { 1, 57, 97 }, { 1, 34, 56 }, { 1, 14, 23 } - } - } - }, { // UV plane - { // Intra - { // Band 0 - { 181, 21, 201 }, { 61, 37, 123 }, { 10, 38, 71 } - }, { // Band 1 - { 47, 106, 172 }, { 95, 104, 173 }, { 42, 93, 159 }, - { 18, 77, 
131 }, { 4, 50, 81 }, { 1, 17, 23 } - }, { // Band 2 - { 62, 147, 199 }, { 44, 130, 189 }, { 28, 102, 154 }, - { 18, 75, 115 }, { 2, 44, 65 }, { 1, 12, 19 } - }, { // Band 3 - { 55, 153, 210 }, { 24, 130, 194 }, { 3, 93, 146 }, - { 1, 61, 97 }, { 1, 31, 50 }, { 1, 10, 16 } - }, { // Band 4 - { 49, 186, 223 }, { 17, 148, 204 }, { 1, 96, 142 }, - { 1, 53, 83 }, { 1, 26, 44 }, { 1, 11, 17 } - }, { // Band 5 - { 13, 217, 212 }, { 2, 136, 180 }, { 1, 78, 124 }, - { 1, 50, 83 }, { 1, 29, 49 }, { 1, 14, 23 } - } - }, { // Inter - { // Band 0 - { 197, 13, 247 }, { 82, 17, 222 }, { 25, 17, 162 } - }, { // Band 1 - { 126, 186, 247 }, { 234, 191, 243 }, { 176, 177, 234 }, - { 104, 158, 220 }, { 66, 128, 186 }, { 55, 90, 137 } - }, { // Band 2 - { 111, 197, 242 }, { 46, 158, 219 }, { 9, 104, 171 }, - { 2, 65, 125 }, { 1, 44, 80 }, { 1, 17, 91 } - }, { // Band 3 - { 104, 208, 245 }, { 39, 168, 224 }, { 3, 109, 162 }, - { 1, 79, 124 }, { 1, 50, 102 }, { 1, 43, 102 } - }, { // Band 4 - { 84, 220, 246 }, { 31, 177, 231 }, { 2, 115, 180 }, - { 1, 79, 134 }, { 1, 55, 77 }, { 1, 60, 79 } - }, { // Band 5 - { 43, 243, 240 }, { 8, 180, 217 }, { 1, 115, 166 }, - { 1, 84, 121 }, { 1, 51, 67 }, { 1, 16, 6 } - } - } - } + { // Y plane + { // Intra + { // Band 0 + { 17, 38, 140 }, + { 7, 34, 80 }, + { 1, 17, 29 } }, + { // Band 1 + { 37, 75, 128 }, + { 41, 76, 128 }, + { 26, 66, 116 }, + { 12, 52, 94 }, + { 2, 32, 55 }, + { 1, 10, 16 } }, + { // Band 2 + { 50, 127, 154 }, + { 37, 109, 152 }, + { 16, 82, 121 }, + { 5, 59, 85 }, + { 1, 35, 54 }, + { 1, 13, 20 } }, + { // Band 3 + { 40, 142, 167 }, + { 17, 110, 157 }, + { 2, 71, 112 }, + { 1, 44, 72 }, + { 1, 27, 45 }, + { 1, 11, 17 } }, + { // Band 4 + { 30, 175, 188 }, + { 9, 124, 169 }, + { 1, 74, 116 }, + { 1, 48, 78 }, + { 1, 30, 49 }, + { 1, 11, 18 } }, + { // Band 5 + { 10, 222, 223 }, + { 2, 150, 194 }, + { 1, 83, 128 }, + { 1, 48, 79 }, + { 1, 27, 45 }, + { 1, 11, 17 } } }, + { // Inter + { // Band 0 + { 36, 41, 235 }, + { 29, 36, 193 
}, + { 10, 27, 111 } }, + { // Band 1 + { 85, 165, 222 }, + { 177, 162, 215 }, + { 110, 135, 195 }, + { 57, 113, 168 }, + { 23, 83, 120 }, + { 10, 49, 61 } }, + { // Band 2 + { 85, 190, 223 }, + { 36, 139, 200 }, + { 5, 90, 146 }, + { 1, 60, 103 }, + { 1, 38, 65 }, + { 1, 18, 30 } }, + { // Band 3 + { 72, 202, 223 }, + { 23, 141, 199 }, + { 2, 86, 140 }, + { 1, 56, 97 }, + { 1, 36, 61 }, + { 1, 16, 27 } }, + { // Band 4 + { 55, 218, 225 }, + { 13, 145, 200 }, + { 1, 86, 141 }, + { 1, 57, 99 }, + { 1, 35, 61 }, + { 1, 13, 22 } }, + { // Band 5 + { 15, 235, 212 }, + { 1, 132, 184 }, + { 1, 84, 139 }, + { 1, 57, 97 }, + { 1, 34, 56 }, + { 1, 14, 23 } } } }, + { // UV plane + { // Intra + { // Band 0 + { 181, 21, 201 }, + { 61, 37, 123 }, + { 10, 38, 71 } }, + { // Band 1 + { 47, 106, 172 }, + { 95, 104, 173 }, + { 42, 93, 159 }, + { 18, 77, 131 }, + { 4, 50, 81 }, + { 1, 17, 23 } }, + { // Band 2 + { 62, 147, 199 }, + { 44, 130, 189 }, + { 28, 102, 154 }, + { 18, 75, 115 }, + { 2, 44, 65 }, + { 1, 12, 19 } }, + { // Band 3 + { 55, 153, 210 }, + { 24, 130, 194 }, + { 3, 93, 146 }, + { 1, 61, 97 }, + { 1, 31, 50 }, + { 1, 10, 16 } }, + { // Band 4 + { 49, 186, 223 }, + { 17, 148, 204 }, + { 1, 96, 142 }, + { 1, 53, 83 }, + { 1, 26, 44 }, + { 1, 11, 17 } }, + { // Band 5 + { 13, 217, 212 }, + { 2, 136, 180 }, + { 1, 78, 124 }, + { 1, 50, 83 }, + { 1, 29, 49 }, + { 1, 14, 23 } } }, + { // Inter + { // Band 0 + { 197, 13, 247 }, + { 82, 17, 222 }, + { 25, 17, 162 } }, + { // Band 1 + { 126, 186, 247 }, + { 234, 191, 243 }, + { 176, 177, 234 }, + { 104, 158, 220 }, + { 66, 128, 186 }, + { 55, 90, 137 } }, + { // Band 2 + { 111, 197, 242 }, + { 46, 158, 219 }, + { 9, 104, 171 }, + { 2, 65, 125 }, + { 1, 44, 80 }, + { 1, 17, 91 } }, + { // Band 3 + { 104, 208, 245 }, + { 39, 168, 224 }, + { 3, 109, 162 }, + { 1, 79, 124 }, + { 1, 50, 102 }, + { 1, 43, 102 } }, + { // Band 4 + { 84, 220, 246 }, + { 31, 177, 231 }, + { 2, 115, 180 }, + { 1, 79, 134 }, + { 1, 55, 77 }, + { 1, 
60, 79 } }, + { // Band 5 + { 43, 243, 240 }, + { 8, 180, 217 }, + { 1, 115, 166 }, + { 1, 84, 121 }, + { 1, 51, 67 }, + { 1, 16, 6 } } } } }; static void extend_to_full_distribution(vpx_prob *probs, vpx_prob p) { - // TODO(aconverse): model[PIVOT_NODE] should never be zero. - // https://code.google.com/p/webm/issues/detail?id=1089 - memcpy(probs, vp9_pareto8_full[p == 0 ? 254 : p - 1], - MODEL_NODES * sizeof(vpx_prob)); + assert(p != 0); + memcpy(probs, vp9_pareto8_full[p - 1], MODEL_NODES * sizeof(vpx_prob)); } void vp9_model_to_full_probs(const vpx_prob *model, vpx_prob *full) { @@ -761,7 +1057,7 @@ static void adapt_coef_probs(VP9_COMMON *cm, TX_SIZE tx_size, vp9_coeff_probs_model *const probs = cm->fc->coef_probs[tx_size]; const vp9_coeff_probs_model *const pre_probs = pre_fc->coef_probs[tx_size]; vp9_coeff_count_model *counts = cm->counts.coef[tx_size]; - unsigned int (*eob_counts)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] = + unsigned int(*eob_counts)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] = cm->counts.eob_branch[tx_size]; int i, j, k, l, m; @@ -774,14 +1070,12 @@ static void adapt_coef_probs(VP9_COMMON *cm, TX_SIZE tx_size, const int n2 = counts[i][j][k][l][TWO_TOKEN]; const int neob = counts[i][j][k][l][EOB_MODEL_TOKEN]; const unsigned int branch_ct[UNCONSTRAINED_NODES][2] = { - { neob, eob_counts[i][j][k][l] - neob }, - { n0, n1 + n2 }, - { n1, n2 } + { neob, eob_counts[i][j][k][l] - neob }, { n0, n1 + n2 }, { n1, n2 } }; for (m = 0; m < UNCONSTRAINED_NODES; ++m) - probs[i][j][k][l][m] = merge_probs(pre_probs[i][j][k][l][m], - branch_ct[m], - count_sat, update_factor); + probs[i][j][k][l][m] = + merge_probs(pre_probs[i][j][k][l][m], branch_ct[m], count_sat, + update_factor); } } @@ -793,7 +1087,7 @@ void vp9_adapt_coef_probs(VP9_COMMON *cm) { update_factor = COEF_MAX_UPDATE_FACTOR_KEY; count_sat = COEF_COUNT_SAT_KEY; } else if (cm->last_frame_type == KEY_FRAME) { - update_factor = COEF_MAX_UPDATE_FACTOR_AFTER_KEY; /* adapt quickly */ + update_factor = 
COEF_MAX_UPDATE_FACTOR_AFTER_KEY; /* adapt quickly */ count_sat = COEF_COUNT_SAT_AFTER_KEY; } else { update_factor = COEF_MAX_UPDATE_FACTOR; diff --git a/libs/libvpx/vp9/common/vp9_entropy.h b/libs/libvpx/vp9/common/vp9_entropy.h index 63b3bff5d9..1da4911668 100644 --- a/libs/libvpx/vp9/common/vp9_entropy.h +++ b/libs/libvpx/vp9/common/vp9_entropy.h @@ -24,18 +24,18 @@ extern "C" { #define DIFF_UPDATE_PROB 252 // Coefficient token alphabet -#define ZERO_TOKEN 0 // 0 Extra Bits 0+0 -#define ONE_TOKEN 1 // 1 Extra Bits 0+1 -#define TWO_TOKEN 2 // 2 Extra Bits 0+1 -#define THREE_TOKEN 3 // 3 Extra Bits 0+1 -#define FOUR_TOKEN 4 // 4 Extra Bits 0+1 +#define ZERO_TOKEN 0 // 0 Extra Bits 0+0 +#define ONE_TOKEN 1 // 1 Extra Bits 0+1 +#define TWO_TOKEN 2 // 2 Extra Bits 0+1 +#define THREE_TOKEN 3 // 3 Extra Bits 0+1 +#define FOUR_TOKEN 4 // 4 Extra Bits 0+1 #define CATEGORY1_TOKEN 5 // 5-6 Extra Bits 1+1 #define CATEGORY2_TOKEN 6 // 7-10 Extra Bits 2+1 #define CATEGORY3_TOKEN 7 // 11-18 Extra Bits 3+1 #define CATEGORY4_TOKEN 8 // 19-34 Extra Bits 4+1 #define CATEGORY5_TOKEN 9 // 35-66 Extra Bits 5+1 #define CATEGORY6_TOKEN 10 // 67+ Extra Bits 14+1 -#define EOB_TOKEN 11 // EOB Extra Bits 0+0 +#define EOB_TOKEN 11 // EOB Extra Bits 0+0 #define ENTROPY_TOKENS 12 @@ -43,12 +43,12 @@ extern "C" { DECLARE_ALIGNED(16, extern const uint8_t, vp9_pt_energy_class[ENTROPY_TOKENS]); -#define CAT1_MIN_VAL 5 -#define CAT2_MIN_VAL 7 -#define CAT3_MIN_VAL 11 -#define CAT4_MIN_VAL 19 -#define CAT5_MIN_VAL 35 -#define CAT6_MIN_VAL 67 +#define CAT1_MIN_VAL 5 +#define CAT2_MIN_VAL 7 +#define CAT3_MIN_VAL 11 +#define CAT4_MIN_VAL 19 +#define CAT5_MIN_VAL 35 +#define CAT6_MIN_VAL 67 // Extra bit probabilities. 
DECLARE_ALIGNED(16, extern const uint8_t, vp9_cat1_prob[1]); @@ -75,10 +75,10 @@ DECLARE_ALIGNED(16, extern const uint8_t, vp9_cat6_prob_high12[18]); #define EOB_MODEL_TOKEN 3 -#define DCT_MAX_VALUE 16384 +#define DCT_MAX_VALUE 16384 #if CONFIG_VP9_HIGHBITDEPTH -#define DCT_MAX_VALUE_HIGH10 65536 -#define DCT_MAX_VALUE_HIGH12 262144 +#define DCT_MAX_VALUE_HIGH10 65536 +#define DCT_MAX_VALUE_HIGH12 262144 #endif // CONFIG_VP9_HIGHBITDEPTH /* Coefficients are predicted via a 3-dimensional probability table. */ @@ -114,8 +114,8 @@ typedef unsigned int vp9_coeff_count[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] typedef unsigned int vp9_coeff_stats[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] [ENTROPY_NODES][2]; -#define SUBEXP_PARAM 4 /* Subexponential code parameter */ -#define MODULUS_PARAM 13 /* Modulus parameter */ +#define SUBEXP_PARAM 4 /* Subexponential code parameter */ +#define MODULUS_PARAM 13 /* Modulus parameter */ struct VP9Common; void vp9_default_coef_probs(struct VP9Common *cm); @@ -140,16 +140,16 @@ static INLINE const uint8_t *get_band_translate(TX_SIZE tx_size) { #define COEFF_PROB_MODELS 255 -#define UNCONSTRAINED_NODES 3 +#define UNCONSTRAINED_NODES 3 -#define PIVOT_NODE 2 // which node is pivot +#define PIVOT_NODE 2 // which node is pivot #define MODEL_NODES (ENTROPY_NODES - UNCONSTRAINED_NODES) extern const vpx_tree_index vp9_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)]; extern const vpx_prob vp9_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES]; -typedef vpx_prob vp9_coeff_probs_model[REF_TYPES][COEF_BANDS] - [COEFF_CONTEXTS][UNCONSTRAINED_NODES]; +typedef vpx_prob vp9_coeff_probs_model[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] + [UNCONSTRAINED_NODES]; typedef unsigned int vp9_coeff_count_model[REF_TYPES][COEF_BANDS] [COEFF_CONTEXTS] @@ -175,19 +175,17 @@ static INLINE int get_entropy_context(TX_SIZE tx_size, const ENTROPY_CONTEXT *a, break; case TX_8X8: above_ec = !!*(const uint16_t *)a; - left_ec = !!*(const uint16_t *)l; + left_ec = !!*(const uint16_t *)l; break; 
case TX_16X16: above_ec = !!*(const uint32_t *)a; - left_ec = !!*(const uint32_t *)l; + left_ec = !!*(const uint32_t *)l; break; case TX_32X32: above_ec = !!*(const uint64_t *)a; - left_ec = !!*(const uint64_t *)l; - break; - default: - assert(0 && "Invalid transform size."); + left_ec = !!*(const uint64_t *)l; break; + default: assert(0 && "Invalid transform size."); break; } return combine_entropy_contexts(above_ec, left_ec); diff --git a/libs/libvpx/vp9/common/vp9_entropymode.c b/libs/libvpx/vp9/common/vp9_entropymode.c index 670348bafd..22365efc50 100644 --- a/libs/libvpx/vp9/common/vp9_entropymode.c +++ b/libs/libvpx/vp9/common/vp9_entropymode.c @@ -14,234 +14,250 @@ #include "vp9/common/vp9_seg_common.h" const vpx_prob vp9_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] = { - { // above = dc - { 137, 30, 42, 148, 151, 207, 70, 52, 91 }, // left = dc - { 92, 45, 102, 136, 116, 180, 74, 90, 100 }, // left = v - { 73, 32, 19, 187, 222, 215, 46, 34, 100 }, // left = h - { 91, 30, 32, 116, 121, 186, 93, 86, 94 }, // left = d45 - { 72, 35, 36, 149, 68, 206, 68, 63, 105 }, // left = d135 - { 73, 31, 28, 138, 57, 124, 55, 122, 151 }, // left = d117 - { 67, 23, 21, 140, 126, 197, 40, 37, 171 }, // left = d153 - { 86, 27, 28, 128, 154, 212, 45, 43, 53 }, // left = d207 - { 74, 32, 27, 107, 86, 160, 63, 134, 102 }, // left = d63 - { 59, 67, 44, 140, 161, 202, 78, 67, 119 } // left = tm - }, { // above = v - { 63, 36, 126, 146, 123, 158, 60, 90, 96 }, // left = dc - { 43, 46, 168, 134, 107, 128, 69, 142, 92 }, // left = v - { 44, 29, 68, 159, 201, 177, 50, 57, 77 }, // left = h - { 58, 38, 76, 114, 97, 172, 78, 133, 92 }, // left = d45 - { 46, 41, 76, 140, 63, 184, 69, 112, 57 }, // left = d135 - { 38, 32, 85, 140, 46, 112, 54, 151, 133 }, // left = d117 - { 39, 27, 61, 131, 110, 175, 44, 75, 136 }, // left = d153 - { 52, 30, 74, 113, 130, 175, 51, 64, 58 }, // left = d207 - { 47, 35, 80, 100, 74, 143, 64, 163, 74 }, // left = d63 - { 36, 61, 116, 114, 128, 
162, 80, 125, 82 } // left = tm - }, { // above = h - { 82, 26, 26, 171, 208, 204, 44, 32, 105 }, // left = dc - { 55, 44, 68, 166, 179, 192, 57, 57, 108 }, // left = v - { 42, 26, 11, 199, 241, 228, 23, 15, 85 }, // left = h - { 68, 42, 19, 131, 160, 199, 55, 52, 83 }, // left = d45 - { 58, 50, 25, 139, 115, 232, 39, 52, 118 }, // left = d135 - { 50, 35, 33, 153, 104, 162, 64, 59, 131 }, // left = d117 - { 44, 24, 16, 150, 177, 202, 33, 19, 156 }, // left = d153 - { 55, 27, 12, 153, 203, 218, 26, 27, 49 }, // left = d207 - { 53, 49, 21, 110, 116, 168, 59, 80, 76 }, // left = d63 - { 38, 72, 19, 168, 203, 212, 50, 50, 107 } // left = tm - }, { // above = d45 - { 103, 26, 36, 129, 132, 201, 83, 80, 93 }, // left = dc - { 59, 38, 83, 112, 103, 162, 98, 136, 90 }, // left = v - { 62, 30, 23, 158, 200, 207, 59, 57, 50 }, // left = h - { 67, 30, 29, 84, 86, 191, 102, 91, 59 }, // left = d45 - { 60, 32, 33, 112, 71, 220, 64, 89, 104 }, // left = d135 - { 53, 26, 34, 130, 56, 149, 84, 120, 103 }, // left = d117 - { 53, 21, 23, 133, 109, 210, 56, 77, 172 }, // left = d153 - { 77, 19, 29, 112, 142, 228, 55, 66, 36 }, // left = d207 - { 61, 29, 29, 93, 97, 165, 83, 175, 162 }, // left = d63 - { 47, 47, 43, 114, 137, 181, 100, 99, 95 } // left = tm - }, { // above = d135 - { 69, 23, 29, 128, 83, 199, 46, 44, 101 }, // left = dc - { 53, 40, 55, 139, 69, 183, 61, 80, 110 }, // left = v - { 40, 29, 19, 161, 180, 207, 43, 24, 91 }, // left = h - { 60, 34, 19, 105, 61, 198, 53, 64, 89 }, // left = d45 - { 52, 31, 22, 158, 40, 209, 58, 62, 89 }, // left = d135 - { 44, 31, 29, 147, 46, 158, 56, 102, 198 }, // left = d117 - { 35, 19, 12, 135, 87, 209, 41, 45, 167 }, // left = d153 - { 55, 25, 21, 118, 95, 215, 38, 39, 66 }, // left = d207 - { 51, 38, 25, 113, 58, 164, 70, 93, 97 }, // left = d63 - { 47, 54, 34, 146, 108, 203, 72, 103, 151 } // left = tm - }, { // above = d117 - { 64, 19, 37, 156, 66, 138, 49, 95, 133 }, // left = dc - { 46, 27, 80, 150, 55, 124, 55, 121, 135 }, // 
left = v - { 36, 23, 27, 165, 149, 166, 54, 64, 118 }, // left = h - { 53, 21, 36, 131, 63, 163, 60, 109, 81 }, // left = d45 - { 40, 26, 35, 154, 40, 185, 51, 97, 123 }, // left = d135 - { 35, 19, 34, 179, 19, 97, 48, 129, 124 }, // left = d117 - { 36, 20, 26, 136, 62, 164, 33, 77, 154 }, // left = d153 - { 45, 18, 32, 130, 90, 157, 40, 79, 91 }, // left = d207 - { 45, 26, 28, 129, 45, 129, 49, 147, 123 }, // left = d63 - { 38, 44, 51, 136, 74, 162, 57, 97, 121 } // left = tm - }, { // above = d153 - { 75, 17, 22, 136, 138, 185, 32, 34, 166 }, // left = dc - { 56, 39, 58, 133, 117, 173, 48, 53, 187 }, // left = v - { 35, 21, 12, 161, 212, 207, 20, 23, 145 }, // left = h - { 56, 29, 19, 117, 109, 181, 55, 68, 112 }, // left = d45 - { 47, 29, 17, 153, 64, 220, 59, 51, 114 }, // left = d135 - { 46, 16, 24, 136, 76, 147, 41, 64, 172 }, // left = d117 - { 34, 17, 11, 108, 152, 187, 13, 15, 209 }, // left = d153 - { 51, 24, 14, 115, 133, 209, 32, 26, 104 }, // left = d207 - { 55, 30, 18, 122, 79, 179, 44, 88, 116 }, // left = d63 - { 37, 49, 25, 129, 168, 164, 41, 54, 148 } // left = tm - }, { // above = d207 - { 82, 22, 32, 127, 143, 213, 39, 41, 70 }, // left = dc - { 62, 44, 61, 123, 105, 189, 48, 57, 64 }, // left = v - { 47, 25, 17, 175, 222, 220, 24, 30, 86 }, // left = h - { 68, 36, 17, 106, 102, 206, 59, 74, 74 }, // left = d45 - { 57, 39, 23, 151, 68, 216, 55, 63, 58 }, // left = d135 - { 49, 30, 35, 141, 70, 168, 82, 40, 115 }, // left = d117 - { 51, 25, 15, 136, 129, 202, 38, 35, 139 }, // left = d153 - { 68, 26, 16, 111, 141, 215, 29, 28, 28 }, // left = d207 - { 59, 39, 19, 114, 75, 180, 77, 104, 42 }, // left = d63 - { 40, 61, 26, 126, 152, 206, 61, 59, 93 } // left = tm - }, { // above = d63 - { 78, 23, 39, 111, 117, 170, 74, 124, 94 }, // left = dc - { 48, 34, 86, 101, 92, 146, 78, 179, 134 }, // left = v - { 47, 22, 24, 138, 187, 178, 68, 69, 59 }, // left = h - { 56, 25, 33, 105, 112, 187, 95, 177, 129 }, // left = d45 - { 48, 31, 27, 114, 63, 183, 82, 
116, 56 }, // left = d135 - { 43, 28, 37, 121, 63, 123, 61, 192, 169 }, // left = d117 - { 42, 17, 24, 109, 97, 177, 56, 76, 122 }, // left = d153 - { 58, 18, 28, 105, 139, 182, 70, 92, 63 }, // left = d207 - { 46, 23, 32, 74, 86, 150, 67, 183, 88 }, // left = d63 - { 36, 38, 48, 92, 122, 165, 88, 137, 91 } // left = tm - }, { // above = tm - { 65, 70, 60, 155, 159, 199, 61, 60, 81 }, // left = dc - { 44, 78, 115, 132, 119, 173, 71, 112, 93 }, // left = v - { 39, 38, 21, 184, 227, 206, 42, 32, 64 }, // left = h - { 58, 47, 36, 124, 137, 193, 80, 82, 78 }, // left = d45 - { 49, 50, 35, 144, 95, 205, 63, 78, 59 }, // left = d135 - { 41, 53, 52, 148, 71, 142, 65, 128, 51 }, // left = d117 - { 40, 36, 28, 143, 143, 202, 40, 55, 137 }, // left = d153 - { 52, 34, 29, 129, 183, 227, 42, 35, 43 }, // left = d207 - { 42, 44, 44, 104, 105, 164, 64, 130, 80 }, // left = d63 - { 43, 81, 53, 140, 169, 204, 68, 84, 72 } // left = tm + { + // above = dc + { 137, 30, 42, 148, 151, 207, 70, 52, 91 }, // left = dc + { 92, 45, 102, 136, 116, 180, 74, 90, 100 }, // left = v + { 73, 32, 19, 187, 222, 215, 46, 34, 100 }, // left = h + { 91, 30, 32, 116, 121, 186, 93, 86, 94 }, // left = d45 + { 72, 35, 36, 149, 68, 206, 68, 63, 105 }, // left = d135 + { 73, 31, 28, 138, 57, 124, 55, 122, 151 }, // left = d117 + { 67, 23, 21, 140, 126, 197, 40, 37, 171 }, // left = d153 + { 86, 27, 28, 128, 154, 212, 45, 43, 53 }, // left = d207 + { 74, 32, 27, 107, 86, 160, 63, 134, 102 }, // left = d63 + { 59, 67, 44, 140, 161, 202, 78, 67, 119 } // left = tm + }, + { + // above = v + { 63, 36, 126, 146, 123, 158, 60, 90, 96 }, // left = dc + { 43, 46, 168, 134, 107, 128, 69, 142, 92 }, // left = v + { 44, 29, 68, 159, 201, 177, 50, 57, 77 }, // left = h + { 58, 38, 76, 114, 97, 172, 78, 133, 92 }, // left = d45 + { 46, 41, 76, 140, 63, 184, 69, 112, 57 }, // left = d135 + { 38, 32, 85, 140, 46, 112, 54, 151, 133 }, // left = d117 + { 39, 27, 61, 131, 110, 175, 44, 75, 136 }, // left = d153 + { 52, 30, 
74, 113, 130, 175, 51, 64, 58 }, // left = d207 + { 47, 35, 80, 100, 74, 143, 64, 163, 74 }, // left = d63 + { 36, 61, 116, 114, 128, 162, 80, 125, 82 } // left = tm + }, + { + // above = h + { 82, 26, 26, 171, 208, 204, 44, 32, 105 }, // left = dc + { 55, 44, 68, 166, 179, 192, 57, 57, 108 }, // left = v + { 42, 26, 11, 199, 241, 228, 23, 15, 85 }, // left = h + { 68, 42, 19, 131, 160, 199, 55, 52, 83 }, // left = d45 + { 58, 50, 25, 139, 115, 232, 39, 52, 118 }, // left = d135 + { 50, 35, 33, 153, 104, 162, 64, 59, 131 }, // left = d117 + { 44, 24, 16, 150, 177, 202, 33, 19, 156 }, // left = d153 + { 55, 27, 12, 153, 203, 218, 26, 27, 49 }, // left = d207 + { 53, 49, 21, 110, 116, 168, 59, 80, 76 }, // left = d63 + { 38, 72, 19, 168, 203, 212, 50, 50, 107 } // left = tm + }, + { + // above = d45 + { 103, 26, 36, 129, 132, 201, 83, 80, 93 }, // left = dc + { 59, 38, 83, 112, 103, 162, 98, 136, 90 }, // left = v + { 62, 30, 23, 158, 200, 207, 59, 57, 50 }, // left = h + { 67, 30, 29, 84, 86, 191, 102, 91, 59 }, // left = d45 + { 60, 32, 33, 112, 71, 220, 64, 89, 104 }, // left = d135 + { 53, 26, 34, 130, 56, 149, 84, 120, 103 }, // left = d117 + { 53, 21, 23, 133, 109, 210, 56, 77, 172 }, // left = d153 + { 77, 19, 29, 112, 142, 228, 55, 66, 36 }, // left = d207 + { 61, 29, 29, 93, 97, 165, 83, 175, 162 }, // left = d63 + { 47, 47, 43, 114, 137, 181, 100, 99, 95 } // left = tm + }, + { + // above = d135 + { 69, 23, 29, 128, 83, 199, 46, 44, 101 }, // left = dc + { 53, 40, 55, 139, 69, 183, 61, 80, 110 }, // left = v + { 40, 29, 19, 161, 180, 207, 43, 24, 91 }, // left = h + { 60, 34, 19, 105, 61, 198, 53, 64, 89 }, // left = d45 + { 52, 31, 22, 158, 40, 209, 58, 62, 89 }, // left = d135 + { 44, 31, 29, 147, 46, 158, 56, 102, 198 }, // left = d117 + { 35, 19, 12, 135, 87, 209, 41, 45, 167 }, // left = d153 + { 55, 25, 21, 118, 95, 215, 38, 39, 66 }, // left = d207 + { 51, 38, 25, 113, 58, 164, 70, 93, 97 }, // left = d63 + { 47, 54, 34, 146, 108, 203, 72, 103, 151 } 
// left = tm + }, + { + // above = d117 + { 64, 19, 37, 156, 66, 138, 49, 95, 133 }, // left = dc + { 46, 27, 80, 150, 55, 124, 55, 121, 135 }, // left = v + { 36, 23, 27, 165, 149, 166, 54, 64, 118 }, // left = h + { 53, 21, 36, 131, 63, 163, 60, 109, 81 }, // left = d45 + { 40, 26, 35, 154, 40, 185, 51, 97, 123 }, // left = d135 + { 35, 19, 34, 179, 19, 97, 48, 129, 124 }, // left = d117 + { 36, 20, 26, 136, 62, 164, 33, 77, 154 }, // left = d153 + { 45, 18, 32, 130, 90, 157, 40, 79, 91 }, // left = d207 + { 45, 26, 28, 129, 45, 129, 49, 147, 123 }, // left = d63 + { 38, 44, 51, 136, 74, 162, 57, 97, 121 } // left = tm + }, + { + // above = d153 + { 75, 17, 22, 136, 138, 185, 32, 34, 166 }, // left = dc + { 56, 39, 58, 133, 117, 173, 48, 53, 187 }, // left = v + { 35, 21, 12, 161, 212, 207, 20, 23, 145 }, // left = h + { 56, 29, 19, 117, 109, 181, 55, 68, 112 }, // left = d45 + { 47, 29, 17, 153, 64, 220, 59, 51, 114 }, // left = d135 + { 46, 16, 24, 136, 76, 147, 41, 64, 172 }, // left = d117 + { 34, 17, 11, 108, 152, 187, 13, 15, 209 }, // left = d153 + { 51, 24, 14, 115, 133, 209, 32, 26, 104 }, // left = d207 + { 55, 30, 18, 122, 79, 179, 44, 88, 116 }, // left = d63 + { 37, 49, 25, 129, 168, 164, 41, 54, 148 } // left = tm + }, + { + // above = d207 + { 82, 22, 32, 127, 143, 213, 39, 41, 70 }, // left = dc + { 62, 44, 61, 123, 105, 189, 48, 57, 64 }, // left = v + { 47, 25, 17, 175, 222, 220, 24, 30, 86 }, // left = h + { 68, 36, 17, 106, 102, 206, 59, 74, 74 }, // left = d45 + { 57, 39, 23, 151, 68, 216, 55, 63, 58 }, // left = d135 + { 49, 30, 35, 141, 70, 168, 82, 40, 115 }, // left = d117 + { 51, 25, 15, 136, 129, 202, 38, 35, 139 }, // left = d153 + { 68, 26, 16, 111, 141, 215, 29, 28, 28 }, // left = d207 + { 59, 39, 19, 114, 75, 180, 77, 104, 42 }, // left = d63 + { 40, 61, 26, 126, 152, 206, 61, 59, 93 } // left = tm + }, + { + // above = d63 + { 78, 23, 39, 111, 117, 170, 74, 124, 94 }, // left = dc + { 48, 34, 86, 101, 92, 146, 78, 179, 134 }, // 
left = v + { 47, 22, 24, 138, 187, 178, 68, 69, 59 }, // left = h + { 56, 25, 33, 105, 112, 187, 95, 177, 129 }, // left = d45 + { 48, 31, 27, 114, 63, 183, 82, 116, 56 }, // left = d135 + { 43, 28, 37, 121, 63, 123, 61, 192, 169 }, // left = d117 + { 42, 17, 24, 109, 97, 177, 56, 76, 122 }, // left = d153 + { 58, 18, 28, 105, 139, 182, 70, 92, 63 }, // left = d207 + { 46, 23, 32, 74, 86, 150, 67, 183, 88 }, // left = d63 + { 36, 38, 48, 92, 122, 165, 88, 137, 91 } // left = tm + }, + { + // above = tm + { 65, 70, 60, 155, 159, 199, 61, 60, 81 }, // left = dc + { 44, 78, 115, 132, 119, 173, 71, 112, 93 }, // left = v + { 39, 38, 21, 184, 227, 206, 42, 32, 64 }, // left = h + { 58, 47, 36, 124, 137, 193, 80, 82, 78 }, // left = d45 + { 49, 50, 35, 144, 95, 205, 63, 78, 59 }, // left = d135 + { 41, 53, 52, 148, 71, 142, 65, 128, 51 }, // left = d117 + { 40, 36, 28, 143, 143, 202, 40, 55, 137 }, // left = d153 + { 52, 34, 29, 129, 183, 227, 42, 35, 43 }, // left = d207 + { 42, 44, 44, 104, 105, 164, 64, 130, 80 }, // left = d63 + { 43, 81, 53, 140, 169, 204, 68, 84, 72 } // left = tm } }; const vpx_prob vp9_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1] = { - { 144, 11, 54, 157, 195, 130, 46, 58, 108 }, // y = dc - { 118, 15, 123, 148, 131, 101, 44, 93, 131 }, // y = v - { 113, 12, 23, 188, 226, 142, 26, 32, 125 }, // y = h - { 120, 11, 50, 123, 163, 135, 64, 77, 103 }, // y = d45 - { 113, 9, 36, 155, 111, 157, 32, 44, 161 }, // y = d135 - { 116, 9, 55, 176, 76, 96, 37, 61, 149 }, // y = d117 - { 115, 9, 28, 141, 161, 167, 21, 25, 193 }, // y = d153 - { 120, 12, 32, 145, 195, 142, 32, 38, 86 }, // y = d207 - { 116, 12, 64, 120, 140, 125, 49, 115, 121 }, // y = d63 - { 102, 19, 66, 162, 182, 122, 35, 59, 128 } // y = tm + { 144, 11, 54, 157, 195, 130, 46, 58, 108 }, // y = dc + { 118, 15, 123, 148, 131, 101, 44, 93, 131 }, // y = v + { 113, 12, 23, 188, 226, 142, 26, 32, 125 }, // y = h + { 120, 11, 50, 123, 163, 135, 64, 77, 103 }, // y = d45 + { 113, 9, 36, 155, 111, 
157, 32, 44, 161 }, // y = d135 + { 116, 9, 55, 176, 76, 96, 37, 61, 149 }, // y = d117 + { 115, 9, 28, 141, 161, 167, 21, 25, 193 }, // y = d153 + { 120, 12, 32, 145, 195, 142, 32, 38, 86 }, // y = d207 + { 116, 12, 64, 120, 140, 125, 49, 115, 121 }, // y = d63 + { 102, 19, 66, 162, 182, 122, 35, 59, 128 } // y = tm }; static const vpx_prob default_if_y_probs[BLOCK_SIZE_GROUPS][INTRA_MODES - 1] = { - { 65, 32, 18, 144, 162, 194, 41, 51, 98 }, // block_size < 8x8 - { 132, 68, 18, 165, 217, 196, 45, 40, 78 }, // block_size < 16x16 - { 173, 80, 19, 176, 240, 193, 64, 35, 46 }, // block_size < 32x32 - { 221, 135, 38, 194, 248, 121, 96, 85, 29 } // block_size >= 32x32 + { 65, 32, 18, 144, 162, 194, 41, 51, 98 }, // block_size < 8x8 + { 132, 68, 18, 165, 217, 196, 45, 40, 78 }, // block_size < 16x16 + { 173, 80, 19, 176, 240, 193, 64, 35, 46 }, // block_size < 32x32 + { 221, 135, 38, 194, 248, 121, 96, 85, 29 } // block_size >= 32x32 }; static const vpx_prob default_if_uv_probs[INTRA_MODES][INTRA_MODES - 1] = { - { 120, 7, 76, 176, 208, 126, 28, 54, 103 }, // y = dc - { 48, 12, 154, 155, 139, 90, 34, 117, 119 }, // y = v - { 67, 6, 25, 204, 243, 158, 13, 21, 96 }, // y = h - { 97, 5, 44, 131, 176, 139, 48, 68, 97 }, // y = d45 - { 83, 5, 42, 156, 111, 152, 26, 49, 152 }, // y = d135 - { 80, 5, 58, 178, 74, 83, 33, 62, 145 }, // y = d117 - { 86, 5, 32, 154, 192, 168, 14, 22, 163 }, // y = d153 - { 85, 5, 32, 156, 216, 148, 19, 29, 73 }, // y = d207 - { 77, 7, 64, 116, 132, 122, 37, 126, 120 }, // y = d63 - { 101, 21, 107, 181, 192, 103, 19, 67, 125 } // y = tm + { 120, 7, 76, 176, 208, 126, 28, 54, 103 }, // y = dc + { 48, 12, 154, 155, 139, 90, 34, 117, 119 }, // y = v + { 67, 6, 25, 204, 243, 158, 13, 21, 96 }, // y = h + { 97, 5, 44, 131, 176, 139, 48, 68, 97 }, // y = d45 + { 83, 5, 42, 156, 111, 152, 26, 49, 152 }, // y = d135 + { 80, 5, 58, 178, 74, 83, 33, 62, 145 }, // y = d117 + { 86, 5, 32, 154, 192, 168, 14, 22, 163 }, // y = d153 + { 85, 5, 32, 156, 216, 148, 
19, 29, 73 }, // y = d207 + { 77, 7, 64, 116, 132, 122, 37, 126, 120 }, // y = d63 + { 101, 21, 107, 181, 192, 103, 19, 67, 125 } // y = tm }; const vpx_prob vp9_kf_partition_probs[PARTITION_CONTEXTS] [PARTITION_TYPES - 1] = { - // 8x8 -> 4x4 - { 158, 97, 94 }, // a/l both not split - { 93, 24, 99 }, // a split, l not split - { 85, 119, 44 }, // l split, a not split - { 62, 59, 67 }, // a/l both split - // 16x16 -> 8x8 - { 149, 53, 53 }, // a/l both not split - { 94, 20, 48 }, // a split, l not split - { 83, 53, 24 }, // l split, a not split - { 52, 18, 18 }, // a/l both split - // 32x32 -> 16x16 - { 150, 40, 39 }, // a/l both not split - { 78, 12, 26 }, // a split, l not split - { 67, 33, 11 }, // l split, a not split - { 24, 7, 5 }, // a/l both split - // 64x64 -> 32x32 - { 174, 35, 49 }, // a/l both not split - { 68, 11, 27 }, // a split, l not split - { 57, 15, 9 }, // l split, a not split - { 12, 3, 3 }, // a/l both split -}; + // 8x8 -> 4x4 + { 158, 97, 94 }, // a/l both not split + { 93, 24, 99 }, // a split, l not split + { 85, 119, 44 }, // l split, a not split + { 62, 59, 67 }, // a/l both split + // 16x16 -> 8x8 + { 149, 53, 53 }, // a/l both not split + { 94, 20, 48 }, // a split, l not split + { 83, 53, 24 }, // l split, a not split + { 52, 18, 18 }, // a/l both split + // 32x32 -> 16x16 + { 150, 40, 39 }, // a/l both not split + { 78, 12, 26 }, // a split, l not split + { 67, 33, 11 }, // l split, a not split + { 24, 7, 5 }, // a/l both split + // 64x64 -> 32x32 + { 174, 35, 49 }, // a/l both not split + { 68, 11, 27 }, // a split, l not split + { 57, 15, 9 }, // l split, a not split + { 12, 3, 3 }, // a/l both split + }; -static const vpx_prob default_partition_probs[PARTITION_CONTEXTS] - [PARTITION_TYPES - 1] = { - // 8x8 -> 4x4 - { 199, 122, 141 }, // a/l both not split - { 147, 63, 159 }, // a split, l not split - { 148, 133, 118 }, // l split, a not split - { 121, 104, 114 }, // a/l both split - // 16x16 -> 8x8 - { 174, 73, 87 }, // a/l both not 
split - { 92, 41, 83 }, // a split, l not split - { 82, 99, 50 }, // l split, a not split - { 53, 39, 39 }, // a/l both split - // 32x32 -> 16x16 - { 177, 58, 59 }, // a/l both not split - { 68, 26, 63 }, // a split, l not split - { 52, 79, 25 }, // l split, a not split - { 17, 14, 12 }, // a/l both split - // 64x64 -> 32x32 - { 222, 34, 30 }, // a/l both not split - { 72, 16, 44 }, // a split, l not split - { 58, 32, 12 }, // l split, a not split - { 10, 7, 6 }, // a/l both split -}; +static const vpx_prob + default_partition_probs[PARTITION_CONTEXTS][PARTITION_TYPES - 1] = { + // 8x8 -> 4x4 + { 199, 122, 141 }, // a/l both not split + { 147, 63, 159 }, // a split, l not split + { 148, 133, 118 }, // l split, a not split + { 121, 104, 114 }, // a/l both split + // 16x16 -> 8x8 + { 174, 73, 87 }, // a/l both not split + { 92, 41, 83 }, // a split, l not split + { 82, 99, 50 }, // l split, a not split + { 53, 39, 39 }, // a/l both split + // 32x32 -> 16x16 + { 177, 58, 59 }, // a/l both not split + { 68, 26, 63 }, // a split, l not split + { 52, 79, 25 }, // l split, a not split + { 17, 14, 12 }, // a/l both split + // 64x64 -> 32x32 + { 222, 34, 30 }, // a/l both not split + { 72, 16, 44 }, // a split, l not split + { 58, 32, 12 }, // l split, a not split + { 10, 7, 6 }, // a/l both split + }; -static const vpx_prob default_inter_mode_probs[INTER_MODE_CONTEXTS] - [INTER_MODES - 1] = { - {2, 173, 34}, // 0 = both zero mv - {7, 145, 85}, // 1 = one zero mv + one a predicted mv - {7, 166, 63}, // 2 = two predicted mvs - {7, 94, 66}, // 3 = one predicted/zero and one new mv - {8, 64, 46}, // 4 = two new mvs - {17, 81, 31}, // 5 = one intra neighbour + x - {25, 29, 30}, // 6 = two intra neighbours -}; +static const vpx_prob + default_inter_mode_probs[INTER_MODE_CONTEXTS][INTER_MODES - 1] = { + { 2, 173, 34 }, // 0 = both zero mv + { 7, 145, 85 }, // 1 = one zero mv + one a predicted mv + { 7, 166, 63 }, // 2 = two predicted mvs + { 7, 94, 66 }, // 3 = one predicted/zero 
and one new mv + { 8, 64, 46 }, // 4 = two new mvs + { 17, 81, 31 }, // 5 = one intra neighbour + x + { 25, 29, 30 }, // 6 = two intra neighbours + }; /* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */ const vpx_tree_index vp9_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = { - -DC_PRED, 2, /* 0 = DC_NODE */ - -TM_PRED, 4, /* 1 = TM_NODE */ - -V_PRED, 6, /* 2 = V_NODE */ - 8, 12, /* 3 = COM_NODE */ - -H_PRED, 10, /* 4 = H_NODE */ - -D135_PRED, -D117_PRED, /* 5 = D135_NODE */ - -D45_PRED, 14, /* 6 = D45_NODE */ - -D63_PRED, 16, /* 7 = D63_NODE */ - -D153_PRED, -D207_PRED /* 8 = D153_NODE */ + -DC_PRED, 2, /* 0 = DC_NODE */ + -TM_PRED, 4, /* 1 = TM_NODE */ + -V_PRED, 6, /* 2 = V_NODE */ + 8, 12, /* 3 = COM_NODE */ + -H_PRED, 10, /* 4 = H_NODE */ + -D135_PRED, -D117_PRED, /* 5 = D135_NODE */ + -D45_PRED, 14, /* 6 = D45_NODE */ + -D63_PRED, 16, /* 7 = D63_NODE */ + -D153_PRED, -D207_PRED /* 8 = D153_NODE */ }; const vpx_tree_index vp9_inter_mode_tree[TREE_SIZE(INTER_MODES)] = { - -INTER_OFFSET(ZEROMV), 2, - -INTER_OFFSET(NEARESTMV), 4, - -INTER_OFFSET(NEARMV), -INTER_OFFSET(NEWMV) + -INTER_OFFSET(ZEROMV), 2, -INTER_OFFSET(NEARESTMV), 4, -INTER_OFFSET(NEARMV), + -INTER_OFFSET(NEWMV) }; const vpx_tree_index vp9_partition_tree[TREE_SIZE(PARTITION_TYPES)] = { - -PARTITION_NONE, 2, - -PARTITION_HORZ, 4, - -PARTITION_VERT, -PARTITION_SPLIT + -PARTITION_NONE, 2, -PARTITION_HORZ, 4, -PARTITION_VERT, -PARTITION_SPLIT }; static const vpx_prob default_intra_inter_p[INTRA_INTER_CONTEXTS] = { @@ -249,41 +265,30 @@ static const vpx_prob default_intra_inter_p[INTRA_INTER_CONTEXTS] = { }; static const vpx_prob default_comp_inter_p[COMP_INTER_CONTEXTS] = { - 239, 183, 119, 96, 41 + 239, 183, 119, 96, 41 }; -static const vpx_prob default_comp_ref_p[REF_CONTEXTS] = { - 50, 126, 123, 221, 226 -}; +static const vpx_prob default_comp_ref_p[REF_CONTEXTS] = { 50, 126, 123, 221, + 226 }; static const vpx_prob default_single_ref_p[REF_CONTEXTS][2] = { - { 33, 16 }, - { 
77, 74 }, - { 142, 142 }, - { 172, 170 }, - { 238, 247 } + { 33, 16 }, { 77, 74 }, { 142, 142 }, { 172, 170 }, { 238, 247 } }; -static const struct tx_probs default_tx_probs = { - { { 3, 136, 37 }, - { 5, 52, 13 } }, +static const struct tx_probs default_tx_probs = { { { 3, 136, 37 }, + { 5, 52, 13 } }, - { { 20, 152 }, - { 15, 101 } }, + { { 20, 152 }, { 15, 101 } }, - { { 100 }, - { 66 } } -}; + { { 100 }, { 66 } } }; void tx_counts_to_branch_counts_32x32(const unsigned int *tx_count_32x32p, unsigned int (*ct_32x32p)[2]) { ct_32x32p[0][0] = tx_count_32x32p[TX_4X4]; - ct_32x32p[0][1] = tx_count_32x32p[TX_8X8] + - tx_count_32x32p[TX_16X16] + + ct_32x32p[0][1] = tx_count_32x32p[TX_8X8] + tx_count_32x32p[TX_16X16] + tx_count_32x32p[TX_32X32]; ct_32x32p[1][0] = tx_count_32x32p[TX_8X8]; - ct_32x32p[1][1] = tx_count_32x32p[TX_16X16] + - tx_count_32x32p[TX_32X32]; + ct_32x32p[1][1] = tx_count_32x32p[TX_16X16] + tx_count_32x32p[TX_32X32]; ct_32x32p[2][0] = tx_count_32x32p[TX_16X16]; ct_32x32p[2][1] = tx_count_32x32p[TX_32X32]; } @@ -302,17 +307,15 @@ void tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p, ct_8x8p[0][1] = tx_count_8x8p[TX_8X8]; } -static const vpx_prob default_skip_probs[SKIP_CONTEXTS] = { - 192, 128, 64 -}; +static const vpx_prob default_skip_probs[SKIP_CONTEXTS] = { 192, 128, 64 }; static const vpx_prob default_switchable_interp_prob[SWITCHABLE_FILTER_CONTEXTS] [SWITCHABLE_FILTERS - 1] = { - { 235, 162, }, - { 36, 255, }, - { 34, 3, }, - { 149, 144, }, -}; + { 235, 162 }, + { 36, 255 }, + { 34, 3 }, + { 149, 144 }, + }; static void init_mode_probs(FRAME_CONTEXT *fc) { vp9_copy(fc->uv_mode_prob, default_if_uv_probs); @@ -328,11 +331,8 @@ static void init_mode_probs(FRAME_CONTEXT *fc) { vp9_copy(fc->inter_mode_probs, default_inter_mode_probs); } -const vpx_tree_index vp9_switchable_interp_tree - [TREE_SIZE(SWITCHABLE_FILTERS)] = { - -EIGHTTAP, 2, - -EIGHTTAP_SMOOTH, -EIGHTTAP_SHARP -}; +const vpx_tree_index 
vp9_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)] = + { -EIGHTTAP, 2, -EIGHTTAP_SMOOTH, -EIGHTTAP_SHARP }; void vp9_adapt_mode_probs(VP9_COMMON *cm) { int i, j; @@ -344,11 +344,11 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) { fc->intra_inter_prob[i] = mode_mv_merge_probs(pre_fc->intra_inter_prob[i], counts->intra_inter[i]); for (i = 0; i < COMP_INTER_CONTEXTS; i++) - fc->comp_inter_prob[i] = mode_mv_merge_probs(pre_fc->comp_inter_prob[i], - counts->comp_inter[i]); + fc->comp_inter_prob[i] = + mode_mv_merge_probs(pre_fc->comp_inter_prob[i], counts->comp_inter[i]); for (i = 0; i < REF_CONTEXTS; i++) - fc->comp_ref_prob[i] = mode_mv_merge_probs(pre_fc->comp_ref_prob[i], - counts->comp_ref[i]); + fc->comp_ref_prob[i] = + mode_mv_merge_probs(pre_fc->comp_ref_prob[i], counts->comp_ref[i]); for (i = 0; i < REF_CONTEXTS; i++) for (j = 0; j < 2; j++) fc->single_ref_prob[i][j] = mode_mv_merge_probs( @@ -356,11 +356,11 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) { for (i = 0; i < INTER_MODE_CONTEXTS; i++) vpx_tree_merge_probs(vp9_inter_mode_tree, pre_fc->inter_mode_probs[i], - counts->inter_mode[i], fc->inter_mode_probs[i]); + counts->inter_mode[i], fc->inter_mode_probs[i]); for (i = 0; i < BLOCK_SIZE_GROUPS; i++) vpx_tree_merge_probs(vp9_intra_mode_tree, pre_fc->y_mode_prob[i], - counts->y_mode[i], fc->y_mode_prob[i]); + counts->y_mode[i], fc->y_mode_prob[i]); for (i = 0; i < INTRA_MODES; ++i) vpx_tree_merge_probs(vp9_intra_mode_tree, pre_fc->uv_mode_prob[i], @@ -372,10 +372,9 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) { if (cm->interp_filter == SWITCHABLE) { for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) - vpx_tree_merge_probs(vp9_switchable_interp_tree, - pre_fc->switchable_interp_prob[i], - counts->switchable_interp[i], - fc->switchable_interp_prob[i]); + vpx_tree_merge_probs( + vp9_switchable_interp_tree, pre_fc->switchable_interp_prob[i], + counts->switchable_interp[i], fc->switchable_interp_prob[i]); } if (cm->tx_mode == TX_MODE_SELECT) { @@ -387,8 +386,8 
@@ void vp9_adapt_mode_probs(VP9_COMMON *cm) { for (i = 0; i < TX_SIZE_CONTEXTS; ++i) { tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], branch_ct_8x8p); for (j = 0; j < TX_SIZES - 3; ++j) - fc->tx_probs.p8x8[i][j] = mode_mv_merge_probs( - pre_fc->tx_probs.p8x8[i][j], branch_ct_8x8p[j]); + fc->tx_probs.p8x8[i][j] = + mode_mv_merge_probs(pre_fc->tx_probs.p8x8[i][j], branch_ct_8x8p[j]); tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], branch_ct_16x16p); for (j = 0; j < TX_SIZES - 2; ++j) @@ -403,8 +402,8 @@ void vp9_adapt_mode_probs(VP9_COMMON *cm) { } for (i = 0; i < SKIP_CONTEXTS; ++i) - fc->skip_probs[i] = mode_mv_merge_probs( - pre_fc->skip_probs[i], counts->skip[i]); + fc->skip_probs[i] = + mode_mv_merge_probs(pre_fc->skip_probs[i], counts->skip[i]); } static void set_default_lf_deltas(struct loopfilter *lf) { @@ -448,11 +447,10 @@ void vp9_setup_past_independence(VP9_COMMON *cm) { vp9_init_mv_probs(cm); cm->fc->initialized = 1; - if (cm->frame_type == KEY_FRAME || - cm->error_resilient_mode || cm->reset_frame_context == 3) { + if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode || + cm->reset_frame_context == 3) { // Reset all frame contexts. - for (i = 0; i < FRAME_CONTEXTS; ++i) - cm->frame_contexts[i] = *cm->fc; + for (i = 0; i < FRAME_CONTEXTS; ++i) cm->frame_contexts[i] = *cm->fc; } else if (cm->reset_frame_context == 2) { // Reset only the frame context specified in the frame header. 
cm->frame_contexts[cm->frame_context_idx] = *cm->fc; diff --git a/libs/libvpx/vp9/common/vp9_entropymode.h b/libs/libvpx/vp9/common/vp9_entropymode.h index 0285be1557..0ee663fe88 100644 --- a/libs/libvpx/vp9/common/vp9_entropymode.h +++ b/libs/libvpx/vp9/common/vp9_entropymode.h @@ -24,7 +24,7 @@ extern "C" { #define TX_SIZE_CONTEXTS 2 -#define INTER_OFFSET(mode) ((mode) - NEARESTMV) +#define INTER_OFFSET(mode) ((mode)-NEARESTMV) struct VP9Common; @@ -64,8 +64,8 @@ typedef struct FRAME_COUNTS { unsigned int uv_mode[INTRA_MODES][INTRA_MODES]; unsigned int partition[PARTITION_CONTEXTS][PARTITION_TYPES]; vp9_coeff_count_model coef[TX_SIZES][PLANE_TYPES]; - unsigned int eob_branch[TX_SIZES][PLANE_TYPES][REF_TYPES] - [COEF_BANDS][COEFF_CONTEXTS]; + unsigned int eob_branch[TX_SIZES][PLANE_TYPES][REF_TYPES][COEF_BANDS] + [COEFF_CONTEXTS]; unsigned int switchable_interp[SWITCHABLE_FILTER_CONTEXTS] [SWITCHABLE_FILTERS]; unsigned int inter_mode[INTER_MODE_CONTEXTS][INTER_MODES]; @@ -86,8 +86,8 @@ extern const vpx_prob vp9_kf_partition_probs[PARTITION_CONTEXTS] extern const vpx_tree_index vp9_intra_mode_tree[TREE_SIZE(INTRA_MODES)]; extern const vpx_tree_index vp9_inter_mode_tree[TREE_SIZE(INTER_MODES)]; extern const vpx_tree_index vp9_partition_tree[TREE_SIZE(PARTITION_TYPES)]; -extern const vpx_tree_index vp9_switchable_interp_tree - [TREE_SIZE(SWITCHABLE_FILTERS)]; +extern const vpx_tree_index + vp9_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)]; void vp9_setup_past_independence(struct VP9Common *cm); diff --git a/libs/libvpx/vp9/common/vp9_entropymv.c b/libs/libvpx/vp9/common/vp9_entropymv.c index 566ae91cf7..a18a290cfd 100644 --- a/libs/libvpx/vp9/common/vp9_entropymv.c +++ b/libs/libvpx/vp9/common/vp9_entropymv.c @@ -12,104 +12,90 @@ #include "vp9/common/vp9_entropymv.h" const vpx_tree_index vp9_mv_joint_tree[TREE_SIZE(MV_JOINTS)] = { - -MV_JOINT_ZERO, 2, - -MV_JOINT_HNZVZ, 4, - -MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ + -MV_JOINT_ZERO, 2, -MV_JOINT_HNZVZ, 4, 
-MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ }; const vpx_tree_index vp9_mv_class_tree[TREE_SIZE(MV_CLASSES)] = { - -MV_CLASS_0, 2, - -MV_CLASS_1, 4, - 6, 8, - -MV_CLASS_2, -MV_CLASS_3, - 10, 12, - -MV_CLASS_4, -MV_CLASS_5, - -MV_CLASS_6, 14, - 16, 18, - -MV_CLASS_7, -MV_CLASS_8, - -MV_CLASS_9, -MV_CLASS_10, + -MV_CLASS_0, 2, -MV_CLASS_1, 4, 6, + 8, -MV_CLASS_2, -MV_CLASS_3, 10, 12, + -MV_CLASS_4, -MV_CLASS_5, -MV_CLASS_6, 14, 16, + 18, -MV_CLASS_7, -MV_CLASS_8, -MV_CLASS_9, -MV_CLASS_10, }; const vpx_tree_index vp9_mv_class0_tree[TREE_SIZE(CLASS0_SIZE)] = { -0, -1, }; -const vpx_tree_index vp9_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = { - -0, 2, - -1, 4, - -2, -3 -}; +const vpx_tree_index vp9_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = { -0, 2, -1, + 4, -2, -3 }; static const nmv_context default_nmv_context = { - {32, 64, 96}, - { - { // Vertical component - 128, // sign - {224, 144, 192, 168, 192, 176, 192, 198, 198, 245}, // class - {216}, // class0 - {136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, // bits - {{128, 128, 64}, {96, 112, 64}}, // class0_fp - {64, 96, 64}, // fp - 160, // class0_hp bit - 128, // hp + { 32, 64, 96 }, + { { + // Vertical component + 128, // sign + { 224, 144, 192, 168, 192, 176, 192, 198, 198, 245 }, // class + { 216 }, // class0 + { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240 }, // bits + { { 128, 128, 64 }, { 96, 112, 64 } }, // class0_fp + { 64, 96, 64 }, // fp + 160, // class0_hp bit + 128, // hp }, - { // Horizontal component - 128, // sign - {216, 128, 176, 160, 176, 176, 192, 198, 198, 208}, // class - {208}, // class0 - {136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, // bits - {{128, 128, 64}, {96, 112, 64}}, // class0_fp - {64, 96, 64}, // fp - 160, // class0_hp bit - 128, // hp - } - }, + { + // Horizontal component + 128, // sign + { 216, 128, 176, 160, 176, 176, 192, 198, 198, 208 }, // class + { 208 }, // class0 + { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240 }, // bits + { { 128, 128, 64 }, { 96, 112, 64 } }, // class0_fp + { 
64, 96, 64 }, // fp + 160, // class0_hp bit + 128, // hp + } }, }; static const uint8_t log_in_base_2[] = { - 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 
9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10 + 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 
9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, + 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10 }; static INLINE int mv_class_base(MV_CLASS_TYPE c) { @@ -117,27 +103,27 @@ static INLINE int mv_class_base(MV_CLASS_TYPE c) { } MV_CLASS_TYPE vp9_get_mv_class(int z, int *offset) { - const MV_CLASS_TYPE c = (z >= CLASS0_SIZE * 4096) ? - MV_CLASS_10 : (MV_CLASS_TYPE)log_in_base_2[z >> 3]; - if (offset) - *offset = z - mv_class_base(c); + const MV_CLASS_TYPE c = (z >= CLASS0_SIZE * 4096) + ? MV_CLASS_10 + : (MV_CLASS_TYPE)log_in_base_2[z >> 3]; + if (offset) *offset = z - mv_class_base(c); return c; } -static void inc_mv_component(int v, nmv_component_counts *comp_counts, - int incr, int usehp) { +static void inc_mv_component(int v, nmv_component_counts *comp_counts, int incr, + int usehp) { int s, z, c, o, d, e, f; - assert(v != 0); /* should not be zero */ + assert(v != 0); /* should not be zero */ s = v < 0; comp_counts->sign[s] += incr; - z = (s ? -v : v) - 1; /* magnitude - 1 */ + z = (s ? 
-v : v) - 1; /* magnitude - 1 */ c = vp9_get_mv_class(z, &o); comp_counts->classes[c] += incr; - d = (o >> 3); /* int mv data */ - f = (o >> 1) & 3; /* fractional pel mv data */ - e = (o & 1); /* high precision mv data */ + d = (o >> 3); /* int mv data */ + f = (o >> 1) & 3; /* fractional pel mv data */ + e = (o & 1); /* high precision mv data */ if (c == MV_CLASS_0) { comp_counts->class0[d] += incr; @@ -146,8 +132,7 @@ static void inc_mv_component(int v, nmv_component_counts *comp_counts, } else { int i; int b = c + CLASS0_BITS - 1; // number of bits - for (i = 0; i < b; ++i) - comp_counts->bits[i][((d >> i) & 1)] += incr; + for (i = 0; i < b; ++i) comp_counts->bits[i][((d >> i) & 1)] += incr; comp_counts->fp[f] += incr; comp_counts->hp[e] += usehp * incr; } @@ -205,6 +190,4 @@ void vp9_adapt_mv_probs(VP9_COMMON *cm, int allow_hp) { } } -void vp9_init_mv_probs(VP9_COMMON *cm) { - cm->fc->nmvc = default_nmv_context; -} +void vp9_init_mv_probs(VP9_COMMON *cm) { cm->fc->nmvc = default_nmv_context; } diff --git a/libs/libvpx/vp9/common/vp9_entropymv.h b/libs/libvpx/vp9/common/vp9_entropymv.h index 2f05ad44b6..a8cc7e93a2 100644 --- a/libs/libvpx/vp9/common/vp9_entropymv.h +++ b/libs/libvpx/vp9/common/vp9_entropymv.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP9_COMMON_VP9_ENTROPYMV_H_ #define VP9_COMMON_VP9_ENTROPYMV_H_ @@ -39,12 +38,12 @@ static INLINE int use_mv_hp(const MV *ref) { #define MV_UPDATE_PROB 252 /* Symbols for coding which components are zero jointly */ -#define MV_JOINTS 4 +#define MV_JOINTS 4 typedef enum { - MV_JOINT_ZERO = 0, /* Zero vector */ - MV_JOINT_HNZVZ = 1, /* Vert zero, hor nonzero */ - MV_JOINT_HZVNZ = 2, /* Hor zero, vert nonzero */ - MV_JOINT_HNZVNZ = 3, /* Both components nonzero */ + MV_JOINT_ZERO = 0, /* Zero vector */ + MV_JOINT_HNZVZ = 1, /* Vert zero, hor nonzero */ + MV_JOINT_HZVNZ = 2, /* Hor zero, vert nonzero */ + MV_JOINT_HNZVNZ = 3, /* Both components nonzero */ } MV_JOINT_TYPE; static INLINE int mv_joint_vertical(MV_JOINT_TYPE type) { @@ -56,33 +55,33 @@ static INLINE int mv_joint_horizontal(MV_JOINT_TYPE type) { } /* Symbols for coding magnitude class of nonzero components */ -#define MV_CLASSES 11 +#define MV_CLASSES 11 typedef enum { - MV_CLASS_0 = 0, /* (0, 2] integer pel */ - MV_CLASS_1 = 1, /* (2, 4] integer pel */ - MV_CLASS_2 = 2, /* (4, 8] integer pel */ - MV_CLASS_3 = 3, /* (8, 16] integer pel */ - MV_CLASS_4 = 4, /* (16, 32] integer pel */ - MV_CLASS_5 = 5, /* (32, 64] integer pel */ - MV_CLASS_6 = 6, /* (64, 128] integer pel */ - MV_CLASS_7 = 7, /* (128, 256] integer pel */ - MV_CLASS_8 = 8, /* (256, 512] integer pel */ - MV_CLASS_9 = 9, /* (512, 1024] integer pel */ - MV_CLASS_10 = 10, /* (1024,2048] integer pel */ + MV_CLASS_0 = 0, /* (0, 2] integer pel */ + MV_CLASS_1 = 1, /* (2, 4] integer pel */ + MV_CLASS_2 = 2, /* (4, 8] integer pel */ + MV_CLASS_3 = 3, /* (8, 16] integer pel */ + MV_CLASS_4 = 4, /* (16, 32] integer pel */ + MV_CLASS_5 = 5, /* (32, 64] integer pel */ + MV_CLASS_6 = 6, /* (64, 128] integer pel */ + MV_CLASS_7 = 7, /* (128, 256] integer pel */ + MV_CLASS_8 = 8, /* (256, 512] integer pel */ + MV_CLASS_9 = 9, /* (512, 1024] integer pel */ + MV_CLASS_10 = 10, /* (1024,2048] integer pel */ } MV_CLASS_TYPE; -#define CLASS0_BITS 1 /* 
bits at integer precision for class 0 */ -#define CLASS0_SIZE (1 << CLASS0_BITS) +#define CLASS0_BITS 1 /* bits at integer precision for class 0 */ +#define CLASS0_SIZE (1 << CLASS0_BITS) #define MV_OFFSET_BITS (MV_CLASSES + CLASS0_BITS - 2) #define MV_FP_SIZE 4 -#define MV_MAX_BITS (MV_CLASSES + CLASS0_BITS + 2) -#define MV_MAX ((1 << MV_MAX_BITS) - 1) -#define MV_VALS ((MV_MAX << 1) + 1) +#define MV_MAX_BITS (MV_CLASSES + CLASS0_BITS + 2) +#define MV_MAX ((1 << MV_MAX_BITS) - 1) +#define MV_VALS ((MV_MAX << 1) + 1) #define MV_IN_USE_BITS 14 -#define MV_UPP ((1 << MV_IN_USE_BITS) - 1) -#define MV_LOW (-(1 << MV_IN_USE_BITS)) +#define MV_UPP ((1 << MV_IN_USE_BITS) - 1) +#define MV_LOW (-(1 << MV_IN_USE_BITS)) extern const vpx_tree_index vp9_mv_joint_tree[]; extern const vpx_tree_index vp9_mv_class_tree[]; diff --git a/libs/libvpx/vp9/common/vp9_enums.h b/libs/libvpx/vp9/common/vp9_enums.h index d089f23f97..056b298b3d 100644 --- a/libs/libvpx/vp9/common/vp9_enums.h +++ b/libs/libvpx/vp9/common/vp9_enums.h @@ -21,7 +21,7 @@ extern "C" { #define MI_SIZE_LOG2 3 #define MI_BLOCK_SIZE_LOG2 (6 - MI_SIZE_LOG2) // 64 = 2^6 -#define MI_SIZE (1 << MI_SIZE_LOG2) // pixels per mi-unit +#define MI_SIZE (1 << MI_SIZE_LOG2) // pixels per mi-unit #define MI_BLOCK_SIZE (1 << MI_BLOCK_SIZE_LOG2) // mi-units per max block #define MI_MASK (MI_BLOCK_SIZE - 1) @@ -41,20 +41,20 @@ typedef enum BITSTREAM_PROFILE { MAX_PROFILES } BITSTREAM_PROFILE; -#define BLOCK_4X4 0 -#define BLOCK_4X8 1 -#define BLOCK_8X4 2 -#define BLOCK_8X8 3 -#define BLOCK_8X16 4 -#define BLOCK_16X8 5 -#define BLOCK_16X16 6 -#define BLOCK_16X32 7 -#define BLOCK_32X16 8 -#define BLOCK_32X32 9 -#define BLOCK_32X64 10 -#define BLOCK_64X32 11 -#define BLOCK_64X64 12 -#define BLOCK_SIZES 13 +#define BLOCK_4X4 0 +#define BLOCK_4X8 1 +#define BLOCK_8X4 2 +#define BLOCK_8X8 3 +#define BLOCK_8X16 4 +#define BLOCK_16X8 5 +#define BLOCK_16X16 6 +#define BLOCK_16X32 7 +#define BLOCK_32X16 8 +#define BLOCK_32X32 9 +#define 
BLOCK_32X64 10 +#define BLOCK_64X32 11 +#define BLOCK_64X64 12 +#define BLOCK_SIZES 13 #define BLOCK_INVALID BLOCK_SIZES typedef uint8_t BLOCK_SIZE; @@ -68,32 +68,32 @@ typedef enum PARTITION_TYPE { } PARTITION_TYPE; typedef char PARTITION_CONTEXT; -#define PARTITION_PLOFFSET 4 // number of probability models per block size +#define PARTITION_PLOFFSET 4 // number of probability models per block size #define PARTITION_CONTEXTS (4 * PARTITION_PLOFFSET) // block transform size typedef uint8_t TX_SIZE; -#define TX_4X4 ((TX_SIZE)0) // 4x4 transform -#define TX_8X8 ((TX_SIZE)1) // 8x8 transform -#define TX_16X16 ((TX_SIZE)2) // 16x16 transform -#define TX_32X32 ((TX_SIZE)3) // 32x32 transform +#define TX_4X4 ((TX_SIZE)0) // 4x4 transform +#define TX_8X8 ((TX_SIZE)1) // 8x8 transform +#define TX_16X16 ((TX_SIZE)2) // 16x16 transform +#define TX_32X32 ((TX_SIZE)3) // 32x32 transform #define TX_SIZES ((TX_SIZE)4) // frame transform mode typedef enum { - ONLY_4X4 = 0, // only 4x4 transform used - ALLOW_8X8 = 1, // allow block transform size up to 8x8 - ALLOW_16X16 = 2, // allow block transform size up to 16x16 - ALLOW_32X32 = 3, // allow block transform size up to 32x32 - TX_MODE_SELECT = 4, // transform specified for each block - TX_MODES = 5, + ONLY_4X4 = 0, // only 4x4 transform used + ALLOW_8X8 = 1, // allow block transform size up to 8x8 + ALLOW_16X16 = 2, // allow block transform size up to 16x16 + ALLOW_32X32 = 3, // allow block transform size up to 32x32 + TX_MODE_SELECT = 4, // transform specified for each block + TX_MODES = 5, } TX_MODE; typedef enum { - DCT_DCT = 0, // DCT in both horizontal and vertical - ADST_DCT = 1, // ADST in vertical, DCT in horizontal - DCT_ADST = 2, // DCT in vertical, ADST in horizontal - ADST_ADST = 3, // ADST in both directions + DCT_DCT = 0, // DCT in both horizontal and vertical + ADST_DCT = 1, // ADST in vertical, DCT in horizontal + DCT_ADST = 2, // DCT in vertical, ADST in horizontal + ADST_ADST = 3, // ADST in both directions 
TX_TYPES = 4 } TX_TYPE; @@ -103,26 +103,22 @@ typedef enum { VP9_ALT_FLAG = 1 << 2, } VP9_REFFRAME; -typedef enum { - PLANE_TYPE_Y = 0, - PLANE_TYPE_UV = 1, - PLANE_TYPES -} PLANE_TYPE; +typedef enum { PLANE_TYPE_Y = 0, PLANE_TYPE_UV = 1, PLANE_TYPES } PLANE_TYPE; -#define DC_PRED 0 // Average of above and left pixels -#define V_PRED 1 // Vertical -#define H_PRED 2 // Horizontal -#define D45_PRED 3 // Directional 45 deg = round(arctan(1/1) * 180/pi) -#define D135_PRED 4 // Directional 135 deg = 180 - 45 -#define D117_PRED 5 // Directional 117 deg = 180 - 63 -#define D153_PRED 6 // Directional 153 deg = 180 - 27 -#define D207_PRED 7 // Directional 207 deg = 180 + 27 -#define D63_PRED 8 // Directional 63 deg = round(arctan(2/1) * 180/pi) -#define TM_PRED 9 // True-motion +#define DC_PRED 0 // Average of above and left pixels +#define V_PRED 1 // Vertical +#define H_PRED 2 // Horizontal +#define D45_PRED 3 // Directional 45 deg = round(arctan(1/1) * 180/pi) +#define D135_PRED 4 // Directional 135 deg = 180 - 45 +#define D117_PRED 5 // Directional 117 deg = 180 - 63 +#define D153_PRED 6 // Directional 153 deg = 180 - 27 +#define D207_PRED 7 // Directional 207 deg = 180 + 27 +#define D63_PRED 8 // Directional 63 deg = round(arctan(2/1) * 180/pi) +#define TM_PRED 9 // True-motion #define NEARESTMV 10 -#define NEARMV 11 -#define ZEROMV 12 -#define NEWMV 13 +#define NEARMV 11 +#define ZEROMV 12 +#define NEWMV 13 #define MB_MODE_COUNT 14 typedef uint8_t PREDICTION_MODE; diff --git a/libs/libvpx/vp9/common/vp9_filter.c b/libs/libvpx/vp9/common/vp9_filter.c index 4b2198fc40..6c43af8ce8 100644 --- a/libs/libvpx/vp9/common/vp9_filter.c +++ b/libs/libvpx/vp9/common/vp9_filter.c @@ -14,91 +14,55 @@ DECLARE_ALIGNED(256, static const InterpKernel, bilinear_filters[SUBPEL_SHIFTS]) = { - { 0, 0, 0, 128, 0, 0, 0, 0 }, - { 0, 0, 0, 120, 8, 0, 0, 0 }, - { 0, 0, 0, 112, 16, 0, 0, 0 }, - { 0, 0, 0, 104, 24, 0, 0, 0 }, - { 0, 0, 0, 96, 32, 0, 0, 0 }, - { 0, 0, 0, 88, 40, 0, 0, 0 }, - { 0, 
0, 0, 80, 48, 0, 0, 0 }, - { 0, 0, 0, 72, 56, 0, 0, 0 }, - { 0, 0, 0, 64, 64, 0, 0, 0 }, - { 0, 0, 0, 56, 72, 0, 0, 0 }, - { 0, 0, 0, 48, 80, 0, 0, 0 }, - { 0, 0, 0, 40, 88, 0, 0, 0 }, - { 0, 0, 0, 32, 96, 0, 0, 0 }, - { 0, 0, 0, 24, 104, 0, 0, 0 }, - { 0, 0, 0, 16, 112, 0, 0, 0 }, - { 0, 0, 0, 8, 120, 0, 0, 0 } + { 0, 0, 0, 128, 0, 0, 0, 0 }, { 0, 0, 0, 120, 8, 0, 0, 0 }, + { 0, 0, 0, 112, 16, 0, 0, 0 }, { 0, 0, 0, 104, 24, 0, 0, 0 }, + { 0, 0, 0, 96, 32, 0, 0, 0 }, { 0, 0, 0, 88, 40, 0, 0, 0 }, + { 0, 0, 0, 80, 48, 0, 0, 0 }, { 0, 0, 0, 72, 56, 0, 0, 0 }, + { 0, 0, 0, 64, 64, 0, 0, 0 }, { 0, 0, 0, 56, 72, 0, 0, 0 }, + { 0, 0, 0, 48, 80, 0, 0, 0 }, { 0, 0, 0, 40, 88, 0, 0, 0 }, + { 0, 0, 0, 32, 96, 0, 0, 0 }, { 0, 0, 0, 24, 104, 0, 0, 0 }, + { 0, 0, 0, 16, 112, 0, 0, 0 }, { 0, 0, 0, 8, 120, 0, 0, 0 } }; // Lagrangian interpolation filter DECLARE_ALIGNED(256, static const InterpKernel, sub_pel_filters_8[SUBPEL_SHIFTS]) = { - { 0, 0, 0, 128, 0, 0, 0, 0}, - { 0, 1, -5, 126, 8, -3, 1, 0}, - { -1, 3, -10, 122, 18, -6, 2, 0}, - { -1, 4, -13, 118, 27, -9, 3, -1}, - { -1, 4, -16, 112, 37, -11, 4, -1}, - { -1, 5, -18, 105, 48, -14, 4, -1}, - { -1, 5, -19, 97, 58, -16, 5, -1}, - { -1, 6, -19, 88, 68, -18, 5, -1}, - { -1, 6, -19, 78, 78, -19, 6, -1}, - { -1, 5, -18, 68, 88, -19, 6, -1}, - { -1, 5, -16, 58, 97, -19, 5, -1}, - { -1, 4, -14, 48, 105, -18, 5, -1}, - { -1, 4, -11, 37, 112, -16, 4, -1}, - { -1, 3, -9, 27, 118, -13, 4, -1}, - { 0, 2, -6, 18, 122, -10, 3, -1}, - { 0, 1, -3, 8, 126, -5, 1, 0} + { 0, 0, 0, 128, 0, 0, 0, 0 }, { 0, 1, -5, 126, 8, -3, 1, 0 }, + { -1, 3, -10, 122, 18, -6, 2, 0 }, { -1, 4, -13, 118, 27, -9, 3, -1 }, + { -1, 4, -16, 112, 37, -11, 4, -1 }, { -1, 5, -18, 105, 48, -14, 4, -1 }, + { -1, 5, -19, 97, 58, -16, 5, -1 }, { -1, 6, -19, 88, 68, -18, 5, -1 }, + { -1, 6, -19, 78, 78, -19, 6, -1 }, { -1, 5, -18, 68, 88, -19, 6, -1 }, + { -1, 5, -16, 58, 97, -19, 5, -1 }, { -1, 4, -14, 48, 105, -18, 5, -1 }, + { -1, 4, -11, 37, 112, -16, 4, -1 }, { -1, 3, 
-9, 27, 118, -13, 4, -1 }, + { 0, 2, -6, 18, 122, -10, 3, -1 }, { 0, 1, -3, 8, 126, -5, 1, 0 } }; // DCT based filter DECLARE_ALIGNED(256, static const InterpKernel, sub_pel_filters_8s[SUBPEL_SHIFTS]) = { - {0, 0, 0, 128, 0, 0, 0, 0}, - {-1, 3, -7, 127, 8, -3, 1, 0}, - {-2, 5, -13, 125, 17, -6, 3, -1}, - {-3, 7, -17, 121, 27, -10, 5, -2}, - {-4, 9, -20, 115, 37, -13, 6, -2}, - {-4, 10, -23, 108, 48, -16, 8, -3}, - {-4, 10, -24, 100, 59, -19, 9, -3}, - {-4, 11, -24, 90, 70, -21, 10, -4}, - {-4, 11, -23, 80, 80, -23, 11, -4}, - {-4, 10, -21, 70, 90, -24, 11, -4}, - {-3, 9, -19, 59, 100, -24, 10, -4}, - {-3, 8, -16, 48, 108, -23, 10, -4}, - {-2, 6, -13, 37, 115, -20, 9, -4}, - {-2, 5, -10, 27, 121, -17, 7, -3}, - {-1, 3, -6, 17, 125, -13, 5, -2}, - {0, 1, -3, 8, 127, -7, 3, -1} + { 0, 0, 0, 128, 0, 0, 0, 0 }, { -1, 3, -7, 127, 8, -3, 1, 0 }, + { -2, 5, -13, 125, 17, -6, 3, -1 }, { -3, 7, -17, 121, 27, -10, 5, -2 }, + { -4, 9, -20, 115, 37, -13, 6, -2 }, { -4, 10, -23, 108, 48, -16, 8, -3 }, + { -4, 10, -24, 100, 59, -19, 9, -3 }, { -4, 11, -24, 90, 70, -21, 10, -4 }, + { -4, 11, -23, 80, 80, -23, 11, -4 }, { -4, 10, -21, 70, 90, -24, 11, -4 }, + { -3, 9, -19, 59, 100, -24, 10, -4 }, { -3, 8, -16, 48, 108, -23, 10, -4 }, + { -2, 6, -13, 37, 115, -20, 9, -4 }, { -2, 5, -10, 27, 121, -17, 7, -3 }, + { -1, 3, -6, 17, 125, -13, 5, -2 }, { 0, 1, -3, 8, 127, -7, 3, -1 } }; // freqmultiplier = 0.5 DECLARE_ALIGNED(256, static const InterpKernel, sub_pel_filters_8lp[SUBPEL_SHIFTS]) = { - { 0, 0, 0, 128, 0, 0, 0, 0}, - {-3, -1, 32, 64, 38, 1, -3, 0}, - {-2, -2, 29, 63, 41, 2, -3, 0}, - {-2, -2, 26, 63, 43, 4, -4, 0}, - {-2, -3, 24, 62, 46, 5, -4, 0}, - {-2, -3, 21, 60, 49, 7, -4, 0}, - {-1, -4, 18, 59, 51, 9, -4, 0}, - {-1, -4, 16, 57, 53, 12, -4, -1}, - {-1, -4, 14, 55, 55, 14, -4, -1}, - {-1, -4, 12, 53, 57, 16, -4, -1}, - { 0, -4, 9, 51, 59, 18, -4, -1}, - { 0, -4, 7, 49, 60, 21, -3, -2}, - { 0, -4, 5, 46, 62, 24, -3, -2}, - { 0, -4, 4, 43, 63, 26, -2, -2}, - { 0, -3, 2, 41, 
63, 29, -2, -2}, - { 0, -3, 1, 38, 64, 32, -1, -3} + { 0, 0, 0, 128, 0, 0, 0, 0 }, { -3, -1, 32, 64, 38, 1, -3, 0 }, + { -2, -2, 29, 63, 41, 2, -3, 0 }, { -2, -2, 26, 63, 43, 4, -4, 0 }, + { -2, -3, 24, 62, 46, 5, -4, 0 }, { -2, -3, 21, 60, 49, 7, -4, 0 }, + { -1, -4, 18, 59, 51, 9, -4, 0 }, { -1, -4, 16, 57, 53, 12, -4, -1 }, + { -1, -4, 14, 55, 55, 14, -4, -1 }, { -1, -4, 12, 53, 57, 16, -4, -1 }, + { 0, -4, 9, 51, 59, 18, -4, -1 }, { 0, -4, 7, 49, 60, 21, -3, -2 }, + { 0, -4, 5, 46, 62, 24, -3, -2 }, { 0, -4, 4, 43, 63, 26, -2, -2 }, + { 0, -3, 2, 41, 63, 29, -2, -2 }, { 0, -3, 1, 38, 64, 32, -1, -3 } }; - const InterpKernel *vp9_filter_kernels[4] = { - sub_pel_filters_8, - sub_pel_filters_8lp, - sub_pel_filters_8s, - bilinear_filters + sub_pel_filters_8, sub_pel_filters_8lp, sub_pel_filters_8s, bilinear_filters }; diff --git a/libs/libvpx/vp9/common/vp9_filter.h b/libs/libvpx/vp9/common/vp9_filter.h index efa24bc67b..9d2b8e1dbf 100644 --- a/libs/libvpx/vp9/common/vp9_filter.h +++ b/libs/libvpx/vp9/common/vp9_filter.h @@ -16,16 +16,15 @@ #include "vpx_dsp/vpx_filter.h" #include "vpx_ports/mem.h" - #ifdef __cplusplus extern "C" { #endif -#define EIGHTTAP 0 -#define EIGHTTAP_SMOOTH 1 -#define EIGHTTAP_SHARP 2 -#define SWITCHABLE_FILTERS 3 /* Number of switchable filters */ -#define BILINEAR 3 +#define EIGHTTAP 0 +#define EIGHTTAP_SMOOTH 1 +#define EIGHTTAP_SHARP 2 +#define SWITCHABLE_FILTERS 3 /* Number of switchable filters */ +#define BILINEAR 3 // The codec can operate in four possible inter prediction filter mode: // 8-tap, 8-tap-smooth, 8-tap-sharp, and switching between the three. 
#define SWITCHABLE_FILTER_CONTEXTS (SWITCHABLE_FILTERS + 1) diff --git a/libs/libvpx/vp9/common/vp9_frame_buffers.c b/libs/libvpx/vp9/common/vp9_frame_buffers.c index 0f41d66985..efcf2bf885 100644 --- a/libs/libvpx/vp9/common/vp9_frame_buffers.c +++ b/libs/libvpx/vp9/common/vp9_frame_buffers.c @@ -19,9 +19,8 @@ int vp9_alloc_internal_frame_buffers(InternalFrameBufferList *list) { list->num_internal_frame_buffers = VP9_MAXIMUM_REF_BUFFERS + VPX_MAXIMUM_WORK_BUFFERS; - list->int_fb = - (InternalFrameBuffer *)vpx_calloc(list->num_internal_frame_buffers, - sizeof(*list->int_fb)); + list->int_fb = (InternalFrameBuffer *)vpx_calloc( + list->num_internal_frame_buffers, sizeof(*list->int_fb)); return (list->int_fb == NULL); } @@ -43,23 +42,19 @@ int vp9_get_frame_buffer(void *cb_priv, size_t min_size, int i; InternalFrameBufferList *const int_fb_list = (InternalFrameBufferList *)cb_priv; - if (int_fb_list == NULL) - return -1; + if (int_fb_list == NULL) return -1; // Find a free frame buffer. for (i = 0; i < int_fb_list->num_internal_frame_buffers; ++i) { - if (!int_fb_list->int_fb[i].in_use) - break; + if (!int_fb_list->int_fb[i].in_use) break; } - if (i == int_fb_list->num_internal_frame_buffers) - return -1; + if (i == int_fb_list->num_internal_frame_buffers) return -1; if (int_fb_list->int_fb[i].size < min_size) { int_fb_list->int_fb[i].data = (uint8_t *)vpx_realloc(int_fb_list->int_fb[i].data, min_size); - if (!int_fb_list->int_fb[i].data) - return -1; + if (!int_fb_list->int_fb[i].data) return -1; // This memset is needed for fixing valgrind error from C loop filter // due to access uninitialized memory in frame border. 
It could be @@ -80,7 +75,6 @@ int vp9_get_frame_buffer(void *cb_priv, size_t min_size, int vp9_release_frame_buffer(void *cb_priv, vpx_codec_frame_buffer_t *fb) { InternalFrameBuffer *const int_fb = (InternalFrameBuffer *)fb->priv; (void)cb_priv; - if (int_fb) - int_fb->in_use = 0; + if (int_fb) int_fb->in_use = 0; return 0; } diff --git a/libs/libvpx/vp9/common/vp9_idct.c b/libs/libvpx/vp9/common/vp9_idct.c index 1b420143bb..dc2e03946d 100644 --- a/libs/libvpx/vp9/common/vp9_idct.c +++ b/libs/libvpx/vp9/common/vp9_idct.c @@ -20,10 +20,10 @@ void vp9_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride, int tx_type) { const transform_2d IHT_4[] = { - { idct4_c, idct4_c }, // DCT_DCT = 0 - { iadst4_c, idct4_c }, // ADST_DCT = 1 - { idct4_c, iadst4_c }, // DCT_ADST = 2 - { iadst4_c, iadst4_c } // ADST_ADST = 3 + { idct4_c, idct4_c }, // DCT_DCT = 0 + { iadst4_c, idct4_c }, // ADST_DCT = 1 + { idct4_c, iadst4_c }, // DCT_ADST = 2 + { iadst4_c, iadst4_c } // ADST_ADST = 3 }; int i, j; @@ -34,14 +34,13 @@ void vp9_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride, // inverse transform row vectors for (i = 0; i < 4; ++i) { IHT_4[tx_type].rows(input, outptr); - input += 4; + input += 4; outptr += 4; } // inverse transform column vectors for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = out[j * 4 + i]; + for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i]; IHT_4[tx_type].cols(temp_in, temp_out); for (j = 0; j < 4; ++j) { dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], @@ -51,10 +50,10 @@ void vp9_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride, } static const transform_2d IHT_8[] = { - { idct8_c, idct8_c }, // DCT_DCT = 0 - { iadst8_c, idct8_c }, // ADST_DCT = 1 - { idct8_c, iadst8_c }, // DCT_ADST = 2 - { iadst8_c, iadst8_c } // ADST_ADST = 3 + { idct8_c, idct8_c }, // DCT_DCT = 0 + { iadst8_c, idct8_c }, // ADST_DCT = 1 + { idct8_c, iadst8_c }, // DCT_ADST = 2 + { iadst8_c, iadst8_c } // 
ADST_ADST = 3 }; void vp9_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride, @@ -74,8 +73,7 @@ void vp9_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride, // inverse transform column vectors for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; + for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i]; ht.cols(temp_in, temp_out); for (j = 0; j < 8; ++j) { dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], @@ -85,10 +83,10 @@ void vp9_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride, } static const transform_2d IHT_16[] = { - { idct16_c, idct16_c }, // DCT_DCT = 0 - { iadst16_c, idct16_c }, // ADST_DCT = 1 - { idct16_c, iadst16_c }, // DCT_ADST = 2 - { iadst16_c, iadst16_c } // ADST_ADST = 3 + { idct16_c, idct16_c }, // DCT_DCT = 0 + { iadst16_c, idct16_c }, // ADST_DCT = 1 + { idct16_c, iadst16_c }, // DCT_ADST = 2 + { iadst16_c, iadst16_c } // ADST_ADST = 3 }; void vp9_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride, @@ -108,8 +106,7 @@ void vp9_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride, // Columns for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j * 16 + i]; + for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i]; ht.cols(temp_in, temp_out); for (j = 0; j < 16; ++j) { dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], @@ -127,7 +124,6 @@ void vp9_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride, vpx_idct4x4_1_add(input, dest, stride); } - void vp9_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride, int eob) { if (eob > 1) @@ -143,8 +139,6 @@ void vp9_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride, // The calculation can be simplified if there are not many non-zero dct // coefficients. Use eobs to decide what to do. - // TODO(yunqingwang): "eobs = 1" case is also handled in vp9_short_idct8x8_c. - // Combine that with code here. 
if (eob == 1) // DC only DCT coefficient vpx_idct8x8_1_add(input, dest, stride); @@ -158,8 +152,7 @@ void vp9_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride, int eob) { /* The calculation can be simplified if there are not many non-zero dct * coefficients. Use eobs to separate different cases. */ - if (eob == 1) - /* DC only DCT coefficient. */ + if (eob == 1) /* DC only DCT coefficient. */ vpx_idct16x16_1_add(input, dest, stride); else if (eob <= 10) vpx_idct16x16_10_add(input, dest, stride); @@ -209,13 +202,25 @@ void vp9_iht16x16_add(TX_TYPE tx_type, const tran_low_t *input, uint8_t *dest, } #if CONFIG_VP9_HIGHBITDEPTH + +// 12 signal input bits + 7 forward transform amplify bits + 1 bit +// for contingency in rounding and quantizing +#define VALID_IHT_MAGNITUDE_RANGE (1 << 20) + +static INLINE int detect_invalid_iht_input(const tran_low_t *input, int size) { + int i; + for (i = 0; i < size; ++i) + if (abs(input[i]) >= VALID_IHT_MAGNITUDE_RANGE) return 1; + return 0; +} + void vp9_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, int stride, int tx_type, int bd) { const highbd_transform_2d IHT_4[] = { - { vpx_highbd_idct4_c, vpx_highbd_idct4_c }, // DCT_DCT = 0 - { vpx_highbd_iadst4_c, vpx_highbd_idct4_c }, // ADST_DCT = 1 - { vpx_highbd_idct4_c, vpx_highbd_iadst4_c }, // DCT_ADST = 2 - { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c } // ADST_ADST = 3 + { vpx_highbd_idct4_c, vpx_highbd_idct4_c }, // DCT_DCT = 0 + { vpx_highbd_iadst4_c, vpx_highbd_idct4_c }, // ADST_DCT = 1 + { vpx_highbd_idct4_c, vpx_highbd_iadst4_c }, // DCT_ADST = 2 + { vpx_highbd_iadst4_c, vpx_highbd_iadst4_c } // ADST_ADST = 3 }; uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); @@ -224,17 +229,23 @@ void vp9_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, tran_low_t *outptr = out; tran_low_t temp_in[4], temp_out[4]; + if (detect_invalid_iht_input(input, 16)) { +#if CONFIG_COEFFICIENT_RANGE_CHECKING + assert(0 && "invalid highbd iht input"); +#endif // 
CONFIG_COEFFICIENT_RANGE_CHECKING + return; + } + // Inverse transform row vectors. for (i = 0; i < 4; ++i) { IHT_4[tx_type].rows(input, outptr, bd); - input += 4; + input += 4; outptr += 4; } // Inverse transform column vectors. for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = out[j * 4 + i]; + for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i]; IHT_4[tx_type].cols(temp_in, temp_out, bd); for (j = 0; j < 4; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( @@ -244,10 +255,10 @@ void vp9_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, } static const highbd_transform_2d HIGH_IHT_8[] = { - { vpx_highbd_idct8_c, vpx_highbd_idct8_c }, // DCT_DCT = 0 - { vpx_highbd_iadst8_c, vpx_highbd_idct8_c }, // ADST_DCT = 1 - { vpx_highbd_idct8_c, vpx_highbd_iadst8_c }, // DCT_ADST = 2 - { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c } // ADST_ADST = 3 + { vpx_highbd_idct8_c, vpx_highbd_idct8_c }, // DCT_DCT = 0 + { vpx_highbd_iadst8_c, vpx_highbd_idct8_c }, // ADST_DCT = 1 + { vpx_highbd_idct8_c, vpx_highbd_iadst8_c }, // DCT_ADST = 2 + { vpx_highbd_iadst8_c, vpx_highbd_iadst8_c } // ADST_ADST = 3 }; void vp9_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8, @@ -259,6 +270,13 @@ void vp9_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8, const highbd_transform_2d ht = HIGH_IHT_8[tx_type]; uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); + if (detect_invalid_iht_input(input, 64)) { +#if CONFIG_COEFFICIENT_RANGE_CHECKING + assert(0 && "invalid highbd iht input"); +#endif // CONFIG_COEFFICIENT_RANGE_CHECKING + return; + } + // Inverse transform row vectors. for (i = 0; i < 8; ++i) { ht.rows(input, outptr, bd); @@ -268,8 +286,7 @@ void vp9_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8, // Inverse transform column vectors. 
for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; + for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i]; ht.cols(temp_in, temp_out, bd); for (j = 0; j < 8; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( @@ -279,10 +296,10 @@ void vp9_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8, } static const highbd_transform_2d HIGH_IHT_16[] = { - { vpx_highbd_idct16_c, vpx_highbd_idct16_c }, // DCT_DCT = 0 - { vpx_highbd_iadst16_c, vpx_highbd_idct16_c }, // ADST_DCT = 1 - { vpx_highbd_idct16_c, vpx_highbd_iadst16_c }, // DCT_ADST = 2 - { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c } // ADST_ADST = 3 + { vpx_highbd_idct16_c, vpx_highbd_idct16_c }, // DCT_DCT = 0 + { vpx_highbd_iadst16_c, vpx_highbd_idct16_c }, // ADST_DCT = 1 + { vpx_highbd_idct16_c, vpx_highbd_iadst16_c }, // DCT_ADST = 2 + { vpx_highbd_iadst16_c, vpx_highbd_iadst16_c } // ADST_ADST = 3 }; void vp9_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8, @@ -294,6 +311,13 @@ void vp9_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8, const highbd_transform_2d ht = HIGH_IHT_16[tx_type]; uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); + if (detect_invalid_iht_input(input, 256)) { +#if CONFIG_COEFFICIENT_RANGE_CHECKING + assert(0 && "invalid highbd iht input"); +#endif // CONFIG_COEFFICIENT_RANGE_CHECKING + return; + } + // Rows for (i = 0; i < 16; ++i) { ht.rows(input, outptr, bd); @@ -303,8 +327,7 @@ void vp9_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8, // Columns for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j * 16 + i]; + for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i]; ht.cols(temp_in, temp_out, bd); for (j = 0; j < 16; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( @@ -322,7 +345,6 @@ void vp9_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride, vpx_highbd_idct4x4_1_add(input, dest, stride, bd); } - void vp9_highbd_iwht4x4_add(const tran_low_t *input, 
uint8_t *dest, int stride, int eob, int bd) { if (eob > 1) @@ -338,8 +360,6 @@ void vp9_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride, // The calculation can be simplified if there are not many non-zero dct // coefficients. Use eobs to decide what to do. - // TODO(yunqingwang): "eobs = 1" case is also handled in vp9_short_idct8x8_c. - // Combine that with code here. // DC only DCT coefficient if (eob == 1) { vpx_highbd_idct8x8_1_add(input, dest, stride, bd); @@ -395,7 +415,7 @@ void vp9_highbd_iht8x8_add(TX_TYPE tx_type, const tran_low_t *input, } void vp9_highbd_iht16x16_add(TX_TYPE tx_type, const tran_low_t *input, - uint8_t *dest, int stride, int eob, int bd) { + uint8_t *dest, int stride, int eob, int bd) { if (tx_type == DCT_DCT) { vp9_highbd_idct16x16_add(input, dest, stride, eob, bd); } else { diff --git a/libs/libvpx/vp9/common/vp9_idct.h b/libs/libvpx/vp9/common/vp9_idct.h index b5a3fbf362..ea958a38c0 100644 --- a/libs/libvpx/vp9/common/vp9_idct.h +++ b/libs/libvpx/vp9/common/vp9_idct.h @@ -24,14 +24,14 @@ extern "C" { #endif -typedef void (*transform_1d)(const tran_low_t*, tran_low_t*); +typedef void (*transform_1d)(const tran_low_t *, tran_low_t *); typedef struct { transform_1d cols, rows; // vertical and horizontal } transform_2d; #if CONFIG_VP9_HIGHBITDEPTH -typedef void (*highbd_transform_1d)(const tran_low_t*, tran_low_t*, int bd); +typedef void (*highbd_transform_1d)(const tran_low_t *, tran_low_t *, int bd); typedef struct { highbd_transform_1d cols, rows; // vertical and horizontal diff --git a/libs/libvpx/vp9/common/vp9_loopfilter.c b/libs/libvpx/vp9/common/vp9_loopfilter.c index 79c3c4820d..d796d7161c 100644 --- a/libs/libvpx/vp9/common/vp9_loopfilter.c +++ b/libs/libvpx/vp9/common/vp9_loopfilter.c @@ -36,7 +36,7 @@ // 10101010 // // A loopfilter should be applied to every other 8x8 horizontally. 
-static const uint64_t left_64x64_txform_mask[TX_SIZES]= { +static const uint64_t left_64x64_txform_mask[TX_SIZES] = { 0xffffffffffffffffULL, // TX_4X4 0xffffffffffffffffULL, // TX_8x8 0x5555555555555555ULL, // TX_16x16 @@ -60,7 +60,7 @@ static const uint64_t left_64x64_txform_mask[TX_SIZES]= { // 00000000 // // A loopfilter should be applied to every other 4 the row vertically. -static const uint64_t above_64x64_txform_mask[TX_SIZES]= { +static const uint64_t above_64x64_txform_mask[TX_SIZES] = { 0xffffffffffffffffULL, // TX_4X4 0xffffffffffffffffULL, // TX_8x8 0x00ff00ff00ff00ffULL, // TX_16x16 @@ -134,18 +134,18 @@ static const uint64_t size_mask[BLOCK_SIZES] = { }; // These are used for masking the left and above borders. -static const uint64_t left_border = 0x1111111111111111ULL; +static const uint64_t left_border = 0x1111111111111111ULL; static const uint64_t above_border = 0x000000ff000000ffULL; // 16 bit masks for uv transform sizes. -static const uint16_t left_64x64_txform_mask_uv[TX_SIZES]= { +static const uint16_t left_64x64_txform_mask_uv[TX_SIZES] = { 0xffff, // TX_4X4 0xffff, // TX_8x8 0x5555, // TX_16x16 0x1111, // TX_32x32 }; -static const uint16_t above_64x64_txform_mask_uv[TX_SIZES]= { +static const uint16_t above_64x64_txform_mask_uv[TX_SIZES] = { 0xffff, // TX_4X4 0xffff, // TX_8x8 0x0f0f, // TX_16x16 @@ -201,7 +201,7 @@ static const uint16_t size_mask_uv[BLOCK_SIZES] = { 0x00ff, // BLOCK_64X32, 0xffff, // BLOCK_64X64 }; -static const uint16_t left_border_uv = 0x1111; +static const uint16_t left_border_uv = 0x1111; static const uint16_t above_border_uv = 0x000f; static const int mode_lf_lut[MB_MODE_COUNT] = { @@ -222,8 +222,7 @@ static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) { block_inside_limit = (9 - sharpness_lvl); } - if (block_inside_limit < 1) - block_inside_limit = 1; + if (block_inside_limit < 1) block_inside_limit = 1; memset(lfi->lfthr[lvl].lim, block_inside_limit, SIMD_WIDTH); memset(lfi->lfthr[lvl].mblim, (2 
* (lvl + 2) + block_inside_limit), @@ -233,8 +232,7 @@ static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) { static uint8_t get_filter_level(const loop_filter_info_n *lfi_n, const MODE_INFO *mi) { - return lfi_n->lvl[mi->segment_id][mi->ref_frame[0]] - [mode_lf_lut[mi->mode]]; + return lfi_n->lvl[mi->segment_id][mi->ref_frame[0]][mode_lf_lut[mi->mode]]; } void vp9_loop_filter_init(VP9_COMMON *cm) { @@ -271,9 +269,9 @@ void vp9_loop_filter_frame_init(VP9_COMMON *cm, int default_filt_lvl) { int lvl_seg = default_filt_lvl; if (segfeature_active(seg, seg_id, SEG_LVL_ALT_LF)) { const int data = get_segdata(seg, seg_id, SEG_LVL_ALT_LF); - lvl_seg = clamp(seg->abs_delta == SEGMENT_ABSDATA ? - data : default_filt_lvl + data, - 0, MAX_LOOP_FILTER); + lvl_seg = clamp( + seg->abs_delta == SEGMENT_ABSDATA ? data : default_filt_lvl + data, 0, + MAX_LOOP_FILTER); } if (!lf->mode_ref_delta_enabled) { @@ -287,8 +285,8 @@ void vp9_loop_filter_frame_init(VP9_COMMON *cm, int default_filt_lvl) { for (ref = LAST_FRAME; ref < MAX_REF_FRAMES; ++ref) { for (mode = 0; mode < MAX_MODE_LF_DELTAS; ++mode) { - const int inter_lvl = lvl_seg + lf->ref_deltas[ref] * scale - + lf->mode_deltas[mode] * scale; + const int inter_lvl = lvl_seg + lf->ref_deltas[ref] * scale + + lf->mode_deltas[mode] * scale; lfi->lvl[seg_id][ref][mode] = clamp(inter_lvl, 0, MAX_LOOP_FILTER); } } @@ -296,235 +294,188 @@ void vp9_loop_filter_frame_init(VP9_COMMON *cm, int default_filt_lvl) { } } -static void filter_selectively_vert_row2(int subsampling_factor, - uint8_t *s, int pitch, - unsigned int mask_16x16_l, - unsigned int mask_8x8_l, - unsigned int mask_4x4_l, - unsigned int mask_4x4_int_l, - const loop_filter_info_n *lfi_n, - const uint8_t *lfl) { - const int mask_shift = subsampling_factor ? 4 : 8; - const int mask_cutoff = subsampling_factor ? 
0xf : 0xff; +static void filter_selectively_vert_row2( + int subsampling_factor, uint8_t *s, int pitch, unsigned int mask_16x16, + unsigned int mask_8x8, unsigned int mask_4x4, unsigned int mask_4x4_int, + const loop_filter_thresh *lfthr, const uint8_t *lfl) { + const int dual_mask_cutoff = subsampling_factor ? 0xff : 0xffff; const int lfl_forward = subsampling_factor ? 4 : 8; - - unsigned int mask_16x16_0 = mask_16x16_l & mask_cutoff; - unsigned int mask_8x8_0 = mask_8x8_l & mask_cutoff; - unsigned int mask_4x4_0 = mask_4x4_l & mask_cutoff; - unsigned int mask_4x4_int_0 = mask_4x4_int_l & mask_cutoff; - unsigned int mask_16x16_1 = (mask_16x16_l >> mask_shift) & mask_cutoff; - unsigned int mask_8x8_1 = (mask_8x8_l >> mask_shift) & mask_cutoff; - unsigned int mask_4x4_1 = (mask_4x4_l >> mask_shift) & mask_cutoff; - unsigned int mask_4x4_int_1 = (mask_4x4_int_l >> mask_shift) & mask_cutoff; + const unsigned int dual_one = 1 | (1 << lfl_forward); unsigned int mask; + uint8_t *ss[2]; + ss[0] = s; - for (mask = mask_16x16_0 | mask_8x8_0 | mask_4x4_0 | mask_4x4_int_0 | - mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1; - mask; mask >>= 1) { - const loop_filter_thresh *lfi0 = lfi_n->lfthr + *lfl; - const loop_filter_thresh *lfi1 = lfi_n->lfthr + *(lfl + lfl_forward); + for (mask = + (mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int) & dual_mask_cutoff; + mask; mask = (mask & ~dual_one) >> 1) { + if (mask & dual_one) { + const loop_filter_thresh *lfis[2]; + lfis[0] = lfthr + *lfl; + lfis[1] = lfthr + *(lfl + lfl_forward); + ss[1] = ss[0] + 8 * pitch; - // TODO(yunqingwang): count in loopfilter functions should be removed. 
- if (mask & 1) { - if ((mask_16x16_0 | mask_16x16_1) & 1) { - if ((mask_16x16_0 & mask_16x16_1) & 1) { - vpx_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr); - } else if (mask_16x16_0 & 1) { - vpx_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr); + if (mask_16x16 & dual_one) { + if ((mask_16x16 & dual_one) == dual_one) { + vpx_lpf_vertical_16_dual(ss[0], pitch, lfis[0]->mblim, lfis[0]->lim, + lfis[0]->hev_thr); } else { - vpx_lpf_vertical_16(s + 8 *pitch, pitch, lfi1->mblim, - lfi1->lim, lfi1->hev_thr); + const loop_filter_thresh *lfi = lfis[!(mask_16x16 & 1)]; + vpx_lpf_vertical_16(ss[!(mask_16x16 & 1)], pitch, lfi->mblim, + lfi->lim, lfi->hev_thr); } } - if ((mask_8x8_0 | mask_8x8_1) & 1) { - if ((mask_8x8_0 & mask_8x8_1) & 1) { - vpx_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, lfi1->mblim, lfi1->lim, - lfi1->hev_thr); - } else if (mask_8x8_0 & 1) { - vpx_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr, - 1); + if (mask_8x8 & dual_one) { + if ((mask_8x8 & dual_one) == dual_one) { + vpx_lpf_vertical_8_dual(ss[0], pitch, lfis[0]->mblim, lfis[0]->lim, + lfis[0]->hev_thr, lfis[1]->mblim, + lfis[1]->lim, lfis[1]->hev_thr); } else { - vpx_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim, - lfi1->hev_thr, 1); + const loop_filter_thresh *lfi = lfis[!(mask_8x8 & 1)]; + vpx_lpf_vertical_8(ss[!(mask_8x8 & 1)], pitch, lfi->mblim, lfi->lim, + lfi->hev_thr); } } - if ((mask_4x4_0 | mask_4x4_1) & 1) { - if ((mask_4x4_0 & mask_4x4_1) & 1) { - vpx_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, lfi1->mblim, lfi1->lim, - lfi1->hev_thr); - } else if (mask_4x4_0 & 1) { - vpx_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr, - 1); + if (mask_4x4 & dual_one) { + if ((mask_4x4 & dual_one) == dual_one) { + vpx_lpf_vertical_4_dual(ss[0], pitch, lfis[0]->mblim, lfis[0]->lim, + lfis[0]->hev_thr, lfis[1]->mblim, + lfis[1]->lim, lfis[1]->hev_thr); } else { - 
vpx_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim, - lfi1->hev_thr, 1); + const loop_filter_thresh *lfi = lfis[!(mask_4x4 & 1)]; + vpx_lpf_vertical_4(ss[!(mask_4x4 & 1)], pitch, lfi->mblim, lfi->lim, + lfi->hev_thr); } } - if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) { - if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) { - vpx_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, lfi1->mblim, lfi1->lim, - lfi1->hev_thr); - } else if (mask_4x4_int_0 & 1) { - vpx_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, 1); + if (mask_4x4_int & dual_one) { + if ((mask_4x4_int & dual_one) == dual_one) { + vpx_lpf_vertical_4_dual( + ss[0] + 4, pitch, lfis[0]->mblim, lfis[0]->lim, lfis[0]->hev_thr, + lfis[1]->mblim, lfis[1]->lim, lfis[1]->hev_thr); } else { - vpx_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, lfi1->lim, - lfi1->hev_thr, 1); + const loop_filter_thresh *lfi = lfis[!(mask_4x4_int & 1)]; + vpx_lpf_vertical_4(ss[!(mask_4x4_int & 1)] + 4, pitch, lfi->mblim, + lfi->lim, lfi->hev_thr); } } } - s += 8; + ss[0] += 8; lfl += 1; - mask_16x16_0 >>= 1; - mask_8x8_0 >>= 1; - mask_4x4_0 >>= 1; - mask_4x4_int_0 >>= 1; - mask_16x16_1 >>= 1; - mask_8x8_1 >>= 1; - mask_4x4_1 >>= 1; - mask_4x4_int_1 >>= 1; + mask_16x16 >>= 1; + mask_8x8 >>= 1; + mask_4x4 >>= 1; + mask_4x4_int >>= 1; } } #if CONFIG_VP9_HIGHBITDEPTH -static void highbd_filter_selectively_vert_row2(int subsampling_factor, - uint16_t *s, int pitch, - unsigned int mask_16x16_l, - unsigned int mask_8x8_l, - unsigned int mask_4x4_l, - unsigned int mask_4x4_int_l, - const loop_filter_info_n *lfi_n, - const uint8_t *lfl, int bd) { - const int mask_shift = subsampling_factor ? 4 : 8; - const int mask_cutoff = subsampling_factor ? 
0xf : 0xff; +static void highbd_filter_selectively_vert_row2( + int subsampling_factor, uint16_t *s, int pitch, unsigned int mask_16x16, + unsigned int mask_8x8, unsigned int mask_4x4, unsigned int mask_4x4_int, + const loop_filter_thresh *lfthr, const uint8_t *lfl, int bd) { + const int dual_mask_cutoff = subsampling_factor ? 0xff : 0xffff; const int lfl_forward = subsampling_factor ? 4 : 8; - - unsigned int mask_16x16_0 = mask_16x16_l & mask_cutoff; - unsigned int mask_8x8_0 = mask_8x8_l & mask_cutoff; - unsigned int mask_4x4_0 = mask_4x4_l & mask_cutoff; - unsigned int mask_4x4_int_0 = mask_4x4_int_l & mask_cutoff; - unsigned int mask_16x16_1 = (mask_16x16_l >> mask_shift) & mask_cutoff; - unsigned int mask_8x8_1 = (mask_8x8_l >> mask_shift) & mask_cutoff; - unsigned int mask_4x4_1 = (mask_4x4_l >> mask_shift) & mask_cutoff; - unsigned int mask_4x4_int_1 = (mask_4x4_int_l >> mask_shift) & mask_cutoff; + const unsigned int dual_one = 1 | (1 << lfl_forward); unsigned int mask; + uint16_t *ss[2]; + ss[0] = s; - for (mask = mask_16x16_0 | mask_8x8_0 | mask_4x4_0 | mask_4x4_int_0 | - mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1; - mask; mask >>= 1) { - const loop_filter_thresh *lfi0 = lfi_n->lfthr + *lfl; - const loop_filter_thresh *lfi1 = lfi_n->lfthr + *(lfl + lfl_forward); + for (mask = + (mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int) & dual_mask_cutoff; + mask; mask = (mask & ~dual_one) >> 1) { + if (mask & dual_one) { + const loop_filter_thresh *lfis[2]; + lfis[0] = lfthr + *lfl; + lfis[1] = lfthr + *(lfl + lfl_forward); + ss[1] = ss[0] + 8 * pitch; - // TODO(yunqingwang): count in loopfilter functions should be removed. 
- if (mask & 1) { - if ((mask_16x16_0 | mask_16x16_1) & 1) { - if ((mask_16x16_0 & mask_16x16_1) & 1) { - vpx_highbd_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, bd); - } else if (mask_16x16_0 & 1) { - vpx_highbd_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, bd); + if (mask_16x16 & dual_one) { + if ((mask_16x16 & dual_one) == dual_one) { + vpx_highbd_lpf_vertical_16_dual(ss[0], pitch, lfis[0]->mblim, + lfis[0]->lim, lfis[0]->hev_thr, bd); } else { - vpx_highbd_lpf_vertical_16(s + 8 *pitch, pitch, lfi1->mblim, - lfi1->lim, lfi1->hev_thr, bd); + const loop_filter_thresh *lfi = lfis[!(mask_16x16 & 1)]; + vpx_highbd_lpf_vertical_16(ss[!(mask_16x16 & 1)], pitch, lfi->mblim, + lfi->lim, lfi->hev_thr, bd); } } - if ((mask_8x8_0 | mask_8x8_1) & 1) { - if ((mask_8x8_0 & mask_8x8_1) & 1) { - vpx_highbd_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, lfi1->mblim, lfi1->lim, - lfi1->hev_thr, bd); - } else if (mask_8x8_0 & 1) { - vpx_highbd_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, 1, bd); + if (mask_8x8 & dual_one) { + if ((mask_8x8 & dual_one) == dual_one) { + vpx_highbd_lpf_vertical_8_dual( + ss[0], pitch, lfis[0]->mblim, lfis[0]->lim, lfis[0]->hev_thr, + lfis[1]->mblim, lfis[1]->lim, lfis[1]->hev_thr, bd); } else { - vpx_highbd_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, - lfi1->lim, lfi1->hev_thr, 1, bd); + const loop_filter_thresh *lfi = lfis[!(mask_8x8 & 1)]; + vpx_highbd_lpf_vertical_8(ss[!(mask_8x8 & 1)], pitch, lfi->mblim, + lfi->lim, lfi->hev_thr, bd); } } - if ((mask_4x4_0 | mask_4x4_1) & 1) { - if ((mask_4x4_0 & mask_4x4_1) & 1) { - vpx_highbd_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, lfi1->mblim, lfi1->lim, - lfi1->hev_thr, bd); - } else if (mask_4x4_0 & 1) { - vpx_highbd_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, 1, bd); + if (mask_4x4 & dual_one) { + if ((mask_4x4 & dual_one) == dual_one) { + 
vpx_highbd_lpf_vertical_4_dual( + ss[0], pitch, lfis[0]->mblim, lfis[0]->lim, lfis[0]->hev_thr, + lfis[1]->mblim, lfis[1]->lim, lfis[1]->hev_thr, bd); } else { - vpx_highbd_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, - lfi1->lim, lfi1->hev_thr, 1, bd); + const loop_filter_thresh *lfi = lfis[!(mask_4x4 & 1)]; + vpx_highbd_lpf_vertical_4(ss[!(mask_4x4 & 1)], pitch, lfi->mblim, + lfi->lim, lfi->hev_thr, bd); } } - if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) { - if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) { - vpx_highbd_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, lfi1->mblim, lfi1->lim, - lfi1->hev_thr, bd); - } else if (mask_4x4_int_0 & 1) { - vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim, - lfi0->hev_thr, 1, bd); + if (mask_4x4_int & dual_one) { + if ((mask_4x4_int & dual_one) == dual_one) { + vpx_highbd_lpf_vertical_4_dual( + ss[0] + 4, pitch, lfis[0]->mblim, lfis[0]->lim, lfis[0]->hev_thr, + lfis[1]->mblim, lfis[1]->lim, lfis[1]->hev_thr, bd); } else { - vpx_highbd_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, - lfi1->lim, lfi1->hev_thr, 1, bd); + const loop_filter_thresh *lfi = lfis[!(mask_4x4_int & 1)]; + vpx_highbd_lpf_vertical_4(ss[!(mask_4x4_int & 1)] + 4, pitch, + lfi->mblim, lfi->lim, lfi->hev_thr, bd); } } } - s += 8; + ss[0] += 8; lfl += 1; - mask_16x16_0 >>= 1; - mask_8x8_0 >>= 1; - mask_4x4_0 >>= 1; - mask_4x4_int_0 >>= 1; - mask_16x16_1 >>= 1; - mask_8x8_1 >>= 1; - mask_4x4_1 >>= 1; - mask_4x4_int_1 >>= 1; + mask_16x16 >>= 1; + mask_8x8 >>= 1; + mask_4x4 >>= 1; + mask_4x4_int >>= 1; } } #endif // CONFIG_VP9_HIGHBITDEPTH -static void filter_selectively_horiz(uint8_t *s, int pitch, - unsigned int mask_16x16, - unsigned int mask_8x8, - unsigned int mask_4x4, - unsigned int mask_4x4_int, - const loop_filter_info_n *lfi_n, - const uint8_t *lfl) { +static void filter_selectively_horiz( + uint8_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8, + unsigned int mask_4x4, unsigned int 
mask_4x4_int, + const loop_filter_thresh *lfthr, const uint8_t *lfl) { unsigned int mask; int count; - for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; - mask; mask >>= count) { - const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl; - + for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; mask; + mask >>= count) { count = 1; if (mask & 1) { + const loop_filter_thresh *lfi = lfthr + *lfl; + if (mask_16x16 & 1) { if ((mask_16x16 & 3) == 3) { - vpx_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 2); + vpx_lpf_horizontal_edge_16(s, pitch, lfi->mblim, lfi->lim, + lfi->hev_thr); count = 2; } else { - vpx_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); + vpx_lpf_horizontal_edge_8(s, pitch, lfi->mblim, lfi->lim, + lfi->hev_thr); } } else if (mask_8x8 & 1) { if ((mask_8x8 & 3) == 3) { // Next block's thresholds. - const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1); + const loop_filter_thresh *lfin = lfthr + *(lfl + 1); vpx_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, lfin->mblim, lfin->lim, @@ -537,23 +488,23 @@ static void filter_selectively_horiz(uint8_t *s, int pitch, } else { if (mask_4x4_int & 1) vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); + lfi->hev_thr); else if (mask_4x4_int & 2) vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim, - lfin->lim, lfin->hev_thr, 1); + lfin->lim, lfin->hev_thr); } count = 2; } else { - vpx_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + vpx_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr); if (mask_4x4_int & 1) vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); + lfi->hev_thr); } } else if (mask_4x4 & 1) { if ((mask_4x4 & 3) == 3) { // Next block's thresholds. 
- const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1); + const loop_filter_thresh *lfin = lfthr + *(lfl + 1); vpx_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, lfin->mblim, lfin->lim, @@ -565,22 +516,22 @@ static void filter_selectively_horiz(uint8_t *s, int pitch, } else { if (mask_4x4_int & 1) vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); + lfi->hev_thr); else if (mask_4x4_int & 2) vpx_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim, - lfin->lim, lfin->hev_thr, 1); + lfin->lim, lfin->hev_thr); } count = 2; } else { - vpx_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + vpx_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr); if (mask_4x4_int & 1) vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); + lfi->hev_thr); } - } else if (mask_4x4_int & 1) { + } else { vpx_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1); + lfi->hev_thr); } } s += 8 * count; @@ -593,99 +544,94 @@ static void filter_selectively_horiz(uint8_t *s, int pitch, } #if CONFIG_VP9_HIGHBITDEPTH -static void highbd_filter_selectively_horiz(uint16_t *s, int pitch, - unsigned int mask_16x16, - unsigned int mask_8x8, - unsigned int mask_4x4, - unsigned int mask_4x4_int, - const loop_filter_info_n *lfi_n, - const uint8_t *lfl, int bd) { +static void highbd_filter_selectively_horiz( + uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8, + unsigned int mask_4x4, unsigned int mask_4x4_int, + const loop_filter_thresh *lfthr, const uint8_t *lfl, int bd) { unsigned int mask; int count; - for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; - mask; mask >>= count) { - const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl; - + for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; mask; + mask >>= count) { count = 1; if (mask & 1) { + const loop_filter_thresh *lfi = lfthr + *lfl; + if (mask_16x16 & 1) { if 
((mask_16x16 & 3) == 3) { - vpx_highbd_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 2, bd); + vpx_highbd_lpf_horizontal_edge_16(s, pitch, lfi->mblim, lfi->lim, + lfi->hev_thr, bd); count = 2; } else { - vpx_highbd_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1, bd); + vpx_highbd_lpf_horizontal_edge_8(s, pitch, lfi->mblim, lfi->lim, + lfi->hev_thr, bd); } } else if (mask_8x8 & 1) { if ((mask_8x8 & 3) == 3) { // Next block's thresholds. - const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1); + const loop_filter_thresh *lfin = lfthr + *(lfl + 1); vpx_highbd_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, lfin->mblim, lfin->lim, lfin->hev_thr, bd); if ((mask_4x4_int & 3) == 3) { - vpx_highbd_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, - lfin->mblim, lfin->lim, - lfin->hev_thr, bd); + vpx_highbd_lpf_horizontal_4_dual( + s + 4 * pitch, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, + lfin->mblim, lfin->lim, lfin->hev_thr, bd); } else { if (mask_4x4_int & 1) { vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, 1, bd); + lfi->lim, lfi->hev_thr, bd); } else if (mask_4x4_int & 2) { vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim, - lfin->lim, lfin->hev_thr, 1, bd); + lfin->lim, lfin->hev_thr, bd); } } count = 2; } else { vpx_highbd_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1, bd); + lfi->hev_thr, bd); if (mask_4x4_int & 1) { vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, 1, bd); + lfi->lim, lfi->hev_thr, bd); } } } else if (mask_4x4 & 1) { if ((mask_4x4 & 3) == 3) { // Next block's thresholds. 
- const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1); + const loop_filter_thresh *lfin = lfthr + *(lfl + 1); vpx_highbd_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, lfin->mblim, lfin->lim, lfin->hev_thr, bd); if ((mask_4x4_int & 3) == 3) { - vpx_highbd_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, - lfin->mblim, lfin->lim, - lfin->hev_thr, bd); + vpx_highbd_lpf_horizontal_4_dual( + s + 4 * pitch, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, + lfin->mblim, lfin->lim, lfin->hev_thr, bd); } else { if (mask_4x4_int & 1) { vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, 1, bd); + lfi->lim, lfi->hev_thr, bd); } else if (mask_4x4_int & 2) { vpx_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim, - lfin->lim, lfin->hev_thr, 1, bd); + lfin->lim, lfin->hev_thr, bd); } } count = 2; } else { vpx_highbd_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1, bd); + lfi->hev_thr, bd); if (mask_4x4_int & 1) { vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, - lfi->lim, lfi->hev_thr, 1, bd); + lfi->lim, lfi->hev_thr, bd); } } - } else if (mask_4x4_int & 1) { + } else { vpx_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1, bd); + lfi->hev_thr, bd); } } s += 8 * count; @@ -704,14 +650,12 @@ static void highbd_filter_selectively_horiz(uint16_t *s, int pitch, // whether there were any coefficients encoded, and the loop filter strength // block we are currently looking at. Shift is used to position the // 1's we produce. -// TODO(JBB) Need another function for different resolution color.. 
static void build_masks(const loop_filter_info_n *const lfi_n, const MODE_INFO *mi, const int shift_y, - const int shift_uv, - LOOP_FILTER_MASK *lfm) { + const int shift_uv, LOOP_FILTER_MASK *lfm) { const BLOCK_SIZE block_size = mi->sb_type; const TX_SIZE tx_size_y = mi->tx_size; - const TX_SIZE tx_size_uv = get_uv_tx_size_impl(tx_size_y, block_size, 1, 1); + const TX_SIZE tx_size_uv = uv_txsize_lookup[block_size][tx_size_y][1][1]; const int filter_level = get_filter_level(lfi_n, mi); uint64_t *const left_y = &lfm->left_y[tx_size_y]; uint64_t *const above_y = &lfm->above_y[tx_size_y]; @@ -753,29 +697,28 @@ static void build_masks(const loop_filter_info_n *const lfi_n, // If the block has no coefficients and is not intra we skip applying // the loop filter on block edges. - if (mi->skip && is_inter_block(mi)) - return; + if (mi->skip && is_inter_block(mi)) return; // Here we are adding a mask for the transform size. The transform // size mask is set to be correct for a 64x64 prediction block size. We // mask to match the size of the block we are working on and then shift it // into place.. - *above_y |= (size_mask[block_size] & - above_64x64_txform_mask[tx_size_y]) << shift_y; - *above_uv |= (size_mask_uv[block_size] & - above_64x64_txform_mask_uv[tx_size_uv]) << shift_uv; + *above_y |= (size_mask[block_size] & above_64x64_txform_mask[tx_size_y]) + << shift_y; + *above_uv |= + (size_mask_uv[block_size] & above_64x64_txform_mask_uv[tx_size_uv]) + << shift_uv; - *left_y |= (size_mask[block_size] & - left_64x64_txform_mask[tx_size_y]) << shift_y; - *left_uv |= (size_mask_uv[block_size] & - left_64x64_txform_mask_uv[tx_size_uv]) << shift_uv; + *left_y |= (size_mask[block_size] & left_64x64_txform_mask[tx_size_y]) + << shift_y; + *left_uv |= (size_mask_uv[block_size] & left_64x64_txform_mask_uv[tx_size_uv]) + << shift_uv; // Here we are trying to determine what to do with the internal 4x4 block // boundaries. 
These differ from the 4x4 boundaries on the outside edge of // an 8x8 in that the internal ones can be skipped and don't depend on // the prediction block size. - if (tx_size_y == TX_4X4) - *int_4x4_y |= size_mask[block_size] << shift_y; + if (tx_size_y == TX_4X4) *int_4x4_y |= size_mask[block_size] << shift_y; if (tx_size_uv == TX_4X4) *int_4x4_uv |= (size_mask_uv[block_size] & 0xffff) << shift_uv; @@ -810,21 +753,19 @@ static void build_y_mask(const loop_filter_info_n *const lfi_n, *above_y |= above_prediction_mask[block_size] << shift_y; *left_y |= left_prediction_mask[block_size] << shift_y; - if (mi->skip && is_inter_block(mi)) - return; + if (mi->skip && is_inter_block(mi)) return; - *above_y |= (size_mask[block_size] & - above_64x64_txform_mask[tx_size_y]) << shift_y; + *above_y |= (size_mask[block_size] & above_64x64_txform_mask[tx_size_y]) + << shift_y; - *left_y |= (size_mask[block_size] & - left_64x64_txform_mask[tx_size_y]) << shift_y; + *left_y |= (size_mask[block_size] & left_64x64_txform_mask[tx_size_y]) + << shift_y; - if (tx_size_y == TX_4X4) - *int_4x4_y |= size_mask[block_size] << shift_y; + if (tx_size_y == TX_4X4) *int_4x4_y |= size_mask[block_size] << shift_y; } -void vp9_adjust_mask(VP9_COMMON *const cm, const int mi_row, - const int mi_col, LOOP_FILTER_MASK *lfm) { +void vp9_adjust_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, + LOOP_FILTER_MASK *lfm) { int i; // The largest loopfilter we have is 16x16 so we use the 16x16 mask @@ -851,8 +792,8 @@ void vp9_adjust_mask(VP9_COMMON *const cm, const int mi_row, const uint64_t rows = cm->mi_rows - mi_row; // Each pixel inside the border gets a 1, - const uint64_t mask_y = (((uint64_t) 1 << (rows << 3)) - 1); - const uint16_t mask_uv = (((uint16_t) 1 << (((rows + 1) >> 1) << 2)) - 1); + const uint64_t mask_y = (((uint64_t)1 << (rows << 3)) - 1); + const uint16_t mask_uv = (((uint16_t)1 << (((rows + 1) >> 1) << 2)) - 1); // Remove values completely outside our border. 
for (i = 0; i < TX_32X32; i++) { @@ -881,7 +822,7 @@ void vp9_adjust_mask(VP9_COMMON *const cm, const int mi_row, // Each pixel inside the border gets a 1, the multiply copies the border // to where we need it. - const uint64_t mask_y = (((1 << columns) - 1)) * 0x0101010101010101ULL; + const uint64_t mask_y = (((1 << columns) - 1)) * 0x0101010101010101ULL; const uint16_t mask_uv = ((1 << ((columns + 1) >> 1)) - 1) * 0x1111; // Internal edges are not applied on the last column of the image so @@ -923,7 +864,7 @@ void vp9_adjust_mask(VP9_COMMON *const cm, const int mi_row, assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_4X4])); assert(!(lfm->left_y[TX_8X8] & lfm->left_y[TX_4X4])); assert(!(lfm->int_4x4_y & lfm->left_y[TX_16X16])); - assert(!(lfm->left_uv[TX_16X16]&lfm->left_uv[TX_8X8])); + assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_8X8])); assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_4X4])); assert(!(lfm->left_uv[TX_8X8] & lfm->left_uv[TX_4X4])); assert(!(lfm->int_4x4_uv & lfm->left_uv[TX_16X16])); @@ -939,7 +880,6 @@ void vp9_adjust_mask(VP9_COMMON *const cm, const int mi_row, // This function sets up the bit masks for the entire 64x64 region represented // by mi_row, mi_col. -// TODO(JBB): This function only works for yv12. void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, MODE_INFO **mi, const int mode_info_stride, LOOP_FILTER_MASK *lfm) { @@ -952,48 +892,43 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, // added to the mi ptr as we go through each loop. It helps us to avoid // setting up special row and column counters for each index. The last step // brings us out back to the starting position. 
- const int offset_32[] = {4, (mode_info_stride << 2) - 4, 4, - -(mode_info_stride << 2) - 4}; - const int offset_16[] = {2, (mode_info_stride << 1) - 2, 2, - -(mode_info_stride << 1) - 2}; - const int offset[] = {1, mode_info_stride - 1, 1, -mode_info_stride - 1}; + const int offset_32[] = { 4, (mode_info_stride << 2) - 4, 4, + -(mode_info_stride << 2) - 4 }; + const int offset_16[] = { 2, (mode_info_stride << 1) - 2, 2, + -(mode_info_stride << 1) - 2 }; + const int offset[] = { 1, mode_info_stride - 1, 1, -mode_info_stride - 1 }; // Following variables represent shifts to position the current block // mask over the appropriate block. A shift of 36 to the left will move // the bits for the final 32 by 32 block in the 64x64 up 4 rows and left // 4 rows to the appropriate spot. - const int shift_32_y[] = {0, 4, 32, 36}; - const int shift_16_y[] = {0, 2, 16, 18}; - const int shift_8_y[] = {0, 1, 8, 9}; - const int shift_32_uv[] = {0, 2, 8, 10}; - const int shift_16_uv[] = {0, 1, 4, 5}; - const int max_rows = (mi_row + MI_BLOCK_SIZE > cm->mi_rows ? - cm->mi_rows - mi_row : MI_BLOCK_SIZE); - const int max_cols = (mi_col + MI_BLOCK_SIZE > cm->mi_cols ? - cm->mi_cols - mi_col : MI_BLOCK_SIZE); + const int shift_32_y[] = { 0, 4, 32, 36 }; + const int shift_16_y[] = { 0, 2, 16, 18 }; + const int shift_8_y[] = { 0, 1, 8, 9 }; + const int shift_32_uv[] = { 0, 2, 8, 10 }; + const int shift_16_uv[] = { 0, 1, 4, 5 }; + const int max_rows = + (mi_row + MI_BLOCK_SIZE > cm->mi_rows ? cm->mi_rows - mi_row + : MI_BLOCK_SIZE); + const int max_cols = + (mi_col + MI_BLOCK_SIZE > cm->mi_cols ? cm->mi_cols - mi_col + : MI_BLOCK_SIZE); vp9_zero(*lfm); assert(mip[0] != NULL); - // TODO(jimbankoski): Try moving most of the following code into decode - // loop and storing lfm in the mbmi structure so that we don't have to go - // through the recursive loop structure multiple times. 
switch (mip[0]->sb_type) { - case BLOCK_64X64: - build_masks(lfi_n, mip[0] , 0, 0, lfm); - break; + case BLOCK_64X64: build_masks(lfi_n, mip[0], 0, 0, lfm); break; case BLOCK_64X32: build_masks(lfi_n, mip[0], 0, 0, lfm); mip2 = mip + mode_info_stride * 4; - if (4 >= max_rows) - break; + if (4 >= max_rows) break; build_masks(lfi_n, mip2[0], 32, 8, lfm); break; case BLOCK_32X64: build_masks(lfi_n, mip[0], 0, 0, lfm); mip2 = mip + 4; - if (4 >= max_cols) - break; + if (4 >= max_cols) break; build_masks(lfi_n, mip2[0], 4, 2, lfm); break; default: @@ -1010,15 +945,13 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, break; case BLOCK_32X16: build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); - if (mi_32_row_offset + 2 >= max_rows) - continue; + if (mi_32_row_offset + 2 >= max_rows) continue; mip2 = mip + mode_info_stride * 2; build_masks(lfi_n, mip2[0], shift_y + 16, shift_uv + 4, lfm); break; case BLOCK_16X32: build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); - if (mi_32_col_offset + 2 >= max_cols) - continue; + if (mi_32_col_offset + 2 >= max_cols) continue; mip2 = mip + 2; build_masks(lfi_n, mip2[0], shift_y + 2, shift_uv + 1, lfm); break; @@ -1026,10 +959,10 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) { const int shift_y = shift_32_y[idx_32] + shift_16_y[idx_16]; const int shift_uv = shift_32_uv[idx_32] + shift_16_uv[idx_16]; - const int mi_16_col_offset = mi_32_col_offset + - ((idx_16 & 1) << 1); - const int mi_16_row_offset = mi_32_row_offset + - ((idx_16 >> 1) << 1); + const int mi_16_col_offset = + mi_32_col_offset + ((idx_16 & 1) << 1); + const int mi_16_row_offset = + mi_32_row_offset + ((idx_16 >> 1) << 1); if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows) continue; @@ -1040,32 +973,28 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, break; case BLOCK_16X8: build_masks(lfi_n, 
mip[0], shift_y, shift_uv, lfm); - if (mi_16_row_offset + 1 >= max_rows) - continue; + if (mi_16_row_offset + 1 >= max_rows) continue; mip2 = mip + mode_info_stride; - build_y_mask(lfi_n, mip2[0], shift_y+8, lfm); + build_y_mask(lfi_n, mip2[0], shift_y + 8, lfm); break; case BLOCK_8X16: build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); - if (mi_16_col_offset +1 >= max_cols) - continue; + if (mi_16_col_offset + 1 >= max_cols) continue; mip2 = mip + 1; - build_y_mask(lfi_n, mip2[0], shift_y+1, lfm); + build_y_mask(lfi_n, mip2[0], shift_y + 1, lfm); break; default: { - const int shift_y = shift_32_y[idx_32] + - shift_16_y[idx_16] + - shift_8_y[0]; + const int shift_y = + shift_32_y[idx_32] + shift_16_y[idx_16] + shift_8_y[0]; build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm); mip += offset[0]; for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) { const int shift_y = shift_32_y[idx_32] + - shift_16_y[idx_16] + - shift_8_y[idx_8]; - const int mi_8_col_offset = mi_16_col_offset + - ((idx_8 & 1)); - const int mi_8_row_offset = mi_16_row_offset + - ((idx_8 >> 1)); + shift_16_y[idx_16] + shift_8_y[idx_8]; + const int mi_8_col_offset = + mi_16_col_offset + ((idx_8 & 1)); + const int mi_8_row_offset = + mi_16_row_offset + ((idx_8 >> 1)); if (mi_8_col_offset >= max_cols || mi_8_row_offset >= max_rows) @@ -1081,34 +1010,29 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col, } break; } - - vp9_adjust_mask(cm, mi_row, mi_col, lfm); } -static void filter_selectively_vert(uint8_t *s, int pitch, - unsigned int mask_16x16, - unsigned int mask_8x8, - unsigned int mask_4x4, - unsigned int mask_4x4_int, - const loop_filter_info_n *lfi_n, - const uint8_t *lfl) { +static void filter_selectively_vert( + uint8_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8, + unsigned int mask_4x4, unsigned int mask_4x4_int, + const loop_filter_thresh *lfthr, const uint8_t *lfl) { unsigned int mask; - for (mask = mask_16x16 | mask_8x8 | mask_4x4 | 
mask_4x4_int; - mask; mask >>= 1) { - const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl; + for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; mask; + mask >>= 1) { + const loop_filter_thresh *lfi = lfthr + *lfl; if (mask & 1) { if (mask_16x16 & 1) { vpx_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr); } else if (mask_8x8 & 1) { - vpx_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + vpx_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr); } else if (mask_4x4 & 1) { - vpx_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + vpx_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr); } } if (mask_4x4_int & 1) - vpx_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1); + vpx_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr); s += 8; lfl += 1; mask_16x16 >>= 1; @@ -1119,34 +1043,31 @@ static void filter_selectively_vert(uint8_t *s, int pitch, } #if CONFIG_VP9_HIGHBITDEPTH -static void highbd_filter_selectively_vert(uint16_t *s, int pitch, - unsigned int mask_16x16, - unsigned int mask_8x8, - unsigned int mask_4x4, - unsigned int mask_4x4_int, - const loop_filter_info_n *lfi_n, - const uint8_t *lfl, int bd) { +static void highbd_filter_selectively_vert( + uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8, + unsigned int mask_4x4, unsigned int mask_4x4_int, + const loop_filter_thresh *lfthr, const uint8_t *lfl, int bd) { unsigned int mask; - for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; - mask; mask >>= 1) { - const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl; + for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int; mask; + mask >>= 1) { + const loop_filter_thresh *lfi = lfthr + *lfl; if (mask & 1) { if (mask_16x16 & 1) { - vpx_highbd_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, bd); + vpx_highbd_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, + bd); } else if (mask_8x8 & 1) { - 
vpx_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1, bd); + vpx_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, + bd); } else if (mask_4x4 & 1) { - vpx_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1, bd); + vpx_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, + bd); } } if (mask_4x4_int & 1) vpx_highbd_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, - lfi->hev_thr, 1, bd); + lfi->hev_thr, bd); s += 8; lfl += 1; mask_16x16 >>= 1; @@ -1159,19 +1080,18 @@ static void highbd_filter_selectively_vert(uint16_t *s, int pitch, void vp9_filter_block_plane_non420(VP9_COMMON *cm, struct macroblockd_plane *plane, - MODE_INFO **mi_8x8, - int mi_row, int mi_col) { + MODE_INFO **mi_8x8, int mi_row, int mi_col) { const int ss_x = plane->subsampling_x; const int ss_y = plane->subsampling_y; const int row_step = 1 << ss_y; const int col_step = 1 << ss_x; const int row_step_stride = cm->mi_stride * row_step; struct buf_2d *const dst = &plane->dst; - uint8_t* const dst0 = dst->buf; - unsigned int mask_16x16[MI_BLOCK_SIZE] = {0}; - unsigned int mask_8x8[MI_BLOCK_SIZE] = {0}; - unsigned int mask_4x4[MI_BLOCK_SIZE] = {0}; - unsigned int mask_4x4_int[MI_BLOCK_SIZE] = {0}; + uint8_t *const dst0 = dst->buf; + unsigned int mask_16x16[MI_BLOCK_SIZE] = { 0 }; + unsigned int mask_8x8[MI_BLOCK_SIZE] = { 0 }; + unsigned int mask_4x4[MI_BLOCK_SIZE] = { 0 }; + unsigned int mask_4x4_int[MI_BLOCK_SIZE] = { 0 }; uint8_t lfl[MI_BLOCK_SIZE * MI_BLOCK_SIZE]; int r, c; @@ -1187,20 +1107,23 @@ void vp9_filter_block_plane_non420(VP9_COMMON *cm, const BLOCK_SIZE sb_type = mi[0].sb_type; const int skip_this = mi[0].skip && is_inter_block(mi); // left edge of current unit is block/partition edge -> no skip - const int block_edge_left = (num_4x4_blocks_wide_lookup[sb_type] > 1) ? - !(c & (num_8x8_blocks_wide_lookup[sb_type] - 1)) : 1; + const int block_edge_left = + (num_4x4_blocks_wide_lookup[sb_type] > 1) + ? 
!(c & (num_8x8_blocks_wide_lookup[sb_type] - 1)) + : 1; const int skip_this_c = skip_this && !block_edge_left; // top edge of current unit is block/partition edge -> no skip - const int block_edge_above = (num_4x4_blocks_high_lookup[sb_type] > 1) ? - !(r & (num_8x8_blocks_high_lookup[sb_type] - 1)) : 1; + const int block_edge_above = + (num_4x4_blocks_high_lookup[sb_type] > 1) + ? !(r & (num_8x8_blocks_high_lookup[sb_type] - 1)) + : 1; const int skip_this_r = skip_this && !block_edge_above; const TX_SIZE tx_size = get_uv_tx_size(mi, plane); const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1; const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1; // Filter level can vary per MI - if (!(lfl[(r << 3) + (c >> ss_x)] = - get_filter_level(&cm->lf_info, mi))) + if (!(lfl[(r << 3) + (c >> ss_x)] = get_filter_level(&cm->lf_info, mi))) continue; // Build masks based on the transform size of each block @@ -1255,29 +1178,19 @@ void vp9_filter_block_plane_non420(VP9_COMMON *cm, border_mask = ~(mi_col == 0); #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) { - highbd_filter_selectively_vert(CONVERT_TO_SHORTPTR(dst->buf), - dst->stride, - mask_16x16_c & border_mask, - mask_8x8_c & border_mask, - mask_4x4_c & border_mask, - mask_4x4_int[r], - &cm->lf_info, &lfl[r << 3], - (int)cm->bit_depth); + highbd_filter_selectively_vert( + CONVERT_TO_SHORTPTR(dst->buf), dst->stride, + mask_16x16_c & border_mask, mask_8x8_c & border_mask, + mask_4x4_c & border_mask, mask_4x4_int[r], cm->lf_info.lfthr, + &lfl[r << 3], (int)cm->bit_depth); } else { - filter_selectively_vert(dst->buf, dst->stride, - mask_16x16_c & border_mask, +#endif // CONFIG_VP9_HIGHBITDEPTH + filter_selectively_vert(dst->buf, dst->stride, mask_16x16_c & border_mask, mask_8x8_c & border_mask, - mask_4x4_c & border_mask, - mask_4x4_int[r], - &cm->lf_info, &lfl[r << 3]); + mask_4x4_c & border_mask, mask_4x4_int[r], + cm->lf_info.lfthr, &lfl[r << 3]); +#if CONFIG_VP9_HIGHBITDEPTH } -#else - 
filter_selectively_vert(dst->buf, dst->stride, - mask_16x16_c & border_mask, - mask_8x8_c & border_mask, - mask_4x4_c & border_mask, - mask_4x4_int[r], - &cm->lf_info, &lfl[r << 3]); #endif // CONFIG_VP9_HIGHBITDEPTH dst->buf += 8 * dst->stride; mi_8x8 += row_step_stride; @@ -1304,29 +1217,17 @@ void vp9_filter_block_plane_non420(VP9_COMMON *cm, } #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) { - highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf), - dst->stride, - mask_16x16_r, - mask_8x8_r, - mask_4x4_r, - mask_4x4_int_r, - &cm->lf_info, &lfl[r << 3], - (int)cm->bit_depth); + highbd_filter_selectively_horiz( + CONVERT_TO_SHORTPTR(dst->buf), dst->stride, mask_16x16_r, mask_8x8_r, + mask_4x4_r, mask_4x4_int_r, cm->lf_info.lfthr, &lfl[r << 3], + (int)cm->bit_depth); } else { - filter_selectively_horiz(dst->buf, dst->stride, - mask_16x16_r, - mask_8x8_r, - mask_4x4_r, - mask_4x4_int_r, - &cm->lf_info, &lfl[r << 3]); +#endif // CONFIG_VP9_HIGHBITDEPTH + filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r, + mask_4x4_r, mask_4x4_int_r, cm->lf_info.lfthr, + &lfl[r << 3]); +#if CONFIG_VP9_HIGHBITDEPTH } -#else - filter_selectively_horiz(dst->buf, dst->stride, - mask_16x16_r, - mask_8x8_r, - mask_4x4_r, - mask_4x4_int_r, - &cm->lf_info, &lfl[r << 3]); #endif // CONFIG_VP9_HIGHBITDEPTH dst->buf += 8 * dst->stride; } @@ -1334,8 +1235,7 @@ void vp9_filter_block_plane_non420(VP9_COMMON *cm, void vp9_filter_block_plane_ss00(VP9_COMMON *const cm, struct macroblockd_plane *const plane, - int mi_row, - LOOP_FILTER_MASK *lfm) { + int mi_row, LOOP_FILTER_MASK *lfm) { struct buf_2d *const dst = &plane->dst; uint8_t *const dst0 = dst->buf; int r; @@ -1348,27 +1248,23 @@ void vp9_filter_block_plane_ss00(VP9_COMMON *const cm, // Vertical pass: do 2 rows at one time for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 2) { - unsigned int mask_16x16_l = mask_16x16 & 0xffff; - unsigned int mask_8x8_l = mask_8x8 & 0xffff; - unsigned int 
mask_4x4_l = mask_4x4 & 0xffff; - unsigned int mask_4x4_int_l = mask_4x4_int & 0xffff; - -// Disable filtering on the leftmost column. #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) { + // Disable filtering on the leftmost column. highbd_filter_selectively_vert_row2( plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride, - mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, &cm->lf_info, + (unsigned int)mask_16x16, (unsigned int)mask_8x8, + (unsigned int)mask_4x4, (unsigned int)mask_4x4_int, cm->lf_info.lfthr, &lfm->lfl_y[r << 3], (int)cm->bit_depth); } else { +#endif // CONFIG_VP9_HIGHBITDEPTH + // Disable filtering on the leftmost column. filter_selectively_vert_row2( - plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l, - mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_y[r << 3]); + plane->subsampling_x, dst->buf, dst->stride, (unsigned int)mask_16x16, + (unsigned int)mask_8x8, (unsigned int)mask_4x4, + (unsigned int)mask_4x4_int, cm->lf_info.lfthr, &lfm->lfl_y[r << 3]); +#if CONFIG_VP9_HIGHBITDEPTH } -#else - filter_selectively_vert_row2( - plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l, - mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_y[r << 3]); #endif // CONFIG_VP9_HIGHBITDEPTH dst->buf += 16 * dst->stride; mask_16x16 >>= 16; @@ -1403,17 +1299,15 @@ void vp9_filter_block_plane_ss00(VP9_COMMON *const cm, if (cm->use_highbitdepth) { highbd_filter_selectively_horiz( CONVERT_TO_SHORTPTR(dst->buf), dst->stride, mask_16x16_r, mask_8x8_r, - mask_4x4_r, mask_4x4_int & 0xff, &cm->lf_info, &lfm->lfl_y[r << 3], - (int)cm->bit_depth); + mask_4x4_r, mask_4x4_int & 0xff, cm->lf_info.lfthr, + &lfm->lfl_y[r << 3], (int)cm->bit_depth); } else { +#endif // CONFIG_VP9_HIGHBITDEPTH filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r, - mask_4x4_r, mask_4x4_int & 0xff, &cm->lf_info, - &lfm->lfl_y[r << 3]); + mask_4x4_r, mask_4x4_int & 0xff, + cm->lf_info.lfthr, &lfm->lfl_y[r << 3]); +#if 
CONFIG_VP9_HIGHBITDEPTH } -#else - filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r, - mask_4x4_r, mask_4x4_int & 0xff, &cm->lf_info, - &lfm->lfl_y[r << 3]); #endif // CONFIG_VP9_HIGHBITDEPTH dst->buf += 8 * dst->stride; @@ -1426,8 +1320,7 @@ void vp9_filter_block_plane_ss00(VP9_COMMON *const cm, void vp9_filter_block_plane_ss11(VP9_COMMON *const cm, struct macroblockd_plane *const plane, - int mi_row, - LOOP_FILTER_MASK *lfm) { + int mi_row, LOOP_FILTER_MASK *lfm) { struct buf_2d *const dst = &plane->dst; uint8_t *const dst0 = dst->buf; int r, c; @@ -1447,38 +1340,30 @@ void vp9_filter_block_plane_ss11(VP9_COMMON *const cm, lfl_uv[((r + 2) << 1) + c] = lfm->lfl_y[((r + 2) << 3) + (c << 1)]; } - { - unsigned int mask_16x16_l = mask_16x16 & 0xff; - unsigned int mask_8x8_l = mask_8x8 & 0xff; - unsigned int mask_4x4_l = mask_4x4 & 0xff; - unsigned int mask_4x4_int_l = mask_4x4_int & 0xff; - -// Disable filtering on the leftmost column. #if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) { - highbd_filter_selectively_vert_row2( - plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride, - mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, &cm->lf_info, - &lfl_uv[r << 1], (int)cm->bit_depth); - } else { - filter_selectively_vert_row2( - plane->subsampling_x, dst->buf, dst->stride, - mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, &cm->lf_info, - &lfl_uv[r << 1]); - } -#else + if (cm->use_highbitdepth) { + // Disable filtering on the leftmost column. + highbd_filter_selectively_vert_row2( + plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride, + (unsigned int)mask_16x16, (unsigned int)mask_8x8, + (unsigned int)mask_4x4, (unsigned int)mask_4x4_int, cm->lf_info.lfthr, + &lfl_uv[r << 1], (int)cm->bit_depth); + } else { +#endif // CONFIG_VP9_HIGHBITDEPTH + // Disable filtering on the leftmost column. 
filter_selectively_vert_row2( - plane->subsampling_x, dst->buf, dst->stride, - mask_16x16_l, mask_8x8_l, mask_4x4_l, mask_4x4_int_l, &cm->lf_info, - &lfl_uv[r << 1]); + plane->subsampling_x, dst->buf, dst->stride, (unsigned int)mask_16x16, + (unsigned int)mask_8x8, (unsigned int)mask_4x4, + (unsigned int)mask_4x4_int, cm->lf_info.lfthr, &lfl_uv[r << 1]); +#if CONFIG_VP9_HIGHBITDEPTH + } #endif // CONFIG_VP9_HIGHBITDEPTH - dst->buf += 16 * dst->stride; - mask_16x16 >>= 8; - mask_8x8 >>= 8; - mask_4x4 >>= 8; - mask_4x4_int >>= 8; - } + dst->buf += 16 * dst->stride; + mask_16x16 >>= 8; + mask_8x8 >>= 8; + mask_4x4 >>= 8; + mask_4x4_int >>= 8; } // Horizontal pass @@ -1508,19 +1393,17 @@ void vp9_filter_block_plane_ss11(VP9_COMMON *const cm, #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) { - highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf), - dst->stride, mask_16x16_r, mask_8x8_r, - mask_4x4_r, mask_4x4_int_r, &cm->lf_info, - &lfl_uv[r << 1], (int)cm->bit_depth); + highbd_filter_selectively_horiz( + CONVERT_TO_SHORTPTR(dst->buf), dst->stride, mask_16x16_r, mask_8x8_r, + mask_4x4_r, mask_4x4_int_r, cm->lf_info.lfthr, &lfl_uv[r << 1], + (int)cm->bit_depth); } else { +#endif // CONFIG_VP9_HIGHBITDEPTH filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r, - mask_4x4_r, mask_4x4_int_r, &cm->lf_info, + mask_4x4_r, mask_4x4_int_r, cm->lf_info.lfthr, &lfl_uv[r << 1]); +#if CONFIG_VP9_HIGHBITDEPTH } -#else - filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r, - mask_4x4_r, mask_4x4_int_r, &cm->lf_info, - &lfl_uv[r << 1]); #endif // CONFIG_VP9_HIGHBITDEPTH dst->buf += 8 * dst->stride; @@ -1556,7 +1439,7 @@ static void loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, VP9_COMMON *cm, vp9_setup_dst_planes(planes, frame_buffer, mi_row, mi_col); - // TODO(JBB): Make setup_mask work for non 420. + // TODO(jimbankoski): For 444 only need to do y mask. 
vp9_adjust_mask(cm, mi_row, mi_col, lfm); vp9_filter_block_plane_ss00(cm, &planes[0], mi_row, lfm); @@ -1578,10 +1461,9 @@ static void loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, VP9_COMMON *cm, } } -void vp9_loop_filter_frame(YV12_BUFFER_CONFIG *frame, - VP9_COMMON *cm, MACROBLOCKD *xd, - int frame_filter_level, - int y_only, int partial_frame) { +void vp9_loop_filter_frame(YV12_BUFFER_CONFIG *frame, VP9_COMMON *cm, + MACROBLOCKD *xd, int frame_filter_level, int y_only, + int partial_frame) { int start_mi_row, end_mi_row, mi_rows_to_filter; if (!frame_filter_level) return; start_mi_row = 0; @@ -1596,6 +1478,8 @@ void vp9_loop_filter_frame(YV12_BUFFER_CONFIG *frame, } // Used by the encoder to build the loopfilter masks. +// TODO(slavarnway): Do the encoder the same way the decoder does it and +// build the masks in line as part of the encode process. void vp9_build_mask_frame(VP9_COMMON *cm, int frame_filter_level, int partial_frame) { int start_mi_row, end_mi_row, mi_rows_to_filter; @@ -1625,26 +1509,22 @@ void vp9_build_mask_frame(VP9_COMMON *cm, int frame_filter_level, // 8x8 blocks in a superblock. A "1" represents the first block in a 16x16 // or greater area. static const uint8_t first_block_in_16x16[8][8] = { - {1, 0, 1, 0, 1, 0, 1, 0}, - {0, 0, 0, 0, 0, 0, 0, 0}, - {1, 0, 1, 0, 1, 0, 1, 0}, - {0, 0, 0, 0, 0, 0, 0, 0}, - {1, 0, 1, 0, 1, 0, 1, 0}, - {0, 0, 0, 0, 0, 0, 0, 0}, - {1, 0, 1, 0, 1, 0, 1, 0}, - {0, 0, 0, 0, 0, 0, 0, 0} + { 1, 0, 1, 0, 1, 0, 1, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 1, 0, 1, 0, 1, 0, 1, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 1, 0, 1, 0, 1, 0, 1, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 }, + { 1, 0, 1, 0, 1, 0, 1, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 } }; // This function sets up the bit masks for a block represented // by mi_row, mi_col in a 64x64 region. // TODO(SJL): This function only works for yv12. 
-void vp9_build_mask(VP9_COMMON *cm, const MODE_INFO *mi, int mi_row, - int mi_col, int bw, int bh) { +void vp9_build_mask(VP9_COMMON *cm, const MODE_INFO *mi, int mi_row, int mi_col, + int bw, int bh) { const BLOCK_SIZE block_size = mi->sb_type; const TX_SIZE tx_size_y = mi->tx_size; const loop_filter_info_n *const lfi_n = &cm->lf_info; const int filter_level = get_filter_level(lfi_n, mi); - const TX_SIZE tx_size_uv = get_uv_tx_size_impl(tx_size_y, block_size, 1, 1); + const TX_SIZE tx_size_uv = uv_txsize_lookup[block_size][tx_size_y][1][1]; LOOP_FILTER_MASK *const lfm = get_lfm(&cm->lf, mi_row, mi_col); uint64_t *const left_y = &lfm->left_y[tx_size_y]; uint64_t *const above_y = &lfm->above_y[tx_size_y]; @@ -1691,30 +1571,30 @@ void vp9_build_mask(VP9_COMMON *cm, const MODE_INFO *mi, int mi_row, // If the block has no coefficients and is not intra we skip applying // the loop filter on block edges. - if (mi->skip && is_inter_block(mi)) - return; + if (mi->skip && is_inter_block(mi)) return; // Add a mask for the transform size. The transform size mask is set to // be correct for a 64x64 prediction block size. Mask to match the size of // the block we are working on and then shift it into place. 
- *above_y |= (size_mask[block_size] & - above_64x64_txform_mask[tx_size_y]) << shift_y; - *left_y |= (size_mask[block_size] & - left_64x64_txform_mask[tx_size_y]) << shift_y; + *above_y |= (size_mask[block_size] & above_64x64_txform_mask[tx_size_y]) + << shift_y; + *left_y |= (size_mask[block_size] & left_64x64_txform_mask[tx_size_y]) + << shift_y; if (build_uv) { - *above_uv |= (size_mask_uv[block_size] & - above_64x64_txform_mask_uv[tx_size_uv]) << shift_uv; + *above_uv |= + (size_mask_uv[block_size] & above_64x64_txform_mask_uv[tx_size_uv]) + << shift_uv; - *left_uv |= (size_mask_uv[block_size] & - left_64x64_txform_mask_uv[tx_size_uv]) << shift_uv; + *left_uv |= + (size_mask_uv[block_size] & left_64x64_txform_mask_uv[tx_size_uv]) + << shift_uv; } // Try to determine what to do with the internal 4x4 block boundaries. These // differ from the 4x4 boundaries on the outside edge of an 8x8 in that the // internal ones can be skipped and don't depend on the prediction block size. - if (tx_size_y == TX_4X4) - *int_4x4_y |= size_mask[block_size] << shift_y; + if (tx_size_y == TX_4X4) *int_4x4_y |= size_mask[block_size] << shift_y; if (build_uv && tx_size_uv == TX_4X4) *int_4x4_uv |= (size_mask_uv[block_size] & 0xffff) << shift_uv; @@ -1733,9 +1613,8 @@ void vp9_loop_filter_data_reset( void vp9_reset_lfm(VP9_COMMON *const cm) { if (cm->lf.filter_level) { - memset(cm->lf.lfm, 0, - ((cm->mi_rows + (MI_BLOCK_SIZE - 1)) >> 3) * cm->lf.lfm_stride * - sizeof(*cm->lf.lfm)); + memset(cm->lf.lfm, 0, ((cm->mi_rows + (MI_BLOCK_SIZE - 1)) >> 3) * + cm->lf.lfm_stride * sizeof(*cm->lf.lfm)); } } diff --git a/libs/libvpx/vp9/common/vp9_loopfilter.h b/libs/libvpx/vp9/common/vp9_loopfilter.h index fca8830fa1..da37a6ebde 100644 --- a/libs/libvpx/vp9/common/vp9_loopfilter.h +++ b/libs/libvpx/vp9/common/vp9_loopfilter.h @@ -26,8 +26,8 @@ extern "C" { #define SIMD_WIDTH 16 -#define MAX_REF_LF_DELTAS 4 -#define MAX_MODE_LF_DELTAS 2 +#define MAX_REF_LF_DELTAS 4 +#define MAX_MODE_LF_DELTAS 2 
enum lf_path { LF_PATH_420, @@ -96,25 +96,21 @@ struct VP9LfSyncData; // This function sets up the bit masks for the entire 64x64 region represented // by mi_row, mi_col. -void vp9_setup_mask(struct VP9Common *const cm, - const int mi_row, const int mi_col, - MODE_INFO **mi_8x8, const int mode_info_stride, - LOOP_FILTER_MASK *lfm); +void vp9_setup_mask(struct VP9Common *const cm, const int mi_row, + const int mi_col, MODE_INFO **mi_8x8, + const int mode_info_stride, LOOP_FILTER_MASK *lfm); void vp9_filter_block_plane_ss00(struct VP9Common *const cm, struct macroblockd_plane *const plane, - int mi_row, - LOOP_FILTER_MASK *lfm); + int mi_row, LOOP_FILTER_MASK *lfm); void vp9_filter_block_plane_ss11(struct VP9Common *const cm, struct macroblockd_plane *const plane, - int mi_row, - LOOP_FILTER_MASK *lfm); + int mi_row, LOOP_FILTER_MASK *lfm); void vp9_filter_block_plane_non420(struct VP9Common *cm, struct macroblockd_plane *plane, - MODE_INFO **mi_8x8, - int mi_row, int mi_col); + MODE_INFO **mi_8x8, int mi_row, int mi_col); void vp9_loop_filter_init(struct VP9Common *cm); @@ -123,10 +119,8 @@ void vp9_loop_filter_init(struct VP9Common *cm); // calls this function directly. void vp9_loop_filter_frame_init(struct VP9Common *cm, int default_filt_lvl); -void vp9_loop_filter_frame(YV12_BUFFER_CONFIG *frame, - struct VP9Common *cm, - struct macroblockd *mbd, - int filter_level, +void vp9_loop_filter_frame(YV12_BUFFER_CONFIG *frame, struct VP9Common *cm, + struct macroblockd *mbd, int filter_level, int y_only, int partial_frame); // Get the superblock lfm for a given mi_row, mi_col. diff --git a/libs/libvpx/vp9/common/vp9_mfqe.c b/libs/libvpx/vp9/common/vp9_mfqe.c index f5264665bd..e76d771b8d 100644 --- a/libs/libvpx/vp9/common/vp9_mfqe.c +++ b/libs/libvpx/vp9/common/vp9_mfqe.c @@ -19,39 +19,36 @@ // TODO(jackychen): Replace this function with SSE2 code. There is // one SSE2 implementation in vp8, so will consider how to share it // between vp8 and vp9. 
-static void filter_by_weight(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - int block_size, int src_weight) { +static void filter_by_weight(const uint8_t *src, int src_stride, uint8_t *dst, + int dst_stride, int block_size, int src_weight) { const int dst_weight = (1 << MFQE_PRECISION) - src_weight; const int rounding_bit = 1 << (MFQE_PRECISION - 1); int r, c; for (r = 0; r < block_size; r++) { for (c = 0; c < block_size; c++) { - dst[c] = (src[c] * src_weight + dst[c] * dst_weight + rounding_bit) - >> MFQE_PRECISION; + dst[c] = (src[c] * src_weight + dst[c] * dst_weight + rounding_bit) >> + MFQE_PRECISION; } src += src_stride; dst += dst_stride; } } -void vp9_filter_by_weight8x8_c(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, int src_weight) { +void vp9_filter_by_weight8x8_c(const uint8_t *src, int src_stride, uint8_t *dst, + int dst_stride, int src_weight) { filter_by_weight(src, src_stride, dst, dst_stride, 8, src_weight); } void vp9_filter_by_weight16x16_c(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - int src_weight) { + uint8_t *dst, int dst_stride, int src_weight) { filter_by_weight(src, src_stride, dst, dst_stride, 16, src_weight); } static void filter_by_weight32x32(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int weight) { vp9_filter_by_weight16x16(src, src_stride, dst, dst_stride, weight); - vp9_filter_by_weight16x16(src + 16, src_stride, dst + 16, dst_stride, - weight); + vp9_filter_by_weight16x16(src + 16, src_stride, dst + 16, dst_stride, weight); vp9_filter_by_weight16x16(src + src_stride * 16, src_stride, dst + dst_stride * 16, dst_stride, weight); vp9_filter_by_weight16x16(src + src_stride * 16 + 16, src_stride, @@ -61,8 +58,7 @@ static void filter_by_weight32x32(const uint8_t *src, int src_stride, static void filter_by_weight64x64(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int weight) { filter_by_weight32x32(src, src_stride, dst, 
dst_stride, weight); - filter_by_weight32x32(src + 32, src_stride, dst + 32, - dst_stride, weight); + filter_by_weight32x32(src + 32, src_stride, dst + 32, dst_stride, weight); filter_by_weight32x32(src + src_stride * 32, src_stride, dst + dst_stride * 32, dst_stride, weight); filter_by_weight32x32(src + src_stride * 32 + 32, src_stride, @@ -72,8 +68,7 @@ static void filter_by_weight64x64(const uint8_t *src, int src_stride, static void apply_ifactor(const uint8_t *y, int y_stride, uint8_t *yd, int yd_stride, const uint8_t *u, const uint8_t *v, int uv_stride, uint8_t *ud, uint8_t *vd, - int uvd_stride, BLOCK_SIZE block_size, - int weight) { + int uvd_stride, BLOCK_SIZE block_size, int weight) { if (block_size == BLOCK_16X16) { vp9_filter_by_weight16x16(y, y_stride, yd, yd_stride, weight); vp9_filter_by_weight8x8(u, uv_stride, ud, uvd_stride, weight); @@ -90,8 +85,8 @@ static void apply_ifactor(const uint8_t *y, int y_stride, uint8_t *yd, } // TODO(jackychen): Determine whether replace it with assembly code. 
-static void copy_mem8x8(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride) { +static void copy_mem8x8(const uint8_t *src, int src_stride, uint8_t *dst, + int dst_stride) { int r; for (r = 0; r < 8; r++) { memcpy(dst, src, 8); @@ -100,8 +95,8 @@ static void copy_mem8x8(const uint8_t *src, int src_stride, } } -static void copy_mem16x16(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride) { +static void copy_mem16x16(const uint8_t *src, int src_stride, uint8_t *dst, + int dst_stride) { int r; for (r = 0; r < 16; r++) { memcpy(dst, src, 16); @@ -110,22 +105,22 @@ static void copy_mem16x16(const uint8_t *src, int src_stride, } } -static void copy_mem32x32(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride) { +static void copy_mem32x32(const uint8_t *src, int src_stride, uint8_t *dst, + int dst_stride) { copy_mem16x16(src, src_stride, dst, dst_stride); copy_mem16x16(src + 16, src_stride, dst + 16, dst_stride); - copy_mem16x16(src + src_stride * 16, src_stride, - dst + dst_stride * 16, dst_stride); + copy_mem16x16(src + src_stride * 16, src_stride, dst + dst_stride * 16, + dst_stride); copy_mem16x16(src + src_stride * 16 + 16, src_stride, dst + dst_stride * 16 + 16, dst_stride); } -static void copy_mem64x64(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride) { +static void copy_mem64x64(const uint8_t *src, int src_stride, uint8_t *dst, + int dst_stride) { copy_mem32x32(src, src_stride, dst, dst_stride); copy_mem32x32(src + 32, src_stride, dst + 32, dst_stride); - copy_mem32x32(src + src_stride * 32, src_stride, - dst + src_stride * 32, dst_stride); + copy_mem32x32(src + src_stride * 32, src_stride, dst + src_stride * 32, + dst_stride); copy_mem32x32(src + src_stride * 32 + 32, src_stride, dst + src_stride * 32 + 32, dst_stride); } @@ -195,30 +190,27 @@ static void mfqe_block(BLOCK_SIZE bs, const uint8_t *y, const uint8_t *u, uvd_stride, bs, ifactor); } else { // Copy the block from current frame (i.e., no 
mfqe is done). - copy_block(y, u, v, y_stride, uv_stride, yd, ud, vd, - yd_stride, uvd_stride, bs); + copy_block(y, u, v, y_stride, uv_stride, yd, ud, vd, yd_stride, uvd_stride, + bs); } } static int mfqe_decision(MODE_INFO *mi, BLOCK_SIZE cur_bs) { // Check the motion in current block(for inter frame), // or check the motion in the correlated block in last frame (for keyframe). - const int mv_len_square = mi->mv[0].as_mv.row * - mi->mv[0].as_mv.row + - mi->mv[0].as_mv.col * - mi->mv[0].as_mv.col; + const int mv_len_square = mi->mv[0].as_mv.row * mi->mv[0].as_mv.row + + mi->mv[0].as_mv.col * mi->mv[0].as_mv.col; const int mv_threshold = 100; return mi->mode >= NEARESTMV && // Not an intra block - cur_bs >= BLOCK_16X16 && - mv_len_square <= mv_threshold; + cur_bs >= BLOCK_16X16 && mv_len_square <= mv_threshold; } // Process each partiton in a super block, recursively. static void mfqe_partition(VP9_COMMON *cm, MODE_INFO *mi, BLOCK_SIZE bs, - const uint8_t *y, const uint8_t *u, - const uint8_t *v, int y_stride, int uv_stride, - uint8_t *yd, uint8_t *ud, uint8_t *vd, - int yd_stride, int uvd_stride) { + const uint8_t *y, const uint8_t *u, const uint8_t *v, + int y_stride, int uv_stride, uint8_t *yd, + uint8_t *ud, uint8_t *vd, int yd_stride, + int uvd_stride) { int mi_offset, y_offset, uv_offset; const BLOCK_SIZE cur_bs = mi->sb_type; const int qdiff = cm->base_qindex - cm->postproc_state.last_base_qindex; @@ -255,12 +247,12 @@ static void mfqe_partition(VP9_COMMON *cm, MODE_INFO *mi, BLOCK_SIZE bs, } if (mfqe_decision(mi, mfqe_bs)) { // Do mfqe on the first square partition. - mfqe_block(bs_tmp, y, u, v, y_stride, uv_stride, - yd, ud, vd, yd_stride, uvd_stride, qdiff); + mfqe_block(bs_tmp, y, u, v, y_stride, uv_stride, yd, ud, vd, yd_stride, + uvd_stride, qdiff); // Do mfqe on the second square partition. 
- mfqe_block(bs_tmp, y + y_offset, u + uv_offset, v + uv_offset, - y_stride, uv_stride, yd + y_offset, ud + uv_offset, - vd + uv_offset, yd_stride, uvd_stride, qdiff); + mfqe_block(bs_tmp, y + y_offset, u + uv_offset, v + uv_offset, y_stride, + uv_stride, yd + y_offset, ud + uv_offset, vd + uv_offset, + yd_stride, uvd_stride, qdiff); } if (mfqe_decision(mi + mi_offset * cm->mi_stride, mfqe_bs)) { // Do mfqe on the first square partition. @@ -271,11 +263,11 @@ static void mfqe_partition(VP9_COMMON *cm, MODE_INFO *mi, BLOCK_SIZE bs, // Do mfqe on the second square partition. mfqe_block(bs_tmp, y + y_offset * y_stride + y_offset, u + uv_offset * uv_stride + uv_offset, - v + uv_offset * uv_stride + uv_offset, y_stride, - uv_stride, yd + y_offset * yd_stride + y_offset, + v + uv_offset * uv_stride + uv_offset, y_stride, uv_stride, + yd + y_offset * yd_stride + y_offset, ud + uv_offset * uvd_stride + uv_offset, - vd + uv_offset * uvd_stride + uv_offset, - yd_stride, uvd_stride, qdiff); + vd + uv_offset * uvd_stride + uv_offset, yd_stride, + uvd_stride, qdiff); } break; case PARTITION_VERT: @@ -288,8 +280,8 @@ static void mfqe_partition(VP9_COMMON *cm, MODE_INFO *mi, BLOCK_SIZE bs, } if (mfqe_decision(mi, mfqe_bs)) { // Do mfqe on the first square partition. - mfqe_block(bs_tmp, y, u, v, y_stride, uv_stride, - yd, ud, vd, yd_stride, uvd_stride, qdiff); + mfqe_block(bs_tmp, y, u, v, y_stride, uv_stride, yd, ud, vd, yd_stride, + uvd_stride, qdiff); // Do mfqe on the second square partition. mfqe_block(bs_tmp, y + y_offset * y_stride, u + uv_offset * uv_stride, v + uv_offset * uv_stride, y_stride, uv_stride, @@ -298,28 +290,28 @@ static void mfqe_partition(VP9_COMMON *cm, MODE_INFO *mi, BLOCK_SIZE bs, } if (mfqe_decision(mi + mi_offset, mfqe_bs)) { // Do mfqe on the first square partition. 
- mfqe_block(bs_tmp, y + y_offset, u + uv_offset, v + uv_offset, - y_stride, uv_stride, yd + y_offset, ud + uv_offset, - vd + uv_offset, yd_stride, uvd_stride, qdiff); + mfqe_block(bs_tmp, y + y_offset, u + uv_offset, v + uv_offset, y_stride, + uv_stride, yd + y_offset, ud + uv_offset, vd + uv_offset, + yd_stride, uvd_stride, qdiff); // Do mfqe on the second square partition. mfqe_block(bs_tmp, y + y_offset * y_stride + y_offset, u + uv_offset * uv_stride + uv_offset, - v + uv_offset * uv_stride + uv_offset, y_stride, - uv_stride, yd + y_offset * yd_stride + y_offset, + v + uv_offset * uv_stride + uv_offset, y_stride, uv_stride, + yd + y_offset * yd_stride + y_offset, ud + uv_offset * uvd_stride + uv_offset, - vd + uv_offset * uvd_stride + uv_offset, - yd_stride, uvd_stride, qdiff); + vd + uv_offset * uvd_stride + uv_offset, yd_stride, + uvd_stride, qdiff); } break; case PARTITION_NONE: if (mfqe_decision(mi, cur_bs)) { // Do mfqe on this partition. - mfqe_block(cur_bs, y, u, v, y_stride, uv_stride, - yd, ud, vd, yd_stride, uvd_stride, qdiff); + mfqe_block(cur_bs, y, u, v, y_stride, uv_stride, yd, ud, vd, yd_stride, + uvd_stride, qdiff); } else { // Copy the block from current frame(i.e., no mfqe is done). 
- copy_block(y, u, v, y_stride, uv_stride, yd, ud, vd, - yd_stride, uvd_stride, bs); + copy_block(y, u, v, y_stride, uv_stride, yd, ud, vd, yd_stride, + uvd_stride, bs); } break; case PARTITION_SPLIT: @@ -335,17 +327,16 @@ static void mfqe_partition(VP9_COMMON *cm, MODE_INFO *mi, BLOCK_SIZE bs, v + uv_offset * uv_stride, y_stride, uv_stride, yd + y_offset * yd_stride, ud + uv_offset * uvd_stride, vd + uv_offset * uvd_stride, yd_stride, uvd_stride); - mfqe_partition(cm, mi + mi_offset * cm->mi_stride + mi_offset, - subsize, y + y_offset * y_stride + y_offset, + mfqe_partition(cm, mi + mi_offset * cm->mi_stride + mi_offset, subsize, + y + y_offset * y_stride + y_offset, u + uv_offset * uv_stride + uv_offset, - v + uv_offset * uv_stride + uv_offset, y_stride, - uv_stride, yd + y_offset * yd_stride + y_offset, + v + uv_offset * uv_stride + uv_offset, y_stride, uv_stride, + yd + y_offset * yd_stride + y_offset, ud + uv_offset * uvd_stride + uv_offset, - vd + uv_offset * uvd_stride + uv_offset, - yd_stride, uvd_stride); + vd + uv_offset * uvd_stride + uv_offset, yd_stride, + uvd_stride); break; - default: - assert(0); + default: assert(0); } } @@ -361,8 +352,8 @@ void vp9_mfqe(VP9_COMMON *cm) { MODE_INFO *mi; MODE_INFO *mi_local = cm->mi + (mi_row * cm->mi_stride + mi_col); // Motion Info in last frame. 
- MODE_INFO *mi_prev = cm->postproc_state.prev_mi + - (mi_row * cm->mi_stride + mi_col); + MODE_INFO *mi_prev = + cm->postproc_state.prev_mi + (mi_row * cm->mi_stride + mi_col); const uint32_t y_stride = show->y_stride; const uint32_t uv_stride = show->uv_stride; const uint32_t yd_stride = dest->y_stride; @@ -371,17 +362,15 @@ void vp9_mfqe(VP9_COMMON *cm) { const uint32_t row_offset_uv = mi_row << 2; const uint32_t col_offset_y = mi_col << 3; const uint32_t col_offset_uv = mi_col << 2; - const uint8_t *y = show->y_buffer + row_offset_y * y_stride + - col_offset_y; - const uint8_t *u = show->u_buffer + row_offset_uv * uv_stride + - col_offset_uv; - const uint8_t *v = show->v_buffer + row_offset_uv * uv_stride + - col_offset_uv; + const uint8_t *y = + show->y_buffer + row_offset_y * y_stride + col_offset_y; + const uint8_t *u = + show->u_buffer + row_offset_uv * uv_stride + col_offset_uv; + const uint8_t *v = + show->v_buffer + row_offset_uv * uv_stride + col_offset_uv; uint8_t *yd = dest->y_buffer + row_offset_y * yd_stride + col_offset_y; - uint8_t *ud = dest->u_buffer + row_offset_uv * uvd_stride + - col_offset_uv; - uint8_t *vd = dest->v_buffer + row_offset_uv * uvd_stride + - col_offset_uv; + uint8_t *ud = dest->u_buffer + row_offset_uv * uvd_stride + col_offset_uv; + uint8_t *vd = dest->v_buffer + row_offset_uv * uvd_stride + col_offset_uv; if (frame_is_intra_only(cm)) { mi = mi_prev; } else { diff --git a/libs/libvpx/vp9/common/vp9_mv.h b/libs/libvpx/vp9/common/vp9_mv.h index 5d89da8c25..4c8eac7213 100644 --- a/libs/libvpx/vp9/common/vp9_mv.h +++ b/libs/libvpx/vp9/common/vp9_mv.h @@ -39,11 +39,11 @@ static INLINE int is_zero_mv(const MV *mv) { } static INLINE int is_equal_mv(const MV *a, const MV *b) { - return *((const uint32_t *)a) == *((const uint32_t *)b); + return *((const uint32_t *)a) == *((const uint32_t *)b); } -static INLINE void clamp_mv(MV *mv, int min_col, int max_col, - int min_row, int max_row) { +static INLINE void clamp_mv(MV *mv, int 
min_col, int max_col, int min_row, + int max_row) { mv->col = clamp(mv->col, min_col, max_col); mv->row = clamp(mv->row, min_row, max_row); } diff --git a/libs/libvpx/vp9/common/vp9_mvref_common.c b/libs/libvpx/vp9/common/vp9_mvref_common.c index 0eb01a51ba..70f77aba1f 100644 --- a/libs/libvpx/vp9/common/vp9_mvref_common.c +++ b/libs/libvpx/vp9/common/vp9_mvref_common.c @@ -15,16 +15,17 @@ // to try and find candidate reference vectors. static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd, MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame, - int_mv *mv_ref_list, - int block, int mi_row, int mi_col, - uint8_t *mode_context) { + int_mv *mv_ref_list, int block, int mi_row, + int mi_col, uint8_t *mode_context) { const int *ref_sign_bias = cm->ref_frame_sign_bias; int i, refmv_count = 0; const POSITION *const mv_ref_search = mv_ref_blocks[mi->sb_type]; int different_ref_found = 0; int context_counter = 0; - const MV_REF *const prev_frame_mvs = cm->use_prev_frame_mvs ? - cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col : NULL; + const MV_REF *const prev_frame_mvs = + cm->use_prev_frame_mvs + ? cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col + : NULL; const TileInfo *const tile = &xd->tile; // Blank the reference vector list @@ -36,8 +37,8 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd, for (i = 0; i < 2; ++i) { const POSITION *const mv_ref = &mv_ref_search[i]; if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) { - const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row * - xd->mi_stride]; + const MODE_INFO *const candidate_mi = + xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride]; // Keep counts for entropy encoding. 
context_counter += mode_2_counter[candidate_mi->mode]; different_ref_found = 1; @@ -120,7 +121,7 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd, } } - Done: +Done: mode_context[ref_frame] = counter_to_context[context_counter]; @@ -131,16 +132,14 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd, void vp9_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd, MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame, - int_mv *mv_ref_list, - int mi_row, int mi_col, + int_mv *mv_ref_list, int mi_row, int mi_col, uint8_t *mode_context) { - find_mv_refs_idx(cm, xd, mi, ref_frame, mv_ref_list, -1, - mi_row, mi_col, mode_context); + find_mv_refs_idx(cm, xd, mi, ref_frame, mv_ref_list, -1, mi_row, mi_col, + mode_context); } -void vp9_find_best_ref_mvs(MACROBLOCKD *xd, int allow_hp, - int_mv *mvlist, int_mv *nearest_mv, - int_mv *near_mv) { +void vp9_find_best_ref_mvs(MACROBLOCKD *xd, int allow_hp, int_mv *mvlist, + int_mv *nearest_mv, int_mv *near_mv) { int i; // Make sure all the candidates are properly clamped etc for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) { @@ -151,8 +150,8 @@ void vp9_find_best_ref_mvs(MACROBLOCKD *xd, int allow_hp, *near_mv = mvlist[1]; } -void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd, - int block, int ref, int mi_row, int mi_col, +void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd, int block, + int ref, int mi_row, int mi_col, int_mv *nearest_mv, int_mv *near_mv, uint8_t *mode_context) { int_mv mv_list[MAX_MV_REF_CANDIDATES]; @@ -162,8 +161,8 @@ void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd, assert(MAX_MV_REF_CANDIDATES == 2); - find_mv_refs_idx(cm, xd, mi, mi->ref_frame[ref], mv_list, block, - mi_row, mi_col, mode_context); + find_mv_refs_idx(cm, xd, mi, mi->ref_frame[ref], mv_list, block, mi_row, + mi_col, mode_context); near_mv->as_int = 0; switch (block) { @@ -195,7 +194,6 @@ void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd, } 
break; } - default: - assert(0 && "Invalid block index."); + default: assert(0 && "Invalid block index."); } } diff --git a/libs/libvpx/vp9/common/vp9_mvref_common.h b/libs/libvpx/vp9/common/vp9_mvref_common.h index 4380843e24..2b2c1ba9ee 100644 --- a/libs/libvpx/vp9/common/vp9_mvref_common.h +++ b/libs/libvpx/vp9/common/vp9_mvref_common.h @@ -18,8 +18,8 @@ extern "C" { #endif #define LEFT_TOP_MARGIN ((VP9_ENC_BORDER_IN_PIXELS - VP9_INTERP_EXTEND) << 3) -#define RIGHT_BOTTOM_MARGIN ((VP9_ENC_BORDER_IN_PIXELS -\ - VP9_INTERP_EXTEND) << 3) +#define RIGHT_BOTTOM_MARGIN \ + ((VP9_ENC_BORDER_IN_PIXELS - VP9_INTERP_EXTEND) << 3) #define MVREF_NEIGHBOURS 8 @@ -65,61 +65,149 @@ static const int mode_2_counter[MB_MODE_COUNT] = { // 2. However the actual count can never be greater than 2 so the highest // counter we need is 18. 9 is an invalid counter that's never used. static const int counter_to_context[19] = { - BOTH_PREDICTED, // 0 - NEW_PLUS_NON_INTRA, // 1 - BOTH_NEW, // 2 - ZERO_PLUS_PREDICTED, // 3 - NEW_PLUS_NON_INTRA, // 4 - INVALID_CASE, // 5 - BOTH_ZERO, // 6 - INVALID_CASE, // 7 - INVALID_CASE, // 8 + BOTH_PREDICTED, // 0 + NEW_PLUS_NON_INTRA, // 1 + BOTH_NEW, // 2 + ZERO_PLUS_PREDICTED, // 3 + NEW_PLUS_NON_INTRA, // 4 + INVALID_CASE, // 5 + BOTH_ZERO, // 6 + INVALID_CASE, // 7 + INVALID_CASE, // 8 INTRA_PLUS_NON_INTRA, // 9 INTRA_PLUS_NON_INTRA, // 10 - INVALID_CASE, // 11 + INVALID_CASE, // 11 INTRA_PLUS_NON_INTRA, // 12 - INVALID_CASE, // 13 - INVALID_CASE, // 14 - INVALID_CASE, // 15 - INVALID_CASE, // 16 - INVALID_CASE, // 17 - BOTH_INTRA // 18 + INVALID_CASE, // 13 + INVALID_CASE, // 14 + INVALID_CASE, // 15 + INVALID_CASE, // 16 + INVALID_CASE, // 17 + BOTH_INTRA // 18 }; static const POSITION mv_ref_blocks[BLOCK_SIZES][MVREF_NEIGHBOURS] = { // 4X4 - {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}}, + { { -1, 0 }, + { 0, -1 }, + { -1, -1 }, + { -2, 0 }, + { 0, -2 }, + { -2, -1 }, + { -1, -2 }, + { -2, -2 } }, // 4X8 - {{-1, 0}, 
{0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}}, + { { -1, 0 }, + { 0, -1 }, + { -1, -1 }, + { -2, 0 }, + { 0, -2 }, + { -2, -1 }, + { -1, -2 }, + { -2, -2 } }, // 8X4 - {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}}, + { { -1, 0 }, + { 0, -1 }, + { -1, -1 }, + { -2, 0 }, + { 0, -2 }, + { -2, -1 }, + { -1, -2 }, + { -2, -2 } }, // 8X8 - {{-1, 0}, {0, -1}, {-1, -1}, {-2, 0}, {0, -2}, {-2, -1}, {-1, -2}, {-2, -2}}, + { { -1, 0 }, + { 0, -1 }, + { -1, -1 }, + { -2, 0 }, + { 0, -2 }, + { -2, -1 }, + { -1, -2 }, + { -2, -2 } }, // 8X16 - {{0, -1}, {-1, 0}, {1, -1}, {-1, -1}, {0, -2}, {-2, 0}, {-2, -1}, {-1, -2}}, + { { 0, -1 }, + { -1, 0 }, + { 1, -1 }, + { -1, -1 }, + { 0, -2 }, + { -2, 0 }, + { -2, -1 }, + { -1, -2 } }, // 16X8 - {{-1, 0}, {0, -1}, {-1, 1}, {-1, -1}, {-2, 0}, {0, -2}, {-1, -2}, {-2, -1}}, + { { -1, 0 }, + { 0, -1 }, + { -1, 1 }, + { -1, -1 }, + { -2, 0 }, + { 0, -2 }, + { -1, -2 }, + { -2, -1 } }, // 16X16 - {{-1, 0}, {0, -1}, {-1, 1}, {1, -1}, {-1, -1}, {-3, 0}, {0, -3}, {-3, -3}}, + { { -1, 0 }, + { 0, -1 }, + { -1, 1 }, + { 1, -1 }, + { -1, -1 }, + { -3, 0 }, + { 0, -3 }, + { -3, -3 } }, // 16X32 - {{0, -1}, {-1, 0}, {2, -1}, {-1, -1}, {-1, 1}, {0, -3}, {-3, 0}, {-3, -3}}, + { { 0, -1 }, + { -1, 0 }, + { 2, -1 }, + { -1, -1 }, + { -1, 1 }, + { 0, -3 }, + { -3, 0 }, + { -3, -3 } }, // 32X16 - {{-1, 0}, {0, -1}, {-1, 2}, {-1, -1}, {1, -1}, {-3, 0}, {0, -3}, {-3, -3}}, + { { -1, 0 }, + { 0, -1 }, + { -1, 2 }, + { -1, -1 }, + { 1, -1 }, + { -3, 0 }, + { 0, -3 }, + { -3, -3 } }, // 32X32 - {{-1, 1}, {1, -1}, {-1, 2}, {2, -1}, {-1, -1}, {-3, 0}, {0, -3}, {-3, -3}}, + { { -1, 1 }, + { 1, -1 }, + { -1, 2 }, + { 2, -1 }, + { -1, -1 }, + { -3, 0 }, + { 0, -3 }, + { -3, -3 } }, // 32X64 - {{0, -1}, {-1, 0}, {4, -1}, {-1, 2}, {-1, -1}, {0, -3}, {-3, 0}, {2, -1}}, + { { 0, -1 }, + { -1, 0 }, + { 4, -1 }, + { -1, 2 }, + { -1, -1 }, + { 0, -3 }, + { -3, 0 }, + { 2, -1 } }, // 64X32 - {{-1, 0}, {0, -1}, {-1, 4}, {2, 
-1}, {-1, -1}, {-3, 0}, {0, -3}, {-1, 2}}, + { { -1, 0 }, + { 0, -1 }, + { -1, 4 }, + { 2, -1 }, + { -1, -1 }, + { -3, 0 }, + { 0, -3 }, + { -1, 2 } }, // 64X64 - {{-1, 3}, {3, -1}, {-1, 4}, {4, -1}, {-1, -1}, {-1, 0}, {0, -1}, {-1, 6}} + { { -1, 3 }, + { 3, -1 }, + { -1, 4 }, + { 4, -1 }, + { -1, -1 }, + { -1, 0 }, + { 0, -1 }, + { -1, 6 } } }; static const int idx_n_column_to_subblock[4][2] = { - {1, 2}, - {1, 3}, - {3, 2}, - {3, 3} + { 1, 2 }, { 1, 3 }, { 3, 2 }, { 3, 3 } }; // clamp_mv_ref @@ -127,9 +215,8 @@ static const int idx_n_column_to_subblock[4][2] = { static INLINE void clamp_mv_ref(MV *mv, const MACROBLOCKD *xd) { clamp_mv(mv, xd->mb_to_left_edge - MV_BORDER, - xd->mb_to_right_edge + MV_BORDER, - xd->mb_to_top_edge - MV_BORDER, - xd->mb_to_bottom_edge + MV_BORDER); + xd->mb_to_right_edge + MV_BORDER, xd->mb_to_top_edge - MV_BORDER, + xd->mb_to_bottom_edge + MV_BORDER); } // This function returns either the appropriate sub block or block's mv @@ -137,12 +224,12 @@ static INLINE void clamp_mv_ref(MV *mv, const MACROBLOCKD *xd) { static INLINE int_mv get_sub_block_mv(const MODE_INFO *candidate, int which_mv, int search_col, int block_idx) { return block_idx >= 0 && candidate->sb_type < BLOCK_8X8 - ? candidate->bmi[idx_n_column_to_subblock[block_idx][search_col == 0]] - .as_mv[which_mv] - : candidate->mv[which_mv]; + ? candidate + ->bmi[idx_n_column_to_subblock[block_idx][search_col == 0]] + .as_mv[which_mv] + : candidate->mv[which_mv]; } - // Performs mv sign inversion if indicated by the reference frame combination. static INLINE int_mv scale_mv(const MODE_INFO *mi, int ref, const MV_REFERENCE_FRAME this_ref_frame, @@ -159,40 +246,37 @@ static INLINE int_mv scale_mv(const MODE_INFO *mi, int ref, // already in the list. If it's the second motion vector it will also // skip all additional processing and jump to Done! 
#define ADD_MV_REF_LIST(mv, refmv_count, mv_ref_list, Done) \ - do { \ - if (refmv_count) { \ - if ((mv).as_int != (mv_ref_list)[0].as_int) { \ - (mv_ref_list)[(refmv_count)] = (mv); \ - goto Done; \ - } \ - } else { \ - (mv_ref_list)[(refmv_count)++] = (mv); \ - } \ + do { \ + if (refmv_count) { \ + if ((mv).as_int != (mv_ref_list)[0].as_int) { \ + (mv_ref_list)[(refmv_count)] = (mv); \ + goto Done; \ + } \ + } else { \ + (mv_ref_list)[(refmv_count)++] = (mv); \ + } \ } while (0) // If either reference frame is different, not INTRA, and they // are different from each other scale and add the mv to our list. #define IF_DIFF_REF_FRAME_ADD_MV(mbmi, ref_frame, ref_sign_bias, refmv_count, \ - mv_ref_list, Done) \ - do { \ - if (is_inter_block(mbmi)) { \ - if ((mbmi)->ref_frame[0] != ref_frame) \ - ADD_MV_REF_LIST(scale_mv((mbmi), 0, ref_frame, ref_sign_bias), \ - refmv_count, mv_ref_list, Done); \ - if (has_second_ref(mbmi) && \ - (mbmi)->ref_frame[1] != ref_frame && \ - (mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) \ - ADD_MV_REF_LIST(scale_mv((mbmi), 1, ref_frame, ref_sign_bias), \ - refmv_count, mv_ref_list, Done); \ - } \ + mv_ref_list, Done) \ + do { \ + if (is_inter_block(mbmi)) { \ + if ((mbmi)->ref_frame[0] != ref_frame) \ + ADD_MV_REF_LIST(scale_mv((mbmi), 0, ref_frame, ref_sign_bias), \ + refmv_count, mv_ref_list, Done); \ + if (has_second_ref(mbmi) && (mbmi)->ref_frame[1] != ref_frame && \ + (mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) \ + ADD_MV_REF_LIST(scale_mv((mbmi), 1, ref_frame, ref_sign_bias), \ + refmv_count, mv_ref_list, Done); \ + } \ } while (0) - // Checks that the given mi_row, mi_col and search point // are inside the borders of the tile. 
-static INLINE int is_inside(const TileInfo *const tile, - int mi_col, int mi_row, int mi_rows, - const POSITION *mi_pos) { +static INLINE int is_inside(const TileInfo *const tile, int mi_col, int mi_row, + int mi_rows, const POSITION *mi_pos) { return !(mi_row + mi_pos->row < 0 || mi_col + mi_pos->col < tile->mi_col_start || mi_row + mi_pos->row >= mi_rows || @@ -202,18 +286,16 @@ static INLINE int is_inside(const TileInfo *const tile, // TODO(jingning): this mv clamping function should be block size dependent. static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) { clamp_mv(mv, xd->mb_to_left_edge - LEFT_TOP_MARGIN, - xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN, - xd->mb_to_top_edge - LEFT_TOP_MARGIN, - xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN); + xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN, + xd->mb_to_top_edge - LEFT_TOP_MARGIN, + xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN); } static INLINE void lower_mv_precision(MV *mv, int allow_hp) { const int use_hp = allow_hp && use_mv_hp(mv); if (!use_hp) { - if (mv->row & 1) - mv->row += (mv->row > 0 ? -1 : 1); - if (mv->col & 1) - mv->col += (mv->col > 0 ? -1 : 1); + if (mv->row & 1) mv->row += (mv->row > 0 ? -1 : 1); + if (mv->col & 1) mv->col += (mv->col > 0 ? 
-1 : 1); } } @@ -226,11 +308,11 @@ void vp9_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd, // check a list of motion vectors by sad score using a number rows of pixels // above and a number cols of pixels in the left to select the one with best // score to use as ref motion vector -void vp9_find_best_ref_mvs(MACROBLOCKD *xd, int allow_hp, - int_mv *mvlist, int_mv *nearest_mv, int_mv *near_mv); +void vp9_find_best_ref_mvs(MACROBLOCKD *xd, int allow_hp, int_mv *mvlist, + int_mv *nearest_mv, int_mv *near_mv); -void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd, - int block, int ref, int mi_row, int mi_col, +void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd, int block, + int ref, int mi_row, int mi_col, int_mv *nearest_mv, int_mv *near_mv, uint8_t *mode_context); diff --git a/libs/libvpx/vp9/common/vp9_onyxc_int.h b/libs/libvpx/vp9/common/vp9_onyxc_int.h index fd674cbc6f..32db7b7aa7 100644 --- a/libs/libvpx/vp9/common/vp9_onyxc_int.h +++ b/libs/libvpx/vp9/common/vp9_onyxc_int.h @@ -55,12 +55,11 @@ extern const struct { PARTITION_CONTEXT left; } partition_context_lookup[BLOCK_SIZES]; - typedef enum { - SINGLE_REFERENCE = 0, - COMPOUND_REFERENCE = 1, + SINGLE_REFERENCE = 0, + COMPOUND_REFERENCE = 1, REFERENCE_MODE_SELECT = 2, - REFERENCE_MODES = 3, + REFERENCE_MODES = 3, } REFERENCE_MODE; typedef struct { @@ -90,9 +89,9 @@ typedef struct { } RefCntBuffer; typedef struct BufferPool { - // Protect BufferPool from being accessed by several FrameWorkers at - // the same time during frame parallel decode. - // TODO(hkuang): Try to use atomic variable instead of locking the whole pool. +// Protect BufferPool from being accessed by several FrameWorkers at +// the same time during frame parallel decode. +// TODO(hkuang): Try to use atomic variable instead of locking the whole pool. 
#if CONFIG_MULTITHREAD pthread_mutex_t pool_mutex; #endif @@ -110,7 +109,7 @@ typedef struct BufferPool { } BufferPool; typedef struct VP9Common { - struct vpx_internal_error_info error; + struct vpx_internal_error_info error; vpx_color_space_t color_space; vpx_color_range_t color_range; int width; @@ -155,7 +154,7 @@ typedef struct VP9Common { YV12_BUFFER_CONFIG post_proc_buffer_int; #endif - FRAME_TYPE last_frame_type; /* last frame's frame type for motion search.*/ + FRAME_TYPE last_frame_type; /* last frame's frame type for motion search.*/ FRAME_TYPE frame_type; int show_frame; @@ -229,9 +228,9 @@ typedef struct VP9Common { loop_filter_info_n lf_info; - int refresh_frame_context; /* Two state 0 = NO, 1 = YES */ + int refresh_frame_context; /* Two state 0 = NO, 1 = YES */ - int ref_frame_sign_bias[MAX_REF_FRAMES]; /* Two state 0, 1 */ + int ref_frame_sign_bias[MAX_REF_FRAMES]; /* Two state 0, 1 */ struct loopfilter lf; struct segmentation seg; @@ -245,9 +244,9 @@ typedef struct VP9Common { MV_REFERENCE_FRAME comp_var_ref[2]; REFERENCE_MODE reference_mode; - FRAME_CONTEXT *fc; /* this frame entropy */ - FRAME_CONTEXT *frame_contexts; // FRAME_CONTEXTS - unsigned int frame_context_idx; /* Context to use/update */ + FRAME_CONTEXT *fc; /* this frame entropy */ + FRAME_CONTEXT *frame_contexts; // FRAME_CONTEXTS + unsigned int frame_context_idx; /* Context to use/update */ FRAME_COUNTS counts; unsigned int current_video_frame; @@ -258,7 +257,7 @@ typedef struct VP9Common { vpx_bit_depth_t dequant_bit_depth; // bit_depth of current dequantizer #if CONFIG_VP9_POSTPROC - struct postproc_state postproc_state; + struct postproc_state postproc_state; #endif int error_resilient_mode; @@ -290,10 +289,8 @@ void lock_buffer_pool(BufferPool *const pool); void unlock_buffer_pool(BufferPool *const pool); static INLINE YV12_BUFFER_CONFIG *get_ref_frame(VP9_COMMON *cm, int index) { - if (index < 0 || index >= REF_FRAMES) - return NULL; - if (cm->ref_frame_map[index] < 0) - return 
NULL; + if (index < 0 || index >= REF_FRAMES) return NULL; + if (cm->ref_frame_map[index] < 0) return NULL; assert(cm->ref_frame_map[index] < FRAME_BUFFERS); return &cm->buffer_pool->frame_bufs[cm->ref_frame_map[index]].buf; } @@ -308,8 +305,7 @@ static INLINE int get_free_fb(VP9_COMMON *cm) { lock_buffer_pool(cm->buffer_pool); for (i = 0; i < FRAME_BUFFERS; ++i) - if (frame_bufs[i].ref_count == 0) - break; + if (frame_bufs[i].ref_count == 0) break; if (i != FRAME_BUFFERS) { frame_bufs[i].ref_count = 1; @@ -344,9 +340,9 @@ static INLINE int frame_is_intra_only(const VP9_COMMON *const cm) { static INLINE void set_partition_probs(const VP9_COMMON *const cm, MACROBLOCKD *const xd) { xd->partition_probs = - frame_is_intra_only(cm) ? - &vp9_kf_partition_probs[0] : - (const vpx_prob (*)[PARTITION_TYPES - 1])cm->fc->partition_prob; + frame_is_intra_only(cm) + ? &vp9_kf_partition_probs[0] + : (const vpx_prob(*)[PARTITION_TYPES - 1])cm->fc->partition_prob; } static INLINE void vp9_init_macroblockd(VP9_COMMON *cm, MACROBLOCKD *xd, @@ -355,7 +351,8 @@ static INLINE void vp9_init_macroblockd(VP9_COMMON *cm, MACROBLOCKD *xd, for (i = 0; i < MAX_MB_PLANE; ++i) { xd->plane[i].dqcoeff = dqcoeff; - xd->above_context[i] = cm->above_context + + xd->above_context[i] = + cm->above_context + i * sizeof(*cm->above_context) * 2 * mi_cols_aligned_to_sb(cm->mi_cols); if (get_plane_type(i) == PLANE_TYPE_Y) { @@ -373,7 +370,7 @@ static INLINE void vp9_init_macroblockd(VP9_COMMON *cm, MACROBLOCKD *xd, set_partition_probs(cm, xd); } -static INLINE const vpx_prob* get_partition_probs(const MACROBLOCKD *xd, +static INLINE const vpx_prob *get_partition_probs(const MACROBLOCKD *xd, int ctx) { return xd->partition_probs[ctx]; } @@ -395,34 +392,20 @@ static INLINE int calc_mi_size(int len) { } static INLINE void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile, - int mi_row, int bh, - int mi_col, int bw, + int mi_row, int bh, int mi_col, int bw, int mi_rows, int mi_cols) { - 
xd->mb_to_top_edge = -((mi_row * MI_SIZE) * 8); + xd->mb_to_top_edge = -((mi_row * MI_SIZE) * 8); xd->mb_to_bottom_edge = ((mi_rows - bh - mi_row) * MI_SIZE) * 8; - xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8); - xd->mb_to_right_edge = ((mi_cols - bw - mi_col) * MI_SIZE) * 8; + xd->mb_to_left_edge = -((mi_col * MI_SIZE) * 8); + xd->mb_to_right_edge = ((mi_cols - bw - mi_col) * MI_SIZE) * 8; // Are edges available for intra prediction? - xd->up_available = (mi_row != 0); - xd->left_available = (mi_col > tile->mi_col_start); - // TODO(slavarnway): eliminate up/left available ??? - if (xd->up_available) { - xd->above_mi = xd->mi[-xd->mi_stride]; - } else { - xd->above_mi = NULL; - } - - if (xd->left_available) { - xd->left_mi = xd->mi[-1]; - } else { - xd->left_mi = NULL; - } + xd->above_mi = (mi_row != 0) ? xd->mi[-xd->mi_stride] : NULL; + xd->left_mi = (mi_col > tile->mi_col_start) ? xd->mi[-1] : NULL; } -static INLINE void update_partition_context(MACROBLOCKD *xd, - int mi_row, int mi_col, - BLOCK_SIZE subsize, +static INLINE void update_partition_context(MACROBLOCKD *xd, int mi_row, + int mi_col, BLOCK_SIZE subsize, BLOCK_SIZE bsize) { PARTITION_CONTEXT *const above_ctx = xd->above_seg_context + mi_col; PARTITION_CONTEXT *const left_ctx = xd->left_seg_context + (mi_row & MI_MASK); @@ -437,13 +420,12 @@ static INLINE void update_partition_context(MACROBLOCKD *xd, memset(left_ctx, partition_context_lookup[subsize].left, bs); } -static INLINE int partition_plane_context(const MACROBLOCKD *xd, - int mi_row, int mi_col, - BLOCK_SIZE bsize) { +static INLINE int partition_plane_context(const MACROBLOCKD *xd, int mi_row, + int mi_col, BLOCK_SIZE bsize) { const PARTITION_CONTEXT *above_ctx = xd->above_seg_context + mi_col; const PARTITION_CONTEXT *left_ctx = xd->left_seg_context + (mi_row & MI_MASK); const int bsl = mi_width_log2_lookup[bsize]; - int above = (*above_ctx >> bsl) & 1 , left = (*left_ctx >> bsl) & 1; + int above = (*above_ctx >> bsl) & 1, left = (*left_ctx 
>> bsl) & 1; assert(b_width_log2_lookup[bsize] == b_height_log2_lookup[bsize]); assert(bsl >= 0); diff --git a/libs/libvpx/vp9/common/vp9_postproc.c b/libs/libvpx/vp9/common/vp9_postproc.c index b685d813b7..b105e5d45a 100644 --- a/libs/libvpx/vp9/common/vp9_postproc.c +++ b/libs/libvpx/vp9/common/vp9_postproc.c @@ -12,11 +12,13 @@ #include #include +#include "./vpx_dsp_rtcd.h" #include "./vpx_config.h" #include "./vpx_scale_rtcd.h" #include "./vp9_rtcd.h" #include "vpx_dsp/vpx_dsp_common.h" +#include "vpx_dsp/postproc.h" #include "vpx_ports/mem.h" #include "vpx_ports/system_state.h" #include "vpx_scale/vpx_scale.h" @@ -24,144 +26,21 @@ #include "vp9/common/vp9_onyxc_int.h" #include "vp9/common/vp9_postproc.h" -#include "vp9/common/vp9_textblit.h" #if CONFIG_VP9_POSTPROC -static const int16_t kernel5[] = { - 1, 1, 4, 1, 1 -}; - -const int16_t vp9_rv[] = { - 8, 5, 2, 2, 8, 12, 4, 9, 8, 3, - 0, 3, 9, 0, 0, 0, 8, 3, 14, 4, - 10, 1, 11, 14, 1, 14, 9, 6, 12, 11, - 8, 6, 10, 0, 0, 8, 9, 0, 3, 14, - 8, 11, 13, 4, 2, 9, 0, 3, 9, 6, - 1, 2, 3, 14, 13, 1, 8, 2, 9, 7, - 3, 3, 1, 13, 13, 6, 6, 5, 2, 7, - 11, 9, 11, 8, 7, 3, 2, 0, 13, 13, - 14, 4, 12, 5, 12, 10, 8, 10, 13, 10, - 4, 14, 4, 10, 0, 8, 11, 1, 13, 7, - 7, 14, 6, 14, 13, 2, 13, 5, 4, 4, - 0, 10, 0, 5, 13, 2, 12, 7, 11, 13, - 8, 0, 4, 10, 7, 2, 7, 2, 2, 5, - 3, 4, 7, 3, 3, 14, 14, 5, 9, 13, - 3, 14, 3, 6, 3, 0, 11, 8, 13, 1, - 13, 1, 12, 0, 10, 9, 7, 6, 2, 8, - 5, 2, 13, 7, 1, 13, 14, 7, 6, 7, - 9, 6, 10, 11, 7, 8, 7, 5, 14, 8, - 4, 4, 0, 8, 7, 10, 0, 8, 14, 11, - 3, 12, 5, 7, 14, 3, 14, 5, 2, 6, - 11, 12, 12, 8, 0, 11, 13, 1, 2, 0, - 5, 10, 14, 7, 8, 0, 4, 11, 0, 8, - 0, 3, 10, 5, 8, 0, 11, 6, 7, 8, - 10, 7, 13, 9, 2, 5, 1, 5, 10, 2, - 4, 3, 5, 6, 10, 8, 9, 4, 11, 14, - 0, 10, 0, 5, 13, 2, 12, 7, 11, 13, - 8, 0, 4, 10, 7, 2, 7, 2, 2, 5, - 3, 4, 7, 3, 3, 14, 14, 5, 9, 13, - 3, 14, 3, 6, 3, 0, 11, 8, 13, 1, - 13, 1, 12, 0, 10, 9, 7, 6, 2, 8, - 5, 2, 13, 7, 1, 13, 14, 7, 6, 7, - 9, 6, 10, 11, 7, 8, 7, 5, 14, 8, - 4, 4, 
0, 8, 7, 10, 0, 8, 14, 11, - 3, 12, 5, 7, 14, 3, 14, 5, 2, 6, - 11, 12, 12, 8, 0, 11, 13, 1, 2, 0, - 5, 10, 14, 7, 8, 0, 4, 11, 0, 8, - 0, 3, 10, 5, 8, 0, 11, 6, 7, 8, - 10, 7, 13, 9, 2, 5, 1, 5, 10, 2, - 4, 3, 5, 6, 10, 8, 9, 4, 11, 14, - 3, 8, 3, 7, 8, 5, 11, 4, 12, 3, - 11, 9, 14, 8, 14, 13, 4, 3, 1, 2, - 14, 6, 5, 4, 4, 11, 4, 6, 2, 1, - 5, 8, 8, 12, 13, 5, 14, 10, 12, 13, - 0, 9, 5, 5, 11, 10, 13, 9, 10, 13, -}; static const uint8_t q_diff_thresh = 20; static const uint8_t last_q_thresh = 170; - -void vp9_post_proc_down_and_across_c(const uint8_t *src_ptr, - uint8_t *dst_ptr, - int src_pixels_per_line, - int dst_pixels_per_line, - int rows, - int cols, - int flimit) { - uint8_t const *p_src; - uint8_t *p_dst; - int row, col, i, v, kernel; - int pitch = src_pixels_per_line; - uint8_t d[8]; - (void)dst_pixels_per_line; - - for (row = 0; row < rows; row++) { - /* post_proc_down for one row */ - p_src = src_ptr; - p_dst = dst_ptr; - - for (col = 0; col < cols; col++) { - kernel = 4; - v = p_src[col]; - - for (i = -2; i <= 2; i++) { - if (abs(v - p_src[col + i * pitch]) > flimit) - goto down_skip_convolve; - - kernel += kernel5[2 + i] * p_src[col + i * pitch]; - } - - v = (kernel >> 3); - down_skip_convolve: - p_dst[col] = v; - } - - /* now post_proc_across */ - p_src = dst_ptr; - p_dst = dst_ptr; - - for (i = 0; i < 8; i++) - d[i] = p_src[i]; - - for (col = 0; col < cols; col++) { - kernel = 4; - v = p_src[col]; - - d[col & 7] = v; - - for (i = -2; i <= 2; i++) { - if (abs(v - p_src[col + i]) > flimit) - goto across_skip_convolve; - - kernel += kernel5[2 + i] * p_src[col + i]; - } - - d[col & 7] = (kernel >> 3); - across_skip_convolve: - - if (col >= 2) - p_dst[col - 2] = d[(col - 2) & 7]; - } - - /* handle the last two pixels */ - p_dst[col - 2] = d[(col - 2) & 7]; - p_dst[col - 1] = d[(col - 1) & 7]; - - - /* next row */ - src_ptr += pitch; - dst_ptr += pitch; - } -} +extern const int16_t vpx_rv[]; #if CONFIG_VP9_HIGHBITDEPTH +static const int16_t kernel5[] = { 
1, 1, 4, 1, 1 }; + void vp9_highbd_post_proc_down_and_across_c(const uint16_t *src_ptr, uint16_t *dst_ptr, int src_pixels_per_line, - int dst_pixels_per_line, - int rows, - int cols, - int flimit) { + int dst_pixels_per_line, int rows, + int cols, int flimit) { uint16_t const *p_src; uint16_t *p_dst; int row, col, i, v, kernel; @@ -178,8 +57,7 @@ void vp9_highbd_post_proc_down_and_across_c(const uint16_t *src_ptr, v = p_src[col]; for (i = -2; i <= 2; i++) { - if (abs(v - p_src[col + i * pitch]) > flimit) - goto down_skip_convolve; + if (abs(v - p_src[col + i * pitch]) > flimit) goto down_skip_convolve; kernel += kernel5[2 + i] * p_src[col + i * pitch]; } @@ -194,8 +72,7 @@ void vp9_highbd_post_proc_down_and_across_c(const uint16_t *src_ptr, p_src = dst_ptr; p_dst = dst_ptr; - for (i = 0; i < 8; i++) - d[i] = p_src[i]; + for (i = 0; i < 8; i++) d[i] = p_src[i]; for (col = 0; col < cols; col++) { kernel = 4; @@ -204,8 +81,7 @@ void vp9_highbd_post_proc_down_and_across_c(const uint16_t *src_ptr, d[col & 7] = v; for (i = -2; i <= 2; i++) { - if (abs(v - p_src[col + i]) > flimit) - goto across_skip_convolve; + if (abs(v - p_src[col + i]) > flimit) goto across_skip_convolve; kernel += kernel5[2 + i] * p_src[col + i]; } @@ -213,15 +89,13 @@ void vp9_highbd_post_proc_down_and_across_c(const uint16_t *src_ptr, d[col & 7] = (kernel >> 3); across_skip_convolve: - if (col >= 2) - p_dst[col - 2] = d[(col - 2) & 7]; + if (col >= 2) p_dst[col - 2] = d[(col - 2) & 7]; } /* handle the last two pixels */ p_dst[col - 2] = d[(col - 2) & 7]; p_dst[col - 1] = d[(col - 1) & 7]; - /* next row */ src_ptr += pitch; dst_ptr += dst_pixels_per_line; @@ -236,11 +110,13 @@ static int q2mbl(int x) { return x * x / 3; } -void vp9_mbpost_proc_across_ip_c(uint8_t *src, int pitch, - int rows, int cols, int flimit) { +#if CONFIG_VP9_HIGHBITDEPTH +void vp9_highbd_mbpost_proc_across_ip_c(uint16_t *src, int pitch, int rows, + int cols, int flimit) { int r, c, i; - uint8_t *s = src; - uint8_t d[16]; + + 
uint16_t *s = src; + uint16_t d[16]; for (r = 0; r < rows; r++) { int sumsq = 0; @@ -267,87 +143,17 @@ void vp9_mbpost_proc_across_ip_c(uint8_t *src, int pitch, s[c - 8] = d[(c - 8) & 15]; } - s += pitch; - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -void vp9_highbd_mbpost_proc_across_ip_c(uint16_t *src, int pitch, - int rows, int cols, int flimit) { - int r, c, i; - - uint16_t *s = src; - uint16_t d[16]; - - - for (r = 0; r < rows; r++) { - int sumsq = 0; - int sum = 0; - - for (i = -8; i <= 6; i++) { - sumsq += s[i] * s[i]; - sum += s[i]; - d[i + 8] = 0; - } - - for (c = 0; c < cols + 8; c++) { - int x = s[c + 7] - s[c - 8]; - int y = s[c + 7] + s[c - 8]; - - sum += x; - sumsq += x * y; - - d[c & 15] = s[c]; - - if (sumsq * 15 - sum * sum < flimit) { - d[c & 15] = (8 + sum + s[c]) >> 4; - } - - s[c - 8] = d[(c - 8) & 15]; - } s += pitch; } } #endif // CONFIG_VP9_HIGHBITDEPTH -void vp9_mbpost_proc_down_c(uint8_t *dst, int pitch, - int rows, int cols, int flimit) { - int r, c, i; - const short *rv3 = &vp9_rv[63 & rand()]; // NOLINT - - for (c = 0; c < cols; c++) { - uint8_t *s = &dst[c]; - int sumsq = 0; - int sum = 0; - uint8_t d[16]; - const int16_t *rv2 = rv3 + ((c * 17) & 127); - - for (i = -8; i <= 6; i++) { - sumsq += s[i * pitch] * s[i * pitch]; - sum += s[i * pitch]; - } - - for (r = 0; r < rows + 8; r++) { - sumsq += s[7 * pitch] * s[ 7 * pitch] - s[-8 * pitch] * s[-8 * pitch]; - sum += s[7 * pitch] - s[-8 * pitch]; - d[r & 15] = s[0]; - - if (sumsq * 15 - sum * sum < flimit) { - d[r & 15] = (rv2[r & 127] + sum + s[0]) >> 4; - } - - s[-8 * pitch] = d[(r - 8) & 15]; - s += pitch; - } - } -} - #if CONFIG_VP9_HIGHBITDEPTH -void vp9_highbd_mbpost_proc_down_c(uint16_t *dst, int pitch, - int rows, int cols, int flimit) { +void vp9_highbd_mbpost_proc_down_c(uint16_t *dst, int pitch, int rows, int cols, + int flimit) { int r, c, i; - const int16_t *rv3 = &vp9_rv[63 & rand()]; // NOLINT + const int16_t *rv3 = &vpx_rv[63 & rand()]; // NOLINT for (c = 0; c < cols; c++) { 
uint16_t *s = &dst[c]; @@ -362,7 +168,7 @@ void vp9_highbd_mbpost_proc_down_c(uint16_t *dst, int pitch, } for (r = 0; r < rows + 8; r++) { - sumsq += s[7 * pitch] * s[ 7 * pitch] - s[-8 * pitch] * s[-8 * pitch]; + sumsq += s[7 * pitch] * s[7 * pitch] - s[-8 * pitch] * s[-8 * pitch]; sum += s[7 * pitch] - s[-8 * pitch]; d[r & 15] = s[0]; @@ -377,240 +183,102 @@ void vp9_highbd_mbpost_proc_down_c(uint16_t *dst, int pitch, } #endif // CONFIG_VP9_HIGHBITDEPTH -static void deblock_and_de_macro_block(YV12_BUFFER_CONFIG *source, - YV12_BUFFER_CONFIG *post, - int q, - int low_var_thresh, - int flag) { - double level = 6.0e-05 * q * q * q - .0067 * q * q + .306 * q + .0065; - int ppl = (int)(level + .5); - (void) low_var_thresh; - (void) flag; - +static void deblock_and_de_macro_block(YV12_BUFFER_CONFIG *source, + YV12_BUFFER_CONFIG *post, int q, + int low_var_thresh, int flag, + uint8_t *limits) { + (void)low_var_thresh; + (void)flag; #if CONFIG_VP9_HIGHBITDEPTH if (source->flags & YV12_FLAG_HIGHBITDEPTH) { - vp9_highbd_post_proc_down_and_across(CONVERT_TO_SHORTPTR(source->y_buffer), - CONVERT_TO_SHORTPTR(post->y_buffer), - source->y_stride, post->y_stride, - source->y_height, source->y_width, - ppl); + double level = 6.0e-05 * q * q * q - .0067 * q * q + .306 * q + .0065; + int ppl = (int)(level + .5); + vp9_highbd_post_proc_down_and_across( + CONVERT_TO_SHORTPTR(source->y_buffer), + CONVERT_TO_SHORTPTR(post->y_buffer), source->y_stride, post->y_stride, + source->y_height, source->y_width, ppl); vp9_highbd_mbpost_proc_across_ip(CONVERT_TO_SHORTPTR(post->y_buffer), post->y_stride, post->y_height, post->y_width, q2mbl(q)); vp9_highbd_mbpost_proc_down(CONVERT_TO_SHORTPTR(post->y_buffer), - post->y_stride, post->y_height, - post->y_width, q2mbl(q)); + post->y_stride, post->y_height, post->y_width, + q2mbl(q)); - vp9_highbd_post_proc_down_and_across(CONVERT_TO_SHORTPTR(source->u_buffer), - CONVERT_TO_SHORTPTR(post->u_buffer), - source->uv_stride, post->uv_stride, - 
source->uv_height, source->uv_width, - ppl); - vp9_highbd_post_proc_down_and_across(CONVERT_TO_SHORTPTR(source->v_buffer), - CONVERT_TO_SHORTPTR(post->v_buffer), - source->uv_stride, post->uv_stride, - source->uv_height, source->uv_width, - ppl); + vp9_highbd_post_proc_down_and_across( + CONVERT_TO_SHORTPTR(source->u_buffer), + CONVERT_TO_SHORTPTR(post->u_buffer), source->uv_stride, post->uv_stride, + source->uv_height, source->uv_width, ppl); + vp9_highbd_post_proc_down_and_across( + CONVERT_TO_SHORTPTR(source->v_buffer), + CONVERT_TO_SHORTPTR(post->v_buffer), source->uv_stride, post->uv_stride, + source->uv_height, source->uv_width, ppl); } else { - vp9_post_proc_down_and_across(source->y_buffer, post->y_buffer, - source->y_stride, post->y_stride, - source->y_height, source->y_width, ppl); - - vp9_mbpost_proc_across_ip(post->y_buffer, post->y_stride, post->y_height, +#endif // CONFIG_VP9_HIGHBITDEPTH + vp9_deblock(source, post, q, limits); + vpx_mbpost_proc_across_ip(post->y_buffer, post->y_stride, post->y_height, post->y_width, q2mbl(q)); - - vp9_mbpost_proc_down(post->y_buffer, post->y_stride, post->y_height, + vpx_mbpost_proc_down(post->y_buffer, post->y_stride, post->y_height, post->y_width, q2mbl(q)); - - vp9_post_proc_down_and_across(source->u_buffer, post->u_buffer, - source->uv_stride, post->uv_stride, - source->uv_height, source->uv_width, ppl); - vp9_post_proc_down_and_across(source->v_buffer, post->v_buffer, - source->uv_stride, post->uv_stride, - source->uv_height, source->uv_width, ppl); +#if CONFIG_VP9_HIGHBITDEPTH } -#else - vp9_post_proc_down_and_across(source->y_buffer, post->y_buffer, - source->y_stride, post->y_stride, - source->y_height, source->y_width, ppl); - - vp9_mbpost_proc_across_ip(post->y_buffer, post->y_stride, post->y_height, - post->y_width, q2mbl(q)); - - vp9_mbpost_proc_down(post->y_buffer, post->y_stride, post->y_height, - post->y_width, q2mbl(q)); - - vp9_post_proc_down_and_across(source->u_buffer, post->u_buffer, - 
source->uv_stride, post->uv_stride, - source->uv_height, source->uv_width, ppl); - vp9_post_proc_down_and_across(source->v_buffer, post->v_buffer, - source->uv_stride, post->uv_stride, - source->uv_height, source->uv_width, ppl); #endif // CONFIG_VP9_HIGHBITDEPTH } -void vp9_deblock(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, - int q) { - const int ppl = (int)(6.0e-05 * q * q * q - 0.0067 * q * q + 0.306 * q - + 0.0065 + 0.5); - int i; - - const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer}; - const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride}; - const int src_widths[3] = {src->y_width, src->uv_width, src->uv_width}; - const int src_heights[3] = {src->y_height, src->uv_height, src->uv_height}; - - uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer}; - const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride}; - - for (i = 0; i < MAX_MB_PLANE; ++i) { +void vp9_deblock(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int q, + uint8_t *limits) { + const int ppl = + (int)(6.0e-05 * q * q * q - 0.0067 * q * q + 0.306 * q + 0.0065 + 0.5); #if CONFIG_VP9_HIGHBITDEPTH - assert((src->flags & YV12_FLAG_HIGHBITDEPTH) == - (dst->flags & YV12_FLAG_HIGHBITDEPTH)); - if (src->flags & YV12_FLAG_HIGHBITDEPTH) { - vp9_highbd_post_proc_down_and_across(CONVERT_TO_SHORTPTR(srcs[i]), - CONVERT_TO_SHORTPTR(dsts[i]), - src_strides[i], dst_strides[i], - src_heights[i], src_widths[i], ppl); - } else { - vp9_post_proc_down_and_across(srcs[i], dsts[i], - src_strides[i], dst_strides[i], - src_heights[i], src_widths[i], ppl); + if (src->flags & YV12_FLAG_HIGHBITDEPTH) { + int i; + const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer, + src->v_buffer }; + const int src_strides[3] = { src->y_stride, src->uv_stride, + src->uv_stride }; + const int src_widths[3] = { src->y_width, src->uv_width, src->uv_width }; + const int src_heights[3] = { src->y_height, src->uv_height, + 
src->uv_height }; + + uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer }; + const int dst_strides[3] = { dst->y_stride, dst->uv_stride, + dst->uv_stride }; + for (i = 0; i < MAX_MB_PLANE; ++i) { + vp9_highbd_post_proc_down_and_across( + CONVERT_TO_SHORTPTR(srcs[i]), CONVERT_TO_SHORTPTR(dsts[i]), + src_strides[i], dst_strides[i], src_heights[i], src_widths[i], ppl); } -#else - vp9_post_proc_down_and_across(srcs[i], dsts[i], - src_strides[i], dst_strides[i], - src_heights[i], src_widths[i], ppl); + } else { #endif // CONFIG_VP9_HIGHBITDEPTH - } -} + int mbr; + const int mb_rows = src->y_height / 16; + const int mb_cols = src->y_width / 16; -void vp9_denoise(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, - int q) { - const int ppl = (int)(6.0e-05 * q * q * q - 0.0067 * q * q + 0.306 * q - + 0.0065 + 0.5); - int i; - - const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer}; - const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride}; - const int src_widths[3] = {src->y_width, src->uv_width, src->uv_width}; - const int src_heights[3] = {src->y_height, src->uv_height, src->uv_height}; - - uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer}; - const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride}; - - for (i = 0; i < MAX_MB_PLANE; ++i) { - const int src_stride = src_strides[i]; - const int src_width = src_widths[i] - 4; - const int src_height = src_heights[i] - 4; - const int dst_stride = dst_strides[i]; + memset(limits, (unsigned char)ppl, 16 * mb_cols); + for (mbr = 0; mbr < mb_rows; mbr++) { + vpx_post_proc_down_and_across_mb_row( + src->y_buffer + 16 * mbr * src->y_stride, + dst->y_buffer + 16 * mbr * dst->y_stride, src->y_stride, + dst->y_stride, src->y_width, limits, 16); + vpx_post_proc_down_and_across_mb_row( + src->u_buffer + 8 * mbr * src->uv_stride, + dst->u_buffer + 8 * mbr * dst->uv_stride, src->uv_stride, + dst->uv_stride, src->uv_width, limits, 8); 
+ vpx_post_proc_down_and_across_mb_row( + src->v_buffer + 8 * mbr * src->uv_stride, + dst->v_buffer + 8 * mbr * dst->uv_stride, src->uv_stride, + dst->uv_stride, src->uv_width, limits, 8); + } #if CONFIG_VP9_HIGHBITDEPTH - assert((src->flags & YV12_FLAG_HIGHBITDEPTH) == - (dst->flags & YV12_FLAG_HIGHBITDEPTH)); - if (src->flags & YV12_FLAG_HIGHBITDEPTH) { - const uint16_t *const src_plane = CONVERT_TO_SHORTPTR( - srcs[i] + 2 * src_stride + 2); - uint16_t *const dst_plane = CONVERT_TO_SHORTPTR( - dsts[i] + 2 * dst_stride + 2); - vp9_highbd_post_proc_down_and_across(src_plane, dst_plane, src_stride, - dst_stride, src_height, src_width, - ppl); - } else { - const uint8_t *const src_plane = srcs[i] + 2 * src_stride + 2; - uint8_t *const dst_plane = dsts[i] + 2 * dst_stride + 2; - - vp9_post_proc_down_and_across(src_plane, dst_plane, src_stride, - dst_stride, src_height, src_width, ppl); - } -#else - const uint8_t *const src_plane = srcs[i] + 2 * src_stride + 2; - uint8_t *const dst_plane = dsts[i] + 2 * dst_stride + 2; - vp9_post_proc_down_and_across(src_plane, dst_plane, src_stride, dst_stride, - src_height, src_width, ppl); -#endif } +#endif // CONFIG_VP9_HIGHBITDEPTH } -static double gaussian(double sigma, double mu, double x) { - return 1 / (sigma * sqrt(2.0 * 3.14159265)) * - (exp(-(x - mu) * (x - mu) / (2 * sigma * sigma))); -} - -static void fillrd(struct postproc_state *state, int q, int a) { - char char_dist[300]; - - double sigma; - int ai = a, qi = q, i; - - vpx_clear_system_state(); - - sigma = ai + .5 + .6 * (63 - qi) / 63.0; - - /* set up a lookup table of 256 entries that matches - * a gaussian distribution with sigma determined by q. 
- */ - { - int next, j; - - next = 0; - - for (i = -32; i < 32; i++) { - int a_i = (int)(0.5 + 256 * gaussian(sigma, 0, i)); - - if (a_i) { - for (j = 0; j < a_i; j++) { - char_dist[next + j] = (char) i; - } - - next = next + j; - } - } - - for (; next < 256; next++) - char_dist[next] = 0; - } - - for (i = 0; i < 3072; i++) { - state->noise[i] = char_dist[rand() & 0xff]; // NOLINT - } - - for (i = 0; i < 16; i++) { - state->blackclamp[i] = -char_dist[0]; - state->whiteclamp[i] = -char_dist[0]; - state->bothclamp[i] = -2 * char_dist[0]; - } - - state->last_q = q; - state->last_noise = a; -} - -void vp9_plane_add_noise_c(uint8_t *start, char *noise, - char blackclamp[16], - char whiteclamp[16], - char bothclamp[16], - unsigned int width, unsigned int height, int pitch) { - unsigned int i, j; - - // TODO(jbb): why does simd code use both but c doesn't, normalize and - // fix.. - (void) bothclamp; - for (i = 0; i < height; i++) { - uint8_t *pos = start + i * pitch; - char *ref = (char *)(noise + (rand() & 0xff)); // NOLINT - - for (j = 0; j < width; j++) { - if (pos[j] < blackclamp[0]) - pos[j] = blackclamp[0]; - - if (pos[j] > 255 + whiteclamp[0]) - pos[j] = 255 + whiteclamp[0]; - - pos[j] += ref[j]; - } - } +void vp9_denoise(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int q, + uint8_t *limits) { + vp9_deblock(src, dst, q, limits); } static void swap_mi_and_prev_mi(VP9_COMMON *cm) { @@ -624,15 +292,14 @@ static void swap_mi_and_prev_mi(VP9_COMMON *cm) { cm->postproc_state.prev_mi = cm->postproc_state.prev_mip + cm->mi_stride + 1; } -int vp9_post_proc_frame(struct VP9Common *cm, - YV12_BUFFER_CONFIG *dest, vp9_ppflags_t *ppflags) { +int vp9_post_proc_frame(struct VP9Common *cm, YV12_BUFFER_CONFIG *dest, + vp9_ppflags_t *ppflags) { const int q = VPXMIN(105, cm->lf.filter_level * 2); const int flags = ppflags->post_proc_flag; YV12_BUFFER_CONFIG *const ppbuf = &cm->post_proc_buffer; struct postproc_state *const ppstate = &cm->postproc_state; - if 
(!cm->frame_to_show) - return -1; + if (!cm->frame_to_show) return -1; if (!flags) { *dest = *cm->frame_to_show; @@ -643,15 +310,16 @@ int vp9_post_proc_frame(struct VP9Common *cm, // Alloc memory for prev_mip in the first frame. if (cm->current_video_frame == 1) { - cm->postproc_state.last_base_qindex = cm->base_qindex; - cm->postproc_state.last_frame_valid = 1; + ppstate->last_base_qindex = cm->base_qindex; + ppstate->last_frame_valid = 1; + } + + if ((flags & VP9D_MFQE) && ppstate->prev_mip == NULL) { ppstate->prev_mip = vpx_calloc(cm->mi_alloc_size, sizeof(*cm->mip)); if (!ppstate->prev_mip) { return 1; } ppstate->prev_mi = ppstate->prev_mip + cm->mi_stride + 1; - memset(ppstate->prev_mip, 0, - cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip)); } // Allocate post_proc_buffer_int if needed. @@ -688,10 +356,25 @@ int vp9_post_proc_frame(struct VP9Common *cm, vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, "Failed to allocate post-processing buffer"); + if (flags & (VP9D_DEMACROBLOCK | VP9D_DEBLOCK)) { + if (!cm->postproc_state.limits) { + cm->postproc_state.limits = + vpx_calloc(cm->width, sizeof(*cm->postproc_state.limits)); + } + } + + if (flags & VP9D_ADDNOISE) { + if (!cm->postproc_state.generated_noise) { + cm->postproc_state.generated_noise = vpx_calloc( + cm->width + 256, sizeof(*cm->postproc_state.generated_noise)); + if (!cm->postproc_state.generated_noise) return 1; + } + } + if ((flags & VP9D_MFQE) && cm->current_video_frame >= 2 && - cm->postproc_state.last_frame_valid && cm->bit_depth == 8 && - cm->postproc_state.last_base_qindex <= last_q_thresh && - cm->base_qindex - cm->postproc_state.last_base_qindex >= q_diff_thresh) { + ppstate->last_frame_valid && cm->bit_depth == 8 && + ppstate->last_base_qindex <= last_q_thresh && + cm->base_qindex - ppstate->last_base_qindex >= q_diff_thresh) { vp9_mfqe(cm); // TODO(jackychen): Consider whether enable deblocking by default // if mfqe is enabled. 
Need to take both the quality and the speed @@ -701,35 +384,40 @@ int vp9_post_proc_frame(struct VP9Common *cm, } if ((flags & VP9D_DEMACROBLOCK) && cm->post_proc_buffer_int.buffer_alloc) { deblock_and_de_macro_block(&cm->post_proc_buffer_int, ppbuf, - q + (ppflags->deblocking_level - 5) * 10, - 1, 0); + q + (ppflags->deblocking_level - 5) * 10, 1, 0, + cm->postproc_state.limits); } else if (flags & VP9D_DEBLOCK) { - vp9_deblock(&cm->post_proc_buffer_int, ppbuf, q); + vp9_deblock(&cm->post_proc_buffer_int, ppbuf, q, + cm->postproc_state.limits); } else { vp8_yv12_copy_frame(&cm->post_proc_buffer_int, ppbuf); } } else if (flags & VP9D_DEMACROBLOCK) { deblock_and_de_macro_block(cm->frame_to_show, ppbuf, - q + (ppflags->deblocking_level - 5) * 10, 1, 0); + q + (ppflags->deblocking_level - 5) * 10, 1, 0, + cm->postproc_state.limits); } else if (flags & VP9D_DEBLOCK) { - vp9_deblock(cm->frame_to_show, ppbuf, q); + vp9_deblock(cm->frame_to_show, ppbuf, q, cm->postproc_state.limits); } else { vp8_yv12_copy_frame(cm->frame_to_show, ppbuf); } - cm->postproc_state.last_base_qindex = cm->base_qindex; - cm->postproc_state.last_frame_valid = 1; - + ppstate->last_base_qindex = cm->base_qindex; + ppstate->last_frame_valid = 1; if (flags & VP9D_ADDNOISE) { const int noise_level = ppflags->noise_level; - if (ppstate->last_q != q || - ppstate->last_noise != noise_level) { - fillrd(ppstate, 63 - q, noise_level); + if (ppstate->last_q != q || ppstate->last_noise != noise_level) { + double sigma; + vpx_clear_system_state(); + sigma = noise_level + .5 + .6 * q / 63.0; + ppstate->clamp = + vpx_setup_noise(sigma, ppstate->generated_noise, cm->width + 256); + ppstate->last_q = q; + ppstate->last_noise = noise_level; } - - vp9_plane_add_noise(ppbuf->y_buffer, ppstate->noise, ppstate->blackclamp, - ppstate->whiteclamp, ppstate->bothclamp, - ppbuf->y_width, ppbuf->y_height, ppbuf->y_stride); + vpx_plane_add_noise(ppbuf->y_buffer, ppstate->generated_noise, + ppstate->clamp, ppstate->clamp, 
ppbuf->y_width, + ppbuf->y_height, ppbuf->y_stride); } *dest = *ppbuf; @@ -740,7 +428,7 @@ int vp9_post_proc_frame(struct VP9Common *cm, dest->uv_width = dest->y_width >> cm->subsampling_x; dest->uv_height = dest->y_height >> cm->subsampling_y; - swap_mi_and_prev_mi(cm); + if (flags & VP9D_MFQE) swap_mi_and_prev_mi(cm); return 0; } #endif // CONFIG_VP9_POSTPROC diff --git a/libs/libvpx/vp9/common/vp9_postproc.h b/libs/libvpx/vp9/common/vp9_postproc.h index 035c9cdf84..6059094114 100644 --- a/libs/libvpx/vp9/common/vp9_postproc.h +++ b/libs/libvpx/vp9/common/vp9_postproc.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP9_COMMON_VP9_POSTPROC_H_ #define VP9_COMMON_VP9_POSTPROC_H_ @@ -25,26 +24,27 @@ extern "C" { struct postproc_state { int last_q; int last_noise; - char noise[3072]; int last_base_qindex; int last_frame_valid; MODE_INFO *prev_mip; MODE_INFO *prev_mi; - DECLARE_ALIGNED(16, char, blackclamp[16]); - DECLARE_ALIGNED(16, char, whiteclamp[16]); - DECLARE_ALIGNED(16, char, bothclamp[16]); + int clamp; + uint8_t *limits; + int8_t *generated_noise; }; struct VP9Common; #define MFQE_PRECISION 4 -int vp9_post_proc_frame(struct VP9Common *cm, - YV12_BUFFER_CONFIG *dest, vp9_ppflags_t *flags); +int vp9_post_proc_frame(struct VP9Common *cm, YV12_BUFFER_CONFIG *dest, + vp9_ppflags_t *flags); -void vp9_denoise(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int q); +void vp9_denoise(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int q, + uint8_t *limits); -void vp9_deblock(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int q); +void vp9_deblock(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, int q, + uint8_t *limits); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp9/common/vp9_ppflags.h b/libs/libvpx/vp9/common/vp9_ppflags.h index 12b989f43a..6dcfa412be 100644 --- a/libs/libvpx/vp9/common/vp9_ppflags.h +++ b/libs/libvpx/vp9/common/vp9_ppflags.h @@ -16,18 +16,18 @@ extern 
"C" { #endif enum { - VP9D_NOFILTERING = 0, - VP9D_DEBLOCK = 1 << 0, - VP9D_DEMACROBLOCK = 1 << 1, - VP9D_ADDNOISE = 1 << 2, - VP9D_DEBUG_TXT_FRAME_INFO = 1 << 3, - VP9D_DEBUG_TXT_MBLK_MODES = 1 << 4, - VP9D_DEBUG_TXT_DC_DIFF = 1 << 5, - VP9D_DEBUG_TXT_RATE_INFO = 1 << 6, - VP9D_DEBUG_DRAW_MV = 1 << 7, - VP9D_DEBUG_CLR_BLK_MODES = 1 << 8, + VP9D_NOFILTERING = 0, + VP9D_DEBLOCK = 1 << 0, + VP9D_DEMACROBLOCK = 1 << 1, + VP9D_ADDNOISE = 1 << 2, + VP9D_DEBUG_TXT_FRAME_INFO = 1 << 3, + VP9D_DEBUG_TXT_MBLK_MODES = 1 << 4, + VP9D_DEBUG_TXT_DC_DIFF = 1 << 5, + VP9D_DEBUG_TXT_RATE_INFO = 1 << 6, + VP9D_DEBUG_DRAW_MV = 1 << 7, + VP9D_DEBUG_CLR_BLK_MODES = 1 << 8, VP9D_DEBUG_CLR_FRM_REF_BLKS = 1 << 9, - VP9D_MFQE = 1 << 10 + VP9D_MFQE = 1 << 10 }; typedef struct { diff --git a/libs/libvpx/vp9/common/vp9_pred_common.c b/libs/libvpx/vp9/common/vp9_pred_common.c index c201890a8b..a7ddc0b951 100644 --- a/libs/libvpx/vp9/common/vp9_pred_common.c +++ b/libs/libvpx/vp9/common/vp9_pred_common.c @@ -13,61 +13,13 @@ #include "vp9/common/vp9_pred_common.h" #include "vp9/common/vp9_seg_common.h" -// Returns a context number for the given MB prediction signal -int vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd) { - // Note: - // The mode info data structure has a one element border above and to the - // left of the entries corresponding to real macroblocks. - // The prediction flags in these dummy entries are initialized to 0. - const MODE_INFO *const left_mi = xd->left_mi; - const int left_type = xd->left_available && is_inter_block(left_mi) ? - left_mi->interp_filter : SWITCHABLE_FILTERS; - const MODE_INFO *const above_mi = xd->above_mi; - const int above_type = xd->up_available && is_inter_block(above_mi) ? 
- above_mi->interp_filter : SWITCHABLE_FILTERS; - - if (left_type == above_type) - return left_type; - else if (left_type == SWITCHABLE_FILTERS && above_type != SWITCHABLE_FILTERS) - return above_type; - else if (left_type != SWITCHABLE_FILTERS && above_type == SWITCHABLE_FILTERS) - return left_type; - else - return SWITCHABLE_FILTERS; -} - -// The mode info data structure has a one element border above and to the -// left of the entries corresponding to real macroblocks. -// The prediction flags in these dummy entries are initialized to 0. -// 0 - inter/inter, inter/--, --/inter, --/-- -// 1 - intra/inter, inter/intra -// 2 - intra/--, --/intra -// 3 - intra/intra -int vp9_get_intra_inter_context(const MACROBLOCKD *xd) { - const MODE_INFO *const above_mi = xd->above_mi; - const MODE_INFO *const left_mi = xd->left_mi; - const int has_above = xd->up_available; - const int has_left = xd->left_available; - - if (has_above && has_left) { // both edges available - const int above_intra = !is_inter_block(above_mi); - const int left_intra = !is_inter_block(left_mi); - return left_intra && above_intra ? 3 - : left_intra || above_intra; - } else if (has_above || has_left) { // one edge available - return 2 * !is_inter_block(has_above ? above_mi : left_mi); - } else { - return 0; - } -} - int vp9_get_reference_mode_context(const VP9_COMMON *cm, const MACROBLOCKD *xd) { int ctx; const MODE_INFO *const above_mi = xd->above_mi; const MODE_INFO *const left_mi = xd->left_mi; - const int has_above = xd->up_available; - const int has_left = xd->left_available; + const int has_above = !!above_mi; + const int has_left = !!left_mi; // Note: // The mode info data structure has a one element border above and to the // left of the entries corresponding to real macroblocks. 
@@ -109,8 +61,8 @@ int vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm, int pred_context; const MODE_INFO *const above_mi = xd->above_mi; const MODE_INFO *const left_mi = xd->left_mi; - const int above_in_image = xd->up_available; - const int left_in_image = xd->left_available; + const int above_in_image = !!above_mi; + const int left_in_image = !!left_mi; // Note: // The mode info data structure has a one element border above and to the @@ -131,15 +83,15 @@ int vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm, if (!has_second_ref(edge_mi)) // single pred (1/3) pred_context = 1 + 2 * (edge_mi->ref_frame[0] != cm->comp_var_ref[1]); else // comp pred (1/3) - pred_context = 1 + 2 * (edge_mi->ref_frame[var_ref_idx] - != cm->comp_var_ref[1]); + pred_context = + 1 + 2 * (edge_mi->ref_frame[var_ref_idx] != cm->comp_var_ref[1]); } else { // inter/inter const int l_sg = !has_second_ref(left_mi); const int a_sg = !has_second_ref(above_mi); - const MV_REFERENCE_FRAME vrfa = a_sg ? above_mi->ref_frame[0] - : above_mi->ref_frame[var_ref_idx]; - const MV_REFERENCE_FRAME vrfl = l_sg ? left_mi->ref_frame[0] - : left_mi->ref_frame[var_ref_idx]; + const MV_REFERENCE_FRAME vrfa = + a_sg ? above_mi->ref_frame[0] : above_mi->ref_frame[var_ref_idx]; + const MV_REFERENCE_FRAME vrfl = + l_sg ? 
left_mi->ref_frame[0] : left_mi->ref_frame[var_ref_idx]; if (vrfa == vrfl && cm->comp_var_ref[1] == vrfa) { pred_context = 0; @@ -173,8 +125,8 @@ int vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm, pred_context = 2; } else { if (has_second_ref(edge_mi)) - pred_context = 4 * (edge_mi->ref_frame[var_ref_idx] - != cm->comp_var_ref[1]); + pred_context = + 4 * (edge_mi->ref_frame[var_ref_idx] != cm->comp_var_ref[1]); else pred_context = 3 * (edge_mi->ref_frame[0] != cm->comp_var_ref[1]); } @@ -190,8 +142,8 @@ int vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) { int pred_context; const MODE_INFO *const above_mi = xd->above_mi; const MODE_INFO *const left_mi = xd->left_mi; - const int has_above = xd->up_available; - const int has_left = xd->left_available; + const int has_above = !!above_mi; + const int has_left = !!left_mi; // Note: // The mode info data structure has a one element border above and to the // left of the entries corresponding to real macroblocks. @@ -256,8 +208,8 @@ int vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) { int pred_context; const MODE_INFO *const above_mi = xd->above_mi; const MODE_INFO *const left_mi = xd->left_mi; - const int has_above = xd->up_available; - const int has_left = xd->left_available; + const int has_above = !!above_mi; + const int has_left = !!left_mi; // Note: // The mode info data structure has a one element border above and to the @@ -277,8 +229,9 @@ int vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) { else pred_context = 4 * (edge_mi->ref_frame[0] == GOLDEN_FRAME); } else { - pred_context = 1 + 2 * (edge_mi->ref_frame[0] == GOLDEN_FRAME || - edge_mi->ref_frame[1] == GOLDEN_FRAME); + pred_context = 1 + + 2 * (edge_mi->ref_frame[0] == GOLDEN_FRAME || + edge_mi->ref_frame[1] == GOLDEN_FRAME); } } else { // inter/inter const int above_has_second = has_second_ref(above_mi); @@ -290,10 +243,9 @@ int vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) { if (above_has_second && 
left_has_second) { if (above0 == left0 && above1 == left1) - pred_context = 3 * (above0 == GOLDEN_FRAME || - above1 == GOLDEN_FRAME || - left0 == GOLDEN_FRAME || - left1 == GOLDEN_FRAME); + pred_context = + 3 * (above0 == GOLDEN_FRAME || above1 == GOLDEN_FRAME || + left0 == GOLDEN_FRAME || left1 == GOLDEN_FRAME); else pred_context = 2; } else if (above_has_second || left_has_second) { @@ -311,12 +263,12 @@ int vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) { if (above0 == LAST_FRAME && left0 == LAST_FRAME) { pred_context = 3; } else if (above0 == LAST_FRAME || left0 == LAST_FRAME) { - const MV_REFERENCE_FRAME edge0 = (above0 == LAST_FRAME) ? left0 - : above0; + const MV_REFERENCE_FRAME edge0 = + (above0 == LAST_FRAME) ? left0 : above0; pred_context = 4 * (edge0 == GOLDEN_FRAME); } else { - pred_context = 2 * (above0 == GOLDEN_FRAME) + - 2 * (left0 == GOLDEN_FRAME); + pred_context = + 2 * (above0 == GOLDEN_FRAME) + 2 * (left0 == GOLDEN_FRAME); } } } diff --git a/libs/libvpx/vp9/common/vp9_pred_common.h b/libs/libvpx/vp9/common/vp9_pred_common.h index 254cb8b749..8400bd70f1 100644 --- a/libs/libvpx/vp9/common/vp9_pred_common.h +++ b/libs/libvpx/vp9/common/vp9_pred_common.h @@ -20,8 +20,8 @@ extern "C" { #endif static INLINE int get_segment_id(const VP9_COMMON *cm, - const uint8_t *segment_ids, - BLOCK_SIZE bsize, int mi_row, int mi_col) { + const uint8_t *segment_ids, BLOCK_SIZE bsize, + int mi_row, int mi_col) { const int mi_offset = mi_row * cm->mi_cols + mi_col; const int bw = num_8x8_blocks_wide_lookup[bsize]; const int bh = num_8x8_blocks_high_lookup[bsize]; @@ -41,8 +41,7 @@ static INLINE int get_segment_id(const VP9_COMMON *cm, static INLINE int vp9_get_pred_context_seg_id(const MACROBLOCKD *xd) { const MODE_INFO *const above_mi = xd->above_mi; const MODE_INFO *const left_mi = xd->left_mi; - const int above_sip = (above_mi != NULL) ? - above_mi->seg_id_predicted : 0; + const int above_sip = (above_mi != NULL) ? 
above_mi->seg_id_predicted : 0; const int left_sip = (left_mi != NULL) ? left_mi->seg_id_predicted : 0; return above_sip + left_sip; @@ -66,13 +65,54 @@ static INLINE vpx_prob vp9_get_skip_prob(const VP9_COMMON *cm, return cm->fc->skip_probs[vp9_get_skip_context(xd)]; } -int vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd); +// Returns a context number for the given MB prediction signal +static INLINE int get_pred_context_switchable_interp(const MACROBLOCKD *xd) { + // Note: + // The mode info data structure has a one element border above and to the + // left of the entries corresponding to real macroblocks. + // The prediction flags in these dummy entries are initialized to 0. + const MODE_INFO *const left_mi = xd->left_mi; + const int left_type = left_mi ? left_mi->interp_filter : SWITCHABLE_FILTERS; + const MODE_INFO *const above_mi = xd->above_mi; + const int above_type = + above_mi ? above_mi->interp_filter : SWITCHABLE_FILTERS; -int vp9_get_intra_inter_context(const MACROBLOCKD *xd); + if (left_type == above_type) + return left_type; + else if (left_type == SWITCHABLE_FILTERS) + return above_type; + else if (above_type == SWITCHABLE_FILTERS) + return left_type; + else + return SWITCHABLE_FILTERS; +} + +// The mode info data structure has a one element border above and to the +// left of the entries corresponding to real macroblocks. +// The prediction flags in these dummy entries are initialized to 0. 
+// 0 - inter/inter, inter/--, --/inter, --/-- +// 1 - intra/inter, inter/intra +// 2 - intra/--, --/intra +// 3 - intra/intra +static INLINE int get_intra_inter_context(const MACROBLOCKD *xd) { + const MODE_INFO *const above_mi = xd->above_mi; + const MODE_INFO *const left_mi = xd->left_mi; + const int has_above = !!above_mi; + const int has_left = !!left_mi; + + if (has_above && has_left) { // both edges available + const int above_intra = !is_inter_block(above_mi); + const int left_intra = !is_inter_block(left_mi); + return left_intra && above_intra ? 3 : left_intra || above_intra; + } else if (has_above || has_left) { // one edge available + return 2 * !is_inter_block(has_above ? above_mi : left_mi); + } + return 0; +} static INLINE vpx_prob vp9_get_intra_inter_prob(const VP9_COMMON *cm, const MACROBLOCKD *xd) { - return cm->fc->intra_inter_prob[vp9_get_intra_inter_context(xd)]; + return cm->fc->intra_inter_prob[get_intra_inter_context(xd)]; } int vp9_get_reference_mode_context(const VP9_COMMON *cm, const MACROBLOCKD *xd); @@ -113,17 +153,15 @@ static INLINE int get_tx_size_context(const MACROBLOCKD *xd) { const int max_tx_size = max_txsize_lookup[xd->mi[0]->sb_type]; const MODE_INFO *const above_mi = xd->above_mi; const MODE_INFO *const left_mi = xd->left_mi; - const int has_above = xd->up_available; - const int has_left = xd->left_available; - int above_ctx = (has_above && !above_mi->skip) ? (int)above_mi->tx_size - : max_tx_size; - int left_ctx = (has_left && !left_mi->skip) ? (int)left_mi->tx_size - : max_tx_size; - if (!has_left) - left_ctx = above_ctx; + const int has_above = !!above_mi; + const int has_left = !!left_mi; + int above_ctx = + (has_above && !above_mi->skip) ? (int)above_mi->tx_size : max_tx_size; + int left_ctx = + (has_left && !left_mi->skip) ? 
(int)left_mi->tx_size : max_tx_size; + if (!has_left) left_ctx = above_ctx; - if (!has_above) - above_ctx = left_ctx; + if (!has_above) above_ctx = left_ctx; return (above_ctx + left_ctx) > max_tx_size; } @@ -131,15 +169,10 @@ static INLINE int get_tx_size_context(const MACROBLOCKD *xd) { static INLINE const vpx_prob *get_tx_probs(TX_SIZE max_tx_size, int ctx, const struct tx_probs *tx_probs) { switch (max_tx_size) { - case TX_8X8: - return tx_probs->p8x8[ctx]; - case TX_16X16: - return tx_probs->p16x16[ctx]; - case TX_32X32: - return tx_probs->p32x32[ctx]; - default: - assert(0 && "Invalid max_tx_size."); - return NULL; + case TX_8X8: return tx_probs->p8x8[ctx]; + case TX_16X16: return tx_probs->p16x16[ctx]; + case TX_32X32: return tx_probs->p32x32[ctx]; + default: assert(0 && "Invalid max_tx_size."); return NULL; } } @@ -152,15 +185,10 @@ static INLINE const vpx_prob *get_tx_probs2(TX_SIZE max_tx_size, static INLINE unsigned int *get_tx_counts(TX_SIZE max_tx_size, int ctx, struct tx_counts *tx_counts) { switch (max_tx_size) { - case TX_8X8: - return tx_counts->p8x8[ctx]; - case TX_16X16: - return tx_counts->p16x16[ctx]; - case TX_32X32: - return tx_counts->p32x32[ctx]; - default: - assert(0 && "Invalid max_tx_size."); - return NULL; + case TX_8X8: return tx_counts->p8x8[ctx]; + case TX_16X16: return tx_counts->p16x16[ctx]; + case TX_32X32: return tx_counts->p32x32[ctx]; + default: assert(0 && "Invalid max_tx_size."); return NULL; } } diff --git a/libs/libvpx/vp9/common/vp9_quant_common.c b/libs/libvpx/vp9/common/vp9_quant_common.c index d83f3c1a2f..1dc18dc6df 100644 --- a/libs/libvpx/vp9/common/vp9_quant_common.c +++ b/libs/libvpx/vp9/common/vp9_quant_common.c @@ -13,234 +13,166 @@ #include "vp9/common/vp9_seg_common.h" static const int16_t dc_qlookup[QINDEX_RANGE] = { - 4, 8, 8, 9, 10, 11, 12, 12, - 13, 14, 15, 16, 17, 18, 19, 19, - 20, 21, 22, 23, 24, 25, 26, 26, - 27, 28, 29, 30, 31, 32, 32, 33, - 34, 35, 36, 37, 38, 38, 39, 40, - 41, 42, 43, 43, 44, 45, 46, 
47, - 48, 48, 49, 50, 51, 52, 53, 53, - 54, 55, 56, 57, 57, 58, 59, 60, - 61, 62, 62, 63, 64, 65, 66, 66, - 67, 68, 69, 70, 70, 71, 72, 73, - 74, 74, 75, 76, 77, 78, 78, 79, - 80, 81, 81, 82, 83, 84, 85, 85, - 87, 88, 90, 92, 93, 95, 96, 98, - 99, 101, 102, 104, 105, 107, 108, 110, - 111, 113, 114, 116, 117, 118, 120, 121, - 123, 125, 127, 129, 131, 134, 136, 138, - 140, 142, 144, 146, 148, 150, 152, 154, - 156, 158, 161, 164, 166, 169, 172, 174, - 177, 180, 182, 185, 187, 190, 192, 195, - 199, 202, 205, 208, 211, 214, 217, 220, - 223, 226, 230, 233, 237, 240, 243, 247, - 250, 253, 257, 261, 265, 269, 272, 276, - 280, 284, 288, 292, 296, 300, 304, 309, - 313, 317, 322, 326, 330, 335, 340, 344, - 349, 354, 359, 364, 369, 374, 379, 384, - 389, 395, 400, 406, 411, 417, 423, 429, - 435, 441, 447, 454, 461, 467, 475, 482, - 489, 497, 505, 513, 522, 530, 539, 549, - 559, 569, 579, 590, 602, 614, 626, 640, - 654, 668, 684, 700, 717, 736, 755, 775, - 796, 819, 843, 869, 896, 925, 955, 988, - 1022, 1058, 1098, 1139, 1184, 1232, 1282, 1336, + 4, 8, 8, 9, 10, 11, 12, 12, 13, 14, 15, 16, 17, 18, + 19, 19, 20, 21, 22, 23, 24, 25, 26, 26, 27, 28, 29, 30, + 31, 32, 32, 33, 34, 35, 36, 37, 38, 38, 39, 40, 41, 42, + 43, 43, 44, 45, 46, 47, 48, 48, 49, 50, 51, 52, 53, 53, + 54, 55, 56, 57, 57, 58, 59, 60, 61, 62, 62, 63, 64, 65, + 66, 66, 67, 68, 69, 70, 70, 71, 72, 73, 74, 74, 75, 76, + 77, 78, 78, 79, 80, 81, 81, 82, 83, 84, 85, 85, 87, 88, + 90, 92, 93, 95, 96, 98, 99, 101, 102, 104, 105, 107, 108, 110, + 111, 113, 114, 116, 117, 118, 120, 121, 123, 125, 127, 129, 131, 134, + 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 161, 164, + 166, 169, 172, 174, 177, 180, 182, 185, 187, 190, 192, 195, 199, 202, + 205, 208, 211, 214, 217, 220, 223, 226, 230, 233, 237, 240, 243, 247, + 250, 253, 257, 261, 265, 269, 272, 276, 280, 284, 288, 292, 296, 300, + 304, 309, 313, 317, 322, 326, 330, 335, 340, 344, 349, 354, 359, 364, + 369, 374, 379, 384, 389, 395, 400, 406, 411, 417, 
423, 429, 435, 441, + 447, 454, 461, 467, 475, 482, 489, 497, 505, 513, 522, 530, 539, 549, + 559, 569, 579, 590, 602, 614, 626, 640, 654, 668, 684, 700, 717, 736, + 755, 775, 796, 819, 843, 869, 896, 925, 955, 988, 1022, 1058, 1098, 1139, + 1184, 1232, 1282, 1336, }; #if CONFIG_VP9_HIGHBITDEPTH static const int16_t dc_qlookup_10[QINDEX_RANGE] = { - 4, 9, 10, 13, 15, 17, 20, 22, - 25, 28, 31, 34, 37, 40, 43, 47, - 50, 53, 57, 60, 64, 68, 71, 75, - 78, 82, 86, 90, 93, 97, 101, 105, - 109, 113, 116, 120, 124, 128, 132, 136, - 140, 143, 147, 151, 155, 159, 163, 166, - 170, 174, 178, 182, 185, 189, 193, 197, - 200, 204, 208, 212, 215, 219, 223, 226, - 230, 233, 237, 241, 244, 248, 251, 255, - 259, 262, 266, 269, 273, 276, 280, 283, - 287, 290, 293, 297, 300, 304, 307, 310, - 314, 317, 321, 324, 327, 331, 334, 337, - 343, 350, 356, 362, 369, 375, 381, 387, - 394, 400, 406, 412, 418, 424, 430, 436, - 442, 448, 454, 460, 466, 472, 478, 484, - 490, 499, 507, 516, 525, 533, 542, 550, - 559, 567, 576, 584, 592, 601, 609, 617, - 625, 634, 644, 655, 666, 676, 687, 698, - 708, 718, 729, 739, 749, 759, 770, 782, - 795, 807, 819, 831, 844, 856, 868, 880, - 891, 906, 920, 933, 947, 961, 975, 988, - 1001, 1015, 1030, 1045, 1061, 1076, 1090, 1105, - 1120, 1137, 1153, 1170, 1186, 1202, 1218, 1236, - 1253, 1271, 1288, 1306, 1323, 1342, 1361, 1379, - 1398, 1416, 1436, 1456, 1476, 1496, 1516, 1537, - 1559, 1580, 1601, 1624, 1647, 1670, 1692, 1717, - 1741, 1766, 1791, 1817, 1844, 1871, 1900, 1929, - 1958, 1990, 2021, 2054, 2088, 2123, 2159, 2197, - 2236, 2276, 2319, 2363, 2410, 2458, 2508, 2561, - 2616, 2675, 2737, 2802, 2871, 2944, 3020, 3102, - 3188, 3280, 3375, 3478, 3586, 3702, 3823, 3953, - 4089, 4236, 4394, 4559, 4737, 4929, 5130, 5347, + 4, 9, 10, 13, 15, 17, 20, 22, 25, 28, 31, 34, 37, + 40, 43, 47, 50, 53, 57, 60, 64, 68, 71, 75, 78, 82, + 86, 90, 93, 97, 101, 105, 109, 113, 116, 120, 124, 128, 132, + 136, 140, 143, 147, 151, 155, 159, 163, 166, 170, 174, 178, 182, + 185, 189, 
193, 197, 200, 204, 208, 212, 215, 219, 223, 226, 230, + 233, 237, 241, 244, 248, 251, 255, 259, 262, 266, 269, 273, 276, + 280, 283, 287, 290, 293, 297, 300, 304, 307, 310, 314, 317, 321, + 324, 327, 331, 334, 337, 343, 350, 356, 362, 369, 375, 381, 387, + 394, 400, 406, 412, 418, 424, 430, 436, 442, 448, 454, 460, 466, + 472, 478, 484, 490, 499, 507, 516, 525, 533, 542, 550, 559, 567, + 576, 584, 592, 601, 609, 617, 625, 634, 644, 655, 666, 676, 687, + 698, 708, 718, 729, 739, 749, 759, 770, 782, 795, 807, 819, 831, + 844, 856, 868, 880, 891, 906, 920, 933, 947, 961, 975, 988, 1001, + 1015, 1030, 1045, 1061, 1076, 1090, 1105, 1120, 1137, 1153, 1170, 1186, 1202, + 1218, 1236, 1253, 1271, 1288, 1306, 1323, 1342, 1361, 1379, 1398, 1416, 1436, + 1456, 1476, 1496, 1516, 1537, 1559, 1580, 1601, 1624, 1647, 1670, 1692, 1717, + 1741, 1766, 1791, 1817, 1844, 1871, 1900, 1929, 1958, 1990, 2021, 2054, 2088, + 2123, 2159, 2197, 2236, 2276, 2319, 2363, 2410, 2458, 2508, 2561, 2616, 2675, + 2737, 2802, 2871, 2944, 3020, 3102, 3188, 3280, 3375, 3478, 3586, 3702, 3823, + 3953, 4089, 4236, 4394, 4559, 4737, 4929, 5130, 5347, }; static const int16_t dc_qlookup_12[QINDEX_RANGE] = { - 4, 12, 18, 25, 33, 41, 50, 60, - 70, 80, 91, 103, 115, 127, 140, 153, - 166, 180, 194, 208, 222, 237, 251, 266, - 281, 296, 312, 327, 343, 358, 374, 390, - 405, 421, 437, 453, 469, 484, 500, 516, - 532, 548, 564, 580, 596, 611, 627, 643, - 659, 674, 690, 706, 721, 737, 752, 768, - 783, 798, 814, 829, 844, 859, 874, 889, - 904, 919, 934, 949, 964, 978, 993, 1008, - 1022, 1037, 1051, 1065, 1080, 1094, 1108, 1122, - 1136, 1151, 1165, 1179, 1192, 1206, 1220, 1234, - 1248, 1261, 1275, 1288, 1302, 1315, 1329, 1342, - 1368, 1393, 1419, 1444, 1469, 1494, 1519, 1544, - 1569, 1594, 1618, 1643, 1668, 1692, 1717, 1741, - 1765, 1789, 1814, 1838, 1862, 1885, 1909, 1933, - 1957, 1992, 2027, 2061, 2096, 2130, 2165, 2199, - 2233, 2267, 2300, 2334, 2367, 2400, 2434, 2467, - 2499, 2532, 2575, 2618, 2661, 2704, 2746, 
2788, - 2830, 2872, 2913, 2954, 2995, 3036, 3076, 3127, - 3177, 3226, 3275, 3324, 3373, 3421, 3469, 3517, - 3565, 3621, 3677, 3733, 3788, 3843, 3897, 3951, - 4005, 4058, 4119, 4181, 4241, 4301, 4361, 4420, - 4479, 4546, 4612, 4677, 4742, 4807, 4871, 4942, - 5013, 5083, 5153, 5222, 5291, 5367, 5442, 5517, - 5591, 5665, 5745, 5825, 5905, 5984, 6063, 6149, - 6234, 6319, 6404, 6495, 6587, 6678, 6769, 6867, - 6966, 7064, 7163, 7269, 7376, 7483, 7599, 7715, - 7832, 7958, 8085, 8214, 8352, 8492, 8635, 8788, - 8945, 9104, 9275, 9450, 9639, 9832, 10031, 10245, - 10465, 10702, 10946, 11210, 11482, 11776, 12081, 12409, - 12750, 13118, 13501, 13913, 14343, 14807, 15290, 15812, - 16356, 16943, 17575, 18237, 18949, 19718, 20521, 21387, + 4, 12, 18, 25, 33, 41, 50, 60, 70, 80, 91, + 103, 115, 127, 140, 153, 166, 180, 194, 208, 222, 237, + 251, 266, 281, 296, 312, 327, 343, 358, 374, 390, 405, + 421, 437, 453, 469, 484, 500, 516, 532, 548, 564, 580, + 596, 611, 627, 643, 659, 674, 690, 706, 721, 737, 752, + 768, 783, 798, 814, 829, 844, 859, 874, 889, 904, 919, + 934, 949, 964, 978, 993, 1008, 1022, 1037, 1051, 1065, 1080, + 1094, 1108, 1122, 1136, 1151, 1165, 1179, 1192, 1206, 1220, 1234, + 1248, 1261, 1275, 1288, 1302, 1315, 1329, 1342, 1368, 1393, 1419, + 1444, 1469, 1494, 1519, 1544, 1569, 1594, 1618, 1643, 1668, 1692, + 1717, 1741, 1765, 1789, 1814, 1838, 1862, 1885, 1909, 1933, 1957, + 1992, 2027, 2061, 2096, 2130, 2165, 2199, 2233, 2267, 2300, 2334, + 2367, 2400, 2434, 2467, 2499, 2532, 2575, 2618, 2661, 2704, 2746, + 2788, 2830, 2872, 2913, 2954, 2995, 3036, 3076, 3127, 3177, 3226, + 3275, 3324, 3373, 3421, 3469, 3517, 3565, 3621, 3677, 3733, 3788, + 3843, 3897, 3951, 4005, 4058, 4119, 4181, 4241, 4301, 4361, 4420, + 4479, 4546, 4612, 4677, 4742, 4807, 4871, 4942, 5013, 5083, 5153, + 5222, 5291, 5367, 5442, 5517, 5591, 5665, 5745, 5825, 5905, 5984, + 6063, 6149, 6234, 6319, 6404, 6495, 6587, 6678, 6769, 6867, 6966, + 7064, 7163, 7269, 7376, 7483, 7599, 7715, 7832, 7958, 
8085, 8214, + 8352, 8492, 8635, 8788, 8945, 9104, 9275, 9450, 9639, 9832, 10031, + 10245, 10465, 10702, 10946, 11210, 11482, 11776, 12081, 12409, 12750, 13118, + 13501, 13913, 14343, 14807, 15290, 15812, 16356, 16943, 17575, 18237, 18949, + 19718, 20521, 21387, }; #endif static const int16_t ac_qlookup[QINDEX_RANGE] = { - 4, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, - 23, 24, 25, 26, 27, 28, 29, 30, - 31, 32, 33, 34, 35, 36, 37, 38, - 39, 40, 41, 42, 43, 44, 45, 46, - 47, 48, 49, 50, 51, 52, 53, 54, - 55, 56, 57, 58, 59, 60, 61, 62, - 63, 64, 65, 66, 67, 68, 69, 70, - 71, 72, 73, 74, 75, 76, 77, 78, - 79, 80, 81, 82, 83, 84, 85, 86, - 87, 88, 89, 90, 91, 92, 93, 94, - 95, 96, 97, 98, 99, 100, 101, 102, - 104, 106, 108, 110, 112, 114, 116, 118, - 120, 122, 124, 126, 128, 130, 132, 134, - 136, 138, 140, 142, 144, 146, 148, 150, - 152, 155, 158, 161, 164, 167, 170, 173, - 176, 179, 182, 185, 188, 191, 194, 197, - 200, 203, 207, 211, 215, 219, 223, 227, - 231, 235, 239, 243, 247, 251, 255, 260, - 265, 270, 275, 280, 285, 290, 295, 300, - 305, 311, 317, 323, 329, 335, 341, 347, - 353, 359, 366, 373, 380, 387, 394, 401, - 408, 416, 424, 432, 440, 448, 456, 465, - 474, 483, 492, 501, 510, 520, 530, 540, - 550, 560, 571, 582, 593, 604, 615, 627, - 639, 651, 663, 676, 689, 702, 715, 729, - 743, 757, 771, 786, 801, 816, 832, 848, - 864, 881, 898, 915, 933, 951, 969, 988, - 1007, 1026, 1046, 1066, 1087, 1108, 1129, 1151, - 1173, 1196, 1219, 1243, 1267, 1292, 1317, 1343, - 1369, 1396, 1423, 1451, 1479, 1508, 1537, 1567, - 1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828, + 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, + 98, 99, 100, 101, 
102, 104, 106, 108, 110, 112, 114, 116, 118, + 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, + 146, 148, 150, 152, 155, 158, 161, 164, 167, 170, 173, 176, 179, + 182, 185, 188, 191, 194, 197, 200, 203, 207, 211, 215, 219, 223, + 227, 231, 235, 239, 243, 247, 251, 255, 260, 265, 270, 275, 280, + 285, 290, 295, 300, 305, 311, 317, 323, 329, 335, 341, 347, 353, + 359, 366, 373, 380, 387, 394, 401, 408, 416, 424, 432, 440, 448, + 456, 465, 474, 483, 492, 501, 510, 520, 530, 540, 550, 560, 571, + 582, 593, 604, 615, 627, 639, 651, 663, 676, 689, 702, 715, 729, + 743, 757, 771, 786, 801, 816, 832, 848, 864, 881, 898, 915, 933, + 951, 969, 988, 1007, 1026, 1046, 1066, 1087, 1108, 1129, 1151, 1173, 1196, + 1219, 1243, 1267, 1292, 1317, 1343, 1369, 1396, 1423, 1451, 1479, 1508, 1537, + 1567, 1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828, }; #if CONFIG_VP9_HIGHBITDEPTH static const int16_t ac_qlookup_10[QINDEX_RANGE] = { - 4, 9, 11, 13, 16, 18, 21, 24, - 27, 30, 33, 37, 40, 44, 48, 51, - 55, 59, 63, 67, 71, 75, 79, 83, - 88, 92, 96, 100, 105, 109, 114, 118, - 122, 127, 131, 136, 140, 145, 149, 154, - 158, 163, 168, 172, 177, 181, 186, 190, - 195, 199, 204, 208, 213, 217, 222, 226, - 231, 235, 240, 244, 249, 253, 258, 262, - 267, 271, 275, 280, 284, 289, 293, 297, - 302, 306, 311, 315, 319, 324, 328, 332, - 337, 341, 345, 349, 354, 358, 362, 367, - 371, 375, 379, 384, 388, 392, 396, 401, - 409, 417, 425, 433, 441, 449, 458, 466, - 474, 482, 490, 498, 506, 514, 523, 531, - 539, 547, 555, 563, 571, 579, 588, 596, - 604, 616, 628, 640, 652, 664, 676, 688, - 700, 713, 725, 737, 749, 761, 773, 785, - 797, 809, 825, 841, 857, 873, 889, 905, - 922, 938, 954, 970, 986, 1002, 1018, 1038, - 1058, 1078, 1098, 1118, 1138, 1158, 1178, 1198, - 1218, 1242, 1266, 1290, 1314, 1338, 1362, 1386, - 1411, 1435, 1463, 1491, 1519, 1547, 1575, 1603, - 1631, 1663, 1695, 1727, 1759, 1791, 1823, 1859, - 1895, 1931, 1967, 2003, 2039, 2079, 2119, 2159, - 2199, 2239, 2283, 2327, 2371, 
2415, 2459, 2507, - 2555, 2603, 2651, 2703, 2755, 2807, 2859, 2915, - 2971, 3027, 3083, 3143, 3203, 3263, 3327, 3391, - 3455, 3523, 3591, 3659, 3731, 3803, 3876, 3952, - 4028, 4104, 4184, 4264, 4348, 4432, 4516, 4604, - 4692, 4784, 4876, 4972, 5068, 5168, 5268, 5372, - 5476, 5584, 5692, 5804, 5916, 6032, 6148, 6268, - 6388, 6512, 6640, 6768, 6900, 7036, 7172, 7312, + 4, 9, 11, 13, 16, 18, 21, 24, 27, 30, 33, 37, 40, + 44, 48, 51, 55, 59, 63, 67, 71, 75, 79, 83, 88, 92, + 96, 100, 105, 109, 114, 118, 122, 127, 131, 136, 140, 145, 149, + 154, 158, 163, 168, 172, 177, 181, 186, 190, 195, 199, 204, 208, + 213, 217, 222, 226, 231, 235, 240, 244, 249, 253, 258, 262, 267, + 271, 275, 280, 284, 289, 293, 297, 302, 306, 311, 315, 319, 324, + 328, 332, 337, 341, 345, 349, 354, 358, 362, 367, 371, 375, 379, + 384, 388, 392, 396, 401, 409, 417, 425, 433, 441, 449, 458, 466, + 474, 482, 490, 498, 506, 514, 523, 531, 539, 547, 555, 563, 571, + 579, 588, 596, 604, 616, 628, 640, 652, 664, 676, 688, 700, 713, + 725, 737, 749, 761, 773, 785, 797, 809, 825, 841, 857, 873, 889, + 905, 922, 938, 954, 970, 986, 1002, 1018, 1038, 1058, 1078, 1098, 1118, + 1138, 1158, 1178, 1198, 1218, 1242, 1266, 1290, 1314, 1338, 1362, 1386, 1411, + 1435, 1463, 1491, 1519, 1547, 1575, 1603, 1631, 1663, 1695, 1727, 1759, 1791, + 1823, 1859, 1895, 1931, 1967, 2003, 2039, 2079, 2119, 2159, 2199, 2239, 2283, + 2327, 2371, 2415, 2459, 2507, 2555, 2603, 2651, 2703, 2755, 2807, 2859, 2915, + 2971, 3027, 3083, 3143, 3203, 3263, 3327, 3391, 3455, 3523, 3591, 3659, 3731, + 3803, 3876, 3952, 4028, 4104, 4184, 4264, 4348, 4432, 4516, 4604, 4692, 4784, + 4876, 4972, 5068, 5168, 5268, 5372, 5476, 5584, 5692, 5804, 5916, 6032, 6148, + 6268, 6388, 6512, 6640, 6768, 6900, 7036, 7172, 7312, }; static const int16_t ac_qlookup_12[QINDEX_RANGE] = { - 4, 13, 19, 27, 35, 44, 54, 64, - 75, 87, 99, 112, 126, 139, 154, 168, - 183, 199, 214, 230, 247, 263, 280, 297, - 314, 331, 349, 366, 384, 402, 420, 438, - 456, 475, 493, 511, 
530, 548, 567, 586, - 604, 623, 642, 660, 679, 698, 716, 735, - 753, 772, 791, 809, 828, 846, 865, 884, - 902, 920, 939, 957, 976, 994, 1012, 1030, - 1049, 1067, 1085, 1103, 1121, 1139, 1157, 1175, - 1193, 1211, 1229, 1246, 1264, 1282, 1299, 1317, - 1335, 1352, 1370, 1387, 1405, 1422, 1440, 1457, - 1474, 1491, 1509, 1526, 1543, 1560, 1577, 1595, - 1627, 1660, 1693, 1725, 1758, 1791, 1824, 1856, - 1889, 1922, 1954, 1987, 2020, 2052, 2085, 2118, - 2150, 2183, 2216, 2248, 2281, 2313, 2346, 2378, - 2411, 2459, 2508, 2556, 2605, 2653, 2701, 2750, - 2798, 2847, 2895, 2943, 2992, 3040, 3088, 3137, - 3185, 3234, 3298, 3362, 3426, 3491, 3555, 3619, - 3684, 3748, 3812, 3876, 3941, 4005, 4069, 4149, - 4230, 4310, 4390, 4470, 4550, 4631, 4711, 4791, - 4871, 4967, 5064, 5160, 5256, 5352, 5448, 5544, - 5641, 5737, 5849, 5961, 6073, 6185, 6297, 6410, - 6522, 6650, 6778, 6906, 7034, 7162, 7290, 7435, - 7579, 7723, 7867, 8011, 8155, 8315, 8475, 8635, - 8795, 8956, 9132, 9308, 9484, 9660, 9836, 10028, - 10220, 10412, 10604, 10812, 11020, 11228, 11437, 11661, - 11885, 12109, 12333, 12573, 12813, 13053, 13309, 13565, - 13821, 14093, 14365, 14637, 14925, 15213, 15502, 15806, - 16110, 16414, 16734, 17054, 17390, 17726, 18062, 18414, - 18766, 19134, 19502, 19886, 20270, 20670, 21070, 21486, - 21902, 22334, 22766, 23214, 23662, 24126, 24590, 25070, - 25551, 26047, 26559, 27071, 27599, 28143, 28687, 29247, + 4, 13, 19, 27, 35, 44, 54, 64, 75, 87, 99, + 112, 126, 139, 154, 168, 183, 199, 214, 230, 247, 263, + 280, 297, 314, 331, 349, 366, 384, 402, 420, 438, 456, + 475, 493, 511, 530, 548, 567, 586, 604, 623, 642, 660, + 679, 698, 716, 735, 753, 772, 791, 809, 828, 846, 865, + 884, 902, 920, 939, 957, 976, 994, 1012, 1030, 1049, 1067, + 1085, 1103, 1121, 1139, 1157, 1175, 1193, 1211, 1229, 1246, 1264, + 1282, 1299, 1317, 1335, 1352, 1370, 1387, 1405, 1422, 1440, 1457, + 1474, 1491, 1509, 1526, 1543, 1560, 1577, 1595, 1627, 1660, 1693, + 1725, 1758, 1791, 1824, 1856, 1889, 1922, 1954, 1987, 
2020, 2052, + 2085, 2118, 2150, 2183, 2216, 2248, 2281, 2313, 2346, 2378, 2411, + 2459, 2508, 2556, 2605, 2653, 2701, 2750, 2798, 2847, 2895, 2943, + 2992, 3040, 3088, 3137, 3185, 3234, 3298, 3362, 3426, 3491, 3555, + 3619, 3684, 3748, 3812, 3876, 3941, 4005, 4069, 4149, 4230, 4310, + 4390, 4470, 4550, 4631, 4711, 4791, 4871, 4967, 5064, 5160, 5256, + 5352, 5448, 5544, 5641, 5737, 5849, 5961, 6073, 6185, 6297, 6410, + 6522, 6650, 6778, 6906, 7034, 7162, 7290, 7435, 7579, 7723, 7867, + 8011, 8155, 8315, 8475, 8635, 8795, 8956, 9132, 9308, 9484, 9660, + 9836, 10028, 10220, 10412, 10604, 10812, 11020, 11228, 11437, 11661, 11885, + 12109, 12333, 12573, 12813, 13053, 13309, 13565, 13821, 14093, 14365, 14637, + 14925, 15213, 15502, 15806, 16110, 16414, 16734, 17054, 17390, 17726, 18062, + 18414, 18766, 19134, 19502, 19886, 20270, 20670, 21070, 21486, 21902, 22334, + 22766, 23214, 23662, 24126, 24590, 25070, 25551, 26047, 26559, 27071, 27599, + 28143, 28687, 29247, }; #endif int16_t vp9_dc_quant(int qindex, int delta, vpx_bit_depth_t bit_depth) { #if CONFIG_VP9_HIGHBITDEPTH switch (bit_depth) { - case VPX_BITS_8: - return dc_qlookup[clamp(qindex + delta, 0, MAXQ)]; - case VPX_BITS_10: - return dc_qlookup_10[clamp(qindex + delta, 0, MAXQ)]; - case VPX_BITS_12: - return dc_qlookup_12[clamp(qindex + delta, 0, MAXQ)]; + case VPX_BITS_8: return dc_qlookup[clamp(qindex + delta, 0, MAXQ)]; + case VPX_BITS_10: return dc_qlookup_10[clamp(qindex + delta, 0, MAXQ)]; + case VPX_BITS_12: return dc_qlookup_12[clamp(qindex + delta, 0, MAXQ)]; default: assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); return -1; } #else - (void) bit_depth; + (void)bit_depth; return dc_qlookup[clamp(qindex + delta, 0, MAXQ)]; #endif } @@ -248,18 +180,15 @@ int16_t vp9_dc_quant(int qindex, int delta, vpx_bit_depth_t bit_depth) { int16_t vp9_ac_quant(int qindex, int delta, vpx_bit_depth_t bit_depth) { #if CONFIG_VP9_HIGHBITDEPTH switch (bit_depth) { - case VPX_BITS_8: - return 
ac_qlookup[clamp(qindex + delta, 0, MAXQ)]; - case VPX_BITS_10: - return ac_qlookup_10[clamp(qindex + delta, 0, MAXQ)]; - case VPX_BITS_12: - return ac_qlookup_12[clamp(qindex + delta, 0, MAXQ)]; + case VPX_BITS_8: return ac_qlookup[clamp(qindex + delta, 0, MAXQ)]; + case VPX_BITS_10: return ac_qlookup_10[clamp(qindex + delta, 0, MAXQ)]; + case VPX_BITS_12: return ac_qlookup_12[clamp(qindex + delta, 0, MAXQ)]; default: assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); return -1; } #else - (void) bit_depth; + (void)bit_depth; return ac_qlookup[clamp(qindex + delta, 0, MAXQ)]; #endif } @@ -268,11 +197,10 @@ int vp9_get_qindex(const struct segmentation *seg, int segment_id, int base_qindex) { if (segfeature_active(seg, segment_id, SEG_LVL_ALT_Q)) { const int data = get_segdata(seg, segment_id, SEG_LVL_ALT_Q); - const int seg_qindex = seg->abs_delta == SEGMENT_ABSDATA ? - data : base_qindex + data; + const int seg_qindex = + seg->abs_delta == SEGMENT_ABSDATA ? data : base_qindex + data; return clamp(seg_qindex, 0, MAXQ); } else { return base_qindex; } } - diff --git a/libs/libvpx/vp9/common/vp9_reconinter.c b/libs/libvpx/vp9/common/vp9_reconinter.c index 74bc1d23eb..8eb7126898 100644 --- a/libs/libvpx/vp9/common/vp9_reconinter.c +++ b/libs/libvpx/vp9/common/vp9_reconinter.c @@ -20,27 +20,11 @@ #include "vp9/common/vp9_reconintra.h" #if CONFIG_VP9_HIGHBITDEPTH -void high_inter_predictor(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - const int subpel_x, - const int subpel_y, - const struct scale_factors *sf, - int w, int h, int ref, - const InterpKernel *kernel, - int xs, int ys, int bd) { - sf->highbd_predict[subpel_x != 0][subpel_y != 0][ref]( - src, src_stride, dst, dst_stride, - kernel[subpel_x], xs, kernel[subpel_y], ys, w, h, bd); -} - -void vp9_highbd_build_inter_predictor(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - const MV *src_mv, - const struct scale_factors *sf, - int w, int h, int ref, 
- const InterpKernel *kernel, - enum mv_precision precision, - int x, int y, int bd) { +void vp9_highbd_build_inter_predictor( + const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, + const MV *src_mv, const struct scale_factors *sf, int w, int h, int ref, + const InterpKernel *kernel, enum mv_precision precision, int x, int y, + int bd) { const int is_q4 = precision == MV_PRECISION_Q4; const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2, is_q4 ? src_mv->col : src_mv->col * 2 }; @@ -50,19 +34,17 @@ void vp9_highbd_build_inter_predictor(const uint8_t *src, int src_stride, src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS); - high_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, - sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4, bd); + highbd_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, + sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4, + bd); } #endif // CONFIG_VP9_HIGHBITDEPTH -void vp9_build_inter_predictor(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - const MV *src_mv, - const struct scale_factors *sf, - int w, int h, int ref, - const InterpKernel *kernel, - enum mv_precision precision, - int x, int y) { +void vp9_build_inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst, + int dst_stride, const MV *src_mv, + const struct scale_factors *sf, int w, int h, + int ref, const InterpKernel *kernel, + enum mv_precision precision, int x, int y) { const int is_q4 = precision == MV_PRECISION_Q4; const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2, is_q4 ? 
src_mv->col : src_mv->col * 2 }; @@ -72,8 +54,8 @@ void vp9_build_inter_predictor(const uint8_t *src, int src_stride, src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS); - inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, - sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4); + inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, sf, w, + h, ref, kernel, sf->x_step_q4, sf->y_step_q4); } static INLINE int round_mv_comp_q4(int value) { @@ -81,14 +63,14 @@ static INLINE int round_mv_comp_q4(int value) { } static MV mi_mv_pred_q4(const MODE_INFO *mi, int idx) { - MV res = { round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.row + - mi->bmi[1].as_mv[idx].as_mv.row + - mi->bmi[2].as_mv[idx].as_mv.row + - mi->bmi[3].as_mv[idx].as_mv.row), - round_mv_comp_q4(mi->bmi[0].as_mv[idx].as_mv.col + - mi->bmi[1].as_mv[idx].as_mv.col + - mi->bmi[2].as_mv[idx].as_mv.col + - mi->bmi[3].as_mv[idx].as_mv.col) }; + MV res = { + round_mv_comp_q4( + mi->bmi[0].as_mv[idx].as_mv.row + mi->bmi[1].as_mv[idx].as_mv.row + + mi->bmi[2].as_mv[idx].as_mv.row + mi->bmi[3].as_mv[idx].as_mv.row), + round_mv_comp_q4( + mi->bmi[0].as_mv[idx].as_mv.col + mi->bmi[1].as_mv[idx].as_mv.col + + mi->bmi[2].as_mv[idx].as_mv.col + mi->bmi[3].as_mv[idx].as_mv.col) + }; return res; } @@ -105,8 +87,8 @@ static MV mi_mv_pred_q2(const MODE_INFO *mi, int idx, int block0, int block1) { } // TODO(jkoleszar): yet another mv clamping function :-( -MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv, - int bw, int bh, int ss_x, int ss_y) { +MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv, int bw, + int bh, int ss_x, int ss_y) { // If the MV points so far into the UMV border that no visible pixels // are used for reconstruction, the subpel part of the MV can be // discarded and the MV limited to 16 pixels with equivalent results. 
@@ -114,15 +96,12 @@ MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv, const int spel_right = spel_left - SUBPEL_SHIFTS; const int spel_top = (VP9_INTERP_EXTEND + bh) << SUBPEL_BITS; const int spel_bottom = spel_top - SUBPEL_SHIFTS; - MV clamped_mv = { - src_mv->row * (1 << (1 - ss_y)), - src_mv->col * (1 << (1 - ss_x)) - }; + MV clamped_mv = { src_mv->row * (1 << (1 - ss_y)), + src_mv->col * (1 << (1 - ss_x)) }; assert(ss_x <= 1); assert(ss_y <= 1); - clamp_mv(&clamped_mv, - xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left, + clamp_mv(&clamped_mv, xd->mb_to_left_edge * (1 << (1 - ss_x)) - spel_left, xd->mb_to_right_edge * (1 << (1 - ss_x)) + spel_right, xd->mb_to_top_edge * (1 << (1 - ss_y)) - spel_top, xd->mb_to_bottom_edge * (1 << (1 - ss_y)) + spel_bottom); @@ -130,32 +109,22 @@ MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv, return clamped_mv; } -MV average_split_mvs(const struct macroblockd_plane *pd, - const MODE_INFO *mi, int ref, int block) { +MV average_split_mvs(const struct macroblockd_plane *pd, const MODE_INFO *mi, + int ref, int block) { const int ss_idx = ((pd->subsampling_x > 0) << 1) | (pd->subsampling_y > 0); - MV res = {0, 0}; + MV res = { 0, 0 }; switch (ss_idx) { - case 0: - res = mi->bmi[block].as_mv[ref].as_mv; - break; - case 1: - res = mi_mv_pred_q2(mi, ref, block, block + 2); - break; - case 2: - res = mi_mv_pred_q2(mi, ref, block, block + 1); - break; - case 3: - res = mi_mv_pred_q4(mi, ref); - break; - default: - assert(ss_idx <= 3 && ss_idx >= 0); + case 0: res = mi->bmi[block].as_mv[ref].as_mv; break; + case 1: res = mi_mv_pred_q2(mi, ref, block, block + 2); break; + case 2: res = mi_mv_pred_q2(mi, ref, block, block + 1); break; + case 3: res = mi_mv_pred_q4(mi, ref); break; + default: assert(ss_idx <= 3 && ss_idx >= 0); } return res; } static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, - int bw, int bh, - int x, int y, int w, int h, + int bw, int bh, int x, int y, 
int w, int h, int mi_x, int mi_y) { struct macroblockd_plane *const pd = &xd->plane[plane]; const MODE_INFO *mi = xd->mi[0]; @@ -169,17 +138,16 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, struct buf_2d *const dst_buf = &pd->dst; uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; const MV mv = mi->sb_type < BLOCK_8X8 - ? average_split_mvs(pd, mi, ref, block) - : mi->mv[ref].as_mv; + ? average_split_mvs(pd, mi, ref, block) + : mi->mv[ref].as_mv; // TODO(jkoleszar): This clamping is done in the incorrect place for the // scaling case. It needs to be done on the scaled MV, not the pre-scaling // MV. Note however that it performs the subsampling aware scaling so // that the result is always q4. // mv_precision precision is MV_PRECISION_Q4. - const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh, - pd->subsampling_x, - pd->subsampling_y); + const MV mv_q4 = clamp_mv_to_umv_border_sb( + xd, &mv, bw, bh, pd->subsampling_x, pd->subsampling_y); uint8_t *pre; MV32 scaled_mv; @@ -190,7 +158,7 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, // Co-ordinate of containing block to pixel precision. 
const int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)); const int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)); -#if CONFIG_BETTER_HW_COMPATIBILITY +#if 0 // CONFIG_BETTER_HW_COMPATIBILITY assert(xd->mi[0]->sb_type != BLOCK_4X8 && xd->mi[0]->sb_type != BLOCK_8X4); assert(mv_q4.row == mv.row * (1 << (1 - pd->subsampling_y)) && @@ -203,8 +171,8 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, else pre_buf->buf = xd->block_refs[ref]->buf->v_buffer; - pre_buf->buf += scaled_buffer_offset(x_start + x, y_start + y, - pre_buf->stride, sf); + pre_buf->buf += + scaled_buffer_offset(x_start + x, y_start + y, pre_buf->stride, sf); pre = pre_buf->buf; scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf); xs = sf->x_step_q4; @@ -217,21 +185,21 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, } subpel_x = scaled_mv.col & SUBPEL_MASK; subpel_y = scaled_mv.row & SUBPEL_MASK; - pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride - + (scaled_mv.col >> SUBPEL_BITS); + pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride + + (scaled_mv.col >> SUBPEL_BITS); #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - high_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, - subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys, - xd->bd); + highbd_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, + subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys, + xd->bd); } else { - inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, - subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys); + inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, subpel_x, + subpel_y, sf, w, h, ref, kernel, xs, ys); } #else - inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, - subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys); + inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, subpel_x, + subpel_y, sf, w, h, ref, kernel, xs, ys); #endif // 
CONFIG_VP9_HIGHBITDEPTH } } @@ -243,8 +211,8 @@ static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize, const int mi_x = mi_col * MI_SIZE; const int mi_y = mi_row * MI_SIZE; for (plane = plane_from; plane <= plane_to; ++plane) { - const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, - &xd->plane[plane]); + const BLOCK_SIZE plane_bsize = + get_plane_block_size(bsize, &xd->plane[plane]); const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; const int bw = 4 * num_4x4_w; @@ -255,11 +223,10 @@ static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize, assert(bsize == BLOCK_8X8); for (y = 0; y < num_4x4_h; ++y) for (x = 0; x < num_4x4_w; ++x) - build_inter_predictors(xd, plane, i++, bw, bh, - 4 * x, 4 * y, 4, 4, mi_x, mi_y); + build_inter_predictors(xd, plane, i++, bw, bh, 4 * x, 4 * y, 4, 4, + mi_x, mi_y); } else { - build_inter_predictors(xd, plane, 0, bw, bh, - 0, 0, bw, bh, mi_x, mi_y); + build_inter_predictors(xd, plane, 0, bw, bh, 0, 0, bw, bh, mi_x, mi_y); } } } @@ -287,12 +254,12 @@ void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, } void vp9_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE], - const YV12_BUFFER_CONFIG *src, - int mi_row, int mi_col) { + const YV12_BUFFER_CONFIG *src, int mi_row, + int mi_col) { uint8_t *const buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer, - src->v_buffer}; + src->v_buffer }; const int strides[MAX_MB_PLANE] = { src->y_stride, src->uv_stride, - src->uv_stride}; + src->uv_stride }; int i; for (i = 0; i < MAX_MB_PLANE; ++i) { @@ -303,15 +270,14 @@ void vp9_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE], } void vp9_setup_pre_planes(MACROBLOCKD *xd, int idx, - const YV12_BUFFER_CONFIG *src, - int mi_row, int mi_col, + const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col, const struct scale_factors *sf) { if (src != NULL) { int i; uint8_t *const 
buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer, - src->v_buffer}; + src->v_buffer }; const int strides[MAX_MB_PLANE] = { src->y_stride, src->uv_stride, - src->uv_stride}; + src->uv_stride }; for (i = 0; i < MAX_MB_PLANE; ++i) { struct macroblockd_plane *const pd = &xd->plane[i]; setup_pred_plane(&pd->pre[idx], buffers[i], strides[i], mi_row, mi_col, diff --git a/libs/libvpx/vp9/common/vp9_reconinter.h b/libs/libvpx/vp9/common/vp9_reconinter.h index 7d907748e6..4fed4f7f6e 100644 --- a/libs/libvpx/vp9/common/vp9_reconinter.h +++ b/libs/libvpx/vp9/common/vp9_reconinter.h @@ -22,33 +22,31 @@ extern "C" { static INLINE void inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, - const int subpel_x, - const int subpel_y, - const struct scale_factors *sf, - int w, int h, int ref, - const InterpKernel *kernel, - int xs, int ys) { + const int subpel_x, const int subpel_y, + const struct scale_factors *sf, int w, int h, + int ref, const InterpKernel *kernel, int xs, + int ys) { sf->predict[subpel_x != 0][subpel_y != 0][ref]( - src, src_stride, dst, dst_stride, - kernel[subpel_x], xs, kernel[subpel_y], ys, w, h); + src, src_stride, dst, dst_stride, kernel[subpel_x], xs, kernel[subpel_y], + ys, w, h); } #if CONFIG_VP9_HIGHBITDEPTH -void high_inter_predictor(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - const int subpel_x, - const int subpel_y, - const struct scale_factors *sf, - int w, int h, int ref, - const InterpKernel *kernel, - int xs, int ys, int bd); +static INLINE void highbd_inter_predictor( + const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, + const int subpel_x, const int subpel_y, const struct scale_factors *sf, + int w, int h, int ref, const InterpKernel *kernel, int xs, int ys, int bd) { + sf->highbd_predict[subpel_x != 0][subpel_y != 0][ref]( + src, src_stride, dst, dst_stride, kernel[subpel_x], xs, kernel[subpel_y], + ys, w, h, bd); +} #endif // CONFIG_VP9_HIGHBITDEPTH MV 
average_split_mvs(const struct macroblockd_plane *pd, const MODE_INFO *mi, int ref, int block); -MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv, - int bw, int bh, int ss_x, int ss_y); +MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv, int bw, + int bh, int ss_x, int ss_y); void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col, BLOCK_SIZE bsize); @@ -62,24 +60,18 @@ void vp9_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col, void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col, BLOCK_SIZE bsize); -void vp9_build_inter_predictor(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - const MV *mv_q3, - const struct scale_factors *sf, - int w, int h, int do_avg, - const InterpKernel *kernel, - enum mv_precision precision, - int x, int y); +void vp9_build_inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst, + int dst_stride, const MV *mv_q3, + const struct scale_factors *sf, int w, int h, + int do_avg, const InterpKernel *kernel, + enum mv_precision precision, int x, int y); #if CONFIG_VP9_HIGHBITDEPTH -void vp9_highbd_build_inter_predictor(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - const MV *mv_q3, - const struct scale_factors *sf, - int w, int h, int do_avg, - const InterpKernel *kernel, - enum mv_precision precision, - int x, int y, int bd); +void vp9_highbd_build_inter_predictor( + const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, + const MV *mv_q3, const struct scale_factors *sf, int w, int h, int do_avg, + const InterpKernel *kernel, enum mv_precision precision, int x, int y, + int bd); #endif static INLINE int scaled_buffer_offset(int x_offset, int y_offset, int stride, @@ -89,9 +81,8 @@ static INLINE int scaled_buffer_offset(int x_offset, int y_offset, int stride, return y * stride + x; } -static INLINE void setup_pred_plane(struct buf_2d *dst, - uint8_t *src, int stride, - int mi_row, int 
mi_col, +static INLINE void setup_pred_plane(struct buf_2d *dst, uint8_t *src, + int stride, int mi_row, int mi_col, const struct scale_factors *scale, int subsampling_x, int subsampling_y) { const int x = (MI_SIZE * mi_col) >> subsampling_x; @@ -101,8 +92,8 @@ static INLINE void setup_pred_plane(struct buf_2d *dst, } void vp9_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE], - const YV12_BUFFER_CONFIG *src, - int mi_row, int mi_col); + const YV12_BUFFER_CONFIG *src, int mi_row, + int mi_col); void vp9_setup_pre_planes(MACROBLOCKD *xd, int idx, const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col, diff --git a/libs/libvpx/vp9/common/vp9_reconintra.c b/libs/libvpx/vp9/common/vp9_reconintra.c index 13a95ae8f0..3e5ed616d3 100644 --- a/libs/libvpx/vp9/common/vp9_reconintra.c +++ b/libs/libvpx/vp9/common/vp9_reconintra.c @@ -41,16 +41,16 @@ enum { }; static const uint8_t extend_modes[INTRA_MODES] = { - NEED_ABOVE | NEED_LEFT, // DC - NEED_ABOVE, // V - NEED_LEFT, // H - NEED_ABOVERIGHT, // D45 - NEED_LEFT | NEED_ABOVE, // D135 - NEED_LEFT | NEED_ABOVE, // D117 - NEED_LEFT | NEED_ABOVE, // D153 - NEED_LEFT, // D207 - NEED_ABOVERIGHT, // D63 - NEED_LEFT | NEED_ABOVE, // TM + NEED_ABOVE | NEED_LEFT, // DC + NEED_ABOVE, // V + NEED_LEFT, // H + NEED_ABOVERIGHT, // D45 + NEED_LEFT | NEED_ABOVE, // D135 + NEED_LEFT | NEED_ABOVE, // D117 + NEED_LEFT | NEED_ABOVE, // D153 + NEED_LEFT, // D207 + NEED_ABOVERIGHT, // D63 + NEED_LEFT | NEED_ABOVE, // TM }; typedef void (*intra_pred_fn)(uint8_t *dst, ptrdiff_t stride, @@ -68,9 +68,9 @@ static intra_high_pred_fn dc_pred_high[2][2][4]; #endif // CONFIG_VP9_HIGHBITDEPTH static void vp9_init_intra_predictors_internal(void) { -#define INIT_ALL_SIZES(p, type) \ - p[TX_4X4] = vpx_##type##_predictor_4x4; \ - p[TX_8X8] = vpx_##type##_predictor_8x8; \ +#define INIT_ALL_SIZES(p, type) \ + p[TX_4X4] = vpx_##type##_predictor_4x4; \ + p[TX_8X8] = vpx_##type##_predictor_8x8; \ p[TX_16X16] = vpx_##type##_predictor_16x16; \ 
p[TX_32X32] = vpx_##type##_predictor_32x32 @@ -110,18 +110,10 @@ static void vp9_init_intra_predictors_internal(void) { } #if CONFIG_VP9_HIGHBITDEPTH -static void build_intra_predictors_high(const MACROBLOCKD *xd, - const uint8_t *ref8, - int ref_stride, - uint8_t *dst8, - int dst_stride, - PREDICTION_MODE mode, - TX_SIZE tx_size, - int up_available, - int left_available, - int right_available, - int x, int y, - int plane, int bd) { +static void build_intra_predictors_high( + const MACROBLOCKD *xd, const uint8_t *ref8, int ref_stride, uint8_t *dst8, + int dst_stride, PREDICTION_MODE mode, TX_SIZE tx_size, int up_available, + int left_available, int right_available, int x, int y, int plane, int bd) { int i; uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); @@ -142,6 +134,7 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd, // 129 C D .. W X // 129 E F .. U V // 129 G H .. S T T T T T + // For 10 bit and 12 bit, 127 and 129 are replaced by base -1 and base + 1. // Get current frame pointer, width and height. 
if (plane == 0) { @@ -162,8 +155,7 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd, if (xd->mb_to_bottom_edge < 0) { /* slower path if the block needs border extension */ if (y0 + bs <= frame_height) { - for (i = 0; i < bs; ++i) - left_col[i] = ref[i * ref_stride - 1]; + for (i = 0; i < bs; ++i) left_col[i] = ref[i * ref_stride - 1]; } else { const int extend_bottom = frame_height - y0; for (i = 0; i < extend_bottom; ++i) @@ -173,11 +165,9 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd, } } else { /* faster path if the block does not need extension */ - for (i = 0; i < bs; ++i) - left_col[i] = ref[i * ref_stride - 1]; + for (i = 0; i < bs; ++i) left_col[i] = ref[i * ref_stride - 1]; } } else { - // TODO(Peter): this value should probably change for high bitdepth vpx_memset16(left_col, base + 1, bs); } } @@ -239,7 +229,6 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd, vpx_memset16(above_row + r, above_row[r - 1], x0 + 2 * bs - frame_width); } - // TODO(Peter) this value should probably change for high bitdepth above_row[-1] = left_available ? above_ref[-1] : (base + 1); } else { /* faster path if the block does not need extension */ @@ -251,22 +240,19 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd, memcpy(above_row + bs, above_ref + bs, bs * sizeof(above_row[0])); else vpx_memset16(above_row + bs, above_row[bs - 1], bs); - // TODO(Peter): this value should probably change for high bitdepth above_row[-1] = left_available ? 
above_ref[-1] : (base + 1); } } } else { vpx_memset16(above_row, base - 1, bs * 2); - // TODO(Peter): this value should probably change for high bitdepth above_row[-1] = base - 1; } } // predict if (mode == DC_PRED) { - dc_pred_high[left_available][up_available][tx_size](dst, dst_stride, - const_above_row, - left_col, xd->bd); + dc_pred_high[left_available][up_available][tx_size]( + dst, dst_stride, const_above_row, left_col, xd->bd); } else { pred_high[mode][tx_size](dst, dst_stride, const_above_row, left_col, xd->bd); @@ -316,8 +302,7 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref, if (xd->mb_to_bottom_edge < 0) { /* slower path if the block needs border extension */ if (y0 + bs <= frame_height) { - for (i = 0; i < bs; ++i) - left_col[i] = ref[i * ref_stride - 1]; + for (i = 0; i < bs; ++i) left_col[i] = ref[i * ref_stride - 1]; } else { const int extend_bottom = frame_height - y0; for (i = 0; i < extend_bottom; ++i) @@ -327,8 +312,7 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref, } } else { /* faster path if the block does not need extension */ - for (i = 0; i < bs; ++i) - left_col[i] = ref[i * ref_stride - 1]; + for (i = 0; i < bs; ++i) left_col[i] = ref[i * ref_stride - 1]; } } else { memset(left_col, 129, bs); @@ -418,15 +402,14 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref, } } -void vp9_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, - TX_SIZE tx_size, PREDICTION_MODE mode, - const uint8_t *ref, int ref_stride, - uint8_t *dst, int dst_stride, +void vp9_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, TX_SIZE tx_size, + PREDICTION_MODE mode, const uint8_t *ref, + int ref_stride, uint8_t *dst, int dst_stride, int aoff, int loff, int plane) { const int bw = (1 << bwl_in); const int txw = (1 << tx_size); - const int have_top = loff || xd->up_available; - const int have_left = aoff || xd->left_available; + const int have_top = loff || (xd->above_mi != 
NULL); + const int have_left = aoff || (xd->left_mi != NULL); const int have_right = (aoff + txw) < bw; const int x = aoff * 4; const int y = loff * 4; @@ -434,8 +417,8 @@ void vp9_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { build_intra_predictors_high(xd, ref, ref_stride, dst, dst_stride, mode, - tx_size, have_top, have_left, have_right, - x, y, plane, xd->bd); + tx_size, have_top, have_left, have_right, x, y, + plane, xd->bd); return; } #endif diff --git a/libs/libvpx/vp9/common/vp9_reconintra.h b/libs/libvpx/vp9/common/vp9_reconintra.h index de453808b7..78e41c8811 100644 --- a/libs/libvpx/vp9/common/vp9_reconintra.h +++ b/libs/libvpx/vp9/common/vp9_reconintra.h @@ -20,10 +20,9 @@ extern "C" { void vp9_init_intra_predictors(void); -void vp9_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, - TX_SIZE tx_size, PREDICTION_MODE mode, - const uint8_t *ref, int ref_stride, - uint8_t *dst, int dst_stride, +void vp9_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, TX_SIZE tx_size, + PREDICTION_MODE mode, const uint8_t *ref, + int ref_stride, uint8_t *dst, int dst_stride, int aoff, int loff, int plane); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp9/common/vp9_rtcd.c b/libs/libvpx/vp9/common/vp9_rtcd.c index 2dfa09f50e..d8c870aa3f 100644 --- a/libs/libvpx/vp9/common/vp9_rtcd.c +++ b/libs/libvpx/vp9/common/vp9_rtcd.c @@ -13,7 +13,7 @@ #include "vpx_ports/vpx_once.h" void vp9_rtcd() { - // TODO(JBB): Remove this once, by insuring that both the encoder and - // decoder setup functions are protected by once(); - once(setup_rtcd_internal); + // TODO(JBB): Remove this once, by insuring that both the encoder and + // decoder setup functions are protected by once(); + once(setup_rtcd_internal); } diff --git a/libs/libvpx/vp9/common/vp9_rtcd_defs.pl b/libs/libvpx/vp9/common/vp9_rtcd_defs.pl index d6a0ce96d4..37a867323d 100644 --- 
a/libs/libvpx/vp9/common/vp9_rtcd_defs.pl +++ b/libs/libvpx/vp9/common/vp9_rtcd_defs.pl @@ -21,29 +21,6 @@ EOF } forward_decls qw/vp9_common_forward_decls/; -# x86inc.asm had specific constraints. break it out so it's easy to disable. -# zero all the variables to avoid tricky else conditions. -$mmx_x86inc = $sse_x86inc = $sse2_x86inc = $ssse3_x86inc = $avx_x86inc = - $avx2_x86inc = ''; -$mmx_x86_64_x86inc = $sse_x86_64_x86inc = $sse2_x86_64_x86inc = - $ssse3_x86_64_x86inc = $avx_x86_64_x86inc = $avx2_x86_64_x86inc = ''; -if (vpx_config("CONFIG_USE_X86INC") eq "yes") { - $mmx_x86inc = 'mmx'; - $sse_x86inc = 'sse'; - $sse2_x86inc = 'sse2'; - $ssse3_x86inc = 'ssse3'; - $avx_x86inc = 'avx'; - $avx2_x86inc = 'avx2'; - if ($opts{arch} eq "x86_64") { - $mmx_x86_64_x86inc = 'mmx'; - $sse_x86_64_x86inc = 'sse'; - $sse2_x86_64_x86inc = 'sse2'; - $ssse3_x86_64_x86inc = 'ssse3'; - $avx_x86_64_x86inc = 'avx'; - $avx2_x86_64_x86inc = 'avx2'; - } -} - # functions that are 64 bit only. $mmx_x86_64 = $sse2_x86_64 = $ssse3_x86_64 = $avx_x86_64 = $avx2_x86_64 = ''; if ($opts{arch} eq "x86_64") { @@ -58,22 +35,6 @@ if ($opts{arch} eq "x86_64") { # post proc # if (vpx_config("CONFIG_VP9_POSTPROC") eq "yes") { -add_proto qw/void vp9_mbpost_proc_down/, "uint8_t *dst, int pitch, int rows, int cols, int flimit"; -specialize qw/vp9_mbpost_proc_down sse2/; -$vp9_mbpost_proc_down_sse2=vp9_mbpost_proc_down_xmm; - -add_proto qw/void vp9_mbpost_proc_across_ip/, "uint8_t *src, int pitch, int rows, int cols, int flimit"; -specialize qw/vp9_mbpost_proc_across_ip sse2/; -$vp9_mbpost_proc_across_ip_sse2=vp9_mbpost_proc_across_ip_xmm; - -add_proto qw/void vp9_post_proc_down_and_across/, "const uint8_t *src_ptr, uint8_t *dst_ptr, int src_pixels_per_line, int dst_pixels_per_line, int rows, int cols, int flimit"; -specialize qw/vp9_post_proc_down_and_across sse2/; -$vp9_post_proc_down_and_across_sse2=vp9_post_proc_down_and_across_xmm; - -add_proto qw/void vp9_plane_add_noise/, "uint8_t *Start, char 
*noise, char blackclamp[16], char whiteclamp[16], char bothclamp[16], unsigned int Width, unsigned int Height, int Pitch"; -specialize qw/vp9_plane_add_noise sse2/; -$vp9_plane_add_noise_sse2=vp9_plane_add_noise_wmt; - add_proto qw/void vp9_filter_by_weight16x16/, "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int src_weight"; specialize qw/vp9_filter_by_weight16x16 sse2 msa/; @@ -130,33 +91,6 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { # High bitdepth functions if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { - # - # Sub Pixel Filters - # - add_proto qw/void vp9_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps"; - specialize qw/vp9_highbd_convolve_copy/; - - add_proto qw/void vp9_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps"; - specialize qw/vp9_highbd_convolve_avg/; - - add_proto qw/void vp9_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps"; - specialize qw/vp9_highbd_convolve8/, "$sse2_x86_64"; - - add_proto qw/void vp9_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps"; - specialize qw/vp9_highbd_convolve8_horiz/, "$sse2_x86_64"; - - add_proto qw/void vp9_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps"; - specialize qw/vp9_highbd_convolve8_vert/, 
"$sse2_x86_64"; - - add_proto qw/void vp9_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps"; - specialize qw/vp9_highbd_convolve8_avg/, "$sse2_x86_64"; - - add_proto qw/void vp9_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps"; - specialize qw/vp9_highbd_convolve8_avg_horiz/, "$sse2_x86_64"; - - add_proto qw/void vp9_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps"; - specialize qw/vp9_highbd_convolve8_avg_vert/, "$sse2_x86_64"; - # # post proc # @@ -169,9 +103,6 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { add_proto qw/void vp9_highbd_post_proc_down_and_across/, "const uint16_t *src_ptr, uint16_t *dst_ptr, int src_pixels_per_line, int dst_pixels_per_line, int rows, int cols, int flimit"; specialize qw/vp9_highbd_post_proc_down_and_across/; - - add_proto qw/void vp9_highbd_plane_add_noise/, "uint8_t *Start, char *noise, char blackclamp[16], char whiteclamp[16], char bothclamp[16], unsigned int Width, unsigned int Height, int Pitch"; - specialize qw/vp9_highbd_plane_add_noise/; } # @@ -209,10 +140,10 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vp9_block_error/; add_proto qw/int64_t vp9_highbd_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd"; - specialize qw/vp9_highbd_block_error/, "$sse2_x86inc"; + specialize qw/vp9_highbd_block_error sse2/; add_proto qw/int64_t vp9_highbd_block_error_8bit/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz"; - specialize 
qw/vp9_highbd_block_error_8bit/, "$sse2_x86inc", "$avx_x86inc"; + specialize qw/vp9_highbd_block_error_8bit sse2 avx/; add_proto qw/void vp9_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan"; specialize qw/vp9_quantize_fp/; @@ -224,16 +155,16 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vp9_fdct8x8_quant/; } else { add_proto qw/int64_t vp9_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz"; - specialize qw/vp9_block_error avx2 msa/, "$sse2_x86inc"; + specialize qw/vp9_block_error avx2 msa sse2/; add_proto qw/int64_t vp9_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size"; - specialize qw/vp9_block_error_fp neon/, "$sse2_x86inc"; + specialize qw/vp9_block_error_fp neon sse2/; add_proto qw/void vp9_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan"; - specialize qw/vp9_quantize_fp neon sse2/, "$ssse3_x86_64_x86inc"; + specialize qw/vp9_quantize_fp neon sse2/, "$ssse3_x86_64"; add_proto qw/void vp9_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan"; - specialize qw/vp9_quantize_fp_32x32/, "$ssse3_x86_64_x86inc"; + specialize 
qw/vp9_quantize_fp_32x32/, "$ssse3_x86_64"; add_proto qw/void vp9_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan"; specialize qw/vp9_fdct8x8_quant sse2 ssse3 neon/; @@ -252,7 +183,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vp9_fht16x16 sse2/; add_proto qw/void vp9_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride"; - specialize qw/vp9_fwht4x4/, "$mmx_x86inc"; + specialize qw/vp9_fwht4x4 sse2/; } else { add_proto qw/void vp9_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type"; specialize qw/vp9_fht4x4 sse2 msa/; @@ -264,7 +195,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vp9_fht16x16 sse2 msa/; add_proto qw/void vp9_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride"; - specialize qw/vp9_fwht4x4 msa/, "$mmx_x86inc"; + specialize qw/vp9_fwht4x4 msa sse2/; } # diff --git a/libs/libvpx/vp9/common/vp9_scale.c b/libs/libvpx/vp9/common/vp9_scale.c index b763b925b3..8aedd66222 100644 --- a/libs/libvpx/vp9/common/vp9_scale.c +++ b/libs/libvpx/vp9/common/vp9_scale.c @@ -22,7 +22,7 @@ static INLINE int scaled_y(int val, const struct scale_factors *sf) { } static int unscaled_value(int val, const struct scale_factors *sf) { - (void) sf; + (void)sf; return val; } @@ -37,22 +37,18 @@ static int get_fixed_point_scale_factor(int other_size, int this_size) { MV32 vp9_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) { const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf) & SUBPEL_MASK; const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf) & SUBPEL_MASK; - const MV32 res = { - scaled_y(mv->row, sf) + y_off_q4, - scaled_x(mv->col, sf) + x_off_q4 - }; + const 
MV32 res = { scaled_y(mv->row, sf) + y_off_q4, + scaled_x(mv->col, sf) + x_off_q4 }; return res; } #if CONFIG_VP9_HIGHBITDEPTH -void vp9_setup_scale_factors_for_frame(struct scale_factors *sf, - int other_w, int other_h, - int this_w, int this_h, +void vp9_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w, + int other_h, int this_w, int this_h, int use_highbd) { #else -void vp9_setup_scale_factors_for_frame(struct scale_factors *sf, - int other_w, int other_h, - int this_w, int this_h) { +void vp9_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w, + int other_h, int this_w, int this_h) { #endif if (!valid_ref_frame_size(other_w, other_h, this_w, this_h)) { sf->x_scale_fp = REF_INVALID_SCALE; diff --git a/libs/libvpx/vp9/common/vp9_scale.h b/libs/libvpx/vp9/common/vp9_scale.h index 5e91041079..ada8dbaad5 100644 --- a/libs/libvpx/vp9/common/vp9_scale.h +++ b/libs/libvpx/vp9/common/vp9_scale.h @@ -23,8 +23,8 @@ extern "C" { #define REF_INVALID_SCALE -1 struct scale_factors { - int x_scale_fp; // horizontal fixed point scale factor - int y_scale_fp; // vertical fixed point scale factor + int x_scale_fp; // horizontal fixed point scale factor + int y_scale_fp; // vertical fixed point scale factor int x_step_q4; int y_step_q4; @@ -40,14 +40,12 @@ struct scale_factors { MV32 vp9_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf); #if CONFIG_VP9_HIGHBITDEPTH -void vp9_setup_scale_factors_for_frame(struct scale_factors *sf, - int other_w, int other_h, - int this_w, int this_h, +void vp9_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w, + int other_h, int this_w, int this_h, int use_high); #else -void vp9_setup_scale_factors_for_frame(struct scale_factors *sf, - int other_w, int other_h, - int this_w, int this_h); +void vp9_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w, + int other_h, int this_w, int this_h); #endif static INLINE int vp9_is_valid_scale(const struct scale_factors 
*sf) { @@ -61,11 +59,9 @@ static INLINE int vp9_is_scaled(const struct scale_factors *sf) { } static INLINE int valid_ref_frame_size(int ref_width, int ref_height, - int this_width, int this_height) { - return 2 * this_width >= ref_width && - 2 * this_height >= ref_height && - this_width <= 16 * ref_width && - this_height <= 16 * ref_height; + int this_width, int this_height) { + return 2 * this_width >= ref_width && 2 * this_height >= ref_height && + this_width <= 16 * ref_width && this_height <= 16 * ref_height; } #ifdef __cplusplus diff --git a/libs/libvpx/vp9/common/vp9_scan.c b/libs/libvpx/vp9/common/vp9_scan.c index d6fb8b2d7b..0fef263510 100644 --- a/libs/libvpx/vp9/common/vp9_scan.c +++ b/libs/libvpx/vp9/common/vp9_scan.c @@ -13,523 +13,502 @@ #include "vp9/common/vp9_scan.h" DECLARE_ALIGNED(16, static const int16_t, default_scan_4x4[16]) = { - 0, 4, 1, 5, - 8, 2, 12, 9, - 3, 6, 13, 10, - 7, 14, 11, 15, + 0, 4, 1, 5, 8, 2, 12, 9, 3, 6, 13, 10, 7, 14, 11, 15, }; DECLARE_ALIGNED(16, static const int16_t, col_scan_4x4[16]) = { - 0, 4, 8, 1, - 12, 5, 9, 2, - 13, 6, 10, 3, - 7, 14, 11, 15, + 0, 4, 8, 1, 12, 5, 9, 2, 13, 6, 10, 3, 7, 14, 11, 15, }; DECLARE_ALIGNED(16, static const int16_t, row_scan_4x4[16]) = { - 0, 1, 4, 2, - 5, 3, 6, 8, - 9, 7, 12, 10, - 13, 11, 14, 15, + 0, 1, 4, 2, 5, 3, 6, 8, 9, 7, 12, 10, 13, 11, 14, 15, }; DECLARE_ALIGNED(16, static const int16_t, default_scan_8x8[64]) = { - 0, 8, 1, 16, 9, 2, 17, 24, - 10, 3, 18, 25, 32, 11, 4, 26, - 33, 19, 40, 12, 34, 27, 5, 41, - 20, 48, 13, 35, 42, 28, 21, 6, - 49, 56, 36, 43, 29, 7, 14, 50, - 57, 44, 22, 37, 15, 51, 58, 30, - 45, 23, 52, 59, 38, 31, 60, 53, - 46, 39, 61, 54, 47, 62, 55, 63, + 0, 8, 1, 16, 9, 2, 17, 24, 10, 3, 18, 25, 32, 11, 4, 26, + 33, 19, 40, 12, 34, 27, 5, 41, 20, 48, 13, 35, 42, 28, 21, 6, + 49, 56, 36, 43, 29, 7, 14, 50, 57, 44, 22, 37, 15, 51, 58, 30, + 45, 23, 52, 59, 38, 31, 60, 53, 46, 39, 61, 54, 47, 62, 55, 63, }; DECLARE_ALIGNED(16, static const int16_t, 
col_scan_8x8[64]) = { - 0, 8, 16, 1, 24, 9, 32, 17, - 2, 40, 25, 10, 33, 18, 48, 3, - 26, 41, 11, 56, 19, 34, 4, 49, - 27, 42, 12, 35, 20, 57, 50, 28, - 5, 43, 13, 36, 58, 51, 21, 44, - 6, 29, 59, 37, 14, 52, 22, 7, - 45, 60, 30, 15, 38, 53, 23, 46, - 31, 61, 39, 54, 47, 62, 55, 63, + 0, 8, 16, 1, 24, 9, 32, 17, 2, 40, 25, 10, 33, 18, 48, 3, + 26, 41, 11, 56, 19, 34, 4, 49, 27, 42, 12, 35, 20, 57, 50, 28, + 5, 43, 13, 36, 58, 51, 21, 44, 6, 29, 59, 37, 14, 52, 22, 7, + 45, 60, 30, 15, 38, 53, 23, 46, 31, 61, 39, 54, 47, 62, 55, 63, }; DECLARE_ALIGNED(16, static const int16_t, row_scan_8x8[64]) = { - 0, 1, 2, 8, 9, 3, 16, 10, - 4, 17, 11, 24, 5, 18, 25, 12, - 19, 26, 32, 6, 13, 20, 33, 27, - 7, 34, 40, 21, 28, 41, 14, 35, - 48, 42, 29, 36, 49, 22, 43, 15, - 56, 37, 50, 44, 30, 57, 23, 51, - 58, 45, 38, 52, 31, 59, 53, 46, - 60, 39, 61, 47, 54, 55, 62, 63, + 0, 1, 2, 8, 9, 3, 16, 10, 4, 17, 11, 24, 5, 18, 25, 12, + 19, 26, 32, 6, 13, 20, 33, 27, 7, 34, 40, 21, 28, 41, 14, 35, + 48, 42, 29, 36, 49, 22, 43, 15, 56, 37, 50, 44, 30, 57, 23, 51, + 58, 45, 38, 52, 31, 59, 53, 46, 60, 39, 61, 47, 54, 55, 62, 63, }; DECLARE_ALIGNED(16, static const int16_t, default_scan_16x16[256]) = { - 0, 16, 1, 32, 17, 2, 48, 33, 18, 3, 64, 34, 49, 19, 65, 80, - 50, 4, 35, 66, 20, 81, 96, 51, 5, 36, 82, 97, 67, 112, 21, 52, - 98, 37, 83, 113, 6, 68, 128, 53, 22, 99, 114, 84, 7, 129, 38, 69, - 100, 115, 144, 130, 85, 54, 23, 8, 145, 39, 70, 116, 101, 131, 160, 146, - 55, 86, 24, 71, 132, 117, 161, 40, 9, 102, 147, 176, 162, 87, 56, 25, - 133, 118, 177, 148, 72, 103, 41, 163, 10, 192, 178, 88, 57, 134, 149, 119, - 26, 164, 73, 104, 193, 42, 179, 208, 11, 135, 89, 165, 120, 150, 58, 194, - 180, 27, 74, 209, 105, 151, 136, 43, 90, 224, 166, 195, 181, 121, 210, 59, - 12, 152, 106, 167, 196, 75, 137, 225, 211, 240, 182, 122, 91, 28, 197, 13, - 226, 168, 183, 153, 44, 212, 138, 107, 241, 60, 29, 123, 198, 184, 227, 169, - 242, 76, 213, 154, 45, 92, 14, 199, 139, 61, 228, 214, 170, 185, 243, 108, 
- 77, 155, 30, 15, 200, 229, 124, 215, 244, 93, 46, 186, 171, 201, 109, 140, - 230, 62, 216, 245, 31, 125, 78, 156, 231, 47, 187, 202, 217, 94, 246, 141, - 63, 232, 172, 110, 247, 157, 79, 218, 203, 126, 233, 188, 248, 95, 173, 142, - 219, 111, 249, 234, 158, 127, 189, 204, 250, 235, 143, 174, 220, 205, 159, - 251, + 0, 16, 1, 32, 17, 2, 48, 33, 18, 3, 64, 34, 49, 19, 65, + 80, 50, 4, 35, 66, 20, 81, 96, 51, 5, 36, 82, 97, 67, 112, + 21, 52, 98, 37, 83, 113, 6, 68, 128, 53, 22, 99, 114, 84, 7, + 129, 38, 69, 100, 115, 144, 130, 85, 54, 23, 8, 145, 39, 70, 116, + 101, 131, 160, 146, 55, 86, 24, 71, 132, 117, 161, 40, 9, 102, 147, + 176, 162, 87, 56, 25, 133, 118, 177, 148, 72, 103, 41, 163, 10, 192, + 178, 88, 57, 134, 149, 119, 26, 164, 73, 104, 193, 42, 179, 208, 11, + 135, 89, 165, 120, 150, 58, 194, 180, 27, 74, 209, 105, 151, 136, 43, + 90, 224, 166, 195, 181, 121, 210, 59, 12, 152, 106, 167, 196, 75, 137, + 225, 211, 240, 182, 122, 91, 28, 197, 13, 226, 168, 183, 153, 44, 212, + 138, 107, 241, 60, 29, 123, 198, 184, 227, 169, 242, 76, 213, 154, 45, + 92, 14, 199, 139, 61, 228, 214, 170, 185, 243, 108, 77, 155, 30, 15, + 200, 229, 124, 215, 244, 93, 46, 186, 171, 201, 109, 140, 230, 62, 216, + 245, 31, 125, 78, 156, 231, 47, 187, 202, 217, 94, 246, 141, 63, 232, + 172, 110, 247, 157, 79, 218, 203, 126, 233, 188, 248, 95, 173, 142, 219, + 111, 249, 234, 158, 127, 189, 204, 250, 235, 143, 174, 220, 205, 159, 251, 190, 221, 175, 236, 237, 191, 206, 252, 222, 253, 207, 238, 223, 254, 239, 255, }; DECLARE_ALIGNED(16, static const int16_t, col_scan_16x16[256]) = { - 0, 16, 32, 48, 1, 64, 17, 80, 33, 96, 49, 2, 65, 112, 18, 81, - 34, 128, 50, 97, 3, 66, 144, 19, 113, 35, 82, 160, 98, 51, 129, 4, - 67, 176, 20, 114, 145, 83, 36, 99, 130, 52, 192, 5, 161, 68, 115, 21, - 146, 84, 208, 177, 37, 131, 100, 53, 162, 224, 69, 6, 116, 193, 147, 85, - 22, 240, 132, 38, 178, 101, 163, 54, 209, 117, 70, 7, 148, 194, 86, 179, - 225, 23, 133, 39, 164, 8, 102, 210, 241, 55, 195, 
118, 149, 71, 180, 24, - 87, 226, 134, 165, 211, 40, 103, 56, 72, 150, 196, 242, 119, 9, 181, 227, - 88, 166, 25, 135, 41, 104, 212, 57, 151, 197, 120, 73, 243, 182, 136, 167, - 213, 89, 10, 228, 105, 152, 198, 26, 42, 121, 183, 244, 168, 58, 137, 229, - 74, 214, 90, 153, 199, 184, 11, 106, 245, 27, 122, 230, 169, 43, 215, 59, - 200, 138, 185, 246, 75, 12, 91, 154, 216, 231, 107, 28, 44, 201, 123, 170, - 60, 247, 232, 76, 139, 13, 92, 217, 186, 248, 155, 108, 29, 124, 45, 202, - 233, 171, 61, 14, 77, 140, 15, 249, 93, 30, 187, 156, 218, 46, 109, 125, - 62, 172, 78, 203, 31, 141, 234, 94, 47, 188, 63, 157, 110, 250, 219, 79, - 126, 204, 173, 142, 95, 189, 111, 235, 158, 220, 251, 127, 174, 143, 205, - 236, + 0, 16, 32, 48, 1, 64, 17, 80, 33, 96, 49, 2, 65, 112, 18, + 81, 34, 128, 50, 97, 3, 66, 144, 19, 113, 35, 82, 160, 98, 51, + 129, 4, 67, 176, 20, 114, 145, 83, 36, 99, 130, 52, 192, 5, 161, + 68, 115, 21, 146, 84, 208, 177, 37, 131, 100, 53, 162, 224, 69, 6, + 116, 193, 147, 85, 22, 240, 132, 38, 178, 101, 163, 54, 209, 117, 70, + 7, 148, 194, 86, 179, 225, 23, 133, 39, 164, 8, 102, 210, 241, 55, + 195, 118, 149, 71, 180, 24, 87, 226, 134, 165, 211, 40, 103, 56, 72, + 150, 196, 242, 119, 9, 181, 227, 88, 166, 25, 135, 41, 104, 212, 57, + 151, 197, 120, 73, 243, 182, 136, 167, 213, 89, 10, 228, 105, 152, 198, + 26, 42, 121, 183, 244, 168, 58, 137, 229, 74, 214, 90, 153, 199, 184, + 11, 106, 245, 27, 122, 230, 169, 43, 215, 59, 200, 138, 185, 246, 75, + 12, 91, 154, 216, 231, 107, 28, 44, 201, 123, 170, 60, 247, 232, 76, + 139, 13, 92, 217, 186, 248, 155, 108, 29, 124, 45, 202, 233, 171, 61, + 14, 77, 140, 15, 249, 93, 30, 187, 156, 218, 46, 109, 125, 62, 172, + 78, 203, 31, 141, 234, 94, 47, 188, 63, 157, 110, 250, 219, 79, 126, + 204, 173, 142, 95, 189, 111, 235, 158, 220, 251, 127, 174, 143, 205, 236, 159, 190, 221, 252, 175, 206, 237, 191, 253, 222, 238, 207, 254, 223, 239, 255, }; DECLARE_ALIGNED(16, static const int16_t, row_scan_16x16[256]) = { - 0, 1, 2, 
16, 3, 17, 4, 18, 32, 5, 33, 19, 6, 34, 48, 20, - 49, 7, 35, 21, 50, 64, 8, 36, 65, 22, 51, 37, 80, 9, 66, 52, - 23, 38, 81, 67, 10, 53, 24, 82, 68, 96, 39, 11, 54, 83, 97, 69, - 25, 98, 84, 40, 112, 55, 12, 70, 99, 113, 85, 26, 41, 56, 114, 100, - 13, 71, 128, 86, 27, 115, 101, 129, 42, 57, 72, 116, 14, 87, 130, 102, - 144, 73, 131, 117, 28, 58, 15, 88, 43, 145, 103, 132, 146, 118, 74, 160, - 89, 133, 104, 29, 59, 147, 119, 44, 161, 148, 90, 105, 134, 162, 120, 176, - 75, 135, 149, 30, 60, 163, 177, 45, 121, 91, 106, 164, 178, 150, 192, 136, - 165, 179, 31, 151, 193, 76, 122, 61, 137, 194, 107, 152, 180, 208, 46, 166, - 167, 195, 92, 181, 138, 209, 123, 153, 224, 196, 77, 168, 210, 182, 240, 108, - 197, 62, 154, 225, 183, 169, 211, 47, 139, 93, 184, 226, 212, 241, 198, 170, - 124, 155, 199, 78, 213, 185, 109, 227, 200, 63, 228, 242, 140, 214, 171, 186, - 156, 229, 243, 125, 94, 201, 244, 215, 216, 230, 141, 187, 202, 79, 172, 110, - 157, 245, 217, 231, 95, 246, 232, 126, 203, 247, 233, 173, 218, 142, 111, - 158, - 188, 248, 127, 234, 219, 249, 189, 204, 143, 174, 159, 250, 235, 205, 220, - 175, + 0, 1, 2, 16, 3, 17, 4, 18, 32, 5, 33, 19, 6, 34, 48, + 20, 49, 7, 35, 21, 50, 64, 8, 36, 65, 22, 51, 37, 80, 9, + 66, 52, 23, 38, 81, 67, 10, 53, 24, 82, 68, 96, 39, 11, 54, + 83, 97, 69, 25, 98, 84, 40, 112, 55, 12, 70, 99, 113, 85, 26, + 41, 56, 114, 100, 13, 71, 128, 86, 27, 115, 101, 129, 42, 57, 72, + 116, 14, 87, 130, 102, 144, 73, 131, 117, 28, 58, 15, 88, 43, 145, + 103, 132, 146, 118, 74, 160, 89, 133, 104, 29, 59, 147, 119, 44, 161, + 148, 90, 105, 134, 162, 120, 176, 75, 135, 149, 30, 60, 163, 177, 45, + 121, 91, 106, 164, 178, 150, 192, 136, 165, 179, 31, 151, 193, 76, 122, + 61, 137, 194, 107, 152, 180, 208, 46, 166, 167, 195, 92, 181, 138, 209, + 123, 153, 224, 196, 77, 168, 210, 182, 240, 108, 197, 62, 154, 225, 183, + 169, 211, 47, 139, 93, 184, 226, 212, 241, 198, 170, 124, 155, 199, 78, + 213, 185, 109, 227, 200, 63, 228, 242, 140, 214, 171, 186, 156, 
229, 243, + 125, 94, 201, 244, 215, 216, 230, 141, 187, 202, 79, 172, 110, 157, 245, + 217, 231, 95, 246, 232, 126, 203, 247, 233, 173, 218, 142, 111, 158, 188, + 248, 127, 234, 219, 249, 189, 204, 143, 174, 159, 250, 235, 205, 220, 175, 190, 251, 221, 191, 206, 236, 207, 237, 252, 222, 253, 223, 238, 239, 254, 255, }; DECLARE_ALIGNED(16, static const int16_t, default_scan_32x32[1024]) = { - 0, 32, 1, 64, 33, 2, 96, 65, 34, 128, 3, 97, 66, 160, - 129, 35, 98, 4, 67, 130, 161, 192, 36, 99, 224, 5, 162, 193, - 68, 131, 37, 100, - 225, 194, 256, 163, 69, 132, 6, 226, 257, 288, 195, 101, 164, 38, - 258, 7, 227, 289, 133, 320, 70, 196, 165, 290, 259, 228, 39, 321, - 102, 352, 8, 197, - 71, 134, 322, 291, 260, 353, 384, 229, 166, 103, 40, 354, 323, 292, - 135, 385, 198, 261, 72, 9, 416, 167, 386, 355, 230, 324, 104, 293, - 41, 417, 199, 136, - 262, 387, 448, 325, 356, 10, 73, 418, 231, 168, 449, 294, 388, 105, - 419, 263, 42, 200, 357, 450, 137, 480, 74, 326, 232, 11, 389, 169, - 295, 420, 106, 451, - 481, 358, 264, 327, 201, 43, 138, 512, 482, 390, 296, 233, 170, 421, - 75, 452, 359, 12, 513, 265, 483, 328, 107, 202, 514, 544, 422, 391, - 453, 139, 44, 234, - 484, 297, 360, 171, 76, 515, 545, 266, 329, 454, 13, 423, 203, 108, - 546, 485, 576, 298, 235, 140, 361, 330, 172, 547, 45, 455, 267, 577, - 486, 77, 204, 362, - 608, 14, 299, 578, 109, 236, 487, 609, 331, 141, 579, 46, 15, 173, - 610, 363, 78, 205, 16, 110, 237, 611, 142, 47, 174, 79, 206, 17, - 111, 238, 48, 143, - 80, 175, 112, 207, 49, 18, 239, 81, 113, 19, 50, 82, 114, 51, - 83, 115, 640, 516, 392, 268, 144, 20, 672, 641, 548, 517, 424, - 393, 300, 269, 176, 145, - 52, 21, 704, 673, 642, 580, 549, 518, 456, 425, 394, 332, 301, - 270, 208, 177, 146, 84, 53, 22, 736, 705, 674, 643, 612, 581, - 550, 519, 488, 457, 426, 395, - 364, 333, 302, 271, 240, 209, 178, 147, 116, 85, 54, 23, 737, - 706, 675, 613, 582, 551, 489, 458, 427, 365, 334, 303, 241, - 210, 179, 117, 86, 55, 738, 707, - 614, 583, 490, 459, 366, 335, 
242, 211, 118, 87, 739, 615, 491, - 367, 243, 119, 768, 644, 520, 396, 272, 148, 24, 800, 769, 676, - 645, 552, 521, 428, 397, 304, - 273, 180, 149, 56, 25, 832, 801, 770, 708, 677, 646, 584, 553, - 522, 460, 429, 398, 336, 305, 274, 212, 181, 150, 88, 57, 26, - 864, 833, 802, 771, 740, 709, - 678, 647, 616, 585, 554, 523, 492, 461, 430, 399, 368, 337, 306, - 275, 244, 213, 182, 151, 120, 89, 58, 27, 865, 834, 803, 741, - 710, 679, 617, 586, 555, 493, - 462, 431, 369, 338, 307, 245, 214, 183, 121, 90, 59, 866, 835, - 742, 711, 618, 587, 494, 463, 370, 339, 246, 215, 122, 91, 867, - 743, 619, 495, 371, 247, 123, - 896, 772, 648, 524, 400, 276, 152, 28, 928, 897, 804, 773, 680, - 649, 556, 525, 432, 401, 308, 277, 184, 153, 60, 29, 960, 929, - 898, 836, 805, 774, 712, 681, - 650, 588, 557, 526, 464, 433, 402, 340, 309, 278, 216, 185, 154, - 92, 61, 30, 992, 961, 930, 899, 868, 837, 806, 775, 744, 713, 682, - 651, 620, 589, 558, 527, - 496, 465, 434, 403, 372, 341, 310, 279, 248, 217, 186, 155, 124, - 93, 62, 31, 993, 962, 931, 869, 838, 807, 745, 714, 683, 621, 590, - 559, 497, 466, 435, 373, - 342, 311, 249, 218, 187, 125, 94, 63, 994, 963, 870, 839, 746, 715, - 622, 591, 498, 467, 374, 343, 250, 219, 126, 95, 995, 871, 747, 623, - 499, 375, 251, 127, - 900, 776, 652, 528, 404, 280, 156, 932, 901, 808, 777, 684, 653, 560, - 529, 436, 405, 312, 281, 188, 157, 964, 933, 902, 840, 809, 778, 716, - 685, 654, 592, 561, - 530, 468, 437, 406, 344, 313, 282, 220, 189, 158, 996, 965, 934, 903, - 872, 841, 810, 779, 748, 717, 686, 655, 624, 593, 562, 531, 500, 469, - 438, 407, 376, 345, - 314, 283, 252, 221, 190, 159, 997, 966, 935, 873, 842, 811, 749, 718, - 687, 625, 594, 563, 501, 470, 439, 377, 346, 315, 253, 222, 191, 998, - 967, 874, 843, 750, - 719, 626, 595, 502, 471, 378, 347, 254, 223, 999, 875, 751, 627, 503, - 379, 255, 904, 780, 656, 532, 408, 284, 936, 905, 812, 781, 688, 657, - 564, 533, 440, 409, - 316, 285, 968, 937, 906, 844, 813, 782, 720, 689, 658, 596, 
565, 534, - 472, 441, 410, 348, 317, 286, 1000, 969, 938, 907, 876, 845, 814, 783, - 752, 721, 690, 659, - 628, 597, 566, 535, 504, 473, 442, 411, 380, 349, 318, 287, 1001, 970, - 939, 877, 846, 815, 753, 722, 691, 629, 598, 567, 505, 474, 443, 381, - 350, 319, 1002, 971, - 878, 847, 754, 723, 630, 599, 506, 475, 382, 351, 1003, 879, 755, 631, - 507, 383, 908, 784, 660, 536, 412, 940, 909, 816, 785, 692, 661, 568, - 537, 444, 413, 972, - 941, 910, 848, 817, 786, 724, 693, 662, 600, 569, 538, 476, 445, 414, - 1004, 973, 942, 911, 880, 849, 818, 787, 756, 725, 694, 663, 632, 601, - 570, 539, 508, 477, - 446, 415, 1005, 974, 943, 881, 850, 819, 757, 726, 695, 633, 602, 571, - 509, 478, 447, 1006, 975, 882, 851, 758, 727, 634, 603, 510, 479, - 1007, 883, 759, 635, 511, - 912, 788, 664, 540, 944, 913, 820, 789, 696, 665, 572, 541, 976, 945, - 914, 852, 821, 790, 728, 697, 666, 604, 573, 542, 1008, 977, 946, 915, - 884, 853, 822, 791, - 760, 729, 698, 667, 636, 605, 574, 543, 1009, 978, 947, 885, 854, 823, - 761, 730, 699, 637, 606, 575, 1010, 979, 886, 855, 762, 731, 638, 607, - 1011, 887, 763, 639, - 916, 792, 668, 948, 917, 824, 793, 700, 669, 980, 949, 918, 856, 825, - 794, 732, 701, 670, 1012, 981, 950, 919, 888, 857, 826, 795, 764, 733, - 702, 671, 1013, 982, - 951, 889, 858, 827, 765, 734, 703, 1014, 983, 890, 859, 766, 735, 1015, - 891, 767, 920, 796, 952, 921, 828, 797, 984, 953, 922, 860, 829, 798, - 1016, 985, 954, 923, - 892, 861, 830, 799, 1017, 986, 955, 893, 862, 831, 1018, 987, 894, 863, - 1019, 895, 924, 956, 925, 988, 957, 926, 1020, 989, 958, 927, 1021, - 990, 959, 1022, 991, 1023, + 0, 32, 1, 64, 33, 2, 96, 65, 34, 128, 3, 97, 66, + 160, 129, 35, 98, 4, 67, 130, 161, 192, 36, 99, 224, 5, + 162, 193, 68, 131, 37, 100, 225, 194, 256, 163, 69, 132, 6, + 226, 257, 288, 195, 101, 164, 38, 258, 7, 227, 289, 133, 320, + 70, 196, 165, 290, 259, 228, 39, 321, 102, 352, 8, 197, 71, + 134, 322, 291, 260, 353, 384, 229, 166, 103, 40, 354, 323, 292, + 135, 385, 
198, 261, 72, 9, 416, 167, 386, 355, 230, 324, 104, + 293, 41, 417, 199, 136, 262, 387, 448, 325, 356, 10, 73, 418, + 231, 168, 449, 294, 388, 105, 419, 263, 42, 200, 357, 450, 137, + 480, 74, 326, 232, 11, 389, 169, 295, 420, 106, 451, 481, 358, + 264, 327, 201, 43, 138, 512, 482, 390, 296, 233, 170, 421, 75, + 452, 359, 12, 513, 265, 483, 328, 107, 202, 514, 544, 422, 391, + 453, 139, 44, 234, 484, 297, 360, 171, 76, 515, 545, 266, 329, + 454, 13, 423, 203, 108, 546, 485, 576, 298, 235, 140, 361, 330, + 172, 547, 45, 455, 267, 577, 486, 77, 204, 362, 608, 14, 299, + 578, 109, 236, 487, 609, 331, 141, 579, 46, 15, 173, 610, 363, + 78, 205, 16, 110, 237, 611, 142, 47, 174, 79, 206, 17, 111, + 238, 48, 143, 80, 175, 112, 207, 49, 18, 239, 81, 113, 19, + 50, 82, 114, 51, 83, 115, 640, 516, 392, 268, 144, 20, 672, + 641, 548, 517, 424, 393, 300, 269, 176, 145, 52, 21, 704, 673, + 642, 580, 549, 518, 456, 425, 394, 332, 301, 270, 208, 177, 146, + 84, 53, 22, 736, 705, 674, 643, 612, 581, 550, 519, 488, 457, + 426, 395, 364, 333, 302, 271, 240, 209, 178, 147, 116, 85, 54, + 23, 737, 706, 675, 613, 582, 551, 489, 458, 427, 365, 334, 303, + 241, 210, 179, 117, 86, 55, 738, 707, 614, 583, 490, 459, 366, + 335, 242, 211, 118, 87, 739, 615, 491, 367, 243, 119, 768, 644, + 520, 396, 272, 148, 24, 800, 769, 676, 645, 552, 521, 428, 397, + 304, 273, 180, 149, 56, 25, 832, 801, 770, 708, 677, 646, 584, + 553, 522, 460, 429, 398, 336, 305, 274, 212, 181, 150, 88, 57, + 26, 864, 833, 802, 771, 740, 709, 678, 647, 616, 585, 554, 523, + 492, 461, 430, 399, 368, 337, 306, 275, 244, 213, 182, 151, 120, + 89, 58, 27, 865, 834, 803, 741, 710, 679, 617, 586, 555, 493, + 462, 431, 369, 338, 307, 245, 214, 183, 121, 90, 59, 866, 835, + 742, 711, 618, 587, 494, 463, 370, 339, 246, 215, 122, 91, 867, + 743, 619, 495, 371, 247, 123, 896, 772, 648, 524, 400, 276, 152, + 28, 928, 897, 804, 773, 680, 649, 556, 525, 432, 401, 308, 277, + 184, 153, 60, 29, 960, 929, 898, 836, 805, 774, 712, 681, 
650, + 588, 557, 526, 464, 433, 402, 340, 309, 278, 216, 185, 154, 92, + 61, 30, 992, 961, 930, 899, 868, 837, 806, 775, 744, 713, 682, + 651, 620, 589, 558, 527, 496, 465, 434, 403, 372, 341, 310, 279, + 248, 217, 186, 155, 124, 93, 62, 31, 993, 962, 931, 869, 838, + 807, 745, 714, 683, 621, 590, 559, 497, 466, 435, 373, 342, 311, + 249, 218, 187, 125, 94, 63, 994, 963, 870, 839, 746, 715, 622, + 591, 498, 467, 374, 343, 250, 219, 126, 95, 995, 871, 747, 623, + 499, 375, 251, 127, 900, 776, 652, 528, 404, 280, 156, 932, 901, + 808, 777, 684, 653, 560, 529, 436, 405, 312, 281, 188, 157, 964, + 933, 902, 840, 809, 778, 716, 685, 654, 592, 561, 530, 468, 437, + 406, 344, 313, 282, 220, 189, 158, 996, 965, 934, 903, 872, 841, + 810, 779, 748, 717, 686, 655, 624, 593, 562, 531, 500, 469, 438, + 407, 376, 345, 314, 283, 252, 221, 190, 159, 997, 966, 935, 873, + 842, 811, 749, 718, 687, 625, 594, 563, 501, 470, 439, 377, 346, + 315, 253, 222, 191, 998, 967, 874, 843, 750, 719, 626, 595, 502, + 471, 378, 347, 254, 223, 999, 875, 751, 627, 503, 379, 255, 904, + 780, 656, 532, 408, 284, 936, 905, 812, 781, 688, 657, 564, 533, + 440, 409, 316, 285, 968, 937, 906, 844, 813, 782, 720, 689, 658, + 596, 565, 534, 472, 441, 410, 348, 317, 286, 1000, 969, 938, 907, + 876, 845, 814, 783, 752, 721, 690, 659, 628, 597, 566, 535, 504, + 473, 442, 411, 380, 349, 318, 287, 1001, 970, 939, 877, 846, 815, + 753, 722, 691, 629, 598, 567, 505, 474, 443, 381, 350, 319, 1002, + 971, 878, 847, 754, 723, 630, 599, 506, 475, 382, 351, 1003, 879, + 755, 631, 507, 383, 908, 784, 660, 536, 412, 940, 909, 816, 785, + 692, 661, 568, 537, 444, 413, 972, 941, 910, 848, 817, 786, 724, + 693, 662, 600, 569, 538, 476, 445, 414, 1004, 973, 942, 911, 880, + 849, 818, 787, 756, 725, 694, 663, 632, 601, 570, 539, 508, 477, + 446, 415, 1005, 974, 943, 881, 850, 819, 757, 726, 695, 633, 602, + 571, 509, 478, 447, 1006, 975, 882, 851, 758, 727, 634, 603, 510, + 479, 1007, 883, 759, 635, 511, 912, 788, 664, 540, 
944, 913, 820, + 789, 696, 665, 572, 541, 976, 945, 914, 852, 821, 790, 728, 697, + 666, 604, 573, 542, 1008, 977, 946, 915, 884, 853, 822, 791, 760, + 729, 698, 667, 636, 605, 574, 543, 1009, 978, 947, 885, 854, 823, + 761, 730, 699, 637, 606, 575, 1010, 979, 886, 855, 762, 731, 638, + 607, 1011, 887, 763, 639, 916, 792, 668, 948, 917, 824, 793, 700, + 669, 980, 949, 918, 856, 825, 794, 732, 701, 670, 1012, 981, 950, + 919, 888, 857, 826, 795, 764, 733, 702, 671, 1013, 982, 951, 889, + 858, 827, 765, 734, 703, 1014, 983, 890, 859, 766, 735, 1015, 891, + 767, 920, 796, 952, 921, 828, 797, 984, 953, 922, 860, 829, 798, + 1016, 985, 954, 923, 892, 861, 830, 799, 1017, 986, 955, 893, 862, + 831, 1018, 987, 894, 863, 1019, 895, 924, 956, 925, 988, 957, 926, + 1020, 989, 958, 927, 1021, 990, 959, 1022, 991, 1023, }; -// Neighborhood 5-tuples for various scans and blocksizes, -// in {top, left, topleft, topright, bottomleft} order -// for each position in raster scan order. -// -1 indicates the neighbor does not exist. +// Neighborhood 2-tuples for various scans and blocksizes, +// in {top, left} order for each position in corresponding scan order. 
DECLARE_ALIGNED(16, static const int16_t, default_scan_4x4_neighbors[17 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 0, 0, 1, 4, 4, 4, 1, 1, 8, 8, 5, 8, 2, 2, 2, 5, 9, 12, 6, 9, - 3, 6, 10, 13, 7, 10, 11, 14, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 4, 4, 4, 1, 1, 8, 8, 5, 8, 2, + 2, 2, 5, 9, 12, 6, 9, 3, 6, 10, 13, 7, 10, 11, 14, 0, 0, }; DECLARE_ALIGNED(16, static const int16_t, col_scan_4x4_neighbors[17 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 4, 4, 0, 0, 8, 8, 1, 1, 5, 5, 1, 1, 9, 9, 2, 2, 6, 6, 2, 2, 3, - 3, 10, 10, 7, 7, 11, 11, 0, 0, + 0, 0, 0, 0, 4, 4, 0, 0, 8, 8, 1, 1, 5, 5, 1, 1, 9, + 9, 2, 2, 6, 6, 2, 2, 3, 3, 10, 10, 7, 7, 11, 11, 0, 0, }; DECLARE_ALIGNED(16, static const int16_t, row_scan_4x4_neighbors[17 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 0, 0, 1, 1, 4, 4, 2, 2, 5, 5, 4, 4, 8, 8, 6, 6, 8, 8, 9, 9, 12, - 12, 10, 10, 13, 13, 14, 14, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 1, 4, 4, 2, 2, 5, 5, 4, 4, 8, + 8, 6, 6, 8, 8, 9, 9, 12, 12, 10, 10, 13, 13, 14, 14, 0, 0, }; DECLARE_ALIGNED(16, static const int16_t, col_scan_8x8_neighbors[65 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 8, 8, 0, 0, 16, 16, 1, 1, 24, 24, 9, 9, 1, 1, 32, 32, 17, 17, 2, - 2, 25, 25, 10, 10, 40, 40, 2, 2, 18, 18, 33, 33, 3, 3, 48, 48, 11, 11, 26, - 26, 3, 3, 41, 41, 19, 19, 34, 34, 4, 4, 27, 27, 12, 12, 49, 49, 42, 42, 20, - 20, 4, 4, 35, 35, 5, 5, 28, 28, 50, 50, 43, 43, 13, 13, 36, 36, 5, 5, 21, 21, - 51, 51, 29, 29, 6, 6, 44, 44, 14, 14, 6, 6, 37, 37, 52, 52, 22, 22, 7, 7, 30, - 30, 45, 45, 15, 15, 38, 38, 23, 23, 53, 53, 31, 31, 46, 46, 39, 39, 54, 54, - 47, 47, 55, 55, 0, 0, + 0, 0, 0, 0, 8, 8, 0, 0, 16, 16, 1, 1, 24, 24, 9, 9, 1, 1, 32, + 32, 17, 17, 2, 2, 25, 25, 10, 10, 40, 40, 2, 2, 18, 18, 33, 33, 3, 3, + 48, 48, 11, 11, 26, 26, 3, 3, 41, 41, 19, 19, 34, 34, 4, 4, 27, 27, 12, + 12, 49, 49, 42, 42, 20, 20, 4, 4, 35, 35, 5, 5, 28, 28, 50, 50, 43, 43, + 13, 13, 36, 36, 5, 5, 21, 21, 51, 51, 29, 29, 6, 6, 44, 44, 14, 14, 6, + 6, 37, 37, 52, 52, 22, 22, 7, 7, 30, 30, 45, 45, 15, 15, 38, 38, 23, 23, + 53, 53, 31, 31, 46, 46, 
39, 39, 54, 54, 47, 47, 55, 55, 0, 0, }; DECLARE_ALIGNED(16, static const int16_t, row_scan_8x8_neighbors[65 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 1, 1, 0, 0, 8, 8, 2, 2, 8, 8, 9, 9, 3, 3, 16, 16, 10, 10, 16, 16, - 4, 4, 17, 17, 24, 24, 11, 11, 18, 18, 25, 25, 24, 24, 5, 5, 12, 12, 19, 19, - 32, 32, 26, 26, 6, 6, 33, 33, 32, 32, 20, 20, 27, 27, 40, 40, 13, 13, 34, 34, - 40, 40, 41, 41, 28, 28, 35, 35, 48, 48, 21, 21, 42, 42, 14, 14, 48, 48, 36, - 36, 49, 49, 43, 43, 29, 29, 56, 56, 22, 22, 50, 50, 57, 57, 44, 44, 37, 37, - 51, 51, 30, 30, 58, 58, 52, 52, 45, 45, 59, 59, 38, 38, 60, 60, 46, 46, 53, - 53, 54, 54, 61, 61, 62, 62, 0, 0, + 0, 0, 0, 0, 1, 1, 0, 0, 8, 8, 2, 2, 8, 8, 9, 9, 3, 3, 16, + 16, 10, 10, 16, 16, 4, 4, 17, 17, 24, 24, 11, 11, 18, 18, 25, 25, 24, 24, + 5, 5, 12, 12, 19, 19, 32, 32, 26, 26, 6, 6, 33, 33, 32, 32, 20, 20, 27, + 27, 40, 40, 13, 13, 34, 34, 40, 40, 41, 41, 28, 28, 35, 35, 48, 48, 21, 21, + 42, 42, 14, 14, 48, 48, 36, 36, 49, 49, 43, 43, 29, 29, 56, 56, 22, 22, 50, + 50, 57, 57, 44, 44, 37, 37, 51, 51, 30, 30, 58, 58, 52, 52, 45, 45, 59, 59, + 38, 38, 60, 60, 46, 46, 53, 53, 54, 54, 61, 61, 62, 62, 0, 0, }; DECLARE_ALIGNED(16, static const int16_t, default_scan_8x8_neighbors[65 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 0, 0, 8, 8, 1, 8, 1, 1, 9, 16, 16, 16, 2, 9, 2, 2, 10, 17, 17, - 24, 24, 24, 3, 10, 3, 3, 18, 25, 25, 32, 11, 18, 32, 32, 4, 11, 26, 33, 19, - 26, 4, 4, 33, 40, 12, 19, 40, 40, 5, 12, 27, 34, 34, 41, 20, 27, 13, 20, 5, - 5, 41, 48, 48, 48, 28, 35, 35, 42, 21, 28, 6, 6, 6, 13, 42, 49, 49, 56, 36, - 43, 14, 21, 29, 36, 7, 14, 43, 50, 50, 57, 22, 29, 37, 44, 15, 22, 44, 51, - 51, 58, 30, 37, 23, 30, 52, 59, 45, 52, 38, 45, 31, 38, 53, 60, 46, 53, 39, - 46, 54, 61, 47, 54, 55, 62, 0, 0, + 0, 0, 0, 0, 0, 0, 8, 8, 1, 8, 1, 1, 9, 16, 16, 16, 2, 9, 2, + 2, 10, 17, 17, 24, 24, 24, 3, 10, 3, 3, 18, 25, 25, 32, 11, 18, 32, 32, + 4, 11, 26, 33, 19, 26, 4, 4, 33, 40, 12, 19, 40, 40, 5, 12, 27, 34, 34, + 41, 20, 27, 13, 20, 5, 5, 41, 48, 48, 48, 
28, 35, 35, 42, 21, 28, 6, 6, + 6, 13, 42, 49, 49, 56, 36, 43, 14, 21, 29, 36, 7, 14, 43, 50, 50, 57, 22, + 29, 37, 44, 15, 22, 44, 51, 51, 58, 30, 37, 23, 30, 52, 59, 45, 52, 38, 45, + 31, 38, 53, 60, 46, 53, 39, 46, 54, 61, 47, 54, 55, 62, 0, 0, }; DECLARE_ALIGNED(16, static const int16_t, col_scan_16x16_neighbors[257 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 16, 16, 32, 32, 0, 0, 48, 48, 1, 1, 64, 64, - 17, 17, 80, 80, 33, 33, 1, 1, 49, 49, 96, 96, 2, 2, 65, 65, - 18, 18, 112, 112, 34, 34, 81, 81, 2, 2, 50, 50, 128, 128, 3, 3, - 97, 97, 19, 19, 66, 66, 144, 144, 82, 82, 35, 35, 113, 113, 3, 3, - 51, 51, 160, 160, 4, 4, 98, 98, 129, 129, 67, 67, 20, 20, 83, 83, - 114, 114, 36, 36, 176, 176, 4, 4, 145, 145, 52, 52, 99, 99, 5, 5, - 130, 130, 68, 68, 192, 192, 161, 161, 21, 21, 115, 115, 84, 84, 37, 37, - 146, 146, 208, 208, 53, 53, 5, 5, 100, 100, 177, 177, 131, 131, 69, 69, - 6, 6, 224, 224, 116, 116, 22, 22, 162, 162, 85, 85, 147, 147, 38, 38, - 193, 193, 101, 101, 54, 54, 6, 6, 132, 132, 178, 178, 70, 70, 163, 163, - 209, 209, 7, 7, 117, 117, 23, 23, 148, 148, 7, 7, 86, 86, 194, 194, - 225, 225, 39, 39, 179, 179, 102, 102, 133, 133, 55, 55, 164, 164, 8, 8, - 71, 71, 210, 210, 118, 118, 149, 149, 195, 195, 24, 24, 87, 87, 40, 40, - 56, 56, 134, 134, 180, 180, 226, 226, 103, 103, 8, 8, 165, 165, 211, 211, - 72, 72, 150, 150, 9, 9, 119, 119, 25, 25, 88, 88, 196, 196, 41, 41, - 135, 135, 181, 181, 104, 104, 57, 57, 227, 227, 166, 166, 120, 120, 151, 151, - 197, 197, 73, 73, 9, 9, 212, 212, 89, 89, 136, 136, 182, 182, 10, 10, - 26, 26, 105, 105, 167, 167, 228, 228, 152, 152, 42, 42, 121, 121, 213, 213, - 58, 58, 198, 198, 74, 74, 137, 137, 183, 183, 168, 168, 10, 10, 90, 90, - 229, 229, 11, 11, 106, 106, 214, 214, 153, 153, 27, 27, 199, 199, 43, 43, - 184, 184, 122, 122, 169, 169, 230, 230, 59, 59, 11, 11, 75, 75, 138, 138, - 200, 200, 215, 215, 91, 91, 12, 12, 28, 28, 185, 185, 107, 107, 154, 154, - 44, 44, 231, 231, 216, 216, 60, 60, 123, 123, 12, 12, 76, 76, 201, 201, - 
170, 170, 232, 232, 139, 139, 92, 92, 13, 13, 108, 108, 29, 29, 186, 186, - 217, 217, 155, 155, 45, 45, 13, 13, 61, 61, 124, 124, 14, 14, 233, 233, - 77, 77, 14, 14, 171, 171, 140, 140, 202, 202, 30, 30, 93, 93, 109, 109, - 46, 46, 156, 156, 62, 62, 187, 187, 15, 15, 125, 125, 218, 218, 78, 78, - 31, 31, 172, 172, 47, 47, 141, 141, 94, 94, 234, 234, 203, 203, 63, 63, - 110, 110, 188, 188, 157, 157, 126, 126, 79, 79, 173, 173, 95, 95, 219, 219, - 142, 142, 204, 204, 235, 235, 111, 111, 158, 158, 127, 127, 189, 189, 220, - 220, 143, 143, 174, 174, 205, 205, 236, 236, 159, 159, 190, 190, 221, 221, - 175, 175, 237, 237, 206, 206, 222, 222, 191, 191, 238, 238, 207, 207, 223, - 223, 239, 239, 0, 0, + 0, 0, 0, 0, 16, 16, 32, 32, 0, 0, 48, 48, 1, 1, 64, + 64, 17, 17, 80, 80, 33, 33, 1, 1, 49, 49, 96, 96, 2, 2, + 65, 65, 18, 18, 112, 112, 34, 34, 81, 81, 2, 2, 50, 50, 128, + 128, 3, 3, 97, 97, 19, 19, 66, 66, 144, 144, 82, 82, 35, 35, + 113, 113, 3, 3, 51, 51, 160, 160, 4, 4, 98, 98, 129, 129, 67, + 67, 20, 20, 83, 83, 114, 114, 36, 36, 176, 176, 4, 4, 145, 145, + 52, 52, 99, 99, 5, 5, 130, 130, 68, 68, 192, 192, 161, 161, 21, + 21, 115, 115, 84, 84, 37, 37, 146, 146, 208, 208, 53, 53, 5, 5, + 100, 100, 177, 177, 131, 131, 69, 69, 6, 6, 224, 224, 116, 116, 22, + 22, 162, 162, 85, 85, 147, 147, 38, 38, 193, 193, 101, 101, 54, 54, + 6, 6, 132, 132, 178, 178, 70, 70, 163, 163, 209, 209, 7, 7, 117, + 117, 23, 23, 148, 148, 7, 7, 86, 86, 194, 194, 225, 225, 39, 39, + 179, 179, 102, 102, 133, 133, 55, 55, 164, 164, 8, 8, 71, 71, 210, + 210, 118, 118, 149, 149, 195, 195, 24, 24, 87, 87, 40, 40, 56, 56, + 134, 134, 180, 180, 226, 226, 103, 103, 8, 8, 165, 165, 211, 211, 72, + 72, 150, 150, 9, 9, 119, 119, 25, 25, 88, 88, 196, 196, 41, 41, + 135, 135, 181, 181, 104, 104, 57, 57, 227, 227, 166, 166, 120, 120, 151, + 151, 197, 197, 73, 73, 9, 9, 212, 212, 89, 89, 136, 136, 182, 182, + 10, 10, 26, 26, 105, 105, 167, 167, 228, 228, 152, 152, 42, 42, 121, + 121, 213, 213, 58, 58, 198, 
198, 74, 74, 137, 137, 183, 183, 168, 168, + 10, 10, 90, 90, 229, 229, 11, 11, 106, 106, 214, 214, 153, 153, 27, + 27, 199, 199, 43, 43, 184, 184, 122, 122, 169, 169, 230, 230, 59, 59, + 11, 11, 75, 75, 138, 138, 200, 200, 215, 215, 91, 91, 12, 12, 28, + 28, 185, 185, 107, 107, 154, 154, 44, 44, 231, 231, 216, 216, 60, 60, + 123, 123, 12, 12, 76, 76, 201, 201, 170, 170, 232, 232, 139, 139, 92, + 92, 13, 13, 108, 108, 29, 29, 186, 186, 217, 217, 155, 155, 45, 45, + 13, 13, 61, 61, 124, 124, 14, 14, 233, 233, 77, 77, 14, 14, 171, + 171, 140, 140, 202, 202, 30, 30, 93, 93, 109, 109, 46, 46, 156, 156, + 62, 62, 187, 187, 15, 15, 125, 125, 218, 218, 78, 78, 31, 31, 172, + 172, 47, 47, 141, 141, 94, 94, 234, 234, 203, 203, 63, 63, 110, 110, + 188, 188, 157, 157, 126, 126, 79, 79, 173, 173, 95, 95, 219, 219, 142, + 142, 204, 204, 235, 235, 111, 111, 158, 158, 127, 127, 189, 189, 220, 220, + 143, 143, 174, 174, 205, 205, 236, 236, 159, 159, 190, 190, 221, 221, 175, + 175, 237, 237, 206, 206, 222, 222, 191, 191, 238, 238, 207, 207, 223, 223, + 239, 239, 0, 0, }; DECLARE_ALIGNED(16, static const int16_t, row_scan_16x16_neighbors[257 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 1, 1, 0, 0, 2, 2, 16, 16, 3, 3, 17, 17, - 16, 16, 4, 4, 32, 32, 18, 18, 5, 5, 33, 33, 32, 32, 19, 19, - 48, 48, 6, 6, 34, 34, 20, 20, 49, 49, 48, 48, 7, 7, 35, 35, - 64, 64, 21, 21, 50, 50, 36, 36, 64, 64, 8, 8, 65, 65, 51, 51, - 22, 22, 37, 37, 80, 80, 66, 66, 9, 9, 52, 52, 23, 23, 81, 81, - 67, 67, 80, 80, 38, 38, 10, 10, 53, 53, 82, 82, 96, 96, 68, 68, - 24, 24, 97, 97, 83, 83, 39, 39, 96, 96, 54, 54, 11, 11, 69, 69, - 98, 98, 112, 112, 84, 84, 25, 25, 40, 40, 55, 55, 113, 113, 99, 99, - 12, 12, 70, 70, 112, 112, 85, 85, 26, 26, 114, 114, 100, 100, 128, 128, - 41, 41, 56, 56, 71, 71, 115, 115, 13, 13, 86, 86, 129, 129, 101, 101, - 128, 128, 72, 72, 130, 130, 116, 116, 27, 27, 57, 57, 14, 14, 87, 87, - 42, 42, 144, 144, 102, 102, 131, 131, 145, 145, 117, 117, 73, 73, 144, 144, - 88, 88, 132, 132, 103, 103, 
28, 28, 58, 58, 146, 146, 118, 118, 43, 43, - 160, 160, 147, 147, 89, 89, 104, 104, 133, 133, 161, 161, 119, 119, 160, 160, - 74, 74, 134, 134, 148, 148, 29, 29, 59, 59, 162, 162, 176, 176, 44, 44, - 120, 120, 90, 90, 105, 105, 163, 163, 177, 177, 149, 149, 176, 176, 135, 135, - 164, 164, 178, 178, 30, 30, 150, 150, 192, 192, 75, 75, 121, 121, 60, 60, - 136, 136, 193, 193, 106, 106, 151, 151, 179, 179, 192, 192, 45, 45, 165, 165, - 166, 166, 194, 194, 91, 91, 180, 180, 137, 137, 208, 208, 122, 122, 152, 152, - 208, 208, 195, 195, 76, 76, 167, 167, 209, 209, 181, 181, 224, 224, 107, 107, - 196, 196, 61, 61, 153, 153, 224, 224, 182, 182, 168, 168, 210, 210, 46, 46, - 138, 138, 92, 92, 183, 183, 225, 225, 211, 211, 240, 240, 197, 197, 169, 169, - 123, 123, 154, 154, 198, 198, 77, 77, 212, 212, 184, 184, 108, 108, 226, 226, - 199, 199, 62, 62, 227, 227, 241, 241, 139, 139, 213, 213, 170, 170, 185, 185, - 155, 155, 228, 228, 242, 242, 124, 124, 93, 93, 200, 200, 243, 243, 214, 214, - 215, 215, 229, 229, 140, 140, 186, 186, 201, 201, 78, 78, 171, 171, 109, 109, - 156, 156, 244, 244, 216, 216, 230, 230, 94, 94, 245, 245, 231, 231, 125, 125, - 202, 202, 246, 246, 232, 232, 172, 172, 217, 217, 141, 141, 110, 110, 157, - 157, 187, 187, 247, 247, 126, 126, 233, 233, 218, 218, 248, 248, 188, 188, - 203, 203, 142, 142, 173, 173, 158, 158, 249, 249, 234, 234, 204, 204, 219, - 219, 174, 174, 189, 189, 250, 250, 220, 220, 190, 190, 205, 205, 235, 235, - 206, 206, 236, 236, 251, 251, 221, 221, 252, 252, 222, 222, 237, 237, 238, - 238, 253, 253, 254, 254, 0, 0, + 0, 0, 0, 0, 1, 1, 0, 0, 2, 2, 16, 16, 3, 3, 17, + 17, 16, 16, 4, 4, 32, 32, 18, 18, 5, 5, 33, 33, 32, 32, + 19, 19, 48, 48, 6, 6, 34, 34, 20, 20, 49, 49, 48, 48, 7, + 7, 35, 35, 64, 64, 21, 21, 50, 50, 36, 36, 64, 64, 8, 8, + 65, 65, 51, 51, 22, 22, 37, 37, 80, 80, 66, 66, 9, 9, 52, + 52, 23, 23, 81, 81, 67, 67, 80, 80, 38, 38, 10, 10, 53, 53, + 82, 82, 96, 96, 68, 68, 24, 24, 97, 97, 83, 83, 39, 39, 96, + 96, 54, 54, 11, 
11, 69, 69, 98, 98, 112, 112, 84, 84, 25, 25, + 40, 40, 55, 55, 113, 113, 99, 99, 12, 12, 70, 70, 112, 112, 85, + 85, 26, 26, 114, 114, 100, 100, 128, 128, 41, 41, 56, 56, 71, 71, + 115, 115, 13, 13, 86, 86, 129, 129, 101, 101, 128, 128, 72, 72, 130, + 130, 116, 116, 27, 27, 57, 57, 14, 14, 87, 87, 42, 42, 144, 144, + 102, 102, 131, 131, 145, 145, 117, 117, 73, 73, 144, 144, 88, 88, 132, + 132, 103, 103, 28, 28, 58, 58, 146, 146, 118, 118, 43, 43, 160, 160, + 147, 147, 89, 89, 104, 104, 133, 133, 161, 161, 119, 119, 160, 160, 74, + 74, 134, 134, 148, 148, 29, 29, 59, 59, 162, 162, 176, 176, 44, 44, + 120, 120, 90, 90, 105, 105, 163, 163, 177, 177, 149, 149, 176, 176, 135, + 135, 164, 164, 178, 178, 30, 30, 150, 150, 192, 192, 75, 75, 121, 121, + 60, 60, 136, 136, 193, 193, 106, 106, 151, 151, 179, 179, 192, 192, 45, + 45, 165, 165, 166, 166, 194, 194, 91, 91, 180, 180, 137, 137, 208, 208, + 122, 122, 152, 152, 208, 208, 195, 195, 76, 76, 167, 167, 209, 209, 181, + 181, 224, 224, 107, 107, 196, 196, 61, 61, 153, 153, 224, 224, 182, 182, + 168, 168, 210, 210, 46, 46, 138, 138, 92, 92, 183, 183, 225, 225, 211, + 211, 240, 240, 197, 197, 169, 169, 123, 123, 154, 154, 198, 198, 77, 77, + 212, 212, 184, 184, 108, 108, 226, 226, 199, 199, 62, 62, 227, 227, 241, + 241, 139, 139, 213, 213, 170, 170, 185, 185, 155, 155, 228, 228, 242, 242, + 124, 124, 93, 93, 200, 200, 243, 243, 214, 214, 215, 215, 229, 229, 140, + 140, 186, 186, 201, 201, 78, 78, 171, 171, 109, 109, 156, 156, 244, 244, + 216, 216, 230, 230, 94, 94, 245, 245, 231, 231, 125, 125, 202, 202, 246, + 246, 232, 232, 172, 172, 217, 217, 141, 141, 110, 110, 157, 157, 187, 187, + 247, 247, 126, 126, 233, 233, 218, 218, 248, 248, 188, 188, 203, 203, 142, + 142, 173, 173, 158, 158, 249, 249, 234, 234, 204, 204, 219, 219, 174, 174, + 189, 189, 250, 250, 220, 220, 190, 190, 205, 205, 235, 235, 206, 206, 236, + 236, 251, 251, 221, 221, 252, 252, 222, 222, 237, 237, 238, 238, 253, 253, + 254, 254, 0, 0, }; 
DECLARE_ALIGNED(16, static const int16_t, default_scan_16x16_neighbors[257 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 0, 0, 16, 16, 1, 16, 1, 1, 32, 32, 17, 32, - 2, 17, 2, 2, 48, 48, 18, 33, 33, 48, 3, 18, 49, 64, 64, 64, - 34, 49, 3, 3, 19, 34, 50, 65, 4, 19, 65, 80, 80, 80, 35, 50, - 4, 4, 20, 35, 66, 81, 81, 96, 51, 66, 96, 96, 5, 20, 36, 51, - 82, 97, 21, 36, 67, 82, 97, 112, 5, 5, 52, 67, 112, 112, 37, 52, - 6, 21, 83, 98, 98, 113, 68, 83, 6, 6, 113, 128, 22, 37, 53, 68, - 84, 99, 99, 114, 128, 128, 114, 129, 69, 84, 38, 53, 7, 22, 7, 7, - 129, 144, 23, 38, 54, 69, 100, 115, 85, 100, 115, 130, 144, 144, 130, 145, - 39, 54, 70, 85, 8, 23, 55, 70, 116, 131, 101, 116, 145, 160, 24, 39, - 8, 8, 86, 101, 131, 146, 160, 160, 146, 161, 71, 86, 40, 55, 9, 24, - 117, 132, 102, 117, 161, 176, 132, 147, 56, 71, 87, 102, 25, 40, 147, 162, - 9, 9, 176, 176, 162, 177, 72, 87, 41, 56, 118, 133, 133, 148, 103, 118, - 10, 25, 148, 163, 57, 72, 88, 103, 177, 192, 26, 41, 163, 178, 192, 192, - 10, 10, 119, 134, 73, 88, 149, 164, 104, 119, 134, 149, 42, 57, 178, 193, - 164, 179, 11, 26, 58, 73, 193, 208, 89, 104, 135, 150, 120, 135, 27, 42, - 74, 89, 208, 208, 150, 165, 179, 194, 165, 180, 105, 120, 194, 209, 43, 58, - 11, 11, 136, 151, 90, 105, 151, 166, 180, 195, 59, 74, 121, 136, 209, 224, - 195, 210, 224, 224, 166, 181, 106, 121, 75, 90, 12, 27, 181, 196, 12, 12, - 210, 225, 152, 167, 167, 182, 137, 152, 28, 43, 196, 211, 122, 137, 91, 106, - 225, 240, 44, 59, 13, 28, 107, 122, 182, 197, 168, 183, 211, 226, 153, 168, - 226, 241, 60, 75, 197, 212, 138, 153, 29, 44, 76, 91, 13, 13, 183, 198, - 123, 138, 45, 60, 212, 227, 198, 213, 154, 169, 169, 184, 227, 242, 92, 107, - 61, 76, 139, 154, 14, 29, 14, 14, 184, 199, 213, 228, 108, 123, 199, 214, - 228, 243, 77, 92, 30, 45, 170, 185, 155, 170, 185, 200, 93, 108, 124, 139, - 214, 229, 46, 61, 200, 215, 229, 244, 15, 30, 109, 124, 62, 77, 140, 155, - 215, 230, 31, 46, 171, 186, 186, 201, 201, 216, 78, 93, 230, 245, 125, 140, - 47, 62, 
216, 231, 156, 171, 94, 109, 231, 246, 141, 156, 63, 78, 202, 217, - 187, 202, 110, 125, 217, 232, 172, 187, 232, 247, 79, 94, 157, 172, 126, 141, - 203, 218, 95, 110, 233, 248, 218, 233, 142, 157, 111, 126, 173, 188, 188, 203, - 234, 249, 219, 234, 127, 142, 158, 173, 204, 219, 189, 204, 143, 158, 235, - 250, 174, 189, 205, 220, 159, 174, 220, 235, 221, 236, 175, 190, 190, 205, - 236, 251, 206, 221, 237, 252, 191, 206, 222, 237, 207, 222, 238, 253, 223, - 238, 239, 254, 0, 0, + 0, 0, 0, 0, 0, 0, 16, 16, 1, 16, 1, 1, 32, 32, 17, + 32, 2, 17, 2, 2, 48, 48, 18, 33, 33, 48, 3, 18, 49, 64, + 64, 64, 34, 49, 3, 3, 19, 34, 50, 65, 4, 19, 65, 80, 80, + 80, 35, 50, 4, 4, 20, 35, 66, 81, 81, 96, 51, 66, 96, 96, + 5, 20, 36, 51, 82, 97, 21, 36, 67, 82, 97, 112, 5, 5, 52, + 67, 112, 112, 37, 52, 6, 21, 83, 98, 98, 113, 68, 83, 6, 6, + 113, 128, 22, 37, 53, 68, 84, 99, 99, 114, 128, 128, 114, 129, 69, + 84, 38, 53, 7, 22, 7, 7, 129, 144, 23, 38, 54, 69, 100, 115, + 85, 100, 115, 130, 144, 144, 130, 145, 39, 54, 70, 85, 8, 23, 55, + 70, 116, 131, 101, 116, 145, 160, 24, 39, 8, 8, 86, 101, 131, 146, + 160, 160, 146, 161, 71, 86, 40, 55, 9, 24, 117, 132, 102, 117, 161, + 176, 132, 147, 56, 71, 87, 102, 25, 40, 147, 162, 9, 9, 176, 176, + 162, 177, 72, 87, 41, 56, 118, 133, 133, 148, 103, 118, 10, 25, 148, + 163, 57, 72, 88, 103, 177, 192, 26, 41, 163, 178, 192, 192, 10, 10, + 119, 134, 73, 88, 149, 164, 104, 119, 134, 149, 42, 57, 178, 193, 164, + 179, 11, 26, 58, 73, 193, 208, 89, 104, 135, 150, 120, 135, 27, 42, + 74, 89, 208, 208, 150, 165, 179, 194, 165, 180, 105, 120, 194, 209, 43, + 58, 11, 11, 136, 151, 90, 105, 151, 166, 180, 195, 59, 74, 121, 136, + 209, 224, 195, 210, 224, 224, 166, 181, 106, 121, 75, 90, 12, 27, 181, + 196, 12, 12, 210, 225, 152, 167, 167, 182, 137, 152, 28, 43, 196, 211, + 122, 137, 91, 106, 225, 240, 44, 59, 13, 28, 107, 122, 182, 197, 168, + 183, 211, 226, 153, 168, 226, 241, 60, 75, 197, 212, 138, 153, 29, 44, + 76, 91, 13, 13, 183, 198, 123, 138, 
45, 60, 212, 227, 198, 213, 154, + 169, 169, 184, 227, 242, 92, 107, 61, 76, 139, 154, 14, 29, 14, 14, + 184, 199, 213, 228, 108, 123, 199, 214, 228, 243, 77, 92, 30, 45, 170, + 185, 155, 170, 185, 200, 93, 108, 124, 139, 214, 229, 46, 61, 200, 215, + 229, 244, 15, 30, 109, 124, 62, 77, 140, 155, 215, 230, 31, 46, 171, + 186, 186, 201, 201, 216, 78, 93, 230, 245, 125, 140, 47, 62, 216, 231, + 156, 171, 94, 109, 231, 246, 141, 156, 63, 78, 202, 217, 187, 202, 110, + 125, 217, 232, 172, 187, 232, 247, 79, 94, 157, 172, 126, 141, 203, 218, + 95, 110, 233, 248, 218, 233, 142, 157, 111, 126, 173, 188, 188, 203, 234, + 249, 219, 234, 127, 142, 158, 173, 204, 219, 189, 204, 143, 158, 235, 250, + 174, 189, 205, 220, 159, 174, 220, 235, 221, 236, 175, 190, 190, 205, 236, + 251, 206, 221, 237, 252, 191, 206, 222, 237, 207, 222, 238, 253, 223, 238, + 239, 254, 0, 0, }; DECLARE_ALIGNED(16, static const int16_t, default_scan_32x32_neighbors[1025 * MAX_NEIGHBORS]) = { - 0, 0, 0, 0, 0, 0, 32, 32, 1, 32, 1, 1, 64, 64, 33, 64, - 2, 33, 96, 96, 2, 2, 65, 96, 34, 65, 128, 128, 97, 128, 3, 34, - 66, 97, 3, 3, 35, 66, 98, 129, 129, 160, 160, 160, 4, 35, 67, 98, - 192, 192, 4, 4, 130, 161, 161, 192, 36, 67, 99, 130, 5, 36, 68, 99, - 193, 224, 162, 193, 224, 224, 131, 162, 37, 68, 100, 131, 5, 5, 194, 225, - 225, 256, 256, 256, 163, 194, 69, 100, 132, 163, 6, 37, 226, 257, 6, 6, - 195, 226, 257, 288, 101, 132, 288, 288, 38, 69, 164, 195, 133, 164, 258, 289, - 227, 258, 196, 227, 7, 38, 289, 320, 70, 101, 320, 320, 7, 7, 165, 196, - 39, 70, 102, 133, 290, 321, 259, 290, 228, 259, 321, 352, 352, 352, 197, 228, - 134, 165, 71, 102, 8, 39, 322, 353, 291, 322, 260, 291, 103, 134, 353, 384, - 166, 197, 229, 260, 40, 71, 8, 8, 384, 384, 135, 166, 354, 385, 323, 354, - 198, 229, 292, 323, 72, 103, 261, 292, 9, 40, 385, 416, 167, 198, 104, 135, - 230, 261, 355, 386, 416, 416, 293, 324, 324, 355, 9, 9, 41, 72, 386, 417, - 199, 230, 136, 167, 417, 448, 262, 293, 356, 387, 73, 104, 387, 418, 231, 
262, - 10, 41, 168, 199, 325, 356, 418, 449, 105, 136, 448, 448, 42, 73, 294, 325, - 200, 231, 10, 10, 357, 388, 137, 168, 263, 294, 388, 419, 74, 105, 419, 450, - 449, 480, 326, 357, 232, 263, 295, 326, 169, 200, 11, 42, 106, 137, 480, 480, - 450, 481, 358, 389, 264, 295, 201, 232, 138, 169, 389, 420, 43, 74, 420, 451, - 327, 358, 11, 11, 481, 512, 233, 264, 451, 482, 296, 327, 75, 106, 170, 201, - 482, 513, 512, 512, 390, 421, 359, 390, 421, 452, 107, 138, 12, 43, 202, 233, - 452, 483, 265, 296, 328, 359, 139, 170, 44, 75, 483, 514, 513, 544, 234, 265, - 297, 328, 422, 453, 12, 12, 391, 422, 171, 202, 76, 107, 514, 545, 453, 484, - 544, 544, 266, 297, 203, 234, 108, 139, 329, 360, 298, 329, 140, 171, 515, - 546, 13, 44, 423, 454, 235, 266, 545, 576, 454, 485, 45, 76, 172, 203, 330, - 361, 576, 576, 13, 13, 267, 298, 546, 577, 77, 108, 204, 235, 455, 486, 577, - 608, 299, 330, 109, 140, 547, 578, 14, 45, 14, 14, 141, 172, 578, 609, 331, - 362, 46, 77, 173, 204, 15, 15, 78, 109, 205, 236, 579, 610, 110, 141, 15, 46, - 142, 173, 47, 78, 174, 205, 16, 16, 79, 110, 206, 237, 16, 47, 111, 142, - 48, 79, 143, 174, 80, 111, 175, 206, 17, 48, 17, 17, 207, 238, 49, 80, - 81, 112, 18, 18, 18, 49, 50, 81, 82, 113, 19, 50, 51, 82, 83, 114, 608, 608, - 484, 515, 360, 391, 236, 267, 112, 143, 19, 19, 640, 640, 609, 640, 516, 547, - 485, 516, 392, 423, 361, 392, 268, 299, 237, 268, 144, 175, 113, 144, 20, 51, - 20, 20, 672, 672, 641, 672, 610, 641, 548, 579, 517, 548, 486, 517, 424, 455, - 393, 424, 362, 393, 300, 331, 269, 300, 238, 269, 176, 207, 145, 176, 114, - 145, 52, 83, 21, 52, 21, 21, 704, 704, 673, 704, 642, 673, 611, 642, 580, - 611, 549, 580, 518, 549, 487, 518, 456, 487, 425, 456, 394, 425, 363, 394, - 332, 363, 301, 332, 270, 301, 239, 270, 208, 239, 177, 208, 146, 177, 115, - 146, 84, 115, 53, 84, 22, 53, 22, 22, 705, 736, 674, 705, 643, 674, 581, 612, - 550, 581, 519, 550, 457, 488, 426, 457, 395, 426, 333, 364, 302, 333, 271, - 302, 209, 240, 178, 209, 147, 178, 
85, 116, 54, 85, 23, 54, 706, 737, 675, - 706, 582, 613, 551, 582, 458, 489, 427, 458, 334, 365, 303, 334, 210, 241, - 179, 210, 86, 117, 55, 86, 707, 738, 583, 614, 459, 490, 335, 366, 211, 242, - 87, 118, 736, 736, 612, 643, 488, 519, 364, 395, 240, 271, 116, 147, 23, 23, - 768, 768, 737, 768, 644, 675, 613, 644, 520, 551, 489, 520, 396, 427, 365, - 396, 272, 303, 241, 272, 148, 179, 117, 148, 24, 55, 24, 24, 800, 800, 769, - 800, 738, 769, 676, 707, 645, 676, 614, 645, 552, 583, 521, 552, 490, 521, - 428, 459, 397, 428, 366, 397, 304, 335, 273, 304, 242, 273, 180, 211, 149, - 180, 118, 149, 56, 87, 25, 56, 25, 25, 832, 832, 801, 832, 770, 801, 739, - 770, 708, 739, 677, 708, 646, 677, 615, 646, 584, 615, 553, 584, 522, 553, - 491, 522, 460, 491, 429, 460, 398, 429, 367, 398, 336, 367, 305, 336, 274, - 305, 243, 274, 212, 243, 181, 212, 150, 181, 119, 150, 88, 119, 57, 88, 26, - 57, 26, 26, 833, 864, 802, 833, 771, 802, 709, 740, 678, 709, 647, 678, 585, - 616, 554, 585, 523, 554, 461, 492, 430, 461, 399, 430, 337, 368, 306, 337, - 275, 306, 213, 244, 182, 213, 151, 182, 89, 120, 58, 89, 27, 58, 834, 865, - 803, 834, 710, 741, 679, 710, 586, 617, 555, 586, 462, 493, 431, 462, 338, - 369, 307, 338, 214, 245, 183, 214, 90, 121, 59, 90, 835, 866, 711, 742, 587, - 618, 463, 494, 339, 370, 215, 246, 91, 122, 864, 864, 740, 771, 616, 647, - 492, 523, 368, 399, 244, 275, 120, 151, 27, 27, 896, 896, 865, 896, 772, 803, - 741, 772, 648, 679, 617, 648, 524, 555, 493, 524, 400, 431, 369, 400, 276, - 307, 245, 276, 152, 183, 121, 152, 28, 59, 28, 28, 928, 928, 897, 928, 866, - 897, 804, 835, 773, 804, 742, 773, 680, 711, 649, 680, 618, 649, 556, 587, - 525, 556, 494, 525, 432, 463, 401, 432, 370, 401, 308, 339, 277, 308, 246, - 277, 184, 215, 153, 184, 122, 153, 60, 91, 29, 60, 29, 29, 960, 960, 929, - 960, 898, 929, 867, 898, 836, 867, 805, 836, 774, 805, 743, 774, 712, 743, - 681, 712, 650, 681, 619, 650, 588, 619, 557, 588, 526, 557, 495, 526, 464, - 495, 433, 464, 402, 
433, 371, 402, 340, 371, 309, 340, 278, 309, 247, 278, - 216, 247, 185, 216, 154, 185, 123, 154, 92, 123, 61, 92, 30, 61, 30, 30, - 961, 992, 930, 961, 899, 930, 837, 868, 806, 837, 775, 806, 713, 744, 682, - 713, 651, 682, 589, 620, 558, 589, 527, 558, 465, 496, 434, 465, 403, 434, - 341, 372, 310, 341, 279, 310, 217, 248, 186, 217, 155, 186, 93, 124, 62, 93, - 31, 62, 962, 993, 931, 962, 838, 869, 807, 838, 714, 745, 683, 714, 590, 621, - 559, 590, 466, 497, 435, 466, 342, 373, 311, 342, 218, 249, 187, 218, 94, - 125, 63, 94, 963, 994, 839, 870, 715, 746, 591, 622, 467, 498, 343, 374, 219, - 250, 95, 126, 868, 899, 744, 775, 620, 651, 496, 527, 372, 403, 248, 279, - 124, 155, 900, 931, 869, 900, 776, 807, 745, 776, 652, 683, 621, 652, 528, - 559, 497, 528, 404, 435, 373, 404, 280, 311, 249, 280, 156, 187, 125, 156, - 932, 963, 901, 932, 870, 901, 808, 839, 777, 808, 746, 777, 684, 715, 653, - 684, 622, 653, 560, 591, 529, 560, 498, 529, 436, 467, 405, 436, 374, 405, - 312, 343, 281, 312, 250, 281, 188, 219, 157, 188, 126, 157, 964, 995, 933, - 964, 902, 933, 871, 902, 840, 871, 809, 840, 778, 809, 747, 778, 716, 747, - 685, 716, 654, 685, 623, 654, 592, 623, 561, 592, 530, 561, 499, 530, 468, - 499, 437, 468, 406, 437, 375, 406, 344, 375, 313, 344, 282, 313, 251, 282, - 220, 251, 189, 220, 158, 189, 127, 158, 965, 996, 934, 965, 903, 934, 841, - 872, 810, 841, 779, 810, 717, 748, 686, 717, 655, 686, 593, 624, 562, 593, - 531, 562, 469, 500, 438, 469, 407, 438, 345, 376, 314, 345, 283, 314, 221, - 252, 190, 221, 159, 190, 966, 997, 935, 966, 842, 873, 811, 842, 718, 749, - 687, 718, 594, 625, 563, 594, 470, 501, 439, 470, 346, 377, 315, 346, 222, - 253, 191, 222, 967, 998, 843, 874, 719, 750, 595, 626, 471, 502, 347, 378, - 223, 254, 872, 903, 748, 779, 624, 655, 500, 531, 376, 407, 252, 283, 904, - 935, 873, 904, 780, 811, 749, 780, 656, 687, 625, 656, 532, 563, 501, 532, - 408, 439, 377, 408, 284, 315, 253, 284, 936, 967, 905, 936, 874, 905, 812, - 843, 781, 
812, 750, 781, 688, 719, 657, 688, 626, 657, 564, 595, 533, 564, - 502, 533, 440, 471, 409, 440, 378, 409, 316, 347, 285, 316, 254, 285, 968, - 999, 937, 968, 906, 937, 875, 906, 844, 875, 813, 844, 782, 813, 751, 782, - 720, 751, 689, 720, 658, 689, 627, 658, 596, 627, 565, 596, 534, 565, 503, - 534, 472, 503, 441, 472, 410, 441, 379, 410, 348, 379, 317, 348, 286, 317, - 255, 286, 969, 1000, 938, 969, 907, 938, 845, 876, 814, 845, 783, 814, 721, - 752, 690, 721, 659, 690, 597, 628, 566, 597, 535, 566, 473, 504, 442, 473, - 411, 442, 349, 380, 318, 349, 287, 318, 970, 1001, 939, 970, 846, 877, 815, - 846, 722, 753, 691, 722, 598, 629, 567, 598, 474, 505, 443, 474, 350, 381, - 319, 350, 971, 1002, 847, 878, 723, 754, 599, 630, 475, 506, 351, 382, 876, - 907, 752, 783, 628, 659, 504, 535, 380, 411, 908, 939, 877, 908, 784, 815, - 753, 784, 660, 691, 629, 660, 536, 567, 505, 536, 412, 443, 381, 412, 940, - 971, 909, 940, 878, 909, 816, 847, 785, 816, 754, 785, 692, 723, 661, 692, - 630, 661, 568, 599, 537, 568, 506, 537, 444, 475, 413, 444, 382, 413, 972, - 1003, 941, 972, 910, 941, 879, 910, 848, 879, 817, 848, 786, 817, 755, 786, - 724, 755, 693, 724, 662, 693, 631, 662, 600, 631, 569, 600, 538, 569, 507, - 538, 476, 507, 445, 476, 414, 445, 383, 414, 973, 1004, 942, 973, 911, 942, - 849, 880, 818, 849, 787, 818, 725, 756, 694, 725, 663, 694, 601, 632, 570, - 601, 539, 570, 477, 508, 446, 477, 415, 446, 974, 1005, 943, 974, 850, 881, - 819, 850, 726, 757, 695, 726, 602, 633, 571, 602, 478, 509, 447, 478, 975, - 1006, 851, 882, 727, 758, 603, 634, 479, 510, 880, 911, 756, 787, 632, 663, - 508, 539, 912, 943, 881, 912, 788, 819, 757, 788, 664, 695, 633, 664, 540, - 571, 509, 540, 944, 975, 913, 944, 882, 913, 820, 851, 789, 820, 758, 789, - 696, 727, 665, 696, 634, 665, 572, 603, 541, 572, 510, 541, 976, 1007, 945, - 976, 914, 945, 883, 914, 852, 883, 821, 852, 790, 821, 759, 790, 728, 759, - 697, 728, 666, 697, 635, 666, 604, 635, 573, 604, 542, 573, 511, 542, 977, - 
1008, 946, 977, 915, 946, 853, 884, 822, 853, 791, 822, 729, 760, 698, 729, - 667, 698, 605, 636, 574, 605, 543, 574, 978, 1009, 947, 978, 854, 885, 823, - 854, 730, 761, 699, 730, 606, 637, 575, 606, 979, 1010, 855, 886, 731, 762, - 607, 638, 884, 915, 760, 791, 636, 667, 916, 947, 885, 916, 792, 823, 761, - 792, 668, 699, 637, 668, 948, 979, 917, 948, 886, 917, 824, 855, 793, 824, - 762, 793, 700, 731, 669, 700, 638, 669, 980, 1011, 949, 980, 918, 949, 887, - 918, 856, 887, 825, 856, 794, 825, 763, 794, 732, 763, 701, 732, 670, 701, - 639, 670, 981, 1012, 950, 981, 919, 950, 857, 888, 826, 857, 795, 826, 733, - 764, 702, 733, 671, 702, 982, 1013, 951, 982, 858, 889, 827, 858, 734, 765, - 703, 734, 983, 1014, 859, 890, 735, 766, 888, 919, 764, 795, 920, 951, 889, - 920, 796, 827, 765, 796, 952, 983, 921, 952, 890, 921, 828, 859, 797, 828, - 766, 797, 984, 1015, 953, 984, 922, 953, 891, 922, 860, 891, 829, 860, 798, - 829, 767, 798, 985, 1016, 954, 985, 923, 954, 861, 892, 830, 861, 799, 830, - 986, 1017, 955, 986, 862, 893, 831, 862, 987, 1018, 863, 894, 892, 923, 924, - 955, 893, 924, 956, 987, 925, 956, 894, 925, 988, 1019, 957, 988, 926, 957, - 895, 926, 989, 1020, 958, 989, 927, 958, 990, 1021, 959, 990, 991, 1022, 0, 0, + 0, 0, 0, 0, 0, 0, 32, 32, 1, 32, 1, 1, 64, 64, + 33, 64, 2, 33, 96, 96, 2, 2, 65, 96, 34, 65, 128, 128, + 97, 128, 3, 34, 66, 97, 3, 3, 35, 66, 98, 129, 129, 160, + 160, 160, 4, 35, 67, 98, 192, 192, 4, 4, 130, 161, 161, 192, + 36, 67, 99, 130, 5, 36, 68, 99, 193, 224, 162, 193, 224, 224, + 131, 162, 37, 68, 100, 131, 5, 5, 194, 225, 225, 256, 256, 256, + 163, 194, 69, 100, 132, 163, 6, 37, 226, 257, 6, 6, 195, 226, + 257, 288, 101, 132, 288, 288, 38, 69, 164, 195, 133, 164, 258, 289, + 227, 258, 196, 227, 7, 38, 289, 320, 70, 101, 320, 320, 7, 7, + 165, 196, 39, 70, 102, 133, 290, 321, 259, 290, 228, 259, 321, 352, + 352, 352, 197, 228, 134, 165, 71, 102, 8, 39, 322, 353, 291, 322, + 260, 291, 103, 134, 353, 384, 166, 197, 229, 260, 40, 71, 
8, 8, + 384, 384, 135, 166, 354, 385, 323, 354, 198, 229, 292, 323, 72, 103, + 261, 292, 9, 40, 385, 416, 167, 198, 104, 135, 230, 261, 355, 386, + 416, 416, 293, 324, 324, 355, 9, 9, 41, 72, 386, 417, 199, 230, + 136, 167, 417, 448, 262, 293, 356, 387, 73, 104, 387, 418, 231, 262, + 10, 41, 168, 199, 325, 356, 418, 449, 105, 136, 448, 448, 42, 73, + 294, 325, 200, 231, 10, 10, 357, 388, 137, 168, 263, 294, 388, 419, + 74, 105, 419, 450, 449, 480, 326, 357, 232, 263, 295, 326, 169, 200, + 11, 42, 106, 137, 480, 480, 450, 481, 358, 389, 264, 295, 201, 232, + 138, 169, 389, 420, 43, 74, 420, 451, 327, 358, 11, 11, 481, 512, + 233, 264, 451, 482, 296, 327, 75, 106, 170, 201, 482, 513, 512, 512, + 390, 421, 359, 390, 421, 452, 107, 138, 12, 43, 202, 233, 452, 483, + 265, 296, 328, 359, 139, 170, 44, 75, 483, 514, 513, 544, 234, 265, + 297, 328, 422, 453, 12, 12, 391, 422, 171, 202, 76, 107, 514, 545, + 453, 484, 544, 544, 266, 297, 203, 234, 108, 139, 329, 360, 298, 329, + 140, 171, 515, 546, 13, 44, 423, 454, 235, 266, 545, 576, 454, 485, + 45, 76, 172, 203, 330, 361, 576, 576, 13, 13, 267, 298, 546, 577, + 77, 108, 204, 235, 455, 486, 577, 608, 299, 330, 109, 140, 547, 578, + 14, 45, 14, 14, 141, 172, 578, 609, 331, 362, 46, 77, 173, 204, + 15, 15, 78, 109, 205, 236, 579, 610, 110, 141, 15, 46, 142, 173, + 47, 78, 174, 205, 16, 16, 79, 110, 206, 237, 16, 47, 111, 142, + 48, 79, 143, 174, 80, 111, 175, 206, 17, 48, 17, 17, 207, 238, + 49, 80, 81, 112, 18, 18, 18, 49, 50, 81, 82, 113, 19, 50, + 51, 82, 83, 114, 608, 608, 484, 515, 360, 391, 236, 267, 112, 143, + 19, 19, 640, 640, 609, 640, 516, 547, 485, 516, 392, 423, 361, 392, + 268, 299, 237, 268, 144, 175, 113, 144, 20, 51, 20, 20, 672, 672, + 641, 672, 610, 641, 548, 579, 517, 548, 486, 517, 424, 455, 393, 424, + 362, 393, 300, 331, 269, 300, 238, 269, 176, 207, 145, 176, 114, 145, + 52, 83, 21, 52, 21, 21, 704, 704, 673, 704, 642, 673, 611, 642, + 580, 611, 549, 580, 518, 549, 487, 518, 456, 487, 425, 456, 394, 
425, + 363, 394, 332, 363, 301, 332, 270, 301, 239, 270, 208, 239, 177, 208, + 146, 177, 115, 146, 84, 115, 53, 84, 22, 53, 22, 22, 705, 736, + 674, 705, 643, 674, 581, 612, 550, 581, 519, 550, 457, 488, 426, 457, + 395, 426, 333, 364, 302, 333, 271, 302, 209, 240, 178, 209, 147, 178, + 85, 116, 54, 85, 23, 54, 706, 737, 675, 706, 582, 613, 551, 582, + 458, 489, 427, 458, 334, 365, 303, 334, 210, 241, 179, 210, 86, 117, + 55, 86, 707, 738, 583, 614, 459, 490, 335, 366, 211, 242, 87, 118, + 736, 736, 612, 643, 488, 519, 364, 395, 240, 271, 116, 147, 23, 23, + 768, 768, 737, 768, 644, 675, 613, 644, 520, 551, 489, 520, 396, 427, + 365, 396, 272, 303, 241, 272, 148, 179, 117, 148, 24, 55, 24, 24, + 800, 800, 769, 800, 738, 769, 676, 707, 645, 676, 614, 645, 552, 583, + 521, 552, 490, 521, 428, 459, 397, 428, 366, 397, 304, 335, 273, 304, + 242, 273, 180, 211, 149, 180, 118, 149, 56, 87, 25, 56, 25, 25, + 832, 832, 801, 832, 770, 801, 739, 770, 708, 739, 677, 708, 646, 677, + 615, 646, 584, 615, 553, 584, 522, 553, 491, 522, 460, 491, 429, 460, + 398, 429, 367, 398, 336, 367, 305, 336, 274, 305, 243, 274, 212, 243, + 181, 212, 150, 181, 119, 150, 88, 119, 57, 88, 26, 57, 26, 26, + 833, 864, 802, 833, 771, 802, 709, 740, 678, 709, 647, 678, 585, 616, + 554, 585, 523, 554, 461, 492, 430, 461, 399, 430, 337, 368, 306, 337, + 275, 306, 213, 244, 182, 213, 151, 182, 89, 120, 58, 89, 27, 58, + 834, 865, 803, 834, 710, 741, 679, 710, 586, 617, 555, 586, 462, 493, + 431, 462, 338, 369, 307, 338, 214, 245, 183, 214, 90, 121, 59, 90, + 835, 866, 711, 742, 587, 618, 463, 494, 339, 370, 215, 246, 91, 122, + 864, 864, 740, 771, 616, 647, 492, 523, 368, 399, 244, 275, 120, 151, + 27, 27, 896, 896, 865, 896, 772, 803, 741, 772, 648, 679, 617, 648, + 524, 555, 493, 524, 400, 431, 369, 400, 276, 307, 245, 276, 152, 183, + 121, 152, 28, 59, 28, 28, 928, 928, 897, 928, 866, 897, 804, 835, + 773, 804, 742, 773, 680, 711, 649, 680, 618, 649, 556, 587, 525, 556, + 494, 525, 432, 463, 401, 
432, 370, 401, 308, 339, 277, 308, 246, 277, + 184, 215, 153, 184, 122, 153, 60, 91, 29, 60, 29, 29, 960, 960, + 929, 960, 898, 929, 867, 898, 836, 867, 805, 836, 774, 805, 743, 774, + 712, 743, 681, 712, 650, 681, 619, 650, 588, 619, 557, 588, 526, 557, + 495, 526, 464, 495, 433, 464, 402, 433, 371, 402, 340, 371, 309, 340, + 278, 309, 247, 278, 216, 247, 185, 216, 154, 185, 123, 154, 92, 123, + 61, 92, 30, 61, 30, 30, 961, 992, 930, 961, 899, 930, 837, 868, + 806, 837, 775, 806, 713, 744, 682, 713, 651, 682, 589, 620, 558, 589, + 527, 558, 465, 496, 434, 465, 403, 434, 341, 372, 310, 341, 279, 310, + 217, 248, 186, 217, 155, 186, 93, 124, 62, 93, 31, 62, 962, 993, + 931, 962, 838, 869, 807, 838, 714, 745, 683, 714, 590, 621, 559, 590, + 466, 497, 435, 466, 342, 373, 311, 342, 218, 249, 187, 218, 94, 125, + 63, 94, 963, 994, 839, 870, 715, 746, 591, 622, 467, 498, 343, 374, + 219, 250, 95, 126, 868, 899, 744, 775, 620, 651, 496, 527, 372, 403, + 248, 279, 124, 155, 900, 931, 869, 900, 776, 807, 745, 776, 652, 683, + 621, 652, 528, 559, 497, 528, 404, 435, 373, 404, 280, 311, 249, 280, + 156, 187, 125, 156, 932, 963, 901, 932, 870, 901, 808, 839, 777, 808, + 746, 777, 684, 715, 653, 684, 622, 653, 560, 591, 529, 560, 498, 529, + 436, 467, 405, 436, 374, 405, 312, 343, 281, 312, 250, 281, 188, 219, + 157, 188, 126, 157, 964, 995, 933, 964, 902, 933, 871, 902, 840, 871, + 809, 840, 778, 809, 747, 778, 716, 747, 685, 716, 654, 685, 623, 654, + 592, 623, 561, 592, 530, 561, 499, 530, 468, 499, 437, 468, 406, 437, + 375, 406, 344, 375, 313, 344, 282, 313, 251, 282, 220, 251, 189, 220, + 158, 189, 127, 158, 965, 996, 934, 965, 903, 934, 841, 872, 810, 841, + 779, 810, 717, 748, 686, 717, 655, 686, 593, 624, 562, 593, 531, 562, + 469, 500, 438, 469, 407, 438, 345, 376, 314, 345, 283, 314, 221, 252, + 190, 221, 159, 190, 966, 997, 935, 966, 842, 873, 811, 842, 718, 749, + 687, 718, 594, 625, 563, 594, 470, 501, 439, 470, 346, 377, 315, 346, + 222, 253, 191, 222, 967, 998, 
843, 874, 719, 750, 595, 626, 471, 502, + 347, 378, 223, 254, 872, 903, 748, 779, 624, 655, 500, 531, 376, 407, + 252, 283, 904, 935, 873, 904, 780, 811, 749, 780, 656, 687, 625, 656, + 532, 563, 501, 532, 408, 439, 377, 408, 284, 315, 253, 284, 936, 967, + 905, 936, 874, 905, 812, 843, 781, 812, 750, 781, 688, 719, 657, 688, + 626, 657, 564, 595, 533, 564, 502, 533, 440, 471, 409, 440, 378, 409, + 316, 347, 285, 316, 254, 285, 968, 999, 937, 968, 906, 937, 875, 906, + 844, 875, 813, 844, 782, 813, 751, 782, 720, 751, 689, 720, 658, 689, + 627, 658, 596, 627, 565, 596, 534, 565, 503, 534, 472, 503, 441, 472, + 410, 441, 379, 410, 348, 379, 317, 348, 286, 317, 255, 286, 969, 1000, + 938, 969, 907, 938, 845, 876, 814, 845, 783, 814, 721, 752, 690, 721, + 659, 690, 597, 628, 566, 597, 535, 566, 473, 504, 442, 473, 411, 442, + 349, 380, 318, 349, 287, 318, 970, 1001, 939, 970, 846, 877, 815, 846, + 722, 753, 691, 722, 598, 629, 567, 598, 474, 505, 443, 474, 350, 381, + 319, 350, 971, 1002, 847, 878, 723, 754, 599, 630, 475, 506, 351, 382, + 876, 907, 752, 783, 628, 659, 504, 535, 380, 411, 908, 939, 877, 908, + 784, 815, 753, 784, 660, 691, 629, 660, 536, 567, 505, 536, 412, 443, + 381, 412, 940, 971, 909, 940, 878, 909, 816, 847, 785, 816, 754, 785, + 692, 723, 661, 692, 630, 661, 568, 599, 537, 568, 506, 537, 444, 475, + 413, 444, 382, 413, 972, 1003, 941, 972, 910, 941, 879, 910, 848, 879, + 817, 848, 786, 817, 755, 786, 724, 755, 693, 724, 662, 693, 631, 662, + 600, 631, 569, 600, 538, 569, 507, 538, 476, 507, 445, 476, 414, 445, + 383, 414, 973, 1004, 942, 973, 911, 942, 849, 880, 818, 849, 787, 818, + 725, 756, 694, 725, 663, 694, 601, 632, 570, 601, 539, 570, 477, 508, + 446, 477, 415, 446, 974, 1005, 943, 974, 850, 881, 819, 850, 726, 757, + 695, 726, 602, 633, 571, 602, 478, 509, 447, 478, 975, 1006, 851, 882, + 727, 758, 603, 634, 479, 510, 880, 911, 756, 787, 632, 663, 508, 539, + 912, 943, 881, 912, 788, 819, 757, 788, 664, 695, 633, 664, 540, 571, + 509, 
540, 944, 975, 913, 944, 882, 913, 820, 851, 789, 820, 758, 789, + 696, 727, 665, 696, 634, 665, 572, 603, 541, 572, 510, 541, 976, 1007, + 945, 976, 914, 945, 883, 914, 852, 883, 821, 852, 790, 821, 759, 790, + 728, 759, 697, 728, 666, 697, 635, 666, 604, 635, 573, 604, 542, 573, + 511, 542, 977, 1008, 946, 977, 915, 946, 853, 884, 822, 853, 791, 822, + 729, 760, 698, 729, 667, 698, 605, 636, 574, 605, 543, 574, 978, 1009, + 947, 978, 854, 885, 823, 854, 730, 761, 699, 730, 606, 637, 575, 606, + 979, 1010, 855, 886, 731, 762, 607, 638, 884, 915, 760, 791, 636, 667, + 916, 947, 885, 916, 792, 823, 761, 792, 668, 699, 637, 668, 948, 979, + 917, 948, 886, 917, 824, 855, 793, 824, 762, 793, 700, 731, 669, 700, + 638, 669, 980, 1011, 949, 980, 918, 949, 887, 918, 856, 887, 825, 856, + 794, 825, 763, 794, 732, 763, 701, 732, 670, 701, 639, 670, 981, 1012, + 950, 981, 919, 950, 857, 888, 826, 857, 795, 826, 733, 764, 702, 733, + 671, 702, 982, 1013, 951, 982, 858, 889, 827, 858, 734, 765, 703, 734, + 983, 1014, 859, 890, 735, 766, 888, 919, 764, 795, 920, 951, 889, 920, + 796, 827, 765, 796, 952, 983, 921, 952, 890, 921, 828, 859, 797, 828, + 766, 797, 984, 1015, 953, 984, 922, 953, 891, 922, 860, 891, 829, 860, + 798, 829, 767, 798, 985, 1016, 954, 985, 923, 954, 861, 892, 830, 861, + 799, 830, 986, 1017, 955, 986, 862, 893, 831, 862, 987, 1018, 863, 894, + 892, 923, 924, 955, 893, 924, 956, 987, 925, 956, 894, 925, 988, 1019, + 957, 988, 926, 957, 895, 926, 989, 1020, 958, 989, 927, 958, 990, 1021, + 959, 990, 991, 1022, 0, 0, }; DECLARE_ALIGNED(16, static const int16_t, vp9_default_iscan_4x4[16]) = { @@ -545,183 +524,201 @@ DECLARE_ALIGNED(16, static const int16_t, vp9_row_iscan_4x4[16]) = { }; DECLARE_ALIGNED(16, static const int16_t, vp9_col_iscan_8x8[64]) = { - 0, 3, 8, 15, 22, 32, 40, 47, 1, 5, 11, 18, 26, 34, 44, 51, - 2, 7, 13, 20, 28, 38, 46, 54, 4, 10, 16, 24, 31, 41, 50, 56, - 6, 12, 21, 27, 35, 43, 52, 58, 9, 17, 25, 33, 39, 48, 55, 60, + 0, 3, 8, 15, 22, 
32, 40, 47, 1, 5, 11, 18, 26, 34, 44, 51, + 2, 7, 13, 20, 28, 38, 46, 54, 4, 10, 16, 24, 31, 41, 50, 56, + 6, 12, 21, 27, 35, 43, 52, 58, 9, 17, 25, 33, 39, 48, 55, 60, 14, 23, 30, 37, 45, 53, 59, 62, 19, 29, 36, 42, 49, 57, 61, 63, }; DECLARE_ALIGNED(16, static const int16_t, vp9_row_iscan_8x8[64]) = { - 0, 1, 2, 5, 8, 12, 19, 24, 3, 4, 7, 10, 15, 20, 30, 39, - 6, 9, 13, 16, 21, 27, 37, 46, 11, 14, 17, 23, 28, 34, 44, 52, + 0, 1, 2, 5, 8, 12, 19, 24, 3, 4, 7, 10, 15, 20, 30, 39, + 6, 9, 13, 16, 21, 27, 37, 46, 11, 14, 17, 23, 28, 34, 44, 52, 18, 22, 25, 31, 35, 41, 50, 57, 26, 29, 33, 38, 43, 49, 55, 59, 32, 36, 42, 47, 51, 54, 60, 61, 40, 45, 48, 53, 56, 58, 62, 63, }; DECLARE_ALIGNED(16, static const int16_t, vp9_default_iscan_8x8[64]) = { - 0, 2, 5, 9, 14, 22, 31, 37, 1, 4, 8, 13, 19, 26, 38, 44, - 3, 6, 10, 17, 24, 30, 42, 49, 7, 11, 15, 21, 29, 36, 47, 53, + 0, 2, 5, 9, 14, 22, 31, 37, 1, 4, 8, 13, 19, 26, 38, 44, + 3, 6, 10, 17, 24, 30, 42, 49, 7, 11, 15, 21, 29, 36, 47, 53, 12, 16, 20, 27, 34, 43, 52, 57, 18, 23, 28, 35, 41, 48, 56, 60, 25, 32, 39, 45, 50, 55, 59, 62, 33, 40, 46, 51, 54, 58, 61, 63, }; DECLARE_ALIGNED(16, static const int16_t, vp9_col_iscan_16x16[256]) = { - 0, 4, 11, 20, 31, 43, 59, 75, 85, 109, 130, 150, 165, 181, 195, 198, - 1, 6, 14, 23, 34, 47, 64, 81, 95, 114, 135, 153, 171, 188, 201, 212, - 2, 8, 16, 25, 38, 52, 67, 83, 101, 116, 136, 157, 172, 190, 205, 216, - 3, 10, 18, 29, 41, 55, 71, 89, 103, 119, 141, 159, 176, 194, 208, 218, - 5, 12, 21, 32, 45, 58, 74, 93, 104, 123, 144, 164, 179, 196, 210, 223, - 7, 15, 26, 37, 49, 63, 78, 96, 112, 129, 146, 166, 182, 200, 215, 228, - 9, 19, 28, 39, 54, 69, 86, 102, 117, 132, 151, 170, 187, 206, 220, 230, - 13, 24, 35, 46, 60, 73, 91, 108, 122, 137, 154, 174, 189, 207, 224, 235, - 17, 30, 40, 53, 66, 82, 98, 115, 126, 142, 161, 180, 197, 213, 227, 237, - 22, 36, 48, 62, 76, 92, 105, 120, 133, 147, 167, 186, 203, 219, 232, 240, - 27, 44, 56, 70, 84, 99, 113, 127, 140, 156, 175, 193, 209, 226, 
236, 244, - 33, 51, 68, 79, 94, 110, 125, 138, 149, 162, 184, 202, 217, 229, 241, 247, - 42, 61, 77, 90, 106, 121, 134, 148, 160, 173, 191, 211, 225, 238, 245, 251, - 50, 72, 87, 100, 118, 128, 145, 158, 168, 183, 204, 222, 233, 242, 249, 253, - 57, 80, 97, 111, 131, 143, 155, 169, 178, 192, 214, 231, 239, 246, 250, 254, + 0, 4, 11, 20, 31, 43, 59, 75, 85, 109, 130, 150, 165, 181, 195, 198, + 1, 6, 14, 23, 34, 47, 64, 81, 95, 114, 135, 153, 171, 188, 201, 212, + 2, 8, 16, 25, 38, 52, 67, 83, 101, 116, 136, 157, 172, 190, 205, 216, + 3, 10, 18, 29, 41, 55, 71, 89, 103, 119, 141, 159, 176, 194, 208, 218, + 5, 12, 21, 32, 45, 58, 74, 93, 104, 123, 144, 164, 179, 196, 210, 223, + 7, 15, 26, 37, 49, 63, 78, 96, 112, 129, 146, 166, 182, 200, 215, 228, + 9, 19, 28, 39, 54, 69, 86, 102, 117, 132, 151, 170, 187, 206, 220, 230, + 13, 24, 35, 46, 60, 73, 91, 108, 122, 137, 154, 174, 189, 207, 224, 235, + 17, 30, 40, 53, 66, 82, 98, 115, 126, 142, 161, 180, 197, 213, 227, 237, + 22, 36, 48, 62, 76, 92, 105, 120, 133, 147, 167, 186, 203, 219, 232, 240, + 27, 44, 56, 70, 84, 99, 113, 127, 140, 156, 175, 193, 209, 226, 236, 244, + 33, 51, 68, 79, 94, 110, 125, 138, 149, 162, 184, 202, 217, 229, 241, 247, + 42, 61, 77, 90, 106, 121, 134, 148, 160, 173, 191, 211, 225, 238, 245, 251, + 50, 72, 87, 100, 118, 128, 145, 158, 168, 183, 204, 222, 233, 242, 249, 253, + 57, 80, 97, 111, 131, 143, 155, 169, 178, 192, 214, 231, 239, 246, 250, 254, 65, 88, 107, 124, 139, 152, 163, 177, 185, 199, 221, 234, 243, 248, 252, 255, }; DECLARE_ALIGNED(16, static const int16_t, vp9_row_iscan_16x16[256]) = { - 0, 1, 2, 4, 6, 9, 12, 17, 22, 29, 36, 43, 54, 64, 76, 86, - 3, 5, 7, 11, 15, 19, 25, 32, 38, 48, 59, 68, 84, 99, 115, 130, - 8, 10, 13, 18, 23, 27, 33, 42, 51, 60, 72, 88, 103, 119, 142, 167, - 14, 16, 20, 26, 31, 37, 44, 53, 61, 73, 85, 100, 116, 135, 161, 185, - 21, 24, 30, 35, 40, 47, 55, 65, 74, 81, 94, 112, 133, 154, 179, 205, - 28, 34, 39, 45, 50, 58, 67, 77, 87, 96, 106, 121, 146, 169, 
196, 212, - 41, 46, 49, 56, 63, 70, 79, 90, 98, 107, 122, 138, 159, 182, 207, 222, - 52, 57, 62, 69, 75, 83, 93, 102, 110, 120, 134, 150, 176, 195, 215, 226, - 66, 71, 78, 82, 91, 97, 108, 113, 127, 136, 148, 168, 188, 202, 221, 232, - 80, 89, 92, 101, 105, 114, 125, 131, 139, 151, 162, 177, 192, 208, 223, 234, - 95, 104, 109, 117, 123, 128, 143, 144, 155, 165, 175, 190, 206, 219, 233, 239, - 111, 118, 124, 129, 140, 147, 157, 164, 170, 181, 191, 203, 224, 230, 240, - 243, 126, 132, 137, 145, 153, 160, 174, 178, 184, 197, 204, 216, 231, 237, - 244, 246, 141, 149, 156, 166, 172, 180, 189, 199, 200, 210, 220, 228, 238, - 242, 249, 251, 152, 163, 171, 183, 186, 193, 201, 211, 214, 218, 227, 236, - 245, 247, 252, 253, 158, 173, 187, 194, 198, 209, 213, 217, 225, 229, 235, - 241, 248, 250, 254, 255, + 0, 1, 2, 4, 6, 9, 12, 17, 22, 29, 36, 43, 54, 64, 76, + 86, 3, 5, 7, 11, 15, 19, 25, 32, 38, 48, 59, 68, 84, 99, + 115, 130, 8, 10, 13, 18, 23, 27, 33, 42, 51, 60, 72, 88, 103, + 119, 142, 167, 14, 16, 20, 26, 31, 37, 44, 53, 61, 73, 85, 100, + 116, 135, 161, 185, 21, 24, 30, 35, 40, 47, 55, 65, 74, 81, 94, + 112, 133, 154, 179, 205, 28, 34, 39, 45, 50, 58, 67, 77, 87, 96, + 106, 121, 146, 169, 196, 212, 41, 46, 49, 56, 63, 70, 79, 90, 98, + 107, 122, 138, 159, 182, 207, 222, 52, 57, 62, 69, 75, 83, 93, 102, + 110, 120, 134, 150, 176, 195, 215, 226, 66, 71, 78, 82, 91, 97, 108, + 113, 127, 136, 148, 168, 188, 202, 221, 232, 80, 89, 92, 101, 105, 114, + 125, 131, 139, 151, 162, 177, 192, 208, 223, 234, 95, 104, 109, 117, 123, + 128, 143, 144, 155, 165, 175, 190, 206, 219, 233, 239, 111, 118, 124, 129, + 140, 147, 157, 164, 170, 181, 191, 203, 224, 230, 240, 243, 126, 132, 137, + 145, 153, 160, 174, 178, 184, 197, 204, 216, 231, 237, 244, 246, 141, 149, + 156, 166, 172, 180, 189, 199, 200, 210, 220, 228, 238, 242, 249, 251, 152, + 163, 171, 183, 186, 193, 201, 211, 214, 218, 227, 236, 245, 247, 252, 253, + 158, 173, 187, 194, 198, 209, 213, 217, 225, 229, 235, 241, 248, 250, 
254, + 255, }; DECLARE_ALIGNED(16, static const int16_t, vp9_default_iscan_16x16[256]) = { - 0, 2, 5, 9, 17, 24, 36, 44, 55, 72, 88, 104, 128, 143, 166, 179, - 1, 4, 8, 13, 20, 30, 40, 54, 66, 79, 96, 113, 141, 154, 178, 196, - 3, 7, 11, 18, 25, 33, 46, 57, 71, 86, 101, 119, 148, 164, 186, 201, - 6, 12, 16, 23, 31, 39, 53, 64, 78, 92, 110, 127, 153, 169, 193, 208, - 10, 14, 19, 28, 37, 47, 58, 67, 84, 98, 114, 133, 161, 176, 198, 214, - 15, 21, 26, 34, 43, 52, 65, 77, 91, 106, 120, 140, 165, 185, 205, 221, - 22, 27, 32, 41, 48, 60, 73, 85, 99, 116, 130, 151, 175, 190, 211, 225, - 29, 35, 42, 49, 59, 69, 81, 95, 108, 125, 139, 155, 182, 197, 217, 229, - 38, 45, 51, 61, 68, 80, 93, 105, 118, 134, 150, 168, 191, 207, 223, 234, - 50, 56, 63, 74, 83, 94, 109, 117, 129, 147, 163, 177, 199, 213, 228, 238, - 62, 70, 76, 87, 97, 107, 122, 131, 145, 159, 172, 188, 210, 222, 235, 242, - 75, 82, 90, 102, 112, 124, 138, 146, 157, 173, 187, 202, 219, 230, 240, 245, - 89, 100, 111, 123, 132, 142, 156, 167, 180, 189, 203, 216, 231, 237, 246, 250, - 103, 115, 126, 136, 149, 162, 171, 183, 194, 204, 215, 224, 236, 241, 248, - 252, 121, 135, 144, 158, 170, 181, 192, 200, 209, 218, 227, 233, 243, 244, - 251, 254, 137, 152, 160, 174, 184, 195, 206, 212, 220, 226, 232, 239, 247, - 249, 253, 255, + 0, 2, 5, 9, 17, 24, 36, 44, 55, 72, 88, 104, 128, 143, 166, + 179, 1, 4, 8, 13, 20, 30, 40, 54, 66, 79, 96, 113, 141, 154, + 178, 196, 3, 7, 11, 18, 25, 33, 46, 57, 71, 86, 101, 119, 148, + 164, 186, 201, 6, 12, 16, 23, 31, 39, 53, 64, 78, 92, 110, 127, + 153, 169, 193, 208, 10, 14, 19, 28, 37, 47, 58, 67, 84, 98, 114, + 133, 161, 176, 198, 214, 15, 21, 26, 34, 43, 52, 65, 77, 91, 106, + 120, 140, 165, 185, 205, 221, 22, 27, 32, 41, 48, 60, 73, 85, 99, + 116, 130, 151, 175, 190, 211, 225, 29, 35, 42, 49, 59, 69, 81, 95, + 108, 125, 139, 155, 182, 197, 217, 229, 38, 45, 51, 61, 68, 80, 93, + 105, 118, 134, 150, 168, 191, 207, 223, 234, 50, 56, 63, 74, 83, 94, + 109, 117, 129, 147, 163, 177, 
199, 213, 228, 238, 62, 70, 76, 87, 97, + 107, 122, 131, 145, 159, 172, 188, 210, 222, 235, 242, 75, 82, 90, 102, + 112, 124, 138, 146, 157, 173, 187, 202, 219, 230, 240, 245, 89, 100, 111, + 123, 132, 142, 156, 167, 180, 189, 203, 216, 231, 237, 246, 250, 103, 115, + 126, 136, 149, 162, 171, 183, 194, 204, 215, 224, 236, 241, 248, 252, 121, + 135, 144, 158, 170, 181, 192, 200, 209, 218, 227, 233, 243, 244, 251, 254, + 137, 152, 160, 174, 184, 195, 206, 212, 220, 226, 232, 239, 247, 249, 253, + 255, }; DECLARE_ALIGNED(16, static const int16_t, vp9_default_iscan_32x32[1024]) = { - 0, 2, 5, 10, 17, 25, 38, 47, 62, 83, 101, 121, 145, 170, 193, 204, - 210, 219, 229, 233, 245, 257, 275, 299, 342, 356, 377, 405, 455, 471, 495, - 527, 1, 4, 8, 15, 22, 30, 45, 58, 74, 92, 112, 133, 158, 184, 203, 215, 222, - 228, 234, 237, 256, 274, 298, 317, 355, 376, 404, 426, 470, 494, 526, 551, - 3, 7, 12, 18, 28, 36, 52, 64, 82, 102, 118, 142, 164, 189, 208, 217, 224, - 231, 235, 238, 273, 297, 316, 329, 375, 403, 425, 440, 493, 525, 550, 567, - 6, 11, 16, 23, 31, 43, 60, 73, 90, 109, 126, 150, 173, 196, 211, 220, 226, - 232, 236, 239, 296, 315, 328, 335, 402, 424, 439, 447, 524, 549, 566, 575, - 9, 14, 19, 29, 37, 50, 65, 78, 95, 116, 134, 157, 179, 201, 214, 223, 244, - 255, 272, 295, 341, 354, 374, 401, 454, 469, 492, 523, 582, 596, 617, 645, - 13, 20, 26, 35, 44, 54, 72, 85, 105, 123, 140, 163, 182, 205, 216, 225, - 254, 271, 294, 314, 353, 373, 400, 423, 468, 491, 522, 548, 595, 616, 644, - 666, 21, 27, 33, 42, 53, 63, 80, 94, 113, 132, 151, 172, 190, 209, 218, 227, - 270, 293, 313, 327, 372, 399, 422, 438, 490, 521, 547, 565, 615, 643, 665, - 680, 24, 32, 39, 48, 57, 71, 88, 104, 120, 139, 159, 178, 197, 212, 221, 230, - 292, 312, 326, 334, 398, 421, 437, 446, 520, 546, 564, 574, 642, 664, 679, - 687, 34, 40, 46, 56, 68, 81, 96, 111, 130, 147, 167, 186, 243, 253, 269, 291, - 340, 352, 371, 397, 453, 467, 489, 519, 581, 594, 614, 641, 693, 705, 723, - 747, 41, 49, 55, 67, 77, 91, 
107, 124, 138, 161, 177, 194, 252, 268, 290, - 311, 351, 370, 396, 420, 466, 488, 518, 545, 593, 613, 640, 663, 704, 722, - 746, 765, 51, 59, 66, 76, 89, 99, 119, 131, 149, 168, 181, 200, 267, 289, - 310, 325, 369, 395, 419, 436, 487, 517, 544, 563, 612, 639, 662, 678, 721, - 745, 764, 777, 61, 69, 75, 87, 100, 114, 129, 144, 162, 180, 191, 207, 288, - 309, 324, 333, 394, 418, 435, 445, 516, 543, 562, 573, 638, 661, 677, 686, - 744, 763, 776, 783, 70, 79, 86, 97, 108, 122, 137, 155, 242, 251, 266, 287, - 339, 350, 368, 393, 452, 465, 486, 515, 580, 592, 611, 637, 692, 703, 720, - 743, 788, 798, 813, 833, 84, 93, 103, 110, 125, 141, 154, 171, 250, 265, 286, - 308, 349, 367, 392, 417, 464, 485, 514, 542, 591, 610, 636, 660, 702, 719, - 742, 762, 797, 812, 832, 848, 98, 106, 115, 127, 143, 156, 169, 185, 264, - 285, 307, 323, 366, 391, 416, 434, 484, 513, 541, 561, 609, 635, 659, 676, - 718, 741, 761, 775, 811, 831, 847, 858, 117, 128, 136, 148, 160, 175, 188, - 198, 284, 306, 322, 332, 390, 415, 433, 444, 512, 540, 560, 572, 634, 658, - 675, 685, 740, 760, 774, 782, 830, 846, 857, 863, 135, 146, 152, 165, 241, - 249, 263, 283, 338, 348, 365, 389, 451, 463, 483, 511, 579, 590, 608, 633, - 691, 701, 717, 739, 787, 796, 810, 829, 867, 875, 887, 903, 153, 166, 174, - 183, 248, 262, 282, 305, 347, 364, 388, 414, 462, 482, 510, 539, 589, 607, - 632, 657, 700, 716, 738, 759, 795, 809, 828, 845, 874, 886, 902, 915, 176, - 187, 195, 202, 261, 281, 304, 321, 363, 387, 413, 432, 481, 509, 538, 559, - 606, 631, 656, 674, 715, 737, 758, 773, 808, 827, 844, 856, 885, 901, 914, - 923, 192, 199, 206, 213, 280, 303, 320, 331, 386, 412, 431, 443, 508, 537, - 558, 571, 630, 655, 673, 684, 736, 757, 772, 781, 826, 843, 855, 862, 900, - 913, 922, 927, 240, 247, 260, 279, 337, 346, 362, 385, 450, 461, 480, 507, - 578, 588, 605, 629, 690, 699, 714, 735, 786, 794, 807, 825, 866, 873, 884, - 899, 930, 936, 945, 957, 246, 259, 278, 302, 345, 361, 384, 411, 460, 479, - 506, 536, 587, 604, 628, 
654, 698, 713, 734, 756, 793, 806, 824, 842, 872, - 883, 898, 912, 935, 944, 956, 966, 258, 277, 301, 319, 360, 383, 410, 430, - 478, 505, 535, 557, 603, 627, 653, 672, 712, 733, 755, 771, 805, 823, 841, - 854, 882, 897, 911, 921, 943, 955, 965, 972, 276, 300, 318, 330, 382, 409, - 429, 442, 504, 534, 556, 570, 626, 652, 671, 683, 732, 754, 770, 780, 822, - 840, 853, 861, 896, 910, 920, 926, 954, 964, 971, 975, 336, 344, 359, 381, - 449, 459, 477, 503, 577, 586, 602, 625, 689, 697, 711, 731, 785, 792, 804, - 821, 865, 871, 881, 895, 929, 934, 942, 953, 977, 981, 987, 995, 343, 358, - 380, 408, 458, 476, 502, 533, 585, 601, 624, 651, 696, 710, 730, 753, 791, - 803, 820, 839, 870, 880, 894, 909, 933, 941, 952, 963, 980, 986, 994, 1001, - 357, 379, 407, 428, 475, 501, 532, 555, 600, 623, 650, 670, 709, 729, 752, - 769, 802, 819, 838, 852, 879, 893, 908, 919, 940, 951, 962, 970, 985, 993, - 1000, 1005, 378, 406, 427, 441, 500, 531, 554, 569, 622, 649, 669, 682, 728, - 751, 768, 779, 818, 837, 851, 860, 892, 907, 918, 925, 950, 961, 969, 974, - 992, 999, 1004, 1007, 448, 457, 474, 499, 576, 584, 599, 621, 688, 695, 708, - 727, 784, 790, 801, 817, 864, 869, 878, 891, 928, 932, 939, 949, 976, 979, - 984, 991, 1008, 1010, 1013, 1017, 456, 473, 498, 530, 583, 598, 620, 648, - 694, 707, 726, 750, 789, 800, 816, 836, 868, 877, 890, 906, 931, 938, 948, - 960, 978, 983, 990, 998, 1009, 1012, 1016, 1020, 472, 497, 529, 553, 597, - 619, 647, 668, 706, 725, 749, 767, 799, 815, 835, 850, 876, 889, 905, 917, - 937, 947, 959, 968, 982, 989, 997, 1003, 1011, 1015, 1019, 1022, 496, 528, - 552, 568, 618, 646, 667, 681, 724, 748, 766, 778, 814, 834, 849, 859, 888, - 904, 916, 924, 946, 958, 967, 973, 988, 996, 1002, 1006, 1014, 1018, 1021, - 1023, + 0, 2, 5, 10, 17, 25, 38, 47, 62, 83, 101, 121, 145, + 170, 193, 204, 210, 219, 229, 233, 245, 257, 275, 299, 342, 356, + 377, 405, 455, 471, 495, 527, 1, 4, 8, 15, 22, 30, 45, + 58, 74, 92, 112, 133, 158, 184, 203, 215, 222, 228, 234, 237, + 
256, 274, 298, 317, 355, 376, 404, 426, 470, 494, 526, 551, 3, + 7, 12, 18, 28, 36, 52, 64, 82, 102, 118, 142, 164, 189, + 208, 217, 224, 231, 235, 238, 273, 297, 316, 329, 375, 403, 425, + 440, 493, 525, 550, 567, 6, 11, 16, 23, 31, 43, 60, 73, + 90, 109, 126, 150, 173, 196, 211, 220, 226, 232, 236, 239, 296, + 315, 328, 335, 402, 424, 439, 447, 524, 549, 566, 575, 9, 14, + 19, 29, 37, 50, 65, 78, 95, 116, 134, 157, 179, 201, 214, + 223, 244, 255, 272, 295, 341, 354, 374, 401, 454, 469, 492, 523, + 582, 596, 617, 645, 13, 20, 26, 35, 44, 54, 72, 85, 105, + 123, 140, 163, 182, 205, 216, 225, 254, 271, 294, 314, 353, 373, + 400, 423, 468, 491, 522, 548, 595, 616, 644, 666, 21, 27, 33, + 42, 53, 63, 80, 94, 113, 132, 151, 172, 190, 209, 218, 227, + 270, 293, 313, 327, 372, 399, 422, 438, 490, 521, 547, 565, 615, + 643, 665, 680, 24, 32, 39, 48, 57, 71, 88, 104, 120, 139, + 159, 178, 197, 212, 221, 230, 292, 312, 326, 334, 398, 421, 437, + 446, 520, 546, 564, 574, 642, 664, 679, 687, 34, 40, 46, 56, + 68, 81, 96, 111, 130, 147, 167, 186, 243, 253, 269, 291, 340, + 352, 371, 397, 453, 467, 489, 519, 581, 594, 614, 641, 693, 705, + 723, 747, 41, 49, 55, 67, 77, 91, 107, 124, 138, 161, 177, + 194, 252, 268, 290, 311, 351, 370, 396, 420, 466, 488, 518, 545, + 593, 613, 640, 663, 704, 722, 746, 765, 51, 59, 66, 76, 89, + 99, 119, 131, 149, 168, 181, 200, 267, 289, 310, 325, 369, 395, + 419, 436, 487, 517, 544, 563, 612, 639, 662, 678, 721, 745, 764, + 777, 61, 69, 75, 87, 100, 114, 129, 144, 162, 180, 191, 207, + 288, 309, 324, 333, 394, 418, 435, 445, 516, 543, 562, 573, 638, + 661, 677, 686, 744, 763, 776, 783, 70, 79, 86, 97, 108, 122, + 137, 155, 242, 251, 266, 287, 339, 350, 368, 393, 452, 465, 486, + 515, 580, 592, 611, 637, 692, 703, 720, 743, 788, 798, 813, 833, + 84, 93, 103, 110, 125, 141, 154, 171, 250, 265, 286, 308, 349, + 367, 392, 417, 464, 485, 514, 542, 591, 610, 636, 660, 702, 719, + 742, 762, 797, 812, 832, 848, 98, 106, 115, 127, 143, 156, 169, + 185, 
264, 285, 307, 323, 366, 391, 416, 434, 484, 513, 541, 561, + 609, 635, 659, 676, 718, 741, 761, 775, 811, 831, 847, 858, 117, + 128, 136, 148, 160, 175, 188, 198, 284, 306, 322, 332, 390, 415, + 433, 444, 512, 540, 560, 572, 634, 658, 675, 685, 740, 760, 774, + 782, 830, 846, 857, 863, 135, 146, 152, 165, 241, 249, 263, 283, + 338, 348, 365, 389, 451, 463, 483, 511, 579, 590, 608, 633, 691, + 701, 717, 739, 787, 796, 810, 829, 867, 875, 887, 903, 153, 166, + 174, 183, 248, 262, 282, 305, 347, 364, 388, 414, 462, 482, 510, + 539, 589, 607, 632, 657, 700, 716, 738, 759, 795, 809, 828, 845, + 874, 886, 902, 915, 176, 187, 195, 202, 261, 281, 304, 321, 363, + 387, 413, 432, 481, 509, 538, 559, 606, 631, 656, 674, 715, 737, + 758, 773, 808, 827, 844, 856, 885, 901, 914, 923, 192, 199, 206, + 213, 280, 303, 320, 331, 386, 412, 431, 443, 508, 537, 558, 571, + 630, 655, 673, 684, 736, 757, 772, 781, 826, 843, 855, 862, 900, + 913, 922, 927, 240, 247, 260, 279, 337, 346, 362, 385, 450, 461, + 480, 507, 578, 588, 605, 629, 690, 699, 714, 735, 786, 794, 807, + 825, 866, 873, 884, 899, 930, 936, 945, 957, 246, 259, 278, 302, + 345, 361, 384, 411, 460, 479, 506, 536, 587, 604, 628, 654, 698, + 713, 734, 756, 793, 806, 824, 842, 872, 883, 898, 912, 935, 944, + 956, 966, 258, 277, 301, 319, 360, 383, 410, 430, 478, 505, 535, + 557, 603, 627, 653, 672, 712, 733, 755, 771, 805, 823, 841, 854, + 882, 897, 911, 921, 943, 955, 965, 972, 276, 300, 318, 330, 382, + 409, 429, 442, 504, 534, 556, 570, 626, 652, 671, 683, 732, 754, + 770, 780, 822, 840, 853, 861, 896, 910, 920, 926, 954, 964, 971, + 975, 336, 344, 359, 381, 449, 459, 477, 503, 577, 586, 602, 625, + 689, 697, 711, 731, 785, 792, 804, 821, 865, 871, 881, 895, 929, + 934, 942, 953, 977, 981, 987, 995, 343, 358, 380, 408, 458, 476, + 502, 533, 585, 601, 624, 651, 696, 710, 730, 753, 791, 803, 820, + 839, 870, 880, 894, 909, 933, 941, 952, 963, 980, 986, 994, 1001, + 357, 379, 407, 428, 475, 501, 532, 555, 600, 623, 650, 670, 
709, + 729, 752, 769, 802, 819, 838, 852, 879, 893, 908, 919, 940, 951, + 962, 970, 985, 993, 1000, 1005, 378, 406, 427, 441, 500, 531, 554, + 569, 622, 649, 669, 682, 728, 751, 768, 779, 818, 837, 851, 860, + 892, 907, 918, 925, 950, 961, 969, 974, 992, 999, 1004, 1007, 448, + 457, 474, 499, 576, 584, 599, 621, 688, 695, 708, 727, 784, 790, + 801, 817, 864, 869, 878, 891, 928, 932, 939, 949, 976, 979, 984, + 991, 1008, 1010, 1013, 1017, 456, 473, 498, 530, 583, 598, 620, 648, + 694, 707, 726, 750, 789, 800, 816, 836, 868, 877, 890, 906, 931, + 938, 948, 960, 978, 983, 990, 998, 1009, 1012, 1016, 1020, 472, 497, + 529, 553, 597, 619, 647, 668, 706, 725, 749, 767, 799, 815, 835, + 850, 876, 889, 905, 917, 937, 947, 959, 968, 982, 989, 997, 1003, + 1011, 1015, 1019, 1022, 496, 528, 552, 568, 618, 646, 667, 681, 724, + 748, 766, 778, 814, 834, 849, 859, 888, 904, 916, 924, 946, 958, + 967, 973, 988, 996, 1002, 1006, 1014, 1018, 1021, 1023, }; const scan_order vp9_default_scan_orders[TX_SIZES] = { - {default_scan_4x4, vp9_default_iscan_4x4, default_scan_4x4_neighbors}, - {default_scan_8x8, vp9_default_iscan_8x8, default_scan_8x8_neighbors}, - {default_scan_16x16, vp9_default_iscan_16x16, default_scan_16x16_neighbors}, - {default_scan_32x32, vp9_default_iscan_32x32, default_scan_32x32_neighbors}, + { default_scan_4x4, vp9_default_iscan_4x4, default_scan_4x4_neighbors }, + { default_scan_8x8, vp9_default_iscan_8x8, default_scan_8x8_neighbors }, + { default_scan_16x16, vp9_default_iscan_16x16, default_scan_16x16_neighbors }, + { default_scan_32x32, vp9_default_iscan_32x32, default_scan_32x32_neighbors }, }; const scan_order vp9_scan_orders[TX_SIZES][TX_TYPES] = { - { // TX_4X4 - {default_scan_4x4, vp9_default_iscan_4x4, default_scan_4x4_neighbors}, - {row_scan_4x4, vp9_row_iscan_4x4, row_scan_4x4_neighbors}, - {col_scan_4x4, vp9_col_iscan_4x4, col_scan_4x4_neighbors}, - {default_scan_4x4, vp9_default_iscan_4x4, default_scan_4x4_neighbors} - }, { // TX_8X8 - 
{default_scan_8x8, vp9_default_iscan_8x8, default_scan_8x8_neighbors}, - {row_scan_8x8, vp9_row_iscan_8x8, row_scan_8x8_neighbors}, - {col_scan_8x8, vp9_col_iscan_8x8, col_scan_8x8_neighbors}, - {default_scan_8x8, vp9_default_iscan_8x8, default_scan_8x8_neighbors} - }, { // TX_16X16 - {default_scan_16x16, vp9_default_iscan_16x16, default_scan_16x16_neighbors}, - {row_scan_16x16, vp9_row_iscan_16x16, row_scan_16x16_neighbors}, - {col_scan_16x16, vp9_col_iscan_16x16, col_scan_16x16_neighbors}, - {default_scan_16x16, vp9_default_iscan_16x16, default_scan_16x16_neighbors} - }, { // TX_32X32 - {default_scan_32x32, vp9_default_iscan_32x32, default_scan_32x32_neighbors}, - {default_scan_32x32, vp9_default_iscan_32x32, default_scan_32x32_neighbors}, - {default_scan_32x32, vp9_default_iscan_32x32, default_scan_32x32_neighbors}, - {default_scan_32x32, vp9_default_iscan_32x32, default_scan_32x32_neighbors}, - } + { // TX_4X4 + { default_scan_4x4, vp9_default_iscan_4x4, default_scan_4x4_neighbors }, + { row_scan_4x4, vp9_row_iscan_4x4, row_scan_4x4_neighbors }, + { col_scan_4x4, vp9_col_iscan_4x4, col_scan_4x4_neighbors }, + { default_scan_4x4, vp9_default_iscan_4x4, default_scan_4x4_neighbors } }, + { // TX_8X8 + { default_scan_8x8, vp9_default_iscan_8x8, default_scan_8x8_neighbors }, + { row_scan_8x8, vp9_row_iscan_8x8, row_scan_8x8_neighbors }, + { col_scan_8x8, vp9_col_iscan_8x8, col_scan_8x8_neighbors }, + { default_scan_8x8, vp9_default_iscan_8x8, default_scan_8x8_neighbors } }, + { // TX_16X16 + { default_scan_16x16, vp9_default_iscan_16x16, + default_scan_16x16_neighbors }, + { row_scan_16x16, vp9_row_iscan_16x16, row_scan_16x16_neighbors }, + { col_scan_16x16, vp9_col_iscan_16x16, col_scan_16x16_neighbors }, + { default_scan_16x16, vp9_default_iscan_16x16, + default_scan_16x16_neighbors } }, + { // TX_32X32 + { default_scan_32x32, vp9_default_iscan_32x32, + default_scan_32x32_neighbors }, + { default_scan_32x32, vp9_default_iscan_32x32, + default_scan_32x32_neighbors 
}, + { default_scan_32x32, vp9_default_iscan_32x32, + default_scan_32x32_neighbors }, + { default_scan_32x32, vp9_default_iscan_32x32, + default_scan_32x32_neighbors } } }; diff --git a/libs/libvpx/vp9/common/vp9_scan.h b/libs/libvpx/vp9/common/vp9_scan.h index 4c1ee8107c..b3520e7dcc 100644 --- a/libs/libvpx/vp9/common/vp9_scan.h +++ b/libs/libvpx/vp9/common/vp9_scan.h @@ -35,7 +35,8 @@ extern const scan_order vp9_scan_orders[TX_SIZES][TX_TYPES]; static INLINE int get_coef_context(const int16_t *neighbors, const uint8_t *token_cache, int c) { return (1 + token_cache[neighbors[MAX_NEIGHBORS * c + 0]] + - token_cache[neighbors[MAX_NEIGHBORS * c + 1]]) >> 1; + token_cache[neighbors[MAX_NEIGHBORS * c + 1]]) >> + 1; } static INLINE const scan_order *get_scan(const MACROBLOCKD *xd, TX_SIZE tx_size, diff --git a/libs/libvpx/vp9/common/vp9_seg_common.c b/libs/libvpx/vp9/common/vp9_seg_common.c index c8ef618b77..1c7a1d2e9a 100644 --- a/libs/libvpx/vp9/common/vp9_seg_common.c +++ b/libs/libvpx/vp9/common/vp9_seg_common.c @@ -17,8 +17,8 @@ static const int seg_feature_data_signed[SEG_LVL_MAX] = { 1, 1, 0, 0 }; -static const int seg_feature_data_max[SEG_LVL_MAX] = { - MAXQ, MAX_LOOP_FILTER, 3, 0 }; +static const int seg_feature_data_max[SEG_LVL_MAX] = { MAXQ, MAX_LOOP_FILTER, 3, + 0 }; // These functions provide access to new segment level features. // Eventually these function may be "optimized out" but for the moment, @@ -28,6 +28,7 @@ static const int seg_feature_data_max[SEG_LVL_MAX] = { void vp9_clearall_segfeatures(struct segmentation *seg) { vp9_zero(seg->feature_data); vp9_zero(seg->feature_mask); + seg->aq_av_offset = 0; } void vp9_enable_segfeature(struct segmentation *seg, int segment_id, @@ -55,9 +56,7 @@ void vp9_set_segdata(struct segmentation *seg, int segment_id, } const vpx_tree_index vp9_segment_tree[TREE_SIZE(MAX_SEGMENTS)] = { - 2, 4, 6, 8, 10, 12, - 0, -1, -2, -3, -4, -5, -6, -7 + 2, 4, 6, 8, 10, 12, 0, -1, -2, -3, -4, -5, -6, -7 }; - // TBD? 
Functions to read and write segment data with range / validity checking diff --git a/libs/libvpx/vp9/common/vp9_seg_common.h b/libs/libvpx/vp9/common/vp9_seg_common.h index 5b75d8d4ee..b9bf75d580 100644 --- a/libs/libvpx/vp9/common/vp9_seg_common.h +++ b/libs/libvpx/vp9/common/vp9_seg_common.h @@ -17,24 +17,23 @@ extern "C" { #endif -#define SEGMENT_DELTADATA 0 -#define SEGMENT_ABSDATA 1 +#define SEGMENT_DELTADATA 0 +#define SEGMENT_ABSDATA 1 -#define MAX_SEGMENTS 8 -#define SEG_TREE_PROBS (MAX_SEGMENTS-1) +#define MAX_SEGMENTS 8 +#define SEG_TREE_PROBS (MAX_SEGMENTS - 1) #define PREDICTION_PROBS 3 // Segment level features. typedef enum { - SEG_LVL_ALT_Q = 0, // Use alternate Quantizer .... - SEG_LVL_ALT_LF = 1, // Use alternate loop filter value... - SEG_LVL_REF_FRAME = 2, // Optional Segment reference frame - SEG_LVL_SKIP = 3, // Optional Segment (0,0) + skip mode - SEG_LVL_MAX = 4 // Number of features supported + SEG_LVL_ALT_Q = 0, // Use alternate Quantizer .... + SEG_LVL_ALT_LF = 1, // Use alternate loop filter value... 
+ SEG_LVL_REF_FRAME = 2, // Optional Segment reference frame + SEG_LVL_SKIP = 3, // Optional Segment (0,0) + skip mode + SEG_LVL_MAX = 4 // Number of features supported } SEG_LVL_FEATURES; - struct segmentation { uint8_t enabled; uint8_t update_map; @@ -46,30 +45,27 @@ struct segmentation { vpx_prob pred_probs[PREDICTION_PROBS]; int16_t feature_data[MAX_SEGMENTS][SEG_LVL_MAX]; - unsigned int feature_mask[MAX_SEGMENTS]; + uint32_t feature_mask[MAX_SEGMENTS]; + int aq_av_offset; }; static INLINE int segfeature_active(const struct segmentation *seg, int segment_id, SEG_LVL_FEATURES feature_id) { - return seg->enabled && - (seg->feature_mask[segment_id] & (1 << feature_id)); + return seg->enabled && (seg->feature_mask[segment_id] & (1 << feature_id)); } void vp9_clearall_segfeatures(struct segmentation *seg); -void vp9_enable_segfeature(struct segmentation *seg, - int segment_id, +void vp9_enable_segfeature(struct segmentation *seg, int segment_id, SEG_LVL_FEATURES feature_id); int vp9_seg_feature_data_max(SEG_LVL_FEATURES feature_id); int vp9_is_segfeature_signed(SEG_LVL_FEATURES feature_id); -void vp9_set_segdata(struct segmentation *seg, - int segment_id, - SEG_LVL_FEATURES feature_id, - int seg_data); +void vp9_set_segdata(struct segmentation *seg, int segment_id, + SEG_LVL_FEATURES feature_id, int seg_data); static INLINE int get_segdata(const struct segmentation *seg, int segment_id, SEG_LVL_FEATURES feature_id) { @@ -83,4 +79,3 @@ extern const vpx_tree_index vp9_segment_tree[TREE_SIZE(MAX_SEGMENTS)]; #endif #endif // VP9_COMMON_VP9_SEG_COMMON_H_ - diff --git a/libs/libvpx/vp9/common/vp9_textblit.c b/libs/libvpx/vp9/common/vp9_textblit.c deleted file mode 100644 index 60e95e08f5..0000000000 --- a/libs/libvpx/vp9/common/vp9_textblit.c +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - -#include "vp9/common/vp9_textblit.h" - -static const int font[] = { - 0x0, 0x5C00, 0x8020, 0xAFABEA, 0xD7EC0, 0x1111111, 0x1855740, 0x18000, - 0x45C0, 0x74400, 0x51140, 0x23880, 0xC4000, 0x21080, 0x80000, 0x111110, - 0xE9D72E, 0x87E40, 0x12AD732, 0xAAD62A, 0x4F94C4, 0x4D6B7, 0x456AA, - 0x3E8423, 0xAAD6AA, 0xAAD6A2, 0x2800, 0x2A00, 0x8A880, 0x52940, 0x22A20, - 0x15422, 0x6AD62E, 0x1E4A53E, 0xAAD6BF, 0x8C62E, 0xE8C63F, 0x118D6BF, - 0x1094BF, 0xCAC62E, 0x1F2109F, 0x118FE31, 0xF8C628, 0x8A89F, 0x108421F, - 0x1F1105F, 0x1F4105F, 0xE8C62E, 0x2294BF, 0x164C62E, 0x12694BF, 0x8AD6A2, - 0x10FC21, 0x1F8421F, 0x744107, 0xF8220F, 0x1151151, 0x117041, 0x119D731, - 0x47E0, 0x1041041, 0xFC400, 0x10440, 0x1084210, 0x820 -}; - -static void plot(int x, int y, unsigned char *image, int pitch) { - image[x + y * pitch] ^= 255; -} - -void vp9_blit_text(const char *msg, unsigned char *address, const int pitch) { - int letter_bitmap; - unsigned char *output_pos = address; - int colpos = 0; - - while (msg[colpos] != 0) { - char letter = msg[colpos]; - int fontcol, fontrow; - - if (letter <= 'Z' && letter >= ' ') - letter_bitmap = font[letter - ' ']; - else if (letter <= 'z' && letter >= 'a') - letter_bitmap = font[letter - 'a' + 'A' - ' ']; - else - letter_bitmap = font[0]; - - for (fontcol = 6; fontcol >= 0; fontcol--) - for (fontrow = 0; fontrow < 5; fontrow++) - output_pos[fontrow * pitch + fontcol] = - ((letter_bitmap >> (fontcol * 5)) & (1 << fontrow) ? 
255 : 0); - - output_pos += 7; - colpos++; - } -} - - - -/* Bresenham line algorithm */ -void vp9_blit_line(int x0, int x1, int y0, int y1, unsigned char *image, - int pitch) { - int steep = abs(y1 - y0) > abs(x1 - x0); - int deltax, deltay; - int error, ystep, y, x; - - if (steep) { - int t; - t = x0; - x0 = y0; - y0 = t; - - t = x1; - x1 = y1; - y1 = t; - } - - if (x0 > x1) { - int t; - t = x0; - x0 = x1; - x1 = t; - - t = y0; - y0 = y1; - y1 = t; - } - - deltax = x1 - x0; - deltay = abs(y1 - y0); - error = deltax / 2; - - y = y0; - - if (y0 < y1) - ystep = 1; - else - ystep = -1; - - if (steep) { - for (x = x0; x <= x1; x++) { - plot(y, x, image, pitch); - - error = error - deltay; - if (error < 0) { - y = y + ystep; - error = error + deltax; - } - } - } else { - for (x = x0; x <= x1; x++) { - plot(x, y, image, pitch); - - error = error - deltay; - if (error < 0) { - y = y + ystep; - error = error + deltax; - } - } - } -} diff --git a/libs/libvpx/vp9/common/vp9_textblit.h b/libs/libvpx/vp9/common/vp9_textblit.h deleted file mode 100644 index 158ec1b37e..0000000000 --- a/libs/libvpx/vp9/common/vp9_textblit.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#ifndef VP9_COMMON_VP9_TEXTBLIT_H_ -#define VP9_COMMON_VP9_TEXTBLIT_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -void vp9_blit_text(const char *msg, unsigned char *address, int pitch); - -void vp9_blit_line(int x0, int x1, int y0, int y1, unsigned char *image, - int pitch); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VP9_COMMON_VP9_TEXTBLIT_H_ diff --git a/libs/libvpx/vp9/common/vp9_thread_common.c b/libs/libvpx/vp9/common/vp9_thread_common.c index db78d6be89..07e659d235 100644 --- a/libs/libvpx/vp9/common/vp9_thread_common.c +++ b/libs/libvpx/vp9/common/vp9_thread_common.c @@ -29,8 +29,7 @@ static INLINE void mutex_lock(pthread_mutex_t *const mutex) { } } - if (!locked) - pthread_mutex_lock(mutex); + if (!locked) pthread_mutex_lock(mutex); } #endif // CONFIG_MULTITHREAD @@ -64,8 +63,7 @@ static INLINE void sync_write(VP9LfSync *const lf_sync, int r, int c, if (c < sb_cols - 1) { cur = c; - if (c % nsync) - sig = 0; + if (c % nsync) sig = 0; } else { cur = sb_cols + nsync; } @@ -87,12 +85,10 @@ static INLINE void sync_write(VP9LfSync *const lf_sync, int r, int c, } // Implement row loopfiltering for each thread. -static INLINE -void thread_loop_filter_rows(const YV12_BUFFER_CONFIG *const frame_buffer, - VP9_COMMON *const cm, - struct macroblockd_plane planes[MAX_MB_PLANE], - int start, int stop, int y_only, - VP9LfSync *const lf_sync) { +static INLINE void thread_loop_filter_rows( + const YV12_BUFFER_CONFIG *const frame_buffer, VP9_COMMON *const cm, + struct macroblockd_plane planes[MAX_MB_PLANE], int start, int stop, + int y_only, VP9LfSync *const lf_sync) { const int num_planes = y_only ? 
1 : MAX_MB_PLANE; const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2; int mi_row, mi_col; @@ -152,8 +148,7 @@ static int loop_filter_row_worker(VP9LfSync *const lf_sync, return 1; } -static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, - VP9_COMMON *cm, +static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, VP9_COMMON *cm, struct macroblockd_plane planes[MAX_MB_PLANE], int start, int stop, int y_only, VPxWorker *workers, int nworkers, @@ -212,13 +207,11 @@ static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, } } -void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, - VP9_COMMON *cm, +void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, VP9_COMMON *cm, struct macroblockd_plane planes[MAX_MB_PLANE], - int frame_filter_level, - int y_only, int partial_frame, - VPxWorker *workers, int num_workers, - VP9LfSync *lf_sync) { + int frame_filter_level, int y_only, + int partial_frame, VPxWorker *workers, + int num_workers, VP9LfSync *lf_sync) { int start_mi_row, end_mi_row, mi_rows_to_filter; if (!frame_filter_level) return; @@ -233,8 +226,8 @@ void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, end_mi_row = start_mi_row + mi_rows_to_filter; vp9_loop_filter_frame_init(cm, frame_filter_level); - loop_filter_rows_mt(frame, cm, planes, start_mi_row, end_mi_row, - y_only, workers, num_workers, lf_sync); + loop_filter_rows_mt(frame, cm, planes, start_mi_row, end_mi_row, y_only, + workers, num_workers, lf_sync); } // Set up nsync by width. 
@@ -342,8 +335,7 @@ void vp9_accumulate_frame_counts(FRAME_COUNTS *accum, accum->eob_branch[i][j][k][l][m] += counts->eob_branch[i][j][k][l][m]; for (n = 0; n < UNCONSTRAINED_NODES + 1; n++) - accum->coef[i][j][k][l][m][n] += - counts->coef[i][j][k][l][m][n]; + accum->coef[i][j][k][l][m][n] += counts->coef[i][j][k][l][m][n]; } } else { for (i = 0; i < TX_SIZES; i++) @@ -353,11 +345,11 @@ void vp9_accumulate_frame_counts(FRAME_COUNTS *accum, for (m = 0; m < COEFF_CONTEXTS; m++) accum->eob_branch[i][j][k][l][m] += counts->eob_branch[i][j][k][l][m]; - // In the encoder, coef is only updated at frame - // level, so not need to accumulate it here. - // for (n = 0; n < UNCONSTRAINED_NODES + 1; n++) - // accum->coef[i][j][k][l][m][n] += - // counts->coef[i][j][k][l][m][n]; + // In the encoder, coef is only updated at frame + // level, so not need to accumulate it here. + // for (n = 0; n < UNCONSTRAINED_NODES + 1; n++) + // accum->coef[i][j][k][l][m][n] += + // counts->coef[i][j][k][l][m][n]; } for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) @@ -373,17 +365,15 @@ void vp9_accumulate_frame_counts(FRAME_COUNTS *accum, accum->intra_inter[i][j] += counts->intra_inter[i][j]; for (i = 0; i < COMP_INTER_CONTEXTS; i++) - for (j = 0; j < 2; j++) - accum->comp_inter[i][j] += counts->comp_inter[i][j]; + for (j = 0; j < 2; j++) accum->comp_inter[i][j] += counts->comp_inter[i][j]; for (i = 0; i < REF_CONTEXTS; i++) for (j = 0; j < 2; j++) for (k = 0; k < 2; k++) - accum->single_ref[i][j][k] += counts->single_ref[i][j][k]; + accum->single_ref[i][j][k] += counts->single_ref[i][j][k]; for (i = 0; i < REF_CONTEXTS; i++) - for (j = 0; j < 2; j++) - accum->comp_ref[i][j] += counts->comp_ref[i][j]; + for (j = 0; j < 2; j++) accum->comp_ref[i][j] += counts->comp_ref[i][j]; for (i = 0; i < TX_SIZE_CONTEXTS; i++) { for (j = 0; j < TX_SIZES; j++) @@ -400,11 +390,9 @@ void vp9_accumulate_frame_counts(FRAME_COUNTS *accum, accum->tx.tx_totals[i] += counts->tx.tx_totals[i]; for (i = 0; i < 
SKIP_CONTEXTS; i++) - for (j = 0; j < 2; j++) - accum->skip[i][j] += counts->skip[i][j]; + for (j = 0; j < 2; j++) accum->skip[i][j] += counts->skip[i][j]; - for (i = 0; i < MV_JOINTS; i++) - accum->mv.joints[i] += counts->mv.joints[i]; + for (i = 0; i < MV_JOINTS; i++) accum->mv.joints[i] += counts->mv.joints[i]; for (k = 0; k < 2; k++) { nmv_component_counts *const comps = &accum->mv.comps[k]; @@ -416,8 +404,7 @@ void vp9_accumulate_frame_counts(FRAME_COUNTS *accum, comps->hp[i] += comps_t->hp[i]; } - for (i = 0; i < MV_CLASSES; i++) - comps->classes[i] += comps_t->classes[i]; + for (i = 0; i < MV_CLASSES; i++) comps->classes[i] += comps_t->classes[i]; for (i = 0; i < CLASS0_SIZE; i++) { comps->class0[i] += comps_t->class0[i]; @@ -426,10 +413,8 @@ void vp9_accumulate_frame_counts(FRAME_COUNTS *accum, } for (i = 0; i < MV_OFFSET_BITS; i++) - for (j = 0; j < 2; j++) - comps->bits[i][j] += comps_t->bits[i][j]; + for (j = 0; j < 2; j++) comps->bits[i][j] += comps_t->bits[i][j]; - for (i = 0; i < MV_FP_SIZE; i++) - comps->fp[i] += comps_t->fp[i]; + for (i = 0; i < MV_FP_SIZE; i++) comps->fp[i] += comps_t->fp[i]; } } diff --git a/libs/libvpx/vp9/common/vp9_thread_common.h b/libs/libvpx/vp9/common/vp9_thread_common.h index b3b60c253f..0f7c3ff748 100644 --- a/libs/libvpx/vp9/common/vp9_thread_common.h +++ b/libs/libvpx/vp9/common/vp9_thread_common.h @@ -47,13 +47,11 @@ void vp9_loop_filter_alloc(VP9LfSync *lf_sync, struct VP9Common *cm, int rows, void vp9_loop_filter_dealloc(VP9LfSync *lf_sync); // Multi-threaded loopfilter that uses the tile threads. 
-void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, - struct VP9Common *cm, +void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, struct VP9Common *cm, struct macroblockd_plane planes[MAX_MB_PLANE], - int frame_filter_level, - int y_only, int partial_frame, - VPxWorker *workers, int num_workers, - VP9LfSync *lf_sync); + int frame_filter_level, int y_only, + int partial_frame, VPxWorker *workers, + int num_workers, VP9LfSync *lf_sync); void vp9_accumulate_frame_counts(struct FRAME_COUNTS *accum, const struct FRAME_COUNTS *counts, int is_dec); diff --git a/libs/libvpx/vp9/common/vp9_tile_common.c b/libs/libvpx/vp9/common/vp9_tile_common.c index 9fcb97c854..672f808adc 100644 --- a/libs/libvpx/vp9/common/vp9_tile_common.c +++ b/libs/libvpx/vp9/common/vp9_tile_common.c @@ -38,20 +38,18 @@ void vp9_tile_init(TileInfo *tile, const VP9_COMMON *cm, int row, int col) { static int get_min_log2_tile_cols(const int sb64_cols) { int min_log2 = 0; - while ((MAX_TILE_WIDTH_B64 << min_log2) < sb64_cols) - ++min_log2; + while ((MAX_TILE_WIDTH_B64 << min_log2) < sb64_cols) ++min_log2; return min_log2; } static int get_max_log2_tile_cols(const int sb64_cols) { int max_log2 = 1; - while ((sb64_cols >> max_log2) >= MIN_TILE_WIDTH_B64) - ++max_log2; + while ((sb64_cols >> max_log2) >= MIN_TILE_WIDTH_B64) ++max_log2; return max_log2 - 1; } -void vp9_get_tile_n_bits(int mi_cols, - int *min_log2_tile_cols, int *max_log2_tile_cols) { +void vp9_get_tile_n_bits(int mi_cols, int *min_log2_tile_cols, + int *max_log2_tile_cols) { const int sb64_cols = mi_cols_aligned_to_sb(mi_cols) >> MI_BLOCK_SIZE_LOG2; *min_log2_tile_cols = get_min_log2_tile_cols(sb64_cols); *max_log2_tile_cols = get_max_log2_tile_cols(sb64_cols); diff --git a/libs/libvpx/vp9/common/vp9_tile_common.h b/libs/libvpx/vp9/common/vp9_tile_common.h index ae58805de1..1b11c2680d 100644 --- a/libs/libvpx/vp9/common/vp9_tile_common.h +++ b/libs/libvpx/vp9/common/vp9_tile_common.h @@ -24,14 +24,14 @@ typedef struct TileInfo { // 
initializes 'tile->mi_(row|col)_(start|end)' for (row, col) based on // 'cm->log2_tile_(rows|cols)' & 'cm->mi_(rows|cols)' -void vp9_tile_init(TileInfo *tile, const struct VP9Common *cm, - int row, int col); +void vp9_tile_init(TileInfo *tile, const struct VP9Common *cm, int row, + int col); void vp9_tile_set_row(TileInfo *tile, const struct VP9Common *cm, int row); void vp9_tile_set_col(TileInfo *tile, const struct VP9Common *cm, int col); -void vp9_get_tile_n_bits(int mi_cols, - int *min_log2_tile_cols, int *max_log2_tile_cols); +void vp9_get_tile_n_bits(int mi_cols, int *min_log2_tile_cols, + int *max_log2_tile_cols); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c b/libs/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c index 8d312d03f9..dcfc454aa0 100644 --- a/libs/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c +++ b/libs/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c @@ -8,6 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ +#include "./vp9_rtcd.h" #include "vpx_dsp/x86/inv_txfm_sse2.h" #include "vpx_dsp/x86/txfm_common_sse2.h" #include "vpx_ports/mem.h" @@ -38,9 +39,7 @@ void vp9_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride, iadst4_sse2(in); iadst4_sse2(in); break; - default: - assert(0); - break; + default: assert(0); break; } // Final round and shift @@ -110,9 +109,7 @@ void vp9_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride, iadst8_sse2(in); iadst8_sse2(in); break; - default: - assert(0); - break; + default: assert(0); break; } // Final rounding and shift @@ -169,9 +166,7 @@ void vp9_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest, iadst16_sse2(in0, in1); iadst16_sse2(in0, in1); break; - default: - assert(0); - break; + default: assert(0); break; } write_buffer_8x16(dest, in0, stride); diff --git a/libs/libvpx/vp9/common/x86/vp9_mfqe_sse2.asm b/libs/libvpx/vp9/common/x86/vp9_mfqe_sse2.asm index 6029420d11..30852049b4 100644 --- a/libs/libvpx/vp9/common/x86/vp9_mfqe_sse2.asm +++ b/libs/libvpx/vp9/common/x86/vp9_mfqe_sse2.asm @@ -46,7 +46,7 @@ sym(vp9_filter_by_weight16x16_sse2): mov rcx, 16 ; loop count pxor xmm6, xmm6 -.combine +.combine: movdqa xmm2, [rax] movdqa xmm4, [rdx] add rax, rsi @@ -123,7 +123,7 @@ sym(vp9_filter_by_weight8x8_sse2): mov rcx, 8 ; loop count pxor xmm4, xmm4 -.combine +.combine: movq xmm2, [rax] movq xmm3, [rdx] add rax, rsi @@ -190,7 +190,7 @@ sym(vp9_variance_and_sad_16x16_sse2): ; Because we're working with the actual output frames ; we can't depend on any kind of data alignment. -.accumulate +.accumulate: movdqa xmm0, [rax] ; src1 movdqa xmm1, [rdx] ; src2 add rax, rcx ; src1 + stride1 diff --git a/libs/libvpx/vp9/common/x86/vp9_postproc_sse2.asm b/libs/libvpx/vp9/common/x86/vp9_postproc_sse2.asm deleted file mode 100644 index ec8bfdb18f..0000000000 --- a/libs/libvpx/vp9/common/x86/vp9_postproc_sse2.asm +++ /dev/null @@ -1,694 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. 
All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - - -%include "vpx_ports/x86_abi_support.asm" - -;void vp9_post_proc_down_and_across_xmm -;( -; unsigned char *src_ptr, -; unsigned char *dst_ptr, -; int src_pixels_per_line, -; int dst_pixels_per_line, -; int rows, -; int cols, -; int flimit -;) -global sym(vp9_post_proc_down_and_across_xmm) PRIVATE -sym(vp9_post_proc_down_and_across_xmm): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 7 - SAVE_XMM 7 - GET_GOT rbx - push rsi - push rdi - ; end prolog - -%if ABI_IS_32BIT=1 && CONFIG_PIC=1 - ALIGN_STACK 16, rax - ; move the global rd onto the stack, since we don't have enough registers - ; to do PIC addressing - movdqa xmm0, [GLOBAL(rd42)] - sub rsp, 16 - movdqa [rsp], xmm0 -%define RD42 [rsp] -%else -%define RD42 [GLOBAL(rd42)] -%endif - - - movd xmm2, dword ptr arg(6) ;flimit - punpcklwd xmm2, xmm2 - punpckldq xmm2, xmm2 - punpcklqdq xmm2, xmm2 - - mov rsi, arg(0) ;src_ptr - mov rdi, arg(1) ;dst_ptr - - movsxd rcx, DWORD PTR arg(4) ;rows - movsxd rax, DWORD PTR arg(2) ;src_pixels_per_line ; destination pitch? 
- pxor xmm0, xmm0 ; mm0 = 00000000 - -.nextrow: - - xor rdx, rdx ; clear out rdx for use as loop counter -.nextcol: - movq xmm3, QWORD PTR [rsi] ; mm4 = r0 p0..p7 - punpcklbw xmm3, xmm0 ; mm3 = p0..p3 - movdqa xmm1, xmm3 ; mm1 = p0..p3 - psllw xmm3, 2 ; - - movq xmm5, QWORD PTR [rsi + rax] ; mm4 = r1 p0..p7 - punpcklbw xmm5, xmm0 ; mm5 = r1 p0..p3 - paddusw xmm3, xmm5 ; mm3 += mm6 - - ; thresholding - movdqa xmm7, xmm1 ; mm7 = r0 p0..p3 - psubusw xmm7, xmm5 ; mm7 = r0 p0..p3 - r1 p0..p3 - psubusw xmm5, xmm1 ; mm5 = r1 p0..p3 - r0 p0..p3 - paddusw xmm7, xmm5 ; mm7 = abs(r0 p0..p3 - r1 p0..p3) - pcmpgtw xmm7, xmm2 - - movq xmm5, QWORD PTR [rsi + 2*rax] ; mm4 = r2 p0..p7 - punpcklbw xmm5, xmm0 ; mm5 = r2 p0..p3 - paddusw xmm3, xmm5 ; mm3 += mm5 - - ; thresholding - movdqa xmm6, xmm1 ; mm6 = r0 p0..p3 - psubusw xmm6, xmm5 ; mm6 = r0 p0..p3 - r2 p0..p3 - psubusw xmm5, xmm1 ; mm5 = r2 p0..p3 - r2 p0..p3 - paddusw xmm6, xmm5 ; mm6 = abs(r0 p0..p3 - r2 p0..p3) - pcmpgtw xmm6, xmm2 - por xmm7, xmm6 ; accumulate thresholds - - - neg rax - movq xmm5, QWORD PTR [rsi+2*rax] ; mm4 = r-2 p0..p7 - punpcklbw xmm5, xmm0 ; mm5 = r-2 p0..p3 - paddusw xmm3, xmm5 ; mm3 += mm5 - - ; thresholding - movdqa xmm6, xmm1 ; mm6 = r0 p0..p3 - psubusw xmm6, xmm5 ; mm6 = p0..p3 - r-2 p0..p3 - psubusw xmm5, xmm1 ; mm5 = r-2 p0..p3 - p0..p3 - paddusw xmm6, xmm5 ; mm6 = abs(r0 p0..p3 - r-2 p0..p3) - pcmpgtw xmm6, xmm2 - por xmm7, xmm6 ; accumulate thresholds - - movq xmm4, QWORD PTR [rsi+rax] ; mm4 = r-1 p0..p7 - punpcklbw xmm4, xmm0 ; mm4 = r-1 p0..p3 - paddusw xmm3, xmm4 ; mm3 += mm5 - - ; thresholding - movdqa xmm6, xmm1 ; mm6 = r0 p0..p3 - psubusw xmm6, xmm4 ; mm6 = p0..p3 - r-2 p0..p3 - psubusw xmm4, xmm1 ; mm5 = r-1 p0..p3 - p0..p3 - paddusw xmm6, xmm4 ; mm6 = abs(r0 p0..p3 - r-1 p0..p3) - pcmpgtw xmm6, xmm2 - por xmm7, xmm6 ; accumulate thresholds - - - paddusw xmm3, RD42 ; mm3 += round value - psraw xmm3, 3 ; mm3 /= 8 - - pand xmm1, xmm7 ; mm1 select vals > thresh from source - pandn xmm7, 
xmm3 ; mm7 select vals < thresh from blurred result - paddusw xmm1, xmm7 ; combination - - packuswb xmm1, xmm0 ; pack to bytes - movq QWORD PTR [rdi], xmm1 ; - - neg rax ; pitch is positive - add rsi, 8 - add rdi, 8 - - add rdx, 8 - cmp edx, dword arg(5) ;cols - - jl .nextcol - - ; done with the all cols, start the across filtering in place - sub rsi, rdx - sub rdi, rdx - - xor rdx, rdx - movq mm0, QWORD PTR [rdi-8]; - -.acrossnextcol: - movq xmm7, QWORD PTR [rdi +rdx -2] - movd xmm4, DWORD PTR [rdi +rdx +6] - - pslldq xmm4, 8 - por xmm4, xmm7 - - movdqa xmm3, xmm4 - psrldq xmm3, 2 - punpcklbw xmm3, xmm0 ; mm3 = p0..p3 - movdqa xmm1, xmm3 ; mm1 = p0..p3 - psllw xmm3, 2 - - - movdqa xmm5, xmm4 - psrldq xmm5, 3 - punpcklbw xmm5, xmm0 ; mm5 = p1..p4 - paddusw xmm3, xmm5 ; mm3 += mm6 - - ; thresholding - movdqa xmm7, xmm1 ; mm7 = p0..p3 - psubusw xmm7, xmm5 ; mm7 = p0..p3 - p1..p4 - psubusw xmm5, xmm1 ; mm5 = p1..p4 - p0..p3 - paddusw xmm7, xmm5 ; mm7 = abs(p0..p3 - p1..p4) - pcmpgtw xmm7, xmm2 - - movdqa xmm5, xmm4 - psrldq xmm5, 4 - punpcklbw xmm5, xmm0 ; mm5 = p2..p5 - paddusw xmm3, xmm5 ; mm3 += mm5 - - ; thresholding - movdqa xmm6, xmm1 ; mm6 = p0..p3 - psubusw xmm6, xmm5 ; mm6 = p0..p3 - p1..p4 - psubusw xmm5, xmm1 ; mm5 = p1..p4 - p0..p3 - paddusw xmm6, xmm5 ; mm6 = abs(p0..p3 - p1..p4) - pcmpgtw xmm6, xmm2 - por xmm7, xmm6 ; accumulate thresholds - - - movdqa xmm5, xmm4 ; mm5 = p-2..p5 - punpcklbw xmm5, xmm0 ; mm5 = p-2..p1 - paddusw xmm3, xmm5 ; mm3 += mm5 - - ; thresholding - movdqa xmm6, xmm1 ; mm6 = p0..p3 - psubusw xmm6, xmm5 ; mm6 = p0..p3 - p1..p4 - psubusw xmm5, xmm1 ; mm5 = p1..p4 - p0..p3 - paddusw xmm6, xmm5 ; mm6 = abs(p0..p3 - p1..p4) - pcmpgtw xmm6, xmm2 - por xmm7, xmm6 ; accumulate thresholds - - psrldq xmm4, 1 ; mm4 = p-1..p5 - punpcklbw xmm4, xmm0 ; mm4 = p-1..p2 - paddusw xmm3, xmm4 ; mm3 += mm5 - - ; thresholding - movdqa xmm6, xmm1 ; mm6 = p0..p3 - psubusw xmm6, xmm4 ; mm6 = p0..p3 - p1..p4 - psubusw xmm4, xmm1 ; mm5 = p1..p4 - p0..p3 - 
paddusw xmm6, xmm4 ; mm6 = abs(p0..p3 - p1..p4) - pcmpgtw xmm6, xmm2 - por xmm7, xmm6 ; accumulate thresholds - - paddusw xmm3, RD42 ; mm3 += round value - psraw xmm3, 3 ; mm3 /= 8 - - pand xmm1, xmm7 ; mm1 select vals > thresh from source - pandn xmm7, xmm3 ; mm7 select vals < thresh from blurred result - paddusw xmm1, xmm7 ; combination - - packuswb xmm1, xmm0 ; pack to bytes - movq QWORD PTR [rdi+rdx-8], mm0 ; store previous four bytes - movdq2q mm0, xmm1 - - add rdx, 8 - cmp edx, dword arg(5) ;cols - jl .acrossnextcol; - - ; last 8 pixels - movq QWORD PTR [rdi+rdx-8], mm0 - - ; done with this rwo - add rsi,rax ; next line - mov eax, dword arg(3) ;dst_pixels_per_line ; destination pitch? - add rdi,rax ; next destination - mov eax, dword arg(2) ;src_pixels_per_line ; destination pitch? - - dec rcx ; decrement count - jnz .nextrow ; next row - -%if ABI_IS_32BIT=1 && CONFIG_PIC=1 - add rsp,16 - pop rsp -%endif - ; begin epilog - pop rdi - pop rsi - RESTORE_GOT - RESTORE_XMM - UNSHADOW_ARGS - pop rbp - ret -%undef RD42 - - -;void vp9_mbpost_proc_down_xmm(unsigned char *dst, -; int pitch, int rows, int cols,int flimit) -extern sym(vp9_rv) -global sym(vp9_mbpost_proc_down_xmm) PRIVATE -sym(vp9_mbpost_proc_down_xmm): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 5 - SAVE_XMM 7 - GET_GOT rbx - push rsi - push rdi - ; end prolog - - ALIGN_STACK 16, rax - sub rsp, 128+16 - - ; unsigned char d[16][8] at [rsp] - ; create flimit2 at [rsp+128] - mov eax, dword ptr arg(4) ;flimit - mov [rsp+128], eax - mov [rsp+128+4], eax - mov [rsp+128+8], eax - mov [rsp+128+12], eax -%define flimit4 [rsp+128] - -%if ABI_IS_32BIT=0 - lea r8, [GLOBAL(sym(vp9_rv))] -%endif - - ;rows +=8; - add dword arg(2), 8 - - ;for(c=0; cref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1]) - return 1; + if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1]) return 1; return 0; } static void setup_compound_reference_mode(VP9_COMMON *cm) { if (cm->ref_frame_sign_bias[LAST_FRAME] == - 
cm->ref_frame_sign_bias[GOLDEN_FRAME]) { + cm->ref_frame_sign_bias[GOLDEN_FRAME]) { cm->comp_fixed_ref = ALTREF_FRAME; cm->comp_var_ref[0] = LAST_FRAME; cm->comp_var_ref[1] = GOLDEN_FRAME; } else if (cm->ref_frame_sign_bias[LAST_FRAME] == - cm->ref_frame_sign_bias[ALTREF_FRAME]) { + cm->ref_frame_sign_bias[ALTREF_FRAME]) { cm->comp_fixed_ref = GOLDEN_FRAME; cm->comp_var_ref[0] = LAST_FRAME; cm->comp_var_ref[1] = ALTREF_FRAME; @@ -83,8 +82,7 @@ static int decode_unsigned_max(struct vpx_read_bit_buffer *rb, int max) { static TX_MODE read_tx_mode(vpx_reader *r) { TX_MODE tx_mode = vpx_read_literal(r, 2); - if (tx_mode == ALLOW_32X32) - tx_mode += vpx_read_bit(r); + if (tx_mode == ALLOW_32X32) tx_mode += vpx_read_bit(r); return tx_mode; } @@ -121,9 +119,9 @@ static void read_inter_mode_probs(FRAME_CONTEXT *fc, vpx_reader *r) { static REFERENCE_MODE read_frame_reference_mode(const VP9_COMMON *cm, vpx_reader *r) { if (is_compound_reference_allowed(cm)) { - return vpx_read_bit(r) ? (vpx_read_bit(r) ? REFERENCE_MODE_SELECT - : COMPOUND_REFERENCE) - : SINGLE_REFERENCE; + return vpx_read_bit(r) + ? (vpx_read_bit(r) ? 
REFERENCE_MODE_SELECT : COMPOUND_REFERENCE) + : SINGLE_REFERENCE; } else { return SINGLE_REFERENCE; } @@ -151,8 +149,7 @@ static void read_frame_reference_mode_probs(VP9_COMMON *cm, vpx_reader *r) { static void update_mv_probs(vpx_prob *p, int n, vpx_reader *r) { int i; for (i = 0; i < n; ++i) - if (vpx_read(r, MV_UPDATE_PROB)) - p[i] = (vpx_read_literal(r, 7) << 1) | 1; + if (vpx_read(r, MV_UPDATE_PROB)) p[i] = (vpx_read_literal(r, 7) << 1) | 1; } static void read_mv_probs(nmv_context *ctx, int allow_hp, vpx_reader *r) { @@ -184,233 +181,194 @@ static void read_mv_probs(nmv_context *ctx, int allow_hp, vpx_reader *r) { } } -static void inverse_transform_block_inter(MACROBLOCKD* xd, int plane, - const TX_SIZE tx_size, - uint8_t *dst, int stride, - int eob) { +static void inverse_transform_block_inter(MACROBLOCKD *xd, int plane, + const TX_SIZE tx_size, uint8_t *dst, + int stride, int eob) { struct macroblockd_plane *const pd = &xd->plane[plane]; - if (eob > 0) { - tran_low_t *const dqcoeff = pd->dqcoeff; + tran_low_t *const dqcoeff = pd->dqcoeff; + assert(eob > 0); #if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - if (xd->lossless) { - vp9_highbd_iwht4x4_add(dqcoeff, dst, stride, eob, xd->bd); - } else { - switch (tx_size) { - case TX_4X4: - vp9_highbd_idct4x4_add(dqcoeff, dst, stride, eob, xd->bd); - break; - case TX_8X8: - vp9_highbd_idct8x8_add(dqcoeff, dst, stride, eob, xd->bd); - break; - case TX_16X16: - vp9_highbd_idct16x16_add(dqcoeff, dst, stride, eob, xd->bd); - break; - case TX_32X32: - vp9_highbd_idct32x32_add(dqcoeff, dst, stride, eob, xd->bd); - break; - default: - assert(0 && "Invalid transform size"); - } - } + if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { + if (xd->lossless) { + vp9_highbd_iwht4x4_add(dqcoeff, dst, stride, eob, xd->bd); } else { - if (xd->lossless) { - vp9_iwht4x4_add(dqcoeff, dst, stride, eob); - } else { - switch (tx_size) { - case TX_4X4: - vp9_idct4x4_add(dqcoeff, dst, stride, eob); - break; 
- case TX_8X8: - vp9_idct8x8_add(dqcoeff, dst, stride, eob); - break; - case TX_16X16: - vp9_idct16x16_add(dqcoeff, dst, stride, eob); - break; - case TX_32X32: - vp9_idct32x32_add(dqcoeff, dst, stride, eob); - break; - default: - assert(0 && "Invalid transform size"); - return; - } + switch (tx_size) { + case TX_4X4: + vp9_highbd_idct4x4_add(dqcoeff, dst, stride, eob, xd->bd); + break; + case TX_8X8: + vp9_highbd_idct8x8_add(dqcoeff, dst, stride, eob, xd->bd); + break; + case TX_16X16: + vp9_highbd_idct16x16_add(dqcoeff, dst, stride, eob, xd->bd); + break; + case TX_32X32: + vp9_highbd_idct32x32_add(dqcoeff, dst, stride, eob, xd->bd); + break; + default: assert(0 && "Invalid transform size"); } } -#else + } else { if (xd->lossless) { vp9_iwht4x4_add(dqcoeff, dst, stride, eob); } else { switch (tx_size) { - case TX_4X4: - vp9_idct4x4_add(dqcoeff, dst, stride, eob); - break; - case TX_8X8: - vp9_idct8x8_add(dqcoeff, dst, stride, eob); - break; - case TX_16X16: - vp9_idct16x16_add(dqcoeff, dst, stride, eob); - break; - case TX_32X32: - vp9_idct32x32_add(dqcoeff, dst, stride, eob); - break; - default: - assert(0 && "Invalid transform size"); - return; + case TX_4X4: vp9_idct4x4_add(dqcoeff, dst, stride, eob); break; + case TX_8X8: vp9_idct8x8_add(dqcoeff, dst, stride, eob); break; + case TX_16X16: vp9_idct16x16_add(dqcoeff, dst, stride, eob); break; + case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break; + default: assert(0 && "Invalid transform size"); return; } } + } +#else + if (xd->lossless) { + vp9_iwht4x4_add(dqcoeff, dst, stride, eob); + } else { + switch (tx_size) { + case TX_4X4: vp9_idct4x4_add(dqcoeff, dst, stride, eob); break; + case TX_8X8: vp9_idct8x8_add(dqcoeff, dst, stride, eob); break; + case TX_16X16: vp9_idct16x16_add(dqcoeff, dst, stride, eob); break; + case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break; + default: assert(0 && "Invalid transform size"); return; + } + } #endif // CONFIG_VP9_HIGHBITDEPTH - if (eob == 1) 
{ - dqcoeff[0] = 0; - } else { - if (tx_size <= TX_16X16 && eob <= 10) - memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0])); - else if (tx_size == TX_32X32 && eob <= 34) - memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0])); - else - memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0])); - } + if (eob == 1) { + dqcoeff[0] = 0; + } else { + if (tx_size <= TX_16X16 && eob <= 10) + memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0])); + else if (tx_size == TX_32X32 && eob <= 34) + memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0])); + else + memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0])); } } -static void inverse_transform_block_intra(MACROBLOCKD* xd, int plane, +static void inverse_transform_block_intra(MACROBLOCKD *xd, int plane, const TX_TYPE tx_type, - const TX_SIZE tx_size, - uint8_t *dst, int stride, - int eob) { + const TX_SIZE tx_size, uint8_t *dst, + int stride, int eob) { struct macroblockd_plane *const pd = &xd->plane[plane]; - if (eob > 0) { - tran_low_t *const dqcoeff = pd->dqcoeff; + tran_low_t *const dqcoeff = pd->dqcoeff; + assert(eob > 0); #if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - if (xd->lossless) { - vp9_highbd_iwht4x4_add(dqcoeff, dst, stride, eob, xd->bd); - } else { - switch (tx_size) { - case TX_4X4: - vp9_highbd_iht4x4_add(tx_type, dqcoeff, dst, stride, eob, xd->bd); - break; - case TX_8X8: - vp9_highbd_iht8x8_add(tx_type, dqcoeff, dst, stride, eob, xd->bd); - break; - case TX_16X16: - vp9_highbd_iht16x16_add(tx_type, dqcoeff, dst, stride, eob, xd->bd); - break; - case TX_32X32: - vp9_highbd_idct32x32_add(dqcoeff, dst, stride, eob, xd->bd); - break; - default: - assert(0 && "Invalid transform size"); - } - } + if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { + if (xd->lossless) { + vp9_highbd_iwht4x4_add(dqcoeff, dst, stride, eob, xd->bd); } else { - if (xd->lossless) { - vp9_iwht4x4_add(dqcoeff, dst, stride, eob); - } else { - switch (tx_size) { - case TX_4X4: - 
vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob); - break; - case TX_8X8: - vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob); - break; - case TX_16X16: - vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob); - break; - case TX_32X32: - vp9_idct32x32_add(dqcoeff, dst, stride, eob); - break; - default: - assert(0 && "Invalid transform size"); - return; - } + switch (tx_size) { + case TX_4X4: + vp9_highbd_iht4x4_add(tx_type, dqcoeff, dst, stride, eob, xd->bd); + break; + case TX_8X8: + vp9_highbd_iht8x8_add(tx_type, dqcoeff, dst, stride, eob, xd->bd); + break; + case TX_16X16: + vp9_highbd_iht16x16_add(tx_type, dqcoeff, dst, stride, eob, xd->bd); + break; + case TX_32X32: + vp9_highbd_idct32x32_add(dqcoeff, dst, stride, eob, xd->bd); + break; + default: assert(0 && "Invalid transform size"); } } -#else + } else { if (xd->lossless) { vp9_iwht4x4_add(dqcoeff, dst, stride, eob); } else { switch (tx_size) { - case TX_4X4: - vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob); - break; - case TX_8X8: - vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob); - break; + case TX_4X4: vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob); break; + case TX_8X8: vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob); break; case TX_16X16: vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob); break; - case TX_32X32: - vp9_idct32x32_add(dqcoeff, dst, stride, eob); - break; - default: - assert(0 && "Invalid transform size"); - return; + case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break; + default: assert(0 && "Invalid transform size"); return; } } + } +#else + if (xd->lossless) { + vp9_iwht4x4_add(dqcoeff, dst, stride, eob); + } else { + switch (tx_size) { + case TX_4X4: vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob); break; + case TX_8X8: vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob); break; + case TX_16X16: + vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob); + break; + case TX_32X32: vp9_idct32x32_add(dqcoeff, dst, stride, eob); break; + default: assert(0 && 
"Invalid transform size"); return; + } + } #endif // CONFIG_VP9_HIGHBITDEPTH - if (eob == 1) { - dqcoeff[0] = 0; - } else { - if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10) - memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0])); - else if (tx_size == TX_32X32 && eob <= 34) - memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0])); - else - memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0])); - } + if (eob == 1) { + dqcoeff[0] = 0; + } else { + if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10) + memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0])); + else if (tx_size == TX_32X32 && eob <= 34) + memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0])); + else + memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0])); } } -static void predict_and_reconstruct_intra_block(MACROBLOCKD *const xd, - vpx_reader *r, - MODE_INFO *const mi, - int plane, +static void predict_and_reconstruct_intra_block(TileWorkerData *twd, + MODE_INFO *const mi, int plane, int row, int col, TX_SIZE tx_size) { + MACROBLOCKD *const xd = &twd->xd; struct macroblockd_plane *const pd = &xd->plane[plane]; PREDICTION_MODE mode = (plane == 0) ? mi->mode : mi->uv_mode; uint8_t *dst; dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col]; if (mi->sb_type < BLOCK_8X8) - if (plane == 0) - mode = xd->mi[0]->bmi[(row << 1) + col].as_mode; + if (plane == 0) mode = xd->mi[0]->bmi[(row << 1) + col].as_mode; - vp9_predict_intra_block(xd, pd->n4_wl, tx_size, mode, - dst, pd->dst.stride, dst, pd->dst.stride, - col, row, plane); + vp9_predict_intra_block(xd, pd->n4_wl, tx_size, mode, dst, pd->dst.stride, + dst, pd->dst.stride, col, row, plane); if (!mi->skip) { - const TX_TYPE tx_type = (plane || xd->lossless) ? - DCT_DCT : intra_mode_to_tx_type_lookup[mode]; - const scan_order *sc = (plane || xd->lossless) ? 
- &vp9_default_scan_orders[tx_size] : &vp9_scan_orders[tx_size][tx_type]; - const int eob = vp9_decode_block_tokens(xd, plane, sc, col, row, tx_size, - r, mi->segment_id); - inverse_transform_block_intra(xd, plane, tx_type, tx_size, - dst, pd->dst.stride, eob); + const TX_TYPE tx_type = + (plane || xd->lossless) ? DCT_DCT : intra_mode_to_tx_type_lookup[mode]; + const scan_order *sc = (plane || xd->lossless) + ? &vp9_default_scan_orders[tx_size] + : &vp9_scan_orders[tx_size][tx_type]; + const int eob = vp9_decode_block_tokens(twd, plane, sc, col, row, tx_size, + mi->segment_id); + if (eob > 0) { + inverse_transform_block_intra(xd, plane, tx_type, tx_size, dst, + pd->dst.stride, eob); + } } } -static int reconstruct_inter_block(MACROBLOCKD *const xd, vpx_reader *r, - MODE_INFO *const mi, int plane, - int row, int col, TX_SIZE tx_size) { +static int reconstruct_inter_block(TileWorkerData *twd, MODE_INFO *const mi, + int plane, int row, int col, + TX_SIZE tx_size) { + MACROBLOCKD *const xd = &twd->xd; struct macroblockd_plane *const pd = &xd->plane[plane]; const scan_order *sc = &vp9_default_scan_orders[tx_size]; - const int eob = vp9_decode_block_tokens(xd, plane, sc, col, row, tx_size, r, + const int eob = vp9_decode_block_tokens(twd, plane, sc, col, row, tx_size, mi->segment_id); - inverse_transform_block_inter(xd, plane, tx_size, - &pd->dst.buf[4 * row * pd->dst.stride + 4 * col], - pd->dst.stride, eob); + if (eob > 0) { + inverse_transform_block_inter( + xd, plane, tx_size, &pd->dst.buf[4 * row * pd->dst.stride + 4 * col], + pd->dst.stride, eob); + } return eob; } -static void build_mc_border(const uint8_t *src, int src_stride, - uint8_t *dst, int dst_stride, - int x, int y, int b_w, int b_h, int w, int h) { +static void build_mc_border(const uint8_t *src, int src_stride, uint8_t *dst, + int dst_stride, int x, int y, int b_w, int b_h, + int w, int h) { // Get a pointer to the start of the real data for this row. 
const uint8_t *ref_row = src - x - y * src_stride; @@ -423,39 +381,31 @@ static void build_mc_border(const uint8_t *src, int src_stride, int right = 0, copy; int left = x < 0 ? -x : 0; - if (left > b_w) - left = b_w; + if (left > b_w) left = b_w; - if (x + b_w > w) - right = x + b_w - w; + if (x + b_w > w) right = x + b_w - w; - if (right > b_w) - right = b_w; + if (right > b_w) right = b_w; copy = b_w - left - right; - if (left) - memset(dst, ref_row[0], left); + if (left) memset(dst, ref_row[0], left); - if (copy) - memcpy(dst + left, ref_row + x + left, copy); + if (copy) memcpy(dst + left, ref_row + x + left, copy); - if (right) - memset(dst + left + copy, ref_row[w - 1], right); + if (right) memset(dst + left + copy, ref_row[w - 1], right); dst += dst_stride; ++y; - if (y > 0 && y < h) - ref_row += src_stride; + if (y > 0 && y < h) ref_row += src_stride; } while (--b_h); } #if CONFIG_VP9_HIGHBITDEPTH static void high_build_mc_border(const uint8_t *src8, int src_stride, - uint16_t *dst, int dst_stride, - int x, int y, int b_w, int b_h, - int w, int h) { + uint16_t *dst, int dst_stride, int x, int y, + int b_w, int b_h, int w, int h) { // Get a pointer to the start of the real data for this row. const uint16_t *src = CONVERT_TO_SHORTPTR(src8); const uint16_t *ref_row = src - x - y * src_stride; @@ -469,31 +419,24 @@ static void high_build_mc_border(const uint8_t *src8, int src_stride, int right = 0, copy; int left = x < 0 ? 
-x : 0; - if (left > b_w) - left = b_w; + if (left > b_w) left = b_w; - if (x + b_w > w) - right = x + b_w - w; + if (x + b_w > w) right = x + b_w - w; - if (right > b_w) - right = b_w; + if (right > b_w) right = b_w; copy = b_w - left - right; - if (left) - vpx_memset16(dst, ref_row[0], left); + if (left) vpx_memset16(dst, ref_row[0], left); - if (copy) - memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t)); + if (copy) memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t)); - if (right) - vpx_memset16(dst + left + copy, ref_row[w - 1], right); + if (right) vpx_memset16(dst + left + copy, ref_row[w - 1], right); dst += dst_stride; ++y; - if (y > 0 && y < h) - ref_row += src_stride; + if (y > 0 && y < h) ref_row += src_stride; } while (--b_h); } #endif // CONFIG_VP9_HIGHBITDEPTH @@ -502,70 +445,64 @@ static void high_build_mc_border(const uint8_t *src8, int src_stride, static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride, int x0, int y0, int b_w, int b_h, int frame_width, int frame_height, - int border_offset, - uint8_t *const dst, int dst_buf_stride, - int subpel_x, int subpel_y, + int border_offset, uint8_t *const dst, + int dst_buf_stride, int subpel_x, int subpel_y, const InterpKernel *kernel, - const struct scale_factors *sf, - MACROBLOCKD *xd, + const struct scale_factors *sf, MACROBLOCKD *xd, int w, int h, int ref, int xs, int ys) { DECLARE_ALIGNED(16, uint16_t, mc_buf_high[80 * 2 * 80 * 2]); const uint8_t *buf_ptr; if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - high_build_mc_border(buf_ptr1, pre_buf_stride, mc_buf_high, b_w, - x0, y0, b_w, b_h, frame_width, frame_height); + high_build_mc_border(buf_ptr1, pre_buf_stride, mc_buf_high, b_w, x0, y0, + b_w, b_h, frame_width, frame_height); buf_ptr = CONVERT_TO_BYTEPTR(mc_buf_high) + border_offset; } else { - build_mc_border(buf_ptr1, pre_buf_stride, (uint8_t *)mc_buf_high, b_w, - x0, y0, b_w, b_h, frame_width, frame_height); + build_mc_border(buf_ptr1, 
pre_buf_stride, (uint8_t *)mc_buf_high, b_w, x0, + y0, b_w, b_h, frame_width, frame_height); buf_ptr = ((uint8_t *)mc_buf_high) + border_offset; } if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - high_inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, - subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd); + highbd_inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, + subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd); } else { - inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, - subpel_y, sf, w, h, ref, kernel, xs, ys); + inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, subpel_y, sf, + w, h, ref, kernel, xs, ys); } } #else static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride, int x0, int y0, int b_w, int b_h, int frame_width, int frame_height, - int border_offset, - uint8_t *const dst, int dst_buf_stride, - int subpel_x, int subpel_y, + int border_offset, uint8_t *const dst, + int dst_buf_stride, int subpel_x, int subpel_y, const InterpKernel *kernel, - const struct scale_factors *sf, - int w, int h, int ref, int xs, int ys) { + const struct scale_factors *sf, int w, int h, + int ref, int xs, int ys) { DECLARE_ALIGNED(16, uint8_t, mc_buf[80 * 2 * 80 * 2]); const uint8_t *buf_ptr; - build_mc_border(buf_ptr1, pre_buf_stride, mc_buf, b_w, - x0, y0, b_w, b_h, frame_width, frame_height); + build_mc_border(buf_ptr1, pre_buf_stride, mc_buf, b_w, x0, y0, b_w, b_h, + frame_width, frame_height); buf_ptr = mc_buf + border_offset; - inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, - subpel_y, sf, w, h, ref, kernel, xs, ys); + inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, subpel_y, sf, w, + h, ref, kernel, xs, ys); } #endif // CONFIG_VP9_HIGHBITDEPTH -static void dec_build_inter_predictors(VPxWorker *const worker, MACROBLOCKD *xd, - int plane, int bw, int bh, int x, - int y, int w, int h, int mi_x, int mi_y, - const InterpKernel *kernel, - const struct scale_factors *sf, - struct buf_2d 
*pre_buf, - struct buf_2d *dst_buf, const MV* mv, - RefCntBuffer *ref_frame_buf, - int is_scaled, int ref) { +static void dec_build_inter_predictors( + VPxWorker *const worker, MACROBLOCKD *xd, int plane, int bw, int bh, int x, + int y, int w, int h, int mi_x, int mi_y, const InterpKernel *kernel, + const struct scale_factors *sf, struct buf_2d *pre_buf, + struct buf_2d *dst_buf, const MV *mv, RefCntBuffer *ref_frame_buf, + int is_scaled, int ref) { struct macroblockd_plane *const pd = &xd->plane[plane]; uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x; MV32 scaled_mv; - int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, - buf_stride, subpel_x, subpel_y; + int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, buf_stride, + subpel_x, subpel_y; uint8_t *ref_frame, *buf_ptr; // Get reference frame pointer, width and height. @@ -576,18 +513,17 @@ static void dec_build_inter_predictors(VPxWorker *const worker, MACROBLOCKD *xd, } else { frame_width = ref_frame_buf->buf.uv_crop_width; frame_height = ref_frame_buf->buf.uv_crop_height; - ref_frame = plane == 1 ? ref_frame_buf->buf.u_buffer - : ref_frame_buf->buf.v_buffer; + ref_frame = + plane == 1 ? ref_frame_buf->buf.u_buffer : ref_frame_buf->buf.v_buffer; } if (is_scaled) { - const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, mv, bw, bh, - pd->subsampling_x, - pd->subsampling_y); + const MV mv_q4 = clamp_mv_to_umv_border_sb( + xd, mv, bw, bh, pd->subsampling_x, pd->subsampling_y); // Co-ordinate of containing block to pixel precision. 
int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)); int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)); -#if CONFIG_BETTER_HW_COMPATIBILITY +#if 0 // CONFIG_BETTER_HW_COMPATIBILITY assert(xd->mi[0]->sb_type != BLOCK_4X8 && xd->mi[0]->sb_type != BLOCK_8X4); assert(mv_q4.row == mv->row * (1 << (1 - pd->subsampling_y)) && @@ -640,8 +576,8 @@ static void dec_build_inter_predictors(VPxWorker *const worker, MACROBLOCKD *xd, // Do border extension if there is motion or the // width/height is not a multiple of 8 pixels. - if (is_scaled || scaled_mv.col || scaled_mv.row || - (frame_width & 0x7) || (frame_height & 0x7)) { + if (is_scaled || scaled_mv.col || scaled_mv.row || (frame_width & 0x7) || + (frame_height & 0x7)) { int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1; // Get reference block bottom right horizontal coordinate. @@ -663,8 +599,8 @@ static void dec_build_inter_predictors(VPxWorker *const worker, MACROBLOCKD *xd, // Wait until reference block is ready. Pad 7 more pixels as last 7 // pixels of each superblock row can be changed by next superblock row. if (worker != NULL) - vp9_frameworker_wait(worker, ref_frame_buf, - VPXMAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1)); + vp9_frameworker_wait(worker, ref_frame_buf, VPXMAX(0, (y1 + 7)) + << (plane == 0 ? 0 : 1)); // Skip border extension if block is inside the frame. 
if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 || @@ -675,11 +611,9 @@ static void dec_build_inter_predictors(VPxWorker *const worker, MACROBLOCKD *xd, const int b_h = y1 - y0 + 1; const int border_offset = y_pad * 3 * b_w + x_pad * 3; - extend_and_predict(buf_ptr1, buf_stride, x0, y0, b_w, b_h, - frame_width, frame_height, border_offset, - dst, dst_buf->stride, - subpel_x, subpel_y, - kernel, sf, + extend_and_predict(buf_ptr1, buf_stride, x0, y0, b_w, b_h, frame_width, + frame_height, border_offset, dst, dst_buf->stride, + subpel_x, subpel_y, kernel, sf, #if CONFIG_VP9_HIGHBITDEPTH xd, #endif @@ -691,27 +625,27 @@ static void dec_build_inter_predictors(VPxWorker *const worker, MACROBLOCKD *xd, // pixels of each superblock row can be changed by next superblock row. if (worker != NULL) { const int y1 = (y0_16 + (h - 1) * ys) >> SUBPEL_BITS; - vp9_frameworker_wait(worker, ref_frame_buf, - VPXMAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1)); + vp9_frameworker_wait(worker, ref_frame_buf, VPXMAX(0, (y1 + 7)) + << (plane == 0 ? 
0 : 1)); } } #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, - subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd); + highbd_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, + subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd); } else { inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys); } #else - inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, - subpel_y, sf, w, h, ref, kernel, xs, ys); + inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, subpel_y, + sf, w, h, ref, kernel, xs, ys); #endif // CONFIG_VP9_HIGHBITDEPTH } static void dec_build_inter_predictors_sb(VP9Decoder *const pbi, - MACROBLOCKD *xd, - int mi_row, int mi_col) { + MACROBLOCKD *xd, int mi_row, + int mi_col) { int plane; const int mi_x = mi_col * MI_SIZE; const int mi_y = mi_row * MI_SIZE; @@ -721,8 +655,8 @@ static void dec_build_inter_predictors_sb(VP9Decoder *const pbi, const int is_compound = has_second_ref(mi); int ref; int is_scaled; - VPxWorker *const fwo = pbi->frame_parallel_decode ? - pbi->frame_worker_owner : NULL; + VPxWorker *const fwo = + pbi->frame_parallel_decode ? 
pbi->frame_worker_owner : NULL; for (ref = 0; ref < 1 + is_compound; ++ref) { const MV_REFERENCE_FRAME frame = mi->ref_frame[ref]; @@ -754,10 +688,10 @@ static void dec_build_inter_predictors_sb(VP9Decoder *const pbi, for (y = 0; y < num_4x4_h; ++y) { for (x = 0; x < num_4x4_w; ++x) { const MV mv = average_split_mvs(pd, mi, ref, i++); - dec_build_inter_predictors(fwo, xd, plane, n4w_x4, n4h_x4, - 4 * x, 4 * y, 4, 4, mi_x, mi_y, kernel, - sf, pre_buf, dst_buf, &mv, - ref_frame_buf, is_scaled, ref); + dec_build_inter_predictors(fwo, xd, plane, n4w_x4, n4h_x4, 4 * x, + 4 * y, 4, 4, mi_x, mi_y, kernel, sf, + pre_buf, dst_buf, &mv, ref_frame_buf, + is_scaled, ref); } } } @@ -771,22 +705,14 @@ static void dec_build_inter_predictors_sb(VP9Decoder *const pbi, const int n4w_x4 = 4 * num_4x4_w; const int n4h_x4 = 4 * num_4x4_h; struct buf_2d *const pre_buf = &pd->pre[ref]; - dec_build_inter_predictors(fwo, xd, plane, n4w_x4, n4h_x4, - 0, 0, n4w_x4, n4h_x4, mi_x, mi_y, kernel, - sf, pre_buf, dst_buf, &mv, - ref_frame_buf, is_scaled, ref); + dec_build_inter_predictors(fwo, xd, plane, n4w_x4, n4h_x4, 0, 0, n4w_x4, + n4h_x4, mi_x, mi_y, kernel, sf, pre_buf, + dst_buf, &mv, ref_frame_buf, is_scaled, ref); } } } } -static INLINE TX_SIZE dec_get_uv_tx_size(const MODE_INFO *mi, - int n4_wl, int n4_hl) { - // get minimum log2 num4x4s dimension - const int x = VPXMIN(n4_wl, n4_hl); - return VPXMIN(mi->tx_size, x); -} - static INLINE void dec_reset_skip_context(MACROBLOCKD *xd) { int i; for (i = 0; i < MAX_MB_PLANE; i++) { @@ -808,9 +734,8 @@ static void set_plane_n4(MACROBLOCKD *const xd, int bw, int bh, int bwl, } static MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd, - BLOCK_SIZE bsize, int mi_row, int mi_col, - int bw, int bh, int x_mis, int y_mis, - int bwl, int bhl) { + BLOCK_SIZE bsize, int mi_row, int mi_col, int bw, + int bh, int x_mis, int y_mis, int bwl, int bhl) { const int offset = mi_row * cm->mi_stride + mi_col; int x, y; const TileInfo *const tile = 
&xd->tile; @@ -837,29 +762,29 @@ static MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd, return xd->mi[0]; } -static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd, - int mi_row, int mi_col, - vpx_reader *r, BLOCK_SIZE bsize, - int bwl, int bhl) { +static void decode_block(TileWorkerData *twd, VP9Decoder *const pbi, int mi_row, + int mi_col, BLOCK_SIZE bsize, int bwl, int bhl) { VP9_COMMON *const cm = &pbi->common; const int less8x8 = bsize < BLOCK_8X8; const int bw = 1 << (bwl - 1); const int bh = 1 << (bhl - 1); const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col); const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row); + vpx_reader *r = &twd->bit_reader; + MACROBLOCKD *const xd = &twd->xd; - MODE_INFO *mi = set_offsets(cm, xd, bsize, mi_row, mi_col, - bw, bh, x_mis, y_mis, bwl, bhl); + MODE_INFO *mi = set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, + y_mis, bwl, bhl); if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) { const BLOCK_SIZE uv_subsize = ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y]; if (uv_subsize == BLOCK_INVALID) - vpx_internal_error(xd->error_info, - VPX_CODEC_CORRUPT_FRAME, "Invalid block size."); + vpx_internal_error(xd->error_info, VPX_CODEC_CORRUPT_FRAME, + "Invalid block size."); } - vpx_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis); + vp9_read_mode_info(twd, pbi, mi_row, mi_col, x_mis, y_mis); if (mi->skip) { dec_reset_skip_context(xd); @@ -869,22 +794,27 @@ static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd, int plane; for (plane = 0; plane < MAX_MB_PLANE; ++plane) { const struct macroblockd_plane *const pd = &xd->plane[plane]; - const TX_SIZE tx_size = - plane ? dec_get_uv_tx_size(mi, pd->n4_wl, pd->n4_hl) - : mi->tx_size; + const TX_SIZE tx_size = plane ? 
get_uv_tx_size(mi, pd) : mi->tx_size; const int num_4x4_w = pd->n4_w; const int num_4x4_h = pd->n4_h; const int step = (1 << tx_size); int row, col; - const int max_blocks_wide = num_4x4_w + (xd->mb_to_right_edge >= 0 ? - 0 : xd->mb_to_right_edge >> (5 + pd->subsampling_x)); - const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? - 0 : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); + const int max_blocks_wide = + num_4x4_w + (xd->mb_to_right_edge >= 0 + ? 0 + : xd->mb_to_right_edge >> (5 + pd->subsampling_x)); + const int max_blocks_high = + num_4x4_h + (xd->mb_to_bottom_edge >= 0 + ? 0 + : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); + + xd->max_blocks_wide = xd->mb_to_right_edge >= 0 ? 0 : max_blocks_wide; + xd->max_blocks_high = xd->mb_to_bottom_edge >= 0 ? 0 : max_blocks_high; for (row = 0; row < max_blocks_high; row += step) for (col = 0; col < max_blocks_wide; col += step) - predict_and_reconstruct_intra_block(xd, r, mi, plane, - row, col, tx_size); + predict_and_reconstruct_intra_block(twd, mi, plane, row, col, + tx_size); } } else { // Prediction @@ -897,26 +827,30 @@ static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd, for (plane = 0; plane < MAX_MB_PLANE; ++plane) { const struct macroblockd_plane *const pd = &xd->plane[plane]; - const TX_SIZE tx_size = - plane ? dec_get_uv_tx_size(mi, pd->n4_wl, pd->n4_hl) - : mi->tx_size; + const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd) : mi->tx_size; const int num_4x4_w = pd->n4_w; const int num_4x4_h = pd->n4_h; const int step = (1 << tx_size); int row, col; - const int max_blocks_wide = num_4x4_w + (xd->mb_to_right_edge >= 0 ? - 0 : xd->mb_to_right_edge >> (5 + pd->subsampling_x)); - const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? - 0 : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); + const int max_blocks_wide = + num_4x4_w + (xd->mb_to_right_edge >= 0 + ? 
0 + : xd->mb_to_right_edge >> (5 + pd->subsampling_x)); + const int max_blocks_high = + num_4x4_h + + (xd->mb_to_bottom_edge >= 0 ? 0 : xd->mb_to_bottom_edge >> + (5 + pd->subsampling_y)); + + xd->max_blocks_wide = xd->mb_to_right_edge >= 0 ? 0 : max_blocks_wide; + xd->max_blocks_high = xd->mb_to_bottom_edge >= 0 ? 0 : max_blocks_high; for (row = 0; row < max_blocks_high; row += step) for (col = 0; col < max_blocks_wide; col += step) - eobtotal += reconstruct_inter_block(xd, r, mi, plane, row, col, - tx_size); + eobtotal += + reconstruct_inter_block(twd, mi, plane, row, col, tx_size); } - if (!less8x8 && eobtotal == 0) - mi->skip = 1; // skip loopfilter + if (!less8x8 && eobtotal == 0) mi->skip = 1; // skip loopfilter } } @@ -927,24 +861,24 @@ static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd, } } -static INLINE int dec_partition_plane_context(const MACROBLOCKD *xd, - int mi_row, int mi_col, - int bsl) { - const PARTITION_CONTEXT *above_ctx = xd->above_seg_context + mi_col; - const PARTITION_CONTEXT *left_ctx = xd->left_seg_context + (mi_row & MI_MASK); - int above = (*above_ctx >> bsl) & 1 , left = (*left_ctx >> bsl) & 1; +static INLINE int dec_partition_plane_context(TileWorkerData *twd, int mi_row, + int mi_col, int bsl) { + const PARTITION_CONTEXT *above_ctx = twd->xd.above_seg_context + mi_col; + const PARTITION_CONTEXT *left_ctx = + twd->xd.left_seg_context + (mi_row & MI_MASK); + int above = (*above_ctx >> bsl) & 1, left = (*left_ctx >> bsl) & 1; -// assert(bsl >= 0); + // assert(bsl >= 0); return (left * 2 + above) + bsl * PARTITION_PLOFFSET; } -static INLINE void dec_update_partition_context(MACROBLOCKD *xd, - int mi_row, int mi_col, - BLOCK_SIZE subsize, +static INLINE void dec_update_partition_context(TileWorkerData *twd, int mi_row, + int mi_col, BLOCK_SIZE subsize, int bw) { - PARTITION_CONTEXT *const above_ctx = xd->above_seg_context + mi_col; - PARTITION_CONTEXT *const left_ctx = xd->left_seg_context + (mi_row & MI_MASK); + 
PARTITION_CONTEXT *const above_ctx = twd->xd.above_seg_context + mi_col; + PARTITION_CONTEXT *const left_ctx = + twd->xd.left_seg_context + (mi_row & MI_MASK); // update the partition context at the end notes. set partition bits // of block sizes larger than the current one to be one, and partition @@ -953,13 +887,14 @@ static INLINE void dec_update_partition_context(MACROBLOCKD *xd, memset(left_ctx, partition_context_lookup[subsize].left, bw); } -static PARTITION_TYPE read_partition(MACROBLOCKD *xd, int mi_row, int mi_col, - vpx_reader *r, - int has_rows, int has_cols, int bsl) { - const int ctx = dec_partition_plane_context(xd, mi_row, mi_col, bsl); - const vpx_prob *const probs = get_partition_probs(xd, ctx); - FRAME_COUNTS *counts = xd->counts; +static PARTITION_TYPE read_partition(TileWorkerData *twd, int mi_row, + int mi_col, int has_rows, int has_cols, + int bsl) { + const int ctx = dec_partition_plane_context(twd, mi_row, mi_col, bsl); + const vpx_prob *const probs = twd->xd.partition_probs[ctx]; + FRAME_COUNTS *counts = twd->xd.counts; PARTITION_TYPE p; + vpx_reader *r = &twd->bit_reader; if (has_rows && has_cols) p = (PARTITION_TYPE)vpx_read_tree(r, vp9_partition_tree, probs); @@ -970,16 +905,15 @@ static PARTITION_TYPE read_partition(MACROBLOCKD *xd, int mi_row, int mi_col, else p = PARTITION_SPLIT; - if (counts) - ++counts->partition[ctx][p]; + if (counts) ++counts->partition[ctx][p]; return p; } // TODO(slavarnway): eliminate bsize and subsize in future commits -static void decode_partition(VP9Decoder *const pbi, MACROBLOCKD *const xd, - int mi_row, int mi_col, - vpx_reader* r, BLOCK_SIZE bsize, int n4x4_l2) { +static void decode_partition(TileWorkerData *twd, VP9Decoder *const pbi, + int mi_row, int mi_col, BLOCK_SIZE bsize, + int n4x4_l2) { VP9_COMMON *const cm = &pbi->common; const int n8x8_l2 = n4x4_l2 - 1; const int num_8x8_wh = 1 << n8x8_l2; @@ -988,59 +922,55 @@ static void decode_partition(VP9Decoder *const pbi, MACROBLOCKD *const xd, 
BLOCK_SIZE subsize; const int has_rows = (mi_row + hbs) < cm->mi_rows; const int has_cols = (mi_col + hbs) < cm->mi_cols; + MACROBLOCKD *const xd = &twd->xd; - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; + if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; - partition = read_partition(xd, mi_row, mi_col, r, has_rows, has_cols, - n8x8_l2); + partition = read_partition(twd, mi_row, mi_col, has_rows, has_cols, n8x8_l2); subsize = subsize_lookup[partition][bsize]; // get_subsize(bsize, partition); if (!hbs) { // calculate bmode block dimensions (log 2) xd->bmode_blocks_wl = 1 >> !!(partition & PARTITION_VERT); xd->bmode_blocks_hl = 1 >> !!(partition & PARTITION_HORZ); - decode_block(pbi, xd, mi_row, mi_col, r, subsize, 1, 1); + decode_block(twd, pbi, mi_row, mi_col, subsize, 1, 1); } else { switch (partition) { case PARTITION_NONE: - decode_block(pbi, xd, mi_row, mi_col, r, subsize, n4x4_l2, n4x4_l2); + decode_block(twd, pbi, mi_row, mi_col, subsize, n4x4_l2, n4x4_l2); break; case PARTITION_HORZ: - decode_block(pbi, xd, mi_row, mi_col, r, subsize, n4x4_l2, n8x8_l2); + decode_block(twd, pbi, mi_row, mi_col, subsize, n4x4_l2, n8x8_l2); if (has_rows) - decode_block(pbi, xd, mi_row + hbs, mi_col, r, subsize, n4x4_l2, + decode_block(twd, pbi, mi_row + hbs, mi_col, subsize, n4x4_l2, n8x8_l2); break; case PARTITION_VERT: - decode_block(pbi, xd, mi_row, mi_col, r, subsize, n8x8_l2, n4x4_l2); + decode_block(twd, pbi, mi_row, mi_col, subsize, n8x8_l2, n4x4_l2); if (has_cols) - decode_block(pbi, xd, mi_row, mi_col + hbs, r, subsize, n8x8_l2, + decode_block(twd, pbi, mi_row, mi_col + hbs, subsize, n8x8_l2, n4x4_l2); break; case PARTITION_SPLIT: - decode_partition(pbi, xd, mi_row, mi_col, r, subsize, n8x8_l2); - decode_partition(pbi, xd, mi_row, mi_col + hbs, r, subsize, n8x8_l2); - decode_partition(pbi, xd, mi_row + hbs, mi_col, r, subsize, n8x8_l2); - decode_partition(pbi, xd, mi_row + hbs, mi_col + hbs, r, subsize, + decode_partition(twd, pbi, mi_row, 
mi_col, subsize, n8x8_l2); + decode_partition(twd, pbi, mi_row, mi_col + hbs, subsize, n8x8_l2); + decode_partition(twd, pbi, mi_row + hbs, mi_col, subsize, n8x8_l2); + decode_partition(twd, pbi, mi_row + hbs, mi_col + hbs, subsize, n8x8_l2); break; - default: - assert(0 && "Invalid partition type"); + default: assert(0 && "Invalid partition type"); } } // update partition context if (bsize >= BLOCK_8X8 && (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) - dec_update_partition_context(xd, mi_row, mi_col, subsize, num_8x8_wh); + dec_update_partition_context(twd, mi_row, mi_col, subsize, num_8x8_wh); } -static void setup_token_decoder(const uint8_t *data, - const uint8_t *data_end, +static void setup_token_decoder(const uint8_t *data, const uint8_t *data_end, size_t read_size, struct vpx_internal_error_info *error_info, - vpx_reader *r, - vpx_decrypt_cb decrypt_cb, + vpx_reader *r, vpx_decrypt_cb decrypt_cb, void *decrypt_state) { // Validate the calculated partition length. If the buffer // described by the partition can't be fully read, then restrict @@ -1067,12 +997,11 @@ static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs, vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]); } -static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, - vpx_reader *r) { - const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode]; - TX_SIZE tx_size; - for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) - read_coef_probs_common(fc->coef_probs[tx_size], r); +static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, vpx_reader *r) { + const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode]; + TX_SIZE tx_size; + for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) + read_coef_probs_common(fc->coef_probs[tx_size], r); } static void setup_segmentation(struct segmentation *seg, @@ -1083,24 +1012,22 @@ static void setup_segmentation(struct segmentation *seg, seg->update_data = 0; seg->enabled = vpx_rb_read_bit(rb); - if 
(!seg->enabled) - return; + if (!seg->enabled) return; // Segmentation map update seg->update_map = vpx_rb_read_bit(rb); if (seg->update_map) { for (i = 0; i < SEG_TREE_PROBS; i++) - seg->tree_probs[i] = vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) - : MAX_PROB; + seg->tree_probs[i] = + vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) : MAX_PROB; seg->temporal_update = vpx_rb_read_bit(rb); if (seg->temporal_update) { for (i = 0; i < PREDICTION_PROBS; i++) - seg->pred_probs[i] = vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) - : MAX_PROB; + seg->pred_probs[i] = + vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8) : MAX_PROB; } else { - for (i = 0; i < PREDICTION_PROBS; i++) - seg->pred_probs[i] = MAX_PROB; + for (i = 0; i < PREDICTION_PROBS; i++) seg->pred_probs[i] = MAX_PROB; } } @@ -1164,10 +1091,8 @@ static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd, cm->uv_dc_delta_q = read_delta_q(rb); cm->uv_ac_delta_q = read_delta_q(rb); cm->dequant_bit_depth = cm->bit_depth; - xd->lossless = cm->base_qindex == 0 && - cm->y_dc_delta_q == 0 && - cm->uv_dc_delta_q == 0 && - cm->uv_ac_delta_q == 0; + xd->lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0 && + cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0; #if CONFIG_VP9_HIGHBITDEPTH xd->bd = (int)cm->bit_depth; @@ -1180,13 +1105,13 @@ static void setup_segmentation_dequant(VP9_COMMON *const cm) { int i; for (i = 0; i < MAX_SEGMENTS; ++i) { const int qindex = vp9_get_qindex(&cm->seg, i, cm->base_qindex); - cm->y_dequant[i][0] = vp9_dc_quant(qindex, cm->y_dc_delta_q, - cm->bit_depth); + cm->y_dequant[i][0] = + vp9_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth); cm->y_dequant[i][1] = vp9_ac_quant(qindex, 0, cm->bit_depth); - cm->uv_dequant[i][0] = vp9_dc_quant(qindex, cm->uv_dc_delta_q, - cm->bit_depth); - cm->uv_dequant[i][1] = vp9_ac_quant(qindex, cm->uv_ac_delta_q, - cm->bit_depth); + cm->uv_dequant[i][0] = + vp9_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth); + cm->uv_dequant[i][1] = 
+ vp9_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth); } } else { const int qindex = cm->base_qindex; @@ -1194,18 +1119,16 @@ static void setup_segmentation_dequant(VP9_COMMON *const cm) { // remaining are don't cares. cm->y_dequant[0][0] = vp9_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth); cm->y_dequant[0][1] = vp9_ac_quant(qindex, 0, cm->bit_depth); - cm->uv_dequant[0][0] = vp9_dc_quant(qindex, cm->uv_dc_delta_q, - cm->bit_depth); - cm->uv_dequant[0][1] = vp9_ac_quant(qindex, cm->uv_ac_delta_q, - cm->bit_depth); + cm->uv_dequant[0][0] = + vp9_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth); + cm->uv_dequant[0][1] = + vp9_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth); } } static INTERP_FILTER read_interp_filter(struct vpx_read_bit_buffer *rb) { - const INTERP_FILTER literal_to_filter[] = { EIGHTTAP_SMOOTH, - EIGHTTAP, - EIGHTTAP_SHARP, - BILINEAR }; + const INTERP_FILTER literal_to_filter[] = { EIGHTTAP_SMOOTH, EIGHTTAP, + EIGHTTAP_SHARP, BILINEAR }; return vpx_rb_read_bit(rb) ? SWITCHABLE : literal_to_filter[vpx_rb_read_literal(rb, 2)]; } @@ -1221,8 +1144,9 @@ static void resize_mv_buffer(VP9_COMMON *cm) { vpx_free(cm->cur_frame->mvs); cm->cur_frame->mi_rows = cm->mi_rows; cm->cur_frame->mi_cols = cm->mi_cols; - cm->cur_frame->mvs = (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols, - sizeof(*cm->cur_frame->mvs)); + CHECK_MEM_ERROR(cm, cm->cur_frame->mvs, + (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols, + sizeof(*cm->cur_frame->mvs))); } static void resize_context_buffers(VP9_COMMON *cm, int width, int height) { @@ -1236,7 +1160,7 @@ static void resize_context_buffers(VP9_COMMON *cm, int width, int height) { const int new_mi_rows = ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2; const int new_mi_cols = - ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2; + ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2; // Allocations in vp9_alloc_context_buffers() depend on individual // dimensions as well as the overall size. 
@@ -1266,13 +1190,12 @@ static void setup_frame_size(VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) { lock_buffer_pool(pool); if (vpx_realloc_frame_buffer( - get_frame_new_buffer(cm), cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, + get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x, + cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH cm->use_highbitdepth, #endif - VP9_DEC_BORDER_IN_PIXELS, - cm->byte_alignment, + VP9_DEC_BORDER_IN_PIXELS, cm->byte_alignment, &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb, pool->cb_priv)) { unlock_buffer_pool(pool); @@ -1286,7 +1209,7 @@ static void setup_frame_size(VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) { pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth; pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space; pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range; - pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width; + pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width; pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height; } @@ -1306,16 +1229,20 @@ static void setup_frame_size_with_refs(VP9_COMMON *cm, BufferPool *const pool = cm->buffer_pool; for (i = 0; i < REFS_PER_FRAME; ++i) { if (vpx_rb_read_bit(rb)) { - YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf; - width = buf->y_crop_width; - height = buf->y_crop_height; - found = 1; - break; + if (cm->frame_refs[i].idx != INVALID_IDX) { + YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf; + width = buf->y_crop_width; + height = buf->y_crop_height; + found = 1; + break; + } else { + vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, + "Failed to decode frame size"); + } } } - if (!found) - vp9_read_frame_size(rb, &width, &height); + if (!found) vp9_read_frame_size(rb, &width, &height); if (width <= 0 || height <= 0) vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, @@ -1325,22 +1252,21 @@ static void 
setup_frame_size_with_refs(VP9_COMMON *cm, // has valid dimensions. for (i = 0; i < REFS_PER_FRAME; ++i) { RefBuffer *const ref_frame = &cm->frame_refs[i]; - has_valid_ref_frame |= valid_ref_frame_size(ref_frame->buf->y_crop_width, - ref_frame->buf->y_crop_height, - width, height); + has_valid_ref_frame |= + (ref_frame->idx != INVALID_IDX && + valid_ref_frame_size(ref_frame->buf->y_crop_width, + ref_frame->buf->y_crop_height, width, height)); } if (!has_valid_ref_frame) vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Referenced frame has invalid size"); for (i = 0; i < REFS_PER_FRAME; ++i) { RefBuffer *const ref_frame = &cm->frame_refs[i]; - if (!valid_ref_frame_img_fmt( - ref_frame->buf->bit_depth, - ref_frame->buf->subsampling_x, - ref_frame->buf->subsampling_y, - cm->bit_depth, - cm->subsampling_x, - cm->subsampling_y)) + if (ref_frame->idx == INVALID_IDX || + !valid_ref_frame_img_fmt(ref_frame->buf->bit_depth, + ref_frame->buf->subsampling_x, + ref_frame->buf->subsampling_y, cm->bit_depth, + cm->subsampling_x, cm->subsampling_y)) vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Referenced frame has incompatible color format"); } @@ -1350,13 +1276,12 @@ static void setup_frame_size_with_refs(VP9_COMMON *cm, lock_buffer_pool(pool); if (vpx_realloc_frame_buffer( - get_frame_new_buffer(cm), cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, + get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x, + cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH cm->use_highbitdepth, #endif - VP9_DEC_BORDER_IN_PIXELS, - cm->byte_alignment, + VP9_DEC_BORDER_IN_PIXELS, cm->byte_alignment, &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb, pool->cb_priv)) { unlock_buffer_pool(pool); @@ -1370,7 +1295,7 @@ static void setup_frame_size_with_refs(VP9_COMMON *cm, pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth; pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space; 
pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range; - pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width; + pool->frame_bufs[cm->new_fb_idx].buf.render_width = cm->render_width; pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height; } @@ -1381,8 +1306,7 @@ static void setup_tile_info(VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) { // columns max_ones = max_log2_tile_cols - min_log2_tile_cols; cm->log2_tile_cols = min_log2_tile_cols; - while (max_ones-- && vpx_rb_read_bit(rb)) - cm->log2_tile_cols++; + while (max_ones-- && vpx_rb_read_bit(rb)) cm->log2_tile_cols++; if (cm->log2_tile_cols > 6) vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, @@ -1390,18 +1314,15 @@ static void setup_tile_info(VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) { // rows cm->log2_tile_rows = vpx_rb_read_bit(rb); - if (cm->log2_tile_rows) - cm->log2_tile_rows += vpx_rb_read_bit(rb); + if (cm->log2_tile_rows) cm->log2_tile_rows += vpx_rb_read_bit(rb); } // Reads the next tile returning its size and adjusting '*data' accordingly // based on 'is_last'. 
-static void get_tile_buffer(const uint8_t *const data_end, - int is_last, +static void get_tile_buffer(const uint8_t *const data_end, int is_last, struct vpx_internal_error_info *error_info, - const uint8_t **data, - vpx_decrypt_cb decrypt_cb, void *decrypt_state, - TileBuffer *buf) { + const uint8_t **data, vpx_decrypt_cb decrypt_cb, + void *decrypt_state, TileBuffer *buf) { size_t size; if (!is_last) { @@ -1431,9 +1352,9 @@ static void get_tile_buffer(const uint8_t *const data_end, *data += size; } -static void get_tile_buffers(VP9Decoder *pbi, - const uint8_t *data, const uint8_t *data_end, - int tile_cols, int tile_rows, +static void get_tile_buffers(VP9Decoder *pbi, const uint8_t *data, + const uint8_t *data_end, int tile_cols, + int tile_rows, TileBuffer (*tile_buffers)[1 << 6]) { int r, c; @@ -1448,8 +1369,7 @@ static void get_tile_buffers(VP9Decoder *pbi, } } -static const uint8_t *decode_tiles(VP9Decoder *pbi, - const uint8_t *data, +static const uint8_t *decode_tiles(VP9Decoder *pbi, const uint8_t *data, const uint8_t *data_end) { VP9_COMMON *const cm = &pbi->common; const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); @@ -1459,7 +1379,7 @@ static const uint8_t *decode_tiles(VP9Decoder *pbi, TileBuffer tile_buffers[4][1 << 6]; int tile_row, tile_col; int mi_row, mi_col; - TileData *tile_data = NULL; + TileWorkerData *tile_data = NULL; if (cm->lf.filter_level && !cm->skip_loop_filter && pbi->lf_worker.data1 == NULL) { @@ -1473,7 +1393,7 @@ static const uint8_t *decode_tiles(VP9Decoder *pbi, } if (cm->lf.filter_level && !cm->skip_loop_filter) { - LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; + LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1; // Be sure to sync as we might be resuming after a failed frame decode. 
winterface->sync(&pbi->lf_worker); vp9_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm, @@ -1495,28 +1415,17 @@ static const uint8_t *decode_tiles(VP9Decoder *pbi, get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers); - if (pbi->tile_data == NULL || - (tile_cols * tile_rows) != pbi->total_tiles) { - vpx_free(pbi->tile_data); - CHECK_MEM_ERROR( - cm, - pbi->tile_data, - vpx_memalign(32, tile_cols * tile_rows * (sizeof(*pbi->tile_data)))); - pbi->total_tiles = tile_rows * tile_cols; - } - // Load all tile information into tile_data. for (tile_row = 0; tile_row < tile_rows; ++tile_row) { for (tile_col = 0; tile_col < tile_cols; ++tile_col) { const TileBuffer *const buf = &tile_buffers[tile_row][tile_col]; - tile_data = pbi->tile_data + tile_cols * tile_row + tile_col; - tile_data->cm = cm; + tile_data = pbi->tile_worker_data + tile_cols * tile_row + tile_col; tile_data->xd = pbi->mb; tile_data->xd.corrupted = 0; - tile_data->xd.counts = cm->frame_parallel_decoding_mode ? - NULL : &cm->counts; + tile_data->xd.counts = + cm->frame_parallel_decoding_mode ? NULL : &cm->counts; vp9_zero(tile_data->dqcoeff); - vp9_tile_init(&tile_data->xd.tile, tile_data->cm, tile_row, tile_col); + vp9_tile_init(&tile_data->xd.tile, cm, tile_row, tile_col); setup_token_decoder(buf->data, data_end, buf->size, &cm->error, &tile_data->bit_reader, pbi->decrypt_cb, pbi->decrypt_state); @@ -1530,26 +1439,25 @@ static const uint8_t *decode_tiles(VP9Decoder *pbi, for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end; mi_row += MI_BLOCK_SIZE) { for (tile_col = 0; tile_col < tile_cols; ++tile_col) { - const int col = pbi->inv_tile_order ? - tile_cols - tile_col - 1 : tile_col; - tile_data = pbi->tile_data + tile_cols * tile_row + col; - vp9_tile_set_col(&tile, tile_data->cm, col); + const int col = + pbi->inv_tile_order ? 
tile_cols - tile_col - 1 : tile_col; + tile_data = pbi->tile_worker_data + tile_cols * tile_row + col; + vp9_tile_set_col(&tile, cm, col); vp9_zero(tile_data->xd.left_context); vp9_zero(tile_data->xd.left_seg_context); for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end; mi_col += MI_BLOCK_SIZE) { - decode_partition(pbi, &tile_data->xd, mi_row, - mi_col, &tile_data->bit_reader, BLOCK_64X64, 4); + decode_partition(tile_data, pbi, mi_row, mi_col, BLOCK_64X64, 4); } pbi->mb.corrupted |= tile_data->xd.corrupted; if (pbi->mb.corrupted) - vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, - "Failed to decode tile data"); + vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, + "Failed to decode tile data"); } // Loopfilter one row. if (cm->lf.filter_level && !cm->skip_loop_filter) { const int lf_start = mi_row - MI_BLOCK_SIZE; - LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; + LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1; // delay the loopfilter by 1 macroblock row. if (lf_start < 0) continue; @@ -1570,14 +1478,13 @@ static const uint8_t *decode_tiles(VP9Decoder *pbi, // still be changed by the longest loopfilter of the next superblock // row. if (pbi->frame_parallel_decode) - vp9_frameworker_broadcast(pbi->cur_buf, - mi_row << MI_BLOCK_SIZE_LOG2); + vp9_frameworker_broadcast(pbi->cur_buf, mi_row << MI_BLOCK_SIZE_LOG2); } } // Loopfilter remaining rows in the frame. if (cm->lf.filter_level && !cm->skip_loop_filter) { - LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1; + LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1; winterface->sync(&pbi->lf_worker); lf_data->start = lf_data->stop; lf_data->stop = cm->mi_rows; @@ -1585,7 +1492,7 @@ static const uint8_t *decode_tiles(VP9Decoder *pbi, } // Get last tile data. 
- tile_data = pbi->tile_data + tile_cols * tile_rows - 1; + tile_data = pbi->tile_worker_data + tile_cols * tile_rows - 1; if (pbi->frame_parallel_decode) vp9_frameworker_broadcast(pbi->cur_buf, INT_MAX); @@ -1629,8 +1536,7 @@ static int tile_worker_hook(TileWorkerData *const tile_data, vp9_zero(tile_data->xd.left_seg_context); for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; mi_col += MI_BLOCK_SIZE) { - decode_partition(pbi, &tile_data->xd, mi_row, mi_col, - &tile_data->bit_reader, BLOCK_64X64, 4); + decode_partition(tile_data, pbi, mi_row, mi_col, BLOCK_64X64, 4); } } @@ -1645,13 +1551,12 @@ static int tile_worker_hook(TileWorkerData *const tile_data, // sorts in descending order static int compare_tile_buffers(const void *a, const void *b) { - const TileBuffer *const buf1 = (const TileBuffer*)a; - const TileBuffer *const buf2 = (const TileBuffer*)b; + const TileBuffer *const buf1 = (const TileBuffer *)a; + const TileBuffer *const buf2 = (const TileBuffer *)b; return (int)(buf2->size - buf1->size); } -static const uint8_t *decode_tiles_mt(VP9Decoder *pbi, - const uint8_t *data, +static const uint8_t *decode_tiles_mt(VP9Decoder *pbi, const uint8_t *data, const uint8_t *data_end) { VP9_COMMON *const cm = &pbi->common; const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); @@ -1670,12 +1575,6 @@ static const uint8_t *decode_tiles_mt(VP9Decoder *pbi, const int num_threads = pbi->max_threads; CHECK_MEM_ERROR(cm, pbi->tile_workers, vpx_malloc(num_threads * sizeof(*pbi->tile_workers))); - // Ensure tile data offsets will be properly aligned. This may fail on - // platforms without DECLARE_ALIGNED(). 
- assert((sizeof(*pbi->tile_worker_data) % 16) == 0); - CHECK_MEM_ERROR(cm, pbi->tile_worker_data, - vpx_memalign(32, num_threads * - sizeof(*pbi->tile_worker_data))); for (n = 0; n < num_threads; ++n) { VPxWorker *const worker = &pbi->tile_workers[n]; ++pbi->num_tile_workers; @@ -1691,7 +1590,8 @@ static const uint8_t *decode_tiles_mt(VP9Decoder *pbi, // Reset tile decoding hook for (n = 0; n < num_workers; ++n) { VPxWorker *const worker = &pbi->tile_workers[n]; - TileWorkerData *const tile_data = &pbi->tile_worker_data[n]; + TileWorkerData *const tile_data = + &pbi->tile_worker_data[n + pbi->total_tiles]; winterface->sync(worker); tile_data->xd = pbi->mb; tile_data->xd.counts = @@ -1746,7 +1646,7 @@ static const uint8_t *decode_tiles_mt(VP9Decoder *pbi, if (!cm->frame_parallel_decoding_mode) { for (n = 0; n < num_workers; ++n) { TileWorkerData *const tile_data = - (TileWorkerData*)pbi->tile_workers[n].data1; + (TileWorkerData *)pbi->tile_workers[n].data1; vp9_zero(tile_data->counts); } } @@ -1759,7 +1659,7 @@ static const uint8_t *decode_tiles_mt(VP9Decoder *pbi, for (n = 0; n < num_workers; ++n) { const int count = base + (remain + n) / num_workers; VPxWorker *const worker = &pbi->tile_workers[n]; - TileWorkerData *const tile_data = (TileWorkerData*)worker->data1; + TileWorkerData *const tile_data = (TileWorkerData *)worker->data1; tile_data->buf_start = buf_start; tile_data->buf_end = buf_start + count - 1; @@ -1777,7 +1677,7 @@ static const uint8_t *decode_tiles_mt(VP9Decoder *pbi, for (; n > 0; --n) { VPxWorker *const worker = &pbi->tile_workers[n - 1]; - TileWorkerData *const tile_data = (TileWorkerData*)worker->data1; + TileWorkerData *const tile_data = (TileWorkerData *)worker->data1; // TODO(jzern): The tile may have specific error data associated with // its vpx_internal_error_info which could be propagated to the main info // in cm. 
Additionally once the threads have been synced and an error is @@ -1791,7 +1691,7 @@ static const uint8_t *decode_tiles_mt(VP9Decoder *pbi, if (!cm->frame_parallel_decoding_mode) { for (n = 0; n < num_workers; ++n) { TileWorkerData *const tile_data = - (TileWorkerData*)pbi->tile_workers[n].data1; + (TileWorkerData *)pbi->tile_workers[n].data1; vp9_accumulate_frame_counts(&cm->counts, &tile_data->counts, 1); } } @@ -1805,8 +1705,8 @@ static void error_handler(void *data) { vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet"); } -static void read_bitdepth_colorspace_sampling( - VP9_COMMON *cm, struct vpx_read_bit_buffer *rb) { +static void read_bitdepth_colorspace_sampling(VP9_COMMON *cm, + struct vpx_read_bit_buffer *rb) { if (cm->profile >= PROFILE_2) { cm->bit_depth = vpx_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10; #if CONFIG_VP9_HIGHBITDEPTH @@ -1861,8 +1761,8 @@ static size_t read_uncompressed_header(VP9Decoder *pbi, cm->last_intra_only = cm->intra_only; if (vpx_rb_read_literal(rb, 2) != VP9_FRAME_MARKER) - vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, - "Invalid frame marker"); + vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, + "Invalid frame marker"); cm->profile = vp9_read_profile(rb); #if CONFIG_VP9_HIGHBITDEPTH @@ -1900,7 +1800,7 @@ static size_t read_uncompressed_header(VP9Decoder *pbi, return 0; } - cm->frame_type = (FRAME_TYPE) vpx_rb_read_bit(rb); + cm->frame_type = (FRAME_TYPE)vpx_rb_read_bit(rb); cm->show_frame = vpx_rb_read_bit(rb); cm->error_resilient_mode = vpx_rb_read_bit(rb); @@ -1925,8 +1825,8 @@ static size_t read_uncompressed_header(VP9Decoder *pbi, } else { cm->intra_only = cm->show_frame ? 0 : vpx_rb_read_bit(rb); - cm->reset_frame_context = cm->error_resilient_mode ? - 0 : vpx_rb_read_literal(rb, 2); + cm->reset_frame_context = + cm->error_resilient_mode ? 
0 : vpx_rb_read_literal(rb, 2); if (cm->intra_only) { if (!vp9_read_sync_code(rb)) @@ -1954,7 +1854,7 @@ static size_t read_uncompressed_header(VP9Decoder *pbi, memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map)); pbi->need_resync = 0; } - } else if (pbi->need_resync != 1) { /* Skip if need resync */ + } else if (pbi->need_resync != 1) { /* Skip if need resync */ pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES); for (i = 0; i < REFS_PER_FRAME; ++i) { const int ref = vpx_rb_read_literal(rb, REF_FRAMES_LOG2); @@ -1973,16 +1873,14 @@ static size_t read_uncompressed_header(VP9Decoder *pbi, for (i = 0; i < REFS_PER_FRAME; ++i) { RefBuffer *const ref_buf = &cm->frame_refs[i]; #if CONFIG_VP9_HIGHBITDEPTH - vp9_setup_scale_factors_for_frame(&ref_buf->sf, - ref_buf->buf->y_crop_width, - ref_buf->buf->y_crop_height, - cm->width, cm->height, - cm->use_highbitdepth); + vp9_setup_scale_factors_for_frame( + &ref_buf->sf, ref_buf->buf->y_crop_width, + ref_buf->buf->y_crop_height, cm->width, cm->height, + cm->use_highbitdepth); #else - vp9_setup_scale_factors_for_frame(&ref_buf->sf, - ref_buf->buf->y_crop_width, - ref_buf->buf->y_crop_height, - cm->width, cm->height); + vp9_setup_scale_factors_for_frame( + &ref_buf->sf, ref_buf->buf->y_crop_width, + ref_buf->buf->y_crop_height, cm->width, cm->height); #endif } } @@ -1992,7 +1890,7 @@ static size_t read_uncompressed_header(VP9Decoder *pbi, #endif get_frame_new_buffer(cm)->color_space = cm->color_space; get_frame_new_buffer(cm)->color_range = cm->color_range; - get_frame_new_buffer(cm)->render_width = cm->render_width; + get_frame_new_buffer(cm)->render_width = cm->render_width; get_frame_new_buffer(cm)->render_height = cm->render_height; if (pbi->need_resync) { @@ -2004,8 +1902,7 @@ static size_t read_uncompressed_header(VP9Decoder *pbi, if (!cm->error_resilient_mode) { cm->refresh_frame_context = vpx_rb_read_bit(rb); cm->frame_parallel_decoding_mode = vpx_rb_read_bit(rb); - if 
(!cm->frame_parallel_decoding_mode) - vp9_zero(cm->counts); + if (!cm->frame_parallel_decoding_mode) vp9_zero(cm->counts); } else { cm->refresh_frame_context = 0; cm->frame_parallel_decoding_mode = 1; @@ -2071,8 +1968,7 @@ static int read_compressed_header(VP9Decoder *pbi, const uint8_t *data, "Failed to allocate bool decoder 0"); cm->tx_mode = xd->lossless ? ONLY_4X4 : read_tx_mode(&r); - if (cm->tx_mode == TX_MODE_SELECT) - read_tx_mode_probs(&fc->tx_probs, &r); + if (cm->tx_mode == TX_MODE_SELECT) read_tx_mode_probs(&fc->tx_probs, &r); read_coef_probs(fc, cm->tx_mode, &r); for (k = 0; k < SKIP_CONTEXTS; ++k) @@ -2084,8 +1980,7 @@ static int read_compressed_header(VP9Decoder *pbi, const uint8_t *data, read_inter_mode_probs(fc, &r); - if (cm->interp_filter == SWITCHABLE) - read_switchable_interp_probs(fc, &r); + if (cm->interp_filter == SWITCHABLE) read_switchable_interp_probs(fc, &r); for (i = 0; i < INTRA_INTER_CONTEXTS; i++) vp9_diff_update_prob(&r, &fc->intra_inter_prob[i]); @@ -2110,11 +2005,8 @@ static int read_compressed_header(VP9Decoder *pbi, const uint8_t *data, } static struct vpx_read_bit_buffer *init_read_bit_buffer( - VP9Decoder *pbi, - struct vpx_read_bit_buffer *rb, - const uint8_t *data, - const uint8_t *data_end, - uint8_t clear_data[MAX_VP9_HEADER_SIZE]) { + VP9Decoder *pbi, struct vpx_read_bit_buffer *rb, const uint8_t *data, + const uint8_t *data_end, uint8_t clear_data[MAX_VP9_HEADER_SIZE]) { rb->bit_offset = 0; rb->error_handler = error_handler; rb->error_handler_data = &pbi->common; @@ -2138,8 +2030,8 @@ int vp9_read_sync_code(struct vpx_read_bit_buffer *const rb) { vpx_rb_read_literal(rb, 8) == VP9_SYNC_CODE_2; } -void vp9_read_frame_size(struct vpx_read_bit_buffer *rb, - int *width, int *height) { +void vp9_read_frame_size(struct vpx_read_bit_buffer *rb, int *width, + int *height) { *width = vpx_rb_read_literal(rb, 16) + 1; *height = vpx_rb_read_literal(rb, 16) + 1; } @@ -2147,21 +2039,19 @@ void vp9_read_frame_size(struct 
vpx_read_bit_buffer *rb, BITSTREAM_PROFILE vp9_read_profile(struct vpx_read_bit_buffer *rb) { int profile = vpx_rb_read_bit(rb); profile |= vpx_rb_read_bit(rb) << 1; - if (profile > 2) - profile += vpx_rb_read_bit(rb); - return (BITSTREAM_PROFILE) profile; + if (profile > 2) profile += vpx_rb_read_bit(rb); + return (BITSTREAM_PROFILE)profile; } -void vp9_decode_frame(VP9Decoder *pbi, - const uint8_t *data, const uint8_t *data_end, - const uint8_t **p_data_end) { +void vp9_decode_frame(VP9Decoder *pbi, const uint8_t *data, + const uint8_t *data_end, const uint8_t **p_data_end) { VP9_COMMON *const cm = &pbi->common; MACROBLOCKD *const xd = &pbi->mb; struct vpx_read_bit_buffer rb; int context_updated = 0; uint8_t clear_data[MAX_VP9_HEADER_SIZE]; - const size_t first_partition_size = read_uncompressed_header(pbi, - init_read_bit_buffer(pbi, &rb, data, data_end, clear_data)); + const size_t first_partition_size = read_uncompressed_header( + pbi, init_read_bit_buffer(pbi, &rb, data, data_end, clear_data)); const int tile_rows = 1 << cm->log2_tile_rows; const int tile_cols = 1 << cm->log2_tile_cols; YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm); @@ -2178,12 +2068,10 @@ void vp9_decode_frame(VP9Decoder *pbi, vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet or corrupt header length"); - cm->use_prev_frame_mvs = !cm->error_resilient_mode && - cm->width == cm->last_width && - cm->height == cm->last_height && - !cm->last_intra_only && - cm->last_show_frame && - (cm->last_frame_type != KEY_FRAME); + cm->use_prev_frame_mvs = + !cm->error_resilient_mode && cm->width == cm->last_width && + cm->height == cm->last_height && !cm->last_intra_only && + cm->last_show_frame && (cm->last_frame_type != KEY_FRAME); vp9_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y); @@ -2220,6 +2108,19 @@ void vp9_decode_frame(VP9Decoder *pbi, vp9_frameworker_unlock_stats(worker); } + if (pbi->tile_worker_data == NULL || + (tile_cols * tile_rows) != 
pbi->total_tiles) { + const int num_tile_workers = + tile_cols * tile_rows + ((pbi->max_threads > 1) ? pbi->max_threads : 0); + const size_t twd_size = num_tile_workers * sizeof(*pbi->tile_worker_data); + // Ensure tile data offsets will be properly aligned. This may fail on + // platforms without DECLARE_ALIGNED(). + assert((sizeof(*pbi->tile_worker_data) % 16) == 0); + vpx_free(pbi->tile_worker_data); + CHECK_MEM_ERROR(cm, pbi->tile_worker_data, vpx_memalign(32, twd_size)); + pbi->total_tiles = tile_rows * tile_cols; + } + if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1) { // Multi-threaded tile decoder *p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end); @@ -2227,9 +2128,9 @@ void vp9_decode_frame(VP9Decoder *pbi, if (!cm->skip_loop_filter) { // If multiple threads are used to decode tiles, then we use those // threads to do parallel loopfiltering. - vp9_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane, - cm->lf.filter_level, 0, 0, pbi->tile_workers, - pbi->num_tile_workers, &pbi->lf_row_sync); + vp9_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane, cm->lf.filter_level, + 0, 0, pbi->tile_workers, pbi->num_tile_workers, + &pbi->lf_row_sync); } } else { vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, diff --git a/libs/libvpx/vp9/decoder/vp9_decodeframe.h b/libs/libvpx/vp9/decoder/vp9_decodeframe.h index ce33cbdbd9..44717f546a 100644 --- a/libs/libvpx/vp9/decoder/vp9_decodeframe.h +++ b/libs/libvpx/vp9/decoder/vp9_decodeframe.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP9_DECODER_VP9_DECODEFRAME_H_ #define VP9_DECODER_VP9_DECODEFRAME_H_ @@ -22,13 +21,12 @@ struct VP9Decoder; struct vpx_read_bit_buffer; int vp9_read_sync_code(struct vpx_read_bit_buffer *const rb); -void vp9_read_frame_size(struct vpx_read_bit_buffer *rb, - int *width, int *height); +void vp9_read_frame_size(struct vpx_read_bit_buffer *rb, int *width, + int *height); BITSTREAM_PROFILE vp9_read_profile(struct vpx_read_bit_buffer *rb); -void vp9_decode_frame(struct VP9Decoder *pbi, - const uint8_t *data, const uint8_t *data_end, - const uint8_t **p_data_end); +void vp9_decode_frame(struct VP9Decoder *pbi, const uint8_t *data, + const uint8_t *data_end, const uint8_t **p_data_end); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp9/decoder/vp9_decodemv.c b/libs/libvpx/vp9/decoder/vp9_decodemv.c index 86044207c2..4372ba0371 100644 --- a/libs/libvpx/vp9/decoder/vp9_decodemv.c +++ b/libs/libvpx/vp9/decoder/vp9_decodemv.c @@ -33,29 +33,26 @@ static PREDICTION_MODE read_intra_mode_y(VP9_COMMON *cm, MACROBLOCKD *xd, const PREDICTION_MODE y_mode = read_intra_mode(r, cm->fc->y_mode_prob[size_group]); FRAME_COUNTS *counts = xd->counts; - if (counts) - ++counts->y_mode[size_group][y_mode]; + if (counts) ++counts->y_mode[size_group][y_mode]; return y_mode; } static PREDICTION_MODE read_intra_mode_uv(VP9_COMMON *cm, MACROBLOCKD *xd, vpx_reader *r, PREDICTION_MODE y_mode) { - const PREDICTION_MODE uv_mode = read_intra_mode(r, - cm->fc->uv_mode_prob[y_mode]); + const PREDICTION_MODE uv_mode = + read_intra_mode(r, cm->fc->uv_mode_prob[y_mode]); FRAME_COUNTS *counts = xd->counts; - if (counts) - ++counts->uv_mode[y_mode][uv_mode]; + if (counts) ++counts->uv_mode[y_mode][uv_mode]; return uv_mode; } static PREDICTION_MODE read_inter_mode(VP9_COMMON *cm, MACROBLOCKD *xd, vpx_reader *r, int ctx) { - const int mode = vpx_read_tree(r, vp9_inter_mode_tree, - cm->fc->inter_mode_probs[ctx]); + const int mode = + vpx_read_tree(r, vp9_inter_mode_tree, 
cm->fc->inter_mode_probs[ctx]); FRAME_COUNTS *counts = xd->counts; - if (counts) - ++counts->inter_mode[ctx][mode]; + if (counts) ++counts->inter_mode[ctx][mode]; return NEARESTMV + mode; } @@ -76,8 +73,7 @@ static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd, tx_size += vpx_read(r, tx_probs[2]); } - if (counts) - ++get_tx_counts(max_tx_size, ctx, &counts->tx)[tx_size]; + if (counts) ++get_tx_counts(max_tx_size, ctx, &counts->tx)[tx_size]; return (TX_SIZE)tx_size; } @@ -105,8 +101,8 @@ static int dec_get_segment_id(const VP9_COMMON *cm, const uint8_t *segment_ids, return segment_id; } -static void set_segment_id(VP9_COMMON *cm, int mi_offset, - int x_mis, int y_mis, int segment_id) { +static void set_segment_id(VP9_COMMON *cm, int mi_offset, int x_mis, int y_mis, + int segment_id) { int x, y; assert(segment_id >= 0 && segment_id < MAX_SEGMENTS); @@ -117,25 +113,24 @@ static void set_segment_id(VP9_COMMON *cm, int mi_offset, } static void copy_segment_id(const VP9_COMMON *cm, - const uint8_t *last_segment_ids, - uint8_t *current_segment_ids, - int mi_offset, int x_mis, int y_mis) { + const uint8_t *last_segment_ids, + uint8_t *current_segment_ids, int mi_offset, + int x_mis, int y_mis) { int x, y; for (y = 0; y < y_mis; y++) for (x = 0; x < x_mis; x++) - current_segment_ids[mi_offset + y * cm->mi_cols + x] = last_segment_ids ? - last_segment_ids[mi_offset + y * cm->mi_cols + x] : 0; + current_segment_ids[mi_offset + y * cm->mi_cols + x] = + last_segment_ids ? 
last_segment_ids[mi_offset + y * cm->mi_cols + x] + : 0; } -static int read_intra_segment_id(VP9_COMMON *const cm, int mi_offset, - int x_mis, int y_mis, - vpx_reader *r) { +static int read_intra_segment_id(VP9_COMMON *const cm, int mi_offset, int x_mis, + int y_mis, vpx_reader *r) { struct segmentation *const seg = &cm->seg; int segment_id; - if (!seg->enabled) - return 0; // Default for disabled segmentation + if (!seg->enabled) return 0; // Default for disabled segmentation if (!seg->update_map) { copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map, @@ -149,24 +144,19 @@ static int read_intra_segment_id(VP9_COMMON *const cm, int mi_offset, } static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd, - int mi_row, int mi_col, vpx_reader *r) { + int mi_row, int mi_col, vpx_reader *r, + int x_mis, int y_mis) { struct segmentation *const seg = &cm->seg; MODE_INFO *const mi = xd->mi[0]; int predicted_segment_id, segment_id; const int mi_offset = mi_row * cm->mi_cols + mi_col; - const int bw = xd->plane[0].n4_w >> 1; - const int bh = xd->plane[0].n4_h >> 1; - // TODO(slavarnway): move x_mis, y_mis into xd ????? - const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw); - const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh); + if (!seg->enabled) return 0; // Default for disabled segmentation - if (!seg->enabled) - return 0; // Default for disabled segmentation - - predicted_segment_id = cm->last_frame_seg_map ? - dec_get_segment_id(cm, cm->last_frame_seg_map, mi_offset, x_mis, y_mis) : - 0; + predicted_segment_id = cm->last_frame_seg_map + ? 
dec_get_segment_id(cm, cm->last_frame_seg_map, + mi_offset, x_mis, y_mis) + : 0; if (!seg->update_map) { copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map, @@ -177,8 +167,8 @@ static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd, if (seg->temporal_update) { const vpx_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd); mi->seg_id_predicted = vpx_read(r, pred_prob); - segment_id = mi->seg_id_predicted ? predicted_segment_id - : read_segment_id(r, seg); + segment_id = + mi->seg_id_predicted ? predicted_segment_id : read_segment_id(r, seg); } else { segment_id = read_segment_id(r, seg); } @@ -186,35 +176,29 @@ static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd, return segment_id; } -static int read_skip(VP9_COMMON *cm, const MACROBLOCKD *xd, - int segment_id, vpx_reader *r) { +static int read_skip(VP9_COMMON *cm, const MACROBLOCKD *xd, int segment_id, + vpx_reader *r) { if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) { return 1; } else { const int ctx = vp9_get_skip_context(xd); const int skip = vpx_read(r, cm->fc->skip_probs[ctx]); FRAME_COUNTS *counts = xd->counts; - if (counts) - ++counts->skip[ctx][skip]; + if (counts) ++counts->skip[ctx][skip]; return skip; } } static void read_intra_frame_mode_info(VP9_COMMON *const cm, - MACROBLOCKD *const xd, - int mi_row, int mi_col, vpx_reader *r) { + MACROBLOCKD *const xd, int mi_row, + int mi_col, vpx_reader *r, int x_mis, + int y_mis) { MODE_INFO *const mi = xd->mi[0]; const MODE_INFO *above_mi = xd->above_mi; - const MODE_INFO *left_mi = xd->left_mi; + const MODE_INFO *left_mi = xd->left_mi; const BLOCK_SIZE bsize = mi->sb_type; int i; const int mi_offset = mi_row * cm->mi_cols + mi_col; - const int bw = xd->plane[0].n4_w >> 1; - const int bh = xd->plane[0].n4_h >> 1; - - // TODO(slavarnway): move x_mis, y_mis into xd ????? 
- const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw); - const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh); mi->segment_id = read_intra_segment_id(cm, mi_offset, x_mis, y_mis, r); mi->skip = read_skip(cm, xd, mi->segment_id, r); @@ -242,15 +226,14 @@ static void read_intra_frame_mode_info(VP9_COMMON *const cm, read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 2)); break; default: - mi->mode = read_intra_mode(r, - get_y_mode_probs(mi, above_mi, left_mi, 0)); + mi->mode = read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 0)); } mi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mi->mode]); } -static int read_mv_component(vpx_reader *r, - const nmv_component *mvcomp, int usehp) { +static int read_mv_component(vpx_reader *r, const nmv_component *mvcomp, + int usehp) { int mag, d, fr, hp; const int sign = vpx_read(r, mvcomp->sign); const int mv_class = vpx_read_tree(r, vp9_mv_class_tree, mvcomp->classes); @@ -258,25 +241,23 @@ static int read_mv_component(vpx_reader *r, // Integer part if (class0) { - d = vpx_read_tree(r, vp9_mv_class0_tree, mvcomp->class0); + d = vpx_read(r, mvcomp->class0[0]); mag = 0; } else { int i; const int n = mv_class + CLASS0_BITS - 1; // number of bits d = 0; - for (i = 0; i < n; ++i) - d |= vpx_read(r, mvcomp->bits[i]) << i; + for (i = 0; i < n; ++i) d |= vpx_read(r, mvcomp->bits[i]) << i; mag = CLASS0_SIZE << (mv_class + 2); } // Fractional part - fr = vpx_read_tree(r, vp9_mv_fp_tree, class0 ? mvcomp->class0_fp[d] - : mvcomp->fp); + fr = vpx_read_tree(r, vp9_mv_fp_tree, + class0 ? mvcomp->class0_fp[d] : mvcomp->fp); // High precision part (if hp is not used, the default value of the hp is 1) - hp = usehp ? vpx_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp) - : 1; + hp = usehp ? vpx_read(r, class0 ? 
mvcomp->class0_hp : mvcomp->hp) : 1; // Result mag += ((d << 3) | (fr << 1) | hp) + 1; @@ -284,12 +265,12 @@ static int read_mv_component(vpx_reader *r, } static INLINE void read_mv(vpx_reader *r, MV *mv, const MV *ref, - const nmv_context *ctx, - nmv_context_counts *counts, int allow_hp) { + const nmv_context *ctx, nmv_context_counts *counts, + int allow_hp) { const MV_JOINT_TYPE joint_type = (MV_JOINT_TYPE)vpx_read_tree(r, vp9_mv_joint_tree, ctx->joints); const int use_hp = allow_hp && use_mv_hp(ref); - MV diff = {0, 0}; + MV diff = { 0, 0 }; if (mv_joint_vertical(joint_type)) diff.row = read_mv_component(r, &ctx->comps[0], use_hp); @@ -311,8 +292,7 @@ static REFERENCE_MODE read_block_reference_mode(VP9_COMMON *cm, const REFERENCE_MODE mode = (REFERENCE_MODE)vpx_read(r, cm->fc->comp_inter_prob[ctx]); FRAME_COUNTS *counts = xd->counts; - if (counts) - ++counts->comp_inter[ctx][mode]; + if (counts) ++counts->comp_inter[ctx][mode]; return mode; // SINGLE_REFERENCE or COMPOUND_REFERENCE } else { return cm->reference_mode; @@ -321,8 +301,8 @@ static REFERENCE_MODE read_block_reference_mode(VP9_COMMON *cm, // Read the referncence frame static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd, - vpx_reader *r, - int segment_id, MV_REFERENCE_FRAME ref_frame[2]) { + vpx_reader *r, int segment_id, + MV_REFERENCE_FRAME ref_frame[2]) { FRAME_CONTEXT *const fc = cm->fc; FRAME_COUNTS *counts = xd->counts; @@ -337,20 +317,17 @@ static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd, const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref]; const int ctx = vp9_get_pred_context_comp_ref_p(cm, xd); const int bit = vpx_read(r, fc->comp_ref_prob[ctx]); - if (counts) - ++counts->comp_ref[ctx][bit]; + if (counts) ++counts->comp_ref[ctx][bit]; ref_frame[idx] = cm->comp_fixed_ref; ref_frame[!idx] = cm->comp_var_ref[bit]; } else if (mode == SINGLE_REFERENCE) { const int ctx0 = vp9_get_pred_context_single_ref_p1(xd); const int bit0 = vpx_read(r, 
fc->single_ref_prob[ctx0][0]); - if (counts) - ++counts->single_ref[ctx0][0][bit0]; + if (counts) ++counts->single_ref[ctx0][0][bit0]; if (bit0) { const int ctx1 = vp9_get_pred_context_single_ref_p2(xd); const int bit1 = vpx_read(r, fc->single_ref_prob[ctx1][1]); - if (counts) - ++counts->single_ref[ctx1][1][bit1]; + if (counts) ++counts->single_ref[ctx1][1][bit1]; ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME; } else { ref_frame[0] = LAST_FRAME; @@ -363,17 +340,14 @@ static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd, } } - -static INLINE INTERP_FILTER read_switchable_interp_filter( - VP9_COMMON *const cm, MACROBLOCKD *const xd, - vpx_reader *r) { - const int ctx = vp9_get_pred_context_switchable_interp(xd); - const INTERP_FILTER type = - (INTERP_FILTER)vpx_read_tree(r, vp9_switchable_interp_tree, - cm->fc->switchable_interp_prob[ctx]); +static INLINE INTERP_FILTER read_switchable_interp_filter(VP9_COMMON *const cm, + MACROBLOCKD *const xd, + vpx_reader *r) { + const int ctx = get_pred_context_switchable_interp(xd); + const INTERP_FILTER type = (INTERP_FILTER)vpx_read_tree( + r, vp9_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]); FRAME_COUNTS *counts = xd->counts; - if (counts) - ++counts->switchable_interp[ctx][type]; + if (counts) ++counts->switchable_interp[ctx][type]; return type; } @@ -383,9 +357,6 @@ static void read_intra_block_mode_info(VP9_COMMON *const cm, const BLOCK_SIZE bsize = mi->sb_type; int i; - mi->ref_frame[0] = INTRA_FRAME; - mi->ref_frame[1] = NONE; - switch (bsize) { case BLOCK_4X4: for (i = 0; i < 4; ++i) @@ -393,27 +364,31 @@ static void read_intra_block_mode_info(VP9_COMMON *const cm, mi->mode = mi->bmi[3].as_mode; break; case BLOCK_4X8: - mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode_y(cm, xd, - r, 0); + mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode_y(cm, xd, r, 0); mi->bmi[1].as_mode = mi->bmi[3].as_mode = mi->mode = read_intra_mode_y(cm, xd, r, 0); break; case BLOCK_8X4: - 
mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode_y(cm, xd, - r, 0); + mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode_y(cm, xd, r, 0); mi->bmi[2].as_mode = mi->bmi[3].as_mode = mi->mode = read_intra_mode_y(cm, xd, r, 0); break; - default: - mi->mode = read_intra_mode_y(cm, xd, r, size_group_lookup[bsize]); + default: mi->mode = read_intra_mode_y(cm, xd, r, size_group_lookup[bsize]); } mi->uv_mode = read_intra_mode_uv(cm, xd, r, mi->mode); + + // Initialize interp_filter here so we do not have to check for inter block + // modes in get_pred_context_switchable_interp() + mi->interp_filter = SWITCHABLE_FILTERS; + + mi->ref_frame[0] = INTRA_FRAME; + mi->ref_frame[1] = NONE; } static INLINE int is_mv_valid(const MV *mv) { - return mv->row > MV_LOW && mv->row < MV_UPP && - mv->col > MV_LOW && mv->col < MV_UPP; + return mv->row > MV_LOW && mv->row < MV_UPP && mv->col > MV_LOW && + mv->col < MV_UPP; } static INLINE void copy_mv_pair(int_mv *dst, const int_mv *src) { @@ -425,9 +400,8 @@ static INLINE void zero_mv_pair(int_mv *dst) { } static INLINE int assign_mv(VP9_COMMON *cm, MACROBLOCKD *xd, - PREDICTION_MODE mode, - int_mv mv[2], int_mv ref_mv[2], - int_mv near_nearest_mv[2], + PREDICTION_MODE mode, int_mv mv[2], + int_mv ref_mv[2], int_mv near_nearest_mv[2], int is_compound, int allow_hp, vpx_reader *r) { int i; int ret = 1; @@ -452,9 +426,7 @@ static INLINE int assign_mv(VP9_COMMON *cm, MACROBLOCKD *xd, zero_mv_pair(mv); break; } - default: { - return 0; - } + default: { return 0; } } return ret; } @@ -464,23 +436,21 @@ static int read_is_inter_block(VP9_COMMON *const cm, MACROBLOCKD *const xd, if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) { return get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) != INTRA_FRAME; } else { - const int ctx = vp9_get_intra_inter_context(xd); + const int ctx = get_intra_inter_context(xd); const int is_inter = vpx_read(r, cm->fc->intra_inter_prob[ctx]); FRAME_COUNTS *counts = xd->counts; - if 
(counts) - ++counts->intra_inter[ctx][is_inter]; + if (counts) ++counts->intra_inter[ctx][is_inter]; return is_inter; } } -static void dec_find_best_ref_mvs(MACROBLOCKD *xd, int allow_hp, int_mv *mvlist, - int_mv *best_mv, int refmv_count) { +static void dec_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *best_mv, + int refmv_count) { int i; // Make sure all the candidates are properly clamped etc for (i = 0; i < refmv_count; ++i) { lower_mv_precision(&mvlist[i].as_mv, allow_hp); - clamp_mv2(&mvlist[i].as_mv, xd); *best_mv = mvlist[i]; } } @@ -495,35 +465,33 @@ static void fpm_sync(void *const data, int mi_row) { // already in the list. If it's the second motion vector or early_break // it will also skip all additional processing and jump to Done! #define ADD_MV_REF_LIST_EB(mv, refmv_count, mv_ref_list, Done) \ - do { \ - if (refmv_count) { \ - if ((mv).as_int != (mv_ref_list)[0].as_int) { \ - (mv_ref_list)[(refmv_count)] = (mv); \ - refmv_count++; \ - goto Done; \ - } \ - } else { \ - (mv_ref_list)[(refmv_count)++] = (mv); \ - if (early_break) \ - goto Done; \ - } \ + do { \ + if (refmv_count) { \ + if ((mv).as_int != (mv_ref_list)[0].as_int) { \ + (mv_ref_list)[(refmv_count)] = (mv); \ + refmv_count++; \ + goto Done; \ + } \ + } else { \ + (mv_ref_list)[(refmv_count)++] = (mv); \ + if (early_break) goto Done; \ + } \ } while (0) // If either reference frame is different, not INTRA, and they // are different from each other scale and add the mv to our list. 
-#define IF_DIFF_REF_FRAME_ADD_MV_EB(mbmi, ref_frame, ref_sign_bias, \ - refmv_count, mv_ref_list, Done) \ - do { \ - if (is_inter_block(mbmi)) { \ - if ((mbmi)->ref_frame[0] != ref_frame) \ +#define IF_DIFF_REF_FRAME_ADD_MV_EB(mbmi, ref_frame, ref_sign_bias, \ + refmv_count, mv_ref_list, Done) \ + do { \ + if (is_inter_block(mbmi)) { \ + if ((mbmi)->ref_frame[0] != ref_frame) \ ADD_MV_REF_LIST_EB(scale_mv((mbmi), 0, ref_frame, ref_sign_bias), \ - refmv_count, mv_ref_list, Done); \ - if (has_second_ref(mbmi) && \ - (mbmi)->ref_frame[1] != ref_frame && \ - (mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) \ + refmv_count, mv_ref_list, Done); \ + if (has_second_ref(mbmi) && (mbmi)->ref_frame[1] != ref_frame && \ + (mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) \ ADD_MV_REF_LIST_EB(scale_mv((mbmi), 1, ref_frame, ref_sign_bias), \ - refmv_count, mv_ref_list, Done); \ - } \ + refmv_count, mv_ref_list, Done); \ + } \ } while (0) // This function searches the neighborhood of a given MB/SB @@ -531,14 +499,16 @@ static void fpm_sync(void *const data, int mi_row) { static int dec_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd, PREDICTION_MODE mode, MV_REFERENCE_FRAME ref_frame, const POSITION *const mv_ref_search, - int_mv *mv_ref_list, - int mi_row, int mi_col, int block, int is_sub8x8, - find_mv_refs_sync sync, void *const data) { + int_mv *mv_ref_list, int mi_row, int mi_col, + int block, int is_sub8x8, find_mv_refs_sync sync, + void *const data) { const int *ref_sign_bias = cm->ref_frame_sign_bias; int i, refmv_count = 0; int different_ref_found = 0; - const MV_REF *const prev_frame_mvs = cm->use_prev_frame_mvs ? - cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col : NULL; + const MV_REF *const prev_frame_mvs = + cm->use_prev_frame_mvs + ? cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col + : NULL; const TileInfo *const tile = &xd->tile; // If mode is nearestmv or newmv (uses nearestmv as a reference) then stop // searching after the first mv is found. 
@@ -561,11 +531,11 @@ static int dec_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd, if (candidate_mi->ref_frame[0] == ref_frame) ADD_MV_REF_LIST_EB( get_sub_block_mv(candidate_mi, 0, mv_ref->col, block), - refmv_count, mv_ref_list, Done); + refmv_count, mv_ref_list, Done); else if (candidate_mi->ref_frame[1] == ref_frame) ADD_MV_REF_LIST_EB( get_sub_block_mv(candidate_mi, 1, mv_ref->col, block), - refmv_count, mv_ref_list, Done); + refmv_count, mv_ref_list, Done); } } } @@ -587,14 +557,14 @@ static int dec_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd, } } - // TODO(hkuang): Remove this sync after fixing pthread_cond_broadcast - // on windows platform. The sync here is unnecessary if use_prev_frame_mvs - // is 0. But after removing it, there will be hang in the unit test on windows - // due to several threads waiting for a thread's signal. +// TODO(hkuang): Remove this sync after fixing pthread_cond_broadcast +// on windows platform. The sync here is unnecessary if use_prev_frame_mvs +// is 0. But after removing it, there will be hang in the unit test on windows +// due to several threads waiting for a thread's signal. #if defined(_WIN32) && !HAVE_PTHREAD_H - if (cm->frame_parallel_decode && sync != NULL) { - sync(data, mi_row); - } + if (cm->frame_parallel_decode && sync != NULL) { + sync(data, mi_row); + } #endif // Check the last frame's mode and mv info. 
@@ -660,10 +630,9 @@ static int dec_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd, // we only care about the nearestmv for the remaining modes refmv_count = 1; - Done: +Done: // Clamp vectors - for (i = 0; i < refmv_count; ++i) - clamp_mv_ref(&mv_ref_list[i].as_mv, xd); + for (i = 0; i < refmv_count; ++i) clamp_mv_ref(&mv_ref_list[i].as_mv, xd); return refmv_count; } @@ -681,14 +650,12 @@ static void append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd, assert(MAX_MV_REF_CANDIDATES == 2); - refmv_count = dec_find_mv_refs(cm, xd, b_mode, mi->ref_frame[ref], - mv_ref_search, mv_list, mi_row, mi_col, block, - 1, NULL, NULL); + refmv_count = + dec_find_mv_refs(cm, xd, b_mode, mi->ref_frame[ref], mv_ref_search, + mv_list, mi_row, mi_col, block, 1, NULL, NULL); switch (block) { - case 0: - best_sub8x8->as_int = mv_list[refmv_count - 1].as_int; - break; + case 0: best_sub8x8->as_int = mv_list[refmv_count - 1].as_int; break; case 1: case 2: if (b_mode == NEARESTMV) { @@ -719,14 +686,13 @@ static void append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd, } } break; - default: - assert(0 && "Invalid block index."); + default: assert(0 && "Invalid block index."); } } static uint8_t get_mode_context(const VP9_COMMON *cm, const MACROBLOCKD *xd, - const POSITION *const mv_ref_search, - int mi_row, int mi_col) { + const POSITION *const mv_ref_search, int mi_row, + int mi_col) { int i; int context_counter = 0; const TileInfo *const tile = &xd->tile; @@ -747,8 +713,8 @@ static uint8_t get_mode_context(const VP9_COMMON *cm, const MACROBLOCKD *xd, static void read_inter_block_mode_info(VP9Decoder *const pbi, MACROBLOCKD *const xd, - MODE_INFO *const mi, - int mi_row, int mi_col, vpx_reader *r) { + MODE_INFO *const mi, int mi_row, + int mi_col, vpx_reader *r) { VP9_COMMON *const cm = &pbi->common; const BLOCK_SIZE bsize = mi->sb_type; const int allow_hp = cm->allow_high_precision_mv; @@ -764,9 +730,9 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi, 
if (segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP)) { mi->mode = ZEROMV; if (bsize < BLOCK_8X8) { - vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM, - "Invalid usage of segement feature on small blocks"); - return; + vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM, + "Invalid usage of segement feature on small blocks"); + return; } } else { if (bsize >= BLOCK_8X8) @@ -784,19 +750,19 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi, const MV_REFERENCE_FRAME frame = mi->ref_frame[ref]; int refmv_count; - refmv_count = dec_find_mv_refs(cm, xd, mi->mode, frame, mv_ref_search, - tmp_mvs, mi_row, mi_col, -1, 0, - fpm_sync, (void *)pbi); + refmv_count = + dec_find_mv_refs(cm, xd, mi->mode, frame, mv_ref_search, tmp_mvs, + mi_row, mi_col, -1, 0, fpm_sync, (void *)pbi); - dec_find_best_ref_mvs(xd, allow_hp, tmp_mvs, &best_ref_mvs[ref], + dec_find_best_ref_mvs(allow_hp, tmp_mvs, &best_ref_mvs[ref], refmv_count); } } } mi->interp_filter = (cm->interp_filter == SWITCHABLE) - ? read_switchable_interp_filter(cm, xd, r) - : cm->interp_filter; + ? 
read_switchable_interp_filter(cm, xd, r) + : cm->interp_filter; if (bsize < BLOCK_8X8) { const int num_4x4_w = 1 << xd->bmode_blocks_wl; @@ -821,10 +787,8 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi, break; } - if (num_4x4_h == 2) - mi->bmi[j + 2] = mi->bmi[j]; - if (num_4x4_w == 2) - mi->bmi[j + 1] = mi->bmi[j]; + if (num_4x4_h == 2) mi->bmi[j + 2] = mi->bmi[j]; + if (num_4x4_w == 2) mi->bmi[j + 1] = mi->bmi[j]; } } @@ -838,13 +802,15 @@ static void read_inter_block_mode_info(VP9Decoder *const pbi, } static void read_inter_frame_mode_info(VP9Decoder *const pbi, - MACROBLOCKD *const xd, - int mi_row, int mi_col, vpx_reader *r) { + MACROBLOCKD *const xd, int mi_row, + int mi_col, vpx_reader *r, int x_mis, + int y_mis) { VP9_COMMON *const cm = &pbi->common; MODE_INFO *const mi = xd->mi[0]; int inter_block; - mi->segment_id = read_inter_segment_id(cm, xd, mi_row, mi_col, r); + mi->segment_id = + read_inter_segment_id(cm, xd, mi_row, mi_col, r, x_mis, y_mis); mi->skip = read_skip(cm, xd, mi->segment_id, r); inter_block = read_is_inter_block(cm, xd, mi->segment_id, r); mi->tx_size = read_tx_size(cm, xd, !mi->skip || !inter_block, r); @@ -860,18 +826,19 @@ static INLINE void copy_ref_frame_pair(MV_REFERENCE_FRAME *dst, memcpy(dst, src, sizeof(*dst) * 2); } -void vpx_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd, - int mi_row, int mi_col, vpx_reader *r, - int x_mis, int y_mis) { +void vp9_read_mode_info(TileWorkerData *twd, VP9Decoder *const pbi, int mi_row, + int mi_col, int x_mis, int y_mis) { + vpx_reader *r = &twd->bit_reader; + MACROBLOCKD *const xd = &twd->xd; VP9_COMMON *const cm = &pbi->common; MODE_INFO *const mi = xd->mi[0]; - MV_REF* frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col; + MV_REF *frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col; int w, h; if (frame_is_intra_only(cm)) { - read_intra_frame_mode_info(cm, xd, mi_row, mi_col, r); + read_intra_frame_mode_info(cm, xd, mi_row, mi_col, r, x_mis, 
y_mis); } else { - read_inter_frame_mode_info(pbi, xd, mi_row, mi_col, r); + read_inter_frame_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis); for (h = 0; h < y_mis; ++h) { for (w = 0; w < x_mis; ++w) { @@ -882,4 +849,10 @@ void vpx_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd, frame_mvs += cm->mi_cols; } } +#if 0 // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH + if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && + (xd->above_mi == NULL || xd->left_mi == NULL) && + !is_inter_block(mi) && need_top_left[mi->uv_mode]) + assert(0); +#endif // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH } diff --git a/libs/libvpx/vp9/decoder/vp9_decodemv.h b/libs/libvpx/vp9/decoder/vp9_decodemv.h index 75f568cf1f..b460cb8fb1 100644 --- a/libs/libvpx/vp9/decoder/vp9_decodemv.h +++ b/libs/libvpx/vp9/decoder/vp9_decodemv.h @@ -19,9 +19,8 @@ extern "C" { #endif -void vpx_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd, - int mi_row, int mi_col, vpx_reader *r, - int x_mis, int y_mis); +void vp9_read_mode_info(TileWorkerData *twd, VP9Decoder *const pbi, int mi_row, + int mi_col, int x_mis, int y_mis); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp9/decoder/vp9_decoder.c b/libs/libvpx/vp9/decoder/vp9_decoder.c index f5da07ea02..37693f0944 100644 --- a/libs/libvpx/vp9/decoder/vp9_decoder.c +++ b/libs/libvpx/vp9/decoder/vp9_decoder.c @@ -57,12 +57,10 @@ static void vp9_dec_setup_mi(VP9_COMMON *cm) { static int vp9_dec_alloc_mi(VP9_COMMON *cm, int mi_size) { cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip)); - if (!cm->mip) - return 1; + if (!cm->mip) return 1; cm->mi_alloc_size = mi_size; - cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*)); - if (!cm->mi_grid_base) - return 1; + cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *)); + if (!cm->mi_grid_base) return 1; return 0; } @@ -77,8 +75,7 @@ VP9Decoder *vp9_decoder_create(BufferPool *const pool) { VP9Decoder *volatile const pbi = 
vpx_memalign(32, sizeof(*pbi)); VP9_COMMON *volatile const cm = pbi ? &pbi->common : NULL; - if (!cm) - return NULL; + if (!cm) return NULL; vp9_zero(*pbi); @@ -90,11 +87,10 @@ VP9Decoder *vp9_decoder_create(BufferPool *const pool) { cm->error.setjmp = 1; - CHECK_MEM_ERROR(cm, cm->fc, - (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc))); - CHECK_MEM_ERROR(cm, cm->frame_contexts, - (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, - sizeof(*cm->frame_contexts))); + CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc))); + CHECK_MEM_ERROR( + cm, cm->frame_contexts, + (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts))); pbi->need_resync = 1; once(initialize_dec); @@ -126,16 +122,16 @@ VP9Decoder *vp9_decoder_create(BufferPool *const pool) { void vp9_decoder_remove(VP9Decoder *pbi) { int i; - if (!pbi) - return; + if (!pbi) return; vpx_get_worker_interface()->end(&pbi->lf_worker); vpx_free(pbi->lf_worker.data1); - vpx_free(pbi->tile_data); + for (i = 0; i < pbi->num_tile_workers; ++i) { VPxWorker *const worker = &pbi->tile_workers[i]; vpx_get_worker_interface()->end(worker); } + vpx_free(pbi->tile_worker_data); vpx_free(pbi->tile_workers); @@ -148,8 +144,8 @@ void vp9_decoder_remove(VP9Decoder *pbi) { static int equal_dimensions(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b) { - return a->y_height == b->y_height && a->y_width == b->y_width && - a->uv_height == b->uv_height && a->uv_width == b->uv_width; + return a->y_height == b->y_height && a->y_width == b->y_width && + a->uv_height == b->uv_height && a->uv_width == b->uv_width; } vpx_codec_err_t vp9_copy_reference_dec(VP9Decoder *pbi, @@ -175,55 +171,53 @@ vpx_codec_err_t vp9_copy_reference_dec(VP9Decoder *pbi, else vp8_yv12_copy_frame(cfg, sd); } else { - vpx_internal_error(&cm->error, VPX_CODEC_ERROR, - "Invalid reference frame"); + vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame"); } return cm->error.error_code; } - vpx_codec_err_t 
vp9_set_reference_dec(VP9_COMMON *cm, VP9_REFFRAME ref_frame_flag, YV12_BUFFER_CONFIG *sd) { - RefBuffer *ref_buf = NULL; - RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; + int idx; + YV12_BUFFER_CONFIG *ref_buf = NULL; // TODO(jkoleszar): The decoder doesn't have any real knowledge of what the // encoder is using the frame buffers for. This is just a stub to keep the // vpxenc --test-decode functionality working, and will be replaced in a // later commit that adds VP9-specific controls for this functionality. + // (Yunqing) The set_reference control depends on the following setting in + // encoder. + // cpi->lst_fb_idx = 0; + // cpi->gld_fb_idx = 1; + // cpi->alt_fb_idx = 2; if (ref_frame_flag == VP9_LAST_FLAG) { - ref_buf = &cm->frame_refs[0]; + idx = cm->ref_frame_map[0]; } else if (ref_frame_flag == VP9_GOLD_FLAG) { - ref_buf = &cm->frame_refs[1]; + idx = cm->ref_frame_map[1]; } else if (ref_frame_flag == VP9_ALT_FLAG) { - ref_buf = &cm->frame_refs[2]; + idx = cm->ref_frame_map[2]; } else { - vpx_internal_error(&cm->error, VPX_CODEC_ERROR, - "Invalid reference frame"); + vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame"); return cm->error.error_code; } - if (!equal_dimensions(ref_buf->buf, sd)) { + if (idx < 0 || idx >= FRAME_BUFFERS) { + vpx_internal_error(&cm->error, VPX_CODEC_ERROR, + "Invalid reference frame map"); + return cm->error.error_code; + } + + // Get the destination reference buffer. + ref_buf = &cm->buffer_pool->frame_bufs[idx].buf; + + if (!equal_dimensions(ref_buf, sd)) { vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Incorrect buffer dimensions"); } else { - int *ref_fb_ptr = &ref_buf->idx; - - // Find an empty frame buffer. - const int free_fb = get_free_fb(cm); - if (cm->new_fb_idx == INVALID_IDX) - return VPX_CODEC_MEM_ERROR; - - // Decrease ref_count since it will be increased again in - // ref_cnt_fb() below. - --frame_bufs[free_fb].ref_count; - - // Manage the reference counters and copy image. 
- ref_cnt_fb(frame_bufs, ref_fb_ptr, free_fb); - ref_buf->buf = &frame_bufs[*ref_fb_ptr].buf; - vp8_yv12_copy_frame(sd, ref_buf->buf); + // Overwrite the reference frame buffer. + vp8_yv12_copy_frame(sd, ref_buf); } return cm->error.error_code; @@ -271,8 +265,8 @@ static void swap_frame_buffers(VP9Decoder *pbi) { cm->frame_refs[ref_index].idx = -1; } -int vp9_receive_compressed_data(VP9Decoder *pbi, - size_t size, const uint8_t **psource) { +int vp9_receive_compressed_data(VP9Decoder *pbi, size_t size, + const uint8_t **psource) { VP9_COMMON *volatile const cm = &pbi->common; BufferPool *volatile const pool = cm->buffer_pool; RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs; @@ -299,14 +293,17 @@ int vp9_receive_compressed_data(VP9Decoder *pbi, // Check if the previous frame was a frame without any references to it. // Release frame buffer if not decoding in frame parallel mode. - if (!pbi->frame_parallel_decode && cm->new_fb_idx >= 0 - && frame_bufs[cm->new_fb_idx].ref_count == 0) + if (!pbi->frame_parallel_decode && cm->new_fb_idx >= 0 && + frame_bufs[cm->new_fb_idx].ref_count == 0) pool->release_fb_cb(pool->cb_priv, &frame_bufs[cm->new_fb_idx].raw_frame_buffer); // Find a free frame buffer. Return error if can not find any. cm->new_fb_idx = get_free_fb(cm); - if (cm->new_fb_idx == INVALID_IDX) - return VPX_CODEC_MEM_ERROR; + if (cm->new_fb_idx == INVALID_IDX) { + vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, + "Unable to find free frame buffer"); + return cm->error.error_code; + } // Assign a MV array to the frame buffer. 
cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx]; @@ -325,7 +322,6 @@ int vp9_receive_compressed_data(VP9Decoder *pbi, pbi->cur_buf = &frame_bufs[cm->new_fb_idx]; } - if (setjmp(cm->error.jmp)) { const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); int i; @@ -420,14 +416,12 @@ int vp9_get_raw_frame(VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd, (void)*flags; #endif - if (pbi->ready_for_new_data == 1) - return ret; + if (pbi->ready_for_new_data == 1) return ret; pbi->ready_for_new_data = 1; /* no raw frame to show!!! */ - if (!cm->show_frame) - return ret; + if (!cm->show_frame) return ret; pbi->ready_for_new_data = 1; @@ -446,8 +440,7 @@ int vp9_get_raw_frame(VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd, return ret; } -vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, - size_t data_sz, +vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, size_t data_sz, uint32_t sizes[8], int *count, vpx_decrypt_cb decrypt_cb, void *decrypt_state) { @@ -470,18 +463,16 @@ vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, // This chunk is marked as having a superframe index but doesn't have // enough data for it, thus it's an invalid superframe index. - if (data_sz < index_sz) - return VPX_CODEC_CORRUPT_FRAME; + if (data_sz < index_sz) return VPX_CODEC_CORRUPT_FRAME; { - const uint8_t marker2 = read_marker(decrypt_cb, decrypt_state, - data + data_sz - index_sz); + const uint8_t marker2 = + read_marker(decrypt_cb, decrypt_state, data + data_sz - index_sz); // This chunk is marked as having a superframe index but doesn't have // the matching marker byte at the front of the index therefore it's an // invalid chunk. 
- if (marker != marker2) - return VPX_CODEC_CORRUPT_FRAME; + if (marker != marker2) return VPX_CODEC_CORRUPT_FRAME; } { @@ -500,8 +491,7 @@ vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, for (i = 0; i < frames; ++i) { uint32_t this_sz = 0; - for (j = 0; j < mag; ++j) - this_sz |= (*x++) << (j * 8); + for (j = 0; j < mag; ++j) this_sz |= ((uint32_t)(*x++)) << (j * 8); sizes[i] = this_sz; } *count = frames; diff --git a/libs/libvpx/vp9/decoder/vp9_decoder.h b/libs/libvpx/vp9/decoder/vp9_decoder.h index afa400941d..427baf1e0b 100644 --- a/libs/libvpx/vp9/decoder/vp9_decoder.h +++ b/libs/libvpx/vp9/decoder/vp9_decoder.h @@ -27,15 +27,6 @@ extern "C" { #endif -// TODO(hkuang): combine this with TileWorkerData. -typedef struct TileData { - VP9_COMMON *cm; - vpx_reader bit_reader; - DECLARE_ALIGNED(16, MACROBLOCKD, xd); - /* dqcoeff are shared by all the planes. So planes must be decoded serially */ - DECLARE_ALIGNED(16, tran_low_t, dqcoeff[32 * 32]); -} TileData; - typedef struct TileBuffer { const uint8_t *data; size_t size; @@ -66,16 +57,14 @@ typedef struct VP9Decoder { // TODO(hkuang): Combine this with cur_buf in macroblockd as they are // the same. - RefCntBuffer *cur_buf; // Current decoding frame buffer. + RefCntBuffer *cur_buf; // Current decoding frame buffer. - VPxWorker *frame_worker_owner; // frame_worker that owns this pbi. + VPxWorker *frame_worker_owner; // frame_worker that owns this pbi. VPxWorker lf_worker; VPxWorker *tile_workers; TileWorkerData *tile_worker_data; TileBuffer tile_buffers[64]; int num_tile_workers; - - TileData *tile_data; int total_tiles; VP9LfSync lf_row_sync; @@ -85,12 +74,12 @@ typedef struct VP9Decoder { int max_threads; int inv_tile_order; - int need_resync; // wait for key/intra-only frame. + int need_resync; // wait for key/intra-only frame. int hold_ref_buf; // hold the reference buffer. 
} VP9Decoder; -int vp9_receive_compressed_data(struct VP9Decoder *pbi, - size_t size, const uint8_t **dest); +int vp9_receive_compressed_data(struct VP9Decoder *pbi, size_t size, + const uint8_t **dest); int vp9_get_raw_frame(struct VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd, vp9_ppflags_t *flags); @@ -104,8 +93,7 @@ vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm, YV12_BUFFER_CONFIG *sd); static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb, - void *decrypt_state, - const uint8_t *data) { + void *decrypt_state, const uint8_t *data) { if (decrypt_cb) { uint8_t marker; decrypt_cb(decrypt_state, data, &marker, 1); @@ -116,8 +104,7 @@ static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb, // This function is exposed for use in tests, as well as the inlined function // "read_marker". -vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, - size_t data_sz, +vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, size_t data_sz, uint32_t sizes[8], int *count, vpx_decrypt_cb decrypt_cb, void *decrypt_state); diff --git a/libs/libvpx/vp9/decoder/vp9_detokenize.c b/libs/libvpx/vp9/decoder/vp9_detokenize.c index dcc75b9d2d..7048fb1ca0 100644 --- a/libs/libvpx/vp9/decoder/vp9_detokenize.c +++ b/libs/libvpx/vp9/decoder/vp9_detokenize.c @@ -20,25 +20,58 @@ #include "vp9/decoder/vp9_detokenize.h" -#define EOB_CONTEXT_NODE 0 -#define ZERO_CONTEXT_NODE 1 -#define ONE_CONTEXT_NODE 2 +#define EOB_CONTEXT_NODE 0 +#define ZERO_CONTEXT_NODE 1 +#define ONE_CONTEXT_NODE 2 -#define INCREMENT_COUNT(token) \ - do { \ - if (counts) \ - ++coef_counts[band][ctx][token]; \ +#define INCREMENT_COUNT(token) \ + do { \ + if (counts) ++coef_counts[band][ctx][token]; \ } while (0) -static INLINE int read_coeff(const vpx_prob *probs, int n, vpx_reader *r) { +static INLINE int read_bool(vpx_reader *r, int prob, BD_VALUE *value, + int *count, unsigned int *range) { + const unsigned int split = (*range * prob + (256 - prob)) >> CHAR_BIT; + const BD_VALUE bigsplit = 
(BD_VALUE)split << (BD_VALUE_SIZE - CHAR_BIT); + + if (*count < 0) { + r->value = *value; + r->count = *count; + vpx_reader_fill(r); + *value = r->value; + *count = r->count; + } + + if (*value >= bigsplit) { + *range = *range - split; + *value = *value - bigsplit; + { + const int shift = vpx_norm[*range]; + *range <<= shift; + *value <<= shift; + *count -= shift; + } + return 1; + } + *range = split; + { + const int shift = vpx_norm[*range]; + *range <<= shift; + *value <<= shift; + *count -= shift; + } + return 0; +} + +static INLINE int read_coeff(vpx_reader *r, const vpx_prob *probs, int n, + BD_VALUE *value, int *count, unsigned int *range) { int i, val = 0; for (i = 0; i < n; ++i) - val = (val << 1) | vpx_read(r, probs[i]); + val = (val << 1) | read_bool(r, probs[i], value, count, range); return val; } -static int decode_coefs(const MACROBLOCKD *xd, - PLANE_TYPE type, +static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff, TX_SIZE tx_size, const int16_t *dq, int ctx, const int16_t *scan, const int16_t *nb, vpx_reader *r) { @@ -47,28 +80,33 @@ static int decode_coefs(const MACROBLOCKD *xd, const FRAME_CONTEXT *const fc = xd->fc; const int ref = is_inter_block(xd->mi[0]); int band, c = 0; - const vpx_prob (*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] = + const vpx_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] = fc->coef_probs[tx_size][type][ref]; const vpx_prob *prob; - unsigned int (*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1]; - unsigned int (*eob_branch_count)[COEFF_CONTEXTS]; + unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1]; + unsigned int(*eob_branch_count)[COEFF_CONTEXTS]; uint8_t token_cache[32 * 32]; const uint8_t *band_translate = get_band_translate(tx_size); const int dq_shift = (tx_size == TX_32X32); - int v, token; + int v; int16_t dqv = dq[0]; const uint8_t *const cat6_prob = #if CONFIG_VP9_HIGHBITDEPTH - (xd->bd == VPX_BITS_12) ? 
vp9_cat6_prob_high12 : - (xd->bd == VPX_BITS_10) ? vp9_cat6_prob_high12 + 2 : + (xd->bd == VPX_BITS_12) + ? vp9_cat6_prob_high12 + : (xd->bd == VPX_BITS_10) ? vp9_cat6_prob_high12 + 2 : #endif // CONFIG_VP9_HIGHBITDEPTH - vp9_cat6_prob; + vp9_cat6_prob; const int cat6_bits = #if CONFIG_VP9_HIGHBITDEPTH - (xd->bd == VPX_BITS_12) ? 18 : - (xd->bd == VPX_BITS_10) ? 16 : + (xd->bd == VPX_BITS_12) ? 18 : (xd->bd == VPX_BITS_10) ? 16 : #endif // CONFIG_VP9_HIGHBITDEPTH - 14; + 14; + // Keep value, range, and count as locals. The compiler produces better + // results with the locals than using r directly. + BD_VALUE value = r->value; + unsigned int range = r->range; + int count = r->count; if (counts) { coef_counts = counts->coef[tx_size][type][ref]; @@ -79,138 +117,171 @@ static int decode_coefs(const MACROBLOCKD *xd, int val = -1; band = *band_translate++; prob = coef_probs[band][ctx]; - if (counts) - ++eob_branch_count[band][ctx]; - if (!vpx_read(r, prob[EOB_CONTEXT_NODE])) { + if (counts) ++eob_branch_count[band][ctx]; + if (!read_bool(r, prob[EOB_CONTEXT_NODE], &value, &count, &range)) { INCREMENT_COUNT(EOB_MODEL_TOKEN); break; } - while (!vpx_read(r, prob[ZERO_CONTEXT_NODE])) { + while (!read_bool(r, prob[ZERO_CONTEXT_NODE], &value, &count, &range)) { INCREMENT_COUNT(ZERO_TOKEN); dqv = dq[1]; token_cache[scan[c]] = 0; ++c; - if (c >= max_eob) + if (c >= max_eob) { + r->value = value; + r->range = range; + r->count = count; return c; // zero tokens at the end (no eob token) + } ctx = get_coef_context(nb, token_cache, c); band = *band_translate++; prob = coef_probs[band][ctx]; } - if (!vpx_read(r, prob[ONE_CONTEXT_NODE])) { - INCREMENT_COUNT(ONE_TOKEN); - token = ONE_TOKEN; - val = 1; - } else { + if (read_bool(r, prob[ONE_CONTEXT_NODE], &value, &count, &range)) { + const vpx_prob *p = vp9_pareto8_full[prob[PIVOT_NODE] - 1]; INCREMENT_COUNT(TWO_TOKEN); - token = vpx_read_tree(r, vp9_coef_con_tree, - vp9_pareto8_full[prob[PIVOT_NODE] - 1]); - switch (token) { - case 
TWO_TOKEN: - case THREE_TOKEN: - case FOUR_TOKEN: - val = token; - break; - case CATEGORY1_TOKEN: - val = CAT1_MIN_VAL + read_coeff(vp9_cat1_prob, 1, r); - break; - case CATEGORY2_TOKEN: - val = CAT2_MIN_VAL + read_coeff(vp9_cat2_prob, 2, r); - break; - case CATEGORY3_TOKEN: - val = CAT3_MIN_VAL + read_coeff(vp9_cat3_prob, 3, r); - break; - case CATEGORY4_TOKEN: - val = CAT4_MIN_VAL + read_coeff(vp9_cat4_prob, 4, r); - break; - case CATEGORY5_TOKEN: - val = CAT5_MIN_VAL + read_coeff(vp9_cat5_prob, 5, r); - break; - case CATEGORY6_TOKEN: - val = CAT6_MIN_VAL + read_coeff(cat6_prob, cat6_bits, r); - break; + if (read_bool(r, p[0], &value, &count, &range)) { + if (read_bool(r, p[3], &value, &count, &range)) { + token_cache[scan[c]] = 5; + if (read_bool(r, p[5], &value, &count, &range)) { + if (read_bool(r, p[7], &value, &count, &range)) { + val = CAT6_MIN_VAL + + read_coeff(r, cat6_prob, cat6_bits, &value, &count, &range); + } else { + val = CAT5_MIN_VAL + + read_coeff(r, vp9_cat5_prob, 5, &value, &count, &range); + } + } else if (read_bool(r, p[6], &value, &count, &range)) { + val = CAT4_MIN_VAL + + read_coeff(r, vp9_cat4_prob, 4, &value, &count, &range); + } else { + val = CAT3_MIN_VAL + + read_coeff(r, vp9_cat3_prob, 3, &value, &count, &range); + } + } else { + token_cache[scan[c]] = 4; + if (read_bool(r, p[4], &value, &count, &range)) { + val = CAT2_MIN_VAL + + read_coeff(r, vp9_cat2_prob, 2, &value, &count, &range); + } else { + val = CAT1_MIN_VAL + + read_coeff(r, vp9_cat1_prob, 1, &value, &count, &range); + } + } + v = (val * dqv) >> dq_shift; + } else { + if (read_bool(r, p[1], &value, &count, &range)) { + token_cache[scan[c]] = 3; + v = ((3 + read_bool(r, p[2], &value, &count, &range)) * dqv) >> + dq_shift; + } else { + token_cache[scan[c]] = 2; + v = (2 * dqv) >> dq_shift; + } } + } else { + INCREMENT_COUNT(ONE_TOKEN); + token_cache[scan[c]] = 1; + v = dqv >> dq_shift; } - v = (val * dqv) >> dq_shift; #if CONFIG_COEFFICIENT_RANGE_CHECKING #if 
CONFIG_VP9_HIGHBITDEPTH - dqcoeff[scan[c]] = highbd_check_range((vpx_read_bit(r) ? -v : v), - xd->bd); + dqcoeff[scan[c]] = + highbd_check_range(read_bool(r, 128, &value, &count, &range) ? -v : v), + xd->bd); #else - dqcoeff[scan[c]] = check_range(vpx_read_bit(r) ? -v : v); + dqcoeff[scan[c]] = + check_range(read_bool(r, 128, &value, &count, &range) ? -v : v); #endif // CONFIG_VP9_HIGHBITDEPTH #else - dqcoeff[scan[c]] = vpx_read_bit(r) ? -v : v; + if (read_bool(r, 128, &value, &count, &range)) { + dqcoeff[scan[c]] = -v; + } else { + dqcoeff[scan[c]] = v; + } #endif // CONFIG_COEFFICIENT_RANGE_CHECKING - token_cache[scan[c]] = vp9_pt_energy_class[token]; ++c; ctx = get_coef_context(nb, token_cache, c); dqv = dq[1]; } + r->value = value; + r->range = range; + r->count = count; return c; } -// TODO(slavarnway): Decode version of vp9_set_context. Modify vp9_set_context -// after testing is complete, then delete this version. -static -void dec_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd, - TX_SIZE tx_size, int has_eob, - int aoff, int loff) { - ENTROPY_CONTEXT *const a = pd->above_context + aoff; - ENTROPY_CONTEXT *const l = pd->left_context + loff; - const int tx_size_in_blocks = 1 << tx_size; - - // above - if (has_eob && xd->mb_to_right_edge < 0) { - int i; - const int blocks_wide = pd->n4_w + - (xd->mb_to_right_edge >> (5 + pd->subsampling_x)); - int above_contexts = tx_size_in_blocks; - if (above_contexts + aoff > blocks_wide) - above_contexts = blocks_wide - aoff; - - for (i = 0; i < above_contexts; ++i) - a[i] = has_eob; - for (i = above_contexts; i < tx_size_in_blocks; ++i) - a[i] = 0; - } else { - memset(a, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); +static void get_ctx_shift(MACROBLOCKD *xd, int *ctx_shift_a, int *ctx_shift_l, + int x, int y, unsigned int tx_size_in_blocks) { + if (xd->max_blocks_wide) { + if (tx_size_in_blocks + x > xd->max_blocks_wide) + *ctx_shift_a = (tx_size_in_blocks - (xd->max_blocks_wide - x)) * 8; } - 
- // left - if (has_eob && xd->mb_to_bottom_edge < 0) { - int i; - const int blocks_high = pd->n4_h + - (xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); - int left_contexts = tx_size_in_blocks; - if (left_contexts + loff > blocks_high) - left_contexts = blocks_high - loff; - - for (i = 0; i < left_contexts; ++i) - l[i] = has_eob; - for (i = left_contexts; i < tx_size_in_blocks; ++i) - l[i] = 0; - } else { - memset(l, has_eob, sizeof(ENTROPY_CONTEXT) * tx_size_in_blocks); + if (xd->max_blocks_high) { + if (tx_size_in_blocks + y > xd->max_blocks_high) + *ctx_shift_l = (tx_size_in_blocks - (xd->max_blocks_high - y)) * 8; } } -int vp9_decode_block_tokens(MACROBLOCKD *xd, - int plane, const scan_order *sc, - int x, int y, - TX_SIZE tx_size, vpx_reader *r, +int vp9_decode_block_tokens(TileWorkerData *twd, int plane, + const scan_order *sc, int x, int y, TX_SIZE tx_size, int seg_id) { + vpx_reader *r = &twd->bit_reader; + MACROBLOCKD *xd = &twd->xd; struct macroblockd_plane *const pd = &xd->plane[plane]; const int16_t *const dequant = pd->seg_dequant[seg_id]; - const int ctx = get_entropy_context(tx_size, pd->above_context + x, - pd->left_context + y); - const int eob = decode_coefs(xd, get_plane_type(plane), - pd->dqcoeff, tx_size, - dequant, ctx, sc->scan, sc->neighbors, r); - dec_set_contexts(xd, pd, tx_size, eob > 0, x, y); + int eob; + ENTROPY_CONTEXT *a = pd->above_context + x; + ENTROPY_CONTEXT *l = pd->left_context + y; + int ctx; + int ctx_shift_a = 0; + int ctx_shift_l = 0; + + switch (tx_size) { + case TX_4X4: + ctx = a[0] != 0; + ctx += l[0] != 0; + eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size, + dequant, ctx, sc->scan, sc->neighbors, r); + a[0] = l[0] = (eob > 0); + break; + case TX_8X8: + get_ctx_shift(xd, &ctx_shift_a, &ctx_shift_l, x, y, 1 << TX_8X8); + ctx = !!*(const uint16_t *)a; + ctx += !!*(const uint16_t *)l; + eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size, + dequant, ctx, sc->scan, sc->neighbors, r); + 
*(uint16_t *)a = ((eob > 0) * 0x0101) >> ctx_shift_a; + *(uint16_t *)l = ((eob > 0) * 0x0101) >> ctx_shift_l; + break; + case TX_16X16: + get_ctx_shift(xd, &ctx_shift_a, &ctx_shift_l, x, y, 1 << TX_16X16); + ctx = !!*(const uint32_t *)a; + ctx += !!*(const uint32_t *)l; + eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size, + dequant, ctx, sc->scan, sc->neighbors, r); + *(uint32_t *)a = ((eob > 0) * 0x01010101) >> ctx_shift_a; + *(uint32_t *)l = ((eob > 0) * 0x01010101) >> ctx_shift_l; + break; + case TX_32X32: + get_ctx_shift(xd, &ctx_shift_a, &ctx_shift_l, x, y, 1 << TX_32X32); + // NOTE: casting to uint64_t here is safe because the default memory + // alignment is at least 8 bytes and the TX_32X32 is aligned on 8 byte + // boundaries. + ctx = !!*(const uint64_t *)a; + ctx += !!*(const uint64_t *)l; + eob = decode_coefs(xd, get_plane_type(plane), pd->dqcoeff, tx_size, + dequant, ctx, sc->scan, sc->neighbors, r); + *(uint64_t *)a = ((eob > 0) * 0x0101010101010101ULL) >> ctx_shift_a; + *(uint64_t *)l = ((eob > 0) * 0x0101010101010101ULL) >> ctx_shift_l; + break; + default: + assert(0 && "Invalid transform size."); + eob = 0; + break; + } + return eob; } - - diff --git a/libs/libvpx/vp9/decoder/vp9_detokenize.h b/libs/libvpx/vp9/decoder/vp9_detokenize.h index d242d4466e..7b0d876016 100644 --- a/libs/libvpx/vp9/decoder/vp9_detokenize.h +++ b/libs/libvpx/vp9/decoder/vp9_detokenize.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP9_DECODER_VP9_DETOKENIZE_H_ #define VP9_DECODER_VP9_DETOKENIZE_H_ @@ -20,10 +19,8 @@ extern "C" { #endif -int vp9_decode_block_tokens(MACROBLOCKD *xd, - int plane, const scan_order *sc, - int x, int y, - TX_SIZE tx_size, vpx_reader *r, +int vp9_decode_block_tokens(TileWorkerData *twd, int plane, + const scan_order *sc, int x, int y, TX_SIZE tx_size, int seg_id); #ifdef __cplusplus diff --git a/libs/libvpx/vp9/decoder/vp9_dsubexp.c b/libs/libvpx/vp9/decoder/vp9_dsubexp.c index 05b38538ae..126ba0b96e 100644 --- a/libs/libvpx/vp9/decoder/vp9_dsubexp.c +++ b/libs/libvpx/vp9/decoder/vp9_dsubexp.c @@ -15,8 +15,7 @@ #include "vp9/decoder/vp9_dsubexp.h" static int inv_recenter_nonneg(int v, int m) { - if (v > 2 * m) - return v; + if (v > 2 * m) return v; return (v & 1) ? m - ((v + 1) >> 1) : m + (v >> 1); } @@ -25,19 +24,19 @@ static int decode_uniform(vpx_reader *r) { const int l = 8; const int m = (1 << l) - 191; const int v = vpx_read_literal(r, l - 1); - return v < m ? v : (v << 1) - m + vpx_read_bit(r); + return v < m ? 
v : (v << 1) - m + vpx_read_bit(r); } static int inv_remap_prob(int v, int m) { static uint8_t inv_map_table[MAX_PROB] = { - 7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176, 189, - 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 60, - 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, - 77, 78, 79, 80, 81, 82, 83, 84, 86, 87, 88, 89, 90, 91, 92, - 93, 94, 95, 96, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176, 189, + 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, 86, 87, 88, 89, 90, 91, 92, + 93, 94, 95, 96, 97, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, @@ -59,16 +58,13 @@ static int inv_remap_prob(int v, int m) { } static int decode_term_subexp(vpx_reader *r) { - if (!vpx_read_bit(r)) - return vpx_read_literal(r, 4); - if (!vpx_read_bit(r)) - return vpx_read_literal(r, 4) + 16; - if (!vpx_read_bit(r)) - return vpx_read_literal(r, 5) + 32; + if (!vpx_read_bit(r)) return vpx_read_literal(r, 4); + if (!vpx_read_bit(r)) return vpx_read_literal(r, 4) + 16; + if (!vpx_read_bit(r)) return vpx_read_literal(r, 5) + 32; return decode_uniform(r) + 64; } -void vp9_diff_update_prob(vpx_reader *r, vpx_prob* p) { +void vp9_diff_update_prob(vpx_reader *r, vpx_prob *p) { if (vpx_read(r, 
DIFF_UPDATE_PROB)) { const int delp = decode_term_subexp(r); *p = (vpx_prob)inv_remap_prob(delp, *p); diff --git a/libs/libvpx/vp9/decoder/vp9_dsubexp.h b/libs/libvpx/vp9/decoder/vp9_dsubexp.h index a8bcc70be9..5a8ec8300c 100644 --- a/libs/libvpx/vp9/decoder/vp9_dsubexp.h +++ b/libs/libvpx/vp9/decoder/vp9_dsubexp.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP9_DECODER_VP9_DSUBEXP_H_ #define VP9_DECODER_VP9_DSUBEXP_H_ @@ -18,7 +17,7 @@ extern "C" { #endif -void vp9_diff_update_prob(vpx_reader *r, vpx_prob* p); +void vp9_diff_update_prob(vpx_reader *r, vpx_prob *p); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp9/decoder/vp9_dthread.c b/libs/libvpx/vp9/decoder/vp9_dthread.c index 14a71448fe..52bc2a0f60 100644 --- a/libs/libvpx/vp9/decoder/vp9_dthread.c +++ b/libs/libvpx/vp9/decoder/vp9_dthread.c @@ -62,8 +62,7 @@ void vp9_frameworker_signal_stats(VPxWorker *const worker) { void vp9_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf, int row) { #if CONFIG_MULTITHREAD - if (!ref_buf) - return; + if (!ref_buf) return; #ifndef BUILDING_WITH_TSAN // The following line of code will get harmless tsan error but it is the key @@ -147,11 +146,12 @@ void vp9_frameworker_copy_context(VPxWorker *const dst_worker, vp9_frameworker_lock_stats(src_worker); while (!src_worker_data->frame_context_ready) { pthread_cond_wait(&src_worker_data->stats_cond, - &src_worker_data->stats_mutex); + &src_worker_data->stats_mutex); } - dst_cm->last_frame_seg_map = src_cm->seg.enabled ? - src_cm->current_frame_seg_map : src_cm->last_frame_seg_map; + dst_cm->last_frame_seg_map = src_cm->seg.enabled + ? 
src_cm->current_frame_seg_map + : src_cm->last_frame_seg_map; dst_worker_data->pbi->need_resync = src_worker_data->pbi->need_resync; vp9_frameworker_unlock_stats(src_worker); @@ -159,17 +159,18 @@ void vp9_frameworker_copy_context(VPxWorker *const dst_worker, #if CONFIG_VP9_HIGHBITDEPTH dst_cm->use_highbitdepth = src_cm->use_highbitdepth; #endif - dst_cm->prev_frame = src_cm->show_existing_frame ? - src_cm->prev_frame : src_cm->cur_frame; - dst_cm->last_width = !src_cm->show_existing_frame ? - src_cm->width : src_cm->last_width; - dst_cm->last_height = !src_cm->show_existing_frame ? - src_cm->height : src_cm->last_height; + dst_cm->prev_frame = + src_cm->show_existing_frame ? src_cm->prev_frame : src_cm->cur_frame; + dst_cm->last_width = + !src_cm->show_existing_frame ? src_cm->width : src_cm->last_width; + dst_cm->last_height = + !src_cm->show_existing_frame ? src_cm->height : src_cm->last_height; dst_cm->subsampling_x = src_cm->subsampling_x; dst_cm->subsampling_y = src_cm->subsampling_y; dst_cm->frame_type = src_cm->frame_type; - dst_cm->last_show_frame = !src_cm->show_existing_frame ? - src_cm->show_frame : src_cm->last_show_frame; + dst_cm->last_show_frame = !src_cm->show_existing_frame + ? 
src_cm->show_frame + : src_cm->last_show_frame; for (i = 0; i < REF_FRAMES; ++i) dst_cm->ref_frame_map[i] = src_cm->next_ref_frame_map[i]; @@ -183,7 +184,7 @@ void vp9_frameworker_copy_context(VPxWorker *const dst_worker, memcpy(dst_cm->frame_contexts, src_cm->frame_contexts, FRAME_CONTEXTS * sizeof(dst_cm->frame_contexts[0])); #else - (void) dst_worker; - (void) src_worker; + (void)dst_worker; + (void)src_worker; #endif // CONFIG_MULTITHREAD } diff --git a/libs/libvpx/vp9/decoder/vp9_dthread.h b/libs/libvpx/vp9/decoder/vp9_dthread.h index ba7c38a511..fce0fe7fe3 100644 --- a/libs/libvpx/vp9/decoder/vp9_dthread.h +++ b/libs/libvpx/vp9/decoder/vp9_dthread.h @@ -68,7 +68,7 @@ void vp9_frameworker_copy_context(VPxWorker *const dst_worker, VPxWorker *const src_worker); #ifdef __cplusplus -} // extern "C" +} // extern "C" #endif #endif // VP9_DECODER_VP9_DTHREAD_H_ diff --git a/libs/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c b/libs/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c index 11e8773060..afffc77178 100644 --- a/libs/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c +++ b/libs/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c @@ -18,14 +18,13 @@ #include "vpx_dsp/txfm_common.h" void vp9_fdct8x8_quant_neon(const int16_t *input, int stride, - int16_t* coeff_ptr, intptr_t n_coeffs, - int skip_block, const int16_t* zbin_ptr, - const int16_t* round_ptr, const int16_t* quant_ptr, - const int16_t* quant_shift_ptr, - int16_t* qcoeff_ptr, int16_t* dqcoeff_ptr, - const int16_t* dequant_ptr, uint16_t* eob_ptr, - const int16_t* scan_ptr, - const int16_t* iscan_ptr) { + int16_t *coeff_ptr, intptr_t n_coeffs, + int skip_block, const int16_t *zbin_ptr, + const int16_t *round_ptr, const int16_t *quant_ptr, + const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr, + int16_t *dqcoeff_ptr, const int16_t *dequant_ptr, + uint16_t *eob_ptr, const int16_t *scan_ptr, + const int16_t *iscan_ptr) { int16_t temp_buffer[64]; (void)coeff_ptr; diff --git a/libs/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c 
b/libs/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c index 47363c75ba..33c2fc7feb 100644 --- a/libs/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c +++ b/libs/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c @@ -26,8 +26,8 @@ void vp9_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr, - uint16_t *eob_ptr, - const int16_t *scan, const int16_t *iscan) { + uint16_t *eob_ptr, const int16_t *scan, + const int16_t *iscan) { // TODO(jingning) Decide the need of these arguments after the // quantization process is completed. (void)zbin_ptr; @@ -54,12 +54,12 @@ void vp9_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count, const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[0]); const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15); const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero); - const int32x4_t v_tmp_lo = vmull_s16(vget_low_s16(v_tmp), - vget_low_s16(v_quant)); - const int32x4_t v_tmp_hi = vmull_s16(vget_high_s16(v_tmp), - vget_high_s16(v_quant)); - const int16x8_t v_tmp2 = vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), - vshrn_n_s32(v_tmp_hi, 16)); + const int32x4_t v_tmp_lo = + vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant)); + const int32x4_t v_tmp_hi = + vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant)); + const int16x8_t v_tmp2 = + vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16)); const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero); const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one); const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1); @@ -79,12 +79,12 @@ void vp9_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count, const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[i]); const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15); const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero); - const int32x4_t v_tmp_lo = 
vmull_s16(vget_low_s16(v_tmp), - vget_low_s16(v_quant)); - const int32x4_t v_tmp_hi = vmull_s16(vget_high_s16(v_tmp), - vget_high_s16(v_quant)); - const int16x8_t v_tmp2 = vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), - vshrn_n_s32(v_tmp_hi, 16)); + const int32x4_t v_tmp_lo = + vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant)); + const int32x4_t v_tmp_hi = + vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant)); + const int16x8_t v_tmp2 = + vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16)); const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero); const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one); const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1); @@ -96,9 +96,8 @@ void vp9_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count, vst1q_s16(&dqcoeff_ptr[i], v_dqcoeff); } { - const int16x4_t v_eobmax_3210 = - vmax_s16(vget_low_s16(v_eobmax_76543210), - vget_high_s16(v_eobmax_76543210)); + const int16x4_t v_eobmax_3210 = vmax_s16( + vget_low_s16(v_eobmax_76543210), vget_high_s16(v_eobmax_76543210)); const int64x1_t v_eobmax_xx32 = vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32); const int16x4_t v_eobmax_tmp = diff --git a/libs/libvpx/vp9/encoder/mips/msa/vp9_error_msa.c b/libs/libvpx/vp9/encoder/mips/msa/vp9_error_msa.c index 1dc70bd82f..188d04d8f6 100644 --- a/libs/libvpx/vp9/encoder/mips/msa/vp9_error_msa.c +++ b/libs/libvpx/vp9/encoder/mips/msa/vp9_error_msa.c @@ -11,74 +11,73 @@ #include "./vp9_rtcd.h" #include "vpx_dsp/mips/macros_msa.h" -#define BLOCK_ERROR_BLOCKSIZE_MSA(BSize) \ -static int64_t block_error_##BSize##size_msa(const int16_t *coeff_ptr, \ - const int16_t *dq_coeff_ptr, \ - int64_t *ssz) { \ - int64_t err = 0; \ - uint32_t loop_cnt; \ - v8i16 coeff, dq_coeff, coeff_r_h, coeff_l_h; \ - v4i32 diff_r, diff_l, coeff_r_w, coeff_l_w; \ - v2i64 sq_coeff_r, sq_coeff_l; \ - v2i64 err0, err_dup0, err1, err_dup1; \ - \ - coeff = LD_SH(coeff_ptr); \ - dq_coeff = LD_SH(dq_coeff_ptr); \ - UNPCK_SH_SW(coeff, 
coeff_r_w, coeff_l_w); \ - ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \ - HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \ - DOTP_SW2_SD(coeff_r_w, coeff_l_w, coeff_r_w, coeff_l_w, \ - sq_coeff_r, sq_coeff_l); \ - DOTP_SW2_SD(diff_r, diff_l, diff_r, diff_l, err0, err1); \ - \ - coeff = LD_SH(coeff_ptr + 8); \ - dq_coeff = LD_SH(dq_coeff_ptr + 8); \ - UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \ - ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \ - HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \ - DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \ - DPADD_SD2_SD(diff_r, diff_l, err0, err1); \ - \ - coeff_ptr += 16; \ - dq_coeff_ptr += 16; \ - \ - for (loop_cnt = ((BSize >> 4) - 1); loop_cnt--;) { \ - coeff = LD_SH(coeff_ptr); \ - dq_coeff = LD_SH(dq_coeff_ptr); \ - UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \ - ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \ - HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \ - DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \ - DPADD_SD2_SD(diff_r, diff_l, err0, err1); \ - \ - coeff = LD_SH(coeff_ptr + 8); \ - dq_coeff = LD_SH(dq_coeff_ptr + 8); \ - UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \ - ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \ - HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \ - DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \ - DPADD_SD2_SD(diff_r, diff_l, err0, err1); \ - \ - coeff_ptr += 16; \ - dq_coeff_ptr += 16; \ - } \ - \ - err_dup0 = __msa_splati_d(sq_coeff_r, 1); \ - err_dup1 = __msa_splati_d(sq_coeff_l, 1); \ - sq_coeff_r += err_dup0; \ - sq_coeff_l += err_dup1; \ - *ssz = __msa_copy_s_d(sq_coeff_r, 0); \ - *ssz += __msa_copy_s_d(sq_coeff_l, 0); \ - \ - err_dup0 = __msa_splati_d(err0, 1); \ - err_dup1 = __msa_splati_d(err1, 1); \ - err0 += err_dup0; \ - err1 += err_dup1; \ - err = __msa_copy_s_d(err0, 0); \ - err += __msa_copy_s_d(err1, 0); \ - \ - return err; \ -} +#define BLOCK_ERROR_BLOCKSIZE_MSA(BSize) \ + static int64_t 
block_error_##BSize##size_msa( \ + const int16_t *coeff_ptr, const int16_t *dq_coeff_ptr, int64_t *ssz) { \ + int64_t err = 0; \ + uint32_t loop_cnt; \ + v8i16 coeff, dq_coeff, coeff_r_h, coeff_l_h; \ + v4i32 diff_r, diff_l, coeff_r_w, coeff_l_w; \ + v2i64 sq_coeff_r, sq_coeff_l; \ + v2i64 err0, err_dup0, err1, err_dup1; \ + \ + coeff = LD_SH(coeff_ptr); \ + dq_coeff = LD_SH(dq_coeff_ptr); \ + UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \ + ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \ + HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \ + DOTP_SW2_SD(coeff_r_w, coeff_l_w, coeff_r_w, coeff_l_w, sq_coeff_r, \ + sq_coeff_l); \ + DOTP_SW2_SD(diff_r, diff_l, diff_r, diff_l, err0, err1); \ + \ + coeff = LD_SH(coeff_ptr + 8); \ + dq_coeff = LD_SH(dq_coeff_ptr + 8); \ + UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \ + ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \ + HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \ + DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \ + DPADD_SD2_SD(diff_r, diff_l, err0, err1); \ + \ + coeff_ptr += 16; \ + dq_coeff_ptr += 16; \ + \ + for (loop_cnt = ((BSize >> 4) - 1); loop_cnt--;) { \ + coeff = LD_SH(coeff_ptr); \ + dq_coeff = LD_SH(dq_coeff_ptr); \ + UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \ + ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \ + HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \ + DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \ + DPADD_SD2_SD(diff_r, diff_l, err0, err1); \ + \ + coeff = LD_SH(coeff_ptr + 8); \ + dq_coeff = LD_SH(dq_coeff_ptr + 8); \ + UNPCK_SH_SW(coeff, coeff_r_w, coeff_l_w); \ + ILVRL_H2_SH(coeff, dq_coeff, coeff_r_h, coeff_l_h); \ + HSUB_UH2_SW(coeff_r_h, coeff_l_h, diff_r, diff_l); \ + DPADD_SD2_SD(coeff_r_w, coeff_l_w, sq_coeff_r, sq_coeff_l); \ + DPADD_SD2_SD(diff_r, diff_l, err0, err1); \ + \ + coeff_ptr += 16; \ + dq_coeff_ptr += 16; \ + } \ + \ + err_dup0 = __msa_splati_d(sq_coeff_r, 1); \ + err_dup1 = __msa_splati_d(sq_coeff_l, 1); \ + sq_coeff_r += 
err_dup0; \ + sq_coeff_l += err_dup1; \ + *ssz = __msa_copy_s_d(sq_coeff_r, 0); \ + *ssz += __msa_copy_s_d(sq_coeff_l, 0); \ + \ + err_dup0 = __msa_splati_d(err0, 1); \ + err_dup1 = __msa_splati_d(err1, 1); \ + err0 += err_dup0; \ + err1 += err_dup1; \ + err = __msa_copy_s_d(err0, 0); \ + err += __msa_copy_s_d(err1, 0); \ + \ + return err; \ + } BLOCK_ERROR_BLOCKSIZE_MSA(16); BLOCK_ERROR_BLOCKSIZE_MSA(64); @@ -86,25 +85,17 @@ BLOCK_ERROR_BLOCKSIZE_MSA(256); BLOCK_ERROR_BLOCKSIZE_MSA(1024); int64_t vp9_block_error_msa(const tran_low_t *coeff_ptr, - const tran_low_t *dq_coeff_ptr, - intptr_t blk_size, int64_t *ssz) { + const tran_low_t *dq_coeff_ptr, intptr_t blk_size, + int64_t *ssz) { int64_t err; const int16_t *coeff = (const int16_t *)coeff_ptr; const int16_t *dq_coeff = (const int16_t *)dq_coeff_ptr; switch (blk_size) { - case 16: - err = block_error_16size_msa(coeff, dq_coeff, ssz); - break; - case 64: - err = block_error_64size_msa(coeff, dq_coeff, ssz); - break; - case 256: - err = block_error_256size_msa(coeff, dq_coeff, ssz); - break; - case 1024: - err = block_error_1024size_msa(coeff, dq_coeff, ssz); - break; + case 16: err = block_error_16size_msa(coeff, dq_coeff, ssz); break; + case 64: err = block_error_64size_msa(coeff, dq_coeff, ssz); break; + case 256: err = block_error_256size_msa(coeff, dq_coeff, ssz); break; + case 1024: err = block_error_1024size_msa(coeff, dq_coeff, ssz); break; default: err = vp9_block_error_c(coeff_ptr, dq_coeff_ptr, blk_size, ssz); break; diff --git a/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct16x16_msa.c b/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct16x16_msa.c index 6dabb58900..0831e59148 100644 --- a/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct16x16_msa.c +++ b/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct16x16_msa.c @@ -159,8 +159,8 @@ static void fadst16_transpose_postproc_msa(int16_t *input, int16_t *out) { /* load input data */ LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7); - TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, 
- r0, r1, r2, r3, r4, r5, r6, r7); + TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6, + r7); FDCT_POSTPROC_2V_NEG_H(r0, r1); FDCT_POSTPROC_2V_NEG_H(r2, r3); FDCT_POSTPROC_2V_NEG_H(r4, r5); @@ -169,8 +169,8 @@ static void fadst16_transpose_postproc_msa(int16_t *input, int16_t *out) { out += 64; LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15); - TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, - r8, r9, r10, r11, r12, r13, r14, r15); + TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11, + r12, r13, r14, r15); FDCT_POSTPROC_2V_NEG_H(r8, r9); FDCT_POSTPROC_2V_NEG_H(r10, r11); FDCT_POSTPROC_2V_NEG_H(r12, r13); @@ -181,8 +181,8 @@ static void fadst16_transpose_postproc_msa(int16_t *input, int16_t *out) { /* load input data */ input += 128; LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7); - TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, - r0, r1, r2, r3, r4, r5, r6, r7); + TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6, + r7); FDCT_POSTPROC_2V_NEG_H(r0, r1); FDCT_POSTPROC_2V_NEG_H(r2, r3); FDCT_POSTPROC_2V_NEG_H(r4, r5); @@ -191,8 +191,8 @@ static void fadst16_transpose_postproc_msa(int16_t *input, int16_t *out) { out += 64; LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15); - TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, - r8, r9, r10, r11, r12, r13, r14, r15); + TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11, + r12, r13, r14, r15); FDCT_POSTPROC_2V_NEG_H(r8, r9); FDCT_POSTPROC_2V_NEG_H(r10, r11); FDCT_POSTPROC_2V_NEG_H(r12, r13); @@ -339,24 +339,24 @@ static void fadst16_transpose_msa(int16_t *input, int16_t *out) { v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15; /* load input data */ - LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, - l4, l12, l5, l13, l6, l14, l7, l15); - TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, - r0, r1, r2, r3, r4, r5, r6, r7); - TRANSPOSE8x8_SH_SH(l8, 
l9, l10, l11, l12, l13, l14, l15, - r8, r9, r10, r11, r12, r13, r14, r15); + LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14, + l7, l15); + TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6, + r7); + TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11, + r12, r13, r14, r15); ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8); ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8); out += 16 * 8; /* load input data */ input += 128; - LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, - l4, l12, l5, l13, l6, l14, l7, l15); - TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, - r0, r1, r2, r3, r4, r5, r6, r7); - TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, - r8, r9, r10, r11, r12, r13, r14, r15); + LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14, + l7, l15); + TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, r0, r1, r2, r3, r4, r5, r6, + r7); + TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, r8, r9, r10, r11, + r12, r13, r14, r15); ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8); ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8); } @@ -371,10 +371,10 @@ static void postproc_fdct16x8_1d_row(int16_t *intermediate, int16_t *output) { LD_SH8(temp, 16, in0, in1, in2, in3, in4, in5, in6, in7); temp = intermediate + 8; LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, - in8, in9, in10, in11, in12, in13, in14, in15); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); + TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9, + in10, in11, in12, in13, in14, in15); FDCT_POSTPROC_2V_NEG_H(in0, in1); FDCT_POSTPROC_2V_NEG_H(in2, in3); FDCT_POSTPROC_2V_NEG_H(in4, in5); @@ -383,29 +383,28 
@@ static void postproc_fdct16x8_1d_row(int16_t *intermediate, int16_t *output) { FDCT_POSTPROC_2V_NEG_H(in10, in11); FDCT_POSTPROC_2V_NEG_H(in12, in13); FDCT_POSTPROC_2V_NEG_H(in14, in15); - BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, - in8, in9, in10, in11, in12, in13, in14, in15, - tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, - in8, in9, in10, in11, in12, in13, in14, in15); + BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, + in12, in13, in14, in15, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, + tmp7, in8, in9, in10, in11, in12, in13, in14, in15); temp = intermediate; ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, temp, 16); - FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, - tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7); + FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1, + tmp2, tmp3, tmp4, tmp5, tmp6, tmp7); temp = intermediate; LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15); - FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, - tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3); + FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15, in0, in1, in2, in3, + in4, in5, in6, in7); + TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, tmp0, in0, + tmp1, in1, tmp2, in2, tmp3, in3); ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, out, 16); - TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, - tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7); + TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, tmp4, in4, + tmp5, in5, tmp6, in6, tmp7, in7); out = output + 8; ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, out, 16); } -void vp9_fht16x16_msa(const int16_t *input, int16_t *output, - int32_t stride, int32_t tx_type) { +void vp9_fht16x16_msa(const int16_t *input, int16_t *output, int32_t stride, + int32_t tx_type) { 
DECLARE_ALIGNED(32, int16_t, tmp[256]); DECLARE_ALIGNED(32, int16_t, trans_buf[256]); DECLARE_ALIGNED(32, int16_t, tmp_buf[128]); @@ -413,35 +412,31 @@ void vp9_fht16x16_msa(const int16_t *input, int16_t *output, int16_t *ptmpbuf = &tmp_buf[0]; int16_t *trans = &trans_buf[0]; const int32_t const_arr[29 * 4] = { - 52707308, 52707308, 52707308, 52707308, - -1072430300, -1072430300, -1072430300, -1072430300, - 795618043, 795618043, 795618043, 795618043, - -721080468, -721080468, -721080468, -721080468, - 459094491, 459094491, 459094491, 459094491, - -970646691, -970646691, -970646691, -970646691, - 1010963856, 1010963856, 1010963856, 1010963856, - -361743294, -361743294, -361743294, -361743294, - 209469125, 209469125, 209469125, 209469125, - -1053094788, -1053094788, -1053094788, -1053094788, - 1053160324, 1053160324, 1053160324, 1053160324, - 639644520, 639644520, 639644520, 639644520, - -862444000, -862444000, -862444000, -862444000, - 1062144356, 1062144356, 1062144356, 1062144356, - -157532337, -157532337, -157532337, -157532337, - 260914709, 260914709, 260914709, 260914709, - -1041559667, -1041559667, -1041559667, -1041559667, - 920985831, 920985831, 920985831, 920985831, - -551995675, -551995675, -551995675, -551995675, - 596522295, 596522295, 596522295, 596522295, - 892853362, 892853362, 892853362, 892853362, - -892787826, -892787826, -892787826, -892787826, - 410925857, 410925857, 410925857, 410925857, - -992012162, -992012162, -992012162, -992012162, - 992077698, 992077698, 992077698, 992077698, - 759246145, 759246145, 759246145, 759246145, - -759180609, -759180609, -759180609, -759180609, - -759222975, -759222975, -759222975, -759222975, - 759288511, 759288511, 759288511, 759288511 }; + 52707308, 52707308, 52707308, 52707308, -1072430300, + -1072430300, -1072430300, -1072430300, 795618043, 795618043, + 795618043, 795618043, -721080468, -721080468, -721080468, + -721080468, 459094491, 459094491, 459094491, 459094491, + -970646691, -970646691, -970646691, 
-970646691, 1010963856, + 1010963856, 1010963856, 1010963856, -361743294, -361743294, + -361743294, -361743294, 209469125, 209469125, 209469125, + 209469125, -1053094788, -1053094788, -1053094788, -1053094788, + 1053160324, 1053160324, 1053160324, 1053160324, 639644520, + 639644520, 639644520, 639644520, -862444000, -862444000, + -862444000, -862444000, 1062144356, 1062144356, 1062144356, + 1062144356, -157532337, -157532337, -157532337, -157532337, + 260914709, 260914709, 260914709, 260914709, -1041559667, + -1041559667, -1041559667, -1041559667, 920985831, 920985831, + 920985831, 920985831, -551995675, -551995675, -551995675, + -551995675, 596522295, 596522295, 596522295, 596522295, + 892853362, 892853362, 892853362, 892853362, -892787826, + -892787826, -892787826, -892787826, 410925857, 410925857, + 410925857, 410925857, -992012162, -992012162, -992012162, + -992012162, 992077698, 992077698, 992077698, 992077698, + 759246145, 759246145, 759246145, 759246145, -759180609, + -759180609, -759180609, -759180609, -759222975, -759222975, + -759222975, -759222975, 759288511, 759288511, 759288511, + 759288511 + }; switch (tx_type) { case DCT_DCT: @@ -500,8 +495,6 @@ void vp9_fht16x16_msa(const int16_t *input, int16_t *output, fadst16_transpose_msa(tmp, output); break; - default: - assert(0); - break; + default: assert(0); break; } } diff --git a/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct4x4_msa.c b/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct4x4_msa.c index 574016f155..fa36f09ab8 100644 --- a/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct4x4_msa.c +++ b/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct4x4_msa.c @@ -86,9 +86,7 @@ void vp9_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride, TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); VP9_FADST4(in0, in1, in2, in3, in0, in1, in2, in3); break; - default: - assert(0); - break; + default: assert(0); break; } TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); diff --git 
a/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct8x8_msa.c b/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct8x8_msa.c index 7c3c635f8d..604db853c4 100644 --- a/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct8x8_msa.c +++ b/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct8x8_msa.c @@ -23,44 +23,42 @@ void vp9_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride, switch (tx_type) { case DCT_DCT: - VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, + in5, in6, in7); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, + in3, in4, in5, in6, in7); + VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, + in5, in6, in7); break; case ADST_DCT: - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, + in5, in6, in7); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, + in3, in4, in5, in6, in7); + VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, + in5, in6, in7); break; case DCT_ADST: - VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, + in5, in6, in7); + 
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, + in3, in4, in5, in6, in7); + VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, + in5, in6, in7); break; case ADST_ADST: - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - break; - default: - assert(0); + VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, + in5, in6, in7); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, + in3, in4, in5, in6, in7); + VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, + in5, in6, in7); break; + default: assert(0); break; } - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7); ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8); } diff --git a/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct_msa.h b/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct_msa.h index d7d40cb72c..794bec70b6 100644 --- a/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct_msa.h +++ b/libs/libvpx/vp9/encoder/mips/msa/vp9_fdct_msa.h @@ -15,103 +15,102 @@ #include "vpx_dsp/mips/txfm_macros_msa.h" #include "vpx_ports/mem.h" -#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \ - v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \ - v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \ - cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \ - v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \ - cospi_24_64, 
-cospi_24_64, 0, 0 }; \ - \ - SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m); \ - cnst2_m = -cnst0_m; \ - ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \ - SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m); \ - cnst4_m = -cnst2_m; \ - ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \ - \ - ILVRL_H2_SH(in0, in7, vec1_m, vec0_m); \ - ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \ - DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \ - cnst1_m, cnst2_m, cnst3_m, in7, in0, \ - in4, in3); \ - \ - SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m); \ - cnst2_m = -cnst0_m; \ - ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \ - SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m); \ - cnst4_m = -cnst2_m; \ - ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \ - \ - ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \ - ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \ - \ - DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \ - cnst1_m, cnst2_m, cnst3_m, in5, in2, \ - in6, in1); \ - BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \ - out7 = -s0_m; \ - out0 = s1_m; \ - \ - SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, cnst0_m, cnst1_m, cnst2_m, cnst3_m); \ - \ - ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m); \ - cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ - cnst1_m = cnst0_m; \ - \ - ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \ - ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \ - DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \ - cnst2_m, cnst3_m, cnst1_m, out1, out6, \ - s0_m, s1_m); \ - \ - SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \ - cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ - \ - ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \ - ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m); \ - out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ - out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \ - out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \ - out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, 
vec3_m, cnst1_m); \ - \ - out1 = -out1; \ - out3 = -out3; \ - out5 = -out5; \ -} +#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \ + out3, out4, out5, out6, out7) \ + { \ + v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \ + v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \ + v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \ + cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \ + v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \ + cospi_24_64, -cospi_24_64, 0, 0 }; \ + \ + SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m); \ + cnst2_m = -cnst0_m; \ + ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \ + SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m); \ + cnst4_m = -cnst2_m; \ + ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \ + \ + ILVRL_H2_SH(in0, in7, vec1_m, vec0_m); \ + ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \ + DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m, \ + cnst2_m, cnst3_m, in7, in0, in4, in3); \ + \ + SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m); \ + cnst2_m = -cnst0_m; \ + ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \ + SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m); \ + cnst4_m = -cnst2_m; \ + ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \ + \ + ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \ + ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \ + \ + DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m, \ + cnst2_m, cnst3_m, in5, in2, in6, in1); \ + BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \ + out7 = -s0_m; \ + out0 = s1_m; \ + \ + SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, cnst0_m, cnst1_m, cnst2_m, cnst3_m); \ + \ + ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m); \ + cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ + cnst1_m = cnst0_m; \ + \ + ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \ + ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \ + 
DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst2_m, \ + cnst3_m, cnst1_m, out1, out6, s0_m, s1_m); \ + \ + SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \ + cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ + \ + ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \ + ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m); \ + out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ + out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \ + out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \ + out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \ + \ + out1 = -out1; \ + out3 = -out3; \ + out5 = -out5; \ + } -#define VP9_FADST4(in0, in1, in2, in3, out0, out1, out2, out3) { \ - v4i32 s0_m, s1_m, s2_m, s3_m, constant_m; \ - v4i32 in0_r_m, in1_r_m, in2_r_m, in3_r_m; \ - \ - UNPCK_R_SH_SW(in0, in0_r_m); \ - UNPCK_R_SH_SW(in1, in1_r_m); \ - UNPCK_R_SH_SW(in2, in2_r_m); \ - UNPCK_R_SH_SW(in3, in3_r_m); \ - \ - constant_m = __msa_fill_w(sinpi_4_9); \ - MUL2(in0_r_m, constant_m, in3_r_m, constant_m, s1_m, s0_m); \ - \ - constant_m = __msa_fill_w(sinpi_1_9); \ - s0_m += in0_r_m * constant_m; \ - s1_m -= in1_r_m * constant_m; \ - \ - constant_m = __msa_fill_w(sinpi_2_9); \ - s0_m += in1_r_m * constant_m; \ - s1_m += in3_r_m * constant_m; \ - \ - s2_m = in0_r_m + in1_r_m - in3_r_m; \ - \ - constant_m = __msa_fill_w(sinpi_3_9); \ - MUL2(in2_r_m, constant_m, s2_m, constant_m, s3_m, in1_r_m); \ - \ - in0_r_m = s0_m + s3_m; \ - s2_m = s1_m - s3_m; \ - s3_m = s1_m - s0_m + s3_m; \ - \ - SRARI_W4_SW(in0_r_m, in1_r_m, s2_m, s3_m, DCT_CONST_BITS); \ - PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m, \ - s3_m, s3_m, out0, out1, out2, out3); \ -} -#endif /* VP9_ENCODER_MIPS_MSA_VP9_FDCT_MSA_H_ */ +#define VP9_FADST4(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + v4i32 s0_m, s1_m, s2_m, s3_m, constant_m; \ + v4i32 in0_r_m, in1_r_m, in2_r_m, in3_r_m; \ + \ + UNPCK_R_SH_SW(in0, in0_r_m); \ + UNPCK_R_SH_SW(in1, in1_r_m); \ + UNPCK_R_SH_SW(in2, in2_r_m); \ + 
UNPCK_R_SH_SW(in3, in3_r_m); \ + \ + constant_m = __msa_fill_w(sinpi_4_9); \ + MUL2(in0_r_m, constant_m, in3_r_m, constant_m, s1_m, s0_m); \ + \ + constant_m = __msa_fill_w(sinpi_1_9); \ + s0_m += in0_r_m * constant_m; \ + s1_m -= in1_r_m * constant_m; \ + \ + constant_m = __msa_fill_w(sinpi_2_9); \ + s0_m += in1_r_m * constant_m; \ + s1_m += in3_r_m * constant_m; \ + \ + s2_m = in0_r_m + in1_r_m - in3_r_m; \ + \ + constant_m = __msa_fill_w(sinpi_3_9); \ + MUL2(in2_r_m, constant_m, s2_m, constant_m, s3_m, in1_r_m); \ + \ + in0_r_m = s0_m + s3_m; \ + s2_m = s1_m - s3_m; \ + s3_m = s1_m - s0_m + s3_m; \ + \ + SRARI_W4_SW(in0_r_m, in1_r_m, s2_m, s3_m, DCT_CONST_BITS); \ + PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m, s3_m, s3_m, \ + out0, out1, out2, out3); \ + } +#endif /* VP9_ENCODER_MIPS_MSA_VP9_FDCT_MSA_H_ */ diff --git a/libs/libvpx/vp9/encoder/mips/msa/vp9_temporal_filter_msa.c b/libs/libvpx/vp9/encoder/mips/msa/vp9_temporal_filter_msa.c index 363aabb7cb..23f7ebace4 100644 --- a/libs/libvpx/vp9/encoder/mips/msa/vp9_temporal_filter_msa.c +++ b/libs/libvpx/vp9/encoder/mips/msa/vp9_temporal_filter_msa.c @@ -11,12 +11,9 @@ #include "./vp9_rtcd.h" #include "vpx_dsp/mips/macros_msa.h" -static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, - uint32_t stride, - uint8_t *frm2_ptr, - int32_t filt_sth, - int32_t filt_wgt, - uint32_t *acc, +static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, uint32_t stride, + uint8_t *frm2_ptr, int32_t filt_sth, + int32_t filt_wgt, uint32_t *acc, uint16_t *cnt) { uint32_t row; uint64_t f0, f1, f2, f3; @@ -54,10 +51,10 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, HSUB_UB2_SH(frm_r, frm_l, diff0, diff1); UNPCK_SH_SW(diff0, diff0_r, diff0_l); UNPCK_SH_SW(diff1, diff1_r, diff1_l); - MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, - diff1_l, mod0_w, mod1_w, mod2_w, mod3_w); - MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, + MUL4(diff0_r, diff0_r, 
diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l, mod0_w, mod1_w, mod2_w, mod3_w); + MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w, + mod1_w, mod2_w, mod3_w); SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); diff0_r = (mod0_w < cnst16); @@ -65,8 +62,8 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, diff1_r = (mod2_w < cnst16); diff1_l = (mod3_w < cnst16); - SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, - mod0_w, mod1_w, mod2_w, mod3_w); + SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w, + mod1_w, mod2_w, mod3_w); mod0_w = diff0_r & mod0_w; mod1_w = diff0_l & mod1_w; @@ -85,8 +82,8 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll); MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll, mod0_w, mod1_w, mod2_w, mod3_w); - ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, - mod0_w, mod1_w, mod2_w, mod3_w); + ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w, + mod2_w, mod3_w); ST_SW2(mod0_w, mod1_w, acc, 4); acc += 8; @@ -101,10 +98,10 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, HSUB_UB2_SH(frm_r, frm_l, diff0, diff1); UNPCK_SH_SW(diff0, diff0_r, diff0_l); UNPCK_SH_SW(diff1, diff1_r, diff1_l); - MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, - diff1_l, mod0_w, mod1_w, mod2_w, mod3_w); - MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, + MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l, mod0_w, mod1_w, mod2_w, mod3_w); + MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w, + mod1_w, mod2_w, mod3_w); SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); diff0_r = (mod0_w < cnst16); @@ -112,8 +109,8 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, diff1_r = (mod2_w < cnst16); diff1_l = (mod3_w < cnst16); - SUB4(cnst16, mod0_w, cnst16, mod1_w, 
cnst16, mod2_w, cnst16, mod3_w, - mod0_w, mod1_w, mod2_w, mod3_w); + SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w, + mod1_w, mod2_w, mod3_w); mod0_w = diff0_r & mod0_w; mod1_w = diff0_l & mod1_w; @@ -131,8 +128,8 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll); MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll, mod0_w, mod1_w, mod2_w, mod3_w); - ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, - mod0_w, mod1_w, mod2_w, mod3_w); + ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w, + mod2_w, mod3_w); ST_SW2(mod0_w, mod1_w, acc, 4); acc += 8; @@ -141,13 +138,10 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, } } -static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr, - uint32_t stride, +static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr, uint32_t stride, uint8_t *frm2_ptr, - int32_t filt_sth, - int32_t filt_wgt, - uint32_t *acc, - uint16_t *cnt) { + int32_t filt_sth, int32_t filt_wgt, + uint32_t *acc, uint16_t *cnt) { uint32_t row; v16i8 frm1, frm2, frm3, frm4; v16u8 frm_r, frm_l; @@ -183,8 +177,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr, UNPCK_SH_SW(diff1, diff1_r, diff1_l); MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l, mod0_w, mod1_w, mod2_w, mod3_w); - MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, - mod0_w, mod1_w, mod2_w, mod3_w); + MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w, + mod1_w, mod2_w, mod3_w); SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); diff0_r = (mod0_w < cnst16); @@ -192,8 +186,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr, diff1_r = (mod2_w < cnst16); diff1_l = (mod3_w < cnst16); - SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, - mod0_w, mod1_w, mod2_w, mod3_w); + SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, 
mod2_w, cnst16, mod3_w, mod0_w, + mod1_w, mod2_w, mod3_w); mod0_w = diff0_r & mod0_w; mod1_w = diff0_l & mod1_w; @@ -212,8 +206,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr, UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll); MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll, mod0_w, mod1_w, mod2_w, mod3_w); - ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, - mod0_w, mod1_w, mod2_w, mod3_w); + ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w, + mod2_w, mod3_w); ST_SW2(mod0_w, mod1_w, acc, 4); acc += 8; @@ -230,8 +224,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr, UNPCK_SH_SW(diff1, diff1_r, diff1_l); MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l, mod0_w, mod1_w, mod2_w, mod3_w); - MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, - mod0_w, mod1_w, mod2_w, mod3_w); + MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w, + mod1_w, mod2_w, mod3_w); SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength); diff0_r = (mod0_w < cnst16); @@ -239,8 +233,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr, diff1_r = (mod2_w < cnst16); diff1_l = (mod3_w < cnst16); - SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, - mod0_w, mod1_w, mod2_w, mod3_w); + SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w, + mod1_w, mod2_w, mod3_w); mod0_w = diff0_r & mod0_w; mod1_w = diff0_l & mod1_w; @@ -259,8 +253,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr, UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll); MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll, mod0_w, mod1_w, mod2_w, mod3_w); - ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, - mod0_w, mod1_w, mod2_w, mod3_w); + ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w, + mod2_w, mod3_w); ST_SW2(mod0_w, mod1_w, acc, 4); acc += 8; ST_SW2(mod2_w, mod3_w, 
acc, 4); @@ -277,11 +271,11 @@ void vp9_temporal_filter_apply_msa(uint8_t *frame1_ptr, uint32_t stride, int32_t filt_wgt, uint32_t *accu, uint16_t *cnt) { if (8 == (blk_w * blk_h)) { - temporal_filter_apply_8size_msa(frame1_ptr, stride, frame2_ptr, - strength, filt_wgt, accu, cnt); + temporal_filter_apply_8size_msa(frame1_ptr, stride, frame2_ptr, strength, + filt_wgt, accu, cnt); } else if (16 == (blk_w * blk_h)) { - temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr, - strength, filt_wgt, accu, cnt); + temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr, strength, + filt_wgt, accu, cnt); } else { vp9_temporal_filter_apply_c(frame1_ptr, stride, frame2_ptr, blk_w, blk_h, strength, filt_wgt, accu, cnt); diff --git a/libs/libvpx/vp9/encoder/vp9_alt_ref_aq.c b/libs/libvpx/vp9/encoder/vp9_alt_ref_aq.c new file mode 100644 index 0000000000..3aeefb5845 --- /dev/null +++ b/libs/libvpx/vp9/encoder/vp9_alt_ref_aq.c @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file in the root of the source tree. An additional + * intellectual property rights grant can be found in the file PATENTS. + * All contributing project authors may be found in the AUTHORS file in + * the root of the source tree. 
+ */ + +#include "vp9/encoder/vp9_encoder.h" +#include "vp9/encoder/vp9_alt_ref_aq.h" + +struct ALT_REF_AQ { + int dummy; +}; + +struct ALT_REF_AQ *vp9_alt_ref_aq_create() { + return (struct ALT_REF_AQ *)vpx_malloc(sizeof(struct ALT_REF_AQ)); +} + +void vp9_alt_ref_aq_destroy(struct ALT_REF_AQ *const self) { vpx_free(self); } + +void vp9_alt_ref_aq_upload_map(struct ALT_REF_AQ *const self, + const struct MATX_8U *segmentation_map) { + (void)self; + (void)segmentation_map; +} + +void vp9_alt_ref_aq_set_nsegments(struct ALT_REF_AQ *const self, + int nsegments) { + (void)self; + (void)nsegments; +} + +void vp9_alt_ref_aq_setup_mode(struct ALT_REF_AQ *const self, + struct VP9_COMP *const cpi) { + (void)cpi; + (void)self; +} + +// set basic segmentation to the altref's one +void vp9_alt_ref_aq_setup_map(struct ALT_REF_AQ *const self, + struct VP9_COMP *const cpi) { + (void)cpi; + (void)self; +} + +// restore cpi->aq_mode +void vp9_alt_ref_aq_unset_all(struct ALT_REF_AQ *const self, + struct VP9_COMP *const cpi) { + (void)cpi; + (void)self; +} + +int vp9_alt_ref_aq_disable_if(const struct ALT_REF_AQ *self, + int segmentation_overhead, int bandwidth) { + (void)bandwidth; + (void)self; + (void)segmentation_overhead; + + return 0; +} diff --git a/libs/libvpx/vp9/encoder/vp9_alt_ref_aq.h b/libs/libvpx/vp9/encoder/vp9_alt_ref_aq.h new file mode 100644 index 0000000000..18acd8a85b --- /dev/null +++ b/libs/libvpx/vp9/encoder/vp9_alt_ref_aq.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file in the root of the source tree. An additional + * intellectual property rights grant can be found in the file PATENTS. + * All contributing project authors may be found in the AUTHORS file in + * the root of the source tree. 
+ */ + +/* + * \file vp9_alt_ref_aq.h + * + * This file contains public interface for setting up adaptive segmentation + * for altref frames. Go to alt_ref_aq_private.h for implmentation details. + */ + +#ifndef VP9_ENCODER_VP9_ALT_REF_AQ_H_ +#define VP9_ENCODER_VP9_ALT_REF_AQ_H_ + +#include "vpx/vpx_integer.h" + +// Where to disable segmentation +#define ALT_REF_AQ_LOW_BITRATE_BOUNDARY 150 + +// Last frame always has overall quality = 0, +// so it is questionable if I can process it +#define ALT_REF_AQ_APPLY_TO_LAST_FRAME 1 + +// If I should try to compare gain +// against segmentation overhead +#define ALT_REF_AQ_PROTECT_GAIN 0 + +// Threshold to disable segmentation +#define ALT_REF_AQ_PROTECT_GAIN_THRESH 0.5 + +#ifdef __cplusplus +extern "C" { +#endif + +// Simple structure for storing images +struct MATX_8U { + int rows; + int cols; + int stride; + + uint8_t *data; +}; + +struct VP9_COMP; +struct ALT_REF_AQ; + +/*!\brief Constructor + * + * \return Instance of the class + */ +struct ALT_REF_AQ *vp9_alt_ref_aq_create(); + +/*!\brief Upload segmentation_map to self object + * + * \param self Instance of the class + * \param segmentation_map Segmentation map to upload + */ +void vp9_alt_ref_aq_upload_map(struct ALT_REF_AQ *const self, + const struct MATX_8U *segmentation_map); + +/*!\brief Return pointer to the altref segmentation map + * + * \param self Instance of the class + * \param segmentation_overhead Segmentation overhead in bytes + * \param bandwidth Current frame bandwidth in bytes + * + * \return Boolean value to disable segmentation + */ +int vp9_alt_ref_aq_disable_if(const struct ALT_REF_AQ *self, + int segmentation_overhead, int bandwidth); + +/*!\brief Set number of segments + * + * It is used for delta quantizer computations + * and thus it can be larger than + * maximum value of the segmentation map + * + * \param self Instance of the class + * \param nsegments Maximum number of segments + */ +void vp9_alt_ref_aq_set_nsegments(struct ALT_REF_AQ 
*const self, int nsegments); + +/*!\brief Set up LOOKAHEAD_AQ segmentation mode + * + * Set up segmentation mode to LOOKAHEAD_AQ + * (expected future frames prediction + * quality refering to the current frame). + * + * \param self Instance of the class + * \param cpi Encoder context + */ +void vp9_alt_ref_aq_setup_mode(struct ALT_REF_AQ *const self, + struct VP9_COMP *const cpi); + +/*!\brief Set up LOOKAHEAD_AQ segmentation map and delta quantizers + * + * \param self Instance of the class + * \param cpi Encoder context + */ +void vp9_alt_ref_aq_setup_map(struct ALT_REF_AQ *const self, + struct VP9_COMP *const cpi); + +/*!\brief Restore main segmentation map mode and reset the class variables + * + * \param self Instance of the class + * \param cpi Encoder context + */ +void vp9_alt_ref_aq_unset_all(struct ALT_REF_AQ *const self, + struct VP9_COMP *const cpi); + +/*!\brief Destructor + * + * \param self Instance of the class + */ +void vp9_alt_ref_aq_destroy(struct ALT_REF_AQ *const self); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP9_ENCODER_VP9_ALT_REF_AQ_H_ diff --git a/libs/libvpx/vp9/encoder/vp9_aq_360.c b/libs/libvpx/vp9/encoder/vp9_aq_360.c index f8c187cc5d..dba017ffcc 100644 --- a/libs/libvpx/vp9/encoder/vp9_aq_360.c +++ b/libs/libvpx/vp9/encoder/vp9_aq_360.c @@ -13,6 +13,7 @@ #include "vpx_ports/mem.h" #include "vpx_ports/system_state.h" +#include "vp9/encoder/vp9_aq_360.h" #include "vp9/encoder/vp9_aq_variance.h" #include "vp9/common/vp9_seg_common.h" @@ -21,8 +22,8 @@ #include "vp9/encoder/vp9_rd.h" #include "vp9/encoder/vp9_segmentation.h" -static const double rate_ratio[MAX_SEGMENTS] = - {1.0, 0.75, 0.6, 0.5, 0.4, 0.3, 0.25}; +static const double rate_ratio[MAX_SEGMENTS] = { 1.0, 0.75, 0.6, 0.5, + 0.4, 0.3, 0.25 }; // Sets segment id 0 for the equatorial region, 1 for temperate region // and 2 for the polar regions @@ -40,7 +41,8 @@ void vp9_360aq_frame_setup(VP9_COMP *cpi) { struct segmentation *seg = &cm->seg; int i; - if 
(frame_is_intra_only(cm) || cm->error_resilient_mode) { + if (frame_is_intra_only(cm) || cpi->force_update_segmentation || + cm->error_resilient_mode) { vp9_enable_segmentation(seg); vp9_clearall_segfeatures(seg); diff --git a/libs/libvpx/vp9/encoder/vp9_aq_360.h b/libs/libvpx/vp9/encoder/vp9_aq_360.h index fb861cb052..b1b56561d8 100644 --- a/libs/libvpx/vp9/encoder/vp9_aq_360.h +++ b/libs/libvpx/vp9/encoder/vp9_aq_360.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP9_ENCODER_VP9_AQ_360_H_ #define VP9_ENCODER_VP9_AQ_360_H_ diff --git a/libs/libvpx/vp9/encoder/vp9_aq_complexity.c b/libs/libvpx/vp9/encoder/vp9_aq_complexity.c index 2d979ec70b..bd3812036c 100644 --- a/libs/libvpx/vp9/encoder/vp9_aq_complexity.c +++ b/libs/libvpx/vp9/encoder/vp9_aq_complexity.c @@ -19,21 +19,24 @@ #include "vp9/common/vp9_seg_common.h" #include "vp9/encoder/vp9_segmentation.h" -#define AQ_C_SEGMENTS 5 -#define DEFAULT_AQ2_SEG 3 // Neutral Q segment +#define AQ_C_SEGMENTS 5 +#define DEFAULT_AQ2_SEG 3 // Neutral Q segment #define AQ_C_STRENGTHS 3 -static const double aq_c_q_adj_factor[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = - { {1.75, 1.25, 1.05, 1.00, 0.90}, - {2.00, 1.50, 1.15, 1.00, 0.85}, - {2.50, 1.75, 1.25, 1.00, 0.80} }; -static const double aq_c_transitions[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = - { {0.15, 0.30, 0.55, 2.00, 100.0}, - {0.20, 0.40, 0.65, 2.00, 100.0}, - {0.25, 0.50, 0.75, 2.00, 100.0} }; -static const double aq_c_var_thresholds[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = - { {-4.0, -3.0, -2.0, 100.00, 100.0}, - {-3.5, -2.5, -1.5, 100.00, 100.0}, - {-3.0, -2.0, -1.0, 100.00, 100.0} }; +static const double aq_c_q_adj_factor[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = { + { 1.75, 1.25, 1.05, 1.00, 0.90 }, + { 2.00, 1.50, 1.15, 1.00, 0.85 }, + { 2.50, 1.75, 1.25, 1.00, 0.80 } +}; +static const double aq_c_transitions[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = { + { 0.15, 0.30, 0.55, 2.00, 100.0 }, + { 0.20, 0.40, 0.65, 2.00, 100.0 }, + { 0.25, 0.50, 0.75, 2.00, 
100.0 } +}; +static const double aq_c_var_thresholds[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = { + { -4.0, -3.0, -2.0, 100.00, 100.0 }, + { -3.5, -2.5, -1.5, 100.00, 100.0 }, + { -3.0, -2.0, -1.0, 100.00, 100.0 } +}; static int get_aq_c_strength(int q_index, vpx_bit_depth_t bit_depth) { // Approximate base quatizer (truncated to int) @@ -49,7 +52,7 @@ void vp9_setup_in_frame_q_adj(VP9_COMP *cpi) { vpx_clear_system_state(); if (frame_is_intra_only(cm) || cm->error_resilient_mode || - cpi->refresh_alt_ref_frame || + cpi->refresh_alt_ref_frame || cpi->force_update_segmentation || (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) { int segment; const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth); @@ -78,14 +81,11 @@ void vp9_setup_in_frame_q_adj(VP9_COMP *cpi) { for (segment = 0; segment < AQ_C_SEGMENTS; ++segment) { int qindex_delta; - if (segment == DEFAULT_AQ2_SEG) - continue; - - qindex_delta = - vp9_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex, - aq_c_q_adj_factor[aq_strength][segment], - cm->bit_depth); + if (segment == DEFAULT_AQ2_SEG) continue; + qindex_delta = vp9_compute_qdelta_by_rate( + &cpi->rc, cm->frame_type, cm->base_qindex, + aq_c_q_adj_factor[aq_strength][segment], cm->bit_depth); // For AQ complexity mode, we dont allow Q0 in a segment if the base // Q is not 0. Q0 (lossless) implies 4x4 only and in AQ mode 2 a segment @@ -125,26 +125,25 @@ void vp9_caq_select_segment(VP9_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs, } else { // Rate depends on fraction of a SB64 in frame (xmis * ymis / bw * bh). // It is converted to bits * 256 units. - const int target_rate = (cpi->rc.sb64_target_rate * xmis * ymis * 256) / - (bw * bh); + const int target_rate = + (cpi->rc.sb64_target_rate * xmis * ymis * 256) / (bw * bh); double logvar; double low_var_thresh; const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth); vpx_clear_system_state(); - low_var_thresh = (cpi->oxcf.pass == 2) - ? 
VPXMAX(cpi->twopass.mb_av_energy, MIN_DEFAULT_LV_THRESH) - : DEFAULT_LV_THRESH; + low_var_thresh = (cpi->oxcf.pass == 2) ? VPXMAX(cpi->twopass.mb_av_energy, + MIN_DEFAULT_LV_THRESH) + : DEFAULT_LV_THRESH; vp9_setup_src_planes(mb, cpi->Source, mi_row, mi_col); logvar = vp9_log_block_var(cpi, mb, bs); - segment = AQ_C_SEGMENTS - 1; // Just in case no break out below. + segment = AQ_C_SEGMENTS - 1; // Just in case no break out below. for (i = 0; i < AQ_C_SEGMENTS; ++i) { // Test rate against a threshold value and variance against a threshold. // Increasing segment number (higher variance and complexity) = higher Q. - if ((projected_rate < - target_rate * aq_c_transitions[aq_strength][i]) && + if ((projected_rate < target_rate * aq_c_transitions[aq_strength][i]) && (logvar < (low_var_thresh + aq_c_var_thresholds[aq_strength][i]))) { segment = i; break; diff --git a/libs/libvpx/vp9/encoder/vp9_aq_complexity.h b/libs/libvpx/vp9/encoder/vp9_aq_complexity.h index e9acb1ca50..a00d34e702 100644 --- a/libs/libvpx/vp9/encoder/vp9_aq_complexity.h +++ b/libs/libvpx/vp9/encoder/vp9_aq_complexity.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP9_ENCODER_VP9_AQ_COMPLEXITY_H_ #define VP9_ENCODER_VP9_AQ_COMPLEXITY_H_ @@ -23,8 +22,8 @@ struct macroblock; // Select a segment for the current Block. void vp9_caq_select_segment(struct VP9_COMP *cpi, struct macroblock *, - BLOCK_SIZE bs, - int mi_row, int mi_col, int projected_rate); + BLOCK_SIZE bs, int mi_row, int mi_col, + int projected_rate); // This function sets up a set of segments with delta Q values around // the baseline frame quantizer. 
diff --git a/libs/libvpx/vp9/encoder/vp9_aq_cyclicrefresh.c b/libs/libvpx/vp9/encoder/vp9_aq_cyclicrefresh.c index b7cfdf6bf4..072d92e4e9 100644 --- a/libs/libvpx/vp9/encoder/vp9_aq_cyclicrefresh.c +++ b/libs/libvpx/vp9/encoder/vp9_aq_cyclicrefresh.c @@ -23,39 +23,28 @@ CYCLIC_REFRESH *vp9_cyclic_refresh_alloc(int mi_rows, int mi_cols) { size_t last_coded_q_map_size; - size_t consec_zero_mv_size; CYCLIC_REFRESH *const cr = vpx_calloc(1, sizeof(*cr)); - if (cr == NULL) - return NULL; + if (cr == NULL) return NULL; cr->map = vpx_calloc(mi_rows * mi_cols, sizeof(*cr->map)); if (cr->map == NULL) { - vpx_free(cr); + vp9_cyclic_refresh_free(cr); return NULL; } last_coded_q_map_size = mi_rows * mi_cols * sizeof(*cr->last_coded_q_map); cr->last_coded_q_map = vpx_malloc(last_coded_q_map_size); if (cr->last_coded_q_map == NULL) { - vpx_free(cr); + vp9_cyclic_refresh_free(cr); return NULL; } assert(MAXQ <= 255); memset(cr->last_coded_q_map, MAXQ, last_coded_q_map_size); - - consec_zero_mv_size = mi_rows * mi_cols * sizeof(*cr->consec_zero_mv); - cr->consec_zero_mv = vpx_malloc(consec_zero_mv_size); - if (cr->consec_zero_mv == NULL) { - vpx_free(cr); - return NULL; - } - memset(cr->consec_zero_mv, 0, consec_zero_mv_size); return cr; } void vp9_cyclic_refresh_free(CYCLIC_REFRESH *cr) { vpx_free(cr->map); vpx_free(cr->last_coded_q_map); - vpx_free(cr->consec_zero_mv); vpx_free(cr); } @@ -63,11 +52,8 @@ void vp9_cyclic_refresh_free(CYCLIC_REFRESH *cr) { // (lower-qp coding). Decision can be based on various factors, such as // size of the coding block (i.e., below min_block size rejected), coding // mode, and rate/distortion. 
-static int candidate_refresh_aq(const CYCLIC_REFRESH *cr, - const MODE_INFO *mi, - int64_t rate, - int64_t dist, - int bsize) { +static int candidate_refresh_aq(const CYCLIC_REFRESH *cr, const MODE_INFO *mi, + int64_t rate, int64_t dist, int bsize) { MV mv = mi->mv[0].as_mv; // Reject the block for lower-qp coding if projected distortion // is above the threshold, and any of the following is true: @@ -79,11 +65,9 @@ static int candidate_refresh_aq(const CYCLIC_REFRESH *cr, mv.col > cr->motion_thresh || mv.col < -cr->motion_thresh || !is_inter_block(mi))) return CR_SEGMENT_ID_BASE; - else if (bsize >= BLOCK_16X16 && - rate < cr->thresh_rate_sb && - is_inter_block(mi) && - mi->mv[0].as_int == 0 && - cr->rate_boost_fac > 10) + else if (bsize >= BLOCK_16X16 && rate < cr->thresh_rate_sb && + is_inter_block(mi) && mi->mv[0].as_int == 0 && + cr->rate_boost_fac > 10) // More aggressive delta-q for bigger blocks with zero motion. return CR_SEGMENT_ID_BOOST2; else @@ -94,9 +78,8 @@ static int candidate_refresh_aq(const CYCLIC_REFRESH *cr, static int compute_deltaq(const VP9_COMP *cpi, int q, double rate_factor) { const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; const RATE_CONTROL *const rc = &cpi->rc; - int deltaq = vp9_compute_qdelta_by_rate(rc, cpi->common.frame_type, - q, rate_factor, - cpi->common.bit_depth); + int deltaq = vp9_compute_qdelta_by_rate(rc, cpi->common.frame_type, q, + rate_factor, cpi->common.bit_depth); if ((-deltaq) > cr->max_qdelta_perc * q / 100) { deltaq = -cr->max_qdelta_perc * q / 100; } @@ -119,17 +102,18 @@ int vp9_cyclic_refresh_estimate_bits_at_q(const VP9_COMP *cpi, double weight_segment1 = (double)cr->actual_num_seg1_blocks / num8x8bl; double weight_segment2 = (double)cr->actual_num_seg2_blocks / num8x8bl; // Take segment weighted average for estimated bits. 
- estimated_bits = (int)((1.0 - weight_segment1 - weight_segment2) * - vp9_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs, - correction_factor, cm->bit_depth) + - weight_segment1 * - vp9_estimate_bits_at_q(cm->frame_type, - cm->base_qindex + cr->qindex_delta[1], mbs, - correction_factor, cm->bit_depth) + - weight_segment2 * - vp9_estimate_bits_at_q(cm->frame_type, - cm->base_qindex + cr->qindex_delta[2], mbs, - correction_factor, cm->bit_depth)); + estimated_bits = + (int)((1.0 - weight_segment1 - weight_segment2) * + vp9_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs, + correction_factor, cm->bit_depth) + + weight_segment1 * + vp9_estimate_bits_at_q(cm->frame_type, + cm->base_qindex + cr->qindex_delta[1], + mbs, correction_factor, cm->bit_depth) + + weight_segment2 * + vp9_estimate_bits_at_q(cm->frame_type, + cm->base_qindex + cr->qindex_delta[2], + mbs, correction_factor, cm->bit_depth)); return estimated_bits; } @@ -147,30 +131,29 @@ int vp9_cyclic_refresh_rc_bits_per_mb(const VP9_COMP *cpi, int i, // Weight for segment prior to encoding: take the average of the target // number for the frame to be encoded and the actual from the previous frame. int target_refresh = cr->percent_refresh * cm->mi_rows * cm->mi_cols / 100; - double weight_segment = (double)((target_refresh + - cr->actual_num_seg1_blocks + cr->actual_num_seg2_blocks) >> 1) / + double weight_segment = + (double)((target_refresh + cr->actual_num_seg1_blocks + + cr->actual_num_seg2_blocks) >> + 1) / num8x8bl; // Compute delta-q corresponding to qindex i. int deltaq = compute_deltaq(cpi, i, cr->rate_ratio_qdelta); // Take segment weighted average for bits per mb. 
bits_per_mb = (int)((1.0 - weight_segment) * - vp9_rc_bits_per_mb(cm->frame_type, i, correction_factor, cm->bit_depth) + - weight_segment * - vp9_rc_bits_per_mb(cm->frame_type, i + deltaq, correction_factor, - cm->bit_depth)); + vp9_rc_bits_per_mb(cm->frame_type, i, + correction_factor, cm->bit_depth) + + weight_segment * + vp9_rc_bits_per_mb(cm->frame_type, i + deltaq, + correction_factor, cm->bit_depth)); return bits_per_mb; } // Prior to coding a given prediction block, of size bsize at (mi_row, mi_col), // check if we should reset the segment_id, and update the cyclic_refresh map // and segmentation map. -void vp9_cyclic_refresh_update_segment(VP9_COMP *const cpi, - MODE_INFO *const mi, - int mi_row, int mi_col, - BLOCK_SIZE bsize, - int64_t rate, - int64_t dist, - int skip, +void vp9_cyclic_refresh_update_segment(VP9_COMP *const cpi, MODE_INFO *const mi, + int mi_row, int mi_col, BLOCK_SIZE bsize, + int64_t rate, int64_t dist, int skip, struct macroblock_plane *const p) { const VP9_COMMON *const cm = &cpi->common; CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; @@ -182,29 +165,27 @@ void vp9_cyclic_refresh_update_segment(VP9_COMP *const cpi, int refresh_this_block = candidate_refresh_aq(cr, mi, rate, dist, bsize); // Default is to not update the refresh map. 
int new_map_value = cr->map[block_index]; - int x = 0; int y = 0; + int x = 0; + int y = 0; int is_skin = 0; - if (refresh_this_block == 0 && - bsize <= BLOCK_16X16 && + if (refresh_this_block == 0 && bsize <= BLOCK_16X16 && cpi->use_skin_detection) { - is_skin = vp9_compute_skin_block(p[0].src.buf, - p[1].src.buf, - p[2].src.buf, - p[0].src.stride, - p[1].src.stride, - bsize); - if (is_skin) - refresh_this_block = 1; + is_skin = + vp9_compute_skin_block(p[0].src.buf, p[1].src.buf, p[2].src.buf, + p[0].src.stride, p[1].src.stride, bsize, 0, 0); + if (is_skin) refresh_this_block = 1; } + if (cpi->oxcf.rc_mode == VPX_VBR && mi->ref_frame[0] == GOLDEN_FRAME) + refresh_this_block = 0; + // If this block is labeled for refresh, check if we should reset the // segment_id. if (cyclic_refresh_segment_id_boosted(mi->segment_id)) { mi->segment_id = refresh_this_block; // Reset segment_id if it will be skipped. - if (skip) - mi->segment_id = CR_SEGMENT_ID_BASE; + if (skip) mi->segment_id = CR_SEGMENT_ID_BASE; } // Update the cyclic refresh map, to be used for setting segmentation map @@ -217,8 +198,7 @@ void vp9_cyclic_refresh_update_segment(VP9_COMP *const cpi, // Else if it is accepted as candidate for refresh, and has not already // been refreshed (marked as 1) then mark it as a candidate for cleanup // for future time (marked as 0), otherwise don't update it. - if (cr->map[block_index] == 1) - new_map_value = 0; + if (cr->map[block_index] == 1) new_map_value = 0; } else { // Leave it marked as block that is not candidate for refresh. 
new_map_value = 1; @@ -240,7 +220,6 @@ void vp9_cyclic_refresh_update_sb_postencode(VP9_COMP *const cpi, BLOCK_SIZE bsize) { const VP9_COMMON *const cm = &cpi->common; CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; - MV mv = mi->mv[0].as_mv; const int bw = num_8x8_blocks_wide_lookup[bsize]; const int bh = num_8x8_blocks_high_lookup[bsize]; const int xmis = VPXMIN(cm->mi_cols - mi_col, bw); @@ -256,23 +235,15 @@ void vp9_cyclic_refresh_update_sb_postencode(VP9_COMP *const cpi, // the map for this spatial location is not entirely correct. if ((!is_inter_block(mi) || !mi->skip) && mi->segment_id <= CR_SEGMENT_ID_BOOST2) { - cr->last_coded_q_map[map_offset] = clamp( - cm->base_qindex + cr->qindex_delta[mi->segment_id], 0, MAXQ); + cr->last_coded_q_map[map_offset] = + clamp(cm->base_qindex + cr->qindex_delta[mi->segment_id], 0, MAXQ); } else if (is_inter_block(mi) && mi->skip && mi->segment_id <= CR_SEGMENT_ID_BOOST2) { cr->last_coded_q_map[map_offset] = VPXMIN( - clamp(cm->base_qindex + cr->qindex_delta[mi->segment_id], - 0, MAXQ), + clamp(cm->base_qindex + cr->qindex_delta[mi->segment_id], 0, MAXQ), cr->last_coded_q_map[map_offset]); - // Update the consecutive zero/low_mv count. - if (is_inter_block(mi) && (abs(mv.row) < 8 && abs(mv.col) < 8)) { - if (cr->consec_zero_mv[map_offset] < 255) - cr->consec_zero_mv[map_offset]++; - } else { - cr->consec_zero_mv[map_offset] = 0; } } - } } // Update the actual number of blocks that were applied the segment delta q. 
@@ -285,11 +256,12 @@ void vp9_cyclic_refresh_postencode(VP9_COMP *const cpi) { cr->actual_num_seg2_blocks = 0; for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) { - if (cyclic_refresh_segment_id( - seg_map[mi_row * cm->mi_cols + mi_col]) == CR_SEGMENT_ID_BOOST1) + if (cyclic_refresh_segment_id(seg_map[mi_row * cm->mi_cols + mi_col]) == + CR_SEGMENT_ID_BOOST1) cr->actual_num_seg1_blocks++; else if (cyclic_refresh_segment_id( - seg_map[mi_row * cm->mi_cols + mi_col]) == CR_SEGMENT_ID_BOOST2) + seg_map[mi_row * cm->mi_cols + mi_col]) == + CR_SEGMENT_ID_BOOST2) cr->actual_num_seg2_blocks++; } } @@ -305,6 +277,7 @@ void vp9_cyclic_refresh_set_golden_update(VP9_COMP *const cpi) { rc->baseline_gf_interval = VPXMIN(4 * (100 / cr->percent_refresh), 40); else rc->baseline_gf_interval = 40; + if (cpi->oxcf.rc_mode == VPX_VBR) rc->baseline_gf_interval = 20; } // Update some encoding stats (from the just encoded frame). If this frame's @@ -317,42 +290,40 @@ void vp9_cyclic_refresh_check_golden_update(VP9_COMP *const cpi) { int mi_row, mi_col; double fraction_low = 0.0; int low_content_frame = 0; - MODE_INFO **mi = cm->mi_grid_visible; RATE_CONTROL *const rc = &cpi->rc; const int rows = cm->mi_rows, cols = cm->mi_cols; int cnt1 = 0, cnt2 = 0; int force_gf_refresh = 0; - + int flag_force_gf_high_motion = 0; for (mi_row = 0; mi_row < rows; mi_row++) { for (mi_col = 0; mi_col < cols; mi_col++) { - int16_t abs_mvr = mi[0]->mv[0].as_mv.row >= 0 ? - mi[0]->mv[0].as_mv.row : -1 * mi[0]->mv[0].as_mv.row; - int16_t abs_mvc = mi[0]->mv[0].as_mv.col >= 0 ? - mi[0]->mv[0].as_mv.col : -1 * mi[0]->mv[0].as_mv.col; - - // Calculate the motion of the background. - if (abs_mvr <= 16 && abs_mvc <= 16) { - cnt1++; - if (abs_mvr == 0 && abs_mvc == 0) - cnt2++; + if (flag_force_gf_high_motion == 1) { + int16_t abs_mvr = mi[0]->mv[0].as_mv.row >= 0 + ? 
mi[0]->mv[0].as_mv.row + : -1 * mi[0]->mv[0].as_mv.row; + int16_t abs_mvc = mi[0]->mv[0].as_mv.col >= 0 + ? mi[0]->mv[0].as_mv.col + : -1 * mi[0]->mv[0].as_mv.col; + // Calculate the motion of the background. + if (abs_mvr <= 16 && abs_mvc <= 16) { + cnt1++; + if (abs_mvr == 0 && abs_mvc == 0) cnt2++; + } } mi++; - // Accumulate low_content_frame. - if (cr->map[mi_row * cols + mi_col] < 1) - low_content_frame++; + if (cr->map[mi_row * cols + mi_col] < 1) low_content_frame++; } mi += 8; } - // For video conference clips, if the background has high motion in current // frame because of the camera movement, set this frame as the golden frame. // Use 70% and 5% as the thresholds for golden frame refreshing. // Also, force this frame as a golden update frame if this frame will change // the resolution (resize_pending != 0). if (cpi->resize_pending != 0 || - (cnt1 * 10 > (70 * rows * cols) && cnt2 * 20 < cnt1)) { + (cnt1 * 100 > (70 * rows * cols) && cnt2 * 20 < cnt1)) { vp9_cyclic_refresh_set_golden_update(cpi); rc->frames_till_gf_update_due = rc->baseline_gf_interval; @@ -361,9 +332,7 @@ void vp9_cyclic_refresh_check_golden_update(VP9_COMP *const cpi) { cpi->refresh_golden_frame = 1; force_gf_refresh = 1; } - - fraction_low = - (double)low_content_frame / (rows * cols); + fraction_low = (double)low_content_frame / (rows * cols); // Update average. cr->low_content_avg = (fraction_low + 3 * cr->low_content_avg) / 4; if (!force_gf_refresh && cpi->refresh_golden_frame == 1) { @@ -407,13 +376,18 @@ static void cyclic_refresh_update_map(VP9_COMP *const cpi) { cr->target_num_seg_blocks = 0; if (cpi->oxcf.content != VP9E_CONTENT_SCREEN) { consec_zero_mv_thresh = 100; - if (cpi->noise_estimate.enabled && cpi->noise_estimate.level >= kMedium) - consec_zero_mv_thresh = 80; } qindex_thresh = cpi->oxcf.content == VP9E_CONTENT_SCREEN - ? vp9_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST2, cm->base_qindex) - : vp9_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST1, cm->base_qindex); + ? 
vp9_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST2, cm->base_qindex) + : vp9_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST1, cm->base_qindex); + // More aggressive settings for noisy content. + if (cpi->noise_estimate.enabled && cpi->noise_estimate.level >= kMedium) { + consec_zero_mv_thresh = 80; + qindex_thresh = + VPXMAX(vp9_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST1, cm->base_qindex), + 7 * cm->base_qindex >> 3); + } do { int sum_map = 0; // Get the mi_row/mi_col corresponding to superblock index i. @@ -438,7 +412,7 @@ static void cyclic_refresh_update_map(VP9_COMP *const cpi) { if (cr->map[bl_index2] == 0) { count_tot++; if (cr->last_coded_q_map[bl_index2] > qindex_thresh || - cr->consec_zero_mv[bl_index2] < consec_zero_mv_thresh) { + cpi->consec_zero_mv[bl_index2] < consec_zero_mv_thresh) { sum_map++; count_sel++; } @@ -463,8 +437,7 @@ static void cyclic_refresh_update_map(VP9_COMP *const cpi) { } while (cr->target_num_seg_blocks < block_count && i != cr->sb_index); cr->sb_index = i; cr->reduce_refresh = 0; - if (count_sel < (3 * count_tot) >> 2) - cr->reduce_refresh = 1; + if (count_sel<(3 * count_tot)>> 2) cr->reduce_refresh = 1; } // Set cyclic refresh parameters. @@ -473,37 +446,47 @@ void vp9_cyclic_refresh_update_parameters(VP9_COMP *const cpi) { const VP9_COMMON *const cm = &cpi->common; CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; cr->percent_refresh = 10; - if (cr->reduce_refresh) - cr->percent_refresh = 5; + if (cr->reduce_refresh) cr->percent_refresh = 5; cr->max_qdelta_perc = 50; cr->time_for_refresh = 0; + cr->motion_thresh = 32; + cr->rate_boost_fac = 15; // Use larger delta-qp (increase rate_ratio_qdelta) for first few (~4) // periods of the refresh cycle, after a key frame. // Account for larger interval on base layer for temporal layers. 
if (cr->percent_refresh > 0 && - rc->frames_since_key < (4 * cpi->svc.number_temporal_layers) * - (100 / cr->percent_refresh)) { + rc->frames_since_key < + (4 * cpi->svc.number_temporal_layers) * (100 / cr->percent_refresh)) { cr->rate_ratio_qdelta = 3.0; } else { cr->rate_ratio_qdelta = 2.0; - if (cpi->noise_estimate.enabled && cpi->noise_estimate.level >= kMedium) - // Reduce the delta-qp if the estimated source noise is above threshold. - cr->rate_ratio_qdelta = 1.5; + if (cpi->noise_estimate.enabled && cpi->noise_estimate.level >= kMedium) { + // Reduce the delta-qp if the estimated source noise is above threshold. + cr->rate_ratio_qdelta = 1.7; + cr->rate_boost_fac = 13; + } } // Adjust some parameters for low resolutions at low bitrates. - if (cm->width <= 352 && - cm->height <= 288 && - rc->avg_frame_bandwidth < 3400) { + if (cm->width <= 352 && cm->height <= 288 && rc->avg_frame_bandwidth < 3400) { cr->motion_thresh = 4; cr->rate_boost_fac = 10; - } else { - cr->motion_thresh = 32; - cr->rate_boost_fac = 15; } if (cpi->svc.spatial_layer_id > 0) { cr->motion_thresh = 4; cr->rate_boost_fac = 12; } + if (cpi->oxcf.rc_mode == VPX_VBR) { + // To be adjusted for VBR mode, e.g., based on gf period and boost. + // For now use smaller qp-delta (than CBR), no second boosted seg, and + // turn-off (no refresh) on golden refresh (since it's already boosted). + cr->percent_refresh = 10; + cr->rate_ratio_qdelta = 1.5; + cr->rate_boost_fac = 10; + if (cpi->refresh_golden_frame == 1) { + cr->percent_refresh = 0; + cr->rate_ratio_qdelta = 1.0; + } + } } // Setup cyclic background refresh: set delta q and segmentation map. @@ -516,12 +499,10 @@ void vp9_cyclic_refresh_setup(VP9_COMP *const cpi) { // instead of completely shutting off at low bitrates. For now keep it on. 
// const int apply_cyclic_refresh = apply_cyclic_refresh_bitrate(cm, rc); const int apply_cyclic_refresh = 1; - if (cm->current_video_frame == 0) - cr->low_content_avg = 0.0; + if (cm->current_video_frame == 0) cr->low_content_avg = 0.0; // Don't apply refresh on key frame or temporal enhancement layer frames. - if (!apply_cyclic_refresh || - (cm->frame_type == KEY_FRAME) || - (cpi->svc.temporal_layer_id > 0)) { + if (!apply_cyclic_refresh || (cm->frame_type == KEY_FRAME) || + (cpi->force_update_segmentation) || (cpi->svc.temporal_layer_id > 0)) { // Set segmentation map to 0 and disable. unsigned char *const seg_map = cpi->segmentation_map; memset(seg_map, 0, cm->mi_rows * cm->mi_cols); @@ -529,8 +510,6 @@ void vp9_cyclic_refresh_setup(VP9_COMP *const cpi) { if (cm->frame_type == KEY_FRAME) { memset(cr->last_coded_q_map, MAXQ, cm->mi_rows * cm->mi_cols * sizeof(*cr->last_coded_q_map)); - memset(cr->consec_zero_mv, 0, - cm->mi_rows * cm->mi_cols * sizeof(*cr->consec_zero_mv)); cr->sb_index = 0; } return; @@ -588,8 +567,7 @@ void vp9_cyclic_refresh_setup(VP9_COMP *const cpi) { vp9_set_segdata(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q, qindex_delta); // Reset if resoluton change has occurred. - if (cpi->resize_pending != 0) - vp9_cyclic_refresh_reset_resize(cpi); + if (cpi->resize_pending != 0) vp9_cyclic_refresh_reset_resize(cpi); // Update the segmentation and refresh map. 
cyclic_refresh_update_map(cpi); @@ -605,7 +583,6 @@ void vp9_cyclic_refresh_reset_resize(VP9_COMP *const cpi) { CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; memset(cr->map, 0, cm->mi_rows * cm->mi_cols); memset(cr->last_coded_q_map, MAXQ, cm->mi_rows * cm->mi_cols); - memset(cr->consec_zero_mv, 0, cm->mi_rows * cm->mi_cols); cr->sb_index = 0; cpi->refresh_golden_frame = 1; cpi->refresh_alt_ref_frame = 1; diff --git a/libs/libvpx/vp9/encoder/vp9_aq_cyclicrefresh.h b/libs/libvpx/vp9/encoder/vp9_aq_cyclicrefresh.h index 095b9283f9..a4be031295 100644 --- a/libs/libvpx/vp9/encoder/vp9_aq_cyclicrefresh.h +++ b/libs/libvpx/vp9/encoder/vp9_aq_cyclicrefresh.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP9_ENCODER_VP9_AQ_CYCLICREFRESH_H_ #define VP9_ENCODER_VP9_AQ_CYCLICREFRESH_H_ @@ -23,9 +22,9 @@ extern "C" { // The segment ids used in cyclic refresh: from base (no boost) to increasing // boost (higher delta-qp). -#define CR_SEGMENT_ID_BASE 0 -#define CR_SEGMENT_ID_BOOST1 1 -#define CR_SEGMENT_ID_BOOST2 2 +#define CR_SEGMENT_ID_BASE 0 +#define CR_SEGMENT_ID_BOOST1 1 +#define CR_SEGMENT_ID_BOOST2 2 // Maximum rate target ratio for setting segment delta-qp. #define CR_MAX_RATE_TARGET_RATIO 4.0 @@ -53,8 +52,6 @@ struct CYCLIC_REFRESH { signed char *map; // Map of the last q a block was coded at. uint8_t *last_coded_q_map; - // Count on how many consecutive times a block uses ZER0MV for encoding. - uint8_t *consec_zero_mv; // Thresholds applied to the projected rate/distortion of the coding block, // when deciding whether block should be refreshed. int64_t thresh_rate_sb; @@ -93,8 +90,8 @@ int vp9_cyclic_refresh_rc_bits_per_mb(const struct VP9_COMP *cpi, int i, // check if we should reset the segment_id, and update the cyclic_refresh map // and segmentation map. 
void vp9_cyclic_refresh_update_segment(struct VP9_COMP *const cpi, - MODE_INFO *const mi, - int mi_row, int mi_col, BLOCK_SIZE bsize, + MODE_INFO *const mi, int mi_row, + int mi_col, BLOCK_SIZE bsize, int64_t rate, int64_t dist, int skip, struct macroblock_plane *const p); diff --git a/libs/libvpx/vp9/encoder/vp9_aq_variance.c b/libs/libvpx/vp9/encoder/vp9_aq_variance.c index d8f7d07213..477f62ba5a 100644 --- a/libs/libvpx/vp9/encoder/vp9_aq_variance.c +++ b/libs/libvpx/vp9/encoder/vp9_aq_variance.c @@ -23,19 +23,19 @@ #define ENERGY_MIN (-4) #define ENERGY_MAX (1) -#define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN + 1) -#define ENERGY_IN_BOUNDS(energy)\ +#define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN + 1) +#define ENERGY_IN_BOUNDS(energy) \ assert((energy) >= ENERGY_MIN && (energy) <= ENERGY_MAX) -static const double rate_ratio[MAX_SEGMENTS] = - {2.5, 2.0, 1.5, 1.0, 0.75, 1.0, 1.0, 1.0}; -static const int segment_id[ENERGY_SPAN] = {0, 1, 1, 2, 3, 4}; +static const double rate_ratio[MAX_SEGMENTS] = { 2.5, 2.0, 1.5, 1.0, + 0.75, 1.0, 1.0, 1.0 }; +static const int segment_id[ENERGY_SPAN] = { 0, 1, 1, 2, 3, 4 }; -#define SEGMENT_ID(i) segment_id[(i) - ENERGY_MIN] +#define SEGMENT_ID(i) segment_id[(i)-ENERGY_MIN] -DECLARE_ALIGNED(16, static const uint8_t, vp9_64_zeros[64]) = {0}; +DECLARE_ALIGNED(16, static const uint8_t, vp9_64_zeros[64]) = { 0 }; #if CONFIG_VP9_HIGHBITDEPTH -DECLARE_ALIGNED(16, static const uint16_t, vp9_highbd_64_zeros[64]) = {0}; +DECLARE_ALIGNED(16, static const uint16_t, vp9_highbd_64_zeros[64]) = { 0 }; #endif unsigned int vp9_vaq_segment_id(int energy) { @@ -49,7 +49,7 @@ void vp9_vaq_frame_setup(VP9_COMP *cpi) { int i; if (frame_is_intra_only(cm) || cm->error_resilient_mode || - cpi->refresh_alt_ref_frame || + cpi->refresh_alt_ref_frame || cpi->force_update_segmentation || (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) { vp9_enable_segmentation(seg); vp9_clearall_segfeatures(seg); @@ -85,9 +85,9 @@ void vp9_vaq_frame_setup(VP9_COMP 
*cpi) { /* TODO(agrange, paulwilkins): The block_variance calls the unoptimized versions * of variance() and highbd_8_variance(). It should not. */ -static void aq_variance(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - int w, int h, unsigned int *sse, int *sum) { +static void aq_variance(const uint8_t *a, int a_stride, const uint8_t *b, + int b_stride, int w, int h, unsigned int *sse, + int *sum) { int i, j; *sum = 0; @@ -106,9 +106,9 @@ static void aq_variance(const uint8_t *a, int a_stride, } #if CONFIG_VP9_HIGHBITDEPTH -static void aq_highbd_variance64(const uint8_t *a8, int a_stride, - const uint8_t *b8, int b_stride, - int w, int h, uint64_t *sse, uint64_t *sum) { +static void aq_highbd_variance64(const uint8_t *a8, int a_stride, + const uint8_t *b8, int b_stride, int w, int h, + uint64_t *sse, uint64_t *sum) { int i, j; uint16_t *a = CONVERT_TO_SHORTPTR(a8); @@ -127,9 +127,9 @@ static void aq_highbd_variance64(const uint8_t *a8, int a_stride, } } -static void aq_highbd_8_variance(const uint8_t *a8, int a_stride, - const uint8_t *b8, int b_stride, - int w, int h, unsigned int *sse, int *sum) { +static void aq_highbd_8_variance(const uint8_t *a8, int a_stride, + const uint8_t *b8, int b_stride, int w, int h, + unsigned int *sse, int *sum) { uint64_t sse_long = 0; uint64_t sum_long = 0; aq_highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long); @@ -142,10 +142,10 @@ static unsigned int block_variance(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) { MACROBLOCKD *xd = &x->e_mbd; unsigned int var, sse; - int right_overflow = (xd->mb_to_right_edge < 0) ? - ((-xd->mb_to_right_edge) >> 3) : 0; - int bottom_overflow = (xd->mb_to_bottom_edge < 0) ? - ((-xd->mb_to_bottom_edge) >> 3) : 0; + int right_overflow = + (xd->mb_to_right_edge < 0) ? ((-xd->mb_to_right_edge) >> 3) : 0; + int bottom_overflow = + (xd->mb_to_bottom_edge < 0) ? 
((-xd->mb_to_bottom_edge) >> 3) : 0; if (right_overflow || bottom_overflow) { const int bw = 8 * num_8x8_blocks_wide_lookup[bs] - right_overflow; @@ -159,33 +159,30 @@ static unsigned int block_variance(VP9_COMP *cpi, MACROBLOCK *x, sse >>= 2 * (xd->bd - 8); avg >>= (xd->bd - 8); } else { - aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, - vp9_64_zeros, 0, bw, bh, &sse, &avg); + aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp9_64_zeros, 0, + bw, bh, &sse, &avg); } #else - aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, - vp9_64_zeros, 0, bw, bh, &sse, &avg); + aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp9_64_zeros, 0, + bw, bh, &sse, &avg); #endif // CONFIG_VP9_HIGHBITDEPTH - var = sse - (((int64_t)avg * avg) / (bw * bh)); - return (256 * var) / (bw * bh); + var = sse - (unsigned int)(((int64_t)avg * avg) / (bw * bh)); + return (unsigned int)(((uint64_t)256 * var) / (bw * bh)); } else { #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, - x->plane[0].src.stride, - CONVERT_TO_BYTEPTR(vp9_highbd_64_zeros), - 0, &sse); + var = + cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride, + CONVERT_TO_BYTEPTR(vp9_highbd_64_zeros), 0, &sse); } else { - var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, - x->plane[0].src.stride, + var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride, vp9_64_zeros, 0, &sse); } #else - var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, - x->plane[0].src.stride, + var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride, vp9_64_zeros, 0, &sse); #endif // CONFIG_VP9_HIGHBITDEPTH - return (256 * var) >> num_pels_log2_lookup[bs]; + return (unsigned int)(((uint64_t)256 * var) >> num_pels_log2_lookup[bs]); } } @@ -201,7 +198,7 @@ int vp9_block_energy(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) { double energy_midpoint; vpx_clear_system_state(); energy_midpoint = - (cpi->oxcf.pass == 2) ? 
cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT; + (cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT; energy = vp9_log_block_var(cpi, x, bs) - energy_midpoint; return clamp((int)round(energy), ENERGY_MIN, ENERGY_MAX); } diff --git a/libs/libvpx/vp9/encoder/vp9_aq_variance.h b/libs/libvpx/vp9/encoder/vp9_aq_variance.h index a0effa3116..211a69f392 100644 --- a/libs/libvpx/vp9/encoder/vp9_aq_variance.h +++ b/libs/libvpx/vp9/encoder/vp9_aq_variance.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP9_ENCODER_VP9_AQ_VARIANCE_H_ #define VP9_ENCODER_VP9_AQ_VARIANCE_H_ diff --git a/libs/libvpx/vp9/encoder/vp9_bitstream.c b/libs/libvpx/vp9/encoder/vp9_bitstream.c index 5600ed4585..860da83330 100644 --- a/libs/libvpx/vp9/encoder/vp9_bitstream.c +++ b/libs/libvpx/vp9/encoder/vp9_bitstream.c @@ -36,14 +36,17 @@ #include "vp9/encoder/vp9_tokenize.h" static const struct vp9_token intra_mode_encodings[INTRA_MODES] = { - {0, 1}, {6, 3}, {28, 5}, {30, 5}, {58, 6}, {59, 6}, {126, 7}, {127, 7}, - {62, 6}, {2, 2}}; + { 0, 1 }, { 6, 3 }, { 28, 5 }, { 30, 5 }, { 58, 6 }, + { 59, 6 }, { 126, 7 }, { 127, 7 }, { 62, 6 }, { 2, 2 } +}; static const struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS] = - {{0, 1}, {2, 2}, {3, 2}}; -static const struct vp9_token partition_encodings[PARTITION_TYPES] = - {{0, 1}, {2, 2}, {6, 3}, {7, 3}}; -static const struct vp9_token inter_mode_encodings[INTER_MODES] = - {{2, 2}, {6, 3}, {0, 1}, {7, 3}}; + { { 0, 1 }, { 2, 2 }, { 3, 2 } }; +static const struct vp9_token partition_encodings[PARTITION_TYPES] = { + { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 } +}; +static const struct vp9_token inter_mode_encodings[INTER_MODES] = { + { 2, 2 }, { 6, 3 }, { 0, 1 }, { 7, 3 } +}; static void write_intra_mode(vpx_writer *w, PREDICTION_MODE mode, const vpx_prob *probs) { @@ -57,15 +60,15 @@ static void write_inter_mode(vpx_writer *w, PREDICTION_MODE mode, &inter_mode_encodings[INTER_OFFSET(mode)]); } 
-static void encode_unsigned_max(struct vpx_write_bit_buffer *wb, - int data, int max) { +static void encode_unsigned_max(struct vpx_write_bit_buffer *wb, int data, + int max) { vpx_wb_write_literal(wb, data, get_unsigned_bits(max)); } static void prob_diff_update(const vpx_tree_index *tree, vpx_prob probs[/*n - 1*/], - const unsigned int counts[/*n - 1*/], - int n, vpx_writer *w) { + const unsigned int counts[/*n - 1*/], int n, + vpx_writer *w) { int i; unsigned int branch_ct[32][2]; @@ -77,13 +80,13 @@ static void prob_diff_update(const vpx_tree_index *tree, vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]); } -static void write_selected_tx_size(const VP9_COMMON *cm, - const MACROBLOCKD *xd, vpx_writer *w) { +static void write_selected_tx_size(const VP9_COMMON *cm, const MACROBLOCKD *xd, + vpx_writer *w) { TX_SIZE tx_size = xd->mi[0]->tx_size; BLOCK_SIZE bsize = xd->mi[0]->sb_type; const TX_SIZE max_tx_size = max_txsize_lookup[bsize]; - const vpx_prob *const tx_probs = get_tx_probs2(max_tx_size, xd, - &cm->fc->tx_probs); + const vpx_prob *const tx_probs = + get_tx_probs2(max_tx_size, xd, &cm->fc->tx_probs); vpx_write(w, tx_size != TX_4X4, tx_probs[0]); if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) { vpx_write(w, tx_size != TX_8X8, tx_probs[1]); @@ -120,18 +123,18 @@ static void update_switchable_interp_probs(VP9_COMMON *cm, vpx_writer *w, counts->switchable_interp[j], SWITCHABLE_FILTERS, w); } -static void pack_mb_tokens(vpx_writer *w, - TOKENEXTRA **tp, const TOKENEXTRA *const stop, +static void pack_mb_tokens(vpx_writer *w, TOKENEXTRA **tp, + const TOKENEXTRA *const stop, vpx_bit_depth_t bit_depth) { const TOKENEXTRA *p; const vp9_extra_bit *const extra_bits = #if CONFIG_VP9_HIGHBITDEPTH - (bit_depth == VPX_BITS_12) ? vp9_extra_bits_high12 : - (bit_depth == VPX_BITS_10) ? vp9_extra_bits_high10 : - vp9_extra_bits; + (bit_depth == VPX_BITS_12) + ? vp9_extra_bits_high12 + : (bit_depth == VPX_BITS_10) ? 
vp9_extra_bits_high10 : vp9_extra_bits; #else - vp9_extra_bits; - (void) bit_depth; + vp9_extra_bits; + (void)bit_depth; #endif // CONFIG_VP9_HIGHBITDEPTH for (p = *tp; p < stop && p->token != EOSB_TOKEN; ++p) { @@ -144,7 +147,7 @@ static void pack_mb_tokens(vpx_writer *w, vpx_write(w, 0, p->context_tree[1]); ++p; if (p == stop || p->token == EOSB_TOKEN) { - *tp = (TOKENEXTRA*)(uintptr_t)p + (p->token == EOSB_TOKEN); + *tp = (TOKENEXTRA *)(uintptr_t)p + (p->token == EOSB_TOKEN); return; } } @@ -182,7 +185,7 @@ static void pack_mb_tokens(vpx_writer *w, } } } - *tp = (TOKENEXTRA*)(uintptr_t)p + (p->token == EOSB_TOKEN); + *tp = (TOKENEXTRA *)(uintptr_t)p + (p->token == EOSB_TOKEN); } static void write_segment_id(vpx_writer *w, const struct segmentation *seg, @@ -203,7 +206,7 @@ static void write_ref_frames(const VP9_COMMON *cm, const MACROBLOCKD *xd, if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) { assert(!is_compound); assert(mi->ref_frame[0] == - get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME)); + get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME)); } else { // does the feature use compound prediction or not // (if not specified at the frame/segment level) @@ -248,8 +251,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi, const int pred_flag = mi->seg_id_predicted; vpx_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd); vpx_write(w, pred_flag, pred_prob); - if (!pred_flag) - write_segment_id(w, seg, segment_id); + if (!pred_flag) write_segment_id(w, seg, segment_id); } else { write_segment_id(w, seg, segment_id); } @@ -293,7 +295,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi, } if (cm->interp_filter == SWITCHABLE) { - const int ctx = vp9_get_pred_context_switchable_interp(xd); + const int ctx = get_pred_context_switchable_interp(xd); vp9_write_token(w, vp9_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx], &switchable_interp_encodings[mi->interp_filter]); @@ -338,8 +340,7 @@ static 
void write_mb_modes_kf(const VP9_COMMON *cm, const MACROBLOCKD *xd, const MODE_INFO *const left_mi = xd->left_mi; const BLOCK_SIZE bsize = mi->sb_type; - if (seg->update_map) - write_segment_id(w, seg, mi->segment_id); + if (seg->update_map) write_segment_id(w, seg, mi->segment_id); write_skip(cm, xd, mi->segment_id, mi, w); @@ -367,8 +368,8 @@ static void write_mb_modes_kf(const VP9_COMMON *cm, const MACROBLOCKD *xd, static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile, vpx_writer *w, TOKENEXTRA **tok, - const TOKENEXTRA *const tok_end, - int mi_row, int mi_col) { + const TOKENEXTRA *const tok_end, int mi_row, + int mi_col) { const VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &cpi->td.mb.e_mbd; MODE_INFO *m; @@ -376,13 +377,12 @@ static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile, xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col); m = xd->mi[0]; - cpi->td.mb.mbmi_ext = cpi->td.mb.mbmi_ext_base + - (mi_row * cm->mi_cols + mi_col); + cpi->td.mb.mbmi_ext = + cpi->td.mb.mbmi_ext_base + (mi_row * cm->mi_cols + mi_col); - set_mi_row_col(xd, tile, - mi_row, num_8x8_blocks_high_lookup[m->sb_type], - mi_col, num_8x8_blocks_wide_lookup[m->sb_type], - cm->mi_rows, cm->mi_cols); + set_mi_row_col(xd, tile, mi_row, num_8x8_blocks_high_lookup[m->sb_type], + mi_col, num_8x8_blocks_wide_lookup[m->sb_type], cm->mi_rows, + cm->mi_cols); if (frame_is_intra_only(cm)) { write_mb_modes_kf(cm, xd, xd->mi, w); } else { @@ -394,9 +394,9 @@ static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile, } static void write_partition(const VP9_COMMON *const cm, - const MACROBLOCKD *const xd, - int hbs, int mi_row, int mi_col, - PARTITION_TYPE p, BLOCK_SIZE bsize, vpx_writer *w) { + const MACROBLOCKD *const xd, int hbs, int mi_row, + int mi_col, PARTITION_TYPE p, BLOCK_SIZE bsize, + vpx_writer *w) { const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize); const vpx_prob *const probs = xd->partition_probs[ctx]; 
const int has_rows = (mi_row + hbs) < cm->mi_rows; @@ -415,10 +415,10 @@ static void write_partition(const VP9_COMMON *const cm, } } -static void write_modes_sb(VP9_COMP *cpi, - const TileInfo *const tile, vpx_writer *w, - TOKENEXTRA **tok, const TOKENEXTRA *const tok_end, - int mi_row, int mi_col, BLOCK_SIZE bsize) { +static void write_modes_sb(VP9_COMP *cpi, const TileInfo *const tile, + vpx_writer *w, TOKENEXTRA **tok, + const TOKENEXTRA *const tok_end, int mi_row, + int mi_col, BLOCK_SIZE bsize) { const VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &cpi->td.mb.e_mbd; @@ -428,8 +428,7 @@ static void write_modes_sb(VP9_COMP *cpi, BLOCK_SIZE subsize; const MODE_INFO *m = NULL; - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; + if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]; @@ -462,8 +461,7 @@ static void write_modes_sb(VP9_COMP *cpi, write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs, subsize); break; - default: - assert(0); + default: assert(0); } } @@ -473,9 +471,9 @@ static void write_modes_sb(VP9_COMP *cpi, update_partition_context(xd, mi_row, mi_col, subsize, bsize); } -static void write_modes(VP9_COMP *cpi, - const TileInfo *const tile, vpx_writer *w, - TOKENEXTRA **tok, const TOKENEXTRA *const tok_end) { +static void write_modes(VP9_COMP *cpi, const TileInfo *const tile, + vpx_writer *w, TOKENEXTRA **tok, + const TOKENEXTRA *const tok_end) { const VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &cpi->td.mb.e_mbd; int mi_row, mi_col; @@ -487,8 +485,7 @@ static void write_modes(VP9_COMP *cpi, vp9_zero(xd->left_seg_context); for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; mi_col += MI_BLOCK_SIZE) - write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, - BLOCK_64X64); + write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, BLOCK_64X64); } } @@ -496,7 +493,7 @@ static void build_tree_distribution(VP9_COMP *cpi, 
TX_SIZE tx_size, vp9_coeff_stats *coef_branch_ct, vp9_coeff_probs_model *coef_probs) { vp9_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size]; - unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] = + unsigned int(*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] = cpi->common.counts.eob_branch[tx_size]; int i, j, k, l, m; @@ -507,19 +504,19 @@ static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size, vp9_tree_probs_from_distribution(vp9_coef_tree, coef_branch_ct[i][j][k][l], coef_counts[i][j][k][l]); - coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] - - coef_branch_ct[i][j][k][l][0][0]; + coef_branch_ct[i][j][k][l][0][1] = + eob_branch_ct[i][j][k][l] - coef_branch_ct[i][j][k][l][0][0]; for (m = 0; m < UNCONSTRAINED_NODES; ++m) - coef_probs[i][j][k][l][m] = get_binary_prob( - coef_branch_ct[i][j][k][l][m][0], - coef_branch_ct[i][j][k][l][m][1]); + coef_probs[i][j][k][l][m] = + get_binary_prob(coef_branch_ct[i][j][k][l][m][0], + coef_branch_ct[i][j][k][l][m][1]); } } } } } -static void update_coef_probs_common(vpx_writer* const bc, VP9_COMP *cpi, +static void update_coef_probs_common(vpx_writer *const bc, VP9_COMP *cpi, TX_SIZE tx_size, vp9_coeff_stats *frame_branch_ct, vp9_coeff_probs_model *new_coef_probs) { @@ -533,7 +530,7 @@ static void update_coef_probs_common(vpx_writer* const bc, VP9_COMP *cpi, case TWO_LOOP: { /* dry run to see if there is any update at all needed */ int savings = 0; - int update[2] = {0, 0}; + int update[2] = { 0, 0 }; for (i = 0; i < PLANE_TYPES; ++i) { for (j = 0; j < REF_TYPES; ++j) { for (k = 0; k < COEF_BANDS; ++k) { @@ -545,13 +542,12 @@ static void update_coef_probs_common(vpx_writer* const bc, VP9_COMP *cpi, int u = 0; if (t == PIVOT_NODE) s = vp9_prob_diff_update_savings_search_model( - frame_branch_ct[i][j][k][l][0], - old_coef_probs[i][j][k][l], &newp, upd, stepsize); + frame_branch_ct[i][j][k][l][0], oldp, &newp, upd, + stepsize); else s = 
vp9_prob_diff_update_savings_search( frame_branch_ct[i][j][k][l][t], oldp, &newp, upd); - if (s > 0 && newp != oldp) - u = 1; + if (s > 0 && newp != oldp) u = 1; if (u) savings += s - (int)(vp9_cost_zero(upd)); else @@ -583,14 +579,12 @@ static void update_coef_probs_common(vpx_writer* const bc, VP9_COMP *cpi, int u = 0; if (t == PIVOT_NODE) s = vp9_prob_diff_update_savings_search_model( - frame_branch_ct[i][j][k][l][0], - old_coef_probs[i][j][k][l], &newp, upd, stepsize); + frame_branch_ct[i][j][k][l][0], *oldp, &newp, upd, + stepsize); else s = vp9_prob_diff_update_savings_search( - frame_branch_ct[i][j][k][l][t], - *oldp, &newp, upd); - if (s > 0 && newp != *oldp) - u = 1; + frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd); + if (s > 0 && newp != *oldp) u = 1; vpx_write(bc, u, upd); if (u) { /* send/use new probability */ @@ -621,16 +615,14 @@ static void update_coef_probs_common(vpx_writer* const bc, VP9_COMP *cpi, if (t == PIVOT_NODE) { s = vp9_prob_diff_update_savings_search_model( - frame_branch_ct[i][j][k][l][0], - old_coef_probs[i][j][k][l], &newp, upd, stepsize); + frame_branch_ct[i][j][k][l][0], *oldp, &newp, upd, + stepsize); } else { s = vp9_prob_diff_update_savings_search( - frame_branch_ct[i][j][k][l][t], - *oldp, &newp, upd); + frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd); } - if (s > 0 && newp != *oldp) - u = 1; + if (s > 0 && newp != *oldp) u = 1; updates += u; if (u == 0 && updates == 0) { noupdates_before_first++; @@ -659,12 +651,11 @@ static void update_coef_probs_common(vpx_writer* const bc, VP9_COMP *cpi, } return; } - default: - assert(0); + default: assert(0); } } -static void update_coef_probs(VP9_COMP *cpi, vpx_writer* w) { +static void update_coef_probs(VP9_COMP *cpi, vpx_writer *w) { const TX_MODE tx_mode = cpi->common.tx_mode; const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode]; TX_SIZE tx_size; @@ -675,8 +666,7 @@ static void update_coef_probs(VP9_COMP *cpi, vpx_writer* w) { (tx_size >= TX_16X16 && 
cpi->sf.tx_size_search_method == USE_TX_8X8)) { vpx_write_bit(w, 0); } else { - build_tree_distribution(cpi, tx_size, frame_branch_ct, - frame_coef_probs); + build_tree_distribution(cpi, tx_size, frame_branch_ct, frame_coef_probs); update_coef_probs_common(w, cpi, tx_size, frame_branch_ct, frame_coef_probs); } @@ -748,8 +738,7 @@ static void encode_segmentation(VP9_COMMON *cm, MACROBLOCKD *xd, const struct segmentation *seg = &cm->seg; vpx_wb_write_bit(wb, seg->enabled); - if (!seg->enabled) - return; + if (!seg->enabled) return; // Segmentation map vpx_wb_write_bit(wb, seg->update_map); @@ -761,8 +750,7 @@ static void encode_segmentation(VP9_COMMON *cm, MACROBLOCKD *xd, const int prob = seg->tree_probs[i]; const int update = prob != MAX_PROB; vpx_wb_write_bit(wb, update); - if (update) - vpx_wb_write_literal(wb, prob, 8); + if (update) vpx_wb_write_literal(wb, prob, 8); } // Write out the chosen coding method. @@ -772,8 +760,7 @@ static void encode_segmentation(VP9_COMMON *cm, MACROBLOCKD *xd, const int prob = seg->pred_probs[i]; const int update = prob != MAX_PROB; vpx_wb_write_bit(wb, update); - if (update) - vpx_wb_write_literal(wb, prob, 8); + if (update) vpx_wb_write_literal(wb, prob, 8); } } } @@ -817,7 +804,6 @@ static void encode_txfm_probs(VP9_COMMON *cm, vpx_writer *w, unsigned int ct_16x16p[TX_SIZES - 2][2]; unsigned int ct_32x32p[TX_SIZES - 1][2]; - for (i = 0; i < TX_SIZE_CONTEXTS; i++) { tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p); for (j = 0; j < TX_SIZES - 3; j++) @@ -879,19 +865,16 @@ static void write_tile_info(const VP9_COMMON *const cm, // columns ones = cm->log2_tile_cols - min_log2_tile_cols; - while (ones--) - vpx_wb_write_bit(wb, 1); + while (ones--) vpx_wb_write_bit(wb, 1); - if (cm->log2_tile_cols < max_log2_tile_cols) - vpx_wb_write_bit(wb, 0); + if (cm->log2_tile_cols < max_log2_tile_cols) vpx_wb_write_bit(wb, 0); // rows vpx_wb_write_bit(wb, cm->log2_tile_rows != 0); - if (cm->log2_tile_rows != 0) - vpx_wb_write_bit(wb, 
cm->log2_tile_rows != 1); + if (cm->log2_tile_rows != 0) vpx_wb_write_bit(wb, cm->log2_tile_rows != 1); } -static int get_refresh_mask(VP9_COMP *cpi) { +int vp9_get_refresh_mask(VP9_COMP *cpi) { if (vp9_preserve_existing_gf(cpi)) { // We have decided to preserve the previously existing golden frame as our // new ARF frame. However, in the short term we leave it in the GF slot and, @@ -935,15 +918,15 @@ static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) { TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col]; tok_end = cpi->tile_tok[tile_row][tile_col] + - cpi->tok_count[tile_row][tile_col]; + cpi->tok_count[tile_row][tile_col]; if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) vpx_start_encode(&residual_bc, data_ptr + total_size + 4); else vpx_start_encode(&residual_bc, data_ptr + total_size); - write_modes(cpi, &cpi->tile_data[tile_idx].tile_info, - &residual_bc, &tok, tok_end); + write_modes(cpi, &cpi->tile_data[tile_idx].tile_info, &residual_bc, &tok, + tok_end); assert(tok == tok_end); vpx_stop_encode(&residual_bc); if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) { @@ -961,8 +944,8 @@ static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) { static void write_render_size(const VP9_COMMON *cm, struct vpx_write_bit_buffer *wb) { - const int scaling_active = cm->width != cm->render_width || - cm->height != cm->render_height; + const int scaling_active = + cm->width != cm->render_width || cm->height != cm->render_height; vpx_wb_write_bit(wb, scaling_active); if (scaling_active) { vpx_wb_write_literal(wb, cm->render_width - 1, 16); @@ -990,17 +973,17 @@ static void write_frame_size_with_refs(VP9_COMP *cpi, // Set "found" to 0 for temporal svc and for spatial svc key frame if (cpi->use_svc && ((cpi->svc.number_temporal_layers > 1 && - cpi->oxcf.rc_mode == VPX_CBR) || - (cpi->svc.number_spatial_layers > 1 && - cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame) || - (is_two_pass_svc(cpi) && - cpi->svc.encode_empty_frame_state 
== ENCODING && - cpi->svc.layer_context[0].frames_from_key_frame < - cpi->svc.number_temporal_layers + 1))) { + cpi->oxcf.rc_mode == VPX_CBR) || + (cpi->svc.number_spatial_layers > 1 && + cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame) || + (is_two_pass_svc(cpi) && + cpi->svc.encode_empty_frame_state == ENCODING && + cpi->svc.layer_context[0].frames_from_key_frame < + cpi->svc.number_temporal_layers + 1))) { found = 0; } else if (cfg != NULL) { - found = cm->width == cfg->y_crop_width && - cm->height == cfg->y_crop_height; + found = + cm->width == cfg->y_crop_width && cm->height == cfg->y_crop_height; } vpx_wb_write_bit(wb, found); if (found) { @@ -1025,20 +1008,11 @@ static void write_sync_code(struct vpx_write_bit_buffer *wb) { static void write_profile(BITSTREAM_PROFILE profile, struct vpx_write_bit_buffer *wb) { switch (profile) { - case PROFILE_0: - vpx_wb_write_literal(wb, 0, 2); - break; - case PROFILE_1: - vpx_wb_write_literal(wb, 2, 2); - break; - case PROFILE_2: - vpx_wb_write_literal(wb, 1, 2); - break; - case PROFILE_3: - vpx_wb_write_literal(wb, 6, 3); - break; - default: - assert(0); + case PROFILE_0: vpx_wb_write_literal(wb, 0, 2); break; + case PROFILE_1: vpx_wb_write_literal(wb, 2, 2); break; + case PROFILE_2: vpx_wb_write_literal(wb, 1, 2); break; + case PROFILE_3: vpx_wb_write_literal(wb, 6, 3); break; + default: assert(0); } } @@ -1093,8 +1067,7 @@ static void write_uncompressed_header(VP9_COMP *cpi, // will change to show_frame flag to 0, then add an one byte frame with // show_existing_frame flag which tells the decoder which frame we want to // show. 
- if (!cm->show_frame) - vpx_wb_write_bit(wb, cm->intra_only); + if (!cm->show_frame) vpx_wb_write_bit(wb, cm->intra_only); if (!cm->error_resilient_mode) vpx_wb_write_literal(wb, cm->reset_frame_context, 2); @@ -1107,11 +1080,11 @@ static void write_uncompressed_header(VP9_COMP *cpi, write_bitdepth_colorspace_sampling(cm, wb); } - vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES); + vpx_wb_write_literal(wb, vp9_get_refresh_mask(cpi), REF_FRAMES); write_frame_size(cm, wb); } else { MV_REFERENCE_FRAME ref_frame; - vpx_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES); + vpx_wb_write_literal(wb, vp9_get_refresh_mask(cpi), REF_FRAMES); for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { assert(get_ref_frame_map_idx(cpi, ref_frame) != INVALID_IDX); vpx_wb_write_literal(wb, get_ref_frame_map_idx(cpi, ref_frame), @@ -1222,7 +1195,7 @@ static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) { void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) { uint8_t *data = dest; size_t first_part_size, uncompressed_hdr_size; - struct vpx_write_bit_buffer wb = {data, 0}; + struct vpx_write_bit_buffer wb = { data, 0 }; struct vpx_write_bit_buffer saved_wb; write_uncompressed_header(cpi, &wb); diff --git a/libs/libvpx/vp9/encoder/vp9_bitstream.h b/libs/libvpx/vp9/encoder/vp9_bitstream.h index da6b414642..8c97d37f77 100644 --- a/libs/libvpx/vp9/encoder/vp9_bitstream.h +++ b/libs/libvpx/vp9/encoder/vp9_bitstream.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP9_ENCODER_VP9_BITSTREAM_H_ #define VP9_ENCODER_VP9_BITSTREAM_H_ @@ -18,15 +17,16 @@ extern "C" { #include "vp9/encoder/vp9_encoder.h" +int vp9_get_refresh_mask(VP9_COMP *cpi); + void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size); static INLINE int vp9_preserve_existing_gf(VP9_COMP *cpi) { return !cpi->multi_arf_allowed && cpi->refresh_golden_frame && cpi->rc.is_src_frame_alt_ref && - (!cpi->use_svc || // Add spatial svc base layer case here - (is_two_pass_svc(cpi) && - cpi->svc.spatial_layer_id == 0 && - cpi->svc.layer_context[0].gold_ref_idx >=0 && + (!cpi->use_svc || // Add spatial svc base layer case here + (is_two_pass_svc(cpi) && cpi->svc.spatial_layer_id == 0 && + cpi->svc.layer_context[0].gold_ref_idx >= 0 && cpi->oxcf.ss_enable_auto_arf[0])); } diff --git a/libs/libvpx/vp9/encoder/vp9_block.h b/libs/libvpx/vp9/encoder/vp9_block.h index 147743e8d8..1ea5fdf1ff 100644 --- a/libs/libvpx/vp9/encoder/vp9_block.h +++ b/libs/libvpx/vp9/encoder/vp9_block.h @@ -52,6 +52,13 @@ typedef struct { uint8_t mode_context[MAX_REF_FRAMES]; } MB_MODE_INFO_EXT; +typedef struct { + int col_min; + int col_max; + int row_min; + int row_max; +} MvLimits; + typedef struct macroblock MACROBLOCK; struct macroblock { struct macroblock_plane plane[MAX_MB_PLANE]; @@ -64,6 +71,8 @@ struct macroblock { int skip_recode; int skip_optimize; int q_index; + int block_qcoeff_opt; + int block_tx_domain; // The equivalent error at the current rdmult of one whole bit (not one // bitcost unit). @@ -77,8 +86,8 @@ struct macroblock { int rddiv; int rdmult; int mb_energy; - int * m_search_count_ptr; - int * ex_search_count_ptr; + int *m_search_count_ptr; + int *ex_search_count_ptr; // These are set to their default values at the beginning, and then adjusted // further in the encoding process. 
@@ -103,10 +112,7 @@ struct macroblock { // These define limits to motion vector components to prevent them // from extending outside the UMV borders - int mv_col_min; - int mv_col_max; - int mv_row_min; - int mv_row_max; + MvLimits mv_limits; // Notes transform blocks where no coefficents are coded. // Set during mode selection. Read during block encoding. @@ -130,9 +136,9 @@ struct macroblock { // skip forward transform and quantization uint8_t skip_txfm[MAX_MB_PLANE << 2]; - #define SKIP_TXFM_NONE 0 - #define SKIP_TXFM_AC_DC 1 - #define SKIP_TXFM_AC_ONLY 2 +#define SKIP_TXFM_NONE 0 +#define SKIP_TXFM_AC_DC 1 +#define SKIP_TXFM_AC_ONLY 2 int64_t bsse[MAX_MB_PLANE << 2]; @@ -145,6 +151,11 @@ struct macroblock { uint8_t sb_is_skin; + // Used to save the status of whether a block has a low variance in + // choose_partitioning. 0 for 64x64, 1~2 for 64x32, 3~4 for 32x64, 5~8 for + // 32x32, 9~24 for 16x16. + uint8_t variance_low[25]; + void (*fwd_txm4x4)(const int16_t *input, tran_low_t *output, int stride); void (*itxm_add)(const tran_low_t *input, uint8_t *dest, int stride, int eob); #if CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vp9/encoder/vp9_blockiness.c b/libs/libvpx/vp9/encoder/vp9_blockiness.c index 1a89ce4f0d..9ab57b57c7 100644 --- a/libs/libvpx/vp9/encoder/vp9_blockiness.c +++ b/libs/libvpx/vp9/encoder/vp9_blockiness.c @@ -63,9 +63,9 @@ static int blockiness_vertical(const uint8_t *s, int sp, const uint8_t *r, s_blockiness += horizontal_filter(s); r_blockiness += horizontal_filter(r); sum_0 += s[0]; - sum_sq_0 += s[0]*s[0]; + sum_sq_0 += s[0] * s[0]; sum_1 += s[-1]; - sum_sq_1 += s[-1]*s[-1]; + sum_sq_1 += s[-1] * s[-1]; } var_0 = variance(sum_0, sum_sq_0, size); var_1 = variance(sum_1, sum_sq_1, size); @@ -113,19 +113,19 @@ static int blockiness_horizontal(const uint8_t *s, int sp, const uint8_t *r, // This function returns the blockiness for the entire frame currently by // looking at all borders in steps of 4. 
double vp9_get_blockiness(const uint8_t *img1, int img1_pitch, - const uint8_t *img2, int img2_pitch, - int width, int height) { + const uint8_t *img2, int img2_pitch, int width, + int height) { double blockiness = 0; int i, j; vpx_clear_system_state(); - for (i = 0; i < height; i += 4, img1 += img1_pitch * 4, - img2 += img2_pitch * 4) { + for (i = 0; i < height; + i += 4, img1 += img1_pitch * 4, img2 += img2_pitch * 4) { for (j = 0; j < width; j += 4) { if (i > 0 && i < height && j > 0 && j < width) { - blockiness += blockiness_vertical(img1 + j, img1_pitch, - img2 + j, img2_pitch, 4); - blockiness += blockiness_horizontal(img1 + j, img1_pitch, - img2 + j, img2_pitch, 4); + blockiness += + blockiness_vertical(img1 + j, img1_pitch, img2 + j, img2_pitch, 4); + blockiness += blockiness_horizontal(img1 + j, img1_pitch, img2 + j, + img2_pitch, 4); } } } diff --git a/libs/libvpx/vp9/encoder/vp9_context_tree.c b/libs/libvpx/vp9/encoder/vp9_context_tree.c index 396ed3fe73..2f7e544332 100644 --- a/libs/libvpx/vp9/encoder/vp9_context_tree.c +++ b/libs/libvpx/vp9/encoder/vp9_context_tree.c @@ -12,10 +12,7 @@ #include "vp9/encoder/vp9_encoder.h" static const BLOCK_SIZE square[] = { - BLOCK_8X8, - BLOCK_16X16, - BLOCK_32X32, - BLOCK_64X64, + BLOCK_8X8, BLOCK_16X16, BLOCK_32X32, BLOCK_64X64, }; static void alloc_mode_context(VP9_COMMON *cm, int num_4x4_blk, @@ -25,8 +22,7 @@ static void alloc_mode_context(VP9_COMMON *cm, int num_4x4_blk, int i, k; ctx->num_4x4_blk = num_blk; - CHECK_MEM_ERROR(cm, ctx->zcoeff_blk, - vpx_calloc(num_blk, sizeof(uint8_t))); + CHECK_MEM_ERROR(cm, ctx->zcoeff_blk, vpx_calloc(num_blk, sizeof(uint8_t))); for (i = 0; i < MAX_MB_PLANE; ++i) { for (k = 0; k < 3; ++k) { CHECK_MEM_ERROR(cm, ctx->coeff[i][k], @@ -37,10 +33,10 @@ static void alloc_mode_context(VP9_COMMON *cm, int num_4x4_blk, vpx_memalign(32, num_pix * sizeof(*ctx->dqcoeff[i][k]))); CHECK_MEM_ERROR(cm, ctx->eobs[i][k], vpx_memalign(32, num_blk * sizeof(*ctx->eobs[i][k]))); - 
ctx->coeff_pbuf[i][k] = ctx->coeff[i][k]; - ctx->qcoeff_pbuf[i][k] = ctx->qcoeff[i][k]; + ctx->coeff_pbuf[i][k] = ctx->coeff[i][k]; + ctx->qcoeff_pbuf[i][k] = ctx->qcoeff[i][k]; ctx->dqcoeff_pbuf[i][k] = ctx->dqcoeff[i][k]; - ctx->eobs_pbuf[i][k] = ctx->eobs[i][k]; + ctx->eobs_pbuf[i][k] = ctx->eobs[i][k]; } } } @@ -66,12 +62,12 @@ static void free_mode_context(PICK_MODE_CONTEXT *ctx) { static void alloc_tree_contexts(VP9_COMMON *cm, PC_TREE *tree, int num_4x4_blk) { alloc_mode_context(cm, num_4x4_blk, &tree->none); - alloc_mode_context(cm, num_4x4_blk/2, &tree->horizontal[0]); - alloc_mode_context(cm, num_4x4_blk/2, &tree->vertical[0]); + alloc_mode_context(cm, num_4x4_blk / 2, &tree->horizontal[0]); + alloc_mode_context(cm, num_4x4_blk / 2, &tree->vertical[0]); if (num_4x4_blk > 4) { - alloc_mode_context(cm, num_4x4_blk/2, &tree->horizontal[1]); - alloc_mode_context(cm, num_4x4_blk/2, &tree->vertical[1]); + alloc_mode_context(cm, num_4x4_blk / 2, &tree->horizontal[1]); + alloc_mode_context(cm, num_4x4_blk / 2, &tree->vertical[1]); } else { memset(&tree->horizontal[1], 0, sizeof(tree->horizontal[1])); memset(&tree->vertical[1], 0, sizeof(tree->vertical[1])); @@ -101,19 +97,18 @@ void vp9_setup_pc_tree(VP9_COMMON *cm, ThreadData *td) { int nodes; vpx_free(td->leaf_tree); - CHECK_MEM_ERROR(cm, td->leaf_tree, vpx_calloc(leaf_nodes, - sizeof(*td->leaf_tree))); + CHECK_MEM_ERROR(cm, td->leaf_tree, + vpx_calloc(leaf_nodes, sizeof(*td->leaf_tree))); vpx_free(td->pc_tree); - CHECK_MEM_ERROR(cm, td->pc_tree, vpx_calloc(tree_nodes, - sizeof(*td->pc_tree))); + CHECK_MEM_ERROR(cm, td->pc_tree, + vpx_calloc(tree_nodes, sizeof(*td->pc_tree))); this_pc = &td->pc_tree[0]; this_leaf = &td->leaf_tree[0]; // 4x4 blocks smaller than 8x8 but in the same 8x8 block share the same // context so we only need to allocate 1 for each 8x8 block. 
- for (i = 0; i < leaf_nodes; ++i) - alloc_mode_context(cm, 1, &td->leaf_tree[i]); + for (i = 0; i < leaf_nodes; ++i) alloc_mode_context(cm, 1, &td->leaf_tree[i]); // Sets up all the leaf nodes in the tree. for (pc_tree_index = 0; pc_tree_index < leaf_nodes; ++pc_tree_index) { @@ -121,8 +116,7 @@ void vp9_setup_pc_tree(VP9_COMMON *cm, ThreadData *td) { tree->block_size = square[0]; alloc_tree_contexts(cm, tree, 4); tree->leaf_split[0] = this_leaf++; - for (j = 1; j < 4; j++) - tree->leaf_split[j] = tree->leaf_split[0]; + for (j = 1; j < 4; j++) tree->leaf_split[j] = tree->leaf_split[0]; } // Each node has 4 leaf nodes, fill each block_size level of the tree @@ -132,8 +126,7 @@ void vp9_setup_pc_tree(VP9_COMMON *cm, ThreadData *td) { PC_TREE *const tree = &td->pc_tree[pc_tree_index]; alloc_tree_contexts(cm, tree, 4 << (2 * square_index)); tree->block_size = square[square_index]; - for (j = 0; j < 4; j++) - tree->split[j] = this_pc++; + for (j = 0; j < 4; j++) tree->split[j] = this_pc++; ++pc_tree_index; } ++square_index; @@ -147,12 +140,10 @@ void vp9_free_pc_tree(ThreadData *td) { int i; // Set up all 4x4 mode contexts - for (i = 0; i < 64; ++i) - free_mode_context(&td->leaf_tree[i]); + for (i = 0; i < 64; ++i) free_mode_context(&td->leaf_tree[i]); // Sets up all the leaf nodes in the tree. - for (i = 0; i < tree_nodes; ++i) - free_tree_contexts(&td->pc_tree[i]); + for (i = 0; i < tree_nodes; ++i) free_tree_contexts(&td->pc_tree[i]); vpx_free(td->pc_tree); td->pc_tree = NULL; diff --git a/libs/libvpx/vp9/encoder/vp9_cost.c b/libs/libvpx/vp9/encoder/vp9_cost.c index c85f763221..81581a80c2 100644 --- a/libs/libvpx/vp9/encoder/vp9_cost.c +++ b/libs/libvpx/vp9/encoder/vp9_cost.c @@ -12,37 +12,36 @@ #include "vp9/encoder/vp9_cost.h" /* round(-log2(i/256.) * (1 << VP9_PROB_COST_SHIFT)) - Begins and ends with a bogus entry to satisfy use of prob=0 in the firstpass. 
- https://code.google.com/p/webm/issues/detail?id=1089 */ -const uint16_t vp9_prob_cost[257] = { - 4096, 4096, 3584, 3284, 3072, 2907, 2772, 2659, 2560, 2473, 2395, 2325, - 2260, 2201, 2147, 2096, 2048, 2003, 1961, 1921, 1883, 1847, 1813, 1780, - 1748, 1718, 1689, 1661, 1635, 1609, 1584, 1559, 1536, 1513, 1491, 1470, - 1449, 1429, 1409, 1390, 1371, 1353, 1335, 1318, 1301, 1284, 1268, 1252, - 1236, 1221, 1206, 1192, 1177, 1163, 1149, 1136, 1123, 1110, 1097, 1084, - 1072, 1059, 1047, 1036, 1024, 1013, 1001, 990, 979, 968, 958, 947, - 937, 927, 917, 907, 897, 887, 878, 868, 859, 850, 841, 832, - 823, 814, 806, 797, 789, 780, 772, 764, 756, 748, 740, 732, - 724, 717, 709, 702, 694, 687, 680, 673, 665, 658, 651, 644, - 637, 631, 624, 617, 611, 604, 598, 591, 585, 578, 572, 566, - 560, 554, 547, 541, 535, 530, 524, 518, 512, 506, 501, 495, - 489, 484, 478, 473, 467, 462, 456, 451, 446, 441, 435, 430, - 425, 420, 415, 410, 405, 400, 395, 390, 385, 380, 375, 371, - 366, 361, 356, 352, 347, 343, 338, 333, 329, 324, 320, 316, - 311, 307, 302, 298, 294, 289, 285, 281, 277, 273, 268, 264, - 260, 256, 252, 248, 244, 240, 236, 232, 228, 224, 220, 216, - 212, 209, 205, 201, 197, 194, 190, 186, 182, 179, 175, 171, - 168, 164, 161, 157, 153, 150, 146, 143, 139, 136, 132, 129, - 125, 122, 119, 115, 112, 109, 105, 102, 99, 95, 92, 89, - 86, 82, 79, 76, 73, 70, 66, 63, 60, 57, 54, 51, - 48, 45, 42, 38, 35, 32, 29, 26, 23, 20, 18, 15, - 12, 9, 6, 3, 3}; + Begins with a bogus entry for simpler addressing. 
*/ +const uint16_t vp9_prob_cost[256] = { + 4096, 4096, 3584, 3284, 3072, 2907, 2772, 2659, 2560, 2473, 2395, 2325, 2260, + 2201, 2147, 2096, 2048, 2003, 1961, 1921, 1883, 1847, 1813, 1780, 1748, 1718, + 1689, 1661, 1635, 1609, 1584, 1559, 1536, 1513, 1491, 1470, 1449, 1429, 1409, + 1390, 1371, 1353, 1335, 1318, 1301, 1284, 1268, 1252, 1236, 1221, 1206, 1192, + 1177, 1163, 1149, 1136, 1123, 1110, 1097, 1084, 1072, 1059, 1047, 1036, 1024, + 1013, 1001, 990, 979, 968, 958, 947, 937, 927, 917, 907, 897, 887, + 878, 868, 859, 850, 841, 832, 823, 814, 806, 797, 789, 780, 772, + 764, 756, 748, 740, 732, 724, 717, 709, 702, 694, 687, 680, 673, + 665, 658, 651, 644, 637, 631, 624, 617, 611, 604, 598, 591, 585, + 578, 572, 566, 560, 554, 547, 541, 535, 530, 524, 518, 512, 506, + 501, 495, 489, 484, 478, 473, 467, 462, 456, 451, 446, 441, 435, + 430, 425, 420, 415, 410, 405, 400, 395, 390, 385, 380, 375, 371, + 366, 361, 356, 352, 347, 343, 338, 333, 329, 324, 320, 316, 311, + 307, 302, 298, 294, 289, 285, 281, 277, 273, 268, 264, 260, 256, + 252, 248, 244, 240, 236, 232, 228, 224, 220, 216, 212, 209, 205, + 201, 197, 194, 190, 186, 182, 179, 175, 171, 168, 164, 161, 157, + 153, 150, 146, 143, 139, 136, 132, 129, 125, 122, 119, 115, 112, + 109, 105, 102, 99, 95, 92, 89, 86, 82, 79, 76, 73, 70, + 66, 63, 60, 57, 54, 51, 48, 45, 42, 38, 35, 32, 29, + 26, 23, 20, 18, 15, 12, 9, 6, 3 +}; -static void cost(int *costs, vpx_tree tree, const vpx_prob *probs, - int i, int c) { +static void cost(int *costs, vpx_tree tree, const vpx_prob *probs, int i, + int c) { const vpx_prob prob = probs[i / 2]; int b; + assert(prob != 0); for (b = 0; b <= 1; ++b) { const int cc = c + vp9_cost_bit(prob, b); const vpx_tree_index ii = tree[i + b]; diff --git a/libs/libvpx/vp9/encoder/vp9_cost.h b/libs/libvpx/vp9/encoder/vp9_cost.h index 9831013b18..70a1a2e0e9 100644 --- a/libs/libvpx/vp9/encoder/vp9_cost.h +++ b/libs/libvpx/vp9/encoder/vp9_cost.h @@ -18,7 +18,7 @@ extern "C" { #endif -extern const 
uint16_t vp9_prob_cost[257]; +extern const uint16_t vp9_prob_cost[256]; // The factor to scale from cost in bits to cost in vp9_prob_cost units. #define VP9_PROB_COST_SHIFT 9 @@ -27,16 +27,15 @@ extern const uint16_t vp9_prob_cost[257]; #define vp9_cost_one(prob) vp9_cost_zero(256 - (prob)) -#define vp9_cost_bit(prob, bit) vp9_cost_zero((bit) ? 256 - (prob) \ - : (prob)) +#define vp9_cost_bit(prob, bit) vp9_cost_zero((bit) ? 256 - (prob) : (prob)) static INLINE unsigned int cost_branch256(const unsigned int ct[2], vpx_prob p) { return ct[0] * vp9_cost_zero(p) + ct[1] * vp9_cost_one(p); } -static INLINE int treed_cost(vpx_tree tree, const vpx_prob *probs, - int bits, int len) { +static INLINE int treed_cost(vpx_tree tree, const vpx_prob *probs, int bits, + int len) { int cost = 0; vpx_tree_index i = 0; diff --git a/libs/libvpx/vp9/encoder/vp9_dct.c b/libs/libvpx/vp9/encoder/vp9_dct.c index f94540baa0..bb8c23fdb9 100644 --- a/libs/libvpx/vp9/encoder/vp9_dct.c +++ b/libs/libvpx/vp9/encoder/vp9_dct.c @@ -61,8 +61,8 @@ static void fdct8(const tran_low_t *input, tran_low_t *output) { x3 = s0 - s3; t0 = (x0 + x1) * cospi_16_64; t1 = (x0 - x1) * cospi_16_64; - t2 = x2 * cospi_24_64 + x3 * cospi_8_64; - t3 = -x2 * cospi_8_64 + x3 * cospi_24_64; + t2 = x2 * cospi_24_64 + x3 * cospi_8_64; + t3 = -x2 * cospi_8_64 + x3 * cospi_24_64; output[0] = (tran_low_t)fdct_round_shift(t0); output[2] = (tran_low_t)fdct_round_shift(t2); output[4] = (tran_low_t)fdct_round_shift(t1); @@ -81,10 +81,10 @@ static void fdct8(const tran_low_t *input, tran_low_t *output) { x3 = s7 + t3; // Stage 4 - t0 = x0 * cospi_28_64 + x3 * cospi_4_64; - t1 = x1 * cospi_12_64 + x2 * cospi_20_64; + t0 = x0 * cospi_28_64 + x3 * cospi_4_64; + t1 = x1 * cospi_12_64 + x2 * cospi_20_64; t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; - t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; + t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; output[1] = (tran_low_t)fdct_round_shift(t0); output[3] = (tran_low_t)fdct_round_shift(t2); output[5] = 
(tran_low_t)fdct_round_shift(t1); @@ -105,11 +105,11 @@ static void fdct16(const tran_low_t in[16], tran_low_t out[16]) { input[3] = in[3] + in[12]; input[4] = in[4] + in[11]; input[5] = in[5] + in[10]; - input[6] = in[6] + in[ 9]; - input[7] = in[7] + in[ 8]; + input[6] = in[6] + in[9]; + input[7] = in[7] + in[8]; - step1[0] = in[7] - in[ 8]; - step1[1] = in[6] - in[ 9]; + step1[0] = in[7] - in[8]; + step1[1] = in[6] - in[9]; step1[2] = in[5] - in[10]; step1[3] = in[4] - in[11]; step1[4] = in[3] - in[12]; @@ -140,7 +140,7 @@ static void fdct16(const tran_low_t in[16], tran_low_t out[16]) { x3 = s0 - s3; t0 = (x0 + x1) * cospi_16_64; t1 = (x0 - x1) * cospi_16_64; - t2 = x3 * cospi_8_64 + x2 * cospi_24_64; + t2 = x3 * cospi_8_64 + x2 * cospi_24_64; t3 = x3 * cospi_24_64 - x2 * cospi_8_64; out[0] = (tran_low_t)fdct_round_shift(t0); out[4] = (tran_low_t)fdct_round_shift(t2); @@ -160,10 +160,10 @@ static void fdct16(const tran_low_t in[16], tran_low_t out[16]) { x3 = s7 + t3; // Stage 4 - t0 = x0 * cospi_28_64 + x3 * cospi_4_64; - t1 = x1 * cospi_12_64 + x2 * cospi_20_64; + t0 = x0 * cospi_28_64 + x3 * cospi_4_64; + t1 = x1 * cospi_12_64 + x2 * cospi_20_64; t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; - t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; + t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; out[2] = (tran_low_t)fdct_round_shift(t0); out[6] = (tran_low_t)fdct_round_shift(t2); out[10] = (tran_low_t)fdct_round_shift(t1); @@ -191,12 +191,12 @@ static void fdct16(const tran_low_t in[16], tran_low_t out[16]) { step3[7] = step1[7] + step2[4]; // step 4 - temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64; - temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64; + temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64; + temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64; step2[1] = fdct_round_shift(temp1); step2[2] = fdct_round_shift(temp2); temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64; - temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64; + temp2 = 
step3[1] * cospi_24_64 + step3[6] * cospi_8_64; step2[5] = fdct_round_shift(temp1); step2[6] = fdct_round_shift(temp2); @@ -211,23 +211,23 @@ static void fdct16(const tran_low_t in[16], tran_low_t out[16]) { step1[7] = step3[7] + step2[6]; // step 6 - temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64; + temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64; temp2 = step1[1] * cospi_14_64 + step1[6] * cospi_18_64; out[1] = (tran_low_t)fdct_round_shift(temp1); out[9] = (tran_low_t)fdct_round_shift(temp2); temp1 = step1[2] * cospi_22_64 + step1[5] * cospi_10_64; - temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64; + temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64; out[5] = (tran_low_t)fdct_round_shift(temp1); out[13] = (tran_low_t)fdct_round_shift(temp2); - temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64; + temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64; temp2 = step1[2] * -cospi_10_64 + step1[5] * cospi_22_64; out[3] = (tran_low_t)fdct_round_shift(temp1); out[11] = (tran_low_t)fdct_round_shift(temp2); temp1 = step1[1] * -cospi_18_64 + step1[6] * cospi_14_64; - temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64; + temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64; out[7] = (tran_low_t)fdct_round_shift(temp1); out[15] = (tran_low_t)fdct_round_shift(temp2); } @@ -285,14 +285,14 @@ static void fadst8(const tran_low_t *input, tran_low_t *output) { tran_high_t x7 = input[6]; // stage 1 - s0 = cospi_2_64 * x0 + cospi_30_64 * x1; - s1 = cospi_30_64 * x0 - cospi_2_64 * x1; + s0 = cospi_2_64 * x0 + cospi_30_64 * x1; + s1 = cospi_30_64 * x0 - cospi_2_64 * x1; s2 = cospi_10_64 * x2 + cospi_22_64 * x3; s3 = cospi_22_64 * x2 - cospi_10_64 * x3; s4 = cospi_18_64 * x4 + cospi_14_64 * x5; s5 = cospi_14_64 * x4 - cospi_18_64 * x5; - s6 = cospi_26_64 * x6 + cospi_6_64 * x7; - s7 = cospi_6_64 * x6 - cospi_26_64 * x7; + s6 = cospi_26_64 * x6 + cospi_6_64 * x7; + s7 = cospi_6_64 * x6 - cospi_26_64 * x7; x0 = fdct_round_shift(s0 + s4); x1 = 
fdct_round_shift(s1 + s5); @@ -308,10 +308,10 @@ static void fadst8(const tran_low_t *input, tran_low_t *output) { s1 = x1; s2 = x2; s3 = x3; - s4 = cospi_8_64 * x4 + cospi_24_64 * x5; - s5 = cospi_24_64 * x4 - cospi_8_64 * x5; - s6 = - cospi_24_64 * x6 + cospi_8_64 * x7; - s7 = cospi_8_64 * x6 + cospi_24_64 * x7; + s4 = cospi_8_64 * x4 + cospi_24_64 * x5; + s5 = cospi_24_64 * x4 - cospi_8_64 * x5; + s6 = -cospi_24_64 * x6 + cospi_8_64 * x7; + s7 = cospi_8_64 * x6 + cospi_24_64 * x7; x0 = s0 + s2; x1 = s1 + s3; @@ -365,11 +365,11 @@ static void fadst16(const tran_low_t *input, tran_low_t *output) { tran_high_t x15 = input[14]; // stage 1 - s0 = x0 * cospi_1_64 + x1 * cospi_31_64; + s0 = x0 * cospi_1_64 + x1 * cospi_31_64; s1 = x0 * cospi_31_64 - x1 * cospi_1_64; - s2 = x2 * cospi_5_64 + x3 * cospi_27_64; + s2 = x2 * cospi_5_64 + x3 * cospi_27_64; s3 = x2 * cospi_27_64 - x3 * cospi_5_64; - s4 = x4 * cospi_9_64 + x5 * cospi_23_64; + s4 = x4 * cospi_9_64 + x5 * cospi_23_64; s5 = x4 * cospi_23_64 - x5 * cospi_9_64; s6 = x6 * cospi_13_64 + x7 * cospi_19_64; s7 = x6 * cospi_19_64 - x7 * cospi_13_64; @@ -378,9 +378,9 @@ static void fadst16(const tran_low_t *input, tran_low_t *output) { s10 = x10 * cospi_21_64 + x11 * cospi_11_64; s11 = x10 * cospi_11_64 - x11 * cospi_21_64; s12 = x12 * cospi_25_64 + x13 * cospi_7_64; - s13 = x12 * cospi_7_64 - x13 * cospi_25_64; + s13 = x12 * cospi_7_64 - x13 * cospi_25_64; s14 = x14 * cospi_29_64 + x15 * cospi_3_64; - s15 = x14 * cospi_3_64 - x15 * cospi_29_64; + s15 = x14 * cospi_3_64 - x15 * cospi_29_64; x0 = fdct_round_shift(s0 + s8); x1 = fdct_round_shift(s1 + s9); @@ -390,8 +390,8 @@ static void fadst16(const tran_low_t *input, tran_low_t *output) { x5 = fdct_round_shift(s5 + s13); x6 = fdct_round_shift(s6 + s14); x7 = fdct_round_shift(s7 + s15); - x8 = fdct_round_shift(s0 - s8); - x9 = fdct_round_shift(s1 - s9); + x8 = fdct_round_shift(s0 - s8); + x9 = fdct_round_shift(s1 - s9); x10 = fdct_round_shift(s2 - s10); x11 = 
fdct_round_shift(s3 - s11); x12 = fdct_round_shift(s4 - s12); @@ -408,14 +408,14 @@ static void fadst16(const tran_low_t *input, tran_low_t *output) { s5 = x5; s6 = x6; s7 = x7; - s8 = x8 * cospi_4_64 + x9 * cospi_28_64; - s9 = x8 * cospi_28_64 - x9 * cospi_4_64; - s10 = x10 * cospi_20_64 + x11 * cospi_12_64; - s11 = x10 * cospi_12_64 - x11 * cospi_20_64; - s12 = - x12 * cospi_28_64 + x13 * cospi_4_64; - s13 = x12 * cospi_4_64 + x13 * cospi_28_64; - s14 = - x14 * cospi_12_64 + x15 * cospi_20_64; - s15 = x14 * cospi_20_64 + x15 * cospi_12_64; + s8 = x8 * cospi_4_64 + x9 * cospi_28_64; + s9 = x8 * cospi_28_64 - x9 * cospi_4_64; + s10 = x10 * cospi_20_64 + x11 * cospi_12_64; + s11 = x10 * cospi_12_64 - x11 * cospi_20_64; + s12 = -x12 * cospi_28_64 + x13 * cospi_4_64; + s13 = x12 * cospi_4_64 + x13 * cospi_28_64; + s14 = -x14 * cospi_12_64 + x15 * cospi_20_64; + s15 = x14 * cospi_20_64 + x15 * cospi_12_64; x0 = s0 + s4; x1 = s1 + s5; @@ -439,18 +439,18 @@ static void fadst16(const tran_low_t *input, tran_low_t *output) { s1 = x1; s2 = x2; s3 = x3; - s4 = x4 * cospi_8_64 + x5 * cospi_24_64; + s4 = x4 * cospi_8_64 + x5 * cospi_24_64; s5 = x4 * cospi_24_64 - x5 * cospi_8_64; - s6 = - x6 * cospi_24_64 + x7 * cospi_8_64; - s7 = x6 * cospi_8_64 + x7 * cospi_24_64; + s6 = -x6 * cospi_24_64 + x7 * cospi_8_64; + s7 = x6 * cospi_8_64 + x7 * cospi_24_64; s8 = x8; s9 = x9; s10 = x10; s11 = x11; - s12 = x12 * cospi_8_64 + x13 * cospi_24_64; + s12 = x12 * cospi_8_64 + x13 * cospi_24_64; s13 = x12 * cospi_24_64 - x13 * cospi_8_64; - s14 = - x14 * cospi_24_64 + x15 * cospi_8_64; - s15 = x14 * cospi_8_64 + x15 * cospi_24_64; + s14 = -x14 * cospi_24_64 + x15 * cospi_8_64; + s15 = x14 * cospi_8_64 + x15 * cospi_24_64; x0 = s0 + s2; x1 = s1 + s3; @@ -470,13 +470,13 @@ static void fadst16(const tran_low_t *input, tran_low_t *output) { x15 = fdct_round_shift(s13 - s15); // stage 4 - s2 = (- cospi_16_64) * (x2 + x3); + s2 = (-cospi_16_64) * (x2 + x3); s3 = cospi_16_64 * (x2 - x3); s6 = 
cospi_16_64 * (x6 + x7); - s7 = cospi_16_64 * (- x6 + x7); + s7 = cospi_16_64 * (-x6 + x7); s10 = cospi_16_64 * (x10 + x11); - s11 = cospi_16_64 * (- x10 + x11); - s14 = (- cospi_16_64) * (x14 + x15); + s11 = cospi_16_64 * (-x10 + x11); + s14 = (-cospi_16_64) * (x14 + x15); s15 = cospi_16_64 * (x14 - x15); x2 = fdct_round_shift(s2); @@ -507,28 +507,28 @@ static void fadst16(const tran_low_t *input, tran_low_t *output) { } static const transform_2d FHT_4[] = { - { fdct4, fdct4 }, // DCT_DCT = 0 - { fadst4, fdct4 }, // ADST_DCT = 1 - { fdct4, fadst4 }, // DCT_ADST = 2 - { fadst4, fadst4 } // ADST_ADST = 3 + { fdct4, fdct4 }, // DCT_DCT = 0 + { fadst4, fdct4 }, // ADST_DCT = 1 + { fdct4, fadst4 }, // DCT_ADST = 2 + { fadst4, fadst4 } // ADST_ADST = 3 }; static const transform_2d FHT_8[] = { - { fdct8, fdct8 }, // DCT_DCT = 0 - { fadst8, fdct8 }, // ADST_DCT = 1 - { fdct8, fadst8 }, // DCT_ADST = 2 - { fadst8, fadst8 } // ADST_ADST = 3 + { fdct8, fdct8 }, // DCT_DCT = 0 + { fadst8, fdct8 }, // ADST_DCT = 1 + { fdct8, fadst8 }, // DCT_ADST = 2 + { fadst8, fadst8 } // ADST_ADST = 3 }; static const transform_2d FHT_16[] = { - { fdct16, fdct16 }, // DCT_DCT = 0 - { fadst16, fdct16 }, // ADST_DCT = 1 - { fdct16, fadst16 }, // DCT_ADST = 2 - { fadst16, fadst16 } // ADST_ADST = 3 + { fdct16, fdct16 }, // DCT_DCT = 0 + { fadst16, fdct16 }, // ADST_DCT = 1 + { fdct16, fadst16 }, // DCT_ADST = 2 + { fadst16, fadst16 } // ADST_ADST = 3 }; -void vp9_fht4x4_c(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { +void vp9_fht4x4_c(const int16_t *input, tran_low_t *output, int stride, + int tx_type) { if (tx_type == DCT_DCT) { vpx_fdct4x4_c(input, output, stride); } else { @@ -539,36 +539,29 @@ void vp9_fht4x4_c(const int16_t *input, tran_low_t *output, // Columns for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = input[j * stride + i] * 16; - if (i == 0 && temp_in[0]) - temp_in[0] += 1; + for (j = 0; j < 4; ++j) temp_in[j] = input[j * stride + i] * 
16; + if (i == 0 && temp_in[0]) temp_in[0] += 1; ht.cols(temp_in, temp_out); - for (j = 0; j < 4; ++j) - out[j * 4 + i] = temp_out[j]; + for (j = 0; j < 4; ++j) out[j * 4 + i] = temp_out[j]; } // Rows for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = out[j + i * 4]; + for (j = 0; j < 4; ++j) temp_in[j] = out[j + i * 4]; ht.rows(temp_in, temp_out); - for (j = 0; j < 4; ++j) - output[j + i * 4] = (temp_out[j] + 1) >> 2; + for (j = 0; j < 4; ++j) output[j + i * 4] = (temp_out[j] + 1) >> 2; } } } void vp9_fdct8x8_quant_c(const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, - int skip_block, - const int16_t *zbin_ptr, const int16_t *round_ptr, - const int16_t *quant_ptr, - const int16_t *quant_shift_ptr, - tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, - const int16_t *scan, const int16_t *iscan) { + int skip_block, const int16_t *zbin_ptr, + const int16_t *round_ptr, const int16_t *quant_ptr, + const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, + tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, + uint16_t *eob_ptr, const int16_t *scan, + const int16_t *iscan) { int eob = -1; int i, j; @@ -600,8 +593,8 @@ void vp9_fdct8x8_quant_c(const int16_t *input, int stride, x3 = s0 - s3; t0 = (x0 + x1) * cospi_16_64; t1 = (x0 - x1) * cospi_16_64; - t2 = x2 * cospi_24_64 + x3 * cospi_8_64; - t3 = -x2 * cospi_8_64 + x3 * cospi_24_64; + t2 = x2 * cospi_24_64 + x3 * cospi_8_64; + t3 = -x2 * cospi_8_64 + x3 * cospi_24_64; output[0 * 8] = (tran_low_t)fdct_round_shift(t0); output[2 * 8] = (tran_low_t)fdct_round_shift(t2); output[4 * 8] = (tran_low_t)fdct_round_shift(t1); @@ -620,10 +613,10 @@ void vp9_fdct8x8_quant_c(const int16_t *input, int stride, x3 = s7 + t3; // Stage 4 - t0 = x0 * cospi_28_64 + x3 * cospi_4_64; - t1 = x1 * cospi_12_64 + x2 * cospi_20_64; + t0 = x0 * cospi_28_64 + x3 * cospi_4_64; + t1 = x1 * cospi_12_64 + x2 * cospi_20_64; t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; 
- t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; + t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; output[1 * 8] = (tran_low_t)fdct_round_shift(t0); output[3 * 8] = (tran_low_t)fdct_round_shift(t2); output[5 * 8] = (tran_low_t)fdct_round_shift(t1); @@ -636,8 +629,7 @@ void vp9_fdct8x8_quant_c(const int16_t *input, int stride, // Rows for (i = 0; i < 8; ++i) { fdct8(&intermediate[i * 8], &coeff_ptr[i * 8]); - for (j = 0; j < 8; ++j) - coeff_ptr[j + i * 8] /= 2; + for (j = 0; j < 8; ++j) coeff_ptr[j + i * 8] /= 2; } // TODO(jingning) Decide the need of these arguments after the @@ -664,15 +656,14 @@ void vp9_fdct8x8_quant_c(const int16_t *input, int stride, qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign; dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0]; - if (tmp) - eob = i; + if (tmp) eob = i; } } *eob_ptr = eob + 1; } -void vp9_fht8x8_c(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { +void vp9_fht8x8_c(const int16_t *input, tran_low_t *output, int stride, + int tx_type) { if (tx_type == DCT_DCT) { vpx_fdct8x8_c(input, output, stride); } else { @@ -683,17 +674,14 @@ void vp9_fht8x8_c(const int16_t *input, tran_low_t *output, // Columns for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = input[j * stride + i] * 4; + for (j = 0; j < 8; ++j) temp_in[j] = input[j * stride + i] * 4; ht.cols(temp_in, temp_out); - for (j = 0; j < 8; ++j) - out[j * 8 + i] = temp_out[j]; + for (j = 0; j < 8; ++j) out[j * 8 + i] = temp_out[j]; } // Rows for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j + i * 8]; + for (j = 0; j < 8; ++j) temp_in[j] = out[j + i * 8]; ht.rows(temp_in, temp_out); for (j = 0; j < 8; ++j) output[j + i * 8] = (temp_out[j] + (temp_out[j] < 0)) >> 1; @@ -757,8 +745,8 @@ void vp9_fwht4x4_c(const int16_t *input, tran_low_t *output, int stride) { } } -void vp9_fht16x16_c(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { +void vp9_fht16x16_c(const int16_t *input, tran_low_t *output, int 
stride, + int tx_type) { if (tx_type == DCT_DCT) { vpx_fdct16x16_c(input, output, stride); } else { @@ -769,8 +757,7 @@ void vp9_fht16x16_c(const int16_t *input, tran_low_t *output, // Columns for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = input[j * stride + i] * 4; + for (j = 0; j < 16; ++j) temp_in[j] = input[j * stride + i] * 4; ht.cols(temp_in, temp_out); for (j = 0; j < 16; ++j) out[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2; @@ -778,23 +765,21 @@ void vp9_fht16x16_c(const int16_t *input, tran_low_t *output, // Rows for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j + i * 16]; + for (j = 0; j < 16; ++j) temp_in[j] = out[j + i * 16]; ht.rows(temp_in, temp_out); - for (j = 0; j < 16; ++j) - output[j + i * 16] = temp_out[j]; + for (j = 0; j < 16; ++j) output[j + i * 16] = temp_out[j]; } } } #if CONFIG_VP9_HIGHBITDEPTH -void vp9_highbd_fht4x4_c(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { +void vp9_highbd_fht4x4_c(const int16_t *input, tran_low_t *output, int stride, + int tx_type) { vp9_fht4x4_c(input, output, stride, tx_type); } -void vp9_highbd_fht8x8_c(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { +void vp9_highbd_fht8x8_c(const int16_t *input, tran_low_t *output, int stride, + int tx_type) { vp9_fht8x8_c(input, output, stride, tx_type); } @@ -803,8 +788,8 @@ void vp9_highbd_fwht4x4_c(const int16_t *input, tran_low_t *output, vp9_fwht4x4_c(input, output, stride); } -void vp9_highbd_fht16x16_c(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { +void vp9_highbd_fht16x16_c(const int16_t *input, tran_low_t *output, int stride, + int tx_type) { vp9_fht16x16_c(input, output, stride, tx_type); } #endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vp9/encoder/vp9_denoiser.c b/libs/libvpx/vp9/encoder/vp9_denoiser.c index e419cffd8f..1d9a6702df 100644 --- a/libs/libvpx/vp9/encoder/vp9_denoiser.c +++ 
b/libs/libvpx/vp9/encoder/vp9_denoiser.c @@ -21,12 +21,6 @@ #include "vp9/encoder/vp9_denoiser.h" #include "vp9/encoder/vp9_encoder.h" -/* The VP9 denoiser is similar to that of the VP8 denoiser. While - * choosing the motion vectors / reference frames, the denoiser is run, and if - * it did not modify the signal to much, the denoised block is copied to the - * signal. - */ - #ifdef OUTPUT_YUV_DENOISED static void make_grayscale(YV12_BUFFER_CONFIG *yuv); #endif @@ -49,16 +43,18 @@ static int noise_motion_thresh(BLOCK_SIZE bs, int increase_denoising) { } static unsigned int sse_thresh(BLOCK_SIZE bs, int increase_denoising) { - return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 60 : 40); + return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 80 : 40); } static int sse_diff_thresh(BLOCK_SIZE bs, int increase_denoising, int motion_magnitude) { - if (motion_magnitude > - noise_motion_thresh(bs, increase_denoising)) { - return 0; + if (motion_magnitude > noise_motion_thresh(bs, increase_denoising)) { + if (increase_denoising) + return (1 << num_pels_log2_lookup[bs]) << 2; + else + return 0; } else { - return (1 << num_pels_log2_lookup[bs]) * 20; + return (1 << num_pels_log2_lookup[bs]) << 4; } } @@ -70,18 +66,15 @@ static int total_adj_weak_thresh(BLOCK_SIZE bs, int increase_denoising) { // we might need to update the code for calculating 'total_adj' in // case the C code is not bit-exact with corresponding sse2 code. 
int vp9_denoiser_filter_c(const uint8_t *sig, int sig_stride, - const uint8_t *mc_avg, - int mc_avg_stride, - uint8_t *avg, int avg_stride, - int increase_denoising, - BLOCK_SIZE bs, - int motion_magnitude) { + const uint8_t *mc_avg, int mc_avg_stride, + uint8_t *avg, int avg_stride, int increase_denoising, + BLOCK_SIZE bs, int motion_magnitude) { int r, c; const uint8_t *sig_start = sig; const uint8_t *mc_avg_start = mc_avg; uint8_t *avg_start = avg; int diff, adj, absdiff, delta; - int adj_val[] = {3, 4, 6}; + int adj_val[] = { 3, 4, 6 }; int total_adj = 0; int shift_inc = 1; @@ -108,15 +101,19 @@ int vp9_denoiser_filter_c(const uint8_t *sig, int sig_stride, total_adj += diff; } else { switch (absdiff) { - case 4: case 5: case 6: case 7: - adj = adj_val[0]; - break; - case 8: case 9: case 10: case 11: - case 12: case 13: case 14: case 15: - adj = adj_val[1]; - break; - default: - adj = adj_val[2]; + case 4: + case 5: + case 6: + case 7: adj = adj_val[0]; break; + case 8: + case 9: + case 10: + case 11: + case 12: + case 13: + case 14: + case 15: adj = adj_val[1]; break; + default: adj = adj_val[2]; } if (diff > 0) { avg[c] = VPXMIN(UINT8_MAX, sig[c] + adj); @@ -138,14 +135,15 @@ int vp9_denoiser_filter_c(const uint8_t *sig, int sig_stride, } // Otherwise, we try to dampen the filter if the delta is not too high. 
- delta = ((abs(total_adj) - total_adj_strong_thresh(bs, increase_denoising)) - >> num_pels_log2_lookup[bs]) + 1; + delta = ((abs(total_adj) - total_adj_strong_thresh(bs, increase_denoising)) >> + num_pels_log2_lookup[bs]) + + 1; if (delta >= delta_thresh(bs, increase_denoising)) { return COPY_BLOCK; } - mc_avg = mc_avg_start; + mc_avg = mc_avg_start; avg = avg_start; sig = sig_start; for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) { @@ -181,43 +179,39 @@ int vp9_denoiser_filter_c(const uint8_t *sig, int sig_stride, return COPY_BLOCK; } -static uint8_t *block_start(uint8_t *framebuf, int stride, - int mi_row, int mi_col) { - return framebuf + (stride * mi_row * 8) + (mi_col * 8); +static uint8_t *block_start(uint8_t *framebuf, int stride, int mi_row, + int mi_col) { + return framebuf + (stride * mi_row << 3) + (mi_col << 3); } -static VP9_DENOISER_DECISION perform_motion_compensation(VP9_DENOISER *denoiser, - MACROBLOCK *mb, - BLOCK_SIZE bs, - int increase_denoising, - int mi_row, - int mi_col, - PICK_MODE_CONTEXT *ctx, - int motion_magnitude, - int is_skin, - int *zeromv_filter) { - int mv_col, mv_row; +static VP9_DENOISER_DECISION perform_motion_compensation( + VP9_DENOISER *denoiser, MACROBLOCK *mb, BLOCK_SIZE bs, + int increase_denoising, int mi_row, int mi_col, PICK_MODE_CONTEXT *ctx, + int motion_magnitude, int is_skin, int *zeromv_filter, int consec_zeromv) { int sse_diff = ctx->zeromv_sse - ctx->newmv_sse; MV_REFERENCE_FRAME frame; MACROBLOCKD *filter_mbd = &mb->e_mbd; MODE_INFO *mi = filter_mbd->mi[0]; MODE_INFO saved_mi; - int i, j; + int i; struct buf_2d saved_dst[MAX_MB_PLANE]; - struct buf_2d saved_pre[MAX_MB_PLANE][2]; // 2 pre buffers + struct buf_2d saved_pre[MAX_MB_PLANE]; - mv_col = ctx->best_sse_mv.as_mv.col; - mv_row = ctx->best_sse_mv.as_mv.row; frame = ctx->best_reference_frame; - saved_mi = *mi; - if (is_skin && motion_magnitude > 0) + if (is_skin && (motion_magnitude > 0 || consec_zeromv < 4)) return COPY_BLOCK; + + // Avoid denoising 
for small block (unless motion is small). + // Small blocks are selected in variance partition (before encoding) and + // will typically lie on moving areas. + if (denoiser->denoising_level < kDenHigh && motion_magnitude > 16 && + bs <= BLOCK_8X8) return COPY_BLOCK; // If the best reference frame uses inter-prediction and there is enough of a // difference in sum-squared-error, use it. - if (frame != INTRA_FRAME && + if (frame != INTRA_FRAME && ctx->newmv_sse != UINT_MAX && sse_diff > sse_diff_thresh(bs, increase_denoising, motion_magnitude)) { mi->ref_frame[0] = ctx->best_reference_frame; mi->mode = ctx->best_sse_inter_mode; @@ -228,7 +222,7 @@ static VP9_DENOISER_DECISION perform_motion_compensation(VP9_DENOISER *denoiser, ctx->newmv_sse = ctx->zeromv_sse; // Bias to last reference. if (frame != LAST_FRAME && - ((ctx->zeromv_lastref_sse < (5 * ctx->zeromv_sse) >> 2) || + ((ctx->zeromv_lastref_sse<(5 * ctx->zeromv_sse)>> 2) || denoiser->denoising_level >= kDenHigh)) { frame = LAST_FRAME; ctx->newmv_sse = ctx->zeromv_lastref_sse; @@ -239,6 +233,9 @@ static VP9_DENOISER_DECISION perform_motion_compensation(VP9_DENOISER *denoiser, ctx->best_sse_inter_mode = ZEROMV; ctx->best_sse_mv.as_int = 0; *zeromv_filter = 1; + if (denoiser->denoising_level > kDenMedium) { + motion_magnitude = 0; + } } if (ctx->newmv_sse > sse_thresh(bs, increase_denoising)) { @@ -246,8 +243,7 @@ static VP9_DENOISER_DECISION perform_motion_compensation(VP9_DENOISER *denoiser, *mi = saved_mi; return COPY_BLOCK; } - if (motion_magnitude > - (noise_motion_thresh(bs, increase_denoising) << 3)) { + if (motion_magnitude > (noise_motion_thresh(bs, increase_denoising) << 3)) { // Restore everything to its original state *mi = saved_mi; return COPY_BLOCK; @@ -255,128 +251,131 @@ static VP9_DENOISER_DECISION perform_motion_compensation(VP9_DENOISER *denoiser, // We will restore these after motion compensation. 
for (i = 0; i < MAX_MB_PLANE; ++i) { - for (j = 0; j < 2; ++j) { - saved_pre[i][j] = filter_mbd->plane[i].pre[j]; - } + saved_pre[i] = filter_mbd->plane[i].pre[0]; saved_dst[i] = filter_mbd->plane[i].dst; } // Set the pointers in the MACROBLOCKD to point to the buffers in the denoiser // struct. - for (j = 0; j < 2; ++j) { - filter_mbd->plane[0].pre[j].buf = - block_start(denoiser->running_avg_y[frame].y_buffer, - denoiser->running_avg_y[frame].y_stride, - mi_row, mi_col); - filter_mbd->plane[0].pre[j].stride = - denoiser->running_avg_y[frame].y_stride; - filter_mbd->plane[1].pre[j].buf = - block_start(denoiser->running_avg_y[frame].u_buffer, - denoiser->running_avg_y[frame].uv_stride, - mi_row, mi_col); - filter_mbd->plane[1].pre[j].stride = - denoiser->running_avg_y[frame].uv_stride; - filter_mbd->plane[2].pre[j].buf = - block_start(denoiser->running_avg_y[frame].v_buffer, - denoiser->running_avg_y[frame].uv_stride, - mi_row, mi_col); - filter_mbd->plane[2].pre[j].stride = - denoiser->running_avg_y[frame].uv_stride; - } + filter_mbd->plane[0].pre[0].buf = + block_start(denoiser->running_avg_y[frame].y_buffer, + denoiser->running_avg_y[frame].y_stride, mi_row, mi_col); + filter_mbd->plane[0].pre[0].stride = denoiser->running_avg_y[frame].y_stride; + filter_mbd->plane[1].pre[0].buf = + block_start(denoiser->running_avg_y[frame].u_buffer, + denoiser->running_avg_y[frame].uv_stride, mi_row, mi_col); + filter_mbd->plane[1].pre[0].stride = denoiser->running_avg_y[frame].uv_stride; + filter_mbd->plane[2].pre[0].buf = + block_start(denoiser->running_avg_y[frame].v_buffer, + denoiser->running_avg_y[frame].uv_stride, mi_row, mi_col); + filter_mbd->plane[2].pre[0].stride = denoiser->running_avg_y[frame].uv_stride; + filter_mbd->plane[0].dst.buf = block_start(denoiser->mc_running_avg_y.y_buffer, - denoiser->mc_running_avg_y.y_stride, - mi_row, mi_col); + denoiser->mc_running_avg_y.y_stride, mi_row, mi_col); filter_mbd->plane[0].dst.stride = 
denoiser->mc_running_avg_y.y_stride; filter_mbd->plane[1].dst.buf = block_start(denoiser->mc_running_avg_y.u_buffer, - denoiser->mc_running_avg_y.uv_stride, - mi_row, mi_col); + denoiser->mc_running_avg_y.uv_stride, mi_row, mi_col); filter_mbd->plane[1].dst.stride = denoiser->mc_running_avg_y.uv_stride; filter_mbd->plane[2].dst.buf = block_start(denoiser->mc_running_avg_y.v_buffer, - denoiser->mc_running_avg_y.uv_stride, - mi_row, mi_col); + denoiser->mc_running_avg_y.uv_stride, mi_row, mi_col); filter_mbd->plane[2].dst.stride = denoiser->mc_running_avg_y.uv_stride; - vp9_build_inter_predictors_sby(filter_mbd, mv_row, mv_col, bs); + vp9_build_inter_predictors_sby(filter_mbd, mi_row, mi_col, bs); // Restore everything to its original state *mi = saved_mi; for (i = 0; i < MAX_MB_PLANE; ++i) { - for (j = 0; j < 2; ++j) { - filter_mbd->plane[i].pre[j] = saved_pre[i][j]; - } + filter_mbd->plane[i].pre[0] = saved_pre[i]; filter_mbd->plane[i].dst = saved_dst[i]; } - mv_row = ctx->best_sse_mv.as_mv.row; - mv_col = ctx->best_sse_mv.as_mv.col; - return FILTER_BLOCK; } -void vp9_denoiser_denoise(VP9_DENOISER *denoiser, MACROBLOCK *mb, - int mi_row, int mi_col, BLOCK_SIZE bs, - PICK_MODE_CONTEXT *ctx, +void vp9_denoiser_denoise(VP9_COMP *cpi, MACROBLOCK *mb, int mi_row, int mi_col, + BLOCK_SIZE bs, PICK_MODE_CONTEXT *ctx, VP9_DENOISER_DECISION *denoiser_decision) { int mv_col, mv_row; int motion_magnitude = 0; int zeromv_filter = 0; + VP9_DENOISER *denoiser = &cpi->denoiser; VP9_DENOISER_DECISION decision = COPY_BLOCK; YV12_BUFFER_CONFIG avg = denoiser->running_avg_y[INTRA_FRAME]; YV12_BUFFER_CONFIG mc_avg = denoiser->mc_running_avg_y; uint8_t *avg_start = block_start(avg.y_buffer, avg.y_stride, mi_row, mi_col); - uint8_t *mc_avg_start = block_start(mc_avg.y_buffer, mc_avg.y_stride, - mi_row, mi_col); + uint8_t *mc_avg_start = + block_start(mc_avg.y_buffer, mc_avg.y_stride, mi_row, mi_col); struct buf_2d src = mb->plane[0].src; int is_skin = 0; - - if (bs <= BLOCK_32X32 && 
denoiser->denoising_level >= kDenLow) { - is_skin = vp9_compute_skin_block(mb->plane[0].src.buf, - mb->plane[1].src.buf, - mb->plane[2].src.buf, - mb->plane[0].src.stride, - mb->plane[1].src.stride, - bs); - } - + int consec_zeromv = 0; mv_col = ctx->best_sse_mv.as_mv.col; mv_row = ctx->best_sse_mv.as_mv.row; motion_magnitude = mv_row * mv_row + mv_col * mv_col; - if (!is_skin && - denoiser->denoising_level == kDenHigh && - motion_magnitude < 16) { + + if (cpi->use_skin_detection && bs <= BLOCK_32X32 && + denoiser->denoising_level < kDenHigh) { + int motion_level = (motion_magnitude < 16) ? 0 : 1; + // If motion for current block is small/zero, compute consec_zeromv for + // skin detection (early exit in skin detection is done for large + // consec_zeromv when current block has small/zero motion). + consec_zeromv = 0; + if (motion_level == 0) { + VP9_COMMON *const cm = &cpi->common; + int j, i; + // Loop through the 8x8 sub-blocks. + const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64]; + const int bh = num_8x8_blocks_high_lookup[BLOCK_64X64]; + const int xmis = VPXMIN(cm->mi_cols - mi_col, bw); + const int ymis = VPXMIN(cm->mi_rows - mi_row, bh); + const int block_index = mi_row * cm->mi_cols + mi_col; + consec_zeromv = 100; + for (i = 0; i < ymis; i++) { + for (j = 0; j < xmis; j++) { + int bl_index = block_index + i * cm->mi_cols + j; + consec_zeromv = VPXMIN(cpi->consec_zero_mv[bl_index], consec_zeromv); + // No need to keep checking 8x8 blocks if any of the sub-blocks + // has small consec_zeromv (since threshold for no_skin based on + // zero/small motion in skin detection is high, i.e, > 4). + if (consec_zeromv < 4) { + i = ymis; + j = xmis; + } + } + } + } + // TODO(marpan): Compute skin detection over sub-blocks. 
+ is_skin = vp9_compute_skin_block( + mb->plane[0].src.buf, mb->plane[1].src.buf, mb->plane[2].src.buf, + mb->plane[0].src.stride, mb->plane[1].src.stride, bs, consec_zeromv, + motion_level); + } + if (!is_skin && denoiser->denoising_level == kDenHigh) { denoiser->increase_denoising = 1; } else { denoiser->increase_denoising = 0; } if (denoiser->denoising_level >= kDenLow) - decision = perform_motion_compensation(denoiser, mb, bs, - denoiser->increase_denoising, - mi_row, mi_col, ctx, - motion_magnitude, - is_skin, - &zeromv_filter); + decision = perform_motion_compensation( + denoiser, mb, bs, denoiser->increase_denoising, mi_row, mi_col, ctx, + motion_magnitude, is_skin, &zeromv_filter, consec_zeromv); if (decision == FILTER_BLOCK) { - decision = vp9_denoiser_filter(src.buf, src.stride, - mc_avg_start, mc_avg.y_stride, - avg_start, avg.y_stride, - denoiser->increase_denoising, - bs, motion_magnitude); + decision = vp9_denoiser_filter( + src.buf, src.stride, mc_avg_start, mc_avg.y_stride, avg_start, + avg.y_stride, denoiser->increase_denoising, bs, motion_magnitude); } if (decision == FILTER_BLOCK) { - vpx_convolve_copy(avg_start, avg.y_stride, src.buf, src.stride, - NULL, 0, NULL, 0, - num_4x4_blocks_wide_lookup[bs] << 2, + vpx_convolve_copy(avg_start, avg.y_stride, src.buf, src.stride, NULL, 0, + NULL, 0, num_4x4_blocks_wide_lookup[bs] << 2, num_4x4_blocks_high_lookup[bs] << 2); } else { // COPY_BLOCK - vpx_convolve_copy(src.buf, src.stride, avg_start, avg.y_stride, - NULL, 0, NULL, 0, - num_4x4_blocks_wide_lookup[bs] << 2, + vpx_convolve_copy(src.buf, src.stride, avg_start, avg.y_stride, NULL, 0, + NULL, 0, num_4x4_blocks_wide_lookup[bs] << 2, num_4x4_blocks_high_lookup[bs] << 2); } *denoiser_decision = decision; @@ -384,8 +383,8 @@ void vp9_denoiser_denoise(VP9_DENOISER *denoiser, MACROBLOCK *mb, *denoiser_decision = FILTER_ZEROMV_BLOCK; } -static void copy_frame(YV12_BUFFER_CONFIG * const dest, - const YV12_BUFFER_CONFIG * const src) { +static void 
copy_frame(YV12_BUFFER_CONFIG *const dest, + const YV12_BUFFER_CONFIG *const src) { int r; const uint8_t *srcbuf = src->y_buffer; uint8_t *destbuf = dest->y_buffer; @@ -400,8 +399,8 @@ static void copy_frame(YV12_BUFFER_CONFIG * const dest, } } -static void swap_frame_buffer(YV12_BUFFER_CONFIG * const dest, - YV12_BUFFER_CONFIG * const src) { +static void swap_frame_buffer(YV12_BUFFER_CONFIG *const dest, + YV12_BUFFER_CONFIG *const src) { uint8_t *tmp_buf = dest->y_buffer; assert(dest->y_width == src->y_width); assert(dest->y_height == src->y_height); @@ -414,21 +413,20 @@ void vp9_denoiser_update_frame_info(VP9_DENOISER *denoiser, FRAME_TYPE frame_type, int refresh_alt_ref_frame, int refresh_golden_frame, - int refresh_last_frame, - int resized) { + int refresh_last_frame, int resized) { // Copy source into denoised reference buffers on KEY_FRAME or // if the just encoded frame was resized. - if (frame_type == KEY_FRAME || resized != 0) { + if (frame_type == KEY_FRAME || resized != 0 || denoiser->reset) { int i; // Start at 1 so as not to overwrite the INTRA_FRAME for (i = 1; i < MAX_REF_FRAMES; ++i) copy_frame(&denoiser->running_avg_y[i], &src); + denoiser->reset = 0; return; } // If more than one refresh occurs, must copy frame buffer. 
- if ((refresh_alt_ref_frame + refresh_golden_frame + refresh_last_frame) - > 1) { + if ((refresh_alt_ref_frame + refresh_golden_frame + refresh_last_frame) > 1) { if (refresh_alt_ref_frame) { copy_frame(&denoiser->running_avg_y[ALTREF_FRAME], &denoiser->running_avg_y[INTRA_FRAME]); @@ -461,17 +459,16 @@ void vp9_denoiser_reset_frame_stats(PICK_MODE_CONTEXT *ctx) { ctx->zeromv_sse = UINT_MAX; ctx->newmv_sse = UINT_MAX; ctx->zeromv_lastref_sse = UINT_MAX; + ctx->best_sse_mv.as_int = 0; } void vp9_denoiser_update_frame_stats(MODE_INFO *mi, unsigned int sse, PREDICTION_MODE mode, PICK_MODE_CONTEXT *ctx) { - // TODO(tkopp): Use both MVs if possible if (mi->mv[0].as_int == 0 && sse < ctx->zeromv_sse) { ctx->zeromv_sse = sse; ctx->best_zeromv_reference_frame = mi->ref_frame[0]; - if (mi->ref_frame[0] == LAST_FRAME) - ctx->zeromv_lastref_sse = sse; + if (mi->ref_frame[0] == LAST_FRAME) ctx->zeromv_lastref_sse = sse; } if (mi->mv[0].as_int != 0 && sse < ctx->newmv_sse) { @@ -482,8 +479,8 @@ void vp9_denoiser_update_frame_stats(MODE_INFO *mi, unsigned int sse, } } -int vp9_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height, - int ssx, int ssy, +int vp9_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height, int ssx, + int ssy, #if CONFIG_VP9_HIGHBITDEPTH int use_highbitdepth, #endif @@ -508,8 +505,8 @@ int vp9_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height, #endif } - fail = vpx_alloc_frame_buffer(&denoiser->mc_running_avg_y, width, height, - ssx, ssy, + fail = vpx_alloc_frame_buffer(&denoiser->mc_running_avg_y, width, height, ssx, + ssy, #if CONFIG_VP9_HIGHBITDEPTH use_highbitdepth, #endif @@ -519,8 +516,7 @@ int vp9_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height, return 1; } - fail = vpx_alloc_frame_buffer(&denoiser->last_source, width, height, - ssx, ssy, + fail = vpx_alloc_frame_buffer(&denoiser->last_source, width, height, ssx, ssy, #if CONFIG_VP9_HIGHBITDEPTH use_highbitdepth, #endif @@ -535,15 +531,17 @@ int 
vp9_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height, denoiser->increase_denoising = 0; denoiser->frame_buffer_initialized = 1; denoiser->denoising_level = kDenLow; + denoiser->prev_denoising_level = kDenLow; + denoiser->reset = 0; return 0; } void vp9_denoiser_free(VP9_DENOISER *denoiser) { int i; - denoiser->frame_buffer_initialized = 0; if (denoiser == NULL) { return; } + denoiser->frame_buffer_initialized = 0; for (i = 0; i < MAX_REF_FRAMES; ++i) { vpx_free_frame_buffer(&denoiser->running_avg_y[i]); } @@ -551,9 +549,14 @@ void vp9_denoiser_free(VP9_DENOISER *denoiser) { vpx_free_frame_buffer(&denoiser->last_source); } -void vp9_denoiser_set_noise_level(VP9_DENOISER *denoiser, - int noise_level) { +void vp9_denoiser_set_noise_level(VP9_DENOISER *denoiser, int noise_level) { denoiser->denoising_level = noise_level; + if (denoiser->denoising_level > kDenLowLow && + denoiser->prev_denoising_level == kDenLowLow) + denoiser->reset = 1; + else + denoiser->reset = 0; + denoiser->prev_denoising_level = denoiser->denoising_level; } #ifdef OUTPUT_YUV_DENOISED diff --git a/libs/libvpx/vp9/encoder/vp9_denoiser.h b/libs/libvpx/vp9/encoder/vp9_denoiser.h index 9f13bd533e..fcfaa5051a 100644 --- a/libs/libvpx/vp9/encoder/vp9_denoiser.h +++ b/libs/libvpx/vp9/encoder/vp9_denoiser.h @@ -40,9 +40,23 @@ typedef struct vp9_denoiser { YV12_BUFFER_CONFIG last_source; int increase_denoising; int frame_buffer_initialized; + int reset; VP9_DENOISER_LEVEL denoising_level; + VP9_DENOISER_LEVEL prev_denoising_level; } VP9_DENOISER; +typedef struct { + int64_t zero_last_cost_orig; + int *ref_frame_cost; + int_mv (*frame_mv)[MAX_REF_FRAMES]; + int reuse_inter_pred; + TX_SIZE best_tx_size; + PREDICTION_MODE best_mode; + MV_REFERENCE_FRAME best_ref_frame; + INTERP_FILTER best_pred_filter; + uint8_t best_mode_skip_txfm; +} VP9_PICKMODE_CTX_DEN; + struct VP9_COMP; void vp9_denoiser_update_frame_info(VP9_DENOISER *denoiser, @@ -50,22 +64,20 @@ void 
vp9_denoiser_update_frame_info(VP9_DENOISER *denoiser, FRAME_TYPE frame_type, int refresh_alt_ref_frame, int refresh_golden_frame, - int refresh_last_frame, - int resized); + int refresh_last_frame, int resized); -void vp9_denoiser_denoise(VP9_DENOISER *denoiser, MACROBLOCK *mb, - int mi_row, int mi_col, BLOCK_SIZE bs, - PICK_MODE_CONTEXT *ctx , +void vp9_denoiser_denoise(struct VP9_COMP *cpi, MACROBLOCK *mb, int mi_row, + int mi_col, BLOCK_SIZE bs, PICK_MODE_CONTEXT *ctx, VP9_DENOISER_DECISION *denoiser_decision); void vp9_denoiser_reset_frame_stats(PICK_MODE_CONTEXT *ctx); -void vp9_denoiser_update_frame_stats(MODE_INFO *mi, - unsigned int sse, PREDICTION_MODE mode, +void vp9_denoiser_update_frame_stats(MODE_INFO *mi, unsigned int sse, + PREDICTION_MODE mode, PICK_MODE_CONTEXT *ctx); -int vp9_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height, - int ssx, int ssy, +int vp9_denoiser_alloc(VP9_DENOISER *denoiser, int width, int height, int ssx, + int ssy, #if CONFIG_VP9_HIGHBITDEPTH int use_highbitdepth, #endif @@ -83,8 +95,7 @@ static INLINE int total_adj_strong_thresh(BLOCK_SIZE bs, void vp9_denoiser_free(VP9_DENOISER *denoiser); -void vp9_denoiser_set_noise_level(VP9_DENOISER *denoiser, - int noise_level); +void vp9_denoiser_set_noise_level(VP9_DENOISER *denoiser, int noise_level); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp9/encoder/vp9_encodeframe.c b/libs/libvpx/vp9/encoder/vp9_encodeframe.c index 4109c19a9c..335faca82b 100644 --- a/libs/libvpx/vp9/encoder/vp9_encodeframe.c +++ b/libs/libvpx/vp9/encoder/vp9_encodeframe.c @@ -48,58 +48,53 @@ #include "vp9/encoder/vp9_segmentation.h" #include "vp9/encoder/vp9_tokenize.h" -static void encode_superblock(VP9_COMP *cpi, ThreadData * td, - TOKENEXTRA **t, int output_enabled, - int mi_row, int mi_col, BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx); +static void encode_superblock(VP9_COMP *cpi, ThreadData *td, TOKENEXTRA **t, + int output_enabled, int mi_row, int mi_col, + BLOCK_SIZE bsize, 
PICK_MODE_CONTEXT *ctx); // This is used as a reference when computing the source variance for the -// purposes of activity masking. +// purpose of activity masking. // Eventually this should be replaced by custom no-reference routines, // which will be faster. static const uint8_t VP9_VAR_OFFS[64] = { - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128 + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }; #if CONFIG_VP9_HIGHBITDEPTH static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = { - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128, - 128, 128, 128, 128, 128, 128, 128, 128 + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }; static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = { - 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, - 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, - 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, - 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 
- 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, - 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, - 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, - 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4 + 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, + 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, + 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, + 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, + 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, + 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, + 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, + 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4, 128 * 4 }; static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = { - 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, - 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, - 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, - 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, - 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, - 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, - 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, - 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16 + 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, + 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, + 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, + 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, + 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, + 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, + 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, + 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, + 128 * 16, 128 * 
16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, + 128 * 16 }; #endif // CONFIG_VP9_HIGHBITDEPTH @@ -107,34 +102,35 @@ unsigned int vp9_get_sby_perpixel_variance(VP9_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs) { unsigned int sse; - const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, - VP9_VAR_OFFS, 0, &sse); + const unsigned int var = + cpi->fn_ptr[bs].vf(ref->buf, ref->stride, VP9_VAR_OFFS, 0, &sse); return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]); } #if CONFIG_VP9_HIGHBITDEPTH -unsigned int vp9_high_get_sby_perpixel_variance( - VP9_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) { +unsigned int vp9_high_get_sby_perpixel_variance(VP9_COMP *cpi, + const struct buf_2d *ref, + BLOCK_SIZE bs, int bd) { unsigned int var, sse; switch (bd) { case 10: - var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, - CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10), - 0, &sse); + var = + cpi->fn_ptr[bs].vf(ref->buf, ref->stride, + CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10), 0, &sse); break; case 12: - var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, - CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12), - 0, &sse); + var = + cpi->fn_ptr[bs].vf(ref->buf, ref->stride, + CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12), 0, &sse); break; case 8: default: - var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, - CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8), - 0, &sse); + var = + cpi->fn_ptr[bs].vf(ref->buf, ref->stride, + CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8), 0, &sse); break; } - return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]); + return ROUND64_POWER_OF_TWO((int64_t)var, num_pels_log2_lookup[bs]); } #endif // CONFIG_VP9_HIGHBITDEPTH @@ -154,11 +150,9 @@ static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi, } static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x, - int mi_row, - int mi_col) { - unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src, - mi_row, mi_col, - BLOCK_64X64); + int mi_row, int mi_col) { + 
unsigned int var = get_sby_perpixel_diff_variance( + cpi, &x->plane[0].src, mi_row, mi_col, BLOCK_64X64); if (var < 8) return BLOCK_64X64; else if (var < 128) @@ -173,8 +167,7 @@ static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x, // pointers. static INLINE void set_mode_info_offsets(VP9_COMMON *const cm, MACROBLOCK *const x, - MACROBLOCKD *const xd, - int mi_row, + MACROBLOCKD *const xd, int mi_row, int mi_col) { const int idx_str = xd->mi_stride * mi_row + mi_col; xd->mi = cm->mi_grid_visible + idx_str; @@ -191,6 +184,7 @@ static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile, const int mi_width = num_8x8_blocks_wide_lookup[bsize]; const int mi_height = num_8x8_blocks_high_lookup[bsize]; const struct segmentation *const seg = &cm->seg; + MvLimits *const mv_limits = &x->mv_limits; set_skip_context(xd, mi_row, mi_col); @@ -203,15 +197,15 @@ static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile, // Set up limit values for MV components. // Mv beyond the range do not produce new/different prediction block. - x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND); - x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND); - x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND; - x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND; + mv_limits->row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND); + mv_limits->col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND); + mv_limits->row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND; + mv_limits->col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND; // Set up distance of MB to edge of frame in 1/8th pel units. 
assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1))); - set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width, - cm->mi_rows, cm->mi_cols); + set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width, cm->mi_rows, + cm->mi_cols); // Set up source buffers. vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col); @@ -222,10 +216,10 @@ static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile, // Setup segment ID. if (seg->enabled) { - if (cpi->oxcf.aq_mode != VARIANCE_AQ && + if (cpi->oxcf.aq_mode != VARIANCE_AQ && cpi->oxcf.aq_mode != LOOKAHEAD_AQ && cpi->oxcf.aq_mode != EQUATOR360_AQ) { - const uint8_t *const map = seg->update_map ? cpi->segmentation_map - : cm->last_frame_seg_map; + const uint8_t *const map = + seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map; mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col); } vp9_init_plane_quantizers(cpi, x); @@ -243,20 +237,20 @@ static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile, static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd, int mi_row, int mi_col, BLOCK_SIZE bsize) { - const int block_width = num_8x8_blocks_wide_lookup[bsize]; - const int block_height = num_8x8_blocks_high_lookup[bsize]; + const int block_width = + VPXMIN(num_8x8_blocks_wide_lookup[bsize], cm->mi_cols - mi_col); + const int block_height = + VPXMIN(num_8x8_blocks_high_lookup[bsize], cm->mi_rows - mi_row); + const int mi_stride = xd->mi_stride; + MODE_INFO *const src_mi = xd->mi[0]; int i, j; + for (j = 0; j < block_height; ++j) - for (i = 0; i < block_width; ++i) { - if (mi_row + j < cm->mi_rows && mi_col + i < cm->mi_cols) - xd->mi[j * xd->mi_stride + i] = xd->mi[0]; - } + for (i = 0; i < block_width; ++i) xd->mi[j * mi_stride + i] = src_mi; } -static void set_block_size(VP9_COMP * const cpi, - MACROBLOCK *const x, - MACROBLOCKD *const xd, - int mi_row, int mi_col, +static void set_block_size(VP9_COMP *const cpi, MACROBLOCK *const x, + MACROBLOCKD *const xd, 
int mi_row, int mi_col, BLOCK_SIZE bsize) { if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) { set_mode_info_offsets(&cpi->common, x, xd, mi_row, mi_col); @@ -318,38 +312,37 @@ static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) { node->part_variances = NULL; switch (bsize) { case BLOCK_64X64: { - v64x64 *vt = (v64x64 *) data; + v64x64 *vt = (v64x64 *)data; node->part_variances = &vt->part_variances; for (i = 0; i < 4; i++) node->split[i] = &vt->split[i].part_variances.none; break; } case BLOCK_32X32: { - v32x32 *vt = (v32x32 *) data; + v32x32 *vt = (v32x32 *)data; node->part_variances = &vt->part_variances; for (i = 0; i < 4; i++) node->split[i] = &vt->split[i].part_variances.none; break; } case BLOCK_16X16: { - v16x16 *vt = (v16x16 *) data; + v16x16 *vt = (v16x16 *)data; node->part_variances = &vt->part_variances; for (i = 0; i < 4; i++) node->split[i] = &vt->split[i].part_variances.none; break; } case BLOCK_8X8: { - v8x8 *vt = (v8x8 *) data; + v8x8 *vt = (v8x8 *)data; node->part_variances = &vt->part_variances; for (i = 0; i < 4; i++) node->split[i] = &vt->split[i].part_variances.none; break; } case BLOCK_4X4: { - v4x4 *vt = (v4x4 *) data; + v4x4 *vt = (v4x4 *)data; node->part_variances = &vt->part_variances; - for (i = 0; i < 4; i++) - node->split[i] = &vt->split[i]; + for (i = 0; i < 4; i++) node->split[i] = &vt->split[i]; break; } default: { @@ -367,8 +360,10 @@ static void fill_variance(int64_t s2, int64_t s, int c, var *v) { } static void get_variance(var *v) { - v->variance = (int)(256 * (v->sum_square_error - - ((v->sum_error * v->sum_error) >> v->log2_count)) >> v->log2_count); + v->variance = + (int)(256 * (v->sum_square_error - + ((v->sum_error * v->sum_error) >> v->log2_count)) >> + v->log2_count); } static void sum_2_variances(const var *a, const var *b, var *r) { @@ -389,17 +384,12 @@ static void fill_variance_tree(void *data, BLOCK_SIZE bsize) { &node.part_variances->none); } -static int 
set_vt_partitioning(VP9_COMP *cpi, - MACROBLOCK *const x, - MACROBLOCKD *const xd, - void *data, - BLOCK_SIZE bsize, - int mi_row, - int mi_col, - int64_t threshold, - BLOCK_SIZE bsize_min, +static int set_vt_partitioning(VP9_COMP *cpi, MACROBLOCK *const x, + MACROBLOCKD *const xd, void *data, + BLOCK_SIZE bsize, int mi_row, int mi_col, + int64_t threshold, BLOCK_SIZE bsize_min, int force_split) { - VP9_COMMON * const cm = &cpi->common; + VP9_COMMON *const cm = &cpi->common; variance_node vt; const int block_width = num_8x8_blocks_wide_lookup[bsize]; const int block_height = num_8x8_blocks_high_lookup[bsize]; @@ -407,16 +397,14 @@ static int set_vt_partitioning(VP9_COMP *cpi, assert(block_height == block_width); tree_to_node(data, bsize, &vt); - if (force_split == 1) - return 0; + if (force_split == 1) return 0; // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if // variance is below threshold, otherwise split will be selected. // No check for vert/horiz split as too few samples for variance. if (bsize == bsize_min) { // Variance already computed to set the force_split. - if (cm->frame_type == KEY_FRAME) - get_variance(&vt.part_variances->none); + if (cm->frame_type == KEY_FRAME) get_variance(&vt.part_variances->none); if (mi_col + block_width / 2 < cm->mi_cols && mi_row + block_height / 2 < cm->mi_rows && vt.part_variances->none.variance < threshold) { @@ -426,12 +414,11 @@ static int set_vt_partitioning(VP9_COMP *cpi, return 0; } else if (bsize > bsize_min) { // Variance already computed to set the force_split. - if (cm->frame_type == KEY_FRAME) - get_variance(&vt.part_variances->none); + if (cm->frame_type == KEY_FRAME) get_variance(&vt.part_variances->none); // For key frame: take split for bsize above 32X32 or very high variance. 
if (cm->frame_type == KEY_FRAME && (bsize > BLOCK_32X32 || - vt.part_variances->none.variance > (threshold << 4))) { + vt.part_variances->none.variance > (threshold << 4))) { return 0; } // If variance is low, take the bsize (no split). @@ -482,8 +469,8 @@ static void set_vbp_thresholds(VP9_COMP *cpi, int64_t thresholds[], int q) { VP9_COMMON *const cm = &cpi->common; const int is_key_frame = (cm->frame_type == KEY_FRAME); const int threshold_multiplier = is_key_frame ? 20 : 1; - int64_t threshold_base = (int64_t)(threshold_multiplier * - cpi->y_dequant[q][1]); + int64_t threshold_base = + (int64_t)(threshold_multiplier * cpi->y_dequant[q][1]); if (is_key_frame) { thresholds[0] = threshold_base; thresholds[1] = threshold_base >> 2; @@ -492,8 +479,8 @@ static void set_vbp_thresholds(VP9_COMP *cpi, int64_t thresholds[], int q) { } else { // Increase base variance threshold based on estimated noise level. if (cpi->noise_estimate.enabled) { - NOISE_LEVEL noise_level = vp9_noise_estimate_extract_level( - &cpi->noise_estimate); + NOISE_LEVEL noise_level = + vp9_noise_estimate_extract_level(&cpi->noise_estimate); if (noise_level == kHigh) threshold_base = 3 * threshold_base; else if (noise_level == kMedium) @@ -501,16 +488,18 @@ static void set_vbp_thresholds(VP9_COMP *cpi, int64_t thresholds[], int q) { else if (noise_level < kLow) threshold_base = (7 * threshold_base) >> 3; } + thresholds[0] = threshold_base; + thresholds[2] = threshold_base << cpi->oxcf.speed; if (cm->width <= 352 && cm->height <= 288) { thresholds[0] = threshold_base >> 3; thresholds[1] = threshold_base >> 1; thresholds[2] = threshold_base << 3; - } else { - thresholds[0] = threshold_base; + } else if (cm->width < 1280 && cm->height < 720) { thresholds[1] = (5 * threshold_base) >> 2; - if (cm->width >= 1920 && cm->height >= 1080) - thresholds[1] = (7 * threshold_base) >> 2; - thresholds[2] = threshold_base << cpi->oxcf.speed; + } else if (cm->width < 1920 && cm->height < 1080) { + thresholds[1] = 
threshold_base << 1; + } else { + thresholds[1] = (5 * threshold_base) >> 1; } } } @@ -532,8 +521,9 @@ void vp9_set_variance_partition_thresholds(VP9_COMP *cpi, int q) { if (cm->width <= 352 && cm->height <= 288) cpi->vbp_threshold_sad = 10; else - cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000 ? - (cpi->y_dequant[q][1] << 1) : 1000; + cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000 + ? (cpi->y_dequant[q][1] << 1) + : 1000; cpi->vbp_bsize_min = BLOCK_16X16; } cpi->vbp_threshold_minmax = 15 + (q >> 3); @@ -546,8 +536,7 @@ static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d, #if CONFIG_VP9_HIGHBITDEPTH int highbd_flag, #endif - int pixels_wide, - int pixels_high) { + int pixels_wide, int pixels_high) { int k; int minmax_max = 0; int minmax_min = 255; @@ -561,22 +550,17 @@ static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d, #if CONFIG_VP9_HIGHBITDEPTH if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) { vpx_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp, - d + y8_idx * dp + x8_idx, dp, - &min, &max); + d + y8_idx * dp + x8_idx, dp, &min, &max); } else { - vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, - d + y8_idx * dp + x8_idx, dp, - &min, &max); + vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx, + dp, &min, &max); } #else - vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, - d + y8_idx * dp + x8_idx, dp, + vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp, d + y8_idx * dp + x8_idx, dp, &min, &max); #endif - if ((max - min) > minmax_max) - minmax_max = (max - min); - if ((max - min) < minmax_min) - minmax_min = (max - min); + if ((max - min) > minmax_max) minmax_max = (max - min); + if ((max - min) < minmax_min) minmax_min = (max - min); } } return (minmax_max - minmax_min); @@ -587,8 +571,7 @@ static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d, #if CONFIG_VP9_HIGHBITDEPTH int highbd_flag, #endif - int pixels_wide, - int pixels_high, + int pixels_wide, int pixels_high, int 
is_key_frame) { int k; for (k = 0; k < 4; k++) { @@ -606,13 +589,11 @@ static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d, d_avg = vpx_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp); } else { s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp); - if (!is_key_frame) - d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp); + if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp); } #else s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp); - if (!is_key_frame) - d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp); + if (!is_key_frame) d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp); #endif sum = s_avg - d_avg; sse = sum * sum; @@ -626,8 +607,7 @@ static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d, #if CONFIG_VP9_HIGHBITDEPTH int highbd_flag, #endif - int pixels_wide, - int pixels_high, + int pixels_wide, int pixels_high, int is_key_frame) { int k; for (k = 0; k < 4; k++) { @@ -645,13 +625,11 @@ static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d, d_avg = vpx_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp); } else { s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp); - if (!is_key_frame) - d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp); + if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp); } #else s_avg = vpx_avg_8x8(s + y8_idx * sp + x8_idx, sp); - if (!is_key_frame) - d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp); + if (!is_key_frame) d_avg = vpx_avg_8x8(d + y8_idx * dp + x8_idx, dp); #endif sum = s_avg - d_avg; sse = sum * sum; @@ -660,13 +638,157 @@ static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d, } } +#if !CONFIG_VP9_HIGHBITDEPTH +// Check if most of the superblock is skin content, and if so, force split to +// 32x32, and set x->sb_is_skin for use in mode selection. 
+static int skin_sb_split(VP9_COMP *cpi, MACROBLOCK *x, const int low_res, + int mi_row, int mi_col, int *force_split) { + VP9_COMMON *const cm = &cpi->common; + // Avoid checking superblocks on/near boundary and avoid low resolutions. + // Note superblock may still pick 64X64 if y_sad is very small + // (i.e., y_sad < cpi->vbp_threshold_sad) below. For now leave this as is. + if (!low_res && (mi_col >= 8 && mi_col + 8 < cm->mi_cols && mi_row >= 8 && + mi_row + 8 < cm->mi_rows)) { + int num_16x16_skin = 0; + int num_16x16_nonskin = 0; + uint8_t *ysignal = x->plane[0].src.buf; + uint8_t *usignal = x->plane[1].src.buf; + uint8_t *vsignal = x->plane[2].src.buf; + int sp = x->plane[0].src.stride; + int spuv = x->plane[1].src.stride; + const int block_index = mi_row * cm->mi_cols + mi_col; + const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64]; + const int bh = num_8x8_blocks_high_lookup[BLOCK_64X64]; + const int xmis = VPXMIN(cm->mi_cols - mi_col, bw); + const int ymis = VPXMIN(cm->mi_rows - mi_row, bh); + // Loop through the 16x16 sub-blocks. + int i, j; + for (i = 0; i < ymis; i += 2) { + for (j = 0; j < xmis; j += 2) { + int bl_index = block_index + i * cm->mi_cols + j; + int bl_index1 = bl_index + 1; + int bl_index2 = bl_index + cm->mi_cols; + int bl_index3 = bl_index2 + 1; + int consec_zeromv = + VPXMIN(cpi->consec_zero_mv[bl_index], + VPXMIN(cpi->consec_zero_mv[bl_index1], + VPXMIN(cpi->consec_zero_mv[bl_index2], + cpi->consec_zero_mv[bl_index3]))); + int is_skin = vp9_compute_skin_block( + ysignal, usignal, vsignal, sp, spuv, BLOCK_16X16, consec_zeromv, 0); + num_16x16_skin += is_skin; + num_16x16_nonskin += (1 - is_skin); + if (num_16x16_nonskin > 3) { + // Exit loop if at least 4 of the 16x16 blocks are not skin. 
+ i = ymis; + break; + } + ysignal += 16; + usignal += 8; + vsignal += 8; + } + ysignal += (sp << 4) - 64; + usignal += (spuv << 3) - 32; + vsignal += (spuv << 3) - 32; + } + if (num_16x16_skin > 12) { + *force_split = 1; + return 1; + } + } + return 0; +} +#endif + +static void set_low_temp_var_flag(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd, + v64x64 *vt, int64_t thresholds[], + MV_REFERENCE_FRAME ref_frame_partition, + int mi_col, int mi_row) { + int i, j; + VP9_COMMON *const cm = &cpi->common; + const int mv_thr = cm->width > 640 ? 8 : 4; + // Check temporal variance for bsize >= 16x16, if LAST_FRAME was selected and + // int_pro mv is small. If the temporal variance is small set the flag + // variance_low for the block. The variance threshold can be adjusted, the + // higher the more aggressive. + if (ref_frame_partition == LAST_FRAME && + (cpi->sf.short_circuit_low_temp_var == 1 || + (xd->mi[0]->mv[0].as_mv.col < mv_thr && + xd->mi[0]->mv[0].as_mv.col > -mv_thr && + xd->mi[0]->mv[0].as_mv.row < mv_thr && + xd->mi[0]->mv[0].as_mv.row > -mv_thr))) { + if (xd->mi[0]->sb_type == BLOCK_64X64) { + if ((vt->part_variances).none.variance < (thresholds[0] >> 1)) + x->variance_low[0] = 1; + } else if (xd->mi[0]->sb_type == BLOCK_64X32) { + for (i = 0; i < 2; i++) { + if (vt->part_variances.horz[i].variance < (thresholds[0] >> 2)) + x->variance_low[i + 1] = 1; + } + } else if (xd->mi[0]->sb_type == BLOCK_32X64) { + for (i = 0; i < 2; i++) { + if (vt->part_variances.vert[i].variance < (thresholds[0] >> 2)) + x->variance_low[i + 3] = 1; + } + } else { + for (i = 0; i < 4; i++) { + const int idx[4][2] = { { 0, 0 }, { 0, 4 }, { 4, 0 }, { 4, 4 } }; + const int idx_str = + cm->mi_stride * (mi_row + idx[i][0]) + mi_col + idx[i][1]; + MODE_INFO **this_mi = cm->mi_grid_visible + idx_str; + + if (cm->mi_cols <= mi_col + idx[i][1] || + cm->mi_rows <= mi_row + idx[i][0]) + continue; + + if ((*this_mi)->sb_type == BLOCK_32X32) { + if (vt->split[i].part_variances.none.variance < 
(thresholds[1] >> 1)) + x->variance_low[i + 5] = 1; + } else if (cpi->sf.short_circuit_low_temp_var == 2) { + // For 32x16 and 16x32 blocks, the flag is set on each 16x16 block + // inside. + if ((*this_mi)->sb_type == BLOCK_16X16 || + (*this_mi)->sb_type == BLOCK_32X16 || + (*this_mi)->sb_type == BLOCK_16X32) { + for (j = 0; j < 4; j++) { + if (vt->split[i].split[j].part_variances.none.variance < + (thresholds[2] >> 8)) + x->variance_low[(i << 2) + j + 9] = 1; + } + } + } + } + } + } +} + +static void chroma_check(VP9_COMP *cpi, MACROBLOCK *x, int bsize, + unsigned int y_sad, int is_key_frame) { + int i; + MACROBLOCKD *xd = &x->e_mbd; + if (is_key_frame) return; + + for (i = 1; i <= 2; ++i) { + unsigned int uv_sad = UINT_MAX; + struct macroblock_plane *p = &x->plane[i]; + struct macroblockd_plane *pd = &xd->plane[i]; + const BLOCK_SIZE bs = get_plane_block_size(bsize, pd); + + if (bs != BLOCK_INVALID) + uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride, pd->dst.buf, + pd->dst.stride); + + // TODO(marpan): Investigate if we should lower this threshold if + // superblock is detected as skin. + x->color_sensitivity[i - 1] = uv_sad > (y_sad >> 2); + } +} + // This function chooses partitioning based on the variance between source and // reconstructed last, where variance is computed for down-sampled inputs. -static int choose_partitioning(VP9_COMP *cpi, - const TileInfo *const tile, - MACROBLOCK *x, - int mi_row, int mi_col) { - VP9_COMMON * const cm = &cpi->common; +static int choose_partitioning(VP9_COMP *cpi, const TileInfo *const tile, + MACROBLOCK *x, int mi_row, int mi_col) { + VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *xd = &x->e_mbd; int i, j, k, m; v64x64 vt; @@ -678,38 +800,39 @@ static int choose_partitioning(VP9_COMP *cpi, const uint8_t *d; int sp; int dp; + unsigned int y_sad = UINT_MAX; + BLOCK_SIZE bsize = BLOCK_64X64; + // Ref frame used in partitioning. 
+ MV_REFERENCE_FRAME ref_frame_partition = LAST_FRAME; int pixels_wide = 64, pixels_high = 64; - int64_t thresholds[4] = {cpi->vbp_thresholds[0], cpi->vbp_thresholds[1], - cpi->vbp_thresholds[2], cpi->vbp_thresholds[3]}; + int64_t thresholds[4] = { cpi->vbp_thresholds[0], cpi->vbp_thresholds[1], + cpi->vbp_thresholds[2], cpi->vbp_thresholds[3] }; // For the variance computation under SVC mode, we treat the frame as key if // the reference (base layer frame) is key frame (i.e., is_key_frame == 1). - const int is_key_frame = (cm->frame_type == KEY_FRAME || - (is_one_pass_cbr_svc(cpi) && - cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame)); + const int is_key_frame = + (cm->frame_type == KEY_FRAME || + (is_one_pass_cbr_svc(cpi) && + cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame)); // Always use 4x4 partition for key frame. const int use_4x4_partition = cm->frame_type == KEY_FRAME; const int low_res = (cm->width <= 352 && cm->height <= 288); int variance4x4downsample[16]; + int segment_id; - int segment_id = CR_SEGMENT_ID_BASE; + set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64); + segment_id = xd->mi[0]->segment_id; if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) { - const uint8_t *const map = cm->seg.update_map ? 
cpi->segmentation_map : - cm->last_frame_seg_map; - segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col); - if (cyclic_refresh_segment_id_boosted(segment_id)) { int q = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex); set_vbp_thresholds(cpi, thresholds, q); } } - set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64); + memset(x->variance_low, 0, sizeof(x->variance_low)); - if (xd->mb_to_right_edge < 0) - pixels_wide += (xd->mb_to_right_edge >> 3); - if (xd->mb_to_bottom_edge < 0) - pixels_high += (xd->mb_to_bottom_edge >> 3); + if (xd->mb_to_right_edge < 0) pixels_wide += (xd->mb_to_right_edge >> 3); + if (xd->mb_to_bottom_edge < 0) pixels_high += (xd->mb_to_bottom_edge >> 3); s = x->plane[0].src.buf; sp = x->plane[0].src.stride; @@ -723,13 +846,12 @@ static int choose_partitioning(VP9_COMP *cpi, // that the temporal reference frame will always be of type LAST_FRAME. // TODO(marpan): If that assumption is broken, we need to revisit this code. MODE_INFO *mi = xd->mi[0]; - unsigned int uv_sad; const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME); const YV12_BUFFER_CONFIG *yv12_g = NULL; - unsigned int y_sad, y_sad_g; - const BLOCK_SIZE bsize = BLOCK_32X32 - + (mi_col + 4 < cm->mi_cols) * 2 + (mi_row + 4 < cm->mi_rows); + unsigned int y_sad_g, y_sad_thr; + bsize = BLOCK_32X32 + (mi_col + 4 < cm->mi_cols) * 2 + + (mi_row + 4 < cm->mi_rows); assert(yv12 != NULL); @@ -739,14 +861,12 @@ static int choose_partitioning(VP9_COMP *cpi, yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME); } - if (yv12_g && yv12_g != yv12 && - (cpi->ref_frame_flags & VP9_GOLD_FLAG)) { + if (yv12_g && yv12_g != yv12 && (cpi->ref_frame_flags & VP9_GOLD_FLAG)) { vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col, &cm->frame_refs[GOLDEN_FRAME - 1].sf); - y_sad_g = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf, - x->plane[0].src.stride, - xd->plane[0].pre[0].buf, - xd->plane[0].pre[0].stride); + y_sad_g = cpi->fn_ptr[bsize].sdf( + x->plane[0].src.buf, 
x->plane[0].src.stride, xd->plane[0].pre[0].buf, + xd->plane[0].pre[0].stride); } else { y_sad_g = UINT_MAX; } @@ -760,90 +880,43 @@ static int choose_partitioning(VP9_COMP *cpi, mi->interp_filter = BILINEAR; y_sad = vp9_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col); - if (y_sad_g < y_sad) { + // Pick ref frame for partitioning, bias last frame when y_sad_g and y_sad + // are close if short_circuit_low_temp_var is on. + y_sad_thr = cpi->sf.short_circuit_low_temp_var ? (y_sad * 7) >> 3 : y_sad; + if (y_sad_g < y_sad_thr) { vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col, &cm->frame_refs[GOLDEN_FRAME - 1].sf); mi->ref_frame[0] = GOLDEN_FRAME; mi->mv[0].as_int = 0; y_sad = y_sad_g; + ref_frame_partition = GOLDEN_FRAME; } else { x->pred_mv[LAST_FRAME] = mi->mv[0].as_mv; + ref_frame_partition = LAST_FRAME; } + set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]); vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64); - // Check if most of the superblock is skin content, and if so, force split - // to 32x32. Avoid checking superblocks on/near boundary and avoid low - // resolutons for now. - // Note superblock may still pick 64X64 if y_sad is very small - // (i.e., y_sad < cpi->vbp_threshold_sad) below. For now leave this as is. x->sb_is_skin = 0; #if !CONFIG_VP9_HIGHBITDEPTH - if (cpi->use_skin_detection && !low_res && (mi_col >= 8 && - mi_col + 8 < cm->mi_cols && mi_row >= 8 && mi_row + 8 < cm->mi_rows)) { - int num_16x16_skin = 0; - int num_16x16_nonskin = 0; - uint8_t *ysignal = x->plane[0].src.buf; - uint8_t *usignal = x->plane[1].src.buf; - uint8_t *vsignal = x->plane[2].src.buf; - int spuv = x->plane[1].src.stride; - for (i = 0; i < 4; i++) { - for (j = 0; j < 4; j++) { - int is_skin = vp9_compute_skin_block(ysignal, - usignal, - vsignal, - sp, - spuv, - BLOCK_16X16); - num_16x16_skin += is_skin; - num_16x16_nonskin += (1 - is_skin); - if (num_16x16_nonskin > 3) { - // Exit loop if at least 4 of the 16x16 blocks are not skin. 
- i = 4; - j = 4; - } - ysignal += 16; - usignal += 8; - vsignal += 8; - } - ysignal += (sp << 4) - 64; - usignal += (spuv << 3) - 32; - vsignal += (spuv << 3) - 32; - } - if (num_16x16_skin > 12) { - x->sb_is_skin = 1; - force_split[0] = 1; - } - } + if (cpi->use_skin_detection) + x->sb_is_skin = + skin_sb_split(cpi, x, low_res, mi_row, mi_col, force_split); #endif - for (i = 1; i <= 2; ++i) { - struct macroblock_plane *p = &x->plane[i]; - struct macroblockd_plane *pd = &xd->plane[i]; - const BLOCK_SIZE bs = get_plane_block_size(bsize, pd); - - if (bs == BLOCK_INVALID) - uv_sad = UINT_MAX; - else - uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride, - pd->dst.buf, pd->dst.stride); - - // TODO(marpan): Investigate if we should lower this threshold if - // superblock is detected as skin. - x->color_sensitivity[i - 1] = uv_sad > (y_sad >> 2); - } d = xd->plane[0].dst.buf; dp = xd->plane[0].dst.stride; // If the y_sad is very small, take 64x64 as partition and exit. // Don't check on boosted segment for now, as 64x64 is suppressed there. 
- if (segment_id == CR_SEGMENT_ID_BASE && - y_sad < cpi->vbp_threshold_sad) { + if (segment_id == CR_SEGMENT_ID_BASE && y_sad < cpi->vbp_threshold_sad) { const int block_width = num_8x8_blocks_wide_lookup[BLOCK_64X64]; const int block_height = num_8x8_blocks_high_lookup[BLOCK_64X64]; if (mi_col + block_width / 2 < cm->mi_cols && mi_row + block_height / 2 < cm->mi_rows) { set_block_size(cpi, x, xd, mi_row, mi_col, BLOCK_64X64); + chroma_check(cpi, x, bsize, y_sad, is_key_frame); return 0; } } @@ -853,16 +926,10 @@ static int choose_partitioning(VP9_COMP *cpi, #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { switch (xd->bd) { - case 10: - d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); - break; - case 12: - d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12); - break; + case 10: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); break; + case 12: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12); break; case 8: - default: - d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); - break; + default: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); break; } } #endif // CONFIG_VP9_HIGHBITDEPTH @@ -886,16 +953,13 @@ static int choose_partitioning(VP9_COMP *cpi, if (!is_key_frame) { fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst, #if CONFIG_VP9_HIGHBITDEPTH - xd->cur_buf->flags, + xd->cur_buf->flags, #endif - pixels_wide, - pixels_high, - is_key_frame); + pixels_wide, pixels_high, is_key_frame); fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16); get_variance(&vt.split[i].split[j].part_variances.none); avg_16x16[i] += vt.split[i].split[j].part_variances.none.variance; - if (vt.split[i].split[j].part_variances.none.variance > - thresholds[2]) { + if (vt.split[i].split[j].part_variances.none.variance > thresholds[2]) { // 16X16 variance is above threshold for split, so force split to 8x8 // for this 16x16 block (this also forces splits for upper levels). 
force_split[split_index] = 1; @@ -903,7 +967,7 @@ static int choose_partitioning(VP9_COMP *cpi, force_split[0] = 1; } else if (cpi->oxcf.speed < 8 && vt.split[i].split[j].part_variances.none.variance > - thresholds[1] && + thresholds[1] && !cyclic_refresh_segment_id_boosted(segment_id)) { // We have some nominal amount of 16x16 variance (based on average), // compute the minmax over the 8x8 sub-blocks, and if above threshold, @@ -921,23 +985,20 @@ static int choose_partitioning(VP9_COMP *cpi, } } if (is_key_frame || (low_res && - vt.split[i].split[j].part_variances.none.variance > - (thresholds[1] << 1))) { + vt.split[i].split[j].part_variances.none.variance > + (thresholds[1] << 1))) { force_split[split_index] = 0; // Go down to 4x4 down-sampling for variance. variance4x4downsample[i2 + j] = 1; for (k = 0; k < 4; k++) { int x8_idx = x16_idx + ((k & 1) << 3); int y8_idx = y16_idx + ((k >> 1) << 3); - v8x8 *vst2 = is_key_frame ? &vst->split[k] : - &vt2[i2 + j].split[k]; + v8x8 *vst2 = is_key_frame ? &vst->split[k] : &vt2[i2 + j].split[k]; fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2, #if CONFIG_VP9_HIGHBITDEPTH xd->cur_buf->flags, #endif - pixels_wide, - pixels_high, - is_key_frame); + pixels_wide, pixels_high, is_key_frame); } } } @@ -948,10 +1009,8 @@ static int choose_partitioning(VP9_COMP *cpi, const int i2 = i << 2; for (j = 0; j < 4; j++) { if (variance4x4downsample[i2 + j] == 1) { - v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] : - &vt.split[i].split[j]; - for (m = 0; m < 4; m++) - fill_variance_tree(&vtemp->split[m], BLOCK_8X8); + v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] : &vt.split[i].split[j]; + for (m = 0; m < 4; m++) fill_variance_tree(&vtemp->split[m], BLOCK_8X8); fill_variance_tree(vtemp, BLOCK_16X16); // If variance of this 16x16 block is above the threshold, force block // to split. This also forces a split on the upper levels. 
@@ -972,8 +1031,8 @@ static int choose_partitioning(VP9_COMP *cpi, get_variance(&vt.split[i].part_variances.none); if (vt.split[i].part_variances.none.variance > thresholds[1] || (!is_key_frame && - vt.split[i].part_variances.none.variance > (thresholds[1] >> 1) && - vt.split[i].part_variances.none.variance > (avg_16x16[i] >> 1))) { + vt.split[i].part_variances.none.variance > (thresholds[1] >> 1) && + vt.split[i].part_variances.none.variance > (avg_16x16[i] >> 1))) { force_split[i + 1] = 1; force_split[0] = 1; } @@ -985,14 +1044,13 @@ static int choose_partitioning(VP9_COMP *cpi, get_variance(&vt.part_variances.none); // If variance of this 64x64 block is above (some threshold of) the average // variance over the sub-32x32 blocks, then force this block to split. - if (!is_key_frame && - vt.part_variances.none.variance > (5 * avg_32x32) >> 4) + if (!is_key_frame && vt.part_variances.none.variance > (5 * avg_32x32) >> 4) force_split[0] = 1; } // Now go through the entire structure, splitting every block size until // we get to one that's got a variance lower than our threshold. - if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows || + if (mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows || !set_vt_partitioning(cpi, x, xd, &vt, BLOCK_64X64, mi_row, mi_col, thresholds[0], BLOCK_16X16, force_split[0])) { for (i = 0; i < 4; ++i) { @@ -1009,15 +1067,13 @@ static int choose_partitioning(VP9_COMP *cpi, // For inter frames: if variance4x4downsample[] == 1 for this 16x16 // block, then the variance is based on 4x4 down-sampling, so use vt2 // in set_vt_partioning(), otherwise use vt. - v16x16 *vtemp = (!is_key_frame && - variance4x4downsample[i2 + j] == 1) ? - &vt2[i2 + j] : &vt.split[i].split[j]; - if (!set_vt_partitioning(cpi, x, xd, vtemp, BLOCK_16X16, - mi_row + y32_idx + y16_idx, - mi_col + x32_idx + x16_idx, - thresholds[2], - cpi->vbp_bsize_min, - force_split[5 + i2 + j])) { + v16x16 *vtemp = (!is_key_frame && variance4x4downsample[i2 + j] == 1) + ? 
&vt2[i2 + j] + : &vt.split[i].split[j]; + if (!set_vt_partitioning( + cpi, x, xd, vtemp, BLOCK_16X16, mi_row + y32_idx + y16_idx, + mi_col + x32_idx + x16_idx, thresholds[2], cpi->vbp_bsize_min, + force_split[5 + i2 + j])) { for (k = 0; k < 4; ++k) { const int x8_idx = (k & 1); const int y8_idx = (k >> 1); @@ -1027,16 +1083,14 @@ static int choose_partitioning(VP9_COMP *cpi, mi_row + y32_idx + y16_idx + y8_idx, mi_col + x32_idx + x16_idx + x8_idx, thresholds[3], BLOCK_8X8, 0)) { - set_block_size(cpi, x, xd, - (mi_row + y32_idx + y16_idx + y8_idx), - (mi_col + x32_idx + x16_idx + x8_idx), - BLOCK_4X4); + set_block_size( + cpi, x, xd, (mi_row + y32_idx + y16_idx + y8_idx), + (mi_col + x32_idx + x16_idx + x8_idx), BLOCK_4X4); } } else { - set_block_size(cpi, x, xd, - (mi_row + y32_idx + y16_idx + y8_idx), - (mi_col + x32_idx + x16_idx + x8_idx), - BLOCK_8X8); + set_block_size( + cpi, x, xd, (mi_row + y32_idx + y16_idx + y8_idx), + (mi_col + x32_idx + x16_idx + x8_idx), BLOCK_8X8); } } } @@ -1044,11 +1098,17 @@ static int choose_partitioning(VP9_COMP *cpi, } } } + + if (cpi->sf.short_circuit_low_temp_var) { + set_low_temp_var_flag(cpi, x, xd, &vt, thresholds, ref_frame_partition, + mi_col, mi_row); + } + + chroma_check(cpi, x, bsize, y_sad, is_key_frame); return 0; } -static void update_state(VP9_COMP *cpi, ThreadData *td, - PICK_MODE_CONTEXT *ctx, +static void update_state(VP9_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col, BLOCK_SIZE bsize, int output_enabled) { int i, x_idx, y; @@ -1066,8 +1126,7 @@ static void update_state(VP9_COMP *cpi, ThreadData *td, const int bh = num_8x8_blocks_high_lookup[mi->sb_type]; const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col); const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row); - MV_REF *const frame_mvs = - cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col; + MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col; int w, h; const int mis = cm->mi_stride; @@ -1084,17 +1143,15 @@ 
static void update_state(VP9_COMP *cpi, ThreadData *td, if (seg->enabled) { // For in frame complexity AQ copy the segment id from the segment map. if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) { - const uint8_t *const map = seg->update_map ? cpi->segmentation_map - : cm->last_frame_seg_map; - mi_addr->segment_id = - get_segment_id(cm, map, bsize, mi_row, mi_col); + const uint8_t *const map = + seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map; + mi_addr->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col); } // Else for cyclic refresh mode update the segment map, set the segment id // and then update the quantizer. if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) { - vp9_cyclic_refresh_update_segment(cpi, xd->mi[0], mi_row, - mi_col, bsize, ctx->rate, ctx->dist, - x->skip, p); + vp9_cyclic_refresh_update_segment(cpi, xd->mi[0], mi_row, mi_col, bsize, + ctx->rate, ctx->dist, x->skip, p); } } @@ -1117,13 +1174,12 @@ static void update_state(VP9_COMP *cpi, ThreadData *td, // when the mode was picked for it for (y = 0; y < mi_height; y++) for (x_idx = 0; x_idx < mi_width; x_idx++) - if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx - && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) { + if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx && + (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) { xd->mi[x_idx + y * mis] = mi_addr; } - if (cpi->oxcf.aq_mode) - vp9_init_plane_quantizers(cpi, x); + if (cpi->oxcf.aq_mode != NO_AQ) vp9_init_plane_quantizers(cpi, x); if (is_inter_block(xdmi) && xdmi->sb_type < BLOCK_8X8) { xdmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int; @@ -1134,22 +1190,16 @@ static void update_state(VP9_COMP *cpi, ThreadData *td, memcpy(x->zcoeff_blk[xdmi->tx_size], ctx->zcoeff_blk, sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk); - if (!output_enabled) - return; + if (!output_enabled) return; #if CONFIG_INTERNAL_STATS if (frame_is_intra_only(cm)) { static const int kf_mode_index[] = 
{ - THR_DC /*DC_PRED*/, - THR_V_PRED /*V_PRED*/, - THR_H_PRED /*H_PRED*/, - THR_D45_PRED /*D45_PRED*/, - THR_D135_PRED /*D135_PRED*/, - THR_D117_PRED /*D117_PRED*/, - THR_D153_PRED /*D153_PRED*/, - THR_D207_PRED /*D207_PRED*/, - THR_D63_PRED /*D63_PRED*/, - THR_TM /*TM_PRED*/, + THR_DC /*DC_PRED*/, THR_V_PRED /*V_PRED*/, + THR_H_PRED /*H_PRED*/, THR_D45_PRED /*D45_PRED*/, + THR_D135_PRED /*D135_PRED*/, THR_D117_PRED /*D117_PRED*/, + THR_D153_PRED /*D153_PRED*/, THR_D207_PRED /*D207_PRED*/, + THR_D63_PRED /*D63_PRED*/, THR_TM /*TM_PRED*/, }; ++cpi->mode_chosen_counts[kf_mode_index[xdmi->mode]]; } else { @@ -1162,7 +1212,7 @@ static void update_state(VP9_COMP *cpi, ThreadData *td, vp9_update_mv_count(td); if (cm->interp_filter == SWITCHABLE) { - const int ctx = vp9_get_pred_context_switchable_interp(xd); + const int ctx = get_pred_context_switchable_interp(xd); ++td->counts->switchable_interp[ctx][xdmi->interp_filter]; } } @@ -1189,8 +1239,8 @@ static void update_state(VP9_COMP *cpi, ThreadData *td, void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col) { - uint8_t *const buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer }; - const int strides[3] = {src->y_stride, src->uv_stride, src->uv_stride }; + uint8_t *const buffers[3] = { src->y_buffer, src->u_buffer, src->v_buffer }; + const int strides[3] = { src->y_stride, src->uv_stride, src->uv_stride }; int i; // Set current frame pointer. 
@@ -1208,12 +1258,8 @@ static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode, MODE_INFO *const mi = xd->mi[0]; INTERP_FILTER filter_ref; - if (xd->up_available) - filter_ref = xd->mi[-xd->mi_stride]->interp_filter; - else if (xd->left_available) - filter_ref = xd->mi[-1]->interp_filter; - else - filter_ref = EIGHTTAP; + filter_ref = get_pred_context_switchable_interp(xd); + if (filter_ref == SWITCHABLE_FILTERS) filter_ref = EIGHTTAP; mi->sb_type = bsize; mi->mode = ZEROMV; @@ -1232,24 +1278,20 @@ static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode, vp9_rd_cost_init(rd_cost); } -static int set_segment_rdmult(VP9_COMP *const cpi, - MACROBLOCK *const x, - int8_t segment_id) { +static int set_segment_rdmult(VP9_COMP *const cpi, MACROBLOCK *const x, + int8_t segment_id) { int segment_qindex; VP9_COMMON *const cm = &cpi->common; vp9_init_plane_quantizers(cpi, x); vpx_clear_system_state(); - segment_qindex = vp9_get_qindex(&cm->seg, segment_id, - cm->base_qindex); + segment_qindex = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex); return vp9_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q); } -static void rd_pick_sb_modes(VP9_COMP *cpi, - TileDataEnc *tile_data, - MACROBLOCK *const x, - int mi_row, int mi_col, RD_COST *rd_cost, - BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, - int64_t best_rd) { +static void rd_pick_sb_modes(VP9_COMP *cpi, TileDataEnc *tile_data, + MACROBLOCK *const x, int mi_row, int mi_col, + RD_COST *rd_cost, BLOCK_SIZE bsize, + PICK_MODE_CONTEXT *ctx, int64_t best_rd) { VP9_COMMON *const cm = &cpi->common; TileInfo *const tile_info = &tile_data->tile_info; MACROBLOCKD *const xd = &x->e_mbd; @@ -1284,48 +1326,69 @@ static void rd_pick_sb_modes(VP9_COMP *cpi, #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - x->source_variance = - vp9_high_get_sby_perpixel_variance(cpi, &x->plane[0].src, - bsize, xd->bd); + x->source_variance = vp9_high_get_sby_perpixel_variance( + cpi, &x->plane[0].src, 
bsize, xd->bd); } else { x->source_variance = - vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize); + vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize); } #else x->source_variance = - vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize); + vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize); #endif // CONFIG_VP9_HIGHBITDEPTH // Save rdmult before it might be changed, so it can be restored later. orig_rdmult = x->rdmult; + if ((cpi->sf.tx_domain_thresh > 0.0) || (cpi->sf.quant_opt_thresh > 0.0)) { + double logvar = vp9_log_block_var(cpi, x, bsize); + // Check block complexity as part of descision on using pixel or transform + // domain distortion in rd tests. + x->block_tx_domain = cpi->sf.allow_txfm_domain_distortion && + (logvar >= cpi->sf.tx_domain_thresh); + + // Check block complexity as part of descision on using quantized + // coefficient optimisation inside the rd loop. + x->block_qcoeff_opt = + cpi->sf.allow_quant_coeff_opt && (logvar <= cpi->sf.quant_opt_thresh); + } else { + x->block_tx_domain = cpi->sf.allow_txfm_domain_distortion; + x->block_qcoeff_opt = cpi->sf.allow_quant_coeff_opt; + } + if (aq_mode == VARIANCE_AQ) { - const int energy = bsize <= BLOCK_16X16 ? x->mb_energy - : vp9_block_energy(cpi, x, bsize); - if (cm->frame_type == KEY_FRAME || - cpi->refresh_alt_ref_frame || + const int energy = + bsize <= BLOCK_16X16 ? x->mb_energy : vp9_block_energy(cpi, x, bsize); + + if (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame || + cpi->force_update_segmentation || (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) { mi->segment_id = vp9_vaq_segment_id(energy); } else { - const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map - : cm->last_frame_seg_map; + const uint8_t *const map = + cm->seg.update_map ? 
cpi->segmentation_map : cm->last_frame_seg_map; mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col); } x->rdmult = set_segment_rdmult(cpi, x, mi->segment_id); + } else if (aq_mode == LOOKAHEAD_AQ) { + const uint8_t *const map = cpi->segmentation_map; + + // I do not change rdmult here consciously. + mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col); } else if (aq_mode == EQUATOR360_AQ) { - if (cm->frame_type == KEY_FRAME) { + if (cm->frame_type == KEY_FRAME || cpi->force_update_segmentation) { mi->segment_id = vp9_360aq_segment_id(mi_row, cm->mi_rows); } else { - const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map - : cm->last_frame_seg_map; + const uint8_t *const map = + cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map; mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col); } x->rdmult = set_segment_rdmult(cpi, x, mi->segment_id); } else if (aq_mode == COMPLEXITY_AQ) { x->rdmult = set_segment_rdmult(cpi, x, mi->segment_id); } else if (aq_mode == CYCLIC_REFRESH_AQ) { - const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map - : cm->last_frame_seg_map; + const uint8_t *const map = + cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map; // If segment is boosted, use rdmult for that segment. 
if (cyclic_refresh_segment_id_boosted( get_segment_id(cm, map, bsize, mi_row, mi_col))) @@ -1342,20 +1405,18 @@ static void rd_pick_sb_modes(VP9_COMP *cpi, vp9_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize, ctx, best_rd); else - vp9_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, - rd_cost, bsize, ctx, best_rd); + vp9_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost, + bsize, ctx, best_rd); } else { - vp9_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col, - rd_cost, bsize, ctx, best_rd); + vp9_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col, rd_cost, + bsize, ctx, best_rd); } } - // Examine the resulting rate and for AQ mode 2 make a segment choice. - if ((rd_cost->rate != INT_MAX) && - (aq_mode == COMPLEXITY_AQ) && (bsize >= BLOCK_16X16) && - (cm->frame_type == KEY_FRAME || - cpi->refresh_alt_ref_frame || + if ((rd_cost->rate != INT_MAX) && (aq_mode == COMPLEXITY_AQ) && + (bsize >= BLOCK_16X16) && + (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame || (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) { vp9_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate); } @@ -1364,8 +1425,7 @@ static void rd_pick_sb_modes(VP9_COMP *cpi, // TODO(jingning) The rate-distortion optimization flow needs to be // refactored to provide proper exit/return handle. 
- if (rd_cost->rate == INT_MAX) - rd_cost->rdcost = INT64_MAX; + if (rd_cost->rate == INT_MAX) rd_cost->rdcost = INT64_MAX; ctx->rate = rd_cost->rate; ctx->dist = rd_cost->dist; @@ -1381,10 +1441,10 @@ static void update_stats(VP9_COMMON *cm, ThreadData *td) { if (!frame_is_intra_only(cm)) { FRAME_COUNTS *const counts = td->counts; const int inter_block = is_inter_block(mi); - const int seg_ref_active = segfeature_active(&cm->seg, mi->segment_id, - SEG_LVL_REF_FRAME); + const int seg_ref_active = + segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_REF_FRAME); if (!seg_ref_active) { - counts->intra_inter[vp9_get_intra_inter_context(xd)][inter_block]++; + counts->intra_inter[get_intra_inter_context(xd)][inter_block]++; // If the segment reference feature is enabled we have only a single // reference frame allowed for the segment so exclude it from // the reference frame counts used to work out probabilities. @@ -1440,17 +1500,15 @@ static void restore_context(MACROBLOCK *const x, int mi_row, int mi_col, int mi_width = num_8x8_blocks_wide_lookup[bsize]; int mi_height = num_8x8_blocks_high_lookup[bsize]; for (p = 0; p < MAX_MB_PLANE; p++) { - memcpy( - xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x), - a + num_4x4_blocks_wide * p, - (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >> - xd->plane[p].subsampling_x); - memcpy( - xd->left_context[p] - + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y), - l + num_4x4_blocks_high * p, - (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >> - xd->plane[p].subsampling_y); + memcpy(xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x), + a + num_4x4_blocks_wide * p, + (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >> + xd->plane[p].subsampling_x); + memcpy(xd->left_context[p] + + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y), + l + num_4x4_blocks_high * p, + (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >> + xd->plane[p].subsampling_y); } memcpy(xd->above_seg_context + mi_col, 
sa, sizeof(*xd->above_seg_context) * mi_width); @@ -1472,17 +1530,15 @@ static void save_context(MACROBLOCK *const x, int mi_row, int mi_col, // buffer the above/left context information of the block in search. for (p = 0; p < MAX_MB_PLANE; ++p) { - memcpy( - a + num_4x4_blocks_wide * p, - xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x), - (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >> - xd->plane[p].subsampling_x); - memcpy( - l + num_4x4_blocks_high * p, - xd->left_context[p] - + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y), - (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >> - xd->plane[p].subsampling_y); + memcpy(a + num_4x4_blocks_wide * p, + xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x), + (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >> + xd->plane[p].subsampling_x); + memcpy(l + num_4x4_blocks_high * p, + xd->left_context[p] + + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y), + (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >> + xd->plane[p].subsampling_y); } memcpy(sa, xd->above_seg_context + mi_col, sizeof(*xd->above_seg_context) * mi_width); @@ -1490,8 +1546,7 @@ static void save_context(MACROBLOCK *const x, int mi_row, int mi_col, sizeof(xd->left_seg_context[0]) * mi_height); } -static void encode_b(VP9_COMP *cpi, const TileInfo *const tile, - ThreadData *td, +static void encode_b(VP9_COMP *cpi, const TileInfo *const tile, ThreadData *td, TOKENEXTRA **tp, int mi_row, int mi_col, int output_enabled, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) { @@ -1508,11 +1563,9 @@ static void encode_b(VP9_COMP *cpi, const TileInfo *const tile, } } -static void encode_sb(VP9_COMP *cpi, ThreadData *td, - const TileInfo *const tile, +static void encode_sb(VP9_COMP *cpi, ThreadData *td, const TileInfo *const tile, TOKENEXTRA **tp, int mi_row, int mi_col, - int output_enabled, BLOCK_SIZE bsize, - PC_TREE *pc_tree) { + int output_enabled, BLOCK_SIZE bsize, PC_TREE *pc_tree) { VP9_COMMON *const cm = &cpi->common; 
MACROBLOCK *const x = &td->mb; MACROBLOCKD *const xd = &x->e_mbd; @@ -1522,8 +1575,7 @@ static void encode_sb(VP9_COMP *cpi, ThreadData *td, PARTITION_TYPE partition; BLOCK_SIZE subsize = bsize; - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; + if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; if (bsize >= BLOCK_8X8) { ctx = partition_plane_context(xd, mi_row, mi_col, bsize); @@ -1573,9 +1625,7 @@ static void encode_sb(VP9_COMP *cpi, ThreadData *td, subsize, pc_tree->split[3]); } break; - default: - assert(0 && "Invalid partition type."); - break; + default: assert(0 && "Invalid partition type."); break; } if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) @@ -1585,9 +1635,8 @@ static void encode_sb(VP9_COMP *cpi, ThreadData *td, // Check to see if the given partition size is allowed for a specified number // of 8x8 block rows and columns remaining in the image. // If not then return the largest allowed partition size -static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, - int rows_left, int cols_left, - int *bh, int *bw) { +static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, int rows_left, + int cols_left, int *bh, int *bw) { if (rows_left <= 0 || cols_left <= 0) { return VPXMIN(bsize, BLOCK_8X8); } else { @@ -1602,9 +1651,10 @@ static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, return bsize; } -static void set_partial_b64x64_partition(MODE_INFO *mi, int mis, - int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining, - BLOCK_SIZE bsize, MODE_INFO **mi_8x8) { +static void set_partial_b64x64_partition(MODE_INFO *mi, int mis, int bh_in, + int bw_in, int row8x8_remaining, + int col8x8_remaining, BLOCK_SIZE bsize, + MODE_INFO **mi_8x8) { int bh = bh_in; int r, c; for (r = 0; r < MI_BLOCK_SIZE; r += bh) { @@ -1612,8 +1662,8 @@ static void set_partial_b64x64_partition(MODE_INFO *mi, int mis, for (c = 0; c < MI_BLOCK_SIZE; c += bw) { const int index = r * mis + c; mi_8x8[index] = mi + index; - mi_8x8[index]->sb_type = 
find_partition_size(bsize, - row8x8_remaining - r, col8x8_remaining - c, &bh, &bw); + mi_8x8[index]->sb_type = find_partition_size( + bsize, row8x8_remaining - r, col8x8_remaining - c, &bh, &bw); } } } @@ -1650,7 +1700,7 @@ static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile, } else { // Else this is a partial SB64. set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining, - col8x8_remaining, bsize, mi_8x8); + col8x8_remaining, bsize, mi_8x8); } } @@ -1658,21 +1708,33 @@ static const struct { int row; int col; } coord_lookup[16] = { - // 32x32 index = 0 - {0, 0}, {0, 2}, {2, 0}, {2, 2}, - // 32x32 index = 1 - {0, 4}, {0, 6}, {2, 4}, {2, 6}, - // 32x32 index = 2 - {4, 0}, {4, 2}, {6, 0}, {6, 2}, - // 32x32 index = 3 - {4, 4}, {4, 6}, {6, 4}, {6, 6}, + // 32x32 index = 0 + { 0, 0 }, + { 0, 2 }, + { 2, 0 }, + { 2, 2 }, + // 32x32 index = 1 + { 0, 4 }, + { 0, 6 }, + { 2, 4 }, + { 2, 6 }, + // 32x32 index = 2 + { 4, 0 }, + { 4, 2 }, + { 6, 0 }, + { 6, 2 }, + // 32x32 index = 3 + { 4, 4 }, + { 4, 6 }, + { 6, 4 }, + { 6, 6 }, }; static void set_source_var_based_partition(VP9_COMP *cpi, const TileInfo *const tile, MACROBLOCK *const x, - MODE_INFO **mi_8x8, - int mi_row, int mi_col) { + MODE_INFO **mi_8x8, int mi_row, + int mi_col) { VP9_COMMON *const cm = &cpi->common; const int mis = cm->mi_stride; const int row8x8_remaining = tile->mi_row_end - mi_row; @@ -1702,8 +1764,7 @@ static void set_source_var_based_partition(VP9_COMP *cpi, for (j = 0; j < 4; j++) { int b_mi_row = coord_lookup[i * 4 + j].row; int b_mi_col = coord_lookup[i * 4 + j].col; - int boffset = b_mi_row / 2 * cm->mb_cols + - b_mi_col / 2; + int boffset = b_mi_row / 2 * cm->mb_cols + b_mi_col / 2; d16[j] = cpi->source_diff_var + offset + boffset; @@ -1716,7 +1777,7 @@ static void set_source_var_based_partition(VP9_COMP *cpi, } is_larger_better = (d16[0]->var < thr) && (d16[1]->var < thr) && - (d16[2]->var < thr) && (d16[3]->var < thr); + (d16[2]->var < thr) && 
(d16[3]->var < thr); // Use 32x32 partition if (is_larger_better) { @@ -1727,9 +1788,12 @@ static void set_source_var_based_partition(VP9_COMP *cpi, d32[i].sum += d16[j]->sum; } - d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10); + d32[i].var = + (unsigned int)(d32[i].sse - + (unsigned int)(((int64_t)d32[i].sum * d32[i].sum) >> + 10)); - index = coord_lookup[i*4].row * mis + coord_lookup[i*4].col; + index = coord_lookup[i * 4].row * mis + coord_lookup[i * 4].col; mi_8x8[index] = mi_upper_left + index; mi_8x8[index]->sb_type = BLOCK_32X32; } @@ -1738,7 +1802,7 @@ static void set_source_var_based_partition(VP9_COMP *cpi, if (use32x32 == 4) { thr <<= 1; is_larger_better = (d32[0].var < thr) && (d32[1].var < thr) && - (d32[2].var < thr) && (d32[3].var < thr); + (d32[2].var < thr) && (d32[3].var < thr); // Use 64x64 partition if (is_larger_better) { @@ -1746,17 +1810,17 @@ static void set_source_var_based_partition(VP9_COMP *cpi, mi_8x8[0]->sb_type = BLOCK_64X64; } } - } else { // partial in-image SB64 + } else { // partial in-image SB64 int bh = num_8x8_blocks_high_lookup[BLOCK_16X16]; int bw = num_8x8_blocks_wide_lookup[BLOCK_16X16]; - set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, - row8x8_remaining, col8x8_remaining, BLOCK_16X16, mi_8x8); + set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining, + col8x8_remaining, BLOCK_16X16, mi_8x8); } } static void update_state_rt(VP9_COMP *cpi, ThreadData *td, - PICK_MODE_CONTEXT *ctx, - int mi_row, int mi_col, int bsize) { + PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col, + int bsize) { VP9_COMMON *const cm = &cpi->common; MACROBLOCK *const x = &td->mb; MACROBLOCKD *const xd = &x->e_mbd; @@ -1771,17 +1835,15 @@ static void update_state_rt(VP9_COMP *cpi, ThreadData *td, *(xd->mi[0]) = ctx->mic; *(x->mbmi_ext) = ctx->mbmi_ext; - if (seg->enabled && cpi->oxcf.aq_mode) { + if (seg->enabled && cpi->oxcf.aq_mode != NO_AQ) { // For in frame complexity AQ or variance AQ, copy segment_id 
from // segmentation_map. - if (cpi->oxcf.aq_mode == COMPLEXITY_AQ || - cpi->oxcf.aq_mode == VARIANCE_AQ || - cpi->oxcf.aq_mode == EQUATOR360_AQ) { - const uint8_t *const map = seg->update_map ? cpi->segmentation_map - : cm->last_frame_seg_map; + if (cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ) { + const uint8_t *const map = + seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map; mi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col); } else { - // Setting segmentation map for cyclic_refresh. + // Setting segmentation map for cyclic_refresh. vp9_cyclic_refresh_update_segment(cpi, mi, mi_row, mi_col, bsize, ctx->rate, ctx->dist, x->skip, p); } @@ -1791,7 +1853,7 @@ static void update_state_rt(VP9_COMP *cpi, ThreadData *td, if (is_inter_block(mi)) { vp9_update_mv_count(td); if (cm->interp_filter == SWITCHABLE) { - const int pred_ctx = vp9_get_pred_context_switchable_interp(xd); + const int pred_ctx = get_pred_context_switchable_interp(xd); ++td->counts->switchable_interp[pred_ctx][mi->interp_filter]; } @@ -1802,8 +1864,8 @@ static void update_state_rt(VP9_COMP *cpi, ThreadData *td, } if (cm->use_prev_frame_mvs || - (cpi->svc.use_base_mv && cpi->svc.number_spatial_layers > 1 - && cpi->svc.spatial_layer_id != cpi->svc.number_spatial_layers - 1)) { + (cpi->svc.use_base_mv && cpi->svc.number_spatial_layers > 1 && + cpi->svc.spatial_layer_id != cpi->svc.number_spatial_layers - 1)) { MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col; int w, h; @@ -1825,9 +1887,8 @@ static void update_state_rt(VP9_COMP *cpi, ThreadData *td, } static void encode_b_rt(VP9_COMP *cpi, ThreadData *td, - const TileInfo *const tile, - TOKENEXTRA **tp, int mi_row, int mi_col, - int output_enabled, BLOCK_SIZE bsize, + const TileInfo *const tile, TOKENEXTRA **tp, int mi_row, + int mi_col, int output_enabled, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) { MACROBLOCK *const x = &td->mb; set_offsets(cpi, tile, x, mi_row, mi_col, bsize); @@ -1841,10 +1902,9 @@ 
static void encode_b_rt(VP9_COMP *cpi, ThreadData *td, } static void encode_sb_rt(VP9_COMP *cpi, ThreadData *td, - const TileInfo *const tile, - TOKENEXTRA **tp, int mi_row, int mi_col, - int output_enabled, BLOCK_SIZE bsize, - PC_TREE *pc_tree) { + const TileInfo *const tile, TOKENEXTRA **tp, + int mi_row, int mi_col, int output_enabled, + BLOCK_SIZE bsize, PC_TREE *pc_tree) { VP9_COMMON *const cm = &cpi->common; MACROBLOCK *const x = &td->mb; MACROBLOCKD *const xd = &x->e_mbd; @@ -1854,12 +1914,11 @@ static void encode_sb_rt(VP9_COMP *cpi, ThreadData *td, PARTITION_TYPE partition; BLOCK_SIZE subsize; - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; + if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; if (bsize >= BLOCK_8X8) { const int idx_str = xd->mi_stride * mi_row + mi_col; - MODE_INFO ** mi_8x8 = cm->mi_grid_visible + idx_str; + MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str; ctx = partition_plane_context(xd, mi_row, mi_col, bsize); subsize = mi_8x8[0]->sb_type; } else { @@ -1903,22 +1962,17 @@ static void encode_sb_rt(VP9_COMP *cpi, ThreadData *td, encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled, subsize, pc_tree->split[3]); break; - default: - assert(0 && "Invalid partition type."); - break; + default: assert(0 && "Invalid partition type."); break; } if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) update_partition_context(xd, mi_row, mi_col, subsize, bsize); } -static void rd_use_partition(VP9_COMP *cpi, - ThreadData *td, - TileDataEnc *tile_data, - MODE_INFO **mi_8x8, TOKENEXTRA **tp, - int mi_row, int mi_col, - BLOCK_SIZE bsize, - int *rate, int64_t *dist, +static void rd_use_partition(VP9_COMP *cpi, ThreadData *td, + TileDataEnc *tile_data, MODE_INFO **mi_8x8, + TOKENEXTRA **tp, int mi_row, int mi_col, + BLOCK_SIZE bsize, int *rate, int64_t *dist, int do_recon, PC_TREE *pc_tree) { VP9_COMMON *const cm = &cpi->common; TileInfo *const tile_info = &tile_data->tile_info; @@ -1940,8 +1994,7 @@ 
static void rd_use_partition(VP9_COMP *cpi, int do_partition_search = 1; PICK_MODE_CONTEXT *ctx = &pc_tree->none; - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; + if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; assert(num_4x4_blocks_wide_lookup[bsize] == num_4x4_blocks_high_lookup[bsize]); @@ -1956,7 +2009,7 @@ static void rd_use_partition(VP9_COMP *cpi, pc_tree->partitioning = partition; save_context(x, mi_row, mi_col, a, l, sa, sl, bsize); - if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) { + if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode != NO_AQ) { set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize); x->mb_energy = vp9_block_energy(cpi, x, bsize); } @@ -1983,15 +2036,15 @@ static void rd_use_partition(VP9_COMP *cpi, mi_row + (mi_step >> 1) < cm->mi_rows && mi_col + (mi_step >> 1) < cm->mi_cols) { pc_tree->partitioning = PARTITION_NONE; - rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize, - ctx, INT64_MAX); + rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize, ctx, + INT64_MAX); pl = partition_plane_context(xd, mi_row, mi_col, bsize); if (none_rdc.rate < INT_MAX) { none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE]; - none_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, none_rdc.rate, - none_rdc.dist); + none_rdc.rdcost = + RDCOST(x->rdmult, x->rddiv, none_rdc.rate, none_rdc.dist); } restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize); @@ -2002,23 +2055,21 @@ static void rd_use_partition(VP9_COMP *cpi, switch (partition) { case PARTITION_NONE: - rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, - bsize, ctx, INT64_MAX); + rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, bsize, + ctx, INT64_MAX); break; case PARTITION_HORZ: rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, - subsize, &pc_tree->horizontal[0], - INT64_MAX); - if (last_part_rdc.rate != INT_MAX && - bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) { + subsize, 
&pc_tree->horizontal[0], INT64_MAX); + if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 && + mi_row + (mi_step >> 1) < cm->mi_rows) { RD_COST tmp_rdc; PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0]; vp9_rd_cost_init(&tmp_rdc); update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0); encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx); - rd_pick_sb_modes(cpi, tile_data, x, - mi_row + (mi_step >> 1), mi_col, &tmp_rdc, - subsize, &pc_tree->horizontal[1], INT64_MAX); + rd_pick_sb_modes(cpi, tile_data, x, mi_row + (mi_step >> 1), mi_col, + &tmp_rdc, subsize, &pc_tree->horizontal[1], INT64_MAX); if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) { vp9_rd_cost_reset(&last_part_rdc); break; @@ -2031,17 +2082,16 @@ static void rd_use_partition(VP9_COMP *cpi, case PARTITION_VERT: rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, subsize, &pc_tree->vertical[0], INT64_MAX); - if (last_part_rdc.rate != INT_MAX && - bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) { + if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 && + mi_col + (mi_step >> 1) < cm->mi_cols) { RD_COST tmp_rdc; PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0]; vp9_rd_cost_init(&tmp_rdc); update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0); encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx); - rd_pick_sb_modes(cpi, tile_data, x, - mi_row, mi_col + (mi_step >> 1), &tmp_rdc, - subsize, &pc_tree->vertical[bsize > BLOCK_8X8], - INT64_MAX); + rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + (mi_step >> 1), + &tmp_rdc, subsize, + &pc_tree->vertical[bsize > BLOCK_8X8], INT64_MAX); if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) { vp9_rd_cost_reset(&last_part_rdc); break; @@ -2069,11 +2119,10 @@ static void rd_use_partition(VP9_COMP *cpi, continue; vp9_rd_cost_init(&tmp_rdc); - rd_use_partition(cpi, td, tile_data, - mi_8x8 + jj * bss * mis + ii * bss, tp, - mi_row + y_idx, mi_col + x_idx, subsize, - &tmp_rdc.rate, &tmp_rdc.dist, - 
i != 3, pc_tree->split[i]); + rd_use_partition(cpi, td, tile_data, mi_8x8 + jj * bss * mis + ii * bss, + tp, mi_row + y_idx, mi_col + x_idx, subsize, + &tmp_rdc.rate, &tmp_rdc.dist, i != 3, + pc_tree->split[i]); if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) { vp9_rd_cost_reset(&last_part_rdc); break; @@ -2082,26 +2131,23 @@ static void rd_use_partition(VP9_COMP *cpi, last_part_rdc.dist += tmp_rdc.dist; } break; - default: - assert(0); - break; + default: assert(0); break; } pl = partition_plane_context(xd, mi_row, mi_col, bsize); if (last_part_rdc.rate < INT_MAX) { last_part_rdc.rate += cpi->partition_cost[pl][partition]; - last_part_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - last_part_rdc.rate, last_part_rdc.dist); + last_part_rdc.rdcost = + RDCOST(x->rdmult, x->rddiv, last_part_rdc.rate, last_part_rdc.dist); } - if (do_partition_search - && cpi->sf.adjust_partitioning_from_last_frame - && cpi->sf.partition_search_type == SEARCH_PARTITION - && partition != PARTITION_SPLIT && bsize > BLOCK_8X8 - && (mi_row + mi_step < cm->mi_rows || - mi_row + (mi_step >> 1) == cm->mi_rows) - && (mi_col + mi_step < cm->mi_cols || - mi_col + (mi_step >> 1) == cm->mi_cols)) { + if (do_partition_search && cpi->sf.adjust_partitioning_from_last_frame && + cpi->sf.partition_search_type == SEARCH_PARTITION && + partition != PARTITION_SPLIT && bsize > BLOCK_8X8 && + (mi_row + mi_step < cm->mi_rows || + mi_row + (mi_step >> 1) == cm->mi_rows) && + (mi_col + mi_step < cm->mi_cols || + mi_col + (mi_step >> 1) == cm->mi_cols)) { BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT); chosen_rdc.rate = 0; chosen_rdc.dist = 0; @@ -2121,9 +2167,9 @@ static void rd_use_partition(VP9_COMP *cpi, save_context(x, mi_row, mi_col, a, l, sa, sl, bsize); pc_tree->split[i]->partitioning = PARTITION_NONE; - rd_pick_sb_modes(cpi, tile_data, x, - mi_row + y_idx, mi_col + x_idx, &tmp_rdc, - split_subsize, &pc_tree->split[i]->none, INT64_MAX); + rd_pick_sb_modes(cpi, tile_data, x, mi_row + 
y_idx, mi_col + x_idx, + &tmp_rdc, split_subsize, &pc_tree->split[i]->none, + INT64_MAX); restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize); @@ -2136,7 +2182,7 @@ static void rd_use_partition(VP9_COMP *cpi, chosen_rdc.dist += tmp_rdc.dist; if (i != 3) - encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0, + encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0, split_subsize, pc_tree->split[i]); pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx, @@ -2146,22 +2192,20 @@ static void rd_use_partition(VP9_COMP *cpi, pl = partition_plane_context(xd, mi_row, mi_col, bsize); if (chosen_rdc.rate < INT_MAX) { chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT]; - chosen_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - chosen_rdc.rate, chosen_rdc.dist); + chosen_rdc.rdcost = + RDCOST(x->rdmult, x->rddiv, chosen_rdc.rate, chosen_rdc.dist); } } // If last_part is better set the partitioning to that. if (last_part_rdc.rdcost < chosen_rdc.rdcost) { mi_8x8[0]->sb_type = bsize; - if (bsize >= BLOCK_8X8) - pc_tree->partitioning = partition; + if (bsize >= BLOCK_8X8) pc_tree->partitioning = partition; chosen_rdc = last_part_rdc; } // If none was better set the partitioning to that. 
if (none_rdc.rdcost < chosen_rdc.rdcost) { - if (bsize >= BLOCK_8X8) - pc_tree->partitioning = PARTITION_NONE; + if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE; chosen_rdc = none_rdc; } @@ -2183,22 +2227,17 @@ static void rd_use_partition(VP9_COMP *cpi, } static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = { - BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, - BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, - BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, - BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, - BLOCK_16X16 + BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, + BLOCK_4X4, BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, BLOCK_16X16, + BLOCK_16X16, BLOCK_16X16, BLOCK_16X16 }; static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = { - BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, - BLOCK_16X16, BLOCK_32X32, BLOCK_32X32, - BLOCK_32X32, BLOCK_64X64, BLOCK_64X64, - BLOCK_64X64, BLOCK_64X64, BLOCK_64X64, - BLOCK_64X64 + BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_32X32, + BLOCK_32X32, BLOCK_32X32, BLOCK_64X64, BLOCK_64X64, BLOCK_64X64, + BLOCK_64X64, BLOCK_64X64, BLOCK_64X64 }; - // Look at all the mode_info entries for blocks that are part of this // partition and find the min and max values for sb_type. // At the moment this is designed to work on a 64x64 SB but could be @@ -2211,14 +2250,14 @@ static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO **mi_8x8, BLOCK_SIZE *max_block_size, int bs_hist[BLOCK_SIZES]) { int sb_width_in_blocks = MI_BLOCK_SIZE; - int sb_height_in_blocks = MI_BLOCK_SIZE; + int sb_height_in_blocks = MI_BLOCK_SIZE; int i, j; int index = 0; // Check the sb_type for each block that belongs to this region. for (i = 0; i < sb_height_in_blocks; ++i) { for (j = 0; j < sb_width_in_blocks; ++j) { - MODE_INFO *mi = mi_8x8[index+j]; + MODE_INFO *mi = mi_8x8[index + j]; BLOCK_SIZE sb_type = mi ? 
mi->sb_type : 0; bs_hist[sb_type]++; *min_block_size = VPXMIN(*min_block_size, sb_type); @@ -2230,30 +2269,27 @@ static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO **mi_8x8, // Next square block size less or equal than current block size. static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = { - BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, - BLOCK_8X8, BLOCK_8X8, BLOCK_8X8, - BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, - BLOCK_32X32, BLOCK_32X32, BLOCK_32X32, - BLOCK_64X64 + BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, BLOCK_8X8, BLOCK_8X8, + BLOCK_8X8, BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, BLOCK_32X32, + BLOCK_32X32, BLOCK_32X32, BLOCK_64X64 }; // Look at neighboring blocks and set a min and max partition size based on // what they chose. static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile, - MACROBLOCKD *const xd, - int mi_row, int mi_col, - BLOCK_SIZE *min_block_size, + MACROBLOCKD *const xd, int mi_row, + int mi_col, BLOCK_SIZE *min_block_size, BLOCK_SIZE *max_block_size) { VP9_COMMON *const cm = &cpi->common; MODE_INFO **mi = xd->mi; - const int left_in_image = xd->left_available && mi[-1]; - const int above_in_image = xd->up_available && mi[-xd->mi_stride]; + const int left_in_image = !!xd->left_mi; + const int above_in_image = !!xd->above_mi; const int row8x8_remaining = tile->mi_row_end - mi_row; const int col8x8_remaining = tile->mi_col_end - mi_col; int bh, bw; BLOCK_SIZE min_size = BLOCK_4X4; BLOCK_SIZE max_size = BLOCK_64X64; - int bs_hist[BLOCK_SIZES] = {0}; + int bs_hist[BLOCK_SIZES] = { 0 }; // Trap case where we do not have a prediction. if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) { @@ -2290,8 +2326,7 @@ static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile, } // Check border cases where max and min from neighbors may not be legal. 
- max_size = find_partition_size(max_size, - row8x8_remaining, col8x8_remaining, + max_size = find_partition_size(max_size, row8x8_remaining, col8x8_remaining, &bh, &bw); // Test for blocks at the edge of the active image. // This may be the actual edge of the image or where there are formatting @@ -2308,7 +2343,7 @@ static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile, // *min_block_size. if (cpi->sf.use_square_partition_only && next_square_size[max_size] < min_size) { - min_size = next_square_size[max_size]; + min_size = next_square_size[max_size]; } *min_block_size = min_size; @@ -2316,10 +2351,10 @@ static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile, } // TODO(jingning) refactor functions setting partition search range -static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd, - int mi_row, int mi_col, BLOCK_SIZE bsize, +static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd, int mi_row, + int mi_col, BLOCK_SIZE bsize, BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) { - int mi_width = num_8x8_blocks_wide_lookup[bsize]; + int mi_width = num_8x8_blocks_wide_lookup[bsize]; int mi_height = num_8x8_blocks_high_lookup[bsize]; int idx, idy; @@ -2342,7 +2377,7 @@ static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd, } } - if (xd->left_available) { + if (xd->left_mi) { for (idy = 0; idy < mi_height; ++idy) { mi = xd->mi[idy * cm->mi_stride - 1]; bs = mi ? mi->sb_type : bsize; @@ -2351,7 +2386,7 @@ static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd, } } - if (xd->up_available) { + if (xd->above_mi) { for (idx = 0; idx < mi_width; ++idx) { mi = xd->mi[idx - cm->mi_stride]; bs = mi ? 
mi->sb_type : bsize; @@ -2378,16 +2413,19 @@ static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) { } #if CONFIG_FP_MB_STATS -const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] = - {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 4, 4}; -const int num_16x16_blocks_high_lookup[BLOCK_SIZES] = - {1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 4, 2, 4}; -const int qindex_skip_threshold_lookup[BLOCK_SIZES] = - {0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120}; -const int qindex_split_threshold_lookup[BLOCK_SIZES] = - {0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120}; -const int complexity_16x16_blocks_threshold[BLOCK_SIZES] = - {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6}; +const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] = { 1, 1, 1, 1, 1, 1, 1, + 1, 2, 2, 2, 4, 4 }; +const int num_16x16_blocks_high_lookup[BLOCK_SIZES] = { 1, 1, 1, 1, 1, 1, 1, + 2, 1, 2, 4, 2, 4 }; +const int qindex_skip_threshold_lookup[BLOCK_SIZES] = { + 0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120 +}; +const int qindex_split_threshold_lookup[BLOCK_SIZES] = { + 0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120 +}; +const int complexity_16x16_blocks_threshold[BLOCK_SIZES] = { + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6 +}; typedef enum { MV_ZERO = 0, @@ -2426,10 +2464,10 @@ static INLINE int get_motion_inconsistency(MOTION_DIRECTION this_mv, // unlikely to be selected depending on previous rate-distortion optimization // results, for encoding speed-up. 
static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, - TileDataEnc *tile_data, - TOKENEXTRA **tp, int mi_row, int mi_col, - BLOCK_SIZE bsize, RD_COST *rd_cost, - int64_t best_rd, PC_TREE *pc_tree) { + TileDataEnc *tile_data, TOKENEXTRA **tp, + int mi_row, int mi_col, BLOCK_SIZE bsize, + RD_COST *rd_cost, int64_t best_rd, + PC_TREE *pc_tree) { VP9_COMMON *const cm = &cpi->common; TileInfo *const tile_info = &tile_data->tile_info; MACROBLOCK *const x = &td->mb; @@ -2439,11 +2477,13 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, PARTITION_CONTEXT sl[8], sa[8]; TOKENEXTRA *tp_orig = *tp; PICK_MODE_CONTEXT *ctx = &pc_tree->none; - int i, pl; + int i; + const int pl = partition_plane_context(xd, mi_row, mi_col, bsize); BLOCK_SIZE subsize; RD_COST this_rdc, sum_rdc, best_rdc; int do_split = bsize >= BLOCK_8X8; int do_rect = 1; + INTERP_FILTER pred_interp_filter; // Override skipping rectangular partition operations for edge blocks const int force_horz_split = (mi_row + mi_step >= cm->mi_rows); @@ -2460,10 +2500,10 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, #endif int partition_none_allowed = !force_horz_split && !force_vert_split; - int partition_horz_allowed = !force_vert_split && yss <= xss && - bsize >= BLOCK_8X8; - int partition_vert_allowed = !force_horz_split && xss <= yss && - bsize >= BLOCK_8X8; + int partition_horz_allowed = + !force_vert_split && yss <= xss && bsize >= BLOCK_8X8; + int partition_vert_allowed = + !force_horz_split && xss <= yss && bsize >= BLOCK_8X8; int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_dist_thr; int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr; @@ -2471,11 +2511,11 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, (void)*tp_orig; assert(num_8x8_blocks_wide_lookup[bsize] == - num_8x8_blocks_high_lookup[bsize]); + num_8x8_blocks_high_lookup[bsize]); // Adjust dist breakout threshold according to the partition size. 
- dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] + - b_height_log2_lookup[bsize]); + dist_breakout_thr >>= + 8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]); rate_breakout_thr *= num_pels_log2_lookup[bsize]; vp9_rd_cost_init(&this_rdc); @@ -2485,12 +2525,15 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize); - if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) + if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode != NO_AQ && + cpi->oxcf.aq_mode != LOOKAHEAD_AQ) x->mb_energy = vp9_block_energy(cpi, x, bsize); if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) { - int cb_partition_search_ctrl = ((pc_tree->index == 0 || pc_tree->index == 3) - + get_chessboard_index(cm->current_video_frame)) & 0x1; + int cb_partition_search_ctrl = + ((pc_tree->index == 0 || pc_tree->index == 3) + + get_chessboard_index(cm->current_video_frame)) & + 0x1; if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size) set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size); @@ -2500,10 +2543,10 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, // The threshold set here has to be of square block size. 
if (cpi->sf.auto_min_max_partition_size) { partition_none_allowed &= (bsize <= max_size && bsize >= min_size); - partition_horz_allowed &= ((bsize <= max_size && bsize > min_size) || - force_horz_split); - partition_vert_allowed &= ((bsize <= max_size && bsize > min_size) || - force_vert_split); + partition_horz_allowed &= + ((bsize <= max_size && bsize > min_size) || force_horz_split); + partition_vert_allowed &= + ((bsize <= max_size && bsize > min_size) || force_vert_split); do_split &= bsize > min_size; } @@ -2525,8 +2568,8 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, #if CONFIG_FP_MB_STATS if (cpi->use_fp_mb_stats) { set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize); - src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src, - mi_row, mi_col, bsize); + src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src, mi_row, + mi_col, bsize); } #endif @@ -2546,7 +2589,7 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, // compute a complexity measure, basically measure inconsistency of motion // vectors obtained from the first pass in the current block - for (r = mb_row; r < mb_row_end ; r++) { + for (r = mb_row; r < mb_row_end; r++) { for (c = mb_col; c < mb_col_end; c++) { const int mb_index = r * cm->mb_cols + c; @@ -2583,25 +2626,23 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, // PARTITION_NONE if (partition_none_allowed) { - rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, - &this_rdc, bsize, ctx, best_rdc.rdcost); + rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc, bsize, ctx, + best_rdc.rdcost); if (this_rdc.rate != INT_MAX) { if (bsize >= BLOCK_8X8) { - pl = partition_plane_context(xd, mi_row, mi_col, bsize); this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE]; - this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - this_rdc.rate, this_rdc.dist); + this_rdc.rdcost = + RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist); } if (this_rdc.rdcost < best_rdc.rdcost) { 
best_rdc = this_rdc; - if (bsize >= BLOCK_8X8) - pc_tree->partitioning = PARTITION_NONE; + if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE; // If all y, u, v transform blocks in this partition are skippable, and // the dist & rate are within the thresholds, the partition search is // terminated for current branch of the partition search tree. - if (!x->e_mbd.lossless && ctx->skippable && + if (!x->e_mbd.lossless && ctx->skippable && ((best_rdc.dist < (dist_breakout_thr >> 2)) || (best_rdc.dist < dist_breakout_thr && best_rdc.rate < rate_breakout_thr))) { @@ -2641,6 +2682,7 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, break; } } + if (skip) { if (src_diff_var == UINT_MAX) { set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize); @@ -2660,8 +2702,15 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, } // store estimated motion vector - if (cpi->sf.adaptive_motion_search) - store_pred_mv(x, ctx); + if (cpi->sf.adaptive_motion_search) store_pred_mv(x, ctx); + + // If the interp_filter is marked as SWITCHABLE_FILTERS, it was for an + // intra block and used for context purposes. 
+ if (ctx->mic.interp_filter == SWITCHABLE_FILTERS) { + pred_interp_filter = EIGHTTAP; + } else { + pred_interp_filter = ctx->mic.interp_filter; + } // PARTITION_SPLIT // TODO(jingning): use the motion vectors given by the above search as @@ -2671,27 +2720,24 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, if (bsize == BLOCK_8X8) { i = 4; if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed) - pc_tree->leaf_split[0]->pred_interp_filter = - ctx->mic.interp_filter; + pc_tree->leaf_split[0]->pred_interp_filter = pred_interp_filter; rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize, pc_tree->leaf_split[0], best_rdc.rdcost); - if (sum_rdc.rate == INT_MAX) - sum_rdc.rdcost = INT64_MAX; + + if (sum_rdc.rate == INT_MAX) sum_rdc.rdcost = INT64_MAX; } else { for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) { - const int x_idx = (i & 1) * mi_step; - const int y_idx = (i >> 1) * mi_step; + const int x_idx = (i & 1) * mi_step; + const int y_idx = (i >> 1) * mi_step; if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols) continue; - if (cpi->sf.adaptive_motion_search) - load_pred_mv(x, ctx); + if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx); pc_tree->split[i]->index = i; - rd_pick_partition(cpi, td, tile_data, tp, - mi_row + y_idx, mi_col + x_idx, - subsize, &this_rdc, + rd_pick_partition(cpi, td, tile_data, tp, mi_row + y_idx, + mi_col + x_idx, subsize, &this_rdc, best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]); if (this_rdc.rate == INT_MAX) { @@ -2706,20 +2752,17 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, } if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) { - pl = partition_plane_context(xd, mi_row, mi_col, bsize); sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT]; - sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - sum_rdc.rate, sum_rdc.dist); + sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist); if (sum_rdc.rdcost < best_rdc.rdcost) { best_rdc 
= sum_rdc; pc_tree->partitioning = PARTITION_SPLIT; // Rate and distortion based partition search termination clause. - if (!x->e_mbd.lossless && - ((best_rdc.dist < (dist_breakout_thr >> 2)) || - (best_rdc.dist < dist_breakout_thr && - best_rdc.rate < rate_breakout_thr))) { + if (!x->e_mbd.lossless && ((best_rdc.dist < (dist_breakout_thr >> 2)) || + (best_rdc.dist < dist_breakout_thr && + best_rdc.rate < rate_breakout_thr))) { do_rect = 0; } } @@ -2738,12 +2781,10 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, if (partition_horz_allowed && (do_rect || vp9_active_h_edge(cpi, mi_row, mi_step))) { subsize = get_subsize(bsize, PARTITION_HORZ); - if (cpi->sf.adaptive_motion_search) - load_pred_mv(x, ctx); + if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx); if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && partition_none_allowed) - pc_tree->horizontal[0].pred_interp_filter = - ctx->mic.interp_filter; + pc_tree->horizontal[0].pred_interp_filter = pred_interp_filter; rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize, &pc_tree->horizontal[0], best_rdc.rdcost); @@ -2753,14 +2794,12 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0); encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx); - if (cpi->sf.adaptive_motion_search) - load_pred_mv(x, ctx); + if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx); if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && partition_none_allowed) - pc_tree->horizontal[1].pred_interp_filter = - ctx->mic.interp_filter; - rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col, - &this_rdc, subsize, &pc_tree->horizontal[1], + pc_tree->horizontal[1].pred_interp_filter = pred_interp_filter; + rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col, &this_rdc, + subsize, &pc_tree->horizontal[1], best_rdc.rdcost - sum_rdc.rdcost); if (this_rdc.rate == INT_MAX) { sum_rdc.rdcost = 
INT64_MAX; @@ -2772,7 +2811,6 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, } if (sum_rdc.rdcost < best_rdc.rdcost) { - pl = partition_plane_context(xd, mi_row, mi_col, bsize); sum_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ]; sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist); if (sum_rdc.rdcost < best_rdc.rdcost) { @@ -2786,17 +2824,16 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, } restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize); } + // PARTITION_VERT if (partition_vert_allowed && (do_rect || vp9_active_v_edge(cpi, mi_col, mi_step))) { subsize = get_subsize(bsize, PARTITION_VERT); - if (cpi->sf.adaptive_motion_search) - load_pred_mv(x, ctx); + if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx); if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && partition_none_allowed) - pc_tree->vertical[0].pred_interp_filter = - ctx->mic.interp_filter; + pc_tree->vertical[0].pred_interp_filter = pred_interp_filter; rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize, &pc_tree->vertical[0], best_rdc.rdcost); if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + mi_step < cm->mi_cols && @@ -2805,15 +2842,13 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, &pc_tree->vertical[0]); - if (cpi->sf.adaptive_motion_search) - load_pred_mv(x, ctx); + if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx); if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 && partition_none_allowed) - pc_tree->vertical[1].pred_interp_filter = - ctx->mic.interp_filter; - rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step, - &this_rdc, subsize, - &pc_tree->vertical[1], best_rdc.rdcost - sum_rdc.rdcost); + pc_tree->vertical[1].pred_interp_filter = pred_interp_filter; + rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step, &this_rdc, + subsize, &pc_tree->vertical[1], + best_rdc.rdcost - sum_rdc.rdcost); 
if (this_rdc.rate == INT_MAX) { sum_rdc.rdcost = INT64_MAX; } else { @@ -2824,10 +2859,8 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, } if (sum_rdc.rdcost < best_rdc.rdcost) { - pl = partition_plane_context(xd, mi_row, mi_col, bsize); sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT]; - sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - sum_rdc.rate, sum_rdc.dist); + sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist); if (sum_rdc.rdcost < best_rdc.rdcost) { best_rdc = sum_rdc; pc_tree->partitioning = PARTITION_VERT; @@ -2840,14 +2873,14 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, // warning related to the fact that best_rd isn't used after this // point. This code should be refactored so that the duplicate // checks occur in some sub function and thus are used... - (void) best_rd; + (void)best_rd; *rd_cost = best_rdc; if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX && pc_tree->index != 3) { int output_enabled = (bsize == BLOCK_64X64); - encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, - bsize, pc_tree); + encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize, + pc_tree); } if (bsize == BLOCK_64X64) { @@ -2859,16 +2892,16 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, } } -static void encode_rd_sb_row(VP9_COMP *cpi, - ThreadData *td, - TileDataEnc *tile_data, - int mi_row, +static void encode_rd_sb_row(VP9_COMP *cpi, ThreadData *td, + TileDataEnc *tile_data, int mi_row, TOKENEXTRA **tp) { VP9_COMMON *const cm = &cpi->common; TileInfo *const tile_info = &tile_data->tile_info; MACROBLOCK *const x = &td->mb; MACROBLOCKD *const xd = &x->e_mbd; SPEED_FEATURES *const sf = &cpi->sf; + const int mi_col_start = tile_info->mi_col_start; + const int mi_col_end = tile_info->mi_col_end; int mi_col; // Initialize the left context for the new SB row @@ -2876,8 +2909,7 @@ static void encode_rd_sb_row(VP9_COMP *cpi, memset(xd->left_seg_context, 0, 
sizeof(xd->left_seg_context)); // Code each SB in the row - for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end; - mi_col += MI_BLOCK_SIZE) { + for (mi_col = mi_col_start; mi_col < mi_col_end; mi_col += MI_BLOCK_SIZE) { const struct segmentation *const seg = &cm->seg; int dummy_rate; int64_t dummy_dist; @@ -2889,8 +2921,7 @@ static void encode_rd_sb_row(VP9_COMP *cpi, MODE_INFO **mi = cm->mi_grid_visible + idx_str; if (sf->adaptive_pred_interp_filter) { - for (i = 0; i < 64; ++i) - td->leaf_tree[i].pred_interp_filter = SWITCHABLE; + for (i = 0; i < 64; ++i) td->leaf_tree[i].pred_interp_filter = SWITCHABLE; for (i = 0; i < 64; ++i) { td->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE; @@ -2904,8 +2935,8 @@ static void encode_rd_sb_row(VP9_COMP *cpi, td->pc_root->index = 0; if (seg->enabled) { - const uint8_t *const map = seg->update_map ? cpi->segmentation_map - : cm->last_frame_seg_map; + const uint8_t *const map = + seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map; int segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col); seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP); } @@ -2916,27 +2947,26 @@ static void encode_rd_sb_row(VP9_COMP *cpi, seg_skip ? 
BLOCK_64X64 : sf->always_this_block_size; set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64); set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize); - rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, - BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root); + rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64, + &dummy_rate, &dummy_dist, 1, td->pc_root); } else if (cpi->partition_search_skippable_frame) { BLOCK_SIZE bsize; set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64); bsize = get_rd_var_based_fixed_partition(cpi, x, mi_row, mi_col); set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize); - rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, - BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root); + rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64, + &dummy_rate, &dummy_dist, 1, td->pc_root); } else if (sf->partition_search_type == VAR_BASED_PARTITION && cm->frame_type != KEY_FRAME) { choose_partitioning(cpi, tile_info, x, mi_row, mi_col); - rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, - BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root); + rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64, + &dummy_rate, &dummy_dist, 1, td->pc_root); } else { // If required set upper and lower partition size limits if (sf->auto_min_max_partition_size) { set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64); rd_auto_partition_range(cpi, tile_info, xd, mi_row, mi_col, - &x->min_partition_size, - &x->max_partition_size); + &x->min_partition_size, &x->max_partition_size); } rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, BLOCK_64X64, &dummy_rdc, INT64_MAX, td->pc_root); @@ -2958,8 +2988,7 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) { // Note: this memset assumes above_context[0], [1] and [2] // are allocated as part of the same buffer. 
memset(xd->above_context[0], 0, - sizeof(*xd->above_context[0]) * - 2 * aligned_mi_cols * MAX_MB_PLANE); + sizeof(*xd->above_context[0]) * 2 * aligned_mi_cols * MAX_MB_PLANE); memset(xd->above_seg_context, 0, sizeof(*xd->above_seg_context) * aligned_mi_cols); } @@ -2970,8 +2999,8 @@ static int check_dual_ref_flags(VP9_COMP *cpi) { if (segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) { return 0; } else { - return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG) - + !!(ref_flags & VP9_ALT_FLAG)) >= 2; + return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG) + + !!(ref_flags & VP9_ALT_FLAG)) >= 2; } } @@ -3000,14 +3029,12 @@ static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) { } static TX_MODE select_tx_mode(const VP9_COMP *cpi, MACROBLOCKD *const xd) { - if (xd->lossless) - return ONLY_4X4; - if (cpi->common.frame_type == KEY_FRAME && - cpi->sf.use_nonrd_pick_mode) + if (xd->lossless) return ONLY_4X4; + if (cpi->common.frame_type == KEY_FRAME && cpi->sf.use_nonrd_pick_mode) return ALLOW_16X16; if (cpi->sf.tx_size_search_method == USE_LARGESTALL) return ALLOW_32X32; - else if (cpi->sf.tx_size_search_method == USE_FULL_RD|| + else if (cpi->sf.tx_size_search_method == USE_FULL_RD || cpi->sf.tx_size_search_method == USE_TX_8X8) return TX_MODE_SELECT; else @@ -3023,10 +3050,10 @@ static void hybrid_intra_mode_search(VP9_COMP *cpi, MACROBLOCK *const x, vp9_pick_intra_mode(cpi, x, rd_cost, bsize, ctx); } -static void nonrd_pick_sb_modes(VP9_COMP *cpi, - TileDataEnc *tile_data, MACROBLOCK *const x, - int mi_row, int mi_col, RD_COST *rd_cost, - BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) { +static void nonrd_pick_sb_modes(VP9_COMP *cpi, TileDataEnc *tile_data, + MACROBLOCK *const x, int mi_row, int mi_col, + RD_COST *rd_cost, BLOCK_SIZE bsize, + PICK_MODE_CONTEXT *ctx) { VP9_COMMON *const cm = &cpi->common; TileInfo *const tile_info = &tile_data->tile_info; MACROBLOCKD *const xd = &x->e_mbd; @@ -3058,11 +3085,9 @@ static void 
nonrd_pick_sb_modes(VP9_COMP *cpi, else if (segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP)) set_mode_info_seg_skip(x, cm->tx_mode, rd_cost, bsize); else if (bsize >= BLOCK_8X8) - vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col, - rd_cost, bsize, ctx); + vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col, rd_cost, bsize, ctx); else - vp9_pick_inter_mode_sub8x8(cpi, x, mi_row, mi_col, - rd_cost, bsize, ctx); + vp9_pick_inter_mode_sub8x8(cpi, x, mi_row, mi_col, rd_cost, bsize, ctx); duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize); @@ -3074,17 +3099,14 @@ static void nonrd_pick_sb_modes(VP9_COMP *cpi, (sizeof(l[0]) * num_4x4_blocks_high) >> pd->subsampling_y); } - if (rd_cost->rate == INT_MAX) - vp9_rd_cost_reset(rd_cost); + if (rd_cost->rate == INT_MAX) vp9_rd_cost_reset(rd_cost); ctx->rate = rd_cost->rate; ctx->dist = rd_cost->dist; } -static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x, - int mi_row, int mi_col, - BLOCK_SIZE bsize, - PC_TREE *pc_tree) { +static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x, int mi_row, + int mi_col, BLOCK_SIZE bsize, PC_TREE *pc_tree) { MACROBLOCKD *xd = &x->e_mbd; int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4; PARTITION_TYPE partition = pc_tree->partitioning; @@ -3092,8 +3114,7 @@ static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x, assert(bsize >= BLOCK_8X8); - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; + if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; switch (partition) { case PARTITION_NONE: @@ -3137,8 +3158,7 @@ static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x, pc_tree->split[3]); break; } - default: - break; + default: break; } } @@ -3153,17 +3173,15 @@ static void pred_pixel_ready_reset(PC_TREE *pc_tree, BLOCK_SIZE bsize) { if (bsize > BLOCK_8X8) { BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT); int i; - for (i = 0; i < 4; ++i) - pred_pixel_ready_reset(pc_tree->split[i], subsize); + for (i = 0; i < 4; ++i) 
pred_pixel_ready_reset(pc_tree->split[i], subsize); } } static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td, - TileDataEnc *tile_data, - TOKENEXTRA **tp, int mi_row, - int mi_col, BLOCK_SIZE bsize, RD_COST *rd_cost, - int do_recon, int64_t best_rd, - PC_TREE *pc_tree) { + TileDataEnc *tile_data, TOKENEXTRA **tp, + int mi_row, int mi_col, BLOCK_SIZE bsize, + RD_COST *rd_cost, int do_recon, + int64_t best_rd, PC_TREE *pc_tree) { const SPEED_FEATURES *const sf = &cpi->sf; VP9_COMMON *const cm = &cpi->common; TileInfo *const tile_info = &tile_data->tile_info; @@ -3184,14 +3202,14 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td, const int yss = x->e_mbd.plane[1].subsampling_y; int partition_none_allowed = !force_horz_split && !force_vert_split; - int partition_horz_allowed = !force_vert_split && yss <= xss && - bsize >= BLOCK_8X8; - int partition_vert_allowed = !force_horz_split && xss <= yss && - bsize >= BLOCK_8X8; - (void) *tp_orig; + int partition_horz_allowed = + !force_vert_split && yss <= xss && bsize >= BLOCK_8X8; + int partition_vert_allowed = + !force_horz_split && xss <= yss && bsize >= BLOCK_8X8; + (void)*tp_orig; assert(num_8x8_blocks_wide_lookup[bsize] == - num_8x8_blocks_high_lookup[bsize]); + num_8x8_blocks_high_lookup[bsize]); vp9_rd_cost_init(&sum_rdc); vp9_rd_cost_reset(&best_rdc); @@ -3200,14 +3218,14 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td, // Determine partition types in search according to the speed features. // The threshold set here has to be of square block size. 
if (sf->auto_min_max_partition_size) { - partition_none_allowed &= (bsize <= x->max_partition_size && - bsize >= x->min_partition_size); - partition_horz_allowed &= ((bsize <= x->max_partition_size && - bsize > x->min_partition_size) || - force_horz_split); - partition_vert_allowed &= ((bsize <= x->max_partition_size && - bsize > x->min_partition_size) || - force_vert_split); + partition_none_allowed &= + (bsize <= x->max_partition_size && bsize >= x->min_partition_size); + partition_horz_allowed &= + ((bsize <= x->max_partition_size && bsize > x->min_partition_size) || + force_horz_split); + partition_vert_allowed &= + ((bsize <= x->max_partition_size && bsize > x->min_partition_size) || + force_vert_split); do_split &= bsize > x->min_partition_size; } if (sf->use_square_partition_only) { @@ -3215,14 +3233,13 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td, partition_vert_allowed &= force_vert_split; } - ctx->pred_pixel_ready = !(partition_vert_allowed || - partition_horz_allowed || - do_split); + ctx->pred_pixel_ready = + !(partition_vert_allowed || partition_horz_allowed || do_split); // PARTITION_NONE if (partition_none_allowed) { - nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, - &this_rdc, bsize, ctx); + nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc, bsize, + ctx); ctx->mic = *xd->mi[0]; ctx->mbmi_ext = *x->mbmi_ext; ctx->skip_txfm[0] = x->skip_txfm[0]; @@ -3231,23 +3248,21 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td, if (this_rdc.rate != INT_MAX) { int pl = partition_plane_context(xd, mi_row, mi_col, bsize); this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE]; - this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - this_rdc.rate, this_rdc.dist); + this_rdc.rdcost = + RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist); if (this_rdc.rdcost < best_rdc.rdcost) { int64_t dist_breakout_thr = sf->partition_search_breakout_dist_thr; int64_t rate_breakout_thr = 
sf->partition_search_breakout_rate_thr; - dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] + - b_height_log2_lookup[bsize]); + dist_breakout_thr >>= + 8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]); rate_breakout_thr *= num_pels_log2_lookup[bsize]; best_rdc = this_rdc; - if (bsize >= BLOCK_8X8) - pc_tree->partitioning = PARTITION_NONE; + if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE; - if (!x->e_mbd.lossless && - this_rdc.rate < rate_breakout_thr && + if (!x->e_mbd.lossless && this_rdc.rate < rate_breakout_thr && this_rdc.dist < dist_breakout_thr) { do_split = 0; do_rect = 0; @@ -3272,9 +3287,8 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td, if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols) continue; load_pred_mv(x, ctx); - nonrd_pick_partition(cpi, td, tile_data, tp, - mi_row + y_idx, mi_col + x_idx, - subsize, &this_rdc, 0, + nonrd_pick_partition(cpi, td, tile_data, tp, mi_row + y_idx, + mi_col + x_idx, subsize, &this_rdc, 0, best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]); if (this_rdc.rate == INT_MAX) { @@ -3292,16 +3306,14 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td, } else { // skip rectangular partition test when larger block size // gives better rd cost - if (sf->less_rectangular_check) - do_rect &= !partition_none_allowed; + if (sf->less_rectangular_check) do_rect &= !partition_none_allowed; } } // PARTITION_HORZ if (partition_horz_allowed && do_rect) { subsize = get_subsize(bsize, PARTITION_HORZ); - if (sf->adaptive_motion_search) - load_pred_mv(x, ctx); + if (sf->adaptive_motion_search) load_pred_mv(x, ctx); pc_tree->horizontal[0].pred_pixel_ready = 1; nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize, &pc_tree->horizontal[0]); @@ -3314,9 +3326,8 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td, if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + ms < cm->mi_rows) { load_pred_mv(x, ctx); 
pc_tree->horizontal[1].pred_pixel_ready = 1; - nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + ms, mi_col, - &this_rdc, subsize, - &pc_tree->horizontal[1]); + nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + ms, mi_col, &this_rdc, + subsize, &pc_tree->horizontal[1]); pc_tree->horizontal[1].mic = *xd->mi[0]; pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext; @@ -3330,8 +3341,8 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td, this_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ]; sum_rdc.rate += this_rdc.rate; sum_rdc.dist += this_rdc.dist; - sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - sum_rdc.rate, sum_rdc.dist); + sum_rdc.rdcost = + RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist); } } @@ -3346,8 +3357,7 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td, // PARTITION_VERT if (partition_vert_allowed && do_rect) { subsize = get_subsize(bsize, PARTITION_VERT); - if (sf->adaptive_motion_search) - load_pred_mv(x, ctx); + if (sf->adaptive_motion_search) load_pred_mv(x, ctx); pc_tree->vertical[0].pred_pixel_ready = 1; nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize, &pc_tree->vertical[0]); @@ -3359,9 +3369,8 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td, if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + ms < cm->mi_cols) { load_pred_mv(x, ctx); pc_tree->vertical[1].pred_pixel_ready = 1; - nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + ms, - &this_rdc, subsize, - &pc_tree->vertical[1]); + nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + ms, &this_rdc, + subsize, &pc_tree->vertical[1]); pc_tree->vertical[1].mic = *xd->mi[0]; pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext; pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0]; @@ -3374,8 +3383,8 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td, sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT]; sum_rdc.rate += this_rdc.rate; sum_rdc.dist += this_rdc.dist; - sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - 
sum_rdc.rate, sum_rdc.dist); + sum_rdc.rdcost = + RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist); } } @@ -3399,8 +3408,8 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td, if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX && do_recon) { int output_enabled = (bsize == BLOCK_64X64); - encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, - bsize, pc_tree); + encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize, + pc_tree); } if (bsize == BLOCK_64X64 && do_recon) { @@ -3412,12 +3421,9 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td, } } -static void nonrd_select_partition(VP9_COMP *cpi, - ThreadData *td, - TileDataEnc *tile_data, - MODE_INFO **mi, - TOKENEXTRA **tp, - int mi_row, int mi_col, +static void nonrd_select_partition(VP9_COMP *cpi, ThreadData *td, + TileDataEnc *tile_data, MODE_INFO **mi, + TOKENEXTRA **tp, int mi_row, int mi_col, BLOCK_SIZE bsize, int output_enabled, RD_COST *rd_cost, PC_TREE *pc_tree) { VP9_COMMON *const cm = &cpi->common; @@ -3431,8 +3437,7 @@ static void nonrd_select_partition(VP9_COMP *cpi, RD_COST this_rdc; vp9_rd_cost_reset(&this_rdc); - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; + if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; subsize = (bsize >= BLOCK_8X8) ? 
mi[0]->sb_type : BLOCK_4X4; partition = partition_lookup[bsl][subsize]; @@ -3440,25 +3445,25 @@ static void nonrd_select_partition(VP9_COMP *cpi, if (bsize == BLOCK_32X32 && subsize == BLOCK_32X32) { x->max_partition_size = BLOCK_32X32; x->min_partition_size = BLOCK_16X16; - nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, - rd_cost, 0, INT64_MAX, pc_tree); + nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, rd_cost, + 0, INT64_MAX, pc_tree); } else if (bsize == BLOCK_32X32 && partition != PARTITION_NONE && subsize >= BLOCK_16X16) { x->max_partition_size = BLOCK_32X32; x->min_partition_size = BLOCK_8X8; - nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, - rd_cost, 0, INT64_MAX, pc_tree); + nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, rd_cost, + 0, INT64_MAX, pc_tree); } else if (bsize == BLOCK_16X16 && partition != PARTITION_NONE) { x->max_partition_size = BLOCK_16X16; x->min_partition_size = BLOCK_8X8; - nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, - rd_cost, 0, INT64_MAX, pc_tree); + nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize, rd_cost, + 0, INT64_MAX, pc_tree); } else { switch (partition) { case PARTITION_NONE: pc_tree->none.pred_pixel_ready = 1; - nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, - subsize, &pc_tree->none); + nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, subsize, + &pc_tree->none); pc_tree->none.mic = *xd->mi[0]; pc_tree->none.mbmi_ext = *x->mbmi_ext; pc_tree->none.skip_txfm[0] = x->skip_txfm[0]; @@ -3466,8 +3471,8 @@ static void nonrd_select_partition(VP9_COMP *cpi, break; case PARTITION_VERT: pc_tree->vertical[0].pred_pixel_ready = 1; - nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, - subsize, &pc_tree->vertical[0]); + nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, subsize, + &pc_tree->vertical[0]); pc_tree->vertical[0].mic = *xd->mi[0]; 
pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext; pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0]; @@ -3489,8 +3494,8 @@ static void nonrd_select_partition(VP9_COMP *cpi, break; case PARTITION_HORZ: pc_tree->horizontal[0].pred_pixel_ready = 1; - nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, - subsize, &pc_tree->horizontal[0]); + nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost, subsize, + &pc_tree->horizontal[0]); pc_tree->horizontal[0].mic = *xd->mi[0]; pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext; pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0]; @@ -3515,9 +3520,9 @@ static void nonrd_select_partition(VP9_COMP *cpi, nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, subsize, output_enabled, rd_cost, pc_tree->split[0]); - nonrd_select_partition(cpi, td, tile_data, mi + hbs, tp, - mi_row, mi_col + hbs, subsize, output_enabled, - &this_rdc, pc_tree->split[1]); + nonrd_select_partition(cpi, td, tile_data, mi + hbs, tp, mi_row, + mi_col + hbs, subsize, output_enabled, &this_rdc, + pc_tree->split[1]); if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX && rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) { rd_cost->rate += this_rdc.rate; @@ -3540,9 +3545,7 @@ static void nonrd_select_partition(VP9_COMP *cpi, rd_cost->dist += this_rdc.dist; } break; - default: - assert(0 && "Invalid partition type."); - break; + default: assert(0 && "Invalid partition type."); break; } } @@ -3550,13 +3553,9 @@ static void nonrd_select_partition(VP9_COMP *cpi, encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, 1, bsize, pc_tree); } - -static void nonrd_use_partition(VP9_COMP *cpi, - ThreadData *td, - TileDataEnc *tile_data, - MODE_INFO **mi, - TOKENEXTRA **tp, - int mi_row, int mi_col, +static void nonrd_use_partition(VP9_COMP *cpi, ThreadData *td, + TileDataEnc *tile_data, MODE_INFO **mi, + TOKENEXTRA **tp, int mi_row, int mi_col, BLOCK_SIZE bsize, int output_enabled, RD_COST *dummy_cost, PC_TREE *pc_tree) { 
VP9_COMMON *const cm = &cpi->common; @@ -3568,8 +3567,7 @@ static void nonrd_use_partition(VP9_COMP *cpi, PARTITION_TYPE partition; BLOCK_SIZE subsize; - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; + if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; subsize = (bsize >= BLOCK_8X8) ? mi[0]->sb_type : BLOCK_4X4; partition = partition_lookup[bsl][subsize]; @@ -3603,8 +3601,8 @@ static void nonrd_use_partition(VP9_COMP *cpi, subsize, &pc_tree->vertical[0]); if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) { pc_tree->vertical[1].pred_pixel_ready = 1; - nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs, - dummy_cost, subsize, &pc_tree->vertical[1]); + nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs, dummy_cost, + subsize, &pc_tree->vertical[1]); pc_tree->vertical[1].mic = *xd->mi[0]; pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext; pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0]; @@ -3626,8 +3624,8 @@ static void nonrd_use_partition(VP9_COMP *cpi, if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) { pc_tree->horizontal[1].pred_pixel_ready = 1; - nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col, - dummy_cost, subsize, &pc_tree->horizontal[1]); + nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col, dummy_cost, + subsize, &pc_tree->horizontal[1]); pc_tree->horizontal[1].mic = *xd->mi[0]; pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext; pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0]; @@ -3641,15 +3639,14 @@ static void nonrd_use_partition(VP9_COMP *cpi, if (bsize == BLOCK_8X8) { nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost, subsize, pc_tree->leaf_split[0]); - encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, - output_enabled, subsize, pc_tree->leaf_split[0]); + encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, + subsize, pc_tree->leaf_split[0]); } else { - nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, - subsize, output_enabled, 
dummy_cost, - pc_tree->split[0]); - nonrd_use_partition(cpi, td, tile_data, mi + hbs, tp, - mi_row, mi_col + hbs, subsize, output_enabled, - dummy_cost, pc_tree->split[1]); + nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, subsize, + output_enabled, dummy_cost, pc_tree->split[0]); + nonrd_use_partition(cpi, td, tile_data, mi + hbs, tp, mi_row, + mi_col + hbs, subsize, output_enabled, dummy_cost, + pc_tree->split[1]); nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis, tp, mi_row + hbs, mi_col, subsize, output_enabled, dummy_cost, pc_tree->split[2]); @@ -3658,25 +3655,23 @@ static void nonrd_use_partition(VP9_COMP *cpi, dummy_cost, pc_tree->split[3]); } break; - default: - assert(0 && "Invalid partition type."); - break; + default: assert(0 && "Invalid partition type."); break; } if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) update_partition_context(xd, mi_row, mi_col, subsize, bsize); } -static void encode_nonrd_sb_row(VP9_COMP *cpi, - ThreadData *td, - TileDataEnc *tile_data, - int mi_row, +static void encode_nonrd_sb_row(VP9_COMP *cpi, ThreadData *td, + TileDataEnc *tile_data, int mi_row, TOKENEXTRA **tp) { SPEED_FEATURES *const sf = &cpi->sf; VP9_COMMON *const cm = &cpi->common; TileInfo *const tile_info = &tile_data->tile_info; MACROBLOCK *const x = &td->mb; MACROBLOCKD *const xd = &x->e_mbd; + const int mi_col_start = tile_info->mi_col_start; + const int mi_col_end = tile_info->mi_col_end; int mi_col; // Initialize the left context for the new SB row @@ -3684,8 +3679,7 @@ static void encode_nonrd_sb_row(VP9_COMP *cpi, memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context)); // Code each SB in the row - for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end; - mi_col += MI_BLOCK_SIZE) { + for (mi_col = mi_col_start; mi_col < mi_col_end; mi_col += MI_BLOCK_SIZE) { const struct segmentation *const seg = &cm->seg; RD_COST dummy_rdc; const int idx_str = cm->mi_stride * mi_row + mi_col; @@ -3701,8 +3695,8 @@ static 
void encode_nonrd_sb_row(VP9_COMP *cpi, x->sb_is_skin = 0; if (seg->enabled) { - const uint8_t *const map = seg->update_map ? cpi->segmentation_map - : cm->last_frame_seg_map; + const uint8_t *const map = + seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map; int segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col); seg_skip = segfeature_active(seg, segment_id, SEG_LVL_SKIP); if (seg_skip) { @@ -3727,16 +3721,21 @@ static void encode_nonrd_sb_row(VP9_COMP *cpi, BLOCK_64X64, 1, &dummy_rdc, td->pc_root); break; case FIXED_PARTITION: - if (!seg_skip) - bsize = sf->always_this_block_size; + if (!seg_skip) bsize = sf->always_this_block_size; set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize); nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, BLOCK_64X64, 1, &dummy_rdc, td->pc_root); break; case REFERENCE_PARTITION: set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64); - if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled && - xd->mi[0]->segment_id) { + // Use nonrd_pick_partition on scene-cut for VBR, or on qp-segment + // if cyclic_refresh is enabled. + // nonrd_pick_partition does not support 4x4 partition, so avoid it + // on key frame for now. + if ((cpi->oxcf.rc_mode == VPX_VBR && cpi->rc.high_source_sad && + cm->frame_type != KEY_FRAME) || + (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled && + xd->mi[0]->segment_id)) { // Use lower max_partition_size for low resoultions. 
if (cm->width <= 352 && cm->height <= 288) x->max_partition_size = BLOCK_32X32; @@ -3744,8 +3743,8 @@ static void encode_nonrd_sb_row(VP9_COMP *cpi, x->max_partition_size = BLOCK_64X64; x->min_partition_size = BLOCK_8X8; nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, - BLOCK_64X64, &dummy_rdc, 1, - INT64_MAX, td->pc_root); + BLOCK_64X64, &dummy_rdc, 1, INT64_MAX, + td->pc_root); } else { choose_partitioning(cpi, tile_info, x, mi_row, mi_col); // TODO(marpan): Seems like nonrd_select_partition does not support @@ -3760,9 +3759,7 @@ static void encode_nonrd_sb_row(VP9_COMP *cpi, } break; - default: - assert(0); - break; + default: assert(0); break; } } } @@ -3778,9 +3775,9 @@ static int set_var_thresh_from_histogram(VP9_COMP *cpi) { const int last_stride = cpi->Last_Source->y_stride; // Pick cutoff threshold - const int cutoff = (VPXMIN(cm->width, cm->height) >= 720) ? - (cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100) : - (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100); + const int cutoff = (VPXMIN(cm->width, cm->height) >= 720) + ? 
(cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100) + : (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100); DECLARE_ALIGNED(16, int, hist[VAR_HIST_BINS]); diff *var16 = cpi->source_diff_var; @@ -3796,31 +3793,31 @@ static int set_var_thresh_from_histogram(VP9_COMP *cpi) { switch (cm->bit_depth) { case VPX_BITS_8: vpx_highbd_8_get16x16var(src, src_stride, last_src, last_stride, - &var16->sse, &var16->sum); + &var16->sse, &var16->sum); break; case VPX_BITS_10: vpx_highbd_10_get16x16var(src, src_stride, last_src, last_stride, - &var16->sse, &var16->sum); + &var16->sse, &var16->sum); break; case VPX_BITS_12: vpx_highbd_12_get16x16var(src, src_stride, last_src, last_stride, &var16->sse, &var16->sum); break; default: - assert(0 && "cm->bit_depth should be VPX_BITS_8, VPX_BITS_10" + assert(0 && + "cm->bit_depth should be VPX_BITS_8, VPX_BITS_10" " or VPX_BITS_12"); return -1; } } else { - vpx_get16x16var(src, src_stride, last_src, last_stride, - &var16->sse, &var16->sum); + vpx_get16x16var(src, src_stride, last_src, last_stride, &var16->sse, + &var16->sum); } #else - vpx_get16x16var(src, src_stride, last_src, last_stride, - &var16->sse, &var16->sum); + vpx_get16x16var(src, src_stride, last_src, last_stride, &var16->sse, + &var16->sum); #endif // CONFIG_VP9_HIGHBITDEPTH - var16->var = var16->sse - - (((uint32_t)var16->sum * var16->sum) >> 8); + var16->var = var16->sse - (((uint32_t)var16->sum * var16->sum) >> 8); if (var16->var >= VAR_HIST_MAX_BG_VAR) hist[VAR_HIST_BINS - 1]++; @@ -3863,8 +3860,7 @@ static void source_var_based_partition_search_method(VP9_COMP *cpi) { sf->partition_search_type = FIXED_PARTITION; } else { if (cm->last_width != cm->width || cm->last_height != cm->height) { - if (cpi->source_diff_var) - vpx_free(cpi->source_diff_var); + if (cpi->source_diff_var) vpx_free(cpi->source_diff_var); CHECK_MEM_ERROR(cm, cpi->source_diff_var, vpx_calloc(cm->MBs, sizeof(diff))); @@ -3889,8 +3885,7 @@ static int get_skip_encode_frame(const VP9_COMMON *cm, ThreadData *const td) { 
inter_count += td->counts->intra_inter[j][1]; } - return (intra_count << 2) < inter_count && - cm->frame_type != KEY_FRAME && + return (intra_count << 2) < inter_count && cm->frame_type != KEY_FRAME && cm->show_frame; } @@ -3903,10 +3898,9 @@ void vp9_init_tile_data(VP9_COMP *cpi) { int tile_tok = 0; if (cpi->tile_data == NULL || cpi->allocated_tiles < tile_cols * tile_rows) { - if (cpi->tile_data != NULL) - vpx_free(cpi->tile_data); - CHECK_MEM_ERROR(cm, cpi->tile_data, - vpx_malloc(tile_cols * tile_rows * sizeof(*cpi->tile_data))); + if (cpi->tile_data != NULL) vpx_free(cpi->tile_data); + CHECK_MEM_ERROR(cm, cpi->tile_data, vpx_malloc(tile_cols * tile_rows * + sizeof(*cpi->tile_data))); cpi->allocated_tiles = tile_cols * tile_rows; for (tile_row = 0; tile_row < tile_rows; ++tile_row) @@ -3936,22 +3930,22 @@ void vp9_init_tile_data(VP9_COMP *cpi) { } } -void vp9_encode_tile(VP9_COMP *cpi, ThreadData *td, - int tile_row, int tile_col) { +void vp9_encode_tile(VP9_COMP *cpi, ThreadData *td, int tile_row, + int tile_col) { VP9_COMMON *const cm = &cpi->common; const int tile_cols = 1 << cm->log2_tile_cols; - TileDataEnc *this_tile = - &cpi->tile_data[tile_row * tile_cols + tile_col]; - const TileInfo * const tile_info = &this_tile->tile_info; + TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col]; + const TileInfo *const tile_info = &this_tile->tile_info; TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col]; + const int mi_row_start = tile_info->mi_row_start; + const int mi_row_end = tile_info->mi_row_end; int mi_row; // Set up pointers to per thread motion search counters. 
td->mb.m_search_count_ptr = &td->rd_counts.m_search_count; td->mb.ex_search_count_ptr = &td->rd_counts.ex_search_count; - for (mi_row = tile_info->mi_row_start; mi_row < tile_info->mi_row_end; - mi_row += MI_BLOCK_SIZE) { + for (mi_row = mi_row_start; mi_row < mi_row_end; mi_row += MI_BLOCK_SIZE) { if (cpi->sf.use_nonrd_pick_mode) encode_nonrd_sb_row(cpi, td, this_tile, mi_row, &tok); else @@ -3960,7 +3954,7 @@ void vp9_encode_tile(VP9_COMP *cpi, ThreadData *td, cpi->tok_count[tile_row][tile_col] = (unsigned int)(tok - cpi->tile_tok[tile_row][tile_col]); assert(tok - cpi->tile_tok[tile_row][tile_col] <= - allocated_tokens(*tile_info)); + allocated_tokens(*tile_info)); } static void encode_tiles(VP9_COMP *cpi) { @@ -3980,10 +3974,9 @@ static void encode_tiles(VP9_COMP *cpi) { static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats, VP9_COMMON *cm, uint8_t **this_frame_mb_stats) { uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start + - cm->current_video_frame * cm->MBs * sizeof(uint8_t); + cm->current_video_frame * cm->MBs * sizeof(uint8_t); - if (mb_stats_in > firstpass_mb_stats->mb_stats_end) - return EOF; + if (mb_stats_in > firstpass_mb_stats->mb_stats_end) return EOF; *this_frame_mb_stats = mb_stats_in; @@ -3997,38 +3990,29 @@ static void encode_frame_internal(VP9_COMP *cpi) { MACROBLOCK *const x = &td->mb; VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &x->e_mbd; - RD_COUNTS *const rdc = &cpi->td.rd_counts; xd->mi = cm->mi_grid_visible; xd->mi[0] = cm->mi; vp9_zero(*td->counts); - vp9_zero(rdc->coef_counts); - vp9_zero(rdc->comp_pred_diff); - vp9_zero(rdc->filter_diff); - rdc->m_search_count = 0; // Count of motion search hits. - rdc->ex_search_count = 0; // Exhaustive mesh search hits. 
+ vp9_zero(cpi->td.rd_counts); - - xd->lossless = cm->base_qindex == 0 && - cm->y_dc_delta_q == 0 && - cm->uv_dc_delta_q == 0 && - cm->uv_ac_delta_q == 0; + xd->lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0 && + cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0; #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) x->fwd_txm4x4 = xd->lossless ? vp9_highbd_fwht4x4 : vpx_highbd_fdct4x4; else x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vpx_fdct4x4; - x->highbd_itxm_add = xd->lossless ? vp9_highbd_iwht4x4_add : - vp9_highbd_idct4x4_add; + x->highbd_itxm_add = + xd->lossless ? vp9_highbd_iwht4x4_add : vp9_highbd_idct4x4_add; #else x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vpx_fdct4x4; #endif // CONFIG_VP9_HIGHBITDEPTH x->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add; - if (xd->lossless) - x->optimize = 0; + if (xd->lossless) x->optimize = 0; cm->tx_mode = select_tx_mode(cpi, xd); @@ -4037,15 +4021,13 @@ static void encode_frame_internal(VP9_COMP *cpi) { vp9_initialize_rd_consts(cpi); vp9_initialize_me_consts(cpi, x, cm->base_qindex); init_encode_frame_mb_context(cpi); - cm->use_prev_frame_mvs = !cm->error_resilient_mode && - cm->width == cm->last_width && - cm->height == cm->last_height && - !cm->intra_only && - cm->last_show_frame; + cm->use_prev_frame_mvs = + !cm->error_resilient_mode && cm->width == cm->last_width && + cm->height == cm->last_height && !cm->intra_only && cm->last_show_frame; // Special case: set prev_mi to NULL when the previous mode info // context cannot be used. - cm->prev_mi = cm->use_prev_frame_mvs ? - cm->prev_mip + cm->mi_stride + 1 : NULL; + cm->prev_mi = + cm->use_prev_frame_mvs ? 
cm->prev_mip + cm->mi_stride + 1 : NULL; x->quant_fp = cpi->sf.use_quant_fp; vp9_zero(x->skip_txfm); @@ -4065,8 +4047,7 @@ static void encode_frame_internal(VP9_COMP *cpi) { } vp9_zero(x->zcoeff_blk); - if (cm->frame_type != KEY_FRAME && - cpi->rc.frames_since_golden == 0 && + if (cm->frame_type != KEY_FRAME && cpi->rc.frames_since_golden == 0 && !cpi->use_svc) cpi->ref_frame_flags &= (~VP9_GOLD_FLAG); @@ -4079,10 +4060,10 @@ static void encode_frame_internal(VP9_COMP *cpi) { vpx_usec_timer_start(&emr_timer); #if CONFIG_FP_MB_STATS - if (cpi->use_fp_mb_stats) { - input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm, - &cpi->twopass.this_frame_mb_stats); - } + if (cpi->use_fp_mb_stats) { + input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm, + &cpi->twopass.this_frame_mb_stats); + } #endif // If allowed, encoding tiles in parallel with one thread handling one tile. @@ -4095,8 +4076,8 @@ static void encode_frame_internal(VP9_COMP *cpi) { cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer); } - sf->skip_encode_frame = sf->skip_encode_sb ? - get_skip_encode_frame(cm, td) : 0; + sf->skip_encode_frame = + sf->skip_encode_sb ? 
get_skip_encode_frame(cm, td) : 0; #if 0 // Keep record of the total distortion this time around for future use @@ -4106,8 +4087,7 @@ static void encode_frame_internal(VP9_COMP *cpi) { static INTERP_FILTER get_interp_filter( const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) { - if (!is_alt_ref && - threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] && + if (!is_alt_ref && threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] && threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] && threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) { return EIGHTTAP_SMOOTH; @@ -4121,6 +4101,31 @@ static INTERP_FILTER get_interp_filter( } } +static int compute_frame_aq_offset(struct VP9_COMP *cpi) { + VP9_COMMON *const cm = &cpi->common; + MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible; + struct segmentation *const seg = &cm->seg; + + int mi_row, mi_col; + int sum_delta = 0; + int map_index = 0; + int qdelta_index; + int segment_id; + + for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) { + MODE_INFO **mi_8x8 = mi_8x8_ptr; + for (mi_col = 0; mi_col < cm->mi_cols; mi_col++, mi_8x8++) { + segment_id = mi_8x8[0]->segment_id; + qdelta_index = get_segdata(seg, segment_id, SEG_LVL_ALT_Q); + sum_delta += qdelta_index; + map_index++; + } + mi_8x8_ptr += cm->mi_stride; + } + + return sum_delta / (cm->mi_rows * cm->mi_cols); +} + void vp9_encode_frame(VP9_COMP *cpi) { VP9_COMMON *const cm = &cpi->common; @@ -4132,9 +4137,9 @@ void vp9_encode_frame(VP9_COMP *cpi) { // the other two. 
if (!frame_is_intra_only(cm)) { if ((cm->ref_frame_sign_bias[ALTREF_FRAME] == - cm->ref_frame_sign_bias[GOLDEN_FRAME]) || + cm->ref_frame_sign_bias[GOLDEN_FRAME]) || (cm->ref_frame_sign_bias[ALTREF_FRAME] == - cm->ref_frame_sign_bias[LAST_FRAME])) { + cm->ref_frame_sign_bias[LAST_FRAME])) { cpi->allow_comp_inter_inter = 0; } else { cpi->allow_comp_inter_inter = 1; @@ -4154,9 +4159,9 @@ void vp9_encode_frame(VP9_COMP *cpi) { // either compound, single or hybrid prediction as per whatever has // worked best for that type of frame in the past. // It also predicts whether another coding mode would have worked - // better that this coding mode. If that is the case, it remembers + // better than this coding mode. If that is the case, it remembers // that for subsequent frames. - // It does the same analysis for transform size selection also. + // It also does the same analysis for transform size selection. const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi); int64_t *const mode_thrs = rd_opt->prediction_type_threshes[frame_type]; int64_t *const filter_thrs = rd_opt->filter_threshes[frame_type]; @@ -4166,10 +4171,8 @@ void vp9_encode_frame(VP9_COMP *cpi) { if (is_alt_ref || !cpi->allow_comp_inter_inter) cm->reference_mode = SINGLE_REFERENCE; else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] && - mode_thrs[COMPOUND_REFERENCE] > - mode_thrs[REFERENCE_MODE_SELECT] && - check_dual_ref_flags(cpi) && - cpi->static_mb_pct == 100) + mode_thrs[COMPOUND_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT] && + check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100) cm->reference_mode = COMPOUND_REFERENCE; else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT]) cm->reference_mode = SINGLE_REFERENCE; @@ -4243,6 +4246,12 @@ void vp9_encode_frame(VP9_COMP *cpi) { cm->reference_mode = SINGLE_REFERENCE; encode_frame_internal(cpi); } + + // If segmented AQ is enabled compute the average AQ weighting. 
+ if (cm->seg.enabled && (cpi->oxcf.aq_mode != NO_AQ) && + (cm->seg.update_map || cm->seg.update_data)) { + cm->seg.aq_av_offset = compute_frame_aq_offset(cpi); + } } static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) { @@ -4264,21 +4273,39 @@ static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) { ++counts->uv_mode[y_mode][uv_mode]; } -static void encode_superblock(VP9_COMP *cpi, ThreadData *td, - TOKENEXTRA **t, int output_enabled, - int mi_row, int mi_col, BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx) { +static void update_zeromv_cnt(VP9_COMP *const cpi, const MODE_INFO *const mi, + int mi_row, int mi_col, BLOCK_SIZE bsize) { + const VP9_COMMON *const cm = &cpi->common; + MV mv = mi->mv[0].as_mv; + const int bw = num_8x8_blocks_wide_lookup[bsize]; + const int bh = num_8x8_blocks_high_lookup[bsize]; + const int xmis = VPXMIN(cm->mi_cols - mi_col, bw); + const int ymis = VPXMIN(cm->mi_rows - mi_row, bh); + const int block_index = mi_row * cm->mi_cols + mi_col; + int x, y; + for (y = 0; y < ymis; y++) + for (x = 0; x < xmis; x++) { + int map_offset = block_index + y * cm->mi_cols + x; + if (is_inter_block(mi) && mi->segment_id <= CR_SEGMENT_ID_BOOST2) { + if (abs(mv.row) < 8 && abs(mv.col) < 8) { + if (cpi->consec_zero_mv[map_offset] < 255) + cpi->consec_zero_mv[map_offset]++; + } else { + cpi->consec_zero_mv[map_offset] = 0; + } + } + } +} + +static void encode_superblock(VP9_COMP *cpi, ThreadData *td, TOKENEXTRA **t, + int output_enabled, int mi_row, int mi_col, + BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) { VP9_COMMON *const cm = &cpi->common; MACROBLOCK *const x = &td->mb; MACROBLOCKD *const xd = &x->e_mbd; - MODE_INFO **mi_8x8 = xd->mi; - MODE_INFO *mi = mi_8x8[0]; - const int seg_skip = segfeature_active(&cm->seg, mi->segment_id, - SEG_LVL_SKIP); - const int mis = cm->mi_stride; - const int mi_width = num_8x8_blocks_wide_lookup[bsize]; - const int mi_height = num_8x8_blocks_high_lookup[bsize]; - + MODE_INFO *mi = xd->mi[0]; + 
const int seg_skip = + segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP); x->skip_recode = !x->select_tx_size && mi->sb_type >= BLOCK_8X8 && cpi->oxcf.aq_mode != COMPLEXITY_AQ && cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ && @@ -4293,24 +4320,28 @@ static void encode_superblock(VP9_COMP *cpi, ThreadData *td, x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH); - if (x->skip_encode) - return; + if (x->skip_encode) return; if (!is_inter_block(mi)) { int plane; +#if CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH + if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && + (xd->above_mi == NULL || xd->left_mi == NULL) && + need_top_left[mi->uv_mode]) + assert(0); +#endif // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH mi->skip = 1; for (plane = 0; plane < MAX_MB_PLANE; ++plane) - vp9_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane); - if (output_enabled) - sum_intra_stats(td->counts, mi); - vp9_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8)); + vp9_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane, 1); + if (output_enabled) sum_intra_stats(td->counts, mi); + vp9_tokenize_sb(cpi, td, t, !output_enabled, seg_skip, + VPXMAX(bsize, BLOCK_8X8)); } else { int ref; const int is_compound = has_second_ref(mi); set_ref_ptrs(cm, xd, mi->ref_frame[0], mi->ref_frame[1]); for (ref = 0; ref < 1 + is_compound; ++ref) { - YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, - mi->ref_frame[ref]); + YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mi->ref_frame[ref]); assert(cfg != NULL); vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col, &xd->block_refs[ref]->sf); @@ -4323,34 +4354,34 @@ static void encode_superblock(VP9_COMP *cpi, ThreadData *td, VPXMAX(bsize, BLOCK_8X8)); vp9_encode_sb(x, VPXMAX(bsize, BLOCK_8X8)); - vp9_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8)); + vp9_tokenize_sb(cpi, td, t, !output_enabled, seg_skip, + VPXMAX(bsize, BLOCK_8X8)); + 
} + + if (seg_skip) { + assert(mi->skip); } if (output_enabled) { - if (cm->tx_mode == TX_MODE_SELECT && - mi->sb_type >= BLOCK_8X8 && - !(is_inter_block(mi) && (mi->skip || seg_skip))) { + if (cm->tx_mode == TX_MODE_SELECT && mi->sb_type >= BLOCK_8X8 && + !(is_inter_block(mi) && mi->skip)) { ++get_tx_counts(max_txsize_lookup[bsize], get_tx_size_context(xd), &td->counts->tx)[mi->tx_size]; } else { - int x, y; - TX_SIZE tx_size; // The new intra coding scheme requires no change of transform size if (is_inter_block(mi)) { - tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode], - max_txsize_lookup[bsize]); + mi->tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode], + max_txsize_lookup[bsize]); } else { - tx_size = (bsize >= BLOCK_8X8) ? mi->tx_size : TX_4X4; + mi->tx_size = (bsize >= BLOCK_8X8) ? mi->tx_size : TX_4X4; } - - for (y = 0; y < mi_height; y++) - for (x = 0; x < mi_width; x++) - if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows) - mi_8x8[mis * y + x]->tx_size = tx_size; } + ++td->counts->tx.tx_totals[mi->tx_size]; ++td->counts->tx.tx_totals[get_uv_tx_size(mi, &xd->plane[1])]; if (cm->seg.enabled && cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) vp9_cyclic_refresh_update_sb_postencode(cpi, mi, mi_row, mi_col, bsize); + if (cpi->oxcf.pass == 0 && cpi->svc.temporal_layer_id == 0) + update_zeromv_cnt(cpi, mi, mi_row, mi_col, bsize); } } diff --git a/libs/libvpx/vp9/encoder/vp9_encodeframe.h b/libs/libvpx/vp9/encoder/vp9_encodeframe.h index 6aaa56463b..aa54947858 100644 --- a/libs/libvpx/vp9/encoder/vp9_encodeframe.h +++ b/libs/libvpx/vp9/encoder/vp9_encodeframe.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP9_ENCODER_VP9_ENCODEFRAME_H_ #define VP9_ENCODER_VP9_ENCODEFRAME_H_ @@ -31,14 +30,14 @@ struct ThreadData; #define VAR_HIST_SMALL_CUT_OFF 45 void vp9_setup_src_planes(struct macroblock *x, - const struct yv12_buffer_config *src, - int mi_row, int mi_col); + const struct yv12_buffer_config *src, int mi_row, + int mi_col); void vp9_encode_frame(struct VP9_COMP *cpi); void vp9_init_tile_data(struct VP9_COMP *cpi); -void vp9_encode_tile(struct VP9_COMP *cpi, struct ThreadData *td, - int tile_row, int tile_col); +void vp9_encode_tile(struct VP9_COMP *cpi, struct ThreadData *td, int tile_row, + int tile_col); void vp9_set_variance_partition_thresholds(struct VP9_COMP *cpi, int q); diff --git a/libs/libvpx/vp9/encoder/vp9_encodemb.c b/libs/libvpx/vp9/encoder/vp9_encodemb.c index 689e8c0d9c..20ebe68197 100644 --- a/libs/libvpx/vp9/encoder/vp9_encodemb.c +++ b/libs/libvpx/vp9/encoder/vp9_encodemb.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include "./vp9_rtcd.h" #include "./vpx_config.h" #include "./vpx_dsp_rtcd.h" @@ -50,37 +49,30 @@ void vp9_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) { pd->dst.buf, pd->dst.stride); } -#define RDTRUNC(RM, DM, R, D) \ - (((1 << (VP9_PROB_COST_SHIFT - 1)) + (R) * (RM)) & \ - ((1 << VP9_PROB_COST_SHIFT) - 1)) - typedef struct vp9_token_state { - int rate; - int error; - int next; - int16_t token; - int16_t qc; + int64_t error; + int rate; + int16_t next; + int16_t token; + tran_low_t qc; + tran_low_t dqc; + uint8_t best_index; } vp9_token_state; -// TODO(jimbankoski): experiment to find optimal RD numbers. 
-static const int plane_rd_mult[PLANE_TYPES] = { 4, 2 }; +static const int plane_rd_mult[REF_TYPES][PLANE_TYPES] = { + { 10, 6 }, { 8, 5 }, +}; -#define UPDATE_RD_COST()\ -{\ - rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0);\ - rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1);\ - if (rd_cost0 == rd_cost1) {\ - rd_cost0 = RDTRUNC(rdmult, rddiv, rate0, error0);\ - rd_cost1 = RDTRUNC(rdmult, rddiv, rate1, error1);\ - }\ -} +#define UPDATE_RD_COST() \ + { \ + rd_cost0 = RDCOST(rdmult, rddiv, rate0, error0); \ + rd_cost1 = RDCOST(rdmult, rddiv, rate1, error1); \ + } // This function is a place holder for now but may ultimately need // to scan previous tokens to work out the correct context. -static int trellis_get_coeff_context(const int16_t *scan, - const int16_t *nb, - int idx, int token, - uint8_t *token_cache) { +static int trellis_get_coeff_context(const int16_t *scan, const int16_t *nb, + int idx, int token, uint8_t *token_cache) { int bak = token_cache[scan[idx]], pt; token_cache[scan[idx]] = vp9_pt_energy_class[token]; pt = get_coef_context(nb, token_cache, idx + 1); @@ -88,14 +80,13 @@ static int trellis_get_coeff_context(const int16_t *scan, return pt; } -static int optimize_b(MACROBLOCK *mb, int plane, int block, - TX_SIZE tx_size, int ctx) { +int vp9_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size, + int ctx) { MACROBLOCKD *const xd = &mb->e_mbd; struct macroblock_plane *const p = &mb->plane[plane]; struct macroblockd_plane *const pd = &xd->plane[plane]; const int ref = is_inter_block(xd->mi[0]); vp9_token_state tokens[1025][2]; - unsigned best_index[1025][2]; uint8_t token_cache[1024]; const tran_low_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block); tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block); @@ -103,16 +94,19 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, const int eob = p->eobs[block]; const PLANE_TYPE type = get_plane_type(plane); const int default_eob = 16 << (tx_size << 1); - const int mul = 1 
+ (tx_size == TX_32X32); - const int16_t *dequant_ptr = pd->dequant; + const int shift = (tx_size == TX_32X32); + const int16_t *const dequant_ptr = pd->dequant; const uint8_t *const band_translate = get_band_translate(tx_size); const scan_order *const so = get_scan(xd, tx_size, type, block); const int16_t *const scan = so->scan; const int16_t *const nb = so->neighbors; + const int dq_step[2] = { dequant_ptr[0] >> shift, dequant_ptr[1] >> shift }; int next = eob, sz = 0; - int64_t rdmult = mb->rdmult * plane_rd_mult[type], rddiv = mb->rddiv; + const int64_t rdmult = (mb->rdmult * plane_rd_mult[ref][type]) >> 1; + const int64_t rddiv = mb->rddiv; int64_t rd_cost0, rd_cost1; - int rate0, rate1, error0, error1; + int rate0, rate1; + int64_t error0, error1; int16_t t0, t1; EXTRABIT e0; int best, band, pt, i, final_eob; @@ -126,9 +120,6 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, assert(eob <= default_eob); /* Now set up a Viterbi trellis to evaluate alternative roundings. */ - if (!ref) - rdmult = (rdmult * 9) >> 4; - /* Initialize the sentinel node of the trellis. */ tokens[eob][0].rate = 0; tokens[eob][0].error = 0; @@ -138,8 +129,7 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, tokens[eob][1] = tokens[eob][0]; for (i = 0; i < eob; i++) - token_cache[scan[i]] = - vp9_pt_energy_class[vp9_get_token(qcoeff[scan[i]])]; + token_cache[scan[i]] = vp9_pt_energy_class[vp9_get_token(qcoeff[scan[i]])]; for (i = eob; i-- > 0;) { int base_bits, d2, dx; @@ -167,7 +157,7 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, /* And pick the best. 
*/ best = rd_cost1 < rd_cost0; base_bits = vp9_get_cost(t0, e0, cat6_high_cost); - dx = mul * (dqcoeff[rc] - coeff[rc]); + dx = (dqcoeff[rc] - coeff[rc]) * (1 << shift); #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { dx >>= xd->bd - 8; @@ -179,15 +169,16 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, tokens[i][0].next = next; tokens[i][0].token = t0; tokens[i][0].qc = x; - best_index[i][0] = best; + tokens[i][0].dqc = dqcoeff[rc]; + tokens[i][0].best_index = best; /* Evaluate the second possibility for this state. */ rate0 = tokens[next][0].rate; rate1 = tokens[next][1].rate; - if ((abs(x) * dequant_ptr[rc != 0] > abs(coeff[rc]) * mul) && - (abs(x) * dequant_ptr[rc != 0] < abs(coeff[rc]) * mul + - dequant_ptr[rc != 0])) + if ((abs(x) * dequant_ptr[rc != 0] > (abs(coeff[rc]) << shift)) && + (abs(x) * dequant_ptr[rc != 0] < + (abs(coeff[rc]) << shift) + dequant_ptr[rc != 0])) shortcut = 1; else shortcut = 0; @@ -195,6 +186,10 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, if (shortcut) { sz = -(x < 0); x -= 2 * sz + 1; + } else { + tokens[i][1] = tokens[i][0]; + next = i; + continue; } /* Consider both possible successor states. */ @@ -245,7 +240,24 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, tokens[i][1].next = next; tokens[i][1].token = best ? t1 : t0; tokens[i][1].qc = x; - best_index[i][1] = best; + + if (x) { + tran_low_t offset = dq_step[rc != 0]; + // The 32x32 transform coefficient uses half quantization step size. + // Account for the rounding difference in the dequantized coefficient + // value when the quantization index is dropped from an even number + // to an odd number. + if (shift & x) offset += (dequant_ptr[rc != 0] & 0x01); + + if (sz == 0) + tokens[i][1].dqc = dqcoeff[rc] - offset; + else + tokens[i][1].dqc = dqcoeff[rc] + offset; + } else { + tokens[i][1].dqc = 0; + } + + tokens[i][1].best_index = best; /* Finally, make this the new head of the trellis. 
*/ next = i; } else { @@ -253,20 +265,21 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, * add a new trellis node, but we do need to update the costs. */ band = band_translate[i + 1]; + pt = get_coef_context(nb, token_cache, i + 1); t0 = tokens[next][0].token; t1 = tokens[next][1].token; /* Update the cost of each path if we're past the EOB token. */ if (t0 != EOB_TOKEN) { tokens[next][0].rate += - mb->token_costs[tx_size][type][ref][band][1][0][t0]; + mb->token_costs[tx_size][type][ref][band][1][pt][t0]; tokens[next][0].token = ZERO_TOKEN; } if (t1 != EOB_TOKEN) { tokens[next][1].rate += - mb->token_costs[tx_size][type][ref][band][1][0][t1]; + mb->token_costs[tx_size][type][ref][band][1][pt][t1]; tokens[next][1].token = ZERO_TOKEN; } - best_index[i][0] = best_index[i][1] = 0; + tokens[i][0].best_index = tokens[i][1].best_index = 0; /* Don't update next, because we didn't add a new node. */ } } @@ -284,20 +297,15 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, UPDATE_RD_COST(); best = rd_cost1 < rd_cost0; final_eob = -1; - memset(qcoeff, 0, sizeof(*qcoeff) * (16 << (tx_size * 2))); - memset(dqcoeff, 0, sizeof(*dqcoeff) * (16 << (tx_size * 2))); + for (i = next; i < eob; i = next) { const int x = tokens[i][best].qc; const int rc = scan[i]; - if (x) { - final_eob = i; - } - + if (x) final_eob = i; qcoeff[rc] = x; - dqcoeff[rc] = (x * dequant_ptr[rc != 0]) / mul; - + dqcoeff[rc] = tokens[i][best].dqc; next = tokens[i][best].next; - best = best_index[i][best]; + best = tokens[i][best].best_index; } final_eob++; @@ -305,9 +313,8 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, return final_eob; } -static INLINE void fdct32x32(int rd_transform, - const int16_t *src, tran_low_t *dst, - int src_stride) { +static INLINE void fdct32x32(int rd_transform, const int16_t *src, + tran_low_t *dst, int src_stride) { if (rd_transform) vpx_fdct32x32_rd(src, dst, src_stride); else @@ -324,7 +331,7 @@ static INLINE void highbd_fdct32x32(int 
rd_transform, const int16_t *src, } #endif // CONFIG_VP9_HIGHBITDEPTH -void vp9_xform_quant_fp(MACROBLOCK *x, int plane, int block, +void vp9_xform_quant_fp(MACROBLOCK *x, int plane, int block, int row, int col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size) { MACROBLOCKD *const xd = &x->e_mbd; const struct macroblock_plane *const p = &x->plane[plane]; @@ -335,10 +342,8 @@ void vp9_xform_quant_fp(MACROBLOCK *x, int plane, int block, tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); uint16_t *const eob = &p->eobs[block]; const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; - int i, j; const int16_t *src_diff; - txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j); - src_diff = &p->src_diff[4 * (j * diff_stride + i)]; + src_diff = &p->src_diff[4 * (row * diff_stride + col)]; #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { @@ -347,33 +352,31 @@ void vp9_xform_quant_fp(MACROBLOCK *x, int plane, int block, highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride); vp9_highbd_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin, p->round_fp, p->quant_fp, p->quant_shift, - qcoeff, dqcoeff, pd->dequant, - eob, scan_order->scan, - scan_order->iscan); + qcoeff, dqcoeff, pd->dequant, eob, + scan_order->scan, scan_order->iscan); break; case TX_16X16: vpx_highbd_fdct16x16(src_diff, coeff, diff_stride); vp9_highbd_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp, p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); + pd->dequant, eob, scan_order->scan, + scan_order->iscan); break; case TX_8X8: vpx_highbd_fdct8x8(src_diff, coeff, diff_stride); vp9_highbd_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp, p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); + pd->dequant, eob, scan_order->scan, + scan_order->iscan); break; case TX_4X4: x->fwd_txm4x4(src_diff, coeff, diff_stride); 
vp9_highbd_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp, p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); + pd->dequant, eob, scan_order->scan, + scan_order->iscan); break; - default: - assert(0); + default: assert(0); } return; } @@ -390,31 +393,26 @@ void vp9_xform_quant_fp(MACROBLOCK *x, int plane, int block, case TX_16X16: vpx_fdct16x16(src_diff, coeff, diff_stride); vp9_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp, - p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); + p->quant_fp, p->quant_shift, qcoeff, dqcoeff, pd->dequant, + eob, scan_order->scan, scan_order->iscan); break; case TX_8X8: - vp9_fdct8x8_quant(src_diff, diff_stride, coeff, 64, - x->skip_block, p->zbin, p->round_fp, - p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); + vp9_fdct8x8_quant(src_diff, diff_stride, coeff, 64, x->skip_block, + p->zbin, p->round_fp, p->quant_fp, p->quant_shift, + qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan, + scan_order->iscan); break; case TX_4X4: x->fwd_txm4x4(src_diff, coeff, diff_stride); vp9_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp, - p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; - default: - assert(0); + p->quant_fp, p->quant_shift, qcoeff, dqcoeff, pd->dequant, + eob, scan_order->scan, scan_order->iscan); break; + default: assert(0); break; } } -void vp9_xform_quant_dc(MACROBLOCK *x, int plane, int block, +void vp9_xform_quant_dc(MACROBLOCK *x, int plane, int block, int row, int col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size) { MACROBLOCKD *const xd = &x->e_mbd; const struct macroblock_plane *const p = &x->plane[plane]; @@ -424,12 +422,8 @@ void vp9_xform_quant_dc(MACROBLOCK *x, int plane, int block, tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); uint16_t *const 
eob = &p->eobs[block]; const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; - int i, j; const int16_t *src_diff; - - txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j); - src_diff = &p->src_diff[4 * (j * diff_stride + i)]; - + src_diff = &p->src_diff[4 * (row * diff_stride + col)]; #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { switch (tx_size) { @@ -442,23 +436,22 @@ void vp9_xform_quant_dc(MACROBLOCK *x, int plane, int block, case TX_16X16: vpx_highbd_fdct16x16_1(src_diff, coeff, diff_stride); vpx_highbd_quantize_dc(coeff, 256, x->skip_block, p->round, - p->quant_fp[0], qcoeff, dqcoeff, - pd->dequant[0], eob); + p->quant_fp[0], qcoeff, dqcoeff, pd->dequant[0], + eob); break; case TX_8X8: vpx_highbd_fdct8x8_1(src_diff, coeff, diff_stride); vpx_highbd_quantize_dc(coeff, 64, x->skip_block, p->round, - p->quant_fp[0], qcoeff, dqcoeff, - pd->dequant[0], eob); + p->quant_fp[0], qcoeff, dqcoeff, pd->dequant[0], + eob); break; case TX_4X4: x->fwd_txm4x4(src_diff, coeff, diff_stride); vpx_highbd_quantize_dc(coeff, 16, x->skip_block, p->round, - p->quant_fp[0], qcoeff, dqcoeff, - pd->dequant[0], eob); + p->quant_fp[0], qcoeff, dqcoeff, pd->dequant[0], + eob); break; - default: - assert(0); + default: assert(0); } return; } @@ -467,35 +460,29 @@ void vp9_xform_quant_dc(MACROBLOCK *x, int plane, int block, switch (tx_size) { case TX_32X32: vpx_fdct32x32_1(src_diff, coeff, diff_stride); - vpx_quantize_dc_32x32(coeff, x->skip_block, p->round, - p->quant_fp[0], qcoeff, dqcoeff, - pd->dequant[0], eob); + vpx_quantize_dc_32x32(coeff, x->skip_block, p->round, p->quant_fp[0], + qcoeff, dqcoeff, pd->dequant[0], eob); break; case TX_16X16: vpx_fdct16x16_1(src_diff, coeff, diff_stride); - vpx_quantize_dc(coeff, 256, x->skip_block, p->round, - p->quant_fp[0], qcoeff, dqcoeff, - pd->dequant[0], eob); + vpx_quantize_dc(coeff, 256, x->skip_block, p->round, p->quant_fp[0], + qcoeff, dqcoeff, pd->dequant[0], eob); break; case 
TX_8X8: vpx_fdct8x8_1(src_diff, coeff, diff_stride); - vpx_quantize_dc(coeff, 64, x->skip_block, p->round, - p->quant_fp[0], qcoeff, dqcoeff, - pd->dequant[0], eob); + vpx_quantize_dc(coeff, 64, x->skip_block, p->round, p->quant_fp[0], + qcoeff, dqcoeff, pd->dequant[0], eob); break; case TX_4X4: x->fwd_txm4x4(src_diff, coeff, diff_stride); - vpx_quantize_dc(coeff, 16, x->skip_block, p->round, - p->quant_fp[0], qcoeff, dqcoeff, - pd->dequant[0], eob); - break; - default: - assert(0); + vpx_quantize_dc(coeff, 16, x->skip_block, p->round, p->quant_fp[0], + qcoeff, dqcoeff, pd->dequant[0], eob); break; + default: assert(0); break; } } -void vp9_xform_quant(MACROBLOCK *x, int plane, int block, +void vp9_xform_quant(MACROBLOCK *x, int plane, int block, int row, int col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size) { MACROBLOCKD *const xd = &x->e_mbd; const struct macroblock_plane *const p = &x->plane[plane]; @@ -506,44 +493,41 @@ void vp9_xform_quant(MACROBLOCK *x, int plane, int block, tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); uint16_t *const eob = &p->eobs[block]; const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; - int i, j; const int16_t *src_diff; - txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j); - src_diff = &p->src_diff[4 * (j * diff_stride + i)]; + src_diff = &p->src_diff[4 * (row * diff_stride + col)]; #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - switch (tx_size) { + switch (tx_size) { case TX_32X32: highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride); vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, qcoeff, - dqcoeff, pd->dequant, eob, - scan_order->scan, scan_order->iscan); + dqcoeff, pd->dequant, eob, scan_order->scan, + scan_order->iscan); break; case TX_16X16: vpx_highbd_fdct16x16(src_diff, coeff, diff_stride); vpx_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, 
qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); + pd->dequant, eob, scan_order->scan, + scan_order->iscan); break; case TX_8X8: vpx_highbd_fdct8x8(src_diff, coeff, diff_stride); vpx_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); + pd->dequant, eob, scan_order->scan, + scan_order->iscan); break; case TX_4X4: x->fwd_txm4x4(src_diff, coeff, diff_stride); vpx_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); + pd->dequant, eob, scan_order->scan, + scan_order->iscan); break; - default: - assert(0); + default: assert(0); } return; } @@ -559,47 +543,39 @@ void vp9_xform_quant(MACROBLOCK *x, int plane, int block, break; case TX_16X16: vpx_fdct16x16(src_diff, coeff, diff_stride); - vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, + vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, p->quant, + p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan, scan_order->iscan); break; case TX_8X8: vpx_fdct8x8(src_diff, coeff, diff_stride); - vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, + vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant, + p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan, scan_order->iscan); break; case TX_4X4: x->fwd_txm4x4(src_diff, coeff, diff_stride); - vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, + vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant, + p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan, scan_order->iscan); break; - default: - assert(0); - break; + default: 
assert(0); break; } } -static void encode_block(int plane, int block, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { +static void encode_block(int plane, int block, int row, int col, + BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) { struct encode_b_args *const args = arg; MACROBLOCK *const x = args->x; MACROBLOCKD *const xd = &x->e_mbd; - struct optimize_ctx *const ctx = args->ctx; struct macroblock_plane *const p = &x->plane[plane]; struct macroblockd_plane *const pd = &xd->plane[plane]; tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); - int i, j; uint8_t *dst; ENTROPY_CONTEXT *a, *l; - txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j); - dst = &pd->dst.buf[4 * j * pd->dst.stride + 4 * i]; - a = &ctx->ta[plane][i]; - l = &ctx->tl[plane][j]; + dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col]; + a = &args->ta[col]; + l = &args->tl[row]; // TODO(jingning): per transformed block zero forcing only enabled for // luma component. will integrate chroma components as well. 
@@ -618,17 +594,17 @@ static void encode_block(int plane, int block, BLOCK_SIZE plane_bsize, *a = *l = 0; return; } else { - vp9_xform_quant_fp(x, plane, block, plane_bsize, tx_size); + vp9_xform_quant_fp(x, plane, block, row, col, plane_bsize, tx_size); } } else { if (max_txsize_lookup[plane_bsize] == tx_size) { int txfm_blk_index = (plane << 2) + (block >> (tx_size << 1)); if (x->skip_txfm[txfm_blk_index] == SKIP_TXFM_NONE) { // full forward transform and quantization - vp9_xform_quant(x, plane, block, plane_bsize, tx_size); + vp9_xform_quant(x, plane, block, row, col, plane_bsize, tx_size); } else if (x->skip_txfm[txfm_blk_index] == SKIP_TXFM_AC_ONLY) { // fast path forward transform and quantization - vp9_xform_quant_dc(x, plane, block, plane_bsize, tx_size); + vp9_xform_quant_dc(x, plane, block, row, col, plane_bsize, tx_size); } else { // skip forward transform p->eobs[block] = 0; @@ -636,47 +612,44 @@ static void encode_block(int plane, int block, BLOCK_SIZE plane_bsize, return; } } else { - vp9_xform_quant(x, plane, block, plane_bsize, tx_size); + vp9_xform_quant(x, plane, block, row, col, plane_bsize, tx_size); } } } if (x->optimize && (!x->skip_recode || !x->skip_optimize)) { const int ctx = combine_entropy_contexts(*a, *l); - *a = *l = optimize_b(x, plane, block, tx_size, ctx) > 0; + *a = *l = vp9_optimize_b(x, plane, block, tx_size, ctx) > 0; } else { *a = *l = p->eobs[block] > 0; } - if (p->eobs[block]) - *(args->skip) = 0; + if (p->eobs[block]) *(args->skip) = 0; - if (x->skip_encode || p->eobs[block] == 0) - return; + if (x->skip_encode || p->eobs[block] == 0) return; #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { switch (tx_size) { case TX_32X32: - vp9_highbd_idct32x32_add(dqcoeff, dst, pd->dst.stride, - p->eobs[block], xd->bd); + vp9_highbd_idct32x32_add(dqcoeff, dst, pd->dst.stride, p->eobs[block], + xd->bd); break; case TX_16X16: - vp9_highbd_idct16x16_add(dqcoeff, dst, pd->dst.stride, - p->eobs[block], xd->bd); + 
vp9_highbd_idct16x16_add(dqcoeff, dst, pd->dst.stride, p->eobs[block], + xd->bd); break; case TX_8X8: - vp9_highbd_idct8x8_add(dqcoeff, dst, pd->dst.stride, - p->eobs[block], xd->bd); + vp9_highbd_idct8x8_add(dqcoeff, dst, pd->dst.stride, p->eobs[block], + xd->bd); break; case TX_4X4: // this is like vp9_short_idct4x4 but has a special case around eob<=1 // which is significant (not just an optimization) for the lossless // case. - x->highbd_itxm_add(dqcoeff, dst, pd->dst.stride, - p->eobs[block], xd->bd); + x->highbd_itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block], + xd->bd); break; - default: - assert(0 && "Invalid transform size"); + default: assert(0 && "Invalid transform size"); } return; } @@ -698,31 +671,28 @@ static void encode_block(int plane, int block, BLOCK_SIZE plane_bsize, // case. x->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); break; - default: - assert(0 && "Invalid transform size"); - break; + default: assert(0 && "Invalid transform size"); break; } } -static void encode_block_pass1(int plane, int block, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { +static void encode_block_pass1(int plane, int block, int row, int col, + BLOCK_SIZE plane_bsize, TX_SIZE tx_size, + void *arg) { MACROBLOCK *const x = (MACROBLOCK *)arg; MACROBLOCKD *const xd = &x->e_mbd; struct macroblock_plane *const p = &x->plane[plane]; struct macroblockd_plane *const pd = &xd->plane[plane]; tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); - int i, j; uint8_t *dst; - txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j); - dst = &pd->dst.buf[4 * j * pd->dst.stride + 4 * i]; + dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col]; - vp9_xform_quant(x, plane, block, plane_bsize, tx_size); + vp9_xform_quant(x, plane, block, row, col, plane_bsize, tx_size); if (p->eobs[block] > 0) { #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - x->highbd_itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block], xd->bd); - 
return; + x->highbd_itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block], xd->bd); + return; } #endif // CONFIG_VP9_HIGHBITDEPTH x->itxm_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]); @@ -739,33 +709,37 @@ void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) { MACROBLOCKD *const xd = &x->e_mbd; struct optimize_ctx ctx; MODE_INFO *mi = xd->mi[0]; - struct encode_b_args arg = {x, &ctx, &mi->skip}; + struct encode_b_args arg = { x, 1, NULL, NULL, &mi->skip }; int plane; mi->skip = 1; - if (x->skip) - return; + if (x->skip) return; for (plane = 0; plane < MAX_MB_PLANE; ++plane) { - if (!x->skip_recode) - vp9_subtract_plane(x, bsize, plane); + if (!x->skip_recode) vp9_subtract_plane(x, bsize, plane); if (x->optimize && (!x->skip_recode || !x->skip_optimize)) { - const struct macroblockd_plane* const pd = &xd->plane[plane]; + const struct macroblockd_plane *const pd = &xd->plane[plane]; const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd) : mi->tx_size; - vp9_get_entropy_contexts(bsize, tx_size, pd, - ctx.ta[plane], ctx.tl[plane]); + vp9_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], + ctx.tl[plane]); + arg.enable_coeff_opt = 1; + } else { + arg.enable_coeff_opt = 0; } + arg.ta = ctx.ta[plane]; + arg.tl = ctx.tl[plane]; vp9_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block, &arg); } } -void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { - struct encode_b_args* const args = arg; +void vp9_encode_block_intra(int plane, int block, int row, int col, + BLOCK_SIZE plane_bsize, TX_SIZE tx_size, + void *arg) { + struct encode_b_args *const args = arg; MACROBLOCK *const x = args->x; MACROBLOCKD *const xd = &x->e_mbd; MODE_INFO *mi = xd->mi[0]; @@ -784,11 +758,17 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, uint16_t *eob = &p->eobs[block]; const int src_stride = p->src.stride; const int dst_stride = pd->dst.stride; - int i, j; - txfrm_block_to_raster_xy(plane_bsize, 
tx_size, block, &i, &j); - dst = &pd->dst.buf[4 * (j * dst_stride + i)]; - src = &p->src.buf[4 * (j * src_stride + i)]; - src_diff = &p->src_diff[4 * (j * diff_stride + i)]; + ENTROPY_CONTEXT *a = NULL; + ENTROPY_CONTEXT *l = NULL; + int entropy_ctx = 0; + dst = &pd->dst.buf[4 * (row * dst_stride + col)]; + src = &p->src.buf[4 * (row * src_stride + col)]; + src_diff = &p->src_diff[4 * (row * diff_stride + col)]; + if (args->enable_coeff_opt) { + a = &args->ta[col]; + l = &args->tl[row]; + entropy_ctx = combine_entropy_contexts(*a, *l); + } if (tx_size == TX_4X4) { tx_type = get_tx_type_4x4(get_plane_type(plane), xd, block); @@ -805,16 +785,16 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, } vp9_predict_intra_block(xd, bwl, tx_size, mode, x->skip_encode ? src : dst, - x->skip_encode ? src_stride : dst_stride, - dst, dst_stride, i, j, plane); + x->skip_encode ? src_stride : dst_stride, dst, + dst_stride, col, row, plane); #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { switch (tx_size) { case TX_32X32: if (!x->skip_recode) { - vpx_highbd_subtract_block(32, 32, src_diff, diff_stride, - src, src_stride, dst, dst_stride, xd->bd); + vpx_highbd_subtract_block(32, 32, src_diff, diff_stride, src, + src_stride, dst, dst_stride, xd->bd); highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride); vpx_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, @@ -827,34 +807,34 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, break; case TX_16X16: if (!x->skip_recode) { - vpx_highbd_subtract_block(16, 16, src_diff, diff_stride, - src, src_stride, dst, dst_stride, xd->bd); + vpx_highbd_subtract_block(16, 16, src_diff, diff_stride, src, + src_stride, dst, dst_stride, xd->bd); if (tx_type == DCT_DCT) vpx_highbd_fdct16x16(src_diff, coeff, diff_stride); else vp9_highbd_fht16x16(src_diff, coeff, diff_stride, tx_type); vpx_highbd_quantize_b(coeff, 
256, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); + pd->dequant, eob, scan_order->scan, + scan_order->iscan); } if (!x->skip_encode && *eob) { - vp9_highbd_iht16x16_add(tx_type, dqcoeff, dst, dst_stride, - *eob, xd->bd); + vp9_highbd_iht16x16_add(tx_type, dqcoeff, dst, dst_stride, *eob, + xd->bd); } break; case TX_8X8: if (!x->skip_recode) { - vpx_highbd_subtract_block(8, 8, src_diff, diff_stride, - src, src_stride, dst, dst_stride, xd->bd); + vpx_highbd_subtract_block(8, 8, src_diff, diff_stride, src, + src_stride, dst, dst_stride, xd->bd); if (tx_type == DCT_DCT) vpx_highbd_fdct8x8(src_diff, coeff, diff_stride); else vp9_highbd_fht8x8(src_diff, coeff, diff_stride, tx_type); vpx_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); + pd->dequant, eob, scan_order->scan, + scan_order->iscan); } if (!x->skip_encode && *eob) { vp9_highbd_iht8x8_add(tx_type, dqcoeff, dst, dst_stride, *eob, @@ -863,16 +843,16 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, break; case TX_4X4: if (!x->skip_recode) { - vpx_highbd_subtract_block(4, 4, src_diff, diff_stride, - src, src_stride, dst, dst_stride, xd->bd); + vpx_highbd_subtract_block(4, 4, src_diff, diff_stride, src, + src_stride, dst, dst_stride, xd->bd); if (tx_type != DCT_DCT) vp9_highbd_fht4x4(src_diff, coeff, diff_stride, tx_type); else x->fwd_txm4x4(src_diff, coeff, diff_stride); vpx_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); + pd->dequant, eob, scan_order->scan, + scan_order->iscan); } if (!x->skip_encode && *eob) { @@ -886,12 +866,9 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, } } break; - default: - assert(0); - return; + default: 
assert(0); return; } - if (*eob) - *(args->skip) = 0; + if (*eob) *(args->skip) = 0; return; } #endif // CONFIG_VP9_HIGHBITDEPTH @@ -899,57 +876,65 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, switch (tx_size) { case TX_32X32: if (!x->skip_recode) { - vpx_subtract_block(32, 32, src_diff, diff_stride, - src, src_stride, dst, dst_stride); + vpx_subtract_block(32, 32, src_diff, diff_stride, src, src_stride, dst, + dst_stride); fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride); vpx_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan, scan_order->iscan); } + if (args->enable_coeff_opt && !x->skip_recode) { + *a = *l = vp9_optimize_b(x, plane, block, tx_size, entropy_ctx) > 0; + } if (!x->skip_encode && *eob) vp9_idct32x32_add(dqcoeff, dst, dst_stride, *eob); break; case TX_16X16: if (!x->skip_recode) { - vpx_subtract_block(16, 16, src_diff, diff_stride, - src, src_stride, dst, dst_stride); + vpx_subtract_block(16, 16, src_diff, diff_stride, src, src_stride, dst, + dst_stride); vp9_fht16x16(src_diff, coeff, diff_stride, tx_type); - vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, - p->quant, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, scan_order->scan, - scan_order->iscan); + vpx_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round, p->quant, + p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob, + scan_order->scan, scan_order->iscan); + } + if (args->enable_coeff_opt && !x->skip_recode) { + *a = *l = vp9_optimize_b(x, plane, block, tx_size, entropy_ctx) > 0; } if (!x->skip_encode && *eob) vp9_iht16x16_add(tx_type, dqcoeff, dst, dst_stride, *eob); break; case TX_8X8: if (!x->skip_recode) { - vpx_subtract_block(8, 8, src_diff, diff_stride, - src, src_stride, dst, dst_stride); + vpx_subtract_block(8, 8, src_diff, diff_stride, src, src_stride, dst, + dst_stride); vp9_fht8x8(src_diff, coeff, diff_stride, tx_type); 
vpx_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant, - p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, scan_order->scan, - scan_order->iscan); + p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob, + scan_order->scan, scan_order->iscan); + } + if (args->enable_coeff_opt && !x->skip_recode) { + *a = *l = vp9_optimize_b(x, plane, block, tx_size, entropy_ctx) > 0; } if (!x->skip_encode && *eob) vp9_iht8x8_add(tx_type, dqcoeff, dst, dst_stride, *eob); break; case TX_4X4: if (!x->skip_recode) { - vpx_subtract_block(4, 4, src_diff, diff_stride, - src, src_stride, dst, dst_stride); + vpx_subtract_block(4, 4, src_diff, diff_stride, src, src_stride, dst, + dst_stride); if (tx_type != DCT_DCT) vp9_fht4x4(src_diff, coeff, diff_stride, tx_type); else x->fwd_txm4x4(src_diff, coeff, diff_stride); vpx_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant, - p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, scan_order->scan, - scan_order->iscan); + p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob, + scan_order->scan, scan_order->iscan); + } + if (args->enable_coeff_opt && !x->skip_recode) { + *a = *l = vp9_optimize_b(x, plane, block, tx_size, entropy_ctx) > 0; } - if (!x->skip_encode && *eob) { if (tx_type == DCT_DCT) // this is like vp9_short_idct4x4 but has a special case around eob<=1 @@ -960,17 +945,27 @@ void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, vp9_iht4x4_16_add(dqcoeff, dst, dst_stride, tx_type); } break; - default: - assert(0); - break; + default: assert(0); break; } - if (*eob) - *(args->skip) = 0; + if (*eob) *(args->skip) = 0; } -void vp9_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) { +void vp9_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane, + int enable_optimize_b) { const MACROBLOCKD *const xd = &x->e_mbd; - struct encode_b_args arg = {x, NULL, &xd->mi[0]->skip}; + struct optimize_ctx ctx; + struct encode_b_args arg = { x, enable_optimize_b, ctx.ta[plane], + 
ctx.tl[plane], &xd->mi[0]->skip }; + + if (enable_optimize_b && x->optimize && + (!x->skip_recode || !x->skip_optimize)) { + const struct macroblockd_plane *const pd = &xd->plane[plane]; + const TX_SIZE tx_size = + plane ? get_uv_tx_size(xd->mi[0], pd) : xd->mi[0]->tx_size; + vp9_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]); + } else { + arg.enable_coeff_opt = 0; + } vp9_foreach_transformed_block_in_plane(xd, bsize, plane, vp9_encode_block_intra, &arg); diff --git a/libs/libvpx/vp9/encoder/vp9_encodemb.h b/libs/libvpx/vp9/encoder/vp9_encodemb.h index 97df8a66be..cf943bedfd 100644 --- a/libs/libvpx/vp9/encoder/vp9_encodemb.h +++ b/libs/libvpx/vp9/encoder/vp9_encodemb.h @@ -20,24 +20,29 @@ extern "C" { struct encode_b_args { MACROBLOCK *x; - struct optimize_ctx *ctx; + int enable_coeff_opt; + ENTROPY_CONTEXT *ta; + ENTROPY_CONTEXT *tl; int8_t *skip; }; +int vp9_optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size, + int ctx); void vp9_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize); void vp9_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize); -void vp9_xform_quant_fp(MACROBLOCK *x, int plane, int block, +void vp9_xform_quant_fp(MACROBLOCK *x, int plane, int block, int row, int col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size); -void vp9_xform_quant_dc(MACROBLOCK *x, int plane, int block, +void vp9_xform_quant_dc(MACROBLOCK *x, int plane, int block, int row, int col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size); -void vp9_xform_quant(MACROBLOCK *x, int plane, int block, +void vp9_xform_quant(MACROBLOCK *x, int plane, int block, int row, int col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size); void vp9_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane); -void vp9_encode_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg); +void vp9_encode_block_intra(int plane, int block, int row, int col, + BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg); -void vp9_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE 
bsize, int plane); +void vp9_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane, + int enable_optimize_b); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp9/encoder/vp9_encodemv.c b/libs/libvpx/vp9/encoder/vp9_encodemv.c index 8f4d80cbbd..874a8e4b98 100644 --- a/libs/libvpx/vp9/encoder/vp9_encodemv.c +++ b/libs/libvpx/vp9/encoder/vp9_encodemv.c @@ -21,24 +21,22 @@ static struct vp9_token mv_joint_encodings[MV_JOINTS]; static struct vp9_token mv_class_encodings[MV_CLASSES]; static struct vp9_token mv_fp_encodings[MV_FP_SIZE]; -static struct vp9_token mv_class0_encodings[CLASS0_SIZE]; void vp9_entropy_mv_init(void) { vp9_tokens_from_tree(mv_joint_encodings, vp9_mv_joint_tree); vp9_tokens_from_tree(mv_class_encodings, vp9_mv_class_tree); - vp9_tokens_from_tree(mv_class0_encodings, vp9_mv_class0_tree); vp9_tokens_from_tree(mv_fp_encodings, vp9_mv_fp_tree); } -static void encode_mv_component(vpx_writer* w, int comp, - const nmv_component* mvcomp, int usehp) { +static void encode_mv_component(vpx_writer *w, int comp, + const nmv_component *mvcomp, int usehp) { int offset; const int sign = comp < 0; const int mag = sign ? 
-comp : comp; const int mv_class = vp9_get_mv_class(mag - 1, &offset); - const int d = offset >> 3; // int mv data - const int fr = (offset >> 1) & 3; // fractional mv data - const int hp = offset & 1; // high precision mv data + const int d = offset >> 3; // int mv data + const int fr = (offset >> 1) & 3; // fractional mv data + const int hp = offset & 1; // high precision mv data assert(comp != 0); @@ -51,35 +49,32 @@ static void encode_mv_component(vpx_writer* w, int comp, // Integer bits if (mv_class == MV_CLASS_0) { - vp9_write_token(w, vp9_mv_class0_tree, mvcomp->class0, - &mv_class0_encodings[d]); + vpx_write(w, d, mvcomp->class0[0]); } else { int i; const int n = mv_class + CLASS0_BITS - 1; // number of bits - for (i = 0; i < n; ++i) - vpx_write(w, (d >> i) & 1, mvcomp->bits[i]); + for (i = 0; i < n; ++i) vpx_write(w, (d >> i) & 1, mvcomp->bits[i]); } // Fractional bits vp9_write_token(w, vp9_mv_fp_tree, - mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp, + mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp, &mv_fp_encodings[fr]); // High precision bit if (usehp) - vpx_write(w, hp, - mv_class == MV_CLASS_0 ? mvcomp->class0_hp : mvcomp->hp); + vpx_write(w, hp, mv_class == MV_CLASS_0 ? 
mvcomp->class0_hp : mvcomp->hp); } - static void build_nmv_component_cost_table(int *mvcost, - const nmv_component* const mvcomp, + const nmv_component *const mvcomp, int usehp) { - int i, v; int sign_cost[2], class_cost[MV_CLASSES], class0_cost[CLASS0_SIZE]; int bits_cost[MV_OFFSET_BITS][2]; int class0_fp_cost[CLASS0_SIZE][MV_FP_SIZE], fp_cost[MV_FP_SIZE]; int class0_hp_cost[2], hp_cost[2]; + int i; + int c, o; sign_cost[0] = vp9_cost_zero(mvcomp->sign); sign_cost[1] = vp9_cost_one(mvcomp->sign); @@ -94,44 +89,55 @@ static void build_nmv_component_cost_table(int *mvcost, vp9_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp9_mv_fp_tree); vp9_cost_tokens(fp_cost, mvcomp->fp, vp9_mv_fp_tree); - if (usehp) { - class0_hp_cost[0] = vp9_cost_zero(mvcomp->class0_hp); - class0_hp_cost[1] = vp9_cost_one(mvcomp->class0_hp); - hp_cost[0] = vp9_cost_zero(mvcomp->hp); - hp_cost[1] = vp9_cost_one(mvcomp->hp); - } + // Always build the hp costs to avoid an uninitialized warning from gcc + class0_hp_cost[0] = vp9_cost_zero(mvcomp->class0_hp); + class0_hp_cost[1] = vp9_cost_one(mvcomp->class0_hp); + hp_cost[0] = vp9_cost_zero(mvcomp->hp); + hp_cost[1] = vp9_cost_one(mvcomp->hp); + mvcost[0] = 0; - for (v = 1; v <= MV_MAX; ++v) { - int z, c, o, d, e, f, cost = 0; - z = v - 1; - c = vp9_get_mv_class(z, &o); - cost += class_cost[c]; - d = (o >> 3); /* int mv data */ - f = (o >> 1) & 3; /* fractional pel mv data */ - e = (o & 1); /* high precision mv data */ - if (c == MV_CLASS_0) { - cost += class0_cost[d]; - } else { - int i, b; - b = c + CLASS0_BITS - 1; /* number of bits */ - for (i = 0; i < b; ++i) - cost += bits_cost[i][((d >> i) & 1)]; - } - if (c == MV_CLASS_0) { - cost += class0_fp_cost[d][f]; - } else { - cost += fp_cost[f]; - } + // MV_CLASS_0 + for (o = 0; o < (CLASS0_SIZE << 3); ++o) { + int d, e, f; + int cost = class_cost[MV_CLASS_0]; + int v = o + 1; + d = (o >> 3); /* int mv data */ + f = (o >> 1) & 3; /* fractional pel mv data */ + cost += class0_cost[d]; + 
cost += class0_fp_cost[d][f]; if (usehp) { - if (c == MV_CLASS_0) { - cost += class0_hp_cost[e]; - } else { - cost += hp_cost[e]; - } + e = (o & 1); /* high precision mv data */ + cost += class0_hp_cost[e]; } mvcost[v] = cost + sign_cost[0]; mvcost[-v] = cost + sign_cost[1]; } + for (c = MV_CLASS_1; c < MV_CLASSES; ++c) { + int d; + for (d = 0; d < (1 << c); ++d) { + int f; + int whole_cost = class_cost[c]; + int b = c + CLASS0_BITS - 1; /* number of bits */ + for (i = 0; i < b; ++i) whole_cost += bits_cost[i][((d >> i) & 1)]; + for (f = 0; f < 4; ++f) { + int cost = whole_cost + fp_cost[f]; + int v = (CLASS0_SIZE << (c + 2)) + d * 8 + f * 2 /* + e */ + 1; + if (usehp) { + mvcost[v] = cost + hp_cost[0] + sign_cost[0]; + mvcost[-v] = cost + hp_cost[0] + sign_cost[1]; + if (v + 1 > MV_MAX) break; + mvcost[v + 1] = cost + hp_cost[1] + sign_cost[0]; + mvcost[-v - 1] = cost + hp_cost[1] + sign_cost[1]; + } else { + mvcost[v] = cost + sign_cost[0]; + mvcost[-v] = cost + sign_cost[1]; + if (v + 1 > MV_MAX) break; + mvcost[v + 1] = cost + sign_cost[0]; + mvcost[-v - 1] = cost + sign_cost[1]; + } + } + } + } } static int update_mv(vpx_writer *w, const unsigned int ct[2], vpx_prob *cur_p, @@ -150,8 +156,8 @@ static int update_mv(vpx_writer *w, const unsigned int ct[2], vpx_prob *cur_p, static void write_mv_update(const vpx_tree_index *tree, vpx_prob probs[/*n - 1*/], - const unsigned int counts[/*n - 1*/], - int n, vpx_writer *w) { + const unsigned int counts[/*n - 1*/], int n, + vpx_writer *w) { int i; unsigned int branch_ct[32][2]; @@ -201,11 +207,9 @@ void vp9_write_nmv_probs(VP9_COMMON *cm, int usehp, vpx_writer *w, } } -void vp9_encode_mv(VP9_COMP* cpi, vpx_writer* w, - const MV* mv, const MV* ref, - const nmv_context* mvctx, int usehp) { - const MV diff = {mv->row - ref->row, - mv->col - ref->col}; +void vp9_encode_mv(VP9_COMP *cpi, vpx_writer *w, const MV *mv, const MV *ref, + const nmv_context *mvctx, int usehp) { + const MV diff = { mv->row - ref->row, mv->col - 
ref->col }; const MV_JOINT_TYPE j = vp9_get_mv_joint(&diff); usehp = usehp && use_mv_hp(ref); @@ -225,21 +229,20 @@ void vp9_encode_mv(VP9_COMP* cpi, vpx_writer* w, } void vp9_build_nmv_cost_table(int *mvjoint, int *mvcost[2], - const nmv_context* ctx, int usehp) { + const nmv_context *ctx, int usehp) { vp9_cost_tokens(mvjoint, ctx->joints, vp9_mv_joint_tree); build_nmv_component_cost_table(mvcost[0], &ctx->comps[0], usehp); build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usehp); } static void inc_mvs(const MODE_INFO *mi, const MB_MODE_INFO_EXT *mbmi_ext, - const int_mv mvs[2], - nmv_context_counts *counts) { + const int_mv mvs[2], nmv_context_counts *counts) { int i; for (i = 0; i < 1 + has_second_ref(mi); ++i) { const MV *ref = &mbmi_ext->ref_mvs[mi->ref_frame[i]][0].as_mv; - const MV diff = {mvs[i].as_mv.row - ref->row, - mvs[i].as_mv.col - ref->col}; + const MV diff = { mvs[i].as_mv.row - ref->row, + mvs[i].as_mv.col - ref->col }; vp9_inc_mv(&diff, counts); } } @@ -262,8 +265,6 @@ void vp9_update_mv_count(ThreadData *td) { } } } else { - if (mi->mode == NEWMV) - inc_mvs(mi, mbmi_ext, mi->mv, &td->counts->mv); + if (mi->mode == NEWMV) inc_mvs(mi, mbmi_ext, mi->mv, &td->counts->mv); } } - diff --git a/libs/libvpx/vp9/encoder/vp9_encodemv.h b/libs/libvpx/vp9/encoder/vp9_encodemv.h index 5fb114cc1e..ad77b8154f 100644 --- a/libs/libvpx/vp9/encoder/vp9_encodemv.h +++ b/libs/libvpx/vp9/encoder/vp9_encodemv.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP9_ENCODER_VP9_ENCODEMV_H_ #define VP9_ENCODER_VP9_ENCODEMV_H_ @@ -23,11 +22,11 @@ void vp9_entropy_mv_init(void); void vp9_write_nmv_probs(VP9_COMMON *cm, int usehp, vpx_writer *w, nmv_context_counts *const counts); -void vp9_encode_mv(VP9_COMP *cpi, vpx_writer* w, const MV* mv, const MV* ref, - const nmv_context* mvctx, int usehp); +void vp9_encode_mv(VP9_COMP *cpi, vpx_writer *w, const MV *mv, const MV *ref, + const nmv_context *mvctx, int usehp); void vp9_build_nmv_cost_table(int *mvjoint, int *mvcost[2], - const nmv_context* mvctx, int usehp); + const nmv_context *mvctx, int usehp); void vp9_update_mv_count(ThreadData *td); diff --git a/libs/libvpx/vp9/encoder/vp9_encoder.c b/libs/libvpx/vp9/encoder/vp9_encoder.c index 0f4f93d9c6..e8f5ec55ab 100644 --- a/libs/libvpx/vp9/encoder/vp9_encoder.c +++ b/libs/libvpx/vp9/encoder/vp9_encoder.c @@ -16,7 +16,7 @@ #include "./vpx_config.h" #include "./vpx_dsp_rtcd.h" #include "./vpx_scale_rtcd.h" -#include "vpx/internal/vpx_psnr.h" +#include "vpx_dsp/psnr.h" #include "vpx_dsp/vpx_dsp_common.h" #include "vpx_dsp/vpx_filter.h" #if CONFIG_INTERNAL_STATS @@ -36,6 +36,7 @@ #include "vp9/common/vp9_reconintra.h" #include "vp9/common/vp9_tile_common.h" +#include "vp9/encoder/vp9_alt_ref_aq.h" #include "vp9/encoder/vp9_aq_360.h" #include "vp9/encoder/vp9_aq_complexity.h" #include "vp9/encoder/vp9_aq_cyclicrefresh.h" @@ -45,6 +46,7 @@ #include "vp9/encoder/vp9_encodeframe.h" #include "vp9/encoder/vp9_encodemv.h" #include "vp9/encoder/vp9_encoder.h" +#include "vp9/encoder/vp9_extend.h" #include "vp9/encoder/vp9_ethread.h" #include "vp9/encoder/vp9_firstpass.h" #include "vp9/encoder/vp9_mbgraph.h" @@ -62,12 +64,12 @@ #define AM_SEGMENT_ID_INACTIVE 7 #define AM_SEGMENT_ID_ACTIVE 0 -#define ALTREF_HIGH_PRECISION_MV 1 // Whether to use high precision mv - // for altref computation. -#define HIGH_PRECISION_MV_QTHRESH 200 // Q threshold for high precision - // mv. 
Choose a very high value for - // now so that HIGH_PRECISION is always - // chosen. +#define ALTREF_HIGH_PRECISION_MV 1 // Whether to use high precision mv + // for altref computation. +#define HIGH_PRECISION_MV_QTHRESH 200 // Q threshold for high precision + // mv. Choose a very high value for + // now so that HIGH_PRECISION is always + // chosen. // #define OUTPUT_YUV_REC #ifdef OUTPUT_YUV_DENOISED @@ -86,6 +88,46 @@ FILE *kf_list; FILE *keyfile; #endif +#ifdef ENABLE_KF_DENOISE +// Test condition for spatial denoise of source. +static int is_spatial_denoise_enabled(VP9_COMP *cpi) { + VP9_COMMON *const cm = &cpi->common; + const VP9EncoderConfig *const oxcf = &cpi->oxcf; + + return (oxcf->pass != 1) && !is_lossless_requested(&cpi->oxcf) && + frame_is_intra_only(cm); +} +#endif + +// Test for whether to calculate metrics for the frame. +static int is_psnr_calc_enabled(VP9_COMP *cpi) { + VP9_COMMON *const cm = &cpi->common; + const VP9EncoderConfig *const oxcf = &cpi->oxcf; + + return cpi->b_calculate_psnr && (oxcf->pass != 1) && cm->show_frame; +} + +/* clang-format off */ +static const Vp9LevelSpec vp9_level_defs[VP9_LEVELS] = { + { LEVEL_1, 829440, 36864, 200, 400, 2, 1, 4, 8 }, + { LEVEL_1_1, 2764800, 73728, 800, 1000, 2, 1, 4, 8 }, + { LEVEL_2, 4608000, 122880, 1800, 1500, 2, 1, 4, 8 }, + { LEVEL_2_1, 9216000, 245760, 3600, 2800, 2, 2, 4, 8 }, + { LEVEL_3, 20736000, 552960, 7200, 6000, 2, 4, 4, 8 }, + { LEVEL_3_1, 36864000, 983040, 12000, 10000, 2, 4, 4, 8 }, + { LEVEL_4, 83558400, 2228224, 18000, 16000, 4, 4, 4, 8 }, + { LEVEL_4_1, 160432128, 2228224, 30000, 18000, 4, 4, 5, 6 }, + { LEVEL_5, 311951360, 8912896, 60000, 36000, 6, 8, 6, 4 }, + { LEVEL_5_1, 588251136, 8912896, 120000, 46000, 8, 8, 10, 4 }, + // TODO(huisu): update max_cpb_size for level 5_2 ~ 6_2 when + // they are finalized (currently TBD). 
+ { LEVEL_5_2, 1176502272, 8912896, 180000, 0, 8, 8, 10, 4 }, + { LEVEL_6, 1176502272, 35651584, 180000, 0, 8, 16, 10, 4 }, + { LEVEL_6_1, 2353004544u, 35651584, 240000, 0, 8, 16, 10, 4 }, + { LEVEL_6_2, 4706009088u, 35651584, 480000, 0, 8, 16, 10, 4 }, +}; +/* clang-format on */ + static INLINE void Scale2Ratio(VPX_SCALING mode, int *hr, int *hs) { switch (mode) { case NORMAL: @@ -99,15 +141,15 @@ static INLINE void Scale2Ratio(VPX_SCALING mode, int *hr, int *hs) { case THREEFIVE: *hr = 3; *hs = 5; - break; + break; case ONETWO: *hr = 1; *hs = 2; - break; + break; default: *hr = 1; *hs = 1; - assert(0); + assert(0); break; } } @@ -116,11 +158,16 @@ static INLINE void Scale2Ratio(VPX_SCALING mode, int *hr, int *hs) { // so memset cannot be used, instead only inactive blocks should be reset. static void suppress_active_map(VP9_COMP *cpi) { unsigned char *const seg_map = cpi->segmentation_map; - int i; - if (cpi->active_map.enabled || cpi->active_map.update) - for (i = 0; i < cpi->common.mi_rows * cpi->common.mi_cols; ++i) + + if (cpi->active_map.enabled || cpi->active_map.update) { + const int rows = cpi->common.mi_rows; + const int cols = cpi->common.mi_cols; + int i; + + for (i = 0; i < rows * cols; ++i) if (seg_map[i] == AM_SEGMENT_ID_INACTIVE) seg_map[i] = AM_SEGMENT_ID_ACTIVE; + } } static void apply_active_map(VP9_COMP *cpi) { @@ -145,8 +192,8 @@ static void apply_active_map(VP9_COMP *cpi) { vp9_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF); // Setting the data to -MAX_LOOP_FILTER will result in the computed loop // filter level being zero regardless of the value of seg->abs_delta. 
- vp9_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, - SEG_LVL_ALT_LF, -MAX_LOOP_FILTER); + vp9_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF, + -MAX_LOOP_FILTER); } else { vp9_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP); vp9_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF); @@ -159,9 +206,40 @@ static void apply_active_map(VP9_COMP *cpi) { } } -int vp9_set_active_map(VP9_COMP* cpi, - unsigned char* new_map_16x16, - int rows, +static void init_level_info(Vp9LevelInfo *level_info) { + Vp9LevelStats *const level_stats = &level_info->level_stats; + Vp9LevelSpec *const level_spec = &level_info->level_spec; + + memset(level_stats, 0, sizeof(*level_stats)); + memset(level_spec, 0, sizeof(*level_spec)); + level_spec->level = LEVEL_UNKNOWN; + level_spec->min_altref_distance = INT_MAX; +} + +VP9_LEVEL vp9_get_level(const Vp9LevelSpec *const level_spec) { + int i; + const Vp9LevelSpec *this_level; + + vpx_clear_system_state(); + + for (i = 0; i < VP9_LEVELS; ++i) { + this_level = &vp9_level_defs[i]; + if ((double)level_spec->max_luma_sample_rate * (1 + SAMPLE_RATE_GRACE_P) > + (double)this_level->max_luma_sample_rate || + level_spec->max_luma_picture_size > this_level->max_luma_picture_size || + level_spec->average_bitrate > this_level->average_bitrate || + level_spec->max_cpb_size > this_level->max_cpb_size || + level_spec->compression_ratio < this_level->compression_ratio || + level_spec->max_col_tiles > this_level->max_col_tiles || + level_spec->min_altref_distance < this_level->min_altref_distance || + level_spec->max_ref_frame_buffers > this_level->max_ref_frame_buffers) + continue; + break; + } + return (i == VP9_LEVELS) ? 
LEVEL_UNKNOWN : vp9_level_defs[i].level; +} + +int vp9_set_active_map(VP9_COMP *cpi, unsigned char *new_map_16x16, int rows, int cols) { if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) { unsigned char *const active_map_8x8 = cpi->active_map.map; @@ -188,13 +266,11 @@ int vp9_set_active_map(VP9_COMP* cpi, } } -int vp9_get_active_map(VP9_COMP* cpi, - unsigned char* new_map_16x16, - int rows, +int vp9_get_active_map(VP9_COMP *cpi, unsigned char *new_map_16x16, int rows, int cols) { if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols && new_map_16x16) { - unsigned char* const seg_map_8x8 = cpi->segmentation_map; + unsigned char *const seg_map_8x8 = cpi->segmentation_map; const int mi_rows = cpi->common.mi_rows; const int mi_cols = cpi->common.mi_cols; memset(new_map_16x16, !cpi->active_map.enabled, rows * cols); @@ -237,13 +313,11 @@ static void setup_frame(VP9_COMP *cpi) { if (frame_is_intra_only(cm) || cm->error_resilient_mode) { vp9_setup_past_independence(cm); } else { - if (!cpi->use_svc) - cm->frame_context_idx = cpi->refresh_alt_ref_frame; + if (!cpi->use_svc) cm->frame_context_idx = cpi->refresh_alt_ref_frame; } if (cm->frame_type == KEY_FRAME) { - if (!is_two_pass_svc(cpi)) - cpi->refresh_golden_frame = 1; + if (!is_two_pass_svc(cpi)) cpi->refresh_golden_frame = 1; cpi->refresh_alt_ref_frame = 1; vp9_zero(cpi->interp_filter_selected); } else { @@ -272,19 +346,16 @@ static void vp9_enc_setup_mi(VP9_COMMON *cm) { static int vp9_enc_alloc_mi(VP9_COMMON *cm, int mi_size) { cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip)); - if (!cm->mip) - return 1; + if (!cm->mip) return 1; cm->prev_mip = vpx_calloc(mi_size, sizeof(*cm->prev_mip)); - if (!cm->prev_mip) - return 1; + if (!cm->prev_mip) return 1; cm->mi_alloc_size = mi_size; - cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*)); - if (!cm->mi_grid_base) - return 1; - cm->prev_mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO*)); - if 
(!cm->prev_mi_grid_base) - return 1; + cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *)); + if (!cm->mi_grid_base) return 1; + cm->prev_mi_grid_base = + (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *)); + if (!cm->prev_mi_grid_base) return 1; return 0; } @@ -343,7 +414,6 @@ static void dealloc_compressor_data(VP9_COMP *cpi) { vpx_free(cpi->tile_data); cpi->tile_data = NULL; - // Delete sementation map vpx_free(cpi->segmentation_map); cpi->segmentation_map = NULL; vpx_free(cpi->coding_context.last_frame_seg_map_copy); @@ -375,6 +445,9 @@ static void dealloc_compressor_data(VP9_COMP *cpi) { vpx_free(cpi->active_map.map); cpi->active_map.map = NULL; + vpx_free(cpi->consec_zero_mv); + cpi->consec_zero_mv = NULL; + vp9_free_ref_frame_buffers(cm->buffer_pool); #if CONFIG_VP9_POSTPROC vp9_free_postproc_buffers(cm); @@ -385,6 +458,11 @@ static void dealloc_compressor_data(VP9_COMP *cpi) { vpx_free_frame_buffer(&cpi->scaled_source); vpx_free_frame_buffer(&cpi->scaled_last_source); vpx_free_frame_buffer(&cpi->alt_ref_buffer); +#ifdef ENABLE_KF_DENOISE + vpx_free_frame_buffer(&cpi->raw_unscaled_source); + vpx_free_frame_buffer(&cpi->raw_scaled_source); +#endif + vp9_lookahead_destroy(cpi->lookahead); vpx_free(cpi->tile_tok[0][0]); @@ -410,6 +488,9 @@ static void dealloc_compressor_data(VP9_COMP *cpi) { memset(&cpi->svc.scaled_frames[0], 0, MAX_LAG_BUFFERS * sizeof(cpi->svc.scaled_frames[0])); + vpx_free_frame_buffer(&cpi->svc.scaled_temp); + memset(&cpi->svc.scaled_temp, 0, sizeof(cpi->svc.scaled_temp)); + vpx_free_frame_buffer(&cpi->svc.empty_frame.img); memset(&cpi->svc.empty_frame, 0, sizeof(cpi->svc.empty_frame)); @@ -424,7 +505,7 @@ static void save_coding_context(VP9_COMP *cpi) { // restored with a call to vp9_restore_coding_context. These functions are // intended for use in a re-code loop in vp9_compress_frame where the // quantizer value is adjusted between loop iterations. 
- vp9_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost); + vp9_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost); memcpy(cc->nmvcosts[0], cpi->nmvcosts[0], MV_VALS * sizeof(*cpi->nmvcosts[0])); @@ -437,8 +518,8 @@ static void save_coding_context(VP9_COMP *cpi) { vp9_copy(cc->segment_pred_probs, cm->seg.pred_probs); - memcpy(cpi->coding_context.last_frame_seg_map_copy, - cm->last_frame_seg_map, (cm->mi_rows * cm->mi_cols)); + memcpy(cpi->coding_context.last_frame_seg_map_copy, cm->last_frame_seg_map, + (cm->mi_rows * cm->mi_cols)); vp9_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas); vp9_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas); @@ -463,8 +544,7 @@ static void restore_coding_context(VP9_COMP *cpi) { vp9_copy(cm->seg.pred_probs, cc->segment_pred_probs); - memcpy(cm->last_frame_seg_map, - cpi->coding_context.last_frame_seg_map_copy, + memcpy(cm->last_frame_seg_map, cpi->coding_context.last_frame_seg_map_copy, (cm->mi_rows * cm->mi_cols)); vp9_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas); @@ -516,8 +596,8 @@ static void configure_static_seg_features(VP9_COMP *cpi) { seg->update_map = 1; seg->update_data = 1; - qi_delta = vp9_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, - cm->bit_depth); + qi_delta = + vp9_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth); vp9_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2); vp9_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2); @@ -538,8 +618,8 @@ static void configure_static_seg_features(VP9_COMP *cpi) { seg->update_data = 1; seg->abs_delta = SEGMENT_DELTADATA; - qi_delta = vp9_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125, - cm->bit_depth); + qi_delta = + vp9_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125, cm->bit_depth); vp9_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2); vp9_enable_segfeature(seg, 1, SEG_LVL_ALT_Q); @@ -621,16 +701,15 @@ static void alloc_raw_frame_buffers(VP9_COMP *cpi) { cpi->lookahead = vp9_lookahead_init(oxcf->width, oxcf->height, cm->subsampling_x, cm->subsampling_y, #if 
CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, + cm->use_highbitdepth, #endif - oxcf->lag_in_frames); + oxcf->lag_in_frames); if (!cpi->lookahead) vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, "Failed to allocate lag buffers"); // TODO(agrange) Check if ARF is enabled and skip allocation if not. - if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, - oxcf->width, oxcf->height, + if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, oxcf->width, oxcf->height, cm->subsampling_x, cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH cm->use_highbitdepth, @@ -643,8 +722,7 @@ static void alloc_raw_frame_buffers(VP9_COMP *cpi) { static void alloc_util_frame_buffers(VP9_COMP *cpi) { VP9_COMMON *const cm = &cpi->common; - if (vpx_realloc_frame_buffer(&cpi->last_frame_uf, - cm->width, cm->height, + if (vpx_realloc_frame_buffer(&cpi->last_frame_uf, cm->width, cm->height, cm->subsampling_x, cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH cm->use_highbitdepth, @@ -654,8 +732,7 @@ static void alloc_util_frame_buffers(VP9_COMP *cpi) { vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, "Failed to allocate last frame buffer"); - if (vpx_realloc_frame_buffer(&cpi->scaled_source, - cm->width, cm->height, + if (vpx_realloc_frame_buffer(&cpi->scaled_source, cm->width, cm->height, cm->subsampling_x, cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH cm->use_highbitdepth, @@ -665,8 +742,23 @@ static void alloc_util_frame_buffers(VP9_COMP *cpi) { vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, "Failed to allocate scaled source buffer"); - if (vpx_realloc_frame_buffer(&cpi->scaled_last_source, - cm->width, cm->height, + // For 1 pass cbr: allocate scaled_frame that may be used as an intermediate + // buffer for a 2 stage down-sampling: two stages of 1:2 down-sampling for a + // target of 1/4x1/4. 
+ if (is_one_pass_cbr_svc(cpi) && !cpi->svc.scaled_temp_is_alloc) { + cpi->svc.scaled_temp_is_alloc = 1; + if (vpx_realloc_frame_buffer( + &cpi->svc.scaled_temp, cm->width >> 1, cm->height >> 1, + cm->subsampling_x, cm->subsampling_y, +#if CONFIG_VP9_HIGHBITDEPTH + cm->use_highbitdepth, +#endif + VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, NULL, NULL, NULL)) + vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, + "Failed to allocate scaled_frame for svc "); + } + + if (vpx_realloc_frame_buffer(&cpi->scaled_last_source, cm->width, cm->height, cm->subsampling_x, cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH cm->use_highbitdepth, @@ -675,16 +767,35 @@ static void alloc_util_frame_buffers(VP9_COMP *cpi) { NULL, NULL, NULL)) vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, "Failed to allocate scaled last source buffer"); -} +#ifdef ENABLE_KF_DENOISE + if (vpx_realloc_frame_buffer(&cpi->raw_unscaled_source, cm->width, cm->height, + cm->subsampling_x, cm->subsampling_y, +#if CONFIG_VP9_HIGHBITDEPTH + cm->use_highbitdepth, +#endif + VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, + NULL, NULL, NULL)) + vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate unscaled raw source frame buffer"); + if (vpx_realloc_frame_buffer(&cpi->raw_scaled_source, cm->width, cm->height, + cm->subsampling_x, cm->subsampling_y, +#if CONFIG_VP9_HIGHBITDEPTH + cm->use_highbitdepth, +#endif + VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, + NULL, NULL, NULL)) + vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate scaled raw source frame buffer"); +#endif +} static int alloc_context_buffers_ext(VP9_COMP *cpi) { VP9_COMMON *cm = &cpi->common; int mi_size = cm->mi_cols * cm->mi_rows; cpi->mbmi_ext_base = vpx_calloc(mi_size, sizeof(*cpi->mbmi_ext_base)); - if (!cpi->mbmi_ext_base) - return 1; + if (!cpi->mbmi_ext_base) return 1; return 0; } @@ -701,7 +812,7 @@ static void alloc_compressor_data(VP9_COMP *cpi) { { unsigned int tokens = 
get_token_alloc(cm->mb_rows, cm->mb_cols); CHECK_MEM_ERROR(cm, cpi->tile_tok[0][0], - vpx_calloc(tokens, sizeof(*cpi->tile_tok[0][0]))); + vpx_calloc(tokens, sizeof(*cpi->tile_tok[0][0]))); } vp9_setup_pc_tree(&cpi->common, &cpi->td); @@ -718,14 +829,13 @@ static void set_tile_limits(VP9_COMP *cpi) { int min_log2_tile_cols, max_log2_tile_cols; vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols); - if (is_two_pass_svc(cpi) && - (cpi->svc.encode_empty_frame_state == ENCODING || - cpi->svc.number_spatial_layers > 1)) { + if (is_two_pass_svc(cpi) && (cpi->svc.encode_empty_frame_state == ENCODING || + cpi->svc.number_spatial_layers > 1)) { cm->log2_tile_cols = 0; cm->log2_tile_rows = 0; } else { - cm->log2_tile_cols = clamp(cpi->oxcf.tile_columns, - min_log2_tile_cols, max_log2_tile_cols); + cm->log2_tile_cols = + clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols); cm->log2_tile_rows = cpi->oxcf.tile_rows; } } @@ -744,8 +854,7 @@ static void update_frame_size(VP9_COMP *cpi) { set_tile_limits(cpi); if (is_two_pass_svc(cpi)) { - if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, - cm->width, cm->height, + if (vpx_realloc_frame_buffer(&cpi->alt_ref_buffer, cm->width, cm->height, cm->subsampling_x, cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH cm->use_highbitdepth, @@ -768,7 +877,6 @@ static void init_config(struct VP9_COMP *cpi, VP9EncoderConfig *oxcf) { cpi->oxcf = *oxcf; cpi->framerate = oxcf->init_framerate; - cm->profile = oxcf->profile; cm->bit_depth = oxcf->bit_depth; #if CONFIG_VP9_HIGHBITDEPTH @@ -777,6 +885,9 @@ static void init_config(struct VP9_COMP *cpi, VP9EncoderConfig *oxcf) { cm->color_space = oxcf->color_space; cm->color_range = oxcf->color_range; + cpi->target_level = oxcf->target_level; + cpi->keep_level_stats = oxcf->target_level != LEVEL_MAX; + cm->width = oxcf->width; cm->height = oxcf->height; alloc_compressor_data(cpi); @@ -817,153 +928,120 @@ static void set_rc_buffer_sizes(RATE_CONTROL *rc, const int64_t 
maximum = oxcf->maximum_buffer_size_ms; rc->starting_buffer_level = starting * bandwidth / 1000; - rc->optimal_buffer_level = (optimal == 0) ? bandwidth / 8 - : optimal * bandwidth / 1000; - rc->maximum_buffer_size = (maximum == 0) ? bandwidth / 8 - : maximum * bandwidth / 1000; + rc->optimal_buffer_level = + (optimal == 0) ? bandwidth / 8 : optimal * bandwidth / 1000; + rc->maximum_buffer_size = + (maximum == 0) ? bandwidth / 8 : maximum * bandwidth / 1000; } #if CONFIG_VP9_HIGHBITDEPTH #define HIGHBD_BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \ - cpi->fn_ptr[BT].sdf = SDF; \ - cpi->fn_ptr[BT].sdaf = SDAF; \ - cpi->fn_ptr[BT].vf = VF; \ - cpi->fn_ptr[BT].svf = SVF; \ - cpi->fn_ptr[BT].svaf = SVAF; \ - cpi->fn_ptr[BT].sdx3f = SDX3F; \ - cpi->fn_ptr[BT].sdx8f = SDX8F; \ - cpi->fn_ptr[BT].sdx4df = SDX4DF; + cpi->fn_ptr[BT].sdf = SDF; \ + cpi->fn_ptr[BT].sdaf = SDAF; \ + cpi->fn_ptr[BT].vf = VF; \ + cpi->fn_ptr[BT].svf = SVF; \ + cpi->fn_ptr[BT].svaf = SVAF; \ + cpi->fn_ptr[BT].sdx3f = SDX3F; \ + cpi->fn_ptr[BT].sdx8f = SDX8F; \ + cpi->fn_ptr[BT].sdx4df = SDX4DF; -#define MAKE_BFP_SAD_WRAPPER(fnname) \ -static unsigned int fnname##_bits8(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride) { \ - return fnname(src_ptr, source_stride, ref_ptr, ref_stride); \ -} \ -static unsigned int fnname##_bits10(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride) { \ - return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 2; \ -} \ -static unsigned int fnname##_bits12(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride) { \ - return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 4; \ -} +#define MAKE_BFP_SAD_WRAPPER(fnname) \ + static unsigned int fnname##_bits8(const uint8_t *src_ptr, \ + int source_stride, \ + const uint8_t *ref_ptr, int ref_stride) { \ + return fnname(src_ptr, source_stride, ref_ptr, ref_stride); \ + } \ + 
static unsigned int fnname##_bits10( \ + const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \ + int ref_stride) { \ + return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 2; \ + } \ + static unsigned int fnname##_bits12( \ + const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \ + int ref_stride) { \ + return fnname(src_ptr, source_stride, ref_ptr, ref_stride) >> 4; \ + } -#define MAKE_BFP_SADAVG_WRAPPER(fnname) static unsigned int \ -fnname##_bits8(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - const uint8_t *second_pred) { \ - return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred); \ -} \ -static unsigned int fnname##_bits10(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - const uint8_t *second_pred) { \ - return fnname(src_ptr, source_stride, ref_ptr, ref_stride, \ - second_pred) >> 2; \ -} \ -static unsigned int fnname##_bits12(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - const uint8_t *second_pred) { \ - return fnname(src_ptr, source_stride, ref_ptr, ref_stride, \ - second_pred) >> 4; \ -} +#define MAKE_BFP_SADAVG_WRAPPER(fnname) \ + static unsigned int fnname##_bits8( \ + const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \ + int ref_stride, const uint8_t *second_pred) { \ + return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred); \ + } \ + static unsigned int fnname##_bits10( \ + const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \ + int ref_stride, const uint8_t *second_pred) { \ + return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) >> \ + 2; \ + } \ + static unsigned int fnname##_bits12( \ + const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, \ + int ref_stride, const uint8_t *second_pred) { \ + return fnname(src_ptr, source_stride, ref_ptr, ref_stride, second_pred) 
>> \ + 4; \ + } -#define MAKE_BFP_SAD3_WRAPPER(fnname) \ -static void fnname##_bits8(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - unsigned int *sad_array) { \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ -} \ -static void fnname##_bits10(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - unsigned int *sad_array) { \ - int i; \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ - for (i = 0; i < 3; i++) \ - sad_array[i] >>= 2; \ -} \ -static void fnname##_bits12(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - unsigned int *sad_array) { \ - int i; \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ - for (i = 0; i < 3; i++) \ - sad_array[i] >>= 4; \ -} +#define MAKE_BFP_SAD3_WRAPPER(fnname) \ + static void fnname##_bits8(const uint8_t *src_ptr, int source_stride, \ + const uint8_t *ref_ptr, int ref_stride, \ + unsigned int *sad_array) { \ + fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ + } \ + static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \ + const uint8_t *ref_ptr, int ref_stride, \ + unsigned int *sad_array) { \ + int i; \ + fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ + for (i = 0; i < 3; i++) sad_array[i] >>= 2; \ + } \ + static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \ + const uint8_t *ref_ptr, int ref_stride, \ + unsigned int *sad_array) { \ + int i; \ + fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ + for (i = 0; i < 3; i++) sad_array[i] >>= 4; \ + } -#define MAKE_BFP_SAD8_WRAPPER(fnname) \ -static void fnname##_bits8(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - unsigned int *sad_array) { \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ -} \ -static void 
fnname##_bits10(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - unsigned int *sad_array) { \ - int i; \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ - for (i = 0; i < 8; i++) \ - sad_array[i] >>= 2; \ -} \ -static void fnname##_bits12(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - unsigned int *sad_array) { \ - int i; \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ - for (i = 0; i < 8; i++) \ - sad_array[i] >>= 4; \ -} -#define MAKE_BFP_SAD4D_WRAPPER(fnname) \ -static void fnname##_bits8(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t* const ref_ptr[], \ - int ref_stride, \ - unsigned int *sad_array) { \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ -} \ -static void fnname##_bits10(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t* const ref_ptr[], \ - int ref_stride, \ - unsigned int *sad_array) { \ - int i; \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ - for (i = 0; i < 4; i++) \ - sad_array[i] >>= 2; \ -} \ -static void fnname##_bits12(const uint8_t *src_ptr, \ - int source_stride, \ - const uint8_t* const ref_ptr[], \ - int ref_stride, \ - unsigned int *sad_array) { \ - int i; \ - fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ - for (i = 0; i < 4; i++) \ - sad_array[i] >>= 4; \ -} +#define MAKE_BFP_SAD8_WRAPPER(fnname) \ + static void fnname##_bits8(const uint8_t *src_ptr, int source_stride, \ + const uint8_t *ref_ptr, int ref_stride, \ + unsigned int *sad_array) { \ + fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ + } \ + static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \ + const uint8_t *ref_ptr, int ref_stride, \ + unsigned int *sad_array) { \ + int i; \ + fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ + for (i = 0; i < 8; i++) sad_array[i] >>= 2; \ + } \ + 
static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \ + const uint8_t *ref_ptr, int ref_stride, \ + unsigned int *sad_array) { \ + int i; \ + fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ + for (i = 0; i < 8; i++) sad_array[i] >>= 4; \ + } +#define MAKE_BFP_SAD4D_WRAPPER(fnname) \ + static void fnname##_bits8(const uint8_t *src_ptr, int source_stride, \ + const uint8_t *const ref_ptr[], int ref_stride, \ + unsigned int *sad_array) { \ + fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ + } \ + static void fnname##_bits10(const uint8_t *src_ptr, int source_stride, \ + const uint8_t *const ref_ptr[], int ref_stride, \ + unsigned int *sad_array) { \ + int i; \ + fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ + for (i = 0; i < 4; i++) sad_array[i] >>= 2; \ + } \ + static void fnname##_bits12(const uint8_t *src_ptr, int source_stride, \ + const uint8_t *const ref_ptr[], int ref_stride, \ + unsigned int *sad_array) { \ + int i; \ + fnname(src_ptr, source_stride, ref_ptr, ref_stride, sad_array); \ + for (i = 0; i < 4; i++) sad_array[i] >>= 4; \ + } MAKE_BFP_SAD_WRAPPER(vpx_highbd_sad32x16) MAKE_BFP_SADAVG_WRAPPER(vpx_highbd_sad32x16_avg) @@ -1021,409 +1099,267 @@ MAKE_BFP_SAD3_WRAPPER(vpx_highbd_sad4x4x3) MAKE_BFP_SAD8_WRAPPER(vpx_highbd_sad4x4x8) MAKE_BFP_SAD4D_WRAPPER(vpx_highbd_sad4x4x4d) -static void highbd_set_var_fns(VP9_COMP *const cpi) { +static void highbd_set_var_fns(VP9_COMP *const cpi) { VP9_COMMON *const cm = &cpi->common; if (cm->use_highbitdepth) { switch (cm->bit_depth) { case VPX_BITS_8: - HIGHBD_BFP(BLOCK_32X16, - vpx_highbd_sad32x16_bits8, - vpx_highbd_sad32x16_avg_bits8, - vpx_highbd_8_variance32x16, + HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits8, + vpx_highbd_sad32x16_avg_bits8, vpx_highbd_8_variance32x16, vpx_highbd_8_sub_pixel_variance32x16, - vpx_highbd_8_sub_pixel_avg_variance32x16, - NULL, - NULL, + vpx_highbd_8_sub_pixel_avg_variance32x16, NULL, NULL, 
vpx_highbd_sad32x16x4d_bits8) - HIGHBD_BFP(BLOCK_16X32, - vpx_highbd_sad16x32_bits8, - vpx_highbd_sad16x32_avg_bits8, - vpx_highbd_8_variance16x32, + HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits8, + vpx_highbd_sad16x32_avg_bits8, vpx_highbd_8_variance16x32, vpx_highbd_8_sub_pixel_variance16x32, - vpx_highbd_8_sub_pixel_avg_variance16x32, - NULL, - NULL, + vpx_highbd_8_sub_pixel_avg_variance16x32, NULL, NULL, vpx_highbd_sad16x32x4d_bits8) - HIGHBD_BFP(BLOCK_64X32, - vpx_highbd_sad64x32_bits8, - vpx_highbd_sad64x32_avg_bits8, - vpx_highbd_8_variance64x32, + HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits8, + vpx_highbd_sad64x32_avg_bits8, vpx_highbd_8_variance64x32, vpx_highbd_8_sub_pixel_variance64x32, - vpx_highbd_8_sub_pixel_avg_variance64x32, - NULL, - NULL, + vpx_highbd_8_sub_pixel_avg_variance64x32, NULL, NULL, vpx_highbd_sad64x32x4d_bits8) - HIGHBD_BFP(BLOCK_32X64, - vpx_highbd_sad32x64_bits8, - vpx_highbd_sad32x64_avg_bits8, - vpx_highbd_8_variance32x64, + HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits8, + vpx_highbd_sad32x64_avg_bits8, vpx_highbd_8_variance32x64, vpx_highbd_8_sub_pixel_variance32x64, - vpx_highbd_8_sub_pixel_avg_variance32x64, - NULL, - NULL, + vpx_highbd_8_sub_pixel_avg_variance32x64, NULL, NULL, vpx_highbd_sad32x64x4d_bits8) - HIGHBD_BFP(BLOCK_32X32, - vpx_highbd_sad32x32_bits8, - vpx_highbd_sad32x32_avg_bits8, - vpx_highbd_8_variance32x32, + HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits8, + vpx_highbd_sad32x32_avg_bits8, vpx_highbd_8_variance32x32, vpx_highbd_8_sub_pixel_variance32x32, vpx_highbd_8_sub_pixel_avg_variance32x32, - vpx_highbd_sad32x32x3_bits8, - vpx_highbd_sad32x32x8_bits8, + vpx_highbd_sad32x32x3_bits8, vpx_highbd_sad32x32x8_bits8, vpx_highbd_sad32x32x4d_bits8) - HIGHBD_BFP(BLOCK_64X64, - vpx_highbd_sad64x64_bits8, - vpx_highbd_sad64x64_avg_bits8, - vpx_highbd_8_variance64x64, + HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits8, + vpx_highbd_sad64x64_avg_bits8, vpx_highbd_8_variance64x64, 
vpx_highbd_8_sub_pixel_variance64x64, vpx_highbd_8_sub_pixel_avg_variance64x64, - vpx_highbd_sad64x64x3_bits8, - vpx_highbd_sad64x64x8_bits8, + vpx_highbd_sad64x64x3_bits8, vpx_highbd_sad64x64x8_bits8, vpx_highbd_sad64x64x4d_bits8) - HIGHBD_BFP(BLOCK_16X16, - vpx_highbd_sad16x16_bits8, - vpx_highbd_sad16x16_avg_bits8, - vpx_highbd_8_variance16x16, + HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits8, + vpx_highbd_sad16x16_avg_bits8, vpx_highbd_8_variance16x16, vpx_highbd_8_sub_pixel_variance16x16, vpx_highbd_8_sub_pixel_avg_variance16x16, - vpx_highbd_sad16x16x3_bits8, - vpx_highbd_sad16x16x8_bits8, + vpx_highbd_sad16x16x3_bits8, vpx_highbd_sad16x16x8_bits8, vpx_highbd_sad16x16x4d_bits8) - HIGHBD_BFP(BLOCK_16X8, - vpx_highbd_sad16x8_bits8, - vpx_highbd_sad16x8_avg_bits8, - vpx_highbd_8_variance16x8, - vpx_highbd_8_sub_pixel_variance16x8, - vpx_highbd_8_sub_pixel_avg_variance16x8, - vpx_highbd_sad16x8x3_bits8, - vpx_highbd_sad16x8x8_bits8, - vpx_highbd_sad16x8x4d_bits8) + HIGHBD_BFP( + BLOCK_16X8, vpx_highbd_sad16x8_bits8, vpx_highbd_sad16x8_avg_bits8, + vpx_highbd_8_variance16x8, vpx_highbd_8_sub_pixel_variance16x8, + vpx_highbd_8_sub_pixel_avg_variance16x8, vpx_highbd_sad16x8x3_bits8, + vpx_highbd_sad16x8x8_bits8, vpx_highbd_sad16x8x4d_bits8) - HIGHBD_BFP(BLOCK_8X16, - vpx_highbd_sad8x16_bits8, - vpx_highbd_sad8x16_avg_bits8, - vpx_highbd_8_variance8x16, - vpx_highbd_8_sub_pixel_variance8x16, - vpx_highbd_8_sub_pixel_avg_variance8x16, - vpx_highbd_sad8x16x3_bits8, - vpx_highbd_sad8x16x8_bits8, - vpx_highbd_sad8x16x4d_bits8) + HIGHBD_BFP( + BLOCK_8X16, vpx_highbd_sad8x16_bits8, vpx_highbd_sad8x16_avg_bits8, + vpx_highbd_8_variance8x16, vpx_highbd_8_sub_pixel_variance8x16, + vpx_highbd_8_sub_pixel_avg_variance8x16, vpx_highbd_sad8x16x3_bits8, + vpx_highbd_sad8x16x8_bits8, vpx_highbd_sad8x16x4d_bits8) - HIGHBD_BFP(BLOCK_8X8, - vpx_highbd_sad8x8_bits8, - vpx_highbd_sad8x8_avg_bits8, - vpx_highbd_8_variance8x8, - vpx_highbd_8_sub_pixel_variance8x8, - 
vpx_highbd_8_sub_pixel_avg_variance8x8, - vpx_highbd_sad8x8x3_bits8, - vpx_highbd_sad8x8x8_bits8, - vpx_highbd_sad8x8x4d_bits8) + HIGHBD_BFP( + BLOCK_8X8, vpx_highbd_sad8x8_bits8, vpx_highbd_sad8x8_avg_bits8, + vpx_highbd_8_variance8x8, vpx_highbd_8_sub_pixel_variance8x8, + vpx_highbd_8_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits8, + vpx_highbd_sad8x8x8_bits8, vpx_highbd_sad8x8x4d_bits8) - HIGHBD_BFP(BLOCK_8X4, - vpx_highbd_sad8x4_bits8, - vpx_highbd_sad8x4_avg_bits8, - vpx_highbd_8_variance8x4, + HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits8, + vpx_highbd_sad8x4_avg_bits8, vpx_highbd_8_variance8x4, vpx_highbd_8_sub_pixel_variance8x4, - vpx_highbd_8_sub_pixel_avg_variance8x4, - NULL, - vpx_highbd_sad8x4x8_bits8, - vpx_highbd_sad8x4x4d_bits8) + vpx_highbd_8_sub_pixel_avg_variance8x4, NULL, + vpx_highbd_sad8x4x8_bits8, vpx_highbd_sad8x4x4d_bits8) - HIGHBD_BFP(BLOCK_4X8, - vpx_highbd_sad4x8_bits8, - vpx_highbd_sad4x8_avg_bits8, - vpx_highbd_8_variance4x8, + HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits8, + vpx_highbd_sad4x8_avg_bits8, vpx_highbd_8_variance4x8, vpx_highbd_8_sub_pixel_variance4x8, - vpx_highbd_8_sub_pixel_avg_variance4x8, - NULL, - vpx_highbd_sad4x8x8_bits8, - vpx_highbd_sad4x8x4d_bits8) + vpx_highbd_8_sub_pixel_avg_variance4x8, NULL, + vpx_highbd_sad4x8x8_bits8, vpx_highbd_sad4x8x4d_bits8) - HIGHBD_BFP(BLOCK_4X4, - vpx_highbd_sad4x4_bits8, - vpx_highbd_sad4x4_avg_bits8, - vpx_highbd_8_variance4x4, - vpx_highbd_8_sub_pixel_variance4x4, - vpx_highbd_8_sub_pixel_avg_variance4x4, - vpx_highbd_sad4x4x3_bits8, - vpx_highbd_sad4x4x8_bits8, - vpx_highbd_sad4x4x4d_bits8) + HIGHBD_BFP( + BLOCK_4X4, vpx_highbd_sad4x4_bits8, vpx_highbd_sad4x4_avg_bits8, + vpx_highbd_8_variance4x4, vpx_highbd_8_sub_pixel_variance4x4, + vpx_highbd_8_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits8, + vpx_highbd_sad4x4x8_bits8, vpx_highbd_sad4x4x4d_bits8) break; case VPX_BITS_10: - HIGHBD_BFP(BLOCK_32X16, - vpx_highbd_sad32x16_bits10, - vpx_highbd_sad32x16_avg_bits10, - 
vpx_highbd_10_variance32x16, + HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits10, + vpx_highbd_sad32x16_avg_bits10, vpx_highbd_10_variance32x16, vpx_highbd_10_sub_pixel_variance32x16, - vpx_highbd_10_sub_pixel_avg_variance32x16, - NULL, - NULL, + vpx_highbd_10_sub_pixel_avg_variance32x16, NULL, NULL, vpx_highbd_sad32x16x4d_bits10) - HIGHBD_BFP(BLOCK_16X32, - vpx_highbd_sad16x32_bits10, - vpx_highbd_sad16x32_avg_bits10, - vpx_highbd_10_variance16x32, + HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits10, + vpx_highbd_sad16x32_avg_bits10, vpx_highbd_10_variance16x32, vpx_highbd_10_sub_pixel_variance16x32, - vpx_highbd_10_sub_pixel_avg_variance16x32, - NULL, - NULL, + vpx_highbd_10_sub_pixel_avg_variance16x32, NULL, NULL, vpx_highbd_sad16x32x4d_bits10) - HIGHBD_BFP(BLOCK_64X32, - vpx_highbd_sad64x32_bits10, - vpx_highbd_sad64x32_avg_bits10, - vpx_highbd_10_variance64x32, + HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits10, + vpx_highbd_sad64x32_avg_bits10, vpx_highbd_10_variance64x32, vpx_highbd_10_sub_pixel_variance64x32, - vpx_highbd_10_sub_pixel_avg_variance64x32, - NULL, - NULL, + vpx_highbd_10_sub_pixel_avg_variance64x32, NULL, NULL, vpx_highbd_sad64x32x4d_bits10) - HIGHBD_BFP(BLOCK_32X64, - vpx_highbd_sad32x64_bits10, - vpx_highbd_sad32x64_avg_bits10, - vpx_highbd_10_variance32x64, + HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits10, + vpx_highbd_sad32x64_avg_bits10, vpx_highbd_10_variance32x64, vpx_highbd_10_sub_pixel_variance32x64, - vpx_highbd_10_sub_pixel_avg_variance32x64, - NULL, - NULL, + vpx_highbd_10_sub_pixel_avg_variance32x64, NULL, NULL, vpx_highbd_sad32x64x4d_bits10) - HIGHBD_BFP(BLOCK_32X32, - vpx_highbd_sad32x32_bits10, - vpx_highbd_sad32x32_avg_bits10, - vpx_highbd_10_variance32x32, + HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits10, + vpx_highbd_sad32x32_avg_bits10, vpx_highbd_10_variance32x32, vpx_highbd_10_sub_pixel_variance32x32, vpx_highbd_10_sub_pixel_avg_variance32x32, - vpx_highbd_sad32x32x3_bits10, - vpx_highbd_sad32x32x8_bits10, + 
vpx_highbd_sad32x32x3_bits10, vpx_highbd_sad32x32x8_bits10, vpx_highbd_sad32x32x4d_bits10) - HIGHBD_BFP(BLOCK_64X64, - vpx_highbd_sad64x64_bits10, - vpx_highbd_sad64x64_avg_bits10, - vpx_highbd_10_variance64x64, + HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits10, + vpx_highbd_sad64x64_avg_bits10, vpx_highbd_10_variance64x64, vpx_highbd_10_sub_pixel_variance64x64, vpx_highbd_10_sub_pixel_avg_variance64x64, - vpx_highbd_sad64x64x3_bits10, - vpx_highbd_sad64x64x8_bits10, + vpx_highbd_sad64x64x3_bits10, vpx_highbd_sad64x64x8_bits10, vpx_highbd_sad64x64x4d_bits10) - HIGHBD_BFP(BLOCK_16X16, - vpx_highbd_sad16x16_bits10, - vpx_highbd_sad16x16_avg_bits10, - vpx_highbd_10_variance16x16, + HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits10, + vpx_highbd_sad16x16_avg_bits10, vpx_highbd_10_variance16x16, vpx_highbd_10_sub_pixel_variance16x16, vpx_highbd_10_sub_pixel_avg_variance16x16, - vpx_highbd_sad16x16x3_bits10, - vpx_highbd_sad16x16x8_bits10, + vpx_highbd_sad16x16x3_bits10, vpx_highbd_sad16x16x8_bits10, vpx_highbd_sad16x16x4d_bits10) - HIGHBD_BFP(BLOCK_16X8, - vpx_highbd_sad16x8_bits10, - vpx_highbd_sad16x8_avg_bits10, - vpx_highbd_10_variance16x8, + HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits10, + vpx_highbd_sad16x8_avg_bits10, vpx_highbd_10_variance16x8, vpx_highbd_10_sub_pixel_variance16x8, vpx_highbd_10_sub_pixel_avg_variance16x8, - vpx_highbd_sad16x8x3_bits10, - vpx_highbd_sad16x8x8_bits10, + vpx_highbd_sad16x8x3_bits10, vpx_highbd_sad16x8x8_bits10, vpx_highbd_sad16x8x4d_bits10) - HIGHBD_BFP(BLOCK_8X16, - vpx_highbd_sad8x16_bits10, - vpx_highbd_sad8x16_avg_bits10, - vpx_highbd_10_variance8x16, + HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits10, + vpx_highbd_sad8x16_avg_bits10, vpx_highbd_10_variance8x16, vpx_highbd_10_sub_pixel_variance8x16, vpx_highbd_10_sub_pixel_avg_variance8x16, - vpx_highbd_sad8x16x3_bits10, - vpx_highbd_sad8x16x8_bits10, + vpx_highbd_sad8x16x3_bits10, vpx_highbd_sad8x16x8_bits10, vpx_highbd_sad8x16x4d_bits10) - HIGHBD_BFP(BLOCK_8X8, - 
vpx_highbd_sad8x8_bits10, - vpx_highbd_sad8x8_avg_bits10, - vpx_highbd_10_variance8x8, - vpx_highbd_10_sub_pixel_variance8x8, - vpx_highbd_10_sub_pixel_avg_variance8x8, - vpx_highbd_sad8x8x3_bits10, - vpx_highbd_sad8x8x8_bits10, - vpx_highbd_sad8x8x4d_bits10) + HIGHBD_BFP( + BLOCK_8X8, vpx_highbd_sad8x8_bits10, vpx_highbd_sad8x8_avg_bits10, + vpx_highbd_10_variance8x8, vpx_highbd_10_sub_pixel_variance8x8, + vpx_highbd_10_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits10, + vpx_highbd_sad8x8x8_bits10, vpx_highbd_sad8x8x4d_bits10) - HIGHBD_BFP(BLOCK_8X4, - vpx_highbd_sad8x4_bits10, - vpx_highbd_sad8x4_avg_bits10, - vpx_highbd_10_variance8x4, + HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits10, + vpx_highbd_sad8x4_avg_bits10, vpx_highbd_10_variance8x4, vpx_highbd_10_sub_pixel_variance8x4, - vpx_highbd_10_sub_pixel_avg_variance8x4, - NULL, - vpx_highbd_sad8x4x8_bits10, - vpx_highbd_sad8x4x4d_bits10) + vpx_highbd_10_sub_pixel_avg_variance8x4, NULL, + vpx_highbd_sad8x4x8_bits10, vpx_highbd_sad8x4x4d_bits10) - HIGHBD_BFP(BLOCK_4X8, - vpx_highbd_sad4x8_bits10, - vpx_highbd_sad4x8_avg_bits10, - vpx_highbd_10_variance4x8, + HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits10, + vpx_highbd_sad4x8_avg_bits10, vpx_highbd_10_variance4x8, vpx_highbd_10_sub_pixel_variance4x8, - vpx_highbd_10_sub_pixel_avg_variance4x8, - NULL, - vpx_highbd_sad4x8x8_bits10, - vpx_highbd_sad4x8x4d_bits10) + vpx_highbd_10_sub_pixel_avg_variance4x8, NULL, + vpx_highbd_sad4x8x8_bits10, vpx_highbd_sad4x8x4d_bits10) - HIGHBD_BFP(BLOCK_4X4, - vpx_highbd_sad4x4_bits10, - vpx_highbd_sad4x4_avg_bits10, - vpx_highbd_10_variance4x4, - vpx_highbd_10_sub_pixel_variance4x4, - vpx_highbd_10_sub_pixel_avg_variance4x4, - vpx_highbd_sad4x4x3_bits10, - vpx_highbd_sad4x4x8_bits10, - vpx_highbd_sad4x4x4d_bits10) + HIGHBD_BFP( + BLOCK_4X4, vpx_highbd_sad4x4_bits10, vpx_highbd_sad4x4_avg_bits10, + vpx_highbd_10_variance4x4, vpx_highbd_10_sub_pixel_variance4x4, + vpx_highbd_10_sub_pixel_avg_variance4x4, 
vpx_highbd_sad4x4x3_bits10, + vpx_highbd_sad4x4x8_bits10, vpx_highbd_sad4x4x4d_bits10) break; case VPX_BITS_12: - HIGHBD_BFP(BLOCK_32X16, - vpx_highbd_sad32x16_bits12, - vpx_highbd_sad32x16_avg_bits12, - vpx_highbd_12_variance32x16, + HIGHBD_BFP(BLOCK_32X16, vpx_highbd_sad32x16_bits12, + vpx_highbd_sad32x16_avg_bits12, vpx_highbd_12_variance32x16, vpx_highbd_12_sub_pixel_variance32x16, - vpx_highbd_12_sub_pixel_avg_variance32x16, - NULL, - NULL, + vpx_highbd_12_sub_pixel_avg_variance32x16, NULL, NULL, vpx_highbd_sad32x16x4d_bits12) - HIGHBD_BFP(BLOCK_16X32, - vpx_highbd_sad16x32_bits12, - vpx_highbd_sad16x32_avg_bits12, - vpx_highbd_12_variance16x32, + HIGHBD_BFP(BLOCK_16X32, vpx_highbd_sad16x32_bits12, + vpx_highbd_sad16x32_avg_bits12, vpx_highbd_12_variance16x32, vpx_highbd_12_sub_pixel_variance16x32, - vpx_highbd_12_sub_pixel_avg_variance16x32, - NULL, - NULL, + vpx_highbd_12_sub_pixel_avg_variance16x32, NULL, NULL, vpx_highbd_sad16x32x4d_bits12) - HIGHBD_BFP(BLOCK_64X32, - vpx_highbd_sad64x32_bits12, - vpx_highbd_sad64x32_avg_bits12, - vpx_highbd_12_variance64x32, + HIGHBD_BFP(BLOCK_64X32, vpx_highbd_sad64x32_bits12, + vpx_highbd_sad64x32_avg_bits12, vpx_highbd_12_variance64x32, vpx_highbd_12_sub_pixel_variance64x32, - vpx_highbd_12_sub_pixel_avg_variance64x32, - NULL, - NULL, + vpx_highbd_12_sub_pixel_avg_variance64x32, NULL, NULL, vpx_highbd_sad64x32x4d_bits12) - HIGHBD_BFP(BLOCK_32X64, - vpx_highbd_sad32x64_bits12, - vpx_highbd_sad32x64_avg_bits12, - vpx_highbd_12_variance32x64, + HIGHBD_BFP(BLOCK_32X64, vpx_highbd_sad32x64_bits12, + vpx_highbd_sad32x64_avg_bits12, vpx_highbd_12_variance32x64, vpx_highbd_12_sub_pixel_variance32x64, - vpx_highbd_12_sub_pixel_avg_variance32x64, - NULL, - NULL, + vpx_highbd_12_sub_pixel_avg_variance32x64, NULL, NULL, vpx_highbd_sad32x64x4d_bits12) - HIGHBD_BFP(BLOCK_32X32, - vpx_highbd_sad32x32_bits12, - vpx_highbd_sad32x32_avg_bits12, - vpx_highbd_12_variance32x32, + HIGHBD_BFP(BLOCK_32X32, vpx_highbd_sad32x32_bits12, + 
vpx_highbd_sad32x32_avg_bits12, vpx_highbd_12_variance32x32, vpx_highbd_12_sub_pixel_variance32x32, vpx_highbd_12_sub_pixel_avg_variance32x32, - vpx_highbd_sad32x32x3_bits12, - vpx_highbd_sad32x32x8_bits12, + vpx_highbd_sad32x32x3_bits12, vpx_highbd_sad32x32x8_bits12, vpx_highbd_sad32x32x4d_bits12) - HIGHBD_BFP(BLOCK_64X64, - vpx_highbd_sad64x64_bits12, - vpx_highbd_sad64x64_avg_bits12, - vpx_highbd_12_variance64x64, + HIGHBD_BFP(BLOCK_64X64, vpx_highbd_sad64x64_bits12, + vpx_highbd_sad64x64_avg_bits12, vpx_highbd_12_variance64x64, vpx_highbd_12_sub_pixel_variance64x64, vpx_highbd_12_sub_pixel_avg_variance64x64, - vpx_highbd_sad64x64x3_bits12, - vpx_highbd_sad64x64x8_bits12, + vpx_highbd_sad64x64x3_bits12, vpx_highbd_sad64x64x8_bits12, vpx_highbd_sad64x64x4d_bits12) - HIGHBD_BFP(BLOCK_16X16, - vpx_highbd_sad16x16_bits12, - vpx_highbd_sad16x16_avg_bits12, - vpx_highbd_12_variance16x16, + HIGHBD_BFP(BLOCK_16X16, vpx_highbd_sad16x16_bits12, + vpx_highbd_sad16x16_avg_bits12, vpx_highbd_12_variance16x16, vpx_highbd_12_sub_pixel_variance16x16, vpx_highbd_12_sub_pixel_avg_variance16x16, - vpx_highbd_sad16x16x3_bits12, - vpx_highbd_sad16x16x8_bits12, + vpx_highbd_sad16x16x3_bits12, vpx_highbd_sad16x16x8_bits12, vpx_highbd_sad16x16x4d_bits12) - HIGHBD_BFP(BLOCK_16X8, - vpx_highbd_sad16x8_bits12, - vpx_highbd_sad16x8_avg_bits12, - vpx_highbd_12_variance16x8, + HIGHBD_BFP(BLOCK_16X8, vpx_highbd_sad16x8_bits12, + vpx_highbd_sad16x8_avg_bits12, vpx_highbd_12_variance16x8, vpx_highbd_12_sub_pixel_variance16x8, vpx_highbd_12_sub_pixel_avg_variance16x8, - vpx_highbd_sad16x8x3_bits12, - vpx_highbd_sad16x8x8_bits12, + vpx_highbd_sad16x8x3_bits12, vpx_highbd_sad16x8x8_bits12, vpx_highbd_sad16x8x4d_bits12) - HIGHBD_BFP(BLOCK_8X16, - vpx_highbd_sad8x16_bits12, - vpx_highbd_sad8x16_avg_bits12, - vpx_highbd_12_variance8x16, + HIGHBD_BFP(BLOCK_8X16, vpx_highbd_sad8x16_bits12, + vpx_highbd_sad8x16_avg_bits12, vpx_highbd_12_variance8x16, vpx_highbd_12_sub_pixel_variance8x16, 
vpx_highbd_12_sub_pixel_avg_variance8x16, - vpx_highbd_sad8x16x3_bits12, - vpx_highbd_sad8x16x8_bits12, + vpx_highbd_sad8x16x3_bits12, vpx_highbd_sad8x16x8_bits12, vpx_highbd_sad8x16x4d_bits12) - HIGHBD_BFP(BLOCK_8X8, - vpx_highbd_sad8x8_bits12, - vpx_highbd_sad8x8_avg_bits12, - vpx_highbd_12_variance8x8, - vpx_highbd_12_sub_pixel_variance8x8, - vpx_highbd_12_sub_pixel_avg_variance8x8, - vpx_highbd_sad8x8x3_bits12, - vpx_highbd_sad8x8x8_bits12, - vpx_highbd_sad8x8x4d_bits12) + HIGHBD_BFP( + BLOCK_8X8, vpx_highbd_sad8x8_bits12, vpx_highbd_sad8x8_avg_bits12, + vpx_highbd_12_variance8x8, vpx_highbd_12_sub_pixel_variance8x8, + vpx_highbd_12_sub_pixel_avg_variance8x8, vpx_highbd_sad8x8x3_bits12, + vpx_highbd_sad8x8x8_bits12, vpx_highbd_sad8x8x4d_bits12) - HIGHBD_BFP(BLOCK_8X4, - vpx_highbd_sad8x4_bits12, - vpx_highbd_sad8x4_avg_bits12, - vpx_highbd_12_variance8x4, + HIGHBD_BFP(BLOCK_8X4, vpx_highbd_sad8x4_bits12, + vpx_highbd_sad8x4_avg_bits12, vpx_highbd_12_variance8x4, vpx_highbd_12_sub_pixel_variance8x4, - vpx_highbd_12_sub_pixel_avg_variance8x4, - NULL, - vpx_highbd_sad8x4x8_bits12, - vpx_highbd_sad8x4x4d_bits12) + vpx_highbd_12_sub_pixel_avg_variance8x4, NULL, + vpx_highbd_sad8x4x8_bits12, vpx_highbd_sad8x4x4d_bits12) - HIGHBD_BFP(BLOCK_4X8, - vpx_highbd_sad4x8_bits12, - vpx_highbd_sad4x8_avg_bits12, - vpx_highbd_12_variance4x8, + HIGHBD_BFP(BLOCK_4X8, vpx_highbd_sad4x8_bits12, + vpx_highbd_sad4x8_avg_bits12, vpx_highbd_12_variance4x8, vpx_highbd_12_sub_pixel_variance4x8, - vpx_highbd_12_sub_pixel_avg_variance4x8, - NULL, - vpx_highbd_sad4x8x8_bits12, - vpx_highbd_sad4x8x4d_bits12) + vpx_highbd_12_sub_pixel_avg_variance4x8, NULL, + vpx_highbd_sad4x8x8_bits12, vpx_highbd_sad4x8x4d_bits12) - HIGHBD_BFP(BLOCK_4X4, - vpx_highbd_sad4x4_bits12, - vpx_highbd_sad4x4_avg_bits12, - vpx_highbd_12_variance4x4, - vpx_highbd_12_sub_pixel_variance4x4, - vpx_highbd_12_sub_pixel_avg_variance4x4, - vpx_highbd_sad4x4x3_bits12, - vpx_highbd_sad4x4x8_bits12, - 
vpx_highbd_sad4x4x4d_bits12) + HIGHBD_BFP( + BLOCK_4X4, vpx_highbd_sad4x4_bits12, vpx_highbd_sad4x4_avg_bits12, + vpx_highbd_12_variance4x4, vpx_highbd_12_sub_pixel_variance4x4, + vpx_highbd_12_sub_pixel_avg_variance4x4, vpx_highbd_sad4x4x3_bits12, + vpx_highbd_sad4x4x8_bits12, vpx_highbd_sad4x4x4d_bits12) break; default: - assert(0 && "cm->bit_depth should be VPX_BITS_8, " - "VPX_BITS_10 or VPX_BITS_12"); + assert(0 && + "cm->bit_depth should be VPX_BITS_8, " + "VPX_BITS_10 or VPX_BITS_12"); } } } @@ -1438,8 +1374,7 @@ static void realloc_segmentation_maps(VP9_COMP *cpi) { vpx_calloc(cm->mi_rows * cm->mi_cols, 1)); // Create a map used for cyclic background refresh. - if (cpi->cyclic_refresh) - vp9_cyclic_refresh_free(cpi->cyclic_refresh); + if (cpi->cyclic_refresh) vp9_cyclic_refresh_free(cpi->cyclic_refresh); CHECK_MEM_ERROR(cm, cpi->cyclic_refresh, vp9_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols)); @@ -1461,12 +1396,14 @@ void vp9_change_config(struct VP9_COMP *cpi, const VP9EncoderConfig *oxcf) { int last_w = cpi->oxcf.width; int last_h = cpi->oxcf.height; - if (cm->profile != oxcf->profile) - cm->profile = oxcf->profile; + if (cm->profile != oxcf->profile) cm->profile = oxcf->profile; cm->bit_depth = oxcf->bit_depth; cm->color_space = oxcf->color_space; cm->color_range = oxcf->color_range; + cpi->target_level = oxcf->target_level; + cpi->keep_level_stats = oxcf->target_level != LEVEL_MAX; + if (cm->profile <= PROFILE_1) assert(cm->bit_depth == VPX_BITS_8); else @@ -1539,19 +1476,21 @@ void vp9_change_config(struct VP9_COMP *cpi, const VP9EncoderConfig *oxcf) { cpi->initial_width = cpi->initial_height = 0; cpi->external_resize = 0; } else if (cm->mi_alloc_size == new_mi_size && - (cpi->oxcf.width > last_w || cpi->oxcf.height > last_h)) { - vp9_alloc_loop_filter(cm); + (cpi->oxcf.width > last_w || cpi->oxcf.height > last_h)) { + vp9_alloc_loop_filter(cm); } } update_frame_size(cpi); - if ((last_w != cpi->oxcf.width || last_h != cpi->oxcf.height) && - 
cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) - vp9_cyclic_refresh_reset_resize(cpi); + if (last_w != cpi->oxcf.width || last_h != cpi->oxcf.height) { + memset(cpi->consec_zero_mv, 0, + cm->mi_rows * cm->mi_cols * sizeof(*cpi->consec_zero_mv)); + if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) + vp9_cyclic_refresh_reset_resize(cpi); + } - if ((cpi->svc.number_temporal_layers > 1 && - cpi->oxcf.rc_mode == VPX_CBR) || + if ((cpi->svc.number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) || ((cpi->svc.number_temporal_layers > 1 || cpi->svc.number_spatial_layers > 1) && cpi->oxcf.pass != 1)) { @@ -1581,7 +1520,7 @@ void vp9_change_config(struct VP9_COMP *cpi, const VP9EncoderConfig *oxcf) { #ifndef M_LOG2_E #define M_LOG2_E 0.693147180559945309417 #endif -#define log2f(x) (log (x) / (float) M_LOG2_E) +#define log2f(x) (log(x) / (float)M_LOG2_E) /*********************************************************************** * Read before modifying 'cal_nmvjointsadcost' or 'cal_nmvsadcosts' * @@ -1646,15 +1585,13 @@ static void cal_nmvsadcosts_hp(int *mvsadcost[2]) { } while (++i <= MV_MAX); } - VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf, BufferPool *const pool) { unsigned int i; VP9_COMP *volatile const cpi = vpx_memalign(32, sizeof(VP9_COMP)); VP9_COMMON *volatile const cm = cpi != NULL ? 
&cpi->common : NULL; - if (!cm) - return NULL; + if (!cm) return NULL; vp9_zero(*cpi); @@ -1669,11 +1606,10 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf, cm->free_mi = vp9_enc_free_mi; cm->setup_mi = vp9_enc_setup_mi; - CHECK_MEM_ERROR(cm, cm->fc, - (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc))); - CHECK_MEM_ERROR(cm, cm->frame_contexts, - (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, - sizeof(*cm->frame_contexts))); + CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc))); + CHECK_MEM_ERROR( + cm, cm->frame_contexts, + (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts))); cpi->use_svc = 0; cpi->resize_state = 0; @@ -1683,7 +1619,7 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf, cpi->use_skin_detection = 0; cpi->common.buffer_pool = pool; - cpi->rc.high_source_sad = 0; + cpi->force_update_segmentation = 0; init_config(cpi, oxcf); vp9_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc); @@ -1694,6 +1630,12 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf, realloc_segmentation_maps(cpi); + CHECK_MEM_ERROR(cm, cpi->alt_ref_aq, vp9_alt_ref_aq_create()); + + CHECK_MEM_ERROR( + cm, cpi->consec_zero_mv, + vpx_calloc(cm->mi_rows * cm->mi_cols, sizeof(*cpi->consec_zero_mv))); + CHECK_MEM_ERROR(cm, cpi->nmvcosts[0], vpx_calloc(MV_VALS, sizeof(*cpi->nmvcosts[0]))); CHECK_MEM_ERROR(cm, cpi->nmvcosts[1], @@ -1711,11 +1653,11 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf, CHECK_MEM_ERROR(cm, cpi->nmvsadcosts_hp[1], vpx_calloc(MV_VALS, sizeof(*cpi->nmvsadcosts_hp[1]))); - for (i = 0; i < (sizeof(cpi->mbgraph_stats) / - sizeof(cpi->mbgraph_stats[0])); i++) { - CHECK_MEM_ERROR(cm, cpi->mbgraph_stats[i].mb_stats, - vpx_calloc(cm->MBs * - sizeof(*cpi->mbgraph_stats[i].mb_stats), 1)); + for (i = 0; i < (sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0])); + i++) { + CHECK_MEM_ERROR( + cm, cpi->mbgraph_stats[i].mb_stats, + vpx_calloc(cm->MBs * sizeof(*cpi->mbgraph_stats[i].mb_stats), 1)); } #if 
CONFIG_FP_MB_STATS @@ -1733,8 +1675,10 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf, cpi->multi_arf_last_grp_enabled = 0; cpi->b_calculate_psnr = CONFIG_INTERNAL_STATS; + + init_level_info(&cpi->level_info); + #if CONFIG_INTERNAL_STATS - cpi->b_calculate_ssimg = 0; cpi->b_calculate_blockiness = 1; cpi->b_calculate_consistency = 1; cpi->total_inconsistency = 0; @@ -1758,9 +1702,6 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf, cpi->summedp_weights = 0; } - if (cpi->b_calculate_ssimg) { - cpi->ssimg.worst= 100.0; - } cpi->fastssim.worst = 100.0; cpi->psnrhvs.worst = 100.0; @@ -1771,8 +1712,9 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf, } if (cpi->b_calculate_consistency) { - cpi->ssim_vars = vpx_malloc(sizeof(*cpi->ssim_vars) * - 4 * cpi->common.mi_rows * cpi->common.mi_cols); + CHECK_MEM_ERROR(cm, cpi->ssim_vars, + vpx_malloc(sizeof(*cpi->ssim_vars) * 4 * + cpi->common.mi_rows * cpi->common.mi_cols)); cpi->worst_consistency = 100.0; } @@ -1822,10 +1764,10 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf, const size_t packet_sz = sizeof(FIRSTPASS_STATS); const int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz); - if (cpi->svc.number_spatial_layers > 1 - || cpi->svc.number_temporal_layers > 1) { + if (cpi->svc.number_spatial_layers > 1 || + cpi->svc.number_temporal_layers > 1) { FIRSTPASS_STATS *const stats = oxcf->two_pass_stats_in.buf; - FIRSTPASS_STATS *stats_copy[VPX_SS_MAX_LAYERS] = {0}; + FIRSTPASS_STATS *stats_copy[VPX_SS_MAX_LAYERS] = { 0 }; int i; for (i = 0; i < oxcf->ss_number_layers; ++i) { @@ -1843,16 +1785,16 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf, vpx_malloc(lc->rc_twopass_stats_in.sz)); lc->twopass.stats_in_start = lc->rc_twopass_stats_in.buf; lc->twopass.stats_in = lc->twopass.stats_in_start; - lc->twopass.stats_in_end = lc->twopass.stats_in_start - + packets_in_layer - 1; + lc->twopass.stats_in_end = + lc->twopass.stats_in_start + packets_in_layer - 1; stats_copy[layer_id] = 
lc->rc_twopass_stats_in.buf; } } for (i = 0; i < packets; ++i) { const int layer_id = (int)stats[i].spatial_layer_id; - if (layer_id >= 0 && layer_id < oxcf->ss_number_layers - && stats_copy[layer_id] != NULL) { + if (layer_id >= 0 && layer_id < oxcf->ss_number_layers && + stats_copy[layer_id] != NULL) { *stats_copy[layer_id] = stats[i]; ++stats_copy[layer_id]; } @@ -1885,79 +1827,71 @@ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf, vp9_set_speed_features_framesize_dependent(cpi); // Allocate memory to store variances for a frame. - CHECK_MEM_ERROR(cm, cpi->source_diff_var, - vpx_calloc(cm->MBs, sizeof(diff))); + CHECK_MEM_ERROR(cm, cpi->source_diff_var, vpx_calloc(cm->MBs, sizeof(diff))); cpi->source_var_thresh = 0; cpi->frames_till_next_var_check = 0; -#define BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF)\ - cpi->fn_ptr[BT].sdf = SDF; \ - cpi->fn_ptr[BT].sdaf = SDAF; \ - cpi->fn_ptr[BT].vf = VF; \ - cpi->fn_ptr[BT].svf = SVF; \ - cpi->fn_ptr[BT].svaf = SVAF; \ - cpi->fn_ptr[BT].sdx3f = SDX3F; \ - cpi->fn_ptr[BT].sdx8f = SDX8F; \ - cpi->fn_ptr[BT].sdx4df = SDX4DF; +#define BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \ + cpi->fn_ptr[BT].sdf = SDF; \ + cpi->fn_ptr[BT].sdaf = SDAF; \ + cpi->fn_ptr[BT].vf = VF; \ + cpi->fn_ptr[BT].svf = SVF; \ + cpi->fn_ptr[BT].svaf = SVAF; \ + cpi->fn_ptr[BT].sdx3f = SDX3F; \ + cpi->fn_ptr[BT].sdx8f = SDX8F; \ + cpi->fn_ptr[BT].sdx4df = SDX4DF; - BFP(BLOCK_32X16, vpx_sad32x16, vpx_sad32x16_avg, - vpx_variance32x16, vpx_sub_pixel_variance32x16, - vpx_sub_pixel_avg_variance32x16, NULL, NULL, vpx_sad32x16x4d) + BFP(BLOCK_32X16, vpx_sad32x16, vpx_sad32x16_avg, vpx_variance32x16, + vpx_sub_pixel_variance32x16, vpx_sub_pixel_avg_variance32x16, NULL, NULL, + vpx_sad32x16x4d) - BFP(BLOCK_16X32, vpx_sad16x32, vpx_sad16x32_avg, - vpx_variance16x32, vpx_sub_pixel_variance16x32, - vpx_sub_pixel_avg_variance16x32, NULL, NULL, vpx_sad16x32x4d) + BFP(BLOCK_16X32, vpx_sad16x32, vpx_sad16x32_avg, vpx_variance16x32, + 
vpx_sub_pixel_variance16x32, vpx_sub_pixel_avg_variance16x32, NULL, NULL, + vpx_sad16x32x4d) - BFP(BLOCK_64X32, vpx_sad64x32, vpx_sad64x32_avg, - vpx_variance64x32, vpx_sub_pixel_variance64x32, - vpx_sub_pixel_avg_variance64x32, NULL, NULL, vpx_sad64x32x4d) + BFP(BLOCK_64X32, vpx_sad64x32, vpx_sad64x32_avg, vpx_variance64x32, + vpx_sub_pixel_variance64x32, vpx_sub_pixel_avg_variance64x32, NULL, NULL, + vpx_sad64x32x4d) - BFP(BLOCK_32X64, vpx_sad32x64, vpx_sad32x64_avg, - vpx_variance32x64, vpx_sub_pixel_variance32x64, - vpx_sub_pixel_avg_variance32x64, NULL, NULL, vpx_sad32x64x4d) + BFP(BLOCK_32X64, vpx_sad32x64, vpx_sad32x64_avg, vpx_variance32x64, + vpx_sub_pixel_variance32x64, vpx_sub_pixel_avg_variance32x64, NULL, NULL, + vpx_sad32x64x4d) - BFP(BLOCK_32X32, vpx_sad32x32, vpx_sad32x32_avg, - vpx_variance32x32, vpx_sub_pixel_variance32x32, - vpx_sub_pixel_avg_variance32x32, vpx_sad32x32x3, vpx_sad32x32x8, - vpx_sad32x32x4d) + BFP(BLOCK_32X32, vpx_sad32x32, vpx_sad32x32_avg, vpx_variance32x32, + vpx_sub_pixel_variance32x32, vpx_sub_pixel_avg_variance32x32, + vpx_sad32x32x3, vpx_sad32x32x8, vpx_sad32x32x4d) - BFP(BLOCK_64X64, vpx_sad64x64, vpx_sad64x64_avg, - vpx_variance64x64, vpx_sub_pixel_variance64x64, - vpx_sub_pixel_avg_variance64x64, vpx_sad64x64x3, vpx_sad64x64x8, - vpx_sad64x64x4d) + BFP(BLOCK_64X64, vpx_sad64x64, vpx_sad64x64_avg, vpx_variance64x64, + vpx_sub_pixel_variance64x64, vpx_sub_pixel_avg_variance64x64, + vpx_sad64x64x3, vpx_sad64x64x8, vpx_sad64x64x4d) - BFP(BLOCK_16X16, vpx_sad16x16, vpx_sad16x16_avg, - vpx_variance16x16, vpx_sub_pixel_variance16x16, - vpx_sub_pixel_avg_variance16x16, vpx_sad16x16x3, vpx_sad16x16x8, - vpx_sad16x16x4d) + BFP(BLOCK_16X16, vpx_sad16x16, vpx_sad16x16_avg, vpx_variance16x16, + vpx_sub_pixel_variance16x16, vpx_sub_pixel_avg_variance16x16, + vpx_sad16x16x3, vpx_sad16x16x8, vpx_sad16x16x4d) - BFP(BLOCK_16X8, vpx_sad16x8, vpx_sad16x8_avg, - vpx_variance16x8, vpx_sub_pixel_variance16x8, - vpx_sub_pixel_avg_variance16x8, 
- vpx_sad16x8x3, vpx_sad16x8x8, vpx_sad16x8x4d) + BFP(BLOCK_16X8, vpx_sad16x8, vpx_sad16x8_avg, vpx_variance16x8, + vpx_sub_pixel_variance16x8, vpx_sub_pixel_avg_variance16x8, vpx_sad16x8x3, + vpx_sad16x8x8, vpx_sad16x8x4d) - BFP(BLOCK_8X16, vpx_sad8x16, vpx_sad8x16_avg, - vpx_variance8x16, vpx_sub_pixel_variance8x16, - vpx_sub_pixel_avg_variance8x16, - vpx_sad8x16x3, vpx_sad8x16x8, vpx_sad8x16x4d) + BFP(BLOCK_8X16, vpx_sad8x16, vpx_sad8x16_avg, vpx_variance8x16, + vpx_sub_pixel_variance8x16, vpx_sub_pixel_avg_variance8x16, vpx_sad8x16x3, + vpx_sad8x16x8, vpx_sad8x16x4d) - BFP(BLOCK_8X8, vpx_sad8x8, vpx_sad8x8_avg, - vpx_variance8x8, vpx_sub_pixel_variance8x8, - vpx_sub_pixel_avg_variance8x8, - vpx_sad8x8x3, vpx_sad8x8x8, vpx_sad8x8x4d) + BFP(BLOCK_8X8, vpx_sad8x8, vpx_sad8x8_avg, vpx_variance8x8, + vpx_sub_pixel_variance8x8, vpx_sub_pixel_avg_variance8x8, vpx_sad8x8x3, + vpx_sad8x8x8, vpx_sad8x8x4d) - BFP(BLOCK_8X4, vpx_sad8x4, vpx_sad8x4_avg, - vpx_variance8x4, vpx_sub_pixel_variance8x4, - vpx_sub_pixel_avg_variance8x4, NULL, vpx_sad8x4x8, vpx_sad8x4x4d) + BFP(BLOCK_8X4, vpx_sad8x4, vpx_sad8x4_avg, vpx_variance8x4, + vpx_sub_pixel_variance8x4, vpx_sub_pixel_avg_variance8x4, NULL, + vpx_sad8x4x8, vpx_sad8x4x4d) - BFP(BLOCK_4X8, vpx_sad4x8, vpx_sad4x8_avg, - vpx_variance4x8, vpx_sub_pixel_variance4x8, - vpx_sub_pixel_avg_variance4x8, NULL, vpx_sad4x8x8, vpx_sad4x8x4d) + BFP(BLOCK_4X8, vpx_sad4x8, vpx_sad4x8_avg, vpx_variance4x8, + vpx_sub_pixel_variance4x8, vpx_sub_pixel_avg_variance4x8, NULL, + vpx_sad4x8x8, vpx_sad4x8x4d) - BFP(BLOCK_4X4, vpx_sad4x4, vpx_sad4x4_avg, - vpx_variance4x4, vpx_sub_pixel_variance4x4, - vpx_sub_pixel_avg_variance4x4, - vpx_sad4x4x3, vpx_sad4x4x8, vpx_sad4x4x4d) + BFP(BLOCK_4X4, vpx_sad4x4, vpx_sad4x4_avg, vpx_variance4x4, + vpx_sub_pixel_variance4x4, vpx_sub_pixel_avg_variance4x4, vpx_sad4x4x3, + vpx_sad4x4x8, vpx_sad4x4x4d) #if CONFIG_VP9_HIGHBITDEPTH highbd_set_var_fns(cpi); @@ -1978,8 +1912,7 @@ VP9_COMP 
*vp9_create_compressor(VP9EncoderConfig *oxcf, } #if CONFIG_INTERNAL_STATS -#define SNPRINT(H, T) \ - snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T)) +#define SNPRINT(H, T) snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T)) #define SNPRINT2(H, T, V) \ snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T), (V)) @@ -1990,8 +1923,7 @@ void vp9_remove_compressor(VP9_COMP *cpi) { unsigned int i; int t; - if (!cpi) - return; + if (!cpi) return; cm = &cpi->common; if (cm->current_video_frame > 0) { @@ -1999,28 +1931,29 @@ void vp9_remove_compressor(VP9_COMP *cpi) { vpx_clear_system_state(); if (cpi->oxcf.pass != 1) { - char headings[512] = {0}; - char results[512] = {0}; + char headings[512] = { 0 }; + char results[512] = { 0 }; FILE *f = fopen("opsnr.stt", "a"); - double time_encoded = (cpi->last_end_time_stamp_seen - - cpi->first_time_stamp_ever) / 10000000.000; - double total_encode_time = (cpi->time_receive_data + - cpi->time_compress_data) / 1000.000; + double time_encoded = + (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) / + 10000000.000; + double total_encode_time = + (cpi->time_receive_data + cpi->time_compress_data) / 1000.000; const double dr = - (double)cpi->bytes * (double) 8 / (double)1000 / time_encoded; + (double)cpi->bytes * (double)8 / (double)1000 / time_encoded; const double peak = (double)((1 << cpi->oxcf.input_bit_depth) - 1); + const double target_rate = (double)cpi->oxcf.target_bandwidth / 1000; + const double rate_err = ((100.0 * (dr - target_rate)) / target_rate); if (cpi->b_calculate_psnr) { - const double total_psnr = - vpx_sse_to_psnr((double)cpi->total_samples, peak, - (double)cpi->total_sq_error); - const double totalp_psnr = - vpx_sse_to_psnr((double)cpi->totalp_samples, peak, - (double)cpi->totalp_sq_error); - const double total_ssim = 100 * pow(cpi->summed_quality / - cpi->summed_weights, 8.0); - const double totalp_ssim = 100 * pow(cpi->summedp_quality / - cpi->summedp_weights, 8.0); + const double total_psnr = 
vpx_sse_to_psnr( + (double)cpi->total_samples, peak, (double)cpi->total_sq_error); + const double totalp_psnr = vpx_sse_to_psnr( + (double)cpi->totalp_samples, peak, (double)cpi->totalp_sq_error); + const double total_ssim = + 100 * pow(cpi->summed_quality / cpi->summed_weights, 8.0); + const double totalp_ssim = + 100 * pow(cpi->summedp_quality / cpi->summedp_weights, 8.0); snprintf(headings, sizeof(headings), "Bitrate\tAVGPsnr\tGLBPsnr\tAVPsnrP\tGLPsnrP\t" @@ -2031,12 +1964,10 @@ void vp9_remove_compressor(VP9_COMP *cpi) { "%7.3f\t%7.3f\t%7.3f\t%7.3f\t" "%7.3f\t%7.3f\t%7.3f\t%7.3f", dr, cpi->psnr.stat[ALL] / cpi->count, total_psnr, - cpi->psnrp.stat[ALL] / cpi->count, totalp_psnr, - total_ssim, totalp_ssim, - cpi->fastssim.stat[ALL] / cpi->count, - cpi->psnrhvs.stat[ALL] / cpi->count, - cpi->psnr.worst, cpi->worst_ssim, cpi->fastssim.worst, - cpi->psnrhvs.worst); + cpi->psnrp.stat[ALL] / cpi->count, totalp_psnr, total_ssim, + totalp_ssim, cpi->fastssim.stat[ALL] / cpi->count, + cpi->psnrhvs.stat[ALL] / cpi->count, cpi->psnr.worst, + cpi->worst_ssim, cpi->fastssim.worst, cpi->psnrhvs.worst); if (cpi->b_calculate_blockiness) { SNPRINT(headings, "\t Block\tWstBlck"); @@ -2054,14 +1985,9 @@ void vp9_remove_compressor(VP9_COMP *cpi) { SNPRINT2(results, "\t%7.3f", cpi->worst_consistency); } - if (cpi->b_calculate_ssimg) { - SNPRINT(headings, "\t SSIMG\tWtSSIMG"); - SNPRINT2(results, "\t%7.3f", cpi->ssimg.stat[ALL] / cpi->count); - SNPRINT2(results, "\t%7.3f", cpi->ssimg.worst); - } - - fprintf(f, "%s\t Time\n", headings); - fprintf(f, "%s\t%8.0f\n", results, total_encode_time); + fprintf(f, "%s\t Time\tRcErr\tAbsErr\n", headings); + fprintf(f, "%s\t%8.0f\t%7.2f\t%7.2f\n", results, total_encode_time, + rate_err, fabs(rate_err)); } fclose(f); @@ -2102,13 +2028,14 @@ void vp9_remove_compressor(VP9_COMP *cpi) { vpx_free(cpi->tile_thr_data); vpx_free(cpi->workers); - if (cpi->num_workers > 1) - vp9_loop_filter_dealloc(&cpi->lf_row_sync); + if (cpi->num_workers > 1) 
vp9_loop_filter_dealloc(&cpi->lf_row_sync); + + vp9_alt_ref_aq_destroy(cpi->alt_ref_aq); dealloc_compressor_data(cpi); - for (i = 0; i < sizeof(cpi->mbgraph_stats) / - sizeof(cpi->mbgraph_stats[0]); ++i) { + for (i = 0; i < sizeof(cpi->mbgraph_stats) / sizeof(cpi->mbgraph_stats[0]); + ++i) { vpx_free(cpi->mbgraph_stats[i].mb_stats); } @@ -2152,271 +2079,15 @@ void vp9_remove_compressor(VP9_COMP *cpi) { #endif } -/* TODO(yaowu): The block_variance calls the unoptimized versions of variance() - * and highbd_8_variance(). It should not. - */ -static void encoder_variance(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - int w, int h, unsigned int *sse, int *sum) { - int i, j; - - *sum = 0; - *sse = 0; - - for (i = 0; i < h; i++) { - for (j = 0; j < w; j++) { - const int diff = a[j] - b[j]; - *sum += diff; - *sse += diff * diff; - } - - a += a_stride; - b += b_stride; - } -} - -#if CONFIG_VP9_HIGHBITDEPTH -static void encoder_highbd_variance64(const uint8_t *a8, int a_stride, - const uint8_t *b8, int b_stride, - int w, int h, uint64_t *sse, - uint64_t *sum) { - int i, j; - - uint16_t *a = CONVERT_TO_SHORTPTR(a8); - uint16_t *b = CONVERT_TO_SHORTPTR(b8); - *sum = 0; - *sse = 0; - - for (i = 0; i < h; i++) { - for (j = 0; j < w; j++) { - const int diff = a[j] - b[j]; - *sum += diff; - *sse += diff * diff; - } - a += a_stride; - b += b_stride; - } -} - -static void encoder_highbd_8_variance(const uint8_t *a8, int a_stride, - const uint8_t *b8, int b_stride, - int w, int h, - unsigned int *sse, int *sum) { - uint64_t sse_long = 0; - uint64_t sum_long = 0; - encoder_highbd_variance64(a8, a_stride, b8, b_stride, w, h, - &sse_long, &sum_long); - *sse = (unsigned int)sse_long; - *sum = (int)sum_long; -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -static int64_t get_sse(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - int width, int height) { - const int dw = width % 16; - const int dh = height % 16; - int64_t total_sse = 0; - unsigned int sse = 
0; - int sum = 0; - int x, y; - - if (dw > 0) { - encoder_variance(&a[width - dw], a_stride, &b[width - dw], b_stride, - dw, height, &sse, &sum); - total_sse += sse; - } - - if (dh > 0) { - encoder_variance(&a[(height - dh) * a_stride], a_stride, - &b[(height - dh) * b_stride], b_stride, - width - dw, dh, &sse, &sum); - total_sse += sse; - } - - for (y = 0; y < height / 16; ++y) { - const uint8_t *pa = a; - const uint8_t *pb = b; - for (x = 0; x < width / 16; ++x) { - vpx_mse16x16(pa, a_stride, pb, b_stride, &sse); - total_sse += sse; - - pa += 16; - pb += 16; - } - - a += 16 * a_stride; - b += 16 * b_stride; - } - - return total_sse; -} - -#if CONFIG_VP9_HIGHBITDEPTH -static int64_t highbd_get_sse_shift(const uint8_t *a8, int a_stride, - const uint8_t *b8, int b_stride, - int width, int height, - unsigned int input_shift) { - const uint16_t *a = CONVERT_TO_SHORTPTR(a8); - const uint16_t *b = CONVERT_TO_SHORTPTR(b8); - int64_t total_sse = 0; - int x, y; - for (y = 0; y < height; ++y) { - for (x = 0; x < width; ++x) { - int64_t diff; - diff = (a[x] >> input_shift) - (b[x] >> input_shift); - total_sse += diff * diff; - } - a += a_stride; - b += b_stride; - } - return total_sse; -} - -static int64_t highbd_get_sse(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - int width, int height) { - int64_t total_sse = 0; - int x, y; - const int dw = width % 16; - const int dh = height % 16; - unsigned int sse = 0; - int sum = 0; - if (dw > 0) { - encoder_highbd_8_variance(&a[width - dw], a_stride, - &b[width - dw], b_stride, - dw, height, &sse, &sum); - total_sse += sse; - } - if (dh > 0) { - encoder_highbd_8_variance(&a[(height - dh) * a_stride], a_stride, - &b[(height - dh) * b_stride], b_stride, - width - dw, dh, &sse, &sum); - total_sse += sse; - } - for (y = 0; y < height / 16; ++y) { - const uint8_t *pa = a; - const uint8_t *pb = b; - for (x = 0; x < width / 16; ++x) { - vpx_highbd_8_mse16x16(pa, a_stride, pb, b_stride, &sse); - total_sse += sse; - pa 
+= 16; - pb += 16; - } - a += 16 * a_stride; - b += 16 * b_stride; - } - return total_sse; -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -typedef struct { - double psnr[4]; // total/y/u/v - uint64_t sse[4]; // total/y/u/v - uint32_t samples[4]; // total/y/u/v -} PSNR_STATS; - -#if CONFIG_VP9_HIGHBITDEPTH -static void calc_highbd_psnr(const YV12_BUFFER_CONFIG *a, - const YV12_BUFFER_CONFIG *b, - PSNR_STATS *psnr, - unsigned int bit_depth, - unsigned int in_bit_depth) { - const int widths[3] = - {a->y_crop_width, a->uv_crop_width, a->uv_crop_width }; - const int heights[3] = - {a->y_crop_height, a->uv_crop_height, a->uv_crop_height}; - const uint8_t *a_planes[3] = {a->y_buffer, a->u_buffer, a->v_buffer }; - const int a_strides[3] = {a->y_stride, a->uv_stride, a->uv_stride}; - const uint8_t *b_planes[3] = {b->y_buffer, b->u_buffer, b->v_buffer }; - const int b_strides[3] = {b->y_stride, b->uv_stride, b->uv_stride}; - int i; - uint64_t total_sse = 0; - uint32_t total_samples = 0; - const double peak = (double)((1 << in_bit_depth) - 1); - const unsigned int input_shift = bit_depth - in_bit_depth; - - for (i = 0; i < 3; ++i) { - const int w = widths[i]; - const int h = heights[i]; - const uint32_t samples = w * h; - uint64_t sse; - if (a->flags & YV12_FLAG_HIGHBITDEPTH) { - if (input_shift) { - sse = highbd_get_sse_shift(a_planes[i], a_strides[i], - b_planes[i], b_strides[i], w, h, - input_shift); - } else { - sse = highbd_get_sse(a_planes[i], a_strides[i], - b_planes[i], b_strides[i], w, h); - } - } else { - sse = get_sse(a_planes[i], a_strides[i], - b_planes[i], b_strides[i], - w, h); - } - psnr->sse[1 + i] = sse; - psnr->samples[1 + i] = samples; - psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse); - - total_sse += sse; - total_samples += samples; - } - - psnr->sse[0] = total_sse; - psnr->samples[0] = total_samples; - psnr->psnr[0] = vpx_sse_to_psnr((double)total_samples, peak, - (double)total_sse); -} - -#else // !CONFIG_VP9_HIGHBITDEPTH - -static void 
calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b, - PSNR_STATS *psnr) { - static const double peak = 255.0; - const int widths[3] = { - a->y_crop_width, a->uv_crop_width, a->uv_crop_width}; - const int heights[3] = { - a->y_crop_height, a->uv_crop_height, a->uv_crop_height}; - const uint8_t *a_planes[3] = {a->y_buffer, a->u_buffer, a->v_buffer}; - const int a_strides[3] = {a->y_stride, a->uv_stride, a->uv_stride}; - const uint8_t *b_planes[3] = {b->y_buffer, b->u_buffer, b->v_buffer}; - const int b_strides[3] = {b->y_stride, b->uv_stride, b->uv_stride}; - int i; - uint64_t total_sse = 0; - uint32_t total_samples = 0; - - for (i = 0; i < 3; ++i) { - const int w = widths[i]; - const int h = heights[i]; - const uint32_t samples = w * h; - const uint64_t sse = get_sse(a_planes[i], a_strides[i], - b_planes[i], b_strides[i], - w, h); - psnr->sse[1 + i] = sse; - psnr->samples[1 + i] = samples; - psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse); - - total_sse += sse; - total_samples += samples; - } - - psnr->sse[0] = total_sse; - psnr->samples[0] = total_samples; - psnr->psnr[0] = vpx_sse_to_psnr((double)total_samples, peak, - (double)total_sse); -} -#endif // CONFIG_VP9_HIGHBITDEPTH - static void generate_psnr_packet(VP9_COMP *cpi) { struct vpx_codec_cx_pkt pkt; int i; PSNR_STATS psnr; #if CONFIG_VP9_HIGHBITDEPTH - calc_highbd_psnr(cpi->Source, cpi->common.frame_to_show, &psnr, - cpi->td.mb.e_mbd.bd, cpi->oxcf.input_bit_depth); + vpx_calc_highbd_psnr(cpi->raw_source_frame, cpi->common.frame_to_show, &psnr, + cpi->td.mb.e_mbd.bd, cpi->oxcf.input_bit_depth); #else - calc_psnr(cpi->Source, cpi->common.frame_to_show, &psnr); + vpx_calc_psnr(cpi->raw_source_frame, cpi->common.frame_to_show, &psnr); #endif for (i = 0; i < 4; ++i) { @@ -2426,15 +2097,16 @@ static void generate_psnr_packet(VP9_COMP *cpi) { } pkt.kind = VPX_CODEC_PSNR_PKT; if (cpi->use_svc) - cpi->svc.layer_context[cpi->svc.spatial_layer_id * - 
cpi->svc.number_temporal_layers].psnr_pkt = pkt.data.psnr; + cpi->svc + .layer_context[cpi->svc.spatial_layer_id * + cpi->svc.number_temporal_layers] + .psnr_pkt = pkt.data.psnr; else vpx_codec_pkt_list_add(cpi->output_pkt_list, &pkt); } int vp9_use_as_reference(VP9_COMP *cpi, int ref_frame_flags) { - if (ref_frame_flags > 7) - return -1; + if (ref_frame_flags > 7) return -1; cpi->ref_frame_flags = ref_frame_flags; return 0; @@ -2447,8 +2119,8 @@ void vp9_update_reference(VP9_COMP *cpi, int ref_frame_flags) { cpi->ext_refresh_frame_flags_pending = 1; } -static YV12_BUFFER_CONFIG *get_vp9_ref_frame_buffer(VP9_COMP *cpi, - VP9_REFFRAME ref_frame_flag) { +static YV12_BUFFER_CONFIG *get_vp9_ref_frame_buffer( + VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag) { MV_REFERENCE_FRAME ref_frame = NONE; if (ref_frame_flag == VP9_LAST_FLAG) ref_frame = LAST_FRAME; @@ -2482,7 +2154,7 @@ int vp9_set_reference_enc(VP9_COMP *cpi, VP9_REFFRAME ref_frame_flag, } } -int vp9_update_entropy(VP9_COMP * cpi, int update) { +int vp9_update_entropy(VP9_COMP *cpi, int update) { cpi->ext_refresh_frame_context = update; cpi->ext_refresh_frame_context_pending = 1; return 0; @@ -2531,7 +2203,7 @@ void vp9_write_yuv_rec_frame(VP9_COMMON *cm) { uint16_t *src16 = CONVERT_TO_SHORTPTR(s->y_buffer); do { - fwrite(src16, s->y_width, 2, yuv_rec_file); + fwrite(src16, s->y_width, 2, yuv_rec_file); src16 += s->y_stride; } while (--h); @@ -2539,7 +2211,7 @@ void vp9_write_yuv_rec_frame(VP9_COMMON *cm) { h = s->uv_height; do { - fwrite(src16, s->uv_width, 2, yuv_rec_file); + fwrite(src16, s->uv_width, 2, yuv_rec_file); src16 += s->uv_stride; } while (--h); @@ -2557,7 +2229,7 @@ void vp9_write_yuv_rec_frame(VP9_COMMON *cm) { #endif // CONFIG_VP9_HIGHBITDEPTH do { - fwrite(src, s->y_width, 1, yuv_rec_file); + fwrite(src, s->y_width, 1, yuv_rec_file); src += s->y_stride; } while (--h); @@ -2565,7 +2237,7 @@ void vp9_write_yuv_rec_frame(VP9_COMMON *cm) { h = s->uv_height; do { - fwrite(src, s->uv_width, 1, 
yuv_rec_file); + fwrite(src, s->uv_width, 1, yuv_rec_file); src += s->uv_stride; } while (--h); @@ -2591,18 +2263,19 @@ static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src, #endif // CONFIG_VP9_HIGHBITDEPTH // TODO(dkovalev): replace YV12_BUFFER_CONFIG with vpx_image_t int i; - const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer}; - const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride}; - const int src_widths[3] = {src->y_crop_width, src->uv_crop_width, - src->uv_crop_width }; - const int src_heights[3] = {src->y_crop_height, src->uv_crop_height, - src->uv_crop_height}; - uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer}; - const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride}; - const int dst_widths[3] = {dst->y_crop_width, dst->uv_crop_width, - dst->uv_crop_width}; - const int dst_heights[3] = {dst->y_crop_height, dst->uv_crop_height, - dst->uv_crop_height}; + const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer, + src->v_buffer }; + const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride }; + const int src_widths[3] = { src->y_crop_width, src->uv_crop_width, + src->uv_crop_width }; + const int src_heights[3] = { src->y_crop_height, src->uv_crop_height, + src->uv_crop_height }; + uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer }; + const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride }; + const int dst_widths[3] = { dst->y_crop_width, dst->uv_crop_width, + dst->uv_crop_width }; + const int dst_heights[3] = { dst->y_crop_height, dst->uv_crop_height, + dst->uv_crop_height }; for (i = 0; i < MAX_MB_PLANE; ++i) { #if CONFIG_VP9_HIGHBITDEPTH @@ -2629,10 +2302,11 @@ static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src, const int src_h = src->y_crop_height; const int dst_w = dst->y_crop_width; const int dst_h = dst->y_crop_height; - const uint8_t *const srcs[3] = 
{src->y_buffer, src->u_buffer, src->v_buffer}; - const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride}; - uint8_t *const dsts[3] = {dst->y_buffer, dst->u_buffer, dst->v_buffer}; - const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride}; + const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer, + src->v_buffer }; + const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride }; + uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer }; + const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride }; const InterpKernel *const kernel = vp9_filter_kernels[EIGHTTAP]; int x, y, i; @@ -2644,8 +2318,9 @@ static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src, const int y_q4 = y * (16 / factor) * src_h / dst_h; for (x = 0; x < dst_w; x += 16) { const int x_q4 = x * (16 / factor) * src_w / dst_w; - const uint8_t *src_ptr = srcs[i] + (y / factor) * src_h / dst_h * - src_stride + (x / factor) * src_w / dst_w; + const uint8_t *src_ptr = srcs[i] + + (y / factor) * src_h / dst_h * src_stride + + (x / factor) * src_w / dst_w; uint8_t *dst_ptr = dsts[i] + (y / factor) * dst_stride + (x / factor); if (src->flags & YV12_FLAG_HIGHBITDEPTH) { @@ -2656,8 +2331,8 @@ static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src, } else { vpx_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride, kernel[x_q4 & 0xf], 16 * src_w / dst_w, - kernel[y_q4 & 0xf], 16 * src_h / dst_h, - 16 / factor, 16 / factor); + kernel[y_q4 & 0xf], 16 * src_h / dst_h, 16 / factor, + 16 / factor); } } } @@ -2672,10 +2347,11 @@ void vp9_scale_and_extend_frame_c(const YV12_BUFFER_CONFIG *src, const int src_h = src->y_crop_height; const int dst_w = dst->y_crop_width; const int dst_h = dst->y_crop_height; - const uint8_t *const srcs[3] = {src->y_buffer, src->u_buffer, src->v_buffer}; - const int src_strides[3] = {src->y_stride, src->uv_stride, src->uv_stride}; - uint8_t *const dsts[3] = {dst->y_buffer, 
dst->u_buffer, dst->v_buffer}; - const int dst_strides[3] = {dst->y_stride, dst->uv_stride, dst->uv_stride}; + const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer, + src->v_buffer }; + const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride }; + uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer }; + const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride }; const InterpKernel *const kernel = vp9_filter_kernels[EIGHTTAP]; int x, y, i; @@ -2687,14 +2363,15 @@ void vp9_scale_and_extend_frame_c(const YV12_BUFFER_CONFIG *src, const int y_q4 = y * (16 / factor) * src_h / dst_h; for (x = 0; x < dst_w; x += 16) { const int x_q4 = x * (16 / factor) * src_w / dst_w; - const uint8_t *src_ptr = srcs[i] + (y / factor) * src_h / dst_h * - src_stride + (x / factor) * src_w / dst_w; + const uint8_t *src_ptr = srcs[i] + + (y / factor) * src_h / dst_h * src_stride + + (x / factor) * src_w / dst_w; uint8_t *dst_ptr = dsts[i] + (y / factor) * dst_stride + (x / factor); vpx_scaled_2d(src_ptr, src_stride, dst_ptr, dst_stride, kernel[x_q4 & 0xf], 16 * src_w / dst_w, - kernel[y_q4 & 0xf], 16 * src_h / dst_h, - 16 / factor, 16 / factor); + kernel[y_q4 & 0xf], 16 * src_h / dst_h, 16 / factor, + 16 / factor); } } } @@ -2711,33 +2388,53 @@ static int scale_down(VP9_COMP *cpi, int q) { if (rc->frame_size_selector == UNSCALED && q >= rc->rf_level_maxq[gf_group->rf_level[gf_group->index]]) { - const int max_size_thresh = (int)(rate_thresh_mult[SCALE_STEP1] - * VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth)); + const int max_size_thresh = + (int)(rate_thresh_mult[SCALE_STEP1] * + VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth)); scale = rc->projected_frame_size > max_size_thresh ? 
1 : 0; } return scale; } +static int big_rate_miss(VP9_COMP *cpi, int high_limit, int low_limit) { + const RATE_CONTROL *const rc = &cpi->rc; + + return (rc->projected_frame_size > ((high_limit * 3) / 2)) || + (rc->projected_frame_size < (low_limit / 2)); +} + +// test in two pass for the first +static int two_pass_first_group_inter(VP9_COMP *cpi) { + TWO_PASS *const twopass = &cpi->twopass; + GF_GROUP *const gf_group = &twopass->gf_group; + if ((cpi->oxcf.pass == 2) && + (gf_group->index == gf_group->first_inter_index)) { + return 1; + } else { + return 0; + } +} + // Function to test for conditions that indicate we should loop // back and recode a frame. -static int recode_loop_test(VP9_COMP *cpi, - int high_limit, int low_limit, - int q, int maxq, int minq) { +static int recode_loop_test(VP9_COMP *cpi, int high_limit, int low_limit, int q, + int maxq, int minq) { const RATE_CONTROL *const rc = &cpi->rc; const VP9EncoderConfig *const oxcf = &cpi->oxcf; const int frame_is_kfgfarf = frame_is_kf_gf_arf(cpi); int force_recode = 0; if ((rc->projected_frame_size >= rc->max_frame_bandwidth) || + big_rate_miss(cpi, high_limit, low_limit) || (cpi->sf.recode_loop == ALLOW_RECODE) || - (frame_is_kfgfarf && - (cpi->sf.recode_loop == ALLOW_RECODE_KFARFGF))) { - if (frame_is_kfgfarf && - (oxcf->resize_mode == RESIZE_DYNAMIC) && + (two_pass_first_group_inter(cpi) && + (cpi->sf.recode_loop == ALLOW_RECODE_FIRST)) || + (frame_is_kfgfarf && (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF))) { + if (frame_is_kfgfarf && (oxcf->resize_mode == RESIZE_DYNAMIC) && scale_down(cpi, q)) { - // Code this group at a lower resolution. - cpi->resize_pending = 1; - return 1; + // Code this group at a lower resolution. + cpi->resize_pending = 1; + return 1; } // TODO(agrange) high_limit could be greater than the scale-down threshold. 
@@ -2757,20 +2454,20 @@ static int recode_loop_test(VP9_COMP *cpi, } void vp9_update_reference_frames(VP9_COMP *cpi) { - VP9_COMMON * const cm = &cpi->common; + VP9_COMMON *const cm = &cpi->common; BufferPool *const pool = cm->buffer_pool; // At this point the new frame has been encoded. // If any buffer copy / swapping is signaled it should be done here. if (cm->frame_type == KEY_FRAME) { - ref_cnt_fb(pool->frame_bufs, - &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx); - ref_cnt_fb(pool->frame_bufs, - &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx); + ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx], + cm->new_fb_idx); + ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx], + cm->new_fb_idx); } else if (vp9_preserve_existing_gf(cpi)) { // We have decided to preserve the previously existing golden frame as our // new ARF frame. However, in the short term in function - // vp9_bitstream.c::get_refresh_mask() we left it in the GF slot and, if + // vp9_get_refresh_mask() we left it in the GF slot and, if // we're updating the GF with the current decoded frame, we save it to the // ARF slot instead. // We now have to update the ARF with the current frame and swap gld_fb_idx @@ -2778,8 +2475,8 @@ void vp9_update_reference_frames(VP9_COMP *cpi) { // slot and, if we're updating the GF, the current frame becomes the new GF. 
int tmp; - ref_cnt_fb(pool->frame_bufs, - &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx); + ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx], + cm->new_fb_idx); tmp = cpi->alt_fb_idx; cpi->alt_fb_idx = cpi->gld_fb_idx; @@ -2797,16 +2494,15 @@ void vp9_update_reference_frames(VP9_COMP *cpi) { arf_idx = gf_group->arf_update_idx[gf_group->index]; } - ref_cnt_fb(pool->frame_bufs, - &cm->ref_frame_map[arf_idx], cm->new_fb_idx); + ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[arf_idx], cm->new_fb_idx); memcpy(cpi->interp_filter_selected[ALTREF_FRAME], cpi->interp_filter_selected[0], sizeof(cpi->interp_filter_selected[0])); } if (cpi->refresh_golden_frame) { - ref_cnt_fb(pool->frame_bufs, - &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx); + ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx], + cm->new_fb_idx); if (!cpi->rc.is_src_frame_alt_ref) memcpy(cpi->interp_filter_selected[GOLDEN_FRAME], cpi->interp_filter_selected[0], @@ -2819,22 +2515,20 @@ void vp9_update_reference_frames(VP9_COMP *cpi) { } if (cpi->refresh_last_frame) { - ref_cnt_fb(pool->frame_bufs, - &cm->ref_frame_map[cpi->lst_fb_idx], cm->new_fb_idx); + ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->lst_fb_idx], + cm->new_fb_idx); if (!cpi->rc.is_src_frame_alt_ref) memcpy(cpi->interp_filter_selected[LAST_FRAME], cpi->interp_filter_selected[0], sizeof(cpi->interp_filter_selected[0])); } #if CONFIG_VP9_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity > 0) { - vp9_denoiser_update_frame_info(&cpi->denoiser, - *cpi->Source, - cpi->common.frame_type, - cpi->refresh_alt_ref_frame, - cpi->refresh_golden_frame, - cpi->refresh_last_frame, - cpi->resize_pending); + if (cpi->oxcf.noise_sensitivity > 0 && + cpi->denoiser.denoising_level > kDenLowLow) { + vp9_denoiser_update_frame_info( + &cpi->denoiser, *cpi->Source, cpi->common.frame_type, + cpi->refresh_alt_ref_frame, cpi->refresh_golden_frame, + cpi->refresh_last_frame, cpi->resize_pending); } #endif if 
(is_one_pass_cbr_svc(cpi)) { @@ -2860,8 +2554,8 @@ static void loopfilter_frame(VP9_COMP *cpi, VP9_COMMON *cm) { struct loopfilter *lf = &cm->lf; if (xd->lossless) { - lf->filter_level = 0; - lf->last_filt_level = 0; + lf->filter_level = 0; + lf->last_filt_level = 0; } else { struct vpx_usec_timer timer; @@ -2889,9 +2583,8 @@ static void loopfilter_frame(VP9_COMP *cpi, VP9_COMMON *cm) { if (cpi->num_workers > 1) vp9_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane, - lf->filter_level, 0, 0, - cpi->workers, cpi->num_workers, - &cpi->lf_row_sync); + lf->filter_level, 0, 0, cpi->workers, + cpi->num_workers, &cpi->lf_row_sync); else vp9_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0); } @@ -2899,16 +2592,14 @@ static void loopfilter_frame(VP9_COMP *cpi, VP9_COMMON *cm) { vpx_extend_frame_inner_borders(cm->frame_to_show); } -static INLINE void alloc_frame_mvs(const VP9_COMMON *cm, - int buffer_idx) { +static INLINE void alloc_frame_mvs(VP9_COMMON *const cm, int buffer_idx) { RefCntBuffer *const new_fb_ptr = &cm->buffer_pool->frame_bufs[buffer_idx]; - if (new_fb_ptr->mvs == NULL || - new_fb_ptr->mi_rows < cm->mi_rows || + if (new_fb_ptr->mvs == NULL || new_fb_ptr->mi_rows < cm->mi_rows || new_fb_ptr->mi_cols < cm->mi_cols) { vpx_free(new_fb_ptr->mvs); - new_fb_ptr->mvs = - (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols, - sizeof(*new_fb_ptr->mvs)); + CHECK_MEM_ERROR(cm, new_fb_ptr->mvs, + (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols, + sizeof(*new_fb_ptr->mvs))); new_fb_ptr->mi_rows = cm->mi_rows; new_fb_ptr->mi_cols = cm->mi_cols; } @@ -2917,14 +2608,15 @@ static INLINE void alloc_frame_mvs(const VP9_COMMON *cm, void vp9_scale_references(VP9_COMP *cpi) { VP9_COMMON *cm = &cpi->common; MV_REFERENCE_FRAME ref_frame; - const VP9_REFFRAME ref_mask[3] = {VP9_LAST_FLAG, VP9_GOLD_FLAG, VP9_ALT_FLAG}; + const VP9_REFFRAME ref_mask[3] = { VP9_LAST_FLAG, VP9_GOLD_FLAG, + VP9_ALT_FLAG }; for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; 
++ref_frame) { // Need to convert from VP9_REFFRAME to index into ref_mask (subtract 1). if (cpi->ref_frame_flags & ref_mask[ref_frame - 1]) { BufferPool *const pool = cm->buffer_pool; - const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, - ref_frame); + const YV12_BUFFER_CONFIG *const ref = + get_ref_frame_buffer(cpi, ref_frame); if (ref == NULL) { cpi->scaled_ref_idx[ref_frame - 1] = INVALID_IDX; @@ -2940,18 +2632,17 @@ void vp9_scale_references(VP9_COMP *cpi) { new_fb = get_free_fb(cm); force_scaling = 1; } - if (new_fb == INVALID_IDX) - return; + if (new_fb == INVALID_IDX) return; new_fb_ptr = &pool->frame_bufs[new_fb]; - if (force_scaling || - new_fb_ptr->buf.y_crop_width != cm->width || + if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width || new_fb_ptr->buf.y_crop_height != cm->height) { - vpx_realloc_frame_buffer(&new_fb_ptr->buf, - cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, - cm->use_highbitdepth, - VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, - NULL, NULL, NULL); + if (vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height, + cm->subsampling_x, cm->subsampling_y, + cm->use_highbitdepth, + VP9_ENC_BORDER_IN_PIXELS, + cm->byte_alignment, NULL, NULL, NULL)) + vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate frame buffer"); scale_and_extend_frame(ref, &new_fb_ptr->buf, (int)cm->bit_depth); cpi->scaled_ref_idx[ref_frame - 1] = new_fb; alloc_frame_mvs(cm, new_fb); @@ -2965,17 +2656,16 @@ void vp9_scale_references(VP9_COMP *cpi) { new_fb = get_free_fb(cm); force_scaling = 1; } - if (new_fb == INVALID_IDX) - return; + if (new_fb == INVALID_IDX) return; new_fb_ptr = &pool->frame_bufs[new_fb]; - if (force_scaling || - new_fb_ptr->buf.y_crop_width != cm->width || + if (force_scaling || new_fb_ptr->buf.y_crop_width != cm->width || new_fb_ptr->buf.y_crop_height != cm->height) { - vpx_realloc_frame_buffer(&new_fb_ptr->buf, - cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, - 
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, - NULL, NULL, NULL); + if (vpx_realloc_frame_buffer(&new_fb_ptr->buf, cm->width, cm->height, + cm->subsampling_x, cm->subsampling_y, + VP9_ENC_BORDER_IN_PIXELS, + cm->byte_alignment, NULL, NULL, NULL)) + vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate frame buffer"); vp9_scale_and_extend_frame(ref, &new_fb_ptr->buf); cpi->scaled_ref_idx[ref_frame - 1] = new_fb; alloc_frame_mvs(cm, new_fb); @@ -3019,22 +2709,21 @@ static void release_scaled_references(VP9_COMP *cpi) { refresh[2] = (cpi->refresh_alt_ref_frame) ? 1 : 0; for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) { const int idx = cpi->scaled_ref_idx[i - 1]; - RefCntBuffer *const buf = idx != INVALID_IDX ? - &cm->buffer_pool->frame_bufs[idx] : NULL; + RefCntBuffer *const buf = + idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[idx] : NULL; const YV12_BUFFER_CONFIG *const ref = get_ref_frame_buffer(cpi, i); if (buf != NULL && - (refresh[i - 1] || - (buf->buf.y_crop_width == ref->y_crop_width && - buf->buf.y_crop_height == ref->y_crop_height))) { + (refresh[i - 1] || (buf->buf.y_crop_width == ref->y_crop_width && + buf->buf.y_crop_height == ref->y_crop_height))) { --buf->ref_count; - cpi->scaled_ref_idx[i -1] = INVALID_IDX; + cpi->scaled_ref_idx[i - 1] = INVALID_IDX; } } } else { for (i = 0; i < MAX_REF_FRAMES; ++i) { const int idx = cpi->scaled_ref_idx[i]; - RefCntBuffer *const buf = idx != INVALID_IDX ? - &cm->buffer_pool->frame_bufs[idx] : NULL; + RefCntBuffer *const buf = + idx != INVALID_IDX ? 
&cm->buffer_pool->frame_bufs[idx] : NULL; if (buf != NULL) { --buf->ref_count; cpi->scaled_ref_idx[i] = INVALID_IDX; @@ -3073,16 +2762,45 @@ static void output_frame_level_debug_stats(VP9_COMP *cpi) { vpx_clear_system_state(); - recon_err = vp9_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); +#if CONFIG_VP9_HIGHBITDEPTH + if (cm->use_highbitdepth) { + recon_err = vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); + } else { + recon_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); + } +#else + recon_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); +#endif // CONFIG_VP9_HIGHBITDEPTH + + + if (cpi->twopass.total_left_stats.coded_error != 0.0) { + double dc_quant_devisor; +#if CONFIG_VP9_HIGHBITDEPTH + switch (cm->bit_depth) { + case VPX_BITS_8: + dc_quant_devisor = 4.0; + break; + case VPX_BITS_10: + dc_quant_devisor = 16.0; + break; + case VPX_BITS_12: + dc_quant_devisor = 64.0; + break; + default: + assert(0 && "bit_depth must be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); + break; + } +#else + dc_quant_devisor = 4.0; +#endif - if (cpi->twopass.total_left_stats.coded_error != 0.0) fprintf(f, "%10u %dx%d %10d %10d %d %d %10d %10d %10d %10d" "%10"PRId64" %10"PRId64" %5d %5d %10"PRId64" " "%10"PRId64" %10"PRId64" %10d " "%7.2lf %7.2lf %7.2lf %7.2lf %7.2lf" "%6d %6d %5d %5d %5d " "%10"PRId64" %10.3lf" - "%10lf %8u %10"PRId64" %10d %10d %10d %10d\n", + "%10lf %8u %10"PRId64" %10d %10d %10d %10d %10d\n", cpi->common.current_video_frame, cm->width, cm->height, cpi->td.rd_counts.m_search_count, @@ -3101,7 +2819,8 @@ static void output_frame_level_debug_stats(VP9_COMP *cpi) { (cpi->rc.starting_buffer_level - cpi->rc.bits_off_target), cpi->rc.total_actual_bits, cm->base_qindex, vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth), - (double)vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth) / 4.0, + (double)vp9_dc_quant(cm->base_qindex, 0, cm->bit_depth) / + dc_quant_devisor, vp9_convert_qindex_to_q(cpi->twopass.active_worst_quality, 
cm->bit_depth), cpi->rc.avg_q, @@ -3115,8 +2834,9 @@ static void output_frame_level_debug_stats(VP9_COMP *cpi) { cpi->tot_recode_hits, recon_err, cpi->rc.kf_boost, cpi->twopass.kf_zeromotion_pct, cpi->twopass.fr_content_type, - cm->lf.filter_level); - + cm->lf.filter_level, + cm->seg.aq_av_offset); + } fclose(f); if (0) { @@ -3169,8 +2889,8 @@ static void set_size_independent_vars(VP9_COMP *cpi) { cpi->common.interp_filter = cpi->sf.default_interp_filter; } -static void set_size_dependent_vars(VP9_COMP *cpi, int *q, - int *bottom_index, int *top_index) { +static void set_size_dependent_vars(VP9_COMP *cpi, int *q, int *bottom_index, + int *top_index) { VP9_COMMON *const cm = &cpi->common; const VP9EncoderConfig *const oxcf = &cpi->oxcf; @@ -3195,24 +2915,18 @@ static void set_size_dependent_vars(VP9_COMP *cpi, int *q, if (oxcf->noise_sensitivity > 0) { int l = 0; switch (oxcf->noise_sensitivity) { - case 1: - l = 20; - break; - case 2: - l = 40; - break; - case 3: - l = 60; - break; + case 1: l = 20; break; + case 2: l = 40; break; + case 3: l = 60; break; case 4: - case 5: - l = 100; - break; - case 6: - l = 150; - break; + case 5: l = 100; break; + case 6: l = 150; break; } - vp9_denoise(cpi->Source, cpi->Source, l); + if (!cpi->common.postproc_state.limits) { + cpi->common.postproc_state.limits = vpx_calloc( + cpi->common.width, sizeof(*cpi->common.postproc_state.limits)); + } + vp9_denoise(cpi->Source, cpi->Source, l, cpi->common.postproc_state.limits); } #endif // CONFIG_VP9_POSTPROC } @@ -3222,12 +2936,14 @@ static void setup_denoiser_buffer(VP9_COMP *cpi) { VP9_COMMON *const cm = &cpi->common; if (cpi->oxcf.noise_sensitivity > 0 && !cpi->denoiser.frame_buffer_initialized) { - vp9_denoiser_alloc(&(cpi->denoiser), cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, + if (vp9_denoiser_alloc(&cpi->denoiser, cm->width, cm->height, + cm->subsampling_x, cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, + cm->use_highbitdepth, #endif - 
VP9_ENC_BORDER_IN_PIXELS); + VP9_ENC_BORDER_IN_PIXELS)) + vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate denoiser"); } } #endif @@ -3248,30 +2964,25 @@ static void set_frame_size(VP9_COMP *cpi) { VP9EncoderConfig *const oxcf = &cpi->oxcf; MACROBLOCKD *const xd = &cpi->td.mb.e_mbd; - if (oxcf->pass == 2 && - oxcf->rc_mode == VPX_VBR && + if (oxcf->pass == 2 && oxcf->rc_mode == VPX_VBR && ((oxcf->resize_mode == RESIZE_FIXED && cm->current_video_frame == 0) || - (oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) { - calculate_coded_size( - cpi, &oxcf->scaled_frame_width, &oxcf->scaled_frame_height); + (oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) { + calculate_coded_size(cpi, &oxcf->scaled_frame_width, + &oxcf->scaled_frame_height); // There has been a change in frame size. vp9_set_size_literal(cpi, oxcf->scaled_frame_width, oxcf->scaled_frame_height); } - if (oxcf->pass == 0 && - oxcf->rc_mode == VPX_CBR && - !cpi->use_svc && - oxcf->resize_mode == RESIZE_DYNAMIC && - cpi->resize_pending != 0) { + if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR && !cpi->use_svc && + oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending != 0) { oxcf->scaled_frame_width = (oxcf->width * cpi->resize_scale_num) / cpi->resize_scale_den; oxcf->scaled_frame_height = - (oxcf->height * cpi->resize_scale_num) /cpi->resize_scale_den; + (oxcf->height * cpi->resize_scale_num) / cpi->resize_scale_den; // There has been a change in frame size. - vp9_set_size_literal(cpi, - oxcf->scaled_frame_width, + vp9_set_size_literal(cpi, oxcf->scaled_frame_width, oxcf->scaled_frame_height); // TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed. 
@@ -3291,23 +3002,23 @@ static void set_frame_size(VP9_COMP *cpi) { } if ((oxcf->pass == 2) && - (!cpi->use_svc || - (is_two_pass_svc(cpi) && - cpi->svc.encode_empty_frame_state != ENCODING))) { + (!cpi->use_svc || (is_two_pass_svc(cpi) && + cpi->svc.encode_empty_frame_state != ENCODING))) { vp9_set_target_rate(cpi); } alloc_frame_mvs(cm, cm->new_fb_idx); // Reset the frame pointers to the current frame size. - vpx_realloc_frame_buffer(get_frame_new_buffer(cm), - cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, + if (vpx_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height, + cm->subsampling_x, cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, + cm->use_highbitdepth, #endif - VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, - NULL, NULL, NULL); + VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment, + NULL, NULL, NULL)) + vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate frame buffer"); alloc_util_frame_buffers(cpi); init_motion_estimation(cpi); @@ -3322,18 +3033,15 @@ static void set_frame_size(VP9_COMP *cpi) { YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[buf_idx].buf; ref_buf->buf = buf; #if CONFIG_VP9_HIGHBITDEPTH - vp9_setup_scale_factors_for_frame(&ref_buf->sf, - buf->y_crop_width, buf->y_crop_height, - cm->width, cm->height, - (buf->flags & YV12_FLAG_HIGHBITDEPTH) ? - 1 : 0); + vp9_setup_scale_factors_for_frame( + &ref_buf->sf, buf->y_crop_width, buf->y_crop_height, cm->width, + cm->height, (buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 
1 : 0); #else - vp9_setup_scale_factors_for_frame(&ref_buf->sf, - buf->y_crop_width, buf->y_crop_height, - cm->width, cm->height); + vp9_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width, + buf->y_crop_height, cm->width, + cm->height); #endif // CONFIG_VP9_HIGHBITDEPTH - if (vp9_is_scaled(&ref_buf->sf)) - vpx_extend_frame_borders(buf); + if (vp9_is_scaled(&ref_buf->sf)) vpx_extend_frame_borders(buf); } else { ref_buf->buf = NULL; } @@ -3342,8 +3050,7 @@ static void set_frame_size(VP9_COMP *cpi) { set_ref_ptrs(cm, xd, LAST_FRAME, LAST_FRAME); } -static void encode_without_recode_loop(VP9_COMP *cpi, - size_t *size, +static void encode_without_recode_loop(VP9_COMP *cpi, size_t *size, uint8_t *dest) { VP9_COMMON *const cm = &cpi->common; int q = 0, bottom_index = 0, top_index = 0; // Dummy variables. @@ -3351,46 +3058,84 @@ static void encode_without_recode_loop(VP9_COMP *cpi, vpx_clear_system_state(); set_frame_size(cpi); - cpi->Source = vp9_scale_if_required(cm, - cpi->un_scaled_source, - &cpi->scaled_source, - (cpi->oxcf.pass == 0)); + + if (is_one_pass_cbr_svc(cpi) && + cpi->un_scaled_source->y_width == cm->width << 2 && + cpi->un_scaled_source->y_height == cm->height << 2 && + cpi->svc.scaled_temp.y_width == cm->width << 1 && + cpi->svc.scaled_temp.y_height == cm->height << 1) { + // For svc, if it is a 1/4x1/4 downscaling, do a two-stage scaling to take + // advantage of the 1:2 optimized scaler. In the process, the 1/2x1/2 + // result will be saved in scaled_temp and might be used later. + cpi->Source = vp9_svc_twostage_scale( + cm, cpi->un_scaled_source, &cpi->scaled_source, &cpi->svc.scaled_temp); + cpi->svc.scaled_one_half = 1; + } else if (is_one_pass_cbr_svc(cpi) && + cpi->un_scaled_source->y_width == cm->width << 1 && + cpi->un_scaled_source->y_height == cm->height << 1 && + cpi->svc.scaled_one_half) { + // If the spatial layer is 1/2x1/2 and the scaling is already done in the + // two-stage scaling, use the result directly. 
+ cpi->Source = &cpi->svc.scaled_temp; + cpi->svc.scaled_one_half = 0; + } else { + cpi->Source = vp9_scale_if_required( + cm, cpi->un_scaled_source, &cpi->scaled_source, (cpi->oxcf.pass == 0)); + } + // Unfiltered raw source used in metrics calculation if the source + // has been filtered. + if (is_psnr_calc_enabled(cpi)) { +#ifdef ENABLE_KF_DENOISE + if (is_spatial_denoise_enabled(cpi)) { + cpi->raw_source_frame = + vp9_scale_if_required(cm, &cpi->raw_unscaled_source, + &cpi->raw_scaled_source, (cpi->oxcf.pass == 0)); + } else { + cpi->raw_source_frame = cpi->Source; + } +#else + cpi->raw_source_frame = cpi->Source; +#endif + } // Avoid scaling last_source unless its needed. - // Last source is currently only used for screen-content mode, - // if partition_search_type == SOURCE_VAR_BASED_PARTITION, or if noise + // Last source is needed if vp9_avg_source_sad() is used, or if + // partition_search_type == SOURCE_VAR_BASED_PARTITION, or if noise // estimation is enabled. if (cpi->unscaled_last_source != NULL && (cpi->oxcf.content == VP9E_CONTENT_SCREEN || - cpi->sf.partition_search_type == SOURCE_VAR_BASED_PARTITION || - cpi->noise_estimate.enabled)) - cpi->Last_Source = vp9_scale_if_required(cm, - cpi->unscaled_last_source, - &cpi->scaled_last_source, - (cpi->oxcf.pass == 0)); + (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_VBR && + cpi->oxcf.mode == REALTIME && cpi->oxcf.speed >= 5) || + cpi->sf.partition_search_type == SOURCE_VAR_BASED_PARTITION || + cpi->noise_estimate.enabled)) + cpi->Last_Source = + vp9_scale_if_required(cm, cpi->unscaled_last_source, + &cpi->scaled_last_source, (cpi->oxcf.pass == 0)); + + if (cm->frame_type == KEY_FRAME || cpi->resize_pending != 0) { + memset(cpi->consec_zero_mv, 0, + cm->mi_rows * cm->mi_cols * sizeof(*cpi->consec_zero_mv)); + } + vp9_update_noise_estimate(cpi); - if (cpi->oxcf.pass == 0 && - cpi->oxcf.rc_mode == VPX_CBR && - cpi->resize_state == 0 && - cm->frame_type != KEY_FRAME && - cpi->oxcf.content == 
VP9E_CONTENT_SCREEN) + if (cpi->oxcf.pass == 0 && cpi->oxcf.mode == REALTIME && + cpi->oxcf.speed >= 5 && cpi->resize_state == 0 && + (cpi->oxcf.content == VP9E_CONTENT_SCREEN || + cpi->oxcf.rc_mode == VPX_VBR)) vp9_avg_source_sad(cpi); - // TODO(wonkap/marpan): For 1 pass SVC, since only ZERMOV is allowed for - // upsampled reference frame (i.e, svc->force_zero_mode_spatial_ref = 0), - // we should be able to avoid this frame-level upsampling. - // Keeping it for now as there is an asan error in the multi-threaded SVC - // rate control test if this upsampling is removed. - if (frame_is_intra_only(cm) == 0) { + // For 1 pass SVC, since only ZEROMV is allowed for upsampled reference + // frame (i.e, svc->force_zero_mode_spatial_ref = 0), we can avoid this + // frame-level upsampling. + if (frame_is_intra_only(cm) == 0 && !is_one_pass_cbr_svc(cpi)) { vp9_scale_references(cpi); } set_size_independent_vars(cpi); set_size_dependent_vars(cpi, &q, &bottom_index, &top_index); - if (cpi->oxcf.speed >= 5 && - cpi->oxcf.pass == 0 && + if (cpi->oxcf.speed >= 5 && cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR && cpi->oxcf.content != VP9E_CONTENT_SCREEN && cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) { @@ -3403,6 +3148,7 @@ static void encode_without_recode_loop(VP9_COMP *cpi, setup_frame(cpi); suppress_active_map(cpi); + // Variance adaptive and in frame q adjustment experiments are mutually // exclusive. 
if (cpi->oxcf.aq_mode == VARIANCE_AQ) { @@ -3413,18 +3159,20 @@ static void encode_without_recode_loop(VP9_COMP *cpi, vp9_setup_in_frame_q_adj(cpi); } else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) { vp9_cyclic_refresh_setup(cpi); + } else if (cpi->oxcf.aq_mode == LOOKAHEAD_AQ) { + // it may be pretty bad for rate-control, + // and I should handle it somehow + vp9_alt_ref_aq_setup_map(cpi->alt_ref_aq, cpi); } + apply_active_map(cpi); - // transform / motion compensation build reconstruction frame vp9_encode_frame(cpi); // Check if we should drop this frame because of high overshoot. - // Only for frames where high temporal-source sad is detected. - if (cpi->oxcf.pass == 0 && - cpi->oxcf.rc_mode == VPX_CBR && - cpi->resize_state == 0 && - cm->frame_type != KEY_FRAME && + // Only for frames where high temporal-source SAD is detected. + if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR && + cpi->resize_state == 0 && cm->frame_type != KEY_FRAME && cpi->oxcf.content == VP9E_CONTENT_SCREEN && cpi->rc.high_source_sad == 1) { int frame_size = 0; @@ -3453,10 +3201,8 @@ static void encode_without_recode_loop(VP9_COMP *cpi, // Update some stats from cyclic refresh, and check if we should not update // golden reference, for non-SVC 1 pass CBR. 
- if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && - cm->frame_type != KEY_FRAME && - !cpi->use_svc && - cpi->ext_refresh_frame_flags_pending == 0 && + if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->frame_type != KEY_FRAME && + !cpi->use_svc && cpi->ext_refresh_frame_flags_pending == 0 && (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR)) vp9_cyclic_refresh_check_golden_update(cpi); @@ -3466,8 +3212,7 @@ static void encode_without_recode_loop(VP9_COMP *cpi, vpx_clear_system_state(); } -static void encode_with_recode_loop(VP9_COMP *cpi, - size_t *size, +static void encode_with_recode_loop(VP9_COMP *cpi, size_t *size, uint8_t *dest) { VP9_COMMON *const cm = &cpi->common; RATE_CONTROL *const rc = &cpi->rc; @@ -3514,9 +3259,24 @@ static void encode_with_recode_loop(VP9_COMP *cpi, &frame_over_shoot_limit); } - cpi->Source = vp9_scale_if_required(cm, cpi->un_scaled_source, - &cpi->scaled_source, - (cpi->oxcf.pass == 0)); + cpi->Source = vp9_scale_if_required( + cm, cpi->un_scaled_source, &cpi->scaled_source, (cpi->oxcf.pass == 0)); + + // Unfiltered raw source used in metrics calculation if the source + // has been filtered. + if (is_psnr_calc_enabled(cpi)) { +#ifdef ENABLE_KF_DENOISE + if (is_spatial_denoise_enabled(cpi)) { + cpi->raw_source_frame = vp9_scale_if_required( + cm, &cpi->raw_unscaled_source, &cpi->raw_scaled_source, + (cpi->oxcf.pass == 0)); + } else { + cpi->raw_source_frame = cpi->Source; + } +#else + cpi->raw_source_frame = cpi->Source; +#endif + } if (cpi->unscaled_last_source != NULL) cpi->Last_Source = vp9_scale_if_required(cm, cpi->unscaled_last_source, @@ -3532,8 +3292,7 @@ static void encode_with_recode_loop(VP9_COMP *cpi, vp9_set_quantizer(cm, q); - if (loop_count == 0) - setup_frame(cpi); + if (loop_count == 0) setup_frame(cpi); // Variance adaptive and in frame q adjustment experiments are mutually // exclusive. 
@@ -3543,9 +3302,10 @@ static void encode_with_recode_loop(VP9_COMP *cpi, vp9_360aq_frame_setup(cpi); } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) { vp9_setup_in_frame_q_adj(cpi); + } else if (cpi->oxcf.aq_mode == LOOKAHEAD_AQ) { + vp9_alt_ref_aq_setup_map(cpi->alt_ref_aq, cpi); } - // transform / motion compensation build reconstruction frame vp9_encode_frame(cpi); // Update the skip mb flag probabilities based on the distribution @@ -3559,22 +3319,19 @@ static void encode_with_recode_loop(VP9_COMP *cpi, // to recode. if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF) { save_coding_context(cpi); - if (!cpi->sf.use_nonrd_pick_mode) - vp9_pack_bitstream(cpi, dest, size); + if (!cpi->sf.use_nonrd_pick_mode) vp9_pack_bitstream(cpi, dest, size); rc->projected_frame_size = (int)(*size) << 3; restore_coding_context(cpi); - if (frame_over_shoot_limit == 0) - frame_over_shoot_limit = 1; + if (frame_over_shoot_limit == 0) frame_over_shoot_limit = 1; } if (cpi->oxcf.rc_mode == VPX_Q) { loop = 0; } else { - if ((cm->frame_type == KEY_FRAME) && - rc->this_key_frame_forced && - (rc->projected_frame_size < rc->max_frame_bandwidth)) { + if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced && + (rc->projected_frame_size < rc->max_frame_bandwidth)) { int last_q = q; int64_t kf_err; @@ -3583,12 +3340,12 @@ static void encode_with_recode_loop(VP9_COMP *cpi, #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) { - kf_err = vp9_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); + kf_err = vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); } else { - kf_err = vp9_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); + kf_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); } #else - kf_err = vp9_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); + kf_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); #endif // CONFIG_VP9_HIGHBITDEPTH // Prevent possible divide by zero error below for perfect KF @@ -3621,9 +3378,9 @@ static void 
encode_with_recode_loop(VP9_COMP *cpi, q = clamp(q, q_low, q_high); loop = q != last_q; - } else if (recode_loop_test( - cpi, frame_over_shoot_limit, frame_under_shoot_limit, - q, VPXMAX(q_high, top_index), bottom_index)) { + } else if (recode_loop_test(cpi, frame_over_shoot_limit, + frame_under_shoot_limit, q, + VPXMAX(q_high, top_index), bottom_index)) { // Is the projected frame size out of range and are we allowed // to attempt to recode. int last_q = q; @@ -3664,13 +3421,13 @@ static void encode_with_recode_loop(VP9_COMP *cpi, // Update rate_correction_factor unless vp9_rc_update_rate_correction_factors(cpi); - q = vp9_rc_regulate_q(cpi, rc->this_frame_target, - bottom_index, VPXMAX(q_high, top_index)); + q = vp9_rc_regulate_q(cpi, rc->this_frame_target, bottom_index, + VPXMAX(q_high, top_index)); while (q < q_low && retries < 10) { vp9_rc_update_rate_correction_factors(cpi); - q = vp9_rc_regulate_q(cpi, rc->this_frame_target, - bottom_index, VPXMAX(q_high, top_index)); + q = vp9_rc_regulate_q(cpi, rc->this_frame_target, bottom_index, + VPXMAX(q_high, top_index)); retries++; } } @@ -3685,21 +3442,20 @@ static void encode_with_recode_loop(VP9_COMP *cpi, q = (q_high + q_low) / 2; } else { vp9_rc_update_rate_correction_factors(cpi); - q = vp9_rc_regulate_q(cpi, rc->this_frame_target, - bottom_index, top_index); + q = vp9_rc_regulate_q(cpi, rc->this_frame_target, bottom_index, + top_index); // Special case reset for qlow for constrained quality. // This should only trigger where there is very substantial // undershoot on a frame and the auto cq level is above // the user passsed in value. 
- if (cpi->oxcf.rc_mode == VPX_CQ && - q < q_low) { + if (cpi->oxcf.rc_mode == VPX_CQ && q < q_low) { q_low = q; } while (q > q_high && retries < 10) { vp9_rc_update_rate_correction_factors(cpi); - q = vp9_rc_regulate_q(cpi, rc->this_frame_target, - bottom_index, top_index); + q = vp9_rc_regulate_q(cpi, rc->this_frame_target, bottom_index, + top_index); retries++; } } @@ -3739,19 +3495,16 @@ static int get_ref_frame_flags(const VP9_COMP *cpi) { const int gold_is_alt = map[cpi->gld_fb_idx] == map[cpi->alt_fb_idx]; int flags = VP9_ALT_FLAG | VP9_GOLD_FLAG | VP9_LAST_FLAG; - if (gold_is_last) - flags &= ~VP9_GOLD_FLAG; + if (gold_is_last) flags &= ~VP9_GOLD_FLAG; if (cpi->rc.frames_till_gf_update_due == INT_MAX && (cpi->svc.number_temporal_layers == 1 && cpi->svc.number_spatial_layers == 1)) flags &= ~VP9_GOLD_FLAG; - if (alt_is_last) - flags &= ~VP9_ALT_FLAG; + if (alt_is_last) flags &= ~VP9_ALT_FLAG; - if (gold_is_alt) - flags &= ~VP9_ALT_FLAG; + if (gold_is_alt) flags &= ~VP9_ALT_FLAG; return flags; } @@ -3772,6 +3525,25 @@ static void set_ext_overrides(VP9_COMP *cpi) { } } +YV12_BUFFER_CONFIG *vp9_svc_twostage_scale(VP9_COMMON *cm, + YV12_BUFFER_CONFIG *unscaled, + YV12_BUFFER_CONFIG *scaled, + YV12_BUFFER_CONFIG *scaled_temp) { + if (cm->mi_cols * MI_SIZE != unscaled->y_width || + cm->mi_rows * MI_SIZE != unscaled->y_height) { +#if CONFIG_VP9_HIGHBITDEPTH + scale_and_extend_frame(unscaled, scaled_temp, (int)cm->bit_depth); + scale_and_extend_frame(scaled_temp, scaled, (int)cm->bit_depth); +#else + vp9_scale_and_extend_frame(unscaled, scaled_temp); + vp9_scale_and_extend_frame(scaled_temp, scaled); +#endif // CONFIG_VP9_HIGHBITDEPTH + return scaled; + } else { + return unscaled; + } +} + YV12_BUFFER_CONFIG *vp9_scale_if_required(VP9_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled, @@ -3779,15 +3551,13 @@ YV12_BUFFER_CONFIG *vp9_scale_if_required(VP9_COMMON *cm, if (cm->mi_cols * MI_SIZE != unscaled->y_width || cm->mi_rows * MI_SIZE != 
unscaled->y_height) { #if CONFIG_VP9_HIGHBITDEPTH - if (use_normative_scaler && - unscaled->y_width <= (scaled->y_width << 1) && + if (use_normative_scaler && unscaled->y_width <= (scaled->y_width << 1) && unscaled->y_height <= (scaled->y_height << 1)) scale_and_extend_frame(unscaled, scaled, (int)cm->bit_depth); else scale_and_extend_frame_nonnormative(unscaled, scaled, (int)cm->bit_depth); #else - if (use_normative_scaler && - unscaled->y_width <= (scaled->y_width << 1) && + if (use_normative_scaler && unscaled->y_width <= (scaled->y_width << 1) && unscaled->y_height <= (scaled->y_height << 1)) vp9_scale_and_extend_frame(unscaled, scaled); else @@ -3810,18 +3580,17 @@ static void set_arf_sign_bias(VP9_COMP *cpi) { (gf_group->rf_level[gf_group->index] == GF_ARF_LOW)); } else { arf_sign_bias = - (cpi->rc.source_alt_ref_active && !cpi->refresh_alt_ref_frame); + (cpi->rc.source_alt_ref_active && !cpi->refresh_alt_ref_frame); } cm->ref_frame_sign_bias[ALTREF_FRAME] = arf_sign_bias; } static int setup_interp_filter_search_mask(VP9_COMP *cpi) { INTERP_FILTER ifilter; - int ref_total[MAX_REF_FRAMES] = {0}; + int ref_total[MAX_REF_FRAMES] = { 0 }; MV_REFERENCE_FRAME ref; int mask = 0; - if (cpi->common.last_frame_type == KEY_FRAME || - cpi->refresh_alt_ref_frame) + if (cpi->common.last_frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame) return mask; for (ref = LAST_FRAME; ref <= ALTREF_FRAME; ++ref) for (ifilter = EIGHTTAP; ifilter <= EIGHTTAP_SHARP; ++ifilter) @@ -3829,20 +3598,199 @@ static int setup_interp_filter_search_mask(VP9_COMP *cpi) { for (ifilter = EIGHTTAP; ifilter <= EIGHTTAP_SHARP; ++ifilter) { if ((ref_total[LAST_FRAME] && - cpi->interp_filter_selected[LAST_FRAME][ifilter] == 0) && + cpi->interp_filter_selected[LAST_FRAME][ifilter] == 0) && (ref_total[GOLDEN_FRAME] == 0 || - cpi->interp_filter_selected[GOLDEN_FRAME][ifilter] * 50 - < ref_total[GOLDEN_FRAME]) && + cpi->interp_filter_selected[GOLDEN_FRAME][ifilter] * 50 < + ref_total[GOLDEN_FRAME]) && 
(ref_total[ALTREF_FRAME] == 0 || - cpi->interp_filter_selected[ALTREF_FRAME][ifilter] * 50 - < ref_total[ALTREF_FRAME])) + cpi->interp_filter_selected[ALTREF_FRAME][ifilter] * 50 < + ref_total[ALTREF_FRAME])) mask |= 1 << ifilter; } return mask; } -static void encode_frame_to_data_rate(VP9_COMP *cpi, - size_t *size, +#ifdef ENABLE_KF_DENOISE +// Baseline Kernal weights for denoise +static uint8_t dn_kernal_3[9] = { 1, 2, 1, 2, 4, 2, 1, 2, 1 }; +static uint8_t dn_kernal_5[25] = { 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 4, + 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1 }; + +static INLINE void add_denoise_point(int centre_val, int data_val, int thresh, + uint8_t point_weight, int *sum_val, + int *sum_weight) { + if (abs(centre_val - data_val) <= thresh) { + *sum_weight += point_weight; + *sum_val += (int)data_val * (int)point_weight; + } +} + +static void spatial_denoise_point(uint8_t *src_ptr, const int stride, + const int strength) { + int sum_weight = 0; + int sum_val = 0; + int thresh = strength; + int kernal_size = 5; + int half_k_size = 2; + int i, j; + int max_diff = 0; + uint8_t *tmp_ptr; + uint8_t *kernal_ptr; + + // Find the maximum deviation from the source point in the locale. + tmp_ptr = src_ptr - (stride * (half_k_size + 1)) - (half_k_size + 1); + for (i = 0; i < kernal_size + 2; ++i) { + for (j = 0; j < kernal_size + 2; ++j) { + max_diff = VPXMAX(max_diff, abs((int)*src_ptr - (int)tmp_ptr[j])); + } + tmp_ptr += stride; + } + + // Select the kernal size. + if (max_diff > (strength + (strength >> 1))) { + kernal_size = 3; + half_k_size = 1; + thresh = thresh >> 1; + } + kernal_ptr = (kernal_size == 3) ? 
dn_kernal_3 : dn_kernal_5; + + // Apply the kernal + tmp_ptr = src_ptr - (stride * half_k_size) - half_k_size; + for (i = 0; i < kernal_size; ++i) { + for (j = 0; j < kernal_size; ++j) { + add_denoise_point((int)*src_ptr, (int)tmp_ptr[j], thresh, *kernal_ptr, + &sum_val, &sum_weight); + ++kernal_ptr; + } + tmp_ptr += stride; + } + + // Update the source value with the new filtered value + *src_ptr = (uint8_t)((sum_val + (sum_weight >> 1)) / sum_weight); +} + +#if CONFIG_VP9_HIGHBITDEPTH +static void highbd_spatial_denoise_point(uint16_t *src_ptr, const int stride, + const int strength) { + int sum_weight = 0; + int sum_val = 0; + int thresh = strength; + int kernal_size = 5; + int half_k_size = 2; + int i, j; + int max_diff = 0; + uint16_t *tmp_ptr; + uint8_t *kernal_ptr; + + // Find the maximum deviation from the source point in the locale. + tmp_ptr = src_ptr - (stride * (half_k_size + 1)) - (half_k_size + 1); + for (i = 0; i < kernal_size + 2; ++i) { + for (j = 0; j < kernal_size + 2; ++j) { + max_diff = VPXMAX(max_diff, abs((int)src_ptr - (int)tmp_ptr[j])); + } + tmp_ptr += stride; + } + + // Select the kernal size. + if (max_diff > (strength + (strength >> 1))) { + kernal_size = 3; + half_k_size = 1; + thresh = thresh >> 1; + } + kernal_ptr = (kernal_size == 3) ? dn_kernal_3 : dn_kernal_5; + + // Apply the kernal + tmp_ptr = src_ptr - (stride * half_k_size) - half_k_size; + for (i = 0; i < kernal_size; ++i) { + for (j = 0; j < kernal_size; ++j) { + add_denoise_point((int)*src_ptr, (int)tmp_ptr[j], thresh, *kernal_ptr, + &sum_val, &sum_weight); + ++kernal_ptr; + } + tmp_ptr += stride; + } + + // Update the source value with the new filtered value + *src_ptr = (uint16_t)((sum_val + (sum_weight >> 1)) / sum_weight); +} +#endif // CONFIG_VP9_HIGHBITDEPTH + +// Apply thresholded spatial noise supression to a given buffer. 
+static void spatial_denoise_buffer(VP9_COMP *cpi, uint8_t *buffer, + const int stride, const int width, + const int height, const int strength) { + VP9_COMMON *const cm = &cpi->common; + uint8_t *src_ptr = buffer; + int row; + int col; + + for (row = 0; row < height; ++row) { + for (col = 0; col < width; ++col) { +#if CONFIG_VP9_HIGHBITDEPTH + if (cm->use_highbitdepth) + highbd_spatial_denoise_point(CONVERT_TO_SHORTPTR(&src_ptr[col]), stride, + strength); + else + spatial_denoise_point(&src_ptr[col], stride, strength); +#else + spatial_denoise_point(&src_ptr[col], stride, strength); +#endif // CONFIG_VP9_HIGHBITDEPTH + } + src_ptr += stride; + } +} + +// Apply thresholded spatial noise supression to source. +static void spatial_denoise_frame(VP9_COMP *cpi) { + YV12_BUFFER_CONFIG *src = cpi->Source; + const VP9EncoderConfig *const oxcf = &cpi->oxcf; + TWO_PASS *const twopass = &cpi->twopass; + VP9_COMMON *const cm = &cpi->common; + + // Base the filter strength on the current active max Q. + const int q = (int)(vp9_convert_qindex_to_q(twopass->active_worst_quality, + cm->bit_depth)); + int strength = + VPXMAX(oxcf->arnr_strength >> 2, VPXMIN(oxcf->arnr_strength, (q >> 4))); + + // Denoise each of Y,U and V buffers. 
+ spatial_denoise_buffer(cpi, src->y_buffer, src->y_stride, src->y_width, + src->y_height, strength); + + strength += (strength >> 1); + spatial_denoise_buffer(cpi, src->u_buffer, src->uv_stride, src->uv_width, + src->uv_height, strength << 1); + + spatial_denoise_buffer(cpi, src->v_buffer, src->uv_stride, src->uv_width, + src->uv_height, strength << 1); +} +#endif // ENABLE_KF_DENOISE + +static void vp9_try_disable_lookahead_aq(VP9_COMP *cpi, size_t *size, + uint8_t *dest) { + if (cpi->common.seg.enabled) + if (ALT_REF_AQ_PROTECT_GAIN) { + size_t nsize = *size; + int overhead; + + // TODO(yuryg): optimize this, as + // we don't really need to repack + + save_coding_context(cpi); + vp9_disable_segmentation(&cpi->common.seg); + vp9_pack_bitstream(cpi, dest, &nsize); + restore_coding_context(cpi); + + overhead = (int)*size - (int)nsize; + + if (vp9_alt_ref_aq_disable_if(cpi->alt_ref_aq, overhead, (int)*size)) + vp9_encode_frame(cpi); + else + vp9_enable_segmentation(&cpi->common.seg); + } +} + +static void encode_frame_to_data_rate(VP9_COMP *cpi, size_t *size, uint8_t *dest, unsigned int *frame_flags) { VP9_COMMON *const cm = &cpi->common; @@ -3853,16 +3801,19 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, set_ext_overrides(cpi); vpx_clear_system_state(); +#ifdef ENABLE_KF_DENOISE + // Spatial denoise of key frame. + if (is_spatial_denoise_enabled(cpi)) spatial_denoise_frame(cpi); +#endif + // Set the arf sign bias for this frame. set_arf_sign_bias(cpi); // Set default state for segment based loop filter update flags. cm->lf.mode_ref_delta_update = 0; - if (cpi->oxcf.pass == 2 && - cpi->sf.adaptive_interp_filter_search) - cpi->sf.interp_filter_search_mask = - setup_interp_filter_search_mask(cpi); + if (cpi->oxcf.pass == 2 && cpi->sf.adaptive_interp_filter_search) + cpi->sf.interp_filter_search_mask = setup_interp_filter_search_mask(cpi); // Set various flags etc to special state if it is a key frame. 
if (frame_is_intra_only(cm)) { @@ -3900,9 +3851,9 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, else cm->frame_context_idx = FRAME_CONTEXTS - 1; } else { - cm->frame_context_idx = - cpi->svc.spatial_layer_id * cpi->svc.number_temporal_layers + - cpi->svc.temporal_layer_id; + cm->frame_context_idx = + cpi->svc.spatial_layer_id * cpi->svc.number_temporal_layers + + cpi->svc.temporal_layer_id; } cm->frame_parallel_decoding_mode = oxcf->frame_parallel_decoding_mode; @@ -3933,8 +3884,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, // For 1 pass CBR, check if we are dropping this frame. // For spatial layers, for now only check for frame-dropping on first spatial // layer, and if decision is to drop, we drop whole super-frame. - if (oxcf->pass == 0 && - oxcf->rc_mode == VPX_CBR && + if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR && cm->frame_type != KEY_FRAME) { if (vp9_rc_drop_frame(cpi) || (is_one_pass_cbr_svc(cpi) && cpi->svc.rc_drop_superframe == 1)) { @@ -3942,6 +3892,12 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, ++cm->current_video_frame; cpi->ext_refresh_frame_flags_pending = 0; cpi->svc.rc_drop_superframe = 1; + // TODO(marpan): Advancing the svc counters on dropped frames can break + // the referencing scheme for the fixed svc patterns defined in + // vp9_one_pass_cbr_svc_start_layer(). Look into fixing this issue, but + // for now, don't advance the svc frame counters on dropped frame. 
+ // if (cpi->use_svc) + // vp9_inc_frame_in_layer(cpi); return; } } @@ -3959,6 +3915,10 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, encode_with_recode_loop(cpi, size, dest); } + // Disable segmentation if it decrease rate/distortion ratio + if (cpi->oxcf.aq_mode == LOOKAHEAD_AQ) + vp9_try_disable_lookahead_aq(cpi, size, dest); + #if CONFIG_VP9_TEMPORAL_DENOISING #ifdef OUTPUT_YUV_DENOISED if (oxcf->noise_sensitivity > 0) { @@ -3979,24 +3939,23 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, if (cpi->rc.next_key_frame_forced && cpi->rc.frames_to_key == 1) { #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) { - cpi->ambient_err = vp9_highbd_get_y_sse(cpi->Source, - get_frame_new_buffer(cm)); + cpi->ambient_err = + vpx_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); } else { - cpi->ambient_err = vp9_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); + cpi->ambient_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); } #else - cpi->ambient_err = vp9_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); + cpi->ambient_err = vpx_get_y_sse(cpi->Source, get_frame_new_buffer(cm)); #endif // CONFIG_VP9_HIGHBITDEPTH } // If the encoder forced a KEY_FRAME decision - if (cm->frame_type == KEY_FRAME) - cpi->refresh_last_frame = 1; + if (cm->frame_type == KEY_FRAME) cpi->refresh_last_frame = 1; cm->frame_to_show = get_frame_new_buffer(cm); cm->frame_to_show->color_space = cm->color_space; cm->frame_to_show->color_range = cm->color_range; - cm->frame_to_show->render_width = cm->render_width; + cm->frame_to_show->render_width = cm->render_width; cm->frame_to_show->render_height = cm->render_height; // Pick the loop filter level for the frame. 
@@ -4005,8 +3964,7 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, // build the bitstream vp9_pack_bitstream(cpi, dest, size); - if (cm->seg.update_map) - update_reference_segmentation_map(cpi); + if (cm->seg.update_map) update_reference_segmentation_map(cpi); if (frame_is_intra_only(cm) == 0) { release_scaled_references(cpi); @@ -4068,24 +4026,28 @@ static void encode_frame_to_data_rate(VP9_COMP *cpi, cm->last_height = cm->height; // reset to normal state now that we are done. - if (!cm->show_existing_frame) - cm->last_show_frame = cm->show_frame; + if (!cm->show_existing_frame) cm->last_show_frame = cm->show_frame; if (cm->show_frame) { vp9_swap_mi_and_prev_mi(cm); // Don't increment frame counters if this was an altref buffer // update not a real frame ++cm->current_video_frame; - if (cpi->use_svc) - vp9_inc_frame_in_layer(cpi); + if (cpi->use_svc) vp9_inc_frame_in_layer(cpi); } cm->prev_frame = cm->cur_frame; if (cpi->use_svc) - cpi->svc.layer_context[cpi->svc.spatial_layer_id * + cpi->svc + .layer_context[cpi->svc.spatial_layer_id * cpi->svc.number_temporal_layers + - cpi->svc.temporal_layer_id].last_frame_type = - cm->frame_type; + cpi->svc.temporal_layer_id] + .last_frame_type = cm->frame_type; + + cpi->force_update_segmentation = 0; + + if (cpi->oxcf.aq_mode == LOOKAHEAD_AQ) + vp9_alt_ref_aq_unset_all(cpi->alt_ref_aq, cpi); } static void SvcEncode(VP9_COMP *cpi, size_t *size, uint8_t *dest, @@ -4104,8 +4066,8 @@ static void Pass0Encode(VP9_COMP *cpi, size_t *size, uint8_t *dest, encode_frame_to_data_rate(cpi, size, dest, frame_flags); } -static void Pass2Encode(VP9_COMP *cpi, size_t *size, - uint8_t *dest, unsigned int *frame_flags) { +static void Pass2Encode(VP9_COMP *cpi, size_t *size, uint8_t *dest, + unsigned int *frame_flags) { cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED; encode_frame_to_data_rate(cpi, size, dest, frame_flags); @@ -4154,24 +4116,18 @@ static void check_initial_width(VP9_COMP *cpi, } } -int 
vp9_receive_raw_frame(VP9_COMP *cpi, unsigned int frame_flags, +int vp9_receive_raw_frame(VP9_COMP *cpi, vpx_enc_frame_flags_t frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time) { - VP9_COMMON *volatile const cm = &cpi->common; + VP9_COMMON *const cm = &cpi->common; struct vpx_usec_timer timer; - volatile int res = 0; + int res = 0; const int subsampling_x = sd->subsampling_x; const int subsampling_y = sd->subsampling_y; #if CONFIG_VP9_HIGHBITDEPTH const int use_highbitdepth = (sd->flags & YV12_FLAG_HIGHBITDEPTH) != 0; #endif - if (setjmp(cm->error.jmp)) { - cm->error.setjmp = 0; - return -1; - } - cm->error.setjmp = 1; - #if CONFIG_VP9_HIGHBITDEPTH check_initial_width(cpi, use_highbitdepth, subsampling_x, subsampling_y); #else @@ -4205,22 +4161,16 @@ int vp9_receive_raw_frame(VP9_COMP *cpi, unsigned int frame_flags, res = -1; } - cm->error.setjmp = 0; return res; } - static int frame_is_reference(const VP9_COMP *cpi) { const VP9_COMMON *cm = &cpi->common; - return cm->frame_type == KEY_FRAME || - cpi->refresh_last_frame || - cpi->refresh_golden_frame || - cpi->refresh_alt_ref_frame || - cm->refresh_frame_context || - cm->lf.mode_ref_delta_update || - cm->seg.update_map || - cm->seg.update_data; + return cm->frame_type == KEY_FRAME || cpi->refresh_last_frame || + cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame || + cm->refresh_frame_context || cm->lf.mode_ref_delta_update || + cm->seg.update_map || cm->seg.update_data; } static void adjust_frame_rate(VP9_COMP *cpi, @@ -4232,8 +4182,8 @@ static void adjust_frame_rate(VP9_COMP *cpi, this_duration = source->ts_end - source->ts_start; step = 1; } else { - int64_t last_duration = cpi->last_end_time_stamp_seen - - cpi->last_time_stamp_seen; + int64_t last_duration = + cpi->last_end_time_stamp_seen - cpi->last_time_stamp_seen; this_duration = source->ts_end - cpi->last_end_time_stamp_seen; @@ -4287,10 +4237,10 @@ static void check_src_altref(VP9_COMP *cpi, if (cpi->oxcf.pass == 2) { const 
GF_GROUP *const gf_group = &cpi->twopass.gf_group; rc->is_src_frame_alt_ref = - (gf_group->update_type[gf_group->index] == OVERLAY_UPDATE); + (gf_group->update_type[gf_group->index] == OVERLAY_UPDATE); } else { - rc->is_src_frame_alt_ref = cpi->alt_ref_source && - (source == cpi->alt_ref_source); + rc->is_src_frame_alt_ref = + cpi->alt_ref_source && (source == cpi->alt_ref_source); } if (rc->is_src_frame_alt_ref) { @@ -4305,8 +4255,8 @@ static void check_src_altref(VP9_COMP *cpi, #if CONFIG_INTERNAL_STATS extern double vp9_get_blockiness(const uint8_t *img1, int img1_pitch, - const uint8_t *img2, int img2_pitch, - int width, int height); + const uint8_t *img2, int img2_pitch, int width, + int height); static void adjust_image_stat(double y, double u, double v, double all, ImageStat *s) { @@ -4318,14 +4268,134 @@ static void adjust_image_stat(double y, double u, double v, double all, } #endif // CONFIG_INTERNAL_STATS +static void update_level_info(VP9_COMP *cpi, size_t *size, int arf_src_index) { + VP9_COMMON *const cm = &cpi->common; + Vp9LevelInfo *const level_info = &cpi->level_info; + Vp9LevelSpec *const level_spec = &level_info->level_spec; + Vp9LevelStats *const level_stats = &level_info->level_stats; + int i, idx; + uint64_t luma_samples, dur_end; + const uint32_t luma_pic_size = cm->width * cm->height; + double cpb_data_size; + + vpx_clear_system_state(); + + // update level_stats + level_stats->total_compressed_size += *size; + if (cm->show_frame) { + level_stats->total_uncompressed_size += + luma_pic_size + + 2 * (luma_pic_size >> (cm->subsampling_x + cm->subsampling_y)); + level_stats->time_encoded = + (cpi->last_end_time_stamp_seen - cpi->first_time_stamp_ever) / + (double)TICKS_PER_SEC; + } + + if (arf_src_index > 0) { + if (!level_stats->seen_first_altref) { + level_stats->seen_first_altref = 1; + } else if (level_stats->frames_since_last_altref < + level_spec->min_altref_distance) { + level_spec->min_altref_distance = 
level_stats->frames_since_last_altref; + } + level_stats->frames_since_last_altref = 0; + } else { + ++level_stats->frames_since_last_altref; + } + + if (level_stats->frame_window_buffer.len < FRAME_WINDOW_SIZE - 1) { + idx = (level_stats->frame_window_buffer.start + + level_stats->frame_window_buffer.len++) % + FRAME_WINDOW_SIZE; + } else { + idx = level_stats->frame_window_buffer.start; + level_stats->frame_window_buffer.start = (idx + 1) % FRAME_WINDOW_SIZE; + } + level_stats->frame_window_buffer.buf[idx].ts = cpi->last_time_stamp_seen; + level_stats->frame_window_buffer.buf[idx].size = (uint32_t)(*size); + level_stats->frame_window_buffer.buf[idx].luma_samples = luma_pic_size; + + if (cm->frame_type == KEY_FRAME) { + level_stats->ref_refresh_map = 0; + } else { + int count = 0; + level_stats->ref_refresh_map |= vp9_get_refresh_mask(cpi); + // Also need to consider the case where the encoder refers to a buffer + // that has been implicitly refreshed after encoding a keyframe. + if (!cm->intra_only) { + level_stats->ref_refresh_map |= (1 << cpi->lst_fb_idx); + level_stats->ref_refresh_map |= (1 << cpi->gld_fb_idx); + level_stats->ref_refresh_map |= (1 << cpi->alt_fb_idx); + } + for (i = 0; i < REF_FRAMES; ++i) { + count += (level_stats->ref_refresh_map >> i) & 1; + } + if (count > level_spec->max_ref_frame_buffers) { + level_spec->max_ref_frame_buffers = count; + } + } + + // update average_bitrate + level_spec->average_bitrate = (double)level_stats->total_compressed_size / + 125.0 / level_stats->time_encoded; + + // update max_luma_sample_rate + luma_samples = 0; + for (i = 0; i < level_stats->frame_window_buffer.len; ++i) { + idx = (level_stats->frame_window_buffer.start + + level_stats->frame_window_buffer.len - 1 - i) % + FRAME_WINDOW_SIZE; + if (i == 0) { + dur_end = level_stats->frame_window_buffer.buf[idx].ts; + } + if (dur_end - level_stats->frame_window_buffer.buf[idx].ts >= + TICKS_PER_SEC) { + break; + } + luma_samples += 
level_stats->frame_window_buffer.buf[idx].luma_samples; + } + if (luma_samples > level_spec->max_luma_sample_rate) { + level_spec->max_luma_sample_rate = luma_samples; + } + + // update max_cpb_size + cpb_data_size = 0; + for (i = 0; i < CPB_WINDOW_SIZE; ++i) { + if (i >= level_stats->frame_window_buffer.len) break; + idx = (level_stats->frame_window_buffer.start + + level_stats->frame_window_buffer.len - 1 - i) % + FRAME_WINDOW_SIZE; + cpb_data_size += level_stats->frame_window_buffer.buf[idx].size; + } + cpb_data_size = cpb_data_size / 125.0; + if (cpb_data_size > level_spec->max_cpb_size) { + level_spec->max_cpb_size = cpb_data_size; + } + + // update max_luma_picture_size + if (luma_pic_size > level_spec->max_luma_picture_size) { + level_spec->max_luma_picture_size = luma_pic_size; + } + + // update compression_ratio + level_spec->compression_ratio = (double)level_stats->total_uncompressed_size * + cm->bit_depth / + level_stats->total_compressed_size / 8.0; + + // update max_col_tiles + if (level_spec->max_col_tiles < (1 << cm->log2_tile_cols)) { + level_spec->max_col_tiles = (1 << cm->log2_tile_cols); + } +} + int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, - size_t *size, uint8_t *dest, - int64_t *time_stamp, int64_t *time_end, int flush) { + size_t *size, uint8_t *dest, int64_t *time_stamp, + int64_t *time_end, int flush) { const VP9EncoderConfig *const oxcf = &cpi->oxcf; VP9_COMMON *const cm = &cpi->common; BufferPool *const pool = cm->buffer_pool; RATE_CONTROL *const rc = &cpi->rc; - struct vpx_usec_timer cmptimer; + struct vpx_usec_timer cmptimer; YV12_BUFFER_CONFIG *force_src_buffer = NULL; struct lookahead_entry *last_source = NULL; struct lookahead_entry *source = NULL; @@ -4339,8 +4409,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, if (cpi->svc.encode_empty_frame_state == ENCODING) source = &cpi->svc.empty_frame; #endif - if (oxcf->pass == 2) - vp9_restore_layer_context(cpi); + if (oxcf->pass == 2) 
vp9_restore_layer_context(cpi); } else if (is_one_pass_cbr_svc(cpi)) { vp9_one_pass_cbr_svc_start_layer(cpi); } @@ -4352,8 +4421,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, // Is multi-arf enabled. // Note that at the moment multi_arf is only configured for 2 pass VBR and // will not work properly with svc. - if ((oxcf->pass == 2) && !cpi->use_svc && - (cpi->oxcf.enable_auto_arf > 1)) + if ((oxcf->pass == 2) && !cpi->use_svc && (cpi->oxcf.enable_auto_arf > 1)) cpi->multi_arf_allowed = 1; else cpi->multi_arf_allowed = 0; @@ -4371,8 +4439,21 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, arf_src_index = get_arf_src_index(cpi); // Skip alt frame if we encode the empty frame - if (is_two_pass_svc(cpi) && source != NULL) - arf_src_index = 0; + if (is_two_pass_svc(cpi) && source != NULL) arf_src_index = 0; + + if (arf_src_index) { + for (i = 0; i <= arf_src_index; ++i) { + struct lookahead_entry *e = vp9_lookahead_peek(cpi->lookahead, i); + // Avoid creating an alt-ref if there's a forced keyframe pending. + if (e == NULL) { + break; + } else if (e->flags == VPX_EFLAG_FORCE_KF) { + arf_src_index = 0; + flush = 1; + break; + } + } + } if (arf_src_index) { assert(arf_src_index <= rc->frames_to_key); @@ -4395,9 +4476,21 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, #endif if ((oxcf->arnr_max_frames > 0) && (oxcf->arnr_strength > 0)) { + int bitrate = cpi->rc.avg_frame_bandwidth / 40; + int not_low_bitrate = bitrate > ALT_REF_AQ_LOW_BITRATE_BOUNDARY; + + int not_last_frame = (cpi->lookahead->sz - arf_src_index > 1); + not_last_frame |= ALT_REF_AQ_APPLY_TO_LAST_FRAME; + // Produce the filtered ARF frame. 
vp9_temporal_filter(cpi, arf_src_index); vpx_extend_frame_borders(&cpi->alt_ref_buffer); + + // for small bitrates segmentation overhead usually + // eats all bitrate gain from enabling delta quantizers + if (cpi->oxcf.alt_ref_aq != 0 && not_low_bitrate && not_last_frame) + vp9_alt_ref_aq_setup_mode(cpi->alt_ref_aq, cpi); + force_src_buffer = &cpi->alt_ref_buffer; } @@ -4444,8 +4537,14 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, } if (source) { - cpi->un_scaled_source = cpi->Source = force_src_buffer ? force_src_buffer - : &source->img; + cpi->un_scaled_source = cpi->Source = + force_src_buffer ? force_src_buffer : &source->img; + +#ifdef ENABLE_KF_DENOISE + // Copy of raw source for metrics calculation. + if (is_psnr_calc_enabled(cpi)) + vp9_copy_and_extend_frame(cpi->Source, &cpi->raw_unscaled_source); +#endif cpi->unscaled_last_source = last_source != NULL ? &last_source->img : NULL; @@ -4456,7 +4555,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, } else { *size = 0; if (flush && oxcf->pass == 1 && !cpi->twopass.first_pass_done) { - vp9_end_first_pass(cpi); /* get last stats packet */ + vp9_end_first_pass(cpi); /* get last stats packet */ cpi->twopass.first_pass_done = 1; } return -1; @@ -4487,8 +4586,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, } cm->new_fb_idx = get_free_fb(cm); - if (cm->new_fb_idx == INVALID_IDX) - return -1; + if (cm->new_fb_idx == INVALID_IDX) return -1; cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx]; @@ -4507,39 +4605,33 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, cpi->frame_flags = *frame_flags; if ((oxcf->pass == 2) && - (!cpi->use_svc || - (is_two_pass_svc(cpi) && - cpi->svc.encode_empty_frame_state != ENCODING))) { + (!cpi->use_svc || (is_two_pass_svc(cpi) && + cpi->svc.encode_empty_frame_state != ENCODING))) { vp9_rc_get_second_pass_params(cpi); } else if (oxcf->pass == 1) { set_frame_size(cpi); } - if (cpi->oxcf.pass 
!= 0 || - cpi->use_svc || - frame_is_intra_only(cm) == 1) { - for (i = 0; i < MAX_REF_FRAMES; ++i) - cpi->scaled_ref_idx[i] = INVALID_IDX; + if (cpi->oxcf.pass != 0 || cpi->use_svc || frame_is_intra_only(cm) == 1) { + for (i = 0; i < MAX_REF_FRAMES; ++i) cpi->scaled_ref_idx[i] = INVALID_IDX; } - if (oxcf->pass == 1 && - (!cpi->use_svc || is_two_pass_svc(cpi))) { + if (oxcf->pass == 1 && (!cpi->use_svc || is_two_pass_svc(cpi))) { const int lossless = is_lossless_requested(oxcf); #if CONFIG_VP9_HIGHBITDEPTH if (cpi->oxcf.use_highbitdepth) - cpi->td.mb.fwd_txm4x4 = lossless ? - vp9_highbd_fwht4x4 : vpx_highbd_fdct4x4; + cpi->td.mb.fwd_txm4x4 = + lossless ? vp9_highbd_fwht4x4 : vpx_highbd_fdct4x4; else cpi->td.mb.fwd_txm4x4 = lossless ? vp9_fwht4x4 : vpx_fdct4x4; - cpi->td.mb.highbd_itxm_add = lossless ? vp9_highbd_iwht4x4_add : - vp9_highbd_idct4x4_add; + cpi->td.mb.highbd_itxm_add = + lossless ? vp9_highbd_iwht4x4_add : vp9_highbd_idct4x4_add; #else cpi->td.mb.fwd_txm4x4 = lossless ? vp9_fwht4x4 : vpx_fdct4x4; #endif // CONFIG_VP9_HIGHBITDEPTH cpi->td.mb.itxm_add = lossless ? vp9_iwht4x4_add : vp9_idct4x4_add; vp9_first_pass(cpi, source); - } else if (oxcf->pass == 2 && - (!cpi->use_svc || is_two_pass_svc(cpi))) { + } else if (oxcf->pass == 2 && (!cpi->use_svc || is_two_pass_svc(cpi))) { Pass2Encode(cpi, size, dest, frame_flags); } else if (cpi->use_svc) { SvcEncode(cpi, size, dest, frame_flags); @@ -4561,18 +4653,20 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, } // Save layer specific state. 
- if (is_one_pass_cbr_svc(cpi) || - ((cpi->svc.number_temporal_layers > 1 || - cpi->svc.number_spatial_layers > 1) && - oxcf->pass == 2)) { + if (is_one_pass_cbr_svc(cpi) || ((cpi->svc.number_temporal_layers > 1 || + cpi->svc.number_spatial_layers > 1) && + oxcf->pass == 2)) { vp9_save_layer_context(cpi); } vpx_usec_timer_mark(&cmptimer); cpi->time_compress_data += vpx_usec_timer_elapsed(&cmptimer); - if (cpi->b_calculate_psnr && oxcf->pass != 1 && cm->show_frame) - generate_psnr_packet(cpi); + // Should we calculate metrics for the frame. + if (is_psnr_calc_enabled(cpi)) generate_psnr_packet(cpi); + + if (cpi->keep_level_stats && oxcf->pass != 1) + update_level_info(cpi, size, arf_src_index); #if CONFIG_INTERNAL_STATS @@ -4581,18 +4675,26 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, cpi->bytes += (int)(*size); if (cm->show_frame) { + uint32_t bit_depth = 8; + uint32_t in_bit_depth = 8; cpi->count++; +#if CONFIG_VP9_HIGHBITDEPTH + if (cm->use_highbitdepth) { + in_bit_depth = cpi->oxcf.input_bit_depth; + bit_depth = cm->bit_depth; + } +#endif if (cpi->b_calculate_psnr) { - YV12_BUFFER_CONFIG *orig = cpi->Source; + YV12_BUFFER_CONFIG *orig = cpi->raw_source_frame; YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show; YV12_BUFFER_CONFIG *pp = &cm->post_proc_buffer; PSNR_STATS psnr; #if CONFIG_VP9_HIGHBITDEPTH - calc_highbd_psnr(orig, recon, &psnr, cpi->td.mb.e_mbd.bd, - cpi->oxcf.input_bit_depth); + vpx_calc_highbd_psnr(orig, recon, &psnr, cpi->td.mb.e_mbd.bd, + in_bit_depth); #else - calc_psnr(orig, recon, &psnr); + vpx_calc_psnr(orig, recon, &psnr); #endif // CONFIG_VP9_HIGHBITDEPTH adjust_image_stat(psnr.psnr[1], psnr.psnr[2], psnr.psnr[3], @@ -4605,28 +4707,31 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, PSNR_STATS psnr2; double frame_ssim2 = 0, weight = 0; #if CONFIG_VP9_POSTPROC - if (vpx_alloc_frame_buffer(&cm->post_proc_buffer, - recon->y_crop_width, recon->y_crop_height, - cm->subsampling_x, 
cm->subsampling_y, + if (vpx_alloc_frame_buffer( + pp, recon->y_crop_width, recon->y_crop_height, + cm->subsampling_x, cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH - cm->use_highbitdepth, + cm->use_highbitdepth, #endif - VP9_ENC_BORDER_IN_PIXELS, - cm->byte_alignment) < 0) { + VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment) < 0) { vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, "Failed to allocate post processing buffer"); } - - vp9_deblock(cm->frame_to_show, &cm->post_proc_buffer, - cm->lf.filter_level * 10 / 6); + { + vp9_ppflags_t ppflags; + ppflags.post_proc_flag = VP9D_DEBLOCK; + ppflags.deblocking_level = 0; // not used in vp9_post_proc_frame() + ppflags.noise_level = 0; // not used in vp9_post_proc_frame() + vp9_post_proc_frame(cm, pp, &ppflags); + } #endif vpx_clear_system_state(); #if CONFIG_VP9_HIGHBITDEPTH - calc_highbd_psnr(orig, pp, &psnr2, cpi->td.mb.e_mbd.bd, - cpi->oxcf.input_bit_depth); + vpx_calc_highbd_psnr(orig, pp, &psnr2, cpi->td.mb.e_mbd.bd, + cpi->oxcf.input_bit_depth); #else - calc_psnr(orig, pp, &psnr2); + vpx_calc_psnr(orig, pp, &psnr2); #endif // CONFIG_VP9_HIGHBITDEPTH cpi->totalp_sq_error += psnr2.sse[0]; @@ -4636,8 +4741,8 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) { - frame_ssim2 = vpx_highbd_calc_ssim(orig, recon, &weight, - (int)cm->bit_depth); + frame_ssim2 = vpx_highbd_calc_ssim(orig, recon, &weight, bit_depth, + in_bit_depth); } else { frame_ssim2 = vpx_calc_ssim(orig, recon, &weight); } @@ -4651,13 +4756,13 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) { - frame_ssim2 = vpx_highbd_calc_ssim( - orig, &cm->post_proc_buffer, &weight, (int)cm->bit_depth); + frame_ssim2 = vpx_highbd_calc_ssim(orig, pp, &weight, bit_depth, + in_bit_depth); } else { - frame_ssim2 = vpx_calc_ssim(orig, &cm->post_proc_buffer, &weight); + frame_ssim2 = vpx_calc_ssim(orig, pp, &weight); } 
#else - frame_ssim2 = vpx_calc_ssim(orig, &cm->post_proc_buffer, &weight); + frame_ssim2 = vpx_calc_ssim(orig, pp, &weight); #endif // CONFIG_VP9_HIGHBITDEPTH cpi->summedp_quality += frame_ssim2 * weight; @@ -4700,8 +4805,8 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, &cpi->metrics, 1); const double peak = (double)((1 << cpi->oxcf.input_bit_depth) - 1); - double consistency = vpx_sse_to_psnr(samples, peak, - (double)cpi->total_inconsistency); + double consistency = + vpx_sse_to_psnr(samples, peak, (double)cpi->total_inconsistency); if (consistency > 0.0) cpi->worst_consistency = VPXMIN(cpi->worst_consistency, consistency); @@ -4709,37 +4814,16 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, } } - if (cpi->b_calculate_ssimg) { - double y, u, v, frame_all; -#if CONFIG_VP9_HIGHBITDEPTH - if (cm->use_highbitdepth) { - frame_all = vpx_highbd_calc_ssimg(cpi->Source, cm->frame_to_show, &y, - &u, &v, (int)cm->bit_depth); - } else { - frame_all = vpx_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u, - &v); - } -#else - frame_all = vpx_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u, &v); -#endif // CONFIG_VP9_HIGHBITDEPTH - adjust_image_stat(y, u, v, frame_all, &cpi->ssimg); - } -#if CONFIG_VP9_HIGHBITDEPTH - if (!cm->use_highbitdepth) -#endif { double y, u, v, frame_all; frame_all = vpx_calc_fastssim(cpi->Source, cm->frame_to_show, &y, &u, - &v); + &v, bit_depth, in_bit_depth); adjust_image_stat(y, u, v, frame_all, &cpi->fastssim); - /* TODO(JBB): add 10/12 bit support */ } -#if CONFIG_VP9_HIGHBITDEPTH - if (!cm->use_highbitdepth) -#endif { double y, u, v, frame_all; - frame_all = vpx_psnrhvs(cpi->Source, cm->frame_to_show, &y, &u, &v); + frame_all = vpx_psnrhvs(cpi->Source, cm->frame_to_show, &y, &u, &v, + bit_depth, in_bit_depth); adjust_image_stat(y, u, v, frame_all, &cpi->psnrhvs); } } @@ -4768,6 +4852,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, cpi->svc.spatial_layer_to_encode = 0; 
} } + vpx_clear_system_state(); return 0; } @@ -4802,13 +4887,12 @@ int vp9_get_preview_raw_frame(VP9_COMP *cpi, YV12_BUFFER_CONFIG *dest, } } -int vp9_set_internal_size(VP9_COMP *cpi, - VPX_SCALING horiz_mode, VPX_SCALING vert_mode) { +int vp9_set_internal_size(VP9_COMP *cpi, VPX_SCALING horiz_mode, + VPX_SCALING vert_mode) { VP9_COMMON *cm = &cpi->common; int hr = 0, hs = 0, vr = 0, vs = 0; - if (horiz_mode > ONETWO || vert_mode > ONETWO) - return -1; + if (horiz_mode > ONETWO || vert_mode > ONETWO) return -1; Scale2Ratio(horiz_mode, &hr, &hs); Scale2Ratio(vert_mode, &vr, &vs); @@ -4867,62 +4951,32 @@ void vp9_set_svc(VP9_COMP *cpi, int use_svc) { return; } -int64_t vp9_get_y_sse(const YV12_BUFFER_CONFIG *a, - const YV12_BUFFER_CONFIG *b) { - assert(a->y_crop_width == b->y_crop_width); - assert(a->y_crop_height == b->y_crop_height); - - return get_sse(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride, - a->y_crop_width, a->y_crop_height); -} - -#if CONFIG_VP9_HIGHBITDEPTH -int64_t vp9_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a, - const YV12_BUFFER_CONFIG *b) { - assert(a->y_crop_width == b->y_crop_width); - assert(a->y_crop_height == b->y_crop_height); - assert((a->flags & YV12_FLAG_HIGHBITDEPTH) != 0); - assert((b->flags & YV12_FLAG_HIGHBITDEPTH) != 0); - - return highbd_get_sse(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride, - a->y_crop_width, a->y_crop_height); -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -int vp9_get_quantizer(VP9_COMP *cpi) { - return cpi->common.base_qindex; -} +int vp9_get_quantizer(VP9_COMP *cpi) { return cpi->common.base_qindex; } void vp9_apply_encoding_flags(VP9_COMP *cpi, vpx_enc_frame_flags_t flags) { - if (flags & (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF | - VP8_EFLAG_NO_REF_ARF)) { + if (flags & + (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF)) { int ref = 7; - if (flags & VP8_EFLAG_NO_REF_LAST) - ref ^= VP9_LAST_FLAG; + if (flags & VP8_EFLAG_NO_REF_LAST) ref ^= VP9_LAST_FLAG; - if (flags & 
VP8_EFLAG_NO_REF_GF) - ref ^= VP9_GOLD_FLAG; + if (flags & VP8_EFLAG_NO_REF_GF) ref ^= VP9_GOLD_FLAG; - if (flags & VP8_EFLAG_NO_REF_ARF) - ref ^= VP9_ALT_FLAG; + if (flags & VP8_EFLAG_NO_REF_ARF) ref ^= VP9_ALT_FLAG; vp9_use_as_reference(cpi, ref); } - if (flags & (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | - VP8_EFLAG_NO_UPD_ARF | VP8_EFLAG_FORCE_GF | - VP8_EFLAG_FORCE_ARF)) { + if (flags & + (VP8_EFLAG_NO_UPD_LAST | VP8_EFLAG_NO_UPD_GF | VP8_EFLAG_NO_UPD_ARF | + VP8_EFLAG_FORCE_GF | VP8_EFLAG_FORCE_ARF)) { int upd = 7; - if (flags & VP8_EFLAG_NO_UPD_LAST) - upd ^= VP9_LAST_FLAG; + if (flags & VP8_EFLAG_NO_UPD_LAST) upd ^= VP9_LAST_FLAG; - if (flags & VP8_EFLAG_NO_UPD_GF) - upd ^= VP9_GOLD_FLAG; + if (flags & VP8_EFLAG_NO_UPD_GF) upd ^= VP9_GOLD_FLAG; - if (flags & VP8_EFLAG_NO_UPD_ARF) - upd ^= VP9_ALT_FLAG; + if (flags & VP8_EFLAG_NO_UPD_ARF) upd ^= VP9_ALT_FLAG; vp9_update_reference(cpi, upd); } diff --git a/libs/libvpx/vp9/encoder/vp9_encoder.h b/libs/libvpx/vp9/encoder/vp9_encoder.h index c486ac2589..66e41492b5 100644 --- a/libs/libvpx/vp9/encoder/vp9_encoder.h +++ b/libs/libvpx/vp9/encoder/vp9_encoder.h @@ -20,6 +20,7 @@ #include "vpx_dsp/ssim.h" #endif #include "vpx_dsp/variance.h" +#include "vpx_ports/system_state.h" #include "vpx_util/vpx_thread.h" #include "vp9/common/vp9_alloccommon.h" @@ -28,6 +29,7 @@ #include "vp9/common/vp9_thread_common.h" #include "vp9/common/vp9_onyxc_int.h" +#include "vp9/encoder/vp9_alt_ref_aq.h" #include "vp9/encoder/vp9_aq_cyclicrefresh.h" #include "vp9/encoder/vp9_context_tree.h" #include "vp9/encoder/vp9_encodemb.h" @@ -51,6 +53,9 @@ extern "C" { #endif +// vp9 uses 10,000,000 ticks/second as time stamp +#define TICKS_PER_SEC 10000000 + typedef struct { int nmvjointcost[MV_JOINTS]; int nmvcosts[2][MV_VALS]; @@ -68,7 +73,6 @@ typedef struct { FRAME_CONTEXT fc; } CODING_CONTEXT; - typedef enum { // encode_breakout is disabled. 
ENCODE_BREAKOUT_DISABLED = 0, @@ -79,10 +83,10 @@ typedef enum { } ENCODE_BREAKOUT_TYPE; typedef enum { - NORMAL = 0, - FOURFIVE = 1, - THREEFIVE = 2, - ONETWO = 3 + NORMAL = 0, + FOURFIVE = 1, + THREEFIVE = 2, + ONETWO = 3 } VPX_SCALING; typedef enum { @@ -102,7 +106,7 @@ typedef enum { } MODE; typedef enum { - FRAMEFLAGS_KEY = 1 << 0, + FRAMEFLAGS_KEY = 1 << 0, FRAMEFLAGS_GOLDEN = 1 << 1, FRAMEFLAGS_ALTREF = 1 << 2, } FRAMETYPE_FLAGS; @@ -113,6 +117,9 @@ typedef enum { COMPLEXITY_AQ = 2, CYCLIC_REFRESH_AQ = 3, EQUATOR360_AQ = 4, + // AQ based on lookahead temporal + // variance (only valid for altref frames) + LOOKAHEAD_AQ = 5, AQ_MODE_COUNT // This should always be the last member of the enum } AQ_MODE; @@ -125,14 +132,14 @@ typedef enum { typedef struct VP9EncoderConfig { BITSTREAM_PROFILE profile; vpx_bit_depth_t bit_depth; // Codec bit-depth. - int width; // width of data passed to the compressor - int height; // height of data passed to the compressor + int width; // width of data passed to the compressor + int height; // height of data passed to the compressor unsigned int input_bit_depth; // Input bit depth. - double init_framerate; // set to passed in framerate - int64_t target_bandwidth; // bandwidth to be used in kilobits per second + double init_framerate; // set to passed in framerate + int64_t target_bandwidth; // bandwidth to be used in bits per second int noise_sensitivity; // pre processing blur: recommendation 0 - int sharpness; // sharpening output: recommendation 0: + int sharpness; // sharpening output: recommendation 0: int speed; // maximum allowed bitrate for any intra frame in % of bitrate target. unsigned int rc_max_intra_bitrate_pct; @@ -175,6 +182,9 @@ typedef struct VP9EncoderConfig { int cq_level; AQ_MODE aq_mode; // Adaptive Quantization mode + // Special handling of Adaptive Quantization for AltRef frames + int alt_ref_aq; + // Internal frame size scaling. 
RESIZE_TYPE resize_mode; int scaled_frame_width; @@ -184,7 +194,7 @@ typedef struct VP9EncoderConfig { int frame_periodic_boost; // two pass datarate control - int two_pass_vbrbias; // two pass datarate control tweaks + int two_pass_vbrbias; // two pass datarate control tweaks int two_pass_vbrmin_section; int two_pass_vbrmax_section; // END DATARATE CONTROL OPTIONS @@ -227,6 +237,8 @@ typedef struct VP9EncoderConfig { int max_threads; + int target_level; + vpx_fixed_buf_t two_pass_stats_in; struct vpx_codec_pkt_list *output_pkt_list; @@ -283,18 +295,79 @@ typedef struct ActiveMap { unsigned char *map; } ActiveMap; -typedef enum { - Y, - U, - V, - ALL -} STAT_TYPE; +typedef enum { Y, U, V, ALL } STAT_TYPE; typedef struct IMAGE_STAT { - double stat[ALL+1]; + double stat[ALL + 1]; double worst; } ImageStat; +// Kf noise filtering currently disabled by default in build. +// #define ENABLE_KF_DENOISE 1 + +#define CPB_WINDOW_SIZE 4 +#define FRAME_WINDOW_SIZE 128 +#define SAMPLE_RATE_GRACE_P 0.015 +#define VP9_LEVELS 14 + +typedef enum { + LEVEL_UNKNOWN = 0, + LEVEL_1 = 10, + LEVEL_1_1 = 11, + LEVEL_2 = 20, + LEVEL_2_1 = 21, + LEVEL_3 = 30, + LEVEL_3_1 = 31, + LEVEL_4 = 40, + LEVEL_4_1 = 41, + LEVEL_5 = 50, + LEVEL_5_1 = 51, + LEVEL_5_2 = 52, + LEVEL_6 = 60, + LEVEL_6_1 = 61, + LEVEL_6_2 = 62, + LEVEL_MAX = 255 +} VP9_LEVEL; + +typedef struct { + VP9_LEVEL level; + uint64_t max_luma_sample_rate; + uint32_t max_luma_picture_size; + double average_bitrate; // in kilobits per second + double max_cpb_size; // in kilobits + double compression_ratio; + uint8_t max_col_tiles; + uint32_t min_altref_distance; + uint8_t max_ref_frame_buffers; +} Vp9LevelSpec; + +typedef struct { + int64_t ts; // timestamp + uint32_t luma_samples; + uint32_t size; // in bytes +} FrameRecord; + +typedef struct { + FrameRecord buf[FRAME_WINDOW_SIZE]; + uint8_t start; + uint8_t len; +} FrameWindowBuffer; + +typedef struct { + uint8_t seen_first_altref; + uint32_t frames_since_last_altref; + uint64_t 
total_compressed_size; + uint64_t total_uncompressed_size; + double time_encoded; // in seconds + FrameWindowBuffer frame_window_buffer; + int ref_refresh_map; +} Vp9LevelStats; + +typedef struct { + Vp9LevelStats level_stats; + Vp9LevelSpec level_spec; +} Vp9LevelInfo; + typedef struct VP9_COMP { QUANTS quants; ThreadData td; @@ -303,8 +376,8 @@ typedef struct VP9_COMP { DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][8]); VP9_COMMON common; VP9EncoderConfig oxcf; - struct lookahead_ctx *lookahead; - struct lookahead_entry *alt_ref_source; + struct lookahead_ctx *lookahead; + struct lookahead_entry *alt_ref_source; YV12_BUFFER_CONFIG *Source; YV12_BUFFER_CONFIG *Last_Source; // NULL for first frame and alt_ref frames @@ -312,6 +385,11 @@ typedef struct VP9_COMP { YV12_BUFFER_CONFIG scaled_source; YV12_BUFFER_CONFIG *unscaled_last_source; YV12_BUFFER_CONFIG scaled_last_source; +#ifdef ENABLE_KF_DENOISE + YV12_BUFFER_CONFIG raw_unscaled_source; + YV12_BUFFER_CONFIG raw_scaled_source; +#endif + YV12_BUFFER_CONFIG *raw_source_frame; TileDataEnc *tile_data; int allocated_tiles; // Keep track of memory allocated for tiles. 
@@ -339,7 +417,7 @@ typedef struct VP9_COMP { YV12_BUFFER_CONFIG last_frame_uf; TOKENEXTRA *tile_tok[4][1 << 6]; - unsigned int tok_count[4][1 << 6]; + uint32_t tok_count[4][1 << 6]; // Ambient reconstruction err target for force key frames int64_t ambient_err; @@ -362,16 +440,16 @@ typedef struct VP9_COMP { int interp_filter_selected[MAX_REF_FRAMES][SWITCHABLE]; - struct vpx_codec_pkt_list *output_pkt_list; + struct vpx_codec_pkt_list *output_pkt_list; MBGRAPH_FRAME_STATS mbgraph_stats[MAX_LAG_BUFFERS]; - int mbgraph_n_frames; // number of frames filled in the above - int static_mb_pct; // % forced skip mbs by segmentation + int mbgraph_n_frames; // number of frames filled in the above + int static_mb_pct; // % forced skip mbs by segmentation int ref_frame_flags; SPEED_FEATURES sf; - unsigned int max_mv_magnitude; + uint32_t max_mv_magnitude; int mv_step_param; int allow_comp_inter_inter; @@ -383,10 +461,10 @@ typedef struct VP9_COMP { // clips, and 300 for < HD clips. int encode_breakout; - unsigned char *segmentation_map; + uint8_t *segmentation_map; // segment threashold for encode breakout - int segment_encode_breakout[MAX_SEGMENTS]; + int segment_encode_breakout[MAX_SEGMENTS]; CYCLIC_REFRESH *cyclic_refresh; ActiveMap active_map; @@ -406,13 +484,19 @@ typedef struct VP9_COMP { TWO_PASS twopass; + // Force recalculation of segment_ids for each mode info + uint8_t force_update_segmentation; + YV12_BUFFER_CONFIG alt_ref_buffer; + // class responsible for adaptive + // quantization of altref frames + struct ALT_REF_AQ *alt_ref_aq; #if CONFIG_INTERNAL_STATS unsigned int mode_chosen_counts[MAX_MODES]; - int count; + int count; uint64_t total_sq_error; uint64_t total_samples; ImageStat psnr; @@ -424,7 +508,7 @@ typedef struct VP9_COMP { double total_blockiness; double worst_blockiness; - int bytes; + int bytes; double summed_quality; double summed_weights; double summedp_quality; @@ -497,8 +581,13 @@ typedef struct VP9_COMP { int use_skin_detection; + int 
target_level; + NOISE_ESTIMATE noise_estimate; + // Count on how many consecutive times a block uses small/zeromv for encoding. + uint8_t *consec_zero_mv; + // VAR_BASED_PARTITION thresholds // 0 - threshold_64x64; 1 - threshold_32x32; // 2 - threshold_16x16; 3 - vbp_threshold_8x8; @@ -512,6 +601,9 @@ typedef struct VP9_COMP { VPxWorker *workers; struct EncWorkerData *tile_thr_data; VP9LfSync lf_row_sync; + + int keep_level_stats; + Vp9LevelInfo level_info; } VP9_COMP; void vp9_initialize_enc(void); @@ -522,15 +614,15 @@ void vp9_remove_compressor(VP9_COMP *cpi); void vp9_change_config(VP9_COMP *cpi, const VP9EncoderConfig *oxcf); - // receive a frames worth of data. caller can assume that a copy of this - // frame is made and not just a copy of the pointer.. -int vp9_receive_raw_frame(VP9_COMP *cpi, unsigned int frame_flags, +// receive a frames worth of data. caller can assume that a copy of this +// frame is made and not just a copy of the pointer.. +int vp9_receive_raw_frame(VP9_COMP *cpi, vpx_enc_frame_flags_t frame_flags, YV12_BUFFER_CONFIG *sd, int64_t time_stamp, int64_t end_time_stamp); int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, - size_t *size, uint8_t *dest, - int64_t *time_stamp, int64_t *time_end, int flush); + size_t *size, uint8_t *dest, int64_t *time_stamp, + int64_t *time_end, int flush); int vp9_get_preview_raw_frame(VP9_COMP *cpi, YV12_BUFFER_CONFIG *dest, vp9_ppflags_t *flags); @@ -551,8 +643,8 @@ int vp9_set_active_map(VP9_COMP *cpi, unsigned char *map, int rows, int cols); int vp9_get_active_map(VP9_COMP *cpi, unsigned char *map, int rows, int cols); -int vp9_set_internal_size(VP9_COMP *cpi, - VPX_SCALING horiz_mode, VPX_SCALING vert_mode); +int vp9_set_internal_size(VP9_COMP *cpi, VPX_SCALING horiz_mode, + VPX_SCALING vert_mode); int vp9_set_size_literal(VP9_COMP *cpi, unsigned int width, unsigned int height); @@ -562,8 +654,7 @@ void vp9_set_svc(VP9_COMP *cpi, int use_svc); int vp9_get_quantizer(struct VP9_COMP 
*cpi); static INLINE int frame_is_kf_gf_arf(const VP9_COMP *cpi) { - return frame_is_intra_only(&cpi->common) || - cpi->refresh_alt_ref_frame || + return frame_is_intra_only(&cpi->common) || cpi->refresh_alt_ref_frame || (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref); } @@ -589,8 +680,8 @@ static INLINE YV12_BUFFER_CONFIG *get_ref_frame_buffer( VP9_COMP *cpi, MV_REFERENCE_FRAME ref_frame) { VP9_COMMON *const cm = &cpi->common; const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame); - return - buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf : NULL; + return buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf + : NULL; } static INLINE int get_token_alloc(int mb_rows, int mb_cols) { @@ -623,6 +714,11 @@ void vp9_update_reference_frames(VP9_COMP *cpi); void vp9_set_high_precision_mv(VP9_COMP *cpi, int allow_high_precision_mv); +YV12_BUFFER_CONFIG *vp9_svc_twostage_scale(VP9_COMMON *cm, + YV12_BUFFER_CONFIG *unscaled, + YV12_BUFFER_CONFIG *scaled, + YV12_BUFFER_CONFIG *scaled_temp); + YV12_BUFFER_CONFIG *vp9_scale_if_required(VP9_COMMON *cm, YV12_BUFFER_CONFIG *unscaled, YV12_BUFFER_CONFIG *scaled, @@ -648,10 +744,10 @@ static INLINE int is_altref_enabled(const VP9_COMP *const cpi) { static INLINE void set_ref_ptrs(VP9_COMMON *cm, MACROBLOCKD *xd, MV_REFERENCE_FRAME ref0, MV_REFERENCE_FRAME ref1) { - xd->block_refs[0] = &cm->frame_refs[ref0 >= LAST_FRAME ? ref0 - LAST_FRAME - : 0]; - xd->block_refs[1] = &cm->frame_refs[ref1 >= LAST_FRAME ? ref1 - LAST_FRAME - : 0]; + xd->block_refs[0] = + &cm->frame_refs[ref0 >= LAST_FRAME ? ref0 - LAST_FRAME : 0]; + xd->block_refs[1] = + &cm->frame_refs[ref1 >= LAST_FRAME ? ref1 - LAST_FRAME : 0]; } static INLINE int get_chessboard_index(const int frame_index) { @@ -662,6 +758,8 @@ static INLINE int *cond_cost_list(const struct VP9_COMP *cpi, int *cost_list) { return cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? 
cost_list : NULL; } +VP9_LEVEL vp9_get_level(const Vp9LevelSpec *const level_spec); + void vp9_new_framerate(VP9_COMP *cpi, double framerate); #define LAYER_IDS_TO_IDX(sl, tl, num_tl) ((sl) * (num_tl) + (tl)) diff --git a/libs/libvpx/vp9/encoder/vp9_ethread.c b/libs/libvpx/vp9/encoder/vp9_ethread.c index 1d1926caeb..7657573bbf 100644 --- a/libs/libvpx/vp9/encoder/vp9_ethread.c +++ b/libs/libvpx/vp9/encoder/vp9_ethread.c @@ -43,10 +43,10 @@ static int enc_worker_hook(EncWorkerData *const thread_data, void *unused) { const int tile_rows = 1 << cm->log2_tile_rows; int t; - (void) unused; + (void)unused; for (t = thread_data->start; t < tile_rows * tile_cols; - t += cpi->num_workers) { + t += cpi->num_workers) { int tile_row = t / tile_cols; int tile_col = t % tile_cols; @@ -63,8 +63,8 @@ static int get_max_tile_cols(VP9_COMP *cpi) { int log2_tile_cols; vp9_get_tile_n_bits(mi_cols, &min_log2_tile_cols, &max_log2_tile_cols); - log2_tile_cols = clamp(cpi->oxcf.tile_columns, - min_log2_tile_cols, max_log2_tile_cols); + log2_tile_cols = + clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols); return (1 << log2_tile_cols); } @@ -92,8 +92,7 @@ void vp9_encode_tiles_mt(VP9_COMP *cpi) { vpx_malloc(allocated_workers * sizeof(*cpi->workers))); CHECK_MEM_ERROR(cm, cpi->tile_thr_data, - vpx_calloc(allocated_workers, - sizeof(*cpi->tile_thr_data))); + vpx_calloc(allocated_workers, sizeof(*cpi->tile_thr_data))); for (i = 0; i < allocated_workers; i++) { VPxWorker *const worker = &cpi->workers[i]; @@ -140,7 +139,7 @@ void vp9_encode_tiles_mt(VP9_COMP *cpi) { worker->hook = (VPxWorkerHook)enc_worker_hook; worker->data1 = &cpi->tile_thr_data[i]; worker->data2 = NULL; - thread_data = (EncWorkerData*)worker->data1; + thread_data = (EncWorkerData *)worker->data1; // Before encoding a frame, copy the thread data from cpi. 
if (thread_data->td != &cpi->td) { @@ -173,7 +172,7 @@ void vp9_encode_tiles_mt(VP9_COMP *cpi) { // Encode a frame for (i = 0; i < num_workers; i++) { VPxWorker *const worker = &cpi->workers[i]; - EncWorkerData *const thread_data = (EncWorkerData*)worker->data1; + EncWorkerData *const thread_data = (EncWorkerData *)worker->data1; // Set the starting tile for each thread. thread_data->start = i; @@ -192,7 +191,7 @@ void vp9_encode_tiles_mt(VP9_COMP *cpi) { for (i = 0; i < num_workers; i++) { VPxWorker *const worker = &cpi->workers[i]; - EncWorkerData *const thread_data = (EncWorkerData*)worker->data1; + EncWorkerData *const thread_data = (EncWorkerData *)worker->data1; // Accumulate counters. if (i < cpi->num_workers - 1) { diff --git a/libs/libvpx/vp9/encoder/vp9_extend.c b/libs/libvpx/vp9/encoder/vp9_extend.c index 92585b82a4..f8e24610ae 100644 --- a/libs/libvpx/vp9/encoder/vp9_extend.c +++ b/libs/libvpx/vp9/encoder/vp9_extend.c @@ -16,8 +16,7 @@ #include "vp9/encoder/vp9_extend.h" static void copy_and_extend_plane(const uint8_t *src, int src_pitch, - uint8_t *dst, int dst_pitch, - int w, int h, + uint8_t *dst, int dst_pitch, int w, int h, int extend_top, int extend_left, int extend_bottom, int extend_right) { int i, linesize; @@ -43,7 +42,7 @@ static void copy_and_extend_plane(const uint8_t *src, int src_pitch, src_ptr1 = dst - extend_left; src_ptr2 = dst + dst_pitch * (h - 1) - extend_left; dst_ptr1 = dst + dst_pitch * (-extend_top) - extend_left; - dst_ptr2 = dst + dst_pitch * (h) - extend_left; + dst_ptr2 = dst + dst_pitch * (h)-extend_left; linesize = extend_left + extend_right + w; for (i = 0; i < extend_top; i++) { @@ -59,9 +58,8 @@ static void copy_and_extend_plane(const uint8_t *src, int src_pitch, #if CONFIG_VP9_HIGHBITDEPTH static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch, - uint8_t *dst8, int dst_pitch, - int w, int h, - int extend_top, int extend_left, + uint8_t *dst8, int dst_pitch, int w, + int h, int extend_top, int 
extend_left, int extend_bottom, int extend_right) { int i, linesize; uint16_t *src = CONVERT_TO_SHORTPTR(src8); @@ -88,7 +86,7 @@ static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch, src_ptr1 = dst - extend_left; src_ptr2 = dst + dst_pitch * (h - 1) - extend_left; dst_ptr1 = dst + dst_pitch * (-extend_top) - extend_left; - dst_ptr2 = dst + dst_pitch * (h) - extend_left; + dst_ptr2 = dst + dst_pitch * (h)-extend_left; linesize = extend_left + extend_right + w; for (i = 0; i < extend_top; i++) { @@ -127,51 +125,46 @@ void vp9_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src, #if CONFIG_VP9_HIGHBITDEPTH if (src->flags & YV12_FLAG_HIGHBITDEPTH) { - highbd_copy_and_extend_plane(src->y_buffer, src->y_stride, - dst->y_buffer, dst->y_stride, - src->y_crop_width, src->y_crop_height, - et_y, el_y, eb_y, er_y); + highbd_copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer, + dst->y_stride, src->y_crop_width, + src->y_crop_height, et_y, el_y, eb_y, er_y); - highbd_copy_and_extend_plane(src->u_buffer, src->uv_stride, - dst->u_buffer, dst->uv_stride, - src->uv_crop_width, src->uv_crop_height, - et_uv, el_uv, eb_uv, er_uv); + highbd_copy_and_extend_plane( + src->u_buffer, src->uv_stride, dst->u_buffer, dst->uv_stride, + src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv); - highbd_copy_and_extend_plane(src->v_buffer, src->uv_stride, - dst->v_buffer, dst->uv_stride, - src->uv_crop_width, src->uv_crop_height, - et_uv, el_uv, eb_uv, er_uv); + highbd_copy_and_extend_plane( + src->v_buffer, src->uv_stride, dst->v_buffer, dst->uv_stride, + src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv); return; } #endif // CONFIG_VP9_HIGHBITDEPTH - copy_and_extend_plane(src->y_buffer, src->y_stride, - dst->y_buffer, dst->y_stride, - src->y_crop_width, src->y_crop_height, + copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer, + dst->y_stride, src->y_crop_width, src->y_crop_height, et_y, el_y, eb_y, er_y); - 
copy_and_extend_plane(src->u_buffer, src->uv_stride, - dst->u_buffer, dst->uv_stride, - src->uv_crop_width, src->uv_crop_height, + copy_and_extend_plane(src->u_buffer, src->uv_stride, dst->u_buffer, + dst->uv_stride, src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv); - copy_and_extend_plane(src->v_buffer, src->uv_stride, - dst->v_buffer, dst->uv_stride, - src->uv_crop_width, src->uv_crop_height, + copy_and_extend_plane(src->v_buffer, src->uv_stride, dst->v_buffer, + dst->uv_stride, src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv); } void vp9_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src, - YV12_BUFFER_CONFIG *dst, - int srcy, int srcx, - int srch, int srcw) { + YV12_BUFFER_CONFIG *dst, int srcy, + int srcx, int srch, int srcw) { // If the side is not touching the bounder then don't extend. const int et_y = srcy ? 0 : dst->border; const int el_y = srcx ? 0 : dst->border; - const int eb_y = srcy + srch != src->y_height ? 0 : - dst->border + dst->y_height - src->y_height; - const int er_y = srcx + srcw != src->y_width ? 0 : - dst->border + dst->y_width - src->y_width; + const int eb_y = srcy + srch != src->y_height + ? 0 + : dst->border + dst->y_height - src->y_height; + const int er_y = srcx + srcw != src->y_width + ? 
0 + : dst->border + dst->y_width - src->y_width; const int src_y_offset = srcy * src->y_stride + srcx; const int dst_y_offset = srcy * dst->y_stride + srcx; @@ -185,17 +178,14 @@ void vp9_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src, const int srcw_uv = ROUND_POWER_OF_TWO(srcw, 1); copy_and_extend_plane(src->y_buffer + src_y_offset, src->y_stride, - dst->y_buffer + dst_y_offset, dst->y_stride, - srcw, srch, + dst->y_buffer + dst_y_offset, dst->y_stride, srcw, srch, et_y, el_y, eb_y, er_y); copy_and_extend_plane(src->u_buffer + src_uv_offset, src->uv_stride, - dst->u_buffer + dst_uv_offset, dst->uv_stride, - srcw_uv, srch_uv, - et_uv, el_uv, eb_uv, er_uv); + dst->u_buffer + dst_uv_offset, dst->uv_stride, srcw_uv, + srch_uv, et_uv, el_uv, eb_uv, er_uv); copy_and_extend_plane(src->v_buffer + src_uv_offset, src->uv_stride, - dst->v_buffer + dst_uv_offset, dst->uv_stride, - srcw_uv, srch_uv, - et_uv, el_uv, eb_uv, er_uv); + dst->v_buffer + dst_uv_offset, dst->uv_stride, srcw_uv, + srch_uv, et_uv, el_uv, eb_uv, er_uv); } diff --git a/libs/libvpx/vp9/encoder/vp9_extend.h b/libs/libvpx/vp9/encoder/vp9_extend.h index 058fe09cf9..c0dd757159 100644 --- a/libs/libvpx/vp9/encoder/vp9_extend.h +++ b/libs/libvpx/vp9/encoder/vp9_extend.h @@ -18,14 +18,12 @@ extern "C" { #endif - void vp9_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst); void vp9_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src, - YV12_BUFFER_CONFIG *dst, - int srcy, int srcx, - int srch, int srcw); + YV12_BUFFER_CONFIG *dst, int srcy, + int srcx, int srch, int srcw); #ifdef __cplusplus } // extern "C" #endif diff --git a/libs/libvpx/vp9/encoder/vp9_firstpass.c b/libs/libvpx/vp9/encoder/vp9_firstpass.c index 9d3b15407b..1c716a1028 100644 --- a/libs/libvpx/vp9/encoder/vp9_firstpass.c +++ b/libs/libvpx/vp9/encoder/vp9_firstpass.c @@ -38,36 +38,33 @@ #include "vp9/encoder/vp9_rd.h" #include "vpx_dsp/variance.h" -#define OUTPUT_FPF 0 -#define ARF_STATS_OUTPUT 0 
+#define OUTPUT_FPF 0 +#define ARF_STATS_OUTPUT 0 -#define GROUP_ADAPTIVE_MAXQ 1 - -#define BOOST_BREAKOUT 12.5 -#define BOOST_FACTOR 12.5 -#define ERR_DIVISOR 128.0 -#define FACTOR_PT_LOW 0.70 -#define FACTOR_PT_HIGH 0.90 -#define FIRST_PASS_Q 10.0 -#define GF_MAX_BOOST 96.0 -#define INTRA_MODE_PENALTY 1024 -#define KF_MAX_BOOST 128.0 -#define MIN_ARF_GF_BOOST 240 -#define MIN_DECAY_FACTOR 0.01 -#define MIN_KF_BOOST 300 +#define BOOST_BREAKOUT 12.5 +#define BOOST_FACTOR 12.5 +#define FACTOR_PT_LOW 0.70 +#define FACTOR_PT_HIGH 0.90 +#define FIRST_PASS_Q 10.0 +#define GF_MAX_BOOST 96.0 +#define INTRA_MODE_PENALTY 1024 +#define KF_MAX_BOOST 128.0 +#define MIN_ARF_GF_BOOST 240 +#define MIN_DECAY_FACTOR 0.01 +#define MIN_KF_BOOST 300 #define NEW_MV_MODE_PENALTY 32 -#define SVC_FACTOR_PT_LOW 0.45 -#define DARK_THRESH 64 -#define DEFAULT_GRP_WEIGHT 1.0 -#define RC_FACTOR_MIN 0.75 -#define RC_FACTOR_MAX 1.75 - +#define SVC_FACTOR_PT_LOW 0.45 +#define DARK_THRESH 64 +#define DEFAULT_GRP_WEIGHT 1.0 +#define RC_FACTOR_MIN 0.75 +#define RC_FACTOR_MAX 1.75 +#define SECTION_NOISE_DEF 250.0 +#define LOW_I_THRESH 24000 #define NCOUNT_INTRA_THRESH 8192 #define NCOUNT_INTRA_FACTOR 3 -#define NCOUNT_FRAME_II_THRESH 5.0 -#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x) - 0.000001 : (x) + 0.000001) +#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x)-0.000001 : (x) + 0.000001) #if ARF_STATS_OUTPUT unsigned int arf_count = 0; @@ -75,8 +72,7 @@ unsigned int arf_count = 0; // Resets the first pass file to the given position using a relative seek from // the current position. 
-static void reset_fpf_position(TWO_PASS *p, - const FIRSTPASS_STATS *position) { +static void reset_fpf_position(TWO_PASS *p, const FIRSTPASS_STATS *position) { p->stats_in = position; } @@ -91,8 +87,7 @@ static const FIRSTPASS_STATS *read_frame_stats(const TWO_PASS *p, int offset) { } static int input_stats(TWO_PASS *p, FIRSTPASS_STATS *fps) { - if (p->stats_in >= p->stats_in_end) - return EOF; + if (p->stats_in >= p->stats_in_end) return EOF; *fps = *p->stats_in; ++p->stats_in; @@ -113,31 +108,19 @@ static void output_stats(FIRSTPASS_STATS *stats, FILE *fpfile; fpfile = fopen("firstpass.stt", "a"); - fprintf(fpfile, "%12.0lf %12.4lf %12.0lf %12.0lf %12.0lf %12.4lf %12.4lf" + fprintf(fpfile, + "%12.0lf %12.4lf %12.0lf %12.0lf %12.0lf %12.0lf %12.4lf" "%12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf %12.4lf" - "%12.4lf %12.4lf %12.0lf %12.0lf %12.0lf %12.4lf\n", - stats->frame, - stats->weight, - stats->intra_error, - stats->coded_error, - stats->sr_coded_error, - stats->pcnt_inter, - stats->pcnt_motion, - stats->pcnt_second_ref, - stats->pcnt_neutral, - stats->intra_skip_pct, - stats->inactive_zone_rows, - stats->inactive_zone_cols, - stats->MVr, - stats->mvr_abs, - stats->MVc, - stats->mvc_abs, - stats->MVrv, - stats->MVcv, - stats->mv_in_out_count, - stats->new_mv_count, - stats->count, - stats->duration); + "%12.4lf %12.4lf %12.4lf %12.4lf %12.0lf %12.0lf %12.0lf %12.4lf" + "\n", + stats->frame, stats->weight, stats->intra_error, stats->coded_error, + stats->sr_coded_error, stats->frame_noise_energy, stats->pcnt_inter, + stats->pcnt_motion, stats->pcnt_second_ref, stats->pcnt_neutral, + stats->intra_skip_pct, stats->intra_smooth_pct, + stats->inactive_zone_rows, stats->inactive_zone_cols, stats->MVr, + stats->mvr_abs, stats->MVc, stats->mvc_abs, stats->MVrv, + stats->MVcv, stats->mv_in_out_count, stats->new_mv_count, + stats->count, stats->duration); fclose(fpfile); } #endif @@ -145,7 +128,7 @@ static void output_stats(FIRSTPASS_STATS *stats, #if 
CONFIG_FP_MB_STATS static void output_fpmb_stats(uint8_t *this_frame_mb_stats, VP9_COMMON *cm, - struct vpx_codec_pkt_list *pktlist) { + struct vpx_codec_pkt_list *pktlist) { struct vpx_codec_cx_pkt pkt; pkt.kind = VPX_CODEC_FPMB_STATS_PKT; pkt.data.firstpass_mb_stats.buf = this_frame_mb_stats; @@ -160,23 +143,25 @@ static void zero_stats(FIRSTPASS_STATS *section) { section->intra_error = 0.0; section->coded_error = 0.0; section->sr_coded_error = 0.0; - section->pcnt_inter = 0.0; - section->pcnt_motion = 0.0; + section->frame_noise_energy = 0.0; + section->pcnt_inter = 0.0; + section->pcnt_motion = 0.0; section->pcnt_second_ref = 0.0; section->pcnt_neutral = 0.0; section->intra_skip_pct = 0.0; + section->intra_smooth_pct = 0.0; section->inactive_zone_rows = 0.0; section->inactive_zone_cols = 0.0; section->MVr = 0.0; - section->mvr_abs = 0.0; - section->MVc = 0.0; - section->mvc_abs = 0.0; - section->MVrv = 0.0; - section->MVcv = 0.0; - section->mv_in_out_count = 0.0; + section->mvr_abs = 0.0; + section->MVc = 0.0; + section->mvc_abs = 0.0; + section->MVrv = 0.0; + section->MVcv = 0.0; + section->mv_in_out_count = 0.0; section->new_mv_count = 0.0; - section->count = 0.0; - section->duration = 1.0; + section->count = 0.0; + section->duration = 1.0; section->spatial_layer_id = 0; } @@ -188,23 +173,25 @@ static void accumulate_stats(FIRSTPASS_STATS *section, section->intra_error += frame->intra_error; section->coded_error += frame->coded_error; section->sr_coded_error += frame->sr_coded_error; - section->pcnt_inter += frame->pcnt_inter; + section->frame_noise_energy += frame->frame_noise_energy; + section->pcnt_inter += frame->pcnt_inter; section->pcnt_motion += frame->pcnt_motion; section->pcnt_second_ref += frame->pcnt_second_ref; section->pcnt_neutral += frame->pcnt_neutral; section->intra_skip_pct += frame->intra_skip_pct; + section->intra_smooth_pct += frame->intra_smooth_pct; section->inactive_zone_rows += frame->inactive_zone_rows; section->inactive_zone_cols += 
frame->inactive_zone_cols; section->MVr += frame->MVr; - section->mvr_abs += frame->mvr_abs; - section->MVc += frame->MVc; - section->mvc_abs += frame->mvc_abs; - section->MVrv += frame->MVrv; - section->MVcv += frame->MVcv; - section->mv_in_out_count += frame->mv_in_out_count; + section->mvr_abs += frame->mvr_abs; + section->MVc += frame->MVc; + section->mvc_abs += frame->mvc_abs; + section->MVrv += frame->MVrv; + section->MVcv += frame->MVcv; + section->mv_in_out_count += frame->mv_in_out_count; section->new_mv_count += frame->new_mv_count; - section->count += frame->count; - section->duration += frame->duration; + section->count += frame->count; + section->duration += frame->duration; } static void subtract_stats(FIRSTPASS_STATS *section, @@ -214,23 +201,25 @@ static void subtract_stats(FIRSTPASS_STATS *section, section->intra_error -= frame->intra_error; section->coded_error -= frame->coded_error; section->sr_coded_error -= frame->sr_coded_error; - section->pcnt_inter -= frame->pcnt_inter; + section->frame_noise_energy -= frame->frame_noise_energy; + section->pcnt_inter -= frame->pcnt_inter; section->pcnt_motion -= frame->pcnt_motion; section->pcnt_second_ref -= frame->pcnt_second_ref; section->pcnt_neutral -= frame->pcnt_neutral; section->intra_skip_pct -= frame->intra_skip_pct; + section->intra_smooth_pct -= frame->intra_smooth_pct; section->inactive_zone_rows -= frame->inactive_zone_rows; section->inactive_zone_cols -= frame->inactive_zone_cols; section->MVr -= frame->MVr; - section->mvr_abs -= frame->mvr_abs; - section->MVc -= frame->MVc; - section->mvc_abs -= frame->mvc_abs; - section->MVrv -= frame->MVrv; - section->MVcv -= frame->MVcv; - section->mv_in_out_count -= frame->mv_in_out_count; + section->mvr_abs -= frame->mvr_abs; + section->MVc -= frame->MVc; + section->mvc_abs -= frame->mvc_abs; + section->MVrv -= frame->MVrv; + section->MVcv -= frame->MVcv; + section->mv_in_out_count -= frame->mv_in_out_count; section->new_mv_count -= frame->new_mv_count; 
- section->count -= frame->count; - section->duration -= frame->duration; + section->count -= frame->count; + section->duration -= frame->duration; } // Calculate an active area of the image that discounts formatting @@ -241,9 +230,10 @@ static double calculate_active_area(const VP9_COMP *cpi, const FIRSTPASS_STATS *this_frame) { double active_pct; - active_pct = 1.0 - - ((this_frame->intra_skip_pct / 2) + - ((this_frame->inactive_zone_rows * 2) / (double)cpi->common.mb_rows)); + active_pct = + 1.0 - + ((this_frame->intra_skip_pct / 2) + + ((this_frame->inactive_zone_rows * 2) / (double)cpi->common.mb_rows)); return fclamp(active_pct, MIN_ACTIVE_AREA, MAX_ACTIVE_AREA); } @@ -258,8 +248,9 @@ static double calculate_modified_err(const VP9_COMP *cpi, const double av_weight = stats->weight / stats->count; const double av_err = (stats->coded_error * av_weight) / stats->count; double modified_error = - av_err * pow(this_frame->coded_error * this_frame->weight / - DOUBLE_DIVIDE_CHECK(av_err), oxcf->two_pass_vbrbias / 100.0); + av_err * pow(this_frame->coded_error * this_frame->weight / + DOUBLE_DIVIDE_CHECK(av_err), + oxcf->two_pass_vbrbias / 100.0); // Correction for active area. Frames with a reduced active area // (eg due to formatting bars) have a higher error per mb for the @@ -267,17 +258,18 @@ static double calculate_modified_err(const VP9_COMP *cpi, // 0.5N blocks of complexity 2X is a little easier than coding N // blocks of complexity X. modified_error *= - pow(calculate_active_area(cpi, this_frame), ACT_AREA_CORRECTION); + pow(calculate_active_area(cpi, this_frame), ACT_AREA_CORRECTION); - return fclamp(modified_error, - twopass->modified_error_min, twopass->modified_error_max); + return fclamp(modified_error, twopass->modified_error_min, + twopass->modified_error_max); } // This function returns the maximum target rate per frame. 
static int frame_max_bits(const RATE_CONTROL *rc, const VP9EncoderConfig *oxcf) { int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth * - (int64_t)oxcf->two_pass_vbrmax_section) / 100; + (int64_t)oxcf->two_pass_vbrmax_section) / + 100; if (max_bits < 0) max_bits = 0; else if (max_bits > rc->max_frame_bandwidth) @@ -304,14 +296,10 @@ void vp9_end_first_pass(VP9_COMP *cpi) { static vpx_variance_fn_t get_block_variance_fn(BLOCK_SIZE bsize) { switch (bsize) { - case BLOCK_8X8: - return vpx_mse8x8; - case BLOCK_16X8: - return vpx_mse16x8; - case BLOCK_8X16: - return vpx_mse8x16; - default: - return vpx_mse16x16; + case BLOCK_8X8: return vpx_mse8x8; + case BLOCK_16X8: return vpx_mse16x8; + case BLOCK_8X16: return vpx_mse8x16; + default: return vpx_mse16x16; } } @@ -330,38 +318,26 @@ static vpx_variance_fn_t highbd_get_block_variance_fn(BLOCK_SIZE bsize, switch (bd) { default: switch (bsize) { - case BLOCK_8X8: - return vpx_highbd_8_mse8x8; - case BLOCK_16X8: - return vpx_highbd_8_mse16x8; - case BLOCK_8X16: - return vpx_highbd_8_mse8x16; - default: - return vpx_highbd_8_mse16x16; + case BLOCK_8X8: return vpx_highbd_8_mse8x8; + case BLOCK_16X8: return vpx_highbd_8_mse16x8; + case BLOCK_8X16: return vpx_highbd_8_mse8x16; + default: return vpx_highbd_8_mse16x16; } break; case 10: switch (bsize) { - case BLOCK_8X8: - return vpx_highbd_10_mse8x8; - case BLOCK_16X8: - return vpx_highbd_10_mse16x8; - case BLOCK_8X16: - return vpx_highbd_10_mse8x16; - default: - return vpx_highbd_10_mse16x16; + case BLOCK_8X8: return vpx_highbd_10_mse8x8; + case BLOCK_16X8: return vpx_highbd_10_mse16x8; + case BLOCK_8X16: return vpx_highbd_10_mse8x16; + default: return vpx_highbd_10_mse16x16; } break; case 12: switch (bsize) { - case BLOCK_8X8: - return vpx_highbd_12_mse8x8; - case BLOCK_16X8: - return vpx_highbd_12_mse16x8; - case BLOCK_8X16: - return vpx_highbd_12_mse8x16; - default: - return vpx_highbd_12_mse16x16; + case BLOCK_8X8: return vpx_highbd_12_mse8x8; + case BLOCK_16X8: return 
vpx_highbd_12_mse16x8; + case BLOCK_8X16: return vpx_highbd_12_mse8x16; + default: return vpx_highbd_12_mse16x16; } break; } @@ -384,8 +360,7 @@ static int get_search_range(const VP9_COMP *cpi) { int sr = 0; const int dim = VPXMIN(cpi->initial_width, cpi->initial_height); - while ((dim << sr) < MAX_FULL_PEL_VAL) - ++sr; + while ((dim << sr) < MAX_FULL_PEL_VAL) ++sr; return sr; } @@ -393,8 +368,8 @@ static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x, const MV *ref_mv, MV *best_mv, int *best_motion_err) { MACROBLOCKD *const xd = &x->e_mbd; - MV tmp_mv = {0, 0}; - MV ref_mv_full = {ref_mv->row >> 3, ref_mv->col >> 3}; + MV tmp_mv = { 0, 0 }; + MV ref_mv_full = { ref_mv->row >> 3, ref_mv->col >> 3 }; int num00, tmp_err, n; const BLOCK_SIZE bsize = xd->mi[0]->sb_type; vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[bsize]; @@ -416,12 +391,11 @@ static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x, // Center the initial step/diamond search on best mv. tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv, - step_param, - x->sadperbit16, &num00, &v_fn_ptr, ref_mv); + step_param, x->sadperbit16, &num00, + &v_fn_ptr, ref_mv); if (tmp_err < INT_MAX) tmp_err = vp9_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1); - if (tmp_err < INT_MAX - new_mv_mode_penalty) - tmp_err += new_mv_mode_penalty; + if (tmp_err < INT_MAX - new_mv_mode_penalty) tmp_err += new_mv_mode_penalty; if (tmp_err < *best_motion_err) { *best_motion_err = tmp_err; @@ -439,8 +413,8 @@ static void first_pass_motion_search(VP9_COMP *cpi, MACROBLOCK *x, --num00; } else { tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv, - step_param + n, x->sadperbit16, - &num00, &v_fn_ptr, ref_mv); + step_param + n, x->sadperbit16, &num00, + &v_fn_ptr, ref_mv); if (tmp_err < INT_MAX) tmp_err = vp9_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1); if (tmp_err < INT_MAX - new_mv_mode_penalty) @@ -456,11 +430,9 @@ static void first_pass_motion_search(VP9_COMP *cpi, 
MACROBLOCK *x, static BLOCK_SIZE get_bsize(const VP9_COMMON *cm, int mb_row, int mb_col) { if (2 * mb_col + 1 < cm->mi_cols) { - return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_16X16 - : BLOCK_16X8; + return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_16X16 : BLOCK_16X8; } else { - return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_8X16 - : BLOCK_8X8; + return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_8X16 : BLOCK_8X8; } } @@ -468,11 +440,9 @@ static int find_fp_qindex(vpx_bit_depth_t bit_depth) { int i; for (i = 0; i < QINDEX_RANGE; ++i) - if (vp9_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q) - break; + if (vp9_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q) break; - if (i == QINDEX_RANGE) - i--; + if (i == QINDEX_RANGE) i--; return i; } @@ -480,8 +450,7 @@ static int find_fp_qindex(vpx_bit_depth_t bit_depth) { static void set_first_pass_params(VP9_COMP *cpi) { VP9_COMMON *const cm = &cpi->common; if (!cpi->refresh_alt_ref_frame && - (cm->current_video_frame == 0 || - (cpi->frame_flags & FRAMEFLAGS_KEY))) { + (cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY))) { cm->frame_type = KEY_FRAME; } else { cm->frame_type = INTER_FRAME; @@ -490,7 +459,199 @@ static void set_first_pass_params(VP9_COMP *cpi) { cpi->rc.frames_to_key = INT_MAX; } +// Scale an sse threshold to account for 8/10/12 bit. +static int scale_sse_threshold(VP9_COMMON *cm, int thresh) { + int ret_val = thresh; +#if CONFIG_VP9_HIGHBITDEPTH + if (cm->use_highbitdepth) { + switch (cm->bit_depth) { + case VPX_BITS_8: ret_val = thresh; break; + case VPX_BITS_10: ret_val = thresh >> 4; break; + case VPX_BITS_12: ret_val = thresh >> 8; break; + default: + assert(0 && + "cm->bit_depth should be VPX_BITS_8, " + "VPX_BITS_10 or VPX_BITS_12"); + } + } +#else + (void)cm; +#endif // CONFIG_VP9_HIGHBITDEPTH + return ret_val; +} + +// This threshold is used to track blocks where to all intents and purposes +// the intra prediction error 0. 
Though the metric we test against +// is technically a sse we are mainly interested in blocks where all the pixels +// in the 8 bit domain have an error of <= 1 (where error = sse) so a +// linear scaling for 10 and 12 bit gives similar results. #define UL_INTRA_THRESH 50 +static int get_ul_intra_threshold(VP9_COMMON *cm) { + int ret_val = UL_INTRA_THRESH; +#if CONFIG_VP9_HIGHBITDEPTH + if (cm->use_highbitdepth) { + switch (cm->bit_depth) { + case VPX_BITS_8: ret_val = UL_INTRA_THRESH; break; + case VPX_BITS_10: ret_val = UL_INTRA_THRESH << 2; break; + case VPX_BITS_12: ret_val = UL_INTRA_THRESH << 4; break; + default: + assert(0 && + "cm->bit_depth should be VPX_BITS_8, " + "VPX_BITS_10 or VPX_BITS_12"); + } + } +#else + (void)cm; +#endif // CONFIG_VP9_HIGHBITDEPTH + return ret_val; +} + +#define SMOOTH_INTRA_THRESH 4000 +static int get_smooth_intra_threshold(VP9_COMMON *cm) { + int ret_val = SMOOTH_INTRA_THRESH; +#if CONFIG_VP9_HIGHBITDEPTH + if (cm->use_highbitdepth) { + switch (cm->bit_depth) { + case VPX_BITS_8: ret_val = SMOOTH_INTRA_THRESH; break; + case VPX_BITS_10: ret_val = SMOOTH_INTRA_THRESH << 4; break; + case VPX_BITS_12: ret_val = SMOOTH_INTRA_THRESH << 8; break; + default: + assert(0 && + "cm->bit_depth should be VPX_BITS_8, " + "VPX_BITS_10 or VPX_BITS_12"); + } + } +#else + (void)cm; +#endif // CONFIG_VP9_HIGHBITDEPTH + return ret_val; +} + +#define FP_DN_THRESH 8 +#define FP_MAX_DN_THRESH 16 +#define KERNEL_SIZE 3 + +// Baseline Kernal weights for first pass noise metric +static uint8_t fp_dn_kernal_3[KERNEL_SIZE * KERNEL_SIZE] = { 1, 2, 1, 2, 4, + 2, 1, 2, 1 }; + +// Estimate noise at a single point based on the impace of a spatial kernal +// on the point value +static int fp_estimate_point_noise(uint8_t *src_ptr, const int stride) { + int sum_weight = 0; + int sum_val = 0; + int i, j; + int max_diff = 0; + int diff; + int dn_diff; + uint8_t *tmp_ptr; + uint8_t *kernal_ptr; + uint8_t dn_val; + uint8_t centre_val = *src_ptr; + + kernal_ptr = 
fp_dn_kernal_3; + + // Apply the kernal + tmp_ptr = src_ptr - stride - 1; + for (i = 0; i < KERNEL_SIZE; ++i) { + for (j = 0; j < KERNEL_SIZE; ++j) { + diff = abs((int)centre_val - (int)tmp_ptr[j]); + max_diff = VPXMAX(max_diff, diff); + if (diff <= FP_DN_THRESH) { + sum_weight += *kernal_ptr; + sum_val += (int)tmp_ptr[j] * (int)*kernal_ptr; + } + ++kernal_ptr; + } + tmp_ptr += stride; + } + + if (max_diff < FP_MAX_DN_THRESH) + // Update the source value with the new filtered value + dn_val = (sum_val + (sum_weight >> 1)) / sum_weight; + else + dn_val = *src_ptr; + + // return the noise energy as the square of the difference between the + // denoised and raw value. + dn_diff = (int)*src_ptr - (int)dn_val; + return dn_diff * dn_diff; +} +#if CONFIG_VP9_HIGHBITDEPTH +static int fp_highbd_estimate_point_noise(uint8_t *src_ptr, const int stride) { + int sum_weight = 0; + int sum_val = 0; + int i, j; + int max_diff = 0; + int diff; + int dn_diff; + uint8_t *tmp_ptr; + uint16_t *tmp_ptr16; + uint8_t *kernal_ptr; + uint16_t dn_val; + uint16_t centre_val = *CONVERT_TO_SHORTPTR(src_ptr); + + kernal_ptr = fp_dn_kernal_3; + + // Apply the kernal + tmp_ptr = src_ptr - stride - 1; + for (i = 0; i < KERNEL_SIZE; ++i) { + tmp_ptr16 = CONVERT_TO_SHORTPTR(tmp_ptr); + for (j = 0; j < KERNEL_SIZE; ++j) { + diff = abs((int)centre_val - (int)tmp_ptr16[j]); + max_diff = VPXMAX(max_diff, diff); + if (diff <= FP_DN_THRESH) { + sum_weight += *kernal_ptr; + sum_val += (int)tmp_ptr16[j] * (int)*kernal_ptr; + } + ++kernal_ptr; + } + tmp_ptr += stride; + } + + if (max_diff < FP_MAX_DN_THRESH) + // Update the source value with the new filtered value + dn_val = (sum_val + (sum_weight >> 1)) / sum_weight; + else + dn_val = *CONVERT_TO_SHORTPTR(src_ptr); + + // return the noise energy as the square of the difference between the + // denoised and raw value. + dn_diff = (int)(*CONVERT_TO_SHORTPTR(src_ptr)) - (int)dn_val; + return dn_diff * dn_diff; +} +#endif + +// Estimate noise for a block. 
+static int fp_estimate_block_noise(MACROBLOCK *x, BLOCK_SIZE bsize) { +#if CONFIG_VP9_HIGHBITDEPTH + MACROBLOCKD *xd = &x->e_mbd; +#endif + uint8_t *src_ptr = &x->plane[0].src.buf[0]; + const int width = num_4x4_blocks_wide_lookup[bsize] * 4; + const int height = num_4x4_blocks_high_lookup[bsize] * 4; + int w, h; + int stride = x->plane[0].src.stride; + int block_noise = 0; + + // Sampled points to reduce cost overhead. + for (h = 0; h < height; h += 2) { + for (w = 0; w < width; w += 2) { +#if CONFIG_VP9_HIGHBITDEPTH + if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) + block_noise += fp_highbd_estimate_point_noise(src_ptr, stride); + else + block_noise += fp_estimate_point_noise(src_ptr, stride); +#else + block_noise += fp_estimate_point_noise(src_ptr, stride); +#endif + ++src_ptr; + } + src_ptr += (stride - width); + } + return block_noise << 2; // Scale << 2 to account for sampling. +} + #define INVALID_ROW -1 void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { int mb_row, mb_col; @@ -507,6 +668,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { int64_t intra_error = 0; int64_t coded_error = 0; int64_t sr_coded_error = 0; + int64_t frame_noise_energy = 0; int sum_mvr = 0, sum_mvc = 0; int sum_mvr_abs = 0, sum_mvc_abs = 0; @@ -517,12 +679,13 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { const int intrapenalty = INTRA_MODE_PENALTY; double neutral_count; int intra_skip_count = 0; + int intra_smooth_count = 0; int image_data_start_row = INVALID_ROW; int new_mv_count = 0; int sum_in_vectors = 0; - MV lastmv = {0, 0}; + MV lastmv = { 0, 0 }; TWO_PASS *twopass = &cpi->twopass; - const MV zero_mv = {0, 0}; + const MV zero_mv = { 0, 0 }; int recon_y_stride, recon_uv_stride, uv_mb_height; YV12_BUFFER_CONFIG *const lst_yv12 = get_ref_frame_buffer(cpi, LAST_FRAME); @@ -530,11 +693,13 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { YV12_BUFFER_CONFIG *const 
new_yv12 = get_frame_new_buffer(cm); const YV12_BUFFER_CONFIG *first_ref_buf = lst_yv12; - LAYER_CONTEXT *const lc = is_two_pass_svc(cpi) ? - &cpi->svc.layer_context[cpi->svc.spatial_layer_id] : NULL; + LAYER_CONTEXT *const lc = + is_two_pass_svc(cpi) ? &cpi->svc.layer_context[cpi->svc.spatial_layer_id] + : NULL; double intra_factor; double brightness_factor; BufferPool *const pool = cm->buffer_pool; + MODE_INFO mi_above, mi_left; // First pass code requires valid last and new frame buffers. assert(new_yv12 != NULL); @@ -571,8 +736,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { cpi->refresh_golden_frame = 0; } - if (lc->current_video_frame_in_layer == 0) - cpi->ref_frame_flags = 0; + if (lc->current_video_frame_in_layer == 0) cpi->ref_frame_flags = 0; vp9_scale_references(cpi); @@ -593,7 +757,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { } set_ref_ptrs(cm, xd, - (cpi->ref_frame_flags & VP9_LAST_FLAG) ? LAST_FRAME: NONE, + (cpi->ref_frame_flags & VP9_LAST_FLAG) ? LAST_FRAME : NONE, (cpi->ref_frame_flags & VP9_GOLD_FLAG) ? GOLDEN_FRAME : NONE); cpi->Source = vp9_scale_if_required(cm, cpi->un_scaled_source, @@ -633,7 +797,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { uv_mb_height = 16 >> (new_yv12->y_height > new_yv12->uv_height); for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { - MV best_ref_mv = {0, 0}; + MV best_ref_mv = { 0, 0 }; // Reset above block coeffs. recon_yoffset = (mb_row * recon_y_stride * 16); @@ -641,12 +805,13 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { // Set up limit values for motion vectors to prevent them extending // outside the UMV borders. 
- x->mv_row_min = -((mb_row * 16) + BORDER_MV_PIXELS_B16); - x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) - + BORDER_MV_PIXELS_B16; + x->mv_limits.row_min = -((mb_row * 16) + BORDER_MV_PIXELS_B16); + x->mv_limits.row_max = + ((cm->mb_rows - 1 - mb_row) * 16) + BORDER_MV_PIXELS_B16; for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { int this_error; + int this_intra_error; const int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row); const BLOCK_SIZE bsize = get_bsize(cm, mb_row, mb_col); double log_intra; @@ -663,44 +828,62 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { xd->plane[2].dst.buf = new_yv12->v_buffer + recon_uvoffset; xd->mi[0]->sb_type = bsize; xd->mi[0]->ref_frame[0] = INTRA_FRAME; - set_mi_row_col(xd, &tile, - mb_row << 1, num_8x8_blocks_high_lookup[bsize], + set_mi_row_col(xd, &tile, mb_row << 1, num_8x8_blocks_high_lookup[bsize], mb_col << 1, num_8x8_blocks_wide_lookup[bsize], cm->mi_rows, cm->mi_cols); + // Are edges available for intra prediction? + // Since the firstpass does not populate the mi_grid_visible, + // above_mi/left_mi must be overwritten with a nonzero value when edges + // are available. Required by vp9_predict_intra_block(). + xd->above_mi = (mb_row != 0) ? &mi_above : NULL; + xd->left_mi = (mb_col > tile.mi_col_start) ? &mi_left : NULL; // Do intra 16x16 prediction. x->skip_encode = 0; xd->mi[0]->mode = DC_PRED; - xd->mi[0]->tx_size = use_dc_pred ? - (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4; - vp9_encode_intra_block_plane(x, bsize, 0); + xd->mi[0]->tx_size = + use_dc_pred ? (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4; + vp9_encode_intra_block_plane(x, bsize, 0, 0); this_error = vpx_get_mb_ss(x->plane[0].src_diff); + this_intra_error = this_error; - // Keep a record of blocks that have almost no intra error residual + // Keep a record of blocks that have very low intra error residual // (i.e. are in effect completely flat and untextured in the intra // domain). 
In natural videos this is uncommon, but it is much more // common in animations, graphics and screen content, so may be used // as a signal to detect these types of content. - if (this_error < UL_INTRA_THRESH) { + if (this_error < get_ul_intra_threshold(cm)) { ++intra_skip_count; } else if ((mb_col > 0) && (image_data_start_row == INVALID_ROW)) { image_data_start_row = mb_row; } + // Blocks that are mainly smooth in the intra domain. + // Some special accounting for CQ but also these are better for testing + // noise levels. + if (this_error < get_smooth_intra_threshold(cm)) { + ++intra_smooth_count; + } + + // Special case noise measurement for first frame. + if (cm->current_video_frame == 0) { + if (this_intra_error < scale_sse_threshold(cm, LOW_I_THRESH)) { + frame_noise_energy += fp_estimate_block_noise(x, bsize); + } else { + frame_noise_energy += (int64_t)SECTION_NOISE_DEF; + } + } + #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) { switch (cm->bit_depth) { - case VPX_BITS_8: - break; - case VPX_BITS_10: - this_error >>= 4; - break; - case VPX_BITS_12: - this_error >>= 8; - break; + case VPX_BITS_8: break; + case VPX_BITS_10: this_error >>= 4; break; + case VPX_BITS_12: this_error >>= 8; break; default: - assert(0 && "cm->bit_depth should be VPX_BITS_8, " - "VPX_BITS_10 or VPX_BITS_12"); + assert(0 && + "cm->bit_depth should be VPX_BITS_8, " + "VPX_BITS_10 or VPX_BITS_12"); return; } } @@ -747,15 +930,16 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { // Set up limit values for motion vectors to prevent them extending // outside the UMV borders. - x->mv_col_min = -((mb_col * 16) + BORDER_MV_PIXELS_B16); - x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16) + BORDER_MV_PIXELS_B16; + x->mv_limits.col_min = -((mb_col * 16) + BORDER_MV_PIXELS_B16); + x->mv_limits.col_max = + ((cm->mb_cols - 1 - mb_col) * 16) + BORDER_MV_PIXELS_B16; // Other than for the first frame do a motion search. 
if ((lc == NULL && cm->current_video_frame > 0) || (lc != NULL && lc->current_video_frame_in_layer > 0)) { int tmp_err, motion_error, raw_motion_error; // Assume 0,0 motion with no mv overhead. - MV mv = {0, 0} , tmp_mv = {0, 0}; + MV mv = { 0, 0 }, tmp_mv = { 0, 0 }; struct buf_2d unscaled_last_source_buf_2d; xd->plane[0].pre[0].buf = first_ref_buf->y_buffer + recon_yoffset; @@ -764,12 +948,12 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { motion_error = highbd_get_prediction_error( bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd); } else { - motion_error = get_prediction_error( - bsize, &x->plane[0].src, &xd->plane[0].pre[0]); + motion_error = get_prediction_error(bsize, &x->plane[0].src, + &xd->plane[0].pre[0]); } #else - motion_error = get_prediction_error( - bsize, &x->plane[0].src, &xd->plane[0].pre[0]); + motion_error = + get_prediction_error(bsize, &x->plane[0].src, &xd->plane[0].pre[0]); #endif // CONFIG_VP9_HIGHBITDEPTH // Compute the motion error of the 0,0 motion using the last source @@ -784,12 +968,12 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { raw_motion_error = highbd_get_prediction_error( bsize, &x->plane[0].src, &unscaled_last_source_buf_2d, xd->bd); } else { - raw_motion_error = get_prediction_error( - bsize, &x->plane[0].src, &unscaled_last_source_buf_2d); + raw_motion_error = get_prediction_error(bsize, &x->plane[0].src, + &unscaled_last_source_buf_2d); } #else - raw_motion_error = get_prediction_error( - bsize, &x->plane[0].src, &unscaled_last_source_buf_2d); + raw_motion_error = get_prediction_error(bsize, &x->plane[0].src, + &unscaled_last_source_buf_2d); #endif // CONFIG_VP9_HIGHBITDEPTH // TODO(pengchong): Replace the hard-coded threshold @@ -812,8 +996,8 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { // Search in an older reference frame. 
if (((lc == NULL && cm->current_video_frame > 1) || - (lc != NULL && lc->current_video_frame_in_layer > 1)) - && gld_yv12 != NULL) { + (lc != NULL && lc->current_video_frame_in_layer > 1)) && + gld_yv12 != NULL) { // Assume 0,0 motion with no mv overhead. int gf_motion_error; @@ -823,12 +1007,12 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { gf_motion_error = highbd_get_prediction_error( bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd); } else { - gf_motion_error = get_prediction_error( - bsize, &x->plane[0].src, &xd->plane[0].pre[0]); + gf_motion_error = get_prediction_error(bsize, &x->plane[0].src, + &xd->plane[0].pre[0]); } #else - gf_motion_error = get_prediction_error( - bsize, &x->plane[0].src, &xd->plane[0].pre[0]); + gf_motion_error = get_prediction_error(bsize, &x->plane[0].src, + &xd->plane[0].pre[0]); #endif // CONFIG_VP9_HIGHBITDEPTH first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv, @@ -863,7 +1047,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { #if CONFIG_FP_MB_STATS if (cpi->use_fp_mb_stats) { - // intra predication statistics + // intra prediction statistics cpi->twopass.frame_mb_stats_buf[mb_index] = 0; cpi->twopass.frame_mb_stats_buf[mb_index] |= FPMB_DCINTRA_MASK; cpi->twopass.frame_mb_stats_buf[mb_index] |= FPMB_MOTION_ZERO_MASK; @@ -884,12 +1068,12 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { if (((this_error - intrapenalty) * 9 <= motion_error * 10) && (this_error < (2 * intrapenalty))) { neutral_count += 1.0; - // Also track cases where the intra is not much worse than the inter - // and use this in limiting the GF/arf group length. + // Also track cases where the intra is not much worse than the inter + // and use this in limiting the GF/arf group length. 
} else if ((this_error > NCOUNT_INTRA_THRESH) && (this_error < (NCOUNT_INTRA_FACTOR * motion_error))) { - neutral_count += (double)motion_error / - DOUBLE_DIVIDE_CHECK((double)this_error); + neutral_count += + (double)motion_error / DOUBLE_DIVIDE_CHECK((double)this_error); } mv.row *= 8; @@ -914,7 +1098,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { #if CONFIG_FP_MB_STATS if (cpi->use_fp_mb_stats) { - // inter predication statistics + // inter prediction statistics cpi->twopass.frame_mb_stats_buf[mb_index] = 0; cpi->twopass.frame_mb_stats_buf[mb_index] &= ~FPMB_DCINTRA_MASK; cpi->twopass.frame_mb_stats_buf[mb_index] |= FPMB_MOTION_ZERO_MASK; @@ -959,8 +1143,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { #endif // Non-zero vector, was it different from the last non zero vector? - if (!is_equal_mv(&mv, &lastmv)) - ++new_mv_count; + if (!is_equal_mv(&mv, &lastmv)) ++new_mv_count; lastmv = mv; // Does the row vector point inwards or outwards? @@ -988,7 +1171,17 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { else if (mv.col < 0) --sum_in_vectors; } + frame_noise_energy += (int64_t)SECTION_NOISE_DEF; + } else if (this_intra_error < scale_sse_threshold(cm, LOW_I_THRESH)) { + frame_noise_energy += fp_estimate_block_noise(x, bsize); + } else { // 0,0 mv but high error + frame_noise_energy += (int64_t)SECTION_NOISE_DEF; } + } else { // Intra < inter error + if (this_intra_error < scale_sse_threshold(cm, LOW_I_THRESH)) + frame_noise_energy += fp_estimate_block_noise(x, bsize); + else + frame_noise_energy += (int64_t)SECTION_NOISE_DEF; } } else { sr_coded_error += (int64_t)this_error; @@ -1006,10 +1199,10 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { // Adjust to the next row of MBs. 
x->plane[0].src.buf += 16 * x->plane[0].src.stride - 16 * cm->mb_cols; - x->plane[1].src.buf += uv_mb_height * x->plane[1].src.stride - - uv_mb_height * cm->mb_cols; - x->plane[2].src.buf += uv_mb_height * x->plane[1].src.stride - - uv_mb_height * cm->mb_cols; + x->plane[1].src.buf += + uv_mb_height * x->plane[1].src.stride - uv_mb_height * cm->mb_cols; + x->plane[2].src.buf += + uv_mb_height * x->plane[1].src.stride - uv_mb_height * cm->mb_cols; vpx_clear_system_state(); } @@ -1034,7 +1227,8 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { // Initial estimate here uses sqrt(mbs) to define the min_err, where the // number of mbs is proportional to the image area. const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) - ? cpi->initial_mbs : cpi->common.MBs; + ? cpi->initial_mbs + : cpi->common.MBs; const double min_err = 200 * sqrt(num_mbs); intra_factor = intra_factor / (double)num_mbs; @@ -1046,23 +1240,26 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { fps.coded_error = (double)(coded_error >> 8) + min_err; fps.sr_coded_error = (double)(sr_coded_error >> 8) + min_err; fps.intra_error = (double)(intra_error >> 8) + min_err; + fps.frame_noise_energy = (double)frame_noise_energy / (double)num_mbs; fps.count = 1.0; fps.pcnt_inter = (double)intercount / num_mbs; fps.pcnt_second_ref = (double)second_ref_count / num_mbs; fps.pcnt_neutral = (double)neutral_count / num_mbs; fps.intra_skip_pct = (double)intra_skip_count / num_mbs; + fps.intra_smooth_pct = (double)intra_smooth_count / num_mbs; fps.inactive_zone_rows = (double)image_data_start_row; - fps.inactive_zone_cols = (double)0; // TODO(paulwilkins): fix + // Currently set to 0 as most issues relate to letter boxing. 
+ fps.inactive_zone_cols = (double)0; if (mvcount > 0) { fps.MVr = (double)sum_mvr / mvcount; fps.mvr_abs = (double)sum_mvr_abs / mvcount; fps.MVc = (double)sum_mvc / mvcount; fps.mvc_abs = (double)sum_mvc_abs / mvcount; - fps.MVrv = ((double)sum_mvrs - - ((double)sum_mvr * sum_mvr / mvcount)) / mvcount; - fps.MVcv = ((double)sum_mvcs - - ((double)sum_mvc * sum_mvc / mvcount)) / mvcount; + fps.MVrv = + ((double)sum_mvrs - ((double)sum_mvr * sum_mvr / mvcount)) / mvcount; + fps.MVcv = + ((double)sum_mvcs - ((double)sum_mvc * sum_mvc / mvcount)) / mvcount; fps.mv_in_out_count = (double)sum_in_vectors / (mvcount * 2); fps.new_mv_count = new_mv_count; fps.pcnt_motion = (double)mvcount / num_mbs; @@ -1078,10 +1275,9 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { fps.pcnt_motion = 0.0; } - // TODO(paulwilkins): Handle the case when duration is set to 0, or - // something less than the full time between subsequent values of - // cpi->source_time_stamp. - fps.duration = (double)(source->ts_end - source->ts_start); + // Dont allow a value of 0 for duration. + // (Section duration is also defaulted to minimum of 1.0). + fps.duration = VPXMAX(1.0, (double)(source->ts_end - source->ts_start)); // Don't want to do output stats with a stack variable! 
twopass->this_frame_stats = fps; @@ -1146,15 +1342,11 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { } ++cm->current_video_frame; - if (cpi->use_svc) - vp9_inc_frame_in_layer(cpi); + if (cpi->use_svc) vp9_inc_frame_in_layer(cpi); } -static double calc_correction_factor(double err_per_mb, - double err_divisor, - double pt_low, - double pt_high, - int q, +static double calc_correction_factor(double err_per_mb, double err_divisor, + double pt_low, double pt_high, int q, vpx_bit_depth_t bit_depth) { const double error_term = err_per_mb / err_divisor; @@ -1163,69 +1355,70 @@ static double calc_correction_factor(double err_per_mb, VPXMIN(vp9_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high); // Calculate correction factor. - if (power_term < 1.0) - assert(error_term >= 0.0); + if (power_term < 1.0) assert(error_term >= 0.0); return fclamp(pow(error_term, power_term), 0.05, 5.0); } -// Larger image formats are expected to be a little harder to code relatively -// given the same prediction error score. This in part at least relates to the -// increased size and hence coding cost of motion vectors. -#define EDIV_SIZE_FACTOR 800 - -static int get_twopass_worst_quality(const VP9_COMP *cpi, - const double section_err, - double inactive_zone, - int section_target_bandwidth, - double group_weight_factor) { +#define ERR_DIVISOR 115.0 +#define NOISE_FACTOR_MIN 0.9 +#define NOISE_FACTOR_MAX 1.1 +static int get_twopass_worst_quality(VP9_COMP *cpi, const double section_err, + double inactive_zone, double section_noise, + int section_target_bandwidth) { const RATE_CONTROL *const rc = &cpi->rc; const VP9EncoderConfig *const oxcf = &cpi->oxcf; + TWO_PASS *const twopass = &cpi->twopass; + // Clamp the target rate to VBR min / max limts. 
const int target_rate = vp9_rc_clamp_pframe_target_size(cpi, section_target_bandwidth); - + double noise_factor = pow((section_noise / SECTION_NOISE_DEF), 0.5); + noise_factor = fclamp(noise_factor, NOISE_FACTOR_MIN, NOISE_FACTOR_MAX); inactive_zone = fclamp(inactive_zone, 0.0, 1.0); if (target_rate <= 0) { return rc->worst_quality; // Highest value allowed } else { const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) - ? cpi->initial_mbs : cpi->common.MBs; + ? cpi->initial_mbs + : cpi->common.MBs; const int active_mbs = VPXMAX(1, num_mbs - (int)(num_mbs * inactive_zone)); const double av_err_per_mb = section_err / active_mbs; const double speed_term = 1.0 + 0.04 * oxcf->speed; - const double ediv_size_correction = (double)num_mbs / EDIV_SIZE_FACTOR; - const int target_norm_bits_per_mb = ((uint64_t)target_rate << - BPER_MB_NORMBITS) / active_mbs; - + double last_group_rate_err; + const int target_norm_bits_per_mb = + (int)(((uint64_t)target_rate << BPER_MB_NORMBITS) / active_mbs); int q; int is_svc_upper_layer = 0; if (is_two_pass_svc(cpi) && cpi->svc.spatial_layer_id > 0) is_svc_upper_layer = 1; + // based on recent history adjust expectations of bits per macroblock. + last_group_rate_err = + (double)twopass->rolling_arf_group_actual_bits / + DOUBLE_DIVIDE_CHECK((double)twopass->rolling_arf_group_target_bits); + last_group_rate_err = VPXMAX(0.25, VPXMIN(4.0, last_group_rate_err)); + twopass->bpm_factor *= (3.0 + last_group_rate_err) / 4.0; + twopass->bpm_factor = VPXMAX(0.25, VPXMIN(4.0, twopass->bpm_factor)); // Try and pick a max Q that will be high enough to encode the // content at the given rate. for (q = rc->best_quality; q < rc->worst_quality; ++q) { - const double factor = - calc_correction_factor(av_err_per_mb, - ERR_DIVISOR - ediv_size_correction, - is_svc_upper_layer ? 
SVC_FACTOR_PT_LOW : - FACTOR_PT_LOW, FACTOR_PT_HIGH, q, - cpi->common.bit_depth); - const int bits_per_mb = - vp9_rc_bits_per_mb(INTER_FRAME, q, - factor * speed_term * group_weight_factor, - cpi->common.bit_depth); - if (bits_per_mb <= target_norm_bits_per_mb) - break; + const double factor = calc_correction_factor( + av_err_per_mb, ERR_DIVISOR, + is_svc_upper_layer ? SVC_FACTOR_PT_LOW : FACTOR_PT_LOW, + FACTOR_PT_HIGH, q, cpi->common.bit_depth); + const int bits_per_mb = vp9_rc_bits_per_mb( + INTER_FRAME, q, + factor * speed_term * cpi->twopass.bpm_factor * noise_factor, + cpi->common.bit_depth); + if (bits_per_mb <= target_norm_bits_per_mb) break; } // Restriction on active max q for constrained quality mode. - if (cpi->oxcf.rc_mode == VPX_CQ) - q = VPXMAX(q, oxcf->cq_level); + if (cpi->oxcf.rc_mode == VPX_CQ) q = VPXMAX(q, oxcf->cq_level); return q; } } @@ -1255,8 +1448,7 @@ static void init_subsampling(VP9_COMP *cpi) { setup_rf_level_maxq(cpi); } -void calculate_coded_size(VP9_COMP *cpi, - int *scaled_frame_width, +void calculate_coded_size(VP9_COMP *cpi, int *scaled_frame_width, int *scaled_frame_height) { RATE_CONTROL *const rc = &cpi->rc; *scaled_frame_width = rc->frame_width[rc->frame_size_selector]; @@ -1266,18 +1458,19 @@ void calculate_coded_size(VP9_COMP *cpi, void vp9_init_second_pass(VP9_COMP *cpi) { SVC *const svc = &cpi->svc; const VP9EncoderConfig *const oxcf = &cpi->oxcf; - const int is_two_pass_svc = (svc->number_spatial_layers > 1) || - (svc->number_temporal_layers > 1); - TWO_PASS *const twopass = is_two_pass_svc ? - &svc->layer_context[svc->spatial_layer_id].twopass : &cpi->twopass; + const int is_two_pass_svc = + (svc->number_spatial_layers > 1) || (svc->number_temporal_layers > 1); + RATE_CONTROL *const rc = &cpi->rc; + TWO_PASS *const twopass = + is_two_pass_svc ? 
&svc->layer_context[svc->spatial_layer_id].twopass + : &cpi->twopass; double frame_rate; FIRSTPASS_STATS *stats; zero_stats(&twopass->total_stats); zero_stats(&twopass->total_left_stats); - if (!twopass->stats_in_end) - return; + if (!twopass->stats_in_end) return; stats = &twopass->total_stats; @@ -1293,13 +1486,14 @@ void vp9_init_second_pass(VP9_COMP *cpi) { if (is_two_pass_svc) { vp9_update_spatial_layer_framerate(cpi, frame_rate); - twopass->bits_left = (int64_t)(stats->duration * - svc->layer_context[svc->spatial_layer_id].target_bandwidth / - 10000000.0); + twopass->bits_left = + (int64_t)(stats->duration * + svc->layer_context[svc->spatial_layer_id].target_bandwidth / + 10000000.0); } else { vp9_new_framerate(cpi, frame_rate); - twopass->bits_left = (int64_t)(stats->duration * oxcf->target_bandwidth / - 10000000.0); + twopass->bits_left = + (int64_t)(stats->duration * oxcf->target_bandwidth / 10000000.0); } // This variable monitors how far behind the second ref update is lagging. @@ -1308,14 +1502,14 @@ void vp9_init_second_pass(VP9_COMP *cpi) { // Scan the first pass file and calculate a modified total error based upon // the bias/power function used to allocate bits. 
{ - const double avg_error = stats->coded_error / - DOUBLE_DIVIDE_CHECK(stats->count); + const double avg_error = + stats->coded_error / DOUBLE_DIVIDE_CHECK(stats->count); const FIRSTPASS_STATS *s = twopass->stats_in; double modified_error_total = 0.0; - twopass->modified_error_min = (avg_error * - oxcf->two_pass_vbrmin_section) / 100; - twopass->modified_error_max = (avg_error * - oxcf->two_pass_vbrmax_section) / 100; + twopass->modified_error_min = + (avg_error * oxcf->two_pass_vbrmin_section) / 100; + twopass->modified_error_max = + (avg_error * oxcf->two_pass_vbrmax_section) / 100; while (s < twopass->stats_in_end) { modified_error_total += calculate_modified_err(cpi, twopass, oxcf, s); ++s; @@ -1324,51 +1518,60 @@ void vp9_init_second_pass(VP9_COMP *cpi) { } // Reset the vbr bits off target counters - cpi->rc.vbr_bits_off_target = 0; - cpi->rc.vbr_bits_off_target_fast = 0; - - cpi->rc.rate_error_estimate = 0; + rc->vbr_bits_off_target = 0; + rc->vbr_bits_off_target_fast = 0; + rc->rate_error_estimate = 0; // Static sequence monitor variables. twopass->kf_zeromotion_pct = 100; twopass->last_kfgroup_zeromotion_pct = 100; + // Initialize bits per macro_block estimate correction factor. + twopass->bpm_factor = 1.0; + // Initialize actual and target bits counters for ARF groups so that + // at the start we have a neutral bpm adjustment. 
+ twopass->rolling_arf_group_target_bits = 1; + twopass->rolling_arf_group_actual_bits = 1; + if (oxcf->resize_mode != RESIZE_NONE) { init_subsampling(cpi); } + + // Initialize the arnr strangth adjustment to 0 + twopass->arnr_strength_adjustment = 0; } #define SR_DIFF_PART 0.0015 -#define MOTION_AMP_PART 0.003 #define INTRA_PART 0.005 #define DEFAULT_DECAY_LIMIT 0.75 #define LOW_SR_DIFF_TRHESH 0.1 #define SR_DIFF_MAX 128.0 +#define LOW_CODED_ERR_PER_MB 10.0 +#define NCOUNT_FRAME_II_THRESH 6.0 static double get_sr_decay_rate(const VP9_COMP *cpi, const FIRSTPASS_STATS *frame) { - const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) - ? cpi->initial_mbs : cpi->common.MBs; - double sr_diff = - (frame->sr_coded_error - frame->coded_error) / num_mbs; + const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs + : cpi->common.MBs; + double sr_diff = (frame->sr_coded_error - frame->coded_error) / num_mbs; double sr_decay = 1.0; double modified_pct_inter; double modified_pcnt_intra; - const double motion_amplitude_factor = - frame->pcnt_motion * ((frame->mvc_abs + frame->mvr_abs) / 2); + const double motion_amplitude_part = + frame->pcnt_motion * ((frame->mvc_abs + frame->mvr_abs) / + (cpi->initial_height + cpi->initial_width)); modified_pct_inter = frame->pcnt_inter; - if ((frame->intra_error / DOUBLE_DIVIDE_CHECK(frame->coded_error)) < - (double)NCOUNT_FRAME_II_THRESH) { + if (((frame->coded_error / num_mbs) > LOW_CODED_ERR_PER_MB) && + ((frame->intra_error / DOUBLE_DIVIDE_CHECK(frame->coded_error)) < + (double)NCOUNT_FRAME_II_THRESH)) { modified_pct_inter = frame->pcnt_inter - frame->pcnt_neutral; } modified_pcnt_intra = 100 * (1.0 - modified_pct_inter); - if ((sr_diff > LOW_SR_DIFF_TRHESH)) { sr_diff = VPXMIN(sr_diff, SR_DIFF_MAX); - sr_decay = 1.0 - (SR_DIFF_PART * sr_diff) - - (MOTION_AMP_PART * motion_amplitude_factor) - + sr_decay = 1.0 - (SR_DIFF_PART * sr_diff) - motion_amplitude_part - (INTRA_PART * modified_pcnt_intra); } return 
VPXMAX(sr_decay, VPXMIN(DEFAULT_DECAY_LIMIT, modified_pct_inter)); @@ -1378,8 +1581,7 @@ static double get_sr_decay_rate(const VP9_COMP *cpi, // quality is decaying from frame to frame. static double get_zero_motion_factor(const VP9_COMP *cpi, const FIRSTPASS_STATS *frame) { - const double zero_motion_pct = frame->pcnt_inter - - frame->pcnt_motion; + const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion; double sr_decay = get_sr_decay_rate(cpi, frame); return VPXMIN(sr_decay, zero_motion_pct); } @@ -1390,8 +1592,8 @@ static double get_prediction_decay_rate(const VP9_COMP *cpi, const FIRSTPASS_STATS *next_frame) { const double sr_decay_rate = get_sr_decay_rate(cpi, next_frame); const double zero_motion_factor = - (0.95 * pow((next_frame->pcnt_inter - next_frame->pcnt_motion), - ZM_POWER_FACTOR)); + (0.95 * pow((next_frame->pcnt_inter - next_frame->pcnt_motion), + ZM_POWER_FACTOR)); return VPXMAX(zero_motion_factor, (sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor))); @@ -1400,8 +1602,8 @@ static double get_prediction_decay_rate(const VP9_COMP *cpi, // Function to test for a condition where a complex transition is followed // by a static section. For example in slide shows where there is a fade // between slides. This is to help with more optimal kf and gf positioning. -static int detect_transition_to_still(VP9_COMP *cpi, - int frame_interval, int still_interval, +static int detect_transition_to_still(VP9_COMP *cpi, int frame_interval, + int still_interval, double loop_decay_rate, double last_decay_rate) { TWO_PASS *const twopass = &cpi->twopass; @@ -1410,19 +1612,16 @@ static int detect_transition_to_still(VP9_COMP *cpi, // Break clause to detect very still sections after motion // For example a static image after a fade or other transition // instead of a clean scene cut. 
- if (frame_interval > rc->min_gf_interval && - loop_decay_rate >= 0.999 && + if (frame_interval > rc->min_gf_interval && loop_decay_rate >= 0.999 && last_decay_rate < 0.9) { int j; // Look ahead a few frames to see if static condition persists... for (j = 0; j < still_interval; ++j) { const FIRSTPASS_STATS *stats = &twopass->stats_in[j]; - if (stats >= twopass->stats_in_end) - break; + if (stats >= twopass->stats_in_end) break; - if (stats->pcnt_inter - stats->pcnt_motion < 0.999) - break; + if (stats->pcnt_inter - stats->pcnt_motion < 0.999) break; } // Only if it does do we signal a transition to still. @@ -1464,30 +1663,27 @@ static void accumulate_frame_motion_stats(const FIRSTPASS_STATS *stats, // Accumulate a measure of how uniform (or conversely how random) the motion // field is (a ratio of abs(mv) / mv). if (pct > 0.05) { - const double mvr_ratio = fabs(stats->mvr_abs) / - DOUBLE_DIVIDE_CHECK(fabs(stats->MVr)); - const double mvc_ratio = fabs(stats->mvc_abs) / - DOUBLE_DIVIDE_CHECK(fabs(stats->MVc)); + const double mvr_ratio = + fabs(stats->mvr_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVr)); + const double mvc_ratio = + fabs(stats->mvc_abs) / DOUBLE_DIVIDE_CHECK(fabs(stats->MVc)); - *mv_ratio_accumulator += pct * (mvr_ratio < stats->mvr_abs ? - mvr_ratio : stats->mvr_abs); - *mv_ratio_accumulator += pct * (mvc_ratio < stats->mvc_abs ? - mvc_ratio : stats->mvc_abs); + *mv_ratio_accumulator += + pct * (mvr_ratio < stats->mvr_abs ? mvr_ratio : stats->mvr_abs); + *mv_ratio_accumulator += + pct * (mvc_ratio < stats->mvc_abs ? 
mvc_ratio : stats->mvc_abs); } } #define BASELINE_ERR_PER_MB 1000.0 -static double calc_frame_boost(VP9_COMP *cpi, - const FIRSTPASS_STATS *this_frame, - double this_frame_mv_in_out, - double max_boost) { +static double calc_frame_boost(VP9_COMP *cpi, const FIRSTPASS_STATS *this_frame, + double this_frame_mv_in_out, double max_boost) { double frame_boost; - const double lq = - vp9_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME], - cpi->common.bit_depth); + const double lq = vp9_convert_qindex_to_q( + cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth); const double boost_q_correction = VPXMIN((0.5 + (lq * 0.015)), 1.5); - int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) - ? cpi->initial_mbs : cpi->common.MBs; + int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs + : cpi->common.MBs; // Correct for any inactive region in the image num_mbs = (int)VPXMAX(1, num_mbs * calculate_active_area(cpi, this_frame)); @@ -1509,8 +1705,7 @@ static double calc_frame_boost(VP9_COMP *cpi, return VPXMIN(frame_boost, max_boost * boost_q_correction); } -static int calc_arf_boost(VP9_COMP *cpi, int offset, - int f_frames, int b_frames, +static int calc_arf_boost(VP9_COMP *cpi, int offset, int f_frames, int b_frames, int *f_boost, int *b_boost) { TWO_PASS *const twopass = &cpi->twopass; int i; @@ -1526,14 +1721,12 @@ static int calc_arf_boost(VP9_COMP *cpi, int offset, // Search forward from the proposed arf/next gf position. for (i = 0; i < f_frames; ++i) { const FIRSTPASS_STATS *this_frame = read_frame_stats(twopass, i + offset); - if (this_frame == NULL) - break; + if (this_frame == NULL) break; // Update the motion related elements to the boost calculation. 
- accumulate_frame_motion_stats(this_frame, - &this_frame_mv_in_out, &mv_in_out_accumulator, - &abs_mv_in_out_accumulator, - &mv_ratio_accumulator); + accumulate_frame_motion_stats( + this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator, + &abs_mv_in_out_accumulator, &mv_ratio_accumulator); // We want to discount the flash frame itself and the recovery // frame that follows as both will have poor scores. @@ -1544,12 +1737,13 @@ static int calc_arf_boost(VP9_COMP *cpi, int offset, if (!flash_detected) { decay_accumulator *= get_prediction_decay_rate(cpi, this_frame); decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR - ? MIN_DECAY_FACTOR : decay_accumulator; + ? MIN_DECAY_FACTOR + : decay_accumulator; } - boost_score += decay_accumulator * calc_frame_boost(cpi, this_frame, - this_frame_mv_in_out, - GF_MAX_BOOST); + boost_score += + decay_accumulator * + calc_frame_boost(cpi, this_frame, this_frame_mv_in_out, GF_MAX_BOOST); } *f_boost = (int)boost_score; @@ -1565,14 +1759,12 @@ static int calc_arf_boost(VP9_COMP *cpi, int offset, // Search backward towards last gf position. for (i = -1; i >= -b_frames; --i) { const FIRSTPASS_STATS *this_frame = read_frame_stats(twopass, i + offset); - if (this_frame == NULL) - break; + if (this_frame == NULL) break; // Update the motion related elements to the boost calculation. - accumulate_frame_motion_stats(this_frame, - &this_frame_mv_in_out, &mv_in_out_accumulator, - &abs_mv_in_out_accumulator, - &mv_ratio_accumulator); + accumulate_frame_motion_stats( + this_frame, &this_frame_mv_in_out, &mv_in_out_accumulator, + &abs_mv_in_out_accumulator, &mv_ratio_accumulator); // We want to discount the the flash frame itself and the recovery // frame that follows as both will have poor scores. @@ -1583,12 +1775,13 @@ static int calc_arf_boost(VP9_COMP *cpi, int offset, if (!flash_detected) { decay_accumulator *= get_prediction_decay_rate(cpi, this_frame); decay_accumulator = decay_accumulator < MIN_DECAY_FACTOR - ? 
MIN_DECAY_FACTOR : decay_accumulator; + ? MIN_DECAY_FACTOR + : decay_accumulator; } - boost_score += decay_accumulator * calc_frame_boost(cpi, this_frame, - this_frame_mv_in_out, - GF_MAX_BOOST); + boost_score += + decay_accumulator * + calc_frame_boost(cpi, this_frame, this_frame_mv_in_out, GF_MAX_BOOST); } *b_boost = (int)boost_score; @@ -1636,9 +1829,10 @@ static int64_t calculate_total_gf_group_bits(VP9_COMP *cpi, } // Clamp odd edge cases. - total_group_bits = (total_group_bits < 0) ? - 0 : (total_group_bits > twopass->kf_group_bits) ? - twopass->kf_group_bits : total_group_bits; + total_group_bits = + (total_group_bits < 0) ? 0 : (total_group_bits > twopass->kf_group_bits) + ? twopass->kf_group_bits + : total_group_bits; // Clip based on user supplied data rate variability limit. if (total_group_bits > (int64_t)max_bits * rc->baseline_gf_interval) @@ -1648,13 +1842,12 @@ static int64_t calculate_total_gf_group_bits(VP9_COMP *cpi, } // Calculate the number bits extra to assign to boosted frames in a group. -static int calculate_boost_bits(int frame_count, - int boost, int64_t total_group_bits) { +static int calculate_boost_bits(int frame_count, int boost, + int64_t total_group_bits) { int allocation_chunks; // return 0 for invalid inputs (could arise e.g. 
through rounding errors) - if (!boost || (total_group_bits <= 0) || (frame_count <= 0) ) - return 0; + if (!boost || (total_group_bits <= 0) || (frame_count <= 0)) return 0; allocation_chunks = (frame_count * 100) + boost; @@ -1683,9 +1876,8 @@ static void get_arf_buffer_indices(unsigned char *arf_buffer_indices) { } static void allocate_gf_group_bits(VP9_COMP *cpi, int64_t gf_group_bits, - double group_error, int gf_arf_bits) { + int gf_arf_bits) { RATE_CONTROL *const rc = &cpi->rc; - const VP9EncoderConfig *const oxcf = &cpi->oxcf; TWO_PASS *const twopass = &cpi->twopass; GF_GROUP *const gf_group = &twopass->gf_group; FIRSTPASS_STATS frame_stats; @@ -1695,21 +1887,22 @@ static void allocate_gf_group_bits(VP9_COMP *cpi, int64_t gf_group_bits, int key_frame; const int max_bits = frame_max_bits(&cpi->rc, &cpi->oxcf); int64_t total_group_bits = gf_group_bits; - double modified_err = 0.0; - double err_fraction; int mid_boost_bits = 0; int mid_frame_idx; unsigned char arf_buffer_indices[MAX_ACTIVE_ARFS]; int alt_frame_index = frame_index; - int has_temporal_layers = is_two_pass_svc(cpi) && - cpi->svc.number_temporal_layers > 1; + int has_temporal_layers = + is_two_pass_svc(cpi) && cpi->svc.number_temporal_layers > 1; + int normal_frames; + int normal_frame_bits; + int last_frame_bits; + int last_frame_reduction; // Only encode alt reference frame in temporal base layer. 
- if (has_temporal_layers) - alt_frame_index = cpi->svc.number_temporal_layers; + if (has_temporal_layers) alt_frame_index = cpi->svc.number_temporal_layers; - key_frame = cpi->common.frame_type == KEY_FRAME || - vp9_is_upper_layer_key_frame(cpi); + key_frame = + cpi->common.frame_type == KEY_FRAME || vp9_is_upper_layer_key_frame(cpi); get_arf_buffer_indices(arf_buffer_indices); @@ -1729,14 +1922,12 @@ static void allocate_gf_group_bits(VP9_COMP *cpi, int64_t gf_group_bits, gf_group->arf_ref_idx[0] = arf_buffer_indices[0]; // Step over the golden frame / overlay frame - if (EOF == input_stats(twopass, &frame_stats)) - return; + if (EOF == input_stats(twopass, &frame_stats)) return; } // Deduct the boost bits for arf (or gf if it is not a key frame) // from the group total. - if (rc->source_alt_ref_pending || !key_frame) - total_group_bits -= gf_arf_bits; + if (rc->source_alt_ref_pending || !key_frame) total_group_bits -= gf_arf_bits; // Store the bits to spend on the ARF if there is one. if (rc->source_alt_ref_pending) { @@ -1754,57 +1945,68 @@ static void allocate_gf_group_bits(VP9_COMP *cpi, int64_t gf_group_bits, gf_group->arf_update_idx[alt_frame_index] = arf_buffer_indices[0]; gf_group->arf_ref_idx[alt_frame_index] = - arf_buffer_indices[cpi->multi_arf_last_grp_enabled && - rc->source_alt_ref_active]; - if (!has_temporal_layers) - ++frame_index; + arf_buffer_indices[cpi->multi_arf_last_grp_enabled && + rc->source_alt_ref_active]; + if (!has_temporal_layers) ++frame_index; if (cpi->multi_arf_enabled) { // Set aside a slot for a level 1 arf. 
gf_group->update_type[frame_index] = ARF_UPDATE; gf_group->rf_level[frame_index] = GF_ARF_LOW; gf_group->arf_src_offset[frame_index] = - (unsigned char)((rc->baseline_gf_interval >> 1) - 1); + (unsigned char)((rc->baseline_gf_interval >> 1) - 1); gf_group->arf_update_idx[frame_index] = arf_buffer_indices[1]; gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[0]; ++frame_index; } } + // Note index of the first normal inter frame int eh group (not gf kf arf) + gf_group->first_inter_index = frame_index; + // Define middle frame mid_frame_idx = frame_index + (rc->baseline_gf_interval >> 1) - 1; + normal_frames = (rc->baseline_gf_interval - rc->source_alt_ref_pending); + + // The last frame in the group is used less as a predictor so reduce + // its allocation a little. + if (normal_frames > 1) { + normal_frame_bits = (int)(total_group_bits / normal_frames); + last_frame_reduction = normal_frame_bits / 16; + last_frame_bits = normal_frame_bits - last_frame_reduction; + } else { + normal_frame_bits = (int)total_group_bits; + last_frame_bits = normal_frame_bits; + last_frame_reduction = 0; + } + // Allocate bits to the other frames in the group. - for (i = 0; i < rc->baseline_gf_interval - rc->source_alt_ref_pending; ++i) { + for (i = 0; i < normal_frames; ++i) { int arf_idx = 0; - if (EOF == input_stats(twopass, &frame_stats)) - break; + if (EOF == input_stats(twopass, &frame_stats)) break; if (has_temporal_layers && frame_index == alt_frame_index) { ++frame_index; } - modified_err = calculate_modified_err(cpi, twopass, oxcf, &frame_stats); - - if (group_error > 0) - err_fraction = modified_err / DOUBLE_DIVIDE_CHECK(group_error); - else - err_fraction = 0.0; - - target_frame_size = (int)((double)total_group_bits * err_fraction); + target_frame_size = (i == (normal_frames - 1)) + ? last_frame_bits + : (i == mid_frame_idx) + ? 
normal_frame_bits + last_frame_reduction + : normal_frame_bits; if (rc->source_alt_ref_pending && cpi->multi_arf_enabled) { mid_boost_bits += (target_frame_size >> 4); target_frame_size -= (target_frame_size >> 4); - if (frame_index <= mid_frame_idx) - arf_idx = 1; + if (frame_index <= mid_frame_idx) arf_idx = 1; } gf_group->arf_update_idx[frame_index] = arf_buffer_indices[arf_idx]; gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[arf_idx]; - target_frame_size = clamp(target_frame_size, 0, - VPXMIN(max_bits, (int)total_group_bits)); + target_frame_size = + clamp(target_frame_size, 0, VPXMIN(max_bits, (int)total_group_bits)); gf_group->update_type[frame_index] = LF_UPDATE; gf_group->rf_level[frame_index] = INTER_NORMAL; @@ -1840,6 +2042,20 @@ static void allocate_gf_group_bits(VP9_COMP *cpi, int64_t gf_group_bits, cpi->multi_arf_last_grp_enabled = cpi->multi_arf_enabled; } +// Adjusts the ARNF filter for a GF group. +static void adjust_group_arnr_filter(VP9_COMP *cpi, double section_noise, + double section_inter, + double section_motion) { + TWO_PASS *const twopass = &cpi->twopass; + double section_zeromv = section_inter - section_motion; + + twopass->arnr_strength_adjustment = 0; + + if ((section_zeromv < 0.10) || (section_noise <= (SECTION_NOISE_DEF * 0.75))) + twopass->arnr_strength_adjustment -= 1; + if (section_zeromv > 0.50) twopass->arnr_strength_adjustment += 1; +} + // Analyse and define a gf/arf group. 
static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { VP9_COMMON *const cm = &cpi->common; @@ -1853,11 +2069,12 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { double boost_score = 0.0; double old_boost_score = 0.0; double gf_group_err = 0.0; -#if GROUP_ADAPTIVE_MAXQ double gf_group_raw_error = 0.0; -#endif + double gf_group_noise = 0.0; double gf_group_skip_pct = 0.0; double gf_group_inactive_zone_rows = 0.0; + double gf_group_inter = 0.0; + double gf_group_motion = 0.0; double gf_first_frame_err = 0.0; double mod_frame_err = 0.0; @@ -1880,7 +2097,6 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { int active_max_gf_interval; int active_min_gf_interval; int64_t gf_group_bits; - double gf_group_error_left; int gf_arf_bits; const int is_key_frame = frame_is_intra_only(cm); const int arf_active_or_kf = is_key_frame || rc->source_alt_ref_active; @@ -1905,11 +2121,12 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { // the error score / cost of this frame has already been accounted for. if (arf_active_or_kf) { gf_group_err -= gf_first_frame_err; -#if GROUP_ADAPTIVE_MAXQ gf_group_raw_error -= this_frame->coded_error; -#endif + gf_group_noise -= this_frame->frame_noise_energy; gf_group_skip_pct -= this_frame->intra_skip_pct; gf_group_inactive_zone_rows -= this_frame->inactive_zone_rows; + gf_group_inter -= this_frame->pcnt_inter; + gf_group_motion -= this_frame->pcnt_motion; } // Motion breakout threshold for loop below depends on image size. @@ -1919,13 +2136,12 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { // Set a maximum and minimum interval for the GF group. // If the image appears almost completely static we can extend beyond this. 
{ - int int_max_q = - (int)(vp9_convert_qindex_to_q(twopass->active_worst_quality, - cpi->common.bit_depth)); - int int_lbq = - (int)(vp9_convert_qindex_to_q(rc->last_boosted_qindex, - cpi->common.bit_depth)); - active_min_gf_interval = rc->min_gf_interval + VPXMIN(2, int_max_q / 200); + int int_max_q = (int)(vp9_convert_qindex_to_q(twopass->active_worst_quality, + cpi->common.bit_depth)); + int int_lbq = (int)(vp9_convert_qindex_to_q(rc->last_boosted_qindex, + cpi->common.bit_depth)); + active_min_gf_interval = + rc->min_gf_interval + arf_active_or_kf + VPXMIN(2, int_max_q / 200); if (active_min_gf_interval > rc->max_gf_interval) active_min_gf_interval = rc->max_gf_interval; @@ -1936,13 +2152,18 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { // bits to spare and are better with a smaller interval and smaller boost. // At high Q when there are few bits to spare we are better with a longer // interval to spread the cost of the GF. - active_max_gf_interval = 12 + VPXMIN(4, (int_lbq / 6)); + active_max_gf_interval = 12 + arf_active_or_kf + VPXMIN(4, (int_lbq / 6)); // We have: active_min_gf_interval <= rc->max_gf_interval if (active_max_gf_interval < active_min_gf_interval) active_max_gf_interval = active_min_gf_interval; else if (active_max_gf_interval > rc->max_gf_interval) active_max_gf_interval = rc->max_gf_interval; + + // Would the active max drop us out just before the near the next kf? + if ((active_max_gf_interval <= rc->frames_to_key) && + (active_max_gf_interval >= (rc->frames_to_key - rc->min_gf_interval))) + active_max_gf_interval = rc->frames_to_key / 2; } } @@ -1953,24 +2174,23 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { // Accumulate error score of frames in this gf group. 
mod_frame_err = calculate_modified_err(cpi, twopass, oxcf, this_frame); gf_group_err += mod_frame_err; -#if GROUP_ADAPTIVE_MAXQ gf_group_raw_error += this_frame->coded_error; -#endif + gf_group_noise += this_frame->frame_noise_energy; gf_group_skip_pct += this_frame->intra_skip_pct; gf_group_inactive_zone_rows += this_frame->inactive_zone_rows; + gf_group_inter += this_frame->pcnt_inter; + gf_group_motion += this_frame->pcnt_motion; - if (EOF == input_stats(twopass, &next_frame)) - break; + if (EOF == input_stats(twopass, &next_frame)) break; // Test for the case where there is a brief flash but the prediction // quality back to an earlier frame is then restored. flash_detected = detect_flash(twopass, 0); // Update the motion related elements to the boost calculation. - accumulate_frame_motion_stats(&next_frame, - &this_frame_mv_in_out, &mv_in_out_accumulator, - &abs_mv_in_out_accumulator, - &mv_ratio_accumulator); + accumulate_frame_motion_stats( + &next_frame, &this_frame_mv_in_out, &mv_in_out_accumulator, + &abs_mv_in_out_accumulator, &mv_ratio_accumulator); // Accumulate the effect of prediction quality decay. if (!flash_detected) { @@ -1993,23 +2213,24 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { } // Calculate a boost number for this frame. - boost_score += decay_accumulator * calc_frame_boost(cpi, &next_frame, - this_frame_mv_in_out, - GF_MAX_BOOST); + boost_score += + decay_accumulator * + calc_frame_boost(cpi, &next_frame, this_frame_mv_in_out, GF_MAX_BOOST); // Break out conditions. if ( - // Break at active_max_gf_interval unless almost totally static. - (i >= (active_max_gf_interval + arf_active_or_kf) && - zero_motion_accumulator < 0.995) || - ( - // Don't break out with a very short interval. 
- (i >= active_min_gf_interval + arf_active_or_kf) && - (!flash_detected) && - ((mv_ratio_accumulator > mv_ratio_accumulator_thresh) || - (abs_mv_in_out_accumulator > 3.0) || - (mv_in_out_accumulator < -2.0) || - ((boost_score - old_boost_score) < BOOST_BREAKOUT)))) { + // Break at active_max_gf_interval unless almost totally static. + ((i >= active_max_gf_interval) && (zero_motion_accumulator < 0.995)) || + ( + // Don't break out with a very short interval. + (i >= active_min_gf_interval) && + // If possible dont break very close to a kf + ((rc->frames_to_key - i) >= rc->min_gf_interval) && + (!flash_detected) && + ((mv_ratio_accumulator > mv_ratio_accumulator_thresh) || + (abs_mv_in_out_accumulator > 3.0) || + (mv_in_out_accumulator < -2.0) || + ((boost_score - old_boost_score) < BOOST_BREAKOUT)))) { boost_score = old_boost_score; break; } @@ -2018,24 +2239,23 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { old_boost_score = boost_score; } - twopass->gf_zeromotion_pct = (int)(zero_motion_accumulator * 1000.0); - // Was the group length constrained by the requirement for a new KF? rc->constrained_gf_group = (i >= rc->frames_to_key) ? 1 : 0; // Should we use the alternate reference frame. - if (allow_alt_ref && - (i < cpi->oxcf.lag_in_frames) && - (i >= rc->min_gf_interval)) { + if (allow_alt_ref && (i < cpi->oxcf.lag_in_frames) && + (i >= rc->min_gf_interval)) { // Calculate the boost for alt ref. - rc->gfu_boost = calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, - &b_boost); + rc->gfu_boost = + calc_arf_boost(cpi, 0, (i - 1), (i - 1), &f_boost, &b_boost); rc->source_alt_ref_pending = 1; // Test to see if multi arf is appropriate. cpi->multi_arf_enabled = - (cpi->multi_arf_allowed && (rc->baseline_gf_interval >= 6) && - (zero_motion_accumulator < 0.995)) ? 1 : 0; + (cpi->multi_arf_allowed && (rc->baseline_gf_interval >= 6) && + (zero_motion_accumulator < 0.995)) + ? 
1 + : 0; } else { rc->gfu_boost = VPXMAX((int)boost_score, MIN_ARF_GF_BOOST); rc->source_alt_ref_pending = 0; @@ -2052,14 +2272,14 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { int new_gf_interval = (rc->baseline_gf_interval + count) & (~count); int j; for (j = 0; j < new_gf_interval - rc->baseline_gf_interval; ++j) { - if (EOF == input_stats(twopass, this_frame)) - break; + if (EOF == input_stats(twopass, this_frame)) break; gf_group_err += calculate_modified_err(cpi, twopass, oxcf, this_frame); -#if GROUP_ADAPTIVE_MAXQ gf_group_raw_error += this_frame->coded_error; -#endif + gf_group_noise += this_frame->frame_noise_energy; gf_group_skip_pct += this_frame->intra_skip_pct; gf_group_inactive_zone_rows += this_frame->inactive_zone_rows; + gf_group_inter += this_frame->pcnt_inter; + gf_group_motion += this_frame->pcnt_motion; } rc->baseline_gf_interval = new_gf_interval; } @@ -2072,7 +2292,6 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { // Calculate the bits to be allocated to the gf/arf group as a whole gf_group_bits = calculate_total_gf_group_bits(cpi, gf_group_err); -#if GROUP_ADAPTIVE_MAXQ // Calculate an estimate of the maxq needed for the group. // We are more agressive about correcting for sections // where there could be significant overshoot than for easier @@ -2080,72 +2299,57 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { // of the allocated bit budget. 
if ((cpi->oxcf.rc_mode != VPX_Q) && (rc->baseline_gf_interval > 1)) { const int vbr_group_bits_per_frame = - (int)(gf_group_bits / rc->baseline_gf_interval); - const double group_av_err = gf_group_raw_error / rc->baseline_gf_interval; + (int)(gf_group_bits / rc->baseline_gf_interval); + const double group_av_err = gf_group_raw_error / rc->baseline_gf_interval; + const double group_av_noise = gf_group_noise / rc->baseline_gf_interval; const double group_av_skip_pct = - gf_group_skip_pct / rc->baseline_gf_interval; + gf_group_skip_pct / rc->baseline_gf_interval; const double group_av_inactive_zone = - ((gf_group_inactive_zone_rows * 2) / - (rc->baseline_gf_interval * (double)cm->mb_rows)); - - int tmp_q; - // rc factor is a weight factor that corrects for local rate control drift. - double rc_factor = 1.0; - if (rc->rate_error_estimate > 0) { - rc_factor = VPXMAX(RC_FACTOR_MIN, - (double)(100 - rc->rate_error_estimate) / 100.0); - } else { - rc_factor = VPXMIN(RC_FACTOR_MAX, - (double)(100 - rc->rate_error_estimate) / 100.0); - } - tmp_q = - get_twopass_worst_quality(cpi, group_av_err, - (group_av_skip_pct + group_av_inactive_zone), - vbr_group_bits_per_frame, - twopass->kfgroup_inter_fraction * rc_factor); + ((gf_group_inactive_zone_rows * 2) / + (rc->baseline_gf_interval * (double)cm->mb_rows)); + int tmp_q = get_twopass_worst_quality( + cpi, group_av_err, (group_av_skip_pct + group_av_inactive_zone), + group_av_noise, vbr_group_bits_per_frame); twopass->active_worst_quality = - VPXMAX(tmp_q, twopass->active_worst_quality >> 1); + (tmp_q + (twopass->active_worst_quality * 3)) >> 2; + } + + // Context Adjustment of ARNR filter strength + if (rc->baseline_gf_interval > 1) { + adjust_group_arnr_filter(cpi, (gf_group_noise / rc->baseline_gf_interval), + (gf_group_inter / rc->baseline_gf_interval), + (gf_group_motion / rc->baseline_gf_interval)); + } else { + twopass->arnr_strength_adjustment = 0; } -#endif // Calculate the extra bits to be used for boosted frame(s) - 
gf_arf_bits = calculate_boost_bits(rc->baseline_gf_interval, - rc->gfu_boost, gf_group_bits); + gf_arf_bits = calculate_boost_bits(rc->baseline_gf_interval, rc->gfu_boost, + gf_group_bits); // Adjust KF group bits and error remaining. twopass->kf_group_error_left -= (int64_t)gf_group_err; - // If this is an arf update we want to remove the score for the overlay - // frame at the end which will usually be very cheap to code. - // The overlay frame has already, in effect, been coded so we want to spread - // the remaining bits among the other frames. - // For normal GFs remove the score for the GF itself unless this is - // also a key frame in which case it has already been accounted for. - if (rc->source_alt_ref_pending) { - gf_group_error_left = gf_group_err - mod_frame_err; - } else if (is_key_frame == 0) { - gf_group_error_left = gf_group_err - gf_first_frame_err; - } else { - gf_group_error_left = gf_group_err; - } - // Allocate bits to each of the frames in the GF group. - allocate_gf_group_bits(cpi, gf_group_bits, gf_group_error_left, gf_arf_bits); + allocate_gf_group_bits(cpi, gf_group_bits, gf_arf_bits); // Reset the file position. reset_fpf_position(twopass, start_pos); // Calculate a section intra ratio used in setting max loop filter. if (cpi->common.frame_type != KEY_FRAME) { - twopass->section_intra_rating = - calculate_section_intra_ratio(start_pos, twopass->stats_in_end, - rc->baseline_gf_interval); + twopass->section_intra_rating = calculate_section_intra_ratio( + start_pos, twopass->stats_in_end, rc->baseline_gf_interval); } if (oxcf->resize_mode == RESIZE_DYNAMIC) { // Default to starting GF groups at normal frame size. cpi->rc.next_frame_size_selector = UNSCALED; } + + // Reset rolling actual and target bits counters for ARF groups. + twopass->rolling_arf_group_target_bits = 0; + twopass->rolling_arf_group_actual_bits = 0; } // Threshold for use of the lagging second reference frame. 
High second ref @@ -2181,7 +2385,7 @@ static int test_candidate_kf(TWO_PASS *twopass, int is_viable_kf = 0; double pcnt_intra = 1.0 - this_frame->pcnt_inter; double modified_pcnt_inter = - this_frame->pcnt_inter - this_frame->pcnt_neutral; + this_frame->pcnt_inter - this_frame->pcnt_neutral; // Does the frame satisfy the primary criteria of a key frame? // See above for an explanation of the test criteria. @@ -2193,15 +2397,15 @@ static int test_candidate_kf(TWO_PASS *twopass, (pcnt_intra > (INTRA_VS_INTER_THRESH * modified_pcnt_inter)) && ((this_frame->intra_error / DOUBLE_DIVIDE_CHECK(this_frame->coded_error)) < - KF_II_ERR_THRESHOLD) && + KF_II_ERR_THRESHOLD) && ((fabs(last_frame->coded_error - this_frame->coded_error) / - DOUBLE_DIVIDE_CHECK(this_frame->coded_error) > + DOUBLE_DIVIDE_CHECK(this_frame->coded_error) > ERR_CHANGE_THRESHOLD) || (fabs(last_frame->intra_error - this_frame->intra_error) / - DOUBLE_DIVIDE_CHECK(this_frame->intra_error) > + DOUBLE_DIVIDE_CHECK(this_frame->intra_error) > ERR_CHANGE_THRESHOLD) || ((next_frame->intra_error / - DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) > + DOUBLE_DIVIDE_CHECK(next_frame->coded_error)) > II_IMPROVEMENT_THRESHOLD))))) { int i; const FIRSTPASS_STATS *start_pos = twopass->stats_in; @@ -2215,8 +2419,7 @@ static int test_candidate_kf(TWO_PASS *twopass, double next_iiratio = (BOOST_FACTOR * local_next_frame.intra_error / DOUBLE_DIVIDE_CHECK(local_next_frame.coded_error)); - if (next_iiratio > KF_II_MAX) - next_iiratio = KF_II_MAX; + if (next_iiratio > KF_II_MAX) next_iiratio = KF_II_MAX; // Cumulative effect of decay in prediction quality. if (local_next_frame.pcnt_inter > 0.85) @@ -2228,10 +2431,9 @@ static int test_candidate_kf(TWO_PASS *twopass, boost_score += (decay_accumulator * next_iiratio); // Test various breakout clauses. 
- if ((local_next_frame.pcnt_inter < 0.05) || - (next_iiratio < 1.5) || - (((local_next_frame.pcnt_inter - - local_next_frame.pcnt_neutral) < 0.20) && + if ((local_next_frame.pcnt_inter < 0.05) || (next_iiratio < 1.5) || + (((local_next_frame.pcnt_inter - local_next_frame.pcnt_neutral) < + 0.20) && (next_iiratio < 3.0)) || ((boost_score - old_boost_score) < 3.0) || (local_next_frame.intra_error < 200)) { @@ -2241,8 +2443,7 @@ static int test_candidate_kf(TWO_PASS *twopass, old_boost_score = boost_score; // Get the next frame details - if (EOF == input_stats(twopass, &local_next_frame)) - break; + if (EOF == input_stats(twopass, &local_next_frame)) break; } // If there is tolerable prediction for at least the next 3 frames then @@ -2308,8 +2509,7 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { kf_mod_err = calculate_modified_err(cpi, twopass, oxcf, this_frame); // Initialize the decay rates for the recent frames to check - for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j) - recent_loop_decay[j] = 1.0; + for (j = 0; j < FRAMES_TO_CHECK_DECAY; ++j) recent_loop_decay[j] = 1.0; // Find the next keyframe. i = 0; @@ -2353,8 +2553,7 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { // If we don't have a real key frame within the next two // key_freq intervals then break out of the loop. - if (rc->frames_to_key >= 2 * cpi->oxcf.key_freq) - break; + if (rc->frames_to_key >= 2 * cpi->oxcf.key_freq) break; } else { ++rc->frames_to_key; } @@ -2365,8 +2564,7 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { // We already breakout of the loop above at 2x max. // This code centers the extra kf if the actual natural interval // is between 1x and 2x. 
- if (cpi->oxcf.auto_key && - rc->frames_to_key > cpi->oxcf.key_freq) { + if (cpi->oxcf.auto_key && rc->frames_to_key > cpi->oxcf.key_freq) { FIRSTPASS_STATS tmp_frame = first_frame; rc->frames_to_key /= 2; @@ -2394,8 +2592,7 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { int new_frame_to_key = (rc->frames_to_key + count) & (~count); int j; for (j = 0; j < new_frame_to_key - rc->frames_to_key; ++j) { - if (EOF == input_stats(twopass, this_frame)) - break; + if (EOF == input_stats(twopass, this_frame)) break; kf_group_err += calculate_modified_err(cpi, twopass, oxcf, this_frame); } rc->frames_to_key = new_frame_to_key; @@ -2417,8 +2614,8 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { // Default allocation based on bits left and relative // complexity of the section. - twopass->kf_group_bits = (int64_t)(twopass->bits_left * - (kf_group_err / twopass->modified_error_left)); + twopass->kf_group_bits = (int64_t)( + twopass->bits_left * (kf_group_err / twopass->modified_error_left)); // Clip based on maximum per frame rate defined by the user. max_grp_bits = (int64_t)max_bits * (int64_t)rc->frames_to_key; @@ -2437,23 +2634,22 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { decay_accumulator = 1.0; boost_score = 0.0; for (i = 0; i < (rc->frames_to_key - 1); ++i) { - if (EOF == input_stats(twopass, &next_frame)) - break; + if (EOF == input_stats(twopass, &next_frame)) break; // Monitor for static sections. - zero_motion_accumulator = VPXMIN( - zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame)); + zero_motion_accumulator = VPXMIN(zero_motion_accumulator, + get_zero_motion_factor(cpi, &next_frame)); // Not all frames in the group are necessarily used in calculating boost. 
if ((i <= rc->max_gf_interval) || ((i <= (rc->max_gf_interval * 4)) && (decay_accumulator > 0.5))) { const double frame_boost = - calc_frame_boost(cpi, &next_frame, 0, KF_MAX_BOOST); + calc_frame_boost(cpi, &next_frame, 0, KF_MAX_BOOST); // How fast is prediction quality decaying. if (!detect_flash(twopass, 0)) { const double loop_decay_rate = - get_prediction_decay_rate(cpi, &next_frame); + get_prediction_decay_rate(cpi, &next_frame); decay_accumulator *= loop_decay_rate; decay_accumulator = VPXMAX(decay_accumulator, MIN_DECAY_FACTOR); av_decay_accumulator += decay_accumulator; @@ -2470,9 +2666,8 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { twopass->kf_zeromotion_pct = (int)(zero_motion_accumulator * 100.0); // Calculate a section intra ratio used in setting max loop filter. - twopass->section_intra_rating = - calculate_section_intra_ratio(start_position, twopass->stats_in_end, - rc->frames_to_key); + twopass->section_intra_rating = calculate_section_intra_ratio( + start_position, twopass->stats_in_end, rc->frames_to_key); // Apply various clamps for min and max boost rc->kf_boost = (int)(av_decay_accumulator * boost_score); @@ -2480,18 +2675,8 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { rc->kf_boost = VPXMAX(rc->kf_boost, MIN_KF_BOOST); // Work out how many bits to allocate for the key frame itself. - kf_bits = calculate_boost_bits((rc->frames_to_key - 1), - rc->kf_boost, twopass->kf_group_bits); - - // Work out the fraction of the kf group bits reserved for the inter frames - // within the group after discounting the bits for the kf itself. 
- if (twopass->kf_group_bits) { - twopass->kfgroup_inter_fraction = - (double)(twopass->kf_group_bits - kf_bits) / - (double)twopass->kf_group_bits; - } else { - twopass->kfgroup_inter_fraction = 1.0; - } + kf_bits = calculate_boost_bits((rc->frames_to_key - 1), rc->kf_boost, + twopass->kf_group_bits); twopass->kf_group_bits -= kf_bits; @@ -2546,9 +2731,7 @@ static void configure_buffer_updates(VP9_COMP *cpi) { cpi->refresh_golden_frame = 0; cpi->refresh_alt_ref_frame = 1; break; - default: - assert(0); - break; + default: assert(0); break; } if (is_two_pass_svc(cpi)) { if (cpi->svc.temporal_layer_id > 0) { @@ -2557,8 +2740,7 @@ static void configure_buffer_updates(VP9_COMP *cpi) { } if (cpi->svc.layer_context[cpi->svc.spatial_layer_id].gold_ref_idx < 0) cpi->refresh_golden_frame = 0; - if (cpi->alt_ref_source == NULL) - cpi->refresh_alt_ref_frame = 0; + if (cpi->alt_ref_source == NULL) cpi->refresh_alt_ref_frame = 0; } } @@ -2568,17 +2750,20 @@ static int is_skippable_frame(const VP9_COMP *cpi) { // can be skipped for partition check, and the partition size is assigned // according to the variance const SVC *const svc = &cpi->svc; - const TWO_PASS *const twopass = is_two_pass_svc(cpi) ? - &svc->layer_context[svc->spatial_layer_id].twopass : &cpi->twopass; + const TWO_PASS *const twopass = + is_two_pass_svc(cpi) ? 
&svc->layer_context[svc->spatial_layer_id].twopass + : &cpi->twopass; return (!frame_is_intra_only(&cpi->common) && - twopass->stats_in - 2 > twopass->stats_in_start && - twopass->stats_in < twopass->stats_in_end && - (twopass->stats_in - 1)->pcnt_inter - (twopass->stats_in - 1)->pcnt_motion - == 1 && - (twopass->stats_in - 2)->pcnt_inter - (twopass->stats_in - 2)->pcnt_motion - == 1 && - twopass->stats_in->pcnt_inter - twopass->stats_in->pcnt_motion == 1); + twopass->stats_in - 2 > twopass->stats_in_start && + twopass->stats_in < twopass->stats_in_end && + (twopass->stats_in - 1)->pcnt_inter - + (twopass->stats_in - 1)->pcnt_motion == + 1 && + (twopass->stats_in - 2)->pcnt_inter - + (twopass->stats_in - 2)->pcnt_motion == + 1 && + twopass->stats_in->pcnt_inter - twopass->stats_in->pcnt_motion == 1); } void vp9_rc_get_second_pass_params(VP9_COMP *cpi) { @@ -2586,23 +2771,14 @@ void vp9_rc_get_second_pass_params(VP9_COMP *cpi) { RATE_CONTROL *const rc = &cpi->rc; TWO_PASS *const twopass = &cpi->twopass; GF_GROUP *const gf_group = &twopass->gf_group; - int frames_left; FIRSTPASS_STATS this_frame; int target_rate; - LAYER_CONTEXT *const lc = is_two_pass_svc(cpi) ? - &cpi->svc.layer_context[cpi->svc.spatial_layer_id] : 0; + LAYER_CONTEXT *const lc = + is_two_pass_svc(cpi) ? &cpi->svc.layer_context[cpi->svc.spatial_layer_id] + : 0; - if (lc != NULL) { - frames_left = (int)(twopass->total_stats.count - - lc->current_video_frame_in_layer); - } else { - frames_left = (int)(twopass->total_stats.count - - cm->current_video_frame); - } - - if (!twopass->stats_in) - return; + if (!twopass->stats_in) return; // If this is an arf frame then we dont want to read the stats file or // advance the input pointer as we already have what we need. 
@@ -2621,15 +2797,14 @@ void vp9_rc_get_second_pass_params(VP9_COMP *cpi) { } else { lc->is_key_frame = cpi->svc.layer_context[0].is_key_frame; - if (lc->is_key_frame) - cpi->ref_frame_flags &= (~VP9_LAST_FLAG); + if (lc->is_key_frame) cpi->ref_frame_flags &= (~VP9_LAST_FLAG); } } // Do the firstpass stats indicate that this frame is skippable for the // partition search? - if (cpi->sf.allow_partition_search_skip && - cpi->oxcf.pass == 2 && (!cpi->use_svc || is_two_pass_svc(cpi))) { + if (cpi->sf.allow_partition_search_skip && cpi->oxcf.pass == 2 && + (!cpi->use_svc || is_two_pass_svc(cpi))) { cpi->partition_search_skippable_frame = is_skippable_frame(cpi); } @@ -2642,21 +2817,28 @@ void vp9_rc_get_second_pass_params(VP9_COMP *cpi) { twopass->active_worst_quality = cpi->oxcf.cq_level; } else if (cm->current_video_frame == 0 || (lc != NULL && lc->current_video_frame_in_layer == 0)) { + const int frames_left = + (int)(twopass->total_stats.count - + ((lc != NULL) ? lc->current_video_frame_in_layer + : cm->current_video_frame)); // Special case code for first frame. 
- const int section_target_bandwidth = (int)(twopass->bits_left / - frames_left); + const int section_target_bandwidth = + (int)(twopass->bits_left / frames_left); const double section_length = twopass->total_left_stats.count; const double section_error = - twopass->total_left_stats.coded_error / section_length; + twopass->total_left_stats.coded_error / section_length; const double section_intra_skip = - twopass->total_left_stats.intra_skip_pct / section_length; + twopass->total_left_stats.intra_skip_pct / section_length; const double section_inactive_zone = - (twopass->total_left_stats.inactive_zone_rows * 2) / - ((double)cm->mb_rows * section_length); - const int tmp_q = - get_twopass_worst_quality(cpi, section_error, - section_intra_skip + section_inactive_zone, - section_target_bandwidth, DEFAULT_GRP_WEIGHT); + (twopass->total_left_stats.inactive_zone_rows * 2) / + ((double)cm->mb_rows * section_length); + const double section_noise = + twopass->total_left_stats.frame_noise_energy / section_length; + int tmp_q; + + tmp_q = get_twopass_worst_quality( + cpi, section_error, section_intra_skip + section_inactive_zone, + section_noise, section_target_bandwidth); twopass->active_worst_quality = tmp_q; twopass->baseline_active_worst_quality = tmp_q; @@ -2668,8 +2850,7 @@ void vp9_rc_get_second_pass_params(VP9_COMP *cpi) { rc->avg_frame_qindex[KEY_FRAME] = rc->last_q[KEY_FRAME]; } vp9_zero(this_frame); - if (EOF == input_stats(twopass, &this_frame)) - return; + if (EOF == input_stats(twopass, &this_frame)) return; // Set the frame content type flag. 
if (this_frame.intra_skip_pct >= FC_ANIMATION_THRESH) @@ -2714,17 +2895,16 @@ void vp9_rc_get_second_pass_params(VP9_COMP *cpi) { define_gf_group(cpi, &this_frame); rc->frames_till_gf_update_due = rc->baseline_gf_interval; - if (lc != NULL) - cpi->refresh_golden_frame = 1; + if (lc != NULL) cpi->refresh_golden_frame = 1; #if ARF_STATS_OUTPUT { FILE *fpfile; fpfile = fopen("arf.stt", "a"); ++arf_count; - fprintf(fpfile, "%10d %10ld %10d %10d %10ld\n", - cm->current_video_frame, rc->frames_till_gf_update_due, - rc->kf_boost, arf_count, rc->gfu_boost); + fprintf(fpfile, "%10d %10ld %10d %10d %10ld\n", cm->current_video_frame, + rc->frames_till_gf_update_due, rc->kf_boost, arf_count, + rc->gfu_boost); fclose(fpfile); } @@ -2745,11 +2925,13 @@ void vp9_rc_get_second_pass_params(VP9_COMP *cpi) { { const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) - ? cpi->initial_mbs : cpi->common.MBs; + ? cpi->initial_mbs + : cpi->common.MBs; // The multiplication by 256 reverses a scaling factor of (>> 8) // applied when combining MB error values for the frame. twopass->mb_av_energy = - log(((this_frame.intra_error * 256.0) / num_mbs) + 1.0); + log(((this_frame.intra_error * 256.0) / num_mbs) + 1.0); + twopass->mb_smooth_pct = this_frame.intra_smooth_pct; } // Update the total stats remaining structure. @@ -2762,6 +2944,7 @@ void vp9_rc_get_second_pass_params(VP9_COMP *cpi) { void vp9_twopass_postencode_update(VP9_COMP *cpi) { TWO_PASS *const twopass = &cpi->twopass; RATE_CONTROL *const rc = &cpi->rc; + VP9_COMMON *const cm = &cpi->common; const int bits_used = rc->base_frame_target; // VBR correction is done through rc->vbr_bits_off_target. Based on the @@ -2772,10 +2955,14 @@ void vp9_twopass_postencode_update(VP9_COMP *cpi) { rc->vbr_bits_off_target += rc->base_frame_target - rc->projected_frame_size; twopass->bits_left = VPXMAX(twopass->bits_left - bits_used, 0); + // Target vs actual bits for this arf group. 
+ twopass->rolling_arf_group_target_bits += rc->this_frame_target; + twopass->rolling_arf_group_actual_bits += rc->projected_frame_size; + // Calculate the pct rc error. if (rc->total_actual_bits) { rc->rate_error_estimate = - (int)((rc->vbr_bits_off_target * 100) / rc->total_actual_bits); + (int)((rc->vbr_bits_off_target * 100) / rc->total_actual_bits); rc->rate_error_estimate = clamp(rc->rate_error_estimate, -100, 100); } else { rc->rate_error_estimate = 0; @@ -2792,20 +2979,34 @@ void vp9_twopass_postencode_update(VP9_COMP *cpi) { ++twopass->gf_group.index; // If the rate control is drifting consider adjustment to min or maxq. - if ((cpi->oxcf.rc_mode != VPX_Q) && - (cpi->twopass.gf_zeromotion_pct < VLOW_MOTION_THRESHOLD) && - !cpi->rc.is_src_frame_alt_ref) { + if ((cpi->oxcf.rc_mode != VPX_Q) && !cpi->rc.is_src_frame_alt_ref) { const int maxq_adj_limit = - rc->worst_quality - twopass->active_worst_quality; + rc->worst_quality - twopass->active_worst_quality; const int minq_adj_limit = (cpi->oxcf.rc_mode == VPX_CQ ? MINQ_ADJ_LIMIT_CQ : MINQ_ADJ_LIMIT); + int aq_extend_min = 0; + int aq_extend_max = 0; + + // Extend min or Max Q range to account for imbalance from the base + // value when using AQ. + if (cpi->oxcf.aq_mode != NO_AQ) { + if (cm->seg.aq_av_offset < 0) { + // The balance of the AQ map tends towarda lowering the average Q. + aq_extend_min = 0; + aq_extend_max = VPXMIN(maxq_adj_limit, -cm->seg.aq_av_offset); + } else { + // The balance of the AQ map tends towards raising the average Q. + aq_extend_min = VPXMIN(minq_adj_limit, cm->seg.aq_av_offset); + aq_extend_max = 0; + } + } // Undershoot. if (rc->rate_error_estimate > cpi->oxcf.under_shoot_pct) { --twopass->extend_maxq; if (rc->rolling_target_bits >= rc->rolling_actual_bits) ++twopass->extend_minq; - // Overshoot. + // Overshoot. 
} else if (rc->rate_error_estimate < -cpi->oxcf.over_shoot_pct) { --twopass->extend_minq; if (rc->rolling_target_bits < rc->rolling_actual_bits) @@ -2823,8 +3024,10 @@ void vp9_twopass_postencode_update(VP9_COMP *cpi) { --twopass->extend_maxq; } - twopass->extend_minq = clamp(twopass->extend_minq, 0, minq_adj_limit); - twopass->extend_maxq = clamp(twopass->extend_maxq, 0, maxq_adj_limit); + twopass->extend_minq = + clamp(twopass->extend_minq, aq_extend_min, minq_adj_limit); + twopass->extend_maxq = + clamp(twopass->extend_maxq, aq_extend_max, maxq_adj_limit); // If there is a big and undexpected undershoot then feed the extra // bits back in quickly. One situation where this may happen is if a @@ -2834,14 +3037,14 @@ void vp9_twopass_postencode_update(VP9_COMP *cpi) { int fast_extra_thresh = rc->base_frame_target / HIGH_UNDERSHOOT_RATIO; if (rc->projected_frame_size < fast_extra_thresh) { rc->vbr_bits_off_target_fast += - fast_extra_thresh - rc->projected_frame_size; + fast_extra_thresh - rc->projected_frame_size; rc->vbr_bits_off_target_fast = - VPXMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth)); + VPXMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth)); // Fast adaptation of minQ if necessary to use up the extra bits. 
if (rc->avg_frame_bandwidth) { twopass->extend_minq_fast = - (int)(rc->vbr_bits_off_target_fast * 8 / rc->avg_frame_bandwidth); + (int)(rc->vbr_bits_off_target_fast * 8 / rc->avg_frame_bandwidth); } twopass->extend_minq_fast = VPXMIN( twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq); diff --git a/libs/libvpx/vp9/encoder/vp9_firstpass.h b/libs/libvpx/vp9/encoder/vp9_firstpass.h index 5875a7b9b5..6aa39cdc00 100644 --- a/libs/libvpx/vp9/encoder/vp9_firstpass.h +++ b/libs/libvpx/vp9/encoder/vp9_firstpass.h @@ -39,19 +39,19 @@ typedef struct { } FIRSTPASS_MB_STATS; #endif -#define VLOW_MOTION_THRESHOLD 950 - typedef struct { double frame; double weight; double intra_error; double coded_error; double sr_coded_error; + double frame_noise_energy; double pcnt_inter; double pcnt_motion; double pcnt_second_ref; double pcnt_neutral; double intra_skip_pct; + double intra_smooth_pct; // % of blocks that are smooth double inactive_zone_rows; // Image mask rows top and bottom. double inactive_zone_cols; // Image mask columns at left and right edges. 
double MVr; @@ -85,6 +85,7 @@ typedef enum { typedef struct { unsigned char index; + unsigned char first_inter_index; RATE_FACTOR_LEVEL rf_level[(MAX_LAG_BUFFERS * 2) + 1]; FRAME_UPDATE_TYPE update_type[(MAX_LAG_BUFFERS * 2) + 1]; unsigned char arf_src_offset[(MAX_LAG_BUFFERS * 2) + 1]; @@ -107,6 +108,7 @@ typedef struct { double modified_error_max; double modified_error_left; double mb_av_energy; + double mb_smooth_pct; #if CONFIG_FP_MB_STATS uint8_t *frame_mb_stats_buf; @@ -122,19 +124,19 @@ typedef struct { // Error score of frames still to be coded in kf group int64_t kf_group_error_left; - // The fraction for a kf groups total bits allocated to the inter frames - double kfgroup_inter_fraction; + double bpm_factor; + int rolling_arf_group_target_bits; + int rolling_arf_group_actual_bits; int sr_update_lag; - int kf_zeromotion_pct; int last_kfgroup_zeromotion_pct; - int gf_zeromotion_pct; int active_worst_quality; int baseline_active_worst_quality; int extend_minq; int extend_maxq; int extend_minq_fast; + int arnr_strength_adjustment; GF_GROUP gf_group; } TWO_PASS; @@ -153,8 +155,7 @@ void vp9_twopass_postencode_update(struct VP9_COMP *cpi); // Post encode update of the rate control parameters for 2-pass void vp9_twopass_postencode_update(struct VP9_COMP *cpi); -void calculate_coded_size(struct VP9_COMP *cpi, - int *scaled_frame_width, +void calculate_coded_size(struct VP9_COMP *cpi, int *scaled_frame_width, int *scaled_frame_height); #ifdef __cplusplus diff --git a/libs/libvpx/vp9/encoder/vp9_lookahead.c b/libs/libvpx/vp9/encoder/vp9_lookahead.c index def9b8c1db..392cd5d418 100644 --- a/libs/libvpx/vp9/encoder/vp9_lookahead.c +++ b/libs/libvpx/vp9/encoder/vp9_lookahead.c @@ -19,33 +19,28 @@ #include "vp9/encoder/vp9_lookahead.h" /* Return the buffer at the given absolute index and increment the index */ -static struct lookahead_entry *pop(struct lookahead_ctx *ctx, - unsigned int *idx) { - unsigned int index = *idx; +static struct lookahead_entry *pop(struct 
lookahead_ctx *ctx, int *idx) { + int index = *idx; struct lookahead_entry *buf = ctx->buf + index; assert(index < ctx->max_sz); - if (++index >= ctx->max_sz) - index -= ctx->max_sz; + if (++index >= ctx->max_sz) index -= ctx->max_sz; *idx = index; return buf; } - void vp9_lookahead_destroy(struct lookahead_ctx *ctx) { if (ctx) { if (ctx->buf) { - unsigned int i; + int i; - for (i = 0; i < ctx->max_sz; i++) - vpx_free_frame_buffer(&ctx->buf[i].img); + for (i = 0; i < ctx->max_sz; i++) vpx_free_frame_buffer(&ctx->buf[i].img); free(ctx->buf); } free(ctx); } } - struct lookahead_ctx *vp9_lookahead_init(unsigned int width, unsigned int height, unsigned int subsampling_x, @@ -69,32 +64,30 @@ struct lookahead_ctx *vp9_lookahead_init(unsigned int width, unsigned int i; ctx->max_sz = depth; ctx->buf = calloc(depth, sizeof(*ctx->buf)); - if (!ctx->buf) - goto bail; + if (!ctx->buf) goto bail; for (i = 0; i < depth; i++) - if (vpx_alloc_frame_buffer(&ctx->buf[i].img, - width, height, subsampling_x, subsampling_y, + if (vpx_alloc_frame_buffer( + &ctx->buf[i].img, width, height, subsampling_x, subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH - use_highbitdepth, + use_highbitdepth, #endif - VP9_ENC_BORDER_IN_PIXELS, - legacy_byte_alignment)) + VP9_ENC_BORDER_IN_PIXELS, legacy_byte_alignment)) goto bail; } return ctx; - bail: +bail: vp9_lookahead_destroy(ctx); return NULL; } #define USE_PARTIAL_COPY 0 -int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src, +int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src, int64_t ts_start, int64_t ts_end, #if CONFIG_VP9_HIGHBITDEPTH int use_highbitdepth, #endif - unsigned int flags) { + vpx_enc_frame_flags_t flags) { struct lookahead_entry *buf; #if USE_PARTIAL_COPY int row, col, active_end; @@ -109,8 +102,7 @@ int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src, int subsampling_y = src->subsampling_y; int larger_dimensions, new_dimensions; - if (ctx->sz + 1 + MAX_PRE_FRAMES > 
ctx->max_sz) - return 1; + if (ctx->sz + 1 + MAX_PRE_FRAMES > ctx->max_sz) return 1; ctx->sz++; buf = pop(ctx, &ctx->write_idx); @@ -118,8 +110,7 @@ int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src, height != buf->img.y_crop_height || uv_width != buf->img.uv_crop_width || uv_height != buf->img.uv_crop_height; - larger_dimensions = width > buf->img.y_width || - height > buf->img.y_height || + larger_dimensions = width > buf->img.y_width || height > buf->img.y_height || uv_width > buf->img.uv_width || uv_height > buf->img.uv_height; assert(!larger_dimensions || new_dimensions); @@ -139,27 +130,22 @@ int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src, while (1) { // Find the first active macroblock in this row. for (; col < mb_cols; ++col) { - if (active_map[col]) - break; + if (active_map[col]) break; } // No more active macroblock in this row. - if (col == mb_cols) - break; + if (col == mb_cols) break; // Find the end of active region in this row. active_end = col; for (; active_end < mb_cols; ++active_end) { - if (!active_map[active_end]) - break; + if (!active_map[active_end]) break; } // Only copy this active region. - vp9_copy_and_extend_frame_with_rect(src, &buf->img, - row << 4, - col << 4, 16, - (active_end - col) << 4); + vp9_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4, + 16, (active_end - col) << 4); // Start again from the end of this active region. 
col = active_end; @@ -172,14 +158,13 @@ int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src, if (larger_dimensions) { YV12_BUFFER_CONFIG new_img; memset(&new_img, 0, sizeof(new_img)); - if (vpx_alloc_frame_buffer(&new_img, - width, height, subsampling_x, subsampling_y, + if (vpx_alloc_frame_buffer(&new_img, width, height, subsampling_x, + subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH use_highbitdepth, #endif - VP9_ENC_BORDER_IN_PIXELS, - 0)) - return 1; + VP9_ENC_BORDER_IN_PIXELS, 0)) + return 1; vpx_free_frame_buffer(&buf->img); buf->img = new_img; } else if (new_dimensions) { @@ -202,7 +187,6 @@ int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src, return 0; } - struct lookahead_entry *vp9_lookahead_pop(struct lookahead_ctx *ctx, int drain) { struct lookahead_entry *buf = NULL; @@ -214,25 +198,22 @@ struct lookahead_entry *vp9_lookahead_pop(struct lookahead_ctx *ctx, return buf; } - struct lookahead_entry *vp9_lookahead_peek(struct lookahead_ctx *ctx, int index) { struct lookahead_entry *buf = NULL; if (index >= 0) { // Forward peek - if (index < (int)ctx->sz) { + if (index < ctx->sz) { index += ctx->read_idx; - if (index >= (int)ctx->max_sz) - index -= ctx->max_sz; + if (index >= ctx->max_sz) index -= ctx->max_sz; buf = ctx->buf + index; } } else if (index < 0) { // Backward peek if (-index <= MAX_PRE_FRAMES) { index += ctx->read_idx; - if (index < 0) - index += ctx->max_sz; + if (index < 0) index += ctx->max_sz; buf = ctx->buf + index; } } @@ -240,6 +221,4 @@ struct lookahead_entry *vp9_lookahead_peek(struct lookahead_ctx *ctx, return buf; } -unsigned int vp9_lookahead_depth(struct lookahead_ctx *ctx) { - return ctx->sz; -} +unsigned int vp9_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; } diff --git a/libs/libvpx/vp9/encoder/vp9_lookahead.h b/libs/libvpx/vp9/encoder/vp9_lookahead.h index 13820380ff..88be0ffcd5 100644 --- a/libs/libvpx/vp9/encoder/vp9_lookahead.h +++ b/libs/libvpx/vp9/encoder/vp9_lookahead.h 
@@ -12,11 +12,11 @@ #define VP9_ENCODER_VP9_LOOKAHEAD_H_ #include "vpx_scale/yv12config.h" +#include "vpx/vpx_encoder.h" #include "vpx/vpx_integer.h" #if CONFIG_SPATIAL_SVC #include "vpx/vp8cx.h" -#include "vpx/vpx_encoder.h" #endif #ifdef __cplusplus @@ -26,20 +26,20 @@ extern "C" { #define MAX_LAG_BUFFERS 25 struct lookahead_entry { - YV12_BUFFER_CONFIG img; - int64_t ts_start; - int64_t ts_end; - unsigned int flags; + YV12_BUFFER_CONFIG img; + int64_t ts_start; + int64_t ts_end; + vpx_enc_frame_flags_t flags; }; // The max of past frames we want to keep in the queue. #define MAX_PRE_FRAMES 1 struct lookahead_ctx { - unsigned int max_sz; /* Absolute size of the queue */ - unsigned int sz; /* Number of buffers currently in the queue */ - unsigned int read_idx; /* Read index */ - unsigned int write_idx; /* Write index */ + int max_sz; /* Absolute size of the queue */ + int sz; /* Number of buffers currently in the queue */ + int read_idx; /* Read index */ + int write_idx; /* Write index */ struct lookahead_entry *buf; /* Buffer list */ }; @@ -57,12 +57,10 @@ struct lookahead_ctx *vp9_lookahead_init(unsigned int width, #endif unsigned int depth); - /**\brief Destroys the lookahead stage */ void vp9_lookahead_destroy(struct lookahead_ctx *ctx); - /**\brief Enqueue a source buffer * * This function will copy the source image into a new framebuffer with @@ -83,8 +81,7 @@ int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src, #if CONFIG_VP9_HIGHBITDEPTH int use_highbitdepth, #endif - unsigned int flags); - + vpx_enc_frame_flags_t flags); /**\brief Get the next source buffer to encode * @@ -96,9 +93,7 @@ int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src, * \retval NULL, if drain set and queue is empty * \retval NULL, if drain not set and queue not of the configured depth */ -struct lookahead_entry *vp9_lookahead_pop(struct lookahead_ctx *ctx, - int drain); - +struct lookahead_entry *vp9_lookahead_pop(struct lookahead_ctx *ctx, 
int drain); /**\brief Get a future source buffer to encode * @@ -110,7 +105,6 @@ struct lookahead_entry *vp9_lookahead_pop(struct lookahead_ctx *ctx, struct lookahead_entry *vp9_lookahead_peek(struct lookahead_ctx *ctx, int index); - /**\brief Get the number of frames currently in the lookahead queue * * \param[in] ctx Pointer to the lookahead context diff --git a/libs/libvpx/vp9/encoder/vp9_mbgraph.c b/libs/libvpx/vp9/encoder/vp9_mbgraph.c index 7ce86b45dc..e000220b97 100644 --- a/libs/libvpx/vp9/encoder/vp9_mbgraph.c +++ b/libs/libvpx/vp9/encoder/vp9_mbgraph.c @@ -22,22 +22,15 @@ #include "vp9/common/vp9_reconinter.h" #include "vp9/common/vp9_reconintra.h" - -static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, - const MV *ref_mv, - MV *dst_mv, - int mb_row, +static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, const MV *ref_mv, + MV *dst_mv, int mb_row, int mb_col) { MACROBLOCK *const x = &cpi->td.mb; MACROBLOCKD *const xd = &x->e_mbd; MV_SPEED_FEATURES *const mv_sf = &cpi->sf.mv; const SEARCH_METHODS old_search_method = mv_sf->search_method; const vp9_variance_fn_ptr_t v_fn_ptr = cpi->fn_ptr[BLOCK_16X16]; - - const int tmp_col_min = x->mv_col_min; - const int tmp_col_max = x->mv_col_max; - const int tmp_row_min = x->mv_row_min; - const int tmp_row_max = x->mv_row_max; + const MvLimits tmp_mv_limits = x->mv_limits; MV ref_full; int cost_list[5]; @@ -45,7 +38,7 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, int step_param = mv_sf->reduce_first_step_size; step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2); - vp9_set_mv_search_range(x, ref_mv); + vp9_set_mv_search_range(&x->mv_limits, ref_mv); ref_full.col = ref_mv->col >> 3; ref_full.row = ref_mv->row >> 3; @@ -59,14 +52,13 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, // Try sub-pixel MC // if (bestsme > error_thresh && bestsme < INT_MAX) { - int distortion; - unsigned int sse; + uint32_t distortion; + uint32_t sse; cpi->find_fractional_mv_step( x, dst_mv, 
ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit, &v_fn_ptr, 0, mv_sf->subpel_iters_per_step, - cond_cost_list(cpi, cost_list), - NULL, NULL, - &distortion, &sse, NULL, 0, 0); + cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, 0, + 0); } xd->mi[0]->mode = NEWMV; @@ -75,10 +67,7 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, vp9_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16); /* restore UMV window */ - x->mv_col_min = tmp_col_min; - x->mv_col_max = tmp_col_max; - x->mv_row_min = tmp_row_min; - x->mv_row_max = tmp_row_max; + x->mv_limits = tmp_mv_limits; return vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].dst.buf, xd->plane[0].dst.stride); @@ -109,10 +98,10 @@ static int do_16x16_motion_search(VP9_COMP *cpi, const MV *ref_mv, // based search as well. if (ref_mv->row != 0 || ref_mv->col != 0) { unsigned int tmp_err; - MV zero_ref_mv = {0, 0}, tmp_mv; + MV zero_ref_mv = { 0, 0 }, tmp_mv; - tmp_err = do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv, - mb_row, mb_col); + tmp_err = + do_16x16_motion_iteration(cpi, &zero_ref_mv, &tmp_mv, mb_row, mb_col); if (tmp_err < err) { dst_mv->as_mv = tmp_mv; err = tmp_err; @@ -137,7 +126,7 @@ static int do_16x16_zerozero_search(VP9_COMP *cpi, int_mv *dst_mv) { return err; } static int find_best_16x16_intra(VP9_COMP *cpi, PREDICTION_MODE *pbest_mode) { - MACROBLOCK *const x = &cpi->td.mb; + MACROBLOCK *const x = &cpi->td.mb; MACROBLOCKD *const xd = &x->e_mbd; PREDICTION_MODE best_mode = -1, mode; unsigned int best_err = INT_MAX; @@ -148,38 +137,30 @@ static int find_best_16x16_intra(VP9_COMP *cpi, PREDICTION_MODE *pbest_mode) { unsigned int err; xd->mi[0]->mode = mode; - vp9_predict_intra_block(xd, 2, TX_16X16, mode, - x->plane[0].src.buf, x->plane[0].src.stride, - xd->plane[0].dst.buf, xd->plane[0].dst.stride, - 0, 0, 0); + vp9_predict_intra_block(xd, 2, TX_16X16, mode, x->plane[0].src.buf, + x->plane[0].src.stride, xd->plane[0].dst.buf, + 
xd->plane[0].dst.stride, 0, 0, 0); err = vpx_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].dst.buf, xd->plane[0].dst.stride); // find best if (err < best_err) { - best_err = err; + best_err = err; best_mode = mode; } } - if (pbest_mode) - *pbest_mode = best_mode; + if (pbest_mode) *pbest_mode = best_mode; return best_err; } -static void update_mbgraph_mb_stats -( - VP9_COMP *cpi, - MBGRAPH_MB_STATS *stats, - YV12_BUFFER_CONFIG *buf, - int mb_y_offset, - YV12_BUFFER_CONFIG *golden_ref, - const MV *prev_golden_ref_mv, - YV12_BUFFER_CONFIG *alt_ref, - int mb_row, - int mb_col -) { +static void update_mbgraph_mb_stats(VP9_COMP *cpi, MBGRAPH_MB_STATS *stats, + YV12_BUFFER_CONFIG *buf, int mb_y_offset, + YV12_BUFFER_CONFIG *golden_ref, + const MV *prev_golden_ref_mv, + YV12_BUFFER_CONFIG *alt_ref, int mb_row, + int mb_col) { MACROBLOCK *const x = &cpi->td.mb; MACROBLOCKD *const xd = &x->e_mbd; int intra_error; @@ -193,10 +174,8 @@ static void update_mbgraph_mb_stats xd->plane[0].dst.stride = get_frame_new_buffer(cm)->y_stride; // do intra 16x16 prediction - intra_error = find_best_16x16_intra(cpi, - &stats->ref[INTRA_FRAME].m.mode); - if (intra_error <= 0) - intra_error = 1; + intra_error = find_best_16x16_intra(cpi, &stats->ref[INTRA_FRAME].m.mode); + if (intra_error <= 0) intra_error = 1; stats->ref[INTRA_FRAME].err = intra_error; // Golden frame MV search, if it exists and is different than last frame @@ -204,10 +183,9 @@ static void update_mbgraph_mb_stats int g_motion_error; xd->plane[0].pre[0].buf = golden_ref->y_buffer + mb_y_offset; xd->plane[0].pre[0].stride = golden_ref->y_stride; - g_motion_error = do_16x16_motion_search(cpi, - prev_golden_ref_mv, - &stats->ref[GOLDEN_FRAME].m.mv, - mb_row, mb_col); + g_motion_error = + do_16x16_motion_search(cpi, prev_golden_ref_mv, + &stats->ref[GOLDEN_FRAME].m.mv, mb_row, mb_col); stats->ref[GOLDEN_FRAME].err = g_motion_error; } else { stats->ref[GOLDEN_FRAME].err = INT_MAX; @@ -220,8 +198,8 @@ static 
void update_mbgraph_mb_stats int a_motion_error; xd->plane[0].pre[0].buf = alt_ref->y_buffer + mb_y_offset; xd->plane[0].pre[0].stride = alt_ref->y_stride; - a_motion_error = do_16x16_zerozero_search(cpi, - &stats->ref[ALTREF_FRAME].m.mv); + a_motion_error = + do_16x16_zerozero_search(cpi, &stats->ref[ALTREF_FRAME].m.mv); stats->ref[ALTREF_FRAME].err = a_motion_error; } else { @@ -241,17 +219,20 @@ static void update_mbgraph_frame_stats(VP9_COMP *cpi, int mb_col, mb_row, offset = 0; int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0; - MV gld_top_mv = {0, 0}; + MV gld_top_mv = { 0, 0 }; MODE_INFO mi_local; + MODE_INFO mi_above, mi_left; vp9_zero(mi_local); // Set up limit values for motion vectors to prevent them extending outside // the UMV borders. - x->mv_row_min = -BORDER_MV_PIXELS_B16; - x->mv_row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16; - xd->up_available = 0; - xd->plane[0].dst.stride = buf->y_stride; - xd->plane[0].pre[0].stride = buf->y_stride; + x->mv_limits.row_min = -BORDER_MV_PIXELS_B16; + x->mv_limits.row_max = (cm->mb_rows - 1) * 8 + BORDER_MV_PIXELS_B16; + // Signal to vp9_predict_intra_block() that above is not available + xd->above_mi = NULL; + + xd->plane[0].dst.stride = buf->y_stride; + xd->plane[0].pre[0].stride = buf->y_stride; xd->plane[1].dst.stride = buf->uv_stride; xd->mi[0] = &mi_local; mi_local.sb_type = BLOCK_16X16; @@ -260,41 +241,45 @@ static void update_mbgraph_frame_stats(VP9_COMP *cpi, for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) { MV gld_left_mv = gld_top_mv; - int mb_y_in_offset = mb_y_offset; + int mb_y_in_offset = mb_y_offset; int arf_y_in_offset = arf_y_offset; int gld_y_in_offset = gld_y_offset; // Set up limit values for motion vectors to prevent them extending outside // the UMV borders. 
- x->mv_col_min = -BORDER_MV_PIXELS_B16; - x->mv_col_max = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16; - xd->left_available = 0; + x->mv_limits.col_min = -BORDER_MV_PIXELS_B16; + x->mv_limits.col_max = (cm->mb_cols - 1) * 8 + BORDER_MV_PIXELS_B16; + // Signal to vp9_predict_intra_block() that left is not available + xd->left_mi = NULL; for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) { MBGRAPH_MB_STATS *mb_stats = &stats->mb_stats[offset + mb_col]; - update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset, - golden_ref, &gld_left_mv, alt_ref, - mb_row, mb_col); + update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset, golden_ref, + &gld_left_mv, alt_ref, mb_row, mb_col); gld_left_mv = mb_stats->ref[GOLDEN_FRAME].m.mv.as_mv; if (mb_col == 0) { gld_top_mv = gld_left_mv; } - xd->left_available = 1; - mb_y_in_offset += 16; - gld_y_in_offset += 16; - arf_y_in_offset += 16; - x->mv_col_min -= 16; - x->mv_col_max -= 16; + // Signal to vp9_predict_intra_block() that left is available + xd->left_mi = &mi_left; + + mb_y_in_offset += 16; + gld_y_in_offset += 16; + arf_y_in_offset += 16; + x->mv_limits.col_min -= 16; + x->mv_limits.col_max -= 16; } - xd->up_available = 1; - mb_y_offset += buf->y_stride * 16; - gld_y_offset += golden_ref->y_stride * 16; - if (alt_ref) - arf_y_offset += alt_ref->y_stride * 16; - x->mv_row_min -= 16; - x->mv_row_max -= 16; - offset += cm->mb_cols; + + // Signal to vp9_predict_intra_block() that above is available + xd->above_mi = &mi_above; + + mb_y_offset += buf->y_stride * 16; + gld_y_offset += golden_ref->y_stride * 16; + if (alt_ref) arf_y_offset += alt_ref->y_stride * 16; + x->mv_limits.row_min -= 16; + x->mv_limits.row_max -= 16; + offset += cm->mb_cols; } } @@ -308,9 +293,9 @@ static void separate_arf_mbs(VP9_COMP *cpi) { int *arf_not_zz; - CHECK_MEM_ERROR(cm, arf_not_zz, - vpx_calloc(cm->mb_rows * cm->mb_cols * sizeof(*arf_not_zz), - 1)); + CHECK_MEM_ERROR( + cm, arf_not_zz, + vpx_calloc(cm->mb_rows * cm->mb_cols * 
sizeof(*arf_not_zz), 1)); // We are not interested in results beyond the alt ref itself. if (n_frames > cpi->rc.frames_till_gf_update_due) @@ -326,12 +311,11 @@ static void separate_arf_mbs(VP9_COMP *cpi) { MBGRAPH_MB_STATS *mb_stats = &frame_stats->mb_stats[offset + mb_col]; int altref_err = mb_stats->ref[ALTREF_FRAME].err; - int intra_err = mb_stats->ref[INTRA_FRAME ].err; + int intra_err = mb_stats->ref[INTRA_FRAME].err; int golden_err = mb_stats->ref[GOLDEN_FRAME].err; // Test for altref vs intra and gf and that its mv was 0,0. - if (altref_err > 1000 || - altref_err > intra_err || + if (altref_err > 1000 || altref_err > intra_err || altref_err > golden_err) { arf_not_zz[offset + mb_col]++; } @@ -386,11 +370,9 @@ void vp9_update_mbgraph_stats(VP9_COMP *cpi) { // we need to look ahead beyond where the ARF transitions into // being a GF - so exit if we don't look ahead beyond that - if (n_frames <= cpi->rc.frames_till_gf_update_due) - return; + if (n_frames <= cpi->rc.frames_till_gf_update_due) return; - if (n_frames > MAX_LAG_BUFFERS) - n_frames = MAX_LAG_BUFFERS; + if (n_frames > MAX_LAG_BUFFERS) n_frames = MAX_LAG_BUFFERS; cpi->mbgraph_n_frames = n_frames; for (i = 0; i < n_frames; i++) { @@ -409,8 +391,8 @@ void vp9_update_mbgraph_stats(VP9_COMP *cpi) { assert(q_cur != NULL); - update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img, - golden_ref, cpi->Source); + update_mbgraph_frame_stats(cpi, frame_stats, &q_cur->img, golden_ref, + cpi->Source); } vpx_clear_system_state(); diff --git a/libs/libvpx/vp9/encoder/vp9_mbgraph.h b/libs/libvpx/vp9/encoder/vp9_mbgraph.h index c3af972bc0..df2fb98efa 100644 --- a/libs/libvpx/vp9/encoder/vp9_mbgraph.h +++ b/libs/libvpx/vp9/encoder/vp9_mbgraph.h @@ -25,9 +25,7 @@ typedef struct { } ref[MAX_REF_FRAMES]; } MBGRAPH_MB_STATS; -typedef struct { - MBGRAPH_MB_STATS *mb_stats; -} MBGRAPH_FRAME_STATS; +typedef struct { MBGRAPH_MB_STATS *mb_stats; } MBGRAPH_FRAME_STATS; struct VP9_COMP; diff --git 
a/libs/libvpx/vp9/encoder/vp9_mcomp.c b/libs/libvpx/vp9/encoder/vp9_mcomp.c index 8b7825e7b6..2d9bcbda67 100644 --- a/libs/libvpx/vp9/encoder/vp9_mcomp.c +++ b/libs/libvpx/vp9/encoder/vp9_mcomp.c @@ -8,6 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. */ +#include #include #include #include @@ -32,7 +33,7 @@ static INLINE const uint8_t *get_buf_from_mv(const struct buf_2d *buf, return &buf->buf[mv->row * buf->stride + mv->col]; } -void vp9_set_mv_search_range(MACROBLOCK *x, const MV *mv) { +void vp9_set_mv_search_range(MvLimits *mv_limits, const MV *mv) { int col_min = (mv->col >> 3) - MAX_FULL_PEL_VAL + (mv->col & 7 ? 1 : 0); int row_min = (mv->row >> 3) - MAX_FULL_PEL_VAL + (mv->row & 7 ? 1 : 0); int col_max = (mv->col >> 3) + MAX_FULL_PEL_VAL; @@ -45,14 +46,10 @@ void vp9_set_mv_search_range(MACROBLOCK *x, const MV *mv) { // Get intersection of UMV window and valid MV window to reduce # of checks // in diamond search. - if (x->mv_col_min < col_min) - x->mv_col_min = col_min; - if (x->mv_col_max > col_max) - x->mv_col_max = col_max; - if (x->mv_row_min < row_min) - x->mv_row_min = row_min; - if (x->mv_row_max > row_max) - x->mv_row_max = row_max; + if (mv_limits->col_min < col_min) mv_limits->col_min = col_min; + if (mv_limits->col_max > col_max) mv_limits->col_max = col_max; + if (mv_limits->row_min < row_min) mv_limits->row_min = row_min; + if (mv_limits->row_max > row_max) mv_limits->row_max = row_max; } int vp9_init_search_range(int size) { @@ -60,23 +57,23 @@ int vp9_init_search_range(int size) { // Minimum search size no matter what the passed in value. 
size = VPXMAX(16, size); - while ((size << sr) < MAX_FULL_PEL_VAL) - sr++; + while ((size << sr) < MAX_FULL_PEL_VAL) sr++; sr = VPXMIN(sr, MAX_MVSEARCH_STEPS - 2); return sr; } -static INLINE int mv_cost(const MV *mv, - const int *joint_cost, int *const comp_cost[2]) { - return joint_cost[vp9_get_mv_joint(mv)] + - comp_cost[0][mv->row] + comp_cost[1][mv->col]; +static INLINE int mv_cost(const MV *mv, const int *joint_cost, + int *const comp_cost[2]) { + assert(mv->row >= -MV_MAX && mv->row < MV_MAX); + assert(mv->col >= -MV_MAX && mv->col < MV_MAX); + return joint_cost[vp9_get_mv_joint(mv)] + comp_cost[0][mv->row] + + comp_cost[1][mv->col]; } -int vp9_mv_bit_cost(const MV *mv, const MV *ref, - const int *mvjcost, int *mvcost[2], int weight) { - const MV diff = { mv->row - ref->row, - mv->col - ref->col }; +int vp9_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost, + int *mvcost[2], int weight) { + const MV diff = { mv->row - ref->row, mv->col - ref->col }; return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * weight, 7); } @@ -84,7 +81,7 @@ int vp9_mv_bit_cost(const MV *mv, const MV *ref, static int mv_err_cost(const MV *mv, const MV *ref, const int *mvjcost, int *mvcost[2], int error_per_bit) { if (mvcost) { - const MV diff = {mv->row - ref->row, mv->col - ref->col}; + const MV diff = { mv->row - ref->row, mv->col - ref->col }; // This product sits at a 32-bit ceiling right now and any additional // accuracy in either bit cost or error cost will cause it to overflow. 
return ROUND_POWER_OF_TWO( @@ -97,11 +94,9 @@ static int mv_err_cost(const MV *mv, const MV *ref, const int *mvjcost, static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref, int sad_per_bit) { - const MV diff = { mv->row - ref->row, - mv->col - ref->col }; + const MV diff = { mv->row - ref->row, mv->col - ref->col }; return ROUND_POWER_OF_TWO( - (unsigned)mv_cost(&diff, x->nmvjointsadcost, x->nmvsadcost) * - sad_per_bit, + (unsigned)mv_cost(&diff, x->nmvjointsadcost, x->nmvsadcost) * sad_per_bit, VP9_PROB_COST_SHIFT); } @@ -111,7 +106,7 @@ void vp9_init_dsmotion_compensation(search_site_config *cfg, int stride) { for (len = MAX_FIRST_STEP; len > 0; len /= 2) { // Generate offsets for 4 search sites per step. - const MV ss_mvs[] = {{-len, 0}, {len, 0}, {0, -len}, {0, len}}; + const MV ss_mvs[] = { { -len, 0 }, { len, 0 }, { 0, -len }, { 0, len } }; int i; for (i = 0; i < 4; ++i, ++ss_count) { cfg->ss_mv[ss_count] = ss_mvs[i]; @@ -129,10 +124,9 @@ void vp9_init3smotion_compensation(search_site_config *cfg, int stride) { for (len = MAX_FIRST_STEP; len > 0; len /= 2) { // Generate offsets for 8 search sites per step. - const MV ss_mvs[8] = { - {-len, 0 }, {len, 0 }, { 0, -len}, {0, len}, - {-len, -len}, {-len, len}, {len, -len}, {len, len} - }; + const MV ss_mvs[8] = { { -len, 0 }, { len, 0 }, { 0, -len }, + { 0, len }, { -len, -len }, { -len, len }, + { len, -len }, { len, len } }; int i; for (i = 0; i < 8; ++i, ++ss_count) { cfg->ss_mv[ss_count] = ss_mvs[i]; @@ -144,138 +138,139 @@ void vp9_init3smotion_compensation(search_site_config *cfg, int stride) { cfg->total_steps = ss_count / cfg->searches_per_step; } -/* - * To avoid the penalty for crossing cache-line read, preload the reference - * area in a small buffer, which is aligned to make sure there won't be crossing - * cache-line read while reading from this buffer. This reduced the cpu - * cycles spent on reading ref data in sub-pixel filter functions. 
- * TODO: Currently, since sub-pixel search range here is -3 ~ 3, copy 22 rows x - * 32 cols area that is enough for 16x16 macroblock. Later, for SPLITMV, we - * could reduce the area. - */ - /* Estimated (square) error cost of a motion vector (r,c). The 14 scale comes * from the same math as in mv_err_cost(). */ -#define MVC(r, c) \ - (mvcost ? \ - ((unsigned)(mvjcost[((r) != rr) * 2 + ((c) != rc)] + \ - mvcost[0][((r) - rr)] + mvcost[1][((c) - rc)]) * \ - error_per_bit + 8192) >> 14 : 0) - +#define MVC(r, c) \ + (mvcost \ + ? ((unsigned)(mvjcost[((r) != rr) * 2 + ((c) != rc)] + \ + mvcost[0][((r)-rr)] + mvcost[1][((c)-rc)]) * \ + error_per_bit + \ + 8192) >> \ + 14 \ + : 0) // convert motion vector component to offset for sv[a]f calc -static INLINE int sp(int x) { - return x & 7; -} +static INLINE int sp(int x) { return x & 7; } static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) { return &buf[(r >> 3) * stride + (c >> 3)]; } +#if CONFIG_VP9_HIGHBITDEPTH /* checks if (r, c) has better score than previous best */ -#define CHECK_BETTER(v, r, c) \ - if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \ - if (second_pred == NULL) \ - thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \ - src_stride, &sse); \ - else \ - thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \ - z, src_stride, &sse, second_pred); \ - if ((v = MVC(r, c) + thismse) < besterr) { \ - besterr = v; \ - br = r; \ - bc = c; \ - *distortion = thismse; \ - *sse1 = sse; \ - } \ - } else { \ - v = INT_MAX; \ +#define CHECK_BETTER(v, r, c) \ + if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \ + int64_t tmpmse; \ + if (second_pred == NULL) { \ + thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \ + src_stride, &sse); \ + } else { \ + thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \ + src_stride, &sse, second_pred); \ + } \ + tmpmse = thismse; \ + tmpmse += MVC(r, c); \ + if (tmpmse >= 
INT_MAX) { \ + v = INT_MAX; \ + } else if ((v = (uint32_t)tmpmse) < besterr) { \ + besterr = v; \ + br = r; \ + bc = c; \ + *distortion = thismse; \ + *sse1 = sse; \ + } \ + } else { \ + v = INT_MAX; \ + } +#else +/* checks if (r, c) has better score than previous best */ +#define CHECK_BETTER(v, r, c) \ + if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \ + if (second_pred == NULL) \ + thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \ + src_stride, &sse); \ + else \ + thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), z, \ + src_stride, &sse, second_pred); \ + if ((v = MVC(r, c) + thismse) < besterr) { \ + besterr = v; \ + br = r; \ + bc = c; \ + *distortion = thismse; \ + *sse1 = sse; \ + } \ + } else { \ + v = INT_MAX; \ } -#define FIRST_LEVEL_CHECKS \ - { \ - unsigned int left, right, up, down, diag; \ - CHECK_BETTER(left, tr, tc - hstep); \ - CHECK_BETTER(right, tr, tc + hstep); \ - CHECK_BETTER(up, tr - hstep, tc); \ - CHECK_BETTER(down, tr + hstep, tc); \ - whichdir = (left < right ? 0 : 1) + \ - (up < down ? 0 : 2); \ - switch (whichdir) { \ - case 0: \ - CHECK_BETTER(diag, tr - hstep, tc - hstep); \ - break; \ - case 1: \ - CHECK_BETTER(diag, tr - hstep, tc + hstep); \ - break; \ - case 2: \ - CHECK_BETTER(diag, tr + hstep, tc - hstep); \ - break; \ - case 3: \ - CHECK_BETTER(diag, tr + hstep, tc + hstep); \ - break; \ - } \ +#endif +#define FIRST_LEVEL_CHECKS \ + { \ + unsigned int left, right, up, down, diag; \ + CHECK_BETTER(left, tr, tc - hstep); \ + CHECK_BETTER(right, tr, tc + hstep); \ + CHECK_BETTER(up, tr - hstep, tc); \ + CHECK_BETTER(down, tr + hstep, tc); \ + whichdir = (left < right ? 0 : 1) + (up < down ? 
0 : 2); \ + switch (whichdir) { \ + case 0: CHECK_BETTER(diag, tr - hstep, tc - hstep); break; \ + case 1: CHECK_BETTER(diag, tr - hstep, tc + hstep); break; \ + case 2: CHECK_BETTER(diag, tr + hstep, tc - hstep); break; \ + case 3: CHECK_BETTER(diag, tr + hstep, tc + hstep); break; \ + } \ } -#define SECOND_LEVEL_CHECKS \ - { \ - int kr, kc; \ - unsigned int second; \ - if (tr != br && tc != bc) { \ - kr = br - tr; \ - kc = bc - tc; \ - CHECK_BETTER(second, tr + kr, tc + 2 * kc); \ - CHECK_BETTER(second, tr + 2 * kr, tc + kc); \ - } else if (tr == br && tc != bc) { \ - kc = bc - tc; \ - CHECK_BETTER(second, tr + hstep, tc + 2 * kc); \ - CHECK_BETTER(second, tr - hstep, tc + 2 * kc); \ - switch (whichdir) { \ - case 0: \ - case 1: \ - CHECK_BETTER(second, tr + hstep, tc + kc); \ - break; \ - case 2: \ - case 3: \ - CHECK_BETTER(second, tr - hstep, tc + kc); \ - break; \ - } \ - } else if (tr != br && tc == bc) { \ - kr = br - tr; \ - CHECK_BETTER(second, tr + 2 * kr, tc + hstep); \ - CHECK_BETTER(second, tr + 2 * kr, tc - hstep); \ - switch (whichdir) { \ - case 0: \ - case 2: \ - CHECK_BETTER(second, tr + kr, tc + hstep); \ - break; \ - case 1: \ - case 3: \ - CHECK_BETTER(second, tr + kr, tc - hstep); \ - break; \ - } \ - } \ +#define SECOND_LEVEL_CHECKS \ + { \ + int kr, kc; \ + unsigned int second; \ + if (tr != br && tc != bc) { \ + kr = br - tr; \ + kc = bc - tc; \ + CHECK_BETTER(second, tr + kr, tc + 2 * kc); \ + CHECK_BETTER(second, tr + 2 * kr, tc + kc); \ + } else if (tr == br && tc != bc) { \ + kc = bc - tc; \ + CHECK_BETTER(second, tr + hstep, tc + 2 * kc); \ + CHECK_BETTER(second, tr - hstep, tc + 2 * kc); \ + switch (whichdir) { \ + case 0: \ + case 1: CHECK_BETTER(second, tr + hstep, tc + kc); break; \ + case 2: \ + case 3: CHECK_BETTER(second, tr - hstep, tc + kc); break; \ + } \ + } else if (tr != br && tc == bc) { \ + kr = br - tr; \ + CHECK_BETTER(second, tr + 2 * kr, tc + hstep); \ + CHECK_BETTER(second, tr + 2 * kr, tc - hstep); \ + switch 
(whichdir) { \ + case 0: \ + case 2: CHECK_BETTER(second, tr + kr, tc + hstep); break; \ + case 1: \ + case 3: CHECK_BETTER(second, tr + kr, tc - hstep); break; \ + } \ + } \ } // TODO(yunqingwang): SECOND_LEVEL_CHECKS_BEST was a rewrote of // SECOND_LEVEL_CHECKS, and SECOND_LEVEL_CHECKS should be rewritten // later in the same way. -#define SECOND_LEVEL_CHECKS_BEST \ - { \ - unsigned int second; \ - int br0 = br; \ - int bc0 = bc; \ - assert(tr == br || tc == bc); \ - if (tr == br && tc != bc) { \ - kc = bc - tc; \ - } else if (tr != br && tc == bc) { \ - kr = br - tr; \ - } \ - CHECK_BETTER(second, br0 + kr, bc0); \ - CHECK_BETTER(second, br0, bc0 + kc); \ - if (br0 != br || bc0 != bc) { \ - CHECK_BETTER(second, br0 + kr, bc0 + kc); \ - } \ +#define SECOND_LEVEL_CHECKS_BEST \ + { \ + unsigned int second; \ + int br0 = br; \ + int bc0 = bc; \ + assert(tr == br || tc == bc); \ + if (tr == br && tc != bc) { \ + kc = bc - tc; \ + } else if (tr != br && tc == bc) { \ + kr = br - tr; \ + } \ + CHECK_BETTER(second, br0 + kr, bc0); \ + CHECK_BETTER(second, br0, bc0 + kc); \ + if (br0 != br || bc0 != bc) { \ + CHECK_BETTER(second, br0 + kr, bc0 + kc); \ + } \ } #define SETUP_SUBPEL_SEARCH \ @@ -298,39 +293,31 @@ static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) { int br = bestmv->row * 8; \ int bc = bestmv->col * 8; \ int hstep = 4; \ - const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \ - const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \ - const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \ - const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \ + const int minc = VPXMAX(x->mv_limits.col_min * 8, ref_mv->col - MV_MAX); \ + const int maxc = VPXMIN(x->mv_limits.col_max * 8, ref_mv->col + MV_MAX); \ + const int minr = VPXMAX(x->mv_limits.row_min * 8, ref_mv->row - MV_MAX); \ + const int maxr = VPXMIN(x->mv_limits.row_max * 8, ref_mv->row + MV_MAX); \ int tr = br; \ int tc = bc; \ \ 
bestmv->row *= 8; \ bestmv->col *= 8; -static unsigned int setup_center_error(const MACROBLOCKD *xd, - const MV *bestmv, - const MV *ref_mv, - int error_per_bit, - const vp9_variance_fn_ptr_t *vfp, - const uint8_t *const src, - const int src_stride, - const uint8_t *const y, - int y_stride, - const uint8_t *second_pred, - int w, int h, int offset, - int *mvjcost, int *mvcost[2], - unsigned int *sse1, - int *distortion) { - unsigned int besterr; +static unsigned int setup_center_error( + const MACROBLOCKD *xd, const MV *bestmv, const MV *ref_mv, + int error_per_bit, const vp9_variance_fn_ptr_t *vfp, + const uint8_t *const src, const int src_stride, const uint8_t *const y, + int y_stride, const uint8_t *second_pred, int w, int h, int offset, + int *mvjcost, int *mvcost[2], uint32_t *sse1, uint32_t *distortion) { #if CONFIG_VP9_HIGHBITDEPTH + uint64_t besterr; if (second_pred != NULL) { if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { DECLARE_ALIGNED(16, uint16_t, comp_pred16[64 * 64]); vpx_highbd_comp_avg_pred(comp_pred16, second_pred, w, h, y + offset, y_stride); - besterr = vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride, - sse1); + besterr = + vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride, sse1); } else { DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]); vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride); @@ -339,10 +326,13 @@ static unsigned int setup_center_error(const MACROBLOCKD *xd, } else { besterr = vfp->vf(y + offset, y_stride, src, src_stride, sse1); } - *distortion = besterr; + *distortion = (uint32_t)besterr; besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit); + if (besterr >= UINT_MAX) return UINT_MAX; + return (uint32_t)besterr; #else - (void) xd; + uint32_t besterr; + (void)xd; if (second_pred != NULL) { DECLARE_ALIGNED(16, uint8_t, comp_pred[64 * 64]); vpx_comp_avg_pred(comp_pred, second_pred, w, h, y + offset, y_stride); @@ -352,8 +342,8 @@ static unsigned int 
setup_center_error(const MACROBLOCKD *xd, } *distortion = besterr; besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit); -#endif // CONFIG_VP9_HIGHBITDEPTH return besterr; +#endif // CONFIG_VP9_HIGHBITDEPTH } static INLINE int divide_and_round(const int n, const int d) { @@ -361,10 +351,8 @@ static INLINE int divide_and_round(const int n, const int d) { } static INLINE int is_cost_list_wellbehaved(int *cost_list) { - return cost_list[0] < cost_list[1] && - cost_list[0] < cost_list[2] && - cost_list[0] < cost_list[3] && - cost_list[0] < cost_list[4]; + return cost_list[0] < cost_list[1] && cost_list[0] < cost_list[2] && + cost_list[0] < cost_list[3] && cost_list[0] < cost_list[4]; } // Returns surface minima estimate at given precision in 1/2^n bits. @@ -375,46 +363,72 @@ static INLINE int is_cost_list_wellbehaved(int *cost_list) { // x0 = 1/2 (S1 - S3)/(S1 + S3 - 2*S0), // y0 = 1/2 (S4 - S2)/(S4 + S2 - 2*S0). // The code below is an integerized version of that. -static void get_cost_surf_min(int *cost_list, int *ir, int *ic, - int bits) { +static void get_cost_surf_min(int *cost_list, int *ir, int *ic, int bits) { *ic = divide_and_round((cost_list[1] - cost_list[3]) * (1 << (bits - 1)), (cost_list[1] - 2 * cost_list[0] + cost_list[3])); *ir = divide_and_round((cost_list[4] - cost_list[2]) * (1 << (bits - 1)), (cost_list[4] - 2 * cost_list[0] + cost_list[2])); } -int vp9_find_best_sub_pixel_tree_pruned_evenmore( - const MACROBLOCK *x, - MV *bestmv, const MV *ref_mv, - int allow_hp, - int error_per_bit, - const vp9_variance_fn_ptr_t *vfp, - int forced_stop, - int iters_per_step, - int *cost_list, - int *mvjcost, int *mvcost[2], - int *distortion, - unsigned int *sse1, - const uint8_t *second_pred, - int w, int h) { +uint32_t vp9_skip_sub_pixel_tree(const MACROBLOCK *x, MV *bestmv, + const MV *ref_mv, int allow_hp, + int error_per_bit, + const vp9_variance_fn_ptr_t *vfp, + int forced_stop, int iters_per_step, + int *cost_list, int *mvjcost, int 
*mvcost[2], + uint32_t *distortion, uint32_t *sse1, + const uint8_t *second_pred, int w, int h) { SETUP_SUBPEL_SEARCH; - besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, - z, src_stride, y, y_stride, second_pred, - w, h, offset, mvjcost, mvcost, - sse1, distortion); - (void) halfiters; - (void) quarteriters; - (void) eighthiters; - (void) whichdir; - (void) allow_hp; - (void) forced_stop; - (void) hstep; + besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z, + src_stride, y, y_stride, second_pred, w, h, + offset, mvjcost, mvcost, sse1, distortion); + (void)halfiters; + (void)quarteriters; + (void)eighthiters; + (void)whichdir; + (void)allow_hp; + (void)forced_stop; + (void)hstep; + (void)rr; + (void)rc; + (void)minr; + (void)minc; + (void)maxr; + (void)maxc; + (void)tr; + (void)tc; + (void)sse; + (void)thismse; + (void)cost_list; - if (cost_list && - cost_list[0] != INT_MAX && cost_list[1] != INT_MAX && + if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) || + (abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3))) + return UINT_MAX; + + return besterr; +} + +uint32_t vp9_find_best_sub_pixel_tree_pruned_evenmore( + const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp, + int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop, + int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2], + uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w, + int h) { + SETUP_SUBPEL_SEARCH; + besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z, + src_stride, y, y_stride, second_pred, w, h, + offset, mvjcost, mvcost, sse1, distortion); + (void)halfiters; + (void)quarteriters; + (void)eighthiters; + (void)whichdir; + (void)allow_hp; + (void)forced_stop; + (void)hstep; + + if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX && cost_list[2] != INT_MAX && cost_list[3] != INT_MAX && - cost_list[4] != INT_MAX && - is_cost_list_wellbehaved(cost_list)) 
{ + cost_list[4] != INT_MAX && is_cost_list_wellbehaved(cost_list)) { int ir, ic; unsigned int minpt; get_cost_surf_min(cost_list, &ir, &ic, 2); @@ -463,29 +477,19 @@ int vp9_find_best_sub_pixel_tree_pruned_evenmore( return besterr; } -int vp9_find_best_sub_pixel_tree_pruned_more(const MACROBLOCK *x, - MV *bestmv, const MV *ref_mv, - int allow_hp, - int error_per_bit, - const vp9_variance_fn_ptr_t *vfp, - int forced_stop, - int iters_per_step, - int *cost_list, - int *mvjcost, int *mvcost[2], - int *distortion, - unsigned int *sse1, - const uint8_t *second_pred, - int w, int h) { +uint32_t vp9_find_best_sub_pixel_tree_pruned_more( + const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp, + int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop, + int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2], + uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w, + int h) { SETUP_SUBPEL_SEARCH; - besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, - z, src_stride, y, y_stride, second_pred, - w, h, offset, mvjcost, mvcost, - sse1, distortion); - if (cost_list && - cost_list[0] != INT_MAX && cost_list[1] != INT_MAX && + besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z, + src_stride, y, y_stride, second_pred, w, h, + offset, mvjcost, mvcost, sse1, distortion); + if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX && cost_list[2] != INT_MAX && cost_list[3] != INT_MAX && - cost_list[4] != INT_MAX && - is_cost_list_wellbehaved(cost_list)) { + cost_list[4] != INT_MAX && is_cost_list_wellbehaved(cost_list)) { unsigned int minpt; int ir, ic; get_cost_surf_min(cost_list, &ir, &ic, 1); @@ -524,39 +528,30 @@ int vp9_find_best_sub_pixel_tree_pruned_more(const MACROBLOCK *x, } // These lines insure static analysis doesn't warn that // tr and tc aren't used after the above point. 
- (void) tr; - (void) tc; + (void)tr; + (void)tc; bestmv->row = br; bestmv->col = bc; if ((abs(bestmv->col - ref_mv->col) > (MAX_FULL_PEL_VAL << 3)) || (abs(bestmv->row - ref_mv->row) > (MAX_FULL_PEL_VAL << 3))) - return INT_MAX; + return UINT_MAX; return besterr; } -int vp9_find_best_sub_pixel_tree_pruned(const MACROBLOCK *x, - MV *bestmv, const MV *ref_mv, - int allow_hp, - int error_per_bit, - const vp9_variance_fn_ptr_t *vfp, - int forced_stop, - int iters_per_step, - int *cost_list, - int *mvjcost, int *mvcost[2], - int *distortion, - unsigned int *sse1, - const uint8_t *second_pred, - int w, int h) { +uint32_t vp9_find_best_sub_pixel_tree_pruned( + const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp, + int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop, + int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2], + uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w, + int h) { SETUP_SUBPEL_SEARCH; - besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, - z, src_stride, y, y_stride, second_pred, - w, h, offset, mvjcost, mvcost, - sse1, distortion); - if (cost_list && - cost_list[0] != INT_MAX && cost_list[1] != INT_MAX && + besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z, + src_stride, y, y_stride, second_pred, w, h, + offset, mvjcost, mvcost, sse1, distortion); + if (cost_list && cost_list[0] != INT_MAX && cost_list[1] != INT_MAX && cost_list[2] != INT_MAX && cost_list[3] != INT_MAX && cost_list[4] != INT_MAX) { unsigned int left, right, up, down, diag; @@ -619,8 +614,8 @@ int vp9_find_best_sub_pixel_tree_pruned(const MACROBLOCK *x, } // These lines insure static analysis doesn't warn that // tr and tc aren't used after the above point. 
- (void) tr; - (void) tc; + (void)tr; + (void)tc; bestmv->row = br; bestmv->col = bc; @@ -632,26 +627,21 @@ int vp9_find_best_sub_pixel_tree_pruned(const MACROBLOCK *x, return besterr; } +/* clang-format off */ static const MV search_step_table[12] = { - // left, right, up, down - {0, -4}, {0, 4}, {-4, 0}, {4, 0}, - {0, -2}, {0, 2}, {-2, 0}, {2, 0}, - {0, -1}, {0, 1}, {-1, 0}, {1, 0} + // left, right, up, down + { 0, -4 }, { 0, 4 }, { -4, 0 }, { 4, 0 }, + { 0, -2 }, { 0, 2 }, { -2, 0 }, { 2, 0 }, + { 0, -1 }, { 0, 1 }, { -1, 0 }, { 1, 0 } }; +/* clang-format on */ -int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x, - MV *bestmv, const MV *ref_mv, - int allow_hp, - int error_per_bit, - const vp9_variance_fn_ptr_t *vfp, - int forced_stop, - int iters_per_step, - int *cost_list, - int *mvjcost, int *mvcost[2], - int *distortion, - unsigned int *sse1, - const uint8_t *second_pred, - int w, int h) { +uint32_t vp9_find_best_sub_pixel_tree( + const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp, + int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop, + int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2], + uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w, + int h) { const uint8_t *const z = x->plane[0].src.buf; const uint8_t *const src_address = z; const int src_stride = x->plane[0].src.stride; @@ -669,10 +659,10 @@ int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x, int bc = bestmv->col * 8; int hstep = 4; int iter, round = 3 - forced_stop; - const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); - const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); - const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); - const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); + const int minc = VPXMAX(x->mv_limits.col_min * 8, ref_mv->col - MV_MAX); + const int maxc = VPXMIN(x->mv_limits.col_max * 8, ref_mv->col + MV_MAX); + const int minr = VPXMAX(x->mv_limits.row_min * 
8, ref_mv->row - MV_MAX); + const int maxr = VPXMIN(x->mv_limits.row_max * 8, ref_mv->row + MV_MAX); int tr = br; int tc = bc; const MV *search_step = search_step_table; @@ -681,18 +671,16 @@ int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x, int kr, kc; if (!(allow_hp && use_mv_hp(ref_mv))) - if (round == 3) - round = 2; + if (round == 3) round = 2; bestmv->row *= 8; bestmv->col *= 8; - besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, - z, src_stride, y, y_stride, second_pred, - w, h, offset, mvjcost, mvcost, - sse1, distortion); + besterr = setup_center_error(xd, bestmv, ref_mv, error_per_bit, vfp, z, + src_stride, y, y_stride, second_pred, w, h, + offset, mvjcost, mvcost, sse1, distortion); - (void) cost_list; // to silence compiler warning + (void)cost_list; // to silence compiler warning for (iter = 0; iter < round; ++iter) { // Check vertical and horizontal sub-pixel positions. @@ -705,13 +693,13 @@ int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x, this_mv.row = tr; this_mv.col = tc; if (second_pred == NULL) - thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), - src_address, src_stride, &sse); + thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), src_address, + src_stride, &sse); else thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr), src_address, src_stride, &sse, second_pred); - cost_array[idx] = thismse + - mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit); + cost_array[idx] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, + mvcost, error_per_bit); if (cost_array[idx] < besterr) { best_idx = idx; @@ -732,15 +720,15 @@ int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x, tr = br + kr; if (tc >= minc && tc <= maxc && tr >= minr && tr <= maxr) { const uint8_t *const pre_address = y + (tr >> 3) * y_stride + (tc >> 3); - MV this_mv = {tr, tc}; + MV this_mv = { tr, tc }; if (second_pred == NULL) - thismse = vfp->svf(pre_address, y_stride, sp(tc), sp(tr), - src_address, src_stride, &sse); + thismse = 
vfp->svf(pre_address, y_stride, sp(tc), sp(tr), src_address, + src_stride, &sse); else - thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr), - src_address, src_stride, &sse, second_pred); - cost_array[4] = thismse + - mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit); + thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr), src_address, + src_stride, &sse, second_pred); + cost_array[4] = thismse + mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, + error_per_bit); if (cost_array[4] < besterr) { best_idx = 4; @@ -760,8 +748,7 @@ int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x, bc = tc; } - if (iters_per_step > 1 && best_idx != -1) - SECOND_LEVEL_CHECKS_BEST; + if (iters_per_step > 1 && best_idx != -1) SECOND_LEVEL_CHECKS_BEST; tr = br; tc = bc; @@ -776,8 +763,8 @@ int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x, // These lines insure static analysis doesn't warn that // tr and tc aren't used after the above point. - (void) tr; - (void) tc; + (void)tr; + (void)tc; bestmv->row = br; bestmv->col = bc; @@ -790,49 +777,46 @@ int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x, } #undef MVC -#undef PRE #undef CHECK_BETTER -static INLINE int check_bounds(const MACROBLOCK *x, int row, int col, +static INLINE int check_bounds(const MvLimits *mv_limits, int row, int col, int range) { - return ((row - range) >= x->mv_row_min) & - ((row + range) <= x->mv_row_max) & - ((col - range) >= x->mv_col_min) & - ((col + range) <= x->mv_col_max); + return ((row - range) >= mv_limits->row_min) & + ((row + range) <= mv_limits->row_max) & + ((col - range) >= mv_limits->col_min) & + ((col + range) <= mv_limits->col_max); } -static INLINE int is_mv_in(const MACROBLOCK *x, const MV *mv) { - return (mv->col >= x->mv_col_min) && (mv->col <= x->mv_col_max) && - (mv->row >= x->mv_row_min) && (mv->row <= x->mv_row_max); +static INLINE int is_mv_in(const MvLimits *mv_limits, const MV *mv) { + return (mv->col >= mv_limits->col_min) && (mv->col <= mv_limits->col_max) && 
+ (mv->row >= mv_limits->row_min) && (mv->row <= mv_limits->row_max); } -#define CHECK_BETTER \ - {\ - if (thissad < bestsad) {\ - if (use_mvcost) \ - thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit);\ - if (thissad < bestsad) {\ - bestsad = thissad;\ - best_site = i;\ - }\ - }\ +#define CHECK_BETTER \ + { \ + if (thissad < bestsad) { \ + if (use_mvcost) \ + thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit); \ + if (thissad < bestsad) { \ + bestsad = thissad; \ + best_site = i; \ + } \ + } \ } -#define MAX_PATTERN_SCALES 11 -#define MAX_PATTERN_CANDIDATES 8 // max number of canddiates per scale -#define PATTERN_CANDIDATES_REF 3 // number of refinement candidates +#define MAX_PATTERN_SCALES 11 +#define MAX_PATTERN_CANDIDATES 8 // max number of canddiates per scale +#define PATTERN_CANDIDATES_REF 3 // number of refinement candidates // Calculate and return a sad+mvcost list around an integer best pel. -static INLINE void calc_int_cost_list(const MACROBLOCK *x, - const MV *ref_mv, +static INLINE void calc_int_cost_list(const MACROBLOCK *x, const MV *ref_mv, int sadpb, const vp9_variance_fn_ptr_t *fn_ptr, - const MV *best_mv, - int *cost_list) { - static const MV neighbors[4] = {{0, -1}, {1, 0}, {0, 1}, {-1, 0}}; + const MV *best_mv, int *cost_list) { + static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } }; const struct buf_2d *const what = &x->plane[0].src; const struct buf_2d *const in_what = &x->e_mbd.plane[0].pre[0]; - const MV fcenter_mv = {ref_mv->row >> 3, ref_mv->col >> 3}; + const MV fcenter_mv = { ref_mv->row >> 3, ref_mv->col >> 3 }; int br = best_mv->row; int bc = best_mv->col; MV this_mv; @@ -841,34 +825,30 @@ static INLINE void calc_int_cost_list(const MACROBLOCK *x, this_mv.row = br; this_mv.col = bc; - cost_list[0] = fn_ptr->vf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride, &sse) + + cost_list[0] = + fn_ptr->vf(what->buf, what->stride, get_buf_from_mv(in_what, 
&this_mv), + in_what->stride, &sse) + mvsad_err_cost(x, &this_mv, &fcenter_mv, sadpb); - if (check_bounds(x, br, bc, 1)) { + if (check_bounds(&x->mv_limits, br, bc, 1)) { for (i = 0; i < 4; i++) { - const MV this_mv = {br + neighbors[i].row, - bc + neighbors[i].col}; + const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col }; cost_list[i + 1] = fn_ptr->vf(what->buf, what->stride, get_buf_from_mv(in_what, &this_mv), in_what->stride, &sse) + - mv_err_cost(&this_mv, &fcenter_mv, - x->nmvjointcost, x->mvcost, - x->errorperbit); + mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost, + x->mvcost, x->errorperbit); } } else { for (i = 0; i < 4; i++) { - const MV this_mv = {br + neighbors[i].row, - bc + neighbors[i].col}; - if (!is_mv_in(x, &this_mv)) + const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col }; + if (!is_mv_in(&x->mv_limits, &this_mv)) cost_list[i + 1] = INT_MAX; else cost_list[i + 1] = fn_ptr->vf(what->buf, what->stride, get_buf_from_mv(in_what, &this_mv), in_what->stride, &sse) + - mv_err_cost(&this_mv, &fcenter_mv, - x->nmvjointcost, x->mvcost, - x->errorperbit); + mv_err_cost(&this_mv, &fcenter_mv, x->nmvjointcost, + x->mvcost, x->errorperbit); } } } @@ -878,16 +858,10 @@ static INLINE void calc_int_cost_list(const MACROBLOCK *x, // candidates as indicated in the num_candidates and candidates arrays // passed into this function // -static int vp9_pattern_search(const MACROBLOCK *x, - MV *ref_mv, - int search_param, - int sad_per_bit, - int do_init_search, - int *cost_list, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost, - const MV *center_mv, - MV *best_mv, +static int vp9_pattern_search(const MACROBLOCK *x, MV *ref_mv, int search_param, + int sad_per_bit, int do_init_search, + int *cost_list, const vp9_variance_fn_ptr_t *vfp, + int use_mvcost, const MV *center_mv, MV *best_mv, const int num_candidates[MAX_PATTERN_SCALES], const MV candidates[MAX_PATTERN_SCALES] [MAX_PATTERN_CANDIDATES]) { @@ -902,17 +876,18 @@ static int 
vp9_pattern_search(const MACROBLOCK *x, int bestsad = INT_MAX; int thissad; int k = -1; - const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; + const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 }; int best_init_s = search_param_to_steps[search_param]; // adjust ref_mv to make sure it is within MV range - clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); + clamp_mv(ref_mv, x->mv_limits.col_min, x->mv_limits.col_max, + x->mv_limits.row_min, x->mv_limits.row_max); br = ref_mv->row; bc = ref_mv->col; // Work out the start point for the search - bestsad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, ref_mv), in_what->stride) + - mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit); + bestsad = vfp->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv), + in_what->stride) + + mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit); // Search all possible scales upto the search param around the center point // pick the scale of the point that is best as the starting scale of @@ -922,24 +897,23 @@ static int vp9_pattern_search(const MACROBLOCK *x, best_init_s = -1; for (t = 0; t <= s; ++t) { int best_site = -1; - if (check_bounds(x, br, bc, 1 << t)) { + if (check_bounds(&x->mv_limits, br, bc, 1 << t)) { for (i = 0; i < num_candidates[t]; i++) { - const MV this_mv = {br + candidates[t][i].row, - bc + candidates[t][i].col}; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const MV this_mv = { br + candidates[t][i].row, + bc + candidates[t][i].col }; + thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } else { for (i = 0; i < num_candidates[t]; i++) { - const MV this_mv = {br + candidates[t][i].row, - bc + candidates[t][i].col}; - if (!is_mv_in(x, &this_mv)) - continue; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const 
MV this_mv = { br + candidates[t][i].row, + bc + candidates[t][i].col }; + if (!is_mv_in(&x->mv_limits, &this_mv)) continue; + thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } @@ -965,24 +939,23 @@ static int vp9_pattern_search(const MACROBLOCK *x, do { // No need to search all 6 points the 1st time if initial search was used if (!do_init_search || s != best_init_s) { - if (check_bounds(x, br, bc, 1 << s)) { + if (check_bounds(&x->mv_limits, br, bc, 1 << s)) { for (i = 0; i < num_candidates[s]; i++) { - const MV this_mv = {br + candidates[s][i].row, - bc + candidates[s][i].col}; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const MV this_mv = { br + candidates[s][i].row, + bc + candidates[s][i].col }; + thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } else { for (i = 0; i < num_candidates[s]; i++) { - const MV this_mv = {br + candidates[s][i].row, - bc + candidates[s][i].col}; - if (!is_mv_in(x, &this_mv)) - continue; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const MV this_mv = { br + candidates[s][i].row, + bc + candidates[s][i].col }; + if (!is_mv_in(&x->mv_limits, &this_mv)) continue; + thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } @@ -1003,24 +976,27 @@ static int vp9_pattern_search(const MACROBLOCK *x, next_chkpts_indices[1] = k; next_chkpts_indices[2] = (k == num_candidates[s] - 1) ? 
0 : k + 1; - if (check_bounds(x, br, bc, 1 << s)) { + if (check_bounds(&x->mv_limits, br, bc, 1 << s)) { for (i = 0; i < PATTERN_CANDIDATES_REF; i++) { - const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row, - bc + candidates[s][next_chkpts_indices[i]].col}; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const MV this_mv = { + br + candidates[s][next_chkpts_indices[i]].row, + bc + candidates[s][next_chkpts_indices[i]].col + }; + thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } else { for (i = 0; i < PATTERN_CANDIDATES_REF; i++) { - const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row, - bc + candidates[s][next_chkpts_indices[i]].col}; - if (!is_mv_in(x, &this_mv)) - continue; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const MV this_mv = { + br + candidates[s][next_chkpts_indices[i]].row, + bc + candidates[s][next_chkpts_indices[i]].col + }; + if (!is_mv_in(&x->mv_limits, &this_mv)) continue; + thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } @@ -1053,19 +1029,12 @@ static int vp9_pattern_search(const MACROBLOCK *x, // are 4 1-away neighbors, and cost_list is non-null // TODO(debargha): Merge this function with the one above. Also remove // use_mvcost option since it is always 1, to save unnecessary branches. 
-static int vp9_pattern_search_sad(const MACROBLOCK *x, - MV *ref_mv, - int search_param, - int sad_per_bit, - int do_init_search, - int *cost_list, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost, - const MV *center_mv, - MV *best_mv, - const int num_candidates[MAX_PATTERN_SCALES], - const MV candidates[MAX_PATTERN_SCALES] - [MAX_PATTERN_CANDIDATES]) { +static int vp9_pattern_search_sad( + const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit, + int do_init_search, int *cost_list, const vp9_variance_fn_ptr_t *vfp, + int use_mvcost, const MV *center_mv, MV *best_mv, + const int num_candidates[MAX_PATTERN_SCALES], + const MV candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES]) { const MACROBLOCKD *const xd = &x->e_mbd; static const int search_param_to_steps[MAX_MVSEARCH_STEPS] = { 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, @@ -1077,10 +1046,11 @@ static int vp9_pattern_search_sad(const MACROBLOCK *x, int bestsad = INT_MAX; int thissad; int k = -1; - const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; + const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 }; int best_init_s = search_param_to_steps[search_param]; // adjust ref_mv to make sure it is within MV range - clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); + clamp_mv(ref_mv, x->mv_limits.col_min, x->mv_limits.col_max, + x->mv_limits.row_min, x->mv_limits.row_max); br = ref_mv->row; bc = ref_mv->col; if (cost_list != NULL) { @@ -1089,9 +1059,9 @@ static int vp9_pattern_search_sad(const MACROBLOCK *x, } // Work out the start point for the search - bestsad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, ref_mv), in_what->stride) + - mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit); + bestsad = vfp->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv), + in_what->stride) + + mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit); // Search all possible scales upto the search param around the center point // pick the scale of 
the point that is best as the starting scale of @@ -1101,24 +1071,23 @@ static int vp9_pattern_search_sad(const MACROBLOCK *x, best_init_s = -1; for (t = 0; t <= s; ++t) { int best_site = -1; - if (check_bounds(x, br, bc, 1 << t)) { + if (check_bounds(&x->mv_limits, br, bc, 1 << t)) { for (i = 0; i < num_candidates[t]; i++) { - const MV this_mv = {br + candidates[t][i].row, - bc + candidates[t][i].col}; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const MV this_mv = { br + candidates[t][i].row, + bc + candidates[t][i].col }; + thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } else { for (i = 0; i < num_candidates[t]; i++) { - const MV this_mv = {br + candidates[t][i].row, - bc + candidates[t][i].col}; - if (!is_mv_in(x, &this_mv)) - continue; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const MV this_mv = { br + candidates[t][i].row, + bc + candidates[t][i].col }; + if (!is_mv_in(&x->mv_limits, &this_mv)) continue; + thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } @@ -1144,24 +1113,23 @@ static int vp9_pattern_search_sad(const MACROBLOCK *x, for (; s >= do_sad; s--) { if (!do_init_search || s != best_init_s) { - if (check_bounds(x, br, bc, 1 << s)) { + if (check_bounds(&x->mv_limits, br, bc, 1 << s)) { for (i = 0; i < num_candidates[s]; i++) { - const MV this_mv = {br + candidates[s][i].row, - bc + candidates[s][i].col}; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const MV this_mv = { br + candidates[s][i].row, + bc + candidates[s][i].col }; + thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } else { for (i = 0; i < num_candidates[s]; i++) { - const MV this_mv = {br + 
candidates[s][i].row, - bc + candidates[s][i].col}; - if (!is_mv_in(x, &this_mv)) - continue; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const MV this_mv = { br + candidates[s][i].row, + bc + candidates[s][i].col }; + if (!is_mv_in(&x->mv_limits, &this_mv)) continue; + thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } @@ -1182,24 +1150,27 @@ static int vp9_pattern_search_sad(const MACROBLOCK *x, next_chkpts_indices[1] = k; next_chkpts_indices[2] = (k == num_candidates[s] - 1) ? 0 : k + 1; - if (check_bounds(x, br, bc, 1 << s)) { + if (check_bounds(&x->mv_limits, br, bc, 1 << s)) { for (i = 0; i < PATTERN_CANDIDATES_REF; i++) { - const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row, - bc + candidates[s][next_chkpts_indices[i]].col}; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const MV this_mv = { + br + candidates[s][next_chkpts_indices[i]].row, + bc + candidates[s][next_chkpts_indices[i]].col + }; + thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } else { for (i = 0; i < PATTERN_CANDIDATES_REF; i++) { - const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row, - bc + candidates[s][next_chkpts_indices[i]].col}; - if (!is_mv_in(x, &this_mv)) - continue; - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const MV this_mv = { + br + candidates[s][next_chkpts_indices[i]].row, + bc + candidates[s][next_chkpts_indices[i]].col + }; + if (!is_mv_in(&x->mv_limits, &this_mv)) continue; + thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } @@ -1216,26 +1187,23 @@ static int vp9_pattern_search_sad(const MACROBLOCK *x, if (s == 0) { cost_list[0] = bestsad; if (!do_init_search 
|| s != best_init_s) { - if (check_bounds(x, br, bc, 1 << s)) { + if (check_bounds(&x->mv_limits, br, bc, 1 << s)) { for (i = 0; i < num_candidates[s]; i++) { - const MV this_mv = {br + candidates[s][i].row, - bc + candidates[s][i].col}; - cost_list[i + 1] = - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const MV this_mv = { br + candidates[s][i].row, + bc + candidates[s][i].col }; + cost_list[i + 1] = thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } else { for (i = 0; i < num_candidates[s]; i++) { - const MV this_mv = {br + candidates[s][i].row, - bc + candidates[s][i].col}; - if (!is_mv_in(x, &this_mv)) - continue; - cost_list[i + 1] = - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const MV this_mv = { br + candidates[s][i].row, + bc + candidates[s][i].col }; + if (!is_mv_in(&x->mv_limits, &this_mv)) continue; + cost_list[i + 1] = thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } @@ -1256,28 +1224,30 @@ static int vp9_pattern_search_sad(const MACROBLOCK *x, cost_list[((k + 2) % 4) + 1] = cost_list[0]; cost_list[0] = bestsad; - if (check_bounds(x, br, bc, 1 << s)) { + if (check_bounds(&x->mv_limits, br, bc, 1 << s)) { for (i = 0; i < PATTERN_CANDIDATES_REF; i++) { - const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row, - bc + candidates[s][next_chkpts_indices[i]].col}; - cost_list[next_chkpts_indices[i] + 1] = - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const MV this_mv = { + br + candidates[s][next_chkpts_indices[i]].row, + bc + candidates[s][next_chkpts_indices[i]].col + }; + cost_list[next_chkpts_indices[i] + 1] = thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } 
else { for (i = 0; i < PATTERN_CANDIDATES_REF; i++) { - const MV this_mv = {br + candidates[s][next_chkpts_indices[i]].row, - bc + candidates[s][next_chkpts_indices[i]].col}; - if (!is_mv_in(x, &this_mv)) { + const MV this_mv = { + br + candidates[s][next_chkpts_indices[i]].row, + bc + candidates[s][next_chkpts_indices[i]].col + }; + if (!is_mv_in(&x->mv_limits, &this_mv)) { cost_list[next_chkpts_indices[i] + 1] = INT_MAX; continue; } - cost_list[next_chkpts_indices[i] + 1] = - thissad = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + cost_list[next_chkpts_indices[i] + 1] = thissad = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); CHECK_BETTER } } @@ -1298,34 +1268,31 @@ static int vp9_pattern_search_sad(const MACROBLOCK *x, // cost_list[3]: sad at delta { 0, 1} (right) from the best integer pel // cost_list[4]: sad at delta {-1, 0} (top) from the best integer pel if (cost_list) { - static const MV neighbors[4] = {{0, -1}, {1, 0}, {0, 1}, {-1, 0}}; + static const MV neighbors[4] = { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } }; if (cost_list[0] == INT_MAX) { cost_list[0] = bestsad; - if (check_bounds(x, br, bc, 1)) { + if (check_bounds(&x->mv_limits, br, bc, 1)) { for (i = 0; i < 4; i++) { - const MV this_mv = { br + neighbors[i].row, - bc + neighbors[i].col }; - cost_list[i + 1] = vfp->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col }; + cost_list[i + 1] = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); } } else { for (i = 0; i < 4; i++) { - const MV this_mv = {br + neighbors[i].row, - bc + neighbors[i].col}; - if (!is_mv_in(x, &this_mv)) + const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col }; + if (!is_mv_in(&x->mv_limits, &this_mv)) cost_list[i + 1] = INT_MAX; else - cost_list[i + 1] = vfp->sdf(what->buf, 
what->stride, - get_buf_from_mv(in_what, &this_mv), - in_what->stride); + cost_list[i + 1] = + vfp->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &this_mv), in_what->stride); } } } else { if (use_mvcost) { for (i = 0; i < 4; i++) { - const MV this_mv = {br + neighbors[i].row, - bc + neighbors[i].col}; + const MV this_mv = { br + neighbors[i].row, bc + neighbors[i].col }; if (cost_list[i + 1] != INT_MAX) { cost_list[i + 1] += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit); @@ -1339,201 +1306,197 @@ static int vp9_pattern_search_sad(const MACROBLOCK *x, return bestsad; } -int vp9_get_mvpred_var(const MACROBLOCK *x, - const MV *best_mv, const MV *center_mv, - const vp9_variance_fn_ptr_t *vfp, +int vp9_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv, + const MV *center_mv, const vp9_variance_fn_ptr_t *vfp, int use_mvcost) { const MACROBLOCKD *const xd = &x->e_mbd; const struct buf_2d *const what = &x->plane[0].src; const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const MV mv = {best_mv->row * 8, best_mv->col * 8}; - unsigned int unused; - - return vfp->vf(what->buf, what->stride, - get_buf_from_mv(in_what, best_mv), in_what->stride, &unused) + - (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, - x->mvcost, x->errorperbit) : 0); + const MV mv = { best_mv->row * 8, best_mv->col * 8 }; + uint32_t unused; +#if CONFIG_VP9_HIGHBITDEPTH + uint64_t err = + vfp->vf(what->buf, what->stride, get_buf_from_mv(in_what, best_mv), + in_what->stride, &unused); + err += (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost, + x->errorperbit) + : 0); + if (err >= INT_MAX) return INT_MAX; + return (int)err; +#else + return vfp->vf(what->buf, what->stride, get_buf_from_mv(in_what, best_mv), + in_what->stride, &unused) + + (use_mvcost ? 
mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost, + x->errorperbit) + : 0); +#endif } -int vp9_get_mvpred_av_var(const MACROBLOCK *x, - const MV *best_mv, const MV *center_mv, - const uint8_t *second_pred, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost) { +int vp9_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv, + const MV *center_mv, const uint8_t *second_pred, + const vp9_variance_fn_ptr_t *vfp, int use_mvcost) { const MACROBLOCKD *const xd = &x->e_mbd; const struct buf_2d *const what = &x->plane[0].src; const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const MV mv = {best_mv->row * 8, best_mv->col * 8}; + const MV mv = { best_mv->row * 8, best_mv->col * 8 }; unsigned int unused; return vfp->svaf(get_buf_from_mv(in_what, best_mv), in_what->stride, 0, 0, what->buf, what->stride, &unused, second_pred) + - (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, - x->mvcost, x->errorperbit) : 0); + (use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost, + x->errorperbit) + : 0); } -static int hex_search(const MACROBLOCK *x, - MV *ref_mv, - int search_param, - int sad_per_bit, - int do_init_search, - int *cost_list, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost, +static int hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param, + int sad_per_bit, int do_init_search, int *cost_list, + const vp9_variance_fn_ptr_t *vfp, int use_mvcost, const MV *center_mv, MV *best_mv) { // First scale has 8-closest points, the rest have 6 points in hex shape // at increasing scales - static const int hex_num_candidates[MAX_PATTERN_SCALES] = { - 8, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 - }; + static const int hex_num_candidates[MAX_PATTERN_SCALES] = { 8, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6 }; // Note that the largest candidate step at each scale is 2^scale + /* clang-format off */ static const MV hex_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = { - {{-1, -1}, {0, -1}, {1, -1}, {1, 0}, {1, 1}, { 0, 1}, { -1, 1}, {-1, 0}}, - 
{{-1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0}}, - {{-2, -4}, {2, -4}, {4, 0}, {2, 4}, { -2, 4}, { -4, 0}}, - {{-4, -8}, {4, -8}, {8, 0}, {4, 8}, { -4, 8}, { -8, 0}}, - {{-8, -16}, {8, -16}, {16, 0}, {8, 16}, { -8, 16}, { -16, 0}}, - {{-16, -32}, {16, -32}, {32, 0}, {16, 32}, { -16, 32}, { -32, 0}}, - {{-32, -64}, {32, -64}, {64, 0}, {32, 64}, { -32, 64}, { -64, 0}}, - {{-64, -128}, {64, -128}, {128, 0}, {64, 128}, { -64, 128}, { -128, 0}}, - {{-128, -256}, {128, -256}, {256, 0}, {128, 256}, { -128, 256}, { -256, 0}}, - {{-256, -512}, {256, -512}, {512, 0}, {256, 512}, { -256, 512}, { -512, 0}}, - {{-512, -1024}, {512, -1024}, {1024, 0}, {512, 1024}, { -512, 1024}, - { -1024, 0}}, + { { -1, -1 }, { 0, -1 }, { 1, -1 }, { 1, 0 }, { 1, 1 }, { 0, 1 }, { -1, 1 }, + { -1, 0 } }, + { { -1, -2 }, { 1, -2 }, { 2, 0 }, { 1, 2 }, { -1, 2 }, { -2, 0 } }, + { { -2, -4 }, { 2, -4 }, { 4, 0 }, { 2, 4 }, { -2, 4 }, { -4, 0 } }, + { { -4, -8 }, { 4, -8 }, { 8, 0 }, { 4, 8 }, { -4, 8 }, { -8, 0 } }, + { { -8, -16 }, { 8, -16 }, { 16, 0 }, { 8, 16 }, { -8, 16 }, { -16, 0 } }, + { { -16, -32 }, { 16, -32 }, { 32, 0 }, { 16, 32 }, { -16, 32 }, + { -32, 0 } }, + { { -32, -64 }, { 32, -64 }, { 64, 0 }, { 32, 64 }, { -32, 64 }, + { -64, 0 } }, + { { -64, -128 }, { 64, -128 }, { 128, 0 }, { 64, 128 }, { -64, 128 }, + { -128, 0 } }, + { { -128, -256 }, { 128, -256 }, { 256, 0 }, { 128, 256 }, { -128, 256 }, + { -256, 0 } }, + { { -256, -512 }, { 256, -512 }, { 512, 0 }, { 256, 512 }, { -256, 512 }, + { -512, 0 } }, + { { -512, -1024 }, { 512, -1024 }, { 1024, 0 }, { 512, 1024 }, + { -512, 1024 }, { -1024, 0 } } }; - return vp9_pattern_search(x, ref_mv, search_param, sad_per_bit, - do_init_search, cost_list, vfp, use_mvcost, - center_mv, best_mv, - hex_num_candidates, hex_candidates); + /* clang-format on */ + return vp9_pattern_search( + x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp, + use_mvcost, center_mv, best_mv, hex_num_candidates, hex_candidates); } 
-static int bigdia_search(const MACROBLOCK *x, - MV *ref_mv, - int search_param, - int sad_per_bit, - int do_init_search, - int *cost_list, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost, - const MV *center_mv, - MV *best_mv) { +static int bigdia_search(const MACROBLOCK *x, MV *ref_mv, int search_param, + int sad_per_bit, int do_init_search, int *cost_list, + const vp9_variance_fn_ptr_t *vfp, int use_mvcost, + const MV *center_mv, MV *best_mv) { // First scale has 4-closest points, the rest have 8 points in diamond // shape at increasing scales static const int bigdia_num_candidates[MAX_PATTERN_SCALES] = { 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, }; // Note that the largest candidate step at each scale is 2^scale - static const MV bigdia_candidates[MAX_PATTERN_SCALES] - [MAX_PATTERN_CANDIDATES] = { - {{0, -1}, {1, 0}, { 0, 1}, {-1, 0}}, - {{-1, -1}, {0, -2}, {1, -1}, {2, 0}, {1, 1}, {0, 2}, {-1, 1}, {-2, 0}}, - {{-2, -2}, {0, -4}, {2, -2}, {4, 0}, {2, 2}, {0, 4}, {-2, 2}, {-4, 0}}, - {{-4, -4}, {0, -8}, {4, -4}, {8, 0}, {4, 4}, {0, 8}, {-4, 4}, {-8, 0}}, - {{-8, -8}, {0, -16}, {8, -8}, {16, 0}, {8, 8}, {0, 16}, {-8, 8}, {-16, 0}}, - {{-16, -16}, {0, -32}, {16, -16}, {32, 0}, {16, 16}, {0, 32}, - {-16, 16}, {-32, 0}}, - {{-32, -32}, {0, -64}, {32, -32}, {64, 0}, {32, 32}, {0, 64}, - {-32, 32}, {-64, 0}}, - {{-64, -64}, {0, -128}, {64, -64}, {128, 0}, {64, 64}, {0, 128}, - {-64, 64}, {-128, 0}}, - {{-128, -128}, {0, -256}, {128, -128}, {256, 0}, {128, 128}, {0, 256}, - {-128, 128}, {-256, 0}}, - {{-256, -256}, {0, -512}, {256, -256}, {512, 0}, {256, 256}, {0, 512}, - {-256, 256}, {-512, 0}}, - {{-512, -512}, {0, -1024}, {512, -512}, {1024, 0}, {512, 512}, {0, 1024}, - {-512, 512}, {-1024, 0}}, - }; - return vp9_pattern_search_sad(x, ref_mv, search_param, sad_per_bit, - do_init_search, cost_list, vfp, use_mvcost, - center_mv, best_mv, - bigdia_num_candidates, bigdia_candidates); + /* clang-format off */ + static const MV + 
bigdia_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = { + { { 0, -1 }, { 1, 0 }, { 0, 1 }, { -1, 0 } }, + { { -1, -1 }, { 0, -2 }, { 1, -1 }, { 2, 0 }, { 1, 1 }, { 0, 2 }, + { -1, 1 }, { -2, 0 } }, + { { -2, -2 }, { 0, -4 }, { 2, -2 }, { 4, 0 }, { 2, 2 }, { 0, 4 }, + { -2, 2 }, { -4, 0 } }, + { { -4, -4 }, { 0, -8 }, { 4, -4 }, { 8, 0 }, { 4, 4 }, { 0, 8 }, + { -4, 4 }, { -8, 0 } }, + { { -8, -8 }, { 0, -16 }, { 8, -8 }, { 16, 0 }, { 8, 8 }, { 0, 16 }, + { -8, 8 }, { -16, 0 } }, + { { -16, -16 }, { 0, -32 }, { 16, -16 }, { 32, 0 }, { 16, 16 }, + { 0, 32 }, { -16, 16 }, { -32, 0 } }, + { { -32, -32 }, { 0, -64 }, { 32, -32 }, { 64, 0 }, { 32, 32 }, + { 0, 64 }, { -32, 32 }, { -64, 0 } }, + { { -64, -64 }, { 0, -128 }, { 64, -64 }, { 128, 0 }, { 64, 64 }, + { 0, 128 }, { -64, 64 }, { -128, 0 } }, + { { -128, -128 }, { 0, -256 }, { 128, -128 }, { 256, 0 }, { 128, 128 }, + { 0, 256 }, { -128, 128 }, { -256, 0 } }, + { { -256, -256 }, { 0, -512 }, { 256, -256 }, { 512, 0 }, { 256, 256 }, + { 0, 512 }, { -256, 256 }, { -512, 0 } }, + { { -512, -512 }, { 0, -1024 }, { 512, -512 }, { 1024, 0 }, + { 512, 512 }, { 0, 1024 }, { -512, 512 }, { -1024, 0 } } + }; + /* clang-format on */ + return vp9_pattern_search_sad( + x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp, + use_mvcost, center_mv, best_mv, bigdia_num_candidates, bigdia_candidates); } -static int square_search(const MACROBLOCK *x, - MV *ref_mv, - int search_param, - int sad_per_bit, - int do_init_search, - int *cost_list, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost, - const MV *center_mv, - MV *best_mv) { +static int square_search(const MACROBLOCK *x, MV *ref_mv, int search_param, + int sad_per_bit, int do_init_search, int *cost_list, + const vp9_variance_fn_ptr_t *vfp, int use_mvcost, + const MV *center_mv, MV *best_mv) { // All scales have 8 closest points in square shape static const int square_num_candidates[MAX_PATTERN_SCALES] = { 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, }; // 
Note that the largest candidate step at each scale is 2^scale - static const MV square_candidates[MAX_PATTERN_SCALES] - [MAX_PATTERN_CANDIDATES] = { - {{-1, -1}, {0, -1}, {1, -1}, {1, 0}, {1, 1}, {0, 1}, {-1, 1}, {-1, 0}}, - {{-2, -2}, {0, -2}, {2, -2}, {2, 0}, {2, 2}, {0, 2}, {-2, 2}, {-2, 0}}, - {{-4, -4}, {0, -4}, {4, -4}, {4, 0}, {4, 4}, {0, 4}, {-4, 4}, {-4, 0}}, - {{-8, -8}, {0, -8}, {8, -8}, {8, 0}, {8, 8}, {0, 8}, {-8, 8}, {-8, 0}}, - {{-16, -16}, {0, -16}, {16, -16}, {16, 0}, {16, 16}, {0, 16}, - {-16, 16}, {-16, 0}}, - {{-32, -32}, {0, -32}, {32, -32}, {32, 0}, {32, 32}, {0, 32}, - {-32, 32}, {-32, 0}}, - {{-64, -64}, {0, -64}, {64, -64}, {64, 0}, {64, 64}, {0, 64}, - {-64, 64}, {-64, 0}}, - {{-128, -128}, {0, -128}, {128, -128}, {128, 0}, {128, 128}, {0, 128}, - {-128, 128}, {-128, 0}}, - {{-256, -256}, {0, -256}, {256, -256}, {256, 0}, {256, 256}, {0, 256}, - {-256, 256}, {-256, 0}}, - {{-512, -512}, {0, -512}, {512, -512}, {512, 0}, {512, 512}, {0, 512}, - {-512, 512}, {-512, 0}}, - {{-1024, -1024}, {0, -1024}, {1024, -1024}, {1024, 0}, {1024, 1024}, - {0, 1024}, {-1024, 1024}, {-1024, 0}}, - }; - return vp9_pattern_search(x, ref_mv, search_param, sad_per_bit, - do_init_search, cost_list, vfp, use_mvcost, - center_mv, best_mv, - square_num_candidates, square_candidates); + /* clang-format off */ + static const MV + square_candidates[MAX_PATTERN_SCALES][MAX_PATTERN_CANDIDATES] = { + { { -1, -1 }, { 0, -1 }, { 1, -1 }, { 1, 0 }, { 1, 1 }, { 0, 1 }, + { -1, 1 }, { -1, 0 } }, + { { -2, -2 }, { 0, -2 }, { 2, -2 }, { 2, 0 }, { 2, 2 }, { 0, 2 }, + { -2, 2 }, { -2, 0 } }, + { { -4, -4 }, { 0, -4 }, { 4, -4 }, { 4, 0 }, { 4, 4 }, { 0, 4 }, + { -4, 4 }, { -4, 0 } }, + { { -8, -8 }, { 0, -8 }, { 8, -8 }, { 8, 0 }, { 8, 8 }, { 0, 8 }, + { -8, 8 }, { -8, 0 } }, + { { -16, -16 }, { 0, -16 }, { 16, -16 }, { 16, 0 }, { 16, 16 }, + { 0, 16 }, { -16, 16 }, { -16, 0 } }, + { { -32, -32 }, { 0, -32 }, { 32, -32 }, { 32, 0 }, { 32, 32 }, + { 0, 32 }, { -32, 32 }, { -32, 0 
} }, + { { -64, -64 }, { 0, -64 }, { 64, -64 }, { 64, 0 }, { 64, 64 }, + { 0, 64 }, { -64, 64 }, { -64, 0 } }, + { { -128, -128 }, { 0, -128 }, { 128, -128 }, { 128, 0 }, { 128, 128 }, + { 0, 128 }, { -128, 128 }, { -128, 0 } }, + { { -256, -256 }, { 0, -256 }, { 256, -256 }, { 256, 0 }, { 256, 256 }, + { 0, 256 }, { -256, 256 }, { -256, 0 } }, + { { -512, -512 }, { 0, -512 }, { 512, -512 }, { 512, 0 }, { 512, 512 }, + { 0, 512 }, { -512, 512 }, { -512, 0 } }, + { { -1024, -1024 }, { 0, -1024 }, { 1024, -1024 }, { 1024, 0 }, + { 1024, 1024 }, { 0, 1024 }, { -1024, 1024 }, { -1024, 0 } } + }; + /* clang-format on */ + return vp9_pattern_search( + x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp, + use_mvcost, center_mv, best_mv, square_num_candidates, square_candidates); } -static int fast_hex_search(const MACROBLOCK *x, - MV *ref_mv, - int search_param, +static int fast_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit, int do_init_search, // must be zero for fast_hex - int *cost_list, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost, - const MV *center_mv, - MV *best_mv) { + int *cost_list, const vp9_variance_fn_ptr_t *vfp, + int use_mvcost, const MV *center_mv, MV *best_mv) { return hex_search(x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit, do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv); } -static int fast_dia_search(const MACROBLOCK *x, - MV *ref_mv, - int search_param, - int sad_per_bit, - int do_init_search, - int *cost_list, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost, - const MV *center_mv, - MV *best_mv) { - return bigdia_search( - x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit, - do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv); +static int fast_dia_search(const MACROBLOCK *x, MV *ref_mv, int search_param, + int sad_per_bit, int do_init_search, int *cost_list, + const vp9_variance_fn_ptr_t *vfp, int use_mvcost, + const MV 
*center_mv, MV *best_mv) { + return bigdia_search(x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), + sad_per_bit, do_init_search, cost_list, vfp, use_mvcost, + center_mv, best_mv); } #undef CHECK_BETTER // Exhuastive motion search around a given centre position with a given // step size. -static int exhuastive_mesh_search(const MACROBLOCK *x, - MV *ref_mv, MV *best_mv, +static int exhuastive_mesh_search(const MACROBLOCK *x, MV *ref_mv, MV *best_mv, int range, int step, int sad_per_bit, const vp9_variance_fn_ptr_t *fn_ptr, const MV *center_mv) { const MACROBLOCKD *const xd = &x->e_mbd; const struct buf_2d *const what = &x->plane[0].src; const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - MV fcenter_mv = {center_mv->row, center_mv->col}; + MV fcenter_mv = { center_mv->row, center_mv->col }; unsigned int best_sad = INT_MAX; int r, c, i; int start_col, end_col, start_row, end_row; @@ -1541,24 +1504,26 @@ static int exhuastive_mesh_search(const MACROBLOCK *x, assert(step >= 1); - clamp_mv(&fcenter_mv, x->mv_col_min, x->mv_col_max, - x->mv_row_min, x->mv_row_max); + clamp_mv(&fcenter_mv, x->mv_limits.col_min, x->mv_limits.col_max, + x->mv_limits.row_min, x->mv_limits.row_max); *best_mv = fcenter_mv; - best_sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &fcenter_mv), in_what->stride) + - mvsad_err_cost(x, &fcenter_mv, ref_mv, sad_per_bit); - start_row = VPXMAX(-range, x->mv_row_min - fcenter_mv.row); - start_col = VPXMAX(-range, x->mv_col_min - fcenter_mv.col); - end_row = VPXMIN(range, x->mv_row_max - fcenter_mv.row); - end_col = VPXMIN(range, x->mv_col_max - fcenter_mv.col); + best_sad = + fn_ptr->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &fcenter_mv), in_what->stride) + + mvsad_err_cost(x, &fcenter_mv, ref_mv, sad_per_bit); + start_row = VPXMAX(-range, x->mv_limits.row_min - fcenter_mv.row); + start_col = VPXMAX(-range, x->mv_limits.col_min - fcenter_mv.col); + end_row = VPXMIN(range, x->mv_limits.row_max - 
fcenter_mv.row); + end_col = VPXMIN(range, x->mv_limits.col_max - fcenter_mv.col); for (r = start_row; r <= end_row; r += step) { for (c = start_col; c <= end_col; c += col_step) { // Step > 1 means we are not checking every location in this pass. if (step > 1) { - const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c}; - unsigned int sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &mv), in_what->stride); + const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c }; + unsigned int sad = + fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, &mv), + in_what->stride); if (sad < best_sad) { sad += mvsad_err_cost(x, &mv, ref_mv, sad_per_bit); if (sad < best_sad) { @@ -1572,17 +1537,16 @@ static int exhuastive_mesh_search(const MACROBLOCK *x, unsigned int sads[4]; const uint8_t *addrs[4]; for (i = 0; i < 4; ++i) { - const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c + i}; + const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i }; addrs[i] = get_buf_from_mv(in_what, &mv); } - fn_ptr->sdx4df(what->buf, what->stride, addrs, - in_what->stride, sads); + fn_ptr->sdx4df(what->buf, what->stride, addrs, in_what->stride, sads); for (i = 0; i < 4; ++i) { if (sads[i] < best_sad) { - const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c + i}; - const unsigned int sad = sads[i] + - mvsad_err_cost(x, &mv, ref_mv, sad_per_bit); + const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i }; + const unsigned int sad = + sads[i] + mvsad_err_cost(x, &mv, ref_mv, sad_per_bit); if (sad < best_sad) { best_sad = sad; *best_mv = mv; @@ -1591,9 +1555,10 @@ static int exhuastive_mesh_search(const MACROBLOCK *x, } } else { for (i = 0; i < end_col - c; ++i) { - const MV mv = {fcenter_mv.row + r, fcenter_mv.col + c + i}; - unsigned int sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &mv), in_what->stride); + const MV mv = { fcenter_mv.row + r, fcenter_mv.col + c + i }; + unsigned int sad = + fn_ptr->sdf(what->buf, what->stride, + 
get_buf_from_mv(in_what, &mv), in_what->stride); if (sad < best_sad) { sad += mvsad_err_cost(x, &mv, ref_mv, sad_per_bit); if (sad < best_sad) { @@ -1610,8 +1575,7 @@ static int exhuastive_mesh_search(const MACROBLOCK *x, return best_sad; } -int vp9_diamond_search_sad_c(const MACROBLOCK *x, - const search_site_config *cfg, +int vp9_diamond_search_sad_c(const MACROBLOCK *x, const search_site_config *cfg, MV *ref_mv, MV *best_mv, int search_param, int sad_per_bit, int *num00, const vp9_variance_fn_ptr_t *fn_ptr, @@ -1637,13 +1601,14 @@ int vp9_diamond_search_sad_c(const MACROBLOCK *x, // 0 = initial step (MAX_FIRST_STEP) pel // 1 = (MAX_FIRST_STEP/2) pel, // 2 = (MAX_FIRST_STEP/4) pel... -// const search_site *ss = &cfg->ss[search_param * cfg->searches_per_step]; + // const search_site *ss = &cfg->ss[search_param * cfg->searches_per_step]; const MV *ss_mv = &cfg->ss_mv[search_param * cfg->searches_per_step]; const intptr_t *ss_os = &cfg->ss_os[search_param * cfg->searches_per_step]; const int tot_steps = cfg->total_steps - search_param; - const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max); + const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 }; + clamp_mv(ref_mv, x->mv_limits.col_min, x->mv_limits.col_max, + x->mv_limits.row_min, x->mv_limits.row_max); ref_row = ref_mv->row; ref_col = ref_mv->col; *num00 = 0; @@ -1655,8 +1620,8 @@ int vp9_diamond_search_sad_c(const MACROBLOCK *x, best_address = in_what; // Check the starting position - bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) - + mvsad_err_cost(x, best_mv, &fcenter_mv, sad_per_bit); + bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride) + + mvsad_err_cost(x, best_mv, &fcenter_mv, sad_per_bit); i = 0; @@ -1665,10 +1630,10 @@ int vp9_diamond_search_sad_c(const MACROBLOCK *x, // All_in is true if every one of the points we are checking are within // the bounds of the image. 
- all_in &= ((best_mv->row + ss_mv[i].row) > x->mv_row_min); - all_in &= ((best_mv->row + ss_mv[i + 1].row) < x->mv_row_max); - all_in &= ((best_mv->col + ss_mv[i + 2].col) > x->mv_col_min); - all_in &= ((best_mv->col + ss_mv[i + 3].col) < x->mv_col_max); + all_in &= ((best_mv->row + ss_mv[i].row) > x->mv_limits.row_min); + all_in &= ((best_mv->row + ss_mv[i + 1].row) < x->mv_limits.row_max); + all_in &= ((best_mv->col + ss_mv[i + 2].col) > x->mv_limits.col_min); + all_in &= ((best_mv->col + ss_mv[i + 3].col) < x->mv_limits.col_max); // If all the pixels are within the bounds we don't check whether the // search point is valid in this loop, otherwise we check each point @@ -1679,18 +1644,17 @@ int vp9_diamond_search_sad_c(const MACROBLOCK *x, for (j = 0; j < cfg->searches_per_step; j += 4) { unsigned char const *block_offset[4]; - for (t = 0; t < 4; t++) - block_offset[t] = ss_os[i + t] + best_address; + for (t = 0; t < 4; t++) block_offset[t] = ss_os[i + t] + best_address; fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array); for (t = 0; t < 4; t++, i++) { if (sad_array[t] < bestsad) { - const MV this_mv = {best_mv->row + ss_mv[i].row, - best_mv->col + ss_mv[i].col}; - sad_array[t] += mvsad_err_cost(x, &this_mv, &fcenter_mv, - sad_per_bit); + const MV this_mv = { best_mv->row + ss_mv[i].row, + best_mv->col + ss_mv[i].col }; + sad_array[t] += + mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit); if (sad_array[t] < bestsad) { bestsad = sad_array[t]; best_site = i; @@ -1701,13 +1665,13 @@ int vp9_diamond_search_sad_c(const MACROBLOCK *x, } else { for (j = 0; j < cfg->searches_per_step; j++) { // Trap illegal vectors - const MV this_mv = {best_mv->row + ss_mv[i].row, - best_mv->col + ss_mv[i].col}; + const MV this_mv = { best_mv->row + ss_mv[i].row, + best_mv->col + ss_mv[i].col }; - if (is_mv_in(x, &this_mv)) { + if (is_mv_in(&x->mv_limits, &this_mv)) { const uint8_t *const check_here = ss_os[i] + best_address; - unsigned int thissad = 
fn_ptr->sdf(what, what_stride, check_here, - in_what_stride); + unsigned int thissad = + fn_ptr->sdf(what, what_stride, check_here, in_what_stride); if (thissad < bestsad) { thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit); @@ -1727,12 +1691,12 @@ int vp9_diamond_search_sad_c(const MACROBLOCK *x, last_site = best_site; #if defined(NEW_DIAMOND_SEARCH) while (1) { - const MV this_mv = {best_mv->row + ss_mv[best_site].row, - best_mv->col + ss_mv[best_site].col}; - if (is_mv_in(x, &this_mv)) { + const MV this_mv = { best_mv->row + ss_mv[best_site].row, + best_mv->col + ss_mv[best_site].col }; + if (is_mv_in(&x->mv_limits, &this_mv)) { const uint8_t *const check_here = ss_os[best_site] + best_address; - unsigned int thissad = fn_ptr->sdf(what, what_stride, check_here, - in_what_stride); + unsigned int thissad = + fn_ptr->sdf(what, what_stride, check_here, in_what_stride); if (thissad < bestsad) { thissad += mvsad_err_cost(x, &this_mv, &fcenter_mv, sad_per_bit); if (thissad < bestsad) { @@ -1772,8 +1736,7 @@ static int vector_match(int16_t *ref, int16_t *src, int bwl) { for (d = -8; d <= 8; d += 16) { int this_pos = offset + d; // check limit - if (this_pos < 0 || this_pos > bw) - continue; + if (this_pos < 0 || this_pos > bw) continue; this_sad = vpx_vector_var(&ref[this_pos], src, bwl); if (this_sad < best_sad) { best_sad = this_sad; @@ -1785,8 +1748,7 @@ static int vector_match(int16_t *ref, int16_t *src, int bwl) { for (d = -4; d <= 4; d += 8) { int this_pos = offset + d; // check limit - if (this_pos < 0 || this_pos > bw) - continue; + if (this_pos < 0 || this_pos > bw) continue; this_sad = vpx_vector_var(&ref[this_pos], src, bwl); if (this_sad < best_sad) { best_sad = this_sad; @@ -1798,8 +1760,7 @@ static int vector_match(int16_t *ref, int16_t *src, int bwl) { for (d = -2; d <= 2; d += 4) { int this_pos = offset + d; // check limit - if (this_pos < 0 || this_pos > bw) - continue; + if (this_pos < 0 || this_pos > bw) continue; this_sad = 
vpx_vector_var(&ref[this_pos], src, bwl); if (this_sad < best_sad) { best_sad = this_sad; @@ -1811,8 +1772,7 @@ static int vector_match(int16_t *ref, int16_t *src, int bwl) { for (d = -1; d <= 1; d += 2) { int this_pos = offset + d; // check limit - if (this_pos < 0 || this_pos > bw) - continue; + if (this_pos < 0 || this_pos > bw) continue; this_sad = vpx_vector_var(&ref[this_pos], src, bwl); if (this_sad < best_sad) { best_sad = this_sad; @@ -1824,15 +1784,15 @@ static int vector_match(int16_t *ref, int16_t *src, int bwl) { } static const MV search_pos[4] = { - {-1, 0}, {0, -1}, {0, 1}, {1, 0}, + { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 }, }; unsigned int vp9_int_pro_motion_estimation(const VP9_COMP *cpi, MACROBLOCK *x, - BLOCK_SIZE bsize, - int mi_row, int mi_col) { + BLOCK_SIZE bsize, int mi_row, + int mi_col) { MACROBLOCKD *xd = &x->e_mbd; MODE_INFO *mi = xd->mi[0]; - struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}}; + struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } }; DECLARE_ALIGNED(16, int16_t, hbuf[128]); DECLARE_ALIGNED(16, int16_t, vbuf[128]); DECLARE_ALIGNED(16, int16_t, src_hbuf[64]); @@ -1857,8 +1817,7 @@ unsigned int vp9_int_pro_motion_estimation(const VP9_COMP *cpi, MACROBLOCK *x, // Swap out the reference frame for a version that's been scaled to // match the resolution of the current frame, allowing the existing // motion search code to be used without additional modifications. 
- for (i = 0; i < MAX_MB_PLANE; i++) - backup_yv12[i] = xd->plane[i].pre[0]; + for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0]; vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL); } @@ -1872,8 +1831,7 @@ unsigned int vp9_int_pro_motion_estimation(const VP9_COMP *cpi, MACROBLOCK *x, if (scaled_ref_frame) { int i; - for (i = 0; i < MAX_MB_PLANE; i++) - xd->plane[i].pre[0] = backup_yv12[i]; + for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i]; } return this_sad; } @@ -1914,11 +1872,8 @@ unsigned int vp9_int_pro_motion_estimation(const VP9_COMP *cpi, MACROBLOCK *x, best_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride, ref_buf, ref_stride); { - const uint8_t * const pos[4] = { - ref_buf - ref_stride, - ref_buf - 1, - ref_buf + 1, - ref_buf + ref_stride, + const uint8_t *const pos[4] = { + ref_buf - ref_stride, ref_buf - 1, ref_buf + 1, ref_buf + ref_stride, }; cpi->fn_ptr[bsize].sdx4df(src_buf, src_stride, pos, ref_stride, this_sad); @@ -1944,8 +1899,7 @@ unsigned int vp9_int_pro_motion_estimation(const VP9_COMP *cpi, MACROBLOCK *x, ref_buf = xd->plane[0].pre[0].buf + this_mv.row * ref_stride + this_mv.col; - tmp_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride, - ref_buf, ref_stride); + tmp_sad = cpi->fn_ptr[bsize].sdf(src_buf, src_stride, ref_buf, ref_stride); if (best_sad > tmp_sad) { *tmp_mv = this_mv; best_sad = tmp_sad; @@ -1956,8 +1910,7 @@ unsigned int vp9_int_pro_motion_estimation(const VP9_COMP *cpi, MACROBLOCK *x, if (scaled_ref_frame) { int i; - for (i = 0; i < MAX_MB_PLANE; i++) - xd->plane[i].pre[0] = backup_yv12[i]; + for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i]; } return best_sad; @@ -1967,25 +1920,22 @@ unsigned int vp9_int_pro_motion_estimation(const VP9_COMP *cpi, MACROBLOCK *x, /* do_refine: If last step (1-away) of n-step search doesn't pick the center point as the best match, we will do a final 1-away diamond refining search */ -static int 
full_pixel_diamond(const VP9_COMP *cpi, MACROBLOCK *x, - MV *mvp_full, int step_param, - int sadpb, int further_steps, int do_refine, - int *cost_list, +static int full_pixel_diamond(const VP9_COMP *cpi, MACROBLOCK *x, MV *mvp_full, + int step_param, int sadpb, int further_steps, + int do_refine, int *cost_list, const vp9_variance_fn_ptr_t *fn_ptr, const MV *ref_mv, MV *dst_mv) { MV temp_mv; int thissme, n, num00 = 0; int bestsme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv, - step_param, sadpb, &n, - fn_ptr, ref_mv); + step_param, sadpb, &n, fn_ptr, ref_mv); if (bestsme < INT_MAX) bestsme = vp9_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1); *dst_mv = temp_mv; // If there won't be more n-step search, check to see if refining search is // needed. - if (n > further_steps) - do_refine = 0; + if (n > further_steps) do_refine = 0; while (n < further_steps) { ++n; @@ -1994,14 +1944,13 @@ static int full_pixel_diamond(const VP9_COMP *cpi, MACROBLOCK *x, num00--; } else { thissme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv, - step_param + n, sadpb, &num00, - fn_ptr, ref_mv); + step_param + n, sadpb, &num00, fn_ptr, + ref_mv); if (thissme < INT_MAX) thissme = vp9_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1); // check to see if refining search is needed. 
- if (num00 > further_steps - n) - do_refine = 0; + if (num00 > further_steps - n) do_refine = 0; if (thissme < bestsme) { bestsme = thissme; @@ -2014,8 +1963,8 @@ static int full_pixel_diamond(const VP9_COMP *cpi, MACROBLOCK *x, if (do_refine) { const int search_range = 8; MV best_mv = *dst_mv; - thissme = vp9_refining_search_sad(x, &best_mv, sadpb, search_range, - fn_ptr, ref_mv); + thissme = vp9_refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr, + ref_mv); if (thissme < INT_MAX) thissme = vp9_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1); if (thissme < bestsme) { @@ -2037,12 +1986,12 @@ static int full_pixel_diamond(const VP9_COMP *cpi, MACROBLOCK *x, // Runs an limited range exhaustive mesh search using a pattern set // according to the encode speed profile. static int full_pixel_exhaustive(VP9_COMP *cpi, MACROBLOCK *x, - MV *centre_mv_full, int sadpb, int *cost_list, + MV *centre_mv_full, int sadpb, int *cost_list, const vp9_variance_fn_ptr_t *fn_ptr, const MV *ref_mv, MV *dst_mv) { const SPEED_FEATURES *const sf = &cpi->sf; - MV temp_mv = {centre_mv_full->row, centre_mv_full->col}; - MV f_ref_mv = {ref_mv->row >> 3, ref_mv->col >> 3}; + MV temp_mv = { centre_mv_full->row, centre_mv_full->col }; + MV f_ref_mv = { ref_mv->row >> 3, ref_mv->col >> 3 }; int bestsme; int i; int interval = sf->mesh_patterns[0].interval; @@ -2053,8 +2002,8 @@ static int full_pixel_exhaustive(VP9_COMP *cpi, MACROBLOCK *x, ++(*x->ex_search_count_ptr); // Trap illegal values for interval and range for this function. 
- if ((range < MIN_RANGE) || (range > MAX_RANGE) || - (interval < MIN_INTERVAL) || (interval > range)) + if ((range < MIN_RANGE) || (range > MAX_RANGE) || (interval < MIN_INTERVAL) || + (interval > range)) return INT_MAX; baseline_interval_divisor = range / interval; @@ -2066,21 +2015,19 @@ static int full_pixel_exhaustive(VP9_COMP *cpi, MACROBLOCK *x, interval = VPXMAX(interval, range / baseline_interval_divisor); // initial search - bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv, range, - interval, sadpb, fn_ptr, &temp_mv); + bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv, range, interval, + sadpb, fn_ptr, &temp_mv); if ((interval > MIN_INTERVAL) && (range > MIN_RANGE)) { // Progressive searches with range and step size decreasing each time // till we reach a step size of 1. Then break out. for (i = 1; i < MAX_MESH_STEP; ++i) { // First pass with coarser step and longer range - bestsme = exhuastive_mesh_search(x, &f_ref_mv, &temp_mv, - sf->mesh_patterns[i].range, - sf->mesh_patterns[i].interval, - sadpb, fn_ptr, &temp_mv); + bestsme = exhuastive_mesh_search( + x, &f_ref_mv, &temp_mv, sf->mesh_patterns[i].range, + sf->mesh_patterns[i].interval, sadpb, fn_ptr, &temp_mv); - if (sf->mesh_patterns[i].interval == 1) - break; + if (sf->mesh_patterns[i].interval == 1) break; } } @@ -2103,22 +2050,24 @@ int vp9_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv, const MACROBLOCKD *const xd = &x->e_mbd; const struct buf_2d *const what = &x->plane[0].src; const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min); - const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max); - const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min); - const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max); - const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - int best_sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, ref_mv), 
in_what->stride) + + const int row_min = VPXMAX(ref_mv->row - distance, x->mv_limits.row_min); + const int row_max = VPXMIN(ref_mv->row + distance, x->mv_limits.row_max); + const int col_min = VPXMAX(ref_mv->col - distance, x->mv_limits.col_min); + const int col_max = VPXMIN(ref_mv->col + distance, x->mv_limits.col_max); + const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 }; + int best_sad = + fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv), + in_what->stride) + mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit); *best_mv = *ref_mv; for (r = row_min; r < row_max; ++r) { for (c = col_min; c < col_max; ++c) { - const MV mv = {r, c}; - const int sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &mv), in_what->stride) + - mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit); + const MV mv = { r, c }; + const int sad = + fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, &mv), + in_what->stride) + + mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit); if (sad < best_sad) { best_sad = sad; *best_mv = mv; @@ -2136,13 +2085,14 @@ int vp9_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv, const MACROBLOCKD *const xd = &x->e_mbd; const struct buf_2d *const what = &x->plane[0].src; const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min); - const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max); - const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min); - const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max); - const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, ref_mv), in_what->stride) + + const int row_min = VPXMAX(ref_mv->row - distance, x->mv_limits.row_min); + const int row_max = VPXMIN(ref_mv->row + distance, x->mv_limits.row_max); + const int col_min = VPXMAX(ref_mv->col - distance, 
x->mv_limits.col_min); + const int col_max = VPXMIN(ref_mv->col + distance, x->mv_limits.col_max); + const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 }; + unsigned int best_sad = + fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv), + in_what->stride) + mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit); *best_mv = *ref_mv; @@ -2161,7 +2111,7 @@ int vp9_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv, for (i = 0; i < 3; ++i) { unsigned int sad = sads[i]; if (sad < best_sad) { - const MV mv = {r, c}; + const MV mv = { r, c }; sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit); if (sad < best_sad) { best_sad = sad; @@ -2175,10 +2125,10 @@ int vp9_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv, } while (c < col_max) { - unsigned int sad = fn_ptr->sdf(what->buf, what->stride, - check_here, in_what->stride); + unsigned int sad = + fn_ptr->sdf(what->buf, what->stride, check_here, in_what->stride); if (sad < best_sad) { - const MV mv = {r, c}; + const MV mv = { r, c }; sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit); if (sad < best_sad) { best_sad = sad; @@ -2201,13 +2151,14 @@ int vp9_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv, const MACROBLOCKD *const xd = &x->e_mbd; const struct buf_2d *const what = &x->plane[0].src; const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min); - const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max); - const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min); - const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max); - const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, ref_mv), in_what->stride) + + const int row_min = VPXMAX(ref_mv->row - distance, x->mv_limits.row_min); + const int row_max = VPXMIN(ref_mv->row + distance, x->mv_limits.row_max); + const int 
col_min = VPXMAX(ref_mv->col - distance, x->mv_limits.col_min); + const int col_max = VPXMIN(ref_mv->col + distance, x->mv_limits.col_max); + const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 }; + unsigned int best_sad = + fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv), + in_what->stride) + mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit); *best_mv = *ref_mv; @@ -2226,7 +2177,7 @@ int vp9_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv, for (i = 0; i < 8; ++i) { unsigned int sad = sads[i]; if (sad < best_sad) { - const MV mv = {r, c}; + const MV mv = { r, c }; sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit); if (sad < best_sad) { best_sad = sad; @@ -2250,7 +2201,7 @@ int vp9_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv, for (i = 0; i < 3; ++i) { unsigned int sad = sads[i]; if (sad < best_sad) { - const MV mv = {r, c}; + const MV mv = { r, c }; sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit); if (sad < best_sad) { best_sad = sad; @@ -2264,10 +2215,10 @@ int vp9_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv, } while (c < col_max) { - unsigned int sad = fn_ptr->sdf(what->buf, what->stride, - check_here, in_what->stride); + unsigned int sad = + fn_ptr->sdf(what->buf, what->stride, check_here, in_what->stride); if (sad < best_sad) { - const MV mv = {r, c}; + const MV mv = { r, c }; sad += mvsad_err_cost(x, &mv, &fcenter_mv, sad_per_bit); if (sad < best_sad) { best_sad = sad; @@ -2282,44 +2233,40 @@ int vp9_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv, return best_sad; } -int vp9_refining_search_sad(const MACROBLOCK *x, - MV *ref_mv, int error_per_bit, +int vp9_refining_search_sad(const MACROBLOCK *x, MV *ref_mv, int error_per_bit, int search_range, const vp9_variance_fn_ptr_t *fn_ptr, const MV *center_mv) { const MACROBLOCKD *const xd = &x->e_mbd; - const MV neighbors[4] = {{ -1, 0}, {0, -1}, {0, 1}, {1, 0}}; + const MV neighbors[4] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 
} }; const struct buf_2d *const what = &x->plane[0].src; const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; + const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 }; const uint8_t *best_address = get_buf_from_mv(in_what, ref_mv); - unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride, best_address, - in_what->stride) + + unsigned int best_sad = + fn_ptr->sdf(what->buf, what->stride, best_address, in_what->stride) + mvsad_err_cost(x, ref_mv, &fcenter_mv, error_per_bit); int i, j; for (i = 0; i < search_range; i++) { int best_site = -1; - const int all_in = ((ref_mv->row - 1) > x->mv_row_min) & - ((ref_mv->row + 1) < x->mv_row_max) & - ((ref_mv->col - 1) > x->mv_col_min) & - ((ref_mv->col + 1) < x->mv_col_max); + const int all_in = ((ref_mv->row - 1) > x->mv_limits.row_min) & + ((ref_mv->row + 1) < x->mv_limits.row_max) & + ((ref_mv->col - 1) > x->mv_limits.col_min) & + ((ref_mv->col + 1) < x->mv_limits.col_max); if (all_in) { unsigned int sads[4]; - const uint8_t *const positions[4] = { - best_address - in_what->stride, - best_address - 1, - best_address + 1, - best_address + in_what->stride - }; + const uint8_t *const positions[4] = { best_address - in_what->stride, + best_address - 1, best_address + 1, + best_address + in_what->stride }; fn_ptr->sdx4df(what->buf, what->stride, positions, in_what->stride, sads); for (j = 0; j < 4; ++j) { if (sads[j] < best_sad) { - const MV mv = {ref_mv->row + neighbors[j].row, - ref_mv->col + neighbors[j].col}; + const MV mv = { ref_mv->row + neighbors[j].row, + ref_mv->col + neighbors[j].col }; sads[j] += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit); if (sads[j] < best_sad) { best_sad = sads[j]; @@ -2329,13 +2276,13 @@ int vp9_refining_search_sad(const MACROBLOCK *x, } } else { for (j = 0; j < 4; ++j) { - const MV mv = {ref_mv->row + neighbors[j].row, - ref_mv->col + neighbors[j].col}; + const MV mv = { ref_mv->row + 
neighbors[j].row, + ref_mv->col + neighbors[j].col }; - if (is_mv_in(x, &mv)) { - unsigned int sad = fn_ptr->sdf(what->buf, what->stride, - get_buf_from_mv(in_what, &mv), - in_what->stride); + if (is_mv_in(&x->mv_limits, &mv)) { + unsigned int sad = + fn_ptr->sdf(what->buf, what->stride, + get_buf_from_mv(in_what, &mv), in_what->stride); if (sad < best_sad) { sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit); if (sad < best_sad) { @@ -2361,20 +2308,19 @@ int vp9_refining_search_sad(const MACROBLOCK *x, // This function is called when we do joint motion search in comp_inter_inter // mode. -int vp9_refining_search_8p_c(const MACROBLOCK *x, - MV *ref_mv, int error_per_bit, +int vp9_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv, int error_per_bit, int search_range, const vp9_variance_fn_ptr_t *fn_ptr, - const MV *center_mv, - const uint8_t *second_pred) { - const MV neighbors[8] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}, - {-1, -1}, {1, -1}, {-1, 1}, {1, 1}}; + const MV *center_mv, const uint8_t *second_pred) { + const MV neighbors[8] = { { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 }, + { -1, -1 }, { 1, -1 }, { -1, 1 }, { 1, 1 } }; const MACROBLOCKD *const xd = &x->e_mbd; const struct buf_2d *const what = &x->plane[0].src; const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; - unsigned int best_sad = fn_ptr->sdaf(what->buf, what->stride, - get_buf_from_mv(in_what, ref_mv), in_what->stride, second_pred) + + const MV fcenter_mv = { center_mv->row >> 3, center_mv->col >> 3 }; + unsigned int best_sad = + fn_ptr->sdaf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv), + in_what->stride, second_pred) + mvsad_err_cost(x, ref_mv, &fcenter_mv, error_per_bit); int i, j; @@ -2382,12 +2328,13 @@ int vp9_refining_search_8p_c(const MACROBLOCK *x, int best_site = -1; for (j = 0; j < 8; ++j) { - const MV mv = {ref_mv->row + neighbors[j].row, - ref_mv->col + neighbors[j].col}; + const MV mv = { 
ref_mv->row + neighbors[j].row, + ref_mv->col + neighbors[j].col }; - if (is_mv_in(x, &mv)) { - unsigned int sad = fn_ptr->sdaf(what->buf, what->stride, - get_buf_from_mv(in_what, &mv), in_what->stride, second_pred); + if (is_mv_in(&x->mv_limits, &mv)) { + unsigned int sad = + fn_ptr->sdaf(what->buf, what->stride, get_buf_from_mv(in_what, &mv), + in_what->stride, second_pred); if (sad < best_sad) { sad += mvsad_err_cost(x, &mv, &fcenter_mv, error_per_bit); if (sad < best_sad) { @@ -2411,20 +2358,18 @@ int vp9_refining_search_8p_c(const MACROBLOCK *x, #define MIN_EX_SEARCH_LIMIT 128 static int is_exhaustive_allowed(VP9_COMP *cpi, MACROBLOCK *x) { const SPEED_FEATURES *const sf = &cpi->sf; - const int max_ex = VPXMAX(MIN_EX_SEARCH_LIMIT, - (*x->m_search_count_ptr * sf->max_exaustive_pct) / 100); + const int max_ex = + VPXMAX(MIN_EX_SEARCH_LIMIT, + (*x->m_search_count_ptr * sf->max_exaustive_pct) / 100); return sf->allow_exhaustive_searches && - (sf->exhaustive_searches_thresh < INT_MAX) && - (*x->ex_search_count_ptr <= max_ex) && - !cpi->rc.is_src_frame_alt_ref; + (sf->exhaustive_searches_thresh < INT_MAX) && + (*x->ex_search_count_ptr <= max_ex) && !cpi->rc.is_src_frame_alt_ref; } -int vp9_full_pixel_search(VP9_COMP *cpi, MACROBLOCK *x, - BLOCK_SIZE bsize, MV *mvp_full, - int step_param, int error_per_bit, - int *cost_list, - const MV *ref_mv, MV *tmp_mv, +int vp9_full_pixel_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, + MV *mvp_full, int step_param, int error_per_bit, + int *cost_list, const MV *ref_mv, MV *tmp_mv, int var_max, int rd) { const SPEED_FEATURES *const sf = &cpi->sf; const SEARCH_METHODS method = sf->mv.search_method; @@ -2451,35 +2396,34 @@ int vp9_full_pixel_search(VP9_COMP *cpi, MACROBLOCK *x, cost_list, fn_ptr, 1, ref_mv, tmp_mv); break; case HEX: - var = hex_search(x, mvp_full, step_param, error_per_bit, 1, - cost_list, fn_ptr, 1, ref_mv, tmp_mv); + var = hex_search(x, mvp_full, step_param, error_per_bit, 1, cost_list, + fn_ptr, 1, 
ref_mv, tmp_mv); break; case SQUARE: - var = square_search(x, mvp_full, step_param, error_per_bit, 1, - cost_list, fn_ptr, 1, ref_mv, tmp_mv); + var = square_search(x, mvp_full, step_param, error_per_bit, 1, cost_list, + fn_ptr, 1, ref_mv, tmp_mv); break; case BIGDIA: - var = bigdia_search(x, mvp_full, step_param, error_per_bit, 1, - cost_list, fn_ptr, 1, ref_mv, tmp_mv); + var = bigdia_search(x, mvp_full, step_param, error_per_bit, 1, cost_list, + fn_ptr, 1, ref_mv, tmp_mv); break; case NSTEP: var = full_pixel_diamond(cpi, x, mvp_full, step_param, error_per_bit, - MAX_MVSEARCH_STEPS - 1 - step_param, - 1, cost_list, fn_ptr, ref_mv, tmp_mv); + MAX_MVSEARCH_STEPS - 1 - step_param, 1, + cost_list, fn_ptr, ref_mv, tmp_mv); // Should we allow a follow on exhaustive search? if (is_exhaustive_allowed(cpi, x)) { int64_t exhuastive_thr = sf->exhaustive_searches_thresh; - exhuastive_thr >>= 8 - (b_width_log2_lookup[bsize] + - b_height_log2_lookup[bsize]); + exhuastive_thr >>= + 8 - (b_width_log2_lookup[bsize] + b_height_log2_lookup[bsize]); // Threshold variance for an exhaustive full search. if (var > exhuastive_thr) { - int var_ex; + int var_ex; MV tmp_mv_ex; - var_ex = full_pixel_exhaustive(cpi, x, tmp_mv, - error_per_bit, cost_list, fn_ptr, - ref_mv, &tmp_mv_ex); + var_ex = full_pixel_exhaustive(cpi, x, tmp_mv, error_per_bit, + cost_list, fn_ptr, ref_mv, &tmp_mv_ex); if (var_ex < var) { var = var_ex; @@ -2488,8 +2432,7 @@ int vp9_full_pixel_search(VP9_COMP *cpi, MACROBLOCK *x, } } break; - default: - assert(0 && "Invalid search method."); + default: assert(0 && "Invalid search method."); } if (method != NSTEP && rd && var < var_max) diff --git a/libs/libvpx/vp9/encoder/vp9_mcomp.h b/libs/libvpx/vp9/encoder/vp9_mcomp.h index 1c101f2e20..2726b9e230 100644 --- a/libs/libvpx/vp9/encoder/vp9_mcomp.h +++ b/libs/libvpx/vp9/encoder/vp9_mcomp.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP9_ENCODER_VP9_MCOMP_H_ #define VP9_ENCODER_VP9_MCOMP_H_ @@ -26,106 +25,88 @@ extern "C" { // Enable the use of motion vector in range [-1023, 1023]. #define MAX_FULL_PEL_VAL ((1 << (MAX_MVSEARCH_STEPS - 1)) - 1) // Maximum size of the first step in full pel units -#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS-1)) +#define MAX_FIRST_STEP (1 << (MAX_MVSEARCH_STEPS - 1)) // Allowed motion vector pixel distance outside image border // for Block_16x16 #define BORDER_MV_PIXELS_B16 (16 + VP9_INTERP_EXTEND) typedef struct search_site_config { // motion search sites - MV ss_mv[8 * MAX_MVSEARCH_STEPS]; // Motion vector - intptr_t ss_os[8 * MAX_MVSEARCH_STEPS]; // Offset + MV ss_mv[8 * MAX_MVSEARCH_STEPS]; // Motion vector + intptr_t ss_os[8 * MAX_MVSEARCH_STEPS]; // Offset int searches_per_step; int total_steps; } search_site_config; void vp9_init_dsmotion_compensation(search_site_config *cfg, int stride); -void vp9_init3smotion_compensation(search_site_config *cfg, int stride); +void vp9_init3smotion_compensation(search_site_config *cfg, int stride); -void vp9_set_mv_search_range(MACROBLOCK *x, const MV *mv); -int vp9_mv_bit_cost(const MV *mv, const MV *ref, - const int *mvjcost, int *mvcost[2], int weight); +void vp9_set_mv_search_range(MvLimits *mv_limits, const MV *mv); +int vp9_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost, + int *mvcost[2], int weight); // Utility to compute variance + MV rate cost for a given MV -int vp9_get_mvpred_var(const MACROBLOCK *x, - const MV *best_mv, const MV *center_mv, - const vp9_variance_fn_ptr_t *vfp, +int vp9_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv, + const MV *center_mv, const vp9_variance_fn_ptr_t *vfp, int use_mvcost); -int vp9_get_mvpred_av_var(const MACROBLOCK *x, - const MV *best_mv, const MV *center_mv, - const uint8_t *second_pred, - const vp9_variance_fn_ptr_t *vfp, - int use_mvcost); +int vp9_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv, + const MV *center_mv, const 
uint8_t *second_pred, + const vp9_variance_fn_ptr_t *vfp, int use_mvcost); struct VP9_COMP; struct SPEED_FEATURES; int vp9_init_search_range(int size); -int vp9_refining_search_sad(const struct macroblock *x, - struct mv *ref_mv, +int vp9_refining_search_sad(const struct macroblock *x, struct mv *ref_mv, int sad_per_bit, int distance, const struct vp9_variance_vtable *fn_ptr, const struct mv *center_mv); // Perform integral projection based motion estimation. unsigned int vp9_int_pro_motion_estimation(const struct VP9_COMP *cpi, - MACROBLOCK *x, - BLOCK_SIZE bsize, + MACROBLOCK *x, BLOCK_SIZE bsize, int mi_row, int mi_col); -typedef int (fractional_mv_step_fp) ( - const MACROBLOCK *x, - MV *bestmv, const MV *ref_mv, - int allow_hp, - int error_per_bit, - const vp9_variance_fn_ptr_t *vfp, +typedef uint32_t(fractional_mv_step_fp)( + const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp, + int error_per_bit, const vp9_variance_fn_ptr_t *vfp, int forced_stop, // 0 - full, 1 - qtr only, 2 - half only - int iters_per_step, - int *cost_list, - int *mvjcost, int *mvcost[2], - int *distortion, unsigned int *sse1, - const uint8_t *second_pred, - int w, int h); + int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2], + uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w, + int h); extern fractional_mv_step_fp vp9_find_best_sub_pixel_tree; extern fractional_mv_step_fp vp9_find_best_sub_pixel_tree_pruned; extern fractional_mv_step_fp vp9_find_best_sub_pixel_tree_pruned_more; extern fractional_mv_step_fp vp9_find_best_sub_pixel_tree_pruned_evenmore; +extern fractional_mv_step_fp vp9_skip_sub_pixel_tree; -typedef int (*vp9_full_search_fn_t)(const MACROBLOCK *x, - const MV *ref_mv, int sad_per_bit, - int distance, +typedef int (*vp9_full_search_fn_t)(const MACROBLOCK *x, const MV *ref_mv, + int sad_per_bit, int distance, const vp9_variance_fn_ptr_t *fn_ptr, const MV *center_mv, MV *best_mv); -typedef int (*vp9_refining_search_fn_t)(const 
MACROBLOCK *x, - MV *ref_mv, int sad_per_bit, - int distance, +typedef int (*vp9_refining_search_fn_t)(const MACROBLOCK *x, MV *ref_mv, + int sad_per_bit, int distance, const vp9_variance_fn_ptr_t *fn_ptr, const MV *center_mv); -typedef int (*vp9_diamond_search_fn_t)(const MACROBLOCK *x, - const search_site_config *cfg, - MV *ref_mv, MV *best_mv, - int search_param, int sad_per_bit, - int *num00, - const vp9_variance_fn_ptr_t *fn_ptr, - const MV *center_mv); +typedef int (*vp9_diamond_search_fn_t)( + const MACROBLOCK *x, const search_site_config *cfg, MV *ref_mv, MV *best_mv, + int search_param, int sad_per_bit, int *num00, + const vp9_variance_fn_ptr_t *fn_ptr, const MV *center_mv); -int vp9_refining_search_8p_c(const MACROBLOCK *x, - MV *ref_mv, int error_per_bit, +int vp9_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv, int error_per_bit, int search_range, const vp9_variance_fn_ptr_t *fn_ptr, const MV *center_mv, const uint8_t *second_pred); struct VP9_COMP; -int vp9_full_pixel_search(struct VP9_COMP *cpi, MACROBLOCK *x, - BLOCK_SIZE bsize, MV *mvp_full, - int step_param, int error_per_bit, - int *cost_list, - const MV *ref_mv, MV *tmp_mv, +int vp9_full_pixel_search(struct VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, + MV *mvp_full, int step_param, int error_per_bit, + int *cost_list, const MV *ref_mv, MV *tmp_mv, int var_max, int rd); #ifdef __cplusplus diff --git a/libs/libvpx/vp9/encoder/vp9_noise_estimate.c b/libs/libvpx/vp9/encoder/vp9_noise_estimate.c index e56cc9b017..0e5d8ade4a 100644 --- a/libs/libvpx/vp9/encoder/vp9_noise_estimate.c +++ b/libs/libvpx/vp9/encoder/vp9_noise_estimate.c @@ -21,9 +21,7 @@ #include "vp9/encoder/vp9_noise_estimate.h" #include "vp9/encoder/vp9_encoder.h" -void vp9_noise_estimate_init(NOISE_ESTIMATE *const ne, - int width, - int height) { +void vp9_noise_estimate_init(NOISE_ESTIMATE *const ne, int width, int height) { ne->enabled = 0; ne->level = kLowLow; ne->value = 0; @@ -39,27 +37,19 @@ void 
vp9_noise_estimate_init(NOISE_ESTIMATE *const ne, ne->num_frames_estimate = 20; } -int enable_noise_estimation(VP9_COMP *const cpi) { - // Enable noise estimation if denoising is on (and cyclic refresh, since - // noise estimate is currently using a struct defined in cyclic refresh). +static int enable_noise_estimation(VP9_COMP *const cpi) { +// Enable noise estimation if denoising is on. #if CONFIG_VP9_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity > 0 && - cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) - return 1; + if (cpi->oxcf.noise_sensitivity > 0) return 1; #endif // Only allow noise estimate under certain encoding mode. // Enabled for 1 pass CBR, speed >=5, and if resolution is same as original. // Not enabled for SVC mode and screen_content_mode. // Not enabled for low resolutions. - if (cpi->oxcf.pass == 0 && - cpi->oxcf.rc_mode == VPX_CBR && - cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && - cpi->oxcf.speed >= 5 && - cpi->resize_state == ORIG && - cpi->resize_pending == 0 && - !cpi->use_svc && - cpi->oxcf.content != VP9E_CONTENT_SCREEN && - cpi->common.width >= 640 && + if (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR && + cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cpi->oxcf.speed >= 5 && + cpi->resize_state == ORIG && cpi->resize_pending == 0 && !cpi->use_svc && + cpi->oxcf.content != VP9E_CONTENT_SCREEN && cpi->common.width >= 640 && cpi->common.height >= 480) return 1; else @@ -67,8 +57,8 @@ int enable_noise_estimation(VP9_COMP *const cpi) { } #if CONFIG_VP9_TEMPORAL_DENOISING -static void copy_frame(YV12_BUFFER_CONFIG * const dest, - const YV12_BUFFER_CONFIG * const src) { +static void copy_frame(YV12_BUFFER_CONFIG *const dest, + const YV12_BUFFER_CONFIG *const src) { int r; const uint8_t *srcbuf = src->y_buffer; uint8_t *destbuf = dest->y_buffer; @@ -91,7 +81,7 @@ NOISE_LEVEL vp9_noise_estimate_extract_level(NOISE_ESTIMATE *const ne) { } else { if (ne->value > ne->thresh) noise_level = kMedium; - else if (ne->value > (ne->thresh >> 1)) + else if 
(ne->value > ((9 * ne->thresh) >> 4)) noise_level = kLow; else noise_level = kLowLow; @@ -101,11 +91,10 @@ NOISE_LEVEL vp9_noise_estimate_extract_level(NOISE_ESTIMATE *const ne) { void vp9_update_noise_estimate(VP9_COMP *const cpi) { const VP9_COMMON *const cm = &cpi->common; - CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; NOISE_ESTIMATE *const ne = &cpi->noise_estimate; // Estimate of noise level every frame_period frames. - int frame_period = 10; - int thresh_consec_zeromv = 8; + int frame_period = 8; + int thresh_consec_zeromv = 6; unsigned int thresh_sum_diff = 100; unsigned int thresh_sum_spatial = (200 * 200) << 8; unsigned int thresh_spatial_var = (32 * 32) << 8; @@ -113,30 +102,35 @@ void vp9_update_noise_estimate(VP9_COMP *const cpi) { // Estimate is between current source and last source. YV12_BUFFER_CONFIG *last_source = cpi->Last_Source; #if CONFIG_VP9_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity > 0) - last_source = &cpi->denoiser.last_source; + if (cpi->oxcf.noise_sensitivity > 0) last_source = &cpi->denoiser.last_source; #endif ne->enabled = enable_noise_estimation(cpi); - if (!ne->enabled || - cm->current_video_frame % frame_period != 0 || - last_source == NULL || - ne->last_w != cm->width || + if (!ne->enabled || cm->current_video_frame % frame_period != 0 || + last_source == NULL || ne->last_w != cm->width || ne->last_h != cm->height) { #if CONFIG_VP9_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity > 0) - copy_frame(&cpi->denoiser.last_source, cpi->Source); + if (cpi->oxcf.noise_sensitivity > 0) + copy_frame(&cpi->denoiser.last_source, cpi->Source); #endif if (last_source != NULL) { ne->last_w = cm->width; ne->last_h = cm->height; } return; + } else if (cpi->rc.avg_frame_low_motion < 50) { + // Force noise estimation to 0 and denoiser off if content has high motion. 
+ ne->level = kLowLow; +#if CONFIG_VP9_TEMPORAL_DENOISING + if (cpi->oxcf.noise_sensitivity > 0) + vp9_denoiser_set_noise_level(&cpi->denoiser, ne->level); +#endif + return; } else { int num_samples = 0; uint64_t avg_est = 0; int bsize = BLOCK_16X16; - static const unsigned char const_source[16] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + static const unsigned char const_source[16] = { 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 }; // Loop over sub-sample of 16x16 blocks of frame, and for blocks that have // been encoded as zero/small mv at least x consecutive frames, compute // the variance to update estimate of noise in the source. @@ -153,7 +147,7 @@ void vp9_update_noise_estimate(VP9_COMP *const cpi) { for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) { for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) { int bl_index = mi_row * cm->mi_cols + mi_col; - if (cr->consec_zero_mv[bl_index] > thresh_consec_zeromv) + if (cpi->consec_zero_mv[bl_index] > thresh_consec_zeromv) num_low_motion++; } } @@ -162,8 +156,7 @@ void vp9_update_noise_estimate(VP9_COMP *const cpi) { for (mi_row = 0; mi_row < cm->mi_rows; mi_row++) { for (mi_col = 0; mi_col < cm->mi_cols; mi_col++) { // 16x16 blocks, 1/4 sample of frame. - if (mi_row % 4 == 0 && mi_col % 4 == 0 && - mi_row < cm->mi_rows - 1 && + if (mi_row % 4 == 0 && mi_col % 4 == 0 && mi_row < cm->mi_rows - 1 && mi_col < cm->mi_cols - 1) { int bl_index = mi_row * cm->mi_cols + mi_col; int bl_index1 = bl_index + 1; @@ -173,33 +166,34 @@ void vp9_update_noise_estimate(VP9_COMP *const cpi) { // been encoded as zero/low motion x (= thresh_consec_zeromv) frames // in a row. consec_zero_mv[] defined for 8x8 blocks, so consider all // 4 sub-blocks for 16x16 block. Also, avoid skin blocks. 
- int is_skin = vp9_compute_skin_block(src_y, - src_u, - src_v, - src_ystride, - src_uvstride, - bsize); + int consec_zeromv = + VPXMIN(cpi->consec_zero_mv[bl_index], + VPXMIN(cpi->consec_zero_mv[bl_index1], + VPXMIN(cpi->consec_zero_mv[bl_index2], + cpi->consec_zero_mv[bl_index3]))); + int is_skin = 0; + if (cpi->use_skin_detection) { + is_skin = + vp9_compute_skin_block(src_y, src_u, src_v, src_ystride, + src_uvstride, bsize, consec_zeromv, 0); + } if (frame_low_motion && - cr->consec_zero_mv[bl_index] > thresh_consec_zeromv && - cr->consec_zero_mv[bl_index1] > thresh_consec_zeromv && - cr->consec_zero_mv[bl_index2] > thresh_consec_zeromv && - cr->consec_zero_mv[bl_index3] > thresh_consec_zeromv && + cpi->consec_zero_mv[bl_index] > thresh_consec_zeromv && + cpi->consec_zero_mv[bl_index1] > thresh_consec_zeromv && + cpi->consec_zero_mv[bl_index2] > thresh_consec_zeromv && + cpi->consec_zero_mv[bl_index3] > thresh_consec_zeromv && !is_skin) { // Compute variance. unsigned int sse; - unsigned int variance = cpi->fn_ptr[bsize].vf(src_y, - src_ystride, - last_src_y, - last_src_ystride, - &sse); + unsigned int variance = cpi->fn_ptr[bsize].vf( + src_y, src_ystride, last_src_y, last_src_ystride, &sse); // Only consider this block as valid for noise measurement if the // average term (sse - variance = N * avg^{2}, N = 16X16) of the // temporal residual is small (avoid effects from lighting change). if ((sse - variance) < thresh_sum_diff) { unsigned int sse2; - const unsigned int spatial_variance = - cpi->fn_ptr[bsize].vf(src_y, src_ystride, const_source, - 0, &sse2); + const unsigned int spatial_variance = cpi->fn_ptr[bsize].vf( + src_y, src_ystride, const_source, 0, &sse2); // Avoid blocks with high brightness and high spatial variance. 
if ((sse2 - spatial_variance) < thresh_sum_spatial && spatial_variance < thresh_spatial_var) { diff --git a/libs/libvpx/vp9/encoder/vp9_noise_estimate.h b/libs/libvpx/vp9/encoder/vp9_noise_estimate.h index 826d125b5b..335cdbe643 100644 --- a/libs/libvpx/vp9/encoder/vp9_noise_estimate.h +++ b/libs/libvpx/vp9/encoder/vp9_noise_estimate.h @@ -23,12 +23,7 @@ extern "C" { #endif -typedef enum noise_level { - kLowLow, - kLow, - kMedium, - kHigh -} NOISE_LEVEL; +typedef enum noise_level { kLowLow, kLow, kMedium, kHigh } NOISE_LEVEL; typedef struct noise_estimate { int enabled; @@ -43,9 +38,7 @@ typedef struct noise_estimate { struct VP9_COMP; -void vp9_noise_estimate_init(NOISE_ESTIMATE *const ne, - int width, - int height); +void vp9_noise_estimate_init(NOISE_ESTIMATE *const ne, int width, int height); NOISE_LEVEL vp9_noise_estimate_extract_level(NOISE_ESTIMATE *const ne); diff --git a/libs/libvpx/vp9/encoder/vp9_picklpf.c b/libs/libvpx/vp9/encoder/vp9_picklpf.c index f6b1dfcd58..1583cc8abc 100644 --- a/libs/libvpx/vp9/encoder/vp9_picklpf.c +++ b/libs/libvpx/vp9/encoder/vp9_picklpf.c @@ -12,7 +12,7 @@ #include #include "./vpx_scale_rtcd.h" - +#include "vpx_dsp/psnr.h" #include "vpx_mem/vpx_mem.h" #include "vpx_ports/mem.h" @@ -33,10 +33,9 @@ static int get_max_filter_level(const VP9_COMP *cpi) { } } - static int64_t try_filter_frame(const YV12_BUFFER_CONFIG *sd, - VP9_COMP *const cpi, - int filt_level, int partial_frame) { + VP9_COMP *const cpi, int filt_level, + int partial_frame) { VP9_COMMON *const cm = &cpi->common; int64_t filt_err; @@ -44,20 +43,20 @@ static int64_t try_filter_frame(const YV12_BUFFER_CONFIG *sd, if (cpi->num_workers > 1) vp9_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane, - filt_level, 1, partial_frame, - cpi->workers, cpi->num_workers, &cpi->lf_row_sync); + filt_level, 1, partial_frame, cpi->workers, + cpi->num_workers, &cpi->lf_row_sync); else vp9_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level, 1, 
partial_frame); #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) { - filt_err = vp9_highbd_get_y_sse(sd, cm->frame_to_show); + filt_err = vpx_highbd_get_y_sse(sd, cm->frame_to_show); } else { - filt_err = vp9_get_y_sse(sd, cm->frame_to_show); + filt_err = vpx_get_y_sse(sd, cm->frame_to_show); } #else - filt_err = vp9_get_y_sse(sd, cm->frame_to_show); + filt_err = vpx_get_y_sse(sd, cm->frame_to_show); #endif // CONFIG_VP9_HIGHBITDEPTH // Re-instate the unfiltered frame @@ -78,8 +77,7 @@ static int search_filter_level(const YV12_BUFFER_CONFIG *sd, VP9_COMP *cpi, // Start the search at the previous frame filter level unless it is now out of // range. - int filt_mid = - clamp(lf->last_filt_level, min_filter_level, max_filter_level); + int filt_mid = clamp(lf->last_filt_level, min_filter_level, max_filter_level); int filter_step = filt_mid < 16 ? 4 : filt_mid / 4; // Sum squared error at each filter level int64_t ss_err[MAX_LOOP_FILTER + 1]; @@ -105,8 +103,7 @@ static int search_filter_level(const YV12_BUFFER_CONFIG *sd, VP9_COMP *cpi, bias = (bias * cpi->twopass.section_intra_rating) / 20; // yx, bias less for large block size - if (cm->tx_mode != ONLY_4X4) - bias >>= 1; + if (cm->tx_mode != ONLY_4X4) bias >>= 1; if (filt_direction <= 0 && filt_low != filt_mid) { // Get Low filter error score @@ -117,8 +114,7 @@ static int search_filter_level(const YV12_BUFFER_CONFIG *sd, VP9_COMP *cpi, // filter value. if ((ss_err[filt_low] - bias) < best_err) { // Was it actually better than the previous best? - if (ss_err[filt_low] < best_err) - best_err = ss_err[filt_low]; + if (ss_err[filt_low] < best_err) best_err = ss_err[filt_low]; filt_best = filt_low; } @@ -154,17 +150,16 @@ void vp9_pick_filter_level(const YV12_BUFFER_CONFIG *sd, VP9_COMP *cpi, VP9_COMMON *const cm = &cpi->common; struct loopfilter *const lf = &cm->lf; - lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0 - : cpi->oxcf.sharpness; + lf->sharpness_level = cm->frame_type == KEY_FRAME ? 
0 : cpi->oxcf.sharpness; if (method == LPF_PICK_MINIMAL_LPF && lf->filter_level) { - lf->filter_level = 0; + lf->filter_level = 0; } else if (method >= LPF_PICK_FROM_Q) { const int min_filter_level = 0; const int max_filter_level = get_max_filter_level(cpi); const int q = vp9_ac_quant(cm->base_qindex, 0, cm->bit_depth); - // These values were determined by linear fitting the result of the - // searched level, filt_guess = q * 0.316206 + 3.87252 +// These values were determined by linear fitting the result of the +// searched level, filt_guess = q * 0.316206 + 3.87252 #if CONFIG_VP9_HIGHBITDEPTH int filt_guess; switch (cm->bit_depth) { @@ -178,18 +173,18 @@ void vp9_pick_filter_level(const YV12_BUFFER_CONFIG *sd, VP9_COMP *cpi, filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 16242526, 22); break; default: - assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 " - "or VPX_BITS_12"); + assert(0 && + "bit_depth should be VPX_BITS_8, VPX_BITS_10 " + "or VPX_BITS_12"); return; } #else int filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18); #endif // CONFIG_VP9_HIGHBITDEPTH - if (cm->frame_type == KEY_FRAME) - filt_guess -= 4; + if (cm->frame_type == KEY_FRAME) filt_guess -= 4; lf->filter_level = clamp(filt_guess, min_filter_level, max_filter_level); } else { - lf->filter_level = search_filter_level(sd, cpi, - method == LPF_PICK_FROM_SUBIMAGE); + lf->filter_level = + search_filter_level(sd, cpi, method == LPF_PICK_FROM_SUBIMAGE); } } diff --git a/libs/libvpx/vp9/encoder/vp9_picklpf.h b/libs/libvpx/vp9/encoder/vp9_picklpf.h index 33c490f693..cecca058b4 100644 --- a/libs/libvpx/vp9/encoder/vp9_picklpf.h +++ b/libs/libvpx/vp9/encoder/vp9_picklpf.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VP9_ENCODER_VP9_PICKLPF_H_ #define VP9_ENCODER_VP9_PICKLPF_H_ diff --git a/libs/libvpx/vp9/encoder/vp9_pickmode.c b/libs/libvpx/vp9/encoder/vp9_pickmode.c index d861f80967..76d2611d89 100644 --- a/libs/libvpx/vp9/encoder/vp9_pickmode.c +++ b/libs/libvpx/vp9/encoder/vp9_pickmode.c @@ -16,6 +16,7 @@ #include "./vp9_rtcd.h" #include "./vpx_dsp_rtcd.h" +#include "vpx/vpx_codec.h" #include "vpx_dsp/vpx_dsp_common.h" #include "vpx_mem/vpx_mem.h" #include "vpx_ports/mem.h" @@ -40,13 +41,15 @@ typedef struct { int in_use; } PRED_BUFFER; -static int mv_refs_rt(VP9_COMP *cpi, const VP9_COMMON *cm, - const MACROBLOCK *x, - const MACROBLOCKD *xd, - const TileInfo *const tile, +static const int pos_shift_16x16[4][4] = { + { 9, 10, 13, 14 }, { 11, 12, 15, 16 }, { 17, 18, 21, 22 }, { 19, 20, 23, 24 } +}; + +static int mv_refs_rt(VP9_COMP *cpi, const VP9_COMMON *cm, const MACROBLOCK *x, + const MACROBLOCKD *xd, const TileInfo *const tile, MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame, - int_mv *mv_ref_list, int_mv *base_mv, - int mi_row, int mi_col, int use_base_mv) { + int_mv *mv_ref_list, int_mv *base_mv, int mi_row, + int mi_col, int use_base_mv) { const int *ref_sign_bias = cm->ref_frame_sign_bias; int i, refmv_count = 0; @@ -65,8 +68,8 @@ static int mv_refs_rt(VP9_COMP *cpi, const VP9_COMMON *cm, for (i = 0; i < 2; ++i) { const POSITION *const mv_ref = &mv_ref_search[i]; if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) { - const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row * - xd->mi_stride]; + const MODE_INFO *const candidate_mi = + xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride]; // Keep counts for entropy encoding. 
context_counter += mode_2_counter[candidate_mi->mode]; different_ref_found = 1; @@ -85,8 +88,8 @@ static int mv_refs_rt(VP9_COMP *cpi, const VP9_COMMON *cm, for (; i < MVREF_NEIGHBOURS && !refmv_count; ++i) { const POSITION *const mv_ref = &mv_ref_search[i]; if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) { - const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row * - xd->mi_stride]; + const MODE_INFO *const candidate_mi = + xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride]; different_ref_found = 1; if (candidate_mi->ref_frame[0] == ref_frame) @@ -101,8 +104,8 @@ static int mv_refs_rt(VP9_COMP *cpi, const VP9_COMMON *cm, for (i = 0; i < MVREF_NEIGHBOURS; ++i) { const POSITION *mv_ref = &mv_ref_search[i]; if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) { - const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row - * xd->mi_stride]; + const MODE_INFO *const candidate_mi = + xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride]; // If the candidate is INTRA we don't want to consider its mv. IF_DIFF_REF_FRAME_ADD_MV(candidate_mi, ref_frame, ref_sign_bias, @@ -115,17 +118,18 @@ static int mv_refs_rt(VP9_COMP *cpi, const VP9_COMMON *cm, ref_frame == LAST_FRAME) { // Get base layer mv. 
MV_REF *candidate = - &cm->prev_frame->mvs[(mi_col>>1) + (mi_row>>1) * (cm->mi_cols>>1)]; + &cm->prev_frame + ->mvs[(mi_col >> 1) + (mi_row >> 1) * (cm->mi_cols >> 1)]; if (candidate->mv[0].as_int != INVALID_MV) { - base_mv->as_mv.row = (candidate->mv[0].as_mv.row * 2); - base_mv->as_mv.col = (candidate->mv[0].as_mv.col * 2); + base_mv->as_mv.row = (candidate->mv[0].as_mv.row * 2); + base_mv->as_mv.col = (candidate->mv[0].as_mv.col * 2); clamp_mv_ref(&base_mv->as_mv, xd); } else { base_mv->as_int = INVALID_MV; } } - Done: +Done: x->mbmi_ext->mode_context[ref_frame] = counter_to_context[context_counter]; @@ -142,33 +146,29 @@ static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x, int64_t best_rd_sofar, int use_base_mv) { MACROBLOCKD *xd = &x->e_mbd; MODE_INFO *mi = xd->mi[0]; - struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}}; + struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } }; const int step_param = cpi->sf.mv.fullpel_search_step_param; const int sadpb = x->sadperbit16; MV mvp_full; const int ref = mi->ref_frame[0]; const MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv; MV center_mv; - int dis; + uint32_t dis; int rate_mode; - const int tmp_col_min = x->mv_col_min; - const int tmp_col_max = x->mv_col_max; - const int tmp_row_min = x->mv_row_min; - const int tmp_row_max = x->mv_row_max; + const MvLimits tmp_mv_limits = x->mv_limits; int rv = 0; int cost_list[5]; - const YV12_BUFFER_CONFIG *scaled_ref_frame = vp9_get_scaled_ref_frame(cpi, - ref); + const YV12_BUFFER_CONFIG *scaled_ref_frame = + vp9_get_scaled_ref_frame(cpi, ref); if (scaled_ref_frame) { int i; // Swap out the reference frame for a version that's been scaled to // match the resolution of the current frame, allowing the existing // motion search code to be used without additional modifications. 
- for (i = 0; i < MAX_MB_PLANE; i++) - backup_yv12[i] = xd->plane[i].pre[0]; + for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0]; vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL); } - vp9_set_mv_search_range(x, &ref_mv); + vp9_set_mv_search_range(&x->mv_limits, &ref_mv); assert(x->mv_best_ref_index[ref] <= 2); if (x->mv_best_ref_index[ref] < 2) @@ -185,53 +185,50 @@ static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x, center_mv = tmp_mv->as_mv; vp9_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb, - cond_cost_list(cpi, cost_list), - ¢er_mv, &tmp_mv->as_mv, INT_MAX, 0); + cond_cost_list(cpi, cost_list), ¢er_mv, + &tmp_mv->as_mv, INT_MAX, 0); - x->mv_col_min = tmp_col_min; - x->mv_col_max = tmp_col_max; - x->mv_row_min = tmp_row_min; - x->mv_row_max = tmp_row_max; + x->mv_limits = tmp_mv_limits; // calculate the bit cost on motion vector mvp_full.row = tmp_mv->as_mv.row * 8; mvp_full.col = tmp_mv->as_mv.col * 8; - *rate_mv = vp9_mv_bit_cost(&mvp_full, &ref_mv, - x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); + *rate_mv = vp9_mv_bit_cost(&mvp_full, &ref_mv, x->nmvjointcost, x->mvcost, + MV_COST_WEIGHT); - rate_mode = cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref]] - [INTER_OFFSET(NEWMV)]; - rv = !(RDCOST(x->rdmult, x->rddiv, (*rate_mv + rate_mode), 0) > - best_rd_sofar); + rate_mode = + cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref]][INTER_OFFSET(NEWMV)]; + rv = + !(RDCOST(x->rdmult, x->rddiv, (*rate_mv + rate_mode), 0) > best_rd_sofar); if (rv) { - cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv, - cpi->common.allow_high_precision_mv, - x->errorperbit, - &cpi->fn_ptr[bsize], - cpi->sf.mv.subpel_force_stop, - cpi->sf.mv.subpel_iters_per_step, - cond_cost_list(cpi, cost_list), - x->nmvjointcost, x->mvcost, - &dis, &x->pred_sse[ref], NULL, 0, 0); - *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, - x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); + const int subpel_force_stop = use_base_mv 
&& cpi->sf.base_mv_aggressive + ? 2 + : cpi->sf.mv.subpel_force_stop; + cpi->find_fractional_mv_step( + x, &tmp_mv->as_mv, &ref_mv, cpi->common.allow_high_precision_mv, + x->errorperbit, &cpi->fn_ptr[bsize], subpel_force_stop, + cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list), + x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0); + *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost, + x->mvcost, MV_COST_WEIGHT); } if (scaled_ref_frame) { int i; - for (i = 0; i < MAX_MB_PLANE; i++) - xd->plane[i].pre[0] = backup_yv12[i]; + for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i]; } return rv; } static void block_variance(const uint8_t *src, int src_stride, - const uint8_t *ref, int ref_stride, - int w, int h, unsigned int *sse, int *sum, - int block_size, unsigned int *sse8x8, - int *sum8x8, unsigned int *var8x8) { + const uint8_t *ref, int ref_stride, int w, int h, + unsigned int *sse, int *sum, int block_size, +#if CONFIG_VP9_HIGHBITDEPTH + int use_highbitdepth, vpx_bit_depth_t bd, +#endif + uint32_t *sse8x8, int *sum8x8, uint32_t *var8x8) { int i, j, k = 0; *sse = 0; @@ -239,12 +236,38 @@ static void block_variance(const uint8_t *src, int src_stride, for (i = 0; i < h; i += block_size) { for (j = 0; j < w; j += block_size) { +#if CONFIG_VP9_HIGHBITDEPTH + if (use_highbitdepth) { + switch (bd) { + case VPX_BITS_8: + vpx_highbd_8_get8x8var(src + src_stride * i + j, src_stride, + ref + ref_stride * i + j, ref_stride, + &sse8x8[k], &sum8x8[k]); + break; + case VPX_BITS_10: + vpx_highbd_10_get8x8var(src + src_stride * i + j, src_stride, + ref + ref_stride * i + j, ref_stride, + &sse8x8[k], &sum8x8[k]); + break; + case VPX_BITS_12: + vpx_highbd_12_get8x8var(src + src_stride * i + j, src_stride, + ref + ref_stride * i + j, ref_stride, + &sse8x8[k], &sum8x8[k]); + break; + } + } else { + vpx_get8x8var(src + src_stride * i + j, src_stride, + ref + ref_stride * i + j, ref_stride, &sse8x8[k], + &sum8x8[k]); + } 
+#else vpx_get8x8var(src + src_stride * i + j, src_stride, - ref + ref_stride * i + j, ref_stride, - &sse8x8[k], &sum8x8[k]); + ref + ref_stride * i + j, ref_stride, &sse8x8[k], + &sum8x8[k]); +#endif *sse += sse8x8[k]; *sum += sum8x8[k]; - var8x8[k] = sse8x8[k] - (((unsigned int)sum8x8[k] * sum8x8[k]) >> 6); + var8x8[k] = sse8x8[k] - (uint32_t)(((int64_t)sum8x8[k] * sum8x8[k]) >> 6); k++; } } @@ -262,12 +285,12 @@ static void calculate_variance(int bw, int bh, TX_SIZE tx_size, for (i = 0; i < nh; i += 2) { for (j = 0; j < nw; j += 2) { sse_o[k] = sse_i[i * nw + j] + sse_i[i * nw + j + 1] + - sse_i[(i + 1) * nw + j] + sse_i[(i + 1) * nw + j + 1]; + sse_i[(i + 1) * nw + j] + sse_i[(i + 1) * nw + j + 1]; sum_o[k] = sum_i[i * nw + j] + sum_i[i * nw + j + 1] + - sum_i[(i + 1) * nw + j] + sum_i[(i + 1) * nw + j + 1]; - var_o[k] = sse_o[k] - (((unsigned int)sum_o[k] * sum_o[k]) >> - (b_width_log2_lookup[unit_size] + - b_height_log2_lookup[unit_size] + 6)); + sum_i[(i + 1) * nw + j] + sum_i[(i + 1) * nw + j + 1]; + var_o[k] = sse_o[k] - (uint32_t)(((int64_t)sum_o[k] * sum_o[k]) >> + (b_width_log2_lookup[unit_size] + + b_height_log2_lookup[unit_size] + 6)); k++; } } @@ -297,17 +320,23 @@ static void model_rd_for_sb_y_large(VP9_COMP *cpi, BLOCK_SIZE bsize, const int bw = b_width_log2_lookup[bsize]; const int bh = b_height_log2_lookup[bsize]; const int num8x8 = 1 << (bw + bh - 2); - unsigned int sse8x8[64] = {0}; - int sum8x8[64] = {0}; - unsigned int var8x8[64] = {0}; + unsigned int sse8x8[64] = { 0 }; + int sum8x8[64] = { 0 }; + unsigned int var8x8[64] = { 0 }; TX_SIZE tx_size; int i, k; - +#if CONFIG_VP9_HIGHBITDEPTH + const vpx_bit_depth_t bd = cpi->common.bit_depth; +#endif // Calculate variance for whole partition, and also save 8x8 blocks' variance // to be used in following transform skipping test. 
block_variance(p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride, - 4 << bw, 4 << bh, &sse, &sum, 8, sse8x8, sum8x8, var8x8); - var = sse - (((int64_t)sum * sum) >> (bw + bh + 4)); + 4 << bw, 4 << bh, &sse, &sum, 8, +#if CONFIG_VP9_HIGHBITDEPTH + cpi->common.use_highbitdepth, bd, +#endif + sse8x8, sum8x8, var8x8); + var = sse - (unsigned int)(((int64_t)sum * sum) >> (bw + bh + 4)); *var_y = var; *sse_y = sse; @@ -334,24 +363,27 @@ static void model_rd_for_sb_y_large(VP9_COMP *cpi, BLOCK_SIZE bsize, // Evaluate if the partition block is a skippable block in Y plane. { - unsigned int sse16x16[16] = {0}; - int sum16x16[16] = {0}; - unsigned int var16x16[16] = {0}; + unsigned int sse16x16[16] = { 0 }; + int sum16x16[16] = { 0 }; + unsigned int var16x16[16] = { 0 }; const int num16x16 = num8x8 >> 2; - unsigned int sse32x32[4] = {0}; - int sum32x32[4] = {0}; - unsigned int var32x32[4] = {0}; + unsigned int sse32x32[4] = { 0 }; + int sum32x32[4] = { 0 }; + unsigned int var32x32[4] = { 0 }; const int num32x32 = num8x8 >> 4; int ac_test = 1; int dc_test = 1; - const int num = (tx_size == TX_8X8) ? num8x8 : - ((tx_size == TX_16X16) ? num16x16 : num32x32); - const unsigned int *sse_tx = (tx_size == TX_8X8) ? sse8x8 : - ((tx_size == TX_16X16) ? sse16x16 : sse32x32); - const unsigned int *var_tx = (tx_size == TX_8X8) ? var8x8 : - ((tx_size == TX_16X16) ? var16x16 : var32x32); + const int num = (tx_size == TX_8X8) + ? num8x8 + : ((tx_size == TX_16X16) ? num16x16 : num32x32); + const unsigned int *sse_tx = + (tx_size == TX_8X8) ? sse8x8 + : ((tx_size == TX_16X16) ? sse16x16 : sse32x32); + const unsigned int *var_tx = + (tx_size == TX_8X8) ? var8x8 + : ((tx_size == TX_16X16) ? 
var16x16 : var32x32); // Calculate variance if tx_size > TX_8X8 if (tx_size >= TX_16X16) @@ -380,15 +412,14 @@ static void model_rd_for_sb_y_large(VP9_COMP *cpi, BLOCK_SIZE bsize, if (ac_test) { x->skip_txfm[0] = SKIP_TXFM_AC_ONLY; - if (dc_test) - x->skip_txfm[0] = SKIP_TXFM_AC_DC; + if (dc_test) x->skip_txfm[0] = SKIP_TXFM_AC_DC; } else if (dc_test) { skip_dc = 1; } } if (x->skip_txfm[0] == SKIP_TXFM_AC_DC) { - int skip_uv[2] = {0}; + int skip_uv[2] = { 0 }; unsigned int var_uv[2]; unsigned int sse_uv[2]; @@ -405,14 +436,14 @@ static void model_rd_for_sb_y_large(VP9_COMP *cpi, BLOCK_SIZE bsize, const int uv_bw = b_width_log2_lookup[uv_bsize]; const int uv_bh = b_height_log2_lookup[uv_bsize]; const int sf = (uv_bw - b_width_log2_lookup[unit_size]) + - (uv_bh - b_height_log2_lookup[unit_size]); + (uv_bh - b_height_log2_lookup[unit_size]); const uint32_t uv_dc_thr = pd->dequant[0] * pd->dequant[0] >> (6 - sf); const uint32_t uv_ac_thr = pd->dequant[1] * pd->dequant[1] >> (6 - sf); int j = i - 1; vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, i); - var_uv[j] = cpi->fn_ptr[uv_bsize].vf(p->src.buf, p->src.stride, - pd->dst.buf, pd->dst.stride, &sse_uv[j]); + var_uv[j] = cpi->fn_ptr[uv_bsize].vf( + p->src.buf, p->src.stride, pd->dst.buf, pd->dst.stride, &sse_uv[j]); if ((var_uv[j] < uv_ac_thr || var_uv[j] == 0) && (sse_uv[j] - var_uv[j] < uv_dc_thr || sse_uv[j] == var_uv[j])) @@ -432,13 +463,8 @@ static void model_rd_for_sb_y_large(VP9_COMP *cpi, BLOCK_SIZE bsize, if (!skip_dc) { #if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize], - dc_quant >> (xd->bd - 5), &rate, &dist); - } else { - vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize], - dc_quant >> 3, &rate, &dist); - } + vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize], + dc_quant >> (xd->bd - 5), &rate, &dist); #else vp9_model_rd_from_var_lapndz(sse - var, 
num_pels_log2_lookup[bsize], dc_quant >> 3, &rate, &dist); @@ -454,26 +480,21 @@ static void model_rd_for_sb_y_large(VP9_COMP *cpi, BLOCK_SIZE bsize, } #if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], - ac_quant >> (xd->bd - 5), &rate, &dist); - } else { - vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], - ac_quant >> 3, &rate, &dist); - } -#else vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], - ac_quant >> 3, &rate, &dist); + ac_quant >> (xd->bd - 5), &rate, &dist); +#else + vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], ac_quant >> 3, + &rate, &dist); #endif // CONFIG_VP9_HIGHBITDEPTH *out_rate_sum += rate; *out_dist_sum += dist << 4; } -static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize, - MACROBLOCK *x, MACROBLOCKD *xd, - int *out_rate_sum, int64_t *out_dist_sum, - unsigned int *var_y, unsigned int *sse_y) { +static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x, + MACROBLOCKD *xd, int *out_rate_sum, + int64_t *out_dist_sum, unsigned int *var_y, + unsigned int *sse_y) { // Note our transform coeffs are 8 times an orthogonal transform. // Hence quantizer step is also 8 times. To get effective quantizer // we need to divide by 8 before sending to modeling function. @@ -514,8 +535,7 @@ static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize, // Evaluate if the partition block is a skippable block in Y plane. 
{ - const BLOCK_SIZE unit_size = - txsize_to_bsize[xd->mi[0]->tx_size]; + const BLOCK_SIZE unit_size = txsize_to_bsize[xd->mi[0]->tx_size]; const unsigned int num_blk_log2 = (b_width_log2_lookup[bsize] - b_width_log2_lookup[unit_size]) + (b_height_log2_lookup[bsize] - b_height_log2_lookup[unit_size]); @@ -530,8 +550,7 @@ static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize, if (sse_tx - var_tx < dc_thr || sse == var) x->skip_txfm[0] = SKIP_TXFM_AC_DC; } else { - if (sse_tx - var_tx < dc_thr || sse == var) - skip_dc = 1; + if (sse_tx - var_tx < dc_thr || sse == var) skip_dc = 1; } } @@ -543,13 +562,8 @@ static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize, if (!skip_dc) { #if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize], - dc_quant >> (xd->bd - 5), &rate, &dist); - } else { - vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize], - dc_quant >> 3, &rate, &dist); - } + vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize], + dc_quant >> (xd->bd - 5), &rate, &dist); #else vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bsize], dc_quant >> 3, &rate, &dist); @@ -565,16 +579,11 @@ static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize, } #if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], - ac_quant >> (xd->bd - 5), &rate, &dist); - } else { - vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], - ac_quant >> 3, &rate, &dist); - } -#else vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], - ac_quant >> 3, &rate, &dist); + ac_quant >> (xd->bd - 5), &rate, &dist); +#else + vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bsize], ac_quant >> 3, + &rate, &dist); #endif // CONFIG_VP9_HIGHBITDEPTH *out_rate_sum += rate; @@ -582,39 +591,46 @@ static void model_rd_for_sb_y(VP9_COMP *cpi, 
BLOCK_SIZE bsize, } #if CONFIG_VP9_HIGHBITDEPTH -static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, int64_t *dist, - int *skippable, int64_t *sse, int plane, - BLOCK_SIZE bsize, TX_SIZE tx_size) { +static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *this_rdc, + int *skippable, int64_t *sse, BLOCK_SIZE bsize, + TX_SIZE tx_size) { MACROBLOCKD *xd = &x->e_mbd; unsigned int var_y, sse_y; - (void)plane; + (void)tx_size; - model_rd_for_sb_y(cpi, bsize, x, xd, rate, dist, &var_y, &sse_y); + model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc->rate, &this_rdc->dist, &var_y, + &sse_y); *sse = INT_MAX; *skippable = 0; return; } #else -static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, int64_t *dist, - int *skippable, int64_t *sse, int plane, - BLOCK_SIZE bsize, TX_SIZE tx_size) { +static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *this_rdc, + int *skippable, int64_t *sse, BLOCK_SIZE bsize, + TX_SIZE tx_size) { MACROBLOCKD *xd = &x->e_mbd; - const struct macroblockd_plane *pd = &xd->plane[plane]; - const struct macroblock_plane *const p = &x->plane[plane]; + const struct macroblockd_plane *pd = &xd->plane[0]; + struct macroblock_plane *const p = &x->plane[0]; const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; const int step = 1 << (tx_size << 1); const int block_step = (1 << tx_size); int block = 0, r, c; - int shift = tx_size == TX_32X32 ? 0 : 2; - const int max_blocks_wide = num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 : - xd->mb_to_right_edge >> (5 + pd->subsampling_x)); - const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 0 : - xd->mb_to_bottom_edge >> (5 + pd->subsampling_y)); + const int max_blocks_wide = + num_4x4_w + (xd->mb_to_right_edge >= 0 ? 0 : xd->mb_to_right_edge >> 5); + const int max_blocks_high = + num_4x4_h + (xd->mb_to_bottom_edge >= 0 ? 
0 : xd->mb_to_bottom_edge >> 5); int eob_cost = 0; + const int bw = 4 * num_4x4_w; + const int bh = 4 * num_4x4_h; (void)cpi; - vp9_subtract_plane(x, bsize, plane); + + // The max tx_size passed in is TX_16X16. + assert(tx_size != TX_32X32); + + vpx_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride, + pd->dst.buf, pd->dst.stride); *skippable = 1; // Keep track of the row and column of the blocks we use so that we know // if we are in the unrestricted motion border. @@ -626,42 +642,33 @@ static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, int64_t *dist, tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block); tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); uint16_t *const eob = &p->eobs[block]; - const int diff_stride = 4 * num_4x4_blocks_wide_lookup[bsize]; + const int diff_stride = bw; const int16_t *src_diff; src_diff = &p->src_diff[(r * diff_stride + c) << 2]; switch (tx_size) { - case TX_32X32: - vpx_fdct32x32_rd(src_diff, coeff, diff_stride); - vp9_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin, - p->round_fp, p->quant_fp, p->quant_shift, - qcoeff, dqcoeff, pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; case TX_16X16: vpx_hadamard_16x16(src_diff, diff_stride, (int16_t *)coeff); vp9_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp, p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); + pd->dequant, eob, scan_order->scan, + scan_order->iscan); break; case TX_8X8: vpx_hadamard_8x8(src_diff, diff_stride, (int16_t *)coeff); vp9_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp, p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - pd->dequant, eob, - scan_order->scan, scan_order->iscan); + pd->dequant, eob, scan_order->scan, + scan_order->iscan); break; case TX_4X4: x->fwd_txm4x4(src_diff, coeff, diff_stride); vp9_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp, p->quant_fp, p->quant_shift, qcoeff, dqcoeff, - 
pd->dequant, eob, - scan_order->scan, scan_order->iscan); - break; - default: - assert(0); + pd->dequant, eob, scan_order->scan, + scan_order->iscan); break; + default: assert(0); break; } *skippable &= (*eob == 0); eob_cost += 1; @@ -670,18 +677,17 @@ static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, int64_t *dist, } } - if (*skippable && *sse < INT64_MAX) { - *rate = 0; - *dist = (*sse << 6) >> shift; - *sse = *dist; - return; + this_rdc->rate = 0; + if (*sse < INT64_MAX) { + *sse = (*sse << 6) >> 2; + if (*skippable) { + this_rdc->dist = *sse; + return; + } } block = 0; - *rate = 0; - *dist = 0; - if (*sse < INT64_MAX) - *sse = (*sse << 6) >> shift; + this_rdc->dist = 0; for (r = 0; r < max_blocks_high; r += block_step) { for (c = 0; c < num_4x4_w; c += block_step) { if (c < max_blocks_wide) { @@ -691,28 +697,27 @@ static void block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, int64_t *dist, uint16_t *const eob = &p->eobs[block]; if (*eob == 1) - *rate += (int)abs(qcoeff[0]); + this_rdc->rate += (int)abs(qcoeff[0]); else if (*eob > 1) - *rate += vpx_satd((const int16_t *)qcoeff, step << 4); + this_rdc->rate += vpx_satd((const int16_t *)qcoeff, step << 4); - *dist += vp9_block_error_fp(coeff, dqcoeff, step << 4) >> shift; + this_rdc->dist += vp9_block_error_fp(coeff, dqcoeff, step << 4) >> 2; } block += step; } } - if (*skippable == 0) { - *rate <<= (2 + VP9_PROB_COST_SHIFT); - *rate += (eob_cost << VP9_PROB_COST_SHIFT); - } + // If skippable is set, rate gets clobbered later. 
+ this_rdc->rate <<= (2 + VP9_PROB_COST_SHIFT); + this_rdc->rate += (eob_cost << VP9_PROB_COST_SHIFT); } #endif static void model_rd_for_sb_uv(VP9_COMP *cpi, BLOCK_SIZE plane_bsize, MACROBLOCK *x, MACROBLOCKD *xd, - int *out_rate_sum, int64_t *out_dist_sum, - unsigned int *var_y, unsigned int *sse_y, - int start_plane, int stop_plane) { + RD_COST *this_rdc, unsigned int *var_y, + unsigned int *sse_y, int start_plane, + int stop_plane) { // Note our transform coeffs are 8 times an orthogonal transform. // Hence quantizer step is also 8 times. To get effective quantizer // we need to divide by 8 before sending to modeling function. @@ -720,9 +725,16 @@ static void model_rd_for_sb_uv(VP9_COMP *cpi, BLOCK_SIZE plane_bsize, int rate; int64_t dist; int i; +#if CONFIG_VP9_HIGHBITDEPTH + uint64_t tot_var = *var_y; + uint64_t tot_sse = *sse_y; +#else + uint32_t tot_var = *var_y; + uint32_t tot_sse = *sse_y; +#endif - *out_rate_sum = 0; - *out_dist_sum = 0; + this_rdc->rate = 0; + this_rdc->dist = 0; for (i = start_plane; i <= stop_plane; ++i) { struct macroblock_plane *const p = &x->plane[i]; @@ -731,47 +743,44 @@ static void model_rd_for_sb_uv(VP9_COMP *cpi, BLOCK_SIZE plane_bsize, const uint32_t ac_quant = pd->dequant[1]; const BLOCK_SIZE bs = plane_bsize; unsigned int var; + if (!x->color_sensitivity[i - 1]) continue; - if (!x->color_sensitivity[i - 1]) - continue; + var = cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride, pd->dst.buf, + pd->dst.stride, &sse); + assert(sse >= var); + tot_var += var; + tot_sse += sse; - var = cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride, - pd->dst.buf, pd->dst.stride, &sse); - *var_y += var; - *sse_y += sse; - - #if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs], - dc_quant >> (xd->bd - 5), &rate, &dist); - } else { - vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs], - dc_quant >> 3, &rate, &dist); - } - #else +#if 
CONFIG_VP9_HIGHBITDEPTH + vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs], + dc_quant >> (xd->bd - 5), &rate, &dist); +#else vp9_model_rd_from_var_lapndz(sse - var, num_pels_log2_lookup[bs], dc_quant >> 3, &rate, &dist); - #endif // CONFIG_VP9_HIGHBITDEPTH +#endif // CONFIG_VP9_HIGHBITDEPTH - *out_rate_sum += rate >> 1; - *out_dist_sum += dist << 3; + this_rdc->rate += rate >> 1; + this_rdc->dist += dist << 3; - #if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs], - ac_quant >> (xd->bd - 5), &rate, &dist); - } else { - vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs], - ac_quant >> 3, &rate, &dist); - } - #else +#if CONFIG_VP9_HIGHBITDEPTH vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs], - ac_quant >> 3, &rate, &dist); - #endif // CONFIG_VP9_HIGHBITDEPTH + ac_quant >> (xd->bd - 5), &rate, &dist); +#else + vp9_model_rd_from_var_lapndz(var, num_pels_log2_lookup[bs], ac_quant >> 3, + &rate, &dist); +#endif // CONFIG_VP9_HIGHBITDEPTH - *out_rate_sum += rate; - *out_dist_sum += dist << 4; + this_rdc->rate += rate; + this_rdc->dist += dist << 4; } + +#if CONFIG_VP9_HIGHBITDEPTH + *var_y = tot_var > UINT32_MAX ? UINT32_MAX : (uint32_t)tot_var; + *sse_y = tot_sse > UINT32_MAX ? 
UINT32_MAX : (uint32_t)tot_sse; +#else + *var_y = tot_var; + *sse_y = tot_sse; +#endif } static int get_pred_buffer(PRED_BUFFER *p, int len) { @@ -787,15 +796,14 @@ static int get_pred_buffer(PRED_BUFFER *p, int len) { } static void free_pred_buffer(PRED_BUFFER *p) { - if (p != NULL) - p->in_use = 0; + if (p != NULL) p->in_use = 0; } -static void encode_breakout_test(VP9_COMP *cpi, MACROBLOCK *x, - BLOCK_SIZE bsize, int mi_row, int mi_col, +static void encode_breakout_test(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, + int mi_row, int mi_col, MV_REFERENCE_FRAME ref_frame, - PREDICTION_MODE this_mode, - unsigned int var_y, unsigned int sse_y, + PREDICTION_MODE this_mode, unsigned int var_y, + unsigned int sse_y, struct buf_2d yv12_mb[][MAX_MB_PLANE], int *rate, int64_t *dist) { MACROBLOCKD *xd = &x->e_mbd; @@ -807,10 +815,8 @@ static void encode_breakout_test(VP9_COMP *cpi, MACROBLOCK *x, // Skipping threshold for dc. unsigned int thresh_dc; int motion_low = 1; - if (mi->mv[0].as_mv.row > 64 || - mi->mv[0].as_mv.row < -64 || - mi->mv[0].as_mv.col > 64 || - mi->mv[0].as_mv.col < -64) + if (mi->mv[0].as_mv.row > 64 || mi->mv[0].as_mv.row < -64 || + mi->mv[0].as_mv.col > 64 || mi->mv[0].as_mv.col < -64) motion_low = 0; if (x->encode_breakout > 0 && motion_low == 1) { // Set a maximum for threshold to avoid big PSNR loss in low bit rate @@ -867,17 +873,15 @@ static void encode_breakout_test(VP9_COMP *cpi, MACROBLOCK *x, vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, bsize); } - var_u = cpi->fn_ptr[uv_size].vf(x->plane[1].src.buf, - x->plane[1].src.stride, + var_u = cpi->fn_ptr[uv_size].vf(x->plane[1].src.buf, x->plane[1].src.stride, xd->plane[1].dst.buf, xd->plane[1].dst.stride, &sse_u); // U skipping condition checking if (((var_u << 2) <= thresh_ac_uv) && (sse_u - var_u <= thresh_dc_uv)) { - var_v = cpi->fn_ptr[uv_size].vf(x->plane[2].src.buf, - x->plane[2].src.stride, - xd->plane[2].dst.buf, - xd->plane[2].dst.stride, &sse_v); + var_v = 
cpi->fn_ptr[uv_size].vf( + x->plane[2].src.buf, x->plane[2].src.stride, xd->plane[2].dst.buf, + xd->plane[2].dst.stride, &sse_v); // V skipping condition checking if (((var_v << 2) <= thresh_ac_uv) && (sse_v - var_v <= thresh_dc_uv)) { @@ -906,13 +910,14 @@ struct estimate_block_intra_args { VP9_COMP *cpi; MACROBLOCK *x; PREDICTION_MODE mode; - int rate; - int64_t dist; + int skippable; + RD_COST *rdc; }; -static void estimate_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { - struct estimate_block_intra_args* const args = arg; +static void estimate_block_intra(int plane, int block, int row, int col, + BLOCK_SIZE plane_bsize, TX_SIZE tx_size, + void *arg) { + struct estimate_block_intra_args *const args = arg; VP9_COMP *const cpi = args->cpi; MACROBLOCK *const x = args->x; MACROBLOCKD *const xd = &x->e_mbd; @@ -923,86 +928,74 @@ static void estimate_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, uint8_t *const dst_buf_base = pd->dst.buf; const int src_stride = p->src.stride; const int dst_stride = pd->dst.stride; - int i, j; - int rate; - int64_t dist; + RD_COST this_rdc; - txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &i, &j); + (void)block; - p->src.buf = &src_buf_base[4 * (j * src_stride + i)]; - pd->dst.buf = &dst_buf_base[4 * (j * dst_stride + i)]; + p->src.buf = &src_buf_base[4 * (row * src_stride + col)]; + pd->dst.buf = &dst_buf_base[4 * (row * dst_stride + col)]; // Use source buffer as an approximation for the fully reconstructed buffer. - vp9_predict_intra_block(xd, b_width_log2_lookup[plane_bsize], - tx_size, args->mode, - x->skip_encode ? p->src.buf : pd->dst.buf, - x->skip_encode ? src_stride : dst_stride, - pd->dst.buf, dst_stride, - i, j, plane); + vp9_predict_intra_block(xd, b_width_log2_lookup[plane_bsize], tx_size, + args->mode, x->skip_encode ? p->src.buf : pd->dst.buf, + x->skip_encode ? 
src_stride : dst_stride, pd->dst.buf, + dst_stride, col, row, plane); if (plane == 0) { int64_t this_sse = INT64_MAX; - int is_skippable; // TODO(jingning): This needs further refactoring. - block_yrd(cpi, x, &rate, &dist, &is_skippable, &this_sse, 0, - bsize_tx, VPXMIN(tx_size, TX_16X16)); - x->skip_txfm[0] = is_skippable; - // TODO(jingning): Skip is signalled per prediciton block not per tx block. - rate += vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), is_skippable); + block_yrd(cpi, x, &this_rdc, &args->skippable, &this_sse, bsize_tx, + VPXMIN(tx_size, TX_16X16)); } else { unsigned int var = 0; unsigned int sse = 0; - model_rd_for_sb_uv(cpi, plane_bsize, x, xd, &rate, &dist, &var, &sse, - plane, plane); + model_rd_for_sb_uv(cpi, plane_bsize, x, xd, &this_rdc, &var, &sse, plane, + plane); } p->src.buf = src_buf_base; pd->dst.buf = dst_buf_base; - args->rate += rate; - args->dist += dist; + args->rdc->rate += this_rdc.rate; + args->rdc->dist += this_rdc.dist; } static const THR_MODES mode_idx[MAX_REF_FRAMES - 1][4] = { - {THR_DC, THR_V_PRED, THR_H_PRED, THR_TM}, - {THR_NEARESTMV, THR_NEARMV, THR_ZEROMV, THR_NEWMV}, - {THR_NEARESTG, THR_NEARG, THR_ZEROG, THR_NEWG}, + { THR_DC, THR_V_PRED, THR_H_PRED, THR_TM }, + { THR_NEARESTMV, THR_NEARMV, THR_ZEROMV, THR_NEWMV }, + { THR_NEARESTG, THR_NEARG, THR_ZEROG, THR_NEWG }, }; -static const PREDICTION_MODE intra_mode_list[] = { - DC_PRED, V_PRED, H_PRED, TM_PRED -}; +static const PREDICTION_MODE intra_mode_list[] = { DC_PRED, V_PRED, H_PRED, + TM_PRED }; static int mode_offset(const PREDICTION_MODE mode) { if (mode >= NEARESTMV) { return INTER_OFFSET(mode); } else { switch (mode) { - case DC_PRED: - return 0; - case V_PRED: - return 1; - case H_PRED: - return 2; - case TM_PRED: - return 3; - default: - return -1; + case DC_PRED: return 0; + case V_PRED: return 1; + case H_PRED: return 2; + case TM_PRED: return 3; + default: return -1; } } } -static INLINE void update_thresh_freq_fact(VP9_COMP *cpi, - TileDataEnc 
*tile_data, - BLOCK_SIZE bsize, - MV_REFERENCE_FRAME ref_frame, - THR_MODES best_mode_idx, - PREDICTION_MODE mode) { +static INLINE void update_thresh_freq_fact( + VP9_COMP *cpi, TileDataEnc *tile_data, int source_variance, + BLOCK_SIZE bsize, MV_REFERENCE_FRAME ref_frame, THR_MODES best_mode_idx, + PREDICTION_MODE mode) { THR_MODES thr_mode_idx = mode_idx[ref_frame][mode_offset(mode)]; int *freq_fact = &tile_data->thresh_freq_fact[bsize][thr_mode_idx]; if (thr_mode_idx == best_mode_idx) *freq_fact -= (*freq_fact >> 4); - else + else if (cpi->sf.limit_newmv_early_exit && mode == NEWMV && + ref_frame == LAST_FRAME && source_variance < 5) { + *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC, 32); + } else { *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC, cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT); + } } void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost, @@ -1011,23 +1004,27 @@ void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost, MODE_INFO *const mi = xd->mi[0]; RD_COST this_rdc, best_rdc; PREDICTION_MODE this_mode; - struct estimate_block_intra_args args = { cpi, x, DC_PRED, 0, 0 }; + struct estimate_block_intra_args args = { cpi, x, DC_PRED, 1, 0 }; const TX_SIZE intra_tx_size = VPXMIN(max_txsize_lookup[bsize], tx_mode_to_biggest_tx_size[cpi->common.tx_mode]); MODE_INFO *const mic = xd->mi[0]; int *bmode_costs; - const MODE_INFO *above_mi = xd->mi[-xd->mi_stride]; - const MODE_INFO *left_mi = xd->left_available ? 
xd->mi[-1] : NULL; + const MODE_INFO *above_mi = xd->above_mi; + const MODE_INFO *left_mi = xd->left_mi; const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, 0); const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, 0); bmode_costs = cpi->y_mode_costs[A][L]; - (void) ctx; + (void)ctx; vp9_rd_cost_reset(&best_rdc); vp9_rd_cost_reset(&this_rdc); mi->ref_frame[0] = INTRA_FRAME; + // Initialize interp_filter here so we do not have to check for inter block + // modes in get_pred_context_switchable_interp() + mi->interp_filter = SWITCHABLE_FILTERS; + mi->mv[0].as_int = INVALID_MV; mi->uv_mode = DC_PRED; memset(x->skip_txfm, 0, sizeof(x->skip_txfm)); @@ -1035,17 +1032,22 @@ void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost, // Change the limit of this loop to add other intra prediction // mode tests. for (this_mode = DC_PRED; this_mode <= H_PRED; ++this_mode) { + this_rdc.dist = this_rdc.rate = 0; args.mode = this_mode; - args.rate = 0; - args.dist = 0; + args.skippable = 1; + args.rdc = &this_rdc; mi->tx_size = intra_tx_size; - vp9_foreach_transformed_block_in_plane(xd, bsize, 0, - estimate_block_intra, &args); - this_rdc.rate = args.rate; - this_rdc.dist = args.dist; + vp9_foreach_transformed_block_in_plane(xd, bsize, 0, estimate_block_intra, + &args); + if (args.skippable) { + x->skip_txfm[0] = SKIP_TXFM_AC_DC; + this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 1); + } else { + x->skip_txfm[0] = SKIP_TXFM_NONE; + this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 0); + } this_rdc.rate += bmode_costs[this_mode]; - this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - this_rdc.rate, this_rdc.dist); + this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist); if (this_rdc.rdcost < best_rdc.rdcost) { best_rdc = this_rdc; @@ -1056,8 +1058,7 @@ void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost, *rd_cost = best_rdc; } -static void init_ref_frame_cost(VP9_COMMON *const cm, - 
MACROBLOCKD *const xd, +static void init_ref_frame_cost(VP9_COMMON *const cm, MACROBLOCKD *const xd, int ref_frame_cost[MAX_REF_FRAMES]) { vpx_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd); vpx_prob ref_single_p1 = vp9_get_pred_prob_single_ref_p1(cm, xd); @@ -1065,7 +1066,7 @@ static void init_ref_frame_cost(VP9_COMMON *const cm, ref_frame_cost[INTRA_FRAME] = vp9_cost_bit(intra_inter_p, 0); ref_frame_cost[LAST_FRAME] = ref_frame_cost[GOLDEN_FRAME] = - ref_frame_cost[ALTREF_FRAME] = vp9_cost_bit(intra_inter_p, 1); + ref_frame_cost[ALTREF_FRAME] = vp9_cost_bit(intra_inter_p, 1); ref_frame_cost[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0); ref_frame_cost[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p1, 1); @@ -1081,84 +1082,257 @@ typedef struct { #define RT_INTER_MODES 8 static const REF_MODE ref_mode_set[RT_INTER_MODES] = { - {LAST_FRAME, ZEROMV}, - {LAST_FRAME, NEARESTMV}, - {GOLDEN_FRAME, ZEROMV}, - {LAST_FRAME, NEARMV}, - {LAST_FRAME, NEWMV}, - {GOLDEN_FRAME, NEARESTMV}, - {GOLDEN_FRAME, NEARMV}, - {GOLDEN_FRAME, NEWMV} + { LAST_FRAME, ZEROMV }, { LAST_FRAME, NEARESTMV }, + { GOLDEN_FRAME, ZEROMV }, { LAST_FRAME, NEARMV }, + { LAST_FRAME, NEWMV }, { GOLDEN_FRAME, NEARESTMV }, + { GOLDEN_FRAME, NEARMV }, { GOLDEN_FRAME, NEWMV } }; static const REF_MODE ref_mode_set_svc[RT_INTER_MODES] = { - {LAST_FRAME, ZEROMV}, - {GOLDEN_FRAME, ZEROMV}, - {LAST_FRAME, NEARESTMV}, - {LAST_FRAME, NEARMV}, - {GOLDEN_FRAME, NEARESTMV}, - {GOLDEN_FRAME, NEARMV}, - {LAST_FRAME, NEWMV}, - {GOLDEN_FRAME, NEWMV} + { LAST_FRAME, ZEROMV }, { GOLDEN_FRAME, ZEROMV }, + { LAST_FRAME, NEARESTMV }, { LAST_FRAME, NEARMV }, + { GOLDEN_FRAME, NEARESTMV }, { GOLDEN_FRAME, NEARMV }, + { LAST_FRAME, NEWMV }, { GOLDEN_FRAME, NEWMV } }; -int set_intra_cost_penalty(const VP9_COMP *const cpi, BLOCK_SIZE bsize) { +static int set_intra_cost_penalty(const VP9_COMP *const cpi, BLOCK_SIZE bsize) { const VP9_COMMON *const cm = &cpi->common; // Reduce the intra cost penalty for small blocks (<=16x16). 
int reduction_fac = (bsize <= BLOCK_16X16) ? ((bsize <= BLOCK_8X8) ? 4 : 2) : 0; if (cpi->noise_estimate.enabled && cpi->noise_estimate.level == kHigh) - // Don't reduce intra cost penalty if estimated noise level is high. - reduction_fac = 0; - return vp9_get_intra_cost_penalty( - cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth) >> reduction_fac; + // Don't reduce intra cost penalty if estimated noise level is high. + reduction_fac = 0; + return vp9_get_intra_cost_penalty(cm->base_qindex, cm->y_dc_delta_q, + cm->bit_depth) >> + reduction_fac; } -static INLINE void find_predictors(VP9_COMP *cpi, MACROBLOCK *x, - MV_REFERENCE_FRAME ref_frame, - int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], - int const_motion[MAX_REF_FRAMES], - int *ref_frame_skip_mask, - const int flag_list[4], - TileDataEnc *tile_data, - int mi_row, int mi_col, - struct buf_2d yv12_mb[4][MAX_MB_PLANE], - BLOCK_SIZE bsize) { +static INLINE void find_predictors( + VP9_COMP *cpi, MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame, + int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], + int const_motion[MAX_REF_FRAMES], int *ref_frame_skip_mask, + const int flag_list[4], TileDataEnc *tile_data, int mi_row, int mi_col, + struct buf_2d yv12_mb[4][MAX_MB_PLANE], BLOCK_SIZE bsize, + int force_skip_low_temp_var) { VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &x->e_mbd; const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame); TileInfo *const tile_info = &tile_data->tile_info; -// TODO(jingning) placeholder for inter-frame non-RD mode decision. + // TODO(jingning) placeholder for inter-frame non-RD mode decision. x->pred_mv_sad[ref_frame] = INT_MAX; frame_mv[NEWMV][ref_frame].as_int = INVALID_MV; frame_mv[ZEROMV][ref_frame].as_int = 0; -// this needs various further optimizations. to be continued.. + // this needs various further optimizations. to be continued.. 
if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) { int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame]; const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf; - vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, - sf, sf); - if (cm->use_prev_frame_mvs) - vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame, - candidates, mi_row, mi_col, + vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf); + if (cm->use_prev_frame_mvs) { + vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame, candidates, mi_row, mi_col, x->mbmi_ext->mode_context); - else - const_motion[ref_frame] = - mv_refs_rt(cpi, cm, x, xd, tile_info, xd->mi[0], ref_frame, - candidates, &frame_mv[NEWMV][ref_frame], mi_row, mi_col, - (int)(cpi->svc.use_base_mv && cpi->svc.spatial_layer_id)); + } else { + const_motion[ref_frame] = + mv_refs_rt(cpi, cm, x, xd, tile_info, xd->mi[0], ref_frame, + candidates, &frame_mv[NEWMV][ref_frame], mi_row, mi_col, + (int)(cpi->svc.use_base_mv && cpi->svc.spatial_layer_id)); + } vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates, &frame_mv[NEARESTMV][ref_frame], &frame_mv[NEARMV][ref_frame]); - if (!vp9_is_scaled(sf) && bsize >= BLOCK_8X8) { - vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, - ref_frame, bsize); + // Early exit for golden frame if force_skip_low_temp_var is set. 
+ if (!vp9_is_scaled(sf) && bsize >= BLOCK_8X8 && + !(force_skip_low_temp_var && ref_frame == GOLDEN_FRAME)) { + vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame, + bsize); } } else { *ref_frame_skip_mask |= (1 << ref_frame); } } -void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, - TileDataEnc *tile_data, + +static void vp9_NEWMV_diff_bias(const NOISE_ESTIMATE *ne, MACROBLOCKD *xd, + PREDICTION_MODE this_mode, RD_COST *this_rdc, + BLOCK_SIZE bsize, int mv_row, int mv_col, + int is_last_frame) { + // Bias against MVs associated with NEWMV mode that are very different from + // top/left neighbors. + if (this_mode == NEWMV) { + int al_mv_average_row; + int al_mv_average_col; + int left_row, left_col; + int row_diff, col_diff; + int above_mv_valid = 0; + int left_mv_valid = 0; + int above_row = 0; + int above_col = 0; + + if (xd->above_mi) { + above_mv_valid = xd->above_mi->mv[0].as_int != INVALID_MV; + above_row = xd->above_mi->mv[0].as_mv.row; + above_col = xd->above_mi->mv[0].as_mv.col; + } + if (xd->left_mi) { + left_mv_valid = xd->left_mi->mv[0].as_int != INVALID_MV; + left_row = xd->left_mi->mv[0].as_mv.row; + left_col = xd->left_mi->mv[0].as_mv.col; + } + if (above_mv_valid && left_mv_valid) { + al_mv_average_row = (above_row + left_row + 1) >> 1; + al_mv_average_col = (above_col + left_col + 1) >> 1; + } else if (above_mv_valid) { + al_mv_average_row = above_row; + al_mv_average_col = above_col; + } else if (left_mv_valid) { + al_mv_average_row = left_row; + al_mv_average_col = left_col; + } else { + al_mv_average_row = al_mv_average_col = 0; + } + row_diff = (al_mv_average_row - mv_row); + col_diff = (al_mv_average_col - mv_col); + if (row_diff > 48 || row_diff < -48 || col_diff > 48 || col_diff < -48) { + if (bsize > BLOCK_32X32) + this_rdc->rdcost = this_rdc->rdcost << 1; + else + this_rdc->rdcost = 3 * this_rdc->rdcost >> 1; + } + } + // If noise estimation is enabled, and estimated level is above threshold, + // add a bias to 
LAST reference with small motion, for large blocks. + if (ne->enabled && ne->level >= kMedium && bsize >= BLOCK_32X32 && + is_last_frame && mv_row < 8 && mv_row > -8 && mv_col < 8 && mv_col > -8) { + this_rdc->rdcost = 7 * this_rdc->rdcost >> 3; + } +} + +#if CONFIG_VP9_TEMPORAL_DENOISING +static void vp9_pickmode_ctx_den_update( + VP9_PICKMODE_CTX_DEN *ctx_den, int64_t zero_last_cost_orig, + int ref_frame_cost[MAX_REF_FRAMES], + int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int reuse_inter_pred, + TX_SIZE best_tx_size, PREDICTION_MODE best_mode, + MV_REFERENCE_FRAME best_ref_frame, INTERP_FILTER best_pred_filter, + uint8_t best_mode_skip_txfm) { + ctx_den->zero_last_cost_orig = zero_last_cost_orig; + ctx_den->ref_frame_cost = ref_frame_cost; + ctx_den->frame_mv = frame_mv; + ctx_den->reuse_inter_pred = reuse_inter_pred; + ctx_den->best_tx_size = best_tx_size; + ctx_den->best_mode = best_mode; + ctx_den->best_ref_frame = best_ref_frame; + ctx_den->best_pred_filter = best_pred_filter; + ctx_den->best_mode_skip_txfm = best_mode_skip_txfm; +} + +static void recheck_zeromv_after_denoising( + VP9_COMP *cpi, MODE_INFO *const mi, MACROBLOCK *x, MACROBLOCKD *const xd, + VP9_DENOISER_DECISION decision, VP9_PICKMODE_CTX_DEN *ctx_den, + struct buf_2d yv12_mb[4][MAX_MB_PLANE], RD_COST *best_rdc, BLOCK_SIZE bsize, + int mi_row, int mi_col) { + // If INTRA or GOLDEN reference was selected, re-evaluate ZEROMV on + // denoised result. Only do this under noise conditions, and if rdcost of + // ZEROMV onoriginal source is not significantly higher than rdcost of best + // mode. + if (cpi->noise_estimate.enabled && cpi->noise_estimate.level > kLow && + ctx_den->zero_last_cost_orig < (best_rdc->rdcost << 3) && + ((ctx_den->best_ref_frame == INTRA_FRAME && decision >= FILTER_BLOCK) || + (ctx_den->best_ref_frame == GOLDEN_FRAME && + decision == FILTER_ZEROMV_BLOCK))) { + // Check if we should pick ZEROMV on denoised signal. 
+ int rate = 0; + int64_t dist = 0; + uint32_t var_y = UINT_MAX; + uint32_t sse_y = UINT_MAX; + RD_COST this_rdc; + mi->mode = ZEROMV; + mi->ref_frame[0] = LAST_FRAME; + mi->ref_frame[1] = NONE; + mi->mv[0].as_int = 0; + mi->interp_filter = EIGHTTAP; + xd->plane[0].pre[0] = yv12_mb[LAST_FRAME][0]; + vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize); + model_rd_for_sb_y(cpi, bsize, x, xd, &rate, &dist, &var_y, &sse_y); + this_rdc.rate = rate + ctx_den->ref_frame_cost[LAST_FRAME] + + cpi->inter_mode_cost[x->mbmi_ext->mode_context[LAST_FRAME]] + [INTER_OFFSET(ZEROMV)]; + this_rdc.dist = dist; + this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, rate, dist); + // Switch to ZEROMV if the rdcost for ZEROMV on denoised source + // is lower than best_ref mode (on original source). + if (this_rdc.rdcost > best_rdc->rdcost) { + this_rdc = *best_rdc; + mi->mode = ctx_den->best_mode; + mi->ref_frame[0] = ctx_den->best_ref_frame; + mi->interp_filter = ctx_den->best_pred_filter; + if (ctx_den->best_ref_frame == INTRA_FRAME) + mi->mv[0].as_int = INVALID_MV; + else if (ctx_den->best_ref_frame == GOLDEN_FRAME) { + mi->mv[0].as_int = + ctx_den->frame_mv[ctx_den->best_mode][ctx_den->best_ref_frame] + .as_int; + if (ctx_den->reuse_inter_pred) { + xd->plane[0].pre[0] = yv12_mb[GOLDEN_FRAME][0]; + vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize); + } + } + mi->tx_size = ctx_den->best_tx_size; + x->skip_txfm[0] = ctx_den->best_mode_skip_txfm; + } else { + ctx_den->best_ref_frame = LAST_FRAME; + *best_rdc = this_rdc; + } + } +} +#endif // CONFIG_VP9_TEMPORAL_DENOISING + +static INLINE int get_force_skip_low_temp_var(uint8_t *variance_low, int mi_row, + int mi_col, BLOCK_SIZE bsize) { + const int i = (mi_row & 0x7) >> 1; + const int j = (mi_col & 0x7) >> 1; + int force_skip_low_temp_var = 0; + // Set force_skip_low_temp_var based on the block size and block offset. 
+ if (bsize == BLOCK_64X64) { + force_skip_low_temp_var = variance_low[0]; + } else if (bsize == BLOCK_64X32) { + if (!(mi_col & 0x7) && !(mi_row & 0x7)) { + force_skip_low_temp_var = variance_low[1]; + } else if (!(mi_col & 0x7) && (mi_row & 0x7)) { + force_skip_low_temp_var = variance_low[2]; + } + } else if (bsize == BLOCK_32X64) { + if (!(mi_col & 0x7) && !(mi_row & 0x7)) { + force_skip_low_temp_var = variance_low[3]; + } else if ((mi_col & 0x7) && !(mi_row & 0x7)) { + force_skip_low_temp_var = variance_low[4]; + } + } else if (bsize == BLOCK_32X32) { + if (!(mi_col & 0x7) && !(mi_row & 0x7)) { + force_skip_low_temp_var = variance_low[5]; + } else if ((mi_col & 0x7) && !(mi_row & 0x7)) { + force_skip_low_temp_var = variance_low[6]; + } else if (!(mi_col & 0x7) && (mi_row & 0x7)) { + force_skip_low_temp_var = variance_low[7]; + } else if ((mi_col & 0x7) && (mi_row & 0x7)) { + force_skip_low_temp_var = variance_low[8]; + } + } else if (bsize == BLOCK_16X16) { + force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]]; + } else if (bsize == BLOCK_32X16) { + // The col shift index for the second 16x16 block. + const int j2 = ((mi_col + 2) & 0x7) >> 1; + // Only if each 16x16 block inside has low temporal variance. + force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]] && + variance_low[pos_shift_16x16[i][j2]]; + } else if (bsize == BLOCK_16X32) { + // The row shift index for the second 16x16 block. 
+ const int i2 = ((mi_row + 2) & 0x7) >> 1; + force_skip_low_temp_var = variance_low[pos_shift_16x16[i][j]] && + variance_low[pos_shift_16x16[i2][j]]; + } + return force_skip_low_temp_var; +} + +void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, TileDataEnc *tile_data, int mi_row, int mi_col, RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) { VP9_COMMON *const cm = &cpi->common; @@ -1182,15 +1356,18 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, unsigned int var_y = UINT_MAX; unsigned int sse_y = UINT_MAX; const int intra_cost_penalty = set_intra_cost_penalty(cpi, bsize); - int64_t inter_mode_thresh = RDCOST(x->rdmult, x->rddiv, - intra_cost_penalty, 0); + int64_t inter_mode_thresh = + RDCOST(x->rdmult, x->rddiv, intra_cost_penalty, 0); const int *const rd_threshes = cpi->rd.threshes[mi->segment_id][bsize]; const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize]; INTERP_FILTER filter_ref; const int bsl = mi_width_log2_lookup[bsize]; - const int pred_filter_search = cm->interp_filter == SWITCHABLE ? - (((mi_row + mi_col) >> bsl) + - get_chessboard_index(cm->current_video_frame)) & 0x1 : 0; + const int pred_filter_search = + cm->interp_filter == SWITCHABLE + ? 
(((mi_row + mi_col) >> bsl) + + get_chessboard_index(cm->current_video_frame)) & + 0x1 + : 0; int const_motion[MAX_REF_FRAMES] = { 0 }; const int bh = num_4x4_blocks_high_lookup[bsize] << 2; const int bw = num_4x4_blocks_wide_lookup[bsize] << 2; @@ -1212,9 +1389,12 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, int best_pred_sad = INT_MAX; int best_early_term = 0; int ref_frame_cost[MAX_REF_FRAMES]; - int svc_force_zero_mode[3] = {0}; + int svc_force_zero_mode[3] = { 0 }; int perform_intra_pred = 1; + int use_golden_nonzeromv = 1; + int force_skip_low_temp_var = 0; #if CONFIG_VP9_TEMPORAL_DENOISING + VP9_PICKMODE_CTX_DEN ctx_den; int64_t zero_last_cost_orig = INT64_MAX; #endif @@ -1242,10 +1422,14 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH; x->skip = 0; - if (xd->up_available) - filter_ref = xd->mi[-xd->mi_stride]->interp_filter; - else if (xd->left_available) - filter_ref = xd->mi[-1]->interp_filter; + // Instead of using vp9_get_pred_context_switchable_interp(xd) to assign + // filter_ref, we use a less strict condition on assigning filter_ref. + // This is to reduce the probabily of entering the flow of not assigning + // filter_ref and then skip filter search. 
+ if (xd->above_mi && is_inter_block(xd->above_mi)) + filter_ref = xd->above_mi->interp_filter; + else if (xd->left_mi && is_inter_block(xd->left_mi)) + filter_ref = xd->left_mi->interp_filter; else filter_ref = cm->interp_filter; @@ -1255,10 +1439,11 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, mi->sb_type = bsize; mi->ref_frame[0] = NONE; mi->ref_frame[1] = NONE; - mi->tx_size = VPXMIN(max_txsize_lookup[bsize], - tx_mode_to_biggest_tx_size[cm->tx_mode]); - if (sf->short_circuit_flat_blocks) { + mi->tx_size = + VPXMIN(max_txsize_lookup[bsize], tx_mode_to_biggest_tx_size[cm->tx_mode]); + + if (sf->short_circuit_flat_blocks || sf->limit_newmv_early_exit) { #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) x->source_variance = vp9_high_get_sby_perpixel_variance( @@ -1270,7 +1455,10 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, } #if CONFIG_VP9_TEMPORAL_DENOISING - vp9_denoiser_reset_frame_stats(ctx); + if (cpi->oxcf.noise_sensitivity > 0 && + cpi->denoiser.denoising_level > kDenLowLow) { + vp9_denoiser_reset_frame_stats(ctx); + } #endif if (cpi->rc.frames_since_golden == 0 && !cpi->use_svc) { @@ -1279,20 +1467,33 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, usable_ref_frame = GOLDEN_FRAME; } - // If the reference is temporally aligned with current superframe - // (e.g., spatial reference within superframe), constrain the inter mode: - // for now only test zero motion. - if (cpi->use_svc && svc ->force_zero_mode_spatial_ref) { - if (svc->ref_frame_index[cpi->lst_fb_idx] == svc->current_superframe) - svc_force_zero_mode[LAST_FRAME - 1] = 1; - if (svc->ref_frame_index[cpi->gld_fb_idx] == svc->current_superframe) - svc_force_zero_mode[GOLDEN_FRAME - 1] = 1; + // For svc mode, on spatial_layer_id > 0: if the reference has different scale + // constrain the inter mode to only test zero motion. 
+ if (cpi->use_svc && svc->force_zero_mode_spatial_ref && + cpi->svc.spatial_layer_id > 0) { + if (cpi->ref_frame_flags & flag_list[LAST_FRAME]) { + struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf; + if (vp9_is_scaled(sf)) svc_force_zero_mode[LAST_FRAME - 1] = 1; + } + if (cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) { + struct scale_factors *const sf = &cm->frame_refs[GOLDEN_FRAME - 1].sf; + if (vp9_is_scaled(sf)) svc_force_zero_mode[GOLDEN_FRAME - 1] = 1; + } } + if (cpi->sf.short_circuit_low_temp_var) { + force_skip_low_temp_var = + get_force_skip_low_temp_var(&x->variance_low[0], mi_row, mi_col, bsize); + } + + if (!((cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) && + !svc_force_zero_mode[GOLDEN_FRAME - 1] && !force_skip_low_temp_var)) + use_golden_nonzeromv = 0; + for (ref_frame = LAST_FRAME; ref_frame <= usable_ref_frame; ++ref_frame) { find_predictors(cpi, x, ref_frame, frame_mv, const_motion, &ref_frame_skip_mask, flag_list, tile_data, mi_row, mi_col, - yv12_mb, bsize); + yv12_mb, bsize, force_skip_low_temp_var); } for (idx = 0; idx < RT_INTER_MODES; ++idx) { @@ -1304,26 +1505,37 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, int is_skippable; int this_early_term = 0; PREDICTION_MODE this_mode = ref_mode_set[idx].pred_mode; - if (cpi->use_svc) - this_mode = ref_mode_set_svc[idx].pred_mode; + + if (cpi->use_svc) this_mode = ref_mode_set_svc[idx].pred_mode; if (sf->short_circuit_flat_blocks && x->source_variance == 0 && this_mode != NEARESTMV) { continue; } - if (!(cpi->sf.inter_mode_mask[bsize] & (1 << this_mode))) - continue; + if (!(cpi->sf.inter_mode_mask[bsize] & (1 << this_mode))) continue; ref_frame = ref_mode_set[idx].ref_frame; if (cpi->use_svc) { ref_frame = ref_mode_set_svc[idx].ref_frame; } - if (!(cpi->ref_frame_flags & flag_list[ref_frame])) + if (!(cpi->ref_frame_flags & flag_list[ref_frame])) continue; + + if (const_motion[ref_frame] && this_mode == NEARMV) continue; + + // Skip non-zeromv mode search for 
golden frame if force_skip_low_temp_var + // is set. If nearestmv for golden frame is 0, zeromv mode will be skipped + // later. + if (force_skip_low_temp_var && ref_frame == GOLDEN_FRAME && + frame_mv[this_mode][ref_frame].as_int != 0) { continue; - if (const_motion[ref_frame] && this_mode == NEARMV) + } + + if (cpi->sf.short_circuit_low_temp_var == 2 && force_skip_low_temp_var && + ref_frame == LAST_FRAME && this_mode == NEWMV) { continue; + } if (cpi->use_svc) { if (svc_force_zero_mode[ref_frame - 1] && @@ -1331,15 +1543,15 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, continue; } - if (!(frame_mv[this_mode][ref_frame].as_int == 0 && - ref_frame == LAST_FRAME)) { + if (!force_skip_low_temp_var && + !(frame_mv[this_mode][ref_frame].as_int == 0 && + ref_frame == LAST_FRAME)) { i = (ref_frame == LAST_FRAME) ? GOLDEN_FRAME : LAST_FRAME; if ((cpi->ref_frame_flags & flag_list[i]) && sf->reference_masking) if (x->pred_mv_sad[ref_frame] > (x->pred_mv_sad[i] << 1)) ref_frame_skip_mask |= (1 << ref_frame); } - if (ref_frame_skip_mask & (1 << ref_frame)) - continue; + if (ref_frame_skip_mask & (1 << ref_frame)) continue; // Select prediction reference frames. for (i = 0; i < MAX_MB_PLANE; i++) @@ -1349,105 +1561,99 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, set_ref_ptrs(cm, xd, ref_frame, NONE); mode_index = mode_idx[ref_frame][INTER_OFFSET(this_mode)]; - mode_rd_thresh = best_mode_skip_txfm ? - rd_threshes[mode_index] << 1 : rd_threshes[mode_index]; + mode_rd_thresh = best_mode_skip_txfm ? rd_threshes[mode_index] << 1 + : rd_threshes[mode_index]; + + // Increase mode_rd_thresh value for GOLDEN_FRAME for improved encoding + // speed with little/no subjective quality loss. 
+ if (cpi->sf.bias_golden && ref_frame == GOLDEN_FRAME && + cpi->rc.frames_since_golden > 4) + mode_rd_thresh = mode_rd_thresh << 3; + if (rd_less_than_thresh(best_rdc.rdcost, mode_rd_thresh, rd_thresh_freq_fact[mode_index])) continue; if (this_mode == NEWMV) { - if (ref_frame > LAST_FRAME && !cpi->use_svc) { + if (ref_frame > LAST_FRAME && !cpi->use_svc && + cpi->oxcf.rc_mode == VPX_CBR) { int tmp_sad; - int dis, cost_list[5]; + uint32_t dis; + int cost_list[5]; - if (bsize < BLOCK_16X16) - continue; + if (bsize < BLOCK_16X16) continue; tmp_sad = vp9_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col); - if (tmp_sad > x->pred_mv_sad[LAST_FRAME]) - continue; + if (tmp_sad > x->pred_mv_sad[LAST_FRAME]) continue; if (tmp_sad + (num_pels_log2_lookup[bsize] << 4) > best_pred_sad) continue; frame_mv[NEWMV][ref_frame].as_int = mi->mv[0].as_int; rate_mv = vp9_mv_bit_cost(&frame_mv[NEWMV][ref_frame].as_mv, - &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv, - x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); + &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv, + x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); frame_mv[NEWMV][ref_frame].as_mv.row >>= 3; frame_mv[NEWMV][ref_frame].as_mv.col >>= 3; - cpi->find_fractional_mv_step(x, &frame_mv[NEWMV][ref_frame].as_mv, - &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv, - cpi->common.allow_high_precision_mv, - x->errorperbit, - &cpi->fn_ptr[bsize], - cpi->sf.mv.subpel_force_stop, - cpi->sf.mv.subpel_iters_per_step, - cond_cost_list(cpi, cost_list), - x->nmvjointcost, x->mvcost, &dis, - &x->pred_sse[ref_frame], NULL, 0, 0); + cpi->find_fractional_mv_step( + x, &frame_mv[NEWMV][ref_frame].as_mv, + &x->mbmi_ext->ref_mvs[ref_frame][0].as_mv, + cpi->common.allow_high_precision_mv, x->errorperbit, + &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop, + cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list), + x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref_frame], NULL, 0, + 0); } else if (svc->use_base_mv && svc->spatial_layer_id) { - if 
(frame_mv[NEWMV][ref_frame].as_int != INVALID_MV && - frame_mv[NEWMV][ref_frame].as_int != 0) { + if (frame_mv[NEWMV][ref_frame].as_int != INVALID_MV) { const int pre_stride = xd->plane[0].pre[0].stride; int base_mv_sad = INT_MAX; - const uint8_t * const pre_buf = xd->plane[0].pre[0].buf + + const float base_mv_bias = sf->base_mv_aggressive ? 1.5f : 1.0f; + const uint8_t *const pre_buf = + xd->plane[0].pre[0].buf + (frame_mv[NEWMV][ref_frame].as_mv.row >> 3) * pre_stride + (frame_mv[NEWMV][ref_frame].as_mv.col >> 3); - base_mv_sad = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf, - x->plane[0].src.stride, - pre_buf, pre_stride); + base_mv_sad = cpi->fn_ptr[bsize].sdf( + x->plane[0].src.buf, x->plane[0].src.stride, pre_buf, pre_stride); - // TODO(wonkap): make the decision to use base layer mv on RD; - // not just SAD. - if (base_mv_sad < x->pred_mv_sad[ref_frame]) { + if (base_mv_sad < (int)(base_mv_bias * x->pred_mv_sad[ref_frame])) { // Base layer mv is good. if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col, - &frame_mv[NEWMV][ref_frame], &rate_mv, best_rdc.rdcost, 1)) { - continue; + &frame_mv[NEWMV][ref_frame], &rate_mv, + best_rdc.rdcost, 1)) { + continue; } } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col, - &frame_mv[NEWMV][ref_frame], &rate_mv, best_rdc.rdcost, 0)) { + &frame_mv[NEWMV][ref_frame], + &rate_mv, best_rdc.rdcost, 0)) { continue; } } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col, - &frame_mv[NEWMV][ref_frame], &rate_mv, best_rdc.rdcost, 0)) { + &frame_mv[NEWMV][ref_frame], + &rate_mv, best_rdc.rdcost, 0)) { continue; } } else if (!combined_motion_search(cpi, x, bsize, mi_row, mi_col, - &frame_mv[NEWMV][ref_frame], &rate_mv, best_rdc.rdcost, 0)) { + &frame_mv[NEWMV][ref_frame], &rate_mv, + best_rdc.rdcost, 0)) { continue; } } - if (this_mode == NEWMV && ref_frame == LAST_FRAME && + // If use_golden_nonzeromv is false, NEWMV mode is skipped for golden, no + // need to compute best_pred_sad which is only used 
to skip golden NEWMV. + if (use_golden_nonzeromv && this_mode == NEWMV && ref_frame == LAST_FRAME && frame_mv[NEWMV][LAST_FRAME].as_int != INVALID_MV) { const int pre_stride = xd->plane[0].pre[0].stride; - const uint8_t * const pre_buf = xd->plane[0].pre[0].buf + + const uint8_t *const pre_buf = + xd->plane[0].pre[0].buf + (frame_mv[NEWMV][LAST_FRAME].as_mv.row >> 3) * pre_stride + (frame_mv[NEWMV][LAST_FRAME].as_mv.col >> 3); - best_pred_sad = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf, - x->plane[0].src.stride, - pre_buf, pre_stride); + best_pred_sad = cpi->fn_ptr[bsize].sdf( + x->plane[0].src.buf, x->plane[0].src.stride, pre_buf, pre_stride); x->pred_mv_sad[LAST_FRAME] = best_pred_sad; } - if (cpi->use_svc) { - if (this_mode == NEWMV && ref_frame == GOLDEN_FRAME && - frame_mv[NEWMV][GOLDEN_FRAME].as_int != INVALID_MV) { - const int pre_stride = xd->plane[0].pre[0].stride; - const uint8_t * const pre_buf = xd->plane[0].pre[0].buf + - (frame_mv[NEWMV][GOLDEN_FRAME].as_mv.row >> 3) * pre_stride + - (frame_mv[NEWMV][GOLDEN_FRAME].as_mv.col >> 3); - best_pred_sad = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf, - x->plane[0].src.stride, - pre_buf, pre_stride); - x->pred_mv_sad[GOLDEN_FRAME] = best_pred_sad; - } - } - - if (this_mode != NEARESTMV && frame_mv[this_mode][ref_frame].as_int == frame_mv[NEARESTMV][ref_frame].as_int) @@ -1469,10 +1675,12 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, } } - if ((this_mode == NEWMV || filter_ref == SWITCHABLE) && pred_filter_search - && (ref_frame == LAST_FRAME || - (ref_frame == GOLDEN_FRAME && cpi->use_svc)) - && (((mi->mv[0].as_mv.row | mi->mv[0].as_mv.col) & 0x07) != 0)) { + if ((this_mode == NEWMV || filter_ref == SWITCHABLE) && + pred_filter_search && + (ref_frame == LAST_FRAME || + (ref_frame == GOLDEN_FRAME && + (cpi->use_svc || cpi->oxcf.rc_mode == VPX_VBR))) && + (((mi->mv[0].as_mv.row | mi->mv[0].as_mv.col) & 0x07) != 0)) { int pf_rate[3]; int64_t pf_dist[3]; unsigned int pf_var[3]; @@ -1501,12 +1709,9 @@ 
void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, free_pred_buffer(this_mode_pred); this_mode_pred = current_pred; } - - if (filter < EIGHTTAP_SHARP) { - current_pred = &tmp[get_pred_buffer(tmp, 3)]; - pd->dst.buf = current_pred->data; - pd->dst.stride = bw; - } + current_pred = &tmp[get_pred_buffer(tmp, 3)]; + pd->dst.buf = current_pred->data; + pd->dst.stride = bw; } } } @@ -1526,13 +1731,22 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, pd->dst.stride = this_mode_pred->stride; } } else { +// TODO(jackychen): the low-bitdepth condition causes a segfault in +// high-bitdepth builds. +// https://bugs.chromium.org/p/webm/issues/detail?id=1250 +#if CONFIG_VP9_HIGHBITDEPTH + const int large_block = bsize > BLOCK_32X32; +#else + const int large_block = + x->sb_is_skin ? bsize > BLOCK_32X32 : bsize >= BLOCK_32X32; +#endif mi->interp_filter = (filter_ref == SWITCHABLE) ? EIGHTTAP : filter_ref; vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize); // For large partition blocks, extra testing is done. 
- if (bsize > BLOCK_32X32 && - !cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id) && - cm->base_qindex) { + if (cpi->oxcf.rc_mode == VPX_CBR && large_block && + !cyclic_refresh_segment_id_boosted(xd->mi[0]->segment_id) && + cm->base_qindex) { model_rd_for_sb_y_large(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist, &var_y, &sse_y, mi_row, mi_col, &this_early_term); @@ -1544,8 +1758,8 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, if (!this_early_term) { this_sse = (int64_t)sse_y; - block_yrd(cpi, x, &this_rdc.rate, &this_rdc.dist, &is_skippable, - &this_sse, 0, bsize, VPXMIN(mi->tx_size, TX_16X16)); + block_yrd(cpi, x, &this_rdc, &is_skippable, &this_sse, bsize, + VPXMIN(mi->tx_size, TX_16X16)); x->skip_txfm[0] = is_skippable; if (is_skippable) { this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1); @@ -1565,57 +1779,38 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, this_rdc.rate += vp9_get_switchable_rate(cpi, xd); } } else { - this_rdc.rate += cm->interp_filter == SWITCHABLE ? - vp9_get_switchable_rate(cpi, xd) : 0; + this_rdc.rate += cm->interp_filter == SWITCHABLE + ? 
vp9_get_switchable_rate(cpi, xd) + : 0; this_rdc.rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1); } if (x->color_sensitivity[0] || x->color_sensitivity[1]) { - int uv_rate = 0; - int64_t uv_dist = 0; + RD_COST rdc_uv; const BLOCK_SIZE uv_bsize = get_plane_block_size(bsize, &xd->plane[1]); if (x->color_sensitivity[0]) vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, 1); if (x->color_sensitivity[1]) vp9_build_inter_predictors_sbp(xd, mi_row, mi_col, bsize, 2); - model_rd_for_sb_uv(cpi, uv_bsize, x, xd, &uv_rate, &uv_dist, - &var_y, &sse_y, 1, 2); - this_rdc.rate += uv_rate; - this_rdc.dist += uv_dist; + model_rd_for_sb_uv(cpi, uv_bsize, x, xd, &rdc_uv, &var_y, &sse_y, 1, 2); + this_rdc.rate += rdc_uv.rate; + this_rdc.dist += rdc_uv.dist; } this_rdc.rate += rate_mv; - this_rdc.rate += - cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]][INTER_OFFSET( - this_mode)]; + this_rdc.rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]] + [INTER_OFFSET(this_mode)]; this_rdc.rate += ref_frame_cost[ref_frame]; this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist); - if (cpi->oxcf.speed >= 5 && - cpi->oxcf.content != VP9E_CONTENT_SCREEN && - !x->sb_is_skin) { - // Bias against non-zero (above some threshold) motion for large blocks. - // This is temporary fix to avoid selection of large mv for big blocks. - if (frame_mv[this_mode][ref_frame].as_mv.row > 64 || - frame_mv[this_mode][ref_frame].as_mv.row < -64 || - frame_mv[this_mode][ref_frame].as_mv.col > 64 || - frame_mv[this_mode][ref_frame].as_mv.col < -64) { - if (bsize == BLOCK_64X64) - this_rdc.rdcost = this_rdc.rdcost << 1; - else if (bsize >= BLOCK_32X32) - this_rdc.rdcost = 3 * this_rdc.rdcost >> 1; - } - // If noise estimation is enabled, and estimated level is above threshold, - // add a bias to LAST reference with small motion, for large blocks. 
- if (cpi->noise_estimate.enabled && - cpi->noise_estimate.level >= kMedium && - bsize >= BLOCK_32X32 && - ref_frame == LAST_FRAME && - frame_mv[this_mode][ref_frame].as_mv.row < 8 && - frame_mv[this_mode][ref_frame].as_mv.row > -8 && - frame_mv[this_mode][ref_frame].as_mv.col < 8 && - frame_mv[this_mode][ref_frame].as_mv.col > -8) - this_rdc.rdcost = 7 * this_rdc.rdcost >> 3; + // Bias against NEWMV that is very different from its neighbors, and bias + // to small motion-lastref for noisy input. + if (cpi->oxcf.rc_mode == VPX_CBR && cpi->oxcf.speed >= 5 && + cpi->oxcf.content != VP9E_CONTENT_SCREEN) { + vp9_NEWMV_diff_bias(&cpi->noise_estimate, xd, this_mode, &this_rdc, bsize, + frame_mv[this_mode][ref_frame].as_mv.row, + frame_mv[this_mode][ref_frame].as_mv.col, + ref_frame == LAST_FRAME); } // Skipping checking: test to see if this block can be reconstructed by @@ -1626,13 +1821,14 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, &this_rdc.dist); if (x->skip) { this_rdc.rate += rate_mv; - this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, this_rdc.rate, - this_rdc.dist); + this_rdc.rdcost = + RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist); } } #if CONFIG_VP9_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity > 0) { + if (cpi->oxcf.noise_sensitivity > 0 && + cpi->denoiser.denoising_level > kDenLowLow) { vp9_denoiser_update_frame_stats(mi, sse_y, this_mode, ctx); // Keep track of zero_last cost. if (ref_frame == LAST_FRAME && frame_mv[this_mode][ref_frame].as_int == 0) @@ -1656,12 +1852,10 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, best_pred = this_mode_pred; } } else { - if (reuse_inter_pred) - free_pred_buffer(this_mode_pred); + if (reuse_inter_pred) free_pred_buffer(this_mode_pred); } - if (x->skip) - break; + if (x->skip) break; // If early termination flag is 1 and at least 2 modes are checked, // the mode search is terminated. 
@@ -1671,29 +1865,32 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, } } - mi->mode = best_mode; + mi->mode = best_mode; mi->interp_filter = best_pred_filter; - mi->tx_size = best_tx_size; - mi->ref_frame[0] = best_ref_frame; - mi->mv[0].as_int = frame_mv[best_mode][best_ref_frame].as_int; + mi->tx_size = best_tx_size; + mi->ref_frame[0] = best_ref_frame; + mi->mv[0].as_int = frame_mv[best_mode][best_ref_frame].as_int; xd->mi[0]->bmi[0].as_mv[0].as_int = mi->mv[0].as_int; x->skip_txfm[0] = best_mode_skip_txfm; - // Perform intra prediction only if base layer is chosen as the reference. + // For spatial enhancemanent layer: perform intra prediction only if base + // layer is chosen as the reference. Always perform intra prediction if + // LAST is the only reference or is_key_frame is set. if (cpi->svc.spatial_layer_id) { perform_intra_pred = cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame || - (!cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame - && svc_force_zero_mode[best_ref_frame]); + !(cpi->ref_frame_flags & flag_list[GOLDEN_FRAME]) || + (!cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame && + svc_force_zero_mode[best_ref_frame - 1]); inter_mode_thresh = (inter_mode_thresh << 1) + inter_mode_thresh; } // Perform intra prediction search, if the best SAD is above a certain // threshold. 
- if (perform_intra_pred && - ((best_rdc.rdcost == INT64_MAX || - (!x->skip && best_rdc.rdcost > inter_mode_thresh && - bsize <= cpi->sf.max_intra_bsize)))) { - struct estimate_block_intra_args args = { cpi, x, DC_PRED, 0, 0 }; + if ((!force_skip_low_temp_var || bsize < BLOCK_32X32) && perform_intra_pred && + (best_rdc.rdcost == INT64_MAX || + (!x->skip && best_rdc.rdcost > inter_mode_thresh && + bsize <= cpi->sf.max_intra_bsize))) { + struct estimate_block_intra_args args = { cpi, x, DC_PRED, 1, 0 }; int i; TX_SIZE best_intra_tx_size = TX_SIZES; TX_SIZE intra_tx_size = @@ -1712,12 +1909,12 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, NULL, 0, NULL, 0, bw, bh, xd->bd); else vpx_convolve_copy(best_pred->data, best_pred->stride, - this_mode_pred->data, this_mode_pred->stride, - NULL, 0, NULL, 0, bw, bh); + this_mode_pred->data, this_mode_pred->stride, NULL, + 0, NULL, 0, bw, bh); #else vpx_convolve_copy(best_pred->data, best_pred->stride, - this_mode_pred->data, this_mode_pred->stride, - NULL, 0, NULL, 0, bw, bh); + this_mode_pred->data, this_mode_pred->stride, NULL, 0, + NULL, 0, bw, bh); #endif // CONFIG_VP9_HIGHBITDEPTH best_pred = this_mode_pred; } @@ -1742,12 +1939,22 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, mi->mode = this_mode; mi->ref_frame[0] = INTRA_FRAME; + this_rdc.dist = this_rdc.rate = 0; args.mode = this_mode; - args.rate = 0; - args.dist = 0; + args.skippable = 1; + args.rdc = &this_rdc; mi->tx_size = intra_tx_size; - vp9_foreach_transformed_block_in_plane(xd, bsize, 0, - estimate_block_intra, &args); + vp9_foreach_transformed_block_in_plane(xd, bsize, 0, estimate_block_intra, + &args); + // Check skip cost here since skippable is not set for for uv, this + // mirrors the behavior used by inter + if (args.skippable) { + x->skip_txfm[0] = SKIP_TXFM_AC_DC; + this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 1); + } else { + x->skip_txfm[0] = SKIP_TXFM_NONE; + this_rdc.rate += 
vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), 0); + } // Inter and intra RD will mismatch in scale for non-screen content. if (cpi->oxcf.content == VP9E_CONTENT_SCREEN) { if (x->color_sensitivity[0]) @@ -1757,13 +1964,11 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, vp9_foreach_transformed_block_in_plane(xd, bsize, 2, estimate_block_intra, &args); } - this_rdc.rate = args.rate; - this_rdc.dist = args.dist; this_rdc.rate += cpi->mbmode_cost[this_mode]; this_rdc.rate += ref_frame_cost[INTRA_FRAME]; this_rdc.rate += intra_cost_penalty; - this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - this_rdc.rate, this_rdc.dist); + this_rdc.rdcost = + RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist); if (this_rdc.rdcost < best_rdc.rdcost) { best_rdc = this_rdc; @@ -1789,78 +1994,39 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, mi->ref_frame[0] = best_ref_frame; x->skip_txfm[0] = best_mode_skip_txfm; + if (!is_inter_block(mi)) { + mi->interp_filter = SWITCHABLE_FILTERS; + } + if (reuse_inter_pred && best_pred != NULL) { if (best_pred->data != orig_dst.buf && is_inter_mode(mi->mode)) { #if CONFIG_VP9_HIGHBITDEPTH if (cm->use_highbitdepth) vpx_highbd_convolve_copy(best_pred->data, best_pred->stride, - pd->dst.buf, pd->dst.stride, NULL, 0, - NULL, 0, bw, bh, xd->bd); + pd->dst.buf, pd->dst.stride, NULL, 0, NULL, 0, + bw, bh, xd->bd); else - vpx_convolve_copy(best_pred->data, best_pred->stride, - pd->dst.buf, pd->dst.stride, NULL, 0, - NULL, 0, bw, bh); + vpx_convolve_copy(best_pred->data, best_pred->stride, pd->dst.buf, + pd->dst.stride, NULL, 0, NULL, 0, bw, bh); #else - vpx_convolve_copy(best_pred->data, best_pred->stride, - pd->dst.buf, pd->dst.stride, NULL, 0, - NULL, 0, bw, bh); + vpx_convolve_copy(best_pred->data, best_pred->stride, pd->dst.buf, + pd->dst.stride, NULL, 0, NULL, 0, bw, bh); #endif // CONFIG_VP9_HIGHBITDEPTH } } #if CONFIG_VP9_TEMPORAL_DENOISING - if (cpi->oxcf.noise_sensitivity > 0 && - cpi->resize_pending == 0) { + if 
(cpi->oxcf.noise_sensitivity > 0 && cpi->resize_pending == 0 && + cpi->denoiser.denoising_level > kDenLowLow && cpi->denoiser.reset == 0) { VP9_DENOISER_DECISION decision = COPY_BLOCK; - vp9_denoiser_denoise(&cpi->denoiser, x, mi_row, mi_col, - VPXMAX(BLOCK_8X8, bsize), ctx, &decision); - // If INTRA or GOLDEN reference was selected, re-evaluate ZEROMV on denoised - // result. Only do this under noise conditions, and if rdcost of ZEROMV on - // original source is not significantly higher than rdcost of best mode. - if (((best_ref_frame == INTRA_FRAME && decision >= FILTER_BLOCK) || - (best_ref_frame == GOLDEN_FRAME && decision == FILTER_ZEROMV_BLOCK)) && - cpi->noise_estimate.enabled && - cpi->noise_estimate.level > kLow && - zero_last_cost_orig < (best_rdc.rdcost << 3)) { - // Check if we should pick ZEROMV on denoised signal. - int rate = 0; - int64_t dist = 0; - mi->mode = ZEROMV; - mi->ref_frame[0] = LAST_FRAME; - mi->ref_frame[1] = NONE; - mi->mv[0].as_int = 0; - mi->interp_filter = EIGHTTAP; - xd->plane[0].pre[0] = yv12_mb[LAST_FRAME][0]; - vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize); - model_rd_for_sb_y(cpi, bsize, x, xd, &rate, &dist, &var_y, &sse_y); - this_rdc.rate = rate + ref_frame_cost[LAST_FRAME] + - cpi->inter_mode_cost[x->mbmi_ext->mode_context[LAST_FRAME]] - [INTER_OFFSET(ZEROMV)]; - this_rdc.dist = dist; - this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, rate, dist); - // Switch to ZEROMV if the rdcost for ZEROMV on denoised source - // is lower than best_ref mode (on original source). 
- if (this_rdc.rdcost > best_rdc.rdcost) { - this_rdc = best_rdc; - mi->mode = best_mode; - mi->ref_frame[0] = best_ref_frame; - mi->interp_filter = best_pred_filter; - if (best_ref_frame == INTRA_FRAME) - mi->mv[0].as_int = INVALID_MV; - else if (best_ref_frame == GOLDEN_FRAME) { - mi->mv[0].as_int = frame_mv[best_mode][best_ref_frame].as_int; - if (reuse_inter_pred) { - xd->plane[0].pre[0] = yv12_mb[GOLDEN_FRAME][0]; - vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize); - } - } - mi->tx_size = best_tx_size; - x->skip_txfm[0] = best_mode_skip_txfm; - } else { - best_ref_frame = LAST_FRAME; - best_rdc = this_rdc; - } - } + vp9_pickmode_ctx_den_update(&ctx_den, zero_last_cost_orig, ref_frame_cost, + frame_mv, reuse_inter_pred, best_tx_size, + best_mode, best_ref_frame, best_pred_filter, + best_mode_skip_txfm); + vp9_denoiser_denoise(cpi, x, mi_row, mi_col, bsize, ctx, &decision); + recheck_zeromv_after_denoising(cpi, mi, x, xd, decision, &ctx_den, yv12_mb, + &best_rdc, bsize, mi_row, mi_col); + best_ref_frame = ctx_den.best_ref_frame; } #endif @@ -1869,22 +2035,22 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, if (best_ref_frame == INTRA_FRAME) { // Only consider the modes that are included in the intra_mode_list. - int intra_modes = sizeof(intra_mode_list)/sizeof(PREDICTION_MODE); + int intra_modes = sizeof(intra_mode_list) / sizeof(PREDICTION_MODE); int i; // TODO(yunqingwang): Check intra mode mask and only update freq_fact // for those valid modes. 
for (i = 0; i < intra_modes; i++) { - update_thresh_freq_fact(cpi, tile_data, bsize, INTRA_FRAME, - best_mode_idx, intra_mode_list[i]); + update_thresh_freq_fact(cpi, tile_data, x->source_variance, bsize, + INTRA_FRAME, best_mode_idx, intra_mode_list[i]); } } else { for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) { PREDICTION_MODE this_mode; if (best_ref_frame != ref_frame) continue; for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) { - update_thresh_freq_fact(cpi, tile_data, bsize, ref_frame, - best_mode_idx, this_mode); + update_thresh_freq_fact(cpi, tile_data, x->source_variance, bsize, + ref_frame, best_mode_idx, this_mode); } } } @@ -1893,9 +2059,9 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, *rd_cost = best_rdc; } -void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, - int mi_row, int mi_col, RD_COST *rd_cost, - BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) { +void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, int mi_row, + int mi_col, RD_COST *rd_cost, BLOCK_SIZE bsize, + PICK_MODE_CONTEXT *ctx) { VP9_COMMON *const cm = &cpi->common; SPEED_FEATURES *const sf = &cpi->sf; MACROBLOCKD *const xd = &x->e_mbd; @@ -1925,12 +2091,11 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) { int_mv *const candidates = mbmi_ext->ref_mvs[ref_frame]; - const struct scale_factors *const sf = - &cm->frame_refs[ref_frame - 1].sf; - vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, - sf, sf); - vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame, - candidates, mi_row, mi_col, mbmi_ext->mode_context); + const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf; + vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, + sf); + vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame, candidates, mi_row, mi_col, + mbmi_ext->mode_context); vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates, 
&dummy_mv[0], &dummy_mv[1]); @@ -1944,19 +2109,17 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, mi->uv_mode = DC_PRED; mi->ref_frame[0] = LAST_FRAME; mi->ref_frame[1] = NONE; - mi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP - : cm->interp_filter; + mi->interp_filter = + cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter; for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) { int64_t this_rd = 0; int plane; - if (ref_frame_skip_mask & (1 << ref_frame)) - continue; + if (ref_frame_skip_mask & (1 << ref_frame)) continue; #if CONFIG_BETTER_HW_COMPATIBILITY - if ((bsize == BLOCK_8X4 || bsize == BLOCK_4X8) && - ref_frame > INTRA_FRAME && + if ((bsize == BLOCK_8X4 || bsize == BLOCK_4X8) && ref_frame > INTRA_FRAME && vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf)) continue; #endif @@ -2004,14 +2167,13 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, pd->dst.buf = &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)]; pd->pre[0].buf = - &pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8, - i, pd->pre[0].stride)]; + &pd->pre[0] + .buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)]; b_mv[ZEROMV].as_int = 0; b_mv[NEWMV].as_int = INVALID_MV; vp9_append_sub8x8_mvs_for_idx(cm, xd, i, 0, mi_row, mi_col, - &b_mv[NEARESTMV], - &b_mv[NEARMV], + &b_mv[NEARESTMV], &b_mv[NEARMV], mbmi_ext->mode_context); for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) { @@ -2023,11 +2185,8 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, MV mvp_full; MV tmp_mv; int cost_list[5]; - const int tmp_col_min = x->mv_col_min; - const int tmp_col_max = x->mv_col_max; - const int tmp_row_min = x->mv_row_min; - const int tmp_row_max = x->mv_row_max; - int dummy_dist; + const MvLimits tmp_mv_limits = x->mv_limits; + uint32_t dummy_dist; if (i == 0) { mvp_full.row = b_mv[NEARESTMV].as_mv.row >> 3; @@ -2037,44 +2196,35 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, 
mvp_full.col = xd->mi[0]->bmi[0].as_mv[0].as_mv.col >> 3; } - vp9_set_mv_search_range(x, &mbmi_ext->ref_mvs[0]->as_mv); + vp9_set_mv_search_range(&x->mv_limits, + &mbmi_ext->ref_mvs[0]->as_mv); - vp9_full_pixel_search( - cpi, x, bsize, &mvp_full, step_param, x->sadperbit4, - cond_cost_list(cpi, cost_list), - &mbmi_ext->ref_mvs[ref_frame][0].as_mv, &tmp_mv, - INT_MAX, 0); + vp9_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, + x->sadperbit4, cond_cost_list(cpi, cost_list), + &mbmi_ext->ref_mvs[ref_frame][0].as_mv, + &tmp_mv, INT_MAX, 0); - x->mv_col_min = tmp_col_min; - x->mv_col_max = tmp_col_max; - x->mv_row_min = tmp_row_min; - x->mv_row_max = tmp_row_max; + x->mv_limits = tmp_mv_limits; // calculate the bit cost on motion vector mvp_full.row = tmp_mv.row * 8; mvp_full.col = tmp_mv.col * 8; - b_rate += vp9_mv_bit_cost(&mvp_full, - &mbmi_ext->ref_mvs[ref_frame][0].as_mv, - x->nmvjointcost, x->mvcost, - MV_COST_WEIGHT); + b_rate += vp9_mv_bit_cost( + &mvp_full, &mbmi_ext->ref_mvs[ref_frame][0].as_mv, + x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); b_rate += cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]] [INTER_OFFSET(NEWMV)]; - if (RDCOST(x->rdmult, x->rddiv, b_rate, 0) > b_best_rd) - continue; + if (RDCOST(x->rdmult, x->rddiv, b_rate, 0) > b_best_rd) continue; - cpi->find_fractional_mv_step(x, &tmp_mv, - &mbmi_ext->ref_mvs[ref_frame][0].as_mv, - cpi->common.allow_high_precision_mv, - x->errorperbit, - &cpi->fn_ptr[bsize], - cpi->sf.mv.subpel_force_stop, - cpi->sf.mv.subpel_iters_per_step, - cond_cost_list(cpi, cost_list), - x->nmvjointcost, x->mvcost, - &dummy_dist, - &x->pred_sse[ref_frame], NULL, 0, 0); + cpi->find_fractional_mv_step( + x, &tmp_mv, &mbmi_ext->ref_mvs[ref_frame][0].as_mv, + cpi->common.allow_high_precision_mv, x->errorperbit, + &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop, + cpi->sf.mv.subpel_iters_per_step, + cond_cost_list(cpi, cost_list), x->nmvjointcost, x->mvcost, + &dummy_dist, &x->pred_sse[ref_frame], NULL, 0, 0); 
xd->mi[0]->bmi[i].as_mv[0].as_mv = tmp_mv; } else { @@ -2084,28 +2234,22 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - vp9_highbd_build_inter_predictor(pd->pre[0].buf, pd->pre[0].stride, - pd->dst.buf, pd->dst.stride, - &xd->mi[0]->bmi[i].as_mv[0].as_mv, - &xd->block_refs[0]->sf, - 4 * num_4x4_blocks_wide, - 4 * num_4x4_blocks_high, 0, - vp9_filter_kernels[mi->interp_filter], - MV_PRECISION_Q3, - mi_col * MI_SIZE + 4 * (i & 0x01), - mi_row * MI_SIZE + 4 * (i >> 1), xd->bd); + vp9_highbd_build_inter_predictor( + pd->pre[0].buf, pd->pre[0].stride, pd->dst.buf, pd->dst.stride, + &xd->mi[0]->bmi[i].as_mv[0].as_mv, &xd->block_refs[0]->sf, + 4 * num_4x4_blocks_wide, 4 * num_4x4_blocks_high, 0, + vp9_filter_kernels[mi->interp_filter], MV_PRECISION_Q3, + mi_col * MI_SIZE + 4 * (i & 0x01), + mi_row * MI_SIZE + 4 * (i >> 1), xd->bd); } else { #endif - vp9_build_inter_predictor(pd->pre[0].buf, pd->pre[0].stride, - pd->dst.buf, pd->dst.stride, - &xd->mi[0]->bmi[i].as_mv[0].as_mv, - &xd->block_refs[0]->sf, - 4 * num_4x4_blocks_wide, - 4 * num_4x4_blocks_high, 0, - vp9_filter_kernels[mi->interp_filter], - MV_PRECISION_Q3, - mi_col * MI_SIZE + 4 * (i & 0x01), - mi_row * MI_SIZE + 4 * (i >> 1)); + vp9_build_inter_predictor( + pd->pre[0].buf, pd->pre[0].stride, pd->dst.buf, pd->dst.stride, + &xd->mi[0]->bmi[i].as_mv[0].as_mv, &xd->block_refs[0]->sf, + 4 * num_4x4_blocks_wide, 4 * num_4x4_blocks_high, 0, + vp9_filter_kernels[mi->interp_filter], MV_PRECISION_Q3, + mi_col * MI_SIZE + 4 * (i & 0x01), + mi_row * MI_SIZE + 4 * (i >> 1)); #if CONFIG_VP9_HIGHBITDEPTH } @@ -2115,8 +2259,8 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, &var_y, &sse_y); this_rdc.rate += b_rate; - this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, - this_rdc.rate, this_rdc.dist); + this_rdc.rdcost = + RDCOST(x->rdmult, x->rddiv, this_rdc.rate, this_rdc.dist); if (this_rdc.rdcost < b_best_rd) { 
b_best_rd = this_rdc.rdcost; bsi[ref_frame][i].as_mode = this_mode; @@ -2131,10 +2275,8 @@ void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, this_rd += b_best_rd; xd->mi[0]->bmi[i] = bsi[ref_frame][i]; - if (num_4x4_blocks_wide > 1) - xd->mi[0]->bmi[i + 1] = xd->mi[0]->bmi[i]; - if (num_4x4_blocks_high > 1) - xd->mi[0]->bmi[i + 2] = xd->mi[0]->bmi[i]; + if (num_4x4_blocks_wide > 1) xd->mi[0]->bmi[i + 1] = xd->mi[0]->bmi[i]; + if (num_4x4_blocks_high > 1) xd->mi[0]->bmi[i + 2] = xd->mi[0]->bmi[i]; } } // loop through sub8x8 blocks diff --git a/libs/libvpx/vp9/encoder/vp9_pickmode.h b/libs/libvpx/vp9/encoder/vp9_pickmode.h index a43bb81260..9aa00c4fab 100644 --- a/libs/libvpx/vp9/encoder/vp9_pickmode.h +++ b/libs/libvpx/vp9/encoder/vp9_pickmode.h @@ -20,15 +20,12 @@ extern "C" { void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx); -void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, - TileDataEnc *tile_data, +void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, TileDataEnc *tile_data, int mi_row, int mi_col, RD_COST *rd_cost, - BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx); + BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx); -void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, - int mi_row, int mi_col, RD_COST *rd_cost, - BLOCK_SIZE bsize, +void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x, int mi_row, + int mi_col, RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx); #ifdef __cplusplus diff --git a/libs/libvpx/vp9/encoder/vp9_quantize.c b/libs/libvpx/vp9/encoder/vp9_quantize.c index 91f877ed7e..de96c6e068 100644 --- a/libs/libvpx/vp9/encoder/vp9_quantize.c +++ b/libs/libvpx/vp9/encoder/vp9_quantize.c @@ -21,13 +21,12 @@ #include "vp9/encoder/vp9_rd.h" void vp9_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, - int skip_block, - const int16_t *zbin_ptr, const int16_t *round_ptr, - const int16_t *quant_ptr, const int16_t *quant_shift_ptr, - tran_low_t 
*qcoeff_ptr, tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, - const int16_t *scan, const int16_t *iscan) { + int skip_block, const int16_t *zbin_ptr, + const int16_t *round_ptr, const int16_t *quant_ptr, + const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, + tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, + uint16_t *eob_ptr, const int16_t *scan, + const int16_t *iscan) { int i, eob = -1; // TODO(jingning) Decide the need of these arguments after the // quantization process is completed. @@ -53,27 +52,21 @@ void vp9_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign; dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0]; - if (tmp) - eob = i; + if (tmp) eob = i; } } *eob_ptr = eob + 1; } #if CONFIG_VP9_HIGHBITDEPTH -void vp9_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, - intptr_t count, - int skip_block, - const int16_t *zbin_ptr, +void vp9_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t count, + int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, - tran_low_t *qcoeff_ptr, - tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, - const int16_t *scan, - const int16_t *iscan) { + tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, + const int16_t *dequant_ptr, uint16_t *eob_ptr, + const int16_t *scan, const int16_t *iscan) { int i; int eob = -1; // TODO(jingning) Decide the need of these arguments after the @@ -94,11 +87,10 @@ void vp9_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, const int coeff_sign = (coeff >> 31); const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign; const int64_t tmp = abs_coeff + round_ptr[rc != 0]; - const uint32_t abs_qcoeff = (uint32_t)((tmp * quant_ptr[rc != 0]) >> 16); + const int abs_qcoeff = (int)((tmp * quant_ptr[rc != 0]) >> 16); qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign); dqcoeff_ptr[rc] = 
qcoeff_ptr[rc] * dequant_ptr[rc != 0]; - if (abs_qcoeff) - eob = i; + if (abs_qcoeff) eob = i; } } *eob_ptr = eob + 1; @@ -108,13 +100,11 @@ void vp9_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, // TODO(jingning) Refactor this file and combine functions with similar // operations. void vp9_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, - int skip_block, - const int16_t *zbin_ptr, const int16_t *round_ptr, - const int16_t *quant_ptr, + int skip_block, const int16_t *zbin_ptr, + const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, + const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan) { int i, eob = -1; (void)zbin_ptr; @@ -140,25 +130,19 @@ void vp9_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2; } - if (tmp) - eob = i; + if (tmp) eob = i; } } *eob_ptr = eob + 1; } #if CONFIG_VP9_HIGHBITDEPTH -void vp9_highbd_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, - intptr_t n_coeffs, int skip_block, - const int16_t *zbin_ptr, - const int16_t *round_ptr, - const int16_t *quant_ptr, - const int16_t *quant_shift_ptr, - tran_low_t *qcoeff_ptr, - tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, - const int16_t *scan, const int16_t *iscan) { +void vp9_highbd_quantize_fp_32x32_c( + const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, + const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, + const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, + tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, + const int16_t *scan, const int16_t *iscan) { int i, eob = -1; (void)zbin_ptr; (void)quant_shift_ptr; @@ -176,15 +160,14 @@ void vp9_highbd_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, const int abs_coeff = (coeff ^ coeff_sign) - 
coeff_sign; if (abs_coeff >= (dequant_ptr[rc != 0] >> 2)) { - const int64_t tmp = abs_coeff - + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1); - abs_qcoeff = (uint32_t) ((tmp * quant_ptr[rc != 0]) >> 15); + const int64_t tmp = + abs_coeff + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1); + abs_qcoeff = (uint32_t)((tmp * quant_ptr[rc != 0]) >> 15); qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign); dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2; } - if (abs_qcoeff) - eob = i; + if (abs_qcoeff) eob = i; } } *eob_ptr = eob + 1; @@ -199,32 +182,28 @@ void vp9_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block, #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - vpx_highbd_quantize_b(BLOCK_OFFSET(p->coeff, block), - 16, x->skip_block, + vpx_highbd_quantize_b(BLOCK_OFFSET(p->coeff, block), 16, x->skip_block, p->zbin, p->round, p->quant, p->quant_shift, BLOCK_OFFSET(p->qcoeff, block), - BLOCK_OFFSET(pd->dqcoeff, block), - pd->dequant, &p->eobs[block], - scan, iscan); + BLOCK_OFFSET(pd->dqcoeff, block), pd->dequant, + &p->eobs[block], scan, iscan); return; } #endif - vpx_quantize_b(BLOCK_OFFSET(p->coeff, block), - 16, x->skip_block, - p->zbin, p->round, p->quant, p->quant_shift, + vpx_quantize_b(BLOCK_OFFSET(p->coeff, block), 16, x->skip_block, p->zbin, + p->round, p->quant, p->quant_shift, BLOCK_OFFSET(p->qcoeff, block), - BLOCK_OFFSET(pd->dqcoeff, block), - pd->dequant, &p->eobs[block], scan, iscan); + BLOCK_OFFSET(pd->dqcoeff, block), pd->dequant, &p->eobs[block], + scan, iscan); } static void invert_quant(int16_t *quant, int16_t *shift, int d) { unsigned t; - int l; + int l, m; t = d; - for (l = 0; t > 1; l++) - t >>= 1; - t = 1 + (1 << (16 + l)) / d; - *quant = (int16_t)(t - (1 << 16)); + for (l = 0; t > 1; l++) t >>= 1; + m = 1 + (1 << (16 + l)) / d; + *quant = (int16_t)(m - (1 << 16)); *shift = 1 << (16 - l); } @@ -232,18 +211,15 @@ static int get_qzbin_factor(int q, vpx_bit_depth_t bit_depth) { const int 
quant = vp9_dc_quant(q, 0, bit_depth); #if CONFIG_VP9_HIGHBITDEPTH switch (bit_depth) { - case VPX_BITS_8: - return q == 0 ? 64 : (quant < 148 ? 84 : 80); - case VPX_BITS_10: - return q == 0 ? 64 : (quant < 592 ? 84 : 80); - case VPX_BITS_12: - return q == 0 ? 64 : (quant < 2368 ? 84 : 80); + case VPX_BITS_8: return q == 0 ? 64 : (quant < 148 ? 84 : 80); + case VPX_BITS_10: return q == 0 ? 64 : (quant < 592 ? 84 : 80); + case VPX_BITS_12: return q == 0 ? 64 : (quant < 2368 ? 84 : 80); default: assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); return -1; } #else - (void) bit_depth; + (void)bit_depth; return q == 0 ? 64 : (quant < 148 ? 84 : 80); #endif } @@ -259,8 +235,7 @@ void vp9_init_quantizer(VP9_COMP *cpi) { for (i = 0; i < 2; ++i) { int qrounding_factor_fp = i == 0 ? 48 : 42; - if (q == 0) - qrounding_factor_fp = 64; + if (q == 0) qrounding_factor_fp = 64; // y quant = i == 0 ? vp9_dc_quant(q, cm->y_dc_delta_q, cm->bit_depth) @@ -275,8 +250,8 @@ void vp9_init_quantizer(VP9_COMP *cpi) { // uv quant = i == 0 ? vp9_dc_quant(q, cm->uv_dc_delta_q, cm->bit_depth) : vp9_ac_quant(q, cm->uv_ac_delta_q, cm->bit_depth); - invert_quant(&quants->uv_quant[q][i], - &quants->uv_quant_shift[q][i], quant); + invert_quant(&quants->uv_quant[q][i], &quants->uv_quant_shift[q][i], + quant); quants->uv_quant_fp[q][i] = (1 << 16) / quant; quants->uv_round_fp[q][i] = (qrounding_factor_fp * quant) >> 7; quants->uv_zbin[q][i] = ROUND_POWER_OF_TWO(qzbin_factor * quant, 7); @@ -363,14 +338,11 @@ void vp9_set_quantizer(VP9_COMMON *cm, int q) { // Table that converts 0-63 Q-range values passed in outside to the Qindex // range used internally. 
static const int quantizer_to_qindex[] = { - 0, 4, 8, 12, 16, 20, 24, 28, - 32, 36, 40, 44, 48, 52, 56, 60, - 64, 68, 72, 76, 80, 84, 88, 92, - 96, 100, 104, 108, 112, 116, 120, 124, - 128, 132, 136, 140, 144, 148, 152, 156, - 160, 164, 168, 172, 176, 180, 184, 188, - 192, 196, 200, 204, 208, 212, 216, 220, - 224, 228, 232, 236, 240, 244, 249, 255, + 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, + 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, + 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 148, 152, + 156, 160, 164, 168, 172, 176, 180, 184, 188, 192, 196, 200, 204, + 208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 249, 255, }; int vp9_quantizer_to_qindex(int quantizer) { @@ -381,8 +353,7 @@ int vp9_qindex_to_quantizer(int qindex) { int quantizer; for (quantizer = 0; quantizer < 64; ++quantizer) - if (quantizer_to_qindex[quantizer] >= qindex) - return quantizer; + if (quantizer_to_qindex[quantizer] >= qindex) return quantizer; return 63; } diff --git a/libs/libvpx/vp9/encoder/vp9_ratectrl.c b/libs/libvpx/vp9/encoder/vp9_ratectrl.c index 5df2909cca..93eddd655a 100644 --- a/libs/libvpx/vp9/encoder/vp9_ratectrl.c +++ b/libs/libvpx/vp9/encoder/vp9_ratectrl.c @@ -46,29 +46,24 @@ #define FRAME_OVERHEAD_BITS 200 #if CONFIG_VP9_HIGHBITDEPTH -#define ASSIGN_MINQ_TABLE(bit_depth, name) \ - do { \ - switch (bit_depth) { \ - case VPX_BITS_8: \ - name = name##_8; \ - break; \ - case VPX_BITS_10: \ - name = name##_10; \ - break; \ - case VPX_BITS_12: \ - name = name##_12; \ - break; \ - default: \ - assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10" \ - " or VPX_BITS_12"); \ - name = NULL; \ - } \ +#define ASSIGN_MINQ_TABLE(bit_depth, name) \ + do { \ + switch (bit_depth) { \ + case VPX_BITS_8: name = name##_8; break; \ + case VPX_BITS_10: name = name##_10; break; \ + case VPX_BITS_12: name = name##_12; break; \ + default: \ + assert(0 && \ + "bit_depth should be VPX_BITS_8, VPX_BITS_10" \ + " or VPX_BITS_12"); \ + name = NULL; \ + } \ } while (0) 
#else #define ASSIGN_MINQ_TABLE(bit_depth, name) \ - do { \ - (void) bit_depth; \ - name = name##_8; \ + do { \ + (void)bit_depth; \ + name = name##_8; \ } while (0) #endif @@ -107,25 +102,22 @@ static int kf_low = 400; static int get_minq_index(double maxq, double x3, double x2, double x1, vpx_bit_depth_t bit_depth) { int i; - const double minqtarget = VPXMIN(((x3 * maxq + x2) * maxq + x1) * maxq, - maxq); + const double minqtarget = VPXMIN(((x3 * maxq + x2) * maxq + x1) * maxq, maxq); // Special case handling to deal with the step from q2.0 // down to lossless mode represented by q 1.0. - if (minqtarget <= 2.0) - return 0; + if (minqtarget <= 2.0) return 0; for (i = 0; i < QINDEX_RANGE; i++) { - if (minqtarget <= vp9_convert_qindex_to_q(i, bit_depth)) - return i; + if (minqtarget <= vp9_convert_qindex_to_q(i, bit_depth)) return i; } return QINDEX_RANGE - 1; } -static void init_minq_luts(int *kf_low_m, int *kf_high_m, - int *arfgf_low, int *arfgf_high, - int *inter, int *rtc, vpx_bit_depth_t bit_depth) { +static void init_minq_luts(int *kf_low_m, int *kf_high_m, int *arfgf_low, + int *arfgf_high, int *inter, int *rtc, + vpx_bit_depth_t bit_depth) { int i; for (i = 0; i < QINDEX_RANGE; i++) { const double maxq = vp9_convert_qindex_to_q(i, bit_depth); @@ -133,7 +125,7 @@ static void init_minq_luts(int *kf_low_m, int *kf_high_m, kf_high_m[i] = get_minq_index(maxq, 0.0000021, -0.00125, 0.55, bit_depth); arfgf_low[i] = get_minq_index(maxq, 0.0000015, -0.0009, 0.30, bit_depth); arfgf_high[i] = get_minq_index(maxq, 0.0000021, -0.00125, 0.55, bit_depth); - inter[i] = get_minq_index(maxq, 0.00000271, -0.00113, 0.90, bit_depth); + inter[i] = get_minq_index(maxq, 0.00000271, -0.00113, 0.70, bit_depth); rtc[i] = get_minq_index(maxq, 0.00000271, -0.00113, 0.70, bit_depth); } } @@ -156,15 +148,12 @@ void vp9_rc_init_minq_luts(void) { // quantizer tables easier. 
If necessary they can be replaced by lookup // tables if and when things settle down in the experimental bitstream double vp9_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth) { - // Convert the index to a real Q value (scaled down to match old Q values) +// Convert the index to a real Q value (scaled down to match old Q values) #if CONFIG_VP9_HIGHBITDEPTH switch (bit_depth) { - case VPX_BITS_8: - return vp9_ac_quant(qindex, 0, bit_depth) / 4.0; - case VPX_BITS_10: - return vp9_ac_quant(qindex, 0, bit_depth) / 16.0; - case VPX_BITS_12: - return vp9_ac_quant(qindex, 0, bit_depth) / 64.0; + case VPX_BITS_8: return vp9_ac_quant(qindex, 0, bit_depth) / 4.0; + case VPX_BITS_10: return vp9_ac_quant(qindex, 0, bit_depth) / 16.0; + case VPX_BITS_12: return vp9_ac_quant(qindex, 0, bit_depth) / 64.0; default: assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); return -1.0; @@ -175,8 +164,7 @@ double vp9_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth) { } int vp9_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex, - double correction_factor, - vpx_bit_depth_t bit_depth) { + double correction_factor, vpx_bit_depth_t bit_depth) { const double q = vp9_convert_qindex_to_q(qindex, bit_depth); int enumerator = frame_type == KEY_FRAME ? 
2700000 : 1800000; @@ -191,8 +179,8 @@ int vp9_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex, int vp9_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs, double correction_factor, vpx_bit_depth_t bit_depth) { - const int bpm = (int)(vp9_rc_bits_per_mb(frame_type, q, correction_factor, - bit_depth)); + const int bpm = + (int)(vp9_rc_bits_per_mb(frame_type, q, correction_factor, bit_depth)); return VPXMAX(FRAME_OVERHEAD_BITS, (int)((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS); } @@ -200,10 +188,9 @@ int vp9_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs, int vp9_rc_clamp_pframe_target_size(const VP9_COMP *const cpi, int target) { const RATE_CONTROL *rc = &cpi->rc; const VP9EncoderConfig *oxcf = &cpi->oxcf; - const int min_frame_target = VPXMAX(rc->min_frame_bandwidth, - rc->avg_frame_bandwidth >> 5); - if (target < min_frame_target) - target = min_frame_target; + const int min_frame_target = + VPXMAX(rc->min_frame_bandwidth, rc->avg_frame_bandwidth >> 5); + if (target < min_frame_target) target = min_frame_target; if (cpi->refresh_golden_frame && rc->is_src_frame_alt_ref) { // If there is an active ARF at this location use the minimum // bits on this frame even if it is a constructed arf. @@ -212,11 +199,10 @@ int vp9_rc_clamp_pframe_target_size(const VP9_COMP *const cpi, int target) { target = min_frame_target; } // Clip the frame target to the maximum allowed value. 
- if (target > rc->max_frame_bandwidth) - target = rc->max_frame_bandwidth; + if (target > rc->max_frame_bandwidth) target = rc->max_frame_bandwidth; if (oxcf->rc_max_inter_bitrate_pct) { - const int max_rate = rc->avg_frame_bandwidth * - oxcf->rc_max_inter_bitrate_pct / 100; + const int max_rate = + rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100; target = VPXMIN(target, max_rate); } return target; @@ -226,12 +212,11 @@ int vp9_rc_clamp_iframe_target_size(const VP9_COMP *const cpi, int target) { const RATE_CONTROL *rc = &cpi->rc; const VP9EncoderConfig *oxcf = &cpi->oxcf; if (oxcf->rc_max_intra_bitrate_pct) { - const int max_rate = rc->avg_frame_bandwidth * - oxcf->rc_max_intra_bitrate_pct / 100; + const int max_rate = + rc->avg_frame_bandwidth * oxcf->rc_max_intra_bitrate_pct / 100; target = VPXMIN(target, max_rate); } - if (target > rc->max_frame_bandwidth) - target = rc->max_frame_bandwidth; + if (target > rc->max_frame_bandwidth) target = rc->max_frame_bandwidth; return target; } @@ -240,14 +225,13 @@ int vp9_rc_clamp_iframe_target_size(const VP9_COMP *const cpi, int target) { static void update_layer_buffer_level(SVC *svc, int encoded_frame_size) { int i = 0; int current_temporal_layer = svc->temporal_layer_id; - for (i = current_temporal_layer + 1; - i < svc->number_temporal_layers; ++i) { - const int layer = LAYER_IDS_TO_IDX(svc->spatial_layer_id, i, - svc->number_temporal_layers); + for (i = current_temporal_layer + 1; i < svc->number_temporal_layers; ++i) { + const int layer = + LAYER_IDS_TO_IDX(svc->spatial_layer_id, i, svc->number_temporal_layers); LAYER_CONTEXT *lc = &svc->layer_context[layer]; RATE_CONTROL *lrc = &lc->rc; - int bits_off_for_this_layer = (int)(lc->target_bandwidth / lc->framerate - - encoded_frame_size); + int bits_off_for_this_layer = + (int)(lc->target_bandwidth / lc->framerate - encoded_frame_size); lrc->bits_off_target += bits_off_for_this_layer; // Clip buffer level to maximum buffer size for the layer. 
@@ -285,8 +269,8 @@ static void update_buffer_level(VP9_COMP *cpi, int encoded_frame_size) { } } -int vp9_rc_get_default_min_gf_interval( - int width, int height, double framerate) { +int vp9_rc_get_default_min_gf_interval(int width, int height, + double framerate) { // Assume we do not need any constraint lower than 4K 20 fps static const double factor_safe = 3840 * 2160 * 20.0; const double factor = width * height * framerate; @@ -317,27 +301,36 @@ void vp9_rc_init(const VP9EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) { rc->avg_frame_qindex[KEY_FRAME] = oxcf->worst_allowed_q; rc->avg_frame_qindex[INTER_FRAME] = oxcf->worst_allowed_q; } else { - rc->avg_frame_qindex[KEY_FRAME] = (oxcf->worst_allowed_q + - oxcf->best_allowed_q) / 2; - rc->avg_frame_qindex[INTER_FRAME] = (oxcf->worst_allowed_q + - oxcf->best_allowed_q) / 2; + rc->avg_frame_qindex[KEY_FRAME] = + (oxcf->worst_allowed_q + oxcf->best_allowed_q) / 2; + rc->avg_frame_qindex[INTER_FRAME] = + (oxcf->worst_allowed_q + oxcf->best_allowed_q) / 2; } rc->last_q[KEY_FRAME] = oxcf->best_allowed_q; rc->last_q[INTER_FRAME] = oxcf->worst_allowed_q; - rc->buffer_level = rc->starting_buffer_level; + rc->buffer_level = rc->starting_buffer_level; rc->bits_off_target = rc->starting_buffer_level; - rc->rolling_target_bits = rc->avg_frame_bandwidth; - rc->rolling_actual_bits = rc->avg_frame_bandwidth; + rc->rolling_target_bits = rc->avg_frame_bandwidth; + rc->rolling_actual_bits = rc->avg_frame_bandwidth; rc->long_rolling_target_bits = rc->avg_frame_bandwidth; rc->long_rolling_actual_bits = rc->avg_frame_bandwidth; rc->total_actual_bits = 0; rc->total_target_bits = 0; rc->total_target_vs_actual = 0; - + rc->avg_frame_low_motion = 0; + rc->count_last_scene_change = 0; + rc->af_ratio_onepass_vbr = 10; + rc->prev_avg_source_sad_lag = 0; + rc->high_source_sad = 0; + rc->high_source_sad_lagindex = -1; + rc->fac_active_worst_inter = 150; + rc->fac_active_worst_gf = 100; + rc->force_qpmin = 0; + for (i = 0; i < 
MAX_LAG_BUFFERS; ++i) rc->avg_source_sad[i] = 0; rc->frames_since_key = 8; // Sensible default for first frame. rc->this_key_frame_forced = 0; rc->next_key_frame_forced = 0; @@ -381,13 +374,11 @@ int vp9_rc_drop_frame(VP9_COMP *cpi) { } else { // If buffer is below drop_mark, for now just drop every other frame // (starting with the next frame) until it increases back over drop_mark. - int drop_mark = (int)(oxcf->drop_frames_water_mark * - rc->optimal_buffer_level / 100); - if ((rc->buffer_level > drop_mark) && - (rc->decimation_factor > 0)) { + int drop_mark = + (int)(oxcf->drop_frames_water_mark * rc->optimal_buffer_level / 100); + if ((rc->buffer_level > drop_mark) && (rc->decimation_factor > 0)) { --rc->decimation_factor; - } else if (rc->buffer_level <= drop_mark && - rc->decimation_factor == 0) { + } else if (rc->buffer_level <= drop_mark && rc->decimation_factor == 0) { rc->decimation_factor = 1; } if (rc->decimation_factor > 0) { @@ -414,7 +405,7 @@ static double get_rate_correction_factor(const VP9_COMP *cpi) { rcf = rc->rate_correction_factors[KF_STD]; } else if (cpi->oxcf.pass == 2) { RATE_FACTOR_LEVEL rf_lvl = - cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index]; + cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index]; rcf = rc->rate_correction_factors[rf_lvl]; } else { if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) && @@ -440,7 +431,7 @@ static void set_rate_correction_factor(VP9_COMP *cpi, double factor) { rc->rate_correction_factors[KF_STD] = factor; } else if (cpi->oxcf.pass == 2) { RATE_FACTOR_LEVEL rf_lvl = - cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index]; + cpi->twopass.gf_group.rf_level[cpi->twopass.gf_group.index]; rc->rate_correction_factors[rf_lvl] = factor; } else { if ((cpi->refresh_alt_ref_frame || cpi->refresh_golden_frame) && @@ -461,8 +452,7 @@ void vp9_rc_update_rate_correction_factors(VP9_COMP *cpi) { int projected_size_based_on_q = 0; // Do not update the rate factors for arf overlay frames. 
- if (cpi->rc.is_src_frame_alt_ref) - return; + if (cpi->rc.is_src_frame_alt_ref) return; // Clear down mmx registers to allow floating point in what follows vpx_clear_system_state(); @@ -474,21 +464,19 @@ void vp9_rc_update_rate_correction_factors(VP9_COMP *cpi) { projected_size_based_on_q = vp9_cyclic_refresh_estimate_bits_at_q(cpi, rate_correction_factor); } else { - projected_size_based_on_q = vp9_estimate_bits_at_q(cpi->common.frame_type, - cm->base_qindex, - cm->MBs, - rate_correction_factor, - cm->bit_depth); + projected_size_based_on_q = + vp9_estimate_bits_at_q(cpi->common.frame_type, cm->base_qindex, cm->MBs, + rate_correction_factor, cm->bit_depth); } // Work out a size correction factor. if (projected_size_based_on_q > FRAME_OVERHEAD_BITS) correction_factor = (int)((100 * (int64_t)cpi->rc.projected_frame_size) / - projected_size_based_on_q); + projected_size_based_on_q); // More heavily damped adjustment used if we have been oscillating either side // of target. - adjustment_limit = 0.25 + - 0.5 * VPXMIN(1, fabs(log10(0.01 * correction_factor))); + adjustment_limit = + 0.25 + 0.5 * VPXMIN(1, fabs(log10(0.01 * correction_factor))); cpi->rc.q_2_frame = cpi->rc.q_1_frame; cpi->rc.q_1_frame = cm->base_qindex; @@ -508,16 +496,16 @@ void vp9_rc_update_rate_correction_factors(VP9_COMP *cpi) { if (correction_factor > 102) { // We are not already at the worst allowable quality - correction_factor = (int)(100 + ((correction_factor - 100) * - adjustment_limit)); + correction_factor = + (int)(100 + ((correction_factor - 100) * adjustment_limit)); rate_correction_factor = (rate_correction_factor * correction_factor) / 100; // Keep rate_correction_factor within limits if (rate_correction_factor > MAX_BPB_FACTOR) rate_correction_factor = MAX_BPB_FACTOR; } else if (correction_factor < 99) { // We are not already at the best allowable quality - correction_factor = (int)(100 - ((100 - correction_factor) * - adjustment_limit)); + correction_factor = + (int)(100 - ((100 - 
correction_factor) * adjustment_limit)); rate_correction_factor = (rate_correction_factor * correction_factor) / 100; // Keep rate_correction_factor within limits @@ -528,7 +516,6 @@ void vp9_rc_update_rate_correction_factors(VP9_COMP *cpi) { set_rate_correction_factor(cpi, rate_correction_factor); } - int vp9_rc_regulate_q(const VP9_COMP *cpi, int target_bits_per_frame, int active_best_quality, int active_worst_quality) { const VP9_COMMON *const cm = &cpi->common; @@ -540,20 +527,18 @@ int vp9_rc_regulate_q(const VP9_COMP *cpi, int target_bits_per_frame, // Calculate required scaling factor based on target frame size and size of // frame produced using previous Q. target_bits_per_mb = - ((uint64_t)target_bits_per_frame << BPER_MB_NORMBITS) / cm->MBs; + (int)(((uint64_t)target_bits_per_frame << BPER_MB_NORMBITS) / cm->MBs); i = active_best_quality; do { - if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && - cm->seg.enabled && + if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled && cpi->svc.temporal_layer_id == 0) { bits_per_mb_at_this_q = (int)vp9_cyclic_refresh_rc_bits_per_mb(cpi, i, correction_factor); } else { - bits_per_mb_at_this_q = (int)vp9_rc_bits_per_mb(cm->frame_type, i, - correction_factor, - cm->bit_depth); + bits_per_mb_at_this_q = (int)vp9_rc_bits_per_mb( + cm->frame_type, i, correction_factor, cm->bit_depth); } if (bits_per_mb_at_this_q <= target_bits_per_mb) { @@ -620,16 +605,20 @@ static int calc_active_worst_quality_one_pass_vbr(const VP9_COMP *cpi) { int active_worst_quality; if (cpi->common.frame_type == KEY_FRAME) { - active_worst_quality = curr_frame == 0 ? rc->worst_quality - : rc->last_q[KEY_FRAME] * 2; + active_worst_quality = + curr_frame == 0 ? rc->worst_quality : rc->last_q[KEY_FRAME] << 1; } else { if (!rc->is_src_frame_alt_ref && (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) { - active_worst_quality = curr_frame == 1 ? 
rc->last_q[KEY_FRAME] * 5 / 4 - : rc->last_q[INTER_FRAME]; + active_worst_quality = + curr_frame == 1 + ? rc->last_q[KEY_FRAME] * 5 >> 2 + : rc->last_q[INTER_FRAME] * rc->fac_active_worst_gf / 100; } else { - active_worst_quality = curr_frame == 1 ? rc->last_q[KEY_FRAME] * 2 - : rc->last_q[INTER_FRAME] * 2; + active_worst_quality = curr_frame == 1 + ? rc->last_q[KEY_FRAME] << 1 + : rc->avg_frame_qindex[INTER_FRAME] * + rc->fac_active_worst_inter / 100; } } return VPXMIN(active_worst_quality, rc->worst_quality); @@ -651,28 +640,27 @@ static int calc_active_worst_quality_one_pass_cbr(const VP9_COMP *cpi) { int active_worst_quality; int ambient_qp; unsigned int num_frames_weight_key = 5 * cpi->svc.number_temporal_layers; - if (cm->frame_type == KEY_FRAME) - return rc->worst_quality; + if (cm->frame_type == KEY_FRAME) return rc->worst_quality; // For ambient_qp we use minimum of avg_frame_qindex[KEY_FRAME/INTER_FRAME] // for the first few frames following key frame. These are both initialized // to worst_quality and updated with (3/4, 1/4) average in postencode_update. // So for first few frames following key, the qp of that key frame is weighted // into the active_worst_quality setting. - ambient_qp = (cm->current_video_frame < num_frames_weight_key) ? - VPXMIN(rc->avg_frame_qindex[INTER_FRAME], - rc->avg_frame_qindex[KEY_FRAME]) : - rc->avg_frame_qindex[INTER_FRAME]; - active_worst_quality = VPXMIN(rc->worst_quality, ambient_qp * 5 / 4); + ambient_qp = (cm->current_video_frame < num_frames_weight_key) + ? VPXMIN(rc->avg_frame_qindex[INTER_FRAME], + rc->avg_frame_qindex[KEY_FRAME]) + : rc->avg_frame_qindex[INTER_FRAME]; + active_worst_quality = VPXMIN(rc->worst_quality, ambient_qp * 5 >> 2); if (rc->buffer_level > rc->optimal_buffer_level) { // Adjust down. // Maximum limit for down adjustment, ~30%. 
int max_adjustment_down = active_worst_quality / 3; if (max_adjustment_down) { - buff_lvl_step = ((rc->maximum_buffer_size - - rc->optimal_buffer_level) / max_adjustment_down); + buff_lvl_step = ((rc->maximum_buffer_size - rc->optimal_buffer_level) / + max_adjustment_down); if (buff_lvl_step) adjustment = (int)((rc->buffer_level - rc->optimal_buffer_level) / - buff_lvl_step); + buff_lvl_step); active_worst_quality -= adjustment; } } else if (rc->buffer_level > critical_level) { @@ -712,18 +700,16 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP9_COMP *cpi, if (rc->this_key_frame_forced) { int qindex = rc->last_boosted_qindex; double last_boosted_q = vp9_convert_qindex_to_q(qindex, cm->bit_depth); - int delta_qindex = vp9_compute_qdelta(rc, last_boosted_q, - (last_boosted_q * 0.75), - cm->bit_depth); + int delta_qindex = vp9_compute_qdelta( + rc, last_boosted_q, (last_boosted_q * 0.75), cm->bit_depth); active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality); } else if (cm->current_video_frame > 0) { // not first frame of one pass and kf_boost is set double q_adj_factor = 1.0; double q_val; - active_best_quality = - get_kf_active_quality(rc, rc->avg_frame_qindex[KEY_FRAME], - cm->bit_depth); + active_best_quality = get_kf_active_quality( + rc, rc->avg_frame_qindex[KEY_FRAME], cm->bit_depth); // Allow somewhat lower kf minq with small image formats. if ((cm->width * cm->height) <= (352 * 288)) { @@ -733,12 +719,10 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP9_COMP *cpi, // Convert the adjustment factor to a qindex delta // on active_best_quality. 
q_val = vp9_convert_qindex_to_q(active_best_quality, cm->bit_depth); - active_best_quality += vp9_compute_qdelta(rc, q_val, - q_val * q_adj_factor, - cm->bit_depth); + active_best_quality += + vp9_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth); } - } else if (!rc->is_src_frame_alt_ref && - !cpi->use_svc && + } else if (!rc->is_src_frame_alt_ref && !cpi->use_svc && (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) { // Use the lower of active_worst_quality and recent // average Q as basis for GF/ARF best Q limit unless last frame was @@ -766,24 +750,22 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP9_COMP *cpi, } // Clip the active best and worst quality values to limits - active_best_quality = clamp(active_best_quality, - rc->best_quality, rc->worst_quality); - active_worst_quality = clamp(active_worst_quality, - active_best_quality, rc->worst_quality); + active_best_quality = + clamp(active_best_quality, rc->best_quality, rc->worst_quality); + active_worst_quality = + clamp(active_worst_quality, active_best_quality, rc->worst_quality); *top_index = active_worst_quality; *bottom_index = active_best_quality; #if LIMIT_QRANGE_FOR_ALTREF_AND_KEY // Limit Q range for the adaptive loop. - if (cm->frame_type == KEY_FRAME && - !rc->this_key_frame_forced && + if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced && !(cm->current_video_frame == 0)) { int qdelta = 0; vpx_clear_system_state(); - qdelta = vp9_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, - active_worst_quality, 2.0, - cm->bit_depth); + qdelta = vp9_compute_qdelta_by_rate( + &cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth); *top_index = active_worst_quality + qdelta; *top_index = (*top_index > *bottom_index) ? 
*top_index : *bottom_index; } @@ -793,8 +775,8 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP9_COMP *cpi, if (cm->frame_type == KEY_FRAME && rc->this_key_frame_forced) { q = rc->last_boosted_qindex; } else { - q = vp9_rc_regulate_q(cpi, rc->this_frame_target, - active_best_quality, active_worst_quality); + q = vp9_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality, + active_worst_quality); if (q > *top_index) { // Special case when we are targeting the max allowed rate if (rc->this_frame_target >= rc->max_frame_bandwidth) @@ -803,20 +785,18 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP9_COMP *cpi, q = *top_index; } } - assert(*top_index <= rc->worst_quality && - *top_index >= rc->best_quality); + assert(*top_index <= rc->worst_quality && *top_index >= rc->best_quality); assert(*bottom_index <= rc->worst_quality && *bottom_index >= rc->best_quality); assert(q <= rc->worst_quality && q >= rc->best_quality); return q; } -static int get_active_cq_level(const RATE_CONTROL *rc, - const VP9EncoderConfig *const oxcf) { +static int get_active_cq_level_one_pass(const RATE_CONTROL *rc, + const VP9EncoderConfig *const oxcf) { static const double cq_adjust_threshold = 0.1; int active_cq_level = oxcf->cq_level; - if (oxcf->rc_mode == VPX_CQ && - rc->total_target_bits > 0) { + if (oxcf->rc_mode == VPX_CQ && rc->total_target_bits > 0) { const double x = (double)rc->total_actual_bits / rc->total_target_bits; if (x < cq_adjust_threshold) { active_cq_level = (int)(active_cq_level * x / cq_adjust_threshold); @@ -825,13 +805,36 @@ static int get_active_cq_level(const RATE_CONTROL *rc, return active_cq_level; } +#define SMOOTH_PCT_MIN 0.1 +#define SMOOTH_PCT_DIV 0.05 +static int get_active_cq_level_two_pass(const TWO_PASS *twopass, + const RATE_CONTROL *rc, + const VP9EncoderConfig *const oxcf) { + static const double cq_adjust_threshold = 0.1; + int active_cq_level = oxcf->cq_level; + if (oxcf->rc_mode == VPX_CQ) { + if (twopass->mb_smooth_pct > 
SMOOTH_PCT_MIN) { + active_cq_level -= + (int)((twopass->mb_smooth_pct - SMOOTH_PCT_MIN) / SMOOTH_PCT_DIV); + active_cq_level = VPXMAX(active_cq_level, 0); + } + if (rc->total_target_bits > 0) { + const double x = (double)rc->total_actual_bits / rc->total_target_bits; + if (x < cq_adjust_threshold) { + active_cq_level = (int)(active_cq_level * x / cq_adjust_threshold); + } + } + } + return active_cq_level; +} + static int rc_pick_q_and_bounds_one_pass_vbr(const VP9_COMP *cpi, int *bottom_index, int *top_index) { const VP9_COMMON *const cm = &cpi->common; const RATE_CONTROL *const rc = &cpi->rc; const VP9EncoderConfig *const oxcf = &cpi->oxcf; - const int cq_level = get_active_cq_level(rc, oxcf); + const int cq_level = get_active_cq_level_one_pass(rc, oxcf); int active_best_quality; int active_worst_quality = calc_active_worst_quality_one_pass_vbr(cpi); int q; @@ -842,8 +845,7 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP9_COMP *cpi, if (oxcf->rc_mode == VPX_Q) { int qindex = cq_level; double q = vp9_convert_qindex_to_q(qindex, cm->bit_depth); - int delta_qindex = vp9_compute_qdelta(rc, q, q * 0.25, - cm->bit_depth); + int delta_qindex = vp9_compute_qdelta(rc, q, q * 0.25, cm->bit_depth); active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality); } else if (rc->this_key_frame_forced) { // Handle the special case for key frames forced when we have reached @@ -851,18 +853,16 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP9_COMP *cpi, // based on the ambient Q to reduce the risk of popping. 
int qindex = rc->last_boosted_qindex; double last_boosted_q = vp9_convert_qindex_to_q(qindex, cm->bit_depth); - int delta_qindex = vp9_compute_qdelta(rc, last_boosted_q, - last_boosted_q * 0.75, - cm->bit_depth); + int delta_qindex = vp9_compute_qdelta( + rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth); active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality); } else { // not first frame of one pass and kf_boost is set double q_adj_factor = 1.0; double q_val; - active_best_quality = - get_kf_active_quality(rc, rc->avg_frame_qindex[KEY_FRAME], - cm->bit_depth); + active_best_quality = get_kf_active_quality( + rc, rc->avg_frame_qindex[KEY_FRAME], cm->bit_depth); // Allow somewhat lower kf minq with small image formats. if ((cm->width * cm->height) <= (352 * 288)) { @@ -872,25 +872,26 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP9_COMP *cpi, // Convert the adjustment factor to a qindex delta // on active_best_quality. q_val = vp9_convert_qindex_to_q(active_best_quality, cm->bit_depth); - active_best_quality += vp9_compute_qdelta(rc, q_val, - q_val * q_adj_factor, - cm->bit_depth); + active_best_quality += + vp9_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth); } } else if (!rc->is_src_frame_alt_ref && (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) { // Use the lower of active_worst_quality and recent // average Q as basis for GF/ARF best Q limit unless last frame was // a key frame. 
- if (rc->frames_since_key > 1 && - rc->avg_frame_qindex[INTER_FRAME] < active_worst_quality) { - q = rc->avg_frame_qindex[INTER_FRAME]; + if (rc->frames_since_key > 1) { + if (rc->avg_frame_qindex[INTER_FRAME] < active_worst_quality) { + q = rc->avg_frame_qindex[INTER_FRAME]; + } else { + q = active_worst_quality; + } } else { q = rc->avg_frame_qindex[KEY_FRAME]; } // For constrained quality dont allow Q less than the cq level if (oxcf->rc_mode == VPX_CQ) { - if (q < cq_level) - q = cq_level; + if (q < cq_level) q = cq_level; active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth); @@ -913,33 +914,34 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP9_COMP *cpi, if (oxcf->rc_mode == VPX_Q) { int qindex = cq_level; double q = vp9_convert_qindex_to_q(qindex, cm->bit_depth); - double delta_rate[FIXED_GF_INTERVAL] = - {0.50, 1.0, 0.85, 1.0, 0.70, 1.0, 0.85, 1.0}; - int delta_qindex = - vp9_compute_qdelta(rc, q, - q * delta_rate[cm->current_video_frame % - FIXED_GF_INTERVAL], cm->bit_depth); + double delta_rate[FIXED_GF_INTERVAL] = { 0.50, 1.0, 0.85, 1.0, + 0.70, 1.0, 0.85, 1.0 }; + int delta_qindex = vp9_compute_qdelta( + rc, q, q * delta_rate[cm->current_video_frame % FIXED_GF_INTERVAL], + cm->bit_depth); active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality); } else { - // Use the lower of active_worst_quality and recent/average Q. - if (cm->current_video_frame > 1) - active_best_quality = inter_minq[rc->avg_frame_qindex[INTER_FRAME]]; - else + // Use the min of the average Q and active_worst_quality as basis for + // active_best. + if (cm->current_video_frame > 1) { + q = VPXMIN(rc->avg_frame_qindex[INTER_FRAME], active_worst_quality); + active_best_quality = inter_minq[q]; + } else { active_best_quality = inter_minq[rc->avg_frame_qindex[KEY_FRAME]]; + } // For the constrained quality mode we don't want // q to fall below the cq level. 
- if ((oxcf->rc_mode == VPX_CQ) && - (active_best_quality < cq_level)) { + if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) { active_best_quality = cq_level; } } } // Clip the active best and worst quality values to limits - active_best_quality = clamp(active_best_quality, - rc->best_quality, rc->worst_quality); - active_worst_quality = clamp(active_worst_quality, - active_best_quality, rc->worst_quality); + active_best_quality = + clamp(active_best_quality, rc->best_quality, rc->worst_quality); + active_worst_quality = + clamp(active_worst_quality, active_best_quality, rc->worst_quality); *top_index = active_worst_quality; *bottom_index = active_best_quality; @@ -950,17 +952,14 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP9_COMP *cpi, vpx_clear_system_state(); // Limit Q range for the adaptive loop. - if (cm->frame_type == KEY_FRAME && - !rc->this_key_frame_forced && + if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced && !(cm->current_video_frame == 0)) { - qdelta = vp9_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, - active_worst_quality, 2.0, - cm->bit_depth); + qdelta = vp9_compute_qdelta_by_rate( + &cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth); } else if (!rc->is_src_frame_alt_ref && (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) { - qdelta = vp9_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, - active_worst_quality, 1.75, - cm->bit_depth); + qdelta = vp9_compute_qdelta_by_rate( + &cpi->rc, cm->frame_type, active_worst_quality, 1.75, cm->bit_depth); } *top_index = active_worst_quality + qdelta; *top_index = (*top_index > *bottom_index) ? 
*top_index : *bottom_index; @@ -969,12 +968,12 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP9_COMP *cpi, if (oxcf->rc_mode == VPX_Q) { q = active_best_quality; - // Special case code to try and match quality with forced key frames + // Special case code to try and match quality with forced key frames } else if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced) { q = rc->last_boosted_qindex; } else { - q = vp9_rc_regulate_q(cpi, rc->this_frame_target, - active_best_quality, active_worst_quality); + q = vp9_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality, + active_worst_quality); if (q > *top_index) { // Special case when we are targeting the max allowed rate if (rc->this_frame_target >= rc->max_frame_bandwidth) @@ -984,8 +983,7 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP9_COMP *cpi, } } - assert(*top_index <= rc->worst_quality && - *top_index >= rc->best_quality); + assert(*top_index <= rc->worst_quality && *top_index >= rc->best_quality); assert(*bottom_index <= rc->worst_quality && *bottom_index >= rc->best_quality); assert(q <= rc->worst_quality && q >= rc->best_quality); @@ -1000,24 +998,24 @@ int vp9_frame_type_qdelta(const VP9_COMP *cpi, int rf_level, int q) { 1.75, // GF_ARF_STD 2.00, // KF_STD }; - static const FRAME_TYPE frame_type[RATE_FACTOR_LEVELS] = - {INTER_FRAME, INTER_FRAME, INTER_FRAME, INTER_FRAME, KEY_FRAME}; + static const FRAME_TYPE frame_type[RATE_FACTOR_LEVELS] = { + INTER_FRAME, INTER_FRAME, INTER_FRAME, INTER_FRAME, KEY_FRAME + }; const VP9_COMMON *const cm = &cpi->common; - int qdelta = vp9_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level], - q, rate_factor_deltas[rf_level], - cm->bit_depth); + int qdelta = + vp9_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level], q, + rate_factor_deltas[rf_level], cm->bit_depth); return qdelta; } #define STATIC_MOTION_THRESH 95 -static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, - int *bottom_index, +static int 
rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, int *bottom_index, int *top_index) { const VP9_COMMON *const cm = &cpi->common; const RATE_CONTROL *const rc = &cpi->rc; const VP9EncoderConfig *const oxcf = &cpi->oxcf; const GF_GROUP *gf_group = &cpi->twopass.gf_group; - const int cq_level = get_active_cq_level(rc, oxcf); + const int cq_level = get_active_cq_level_two_pass(&cpi->twopass, rc, oxcf); int active_best_quality; int active_worst_quality = cpi->twopass.active_worst_quality; int q; @@ -1038,16 +1036,14 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, active_best_quality = qindex; last_boosted_q = vp9_convert_qindex_to_q(qindex, cm->bit_depth); delta_qindex = vp9_compute_qdelta(rc, last_boosted_q, - last_boosted_q * 1.25, - cm->bit_depth); + last_boosted_q * 1.25, cm->bit_depth); active_worst_quality = VPXMIN(qindex + delta_qindex, active_worst_quality); } else { qindex = rc->last_boosted_qindex; last_boosted_q = vp9_convert_qindex_to_q(qindex, cm->bit_depth); delta_qindex = vp9_compute_qdelta(rc, last_boosted_q, - last_boosted_q * 0.75, - cm->bit_depth); + last_boosted_q * 0.75, cm->bit_depth); active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality); } } else { @@ -1055,8 +1051,8 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, double q_adj_factor = 1.0; double q_val; // Baseline value derived from cpi->active_worst_quality and kf boost. - active_best_quality = get_kf_active_quality(rc, active_worst_quality, - cm->bit_depth); + active_best_quality = + get_kf_active_quality(rc, active_worst_quality, cm->bit_depth); // Allow somewhat lower kf minq with small image formats. if ((cm->width * cm->height) <= (352 * 288)) { @@ -1069,9 +1065,8 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, // Convert the adjustment factor to a qindex delta // on active_best_quality. 
q_val = vp9_convert_qindex_to_q(active_best_quality, cm->bit_depth); - active_best_quality += vp9_compute_qdelta(rc, q_val, - q_val * q_adj_factor, - cm->bit_depth); + active_best_quality += + vp9_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth); } } else if (!rc->is_src_frame_alt_ref && (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) { @@ -1086,8 +1081,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, } // For constrained quality dont allow Q less than the cq level if (oxcf->rc_mode == VPX_CQ) { - if (q < cq_level) - q = cq_level; + if (q < cq_level) q = cq_level; active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth); @@ -1116,8 +1110,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, // For the constrained quality mode we don't want // q to fall below the cq level. - if ((oxcf->rc_mode == VPX_CQ) && - (active_best_quality < cq_level)) { + if ((oxcf->rc_mode == VPX_CQ) && (active_best_quality < cq_level)) { active_best_quality = cq_level; } } @@ -1125,17 +1118,16 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, // Extension to max or min Q if undershoot or overshoot is outside // the permitted range. 
- if ((cpi->oxcf.rc_mode != VPX_Q) && - (cpi->twopass.gf_zeromotion_pct < VLOW_MOTION_THRESHOLD)) { + if (cpi->oxcf.rc_mode != VPX_Q) { if (frame_is_intra_only(cm) || (!rc->is_src_frame_alt_ref && (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))) { active_best_quality -= - (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast); + (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast); active_worst_quality += (cpi->twopass.extend_maxq / 2); } else { active_best_quality -= - (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast) / 2; + (cpi->twopass.extend_minq + cpi->twopass.extend_minq_fast) / 2; active_worst_quality += cpi->twopass.extend_maxq; } } @@ -1148,28 +1140,27 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, (cpi->twopass.last_kfgroup_zeromotion_pct < STATIC_MOTION_THRESH)) { int qdelta = vp9_frame_type_qdelta(cpi, gf_group->rf_level[gf_group->index], active_worst_quality); - active_worst_quality = VPXMAX(active_worst_quality + qdelta, - active_best_quality); + active_worst_quality = + VPXMAX(active_worst_quality + qdelta, active_best_quality); } #endif // Modify active_best_quality for downscaled normal frames. 
if (rc->frame_size_selector != UNSCALED && !frame_is_kf_gf_arf(cpi)) { - int qdelta = vp9_compute_qdelta_by_rate(rc, cm->frame_type, - active_best_quality, 2.0, - cm->bit_depth); + int qdelta = vp9_compute_qdelta_by_rate( + rc, cm->frame_type, active_best_quality, 2.0, cm->bit_depth); active_best_quality = VPXMAX(active_best_quality + qdelta, rc->best_quality); } - active_best_quality = clamp(active_best_quality, - rc->best_quality, rc->worst_quality); - active_worst_quality = clamp(active_worst_quality, - active_best_quality, rc->worst_quality); + active_best_quality = + clamp(active_best_quality, rc->best_quality, rc->worst_quality); + active_worst_quality = + clamp(active_worst_quality, active_best_quality, rc->worst_quality); if (oxcf->rc_mode == VPX_Q) { q = active_best_quality; - // Special case code to try and match quality with forced key frames. + // Special case code to try and match quality with forced key frames. } else if ((frame_is_intra_only(cm) || vp9_is_upper_layer_key_frame(cpi)) && rc->this_key_frame_forced) { // If static since last kf use better of last boosted and last kf q. @@ -1179,8 +1170,8 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, q = rc->last_boosted_qindex; } } else { - q = vp9_rc_regulate_q(cpi, rc->this_frame_target, - active_best_quality, active_worst_quality); + q = vp9_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality, + active_worst_quality); if (q > active_worst_quality) { // Special case when we are targeting the max allowed rate. 
if (rc->this_frame_target >= rc->max_frame_bandwidth) @@ -1194,16 +1185,15 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, *top_index = active_worst_quality; *bottom_index = active_best_quality; - assert(*top_index <= rc->worst_quality && - *top_index >= rc->best_quality); + assert(*top_index <= rc->worst_quality && *top_index >= rc->best_quality); assert(*bottom_index <= rc->worst_quality && *bottom_index >= rc->best_quality); assert(q <= rc->worst_quality && q >= rc->best_quality); return q; } -int vp9_rc_pick_q_and_bounds(const VP9_COMP *cpi, - int *bottom_index, int *top_index) { +int vp9_rc_pick_q_and_bounds(const VP9_COMP *cpi, int *bottom_index, + int *top_index) { int q; if (cpi->oxcf.pass == 0) { if (cpi->oxcf.rc_mode == VPX_CBR) @@ -1214,8 +1204,7 @@ int vp9_rc_pick_q_and_bounds(const VP9_COMP *cpi, q = rc_pick_q_and_bounds_two_pass(cpi, bottom_index, top_index); } if (cpi->sf.use_nonrd_pick_mode) { - if (cpi->sf.force_frame_boost == 1) - q -= cpi->sf.max_delta_qindex; + if (cpi->sf.force_frame_boost == 1) q -= cpi->sf.max_delta_qindex; if (q < *bottom_index) *bottom_index = q; @@ -1225,20 +1214,20 @@ int vp9_rc_pick_q_and_bounds(const VP9_COMP *cpi, return q; } -void vp9_rc_compute_frame_size_bounds(const VP9_COMP *cpi, - int frame_target, +void vp9_rc_compute_frame_size_bounds(const VP9_COMP *cpi, int frame_target, int *frame_under_shoot_limit, int *frame_over_shoot_limit) { if (cpi->oxcf.rc_mode == VPX_Q) { *frame_under_shoot_limit = 0; - *frame_over_shoot_limit = INT_MAX; + *frame_over_shoot_limit = INT_MAX; } else { // For very small rate targets where the fractional adjustment // may be tiny make sure there is at least a minimum range. 
- const int tolerance = (cpi->sf.recode_tolerance * frame_target) / 100; - *frame_under_shoot_limit = VPXMAX(frame_target - tolerance - 200, 0); - *frame_over_shoot_limit = VPXMIN(frame_target + tolerance + 200, - cpi->rc.max_frame_bandwidth); + const int tol_low = (cpi->sf.recode_tolerance_low * frame_target) / 100; + const int tol_high = (cpi->sf.recode_tolerance_high * frame_target) / 100; + *frame_under_shoot_limit = VPXMAX(frame_target - tol_low - 100, 0); + *frame_over_shoot_limit = + VPXMIN(frame_target + tol_high + 100, cpi->rc.max_frame_bandwidth); } } @@ -1251,12 +1240,12 @@ void vp9_rc_set_frame_target(VP9_COMP *cpi, int target) { // Modify frame size target when down-scaling. if (cpi->oxcf.resize_mode == RESIZE_DYNAMIC && rc->frame_size_selector != UNSCALED) - rc->this_frame_target = (int)(rc->this_frame_target - * rate_thresh_mult[rc->frame_size_selector]); + rc->this_frame_target = (int)(rc->this_frame_target * + rate_thresh_mult[rc->frame_size_selector]); // Target rate per SB64 (including partial SB64s. 
- rc->sb64_target_rate = ((int64_t)rc->this_frame_target * 64 * 64) / - (cm->width * cm->height); + rc->sb64_target_rate = (int)(((int64_t)rc->this_frame_target * 64 * 64) / + (cm->width * cm->height)); } static void update_alt_ref_frame_stats(VP9_COMP *cpi) { @@ -1290,18 +1279,35 @@ static void update_golden_frame_stats(VP9_COMP *cpi) { } // Decrement count down till next gf - if (rc->frames_till_gf_update_due > 0) - rc->frames_till_gf_update_due--; + if (rc->frames_till_gf_update_due > 0) rc->frames_till_gf_update_due--; } else if (!cpi->refresh_alt_ref_frame) { // Decrement count down till next gf - if (rc->frames_till_gf_update_due > 0) - rc->frames_till_gf_update_due--; + if (rc->frames_till_gf_update_due > 0) rc->frames_till_gf_update_due--; rc->frames_since_golden++; } } +static void compute_frame_low_motion(VP9_COMP *const cpi) { + VP9_COMMON *const cm = &cpi->common; + int mi_row, mi_col; + MODE_INFO **mi = cm->mi_grid_visible; + RATE_CONTROL *const rc = &cpi->rc; + const int rows = cm->mi_rows, cols = cm->mi_cols; + int cnt_zeromv = 0; + for (mi_row = 0; mi_row < rows; mi_row++) { + for (mi_col = 0; mi_col < cols; mi_col++) { + if (abs(mi[0]->mv[0].as_mv.row) < 16 && abs(mi[0]->mv[0].as_mv.col) < 16) + cnt_zeromv++; + mi++; + } + mi += 8; + } + cnt_zeromv = 100 * cnt_zeromv / (rows * cols); + rc->avg_frame_low_motion = (3 * rc->avg_frame_low_motion + cnt_zeromv) >> 2; +} + void vp9_rc_postencode_update(VP9_COMP *cpi, uint64_t bytes_used) { const VP9_COMMON *const cm = &cpi->common; const VP9EncoderConfig *const oxcf = &cpi->oxcf; @@ -1341,7 +1347,7 @@ void vp9_rc_postencode_update(VP9_COMP *cpi, uint64_t bytes_used) { !(cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame))) { rc->last_q[INTER_FRAME] = qindex; rc->avg_frame_qindex[INTER_FRAME] = - ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2); + ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2); rc->ni_frames++; rc->tot_q += vp9_convert_qindex_to_q(qindex, 
cm->bit_depth); rc->avg_q = rc->tot_q / rc->ni_frames; @@ -1357,15 +1363,13 @@ void vp9_rc_postencode_update(VP9_COMP *cpi, uint64_t bytes_used) { // If all mbs in this group are skipped only update if the Q value is // better than that already stored. // This is used to help set quality in forced key frames to reduce popping - if ((qindex < rc->last_boosted_qindex) || - (cm->frame_type == KEY_FRAME) || + if ((qindex < rc->last_boosted_qindex) || (cm->frame_type == KEY_FRAME) || (!rc->constrained_gf_group && (cpi->refresh_alt_ref_frame || (cpi->refresh_golden_frame && !rc->is_src_frame_alt_ref)))) { rc->last_boosted_qindex = qindex; } - if (cm->frame_type == KEY_FRAME) - rc->last_kf_qindex = qindex; + if (cm->frame_type == KEY_FRAME) rc->last_kf_qindex = qindex; update_buffer_level(cpi, rc->projected_frame_size); @@ -1398,8 +1402,7 @@ void vp9_rc_postencode_update(VP9_COMP *cpi, uint64_t bytes_used) { update_golden_frame_stats(cpi); } - if (cm->frame_type == KEY_FRAME) - rc->frames_since_key = 0; + if (cm->frame_type == KEY_FRAME) rc->frames_since_key = 0; if (cm->show_frame) { rc->frames_since_key++; rc->frames_to_key--; @@ -1411,6 +1414,10 @@ void vp9_rc_postencode_update(VP9_COMP *cpi, uint64_t bytes_used) { rc->next_frame_size_selector != rc->frame_size_selector; rc->frame_size_selector = rc->next_frame_size_selector; } + + if (oxcf->pass == 0) { + if (cm->frame_type != KEY_FRAME) compute_frame_low_motion(cpi); + } } void vp9_rc_postencode_update_drop_frame(VP9_COMP *cpi) { @@ -1423,19 +1430,20 @@ void vp9_rc_postencode_update_drop_frame(VP9_COMP *cpi) { } // Use this macro to turn on/off use of alt-refs in one-pass mode. 
-#define USE_ALTREF_FOR_ONE_PASS 1 +#define USE_ALTREF_FOR_ONE_PASS 1 static int calc_pframe_target_size_one_pass_vbr(const VP9_COMP *const cpi) { - static const int af_ratio = 10; const RATE_CONTROL *const rc = &cpi->rc; int target; + const int af_ratio = rc->af_ratio_onepass_vbr; #if USE_ALTREF_FOR_ONE_PASS - target = (!rc->is_src_frame_alt_ref && - (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) ? - (rc->avg_frame_bandwidth * rc->baseline_gf_interval * af_ratio) / - (rc->baseline_gf_interval + af_ratio - 1) : - (rc->avg_frame_bandwidth * rc->baseline_gf_interval) / - (rc->baseline_gf_interval + af_ratio - 1); + target = + (!rc->is_src_frame_alt_ref && + (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) + ? (rc->avg_frame_bandwidth * rc->baseline_gf_interval * af_ratio) / + (rc->baseline_gf_interval + af_ratio - 1) + : (rc->avg_frame_bandwidth * rc->baseline_gf_interval) / + (rc->baseline_gf_interval + af_ratio - 1); #else target = rc->avg_frame_bandwidth; #endif @@ -1449,19 +1457,36 @@ static int calc_iframe_target_size_one_pass_vbr(const VP9_COMP *const cpi) { return vp9_rc_clamp_iframe_target_size(cpi, target); } +static void adjust_gfint_frame_constraint(VP9_COMP *cpi, int frame_constraint) { + RATE_CONTROL *const rc = &cpi->rc; + rc->constrained_gf_group = 0; + // Reset gf interval to make more equal spacing for frame_constraint. + if ((frame_constraint <= 7 * rc->baseline_gf_interval >> 2) && + (frame_constraint > rc->baseline_gf_interval)) { + rc->baseline_gf_interval = frame_constraint >> 1; + if (rc->baseline_gf_interval < 5) + rc->baseline_gf_interval = frame_constraint; + rc->constrained_gf_group = 1; + } else { + // Reset to keep gf_interval <= frame_constraint. 
+ if (rc->baseline_gf_interval > frame_constraint) { + rc->baseline_gf_interval = frame_constraint; + rc->constrained_gf_group = 1; + } + } +} + void vp9_rc_get_one_pass_vbr_params(VP9_COMP *cpi) { VP9_COMMON *const cm = &cpi->common; RATE_CONTROL *const rc = &cpi->rc; int target; // TODO(yaowu): replace the "auto_key && 0" below with proper decision logic. if (!cpi->refresh_alt_ref_frame && - (cm->current_video_frame == 0 || - (cpi->frame_flags & FRAMEFLAGS_KEY) || - rc->frames_to_key == 0 || - (cpi->oxcf.auto_key && 0))) { + (cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY) || + rc->frames_to_key == 0 || (cpi->oxcf.auto_key && 0))) { cm->frame_type = KEY_FRAME; - rc->this_key_frame_forced = cm->current_video_frame != 0 && - rc->frames_to_key == 0; + rc->this_key_frame_forced = + cm->current_video_frame != 0 && rc->frames_to_key == 0; rc->frames_to_key = cpi->oxcf.key_freq; rc->kf_boost = DEFAULT_KF_BOOST; rc->source_alt_ref_active = 0; @@ -1469,24 +1494,46 @@ void vp9_rc_get_one_pass_vbr_params(VP9_COMP *cpi) { cm->frame_type = INTER_FRAME; } if (rc->frames_till_gf_update_due == 0) { - rc->baseline_gf_interval = (rc->min_gf_interval + rc->max_gf_interval) / 2; - rc->frames_till_gf_update_due = rc->baseline_gf_interval; - // NOTE: frames_till_gf_update_due must be <= frames_to_key. 
- if (rc->frames_till_gf_update_due > rc->frames_to_key) { - rc->frames_till_gf_update_due = rc->frames_to_key; - rc->constrained_gf_group = 1; + double rate_err = 1.0; + rc->gfu_boost = DEFAULT_GF_BOOST; + if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cpi->oxcf.pass == 0) { + vp9_cyclic_refresh_set_golden_update(cpi); } else { - rc->constrained_gf_group = 0; + rc->baseline_gf_interval = + (rc->min_gf_interval + rc->max_gf_interval) / 2; } + rc->af_ratio_onepass_vbr = 10; + if (rc->rolling_target_bits > 0) + rate_err = + (double)rc->rolling_actual_bits / (double)rc->rolling_target_bits; + if (cm->current_video_frame > 30) { + if (rc->avg_frame_qindex[INTER_FRAME] > (7 * rc->worst_quality) >> 3 && + rate_err > 3.5) { + rc->baseline_gf_interval = + VPXMIN(15, (3 * rc->baseline_gf_interval) >> 1); + } else if (rc->avg_frame_low_motion < 20) { + // Decrease gf interval for high motion case. + rc->baseline_gf_interval = VPXMAX(6, rc->baseline_gf_interval >> 1); + } + // Adjust boost and af_ratio based on avg_frame_low_motion, which varies + // between 0 and 100 (stationary, 100% zero/small motion). 
+ rc->gfu_boost = + VPXMAX(500, DEFAULT_GF_BOOST * (rc->avg_frame_low_motion << 1) / + (rc->avg_frame_low_motion + 100)); + rc->af_ratio_onepass_vbr = VPXMIN(15, VPXMAX(5, 3 * rc->gfu_boost / 400)); + } + adjust_gfint_frame_constraint(cpi, rc->frames_to_key); + rc->frames_till_gf_update_due = rc->baseline_gf_interval; cpi->refresh_golden_frame = 1; rc->source_alt_ref_pending = USE_ALTREF_FOR_ONE_PASS; - rc->gfu_boost = DEFAULT_GF_BOOST; } if (cm->frame_type == KEY_FRAME) target = calc_iframe_target_size_one_pass_vbr(cpi); else target = calc_pframe_target_size_one_pass_vbr(cpi); vp9_rc_set_frame_target(cpi, target); + if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cpi->oxcf.pass == 0) + vp9_cyclic_refresh_update_parameters(cpi); } static int calc_pframe_target_size_one_pass_cbr(const VP9_COMP *cpi) { @@ -1501,11 +1548,12 @@ static int calc_pframe_target_size_one_pass_cbr(const VP9_COMP *cpi) { if (oxcf->gf_cbr_boost_pct) { const int af_ratio_pct = oxcf->gf_cbr_boost_pct + 100; - target = cpi->refresh_golden_frame ? - (rc->avg_frame_bandwidth * rc->baseline_gf_interval * af_ratio_pct) / - (rc->baseline_gf_interval * 100 + af_ratio_pct - 100) : - (rc->avg_frame_bandwidth * rc->baseline_gf_interval * 100) / - (rc->baseline_gf_interval * 100 + af_ratio_pct - 100); + target = cpi->refresh_golden_frame + ? (rc->avg_frame_bandwidth * rc->baseline_gf_interval * + af_ratio_pct) / + (rc->baseline_gf_interval * 100 + af_ratio_pct - 100) + : (rc->avg_frame_bandwidth * rc->baseline_gf_interval * 100) / + (rc->baseline_gf_interval * 100 + af_ratio_pct - 100); } else { target = rc->avg_frame_bandwidth; } @@ -1513,9 +1561,8 @@ static int calc_pframe_target_size_one_pass_cbr(const VP9_COMP *cpi) { // Note that for layers, avg_frame_bandwidth is the cumulative // per-frame-bandwidth. For the target size of this frame, use the // layer average frame size (i.e., non-cumulative per-frame-bw). 
- int layer = - LAYER_IDS_TO_IDX(svc->spatial_layer_id, - svc->temporal_layer_id, svc->number_temporal_layers); + int layer = LAYER_IDS_TO_IDX(svc->spatial_layer_id, svc->temporal_layer_id, + svc->number_temporal_layers); const LAYER_CONTEXT *lc = &svc->layer_context[layer]; target = lc->avg_frame_size; min_frame_target = VPXMAX(lc->avg_frame_size >> 4, FRAME_OVERHEAD_BITS); @@ -1531,8 +1578,8 @@ static int calc_pframe_target_size_one_pass_cbr(const VP9_COMP *cpi) { target += (target * pct_high) / 200; } if (oxcf->rc_max_inter_bitrate_pct) { - const int max_rate = rc->avg_frame_bandwidth * - oxcf->rc_max_inter_bitrate_pct / 100; + const int max_rate = + rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100; target = VPXMIN(target, max_rate); } return VPXMAX(min_frame_target, target); @@ -1545,67 +1592,53 @@ static int calc_iframe_target_size_one_pass_cbr(const VP9_COMP *cpi) { int target; if (cpi->common.current_video_frame == 0) { target = ((rc->starting_buffer_level / 2) > INT_MAX) - ? INT_MAX : (int)(rc->starting_buffer_level / 2); + ? INT_MAX + : (int)(rc->starting_buffer_level / 2); } else { int kf_boost = 32; double framerate = cpi->framerate; - if (svc->number_temporal_layers > 1 && - oxcf->rc_mode == VPX_CBR) { + if (svc->number_temporal_layers > 1 && oxcf->rc_mode == VPX_CBR) { // Use the layer framerate for temporal layers CBR mode. 
- const int layer = LAYER_IDS_TO_IDX(svc->spatial_layer_id, - svc->temporal_layer_id, svc->number_temporal_layers); + const int layer = + LAYER_IDS_TO_IDX(svc->spatial_layer_id, svc->temporal_layer_id, + svc->number_temporal_layers); const LAYER_CONTEXT *lc = &svc->layer_context[layer]; framerate = lc->framerate; } kf_boost = VPXMAX(kf_boost, (int)(2 * framerate - 16)); - if (rc->frames_since_key < framerate / 2) { - kf_boost = (int)(kf_boost * rc->frames_since_key / - (framerate / 2)); + if (rc->frames_since_key < framerate / 2) { + kf_boost = (int)(kf_boost * rc->frames_since_key / (framerate / 2)); } target = ((16 + kf_boost) * rc->avg_frame_bandwidth) >> 4; } return vp9_rc_clamp_iframe_target_size(cpi, target); } -// Reset information needed to set proper reference frames and buffer updates -// for temporal layering. This is called when a key frame is encoded. -static void reset_temporal_layer_to_zero(VP9_COMP *cpi) { - int sl; - LAYER_CONTEXT *lc = NULL; - cpi->svc.temporal_layer_id = 0; - - for (sl = 0; sl < cpi->svc.number_spatial_layers; ++sl) { - lc = &cpi->svc.layer_context[sl * cpi->svc.number_temporal_layers]; - lc->current_video_frame_in_layer = 0; - lc->frames_from_key_frame = 0; - } -} - void vp9_rc_get_svc_params(VP9_COMP *cpi) { VP9_COMMON *const cm = &cpi->common; RATE_CONTROL *const rc = &cpi->rc; int target = rc->avg_frame_bandwidth; - int layer = LAYER_IDS_TO_IDX(cpi->svc.spatial_layer_id, - cpi->svc.temporal_layer_id, cpi->svc.number_temporal_layers); - - if ((cm->current_video_frame == 0) || - (cpi->frame_flags & FRAMEFLAGS_KEY) || - (cpi->oxcf.auto_key && (rc->frames_since_key % - cpi->oxcf.key_freq == 0))) { + int layer = + LAYER_IDS_TO_IDX(cpi->svc.spatial_layer_id, cpi->svc.temporal_layer_id, + cpi->svc.number_temporal_layers); + // Periodic key frames is based on the super-frame counter + // (svc.current_superframe), also only base spatial layer is key frame. 
+ if ((cm->current_video_frame == 0) || (cpi->frame_flags & FRAMEFLAGS_KEY) || + (cpi->oxcf.auto_key && + (cpi->svc.current_superframe % cpi->oxcf.key_freq == 0) && + cpi->svc.spatial_layer_id == 0)) { cm->frame_type = KEY_FRAME; rc->source_alt_ref_active = 0; - if (is_two_pass_svc(cpi)) { cpi->svc.layer_context[layer].is_key_frame = 1; - cpi->ref_frame_flags &= - (~VP9_LAST_FLAG & ~VP9_GOLD_FLAG & ~VP9_ALT_FLAG); + cpi->ref_frame_flags &= (~VP9_LAST_FLAG & ~VP9_GOLD_FLAG & ~VP9_ALT_FLAG); } else if (is_one_pass_cbr_svc(cpi)) { - reset_temporal_layer_to_zero(cpi); + if (cm->current_video_frame > 0) vp9_svc_reset_key_frame(cpi); layer = LAYER_IDS_TO_IDX(cpi->svc.spatial_layer_id, - cpi->svc.temporal_layer_id, cpi->svc.number_temporal_layers); + cpi->svc.temporal_layer_id, + cpi->svc.number_temporal_layers); cpi->svc.layer_context[layer].is_key_frame = 1; - cpi->ref_frame_flags &= - (~VP9_LAST_FLAG & ~VP9_GOLD_FLAG & ~VP9_ALT_FLAG); + cpi->ref_frame_flags &= (~VP9_LAST_FLAG & ~VP9_GOLD_FLAG & ~VP9_ALT_FLAG); // Assumption here is that LAST_FRAME is being updated for a keyframe. // Thus no change in update flags. target = calc_iframe_target_size_one_pass_cbr(cpi); @@ -1619,8 +1652,7 @@ void vp9_rc_get_svc_params(VP9_COMP *cpi) { } else { lc->is_key_frame = cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame; - if (lc->is_key_frame) - cpi->ref_frame_flags &= (~VP9_LAST_FLAG); + if (lc->is_key_frame) cpi->ref_frame_flags &= (~VP9_LAST_FLAG); } cpi->ref_frame_flags &= (~VP9_ALT_FLAG); } else if (is_one_pass_cbr_svc(cpi)) { @@ -1650,13 +1682,11 @@ void vp9_rc_get_one_pass_cbr_params(VP9_COMP *cpi) { RATE_CONTROL *const rc = &cpi->rc; int target; // TODO(yaowu): replace the "auto_key && 0" below with proper decision logic. 
- if ((cm->current_video_frame == 0 || - (cpi->frame_flags & FRAMEFLAGS_KEY) || - rc->frames_to_key == 0 || - (cpi->oxcf.auto_key && 0))) { + if ((cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY) || + rc->frames_to_key == 0 || (cpi->oxcf.auto_key && 0))) { cm->frame_type = KEY_FRAME; - rc->this_key_frame_forced = cm->current_video_frame != 0 && - rc->frames_to_key == 0; + rc->this_key_frame_forced = + cm->current_video_frame != 0 && rc->frames_to_key == 0; rc->frames_to_key = cpi->oxcf.key_freq; rc->kf_boost = DEFAULT_KF_BOOST; rc->source_alt_ref_active = 0; @@ -1703,15 +1733,13 @@ int vp9_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget, // Convert the average q value to an index. for (i = rc->best_quality; i < rc->worst_quality; ++i) { start_index = i; - if (vp9_convert_qindex_to_q(i, bit_depth) >= qstart) - break; + if (vp9_convert_qindex_to_q(i, bit_depth) >= qstart) break; } // Convert the q target to an index for (i = rc->best_quality; i < rc->worst_quality; ++i) { target_index = i; - if (vp9_convert_qindex_to_q(i, bit_depth) >= qtarget) - break; + if (vp9_convert_qindex_to_q(i, bit_depth) >= qtarget) break; } return target_index - start_index; @@ -1724,8 +1752,8 @@ int vp9_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type, int i; // Look up the current projected bits per block for the base index - const int base_bits_per_mb = vp9_rc_bits_per_mb(frame_type, qindex, 1.0, - bit_depth); + const int base_bits_per_mb = + vp9_rc_bits_per_mb(frame_type, qindex, 1.0, bit_depth); // Find the target bits per mb based on the base value and given ratio. 
const int target_bits_per_mb = (int)(rate_target_ratio * base_bits_per_mb); @@ -1784,8 +1812,8 @@ void vp9_rc_update_framerate(VP9_COMP *cpi) { int vbr_max_bits; rc->avg_frame_bandwidth = (int)(oxcf->target_bandwidth / cpi->framerate); - rc->min_frame_bandwidth = (int)(rc->avg_frame_bandwidth * - oxcf->two_pass_vbrmin_section / 100); + rc->min_frame_bandwidth = + (int)(rc->avg_frame_bandwidth * oxcf->two_pass_vbrmin_section / 100); rc->min_frame_bandwidth = VPXMAX(rc->min_frame_bandwidth, FRAME_OVERHEAD_BITS); @@ -1797,8 +1825,9 @@ void vp9_rc_update_framerate(VP9_COMP *cpi) { // a very high rate is given on the command line or the the rate cannnot // be acheived because of a user specificed max q (e.g. when the user // specifies lossless encode. - vbr_max_bits = (int)(((int64_t)rc->avg_frame_bandwidth * - oxcf->two_pass_vbrmax_section) / 100); + vbr_max_bits = + (int)(((int64_t)rc->avg_frame_bandwidth * oxcf->two_pass_vbrmax_section) / + 100); rc->max_frame_bandwidth = VPXMAX(VPXMAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), vbr_max_bits); @@ -1811,27 +1840,28 @@ static void vbr_rate_correction(VP9_COMP *cpi, int *this_frame_target) { RATE_CONTROL *const rc = &cpi->rc; int64_t vbr_bits_off_target = rc->vbr_bits_off_target; int max_delta; - double position_factor = 1.0; + int frame_window = VPXMIN(16, ((int)cpi->twopass.total_stats.count - + cpi->common.current_video_frame)); - // How far through the clip are we. - // This number is used to damp the per frame rate correction. - // Range 0 - 1.0 - if (cpi->twopass.total_stats.count) { - position_factor = sqrt((double)cpi->common.current_video_frame / - cpi->twopass.total_stats.count); - } - max_delta = (int)(position_factor * - ((*this_frame_target * VBR_PCT_ADJUSTMENT_LIMIT) / 100)); + // Calcluate the adjustment to rate for this frame. + if (frame_window > 0) { + max_delta = (vbr_bits_off_target > 0) + ? 
(int)(vbr_bits_off_target / frame_window) + : (int)(-vbr_bits_off_target / frame_window); - // vbr_bits_off_target > 0 means we have extra bits to spend - if (vbr_bits_off_target > 0) { - *this_frame_target += - (vbr_bits_off_target > max_delta) ? max_delta - : (int)vbr_bits_off_target; - } else { - *this_frame_target -= - (vbr_bits_off_target < -max_delta) ? max_delta - : (int)-vbr_bits_off_target; + max_delta = VPXMIN(max_delta, + ((*this_frame_target * VBR_PCT_ADJUSTMENT_LIMIT) / 100)); + + // vbr_bits_off_target > 0 means we have extra bits to spend + if (vbr_bits_off_target > 0) { + *this_frame_target += (vbr_bits_off_target > max_delta) + ? max_delta + : (int)vbr_bits_off_target; + } else { + *this_frame_target -= (vbr_bits_off_target < -max_delta) + ? max_delta + : (int)-vbr_bits_off_target; + } } // Fast redistribution of bits arising from massive local undershoot. @@ -1890,8 +1920,7 @@ int vp9_resize_one_pass_cbr(VP9_COMP *cpi) { down_size_on = 0; } else { if (cpi->resize_state == ORIG && - (cm->width * 3 / 4 < min_width || - cm->height * 3 / 4 < min_height)) + (cm->width * 3 / 4 < min_width || cm->height * 3 / 4 < min_height)) return 0; else if (cpi->resize_state == THREE_QUARTER && ((cpi->oxcf.width >> 1) < min_width || @@ -1967,106 +1996,296 @@ int vp9_resize_one_pass_cbr(VP9_COMP *cpi) { cpi->resize_scale_den = 1; } tot_scale_change = (cpi->resize_scale_den * cpi->resize_scale_den) / - (cpi->resize_scale_num * cpi->resize_scale_num); + (cpi->resize_scale_num * cpi->resize_scale_num); // Reset buffer level to optimal, update target size. rc->buffer_level = rc->optimal_buffer_level; rc->bits_off_target = rc->optimal_buffer_level; rc->this_frame_target = calc_pframe_target_size_one_pass_cbr(cpi); // Get the projected qindex, based on the scaled target frame size (scaled // so target_bits_per_mb in vp9_rc_regulate_q will be correct target). - target_bits_per_frame = (resize_action >= 0) ? 
- rc->this_frame_target * tot_scale_change : - rc->this_frame_target / tot_scale_change; + target_bits_per_frame = (resize_action >= 0) + ? rc->this_frame_target * tot_scale_change + : rc->this_frame_target / tot_scale_change; active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi); - qindex = vp9_rc_regulate_q(cpi, - target_bits_per_frame, - rc->best_quality, + qindex = vp9_rc_regulate_q(cpi, target_bits_per_frame, rc->best_quality, active_worst_quality); // If resize is down, check if projected q index is close to worst_quality, // and if so, reduce the rate correction factor (since likely can afford // lower q for resized frame). - if (resize_action > 0 && - qindex > 90 * cpi->rc.worst_quality / 100) { + if (resize_action > 0 && qindex > 90 * cpi->rc.worst_quality / 100) { rc->rate_correction_factors[INTER_NORMAL] *= 0.85; } // If resize is back up, check if projected q index is too much above the // current base_qindex, and if so, reduce the rate correction factor // (since prefer to keep q for resized frame at least close to previous q). - if (resize_action < 0 && - qindex > 130 * cm->base_qindex / 100) { + if (resize_action < 0 && qindex > 130 * cm->base_qindex / 100) { rc->rate_correction_factors[INTER_NORMAL] *= 0.9; } } return resize_action; } +void adjust_gf_boost_lag_one_pass_vbr(VP9_COMP *cpi, uint64_t avg_sad_current) { + VP9_COMMON *const cm = &cpi->common; + RATE_CONTROL *const rc = &cpi->rc; + int target; + int found = 0; + int found2 = 0; + int frame; + int i; + uint64_t avg_source_sad_lag = avg_sad_current; + int high_source_sad_lagindex = -1; + int steady_sad_lagindex = -1; + uint32_t sad_thresh1 = 60000; + uint32_t sad_thresh2 = 120000; + int low_content = 0; + int high_content = 0; + double rate_err = 1.0; + // Get measure of complexity over the future frames, and get the first + // future frame with high_source_sad/scene-change. 
+ int tot_frames = (int)vp9_lookahead_depth(cpi->lookahead) - 1; + for (frame = tot_frames; frame >= 1; --frame) { + const int lagframe_idx = tot_frames - frame + 1; + uint64_t reference_sad = rc->avg_source_sad[0]; + for (i = 1; i < lagframe_idx; ++i) { + if (rc->avg_source_sad[i] > 0) + reference_sad = (3 * reference_sad + rc->avg_source_sad[i]) >> 2; + } + // Detect up-coming scene change. + if (!found && + (rc->avg_source_sad[lagframe_idx] > + VPXMAX(sad_thresh1, (unsigned int)(reference_sad << 1)) || + rc->avg_source_sad[lagframe_idx] > + VPXMAX(3 * sad_thresh1 >> 2, + (unsigned int)(reference_sad << 2)))) { + high_source_sad_lagindex = lagframe_idx; + found = 1; + } + // Detect change from motion to steady. + if (!found2 && lagframe_idx > 1 && lagframe_idx < tot_frames && + rc->avg_source_sad[lagframe_idx - 1] > (sad_thresh1 >> 2)) { + found2 = 1; + for (i = lagframe_idx; i < tot_frames; ++i) { + if (!(rc->avg_source_sad[i] > 0 && + rc->avg_source_sad[i] < (sad_thresh1 >> 2) && + rc->avg_source_sad[i] < + (rc->avg_source_sad[lagframe_idx - 1] >> 1))) { + found2 = 0; + i = tot_frames; + } + } + if (found2) steady_sad_lagindex = lagframe_idx; + } + avg_source_sad_lag += rc->avg_source_sad[lagframe_idx]; + } + if (tot_frames > 0) avg_source_sad_lag = avg_source_sad_lag / tot_frames; + // Constrain distance between detected scene cuts. + if (high_source_sad_lagindex != -1 && + high_source_sad_lagindex != rc->high_source_sad_lagindex - 1 && + abs(high_source_sad_lagindex - rc->high_source_sad_lagindex) < 4) + rc->high_source_sad_lagindex = -1; + else + rc->high_source_sad_lagindex = high_source_sad_lagindex; + // Adjust some factors for the next GF group, ignore initial key frame, + // and only for lag_in_frames not too small. 
+ if (cpi->refresh_golden_frame == 1 && cm->frame_type != KEY_FRAME && + cm->current_video_frame > 30 && cpi->oxcf.lag_in_frames > 8) { + int frame_constraint; + if (rc->rolling_target_bits > 0) + rate_err = + (double)rc->rolling_actual_bits / (double)rc->rolling_target_bits; + high_content = high_source_sad_lagindex != -1 || + avg_source_sad_lag > (rc->prev_avg_source_sad_lag << 1) || + avg_source_sad_lag > sad_thresh2; + low_content = high_source_sad_lagindex == -1 && + ((avg_source_sad_lag < (rc->prev_avg_source_sad_lag >> 1)) || + (avg_source_sad_lag < sad_thresh1)); + if (low_content) { + rc->gfu_boost = DEFAULT_GF_BOOST; + rc->baseline_gf_interval = + VPXMIN(15, (3 * rc->baseline_gf_interval) >> 1); + } else if (high_content) { + rc->gfu_boost = DEFAULT_GF_BOOST >> 1; + rc->baseline_gf_interval = (rate_err > 3.0) + ? VPXMAX(10, rc->baseline_gf_interval >> 1) + : VPXMAX(6, rc->baseline_gf_interval >> 1); + } + // Check for constraining gf_interval for up-coming scene/content changes, + // or for up-coming key frame, whichever is closer. + frame_constraint = rc->frames_to_key; + if (rc->high_source_sad_lagindex > 0 && + frame_constraint > rc->high_source_sad_lagindex) + frame_constraint = rc->high_source_sad_lagindex; + if (steady_sad_lagindex > 3 && frame_constraint > steady_sad_lagindex) + frame_constraint = steady_sad_lagindex; + adjust_gfint_frame_constraint(cpi, frame_constraint); + rc->frames_till_gf_update_due = rc->baseline_gf_interval; + // Adjust factors for active_worst setting & af_ratio for next gf interval. + rc->fac_active_worst_inter = 150; // corresponds to 3/2 (= 150 /100). 
+ rc->fac_active_worst_gf = 100; + if (rate_err < 1.5 && !high_content) { + rc->fac_active_worst_inter = 120; + rc->fac_active_worst_gf = 90; + } + if (low_content && rc->avg_frame_low_motion > 80) { + rc->af_ratio_onepass_vbr = 15; + } else if (high_content || rc->avg_frame_low_motion < 30) { + rc->af_ratio_onepass_vbr = 5; + rc->gfu_boost = DEFAULT_GF_BOOST >> 2; + } + target = calc_pframe_target_size_one_pass_vbr(cpi); + vp9_rc_set_frame_target(cpi, target); + } + rc->prev_avg_source_sad_lag = avg_source_sad_lag; +} + // Compute average source sad (temporal sad: between current source and // previous source) over a subset of superblocks. Use this is detect big changes // in content and allow rate control to react. -// TODO(marpan): Superblock sad is computed again in variance partition for -// non-rd mode (but based on last reconstructed frame). Should try to reuse -// these computations. +// This function also handles special case of lag_in_frames, to measure content +// level in #future frames set by the lag_in_frames. void vp9_avg_source_sad(VP9_COMP *cpi) { - VP9_COMMON * const cm = &cpi->common; + VP9_COMMON *const cm = &cpi->common; RATE_CONTROL *const rc = &cpi->rc; rc->high_source_sad = 0; - if (cpi->Last_Source != NULL) { - const uint8_t *src_y = cpi->Source->y_buffer; - const int src_ystride = cpi->Source->y_stride; - const uint8_t *last_src_y = cpi->Last_Source->y_buffer; - const int last_src_ystride = cpi->Last_Source->y_stride; - int sbi_row, sbi_col; - const BLOCK_SIZE bsize = BLOCK_64X64; - // Loop over sub-sample of frame, and compute average sad over 64x64 blocks. - uint64_t avg_sad = 0; - int num_samples = 0; - int sb_cols = (cm->mi_cols + MI_BLOCK_SIZE - 1) / MI_BLOCK_SIZE; - int sb_rows = (cm->mi_rows + MI_BLOCK_SIZE - 1) / MI_BLOCK_SIZE; - for (sbi_row = 0; sbi_row < sb_rows; sbi_row ++) { - for (sbi_col = 0; sbi_col < sb_cols; sbi_col ++) { - // Checker-board pattern, ignore boundary. 
- if ((sbi_row > 0 && sbi_col > 0) && - (sbi_row < sb_rows - 1 && sbi_col < sb_cols - 1) && - ((sbi_row % 2 == 0 && sbi_col % 2 == 0) || - (sbi_row % 2 != 0 && sbi_col % 2 != 0))) { - num_samples++; - avg_sad += cpi->fn_ptr[bsize].sdf(src_y, - src_ystride, - last_src_y, - last_src_ystride); - } - src_y += 64; - last_src_y += 64; - } - src_y += (src_ystride << 6) - (sb_cols << 6); - last_src_y += (last_src_ystride << 6) - (sb_cols << 6); + if (cpi->Last_Source != NULL && + cpi->Last_Source->y_width == cpi->Source->y_width && + cpi->Last_Source->y_height == cpi->Source->y_height) { + YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = { NULL }; + uint8_t *src_y = cpi->Source->y_buffer; + int src_ystride = cpi->Source->y_stride; + uint8_t *last_src_y = cpi->Last_Source->y_buffer; + int last_src_ystride = cpi->Last_Source->y_stride; + int start_frame = 0; + int frames_to_buffer = 1; + int frame = 0; + uint64_t avg_sad_current = 0; + uint32_t min_thresh = 4000; + float thresh = 8.0f; + if (cpi->oxcf.rc_mode == VPX_VBR) { + min_thresh = 60000; + thresh = 2.1f; } - if (num_samples > 0) - avg_sad = avg_sad / num_samples; - // Set high_source_sad flag if we detect very high increase in avg_sad - // between current and the previous frame value(s). Use a minimum threshold - // for cases where there is small change from content that is completely - // static. - if (avg_sad > VPXMAX(4000, (rc->avg_source_sad << 3)) && - rc->frames_since_key > 1) - rc->high_source_sad = 1; - else - rc->high_source_sad = 0; - rc->avg_source_sad = (rc->avg_source_sad + avg_sad) >> 1; + if (cpi->oxcf.lag_in_frames > 0) { + frames_to_buffer = (cm->current_video_frame == 1) + ? 
(int)vp9_lookahead_depth(cpi->lookahead) - 1 + : 2; + start_frame = (int)vp9_lookahead_depth(cpi->lookahead) - 1; + for (frame = 0; frame < frames_to_buffer; ++frame) { + const int lagframe_idx = start_frame - frame; + if (lagframe_idx >= 0) { + struct lookahead_entry *buf = + vp9_lookahead_peek(cpi->lookahead, lagframe_idx); + frames[frame] = &buf->img; + } + } + // The avg_sad for this current frame is the value of frame#1 + // (first future frame) from previous frame. + avg_sad_current = rc->avg_source_sad[1]; + if (avg_sad_current > + VPXMAX(min_thresh, + (unsigned int)(rc->avg_source_sad[0] * thresh)) && + cm->current_video_frame > (unsigned int)cpi->oxcf.lag_in_frames) + rc->high_source_sad = 1; + else + rc->high_source_sad = 0; + // Update recursive average for current frame. + if (avg_sad_current > 0) + rc->avg_source_sad[0] = + (3 * rc->avg_source_sad[0] + avg_sad_current) >> 2; + // Shift back data, starting at frame#1. + for (frame = 1; frame < cpi->oxcf.lag_in_frames - 1; ++frame) + rc->avg_source_sad[frame] = rc->avg_source_sad[frame + 1]; + } + for (frame = 0; frame < frames_to_buffer; ++frame) { + if (cpi->oxcf.lag_in_frames == 0 || + (frames[frame] != NULL && frames[frame + 1] != NULL && + frames[frame]->y_width == frames[frame + 1]->y_width && + frames[frame]->y_height == frames[frame + 1]->y_height)) { + int sbi_row, sbi_col; + const int lagframe_idx = + (cpi->oxcf.lag_in_frames == 0) ? 0 : start_frame - frame + 1; + const BLOCK_SIZE bsize = BLOCK_64X64; + // Loop over sub-sample of frame, compute average sad over 64x64 blocks. 
+ uint64_t avg_sad = 0; + int num_samples = 0; + int sb_cols = (cm->mi_cols + MI_BLOCK_SIZE - 1) / MI_BLOCK_SIZE; + int sb_rows = (cm->mi_rows + MI_BLOCK_SIZE - 1) / MI_BLOCK_SIZE; + if (cpi->oxcf.lag_in_frames > 0) { + src_y = frames[frame]->y_buffer; + src_ystride = frames[frame]->y_stride; + last_src_y = frames[frame + 1]->y_buffer; + last_src_ystride = frames[frame + 1]->y_stride; + } + for (sbi_row = 0; sbi_row < sb_rows; ++sbi_row) { + for (sbi_col = 0; sbi_col < sb_cols; ++sbi_col) { + // Checker-board pattern, ignore boundary. + if ((sbi_row > 0 && sbi_col > 0) && + (sbi_row < sb_rows - 1 && sbi_col < sb_cols - 1) && + ((sbi_row % 2 == 0 && sbi_col % 2 == 0) || + (sbi_row % 2 != 0 && sbi_col % 2 != 0))) { + num_samples++; + avg_sad += cpi->fn_ptr[bsize].sdf(src_y, src_ystride, last_src_y, + last_src_ystride); + } + src_y += 64; + last_src_y += 64; + } + src_y += (src_ystride << 6) - (sb_cols << 6); + last_src_y += (last_src_ystride << 6) - (sb_cols << 6); + } + if (num_samples > 0) avg_sad = avg_sad / num_samples; + // Set high_source_sad flag if we detect very high increase in avg_sad + // between current and previous frame value(s). Use minimum threshold + // for cases where there is small change from content that is completely + // static. + if (lagframe_idx == 0) { + if (avg_sad > + VPXMAX(min_thresh, + (unsigned int)(rc->avg_source_sad[0] * thresh)) && + rc->frames_since_key > 1) + rc->high_source_sad = 1; + else + rc->high_source_sad = 0; + if (avg_sad > 0 || cpi->oxcf.rc_mode == VPX_CBR) + rc->avg_source_sad[0] = (3 * rc->avg_source_sad[0] + avg_sad) >> 2; + } else { + rc->avg_source_sad[lagframe_idx] = avg_sad; + } + } + } + // For VBR, under scene change/high content change, force golden refresh. 
+ if (cpi->oxcf.rc_mode == VPX_VBR && cm->frame_type != KEY_FRAME && + rc->high_source_sad && rc->frames_to_key > 3 && + rc->count_last_scene_change > 4 && + cpi->ext_refresh_frame_flags_pending == 0) { + int target; + cpi->refresh_golden_frame = 1; + rc->gfu_boost = DEFAULT_GF_BOOST >> 1; + rc->baseline_gf_interval = + VPXMIN(20, VPXMAX(10, rc->baseline_gf_interval)); + adjust_gfint_frame_constraint(cpi, rc->frames_to_key); + rc->frames_till_gf_update_due = rc->baseline_gf_interval; + target = calc_pframe_target_size_one_pass_vbr(cpi); + vp9_rc_set_frame_target(cpi, target); + rc->count_last_scene_change = 0; + } else { + rc->count_last_scene_change++; + } + // If lag_in_frame is used, set the gf boost and interval. + if (cpi->oxcf.lag_in_frames > 0) + adjust_gf_boost_lag_one_pass_vbr(cpi, avg_sad_current); } } // Test if encoded frame will significantly overshoot the target bitrate, and // if so, set the QP, reset/adjust some rate control parameters, and return 1. -int vp9_encodedframe_overshoot(VP9_COMP *cpi, - int frame_size, - int *q) { - VP9_COMMON * const cm = &cpi->common; +int vp9_encodedframe_overshoot(VP9_COMP *cpi, int frame_size, int *q) { + VP9_COMMON *const cm = &cpi->common; RATE_CONTROL *const rc = &cpi->rc; int thresh_qp = 3 * (rc->worst_quality >> 2); int thresh_rate = rc->avg_frame_bandwidth * 10; - if (cm->base_qindex < thresh_qp && - frame_size > thresh_rate) { + if (cm->base_qindex < thresh_qp && frame_size > thresh_rate) { double rate_correction_factor = cpi->rc.rate_correction_factors[INTER_NORMAL]; const int target_size = cpi->rc.avg_frame_bandwidth; @@ -2087,7 +2306,8 @@ int vp9_encodedframe_overshoot(VP9_COMP *cpi, cpi->rc.rc_1_frame = 0; cpi->rc.rc_2_frame = 0; // Adjust rate correction factor. - target_bits_per_mb = ((uint64_t)target_size << BPER_MB_NORMBITS) / cm->MBs; + target_bits_per_mb = + (int)(((uint64_t)target_size << BPER_MB_NORMBITS) / cm->MBs); // Rate correction factor based on target_bits_per_mb and qp (==max_QP). 
// This comes from the inverse computation of vp9_rc_bits_per_mb(). q2 = vp9_convert_qindex_to_q(*q, cm->bit_depth); @@ -2116,8 +2336,7 @@ int vp9_encodedframe_overshoot(VP9_COMP *cpi, lrc->bits_off_target = rc->optimal_buffer_level; lrc->rc_1_frame = 0; lrc->rc_2_frame = 0; - lrc->rate_correction_factors[INTER_NORMAL] = - rate_correction_factor; + lrc->rate_correction_factors[INTER_NORMAL] = rate_correction_factor; } } return 1; diff --git a/libs/libvpx/vp9/encoder/vp9_ratectrl.h b/libs/libvpx/vp9/encoder/vp9_ratectrl.h index 3df909cb18..6006e9b051 100644 --- a/libs/libvpx/vp9/encoder/vp9_ratectrl.h +++ b/libs/libvpx/vp9/encoder/vp9_ratectrl.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP9_ENCODER_VP9_RATECTRL_H_ #define VP9_ENCODER_VP9_RATECTRL_H_ @@ -16,18 +15,19 @@ #include "vpx/vpx_integer.h" #include "vp9/common/vp9_blockd.h" +#include "vp9/encoder/vp9_lookahead.h" #ifdef __cplusplus extern "C" { #endif // Bits Per MB at different Q (Multiplied by 512) -#define BPER_MB_NORMBITS 9 +#define BPER_MB_NORMBITS 9 -#define MIN_GF_INTERVAL 4 -#define MAX_GF_INTERVAL 16 -#define FIXED_GF_INTERVAL 8 // Used in some testing modes only -#define ONEHALFONLY_RESIZE 0 +#define MIN_GF_INTERVAL 4 +#define MAX_GF_INTERVAL 16 +#define FIXED_GF_INTERVAL 8 // Used in some testing modes only +#define ONEHALFONLY_RESIZE 0 typedef enum { INTER_NORMAL = 0, @@ -53,36 +53,32 @@ typedef enum { UP_ORIG = -2, // From 1/2 or 3/4 to orig. } RESIZE_ACTION; -typedef enum { - ORIG = 0, - THREE_QUARTER = 1, - ONE_HALF = 2 -} RESIZE_STATE; +typedef enum { ORIG = 0, THREE_QUARTER = 1, ONE_HALF = 2 } RESIZE_STATE; // Frame dimensions multiplier wrt the native frame size, in 1/16ths, // specified for the scale-up case. // e.g. 24 => 16/24 = 2/3 of native size. The restriction to 1/16th is // intended to match the capabilities of the normative scaling filters, // giving precedence to the up-scaling accuracy. 
-static const int frame_scale_factor[FRAME_SCALE_STEPS] = {16, 24}; +static const int frame_scale_factor[FRAME_SCALE_STEPS] = { 16, 24 }; // Multiplier of the target rate to be used as threshold for triggering scaling. -static const double rate_thresh_mult[FRAME_SCALE_STEPS] = {1.0, 2.0}; +static const double rate_thresh_mult[FRAME_SCALE_STEPS] = { 1.0, 2.0 }; // Scale dependent Rate Correction Factor multipliers. Compensates for the // greater number of bits per pixel generated in down-scaled frames. -static const double rcf_mult[FRAME_SCALE_STEPS] = {1.0, 2.0}; +static const double rcf_mult[FRAME_SCALE_STEPS] = { 1.0, 2.0 }; typedef struct { // Rate targetting variables - int base_frame_target; // A baseline frame target before adjustment - // for previous under or over shoot. - int this_frame_target; // Actual frame target after rc adjustment. + int base_frame_target; // A baseline frame target before adjustment + // for previous under or over shoot. + int this_frame_target; // Actual frame target after rc adjustment. int projected_frame_size; int sb64_target_rate; - int last_q[FRAME_TYPES]; // Separate values for Intra/Inter - int last_boosted_qindex; // Last boosted GF/KF/ARF q - int last_kf_qindex; // Q index of the last key frame coded. + int last_q[FRAME_TYPES]; // Separate values for Intra/Inter + int last_boosted_qindex; // Last boosted GF/KF/ARF q + int last_kf_qindex; // Q index of the last key frame coded. 
int gfu_boost; int last_boost; @@ -159,8 +155,16 @@ typedef struct { int frame_height[FRAME_SCALE_STEPS]; int rf_level_maxq[RATE_FACTOR_LEVELS]; - uint64_t avg_source_sad; + int fac_active_worst_inter; + int fac_active_worst_gf; + uint64_t avg_source_sad[MAX_LAG_BUFFERS]; + uint64_t prev_avg_source_sad_lag; + int high_source_sad_lagindex; int high_source_sad; + int count_last_scene_change; + int avg_frame_low_motion; + int af_ratio_onepass_vbr; + int force_qpmin; } RATE_CONTROL; struct VP9_COMP; @@ -170,8 +174,7 @@ void vp9_rc_init(const struct VP9EncoderConfig *oxcf, int pass, RATE_CONTROL *rc); int vp9_estimate_bits_at_q(FRAME_TYPE frame_kind, int q, int mbs, - double correction_factor, - vpx_bit_depth_t bit_depth); + double correction_factor, vpx_bit_depth_t bit_depth); double vp9_convert_qindex_to_q(int qindex, vpx_bit_depth_t bit_depth); @@ -232,8 +235,7 @@ void vp9_rc_compute_frame_size_bounds(const struct VP9_COMP *cpi, int *frame_over_shoot_limit); // Picks q and q bounds given the target for bits -int vp9_rc_pick_q_and_bounds(const struct VP9_COMP *cpi, - int *bottom_index, +int vp9_rc_pick_q_and_bounds(const struct VP9_COMP *cpi, int *bottom_index, int *top_index); // Estimates q to achieve a target bits per frame diff --git a/libs/libvpx/vp9/encoder/vp9_rd.c b/libs/libvpx/vp9/encoder/vp9_rd.c index fc32d19112..76c639a647 100644 --- a/libs/libvpx/vp9/encoder/vp9_rd.c +++ b/libs/libvpx/vp9/encoder/vp9_rd.c @@ -40,7 +40,7 @@ #include "vp9/encoder/vp9_rd.h" #include "vp9/encoder/vp9_tokenize.h" -#define RD_THRESH_POW 1.25 +#define RD_THRESH_POW 1.25 // Factor to weigh the rate for switchable interp filters. 
#define SWITCHABLE_INTERP_RATE_FACTOR 1 @@ -98,8 +98,7 @@ static void fill_token_costs(vp9_coeff_cost *c, for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) { vpx_prob probs[ENTROPY_NODES]; vp9_model_to_full_probs(p[t][i][j][k][l], probs); - vp9_cost_tokens((int *)c[t][i][j][k][0][l], probs, - vp9_coef_tree); + vp9_cost_tokens((int *)c[t][i][j][k][0][l], probs, vp9_coef_tree); vp9_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs, vp9_coef_tree); assert(c[t][i][j][k][0][l][EOB_TOKEN] == @@ -142,28 +141,19 @@ void vp9_init_me_luts(void) { #endif } -static const int rd_boost_factor[16] = { - 64, 32, 32, 32, 24, 16, 12, 12, - 8, 8, 4, 4, 2, 2, 1, 0 -}; -static const int rd_frame_type_factor[FRAME_UPDATE_TYPES] = { - 128, 144, 128, 128, 144 -}; +static const int rd_boost_factor[16] = { 64, 32, 32, 32, 24, 16, 12, 12, + 8, 8, 4, 4, 2, 2, 1, 0 }; +static const int rd_frame_type_factor[FRAME_UPDATE_TYPES] = { 128, 144, 128, + 128, 144 }; int vp9_compute_rd_mult(const VP9_COMP *cpi, int qindex) { const int64_t q = vp9_dc_quant(qindex, 0, cpi->common.bit_depth); #if CONFIG_VP9_HIGHBITDEPTH int64_t rdmult = 0; switch (cpi->common.bit_depth) { - case VPX_BITS_8: - rdmult = 88 * q * q / 24; - break; - case VPX_BITS_10: - rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4); - break; - case VPX_BITS_12: - rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8); - break; + case VPX_BITS_8: rdmult = 88 * q * q / 24; break; + case VPX_BITS_10: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4); break; + case VPX_BITS_12: rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8); break; default: assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); return -1; @@ -179,8 +169,7 @@ int vp9_compute_rd_mult(const VP9_COMP *cpi, int qindex) { rdmult = (rdmult * rd_frame_type_factor[frame_type]) >> 7; rdmult += ((rdmult * rd_boost_factor[boost_index]) >> 7); } - if (rdmult < 1) - rdmult = 1; + if (rdmult < 1) rdmult = 1; return (int)rdmult; } @@ -188,21 +177,15 @@ static int 
compute_rd_thresh_factor(int qindex, vpx_bit_depth_t bit_depth) { double q; #if CONFIG_VP9_HIGHBITDEPTH switch (bit_depth) { - case VPX_BITS_8: - q = vp9_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; - break; - case VPX_BITS_10: - q = vp9_dc_quant(qindex, 0, VPX_BITS_10) / 16.0; - break; - case VPX_BITS_12: - q = vp9_dc_quant(qindex, 0, VPX_BITS_12) / 64.0; - break; + case VPX_BITS_8: q = vp9_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; break; + case VPX_BITS_10: q = vp9_dc_quant(qindex, 0, VPX_BITS_10) / 16.0; break; + case VPX_BITS_12: q = vp9_dc_quant(qindex, 0, VPX_BITS_12) / 64.0; break; default: assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); return -1; } #else - (void) bit_depth; + (void)bit_depth; q = vp9_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; #endif // CONFIG_VP9_HIGHBITDEPTH // TODO(debargha): Adjust the function below. @@ -240,7 +223,8 @@ static void set_block_thresholds(const VP9_COMMON *cm, RD_OPT *rd) { for (segment_id = 0; segment_id < MAX_SEGMENTS; ++segment_id) { const int qindex = clamp(vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex) + - cm->y_dc_delta_q, 0, MAXQ); + cm->y_dc_delta_q, + 0, MAXQ); const int q = compute_rd_thresh_factor(qindex, cm->bit_depth); for (bsize = 0; bsize < BLOCK_SIZES; ++bsize) { @@ -251,10 +235,9 @@ static void set_block_thresholds(const VP9_COMMON *cm, RD_OPT *rd) { if (bsize >= BLOCK_8X8) { for (i = 0; i < MAX_MODES; ++i) - rd->threshes[segment_id][bsize][i] = - rd->thresh_mult[i] < thresh_max - ? rd->thresh_mult[i] * t / 4 - : INT_MAX; + rd->threshes[segment_id][bsize][i] = rd->thresh_mult[i] < thresh_max + ? rd->thresh_mult[i] * t / 4 + : INT_MAX; } else { for (i = 0; i < MAX_REFS; ++i) rd->threshes[segment_id][bsize][i] = @@ -281,34 +264,44 @@ void vp9_initialize_rd_consts(VP9_COMP *cpi) { set_error_per_bit(x, rd->RDMULT); x->select_tx_size = (cpi->sf.tx_size_search_method == USE_LARGESTALL && - cm->frame_type != KEY_FRAME) ? 0 : 1; + cm->frame_type != KEY_FRAME) + ? 
0 + : 1; set_block_thresholds(cm, rd); set_partition_probs(cm, xd); - if (!cpi->sf.use_nonrd_pick_mode || cm->frame_type == KEY_FRAME) - fill_token_costs(x->token_costs, cm->fc->coef_probs); + if (cpi->oxcf.pass == 1) { + if (!frame_is_intra_only(cm)) + vp9_build_nmv_cost_table( + x->nmvjointcost, + cm->allow_high_precision_mv ? x->nmvcost_hp : x->nmvcost, + &cm->fc->nmvc, cm->allow_high_precision_mv); + } else { + if (!cpi->sf.use_nonrd_pick_mode || cm->frame_type == KEY_FRAME) + fill_token_costs(x->token_costs, cm->fc->coef_probs); - if (cpi->sf.partition_search_type != VAR_BASED_PARTITION || - cm->frame_type == KEY_FRAME) { - for (i = 0; i < PARTITION_CONTEXTS; ++i) - vp9_cost_tokens(cpi->partition_cost[i], get_partition_probs(xd, i), - vp9_partition_tree); - } + if (cpi->sf.partition_search_type != VAR_BASED_PARTITION || + cm->frame_type == KEY_FRAME) { + for (i = 0; i < PARTITION_CONTEXTS; ++i) + vp9_cost_tokens(cpi->partition_cost[i], get_partition_probs(xd, i), + vp9_partition_tree); + } - if (!cpi->sf.use_nonrd_pick_mode || (cm->current_video_frame & 0x07) == 1 || - cm->frame_type == KEY_FRAME) { - fill_mode_costs(cpi); + if (!cpi->sf.use_nonrd_pick_mode || (cm->current_video_frame & 0x07) == 1 || + cm->frame_type == KEY_FRAME) { + fill_mode_costs(cpi); - if (!frame_is_intra_only(cm)) { - vp9_build_nmv_cost_table(x->nmvjointcost, - cm->allow_high_precision_mv ? x->nmvcost_hp - : x->nmvcost, - &cm->fc->nmvc, cm->allow_high_precision_mv); + if (!frame_is_intra_only(cm)) { + vp9_build_nmv_cost_table( + x->nmvjointcost, + cm->allow_high_precision_mv ? 
x->nmvcost_hp : x->nmvcost, + &cm->fc->nmvc, cm->allow_high_precision_mv); - for (i = 0; i < INTER_MODE_CONTEXTS; ++i) - vp9_cost_tokens((int *)cpi->inter_mode_cost[i], - cm->fc->inter_mode_probs[i], vp9_inter_mode_tree); + for (i = 0; i < INTER_MODE_CONTEXTS; ++i) + vp9_cost_tokens((int *)cpi->inter_mode_cost[i], + cm->fc->inter_mode_probs[i], vp9_inter_mode_tree); + } } } } @@ -327,20 +320,17 @@ static void model_rd_norm(int xsq_q10, int *r_q10, int *d_q10) { // where r = exp(-sqrt(2) * x) and x = qpstep / sqrt(variance), // and H(x) is the binary entropy function. static const int rate_tab_q10[] = { - 65536, 6086, 5574, 5275, 5063, 4899, 4764, 4651, - 4553, 4389, 4255, 4142, 4044, 3958, 3881, 3811, - 3748, 3635, 3538, 3453, 3376, 3307, 3244, 3186, - 3133, 3037, 2952, 2877, 2809, 2747, 2690, 2638, - 2589, 2501, 2423, 2353, 2290, 2232, 2179, 2130, - 2084, 2001, 1928, 1862, 1802, 1748, 1698, 1651, - 1608, 1530, 1460, 1398, 1342, 1290, 1243, 1199, - 1159, 1086, 1021, 963, 911, 864, 821, 781, - 745, 680, 623, 574, 530, 490, 455, 424, - 395, 345, 304, 269, 239, 213, 190, 171, - 154, 126, 104, 87, 73, 61, 52, 44, - 38, 28, 21, 16, 12, 10, 8, 6, - 5, 3, 2, 1, 1, 1, 0, 0, + 65536, 6086, 5574, 5275, 5063, 4899, 4764, 4651, 4553, 4389, 4255, 4142, + 4044, 3958, 3881, 3811, 3748, 3635, 3538, 3453, 3376, 3307, 3244, 3186, + 3133, 3037, 2952, 2877, 2809, 2747, 2690, 2638, 2589, 2501, 2423, 2353, + 2290, 2232, 2179, 2130, 2084, 2001, 1928, 1862, 1802, 1748, 1698, 1651, + 1608, 1530, 1460, 1398, 1342, 1290, 1243, 1199, 1159, 1086, 1021, 963, + 911, 864, 821, 781, 745, 680, 623, 574, 530, 490, 455, 424, + 395, 345, 304, 269, 239, 213, 190, 171, 154, 126, 104, 87, + 73, 61, 52, 44, 38, 28, 21, 16, 12, 10, 8, 6, + 5, 3, 2, 1, 1, 1, 0, 0, }; + // Normalized distortion: // This table models the normalized distortion for a Laplacian source // with given variance when quantized with a uniform quantizer @@ -349,34 +339,29 @@ static void model_rd_norm(int xsq_q10, int *r_q10, int 
*d_q10) { // where x = qpstep / sqrt(variance). // Note the actual distortion is Dn * variance. static const int dist_tab_q10[] = { - 0, 0, 1, 1, 1, 2, 2, 2, - 3, 3, 4, 5, 5, 6, 7, 7, - 8, 9, 11, 12, 13, 15, 16, 17, - 18, 21, 24, 26, 29, 31, 34, 36, - 39, 44, 49, 54, 59, 64, 69, 73, - 78, 88, 97, 106, 115, 124, 133, 142, - 151, 167, 184, 200, 215, 231, 245, 260, - 274, 301, 327, 351, 375, 397, 418, 439, - 458, 495, 528, 559, 587, 613, 637, 659, - 680, 717, 749, 777, 801, 823, 842, 859, - 874, 899, 919, 936, 949, 960, 969, 977, - 983, 994, 1001, 1006, 1010, 1013, 1015, 1017, - 1018, 1020, 1022, 1022, 1023, 1023, 1023, 1024, + 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5, + 5, 6, 7, 7, 8, 9, 11, 12, 13, 15, 16, 17, + 18, 21, 24, 26, 29, 31, 34, 36, 39, 44, 49, 54, + 59, 64, 69, 73, 78, 88, 97, 106, 115, 124, 133, 142, + 151, 167, 184, 200, 215, 231, 245, 260, 274, 301, 327, 351, + 375, 397, 418, 439, 458, 495, 528, 559, 587, 613, 637, 659, + 680, 717, 749, 777, 801, 823, 842, 859, 874, 899, 919, 936, + 949, 960, 969, 977, 983, 994, 1001, 1006, 1010, 1013, 1015, 1017, + 1018, 1020, 1022, 1022, 1023, 1023, 1023, 1024, }; static const int xsq_iq_q10[] = { - 0, 4, 8, 12, 16, 20, 24, 28, - 32, 40, 48, 56, 64, 72, 80, 88, - 96, 112, 128, 144, 160, 176, 192, 208, - 224, 256, 288, 320, 352, 384, 416, 448, - 480, 544, 608, 672, 736, 800, 864, 928, - 992, 1120, 1248, 1376, 1504, 1632, 1760, 1888, - 2016, 2272, 2528, 2784, 3040, 3296, 3552, 3808, - 4064, 4576, 5088, 5600, 6112, 6624, 7136, 7648, - 8160, 9184, 10208, 11232, 12256, 13280, 14304, 15328, - 16352, 18400, 20448, 22496, 24544, 26592, 28640, 30688, - 32736, 36832, 40928, 45024, 49120, 53216, 57312, 61408, - 65504, 73696, 81888, 90080, 98272, 106464, 114656, 122848, - 131040, 147424, 163808, 180192, 196576, 212960, 229344, 245728, + 0, 4, 8, 12, 16, 20, 24, 28, 32, + 40, 48, 56, 64, 72, 80, 88, 96, 112, + 128, 144, 160, 176, 192, 208, 224, 256, 288, + 320, 352, 384, 416, 448, 480, 544, 608, 672, + 736, 800, 864, 928, 992, 1120, 
1248, 1376, 1504, + 1632, 1760, 1888, 2016, 2272, 2528, 2784, 3040, 3296, + 3552, 3808, 4064, 4576, 5088, 5600, 6112, 6624, 7136, + 7648, 8160, 9184, 10208, 11232, 12256, 13280, 14304, 15328, + 16352, 18400, 20448, 22496, 24544, 26592, 28640, 30688, 32736, + 36832, 40928, 45024, 49120, 53216, 57312, 61408, 65504, 73696, + 81888, 90080, 98272, 106464, 114656, 122848, 131040, 147424, 163808, + 180192, 196576, 212960, 229344, 245728, }; const int tmp = (xsq_q10 >> 2) + 8; const int k = get_msb(tmp) - 3; @@ -446,15 +431,12 @@ void vp9_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size, for (i = 0; i < num_4x4_h; i += 8) t_left[i] = !!*(const uint64_t *)&left[i]; break; - default: - assert(0 && "Invalid transform size."); - break; + default: assert(0 && "Invalid transform size."); break; } } -void vp9_mv_pred(VP9_COMP *cpi, MACROBLOCK *x, - uint8_t *ref_y_buffer, int ref_y_stride, - int ref_frame, BLOCK_SIZE block_size) { +void vp9_mv_pred(VP9_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer, + int ref_y_stride, int ref_frame, BLOCK_SIZE block_size) { int i; int zero_seen = 0; int best_index = 0; @@ -464,9 +446,9 @@ void vp9_mv_pred(VP9_COMP *cpi, MACROBLOCK *x, int near_same_nearest; uint8_t *src_y_ptr = x->plane[0].src.buf; uint8_t *ref_y_ptr; - const int num_mv_refs = MAX_MV_REF_CANDIDATES + - (cpi->sf.adaptive_motion_search && - block_size < x->max_partition_size); + const int num_mv_refs = + MAX_MV_REF_CANDIDATES + + (cpi->sf.adaptive_motion_search && block_size < x->max_partition_size); MV pred_mv[3]; pred_mv[0] = x->mbmi_ext->ref_mvs[ref_frame][0].as_mv; @@ -474,25 +456,22 @@ void vp9_mv_pred(VP9_COMP *cpi, MACROBLOCK *x, pred_mv[2] = x->pred_mv[ref_frame]; assert(num_mv_refs <= (int)(sizeof(pred_mv) / sizeof(pred_mv[0]))); - near_same_nearest = - x->mbmi_ext->ref_mvs[ref_frame][0].as_int == - x->mbmi_ext->ref_mvs[ref_frame][1].as_int; + near_same_nearest = x->mbmi_ext->ref_mvs[ref_frame][0].as_int == + x->mbmi_ext->ref_mvs[ref_frame][1].as_int; // Get the sad 
for each candidate reference mv. for (i = 0; i < num_mv_refs; ++i) { const MV *this_mv = &pred_mv[i]; int fp_row, fp_col; - if (i == 1 && near_same_nearest) - continue; + if (i == 1 && near_same_nearest) continue; fp_row = (this_mv->row + 3 + (this_mv->row >= 0)) >> 3; fp_col = (this_mv->col + 3 + (this_mv->col >= 0)) >> 3; max_mv = VPXMAX(max_mv, VPXMAX(abs(this_mv->row), abs(this_mv->col)) >> 3); - if (fp_row ==0 && fp_col == 0 && zero_seen) - continue; - zero_seen |= (fp_row ==0 && fp_col == 0); + if (fp_row == 0 && fp_col == 0 && zero_seen) continue; + zero_seen |= (fp_row == 0 && fp_col == 0); - ref_y_ptr =&ref_y_buffer[ref_y_stride * fp_row + fp_col]; + ref_y_ptr = &ref_y_buffer[ref_y_stride * fp_row + fp_col]; // Find sad for current vector. this_sad = cpi->fn_ptr[block_size].sdf(src_y_ptr, x->plane[0].src.stride, ref_y_ptr, ref_y_stride); @@ -511,8 +490,7 @@ void vp9_mv_pred(VP9_COMP *cpi, MACROBLOCK *x, void vp9_setup_pred_block(const MACROBLOCKD *xd, struct buf_2d dst[MAX_MB_PLANE], - const YV12_BUFFER_CONFIG *src, - int mi_row, int mi_col, + const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col, const struct scale_factors *scale, const struct scale_factors *scale_uv) { int i; @@ -525,21 +503,21 @@ void vp9_setup_pred_block(const MACROBLOCKD *xd, for (i = 0; i < MAX_MB_PLANE; ++i) { setup_pred_plane(dst + i, dst[i].buf, dst[i].stride, mi_row, mi_col, - i ? scale_uv : scale, - xd->plane[i].subsampling_x, xd->plane[i].subsampling_y); + i ? 
scale_uv : scale, xd->plane[i].subsampling_x, + xd->plane[i].subsampling_y); } } -int vp9_raster_block_offset(BLOCK_SIZE plane_bsize, - int raster_block, int stride) { +int vp9_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block, + int stride) { const int bw = b_width_log2_lookup[plane_bsize]; const int y = 4 * (raster_block >> bw); const int x = 4 * (raster_block & ((1 << bw) - 1)); return y * stride + x; } -int16_t* vp9_raster_block_offset_int16(BLOCK_SIZE plane_bsize, - int raster_block, int16_t *base) { +int16_t *vp9_raster_block_offset_int16(BLOCK_SIZE plane_bsize, int raster_block, + int16_t *base) { const int stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; return base + vp9_raster_block_offset(plane_bsize, raster_block, stride); } @@ -549,16 +527,16 @@ YV12_BUFFER_CONFIG *vp9_get_scaled_ref_frame(const VP9_COMP *cpi, const VP9_COMMON *const cm = &cpi->common; const int scaled_idx = cpi->scaled_ref_idx[ref_frame - 1]; const int ref_idx = get_ref_frame_buf_idx(cpi, ref_frame); - return - (scaled_idx != ref_idx && scaled_idx != INVALID_IDX) ? - &cm->buffer_pool->frame_bufs[scaled_idx].buf : NULL; + return (scaled_idx != ref_idx && scaled_idx != INVALID_IDX) + ? 
&cm->buffer_pool->frame_bufs[scaled_idx].buf + : NULL; } int vp9_get_switchable_rate(const VP9_COMP *cpi, const MACROBLOCKD *const xd) { const MODE_INFO *const mi = xd->mi[0]; - const int ctx = vp9_get_pred_context_switchable_interp(xd); + const int ctx = get_pred_context_switchable_interp(xd); return SWITCHABLE_INTERP_RATE_FACTOR * - cpi->switchable_interp_costs[ctx][mi->interp_filter]; + cpi->switchable_interp_costs[ctx][mi->interp_filter]; } void vp9_set_rd_speed_thresholds(VP9_COMP *cpi) { @@ -607,7 +585,7 @@ void vp9_set_rd_speed_thresholds(VP9_COMP *cpi) { rd->thresh_mult[THR_H_PRED] += 2000; rd->thresh_mult[THR_V_PRED] += 2000; - rd->thresh_mult[THR_D45_PRED ] += 2500; + rd->thresh_mult[THR_D45_PRED] += 2500; rd->thresh_mult[THR_D135_PRED] += 2500; rd->thresh_mult[THR_D117_PRED] += 2500; rd->thresh_mult[THR_D153_PRED] += 2500; @@ -616,9 +594,10 @@ void vp9_set_rd_speed_thresholds(VP9_COMP *cpi) { } void vp9_set_rd_speed_thresholds_sub8x8(VP9_COMP *cpi) { - static const int thresh_mult[2][MAX_REFS] = - {{2500, 2500, 2500, 4500, 4500, 2500}, - {2000, 2000, 2000, 4000, 4000, 2000}}; + static const int thresh_mult[2][MAX_REFS] = { + { 2500, 2500, 2500, 4500, 4500, 2500 }, + { 2000, 2000, 2000, 4000, 4000, 2000 } + }; RD_OPT *const rd = &cpi->rd; const int idx = cpi->oxcf.mode == BEST; memcpy(rd->thresh_mult_sub8x8, thresh_mult[idx], sizeof(thresh_mult[idx])); @@ -650,12 +629,9 @@ int vp9_get_intra_cost_penalty(int qindex, int qdelta, const int q = vp9_dc_quant(qindex, qdelta, bit_depth); #if CONFIG_VP9_HIGHBITDEPTH switch (bit_depth) { - case VPX_BITS_8: - return 20 * q; - case VPX_BITS_10: - return 5 * q; - case VPX_BITS_12: - return ROUND_POWER_OF_TWO(5 * q, 2); + case VPX_BITS_8: return 20 * q; + case VPX_BITS_10: return 5 * q; + case VPX_BITS_12: return ROUND_POWER_OF_TWO(5 * q, 2); default: assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12"); return -1; @@ -664,4 +640,3 @@ int vp9_get_intra_cost_penalty(int qindex, int qdelta, return 
20 * q; #endif // CONFIG_VP9_HIGHBITDEPTH } - diff --git a/libs/libvpx/vp9/encoder/vp9_rd.h b/libs/libvpx/vp9/encoder/vp9_rd.h index 9b8e2732c5..05344b6cf0 100644 --- a/libs/libvpx/vp9/encoder/vp9_rd.h +++ b/libs/libvpx/vp9/encoder/vp9_rd.h @@ -23,23 +23,23 @@ extern "C" { #endif -#define RDDIV_BITS 7 -#define RD_EPB_SHIFT 6 +#define RDDIV_BITS 7 +#define RD_EPB_SHIFT 6 #define RDCOST(RM, DM, R, D) \ (ROUND_POWER_OF_TWO(((int64_t)R) * (RM), VP9_PROB_COST_SHIFT) + (D << DM)) -#define QIDX_SKIP_THRESH 115 +#define QIDX_SKIP_THRESH 115 -#define MV_COST_WEIGHT 108 -#define MV_COST_WEIGHT_SUB 120 +#define MV_COST_WEIGHT 108 +#define MV_COST_WEIGHT_SUB 120 #define INVALID_MV 0x80008000 #define MAX_MODES 30 -#define MAX_REFS 6 +#define MAX_REFS 6 #define RD_THRESH_MAX_FACT 64 -#define RD_THRESH_INC 1 +#define RD_THRESH_INC 1 // This enumerator type needs to be kept aligned with the mode order in // const MODE_DEFINITION vp9_mode_order[MAX_MODES] used in the rd code. @@ -135,17 +135,16 @@ void vp9_initialize_rd_consts(struct VP9_COMP *cpi); void vp9_initialize_me_consts(struct VP9_COMP *cpi, MACROBLOCK *x, int qindex); void vp9_model_rd_from_var_lapndz(unsigned int var, unsigned int n, - unsigned int qstep, int *rate, - int64_t *dist); + unsigned int qstep, int *rate, int64_t *dist); int vp9_get_switchable_rate(const struct VP9_COMP *cpi, const MACROBLOCKD *const xd); -int vp9_raster_block_offset(BLOCK_SIZE plane_bsize, - int raster_block, int stride); +int vp9_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block, + int stride); -int16_t* vp9_raster_block_offset_int16(BLOCK_SIZE plane_bsize, - int raster_block, int16_t *base); +int16_t *vp9_raster_block_offset_int16(BLOCK_SIZE plane_bsize, int raster_block, + int16_t *base); YV12_BUFFER_CONFIG *vp9_get_scaled_ref_frame(const struct VP9_COMP *cpi, int ref_frame); @@ -161,12 +160,12 @@ void vp9_set_rd_speed_thresholds(struct VP9_COMP *cpi); void vp9_set_rd_speed_thresholds_sub8x8(struct VP9_COMP *cpi); -void 
vp9_update_rd_thresh_fact(int (*fact)[MAX_MODES], int rd_thresh, - int bsize, int best_mode_index); +void vp9_update_rd_thresh_fact(int (*fact)[MAX_MODES], int rd_thresh, int bsize, + int best_mode_index); static INLINE int rd_less_than_thresh(int64_t best_rd, int thresh, int thresh_fact) { - return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX; + return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX; } static INLINE void set_error_per_bit(MACROBLOCK *x, int rdmult) { @@ -174,14 +173,12 @@ static INLINE void set_error_per_bit(MACROBLOCK *x, int rdmult) { x->errorperbit += (x->errorperbit == 0); } -void vp9_mv_pred(struct VP9_COMP *cpi, MACROBLOCK *x, - uint8_t *ref_y_buffer, int ref_y_stride, - int ref_frame, BLOCK_SIZE block_size); +void vp9_mv_pred(struct VP9_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer, + int ref_y_stride, int ref_frame, BLOCK_SIZE block_size); void vp9_setup_pred_block(const MACROBLOCKD *xd, struct buf_2d dst[MAX_MB_PLANE], - const YV12_BUFFER_CONFIG *src, - int mi_row, int mi_col, + const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col, const struct scale_factors *scale, const struct scale_factors *scale_uv); diff --git a/libs/libvpx/vp9/encoder/vp9_rdopt.c b/libs/libvpx/vp9/encoder/vp9_rdopt.c index 193c9d33cf..27d4e9d6d3 100644 --- a/libs/libvpx/vp9/encoder/vp9_rdopt.c +++ b/libs/libvpx/vp9/encoder/vp9_rdopt.c @@ -42,28 +42,27 @@ #include "vp9/encoder/vp9_rdopt.h" #include "vp9/encoder/vp9_aq_variance.h" -#define LAST_FRAME_MODE_MASK ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | \ - (1 << INTRA_FRAME)) -#define GOLDEN_FRAME_MODE_MASK ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | \ - (1 << INTRA_FRAME)) -#define ALT_REF_MODE_MASK ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | \ - (1 << INTRA_FRAME)) +#define LAST_FRAME_MODE_MASK \ + ((1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME)) +#define GOLDEN_FRAME_MODE_MASK \ + ((1 << LAST_FRAME) | (1 << ALTREF_FRAME) | (1 << INTRA_FRAME)) +#define 
ALT_REF_MODE_MASK \ + ((1 << LAST_FRAME) | (1 << GOLDEN_FRAME) | (1 << INTRA_FRAME)) -#define SECOND_REF_FRAME_MASK ((1 << ALTREF_FRAME) | 0x01) +#define SECOND_REF_FRAME_MASK ((1 << ALTREF_FRAME) | 0x01) -#define MIN_EARLY_TERM_INDEX 3 -#define NEW_MV_DISCOUNT_FACTOR 8 +#define MIN_EARLY_TERM_INDEX 3 +#define NEW_MV_DISCOUNT_FACTOR 8 typedef struct { PREDICTION_MODE mode; MV_REFERENCE_FRAME ref_frame[2]; } MODE_DEFINITION; -typedef struct { - MV_REFERENCE_FRAME ref_frame[2]; -} REF_DEFINITION; +typedef struct { MV_REFERENCE_FRAME ref_frame[2]; } REF_DEFINITION; struct rdcost_block_args { + const VP9_COMP *cpi; MACROBLOCK *x; ENTROPY_CONTEXT t_above[16]; ENTROPY_CONTEXT t_left[16]; @@ -80,85 +79,82 @@ struct rdcost_block_args { #define LAST_NEW_MV_INDEX 6 static const MODE_DEFINITION vp9_mode_order[MAX_MODES] = { - {NEARESTMV, {LAST_FRAME, NONE}}, - {NEARESTMV, {ALTREF_FRAME, NONE}}, - {NEARESTMV, {GOLDEN_FRAME, NONE}}, + { NEARESTMV, { LAST_FRAME, NONE } }, + { NEARESTMV, { ALTREF_FRAME, NONE } }, + { NEARESTMV, { GOLDEN_FRAME, NONE } }, - {DC_PRED, {INTRA_FRAME, NONE}}, + { DC_PRED, { INTRA_FRAME, NONE } }, - {NEWMV, {LAST_FRAME, NONE}}, - {NEWMV, {ALTREF_FRAME, NONE}}, - {NEWMV, {GOLDEN_FRAME, NONE}}, + { NEWMV, { LAST_FRAME, NONE } }, + { NEWMV, { ALTREF_FRAME, NONE } }, + { NEWMV, { GOLDEN_FRAME, NONE } }, - {NEARMV, {LAST_FRAME, NONE}}, - {NEARMV, {ALTREF_FRAME, NONE}}, - {NEARMV, {GOLDEN_FRAME, NONE}}, + { NEARMV, { LAST_FRAME, NONE } }, + { NEARMV, { ALTREF_FRAME, NONE } }, + { NEARMV, { GOLDEN_FRAME, NONE } }, - {ZEROMV, {LAST_FRAME, NONE}}, - {ZEROMV, {GOLDEN_FRAME, NONE}}, - {ZEROMV, {ALTREF_FRAME, NONE}}, + { ZEROMV, { LAST_FRAME, NONE } }, + { ZEROMV, { GOLDEN_FRAME, NONE } }, + { ZEROMV, { ALTREF_FRAME, NONE } }, - {NEARESTMV, {LAST_FRAME, ALTREF_FRAME}}, - {NEARESTMV, {GOLDEN_FRAME, ALTREF_FRAME}}, + { NEARESTMV, { LAST_FRAME, ALTREF_FRAME } }, + { NEARESTMV, { GOLDEN_FRAME, ALTREF_FRAME } }, - {TM_PRED, {INTRA_FRAME, NONE}}, + { TM_PRED, { 
INTRA_FRAME, NONE } }, - {NEARMV, {LAST_FRAME, ALTREF_FRAME}}, - {NEWMV, {LAST_FRAME, ALTREF_FRAME}}, - {NEARMV, {GOLDEN_FRAME, ALTREF_FRAME}}, - {NEWMV, {GOLDEN_FRAME, ALTREF_FRAME}}, + { NEARMV, { LAST_FRAME, ALTREF_FRAME } }, + { NEWMV, { LAST_FRAME, ALTREF_FRAME } }, + { NEARMV, { GOLDEN_FRAME, ALTREF_FRAME } }, + { NEWMV, { GOLDEN_FRAME, ALTREF_FRAME } }, - {ZEROMV, {LAST_FRAME, ALTREF_FRAME}}, - {ZEROMV, {GOLDEN_FRAME, ALTREF_FRAME}}, + { ZEROMV, { LAST_FRAME, ALTREF_FRAME } }, + { ZEROMV, { GOLDEN_FRAME, ALTREF_FRAME } }, - {H_PRED, {INTRA_FRAME, NONE}}, - {V_PRED, {INTRA_FRAME, NONE}}, - {D135_PRED, {INTRA_FRAME, NONE}}, - {D207_PRED, {INTRA_FRAME, NONE}}, - {D153_PRED, {INTRA_FRAME, NONE}}, - {D63_PRED, {INTRA_FRAME, NONE}}, - {D117_PRED, {INTRA_FRAME, NONE}}, - {D45_PRED, {INTRA_FRAME, NONE}}, + { H_PRED, { INTRA_FRAME, NONE } }, + { V_PRED, { INTRA_FRAME, NONE } }, + { D135_PRED, { INTRA_FRAME, NONE } }, + { D207_PRED, { INTRA_FRAME, NONE } }, + { D153_PRED, { INTRA_FRAME, NONE } }, + { D63_PRED, { INTRA_FRAME, NONE } }, + { D117_PRED, { INTRA_FRAME, NONE } }, + { D45_PRED, { INTRA_FRAME, NONE } }, }; static const REF_DEFINITION vp9_ref_order[MAX_REFS] = { - {{LAST_FRAME, NONE}}, - {{GOLDEN_FRAME, NONE}}, - {{ALTREF_FRAME, NONE}}, - {{LAST_FRAME, ALTREF_FRAME}}, - {{GOLDEN_FRAME, ALTREF_FRAME}}, - {{INTRA_FRAME, NONE}}, + { { LAST_FRAME, NONE } }, { { GOLDEN_FRAME, NONE } }, + { { ALTREF_FRAME, NONE } }, { { LAST_FRAME, ALTREF_FRAME } }, + { { GOLDEN_FRAME, ALTREF_FRAME } }, { { INTRA_FRAME, NONE } }, }; -static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, - int m, int n, int min_plane, int max_plane) { +static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int m, int n, + int min_plane, int max_plane) { int i; for (i = min_plane; i < max_plane; ++i) { struct macroblock_plane *const p = &x->plane[i]; struct macroblockd_plane *const pd = &x->e_mbd.plane[i]; - p->coeff = ctx->coeff_pbuf[i][m]; - p->qcoeff = ctx->qcoeff_pbuf[i][m]; 
+ p->coeff = ctx->coeff_pbuf[i][m]; + p->qcoeff = ctx->qcoeff_pbuf[i][m]; pd->dqcoeff = ctx->dqcoeff_pbuf[i][m]; - p->eobs = ctx->eobs_pbuf[i][m]; + p->eobs = ctx->eobs_pbuf[i][m]; - ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n]; - ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n]; + ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n]; + ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n]; ctx->dqcoeff_pbuf[i][m] = ctx->dqcoeff_pbuf[i][n]; - ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n]; + ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n]; - ctx->coeff_pbuf[i][n] = p->coeff; - ctx->qcoeff_pbuf[i][n] = p->qcoeff; + ctx->coeff_pbuf[i][n] = p->coeff; + ctx->qcoeff_pbuf[i][n] = p->qcoeff; ctx->dqcoeff_pbuf[i][n] = pd->dqcoeff; - ctx->eobs_pbuf[i][n] = p->eobs; + ctx->eobs_pbuf[i][n] = p->eobs; } } -static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize, - MACROBLOCK *x, MACROBLOCKD *xd, - int *out_rate_sum, int64_t *out_dist_sum, - int *skip_txfm_sb, int64_t *skip_sse_sb) { +static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x, + MACROBLOCKD *xd, int *out_rate_sum, + int64_t *out_dist_sum, int *skip_txfm_sb, + int64_t *skip_sse_sb) { // Note our transform coeffs are 8 times an orthogonal transform. // Hence quantizer step is also 8 times. To get effective quantizer // we need to divide by 8 before sending to modeling function. @@ -176,10 +172,9 @@ static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize, int64_t dist; const int dequant_shift = #if CONFIG_VP9_HIGHBITDEPTH - (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? - xd->bd - 5 : + (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 
xd->bd - 5 : #endif // CONFIG_VP9_HIGHBITDEPTH - 3; + 3; x->pred_sse[ref] = 0; @@ -210,8 +205,8 @@ static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize, int block_idx = (idy << 1) + idx; int low_err_skip = 0; - var = cpi->fn_ptr[unit_size].vf(src, p->src.stride, - dst, pd->dst.stride, &sse); + var = cpi->fn_ptr[unit_size].vf(src, p->src.stride, dst, pd->dst.stride, + &sse); x->bsse[(i << 2) + block_idx] = sse; sum_sse += sse; @@ -231,11 +226,9 @@ static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize, } } - if (skip_flag && !low_err_skip) - skip_flag = 0; + if (skip_flag && !low_err_skip) skip_flag = 0; - if (i == 0) - x->pred_sse[ref] += sse; + if (i == 0) x->pred_sse[ref] += sse; } } @@ -256,8 +249,8 @@ static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize, dist_sum += dist; } else { vp9_model_rd_from_var_lapndz(sum_sse, num_pels_log2_lookup[bs], - pd->dequant[1] >> dequant_shift, - &rate, &dist); + pd->dequant[1] >> dequant_shift, &rate, + &dist); rate_sum += rate; dist_sum += dist; } @@ -271,8 +264,7 @@ static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize, #if CONFIG_VP9_HIGHBITDEPTH int64_t vp9_highbd_block_error_c(const tran_low_t *coeff, - const tran_low_t *dqcoeff, - intptr_t block_size, + const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd) { int i; int64_t error = 0, sqcoeff = 0; @@ -281,7 +273,7 @@ int64_t vp9_highbd_block_error_c(const tran_low_t *coeff, for (i = 0; i < block_size; i++) { const int64_t diff = coeff[i] - dqcoeff[i]; - error += diff * diff; + error += diff * diff; sqcoeff += (int64_t)coeff[i] * (int64_t)coeff[i]; } assert(error >= 0 && sqcoeff >= 0); @@ -294,8 +286,7 @@ int64_t vp9_highbd_block_error_c(const tran_low_t *coeff, int64_t vp9_highbd_block_error_8bit_c(const tran_low_t *coeff, const tran_low_t *dqcoeff, - intptr_t block_size, - int64_t *ssz) { + intptr_t block_size, int64_t *ssz) { // Note that the C versions of these 2 functions (vp9_block_error and // vp9_highbd_block_error_8bit are 
the same, but the optimized assembly // routines are not compatible in the non high bitdepth configuration, so @@ -322,7 +313,7 @@ int64_t vp9_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff, for (i = 0; i < block_size; i++) { const int diff = coeff[i] - dqcoeff[i]; - error += diff * diff; + error += diff * diff; sqcoeff += coeff[i] * coeff[i]; } @@ -337,7 +328,7 @@ int64_t vp9_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff, for (i = 0; i < block_size; i++) { const int diff = coeff[i] - dqcoeff[i]; - error += diff * diff; + error += diff * diff; } return error; @@ -349,16 +340,13 @@ int64_t vp9_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff, * 16th coefficient in a 4x4 block or the 64th coefficient in a 8x8 block, * were non-zero). */ static const int16_t band_counts[TX_SIZES][8] = { - { 1, 2, 3, 4, 3, 16 - 13, 0 }, - { 1, 2, 3, 4, 11, 64 - 21, 0 }, - { 1, 2, 3, 4, 11, 256 - 21, 0 }, + { 1, 2, 3, 4, 3, 16 - 13, 0 }, + { 1, 2, 3, 4, 11, 64 - 21, 0 }, + { 1, 2, 3, 4, 11, 256 - 21, 0 }, { 1, 2, 3, 4, 11, 1024 - 21, 0 }, }; -static int cost_coeffs(MACROBLOCK *x, - int plane, int block, - ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L, - TX_SIZE tx_size, - const int16_t *scan, const int16_t *nb, +static int cost_coeffs(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size, + int pt, const int16_t *scan, const int16_t *nb, int use_fast_coef_costing) { MACROBLOCKD *const xd = &x->e_mbd; MODE_INFO *mi = xd->mi[0]; @@ -367,10 +355,9 @@ static int cost_coeffs(MACROBLOCK *x, const int16_t *band_count = &band_counts[tx_size][1]; const int eob = p->eobs[block]; const tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block); - unsigned int (*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] = - x->token_costs[tx_size][type][is_inter_block(mi)]; + unsigned int(*token_costs)[2][COEFF_CONTEXTS][ENTROPY_TOKENS] = + x->token_costs[tx_size][type][is_inter_block(mi)]; uint8_t token_cache[32 * 32]; - int pt = combine_entropy_contexts(*A, *L); int c, 
cost; #if CONFIG_VP9_HIGHBITDEPTH const int *cat6_high_cost = vp9_get_high_cost_table(xd->bd); @@ -379,115 +366,289 @@ static int cost_coeffs(MACROBLOCK *x, #endif // Check for consistency of tx_size with mode info - assert(type == PLANE_TYPE_Y ? mi->tx_size == tx_size : - get_uv_tx_size(mi, &xd->plane[plane]) == tx_size); + assert(type == PLANE_TYPE_Y + ? mi->tx_size == tx_size + : get_uv_tx_size(mi, &xd->plane[plane]) == tx_size); if (eob == 0) { // single eob token cost = token_costs[0][0][pt][EOB_TOKEN]; c = 0; } else { - int band_left = *band_count++; + if (use_fast_coef_costing) { + int band_left = *band_count++; - // dc token - int v = qcoeff[0]; - int16_t prev_t; - EXTRABIT e; - vp9_get_token_extra(v, &prev_t, &e); - cost = (*token_costs)[0][pt][prev_t] + - vp9_get_cost(prev_t, e, cat6_high_cost); + // dc token + int v = qcoeff[0]; + int16_t prev_t; + cost = vp9_get_token_cost(v, &prev_t, cat6_high_cost); + cost += (*token_costs)[0][pt][prev_t]; - token_cache[0] = vp9_pt_energy_class[prev_t]; - ++token_costs; + token_cache[0] = vp9_pt_energy_class[prev_t]; + ++token_costs; - // ac tokens - for (c = 1; c < eob; c++) { - const int rc = scan[c]; - int16_t t; + // ac tokens + for (c = 1; c < eob; c++) { + const int rc = scan[c]; + int16_t t; - v = qcoeff[rc]; - vp9_get_token_extra(v, &t, &e); - if (use_fast_coef_costing) { - cost += (*token_costs)[!prev_t][!prev_t][t] + - vp9_get_cost(t, e, cat6_high_cost); - } else { + v = qcoeff[rc]; + cost += vp9_get_token_cost(v, &t, cat6_high_cost); + cost += (*token_costs)[!prev_t][!prev_t][t]; + prev_t = t; + if (!--band_left) { + band_left = *band_count++; + ++token_costs; + } + } + + // eob token + if (band_left) cost += (*token_costs)[0][!prev_t][EOB_TOKEN]; + + } else { // !use_fast_coef_costing + int band_left = *band_count++; + + // dc token + int v = qcoeff[0]; + int16_t tok; + unsigned int(*tok_cost_ptr)[COEFF_CONTEXTS][ENTROPY_TOKENS]; + cost = vp9_get_token_cost(v, &tok, cat6_high_cost); + cost += 
(*token_costs)[0][pt][tok]; + + token_cache[0] = vp9_pt_energy_class[tok]; + ++token_costs; + + tok_cost_ptr = &((*token_costs)[!tok]); + + // ac tokens + for (c = 1; c < eob; c++) { + const int rc = scan[c]; + + v = qcoeff[rc]; + cost += vp9_get_token_cost(v, &tok, cat6_high_cost); pt = get_coef_context(nb, token_cache, c); - cost += (*token_costs)[!prev_t][pt][t] + - vp9_get_cost(t, e, cat6_high_cost); - token_cache[rc] = vp9_pt_energy_class[t]; + cost += (*tok_cost_ptr)[pt][tok]; + token_cache[rc] = vp9_pt_energy_class[tok]; + if (!--band_left) { + band_left = *band_count++; + ++token_costs; + } + tok_cost_ptr = &((*token_costs)[!tok]); } - prev_t = t; - if (!--band_left) { - band_left = *band_count++; - ++token_costs; - } - } - // eob token - if (band_left) { - if (use_fast_coef_costing) { - cost += (*token_costs)[0][!prev_t][EOB_TOKEN]; - } else { + // eob token + if (band_left) { pt = get_coef_context(nb, token_cache, c); cost += (*token_costs)[0][pt][EOB_TOKEN]; } } } - // is eob first coefficient; - *A = *L = (c > 0); - return cost; } -static void dist_block(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size, - int64_t *out_dist, int64_t *out_sse) { - const int ss_txfrm_size = tx_size << 1; - MACROBLOCKD* const xd = &x->e_mbd; +static INLINE int num_4x4_to_edge(int plane_4x4_dim, int mb_to_edge_dim, + int subsampling_dim, int blk_dim) { + return plane_4x4_dim + (mb_to_edge_dim >> (5 + subsampling_dim)) - blk_dim; +} + +// Compute the pixel domain sum square error on all visible 4x4s in the +// transform block. 
+static unsigned pixel_sse(const VP9_COMP *const cpi, const MACROBLOCKD *xd, + const struct macroblockd_plane *const pd, + const uint8_t *src, const int src_stride, + const uint8_t *dst, const int dst_stride, int blk_row, + int blk_col, const BLOCK_SIZE plane_bsize, + const BLOCK_SIZE tx_bsize) { + unsigned int sse = 0; + const int plane_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; + const int plane_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; + const int tx_4x4_w = num_4x4_blocks_wide_lookup[tx_bsize]; + const int tx_4x4_h = num_4x4_blocks_high_lookup[tx_bsize]; + int b4x4s_to_right_edge = num_4x4_to_edge(plane_4x4_w, xd->mb_to_right_edge, + pd->subsampling_x, blk_col); + int b4x4s_to_bottom_edge = num_4x4_to_edge(plane_4x4_h, xd->mb_to_bottom_edge, + pd->subsampling_y, blk_row); + if (tx_bsize == BLOCK_4X4 || + (b4x4s_to_right_edge >= tx_4x4_w && b4x4s_to_bottom_edge >= tx_4x4_h)) { + cpi->fn_ptr[tx_bsize].vf(src, src_stride, dst, dst_stride, &sse); + } else { + const vpx_variance_fn_t vf_4x4 = cpi->fn_ptr[BLOCK_4X4].vf; + int r, c; + unsigned this_sse = 0; + int max_r = VPXMIN(b4x4s_to_bottom_edge, tx_4x4_h); + int max_c = VPXMIN(b4x4s_to_right_edge, tx_4x4_w); + sse = 0; + // if we are in the unrestricted motion border. + for (r = 0; r < max_r; ++r) { + // Skip visiting the sub blocks that are wholly within the UMV. + for (c = 0; c < max_c; ++c) { + vf_4x4(src + r * src_stride * 4 + c * 4, src_stride, + dst + r * dst_stride * 4 + c * 4, dst_stride, &this_sse); + sse += this_sse; + } + } + } + return sse; +} + +// Compute the squares sum squares on all visible 4x4s in the transform block. 
+static int64_t sum_squares_visible(const MACROBLOCKD *xd, + const struct macroblockd_plane *const pd, + const int16_t *diff, const int diff_stride, + int blk_row, int blk_col, + const BLOCK_SIZE plane_bsize, + const BLOCK_SIZE tx_bsize) { + int64_t sse; + const int plane_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; + const int plane_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; + const int tx_4x4_w = num_4x4_blocks_wide_lookup[tx_bsize]; + const int tx_4x4_h = num_4x4_blocks_high_lookup[tx_bsize]; + int b4x4s_to_right_edge = num_4x4_to_edge(plane_4x4_w, xd->mb_to_right_edge, + pd->subsampling_x, blk_col); + int b4x4s_to_bottom_edge = num_4x4_to_edge(plane_4x4_h, xd->mb_to_bottom_edge, + pd->subsampling_y, blk_row); + if (tx_bsize == BLOCK_4X4 || + (b4x4s_to_right_edge >= tx_4x4_w && b4x4s_to_bottom_edge >= tx_4x4_h)) { + sse = (int64_t)vpx_sum_squares_2d_i16(diff, diff_stride, tx_bsize); + } else { + int r, c; + int max_r = VPXMIN(b4x4s_to_bottom_edge, tx_4x4_h); + int max_c = VPXMIN(b4x4s_to_right_edge, tx_4x4_w); + sse = 0; + // if we are in the unrestricted motion border. + for (r = 0; r < max_r; ++r) { + // Skip visiting the sub blocks that are wholly within the UMV. + for (c = 0; c < max_c; ++c) { + sse += (int64_t)vpx_sum_squares_2d_i16(diff, diff_stride, BLOCK_4X4); + } + } + } + return sse; +} + +static void dist_block(const VP9_COMP *cpi, MACROBLOCK *x, int plane, + BLOCK_SIZE plane_bsize, int block, int blk_row, + int blk_col, TX_SIZE tx_size, int64_t *out_dist, + int64_t *out_sse) { + MACROBLOCKD *const xd = &x->e_mbd; const struct macroblock_plane *const p = &x->plane[plane]; const struct macroblockd_plane *const pd = &xd->plane[plane]; - int64_t this_sse; - int shift = tx_size == TX_32X32 ? 0 : 2; - tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block); - tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); -#if CONFIG_VP9_HIGHBITDEPTH - const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 
xd->bd : 8; - *out_dist = vp9_highbd_block_error_dispatch(coeff, dqcoeff, - 16 << ss_txfrm_size, - &this_sse, bd) >> shift; -#else - *out_dist = vp9_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, - &this_sse) >> shift; -#endif // CONFIG_VP9_HIGHBITDEPTH - *out_sse = this_sse >> shift; - if (x->skip_encode && !is_inter_block(xd->mi[0])) { - // TODO(jingning): tune the model to better capture the distortion. - int64_t p = (pd->dequant[1] * pd->dequant[1] * - (1 << ss_txfrm_size)) >> + if (x->block_tx_domain) { + const int ss_txfrm_size = tx_size << 1; + int64_t this_sse; + const int shift = tx_size == TX_32X32 ? 0 : 2; + const tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block); + const tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); #if CONFIG_VP9_HIGHBITDEPTH - (shift + 2 + (bd - 8) * 2); + const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8; + *out_dist = vp9_highbd_block_error_dispatch( + coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse, bd) >> + shift; #else - (shift + 2); + *out_dist = + vp9_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >> + shift; #endif // CONFIG_VP9_HIGHBITDEPTH - *out_dist += (p >> 4); - *out_sse += p; + *out_sse = this_sse >> shift; + + if (x->skip_encode && !is_inter_block(xd->mi[0])) { + // TODO(jingning): tune the model to better capture the distortion. 
+ const int64_t p = + (pd->dequant[1] * pd->dequant[1] * (1 << ss_txfrm_size)) >> +#if CONFIG_VP9_HIGHBITDEPTH + (shift + 2 + (bd - 8) * 2); +#else + (shift + 2); +#endif // CONFIG_VP9_HIGHBITDEPTH + *out_dist += (p >> 4); + *out_sse += p; + } + } else { + const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size]; + const int bs = 4 * num_4x4_blocks_wide_lookup[tx_bsize]; + const int src_stride = p->src.stride; + const int dst_stride = pd->dst.stride; + const int src_idx = 4 * (blk_row * src_stride + blk_col); + const int dst_idx = 4 * (blk_row * dst_stride + blk_col); + const uint8_t *src = &p->src.buf[src_idx]; + const uint8_t *dst = &pd->dst.buf[dst_idx]; + const tran_low_t *dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block); + const uint16_t *eob = &p->eobs[block]; + unsigned int tmp; + + tmp = pixel_sse(cpi, xd, pd, src, src_stride, dst, dst_stride, blk_row, + blk_col, plane_bsize, tx_bsize); + *out_sse = (int64_t)tmp * 16; + + if (*eob) { +#if CONFIG_VP9_HIGHBITDEPTH + DECLARE_ALIGNED(16, uint16_t, recon16[1024]); + uint8_t *recon = (uint8_t *)recon16; +#else + DECLARE_ALIGNED(16, uint8_t, recon[1024]); +#endif // CONFIG_VP9_HIGHBITDEPTH + +#if CONFIG_VP9_HIGHBITDEPTH + if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { + recon = CONVERT_TO_BYTEPTR(recon); + vpx_highbd_convolve_copy(dst, dst_stride, recon, 32, NULL, 0, NULL, 0, + bs, bs, xd->bd); + if (xd->lossless) { + vp9_highbd_iwht4x4_add(dqcoeff, recon, 32, *eob, xd->bd); + } else { + switch (tx_size) { + case TX_4X4: + vp9_highbd_idct4x4_add(dqcoeff, recon, 32, *eob, xd->bd); + break; + case TX_8X8: + vp9_highbd_idct8x8_add(dqcoeff, recon, 32, *eob, xd->bd); + break; + case TX_16X16: + vp9_highbd_idct16x16_add(dqcoeff, recon, 32, *eob, xd->bd); + break; + case TX_32X32: + vp9_highbd_idct32x32_add(dqcoeff, recon, 32, *eob, xd->bd); + break; + default: assert(0 && "Invalid transform size"); + } + } + } else { +#endif // CONFIG_VP9_HIGHBITDEPTH + vpx_convolve_copy(dst, dst_stride, recon, 32, NULL, 0, NULL, 0, bs, bs); 
+ switch (tx_size) { + case TX_32X32: vp9_idct32x32_add(dqcoeff, recon, 32, *eob); break; + case TX_16X16: vp9_idct16x16_add(dqcoeff, recon, 32, *eob); break; + case TX_8X8: vp9_idct8x8_add(dqcoeff, recon, 32, *eob); break; + case TX_4X4: + // this is like vp9_short_idct4x4 but has a special case around + // eob<=1, which is significant (not just an optimization) for + // the lossless case. + x->itxm_add(dqcoeff, recon, 32, *eob); + break; + default: assert(0 && "Invalid transform size"); break; + } +#if CONFIG_VP9_HIGHBITDEPTH + } +#endif // CONFIG_VP9_HIGHBITDEPTH + + tmp = pixel_sse(cpi, xd, pd, src, src_stride, recon, 32, blk_row, blk_col, + plane_bsize, tx_bsize); + } + + *out_dist = (int64_t)tmp * 16; } } -static int rate_block(int plane, int block, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, struct rdcost_block_args* args) { - int x_idx, y_idx; - txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x_idx, &y_idx); - - return cost_coeffs(args->x, plane, block, args->t_above + x_idx, - args->t_left + y_idx, tx_size, - args->so->scan, args->so->neighbors, - args->use_fast_coef_costing); +static int rate_block(int plane, int block, TX_SIZE tx_size, int coeff_ctx, + struct rdcost_block_args *args) { + return cost_coeffs(args->x, plane, block, tx_size, coeff_ctx, args->so->scan, + args->so->neighbors, args->use_fast_coef_costing); } -static void block_rd_txfm(int plane, int block, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { +static void block_rd_txfm(int plane, int block, int blk_row, int blk_col, + BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) { struct rdcost_block_args *args = arg; MACROBLOCK *const x = args->x; MACROBLOCKD *const xd = &x->e_mbd; @@ -496,27 +657,58 @@ static void block_rd_txfm(int plane, int block, BLOCK_SIZE plane_bsize, int rate; int64_t dist; int64_t sse; + const int coeff_ctx = + combine_entropy_contexts(args->t_left[blk_row], args->t_above[blk_col]); - if (args->exit_early) - return; + if (args->exit_early) return; if 
(!is_inter_block(mi)) { - struct encode_b_args arg = {x, NULL, &mi->skip}; - vp9_encode_block_intra(plane, block, plane_bsize, tx_size, &arg); - dist_block(x, plane, block, tx_size, &dist, &sse); + struct encode_b_args intra_arg = { x, x->block_qcoeff_opt, args->t_above, + args->t_left, &mi->skip }; + vp9_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize, tx_size, + &intra_arg); + if (x->block_tx_domain) { + dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col, + tx_size, &dist, &sse); + } else { + const BLOCK_SIZE tx_bsize = txsize_to_bsize[tx_size]; + const struct macroblock_plane *const p = &x->plane[plane]; + const struct macroblockd_plane *const pd = &xd->plane[plane]; + const int src_stride = p->src.stride; + const int dst_stride = pd->dst.stride; + const int diff_stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize]; + const uint8_t *src = &p->src.buf[4 * (blk_row * src_stride + blk_col)]; + const uint8_t *dst = &pd->dst.buf[4 * (blk_row * dst_stride + blk_col)]; + const int16_t *diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)]; + unsigned int tmp; + sse = sum_squares_visible(xd, pd, diff, diff_stride, blk_row, blk_col, + plane_bsize, tx_bsize); +#if CONFIG_VP9_HIGHBITDEPTH + if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && (xd->bd > 8)) + sse = ROUND64_POWER_OF_TWO(sse, (xd->bd - 8) * 2); +#endif // CONFIG_VP9_HIGHBITDEPTH + sse = sse * 16; + tmp = pixel_sse(args->cpi, xd, pd, src, src_stride, dst, dst_stride, + blk_row, blk_col, plane_bsize, tx_bsize); + dist = (int64_t)tmp * 16; + } } else if (max_txsize_lookup[plane_bsize] == tx_size) { if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] == SKIP_TXFM_NONE) { // full forward transform and quantization - vp9_xform_quant(x, plane, block, plane_bsize, tx_size); - dist_block(x, plane, block, tx_size, &dist, &sse); + vp9_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size); + if (x->block_qcoeff_opt) + vp9_optimize_b(x, plane, block, tx_size, 
coeff_ctx); + dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col, + tx_size, &dist, &sse); } else if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] == SKIP_TXFM_AC_ONLY) { // compute DC coefficient - tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block); + tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block); tran_low_t *const dqcoeff = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block); - vp9_xform_quant_dc(x, plane, block, plane_bsize, tx_size); - sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4; + vp9_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize, + tx_size); + sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4; dist = sse; if (x->plane[plane].eobs[block]) { const int64_t orig_sse = (int64_t)coeff[0] * coeff[0]; @@ -525,8 +717,7 @@ static void block_rd_txfm(int plane, int block, BLOCK_SIZE plane_bsize, #if CONFIG_VP9_HIGHBITDEPTH dc_correct >>= ((xd->bd - 8) * 2); #endif - if (tx_size != TX_32X32) - dc_correct >>= 2; + if (tx_size != TX_32X32) dc_correct >>= 2; dist = VPXMAX(0, sse - dc_correct); } @@ -534,13 +725,16 @@ static void block_rd_txfm(int plane, int block, BLOCK_SIZE plane_bsize, // SKIP_TXFM_AC_DC // skip forward transform x->plane[plane].eobs[block] = 0; - sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4; + sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4; dist = sse; } } else { // full forward transform and quantization - vp9_xform_quant(x, plane, block, plane_bsize, tx_size); - dist_block(x, plane, block, tx_size, &dist, &sse); + vp9_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size); + if (x->block_qcoeff_opt) + vp9_optimize_b(x, plane, block, tx_size, coeff_ctx); + dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col, + tx_size, &dist, &sse); } rd = RDCOST(x->rdmult, x->rddiv, 0, dist); @@ -549,15 +743,17 @@ static void block_rd_txfm(int plane, int block, BLOCK_SIZE plane_bsize, return; } - rate = 
rate_block(plane, block, plane_bsize, tx_size, args); + rate = rate_block(plane, block, tx_size, coeff_ctx, args); + args->t_above[blk_col] = (x->plane[plane].eobs[block] > 0) ? 1 : 0; + args->t_left[blk_row] = (x->plane[plane].eobs[block] > 0) ? 1 : 0; rd1 = RDCOST(x->rdmult, x->rddiv, rate, dist); rd2 = RDCOST(x->rdmult, x->rddiv, 0, sse); // TODO(jingning): temporarily enabled only for luma component rd = VPXMIN(rd1, rd2); if (plane == 0) - x->zcoeff_blk[tx_size][block] = !x->plane[plane].eobs[block] || - (rd1 > rd2 && !xd->lossless); + x->zcoeff_blk[tx_size][block] = + !x->plane[plane].eobs[block] || (rd1 > rd2 && !xd->lossless); args->this_rate += rate; args->this_dist += dist; @@ -572,48 +768,44 @@ static void block_rd_txfm(int plane, int block, BLOCK_SIZE plane_bsize, args->skippable &= !x->plane[plane].eobs[block]; } -static void txfm_rd_in_plane(MACROBLOCK *x, - int *rate, int64_t *distortion, - int *skippable, int64_t *sse, - int64_t ref_best_rd, int plane, - BLOCK_SIZE bsize, TX_SIZE tx_size, - int use_fast_coef_casting) { +static void txfm_rd_in_plane(const VP9_COMP *cpi, MACROBLOCK *x, int *rate, + int64_t *distortion, int *skippable, int64_t *sse, + int64_t ref_best_rd, int plane, BLOCK_SIZE bsize, + TX_SIZE tx_size, int use_fast_coef_casting) { MACROBLOCKD *const xd = &x->e_mbd; const struct macroblockd_plane *const pd = &xd->plane[plane]; struct rdcost_block_args args; vp9_zero(args); + args.cpi = cpi; args.x = x; args.best_rd = ref_best_rd; args.use_fast_coef_costing = use_fast_coef_casting; args.skippable = 1; - if (plane == 0) - xd->mi[0]->tx_size = tx_size; + if (plane == 0) xd->mi[0]->tx_size = tx_size; vp9_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left); args.so = get_scan(xd, tx_size, get_plane_type(plane), 0); - vp9_foreach_transformed_block_in_plane(xd, bsize, plane, - block_rd_txfm, &args); + vp9_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm, + &args); if (args.exit_early) { - *rate = INT_MAX; + 
*rate = INT_MAX; *distortion = INT64_MAX; - *sse = INT64_MAX; - *skippable = 0; + *sse = INT64_MAX; + *skippable = 0; } else { *distortion = args.this_dist; - *rate = args.this_rate; - *sse = args.this_sse; - *skippable = args.skippable; + *rate = args.this_rate; + *sse = args.this_sse; + *skippable = args.skippable; } } -static void choose_largest_tx_size(VP9_COMP *cpi, MACROBLOCK *x, - int *rate, int64_t *distortion, - int *skip, int64_t *sse, - int64_t ref_best_rd, - BLOCK_SIZE bs) { +static void choose_largest_tx_size(VP9_COMP *cpi, MACROBLOCK *x, int *rate, + int64_t *distortion, int *skip, int64_t *sse, + int64_t ref_best_rd, BLOCK_SIZE bs) { const TX_SIZE max_tx_size = max_txsize_lookup[bs]; VP9_COMMON *const cm = &cpi->common; const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode]; @@ -622,17 +814,13 @@ static void choose_largest_tx_size(VP9_COMP *cpi, MACROBLOCK *x, mi->tx_size = VPXMIN(max_tx_size, largest_tx_size); - txfm_rd_in_plane(x, rate, distortion, skip, - sse, ref_best_rd, 0, bs, + txfm_rd_in_plane(cpi, x, rate, distortion, skip, sse, ref_best_rd, 0, bs, mi->tx_size, cpi->sf.use_fast_coef_costing); } -static void choose_tx_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x, - int *rate, - int64_t *distortion, - int *skip, - int64_t *psse, - int64_t ref_best_rd, +static void choose_tx_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, + int64_t *distortion, int *skip, + int64_t *psse, int64_t ref_best_rd, BLOCK_SIZE bs) { const TX_SIZE max_tx_size = max_txsize_lookup[bs]; VP9_COMMON *const cm = &cpi->common; @@ -641,10 +829,10 @@ static void choose_tx_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x, vpx_prob skip_prob = vp9_get_skip_prob(cm, xd); int r[TX_SIZES][2], s[TX_SIZES]; int64_t d[TX_SIZES], sse[TX_SIZES]; - int64_t rd[TX_SIZES][2] = {{INT64_MAX, INT64_MAX}, - {INT64_MAX, INT64_MAX}, - {INT64_MAX, INT64_MAX}, - {INT64_MAX, INT64_MAX}}; + int64_t rd[TX_SIZES][2] = { { INT64_MAX, INT64_MAX }, + { INT64_MAX, INT64_MAX }, + { INT64_MAX, 
INT64_MAX }, + { INT64_MAX, INT64_MAX } }; int n, m; int s0, s1; int64_t best_rd = INT64_MAX; @@ -660,23 +848,22 @@ static void choose_tx_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x, start_tx = max_tx_size; end_tx = 0; } else { - TX_SIZE chosen_tx_size = VPXMIN(max_tx_size, - tx_mode_to_biggest_tx_size[cm->tx_mode]); + TX_SIZE chosen_tx_size = + VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[cm->tx_mode]); start_tx = chosen_tx_size; end_tx = chosen_tx_size; } for (n = start_tx; n >= end_tx; n--) { int r_tx_size = 0; - for (m = 0; m <= n - (n == (int) max_tx_size); m++) { + for (m = 0; m <= n - (n == (int)max_tx_size); m++) { if (m == n) r_tx_size += vp9_cost_zero(tx_probs[m]); else r_tx_size += vp9_cost_one(tx_probs[m]); } - txfm_rd_in_plane(x, &r[n][0], &d[n], &s[n], - &sse[n], ref_best_rd, 0, bs, n, - cpi->sf.use_fast_coef_costing); + txfm_rd_in_plane(cpi, x, &r[n][0], &d[n], &s[n], &sse[n], ref_best_rd, 0, + bs, n, cpi->sf.use_fast_coef_costing); r[n][1] = r[n][0]; if (r[n][0] < INT_MAX) { r[n][1] += r_tx_size; @@ -704,8 +891,7 @@ static void choose_tx_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x, // Early termination in transform size search. 
if (cpi->sf.tx_size_search_breakout && (rd[n][1] == INT64_MAX || - (n < (int) max_tx_size && rd[n][1] > rd[n + 1][1]) || - s[n] == 1)) + (n < (int)max_tx_size && rd[n][1] > rd[n + 1][1]) || s[n] == 1)) break; if (rd[n][1] < best_rd) { @@ -716,15 +902,14 @@ static void choose_tx_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x, mi->tx_size = best_tx; *distortion = d[mi->tx_size]; - *rate = r[mi->tx_size][cm->tx_mode == TX_MODE_SELECT]; - *skip = s[mi->tx_size]; - *psse = sse[mi->tx_size]; + *rate = r[mi->tx_size][cm->tx_mode == TX_MODE_SELECT]; + *skip = s[mi->tx_size]; + *psse = sse[mi->tx_size]; } static void super_block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, - int64_t *distortion, int *skip, - int64_t *psse, BLOCK_SIZE bs, - int64_t ref_best_rd) { + int64_t *distortion, int *skip, int64_t *psse, + BLOCK_SIZE bs, int64_t ref_best_rd) { MACROBLOCKD *xd = &x->e_mbd; int64_t sse; int64_t *ret_sse = psse ? psse : &sse; @@ -735,39 +920,33 @@ static void super_block_yrd(VP9_COMP *cpi, MACROBLOCK *x, int *rate, choose_largest_tx_size(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd, bs); } else { - choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse, - ref_best_rd, bs); + choose_tx_size_from_rd(cpi, x, rate, distortion, skip, ret_sse, ref_best_rd, + bs); } } static int conditional_skipintra(PREDICTION_MODE mode, PREDICTION_MODE best_intra_mode) { - if (mode == D117_PRED && - best_intra_mode != V_PRED && + if (mode == D117_PRED && best_intra_mode != V_PRED && best_intra_mode != D135_PRED) return 1; - if (mode == D63_PRED && - best_intra_mode != V_PRED && + if (mode == D63_PRED && best_intra_mode != V_PRED && best_intra_mode != D45_PRED) return 1; - if (mode == D207_PRED && - best_intra_mode != H_PRED && + if (mode == D207_PRED && best_intra_mode != H_PRED && best_intra_mode != D45_PRED) return 1; - if (mode == D153_PRED && - best_intra_mode != H_PRED && + if (mode == D153_PRED && best_intra_mode != H_PRED && best_intra_mode != D135_PRED) return 1; return 0; 
} -static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, - int row, int col, - PREDICTION_MODE *best_mode, - const int *bmode_costs, - ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l, - int *bestrate, int *bestratey, - int64_t *bestdistortion, +static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int row, + int col, PREDICTION_MODE *best_mode, + const int *bmode_costs, ENTROPY_CONTEXT *a, + ENTROPY_CONTEXT *l, int *bestrate, + int *bestratey, int64_t *bestdistortion, BLOCK_SIZE bsize, int64_t rd_thresh) { PREDICTION_MODE mode; MACROBLOCKD *const xd = &x->e_mbd; @@ -800,14 +979,12 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int64_t distortion = 0; int rate = bmode_costs[mode]; - if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) - continue; + if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue; // Only do the oblique modes if the best so far is // one of the neighboring directional modes if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) { - if (conditional_skipintra(mode, *best_mode)) - continue; + if (conditional_skipintra(mode, *best_mode)) continue; } memcpy(tempa, ta, num_4x4_blocks_wide * sizeof(ta[0])); @@ -818,45 +995,47 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, const int block = (row + idy) * 2 + (col + idx); const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride]; uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride]; - int16_t *const src_diff = vp9_raster_block_offset_int16(BLOCK_8X8, - block, - p->src_diff); + int16_t *const src_diff = + vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff); tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block); xd->mi[0]->bmi[block].as_mode = mode; vp9_predict_intra_block(xd, 1, TX_4X4, mode, x->skip_encode ? src : dst, - x->skip_encode ? 
src_stride : dst_stride, - dst, dst_stride, - col + idx, row + idy, 0); - vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, - dst, dst_stride, xd->bd); + x->skip_encode ? src_stride : dst_stride, dst, + dst_stride, col + idx, row + idy, 0); + vpx_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, + dst_stride, xd->bd); if (xd->lossless) { const scan_order *so = &vp9_default_scan_orders[TX_4X4]; + const int coeff_ctx = + combine_entropy_contexts(tempa[idx], templ[idy]); vp9_highbd_fwht4x4(src_diff, coeff, 8); vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan); - ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4, - so->scan, so->neighbors, - cpi->sf.use_fast_coef_costing); + ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan, + so->neighbors, cpi->sf.use_fast_coef_costing); + tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0 ? 1 : 0); if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd) goto next_highbd; - vp9_highbd_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block), - dst, dst_stride, - p->eobs[block], xd->bd); + vp9_highbd_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block), dst, + dst_stride, p->eobs[block], xd->bd); } else { int64_t unused; const TX_TYPE tx_type = get_tx_type_4x4(PLANE_TYPE_Y, xd, block); const scan_order *so = &vp9_scan_orders[TX_4X4][tx_type]; + const int coeff_ctx = + combine_entropy_contexts(tempa[idx], templ[idy]); if (tx_type == DCT_DCT) vpx_highbd_fdct4x4(src_diff, coeff, 8); else vp9_highbd_fht4x4(src_diff, coeff, 8, tx_type); vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan); - ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4, - so->scan, so->neighbors, - cpi->sf.use_fast_coef_costing); + ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan, + so->neighbors, cpi->sf.use_fast_coef_costing); distortion += vp9_highbd_block_error_dispatch( - coeff, BLOCK_OFFSET(pd->dqcoeff, block), - 16, &unused, xd->bd) >> 2; + coeff, 
BLOCK_OFFSET(pd->dqcoeff, block), 16, + &unused, xd->bd) >> + 2; + tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0 ? 1 : 0); if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd) goto next_highbd; vp9_highbd_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block), @@ -882,16 +1061,13 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, num_4x4_blocks_wide * 4 * sizeof(uint16_t)); } } - next_highbd: - {} + next_highbd : {} } - if (best_rd >= rd_thresh || x->skip_encode) - return best_rd; + if (best_rd >= rd_thresh || x->skip_encode) return best_rd; for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) { memcpy(CONVERT_TO_SHORTPTR(dst_init + idy * dst_stride), - best_dst16 + idy * 8, - num_4x4_blocks_wide * 4 * sizeof(uint16_t)); + best_dst16 + idy * 8, num_4x4_blocks_wide * 4 * sizeof(uint16_t)); } return best_rd; @@ -904,14 +1080,12 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int64_t distortion = 0; int rate = bmode_costs[mode]; - if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) - continue; + if (!(cpi->sf.intra_y_mode_mask[TX_4X4] & (1 << mode))) continue; // Only do the oblique modes if the best so far is // one of the neighboring directional modes if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) { - if (conditional_skipintra(mode, *best_mode)) - continue; + if (conditional_skipintra(mode, *best_mode)) continue; } memcpy(tempa, ta, num_4x4_blocks_wide * sizeof(ta[0])); @@ -926,19 +1100,20 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff); tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block); xd->mi[0]->bmi[block].as_mode = mode; - vp9_predict_intra_block(xd, 1, TX_4X4, mode, - x->skip_encode ? src : dst, - x->skip_encode ? src_stride : dst_stride, - dst, dst_stride, col + idx, row + idy, 0); + vp9_predict_intra_block(xd, 1, TX_4X4, mode, x->skip_encode ? src : dst, + x->skip_encode ? 
src_stride : dst_stride, dst, + dst_stride, col + idx, row + idy, 0); vpx_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride); if (xd->lossless) { const scan_order *so = &vp9_default_scan_orders[TX_4X4]; + const int coeff_ctx = + combine_entropy_contexts(tempa[idx], templ[idy]); vp9_fwht4x4(src_diff, coeff, 8); vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan); - ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4, - so->scan, so->neighbors, - cpi->sf.use_fast_coef_costing); + ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan, + so->neighbors, cpi->sf.use_fast_coef_costing); + tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0) ? 1 : 0; if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd) goto next; vp9_iwht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block), dst, dst_stride, @@ -947,22 +1122,27 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int64_t unused; const TX_TYPE tx_type = get_tx_type_4x4(PLANE_TYPE_Y, xd, block); const scan_order *so = &vp9_scan_orders[TX_4X4][tx_type]; + const int coeff_ctx = + combine_entropy_contexts(tempa[idx], templ[idy]); vp9_fht4x4(src_diff, coeff, 8, tx_type); vp9_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan); - ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4, - so->scan, so->neighbors, - cpi->sf.use_fast_coef_costing); + ratey += cost_coeffs(x, 0, block, TX_4X4, coeff_ctx, so->scan, + so->neighbors, cpi->sf.use_fast_coef_costing); + tempa[idx] = templ[idy] = (x->plane[0].eobs[block] > 0) ? 
1 : 0; #if CONFIG_VP9_HIGHBITDEPTH - distortion += vp9_highbd_block_error_8bit( - coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16, &unused) >> 2; + distortion += + vp9_highbd_block_error_8bit( + coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16, &unused) >> + 2; #else distortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block), - 16, &unused) >> 2; + 16, &unused) >> + 2; #endif if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd) goto next; - vp9_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block), - dst, dst_stride, p->eobs[block]); + vp9_iht4x4_add(tx_type, BLOCK_OFFSET(pd->dqcoeff, block), dst, + dst_stride, p->eobs[block]); } } } @@ -982,12 +1162,10 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, memcpy(best_dst + idy * 8, dst_init + idy * dst_stride, num_4x4_blocks_wide * 4); } - next: - {} + next : {} } - if (best_rd >= rd_thresh || x->skip_encode) - return best_rd; + if (best_rd >= rd_thresh || x->skip_encode) return best_rd; for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy) memcpy(dst_init + idy * dst_stride, best_dst + idy * 8, @@ -1026,17 +1204,15 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP *cpi, MACROBLOCK *mb, const PREDICTION_MODE A = vp9_above_block_mode(mic, above_mi, i); const PREDICTION_MODE L = vp9_left_block_mode(mic, left_mi, i); - bmode_costs = cpi->y_mode_costs[A][L]; + bmode_costs = cpi->y_mode_costs[A][L]; } - this_rd = rd_pick_intra4x4block(cpi, mb, idy, idx, &best_mode, - bmode_costs, - xd->plane[0].above_context + idx, - xd->plane[0].left_context + idy, - &r, &ry, &d, bsize, best_rd - total_rd); + this_rd = rd_pick_intra4x4block( + cpi, mb, idy, idx, &best_mode, bmode_costs, + xd->plane[0].above_context + idx, xd->plane[0].left_context + idy, &r, + &ry, &d, bsize, best_rd - total_rd); - if (this_rd >= best_rd - total_rd) - return INT64_MAX; + if (this_rd >= best_rd - total_rd) return INT64_MAX; total_rd += this_rd; cost += r; @@ -1049,8 +1225,7 @@ static int64_t 
rd_pick_intra_sub_8x8_y_mode(VP9_COMP *cpi, MACROBLOCK *mb, for (j = 1; j < num_4x4_blocks_wide; ++j) mic->bmi[i + j].as_mode = best_mode; - if (total_rd >= best_rd) - return INT64_MAX; + if (total_rd >= best_rd) return INT64_MAX; } } @@ -1063,10 +1238,9 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP9_COMP *cpi, MACROBLOCK *mb, } // This function is used only for intra_only frames -static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x, - int *rate, int *rate_tokenonly, - int64_t *distortion, int *skippable, - BLOCK_SIZE bsize, +static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x, int *rate, + int *rate_tokenonly, int64_t *distortion, + int *skippable, BLOCK_SIZE bsize, int64_t best_rd) { PREDICTION_MODE mode; PREDICTION_MODE mode_selected = DC_PRED; @@ -1088,31 +1262,28 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x, if (cpi->sf.use_nonrd_pick_mode) { // These speed features are turned on in hybrid non-RD and RD mode // for key frame coding in the context of real-time setting. 
- if (conditional_skipintra(mode, mode_selected)) - continue; - if (*skippable) - break; + if (conditional_skipintra(mode, mode_selected)) continue; + if (*skippable) break; } mic->mode = mode; - super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, - &s, NULL, bsize, best_rd); + super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL, + bsize, best_rd); - if (this_rate_tokenonly == INT_MAX) - continue; + if (this_rate_tokenonly == INT_MAX) continue; this_rate = this_rate_tokenonly + bmode_costs[mode]; this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion); if (this_rd < best_rd) { - mode_selected = mode; - best_rd = this_rd; - best_tx = mic->tx_size; - *rate = this_rate; + mode_selected = mode; + best_rd = this_rd; + best_tx = mic->tx_size; + *rate = this_rate; *rate_tokenonly = this_rate_tokenonly; - *distortion = this_distortion; - *skippable = s; + *distortion = this_distortion; + *skippable = s; } } @@ -1124,10 +1295,9 @@ static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x, // Return value 0: early termination triggered, no valid rd cost available; // 1: rd cost values are valid. 
-static int super_block_uvrd(const VP9_COMP *cpi, MACROBLOCK *x, - int *rate, int64_t *distortion, int *skippable, - int64_t *sse, BLOCK_SIZE bsize, - int64_t ref_best_rd) { +static int super_block_uvrd(const VP9_COMP *cpi, MACROBLOCK *x, int *rate, + int64_t *distortion, int *skippable, int64_t *sse, + BLOCK_SIZE bsize, int64_t ref_best_rd) { MACROBLOCKD *const xd = &x->e_mbd; MODE_INFO *const mi = xd->mi[0]; const TX_SIZE uv_tx_size = get_uv_tx_size(mi, &xd->plane[1]); @@ -1136,8 +1306,7 @@ static int super_block_uvrd(const VP9_COMP *cpi, MACROBLOCK *x, int64_t pndist = 0, pnsse = 0; int is_cost_valid = 1; - if (ref_best_rd < 0) - is_cost_valid = 0; + if (ref_best_rd < 0) is_cost_valid = 0; if (is_inter_block(mi) && is_cost_valid) { int plane; @@ -1151,9 +1320,8 @@ static int super_block_uvrd(const VP9_COMP *cpi, MACROBLOCK *x, *skippable = 1; for (plane = 1; plane < MAX_MB_PLANE; ++plane) { - txfm_rd_in_plane(x, &pnrate, &pndist, &pnskip, &pnsse, - ref_best_rd, plane, bsize, uv_tx_size, - cpi->sf.use_fast_coef_costing); + txfm_rd_in_plane(cpi, x, &pnrate, &pndist, &pnskip, &pnsse, ref_best_rd, + plane, bsize, uv_tx_size, cpi->sf.use_fast_coef_costing); if (pnrate == INT_MAX) { is_cost_valid = 0; break; @@ -1176,10 +1344,10 @@ static int super_block_uvrd(const VP9_COMP *cpi, MACROBLOCK *x, } static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x, - PICK_MODE_CONTEXT *ctx, - int *rate, int *rate_tokenonly, - int64_t *distortion, int *skippable, - BLOCK_SIZE bsize, TX_SIZE max_tx_size) { + PICK_MODE_CONTEXT *ctx, int *rate, + int *rate_tokenonly, int64_t *distortion, + int *skippable, BLOCK_SIZE bsize, + TX_SIZE max_tx_size) { MACROBLOCKD *xd = &x->e_mbd; PREDICTION_MODE mode; PREDICTION_MODE mode_selected = DC_PRED; @@ -1189,28 +1357,31 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x, memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm)); for (mode = DC_PRED; mode <= TM_PRED; ++mode) { - if 
(!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode))) + if (!(cpi->sf.intra_uv_mode_mask[max_tx_size] & (1 << mode))) continue; +#if CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH + if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) && + (xd->above_mi == NULL || xd->left_mi == NULL) && need_top_left[mode]) continue; +#endif // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH xd->mi[0]->uv_mode = mode; - if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, - &this_distortion, &s, &this_sse, bsize, best_rd)) + if (!super_block_uvrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, + &this_sse, bsize, best_rd)) continue; - this_rate = this_rate_tokenonly + - cpi->intra_uv_mode_cost[cpi->common.frame_type] - [xd->mi[0]->mode][mode]; + this_rate = + this_rate_tokenonly + + cpi->intra_uv_mode_cost[cpi->common.frame_type][xd->mi[0]->mode][mode]; this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion); if (this_rd < best_rd) { - mode_selected = mode; - best_rd = this_rd; - *rate = this_rate; + mode_selected = mode; + best_rd = this_rd; + *rate = this_rate; *rate_tokenonly = this_rate_tokenonly; - *distortion = this_distortion; - *skippable = s; - if (!x->select_tx_size) - swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE); + *distortion = this_distortion; + *skippable = s; + if (!x->select_tx_size) swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE); } } @@ -1218,40 +1389,38 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x, return best_rd; } -static int64_t rd_sbuv_dcpred(const VP9_COMP *cpi, MACROBLOCK *x, - int *rate, int *rate_tokenonly, - int64_t *distortion, int *skippable, - BLOCK_SIZE bsize) { +static int64_t rd_sbuv_dcpred(const VP9_COMP *cpi, MACROBLOCK *x, int *rate, + int *rate_tokenonly, int64_t *distortion, + int *skippable, BLOCK_SIZE bsize) { const VP9_COMMON *cm = &cpi->common; int64_t unused; x->e_mbd.mi[0]->uv_mode = DC_PRED; memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm)); - super_block_uvrd(cpi, 
x, rate_tokenonly, distortion, - skippable, &unused, bsize, INT64_MAX); - *rate = *rate_tokenonly + - cpi->intra_uv_mode_cost[cm->frame_type] - [x->e_mbd.mi[0]->mode][DC_PRED]; + super_block_uvrd(cpi, x, rate_tokenonly, distortion, skippable, &unused, + bsize, INT64_MAX); + *rate = + *rate_tokenonly + + cpi->intra_uv_mode_cost[cm->frame_type][x->e_mbd.mi[0]->mode][DC_PRED]; return RDCOST(x->rdmult, x->rddiv, *rate, *distortion); } static void choose_intra_uv_mode(VP9_COMP *cpi, MACROBLOCK *const x, - PICK_MODE_CONTEXT *ctx, - BLOCK_SIZE bsize, TX_SIZE max_tx_size, - int *rate_uv, int *rate_uv_tokenonly, - int64_t *dist_uv, int *skip_uv, - PREDICTION_MODE *mode_uv) { + PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize, + TX_SIZE max_tx_size, int *rate_uv, + int *rate_uv_tokenonly, int64_t *dist_uv, + int *skip_uv, PREDICTION_MODE *mode_uv) { // Use an estimated rd for uv_intra based on DC_PRED if the // appropriate speed flag is set. if (cpi->sf.use_uv_intra_rd_estimate) { - rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, - skip_uv, bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize); - // Else do a proper rd search for each possible transform size that may - // be considered in the main rd loop. + rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv, + bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize); + // Else do a proper rd search for each possible transform size that may + // be considered in the main rd loop. } else { - rd_pick_intra_sbuv_mode(cpi, x, ctx, - rate_uv, rate_uv_tokenonly, dist_uv, skip_uv, - bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize, max_tx_size); + rd_pick_intra_sbuv_mode(cpi, x, ctx, rate_uv, rate_uv_tokenonly, dist_uv, + skip_uv, bsize < BLOCK_8X8 ? 
BLOCK_8X8 : bsize, + max_tx_size); } *mode_uv = x->e_mbd.mi[0]->uv_mode; } @@ -1263,8 +1432,7 @@ static int cost_mv_ref(const VP9_COMP *cpi, PREDICTION_MODE mode, } static int set_and_cost_bmi_mvs(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd, - int i, - PREDICTION_MODE mode, int_mv this_mv[2], + int i, PREDICTION_MODE mode, int_mv this_mv[2], int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int_mv seg_mvs[MAX_REF_FRAMES], int_mv *best_ref_mv[2], const int *mvjcost, @@ -1296,16 +1464,13 @@ static int set_and_cost_bmi_mvs(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd, break; case ZEROMV: this_mv[0].as_int = 0; - if (is_compound) - this_mv[1].as_int = 0; - break; - default: + if (is_compound) this_mv[1].as_int = 0; break; + default: break; } mi->bmi[i].as_mv[0].as_int = this_mv[0].as_int; - if (is_compound) - mi->bmi[i].as_mv[1].as_int = this_mv[1].as_int; + if (is_compound) mi->bmi[i].as_mv[1].as_int = this_mv[1].as_int; mi->bmi[i].as_mode = mode; @@ -1314,17 +1479,13 @@ static int set_and_cost_bmi_mvs(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd, memmove(&mi->bmi[i + idy * 2 + idx], &mi->bmi[i], sizeof(mi->bmi[i])); return cost_mv_ref(cpi, mode, mbmi_ext->mode_context[mi->ref_frame[0]]) + - thismvcost; + thismvcost; } -static int64_t encode_inter_mb_segment(VP9_COMP *cpi, - MACROBLOCK *x, - int64_t best_yrd, - int i, - int *labelyrate, +static int64_t encode_inter_mb_segment(VP9_COMP *cpi, MACROBLOCK *x, + int64_t best_yrd, int i, int *labelyrate, int64_t *distortion, int64_t *sse, - ENTROPY_CONTEXT *ta, - ENTROPY_CONTEXT *tl, + ENTROPY_CONTEXT *ta, ENTROPY_CONTEXT *tl, int mi_row, int mi_col) { int k; MACROBLOCKD *xd = &x->e_mbd; @@ -1338,8 +1499,8 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi, const uint8_t *const src = &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)]; - uint8_t *const dst = &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i, - pd->dst.stride)]; + uint8_t *const dst = + &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i, 
pd->dst.stride)]; int64_t thisdistortion = 0, thissse = 0; int thisrate = 0, ref; const scan_order *so = &vp9_default_scan_orders[TX_4X4]; @@ -1360,35 +1521,26 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi, y_stride = xd->block_refs[ref]->buf->y_stride; pre = xd->block_refs[ref]->buf->y_buffer; - pre += scaled_buffer_offset(x_start + w, y_start + h, - y_stride, sf); + pre += scaled_buffer_offset(x_start + w, y_start + h, y_stride, sf); } #if CONFIG_VP9_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - vp9_highbd_build_inter_predictor(pre, y_stride, - dst, pd->dst.stride, - &mi->bmi[i].as_mv[ref].as_mv, - &xd->block_refs[ref]->sf, width, height, - ref, kernel, MV_PRECISION_Q3, - mi_col * MI_SIZE + 4 * (i % 2), - mi_row * MI_SIZE + 4 * (i / 2), xd->bd); - } else { - vp9_build_inter_predictor(pre, y_stride, - dst, pd->dst.stride, - &mi->bmi[i].as_mv[ref].as_mv, - &xd->block_refs[ref]->sf, width, height, ref, - kernel, MV_PRECISION_Q3, - mi_col * MI_SIZE + 4 * (i % 2), - mi_row * MI_SIZE + 4 * (i / 2)); - } + if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { + vp9_highbd_build_inter_predictor( + pre, y_stride, dst, pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv, + &xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3, + mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2), + xd->bd); + } else { + vp9_build_inter_predictor( + pre, y_stride, dst, pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv, + &xd->block_refs[ref]->sf, width, height, ref, kernel, MV_PRECISION_Q3, + mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2)); + } #else - vp9_build_inter_predictor(pre, y_stride, - dst, pd->dst.stride, - &mi->bmi[i].as_mv[ref].as_mv, - &xd->block_refs[ref]->sf, width, height, ref, - kernel, MV_PRECISION_Q3, - mi_col * MI_SIZE + 4 * (i % 2), - mi_row * MI_SIZE + 4 * (i / 2)); + vp9_build_inter_predictor( + pre, y_stride, dst, pd->dst.stride, &mi->bmi[i].as_mv[ref].as_mv, + &xd->block_refs[ref]->sf, width, height, ref, 
kernel, MV_PRECISION_Q3, + mi_col * MI_SIZE + 4 * (i % 2), mi_row * MI_SIZE + 4 * (i / 2)); #endif // CONFIG_VP9_HIGHBITDEPTH } @@ -1398,9 +1550,9 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi, height, width, vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), 8, src, p->src.stride, dst, pd->dst.stride, xd->bd); } else { - vpx_subtract_block( - height, width, vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), - 8, src, p->src.stride, dst, pd->dst.stride); + vpx_subtract_block(height, width, + vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), + 8, src, p->src.stride, dst, pd->dst.stride); } #else vpx_subtract_block(height, width, @@ -1415,9 +1567,10 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi, const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8; #endif int64_t ssz, rd, rd1, rd2; - tran_low_t* coeff; - + tran_low_t *coeff; + int coeff_ctx; k += (idy * 2 + idx); + coeff_ctx = combine_entropy_contexts(ta[k & 1], tl[k >> 1]); coeff = BLOCK_OFFSET(p->coeff, k); x->fwd_txm4x4(vp9_raster_block_offset_int16(BLOCK_8X8, k, p->src_diff), coeff, 8); @@ -1426,18 +1579,17 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi, thisdistortion += vp9_highbd_block_error_dispatch( coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz, bd); #else - thisdistortion += vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), - 16, &ssz); + thisdistortion += + vp9_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz); #endif // CONFIG_VP9_HIGHBITDEPTH thissse += ssz; - thisrate += cost_coeffs(x, 0, k, ta + (k & 1), tl + (k >> 1), TX_4X4, - so->scan, so->neighbors, - cpi->sf.use_fast_coef_costing); + thisrate += cost_coeffs(x, 0, k, TX_4X4, coeff_ctx, so->scan, + so->neighbors, cpi->sf.use_fast_coef_costing); + ta[k & 1] = tl[k >> 1] = (x->plane[0].eobs[k] > 0) ? 
1 : 0; rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2); rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2); rd = VPXMIN(rd1, rd2); - if (rd >= best_yrd) - return INT64_MAX; + if (rd >= best_yrd) return INT64_MAX; } } @@ -1474,11 +1626,11 @@ typedef struct { int mvthresh; } BEST_SEG_INFO; -static INLINE int mv_check_bounds(const MACROBLOCK *x, const MV *mv) { - return (mv->row >> 3) < x->mv_row_min || - (mv->row >> 3) > x->mv_row_max || - (mv->col >> 3) < x->mv_col_min || - (mv->col >> 3) > x->mv_col_max; +static INLINE int mv_check_bounds(const MvLimits *mv_limits, const MV *mv) { + return (mv->row >> 3) < mv_limits->row_min || + (mv->row >> 3) > mv_limits->row_max || + (mv->col >> 3) < mv_limits->col_min || + (mv->col >> 3) > mv_limits->col_max; } static INLINE void mi_buf_shift(MACROBLOCK *x, int i) { @@ -1486,14 +1638,15 @@ static INLINE void mi_buf_shift(MACROBLOCK *x, int i) { struct macroblock_plane *const p = &x->plane[0]; struct macroblockd_plane *const pd = &x->e_mbd.plane[0]; - p->src.buf = &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, - p->src.stride)]; + p->src.buf = + &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)]; assert(((intptr_t)pd->pre[0].buf & 0x7) == 0); - pd->pre[0].buf = &pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8, i, - pd->pre[0].stride)]; + pd->pre[0].buf = + &pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)]; if (has_second_ref(mi)) - pd->pre[1].buf = &pd->pre[1].buf[vp9_raster_block_offset(BLOCK_8X8, i, - pd->pre[1].stride)]; + pd->pre[1].buf = + &pd->pre[1] + .buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)]; } static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src, @@ -1501,8 +1654,7 @@ static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src, MODE_INFO *mi = x->e_mbd.mi[0]; x->plane[0].src = orig_src; x->e_mbd.plane[0].pre[0] = orig_pre[0]; - if (has_second_ref(mi)) - x->e_mbd.plane[0].pre[1] = orig_pre[1]; + if 
(has_second_ref(mi)) x->e_mbd.plane[0].pre[1] = orig_pre[1]; } static INLINE int mv_has_subpel(const MV *mv) { @@ -1511,10 +1663,11 @@ static INLINE int mv_has_subpel(const MV *mv) { // Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way encode zero motion. // TODO(aconverse): Find out if this is still productive then clean up or remove -static int check_best_zero_mv( - const VP9_COMP *cpi, const uint8_t mode_context[MAX_REF_FRAMES], - int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], int this_mode, - const MV_REFERENCE_FRAME ref_frames[2]) { +static int check_best_zero_mv(const VP9_COMP *cpi, + const uint8_t mode_context[MAX_REF_FRAMES], + int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES], + int this_mode, + const MV_REFERENCE_FRAME ref_frames[2]) { if ((this_mode == NEARMV || this_mode == NEARESTMV || this_mode == ZEROMV) && frame_mv[this_mode][ref_frames[0]].as_int == 0 && (ref_frames[1] == NONE || @@ -1546,10 +1699,8 @@ static int check_best_zero_mv( return 1; } -static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, - BLOCK_SIZE bsize, - int_mv *frame_mv, - int mi_row, int mi_col, +static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, + int_mv *frame_mv, int mi_row, int mi_col, int_mv single_newmv[MAX_REF_FRAMES], int *rate_mv) { const VP9_COMMON *const cm = &cpi->common; @@ -1557,8 +1708,8 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, const int ph = 4 * num_4x4_blocks_high_lookup[bsize]; MACROBLOCKD *xd = &x->e_mbd; MODE_INFO *mi = xd->mi[0]; - const int refs[2] = {mi->ref_frame[0], - mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1]}; + const int refs[2] = { mi->ref_frame[0], + mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1] }; int_mv ref_mv[2]; int ite, ref; const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter]; @@ -1566,13 +1717,13 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, // Do joint motion search in compound mode to get more accurate mv. 
struct buf_2d backup_yv12[2][MAX_MB_PLANE]; - int last_besterr[2] = {INT_MAX, INT_MAX}; + uint32_t last_besterr[2] = { UINT_MAX, UINT_MAX }; const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = { vp9_get_scaled_ref_frame(cpi, mi->ref_frame[0]), vp9_get_scaled_ref_frame(cpi, mi->ref_frame[1]) }; - // Prediction buffer from second frame. +// Prediction buffer from second frame. #if CONFIG_VP9_HIGHBITDEPTH DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[64 * 64]); uint8_t *second_pred; @@ -1597,30 +1748,26 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, frame_mv[refs[ref]].as_int = single_newmv[refs[ref]].as_int; } - // Since we have scaled the reference frames to match the size of the current - // frame we must use a unit scaling factor during mode selection. +// Since we have scaled the reference frames to match the size of the current +// frame we must use a unit scaling factor during mode selection. #if CONFIG_VP9_HIGHBITDEPTH - vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height, - cm->width, cm->height, - cm->use_highbitdepth); + vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width, + cm->height, cm->use_highbitdepth); #else - vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height, - cm->width, cm->height); + vp9_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width, + cm->height); #endif // CONFIG_VP9_HIGHBITDEPTH // Allow joint search multiple times iteratively for each reference frame // and break out of the search loop if it couldn't find a better mv. 
for (ite = 0; ite < 4; ite++) { struct buf_2d ref_yv12[2]; - int bestsme = INT_MAX; + uint32_t bestsme = UINT_MAX; int sadpb = x->sadperbit16; MV tmp_mv; int search_range = 3; - int tmp_col_min = x->mv_col_min; - int tmp_col_max = x->mv_col_max; - int tmp_row_min = x->mv_row_min; - int tmp_row_max = x->mv_row_max; + const MvLimits tmp_mv_limits = x->mv_limits; int id = ite % 2; // Even iterations search in the first reference frame, // odd iterations search in the second. The predictor // found for the 'other' reference frame is factored in. @@ -1629,42 +1776,31 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, ref_yv12[0] = xd->plane[0].pre[0]; ref_yv12[1] = xd->plane[0].pre[1]; - // Get the prediction block from the 'other' reference frame. +// Get the prediction block from the 'other' reference frame. #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16); - vp9_highbd_build_inter_predictor(ref_yv12[!id].buf, - ref_yv12[!id].stride, - second_pred, pw, - &frame_mv[refs[!id]].as_mv, - &sf, pw, ph, 0, - kernel, MV_PRECISION_Q3, - mi_col * MI_SIZE, mi_row * MI_SIZE, - xd->bd); + vp9_highbd_build_inter_predictor( + ref_yv12[!id].buf, ref_yv12[!id].stride, second_pred, pw, + &frame_mv[refs[!id]].as_mv, &sf, pw, ph, 0, kernel, MV_PRECISION_Q3, + mi_col * MI_SIZE, mi_row * MI_SIZE, xd->bd); } else { second_pred = (uint8_t *)second_pred_alloc_16; - vp9_build_inter_predictor(ref_yv12[!id].buf, - ref_yv12[!id].stride, - second_pred, pw, - &frame_mv[refs[!id]].as_mv, - &sf, pw, ph, 0, - kernel, MV_PRECISION_Q3, + vp9_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride, + second_pred, pw, &frame_mv[refs[!id]].as_mv, + &sf, pw, ph, 0, kernel, MV_PRECISION_Q3, mi_col * MI_SIZE, mi_row * MI_SIZE); } #else - vp9_build_inter_predictor(ref_yv12[!id].buf, - ref_yv12[!id].stride, - second_pred, pw, - &frame_mv[refs[!id]].as_mv, - &sf, pw, ph, 0, - kernel, MV_PRECISION_Q3, + 
vp9_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride, + second_pred, pw, &frame_mv[refs[!id]].as_mv, &sf, + pw, ph, 0, kernel, MV_PRECISION_Q3, mi_col * MI_SIZE, mi_row * MI_SIZE); #endif // CONFIG_VP9_HIGHBITDEPTH // Do compound motion search on the current reference frame. - if (id) - xd->plane[0].pre[0] = ref_yv12[id]; - vp9_set_mv_search_range(x, &ref_mv[id].as_mv); + if (id) xd->plane[0].pre[0] = ref_yv12[id]; + vp9_set_mv_search_range(&x->mv_limits, &ref_mv[id].as_mv); // Use the mv result from the single mode as mv predictor. tmp_mv = frame_mv[refs[id]].as_mv; @@ -1673,38 +1809,27 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, tmp_mv.row >>= 3; // Small-range full-pixel motion search. - bestsme = vp9_refining_search_8p_c(x, &tmp_mv, sadpb, - search_range, - &cpi->fn_ptr[bsize], - &ref_mv[id].as_mv, second_pred); - if (bestsme < INT_MAX) + bestsme = vp9_refining_search_8p_c(x, &tmp_mv, sadpb, search_range, + &cpi->fn_ptr[bsize], &ref_mv[id].as_mv, + second_pred); + if (bestsme < UINT_MAX) bestsme = vp9_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv, second_pred, &cpi->fn_ptr[bsize], 1); - x->mv_col_min = tmp_col_min; - x->mv_col_max = tmp_col_max; - x->mv_row_min = tmp_row_min; - x->mv_row_max = tmp_row_max; + x->mv_limits = tmp_mv_limits; - if (bestsme < INT_MAX) { - int dis; /* TODO: use dis in distortion calculation later. */ - unsigned int sse; + if (bestsme < UINT_MAX) { + uint32_t dis; /* TODO: use dis in distortion calculation later. 
*/ + uint32_t sse; bestsme = cpi->find_fractional_mv_step( - x, &tmp_mv, - &ref_mv[id].as_mv, - cpi->common.allow_high_precision_mv, - x->errorperbit, - &cpi->fn_ptr[bsize], - 0, cpi->sf.mv.subpel_iters_per_step, - NULL, - x->nmvjointcost, x->mvcost, - &dis, &sse, second_pred, - pw, ph); + x, &tmp_mv, &ref_mv[id].as_mv, cpi->common.allow_high_precision_mv, + x->errorperbit, &cpi->fn_ptr[bsize], 0, + cpi->sf.mv.subpel_iters_per_step, NULL, x->nmvjointcost, x->mvcost, + &dis, &sse, second_pred, pw, ph); } // Restore the pointer to the first (possibly scaled) prediction buffer. - if (id) - xd->plane[0].pre[0] = ref_yv12[0]; + if (id) xd->plane[0].pre[0] = ref_yv12[0]; if (bestsme < last_besterr[id]) { frame_mv[refs[id]].as_mv = tmp_mv; @@ -1730,17 +1855,12 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x, } } -static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, - int_mv *best_ref_mv, - int_mv *second_best_ref_mv, - int64_t best_rd, int *returntotrate, - int *returnyrate, - int64_t *returndistortion, - int *skippable, int64_t *psse, - int mvthresh, - int_mv seg_mvs[4][MAX_REF_FRAMES], - BEST_SEG_INFO *bsi_buf, int filter_idx, - int mi_row, int mi_col) { +static int64_t rd_pick_best_sub8x8_mode( + VP9_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv, + int_mv *second_best_ref_mv, int64_t best_rd, int *returntotrate, + int *returnyrate, int64_t *returndistortion, int *skippable, int64_t *psse, + int mvthresh, int_mv seg_mvs[4][MAX_REF_FRAMES], BEST_SEG_INFO *bsi_buf, + int filter_idx, int mi_row, int mi_col) { int i; BEST_SEG_INFO *bsi = bsi_buf + filter_idx; MACROBLOCKD *xd = &x->e_mbd; @@ -1774,8 +1894,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, bsi->mvp.as_int = best_ref_mv->as_int; bsi->mvthresh = mvthresh; - for (i = 0; i < 4; i++) - bsi->modes[i] = ZEROMV; + for (i = 0; i < 4; i++) bsi->modes[i] = ZEROMV; memcpy(t_above, pd->above_context, sizeof(t_above)); memcpy(t_left, pd->left_context, 
sizeof(t_left)); @@ -1801,10 +1920,9 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, for (ref = 0; ref < 1 + has_second_rf; ++ref) { const MV_REFERENCE_FRAME frame = mi->ref_frame[ref]; frame_mv[ZEROMV][frame].as_int = 0; - vp9_append_sub8x8_mvs_for_idx(cm, xd, i, ref, mi_row, mi_col, - &frame_mv[NEARESTMV][frame], - &frame_mv[NEARMV][frame], - mbmi_ext->mode_context); + vp9_append_sub8x8_mvs_for_idx( + cm, xd, i, ref, mi_row, mi_col, &frame_mv[NEARESTMV][frame], + &frame_mv[NEARMV][frame], mbmi_ext->mode_context); } // search for the best motion vector on this segment @@ -1814,8 +1932,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, mode_idx = INTER_OFFSET(this_mode); bsi->rdstat[i][mode_idx].brdcost = INT64_MAX; - if (!(inter_mode_mask & (1 << this_mode))) - continue; + if (!(inter_mode_mask & (1 << this_mode))) continue; if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv, this_mode, mi->ref_frame)) @@ -1832,23 +1949,22 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, seg_mvs[i][mi->ref_frame[0]].as_int == INVALID_MV) { MV *const new_mv = &mode_mv[NEWMV][0].as_mv; int step_param = 0; - int bestsme = INT_MAX; + uint32_t bestsme = UINT_MAX; int sadpb = x->sadperbit4; MV mvp_full; int max_mv; int cost_list[5]; + const MvLimits tmp_mv_limits = x->mv_limits; /* Is the best so far sufficiently good that we cant justify doing * and new motion search. */ - if (best_rd < label_mv_thresh) - break; + if (best_rd < label_mv_thresh) break; if (cpi->oxcf.mode != BEST) { // use previous block's result as next block's MV predictor. 
if (i > 0) { bsi->mvp.as_int = mi->bmi[i - 1].as_mv[0].as_int; - if (i == 2) - bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int; + if (i == 2) bsi->mvp.as_int = mi->bmi[i - 2].as_mv[0].as_int; } } if (i == 0) @@ -1861,8 +1977,8 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, // Take wtd average of the step_params based on the last frame's // max mv magnitude and the best ref mvs of the current block for // the given reference. - step_param = (vp9_init_search_range(max_mv) + - cpi->mv_step_param) / 2; + step_param = + (vp9_init_search_range(max_mv) + cpi->mv_step_param) / 2; } else { step_param = cpi->mv_step_param; } @@ -1879,29 +1995,23 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, // adjust src pointer for this block mi_buf_shift(x, i); - vp9_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv); + vp9_set_mv_search_range(&x->mv_limits, &bsi->ref_mv[0]->as_mv); bestsme = vp9_full_pixel_search( cpi, x, bsize, &mvp_full, step_param, sadpb, sf->mv.subpel_search_method != SUBPEL_TREE ? 
cost_list : NULL, - &bsi->ref_mv[0]->as_mv, new_mv, - INT_MAX, 1); + &bsi->ref_mv[0]->as_mv, new_mv, INT_MAX, 1); - if (bestsme < INT_MAX) { - int distortion; + x->mv_limits = tmp_mv_limits; + + if (bestsme < UINT_MAX) { + uint32_t distortion; cpi->find_fractional_mv_step( - x, - new_mv, - &bsi->ref_mv[0]->as_mv, - cm->allow_high_precision_mv, - x->errorperbit, &cpi->fn_ptr[bsize], - sf->mv.subpel_force_stop, - sf->mv.subpel_iters_per_step, - cond_cost_list(cpi, cost_list), - x->nmvjointcost, x->mvcost, - &distortion, - &x->pred_sse[mi->ref_frame[0]], - NULL, 0, 0); + x, new_mv, &bsi->ref_mv[0]->as_mv, cm->allow_high_precision_mv, + x->errorperbit, &cpi->fn_ptr[bsize], sf->mv.subpel_force_stop, + sf->mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list), + x->nmvjointcost, x->mvcost, &distortion, + &x->pred_sse[mi->ref_frame[0]], NULL, 0, 0); // save motion search result for use in compound prediction seg_mvs[i][mi->ref_frame[0]].as_mv = *new_mv; @@ -1926,9 +2036,8 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, mi_buf_shift(x, i); if (sf->comp_inter_joint_search_thresh <= bsize) { int rate_mv; - joint_motion_search(cpi, x, bsize, frame_mv[this_mode], - mi_row, mi_col, seg_mvs[i], - &rate_mv); + joint_motion_search(cpi, x, bsize, frame_mv[this_mode], mi_row, + mi_col, seg_mvs[i], &rate_mv); seg_mvs[i][mi->ref_frame[0]].as_int = frame_mv[this_mode][mi->ref_frame[0]].as_int; seg_mvs[i][mi->ref_frame[1]].as_int = @@ -1938,10 +2047,9 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, mi_buf_restore(x, orig_src, orig_pre); } - bsi->rdstat[i][mode_idx].brate = - set_and_cost_bmi_mvs(cpi, x, xd, i, this_mode, mode_mv[this_mode], - frame_mv, seg_mvs[i], bsi->ref_mv, - x->nmvjointcost, x->mvcost); + bsi->rdstat[i][mode_idx].brate = set_and_cost_bmi_mvs( + cpi, x, xd, i, this_mode, mode_mv[this_mode], frame_mv, seg_mvs[i], + bsi->ref_mv, x->nmvjointcost, x->mvcost); for (ref = 0; ref < 1 + has_second_rf; ++ref) { 
bsi->rdstat[i][mode_idx].mvs[ref].as_int = @@ -1955,9 +2063,9 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, } // Trap vectors that reach beyond the UMV borders - if (mv_check_bounds(x, &mode_mv[this_mode][0].as_mv) || + if (mv_check_bounds(&x->mv_limits, &mode_mv[this_mode][0].as_mv) || (has_second_rf && - mv_check_bounds(x, &mode_mv[this_mode][1].as_mv))) + mv_check_bounds(&x->mv_limits, &mode_mv[this_mode][1].as_mv))) continue; if (filter_idx > 0) { @@ -1968,7 +2076,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, for (ref = 0; ref < 1 + has_second_rf; ++ref) { subpelmv |= mv_has_subpel(&mode_mv[this_mode][ref].as_mv); have_ref &= mode_mv[this_mode][ref].as_int == - ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int; + ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int; } if (filter_idx > 1 && !subpelmv && !have_ref) { @@ -1976,7 +2084,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, have_ref = 1; for (ref = 0; ref < 1 + has_second_rf; ++ref) have_ref &= mode_mv[this_mode][ref].as_int == - ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int; + ref_bsi->rdstat[i][mode_idx].mvs[ref].as_int; } if (!subpelmv && have_ref && @@ -1998,18 +2106,14 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, } } - bsi->rdstat[i][mode_idx].brdcost = - encode_inter_mb_segment(cpi, x, - bsi->segment_rd - this_segment_rd, i, - &bsi->rdstat[i][mode_idx].byrate, - &bsi->rdstat[i][mode_idx].bdist, - &bsi->rdstat[i][mode_idx].bsse, - bsi->rdstat[i][mode_idx].ta, - bsi->rdstat[i][mode_idx].tl, - mi_row, mi_col); + bsi->rdstat[i][mode_idx].brdcost = encode_inter_mb_segment( + cpi, x, bsi->segment_rd - this_segment_rd, i, + &bsi->rdstat[i][mode_idx].byrate, &bsi->rdstat[i][mode_idx].bdist, + &bsi->rdstat[i][mode_idx].bsse, bsi->rdstat[i][mode_idx].ta, + bsi->rdstat[i][mode_idx].tl, mi_row, mi_col); if (bsi->rdstat[i][mode_idx].brdcost < INT64_MAX) { - bsi->rdstat[i][mode_idx].brdcost += RDCOST(x->rdmult, 
x->rddiv, - bsi->rdstat[i][mode_idx].brate, 0); + bsi->rdstat[i][mode_idx].brdcost += + RDCOST(x->rdmult, x->rddiv, bsi->rdstat[i][mode_idx].brate, 0); bsi->rdstat[i][mode_idx].brate += bsi->rdstat[i][mode_idx].byrate; bsi->rdstat[i][mode_idx].eobs = p->eobs[i]; if (num_4x4_blocks_wide > 1) @@ -2065,11 +2169,9 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, bsi->sse = block_sse; // update the coding decisions - for (k = 0; k < 4; ++k) - bsi->modes[k] = mi->bmi[k].as_mode; + for (k = 0; k < 4; ++k) bsi->modes[k] = mi->bmi[k].as_mode; - if (bsi->segment_rd > best_rd) - return INT64_MAX; + if (bsi->segment_rd > best_rd) return INT64_MAX; /* set it to the best */ for (i = 0; i < 4; i++) { mode_idx = INTER_OFFSET(bsi->modes[i]); @@ -2094,16 +2196,15 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, } static void estimate_ref_frame_costs(const VP9_COMMON *cm, - const MACROBLOCKD *xd, - int segment_id, + const MACROBLOCKD *xd, int segment_id, unsigned int *ref_costs_single, unsigned int *ref_costs_comp, vpx_prob *comp_mode_p) { - int seg_ref_active = segfeature_active(&cm->seg, segment_id, - SEG_LVL_REF_FRAME); + int seg_ref_active = + segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME); if (seg_ref_active) { memset(ref_costs_single, 0, MAX_REF_FRAMES * sizeof(*ref_costs_single)); - memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp)); + memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp)); *comp_mode_p = 128; } else { vpx_prob intra_inter_p = vp9_get_intra_inter_prob(cm, xd); @@ -2128,13 +2229,13 @@ static void estimate_ref_frame_costs(const VP9_COMMON *cm, ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] = ref_costs_single[ALTREF_FRAME] = base_cost; - ref_costs_single[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0); + ref_costs_single[LAST_FRAME] += vp9_cost_bit(ref_single_p1, 0); ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p1, 1); ref_costs_single[ALTREF_FRAME] 
+= vp9_cost_bit(ref_single_p1, 1); ref_costs_single[GOLDEN_FRAME] += vp9_cost_bit(ref_single_p2, 0); ref_costs_single[ALTREF_FRAME] += vp9_cost_bit(ref_single_p2, 1); } else { - ref_costs_single[LAST_FRAME] = 512; + ref_costs_single[LAST_FRAME] = 512; ref_costs_single[GOLDEN_FRAME] = 512; ref_costs_single[ALTREF_FRAME] = 512; } @@ -2145,20 +2246,19 @@ static void estimate_ref_frame_costs(const VP9_COMMON *cm, if (cm->reference_mode == REFERENCE_MODE_SELECT) base_cost += vp9_cost_bit(comp_inter_p, 1); - ref_costs_comp[LAST_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 0); + ref_costs_comp[LAST_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 0); ref_costs_comp[GOLDEN_FRAME] = base_cost + vp9_cost_bit(ref_comp_p, 1); } else { - ref_costs_comp[LAST_FRAME] = 512; + ref_costs_comp[LAST_FRAME] = 512; ref_costs_comp[GOLDEN_FRAME] = 512; } } } -static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, - int mode_index, - int64_t comp_pred_diff[REFERENCE_MODES], - int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS], - int skippable) { +static void store_coding_context( + MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int mode_index, + int64_t comp_pred_diff[REFERENCE_MODES], + int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS], int skippable) { MACROBLOCKD *const xd = &x->e_mbd; // Take a snapshot of the coding context so it can be @@ -2169,7 +2269,7 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, ctx->mic = *xd->mi[0]; ctx->mbmi_ext = *x->mbmi_ext; ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE]; - ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE]; + ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE]; ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT]; memcpy(ctx->best_filter_diff, best_filter_diff, @@ -2178,8 +2278,7 @@ static void store_coding_context(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x, MV_REFERENCE_FRAME ref_frame, - 
BLOCK_SIZE block_size, - int mi_row, int mi_col, + BLOCK_SIZE block_size, int mi_row, int mi_col, int_mv frame_nearest_mv[MAX_REF_FRAMES], int_mv frame_near_mv[MAX_REF_FRAMES], struct buf_2d yv12_mb[4][MAX_MB_PLANE]) { @@ -2210,33 +2309,28 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x, // in full and choose the best as the centre point for subsequent searches. // The current implementation doesn't support scaling. if (!vp9_is_scaled(sf) && block_size >= BLOCK_8X8) - vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, - ref_frame, block_size); + vp9_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame, + block_size); } -static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, - BLOCK_SIZE bsize, - int mi_row, int mi_col, - int_mv *tmp_mv, int *rate_mv) { +static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, + int mi_row, int mi_col, int_mv *tmp_mv, + int *rate_mv) { MACROBLOCKD *xd = &x->e_mbd; const VP9_COMMON *cm = &cpi->common; MODE_INFO *mi = xd->mi[0]; - struct buf_2d backup_yv12[MAX_MB_PLANE] = {{0, 0}}; + struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } }; int bestsme = INT_MAX; int step_param; int sadpb = x->sadperbit16; MV mvp_full; int ref = mi->ref_frame[0]; MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv; - - int tmp_col_min = x->mv_col_min; - int tmp_col_max = x->mv_col_max; - int tmp_row_min = x->mv_row_min; - int tmp_row_max = x->mv_row_max; + const MvLimits tmp_mv_limits = x->mv_limits; int cost_list[5]; - const YV12_BUFFER_CONFIG *scaled_ref_frame = vp9_get_scaled_ref_frame(cpi, - ref); + const YV12_BUFFER_CONFIG *scaled_ref_frame = + vp9_get_scaled_ref_frame(cpi, ref); MV pred_mv[3]; pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv; @@ -2248,22 +2342,20 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, // Swap out the reference frame for a version that's been scaled to // match the resolution of the current frame, allowing the existing // motion search 
code to be used without additional modifications. - for (i = 0; i < MAX_MB_PLANE; i++) - backup_yv12[i] = xd->plane[i].pre[0]; + for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0]; vp9_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL); } - vp9_set_mv_search_range(x, &ref_mv); - // Work out the size of the first step in the mv step search. // 0 here is maximum length first step. 1 is VPXMAX >> 1 etc. if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) { // Take wtd average of the step_params based on the last frame's // max mv magnitude and that based on the best ref mvs of the current // block for the given reference. - step_param = (vp9_init_search_range(x->max_mv_context[ref]) + - cpi->mv_step_param) / 2; + step_param = + (vp9_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) / + 2; } else { step_param = cpi->mv_step_param; } @@ -2280,8 +2372,7 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, int bhl = b_height_log2_lookup[bsize]; int tlevel = x->pred_mv_sad[ref] >> (bwl + bhl + 4); - if (tlevel < 5) - step_param += 2; + if (tlevel < 5) step_param += 2; // prev_mv_sad is not setup for dynamically scaled frames. if (cpi->oxcf.resize_mode != RESIZE_DYNAMIC) { @@ -2303,47 +2394,40 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, } } + // Note: MV limits are modified here. Always restore the original values + // after full-pixel motion search. 
+ vp9_set_mv_search_range(&x->mv_limits, &ref_mv); + mvp_full = pred_mv[x->mv_best_ref_index[ref]]; mvp_full.col >>= 3; mvp_full.row >>= 3; bestsme = vp9_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb, - cond_cost_list(cpi, cost_list), - &ref_mv, &tmp_mv->as_mv, INT_MAX, 1); + cond_cost_list(cpi, cost_list), &ref_mv, + &tmp_mv->as_mv, INT_MAX, 1); - x->mv_col_min = tmp_col_min; - x->mv_col_max = tmp_col_max; - x->mv_row_min = tmp_row_min; - x->mv_row_max = tmp_row_max; + x->mv_limits = tmp_mv_limits; if (bestsme < INT_MAX) { - int dis; /* TODO: use dis in distortion calculation later. */ - cpi->find_fractional_mv_step(x, &tmp_mv->as_mv, &ref_mv, - cm->allow_high_precision_mv, - x->errorperbit, - &cpi->fn_ptr[bsize], - cpi->sf.mv.subpel_force_stop, - cpi->sf.mv.subpel_iters_per_step, - cond_cost_list(cpi, cost_list), - x->nmvjointcost, x->mvcost, - &dis, &x->pred_sse[ref], NULL, 0, 0); + uint32_t dis; /* TODO: use dis in distortion calculation later. */ + cpi->find_fractional_mv_step( + x, &tmp_mv->as_mv, &ref_mv, cm->allow_high_precision_mv, x->errorperbit, + &cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop, + cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list), + x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0); } - *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, - x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); + *rate_mv = vp9_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost, + x->mvcost, MV_COST_WEIGHT); - if (cpi->sf.adaptive_motion_search) - x->pred_mv[ref] = tmp_mv->as_mv; + if (cpi->sf.adaptive_motion_search) x->pred_mv[ref] = tmp_mv->as_mv; if (scaled_ref_frame) { int i; - for (i = 0; i < MAX_MB_PLANE; i++) - xd->plane[i].pre[0] = backup_yv12[i]; + for (i = 0; i < MAX_MB_PLANE; i++) xd->plane[i].pre[0] = backup_yv12[i]; } } - - static INLINE void restore_dst_buf(MACROBLOCKD *xd, uint8_t *orig_dst[MAX_MB_PLANE], int orig_dst_stride[MAX_MB_PLANE]) { @@ -2361,13 +2445,11 @@ static INLINE void 
restore_dst_buf(MACROBLOCKD *xd, // However, once established that vector may be usable through the nearest and // near mv modes to reduce distortion in subsequent blocks and also improve // visual quality. -static int discount_newmv_test(const VP9_COMP *cpi, - int this_mode, +static int discount_newmv_test(const VP9_COMP *cpi, int this_mode, int_mv this_mv, int_mv (*mode_mv)[MAX_REF_FRAMES], int ref_frame) { - return (!cpi->rc.is_src_frame_alt_ref && - (this_mode == NEWMV) && + return (!cpi->rc.is_src_frame_alt_ref && (this_mode == NEWMV) && (this_mv.as_int != 0) && ((mode_mv[NEARESTMV][ref_frame].as_int == 0) || (mode_mv[NEARESTMV][ref_frame].as_int == INVALID_MV)) && @@ -2375,21 +2457,14 @@ static int discount_newmv_test(const VP9_COMP *cpi, (mode_mv[NEARMV][ref_frame].as_int == INVALID_MV))); } -static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, - BLOCK_SIZE bsize, - int *rate2, int64_t *distortion, - int *skippable, - int *rate_y, int *rate_uv, - int *disable_skip, - int_mv (*mode_mv)[MAX_REF_FRAMES], - int mi_row, int mi_col, - int_mv single_newmv[MAX_REF_FRAMES], - INTERP_FILTER (*single_filter)[MAX_REF_FRAMES], - int (*single_skippable)[MAX_REF_FRAMES], - int64_t *psse, - const int64_t ref_best_rd, - int64_t *mask_filter, - int64_t filter_cache[]) { +static int64_t handle_inter_mode( + VP9_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2, + int64_t *distortion, int *skippable, int *rate_y, int *rate_uv, + int *disable_skip, int_mv (*mode_mv)[MAX_REF_FRAMES], int mi_row, + int mi_col, int_mv single_newmv[MAX_REF_FRAMES], + INTERP_FILTER (*single_filter)[MAX_REF_FRAMES], + int (*single_skippable)[MAX_REF_FRAMES], int64_t *psse, + const int64_t ref_best_rd, int64_t *mask_filter, int64_t filter_cache[]) { VP9_COMMON *cm = &cpi->common; MACROBLOCKD *xd = &x->e_mbd; MODE_INFO *mi = xd->mi[0]; @@ -2399,7 +2474,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, int_mv *frame_mv = mode_mv[this_mode]; int i; int refs[2] = { 
mi->ref_frame[0], - (mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1]) }; + (mi->ref_frame[1] < 0 ? 0 : mi->ref_frame[1]) }; int_mv cur_mv[2]; #if CONFIG_VP9_HIGHBITDEPTH DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]); @@ -2415,13 +2490,16 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, int orig_dst_stride[MAX_MB_PLANE]; int rs = 0; INTERP_FILTER best_filter = SWITCHABLE; - uint8_t skip_txfm[MAX_MB_PLANE << 2] = {0}; - int64_t bsse[MAX_MB_PLANE << 2] = {0}; + uint8_t skip_txfm[MAX_MB_PLANE << 2] = { 0 }; + int64_t bsse[MAX_MB_PLANE << 2] = { 0 }; int bsl = mi_width_log2_lookup[bsize]; - int pred_filter_search = cpi->sf.cb_pred_filter_search ? - (((mi_row + mi_col) >> bsl) + - get_chessboard_index(cm->current_video_frame)) & 0x1 : 0; + int pred_filter_search = + cpi->sf.cb_pred_filter_search + ? (((mi_row + mi_col) >> bsl) + + get_chessboard_index(cm->current_video_frame)) & + 0x1 + : 0; int skip_txfm_sb = 0; int64_t skip_sse_sb = INT64_MAX; @@ -2437,13 +2515,12 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, if (pred_filter_search) { INTERP_FILTER af = SWITCHABLE, lf = SWITCHABLE; - if (xd->up_available) - af = xd->mi[-xd->mi_stride]->interp_filter; - if (xd->left_available) - lf = xd->mi[-1]->interp_filter; + if (xd->above_mi && is_inter_block(xd->above_mi)) + af = xd->above_mi->interp_filter; + if (xd->left_mi && is_inter_block(xd->left_mi)) + lf = xd->left_mi->interp_filter; - if ((this_mode != NEWMV) || (af == lf)) - best_filter = af; + if ((this_mode != NEWMV) || (af == lf)) best_filter = af; } if (is_comp_pred) { @@ -2466,12 +2543,12 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int; if (cpi->sf.comp_inter_joint_search_thresh <= bsize) { - joint_motion_search(cpi, x, bsize, frame_mv, - mi_row, mi_col, single_newmv, &rate_mv); + joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col, + single_newmv, &rate_mv); } else { - rate_mv = 
vp9_mv_bit_cost(&frame_mv[refs[0]].as_mv, - &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv, - x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); + rate_mv = vp9_mv_bit_cost(&frame_mv[refs[0]].as_mv, + &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv, + x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); rate_mv += vp9_mv_bit_cost(&frame_mv[refs[1]].as_mv, &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv, x->nmvjointcost, x->mvcost, MV_COST_WEIGHT); @@ -2479,13 +2556,11 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, *rate2 += rate_mv; } else { int_mv tmp_mv; - single_motion_search(cpi, x, bsize, mi_row, mi_col, - &tmp_mv, &rate_mv); - if (tmp_mv.as_int == INVALID_MV) - return INT64_MAX; + single_motion_search(cpi, x, bsize, mi_row, mi_col, &tmp_mv, &rate_mv); + if (tmp_mv.as_int == INVALID_MV) return INT64_MAX; - frame_mv[refs[0]].as_int = - xd->mi[0]->bmi[0].as_mv[0].as_int = tmp_mv.as_int; + frame_mv[refs[0]].as_int = xd->mi[0]->bmi[0].as_mv[0].as_int = + tmp_mv.as_int; single_newmv[refs[0]].as_int = tmp_mv.as_int; // Estimate the rate implications of a new mv but discount this @@ -2503,11 +2578,9 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, for (i = 0; i < is_comp_pred + 1; ++i) { cur_mv[i] = frame_mv[refs[i]]; // Clip "next_nearest" so that it does not extend to far out of image - if (this_mode != NEWMV) - clamp_mv2(&cur_mv[i].as_mv, xd); + if (this_mode != NEWMV) clamp_mv2(&cur_mv[i].as_mv, xd); - if (mv_check_bounds(x, &cur_mv[i].as_mv)) - return INT64_MAX; + if (mv_check_bounds(&x->mv_limits, &cur_mv[i].as_mv)) return INT64_MAX; mi->mv[i].as_int = cur_mv[i].as_int; } @@ -2527,12 +2600,11 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, // // Under some circumstances we discount the cost of new mv mode to encourage // initiation of a motion field. 
- if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], - mode_mv, refs[0])) { - *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode, - mbmi_ext->mode_context[refs[0]]), - cost_mv_ref(cpi, NEARESTMV, - mbmi_ext->mode_context[refs[0]])); + if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], mode_mv, + refs[0])) { + *rate2 += + VPXMIN(cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]), + cost_mv_ref(cpi, NEARESTMV, mbmi_ext->mode_context[refs[0]])); } else { *rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]); } @@ -2544,13 +2616,11 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, pred_exists = 0; // Are all MVs integer pel for Y and UV intpel_mv = !mv_has_subpel(&mi->mv[0].as_mv); - if (is_comp_pred) - intpel_mv &= !mv_has_subpel(&mi->mv[1].as_mv); + if (is_comp_pred) intpel_mv &= !mv_has_subpel(&mi->mv[1].as_mv); // Search for best switchable filter by checking the variance of // pred error irrespective of whether the filter will be used - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) - filter_cache[i] = INT64_MAX; + for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX; if (cm->interp_filter != BILINEAR) { if (x->source_variance < cpi->sf.disable_filter_search_var_thresh) { @@ -2575,8 +2645,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, filter_cache[i] = rd; filter_cache[SWITCHABLE_FILTERS] = VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd); - if (cm->interp_filter == SWITCHABLE) - rd += rs_rd; + if (cm->interp_filter == SWITCHABLE) rd += rs_rd; *mask_filter = VPXMAX(*mask_filter, rd); } else { int rate_sum = 0; @@ -2588,8 +2657,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, continue; } - if ((cm->interp_filter == SWITCHABLE && - (!i || best_needs_copy)) || + if ((cm->interp_filter == SWITCHABLE && (!i || best_needs_copy)) || (cm->interp_filter != SWITCHABLE && (cm->interp_filter == mi->interp_filter || (i == 0 && intpel_mv)))) { @@ -2601,15 
+2669,14 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, } } vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize); - model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, - &tmp_skip_sb, &tmp_skip_sse); + model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, &tmp_skip_sb, + &tmp_skip_sse); rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum); filter_cache[i] = rd; filter_cache[SWITCHABLE_FILTERS] = VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd); - if (cm->interp_filter == SWITCHABLE) - rd += rs_rd; + if (cm->interp_filter == SWITCHABLE) rd += rs_rd; *mask_filter = VPXMAX(*mask_filter, rd); if (i == 0 && intpel_mv) { @@ -2649,8 +2716,8 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, } } // Set the appropriate filter - mi->interp_filter = cm->interp_filter != SWITCHABLE ? - cm->interp_filter : best_filter; + mi->interp_filter = + cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter; rs = cm->interp_filter == SWITCHABLE ? vp9_get_switchable_rate(cpi, xd) : 0; if (pred_exists) { @@ -2669,15 +2736,14 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, // switchable list (ex. bilinear) is indicated at the frame level, or // skip condition holds. 
vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize); - model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist, - &skip_txfm_sb, &skip_sse_sb); + model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist, &skip_txfm_sb, + &skip_sse_sb); rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist); memcpy(skip_txfm, x->skip_txfm, sizeof(skip_txfm)); memcpy(bsse, x->bsse, sizeof(bsse)); } - if (!is_comp_pred) - single_filter[this_mode][refs[0]] = mi->interp_filter; + if (!is_comp_pred) single_filter[this_mode][refs[0]] = mi->interp_filter; if (cpi->sf.adaptive_mode_search) if (is_comp_pred) @@ -2694,8 +2760,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, } } - if (cm->interp_filter == SWITCHABLE) - *rate2 += rs; + if (cm->interp_filter == SWITCHABLE) *rate2 += rs; memcpy(x->skip_txfm, skip_txfm, sizeof(skip_txfm)); memcpy(x->bsse, bsse, sizeof(bsse)); @@ -2707,8 +2772,8 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, // Y cost and distortion vp9_subtract_plane(x, bsize, 0); - super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse, - bsize, ref_best_rd); + super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse, bsize, + ref_best_rd); if (*rate_y == INT_MAX) { *rate2 = INT_MAX; @@ -2745,16 +2810,15 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, *distortion = skip_sse_sb; } - if (!is_comp_pred) - single_skippable[this_mode][refs[0]] = *skippable; + if (!is_comp_pred) single_skippable[this_mode][refs[0]] = *skippable; restore_dst_buf(xd, orig_dst, orig_dst_stride); return 0; // The rate-distortion cost will be re-calculated by caller. 
} -void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, - RD_COST *rd_cost, BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx, int64_t best_rd) { +void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost, + BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, + int64_t best_rd) { VP9_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &x->e_mbd; struct macroblockd_plane *const pd = xd->plane; @@ -2766,11 +2830,13 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, ctx->skip = 0; xd->mi[0]->ref_frame[0] = INTRA_FRAME; xd->mi[0]->ref_frame[1] = NONE; + // Initialize interp_filter here so we do not have to check for inter block + // modes in get_pred_context_switchable_interp() + xd->mi[0]->interp_filter = SWITCHABLE_FILTERS; if (bsize >= BLOCK_8X8) { - if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly, - &dist_y, &y_skip, bsize, - best_rd) >= best_rd) { + if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly, &dist_y, + &y_skip, bsize, best_rd) >= best_rd) { rd_cost->rate = INT_MAX; return; } @@ -2782,20 +2848,18 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, return; } } - max_uv_tx_size = get_uv_tx_size_impl(xd->mi[0]->tx_size, bsize, - pd[1].subsampling_x, - pd[1].subsampling_y); - rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly, - &dist_uv, &uv_skip, VPXMAX(BLOCK_8X8, bsize), - max_uv_tx_size); + max_uv_tx_size = uv_txsize_lookup[bsize][xd->mi[0]->tx_size] + [pd[1].subsampling_x][pd[1].subsampling_y]; + rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly, &dist_uv, + &uv_skip, VPXMAX(BLOCK_8X8, bsize), max_uv_tx_size); if (y_skip && uv_skip) { rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly + vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1); rd_cost->dist = dist_y + dist_uv; } else { - rd_cost->rate = rate_y + rate_uv + - vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0); + rd_cost->rate = + rate_y + rate_uv + vp9_cost_bit(vp9_get_skip_prob(cm, xd), 0); 
rd_cost->dist = dist_y + dist_uv; } @@ -2809,10 +2873,8 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, #define LOW_VAR_THRESH 16 #define VLOW_ADJ_MAX 25 #define VHIGH_ADJ_MAX 8 -static void rd_variance_adjustment(VP9_COMP *cpi, - MACROBLOCK *x, - BLOCK_SIZE bsize, - int64_t *this_rd, +static void rd_variance_adjustment(VP9_COMP *cpi, MACROBLOCK *x, + BLOCK_SIZE bsize, int64_t *this_rd, MV_REFERENCE_FRAME ref_frame, unsigned int source_variance) { MACROBLOCKD *const xd = &x->e_mbd; @@ -2821,30 +2883,28 @@ static void rd_variance_adjustment(VP9_COMP *cpi, int64_t var_error = 0; int64_t var_factor = 0; - if (*this_rd == INT64_MAX) - return; + if (*this_rd == INT64_MAX) return; #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - recon_variance = - vp9_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize, xd->bd); + recon_variance = vp9_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, + bsize, xd->bd); } else { recon_variance = - vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize); + vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize); } #else - recon_variance = - vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize); + recon_variance = vp9_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize); #endif // CONFIG_VP9_HIGHBITDEPTH if ((source_variance + recon_variance) > LOW_VAR_THRESH) { absvar_diff = (source_variance > recon_variance) - ? (source_variance - recon_variance) - : (recon_variance - source_variance); + ? 
(source_variance - recon_variance) + : (recon_variance - source_variance); var_error = ((int64_t)200 * source_variance * recon_variance) / - (((int64_t)source_variance * source_variance) + - ((int64_t)recon_variance * recon_variance)); + (((int64_t)source_variance * source_variance) + + ((int64_t)recon_variance * recon_variance)); var_error = 100 - var_error; } @@ -2854,8 +2914,8 @@ static void rd_variance_adjustment(VP9_COMP *cpi, if ((source_variance > LOW_VAR_THRESH) && (ref_frame == INTRA_FRAME) && (source_variance > recon_variance)) { var_factor = VPXMIN(absvar_diff, VPXMIN(VLOW_ADJ_MAX, var_error)); - // A second possible case of interest is where the source variance - // is very low and we wish to discourage false texture or motion trails. + // A second possible case of interest is where the source variance + // is very low and we wish to discourage false texture or motion trails. } else if ((source_variance < (LOW_VAR_THRESH >> 1)) && (recon_variance > source_variance)) { var_factor = VPXMIN(absvar_diff, VPXMIN(VHIGH_ADJ_MAX, var_error)); @@ -2863,12 +2923,11 @@ static void rd_variance_adjustment(VP9_COMP *cpi, *this_rd += (*this_rd * var_factor) / 100; } - // Do we have an internal image edge (e.g. formatting bars). int vp9_internal_image_edge(VP9_COMP *cpi) { return (cpi->oxcf.pass == 2) && - ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) || - (cpi->twopass.this_frame_stats.inactive_zone_cols > 0)); + ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) || + (cpi->twopass.this_frame_stats.inactive_zone_cols > 0)); } // Checks to see if a super block is on a horizontal image edge. @@ -2928,19 +2987,15 @@ int vp9_active_v_edge(VP9_COMP *cpi, int mi_col, int mi_step) { // Checks to see if a super block is at the edge of the active image. // In most cases this is the "real" edge unless there are formatting // bars embedded in the stream. 
-int vp9_active_edge_sb(VP9_COMP *cpi, - int mi_row, int mi_col) { +int vp9_active_edge_sb(VP9_COMP *cpi, int mi_row, int mi_col) { return vp9_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) || vp9_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE); } -void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, - TileDataEnc *tile_data, - MACROBLOCK *x, - int mi_row, int mi_col, +void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, TileDataEnc *tile_data, + MACROBLOCK *x, int mi_row, int mi_col, RD_COST *rd_cost, BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx, - int64_t best_rd_so_far) { + PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far) { VP9_COMMON *const cm = &cpi->common; TileInfo *const tile_info = &tile_data->tile_info; RD_OPT *const rd_opt = &cpi->rd; @@ -2995,20 +3050,16 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH; - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) - filter_cache[i] = INT64_MAX; + for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX; estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp, &comp_mode_p); - for (i = 0; i < REFERENCE_MODES; ++i) - best_pred_rd[i] = INT64_MAX; + for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX; for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) best_filter_rd[i] = INT64_MAX; - for (i = 0; i < TX_SIZES; i++) - rate_uv_intra[i] = INT_MAX; - for (i = 0; i < MAX_REF_FRAMES; ++i) - x->pred_sse[i] = INT_MAX; + for (i = 0; i < TX_SIZES; i++) rate_uv_intra[i] = INT_MAX; + for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX; for (i = 0; i < MB_MODE_COUNT; ++i) { for (k = 0; k < MAX_REF_FRAMES; ++k) { single_inter_filter[i][k] = SWITCHABLE; @@ -3101,12 +3152,11 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, mode_skip_mask[INTRA_FRAME] |= ~(sf->intra_y_mode_mask[max_txsize_lookup[bsize]]); - for (i = 0; i <= LAST_NEW_MV_INDEX; ++i) - mode_threshold[i] = 0; + for (i = 0; i <= LAST_NEW_MV_INDEX; ++i) 
mode_threshold[i] = 0; for (i = LAST_NEW_MV_INDEX + 1; i < MAX_MODES; ++i) mode_threshold[i] = ((int64_t)rd_threshes[i] * rd_thresh_freq_fact[i]) >> 5; - midx = sf->schedule_mode_search ? mode_skip_start : 0; + midx = sf->schedule_mode_search ? mode_skip_start : 0; while (midx > 4) { uint8_t end_pos = 0; for (i = 5; i < midx; ++i) { @@ -3141,8 +3191,7 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, // skip mask to look at a subset of the remaining modes. if (midx == mode_skip_start && best_mode_index >= 0) { switch (best_mbmode.ref_frame[0]) { - case INTRA_FRAME: - break; + case INTRA_FRAME: break; case LAST_FRAME: ref_frame_skip_mask[0] |= LAST_FRAME_MODE_MASK; ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK; @@ -3151,13 +3200,9 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, ref_frame_skip_mask[0] |= GOLDEN_FRAME_MODE_MASK; ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK; break; - case ALTREF_FRAME: - ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK; - break; + case ALTREF_FRAME: ref_frame_skip_mask[0] |= ALT_REF_MODE_MASK; break; case NONE: - case MAX_REF_FRAMES: - assert(0 && "Invalid Reference frame"); - break; + case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break; } } @@ -3165,24 +3210,24 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame)))) continue; - if (mode_skip_mask[ref_frame] & (1 << this_mode)) - continue; + if (mode_skip_mask[ref_frame] & (1 << this_mode)) continue; // Test best rd so far against threshold for trying this mode. 
if (best_mode_skippable && sf->schedule_mode_search) mode_threshold[mode_index] <<= 1; - if (best_rd < mode_threshold[mode_index]) - continue; + if (best_rd < mode_threshold[mode_index]) continue; if (sf->motion_field_mode_search) { - const int mi_width = VPXMIN(num_8x8_blocks_wide_lookup[bsize], - tile_info->mi_col_end - mi_col); + const int mi_width = VPXMIN(num_8x8_blocks_wide_lookup[bsize], + tile_info->mi_col_end - mi_col); const int mi_height = VPXMIN(num_8x8_blocks_high_lookup[bsize], tile_info->mi_row_end - mi_row); const int bsl = mi_width_log2_lookup[bsize]; - int cb_partition_search_ctrl = (((mi_row + mi_col) >> bsl) - + get_chessboard_index(cm->current_video_frame)) & 0x1; + int cb_partition_search_ctrl = + (((mi_row + mi_col) >> bsl) + + get_chessboard_index(cm->current_video_frame)) & + 0x1; MODE_INFO *ref_mi; int const_motion = 1; int skip_ref_frame = !cb_partition_search_ctrl; @@ -3202,10 +3247,8 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, } if ((mi_col - 1) >= tile_info->mi_col_start) { - if (ref_mv.as_int == INVALID_MV) - ref_mv = xd->mi[-1]->mv[0]; - if (rf == NONE) - rf = xd->mi[-1]->ref_frame[0]; + if (ref_mv.as_int == INVALID_MV) ref_mv = xd->mi[-1]->mv[0]; + if (rf == NONE) rf = xd->mi[-1]->ref_frame[0]; for (i = 0; i < mi_height; ++i) { ref_mi = xd->mi[i * xd->mi_stride - 1]; const_motion &= (ref_mv.as_int == ref_mi->mv[0].as_int) && @@ -3216,27 +3259,22 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, if (skip_ref_frame && this_mode != NEARESTMV && this_mode != NEWMV) if (rf > INTRA_FRAME) - if (ref_frame != rf) - continue; + if (ref_frame != rf) continue; if (const_motion) - if (this_mode == NEARMV || this_mode == ZEROMV) - continue; + if (this_mode == NEARMV || this_mode == ZEROMV) continue; } comp_pred = second_ref_frame > INTRA_FRAME; if (comp_pred) { - if (!cpi->allow_comp_inter_inter) - continue; + if (!cpi->allow_comp_inter_inter) continue; // Skip compound inter modes if ARF is not available. 
- if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) - continue; + if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue; // Do not allow compound prediction if the segment level reference frame // feature is in use as in this case there can only be one reference. - if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) - continue; + if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue; if ((mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) && best_mode_index >= 0 && best_mbmode.ref_frame[0] == INTRA_FRAME) @@ -3265,19 +3303,17 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, // one of the neighboring directional modes if ((mode_search_skip_flags & FLAG_SKIP_INTRA_BESTINTER) && (this_mode >= D45_PRED && this_mode <= TM_PRED)) { - if (best_mode_index >= 0 && - best_mbmode.ref_frame[0] > INTRA_FRAME) + if (best_mode_index >= 0 && best_mbmode.ref_frame[0] > INTRA_FRAME) continue; } if (mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) { - if (conditional_skipintra(this_mode, best_intra_mode)) - continue; + if (conditional_skipintra(this_mode, best_intra_mode)) continue; } } } else { - const MV_REFERENCE_FRAME ref_frames[2] = {ref_frame, second_ref_frame}; - if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv, - this_mode, ref_frames)) + const MV_REFERENCE_FRAME ref_frames[2] = { ref_frame, second_ref_frame }; + if (!check_best_zero_mv(cpi, mbmi_ext->mode_context, frame_mv, this_mode, + ref_frames)) continue; } @@ -3287,8 +3323,8 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, mi->ref_frame[1] = second_ref_frame; // Evaluate all sub-pel filters irrespective of whether we can use // them for this frame. - mi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP - : cm->interp_filter; + mi->interp_filter = + cm->interp_filter == SWITCHABLE ? 
EIGHTTAP : cm->interp_filter; mi->mv[0].as_int = mi->mv[1].as_int = 0; x->skip = 0; @@ -3297,25 +3333,23 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, // Select prediction reference frames. for (i = 0; i < MAX_MB_PLANE; i++) { xd->plane[i].pre[0] = yv12_mb[ref_frame][i]; - if (comp_pred) - xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i]; + if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i]; } if (ref_frame == INTRA_FRAME) { TX_SIZE uv_tx; struct macroblockd_plane *const pd = &xd->plane[1]; memset(x->skip_txfm, 0, sizeof(x->skip_txfm)); - super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, - NULL, bsize, best_rd); - if (rate_y == INT_MAX) - continue; + super_block_yrd(cpi, x, &rate_y, &distortion_y, &skippable, NULL, bsize, + best_rd); + if (rate_y == INT_MAX) continue; - uv_tx = get_uv_tx_size_impl(mi->tx_size, bsize, pd->subsampling_x, - pd->subsampling_y); + uv_tx = uv_txsize_lookup[bsize][mi->tx_size][pd->subsampling_x] + [pd->subsampling_y]; if (rate_uv_intra[uv_tx] == INT_MAX) { - choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, - &rate_uv_intra[uv_tx], &rate_uv_tokenonly[uv_tx], - &dist_uv[uv_tx], &skip_uv[uv_tx], &mode_uv[uv_tx]); + choose_intra_uv_mode(cpi, x, ctx, bsize, uv_tx, &rate_uv_intra[uv_tx], + &rate_uv_tokenonly[uv_tx], &dist_uv[uv_tx], + &skip_uv[uv_tx], &mode_uv[uv_tx]); } rate_uv = rate_uv_tokenonly[uv_tx]; @@ -3328,21 +3362,16 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, rate2 += intra_cost_penalty; distortion2 = distortion_y + distortion_uv; } else { - this_rd = handle_inter_mode(cpi, x, bsize, - &rate2, &distortion2, &skippable, - &rate_y, &rate_uv, - &disable_skip, frame_mv, - mi_row, mi_col, - single_newmv, single_inter_filter, - single_skippable, &total_sse, best_rd, - &mask_filter, filter_cache); - if (this_rd == INT64_MAX) - continue; + this_rd = handle_inter_mode( + cpi, x, bsize, &rate2, &distortion2, &skippable, &rate_y, &rate_uv, + &disable_skip, frame_mv, mi_row, mi_col, single_newmv, + 
single_inter_filter, single_skippable, &total_sse, best_rd, + &mask_filter, filter_cache); + if (this_rd == INT64_MAX) continue; compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred); - if (cm->reference_mode == REFERENCE_MODE_SELECT) - rate2 += compmode_cost; + if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost; } // Estimate the reference frame signaling cost and add it @@ -3365,16 +3394,17 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, // Cost the skip mb case rate2 += skip_cost1; } else if (ref_frame != INTRA_FRAME && !xd->lossless) { - if (RDCOST(x->rdmult, x->rddiv, - rate_y + rate_uv + skip_cost0, distortion2) < + if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv + skip_cost0, + distortion2) < RDCOST(x->rdmult, x->rddiv, skip_cost1, total_sse)) { // Add in the cost of the no skip flag. rate2 += skip_cost0; } else { // FIXME(rbultje) make this work for splitmv also + assert(total_sse >= 0); + rate2 += skip_cost1; distortion2 = total_sse; - assert(total_sse >= 0); rate2 -= (rate_y + rate_uv); this_skip2 = 1; } @@ -3389,11 +3419,11 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, // Apply an adjustment to the rd value based on the similarity of the // source variance and reconstructed variance. 
- rd_variance_adjustment(cpi, x, bsize, &this_rd, - ref_frame, x->source_variance); + rd_variance_adjustment(cpi, x, bsize, &this_rd, ref_frame, + x->source_variance); if (ref_frame == INTRA_FRAME) { - // Keep record of best intra rd + // Keep record of best intra rd if (this_rd < best_intra_rd) { best_intra_rd = this_rd; best_intra_mode = mi->mode; @@ -3418,6 +3448,9 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, /* required for left and above block mv */ mi->mv[0].as_int = 0; max_plane = 1; + // Initialize interp_filter here so we do not have to check for + // inter block modes in get_pred_context_switchable_interp() + mi->interp_filter = SWITCHABLE_FILTERS; } else { best_pred_sse = x->pred_sse[ref_frame]; } @@ -3430,8 +3463,7 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, best_skip2 = this_skip2; best_mode_skippable = skippable; - if (!x->select_tx_size) - swap_block_ptr(x, ctx, 1, 0, 0, max_plane); + if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane); memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mi->tx_size], sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk); @@ -3451,8 +3483,7 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, const int var_adjust = (x->source_variance < 16); scale -= var_adjust; } - if (ref_frame > INTRA_FRAME && - distortion2 * scale < qstep * qstep) { + if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) { early_term = 1; } } @@ -3486,8 +3517,9 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, /* keep record of best filter type */ if (!mode_excluded && cm->interp_filter != BILINEAR) { - int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ? - SWITCHABLE_FILTERS : cm->interp_filter]; + int64_t ref = + filter_cache[cm->interp_filter == SWITCHABLE ? 
SWITCHABLE_FILTERS + : cm->interp_filter]; for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { int64_t adj_rd; @@ -3508,11 +3540,9 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, } } - if (early_term) - break; + if (early_term) break; - if (x->skip && !comp_pred) - break; + if (x->skip && !comp_pred) break; } // The inter modes' rate costs are not calculated precisely in some cases. @@ -3520,20 +3550,23 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, // ZEROMV. Here, checks are added for those cases, and the mode decisions // are corrected. if (best_mbmode.mode == NEWMV) { - const MV_REFERENCE_FRAME refs[2] = {best_mbmode.ref_frame[0], - best_mbmode.ref_frame[1]}; + const MV_REFERENCE_FRAME refs[2] = { best_mbmode.ref_frame[0], + best_mbmode.ref_frame[1] }; int comp_pred_mode = refs[1] > INTRA_FRAME; if (frame_mv[NEARESTMV][refs[0]].as_int == best_mbmode.mv[0].as_int && - ((comp_pred_mode && frame_mv[NEARESTMV][refs[1]].as_int == - best_mbmode.mv[1].as_int) || !comp_pred_mode)) + ((comp_pred_mode && + frame_mv[NEARESTMV][refs[1]].as_int == best_mbmode.mv[1].as_int) || + !comp_pred_mode)) best_mbmode.mode = NEARESTMV; else if (frame_mv[NEARMV][refs[0]].as_int == best_mbmode.mv[0].as_int && - ((comp_pred_mode && frame_mv[NEARMV][refs[1]].as_int == - best_mbmode.mv[1].as_int) || !comp_pred_mode)) + ((comp_pred_mode && + frame_mv[NEARMV][refs[1]].as_int == best_mbmode.mv[1].as_int) || + !comp_pred_mode)) best_mbmode.mode = NEARMV; else if (best_mbmode.mv[0].as_int == 0 && - ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) || !comp_pred_mode)) + ((comp_pred_mode && best_mbmode.mv[1].as_int == 0) || + !comp_pred_mode)) best_mbmode.mode = ZEROMV; } @@ -3552,8 +3585,7 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, uv_tx_size = get_uv_tx_size(mi, &xd->plane[1]); rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra[uv_tx_size], &rate_uv_tokenonly[uv_tx_size], - &dist_uv[uv_tx_size], - &skip_uv[uv_tx_size], + &dist_uv[uv_tx_size], &skip_uv[uv_tx_size], bsize < BLOCK_8X8 ? 
BLOCK_8X8 : bsize, uv_tx_size); } @@ -3598,8 +3630,7 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, if (!x->skip && !x->select_tx_size) { int has_high_freq_coeff = 0; int plane; - int max_plane = is_inter_block(xd->mi[0]) - ? MAX_MB_PLANE : 1; + int max_plane = is_inter_block(xd->mi[0]) ? MAX_MB_PLANE : 1; for (plane = 0; plane < max_plane; ++plane) { x->plane[plane].eobs = ctx->eobs_pbuf[plane][1]; has_high_freq_coeff |= vp9_has_high_freq_in_plane(x, bsize, plane); @@ -3619,10 +3650,8 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, best_filter_diff, best_mode_skippable); } -void vp9_rd_pick_inter_mode_sb_seg_skip(VP9_COMP *cpi, - TileDataEnc *tile_data, - MACROBLOCK *x, - RD_COST *rd_cost, +void vp9_rd_pick_inter_mode_sb_seg_skip(VP9_COMP *cpi, TileDataEnc *tile_data, + MACROBLOCK *x, RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far) { @@ -3646,10 +3675,8 @@ void vp9_rd_pick_inter_mode_sb_seg_skip(VP9_COMP *cpi, estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp, &comp_mode_p); - for (i = 0; i < MAX_REF_FRAMES; ++i) - x->pred_sse[i] = INT_MAX; - for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i) - x->pred_mv_sad[i] = INT_MAX; + for (i = 0; i < MAX_REF_FRAMES; ++i) x->pred_sse[i] = INT_MAX; + for (i = LAST_FRAME; i < MAX_REF_FRAMES; ++i) x->pred_mv_sad[i] = INT_MAX; rd_cost->rate = INT_MAX; @@ -3713,18 +3740,13 @@ void vp9_rd_pick_inter_mode_sb_seg_skip(VP9_COMP *cpi, vp9_zero(best_pred_diff); vp9_zero(best_filter_diff); - if (!x->select_tx_size) - swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE); - store_coding_context(x, ctx, THR_ZEROMV, - best_pred_diff, best_filter_diff, 0); + if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE); + store_coding_context(x, ctx, THR_ZEROMV, best_pred_diff, best_filter_diff, 0); } -void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, - TileDataEnc *tile_data, - MACROBLOCK *x, - int mi_row, int mi_col, - RD_COST *rd_cost, - BLOCK_SIZE bsize, +void 
vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, TileDataEnc *tile_data, + MACROBLOCK *x, int mi_row, int mi_col, + RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far) { VP9_COMMON *const cm = &cpi->common; @@ -3756,7 +3778,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, int skip_uv; PREDICTION_MODE mode_uv = DC_PRED; const int intra_cost_penalty = vp9_get_intra_cost_penalty( - cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth); + cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth); int_mv seg_mvs[4][MAX_REF_FRAMES]; b_mode_info best_bmodes[4]; int best_skip2 = 0; @@ -3764,26 +3786,23 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, int64_t mask_filter = 0; int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS]; int internal_active_edge = - vp9_active_edge_sb(cpi, mi_row, mi_col) && vp9_internal_image_edge(cpi); + vp9_active_edge_sb(cpi, mi_row, mi_col) && vp9_internal_image_edge(cpi); x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH; memset(x->zcoeff_blk[TX_4X4], 0, 4); vp9_zero(best_mbmode); - for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) - filter_cache[i] = INT64_MAX; + for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX; for (i = 0; i < 4; i++) { int j; - for (j = 0; j < MAX_REF_FRAMES; j++) - seg_mvs[i][j].as_int = INVALID_MV; + for (j = 0; j < MAX_REF_FRAMES; j++) seg_mvs[i][j].as_int = INVALID_MV; } estimate_ref_frame_costs(cm, xd, segment_id, ref_costs_single, ref_costs_comp, &comp_mode_p); - for (i = 0; i < REFERENCE_MODES; ++i) - best_pred_rd[i] = INT64_MAX; + for (i = 0; i < REFERENCE_MODES; ++i) best_pred_rd[i] = INT64_MAX; for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) best_filter_rd[i] = INT64_MAX; rate_uv_intra = INT_MAX; @@ -3793,8 +3812,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ref_frame++) { if (cpi->ref_frame_flags & flag_list[ref_frame]) { setup_buffer_inter(cpi, x, ref_frame, bsize, mi_row, 
mi_col, - frame_mv[NEARESTMV], frame_mv[NEARMV], - yv12_mb); + frame_mv[NEARESTMV], frame_mv[NEARMV], yv12_mb); } else { ref_frame_skip_mask[0] |= (1 << ref_frame); ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK; @@ -3826,8 +3844,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, int ref_scaled = vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf); if (second_ref_frame > INTRA_FRAME) ref_scaled += vp9_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf); - if (ref_scaled) - continue; + if (ref_scaled) continue; } #endif // Look at the reference frame of the best mode so far and set the @@ -3835,8 +3852,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, if (ref_index > 2 && sf->mode_skip_start < MAX_MODES) { if (ref_index == 3) { switch (best_mbmode.ref_frame[0]) { - case INTRA_FRAME: - break; + case INTRA_FRAME: break; case LAST_FRAME: ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME); ref_frame_skip_mask[1] |= SECOND_REF_FRAME_MASK; @@ -3849,9 +3865,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, ref_frame_skip_mask[0] |= (1 << GOLDEN_FRAME) | (1 << LAST_FRAME); break; case NONE: - case MAX_REF_FRAMES: - assert(0 && "Invalid Reference frame"); - break; + case MAX_REF_FRAMES: assert(0 && "Invalid Reference frame"); break; } } } @@ -3869,14 +3883,11 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, comp_pred = second_ref_frame > INTRA_FRAME; if (comp_pred) { - if (!cpi->allow_comp_inter_inter) - continue; - if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) - continue; + if (!cpi->allow_comp_inter_inter) continue; + if (!(cpi->ref_frame_flags & flag_list[second_ref_frame])) continue; // Do not allow compound prediction if the segment level reference frame // feature is in use as in this case there can only be one reference. 
- if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) - continue; + if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) continue; if ((sf->mode_search_skip_flags & FLAG_SKIP_COMP_BESTINTRA) && best_mbmode.ref_frame[0] == INTRA_FRAME) @@ -3893,9 +3904,9 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, if (segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) && get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame) { continue; - // Disable this drop out case if the ref frame - // segment level feature is enabled for this segment. This is to - // prevent the possibility that we end up unable to pick any mode. + // Disable this drop out case if the ref frame + // segment level feature is enabled for this segment. This is to + // prevent the possibility that we end up unable to pick any mode. } else if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME)) { // Only consider ZEROMV/ALTREF_FRAME for alt ref frame, // unless ARNR filtering is enabled in which case we want @@ -3911,33 +3922,29 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, mi->ref_frame[1] = second_ref_frame; // Evaluate all sub-pel filters irrespective of whether we can use // them for this frame. - mi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP - : cm->interp_filter; + mi->interp_filter = + cm->interp_filter == SWITCHABLE ? EIGHTTAP : cm->interp_filter; x->skip = 0; set_ref_ptrs(cm, xd, ref_frame, second_ref_frame); // Select prediction reference frames. 
for (i = 0; i < MAX_MB_PLANE; i++) { xd->plane[i].pre[0] = yv12_mb[ref_frame][i]; - if (comp_pred) - xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i]; + if (comp_pred) xd->plane[i].pre[1] = yv12_mb[second_ref_frame][i]; } if (ref_frame == INTRA_FRAME) { int rate; - if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y, - &distortion_y, best_rd) >= best_rd) + if (rd_pick_intra_sub_8x8_y_mode(cpi, x, &rate, &rate_y, &distortion_y, + best_rd) >= best_rd) continue; rate2 += rate; rate2 += intra_cost_penalty; distortion2 += distortion_y; if (rate_uv_intra == INT_MAX) { - choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4, - &rate_uv_intra, - &rate_uv_tokenonly, - &dist_uv, &skip_uv, - &mode_uv); + choose_intra_uv_mode(cpi, x, ctx, bsize, TX_4X4, &rate_uv_intra, + &rate_uv_tokenonly, &dist_uv, &skip_uv, &mode_uv); } rate2 += rate_uv_intra; rate_uv = rate_uv_tokenonly; @@ -3953,20 +3960,22 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, int64_t tmp_best_distortion = INT_MAX, tmp_best_sse, uv_sse; int tmp_best_skippable = 0; int switchable_filter_index; - int_mv *second_ref = comp_pred ? - &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL; + int_mv *second_ref = + comp_pred ? &x->mbmi_ext->ref_mvs[second_ref_frame][0] : NULL; b_mode_info tmp_best_bmodes[16]; MODE_INFO tmp_best_mbmode; BEST_SEG_INFO bsi[SWITCHABLE_FILTERS]; int pred_exists = 0; int uv_skippable; - YV12_BUFFER_CONFIG *scaled_ref_frame[2] = {NULL, NULL}; + YV12_BUFFER_CONFIG *scaled_ref_frame[2] = { NULL, NULL }; int ref; for (ref = 0; ref < 2; ++ref) { - scaled_ref_frame[ref] = mi->ref_frame[ref] > INTRA_FRAME ? - vp9_get_scaled_ref_frame(cpi, mi->ref_frame[ref]) : NULL; + scaled_ref_frame[ref] = + mi->ref_frame[ref] > INTRA_FRAME + ? vp9_get_scaled_ref_frame(cpi, mi->ref_frame[ref]) + : NULL; if (scaled_ref_frame[ref]) { int i; @@ -3980,11 +3989,12 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, } } - this_rd_thresh = (ref_frame == LAST_FRAME) ? 
- rd_opt->threshes[segment_id][bsize][THR_LAST] : - rd_opt->threshes[segment_id][bsize][THR_ALTR]; - this_rd_thresh = (ref_frame == GOLDEN_FRAME) ? - rd_opt->threshes[segment_id][bsize][THR_GOLD] : this_rd_thresh; + this_rd_thresh = (ref_frame == LAST_FRAME) + ? rd_opt->threshes[segment_id][bsize][THR_LAST] + : rd_opt->threshes[segment_id][bsize][THR_ALTR]; + this_rd_thresh = (ref_frame == GOLDEN_FRAME) + ? rd_opt->threshes[segment_id][bsize][THR_GOLD] + : this_rd_thresh; for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX; @@ -3996,8 +4006,9 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, ctx->pred_interp_filter < SWITCHABLE) { tmp_best_filter = ctx->pred_interp_filter; } else if (sf->adaptive_pred_interp_filter == 2) { - tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE ? - ctx->pred_interp_filter : 0; + tmp_best_filter = ctx->pred_interp_filter < SWITCHABLE + ? ctx->pred_interp_filter + : 0; } else { for (switchable_filter_index = 0; switchable_filter_index < SWITCHABLE_FILTERS; @@ -4006,24 +4017,19 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, int64_t rs_rd; MB_MODE_INFO_EXT *mbmi_ext = x->mbmi_ext; mi->interp_filter = switchable_filter_index; - tmp_rd = rd_pick_best_sub8x8_mode(cpi, x, - &mbmi_ext->ref_mvs[ref_frame][0], - second_ref, best_yrd, &rate, - &rate_y, &distortion, - &skippable, &total_sse, - (int) this_rd_thresh, seg_mvs, - bsi, switchable_filter_index, - mi_row, mi_col); + tmp_rd = rd_pick_best_sub8x8_mode( + cpi, x, &mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd, + &rate, &rate_y, &distortion, &skippable, &total_sse, + (int)this_rd_thresh, seg_mvs, bsi, switchable_filter_index, + mi_row, mi_col); - if (tmp_rd == INT64_MAX) - continue; + if (tmp_rd == INT64_MAX) continue; rs = vp9_get_switchable_rate(cpi, xd); rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0); filter_cache[switchable_filter_index] = tmp_rd; filter_cache[SWITCHABLE_FILTERS] = VPXMIN(filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd); - if 
(cm->interp_filter == SWITCHABLE) - tmp_rd += rs_rd; + if (cm->interp_filter == SWITCHABLE) tmp_rd += rs_rd; mask_filter = VPXMAX(mask_filter, tmp_rd); @@ -4047,8 +4053,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, x->zcoeff_blk[TX_4X4][i] = !x->plane[0].eobs[i]; } pred_exists = 1; - if (switchable_filter_index == 0 && - sf->use_rd_breakout && + if (switchable_filter_index == 0 && sf->use_rd_breakout && best_rd < INT64_MAX) { if (tmp_best_rdu / 2 > best_rd) { // skip searching the other filters if the first is @@ -4063,22 +4068,18 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, } } - if (tmp_best_rdu == INT64_MAX && pred_exists) - continue; + if (tmp_best_rdu == INT64_MAX && pred_exists) continue; - mi->interp_filter = (cm->interp_filter == SWITCHABLE ? - tmp_best_filter : cm->interp_filter); + mi->interp_filter = (cm->interp_filter == SWITCHABLE ? tmp_best_filter + : cm->interp_filter); if (!pred_exists) { // Handles the special case when a filter that is not in the // switchable list (bilinear, 6-tap) is indicated at the frame level - tmp_rd = rd_pick_best_sub8x8_mode(cpi, x, - &x->mbmi_ext->ref_mvs[ref_frame][0], - second_ref, best_yrd, &rate, &rate_y, - &distortion, &skippable, &total_sse, - (int) this_rd_thresh, seg_mvs, bsi, 0, - mi_row, mi_col); - if (tmp_rd == INT64_MAX) - continue; + tmp_rd = rd_pick_best_sub8x8_mode( + cpi, x, &x->mbmi_ext->ref_mvs[ref_frame][0], second_ref, best_yrd, + &rate, &rate_y, &distortion, &skippable, &total_sse, + (int)this_rd_thresh, seg_mvs, bsi, 0, mi_row, mi_col); + if (tmp_rd == INT64_MAX) continue; } else { total_sse = tmp_best_sse; rate = tmp_best_rate; @@ -4086,8 +4087,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, distortion = tmp_best_distortion; skippable = tmp_best_skippable; *mi = tmp_best_mbmode; - for (i = 0; i < 4; i++) - xd->mi[0]->bmi[i] = tmp_best_bmodes[i]; + for (i = 0; i < 4; i++) xd->mi[0]->bmi[i] = tmp_best_bmodes[i]; } rate2 += rate; @@ -4109,8 +4109,7 @@ void 
vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, if (tmp_best_rdu > 0) { // If even the 'Y' rd value of split is higher than best so far // then dont bother looking at UV - vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, - BLOCK_8X8); + vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8); memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm)); if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable, &uv_sse, BLOCK_8X8, tmp_best_rdu)) { @@ -4140,8 +4139,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, } } - if (cm->reference_mode == REFERENCE_MODE_SELECT) - rate2 += compmode_cost; + if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost; // Estimate the reference frame signaling cost and add it // to the rolling cost variable. @@ -4159,8 +4157,8 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, // Skip is never coded at the segment level for sub8x8 blocks and instead // always coded in the bitstream at the mode info level. if (ref_frame != INTRA_FRAME && !xd->lossless) { - if (RDCOST(x->rdmult, x->rddiv, - rate_y + rate_uv + skip_cost0, distortion2) < + if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv + skip_cost0, + distortion2) < RDCOST(x->rdmult, x->rddiv, skip_cost1, total_sse)) { // Add in the cost of the no skip flag. 
rate2 += skip_cost0; @@ -4201,23 +4199,24 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, /* required for left and above block mv */ mi->mv[0].as_int = 0; max_plane = 1; + // Initialize interp_filter here so we do not have to check for + // inter block modes in get_pred_context_switchable_interp() + mi->interp_filter = SWITCHABLE_FILTERS; } rd_cost->rate = rate2; rd_cost->dist = distortion2; rd_cost->rdcost = this_rd; best_rd = this_rd; - best_yrd = best_rd - - RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv); + best_yrd = + best_rd - RDCOST(x->rdmult, x->rddiv, rate_uv, distortion_uv); best_mbmode = *mi; best_skip2 = this_skip2; - if (!x->select_tx_size) - swap_block_ptr(x, ctx, 1, 0, 0, max_plane); + if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, max_plane); memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4], sizeof(ctx->zcoeff_blk[0]) * ctx->num_4x4_blk); - for (i = 0; i < 4; i++) - best_bmodes[i] = xd->mi[0]->bmi[i]; + for (i = 0; i < 4; i++) best_bmodes[i] = xd->mi[0]->bmi[i]; // TODO(debargha): enhance this test with a better distortion prediction // based on qp, activity mask and history @@ -4235,8 +4234,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, const int var_adjust = (x->source_variance < 16); scale -= var_adjust; } - if (ref_frame > INTRA_FRAME && - distortion2 * scale < qstep * qstep) { + if (ref_frame > INTRA_FRAME && distortion2 * scale < qstep * qstep) { early_term = 1; } } @@ -4270,8 +4268,9 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, /* keep record of best filter type */ if (!mode_excluded && !disable_skip && ref_frame != INTRA_FRAME && cm->interp_filter != BILINEAR) { - int64_t ref = filter_cache[cm->interp_filter == SWITCHABLE ? - SWITCHABLE_FILTERS : cm->interp_filter]; + int64_t ref = + filter_cache[cm->interp_filter == SWITCHABLE ? 
SWITCHABLE_FILTERS + : cm->interp_filter]; int64_t adj_rd; for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) { if (ref == INT64_MAX) @@ -4290,11 +4289,9 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, } } - if (early_term) - break; + if (early_term) break; - if (x->skip && !comp_pred) - break; + if (x->skip && !comp_pred) break; } if (best_rd >= best_rd_so_far) { @@ -4308,11 +4305,8 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, // Do Intra UV best rd mode selection if best mode choice above was intra. if (best_mbmode.ref_frame[0] == INTRA_FRAME) { *mi = best_mbmode; - rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra, - &rate_uv_tokenonly, - &dist_uv, - &skip_uv, - BLOCK_8X8, TX_4X4); + rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv_intra, &rate_uv_tokenonly, + &dist_uv, &skip_uv, BLOCK_8X8, TX_4X4); } } @@ -4327,15 +4321,14 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, (cm->interp_filter == best_mbmode.interp_filter) || !is_inter_block(&best_mbmode)); - vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact, - sf->adaptive_rd_thresh, bsize, best_ref_index); + vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact, sf->adaptive_rd_thresh, + bsize, best_ref_index); // macroblock modes *mi = best_mbmode; x->skip |= best_skip2; if (!is_inter_block(&best_mbmode)) { - for (i = 0; i < 4; i++) - xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode; + for (i = 0; i < 4; i++) xd->mi[0]->bmi[i].as_mode = best_bmodes[i].as_mode; } else { for (i = 0; i < 4; ++i) memcpy(&xd->mi[0]->bmi[i], &best_bmodes[i], sizeof(b_mode_info)); @@ -4364,6 +4357,6 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, vp9_zero(best_filter_diff); } - store_coding_context(x, ctx, best_ref_index, - best_pred_diff, best_filter_diff, 0); + store_coding_context(x, ctx, best_ref_index, best_pred_diff, best_filter_diff, + 0); } diff --git a/libs/libvpx/vp9/encoder/vp9_rdopt.h b/libs/libvpx/vp9/encoder/vp9_rdopt.h index 253e4a02df..795c91aef7 100644 --- a/libs/libvpx/vp9/encoder/vp9_rdopt.h 
+++ b/libs/libvpx/vp9/encoder/vp9_rdopt.h @@ -31,19 +31,14 @@ void vp9_rd_pick_intra_mode_sb(struct VP9_COMP *cpi, struct macroblock *x, void vp9_rd_pick_inter_mode_sb(struct VP9_COMP *cpi, struct TileDataEnc *tile_data, - struct macroblock *x, - int mi_row, int mi_col, - struct RD_COST *rd_cost, - BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, - int64_t best_rd_so_far); + struct macroblock *x, int mi_row, int mi_col, + struct RD_COST *rd_cost, BLOCK_SIZE bsize, + PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far); -void vp9_rd_pick_inter_mode_sb_seg_skip(struct VP9_COMP *cpi, - struct TileDataEnc *tile_data, - struct macroblock *x, - struct RD_COST *rd_cost, - BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx, - int64_t best_rd_so_far); +void vp9_rd_pick_inter_mode_sb_seg_skip( + struct VP9_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x, + struct RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, + int64_t best_rd_so_far); int vp9_internal_image_edge(struct VP9_COMP *cpi); int vp9_active_h_edge(struct VP9_COMP *cpi, int mi_row, int mi_step); @@ -52,10 +47,9 @@ int vp9_active_edge_sb(struct VP9_COMP *cpi, int mi_row, int mi_col); void vp9_rd_pick_inter_mode_sub8x8(struct VP9_COMP *cpi, struct TileDataEnc *tile_data, - struct macroblock *x, - int mi_row, int mi_col, - struct RD_COST *rd_cost, - BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, + struct macroblock *x, int mi_row, int mi_col, + struct RD_COST *rd_cost, BLOCK_SIZE bsize, + PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far); #ifdef __cplusplus diff --git a/libs/libvpx/vp9/encoder/vp9_resize.c b/libs/libvpx/vp9/encoder/vp9_resize.c index f4d0db4d5a..f6c4aad4d3 100644 --- a/libs/libvpx/vp9/encoder/vp9_resize.c +++ b/libs/libvpx/vp9/encoder/vp9_resize.c @@ -23,198 +23,118 @@ #include "vp9/common/vp9_common.h" #include "vp9/encoder/vp9_resize.h" -#define FILTER_BITS 7 +#define FILTER_BITS 7 -#define INTERP_TAPS 8 -#define SUBPEL_BITS 5 -#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1) -#define 
INTERP_PRECISION_BITS 32 +#define INTERP_TAPS 8 +#define SUBPEL_BITS 5 +#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1) +#define INTERP_PRECISION_BITS 32 typedef int16_t interp_kernel[INTERP_TAPS]; // Filters for interpolation (0.5-band) - note this also filters integer pels. static const interp_kernel filteredinterp_filters500[(1 << SUBPEL_BITS)] = { - {-3, 0, 35, 64, 35, 0, -3, 0}, - {-3, -1, 34, 64, 36, 1, -3, 0}, - {-3, -1, 32, 64, 38, 1, -3, 0}, - {-2, -2, 31, 63, 39, 2, -3, 0}, - {-2, -2, 29, 63, 41, 2, -3, 0}, - {-2, -2, 28, 63, 42, 3, -4, 0}, - {-2, -3, 27, 63, 43, 4, -4, 0}, - {-2, -3, 25, 62, 45, 5, -4, 0}, - {-2, -3, 24, 62, 46, 5, -4, 0}, - {-2, -3, 23, 61, 47, 6, -4, 0}, - {-2, -3, 21, 60, 49, 7, -4, 0}, - {-1, -4, 20, 60, 50, 8, -4, -1}, - {-1, -4, 19, 59, 51, 9, -4, -1}, - {-1, -4, 17, 58, 52, 10, -4, 0}, - {-1, -4, 16, 57, 53, 12, -4, -1}, - {-1, -4, 15, 56, 54, 13, -4, -1}, - {-1, -4, 14, 55, 55, 14, -4, -1}, - {-1, -4, 13, 54, 56, 15, -4, -1}, - {-1, -4, 12, 53, 57, 16, -4, -1}, - {0, -4, 10, 52, 58, 17, -4, -1}, - {-1, -4, 9, 51, 59, 19, -4, -1}, - {-1, -4, 8, 50, 60, 20, -4, -1}, - {0, -4, 7, 49, 60, 21, -3, -2}, - {0, -4, 6, 47, 61, 23, -3, -2}, - {0, -4, 5, 46, 62, 24, -3, -2}, - {0, -4, 5, 45, 62, 25, -3, -2}, - {0, -4, 4, 43, 63, 27, -3, -2}, - {0, -4, 3, 42, 63, 28, -2, -2}, - {0, -3, 2, 41, 63, 29, -2, -2}, - {0, -3, 2, 39, 63, 31, -2, -2}, - {0, -3, 1, 38, 64, 32, -1, -3}, - {0, -3, 1, 36, 64, 34, -1, -3} + { -3, 0, 35, 64, 35, 0, -3, 0 }, { -3, -1, 34, 64, 36, 1, -3, 0 }, + { -3, -1, 32, 64, 38, 1, -3, 0 }, { -2, -2, 31, 63, 39, 2, -3, 0 }, + { -2, -2, 29, 63, 41, 2, -3, 0 }, { -2, -2, 28, 63, 42, 3, -4, 0 }, + { -2, -3, 27, 63, 43, 4, -4, 0 }, { -2, -3, 25, 62, 45, 5, -4, 0 }, + { -2, -3, 24, 62, 46, 5, -4, 0 }, { -2, -3, 23, 61, 47, 6, -4, 0 }, + { -2, -3, 21, 60, 49, 7, -4, 0 }, { -1, -4, 20, 60, 50, 8, -4, -1 }, + { -1, -4, 19, 59, 51, 9, -4, -1 }, { -1, -4, 17, 58, 52, 10, -4, 0 }, + { -1, -4, 16, 57, 53, 12, -4, -1 }, { -1, -4, 15, 
56, 54, 13, -4, -1 }, + { -1, -4, 14, 55, 55, 14, -4, -1 }, { -1, -4, 13, 54, 56, 15, -4, -1 }, + { -1, -4, 12, 53, 57, 16, -4, -1 }, { 0, -4, 10, 52, 58, 17, -4, -1 }, + { -1, -4, 9, 51, 59, 19, -4, -1 }, { -1, -4, 8, 50, 60, 20, -4, -1 }, + { 0, -4, 7, 49, 60, 21, -3, -2 }, { 0, -4, 6, 47, 61, 23, -3, -2 }, + { 0, -4, 5, 46, 62, 24, -3, -2 }, { 0, -4, 5, 45, 62, 25, -3, -2 }, + { 0, -4, 4, 43, 63, 27, -3, -2 }, { 0, -4, 3, 42, 63, 28, -2, -2 }, + { 0, -3, 2, 41, 63, 29, -2, -2 }, { 0, -3, 2, 39, 63, 31, -2, -2 }, + { 0, -3, 1, 38, 64, 32, -1, -3 }, { 0, -3, 1, 36, 64, 34, -1, -3 } }; // Filters for interpolation (0.625-band) - note this also filters integer pels. static const interp_kernel filteredinterp_filters625[(1 << SUBPEL_BITS)] = { - {-1, -8, 33, 80, 33, -8, -1, 0}, - {-1, -8, 30, 80, 35, -8, -1, 1}, - {-1, -8, 28, 80, 37, -7, -2, 1}, - {0, -8, 26, 79, 39, -7, -2, 1}, - {0, -8, 24, 79, 41, -7, -2, 1}, - {0, -8, 22, 78, 43, -6, -2, 1}, - {0, -8, 20, 78, 45, -5, -3, 1}, - {0, -8, 18, 77, 48, -5, -3, 1}, - {0, -8, 16, 76, 50, -4, -3, 1}, - {0, -8, 15, 75, 52, -3, -4, 1}, - {0, -7, 13, 74, 54, -3, -4, 1}, - {0, -7, 11, 73, 56, -2, -4, 1}, - {0, -7, 10, 71, 58, -1, -4, 1}, - {1, -7, 8, 70, 60, 0, -5, 1}, - {1, -6, 6, 68, 62, 1, -5, 1}, - {1, -6, 5, 67, 63, 2, -5, 1}, - {1, -6, 4, 65, 65, 4, -6, 1}, - {1, -5, 2, 63, 67, 5, -6, 1}, - {1, -5, 1, 62, 68, 6, -6, 1}, - {1, -5, 0, 60, 70, 8, -7, 1}, - {1, -4, -1, 58, 71, 10, -7, 0}, - {1, -4, -2, 56, 73, 11, -7, 0}, - {1, -4, -3, 54, 74, 13, -7, 0}, - {1, -4, -3, 52, 75, 15, -8, 0}, - {1, -3, -4, 50, 76, 16, -8, 0}, - {1, -3, -5, 48, 77, 18, -8, 0}, - {1, -3, -5, 45, 78, 20, -8, 0}, - {1, -2, -6, 43, 78, 22, -8, 0}, - {1, -2, -7, 41, 79, 24, -8, 0}, - {1, -2, -7, 39, 79, 26, -8, 0}, - {1, -2, -7, 37, 80, 28, -8, -1}, - {1, -1, -8, 35, 80, 30, -8, -1}, + { -1, -8, 33, 80, 33, -8, -1, 0 }, { -1, -8, 30, 80, 35, -8, -1, 1 }, + { -1, -8, 28, 80, 37, -7, -2, 1 }, { 0, -8, 26, 79, 39, -7, -2, 1 }, + { 0, -8, 24, 79, 41, -7, 
-2, 1 }, { 0, -8, 22, 78, 43, -6, -2, 1 }, + { 0, -8, 20, 78, 45, -5, -3, 1 }, { 0, -8, 18, 77, 48, -5, -3, 1 }, + { 0, -8, 16, 76, 50, -4, -3, 1 }, { 0, -8, 15, 75, 52, -3, -4, 1 }, + { 0, -7, 13, 74, 54, -3, -4, 1 }, { 0, -7, 11, 73, 56, -2, -4, 1 }, + { 0, -7, 10, 71, 58, -1, -4, 1 }, { 1, -7, 8, 70, 60, 0, -5, 1 }, + { 1, -6, 6, 68, 62, 1, -5, 1 }, { 1, -6, 5, 67, 63, 2, -5, 1 }, + { 1, -6, 4, 65, 65, 4, -6, 1 }, { 1, -5, 2, 63, 67, 5, -6, 1 }, + { 1, -5, 1, 62, 68, 6, -6, 1 }, { 1, -5, 0, 60, 70, 8, -7, 1 }, + { 1, -4, -1, 58, 71, 10, -7, 0 }, { 1, -4, -2, 56, 73, 11, -7, 0 }, + { 1, -4, -3, 54, 74, 13, -7, 0 }, { 1, -4, -3, 52, 75, 15, -8, 0 }, + { 1, -3, -4, 50, 76, 16, -8, 0 }, { 1, -3, -5, 48, 77, 18, -8, 0 }, + { 1, -3, -5, 45, 78, 20, -8, 0 }, { 1, -2, -6, 43, 78, 22, -8, 0 }, + { 1, -2, -7, 41, 79, 24, -8, 0 }, { 1, -2, -7, 39, 79, 26, -8, 0 }, + { 1, -2, -7, 37, 80, 28, -8, -1 }, { 1, -1, -8, 35, 80, 30, -8, -1 }, }; // Filters for interpolation (0.75-band) - note this also filters integer pels. 
static const interp_kernel filteredinterp_filters750[(1 << SUBPEL_BITS)] = { - {2, -11, 25, 96, 25, -11, 2, 0}, - {2, -11, 22, 96, 28, -11, 2, 0}, - {2, -10, 19, 95, 31, -11, 2, 0}, - {2, -10, 17, 95, 34, -12, 2, 0}, - {2, -9, 14, 94, 37, -12, 2, 0}, - {2, -8, 12, 93, 40, -12, 1, 0}, - {2, -8, 9, 92, 43, -12, 1, 1}, - {2, -7, 7, 91, 46, -12, 1, 0}, - {2, -7, 5, 90, 49, -12, 1, 0}, - {2, -6, 3, 88, 52, -12, 0, 1}, - {2, -5, 1, 86, 55, -12, 0, 1}, - {2, -5, -1, 84, 58, -11, 0, 1}, - {2, -4, -2, 82, 61, -11, -1, 1}, - {2, -4, -4, 80, 64, -10, -1, 1}, - {1, -3, -5, 77, 67, -9, -1, 1}, - {1, -3, -6, 75, 70, -8, -2, 1}, - {1, -2, -7, 72, 72, -7, -2, 1}, - {1, -2, -8, 70, 75, -6, -3, 1}, - {1, -1, -9, 67, 77, -5, -3, 1}, - {1, -1, -10, 64, 80, -4, -4, 2}, - {1, -1, -11, 61, 82, -2, -4, 2}, - {1, 0, -11, 58, 84, -1, -5, 2}, - {1, 0, -12, 55, 86, 1, -5, 2}, - {1, 0, -12, 52, 88, 3, -6, 2}, - {0, 1, -12, 49, 90, 5, -7, 2}, - {0, 1, -12, 46, 91, 7, -7, 2}, - {1, 1, -12, 43, 92, 9, -8, 2}, - {0, 1, -12, 40, 93, 12, -8, 2}, - {0, 2, -12, 37, 94, 14, -9, 2}, - {0, 2, -12, 34, 95, 17, -10, 2}, - {0, 2, -11, 31, 95, 19, -10, 2}, - {0, 2, -11, 28, 96, 22, -11, 2} + { 2, -11, 25, 96, 25, -11, 2, 0 }, { 2, -11, 22, 96, 28, -11, 2, 0 }, + { 2, -10, 19, 95, 31, -11, 2, 0 }, { 2, -10, 17, 95, 34, -12, 2, 0 }, + { 2, -9, 14, 94, 37, -12, 2, 0 }, { 2, -8, 12, 93, 40, -12, 1, 0 }, + { 2, -8, 9, 92, 43, -12, 1, 1 }, { 2, -7, 7, 91, 46, -12, 1, 0 }, + { 2, -7, 5, 90, 49, -12, 1, 0 }, { 2, -6, 3, 88, 52, -12, 0, 1 }, + { 2, -5, 1, 86, 55, -12, 0, 1 }, { 2, -5, -1, 84, 58, -11, 0, 1 }, + { 2, -4, -2, 82, 61, -11, -1, 1 }, { 2, -4, -4, 80, 64, -10, -1, 1 }, + { 1, -3, -5, 77, 67, -9, -1, 1 }, { 1, -3, -6, 75, 70, -8, -2, 1 }, + { 1, -2, -7, 72, 72, -7, -2, 1 }, { 1, -2, -8, 70, 75, -6, -3, 1 }, + { 1, -1, -9, 67, 77, -5, -3, 1 }, { 1, -1, -10, 64, 80, -4, -4, 2 }, + { 1, -1, -11, 61, 82, -2, -4, 2 }, { 1, 0, -11, 58, 84, -1, -5, 2 }, + { 1, 0, -12, 55, 86, 1, -5, 2 }, { 1, 0, -12, 52, 88, 3, 
-6, 2 }, + { 0, 1, -12, 49, 90, 5, -7, 2 }, { 0, 1, -12, 46, 91, 7, -7, 2 }, + { 1, 1, -12, 43, 92, 9, -8, 2 }, { 0, 1, -12, 40, 93, 12, -8, 2 }, + { 0, 2, -12, 37, 94, 14, -9, 2 }, { 0, 2, -12, 34, 95, 17, -10, 2 }, + { 0, 2, -11, 31, 95, 19, -10, 2 }, { 0, 2, -11, 28, 96, 22, -11, 2 } }; // Filters for interpolation (0.875-band) - note this also filters integer pels. static const interp_kernel filteredinterp_filters875[(1 << SUBPEL_BITS)] = { - {3, -8, 13, 112, 13, -8, 3, 0}, - {3, -7, 10, 112, 17, -9, 3, -1}, - {2, -6, 7, 111, 21, -9, 3, -1}, - {2, -5, 4, 111, 24, -10, 3, -1}, - {2, -4, 1, 110, 28, -11, 3, -1}, - {1, -3, -1, 108, 32, -12, 4, -1}, - {1, -2, -3, 106, 36, -13, 4, -1}, - {1, -1, -6, 105, 40, -14, 4, -1}, - {1, -1, -7, 102, 44, -14, 4, -1}, - {1, 0, -9, 100, 48, -15, 4, -1}, - {1, 1, -11, 97, 53, -16, 4, -1}, - {0, 1, -12, 95, 57, -16, 4, -1}, - {0, 2, -13, 91, 61, -16, 4, -1}, - {0, 2, -14, 88, 65, -16, 4, -1}, - {0, 3, -15, 84, 69, -17, 4, 0}, - {0, 3, -16, 81, 73, -16, 3, 0}, - {0, 3, -16, 77, 77, -16, 3, 0}, - {0, 3, -16, 73, 81, -16, 3, 0}, - {0, 4, -17, 69, 84, -15, 3, 0}, - {-1, 4, -16, 65, 88, -14, 2, 0}, - {-1, 4, -16, 61, 91, -13, 2, 0}, - {-1, 4, -16, 57, 95, -12, 1, 0}, - {-1, 4, -16, 53, 97, -11, 1, 1}, - {-1, 4, -15, 48, 100, -9, 0, 1}, - {-1, 4, -14, 44, 102, -7, -1, 1}, - {-1, 4, -14, 40, 105, -6, -1, 1}, - {-1, 4, -13, 36, 106, -3, -2, 1}, - {-1, 4, -12, 32, 108, -1, -3, 1}, - {-1, 3, -11, 28, 110, 1, -4, 2}, - {-1, 3, -10, 24, 111, 4, -5, 2}, - {-1, 3, -9, 21, 111, 7, -6, 2}, - {-1, 3, -9, 17, 112, 10, -7, 3} + { 3, -8, 13, 112, 13, -8, 3, 0 }, { 3, -7, 10, 112, 17, -9, 3, -1 }, + { 2, -6, 7, 111, 21, -9, 3, -1 }, { 2, -5, 4, 111, 24, -10, 3, -1 }, + { 2, -4, 1, 110, 28, -11, 3, -1 }, { 1, -3, -1, 108, 32, -12, 4, -1 }, + { 1, -2, -3, 106, 36, -13, 4, -1 }, { 1, -1, -6, 105, 40, -14, 4, -1 }, + { 1, -1, -7, 102, 44, -14, 4, -1 }, { 1, 0, -9, 100, 48, -15, 4, -1 }, + { 1, 1, -11, 97, 53, -16, 4, -1 }, { 0, 1, -12, 95, 57, -16, 4, -1 
}, + { 0, 2, -13, 91, 61, -16, 4, -1 }, { 0, 2, -14, 88, 65, -16, 4, -1 }, + { 0, 3, -15, 84, 69, -17, 4, 0 }, { 0, 3, -16, 81, 73, -16, 3, 0 }, + { 0, 3, -16, 77, 77, -16, 3, 0 }, { 0, 3, -16, 73, 81, -16, 3, 0 }, + { 0, 4, -17, 69, 84, -15, 3, 0 }, { -1, 4, -16, 65, 88, -14, 2, 0 }, + { -1, 4, -16, 61, 91, -13, 2, 0 }, { -1, 4, -16, 57, 95, -12, 1, 0 }, + { -1, 4, -16, 53, 97, -11, 1, 1 }, { -1, 4, -15, 48, 100, -9, 0, 1 }, + { -1, 4, -14, 44, 102, -7, -1, 1 }, { -1, 4, -14, 40, 105, -6, -1, 1 }, + { -1, 4, -13, 36, 106, -3, -2, 1 }, { -1, 4, -12, 32, 108, -1, -3, 1 }, + { -1, 3, -11, 28, 110, 1, -4, 2 }, { -1, 3, -10, 24, 111, 4, -5, 2 }, + { -1, 3, -9, 21, 111, 7, -6, 2 }, { -1, 3, -9, 17, 112, 10, -7, 3 } }; // Filters for interpolation (full-band) - no filtering for integer pixels static const interp_kernel filteredinterp_filters1000[(1 << SUBPEL_BITS)] = { - {0, 0, 0, 128, 0, 0, 0, 0}, - {0, 1, -3, 128, 3, -1, 0, 0}, - {-1, 2, -6, 127, 7, -2, 1, 0}, - {-1, 3, -9, 126, 12, -4, 1, 0}, - {-1, 4, -12, 125, 16, -5, 1, 0}, - {-1, 4, -14, 123, 20, -6, 2, 0}, - {-1, 5, -15, 120, 25, -8, 2, 0}, - {-1, 5, -17, 118, 30, -9, 3, -1}, - {-1, 6, -18, 114, 35, -10, 3, -1}, - {-1, 6, -19, 111, 41, -12, 3, -1}, - {-1, 6, -20, 107, 46, -13, 4, -1}, - {-1, 6, -21, 103, 52, -14, 4, -1}, - {-1, 6, -21, 99, 57, -16, 5, -1}, - {-1, 6, -21, 94, 63, -17, 5, -1}, - {-1, 6, -20, 89, 68, -18, 5, -1}, - {-1, 6, -20, 84, 73, -19, 6, -1}, - {-1, 6, -20, 79, 79, -20, 6, -1}, - {-1, 6, -19, 73, 84, -20, 6, -1}, - {-1, 5, -18, 68, 89, -20, 6, -1}, - {-1, 5, -17, 63, 94, -21, 6, -1}, - {-1, 5, -16, 57, 99, -21, 6, -1}, - {-1, 4, -14, 52, 103, -21, 6, -1}, - {-1, 4, -13, 46, 107, -20, 6, -1}, - {-1, 3, -12, 41, 111, -19, 6, -1}, - {-1, 3, -10, 35, 114, -18, 6, -1}, - {-1, 3, -9, 30, 118, -17, 5, -1}, - {0, 2, -8, 25, 120, -15, 5, -1}, - {0, 2, -6, 20, 123, -14, 4, -1}, - {0, 1, -5, 16, 125, -12, 4, -1}, - {0, 1, -4, 12, 126, -9, 3, -1}, - {0, 1, -2, 7, 127, -6, 2, -1}, - {0, 0, -1, 3, 128, -3, 
1, 0} + { 0, 0, 0, 128, 0, 0, 0, 0 }, { 0, 1, -3, 128, 3, -1, 0, 0 }, + { -1, 2, -6, 127, 7, -2, 1, 0 }, { -1, 3, -9, 126, 12, -4, 1, 0 }, + { -1, 4, -12, 125, 16, -5, 1, 0 }, { -1, 4, -14, 123, 20, -6, 2, 0 }, + { -1, 5, -15, 120, 25, -8, 2, 0 }, { -1, 5, -17, 118, 30, -9, 3, -1 }, + { -1, 6, -18, 114, 35, -10, 3, -1 }, { -1, 6, -19, 111, 41, -12, 3, -1 }, + { -1, 6, -20, 107, 46, -13, 4, -1 }, { -1, 6, -21, 103, 52, -14, 4, -1 }, + { -1, 6, -21, 99, 57, -16, 5, -1 }, { -1, 6, -21, 94, 63, -17, 5, -1 }, + { -1, 6, -20, 89, 68, -18, 5, -1 }, { -1, 6, -20, 84, 73, -19, 6, -1 }, + { -1, 6, -20, 79, 79, -20, 6, -1 }, { -1, 6, -19, 73, 84, -20, 6, -1 }, + { -1, 5, -18, 68, 89, -20, 6, -1 }, { -1, 5, -17, 63, 94, -21, 6, -1 }, + { -1, 5, -16, 57, 99, -21, 6, -1 }, { -1, 4, -14, 52, 103, -21, 6, -1 }, + { -1, 4, -13, 46, 107, -20, 6, -1 }, { -1, 3, -12, 41, 111, -19, 6, -1 }, + { -1, 3, -10, 35, 114, -18, 6, -1 }, { -1, 3, -9, 30, 118, -17, 5, -1 }, + { 0, 2, -8, 25, 120, -15, 5, -1 }, { 0, 2, -6, 20, 123, -14, 4, -1 }, + { 0, 1, -5, 16, 125, -12, 4, -1 }, { 0, 1, -4, 12, 126, -9, 3, -1 }, + { 0, 1, -2, 7, 127, -6, 2, -1 }, { 0, 0, -1, 3, 128, -3, 1, 0 } }; // Filters for factor of 2 downsampling. -static const int16_t vp9_down2_symeven_half_filter[] = {56, 12, -3, -1}; -static const int16_t vp9_down2_symodd_half_filter[] = {64, 35, 0, -3}; +static const int16_t vp9_down2_symeven_half_filter[] = { 56, 12, -3, -1 }; +static const int16_t vp9_down2_symodd_half_filter[] = { 64, 35, 0, -3 }; static const interp_kernel *choose_interp_filter(int inlength, int outlength) { int outlength16 = outlength * 16; @@ -232,11 +152,14 @@ static const interp_kernel *choose_interp_filter(int inlength, int outlength) { static void interpolate(const uint8_t *const input, int inlength, uint8_t *output, int outlength) { - const int64_t delta = (((uint64_t)inlength << 32) + outlength / 2) / - outlength; - const int64_t offset = inlength > outlength ? 
- (((int64_t)(inlength - outlength) << 31) + outlength / 2) / outlength : - -(((int64_t)(outlength - inlength) << 31) + outlength / 2) / outlength; + const int64_t delta = + (((uint64_t)inlength << 32) + outlength / 2) / outlength; + const int64_t offset = + inlength > outlength + ? (((int64_t)(inlength - outlength) << 31) + outlength / 2) / + outlength + : -(((int64_t)(outlength - inlength) << 31) + outlength / 2) / + outlength; uint8_t *optr = output; int x, x1, x2, sum, k, int_pel, sub_pel; int64_t y; @@ -253,8 +176,8 @@ static void interpolate(const uint8_t *const input, int inlength, x1 = x; x = outlength - 1; y = delta * x + offset; - while ((y >> INTERP_PRECISION_BITS) + - (int64_t)(INTERP_TAPS / 2) >= inlength) { + while ((y >> INTERP_PRECISION_BITS) + (int64_t)(INTERP_TAPS / 2) >= + inlength) { x--; y -= delta; } @@ -268,8 +191,8 @@ static void interpolate(const uint8_t *const input, int inlength, sum = 0; for (k = 0; k < INTERP_TAPS; ++k) { const int pk = int_pel - INTERP_TAPS / 2 + 1 + k; - sum += filter[k] * input[(pk < 0 ? 0 : - (pk >= inlength ? inlength - 1 : pk))]; + sum += filter[k] * + input[(pk < 0 ? 0 : (pk >= inlength ? inlength - 1 : pk))]; } *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); } @@ -282,9 +205,9 @@ static void interpolate(const uint8_t *const input, int inlength, filter = interp_filters[sub_pel]; sum = 0; for (k = 0; k < INTERP_TAPS; ++k) - sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0 ? - 0 : - int_pel - INTERP_TAPS / 2 + 1 + k)]; + sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0 + ? 0 + : int_pel - INTERP_TAPS / 2 + 1 + k)]; *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); } // Middle part. @@ -306,9 +229,9 @@ static void interpolate(const uint8_t *const input, int inlength, filter = interp_filters[sub_pel]; sum = 0; for (k = 0; k < INTERP_TAPS; ++k) - sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >= - inlength ? 
inlength - 1 : - int_pel - INTERP_TAPS / 2 + 1 + k)]; + sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >= inlength + ? inlength - 1 + : int_pel - INTERP_TAPS / 2 + 1 + k)]; *optr++ = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); } } @@ -332,7 +255,7 @@ static void down2_symeven(const uint8_t *const input, int length, for (j = 0; j < filter_len_half; ++j) { sum += (input[(i - j < 0 ? 0 : i - j)] + input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) * - filter[j]; + filter[j]; } sum >>= FILTER_BITS; *optr++ = clip_pixel(sum); @@ -362,7 +285,7 @@ static void down2_symeven(const uint8_t *const input, int length, for (j = 0; j < filter_len_half; ++j) { sum += (input[i - j] + input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) * - filter[j]; + filter[j]; } sum >>= FILTER_BITS; *optr++ = clip_pixel(sum); @@ -388,7 +311,7 @@ static void down2_symodd(const uint8_t *const input, int length, for (j = 1; j < filter_len_half; ++j) { sum += (input[(i - j < 0 ? 0 : i - j)] + input[(i + j >= length ? length - 1 : i + j)]) * - filter[j]; + filter[j]; } sum >>= FILTER_BITS; *optr++ = clip_pixel(sum); @@ -417,7 +340,7 @@ static void down2_symodd(const uint8_t *const input, int length, int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0]; for (j = 1; j < filter_len_half; ++j) { sum += (input[i - j] + input[(i + j >= length ? 
length - 1 : i + j)]) * - filter[j]; + filter[j]; } sum >>= FILTER_BITS; *optr++ = clip_pixel(sum); @@ -427,8 +350,7 @@ static void down2_symodd(const uint8_t *const input, int length, static int get_down2_length(int length, int steps) { int s; - for (s = 0; s < steps; ++s) - length = (length + 1) >> 1; + for (s = 0; s < steps; ++s) length = (length + 1) >> 1; return length; } @@ -442,11 +364,8 @@ static int get_down2_steps(int in_length, int out_length) { return steps; } -static void resize_multistep(const uint8_t *const input, - int length, - uint8_t *output, - int olength, - uint8_t *buf) { +static void resize_multistep(const uint8_t *const input, int length, + uint8_t *output, int olength, uint8_t *otmp) { int steps; if (length == olength) { memcpy(output, input, sizeof(output[0]) * length); @@ -457,15 +376,10 @@ static void resize_multistep(const uint8_t *const input, if (steps > 0) { int s; uint8_t *out = NULL; - uint8_t *tmpbuf = NULL; - uint8_t *otmp, *otmp2; + uint8_t *otmp2; int filteredlength = length; - if (!tmpbuf) { - tmpbuf = (uint8_t *)malloc(sizeof(uint8_t) * length); - otmp = tmpbuf; - } else { - otmp = buf; - } + + assert(otmp != NULL); otmp2 = otmp + get_down2_length(length, 1); for (s = 0; s < steps; ++s) { const int proj_filteredlength = get_down2_length(filteredlength, 1); @@ -483,8 +397,6 @@ static void resize_multistep(const uint8_t *const input, if (filteredlength != olength) { interpolate(out, filteredlength, output, olength); } - if (tmpbuf) - free(tmpbuf); } else { interpolate(input, length, output, olength); } @@ -508,34 +420,35 @@ static void fill_arr_to_col(uint8_t *img, int stride, int len, uint8_t *arr) { } } -void vp9_resize_plane(const uint8_t *const input, - int height, - int width, - int in_stride, - uint8_t *output, - int height2, - int width2, +void vp9_resize_plane(const uint8_t *const input, int height, int width, + int in_stride, uint8_t *output, int height2, int width2, int out_stride) { int i; uint8_t *intbuf = (uint8_t 
*)malloc(sizeof(uint8_t) * width2 * height); - uint8_t *tmpbuf = (uint8_t *)malloc(sizeof(uint8_t) * - (width < height ? height : width)); - uint8_t *arrbuf = (uint8_t *)malloc(sizeof(uint8_t) * (height + height2)); + uint8_t *tmpbuf = + (uint8_t *)malloc(sizeof(uint8_t) * (width < height ? height : width)); + uint8_t *arrbuf = (uint8_t *)malloc(sizeof(uint8_t) * height); + uint8_t *arrbuf2 = (uint8_t *)malloc(sizeof(uint8_t) * height2); + if (intbuf == NULL || tmpbuf == NULL || arrbuf == NULL || arrbuf2 == NULL) + goto Error; assert(width > 0); assert(height > 0); assert(width2 > 0); assert(height2 > 0); for (i = 0; i < height; ++i) - resize_multistep(input + in_stride * i, width, - intbuf + width2 * i, width2, tmpbuf); + resize_multistep(input + in_stride * i, width, intbuf + width2 * i, width2, + tmpbuf); for (i = 0; i < width2; ++i) { fill_col_to_arr(intbuf + i, width2, height, arrbuf); - resize_multistep(arrbuf, height, arrbuf + height, height2, tmpbuf); - fill_arr_to_col(output + i, out_stride, height2, arrbuf + height); + resize_multistep(arrbuf, height, arrbuf2, height2, tmpbuf); + fill_arr_to_col(output + i, out_stride, height2, arrbuf2); } + +Error: free(intbuf); free(tmpbuf); free(arrbuf); + free(arrbuf2); } #if CONFIG_VP9_HIGHBITDEPTH @@ -543,9 +456,12 @@ static void highbd_interpolate(const uint16_t *const input, int inlength, uint16_t *output, int outlength, int bd) { const int64_t delta = (((uint64_t)inlength << 32) + outlength / 2) / outlength; - const int64_t offset = inlength > outlength ? - (((int64_t)(inlength - outlength) << 31) + outlength / 2) / outlength : - -(((int64_t)(outlength - inlength) << 31) + outlength / 2) / outlength; + const int64_t offset = + inlength > outlength + ? 
(((int64_t)(inlength - outlength) << 31) + outlength / 2) / + outlength + : -(((int64_t)(outlength - inlength) << 31) + outlength / 2) / + outlength; uint16_t *optr = output; int x, x1, x2, sum, k, int_pel, sub_pel; int64_t y; @@ -562,8 +478,8 @@ static void highbd_interpolate(const uint16_t *const input, int inlength, x1 = x; x = outlength - 1; y = delta * x + offset; - while ((y >> INTERP_PRECISION_BITS) + - (int64_t)(INTERP_TAPS / 2) >= inlength) { + while ((y >> INTERP_PRECISION_BITS) + (int64_t)(INTERP_TAPS / 2) >= + inlength) { x--; y -= delta; } @@ -578,7 +494,7 @@ static void highbd_interpolate(const uint16_t *const input, int inlength, for (k = 0; k < INTERP_TAPS; ++k) { const int pk = int_pel - INTERP_TAPS / 2 + 1 + k; sum += filter[k] * - input[(pk < 0 ? 0 : (pk >= inlength ? inlength - 1 : pk))]; + input[(pk < 0 ? 0 : (pk >= inlength ? inlength - 1 : pk))]; } *optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd); } @@ -591,9 +507,9 @@ static void highbd_interpolate(const uint16_t *const input, int inlength, filter = interp_filters[sub_pel]; sum = 0; for (k = 0; k < INTERP_TAPS; ++k) - sum += filter[k] * - input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0 ? - 0 : int_pel - INTERP_TAPS / 2 + 1 + k)]; + sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k < 0 + ? 0 + : int_pel - INTERP_TAPS / 2 + 1 + k)]; *optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd); } // Middle part. @@ -615,9 +531,9 @@ static void highbd_interpolate(const uint16_t *const input, int inlength, filter = interp_filters[sub_pel]; sum = 0; for (k = 0; k < INTERP_TAPS; ++k) - sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >= - inlength ? inlength - 1 : - int_pel - INTERP_TAPS / 2 + 1 + k)]; + sum += filter[k] * input[(int_pel - INTERP_TAPS / 2 + 1 + k >= inlength + ? 
inlength - 1 + : int_pel - INTERP_TAPS / 2 + 1 + k)]; *optr++ = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd); } } @@ -641,7 +557,7 @@ static void highbd_down2_symeven(const uint16_t *const input, int length, for (j = 0; j < filter_len_half; ++j) { sum += (input[(i - j < 0 ? 0 : i - j)] + input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) * - filter[j]; + filter[j]; } sum >>= FILTER_BITS; *optr++ = clip_pixel_highbd(sum, bd); @@ -671,7 +587,7 @@ static void highbd_down2_symeven(const uint16_t *const input, int length, for (j = 0; j < filter_len_half; ++j) { sum += (input[i - j] + input[(i + 1 + j >= length ? length - 1 : i + 1 + j)]) * - filter[j]; + filter[j]; } sum >>= FILTER_BITS; *optr++ = clip_pixel_highbd(sum, bd); @@ -680,7 +596,7 @@ static void highbd_down2_symeven(const uint16_t *const input, int length, } static void highbd_down2_symodd(const uint16_t *const input, int length, - uint16_t *output, int bd) { + uint16_t *output, int bd) { // Actual filter len = 2 * filter_len_half - 1. static const int16_t *filter = vp9_down2_symodd_half_filter; const int filter_len_half = sizeof(vp9_down2_symodd_half_filter) / 2; @@ -697,7 +613,7 @@ static void highbd_down2_symodd(const uint16_t *const input, int length, for (j = 1; j < filter_len_half; ++j) { sum += (input[(i - j < 0 ? 0 : i - j)] + input[(i + j >= length ? length - 1 : i + j)]) * - filter[j]; + filter[j]; } sum >>= FILTER_BITS; *optr++ = clip_pixel_highbd(sum, bd); @@ -726,7 +642,7 @@ static void highbd_down2_symodd(const uint16_t *const input, int length, int sum = (1 << (FILTER_BITS - 1)) + input[i] * filter[0]; for (j = 1; j < filter_len_half; ++j) { sum += (input[i - j] + input[(i + j >= length ? 
length - 1 : i + j)]) * - filter[j]; + filter[j]; } sum >>= FILTER_BITS; *optr++ = clip_pixel_highbd(sum, bd); @@ -734,12 +650,9 @@ static void highbd_down2_symodd(const uint16_t *const input, int length, } } -static void highbd_resize_multistep(const uint16_t *const input, - int length, - uint16_t *output, - int olength, - uint16_t *buf, - int bd) { +static void highbd_resize_multistep(const uint16_t *const input, int length, + uint16_t *output, int olength, + uint16_t *otmp, int bd) { int steps; if (length == olength) { memcpy(output, input, sizeof(output[0]) * length); @@ -750,15 +663,10 @@ static void highbd_resize_multistep(const uint16_t *const input, if (steps > 0) { int s; uint16_t *out = NULL; - uint16_t *tmpbuf = NULL; - uint16_t *otmp, *otmp2; + uint16_t *otmp2; int filteredlength = length; - if (!tmpbuf) { - tmpbuf = (uint16_t *)malloc(sizeof(uint16_t) * length); - otmp = tmpbuf; - } else { - otmp = buf; - } + + assert(otmp != NULL); otmp2 = otmp + get_down2_length(length, 1); for (s = 0; s < steps; ++s) { const int proj_filteredlength = get_down2_length(filteredlength, 1); @@ -776,8 +684,6 @@ static void highbd_resize_multistep(const uint16_t *const input, if (filteredlength != olength) { highbd_interpolate(out, filteredlength, output, olength, bd); } - if (tmpbuf) - free(tmpbuf); } else { highbd_interpolate(input, length, output, olength, bd); } @@ -803,127 +709,112 @@ static void highbd_fill_arr_to_col(uint16_t *img, int stride, int len, } } -void vp9_highbd_resize_plane(const uint8_t *const input, - int height, - int width, - int in_stride, - uint8_t *output, - int height2, - int width2, - int out_stride, - int bd) { +void vp9_highbd_resize_plane(const uint8_t *const input, int height, int width, + int in_stride, uint8_t *output, int height2, + int width2, int out_stride, int bd) { int i; uint16_t *intbuf = (uint16_t *)malloc(sizeof(uint16_t) * width2 * height); - uint16_t *tmpbuf = (uint16_t *)malloc(sizeof(uint16_t) * - (width < height ? 
height : width)); - uint16_t *arrbuf = (uint16_t *)malloc(sizeof(uint16_t) * (height + height2)); + uint16_t *tmpbuf = + (uint16_t *)malloc(sizeof(uint16_t) * (width < height ? height : width)); + uint16_t *arrbuf = (uint16_t *)malloc(sizeof(uint16_t) * height); + uint16_t *arrbuf2 = (uint16_t *)malloc(sizeof(uint16_t) * height2); + if (intbuf == NULL || tmpbuf == NULL || arrbuf == NULL || arrbuf2 == NULL) + goto Error; for (i = 0; i < height; ++i) { highbd_resize_multistep(CONVERT_TO_SHORTPTR(input + in_stride * i), width, intbuf + width2 * i, width2, tmpbuf, bd); } for (i = 0; i < width2; ++i) { highbd_fill_col_to_arr(intbuf + i, width2, height, arrbuf); - highbd_resize_multistep(arrbuf, height, arrbuf + height, height2, tmpbuf, - bd); + highbd_resize_multistep(arrbuf, height, arrbuf2, height2, tmpbuf, bd); highbd_fill_arr_to_col(CONVERT_TO_SHORTPTR(output + i), out_stride, height2, - arrbuf + height); + arrbuf2); } + +Error: free(intbuf); free(tmpbuf); free(arrbuf); + free(arrbuf2); } #endif // CONFIG_VP9_HIGHBITDEPTH -void vp9_resize_frame420(const uint8_t *const y, - int y_stride, +void vp9_resize_frame420(const uint8_t *const y, int y_stride, const uint8_t *const u, const uint8_t *const v, - int uv_stride, - int height, int width, - uint8_t *oy, int oy_stride, - uint8_t *ou, uint8_t *ov, int ouv_stride, - int oheight, int owidth) { - vp9_resize_plane(y, height, width, y_stride, - oy, oheight, owidth, oy_stride); - vp9_resize_plane(u, height / 2, width / 2, uv_stride, - ou, oheight / 2, owidth / 2, ouv_stride); - vp9_resize_plane(v, height / 2, width / 2, uv_stride, - ov, oheight / 2, owidth / 2, ouv_stride); + int uv_stride, int height, int width, uint8_t *oy, + int oy_stride, uint8_t *ou, uint8_t *ov, + int ouv_stride, int oheight, int owidth) { + vp9_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride); + vp9_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2, + owidth / 2, ouv_stride); + vp9_resize_plane(v, height / 2, 
width / 2, uv_stride, ov, oheight / 2, + owidth / 2, ouv_stride); } void vp9_resize_frame422(const uint8_t *const y, int y_stride, const uint8_t *const u, const uint8_t *const v, - int uv_stride, - int height, int width, - uint8_t *oy, int oy_stride, - uint8_t *ou, uint8_t *ov, int ouv_stride, - int oheight, int owidth) { - vp9_resize_plane(y, height, width, y_stride, - oy, oheight, owidth, oy_stride); - vp9_resize_plane(u, height, width / 2, uv_stride, - ou, oheight, owidth / 2, ouv_stride); - vp9_resize_plane(v, height, width / 2, uv_stride, - ov, oheight, owidth / 2, ouv_stride); + int uv_stride, int height, int width, uint8_t *oy, + int oy_stride, uint8_t *ou, uint8_t *ov, + int ouv_stride, int oheight, int owidth) { + vp9_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride); + vp9_resize_plane(u, height, width / 2, uv_stride, ou, oheight, owidth / 2, + ouv_stride); + vp9_resize_plane(v, height, width / 2, uv_stride, ov, oheight, owidth / 2, + ouv_stride); } void vp9_resize_frame444(const uint8_t *const y, int y_stride, const uint8_t *const u, const uint8_t *const v, - int uv_stride, - int height, int width, - uint8_t *oy, int oy_stride, - uint8_t *ou, uint8_t *ov, int ouv_stride, - int oheight, int owidth) { - vp9_resize_plane(y, height, width, y_stride, - oy, oheight, owidth, oy_stride); - vp9_resize_plane(u, height, width, uv_stride, - ou, oheight, owidth, ouv_stride); - vp9_resize_plane(v, height, width, uv_stride, - ov, oheight, owidth, ouv_stride); + int uv_stride, int height, int width, uint8_t *oy, + int oy_stride, uint8_t *ou, uint8_t *ov, + int ouv_stride, int oheight, int owidth) { + vp9_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride); + vp9_resize_plane(u, height, width, uv_stride, ou, oheight, owidth, + ouv_stride); + vp9_resize_plane(v, height, width, uv_stride, ov, oheight, owidth, + ouv_stride); } #if CONFIG_VP9_HIGHBITDEPTH -void vp9_highbd_resize_frame420(const uint8_t *const y, - int y_stride, 
+void vp9_highbd_resize_frame420(const uint8_t *const y, int y_stride, const uint8_t *const u, const uint8_t *const v, - int uv_stride, - int height, int width, - uint8_t *oy, int oy_stride, - uint8_t *ou, uint8_t *ov, int ouv_stride, - int oheight, int owidth, int bd) { - vp9_highbd_resize_plane(y, height, width, y_stride, - oy, oheight, owidth, oy_stride, bd); - vp9_highbd_resize_plane(u, height / 2, width / 2, uv_stride, - ou, oheight / 2, owidth / 2, ouv_stride, bd); - vp9_highbd_resize_plane(v, height / 2, width / 2, uv_stride, - ov, oheight / 2, owidth / 2, ouv_stride, bd); + int uv_stride, int height, int width, + uint8_t *oy, int oy_stride, uint8_t *ou, + uint8_t *ov, int ouv_stride, int oheight, + int owidth, int bd) { + vp9_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth, + oy_stride, bd); + vp9_highbd_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2, + owidth / 2, ouv_stride, bd); + vp9_highbd_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2, + owidth / 2, ouv_stride, bd); } void vp9_highbd_resize_frame422(const uint8_t *const y, int y_stride, const uint8_t *const u, const uint8_t *const v, - int uv_stride, - int height, int width, - uint8_t *oy, int oy_stride, - uint8_t *ou, uint8_t *ov, int ouv_stride, - int oheight, int owidth, int bd) { - vp9_highbd_resize_plane(y, height, width, y_stride, - oy, oheight, owidth, oy_stride, bd); - vp9_highbd_resize_plane(u, height, width / 2, uv_stride, - ou, oheight, owidth / 2, ouv_stride, bd); - vp9_highbd_resize_plane(v, height, width / 2, uv_stride, - ov, oheight, owidth / 2, ouv_stride, bd); + int uv_stride, int height, int width, + uint8_t *oy, int oy_stride, uint8_t *ou, + uint8_t *ov, int ouv_stride, int oheight, + int owidth, int bd) { + vp9_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth, + oy_stride, bd); + vp9_highbd_resize_plane(u, height, width / 2, uv_stride, ou, oheight, + owidth / 2, ouv_stride, bd); + 
vp9_highbd_resize_plane(v, height, width / 2, uv_stride, ov, oheight, + owidth / 2, ouv_stride, bd); } void vp9_highbd_resize_frame444(const uint8_t *const y, int y_stride, const uint8_t *const u, const uint8_t *const v, - int uv_stride, - int height, int width, - uint8_t *oy, int oy_stride, - uint8_t *ou, uint8_t *ov, int ouv_stride, - int oheight, int owidth, int bd) { - vp9_highbd_resize_plane(y, height, width, y_stride, - oy, oheight, owidth, oy_stride, bd); - vp9_highbd_resize_plane(u, height, width, uv_stride, - ou, oheight, owidth, ouv_stride, bd); - vp9_highbd_resize_plane(v, height, width, uv_stride, - ov, oheight, owidth, ouv_stride, bd); + int uv_stride, int height, int width, + uint8_t *oy, int oy_stride, uint8_t *ou, + uint8_t *ov, int ouv_stride, int oheight, + int owidth, int bd) { + vp9_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth, + oy_stride, bd); + vp9_highbd_resize_plane(u, height, width, uv_stride, ou, oheight, owidth, + ouv_stride, bd); + vp9_highbd_resize_plane(v, height, width, uv_stride, ov, oheight, owidth, + ouv_stride, bd); } #endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vp9/encoder/vp9_resize.h b/libs/libvpx/vp9/encoder/vp9_resize.h index b5feb38606..d3282ee191 100644 --- a/libs/libvpx/vp9/encoder/vp9_resize.h +++ b/libs/libvpx/vp9/encoder/vp9_resize.h @@ -18,116 +18,51 @@ extern "C" { #endif -void vp9_resize_plane(const uint8_t *const input, - int height, - int width, - int in_stride, - uint8_t *output, - int height2, - int width2, +void vp9_resize_plane(const uint8_t *const input, int height, int width, + int in_stride, uint8_t *output, int height2, int width2, int out_stride); -void vp9_resize_frame420(const uint8_t *const y, - int y_stride, - const uint8_t *const u, - const uint8_t *const v, - int uv_stride, - int height, - int width, - uint8_t *oy, - int oy_stride, - uint8_t *ou, - uint8_t *ov, - int ouv_stride, - int oheight, - int owidth); -void vp9_resize_frame422(const uint8_t *const y, - int 
y_stride, - const uint8_t *const u, - const uint8_t *const v, - int uv_stride, - int height, - int width, - uint8_t *oy, - int oy_stride, - uint8_t *ou, - uint8_t *ov, - int ouv_stride, - int oheight, - int owidth); -void vp9_resize_frame444(const uint8_t *const y, - int y_stride, - const uint8_t *const u, - const uint8_t *const v, - int uv_stride, - int height, - int width, - uint8_t *oy, - int oy_stride, - uint8_t *ou, - uint8_t *ov, - int ouv_stride, - int oheight, - int owidth); +void vp9_resize_frame420(const uint8_t *const y, int y_stride, + const uint8_t *const u, const uint8_t *const v, + int uv_stride, int height, int width, uint8_t *oy, + int oy_stride, uint8_t *ou, uint8_t *ov, + int ouv_stride, int oheight, int owidth); +void vp9_resize_frame422(const uint8_t *const y, int y_stride, + const uint8_t *const u, const uint8_t *const v, + int uv_stride, int height, int width, uint8_t *oy, + int oy_stride, uint8_t *ou, uint8_t *ov, + int ouv_stride, int oheight, int owidth); +void vp9_resize_frame444(const uint8_t *const y, int y_stride, + const uint8_t *const u, const uint8_t *const v, + int uv_stride, int height, int width, uint8_t *oy, + int oy_stride, uint8_t *ou, uint8_t *ov, + int ouv_stride, int oheight, int owidth); #if CONFIG_VP9_HIGHBITDEPTH -void vp9_highbd_resize_plane(const uint8_t *const input, - int height, - int width, - int in_stride, - uint8_t *output, - int height2, - int width2, - int out_stride, - int bd); -void vp9_highbd_resize_frame420(const uint8_t *const y, - int y_stride, - const uint8_t *const u, - const uint8_t *const v, - int uv_stride, - int height, - int width, - uint8_t *oy, - int oy_stride, - uint8_t *ou, - uint8_t *ov, - int ouv_stride, - int oheight, - int owidth, - int bd); -void vp9_highbd_resize_frame422(const uint8_t *const y, - int y_stride, - const uint8_t *const u, - const uint8_t *const v, - int uv_stride, - int height, - int width, - uint8_t *oy, - int oy_stride, - uint8_t *ou, - uint8_t *ov, - int ouv_stride, - 
int oheight, - int owidth, - int bd); -void vp9_highbd_resize_frame444(const uint8_t *const y, - int y_stride, - const uint8_t *const u, - const uint8_t *const v, - int uv_stride, - int height, - int width, - uint8_t *oy, - int oy_stride, - uint8_t *ou, - uint8_t *ov, - int ouv_stride, - int oheight, - int owidth, - int bd); -#endif // CONFIG_VP9_HIGHBITDEPTH +void vp9_highbd_resize_plane(const uint8_t *const input, int height, int width, + int in_stride, uint8_t *output, int height2, + int width2, int out_stride, int bd); +void vp9_highbd_resize_frame420(const uint8_t *const y, int y_stride, + const uint8_t *const u, const uint8_t *const v, + int uv_stride, int height, int width, + uint8_t *oy, int oy_stride, uint8_t *ou, + uint8_t *ov, int ouv_stride, int oheight, + int owidth, int bd); +void vp9_highbd_resize_frame422(const uint8_t *const y, int y_stride, + const uint8_t *const u, const uint8_t *const v, + int uv_stride, int height, int width, + uint8_t *oy, int oy_stride, uint8_t *ou, + uint8_t *ov, int ouv_stride, int oheight, + int owidth, int bd); +void vp9_highbd_resize_frame444(const uint8_t *const y, int y_stride, + const uint8_t *const u, const uint8_t *const v, + int uv_stride, int height, int width, + uint8_t *oy, int oy_stride, uint8_t *ou, + uint8_t *ov, int ouv_stride, int oheight, + int owidth, int bd); +#endif // CONFIG_VP9_HIGHBITDEPTH #ifdef __cplusplus } // extern "C" #endif -#endif // VP9_ENCODER_VP9_RESIZE_H_ +#endif // VP9_ENCODER_VP9_RESIZE_H_ diff --git a/libs/libvpx/vp9/encoder/vp9_segmentation.c b/libs/libvpx/vp9/encoder/vp9_segmentation.c index 5a0a23d489..4a5a68e07a 100644 --- a/libs/libvpx/vp9/encoder/vp9_segmentation.c +++ b/libs/libvpx/vp9/encoder/vp9_segmentation.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #include #include "vpx_mem/vpx_mem.h" @@ -31,8 +30,7 @@ void vp9_disable_segmentation(struct segmentation *seg) { seg->update_data = 0; } -void vp9_set_segment_data(struct segmentation *seg, - signed char *feature_data, +void vp9_set_segment_data(struct segmentation *seg, signed char *feature_data, unsigned char abs_delta) { seg->abs_delta = abs_delta; @@ -75,13 +73,11 @@ static int cost_segmap(int *segcounts, vpx_prob *probs) { const int c4567 = c45 + c67; // Cost the top node of the tree - int cost = c0123 * vp9_cost_zero(probs[0]) + - c4567 * vp9_cost_one(probs[0]); + int cost = c0123 * vp9_cost_zero(probs[0]) + c4567 * vp9_cost_one(probs[0]); // Cost subsequent levels if (c0123 > 0) { - cost += c01 * vp9_cost_zero(probs[1]) + - c23 * vp9_cost_one(probs[1]); + cost += c01 * vp9_cost_zero(probs[1]) + c23 * vp9_cost_one(probs[1]); if (c01 > 0) cost += segcounts[0] * vp9_cost_zero(probs[3]) + @@ -92,8 +88,7 @@ static int cost_segmap(int *segcounts, vpx_prob *probs) { } if (c4567 > 0) { - cost += c45 * vp9_cost_zero(probs[2]) + - c67 * vp9_cost_one(probs[2]); + cost += c45 * vp9_cost_zero(probs[2]) + c67 * vp9_cost_one(probs[2]); if (c45 > 0) cost += segcounts[4] * vp9_cost_zero(probs[5]) + @@ -110,12 +105,11 @@ static void count_segs(const VP9_COMMON *cm, MACROBLOCKD *xd, const TileInfo *tile, MODE_INFO **mi, int *no_pred_segcounts, int (*temporal_predictor_count)[2], - int *t_unpred_seg_counts, - int bw, int bh, int mi_row, int mi_col) { + int *t_unpred_seg_counts, int bw, int bh, int mi_row, + int mi_col) { int segment_id; - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; + if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; xd->mi = mi; segment_id = xd->mi[0]->segment_id; @@ -129,8 +123,8 @@ static void count_segs(const VP9_COMMON *cm, MACROBLOCKD *xd, if (cm->frame_type != KEY_FRAME) { const BLOCK_SIZE bsize = xd->mi[0]->sb_type; // Test to see if the segment id matches the predicted value. 
- const int pred_segment_id = get_segment_id(cm, cm->last_frame_seg_map, - bsize, mi_row, mi_col); + const int pred_segment_id = + get_segment_id(cm, cm->last_frame_seg_map, bsize, mi_row, mi_col); const int pred_flag = pred_segment_id == segment_id; const int pred_context = vp9_get_pred_context_seg_id(xd); @@ -140,8 +134,7 @@ static void count_segs(const VP9_COMMON *cm, MACROBLOCKD *xd, temporal_predictor_count[pred_context][pred_flag]++; // Update the "unpredicted" segment count - if (!pred_flag) - t_unpred_seg_counts[segment_id]++; + if (!pred_flag) t_unpred_seg_counts[segment_id]++; } } @@ -149,15 +142,13 @@ static void count_segs_sb(const VP9_COMMON *cm, MACROBLOCKD *xd, const TileInfo *tile, MODE_INFO **mi, int *no_pred_segcounts, int (*temporal_predictor_count)[2], - int *t_unpred_seg_counts, - int mi_row, int mi_col, + int *t_unpred_seg_counts, int mi_row, int mi_col, BLOCK_SIZE bsize) { const int mis = cm->mi_stride; int bw, bh; const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2; - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) - return; + if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; bw = num_8x8_blocks_wide_lookup[mi[0]->sb_type]; bh = num_8x8_blocks_high_lookup[mi[0]->sb_type]; @@ -174,9 +165,9 @@ static void count_segs_sb(const VP9_COMMON *cm, MACROBLOCKD *xd, } else if (bw < bs && bh == bs) { count_segs(cm, xd, tile, mi, no_pred_segcounts, temporal_predictor_count, t_unpred_seg_counts, hbs, bs, mi_row, mi_col); - count_segs(cm, xd, tile, mi + hbs, - no_pred_segcounts, temporal_predictor_count, t_unpred_seg_counts, - hbs, bs, mi_row, mi_col + hbs); + count_segs(cm, xd, tile, mi + hbs, no_pred_segcounts, + temporal_predictor_count, t_unpred_seg_counts, hbs, bs, mi_row, + mi_col + hbs); } else { const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize]; int n; @@ -187,9 +178,8 @@ static void count_segs_sb(const VP9_COMMON *cm, MACROBLOCKD *xd, const int mi_dc = hbs * (n & 1); const int mi_dr = hbs * (n >> 1); - 
count_segs_sb(cm, xd, tile, &mi[mi_dr * mis + mi_dc], - no_pred_segcounts, temporal_predictor_count, - t_unpred_seg_counts, + count_segs_sb(cm, xd, tile, &mi[mi_dr * mis + mi_dc], no_pred_segcounts, + temporal_predictor_count, t_unpred_seg_counts, mi_row + mi_dr, mi_col + mi_dc, subsize); } } @@ -230,8 +220,8 @@ void vp9_choose_segmap_coding_method(VP9_COMMON *cm, MACROBLOCKD *xd) { for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end; mi_col += 8, mi += 8) count_segs_sb(cm, xd, &tile, mi, no_pred_segcounts, - temporal_predictor_count, t_unpred_seg_counts, - mi_row, mi_col, BLOCK_64X64); + temporal_predictor_count, t_unpred_seg_counts, mi_row, + mi_col, BLOCK_64X64); } } diff --git a/libs/libvpx/vp9/encoder/vp9_segmentation.h b/libs/libvpx/vp9/encoder/vp9_segmentation.h index 8c6944ad13..562805543b 100644 --- a/libs/libvpx/vp9/encoder/vp9_segmentation.h +++ b/libs/libvpx/vp9/encoder/vp9_segmentation.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP9_ENCODER_VP9_SEGMENTATION_H_ #define VP9_ENCODER_VP9_SEGMENTATION_H_ @@ -22,11 +21,9 @@ extern "C" { void vp9_enable_segmentation(struct segmentation *seg); void vp9_disable_segmentation(struct segmentation *seg); -void vp9_disable_segfeature(struct segmentation *seg, - int segment_id, +void vp9_disable_segfeature(struct segmentation *seg, int segment_id, SEG_LVL_FEATURES feature_id); -void vp9_clear_segdata(struct segmentation *seg, - int segment_id, +void vp9_clear_segdata(struct segmentation *seg, int segment_id, SEG_LVL_FEATURES feature_id); // The values given for each segment can be either deltas (from the default diff --git a/libs/libvpx/vp9/encoder/vp9_skin_detection.c b/libs/libvpx/vp9/encoder/vp9_skin_detection.c index 8e117eb084..3f3d48fb9f 100644 --- a/libs/libvpx/vp9/encoder/vp9_skin_detection.c +++ b/libs/libvpx/vp9/encoder/vp9_skin_detection.c @@ -15,14 +15,17 @@ #include "vp9/encoder/vp9_encoder.h" #include "vp9/encoder/vp9_skin_detection.h" -#define 
MODEL_MODE 0 +#define MODEL_MODE 1 // Fixed-point skin color model parameters. -static const int skin_mean[5][2] = { - {7463, 9614}, {6400, 10240}, {7040, 10240}, {8320, 9280}, {6800, 9614}}; -static const int skin_inv_cov[4] = {4107, 1663, 1663, 2157}; // q16 -static const int skin_threshold[6] = {1570636, 1400000, 800000, 800000, 800000, - 800000}; // q18 +static const int skin_mean[5][2] = { { 7463, 9614 }, + { 6400, 10240 }, + { 7040, 10240 }, + { 8320, 9280 }, + { 6800, 9614 } }; +static const int skin_inv_cov[4] = { 4107, 1663, 1663, 2157 }; // q16 +static const int skin_threshold[6] = { 1570636, 1400000, 800000, + 800000, 800000, 800000 }; // q18 // Thresholds on luminance. static const int y_low = 40; @@ -41,14 +44,14 @@ static int evaluate_skin_color_difference(int cb, int cr, int idx) { const int cb_diff_q2 = (cb_diff_q12 + (1 << 9)) >> 10; const int cbcr_diff_q2 = (cbcr_diff_q12 + (1 << 9)) >> 10; const int cr_diff_q2 = (cr_diff_q12 + (1 << 9)) >> 10; - const int skin_diff = skin_inv_cov[0] * cb_diff_q2 + - skin_inv_cov[1] * cbcr_diff_q2 + - skin_inv_cov[2] * cbcr_diff_q2 + - skin_inv_cov[3] * cr_diff_q2; + const int skin_diff = + skin_inv_cov[0] * cb_diff_q2 + skin_inv_cov[1] * cbcr_diff_q2 + + skin_inv_cov[2] * cbcr_diff_q2 + skin_inv_cov[3] * cr_diff_q2; return skin_diff; } -int vp9_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr) { +int vp9_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr, + int motion) { if (y < y_low || y > y_high) { return 0; } else { @@ -57,21 +60,22 @@ int vp9_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr) { } else { int i = 0; // Exit on grey. - if (cb == 128 && cr == 128) - return 0; + if (cb == 128 && cr == 128) return 0; // Exit on very strong cb. - if (cb > 150 && cr < 110) - return 0; - // Exit on (another) low luminance threshold if either color is high. 
- if (y < 50 && (cb > 140 || cr > 140)) - return 0; + if (cb > 150 && cr < 110) return 0; for (; i < 5; i++) { - if (evaluate_skin_color_difference(cb, cr, i) < skin_threshold[i + 1]) { - return 1; + int skin_color_diff = evaluate_skin_color_difference(cb, cr, i); + if (skin_color_diff < skin_threshold[i + 1]) { + if (y < 60 && skin_color_diff > 3 * (skin_threshold[i + 1] >> 2)) + return 0; + else if (motion == 0 && + skin_color_diff > (skin_threshold[i + 1] >> 1)) + return 0; + else + return 1; } // Exit if difference is much large than the threshold. - if (evaluate_skin_color_difference(cb, cr, i) > - (skin_threshold[i + 1] << 3)) { + if (skin_color_diff > (skin_threshold[i + 1] << 3)) { return 0; } } @@ -81,19 +85,26 @@ int vp9_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr) { } int vp9_compute_skin_block(const uint8_t *y, const uint8_t *u, const uint8_t *v, - int stride, int strideuv, int bsize) { - // Take center pixel in block to determine is_skin. - const int y_width_shift = (4 << b_width_log2_lookup[bsize]) >> 1; - const int y_height_shift = (4 << b_height_log2_lookup[bsize]) >> 1; - const int uv_width_shift = y_width_shift >> 1; - const int uv_height_shift = y_height_shift >> 1; - const uint8_t ysource = y[y_height_shift * stride + y_width_shift]; - const uint8_t usource = u[uv_height_shift * strideuv + uv_width_shift]; - const uint8_t vsource = v[uv_height_shift * strideuv + uv_width_shift]; - return vp9_skin_pixel(ysource, usource, vsource); + int stride, int strideuv, int bsize, + int consec_zeromv, int curr_motion_magn) { + // No skin if block has been zero/small motion for long consecutive time. + if (consec_zeromv > 60 && curr_motion_magn == 0) { + return 0; + } else { + int motion = 1; + // Take center pixel in block to determine is_skin. 
+ const int y_width_shift = (4 << b_width_log2_lookup[bsize]) >> 1; + const int y_height_shift = (4 << b_height_log2_lookup[bsize]) >> 1; + const int uv_width_shift = y_width_shift >> 1; + const int uv_height_shift = y_height_shift >> 1; + const uint8_t ysource = y[y_height_shift * stride + y_width_shift]; + const uint8_t usource = u[uv_height_shift * strideuv + uv_width_shift]; + const uint8_t vsource = v[uv_height_shift * strideuv + uv_width_shift]; + if (consec_zeromv > 25 && curr_motion_magn == 0) motion = 0; + return vp9_skin_pixel(ysource, usource, vsource, motion); + } } - #ifdef OUTPUT_YUV_SKINMAP // For viewing skin map on input source. void vp9_compute_skin_map(VP9_COMP *const cpi, FILE *yuv_skinmap_file) { @@ -113,14 +124,14 @@ void vp9_compute_skin_map(VP9_COMP *const cpi, FILE *yuv_skinmap_file) { int shuv = shy - 1; int fac = y_bsize / 8; // Use center pixel or average of center 2x2 pixels. - int mode_filter = 1; + int mode_filter = 0; YV12_BUFFER_CONFIG skinmap; memset(&skinmap, 0, sizeof(YV12_BUFFER_CONFIG)); - if (vpx_alloc_frame_buffer(&skinmap, cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, - VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment)) { - vpx_free_frame_buffer(&skinmap); - return; + if (vpx_alloc_frame_buffer(&skinmap, cm->width, cm->height, cm->subsampling_x, + cm->subsampling_y, VP9_ENC_BORDER_IN_PIXELS, + cm->byte_alignment)) { + vpx_free_frame_buffer(&skinmap); + return; } memset(skinmap.buffer_alloc, 128, skinmap.frame_size); y = skinmap.y_buffer; @@ -130,27 +141,45 @@ void vp9_compute_skin_map(VP9_COMP *const cpi, FILE *yuv_skinmap_file) { for (mi_row = 0; mi_row < cm->mi_rows - 1; mi_row += fac) { num_bl = 0; for (mi_col = 0; mi_col < cm->mi_cols - 1; mi_col += fac) { - // Select pixel for each block for skin detection. - // Use center pixel, or 2x2 average at center. 
- uint8_t ysource = src_y[ypos * src_ystride + ypos]; - uint8_t usource = src_u[uvpos * src_uvstride + uvpos]; - uint8_t vsource = src_v[uvpos * src_uvstride + uvpos]; - uint8_t ysource2 = src_y[(ypos + 1) * src_ystride + ypos]; - uint8_t usource2 = src_u[(uvpos + 1) * src_uvstride + uvpos]; - uint8_t vsource2 = src_v[(uvpos + 1) * src_uvstride + uvpos]; - uint8_t ysource3 = src_y[ypos * src_ystride + (ypos + 1)]; - uint8_t usource3 = src_u[uvpos * src_uvstride + (uvpos + 1)]; - uint8_t vsource3 = src_v[uvpos * src_uvstride + (uvpos + 1)]; - uint8_t ysource4 = src_y[(ypos + 1) * src_ystride + (ypos + 1)]; - uint8_t usource4 = src_u[(uvpos + 1) * src_uvstride + (uvpos + 1)]; - uint8_t vsource4 = src_v[(uvpos + 1) * src_uvstride + (uvpos + 1)]; int is_skin = 0; if (mode_filter == 1) { + // Use 2x2 average at center. + uint8_t ysource = src_y[ypos * src_ystride + ypos]; + uint8_t usource = src_u[uvpos * src_uvstride + uvpos]; + uint8_t vsource = src_v[uvpos * src_uvstride + uvpos]; + uint8_t ysource2 = src_y[(ypos + 1) * src_ystride + ypos]; + uint8_t usource2 = src_u[(uvpos + 1) * src_uvstride + uvpos]; + uint8_t vsource2 = src_v[(uvpos + 1) * src_uvstride + uvpos]; + uint8_t ysource3 = src_y[ypos * src_ystride + (ypos + 1)]; + uint8_t usource3 = src_u[uvpos * src_uvstride + (uvpos + 1)]; + uint8_t vsource3 = src_v[uvpos * src_uvstride + (uvpos + 1)]; + uint8_t ysource4 = src_y[(ypos + 1) * src_ystride + (ypos + 1)]; + uint8_t usource4 = src_u[(uvpos + 1) * src_uvstride + (uvpos + 1)]; + uint8_t vsource4 = src_v[(uvpos + 1) * src_uvstride + (uvpos + 1)]; ysource = (ysource + ysource2 + ysource3 + ysource4) >> 2; usource = (usource + usource2 + usource3 + usource4) >> 2; vsource = (vsource + vsource2 + vsource3 + vsource4) >> 2; + is_skin = vp9_skin_pixel(ysource, usource, vsource, 1); + } else { + int block_size = BLOCK_8X8; + int consec_zeromv = 0; + int bl_index = mi_row * cm->mi_cols + mi_col; + int bl_index1 = bl_index + 1; + int bl_index2 = bl_index + 
cm->mi_cols; + int bl_index3 = bl_index2 + 1; + if (y_bsize == 8) + consec_zeromv = cpi->consec_zero_mv[bl_index]; + else + consec_zeromv = + VPXMIN(cpi->consec_zero_mv[bl_index], + VPXMIN(cpi->consec_zero_mv[bl_index1], + VPXMIN(cpi->consec_zero_mv[bl_index2], + cpi->consec_zero_mv[bl_index3]))); + if (y_bsize == 16) block_size = BLOCK_16X16; + is_skin = + vp9_compute_skin_block(src_y, src_u, src_v, src_ystride, + src_uvstride, block_size, consec_zeromv, 0); } - is_skin = vp9_skin_pixel(ysource, usource, vsource); for (i = 0; i < y_bsize; i++) { for (j = 0; j < y_bsize; j++) { if (is_skin) diff --git a/libs/libvpx/vp9/encoder/vp9_skin_detection.h b/libs/libvpx/vp9/encoder/vp9_skin_detection.h index 73f7c39d9a..c77382dbd7 100644 --- a/libs/libvpx/vp9/encoder/vp9_skin_detection.h +++ b/libs/libvpx/vp9/encoder/vp9_skin_detection.h @@ -21,10 +21,12 @@ struct VP9_COMP; // #define OUTPUT_YUV_SKINMAP -int vp9_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr); +int vp9_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr, + int motion); int vp9_compute_skin_block(const uint8_t *y, const uint8_t *u, const uint8_t *v, - int stride, int strideuv, int bsize); + int stride, int strideuv, int bsize, + int consec_zeromv, int curr_motion_magn); #ifdef OUTPUT_YUV_SKINMAP // For viewing skin map on input source. 
diff --git a/libs/libvpx/vp9/encoder/vp9_speed_features.c b/libs/libvpx/vp9/encoder/vp9_speed_features.c index f6845078a6..c1cf48fa7b 100644 --- a/libs/libvpx/vp9/encoder/vp9_speed_features.c +++ b/libs/libvpx/vp9/encoder/vp9_speed_features.c @@ -16,21 +16,23 @@ #include "vpx_dsp/vpx_dsp_common.h" // Mesh search patters for various speed settings -static MESH_PATTERN best_quality_mesh_pattern[MAX_MESH_STEP] = - {{64, 4}, {28, 2}, {15, 1}, {7, 1}}; +static MESH_PATTERN best_quality_mesh_pattern[MAX_MESH_STEP] = { + { 64, 4 }, { 28, 2 }, { 15, 1 }, { 7, 1 } +}; #define MAX_MESH_SPEED 5 // Max speed setting for mesh motion method -static MESH_PATTERN good_quality_mesh_patterns[MAX_MESH_SPEED + 1] - [MAX_MESH_STEP] = - {{{64, 8}, {28, 4}, {15, 1}, {7, 1}}, - {{64, 8}, {28, 4}, {15, 1}, {7, 1}}, - {{64, 8}, {14, 2}, {7, 1}, {7, 1}}, - {{64, 16}, {24, 8}, {12, 4}, {7, 1}}, - {{64, 16}, {24, 8}, {12, 4}, {7, 1}}, - {{64, 16}, {24, 8}, {12, 4}, {7, 1}}, +static MESH_PATTERN + good_quality_mesh_patterns[MAX_MESH_SPEED + 1][MAX_MESH_STEP] = { + { { 64, 8 }, { 28, 4 }, { 15, 1 }, { 7, 1 } }, + { { 64, 8 }, { 28, 4 }, { 15, 1 }, { 7, 1 } }, + { { 64, 8 }, { 14, 2 }, { 7, 1 }, { 7, 1 } }, + { { 64, 16 }, { 24, 8 }, { 12, 4 }, { 7, 1 } }, + { { 64, 16 }, { 24, 8 }, { 12, 4 }, { 7, 1 } }, + { { 64, 16 }, { 24, 8 }, { 12, 4 }, { 7, 1 } }, }; -static unsigned char good_quality_max_mesh_pct[MAX_MESH_SPEED + 1] = - {50, 25, 15, 5, 1, 1}; +static unsigned char good_quality_max_mesh_pct[MAX_MESH_SPEED + 1] = { + 50, 25, 15, 5, 1, 1 +}; // Intra only frames, golden frames (except alt ref overlays) and // alt ref frames tend to be coded at a higher than ambient quality @@ -67,8 +69,8 @@ static void set_good_speed_feature_framesize_dependent(VP9_COMP *cpi, if (speed >= 1) { if (VPXMIN(cm->width, cm->height) >= 720) { - sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT - : DISABLE_ALL_INTER_SPLIT; + sf->disable_split_mask = + cm->show_frame ? 
DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; sf->partition_search_breakout_dist_thr = (1 << 23); } else { sf->disable_split_mask = DISABLE_COMPOUND_SPLIT; @@ -78,8 +80,8 @@ static void set_good_speed_feature_framesize_dependent(VP9_COMP *cpi, if (speed >= 2) { if (VPXMIN(cm->width, cm->height) >= 720) { - sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT - : DISABLE_ALL_INTER_SPLIT; + sf->disable_split_mask = + cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; sf->adaptive_pred_interp_filter = 0; sf->partition_search_breakout_dist_thr = (1 << 24); sf->partition_search_breakout_rate_thr = 120; @@ -89,6 +91,17 @@ static void set_good_speed_feature_framesize_dependent(VP9_COMP *cpi, sf->partition_search_breakout_rate_thr = 100; } sf->rd_auto_partition_min_limit = set_partition_min_limit(cm); + + // Use a set of speed features for 4k videos. + if (VPXMIN(cm->width, cm->height) >= 2160) { + sf->use_square_partition_only = 1; + sf->intra_y_mode_mask[TX_32X32] = INTRA_DC; + sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC; + sf->alt_ref_search_fp = 1; + sf->cb_pred_filter_search = 1; + sf->adaptive_interp_filter_search = 1; + sf->disable_split_mask = DISABLE_ALL_SPLIT; + } } if (speed >= 3) { @@ -125,6 +138,9 @@ static void set_good_speed_feature_framesize_dependent(VP9_COMP *cpi, } } +static double tx_dom_thresholds[6] = { 99.0, 14.0, 12.0, 8.0, 4.0, 0.0 }; +static double qopt_thresholds[6] = { 99.0, 12.0, 10.0, 4.0, 2.0, 0.0 }; + static void set_good_speed_feature(VP9_COMP *cpi, VP9_COMMON *cm, SPEED_FEATURES *sf, int speed) { const int boosted = frame_is_boosted(cpi); @@ -139,15 +155,25 @@ static void set_good_speed_feature(VP9_COMP *cpi, VP9_COMMON *cm, sf->use_square_only_threshold = BLOCK_16X16; if (speed >= 1) { - if ((cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) || - vp9_internal_image_edge(cpi)) { - sf->use_square_partition_only = !frame_is_boosted(cpi); + if (cpi->oxcf.pass == 2) { + TWO_PASS *const twopass = &cpi->twopass; + if 
((twopass->fr_content_type == FC_GRAPHICS_ANIMATION) || + vp9_internal_image_edge(cpi)) { + sf->use_square_partition_only = !frame_is_boosted(cpi); + } else { + sf->use_square_partition_only = !frame_is_intra_only(cm); + } } else { sf->use_square_partition_only = !frame_is_intra_only(cm); } - sf->use_square_only_threshold = BLOCK_4X4; - sf->less_rectangular_check = 1; + sf->allow_txfm_domain_distortion = 1; + sf->tx_domain_thresh = tx_dom_thresholds[(speed < 6) ? speed : 5]; + sf->allow_quant_coeff_opt = sf->optimize_coefficients; + sf->quant_opt_thresh = qopt_thresholds[(speed < 6) ? speed : 5]; + + sf->use_square_only_threshold = BLOCK_4X4; + sf->less_rectangular_check = 1; sf->use_rd_breakout = 1; sf->adaptive_motion_search = 1; @@ -157,35 +183,40 @@ static void set_good_speed_feature(VP9_COMP *cpi, VP9_COMMON *cm, sf->mode_skip_start = 10; sf->adaptive_pred_interp_filter = 1; - sf->recode_loop = ALLOW_RECODE_KFARFGF; sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V; sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V; sf->intra_y_mode_mask[TX_16X16] = INTRA_DC_H_V; sf->intra_uv_mode_mask[TX_16X16] = INTRA_DC_H_V; + + sf->recode_tolerance_low = 15; + sf->recode_tolerance_high = 30; } if (speed >= 2) { - sf->tx_size_search_method = frame_is_boosted(cpi) ? USE_FULL_RD - : USE_LARGESTALL; + sf->recode_loop = ALLOW_RECODE_KFARFGF; + sf->tx_size_search_method = + frame_is_boosted(cpi) ? USE_FULL_RD : USE_LARGESTALL; // Reference masking is not supported in dynamic scaling mode. sf->reference_masking = cpi->oxcf.resize_mode != RESIZE_DYNAMIC ? 1 : 0; - sf->mode_search_skip_flags = (cm->frame_type == KEY_FRAME) ? 0 : - FLAG_SKIP_INTRA_DIRMISMATCH | - FLAG_SKIP_INTRA_BESTINTER | - FLAG_SKIP_COMP_BESTINTRA | - FLAG_SKIP_INTRA_LOWVAR; + sf->mode_search_skip_flags = + (cm->frame_type == KEY_FRAME) ? 
0 : FLAG_SKIP_INTRA_DIRMISMATCH | + FLAG_SKIP_INTRA_BESTINTER | + FLAG_SKIP_COMP_BESTINTRA | + FLAG_SKIP_INTRA_LOWVAR; sf->disable_filter_search_var_thresh = 100; sf->comp_inter_joint_search_thresh = BLOCK_SIZES; sf->auto_min_max_partition_size = RELAXED_NEIGHBORING_MIN_MAX; sf->allow_partition_search_skip = 1; + sf->recode_tolerance_low = 15; + sf->recode_tolerance_high = 45; } if (speed >= 3) { sf->use_square_partition_only = !frame_is_intra_only(cm); - sf->tx_size_search_method = frame_is_intra_only(cm) ? USE_FULL_RD - : USE_LARGESTALL; + sf->tx_size_search_method = + frame_is_intra_only(cm) ? USE_FULL_RD : USE_LARGESTALL; sf->mv.subpel_search_method = SUBPEL_TREE_PRUNED; sf->adaptive_pred_interp_filter = 0; sf->adaptive_mode_search = 1; @@ -232,13 +263,14 @@ static void set_good_speed_feature(VP9_COMP *cpi, VP9_COMMON *cm, } static void set_rt_speed_feature_framesize_dependent(VP9_COMP *cpi, - SPEED_FEATURES *sf, int speed) { + SPEED_FEATURES *sf, + int speed) { VP9_COMMON *const cm = &cpi->common; if (speed >= 1) { if (VPXMIN(cm->width, cm->height) >= 720) { - sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT - : DISABLE_ALL_INTER_SPLIT; + sf->disable_split_mask = + cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; } else { sf->disable_split_mask = DISABLE_COMPOUND_SPLIT; } @@ -246,8 +278,8 @@ static void set_rt_speed_feature_framesize_dependent(VP9_COMP *cpi, if (speed >= 2) { if (VPXMIN(cm->width, cm->height) >= 720) { - sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT - : DISABLE_ALL_INTER_SPLIT; + sf->disable_split_mask = + cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; } else { sf->disable_split_mask = LAST_AND_INTRA_SPLIT_ONLY; } @@ -262,13 +294,13 @@ static void set_rt_speed_feature_framesize_dependent(VP9_COMP *cpi, } if (speed >= 7) { - sf->encode_breakout_thresh = (VPXMIN(cm->width, cm->height) >= 720) ? - 800 : 300; + sf->encode_breakout_thresh = + (VPXMIN(cm->width, cm->height) >= 720) ? 
800 : 300; } } -static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf, - int speed, vp9e_tune_content content) { +static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf, int speed, + vp9e_tune_content content) { VP9_COMMON *const cm = &cpi->common; const int is_keyframe = cm->frame_type == KEY_FRAME; const int frames_since_key = is_keyframe ? 0 : cpi->rc.frames_since_key; @@ -279,10 +311,14 @@ static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf, sf->exhaustive_searches_thresh = INT_MAX; if (speed >= 1) { + sf->allow_txfm_domain_distortion = 1; + sf->tx_domain_thresh = 0.0; + sf->allow_quant_coeff_opt = 0; + sf->quant_opt_thresh = 0.0; sf->use_square_partition_only = !frame_is_intra_only(cm); sf->less_rectangular_check = 1; - sf->tx_size_search_method = frame_is_intra_only(cm) ? USE_FULL_RD - : USE_LARGESTALL; + sf->tx_size_search_method = + frame_is_intra_only(cm) ? USE_FULL_RD : USE_LARGESTALL; sf->use_rd_breakout = 1; @@ -296,11 +332,11 @@ static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf, } if (speed >= 2) { - sf->mode_search_skip_flags = (cm->frame_type == KEY_FRAME) ? 0 : - FLAG_SKIP_INTRA_DIRMISMATCH | - FLAG_SKIP_INTRA_BESTINTER | - FLAG_SKIP_COMP_BESTINTRA | - FLAG_SKIP_INTRA_LOWVAR; + sf->mode_search_skip_flags = + (cm->frame_type == KEY_FRAME) ? 
0 : FLAG_SKIP_INTRA_DIRMISMATCH | + FLAG_SKIP_INTRA_BESTINTER | + FLAG_SKIP_COMP_BESTINTRA | + FLAG_SKIP_INTRA_LOWVAR; sf->adaptive_pred_interp_filter = 2; // Reference masking only enabled for 1 spatial layer, and if none of the @@ -311,15 +347,14 @@ static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf, (cpi->external_resize == 1 || cpi->oxcf.resize_mode == RESIZE_DYNAMIC)) { MV_REFERENCE_FRAME ref_frame; - static const int flag_list[4] = - {0, VP9_LAST_FLAG, VP9_GOLD_FLAG, VP9_ALT_FLAG}; + static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG, + VP9_ALT_FLAG }; for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame); if (yv12 != NULL && (cpi->ref_frame_flags & flag_list[ref_frame])) { const struct scale_factors *const scale_fac = &cm->frame_refs[ref_frame - 1].sf; - if (vp9_is_scaled(scale_fac)) - sf->reference_masking = 0; + if (vp9_is_scaled(scale_fac)) sf->reference_masking = 0; } } } @@ -356,8 +391,8 @@ static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf, sf->use_fast_coef_costing = 0; sf->auto_min_max_partition_size = STRICT_NEIGHBORING_MIN_MAX; sf->adjust_partitioning_from_last_frame = - cm->last_frame_type != cm->frame_type || (0 == - (frames_since_key + 1) % sf->last_partitioning_redo_frequency); + cm->last_frame_type != cm->frame_type || + (0 == (frames_since_key + 1) % sf->last_partitioning_redo_frequency); sf->mv.subpel_force_stop = 1; for (i = 0; i < TX_SIZES; i++) { sf->intra_y_mode_mask[i] = INTRA_DC_H_V; @@ -377,11 +412,12 @@ static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf, if (speed >= 5) { sf->use_quant_fp = !is_keyframe; - sf->auto_min_max_partition_size = is_keyframe ? RELAXED_NEIGHBORING_MIN_MAX - : STRICT_NEIGHBORING_MIN_MAX; + sf->auto_min_max_partition_size = + is_keyframe ? 
RELAXED_NEIGHBORING_MIN_MAX : STRICT_NEIGHBORING_MIN_MAX; sf->default_max_partition_size = BLOCK_32X32; sf->default_min_partition_size = BLOCK_8X8; - sf->force_frame_boost = is_keyframe || + sf->force_frame_boost = + is_keyframe || (frames_since_key % (sf->last_partitioning_redo_frequency << 1) == 1); sf->max_delta_qindex = is_keyframe ? 20 : 15; sf->partition_search_type = REFERENCE_PARTITION; @@ -400,6 +436,7 @@ static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf, sf->mode_search_skip_flags = FLAG_SKIP_INTRA_DIRMISMATCH; sf->tx_size_search_method = is_keyframe ? USE_LARGESTALL : USE_TX_8X8; sf->simple_model_rd_from_var = 1; + if (cpi->oxcf.rc_mode == VPX_VBR) sf->mv.search_method = NSTEP; if (!is_keyframe) { int i; @@ -418,6 +455,11 @@ static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf, if (content == VP9E_CONTENT_SCREEN) { sf->short_circuit_flat_blocks = 1; } + if (cpi->oxcf.rc_mode == VPX_CBR && + cpi->oxcf.content != VP9E_CONTENT_SCREEN && !cpi->use_svc) { + sf->limit_newmv_early_exit = 1; + sf->bias_golden = 1; + } } if (speed >= 6) { @@ -427,6 +469,12 @@ static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf, sf->mv.search_method = NSTEP; sf->mv.reduce_first_step_size = 1; sf->skip_encode_sb = 0; + if (!cpi->use_svc && cpi->oxcf.rc_mode == VPX_CBR && + content != VP9E_CONTENT_SCREEN) { + // Enable short circuit for low temporal variance. + sf->short_circuit_low_temp_var = 1; + } + if (cpi->use_svc) sf->base_mv_aggressive = 1; } if (speed >= 7) { @@ -439,10 +487,24 @@ static void set_rt_speed_feature(VP9_COMP *cpi, SPEED_FEATURES *sf, sf->mv.fullpel_search_step_param = 6; } } + if (speed >= 8) { sf->adaptive_rd_thresh = 4; - sf->mv.subpel_force_stop = 2; + sf->mv.subpel_force_stop = (content == VP9E_CONTENT_SCREEN) ? 3 : 2; sf->lpf_pick = LPF_PICK_MINIMAL_LPF; + // Only keep INTRA_DC mode for speed 8. 
+ if (!is_keyframe) { + int i = 0; + for (i = 0; i < BLOCK_SIZES; ++i) + sf->intra_y_mode_bsize_mask[i] = INTRA_DC; + } + if (!cpi->use_svc && cpi->oxcf.rc_mode == VPX_CBR && + content != VP9E_CONTENT_SCREEN) { + // More aggressive short circuit for speed 8. + sf->short_circuit_low_temp_var = 2; + } + sf->limit_newmv_early_exit = 0; + sf->bias_golden = 0; } } @@ -485,7 +547,7 @@ void vp9_set_speed_features_framesize_independent(VP9_COMP *cpi) { // best quality defaults sf->frame_parameter_update = 1; sf->mv.search_method = NSTEP; - sf->recode_loop = ALLOW_RECODE; + sf->recode_loop = ALLOW_RECODE_FIRST; sf->mv.subpel_search_method = SUBPEL_TREE; sf->mv.subpel_iters_per_step = 2; sf->mv.subpel_force_stop = 0; @@ -523,6 +585,10 @@ void vp9_set_speed_features_framesize_independent(VP9_COMP *cpi) { sf->disable_filter_search_var_thresh = 0; sf->adaptive_interp_filter_search = 0; sf->allow_partition_search_skip = 0; + sf->allow_txfm_domain_distortion = 0; + sf->tx_domain_thresh = 99.0; + sf->allow_quant_coeff_opt = sf->optimize_coefficients; + sf->quant_opt_thresh = 99.0; for (i = 0; i < TX_SIZES; i++) { sf->intra_y_mode_mask[i] = INTRA_ALL; @@ -538,8 +604,7 @@ void vp9_set_speed_features_framesize_independent(VP9_COMP *cpi) { sf->mode_skip_start = MAX_MODES; // Mode index at which mode skip mask set sf->schedule_mode_search = 0; sf->use_nonrd_pick_mode = 0; - for (i = 0; i < BLOCK_SIZES; ++i) - sf->inter_mode_mask[i] = INTER_ALL; + for (i = 0; i < BLOCK_SIZES; ++i) sf->inter_mode_mask[i] = INTER_ALL; sf->max_intra_bsize = BLOCK_64X64; sf->reuse_inter_pred_sby = 0; // This setting only takes effect when partition_search_type is set @@ -548,10 +613,15 @@ void vp9_set_speed_features_framesize_independent(VP9_COMP *cpi) { sf->search_type_check_frequency = 50; sf->encode_breakout_thresh = 0; // Recode loop tolerance %. 
- sf->recode_tolerance = 25; + sf->recode_tolerance_low = 12; + sf->recode_tolerance_high = 25; sf->default_interp_filter = SWITCHABLE; sf->simple_model_rd_from_var = 0; sf->short_circuit_flat_blocks = 0; + sf->short_circuit_low_temp_var = 0; + sf->limit_newmv_early_exit = 0; + sf->bias_golden = 0; + sf->base_mv_aggressive = 0; // Some speed-up features even for best quality as minimal impact on quality. sf->adaptive_rd_thresh = 1; @@ -589,8 +659,7 @@ void vp9_set_speed_features_framesize_independent(VP9_COMP *cpi) { sf->exhaustive_searches_thresh = sf->exhaustive_searches_thresh << 1; for (i = 0; i < MAX_MESH_STEP; ++i) { - sf->mesh_patterns[i].range = - good_quality_mesh_patterns[speed][i].range; + sf->mesh_patterns[i].range = good_quality_mesh_patterns[speed][i].range; sf->mesh_patterns[i].interval = good_quality_mesh_patterns[speed][i].interval; } @@ -598,8 +667,7 @@ void vp9_set_speed_features_framesize_independent(VP9_COMP *cpi) { // Slow quant, dct and trellis not worthwhile for first pass // so make sure they are always turned off. - if (oxcf->pass == 1) - sf->optimize_coefficients = 0; + if (oxcf->pass == 1) sf->optimize_coefficients = 0; // No recode for 1 pass. 
if (oxcf->pass == 0) { @@ -607,7 +675,10 @@ void vp9_set_speed_features_framesize_independent(VP9_COMP *cpi) { sf->optimize_coefficients = 0; } - if (sf->mv.subpel_search_method == SUBPEL_TREE) { + if (sf->mv.subpel_force_stop == 3) { + // Whole pel only + cpi->find_fractional_mv_step = vp9_skip_sub_pixel_tree; + } else if (sf->mv.subpel_search_method == SUBPEL_TREE) { cpi->find_fractional_mv_step = vp9_find_best_sub_pixel_tree; } else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED) { cpi->find_fractional_mv_step = vp9_find_best_sub_pixel_tree_pruned; diff --git a/libs/libvpx/vp9/encoder/vp9_speed_features.h b/libs/libvpx/vp9/encoder/vp9_speed_features.h index fa2f79d31e..6d0b9420a1 100644 --- a/libs/libvpx/vp9/encoder/vp9_speed_features.h +++ b/libs/libvpx/vp9/encoder/vp9_speed_features.h @@ -18,17 +18,14 @@ extern "C" { #endif enum { - INTRA_ALL = (1 << DC_PRED) | - (1 << V_PRED) | (1 << H_PRED) | - (1 << D45_PRED) | (1 << D135_PRED) | - (1 << D117_PRED) | (1 << D153_PRED) | - (1 << D207_PRED) | (1 << D63_PRED) | - (1 << TM_PRED), - INTRA_DC = (1 << DC_PRED), - INTRA_DC_TM = (1 << DC_PRED) | (1 << TM_PRED), - INTRA_DC_H_V = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED), - INTRA_DC_TM_H_V = (1 << DC_PRED) | (1 << TM_PRED) | (1 << V_PRED) | - (1 << H_PRED) + INTRA_ALL = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED) | (1 << D45_PRED) | + (1 << D135_PRED) | (1 << D117_PRED) | (1 << D153_PRED) | + (1 << D207_PRED) | (1 << D63_PRED) | (1 << TM_PRED), + INTRA_DC = (1 << DC_PRED), + INTRA_DC_TM = (1 << DC_PRED) | (1 << TM_PRED), + INTRA_DC_H_V = (1 << DC_PRED) | (1 << V_PRED) | (1 << H_PRED), + INTRA_DC_TM_H_V = + (1 << DC_PRED) | (1 << TM_PRED) | (1 << V_PRED) | (1 << H_PRED) }; enum { @@ -42,20 +39,15 @@ enum { }; enum { - DISABLE_ALL_INTER_SPLIT = (1 << THR_COMP_GA) | - (1 << THR_COMP_LA) | - (1 << THR_ALTR) | - (1 << THR_GOLD) | - (1 << THR_LAST), + DISABLE_ALL_INTER_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA) | + (1 << THR_ALTR) | (1 << THR_GOLD) | (1 
<< THR_LAST), - DISABLE_ALL_SPLIT = (1 << THR_INTRA) | DISABLE_ALL_INTER_SPLIT, + DISABLE_ALL_SPLIT = (1 << THR_INTRA) | DISABLE_ALL_INTER_SPLIT, - DISABLE_COMPOUND_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA), + DISABLE_COMPOUND_SPLIT = (1 << THR_COMP_GA) | (1 << THR_COMP_LA), - LAST_AND_INTRA_SPLIT_ONLY = (1 << THR_COMP_GA) | - (1 << THR_COMP_LA) | - (1 << THR_ALTR) | - (1 << THR_GOLD) + LAST_AND_INTRA_SPLIT_ONLY = (1 << THR_COMP_GA) | (1 << THR_COMP_LA) | + (1 << THR_ALTR) | (1 << THR_GOLD) }; typedef enum { @@ -75,8 +67,10 @@ typedef enum { ALLOW_RECODE_KFMAXBW = 1, // Allow recode only for KF/ARF/GF frames. ALLOW_RECODE_KFARFGF = 2, + // Allow recode for ARF/GF/KF and first normal frame in each group. + ALLOW_RECODE_FIRST = 3, // Allow recode for all frames based on bitrate constraints. - ALLOW_RECODE = 3, + ALLOW_RECODE = 4, } RECODE_LOOP_TYPE; typedef enum { @@ -188,7 +182,11 @@ typedef struct MV_SPEED_FEATURES { // Maximum number of steps in logarithmic subpel search before giving up. int subpel_iters_per_step; - // Control when to stop subpel search + // Control when to stop subpel search: + // 0: Full subpel search. + // 1: Stop at quarter pixel. + // 2: Stop at half pixel. + // 3: Stop at full pixel. int subpel_force_stop; // This variable sets the step_param used in full pel motion search. @@ -242,6 +240,15 @@ typedef struct SPEED_FEATURES { // Coefficient probability model approximation step size int coeff_prob_appx_step; + // Enable uniform quantizer followed by trellis coefficient optimization + int allow_quant_coeff_opt; + double quant_opt_thresh; + + // Use transform domain distortion. Use pixel domain distortion in speed 0 + // and certain situations in higher speed to improve the RD model precision. 
+ int allow_txfm_domain_distortion; + double tx_domain_thresh; + // The threshold is to determine how slow the motino is, it is used when // use_lastframe_partitioning is set to LAST_FRAME_PARTITION_LOW_MOTION MOTION_THRESHOLD lf_motion_threshold; @@ -396,7 +403,8 @@ typedef struct SPEED_FEATURES { // This feature controls the tolerence vs target used in deciding whether to // recode a frame. It has no meaning if recode is disabled. - int recode_tolerance; + int recode_tolerance_low; + int recode_tolerance_high; // This variable controls the maximum block size where intra blocks can be // used in inter frames. @@ -442,6 +450,25 @@ typedef struct SPEED_FEATURES { // Skip a number of expensive mode evaluations for blocks with zero source // variance. int short_circuit_flat_blocks; + + // Skip a number of expensive mode evaluations for blocks with very low + // temporal variance. + // 1: Skip golden non-zeromv and ALL INTRA for bsize >= 32x32. + // 2: Skip golden non-zeromv and newmv-last for bsize >= 16x16, skip ALL + // INTRA for bsize >= 32x32 and vert/horz INTRA for bsize 16x16, 16x32 and + // 32x16. + int short_circuit_low_temp_var; + + // Limits the rd-threshold update for early exit for the newmv-last mode, + // for non-rd mode. + int limit_newmv_early_exit; + + // Adds a bias against golden reference, for non-rd mode. + int bias_golden; + + // Bias to use base mv and skip 1/4 subpel search when use base mv in + // enhancement layer. 
+ int base_mv_aggressive; } SPEED_FEATURES; struct VP9_COMP; diff --git a/libs/libvpx/vp9/encoder/vp9_subexp.c b/libs/libvpx/vp9/encoder/vp9_subexp.c index cbd3c49466..e8212ce05e 100644 --- a/libs/libvpx/vp9/encoder/vp9_subexp.c +++ b/libs/libvpx/vp9/encoder/vp9_subexp.c @@ -14,26 +14,23 @@ #include "vp9/encoder/vp9_cost.h" #include "vp9/encoder/vp9_subexp.h" -#define vp9_cost_upd256 ((int)(vp9_cost_one(upd) - vp9_cost_zero(upd))) - static const uint8_t update_bits[255] = { - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, - 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 0, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 
11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 0, }; +#define MIN_DELP_BITS 5 static int recenter_nonneg(int v, int m) { if (v > (m << 1)) @@ -49,23 +46,23 @@ static int remap_prob(int v, int m) { static const uint8_t map_table[MAX_PROB - 1] = { // generated by: // map_table[j] = split_index(j, MAX_PROB - 1, MODULUS_PARAM); - 20, 21, 22, 23, 24, 25, 0, 26, 27, 28, 29, 30, 31, 32, 33, - 34, 35, 36, 37, 1, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 2, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, - 3, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 4, 74, - 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 5, 86, 87, 88, - 89, 90, 91, 92, 93, 94, 95, 96, 97, 6, 98, 99, 100, 101, 102, - 103, 104, 105, 106, 107, 108, 109, 7, 110, 111, 112, 113, 114, 115, 116, - 117, 118, 119, 120, 121, 8, 122, 123, 124, 125, 126, 127, 128, 129, 130, - 131, 132, 133, 9, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, - 145, 10, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 11, - 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 12, 170, 171, - 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 13, 182, 183, 184, 185, - 186, 187, 188, 189, 190, 191, 192, 193, 14, 194, 195, 196, 197, 198, 199, - 200, 201, 202, 203, 204, 205, 15, 206, 207, 208, 209, 210, 211, 212, 213, - 214, 215, 216, 217, 16, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, - 228, 229, 17, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, - 18, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 19, + 20, 21, 22, 23, 24, 25, 0, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 
1, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 2, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 3, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 4, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 5, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, 97, 6, 98, 99, 100, 101, 102, + 103, 104, 105, 106, 107, 108, 109, 7, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 8, 122, 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 133, 9, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, + 145, 10, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 11, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 12, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 13, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, 14, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 15, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 16, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 17, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 18, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 19, }; v--; m--; @@ -116,21 +113,23 @@ void vp9_write_prob_diff_update(vpx_writer *w, vpx_prob newp, vpx_prob oldp) { encode_term_subexp(w, delp); } -int vp9_prob_diff_update_savings_search(const unsigned int *ct, - vpx_prob oldp, vpx_prob *bestp, - vpx_prob upd) { +int vp9_prob_diff_update_savings_search(const unsigned int *ct, vpx_prob oldp, + vpx_prob *bestp, vpx_prob upd) { const int old_b = cost_branch256(ct, oldp); int bestsavings = 0; vpx_prob newp, bestnewp = oldp; const int step = *bestp > oldp ? 
-1 : 1; + const int upd_cost = vp9_cost_one(upd) - vp9_cost_zero(upd); - for (newp = *bestp; newp != oldp; newp += step) { - const int new_b = cost_branch256(ct, newp); - const int update_b = prob_diff_update_cost(newp, oldp) + vp9_cost_upd256; - const int savings = old_b - new_b - update_b; - if (savings > bestsavings) { - bestsavings = savings; - bestnewp = newp; + if (old_b > upd_cost + (MIN_DELP_BITS << VP9_PROB_COST_SHIFT)) { + for (newp = *bestp; newp != oldp; newp += step) { + const int new_b = cost_branch256(ct, newp); + const int update_b = prob_diff_update_cost(newp, oldp) + upd_cost; + const int savings = old_b - new_b - update_b; + if (savings > bestsavings) { + bestsavings = savings; + bestnewp = newp; + } } } *bestp = bestnewp; @@ -138,41 +137,39 @@ int vp9_prob_diff_update_savings_search(const unsigned int *ct, } int vp9_prob_diff_update_savings_search_model(const unsigned int *ct, - const vpx_prob *oldp, - vpx_prob *bestp, - vpx_prob upd, + const vpx_prob oldp, + vpx_prob *bestp, vpx_prob upd, int stepsize) { int i, old_b, new_b, update_b, savings, bestsavings; int newp; - const int step_sign = *bestp > oldp[PIVOT_NODE] ? -1 : 1; + const int step_sign = *bestp > oldp ? 
-1 : 1; const int step = stepsize * step_sign; - vpx_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES]; - vp9_model_to_full_probs(oldp, oldplist); - memcpy(newplist, oldp, sizeof(vpx_prob) * UNCONSTRAINED_NODES); - for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i) - old_b += cost_branch256(ct + 2 * i, oldplist[i]); - old_b += cost_branch256(ct + 2 * PIVOT_NODE, oldplist[PIVOT_NODE]); + const int upd_cost = vp9_cost_one(upd) - vp9_cost_zero(upd); + const vpx_prob *newplist, *oldplist; + vpx_prob bestnewp; + oldplist = vp9_pareto8_full[oldp - 1]; + old_b = cost_branch256(ct + 2 * PIVOT_NODE, oldp); + for (i = UNCONSTRAINED_NODES; i < ENTROPY_NODES; ++i) + old_b += cost_branch256(ct + 2 * i, oldplist[i - UNCONSTRAINED_NODES]); bestsavings = 0; - bestnewp = oldp[PIVOT_NODE]; + bestnewp = oldp; assert(stepsize > 0); - for (newp = *bestp; (newp - oldp[PIVOT_NODE]) * step_sign < 0; - newp += step) { - if (newp < 1 || newp > 255) - continue; - newplist[PIVOT_NODE] = newp; - vp9_model_to_full_probs(newplist, newplist); - for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i) - new_b += cost_branch256(ct + 2 * i, newplist[i]); - new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]); - update_b = prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + - vp9_cost_upd256; - savings = old_b - new_b - update_b; - if (savings > bestsavings) { - bestsavings = savings; - bestnewp = newp; + if (old_b > upd_cost + (MIN_DELP_BITS << VP9_PROB_COST_SHIFT)) { + for (newp = *bestp; (newp - oldp) * step_sign < 0; newp += step) { + if (newp < 1 || newp > 255) continue; + newplist = vp9_pareto8_full[newp - 1]; + new_b = cost_branch256(ct + 2 * PIVOT_NODE, newp); + for (i = UNCONSTRAINED_NODES; i < ENTROPY_NODES; ++i) + new_b += cost_branch256(ct + 2 * i, newplist[i - UNCONSTRAINED_NODES]); + update_b = prob_diff_update_cost(newp, oldp) + upd_cost; + savings = old_b - new_b - update_b; + if (savings > bestsavings) { + bestsavings = savings; + 
bestnewp = newp; + } } } @@ -184,8 +181,8 @@ void vp9_cond_prob_diff_update(vpx_writer *w, vpx_prob *oldp, const unsigned int ct[2]) { const vpx_prob upd = DIFF_UPDATE_PROB; vpx_prob newp = get_binary_prob(ct[0], ct[1]); - const int savings = vp9_prob_diff_update_savings_search(ct, *oldp, &newp, - upd); + const int savings = + vp9_prob_diff_update_savings_search(ct, *oldp, &newp, upd); assert(newp >= 1); if (savings > 0) { vpx_write(w, 1, upd); diff --git a/libs/libvpx/vp9/encoder/vp9_subexp.h b/libs/libvpx/vp9/encoder/vp9_subexp.h index b968232322..26c89e2ea7 100644 --- a/libs/libvpx/vp9/encoder/vp9_subexp.h +++ b/libs/libvpx/vp9/encoder/vp9_subexp.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VP9_ENCODER_VP9_SUBEXP_H_ #define VP9_ENCODER_VP9_SUBEXP_H_ @@ -20,21 +19,18 @@ extern "C" { struct vpx_writer; -void vp9_write_prob_diff_update(struct vpx_writer *w, - vpx_prob newp, vpx_prob oldp); +void vp9_write_prob_diff_update(struct vpx_writer *w, vpx_prob newp, + vpx_prob oldp); void vp9_cond_prob_diff_update(struct vpx_writer *w, vpx_prob *oldp, const unsigned int ct[2]); -int vp9_prob_diff_update_savings_search(const unsigned int *ct, - vpx_prob oldp, vpx_prob *bestp, - vpx_prob upd); - +int vp9_prob_diff_update_savings_search(const unsigned int *ct, vpx_prob oldp, + vpx_prob *bestp, vpx_prob upd); int vp9_prob_diff_update_savings_search_model(const unsigned int *ct, - const vpx_prob *oldp, - vpx_prob *bestp, - vpx_prob upd, + const vpx_prob oldp, + vpx_prob *bestp, vpx_prob upd, int stepsize); #ifdef __cplusplus diff --git a/libs/libvpx/vp9/encoder/vp9_svc_layercontext.c b/libs/libvpx/vp9/encoder/vp9_svc_layercontext.c index 9724df4cd7..2d29e268b1 100644 --- a/libs/libvpx/vp9/encoder/vp9_svc_layercontext.c +++ b/libs/libvpx/vp9/encoder/vp9_svc_layercontext.c @@ -16,7 +16,7 @@ #include "vp9/encoder/vp9_extend.h" #include "vpx_dsp/vpx_dsp_common.h" -#define SMALL_FRAME_WIDTH 32 +#define SMALL_FRAME_WIDTH 32 #define 
SMALL_FRAME_HEIGHT 16 void vp9_init_layer_context(VP9_COMP *const cpi) { @@ -33,9 +33,10 @@ void vp9_init_layer_context(VP9_COMP *const cpi) { svc->rc_drop_superframe = 0; svc->force_zero_mode_spatial_ref = 0; svc->use_base_mv = 0; + svc->scaled_temp_is_alloc = 0; + svc->scaled_one_half = 0; svc->current_superframe = 0; - for (i = 0; i < REF_FRAMES; ++i) - svc->ref_frame_index[i] = -1; + for (i = 0; i < REF_FRAMES; ++i) svc->ref_frame_index[i] = -1; for (sl = 0; sl < oxcf->ss_number_layers; ++sl) { cpi->svc.ext_frame_flags[sl] = 0; cpi->svc.ext_lst_fb_idx[sl] = 0; @@ -44,16 +45,14 @@ void vp9_init_layer_context(VP9_COMP *const cpi) { } if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.pass == 2) { - if (vpx_realloc_frame_buffer(&cpi->svc.empty_frame.img, - SMALL_FRAME_WIDTH, SMALL_FRAME_HEIGHT, - cpi->common.subsampling_x, + if (vpx_realloc_frame_buffer(&cpi->svc.empty_frame.img, SMALL_FRAME_WIDTH, + SMALL_FRAME_HEIGHT, cpi->common.subsampling_x, cpi->common.subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH cpi->common.use_highbitdepth, #endif VP9_ENC_BORDER_IN_PIXELS, - cpi->common.byte_alignment, - NULL, NULL, NULL)) + cpi->common.byte_alignment, NULL, NULL, NULL)) vpx_internal_error(&cpi->common.error, VPX_CODEC_MEM_ERROR, "Failed to allocate empty frame for multiple frame " "contexts"); @@ -95,10 +94,10 @@ void vp9_init_layer_context(VP9_COMP *const cpi) { lc->target_bandwidth = oxcf->layer_target_bitrate[layer]; lrc->last_q[KEY_FRAME] = oxcf->best_allowed_q; lrc->last_q[INTER_FRAME] = oxcf->best_allowed_q; - lrc->avg_frame_qindex[KEY_FRAME] = (oxcf->worst_allowed_q + - oxcf->best_allowed_q) / 2; - lrc->avg_frame_qindex[INTER_FRAME] = (oxcf->worst_allowed_q + - oxcf->best_allowed_q) / 2; + lrc->avg_frame_qindex[KEY_FRAME] = + (oxcf->worst_allowed_q + oxcf->best_allowed_q) / 2; + lrc->avg_frame_qindex[INTER_FRAME] = + (oxcf->worst_allowed_q + oxcf->best_allowed_q) / 2; if (oxcf->ss_enable_auto_arf[sl]) lc->alt_ref_idx = alt_ref_idx++; else @@ -106,35 +105,39 @@ void 
vp9_init_layer_context(VP9_COMP *const cpi) { lc->gold_ref_idx = INVALID_IDX; } - lrc->buffer_level = oxcf->starting_buffer_level_ms * - lc->target_bandwidth / 1000; + lrc->buffer_level = + oxcf->starting_buffer_level_ms * lc->target_bandwidth / 1000; lrc->bits_off_target = lrc->buffer_level; // Initialize the cyclic refresh parameters. If spatial layers are used // (i.e., ss_number_layers > 1), these need to be updated per spatial // layer. // Cyclic refresh is only applied on base temporal layer. - if (oxcf->ss_number_layers > 1 && - tl == 0) { + if (oxcf->ss_number_layers > 1 && tl == 0) { size_t last_coded_q_map_size; size_t consec_zero_mv_size; + VP9_COMMON *const cm = &cpi->common; lc->sb_index = 0; - lc->map = vpx_malloc(mi_rows * mi_cols * sizeof(signed char)); + CHECK_MEM_ERROR(cm, lc->map, + vpx_malloc(mi_rows * mi_cols * sizeof(*lc->map))); memset(lc->map, 0, mi_rows * mi_cols); - last_coded_q_map_size = mi_rows * mi_cols * sizeof(uint8_t); - lc->last_coded_q_map = vpx_malloc(last_coded_q_map_size); + last_coded_q_map_size = + mi_rows * mi_cols * sizeof(*lc->last_coded_q_map); + CHECK_MEM_ERROR(cm, lc->last_coded_q_map, + vpx_malloc(last_coded_q_map_size)); assert(MAXQ <= 255); memset(lc->last_coded_q_map, MAXQ, last_coded_q_map_size); - consec_zero_mv_size = mi_rows * mi_cols * sizeof(uint8_t); - lc->consec_zero_mv = vpx_malloc(consec_zero_mv_size); + consec_zero_mv_size = mi_rows * mi_cols * sizeof(*lc->consec_zero_mv); + CHECK_MEM_ERROR(cm, lc->consec_zero_mv, + vpx_malloc(consec_zero_mv_size)); memset(lc->consec_zero_mv, 0, consec_zero_mv_size); - } + } } } // Still have extra buffer for base layer golden frame - if (!(svc->number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) - && alt_ref_idx < REF_FRAMES) + if (!(svc->number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) && + alt_ref_idx < REF_FRAMES) svc->layer_context[0].gold_ref_idx = alt_ref_idx; } @@ -155,11 +158,12 @@ void vp9_update_layer_context_change_config(VP9_COMP *const 
cpi, oxcf->layer_target_bitrate[layer]; } - layer = LAYER_IDS_TO_IDX(sl, ((oxcf->ts_number_layers - 1) < 0 ? - 0 : (oxcf->ts_number_layers - 1)), oxcf->ts_number_layers); - spatial_layer_target = - svc->layer_context[layer].target_bandwidth = - oxcf->layer_target_bitrate[layer]; + layer = LAYER_IDS_TO_IDX( + sl, + ((oxcf->ts_number_layers - 1) < 0 ? 0 : (oxcf->ts_number_layers - 1)), + oxcf->ts_number_layers); + spatial_layer_target = svc->layer_context[layer].target_bandwidth = + oxcf->layer_target_bitrate[layer]; for (tl = 0; tl < oxcf->ts_number_layers; ++tl) { LAYER_CONTEXT *const lc = @@ -207,8 +211,8 @@ void vp9_update_layer_context_change_config(VP9_COMP *const cpi, (int64_t)(rc->optimal_buffer_level * bitrate_alloc); lrc->maximum_buffer_size = (int64_t)(rc->maximum_buffer_size * bitrate_alloc); - lrc->bits_off_target = VPXMIN(lrc->bits_off_target, - lrc->maximum_buffer_size); + lrc->bits_off_target = + VPXMIN(lrc->bits_off_target, lrc->maximum_buffer_size); lrc->buffer_level = VPXMIN(lrc->buffer_level, lrc->maximum_buffer_size); // Update framerate-related quantities. if (svc->number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) { @@ -228,12 +232,12 @@ void vp9_update_layer_context_change_config(VP9_COMP *const cpi, static LAYER_CONTEXT *get_layer_context(VP9_COMP *const cpi) { if (is_one_pass_cbr_svc(cpi)) return &cpi->svc.layer_context[cpi->svc.spatial_layer_id * - cpi->svc.number_temporal_layers + cpi->svc.temporal_layer_id]; + cpi->svc.number_temporal_layers + + cpi->svc.temporal_layer_id]; else - return (cpi->svc.number_temporal_layers > 1 && - cpi->oxcf.rc_mode == VPX_CBR) ? - &cpi->svc.layer_context[cpi->svc.temporal_layer_id] : - &cpi->svc.layer_context[cpi->svc.spatial_layer_id]; + return (cpi->svc.number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) + ? 
&cpi->svc.layer_context[cpi->svc.temporal_layer_id] + : &cpi->svc.layer_context[cpi->svc.spatial_layer_id]; } void vp9_update_temporal_layer_framerate(VP9_COMP *const cpi) { @@ -243,7 +247,7 @@ void vp9_update_temporal_layer_framerate(VP9_COMP *const cpi) { RATE_CONTROL *const lrc = &lc->rc; // Index into spatial+temporal arrays. const int st_idx = svc->spatial_layer_id * svc->number_temporal_layers + - svc->temporal_layer_id; + svc->temporal_layer_id; const int tl = svc->temporal_layer_id; lc->framerate = cpi->framerate / oxcf->ts_rate_decimator[tl]; @@ -270,10 +274,11 @@ void vp9_update_spatial_layer_framerate(VP9_COMP *const cpi, double framerate) { lc->framerate = framerate; lrc->avg_frame_bandwidth = (int)(lc->target_bandwidth / lc->framerate); - lrc->min_frame_bandwidth = (int)(lrc->avg_frame_bandwidth * - oxcf->two_pass_vbrmin_section / 100); + lrc->min_frame_bandwidth = + (int)(lrc->avg_frame_bandwidth * oxcf->two_pass_vbrmin_section / 100); lrc->max_frame_bandwidth = (int)(((int64_t)lrc->avg_frame_bandwidth * - oxcf->two_pass_vbrmax_section) / 100); + oxcf->two_pass_vbrmax_section) / + 100); vp9_rc_set_gf_interval_range(cpi, lrc); } @@ -286,6 +291,11 @@ void vp9_restore_layer_context(VP9_COMP *const cpi) { cpi->twopass = lc->twopass; cpi->oxcf.target_bandwidth = lc->target_bandwidth; cpi->alt_ref_source = lc->alt_ref_source; + // Check if it is one_pass_cbr_svc mode and lc->speed > 0 (real-time mode + // does not use speed = 0). + if (is_one_pass_cbr_svc(cpi) && lc->speed > 0) { + cpi->oxcf.speed = lc->speed; + } // Reset the frames_since_key and frames_to_key counters to their values // before the layer restore. Keep these defined for the stream (not layer). if (cpi->svc.number_temporal_layers > 1 || @@ -297,17 +307,16 @@ void vp9_restore_layer_context(VP9_COMP *const cpi) { // For spatial-svc, allow cyclic-refresh to be applied on the spatial layers, // for the base temporal layer. 
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && - cpi->svc.number_spatial_layers > 1 && - cpi->svc.temporal_layer_id == 0) { + cpi->svc.number_spatial_layers > 1 && cpi->svc.temporal_layer_id == 0) { CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; signed char *temp = cr->map; uint8_t *temp2 = cr->last_coded_q_map; - uint8_t *temp3 = cr->consec_zero_mv; + uint8_t *temp3 = cpi->consec_zero_mv; cr->map = lc->map; lc->map = temp; cr->last_coded_q_map = lc->last_coded_q_map; lc->last_coded_q_map = temp2; - cr->consec_zero_mv = lc->consec_zero_mv; + cpi->consec_zero_mv = lc->consec_zero_mv; lc->consec_zero_mv = temp3; cr->sb_index = lc->sb_index; } @@ -325,8 +334,7 @@ void vp9_save_layer_context(VP9_COMP *const cpi) { // For spatial-svc, allow cyclic-refresh to be applied on the spatial layers, // for the base temporal layer. if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && - cpi->svc.number_spatial_layers > 1 && - cpi->svc.temporal_layer_id == 0) { + cpi->svc.number_spatial_layers > 1 && cpi->svc.temporal_layer_id == 0) { CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; signed char *temp = lc->map; uint8_t *temp2 = lc->last_coded_q_map; @@ -335,8 +343,8 @@ void vp9_save_layer_context(VP9_COMP *const cpi) { cr->map = temp; lc->last_coded_q_map = cr->last_coded_q_map; cr->last_coded_q_map = temp2; - lc->consec_zero_mv = cr->consec_zero_mv; - cr->consec_zero_mv = temp3; + lc->consec_zero_mv = cpi->consec_zero_mv; + cpi->consec_zero_mv = temp3; lc->sb_index = cr->sb_index; } } @@ -368,20 +376,20 @@ void vp9_inc_frame_in_layer(VP9_COMP *const cpi) { } int vp9_is_upper_layer_key_frame(const VP9_COMP *const cpi) { - return is_two_pass_svc(cpi) && - cpi->svc.spatial_layer_id > 0 && - cpi->svc.layer_context[cpi->svc.spatial_layer_id * + return is_two_pass_svc(cpi) && cpi->svc.spatial_layer_id > 0 && + cpi->svc + .layer_context[cpi->svc.spatial_layer_id * cpi->svc.number_temporal_layers + - cpi->svc.temporal_layer_id].is_key_frame; + cpi->svc.temporal_layer_id] + .is_key_frame; } 
static void get_layer_resolution(const int width_org, const int height_org, - const int num, const int den, - int *width_out, int *height_out) { + const int num, const int den, int *width_out, + int *height_out) { int w, h; - if (width_out == NULL || height_out == NULL || den == 0) - return; + if (width_out == NULL || height_out == NULL || den == 0) return; w = width_org * num / den; h = height_org * num / den; @@ -402,11 +410,15 @@ static void set_flags_and_fb_idx_for_temporal_mode3(VP9_COMP *const cpi) { int spatial_id, temporal_id; spatial_id = cpi->svc.spatial_layer_id = cpi->svc.spatial_layer_to_encode; frame_num_within_temporal_struct = - cpi->svc.layer_context[cpi->svc.spatial_layer_id * - cpi->svc.number_temporal_layers].current_video_frame_in_layer % 4; + cpi->svc + .layer_context[cpi->svc.spatial_layer_id * + cpi->svc.number_temporal_layers] + .current_video_frame_in_layer % + 4; temporal_id = cpi->svc.temporal_layer_id = - (frame_num_within_temporal_struct & 1) ? 2 : - (frame_num_within_temporal_struct >> 1); + (frame_num_within_temporal_struct & 1) + ? 
2 + : (frame_num_within_temporal_struct >> 1); cpi->ext_refresh_last_frame = cpi->ext_refresh_golden_frame = cpi->ext_refresh_alt_ref_frame = 0; if (!temporal_id) { @@ -453,7 +465,7 @@ static void set_flags_and_fb_idx_for_temporal_mode3(VP9_COMP *const cpi) { if (spatial_id == cpi->svc.number_spatial_layers - 1) { // top layer cpi->ext_refresh_frame_flags_pending = 1; if (!spatial_id) - cpi->ref_frame_flags = VP9_LAST_FLAG; + cpi->ref_frame_flags = VP9_LAST_FLAG; else cpi->ref_frame_flags = VP9_LAST_FLAG | VP9_GOLD_FLAG; } else if (!spatial_id) { @@ -474,7 +486,7 @@ static void set_flags_and_fb_idx_for_temporal_mode3(VP9_COMP *const cpi) { cpi->lst_fb_idx = spatial_id - 1; cpi->gld_fb_idx = spatial_id; } else { - cpi->gld_fb_idx = spatial_id - 1; + cpi->gld_fb_idx = spatial_id - 1; } } else { cpi->gld_fb_idx = 0; @@ -502,10 +514,13 @@ static void set_flags_and_fb_idx_for_temporal_mode2(VP9_COMP *const cpi) { int spatial_id, temporal_id; spatial_id = cpi->svc.spatial_layer_id = cpi->svc.spatial_layer_to_encode; temporal_id = cpi->svc.temporal_layer_id = - cpi->svc.layer_context[cpi->svc.spatial_layer_id * - cpi->svc.number_temporal_layers].current_video_frame_in_layer & 1; + cpi->svc + .layer_context[cpi->svc.spatial_layer_id * + cpi->svc.number_temporal_layers] + .current_video_frame_in_layer & + 1; cpi->ext_refresh_last_frame = cpi->ext_refresh_golden_frame = - cpi->ext_refresh_alt_ref_frame = 0; + cpi->ext_refresh_alt_ref_frame = 0; if (!temporal_id) { cpi->ext_refresh_frame_flags_pending = 1; cpi->ext_refresh_last_frame = 1; @@ -536,7 +551,7 @@ static void set_flags_and_fb_idx_for_temporal_mode2(VP9_COMP *const cpi) { cpi->lst_fb_idx = spatial_id - 1; cpi->gld_fb_idx = spatial_id; } else { - cpi->gld_fb_idx = spatial_id - 1; + cpi->gld_fb_idx = spatial_id - 1; } } else { cpi->gld_fb_idx = 0; @@ -555,8 +570,8 @@ static void set_flags_and_fb_idx_for_temporal_mode_noLayering( VP9_COMP *const cpi) { int spatial_id; spatial_id = cpi->svc.spatial_layer_id = 
cpi->svc.spatial_layer_to_encode; - cpi->ext_refresh_last_frame = - cpi->ext_refresh_golden_frame = cpi->ext_refresh_alt_ref_frame = 0; + cpi->ext_refresh_last_frame = cpi->ext_refresh_golden_frame = + cpi->ext_refresh_alt_ref_frame = 0; cpi->ext_refresh_frame_flags_pending = 1; cpi->ext_refresh_last_frame = 1; if (!spatial_id) { @@ -574,7 +589,7 @@ static void set_flags_and_fb_idx_for_temporal_mode_noLayering( cpi->lst_fb_idx = spatial_id - 1; cpi->gld_fb_idx = spatial_id; } else { - cpi->gld_fb_idx = spatial_id - 1; + cpi->gld_fb_idx = spatial_id - 1; } } else { cpi->gld_fb_idx = 0; @@ -584,20 +599,19 @@ static void set_flags_and_fb_idx_for_temporal_mode_noLayering( int vp9_one_pass_cbr_svc_start_layer(VP9_COMP *const cpi) { int width = 0, height = 0; LAYER_CONTEXT *lc = NULL; - if (cpi->svc.number_spatial_layers > 1) - cpi->svc.use_base_mv = 1; + if (cpi->svc.number_spatial_layers > 1) cpi->svc.use_base_mv = 1; cpi->svc.force_zero_mode_spatial_ref = 1; if (cpi->svc.temporal_layering_mode == VP9E_TEMPORAL_LAYERING_MODE_0212) { set_flags_and_fb_idx_for_temporal_mode3(cpi); } else if (cpi->svc.temporal_layering_mode == - VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING) { + VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING) { set_flags_and_fb_idx_for_temporal_mode_noLayering(cpi); } else if (cpi->svc.temporal_layering_mode == - VP9E_TEMPORAL_LAYERING_MODE_0101) { + VP9E_TEMPORAL_LAYERING_MODE_0101) { set_flags_and_fb_idx_for_temporal_mode2(cpi); } else if (cpi->svc.temporal_layering_mode == - VP9E_TEMPORAL_LAYERING_MODE_BYPASS) { + VP9E_TEMPORAL_LAYERING_MODE_BYPASS) { // In the BYPASS/flexible mode, the encoder is relying on the application // to specify, for each spatial layer, the flags and buffer indices for the // layering. 
@@ -621,7 +635,7 @@ int vp9_one_pass_cbr_svc_start_layer(VP9_COMP *const cpi) { cpi->svc.rc_drop_superframe = 0; lc = &cpi->svc.layer_context[cpi->svc.spatial_layer_id * - cpi->svc.number_temporal_layers + + cpi->svc.number_temporal_layers + cpi->svc.temporal_layer_id]; // Setting the worst/best_quality via the encoder control: SET_SVC_PARAMETERS, @@ -629,12 +643,12 @@ int vp9_one_pass_cbr_svc_start_layer(VP9_COMP *const cpi) { if (cpi->svc.temporal_layering_mode != VP9E_TEMPORAL_LAYERING_MODE_BYPASS) { RATE_CONTROL *const lrc = &lc->rc; lrc->worst_quality = vp9_quantizer_to_qindex(lc->max_q); - lrc->best_quality = vp9_quantizer_to_qindex(lc->min_q); + lrc->best_quality = vp9_quantizer_to_qindex(lc->min_q); } get_layer_resolution(cpi->oxcf.width, cpi->oxcf.height, - lc->scaling_factor_num, lc->scaling_factor_den, - &width, &height); + lc->scaling_factor_num, lc->scaling_factor_den, &width, + &height); if (vp9_set_size_literal(cpi, width, height) != 0) return VPX_CODEC_INVALID_PARAM; @@ -665,8 +679,8 @@ int vp9_svc_start_frame(VP9_COMP *const cpi) { cpi->lst_fb_idx = cpi->svc.spatial_layer_id; if (cpi->svc.spatial_layer_id == 0) - cpi->gld_fb_idx = (lc->gold_ref_idx >= 0) ? - lc->gold_ref_idx : cpi->lst_fb_idx; + cpi->gld_fb_idx = + (lc->gold_ref_idx >= 0) ? 
lc->gold_ref_idx : cpi->lst_fb_idx; else cpi->gld_fb_idx = cpi->svc.spatial_layer_id - 1; @@ -680,8 +694,7 @@ int vp9_svc_start_frame(VP9_COMP *const cpi) { } else { if (cpi->oxcf.ss_enable_auto_arf[cpi->svc.spatial_layer_id]) { cpi->alt_fb_idx = lc->alt_ref_idx; - if (!lc->has_alt_frame) - cpi->ref_frame_flags &= (~VP9_ALT_FLAG); + if (!lc->has_alt_frame) cpi->ref_frame_flags &= (~VP9_ALT_FLAG); } else { // Find a proper alt_fb_idx for layers that don't have alt ref frame if (cpi->svc.spatial_layer_id == 0) { @@ -702,8 +715,8 @@ int vp9_svc_start_frame(VP9_COMP *const cpi) { } get_layer_resolution(cpi->oxcf.width, cpi->oxcf.height, - lc->scaling_factor_num, lc->scaling_factor_den, - &width, &height); + lc->scaling_factor_num, lc->scaling_factor_den, &width, + &height); // Workaround for multiple frame contexts. In some frames we can't use prev_mi // since its previous frame could be changed during decoding time. The idea is @@ -728,11 +741,10 @@ int vp9_svc_start_frame(VP9_COMP *const cpi) { cpi->common.show_frame = 0; cpi->ref_frame_flags = 0; cpi->common.frame_type = INTER_FRAME; - cpi->lst_fb_idx = - cpi->gld_fb_idx = cpi->alt_fb_idx = SMALL_FRAME_FB_IDX; + cpi->lst_fb_idx = cpi->gld_fb_idx = cpi->alt_fb_idx = + SMALL_FRAME_FB_IDX; - if (cpi->svc.encode_intra_empty_frame != 0) - cpi->common.intra_only = 1; + if (cpi->svc.encode_intra_empty_frame != 0) cpi->common.intra_only = 1; width = SMALL_FRAME_WIDTH; height = SMALL_FRAME_HEIGHT; @@ -782,12 +794,33 @@ void vp9_free_svc_cyclic_refresh(VP9_COMP *const cpi) { for (tl = 0; tl < oxcf->ts_number_layers; ++tl) { int layer = LAYER_IDS_TO_IDX(sl, tl, oxcf->ts_number_layers); LAYER_CONTEXT *const lc = &svc->layer_context[layer]; - if (lc->map) - vpx_free(lc->map); - if (lc->last_coded_q_map) - vpx_free(lc->last_coded_q_map); - if (lc->consec_zero_mv) - vpx_free(lc->consec_zero_mv); + if (lc->map) vpx_free(lc->map); + if (lc->last_coded_q_map) vpx_free(lc->last_coded_q_map); + if (lc->consec_zero_mv) 
vpx_free(lc->consec_zero_mv); } } } + +// Reset on key frame: reset counters, references and buffer updates. +void vp9_svc_reset_key_frame(VP9_COMP *const cpi) { + int sl, tl; + SVC *const svc = &cpi->svc; + LAYER_CONTEXT *lc = NULL; + for (sl = 0; sl < svc->number_spatial_layers; ++sl) { + for (tl = 0; tl < svc->number_temporal_layers; ++tl) { + lc = &cpi->svc.layer_context[sl * svc->number_temporal_layers + tl]; + lc->current_video_frame_in_layer = 0; + lc->frames_from_key_frame = 0; + } + } + if (svc->temporal_layering_mode == VP9E_TEMPORAL_LAYERING_MODE_0212) { + set_flags_and_fb_idx_for_temporal_mode3(cpi); + } else if (svc->temporal_layering_mode == + VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING) { + set_flags_and_fb_idx_for_temporal_mode_noLayering(cpi); + } else if (svc->temporal_layering_mode == VP9E_TEMPORAL_LAYERING_MODE_0101) { + set_flags_and_fb_idx_for_temporal_mode2(cpi); + } + vp9_update_temporal_layer_framerate(cpi); + vp9_restore_layer_context(cpi); +} diff --git a/libs/libvpx/vp9/encoder/vp9_svc_layercontext.h b/libs/libvpx/vp9/encoder/vp9_svc_layercontext.h index 4e186401fe..ee7a6638b4 100644 --- a/libs/libvpx/vp9/encoder/vp9_svc_layercontext.h +++ b/libs/libvpx/vp9/encoder/vp9_svc_layercontext.h @@ -35,7 +35,7 @@ typedef struct { int is_key_frame; int frames_from_key_frame; FRAME_TYPE last_frame_type; - struct lookahead_entry *alt_ref_source; + struct lookahead_entry *alt_ref_source; int alt_ref_idx; int gold_ref_idx; int has_alt_frame; @@ -46,6 +46,7 @@ typedef struct { signed char *map; uint8_t *last_coded_q_map; uint8_t *consec_zero_mv; + uint8_t speed; } LAYER_CONTEXT; typedef struct { @@ -59,17 +60,17 @@ typedef struct { int rc_drop_superframe; // Workaround for multiple frame contexts - enum { - ENCODED = 0, - ENCODING, - NEED_TO_ENCODE - }encode_empty_frame_state; + enum { ENCODED = 0, ENCODING, NEED_TO_ENCODE } encode_empty_frame_state; struct lookahead_entry empty_frame; int encode_intra_empty_frame; // Store scaled source frames to be used 
for temporal filter to generate // a alt ref frame. YV12_BUFFER_CONFIG scaled_frames[MAX_LAG_BUFFERS]; + // Temp buffer used for 2-stage down-sampling, for real-time mode. + YV12_BUFFER_CONFIG scaled_temp; + int scaled_one_half; + int scaled_temp_is_alloc; // Layer context used for rate control in one pass temporal CBR mode or // two pass spatial mode. @@ -134,6 +135,8 @@ int vp9_one_pass_cbr_svc_start_layer(struct VP9_COMP *const cpi); void vp9_free_svc_cyclic_refresh(struct VP9_COMP *const cpi); +void vp9_svc_reset_key_frame(struct VP9_COMP *const cpi); + #ifdef __cplusplus } // extern "C" #endif diff --git a/libs/libvpx/vp9/encoder/vp9_temporal_filter.c b/libs/libvpx/vp9/encoder/vp9_temporal_filter.c index 82f566b132..a167eeb15d 100644 --- a/libs/libvpx/vp9/encoder/vp9_temporal_filter.c +++ b/libs/libvpx/vp9/encoder/vp9_temporal_filter.c @@ -31,22 +31,13 @@ static int fixed_divide[512]; -static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd, - uint8_t *y_mb_ptr, - uint8_t *u_mb_ptr, - uint8_t *v_mb_ptr, - int stride, - int uv_block_width, - int uv_block_height, - int mv_row, - int mv_col, - uint8_t *pred, - struct scale_factors *scale, - int x, int y) { +static void temporal_filter_predictors_mb_c( + MACROBLOCKD *xd, uint8_t *y_mb_ptr, uint8_t *u_mb_ptr, uint8_t *v_mb_ptr, + int stride, int uv_block_width, int uv_block_height, int mv_row, int mv_col, + uint8_t *pred, struct scale_factors *scale, int x, int y) { const int which_mv = 0; const MV mv = { mv_row, mv_col }; - const InterpKernel *const kernel = - vp9_filter_kernels[xd->mi[0]->interp_filter]; + const InterpKernel *const kernel = vp9_filter_kernels[EIGHTTAP_SHARP]; enum mv_precision mv_precision_uv; int uv_stride; @@ -60,73 +51,46 @@ static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd, #if CONFIG_VP9_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - vp9_highbd_build_inter_predictor(y_mb_ptr, stride, - &pred[0], 16, - &mv, - scale, - 16, 16, - which_mv, - kernel, 
MV_PRECISION_Q3, x, y, xd->bd); + vp9_highbd_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale, + 16, 16, which_mv, kernel, MV_PRECISION_Q3, + x, y, xd->bd); - vp9_highbd_build_inter_predictor(u_mb_ptr, uv_stride, - &pred[256], uv_block_width, - &mv, - scale, - uv_block_width, uv_block_height, - which_mv, - kernel, mv_precision_uv, x, y, xd->bd); + vp9_highbd_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256], + uv_block_width, &mv, scale, uv_block_width, + uv_block_height, which_mv, kernel, + mv_precision_uv, x, y, xd->bd); - vp9_highbd_build_inter_predictor(v_mb_ptr, uv_stride, - &pred[512], uv_block_width, - &mv, - scale, - uv_block_width, uv_block_height, - which_mv, - kernel, mv_precision_uv, x, y, xd->bd); + vp9_highbd_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512], + uv_block_width, &mv, scale, uv_block_width, + uv_block_height, which_mv, kernel, + mv_precision_uv, x, y, xd->bd); return; } #endif // CONFIG_VP9_HIGHBITDEPTH - vp9_build_inter_predictor(y_mb_ptr, stride, - &pred[0], 16, - &mv, - scale, - 16, 16, - which_mv, - kernel, MV_PRECISION_Q3, x, y); + (void)xd; + vp9_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale, 16, 16, + which_mv, kernel, MV_PRECISION_Q3, x, y); - vp9_build_inter_predictor(u_mb_ptr, uv_stride, - &pred[256], uv_block_width, - &mv, - scale, - uv_block_width, uv_block_height, - which_mv, - kernel, mv_precision_uv, x, y); + vp9_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256], uv_block_width, + &mv, scale, uv_block_width, uv_block_height, + which_mv, kernel, mv_precision_uv, x, y); - vp9_build_inter_predictor(v_mb_ptr, uv_stride, - &pred[512], uv_block_width, - &mv, - scale, - uv_block_width, uv_block_height, - which_mv, - kernel, mv_precision_uv, x, y); + vp9_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512], uv_block_width, + &mv, scale, uv_block_width, uv_block_height, + which_mv, kernel, mv_precision_uv, x, y); } void vp9_temporal_filter_init(void) { int i; fixed_divide[0] = 0; - 
for (i = 1; i < 512; ++i) - fixed_divide[i] = 0x80000 / i; + for (i = 1; i < 512; ++i) fixed_divide[i] = 0x80000 / i; } -void vp9_temporal_filter_apply_c(uint8_t *frame1, - unsigned int stride, - uint8_t *frame2, - unsigned int block_width, - unsigned int block_height, - int strength, - int filter_weight, - unsigned int *accumulator, +void vp9_temporal_filter_apply_c(uint8_t *frame1, unsigned int stride, + uint8_t *frame2, unsigned int block_width, + unsigned int block_height, int strength, + int filter_weight, unsigned int *accumulator, uint16_t *count) { unsigned int i, j, k; int modifier; @@ -143,13 +107,13 @@ void vp9_temporal_filter_apply_c(uint8_t *frame1, for (idy = -1; idy <= 1; ++idy) { for (idx = -1; idx <= 1; ++idx) { - int row = i + idy; - int col = j + idx; + int row = (int)i + idy; + int col = (int)j + idx; - if (row >= 0 && row < (int)block_height && - col >= 0 && col < (int)block_width) { + if (row >= 0 && row < (int)block_height && col >= 0 && + col < (int)block_width) { int diff = frame1[byte + idy * (int)stride + idx] - - frame2[idy * (int)block_width + idx]; + frame2[idy * (int)block_width + idx]; diff_sse[index] = diff * diff; ++index; } @@ -159,19 +123,17 @@ void vp9_temporal_filter_apply_c(uint8_t *frame1, assert(index > 0); modifier = 0; - for (idx = 0; idx < 9; ++idx) - modifier += diff_sse[idx]; + for (idx = 0; idx < 9; ++idx) modifier += diff_sse[idx]; modifier *= 3; modifier /= index; ++frame2; - modifier += rounding; + modifier += rounding; modifier >>= strength; - if (modifier > 16) - modifier = 16; + if (modifier > 16) modifier = 16; modifier = 16 - modifier; modifier *= filter_weight; @@ -187,15 +149,10 @@ void vp9_temporal_filter_apply_c(uint8_t *frame1, } #if CONFIG_VP9_HIGHBITDEPTH -void vp9_highbd_temporal_filter_apply_c(uint8_t *frame1_8, - unsigned int stride, - uint8_t *frame2_8, - unsigned int block_width, - unsigned int block_height, - int strength, - int filter_weight, - unsigned int *accumulator, - uint16_t *count) { +void 
vp9_highbd_temporal_filter_apply_c( + uint8_t *frame1_8, unsigned int stride, uint8_t *frame2_8, + unsigned int block_width, unsigned int block_height, int strength, + int filter_weight, unsigned int *accumulator, uint16_t *count) { uint16_t *frame1 = CONVERT_TO_SHORTPTR(frame1_8); uint16_t *frame2 = CONVERT_TO_SHORTPTR(frame2_8); unsigned int i, j, k; @@ -211,13 +168,13 @@ void vp9_highbd_temporal_filter_apply_c(uint8_t *frame1_8, for (idy = -1; idy <= 1; ++idy) { for (idx = -1; idx <= 1; ++idx) { - int row = i + idy; - int col = j + idx; + int row = (int)i + idy; + int col = (int)j + idx; - if (row >= 0 && row < (int)block_height && - col >= 0 && col < (int)block_width) { + if (row >= 0 && row < (int)block_height && col >= 0 && + col < (int)block_width) { int diff = frame1[byte + idy * (int)stride + idx] - - frame2[idy * (int)block_width + idx]; + frame2[idy * (int)block_width + idx]; diff_sse[index] = diff * diff; ++index; } @@ -226,8 +183,7 @@ void vp9_highbd_temporal_filter_apply_c(uint8_t *frame1_8, assert(index > 0); modifier = 0; - for (idx = 0; idx < 9; ++idx) - modifier += diff_sse[idx]; + for (idx = 0; idx < 9; ++idx) modifier += diff_sse[idx]; modifier *= 3; modifier /= index; @@ -236,8 +192,7 @@ void vp9_highbd_temporal_filter_apply_c(uint8_t *frame1_8, modifier += rounding; modifier >>= strength; - if (modifier > 16) - modifier = 16; + if (modifier > 16) modifier = 16; modifier = 16 - modifier; modifier *= filter_weight; @@ -264,11 +219,11 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi, int step_param; int sadpb = x->sadperbit16; int bestsme = INT_MAX; - int distortion; - unsigned int sse; + uint32_t distortion; + uint32_t sse; int cost_list[5]; - MV best_ref_mv1 = {0, 0}; + MV best_ref_mv1 = { 0, 0 }; MV best_ref_mv1_full; /* full-pixel value of best_ref_mv1 */ MV *ref_mv = &x->e_mbd.mi[0]->bmi[0].as_mv[0].as_mv; @@ -295,15 +250,11 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi, mv_sf->search_method = 
old_search_method; // Ignore mv costing by sending NULL pointer instead of cost array - bestsme = cpi->find_fractional_mv_step(x, ref_mv, - &best_ref_mv1, - cpi->common.allow_high_precision_mv, - x->errorperbit, - &cpi->fn_ptr[BLOCK_16X16], - 0, mv_sf->subpel_iters_per_step, - cond_cost_list(cpi, cost_list), - NULL, NULL, - &distortion, &sse, NULL, 0, 0); + bestsme = cpi->find_fractional_mv_step( + x, ref_mv, &best_ref_mv1, cpi->common.allow_high_precision_mv, + x->errorperbit, &cpi->fn_ptr[BLOCK_16X16], 0, + mv_sf->subpel_iters_per_step, cond_cost_list(cpi, cost_list), NULL, NULL, + &distortion, &sse, NULL, 0, 0); // Restore input state x->plane[0].src = src; @@ -314,8 +265,7 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi, static void temporal_filter_iterate_c(VP9_COMP *cpi, YV12_BUFFER_CONFIG **frames, - int frame_count, - int alt_ref_index, + int frame_count, int alt_ref_index, int strength, struct scale_factors *scale) { int byte; @@ -332,17 +282,17 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi, YV12_BUFFER_CONFIG *f = frames[alt_ref_index]; uint8_t *dst1, *dst2; #if CONFIG_VP9_HIGHBITDEPTH - DECLARE_ALIGNED(16, uint16_t, predictor16[16 * 16 * 3]); - DECLARE_ALIGNED(16, uint8_t, predictor8[16 * 16 * 3]); + DECLARE_ALIGNED(16, uint16_t, predictor16[16 * 16 * 3]); + DECLARE_ALIGNED(16, uint8_t, predictor8[16 * 16 * 3]); uint8_t *predictor; #else - DECLARE_ALIGNED(16, uint8_t, predictor[16 * 16 * 3]); + DECLARE_ALIGNED(16, uint8_t, predictor[16 * 16 * 3]); #endif const int mb_uv_height = 16 >> mbd->plane[1].subsampling_y; - const int mb_uv_width = 16 >> mbd->plane[1].subsampling_x; + const int mb_uv_width = 16 >> mbd->plane[1].subsampling_x; // Save input state - uint8_t* input_buffer[MAX_MB_PLANE]; + uint8_t *input_buffer[MAX_MB_PLANE]; int i; #if CONFIG_VP9_HIGHBITDEPTH if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { @@ -352,8 +302,7 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi, } #endif - for (i = 0; i < MAX_MB_PLANE; i++) 
- input_buffer[i] = mbd->plane[i].pre[0].buf; + for (i = 0; i < MAX_MB_PLANE; i++) input_buffer[i] = mbd->plane[i].pre[0].buf; for (mb_row = 0; mb_row < mb_rows; mb_row++) { // Source frames are extended to 16 pixels. This is different than @@ -367,9 +316,10 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi, // 8 - VP9_INTERP_EXTEND. // To keep the mv in play for both Y and UV planes the max that it // can be on a border is therefore 16 - (2*VP9_INTERP_EXTEND+1). - cpi->td.mb.mv_row_min = -((mb_row * 16) + (17 - 2 * VP9_INTERP_EXTEND)); - cpi->td.mb.mv_row_max = ((mb_rows - 1 - mb_row) * 16) - + (17 - 2 * VP9_INTERP_EXTEND); + cpi->td.mb.mv_limits.row_min = + -((mb_row * 16) + (17 - 2 * VP9_INTERP_EXTEND)); + cpi->td.mb.mv_limits.row_max = + ((mb_rows - 1 - mb_row) * 16) + (17 - 2 * VP9_INTERP_EXTEND); for (mb_col = 0; mb_col < mb_cols; mb_col++) { int i, j, k; @@ -378,16 +328,16 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi, memset(accumulator, 0, 16 * 16 * 3 * sizeof(accumulator[0])); memset(count, 0, 16 * 16 * 3 * sizeof(count[0])); - cpi->td.mb.mv_col_min = -((mb_col * 16) + (17 - 2 * VP9_INTERP_EXTEND)); - cpi->td.mb.mv_col_max = ((mb_cols - 1 - mb_col) * 16) - + (17 - 2 * VP9_INTERP_EXTEND); + cpi->td.mb.mv_limits.col_min = + -((mb_col * 16) + (17 - 2 * VP9_INTERP_EXTEND)); + cpi->td.mb.mv_limits.col_max = + ((mb_cols - 1 - mb_col) * 16) + (17 - 2 * VP9_INTERP_EXTEND); for (frame = 0; frame < frame_count; frame++) { - const int thresh_low = 10000; + const int thresh_low = 10000; const int thresh_high = 20000; - if (frames[frame] == NULL) - continue; + if (frames[frame] == NULL) continue; mbd->mi[0]->bmi[0].as_mv[0].as_mv.row = 0; mbd->mi[0]->bmi[0].as_mv[0].as_mv.col = 0; @@ -396,87 +346,69 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi, filter_weight = 2; } else { // Find best match in this frame by MC - int err = temporal_filter_find_matching_mb_c(cpi, - frames[alt_ref_index]->y_buffer + mb_y_offset, - frames[frame]->y_buffer + 
mb_y_offset, - frames[frame]->y_stride); + int err = temporal_filter_find_matching_mb_c( + cpi, frames[alt_ref_index]->y_buffer + mb_y_offset, + frames[frame]->y_buffer + mb_y_offset, frames[frame]->y_stride); - // Assign higher weight to matching MB if it's error + // Assign higher weight to matching MB if its error // score is lower. If not applying MC default behavior // is to weight all MBs equal. - filter_weight = err < thresh_low - ? 2 : err < thresh_high ? 1 : 0; + filter_weight = err < thresh_low ? 2 : err < thresh_high ? 1 : 0; } if (filter_weight != 0) { // Construct the predictors - temporal_filter_predictors_mb_c(mbd, - frames[frame]->y_buffer + mb_y_offset, + temporal_filter_predictors_mb_c( + mbd, frames[frame]->y_buffer + mb_y_offset, frames[frame]->u_buffer + mb_uv_offset, - frames[frame]->v_buffer + mb_uv_offset, - frames[frame]->y_stride, - mb_uv_width, mb_uv_height, - mbd->mi[0]->bmi[0].as_mv[0].as_mv.row, - mbd->mi[0]->bmi[0].as_mv[0].as_mv.col, - predictor, scale, + frames[frame]->v_buffer + mb_uv_offset, frames[frame]->y_stride, + mb_uv_width, mb_uv_height, mbd->mi[0]->bmi[0].as_mv[0].as_mv.row, + mbd->mi[0]->bmi[0].as_mv[0].as_mv.col, predictor, scale, mb_col * 16, mb_row * 16); #if CONFIG_VP9_HIGHBITDEPTH if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { int adj_strength = strength + 2 * (mbd->bd - 8); // Apply the filter (YUV) - vp9_highbd_temporal_filter_apply_c(f->y_buffer + mb_y_offset, - f->y_stride, - predictor, 16, 16, adj_strength, - filter_weight, - accumulator, count); - vp9_highbd_temporal_filter_apply_c(f->u_buffer + mb_uv_offset, - f->uv_stride, predictor + 256, - mb_uv_width, mb_uv_height, - adj_strength, - filter_weight, accumulator + 256, - count + 256); - vp9_highbd_temporal_filter_apply_c(f->v_buffer + mb_uv_offset, - f->uv_stride, predictor + 512, - mb_uv_width, mb_uv_height, - adj_strength, filter_weight, - accumulator + 512, count + 512); + vp9_highbd_temporal_filter_apply_c( + f->y_buffer + mb_y_offset, f->y_stride, 
predictor, 16, 16, + adj_strength, filter_weight, accumulator, count); + vp9_highbd_temporal_filter_apply_c( + f->u_buffer + mb_uv_offset, f->uv_stride, predictor + 256, + mb_uv_width, mb_uv_height, adj_strength, filter_weight, + accumulator + 256, count + 256); + vp9_highbd_temporal_filter_apply_c( + f->v_buffer + mb_uv_offset, f->uv_stride, predictor + 512, + mb_uv_width, mb_uv_height, adj_strength, filter_weight, + accumulator + 512, count + 512); } else { // Apply the filter (YUV) vp9_temporal_filter_apply_c(f->y_buffer + mb_y_offset, f->y_stride, - predictor, 16, 16, - strength, filter_weight, - accumulator, count); - vp9_temporal_filter_apply_c(f->u_buffer + mb_uv_offset, - f->uv_stride, - predictor + 256, - mb_uv_width, mb_uv_height, strength, - filter_weight, accumulator + 256, - count + 256); - vp9_temporal_filter_apply_c(f->v_buffer + mb_uv_offset, - f->uv_stride, - predictor + 512, - mb_uv_width, mb_uv_height, strength, - filter_weight, accumulator + 512, - count + 512); + predictor, 16, 16, strength, + filter_weight, accumulator, count); + vp9_temporal_filter_apply_c( + f->u_buffer + mb_uv_offset, f->uv_stride, predictor + 256, + mb_uv_width, mb_uv_height, strength, filter_weight, + accumulator + 256, count + 256); + vp9_temporal_filter_apply_c( + f->v_buffer + mb_uv_offset, f->uv_stride, predictor + 512, + mb_uv_width, mb_uv_height, strength, filter_weight, + accumulator + 512, count + 512); } #else // Apply the filter (YUV) // TODO(jingning): Need SIMD optimization for this. 
vp9_temporal_filter_apply_c(f->y_buffer + mb_y_offset, f->y_stride, - predictor, 16, 16, - strength, filter_weight, - accumulator, count); + predictor, 16, 16, strength, + filter_weight, accumulator, count); vp9_temporal_filter_apply_c(f->u_buffer + mb_uv_offset, f->uv_stride, - predictor + 256, - mb_uv_width, mb_uv_height, strength, - filter_weight, accumulator + 256, - count + 256); + predictor + 256, mb_uv_width, + mb_uv_height, strength, filter_weight, + accumulator + 256, count + 256); vp9_temporal_filter_apply_c(f->v_buffer + mb_uv_offset, f->uv_stride, - predictor + 512, - mb_uv_width, mb_uv_height, strength, - filter_weight, accumulator + 512, - count + 512); + predictor + 512, mb_uv_width, + mb_uv_height, strength, filter_weight, + accumulator + 512, count + 512); #endif // CONFIG_VP9_HIGHBITDEPTH } } @@ -631,50 +563,53 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi, } // Restore input state - for (i = 0; i < MAX_MB_PLANE; i++) - mbd->plane[i].pre[0].buf = input_buffer[i]; + for (i = 0; i < MAX_MB_PLANE; i++) mbd->plane[i].pre[0].buf = input_buffer[i]; } // Apply buffer limits and context specific adjustments to arnr filter. -static void adjust_arnr_filter(VP9_COMP *cpi, - int distance, int group_boost, +static void adjust_arnr_filter(VP9_COMP *cpi, int distance, int group_boost, int *arnr_frames, int *arnr_strength) { const VP9EncoderConfig *const oxcf = &cpi->oxcf; const int frames_after_arf = vp9_lookahead_depth(cpi->lookahead) - distance - 1; int frames_fwd = (cpi->oxcf.arnr_max_frames - 1) >> 1; int frames_bwd; - int q, frames, strength; + int q, frames, base_strength, strength; + + // Context dependent two pass adjustment to strength. + if (oxcf->pass == 2) { + base_strength = oxcf->arnr_strength + cpi->twopass.arnr_strength_adjustment; + // Clip to allowed range. + base_strength = VPXMIN(6, VPXMAX(0, base_strength)); + } else { + base_strength = oxcf->arnr_strength; + } // Define the forward and backwards filter limits for this arnr group. 
- if (frames_fwd > frames_after_arf) - frames_fwd = frames_after_arf; - if (frames_fwd > distance) - frames_fwd = distance; + if (frames_fwd > frames_after_arf) frames_fwd = frames_after_arf; + if (frames_fwd > distance) frames_fwd = distance; frames_bwd = frames_fwd; // For even length filter there is one more frame backward // than forward: e.g. len=6 ==> bbbAff, len=7 ==> bbbAfff. - if (frames_bwd < distance) - frames_bwd += (oxcf->arnr_max_frames + 1) & 0x1; + if (frames_bwd < distance) frames_bwd += (oxcf->arnr_max_frames + 1) & 0x1; // Set the baseline active filter size. frames = frames_bwd + 1 + frames_fwd; // Adjust the strength based on active max q. if (cpi->common.current_video_frame > 1) - q = ((int)vp9_convert_qindex_to_q( - cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth)); + q = ((int)vp9_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME], + cpi->common.bit_depth)); else - q = ((int)vp9_convert_qindex_to_q( - cpi->rc.avg_frame_qindex[KEY_FRAME], cpi->common.bit_depth)); + q = ((int)vp9_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME], + cpi->common.bit_depth)); if (q > 16) { - strength = oxcf->arnr_strength; + strength = base_strength; } else { - strength = oxcf->arnr_strength - ((16 - q) / 2); - if (strength < 0) - strength = 0; + strength = base_strength - ((16 - q) / 2); + if (strength < 0) strength = 0; } // Adjust number of frames in filter and strength based on gf boost level. @@ -710,7 +645,7 @@ void vp9_temporal_filter(VP9_COMP *cpi, int distance) { int frames_to_blur_backward; int frames_to_blur_forward; struct scale_factors sf; - YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = {NULL}; + YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS] = { NULL }; // Apply context specific adjustments to the arnr filter parameters. 
adjust_arnr_filter(cpi, distance, rc->gfu_boost, &frames_to_blur, &strength); @@ -721,8 +656,8 @@ void vp9_temporal_filter(VP9_COMP *cpi, int distance) { // Setup frame pointers, NULL indicates frame not included in filter. for (frame = 0; frame < frames_to_blur; ++frame) { const int which_buffer = start_frame - frame; - struct lookahead_entry *buf = vp9_lookahead_peek(cpi->lookahead, - which_buffer); + struct lookahead_entry *buf = + vp9_lookahead_peek(cpi->lookahead, which_buffer); frames[frames_to_blur - 1 - frame] = &buf->img; } @@ -735,16 +670,13 @@ void vp9_temporal_filter(VP9_COMP *cpi, int distance) { int frame_used = 0; #if CONFIG_VP9_HIGHBITDEPTH vp9_setup_scale_factors_for_frame( - &sf, - get_frame_new_buffer(cm)->y_crop_width, + &sf, get_frame_new_buffer(cm)->y_crop_width, get_frame_new_buffer(cm)->y_crop_height, get_frame_new_buffer(cm)->y_crop_width, - get_frame_new_buffer(cm)->y_crop_height, - cm->use_highbitdepth); + get_frame_new_buffer(cm)->y_crop_height, cm->use_highbitdepth); #else vp9_setup_scale_factors_for_frame( - &sf, - get_frame_new_buffer(cm)->y_crop_width, + &sf, get_frame_new_buffer(cm)->y_crop_width, get_frame_new_buffer(cm)->y_crop_height, get_frame_new_buffer(cm)->y_crop_width, get_frame_new_buffer(cm)->y_crop_height); @@ -754,14 +686,13 @@ void vp9_temporal_filter(VP9_COMP *cpi, int distance) { if (cm->mi_cols * MI_SIZE != frames[frame]->y_width || cm->mi_rows * MI_SIZE != frames[frame]->y_height) { if (vpx_realloc_frame_buffer(&cpi->svc.scaled_frames[frame_used], - cm->width, cm->height, - cm->subsampling_x, cm->subsampling_y, + cm->width, cm->height, cm->subsampling_x, + cm->subsampling_y, #if CONFIG_VP9_HIGHBITDEPTH cm->use_highbitdepth, #endif VP9_ENC_BORDER_IN_PIXELS, - cm->byte_alignment, - NULL, NULL, NULL)) { + cm->byte_alignment, NULL, NULL, NULL)) { vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR, "Failed to reallocate alt_ref_buffer"); } @@ -774,20 +705,16 @@ void vp9_temporal_filter(VP9_COMP *cpi, int distance) { 
xd->mi = cm->mi_grid_visible; xd->mi[0] = cm->mi; } else { - // ARF is produced at the native frame size and resized when coded. +// ARF is produced at the native frame size and resized when coded. #if CONFIG_VP9_HIGHBITDEPTH - vp9_setup_scale_factors_for_frame(&sf, - frames[0]->y_crop_width, - frames[0]->y_crop_height, - frames[0]->y_crop_width, - frames[0]->y_crop_height, - cm->use_highbitdepth); + vp9_setup_scale_factors_for_frame( + &sf, frames[0]->y_crop_width, frames[0]->y_crop_height, + frames[0]->y_crop_width, frames[0]->y_crop_height, + cm->use_highbitdepth); #else - vp9_setup_scale_factors_for_frame(&sf, - frames[0]->y_crop_width, - frames[0]->y_crop_height, - frames[0]->y_crop_width, - frames[0]->y_crop_height); + vp9_setup_scale_factors_for_frame( + &sf, frames[0]->y_crop_width, frames[0]->y_crop_height, + frames[0]->y_crop_width, frames[0]->y_crop_height); #endif // CONFIG_VP9_HIGHBITDEPTH } } diff --git a/libs/libvpx/vp9/encoder/vp9_tokenize.c b/libs/libvpx/vp9/encoder/vp9_tokenize.c index 93be6d7ae3..dc2616dbe1 100644 --- a/libs/libvpx/vp9/encoder/vp9_tokenize.c +++ b/libs/libvpx/vp9/encoder/vp9_tokenize.c @@ -18,40 +18,60 @@ #include "vp9/common/vp9_entropy.h" #include "vp9/common/vp9_pred_common.h" #include "vp9/common/vp9_scan.h" -#include "vp9/common/vp9_seg_common.h" #include "vp9/encoder/vp9_cost.h" #include "vp9/encoder/vp9_encoder.h" #include "vp9/encoder/vp9_tokenize.h" static const TOKENVALUE dct_cat_lt_10_value_tokens[] = { - {9, 63}, {9, 61}, {9, 59}, {9, 57}, {9, 55}, {9, 53}, {9, 51}, {9, 49}, - {9, 47}, {9, 45}, {9, 43}, {9, 41}, {9, 39}, {9, 37}, {9, 35}, {9, 33}, - {9, 31}, {9, 29}, {9, 27}, {9, 25}, {9, 23}, {9, 21}, {9, 19}, {9, 17}, - {9, 15}, {9, 13}, {9, 11}, {9, 9}, {9, 7}, {9, 5}, {9, 3}, {9, 1}, - {8, 31}, {8, 29}, {8, 27}, {8, 25}, {8, 23}, {8, 21}, - {8, 19}, {8, 17}, {8, 15}, {8, 13}, {8, 11}, {8, 9}, - {8, 7}, {8, 5}, {8, 3}, {8, 1}, - {7, 15}, {7, 13}, {7, 11}, {7, 9}, {7, 7}, {7, 5}, {7, 3}, {7, 1}, - {6, 7}, {6, 5}, 
{6, 3}, {6, 1}, {5, 3}, {5, 1}, - {4, 1}, {3, 1}, {2, 1}, {1, 1}, {0, 0}, - {1, 0}, {2, 0}, {3, 0}, {4, 0}, - {5, 0}, {5, 2}, {6, 0}, {6, 2}, {6, 4}, {6, 6}, - {7, 0}, {7, 2}, {7, 4}, {7, 6}, {7, 8}, {7, 10}, {7, 12}, {7, 14}, - {8, 0}, {8, 2}, {8, 4}, {8, 6}, {8, 8}, {8, 10}, {8, 12}, - {8, 14}, {8, 16}, {8, 18}, {8, 20}, {8, 22}, {8, 24}, - {8, 26}, {8, 28}, {8, 30}, {9, 0}, {9, 2}, - {9, 4}, {9, 6}, {9, 8}, {9, 10}, {9, 12}, {9, 14}, {9, 16}, - {9, 18}, {9, 20}, {9, 22}, {9, 24}, {9, 26}, {9, 28}, - {9, 30}, {9, 32}, {9, 34}, {9, 36}, {9, 38}, {9, 40}, - {9, 42}, {9, 44}, {9, 46}, {9, 48}, {9, 50}, {9, 52}, - {9, 54}, {9, 56}, {9, 58}, {9, 60}, {9, 62} + { 9, 63 }, { 9, 61 }, { 9, 59 }, { 9, 57 }, { 9, 55 }, { 9, 53 }, { 9, 51 }, + { 9, 49 }, { 9, 47 }, { 9, 45 }, { 9, 43 }, { 9, 41 }, { 9, 39 }, { 9, 37 }, + { 9, 35 }, { 9, 33 }, { 9, 31 }, { 9, 29 }, { 9, 27 }, { 9, 25 }, { 9, 23 }, + { 9, 21 }, { 9, 19 }, { 9, 17 }, { 9, 15 }, { 9, 13 }, { 9, 11 }, { 9, 9 }, + { 9, 7 }, { 9, 5 }, { 9, 3 }, { 9, 1 }, { 8, 31 }, { 8, 29 }, { 8, 27 }, + { 8, 25 }, { 8, 23 }, { 8, 21 }, { 8, 19 }, { 8, 17 }, { 8, 15 }, { 8, 13 }, + { 8, 11 }, { 8, 9 }, { 8, 7 }, { 8, 5 }, { 8, 3 }, { 8, 1 }, { 7, 15 }, + { 7, 13 }, { 7, 11 }, { 7, 9 }, { 7, 7 }, { 7, 5 }, { 7, 3 }, { 7, 1 }, + { 6, 7 }, { 6, 5 }, { 6, 3 }, { 6, 1 }, { 5, 3 }, { 5, 1 }, { 4, 1 }, + { 3, 1 }, { 2, 1 }, { 1, 1 }, { 0, 0 }, { 1, 0 }, { 2, 0 }, { 3, 0 }, + { 4, 0 }, { 5, 0 }, { 5, 2 }, { 6, 0 }, { 6, 2 }, { 6, 4 }, { 6, 6 }, + { 7, 0 }, { 7, 2 }, { 7, 4 }, { 7, 6 }, { 7, 8 }, { 7, 10 }, { 7, 12 }, + { 7, 14 }, { 8, 0 }, { 8, 2 }, { 8, 4 }, { 8, 6 }, { 8, 8 }, { 8, 10 }, + { 8, 12 }, { 8, 14 }, { 8, 16 }, { 8, 18 }, { 8, 20 }, { 8, 22 }, { 8, 24 }, + { 8, 26 }, { 8, 28 }, { 8, 30 }, { 9, 0 }, { 9, 2 }, { 9, 4 }, { 9, 6 }, + { 9, 8 }, { 9, 10 }, { 9, 12 }, { 9, 14 }, { 9, 16 }, { 9, 18 }, { 9, 20 }, + { 9, 22 }, { 9, 24 }, { 9, 26 }, { 9, 28 }, { 9, 30 }, { 9, 32 }, { 9, 34 }, + { 9, 36 }, { 9, 38 }, { 9, 40 }, { 9, 42 
}, { 9, 44 }, { 9, 46 }, { 9, 48 }, + { 9, 50 }, { 9, 52 }, { 9, 54 }, { 9, 56 }, { 9, 58 }, { 9, 60 }, { 9, 62 } }; -const TOKENVALUE *vp9_dct_cat_lt_10_value_tokens = dct_cat_lt_10_value_tokens + - (sizeof(dct_cat_lt_10_value_tokens) / sizeof(*dct_cat_lt_10_value_tokens)) - / 2; +const TOKENVALUE *vp9_dct_cat_lt_10_value_tokens = + dct_cat_lt_10_value_tokens + + (sizeof(dct_cat_lt_10_value_tokens) / sizeof(*dct_cat_lt_10_value_tokens)) / + 2; +// The corresponding costs of the extrabits for the tokens in the above table +// are stored in the table below. The values are obtained from looking up the +// entry for the specified extrabits in the table corresponding to the token +// (as defined in cost element vp9_extra_bits) +// e.g. {9, 63} maps to cat5_cost[63 >> 1], {1, 1} maps to sign_cost[1 >> 1] +static const int dct_cat_lt_10_value_cost[] = { + 3773, 3750, 3704, 3681, 3623, 3600, 3554, 3531, 3432, 3409, 3363, 3340, 3282, + 3259, 3213, 3190, 3136, 3113, 3067, 3044, 2986, 2963, 2917, 2894, 2795, 2772, + 2726, 2703, 2645, 2622, 2576, 2553, 3197, 3116, 3058, 2977, 2881, 2800, 2742, + 2661, 2615, 2534, 2476, 2395, 2299, 2218, 2160, 2079, 2566, 2427, 2334, 2195, + 2023, 1884, 1791, 1652, 1893, 1696, 1453, 1256, 1229, 864, 512, 512, 512, + 512, 0, 512, 512, 512, 512, 864, 1229, 1256, 1453, 1696, 1893, 1652, + 1791, 1884, 2023, 2195, 2334, 2427, 2566, 2079, 2160, 2218, 2299, 2395, 2476, + 2534, 2615, 2661, 2742, 2800, 2881, 2977, 3058, 3116, 3197, 2553, 2576, 2622, + 2645, 2703, 2726, 2772, 2795, 2894, 2917, 2963, 2986, 3044, 3067, 3113, 3136, + 3190, 3213, 3259, 3282, 3340, 3363, 3409, 3432, 3531, 3554, 3600, 3623, 3681, + 3704, 3750, 3773, +}; +const int *vp9_dct_cat_lt_10_value_cost = + dct_cat_lt_10_value_cost + + (sizeof(dct_cat_lt_10_value_cost) / sizeof(*dct_cat_lt_10_value_cost)) / 2; // Array indices are identical to previously-existing CONTEXT_NODE indices +/* clang-format off */ const vpx_tree_index vp9_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = { -EOB_TOKEN, 
2, // 0 = EOB -ZERO_TOKEN, 4, // 1 = ZERO @@ -65,244 +85,245 @@ const vpx_tree_index vp9_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = { -CATEGORY3_TOKEN, -CATEGORY4_TOKEN, // 9 = CAT_THREE -CATEGORY5_TOKEN, -CATEGORY6_TOKEN // 10 = CAT_FIVE }; +/* clang-format on */ -static const int16_t zero_cost[] = {0}; -static const int16_t sign_cost[1] = {512}; -static const int16_t cat1_cost[1 << 1] = {864, 1229}; -static const int16_t cat2_cost[1 << 2] = {1256, 1453, 1696, 1893}; -static const int16_t cat3_cost[1 << 3] = {1652, 1791, 1884, 2023, - 2195, 2334, 2427, 2566}; -static const int16_t cat4_cost[1 << 4] = {2079, 2160, 2218, 2299, 2395, 2476, - 2534, 2615, 2661, 2742, 2800, 2881, - 2977, 3058, 3116, 3197}; +static const int16_t zero_cost[] = { 0 }; +static const int16_t sign_cost[1] = { 512 }; +static const int16_t cat1_cost[1 << 1] = { 864, 1229 }; +static const int16_t cat2_cost[1 << 2] = { 1256, 1453, 1696, 1893 }; +static const int16_t cat3_cost[1 << 3] = { 1652, 1791, 1884, 2023, + 2195, 2334, 2427, 2566 }; +static const int16_t cat4_cost[1 << 4] = { 2079, 2160, 2218, 2299, 2395, 2476, + 2534, 2615, 2661, 2742, 2800, 2881, + 2977, 3058, 3116, 3197 }; static const int16_t cat5_cost[1 << 5] = { - 2553, 2576, 2622, 2645, 2703, 2726, 2772, 2795, 2894, 2917, 2963, - 2986, 3044, 3067, 3113, 3136, 3190, 3213, 3259, 3282, 3340, 3363, - 3409, 3432, 3531, 3554, 3600, 3623, 3681, 3704, 3750, 3773}; + 2553, 2576, 2622, 2645, 2703, 2726, 2772, 2795, 2894, 2917, 2963, + 2986, 3044, 3067, 3113, 3136, 3190, 3213, 3259, 3282, 3340, 3363, + 3409, 3432, 3531, 3554, 3600, 3623, 3681, 3704, 3750, 3773 +}; const int16_t vp9_cat6_low_cost[256] = { - 3378, 3390, 3401, 3413, 3435, 3447, 3458, 3470, 3517, 3529, 3540, 3552, - 3574, 3586, 3597, 3609, 3671, 3683, 3694, 3706, 3728, 3740, 3751, 3763, - 3810, 3822, 3833, 3845, 3867, 3879, 3890, 3902, 3973, 3985, 3996, 4008, - 4030, 4042, 4053, 4065, 4112, 4124, 4135, 4147, 4169, 4181, 4192, 4204, - 4266, 4278, 4289, 4301, 4323, 4335, 4346, 4358, 
4405, 4417, 4428, 4440, - 4462, 4474, 4485, 4497, 4253, 4265, 4276, 4288, 4310, 4322, 4333, 4345, - 4392, 4404, 4415, 4427, 4449, 4461, 4472, 4484, 4546, 4558, 4569, 4581, - 4603, 4615, 4626, 4638, 4685, 4697, 4708, 4720, 4742, 4754, 4765, 4777, - 4848, 4860, 4871, 4883, 4905, 4917, 4928, 4940, 4987, 4999, 5010, 5022, - 5044, 5056, 5067, 5079, 5141, 5153, 5164, 5176, 5198, 5210, 5221, 5233, - 5280, 5292, 5303, 5315, 5337, 5349, 5360, 5372, 4988, 5000, 5011, 5023, - 5045, 5057, 5068, 5080, 5127, 5139, 5150, 5162, 5184, 5196, 5207, 5219, - 5281, 5293, 5304, 5316, 5338, 5350, 5361, 5373, 5420, 5432, 5443, 5455, - 5477, 5489, 5500, 5512, 5583, 5595, 5606, 5618, 5640, 5652, 5663, 5675, - 5722, 5734, 5745, 5757, 5779, 5791, 5802, 5814, 5876, 5888, 5899, 5911, - 5933, 5945, 5956, 5968, 6015, 6027, 6038, 6050, 6072, 6084, 6095, 6107, - 5863, 5875, 5886, 5898, 5920, 5932, 5943, 5955, 6002, 6014, 6025, 6037, - 6059, 6071, 6082, 6094, 6156, 6168, 6179, 6191, 6213, 6225, 6236, 6248, - 6295, 6307, 6318, 6330, 6352, 6364, 6375, 6387, 6458, 6470, 6481, 6493, - 6515, 6527, 6538, 6550, 6597, 6609, 6620, 6632, 6654, 6666, 6677, 6689, - 6751, 6763, 6774, 6786, 6808, 6820, 6831, 6843, 6890, 6902, 6913, 6925, - 6947, 6959, 6970, 6982}; + 3378, 3390, 3401, 3413, 3435, 3447, 3458, 3470, 3517, 3529, 3540, 3552, 3574, + 3586, 3597, 3609, 3671, 3683, 3694, 3706, 3728, 3740, 3751, 3763, 3810, 3822, + 3833, 3845, 3867, 3879, 3890, 3902, 3973, 3985, 3996, 4008, 4030, 4042, 4053, + 4065, 4112, 4124, 4135, 4147, 4169, 4181, 4192, 4204, 4266, 4278, 4289, 4301, + 4323, 4335, 4346, 4358, 4405, 4417, 4428, 4440, 4462, 4474, 4485, 4497, 4253, + 4265, 4276, 4288, 4310, 4322, 4333, 4345, 4392, 4404, 4415, 4427, 4449, 4461, + 4472, 4484, 4546, 4558, 4569, 4581, 4603, 4615, 4626, 4638, 4685, 4697, 4708, + 4720, 4742, 4754, 4765, 4777, 4848, 4860, 4871, 4883, 4905, 4917, 4928, 4940, + 4987, 4999, 5010, 5022, 5044, 5056, 5067, 5079, 5141, 5153, 5164, 5176, 5198, + 5210, 5221, 5233, 5280, 5292, 5303, 5315, 
5337, 5349, 5360, 5372, 4988, 5000, + 5011, 5023, 5045, 5057, 5068, 5080, 5127, 5139, 5150, 5162, 5184, 5196, 5207, + 5219, 5281, 5293, 5304, 5316, 5338, 5350, 5361, 5373, 5420, 5432, 5443, 5455, + 5477, 5489, 5500, 5512, 5583, 5595, 5606, 5618, 5640, 5652, 5663, 5675, 5722, + 5734, 5745, 5757, 5779, 5791, 5802, 5814, 5876, 5888, 5899, 5911, 5933, 5945, + 5956, 5968, 6015, 6027, 6038, 6050, 6072, 6084, 6095, 6107, 5863, 5875, 5886, + 5898, 5920, 5932, 5943, 5955, 6002, 6014, 6025, 6037, 6059, 6071, 6082, 6094, + 6156, 6168, 6179, 6191, 6213, 6225, 6236, 6248, 6295, 6307, 6318, 6330, 6352, + 6364, 6375, 6387, 6458, 6470, 6481, 6493, 6515, 6527, 6538, 6550, 6597, 6609, + 6620, 6632, 6654, 6666, 6677, 6689, 6751, 6763, 6774, 6786, 6808, 6820, 6831, + 6843, 6890, 6902, 6913, 6925, 6947, 6959, 6970, 6982 +}; const int vp9_cat6_high_cost[64] = { - 88, 2251, 2727, 4890, 3148, 5311, 5787, 7950, 3666, 5829, 6305, - 8468, 6726, 8889, 9365, 11528, 3666, 5829, 6305, 8468, 6726, 8889, - 9365, 11528, 7244, 9407, 9883, 12046, 10304, 12467, 12943, 15106, 3666, - 5829, 6305, 8468, 6726, 8889, 9365, 11528, 7244, 9407, 9883, 12046, - 10304, 12467, 12943, 15106, 7244, 9407, 9883, 12046, 10304, 12467, 12943, - 15106, 10822, 12985, 13461, 15624, 13882, 16045, 16521, 18684}; + 88, 2251, 2727, 4890, 3148, 5311, 5787, 7950, 3666, 5829, 6305, + 8468, 6726, 8889, 9365, 11528, 3666, 5829, 6305, 8468, 6726, 8889, + 9365, 11528, 7244, 9407, 9883, 12046, 10304, 12467, 12943, 15106, 3666, + 5829, 6305, 8468, 6726, 8889, 9365, 11528, 7244, 9407, 9883, 12046, + 10304, 12467, 12943, 15106, 7244, 9407, 9883, 12046, 10304, 12467, 12943, + 15106, 10822, 12985, 13461, 15624, 13882, 16045, 16521, 18684 +}; #if CONFIG_VP9_HIGHBITDEPTH const int vp9_cat6_high10_high_cost[256] = { - 94, 2257, 2733, 4896, 3154, 5317, 5793, 7956, 3672, 5835, 6311, - 8474, 6732, 8895, 9371, 11534, 3672, 5835, 6311, 8474, 6732, 8895, - 9371, 11534, 7250, 9413, 9889, 12052, 10310, 12473, 12949, 15112, 3672, - 5835, 6311, 8474, 
6732, 8895, 9371, 11534, 7250, 9413, 9889, 12052, - 10310, 12473, 12949, 15112, 7250, 9413, 9889, 12052, 10310, 12473, 12949, - 15112, 10828, 12991, 13467, 15630, 13888, 16051, 16527, 18690, 4187, 6350, - 6826, 8989, 7247, 9410, 9886, 12049, 7765, 9928, 10404, 12567, 10825, - 12988, 13464, 15627, 7765, 9928, 10404, 12567, 10825, 12988, 13464, 15627, - 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, 7765, 9928, 10404, - 12567, 10825, 12988, 13464, 15627, 11343, 13506, 13982, 16145, 14403, 16566, - 17042, 19205, 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, 14921, - 17084, 17560, 19723, 17981, 20144, 20620, 22783, 4187, 6350, 6826, 8989, - 7247, 9410, 9886, 12049, 7765, 9928, 10404, 12567, 10825, 12988, 13464, - 15627, 7765, 9928, 10404, 12567, 10825, 12988, 13464, 15627, 11343, 13506, - 13982, 16145, 14403, 16566, 17042, 19205, 7765, 9928, 10404, 12567, 10825, - 12988, 13464, 15627, 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, - 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, 14921, 17084, 17560, - 19723, 17981, 20144, 20620, 22783, 8280, 10443, 10919, 13082, 11340, 13503, - 13979, 16142, 11858, 14021, 14497, 16660, 14918, 17081, 17557, 19720, 11858, - 14021, 14497, 16660, 14918, 17081, 17557, 19720, 15436, 17599, 18075, 20238, - 18496, 20659, 21135, 23298, 11858, 14021, 14497, 16660, 14918, 17081, 17557, - 19720, 15436, 17599, 18075, 20238, 18496, 20659, 21135, 23298, 15436, 17599, - 18075, 20238, 18496, 20659, 21135, 23298, 19014, 21177, 21653, 23816, 22074, - 24237, 24713, 26876}; + 94, 2257, 2733, 4896, 3154, 5317, 5793, 7956, 3672, 5835, 6311, + 8474, 6732, 8895, 9371, 11534, 3672, 5835, 6311, 8474, 6732, 8895, + 9371, 11534, 7250, 9413, 9889, 12052, 10310, 12473, 12949, 15112, 3672, + 5835, 6311, 8474, 6732, 8895, 9371, 11534, 7250, 9413, 9889, 12052, + 10310, 12473, 12949, 15112, 7250, 9413, 9889, 12052, 10310, 12473, 12949, + 15112, 10828, 12991, 13467, 15630, 13888, 16051, 16527, 18690, 4187, 6350, + 6826, 8989, 7247, 
9410, 9886, 12049, 7765, 9928, 10404, 12567, 10825, + 12988, 13464, 15627, 7765, 9928, 10404, 12567, 10825, 12988, 13464, 15627, + 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, 7765, 9928, 10404, + 12567, 10825, 12988, 13464, 15627, 11343, 13506, 13982, 16145, 14403, 16566, + 17042, 19205, 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, 14921, + 17084, 17560, 19723, 17981, 20144, 20620, 22783, 4187, 6350, 6826, 8989, + 7247, 9410, 9886, 12049, 7765, 9928, 10404, 12567, 10825, 12988, 13464, + 15627, 7765, 9928, 10404, 12567, 10825, 12988, 13464, 15627, 11343, 13506, + 13982, 16145, 14403, 16566, 17042, 19205, 7765, 9928, 10404, 12567, 10825, + 12988, 13464, 15627, 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, + 11343, 13506, 13982, 16145, 14403, 16566, 17042, 19205, 14921, 17084, 17560, + 19723, 17981, 20144, 20620, 22783, 8280, 10443, 10919, 13082, 11340, 13503, + 13979, 16142, 11858, 14021, 14497, 16660, 14918, 17081, 17557, 19720, 11858, + 14021, 14497, 16660, 14918, 17081, 17557, 19720, 15436, 17599, 18075, 20238, + 18496, 20659, 21135, 23298, 11858, 14021, 14497, 16660, 14918, 17081, 17557, + 19720, 15436, 17599, 18075, 20238, 18496, 20659, 21135, 23298, 15436, 17599, + 18075, 20238, 18496, 20659, 21135, 23298, 19014, 21177, 21653, 23816, 22074, + 24237, 24713, 26876 +}; const int vp9_cat6_high12_high_cost[1024] = { - 100, 2263, 2739, 4902, 3160, 5323, 5799, 7962, 3678, 5841, 6317, - 8480, 6738, 8901, 9377, 11540, 3678, 5841, 6317, 8480, 6738, 8901, - 9377, 11540, 7256, 9419, 9895, 12058, 10316, 12479, 12955, 15118, 3678, - 5841, 6317, 8480, 6738, 8901, 9377, 11540, 7256, 9419, 9895, 12058, - 10316, 12479, 12955, 15118, 7256, 9419, 9895, 12058, 10316, 12479, 12955, - 15118, 10834, 12997, 13473, 15636, 13894, 16057, 16533, 18696, 4193, 6356, - 6832, 8995, 7253, 9416, 9892, 12055, 7771, 9934, 10410, 12573, 10831, - 12994, 13470, 15633, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, - 11349, 13512, 13988, 16151, 14409, 16572, 
17048, 19211, 7771, 9934, 10410, - 12573, 10831, 12994, 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572, - 17048, 19211, 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 14927, - 17090, 17566, 19729, 17987, 20150, 20626, 22789, 4193, 6356, 6832, 8995, - 7253, 9416, 9892, 12055, 7771, 9934, 10410, 12573, 10831, 12994, 13470, - 15633, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 11349, 13512, - 13988, 16151, 14409, 16572, 17048, 19211, 7771, 9934, 10410, 12573, 10831, - 12994, 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, - 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 14927, 17090, 17566, - 19729, 17987, 20150, 20626, 22789, 8286, 10449, 10925, 13088, 11346, 13509, - 13985, 16148, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 11864, - 14027, 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, - 18502, 20665, 21141, 23304, 11864, 14027, 14503, 16666, 14924, 17087, 17563, - 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 15442, 17605, - 18081, 20244, 18502, 20665, 21141, 23304, 19020, 21183, 21659, 23822, 22080, - 24243, 24719, 26882, 4193, 6356, 6832, 8995, 7253, 9416, 9892, 12055, - 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 7771, 9934, 10410, - 12573, 10831, 12994, 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572, - 17048, 19211, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 11349, - 13512, 13988, 16151, 14409, 16572, 17048, 19211, 11349, 13512, 13988, 16151, - 14409, 16572, 17048, 19211, 14927, 17090, 17566, 19729, 17987, 20150, 20626, - 22789, 8286, 10449, 10925, 13088, 11346, 13509, 13985, 16148, 11864, 14027, - 14503, 16666, 14924, 17087, 17563, 19726, 11864, 14027, 14503, 16666, 14924, - 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, - 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, - 20244, 18502, 20665, 21141, 23304, 15442, 17605, 18081, 20244, 18502, 20665, - 21141, 23304, 
19020, 21183, 21659, 23822, 22080, 24243, 24719, 26882, 8286, - 10449, 10925, 13088, 11346, 13509, 13985, 16148, 11864, 14027, 14503, 16666, - 14924, 17087, 17563, 19726, 11864, 14027, 14503, 16666, 14924, 17087, 17563, - 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 11864, 14027, - 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, - 20665, 21141, 23304, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, - 19020, 21183, 21659, 23822, 22080, 24243, 24719, 26882, 12379, 14542, 15018, - 17181, 15439, 17602, 18078, 20241, 15957, 18120, 18596, 20759, 19017, 21180, - 21656, 23819, 15957, 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, - 21698, 22174, 24337, 22595, 24758, 25234, 27397, 15957, 18120, 18596, 20759, - 19017, 21180, 21656, 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, - 27397, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 23113, 25276, - 25752, 27915, 26173, 28336, 28812, 30975, 4193, 6356, 6832, 8995, 7253, - 9416, 9892, 12055, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, - 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 11349, 13512, 13988, - 16151, 14409, 16572, 17048, 19211, 7771, 9934, 10410, 12573, 10831, 12994, - 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 11349, - 13512, 13988, 16151, 14409, 16572, 17048, 19211, 14927, 17090, 17566, 19729, - 17987, 20150, 20626, 22789, 8286, 10449, 10925, 13088, 11346, 13509, 13985, - 16148, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 11864, 14027, - 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, - 20665, 21141, 23304, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, - 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 15442, 17605, 18081, - 20244, 18502, 20665, 21141, 23304, 19020, 21183, 21659, 23822, 22080, 24243, - 24719, 26882, 8286, 10449, 10925, 13088, 11346, 13509, 13985, 16148, 11864, - 14027, 14503, 16666, 14924, 17087, 17563, 19726, 
11864, 14027, 14503, 16666, - 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, - 23304, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, - 18081, 20244, 18502, 20665, 21141, 23304, 15442, 17605, 18081, 20244, 18502, - 20665, 21141, 23304, 19020, 21183, 21659, 23822, 22080, 24243, 24719, 26882, - 12379, 14542, 15018, 17181, 15439, 17602, 18078, 20241, 15957, 18120, 18596, - 20759, 19017, 21180, 21656, 23819, 15957, 18120, 18596, 20759, 19017, 21180, - 21656, 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 15957, - 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, 21698, 22174, 24337, - 22595, 24758, 25234, 27397, 19535, 21698, 22174, 24337, 22595, 24758, 25234, - 27397, 23113, 25276, 25752, 27915, 26173, 28336, 28812, 30975, 8286, 10449, - 10925, 13088, 11346, 13509, 13985, 16148, 11864, 14027, 14503, 16666, 14924, - 17087, 17563, 19726, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, - 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 11864, 14027, 14503, - 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, 20665, - 21141, 23304, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 19020, - 21183, 21659, 23822, 22080, 24243, 24719, 26882, 12379, 14542, 15018, 17181, - 15439, 17602, 18078, 20241, 15957, 18120, 18596, 20759, 19017, 21180, 21656, - 23819, 15957, 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, 21698, - 22174, 24337, 22595, 24758, 25234, 27397, 15957, 18120, 18596, 20759, 19017, - 21180, 21656, 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, - 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 23113, 25276, 25752, - 27915, 26173, 28336, 28812, 30975, 12379, 14542, 15018, 17181, 15439, 17602, - 18078, 20241, 15957, 18120, 18596, 20759, 19017, 21180, 21656, 23819, 15957, - 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, 21698, 22174, 24337, - 22595, 24758, 25234, 27397, 15957, 18120, 18596, 20759, 19017, 21180, 
21656, - 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 19535, 21698, - 22174, 24337, 22595, 24758, 25234, 27397, 23113, 25276, 25752, 27915, 26173, - 28336, 28812, 30975, 16472, 18635, 19111, 21274, 19532, 21695, 22171, 24334, - 20050, 22213, 22689, 24852, 23110, 25273, 25749, 27912, 20050, 22213, 22689, - 24852, 23110, 25273, 25749, 27912, 23628, 25791, 26267, 28430, 26688, 28851, - 29327, 31490, 20050, 22213, 22689, 24852, 23110, 25273, 25749, 27912, 23628, - 25791, 26267, 28430, 26688, 28851, 29327, 31490, 23628, 25791, 26267, 28430, - 26688, 28851, 29327, 31490, 27206, 29369, 29845, 32008, 30266, 32429, 32905, - 35068}; + 100, 2263, 2739, 4902, 3160, 5323, 5799, 7962, 3678, 5841, 6317, + 8480, 6738, 8901, 9377, 11540, 3678, 5841, 6317, 8480, 6738, 8901, + 9377, 11540, 7256, 9419, 9895, 12058, 10316, 12479, 12955, 15118, 3678, + 5841, 6317, 8480, 6738, 8901, 9377, 11540, 7256, 9419, 9895, 12058, + 10316, 12479, 12955, 15118, 7256, 9419, 9895, 12058, 10316, 12479, 12955, + 15118, 10834, 12997, 13473, 15636, 13894, 16057, 16533, 18696, 4193, 6356, + 6832, 8995, 7253, 9416, 9892, 12055, 7771, 9934, 10410, 12573, 10831, + 12994, 13470, 15633, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, + 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 7771, 9934, 10410, + 12573, 10831, 12994, 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572, + 17048, 19211, 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 14927, + 17090, 17566, 19729, 17987, 20150, 20626, 22789, 4193, 6356, 6832, 8995, + 7253, 9416, 9892, 12055, 7771, 9934, 10410, 12573, 10831, 12994, 13470, + 15633, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 11349, 13512, + 13988, 16151, 14409, 16572, 17048, 19211, 7771, 9934, 10410, 12573, 10831, + 12994, 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, + 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 14927, 17090, 17566, + 19729, 17987, 20150, 20626, 22789, 8286, 10449, 10925, 13088, 11346, 
13509, + 13985, 16148, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 11864, + 14027, 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, + 18502, 20665, 21141, 23304, 11864, 14027, 14503, 16666, 14924, 17087, 17563, + 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 15442, 17605, + 18081, 20244, 18502, 20665, 21141, 23304, 19020, 21183, 21659, 23822, 22080, + 24243, 24719, 26882, 4193, 6356, 6832, 8995, 7253, 9416, 9892, 12055, + 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 7771, 9934, 10410, + 12573, 10831, 12994, 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572, + 17048, 19211, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 11349, + 13512, 13988, 16151, 14409, 16572, 17048, 19211, 11349, 13512, 13988, 16151, + 14409, 16572, 17048, 19211, 14927, 17090, 17566, 19729, 17987, 20150, 20626, + 22789, 8286, 10449, 10925, 13088, 11346, 13509, 13985, 16148, 11864, 14027, + 14503, 16666, 14924, 17087, 17563, 19726, 11864, 14027, 14503, 16666, 14924, + 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, + 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, + 20244, 18502, 20665, 21141, 23304, 15442, 17605, 18081, 20244, 18502, 20665, + 21141, 23304, 19020, 21183, 21659, 23822, 22080, 24243, 24719, 26882, 8286, + 10449, 10925, 13088, 11346, 13509, 13985, 16148, 11864, 14027, 14503, 16666, + 14924, 17087, 17563, 19726, 11864, 14027, 14503, 16666, 14924, 17087, 17563, + 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 11864, 14027, + 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, + 20665, 21141, 23304, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, + 19020, 21183, 21659, 23822, 22080, 24243, 24719, 26882, 12379, 14542, 15018, + 17181, 15439, 17602, 18078, 20241, 15957, 18120, 18596, 20759, 19017, 21180, + 21656, 23819, 15957, 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, + 21698, 22174, 24337, 22595, 
24758, 25234, 27397, 15957, 18120, 18596, 20759, + 19017, 21180, 21656, 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, + 27397, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 23113, 25276, + 25752, 27915, 26173, 28336, 28812, 30975, 4193, 6356, 6832, 8995, 7253, + 9416, 9892, 12055, 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, + 7771, 9934, 10410, 12573, 10831, 12994, 13470, 15633, 11349, 13512, 13988, + 16151, 14409, 16572, 17048, 19211, 7771, 9934, 10410, 12573, 10831, 12994, + 13470, 15633, 11349, 13512, 13988, 16151, 14409, 16572, 17048, 19211, 11349, + 13512, 13988, 16151, 14409, 16572, 17048, 19211, 14927, 17090, 17566, 19729, + 17987, 20150, 20626, 22789, 8286, 10449, 10925, 13088, 11346, 13509, 13985, + 16148, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 11864, 14027, + 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, + 20665, 21141, 23304, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, + 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 15442, 17605, 18081, + 20244, 18502, 20665, 21141, 23304, 19020, 21183, 21659, 23822, 22080, 24243, + 24719, 26882, 8286, 10449, 10925, 13088, 11346, 13509, 13985, 16148, 11864, + 14027, 14503, 16666, 14924, 17087, 17563, 19726, 11864, 14027, 14503, 16666, + 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, 20665, 21141, + 23304, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, 15442, 17605, + 18081, 20244, 18502, 20665, 21141, 23304, 15442, 17605, 18081, 20244, 18502, + 20665, 21141, 23304, 19020, 21183, 21659, 23822, 22080, 24243, 24719, 26882, + 12379, 14542, 15018, 17181, 15439, 17602, 18078, 20241, 15957, 18120, 18596, + 20759, 19017, 21180, 21656, 23819, 15957, 18120, 18596, 20759, 19017, 21180, + 21656, 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 15957, + 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, 21698, 22174, 24337, + 22595, 24758, 25234, 27397, 19535, 21698, 22174, 24337, 22595, 
24758, 25234, + 27397, 23113, 25276, 25752, 27915, 26173, 28336, 28812, 30975, 8286, 10449, + 10925, 13088, 11346, 13509, 13985, 16148, 11864, 14027, 14503, 16666, 14924, + 17087, 17563, 19726, 11864, 14027, 14503, 16666, 14924, 17087, 17563, 19726, + 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 11864, 14027, 14503, + 16666, 14924, 17087, 17563, 19726, 15442, 17605, 18081, 20244, 18502, 20665, + 21141, 23304, 15442, 17605, 18081, 20244, 18502, 20665, 21141, 23304, 19020, + 21183, 21659, 23822, 22080, 24243, 24719, 26882, 12379, 14542, 15018, 17181, + 15439, 17602, 18078, 20241, 15957, 18120, 18596, 20759, 19017, 21180, 21656, + 23819, 15957, 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, 21698, + 22174, 24337, 22595, 24758, 25234, 27397, 15957, 18120, 18596, 20759, 19017, + 21180, 21656, 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, + 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 23113, 25276, 25752, + 27915, 26173, 28336, 28812, 30975, 12379, 14542, 15018, 17181, 15439, 17602, + 18078, 20241, 15957, 18120, 18596, 20759, 19017, 21180, 21656, 23819, 15957, + 18120, 18596, 20759, 19017, 21180, 21656, 23819, 19535, 21698, 22174, 24337, + 22595, 24758, 25234, 27397, 15957, 18120, 18596, 20759, 19017, 21180, 21656, + 23819, 19535, 21698, 22174, 24337, 22595, 24758, 25234, 27397, 19535, 21698, + 22174, 24337, 22595, 24758, 25234, 27397, 23113, 25276, 25752, 27915, 26173, + 28336, 28812, 30975, 16472, 18635, 19111, 21274, 19532, 21695, 22171, 24334, + 20050, 22213, 22689, 24852, 23110, 25273, 25749, 27912, 20050, 22213, 22689, + 24852, 23110, 25273, 25749, 27912, 23628, 25791, 26267, 28430, 26688, 28851, + 29327, 31490, 20050, 22213, 22689, 24852, 23110, 25273, 25749, 27912, 23628, + 25791, 26267, 28430, 26688, 28851, 29327, 31490, 23628, 25791, 26267, 28430, + 26688, 28851, 29327, 31490, 27206, 29369, 29845, 32008, 30266, 32429, 32905, + 35068 +}; #endif const vp9_extra_bit vp9_extra_bits[ENTROPY_TOKENS] = { - {0, 0, 0, 
zero_cost}, // ZERO_TOKEN - {0, 0, 1, sign_cost}, // ONE_TOKEN - {0, 0, 2, sign_cost}, // TWO_TOKEN - {0, 0, 3, sign_cost}, // THREE_TOKEN - {0, 0, 4, sign_cost}, // FOUR_TOKEN - {vp9_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost}, // CATEGORY1_TOKEN - {vp9_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost}, // CATEGORY2_TOKEN - {vp9_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost}, // CATEGORY3_TOKEN - {vp9_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost}, // CATEGORY4_TOKEN - {vp9_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost}, // CATEGORY5_TOKEN - {vp9_cat6_prob, 14, CAT6_MIN_VAL, 0}, // CATEGORY6_TOKEN - {0, 0, 0, zero_cost} // EOB_TOKEN + { 0, 0, 0, zero_cost }, // ZERO_TOKEN + { 0, 0, 1, sign_cost }, // ONE_TOKEN + { 0, 0, 2, sign_cost }, // TWO_TOKEN + { 0, 0, 3, sign_cost }, // THREE_TOKEN + { 0, 0, 4, sign_cost }, // FOUR_TOKEN + { vp9_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost }, // CATEGORY1_TOKEN + { vp9_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost }, // CATEGORY2_TOKEN + { vp9_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost }, // CATEGORY3_TOKEN + { vp9_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost }, // CATEGORY4_TOKEN + { vp9_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost }, // CATEGORY5_TOKEN + { vp9_cat6_prob, 14, CAT6_MIN_VAL, 0 }, // CATEGORY6_TOKEN + { 0, 0, 0, zero_cost } // EOB_TOKEN }; #if CONFIG_VP9_HIGHBITDEPTH const vp9_extra_bit vp9_extra_bits_high10[ENTROPY_TOKENS] = { - {0, 0, 0, zero_cost}, // ZERO - {0, 0, 1, sign_cost}, // ONE - {0, 0, 2, sign_cost}, // TWO - {0, 0, 3, sign_cost}, // THREE - {0, 0, 4, sign_cost}, // FOUR - {vp9_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost}, // CAT1 - {vp9_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost}, // CAT2 - {vp9_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost}, // CAT3 - {vp9_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost}, // CAT4 - {vp9_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost}, // CAT5 - {vp9_cat6_prob_high12 + 2, 16, CAT6_MIN_VAL, 0}, // CAT6 - {0, 0, 0, zero_cost} // EOB + { 0, 0, 0, zero_cost }, // ZERO + { 0, 0, 1, sign_cost }, // ONE + { 0, 0, 2, sign_cost }, // TWO + { 0, 0, 3, sign_cost }, // THREE + { 0, 
0, 4, sign_cost }, // FOUR + { vp9_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost }, // CAT1 + { vp9_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost }, // CAT2 + { vp9_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost }, // CAT3 + { vp9_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost }, // CAT4 + { vp9_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost }, // CAT5 + { vp9_cat6_prob_high12 + 2, 16, CAT6_MIN_VAL, 0 }, // CAT6 + { 0, 0, 0, zero_cost } // EOB }; const vp9_extra_bit vp9_extra_bits_high12[ENTROPY_TOKENS] = { - {0, 0, 0, zero_cost}, // ZERO - {0, 0, 1, sign_cost}, // ONE - {0, 0, 2, sign_cost}, // TWO - {0, 0, 3, sign_cost}, // THREE - {0, 0, 4, sign_cost}, // FOUR - {vp9_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost}, // CAT1 - {vp9_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost}, // CAT2 - {vp9_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost}, // CAT3 - {vp9_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost}, // CAT4 - {vp9_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost}, // CAT5 - {vp9_cat6_prob_high12, 18, CAT6_MIN_VAL, 0}, // CAT6 - {0, 0, 0, zero_cost} // EOB + { 0, 0, 0, zero_cost }, // ZERO + { 0, 0, 1, sign_cost }, // ONE + { 0, 0, 2, sign_cost }, // TWO + { 0, 0, 3, sign_cost }, // THREE + { 0, 0, 4, sign_cost }, // FOUR + { vp9_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost }, // CAT1 + { vp9_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost }, // CAT2 + { vp9_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost }, // CAT3 + { vp9_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost }, // CAT4 + { vp9_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost }, // CAT5 + { vp9_cat6_prob_high12, 18, CAT6_MIN_VAL, 0 }, // CAT6 + { 0, 0, 0, zero_cost } // EOB }; #endif const struct vp9_token vp9_coef_encodings[ENTROPY_TOKENS] = { - {2, 2}, {6, 3}, {28, 5}, {58, 6}, {59, 6}, {60, 6}, {61, 6}, {124, 7}, - {125, 7}, {126, 7}, {127, 7}, {0, 1} + { 2, 2 }, { 6, 3 }, { 28, 5 }, { 58, 6 }, { 59, 6 }, { 60, 6 }, + { 61, 6 }, { 124, 7 }, { 125, 7 }, { 126, 7 }, { 127, 7 }, { 0, 1 } }; - struct tokenize_b_args { VP9_COMP *cpi; ThreadData *td; TOKENEXTRA **tp; }; -static void set_entropy_context_b(int plane, int block, 
BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { - struct tokenize_b_args* const args = arg; +static void set_entropy_context_b(int plane, int block, int row, int col, + BLOCK_SIZE plane_bsize, TX_SIZE tx_size, + void *arg) { + struct tokenize_b_args *const args = arg; ThreadData *const td = args->td; MACROBLOCK *const x = &td->mb; MACROBLOCKD *const xd = &x->e_mbd; struct macroblock_plane *p = &x->plane[plane]; struct macroblockd_plane *pd = &xd->plane[plane]; - int aoff, loff; - txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff); - vp9_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, - aoff, loff); + vp9_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, col, row); } static INLINE void add_token(TOKENEXTRA **t, const vpx_prob *context_tree, @@ -317,23 +338,16 @@ static INLINE void add_token(TOKENEXTRA **t, const vpx_prob *context_tree, static INLINE void add_token_no_extra(TOKENEXTRA **t, const vpx_prob *context_tree, - int16_t token, - unsigned int *counts) { + int16_t token, unsigned int *counts) { (*t)->context_tree = context_tree; (*t)->token = token; (*t)++; ++counts[token]; } -static INLINE int get_tx_eob(const struct segmentation *seg, int segment_id, - TX_SIZE tx_size) { - const int eob_max = 16 << (tx_size << 1); - return segfeature_active(seg, segment_id, SEG_LVL_SKIP) ? 
0 : eob_max; -} - -static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize, - TX_SIZE tx_size, void *arg) { - struct tokenize_b_args* const args = arg; +static void tokenize_b(int plane, int block, int row, int col, + BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) { + struct tokenize_b_args *const args = arg; VP9_COMP *cpi = args->cpi; ThreadData *const td = args->td; MACROBLOCK *const x = &td->mb; @@ -345,29 +359,25 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize, MODE_INFO *mi = xd->mi[0]; int pt; /* near block/prev token context index */ int c; - TOKENEXTRA *t = *tp; /* store tokens starting here */ + TOKENEXTRA *t = *tp; /* store tokens starting here */ int eob = p->eobs[block]; const PLANE_TYPE type = get_plane_type(plane); const tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block); - const int segment_id = mi->segment_id; const int16_t *scan, *nb; const scan_order *so; const int ref = is_inter_block(mi); - unsigned int (*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] = + unsigned int(*const counts)[COEFF_CONTEXTS][ENTROPY_TOKENS] = td->rd_counts.coef_counts[tx_size][type][ref]; - vpx_prob (*const coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] = + vpx_prob(*const coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] = cpi->common.fc->coef_probs[tx_size][type][ref]; - unsigned int (*const eob_branch)[COEFF_CONTEXTS] = + unsigned int(*const eob_branch)[COEFF_CONTEXTS] = td->counts->eob_branch[tx_size][type][ref]; const uint8_t *const band = get_band_translate(tx_size); - const int seg_eob = get_tx_eob(&cpi->common.seg, segment_id, tx_size); + const int tx_eob = 16 << (tx_size << 1); int16_t token; EXTRABIT extra; - int aoff, loff; - txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff); - - pt = get_entropy_context(tx_size, pd->above_context + aoff, - pd->left_context + loff); + pt = get_entropy_context(tx_size, pd->above_context + col, + pd->left_context + row); so = get_scan(xd, tx_size, type, block); scan = 
so->scan; nb = so->neighbors; @@ -390,14 +400,13 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize, vp9_get_token_extra(v, &token, &extra); - add_token(&t, coef_probs[band[c]][pt], token, extra, - counts[band[c]][pt]); + add_token(&t, coef_probs[band[c]][pt], token, extra, counts[band[c]][pt]); token_cache[scan[c]] = vp9_pt_energy_class[token]; ++c; pt = get_coef_context(nb, token_cache, c); } - if (c < seg_eob) { + if (c < tx_eob) { ++eob_branch[band[c]][pt]; add_token_no_extra(&t, coef_probs[band[c]][pt], EOB_TOKEN, counts[band[c]][pt]); @@ -405,20 +414,22 @@ static void tokenize_b(int plane, int block, BLOCK_SIZE plane_bsize, *tp = t; - vp9_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, aoff, loff); + vp9_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, col, row); } struct is_skippable_args { uint16_t *eobs; int *skippable; }; -static void is_skippable(int plane, int block, - BLOCK_SIZE plane_bsize, TX_SIZE tx_size, - void *argv) { + +static void is_skippable(int plane, int block, int row, int col, + BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *argv) { struct is_skippable_args *args = argv; (void)plane; (void)plane_bsize; (void)tx_size; + (void)row; + (void)col; args->skippable[0] &= (!args->eobs[block]); } @@ -426,50 +437,52 @@ static void is_skippable(int plane, int block, // vp9_foreach_transform_block() and simplify is_skippable(). int vp9_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) { int result = 1; - struct is_skippable_args args = {x->plane[plane].eobs, &result}; + struct is_skippable_args args = { x->plane[plane].eobs, &result }; vp9_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable, &args); return result; } -static void has_high_freq_coeff(int plane, int block, +static void has_high_freq_coeff(int plane, int block, int row, int col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *argv) { struct is_skippable_args *args = argv; int eobs = (tx_size == TX_4X4) ? 
3 : 10; - (void) plane; - (void) plane_bsize; - + (void)plane; + (void)plane_bsize; + (void)row; + (void)col; *(args->skippable) |= (args->eobs[block] > eobs); } int vp9_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) { int result = 0; - struct is_skippable_args args = {x->plane[plane].eobs, &result}; + struct is_skippable_args args = { x->plane[plane].eobs, &result }; vp9_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, has_high_freq_coeff, &args); return result; } -void vp9_tokenize_sb(VP9_COMP *cpi, ThreadData *td, TOKENEXTRA **t, - int dry_run, BLOCK_SIZE bsize) { - VP9_COMMON *const cm = &cpi->common; +void vp9_tokenize_sb(VP9_COMP *cpi, ThreadData *td, TOKENEXTRA **t, int dry_run, + int seg_skip, BLOCK_SIZE bsize) { MACROBLOCK *const x = &td->mb; MACROBLOCKD *const xd = &x->e_mbd; MODE_INFO *const mi = xd->mi[0]; const int ctx = vp9_get_skip_context(xd); - const int skip_inc = !segfeature_active(&cm->seg, mi->segment_id, - SEG_LVL_SKIP); - struct tokenize_b_args arg = {cpi, td, t}; + struct tokenize_b_args arg = { cpi, td, t }; + + if (seg_skip) { + assert(mi->skip); + } + if (mi->skip) { - if (!dry_run) - td->counts->skip[ctx][1] += skip_inc; + if (!dry_run && !seg_skip) ++td->counts->skip[ctx][1]; reset_skip_context(xd, bsize); return; } if (!dry_run) { - td->counts->skip[ctx][0] += skip_inc; + ++td->counts->skip[ctx][0]; vp9_foreach_transformed_block(xd, bsize, tokenize_b, &arg); } else { vp9_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg); diff --git a/libs/libvpx/vp9/encoder/vp9_tokenize.h b/libs/libvpx/vp9/encoder/vp9_tokenize.h index df979b25dd..c905715d7d 100644 --- a/libs/libvpx/vp9/encoder/vp9_tokenize.h +++ b/libs/libvpx/vp9/encoder/vp9_tokenize.h @@ -20,15 +20,14 @@ extern "C" { #endif -#define EOSB_TOKEN 127 // Not signalled, encoder only +#define EOSB_TOKEN 127 // Not signalled, encoder only #if CONFIG_VP9_HIGHBITDEPTH - typedef int32_t EXTRABIT; +typedef int32_t EXTRABIT; #else - typedef int16_t 
EXTRABIT; +typedef int16_t EXTRABIT; #endif - typedef struct { int16_t token; EXTRABIT extra; @@ -51,7 +50,8 @@ struct VP9_COMP; struct ThreadData; void vp9_tokenize_sb(struct VP9_COMP *cpi, struct ThreadData *td, - TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize); + TOKENEXTRA **t, int dry_run, int seg_skip, + BLOCK_SIZE bsize); typedef struct { const vpx_prob *prob; @@ -74,6 +74,7 @@ extern const int16_t *vp9_dct_value_cost_ptr; */ extern const TOKENVALUE *vp9_dct_value_tokens_ptr; extern const TOKENVALUE *vp9_dct_cat_lt_10_value_tokens; +extern const int *vp9_dct_cat_lt_10_value_cost; extern const int16_t vp9_cat6_low_cost[256]; extern const int vp9_cat6_high_cost[64]; extern const int vp9_cat6_high10_high_cost[256]; @@ -82,19 +83,19 @@ static INLINE int vp9_get_cost(int16_t token, EXTRABIT extrabits, const int *cat6_high_table) { if (token != CATEGORY6_TOKEN) return vp9_extra_bits[token].cost[extrabits >> 1]; - return vp9_cat6_low_cost[(extrabits >> 1) & 0xff] - + cat6_high_table[extrabits >> 9]; + return vp9_cat6_low_cost[(extrabits >> 1) & 0xff] + + cat6_high_table[extrabits >> 9]; } #if CONFIG_VP9_HIGHBITDEPTH -static INLINE const int* vp9_get_high_cost_table(int bit_depth) { +static INLINE const int *vp9_get_high_cost_table(int bit_depth) { return bit_depth == 8 ? vp9_cat6_high_cost - : (bit_depth == 10 ? vp9_cat6_high10_high_cost : - vp9_cat6_high12_high_cost); + : (bit_depth == 10 ? 
vp9_cat6_high10_high_cost + : vp9_cat6_high12_high_cost); } #else -static INLINE const int* vp9_get_high_cost_table(int bit_depth) { - (void) bit_depth; +static INLINE const int *vp9_get_high_cost_table(int bit_depth) { + (void)bit_depth; return vp9_cat6_high_cost; } #endif // CONFIG_VP9_HIGHBITDEPTH @@ -112,11 +113,22 @@ static INLINE void vp9_get_token_extra(int v, int16_t *token, EXTRABIT *extra) { *extra = vp9_dct_cat_lt_10_value_tokens[v].extra; } static INLINE int16_t vp9_get_token(int v) { - if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) - return 10; + if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) return 10; return vp9_dct_cat_lt_10_value_tokens[v].token; } +static INLINE int vp9_get_token_cost(int v, int16_t *token, + const int *cat6_high_table) { + if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) { + EXTRABIT extrabits; + *token = CATEGORY6_TOKEN; + extrabits = abs(v) - CAT6_MIN_VAL; + return vp9_cat6_low_cost[extrabits & 0xff] + + cat6_high_table[extrabits >> 8]; + } + *token = vp9_dct_cat_lt_10_value_tokens[v].token; + return vp9_dct_cat_lt_10_value_cost[v]; +} #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vp9/encoder/vp9_treewriter.h b/libs/libvpx/vp9/encoder/vp9_treewriter.h index 0f89350763..a8b9c2cd31 100644 --- a/libs/libvpx/vp9/encoder/vp9_treewriter.h +++ b/libs/libvpx/vp9/encoder/vp9_treewriter.h @@ -18,15 +18,15 @@ extern "C" { #endif void vp9_tree_probs_from_distribution(vpx_tree tree, - unsigned int branch_ct[ /* n - 1 */ ][2], - const unsigned int num_events[ /* n */ ]); + unsigned int branch_ct[/* n - 1 */][2], + const unsigned int num_events[/* n */]); struct vp9_token { int value; int len; }; -void vp9_tokens_from_tree(struct vp9_token*, const vpx_tree_index *); +void vp9_tokens_from_tree(struct vp9_token *, const vpx_tree_index *); static INLINE void vp9_write_tree(vpx_writer *w, const vpx_tree_index *tree, const vpx_prob *probs, int bits, int len, diff --git a/libs/libvpx/vp9/encoder/x86/vp9_dct_sse2.c 
b/libs/libvpx/vp9/encoder/x86/vp9_dct_intrin_sse2.c similarity index 95% rename from libs/libvpx/vp9/encoder/x86/vp9_dct_sse2.c rename to libs/libvpx/vp9/encoder/x86/vp9_dct_intrin_sse2.c index fa37b6fed1..0712779b75 100644 --- a/libs/libvpx/vp9/encoder/x86/vp9_dct_sse2.c +++ b/libs/libvpx/vp9/encoder/x86/vp9_dct_intrin_sse2.c @@ -78,8 +78,8 @@ static void fdct4_sse2(__m128i *in) { const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); __m128i u[4], v[4]; - u[0]=_mm_unpacklo_epi16(in[0], in[1]); - u[1]=_mm_unpacklo_epi16(in[3], in[2]); + u[0] = _mm_unpacklo_epi16(in[0], in[1]); + u[1] = _mm_unpacklo_epi16(in[3], in[2]); v[0] = _mm_add_epi16(u[0], u[1]); v[1] = _mm_sub_epi16(u[0], u[1]); @@ -151,14 +151,12 @@ static void fadst4_sse2(__m128i *in) { transpose_4x4(in); } -void vp9_fht4x4_sse2(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { +void vp9_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride, + int tx_type) { __m128i in[4]; switch (tx_type) { - case DCT_DCT: - vpx_fdct4x4_sse2(input, output, stride); - break; + case DCT_DCT: vpx_fdct4x4_sse2(input, output, stride); break; case ADST_DCT: load_buffer_4x4(input, in, stride); fadst4_sse2(in); @@ -177,21 +175,18 @@ void vp9_fht4x4_sse2(const int16_t *input, tran_low_t *output, fadst4_sse2(in); write_buffer_4x4(output, in); break; - default: - assert(0); - break; + default: assert(0); break; } } void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride, - int16_t* coeff_ptr, intptr_t n_coeffs, - int skip_block, const int16_t* zbin_ptr, - const int16_t* round_ptr, const int16_t* quant_ptr, - const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr, - int16_t* dqcoeff_ptr, const int16_t* dequant_ptr, - uint16_t* eob_ptr, - const int16_t* scan_ptr, - const int16_t* iscan_ptr) { + int16_t *coeff_ptr, intptr_t n_coeffs, + int skip_block, const int16_t *zbin_ptr, + const int16_t *round_ptr, const int16_t *quant_ptr, + const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr, 
+ int16_t *dqcoeff_ptr, const int16_t *dequant_ptr, + uint16_t *eob_ptr, const int16_t *scan_ptr, + const int16_t *iscan_ptr) { __m128i zero; int pass; // Constants @@ -208,14 +203,14 @@ void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride, const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); // Load input - __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); - __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); - __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); - __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); - __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride)); - __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride)); - __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride)); - __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride)); + __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); + __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); + __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); + __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); + __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride)); + __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride)); + __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride)); + __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride)); __m128i *in[8]; int index = 0; @@ -469,9 +464,9 @@ void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride, // Setup global values { - round = _mm_load_si128((const __m128i*)round_ptr); - quant = _mm_load_si128((const __m128i*)quant_ptr); - dequant = _mm_load_si128((const __m128i*)dequant_ptr); + round = _mm_load_si128((const __m128i *)round_ptr); + quant = _mm_load_si128((const __m128i *)quant_ptr); + dequant = _mm_load_si128((const __m128i 
*)dequant_ptr); } { @@ -503,15 +498,15 @@ void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride, qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); coeff0 = _mm_mullo_epi16(qcoeff0, dequant); dequant = _mm_unpackhi_epi64(dequant, dequant); coeff1 = _mm_mullo_epi16(qcoeff1, dequant); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1); } { @@ -524,8 +519,8 @@ void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride, zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero); nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero); nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero); - iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs)); - iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1); + iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs)); + iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1); // Add one to convert from indices to counts iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0); iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1); @@ -568,14 +563,14 @@ void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride, qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); coeff0 = _mm_mullo_epi16(qcoeff0, dequant); coeff1 = 
_mm_mullo_epi16(qcoeff1, dequant); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1); } { @@ -588,8 +583,8 @@ void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride, zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero); nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero); nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero); - iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs)); - iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1); + iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs)); + iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1); // Add one to convert from indices to counts iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0); iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1); @@ -615,10 +610,10 @@ void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride, } } else { do { - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero); n_coeffs += 8 * 2; } while (n_coeffs < 0); *eob_ptr = 0; @@ -628,14 +623,14 @@ void vp9_fdct8x8_quant_sse2(const int16_t *input, int stride, // load 8x8 array static INLINE void load_buffer_8x8(const int16_t *input, __m128i *in, int stride) { - in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride)); - in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride)); - in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride)); - in[3] = 
_mm_load_si128((const __m128i *)(input + 3 * stride)); - in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride)); - in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride)); - in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride)); - in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride)); + in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride)); + in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride)); + in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride)); + in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride)); + in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride)); + in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride)); + in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride)); + in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride)); in[0] = _mm_slli_epi16(in[0], 2); in[1] = _mm_slli_epi16(in[1], 2); @@ -930,14 +925,14 @@ static void fadst8_sse2(__m128i *in) { __m128i in0, in1, in2, in3, in4, in5, in6, in7; // properly aligned for butterfly input - in0 = in[7]; - in1 = in[0]; - in2 = in[5]; - in3 = in[2]; - in4 = in[3]; - in5 = in[4]; - in6 = in[1]; - in7 = in[6]; + in0 = in[7]; + in1 = in[0]; + in2 = in[5]; + in3 = in[2]; + in4 = in[3]; + in5 = in[4]; + in6 = in[1]; + in7 = in[6]; // column transformation // stage 1 @@ -1135,14 +1130,12 @@ static void fadst8_sse2(__m128i *in) { array_transpose_8x8(in, in); } -void vp9_fht8x8_sse2(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { +void vp9_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride, + int tx_type) { __m128i in[8]; switch (tx_type) { - case DCT_DCT: - vpx_fdct8x8_sse2(input, output, stride); - break; + case DCT_DCT: vpx_fdct8x8_sse2(input, output, stride); break; case ADST_DCT: load_buffer_8x8(input, in, stride); fadst8_sse2(in); @@ -1164,13 +1157,11 @@ void vp9_fht8x8_sse2(const int16_t *input, tran_low_t *output, right_shift_8x8(in, 1); write_buffer_8x8(output, in, 8); break; - 
default: - assert(0); - break; + default: assert(0); break; } } -static INLINE void load_buffer_16x16(const int16_t* input, __m128i *in0, +static INLINE void load_buffer_16x16(const int16_t *input, __m128i *in0, __m128i *in1, int stride) { // load first 8 columns load_buffer_8x8(input, in0, stride); @@ -1530,13 +1521,13 @@ static void fdct16_8col(__m128i *in) { v[14] = _mm_srai_epi32(u[14], DCT_CONST_BITS); v[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS); - in[1] = _mm_packs_epi32(v[0], v[1]); - in[9] = _mm_packs_epi32(v[2], v[3]); - in[5] = _mm_packs_epi32(v[4], v[5]); + in[1] = _mm_packs_epi32(v[0], v[1]); + in[9] = _mm_packs_epi32(v[2], v[3]); + in[5] = _mm_packs_epi32(v[4], v[5]); in[13] = _mm_packs_epi32(v[6], v[7]); - in[3] = _mm_packs_epi32(v[8], v[9]); + in[3] = _mm_packs_epi32(v[8], v[9]); in[11] = _mm_packs_epi32(v[10], v[11]); - in[7] = _mm_packs_epi32(v[12], v[13]); + in[7] = _mm_packs_epi32(v[12], v[13]); in[15] = _mm_packs_epi32(v[14], v[15]); } @@ -2022,14 +2013,12 @@ static void fadst16_sse2(__m128i *in0, __m128i *in1) { array_transpose_16x16(in0, in1); } -void vp9_fht16x16_sse2(const int16_t *input, tran_low_t *output, - int stride, int tx_type) { +void vp9_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride, + int tx_type) { __m128i in0[16], in1[16]; switch (tx_type) { - case DCT_DCT: - vpx_fdct16x16_sse2(input, output, stride); - break; + case DCT_DCT: vpx_fdct16x16_sse2(input, output, stride); break; case ADST_DCT: load_buffer_16x16(input, in0, in1, stride); fadst16_sse2(in0, in1); @@ -2051,8 +2040,6 @@ void vp9_fht16x16_sse2(const int16_t *input, tran_low_t *output, fadst16_sse2(in0, in1); write_buffer_16x16(output, in0, in1, 16); break; - default: - assert(0); - break; + default: assert(0); break; } } diff --git a/libs/libvpx/vp9/encoder/x86/vp9_dct_mmx.asm b/libs/libvpx/vp9/encoder/x86/vp9_dct_mmx.asm deleted file mode 100644 index 7a7a6b6555..0000000000 --- a/libs/libvpx/vp9/encoder/x86/vp9_dct_mmx.asm +++ /dev/null @@ -1,104 
+0,0 @@ -; -; Copyright (c) 2014 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - -%define private_prefix vp9 - -%include "third_party/x86inc/x86inc.asm" - -SECTION .text - -%macro TRANSFORM_COLS 0 - paddw m0, m1 - movq m4, m0 - psubw m3, m2 - psubw m4, m3 - psraw m4, 1 - movq m5, m4 - psubw m5, m1 ;b1 - psubw m4, m2 ;c1 - psubw m0, m4 - paddw m3, m5 - ; m0 a0 - SWAP 1, 4 ; m1 c1 - SWAP 2, 3 ; m2 d1 - SWAP 3, 5 ; m3 b1 -%endmacro - -%macro TRANSPOSE_4X4 0 - movq m4, m0 - movq m5, m2 - punpcklwd m4, m1 - punpckhwd m0, m1 - punpcklwd m5, m3 - punpckhwd m2, m3 - movq m1, m4 - movq m3, m0 - punpckldq m1, m5 - punpckhdq m4, m5 - punpckldq m3, m2 - punpckhdq m0, m2 - SWAP 2, 3, 0, 1, 4 -%endmacro - -INIT_MMX mmx -cglobal fwht4x4, 3, 4, 8, input, output, stride - lea r3q, [inputq + strideq*4] - movq m0, [inputq] ;a1 - movq m1, [inputq + strideq*2] ;b1 - movq m2, [r3q] ;c1 - movq m3, [r3q + strideq*2] ;d1 - - TRANSFORM_COLS - TRANSPOSE_4X4 - TRANSFORM_COLS - TRANSPOSE_4X4 - - psllw m0, 2 - psllw m1, 2 - psllw m2, 2 - psllw m3, 2 - -%if CONFIG_VP9_HIGHBITDEPTH - pxor m4, m4 - pxor m5, m5 - pcmpgtw m4, m0 - pcmpgtw m5, m1 - movq m6, m0 - movq m7, m1 - punpcklwd m0, m4 - punpcklwd m1, m5 - punpckhwd m6, m4 - punpckhwd m7, m5 - movq [outputq], m0 - movq [outputq + 8], m6 - movq [outputq + 16], m1 - movq [outputq + 24], m7 - pxor m4, m4 - pxor m5, m5 - pcmpgtw m4, m2 - pcmpgtw m5, m3 - movq m6, m2 - movq m7, m3 - punpcklwd m2, m4 - punpcklwd m3, m5 - punpckhwd m6, m4 - punpckhwd m7, m5 - movq [outputq + 32], m2 - movq [outputq + 40], m6 - movq [outputq + 48], m3 - movq [outputq + 56], m7 -%else - movq [outputq], m0 - movq [outputq + 8], m1 - movq 
[outputq + 16], m2 - movq [outputq + 24], m3 -%endif - - RET diff --git a/libs/libvpx/vp9/encoder/x86/vp9_dct_sse2.asm b/libs/libvpx/vp9/encoder/x86/vp9_dct_sse2.asm new file mode 100644 index 0000000000..ced37bd16e --- /dev/null +++ b/libs/libvpx/vp9/encoder/x86/vp9_dct_sse2.asm @@ -0,0 +1,86 @@ +; +; Copyright (c) 2016 The WebM project authors. All Rights Reserved. +; +; Use of this source code is governed by a BSD-style license +; that can be found in the LICENSE file in the root of the source +; tree. An additional intellectual property rights grant can be found +; in the file PATENTS. All contributing project authors may +; be found in the AUTHORS file in the root of the source tree. +; + +%define private_prefix vp9 + +%include "third_party/x86inc/x86inc.asm" + +SECTION .text + +%macro TRANSFORM_COLS 0 + paddw m0, m1 + movq m4, m0 + psubw m3, m2 + psubw m4, m3 + psraw m4, 1 + movq m5, m4 + psubw m5, m1 ;b1 + psubw m4, m2 ;c1 + psubw m0, m4 + paddw m3, m5 + ; m0 a0 + SWAP 1, 4 ; m1 c1 + SWAP 2, 3 ; m2 d1 + SWAP 3, 5 ; m3 b1 +%endmacro + +%macro TRANSPOSE_4X4 0 + ; 00 01 02 03 + ; 10 11 12 13 + ; 20 21 22 23 + ; 30 31 32 33 + punpcklwd m0, m1 ; 00 10 01 11 02 12 03 13 + punpcklwd m2, m3 ; 20 30 21 31 22 32 23 33 + mova m1, m0 + punpckldq m0, m2 ; 00 10 20 30 01 11 21 31 + punpckhdq m1, m2 ; 02 12 22 32 03 13 23 33 +%endmacro + +INIT_XMM sse2 +cglobal fwht4x4, 3, 4, 8, input, output, stride + lea r3q, [inputq + strideq*4] + movq m0, [inputq] ;a1 + movq m1, [inputq + strideq*2] ;b1 + movq m2, [r3q] ;c1 + movq m3, [r3q + strideq*2] ;d1 + + TRANSFORM_COLS + TRANSPOSE_4X4 + SWAP 1, 2 + psrldq m1, m0, 8 + psrldq m3, m2, 8 + TRANSFORM_COLS + TRANSPOSE_4X4 + + psllw m0, 2 + psllw m1, 2 + +%if CONFIG_VP9_HIGHBITDEPTH + ; sign extension + mova m2, m0 + mova m3, m1 + punpcklwd m0, m0 + punpcklwd m1, m1 + punpckhwd m2, m2 + punpckhwd m3, m3 + psrad m0, 16 + psrad m1, 16 + psrad m2, 16 + psrad m3, 16 + mova [outputq], m0 + mova [outputq + 16], m2 + mova [outputq + 32], m1 + 
mova [outputq + 48], m3 +%else + mova [outputq], m0 + mova [outputq + 16], m1 +%endif + + RET diff --git a/libs/libvpx/vp9/encoder/x86/vp9_dct_ssse3.c b/libs/libvpx/vp9/encoder/x86/vp9_dct_ssse3.c index b09eac0d1a..fb2a925417 100644 --- a/libs/libvpx/vp9/encoder/x86/vp9_dct_ssse3.c +++ b/libs/libvpx/vp9/encoder/x86/vp9_dct_ssse3.c @@ -9,27 +9,18 @@ */ #include -#if defined(_MSC_VER) && _MSC_VER <= 1500 -// Need to include math.h before calling tmmintrin.h/intrin.h -// in certain versions of MSVS. -#include -#endif #include // SSSE3 #include "./vp9_rtcd.h" #include "vpx_dsp/x86/inv_txfm_sse2.h" #include "vpx_dsp/x86/txfm_common_sse2.h" -void vp9_fdct8x8_quant_ssse3(const int16_t *input, int stride, - int16_t* coeff_ptr, intptr_t n_coeffs, - int skip_block, const int16_t* zbin_ptr, - const int16_t* round_ptr, const int16_t* quant_ptr, - const int16_t* quant_shift_ptr, - int16_t* qcoeff_ptr, - int16_t* dqcoeff_ptr, const int16_t* dequant_ptr, - uint16_t* eob_ptr, - const int16_t* scan_ptr, - const int16_t* iscan_ptr) { +void vp9_fdct8x8_quant_ssse3( + const int16_t *input, int stride, int16_t *coeff_ptr, intptr_t n_coeffs, + int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, + const int16_t *quant_ptr, const int16_t *quant_shift_ptr, + int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, const int16_t *dequant_ptr, + uint16_t *eob_ptr, const int16_t *scan_ptr, const int16_t *iscan_ptr) { __m128i zero; int pass; // Constants @@ -47,14 +38,14 @@ void vp9_fdct8x8_quant_ssse3(const int16_t *input, int stride, const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); // Load input - __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); - __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); - __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); - __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); - __m128i in4 = 
_mm_load_si128((const __m128i *)(input + 4 * stride)); - __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride)); - __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride)); - __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride)); + __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); + __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); + __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); + __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); + __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride)); + __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride)); + __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride)); + __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride)); __m128i *in[8]; int index = 0; @@ -303,9 +294,9 @@ void vp9_fdct8x8_quant_ssse3(const int16_t *input, int stride, // Setup global values { - round = _mm_load_si128((const __m128i*)round_ptr); - quant = _mm_load_si128((const __m128i*)quant_ptr); - dequant = _mm_load_si128((const __m128i*)dequant_ptr); + round = _mm_load_si128((const __m128i *)round_ptr); + quant = _mm_load_si128((const __m128i *)quant_ptr); + dequant = _mm_load_si128((const __m128i *)dequant_ptr); } { @@ -337,15 +328,15 @@ void vp9_fdct8x8_quant_ssse3(const int16_t *input, int stride, qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); coeff0 = _mm_mullo_epi16(qcoeff0, dequant); dequant = _mm_unpackhi_epi64(dequant, dequant); coeff1 = _mm_mullo_epi16(qcoeff1, dequant); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0); - _mm_store_si128((__m128i*)(dqcoeff_ptr + 
n_coeffs) + 1, coeff1); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1); } { @@ -358,8 +349,8 @@ void vp9_fdct8x8_quant_ssse3(const int16_t *input, int stride, zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero); nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero); nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero); - iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs)); - iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1); + iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs)); + iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1); // Add one to convert from indices to counts iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0); iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1); @@ -393,7 +384,7 @@ void vp9_fdct8x8_quant_ssse3(const int16_t *input, int stride, qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); nzflag = _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff0, thr)) | - _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr)); + _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr)); if (nzflag) { qcoeff0 = _mm_adds_epi16(qcoeff0, round); @@ -407,20 +398,20 @@ void vp9_fdct8x8_quant_ssse3(const int16_t *input, int stride, qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); coeff0 = _mm_mullo_epi16(qcoeff0, dequant); coeff1 = _mm_mullo_epi16(qcoeff1, dequant); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1); } else { - _mm_store_si128((__m128i*)(qcoeff_ptr + 
n_coeffs), zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero); } } @@ -434,8 +425,8 @@ void vp9_fdct8x8_quant_ssse3(const int16_t *input, int stride, zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero); nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero); nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero); - iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs)); - iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1); + iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs)); + iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1); // Add one to convert from indices to counts iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0); iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1); @@ -461,10 +452,10 @@ void vp9_fdct8x8_quant_ssse3(const int16_t *input, int stride, } } else { do { - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero); n_coeffs += 8 * 2; } while (n_coeffs < 0); *eob_ptr = 0; diff --git a/libs/libvpx/vp9/encoder/x86/vp9_denoiser_sse2.c b/libs/libvpx/vp9/encoder/x86/vp9_denoiser_sse2.c index bf7c7af770..91d0602f9d 100644 --- a/libs/libvpx/vp9/encoder/x86/vp9_denoiser_sse2.c +++ 
b/libs/libvpx/vp9/encoder/x86/vp9_denoiser_sse2.c @@ -37,17 +37,11 @@ static INLINE int sum_diff_16x1(__m128i acc_diff) { } // Denoise a 16x1 vector. -static INLINE __m128i vp9_denoiser_16x1_sse2(const uint8_t *sig, - const uint8_t *mc_running_avg_y, - uint8_t *running_avg_y, - const __m128i *k_0, - const __m128i *k_4, - const __m128i *k_8, - const __m128i *k_16, - const __m128i *l3, - const __m128i *l32, - const __m128i *l21, - __m128i acc_diff) { +static INLINE __m128i vp9_denoiser_16x1_sse2( + const uint8_t *sig, const uint8_t *mc_running_avg_y, uint8_t *running_avg_y, + const __m128i *k_0, const __m128i *k_4, const __m128i *k_8, + const __m128i *k_16, const __m128i *l3, const __m128i *l32, + const __m128i *l21, __m128i acc_diff) { // Calculate differences const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0])); const __m128i v_mc_running_avg_y = @@ -69,7 +63,7 @@ static INLINE __m128i vp9_denoiser_16x1_sse2(const uint8_t *sig, __m128i adj2 = _mm_and_si128(mask2, *l32); const __m128i adj1 = _mm_and_si128(mask1, *l21); const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff); - __m128i adj, padj, nadj; + __m128i adj, padj, nadj; // Combine the adjustments and get absolute adjustments. adj2 = _mm_add_epi8(adj2, adj1); @@ -95,9 +89,8 @@ static INLINE __m128i vp9_denoiser_16x1_sse2(const uint8_t *sig, // Denoise a 16x1 vector with a weaker filter. static INLINE __m128i vp9_denoiser_adj_16x1_sse2( - const uint8_t *sig, const uint8_t *mc_running_avg_y, - uint8_t *running_avg_y, const __m128i k_0, - const __m128i k_delta, __m128i acc_diff) { + const uint8_t *sig, const uint8_t *mc_running_avg_y, uint8_t *running_avg_y, + const __m128i k_0, const __m128i k_delta, __m128i acc_diff) { __m128i v_running_avg_y = _mm_loadu_si128((__m128i *)(&running_avg_y[0])); // Calculate differences. const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0])); @@ -108,8 +101,7 @@ static INLINE __m128i vp9_denoiser_adj_16x1_sse2( // Obtain the sign. 
FF if diff is negative. const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0); // Clamp absolute difference to delta to get the adjustment. - const __m128i adj = - _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta); + const __m128i adj = _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta); // Restore the sign and get positive and negative adjustments. __m128i padj, nadj; padj = _mm_andnot_si128(diff_sign, adj); @@ -125,15 +117,18 @@ static INLINE __m128i vp9_denoiser_adj_16x1_sse2( return acc_diff; } -// Denoiser for 4xM and 8xM blocks. -static int vp9_denoiser_NxM_sse2_small( - const uint8_t *sig, int sig_stride, const uint8_t *mc_running_avg_y, - int mc_avg_y_stride, uint8_t *running_avg_y, int avg_y_stride, - int increase_denoising, BLOCK_SIZE bs, int motion_magnitude, int width) { +// Denoise 8x8 and 8x16 blocks. +static int vp9_denoiser_NxM_sse2_small(const uint8_t *sig, int sig_stride, + const uint8_t *mc_running_avg_y, + int mc_avg_y_stride, + uint8_t *running_avg_y, int avg_y_stride, + int increase_denoising, BLOCK_SIZE bs, + int motion_magnitude, int width) { int sum_diff_thresh, r, sum_diff = 0; - const int shift_inc = (increase_denoising && - motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? - 1 : 0; + const int shift_inc = + (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) + ? 1 + : 0; uint8_t sig_buffer[8][16], mc_running_buffer[8][16], running_buffer[8][16]; __m128i acc_diff = _mm_setzero_si128(); const __m128i k_0 = _mm_setzero_si128(); @@ -147,45 +142,25 @@ static int vp9_denoiser_NxM_sse2_small( const __m128i l32 = _mm_set1_epi8(2); // Difference between level 2 and level 1 is 1. const __m128i l21 = _mm_set1_epi8(1); - const uint8_t shift = (width == 4) ? 
2 : 1; + const int b_height = (4 << b_height_log2_lookup[bs]) >> 1; - for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> shift); ++r) { + for (r = 0; r < b_height; ++r) { memcpy(sig_buffer[r], sig, width); memcpy(sig_buffer[r] + width, sig + sig_stride, width); memcpy(mc_running_buffer[r], mc_running_avg_y, width); - memcpy(mc_running_buffer[r] + width, - mc_running_avg_y + mc_avg_y_stride, width); + memcpy(mc_running_buffer[r] + width, mc_running_avg_y + mc_avg_y_stride, + width); memcpy(running_buffer[r], running_avg_y, width); memcpy(running_buffer[r] + width, running_avg_y + avg_y_stride, width); - if (width == 4) { - memcpy(sig_buffer[r] + width * 2, sig + sig_stride * 2, width); - memcpy(sig_buffer[r] + width * 3, sig + sig_stride * 3, width); - memcpy(mc_running_buffer[r] + width * 2, - mc_running_avg_y + mc_avg_y_stride * 2, width); - memcpy(mc_running_buffer[r] + width * 3, - mc_running_avg_y + mc_avg_y_stride * 3, width); - memcpy(running_buffer[r] + width * 2, - running_avg_y + avg_y_stride * 2, width); - memcpy(running_buffer[r] + width * 3, - running_avg_y + avg_y_stride * 3, width); - } - acc_diff = vp9_denoiser_16x1_sse2(sig_buffer[r], - mc_running_buffer[r], - running_buffer[r], - &k_0, &k_4, &k_8, &k_16, - &l3, &l32, &l21, acc_diff); + acc_diff = vp9_denoiser_16x1_sse2(sig_buffer[r], mc_running_buffer[r], + running_buffer[r], &k_0, &k_4, &k_8, + &k_16, &l3, &l32, &l21, acc_diff); memcpy(running_avg_y, running_buffer[r], width); memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width, width); - if (width == 4) { - memcpy(running_avg_y + avg_y_stride * 2, - running_buffer[r] + width * 2, width); - memcpy(running_avg_y + avg_y_stride * 3, - running_buffer[r] + width * 3, width); - } // Update pointers for next iteration. 
- sig += (sig_stride << shift); - mc_running_avg_y += (mc_avg_y_stride << shift); - running_avg_y += (avg_y_stride << shift); + sig += (sig_stride << 1); + mc_running_avg_y += (mc_avg_y_stride << 1); + running_avg_y += (avg_y_stride << 1); } { @@ -202,27 +177,21 @@ static int vp9_denoiser_NxM_sse2_small( // The delta is set by the excess of absolute pixel diff over the // threshold. - const int delta = ((abs(sum_diff) - sum_diff_thresh) >> - num_pels_log2_lookup[bs]) + 1; + const int delta = + ((abs(sum_diff) - sum_diff_thresh) >> num_pels_log2_lookup[bs]) + 1; // Only apply the adjustment for max delta up to 3. if (delta < 4) { const __m128i k_delta = _mm_set1_epi8(delta); - running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]); - for (r = 0; r < ((4 << b_height_log2_lookup[bs]) >> shift); ++r) { + running_avg_y -= avg_y_stride * (b_height << 1); + for (r = 0; r < b_height; ++r) { acc_diff = vp9_denoiser_adj_16x1_sse2( - sig_buffer[r], mc_running_buffer[r], running_buffer[r], - k_0, k_delta, acc_diff); + sig_buffer[r], mc_running_buffer[r], running_buffer[r], k_0, + k_delta, acc_diff); memcpy(running_avg_y, running_buffer[r], width); - memcpy(running_avg_y + avg_y_stride, - running_buffer[r] + width, width); - if (width == 4) { - memcpy(running_avg_y + avg_y_stride * 2, - running_buffer[r] + width * 2, width); - memcpy(running_avg_y + avg_y_stride * 3, - running_buffer[r] + width * 3, width); - } + memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width, + width); // Update pointers for next iteration. - running_avg_y += (avg_y_stride << shift); + running_avg_y += (avg_y_stride << 1); } sum_diff = sum_diff_16x1(acc_diff); if (abs(sum_diff) > sum_diff_thresh) { @@ -236,18 +205,18 @@ static int vp9_denoiser_NxM_sse2_small( return FILTER_BLOCK; } -// Denoiser for 16xM, 32xM and 64xM blocks +// Denoise 16x16, 16x32, 32x16, 32x32, 32x64, 64x32 and 64x64 blocks. 
static int vp9_denoiser_NxM_sse2_big(const uint8_t *sig, int sig_stride, const uint8_t *mc_running_avg_y, int mc_avg_y_stride, - uint8_t *running_avg_y, - int avg_y_stride, + uint8_t *running_avg_y, int avg_y_stride, int increase_denoising, BLOCK_SIZE bs, int motion_magnitude) { int sum_diff_thresh, r, c, sum_diff = 0; - const int shift_inc = (increase_denoising && - motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? - 1 : 0; + const int shift_inc = + (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) + ? 1 + : 0; __m128i acc_diff[4][4]; const __m128i k_0 = _mm_setzero_si128(); const __m128i k_4 = _mm_set1_epi8(4 + shift_inc); @@ -260,76 +229,71 @@ static int vp9_denoiser_NxM_sse2_big(const uint8_t *sig, int sig_stride, const __m128i l32 = _mm_set1_epi8(2); // Difference between level 2 and level 1 is 1. const __m128i l21 = _mm_set1_epi8(1); + const int b_width = (4 << b_width_log2_lookup[bs]); + const int b_height = (4 << b_height_log2_lookup[bs]); + const int b_width_shift4 = b_width >> 4; - for (c = 0; c < 4; ++c) { - for (r = 0; r < 4; ++r) { + for (r = 0; r < 4; ++r) { + for (c = 0; c < b_width_shift4; ++c) { acc_diff[c][r] = _mm_setzero_si128(); } } - for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) { - for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { - acc_diff[c>>4][r>>4] = vp9_denoiser_16x1_sse2( - sig, mc_running_avg_y, running_avg_y, &k_0, &k_4, - &k_8, &k_16, &l3, &l32, &l21, acc_diff[c>>4][r>>4]); + for (r = 0; r < b_height; ++r) { + for (c = 0; c < b_width_shift4; ++c) { + acc_diff[c][r >> 4] = vp9_denoiser_16x1_sse2( + sig, mc_running_avg_y, running_avg_y, &k_0, &k_4, &k_8, &k_16, &l3, + &l32, &l21, acc_diff[c][r >> 4]); // Update pointers for next iteration. 
sig += 16; mc_running_avg_y += 16; running_avg_y += 16; } - if ((r + 1) % 16 == 0 || (bs == BLOCK_16X8 && r == 7)) { - for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { - sum_diff += sum_diff_16x1(acc_diff[c>>4][r>>4]); + if ((r & 0xf) == 0xf || (bs == BLOCK_16X8 && r == 7)) { + for (c = 0; c < b_width_shift4; ++c) { + sum_diff += sum_diff_16x1(acc_diff[c][r >> 4]); } } // Update pointers for next iteration. - sig = sig - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + sig_stride; - mc_running_avg_y = mc_running_avg_y - - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + - mc_avg_y_stride; - running_avg_y = running_avg_y - - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + - avg_y_stride; + sig = sig - b_width + sig_stride; + mc_running_avg_y = mc_running_avg_y - b_width + mc_avg_y_stride; + running_avg_y = running_avg_y - b_width + avg_y_stride; } { sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising); if (abs(sum_diff) > sum_diff_thresh) { - const int delta = ((abs(sum_diff) - sum_diff_thresh) >> - num_pels_log2_lookup[bs]) + 1; + const int delta = + ((abs(sum_diff) - sum_diff_thresh) >> num_pels_log2_lookup[bs]) + 1; // Only apply the adjustment for max delta up to 3. 
if (delta < 4) { const __m128i k_delta = _mm_set1_epi8(delta); - sig -= sig_stride * (4 << b_height_log2_lookup[bs]); - mc_running_avg_y -= mc_avg_y_stride * (4 << b_height_log2_lookup[bs]); - running_avg_y -= avg_y_stride * (4 << b_height_log2_lookup[bs]); + sig -= sig_stride * b_height; + mc_running_avg_y -= mc_avg_y_stride * b_height; + running_avg_y -= avg_y_stride * b_height; sum_diff = 0; - for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) { - for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { - acc_diff[c>>4][r>>4] = vp9_denoiser_adj_16x1_sse2( - sig, mc_running_avg_y, running_avg_y, k_0, - k_delta, acc_diff[c>>4][r>>4]); + for (r = 0; r < b_height; ++r) { + for (c = 0; c < b_width_shift4; ++c) { + acc_diff[c][r >> 4] = + vp9_denoiser_adj_16x1_sse2(sig, mc_running_avg_y, running_avg_y, + k_0, k_delta, acc_diff[c][r >> 4]); // Update pointers for next iteration. sig += 16; mc_running_avg_y += 16; running_avg_y += 16; } - if ((r + 1) % 16 == 0 || (bs == BLOCK_16X8 && r == 7)) { - for (c = 0; c < (4 << b_width_log2_lookup[bs]); c += 16) { - sum_diff += sum_diff_16x1(acc_diff[c>>4][r>>4]); + if ((r & 0xf) == 0xf || (bs == BLOCK_16X8 && r == 7)) { + for (c = 0; c < b_width_shift4; ++c) { + sum_diff += sum_diff_16x1(acc_diff[c][r >> 4]); } } - sig = sig - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + sig_stride; - mc_running_avg_y = mc_running_avg_y - - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + - mc_avg_y_stride; - running_avg_y = running_avg_y - - 16 * ((4 << b_width_log2_lookup[bs]) >> 4) + - avg_y_stride; + sig = sig - b_width + sig_stride; + mc_running_avg_y = mc_running_avg_y - b_width + mc_avg_y_stride; + running_avg_y = running_avg_y - b_width + avg_y_stride; } if (abs(sum_diff) > sum_diff_thresh) { return COPY_BLOCK; @@ -343,32 +307,21 @@ static int vp9_denoiser_NxM_sse2_big(const uint8_t *sig, int sig_stride, } int vp9_denoiser_filter_sse2(const uint8_t *sig, int sig_stride, - const uint8_t *mc_avg, - int mc_avg_stride, + const uint8_t 
*mc_avg, int mc_avg_stride, uint8_t *avg, int avg_stride, - int increase_denoising, - BLOCK_SIZE bs, + int increase_denoising, BLOCK_SIZE bs, int motion_magnitude) { - if (bs == BLOCK_4X4 || bs == BLOCK_4X8) { - return vp9_denoiser_NxM_sse2_small(sig, sig_stride, - mc_avg, mc_avg_stride, - avg, avg_stride, - increase_denoising, - bs, motion_magnitude, 4); - } else if (bs == BLOCK_8X4 || bs == BLOCK_8X8 || bs == BLOCK_8X16) { - return vp9_denoiser_NxM_sse2_small(sig, sig_stride, - mc_avg, mc_avg_stride, - avg, avg_stride, - increase_denoising, - bs, motion_magnitude, 8); - } else if (bs == BLOCK_16X8 || bs == BLOCK_16X16 || bs == BLOCK_16X32 || - bs == BLOCK_32X16|| bs == BLOCK_32X32 || bs == BLOCK_32X64 || - bs == BLOCK_64X32 || bs == BLOCK_64X64) { - return vp9_denoiser_NxM_sse2_big(sig, sig_stride, - mc_avg, mc_avg_stride, - avg, avg_stride, - increase_denoising, - bs, motion_magnitude); + // Rank by frequency of the block type to have an early termination. + if (bs == BLOCK_16X16 || bs == BLOCK_32X32 || bs == BLOCK_64X64 || + bs == BLOCK_16X32 || bs == BLOCK_16X8 || bs == BLOCK_32X16 || + bs == BLOCK_32X64 || bs == BLOCK_64X32) { + return vp9_denoiser_NxM_sse2_big(sig, sig_stride, mc_avg, mc_avg_stride, + avg, avg_stride, increase_denoising, bs, + motion_magnitude); + } else if (bs == BLOCK_8X8 || bs == BLOCK_8X16) { + return vp9_denoiser_NxM_sse2_small(sig, sig_stride, mc_avg, mc_avg_stride, + avg, avg_stride, increase_denoising, bs, + motion_magnitude, 8); } else { return COPY_BLOCK; } diff --git a/libs/libvpx/vp9/encoder/x86/vp9_diamond_search_sad_avx.c b/libs/libvpx/vp9/encoder/x86/vp9_diamond_search_sad_avx.c index 0bc417fc15..2f3c66c083 100644 --- a/libs/libvpx/vp9/encoder/x86/vp9_diamond_search_sad_avx.c +++ b/libs/libvpx/vp9/encoder/x86/vp9_diamond_search_sad_avx.c @@ -9,7 +9,7 @@ */ #if defined(_MSC_VER) -# include +#include #endif #include #include @@ -19,11 +19,11 @@ #include "vpx_ports/mem.h" #ifdef __GNUC__ -# define LIKELY(v) __builtin_expect(v, 1) 
-# define UNLIKELY(v) __builtin_expect(v, 0) +#define LIKELY(v) __builtin_expect(v, 1) +#define UNLIKELY(v) __builtin_expect(v, 0) #else -# define LIKELY(v) (v) -# define UNLIKELY(v) (v) +#define LIKELY(v) (v) +#define UNLIKELY(v) (v) #endif static INLINE int_mv pack_int_mv(int16_t row, int16_t col) { @@ -40,23 +40,23 @@ static INLINE MV_JOINT_TYPE get_mv_joint(const int_mv mv) { return mv.as_int == 0 ? 0 : 1; } -static INLINE int mv_cost(const int_mv mv, - const int *joint_cost, int *const comp_cost[2]) { - return joint_cost[get_mv_joint(mv)] + - comp_cost[0][mv.as_mv.row] + comp_cost[1][mv.as_mv.col]; +static INLINE int mv_cost(const int_mv mv, const int *joint_cost, + int *const comp_cost[2]) { + return joint_cost[get_mv_joint(mv)] + comp_cost[0][mv.as_mv.row] + + comp_cost[1][mv.as_mv.col]; } static int mvsad_err_cost(const MACROBLOCK *x, const int_mv mv, const MV *ref, int sad_per_bit) { - const int_mv diff = pack_int_mv(mv.as_mv.row - ref->row, - mv.as_mv.col - ref->col); - return ROUND_POWER_OF_TWO((unsigned)mv_cost(diff, x->nmvjointsadcost, - x->nmvsadcost) * - sad_per_bit, VP9_PROB_COST_SHIFT); + const int_mv diff = + pack_int_mv(mv.as_mv.row - ref->row, mv.as_mv.col - ref->col); + return ROUND_POWER_OF_TWO( + (unsigned)mv_cost(diff, x->nmvjointsadcost, x->nmvsadcost) * sad_per_bit, + VP9_PROB_COST_SHIFT); } /***************************************************************************** - * This function utilises 3 properties of the cost function lookup tables, * + * This function utilizes 3 properties of the cost function lookup tables, * * constructed in using 'cal_nmvjointsadcost' and 'cal_nmvsadcosts' in * * vp9_encoder.c. * * For the joint cost: * @@ -71,14 +71,13 @@ static int mvsad_err_cost(const MACROBLOCK *x, const int_mv mv, const MV *ref, * which does not rely on these properties. 
* *****************************************************************************/ int vp9_diamond_search_sad_avx(const MACROBLOCK *x, - const search_site_config *cfg, - MV *ref_mv, MV *best_mv, int search_param, - int sad_per_bit, int *num00, - const vp9_variance_fn_ptr_t *fn_ptr, + const search_site_config *cfg, MV *ref_mv, + MV *best_mv, int search_param, int sad_per_bit, + int *num00, const vp9_variance_fn_ptr_t *fn_ptr, const MV *center_mv) { - const int_mv maxmv = pack_int_mv(x->mv_row_max, x->mv_col_max); + const int_mv maxmv = pack_int_mv(x->mv_limits.row_max, x->mv_limits.col_max); const __m128i v_max_mv_w = _mm_set1_epi32(maxmv.as_int); - const int_mv minmv = pack_int_mv(x->mv_row_min, x->mv_col_min); + const int_mv minmv = pack_int_mv(x->mv_limits.row_min, x->mv_limits.col_min); const __m128i v_min_mv_w = _mm_set1_epi32(minmv.as_int); const __m128i v_spb_d = _mm_set1_epi32(sad_per_bit); @@ -91,12 +90,12 @@ int vp9_diamond_search_sad_avx(const MACROBLOCK *x, // 0 = initial step (MAX_FIRST_STEP) pel // 1 = (MAX_FIRST_STEP/2) pel, // 2 = (MAX_FIRST_STEP/4) pel... 
- const MV *ss_mv = &cfg->ss_mv[cfg->searches_per_step * search_param]; + const MV *ss_mv = &cfg->ss_mv[cfg->searches_per_step * search_param]; const intptr_t *ss_os = &cfg->ss_os[cfg->searches_per_step * search_param]; const int tot_steps = cfg->total_steps - search_param; - const int_mv fcenter_mv = pack_int_mv(center_mv->row >> 3, - center_mv->col >> 3); + const int_mv fcenter_mv = + pack_int_mv(center_mv->row >> 3, center_mv->col >> 3); const __m128i vfcmv = _mm_set1_epi32(fcenter_mv.as_int); const int ref_row = clamp(ref_mv->row, minmv.as_mv.row, maxmv.as_mv.row); @@ -109,8 +108,8 @@ int vp9_diamond_search_sad_avx(const MACROBLOCK *x, const int what_stride = x->plane[0].src.stride; const int in_what_stride = x->e_mbd.plane[0].pre[0].stride; const uint8_t *const what = x->plane[0].src.buf; - const uint8_t *const in_what = x->e_mbd.plane[0].pre[0].buf + - ref_row * in_what_stride + ref_col; + const uint8_t *const in_what = + x->e_mbd.plane[0].pre[0].buf + ref_row * in_what_stride + ref_col; // Work out the start point for the search const uint8_t *best_address = in_what; @@ -122,10 +121,7 @@ int vp9_diamond_search_sad_avx(const MACROBLOCK *x, #endif unsigned int best_sad; - - int i; - int j; - int step; + int i, j, step; // Check the prerequisite cost function properties that are easy to check // in an assert. 
See the function-level documentation for details on all @@ -141,11 +137,7 @@ int vp9_diamond_search_sad_avx(const MACROBLOCK *x, for (i = 0, step = 0; step < tot_steps; step++) { for (j = 0; j < cfg->searches_per_step; j += 4, i += 4) { - __m128i v_sad_d; - __m128i v_cost_d; - __m128i v_outside_d; - __m128i v_inside_d; - __m128i v_diff_mv_w; + __m128i v_sad_d, v_cost_d, v_outside_d, v_inside_d, v_diff_mv_w; #if ARCH_X86_64 __m128i v_blocka[2]; #else @@ -153,7 +145,7 @@ int vp9_diamond_search_sad_avx(const MACROBLOCK *x, #endif // Compute the candidate motion vectors - const __m128i v_ss_mv_w = _mm_loadu_si128((const __m128i*)&ss_mv[i]); + const __m128i v_ss_mv_w = _mm_loadu_si128((const __m128i *)&ss_mv[i]); const __m128i v_these_mv_w = _mm_add_epi16(v_bmv_w, v_ss_mv_w); // Clamp them to the search bounds __m128i v_these_mv_clamp_w = v_these_mv_w; @@ -185,26 +177,24 @@ int vp9_diamond_search_sad_avx(const MACROBLOCK *x, { #if ARCH_X86_64 // sizeof(intptr_t) == 8 // Load the offsets - __m128i v_bo10_q = _mm_loadu_si128((const __m128i*)&ss_os[i+0]); - __m128i v_bo32_q = _mm_loadu_si128((const __m128i*)&ss_os[i+2]); + __m128i v_bo10_q = _mm_loadu_si128((const __m128i *)&ss_os[i + 0]); + __m128i v_bo32_q = _mm_loadu_si128((const __m128i *)&ss_os[i + 2]); // Set the ones falling outside to zero - v_bo10_q = _mm_and_si128(v_bo10_q, - _mm_cvtepi32_epi64(v_inside_d)); - v_bo32_q = _mm_and_si128(v_bo32_q, - _mm_unpackhi_epi32(v_inside_d, v_inside_d)); + v_bo10_q = _mm_and_si128(v_bo10_q, _mm_cvtepi32_epi64(v_inside_d)); + v_bo32_q = + _mm_and_si128(v_bo32_q, _mm_unpackhi_epi32(v_inside_d, v_inside_d)); // Compute the candidate addresses v_blocka[0] = _mm_add_epi64(v_ba_q, v_bo10_q); v_blocka[1] = _mm_add_epi64(v_ba_q, v_bo32_q); #else // ARCH_X86 // sizeof(intptr_t) == 4 - __m128i v_bo_d = _mm_loadu_si128((const __m128i*)&ss_os[i]); + __m128i v_bo_d = _mm_loadu_si128((const __m128i *)&ss_os[i]); v_bo_d = _mm_and_si128(v_bo_d, v_inside_d); v_blocka[0] = _mm_add_epi32(v_ba_d, 
v_bo_d); #endif } - fn_ptr->sdx4df(what, what_stride, - (const uint8_t **)&v_blocka[0], in_what_stride, - (uint32_t*)&v_sad_d); + fn_ptr->sdx4df(what, what_stride, (const uint8_t **)&v_blocka[0], + in_what_stride, (uint32_t *)&v_sad_d); // Look up the component cost of the residual motion vector { @@ -224,31 +214,28 @@ int vp9_diamond_search_sad_avx(const MACROBLOCK *x, const uint32_t cost3 = x->nmvsadcost[0][row3] + x->nmvsadcost[0][col3]; __m128i v_cost_10_d, v_cost_32_d; - v_cost_10_d = _mm_cvtsi32_si128(cost0); v_cost_10_d = _mm_insert_epi32(v_cost_10_d, cost1, 1); - v_cost_32_d = _mm_cvtsi32_si128(cost2); v_cost_32_d = _mm_insert_epi32(v_cost_32_d, cost3, 1); - v_cost_d = _mm_unpacklo_epi64(v_cost_10_d, v_cost_32_d); } // Now add in the joint cost { - const __m128i v_sel_d = _mm_cmpeq_epi32(v_diff_mv_w, - _mm_setzero_si128()); - const __m128i v_joint_cost_d = _mm_blendv_epi8(v_joint_cost_1_d, - v_joint_cost_0_d, - v_sel_d); + const __m128i v_sel_d = + _mm_cmpeq_epi32(v_diff_mv_w, _mm_setzero_si128()); + const __m128i v_joint_cost_d = + _mm_blendv_epi8(v_joint_cost_1_d, v_joint_cost_0_d, v_sel_d); v_cost_d = _mm_add_epi32(v_cost_d, v_joint_cost_d); } // Multiply by sad_per_bit v_cost_d = _mm_mullo_epi32(v_cost_d, v_spb_d); - // ROUND_POWER_OF_TWO(v_cost_d, 8) - v_cost_d = _mm_add_epi32(v_cost_d, _mm_set1_epi32(0x80)); - v_cost_d = _mm_srai_epi32(v_cost_d, 8); + // ROUND_POWER_OF_TWO(v_cost_d, VP9_PROB_COST_SHIFT) + v_cost_d = _mm_add_epi32(v_cost_d, + _mm_set1_epi32(1 << (VP9_PROB_COST_SHIFT - 1))); + v_cost_d = _mm_srai_epi32(v_cost_d, VP9_PROB_COST_SHIFT); // Add the cost to the sad v_sad_d = _mm_add_epi32(v_sad_d, v_cost_d); diff --git a/libs/libvpx/vp9/encoder/x86/vp9_error_intrin_avx2.c b/libs/libvpx/vp9/encoder/x86/vp9_error_intrin_avx2.c index dfebaab0ac..453af2a403 100644 --- a/libs/libvpx/vp9/encoder/x86/vp9_error_intrin_avx2.c +++ b/libs/libvpx/vp9/encoder/x86/vp9_error_intrin_avx2.c @@ -13,10 +13,8 @@ #include "./vp9_rtcd.h" #include 
"vpx/vpx_integer.h" -int64_t vp9_block_error_avx2(const int16_t *coeff, - const int16_t *dqcoeff, - intptr_t block_size, - int64_t *ssz) { +int64_t vp9_block_error_avx2(const int16_t *coeff, const int16_t *dqcoeff, + intptr_t block_size, int64_t *ssz) { __m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg; __m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi; __m256i sse_reg_64hi, ssz_reg_64hi; @@ -29,7 +27,7 @@ int64_t vp9_block_error_avx2(const int16_t *coeff, sse_reg = _mm256_set1_epi16(0); ssz_reg = _mm256_set1_epi16(0); - for (i = 0 ; i < block_size ; i+= 16) { + for (i = 0; i < block_size; i += 16) { // load 32 bytes from coeff and dqcoeff coeff_reg = _mm256_loadu_si256((const __m256i *)(coeff + i)); dqcoeff_reg = _mm256_loadu_si256((const __m256i *)(dqcoeff + i)); @@ -66,8 +64,8 @@ int64_t vp9_block_error_avx2(const int16_t *coeff, _mm256_extractf128_si256(ssz_reg, 1)); // store the results - _mm_storel_epi64((__m128i*)(&sse), sse_reg128); + _mm_storel_epi64((__m128i *)(&sse), sse_reg128); - _mm_storel_epi64((__m128i*)(ssz), ssz_reg128); + _mm_storel_epi64((__m128i *)(ssz), ssz_reg128); return sse; } diff --git a/libs/libvpx/vp9/encoder/x86/vp9_frame_scale_ssse3.c b/libs/libvpx/vp9/encoder/x86/vp9_frame_scale_ssse3.c index de903fa332..fa2a6449b0 100644 --- a/libs/libvpx/vp9/encoder/x86/vp9_frame_scale_ssse3.c +++ b/libs/libvpx/vp9/encoder/x86/vp9_frame_scale_ssse3.c @@ -8,11 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ -#if defined(_MSC_VER) && _MSC_VER <= 1500 -// Need to include math.h before calling tmmintrin.h/intrin.h -// in certain versions of MSVS. 
-#include -#endif #include // SSSE3 #include "./vp9_rtcd.h" @@ -23,24 +18,23 @@ extern void vp9_scale_and_extend_frame_c(const YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst); -void downsample_2_to_1_ssse3(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - int w, int h) { +static void downsample_2_to_1_ssse3(const uint8_t *src, ptrdiff_t src_stride, + uint8_t *dst, ptrdiff_t dst_stride, int w, + int h) { const __m128i mask = _mm_set1_epi16(0x00FF); const int max_width = w & ~15; int y; for (y = 0; y < h; ++y) { int x; for (x = 0; x < max_width; x += 16) { - const __m128i a = _mm_loadu_si128((const __m128i *)(src + x * 2 + 0)); + const __m128i a = _mm_loadu_si128((const __m128i *)(src + x * 2 + 0)); const __m128i b = _mm_loadu_si128((const __m128i *)(src + x * 2 + 16)); const __m128i a_and = _mm_and_si128(a, mask); const __m128i b_and = _mm_and_si128(b, mask); const __m128i c = _mm_packus_epi16(a_and, b_and); _mm_storeu_si128((__m128i *)(dst + x), c); } - for (; x < w; ++x) - dst[x] = src[x * 2]; + for (; x < w; ++x) dst[x] = src[x * 2]; src += src_stride * 2; dst += dst_stride; } @@ -52,9 +46,8 @@ static INLINE __m128i filter(const __m128i *const a, const __m128i *const b, const __m128i *const g, const __m128i *const h) { const __m128i coeffs_ab = _mm_set_epi8(6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1, 6, -1); - const __m128i coeffs_cd = - _mm_set_epi8(78, -19, 78, -19, 78, -19, 78, -19, 78, -19, 78, -19, - 78, -19, 78, -19); + const __m128i coeffs_cd = _mm_set_epi8(78, -19, 78, -19, 78, -19, 78, -19, 78, + -19, 78, -19, 78, -19, 78, -19); const __m128i const64_x16 = _mm_set1_epi16(64); const __m128i ab = _mm_unpacklo_epi8(*a, *b); const __m128i cd = _mm_unpacklo_epi8(*c, *d); @@ -92,9 +85,9 @@ static void eight_tap_row_ssse3(const uint8_t *src, uint8_t *dst, int w) { } } -void upsample_1_to_2_ssse3(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - int dst_w, int dst_h) { +static void 
upsample_1_to_2_ssse3(const uint8_t *src, ptrdiff_t src_stride, + uint8_t *dst, ptrdiff_t dst_stride, int dst_w, + int dst_h) { dst_w /= 2; dst_h /= 2; { @@ -121,7 +114,7 @@ void upsample_1_to_2_ssse3(const uint8_t *src, ptrdiff_t src_stride, int x; eight_tap_row_ssse3(src + src_stride * 4 - 3, tmp7, dst_w); for (x = 0; x < max_width; x += 8) { - const __m128i A = _mm_loadl_epi64((const __m128i *)(src + x)); + const __m128i A = _mm_loadl_epi64((const __m128i *)(src + x)); const __m128i B = _mm_loadl_epi64((const __m128i *)(tmp3 + x)); const __m128i AB = _mm_unpacklo_epi8(A, B); __m128i C, D, CD; @@ -184,23 +177,23 @@ void vp9_scale_and_extend_frame_ssse3(const YV12_BUFFER_CONFIG *src, const int dst_uv_h = dst_h / 2; if (dst_w * 2 == src_w && dst_h * 2 == src_h) { - downsample_2_to_1_ssse3(src->y_buffer, src->y_stride, - dst->y_buffer, dst->y_stride, dst_w, dst_h); - downsample_2_to_1_ssse3(src->u_buffer, src->uv_stride, - dst->u_buffer, dst->uv_stride, dst_uv_w, dst_uv_h); - downsample_2_to_1_ssse3(src->v_buffer, src->uv_stride, - dst->v_buffer, dst->uv_stride, dst_uv_w, dst_uv_h); + downsample_2_to_1_ssse3(src->y_buffer, src->y_stride, dst->y_buffer, + dst->y_stride, dst_w, dst_h); + downsample_2_to_1_ssse3(src->u_buffer, src->uv_stride, dst->u_buffer, + dst->uv_stride, dst_uv_w, dst_uv_h); + downsample_2_to_1_ssse3(src->v_buffer, src->uv_stride, dst->v_buffer, + dst->uv_stride, dst_uv_w, dst_uv_h); vpx_extend_frame_borders(dst); } else if (dst_w == src_w * 2 && dst_h == src_h * 2) { // The upsample() supports widths up to 1920 * 2. If greater, fall back // to vp9_scale_and_extend_frame_c(). 
- if (dst_w/2 <= 1920) { - upsample_1_to_2_ssse3(src->y_buffer, src->y_stride, - dst->y_buffer, dst->y_stride, dst_w, dst_h); - upsample_1_to_2_ssse3(src->u_buffer, src->uv_stride, - dst->u_buffer, dst->uv_stride, dst_uv_w, dst_uv_h); - upsample_1_to_2_ssse3(src->v_buffer, src->uv_stride, - dst->v_buffer, dst->uv_stride, dst_uv_w, dst_uv_h); + if (dst_w / 2 <= 1920) { + upsample_1_to_2_ssse3(src->y_buffer, src->y_stride, dst->y_buffer, + dst->y_stride, dst_w, dst_h); + upsample_1_to_2_ssse3(src->u_buffer, src->uv_stride, dst->u_buffer, + dst->uv_stride, dst_uv_w, dst_uv_h); + upsample_1_to_2_ssse3(src->v_buffer, src->uv_stride, dst->v_buffer, + dst->uv_stride, dst_uv_w, dst_uv_h); vpx_extend_frame_borders(dst); } else { vp9_scale_and_extend_frame_c(src, dst); diff --git a/libs/libvpx/vp9/encoder/x86/vp9_highbd_block_error_intrin_sse2.c b/libs/libvpx/vp9/encoder/x86/vp9_highbd_block_error_intrin_sse2.c index c245ccafa8..91f627c343 100644 --- a/libs/libvpx/vp9/encoder/x86/vp9_highbd_block_error_intrin_sse2.c +++ b/libs/libvpx/vp9/encoder/x86/vp9_highbd_block_error_intrin_sse2.c @@ -23,41 +23,41 @@ int64_t vp9_highbd_block_error_sse2(tran_low_t *coeff, tran_low_t *dqcoeff, const int shift = 2 * (bps - 8); const int rounding = shift > 0 ? 
1 << (shift - 1) : 0; - for (i = 0; i < block_size; i+=8) { + for (i = 0; i < block_size; i += 8) { // Load the data into xmm registers - __m128i mm_coeff = _mm_load_si128((__m128i*) (coeff + i)); - __m128i mm_coeff2 = _mm_load_si128((__m128i*) (coeff + i + 4)); - __m128i mm_dqcoeff = _mm_load_si128((__m128i*) (dqcoeff + i)); - __m128i mm_dqcoeff2 = _mm_load_si128((__m128i*) (dqcoeff + i + 4)); + __m128i mm_coeff = _mm_load_si128((__m128i *)(coeff + i)); + __m128i mm_coeff2 = _mm_load_si128((__m128i *)(coeff + i + 4)); + __m128i mm_dqcoeff = _mm_load_si128((__m128i *)(dqcoeff + i)); + __m128i mm_dqcoeff2 = _mm_load_si128((__m128i *)(dqcoeff + i + 4)); // Check if any values require more than 15 bit max = _mm_set1_epi32(0x3fff); min = _mm_set1_epi32(0xffffc000); cmp0 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff, max), - _mm_cmplt_epi32(mm_coeff, min)); + _mm_cmplt_epi32(mm_coeff, min)); cmp1 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff2, max), - _mm_cmplt_epi32(mm_coeff2, min)); + _mm_cmplt_epi32(mm_coeff2, min)); cmp2 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff, max), - _mm_cmplt_epi32(mm_dqcoeff, min)); + _mm_cmplt_epi32(mm_dqcoeff, min)); cmp3 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff2, max), - _mm_cmplt_epi32(mm_dqcoeff2, min)); - test = _mm_movemask_epi8(_mm_or_si128(_mm_or_si128(cmp0, cmp1), - _mm_or_si128(cmp2, cmp3))); + _mm_cmplt_epi32(mm_dqcoeff2, min)); + test = _mm_movemask_epi8( + _mm_or_si128(_mm_or_si128(cmp0, cmp1), _mm_or_si128(cmp2, cmp3))); if (!test) { - __m128i mm_diff, error_sse2, sqcoeff_sse2;; + __m128i mm_diff, error_sse2, sqcoeff_sse2; mm_coeff = _mm_packs_epi32(mm_coeff, mm_coeff2); mm_dqcoeff = _mm_packs_epi32(mm_dqcoeff, mm_dqcoeff2); mm_diff = _mm_sub_epi16(mm_coeff, mm_dqcoeff); error_sse2 = _mm_madd_epi16(mm_diff, mm_diff); sqcoeff_sse2 = _mm_madd_epi16(mm_coeff, mm_coeff); - _mm_storeu_si128((__m128i*)temp, error_sse2); + _mm_storeu_si128((__m128i *)temp, error_sse2); error = error + temp[0] + temp[1] + temp[2] + temp[3]; - 
_mm_storeu_si128((__m128i*)temp, sqcoeff_sse2); + _mm_storeu_si128((__m128i *)temp, sqcoeff_sse2); sqcoeff += temp[0] + temp[1] + temp[2] + temp[3]; } else { for (j = 0; j < 8; j++) { const int64_t diff = coeff[i + j] - dqcoeff[i + j]; - error += diff * diff; + error += diff * diff; sqcoeff += (int64_t)coeff[i + j] * (int64_t)coeff[i + j]; } } diff --git a/libs/libvpx/vp9/encoder/x86/vp9_quantize_sse2.c b/libs/libvpx/vp9/encoder/x86/vp9_quantize_sse2.c index 2071dfe3c9..3f8ee5f244 100644 --- a/libs/libvpx/vp9/encoder/x86/vp9_quantize_sse2.c +++ b/libs/libvpx/vp9/encoder/x86/vp9_quantize_sse2.c @@ -14,14 +14,13 @@ #include "./vp9_rtcd.h" #include "vpx/vpx_integer.h" -void vp9_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs, - int skip_block, const int16_t* zbin_ptr, - const int16_t* round_ptr, const int16_t* quant_ptr, - const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr, - int16_t* dqcoeff_ptr, const int16_t* dequant_ptr, - uint16_t* eob_ptr, - const int16_t* scan_ptr, - const int16_t* iscan_ptr) { +void vp9_quantize_fp_sse2(const int16_t *coeff_ptr, intptr_t n_coeffs, + int skip_block, const int16_t *zbin_ptr, + const int16_t *round_ptr, const int16_t *quant_ptr, + const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr, + int16_t *dqcoeff_ptr, const int16_t *dequant_ptr, + uint16_t *eob_ptr, const int16_t *scan_ptr, + const int16_t *iscan_ptr) { __m128i zero; __m128i thr; int16_t nzflag; @@ -44,9 +43,9 @@ void vp9_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs, // Setup global values { - round = _mm_load_si128((const __m128i*)round_ptr); - quant = _mm_load_si128((const __m128i*)quant_ptr); - dequant = _mm_load_si128((const __m128i*)dequant_ptr); + round = _mm_load_si128((const __m128i *)round_ptr); + quant = _mm_load_si128((const __m128i *)quant_ptr); + dequant = _mm_load_si128((const __m128i *)dequant_ptr); } { @@ -54,8 +53,8 @@ void vp9_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs, __m128i qcoeff0, qcoeff1; __m128i qtmp0, 
qtmp1; // Do DC and first 15 AC - coeff0 = _mm_load_si128((const __m128i*)(coeff_ptr + n_coeffs)); - coeff1 = _mm_load_si128((const __m128i*)(coeff_ptr + n_coeffs) + 1); + coeff0 = _mm_load_si128((const __m128i *)(coeff_ptr + n_coeffs)); + coeff1 = _mm_load_si128((const __m128i *)(coeff_ptr + n_coeffs) + 1); // Poor man's sign extract coeff0_sign = _mm_srai_epi16(coeff0, 15); @@ -78,15 +77,15 @@ void vp9_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs, qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); coeff0 = _mm_mullo_epi16(qcoeff0, dequant); dequant = _mm_unpackhi_epi64(dequant, dequant); coeff1 = _mm_mullo_epi16(qcoeff1, dequant); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1); } { @@ -99,8 +98,8 @@ void vp9_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs, zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero); nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero); nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero); - iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs)); - iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1); + iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs)); + iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1); // Add one to convert from indices to counts iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0); iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1); @@ -121,8 +120,8 @@ void vp9_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs, __m128i qcoeff0, qcoeff1; 
__m128i qtmp0, qtmp1; - coeff0 = _mm_load_si128((const __m128i*)(coeff_ptr + n_coeffs)); - coeff1 = _mm_load_si128((const __m128i*)(coeff_ptr + n_coeffs) + 1); + coeff0 = _mm_load_si128((const __m128i *)(coeff_ptr + n_coeffs)); + coeff1 = _mm_load_si128((const __m128i *)(coeff_ptr + n_coeffs) + 1); // Poor man's sign extract coeff0_sign = _mm_srai_epi16(coeff0, 15); @@ -133,7 +132,7 @@ void vp9_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs, qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); nzflag = _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff0, thr)) | - _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr)); + _mm_movemask_epi8(_mm_cmpgt_epi16(qcoeff1, thr)); if (nzflag) { qcoeff0 = _mm_adds_epi16(qcoeff0, round); @@ -147,20 +146,20 @@ void vp9_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs, qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign); qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), qcoeff0); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), qcoeff0); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, qcoeff1); coeff0 = _mm_mullo_epi16(qcoeff0, dequant); coeff1 = _mm_mullo_epi16(qcoeff1, dequant); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), coeff0); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, coeff1); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), coeff0); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, coeff1); } else { - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero); + _mm_store_si128((__m128i 
*)(dqcoeff_ptr + n_coeffs) + 1, zero); } } @@ -174,8 +173,8 @@ void vp9_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs, zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero); nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero); nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero); - iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs)); - iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1); + iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs)); + iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1); // Add one to convert from indices to counts iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0); iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1); @@ -200,10 +199,10 @@ void vp9_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs, } } else { do { - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(dqcoeff_ptr + n_coeffs) + 1, zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs), zero); - _mm_store_si128((__m128i*)(qcoeff_ptr + n_coeffs) + 1, zero); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs), zero); + _mm_store_si128((__m128i *)(dqcoeff_ptr + n_coeffs) + 1, zero); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs), zero); + _mm_store_si128((__m128i *)(qcoeff_ptr + n_coeffs) + 1, zero); n_coeffs += 8 * 2; } while (n_coeffs < 0); *eob_ptr = 0; diff --git a/libs/libvpx/vp9/vp9_common.mk b/libs/libvpx/vp9/vp9_common.mk index d0135c6f8d..5bfc0d3599 100644 --- a/libs/libvpx/vp9/vp9_common.mk +++ b/libs/libvpx/vp9/vp9_common.mk @@ -13,7 +13,7 @@ VP9_COMMON_SRCS-yes += vp9_iface_common.h VP9_COMMON_SRCS-yes += common/vp9_ppflags.h VP9_COMMON_SRCS-yes += common/vp9_alloccommon.c VP9_COMMON_SRCS-yes += common/vp9_blockd.c -VP9_COMMON_SRCS-yes += common/vp9_debugmodes.c +# VP9_COMMON_SRCS-yes += common/vp9_debugmodes.c VP9_COMMON_SRCS-yes += common/vp9_entropy.c VP9_COMMON_SRCS-yes += common/vp9_entropymode.c VP9_COMMON_SRCS-yes += common/vp9_entropymv.c @@ -45,7 +45,6 @@ 
VP9_COMMON_SRCS-yes += common/vp9_scale.h VP9_COMMON_SRCS-yes += common/vp9_scale.c VP9_COMMON_SRCS-yes += common/vp9_seg_common.h VP9_COMMON_SRCS-yes += common/vp9_seg_common.c -VP9_COMMON_SRCS-yes += common/vp9_textblit.h VP9_COMMON_SRCS-yes += common/vp9_tile_common.h VP9_COMMON_SRCS-yes += common/vp9_tile_common.c VP9_COMMON_SRCS-yes += common/vp9_loopfilter.c @@ -55,7 +54,6 @@ VP9_COMMON_SRCS-yes += common/vp9_mvref_common.h VP9_COMMON_SRCS-yes += common/vp9_quant_common.c VP9_COMMON_SRCS-yes += common/vp9_reconinter.c VP9_COMMON_SRCS-yes += common/vp9_reconintra.c -VP9_COMMON_SRCS-$(CONFIG_POSTPROC_VISUALIZER) += common/vp9_textblit.c VP9_COMMON_SRCS-yes += common/vp9_common_data.c VP9_COMMON_SRCS-yes += common/vp9_common_data.h VP9_COMMON_SRCS-yes += common/vp9_scan.c @@ -67,7 +65,6 @@ VP9_COMMON_SRCS-$(CONFIG_VP9_POSTPROC) += common/vp9_mfqe.h VP9_COMMON_SRCS-$(CONFIG_VP9_POSTPROC) += common/vp9_mfqe.c ifeq ($(CONFIG_VP9_POSTPROC),yes) VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_mfqe_sse2.asm -VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_postproc_sse2.asm endif ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes) diff --git a/libs/libvpx/vp9/vp9_cx_iface.c b/libs/libvpx/vp9/vp9_cx_iface.c index db7f537a69..a797b2c262 100644 --- a/libs/libvpx/vp9/vp9_cx_iface.c +++ b/libs/libvpx/vp9/vp9_cx_iface.c @@ -14,6 +14,7 @@ #include "./vpx_config.h" #include "vpx/vpx_encoder.h" #include "vpx_ports/vpx_once.h" +#include "vpx_ports/system_state.h" #include "vpx/internal/vpx_codec_internal.h" #include "./vpx_version.h" #include "vp9/encoder/vp9_encoder.h" @@ -22,101 +23,92 @@ #include "vp9/vp9_iface_common.h" struct vp9_extracfg { - int cpu_used; // available cpu percentage in 1/16 - unsigned int enable_auto_alt_ref; - unsigned int noise_sensitivity; - unsigned int sharpness; - unsigned int static_thresh; - unsigned int tile_columns; - unsigned int tile_rows; - unsigned int arnr_max_frames; - unsigned int arnr_strength; - unsigned int min_gf_interval; - unsigned int 
max_gf_interval; - vp8e_tuning tuning; - unsigned int cq_level; // constrained quality level - unsigned int rc_max_intra_bitrate_pct; - unsigned int rc_max_inter_bitrate_pct; - unsigned int gf_cbr_boost_pct; - unsigned int lossless; - unsigned int frame_parallel_decoding_mode; - AQ_MODE aq_mode; - unsigned int frame_periodic_boost; - vpx_bit_depth_t bit_depth; - vp9e_tune_content content; - vpx_color_space_t color_space; - vpx_color_range_t color_range; - int render_width; - int render_height; + int cpu_used; // available cpu percentage in 1/16 + unsigned int enable_auto_alt_ref; + unsigned int noise_sensitivity; + unsigned int sharpness; + unsigned int static_thresh; + unsigned int tile_columns; + unsigned int tile_rows; + unsigned int arnr_max_frames; + unsigned int arnr_strength; + unsigned int min_gf_interval; + unsigned int max_gf_interval; + vp8e_tuning tuning; + unsigned int cq_level; // constrained quality level + unsigned int rc_max_intra_bitrate_pct; + unsigned int rc_max_inter_bitrate_pct; + unsigned int gf_cbr_boost_pct; + unsigned int lossless; + unsigned int target_level; + unsigned int frame_parallel_decoding_mode; + AQ_MODE aq_mode; + int alt_ref_aq; + unsigned int frame_periodic_boost; + vpx_bit_depth_t bit_depth; + vp9e_tune_content content; + vpx_color_space_t color_space; + vpx_color_range_t color_range; + int render_width; + int render_height; }; static struct vp9_extracfg default_extra_cfg = { - 0, // cpu_used - 1, // enable_auto_alt_ref - 0, // noise_sensitivity - 0, // sharpness - 0, // static_thresh - 6, // tile_columns - 0, // tile_rows - 7, // arnr_max_frames - 5, // arnr_strength - 0, // min_gf_interval; 0 -> default decision - 0, // max_gf_interval; 0 -> default decision - VP8_TUNE_PSNR, // tuning - 10, // cq_level - 0, // rc_max_intra_bitrate_pct - 0, // rc_max_inter_bitrate_pct - 0, // gf_cbr_boost_pct - 0, // lossless - 1, // frame_parallel_decoding_mode - NO_AQ, // aq_mode - 0, // frame_periodic_delta_q - VPX_BITS_8, // Bit depth - 
VP9E_CONTENT_DEFAULT, // content - VPX_CS_UNKNOWN, // color space - 0, // color range - 0, // render width - 0, // render height + 0, // cpu_used + 1, // enable_auto_alt_ref + 0, // noise_sensitivity + 0, // sharpness + 0, // static_thresh + 6, // tile_columns + 0, // tile_rows + 7, // arnr_max_frames + 5, // arnr_strength + 0, // min_gf_interval; 0 -> default decision + 0, // max_gf_interval; 0 -> default decision + VP8_TUNE_PSNR, // tuning + 10, // cq_level + 0, // rc_max_intra_bitrate_pct + 0, // rc_max_inter_bitrate_pct + 0, // gf_cbr_boost_pct + 0, // lossless + 255, // target_level + 1, // frame_parallel_decoding_mode + NO_AQ, // aq_mode + 0, // alt_ref_aq + 0, // frame_periodic_delta_q + VPX_BITS_8, // Bit depth + VP9E_CONTENT_DEFAULT, // content + VPX_CS_UNKNOWN, // color space + 0, // color range + 0, // render width + 0, // render height }; struct vpx_codec_alg_priv { - vpx_codec_priv_t base; - vpx_codec_enc_cfg_t cfg; - struct vp9_extracfg extra_cfg; - VP9EncoderConfig oxcf; - VP9_COMP *cpi; - unsigned char *cx_data; - size_t cx_data_sz; - unsigned char *pending_cx_data; - size_t pending_cx_data_sz; - int pending_frame_count; - size_t pending_frame_sizes[8]; - size_t pending_frame_magnitude; - vpx_image_t preview_img; - vpx_enc_frame_flags_t next_frame_flags; - vp8_postproc_cfg_t preview_ppcfg; + vpx_codec_priv_t base; + vpx_codec_enc_cfg_t cfg; + struct vp9_extracfg extra_cfg; + VP9EncoderConfig oxcf; + VP9_COMP *cpi; + unsigned char *cx_data; + size_t cx_data_sz; + unsigned char *pending_cx_data; + size_t pending_cx_data_sz; + int pending_frame_count; + size_t pending_frame_sizes[8]; + size_t pending_frame_magnitude; + vpx_image_t preview_img; + vpx_enc_frame_flags_t next_frame_flags; + vp8_postproc_cfg_t preview_ppcfg; vpx_codec_pkt_list_decl(256) pkt_list; - unsigned int fixed_kf_cntr; + unsigned int fixed_kf_cntr; vpx_codec_priv_output_cx_pkt_cb_pair_t output_cx_pkt_cb; // BufferPool that holds all reference frames. 
- BufferPool *buffer_pool; + BufferPool *buffer_pool; }; -static VP9_REFFRAME ref_frame_to_vp9_reframe(vpx_ref_frame_type_t frame) { - switch (frame) { - case VP8_LAST_FRAME: - return VP9_LAST_FLAG; - case VP8_GOLD_FRAME: - return VP9_GOLD_FLAG; - case VP8_ALTR_FRAME: - return VP9_ALT_FLAG; - } - assert(0 && "Invalid Reference Frame"); - return VP9_LAST_FLAG; -} - -static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx, - const struct vpx_internal_error_info *error) { +static vpx_codec_err_t update_error_state( + vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) { const vpx_codec_err_t res = error->error_code; if (res != VPX_CODEC_OK) @@ -125,58 +117,61 @@ static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx, return res; } - #undef ERROR -#define ERROR(str) do {\ - ctx->base.err_detail = str;\ - return VPX_CODEC_INVALID_PARAM;\ +#define ERROR(str) \ + do { \ + ctx->base.err_detail = str; \ + return VPX_CODEC_INVALID_PARAM; \ } while (0) -#define RANGE_CHECK(p, memb, lo, hi) do {\ +#define RANGE_CHECK(p, memb, lo, hi) \ + do { \ if (!(((p)->memb == lo || (p)->memb > (lo)) && (p)->memb <= hi)) \ - ERROR(#memb " out of range ["#lo".."#hi"]");\ + ERROR(#memb " out of range [" #lo ".." #hi "]"); \ } while (0) -#define RANGE_CHECK_HI(p, memb, hi) do {\ - if (!((p)->memb <= (hi))) \ - ERROR(#memb " out of range [.."#hi"]");\ +#define RANGE_CHECK_HI(p, memb, hi) \ + do { \ + if (!((p)->memb <= (hi))) ERROR(#memb " out of range [.." 
#hi "]"); \ } while (0) -#define RANGE_CHECK_LO(p, memb, lo) do {\ - if (!((p)->memb >= (lo))) \ - ERROR(#memb " out of range ["#lo"..]");\ +#define RANGE_CHECK_LO(p, memb, lo) \ + do { \ + if (!((p)->memb >= (lo))) ERROR(#memb " out of range [" #lo "..]"); \ } while (0) -#define RANGE_CHECK_BOOL(p, memb) do {\ - if (!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean");\ +#define RANGE_CHECK_BOOL(p, memb) \ + do { \ + if (!!((p)->memb) != (p)->memb) ERROR(#memb " expected boolean"); \ } while (0) static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx, const vpx_codec_enc_cfg_t *cfg, const struct vp9_extracfg *extra_cfg) { - RANGE_CHECK(cfg, g_w, 1, 65535); // 16 bits available - RANGE_CHECK(cfg, g_h, 1, 65535); // 16 bits available - RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000); - RANGE_CHECK(cfg, g_timebase.num, 1, cfg->g_timebase.den); - RANGE_CHECK_HI(cfg, g_profile, 3); + RANGE_CHECK(cfg, g_w, 1, 65535); // 16 bits available + RANGE_CHECK(cfg, g_h, 1, 65535); // 16 bits available + RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000); + RANGE_CHECK(cfg, g_timebase.num, 1, 1000000000); + RANGE_CHECK_HI(cfg, g_profile, 3); - RANGE_CHECK_HI(cfg, rc_max_quantizer, 63); - RANGE_CHECK_HI(cfg, rc_min_quantizer, cfg->rc_max_quantizer); + RANGE_CHECK_HI(cfg, rc_max_quantizer, 63); + RANGE_CHECK_HI(cfg, rc_min_quantizer, cfg->rc_max_quantizer); RANGE_CHECK_BOOL(extra_cfg, lossless); - RANGE_CHECK(extra_cfg, aq_mode, 0, AQ_MODE_COUNT - 1); + RANGE_CHECK(extra_cfg, aq_mode, 0, AQ_MODE_COUNT - 2); + RANGE_CHECK(extra_cfg, alt_ref_aq, 0, 1); RANGE_CHECK(extra_cfg, frame_periodic_boost, 0, 1); - RANGE_CHECK_HI(cfg, g_threads, 64); - RANGE_CHECK_HI(cfg, g_lag_in_frames, MAX_LAG_BUFFERS); - RANGE_CHECK(cfg, rc_end_usage, VPX_VBR, VPX_Q); - RANGE_CHECK_HI(cfg, rc_undershoot_pct, 100); - RANGE_CHECK_HI(cfg, rc_overshoot_pct, 100); + RANGE_CHECK_HI(cfg, g_threads, 64); + RANGE_CHECK_HI(cfg, g_lag_in_frames, MAX_LAG_BUFFERS); + RANGE_CHECK(cfg, rc_end_usage, 
VPX_VBR, VPX_Q); + RANGE_CHECK_HI(cfg, rc_undershoot_pct, 100); + RANGE_CHECK_HI(cfg, rc_overshoot_pct, 100); RANGE_CHECK_HI(cfg, rc_2pass_vbr_bias_pct, 100); - RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO); - RANGE_CHECK_BOOL(cfg, rc_resize_allowed); - RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100); - RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100); + RANGE_CHECK(cfg, kf_mode, VPX_KF_DISABLED, VPX_KF_AUTO); + RANGE_CHECK_BOOL(cfg, rc_resize_allowed); + RANGE_CHECK_HI(cfg, rc_dropframe_thresh, 100); + RANGE_CHECK_HI(cfg, rc_resize_up_thresh, 100); RANGE_CHECK_HI(cfg, rc_resize_down_thresh, 100); - RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_LAST_PASS); + RANGE_CHECK(cfg, g_pass, VPX_RC_ONE_PASS, VPX_RC_LAST_PASS); RANGE_CHECK(extra_cfg, min_gf_interval, 0, (MAX_LAG_BUFFERS - 1)); RANGE_CHECK(extra_cfg, max_gf_interval, 0, (MAX_LAG_BUFFERS - 1)); if (extra_cfg->max_gf_interval > 0) { @@ -184,7 +179,7 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx, } if (extra_cfg->min_gf_interval > 0 && extra_cfg->max_gf_interval > 0) { RANGE_CHECK(extra_cfg, max_gf_interval, extra_cfg->min_gf_interval, - (MAX_LAG_BUFFERS - 1)); + (MAX_LAG_BUFFERS - 1)); } if (cfg->rc_resize_allowed == 1) { @@ -195,17 +190,27 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx, RANGE_CHECK(cfg, ss_number_layers, 1, VPX_SS_MAX_LAYERS); RANGE_CHECK(cfg, ts_number_layers, 1, VPX_TS_MAX_LAYERS); + { + unsigned int level = extra_cfg->target_level; + if (level != LEVEL_1 && level != LEVEL_1_1 && level != LEVEL_2 && + level != LEVEL_2_1 && level != LEVEL_3 && level != LEVEL_3_1 && + level != LEVEL_4 && level != LEVEL_4_1 && level != LEVEL_5 && + level != LEVEL_5_1 && level != LEVEL_5_2 && level != LEVEL_6 && + level != LEVEL_6_1 && level != LEVEL_6_2 && level != LEVEL_UNKNOWN && + level != LEVEL_MAX) + ERROR("target_level is invalid"); + } + if (cfg->ss_number_layers * cfg->ts_number_layers > VPX_MAX_LAYERS) ERROR("ss_number_layers * ts_number_layers is out 
of range"); if (cfg->ts_number_layers > 1) { unsigned int sl, tl; for (sl = 1; sl < cfg->ss_number_layers; ++sl) { for (tl = 1; tl < cfg->ts_number_layers; ++tl) { - const int layer = - LAYER_IDS_TO_IDX(sl, tl, cfg->ts_number_layers); + const int layer = LAYER_IDS_TO_IDX(sl, tl, cfg->ts_number_layers); if (cfg->layer_target_bitrate[layer] < cfg->layer_target_bitrate[layer - 1]) - ERROR("ts_target_bitrate entries are not increasing"); + ERROR("ts_target_bitrate entries are not increasing"); } } @@ -221,24 +226,23 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx, cfg->g_pass == VPX_RC_LAST_PASS) { unsigned int i, alt_ref_sum = 0; for (i = 0; i < cfg->ss_number_layers; ++i) { - if (cfg->ss_enable_auto_alt_ref[i]) - ++alt_ref_sum; + if (cfg->ss_enable_auto_alt_ref[i]) ++alt_ref_sum; } if (alt_ref_sum > REF_FRAMES - cfg->ss_number_layers) ERROR("Not enough ref buffers for svc alt ref frames"); if (cfg->ss_number_layers * cfg->ts_number_layers > 3 && cfg->g_error_resilient == 0) - ERROR("Multiple frame context are not supported for more than 3 layers"); + ERROR("Multiple frame context are not supported for more than 3 layers"); } #endif // VP9 does not support a lower bound on the keyframe interval in // automatic keyframe placement mode. 
- if (cfg->kf_mode != VPX_KF_DISABLED && - cfg->kf_min_dist != cfg->kf_max_dist && + if (cfg->kf_mode != VPX_KF_DISABLED && cfg->kf_min_dist != cfg->kf_max_dist && cfg->kf_min_dist > 0) - ERROR("kf_min_dist not supported in auto mode, use 0 " - "or kf_max_dist instead."); + ERROR( + "kf_min_dist not supported in auto mode, use 0 " + "or kf_max_dist instead."); RANGE_CHECK(extra_cfg, enable_auto_alt_ref, 0, 2); RANGE_CHECK(extra_cfg, cpu_used, -8, 8); @@ -251,12 +255,12 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx, RANGE_CHECK(extra_cfg, cq_level, 0, 63); RANGE_CHECK(cfg, g_bit_depth, VPX_BITS_8, VPX_BITS_12); RANGE_CHECK(cfg, g_input_bit_depth, 8, 12); - RANGE_CHECK(extra_cfg, content, - VP9E_CONTENT_DEFAULT, VP9E_CONTENT_INVALID - 1); + RANGE_CHECK(extra_cfg, content, VP9E_CONTENT_DEFAULT, + VP9E_CONTENT_INVALID - 1); // TODO(yaowu): remove this when ssim tuning is implemented for vp9 if (extra_cfg->tuning == VP8_TUNE_SSIM) - ERROR("Option --tune=ssim is not currently supported in VP9."); + ERROR("Option --tune=ssim is not currently supported in VP9."); if (cfg->g_pass == VPX_RC_LAST_PASS) { const size_t packet_sz = sizeof(FIRSTPASS_STATS); @@ -271,7 +275,7 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx, if (cfg->ss_number_layers > 1 || cfg->ts_number_layers > 1) { int i; - unsigned int n_packets_per_layer[VPX_SS_MAX_LAYERS] = {0}; + unsigned int n_packets_per_layer[VPX_SS_MAX_LAYERS] = { 0 }; stats = cfg->rc_twopass_stats_in.buf; for (i = 0; i < n_packets; ++i) { @@ -284,17 +288,18 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx, for (i = 0; i < (int)cfg->ss_number_layers; ++i) { unsigned int layer_id; if (n_packets_per_layer[i] < 2) { - ERROR("rc_twopass_stats_in requires at least two packets for each " - "layer."); + ERROR( + "rc_twopass_stats_in requires at least two packets for each " + "layer."); } stats = (const FIRSTPASS_STATS *)cfg->rc_twopass_stats_in.buf + n_packets - cfg->ss_number_layers 
+ i; layer_id = (int)stats->spatial_layer_id; - if (layer_id >= cfg->ss_number_layers - ||(unsigned int)(stats->count + 0.5) != - n_packets_per_layer[layer_id] - 1) + if (layer_id >= cfg->ss_number_layers || + (unsigned int)(stats->count + 0.5) != + n_packets_per_layer[layer_id] - 1) ERROR("rc_twopass_stats_in missing EOS stats packet"); } } else { @@ -318,8 +323,7 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx, cfg->g_bit_depth > VPX_BITS_8) { ERROR("Codec high bit-depth not supported in profile < 2"); } - if (cfg->g_profile <= (unsigned int)PROFILE_1 && - cfg->g_input_bit_depth > 8) { + if (cfg->g_profile <= (unsigned int)PROFILE_1 && cfg->g_input_bit_depth > 8) { ERROR("Source high bit-depth not supported in profile < 2"); } if (cfg->g_profile > (unsigned int)PROFILE_1 && @@ -327,8 +331,7 @@ static vpx_codec_err_t validate_config(vpx_codec_alg_priv_t *ctx, ERROR("Codec bit-depth 8 not supported in profile > 1"); } RANGE_CHECK(extra_cfg, color_space, VPX_CS_UNKNOWN, VPX_CS_SRGB); - RANGE_CHECK(extra_cfg, color_range, - VPX_CR_STUDIO_RANGE, VPX_CR_FULL_RANGE); + RANGE_CHECK(extra_cfg, color_range, VPX_CR_STUDIO_RANGE, VPX_CR_FULL_RANGE); return VPX_CODEC_OK; } @@ -337,14 +340,14 @@ static vpx_codec_err_t validate_img(vpx_codec_alg_priv_t *ctx, switch (img->fmt) { case VPX_IMG_FMT_YV12: case VPX_IMG_FMT_I420: - case VPX_IMG_FMT_I42016: - break; + case VPX_IMG_FMT_I42016: break; case VPX_IMG_FMT_I422: case VPX_IMG_FMT_I444: case VPX_IMG_FMT_I440: if (ctx->cfg.g_profile != (unsigned int)PROFILE_1) { - ERROR("Invalid image format. I422, I444, I440 images are " - "not supported in profile."); + ERROR( + "Invalid image format. 
I422, I444, I440 images are " + "not supported in profile."); } break; case VPX_IMG_FMT_I42216: @@ -352,13 +355,15 @@ static vpx_codec_err_t validate_img(vpx_codec_alg_priv_t *ctx, case VPX_IMG_FMT_I44016: if (ctx->cfg.g_profile != (unsigned int)PROFILE_1 && ctx->cfg.g_profile != (unsigned int)PROFILE_3) { - ERROR("Invalid image format. 16-bit I422, I444, I440 images are " - "not supported in profile."); + ERROR( + "Invalid image format. 16-bit I422, I444, I440 images are " + "not supported in profile."); } break; default: - ERROR("Invalid image format. Only YV12, I420, I422, I444 images are " - "supported."); + ERROR( + "Invalid image format. Only YV12, I420, I422, I444 images are " + "supported."); break; } @@ -385,38 +390,30 @@ static int get_image_bps(const vpx_image_t *img) { } static vpx_codec_err_t set_encoder_config( - VP9EncoderConfig *oxcf, - const vpx_codec_enc_cfg_t *cfg, - const struct vp9_extracfg *extra_cfg) { + VP9EncoderConfig *oxcf, const vpx_codec_enc_cfg_t *cfg, + const struct vp9_extracfg *extra_cfg) { const int is_vbr = cfg->rc_end_usage == VPX_VBR; int sl, tl; oxcf->profile = cfg->g_profile; oxcf->max_threads = (int)cfg->g_threads; - oxcf->width = cfg->g_w; - oxcf->height = cfg->g_h; + oxcf->width = cfg->g_w; + oxcf->height = cfg->g_h; oxcf->bit_depth = cfg->g_bit_depth; oxcf->input_bit_depth = cfg->g_input_bit_depth; // guess a frame rate if out of whack, use 30 oxcf->init_framerate = (double)cfg->g_timebase.den / cfg->g_timebase.num; - if (oxcf->init_framerate > 180) - oxcf->init_framerate = 30; + if (oxcf->init_framerate > 180) oxcf->init_framerate = 30; oxcf->mode = GOOD; switch (cfg->g_pass) { - case VPX_RC_ONE_PASS: - oxcf->pass = 0; - break; - case VPX_RC_FIRST_PASS: - oxcf->pass = 1; - break; - case VPX_RC_LAST_PASS: - oxcf->pass = 2; - break; + case VPX_RC_ONE_PASS: oxcf->pass = 0; break; + case VPX_RC_FIRST_PASS: oxcf->pass = 1; break; + case VPX_RC_LAST_PASS: oxcf->pass = 2; break; } - oxcf->lag_in_frames = cfg->g_pass == 
VPX_RC_FIRST_PASS ? 0 - : cfg->g_lag_in_frames; + oxcf->lag_in_frames = + cfg->g_pass == VPX_RC_FIRST_PASS ? 0 : cfg->g_lag_in_frames; oxcf->rc_mode = cfg->rc_end_usage; // Convert target bandwidth from Kbit/s to Bit/s @@ -429,55 +426,56 @@ static vpx_codec_err_t set_encoder_config( extra_cfg->lossless ? 0 : vp9_quantizer_to_qindex(cfg->rc_min_quantizer); oxcf->worst_allowed_q = extra_cfg->lossless ? 0 : vp9_quantizer_to_qindex(cfg->rc_max_quantizer); - oxcf->cq_level = vp9_quantizer_to_qindex(extra_cfg->cq_level); + oxcf->cq_level = vp9_quantizer_to_qindex(extra_cfg->cq_level); oxcf->fixed_q = -1; - oxcf->under_shoot_pct = cfg->rc_undershoot_pct; - oxcf->over_shoot_pct = cfg->rc_overshoot_pct; + oxcf->under_shoot_pct = cfg->rc_undershoot_pct; + oxcf->over_shoot_pct = cfg->rc_overshoot_pct; - oxcf->scaled_frame_width = cfg->rc_scaled_width; + oxcf->scaled_frame_width = cfg->rc_scaled_width; oxcf->scaled_frame_height = cfg->rc_scaled_height; if (cfg->rc_resize_allowed == 1) { oxcf->resize_mode = - (oxcf->scaled_frame_width == 0 || oxcf->scaled_frame_height == 0) ? - RESIZE_DYNAMIC : RESIZE_FIXED; + (oxcf->scaled_frame_width == 0 || oxcf->scaled_frame_height == 0) + ? RESIZE_DYNAMIC + : RESIZE_FIXED; } else { oxcf->resize_mode = RESIZE_NONE; } - oxcf->maximum_buffer_size_ms = is_vbr ? 240000 : cfg->rc_buf_sz; + oxcf->maximum_buffer_size_ms = is_vbr ? 240000 : cfg->rc_buf_sz; oxcf->starting_buffer_level_ms = is_vbr ? 60000 : cfg->rc_buf_initial_sz; - oxcf->optimal_buffer_level_ms = is_vbr ? 60000 : cfg->rc_buf_optimal_sz; + oxcf->optimal_buffer_level_ms = is_vbr ? 
60000 : cfg->rc_buf_optimal_sz; - oxcf->drop_frames_water_mark = cfg->rc_dropframe_thresh; + oxcf->drop_frames_water_mark = cfg->rc_dropframe_thresh; - oxcf->two_pass_vbrbias = cfg->rc_2pass_vbr_bias_pct; - oxcf->two_pass_vbrmin_section = cfg->rc_2pass_vbr_minsection_pct; - oxcf->two_pass_vbrmax_section = cfg->rc_2pass_vbr_maxsection_pct; + oxcf->two_pass_vbrbias = cfg->rc_2pass_vbr_bias_pct; + oxcf->two_pass_vbrmin_section = cfg->rc_2pass_vbr_minsection_pct; + oxcf->two_pass_vbrmax_section = cfg->rc_2pass_vbr_maxsection_pct; - oxcf->auto_key = cfg->kf_mode == VPX_KF_AUTO && - cfg->kf_min_dist != cfg->kf_max_dist; + oxcf->auto_key = + cfg->kf_mode == VPX_KF_AUTO && cfg->kf_min_dist != cfg->kf_max_dist; - oxcf->key_freq = cfg->kf_max_dist; + oxcf->key_freq = cfg->kf_max_dist; - oxcf->speed = abs(extra_cfg->cpu_used); - oxcf->encode_breakout = extra_cfg->static_thresh; - oxcf->enable_auto_arf = extra_cfg->enable_auto_alt_ref; - oxcf->noise_sensitivity = extra_cfg->noise_sensitivity; - oxcf->sharpness = extra_cfg->sharpness; + oxcf->speed = abs(extra_cfg->cpu_used); + oxcf->encode_breakout = extra_cfg->static_thresh; + oxcf->enable_auto_arf = extra_cfg->enable_auto_alt_ref; + oxcf->noise_sensitivity = extra_cfg->noise_sensitivity; + oxcf->sharpness = extra_cfg->sharpness; - oxcf->two_pass_stats_in = cfg->rc_twopass_stats_in; + oxcf->two_pass_stats_in = cfg->rc_twopass_stats_in; #if CONFIG_FP_MB_STATS - oxcf->firstpass_mb_stats_in = cfg->rc_firstpass_mb_stats_in; + oxcf->firstpass_mb_stats_in = cfg->rc_firstpass_mb_stats_in; #endif oxcf->color_space = extra_cfg->color_space; oxcf->color_range = extra_cfg->color_range; - oxcf->render_width = extra_cfg->render_width; + oxcf->render_width = extra_cfg->render_width; oxcf->render_height = extra_cfg->render_height; oxcf->arnr_max_frames = extra_cfg->arnr_max_frames; - oxcf->arnr_strength = extra_cfg->arnr_strength; + oxcf->arnr_strength = extra_cfg->arnr_strength; oxcf->min_gf_interval = extra_cfg->min_gf_interval; 
oxcf->max_gf_interval = extra_cfg->max_gf_interval; @@ -485,19 +483,31 @@ static vpx_codec_err_t set_encoder_config( oxcf->content = extra_cfg->content; oxcf->tile_columns = extra_cfg->tile_columns; - oxcf->tile_rows = extra_cfg->tile_rows; - oxcf->error_resilient_mode = cfg->g_error_resilient; + // TODO(yunqing): The dependencies between row tiles cause error in multi- + // threaded encoding. For now, tile_rows is forced to be 0 in this case. + // The further fix can be done by adding synchronizations after a tile row + // is encoded. But this will hurt multi-threaded encoder performance. So, + // it is recommended to use tile-rows=0 while encoding with threads > 1. + if (oxcf->max_threads > 1 && oxcf->tile_columns > 0) + oxcf->tile_rows = 0; + else + oxcf->tile_rows = extra_cfg->tile_rows; + + oxcf->error_resilient_mode = cfg->g_error_resilient; oxcf->frame_parallel_decoding_mode = extra_cfg->frame_parallel_decoding_mode; oxcf->aq_mode = extra_cfg->aq_mode; + oxcf->alt_ref_aq = extra_cfg->alt_ref_aq; - oxcf->frame_periodic_boost = extra_cfg->frame_periodic_boost; + oxcf->frame_periodic_boost = extra_cfg->frame_periodic_boost; oxcf->ss_number_layers = cfg->ss_number_layers; oxcf->ts_number_layers = cfg->ts_number_layers; - oxcf->temporal_layering_mode = (enum vp9e_temporal_layering_mode) - cfg->temporal_layering_mode; + oxcf->temporal_layering_mode = + (enum vp9e_temporal_layering_mode)cfg->temporal_layering_mode; + + oxcf->target_level = extra_cfg->target_level; for (sl = 0; sl < oxcf->ss_number_layers; ++sl) { #if CONFIG_SPATIAL_SVC @@ -516,8 +526,8 @@ static vpx_codec_err_t set_encoder_config( } if (oxcf->ts_number_layers > 1) { for (tl = 0; tl < VPX_TS_MAX_LAYERS; ++tl) { - oxcf->ts_rate_decimator[tl] = cfg->ts_rate_decimator[tl] ? - cfg->ts_rate_decimator[tl] : 1; + oxcf->ts_rate_decimator[tl] = + cfg->ts_rate_decimator[tl] ? 
cfg->ts_rate_decimator[tl] : 1; } } else if (oxcf->ts_number_layers == 1) { oxcf->ts_rate_decimator[0] = 1; @@ -525,6 +535,7 @@ static vpx_codec_err_t set_encoder_config( /* printf("Current VP9 Settings: \n"); printf("target_bandwidth: %d\n", oxcf->target_bandwidth); + printf("target_level: %d\n", oxcf->target_level); printf("noise_sensitivity: %d\n", oxcf->noise_sensitivity); printf("sharpness: %d\n", oxcf->sharpness); printf("cpu_used: %d\n", oxcf->cpu_used); @@ -558,7 +569,7 @@ static vpx_codec_err_t set_encoder_config( } static vpx_codec_err_t encoder_set_config(vpx_codec_alg_priv_t *ctx, - const vpx_codec_enc_cfg_t *cfg) { + const vpx_codec_enc_cfg_t *cfg) { vpx_codec_err_t res; int force_key = 0; @@ -588,8 +599,7 @@ static vpx_codec_err_t encoder_set_config(vpx_codec_alg_priv_t *ctx, vp9_change_config(ctx->cpi, &ctx->oxcf); } - if (force_key) - ctx->next_frame_flags |= VPX_EFLAG_FORCE_KF; + if (force_key) ctx->next_frame_flags |= VPX_EFLAG_FORCE_KF; return res; } @@ -597,8 +607,7 @@ static vpx_codec_err_t encoder_set_config(vpx_codec_alg_priv_t *ctx, static vpx_codec_err_t ctrl_get_quantizer(vpx_codec_alg_priv_t *ctx, va_list args) { int *const arg = va_arg(args, int *); - if (arg == NULL) - return VPX_CODEC_INVALID_PARAM; + if (arg == NULL) return VPX_CODEC_INVALID_PARAM; *arg = vp9_get_quantizer(ctx->cpi); return VPX_CODEC_OK; } @@ -606,8 +615,7 @@ static vpx_codec_err_t ctrl_get_quantizer(vpx_codec_alg_priv_t *ctx, static vpx_codec_err_t ctrl_get_quantizer64(vpx_codec_alg_priv_t *ctx, va_list args) { int *const arg = va_arg(args, int *); - if (arg == NULL) - return VPX_CODEC_INVALID_PARAM; + if (arg == NULL) return VPX_CODEC_INVALID_PARAM; *arg = vp9_qindex_to_quantizer(vp9_get_quantizer(ctx->cpi)); return VPX_CODEC_OK; } @@ -723,11 +731,10 @@ static vpx_codec_err_t ctrl_set_rc_max_inter_bitrate_pct( return update_extra_cfg(ctx, &extra_cfg); } -static vpx_codec_err_t ctrl_set_rc_gf_cbr_boost_pct( - vpx_codec_alg_priv_t *ctx, va_list args) { +static 
vpx_codec_err_t ctrl_set_rc_gf_cbr_boost_pct(vpx_codec_alg_priv_t *ctx, + va_list args) { struct vp9_extracfg extra_cfg = ctx->extra_cfg; - extra_cfg.gf_cbr_boost_pct = - CAST(VP9E_SET_GF_CBR_BOOST_PCT, args); + extra_cfg.gf_cbr_boost_pct = CAST(VP9E_SET_GF_CBR_BOOST_PCT, args); return update_extra_cfg(ctx, &extra_cfg); } @@ -753,6 +760,13 @@ static vpx_codec_err_t ctrl_set_aq_mode(vpx_codec_alg_priv_t *ctx, return update_extra_cfg(ctx, &extra_cfg); } +static vpx_codec_err_t ctrl_set_alt_ref_aq(vpx_codec_alg_priv_t *ctx, + va_list args) { + struct vp9_extracfg extra_cfg = ctx->extra_cfg; + extra_cfg.alt_ref_aq = CAST(VP9E_SET_ALT_REF_AQ, args); + return update_extra_cfg(ctx, &extra_cfg); +} + static vpx_codec_err_t ctrl_set_min_gf_interval(vpx_codec_alg_priv_t *ctx, va_list args) { struct vp9_extracfg extra_cfg = ctx->extra_cfg; @@ -774,6 +788,20 @@ static vpx_codec_err_t ctrl_set_frame_periodic_boost(vpx_codec_alg_priv_t *ctx, return update_extra_cfg(ctx, &extra_cfg); } +static vpx_codec_err_t ctrl_set_target_level(vpx_codec_alg_priv_t *ctx, + va_list args) { + struct vp9_extracfg extra_cfg = ctx->extra_cfg; + extra_cfg.target_level = CAST(VP9E_SET_TARGET_LEVEL, args); + return update_extra_cfg(ctx, &extra_cfg); +} + +static vpx_codec_err_t ctrl_get_level(vpx_codec_alg_priv_t *ctx, va_list args) { + int *const arg = va_arg(args, int *); + if (arg == NULL) return VPX_CODEC_INVALID_PARAM; + *arg = (int)vp9_get_level(&ctx->cpi->level_info.level_spec); + return VPX_CODEC_OK; +} + static vpx_codec_err_t encoder_init(vpx_codec_ctx_t *ctx, vpx_codec_priv_enc_mr_cfg_t *data) { vpx_codec_err_t res = VPX_CODEC_OK; @@ -781,16 +809,13 @@ static vpx_codec_err_t encoder_init(vpx_codec_ctx_t *ctx, if (ctx->priv == NULL) { vpx_codec_alg_priv_t *const priv = vpx_calloc(1, sizeof(*priv)); - if (priv == NULL) - return VPX_CODEC_MEM_ERROR; + if (priv == NULL) return VPX_CODEC_MEM_ERROR; ctx->priv = (vpx_codec_priv_t *)priv; ctx->priv->init_flags = ctx->init_flags; 
ctx->priv->enc.total_encoders = 1; - priv->buffer_pool = - (BufferPool *)vpx_calloc(1, sizeof(BufferPool)); - if (priv->buffer_pool == NULL) - return VPX_CODEC_MEM_ERROR; + priv->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool)); + if (priv->buffer_pool == NULL) return VPX_CODEC_MEM_ERROR; #if CONFIG_MULTITHREAD if (pthread_mutex_init(&priv->buffer_pool->pool_mutex, NULL)) { @@ -849,7 +874,8 @@ static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx, // Convert duration parameter from stream timebase to microseconds. const uint64_t duration_us = (uint64_t)duration * 1000000 * - (uint64_t)cfg->g_timebase.num /(uint64_t)cfg->g_timebase.den; + (uint64_t)cfg->g_timebase.num / + (uint64_t)cfg->g_timebase.den; // If the deadline is more that the duration this frame is to be shown, // use good quality mode. Otherwise use realtime mode. @@ -858,11 +884,13 @@ static void pick_quickcompress_mode(vpx_codec_alg_priv_t *ctx, new_mode = BEST; } break; - case VPX_RC_FIRST_PASS: - break; - case VPX_RC_LAST_PASS: - new_mode = deadline > 0 ? GOOD : BEST; - break; + case VPX_RC_FIRST_PASS: break; + case VPX_RC_LAST_PASS: new_mode = deadline > 0 ? 
GOOD : BEST; break; + } + + if (deadline == VPX_DL_REALTIME) { + ctx->oxcf.pass = 0; + new_mode = REALTIME; } if (ctx->oxcf.mode != new_mode) { @@ -886,8 +914,7 @@ static int write_superframe_index(vpx_codec_alg_priv_t *ctx) { // Choose the magnitude for (mag = 0, mask = 0xff; mag < 4; mag++) { - if (ctx->pending_frame_magnitude < mask) - break; + if (ctx->pending_frame_magnitude < mask) break; mask <<= 8; mask |= 0xff; } @@ -931,9 +958,6 @@ static int write_superframe_index(vpx_codec_alg_priv_t *ctx) { return index_sz; } -// vp9 uses 10,000,000 ticks/second as time stamp -#define TICKS_PER_SEC 10000000LL - static int64_t timebase_units_to_ticks(const vpx_rational_t *timebase, int64_t n) { return n * TICKS_PER_SEC * timebase->num / timebase->den; @@ -941,7 +965,7 @@ static int64_t timebase_units_to_ticks(const vpx_rational_t *timebase, static int64_t ticks_to_timebase_units(const vpx_rational_t *timebase, int64_t n) { - const int64_t round = TICKS_PER_SEC * timebase->num / 2 - 1; + const int64_t round = (int64_t)TICKS_PER_SEC * timebase->num / 2 - 1; return (n * timebase->den + round) / timebase->num / TICKS_PER_SEC; } @@ -951,44 +975,45 @@ static vpx_codec_frame_flags_t get_frame_pkt_flags(const VP9_COMP *cpi, if (lib_flags & FRAMEFLAGS_KEY || (cpi->use_svc && - cpi->svc.layer_context[cpi->svc.spatial_layer_id * - cpi->svc.number_temporal_layers + - cpi->svc.temporal_layer_id].is_key_frame) - ) + cpi->svc + .layer_context[cpi->svc.spatial_layer_id * + cpi->svc.number_temporal_layers + + cpi->svc.temporal_layer_id] + .is_key_frame)) flags |= VPX_FRAME_IS_KEY; - if (cpi->droppable) - flags |= VPX_FRAME_IS_DROPPABLE; + if (cpi->droppable) flags |= VPX_FRAME_IS_DROPPABLE; return flags; } -static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx, +const size_t kMinCompressedSize = 8192; +static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx, const vpx_image_t *img, vpx_codec_pts_t pts, unsigned long duration, - vpx_enc_frame_flags_t flags, + 
vpx_enc_frame_flags_t enc_flags, unsigned long deadline) { - vpx_codec_err_t res = VPX_CODEC_OK; + volatile vpx_codec_err_t res = VPX_CODEC_OK; + volatile vpx_enc_frame_flags_t flags = enc_flags; VP9_COMP *const cpi = ctx->cpi; const vpx_rational_t *const timebase = &ctx->cfg.g_timebase; size_t data_sz; + if (cpi == NULL) return VPX_CODEC_INVALID_PARAM; + if (img != NULL) { res = validate_img(ctx, img); - // TODO(jzern) the checks related to cpi's validity should be treated as a - // failure condition, encoder setup is done fully in init() currently. - if (res == VPX_CODEC_OK && cpi != NULL) { + if (res == VPX_CODEC_OK) { // There's no codec control for multiple alt-refs so check the encoder // instance for its status to determine the compressed data size. data_sz = ctx->cfg.g_w * ctx->cfg.g_h * get_image_bps(img) / 8 * (cpi->multi_arf_allowed ? 8 : 2); - if (data_sz < 4096) - data_sz = 4096; + if (data_sz < kMinCompressedSize) data_sz = kMinCompressedSize; if (ctx->cx_data == NULL || ctx->cx_data_sz < data_sz) { ctx->cx_data_sz = data_sz; free(ctx->cx_data); - ctx->cx_data = (unsigned char*)malloc(ctx->cx_data_sz); + ctx->cx_data = (unsigned char *)malloc(ctx->cx_data_sz); if (ctx->cx_data == NULL) { return VPX_CODEC_MEM_ERROR; } @@ -1001,11 +1026,19 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx, // Handle Flags if (((flags & VP8_EFLAG_NO_UPD_GF) && (flags & VP8_EFLAG_FORCE_GF)) || - ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) { + ((flags & VP8_EFLAG_NO_UPD_ARF) && (flags & VP8_EFLAG_FORCE_ARF))) { ctx->base.err_detail = "Conflicting flags."; return VPX_CODEC_INVALID_PARAM; } + if (setjmp(cpi->common.error.jmp)) { + cpi->common.error.setjmp = 0; + res = update_error_state(ctx, &cpi->common.error); + vpx_clear_system_state(); + return res; + } + cpi->common.error.setjmp = 1; + vp9_apply_encoding_flags(cpi, flags); // Handle fixed keyframe intervals @@ -1017,8 +1050,7 @@ static vpx_codec_err_t 
encoder_encode(vpx_codec_alg_priv_t *ctx, } } - // Initialize the encoder instance on the first frame. - if (res == VPX_CODEC_OK && cpi != NULL) { + if (res == VPX_CODEC_OK) { unsigned int lib_flags = 0; YV12_BUFFER_CONFIG sd; int64_t dst_time_stamp = timebase_units_to_ticks(timebase, pts); @@ -1028,16 +1060,15 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx, unsigned char *cx_data; // Set up internal flags - if (ctx->base.init_flags & VPX_CODEC_USE_PSNR) - cpi->b_calculate_psnr = 1; + if (ctx->base.init_flags & VPX_CODEC_USE_PSNR) cpi->b_calculate_psnr = 1; if (img != NULL) { res = image2yuvconfig(img, &sd); // Store the original flags in to the frame buffer. Will extract the // key frame flag when we actually encode this frame. - if (vp9_receive_raw_frame(cpi, flags | ctx->next_frame_flags, - &sd, dst_time_stamp, dst_end_time_stamp)) { + if (vp9_receive_raw_frame(cpi, flags | ctx->next_frame_flags, &sd, + dst_time_stamp, dst_end_time_stamp)) { res = update_error_state(ctx, &cpi->common.error); } ctx->next_frame_flags = 0; @@ -1057,31 +1088,32 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx, * the buffer size anyway. 
*/ if (cx_data_sz < ctx->cx_data_sz / 2) { - ctx->base.err_detail = "Compressed data buffer too small"; + vpx_internal_error(&cpi->common.error, VPX_CODEC_ERROR, + "Compressed data buffer too small"); return VPX_CODEC_ERROR; } } while (cx_data_sz >= ctx->cx_data_sz / 2 && - -1 != vp9_get_compressed_data(cpi, &lib_flags, &size, - cx_data, &dst_time_stamp, - &dst_end_time_stamp, !img)) { + -1 != vp9_get_compressed_data(cpi, &lib_flags, &size, cx_data, + &dst_time_stamp, &dst_end_time_stamp, + !img)) { if (size) { vpx_codec_cx_pkt_t pkt; #if CONFIG_SPATIAL_SVC if (cpi->use_svc) - cpi->svc.layer_context[cpi->svc.spatial_layer_id * - cpi->svc.number_temporal_layers].layer_size += size; + cpi->svc + .layer_context[cpi->svc.spatial_layer_id * + cpi->svc.number_temporal_layers] + .layer_size += size; #endif // Pack invisible frames with the next visible frame if (!cpi->common.show_frame || (cpi->use_svc && - cpi->svc.spatial_layer_id < cpi->svc.number_spatial_layers - 1) - ) { - if (ctx->pending_cx_data == 0) - ctx->pending_cx_data = cx_data; + cpi->svc.spatial_layer_id < cpi->svc.number_spatial_layers - 1)) { + if (ctx->pending_cx_data == 0) ctx->pending_cx_data = cx_data; ctx->pending_cx_data_sz += size; ctx->pending_frame_sizes[ctx->pending_frame_count++] = size; ctx->pending_frame_magnitude |= size; @@ -1090,14 +1122,13 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx, if (ctx->output_cx_pkt_cb.output_cx_pkt) { pkt.kind = VPX_CODEC_CX_FRAME_PKT; - pkt.data.frame.pts = ticks_to_timebase_units(timebase, - dst_time_stamp); - pkt.data.frame.duration = - (unsigned long)ticks_to_timebase_units(timebase, - dst_end_time_stamp - dst_time_stamp); + pkt.data.frame.pts = + ticks_to_timebase_units(timebase, dst_time_stamp); + pkt.data.frame.duration = (unsigned long)ticks_to_timebase_units( + timebase, dst_end_time_stamp - dst_time_stamp); pkt.data.frame.flags = get_frame_pkt_flags(cpi, lib_flags); pkt.data.frame.buf = ctx->pending_cx_data; - pkt.data.frame.sz = 
size; + pkt.data.frame.sz = size; ctx->pending_cx_data = NULL; ctx->pending_cx_data_sz = 0; ctx->pending_frame_count = 0; @@ -1111,9 +1142,8 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx, // Add the frame packet to the list of returned packets. pkt.kind = VPX_CODEC_CX_FRAME_PKT; pkt.data.frame.pts = ticks_to_timebase_units(timebase, dst_time_stamp); - pkt.data.frame.duration = - (unsigned long)ticks_to_timebase_units(timebase, - dst_end_time_stamp - dst_time_stamp); + pkt.data.frame.duration = (unsigned long)ticks_to_timebase_units( + timebase, dst_end_time_stamp - dst_time_stamp); pkt.data.frame.flags = get_frame_pkt_flags(cpi, lib_flags); if (ctx->pending_cx_data) { @@ -1124,18 +1154,18 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx, if (!ctx->output_cx_pkt_cb.output_cx_pkt) size += write_superframe_index(ctx); pkt.data.frame.buf = ctx->pending_cx_data; - pkt.data.frame.sz = ctx->pending_cx_data_sz; + pkt.data.frame.sz = ctx->pending_cx_data_sz; ctx->pending_cx_data = NULL; ctx->pending_cx_data_sz = 0; ctx->pending_frame_count = 0; ctx->pending_frame_magnitude = 0; } else { pkt.data.frame.buf = cx_data; - pkt.data.frame.sz = size; + pkt.data.frame.sz = size; } pkt.data.frame.partition_id = -1; - if(ctx->output_cx_pkt_cb.output_cx_pkt) + if (ctx->output_cx_pkt_cb.output_cx_pkt) ctx->output_cx_pkt_cb.output_cx_pkt(&pkt, ctx->output_cx_pkt_cb.user_priv); else @@ -1175,6 +1205,7 @@ static vpx_codec_err_t encoder_encode(vpx_codec_alg_priv_t *ctx, } } + cpi->common.error.setjmp = 0; return res; } @@ -1247,16 +1278,15 @@ static vpx_codec_err_t ctrl_set_previewpp(vpx_codec_alg_priv_t *ctx, #endif } - static vpx_image_t *encoder_get_preview(vpx_codec_alg_priv_t *ctx) { YV12_BUFFER_CONFIG sd; vp9_ppflags_t flags; vp9_zero(flags); if (ctx->preview_ppcfg.post_proc_flag) { - flags.post_proc_flag = ctx->preview_ppcfg.post_proc_flag; + flags.post_proc_flag = ctx->preview_ppcfg.post_proc_flag; flags.deblocking_level = 
ctx->preview_ppcfg.deblocking_level; - flags.noise_level = ctx->preview_ppcfg.noise_level; + flags.noise_level = ctx->preview_ppcfg.noise_level; } if (vp9_get_preview_raw_frame(ctx->cpi, &sd, &flags) == 0) { @@ -1276,14 +1306,13 @@ static vpx_codec_err_t ctrl_set_roi_map(vpx_codec_alg_priv_t *ctx, return VPX_CODEC_INVALID_PARAM; } - static vpx_codec_err_t ctrl_set_active_map(vpx_codec_alg_priv_t *ctx, va_list args) { vpx_active_map_t *const map = va_arg(args, vpx_active_map_t *); if (map) { - if (!vp9_set_active_map(ctx->cpi, map->active_map, - (int)map->rows, (int)map->cols)) + if (!vp9_set_active_map(ctx->cpi, map->active_map, (int)map->rows, + (int)map->cols)) return VPX_CODEC_OK; else return VPX_CODEC_INVALID_PARAM; @@ -1297,8 +1326,8 @@ static vpx_codec_err_t ctrl_get_active_map(vpx_codec_alg_priv_t *ctx, vpx_active_map_t *const map = va_arg(args, vpx_active_map_t *); if (map) { - if (!vp9_get_active_map(ctx->cpi, map->active_map, - (int)map->rows, (int)map->cols)) + if (!vp9_get_active_map(ctx->cpi, map->active_map, (int)map->rows, + (int)map->cols)) return VPX_CODEC_OK; else return VPX_CODEC_INVALID_PARAM; @@ -1312,9 +1341,9 @@ static vpx_codec_err_t ctrl_set_scale_mode(vpx_codec_alg_priv_t *ctx, vpx_scaling_mode_t *const mode = va_arg(args, vpx_scaling_mode_t *); if (mode) { - const int res = vp9_set_internal_size(ctx->cpi, - (VPX_SCALING)mode->h_scaling_mode, - (VPX_SCALING)mode->v_scaling_mode); + const int res = + vp9_set_internal_size(ctx->cpi, (VPX_SCALING)mode->h_scaling_mode, + (VPX_SCALING)mode->v_scaling_mode); return (res == 0) ? 
VPX_CODEC_OK : VPX_CODEC_INVALID_PARAM; } else { return VPX_CODEC_INVALID_PARAM; @@ -1334,10 +1363,8 @@ static vpx_codec_err_t ctrl_set_svc(vpx_codec_alg_priv_t *ctx, va_list args) { vp9_set_svc(ctx->cpi, data); if (data == 1 && - (cfg->g_pass == VPX_RC_FIRST_PASS || - cfg->g_pass == VPX_RC_LAST_PASS) && - cfg->ss_number_layers > 1 && - cfg->ts_number_layers > 1) { + (cfg->g_pass == VPX_RC_FIRST_PASS || cfg->g_pass == VPX_RC_LAST_PASS) && + cfg->ss_number_layers > 1 && cfg->ts_number_layers > 1) { return VPX_CODEC_INVALID_PARAM; } return VPX_CODEC_OK; @@ -1391,12 +1418,12 @@ static vpx_codec_err_t ctrl_set_svc_parameters(vpx_codec_alg_priv_t *ctx, for (tl = 0; tl < cpi->svc.number_temporal_layers; ++tl) { const int layer = LAYER_IDS_TO_IDX(sl, tl, cpi->svc.number_temporal_layers); - LAYER_CONTEXT *lc = - &cpi->svc.layer_context[layer]; + LAYER_CONTEXT *lc = &cpi->svc.layer_context[layer]; lc->max_q = params->max_quantizers[layer]; lc->min_q = params->min_quantizers[layer]; lc->scaling_factor_num = params->scaling_factor_num[sl]; lc->scaling_factor_den = params->scaling_factor_den[sl]; + lc->speed = params->speed_per_layer[sl]; } } @@ -1452,123 +1479,125 @@ static vpx_codec_err_t ctrl_set_render_size(vpx_codec_alg_priv_t *ctx, va_list args) { struct vp9_extracfg extra_cfg = ctx->extra_cfg; int *const render_size = va_arg(args, int *); - extra_cfg.render_width = render_size[0]; + extra_cfg.render_width = render_size[0]; extra_cfg.render_height = render_size[1]; return update_extra_cfg(ctx, &extra_cfg); } static vpx_codec_ctrl_fn_map_t encoder_ctrl_maps[] = { - {VP8_COPY_REFERENCE, ctrl_copy_reference}, + { VP8_COPY_REFERENCE, ctrl_copy_reference }, // Setters - {VP8_SET_REFERENCE, ctrl_set_reference}, - {VP8_SET_POSTPROC, ctrl_set_previewpp}, - {VP8E_SET_ROI_MAP, ctrl_set_roi_map}, - {VP8E_SET_ACTIVEMAP, ctrl_set_active_map}, - {VP8E_SET_SCALEMODE, ctrl_set_scale_mode}, - {VP8E_SET_CPUUSED, ctrl_set_cpuused}, - {VP8E_SET_ENABLEAUTOALTREF, 
ctrl_set_enable_auto_alt_ref}, - {VP8E_SET_SHARPNESS, ctrl_set_sharpness}, - {VP8E_SET_STATIC_THRESHOLD, ctrl_set_static_thresh}, - {VP9E_SET_TILE_COLUMNS, ctrl_set_tile_columns}, - {VP9E_SET_TILE_ROWS, ctrl_set_tile_rows}, - {VP8E_SET_ARNR_MAXFRAMES, ctrl_set_arnr_max_frames}, - {VP8E_SET_ARNR_STRENGTH, ctrl_set_arnr_strength}, - {VP8E_SET_ARNR_TYPE, ctrl_set_arnr_type}, - {VP8E_SET_TUNING, ctrl_set_tuning}, - {VP8E_SET_CQ_LEVEL, ctrl_set_cq_level}, - {VP8E_SET_MAX_INTRA_BITRATE_PCT, ctrl_set_rc_max_intra_bitrate_pct}, - {VP9E_SET_MAX_INTER_BITRATE_PCT, ctrl_set_rc_max_inter_bitrate_pct}, - {VP9E_SET_GF_CBR_BOOST_PCT, ctrl_set_rc_gf_cbr_boost_pct}, - {VP9E_SET_LOSSLESS, ctrl_set_lossless}, - {VP9E_SET_FRAME_PARALLEL_DECODING, ctrl_set_frame_parallel_decoding_mode}, - {VP9E_SET_AQ_MODE, ctrl_set_aq_mode}, - {VP9E_SET_FRAME_PERIODIC_BOOST, ctrl_set_frame_periodic_boost}, - {VP9E_SET_SVC, ctrl_set_svc}, - {VP9E_SET_SVC_PARAMETERS, ctrl_set_svc_parameters}, - {VP9E_REGISTER_CX_CALLBACK, ctrl_register_cx_callback}, - {VP9E_SET_SVC_LAYER_ID, ctrl_set_svc_layer_id}, - {VP9E_SET_TUNE_CONTENT, ctrl_set_tune_content}, - {VP9E_SET_COLOR_SPACE, ctrl_set_color_space}, - {VP9E_SET_COLOR_RANGE, ctrl_set_color_range}, - {VP9E_SET_NOISE_SENSITIVITY, ctrl_set_noise_sensitivity}, - {VP9E_SET_MIN_GF_INTERVAL, ctrl_set_min_gf_interval}, - {VP9E_SET_MAX_GF_INTERVAL, ctrl_set_max_gf_interval}, - {VP9E_SET_SVC_REF_FRAME_CONFIG, ctrl_set_svc_ref_frame_config}, - {VP9E_SET_RENDER_SIZE, ctrl_set_render_size}, + { VP8_SET_REFERENCE, ctrl_set_reference }, + { VP8_SET_POSTPROC, ctrl_set_previewpp }, + { VP8E_SET_ROI_MAP, ctrl_set_roi_map }, + { VP8E_SET_ACTIVEMAP, ctrl_set_active_map }, + { VP8E_SET_SCALEMODE, ctrl_set_scale_mode }, + { VP8E_SET_CPUUSED, ctrl_set_cpuused }, + { VP8E_SET_ENABLEAUTOALTREF, ctrl_set_enable_auto_alt_ref }, + { VP8E_SET_SHARPNESS, ctrl_set_sharpness }, + { VP8E_SET_STATIC_THRESHOLD, ctrl_set_static_thresh }, + { VP9E_SET_TILE_COLUMNS, ctrl_set_tile_columns }, + { 
VP9E_SET_TILE_ROWS, ctrl_set_tile_rows }, + { VP8E_SET_ARNR_MAXFRAMES, ctrl_set_arnr_max_frames }, + { VP8E_SET_ARNR_STRENGTH, ctrl_set_arnr_strength }, + { VP8E_SET_ARNR_TYPE, ctrl_set_arnr_type }, + { VP8E_SET_TUNING, ctrl_set_tuning }, + { VP8E_SET_CQ_LEVEL, ctrl_set_cq_level }, + { VP8E_SET_MAX_INTRA_BITRATE_PCT, ctrl_set_rc_max_intra_bitrate_pct }, + { VP9E_SET_MAX_INTER_BITRATE_PCT, ctrl_set_rc_max_inter_bitrate_pct }, + { VP9E_SET_GF_CBR_BOOST_PCT, ctrl_set_rc_gf_cbr_boost_pct }, + { VP9E_SET_LOSSLESS, ctrl_set_lossless }, + { VP9E_SET_FRAME_PARALLEL_DECODING, ctrl_set_frame_parallel_decoding_mode }, + { VP9E_SET_AQ_MODE, ctrl_set_aq_mode }, + { VP9E_SET_ALT_REF_AQ, ctrl_set_alt_ref_aq }, + { VP9E_SET_FRAME_PERIODIC_BOOST, ctrl_set_frame_periodic_boost }, + { VP9E_SET_SVC, ctrl_set_svc }, + { VP9E_SET_SVC_PARAMETERS, ctrl_set_svc_parameters }, + { VP9E_REGISTER_CX_CALLBACK, ctrl_register_cx_callback }, + { VP9E_SET_SVC_LAYER_ID, ctrl_set_svc_layer_id }, + { VP9E_SET_TUNE_CONTENT, ctrl_set_tune_content }, + { VP9E_SET_COLOR_SPACE, ctrl_set_color_space }, + { VP9E_SET_COLOR_RANGE, ctrl_set_color_range }, + { VP9E_SET_NOISE_SENSITIVITY, ctrl_set_noise_sensitivity }, + { VP9E_SET_MIN_GF_INTERVAL, ctrl_set_min_gf_interval }, + { VP9E_SET_MAX_GF_INTERVAL, ctrl_set_max_gf_interval }, + { VP9E_SET_SVC_REF_FRAME_CONFIG, ctrl_set_svc_ref_frame_config }, + { VP9E_SET_RENDER_SIZE, ctrl_set_render_size }, + { VP9E_SET_TARGET_LEVEL, ctrl_set_target_level }, // Getters - {VP8E_GET_LAST_QUANTIZER, ctrl_get_quantizer}, - {VP8E_GET_LAST_QUANTIZER_64, ctrl_get_quantizer64}, - {VP9_GET_REFERENCE, ctrl_get_reference}, - {VP9E_GET_SVC_LAYER_ID, ctrl_get_svc_layer_id}, - {VP9E_GET_ACTIVEMAP, ctrl_get_active_map}, + { VP8E_GET_LAST_QUANTIZER, ctrl_get_quantizer }, + { VP8E_GET_LAST_QUANTIZER_64, ctrl_get_quantizer64 }, + { VP9_GET_REFERENCE, ctrl_get_reference }, + { VP9E_GET_SVC_LAYER_ID, ctrl_get_svc_layer_id }, + { VP9E_GET_ACTIVEMAP, ctrl_get_active_map }, + { VP9E_GET_LEVEL, 
ctrl_get_level }, - { -1, NULL}, + { -1, NULL }, }; static vpx_codec_enc_cfg_map_t encoder_usage_cfg_map[] = { - { - 0, - { // NOLINT - 0, // g_usage - 8, // g_threads - 0, // g_profile + { 0, + { + // NOLINT + 0, // g_usage + 8, // g_threads + 0, // g_profile - 320, // g_width - 240, // g_height - VPX_BITS_8, // g_bit_depth - 8, // g_input_bit_depth + 320, // g_width + 240, // g_height + VPX_BITS_8, // g_bit_depth + 8, // g_input_bit_depth - {1, 30}, // g_timebase + { 1, 30 }, // g_timebase - 0, // g_error_resilient + 0, // g_error_resilient - VPX_RC_ONE_PASS, // g_pass + VPX_RC_ONE_PASS, // g_pass - 25, // g_lag_in_frames + 25, // g_lag_in_frames - 0, // rc_dropframe_thresh - 0, // rc_resize_allowed - 0, // rc_scaled_width - 0, // rc_scaled_height - 60, // rc_resize_down_thresold - 30, // rc_resize_up_thresold + 0, // rc_dropframe_thresh + 0, // rc_resize_allowed + 0, // rc_scaled_width + 0, // rc_scaled_height + 60, // rc_resize_down_thresold + 30, // rc_resize_up_thresold - VPX_VBR, // rc_end_usage - {NULL, 0}, // rc_twopass_stats_in - {NULL, 0}, // rc_firstpass_mb_stats_in - 256, // rc_target_bandwidth - 0, // rc_min_quantizer - 63, // rc_max_quantizer - 25, // rc_undershoot_pct - 25, // rc_overshoot_pct + VPX_VBR, // rc_end_usage + { NULL, 0 }, // rc_twopass_stats_in + { NULL, 0 }, // rc_firstpass_mb_stats_in + 256, // rc_target_bandwidth + 0, // rc_min_quantizer + 63, // rc_max_quantizer + 25, // rc_undershoot_pct + 25, // rc_overshoot_pct - 6000, // rc_max_buffer_size - 4000, // rc_buffer_initial_size - 5000, // rc_buffer_optimal_size + 6000, // rc_max_buffer_size + 4000, // rc_buffer_initial_size + 5000, // rc_buffer_optimal_size - 50, // rc_two_pass_vbrbias - 0, // rc_two_pass_vbrmin_section - 2000, // rc_two_pass_vbrmax_section + 50, // rc_two_pass_vbrbias + 0, // rc_two_pass_vbrmin_section + 2000, // rc_two_pass_vbrmax_section - // keyframing settings (kf) - VPX_KF_AUTO, // g_kfmode - 0, // kf_min_dist - 9999, // kf_max_dist + // keyframing settings 
(kf) + VPX_KF_AUTO, // g_kfmode + 0, // kf_min_dist + 128, // kf_max_dist - VPX_SS_DEFAULT_LAYERS, // ss_number_layers - {0}, - {0}, // ss_target_bitrate - 1, // ts_number_layers - {0}, // ts_target_bitrate - {0}, // ts_rate_decimator - 0, // ts_periodicity - {0}, // ts_layer_id - {0}, // layer_taget_bitrate - 0 // temporal_layering_mode - } - }, + VPX_SS_DEFAULT_LAYERS, // ss_number_layers + { 0 }, + { 0 }, // ss_target_bitrate + 1, // ts_number_layers + { 0 }, // ts_target_bitrate + { 0 }, // ts_rate_decimator + 0, // ts_periodicity + { 0 }, // ts_layer_id + { 0 }, // layer_taget_bitrate + 0 // temporal_layering_mode + } }, }; #ifndef VERSION_STRING @@ -1580,25 +1609,27 @@ CODEC_INTERFACE(vpx_codec_vp9_cx) = { #if CONFIG_VP9_HIGHBITDEPTH VPX_CODEC_CAP_HIGHBITDEPTH | #endif - VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR, // vpx_codec_caps_t - encoder_init, // vpx_codec_init_fn_t - encoder_destroy, // vpx_codec_destroy_fn_t - encoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t - { // NOLINT - NULL, // vpx_codec_peek_si_fn_t - NULL, // vpx_codec_get_si_fn_t - NULL, // vpx_codec_decode_fn_t - NULL, // vpx_codec_frame_get_fn_t - NULL // vpx_codec_set_fb_fn_t + VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR, // vpx_codec_caps_t + encoder_init, // vpx_codec_init_fn_t + encoder_destroy, // vpx_codec_destroy_fn_t + encoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t + { + // NOLINT + NULL, // vpx_codec_peek_si_fn_t + NULL, // vpx_codec_get_si_fn_t + NULL, // vpx_codec_decode_fn_t + NULL, // vpx_codec_frame_get_fn_t + NULL // vpx_codec_set_fb_fn_t }, - { // NOLINT - 1, // 1 cfg map - encoder_usage_cfg_map, // vpx_codec_enc_cfg_map_t - encoder_encode, // vpx_codec_encode_fn_t - encoder_get_cxdata, // vpx_codec_get_cx_data_fn_t - encoder_set_config, // vpx_codec_enc_config_set_fn_t - NULL, // vpx_codec_get_global_headers_fn_t - encoder_get_preview, // vpx_codec_get_preview_frame_fn_t - NULL // vpx_codec_enc_mr_get_mem_loc_fn_t + { + // NOLINT + 1, // 1 cfg map + encoder_usage_cfg_map, // 
vpx_codec_enc_cfg_map_t + encoder_encode, // vpx_codec_encode_fn_t + encoder_get_cxdata, // vpx_codec_get_cx_data_fn_t + encoder_set_config, // vpx_codec_enc_config_set_fn_t + NULL, // vpx_codec_get_global_headers_fn_t + encoder_get_preview, // vpx_codec_get_preview_frame_fn_t + NULL // vpx_codec_enc_mr_get_mem_loc_fn_t } }; diff --git a/libs/libvpx/vp9/vp9_dx_iface.c b/libs/libvpx/vp9/vp9_dx_iface.c index be5d1600a5..3b5dc3ddac 100644 --- a/libs/libvpx/vp9/vp9_dx_iface.c +++ b/libs/libvpx/vp9/vp9_dx_iface.c @@ -41,8 +41,7 @@ static vpx_codec_err_t decoder_init(vpx_codec_ctx_t *ctx, if (!ctx->priv) { vpx_codec_alg_priv_t *const priv = (vpx_codec_alg_priv_t *)vpx_calloc(1, sizeof(*priv)); - if (priv == NULL) - return VPX_CODEC_MEM_ERROR; + if (priv == NULL) return VPX_CODEC_MEM_ERROR; ctx->priv = (vpx_codec_priv_t *)priv; ctx->priv->init_flags = ctx->init_flags; @@ -51,7 +50,9 @@ static vpx_codec_err_t decoder_init(vpx_codec_ctx_t *ctx, // Only do frame parallel decode when threads > 1. priv->frame_parallel_decode = (ctx->config.dec && (ctx->config.dec->threads > 1) && - (ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING)) ? 1 : 0; + (ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING)) + ? 1 + : 0; if (ctx->config.dec) { priv->cfg = *ctx->config.dec; ctx->config.dec = &priv->cfg; @@ -64,11 +65,16 @@ static vpx_codec_err_t decoder_init(vpx_codec_ctx_t *ctx, static vpx_codec_err_t decoder_destroy(vpx_codec_alg_priv_t *ctx) { if (ctx->frame_workers != NULL) { int i; + // Shutdown all threads before reclaiming any memory. The frame-level + // parallel decoder may access data from another worker. 
+ for (i = 0; i < ctx->num_frame_workers; ++i) { + VPxWorker *const worker = &ctx->frame_workers[i]; + vpx_get_worker_interface()->end(worker); + } for (i = 0; i < ctx->num_frame_workers; ++i) { VPxWorker *const worker = &ctx->frame_workers[i]; FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1; - vpx_get_worker_interface()->end(worker); vp9_remove_common(&frame_worker_data->pbi->common); #if CONFIG_VP9_POSTPROC vp9_free_postproc_buffers(&frame_worker_data->pbi->common); @@ -97,11 +103,10 @@ static vpx_codec_err_t decoder_destroy(vpx_codec_alg_priv_t *ctx) { return VPX_CODEC_OK; } -static int parse_bitdepth_colorspace_sampling( - BITSTREAM_PROFILE profile, struct vpx_read_bit_buffer *rb) { +static int parse_bitdepth_colorspace_sampling(BITSTREAM_PROFILE profile, + struct vpx_read_bit_buffer *rb) { vpx_color_space_t color_space; - if (profile >= PROFILE_2) - rb->bit_offset += 1; // Bit-depth 10 or 12. + if (profile >= PROFILE_2) rb->bit_offset += 1; // Bit-depth 10 or 12. color_space = (vpx_color_space_t)vpx_rb_read_literal(rb, 3); if (color_space != VPX_CS_SRGB) { rb->bit_offset += 1; // [16,235] (including xvycc) vs [0,255] range. 
@@ -120,17 +125,13 @@ static int parse_bitdepth_colorspace_sampling( return 1; } -static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data, - unsigned int data_sz, - vpx_codec_stream_info_t *si, - int *is_intra_only, - vpx_decrypt_cb decrypt_cb, - void *decrypt_state) { +static vpx_codec_err_t decoder_peek_si_internal( + const uint8_t *data, unsigned int data_sz, vpx_codec_stream_info_t *si, + int *is_intra_only, vpx_decrypt_cb decrypt_cb, void *decrypt_state) { int intra_only_flag = 0; - uint8_t clear_buffer[9]; + uint8_t clear_buffer[10]; - if (data + data_sz <= data) - return VPX_CODEC_INVALID_PARAM; + if (data + data_sz <= data) return VPX_CODEC_INVALID_PARAM; si->is_kf = 0; si->w = si->h = 0; @@ -141,6 +142,10 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data, data = clear_buffer; } + // A maximum of 6 bits are needed to read the frame marker, profile and + // show_existing_frame. + if (data_sz < 1) return VPX_CODEC_UNSUP_BITSTREAM; + { int show_frame; int error_resilient; @@ -148,30 +153,29 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data, const int frame_marker = vpx_rb_read_literal(&rb, 2); const BITSTREAM_PROFILE profile = vp9_read_profile(&rb); - if (frame_marker != VP9_FRAME_MARKER) - return VPX_CODEC_UNSUP_BITSTREAM; + if (frame_marker != VP9_FRAME_MARKER) return VPX_CODEC_UNSUP_BITSTREAM; - if (profile >= MAX_PROFILES) - return VPX_CODEC_UNSUP_BITSTREAM; - - if ((profile >= 2 && data_sz <= 1) || data_sz < 1) - return VPX_CODEC_UNSUP_BITSTREAM; + if (profile >= MAX_PROFILES) return VPX_CODEC_UNSUP_BITSTREAM; if (vpx_rb_read_bit(&rb)) { // show an existing frame + // If profile is > 2 and show_existing_frame is true, then at least 1 more + // byte (6+3=9 bits) is needed. + if (profile > 2 && data_sz < 2) return VPX_CODEC_UNSUP_BITSTREAM; vpx_rb_read_literal(&rb, 3); // Frame buffer to show. 
return VPX_CODEC_OK; } - if (data_sz <= 8) - return VPX_CODEC_UNSUP_BITSTREAM; + // For the rest of the function, a maximum of 9 more bytes are needed + // (computed by taking the maximum possible bits needed in each case). Note + // that this has to be updated if we read any more bits in this function. + if (data_sz < 10) return VPX_CODEC_UNSUP_BITSTREAM; si->is_kf = !vpx_rb_read_bit(&rb); show_frame = vpx_rb_read_bit(&rb); error_resilient = vpx_rb_read_bit(&rb); if (si->is_kf) { - if (!vp9_read_sync_code(&rb)) - return VPX_CODEC_UNSUP_BITSTREAM; + if (!vp9_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM; if (!parse_bitdepth_colorspace_sampling(profile, &rb)) return VPX_CODEC_UNSUP_BITSTREAM; @@ -182,8 +186,7 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data, rb.bit_offset += error_resilient ? 0 : 2; // reset_frame_context if (intra_only_flag) { - if (!vp9_read_sync_code(&rb)) - return VPX_CODEC_UNSUP_BITSTREAM; + if (!vp9_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM; if (profile > PROFILE_0) { if (!parse_bitdepth_colorspace_sampling(profile, &rb)) return VPX_CODEC_UNSUP_BITSTREAM; @@ -193,8 +196,7 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data, } } } - if (is_intra_only != NULL) - *is_intra_only = intra_only_flag; + if (is_intra_only != NULL) *is_intra_only = intra_only_flag; return VPX_CODEC_OK; } @@ -207,8 +209,8 @@ static vpx_codec_err_t decoder_peek_si(const uint8_t *data, static vpx_codec_err_t decoder_get_si(vpx_codec_alg_priv_t *ctx, vpx_codec_stream_info_t *si) { const size_t sz = (si->sz >= sizeof(vp9_stream_info_t)) - ? sizeof(vp9_stream_info_t) - : sizeof(vpx_codec_stream_info_t); + ? 
sizeof(vp9_stream_info_t) + : sizeof(vpx_codec_stream_info_t); memcpy(si, &ctx->si, sz); si->sz = (unsigned int)sz; @@ -220,8 +222,8 @@ static void set_error_detail(vpx_codec_alg_priv_t *ctx, ctx->base.err_detail = error; } -static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx, - const struct vpx_internal_error_info *error) { +static vpx_codec_err_t update_error_state( + vpx_codec_alg_priv_t *ctx, const struct vpx_internal_error_info *error) { if (error->error_code) set_error_detail(ctx, error->has_detail ? error->detail : NULL); @@ -264,10 +266,8 @@ static void set_default_ppflags(vp8_postproc_cfg_t *cfg) { cfg->noise_level = 0; } -static void set_ppflags(const vpx_codec_alg_priv_t *ctx, - vp9_ppflags_t *flags) { - flags->post_proc_flag = - ctx->postproc_cfg.post_proc_flag; +static void set_ppflags(const vpx_codec_alg_priv_t *ctx, vp9_ppflags_t *flags) { + flags->post_proc_flag = ctx->postproc_cfg.post_proc_flag; flags->deblocking_level = ctx->postproc_cfg.deblocking_level; flags->noise_level = ctx->postproc_cfg.noise_level; @@ -278,10 +278,8 @@ static int frame_worker_hook(void *arg1, void *arg2) { const uint8_t *data = frame_worker_data->data; (void)arg2; - frame_worker_data->result = - vp9_receive_compressed_data(frame_worker_data->pbi, - frame_worker_data->data_size, - &data); + frame_worker_data->result = vp9_receive_compressed_data( + frame_worker_data->pbi, frame_worker_data->data_size, &data); frame_worker_data->data_end = data; if (frame_worker_data->pbi->frame_parallel_decode) { @@ -323,25 +321,24 @@ static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) { ctx->num_cache_frames = 0; ctx->need_resync = 1; ctx->num_frame_workers = - (ctx->frame_parallel_decode == 1) ? ctx->cfg.threads: 1; + (ctx->frame_parallel_decode == 1) ? 
ctx->cfg.threads : 1; if (ctx->num_frame_workers > MAX_DECODE_THREADS) ctx->num_frame_workers = MAX_DECODE_THREADS; ctx->available_threads = ctx->num_frame_workers; ctx->flushed = 0; ctx->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool)); - if (ctx->buffer_pool == NULL) - return VPX_CODEC_MEM_ERROR; + if (ctx->buffer_pool == NULL) return VPX_CODEC_MEM_ERROR; #if CONFIG_MULTITHREAD - if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) { - set_error_detail(ctx, "Failed to allocate buffer pool mutex"); - return VPX_CODEC_MEM_ERROR; - } + if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) { + set_error_detail(ctx, "Failed to allocate buffer pool mutex"); + return VPX_CODEC_MEM_ERROR; + } #endif - ctx->frame_workers = (VPxWorker *) - vpx_malloc(ctx->num_frame_workers * sizeof(*ctx->frame_workers)); + ctx->frame_workers = (VPxWorker *)vpx_malloc(ctx->num_frame_workers * + sizeof(*ctx->frame_workers)); if (ctx->frame_workers == NULL) { set_error_detail(ctx, "Failed to allocate frame_workers"); return VPX_CODEC_MEM_ERROR; @@ -397,8 +394,7 @@ static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) { // If postprocessing was enabled by the application and a // configuration has not been provided, default it. 
- if (!ctx->postproc_cfg_set && - (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)) + if (!ctx->postproc_cfg_set && (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)) set_default_ppflags(&ctx->postproc_cfg); init_buffer_callbacks(ctx); @@ -428,11 +424,9 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx, const vpx_codec_err_t res = decoder_peek_si_internal(*data, data_sz, &ctx->si, &is_intra_only, ctx->decrypt_cb, ctx->decrypt_state); - if (res != VPX_CODEC_OK) - return res; + if (res != VPX_CODEC_OK) return res; - if (!ctx->si.is_kf && !is_intra_only) - return VPX_CODEC_ERROR; + if (!ctx->si.is_kf && !is_intra_only) return VPX_CODEC_ERROR; } if (!ctx->frame_parallel_decode) { @@ -506,7 +500,7 @@ static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx, static void wait_worker_and_cache_frame(vpx_codec_alg_priv_t *ctx) { YV12_BUFFER_CONFIG sd; - vp9_ppflags_t flags = {0, 0, 0}; + vp9_ppflags_t flags = { 0, 0, 0 }; const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id]; FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1; @@ -527,8 +521,7 @@ static void wait_worker_and_cache_frame(vpx_codec_alg_priv_t *ctx) { frame_worker_data->user_priv); ctx->frame_cache[ctx->frame_cache_write].img.fb_priv = frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv; - ctx->frame_cache_write = - (ctx->frame_cache_write + 1) % FRAME_CACHE_SIZE; + ctx->frame_cache_write = (ctx->frame_cache_write + 1) % FRAME_CACHE_SIZE; ++ctx->num_cache_frames; } } @@ -537,7 +530,7 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx, const uint8_t *data, unsigned int data_sz, void *user_priv, long deadline) { const uint8_t *data_start = data; - const uint8_t * const data_end = data + data_sz; + const uint8_t *const data_end = data + data_sz; vpx_codec_err_t res; uint32_t frame_sizes[8]; int frame_count; @@ -553,14 +546,12 @@ static vpx_codec_err_t 
decoder_decode(vpx_codec_alg_priv_t *ctx, // Initialize the decoder workers on the first frame. if (ctx->frame_workers == NULL) { const vpx_codec_err_t res = init_decoder(ctx); - if (res != VPX_CODEC_OK) - return res; + if (res != VPX_CODEC_OK) return res; } res = vp9_parse_superframe_index(data, data_sz, frame_sizes, &frame_count, ctx->decrypt_cb, ctx->decrypt_state); - if (res != VPX_CODEC_OK) - return res; + if (res != VPX_CODEC_OK) return res; if (ctx->frame_parallel_decode) { // Decode in frame parallel mode. When decoding in this mode, the frame @@ -573,8 +564,8 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx, for (i = 0; i < frame_count; ++i) { const uint8_t *data_start_copy = data_start; const uint32_t frame_size = frame_sizes[i]; - if (data_start < data - || frame_size > (uint32_t) (data_end - data_start)) { + if (data_start < data || + frame_size > (uint32_t)(data_end - data_start)) { set_error_detail(ctx, "Invalid frame size in index"); return VPX_CODEC_CORRUPT_FRAME; } @@ -591,10 +582,9 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx, } } - res = decode_one(ctx, &data_start_copy, frame_size, user_priv, - deadline); - if (res != VPX_CODEC_OK) - return res; + res = + decode_one(ctx, &data_start_copy, frame_size, user_priv, deadline); + if (res != VPX_CODEC_OK) return res; data_start += frame_size; } } else { @@ -611,8 +601,7 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx, } res = decode_one(ctx, &data, data_sz, user_priv, deadline); - if (res != VPX_CODEC_OK) - return res; + if (res != VPX_CODEC_OK) return res; } } else { // Decode in serial mode. 
@@ -623,33 +612,30 @@ static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx, const uint8_t *data_start_copy = data_start; const uint32_t frame_size = frame_sizes[i]; vpx_codec_err_t res; - if (data_start < data - || frame_size > (uint32_t) (data_end - data_start)) { + if (data_start < data || + frame_size > (uint32_t)(data_end - data_start)) { set_error_detail(ctx, "Invalid frame size in index"); return VPX_CODEC_CORRUPT_FRAME; } - res = decode_one(ctx, &data_start_copy, frame_size, user_priv, - deadline); - if (res != VPX_CODEC_OK) - return res; + res = + decode_one(ctx, &data_start_copy, frame_size, user_priv, deadline); + if (res != VPX_CODEC_OK) return res; data_start += frame_size; } } else { while (data_start < data_end) { - const uint32_t frame_size = (uint32_t) (data_end - data_start); - const vpx_codec_err_t res = decode_one(ctx, &data_start, frame_size, - user_priv, deadline); - if (res != VPX_CODEC_OK) - return res; + const uint32_t frame_size = (uint32_t)(data_end - data_start); + const vpx_codec_err_t res = + decode_one(ctx, &data_start, frame_size, user_priv, deadline); + if (res != VPX_CODEC_OK) return res; // Account for suboptimal termination by the encoder. while (data_start < data_end) { - const uint8_t marker = read_marker(ctx->decrypt_cb, - ctx->decrypt_state, data_start); - if (marker) - break; + const uint8_t marker = + read_marker(ctx->decrypt_cb, ctx->decrypt_state, data_start); + if (marker) break; ++data_start; } } @@ -684,9 +670,8 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx, // Output the frames in the cache first. 
if (ctx->num_cache_frames > 0) { release_last_output_frame(ctx); - ctx->last_show_frame = ctx->frame_cache[ctx->frame_cache_read].fb_idx; - if (ctx->need_resync) - return NULL; + ctx->last_show_frame = ctx->frame_cache[ctx->frame_cache_read].fb_idx; + if (ctx->need_resync) return NULL; img = &ctx->frame_cache[ctx->frame_cache_read].img; ctx->frame_cache_read = (ctx->frame_cache_read + 1) % FRAME_CACHE_SIZE; --ctx->num_cache_frames; @@ -698,10 +683,9 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx, if (*iter == NULL && ctx->frame_workers != NULL) { do { YV12_BUFFER_CONFIG sd; - vp9_ppflags_t flags = {0, 0, 0}; + vp9_ppflags_t flags = { 0, 0, 0 }; const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); - VPxWorker *const worker = - &ctx->frame_workers[ctx->next_output_worker_id]; + VPxWorker *const worker = &ctx->frame_workers[ctx->next_output_worker_id]; FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1; ctx->next_output_worker_id = @@ -721,8 +705,7 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx, RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs; release_last_output_frame(ctx); ctx->last_show_frame = frame_worker_data->pbi->common.new_fb_idx; - if (ctx->need_resync) - return NULL; + if (ctx->need_resync) return NULL; yuvconfig2image(&ctx->img, &sd, frame_worker_data->user_priv); ctx->img.fb_priv = frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv; img = &ctx->img; @@ -733,8 +716,7 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx, frame_worker_data->received_frame = 0; ++ctx->available_threads; ctx->need_resync = 1; - if (ctx->flushed != 1) - return NULL; + if (ctx->flushed != 1) return NULL; } } while (ctx->next_output_worker_id != ctx->next_submit_worker_id); } @@ -742,8 +724,7 @@ static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx, } static vpx_codec_err_t decoder_set_fb_fn( - vpx_codec_alg_priv_t *ctx, - vpx_get_frame_buffer_cb_fn_t cb_get, + 
vpx_codec_alg_priv_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get, vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) { if (cb_get == NULL || cb_release == NULL) { return VPX_CODEC_INVALID_PARAM; @@ -776,7 +757,8 @@ static vpx_codec_err_t ctrl_set_reference(vpx_codec_alg_priv_t *ctx, FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1; image2yuvconfig(&frame->img, &sd); return vp9_set_reference_dec(&frame_worker_data->pbi->common, - (VP9_REFFRAME)frame->frame_type, &sd); + ref_frame_to_vp9_reframe(frame->frame_type), + &sd); } else { return VPX_CODEC_INVALID_PARAM; } @@ -793,7 +775,7 @@ static vpx_codec_err_t ctrl_copy_reference(vpx_codec_alg_priv_t *ctx, } if (data) { - vpx_ref_frame_t *frame = (vpx_ref_frame_t *) data; + vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data; YV12_BUFFER_CONFIG sd; VPxWorker *const worker = ctx->frame_workers; FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1; @@ -816,7 +798,7 @@ static vpx_codec_err_t ctrl_get_reference(vpx_codec_alg_priv_t *ctx, } if (data) { - YV12_BUFFER_CONFIG* fb; + YV12_BUFFER_CONFIG *fb; VPxWorker *const worker = ctx->frame_workers; FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1; fb = get_ref_frame(&frame_worker_data->pbi->common, data->idx); @@ -847,13 +829,6 @@ static vpx_codec_err_t ctrl_set_postproc(vpx_codec_alg_priv_t *ctx, #endif } -static vpx_codec_err_t ctrl_set_dbg_options(vpx_codec_alg_priv_t *ctx, - va_list args) { - (void)ctx; - (void)args; - return VPX_CODEC_INCAPABLE; -} - static vpx_codec_err_t ctrl_get_last_ref_updates(vpx_codec_alg_priv_t *ctx, va_list args) { int *const update_info = va_arg(args, int *); @@ -1007,8 +982,7 @@ static vpx_codec_err_t ctrl_set_byte_alignment(vpx_codec_alg_priv_t *ctx, ctx->byte_alignment = byte_alignment; if (ctx->frame_workers) { VPxWorker *const worker = ctx->frame_workers; - FrameWorkerData *const frame_worker_data = - (FrameWorkerData *)worker->data1; + FrameWorkerData *const 
frame_worker_data = (FrameWorkerData *)worker->data1; frame_worker_data->pbi->common.byte_alignment = byte_alignment; } return VPX_CODEC_OK; @@ -1028,29 +1002,25 @@ static vpx_codec_err_t ctrl_set_skip_loop_filter(vpx_codec_alg_priv_t *ctx, } static vpx_codec_ctrl_fn_map_t decoder_ctrl_maps[] = { - {VP8_COPY_REFERENCE, ctrl_copy_reference}, + { VP8_COPY_REFERENCE, ctrl_copy_reference }, // Setters - {VP8_SET_REFERENCE, ctrl_set_reference}, - {VP8_SET_POSTPROC, ctrl_set_postproc}, - {VP8_SET_DBG_COLOR_REF_FRAME, ctrl_set_dbg_options}, - {VP8_SET_DBG_COLOR_MB_MODES, ctrl_set_dbg_options}, - {VP8_SET_DBG_COLOR_B_MODES, ctrl_set_dbg_options}, - {VP8_SET_DBG_DISPLAY_MV, ctrl_set_dbg_options}, - {VP9_INVERT_TILE_DECODE_ORDER, ctrl_set_invert_tile_order}, - {VPXD_SET_DECRYPTOR, ctrl_set_decryptor}, - {VP9_SET_BYTE_ALIGNMENT, ctrl_set_byte_alignment}, - {VP9_SET_SKIP_LOOP_FILTER, ctrl_set_skip_loop_filter}, + { VP8_SET_REFERENCE, ctrl_set_reference }, + { VP8_SET_POSTPROC, ctrl_set_postproc }, + { VP9_INVERT_TILE_DECODE_ORDER, ctrl_set_invert_tile_order }, + { VPXD_SET_DECRYPTOR, ctrl_set_decryptor }, + { VP9_SET_BYTE_ALIGNMENT, ctrl_set_byte_alignment }, + { VP9_SET_SKIP_LOOP_FILTER, ctrl_set_skip_loop_filter }, // Getters - {VP8D_GET_LAST_REF_UPDATES, ctrl_get_last_ref_updates}, - {VP8D_GET_FRAME_CORRUPTED, ctrl_get_frame_corrupted}, - {VP9_GET_REFERENCE, ctrl_get_reference}, - {VP9D_GET_DISPLAY_SIZE, ctrl_get_render_size}, - {VP9D_GET_BIT_DEPTH, ctrl_get_bit_depth}, - {VP9D_GET_FRAME_SIZE, ctrl_get_frame_size}, + { VP8D_GET_LAST_REF_UPDATES, ctrl_get_last_ref_updates }, + { VP8D_GET_FRAME_CORRUPTED, ctrl_get_frame_corrupted }, + { VP9_GET_REFERENCE, ctrl_get_reference }, + { VP9D_GET_DISPLAY_SIZE, ctrl_get_render_size }, + { VP9D_GET_BIT_DEPTH, ctrl_get_bit_depth }, + { VP9D_GET_FRAME_SIZE, ctrl_get_frame_size }, - { -1, NULL}, + { -1, NULL }, }; #ifndef VERSION_STRING @@ -1061,24 +1031,26 @@ CODEC_INTERFACE(vpx_codec_vp9_dx) = { VPX_CODEC_INTERNAL_ABI_VERSION, 
VPX_CODEC_CAP_DECODER | VP9_CAP_POSTPROC | VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER, // vpx_codec_caps_t - decoder_init, // vpx_codec_init_fn_t - decoder_destroy, // vpx_codec_destroy_fn_t - decoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t - { // NOLINT - decoder_peek_si, // vpx_codec_peek_si_fn_t - decoder_get_si, // vpx_codec_get_si_fn_t - decoder_decode, // vpx_codec_decode_fn_t - decoder_get_frame, // vpx_codec_frame_get_fn_t - decoder_set_fb_fn, // vpx_codec_set_fb_fn_t + decoder_init, // vpx_codec_init_fn_t + decoder_destroy, // vpx_codec_destroy_fn_t + decoder_ctrl_maps, // vpx_codec_ctrl_fn_map_t + { + // NOLINT + decoder_peek_si, // vpx_codec_peek_si_fn_t + decoder_get_si, // vpx_codec_get_si_fn_t + decoder_decode, // vpx_codec_decode_fn_t + decoder_get_frame, // vpx_codec_frame_get_fn_t + decoder_set_fb_fn, // vpx_codec_set_fb_fn_t }, - { // NOLINT - 0, - NULL, // vpx_codec_enc_cfg_map_t - NULL, // vpx_codec_encode_fn_t - NULL, // vpx_codec_get_cx_data_fn_t - NULL, // vpx_codec_enc_config_set_fn_t - NULL, // vpx_codec_get_global_headers_fn_t - NULL, // vpx_codec_get_preview_frame_fn_t - NULL // vpx_codec_enc_mr_get_mem_loc_fn_t + { + // NOLINT + 0, + NULL, // vpx_codec_enc_cfg_map_t + NULL, // vpx_codec_encode_fn_t + NULL, // vpx_codec_get_cx_data_fn_t + NULL, // vpx_codec_enc_config_set_fn_t + NULL, // vpx_codec_get_global_headers_fn_t + NULL, // vpx_codec_get_preview_frame_fn_t + NULL // vpx_codec_enc_mr_get_mem_loc_fn_t } }; diff --git a/libs/libvpx/vp9/vp9_dx_iface.h b/libs/libvpx/vp9/vp9_dx_iface.h index e0e948e16c..cc3d51842a 100644 --- a/libs/libvpx/vp9/vp9_dx_iface.h +++ b/libs/libvpx/vp9/vp9_dx_iface.h @@ -17,7 +17,7 @@ typedef vpx_codec_stream_info_t vp9_stream_info_t; // This limit is due to framebuffer numbers. // TODO(hkuang): Remove this limit after implementing ondemand framebuffers. -#define FRAME_CACHE_SIZE 6 // Cache maximum 6 decoded frames. +#define FRAME_CACHE_SIZE 6 // Cache maximum 6 decoded frames. 
typedef struct cache_frame { int fb_idx; @@ -25,36 +25,36 @@ typedef struct cache_frame { } cache_frame; struct vpx_codec_alg_priv { - vpx_codec_priv_t base; - vpx_codec_dec_cfg_t cfg; - vp9_stream_info_t si; - int postproc_cfg_set; - vp8_postproc_cfg_t postproc_cfg; - vpx_decrypt_cb decrypt_cb; - void *decrypt_state; - vpx_image_t img; - int img_avail; - int flushed; - int invert_tile_order; - int last_show_frame; // Index of last output frame. - int byte_alignment; - int skip_loop_filter; + vpx_codec_priv_t base; + vpx_codec_dec_cfg_t cfg; + vp9_stream_info_t si; + int postproc_cfg_set; + vp8_postproc_cfg_t postproc_cfg; + vpx_decrypt_cb decrypt_cb; + void *decrypt_state; + vpx_image_t img; + int img_avail; + int flushed; + int invert_tile_order; + int last_show_frame; // Index of last output frame. + int byte_alignment; + int skip_loop_filter; // Frame parallel related. - int frame_parallel_decode; // frame-based threading. - VPxWorker *frame_workers; - int num_frame_workers; - int next_submit_worker_id; - int last_submit_worker_id; - int next_output_worker_id; - int available_threads; - cache_frame frame_cache[FRAME_CACHE_SIZE]; - int frame_cache_write; - int frame_cache_read; - int num_cache_frames; - int need_resync; // wait for key/intra-only frame + int frame_parallel_decode; // frame-based threading. + VPxWorker *frame_workers; + int num_frame_workers; + int next_submit_worker_id; + int last_submit_worker_id; + int next_output_worker_id; + int available_threads; + cache_frame frame_cache[FRAME_CACHE_SIZE]; + int frame_cache_write; + int frame_cache_read; + int num_cache_frames; + int need_resync; // wait for key/intra-only frame // BufferPool that holds all reference frames. Shared by all the FrameWorkers. - BufferPool *buffer_pool; + BufferPool *buffer_pool; // External frame buffer info to save for VP9 common. void *ext_priv; // Private data associated with the external frame buffers. 
diff --git a/libs/libvpx/vp9/vp9_iface_common.h b/libs/libvpx/vp9/vp9_iface_common.h index 938d4224ba..d68872750b 100644 --- a/libs/libvpx/vp9/vp9_iface_common.h +++ b/libs/libvpx/vp9/vp9_iface_common.h @@ -12,7 +12,7 @@ #include "vpx_ports/mem.h" -static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12, +static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12, void *user_priv) { /** vpx_img_wrap() doesn't allow specifying independent strides for * the Y, U, and V planes, nor other alignment adjustments that @@ -61,9 +61,9 @@ static void yuvconfig2image(vpx_image_t *img, const YV12_BUFFER_CONFIG *yv12, // of the image. img->fmt = (vpx_img_fmt_t)(img->fmt | VPX_IMG_FMT_HIGHBITDEPTH); img->bit_depth = yv12->bit_depth; - img->planes[VPX_PLANE_Y] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->y_buffer); - img->planes[VPX_PLANE_U] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->u_buffer); - img->planes[VPX_PLANE_V] = (uint8_t*)CONVERT_TO_SHORTPTR(yv12->v_buffer); + img->planes[VPX_PLANE_Y] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->y_buffer); + img->planes[VPX_PLANE_U] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->u_buffer); + img->planes[VPX_PLANE_V] = (uint8_t *)CONVERT_TO_SHORTPTR(yv12->v_buffer); img->planes[VPX_PLANE_ALPHA] = NULL; img->stride[VPX_PLANE_Y] = 2 * yv12->y_stride; img->stride[VPX_PLANE_U] = 2 * yv12->uv_stride; @@ -84,17 +84,17 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img, yv12->u_buffer = img->planes[VPX_PLANE_U]; yv12->v_buffer = img->planes[VPX_PLANE_V]; - yv12->y_crop_width = img->d_w; + yv12->y_crop_width = img->d_w; yv12->y_crop_height = img->d_h; - yv12->render_width = img->r_w; + yv12->render_width = img->r_w; yv12->render_height = img->r_h; - yv12->y_width = img->d_w; + yv12->y_width = img->d_w; yv12->y_height = img->d_h; - yv12->uv_width = img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2 - : yv12->y_width; - yv12->uv_height = img->y_chroma_shift == 1 ? 
(1 + yv12->y_height) / 2 - : yv12->y_height; + yv12->uv_width = + img->x_chroma_shift == 1 ? (1 + yv12->y_width) / 2 : yv12->y_width; + yv12->uv_height = + img->y_chroma_shift == 1 ? (1 + yv12->y_height) / 2 : yv12->y_height; yv12->uv_crop_width = yv12->uv_width; yv12->uv_crop_height = yv12->uv_height; @@ -124,13 +124,22 @@ static vpx_codec_err_t image2yuvconfig(const vpx_image_t *img, } else { yv12->flags = 0; } - yv12->border = (yv12->y_stride - img->w) / 2; + yv12->border = (yv12->y_stride - img->w) / 2; #else - yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2; + yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2; #endif // CONFIG_VP9_HIGHBITDEPTH yv12->subsampling_x = img->x_chroma_shift; yv12->subsampling_y = img->y_chroma_shift; return VPX_CODEC_OK; } +static VP9_REFFRAME ref_frame_to_vp9_reframe(vpx_ref_frame_type_t frame) { + switch (frame) { + case VP8_LAST_FRAME: return VP9_LAST_FLAG; + case VP8_GOLD_FRAME: return VP9_GOLD_FLAG; + case VP8_ALTR_FRAME: return VP9_ALT_FLAG; + } + assert(0 && "Invalid Reference Frame"); + return VP9_LAST_FLAG; +} #endif // VP9_VP9_IFACE_COMMON_H_ diff --git a/libs/libvpx/vp9/vp9cx.mk b/libs/libvpx/vp9/vp9cx.mk index 2930c23ddf..a8ca0d5935 100644 --- a/libs/libvpx/vp9/vp9cx.mk +++ b/libs/libvpx/vp9/vp9cx.mk @@ -81,6 +81,8 @@ VP9_CX_SRCS-yes += encoder/vp9_aq_cyclicrefresh.c VP9_CX_SRCS-yes += encoder/vp9_aq_cyclicrefresh.h VP9_CX_SRCS-yes += encoder/vp9_aq_complexity.c VP9_CX_SRCS-yes += encoder/vp9_aq_complexity.h +VP9_CX_SRCS-yes += encoder/vp9_alt_ref_aq.h +VP9_CX_SRCS-yes += encoder/vp9_alt_ref_aq.c VP9_CX_SRCS-yes += encoder/vp9_skin_detection.c VP9_CX_SRCS-yes += encoder/vp9_skin_detection.h VP9_CX_SRCS-yes += encoder/vp9_noise_estimate.c @@ -101,23 +103,19 @@ ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes) VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_highbd_block_error_intrin_sse2.c endif -ifeq ($(CONFIG_USE_X86INC),yes) -VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_dct_mmx.asm +VP9_CX_SRCS-$(HAVE_SSE2) += 
encoder/x86/vp9_dct_sse2.asm ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes) VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_highbd_error_sse2.asm VP9_CX_SRCS-$(HAVE_AVX) += encoder/x86/vp9_highbd_error_avx.asm else VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_error_sse2.asm endif -endif ifeq ($(ARCH_X86_64),yes) -ifeq ($(CONFIG_USE_X86INC),yes) VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_quantize_ssse3_x86_64.asm endif -endif -VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_dct_sse2.c +VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_dct_intrin_sse2.c VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_dct_ssse3.c ifneq ($(CONFIG_VP9_HIGHBITDEPTH),yes) VP9_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/vp9_frame_scale_ssse3.c diff --git a/libs/libvpx/vpx/exports_enc b/libs/libvpx/vpx/exports_enc index e4707ba108..914e36cd4c 100644 --- a/libs/libvpx/vpx/exports_enc +++ b/libs/libvpx/vpx/exports_enc @@ -7,9 +7,3 @@ text vpx_codec_get_cx_data text vpx_codec_get_global_headers text vpx_codec_get_preview_frame text vpx_codec_set_cx_data_buf -text vpx_svc_dump_statistics -text vpx_svc_encode -text vpx_svc_get_message -text vpx_svc_init -text vpx_svc_release -text vpx_svc_set_options diff --git a/libs/libvpx/vpx/exports_spatial_svc b/libs/libvpx/vpx/exports_spatial_svc new file mode 100644 index 0000000000..d258a1d618 --- /dev/null +++ b/libs/libvpx/vpx/exports_spatial_svc @@ -0,0 +1,6 @@ +text vpx_svc_dump_statistics +text vpx_svc_encode +text vpx_svc_get_message +text vpx_svc_init +text vpx_svc_release +text vpx_svc_set_options diff --git a/libs/libvpx/vpx/internal/vpx_codec_internal.h b/libs/libvpx/vpx/internal/vpx_codec_internal.h index 7380fcc7e2..522e5c1684 100644 --- a/libs/libvpx/vpx/internal/vpx_codec_internal.h +++ b/libs/libvpx/vpx/internal/vpx_codec_internal.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - /*!\file * \brief Describes the decoder algorithm interface for algorithm * implementations. 
@@ -61,7 +60,7 @@ extern "C" { */ #define VPX_CODEC_INTERNAL_ABI_VERSION (5) /**<\hideinitializer*/ -typedef struct vpx_codec_alg_priv vpx_codec_alg_priv_t; +typedef struct vpx_codec_alg_priv vpx_codec_alg_priv_t; typedef struct vpx_codec_priv_enc_mr_cfg vpx_codec_priv_enc_mr_cfg_t; /*!\brief init function pointer prototype @@ -77,8 +76,8 @@ typedef struct vpx_codec_priv_enc_mr_cfg vpx_codec_priv_enc_mr_cfg_t; * \retval #VPX_CODEC_MEM_ERROR * Memory operation failed. */ -typedef vpx_codec_err_t (*vpx_codec_init_fn_t)(vpx_codec_ctx_t *ctx, - vpx_codec_priv_enc_mr_cfg_t *data); +typedef vpx_codec_err_t (*vpx_codec_init_fn_t)( + vpx_codec_ctx_t *ctx, vpx_codec_priv_enc_mr_cfg_t *data); /*!\brief destroy function pointer prototype * @@ -112,8 +111,8 @@ typedef vpx_codec_err_t (*vpx_codec_destroy_fn_t)(vpx_codec_alg_priv_t *ctx); * \retval #VPX_CODEC_OK * Bitstream is parsable and stream information updated */ -typedef vpx_codec_err_t (*vpx_codec_peek_si_fn_t)(const uint8_t *data, - unsigned int data_sz, +typedef vpx_codec_err_t (*vpx_codec_peek_si_fn_t)(const uint8_t *data, + unsigned int data_sz, vpx_codec_stream_info_t *si); /*!\brief Return information about the current stream. @@ -129,7 +128,7 @@ typedef vpx_codec_err_t (*vpx_codec_peek_si_fn_t)(const uint8_t *data, * \retval #VPX_CODEC_OK * Bitstream is parsable and stream information updated */ -typedef vpx_codec_err_t (*vpx_codec_get_si_fn_t)(vpx_codec_alg_priv_t *ctx, +typedef vpx_codec_err_t (*vpx_codec_get_si_fn_t)(vpx_codec_alg_priv_t *ctx, vpx_codec_stream_info_t *si); /*!\brief control function pointer prototype @@ -193,11 +192,11 @@ typedef const struct vpx_codec_ctrl_fn_map { * see the descriptions of the other error codes in ::vpx_codec_err_t * for recoverability capabilities. 
*/ -typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t *ctx, - const uint8_t *data, - unsigned int data_sz, - void *user_priv, - long deadline); +typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t *ctx, + const uint8_t *data, + unsigned int data_sz, + void *user_priv, + long deadline); /*!\brief Decoded frames iterator * @@ -206,7 +205,8 @@ typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t *ctx, * complete when this function returns NULL. * * The list of available frames becomes valid upon completion of the - * vpx_codec_decode call, and remains valid until the next call to vpx_codec_decode. + * vpx_codec_decode call, and remains valid until the next call to + * vpx_codec_decode. * * \param[in] ctx Pointer to this instance's context * \param[in out] iter Iterator storage, initialized to NULL @@ -215,7 +215,7 @@ typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t *ctx, * produced will always be in PTS (presentation time stamp) order. */ typedef vpx_image_t *(*vpx_codec_get_frame_fn_t)(vpx_codec_alg_priv_t *ctx, - vpx_codec_iter_t *iter); + vpx_codec_iter_t *iter); /*!\brief Pass in external frame buffers for the decoder to use. * @@ -244,32 +244,28 @@ typedef vpx_image_t *(*vpx_codec_get_frame_fn_t)(vpx_codec_alg_priv_t *ctx, * buffers. 
*/ typedef vpx_codec_err_t (*vpx_codec_set_fb_fn_t)( - vpx_codec_alg_priv_t *ctx, - vpx_get_frame_buffer_cb_fn_t cb_get, + vpx_codec_alg_priv_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get, vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv); +typedef vpx_codec_err_t (*vpx_codec_encode_fn_t)(vpx_codec_alg_priv_t *ctx, + const vpx_image_t *img, + vpx_codec_pts_t pts, + unsigned long duration, + vpx_enc_frame_flags_t flags, + unsigned long deadline); +typedef const vpx_codec_cx_pkt_t *(*vpx_codec_get_cx_data_fn_t)( + vpx_codec_alg_priv_t *ctx, vpx_codec_iter_t *iter); -typedef vpx_codec_err_t (*vpx_codec_encode_fn_t)(vpx_codec_alg_priv_t *ctx, - const vpx_image_t *img, - vpx_codec_pts_t pts, - unsigned long duration, - vpx_enc_frame_flags_t flags, - unsigned long deadline); -typedef const vpx_codec_cx_pkt_t *(*vpx_codec_get_cx_data_fn_t)(vpx_codec_alg_priv_t *ctx, - vpx_codec_iter_t *iter); +typedef vpx_codec_err_t (*vpx_codec_enc_config_set_fn_t)( + vpx_codec_alg_priv_t *ctx, const vpx_codec_enc_cfg_t *cfg); +typedef vpx_fixed_buf_t *(*vpx_codec_get_global_headers_fn_t)( + vpx_codec_alg_priv_t *ctx); -typedef vpx_codec_err_t -(*vpx_codec_enc_config_set_fn_t)(vpx_codec_alg_priv_t *ctx, - const vpx_codec_enc_cfg_t *cfg); -typedef vpx_fixed_buf_t * -(*vpx_codec_get_global_headers_fn_t)(vpx_codec_alg_priv_t *ctx); +typedef vpx_image_t *(*vpx_codec_get_preview_frame_fn_t)( + vpx_codec_alg_priv_t *ctx); -typedef vpx_image_t * -(*vpx_codec_get_preview_frame_fn_t)(vpx_codec_alg_priv_t *ctx); - -typedef vpx_codec_err_t -(*vpx_codec_enc_mr_get_mem_loc_fn_t)(const vpx_codec_enc_cfg_t *cfg, - void **mem_loc); +typedef vpx_codec_err_t (*vpx_codec_enc_mr_get_mem_loc_fn_t)( + const vpx_codec_enc_cfg_t *cfg, void **mem_loc); /*!\brief usage configuration mapping * @@ -282,7 +278,7 @@ typedef vpx_codec_err_t * */ typedef const struct vpx_codec_enc_cfg_map { - int usage; + int usage; vpx_codec_enc_cfg_t cfg; } vpx_codec_enc_cfg_map_t; @@ -291,41 +287,47 @@ typedef const struct 
vpx_codec_enc_cfg_map { * All decoders \ref MUST expose a variable of this type. */ struct vpx_codec_iface { - const char *name; /**< Identification String */ - int abi_version; /**< Implemented ABI version */ - vpx_codec_caps_t caps; /**< Decoder capabilities */ - vpx_codec_init_fn_t init; /**< \copydoc ::vpx_codec_init_fn_t */ - vpx_codec_destroy_fn_t destroy; /**< \copydoc ::vpx_codec_destroy_fn_t */ - vpx_codec_ctrl_fn_map_t *ctrl_maps; /**< \copydoc ::vpx_codec_ctrl_fn_map_t */ + const char *name; /**< Identification String */ + int abi_version; /**< Implemented ABI version */ + vpx_codec_caps_t caps; /**< Decoder capabilities */ + vpx_codec_init_fn_t init; /**< \copydoc ::vpx_codec_init_fn_t */ + vpx_codec_destroy_fn_t destroy; /**< \copydoc ::vpx_codec_destroy_fn_t */ + vpx_codec_ctrl_fn_map_t *ctrl_maps; /**< \copydoc ::vpx_codec_ctrl_fn_map_t */ struct vpx_codec_dec_iface { - vpx_codec_peek_si_fn_t peek_si; /**< \copydoc ::vpx_codec_peek_si_fn_t */ - vpx_codec_get_si_fn_t get_si; /**< \copydoc ::vpx_codec_get_si_fn_t */ - vpx_codec_decode_fn_t decode; /**< \copydoc ::vpx_codec_decode_fn_t */ - vpx_codec_get_frame_fn_t get_frame; /**< \copydoc ::vpx_codec_get_frame_fn_t */ - vpx_codec_set_fb_fn_t set_fb_fn; /**< \copydoc ::vpx_codec_set_fb_fn_t */ + vpx_codec_peek_si_fn_t peek_si; /**< \copydoc ::vpx_codec_peek_si_fn_t */ + vpx_codec_get_si_fn_t get_si; /**< \copydoc ::vpx_codec_get_si_fn_t */ + vpx_codec_decode_fn_t decode; /**< \copydoc ::vpx_codec_decode_fn_t */ + vpx_codec_get_frame_fn_t + get_frame; /**< \copydoc ::vpx_codec_get_frame_fn_t */ + vpx_codec_set_fb_fn_t set_fb_fn; /**< \copydoc ::vpx_codec_set_fb_fn_t */ } dec; struct vpx_codec_enc_iface { - int cfg_map_count; - vpx_codec_enc_cfg_map_t *cfg_maps; /**< \copydoc ::vpx_codec_enc_cfg_map_t */ - vpx_codec_encode_fn_t encode; /**< \copydoc ::vpx_codec_encode_fn_t */ - vpx_codec_get_cx_data_fn_t get_cx_data; /**< \copydoc ::vpx_codec_get_cx_data_fn_t */ - vpx_codec_enc_config_set_fn_t cfg_set; 
/**< \copydoc ::vpx_codec_enc_config_set_fn_t */ - vpx_codec_get_global_headers_fn_t get_glob_hdrs; /**< \copydoc ::vpx_codec_get_global_headers_fn_t */ - vpx_codec_get_preview_frame_fn_t get_preview; /**< \copydoc ::vpx_codec_get_preview_frame_fn_t */ - vpx_codec_enc_mr_get_mem_loc_fn_t mr_get_mem_loc; /**< \copydoc ::vpx_codec_enc_mr_get_mem_loc_fn_t */ + int cfg_map_count; + vpx_codec_enc_cfg_map_t + *cfg_maps; /**< \copydoc ::vpx_codec_enc_cfg_map_t */ + vpx_codec_encode_fn_t encode; /**< \copydoc ::vpx_codec_encode_fn_t */ + vpx_codec_get_cx_data_fn_t + get_cx_data; /**< \copydoc ::vpx_codec_get_cx_data_fn_t */ + vpx_codec_enc_config_set_fn_t + cfg_set; /**< \copydoc ::vpx_codec_enc_config_set_fn_t */ + vpx_codec_get_global_headers_fn_t + get_glob_hdrs; /**< \copydoc ::vpx_codec_get_global_headers_fn_t */ + vpx_codec_get_preview_frame_fn_t + get_preview; /**< \copydoc ::vpx_codec_get_preview_frame_fn_t */ + vpx_codec_enc_mr_get_mem_loc_fn_t + mr_get_mem_loc; /**< \copydoc ::vpx_codec_enc_mr_get_mem_loc_fn_t */ } enc; }; /*!\brief Callback function pointer / user data pair storage */ typedef struct vpx_codec_priv_cb_pair { union { - vpx_codec_put_frame_cb_fn_t put_frame; - vpx_codec_put_slice_cb_fn_t put_slice; + vpx_codec_put_frame_cb_fn_t put_frame; + vpx_codec_put_slice_cb_fn_t put_slice; } u; - void *user_priv; + void *user_priv; } vpx_codec_priv_cb_pair_t; - /*!\brief Instance private storage * * This structure is allocated by the algorithm's init function. It can be @@ -335,39 +337,38 @@ typedef struct vpx_codec_priv_cb_pair { * and the pointer cast to the proper type. 
*/ struct vpx_codec_priv { - const char *err_detail; - vpx_codec_flags_t init_flags; + const char *err_detail; + vpx_codec_flags_t init_flags; struct { - vpx_codec_priv_cb_pair_t put_frame_cb; - vpx_codec_priv_cb_pair_t put_slice_cb; + vpx_codec_priv_cb_pair_t put_frame_cb; + vpx_codec_priv_cb_pair_t put_slice_cb; } dec; struct { - vpx_fixed_buf_t cx_data_dst_buf; - unsigned int cx_data_pad_before; - unsigned int cx_data_pad_after; - vpx_codec_cx_pkt_t cx_data_pkt; - unsigned int total_encoders; + vpx_fixed_buf_t cx_data_dst_buf; + unsigned int cx_data_pad_before; + unsigned int cx_data_pad_after; + vpx_codec_cx_pkt_t cx_data_pkt; + unsigned int total_encoders; } enc; }; /* * Multi-resolution encoding internal configuration */ -struct vpx_codec_priv_enc_mr_cfg -{ - unsigned int mr_total_resolutions; - unsigned int mr_encoder_id; - struct vpx_rational mr_down_sampling_factor; - void* mr_low_res_mode_info; +struct vpx_codec_priv_enc_mr_cfg { + unsigned int mr_total_resolutions; + unsigned int mr_encoder_id; + struct vpx_rational mr_down_sampling_factor; + void *mr_low_res_mode_info; }; #undef VPX_CTRL_USE_TYPE #define VPX_CTRL_USE_TYPE(id, typ) \ - static VPX_INLINE typ id##__value(va_list args) {return va_arg(args, typ);} + static VPX_INLINE typ id##__value(va_list args) { return va_arg(args, typ); } #undef VPX_CTRL_USE_TYPE_DEPRECATED #define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ) \ - static VPX_INLINE typ id##__value(va_list args) {return va_arg(args, typ);} + static VPX_INLINE typ id##__value(va_list args) { return va_arg(args, typ); } #define CAST(id, arg) id##__value(arg) @@ -380,10 +381,9 @@ struct vpx_codec_priv_enc_mr_cfg * the same name as the struct, less the _algo suffix. The CODEC_INTERFACE * macro is provided to define this getter function automatically. 
*/ -#define CODEC_INTERFACE(id)\ - vpx_codec_iface_t* id(void) { return &id##_algo; }\ - vpx_codec_iface_t id##_algo - +#define CODEC_INTERFACE(id) \ + vpx_codec_iface_t *id(void) { return &id##_algo; } \ + vpx_codec_iface_t id##_algo /* Internal Utility Functions * @@ -391,38 +391,39 @@ struct vpx_codec_priv_enc_mr_cfg * utilities for manipulating vpx_codec_* data structures. */ struct vpx_codec_pkt_list { - unsigned int cnt; - unsigned int max; + unsigned int cnt; + unsigned int max; struct vpx_codec_cx_pkt pkts[1]; }; -#define vpx_codec_pkt_list_decl(n)\ - union {struct vpx_codec_pkt_list head;\ - struct {struct vpx_codec_pkt_list head;\ - struct vpx_codec_cx_pkt pkts[n];} alloc;} +#define vpx_codec_pkt_list_decl(n) \ + union { \ + struct vpx_codec_pkt_list head; \ + struct { \ + struct vpx_codec_pkt_list head; \ + struct vpx_codec_cx_pkt pkts[n]; \ + } alloc; \ + } -#define vpx_codec_pkt_list_init(m)\ - (m)->alloc.head.cnt = 0,\ - (m)->alloc.head.max = sizeof((m)->alloc.pkts) / sizeof((m)->alloc.pkts[0]) +#define vpx_codec_pkt_list_init(m) \ + (m)->alloc.head.cnt = 0, \ + (m)->alloc.head.max = sizeof((m)->alloc.pkts) / sizeof((m)->alloc.pkts[0]) -int -vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *, - const struct vpx_codec_cx_pkt *); - -const vpx_codec_cx_pkt_t * -vpx_codec_pkt_list_get(struct vpx_codec_pkt_list *list, - vpx_codec_iter_t *iter); +int vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *, + const struct vpx_codec_cx_pkt *); +const vpx_codec_cx_pkt_t *vpx_codec_pkt_list_get( + struct vpx_codec_pkt_list *list, vpx_codec_iter_t *iter); #include #include struct vpx_internal_error_info { - vpx_codec_err_t error_code; - int has_detail; - char detail[80]; - int setjmp; - jmp_buf jmp; + vpx_codec_err_t error_code; + int has_detail; + char detail[80]; + int setjmp; + jmp_buf jmp; }; #define CLANG_ANALYZER_NORETURN @@ -434,8 +435,7 @@ struct vpx_internal_error_info { #endif void vpx_internal_error(struct vpx_internal_error_info *info, - vpx_codec_err_t 
error, - const char *fmt, + vpx_codec_err_t error, const char *fmt, ...) CLANG_ANALYZER_NORETURN; #ifdef __cplusplus diff --git a/libs/libvpx/vpx/internal/vpx_psnr.h b/libs/libvpx/vpx/internal/vpx_psnr.h deleted file mode 100644 index 07d81bb8d9..0000000000 --- a/libs/libvpx/vpx/internal/vpx_psnr.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef VPX_INTERNAL_VPX_PSNR_H_ -#define VPX_INTERNAL_VPX_PSNR_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -// TODO(dkovalev) change vpx_sse_to_psnr signature: double -> int64_t - -/*!\brief Converts SSE to PSNR - * - * Converts sum of squared errros (SSE) to peak signal-to-noise ratio (PNSR). 
- * - * \param[in] samples Number of samples - * \param[in] peak Max sample value - * \param[in] sse Sum of squared errors - */ -double vpx_sse_to_psnr(double samples, double peak, double sse); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // VPX_INTERNAL_VPX_PSNR_H_ diff --git a/libs/libvpx/vpx/src/svc_encodeframe.c b/libs/libvpx/vpx/src/svc_encodeframe.c index 628afca31e..5aa0b8ddb8 100644 --- a/libs/libvpx/vpx/src/svc_encodeframe.c +++ b/libs/libvpx/vpx/src/svc_encodeframe.c @@ -33,8 +33,8 @@ #ifndef MINGW_HAS_SECURE_API // proto from /usr/x86_64-w64-mingw32/include/sec_api/string_s.h _CRTIMP char *__cdecl strtok_s(char *str, const char *delim, char **context); -#endif /* MINGW_HAS_SECURE_API */ -#endif /* __MINGW32__ */ +#endif /* MINGW_HAS_SECURE_API */ +#endif /* __MINGW32__ */ #ifdef _MSC_VER #define strdup _strdup @@ -47,13 +47,11 @@ _CRTIMP char *__cdecl strtok_s(char *str, const char *delim, char **context); #define MAX_QUANTIZER 63 -static const int DEFAULT_SCALE_FACTORS_NUM[VPX_SS_MAX_LAYERS] = { - 4, 5, 7, 11, 16 -}; +static const int DEFAULT_SCALE_FACTORS_NUM[VPX_SS_MAX_LAYERS] = { 4, 5, 7, 11, + 16 }; -static const int DEFAULT_SCALE_FACTORS_DEN[VPX_SS_MAX_LAYERS] = { - 16, 16, 16, 16, 16 -}; +static const int DEFAULT_SCALE_FACTORS_DEN[VPX_SS_MAX_LAYERS] = { 16, 16, 16, + 16, 16 }; typedef enum { QUANTIZER = 0, @@ -63,20 +61,17 @@ typedef enum { ALL_OPTION_TYPES } LAYER_OPTION_TYPE; -static const int option_max_values[ALL_OPTION_TYPES] = { - 63, INT_MAX, INT_MAX, 1 -}; +static const int option_max_values[ALL_OPTION_TYPES] = { 63, INT_MAX, INT_MAX, + 1 }; -static const int option_min_values[ALL_OPTION_TYPES] = { - 0, 0, 1, 0 -}; +static const int option_min_values[ALL_OPTION_TYPES] = { 0, 0, 1, 0 }; // One encoded frame typedef struct FrameData { - void *buf; // compressed data buffer - size_t size; // length of compressed data - vpx_codec_frame_flags_t flags; /**< flags for this frame */ - struct FrameData *next; + void *buf; // compressed 
data buffer + size_t size; // length of compressed data + vpx_codec_frame_flags_t flags; /**< flags for this frame */ + struct FrameData *next; } FrameData; static SvcInternal_t *get_svc_internal(SvcContext *svc_ctx) { @@ -91,8 +86,7 @@ static SvcInternal_t *get_svc_internal(SvcContext *svc_ctx) { return (SvcInternal_t *)svc_ctx->internal; } -static const SvcInternal_t *get_const_svc_internal( - const SvcContext *svc_ctx) { +static const SvcInternal_t *get_const_svc_internal(const SvcContext *svc_ctx) { if (svc_ctx == NULL) return NULL; return (const SvcInternal_t *)svc_ctx->internal; } @@ -102,8 +96,8 @@ static void svc_log_reset(SvcContext *svc_ctx) { si->message_buffer[0] = '\0'; } -static int svc_log(SvcContext *svc_ctx, SVC_LOG_LEVEL level, - const char *fmt, ...) { +static int svc_log(SvcContext *svc_ctx, SVC_LOG_LEVEL level, const char *fmt, + ...) { char buf[512]; int retval = 0; va_list ap; @@ -130,14 +124,11 @@ static int svc_log(SvcContext *svc_ctx, SVC_LOG_LEVEL level, return retval; } -static vpx_codec_err_t extract_option(LAYER_OPTION_TYPE type, - char *input, - int *value0, - int *value1) { +static vpx_codec_err_t extract_option(LAYER_OPTION_TYPE type, char *input, + int *value0, int *value1) { if (type == SCALE_FACTOR) { *value0 = strtol(input, &input, 10); - if (*input++ != '/') - return VPX_CODEC_INVALID_PARAM; + if (*input++ != '/') return VPX_CODEC_INVALID_PARAM; *value1 = strtol(input, &input, 10); if (*value0 < option_min_values[SCALE_FACTOR] || @@ -148,8 +139,7 @@ static vpx_codec_err_t extract_option(LAYER_OPTION_TYPE type, return VPX_CODEC_INVALID_PARAM; } else { *value0 = atoi(input); - if (*value0 < option_min_values[type] || - *value0 > option_max_values[type]) + if (*value0 < option_min_values[type] || *value0 > option_max_values[type]) return VPX_CODEC_INVALID_PARAM; } return VPX_CODEC_OK; @@ -176,8 +166,7 @@ static vpx_codec_err_t parse_layer_options_from_string(SvcContext *svc_ctx, for (i = 0; i < svc_ctx->spatial_layers; ++i) { if 
(token != NULL) { res = extract_option(type, token, option0 + i, option1 + i); - if (res != VPX_CODEC_OK) - break; + if (res != VPX_CODEC_OK) break; token = strtok_r(NULL, delim, &save_ptr); } else { break; @@ -186,7 +175,8 @@ static vpx_codec_err_t parse_layer_options_from_string(SvcContext *svc_ctx, if (res == VPX_CODEC_OK && i != svc_ctx->spatial_layers) { svc_log(svc_ctx, SVC_LOG_ERROR, "svc: layer params type: %d %d values required, " - "but only %d specified\n", type, svc_ctx->spatial_layers, i); + "but only %d specified\n", + type, svc_ctx->spatial_layers, i); res = VPX_CODEC_INVALID_PARAM; } free(input_string); @@ -233,14 +223,14 @@ static vpx_codec_err_t parse_options(SvcContext *svc_ctx, const char *options) { si->svc_params.scaling_factor_den); if (res != VPX_CODEC_OK) break; } else if (strcmp("max-quantizers", option_name) == 0) { - res = parse_layer_options_from_string(svc_ctx, QUANTIZER, option_value, - si->svc_params.max_quantizers, - NULL); + res = + parse_layer_options_from_string(svc_ctx, QUANTIZER, option_value, + si->svc_params.max_quantizers, NULL); if (res != VPX_CODEC_OK) break; } else if (strcmp("min-quantizers", option_name) == 0) { - res = parse_layer_options_from_string(svc_ctx, QUANTIZER, option_value, - si->svc_params.min_quantizers, - NULL); + res = + parse_layer_options_from_string(svc_ctx, QUANTIZER, option_value, + si->svc_params.min_quantizers, NULL); if (res != VPX_CODEC_OK) break; } else if (strcmp("auto-alt-refs", option_name) == 0) { res = parse_layer_options_from_string(svc_ctx, AUTO_ALT_REF, option_value, @@ -287,8 +277,7 @@ static vpx_codec_err_t parse_options(SvcContext *svc_ctx, const char *options) { return res; } -vpx_codec_err_t vpx_svc_set_options(SvcContext *svc_ctx, - const char *options) { +vpx_codec_err_t vpx_svc_set_options(SvcContext *svc_ctx, const char *options) { SvcInternal_t *const si = get_svc_internal(svc_ctx); if (svc_ctx == NULL || options == NULL || si == NULL) { return VPX_CODEC_INVALID_PARAM; @@ 
-308,34 +297,34 @@ void assign_layer_bitrates(const SvcContext *svc_ctx, if (si->bitrates[0] != 0) { enc_cfg->rc_target_bitrate = 0; for (sl = 0; sl < svc_ctx->spatial_layers; ++sl) { - enc_cfg->ss_target_bitrate[sl*svc_ctx->temporal_layers] = 0; + enc_cfg->ss_target_bitrate[sl * svc_ctx->temporal_layers] = 0; for (tl = 0; tl < svc_ctx->temporal_layers; ++tl) { - enc_cfg->ss_target_bitrate[sl*svc_ctx->temporal_layers] - += (unsigned int)si->bitrates[sl * svc_ctx->temporal_layers + tl]; - enc_cfg->layer_target_bitrate[sl*svc_ctx->temporal_layers + tl] - = si->bitrates[sl * svc_ctx->temporal_layers + tl]; + enc_cfg->ss_target_bitrate[sl * svc_ctx->temporal_layers] += + (unsigned int)si->bitrates[sl * svc_ctx->temporal_layers + tl]; + enc_cfg->layer_target_bitrate[sl * svc_ctx->temporal_layers + tl] = + si->bitrates[sl * svc_ctx->temporal_layers + tl]; } } } else { float total = 0; - float alloc_ratio[VPX_MAX_LAYERS] = {0}; + float alloc_ratio[VPX_MAX_LAYERS] = { 0 }; for (sl = 0; sl < svc_ctx->spatial_layers; ++sl) { if (si->svc_params.scaling_factor_den[sl] > 0) { - alloc_ratio[sl] = (float)( (sl+1) ); + alloc_ratio[sl] = (float)(pow(2, sl)); total += alloc_ratio[sl]; } } for (sl = 0; sl < svc_ctx->spatial_layers; ++sl) { enc_cfg->ss_target_bitrate[sl] = spatial_layer_target = - (unsigned int)(enc_cfg->rc_target_bitrate * - alloc_ratio[sl] / total); + (unsigned int)(enc_cfg->rc_target_bitrate * alloc_ratio[sl] / + total); if (svc_ctx->temporal_layering_mode == 3) { enc_cfg->layer_target_bitrate[sl * svc_ctx->temporal_layers] = - (spatial_layer_target*6)/10; // 60% + (spatial_layer_target * 6) / 10; // 60% enc_cfg->layer_target_bitrate[sl * svc_ctx->temporal_layers + 1] = - (spatial_layer_target*8)/10; // 80% + (spatial_layer_target * 8) / 10; // 80% enc_cfg->layer_target_bitrate[sl * svc_ctx->temporal_layers + 2] = spatial_layer_target; } else if (svc_ctx->temporal_layering_mode == 2 || @@ -359,7 +348,7 @@ void assign_layer_bitrates(const SvcContext *svc_ctx, } } 
else { float total = 0; - float alloc_ratio[VPX_MAX_LAYERS] = {0}; + float alloc_ratio[VPX_MAX_LAYERS] = { 0 }; for (i = 0; i < svc_ctx->spatial_layers; ++i) { if (si->svc_params.scaling_factor_den[i] > 0) { @@ -372,8 +361,9 @@ void assign_layer_bitrates(const SvcContext *svc_ctx, } for (i = 0; i < VPX_SS_MAX_LAYERS; ++i) { if (total > 0) { - enc_cfg->layer_target_bitrate[i] = (unsigned int) - (enc_cfg->rc_target_bitrate * alloc_ratio[i] / total); + enc_cfg->layer_target_bitrate[i] = + (unsigned int)(enc_cfg->rc_target_bitrate * alloc_ratio[i] / + total); } } } @@ -384,7 +374,7 @@ vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, vpx_codec_iface_t *iface, vpx_codec_enc_cfg_t *enc_cfg) { vpx_codec_err_t res; - int i, sl , tl; + int i, sl, tl; SvcInternal_t *const si = get_svc_internal(svc_ctx); if (svc_ctx == NULL || codec_ctx == NULL || iface == NULL || enc_cfg == NULL) { @@ -397,13 +387,6 @@ vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, si->width = enc_cfg->g_w; si->height = enc_cfg->g_h; -// wonkap: why is this necessary? 
- /*if (enc_cfg->kf_max_dist < 2) { - svc_log(svc_ctx, SVC_LOG_ERROR, "key frame distance too small: %d\n", - enc_cfg->kf_max_dist); - return VPX_CODEC_INVALID_PARAM; - }*/ - si->kf_dist = enc_cfg->kf_max_dist; if (svc_ctx->spatial_layers == 0) @@ -427,7 +410,9 @@ vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, for (sl = 0; sl < VPX_SS_MAX_LAYERS; ++sl) { si->svc_params.scaling_factor_num[sl] = DEFAULT_SCALE_FACTORS_NUM[sl]; si->svc_params.scaling_factor_den[sl] = DEFAULT_SCALE_FACTORS_DEN[sl]; + si->svc_params.speed_per_layer[sl] = svc_ctx->speed; } + for (tl = 0; tl < svc_ctx->temporal_layers; ++tl) { for (sl = 0; sl < svc_ctx->spatial_layers; ++sl) { i = sl * svc_ctx->temporal_layers + tl; @@ -441,23 +426,21 @@ vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, res = parse_options(svc_ctx, si->options); if (res != VPX_CODEC_OK) return res; - if (svc_ctx->spatial_layers < 1) - svc_ctx->spatial_layers = 1; + if (svc_ctx->spatial_layers < 1) svc_ctx->spatial_layers = 1; if (svc_ctx->spatial_layers > VPX_SS_MAX_LAYERS) svc_ctx->spatial_layers = VPX_SS_MAX_LAYERS; - if (svc_ctx->temporal_layers < 1) - svc_ctx->temporal_layers = 1; + if (svc_ctx->temporal_layers < 1) svc_ctx->temporal_layers = 1; if (svc_ctx->temporal_layers > VPX_TS_MAX_LAYERS) svc_ctx->temporal_layers = VPX_TS_MAX_LAYERS; if (svc_ctx->temporal_layers * svc_ctx->spatial_layers > VPX_MAX_LAYERS) { - svc_log(svc_ctx, SVC_LOG_ERROR, - "spatial layers * temporal layers exceeds the maximum number of " - "allowed layers of %d\n", - svc_ctx->spatial_layers * svc_ctx->temporal_layers, - (int) VPX_MAX_LAYERS); - return VPX_CODEC_INVALID_PARAM; + svc_log(svc_ctx, SVC_LOG_ERROR, + "spatial layers * temporal layers exceeds the maximum number of " + "allowed layers of %d\n", + svc_ctx->spatial_layers * svc_ctx->temporal_layers, + (int)VPX_MAX_LAYERS); + return VPX_CODEC_INVALID_PARAM; } assign_layer_bitrates(svc_ctx, enc_cfg); @@ -469,14 +452,13 @@ 
vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, if (svc_ctx->temporal_layers > 1) { int i; for (i = 0; i < svc_ctx->temporal_layers; ++i) { - enc_cfg->ts_target_bitrate[i] = enc_cfg->rc_target_bitrate / - svc_ctx->temporal_layers; + enc_cfg->ts_target_bitrate[i] = + enc_cfg->rc_target_bitrate / svc_ctx->temporal_layers; enc_cfg->ts_rate_decimator[i] = 1 << (svc_ctx->temporal_layers - 1 - i); } } - if (svc_ctx->threads) - enc_cfg->g_threads = svc_ctx->threads; + if (svc_ctx->threads) enc_cfg->g_threads = svc_ctx->threads; // Modify encoder configuration enc_cfg->ss_number_layers = svc_ctx->spatial_layers; @@ -514,10 +496,8 @@ vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, * Encode a frame into multiple layers * Create a superframe containing the individual layers */ -vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, - vpx_codec_ctx_t *codec_ctx, - struct vpx_image *rawimg, - vpx_codec_pts_t pts, +vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, + struct vpx_image *rawimg, vpx_codec_pts_t pts, int64_t duration, int deadline) { vpx_codec_err_t res; vpx_codec_iter_t iter; @@ -529,8 +509,8 @@ vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, svc_log_reset(svc_ctx); - res = vpx_codec_encode(codec_ctx, rawimg, pts, (uint32_t)duration, 0, - deadline); + res = + vpx_codec_encode(codec_ctx, rawimg, pts, (uint32_t)duration, 0, deadline); if (res != VPX_CODEC_OK) { return res; } @@ -547,23 +527,20 @@ vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, svc_log(svc_ctx, SVC_LOG_DEBUG, "SVC frame: %d, layer: %d, PSNR(Total/Y/U/V): " "%2.3f %2.3f %2.3f %2.3f \n", - si->psnr_pkt_received, i, - cx_pkt->data.layer_psnr[i].psnr[0], + si->psnr_pkt_received, i, cx_pkt->data.layer_psnr[i].psnr[0], cx_pkt->data.layer_psnr[i].psnr[1], cx_pkt->data.layer_psnr[i].psnr[2], cx_pkt->data.layer_psnr[i].psnr[3]); svc_log(svc_ctx, SVC_LOG_DEBUG, "SVC frame: %d, layer: %d, SSE(Total/Y/U/V): " "%2.3f 
%2.3f %2.3f %2.3f \n", - si->psnr_pkt_received, i, - cx_pkt->data.layer_psnr[i].sse[0], + si->psnr_pkt_received, i, cx_pkt->data.layer_psnr[i].sse[0], cx_pkt->data.layer_psnr[i].sse[1], cx_pkt->data.layer_psnr[i].sse[2], cx_pkt->data.layer_psnr[i].sse[3]); for (j = 0; j < COMPONENTS; ++j) { - si->psnr_sum[i][j] += - cx_pkt->data.layer_psnr[i].psnr[j]; + si->psnr_sum[i][j] += cx_pkt->data.layer_psnr[i].psnr[j]; si->sse_sum[i][j] += cx_pkt->data.layer_psnr[i].sse[j]; } } @@ -578,30 +555,25 @@ vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, } #endif #endif - case VPX_CODEC_PSNR_PKT: - { + case VPX_CODEC_PSNR_PKT: { #if VPX_ENCODER_ABI_VERSION > (5 + VPX_CODEC_ABI_VERSION) int j; svc_log(svc_ctx, SVC_LOG_DEBUG, "frame: %d, layer: %d, PSNR(Total/Y/U/V): " "%2.3f %2.3f %2.3f %2.3f \n", - si->psnr_pkt_received, 0, - cx_pkt->data.layer_psnr[0].psnr[0], + si->psnr_pkt_received, 0, cx_pkt->data.layer_psnr[0].psnr[0], cx_pkt->data.layer_psnr[0].psnr[1], cx_pkt->data.layer_psnr[0].psnr[2], cx_pkt->data.layer_psnr[0].psnr[3]); for (j = 0; j < COMPONENTS; ++j) { - si->psnr_sum[0][j] += - cx_pkt->data.layer_psnr[0].psnr[j]; + si->psnr_sum[0][j] += cx_pkt->data.layer_psnr[0].psnr[j]; si->sse_sum[0][j] += cx_pkt->data.layer_psnr[0].sse[j]; } #endif } - ++si->psnr_pkt_received; - break; - default: { + ++si->psnr_pkt_received; break; - } + default: { break; } } } @@ -639,7 +611,6 @@ const char *vpx_svc_dump_statistics(SvcContext *svc_ctx) { svc_log(svc_ctx, SVC_LOG_INFO, "\n"); for (i = 0; i < svc_ctx->spatial_layers; ++i) { - svc_log(svc_ctx, SVC_LOG_INFO, "Layer %d Average PSNR=[%2.3f, %2.3f, %2.3f, %2.3f], Bytes=[%u]\n", i, (double)si->psnr_sum[i][0] / number_of_frames, @@ -690,4 +661,3 @@ void vpx_svc_release(SvcContext *svc_ctx) { svc_ctx->internal = NULL; } } - diff --git a/libs/libvpx/vpx/src/vpx_codec.c b/libs/libvpx/vpx/src/vpx_codec.c index 5a495ce814..10331aa21b 100644 --- a/libs/libvpx/vpx/src/vpx_codec.c +++ b/libs/libvpx/vpx/src/vpx_codec.c @@ -8,7 +8,6 @@ * be 
found in the AUTHORS file in the root of the source tree. */ - /*!\file * \brief Provides the high level interface to wrap decoder algorithms. * @@ -19,67 +18,50 @@ #include "vpx/internal/vpx_codec_internal.h" #include "vpx_version.h" -#define SAVE_STATUS(ctx,var) (ctx?(ctx->err = var):var) +#define SAVE_STATUS(ctx, var) (ctx ? (ctx->err = var) : var) -int vpx_codec_version(void) { - return VERSION_PACKED; -} +int vpx_codec_version(void) { return VERSION_PACKED; } +const char *vpx_codec_version_str(void) { return VERSION_STRING_NOSP; } -const char *vpx_codec_version_str(void) { - return VERSION_STRING_NOSP; -} - - -const char *vpx_codec_version_extra_str(void) { - return VERSION_EXTRA; -} - +const char *vpx_codec_version_extra_str(void) { return VERSION_EXTRA; } const char *vpx_codec_iface_name(vpx_codec_iface_t *iface) { return iface ? iface->name : ""; } -const char *vpx_codec_err_to_string(vpx_codec_err_t err) { +const char *vpx_codec_err_to_string(vpx_codec_err_t err) { switch (err) { - case VPX_CODEC_OK: - return "Success"; - case VPX_CODEC_ERROR: - return "Unspecified internal error"; - case VPX_CODEC_MEM_ERROR: - return "Memory allocation error"; - case VPX_CODEC_ABI_MISMATCH: - return "ABI version mismatch"; + case VPX_CODEC_OK: return "Success"; + case VPX_CODEC_ERROR: return "Unspecified internal error"; + case VPX_CODEC_MEM_ERROR: return "Memory allocation error"; + case VPX_CODEC_ABI_MISMATCH: return "ABI version mismatch"; case VPX_CODEC_INCAPABLE: return "Codec does not implement requested capability"; case VPX_CODEC_UNSUP_BITSTREAM: return "Bitstream not supported by this decoder"; case VPX_CODEC_UNSUP_FEATURE: return "Bitstream required feature not supported by this decoder"; - case VPX_CODEC_CORRUPT_FRAME: - return "Corrupt frame detected"; - case VPX_CODEC_INVALID_PARAM: - return "Invalid parameter"; - case VPX_CODEC_LIST_END: - return "End of iterated list"; + case VPX_CODEC_CORRUPT_FRAME: return "Corrupt frame detected"; + case 
VPX_CODEC_INVALID_PARAM: return "Invalid parameter"; + case VPX_CODEC_LIST_END: return "End of iterated list"; } return "Unrecognized error code"; } -const char *vpx_codec_error(vpx_codec_ctx_t *ctx) { +const char *vpx_codec_error(vpx_codec_ctx_t *ctx) { return (ctx) ? vpx_codec_err_to_string(ctx->err) - : vpx_codec_err_to_string(VPX_CODEC_INVALID_PARAM); + : vpx_codec_err_to_string(VPX_CODEC_INVALID_PARAM); } -const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx) { +const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx) { if (ctx && ctx->err) return ctx->priv ? ctx->priv->err_detail : ctx->err_detail; return NULL; } - vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx) { vpx_codec_err_t res; @@ -99,15 +81,11 @@ vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx) { return SAVE_STATUS(ctx, res); } - vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface) { return (iface) ? iface->caps : 0; } - -vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, - int ctrl_id, - ...) { +vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, int ctrl_id, ...) { vpx_codec_err_t res; if (!ctx || !ctrl_id) @@ -117,11 +95,11 @@ vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, else { vpx_codec_ctrl_fn_map_t *entry; - res = VPX_CODEC_ERROR; + res = VPX_CODEC_INCAPABLE; for (entry = ctx->iface->ctrl_maps; entry && entry->fn; entry++) { if (!entry->ctrl_id || entry->ctrl_id == ctrl_id) { - va_list ap; + va_list ap; va_start(ap, ctrl_id); res = entry->fn((vpx_codec_alg_priv_t *)ctx->priv, ap); @@ -135,16 +113,14 @@ vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, } void vpx_internal_error(struct vpx_internal_error_info *info, - vpx_codec_err_t error, - const char *fmt, - ...) { + vpx_codec_err_t error, const char *fmt, ...) 
{ va_list ap; info->error_code = error; info->has_detail = 0; if (fmt) { - size_t sz = sizeof(info->detail); + size_t sz = sizeof(info->detail); info->has_detail = 1; va_start(ap, fmt); @@ -153,6 +129,5 @@ void vpx_internal_error(struct vpx_internal_error_info *info, info->detail[sz - 1] = '\0'; } - if (info->setjmp) - longjmp(info->jmp, info->error_code); + if (info->setjmp) longjmp(info->jmp, info->error_code); } diff --git a/libs/libvpx/vpx/src/vpx_decoder.c b/libs/libvpx/vpx/src/vpx_decoder.c index 802d8edd8a..fc1c2bccae 100644 --- a/libs/libvpx/vpx/src/vpx_decoder.c +++ b/libs/libvpx/vpx/src/vpx_decoder.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - /*!\file * \brief Provides the high level interface to wrap decoder algorithms. * @@ -16,17 +15,16 @@ #include #include "vpx/internal/vpx_codec_internal.h" -#define SAVE_STATUS(ctx,var) (ctx?(ctx->err = var):var) +#define SAVE_STATUS(ctx, var) (ctx ? (ctx->err = var) : var) static vpx_codec_alg_priv_t *get_alg_priv(vpx_codec_ctx_t *ctx) { return (vpx_codec_alg_priv_t *)ctx->priv; } -vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx, - vpx_codec_iface_t *iface, +vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx, + vpx_codec_iface_t *iface, const vpx_codec_dec_cfg_t *cfg, - vpx_codec_flags_t flags, - int ver) { + vpx_codec_flags_t flags, int ver) { vpx_codec_err_t res; if (ver != VPX_DECODER_ABI_VERSION) @@ -35,7 +33,8 @@ vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx, res = VPX_CODEC_INVALID_PARAM; else if (iface->abi_version != VPX_CODEC_INTERNAL_ABI_VERSION) res = VPX_CODEC_ABI_MISMATCH; - else if ((flags & VPX_CODEC_USE_POSTPROC) && !(iface->caps & VPX_CODEC_CAP_POSTPROC)) + else if ((flags & VPX_CODEC_USE_POSTPROC) && + !(iface->caps & VPX_CODEC_CAP_POSTPROC)) res = VPX_CODEC_INCAPABLE; else if ((flags & VPX_CODEC_USE_ERROR_CONCEALMENT) && !(iface->caps & VPX_CODEC_CAP_ERROR_CONCEALMENT)) @@ -63,15 +62,14 @@ vpx_codec_err_t 
vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx, return SAVE_STATUS(ctx, res); } - -vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface, - const uint8_t *data, - unsigned int data_sz, +vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface, + const uint8_t *data, + unsigned int data_sz, vpx_codec_stream_info_t *si) { vpx_codec_err_t res; - if (!iface || !data || !data_sz || !si - || si->sz < sizeof(vpx_codec_stream_info_t)) + if (!iface || !data || !data_sz || !si || + si->sz < sizeof(vpx_codec_stream_info_t)) res = VPX_CODEC_INVALID_PARAM; else { /* Set default/unknown values */ @@ -84,8 +82,7 @@ vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface, return res; } - -vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx, +vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx, vpx_codec_stream_info_t *si) { vpx_codec_err_t res; @@ -104,12 +101,9 @@ vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx, return SAVE_STATUS(ctx, res); } - -vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx, - const uint8_t *data, - unsigned int data_sz, - void *user_priv, - long deadline) { +vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx, const uint8_t *data, + unsigned int data_sz, void *user_priv, + long deadline) { vpx_codec_err_t res; /* Sanity checks */ @@ -126,8 +120,7 @@ vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx, return SAVE_STATUS(ctx, res); } -vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx, - vpx_codec_iter_t *iter) { +vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx, vpx_codec_iter_t *iter) { vpx_image_t *img; if (!ctx || !iter || !ctx->iface || !ctx->priv) @@ -138,16 +131,15 @@ vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx, return img; } - -vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx, - vpx_codec_put_frame_cb_fn_t cb, - void *user_priv) { +vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx, + vpx_codec_put_frame_cb_fn_t cb, + 
void *user_priv) { vpx_codec_err_t res; if (!ctx || !cb) res = VPX_CODEC_INVALID_PARAM; - else if (!ctx->iface || !ctx->priv - || !(ctx->iface->caps & VPX_CODEC_CAP_PUT_FRAME)) + else if (!ctx->iface || !ctx->priv || + !(ctx->iface->caps & VPX_CODEC_CAP_PUT_FRAME)) res = VPX_CODEC_ERROR; else { ctx->priv->dec.put_frame_cb.u.put_frame = cb; @@ -158,16 +150,15 @@ vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx return SAVE_STATUS(ctx, res); } - -vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx, - vpx_codec_put_slice_cb_fn_t cb, - void *user_priv) { +vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx, + vpx_codec_put_slice_cb_fn_t cb, + void *user_priv) { vpx_codec_err_t res; if (!ctx || !cb) res = VPX_CODEC_INVALID_PARAM; - else if (!ctx->iface || !ctx->priv - || !(ctx->iface->caps & VPX_CODEC_CAP_PUT_SLICE)) + else if (!ctx->iface || !ctx->priv || + !(ctx->iface->caps & VPX_CODEC_CAP_PUT_SLICE)) res = VPX_CODEC_ERROR; else { ctx->priv->dec.put_slice_cb.u.put_slice = cb; diff --git a/libs/libvpx/vpx/src/vpx_encoder.c b/libs/libvpx/vpx/src/vpx_encoder.c index cd10c411ce..4390cf7c8f 100644 --- a/libs/libvpx/vpx/src/vpx_encoder.c +++ b/libs/libvpx/vpx/src/vpx_encoder.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - /*!\file * \brief Provides the high level interface to wrap encoder algorithms. * @@ -18,17 +17,16 @@ #include "vpx_config.h" #include "vpx/internal/vpx_codec_internal.h" -#define SAVE_STATUS(ctx,var) (ctx?(ctx->err = var):var) +#define SAVE_STATUS(ctx, var) (ctx ? 
(ctx->err = var) : var) static vpx_codec_alg_priv_t *get_alg_priv(vpx_codec_ctx_t *ctx) { return (vpx_codec_alg_priv_t *)ctx->priv; } -vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx, - vpx_codec_iface_t *iface, +vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx, + vpx_codec_iface_t *iface, const vpx_codec_enc_cfg_t *cfg, - vpx_codec_flags_t flags, - int ver) { + vpx_codec_flags_t flags, int ver) { vpx_codec_err_t res; if (ver != VPX_ENCODER_ABI_VERSION) @@ -39,11 +37,10 @@ vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx, res = VPX_CODEC_ABI_MISMATCH; else if (!(iface->caps & VPX_CODEC_CAP_ENCODER)) res = VPX_CODEC_INCAPABLE; - else if ((flags & VPX_CODEC_USE_PSNR) - && !(iface->caps & VPX_CODEC_CAP_PSNR)) + else if ((flags & VPX_CODEC_USE_PSNR) && !(iface->caps & VPX_CODEC_CAP_PSNR)) res = VPX_CODEC_INCAPABLE; - else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION) - && !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION)) + else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION) && + !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION)) res = VPX_CODEC_INCAPABLE; else { ctx->iface = iface; @@ -62,13 +59,9 @@ vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx, return SAVE_STATUS(ctx, res); } -vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t *ctx, - vpx_codec_iface_t *iface, - vpx_codec_enc_cfg_t *cfg, - int num_enc, - vpx_codec_flags_t flags, - vpx_rational_t *dsf, - int ver) { +vpx_codec_err_t vpx_codec_enc_init_multi_ver( + vpx_codec_ctx_t *ctx, vpx_codec_iface_t *iface, vpx_codec_enc_cfg_t *cfg, + int num_enc, vpx_codec_flags_t flags, vpx_rational_t *dsf, int ver) { vpx_codec_err_t res = VPX_CODEC_OK; if (ver != VPX_ENCODER_ABI_VERSION) @@ -79,11 +72,10 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t *ctx, res = VPX_CODEC_ABI_MISMATCH; else if (!(iface->caps & VPX_CODEC_CAP_ENCODER)) res = VPX_CODEC_INCAPABLE; - else if ((flags & VPX_CODEC_USE_PSNR) - && !(iface->caps & VPX_CODEC_CAP_PSNR)) + else if ((flags & 
VPX_CODEC_USE_PSNR) && !(iface->caps & VPX_CODEC_CAP_PSNR)) res = VPX_CODEC_INCAPABLE; - else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION) - && !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION)) + else if ((flags & VPX_CODEC_USE_OUTPUT_PARTITION) && + !(iface->caps & VPX_CODEC_CAP_OUTPUT_PARTITION)) res = VPX_CODEC_INCAPABLE; else { int i; @@ -110,8 +102,7 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t *ctx, * resolution always use the same frame_type chosen by the * lowest-resolution encoder. */ - if (mr_cfg.mr_encoder_id) - cfg->kf_mode = VPX_KF_DISABLED; + if (mr_cfg.mr_encoder_id) cfg->kf_mode = VPX_KF_DISABLED; ctx->iface = iface; ctx->name = iface->name; @@ -121,8 +112,7 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t *ctx, res = ctx->iface->init(ctx, &mr_cfg); if (res) { - const char *error_detail = - ctx->priv ? ctx->priv->err_detail : NULL; + const char *error_detail = ctx->priv ? ctx->priv->err_detail : NULL; /* Destroy current ctx */ ctx->err_detail = error_detail; vpx_codec_destroy(ctx); @@ -136,8 +126,7 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t *ctx, } } - if (res) - break; + if (res) break; ctx++; cfg++; @@ -150,10 +139,9 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t *ctx, return SAVE_STATUS(ctx, res); } - -vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface, - vpx_codec_enc_cfg_t *cfg, - unsigned int usage) { +vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface, + vpx_codec_enc_cfg_t *cfg, + unsigned int usage) { vpx_codec_err_t res; vpx_codec_enc_cfg_map_t *map; int i; @@ -179,30 +167,28 @@ vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface, return res; } - #if ARCH_X86 || ARCH_X86_64 /* On X86, disable the x87 unit's internal 80 bit precision for better * consistency with the SSE unit's 64 bit precision. 
*/ #include "vpx_ports/x86.h" -#define FLOATING_POINT_INIT() do {\ +#define FLOATING_POINT_INIT() \ + do { \ unsigned short x87_orig_mode = x87_set_double_precision(); -#define FLOATING_POINT_RESTORE() \ - x87_set_control_word(x87_orig_mode); }while(0) - +#define FLOATING_POINT_RESTORE() \ + x87_set_control_word(x87_orig_mode); \ + } \ + while (0) #else static void FLOATING_POINT_INIT() {} static void FLOATING_POINT_RESTORE() {} #endif - -vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, - const vpx_image_t *img, - vpx_codec_pts_t pts, - unsigned long duration, - vpx_enc_frame_flags_t flags, - unsigned long deadline) { +vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, const vpx_image_t *img, + vpx_codec_pts_t pts, unsigned long duration, + vpx_enc_frame_flags_t flags, + unsigned long deadline) { vpx_codec_err_t res = VPX_CODEC_OK; if (!ctx || (img && !duration)) @@ -220,8 +206,8 @@ vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, FLOATING_POINT_INIT(); if (num_enc == 1) - res = ctx->iface->enc.encode(get_alg_priv(ctx), img, pts, - duration, flags, deadline); + res = ctx->iface->enc.encode(get_alg_priv(ctx), img, pts, duration, flags, + deadline); else { /* Multi-resolution encoding: * Encode multi-levels in reverse order. 
For example, @@ -234,8 +220,8 @@ vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, if (img) img += num_enc - 1; for (i = num_enc - 1; i >= 0; i--) { - if ((res = ctx->iface->enc.encode(get_alg_priv(ctx), img, pts, - duration, flags, deadline))) + if ((res = ctx->iface->enc.encode(get_alg_priv(ctx), img, pts, duration, + flags, deadline))) break; ctx--; @@ -250,7 +236,6 @@ vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, return SAVE_STATUS(ctx, res); } - const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx, vpx_codec_iter_t *iter) { const vpx_codec_cx_pkt_t *pkt = NULL; @@ -273,18 +258,18 @@ const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx, vpx_codec_priv_t *const priv = ctx->priv; char *const dst_buf = (char *)priv->enc.cx_data_dst_buf.buf; - if (dst_buf && - pkt->data.raw.buf != dst_buf && + if (dst_buf && pkt->data.raw.buf != dst_buf && pkt->data.raw.sz + priv->enc.cx_data_pad_before + - priv->enc.cx_data_pad_after <= priv->enc.cx_data_dst_buf.sz) { + priv->enc.cx_data_pad_after <= + priv->enc.cx_data_dst_buf.sz) { vpx_codec_cx_pkt_t *modified_pkt = &priv->enc.cx_data_pkt; memcpy(dst_buf + priv->enc.cx_data_pad_before, pkt->data.raw.buf, pkt->data.raw.sz); *modified_pkt = *pkt; modified_pkt->data.raw.buf = dst_buf; - modified_pkt->data.raw.sz += priv->enc.cx_data_pad_before + - priv->enc.cx_data_pad_after; + modified_pkt->data.raw.sz += + priv->enc.cx_data_pad_before + priv->enc.cx_data_pad_after; pkt = modified_pkt; } @@ -297,13 +282,11 @@ const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx, return pkt; } - -vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx, +vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx, const vpx_fixed_buf_t *buf, - unsigned int pad_before, - unsigned int pad_after) { - if (!ctx || !ctx->priv) - return VPX_CODEC_INVALID_PARAM; + unsigned int pad_before, + unsigned int pad_after) { + if (!ctx || !ctx->priv) return VPX_CODEC_INVALID_PARAM; if (buf) { 
ctx->priv->enc.cx_data_dst_buf = *buf; @@ -319,8 +302,7 @@ vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx, return VPX_CODEC_OK; } - -const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx) { +const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx) { vpx_image_t *img = NULL; if (ctx) { @@ -337,8 +319,7 @@ const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx) { return img; } - -vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx) { +vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx) { vpx_fixed_buf_t *buf = NULL; if (ctx) { @@ -355,9 +336,8 @@ vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx) { return buf; } - -vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx, - const vpx_codec_enc_cfg_t *cfg) { +vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx, + const vpx_codec_enc_cfg_t *cfg) { vpx_codec_err_t res; if (!ctx || !ctx->iface || !ctx->priv || !cfg) @@ -370,7 +350,6 @@ vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx, return SAVE_STATUS(ctx, res); } - int vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *list, const struct vpx_codec_cx_pkt *pkt) { if (list->cnt < list->max) { @@ -381,9 +360,8 @@ int vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *list, return 1; } - -const vpx_codec_cx_pkt_t *vpx_codec_pkt_list_get(struct vpx_codec_pkt_list *list, - vpx_codec_iter_t *iter) { +const vpx_codec_cx_pkt_t *vpx_codec_pkt_list_get( + struct vpx_codec_pkt_list *list, vpx_codec_iter_t *iter) { const vpx_codec_cx_pkt_t *pkt; if (!(*iter)) { diff --git a/libs/libvpx/vpx/src/vpx_image.c b/libs/libvpx/vpx/src/vpx_image.c index 9aae12c794..dba439c10a 100644 --- a/libs/libvpx/vpx/src/vpx_image.c +++ b/libs/libvpx/vpx/src/vpx_image.c @@ -15,10 +15,8 @@ #include "vpx/vpx_integer.h" #include "vpx_mem/vpx_mem.h" -static vpx_image_t *img_alloc_helper(vpx_image_t *img, - vpx_img_fmt_t fmt, - unsigned int d_w, - unsigned int d_h, +static 
vpx_image_t *img_alloc_helper(vpx_image_t *img, vpx_img_fmt_t fmt, + unsigned int d_w, unsigned int d_h, unsigned int buf_align, unsigned int stride_align, unsigned char *img_data) { @@ -27,68 +25,44 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img, int align; /* Treat align==0 like align==1 */ - if (!buf_align) - buf_align = 1; + if (!buf_align) buf_align = 1; /* Validate alignment (must be power of 2) */ - if (buf_align & (buf_align - 1)) - goto fail; + if (buf_align & (buf_align - 1)) goto fail; /* Treat align==0 like align==1 */ - if (!stride_align) - stride_align = 1; + if (!stride_align) stride_align = 1; /* Validate alignment (must be power of 2) */ - if (stride_align & (stride_align - 1)) - goto fail; + if (stride_align & (stride_align - 1)) goto fail; /* Get sample size for this format */ switch (fmt) { case VPX_IMG_FMT_RGB32: case VPX_IMG_FMT_RGB32_LE: case VPX_IMG_FMT_ARGB: - case VPX_IMG_FMT_ARGB_LE: - bps = 32; - break; + case VPX_IMG_FMT_ARGB_LE: bps = 32; break; case VPX_IMG_FMT_RGB24: - case VPX_IMG_FMT_BGR24: - bps = 24; - break; + case VPX_IMG_FMT_BGR24: bps = 24; break; case VPX_IMG_FMT_RGB565: case VPX_IMG_FMT_RGB565_LE: case VPX_IMG_FMT_RGB555: case VPX_IMG_FMT_RGB555_LE: case VPX_IMG_FMT_UYVY: case VPX_IMG_FMT_YUY2: - case VPX_IMG_FMT_YVYU: - bps = 16; - break; + case VPX_IMG_FMT_YVYU: bps = 16; break; case VPX_IMG_FMT_I420: case VPX_IMG_FMT_YV12: case VPX_IMG_FMT_VPXI420: - case VPX_IMG_FMT_VPXYV12: - bps = 12; - break; + case VPX_IMG_FMT_VPXYV12: bps = 12; break; case VPX_IMG_FMT_I422: - case VPX_IMG_FMT_I440: - bps = 16; - break; - case VPX_IMG_FMT_I444: - bps = 24; - break; - case VPX_IMG_FMT_I42016: - bps = 24; - break; + case VPX_IMG_FMT_I440: bps = 16; break; + case VPX_IMG_FMT_I444: bps = 24; break; + case VPX_IMG_FMT_I42016: bps = 24; break; case VPX_IMG_FMT_I42216: - case VPX_IMG_FMT_I44016: - bps = 32; - break; - case VPX_IMG_FMT_I44416: - bps = 48; - break; - default: - bps = 16; - break; + case VPX_IMG_FMT_I44016: bps = 
32; break; + case VPX_IMG_FMT_I44416: bps = 48; break; + default: bps = 16; break; } /* Get chroma shift values for this format */ @@ -99,12 +73,8 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img, case VPX_IMG_FMT_VPXYV12: case VPX_IMG_FMT_I422: case VPX_IMG_FMT_I42016: - case VPX_IMG_FMT_I42216: - xcs = 1; - break; - default: - xcs = 0; - break; + case VPX_IMG_FMT_I42216: xcs = 1; break; + default: xcs = 0; break; } switch (fmt) { @@ -114,12 +84,8 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img, case VPX_IMG_FMT_VPXI420: case VPX_IMG_FMT_VPXYV12: case VPX_IMG_FMT_I42016: - case VPX_IMG_FMT_I44016: - ycs = 1; - break; - default: - ycs = 0; - break; + case VPX_IMG_FMT_I44016: ycs = 1; break; + default: ycs = 0; break; } /* Calculate storage sizes given the chroma subsampling */ @@ -135,8 +101,7 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img, if (!img) { img = (vpx_image_t *)calloc(1, sizeof(vpx_image_t)); - if (!img) - goto fail; + if (!img) goto fail; img->self_allocd = 1; } else { @@ -146,18 +111,17 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img, img->img_data = img_data; if (!img_data) { - const uint64_t alloc_size = (fmt & VPX_IMG_FMT_PLANAR) ? - (uint64_t)h * s * bps / 8 : (uint64_t)h * s; + const uint64_t alloc_size = (fmt & VPX_IMG_FMT_PLANAR) + ? (uint64_t)h * s * bps / 8 + : (uint64_t)h * s; - if (alloc_size != (size_t)alloc_size) - goto fail; + if (alloc_size != (size_t)alloc_size) goto fail; img->img_data = (uint8_t *)vpx_memalign(buf_align, (size_t)alloc_size); img->img_data_owner = 1; } - if (!img->img_data) - goto fail; + if (!img->img_data) goto fail; img->fmt = fmt; img->bit_depth = (fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 
16 : 8; @@ -172,39 +136,30 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img, img->stride[VPX_PLANE_U] = img->stride[VPX_PLANE_V] = stride_in_bytes >> xcs; /* Default viewport to entire image */ - if (!vpx_img_set_rect(img, 0, 0, d_w, d_h)) - return img; + if (!vpx_img_set_rect(img, 0, 0, d_w, d_h)) return img; fail: vpx_img_free(img); return NULL; } -vpx_image_t *vpx_img_alloc(vpx_image_t *img, - vpx_img_fmt_t fmt, - unsigned int d_w, - unsigned int d_h, - unsigned int align) { +vpx_image_t *vpx_img_alloc(vpx_image_t *img, vpx_img_fmt_t fmt, + unsigned int d_w, unsigned int d_h, + unsigned int align) { return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL); } -vpx_image_t *vpx_img_wrap(vpx_image_t *img, - vpx_img_fmt_t fmt, - unsigned int d_w, - unsigned int d_h, - unsigned int stride_align, - unsigned char *img_data) { +vpx_image_t *vpx_img_wrap(vpx_image_t *img, vpx_img_fmt_t fmt, unsigned int d_w, + unsigned int d_h, unsigned int stride_align, + unsigned char *img_data) { /* By setting buf_align = 1, we don't change buffer alignment in this * function. */ return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data); } -int vpx_img_set_rect(vpx_image_t *img, - unsigned int x, - unsigned int y, - unsigned int w, - unsigned int h) { - unsigned char *data; +int vpx_img_set_rect(vpx_image_t *img, unsigned int x, unsigned int y, + unsigned int w, unsigned int h) { + unsigned char *data; if (x + w <= img->w && y + h <= img->h) { img->d_w = w; @@ -213,7 +168,7 @@ int vpx_img_set_rect(vpx_image_t *img, /* Calculate plane pointers */ if (!(img->fmt & VPX_IMG_FMT_PLANAR)) { img->planes[VPX_PLANE_PACKED] = - img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED]; + img->img_data + x * img->bps / 8 + y * img->stride[VPX_PLANE_PACKED]; } else { const int bytes_per_sample = (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 
2 : 1; @@ -225,8 +180,8 @@ int vpx_img_set_rect(vpx_image_t *img, data += img->h * img->stride[VPX_PLANE_ALPHA]; } - img->planes[VPX_PLANE_Y] = data + x * bytes_per_sample + - y * img->stride[VPX_PLANE_Y]; + img->planes[VPX_PLANE_Y] = + data + x * bytes_per_sample + y * img->stride[VPX_PLANE_Y]; data += img->h * img->stride[VPX_PLANE_Y]; if (!(img->fmt & VPX_IMG_FMT_UV_FLIP)) { @@ -262,24 +217,23 @@ void vpx_img_flip(vpx_image_t *img) { img->planes[VPX_PLANE_Y] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_Y]; img->stride[VPX_PLANE_Y] = -img->stride[VPX_PLANE_Y]; - img->planes[VPX_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1) - * img->stride[VPX_PLANE_U]; + img->planes[VPX_PLANE_U] += (signed)((img->d_h >> img->y_chroma_shift) - 1) * + img->stride[VPX_PLANE_U]; img->stride[VPX_PLANE_U] = -img->stride[VPX_PLANE_U]; - img->planes[VPX_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1) - * img->stride[VPX_PLANE_V]; + img->planes[VPX_PLANE_V] += (signed)((img->d_h >> img->y_chroma_shift) - 1) * + img->stride[VPX_PLANE_V]; img->stride[VPX_PLANE_V] = -img->stride[VPX_PLANE_V]; - img->planes[VPX_PLANE_ALPHA] += (signed)(img->d_h - 1) * img->stride[VPX_PLANE_ALPHA]; + img->planes[VPX_PLANE_ALPHA] += + (signed)(img->d_h - 1) * img->stride[VPX_PLANE_ALPHA]; img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA]; } void vpx_img_free(vpx_image_t *img) { if (img) { - if (img->img_data && img->img_data_owner) - vpx_free(img->img_data); + if (img->img_data && img->img_data_owner) vpx_free(img->img_data); - if (img->self_allocd) - free(img); + if (img->self_allocd) free(img); } } diff --git a/libs/libvpx/vpx/src/vpx_psnr.c b/libs/libvpx/vpx/src/vpx_psnr.c deleted file mode 100644 index 05843acb61..0000000000 --- a/libs/libvpx/vpx/src/vpx_psnr.c +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. 
- * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - -#include "vpx/internal/vpx_psnr.h" - -#define MAX_PSNR 100.0 - -double vpx_sse_to_psnr(double samples, double peak, double sse) { - if (sse > 0.0) { - const double psnr = 10.0 * log10(samples * peak * peak / sse); - return psnr > MAX_PSNR ? MAX_PSNR : psnr; - } else { - return MAX_PSNR; - } -} diff --git a/libs/libvpx/vpx/svc_context.h b/libs/libvpx/vpx/svc_context.h index 5bc25189ba..c8bde5832a 100644 --- a/libs/libvpx/vpx/svc_context.h +++ b/libs/libvpx/vpx/svc_context.h @@ -31,14 +31,14 @@ typedef enum SVC_LOG_LEVEL { typedef struct { // public interface to svc_command options - int spatial_layers; // number of spatial layers - int temporal_layers; // number of temporal layers + int spatial_layers; // number of spatial layers + int temporal_layers; // number of temporal layers int temporal_layering_mode; SVC_LOG_LEVEL log_level; // amount of information to display - int log_print; // when set, printf log messages instead of returning the - // message with svc_get_message + int log_print; // when set, printf log messages instead of returning the + // message with svc_get_message int output_rc_stat; // for outputting rc stats - int speed; // speed setting for codec + int speed; // speed setting for codec int threads; int aqmode; // turns on aq-mode=3 (cyclic_refresh): 0=off, 1=on. 
// private storage for vpx_svc_encode @@ -49,7 +49,7 @@ typedef struct { #define COMPONENTS 4 // psnr & sse statistics maintained for total, y, u, v typedef struct SvcInternal { - char options[OPTION_BUFFER_SIZE]; // set by vpx_svc_set_options + char options[OPTION_BUFFER_SIZE]; // set by vpx_svc_set_options // values extracted from option, quantizers vpx_svc_extra_cfg_t svc_params; @@ -57,7 +57,7 @@ typedef struct SvcInternal { int bitrates[VPX_SS_MAX_LAYERS]; // accumulated statistics - double psnr_sum[VPX_SS_MAX_LAYERS][COMPONENTS]; // total/Y/U/V + double psnr_sum[VPX_SS_MAX_LAYERS][COMPONENTS]; // total/Y/U/V uint64_t sse_sum[VPX_SS_MAX_LAYERS][COMPONENTS]; uint32_t bytes_sum[VPX_SS_MAX_LAYERS]; @@ -88,17 +88,14 @@ vpx_codec_err_t vpx_svc_set_options(SvcContext *svc_ctx, const char *options); /** * initialize SVC encoding */ -vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, - vpx_codec_ctx_t *codec_ctx, +vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, vpx_codec_iface_t *iface, vpx_codec_enc_cfg_t *cfg); /** * encode a frame of video with multiple layers */ -vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, - vpx_codec_ctx_t *codec_ctx, - struct vpx_image *rawimg, - vpx_codec_pts_t pts, +vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, + struct vpx_image *rawimg, vpx_codec_pts_t pts, int64_t duration, int deadline); /** diff --git a/libs/libvpx/vpx/vp8.h b/libs/libvpx/vpx/vp8.h index 8a035f9770..059c9d0f65 100644 --- a/libs/libvpx/vpx/vp8.h +++ b/libs/libvpx/vpx/vp8.h @@ -42,21 +42,23 @@ extern "C" { * The set of macros define the control functions of VP8 interface */ enum vp8_com_control_id { - VP8_SET_REFERENCE = 1, /**< pass in an external frame into decoder to be used as reference frame */ - VP8_COPY_REFERENCE = 2, /**< get a copy of reference frame from the decoder */ - VP8_SET_POSTPROC = 3, /**< set the decoder's post processing settings */ - VP8_SET_DBG_COLOR_REF_FRAME = 4, /**< set the 
reference frames to color for each macroblock */ - VP8_SET_DBG_COLOR_MB_MODES = 5, /**< set which macro block modes to color */ - VP8_SET_DBG_COLOR_B_MODES = 6, /**< set which blocks modes to color */ - VP8_SET_DBG_DISPLAY_MV = 7, /**< set which motion vector modes to draw */ + /*!\brief pass in an external frame into decoder to be used as reference frame + */ + VP8_SET_REFERENCE = 1, + VP8_COPY_REFERENCE = 2, /**< get a copy of reference frame from the decoder */ + VP8_SET_POSTPROC = 3, /**< set the decoder's post processing settings */ + VP8_SET_DBG_COLOR_REF_FRAME = 4, /**< \deprecated */ + VP8_SET_DBG_COLOR_MB_MODES = 5, /**< \deprecated */ + VP8_SET_DBG_COLOR_B_MODES = 6, /**< \deprecated */ + VP8_SET_DBG_DISPLAY_MV = 7, /**< \deprecated */ /* TODO(jkoleszar): The encoder incorrectly reuses some of these values (5+) * for its control ids. These should be migrated to something like the * VP8_DECODER_CTRL_ID_START range next time we're ready to break the ABI. */ - VP9_GET_REFERENCE = 128, /**< get a pointer to a reference frame */ + VP9_GET_REFERENCE = 128, /**< get a pointer to a reference frame */ VP8_COMMON_CTRL_ID_MAX, - VP8_DECODER_CTRL_ID_START = 256 + VP8_DECODER_CTRL_ID_START = 256 }; /*!\brief post process flags @@ -64,15 +66,16 @@ enum vp8_com_control_id { * The set of macros define VP8 decoder post processing flags */ enum vp8_postproc_level { - VP8_NOFILTERING = 0, - VP8_DEBLOCK = 1 << 0, - VP8_DEMACROBLOCK = 1 << 1, - VP8_ADDNOISE = 1 << 2, - VP8_DEBUG_TXT_FRAME_INFO = 1 << 3, /**< print frame information */ - VP8_DEBUG_TXT_MBLK_MODES = 1 << 4, /**< print macro block modes over each macro block */ - VP8_DEBUG_TXT_DC_DIFF = 1 << 5, /**< print dc diff for each macro block */ - VP8_DEBUG_TXT_RATE_INFO = 1 << 6, /**< print video rate info (encoder only) */ - VP8_MFQE = 1 << 10 + VP8_NOFILTERING = 0, + VP8_DEBLOCK = 1 << 0, + VP8_DEMACROBLOCK = 1 << 1, + VP8_ADDNOISE = 1 << 2, + VP8_DEBUG_TXT_FRAME_INFO = 1 << 3, /**< print frame information */ + 
VP8_DEBUG_TXT_MBLK_MODES = + 1 << 4, /**< print macro block modes over each macro block */ + VP8_DEBUG_TXT_DC_DIFF = 1 << 5, /**< print dc diff for each macro block */ + VP8_DEBUG_TXT_RATE_INFO = 1 << 6, /**< print video rate info (encoder only) */ + VP8_MFQE = 1 << 10 }; /*!\brief post process flags @@ -83,9 +86,11 @@ enum vp8_postproc_level { */ typedef struct vp8_postproc_cfg { - int post_proc_flag; /**< the types of post processing to be done, should be combination of "vp8_postproc_level" */ - int deblocking_level; /**< the strength of deblocking, valid range [0, 16] */ - int noise_level; /**< the strength of additive noise, valid range [0, 16] */ + /*!\brief the types of post processing to be done, should be combination of + * "vp8_postproc_level" */ + int post_proc_flag; + int deblocking_level; /**< the strength of deblocking, valid range [0, 16] */ + int noise_level; /**< the strength of additive noise, valid range [0, 16] */ } vp8_postproc_cfg_t; /*!\brief reference frame type @@ -103,8 +108,8 @@ typedef enum vpx_ref_frame_type { * Define the data struct to access vp8 reference frames. */ typedef struct vpx_ref_frame { - vpx_ref_frame_type_t frame_type; /**< which reference frame */ - vpx_image_t img; /**< reference frame data in image format */ + vpx_ref_frame_type_t frame_type; /**< which reference frame */ + vpx_image_t img; /**< reference frame data in image format */ } vpx_ref_frame_t; /*!\brief VP9 specific reference frame data struct @@ -112,8 +117,8 @@ typedef struct vpx_ref_frame { * Define the data struct to access vp9 reference frames. 
*/ typedef struct vp9_ref_frame { - int idx; /**< frame index to get (input) */ - vpx_image_t img; /**< img structure to populate (output) */ + int idx; /**< frame index to get (input) */ + vpx_image_t img; /**< img structure to populate (output) */ } vp9_ref_frame_t; /*!\cond */ @@ -121,21 +126,21 @@ typedef struct vp9_ref_frame { * * defines the data type for each of VP8 decoder control function requires */ -VPX_CTRL_USE_TYPE(VP8_SET_REFERENCE, vpx_ref_frame_t *) +VPX_CTRL_USE_TYPE(VP8_SET_REFERENCE, vpx_ref_frame_t *) #define VPX_CTRL_VP8_SET_REFERENCE -VPX_CTRL_USE_TYPE(VP8_COPY_REFERENCE, vpx_ref_frame_t *) +VPX_CTRL_USE_TYPE(VP8_COPY_REFERENCE, vpx_ref_frame_t *) #define VPX_CTRL_VP8_COPY_REFERENCE -VPX_CTRL_USE_TYPE(VP8_SET_POSTPROC, vp8_postproc_cfg_t *) +VPX_CTRL_USE_TYPE(VP8_SET_POSTPROC, vp8_postproc_cfg_t *) #define VPX_CTRL_VP8_SET_POSTPROC -VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_REF_FRAME, int) +VPX_CTRL_USE_TYPE_DEPRECATED(VP8_SET_DBG_COLOR_REF_FRAME, int) #define VPX_CTRL_VP8_SET_DBG_COLOR_REF_FRAME -VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_MB_MODES, int) +VPX_CTRL_USE_TYPE_DEPRECATED(VP8_SET_DBG_COLOR_MB_MODES, int) #define VPX_CTRL_VP8_SET_DBG_COLOR_MB_MODES -VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_B_MODES, int) +VPX_CTRL_USE_TYPE_DEPRECATED(VP8_SET_DBG_COLOR_B_MODES, int) #define VPX_CTRL_VP8_SET_DBG_COLOR_B_MODES -VPX_CTRL_USE_TYPE(VP8_SET_DBG_DISPLAY_MV, int) +VPX_CTRL_USE_TYPE_DEPRECATED(VP8_SET_DBG_DISPLAY_MV, int) #define VPX_CTRL_VP8_SET_DBG_DISPLAY_MV -VPX_CTRL_USE_TYPE(VP9_GET_REFERENCE, vp9_ref_frame_t *) +VPX_CTRL_USE_TYPE(VP9_GET_REFERENCE, vp9_ref_frame_t *) #define VPX_CTRL_VP9_GET_REFERENCE /*!\endcond */ diff --git a/libs/libvpx/vpx/vp8cx.h b/libs/libvpx/vpx/vp8cx.h index bd99c6dc13..8fa25e8bc0 100644 --- a/libs/libvpx/vpx/vp8cx.h +++ b/libs/libvpx/vpx/vp8cx.h @@ -32,7 +32,7 @@ extern "C" { * This interface provides the capability to encode raw VP8 streams. 
* @{ */ -extern vpx_codec_iface_t vpx_codec_vp8_cx_algo; +extern vpx_codec_iface_t vpx_codec_vp8_cx_algo; extern vpx_codec_iface_t *vpx_codec_vp8_cx(void); /*!@} - end algorithm interface member group*/ @@ -41,19 +41,10 @@ extern vpx_codec_iface_t *vpx_codec_vp8_cx(void); * This interface provides the capability to encode raw VP9 streams. * @{ */ -extern vpx_codec_iface_t vpx_codec_vp9_cx_algo; +extern vpx_codec_iface_t vpx_codec_vp9_cx_algo; extern vpx_codec_iface_t *vpx_codec_vp9_cx(void); /*!@} - end algorithm interface member group*/ -/*!\name Algorithm interface for VP10 - * - * This interface provides the capability to encode raw VP9 streams. - * @{ - */ -extern vpx_codec_iface_t vpx_codec_vp10_cx_algo; -extern vpx_codec_iface_t *vpx_codec_vp10_cx(void); -/*!@} - end algorithm interface member group*/ - /* * Algorithm Flags */ @@ -64,8 +55,7 @@ extern vpx_codec_iface_t *vpx_codec_vp10_cx(void); * predictor. When not set, the encoder will choose whether to use the * last frame or not automatically. */ -#define VP8_EFLAG_NO_REF_LAST (1<<16) - +#define VP8_EFLAG_NO_REF_LAST (1 << 16) /*!\brief Don't reference the golden frame * @@ -73,8 +63,7 @@ extern vpx_codec_iface_t *vpx_codec_vp10_cx(void); * predictor. When not set, the encoder will choose whether to use the * golden frame or not automatically. */ -#define VP8_EFLAG_NO_REF_GF (1<<17) - +#define VP8_EFLAG_NO_REF_GF (1 << 17) /*!\brief Don't reference the alternate reference frame * @@ -82,56 +71,49 @@ extern vpx_codec_iface_t *vpx_codec_vp10_cx(void); * predictor. When not set, the encoder will choose whether to use the * alt ref frame or not automatically. */ -#define VP8_EFLAG_NO_REF_ARF (1<<21) - +#define VP8_EFLAG_NO_REF_ARF (1 << 21) /*!\brief Don't update the last frame * * When this flag is set, the encoder will not update the last frame with * the contents of the current frame. 
*/ -#define VP8_EFLAG_NO_UPD_LAST (1<<18) - +#define VP8_EFLAG_NO_UPD_LAST (1 << 18) /*!\brief Don't update the golden frame * * When this flag is set, the encoder will not update the golden frame with * the contents of the current frame. */ -#define VP8_EFLAG_NO_UPD_GF (1<<22) - +#define VP8_EFLAG_NO_UPD_GF (1 << 22) /*!\brief Don't update the alternate reference frame * * When this flag is set, the encoder will not update the alt ref frame with * the contents of the current frame. */ -#define VP8_EFLAG_NO_UPD_ARF (1<<23) - +#define VP8_EFLAG_NO_UPD_ARF (1 << 23) /*!\brief Force golden frame update * * When this flag is set, the encoder copy the contents of the current frame * to the golden frame buffer. */ -#define VP8_EFLAG_FORCE_GF (1<<19) - +#define VP8_EFLAG_FORCE_GF (1 << 19) /*!\brief Force alternate reference frame update * * When this flag is set, the encoder copy the contents of the current frame * to the alternate reference frame buffer. */ -#define VP8_EFLAG_FORCE_ARF (1<<24) - +#define VP8_EFLAG_FORCE_ARF (1 << 24) /*!\brief Disable entropy update * * When this flag is set, the encoder will not update its internal entropy * model based on the entropy of this frame. */ -#define VP8_EFLAG_NO_UPD_ENTROPY (1<<20) - +#define VP8_EFLAG_NO_UPD_ENTROPY (1 << 20) /*!\brief VPx encoder control functions * @@ -145,7 +127,7 @@ enum vp8e_enc_control_id { * * Supported in codecs: VP8, VP9 */ - VP8E_SET_ROI_MAP = 8, + VP8E_SET_ROI_MAP = 8, /*!\brief Codec control function to pass an Active map to encoder. * @@ -157,7 +139,7 @@ enum vp8e_enc_control_id { * * Supported in codecs: VP8, VP9 */ - VP8E_SET_SCALEMODE = 11, + VP8E_SET_SCALEMODE = 11, /*!\brief Codec control function to set encoder internal speed settings. * @@ -170,7 +152,7 @@ enum vp8e_enc_control_id { * * Supported in codecs: VP8, VP9 */ - VP8E_SET_CPUUSED = 13, + VP8E_SET_CPUUSED = 13, /*!\brief Codec control function to enable automatic set and use alf frames. 
* @@ -498,7 +480,8 @@ enum vp8e_enc_control_id { VP9E_SET_COLOR_SPACE, /*!\brief Codec control function to set temporal layering mode. - * \note Valid ranges: 0..3, default is "0" (VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING). + * \note Valid ranges: 0..3, default is "0" + * (VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING). * 0 = VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING * 1 = VP9E_TEMPORAL_LAYERING_MODE_BYPASS * 2 = VP9E_TEMPORAL_LAYERING_MODE_0101 @@ -554,6 +537,31 @@ enum vp8e_enc_control_id { * Supported in codecs: VP9 */ VP9E_SET_RENDER_SIZE, + + /*!\brief Codec control function to set target level. + * + * 255: off (default); 0: only keep level stats; 10: target for level 1.0; + * 11: target for level 1.1; ... 62: target for level 6.2 + * + * Supported in codecs: VP9 + */ + VP9E_SET_TARGET_LEVEL, + + /*!\brief Codec control function to get bitstream level. + * + * Supported in codecs: VP9 + */ + VP9E_GET_LEVEL, + + /*!\brief Codec control function to enable/disable special mode for altref + * adaptive quantization. You can use it with --aq-mode concurrently. + * + * Enable special adaptive quantization for altref frames based on their + * expected prediction quality for the future frames. + * + * Supported in codecs: VP9 + */ + VP9E_SET_ALT_REF_AQ }; /*!\brief vpx 1-D scaling mode @@ -561,10 +569,10 @@ enum vp8e_enc_control_id { * This set of constants define 1-D vpx scaling modes */ typedef enum vpx_scaling_mode_1d { - VP8E_NORMAL = 0, - VP8E_FOURFIVE = 1, - VP8E_THREEFIVE = 2, - VP8E_ONETWO = 3 + VP8E_NORMAL = 0, + VP8E_FOURFIVE = 1, + VP8E_THREEFIVE = 2, + VP8E_ONETWO = 3 } VPX_SCALING_MODE; /*!\brief Temporal layering mode enum for VP9 SVC. @@ -577,21 +585,21 @@ typedef enum vp9e_temporal_layering_mode { /*!\brief No temporal layering. * Used when only spatial layering is used. */ - VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING = 0, + VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING = 0, /*!\brief Bypass mode. * Used when application needs to control temporal layering. 
* This will only work when the number of spatial layers equals 1. */ - VP9E_TEMPORAL_LAYERING_MODE_BYPASS = 1, + VP9E_TEMPORAL_LAYERING_MODE_BYPASS = 1, /*!\brief 0-1-0-1... temporal layering scheme with two temporal layers. */ - VP9E_TEMPORAL_LAYERING_MODE_0101 = 2, + VP9E_TEMPORAL_LAYERING_MODE_0101 = 2, /*!\brief 0-2-1-2... temporal layering scheme with three temporal layers. */ - VP9E_TEMPORAL_LAYERING_MODE_0212 = 3 + VP9E_TEMPORAL_LAYERING_MODE_0212 = 3 } VP9E_TEMPORAL_LAYERING_MODE; /*!\brief vpx region of interest map @@ -603,13 +611,13 @@ typedef enum vp9e_temporal_layering_mode { typedef struct vpx_roi_map { /*! An id between 0 and 3 for each 16x16 region within a frame. */ unsigned char *roi_map; - unsigned int rows; /**< Number of rows. */ - unsigned int cols; /**< Number of columns. */ + unsigned int rows; /**< Number of rows. */ + unsigned int cols; /**< Number of columns. */ // TODO(paulwilkins): broken for VP9 which has 8 segments // q and loop filter deltas for each segment // (see MAX_MB_SEGMENTS) - int delta_q[4]; /**< Quantizer deltas. */ - int delta_lf[4]; /**< Loop filter deltas. */ + int delta_q[4]; /**< Quantizer deltas. */ + int delta_lf[4]; /**< Loop filter deltas. */ /*! Static breakout threshold for each segment. 
*/ unsigned int static_threshold[4]; } vpx_roi_map_t; @@ -620,11 +628,11 @@ typedef struct vpx_roi_map { * */ - typedef struct vpx_active_map { - unsigned char *active_map; /**< specify an on (1) or off (0) each 16x16 region within a frame */ - unsigned int rows; /**< number of rows */ - unsigned int cols; /**< number of cols */ + /*!\brief specify an on (1) or off (0) each 16x16 region within a frame */ + unsigned char *active_map; + unsigned int rows; /**< number of rows */ + unsigned int cols; /**< number of cols */ } vpx_active_map_t; /*!\brief vpx image scaling mode @@ -633,8 +641,8 @@ typedef struct vpx_active_map { * */ typedef struct vpx_scaling_mode { - VPX_SCALING_MODE h_scaling_mode; /**< horizontal scaling mode */ - VPX_SCALING_MODE v_scaling_mode; /**< vertical scaling mode */ + VPX_SCALING_MODE h_scaling_mode; /**< horizontal scaling mode */ + VPX_SCALING_MODE v_scaling_mode; /**< vertical scaling mode */ } vpx_scaling_mode_t; /*!\brief VP8 token partition mode @@ -645,9 +653,9 @@ typedef struct vpx_scaling_mode { */ typedef enum { - VP8_ONE_TOKENPARTITION = 0, - VP8_TWO_TOKENPARTITION = 1, - VP8_FOUR_TOKENPARTITION = 2, + VP8_ONE_TOKENPARTITION = 0, + VP8_TWO_TOKENPARTITION = 1, + VP8_FOUR_TOKENPARTITION = 2, VP8_EIGHT_TOKENPARTITION = 3 } vp8e_token_partitions; @@ -663,10 +671,7 @@ typedef enum { * Changes the encoder to tune for certain types of input material. * */ -typedef enum { - VP8_TUNE_PSNR, - VP8_TUNE_SSIM -} vp8e_tuning; +typedef enum { VP8_TUNE_PSNR, VP8_TUNE_SSIM } vp8e_tuning; /*!\brief vp9 svc layer parameters * @@ -676,8 +681,8 @@ typedef enum { * */ typedef struct vpx_svc_layer_id { - int spatial_layer_id; /**< Spatial layer id number. */ - int temporal_layer_id; /**< Temporal layer id number. */ + int spatial_layer_id; /**< Spatial layer id number. */ + int temporal_layer_id; /**< Temporal layer id number. */ } vpx_svc_layer_id_t; /*!\brief vp9 svc frame flag parameters. 
@@ -689,7 +694,7 @@ typedef struct vpx_svc_layer_id { * */ typedef struct vpx_svc_ref_frame_config { - int frame_flags[VPX_TS_MAX_LAYERS]; /**< Frame flags. */ + int frame_flags[VPX_TS_MAX_LAYERS]; /**< Frame flags. */ int lst_fb_idx[VPX_TS_MAX_LAYERS]; /**< Last buffer index. */ int gld_fb_idx[VPX_TS_MAX_LAYERS]; /**< Golden buffer index. */ int alt_fb_idx[VPX_TS_MAX_LAYERS]; /**< Altref buffer index. */ @@ -703,60 +708,60 @@ typedef struct vpx_svc_ref_frame_config { * */ -VPX_CTRL_USE_TYPE(VP8E_SET_FRAME_FLAGS, int) +VPX_CTRL_USE_TYPE(VP8E_SET_FRAME_FLAGS, int) #define VPX_CTRL_VP8E_SET_FRAME_FLAGS -VPX_CTRL_USE_TYPE(VP8E_SET_TEMPORAL_LAYER_ID, int) +VPX_CTRL_USE_TYPE(VP8E_SET_TEMPORAL_LAYER_ID, int) #define VPX_CTRL_VP8E_SET_TEMPORAL_LAYER_ID -VPX_CTRL_USE_TYPE(VP8E_SET_ROI_MAP, vpx_roi_map_t *) +VPX_CTRL_USE_TYPE(VP8E_SET_ROI_MAP, vpx_roi_map_t *) #define VPX_CTRL_VP8E_SET_ROI_MAP -VPX_CTRL_USE_TYPE(VP8E_SET_ACTIVEMAP, vpx_active_map_t *) +VPX_CTRL_USE_TYPE(VP8E_SET_ACTIVEMAP, vpx_active_map_t *) #define VPX_CTRL_VP8E_SET_ACTIVEMAP -VPX_CTRL_USE_TYPE(VP8E_SET_SCALEMODE, vpx_scaling_mode_t *) +VPX_CTRL_USE_TYPE(VP8E_SET_SCALEMODE, vpx_scaling_mode_t *) #define VPX_CTRL_VP8E_SET_SCALEMODE -VPX_CTRL_USE_TYPE(VP9E_SET_SVC, int) +VPX_CTRL_USE_TYPE(VP9E_SET_SVC, int) #define VPX_CTRL_VP9E_SET_SVC -VPX_CTRL_USE_TYPE(VP9E_SET_SVC_PARAMETERS, void *) +VPX_CTRL_USE_TYPE(VP9E_SET_SVC_PARAMETERS, void *) #define VPX_CTRL_VP9E_SET_SVC_PARAMETERS -VPX_CTRL_USE_TYPE(VP9E_REGISTER_CX_CALLBACK, void *) +VPX_CTRL_USE_TYPE(VP9E_REGISTER_CX_CALLBACK, void *) #define VPX_CTRL_VP9E_REGISTER_CX_CALLBACK -VPX_CTRL_USE_TYPE(VP9E_SET_SVC_LAYER_ID, vpx_svc_layer_id_t *) +VPX_CTRL_USE_TYPE(VP9E_SET_SVC_LAYER_ID, vpx_svc_layer_id_t *) #define VPX_CTRL_VP9E_SET_SVC_LAYER_ID -VPX_CTRL_USE_TYPE(VP8E_SET_CPUUSED, int) +VPX_CTRL_USE_TYPE(VP8E_SET_CPUUSED, int) #define VPX_CTRL_VP8E_SET_CPUUSED -VPX_CTRL_USE_TYPE(VP8E_SET_ENABLEAUTOALTREF, unsigned int) 
+VPX_CTRL_USE_TYPE(VP8E_SET_ENABLEAUTOALTREF, unsigned int) #define VPX_CTRL_VP8E_SET_ENABLEAUTOALTREF -VPX_CTRL_USE_TYPE(VP8E_SET_NOISE_SENSITIVITY, unsigned int) +VPX_CTRL_USE_TYPE(VP8E_SET_NOISE_SENSITIVITY, unsigned int) #define VPX_CTRL_VP8E_SET_NOISE_SENSITIVITY -VPX_CTRL_USE_TYPE(VP8E_SET_SHARPNESS, unsigned int) +VPX_CTRL_USE_TYPE(VP8E_SET_SHARPNESS, unsigned int) #define VPX_CTRL_VP8E_SET_SHARPNESS -VPX_CTRL_USE_TYPE(VP8E_SET_STATIC_THRESHOLD, unsigned int) +VPX_CTRL_USE_TYPE(VP8E_SET_STATIC_THRESHOLD, unsigned int) #define VPX_CTRL_VP8E_SET_STATIC_THRESHOLD -VPX_CTRL_USE_TYPE(VP8E_SET_TOKEN_PARTITIONS, int) /* vp8e_token_partitions */ +VPX_CTRL_USE_TYPE(VP8E_SET_TOKEN_PARTITIONS, int) /* vp8e_token_partitions */ #define VPX_CTRL_VP8E_SET_TOKEN_PARTITIONS -VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_MAXFRAMES, unsigned int) +VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_MAXFRAMES, unsigned int) #define VPX_CTRL_VP8E_SET_ARNR_MAXFRAMES -VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_STRENGTH, unsigned int) +VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_STRENGTH, unsigned int) #define VPX_CTRL_VP8E_SET_ARNR_STRENGTH -VPX_CTRL_USE_TYPE_DEPRECATED(VP8E_SET_ARNR_TYPE, unsigned int) +VPX_CTRL_USE_TYPE_DEPRECATED(VP8E_SET_ARNR_TYPE, unsigned int) #define VPX_CTRL_VP8E_SET_ARNR_TYPE -VPX_CTRL_USE_TYPE(VP8E_SET_TUNING, int) /* vp8e_tuning */ +VPX_CTRL_USE_TYPE(VP8E_SET_TUNING, int) /* vp8e_tuning */ #define VPX_CTRL_VP8E_SET_TUNING -VPX_CTRL_USE_TYPE(VP8E_SET_CQ_LEVEL, unsigned int) +VPX_CTRL_USE_TYPE(VP8E_SET_CQ_LEVEL, unsigned int) #define VPX_CTRL_VP8E_SET_CQ_LEVEL -VPX_CTRL_USE_TYPE(VP9E_SET_TILE_COLUMNS, int) +VPX_CTRL_USE_TYPE(VP9E_SET_TILE_COLUMNS, int) #define VPX_CTRL_VP9E_SET_TILE_COLUMNS -VPX_CTRL_USE_TYPE(VP9E_SET_TILE_ROWS, int) +VPX_CTRL_USE_TYPE(VP9E_SET_TILE_ROWS, int) #define VPX_CTRL_VP9E_SET_TILE_ROWS -VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER, int *) +VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER, int *) #define VPX_CTRL_VP8E_GET_LAST_QUANTIZER -VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER_64, int *) 
+VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER_64, int *) #define VPX_CTRL_VP8E_GET_LAST_QUANTIZER_64 -VPX_CTRL_USE_TYPE(VP9E_GET_SVC_LAYER_ID, vpx_svc_layer_id_t *) +VPX_CTRL_USE_TYPE(VP9E_GET_SVC_LAYER_ID, vpx_svc_layer_id_t *) #define VPX_CTRL_VP9E_GET_SVC_LAYER_ID VPX_CTRL_USE_TYPE(VP8E_SET_MAX_INTRA_BITRATE_PCT, unsigned int) @@ -779,10 +784,13 @@ VPX_CTRL_USE_TYPE(VP9E_SET_FRAME_PARALLEL_DECODING, unsigned int) VPX_CTRL_USE_TYPE(VP9E_SET_AQ_MODE, unsigned int) #define VPX_CTRL_VP9E_SET_AQ_MODE +VPX_CTRL_USE_TYPE(VP9E_SET_ALT_REF_AQ, int) +#define VPX_CTRL_VP9E_SET_ALT_REF_AQ + VPX_CTRL_USE_TYPE(VP9E_SET_FRAME_PERIODIC_BOOST, unsigned int) #define VPX_CTRL_VP9E_SET_FRAME_PERIODIC_BOOST -VPX_CTRL_USE_TYPE(VP9E_SET_NOISE_SENSITIVITY, unsigned int) +VPX_CTRL_USE_TYPE(VP9E_SET_NOISE_SENSITIVITY, unsigned int) #define VPX_CTRL_VP9E_SET_NOISE_SENSITIVITY VPX_CTRL_USE_TYPE(VP9E_SET_TUNE_CONTENT, int) /* vp9e_tune_content */ @@ -791,10 +799,10 @@ VPX_CTRL_USE_TYPE(VP9E_SET_TUNE_CONTENT, int) /* vp9e_tune_content */ VPX_CTRL_USE_TYPE(VP9E_SET_COLOR_SPACE, int) #define VPX_CTRL_VP9E_SET_COLOR_SPACE -VPX_CTRL_USE_TYPE(VP9E_SET_MIN_GF_INTERVAL, unsigned int) +VPX_CTRL_USE_TYPE(VP9E_SET_MIN_GF_INTERVAL, unsigned int) #define VPX_CTRL_VP9E_SET_MIN_GF_INTERVAL -VPX_CTRL_USE_TYPE(VP9E_SET_MAX_GF_INTERVAL, unsigned int) +VPX_CTRL_USE_TYPE(VP9E_SET_MAX_GF_INTERVAL, unsigned int) #define VPX_CTRL_VP9E_SET_MAX_GF_INTERVAL VPX_CTRL_USE_TYPE(VP9E_GET_ACTIVEMAP, vpx_active_map_t *) @@ -809,6 +817,12 @@ VPX_CTRL_USE_TYPE(VP9E_SET_SVC_REF_FRAME_CONFIG, vpx_svc_ref_frame_config_t *) VPX_CTRL_USE_TYPE(VP9E_SET_RENDER_SIZE, int *) #define VPX_CTRL_VP9E_SET_RENDER_SIZE +VPX_CTRL_USE_TYPE(VP9E_SET_TARGET_LEVEL, unsigned int) +#define VPX_CTRL_VP9E_SET_TARGET_LEVEL + +VPX_CTRL_USE_TYPE(VP9E_GET_LEVEL, int *) +#define VPX_CTRL_VP9E_GET_LEVEL + /*!\endcond */ /*! 
@} - end defgroup vp8_encoder */ #ifdef __cplusplus diff --git a/libs/libvpx/vpx/vp8dx.h b/libs/libvpx/vpx/vp8dx.h index 1f02fd5958..88204acd37 100644 --- a/libs/libvpx/vpx/vp8dx.h +++ b/libs/libvpx/vpx/vp8dx.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - /*!\defgroup vp8_decoder WebM VP8/VP9 Decoder * \ingroup vp8 * @@ -33,7 +32,7 @@ extern "C" { * This interface provides the capability to decode VP8 streams. * @{ */ -extern vpx_codec_iface_t vpx_codec_vp8_dx_algo; +extern vpx_codec_iface_t vpx_codec_vp8_dx_algo; extern vpx_codec_iface_t *vpx_codec_vp8_dx(void); /*!@} - end algorithm interface member group*/ @@ -42,19 +41,10 @@ extern vpx_codec_iface_t *vpx_codec_vp8_dx(void); * This interface provides the capability to decode VP9 streams. * @{ */ -extern vpx_codec_iface_t vpx_codec_vp9_dx_algo; +extern vpx_codec_iface_t vpx_codec_vp9_dx_algo; extern vpx_codec_iface_t *vpx_codec_vp9_dx(void); /*!@} - end algorithm interface member group*/ -/*!\name Algorithm interface for VP10 - * - * This interface provides the capability to decode VP10 streams. - * @{ - */ -extern vpx_codec_iface_t vpx_codec_vp10_dx_algo; -extern vpx_codec_iface_t *vpx_codec_vp10_dx(void); -/*!@} - end algorithm interface member group*/ - /*!\enum vp8_dec_control_id * \brief VP8 decoder control functions * @@ -135,18 +125,17 @@ typedef void (*vpx_decrypt_cb)(void *decrypt_state, const unsigned char *input, * Defines a structure to hold the decryption state and access function. */ typedef struct vpx_decrypt_init { - /*! Decrypt callback. */ - vpx_decrypt_cb decrypt_cb; + /*! Decrypt callback. */ + vpx_decrypt_cb decrypt_cb; - /*! Decryption state. */ - void *decrypt_state; + /*! Decryption state. */ + void *decrypt_state; } vpx_decrypt_init; /*!\brief A deprecated alias for vpx_decrypt_init. 
*/ typedef vpx_decrypt_init vp8_decrypt_init; - /*!\cond */ /*!\brief VP8 decoder control function parameter type * @@ -155,22 +144,21 @@ typedef vpx_decrypt_init vp8_decrypt_init; * */ - -VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_UPDATES, int *) +VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_UPDATES, int *) #define VPX_CTRL_VP8D_GET_LAST_REF_UPDATES -VPX_CTRL_USE_TYPE(VP8D_GET_FRAME_CORRUPTED, int *) +VPX_CTRL_USE_TYPE(VP8D_GET_FRAME_CORRUPTED, int *) #define VPX_CTRL_VP8D_GET_FRAME_CORRUPTED -VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_USED, int *) +VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_USED, int *) #define VPX_CTRL_VP8D_GET_LAST_REF_USED -VPX_CTRL_USE_TYPE(VPXD_SET_DECRYPTOR, vpx_decrypt_init *) +VPX_CTRL_USE_TYPE(VPXD_SET_DECRYPTOR, vpx_decrypt_init *) #define VPX_CTRL_VPXD_SET_DECRYPTOR -VPX_CTRL_USE_TYPE(VP8D_SET_DECRYPTOR, vpx_decrypt_init *) +VPX_CTRL_USE_TYPE(VP8D_SET_DECRYPTOR, vpx_decrypt_init *) #define VPX_CTRL_VP8D_SET_DECRYPTOR -VPX_CTRL_USE_TYPE(VP9D_GET_DISPLAY_SIZE, int *) +VPX_CTRL_USE_TYPE(VP9D_GET_DISPLAY_SIZE, int *) #define VPX_CTRL_VP9D_GET_DISPLAY_SIZE -VPX_CTRL_USE_TYPE(VP9D_GET_BIT_DEPTH, unsigned int *) +VPX_CTRL_USE_TYPE(VP9D_GET_BIT_DEPTH, unsigned int *) #define VPX_CTRL_VP9D_GET_BIT_DEPTH -VPX_CTRL_USE_TYPE(VP9D_GET_FRAME_SIZE, int *) +VPX_CTRL_USE_TYPE(VP9D_GET_FRAME_SIZE, int *) #define VPX_CTRL_VP9D_GET_FRAME_SIZE VPX_CTRL_USE_TYPE(VP9_INVERT_TILE_DECODE_ORDER, int) #define VPX_CTRL_VP9_INVERT_TILE_DECODE_ORDER diff --git a/libs/libvpx/vpx/vpx_codec.h b/libs/libvpx/vpx/vpx_codec.h index b6037bb4d7..fe75d23872 100644 --- a/libs/libvpx/vpx/vpx_codec.h +++ b/libs/libvpx/vpx/vpx_codec.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - /*!\defgroup codec Common Algorithm Interface * This abstraction allows applications to easily support multiple video * formats with minimal code duplication. 
This section describes the interface @@ -46,434 +45,419 @@ extern "C" { #include "./vpx_integer.h" #include "./vpx_image.h" - /*!\brief Decorator indicating a function is deprecated */ +/*!\brief Decorator indicating a function is deprecated */ #ifndef DEPRECATED #if defined(__GNUC__) && __GNUC__ -#define DEPRECATED __attribute__ ((deprecated)) +#define DEPRECATED __attribute__((deprecated)) #elif defined(_MSC_VER) #define DEPRECATED #else #define DEPRECATED #endif -#endif /* DEPRECATED */ +#endif /* DEPRECATED */ #ifndef DECLSPEC_DEPRECATED #if defined(__GNUC__) && __GNUC__ #define DECLSPEC_DEPRECATED /**< \copydoc #DEPRECATED */ #elif defined(_MSC_VER) -#define DECLSPEC_DEPRECATED __declspec(deprecated) /**< \copydoc #DEPRECATED */ +/*!\brief \copydoc #DEPRECATED */ +#define DECLSPEC_DEPRECATED __declspec(deprecated) #else #define DECLSPEC_DEPRECATED /**< \copydoc #DEPRECATED */ #endif -#endif /* DECLSPEC_DEPRECATED */ +#endif /* DECLSPEC_DEPRECATED */ - /*!\brief Decorator indicating a function is potentially unused */ +/*!\brief Decorator indicating a function is potentially unused */ #ifdef UNUSED #elif defined(__GNUC__) || defined(__clang__) -#define UNUSED __attribute__ ((unused)) +#define UNUSED __attribute__((unused)) #else #define UNUSED #endif - /*!\brief Current ABI version number - * - * \internal - * If this file is altered in any way that changes the ABI, this value - * must be bumped. Examples include, but are not limited to, changing - * types, removing or reassigning enums, adding/removing/rearranging - * fields to structures - */ +/*!\brief Current ABI version number + * + * \internal + * If this file is altered in any way that changes the ABI, this value + * must be bumped. 
Examples include, but are not limited to, changing + * types, removing or reassigning enums, adding/removing/rearranging + * fields to structures + */ #define VPX_CODEC_ABI_VERSION (3 + VPX_IMAGE_ABI_VERSION) /**<\hideinitializer*/ - /*!\brief Algorithm return codes */ - typedef enum { - /*!\brief Operation completed without error */ - VPX_CODEC_OK, +/*!\brief Algorithm return codes */ +typedef enum { + /*!\brief Operation completed without error */ + VPX_CODEC_OK, - /*!\brief Unspecified error */ - VPX_CODEC_ERROR, + /*!\brief Unspecified error */ + VPX_CODEC_ERROR, - /*!\brief Memory operation failed */ - VPX_CODEC_MEM_ERROR, + /*!\brief Memory operation failed */ + VPX_CODEC_MEM_ERROR, - /*!\brief ABI version mismatch */ - VPX_CODEC_ABI_MISMATCH, + /*!\brief ABI version mismatch */ + VPX_CODEC_ABI_MISMATCH, - /*!\brief Algorithm does not have required capability */ - VPX_CODEC_INCAPABLE, + /*!\brief Algorithm does not have required capability */ + VPX_CODEC_INCAPABLE, - /*!\brief The given bitstream is not supported. - * - * The bitstream was unable to be parsed at the highest level. The decoder - * is unable to proceed. This error \ref SHOULD be treated as fatal to the - * stream. */ - VPX_CODEC_UNSUP_BITSTREAM, - - /*!\brief Encoded bitstream uses an unsupported feature - * - * The decoder does not implement a feature required by the encoder. This - * return code should only be used for features that prevent future - * pictures from being properly decoded. This error \ref MAY be treated as - * fatal to the stream or \ref MAY be treated as fatal to the current GOP. - */ - VPX_CODEC_UNSUP_FEATURE, - - /*!\brief The coded data for this stream is corrupt or incomplete - * - * There was a problem decoding the current frame. This return code - * should only be used for failures that prevent future pictures from - * being properly decoded. This error \ref MAY be treated as fatal to the - * stream or \ref MAY be treated as fatal to the current GOP. 
If decoding - * is continued for the current GOP, artifacts may be present. - */ - VPX_CODEC_CORRUPT_FRAME, - - /*!\brief An application-supplied parameter is not valid. - * - */ - VPX_CODEC_INVALID_PARAM, - - /*!\brief An iterator reached the end of list. - * - */ - VPX_CODEC_LIST_END - - } - vpx_codec_err_t; - - - /*! \brief Codec capabilities bitfield + /*!\brief The given bitstream is not supported. * - * Each codec advertises the capabilities it supports as part of its - * ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces - * or functionality, and are not required to be supported. + * The bitstream was unable to be parsed at the highest level. The decoder + * is unable to proceed. This error \ref SHOULD be treated as fatal to the + * stream. */ + VPX_CODEC_UNSUP_BITSTREAM, + + /*!\brief Encoded bitstream uses an unsupported feature * - * The available flags are specified by VPX_CODEC_CAP_* defines. + * The decoder does not implement a feature required by the encoder. This + * return code should only be used for features that prevent future + * pictures from being properly decoded. This error \ref MAY be treated as + * fatal to the stream or \ref MAY be treated as fatal to the current GOP. */ - typedef long vpx_codec_caps_t; + VPX_CODEC_UNSUP_FEATURE, + + /*!\brief The coded data for this stream is corrupt or incomplete + * + * There was a problem decoding the current frame. This return code + * should only be used for failures that prevent future pictures from + * being properly decoded. This error \ref MAY be treated as fatal to the + * stream or \ref MAY be treated as fatal to the current GOP. If decoding + * is continued for the current GOP, artifacts may be present. + */ + VPX_CODEC_CORRUPT_FRAME, + + /*!\brief An application-supplied parameter is not valid. + * + */ + VPX_CODEC_INVALID_PARAM, + + /*!\brief An iterator reached the end of list. + * + */ + VPX_CODEC_LIST_END + +} vpx_codec_err_t; + +/*! 
\brief Codec capabilities bitfield + * + * Each codec advertises the capabilities it supports as part of its + * ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces + * or functionality, and are not required to be supported. + * + * The available flags are specified by VPX_CODEC_CAP_* defines. + */ +typedef long vpx_codec_caps_t; #define VPX_CODEC_CAP_DECODER 0x1 /**< Is a decoder */ #define VPX_CODEC_CAP_ENCODER 0x2 /**< Is an encoder */ +/*! \brief Initialization-time Feature Enabling + * + * Certain codec features must be known at initialization time, to allow for + * proper memory allocation. + * + * The available flags are specified by VPX_CODEC_USE_* defines. + */ +typedef long vpx_codec_flags_t; - /*! \brief Initialization-time Feature Enabling - * - * Certain codec features must be known at initialization time, to allow for - * proper memory allocation. - * - * The available flags are specified by VPX_CODEC_USE_* defines. - */ - typedef long vpx_codec_flags_t; +/*!\brief Codec interface structure. + * + * Contains function pointers and other data private to the codec + * implementation. This structure is opaque to the application. + */ +typedef const struct vpx_codec_iface vpx_codec_iface_t; +/*!\brief Codec private data structure. + * + * Contains data private to the codec implementation. This structure is opaque + * to the application. + */ +typedef struct vpx_codec_priv vpx_codec_priv_t; - /*!\brief Codec interface structure. - * - * Contains function pointers and other data private to the codec - * implementation. This structure is opaque to the application. - */ - typedef const struct vpx_codec_iface vpx_codec_iface_t; +/*!\brief Iterator + * + * Opaque storage used for iterating over lists. + */ +typedef const void *vpx_codec_iter_t; +/*!\brief Codec context structure + * + * All codecs \ref MUST support this context structure fully. 
In general, + * this data should be considered private to the codec algorithm, and + * not be manipulated or examined by the calling application. Applications + * may reference the 'name' member to get a printable description of the + * algorithm. + */ +typedef struct vpx_codec_ctx { + const char *name; /**< Printable interface name */ + vpx_codec_iface_t *iface; /**< Interface pointers */ + vpx_codec_err_t err; /**< Last returned error */ + const char *err_detail; /**< Detailed info, if available */ + vpx_codec_flags_t init_flags; /**< Flags passed at init time */ + union { + /**< Decoder Configuration Pointer */ + const struct vpx_codec_dec_cfg *dec; + /**< Encoder Configuration Pointer */ + const struct vpx_codec_enc_cfg *enc; + const void *raw; + } config; /**< Configuration pointer aliasing union */ + vpx_codec_priv_t *priv; /**< Algorithm private storage */ +} vpx_codec_ctx_t; - /*!\brief Codec private data structure. - * - * Contains data private to the codec implementation. This structure is opaque - * to the application. - */ - typedef struct vpx_codec_priv vpx_codec_priv_t; +/*!\brief Bit depth for codec + * * + * This enumeration determines the bit depth of the codec. + */ +typedef enum vpx_bit_depth { + VPX_BITS_8 = 8, /**< 8 bits */ + VPX_BITS_10 = 10, /**< 10 bits */ + VPX_BITS_12 = 12, /**< 12 bits */ +} vpx_bit_depth_t; +/* + * Library Version Number Interface + * + * For example, see the following sample return values: + * vpx_codec_version() (1<<16 | 2<<8 | 3) + * vpx_codec_version_str() "v1.2.3-rc1-16-gec6a1ba" + * vpx_codec_version_extra_str() "rc1-16-gec6a1ba" + */ - /*!\brief Iterator - * - * Opaque storage used for iterating over lists. - */ - typedef const void *vpx_codec_iter_t; +/*!\brief Return the version information (as an integer) + * + * Returns a packed encoding of the library version number. This will only + * include + * the major.minor.patch component of the version number. 
Note that this encoded + * value should be accessed through the macros provided, as the encoding may + * change + * in the future. + * + */ +int vpx_codec_version(void); +#define VPX_VERSION_MAJOR(v) \ + ((v >> 16) & 0xff) /**< extract major from packed version */ +#define VPX_VERSION_MINOR(v) \ + ((v >> 8) & 0xff) /**< extract minor from packed version */ +#define VPX_VERSION_PATCH(v) \ + ((v >> 0) & 0xff) /**< extract patch from packed version */ +/*!\brief Return the version major number */ +#define vpx_codec_version_major() ((vpx_codec_version() >> 16) & 0xff) - /*!\brief Codec context structure - * - * All codecs \ref MUST support this context structure fully. In general, - * this data should be considered private to the codec algorithm, and - * not be manipulated or examined by the calling application. Applications - * may reference the 'name' member to get a printable description of the - * algorithm. - */ - typedef struct vpx_codec_ctx { - const char *name; /**< Printable interface name */ - vpx_codec_iface_t *iface; /**< Interface pointers */ - vpx_codec_err_t err; /**< Last returned error */ - const char *err_detail; /**< Detailed info, if available */ - vpx_codec_flags_t init_flags; /**< Flags passed at init time */ - union { - /**< Decoder Configuration Pointer */ - const struct vpx_codec_dec_cfg *dec; - /**< Encoder Configuration Pointer */ - const struct vpx_codec_enc_cfg *enc; - const void *raw; - } config; /**< Configuration pointer aliasing union */ - vpx_codec_priv_t *priv; /**< Algorithm private storage */ - } vpx_codec_ctx_t; +/*!\brief Return the version minor number */ +#define vpx_codec_version_minor() ((vpx_codec_version() >> 8) & 0xff) - /*!\brief Bit depth for codec - * * - * This enumeration determines the bit depth of the codec. 
- */ - typedef enum vpx_bit_depth { - VPX_BITS_8 = 8, /**< 8 bits */ - VPX_BITS_10 = 10, /**< 10 bits */ - VPX_BITS_12 = 12, /**< 12 bits */ - } vpx_bit_depth_t; +/*!\brief Return the version patch number */ +#define vpx_codec_version_patch() ((vpx_codec_version() >> 0) & 0xff) - /* - * Library Version Number Interface - * - * For example, see the following sample return values: - * vpx_codec_version() (1<<16 | 2<<8 | 3) - * vpx_codec_version_str() "v1.2.3-rc1-16-gec6a1ba" - * vpx_codec_version_extra_str() "rc1-16-gec6a1ba" - */ +/*!\brief Return the version information (as a string) + * + * Returns a printable string containing the full library version number. This + * may + * contain additional text following the three digit version number, as to + * indicate + * release candidates, prerelease versions, etc. + * + */ +const char *vpx_codec_version_str(void); - /*!\brief Return the version information (as an integer) - * - * Returns a packed encoding of the library version number. This will only include - * the major.minor.patch component of the version number. Note that this encoded - * value should be accessed through the macros provided, as the encoding may change - * in the future. - * - */ - int vpx_codec_version(void); -#define VPX_VERSION_MAJOR(v) ((v>>16)&0xff) /**< extract major from packed version */ -#define VPX_VERSION_MINOR(v) ((v>>8)&0xff) /**< extract minor from packed version */ -#define VPX_VERSION_PATCH(v) ((v>>0)&0xff) /**< extract patch from packed version */ +/*!\brief Return the version information (as a string) + * + * Returns a printable "extra string". This is the component of the string + * returned + * by vpx_codec_version_str() following the three digit version number. 
+ * + */ +const char *vpx_codec_version_extra_str(void); - /*!\brief Return the version major number */ -#define vpx_codec_version_major() ((vpx_codec_version()>>16)&0xff) +/*!\brief Return the build configuration + * + * Returns a printable string containing an encoded version of the build + * configuration. This may be useful to vpx support. + * + */ +const char *vpx_codec_build_config(void); - /*!\brief Return the version minor number */ -#define vpx_codec_version_minor() ((vpx_codec_version()>>8)&0xff) +/*!\brief Return the name for a given interface + * + * Returns a human readable string for name of the given codec interface. + * + * \param[in] iface Interface pointer + * + */ +const char *vpx_codec_iface_name(vpx_codec_iface_t *iface); - /*!\brief Return the version patch number */ -#define vpx_codec_version_patch() ((vpx_codec_version()>>0)&0xff) +/*!\brief Convert error number to printable string + * + * Returns a human readable string for the last error returned by the + * algorithm. The returned error will be one line and will not contain + * any newline characters. + * + * + * \param[in] err Error number. + * + */ +const char *vpx_codec_err_to_string(vpx_codec_err_t err); +/*!\brief Retrieve error synopsis for codec context + * + * Returns a human readable string for the last error returned by the + * algorithm. The returned error will be one line and will not contain + * any newline characters. + * + * + * \param[in] ctx Pointer to this instance's context. + * + */ +const char *vpx_codec_error(vpx_codec_ctx_t *ctx); - /*!\brief Return the version information (as a string) - * - * Returns a printable string containing the full library version number. This may - * contain additional text following the three digit version number, as to indicate - * release candidates, prerelease versions, etc. 
- * - */ - const char *vpx_codec_version_str(void); +/*!\brief Retrieve detailed error information for codec context + * + * Returns a human readable string providing detailed information about + * the last error. + * + * \param[in] ctx Pointer to this instance's context. + * + * \retval NULL + * No detailed information is available. + */ +const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx); +/* REQUIRED FUNCTIONS + * + * The following functions are required to be implemented for all codecs. + * They represent the base case functionality expected of all codecs. + */ - /*!\brief Return the version information (as a string) - * - * Returns a printable "extra string". This is the component of the string returned - * by vpx_codec_version_str() following the three digit version number. - * - */ - const char *vpx_codec_version_extra_str(void); +/*!\brief Destroy a codec instance + * + * Destroys a codec context, freeing any associated memory buffers. + * + * \param[in] ctx Pointer to this instance's context + * + * \retval #VPX_CODEC_OK + * The codec algorithm initialized. + * \retval #VPX_CODEC_MEM_ERROR + * Memory allocation failed. + */ +vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx); +/*!\brief Get the capabilities of an algorithm. + * + * Retrieves the capabilities bitfield from the algorithm's interface. + * + * \param[in] iface Pointer to the algorithm interface + * + */ +vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface); - /*!\brief Return the build configuration - * - * Returns a printable string containing an encoded version of the build - * configuration. This may be useful to vpx support. - * - */ - const char *vpx_codec_build_config(void); - - - /*!\brief Return the name for a given interface - * - * Returns a human readable string for name of the given codec interface. 
- * - * \param[in] iface Interface pointer - * - */ - const char *vpx_codec_iface_name(vpx_codec_iface_t *iface); - - - /*!\brief Convert error number to printable string - * - * Returns a human readable string for the last error returned by the - * algorithm. The returned error will be one line and will not contain - * any newline characters. - * - * - * \param[in] err Error number. - * - */ - const char *vpx_codec_err_to_string(vpx_codec_err_t err); - - - /*!\brief Retrieve error synopsis for codec context - * - * Returns a human readable string for the last error returned by the - * algorithm. The returned error will be one line and will not contain - * any newline characters. - * - * - * \param[in] ctx Pointer to this instance's context. - * - */ - const char *vpx_codec_error(vpx_codec_ctx_t *ctx); - - - /*!\brief Retrieve detailed error information for codec context - * - * Returns a human readable string providing detailed information about - * the last error. - * - * \param[in] ctx Pointer to this instance's context. - * - * \retval NULL - * No detailed information is available. - */ - const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx); - - - /* REQUIRED FUNCTIONS - * - * The following functions are required to be implemented for all codecs. - * They represent the base case functionality expected of all codecs. - */ - - /*!\brief Destroy a codec instance - * - * Destroys a codec context, freeing any associated memory buffers. - * - * \param[in] ctx Pointer to this instance's context - * - * \retval #VPX_CODEC_OK - * The codec algorithm initialized. - * \retval #VPX_CODEC_MEM_ERROR - * Memory allocation failed. - */ - vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx); - - - /*!\brief Get the capabilities of an algorithm. - * - * Retrieves the capabilities bitfield from the algorithm's interface. 
- * - * \param[in] iface Pointer to the algorithm interface - * - */ - vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface); - - - /*!\brief Control algorithm - * - * This function is used to exchange algorithm specific data with the codec - * instance. This can be used to implement features specific to a particular - * algorithm. - * - * This wrapper function dispatches the request to the helper function - * associated with the given ctrl_id. It tries to call this function - * transparently, but will return #VPX_CODEC_ERROR if the request could not - * be dispatched. - * - * Note that this function should not be used directly. Call the - * #vpx_codec_control wrapper macro instead. - * - * \param[in] ctx Pointer to this instance's context - * \param[in] ctrl_id Algorithm specific control identifier - * - * \retval #VPX_CODEC_OK - * The control request was processed. - * \retval #VPX_CODEC_ERROR - * The control request was not processed. - * \retval #VPX_CODEC_INVALID_PARAM - * The data was not valid. - */ - vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, - int ctrl_id, - ...); +/*!\brief Control algorithm + * + * This function is used to exchange algorithm specific data with the codec + * instance. This can be used to implement features specific to a particular + * algorithm. + * + * This wrapper function dispatches the request to the helper function + * associated with the given ctrl_id. It tries to call this function + * transparently, but will return #VPX_CODEC_ERROR if the request could not + * be dispatched. + * + * Note that this function should not be used directly. Call the + * #vpx_codec_control wrapper macro instead. + * + * \param[in] ctx Pointer to this instance's context + * \param[in] ctrl_id Algorithm specific control identifier + * + * \retval #VPX_CODEC_OK + * The control request was processed. + * \retval #VPX_CODEC_ERROR + * The control request was not processed. + * \retval #VPX_CODEC_INVALID_PARAM + * The data was not valid. 
+ */ +vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, int ctrl_id, ...); #if defined(VPX_DISABLE_CTRL_TYPECHECKS) && VPX_DISABLE_CTRL_TYPECHECKS -# define vpx_codec_control(ctx,id,data) vpx_codec_control_(ctx,id,data) -# define VPX_CTRL_USE_TYPE(id, typ) -# define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ) -# define VPX_CTRL_VOID(id, typ) +#define vpx_codec_control(ctx, id, data) vpx_codec_control_(ctx, id, data) +#define VPX_CTRL_USE_TYPE(id, typ) +#define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ) +#define VPX_CTRL_VOID(id, typ) #else - /*!\brief vpx_codec_control wrapper macro - * - * This macro allows for type safe conversions across the variadic parameter - * to vpx_codec_control_(). - * - * \internal - * It works by dispatching the call to the control function through a wrapper - * function named with the id parameter. - */ -# define vpx_codec_control(ctx,id,data) vpx_codec_control_##id(ctx,id,data)\ - /**<\hideinitializer*/ +/*!\brief vpx_codec_control wrapper macro + * + * This macro allows for type safe conversions across the variadic parameter + * to vpx_codec_control_(). + * + * \internal + * It works by dispatching the call to the control function through a wrapper + * function named with the id parameter. + */ +#define vpx_codec_control(ctx, id, data) \ + vpx_codec_control_##id(ctx, id, data) /**<\hideinitializer*/ - - /*!\brief vpx_codec_control type definition macro - * - * This macro allows for type safe conversions across the variadic parameter - * to vpx_codec_control_(). It defines the type of the argument for a given - * control identifier. - * - * \internal - * It defines a static function with - * the correctly typed arguments as a wrapper to the type-unsafe internal - * function. 
- */ -# define VPX_CTRL_USE_TYPE(id, typ) \ - static vpx_codec_err_t \ - vpx_codec_control_##id(vpx_codec_ctx_t*, int, typ) UNUSED;\ - \ - static vpx_codec_err_t \ - vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id, typ data) {\ - return vpx_codec_control_(ctx, ctrl_id, data);\ +/*!\brief vpx_codec_control type definition macro + * + * This macro allows for type safe conversions across the variadic parameter + * to vpx_codec_control_(). It defines the type of the argument for a given + * control identifier. + * + * \internal + * It defines a static function with + * the correctly typed arguments as a wrapper to the type-unsafe internal + * function. + */ +#define VPX_CTRL_USE_TYPE(id, typ) \ + static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *, int, typ) \ + UNUSED; \ + \ + static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *ctx, \ + int ctrl_id, typ data) { \ + return vpx_codec_control_(ctx, ctrl_id, data); \ } /**<\hideinitializer*/ - - /*!\brief vpx_codec_control deprecated type definition macro - * - * Like #VPX_CTRL_USE_TYPE, but indicates that the specified control is - * deprecated and should not be used. Consult the documentation for your - * codec for more information. - * - * \internal - * It defines a static function with the correctly typed arguments as a - * wrapper to the type-unsafe internal function. - */ -# define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ) \ - DECLSPEC_DEPRECATED static vpx_codec_err_t \ - vpx_codec_control_##id(vpx_codec_ctx_t*, int, typ) DEPRECATED UNUSED;\ - \ - DECLSPEC_DEPRECATED static vpx_codec_err_t \ - vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id, typ data) {\ - return vpx_codec_control_(ctx, ctrl_id, data);\ +/*!\brief vpx_codec_control deprecated type definition macro + * + * Like #VPX_CTRL_USE_TYPE, but indicates that the specified control is + * deprecated and should not be used. Consult the documentation for your + * codec for more information. 
+ * + * \internal + * It defines a static function with the correctly typed arguments as a + * wrapper to the type-unsafe internal function. + */ +#define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ) \ + DECLSPEC_DEPRECATED static vpx_codec_err_t vpx_codec_control_##id( \ + vpx_codec_ctx_t *, int, typ) DEPRECATED UNUSED; \ + \ + DECLSPEC_DEPRECATED static vpx_codec_err_t vpx_codec_control_##id( \ + vpx_codec_ctx_t *ctx, int ctrl_id, typ data) { \ + return vpx_codec_control_(ctx, ctrl_id, data); \ } /**<\hideinitializer*/ - - /*!\brief vpx_codec_control void type definition macro - * - * This macro allows for type safe conversions across the variadic parameter - * to vpx_codec_control_(). It indicates that a given control identifier takes - * no argument. - * - * \internal - * It defines a static function without a data argument as a wrapper to the - * type-unsafe internal function. - */ -# define VPX_CTRL_VOID(id) \ - static vpx_codec_err_t \ - vpx_codec_control_##id(vpx_codec_ctx_t*, int) UNUSED;\ - \ - static vpx_codec_err_t \ - vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id) {\ - return vpx_codec_control_(ctx, ctrl_id);\ +/*!\brief vpx_codec_control void type definition macro + * + * This macro allows for type safe conversions across the variadic parameter + * to vpx_codec_control_(). It indicates that a given control identifier takes + * no argument. + * + * \internal + * It defines a static function without a data argument as a wrapper to the + * type-unsafe internal function. 
+ */ +#define VPX_CTRL_VOID(id) \ + static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *, int) \ + UNUSED; \ + \ + static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *ctx, \ + int ctrl_id) { \ + return vpx_codec_control_(ctx, ctrl_id); \ } /**<\hideinitializer*/ - #endif - /*!@} - end defgroup codec*/ +/*!@} - end defgroup codec*/ #ifdef __cplusplus } #endif #endif // VPX_VPX_CODEC_H_ - diff --git a/libs/libvpx/vpx/vpx_codec.mk b/libs/libvpx/vpx/vpx_codec.mk index ccdef040c3..b77f45817b 100644 --- a/libs/libvpx/vpx/vpx_codec.mk +++ b/libs/libvpx/vpx/vpx_codec.mk @@ -36,10 +36,8 @@ API_SRCS-yes += vpx_decoder.h API_SRCS-yes += src/vpx_encoder.c API_SRCS-yes += vpx_encoder.h API_SRCS-yes += internal/vpx_codec_internal.h -API_SRCS-yes += internal/vpx_psnr.h API_SRCS-yes += src/vpx_codec.c API_SRCS-yes += src/vpx_image.c -API_SRCS-yes += src/vpx_psnr.c API_SRCS-yes += vpx_codec.h API_SRCS-yes += vpx_codec.mk API_SRCS-yes += vpx_frame_buffer.h diff --git a/libs/libvpx/vpx/vpx_decoder.h b/libs/libvpx/vpx/vpx_decoder.h index 62fd919756..2ff12112bc 100644 --- a/libs/libvpx/vpx/vpx_decoder.h +++ b/libs/libvpx/vpx/vpx_decoder.h @@ -32,347 +32,334 @@ extern "C" { #include "./vpx_codec.h" #include "./vpx_frame_buffer.h" - /*!\brief Current ABI version number - * - * \internal - * If this file is altered in any way that changes the ABI, this value - * must be bumped. Examples include, but are not limited to, changing - * types, removing or reassigning enums, adding/removing/rearranging - * fields to structures - */ -#define VPX_DECODER_ABI_VERSION (3 + VPX_CODEC_ABI_VERSION) /**<\hideinitializer*/ +/*!\brief Current ABI version number + * + * \internal + * If this file is altered in any way that changes the ABI, this value + * must be bumped. 
Examples include, but are not limited to, changing + * types, removing or reassigning enums, adding/removing/rearranging + * fields to structures + */ +#define VPX_DECODER_ABI_VERSION \ + (3 + VPX_CODEC_ABI_VERSION) /**<\hideinitializer*/ - /*! \brief Decoder capabilities bitfield - * - * Each decoder advertises the capabilities it supports as part of its - * ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces - * or functionality, and are not required to be supported by a decoder. - * - * The available flags are specified by VPX_CODEC_CAP_* defines. - */ -#define VPX_CODEC_CAP_PUT_SLICE 0x10000 /**< Will issue put_slice callbacks */ -#define VPX_CODEC_CAP_PUT_FRAME 0x20000 /**< Will issue put_frame callbacks */ -#define VPX_CODEC_CAP_POSTPROC 0x40000 /**< Can postprocess decoded frame */ -#define VPX_CODEC_CAP_ERROR_CONCEALMENT 0x80000 /**< Can conceal errors due to - packet loss */ -#define VPX_CODEC_CAP_INPUT_FRAGMENTS 0x100000 /**< Can receive encoded frames - one fragment at a time */ +/*! \brief Decoder capabilities bitfield + * + * Each decoder advertises the capabilities it supports as part of its + * ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces + * or functionality, and are not required to be supported by a decoder. + * + * The available flags are specified by VPX_CODEC_CAP_* defines. + */ +#define VPX_CODEC_CAP_PUT_SLICE 0x10000 /**< Will issue put_slice callbacks */ +#define VPX_CODEC_CAP_PUT_FRAME 0x20000 /**< Will issue put_frame callbacks */ +#define VPX_CODEC_CAP_POSTPROC 0x40000 /**< Can postprocess decoded frame */ +/*!\brief Can conceal errors due to packet loss */ +#define VPX_CODEC_CAP_ERROR_CONCEALMENT 0x80000 +/*!\brief Can receive encoded frames one fragment at a time */ +#define VPX_CODEC_CAP_INPUT_FRAGMENTS 0x100000 - /*! \brief Initialization-time Feature Enabling - * - * Certain codec features must be known at initialization time, to allow for - * proper memory allocation. 
- * - * The available flags are specified by VPX_CODEC_USE_* defines. - */ -#define VPX_CODEC_CAP_FRAME_THREADING 0x200000 /**< Can support frame-based - multi-threading */ -#define VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER 0x400000 /**< Can support external - frame buffers */ +/*! \brief Initialization-time Feature Enabling + * + * Certain codec features must be known at initialization time, to allow for + * proper memory allocation. + * + * The available flags are specified by VPX_CODEC_USE_* defines. + */ +/*!\brief Can support frame-based multi-threading */ +#define VPX_CODEC_CAP_FRAME_THREADING 0x200000 +/*!brief Can support external frame buffers */ +#define VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER 0x400000 -#define VPX_CODEC_USE_POSTPROC 0x10000 /**< Postprocess decoded frame */ -#define VPX_CODEC_USE_ERROR_CONCEALMENT 0x20000 /**< Conceal errors in decoded - frames */ -#define VPX_CODEC_USE_INPUT_FRAGMENTS 0x40000 /**< The input frame should be - passed to the decoder one - fragment at a time */ -#define VPX_CODEC_USE_FRAME_THREADING 0x80000 /**< Enable frame-based - multi-threading */ +#define VPX_CODEC_USE_POSTPROC 0x10000 /**< Postprocess decoded frame */ +/*!\brief Conceal errors in decoded frames */ +#define VPX_CODEC_USE_ERROR_CONCEALMENT 0x20000 +/*!\brief The input frame should be passed to the decoder one fragment at a + * time */ +#define VPX_CODEC_USE_INPUT_FRAGMENTS 0x40000 +/*!\brief Enable frame-based multi-threading */ +#define VPX_CODEC_USE_FRAME_THREADING 0x80000 - /*!\brief Stream properties - * - * This structure is used to query or set properties of the decoded - * stream. Algorithms may extend this structure with data specific - * to their bitstream by setting the sz member appropriately. 
- */ - typedef struct vpx_codec_stream_info { - unsigned int sz; /**< Size of this structure */ - unsigned int w; /**< Width (or 0 for unknown/default) */ - unsigned int h; /**< Height (or 0 for unknown/default) */ - unsigned int is_kf; /**< Current frame is a keyframe */ - } vpx_codec_stream_info_t; +/*!\brief Stream properties + * + * This structure is used to query or set properties of the decoded + * stream. Algorithms may extend this structure with data specific + * to their bitstream by setting the sz member appropriately. + */ +typedef struct vpx_codec_stream_info { + unsigned int sz; /**< Size of this structure */ + unsigned int w; /**< Width (or 0 for unknown/default) */ + unsigned int h; /**< Height (or 0 for unknown/default) */ + unsigned int is_kf; /**< Current frame is a keyframe */ +} vpx_codec_stream_info_t; - /* REQUIRED FUNCTIONS - * - * The following functions are required to be implemented for all decoders. - * They represent the base case functionality expected of all decoders. - */ +/* REQUIRED FUNCTIONS + * + * The following functions are required to be implemented for all decoders. + * They represent the base case functionality expected of all decoders. + */ +/*!\brief Initialization Configurations + * + * This structure is used to pass init time configuration options to the + * decoder. + */ +typedef struct vpx_codec_dec_cfg { + unsigned int threads; /**< Maximum number of threads to use, default 1 */ + unsigned int w; /**< Width */ + unsigned int h; /**< Height */ +} vpx_codec_dec_cfg_t; /**< alias for struct vpx_codec_dec_cfg */ - /*!\brief Initialization Configurations - * - * This structure is used to pass init time configuration options to the - * decoder. 
- */ - typedef struct vpx_codec_dec_cfg { - unsigned int threads; /**< Maximum number of threads to use, default 1 */ - unsigned int w; /**< Width */ - unsigned int h; /**< Height */ - } vpx_codec_dec_cfg_t; /**< alias for struct vpx_codec_dec_cfg */ +/*!\brief Initialize a decoder instance + * + * Initializes a decoder context using the given interface. Applications + * should call the vpx_codec_dec_init convenience macro instead of this + * function directly, to ensure that the ABI version number parameter + * is properly initialized. + * + * If the library was configured with --disable-multithread, this call + * is not thread safe and should be guarded with a lock if being used + * in a multithreaded context. + * + * \param[in] ctx Pointer to this instance's context. + * \param[in] iface Pointer to the algorithm interface to use. + * \param[in] cfg Configuration to use, if known. May be NULL. + * \param[in] flags Bitfield of VPX_CODEC_USE_* flags + * \param[in] ver ABI version number. Must be set to + * VPX_DECODER_ABI_VERSION + * \retval #VPX_CODEC_OK + * The decoder algorithm initialized. + * \retval #VPX_CODEC_MEM_ERROR + * Memory allocation failed. + */ +vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx, + vpx_codec_iface_t *iface, + const vpx_codec_dec_cfg_t *cfg, + vpx_codec_flags_t flags, int ver); - - /*!\brief Initialize a decoder instance - * - * Initializes a decoder context using the given interface. Applications - * should call the vpx_codec_dec_init convenience macro instead of this - * function directly, to ensure that the ABI version number parameter - * is properly initialized. - * - * If the library was configured with --disable-multithread, this call - * is not thread safe and should be guarded with a lock if being used - * in a multithreaded context. - * - * \param[in] ctx Pointer to this instance's context. - * \param[in] iface Pointer to the algorithm interface to use. - * \param[in] cfg Configuration to use, if known. 
May be NULL. - * \param[in] flags Bitfield of VPX_CODEC_USE_* flags - * \param[in] ver ABI version number. Must be set to - * VPX_DECODER_ABI_VERSION - * \retval #VPX_CODEC_OK - * The decoder algorithm initialized. - * \retval #VPX_CODEC_MEM_ERROR - * Memory allocation failed. - */ - vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx, - vpx_codec_iface_t *iface, - const vpx_codec_dec_cfg_t *cfg, - vpx_codec_flags_t flags, - int ver); - - /*!\brief Convenience macro for vpx_codec_dec_init_ver() - * - * Ensures the ABI version parameter is properly set. - */ +/*!\brief Convenience macro for vpx_codec_dec_init_ver() + * + * Ensures the ABI version parameter is properly set. + */ #define vpx_codec_dec_init(ctx, iface, cfg, flags) \ vpx_codec_dec_init_ver(ctx, iface, cfg, flags, VPX_DECODER_ABI_VERSION) +/*!\brief Parse stream info from a buffer + * + * Performs high level parsing of the bitstream. Construction of a decoder + * context is not necessary. Can be used to determine if the bitstream is + * of the proper format, and to extract information from the stream. + * + * \param[in] iface Pointer to the algorithm interface + * \param[in] data Pointer to a block of data to parse + * \param[in] data_sz Size of the data buffer + * \param[in,out] si Pointer to stream info to update. The size member + * \ref MUST be properly initialized, but \ref MAY be + * clobbered by the algorithm. This parameter \ref MAY + * be NULL. + * + * \retval #VPX_CODEC_OK + * Bitstream is parsable and stream information updated + */ +vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface, + const uint8_t *data, + unsigned int data_sz, + vpx_codec_stream_info_t *si); - /*!\brief Parse stream info from a buffer - * - * Performs high level parsing of the bitstream. Construction of a decoder - * context is not necessary. Can be used to determine if the bitstream is - * of the proper format, and to extract information from the stream. 
- * - * \param[in] iface Pointer to the algorithm interface - * \param[in] data Pointer to a block of data to parse - * \param[in] data_sz Size of the data buffer - * \param[in,out] si Pointer to stream info to update. The size member - * \ref MUST be properly initialized, but \ref MAY be - * clobbered by the algorithm. This parameter \ref MAY - * be NULL. - * - * \retval #VPX_CODEC_OK - * Bitstream is parsable and stream information updated - */ - vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface, - const uint8_t *data, - unsigned int data_sz, - vpx_codec_stream_info_t *si); +/*!\brief Return information about the current stream. + * + * Returns information about the stream that has been parsed during decoding. + * + * \param[in] ctx Pointer to this instance's context + * \param[in,out] si Pointer to stream info to update. The size member + * \ref MUST be properly initialized, but \ref MAY be + * clobbered by the algorithm. This parameter \ref MAY + * be NULL. + * + * \retval #VPX_CODEC_OK + * Bitstream is parsable and stream information updated + */ +vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx, + vpx_codec_stream_info_t *si); +/*!\brief Decode data + * + * Processes a buffer of coded data. If the processing results in a new + * decoded frame becoming available, PUT_SLICE and PUT_FRAME events may be + * generated, as appropriate. Encoded data \ref MUST be passed in DTS (decode + * time stamp) order. Frames produced will always be in PTS (presentation + * time stamp) order. + * If the decoder is configured with VPX_CODEC_USE_INPUT_FRAGMENTS enabled, + * data and data_sz can contain a fragment of the encoded frame. Fragment + * \#n must contain at least partition \#n, but can also contain subsequent + * partitions (\#n+1 - \#n+i), and if so, fragments \#n+1, .., \#n+i must + * be empty. When no more data is available, this function should be called + * with NULL as data and 0 as data_sz. 
The memory passed to this function + * must be available until the frame has been decoded. + * + * \param[in] ctx Pointer to this instance's context + * \param[in] data Pointer to this block of new coded data. If + * NULL, a VPX_CODEC_CB_PUT_FRAME event is posted + * for the previously decoded frame. + * \param[in] data_sz Size of the coded data, in bytes. + * \param[in] user_priv Application specific data to associate with + * this frame. + * \param[in] deadline Soft deadline the decoder should attempt to meet, + * in us. Set to zero for unlimited. + * + * \return Returns #VPX_CODEC_OK if the coded data was processed completely + * and future pictures can be decoded without error. Otherwise, + * see the descriptions of the other error codes in ::vpx_codec_err_t + * for recoverability capabilities. + */ +vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx, const uint8_t *data, + unsigned int data_sz, void *user_priv, + long deadline); - /*!\brief Return information about the current stream. - * - * Returns information about the stream that has been parsed during decoding. - * - * \param[in] ctx Pointer to this instance's context - * \param[in,out] si Pointer to stream info to update. The size member - * \ref MUST be properly initialized, but \ref MAY be - * clobbered by the algorithm. This parameter \ref MAY - * be NULL. - * - * \retval #VPX_CODEC_OK - * Bitstream is parsable and stream information updated - */ - vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx, - vpx_codec_stream_info_t *si); +/*!\brief Decoded frames iterator + * + * Iterates over a list of the frames available for display. The iterator + * storage should be initialized to NULL to start the iteration. Iteration is + * complete when this function returns NULL. + * + * The list of available frames becomes valid upon completion of the + * vpx_codec_decode call, and remains valid until the next call to + * vpx_codec_decode. 
+ * + * \param[in] ctx Pointer to this instance's context + * \param[in,out] iter Iterator storage, initialized to NULL + * + * \return Returns a pointer to an image, if one is ready for display. Frames + * produced will always be in PTS (presentation time stamp) order. + */ +vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx, vpx_codec_iter_t *iter); +/*!\defgroup cap_put_frame Frame-Based Decoding Functions + * + * The following functions are required to be implemented for all decoders + * that advertise the VPX_CODEC_CAP_PUT_FRAME capability. Calling these + * functions + * for codecs that don't advertise this capability will result in an error + * code being returned, usually VPX_CODEC_ERROR + * @{ + */ - /*!\brief Decode data - * - * Processes a buffer of coded data. If the processing results in a new - * decoded frame becoming available, PUT_SLICE and PUT_FRAME events may be - * generated, as appropriate. Encoded data \ref MUST be passed in DTS (decode - * time stamp) order. Frames produced will always be in PTS (presentation - * time stamp) order. - * If the decoder is configured with VPX_CODEC_USE_INPUT_FRAGMENTS enabled, - * data and data_sz can contain a fragment of the encoded frame. Fragment - * \#n must contain at least partition \#n, but can also contain subsequent - * partitions (\#n+1 - \#n+i), and if so, fragments \#n+1, .., \#n+i must - * be empty. When no more data is available, this function should be called - * with NULL as data and 0 as data_sz. The memory passed to this function - * must be available until the frame has been decoded. - * - * \param[in] ctx Pointer to this instance's context - * \param[in] data Pointer to this block of new coded data. If - * NULL, a VPX_CODEC_CB_PUT_FRAME event is posted - * for the previously decoded frame. - * \param[in] data_sz Size of the coded data, in bytes. - * \param[in] user_priv Application specific data to associate with - * this frame. 
- * \param[in] deadline Soft deadline the decoder should attempt to meet, - * in us. Set to zero for unlimited. - * - * \return Returns #VPX_CODEC_OK if the coded data was processed completely - * and future pictures can be decoded without error. Otherwise, - * see the descriptions of the other error codes in ::vpx_codec_err_t - * for recoverability capabilities. - */ - vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx, - const uint8_t *data, - unsigned int data_sz, - void *user_priv, - long deadline); +/*!\brief put frame callback prototype + * + * This callback is invoked by the decoder to notify the application of + * the availability of decoded image data. + */ +typedef void (*vpx_codec_put_frame_cb_fn_t)(void *user_priv, + const vpx_image_t *img); +/*!\brief Register for notification of frame completion. + * + * Registers a given function to be called when a decoded frame is + * available. + * + * \param[in] ctx Pointer to this instance's context + * \param[in] cb Pointer to the callback function + * \param[in] user_priv User's private data + * + * \retval #VPX_CODEC_OK + * Callback successfully registered. + * \retval #VPX_CODEC_ERROR + * Decoder context not initialized, or algorithm not capable of + * posting slice completion. + */ +vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx, + vpx_codec_put_frame_cb_fn_t cb, + void *user_priv); - /*!\brief Decoded frames iterator - * - * Iterates over a list of the frames available for display. The iterator - * storage should be initialized to NULL to start the iteration. Iteration is - * complete when this function returns NULL. - * - * The list of available frames becomes valid upon completion of the - * vpx_codec_decode call, and remains valid until the next call to vpx_codec_decode. - * - * \param[in] ctx Pointer to this instance's context - * \param[in,out] iter Iterator storage, initialized to NULL - * - * \return Returns a pointer to an image, if one is ready for display. 
Frames - * produced will always be in PTS (presentation time stamp) order. - */ - vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx, - vpx_codec_iter_t *iter); +/*!@} - end defgroup cap_put_frame */ +/*!\defgroup cap_put_slice Slice-Based Decoding Functions + * + * The following functions are required to be implemented for all decoders + * that advertise the VPX_CODEC_CAP_PUT_SLICE capability. Calling these + * functions + * for codecs that don't advertise this capability will result in an error + * code being returned, usually VPX_CODEC_ERROR + * @{ + */ - /*!\defgroup cap_put_frame Frame-Based Decoding Functions - * - * The following functions are required to be implemented for all decoders - * that advertise the VPX_CODEC_CAP_PUT_FRAME capability. Calling these functions - * for codecs that don't advertise this capability will result in an error - * code being returned, usually VPX_CODEC_ERROR - * @{ - */ +/*!\brief put slice callback prototype + * + * This callback is invoked by the decoder to notify the application of + * the availability of partially decoded image data. The + */ +typedef void (*vpx_codec_put_slice_cb_fn_t)(void *user_priv, + const vpx_image_t *img, + const vpx_image_rect_t *valid, + const vpx_image_rect_t *update); - /*!\brief put frame callback prototype - * - * This callback is invoked by the decoder to notify the application of - * the availability of decoded image data. - */ - typedef void (*vpx_codec_put_frame_cb_fn_t)(void *user_priv, - const vpx_image_t *img); +/*!\brief Register for notification of slice completion. + * + * Registers a given function to be called when a decoded slice is + * available. + * + * \param[in] ctx Pointer to this instance's context + * \param[in] cb Pointer to the callback function + * \param[in] user_priv User's private data + * + * \retval #VPX_CODEC_OK + * Callback successfully registered. 
+ * \retval #VPX_CODEC_ERROR + * Decoder context not initialized, or algorithm not capable of + * posting slice completion. + */ +vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx, + vpx_codec_put_slice_cb_fn_t cb, + void *user_priv); +/*!@} - end defgroup cap_put_slice*/ - /*!\brief Register for notification of frame completion. - * - * Registers a given function to be called when a decoded frame is - * available. - * - * \param[in] ctx Pointer to this instance's context - * \param[in] cb Pointer to the callback function - * \param[in] user_priv User's private data - * - * \retval #VPX_CODEC_OK - * Callback successfully registered. - * \retval #VPX_CODEC_ERROR - * Decoder context not initialized, or algorithm not capable of - * posting slice completion. - */ - vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx, - vpx_codec_put_frame_cb_fn_t cb, - void *user_priv); +/*!\defgroup cap_external_frame_buffer External Frame Buffer Functions + * + * The following section is required to be implemented for all decoders + * that advertise the VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER capability. + * Calling this function for codecs that don't advertise this capability + * will result in an error code being returned, usually VPX_CODEC_ERROR. + * + * \note + * Currently this only works with VP9. + * @{ + */ +/*!\brief Pass in external frame buffers for the decoder to use. + * + * Registers functions to be called when libvpx needs a frame buffer + * to decode the current frame and a function to be called when libvpx does + * not internally reference the frame buffer. This set function must + * be called before the first call to decode or libvpx will assume the + * default behavior of allocating frame buffers internally. 
+ * + * \param[in] ctx Pointer to this instance's context + * \param[in] cb_get Pointer to the get callback function + * \param[in] cb_release Pointer to the release callback function + * \param[in] cb_priv Callback's private data + * + * \retval #VPX_CODEC_OK + * External frame buffers will be used by libvpx. + * \retval #VPX_CODEC_INVALID_PARAM + * One or more of the callbacks were NULL. + * \retval #VPX_CODEC_ERROR + * Decoder context not initialized, or algorithm not capable of + * using external frame buffers. + * + * \note + * When decoding VP9, the application may be required to pass in at least + * #VP9_MAXIMUM_REF_BUFFERS + #VPX_MAXIMUM_WORK_BUFFERS external frame + * buffers. + */ +vpx_codec_err_t vpx_codec_set_frame_buffer_functions( + vpx_codec_ctx_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get, + vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv); - /*!@} - end defgroup cap_put_frame */ +/*!@} - end defgroup cap_external_frame_buffer */ - /*!\defgroup cap_put_slice Slice-Based Decoding Functions - * - * The following functions are required to be implemented for all decoders - * that advertise the VPX_CODEC_CAP_PUT_SLICE capability. Calling these functions - * for codecs that don't advertise this capability will result in an error - * code being returned, usually VPX_CODEC_ERROR - * @{ - */ - - /*!\brief put slice callback prototype - * - * This callback is invoked by the decoder to notify the application of - * the availability of partially decoded image data. The - */ - typedef void (*vpx_codec_put_slice_cb_fn_t)(void *user_priv, - const vpx_image_t *img, - const vpx_image_rect_t *valid, - const vpx_image_rect_t *update); - - - /*!\brief Register for notification of slice completion. - * - * Registers a given function to be called when a decoded slice is - * available. 
- * - * \param[in] ctx Pointer to this instance's context - * \param[in] cb Pointer to the callback function - * \param[in] user_priv User's private data - * - * \retval #VPX_CODEC_OK - * Callback successfully registered. - * \retval #VPX_CODEC_ERROR - * Decoder context not initialized, or algorithm not capable of - * posting slice completion. - */ - vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx, - vpx_codec_put_slice_cb_fn_t cb, - void *user_priv); - - - /*!@} - end defgroup cap_put_slice*/ - - /*!\defgroup cap_external_frame_buffer External Frame Buffer Functions - * - * The following section is required to be implemented for all decoders - * that advertise the VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER capability. - * Calling this function for codecs that don't advertise this capability - * will result in an error code being returned, usually VPX_CODEC_ERROR. - * - * \note - * Currently this only works with VP9. - * @{ - */ - - /*!\brief Pass in external frame buffers for the decoder to use. - * - * Registers functions to be called when libvpx needs a frame buffer - * to decode the current frame and a function to be called when libvpx does - * not internally reference the frame buffer. This set function must - * be called before the first call to decode or libvpx will assume the - * default behavior of allocating frame buffers internally. - * - * \param[in] ctx Pointer to this instance's context - * \param[in] cb_get Pointer to the get callback function - * \param[in] cb_release Pointer to the release callback function - * \param[in] cb_priv Callback's private data - * - * \retval #VPX_CODEC_OK - * External frame buffers will be used by libvpx. - * \retval #VPX_CODEC_INVALID_PARAM - * One or more of the callbacks were NULL. - * \retval #VPX_CODEC_ERROR - * Decoder context not initialized, or algorithm not capable of - * using external frame buffers. 
- * - * \note - * When decoding VP9, the application may be required to pass in at least - * #VP9_MAXIMUM_REF_BUFFERS + #VPX_MAXIMUM_WORK_BUFFERS external frame - * buffers. - */ - vpx_codec_err_t vpx_codec_set_frame_buffer_functions( - vpx_codec_ctx_t *ctx, - vpx_get_frame_buffer_cb_fn_t cb_get, - vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv); - - /*!@} - end defgroup cap_external_frame_buffer */ - - /*!@} - end defgroup decoder*/ +/*!@} - end defgroup decoder*/ #ifdef __cplusplus } #endif #endif // VPX_VPX_DECODER_H_ - diff --git a/libs/libvpx/vpx/vpx_encoder.h b/libs/libvpx/vpx/vpx_encoder.h index 955e873519..28fcd5f999 100644 --- a/libs/libvpx/vpx/vpx_encoder.h +++ b/libs/libvpx/vpx/vpx_encoder.h @@ -31,1013 +31,950 @@ extern "C" { #include "./vpx_codec.h" - /*! Temporal Scalability: Maximum length of the sequence defining frame - * layer membership - */ +/*! Temporal Scalability: Maximum length of the sequence defining frame + * layer membership + */ #define VPX_TS_MAX_PERIODICITY 16 - /*! Temporal Scalability: Maximum number of coding layers */ -#define VPX_TS_MAX_LAYERS 5 +/*! Temporal Scalability: Maximum number of coding layers */ +#define VPX_TS_MAX_LAYERS 5 - /*!\deprecated Use #VPX_TS_MAX_PERIODICITY instead. */ +/*!\deprecated Use #VPX_TS_MAX_PERIODICITY instead. */ #define MAX_PERIODICITY VPX_TS_MAX_PERIODICITY /*! Temporal+Spatial Scalability: Maximum number of coding layers */ -#define VPX_MAX_LAYERS 12 // 3 temporal + 4 spatial layers are allowed. +#define VPX_MAX_LAYERS 12 // 3 temporal + 4 spatial layers are allowed. /*!\deprecated Use #VPX_MAX_LAYERS instead. */ -#define MAX_LAYERS VPX_MAX_LAYERS // 3 temporal + 4 spatial layers allowed. +#define MAX_LAYERS VPX_MAX_LAYERS // 3 temporal + 4 spatial layers allowed. /*! Spatial Scalability: Maximum number of coding layers */ -#define VPX_SS_MAX_LAYERS 5 +#define VPX_SS_MAX_LAYERS 5 /*! 
Spatial Scalability: Default number of coding layers */ -#define VPX_SS_DEFAULT_LAYERS 1 +#define VPX_SS_DEFAULT_LAYERS 1 - /*!\brief Current ABI version number - * - * \internal - * If this file is altered in any way that changes the ABI, this value - * must be bumped. Examples include, but are not limited to, changing - * types, removing or reassigning enums, adding/removing/rearranging - * fields to structures - */ -#define VPX_ENCODER_ABI_VERSION (5 + VPX_CODEC_ABI_VERSION) /**<\hideinitializer*/ +/*!\brief Current ABI version number + * + * \internal + * If this file is altered in any way that changes the ABI, this value + * must be bumped. Examples include, but are not limited to, changing + * types, removing or reassigning enums, adding/removing/rearranging + * fields to structures + */ +#define VPX_ENCODER_ABI_VERSION \ + (5 + VPX_CODEC_ABI_VERSION) /**<\hideinitializer*/ +/*! \brief Encoder capabilities bitfield + * + * Each encoder advertises the capabilities it supports as part of its + * ::vpx_codec_iface_t interface structure. Capabilities are extra + * interfaces or functionality, and are not required to be supported + * by an encoder. + * + * The available flags are specified by VPX_CODEC_CAP_* defines. + */ +#define VPX_CODEC_CAP_PSNR 0x10000 /**< Can issue PSNR packets */ - /*! \brief Encoder capabilities bitfield - * - * Each encoder advertises the capabilities it supports as part of its - * ::vpx_codec_iface_t interface structure. Capabilities are extra - * interfaces or functionality, and are not required to be supported - * by an encoder. - * - * The available flags are specified by VPX_CODEC_CAP_* defines. - */ -#define VPX_CODEC_CAP_PSNR 0x10000 /**< Can issue PSNR packets */ - - /*! Can output one partition at a time. Each partition is returned in its - * own VPX_CODEC_CX_FRAME_PKT, with the FRAME_IS_FRAGMENT flag set for - * every partition but the last. In this mode all frames are always - * returned partition by partition. 
- */ -#define VPX_CODEC_CAP_OUTPUT_PARTITION 0x20000 +/*! Can output one partition at a time. Each partition is returned in its + * own VPX_CODEC_CX_FRAME_PKT, with the FRAME_IS_FRAGMENT flag set for + * every partition but the last. In this mode all frames are always + * returned partition by partition. + */ +#define VPX_CODEC_CAP_OUTPUT_PARTITION 0x20000 /*! Can support input images at greater than 8 bitdepth. */ -#define VPX_CODEC_CAP_HIGHBITDEPTH 0x40000 +#define VPX_CODEC_CAP_HIGHBITDEPTH 0x40000 - /*! \brief Initialization-time Feature Enabling - * - * Certain codec features must be known at initialization time, to allow - * for proper memory allocation. - * - * The available flags are specified by VPX_CODEC_USE_* defines. - */ -#define VPX_CODEC_USE_PSNR 0x10000 /**< Calculate PSNR on each frame */ -#define VPX_CODEC_USE_OUTPUT_PARTITION 0x20000 /**< Make the encoder output one - partition at a time. */ +/*! \brief Initialization-time Feature Enabling + * + * Certain codec features must be known at initialization time, to allow + * for proper memory allocation. + * + * The available flags are specified by VPX_CODEC_USE_* defines. + */ +#define VPX_CODEC_USE_PSNR 0x10000 /**< Calculate PSNR on each frame */ +/*!\brief Make the encoder output one partition at a time. */ +#define VPX_CODEC_USE_OUTPUT_PARTITION 0x20000 #define VPX_CODEC_USE_HIGHBITDEPTH 0x40000 /**< Use high bitdepth */ +/*!\brief Generic fixed size buffer structure + * + * This structure is able to hold a reference to any fixed size buffer. + */ +typedef struct vpx_fixed_buf { + void *buf; /**< Pointer to the data */ + size_t sz; /**< Length of the buffer, in chars */ +} vpx_fixed_buf_t; /**< alias for struct vpx_fixed_buf */ - /*!\brief Generic fixed size buffer structure - * - * This structure is able to hold a reference to any fixed size buffer. 
- */ - typedef struct vpx_fixed_buf { - void *buf; /**< Pointer to the data */ - size_t sz; /**< Length of the buffer, in chars */ - } vpx_fixed_buf_t; /**< alias for struct vpx_fixed_buf */ +/*!\brief Time Stamp Type + * + * An integer, which when multiplied by the stream's time base, provides + * the absolute time of a sample. + */ +typedef int64_t vpx_codec_pts_t; +/*!\brief Compressed Frame Flags + * + * This type represents a bitfield containing information about a compressed + * frame that may be useful to an application. The most significant 16 bits + * can be used by an algorithm to provide additional detail, for example to + * support frame types that are codec specific (MPEG-1 D-frames for example) + */ +typedef uint32_t vpx_codec_frame_flags_t; +#define VPX_FRAME_IS_KEY 0x1 /**< frame is the start of a GOP */ +/*!\brief frame can be dropped without affecting the stream (no future frame + * depends on this one) */ +#define VPX_FRAME_IS_DROPPABLE 0x2 +/*!\brief frame should be decoded but will not be shown */ +#define VPX_FRAME_IS_INVISIBLE 0x4 +/*!\brief this is a fragment of the encoded frame */ +#define VPX_FRAME_IS_FRAGMENT 0x8 - /*!\brief Time Stamp Type - * - * An integer, which when multiplied by the stream's time base, provides - * the absolute time of a sample. - */ - typedef int64_t vpx_codec_pts_t; +/*!\brief Error Resilient flags + * + * These flags define which error resilient features to enable in the + * encoder. The flags are specified through the + * vpx_codec_enc_cfg::g_error_resilient variable. + */ +typedef uint32_t vpx_codec_er_flags_t; +/*!\brief Improve resiliency against losses of whole frames */ +#define VPX_ERROR_RESILIENT_DEFAULT 0x1 +/*!\brief The frame partitions are independently decodable by the bool decoder, + * meaning that partitions can be decoded even though earlier partitions have + * been lost. Note that intra prediction is still done over the partition + * boundary. 
*/ +#define VPX_ERROR_RESILIENT_PARTITIONS 0x2 - - /*!\brief Compressed Frame Flags - * - * This type represents a bitfield containing information about a compressed - * frame that may be useful to an application. The most significant 16 bits - * can be used by an algorithm to provide additional detail, for example to - * support frame types that are codec specific (MPEG-1 D-frames for example) - */ - typedef uint32_t vpx_codec_frame_flags_t; -#define VPX_FRAME_IS_KEY 0x1 /**< frame is the start of a GOP */ -#define VPX_FRAME_IS_DROPPABLE 0x2 /**< frame can be dropped without affecting - the stream (no future frame depends on - this one) */ -#define VPX_FRAME_IS_INVISIBLE 0x4 /**< frame should be decoded but will not - be shown */ -#define VPX_FRAME_IS_FRAGMENT 0x8 /**< this is a fragment of the encoded - frame */ - - /*!\brief Error Resilient flags - * - * These flags define which error resilient features to enable in the - * encoder. The flags are specified through the - * vpx_codec_enc_cfg::g_error_resilient variable. - */ - typedef uint32_t vpx_codec_er_flags_t; -#define VPX_ERROR_RESILIENT_DEFAULT 0x1 /**< Improve resiliency against - losses of whole frames */ -#define VPX_ERROR_RESILIENT_PARTITIONS 0x2 /**< The frame partitions are - independently decodable by the - bool decoder, meaning that - partitions can be decoded even - though earlier partitions have - been lost. Note that intra - prediction is still done over - the partition boundary. */ - - /*!\brief Encoder output packet variants - * - * This enumeration lists the different kinds of data packets that can be - * returned by calls to vpx_codec_get_cx_data(). Algorithms \ref MAY - * extend this list to provide additional functionality. 
- */ - enum vpx_codec_cx_pkt_kind { - VPX_CODEC_CX_FRAME_PKT, /**< Compressed video frame */ - VPX_CODEC_STATS_PKT, /**< Two-pass statistics for this frame */ - VPX_CODEC_FPMB_STATS_PKT, /**< first pass mb statistics for this frame */ - VPX_CODEC_PSNR_PKT, /**< PSNR statistics for this frame */ - // Spatial SVC is still experimental and may be removed before the next ABI - // bump. +/*!\brief Encoder output packet variants + * + * This enumeration lists the different kinds of data packets that can be + * returned by calls to vpx_codec_get_cx_data(). Algorithms \ref MAY + * extend this list to provide additional functionality. + */ +enum vpx_codec_cx_pkt_kind { + VPX_CODEC_CX_FRAME_PKT, /**< Compressed video frame */ + VPX_CODEC_STATS_PKT, /**< Two-pass statistics for this frame */ + VPX_CODEC_FPMB_STATS_PKT, /**< first pass mb statistics for this frame */ + VPX_CODEC_PSNR_PKT, /**< PSNR statistics for this frame */ +// Spatial SVC is still experimental and may be removed before the next ABI +// bump. #if VPX_ENCODER_ABI_VERSION > (5 + VPX_CODEC_ABI_VERSION) - VPX_CODEC_SPATIAL_SVC_LAYER_SIZES, /**< Sizes for each layer in this frame*/ - VPX_CODEC_SPATIAL_SVC_LAYER_PSNR, /**< PSNR for each layer in this frame*/ + VPX_CODEC_SPATIAL_SVC_LAYER_SIZES, /**< Sizes for each layer in this frame*/ + VPX_CODEC_SPATIAL_SVC_LAYER_PSNR, /**< PSNR for each layer in this frame*/ #endif - VPX_CODEC_CUSTOM_PKT = 256 /**< Algorithm extensions */ - }; + VPX_CODEC_CUSTOM_PKT = 256 /**< Algorithm extensions */ +}; - - /*!\brief Encoder output packet - * - * This structure contains the different kinds of output data the encoder - * may produce while compressing a frame. 
- */ - typedef struct vpx_codec_cx_pkt { - enum vpx_codec_cx_pkt_kind kind; /**< packet variant */ - union { - struct { - void *buf; /**< compressed data buffer */ - size_t sz; /**< length of compressed data */ - vpx_codec_pts_t pts; /**< time stamp to show frame - (in timebase units) */ - unsigned long duration; /**< duration to show frame - (in timebase units) */ - vpx_codec_frame_flags_t flags; /**< flags for this frame */ - int partition_id; /**< the partition id - defines the decoding order - of the partitions. Only - applicable when "output partition" - mode is enabled. First partition - has id 0.*/ - - } frame; /**< data for compressed frame packet */ - vpx_fixed_buf_t twopass_stats; /**< data for two-pass packet */ - vpx_fixed_buf_t firstpass_mb_stats; /**< first pass mb packet */ - struct vpx_psnr_pkt { - unsigned int samples[4]; /**< Number of samples, total/y/u/v */ - uint64_t sse[4]; /**< sum squared error, total/y/u/v */ - double psnr[4]; /**< PSNR, total/y/u/v */ - } psnr; /**< data for PSNR packet */ - vpx_fixed_buf_t raw; /**< data for arbitrary packets */ - // Spatial SVC is still experimental and may be removed before the next - // ABI bump. +/*!\brief Encoder output packet + * + * This structure contains the different kinds of output data the encoder + * may produce while compressing a frame. + */ +typedef struct vpx_codec_cx_pkt { + enum vpx_codec_cx_pkt_kind kind; /**< packet variant */ + union { + struct { + void *buf; /**< compressed data buffer */ + size_t sz; /**< length of compressed data */ + /*!\brief time stamp to show frame (in timebase units) */ + vpx_codec_pts_t pts; + /*!\brief duration to show frame (in timebase units) */ + unsigned long duration; + vpx_codec_frame_flags_t flags; /**< flags for this frame */ + /*!\brief the partition id defines the decoding order of the partitions. + * Only applicable when "output partition" mode is enabled. 
First + * partition has id 0.*/ + int partition_id; + } frame; /**< data for compressed frame packet */ + vpx_fixed_buf_t twopass_stats; /**< data for two-pass packet */ + vpx_fixed_buf_t firstpass_mb_stats; /**< first pass mb packet */ + struct vpx_psnr_pkt { + unsigned int samples[4]; /**< Number of samples, total/y/u/v */ + uint64_t sse[4]; /**< sum squared error, total/y/u/v */ + double psnr[4]; /**< PSNR, total/y/u/v */ + } psnr; /**< data for PSNR packet */ + vpx_fixed_buf_t raw; /**< data for arbitrary packets */ +// Spatial SVC is still experimental and may be removed before the next +// ABI bump. #if VPX_ENCODER_ABI_VERSION > (5 + VPX_CODEC_ABI_VERSION) - size_t layer_sizes[VPX_SS_MAX_LAYERS]; - struct vpx_psnr_pkt layer_psnr[VPX_SS_MAX_LAYERS]; + size_t layer_sizes[VPX_SS_MAX_LAYERS]; + struct vpx_psnr_pkt layer_psnr[VPX_SS_MAX_LAYERS]; #endif - /* This packet size is fixed to allow codecs to extend this - * interface without having to manage storage for raw packets, - * i.e., if it's smaller than 128 bytes, you can store in the - * packet list directly. - */ - char pad[128 - sizeof(enum vpx_codec_cx_pkt_kind)]; /**< fixed sz */ - } data; /**< packet data */ - } vpx_codec_cx_pkt_t; /**< alias for struct vpx_codec_cx_pkt */ + /* This packet size is fixed to allow codecs to extend this + * interface without having to manage storage for raw packets, + * i.e., if it's smaller than 128 bytes, you can store in the + * packet list directly. + */ + char pad[128 - sizeof(enum vpx_codec_cx_pkt_kind)]; /**< fixed sz */ + } data; /**< packet data */ +} vpx_codec_cx_pkt_t; /**< alias for struct vpx_codec_cx_pkt */ +/*!\brief Encoder return output buffer callback + * + * This callback function, when registered, returns with packets when each + * spatial layer is encoded. + */ +// putting the definitions here for now. 
(agrange: find if there +// is a better place for this) +typedef void (*vpx_codec_enc_output_cx_pkt_cb_fn_t)(vpx_codec_cx_pkt_t *pkt, + void *user_data); - /*!\brief Encoder return output buffer callback - * - * This callback function, when registered, returns with packets when each - * spatial layer is encoded. +/*!\brief Callback function pointer / user data pair storage */ +typedef struct vpx_codec_enc_output_cx_cb_pair { + vpx_codec_enc_output_cx_pkt_cb_fn_t output_cx_pkt; /**< Callback function */ + void *user_priv; /**< Pointer to private data */ +} vpx_codec_priv_output_cx_pkt_cb_pair_t; + +/*!\brief Rational Number + * + * This structure holds a fractional value. + */ +typedef struct vpx_rational { + int num; /**< fraction numerator */ + int den; /**< fraction denominator */ +} vpx_rational_t; /**< alias for struct vpx_rational */ + +/*!\brief Multi-pass Encoding Pass */ +enum vpx_enc_pass { + VPX_RC_ONE_PASS, /**< Single pass mode */ + VPX_RC_FIRST_PASS, /**< First pass of multi-pass mode */ + VPX_RC_LAST_PASS /**< Final pass of multi-pass mode */ +}; + +/*!\brief Rate control mode */ +enum vpx_rc_mode { + VPX_VBR, /**< Variable Bit Rate (VBR) mode */ + VPX_CBR, /**< Constant Bit Rate (CBR) mode */ + VPX_CQ, /**< Constrained Quality (CQ) mode */ + VPX_Q, /**< Constant Quality (Q) mode */ +}; + +/*!\brief Keyframe placement mode. + * + * This enumeration determines whether keyframes are placed automatically by + * the encoder or whether this behavior is disabled. Older releases of this + * SDK were implemented such that VPX_KF_FIXED meant keyframes were disabled. + * This name is confusing for this behavior, so the new symbols to be used + * are VPX_KF_AUTO and VPX_KF_DISABLED. + */ +enum vpx_kf_mode { + VPX_KF_FIXED, /**< deprecated, implies VPX_KF_DISABLED */ + VPX_KF_AUTO, /**< Encoder determines optimal placement automatically */ + VPX_KF_DISABLED = 0 /**< Encoder does not place keyframes. 
*/ +}; + +/*!\brief Encoded Frame Flags + * + * This type indicates a bitfield to be passed to vpx_codec_encode(), defining + * per-frame boolean values. By convention, bits common to all codecs will be + * named VPX_EFLAG_*, and bits specific to an algorithm will be named + * /algo/_eflag_*. The lower order 16 bits are reserved for common use. + */ +typedef long vpx_enc_frame_flags_t; +#define VPX_EFLAG_FORCE_KF (1 << 0) /**< Force this frame to be a keyframe */ + +/*!\brief Encoder configuration structure + * + * This structure contains the encoder settings that have common representations + * across all codecs. This doesn't imply that all codecs support all features, + * however. + */ +typedef struct vpx_codec_enc_cfg { + /* + * generic settings (g) */ - // putting the definitions here for now. (agrange: find if there - // is a better place for this) - typedef void (* vpx_codec_enc_output_cx_pkt_cb_fn_t)(vpx_codec_cx_pkt_t *pkt, - void *user_data); - /*!\brief Callback function pointer / user data pair storage */ - typedef struct vpx_codec_enc_output_cx_cb_pair { - vpx_codec_enc_output_cx_pkt_cb_fn_t output_cx_pkt; /**< Callback function */ - void *user_priv; /**< Pointer to private data */ - } vpx_codec_priv_output_cx_pkt_cb_pair_t; - - /*!\brief Rational Number + /*!\brief Algorithm specific "usage" value * - * This structure holds a fractional value. + * Algorithms may define multiple values for usage, which may convey the + * intent of how the application intends to use the stream. If this value + * is non-zero, consult the documentation for the codec to determine its + * meaning. 
*/ - typedef struct vpx_rational { - int num; /**< fraction numerator */ - int den; /**< fraction denominator */ - } vpx_rational_t; /**< alias for struct vpx_rational */ + unsigned int g_usage; - - /*!\brief Multi-pass Encoding Pass */ - enum vpx_enc_pass { - VPX_RC_ONE_PASS, /**< Single pass mode */ - VPX_RC_FIRST_PASS, /**< First pass of multi-pass mode */ - VPX_RC_LAST_PASS /**< Final pass of multi-pass mode */ - }; - - - /*!\brief Rate control mode */ - enum vpx_rc_mode { - VPX_VBR, /**< Variable Bit Rate (VBR) mode */ - VPX_CBR, /**< Constant Bit Rate (CBR) mode */ - VPX_CQ, /**< Constrained Quality (CQ) mode */ - VPX_Q, /**< Constant Quality (Q) mode */ - }; - - - /*!\brief Keyframe placement mode. + /*!\brief Maximum number of threads to use * - * This enumeration determines whether keyframes are placed automatically by - * the encoder or whether this behavior is disabled. Older releases of this - * SDK were implemented such that VPX_KF_FIXED meant keyframes were disabled. - * This name is confusing for this behavior, so the new symbols to be used - * are VPX_KF_AUTO and VPX_KF_DISABLED. + * For multi-threaded implementations, use no more than this number of + * threads. The codec may use fewer threads than allowed. The value + * 0 is equivalent to the value 1. */ - enum vpx_kf_mode { - VPX_KF_FIXED, /**< deprecated, implies VPX_KF_DISABLED */ - VPX_KF_AUTO, /**< Encoder determines optimal placement automatically */ - VPX_KF_DISABLED = 0 /**< Encoder does not place keyframes. */ - }; + unsigned int g_threads; - - /*!\brief Encoded Frame Flags + /*!\brief Bitstream profile to use * - * This type indicates a bitfield to be passed to vpx_codec_encode(), defining - * per-frame boolean values. By convention, bits common to all codecs will be - * named VPX_EFLAG_*, and bits specific to an algorithm will be named - * /algo/_eflag_*. The lower order 16 bits are reserved for common use. + * Some codecs support a notion of multiple bitstream profiles. 
Typically + * this maps to a set of features that are turned on or off. Often the + * profile to use is determined by the features of the intended decoder. + * Consult the documentation for the codec to determine the valid values + * for this parameter, or set to zero for a sane default. */ - typedef long vpx_enc_frame_flags_t; -#define VPX_EFLAG_FORCE_KF (1<<0) /**< Force this frame to be a keyframe */ + unsigned int g_profile; /**< profile of bitstream to use */ - - /*!\brief Encoder configuration structure + /*!\brief Width of the frame * - * This structure contains the encoder settings that have common representations - * across all codecs. This doesn't imply that all codecs support all features, - * however. + * This value identifies the presentation resolution of the frame, + * in pixels. Note that the frames passed as input to the encoder must + * have this resolution. Frames will be presented by the decoder in this + * resolution, independent of any spatial resampling the encoder may do. */ - typedef struct vpx_codec_enc_cfg { - /* - * generic settings (g) - */ + unsigned int g_w; - /*!\brief Algorithm specific "usage" value - * - * Algorithms may define multiple values for usage, which may convey the - * intent of how the application intends to use the stream. If this value - * is non-zero, consult the documentation for the codec to determine its - * meaning. - */ - unsigned int g_usage; - - - /*!\brief Maximum number of threads to use - * - * For multi-threaded implementations, use no more than this number of - * threads. The codec may use fewer threads than allowed. The value - * 0 is equivalent to the value 1. - */ - unsigned int g_threads; - - - /*!\brief Bitstream profile to use - * - * Some codecs support a notion of multiple bitstream profiles. Typically - * this maps to a set of features that are turned on or off. Often the - * profile to use is determined by the features of the intended decoder. 
- * Consult the documentation for the codec to determine the valid values - * for this parameter, or set to zero for a sane default. - */ - unsigned int g_profile; /**< profile of bitstream to use */ - - - - /*!\brief Width of the frame - * - * This value identifies the presentation resolution of the frame, - * in pixels. Note that the frames passed as input to the encoder must - * have this resolution. Frames will be presented by the decoder in this - * resolution, independent of any spatial resampling the encoder may do. - */ - unsigned int g_w; - - - /*!\brief Height of the frame - * - * This value identifies the presentation resolution of the frame, - * in pixels. Note that the frames passed as input to the encoder must - * have this resolution. Frames will be presented by the decoder in this - * resolution, independent of any spatial resampling the encoder may do. - */ - unsigned int g_h; - - /*!\brief Bit-depth of the codec - * - * This value identifies the bit_depth of the codec, - * Only certain bit-depths are supported as identified in the - * vpx_bit_depth_t enum. - */ - vpx_bit_depth_t g_bit_depth; - - /*!\brief Bit-depth of the input frames - * - * This value identifies the bit_depth of the input frames in bits. - * Note that the frames passed as input to the encoder must have - * this bit-depth. - */ - unsigned int g_input_bit_depth; - - /*!\brief Stream timebase units - * - * Indicates the smallest interval of time, in seconds, used by the stream. - * For fixed frame rate material, or variable frame rate material where - * frames are timed at a multiple of a given clock (ex: video capture), - * the \ref RECOMMENDED method is to set the timebase to the reciprocal - * of the frame rate (ex: 1001/30000 for 29.970 Hz NTSC). This allows the - * pts to correspond to the frame number, which can be handy. 
For - * re-encoding video from containers with absolute time timestamps, the - * \ref RECOMMENDED method is to set the timebase to that of the parent - * container or multimedia framework (ex: 1/1000 for ms, as in FLV). - */ - struct vpx_rational g_timebase; - - - /*!\brief Enable error resilient modes. - * - * The error resilient bitfield indicates to the encoder which features - * it should enable to take measures for streaming over lossy or noisy - * links. - */ - vpx_codec_er_flags_t g_error_resilient; - - - /*!\brief Multi-pass Encoding Mode - * - * This value should be set to the current phase for multi-pass encoding. - * For single pass, set to #VPX_RC_ONE_PASS. - */ - enum vpx_enc_pass g_pass; - - - /*!\brief Allow lagged encoding - * - * If set, this value allows the encoder to consume a number of input - * frames before producing output frames. This allows the encoder to - * base decisions for the current frame on future frames. This does - * increase the latency of the encoding pipeline, so it is not appropriate - * in all situations (ex: realtime encoding). - * - * Note that this is a maximum value -- the encoder may produce frames - * sooner than the given limit. Set this value to 0 to disable this - * feature. - */ - unsigned int g_lag_in_frames; - - - /* - * rate control settings (rc) - */ - - /*!\brief Temporal resampling configuration, if supported by the codec. - * - * Temporal resampling allows the codec to "drop" frames as a strategy to - * meet its target data rate. This can cause temporal discontinuities in - * the encoded video, which may appear as stuttering during playback. This - * trade-off is often acceptable, but for many applications is not. It can - * be disabled in these cases. - * - * Note that not all codecs support this feature. All vpx VPx codecs do. - * For other codecs, consult the documentation for that algorithm. - * - * This threshold is described as a percentage of the target data buffer. 
- * When the data buffer falls below this percentage of fullness, a - * dropped frame is indicated. Set the threshold to zero (0) to disable - * this feature. - */ - unsigned int rc_dropframe_thresh; - - - /*!\brief Enable/disable spatial resampling, if supported by the codec. - * - * Spatial resampling allows the codec to compress a lower resolution - * version of the frame, which is then upscaled by the encoder to the - * correct presentation resolution. This increases visual quality at - * low data rates, at the expense of CPU time on the encoder/decoder. - */ - unsigned int rc_resize_allowed; - - /*!\brief Internal coded frame width. - * - * If spatial resampling is enabled this specifies the width of the - * encoded frame. - */ - unsigned int rc_scaled_width; - - /*!\brief Internal coded frame height. - * - * If spatial resampling is enabled this specifies the height of the - * encoded frame. - */ - unsigned int rc_scaled_height; - - /*!\brief Spatial resampling up watermark. - * - * This threshold is described as a percentage of the target data buffer. - * When the data buffer rises above this percentage of fullness, the - * encoder will step up to a higher resolution version of the frame. - */ - unsigned int rc_resize_up_thresh; - - - /*!\brief Spatial resampling down watermark. - * - * This threshold is described as a percentage of the target data buffer. - * When the data buffer falls below this percentage of fullness, the - * encoder will step down to a lower resolution version of the frame. - */ - unsigned int rc_resize_down_thresh; - - - /*!\brief Rate control algorithm to use. - * - * Indicates whether the end usage of this stream is to be streamed over - * a bandwidth constrained link, indicating that Constant Bit Rate (CBR) - * mode should be used, or whether it will be played back on a high - * bandwidth link, as from a local disk, where higher variations in - * bitrate are acceptable. 
- */ - enum vpx_rc_mode rc_end_usage; - - - /*!\brief Two-pass stats buffer. - * - * A buffer containing all of the stats packets produced in the first - * pass, concatenated. - */ - vpx_fixed_buf_t rc_twopass_stats_in; - - /*!\brief first pass mb stats buffer. - * - * A buffer containing all of the first pass mb stats packets produced - * in the first pass, concatenated. - */ - vpx_fixed_buf_t rc_firstpass_mb_stats_in; - - /*!\brief Target data rate - * - * Target bandwidth to use for this stream, in kilobits per second. - */ - unsigned int rc_target_bitrate; - - - /* - * quantizer settings - */ - - - /*!\brief Minimum (Best Quality) Quantizer - * - * The quantizer is the most direct control over the quality of the - * encoded image. The range of valid values for the quantizer is codec - * specific. Consult the documentation for the codec to determine the - * values to use. To determine the range programmatically, call - * vpx_codec_enc_config_default() with a usage value of 0. - */ - unsigned int rc_min_quantizer; - - - /*!\brief Maximum (Worst Quality) Quantizer - * - * The quantizer is the most direct control over the quality of the - * encoded image. The range of valid values for the quantizer is codec - * specific. Consult the documentation for the codec to determine the - * values to use. To determine the range programmatically, call - * vpx_codec_enc_config_default() with a usage value of 0. - */ - unsigned int rc_max_quantizer; - - - /* - * bitrate tolerance - */ - - - /*!\brief Rate control adaptation undershoot control - * - * This value, expressed as a percentage of the target bitrate, - * controls the maximum allowed adaptation speed of the codec. - * This factor controls the maximum amount of bits that can - * be subtracted from the target bitrate in order to compensate - * for prior overshoot. - * - * Valid values in the range 0-1000. 
- */ - unsigned int rc_undershoot_pct; - - - /*!\brief Rate control adaptation overshoot control - * - * This value, expressed as a percentage of the target bitrate, - * controls the maximum allowed adaptation speed of the codec. - * This factor controls the maximum amount of bits that can - * be added to the target bitrate in order to compensate for - * prior undershoot. - * - * Valid values in the range 0-1000. - */ - unsigned int rc_overshoot_pct; - - - /* - * decoder buffer model parameters - */ - - - /*!\brief Decoder Buffer Size - * - * This value indicates the amount of data that may be buffered by the - * decoding application. Note that this value is expressed in units of - * time (milliseconds). For example, a value of 5000 indicates that the - * client will buffer (at least) 5000ms worth of encoded data. Use the - * target bitrate (#rc_target_bitrate) to convert to bits/bytes, if - * necessary. - */ - unsigned int rc_buf_sz; - - - /*!\brief Decoder Buffer Initial Size - * - * This value indicates the amount of data that will be buffered by the - * decoding application prior to beginning playback. This value is - * expressed in units of time (milliseconds). Use the target bitrate - * (#rc_target_bitrate) to convert to bits/bytes, if necessary. - */ - unsigned int rc_buf_initial_sz; - - - /*!\brief Decoder Buffer Optimal Size - * - * This value indicates the amount of data that the encoder should try - * to maintain in the decoder's buffer. This value is expressed in units - * of time (milliseconds). Use the target bitrate (#rc_target_bitrate) - * to convert to bits/bytes, if necessary. - */ - unsigned int rc_buf_optimal_sz; - - - /* - * 2 pass rate control parameters - */ - - - /*!\brief Two-pass mode CBR/VBR bias - * - * Bias, expressed on a scale of 0 to 100, for determining target size - * for the current frame. The value 0 indicates the optimal CBR mode - * value should be used. The value 100 indicates the optimal VBR mode - * value should be used. 
Values in between indicate which way the - * encoder should "lean." - */ - unsigned int rc_2pass_vbr_bias_pct; /**< RC mode bias between CBR and VBR(0-100: 0->CBR, 100->VBR) */ - - - /*!\brief Two-pass mode per-GOP minimum bitrate - * - * This value, expressed as a percentage of the target bitrate, indicates - * the minimum bitrate to be used for a single GOP (aka "section") - */ - unsigned int rc_2pass_vbr_minsection_pct; - - - /*!\brief Two-pass mode per-GOP maximum bitrate - * - * This value, expressed as a percentage of the target bitrate, indicates - * the maximum bitrate to be used for a single GOP (aka "section") - */ - unsigned int rc_2pass_vbr_maxsection_pct; - - - /* - * keyframing settings (kf) - */ - - /*!\brief Keyframe placement mode - * - * This value indicates whether the encoder should place keyframes at a - * fixed interval, or determine the optimal placement automatically - * (as governed by the #kf_min_dist and #kf_max_dist parameters) - */ - enum vpx_kf_mode kf_mode; - - - /*!\brief Keyframe minimum interval - * - * This value, expressed as a number of frames, prevents the encoder from - * placing a keyframe nearer than kf_min_dist to the previous keyframe. At - * least kf_min_dist frames non-keyframes will be coded before the next - * keyframe. Set kf_min_dist equal to kf_max_dist for a fixed interval. - */ - unsigned int kf_min_dist; - - - /*!\brief Keyframe maximum interval - * - * This value, expressed as a number of frames, forces the encoder to code - * a keyframe if one has not been coded in the last kf_max_dist frames. - * A value of 0 implies all frames will be keyframes. Set kf_min_dist - * equal to kf_max_dist for a fixed interval. - */ - unsigned int kf_max_dist; - - /* - * Spatial scalability settings (ss) - */ - - /*!\brief Number of spatial coding layers. - * - * This value specifies the number of spatial coding layers to be used. 
- */ - unsigned int ss_number_layers; - - /*!\brief Enable auto alt reference flags for each spatial layer. - * - * These values specify if auto alt reference frame is enabled for each - * spatial layer. - */ - int ss_enable_auto_alt_ref[VPX_SS_MAX_LAYERS]; - - /*!\brief Target bitrate for each spatial layer. - * - * These values specify the target coding bitrate to be used for each - * spatial layer. - */ - unsigned int ss_target_bitrate[VPX_SS_MAX_LAYERS]; - - /*!\brief Number of temporal coding layers. - * - * This value specifies the number of temporal layers to be used. - */ - unsigned int ts_number_layers; - - /*!\brief Target bitrate for each temporal layer. - * - * These values specify the target coding bitrate to be used for each - * temporal layer. - */ - unsigned int ts_target_bitrate[VPX_TS_MAX_LAYERS]; - - /*!\brief Frame rate decimation factor for each temporal layer. - * - * These values specify the frame rate decimation factors to apply - * to each temporal layer. - */ - unsigned int ts_rate_decimator[VPX_TS_MAX_LAYERS]; - - /*!\brief Length of the sequence defining frame temporal layer membership. - * - * This value specifies the length of the sequence that defines the - * membership of frames to temporal layers. For example, if the - * ts_periodicity = 8, then the frames are assigned to coding layers with a - * repeated sequence of length 8. - */ - unsigned int ts_periodicity; - - /*!\brief Template defining the membership of frames to temporal layers. - * - * This array defines the membership of frames to temporal coding layers. - * For a 2-layer encoding that assigns even numbered frames to one temporal - * layer (0) and odd numbered frames to a second temporal layer (1) with - * ts_periodicity=8, then ts_layer_id = (0,1,0,1,0,1,0,1). - */ - unsigned int ts_layer_id[VPX_TS_MAX_PERIODICITY]; - - /*!\brief Target bitrate for each spatial/temporal layer. 
- * - * These values specify the target coding bitrate to be used for each - * spatial/temporal layer. - * - */ - unsigned int layer_target_bitrate[VPX_MAX_LAYERS]; - - /*!\brief Temporal layering mode indicating which temporal layering scheme to use. - * - * The value (refer to VP9E_TEMPORAL_LAYERING_MODE) specifies the - * temporal layering mode to use. - * - */ - int temporal_layering_mode; - } vpx_codec_enc_cfg_t; /**< alias for struct vpx_codec_enc_cfg */ - - /*!\brief vp9 svc extra configure parameters + /*!\brief Height of the frame * - * This defines max/min quantizers and scale factors for each layer + * This value identifies the presentation resolution of the frame, + * in pixels. Note that the frames passed as input to the encoder must + * have this resolution. Frames will be presented by the decoder in this + * resolution, independent of any spatial resampling the encoder may do. + */ + unsigned int g_h; + + /*!\brief Bit-depth of the codec + * + * This value identifies the bit_depth of the codec, + * Only certain bit-depths are supported as identified in the + * vpx_bit_depth_t enum. + */ + vpx_bit_depth_t g_bit_depth; + + /*!\brief Bit-depth of the input frames + * + * This value identifies the bit_depth of the input frames in bits. + * Note that the frames passed as input to the encoder must have + * this bit-depth. + */ + unsigned int g_input_bit_depth; + + /*!\brief Stream timebase units + * + * Indicates the smallest interval of time, in seconds, used by the stream. + * For fixed frame rate material, or variable frame rate material where + * frames are timed at a multiple of a given clock (ex: video capture), + * the \ref RECOMMENDED method is to set the timebase to the reciprocal + * of the frame rate (ex: 1001/30000 for 29.970 Hz NTSC). This allows the + * pts to correspond to the frame number, which can be handy. 
For + * re-encoding video from containers with absolute time timestamps, the + * \ref RECOMMENDED method is to set the timebase to that of the parent + * container or multimedia framework (ex: 1/1000 for ms, as in FLV). + */ + struct vpx_rational g_timebase; + + /*!\brief Enable error resilient modes. + * + * The error resilient bitfield indicates to the encoder which features + * it should enable to take measures for streaming over lossy or noisy + * links. + */ + vpx_codec_er_flags_t g_error_resilient; + + /*!\brief Multi-pass Encoding Mode + * + * This value should be set to the current phase for multi-pass encoding. + * For single pass, set to #VPX_RC_ONE_PASS. + */ + enum vpx_enc_pass g_pass; + + /*!\brief Allow lagged encoding + * + * If set, this value allows the encoder to consume a number of input + * frames before producing output frames. This allows the encoder to + * base decisions for the current frame on future frames. This does + * increase the latency of the encoding pipeline, so it is not appropriate + * in all situations (ex: realtime encoding). + * + * Note that this is a maximum value -- the encoder may produce frames + * sooner than the given limit. Set this value to 0 to disable this + * feature. + */ + unsigned int g_lag_in_frames; + + /* + * rate control settings (rc) + */ + + /*!\brief Temporal resampling configuration, if supported by the codec. + * + * Temporal resampling allows the codec to "drop" frames as a strategy to + * meet its target data rate. This can cause temporal discontinuities in + * the encoded video, which may appear as stuttering during playback. This + * trade-off is often acceptable, but for many applications is not. It can + * be disabled in these cases. + * + * Note that not all codecs support this feature. All vpx VPx codecs do. + * For other codecs, consult the documentation for that algorithm. + * + * This threshold is described as a percentage of the target data buffer. 
+ * When the data buffer falls below this percentage of fullness, a + * dropped frame is indicated. Set the threshold to zero (0) to disable + * this feature. + */ + unsigned int rc_dropframe_thresh; + + /*!\brief Enable/disable spatial resampling, if supported by the codec. + * + * Spatial resampling allows the codec to compress a lower resolution + * version of the frame, which is then upscaled by the encoder to the + * correct presentation resolution. This increases visual quality at + * low data rates, at the expense of CPU time on the encoder/decoder. + */ + unsigned int rc_resize_allowed; + + /*!\brief Internal coded frame width. + * + * If spatial resampling is enabled this specifies the width of the + * encoded frame. + */ + unsigned int rc_scaled_width; + + /*!\brief Internal coded frame height. + * + * If spatial resampling is enabled this specifies the height of the + * encoded frame. + */ + unsigned int rc_scaled_height; + + /*!\brief Spatial resampling up watermark. + * + * This threshold is described as a percentage of the target data buffer. + * When the data buffer rises above this percentage of fullness, the + * encoder will step up to a higher resolution version of the frame. + */ + unsigned int rc_resize_up_thresh; + + /*!\brief Spatial resampling down watermark. + * + * This threshold is described as a percentage of the target data buffer. + * When the data buffer falls below this percentage of fullness, the + * encoder will step down to a lower resolution version of the frame. + */ + unsigned int rc_resize_down_thresh; + + /*!\brief Rate control algorithm to use. + * + * Indicates whether the end usage of this stream is to be streamed over + * a bandwidth constrained link, indicating that Constant Bit Rate (CBR) + * mode should be used, or whether it will be played back on a high + * bandwidth link, as from a local disk, where higher variations in + * bitrate are acceptable. 
+ */ + enum vpx_rc_mode rc_end_usage; + + /*!\brief Two-pass stats buffer. + * + * A buffer containing all of the stats packets produced in the first + * pass, concatenated. + */ + vpx_fixed_buf_t rc_twopass_stats_in; + + /*!\brief first pass mb stats buffer. + * + * A buffer containing all of the first pass mb stats packets produced + * in the first pass, concatenated. + */ + vpx_fixed_buf_t rc_firstpass_mb_stats_in; + + /*!\brief Target data rate + * + * Target bandwidth to use for this stream, in kilobits per second. + */ + unsigned int rc_target_bitrate; + + /* + * quantizer settings + */ + + /*!\brief Minimum (Best Quality) Quantizer + * + * The quantizer is the most direct control over the quality of the + * encoded image. The range of valid values for the quantizer is codec + * specific. Consult the documentation for the codec to determine the + * values to use. To determine the range programmatically, call + * vpx_codec_enc_config_default() with a usage value of 0. + */ + unsigned int rc_min_quantizer; + + /*!\brief Maximum (Worst Quality) Quantizer + * + * The quantizer is the most direct control over the quality of the + * encoded image. The range of valid values for the quantizer is codec + * specific. Consult the documentation for the codec to determine the + * values to use. To determine the range programmatically, call + * vpx_codec_enc_config_default() with a usage value of 0. + */ + unsigned int rc_max_quantizer; + + /* + * bitrate tolerance + */ + + /*!\brief Rate control adaptation undershoot control + * + * This value, expressed as a percentage of the target bitrate, + * controls the maximum allowed adaptation speed of the codec. + * This factor controls the maximum amount of bits that can + * be subtracted from the target bitrate in order to compensate + * for prior overshoot. + * + * Valid values in the range 0-1000. 
+ */ + unsigned int rc_undershoot_pct; + + /*!\brief Rate control adaptation overshoot control + * + * This value, expressed as a percentage of the target bitrate, + * controls the maximum allowed adaptation speed of the codec. + * This factor controls the maximum amount of bits that can + * be added to the target bitrate in order to compensate for + * prior undershoot. + * + * Valid values in the range 0-1000. + */ + unsigned int rc_overshoot_pct; + + /* + * decoder buffer model parameters + */ + + /*!\brief Decoder Buffer Size + * + * This value indicates the amount of data that may be buffered by the + * decoding application. Note that this value is expressed in units of + * time (milliseconds). For example, a value of 5000 indicates that the + * client will buffer (at least) 5000ms worth of encoded data. Use the + * target bitrate (#rc_target_bitrate) to convert to bits/bytes, if + * necessary. + */ + unsigned int rc_buf_sz; + + /*!\brief Decoder Buffer Initial Size + * + * This value indicates the amount of data that will be buffered by the + * decoding application prior to beginning playback. This value is + * expressed in units of time (milliseconds). Use the target bitrate + * (#rc_target_bitrate) to convert to bits/bytes, if necessary. + */ + unsigned int rc_buf_initial_sz; + + /*!\brief Decoder Buffer Optimal Size + * + * This value indicates the amount of data that the encoder should try + * to maintain in the decoder's buffer. This value is expressed in units + * of time (milliseconds). Use the target bitrate (#rc_target_bitrate) + * to convert to bits/bytes, if necessary. + */ + unsigned int rc_buf_optimal_sz; + + /* + * 2 pass rate control parameters + */ + + /*!\brief Two-pass mode CBR/VBR bias + * + * Bias, expressed on a scale of 0 to 100, for determining target size + * for the current frame. The value 0 indicates the optimal CBR mode + * value should be used. The value 100 indicates the optimal VBR mode + * value should be used. 
Values in between indicate which way the + * encoder should "lean." + */ + unsigned int rc_2pass_vbr_bias_pct; + + /*!\brief Two-pass mode per-GOP minimum bitrate + * + * This value, expressed as a percentage of the target bitrate, indicates + * the minimum bitrate to be used for a single GOP (aka "section") + */ + unsigned int rc_2pass_vbr_minsection_pct; + + /*!\brief Two-pass mode per-GOP maximum bitrate + * + * This value, expressed as a percentage of the target bitrate, indicates + * the maximum bitrate to be used for a single GOP (aka "section") + */ + unsigned int rc_2pass_vbr_maxsection_pct; + + /* + * keyframing settings (kf) + */ + + /*!\brief Keyframe placement mode + * + * This value indicates whether the encoder should place keyframes at a + * fixed interval, or determine the optimal placement automatically + * (as governed by the #kf_min_dist and #kf_max_dist parameters) + */ + enum vpx_kf_mode kf_mode; + + /*!\brief Keyframe minimum interval + * + * This value, expressed as a number of frames, prevents the encoder from + * placing a keyframe nearer than kf_min_dist to the previous keyframe. At + * least kf_min_dist frames non-keyframes will be coded before the next + * keyframe. Set kf_min_dist equal to kf_max_dist for a fixed interval. + */ + unsigned int kf_min_dist; + + /*!\brief Keyframe maximum interval + * + * This value, expressed as a number of frames, forces the encoder to code + * a keyframe if one has not been coded in the last kf_max_dist frames. + * A value of 0 implies all frames will be keyframes. Set kf_min_dist + * equal to kf_max_dist for a fixed interval. + */ + unsigned int kf_max_dist; + + /* + * Spatial scalability settings (ss) + */ + + /*!\brief Number of spatial coding layers. + * + * This value specifies the number of spatial coding layers to be used. + */ + unsigned int ss_number_layers; + + /*!\brief Enable auto alt reference flags for each spatial layer. 
+ * + * These values specify if auto alt reference frame is enabled for each + * spatial layer. + */ + int ss_enable_auto_alt_ref[VPX_SS_MAX_LAYERS]; + + /*!\brief Target bitrate for each spatial layer. + * + * These values specify the target coding bitrate to be used for each + * spatial layer. + */ + unsigned int ss_target_bitrate[VPX_SS_MAX_LAYERS]; + + /*!\brief Number of temporal coding layers. + * + * This value specifies the number of temporal layers to be used. + */ + unsigned int ts_number_layers; + + /*!\brief Target bitrate for each temporal layer. + * + * These values specify the target coding bitrate to be used for each + * temporal layer. + */ + unsigned int ts_target_bitrate[VPX_TS_MAX_LAYERS]; + + /*!\brief Frame rate decimation factor for each temporal layer. + * + * These values specify the frame rate decimation factors to apply + * to each temporal layer. + */ + unsigned int ts_rate_decimator[VPX_TS_MAX_LAYERS]; + + /*!\brief Length of the sequence defining frame temporal layer membership. + * + * This value specifies the length of the sequence that defines the + * membership of frames to temporal layers. For example, if the + * ts_periodicity = 8, then the frames are assigned to coding layers with a + * repeated sequence of length 8. + */ + unsigned int ts_periodicity; + + /*!\brief Template defining the membership of frames to temporal layers. + * + * This array defines the membership of frames to temporal coding layers. + * For a 2-layer encoding that assigns even numbered frames to one temporal + * layer (0) and odd numbered frames to a second temporal layer (1) with + * ts_periodicity=8, then ts_layer_id = (0,1,0,1,0,1,0,1). + */ + unsigned int ts_layer_id[VPX_TS_MAX_PERIODICITY]; + + /*!\brief Target bitrate for each spatial/temporal layer. + * + * These values specify the target coding bitrate to be used for each + * spatial/temporal layer. 
* */ - typedef struct vpx_svc_parameters { - int max_quantizers[VPX_MAX_LAYERS]; /**< Max Q for each layer */ - int min_quantizers[VPX_MAX_LAYERS]; /**< Min Q for each layer */ - int scaling_factor_num[VPX_MAX_LAYERS]; /**< Scaling factor-numerator */ - int scaling_factor_den[VPX_MAX_LAYERS]; /**< Scaling factor-denominator */ - int temporal_layering_mode; /**< Temporal layering mode */ - } vpx_svc_extra_cfg_t; + unsigned int layer_target_bitrate[VPX_MAX_LAYERS]; - - /*!\brief Initialize an encoder instance + /*!\brief Temporal layering mode indicating which temporal layering scheme to + * use. * - * Initializes a encoder context using the given interface. Applications - * should call the vpx_codec_enc_init convenience macro instead of this - * function directly, to ensure that the ABI version number parameter - * is properly initialized. + * The value (refer to VP9E_TEMPORAL_LAYERING_MODE) specifies the + * temporal layering mode to use. * - * If the library was configured with --disable-multithread, this call - * is not thread safe and should be guarded with a lock if being used - * in a multithreaded context. - * - * \param[in] ctx Pointer to this instance's context. - * \param[in] iface Pointer to the algorithm interface to use. - * \param[in] cfg Configuration to use, if known. May be NULL. - * \param[in] flags Bitfield of VPX_CODEC_USE_* flags - * \param[in] ver ABI version number. Must be set to - * VPX_ENCODER_ABI_VERSION - * \retval #VPX_CODEC_OK - * The decoder algorithm initialized. - * \retval #VPX_CODEC_MEM_ERROR - * Memory allocation failed. 
*/ - vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx, - vpx_codec_iface_t *iface, - const vpx_codec_enc_cfg_t *cfg, - vpx_codec_flags_t flags, - int ver); + int temporal_layering_mode; +} vpx_codec_enc_cfg_t; /**< alias for struct vpx_codec_enc_cfg */ +/*!\brief vp9 svc extra configure parameters + * + * This defines max/min quantizers and scale factors for each layer + * + */ +typedef struct vpx_svc_parameters { + int max_quantizers[VPX_MAX_LAYERS]; /**< Max Q for each layer */ + int min_quantizers[VPX_MAX_LAYERS]; /**< Min Q for each layer */ + int scaling_factor_num[VPX_MAX_LAYERS]; /**< Scaling factor-numerator */ + int scaling_factor_den[VPX_MAX_LAYERS]; /**< Scaling factor-denominator */ + int speed_per_layer[VPX_MAX_LAYERS]; /**< Speed setting for each sl */ + int temporal_layering_mode; /**< Temporal layering mode */ +} vpx_svc_extra_cfg_t; - /*!\brief Convenience macro for vpx_codec_enc_init_ver() - * - * Ensures the ABI version parameter is properly set. - */ +/*!\brief Initialize an encoder instance + * + * Initializes a encoder context using the given interface. Applications + * should call the vpx_codec_enc_init convenience macro instead of this + * function directly, to ensure that the ABI version number parameter + * is properly initialized. + * + * If the library was configured with --disable-multithread, this call + * is not thread safe and should be guarded with a lock if being used + * in a multithreaded context. + * + * \param[in] ctx Pointer to this instance's context. + * \param[in] iface Pointer to the algorithm interface to use. + * \param[in] cfg Configuration to use, if known. May be NULL. + * \param[in] flags Bitfield of VPX_CODEC_USE_* flags + * \param[in] ver ABI version number. Must be set to + * VPX_ENCODER_ABI_VERSION + * \retval #VPX_CODEC_OK + * The decoder algorithm initialized. + * \retval #VPX_CODEC_MEM_ERROR + * Memory allocation failed. 
+ */ +vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx, + vpx_codec_iface_t *iface, + const vpx_codec_enc_cfg_t *cfg, + vpx_codec_flags_t flags, int ver); + +/*!\brief Convenience macro for vpx_codec_enc_init_ver() + * + * Ensures the ABI version parameter is properly set. + */ #define vpx_codec_enc_init(ctx, iface, cfg, flags) \ vpx_codec_enc_init_ver(ctx, iface, cfg, flags, VPX_ENCODER_ABI_VERSION) +/*!\brief Initialize multi-encoder instance + * + * Initializes multi-encoder context using the given interface. + * Applications should call the vpx_codec_enc_init_multi convenience macro + * instead of this function directly, to ensure that the ABI version number + * parameter is properly initialized. + * + * \param[in] ctx Pointer to this instance's context. + * \param[in] iface Pointer to the algorithm interface to use. + * \param[in] cfg Configuration to use, if known. May be NULL. + * \param[in] num_enc Total number of encoders. + * \param[in] flags Bitfield of VPX_CODEC_USE_* flags + * \param[in] dsf Pointer to down-sampling factors. + * \param[in] ver ABI version number. Must be set to + * VPX_ENCODER_ABI_VERSION + * \retval #VPX_CODEC_OK + * The decoder algorithm initialized. + * \retval #VPX_CODEC_MEM_ERROR + * Memory allocation failed. + */ +vpx_codec_err_t vpx_codec_enc_init_multi_ver( + vpx_codec_ctx_t *ctx, vpx_codec_iface_t *iface, vpx_codec_enc_cfg_t *cfg, + int num_enc, vpx_codec_flags_t flags, vpx_rational_t *dsf, int ver); - /*!\brief Initialize multi-encoder instance - * - * Initializes multi-encoder context using the given interface. - * Applications should call the vpx_codec_enc_init_multi convenience macro - * instead of this function directly, to ensure that the ABI version number - * parameter is properly initialized. - * - * \param[in] ctx Pointer to this instance's context. - * \param[in] iface Pointer to the algorithm interface to use. - * \param[in] cfg Configuration to use, if known. May be NULL. 
- * \param[in] num_enc Total number of encoders. - * \param[in] flags Bitfield of VPX_CODEC_USE_* flags - * \param[in] dsf Pointer to down-sampling factors. - * \param[in] ver ABI version number. Must be set to - * VPX_ENCODER_ABI_VERSION - * \retval #VPX_CODEC_OK - * The decoder algorithm initialized. - * \retval #VPX_CODEC_MEM_ERROR - * Memory allocation failed. - */ - vpx_codec_err_t vpx_codec_enc_init_multi_ver(vpx_codec_ctx_t *ctx, - vpx_codec_iface_t *iface, - vpx_codec_enc_cfg_t *cfg, - int num_enc, - vpx_codec_flags_t flags, - vpx_rational_t *dsf, - int ver); - - - /*!\brief Convenience macro for vpx_codec_enc_init_multi_ver() - * - * Ensures the ABI version parameter is properly set. - */ +/*!\brief Convenience macro for vpx_codec_enc_init_multi_ver() + * + * Ensures the ABI version parameter is properly set. + */ #define vpx_codec_enc_init_multi(ctx, iface, cfg, num_enc, flags, dsf) \ - vpx_codec_enc_init_multi_ver(ctx, iface, cfg, num_enc, flags, dsf, \ + vpx_codec_enc_init_multi_ver(ctx, iface, cfg, num_enc, flags, dsf, \ VPX_ENCODER_ABI_VERSION) +/*!\brief Get a default configuration + * + * Initializes a encoder configuration structure with default values. Supports + * the notion of "usages" so that an algorithm may offer different default + * settings depending on the user's intended goal. This function \ref SHOULD + * be called by all applications to initialize the configuration structure + * before specializing the configuration with application specific values. + * + * \param[in] iface Pointer to the algorithm interface to use. + * \param[out] cfg Configuration buffer to populate. + * \param[in] reserved Must set to 0 for VP8 and VP9. + * + * \retval #VPX_CODEC_OK + * The configuration was populated. + * \retval #VPX_CODEC_INCAPABLE + * Interface is not an encoder interface. + * \retval #VPX_CODEC_INVALID_PARAM + * A parameter was NULL, or the usage value was not recognized. 
+ */ +vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface, + vpx_codec_enc_cfg_t *cfg, + unsigned int reserved); - /*!\brief Get a default configuration - * - * Initializes a encoder configuration structure with default values. Supports - * the notion of "usages" so that an algorithm may offer different default - * settings depending on the user's intended goal. This function \ref SHOULD - * be called by all applications to initialize the configuration structure - * before specializing the configuration with application specific values. - * - * \param[in] iface Pointer to the algorithm interface to use. - * \param[out] cfg Configuration buffer to populate. - * \param[in] reserved Must set to 0 for VP8 and VP9. - * - * \retval #VPX_CODEC_OK - * The configuration was populated. - * \retval #VPX_CODEC_INCAPABLE - * Interface is not an encoder interface. - * \retval #VPX_CODEC_INVALID_PARAM - * A parameter was NULL, or the usage value was not recognized. - */ - vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface, - vpx_codec_enc_cfg_t *cfg, - unsigned int reserved); +/*!\brief Set or change configuration + * + * Reconfigures an encoder instance according to the given configuration. + * + * \param[in] ctx Pointer to this instance's context + * \param[in] cfg Configuration buffer to use + * + * \retval #VPX_CODEC_OK + * The configuration was populated. + * \retval #VPX_CODEC_INCAPABLE + * Interface is not an encoder interface. + * \retval #VPX_CODEC_INVALID_PARAM + * A parameter was NULL, or the usage value was not recognized. + */ +vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx, + const vpx_codec_enc_cfg_t *cfg); +/*!\brief Get global stream headers + * + * Retrieves a stream level global header packet, if supported by the codec. 
+ * + * \param[in] ctx Pointer to this instance's context + * + * \retval NULL + * Encoder does not support global header + * \retval Non-NULL + * Pointer to buffer containing global header packet + */ +vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx); - /*!\brief Set or change configuration - * - * Reconfigures an encoder instance according to the given configuration. - * - * \param[in] ctx Pointer to this instance's context - * \param[in] cfg Configuration buffer to use - * - * \retval #VPX_CODEC_OK - * The configuration was populated. - * \retval #VPX_CODEC_INCAPABLE - * Interface is not an encoder interface. - * \retval #VPX_CODEC_INVALID_PARAM - * A parameter was NULL, or the usage value was not recognized. - */ - vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx, - const vpx_codec_enc_cfg_t *cfg); +/*!\brief deadline parameter analogous to VPx REALTIME mode. */ +#define VPX_DL_REALTIME (1) +/*!\brief deadline parameter analogous to VPx GOOD QUALITY mode. */ +#define VPX_DL_GOOD_QUALITY (1000000) +/*!\brief deadline parameter analogous to VPx BEST QUALITY mode. */ +#define VPX_DL_BEST_QUALITY (0) +/*!\brief Encode a frame + * + * Encodes a video frame at the given "presentation time." The presentation + * time stamp (PTS) \ref MUST be strictly increasing. + * + * The encoder supports the notion of a soft real-time deadline. Given a + * non-zero value to the deadline parameter, the encoder will make a "best + * effort" guarantee to return before the given time slice expires. It is + * implicit that limiting the available time to encode will degrade the + * output quality. The encoder can be given an unlimited time to produce the + * best possible frame by specifying a deadline of '0'. This deadline + * supercedes the VPx notion of "best quality, good quality, realtime". 
+ * Applications that wish to map these former settings to the new deadline + * based system can use the symbols #VPX_DL_REALTIME, #VPX_DL_GOOD_QUALITY, + * and #VPX_DL_BEST_QUALITY. + * + * When the last frame has been passed to the encoder, this function should + * continue to be called, with the img parameter set to NULL. This will + * signal the end-of-stream condition to the encoder and allow it to encode + * any held buffers. Encoding is complete when vpx_codec_encode() is called + * and vpx_codec_get_cx_data() returns no data. + * + * \param[in] ctx Pointer to this instance's context + * \param[in] img Image data to encode, NULL to flush. + * \param[in] pts Presentation time stamp, in timebase units. + * \param[in] duration Duration to show frame, in timebase units. + * \param[in] flags Flags to use for encoding this frame. + * \param[in] deadline Time to spend encoding, in microseconds. (0=infinite) + * + * \retval #VPX_CODEC_OK + * The configuration was populated. + * \retval #VPX_CODEC_INCAPABLE + * Interface is not an encoder interface. + * \retval #VPX_CODEC_INVALID_PARAM + * A parameter was NULL, the image format is unsupported, etc. + */ +vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, const vpx_image_t *img, + vpx_codec_pts_t pts, unsigned long duration, + vpx_enc_frame_flags_t flags, + unsigned long deadline); +/*!\brief Set compressed data output buffer + * + * Sets the buffer that the codec should output the compressed data + * into. This call effectively sets the buffer pointer returned in the + * next VPX_CODEC_CX_FRAME_PKT packet. Subsequent packets will be + * appended into this buffer. The buffer is preserved across frames, + * so applications must periodically call this function after flushing + * the accumulated compressed data to disk or to the network to reset + * the pointer to the buffer's head. + * + * `pad_before` bytes will be skipped before writing the compressed + * data, and `pad_after` bytes will be appended to the packet. 
The size + * of the packet will be the sum of the size of the actual compressed + * data, pad_before, and pad_after. The padding bytes will be preserved + * (not overwritten). + * + * Note that calling this function does not guarantee that the returned + * compressed data will be placed into the specified buffer. In the + * event that the encoded data will not fit into the buffer provided, + * the returned packet \ref MAY point to an internal buffer, as it would + * if this call were never used. In this event, the output packet will + * NOT have any padding, and the application must free space and copy it + * to the proper place. This is of particular note in configurations + * that may output multiple packets for a single encoded frame (e.g., lagged + * encoding) or if the application does not reset the buffer periodically. + * + * Applications may restore the default behavior of the codec providing + * the compressed data buffer by calling this function with a NULL + * buffer. + * + * Applications \ref MUSTNOT call this function during iteration of + * vpx_codec_get_cx_data(). + * + * \param[in] ctx Pointer to this instance's context + * \param[in] buf Buffer to store compressed data into + * \param[in] pad_before Bytes to skip before writing compressed data + * \param[in] pad_after Bytes to skip after writing compressed data + * + * \retval #VPX_CODEC_OK + * The buffer was set successfully. + * \retval #VPX_CODEC_INVALID_PARAM + * A parameter was NULL, the image format is unsupported, etc. + */ +vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx, + const vpx_fixed_buf_t *buf, + unsigned int pad_before, + unsigned int pad_after); - /*!\brief Get global stream headers - * - * Retrieves a stream level global header packet, if supported by the codec. 
- * - * \param[in] ctx Pointer to this instance's context - * - * \retval NULL - * Encoder does not support global header - * \retval Non-NULL - * Pointer to buffer containing global header packet - */ - vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx); +/*!\brief Encoded data iterator + * + * Iterates over a list of data packets to be passed from the encoder to the + * application. The different kinds of packets available are enumerated in + * #vpx_codec_cx_pkt_kind. + * + * #VPX_CODEC_CX_FRAME_PKT packets should be passed to the application's + * muxer. Multiple compressed frames may be in the list. + * #VPX_CODEC_STATS_PKT packets should be appended to a global buffer. + * + * The application \ref MUST silently ignore any packet kinds that it does + * not recognize or support. + * + * The data buffers returned from this function are only guaranteed to be + * valid until the application makes another call to any vpx_codec_* function. + * + * \param[in] ctx Pointer to this instance's context + * \param[in,out] iter Iterator storage, initialized to NULL + * + * \return Returns a pointer to an output data packet (compressed frame data, + * two-pass statistics, etc.) or NULL to signal end-of-list. + * + */ +const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx, + vpx_codec_iter_t *iter); +/*!\brief Get Preview Frame + * + * Returns an image that can be used as a preview. Shows the image as it would + * exist at the decompressor. The application \ref MUST NOT write into this + * image buffer. + * + * \param[in] ctx Pointer to this instance's context + * + * \return Returns a pointer to a preview image, or NULL if no image is + * available. + * + */ +const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx); -#define VPX_DL_REALTIME (1) /**< deadline parameter analogous to - * VPx REALTIME mode. */ -#define VPX_DL_GOOD_QUALITY (1000000) /**< deadline parameter analogous to - * VPx GOOD QUALITY mode. 
*/ -#define VPX_DL_BEST_QUALITY (0) /**< deadline parameter analogous to - * VPx BEST QUALITY mode. */ - /*!\brief Encode a frame - * - * Encodes a video frame at the given "presentation time." The presentation - * time stamp (PTS) \ref MUST be strictly increasing. - * - * The encoder supports the notion of a soft real-time deadline. Given a - * non-zero value to the deadline parameter, the encoder will make a "best - * effort" guarantee to return before the given time slice expires. It is - * implicit that limiting the available time to encode will degrade the - * output quality. The encoder can be given an unlimited time to produce the - * best possible frame by specifying a deadline of '0'. This deadline - * supercedes the VPx notion of "best quality, good quality, realtime". - * Applications that wish to map these former settings to the new deadline - * based system can use the symbols #VPX_DL_REALTIME, #VPX_DL_GOOD_QUALITY, - * and #VPX_DL_BEST_QUALITY. - * - * When the last frame has been passed to the encoder, this function should - * continue to be called, with the img parameter set to NULL. This will - * signal the end-of-stream condition to the encoder and allow it to encode - * any held buffers. Encoding is complete when vpx_codec_encode() is called - * and vpx_codec_get_cx_data() returns no data. - * - * \param[in] ctx Pointer to this instance's context - * \param[in] img Image data to encode, NULL to flush. - * \param[in] pts Presentation time stamp, in timebase units. - * \param[in] duration Duration to show frame, in timebase units. - * \param[in] flags Flags to use for encoding this frame. - * \param[in] deadline Time to spend encoding, in microseconds. (0=infinite) - * - * \retval #VPX_CODEC_OK - * The configuration was populated. - * \retval #VPX_CODEC_INCAPABLE - * Interface is not an encoder interface. - * \retval #VPX_CODEC_INVALID_PARAM - * A parameter was NULL, the image format is unsupported, etc. 
- */ - vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, - const vpx_image_t *img, - vpx_codec_pts_t pts, - unsigned long duration, - vpx_enc_frame_flags_t flags, - unsigned long deadline); - - /*!\brief Set compressed data output buffer - * - * Sets the buffer that the codec should output the compressed data - * into. This call effectively sets the buffer pointer returned in the - * next VPX_CODEC_CX_FRAME_PKT packet. Subsequent packets will be - * appended into this buffer. The buffer is preserved across frames, - * so applications must periodically call this function after flushing - * the accumulated compressed data to disk or to the network to reset - * the pointer to the buffer's head. - * - * `pad_before` bytes will be skipped before writing the compressed - * data, and `pad_after` bytes will be appended to the packet. The size - * of the packet will be the sum of the size of the actual compressed - * data, pad_before, and pad_after. The padding bytes will be preserved - * (not overwritten). - * - * Note that calling this function does not guarantee that the returned - * compressed data will be placed into the specified buffer. In the - * event that the encoded data will not fit into the buffer provided, - * the returned packet \ref MAY point to an internal buffer, as it would - * if this call were never used. In this event, the output packet will - * NOT have any padding, and the application must free space and copy it - * to the proper place. This is of particular note in configurations - * that may output multiple packets for a single encoded frame (e.g., lagged - * encoding) or if the application does not reset the buffer periodically. - * - * Applications may restore the default behavior of the codec providing - * the compressed data buffer by calling this function with a NULL - * buffer. - * - * Applications \ref MUSTNOT call this function during iteration of - * vpx_codec_get_cx_data(). 
- * - * \param[in] ctx Pointer to this instance's context - * \param[in] buf Buffer to store compressed data into - * \param[in] pad_before Bytes to skip before writing compressed data - * \param[in] pad_after Bytes to skip after writing compressed data - * - * \retval #VPX_CODEC_OK - * The buffer was set successfully. - * \retval #VPX_CODEC_INVALID_PARAM - * A parameter was NULL, the image format is unsupported, etc. - */ - vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx, - const vpx_fixed_buf_t *buf, - unsigned int pad_before, - unsigned int pad_after); - - - /*!\brief Encoded data iterator - * - * Iterates over a list of data packets to be passed from the encoder to the - * application. The different kinds of packets available are enumerated in - * #vpx_codec_cx_pkt_kind. - * - * #VPX_CODEC_CX_FRAME_PKT packets should be passed to the application's - * muxer. Multiple compressed frames may be in the list. - * #VPX_CODEC_STATS_PKT packets should be appended to a global buffer. - * - * The application \ref MUST silently ignore any packet kinds that it does - * not recognize or support. - * - * The data buffers returned from this function are only guaranteed to be - * valid until the application makes another call to any vpx_codec_* function. - * - * \param[in] ctx Pointer to this instance's context - * \param[in,out] iter Iterator storage, initialized to NULL - * - * \return Returns a pointer to an output data packet (compressed frame data, - * two-pass statistics, etc.) or NULL to signal end-of-list. - * - */ - const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx, - vpx_codec_iter_t *iter); - - - /*!\brief Get Preview Frame - * - * Returns an image that can be used as a preview. Shows the image as it would - * exist at the decompressor. The application \ref MUST NOT write into this - * image buffer. 
- * - * \param[in] ctx Pointer to this instance's context - * - * \return Returns a pointer to a preview image, or NULL if no image is - * available. - * - */ - const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx); - - - /*!@} - end defgroup encoder*/ +/*!@} - end defgroup encoder*/ #ifdef __cplusplus } #endif #endif // VPX_VPX_ENCODER_H_ - diff --git a/libs/libvpx/vpx/vpx_frame_buffer.h b/libs/libvpx/vpx/vpx_frame_buffer.h index 9036459af0..ad70cdd572 100644 --- a/libs/libvpx/vpx/vpx_frame_buffer.h +++ b/libs/libvpx/vpx/vpx_frame_buffer.h @@ -37,9 +37,9 @@ extern "C" { * This structure holds allocated frame buffers used by the decoder. */ typedef struct vpx_codec_frame_buffer { - uint8_t *data; /**< Pointer to the data buffer */ - size_t size; /**< Size of data in bytes */ - void *priv; /**< Frame's private data */ + uint8_t *data; /**< Pointer to the data buffer */ + size_t size; /**< Size of data in bytes */ + void *priv; /**< Frame's private data */ } vpx_codec_frame_buffer_t; /*!\brief get frame buffer callback prototype @@ -60,8 +60,8 @@ typedef struct vpx_codec_frame_buffer { * \param[in] new_size Size in bytes needed by the buffer * \param[in,out] fb Pointer to vpx_codec_frame_buffer_t */ -typedef int (*vpx_get_frame_buffer_cb_fn_t)( - void *priv, size_t min_size, vpx_codec_frame_buffer_t *fb); +typedef int (*vpx_get_frame_buffer_cb_fn_t)(void *priv, size_t min_size, + vpx_codec_frame_buffer_t *fb); /*!\brief release frame buffer callback prototype * @@ -73,8 +73,8 @@ typedef int (*vpx_get_frame_buffer_cb_fn_t)( * \param[in] priv Callback's private data * \param[in] fb Pointer to vpx_codec_frame_buffer_t */ -typedef int (*vpx_release_frame_buffer_cb_fn_t)( - void *priv, vpx_codec_frame_buffer_t *fb); +typedef int (*vpx_release_frame_buffer_cb_fn_t)(void *priv, + vpx_codec_frame_buffer_t *fb); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vpx/vpx_image.h b/libs/libvpx/vpx/vpx_image.h index 7958c69806..d6d3166d2f 100644 --- 
a/libs/libvpx/vpx/vpx_image.h +++ b/libs/libvpx/vpx/vpx_image.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - /*!\file * \brief Describes the vpx image descriptor and associated operations * @@ -20,213 +19,203 @@ extern "C" { #endif - /*!\brief Current ABI version number - * - * \internal - * If this file is altered in any way that changes the ABI, this value - * must be bumped. Examples include, but are not limited to, changing - * types, removing or reassigning enums, adding/removing/rearranging - * fields to structures - */ +/*!\brief Current ABI version number + * + * \internal + * If this file is altered in any way that changes the ABI, this value + * must be bumped. Examples include, but are not limited to, changing + * types, removing or reassigning enums, adding/removing/rearranging + * fields to structures + */ #define VPX_IMAGE_ABI_VERSION (4) /**<\hideinitializer*/ +#define VPX_IMG_FMT_PLANAR 0x100 /**< Image is a planar format. */ +#define VPX_IMG_FMT_UV_FLIP 0x200 /**< V plane precedes U in memory. */ +#define VPX_IMG_FMT_HAS_ALPHA 0x400 /**< Image has an alpha channel. */ +#define VPX_IMG_FMT_HIGHBITDEPTH 0x800 /**< Image uses 16bit framebuffer. */ -#define VPX_IMG_FMT_PLANAR 0x100 /**< Image is a planar format. */ -#define VPX_IMG_FMT_UV_FLIP 0x200 /**< V plane precedes U in memory. */ -#define VPX_IMG_FMT_HAS_ALPHA 0x400 /**< Image has an alpha channel. */ -#define VPX_IMG_FMT_HIGHBITDEPTH 0x800 /**< Image uses 16bit framebuffer. 
*/ +/*!\brief List of supported image formats */ +typedef enum vpx_img_fmt { + VPX_IMG_FMT_NONE, + VPX_IMG_FMT_RGB24, /**< 24 bit per pixel packed RGB */ + VPX_IMG_FMT_RGB32, /**< 32 bit per pixel packed 0RGB */ + VPX_IMG_FMT_RGB565, /**< 16 bit per pixel, 565 */ + VPX_IMG_FMT_RGB555, /**< 16 bit per pixel, 555 */ + VPX_IMG_FMT_UYVY, /**< UYVY packed YUV */ + VPX_IMG_FMT_YUY2, /**< YUYV packed YUV */ + VPX_IMG_FMT_YVYU, /**< YVYU packed YUV */ + VPX_IMG_FMT_BGR24, /**< 24 bit per pixel packed BGR */ + VPX_IMG_FMT_RGB32_LE, /**< 32 bit packed BGR0 */ + VPX_IMG_FMT_ARGB, /**< 32 bit packed ARGB, alpha=255 */ + VPX_IMG_FMT_ARGB_LE, /**< 32 bit packed BGRA, alpha=255 */ + VPX_IMG_FMT_RGB565_LE, /**< 16 bit per pixel, gggbbbbb rrrrrggg */ + VPX_IMG_FMT_RGB555_LE, /**< 16 bit per pixel, gggbbbbb 0rrrrrgg */ + VPX_IMG_FMT_YV12 = + VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 1, /**< planar YVU */ + VPX_IMG_FMT_I420 = VPX_IMG_FMT_PLANAR | 2, + VPX_IMG_FMT_VPXYV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | + 3, /** < planar 4:2:0 format with vpx color space */ + VPX_IMG_FMT_VPXI420 = VPX_IMG_FMT_PLANAR | 4, + VPX_IMG_FMT_I422 = VPX_IMG_FMT_PLANAR | 5, + VPX_IMG_FMT_I444 = VPX_IMG_FMT_PLANAR | 6, + VPX_IMG_FMT_I440 = VPX_IMG_FMT_PLANAR | 7, + VPX_IMG_FMT_444A = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_HAS_ALPHA | 6, + VPX_IMG_FMT_I42016 = VPX_IMG_FMT_I420 | VPX_IMG_FMT_HIGHBITDEPTH, + VPX_IMG_FMT_I42216 = VPX_IMG_FMT_I422 | VPX_IMG_FMT_HIGHBITDEPTH, + VPX_IMG_FMT_I44416 = VPX_IMG_FMT_I444 | VPX_IMG_FMT_HIGHBITDEPTH, + VPX_IMG_FMT_I44016 = VPX_IMG_FMT_I440 | VPX_IMG_FMT_HIGHBITDEPTH +} vpx_img_fmt_t; /**< alias for enum vpx_img_fmt */ - /*!\brief List of supported image formats */ - typedef enum vpx_img_fmt { - VPX_IMG_FMT_NONE, - VPX_IMG_FMT_RGB24, /**< 24 bit per pixel packed RGB */ - VPX_IMG_FMT_RGB32, /**< 32 bit per pixel packed 0RGB */ - VPX_IMG_FMT_RGB565, /**< 16 bit per pixel, 565 */ - VPX_IMG_FMT_RGB555, /**< 16 bit per pixel, 555 */ - VPX_IMG_FMT_UYVY, /**< UYVY packed YUV 
*/ - VPX_IMG_FMT_YUY2, /**< YUYV packed YUV */ - VPX_IMG_FMT_YVYU, /**< YVYU packed YUV */ - VPX_IMG_FMT_BGR24, /**< 24 bit per pixel packed BGR */ - VPX_IMG_FMT_RGB32_LE, /**< 32 bit packed BGR0 */ - VPX_IMG_FMT_ARGB, /**< 32 bit packed ARGB, alpha=255 */ - VPX_IMG_FMT_ARGB_LE, /**< 32 bit packed BGRA, alpha=255 */ - VPX_IMG_FMT_RGB565_LE, /**< 16 bit per pixel, gggbbbbb rrrrrggg */ - VPX_IMG_FMT_RGB555_LE, /**< 16 bit per pixel, gggbbbbb 0rrrrrgg */ - VPX_IMG_FMT_YV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 1, /**< planar YVU */ - VPX_IMG_FMT_I420 = VPX_IMG_FMT_PLANAR | 2, - VPX_IMG_FMT_VPXYV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 3, /** < planar 4:2:0 format with vpx color space */ - VPX_IMG_FMT_VPXI420 = VPX_IMG_FMT_PLANAR | 4, - VPX_IMG_FMT_I422 = VPX_IMG_FMT_PLANAR | 5, - VPX_IMG_FMT_I444 = VPX_IMG_FMT_PLANAR | 6, - VPX_IMG_FMT_I440 = VPX_IMG_FMT_PLANAR | 7, - VPX_IMG_FMT_444A = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_HAS_ALPHA | 6, - VPX_IMG_FMT_I42016 = VPX_IMG_FMT_I420 | VPX_IMG_FMT_HIGHBITDEPTH, - VPX_IMG_FMT_I42216 = VPX_IMG_FMT_I422 | VPX_IMG_FMT_HIGHBITDEPTH, - VPX_IMG_FMT_I44416 = VPX_IMG_FMT_I444 | VPX_IMG_FMT_HIGHBITDEPTH, - VPX_IMG_FMT_I44016 = VPX_IMG_FMT_I440 | VPX_IMG_FMT_HIGHBITDEPTH - } vpx_img_fmt_t; /**< alias for enum vpx_img_fmt */ +/*!\brief List of supported color spaces */ +typedef enum vpx_color_space { + VPX_CS_UNKNOWN = 0, /**< Unknown */ + VPX_CS_BT_601 = 1, /**< BT.601 */ + VPX_CS_BT_709 = 2, /**< BT.709 */ + VPX_CS_SMPTE_170 = 3, /**< SMPTE.170 */ + VPX_CS_SMPTE_240 = 4, /**< SMPTE.240 */ + VPX_CS_BT_2020 = 5, /**< BT.2020 */ + VPX_CS_RESERVED = 6, /**< Reserved */ + VPX_CS_SRGB = 7 /**< sRGB */ +} vpx_color_space_t; /**< alias for enum vpx_color_space */ - /*!\brief List of supported color spaces */ - typedef enum vpx_color_space { - VPX_CS_UNKNOWN = 0, /**< Unknown */ - VPX_CS_BT_601 = 1, /**< BT.601 */ - VPX_CS_BT_709 = 2, /**< BT.709 */ - VPX_CS_SMPTE_170 = 3, /**< SMPTE.170 */ - VPX_CS_SMPTE_240 = 4, /**< SMPTE.240 */ - 
VPX_CS_BT_2020 = 5, /**< BT.2020 */ - VPX_CS_RESERVED = 6, /**< Reserved */ - VPX_CS_SRGB = 7 /**< sRGB */ - } vpx_color_space_t; /**< alias for enum vpx_color_space */ +/*!\brief List of supported color range */ +typedef enum vpx_color_range { + VPX_CR_STUDIO_RANGE = 0, /**< Y [16..235], UV [16..240] */ + VPX_CR_FULL_RANGE = 1 /**< YUV/RGB [0..255] */ +} vpx_color_range_t; /**< alias for enum vpx_color_range */ - /*!\brief List of supported color range */ - typedef enum vpx_color_range { - VPX_CR_STUDIO_RANGE = 0, /**< Y [16..235], UV [16..240] */ - VPX_CR_FULL_RANGE = 1 /**< YUV/RGB [0..255] */ - } vpx_color_range_t; /**< alias for enum vpx_color_range */ +/**\brief Image Descriptor */ +typedef struct vpx_image { + vpx_img_fmt_t fmt; /**< Image Format */ + vpx_color_space_t cs; /**< Color Space */ + vpx_color_range_t range; /**< Color Range */ - /**\brief Image Descriptor */ - typedef struct vpx_image { - vpx_img_fmt_t fmt; /**< Image Format */ - vpx_color_space_t cs; /**< Color Space */ - vpx_color_range_t range; /**< Color Range */ + /* Image storage dimensions */ + unsigned int w; /**< Stored image width */ + unsigned int h; /**< Stored image height */ + unsigned int bit_depth; /**< Stored image bit-depth */ - /* Image storage dimensions */ - unsigned int w; /**< Stored image width */ - unsigned int h; /**< Stored image height */ - unsigned int bit_depth; /**< Stored image bit-depth */ + /* Image display dimensions */ + unsigned int d_w; /**< Displayed image width */ + unsigned int d_h; /**< Displayed image height */ - /* Image display dimensions */ - unsigned int d_w; /**< Displayed image width */ - unsigned int d_h; /**< Displayed image height */ + /* Image intended rendering dimensions */ + unsigned int r_w; /**< Intended rendering image width */ + unsigned int r_h; /**< Intended rendering image height */ - /* Image intended rendering dimensions */ - unsigned int r_w; /**< Intended rendering image width */ - unsigned int r_h; /**< Intended rendering image 
height */ + /* Chroma subsampling info */ + unsigned int x_chroma_shift; /**< subsampling order, X */ + unsigned int y_chroma_shift; /**< subsampling order, Y */ - /* Chroma subsampling info */ - unsigned int x_chroma_shift; /**< subsampling order, X */ - unsigned int y_chroma_shift; /**< subsampling order, Y */ +/* Image data pointers. */ +#define VPX_PLANE_PACKED 0 /**< To be used for all packed formats */ +#define VPX_PLANE_Y 0 /**< Y (Luminance) plane */ +#define VPX_PLANE_U 1 /**< U (Chroma) plane */ +#define VPX_PLANE_V 2 /**< V (Chroma) plane */ +#define VPX_PLANE_ALPHA 3 /**< A (Transparency) plane */ + unsigned char *planes[4]; /**< pointer to the top left pixel for each plane */ + int stride[4]; /**< stride between rows for each plane */ - /* Image data pointers. */ -#define VPX_PLANE_PACKED 0 /**< To be used for all packed formats */ -#define VPX_PLANE_Y 0 /**< Y (Luminance) plane */ -#define VPX_PLANE_U 1 /**< U (Chroma) plane */ -#define VPX_PLANE_V 2 /**< V (Chroma) plane */ -#define VPX_PLANE_ALPHA 3 /**< A (Transparency) plane */ - unsigned char *planes[4]; /**< pointer to the top left pixel for each plane */ - int stride[4]; /**< stride between rows for each plane */ + int bps; /**< bits per sample (for packed formats) */ - int bps; /**< bits per sample (for packed formats) */ - - /* The following member may be set by the application to associate data - * with this image. - */ - void *user_priv; /**< may be set by the application to associate data - * with this image. */ - - /* The following members should be treated as private. */ - unsigned char *img_data; /**< private */ - int img_data_owner; /**< private */ - int self_allocd; /**< private */ - - void *fb_priv; /**< Frame buffer data associated with the image. 
*/ - } vpx_image_t; /**< alias for struct vpx_image */ - - /**\brief Representation of a rectangle on a surface */ - typedef struct vpx_image_rect { - unsigned int x; /**< leftmost column */ - unsigned int y; /**< topmost row */ - unsigned int w; /**< width */ - unsigned int h; /**< height */ - } vpx_image_rect_t; /**< alias for struct vpx_image_rect */ - - /*!\brief Open a descriptor, allocating storage for the underlying image - * - * Returns a descriptor for storing an image of the given format. The - * storage for the descriptor is allocated on the heap. - * - * \param[in] img Pointer to storage for descriptor. If this parameter - * is NULL, the storage for the descriptor will be - * allocated on the heap. - * \param[in] fmt Format for the image - * \param[in] d_w Width of the image - * \param[in] d_h Height of the image - * \param[in] align Alignment, in bytes, of the image buffer and - * each row in the image(stride). - * - * \return Returns a pointer to the initialized image descriptor. If the img - * parameter is non-null, the value of the img parameter will be - * returned. + /*!\brief The following member may be set by the application to associate + * data with this image. */ - vpx_image_t *vpx_img_alloc(vpx_image_t *img, - vpx_img_fmt_t fmt, - unsigned int d_w, - unsigned int d_h, - unsigned int align); + void *user_priv; - /*!\brief Open a descriptor, using existing storage for the underlying image - * - * Returns a descriptor for storing an image of the given format. The - * storage for descriptor has been allocated elsewhere, and a descriptor is - * desired to "wrap" that storage. - * - * \param[in] img Pointer to storage for descriptor. If this parameter - * is NULL, the storage for the descriptor will be - * allocated on the heap. - * \param[in] fmt Format for the image - * \param[in] d_w Width of the image - * \param[in] d_h Height of the image - * \param[in] align Alignment, in bytes, of each row in the image. 
- * \param[in] img_data Storage to use for the image - * - * \return Returns a pointer to the initialized image descriptor. If the img - * parameter is non-null, the value of the img parameter will be - * returned. - */ - vpx_image_t *vpx_img_wrap(vpx_image_t *img, - vpx_img_fmt_t fmt, - unsigned int d_w, - unsigned int d_h, - unsigned int align, - unsigned char *img_data); + /* The following members should be treated as private. */ + unsigned char *img_data; /**< private */ + int img_data_owner; /**< private */ + int self_allocd; /**< private */ + void *fb_priv; /**< Frame buffer data associated with the image. */ +} vpx_image_t; /**< alias for struct vpx_image */ - /*!\brief Set the rectangle identifying the displayed portion of the image - * - * Updates the displayed rectangle (aka viewport) on the image surface to - * match the specified coordinates and size. - * - * \param[in] img Image descriptor - * \param[in] x leftmost column - * \param[in] y topmost row - * \param[in] w width - * \param[in] h height - * - * \return 0 if the requested rectangle is valid, nonzero otherwise. - */ - int vpx_img_set_rect(vpx_image_t *img, - unsigned int x, - unsigned int y, - unsigned int w, - unsigned int h); +/**\brief Representation of a rectangle on a surface */ +typedef struct vpx_image_rect { + unsigned int x; /**< leftmost column */ + unsigned int y; /**< topmost row */ + unsigned int w; /**< width */ + unsigned int h; /**< height */ +} vpx_image_rect_t; /**< alias for struct vpx_image_rect */ +/*!\brief Open a descriptor, allocating storage for the underlying image + * + * Returns a descriptor for storing an image of the given format. The + * storage for the descriptor is allocated on the heap. + * + * \param[in] img Pointer to storage for descriptor. If this parameter + * is NULL, the storage for the descriptor will be + * allocated on the heap. 
+ * \param[in] fmt Format for the image + * \param[in] d_w Width of the image + * \param[in] d_h Height of the image + * \param[in] align Alignment, in bytes, of the image buffer and + * each row in the image(stride). + * + * \return Returns a pointer to the initialized image descriptor. If the img + * parameter is non-null, the value of the img parameter will be + * returned. + */ +vpx_image_t *vpx_img_alloc(vpx_image_t *img, vpx_img_fmt_t fmt, + unsigned int d_w, unsigned int d_h, + unsigned int align); - /*!\brief Flip the image vertically (top for bottom) - * - * Adjusts the image descriptor's pointers and strides to make the image - * be referenced upside-down. - * - * \param[in] img Image descriptor - */ - void vpx_img_flip(vpx_image_t *img); +/*!\brief Open a descriptor, using existing storage for the underlying image + * + * Returns a descriptor for storing an image of the given format. The + * storage for descriptor has been allocated elsewhere, and a descriptor is + * desired to "wrap" that storage. + * + * \param[in] img Pointer to storage for descriptor. If this parameter + * is NULL, the storage for the descriptor will be + * allocated on the heap. + * \param[in] fmt Format for the image + * \param[in] d_w Width of the image + * \param[in] d_h Height of the image + * \param[in] align Alignment, in bytes, of each row in the image. + * \param[in] img_data Storage to use for the image + * + * \return Returns a pointer to the initialized image descriptor. If the img + * parameter is non-null, the value of the img parameter will be + * returned. + */ +vpx_image_t *vpx_img_wrap(vpx_image_t *img, vpx_img_fmt_t fmt, unsigned int d_w, + unsigned int d_h, unsigned int align, + unsigned char *img_data); - /*!\brief Close an image descriptor - * - * Frees all allocated storage associated with an image descriptor. 
- * - * \param[in] img Image descriptor - */ - void vpx_img_free(vpx_image_t *img); +/*!\brief Set the rectangle identifying the displayed portion of the image + * + * Updates the displayed rectangle (aka viewport) on the image surface to + * match the specified coordinates and size. + * + * \param[in] img Image descriptor + * \param[in] x leftmost column + * \param[in] y topmost row + * \param[in] w width + * \param[in] h height + * + * \return 0 if the requested rectangle is valid, nonzero otherwise. + */ +int vpx_img_set_rect(vpx_image_t *img, unsigned int x, unsigned int y, + unsigned int w, unsigned int h); + +/*!\brief Flip the image vertically (top for bottom) + * + * Adjusts the image descriptor's pointers and strides to make the image + * be referenced upside-down. + * + * \param[in] img Image descriptor + */ +void vpx_img_flip(vpx_image_t *img); + +/*!\brief Close an image descriptor + * + * Frees all allocated storage associated with an image descriptor. + * + * \param[in] img Image descriptor + */ +void vpx_img_free(vpx_image_t *img); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vpx/vpx_integer.h b/libs/libvpx/vpx/vpx_integer.h index 829c9d132c..09bad9222d 100644 --- a/libs/libvpx/vpx/vpx_integer.h +++ b/libs/libvpx/vpx/vpx_integer.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VPX_VPX_INTEGER_H_ #define VPX_VPX_INTEGER_H_ @@ -24,24 +23,14 @@ #define VPX_INLINE inline #endif -#if (defined(_MSC_VER) && (_MSC_VER < 1600)) || defined(VPX_EMULATE_INTTYPES) -typedef signed char int8_t; +#if defined(VPX_EMULATE_INTTYPES) +typedef signed char int8_t; typedef signed short int16_t; -typedef signed int int32_t; +typedef signed int int32_t; -typedef unsigned char uint8_t; +typedef unsigned char uint8_t; typedef unsigned short uint16_t; -typedef unsigned int uint32_t; - -#if (defined(_MSC_VER) && (_MSC_VER < 1600)) -typedef signed __int64 int64_t; -typedef unsigned __int64 uint64_t; -#define INT64_MAX _I64_MAX -#define INT32_MAX _I32_MAX -#define INT32_MIN _I32_MIN -#define INT16_MAX _I16_MAX -#define INT16_MIN _I16_MIN -#endif +typedef unsigned int uint32_t; #ifndef _UINTPTR_T_DEFINED typedef size_t uintptr_t; @@ -52,12 +41,12 @@ typedef size_t uintptr_t; /* Most platforms have the C99 standard integer types. */ #if defined(__cplusplus) -# if !defined(__STDC_FORMAT_MACROS) -# define __STDC_FORMAT_MACROS -# endif -# if !defined(__STDC_LIMIT_MACROS) -# define __STDC_LIMIT_MACROS -# endif +#if !defined(__STDC_FORMAT_MACROS) +#define __STDC_FORMAT_MACROS +#endif +#if !defined(__STDC_LIMIT_MACROS) +#define __STDC_LIMIT_MACROS +#endif #endif // __cplusplus #include diff --git a/libs/libvpx/vpx_dsp/add_noise.c b/libs/libvpx/vpx_dsp/add_noise.c new file mode 100644 index 0000000000..a2b4c9010f --- /dev/null +++ b/libs/libvpx/vpx_dsp/add_noise.c @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2015 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include +#include + +#include "./vpx_config.h" +#include "./vpx_dsp_rtcd.h" + +#include "vpx/vpx_integer.h" +#include "vpx_ports/mem.h" + +void vpx_plane_add_noise_c(uint8_t *start, const int8_t *noise, int blackclamp, + int whiteclamp, int width, int height, int pitch) { + int i, j; + int bothclamp = blackclamp + whiteclamp; + for (i = 0; i < height; ++i) { + uint8_t *pos = start + i * pitch; + const int8_t *ref = (const int8_t *)(noise + (rand() & 0xff)); // NOLINT + + for (j = 0; j < width; ++j) { + int v = pos[j]; + + v = clamp(v - blackclamp, 0, 255); + v = clamp(v + bothclamp, 0, 255); + v = clamp(v - whiteclamp, 0, 255); + + pos[j] = v + ref[j]; + } + } +} + +static double gaussian(double sigma, double mu, double x) { + return 1 / (sigma * sqrt(2.0 * 3.14159265)) * + (exp(-(x - mu) * (x - mu) / (2 * sigma * sigma))); +} + +int vpx_setup_noise(double sigma, int8_t *noise, int size) { + int8_t char_dist[256]; + int next = 0, i, j; + + // set up a 256 entry lookup that matches gaussian distribution + for (i = -32; i < 32; ++i) { + const int a_i = (int)(0.5 + 256 * gaussian(sigma, 0, i)); + if (a_i) { + for (j = 0; j < a_i; ++j) { + char_dist[next + j] = (int8_t)i; + } + next = next + j; + } + } + + // Rounding error - might mean we have less than 256. + for (; next < 256; ++next) { + char_dist[next] = 0; + } + + for (i = 0; i < size; ++i) { + noise[i] = char_dist[rand() & 0xff]; // NOLINT + } + + // Returns the highest non 0 value used in distribution. 
+ return -char_dist[0]; +} diff --git a/libs/libvpx/vpx_dsp/arm/avg_neon.c b/libs/libvpx/vpx_dsp/arm/avg_neon.c index d054c4185f..001517d33e 100644 --- a/libs/libvpx/vpx_dsp/arm/avg_neon.c +++ b/libs/libvpx/vpx_dsp/arm/avg_neon.c @@ -197,3 +197,57 @@ int vpx_vector_var_neon(int16_t const *ref, int16_t const *src, const int bwl) { return s - ((t * t) >> shift_factor); } } + +void vpx_minmax_8x8_neon(const uint8_t *a, int a_stride, const uint8_t *b, + int b_stride, int *min, int *max) { + // Load and concatenate. + const uint8x16_t a01 = vcombine_u8(vld1_u8(a), vld1_u8(a + a_stride)); + const uint8x16_t a23 = + vcombine_u8(vld1_u8(a + 2 * a_stride), vld1_u8(a + 3 * a_stride)); + const uint8x16_t a45 = + vcombine_u8(vld1_u8(a + 4 * a_stride), vld1_u8(a + 5 * a_stride)); + const uint8x16_t a67 = + vcombine_u8(vld1_u8(a + 6 * a_stride), vld1_u8(a + 7 * a_stride)); + + const uint8x16_t b01 = vcombine_u8(vld1_u8(b), vld1_u8(b + b_stride)); + const uint8x16_t b23 = + vcombine_u8(vld1_u8(b + 2 * b_stride), vld1_u8(b + 3 * b_stride)); + const uint8x16_t b45 = + vcombine_u8(vld1_u8(b + 4 * b_stride), vld1_u8(b + 5 * b_stride)); + const uint8x16_t b67 = + vcombine_u8(vld1_u8(b + 6 * b_stride), vld1_u8(b + 7 * b_stride)); + + // Absolute difference. + const uint8x16_t ab01_diff = vabdq_u8(a01, b01); + const uint8x16_t ab23_diff = vabdq_u8(a23, b23); + const uint8x16_t ab45_diff = vabdq_u8(a45, b45); + const uint8x16_t ab67_diff = vabdq_u8(a67, b67); + + // Max values between the Q vectors. + const uint8x16_t ab0123_max = vmaxq_u8(ab01_diff, ab23_diff); + const uint8x16_t ab4567_max = vmaxq_u8(ab45_diff, ab67_diff); + const uint8x16_t ab0123_min = vminq_u8(ab01_diff, ab23_diff); + const uint8x16_t ab4567_min = vminq_u8(ab45_diff, ab67_diff); + + const uint8x16_t ab07_max = vmaxq_u8(ab0123_max, ab4567_max); + const uint8x16_t ab07_min = vminq_u8(ab0123_min, ab4567_min); + + // Split to D and start doing pairwise. 
+ uint8x8_t ab_max = vmax_u8(vget_high_u8(ab07_max), vget_low_u8(ab07_max)); + uint8x8_t ab_min = vmin_u8(vget_high_u8(ab07_min), vget_low_u8(ab07_min)); + + // Enough runs of vpmax/min propogate the max/min values to every position. + ab_max = vpmax_u8(ab_max, ab_max); + ab_min = vpmin_u8(ab_min, ab_min); + + ab_max = vpmax_u8(ab_max, ab_max); + ab_min = vpmin_u8(ab_min, ab_min); + + ab_max = vpmax_u8(ab_max, ab_max); + ab_min = vpmin_u8(ab_min, ab_min); + + *min = *max = 0; // Clear high bits + // Store directly to avoid costly neon->gpr transfer. + vst1_lane_u8((uint8_t *)max, ab_max, 0); + vst1_lane_u8((uint8_t *)min, ab_min, 0); +} diff --git a/libs/libvpx/vpx_dsp/arm/bilinear_filter_media.asm b/libs/libvpx/vpx_dsp/arm/bilinear_filter_media.asm deleted file mode 100644 index f3f9754c11..0000000000 --- a/libs/libvpx/vpx_dsp/arm/bilinear_filter_media.asm +++ /dev/null @@ -1,237 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - - - EXPORT |vpx_filter_block2d_bil_first_pass_media| - EXPORT |vpx_filter_block2d_bil_second_pass_media| - - AREA |.text|, CODE, READONLY ; name this block of code - -;------------------------------------- -; r0 unsigned char *src_ptr, -; r1 unsigned short *dst_ptr, -; r2 unsigned int src_pitch, -; r3 unsigned int height, -; stack unsigned int width, -; stack const short *vpx_filter -;------------------------------------- -; The output is transposed stroed in output array to make it easy for second pass filtering. 
-|vpx_filter_block2d_bil_first_pass_media| PROC - stmdb sp!, {r4 - r11, lr} - - ldr r11, [sp, #40] ; vpx_filter address - ldr r4, [sp, #36] ; width - - mov r12, r3 ; outer-loop counter - - add r7, r2, r4 ; preload next row - pld [r0, r7] - - sub r2, r2, r4 ; src increment for height loop - - ldr r5, [r11] ; load up filter coefficients - - mov r3, r3, lsl #1 ; height*2 - add r3, r3, #2 ; plus 2 to make output buffer 4-bit aligned since height is actually (height+1) - - mov r11, r1 ; save dst_ptr for each row - - cmp r5, #128 ; if filter coef = 128, then skip the filter - beq bil_null_1st_filter - -|bil_height_loop_1st_v6| - ldrb r6, [r0] ; load source data - ldrb r7, [r0, #1] - ldrb r8, [r0, #2] - mov lr, r4, lsr #2 ; 4-in-parellel loop counter - -|bil_width_loop_1st_v6| - ldrb r9, [r0, #3] - ldrb r10, [r0, #4] - - pkhbt r6, r6, r7, lsl #16 ; src[1] | src[0] - pkhbt r7, r7, r8, lsl #16 ; src[2] | src[1] - - smuad r6, r6, r5 ; apply the filter - pkhbt r8, r8, r9, lsl #16 ; src[3] | src[2] - smuad r7, r7, r5 - pkhbt r9, r9, r10, lsl #16 ; src[4] | src[3] - - smuad r8, r8, r5 - smuad r9, r9, r5 - - add r0, r0, #4 - subs lr, lr, #1 - - add r6, r6, #0x40 ; round_shift_and_clamp - add r7, r7, #0x40 - usat r6, #16, r6, asr #7 - usat r7, #16, r7, asr #7 - - strh r6, [r1], r3 ; result is transposed and stored - - add r8, r8, #0x40 ; round_shift_and_clamp - strh r7, [r1], r3 - add r9, r9, #0x40 - usat r8, #16, r8, asr #7 - usat r9, #16, r9, asr #7 - - strh r8, [r1], r3 ; result is transposed and stored - - ldrneb r6, [r0] ; load source data - strh r9, [r1], r3 - - ldrneb r7, [r0, #1] - ldrneb r8, [r0, #2] - - bne bil_width_loop_1st_v6 - - add r0, r0, r2 ; move to next input row - subs r12, r12, #1 - - add r9, r2, r4, lsl #1 ; adding back block width - pld [r0, r9] ; preload next row - - add r11, r11, #2 ; move over to next column - mov r1, r11 - - bne bil_height_loop_1st_v6 - - ldmia sp!, {r4 - r11, pc} - -|bil_null_1st_filter| -|bil_height_loop_null_1st| - mov lr, r4, lsr #2 
; loop counter - -|bil_width_loop_null_1st| - ldrb r6, [r0] ; load data - ldrb r7, [r0, #1] - ldrb r8, [r0, #2] - ldrb r9, [r0, #3] - - strh r6, [r1], r3 ; store it to immediate buffer - add r0, r0, #4 - strh r7, [r1], r3 - subs lr, lr, #1 - strh r8, [r1], r3 - strh r9, [r1], r3 - - bne bil_width_loop_null_1st - - subs r12, r12, #1 - add r0, r0, r2 ; move to next input line - add r11, r11, #2 ; move over to next column - mov r1, r11 - - bne bil_height_loop_null_1st - - ldmia sp!, {r4 - r11, pc} - - ENDP ; |vpx_filter_block2d_bil_first_pass_media| - - -;--------------------------------- -; r0 unsigned short *src_ptr, -; r1 unsigned char *dst_ptr, -; r2 int dst_pitch, -; r3 unsigned int height, -; stack unsigned int width, -; stack const short *vpx_filter -;--------------------------------- -|vpx_filter_block2d_bil_second_pass_media| PROC - stmdb sp!, {r4 - r11, lr} - - ldr r11, [sp, #40] ; vpx_filter address - ldr r4, [sp, #36] ; width - - ldr r5, [r11] ; load up filter coefficients - mov r12, r4 ; outer-loop counter = width, since we work on transposed data matrix - mov r11, r1 - - cmp r5, #128 ; if filter coef = 128, then skip the filter - beq bil_null_2nd_filter - -|bil_height_loop_2nd| - ldr r6, [r0] ; load the data - ldr r8, [r0, #4] - ldrh r10, [r0, #8] - mov lr, r3, lsr #2 ; loop counter - -|bil_width_loop_2nd| - pkhtb r7, r6, r8 ; src[1] | src[2] - pkhtb r9, r8, r10 ; src[3] | src[4] - - smuad r6, r6, r5 ; apply filter - smuad r8, r8, r5 ; apply filter - - subs lr, lr, #1 - - smuadx r7, r7, r5 ; apply filter - smuadx r9, r9, r5 ; apply filter - - add r0, r0, #8 - - add r6, r6, #0x40 ; round_shift_and_clamp - add r7, r7, #0x40 - usat r6, #8, r6, asr #7 - usat r7, #8, r7, asr #7 - strb r6, [r1], r2 ; the result is transposed back and stored - - add r8, r8, #0x40 ; round_shift_and_clamp - strb r7, [r1], r2 - add r9, r9, #0x40 - usat r8, #8, r8, asr #7 - usat r9, #8, r9, asr #7 - strb r8, [r1], r2 ; the result is transposed back and stored - - ldrne r6, [r0] ; 
load data - strb r9, [r1], r2 - ldrne r8, [r0, #4] - ldrneh r10, [r0, #8] - - bne bil_width_loop_2nd - - subs r12, r12, #1 - add r0, r0, #4 ; update src for next row - add r11, r11, #1 - mov r1, r11 - - bne bil_height_loop_2nd - ldmia sp!, {r4 - r11, pc} - -|bil_null_2nd_filter| -|bil_height_loop_null_2nd| - mov lr, r3, lsr #2 - -|bil_width_loop_null_2nd| - ldr r6, [r0], #4 ; load data - subs lr, lr, #1 - ldr r8, [r0], #4 - - strb r6, [r1], r2 ; store data - mov r7, r6, lsr #16 - strb r7, [r1], r2 - mov r9, r8, lsr #16 - strb r8, [r1], r2 - strb r9, [r1], r2 - - bne bil_width_loop_null_2nd - - subs r12, r12, #1 - add r0, r0, #4 - add r11, r11, #1 - mov r1, r11 - - bne bil_height_loop_null_2nd - - ldmia sp!, {r4 - r11, pc} - ENDP ; |vpx_filter_block2d_second_pass_media| - - END diff --git a/libs/libvpx/vpx_dsp/arm/fwd_txfm_neon.c b/libs/libvpx/vpx_dsp/arm/fwd_txfm_neon.c index 9f9de98d90..7cb2ba90d2 100644 --- a/libs/libvpx/vpx_dsp/arm/fwd_txfm_neon.c +++ b/libs/libvpx/vpx_dsp/arm/fwd_txfm_neon.c @@ -131,14 +131,14 @@ void vpx_fdct8x8_neon(const int16_t *input, int16_t *final_output, int stride) { // 14 15 16 17 54 55 56 57 // 24 25 26 27 64 65 66 67 // 34 35 36 37 74 75 76 77 - const int32x4x2_t r02_s32 = vtrnq_s32(vreinterpretq_s32_s16(out_0), - vreinterpretq_s32_s16(out_2)); - const int32x4x2_t r13_s32 = vtrnq_s32(vreinterpretq_s32_s16(out_1), - vreinterpretq_s32_s16(out_3)); - const int32x4x2_t r46_s32 = vtrnq_s32(vreinterpretq_s32_s16(out_4), - vreinterpretq_s32_s16(out_6)); - const int32x4x2_t r57_s32 = vtrnq_s32(vreinterpretq_s32_s16(out_5), - vreinterpretq_s32_s16(out_7)); + const int32x4x2_t r02_s32 = + vtrnq_s32(vreinterpretq_s32_s16(out_0), vreinterpretq_s32_s16(out_2)); + const int32x4x2_t r13_s32 = + vtrnq_s32(vreinterpretq_s32_s16(out_1), vreinterpretq_s32_s16(out_3)); + const int32x4x2_t r46_s32 = + vtrnq_s32(vreinterpretq_s32_s16(out_4), vreinterpretq_s32_s16(out_6)); + const int32x4x2_t r57_s32 = + vtrnq_s32(vreinterpretq_s32_s16(out_5), 
vreinterpretq_s32_s16(out_7)); const int16x8x2_t r01_s16 = vtrnq_s16(vreinterpretq_s16_s32(r02_s32.val[0]), vreinterpretq_s16_s32(r13_s32.val[0])); diff --git a/libs/libvpx/vpx_dsp/arm/hadamard_neon.c b/libs/libvpx/vpx_dsp/arm/hadamard_neon.c new file mode 100644 index 0000000000..977323497a --- /dev/null +++ b/libs/libvpx/vpx_dsp/arm/hadamard_neon.c @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include + +#include "./vpx_dsp_rtcd.h" +#include "vpx_dsp/arm/transpose_neon.h" + +static void hadamard8x8_one_pass(int16x8_t *a0, int16x8_t *a1, int16x8_t *a2, + int16x8_t *a3, int16x8_t *a4, int16x8_t *a5, + int16x8_t *a6, int16x8_t *a7) { + const int16x8_t b0 = vaddq_s16(*a0, *a1); + const int16x8_t b1 = vsubq_s16(*a0, *a1); + const int16x8_t b2 = vaddq_s16(*a2, *a3); + const int16x8_t b3 = vsubq_s16(*a2, *a3); + const int16x8_t b4 = vaddq_s16(*a4, *a5); + const int16x8_t b5 = vsubq_s16(*a4, *a5); + const int16x8_t b6 = vaddq_s16(*a6, *a7); + const int16x8_t b7 = vsubq_s16(*a6, *a7); + + const int16x8_t c0 = vaddq_s16(b0, b2); + const int16x8_t c1 = vaddq_s16(b1, b3); + const int16x8_t c2 = vsubq_s16(b0, b2); + const int16x8_t c3 = vsubq_s16(b1, b3); + const int16x8_t c4 = vaddq_s16(b4, b6); + const int16x8_t c5 = vaddq_s16(b5, b7); + const int16x8_t c6 = vsubq_s16(b4, b6); + const int16x8_t c7 = vsubq_s16(b5, b7); + + *a0 = vaddq_s16(c0, c4); + *a1 = vsubq_s16(c2, c6); + *a2 = vsubq_s16(c0, c4); + *a3 = vaddq_s16(c2, c6); + *a4 = vaddq_s16(c3, c7); + *a5 = vsubq_s16(c3, c7); + *a6 = vsubq_s16(c1, c5); + *a7 = vaddq_s16(c1, c5); +} + +void vpx_hadamard_8x8_neon(const int16_t *src_diff, int 
src_stride, + int16_t *coeff) { + int16x8_t a0 = vld1q_s16(src_diff); + int16x8_t a1 = vld1q_s16(src_diff + src_stride); + int16x8_t a2 = vld1q_s16(src_diff + 2 * src_stride); + int16x8_t a3 = vld1q_s16(src_diff + 3 * src_stride); + int16x8_t a4 = vld1q_s16(src_diff + 4 * src_stride); + int16x8_t a5 = vld1q_s16(src_diff + 5 * src_stride); + int16x8_t a6 = vld1q_s16(src_diff + 6 * src_stride); + int16x8_t a7 = vld1q_s16(src_diff + 7 * src_stride); + + hadamard8x8_one_pass(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7); + + transpose_s16_8x8(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7); + + hadamard8x8_one_pass(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7); + + // Skip the second transpose because it is not required. + + vst1q_s16(coeff + 0, a0); + vst1q_s16(coeff + 8, a1); + vst1q_s16(coeff + 16, a2); + vst1q_s16(coeff + 24, a3); + vst1q_s16(coeff + 32, a4); + vst1q_s16(coeff + 40, a5); + vst1q_s16(coeff + 48, a6); + vst1q_s16(coeff + 56, a7); +} + +void vpx_hadamard_16x16_neon(const int16_t *src_diff, int src_stride, + int16_t *coeff) { + int i; + + /* Rearrange 16x16 to 8x32 and remove stride. + * Top left first. */ + vpx_hadamard_8x8_neon(src_diff + 0 + 0 * src_stride, src_stride, coeff + 0); + /* Top right. */ + vpx_hadamard_8x8_neon(src_diff + 8 + 0 * src_stride, src_stride, coeff + 64); + /* Bottom left. */ + vpx_hadamard_8x8_neon(src_diff + 0 + 8 * src_stride, src_stride, coeff + 128); + /* Bottom right. 
*/ + vpx_hadamard_8x8_neon(src_diff + 8 + 8 * src_stride, src_stride, coeff + 192); + + for (i = 0; i < 64; i += 8) { + const int16x8_t a0 = vld1q_s16(coeff + 0); + const int16x8_t a1 = vld1q_s16(coeff + 64); + const int16x8_t a2 = vld1q_s16(coeff + 128); + const int16x8_t a3 = vld1q_s16(coeff + 192); + + const int16x8_t b0 = vhaddq_s16(a0, a1); + const int16x8_t b1 = vhsubq_s16(a0, a1); + const int16x8_t b2 = vhaddq_s16(a2, a3); + const int16x8_t b3 = vhsubq_s16(a2, a3); + + const int16x8_t c0 = vaddq_s16(b0, b2); + const int16x8_t c1 = vaddq_s16(b1, b3); + const int16x8_t c2 = vsubq_s16(b0, b2); + const int16x8_t c3 = vsubq_s16(b1, b3); + + vst1q_s16(coeff + 0, c0); + vst1q_s16(coeff + 64, c1); + vst1q_s16(coeff + 128, c2); + vst1q_s16(coeff + 192, c3); + + coeff += 8; + } +} diff --git a/libs/libvpx/vpx_dsp/arm/idct16x16_1_add_neon.c b/libs/libvpx/vpx_dsp/arm/idct16x16_1_add_neon.c index f734e48027..466b408893 100644 --- a/libs/libvpx/vpx_dsp/arm/idct16x16_1_add_neon.c +++ b/libs/libvpx/vpx_dsp/arm/idct16x16_1_add_neon.c @@ -13,49 +13,46 @@ #include "vpx_dsp/inv_txfm.h" #include "vpx_ports/mem.h" -void vpx_idct16x16_1_add_neon( - int16_t *input, - uint8_t *dest, - int dest_stride) { - uint8x8_t d2u8, d3u8, d30u8, d31u8; - uint64x1_t d2u64, d3u64, d4u64, d5u64; - uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16; - int16x8_t q0s16; - uint8_t *d1, *d2; - int16_t i, j, a1, cospi_16_64 = 11585; - int16_t out = dct_const_round_shift(input[0] * cospi_16_64); - out = dct_const_round_shift(out * cospi_16_64); - a1 = ROUND_POWER_OF_TWO(out, 6); +void vpx_idct16x16_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { + uint8x8_t d2u8, d3u8, d30u8, d31u8; + uint64x1_t d2u64, d3u64, d4u64, d5u64; + uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16; + int16x8_t q0s16; + uint8_t *d1, *d2; + int16_t i, j, a1, cospi_16_64 = 11585; + int16_t out = dct_const_round_shift(input[0] * cospi_16_64); + out = dct_const_round_shift(out * cospi_16_64); + a1 = ROUND_POWER_OF_TWO(out, 
6); - q0s16 = vdupq_n_s16(a1); - q0u16 = vreinterpretq_u16_s16(q0s16); + q0s16 = vdupq_n_s16(a1); + q0u16 = vreinterpretq_u16_s16(q0s16); - for (d1 = d2 = dest, i = 0; i < 4; i++) { - for (j = 0; j < 2; j++) { - d2u64 = vld1_u64((const uint64_t *)d1); - d3u64 = vld1_u64((const uint64_t *)(d1 + 8)); - d1 += dest_stride; - d4u64 = vld1_u64((const uint64_t *)d1); - d5u64 = vld1_u64((const uint64_t *)(d1 + 8)); - d1 += dest_stride; + for (d1 = d2 = dest, i = 0; i < 4; i++) { + for (j = 0; j < 2; j++) { + d2u64 = vld1_u64((const uint64_t *)d1); + d3u64 = vld1_u64((const uint64_t *)(d1 + 8)); + d1 += dest_stride; + d4u64 = vld1_u64((const uint64_t *)d1); + d5u64 = vld1_u64((const uint64_t *)(d1 + 8)); + d1 += dest_stride; - q9u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d2u64)); - q10u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d3u64)); - q11u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d4u64)); - q12u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d5u64)); + q9u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d2u64)); + q10u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d3u64)); + q11u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d4u64)); + q12u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d5u64)); - d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); - d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); - d30u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); - d31u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16)); + d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); + d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); + d30u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); + d31u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16)); - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); - vst1_u64((uint64_t *)(d2 + 8), vreinterpret_u64_u8(d3u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d30u8)); - vst1_u64((uint64_t *)(d2 + 8), vreinterpret_u64_u8(d31u8)); - d2 += dest_stride; - } + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); + vst1_u64((uint64_t *)(d2 + 8), vreinterpret_u64_u8(d3u8)); + d2 
+= dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d30u8)); + vst1_u64((uint64_t *)(d2 + 8), vreinterpret_u64_u8(d31u8)); + d2 += dest_stride; } - return; + } + return; } diff --git a/libs/libvpx/vpx_dsp/arm/idct16x16_add_neon.c b/libs/libvpx/vpx_dsp/arm/idct16x16_add_neon.c index 651ebb21f9..5ca2afe488 100644 --- a/libs/libvpx/vpx_dsp/arm/idct16x16_add_neon.c +++ b/libs/libvpx/vpx_dsp/arm/idct16x16_add_neon.c @@ -11,1177 +11,675 @@ #include #include "./vpx_config.h" +#include "vpx_dsp/arm/transpose_neon.h" #include "vpx_dsp/txfm_common.h" -static INLINE void TRANSPOSE8X8( - int16x8_t *q8s16, - int16x8_t *q9s16, - int16x8_t *q10s16, - int16x8_t *q11s16, - int16x8_t *q12s16, - int16x8_t *q13s16, - int16x8_t *q14s16, - int16x8_t *q15s16) { - int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; - int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; - int32x4x2_t q0x2s32, q1x2s32, q2x2s32, q3x2s32; - int16x8x2_t q0x2s16, q1x2s16, q2x2s16, q3x2s16; +void vpx_idct16x16_256_add_neon_pass1(int16_t *in, int16_t *out, + int output_stride) { + int16x4_t d0s16, d1s16, d2s16, d3s16; + int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; + int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; + int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; + uint64x1_t d16u64, d17u64, d18u64, d19u64, d20u64, d21u64, d22u64, d23u64; + uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64; + int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; + int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; + int32x4_t q0s32, q1s32, q2s32, q3s32, q5s32, q6s32, q9s32; + int32x4_t q10s32, q11s32, q12s32, q13s32, q15s32; + int16x8x2_t q0x2s16; - d16s16 = vget_low_s16(*q8s16); - d17s16 = vget_high_s16(*q8s16); - d18s16 = vget_low_s16(*q9s16); - d19s16 = vget_high_s16(*q9s16); - d20s16 = vget_low_s16(*q10s16); - d21s16 = 
vget_high_s16(*q10s16); - d22s16 = vget_low_s16(*q11s16); - d23s16 = vget_high_s16(*q11s16); - d24s16 = vget_low_s16(*q12s16); - d25s16 = vget_high_s16(*q12s16); - d26s16 = vget_low_s16(*q13s16); - d27s16 = vget_high_s16(*q13s16); - d28s16 = vget_low_s16(*q14s16); - d29s16 = vget_high_s16(*q14s16); - d30s16 = vget_low_s16(*q15s16); - d31s16 = vget_high_s16(*q15s16); + q0x2s16 = vld2q_s16(in); + q8s16 = q0x2s16.val[0]; + in += 16; + q0x2s16 = vld2q_s16(in); + q9s16 = q0x2s16.val[0]; + in += 16; + q0x2s16 = vld2q_s16(in); + q10s16 = q0x2s16.val[0]; + in += 16; + q0x2s16 = vld2q_s16(in); + q11s16 = q0x2s16.val[0]; + in += 16; + q0x2s16 = vld2q_s16(in); + q12s16 = q0x2s16.val[0]; + in += 16; + q0x2s16 = vld2q_s16(in); + q13s16 = q0x2s16.val[0]; + in += 16; + q0x2s16 = vld2q_s16(in); + q14s16 = q0x2s16.val[0]; + in += 16; + q0x2s16 = vld2q_s16(in); + q15s16 = q0x2s16.val[0]; - *q8s16 = vcombine_s16(d16s16, d24s16); // vswp d17, d24 - *q9s16 = vcombine_s16(d18s16, d26s16); // vswp d19, d26 - *q10s16 = vcombine_s16(d20s16, d28s16); // vswp d21, d28 - *q11s16 = vcombine_s16(d22s16, d30s16); // vswp d23, d30 - *q12s16 = vcombine_s16(d17s16, d25s16); - *q13s16 = vcombine_s16(d19s16, d27s16); - *q14s16 = vcombine_s16(d21s16, d29s16); - *q15s16 = vcombine_s16(d23s16, d31s16); + transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); - q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q8s16), - vreinterpretq_s32_s16(*q10s16)); - q1x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q9s16), - vreinterpretq_s32_s16(*q11s16)); - q2x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q12s16), - vreinterpretq_s32_s16(*q14s16)); - q3x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q13s16), - vreinterpretq_s32_s16(*q15s16)); + d16s16 = vget_low_s16(q8s16); + d17s16 = vget_high_s16(q8s16); + d18s16 = vget_low_s16(q9s16); + d19s16 = vget_high_s16(q9s16); + d20s16 = vget_low_s16(q10s16); + d21s16 = vget_high_s16(q10s16); + d22s16 = vget_low_s16(q11s16); + d23s16 = vget_high_s16(q11s16); + 
d24s16 = vget_low_s16(q12s16); + d25s16 = vget_high_s16(q12s16); + d26s16 = vget_low_s16(q13s16); + d27s16 = vget_high_s16(q13s16); + d28s16 = vget_low_s16(q14s16); + d29s16 = vget_high_s16(q14s16); + d30s16 = vget_low_s16(q15s16); + d31s16 = vget_high_s16(q15s16); - q0x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[0]), // q8 - vreinterpretq_s16_s32(q1x2s32.val[0])); // q9 - q1x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[1]), // q10 - vreinterpretq_s16_s32(q1x2s32.val[1])); // q11 - q2x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[0]), // q12 - vreinterpretq_s16_s32(q3x2s32.val[0])); // q13 - q3x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[1]), // q14 - vreinterpretq_s16_s32(q3x2s32.val[1])); // q15 + // stage 3 + d0s16 = vdup_n_s16(cospi_28_64); + d1s16 = vdup_n_s16(cospi_4_64); - *q8s16 = q0x2s16.val[0]; - *q9s16 = q0x2s16.val[1]; - *q10s16 = q1x2s16.val[0]; - *q11s16 = q1x2s16.val[1]; - *q12s16 = q2x2s16.val[0]; - *q13s16 = q2x2s16.val[1]; - *q14s16 = q3x2s16.val[0]; - *q15s16 = q3x2s16.val[1]; - return; + q2s32 = vmull_s16(d18s16, d0s16); + q3s32 = vmull_s16(d19s16, d0s16); + q5s32 = vmull_s16(d18s16, d1s16); + q6s32 = vmull_s16(d19s16, d1s16); + + q2s32 = vmlsl_s16(q2s32, d30s16, d1s16); + q3s32 = vmlsl_s16(q3s32, d31s16, d1s16); + q5s32 = vmlal_s16(q5s32, d30s16, d0s16); + q6s32 = vmlal_s16(q6s32, d31s16, d0s16); + + d2s16 = vdup_n_s16(cospi_12_64); + d3s16 = vdup_n_s16(cospi_20_64); + + d8s16 = vqrshrn_n_s32(q2s32, 14); + d9s16 = vqrshrn_n_s32(q3s32, 14); + d14s16 = vqrshrn_n_s32(q5s32, 14); + d15s16 = vqrshrn_n_s32(q6s32, 14); + q4s16 = vcombine_s16(d8s16, d9s16); + q7s16 = vcombine_s16(d14s16, d15s16); + + q2s32 = vmull_s16(d26s16, d2s16); + q3s32 = vmull_s16(d27s16, d2s16); + q9s32 = vmull_s16(d26s16, d3s16); + q15s32 = vmull_s16(d27s16, d3s16); + + q2s32 = vmlsl_s16(q2s32, d22s16, d3s16); + q3s32 = vmlsl_s16(q3s32, d23s16, d3s16); + q9s32 = vmlal_s16(q9s32, d22s16, d2s16); + q15s32 = vmlal_s16(q15s32, d23s16, d2s16); + + d10s16 = 
vqrshrn_n_s32(q2s32, 14); + d11s16 = vqrshrn_n_s32(q3s32, 14); + d12s16 = vqrshrn_n_s32(q9s32, 14); + d13s16 = vqrshrn_n_s32(q15s32, 14); + q5s16 = vcombine_s16(d10s16, d11s16); + q6s16 = vcombine_s16(d12s16, d13s16); + + // stage 4 + d30s16 = vdup_n_s16(cospi_16_64); + + q2s32 = vmull_s16(d16s16, d30s16); + q11s32 = vmull_s16(d17s16, d30s16); + q0s32 = vmull_s16(d24s16, d30s16); + q1s32 = vmull_s16(d25s16, d30s16); + + d30s16 = vdup_n_s16(cospi_24_64); + d31s16 = vdup_n_s16(cospi_8_64); + + q3s32 = vaddq_s32(q2s32, q0s32); + q12s32 = vaddq_s32(q11s32, q1s32); + q13s32 = vsubq_s32(q2s32, q0s32); + q1s32 = vsubq_s32(q11s32, q1s32); + + d16s16 = vqrshrn_n_s32(q3s32, 14); + d17s16 = vqrshrn_n_s32(q12s32, 14); + d18s16 = vqrshrn_n_s32(q13s32, 14); + d19s16 = vqrshrn_n_s32(q1s32, 14); + q8s16 = vcombine_s16(d16s16, d17s16); + q9s16 = vcombine_s16(d18s16, d19s16); + + q0s32 = vmull_s16(d20s16, d31s16); + q1s32 = vmull_s16(d21s16, d31s16); + q12s32 = vmull_s16(d20s16, d30s16); + q13s32 = vmull_s16(d21s16, d30s16); + + q0s32 = vmlal_s16(q0s32, d28s16, d30s16); + q1s32 = vmlal_s16(q1s32, d29s16, d30s16); + q12s32 = vmlsl_s16(q12s32, d28s16, d31s16); + q13s32 = vmlsl_s16(q13s32, d29s16, d31s16); + + d22s16 = vqrshrn_n_s32(q0s32, 14); + d23s16 = vqrshrn_n_s32(q1s32, 14); + d20s16 = vqrshrn_n_s32(q12s32, 14); + d21s16 = vqrshrn_n_s32(q13s32, 14); + q10s16 = vcombine_s16(d20s16, d21s16); + q11s16 = vcombine_s16(d22s16, d23s16); + + q13s16 = vsubq_s16(q4s16, q5s16); + q4s16 = vaddq_s16(q4s16, q5s16); + q14s16 = vsubq_s16(q7s16, q6s16); + q15s16 = vaddq_s16(q6s16, q7s16); + d26s16 = vget_low_s16(q13s16); + d27s16 = vget_high_s16(q13s16); + d28s16 = vget_low_s16(q14s16); + d29s16 = vget_high_s16(q14s16); + + // stage 5 + q0s16 = vaddq_s16(q8s16, q11s16); + q1s16 = vaddq_s16(q9s16, q10s16); + q2s16 = vsubq_s16(q9s16, q10s16); + q3s16 = vsubq_s16(q8s16, q11s16); + + d16s16 = vdup_n_s16(cospi_16_64); + + q11s32 = vmull_s16(d26s16, d16s16); + q12s32 = vmull_s16(d27s16, d16s16); + 
q9s32 = vmull_s16(d28s16, d16s16); + q10s32 = vmull_s16(d29s16, d16s16); + + q6s32 = vsubq_s32(q9s32, q11s32); + q13s32 = vsubq_s32(q10s32, q12s32); + q9s32 = vaddq_s32(q9s32, q11s32); + q10s32 = vaddq_s32(q10s32, q12s32); + + d10s16 = vqrshrn_n_s32(q6s32, 14); + d11s16 = vqrshrn_n_s32(q13s32, 14); + d12s16 = vqrshrn_n_s32(q9s32, 14); + d13s16 = vqrshrn_n_s32(q10s32, 14); + q5s16 = vcombine_s16(d10s16, d11s16); + q6s16 = vcombine_s16(d12s16, d13s16); + + // stage 6 + q8s16 = vaddq_s16(q0s16, q15s16); + q9s16 = vaddq_s16(q1s16, q6s16); + q10s16 = vaddq_s16(q2s16, q5s16); + q11s16 = vaddq_s16(q3s16, q4s16); + q12s16 = vsubq_s16(q3s16, q4s16); + q13s16 = vsubq_s16(q2s16, q5s16); + q14s16 = vsubq_s16(q1s16, q6s16); + q15s16 = vsubq_s16(q0s16, q15s16); + + d16u64 = vreinterpret_u64_s16(vget_low_s16(q8s16)); + d17u64 = vreinterpret_u64_s16(vget_high_s16(q8s16)); + d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16)); + d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16)); + d20u64 = vreinterpret_u64_s16(vget_low_s16(q10s16)); + d21u64 = vreinterpret_u64_s16(vget_high_s16(q10s16)); + d22u64 = vreinterpret_u64_s16(vget_low_s16(q11s16)); + d23u64 = vreinterpret_u64_s16(vget_high_s16(q11s16)); + d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16)); + d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16)); + d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16)); + d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16)); + d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16)); + d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16)); + d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16)); + d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16)); + + // store the data + output_stride >>= 1; // output_stride / 2, out is int16_t + vst1_u64((uint64_t *)out, d16u64); + out += output_stride; + vst1_u64((uint64_t *)out, d17u64); + out += output_stride; + vst1_u64((uint64_t *)out, d18u64); + out += output_stride; + vst1_u64((uint64_t *)out, d19u64); + out += output_stride; + vst1_u64((uint64_t *)out, 
d20u64); + out += output_stride; + vst1_u64((uint64_t *)out, d21u64); + out += output_stride; + vst1_u64((uint64_t *)out, d22u64); + out += output_stride; + vst1_u64((uint64_t *)out, d23u64); + out += output_stride; + vst1_u64((uint64_t *)out, d24u64); + out += output_stride; + vst1_u64((uint64_t *)out, d25u64); + out += output_stride; + vst1_u64((uint64_t *)out, d26u64); + out += output_stride; + vst1_u64((uint64_t *)out, d27u64); + out += output_stride; + vst1_u64((uint64_t *)out, d28u64); + out += output_stride; + vst1_u64((uint64_t *)out, d29u64); + out += output_stride; + vst1_u64((uint64_t *)out, d30u64); + out += output_stride; + vst1_u64((uint64_t *)out, d31u64); + return; } -void vpx_idct16x16_256_add_neon_pass1( - int16_t *in, - int16_t *out, - int output_stride) { - int16x4_t d0s16, d1s16, d2s16, d3s16; - int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; - int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; - int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; - uint64x1_t d16u64, d17u64, d18u64, d19u64, d20u64, d21u64, d22u64, d23u64; - uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64; - int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; - int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; - int32x4_t q0s32, q1s32, q2s32, q3s32, q5s32, q6s32, q9s32; - int32x4_t q10s32, q11s32, q12s32, q13s32, q15s32; - int16x8x2_t q0x2s16; +void vpx_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out, + int16_t *pass1Output, int16_t skip_adding, + uint8_t *dest, int dest_stride) { + uint8_t *d; + uint8x8_t d12u8, d13u8; + int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16; + int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; + int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; + int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; + uint64x1_t d24u64, d25u64, 
d26u64, d27u64; + int64x1_t d12s64, d13s64; + uint16x8_t q2u16, q3u16, q4u16, q5u16, q8u16; + uint16x8_t q9u16, q12u16, q13u16, q14u16, q15u16; + int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; + int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; + int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q8s32, q9s32; + int32x4_t q10s32, q11s32, q12s32, q13s32; + int16x8x2_t q0x2s16; - q0x2s16 = vld2q_s16(in); - q8s16 = q0x2s16.val[0]; - in += 16; - q0x2s16 = vld2q_s16(in); - q9s16 = q0x2s16.val[0]; - in += 16; - q0x2s16 = vld2q_s16(in); - q10s16 = q0x2s16.val[0]; - in += 16; - q0x2s16 = vld2q_s16(in); - q11s16 = q0x2s16.val[0]; - in += 16; - q0x2s16 = vld2q_s16(in); - q12s16 = q0x2s16.val[0]; - in += 16; - q0x2s16 = vld2q_s16(in); - q13s16 = q0x2s16.val[0]; - in += 16; - q0x2s16 = vld2q_s16(in); - q14s16 = q0x2s16.val[0]; - in += 16; - q0x2s16 = vld2q_s16(in); - q15s16 = q0x2s16.val[0]; + q0x2s16 = vld2q_s16(src); + q8s16 = q0x2s16.val[0]; + src += 16; + q0x2s16 = vld2q_s16(src); + q9s16 = q0x2s16.val[0]; + src += 16; + q0x2s16 = vld2q_s16(src); + q10s16 = q0x2s16.val[0]; + src += 16; + q0x2s16 = vld2q_s16(src); + q11s16 = q0x2s16.val[0]; + src += 16; + q0x2s16 = vld2q_s16(src); + q12s16 = q0x2s16.val[0]; + src += 16; + q0x2s16 = vld2q_s16(src); + q13s16 = q0x2s16.val[0]; + src += 16; + q0x2s16 = vld2q_s16(src); + q14s16 = q0x2s16.val[0]; + src += 16; + q0x2s16 = vld2q_s16(src); + q15s16 = q0x2s16.val[0]; - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); + transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); - d16s16 = vget_low_s16(q8s16); - d17s16 = vget_high_s16(q8s16); - d18s16 = vget_low_s16(q9s16); - d19s16 = vget_high_s16(q9s16); - d20s16 = vget_low_s16(q10s16); - d21s16 = vget_high_s16(q10s16); - d22s16 = vget_low_s16(q11s16); - d23s16 = vget_high_s16(q11s16); - d24s16 = vget_low_s16(q12s16); - d25s16 = vget_high_s16(q12s16); - d26s16 = 
vget_low_s16(q13s16); - d27s16 = vget_high_s16(q13s16); - d28s16 = vget_low_s16(q14s16); - d29s16 = vget_high_s16(q14s16); - d30s16 = vget_low_s16(q15s16); - d31s16 = vget_high_s16(q15s16); + d16s16 = vget_low_s16(q8s16); + d17s16 = vget_high_s16(q8s16); + d18s16 = vget_low_s16(q9s16); + d19s16 = vget_high_s16(q9s16); + d20s16 = vget_low_s16(q10s16); + d21s16 = vget_high_s16(q10s16); + d22s16 = vget_low_s16(q11s16); + d23s16 = vget_high_s16(q11s16); + d24s16 = vget_low_s16(q12s16); + d25s16 = vget_high_s16(q12s16); + d26s16 = vget_low_s16(q13s16); + d27s16 = vget_high_s16(q13s16); + d28s16 = vget_low_s16(q14s16); + d29s16 = vget_high_s16(q14s16); + d30s16 = vget_low_s16(q15s16); + d31s16 = vget_high_s16(q15s16); - // stage 3 - d0s16 = vdup_n_s16(cospi_28_64); - d1s16 = vdup_n_s16(cospi_4_64); + // stage 3 + d12s16 = vdup_n_s16(cospi_30_64); + d13s16 = vdup_n_s16(cospi_2_64); - q2s32 = vmull_s16(d18s16, d0s16); - q3s32 = vmull_s16(d19s16, d0s16); - q5s32 = vmull_s16(d18s16, d1s16); - q6s32 = vmull_s16(d19s16, d1s16); + q2s32 = vmull_s16(d16s16, d12s16); + q3s32 = vmull_s16(d17s16, d12s16); + q1s32 = vmull_s16(d16s16, d13s16); + q4s32 = vmull_s16(d17s16, d13s16); - q2s32 = vmlsl_s16(q2s32, d30s16, d1s16); - q3s32 = vmlsl_s16(q3s32, d31s16, d1s16); - q5s32 = vmlal_s16(q5s32, d30s16, d0s16); - q6s32 = vmlal_s16(q6s32, d31s16, d0s16); + q2s32 = vmlsl_s16(q2s32, d30s16, d13s16); + q3s32 = vmlsl_s16(q3s32, d31s16, d13s16); + q1s32 = vmlal_s16(q1s32, d30s16, d12s16); + q4s32 = vmlal_s16(q4s32, d31s16, d12s16); - d2s16 = vdup_n_s16(cospi_12_64); - d3s16 = vdup_n_s16(cospi_20_64); + d0s16 = vqrshrn_n_s32(q2s32, 14); + d1s16 = vqrshrn_n_s32(q3s32, 14); + d14s16 = vqrshrn_n_s32(q1s32, 14); + d15s16 = vqrshrn_n_s32(q4s32, 14); + q0s16 = vcombine_s16(d0s16, d1s16); + q7s16 = vcombine_s16(d14s16, d15s16); - d8s16 = vqrshrn_n_s32(q2s32, 14); - d9s16 = vqrshrn_n_s32(q3s32, 14); - d14s16 = vqrshrn_n_s32(q5s32, 14); - d15s16 = vqrshrn_n_s32(q6s32, 14); - q4s16 = vcombine_s16(d8s16, 
d9s16); - q7s16 = vcombine_s16(d14s16, d15s16); + d30s16 = vdup_n_s16(cospi_14_64); + d31s16 = vdup_n_s16(cospi_18_64); - q2s32 = vmull_s16(d26s16, d2s16); - q3s32 = vmull_s16(d27s16, d2s16); - q9s32 = vmull_s16(d26s16, d3s16); - q15s32 = vmull_s16(d27s16, d3s16); + q2s32 = vmull_s16(d24s16, d30s16); + q3s32 = vmull_s16(d25s16, d30s16); + q4s32 = vmull_s16(d24s16, d31s16); + q5s32 = vmull_s16(d25s16, d31s16); - q2s32 = vmlsl_s16(q2s32, d22s16, d3s16); - q3s32 = vmlsl_s16(q3s32, d23s16, d3s16); - q9s32 = vmlal_s16(q9s32, d22s16, d2s16); - q15s32 = vmlal_s16(q15s32, d23s16, d2s16); + q2s32 = vmlsl_s16(q2s32, d22s16, d31s16); + q3s32 = vmlsl_s16(q3s32, d23s16, d31s16); + q4s32 = vmlal_s16(q4s32, d22s16, d30s16); + q5s32 = vmlal_s16(q5s32, d23s16, d30s16); - d10s16 = vqrshrn_n_s32(q2s32, 14); - d11s16 = vqrshrn_n_s32(q3s32, 14); - d12s16 = vqrshrn_n_s32(q9s32, 14); - d13s16 = vqrshrn_n_s32(q15s32, 14); - q5s16 = vcombine_s16(d10s16, d11s16); - q6s16 = vcombine_s16(d12s16, d13s16); + d2s16 = vqrshrn_n_s32(q2s32, 14); + d3s16 = vqrshrn_n_s32(q3s32, 14); + d12s16 = vqrshrn_n_s32(q4s32, 14); + d13s16 = vqrshrn_n_s32(q5s32, 14); + q1s16 = vcombine_s16(d2s16, d3s16); + q6s16 = vcombine_s16(d12s16, d13s16); - // stage 4 - d30s16 = vdup_n_s16(cospi_16_64); + d30s16 = vdup_n_s16(cospi_22_64); + d31s16 = vdup_n_s16(cospi_10_64); - q2s32 = vmull_s16(d16s16, d30s16); - q11s32 = vmull_s16(d17s16, d30s16); - q0s32 = vmull_s16(d24s16, d30s16); - q1s32 = vmull_s16(d25s16, d30s16); + q11s32 = vmull_s16(d20s16, d30s16); + q12s32 = vmull_s16(d21s16, d30s16); + q4s32 = vmull_s16(d20s16, d31s16); + q5s32 = vmull_s16(d21s16, d31s16); - d30s16 = vdup_n_s16(cospi_24_64); - d31s16 = vdup_n_s16(cospi_8_64); + q11s32 = vmlsl_s16(q11s32, d26s16, d31s16); + q12s32 = vmlsl_s16(q12s32, d27s16, d31s16); + q4s32 = vmlal_s16(q4s32, d26s16, d30s16); + q5s32 = vmlal_s16(q5s32, d27s16, d30s16); - q3s32 = vaddq_s32(q2s32, q0s32); - q12s32 = vaddq_s32(q11s32, q1s32); - q13s32 = vsubq_s32(q2s32, q0s32); - 
q1s32 = vsubq_s32(q11s32, q1s32); + d4s16 = vqrshrn_n_s32(q11s32, 14); + d5s16 = vqrshrn_n_s32(q12s32, 14); + d11s16 = vqrshrn_n_s32(q5s32, 14); + d10s16 = vqrshrn_n_s32(q4s32, 14); + q2s16 = vcombine_s16(d4s16, d5s16); + q5s16 = vcombine_s16(d10s16, d11s16); - d16s16 = vqrshrn_n_s32(q3s32, 14); - d17s16 = vqrshrn_n_s32(q12s32, 14); - d18s16 = vqrshrn_n_s32(q13s32, 14); - d19s16 = vqrshrn_n_s32(q1s32, 14); - q8s16 = vcombine_s16(d16s16, d17s16); - q9s16 = vcombine_s16(d18s16, d19s16); + d30s16 = vdup_n_s16(cospi_6_64); + d31s16 = vdup_n_s16(cospi_26_64); - q0s32 = vmull_s16(d20s16, d31s16); - q1s32 = vmull_s16(d21s16, d31s16); - q12s32 = vmull_s16(d20s16, d30s16); - q13s32 = vmull_s16(d21s16, d30s16); + q10s32 = vmull_s16(d28s16, d30s16); + q11s32 = vmull_s16(d29s16, d30s16); + q12s32 = vmull_s16(d28s16, d31s16); + q13s32 = vmull_s16(d29s16, d31s16); - q0s32 = vmlal_s16(q0s32, d28s16, d30s16); - q1s32 = vmlal_s16(q1s32, d29s16, d30s16); - q12s32 = vmlsl_s16(q12s32, d28s16, d31s16); - q13s32 = vmlsl_s16(q13s32, d29s16, d31s16); + q10s32 = vmlsl_s16(q10s32, d18s16, d31s16); + q11s32 = vmlsl_s16(q11s32, d19s16, d31s16); + q12s32 = vmlal_s16(q12s32, d18s16, d30s16); + q13s32 = vmlal_s16(q13s32, d19s16, d30s16); - d22s16 = vqrshrn_n_s32(q0s32, 14); - d23s16 = vqrshrn_n_s32(q1s32, 14); - d20s16 = vqrshrn_n_s32(q12s32, 14); - d21s16 = vqrshrn_n_s32(q13s32, 14); - q10s16 = vcombine_s16(d20s16, d21s16); - q11s16 = vcombine_s16(d22s16, d23s16); + d6s16 = vqrshrn_n_s32(q10s32, 14); + d7s16 = vqrshrn_n_s32(q11s32, 14); + d8s16 = vqrshrn_n_s32(q12s32, 14); + d9s16 = vqrshrn_n_s32(q13s32, 14); + q3s16 = vcombine_s16(d6s16, d7s16); + q4s16 = vcombine_s16(d8s16, d9s16); - q13s16 = vsubq_s16(q4s16, q5s16); - q4s16 = vaddq_s16(q4s16, q5s16); - q14s16 = vsubq_s16(q7s16, q6s16); - q15s16 = vaddq_s16(q6s16, q7s16); - d26s16 = vget_low_s16(q13s16); - d27s16 = vget_high_s16(q13s16); - d28s16 = vget_low_s16(q14s16); - d29s16 = vget_high_s16(q14s16); + // stage 3 + q9s16 = vsubq_s16(q0s16, 
q1s16); + q0s16 = vaddq_s16(q0s16, q1s16); + q10s16 = vsubq_s16(q3s16, q2s16); + q11s16 = vaddq_s16(q2s16, q3s16); + q12s16 = vaddq_s16(q4s16, q5s16); + q13s16 = vsubq_s16(q4s16, q5s16); + q14s16 = vsubq_s16(q7s16, q6s16); + q7s16 = vaddq_s16(q6s16, q7s16); - // stage 5 - q0s16 = vaddq_s16(q8s16, q11s16); - q1s16 = vaddq_s16(q9s16, q10s16); - q2s16 = vsubq_s16(q9s16, q10s16); - q3s16 = vsubq_s16(q8s16, q11s16); + // stage 4 + d18s16 = vget_low_s16(q9s16); + d19s16 = vget_high_s16(q9s16); + d20s16 = vget_low_s16(q10s16); + d21s16 = vget_high_s16(q10s16); + d26s16 = vget_low_s16(q13s16); + d27s16 = vget_high_s16(q13s16); + d28s16 = vget_low_s16(q14s16); + d29s16 = vget_high_s16(q14s16); - d16s16 = vdup_n_s16(cospi_16_64); + d30s16 = vdup_n_s16(cospi_8_64); + d31s16 = vdup_n_s16(cospi_24_64); - q11s32 = vmull_s16(d26s16, d16s16); - q12s32 = vmull_s16(d27s16, d16s16); - q9s32 = vmull_s16(d28s16, d16s16); - q10s32 = vmull_s16(d29s16, d16s16); + q2s32 = vmull_s16(d18s16, d31s16); + q3s32 = vmull_s16(d19s16, d31s16); + q4s32 = vmull_s16(d28s16, d31s16); + q5s32 = vmull_s16(d29s16, d31s16); - q6s32 = vsubq_s32(q9s32, q11s32); - q13s32 = vsubq_s32(q10s32, q12s32); - q9s32 = vaddq_s32(q9s32, q11s32); - q10s32 = vaddq_s32(q10s32, q12s32); + q2s32 = vmlal_s16(q2s32, d28s16, d30s16); + q3s32 = vmlal_s16(q3s32, d29s16, d30s16); + q4s32 = vmlsl_s16(q4s32, d18s16, d30s16); + q5s32 = vmlsl_s16(q5s32, d19s16, d30s16); - d10s16 = vqrshrn_n_s32(q6s32, 14); - d11s16 = vqrshrn_n_s32(q13s32, 14); - d12s16 = vqrshrn_n_s32(q9s32, 14); - d13s16 = vqrshrn_n_s32(q10s32, 14); - q5s16 = vcombine_s16(d10s16, d11s16); - q6s16 = vcombine_s16(d12s16, d13s16); + d12s16 = vqrshrn_n_s32(q2s32, 14); + d13s16 = vqrshrn_n_s32(q3s32, 14); + d2s16 = vqrshrn_n_s32(q4s32, 14); + d3s16 = vqrshrn_n_s32(q5s32, 14); + q1s16 = vcombine_s16(d2s16, d3s16); + q6s16 = vcombine_s16(d12s16, d13s16); - // stage 6 - q8s16 = vaddq_s16(q0s16, q15s16); - q9s16 = vaddq_s16(q1s16, q6s16); - q10s16 = vaddq_s16(q2s16, q5s16); - 
q11s16 = vaddq_s16(q3s16, q4s16); - q12s16 = vsubq_s16(q3s16, q4s16); - q13s16 = vsubq_s16(q2s16, q5s16); - q14s16 = vsubq_s16(q1s16, q6s16); + q3s16 = q11s16; + q4s16 = q12s16; + + d30s16 = vdup_n_s16(-cospi_8_64); + q11s32 = vmull_s16(d26s16, d30s16); + q12s32 = vmull_s16(d27s16, d30s16); + q8s32 = vmull_s16(d20s16, d30s16); + q9s32 = vmull_s16(d21s16, d30s16); + + q11s32 = vmlsl_s16(q11s32, d20s16, d31s16); + q12s32 = vmlsl_s16(q12s32, d21s16, d31s16); + q8s32 = vmlal_s16(q8s32, d26s16, d31s16); + q9s32 = vmlal_s16(q9s32, d27s16, d31s16); + + d4s16 = vqrshrn_n_s32(q11s32, 14); + d5s16 = vqrshrn_n_s32(q12s32, 14); + d10s16 = vqrshrn_n_s32(q8s32, 14); + d11s16 = vqrshrn_n_s32(q9s32, 14); + q2s16 = vcombine_s16(d4s16, d5s16); + q5s16 = vcombine_s16(d10s16, d11s16); + + // stage 5 + q8s16 = vaddq_s16(q0s16, q3s16); + q9s16 = vaddq_s16(q1s16, q2s16); + q10s16 = vsubq_s16(q1s16, q2s16); + q11s16 = vsubq_s16(q0s16, q3s16); + q12s16 = vsubq_s16(q7s16, q4s16); + q13s16 = vsubq_s16(q6s16, q5s16); + q14s16 = vaddq_s16(q6s16, q5s16); + q15s16 = vaddq_s16(q7s16, q4s16); + + // stage 6 + d20s16 = vget_low_s16(q10s16); + d21s16 = vget_high_s16(q10s16); + d22s16 = vget_low_s16(q11s16); + d23s16 = vget_high_s16(q11s16); + d24s16 = vget_low_s16(q12s16); + d25s16 = vget_high_s16(q12s16); + d26s16 = vget_low_s16(q13s16); + d27s16 = vget_high_s16(q13s16); + + d14s16 = vdup_n_s16(cospi_16_64); + + q3s32 = vmull_s16(d26s16, d14s16); + q4s32 = vmull_s16(d27s16, d14s16); + q0s32 = vmull_s16(d20s16, d14s16); + q1s32 = vmull_s16(d21s16, d14s16); + + q5s32 = vsubq_s32(q3s32, q0s32); + q6s32 = vsubq_s32(q4s32, q1s32); + q10s32 = vaddq_s32(q3s32, q0s32); + q4s32 = vaddq_s32(q4s32, q1s32); + + d4s16 = vqrshrn_n_s32(q5s32, 14); + d5s16 = vqrshrn_n_s32(q6s32, 14); + d10s16 = vqrshrn_n_s32(q10s32, 14); + d11s16 = vqrshrn_n_s32(q4s32, 14); + q2s16 = vcombine_s16(d4s16, d5s16); + q5s16 = vcombine_s16(d10s16, d11s16); + + q0s32 = vmull_s16(d22s16, d14s16); + q1s32 = vmull_s16(d23s16, d14s16); + 
q13s32 = vmull_s16(d24s16, d14s16); + q6s32 = vmull_s16(d25s16, d14s16); + + q10s32 = vsubq_s32(q13s32, q0s32); + q4s32 = vsubq_s32(q6s32, q1s32); + q13s32 = vaddq_s32(q13s32, q0s32); + q6s32 = vaddq_s32(q6s32, q1s32); + + d6s16 = vqrshrn_n_s32(q10s32, 14); + d7s16 = vqrshrn_n_s32(q4s32, 14); + d8s16 = vqrshrn_n_s32(q13s32, 14); + d9s16 = vqrshrn_n_s32(q6s32, 14); + q3s16 = vcombine_s16(d6s16, d7s16); + q4s16 = vcombine_s16(d8s16, d9s16); + + // stage 7 + if (skip_adding != 0) { + d = dest; + // load the data in pass1 + q0s16 = vld1q_s16(pass1Output); + pass1Output += 8; + q1s16 = vld1q_s16(pass1Output); + pass1Output += 8; + d12s64 = vld1_s64((int64_t *)dest); + dest += dest_stride; + d13s64 = vld1_s64((int64_t *)dest); + dest += dest_stride; + + q12s16 = vaddq_s16(q0s16, q15s16); + q13s16 = vaddq_s16(q1s16, q14s16); + q12s16 = vrshrq_n_s16(q12s16, 6); + q13s16 = vrshrq_n_s16(q13s16, 6); + q12u16 = + vaddw_u8(vreinterpretq_u16_s16(q12s16), vreinterpret_u8_s64(d12s64)); + q13u16 = + vaddw_u8(vreinterpretq_u16_s16(q13s16), vreinterpret_u8_s64(d13s64)); + d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16)); + d13u8 = vqmovun_s16(vreinterpretq_s16_u16(q13u16)); + vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); + d += dest_stride; + vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8)); + d += dest_stride; + q14s16 = vsubq_s16(q1s16, q14s16); q15s16 = vsubq_s16(q0s16, q15s16); - d16u64 = vreinterpret_u64_s16(vget_low_s16(q8s16)); - d17u64 = vreinterpret_u64_s16(vget_high_s16(q8s16)); - d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16)); - d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16)); - d20u64 = vreinterpret_u64_s16(vget_low_s16(q10s16)); - d21u64 = vreinterpret_u64_s16(vget_high_s16(q10s16)); - d22u64 = vreinterpret_u64_s16(vget_low_s16(q11s16)); - d23u64 = vreinterpret_u64_s16(vget_high_s16(q11s16)); - d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16)); - d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16)); - d26u64 = 
vreinterpret_u64_s16(vget_low_s16(q13s16)); - d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16)); - d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16)); - d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16)); - d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16)); - d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16)); - - // store the data - output_stride >>= 1; // output_stride / 2, out is int16_t - vst1_u64((uint64_t *)out, d16u64); - out += output_stride; - vst1_u64((uint64_t *)out, d17u64); - out += output_stride; - vst1_u64((uint64_t *)out, d18u64); - out += output_stride; - vst1_u64((uint64_t *)out, d19u64); - out += output_stride; - vst1_u64((uint64_t *)out, d20u64); - out += output_stride; - vst1_u64((uint64_t *)out, d21u64); - out += output_stride; - vst1_u64((uint64_t *)out, d22u64); - out += output_stride; - vst1_u64((uint64_t *)out, d23u64); - out += output_stride; - vst1_u64((uint64_t *)out, d24u64); - out += output_stride; - vst1_u64((uint64_t *)out, d25u64); - out += output_stride; - vst1_u64((uint64_t *)out, d26u64); - out += output_stride; - vst1_u64((uint64_t *)out, d27u64); - out += output_stride; - vst1_u64((uint64_t *)out, d28u64); - out += output_stride; - vst1_u64((uint64_t *)out, d29u64); - out += output_stride; - vst1_u64((uint64_t *)out, d30u64); - out += output_stride; - vst1_u64((uint64_t *)out, d31u64); - return; -} - -void vpx_idct16x16_256_add_neon_pass2( - int16_t *src, - int16_t *out, - int16_t *pass1Output, - int16_t skip_adding, - uint8_t *dest, - int dest_stride) { - uint8_t *d; - uint8x8_t d12u8, d13u8; - int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16; - int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; - int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; - int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; - uint64x1_t d24u64, d25u64, d26u64, d27u64; - int64x1_t d12s64, d13s64; - uint16x8_t q2u16, q3u16, q4u16, q5u16, q8u16; - uint16x8_t 
q9u16, q12u16, q13u16, q14u16, q15u16; - int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; - int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; - int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q8s32, q9s32; - int32x4_t q10s32, q11s32, q12s32, q13s32; - int16x8x2_t q0x2s16; - - q0x2s16 = vld2q_s16(src); - q8s16 = q0x2s16.val[0]; - src += 16; - q0x2s16 = vld2q_s16(src); - q9s16 = q0x2s16.val[0]; - src += 16; - q0x2s16 = vld2q_s16(src); - q10s16 = q0x2s16.val[0]; - src += 16; - q0x2s16 = vld2q_s16(src); - q11s16 = q0x2s16.val[0]; - src += 16; - q0x2s16 = vld2q_s16(src); - q12s16 = q0x2s16.val[0]; - src += 16; - q0x2s16 = vld2q_s16(src); - q13s16 = q0x2s16.val[0]; - src += 16; - q0x2s16 = vld2q_s16(src); - q14s16 = q0x2s16.val[0]; - src += 16; - q0x2s16 = vld2q_s16(src); - q15s16 = q0x2s16.val[0]; - - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - - d16s16 = vget_low_s16(q8s16); - d17s16 = vget_high_s16(q8s16); - d18s16 = vget_low_s16(q9s16); - d19s16 = vget_high_s16(q9s16); - d20s16 = vget_low_s16(q10s16); - d21s16 = vget_high_s16(q10s16); - d22s16 = vget_low_s16(q11s16); - d23s16 = vget_high_s16(q11s16); - d24s16 = vget_low_s16(q12s16); - d25s16 = vget_high_s16(q12s16); - d26s16 = vget_low_s16(q13s16); - d27s16 = vget_high_s16(q13s16); - d28s16 = vget_low_s16(q14s16); - d29s16 = vget_high_s16(q14s16); - d30s16 = vget_low_s16(q15s16); - d31s16 = vget_high_s16(q15s16); - - // stage 3 - d12s16 = vdup_n_s16(cospi_30_64); - d13s16 = vdup_n_s16(cospi_2_64); - - q2s32 = vmull_s16(d16s16, d12s16); - q3s32 = vmull_s16(d17s16, d12s16); - q1s32 = vmull_s16(d16s16, d13s16); - q4s32 = vmull_s16(d17s16, d13s16); - - q2s32 = vmlsl_s16(q2s32, d30s16, d13s16); - q3s32 = vmlsl_s16(q3s32, d31s16, d13s16); - q1s32 = vmlal_s16(q1s32, d30s16, d12s16); - q4s32 = vmlal_s16(q4s32, d31s16, d12s16); - - d0s16 = vqrshrn_n_s32(q2s32, 14); - d1s16 = vqrshrn_n_s32(q3s32, 14); - d14s16 = vqrshrn_n_s32(q1s32, 
14); - d15s16 = vqrshrn_n_s32(q4s32, 14); - q0s16 = vcombine_s16(d0s16, d1s16); - q7s16 = vcombine_s16(d14s16, d15s16); - - d30s16 = vdup_n_s16(cospi_14_64); - d31s16 = vdup_n_s16(cospi_18_64); - - q2s32 = vmull_s16(d24s16, d30s16); - q3s32 = vmull_s16(d25s16, d30s16); - q4s32 = vmull_s16(d24s16, d31s16); - q5s32 = vmull_s16(d25s16, d31s16); - - q2s32 = vmlsl_s16(q2s32, d22s16, d31s16); - q3s32 = vmlsl_s16(q3s32, d23s16, d31s16); - q4s32 = vmlal_s16(q4s32, d22s16, d30s16); - q5s32 = vmlal_s16(q5s32, d23s16, d30s16); - - d2s16 = vqrshrn_n_s32(q2s32, 14); - d3s16 = vqrshrn_n_s32(q3s32, 14); - d12s16 = vqrshrn_n_s32(q4s32, 14); - d13s16 = vqrshrn_n_s32(q5s32, 14); - q1s16 = vcombine_s16(d2s16, d3s16); - q6s16 = vcombine_s16(d12s16, d13s16); - - d30s16 = vdup_n_s16(cospi_22_64); - d31s16 = vdup_n_s16(cospi_10_64); - - q11s32 = vmull_s16(d20s16, d30s16); - q12s32 = vmull_s16(d21s16, d30s16); - q4s32 = vmull_s16(d20s16, d31s16); - q5s32 = vmull_s16(d21s16, d31s16); - - q11s32 = vmlsl_s16(q11s32, d26s16, d31s16); - q12s32 = vmlsl_s16(q12s32, d27s16, d31s16); - q4s32 = vmlal_s16(q4s32, d26s16, d30s16); - q5s32 = vmlal_s16(q5s32, d27s16, d30s16); - - d4s16 = vqrshrn_n_s32(q11s32, 14); - d5s16 = vqrshrn_n_s32(q12s32, 14); - d11s16 = vqrshrn_n_s32(q5s32, 14); - d10s16 = vqrshrn_n_s32(q4s32, 14); - q2s16 = vcombine_s16(d4s16, d5s16); - q5s16 = vcombine_s16(d10s16, d11s16); - - d30s16 = vdup_n_s16(cospi_6_64); - d31s16 = vdup_n_s16(cospi_26_64); - - q10s32 = vmull_s16(d28s16, d30s16); - q11s32 = vmull_s16(d29s16, d30s16); - q12s32 = vmull_s16(d28s16, d31s16); - q13s32 = vmull_s16(d29s16, d31s16); - - q10s32 = vmlsl_s16(q10s32, d18s16, d31s16); - q11s32 = vmlsl_s16(q11s32, d19s16, d31s16); - q12s32 = vmlal_s16(q12s32, d18s16, d30s16); - q13s32 = vmlal_s16(q13s32, d19s16, d30s16); - - d6s16 = vqrshrn_n_s32(q10s32, 14); - d7s16 = vqrshrn_n_s32(q11s32, 14); - d8s16 = vqrshrn_n_s32(q12s32, 14); - d9s16 = vqrshrn_n_s32(q13s32, 14); - q3s16 = vcombine_s16(d6s16, d7s16); - q4s16 = 
vcombine_s16(d8s16, d9s16); - - // stage 3 - q9s16 = vsubq_s16(q0s16, q1s16); - q0s16 = vaddq_s16(q0s16, q1s16); - q10s16 = vsubq_s16(q3s16, q2s16); - q11s16 = vaddq_s16(q2s16, q3s16); - q12s16 = vaddq_s16(q4s16, q5s16); - q13s16 = vsubq_s16(q4s16, q5s16); - q14s16 = vsubq_s16(q7s16, q6s16); - q7s16 = vaddq_s16(q6s16, q7s16); - - // stage 4 - d18s16 = vget_low_s16(q9s16); - d19s16 = vget_high_s16(q9s16); - d20s16 = vget_low_s16(q10s16); - d21s16 = vget_high_s16(q10s16); - d26s16 = vget_low_s16(q13s16); - d27s16 = vget_high_s16(q13s16); - d28s16 = vget_low_s16(q14s16); - d29s16 = vget_high_s16(q14s16); - - d30s16 = vdup_n_s16(cospi_8_64); - d31s16 = vdup_n_s16(cospi_24_64); - - q2s32 = vmull_s16(d18s16, d31s16); - q3s32 = vmull_s16(d19s16, d31s16); - q4s32 = vmull_s16(d28s16, d31s16); - q5s32 = vmull_s16(d29s16, d31s16); - - q2s32 = vmlal_s16(q2s32, d28s16, d30s16); - q3s32 = vmlal_s16(q3s32, d29s16, d30s16); - q4s32 = vmlsl_s16(q4s32, d18s16, d30s16); - q5s32 = vmlsl_s16(q5s32, d19s16, d30s16); - - d12s16 = vqrshrn_n_s32(q2s32, 14); - d13s16 = vqrshrn_n_s32(q3s32, 14); - d2s16 = vqrshrn_n_s32(q4s32, 14); - d3s16 = vqrshrn_n_s32(q5s32, 14); - q1s16 = vcombine_s16(d2s16, d3s16); - q6s16 = vcombine_s16(d12s16, d13s16); - - q3s16 = q11s16; - q4s16 = q12s16; - - d30s16 = vdup_n_s16(-cospi_8_64); - q11s32 = vmull_s16(d26s16, d30s16); - q12s32 = vmull_s16(d27s16, d30s16); - q8s32 = vmull_s16(d20s16, d30s16); - q9s32 = vmull_s16(d21s16, d30s16); - - q11s32 = vmlsl_s16(q11s32, d20s16, d31s16); - q12s32 = vmlsl_s16(q12s32, d21s16, d31s16); - q8s32 = vmlal_s16(q8s32, d26s16, d31s16); - q9s32 = vmlal_s16(q9s32, d27s16, d31s16); - - d4s16 = vqrshrn_n_s32(q11s32, 14); - d5s16 = vqrshrn_n_s32(q12s32, 14); - d10s16 = vqrshrn_n_s32(q8s32, 14); - d11s16 = vqrshrn_n_s32(q9s32, 14); - q2s16 = vcombine_s16(d4s16, d5s16); - q5s16 = vcombine_s16(d10s16, d11s16); - - // stage 5 - q8s16 = vaddq_s16(q0s16, q3s16); - q9s16 = vaddq_s16(q1s16, q2s16); - q10s16 = vsubq_s16(q1s16, q2s16); - 
q11s16 = vsubq_s16(q0s16, q3s16); - q12s16 = vsubq_s16(q7s16, q4s16); - q13s16 = vsubq_s16(q6s16, q5s16); - q14s16 = vaddq_s16(q6s16, q5s16); - q15s16 = vaddq_s16(q7s16, q4s16); - - // stage 6 - d20s16 = vget_low_s16(q10s16); - d21s16 = vget_high_s16(q10s16); - d22s16 = vget_low_s16(q11s16); - d23s16 = vget_high_s16(q11s16); - d24s16 = vget_low_s16(q12s16); - d25s16 = vget_high_s16(q12s16); - d26s16 = vget_low_s16(q13s16); - d27s16 = vget_high_s16(q13s16); - - d14s16 = vdup_n_s16(cospi_16_64); - - q3s32 = vmull_s16(d26s16, d14s16); - q4s32 = vmull_s16(d27s16, d14s16); - q0s32 = vmull_s16(d20s16, d14s16); - q1s32 = vmull_s16(d21s16, d14s16); - - q5s32 = vsubq_s32(q3s32, q0s32); - q6s32 = vsubq_s32(q4s32, q1s32); - q10s32 = vaddq_s32(q3s32, q0s32); - q4s32 = vaddq_s32(q4s32, q1s32); - - d4s16 = vqrshrn_n_s32(q5s32, 14); - d5s16 = vqrshrn_n_s32(q6s32, 14); - d10s16 = vqrshrn_n_s32(q10s32, 14); - d11s16 = vqrshrn_n_s32(q4s32, 14); - q2s16 = vcombine_s16(d4s16, d5s16); - q5s16 = vcombine_s16(d10s16, d11s16); - - q0s32 = vmull_s16(d22s16, d14s16); - q1s32 = vmull_s16(d23s16, d14s16); - q13s32 = vmull_s16(d24s16, d14s16); - q6s32 = vmull_s16(d25s16, d14s16); - - q10s32 = vsubq_s32(q13s32, q0s32); - q4s32 = vsubq_s32(q6s32, q1s32); - q13s32 = vaddq_s32(q13s32, q0s32); - q6s32 = vaddq_s32(q6s32, q1s32); - - d6s16 = vqrshrn_n_s32(q10s32, 14); - d7s16 = vqrshrn_n_s32(q4s32, 14); - d8s16 = vqrshrn_n_s32(q13s32, 14); - d9s16 = vqrshrn_n_s32(q6s32, 14); - q3s16 = vcombine_s16(d6s16, d7s16); - q4s16 = vcombine_s16(d8s16, d9s16); - - // stage 7 - if (skip_adding != 0) { - d = dest; - // load the data in pass1 - q0s16 = vld1q_s16(pass1Output); - pass1Output += 8; - q1s16 = vld1q_s16(pass1Output); - pass1Output += 8; - d12s64 = vld1_s64((int64_t *)dest); - dest += dest_stride; - d13s64 = vld1_s64((int64_t *)dest); - dest += dest_stride; - - q12s16 = vaddq_s16(q0s16, q15s16); - q13s16 = vaddq_s16(q1s16, q14s16); - q12s16 = vrshrq_n_s16(q12s16, 6); - q13s16 = vrshrq_n_s16(q13s16, 6); 
- q12u16 = vaddw_u8(vreinterpretq_u16_s16(q12s16), - vreinterpret_u8_s64(d12s64)); - q13u16 = vaddw_u8(vreinterpretq_u16_s16(q13s16), - vreinterpret_u8_s64(d13s64)); - d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16)); - d13u8 = vqmovun_s16(vreinterpretq_s16_u16(q13u16)); - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); - d += dest_stride; - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8)); - d += dest_stride; - q14s16 = vsubq_s16(q1s16, q14s16); - q15s16 = vsubq_s16(q0s16, q15s16); - - q10s16 = vld1q_s16(pass1Output); - pass1Output += 8; - q11s16 = vld1q_s16(pass1Output); - pass1Output += 8; - d12s64 = vld1_s64((int64_t *)dest); - dest += dest_stride; - d13s64 = vld1_s64((int64_t *)dest); - dest += dest_stride; - q12s16 = vaddq_s16(q10s16, q5s16); - q13s16 = vaddq_s16(q11s16, q4s16); - q12s16 = vrshrq_n_s16(q12s16, 6); - q13s16 = vrshrq_n_s16(q13s16, 6); - q12u16 = vaddw_u8(vreinterpretq_u16_s16(q12s16), - vreinterpret_u8_s64(d12s64)); - q13u16 = vaddw_u8(vreinterpretq_u16_s16(q13s16), - vreinterpret_u8_s64(d13s64)); - d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16)); - d13u8 = vqmovun_s16(vreinterpretq_s16_u16(q13u16)); - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); - d += dest_stride; - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8)); - d += dest_stride; - q4s16 = vsubq_s16(q11s16, q4s16); - q5s16 = vsubq_s16(q10s16, q5s16); - - q0s16 = vld1q_s16(pass1Output); - pass1Output += 8; - q1s16 = vld1q_s16(pass1Output); - pass1Output += 8; - d12s64 = vld1_s64((int64_t *)dest); - dest += dest_stride; - d13s64 = vld1_s64((int64_t *)dest); - dest += dest_stride; - q12s16 = vaddq_s16(q0s16, q3s16); - q13s16 = vaddq_s16(q1s16, q2s16); - q12s16 = vrshrq_n_s16(q12s16, 6); - q13s16 = vrshrq_n_s16(q13s16, 6); - q12u16 = vaddw_u8(vreinterpretq_u16_s16(q12s16), - vreinterpret_u8_s64(d12s64)); - q13u16 = vaddw_u8(vreinterpretq_u16_s16(q13s16), - vreinterpret_u8_s64(d13s64)); - d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16)); - d13u8 = 
vqmovun_s16(vreinterpretq_s16_u16(q13u16)); - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); - d += dest_stride; - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8)); - d += dest_stride; - q2s16 = vsubq_s16(q1s16, q2s16); - q3s16 = vsubq_s16(q0s16, q3s16); - - q10s16 = vld1q_s16(pass1Output); - pass1Output += 8; - q11s16 = vld1q_s16(pass1Output); - d12s64 = vld1_s64((int64_t *)dest); - dest += dest_stride; - d13s64 = vld1_s64((int64_t *)dest); - dest += dest_stride; - q12s16 = vaddq_s16(q10s16, q9s16); - q13s16 = vaddq_s16(q11s16, q8s16); - q12s16 = vrshrq_n_s16(q12s16, 6); - q13s16 = vrshrq_n_s16(q13s16, 6); - q12u16 = vaddw_u8(vreinterpretq_u16_s16(q12s16), - vreinterpret_u8_s64(d12s64)); - q13u16 = vaddw_u8(vreinterpretq_u16_s16(q13s16), - vreinterpret_u8_s64(d13s64)); - d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16)); - d13u8 = vqmovun_s16(vreinterpretq_s16_u16(q13u16)); - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); - d += dest_stride; - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8)); - d += dest_stride; - q8s16 = vsubq_s16(q11s16, q8s16); - q9s16 = vsubq_s16(q10s16, q9s16); - - // store the data out 8,9,10,11,12,13,14,15 - d12s64 = vld1_s64((int64_t *)dest); - dest += dest_stride; - q8s16 = vrshrq_n_s16(q8s16, 6); - q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), - vreinterpret_u8_s64(d12s64)); - d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); - d += dest_stride; - - d12s64 = vld1_s64((int64_t *)dest); - dest += dest_stride; - q9s16 = vrshrq_n_s16(q9s16, 6); - q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), - vreinterpret_u8_s64(d12s64)); - d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); - d += dest_stride; - - d12s64 = vld1_s64((int64_t *)dest); - dest += dest_stride; - q2s16 = vrshrq_n_s16(q2s16, 6); - q2u16 = vaddw_u8(vreinterpretq_u16_s16(q2s16), - vreinterpret_u8_s64(d12s64)); - d12u8 = 
vqmovun_s16(vreinterpretq_s16_u16(q2u16)); - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); - d += dest_stride; - - d12s64 = vld1_s64((int64_t *)dest); - dest += dest_stride; - q3s16 = vrshrq_n_s16(q3s16, 6); - q3u16 = vaddw_u8(vreinterpretq_u16_s16(q3s16), - vreinterpret_u8_s64(d12s64)); - d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q3u16)); - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); - d += dest_stride; - - d12s64 = vld1_s64((int64_t *)dest); - dest += dest_stride; - q4s16 = vrshrq_n_s16(q4s16, 6); - q4u16 = vaddw_u8(vreinterpretq_u16_s16(q4s16), - vreinterpret_u8_s64(d12s64)); - d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q4u16)); - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); - d += dest_stride; - - d12s64 = vld1_s64((int64_t *)dest); - dest += dest_stride; - q5s16 = vrshrq_n_s16(q5s16, 6); - q5u16 = vaddw_u8(vreinterpretq_u16_s16(q5s16), - vreinterpret_u8_s64(d12s64)); - d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q5u16)); - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); - d += dest_stride; - - d12s64 = vld1_s64((int64_t *)dest); - dest += dest_stride; - q14s16 = vrshrq_n_s16(q14s16, 6); - q14u16 = vaddw_u8(vreinterpretq_u16_s16(q14s16), - vreinterpret_u8_s64(d12s64)); - d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q14u16)); - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); - d += dest_stride; - - d12s64 = vld1_s64((int64_t *)dest); - q15s16 = vrshrq_n_s16(q15s16, 6); - q15u16 = vaddw_u8(vreinterpretq_u16_s16(q15s16), - vreinterpret_u8_s64(d12s64)); - d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q15u16)); - vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); - } else { // skip_adding_dest - q0s16 = vld1q_s16(pass1Output); - pass1Output += 8; - q1s16 = vld1q_s16(pass1Output); - pass1Output += 8; - q12s16 = vaddq_s16(q0s16, q15s16); - q13s16 = vaddq_s16(q1s16, q14s16); - d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16)); - d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16)); - d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16)); - 
d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16)); - vst1_u64((uint64_t *)out, d24u64); - out += 4; - vst1_u64((uint64_t *)out, d25u64); - out += 12; - vst1_u64((uint64_t *)out, d26u64); - out += 4; - vst1_u64((uint64_t *)out, d27u64); - out += 12; - q14s16 = vsubq_s16(q1s16, q14s16); - q15s16 = vsubq_s16(q0s16, q15s16); - - q10s16 = vld1q_s16(pass1Output); - pass1Output += 8; - q11s16 = vld1q_s16(pass1Output); - pass1Output += 8; - q12s16 = vaddq_s16(q10s16, q5s16); - q13s16 = vaddq_s16(q11s16, q4s16); - d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16)); - d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16)); - d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16)); - d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16)); - vst1_u64((uint64_t *)out, d24u64); - out += 4; - vst1_u64((uint64_t *)out, d25u64); - out += 12; - vst1_u64((uint64_t *)out, d26u64); - out += 4; - vst1_u64((uint64_t *)out, d27u64); - out += 12; - q4s16 = vsubq_s16(q11s16, q4s16); - q5s16 = vsubq_s16(q10s16, q5s16); - - q0s16 = vld1q_s16(pass1Output); - pass1Output += 8; - q1s16 = vld1q_s16(pass1Output); - pass1Output += 8; - q12s16 = vaddq_s16(q0s16, q3s16); - q13s16 = vaddq_s16(q1s16, q2s16); - d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16)); - d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16)); - d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16)); - d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16)); - vst1_u64((uint64_t *)out, d24u64); - out += 4; - vst1_u64((uint64_t *)out, d25u64); - out += 12; - vst1_u64((uint64_t *)out, d26u64); - out += 4; - vst1_u64((uint64_t *)out, d27u64); - out += 12; - q2s16 = vsubq_s16(q1s16, q2s16); - q3s16 = vsubq_s16(q0s16, q3s16); - - q10s16 = vld1q_s16(pass1Output); - pass1Output += 8; - q11s16 = vld1q_s16(pass1Output); - pass1Output += 8; - q12s16 = vaddq_s16(q10s16, q9s16); - q13s16 = vaddq_s16(q11s16, q8s16); - d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16)); - d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16)); - d26u64 = 
vreinterpret_u64_s16(vget_low_s16(q13s16)); - d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16)); - vst1_u64((uint64_t *)out, d24u64); - out += 4; - vst1_u64((uint64_t *)out, d25u64); - out += 12; - vst1_u64((uint64_t *)out, d26u64); - out += 4; - vst1_u64((uint64_t *)out, d27u64); - out += 12; - q8s16 = vsubq_s16(q11s16, q8s16); - q9s16 = vsubq_s16(q10s16, q9s16); - - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q8s16))); - out += 4; - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q8s16))); - out += 12; - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q9s16))); - out += 4; - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q9s16))); - out += 12; - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q2s16))); - out += 4; - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q2s16))); - out += 12; - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q3s16))); - out += 4; - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q3s16))); - out += 12; - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q4s16))); - out += 4; - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q4s16))); - out += 12; - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q5s16))); - out += 4; - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q5s16))); - out += 12; - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q14s16))); - out += 4; - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q14s16))); - out += 12; - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q15s16))); - out += 4; - vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q15s16))); - } - return; -} - -void vpx_idct16x16_10_add_neon_pass1( - int16_t *in, - int16_t *out, - int output_stride) { - int16x4_t d4s16; - int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; - uint64x1_t d4u64, d5u64, d18u64, d19u64, d20u64, d21u64, d22u64, 
d23u64; - uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64; - int16x8_t q0s16, q1s16, q2s16, q4s16, q5s16, q6s16, q7s16; - int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; - int32x4_t q6s32, q9s32; - int32x4_t q10s32, q11s32, q12s32, q15s32; - int16x8x2_t q0x2s16; - - q0x2s16 = vld2q_s16(in); - q8s16 = q0x2s16.val[0]; - in += 16; - q0x2s16 = vld2q_s16(in); - q9s16 = q0x2s16.val[0]; - in += 16; - q0x2s16 = vld2q_s16(in); - q10s16 = q0x2s16.val[0]; - in += 16; - q0x2s16 = vld2q_s16(in); - q11s16 = q0x2s16.val[0]; - in += 16; - q0x2s16 = vld2q_s16(in); - q12s16 = q0x2s16.val[0]; - in += 16; - q0x2s16 = vld2q_s16(in); - q13s16 = q0x2s16.val[0]; - in += 16; - q0x2s16 = vld2q_s16(in); - q14s16 = q0x2s16.val[0]; - in += 16; - q0x2s16 = vld2q_s16(in); - q15s16 = q0x2s16.val[0]; - - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - - // stage 3 - q0s16 = vdupq_n_s16(cospi_28_64 * 2); - q1s16 = vdupq_n_s16(cospi_4_64 * 2); - - q4s16 = vqrdmulhq_s16(q9s16, q0s16); - q7s16 = vqrdmulhq_s16(q9s16, q1s16); - - // stage 4 - q1s16 = vdupq_n_s16(cospi_16_64 * 2); - d4s16 = vdup_n_s16(cospi_16_64); - - q8s16 = vqrdmulhq_s16(q8s16, q1s16); - - d8s16 = vget_low_s16(q4s16); - d9s16 = vget_high_s16(q4s16); - d14s16 = vget_low_s16(q7s16); - d15s16 = vget_high_s16(q7s16); - q9s32 = vmull_s16(d14s16, d4s16); - q10s32 = vmull_s16(d15s16, d4s16); - q12s32 = vmull_s16(d9s16, d4s16); - q11s32 = vmull_s16(d8s16, d4s16); - - q15s32 = vsubq_s32(q10s32, q12s32); - q6s32 = vsubq_s32(q9s32, q11s32); - q9s32 = vaddq_s32(q9s32, q11s32); - q10s32 = vaddq_s32(q10s32, q12s32); - - d11s16 = vqrshrn_n_s32(q15s32, 14); - d10s16 = vqrshrn_n_s32(q6s32, 14); - d12s16 = vqrshrn_n_s32(q9s32, 14); - d13s16 = vqrshrn_n_s32(q10s32, 14); - q5s16 = vcombine_s16(d10s16, d11s16); - q6s16 = vcombine_s16(d12s16, d13s16); - - // stage 6 - q2s16 = vaddq_s16(q8s16, q7s16); - q9s16 = vaddq_s16(q8s16, q6s16); - q10s16 = vaddq_s16(q8s16, 
q5s16); - q11s16 = vaddq_s16(q8s16, q4s16); - q12s16 = vsubq_s16(q8s16, q4s16); - q13s16 = vsubq_s16(q8s16, q5s16); - q14s16 = vsubq_s16(q8s16, q6s16); - q15s16 = vsubq_s16(q8s16, q7s16); - - d4u64 = vreinterpret_u64_s16(vget_low_s16(q2s16)); - d5u64 = vreinterpret_u64_s16(vget_high_s16(q2s16)); - d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16)); - d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16)); - d20u64 = vreinterpret_u64_s16(vget_low_s16(q10s16)); - d21u64 = vreinterpret_u64_s16(vget_high_s16(q10s16)); - d22u64 = vreinterpret_u64_s16(vget_low_s16(q11s16)); - d23u64 = vreinterpret_u64_s16(vget_high_s16(q11s16)); - d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16)); - d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16)); - d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16)); - d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16)); - d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16)); - d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16)); - d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16)); - d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16)); - - // store the data - output_stride >>= 1; // output_stride / 2, out is int16_t - vst1_u64((uint64_t *)out, d4u64); - out += output_stride; - vst1_u64((uint64_t *)out, d5u64); - out += output_stride; - vst1_u64((uint64_t *)out, d18u64); - out += output_stride; - vst1_u64((uint64_t *)out, d19u64); - out += output_stride; - vst1_u64((uint64_t *)out, d20u64); - out += output_stride; - vst1_u64((uint64_t *)out, d21u64); - out += output_stride; - vst1_u64((uint64_t *)out, d22u64); - out += output_stride; - vst1_u64((uint64_t *)out, d23u64); - out += output_stride; - vst1_u64((uint64_t *)out, d24u64); - out += output_stride; - vst1_u64((uint64_t *)out, d25u64); - out += output_stride; - vst1_u64((uint64_t *)out, d26u64); - out += output_stride; - vst1_u64((uint64_t *)out, d27u64); - out += output_stride; - vst1_u64((uint64_t *)out, d28u64); - out += output_stride; - vst1_u64((uint64_t *)out, d29u64); - out += 
output_stride; - vst1_u64((uint64_t *)out, d30u64); - out += output_stride; - vst1_u64((uint64_t *)out, d31u64); - return; -} - -void vpx_idct16x16_10_add_neon_pass2( - int16_t *src, - int16_t *out, - int16_t *pass1Output, - int16_t skip_adding, - uint8_t *dest, - int dest_stride) { - int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16; - int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; - int16x4_t d20s16, d21s16, d22s16, d23s16; - int16x4_t d24s16, d25s16, d26s16, d27s16, d30s16, d31s16; - uint64x1_t d4u64, d5u64, d6u64, d7u64, d8u64, d9u64, d10u64, d11u64; - uint64x1_t d16u64, d17u64, d18u64, d19u64; - uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64; - int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; - int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; - int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q8s32, q9s32; - int32x4_t q10s32, q11s32, q12s32, q13s32; - int16x8x2_t q0x2s16; - (void)skip_adding; - (void)dest; - (void)dest_stride; - - q0x2s16 = vld2q_s16(src); - q8s16 = q0x2s16.val[0]; - src += 16; - q0x2s16 = vld2q_s16(src); - q9s16 = q0x2s16.val[0]; - src += 16; - q0x2s16 = vld2q_s16(src); - q10s16 = q0x2s16.val[0]; - src += 16; - q0x2s16 = vld2q_s16(src); - q11s16 = q0x2s16.val[0]; - src += 16; - q0x2s16 = vld2q_s16(src); - q12s16 = q0x2s16.val[0]; - src += 16; - q0x2s16 = vld2q_s16(src); - q13s16 = q0x2s16.val[0]; - src += 16; - q0x2s16 = vld2q_s16(src); - q14s16 = q0x2s16.val[0]; - src += 16; - q0x2s16 = vld2q_s16(src); - q15s16 = q0x2s16.val[0]; - - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - - // stage 3 - q6s16 = vdupq_n_s16(cospi_30_64 * 2); - q0s16 = vqrdmulhq_s16(q8s16, q6s16); - q6s16 = vdupq_n_s16(cospi_2_64 * 2); - q7s16 = vqrdmulhq_s16(q8s16, q6s16); - - q15s16 = vdupq_n_s16(-cospi_26_64 * 2); - q14s16 = vdupq_n_s16(cospi_6_64 * 2); - q3s16 = vqrdmulhq_s16(q9s16, q15s16); - q4s16 = 
vqrdmulhq_s16(q9s16, q14s16); - - // stage 4 - d0s16 = vget_low_s16(q0s16); - d1s16 = vget_high_s16(q0s16); - d6s16 = vget_low_s16(q3s16); - d7s16 = vget_high_s16(q3s16); - d8s16 = vget_low_s16(q4s16); - d9s16 = vget_high_s16(q4s16); - d14s16 = vget_low_s16(q7s16); - d15s16 = vget_high_s16(q7s16); - - d30s16 = vdup_n_s16(cospi_8_64); - d31s16 = vdup_n_s16(cospi_24_64); - - q12s32 = vmull_s16(d14s16, d31s16); - q5s32 = vmull_s16(d15s16, d31s16); - q2s32 = vmull_s16(d0s16, d31s16); - q11s32 = vmull_s16(d1s16, d31s16); - - q12s32 = vmlsl_s16(q12s32, d0s16, d30s16); - q5s32 = vmlsl_s16(q5s32, d1s16, d30s16); - q2s32 = vmlal_s16(q2s32, d14s16, d30s16); - q11s32 = vmlal_s16(q11s32, d15s16, d30s16); - - d2s16 = vqrshrn_n_s32(q12s32, 14); - d3s16 = vqrshrn_n_s32(q5s32, 14); - d12s16 = vqrshrn_n_s32(q2s32, 14); - d13s16 = vqrshrn_n_s32(q11s32, 14); - q1s16 = vcombine_s16(d2s16, d3s16); - q6s16 = vcombine_s16(d12s16, d13s16); - - d30s16 = vdup_n_s16(-cospi_8_64); - q10s32 = vmull_s16(d8s16, d30s16); - q13s32 = vmull_s16(d9s16, d30s16); - q8s32 = vmull_s16(d6s16, d30s16); - q9s32 = vmull_s16(d7s16, d30s16); - - q10s32 = vmlsl_s16(q10s32, d6s16, d31s16); - q13s32 = vmlsl_s16(q13s32, d7s16, d31s16); - q8s32 = vmlal_s16(q8s32, d8s16, d31s16); - q9s32 = vmlal_s16(q9s32, d9s16, d31s16); - - d4s16 = vqrshrn_n_s32(q10s32, 14); - d5s16 = vqrshrn_n_s32(q13s32, 14); - d10s16 = vqrshrn_n_s32(q8s32, 14); - d11s16 = vqrshrn_n_s32(q9s32, 14); - q2s16 = vcombine_s16(d4s16, d5s16); - q5s16 = vcombine_s16(d10s16, d11s16); - - // stage 5 - q8s16 = vaddq_s16(q0s16, q3s16); - q9s16 = vaddq_s16(q1s16, q2s16); - q10s16 = vsubq_s16(q1s16, q2s16); - q11s16 = vsubq_s16(q0s16, q3s16); - q12s16 = vsubq_s16(q7s16, q4s16); - q13s16 = vsubq_s16(q6s16, q5s16); - q14s16 = vaddq_s16(q6s16, q5s16); - q15s16 = vaddq_s16(q7s16, q4s16); - - // stage 6 - d20s16 = vget_low_s16(q10s16); - d21s16 = vget_high_s16(q10s16); - d22s16 = vget_low_s16(q11s16); - d23s16 = vget_high_s16(q11s16); - d24s16 = 
vget_low_s16(q12s16); - d25s16 = vget_high_s16(q12s16); - d26s16 = vget_low_s16(q13s16); - d27s16 = vget_high_s16(q13s16); - - d14s16 = vdup_n_s16(cospi_16_64); - q3s32 = vmull_s16(d26s16, d14s16); - q4s32 = vmull_s16(d27s16, d14s16); - q0s32 = vmull_s16(d20s16, d14s16); - q1s32 = vmull_s16(d21s16, d14s16); - - q5s32 = vsubq_s32(q3s32, q0s32); - q6s32 = vsubq_s32(q4s32, q1s32); - q0s32 = vaddq_s32(q3s32, q0s32); - q4s32 = vaddq_s32(q4s32, q1s32); - - d4s16 = vqrshrn_n_s32(q5s32, 14); - d5s16 = vqrshrn_n_s32(q6s32, 14); - d10s16 = vqrshrn_n_s32(q0s32, 14); - d11s16 = vqrshrn_n_s32(q4s32, 14); - q2s16 = vcombine_s16(d4s16, d5s16); - q5s16 = vcombine_s16(d10s16, d11s16); - - q0s32 = vmull_s16(d22s16, d14s16); - q1s32 = vmull_s16(d23s16, d14s16); - q13s32 = vmull_s16(d24s16, d14s16); - q6s32 = vmull_s16(d25s16, d14s16); - - q10s32 = vsubq_s32(q13s32, q0s32); - q4s32 = vsubq_s32(q6s32, q1s32); - q13s32 = vaddq_s32(q13s32, q0s32); - q6s32 = vaddq_s32(q6s32, q1s32); - - d6s16 = vqrshrn_n_s32(q10s32, 14); - d7s16 = vqrshrn_n_s32(q4s32, 14); - d8s16 = vqrshrn_n_s32(q13s32, 14); - d9s16 = vqrshrn_n_s32(q6s32, 14); - q3s16 = vcombine_s16(d6s16, d7s16); - q4s16 = vcombine_s16(d8s16, d9s16); - - // stage 7 + q10s16 = vld1q_s16(pass1Output); + pass1Output += 8; + q11s16 = vld1q_s16(pass1Output); + pass1Output += 8; + d12s64 = vld1_s64((int64_t *)dest); + dest += dest_stride; + d13s64 = vld1_s64((int64_t *)dest); + dest += dest_stride; + q12s16 = vaddq_s16(q10s16, q5s16); + q13s16 = vaddq_s16(q11s16, q4s16); + q12s16 = vrshrq_n_s16(q12s16, 6); + q13s16 = vrshrq_n_s16(q13s16, 6); + q12u16 = + vaddw_u8(vreinterpretq_u16_s16(q12s16), vreinterpret_u8_s64(d12s64)); + q13u16 = + vaddw_u8(vreinterpretq_u16_s16(q13s16), vreinterpret_u8_s64(d13s64)); + d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16)); + d13u8 = vqmovun_s16(vreinterpretq_s16_u16(q13u16)); + vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); + d += dest_stride; + vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8)); + d 
+= dest_stride; + q4s16 = vsubq_s16(q11s16, q4s16); + q5s16 = vsubq_s16(q10s16, q5s16); + + q0s16 = vld1q_s16(pass1Output); + pass1Output += 8; + q1s16 = vld1q_s16(pass1Output); + pass1Output += 8; + d12s64 = vld1_s64((int64_t *)dest); + dest += dest_stride; + d13s64 = vld1_s64((int64_t *)dest); + dest += dest_stride; + q12s16 = vaddq_s16(q0s16, q3s16); + q13s16 = vaddq_s16(q1s16, q2s16); + q12s16 = vrshrq_n_s16(q12s16, 6); + q13s16 = vrshrq_n_s16(q13s16, 6); + q12u16 = + vaddw_u8(vreinterpretq_u16_s16(q12s16), vreinterpret_u8_s64(d12s64)); + q13u16 = + vaddw_u8(vreinterpretq_u16_s16(q13s16), vreinterpret_u8_s64(d13s64)); + d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16)); + d13u8 = vqmovun_s16(vreinterpretq_s16_u16(q13u16)); + vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); + d += dest_stride; + vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8)); + d += dest_stride; + q2s16 = vsubq_s16(q1s16, q2s16); + q3s16 = vsubq_s16(q0s16, q3s16); + + q10s16 = vld1q_s16(pass1Output); + pass1Output += 8; + q11s16 = vld1q_s16(pass1Output); + d12s64 = vld1_s64((int64_t *)dest); + dest += dest_stride; + d13s64 = vld1_s64((int64_t *)dest); + dest += dest_stride; + q12s16 = vaddq_s16(q10s16, q9s16); + q13s16 = vaddq_s16(q11s16, q8s16); + q12s16 = vrshrq_n_s16(q12s16, 6); + q13s16 = vrshrq_n_s16(q13s16, 6); + q12u16 = + vaddw_u8(vreinterpretq_u16_s16(q12s16), vreinterpret_u8_s64(d12s64)); + q13u16 = + vaddw_u8(vreinterpretq_u16_s16(q13s16), vreinterpret_u8_s64(d13s64)); + d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16)); + d13u8 = vqmovun_s16(vreinterpretq_s16_u16(q13u16)); + vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); + d += dest_stride; + vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d13u8)); + d += dest_stride; + q8s16 = vsubq_s16(q11s16, q8s16); + q9s16 = vsubq_s16(q10s16, q9s16); + + // store the data out 8,9,10,11,12,13,14,15 + d12s64 = vld1_s64((int64_t *)dest); + dest += dest_stride; + q8s16 = vrshrq_n_s16(q8s16, 6); + q8u16 = 
vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_s64(d12s64)); + d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); + vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); + d += dest_stride; + + d12s64 = vld1_s64((int64_t *)dest); + dest += dest_stride; + q9s16 = vrshrq_n_s16(q9s16, 6); + q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_s64(d12s64)); + d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); + vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); + d += dest_stride; + + d12s64 = vld1_s64((int64_t *)dest); + dest += dest_stride; + q2s16 = vrshrq_n_s16(q2s16, 6); + q2u16 = vaddw_u8(vreinterpretq_u16_s16(q2s16), vreinterpret_u8_s64(d12s64)); + d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q2u16)); + vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); + d += dest_stride; + + d12s64 = vld1_s64((int64_t *)dest); + dest += dest_stride; + q3s16 = vrshrq_n_s16(q3s16, 6); + q3u16 = vaddw_u8(vreinterpretq_u16_s16(q3s16), vreinterpret_u8_s64(d12s64)); + d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q3u16)); + vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); + d += dest_stride; + + d12s64 = vld1_s64((int64_t *)dest); + dest += dest_stride; + q4s16 = vrshrq_n_s16(q4s16, 6); + q4u16 = vaddw_u8(vreinterpretq_u16_s16(q4s16), vreinterpret_u8_s64(d12s64)); + d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q4u16)); + vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); + d += dest_stride; + + d12s64 = vld1_s64((int64_t *)dest); + dest += dest_stride; + q5s16 = vrshrq_n_s16(q5s16, 6); + q5u16 = vaddw_u8(vreinterpretq_u16_s16(q5s16), vreinterpret_u8_s64(d12s64)); + d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q5u16)); + vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); + d += dest_stride; + + d12s64 = vld1_s64((int64_t *)dest); + dest += dest_stride; + q14s16 = vrshrq_n_s16(q14s16, 6); + q14u16 = + vaddw_u8(vreinterpretq_u16_s16(q14s16), vreinterpret_u8_s64(d12s64)); + d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q14u16)); + vst1_u64((uint64_t *)d, 
vreinterpret_u64_u8(d12u8)); + d += dest_stride; + + d12s64 = vld1_s64((int64_t *)dest); + q15s16 = vrshrq_n_s16(q15s16, 6); + q15u16 = + vaddw_u8(vreinterpretq_u16_s16(q15s16), vreinterpret_u8_s64(d12s64)); + d12u8 = vqmovun_s16(vreinterpretq_s16_u16(q15u16)); + vst1_u64((uint64_t *)d, vreinterpret_u64_u8(d12u8)); + } else { // skip_adding_dest q0s16 = vld1q_s16(pass1Output); pass1Output += 8; q1s16 = vld1q_s16(pass1Output); @@ -1248,6 +746,7 @@ void vpx_idct16x16_10_add_neon_pass2( q10s16 = vld1q_s16(pass1Output); pass1Output += 8; q11s16 = vld1q_s16(pass1Output); + pass1Output += 8; q12s16 = vaddq_s16(q10s16, q9s16); q13s16 = vaddq_s16(q11s16, q8s16); d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16)); @@ -1265,53 +764,468 @@ void vpx_idct16x16_10_add_neon_pass2( q8s16 = vsubq_s16(q11s16, q8s16); q9s16 = vsubq_s16(q10s16, q9s16); - d4u64 = vreinterpret_u64_s16(vget_low_s16(q2s16)); - d5u64 = vreinterpret_u64_s16(vget_high_s16(q2s16)); - d6u64 = vreinterpret_u64_s16(vget_low_s16(q3s16)); - d7u64 = vreinterpret_u64_s16(vget_high_s16(q3s16)); - d8u64 = vreinterpret_u64_s16(vget_low_s16(q4s16)); - d9u64 = vreinterpret_u64_s16(vget_high_s16(q4s16)); - d10u64 = vreinterpret_u64_s16(vget_low_s16(q5s16)); - d11u64 = vreinterpret_u64_s16(vget_high_s16(q5s16)); - d16u64 = vreinterpret_u64_s16(vget_low_s16(q8s16)); - d17u64 = vreinterpret_u64_s16(vget_high_s16(q8s16)); - d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16)); - d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16)); - d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16)); - d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16)); - d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16)); - d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16)); - - vst1_u64((uint64_t *)out, d16u64); + vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q8s16))); out += 4; - vst1_u64((uint64_t *)out, d17u64); + vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q8s16))); out += 12; - vst1_u64((uint64_t *)out, d18u64); + 
vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q9s16))); out += 4; - vst1_u64((uint64_t *)out, d19u64); + vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q9s16))); out += 12; - vst1_u64((uint64_t *)out, d4u64); + vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q2s16))); out += 4; - vst1_u64((uint64_t *)out, d5u64); + vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q2s16))); out += 12; - vst1_u64((uint64_t *)out, d6u64); + vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q3s16))); out += 4; - vst1_u64((uint64_t *)out, d7u64); + vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q3s16))); out += 12; - vst1_u64((uint64_t *)out, d8u64); + vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q4s16))); out += 4; - vst1_u64((uint64_t *)out, d9u64); + vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q4s16))); out += 12; - vst1_u64((uint64_t *)out, d10u64); + vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q5s16))); out += 4; - vst1_u64((uint64_t *)out, d11u64); + vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q5s16))); out += 12; - vst1_u64((uint64_t *)out, d28u64); + vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q14s16))); out += 4; - vst1_u64((uint64_t *)out, d29u64); + vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q14s16))); out += 12; - vst1_u64((uint64_t *)out, d30u64); + vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_low_s16(q15s16))); out += 4; - vst1_u64((uint64_t *)out, d31u64); - return; + vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q15s16))); + } + return; +} + +void vpx_idct16x16_10_add_neon_pass1(int16_t *in, int16_t *out, + int output_stride) { + int16x4_t d4s16; + int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; + uint64x1_t d4u64, d5u64, d18u64, d19u64, d20u64, d21u64, d22u64, d23u64; + uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64; + 
int16x8_t q0s16, q1s16, q2s16, q4s16, q5s16, q6s16, q7s16; + int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; + int32x4_t q6s32, q9s32; + int32x4_t q10s32, q11s32, q12s32, q15s32; + int16x8x2_t q0x2s16; + + q0x2s16 = vld2q_s16(in); + q8s16 = q0x2s16.val[0]; + in += 16; + q0x2s16 = vld2q_s16(in); + q9s16 = q0x2s16.val[0]; + in += 16; + q0x2s16 = vld2q_s16(in); + q10s16 = q0x2s16.val[0]; + in += 16; + q0x2s16 = vld2q_s16(in); + q11s16 = q0x2s16.val[0]; + in += 16; + q0x2s16 = vld2q_s16(in); + q12s16 = q0x2s16.val[0]; + in += 16; + q0x2s16 = vld2q_s16(in); + q13s16 = q0x2s16.val[0]; + in += 16; + q0x2s16 = vld2q_s16(in); + q14s16 = q0x2s16.val[0]; + in += 16; + q0x2s16 = vld2q_s16(in); + q15s16 = q0x2s16.val[0]; + + transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); + + // stage 3 + q0s16 = vdupq_n_s16(cospi_28_64 * 2); + q1s16 = vdupq_n_s16(cospi_4_64 * 2); + + q4s16 = vqrdmulhq_s16(q9s16, q0s16); + q7s16 = vqrdmulhq_s16(q9s16, q1s16); + + // stage 4 + q1s16 = vdupq_n_s16(cospi_16_64 * 2); + d4s16 = vdup_n_s16(cospi_16_64); + + q8s16 = vqrdmulhq_s16(q8s16, q1s16); + + d8s16 = vget_low_s16(q4s16); + d9s16 = vget_high_s16(q4s16); + d14s16 = vget_low_s16(q7s16); + d15s16 = vget_high_s16(q7s16); + q9s32 = vmull_s16(d14s16, d4s16); + q10s32 = vmull_s16(d15s16, d4s16); + q12s32 = vmull_s16(d9s16, d4s16); + q11s32 = vmull_s16(d8s16, d4s16); + + q15s32 = vsubq_s32(q10s32, q12s32); + q6s32 = vsubq_s32(q9s32, q11s32); + q9s32 = vaddq_s32(q9s32, q11s32); + q10s32 = vaddq_s32(q10s32, q12s32); + + d11s16 = vqrshrn_n_s32(q15s32, 14); + d10s16 = vqrshrn_n_s32(q6s32, 14); + d12s16 = vqrshrn_n_s32(q9s32, 14); + d13s16 = vqrshrn_n_s32(q10s32, 14); + q5s16 = vcombine_s16(d10s16, d11s16); + q6s16 = vcombine_s16(d12s16, d13s16); + + // stage 6 + q2s16 = vaddq_s16(q8s16, q7s16); + q9s16 = vaddq_s16(q8s16, q6s16); + q10s16 = vaddq_s16(q8s16, q5s16); + q11s16 = vaddq_s16(q8s16, q4s16); + q12s16 = vsubq_s16(q8s16, q4s16); + 
q13s16 = vsubq_s16(q8s16, q5s16); + q14s16 = vsubq_s16(q8s16, q6s16); + q15s16 = vsubq_s16(q8s16, q7s16); + + d4u64 = vreinterpret_u64_s16(vget_low_s16(q2s16)); + d5u64 = vreinterpret_u64_s16(vget_high_s16(q2s16)); + d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16)); + d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16)); + d20u64 = vreinterpret_u64_s16(vget_low_s16(q10s16)); + d21u64 = vreinterpret_u64_s16(vget_high_s16(q10s16)); + d22u64 = vreinterpret_u64_s16(vget_low_s16(q11s16)); + d23u64 = vreinterpret_u64_s16(vget_high_s16(q11s16)); + d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16)); + d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16)); + d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16)); + d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16)); + d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16)); + d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16)); + d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16)); + d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16)); + + // store the data + output_stride >>= 1; // output_stride / 2, out is int16_t + vst1_u64((uint64_t *)out, d4u64); + out += output_stride; + vst1_u64((uint64_t *)out, d5u64); + out += output_stride; + vst1_u64((uint64_t *)out, d18u64); + out += output_stride; + vst1_u64((uint64_t *)out, d19u64); + out += output_stride; + vst1_u64((uint64_t *)out, d20u64); + out += output_stride; + vst1_u64((uint64_t *)out, d21u64); + out += output_stride; + vst1_u64((uint64_t *)out, d22u64); + out += output_stride; + vst1_u64((uint64_t *)out, d23u64); + out += output_stride; + vst1_u64((uint64_t *)out, d24u64); + out += output_stride; + vst1_u64((uint64_t *)out, d25u64); + out += output_stride; + vst1_u64((uint64_t *)out, d26u64); + out += output_stride; + vst1_u64((uint64_t *)out, d27u64); + out += output_stride; + vst1_u64((uint64_t *)out, d28u64); + out += output_stride; + vst1_u64((uint64_t *)out, d29u64); + out += output_stride; + vst1_u64((uint64_t *)out, d30u64); + out += output_stride; + 
vst1_u64((uint64_t *)out, d31u64); + return; +} + +void vpx_idct16x16_10_add_neon_pass2(int16_t *src, int16_t *out, + int16_t *pass1Output, int16_t skip_adding, + uint8_t *dest, int dest_stride) { + int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16; + int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; + int16x4_t d20s16, d21s16, d22s16, d23s16; + int16x4_t d24s16, d25s16, d26s16, d27s16, d30s16, d31s16; + uint64x1_t d4u64, d5u64, d6u64, d7u64, d8u64, d9u64, d10u64, d11u64; + uint64x1_t d16u64, d17u64, d18u64, d19u64; + uint64x1_t d24u64, d25u64, d26u64, d27u64, d28u64, d29u64, d30u64, d31u64; + int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; + int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; + int32x4_t q0s32, q1s32, q2s32, q3s32, q4s32, q5s32, q6s32, q8s32, q9s32; + int32x4_t q10s32, q11s32, q12s32, q13s32; + int16x8x2_t q0x2s16; + (void)skip_adding; + (void)dest; + (void)dest_stride; + + q0x2s16 = vld2q_s16(src); + q8s16 = q0x2s16.val[0]; + src += 16; + q0x2s16 = vld2q_s16(src); + q9s16 = q0x2s16.val[0]; + src += 16; + q0x2s16 = vld2q_s16(src); + q10s16 = q0x2s16.val[0]; + src += 16; + q0x2s16 = vld2q_s16(src); + q11s16 = q0x2s16.val[0]; + src += 16; + q0x2s16 = vld2q_s16(src); + q12s16 = q0x2s16.val[0]; + src += 16; + q0x2s16 = vld2q_s16(src); + q13s16 = q0x2s16.val[0]; + src += 16; + q0x2s16 = vld2q_s16(src); + q14s16 = q0x2s16.val[0]; + src += 16; + q0x2s16 = vld2q_s16(src); + q15s16 = q0x2s16.val[0]; + + transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); + + // stage 3 + q6s16 = vdupq_n_s16(cospi_30_64 * 2); + q0s16 = vqrdmulhq_s16(q8s16, q6s16); + q6s16 = vdupq_n_s16(cospi_2_64 * 2); + q7s16 = vqrdmulhq_s16(q8s16, q6s16); + + q15s16 = vdupq_n_s16(-cospi_26_64 * 2); + q14s16 = vdupq_n_s16(cospi_6_64 * 2); + q3s16 = vqrdmulhq_s16(q9s16, q15s16); + q4s16 = vqrdmulhq_s16(q9s16, q14s16); + + // stage 4 + d0s16 = vget_low_s16(q0s16); + d1s16 = 
vget_high_s16(q0s16); + d6s16 = vget_low_s16(q3s16); + d7s16 = vget_high_s16(q3s16); + d8s16 = vget_low_s16(q4s16); + d9s16 = vget_high_s16(q4s16); + d14s16 = vget_low_s16(q7s16); + d15s16 = vget_high_s16(q7s16); + + d30s16 = vdup_n_s16(cospi_8_64); + d31s16 = vdup_n_s16(cospi_24_64); + + q12s32 = vmull_s16(d14s16, d31s16); + q5s32 = vmull_s16(d15s16, d31s16); + q2s32 = vmull_s16(d0s16, d31s16); + q11s32 = vmull_s16(d1s16, d31s16); + + q12s32 = vmlsl_s16(q12s32, d0s16, d30s16); + q5s32 = vmlsl_s16(q5s32, d1s16, d30s16); + q2s32 = vmlal_s16(q2s32, d14s16, d30s16); + q11s32 = vmlal_s16(q11s32, d15s16, d30s16); + + d2s16 = vqrshrn_n_s32(q12s32, 14); + d3s16 = vqrshrn_n_s32(q5s32, 14); + d12s16 = vqrshrn_n_s32(q2s32, 14); + d13s16 = vqrshrn_n_s32(q11s32, 14); + q1s16 = vcombine_s16(d2s16, d3s16); + q6s16 = vcombine_s16(d12s16, d13s16); + + d30s16 = vdup_n_s16(-cospi_8_64); + q10s32 = vmull_s16(d8s16, d30s16); + q13s32 = vmull_s16(d9s16, d30s16); + q8s32 = vmull_s16(d6s16, d30s16); + q9s32 = vmull_s16(d7s16, d30s16); + + q10s32 = vmlsl_s16(q10s32, d6s16, d31s16); + q13s32 = vmlsl_s16(q13s32, d7s16, d31s16); + q8s32 = vmlal_s16(q8s32, d8s16, d31s16); + q9s32 = vmlal_s16(q9s32, d9s16, d31s16); + + d4s16 = vqrshrn_n_s32(q10s32, 14); + d5s16 = vqrshrn_n_s32(q13s32, 14); + d10s16 = vqrshrn_n_s32(q8s32, 14); + d11s16 = vqrshrn_n_s32(q9s32, 14); + q2s16 = vcombine_s16(d4s16, d5s16); + q5s16 = vcombine_s16(d10s16, d11s16); + + // stage 5 + q8s16 = vaddq_s16(q0s16, q3s16); + q9s16 = vaddq_s16(q1s16, q2s16); + q10s16 = vsubq_s16(q1s16, q2s16); + q11s16 = vsubq_s16(q0s16, q3s16); + q12s16 = vsubq_s16(q7s16, q4s16); + q13s16 = vsubq_s16(q6s16, q5s16); + q14s16 = vaddq_s16(q6s16, q5s16); + q15s16 = vaddq_s16(q7s16, q4s16); + + // stage 6 + d20s16 = vget_low_s16(q10s16); + d21s16 = vget_high_s16(q10s16); + d22s16 = vget_low_s16(q11s16); + d23s16 = vget_high_s16(q11s16); + d24s16 = vget_low_s16(q12s16); + d25s16 = vget_high_s16(q12s16); + d26s16 = vget_low_s16(q13s16); + d27s16 = 
vget_high_s16(q13s16); + + d14s16 = vdup_n_s16(cospi_16_64); + q3s32 = vmull_s16(d26s16, d14s16); + q4s32 = vmull_s16(d27s16, d14s16); + q0s32 = vmull_s16(d20s16, d14s16); + q1s32 = vmull_s16(d21s16, d14s16); + + q5s32 = vsubq_s32(q3s32, q0s32); + q6s32 = vsubq_s32(q4s32, q1s32); + q0s32 = vaddq_s32(q3s32, q0s32); + q4s32 = vaddq_s32(q4s32, q1s32); + + d4s16 = vqrshrn_n_s32(q5s32, 14); + d5s16 = vqrshrn_n_s32(q6s32, 14); + d10s16 = vqrshrn_n_s32(q0s32, 14); + d11s16 = vqrshrn_n_s32(q4s32, 14); + q2s16 = vcombine_s16(d4s16, d5s16); + q5s16 = vcombine_s16(d10s16, d11s16); + + q0s32 = vmull_s16(d22s16, d14s16); + q1s32 = vmull_s16(d23s16, d14s16); + q13s32 = vmull_s16(d24s16, d14s16); + q6s32 = vmull_s16(d25s16, d14s16); + + q10s32 = vsubq_s32(q13s32, q0s32); + q4s32 = vsubq_s32(q6s32, q1s32); + q13s32 = vaddq_s32(q13s32, q0s32); + q6s32 = vaddq_s32(q6s32, q1s32); + + d6s16 = vqrshrn_n_s32(q10s32, 14); + d7s16 = vqrshrn_n_s32(q4s32, 14); + d8s16 = vqrshrn_n_s32(q13s32, 14); + d9s16 = vqrshrn_n_s32(q6s32, 14); + q3s16 = vcombine_s16(d6s16, d7s16); + q4s16 = vcombine_s16(d8s16, d9s16); + + // stage 7 + q0s16 = vld1q_s16(pass1Output); + pass1Output += 8; + q1s16 = vld1q_s16(pass1Output); + pass1Output += 8; + q12s16 = vaddq_s16(q0s16, q15s16); + q13s16 = vaddq_s16(q1s16, q14s16); + d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16)); + d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16)); + d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16)); + d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16)); + vst1_u64((uint64_t *)out, d24u64); + out += 4; + vst1_u64((uint64_t *)out, d25u64); + out += 12; + vst1_u64((uint64_t *)out, d26u64); + out += 4; + vst1_u64((uint64_t *)out, d27u64); + out += 12; + q14s16 = vsubq_s16(q1s16, q14s16); + q15s16 = vsubq_s16(q0s16, q15s16); + + q10s16 = vld1q_s16(pass1Output); + pass1Output += 8; + q11s16 = vld1q_s16(pass1Output); + pass1Output += 8; + q12s16 = vaddq_s16(q10s16, q5s16); + q13s16 = vaddq_s16(q11s16, q4s16); + d24u64 = 
vreinterpret_u64_s16(vget_low_s16(q12s16)); + d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16)); + d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16)); + d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16)); + vst1_u64((uint64_t *)out, d24u64); + out += 4; + vst1_u64((uint64_t *)out, d25u64); + out += 12; + vst1_u64((uint64_t *)out, d26u64); + out += 4; + vst1_u64((uint64_t *)out, d27u64); + out += 12; + q4s16 = vsubq_s16(q11s16, q4s16); + q5s16 = vsubq_s16(q10s16, q5s16); + + q0s16 = vld1q_s16(pass1Output); + pass1Output += 8; + q1s16 = vld1q_s16(pass1Output); + pass1Output += 8; + q12s16 = vaddq_s16(q0s16, q3s16); + q13s16 = vaddq_s16(q1s16, q2s16); + d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16)); + d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16)); + d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16)); + d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16)); + vst1_u64((uint64_t *)out, d24u64); + out += 4; + vst1_u64((uint64_t *)out, d25u64); + out += 12; + vst1_u64((uint64_t *)out, d26u64); + out += 4; + vst1_u64((uint64_t *)out, d27u64); + out += 12; + q2s16 = vsubq_s16(q1s16, q2s16); + q3s16 = vsubq_s16(q0s16, q3s16); + + q10s16 = vld1q_s16(pass1Output); + pass1Output += 8; + q11s16 = vld1q_s16(pass1Output); + q12s16 = vaddq_s16(q10s16, q9s16); + q13s16 = vaddq_s16(q11s16, q8s16); + d24u64 = vreinterpret_u64_s16(vget_low_s16(q12s16)); + d25u64 = vreinterpret_u64_s16(vget_high_s16(q12s16)); + d26u64 = vreinterpret_u64_s16(vget_low_s16(q13s16)); + d27u64 = vreinterpret_u64_s16(vget_high_s16(q13s16)); + vst1_u64((uint64_t *)out, d24u64); + out += 4; + vst1_u64((uint64_t *)out, d25u64); + out += 12; + vst1_u64((uint64_t *)out, d26u64); + out += 4; + vst1_u64((uint64_t *)out, d27u64); + out += 12; + q8s16 = vsubq_s16(q11s16, q8s16); + q9s16 = vsubq_s16(q10s16, q9s16); + + d4u64 = vreinterpret_u64_s16(vget_low_s16(q2s16)); + d5u64 = vreinterpret_u64_s16(vget_high_s16(q2s16)); + d6u64 = vreinterpret_u64_s16(vget_low_s16(q3s16)); + d7u64 = 
vreinterpret_u64_s16(vget_high_s16(q3s16)); + d8u64 = vreinterpret_u64_s16(vget_low_s16(q4s16)); + d9u64 = vreinterpret_u64_s16(vget_high_s16(q4s16)); + d10u64 = vreinterpret_u64_s16(vget_low_s16(q5s16)); + d11u64 = vreinterpret_u64_s16(vget_high_s16(q5s16)); + d16u64 = vreinterpret_u64_s16(vget_low_s16(q8s16)); + d17u64 = vreinterpret_u64_s16(vget_high_s16(q8s16)); + d18u64 = vreinterpret_u64_s16(vget_low_s16(q9s16)); + d19u64 = vreinterpret_u64_s16(vget_high_s16(q9s16)); + d28u64 = vreinterpret_u64_s16(vget_low_s16(q14s16)); + d29u64 = vreinterpret_u64_s16(vget_high_s16(q14s16)); + d30u64 = vreinterpret_u64_s16(vget_low_s16(q15s16)); + d31u64 = vreinterpret_u64_s16(vget_high_s16(q15s16)); + + vst1_u64((uint64_t *)out, d16u64); + out += 4; + vst1_u64((uint64_t *)out, d17u64); + out += 12; + vst1_u64((uint64_t *)out, d18u64); + out += 4; + vst1_u64((uint64_t *)out, d19u64); + out += 12; + vst1_u64((uint64_t *)out, d4u64); + out += 4; + vst1_u64((uint64_t *)out, d5u64); + out += 12; + vst1_u64((uint64_t *)out, d6u64); + out += 4; + vst1_u64((uint64_t *)out, d7u64); + out += 12; + vst1_u64((uint64_t *)out, d8u64); + out += 4; + vst1_u64((uint64_t *)out, d9u64); + out += 12; + vst1_u64((uint64_t *)out, d10u64); + out += 4; + vst1_u64((uint64_t *)out, d11u64); + out += 12; + vst1_u64((uint64_t *)out, d28u64); + out += 4; + vst1_u64((uint64_t *)out, d29u64); + out += 12; + vst1_u64((uint64_t *)out, d30u64); + out += 4; + vst1_u64((uint64_t *)out, d31u64); + return; } diff --git a/libs/libvpx/vpx_dsp/arm/idct16x16_neon.c b/libs/libvpx/vpx_dsp/arm/idct16x16_neon.c index 352979aa16..ecc263df28 100644 --- a/libs/libvpx/vpx_dsp/arm/idct16x16_neon.c +++ b/libs/libvpx/vpx_dsp/arm/idct16x16_neon.c @@ -10,24 +10,16 @@ #include "vpx_dsp/vpx_dsp_common.h" -void vpx_idct16x16_256_add_neon_pass1(const int16_t *input, - int16_t *output, +void vpx_idct16x16_256_add_neon_pass1(const int16_t *input, int16_t *output, int output_stride); -void vpx_idct16x16_256_add_neon_pass2(const 
int16_t *src, - int16_t *output, - int16_t *pass1Output, - int16_t skip_adding, - uint8_t *dest, - int dest_stride); -void vpx_idct16x16_10_add_neon_pass1(const int16_t *input, - int16_t *output, +void vpx_idct16x16_256_add_neon_pass2(const int16_t *src, int16_t *output, + int16_t *pass1Output, int16_t skip_adding, + uint8_t *dest, int dest_stride); +void vpx_idct16x16_10_add_neon_pass1(const int16_t *input, int16_t *output, int output_stride); -void vpx_idct16x16_10_add_neon_pass2(const int16_t *src, - int16_t *output, - int16_t *pass1Output, - int16_t skip_adding, - uint8_t *dest, - int dest_stride); +void vpx_idct16x16_10_add_neon_pass2(const int16_t *src, int16_t *output, + int16_t *pass1Output, int16_t skip_adding, + uint8_t *dest, int dest_stride); #if HAVE_NEON_ASM /* For ARM NEON, d8-d15 are callee-saved registers, and need to be saved. */ @@ -35,13 +27,13 @@ extern void vpx_push_neon(int64_t *store); extern void vpx_pop_neon(int64_t *store); #endif // HAVE_NEON_ASM -void vpx_idct16x16_256_add_neon(const int16_t *input, - uint8_t *dest, int dest_stride) { +void vpx_idct16x16_256_add_neon(const int16_t *input, uint8_t *dest, + int dest_stride) { #if HAVE_NEON_ASM int64_t store_reg[8]; #endif - int16_t pass1_output[16*16] = {0}; - int16_t row_idct_output[16*16] = {0}; + int16_t pass1_output[16 * 16] = { 0 }; + int16_t row_idct_output[16 * 16] = { 0 }; #if HAVE_NEON_ASM // save d8-d15 register values. @@ -56,27 +48,19 @@ void vpx_idct16x16_256_add_neon(const int16_t *input, // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines // with result in pass1(pass1_output) to calculate final result in stage 7 // which will be saved into row_idct_output. 
- vpx_idct16x16_256_add_neon_pass2(input+1, - row_idct_output, - pass1_output, - 0, - dest, - dest_stride); + vpx_idct16x16_256_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0, + dest, dest_stride); /* Parallel idct on the lower 8 rows */ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the // stage 6 result in pass1_output. - vpx_idct16x16_256_add_neon_pass1(input+8*16, pass1_output, 8); + vpx_idct16x16_256_add_neon_pass1(input + 8 * 16, pass1_output, 8); // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines // with result in pass1(pass1_output) to calculate final result in stage 7 // which will be saved into row_idct_output. - vpx_idct16x16_256_add_neon_pass2(input+8*16+1, - row_idct_output+8, - pass1_output, - 0, - dest, - dest_stride); + vpx_idct16x16_256_add_neon_pass2(input + 8 * 16 + 1, row_idct_output + 8, + pass1_output, 0, dest, dest_stride); /* Parallel idct on the left 8 columns */ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the @@ -86,27 +70,20 @@ void vpx_idct16x16_256_add_neon(const int16_t *input, // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines // with result in pass1(pass1_output) to calculate final result in stage 7. // Then add the result to the destination data. - vpx_idct16x16_256_add_neon_pass2(row_idct_output+1, - row_idct_output, - pass1_output, - 1, - dest, - dest_stride); + vpx_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output, + pass1_output, 1, dest, dest_stride); /* Parallel idct on the right 8 columns */ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the // stage 6 result in pass1_output. 
- vpx_idct16x16_256_add_neon_pass1(row_idct_output+8*16, pass1_output, 8); + vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8); // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines // with result in pass1(pass1_output) to calculate final result in stage 7. // Then add the result to the destination data. - vpx_idct16x16_256_add_neon_pass2(row_idct_output+8*16+1, - row_idct_output+8, - pass1_output, - 1, - dest+8, - dest_stride); + vpx_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1, + row_idct_output + 8, pass1_output, 1, + dest + 8, dest_stride); #if HAVE_NEON_ASM // restore d8-d15 register values. @@ -116,13 +93,13 @@ void vpx_idct16x16_256_add_neon(const int16_t *input, return; } -void vpx_idct16x16_10_add_neon(const int16_t *input, - uint8_t *dest, int dest_stride) { +void vpx_idct16x16_10_add_neon(const int16_t *input, uint8_t *dest, + int dest_stride) { #if HAVE_NEON_ASM int64_t store_reg[8]; #endif - int16_t pass1_output[16*16] = {0}; - int16_t row_idct_output[16*16] = {0}; + int16_t pass1_output[16 * 16] = { 0 }; + int16_t row_idct_output[16 * 16] = { 0 }; #if HAVE_NEON_ASM // save d8-d15 register values. @@ -137,12 +114,8 @@ void vpx_idct16x16_10_add_neon(const int16_t *input, // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines // with result in pass1(pass1_output) to calculate final result in stage 7 // which will be saved into row_idct_output. - vpx_idct16x16_10_add_neon_pass2(input+1, - row_idct_output, - pass1_output, - 0, - dest, - dest_stride); + vpx_idct16x16_10_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0, + dest, dest_stride); /* Skip Parallel idct on the lower 8 rows as they are all 0s */ @@ -154,27 +127,20 @@ void vpx_idct16x16_10_add_neon(const int16_t *input, // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines // with result in pass1(pass1_output) to calculate final result in stage 7. 
// Then add the result to the destination data. - vpx_idct16x16_256_add_neon_pass2(row_idct_output+1, - row_idct_output, - pass1_output, - 1, - dest, - dest_stride); + vpx_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output, + pass1_output, 1, dest, dest_stride); /* Parallel idct on the right 8 columns */ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the // stage 6 result in pass1_output. - vpx_idct16x16_256_add_neon_pass1(row_idct_output+8*16, pass1_output, 8); + vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8); // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines // with result in pass1(pass1_output) to calculate final result in stage 7. // Then add the result to the destination data. - vpx_idct16x16_256_add_neon_pass2(row_idct_output+8*16+1, - row_idct_output+8, - pass1_output, - 1, - dest+8, - dest_stride); + vpx_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1, + row_idct_output + 8, pass1_output, 1, + dest + 8, dest_stride); #if HAVE_NEON_ASM // restore d8-d15 register values. 
diff --git a/libs/libvpx/vpx_dsp/arm/idct32x32_1_add_neon.c b/libs/libvpx/vpx_dsp/arm/idct32x32_1_add_neon.c index c25c0c4a5c..dab7d098e8 100644 --- a/libs/libvpx/vpx_dsp/arm/idct32x32_1_add_neon.c +++ b/libs/libvpx/vpx_dsp/arm/idct32x32_1_add_neon.c @@ -15,151 +15,126 @@ #include "vpx_dsp/inv_txfm.h" #include "vpx_ports/mem.h" -static INLINE void LD_16x8( - uint8_t *d, - int d_stride, - uint8x16_t *q8u8, - uint8x16_t *q9u8, - uint8x16_t *q10u8, - uint8x16_t *q11u8, - uint8x16_t *q12u8, - uint8x16_t *q13u8, - uint8x16_t *q14u8, - uint8x16_t *q15u8) { - *q8u8 = vld1q_u8(d); - d += d_stride; - *q9u8 = vld1q_u8(d); - d += d_stride; - *q10u8 = vld1q_u8(d); - d += d_stride; - *q11u8 = vld1q_u8(d); - d += d_stride; - *q12u8 = vld1q_u8(d); - d += d_stride; - *q13u8 = vld1q_u8(d); - d += d_stride; - *q14u8 = vld1q_u8(d); - d += d_stride; - *q15u8 = vld1q_u8(d); - return; +static INLINE void LD_16x8(uint8_t *d, int d_stride, uint8x16_t *q8u8, + uint8x16_t *q9u8, uint8x16_t *q10u8, + uint8x16_t *q11u8, uint8x16_t *q12u8, + uint8x16_t *q13u8, uint8x16_t *q14u8, + uint8x16_t *q15u8) { + *q8u8 = vld1q_u8(d); + d += d_stride; + *q9u8 = vld1q_u8(d); + d += d_stride; + *q10u8 = vld1q_u8(d); + d += d_stride; + *q11u8 = vld1q_u8(d); + d += d_stride; + *q12u8 = vld1q_u8(d); + d += d_stride; + *q13u8 = vld1q_u8(d); + d += d_stride; + *q14u8 = vld1q_u8(d); + d += d_stride; + *q15u8 = vld1q_u8(d); + return; } -static INLINE void ADD_DIFF_16x8( - uint8x16_t qdiffu8, - uint8x16_t *q8u8, - uint8x16_t *q9u8, - uint8x16_t *q10u8, - uint8x16_t *q11u8, - uint8x16_t *q12u8, - uint8x16_t *q13u8, - uint8x16_t *q14u8, - uint8x16_t *q15u8) { - *q8u8 = vqaddq_u8(*q8u8, qdiffu8); - *q9u8 = vqaddq_u8(*q9u8, qdiffu8); - *q10u8 = vqaddq_u8(*q10u8, qdiffu8); - *q11u8 = vqaddq_u8(*q11u8, qdiffu8); - *q12u8 = vqaddq_u8(*q12u8, qdiffu8); - *q13u8 = vqaddq_u8(*q13u8, qdiffu8); - *q14u8 = vqaddq_u8(*q14u8, qdiffu8); - *q15u8 = vqaddq_u8(*q15u8, qdiffu8); - return; +static INLINE void ADD_DIFF_16x8(uint8x16_t 
qdiffu8, uint8x16_t *q8u8, + uint8x16_t *q9u8, uint8x16_t *q10u8, + uint8x16_t *q11u8, uint8x16_t *q12u8, + uint8x16_t *q13u8, uint8x16_t *q14u8, + uint8x16_t *q15u8) { + *q8u8 = vqaddq_u8(*q8u8, qdiffu8); + *q9u8 = vqaddq_u8(*q9u8, qdiffu8); + *q10u8 = vqaddq_u8(*q10u8, qdiffu8); + *q11u8 = vqaddq_u8(*q11u8, qdiffu8); + *q12u8 = vqaddq_u8(*q12u8, qdiffu8); + *q13u8 = vqaddq_u8(*q13u8, qdiffu8); + *q14u8 = vqaddq_u8(*q14u8, qdiffu8); + *q15u8 = vqaddq_u8(*q15u8, qdiffu8); + return; } -static INLINE void SUB_DIFF_16x8( - uint8x16_t qdiffu8, - uint8x16_t *q8u8, - uint8x16_t *q9u8, - uint8x16_t *q10u8, - uint8x16_t *q11u8, - uint8x16_t *q12u8, - uint8x16_t *q13u8, - uint8x16_t *q14u8, - uint8x16_t *q15u8) { - *q8u8 = vqsubq_u8(*q8u8, qdiffu8); - *q9u8 = vqsubq_u8(*q9u8, qdiffu8); - *q10u8 = vqsubq_u8(*q10u8, qdiffu8); - *q11u8 = vqsubq_u8(*q11u8, qdiffu8); - *q12u8 = vqsubq_u8(*q12u8, qdiffu8); - *q13u8 = vqsubq_u8(*q13u8, qdiffu8); - *q14u8 = vqsubq_u8(*q14u8, qdiffu8); - *q15u8 = vqsubq_u8(*q15u8, qdiffu8); - return; +static INLINE void SUB_DIFF_16x8(uint8x16_t qdiffu8, uint8x16_t *q8u8, + uint8x16_t *q9u8, uint8x16_t *q10u8, + uint8x16_t *q11u8, uint8x16_t *q12u8, + uint8x16_t *q13u8, uint8x16_t *q14u8, + uint8x16_t *q15u8) { + *q8u8 = vqsubq_u8(*q8u8, qdiffu8); + *q9u8 = vqsubq_u8(*q9u8, qdiffu8); + *q10u8 = vqsubq_u8(*q10u8, qdiffu8); + *q11u8 = vqsubq_u8(*q11u8, qdiffu8); + *q12u8 = vqsubq_u8(*q12u8, qdiffu8); + *q13u8 = vqsubq_u8(*q13u8, qdiffu8); + *q14u8 = vqsubq_u8(*q14u8, qdiffu8); + *q15u8 = vqsubq_u8(*q15u8, qdiffu8); + return; } -static INLINE void ST_16x8( - uint8_t *d, - int d_stride, - uint8x16_t *q8u8, - uint8x16_t *q9u8, - uint8x16_t *q10u8, - uint8x16_t *q11u8, - uint8x16_t *q12u8, - uint8x16_t *q13u8, - uint8x16_t *q14u8, - uint8x16_t *q15u8) { - vst1q_u8(d, *q8u8); - d += d_stride; - vst1q_u8(d, *q9u8); - d += d_stride; - vst1q_u8(d, *q10u8); - d += d_stride; - vst1q_u8(d, *q11u8); - d += d_stride; - vst1q_u8(d, *q12u8); - d += d_stride; - 
vst1q_u8(d, *q13u8); - d += d_stride; - vst1q_u8(d, *q14u8); - d += d_stride; - vst1q_u8(d, *q15u8); - return; +static INLINE void ST_16x8(uint8_t *d, int d_stride, uint8x16_t *q8u8, + uint8x16_t *q9u8, uint8x16_t *q10u8, + uint8x16_t *q11u8, uint8x16_t *q12u8, + uint8x16_t *q13u8, uint8x16_t *q14u8, + uint8x16_t *q15u8) { + vst1q_u8(d, *q8u8); + d += d_stride; + vst1q_u8(d, *q9u8); + d += d_stride; + vst1q_u8(d, *q10u8); + d += d_stride; + vst1q_u8(d, *q11u8); + d += d_stride; + vst1q_u8(d, *q12u8); + d += d_stride; + vst1q_u8(d, *q13u8); + d += d_stride; + vst1q_u8(d, *q14u8); + d += d_stride; + vst1q_u8(d, *q15u8); + return; } -void vpx_idct32x32_1_add_neon( - int16_t *input, - uint8_t *dest, - int dest_stride) { - uint8x16_t q0u8, q8u8, q9u8, q10u8, q11u8, q12u8, q13u8, q14u8, q15u8; - int i, j, dest_stride8; - uint8_t *d; - int16_t a1, cospi_16_64 = 11585; - int16_t out = dct_const_round_shift(input[0] * cospi_16_64); +void vpx_idct32x32_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { + uint8x16_t q0u8, q8u8, q9u8, q10u8, q11u8, q12u8, q13u8, q14u8, q15u8; + int i, j, dest_stride8; + uint8_t *d; + int16_t a1, cospi_16_64 = 11585; + int16_t out = dct_const_round_shift(input[0] * cospi_16_64); - out = dct_const_round_shift(out * cospi_16_64); - a1 = ROUND_POWER_OF_TWO(out, 6); + out = dct_const_round_shift(out * cospi_16_64); + a1 = ROUND_POWER_OF_TWO(out, 6); - dest_stride8 = dest_stride * 8; - if (a1 >= 0) { // diff_positive_32_32 - a1 = a1 < 0 ? 0 : a1 > 255 ? 
255 : a1; - q0u8 = vdupq_n_u8(a1); - for (i = 0; i < 2; i++, dest += 16) { // diff_positive_32_32_loop - d = dest; - for (j = 0; j < 4; j++) { - LD_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8, - &q12u8, &q13u8, &q14u8, &q15u8); - ADD_DIFF_16x8(q0u8, &q8u8, &q9u8, &q10u8, &q11u8, - &q12u8, &q13u8, &q14u8, &q15u8); - ST_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8, - &q12u8, &q13u8, &q14u8, &q15u8); - d += dest_stride8; - } - } - } else { // diff_negative_32_32 - a1 = -a1; - a1 = a1 < 0 ? 0 : a1 > 255 ? 255 : a1; - q0u8 = vdupq_n_u8(a1); - for (i = 0; i < 2; i++, dest += 16) { // diff_negative_32_32_loop - d = dest; - for (j = 0; j < 4; j++) { - LD_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8, - &q12u8, &q13u8, &q14u8, &q15u8); - SUB_DIFF_16x8(q0u8, &q8u8, &q9u8, &q10u8, &q11u8, - &q12u8, &q13u8, &q14u8, &q15u8); - ST_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8, - &q12u8, &q13u8, &q14u8, &q15u8); - d += dest_stride8; - } - } + dest_stride8 = dest_stride * 8; + if (a1 >= 0) { // diff_positive_32_32 + a1 = a1 < 0 ? 0 : a1 > 255 ? 255 : a1; + q0u8 = vdupq_n_u8(a1); + for (i = 0; i < 2; i++, dest += 16) { // diff_positive_32_32_loop + d = dest; + for (j = 0; j < 4; j++) { + LD_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8, &q12u8, &q13u8, + &q14u8, &q15u8); + ADD_DIFF_16x8(q0u8, &q8u8, &q9u8, &q10u8, &q11u8, &q12u8, &q13u8, + &q14u8, &q15u8); + ST_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8, &q12u8, &q13u8, + &q14u8, &q15u8); + d += dest_stride8; + } } - return; + } else { // diff_negative_32_32 + a1 = -a1; + a1 = a1 < 0 ? 0 : a1 > 255 ? 
255 : a1; + q0u8 = vdupq_n_u8(a1); + for (i = 0; i < 2; i++, dest += 16) { // diff_negative_32_32_loop + d = dest; + for (j = 0; j < 4; j++) { + LD_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8, &q12u8, &q13u8, + &q14u8, &q15u8); + SUB_DIFF_16x8(q0u8, &q8u8, &q9u8, &q10u8, &q11u8, &q12u8, &q13u8, + &q14u8, &q15u8); + ST_16x8(d, dest_stride, &q8u8, &q9u8, &q10u8, &q11u8, &q12u8, &q13u8, + &q14u8, &q15u8); + d += dest_stride8; + } + } + } + return; } diff --git a/libs/libvpx/vpx_dsp/arm/idct32x32_add_neon.c b/libs/libvpx/vpx_dsp/arm/idct32x32_add_neon.c index 025437eb96..c4d1e8473c 100644 --- a/libs/libvpx/vpx_dsp/arm/idct32x32_add_neon.c +++ b/libs/libvpx/vpx_dsp/arm/idct32x32_add_neon.c @@ -11,709 +11,630 @@ #include #include "./vpx_config.h" +#include "vpx_dsp/arm/transpose_neon.h" #include "vpx_dsp/txfm_common.h" #define LOAD_FROM_TRANSPOSED(prev, first, second) \ - q14s16 = vld1q_s16(trans_buf + first * 8); \ - q13s16 = vld1q_s16(trans_buf + second * 8); + q14s16 = vld1q_s16(trans_buf + first * 8); \ + q13s16 = vld1q_s16(trans_buf + second * 8); #define LOAD_FROM_OUTPUT(prev, first, second, qA, qB) \ - qA = vld1q_s16(out + first * 32); \ - qB = vld1q_s16(out + second * 32); + qA = vld1q_s16(out + first * 32); \ + qB = vld1q_s16(out + second * 32); #define STORE_IN_OUTPUT(prev, first, second, qA, qB) \ - vst1q_s16(out + first * 32, qA); \ - vst1q_s16(out + second * 32, qB); + vst1q_s16(out + first * 32, qA); \ + vst1q_s16(out + second * 32, qB); -#define STORE_COMBINE_CENTER_RESULTS(r10, r9) \ - __STORE_COMBINE_CENTER_RESULTS(r10, r9, stride, \ - q6s16, q7s16, q8s16, q9s16); -static INLINE void __STORE_COMBINE_CENTER_RESULTS( - uint8_t *p1, - uint8_t *p2, - int stride, - int16x8_t q6s16, - int16x8_t q7s16, - int16x8_t q8s16, - int16x8_t q9s16) { - int16x4_t d8s16, d9s16, d10s16, d11s16; +#define STORE_COMBINE_CENTER_RESULTS(r10, r9) \ + __STORE_COMBINE_CENTER_RESULTS(r10, r9, stride, q6s16, q7s16, q8s16, q9s16); +static INLINE void 
__STORE_COMBINE_CENTER_RESULTS(uint8_t *p1, uint8_t *p2, + int stride, int16x8_t q6s16, + int16x8_t q7s16, + int16x8_t q8s16, + int16x8_t q9s16) { + int16x4_t d8s16, d9s16, d10s16, d11s16; - d8s16 = vld1_s16((int16_t *)p1); - p1 += stride; - d11s16 = vld1_s16((int16_t *)p2); - p2 -= stride; - d9s16 = vld1_s16((int16_t *)p1); - d10s16 = vld1_s16((int16_t *)p2); + d8s16 = vld1_s16((int16_t *)p1); + p1 += stride; + d11s16 = vld1_s16((int16_t *)p2); + p2 -= stride; + d9s16 = vld1_s16((int16_t *)p1); + d10s16 = vld1_s16((int16_t *)p2); - q7s16 = vrshrq_n_s16(q7s16, 6); - q8s16 = vrshrq_n_s16(q8s16, 6); - q9s16 = vrshrq_n_s16(q9s16, 6); - q6s16 = vrshrq_n_s16(q6s16, 6); + q7s16 = vrshrq_n_s16(q7s16, 6); + q8s16 = vrshrq_n_s16(q8s16, 6); + q9s16 = vrshrq_n_s16(q9s16, 6); + q6s16 = vrshrq_n_s16(q6s16, 6); - q7s16 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q7s16), - vreinterpret_u8_s16(d9s16))); - q8s16 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q8s16), - vreinterpret_u8_s16(d10s16))); - q9s16 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q9s16), - vreinterpret_u8_s16(d11s16))); - q6s16 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q6s16), - vreinterpret_u8_s16(d8s16))); + q7s16 = vreinterpretq_s16_u16( + vaddw_u8(vreinterpretq_u16_s16(q7s16), vreinterpret_u8_s16(d9s16))); + q8s16 = vreinterpretq_s16_u16( + vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_s16(d10s16))); + q9s16 = vreinterpretq_s16_u16( + vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_s16(d11s16))); + q6s16 = vreinterpretq_s16_u16( + vaddw_u8(vreinterpretq_u16_s16(q6s16), vreinterpret_u8_s16(d8s16))); - d9s16 = vreinterpret_s16_u8(vqmovun_s16(q7s16)); - d10s16 = vreinterpret_s16_u8(vqmovun_s16(q8s16)); - d11s16 = vreinterpret_s16_u8(vqmovun_s16(q9s16)); - d8s16 = vreinterpret_s16_u8(vqmovun_s16(q6s16)); + d9s16 = vreinterpret_s16_u8(vqmovun_s16(q7s16)); + d10s16 = vreinterpret_s16_u8(vqmovun_s16(q8s16)); + d11s16 = 
vreinterpret_s16_u8(vqmovun_s16(q9s16)); + d8s16 = vreinterpret_s16_u8(vqmovun_s16(q6s16)); - vst1_s16((int16_t *)p1, d9s16); - p1 -= stride; - vst1_s16((int16_t *)p2, d10s16); - p2 += stride; - vst1_s16((int16_t *)p1, d8s16); - vst1_s16((int16_t *)p2, d11s16); - return; + vst1_s16((int16_t *)p1, d9s16); + p1 -= stride; + vst1_s16((int16_t *)p2, d10s16); + p2 += stride; + vst1_s16((int16_t *)p1, d8s16); + vst1_s16((int16_t *)p2, d11s16); + return; } -#define STORE_COMBINE_EXTREME_RESULTS(r7, r6); \ - __STORE_COMBINE_EXTREME_RESULTS(r7, r6, stride, \ - q4s16, q5s16, q6s16, q7s16); -static INLINE void __STORE_COMBINE_EXTREME_RESULTS( - uint8_t *p1, - uint8_t *p2, - int stride, - int16x8_t q4s16, - int16x8_t q5s16, - int16x8_t q6s16, - int16x8_t q7s16) { - int16x4_t d4s16, d5s16, d6s16, d7s16; +#define STORE_COMBINE_EXTREME_RESULTS(r7, r6) \ + __STORE_COMBINE_EXTREME_RESULTS(r7, r6, stride, q4s16, q5s16, q6s16, q7s16); +static INLINE void __STORE_COMBINE_EXTREME_RESULTS(uint8_t *p1, uint8_t *p2, + int stride, int16x8_t q4s16, + int16x8_t q5s16, + int16x8_t q6s16, + int16x8_t q7s16) { + int16x4_t d4s16, d5s16, d6s16, d7s16; - d4s16 = vld1_s16((int16_t *)p1); - p1 += stride; - d7s16 = vld1_s16((int16_t *)p2); - p2 -= stride; - d5s16 = vld1_s16((int16_t *)p1); - d6s16 = vld1_s16((int16_t *)p2); + d4s16 = vld1_s16((int16_t *)p1); + p1 += stride; + d7s16 = vld1_s16((int16_t *)p2); + p2 -= stride; + d5s16 = vld1_s16((int16_t *)p1); + d6s16 = vld1_s16((int16_t *)p2); - q5s16 = vrshrq_n_s16(q5s16, 6); - q6s16 = vrshrq_n_s16(q6s16, 6); - q7s16 = vrshrq_n_s16(q7s16, 6); - q4s16 = vrshrq_n_s16(q4s16, 6); + q5s16 = vrshrq_n_s16(q5s16, 6); + q6s16 = vrshrq_n_s16(q6s16, 6); + q7s16 = vrshrq_n_s16(q7s16, 6); + q4s16 = vrshrq_n_s16(q4s16, 6); - q5s16 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q5s16), - vreinterpret_u8_s16(d5s16))); - q6s16 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q6s16), - vreinterpret_u8_s16(d6s16))); - q7s16 = 
vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q7s16), - vreinterpret_u8_s16(d7s16))); - q4s16 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q4s16), - vreinterpret_u8_s16(d4s16))); + q5s16 = vreinterpretq_s16_u16( + vaddw_u8(vreinterpretq_u16_s16(q5s16), vreinterpret_u8_s16(d5s16))); + q6s16 = vreinterpretq_s16_u16( + vaddw_u8(vreinterpretq_u16_s16(q6s16), vreinterpret_u8_s16(d6s16))); + q7s16 = vreinterpretq_s16_u16( + vaddw_u8(vreinterpretq_u16_s16(q7s16), vreinterpret_u8_s16(d7s16))); + q4s16 = vreinterpretq_s16_u16( + vaddw_u8(vreinterpretq_u16_s16(q4s16), vreinterpret_u8_s16(d4s16))); - d5s16 = vreinterpret_s16_u8(vqmovun_s16(q5s16)); - d6s16 = vreinterpret_s16_u8(vqmovun_s16(q6s16)); - d7s16 = vreinterpret_s16_u8(vqmovun_s16(q7s16)); - d4s16 = vreinterpret_s16_u8(vqmovun_s16(q4s16)); + d5s16 = vreinterpret_s16_u8(vqmovun_s16(q5s16)); + d6s16 = vreinterpret_s16_u8(vqmovun_s16(q6s16)); + d7s16 = vreinterpret_s16_u8(vqmovun_s16(q7s16)); + d4s16 = vreinterpret_s16_u8(vqmovun_s16(q4s16)); - vst1_s16((int16_t *)p1, d5s16); - p1 -= stride; - vst1_s16((int16_t *)p2, d6s16); - p2 += stride; - vst1_s16((int16_t *)p2, d7s16); - vst1_s16((int16_t *)p1, d4s16); - return; + vst1_s16((int16_t *)p1, d5s16); + p1 -= stride; + vst1_s16((int16_t *)p2, d6s16); + p2 += stride; + vst1_s16((int16_t *)p2, d7s16); + vst1_s16((int16_t *)p1, d4s16); + return; } #define DO_BUTTERFLY_STD(const_1, const_2, qA, qB) \ - DO_BUTTERFLY(q14s16, q13s16, const_1, const_2, qA, qB); -static INLINE void DO_BUTTERFLY( - int16x8_t q14s16, - int16x8_t q13s16, - int16_t first_const, - int16_t second_const, - int16x8_t *qAs16, - int16x8_t *qBs16) { - int16x4_t d30s16, d31s16; - int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q15s32; - int16x4_t dCs16, dDs16, dAs16, dBs16; + DO_BUTTERFLY(q14s16, q13s16, const_1, const_2, qA, qB); +static INLINE void DO_BUTTERFLY(int16x8_t q14s16, int16x8_t q13s16, + int16_t first_const, int16_t second_const, + int16x8_t *qAs16, int16x8_t *qBs16) { + 
int16x4_t d30s16, d31s16; + int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q15s32; + int16x4_t dCs16, dDs16, dAs16, dBs16; - dCs16 = vget_low_s16(q14s16); - dDs16 = vget_high_s16(q14s16); - dAs16 = vget_low_s16(q13s16); - dBs16 = vget_high_s16(q13s16); + dCs16 = vget_low_s16(q14s16); + dDs16 = vget_high_s16(q14s16); + dAs16 = vget_low_s16(q13s16); + dBs16 = vget_high_s16(q13s16); - d30s16 = vdup_n_s16(first_const); - d31s16 = vdup_n_s16(second_const); + d30s16 = vdup_n_s16(first_const); + d31s16 = vdup_n_s16(second_const); - q8s32 = vmull_s16(dCs16, d30s16); - q10s32 = vmull_s16(dAs16, d31s16); - q9s32 = vmull_s16(dDs16, d30s16); - q11s32 = vmull_s16(dBs16, d31s16); - q12s32 = vmull_s16(dCs16, d31s16); + q8s32 = vmull_s16(dCs16, d30s16); + q10s32 = vmull_s16(dAs16, d31s16); + q9s32 = vmull_s16(dDs16, d30s16); + q11s32 = vmull_s16(dBs16, d31s16); + q12s32 = vmull_s16(dCs16, d31s16); - q8s32 = vsubq_s32(q8s32, q10s32); - q9s32 = vsubq_s32(q9s32, q11s32); + q8s32 = vsubq_s32(q8s32, q10s32); + q9s32 = vsubq_s32(q9s32, q11s32); - q10s32 = vmull_s16(dDs16, d31s16); - q11s32 = vmull_s16(dAs16, d30s16); - q15s32 = vmull_s16(dBs16, d30s16); + q10s32 = vmull_s16(dDs16, d31s16); + q11s32 = vmull_s16(dAs16, d30s16); + q15s32 = vmull_s16(dBs16, d30s16); - q11s32 = vaddq_s32(q12s32, q11s32); - q10s32 = vaddq_s32(q10s32, q15s32); + q11s32 = vaddq_s32(q12s32, q11s32); + q10s32 = vaddq_s32(q10s32, q15s32); - *qAs16 = vcombine_s16(vqrshrn_n_s32(q8s32, 14), - vqrshrn_n_s32(q9s32, 14)); - *qBs16 = vcombine_s16(vqrshrn_n_s32(q11s32, 14), - vqrshrn_n_s32(q10s32, 14)); - return; + *qAs16 = vcombine_s16(vqrshrn_n_s32(q8s32, 14), vqrshrn_n_s32(q9s32, 14)); + *qBs16 = vcombine_s16(vqrshrn_n_s32(q11s32, 14), vqrshrn_n_s32(q10s32, 14)); + return; } -static INLINE void idct32_transpose_pair( - int16_t *input, - int16_t *t_buf) { - int16_t *in; - int i; - const int stride = 32; - int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; - int16x4_t d24s16, d25s16, d26s16, 
d27s16, d28s16, d29s16, d30s16, d31s16; - int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; - int32x4x2_t q0x2s32, q1x2s32, q2x2s32, q3x2s32; - int16x8x2_t q0x2s16, q1x2s16, q2x2s16, q3x2s16; +static INLINE void idct32_transpose_pair(int16_t *input, int16_t *t_buf) { + int16_t *in; + int i; + const int stride = 32; + int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; - for (i = 0; i < 4; i++, input += 8) { - in = input; - q8s16 = vld1q_s16(in); - in += stride; - q9s16 = vld1q_s16(in); - in += stride; - q10s16 = vld1q_s16(in); - in += stride; - q11s16 = vld1q_s16(in); - in += stride; - q12s16 = vld1q_s16(in); - in += stride; - q13s16 = vld1q_s16(in); - in += stride; - q14s16 = vld1q_s16(in); - in += stride; - q15s16 = vld1q_s16(in); + for (i = 0; i < 4; i++, input += 8) { + in = input; + q8s16 = vld1q_s16(in); + in += stride; + q9s16 = vld1q_s16(in); + in += stride; + q10s16 = vld1q_s16(in); + in += stride; + q11s16 = vld1q_s16(in); + in += stride; + q12s16 = vld1q_s16(in); + in += stride; + q13s16 = vld1q_s16(in); + in += stride; + q14s16 = vld1q_s16(in); + in += stride; + q15s16 = vld1q_s16(in); - d16s16 = vget_low_s16(q8s16); - d17s16 = vget_high_s16(q8s16); - d18s16 = vget_low_s16(q9s16); - d19s16 = vget_high_s16(q9s16); - d20s16 = vget_low_s16(q10s16); - d21s16 = vget_high_s16(q10s16); - d22s16 = vget_low_s16(q11s16); - d23s16 = vget_high_s16(q11s16); - d24s16 = vget_low_s16(q12s16); - d25s16 = vget_high_s16(q12s16); - d26s16 = vget_low_s16(q13s16); - d27s16 = vget_high_s16(q13s16); - d28s16 = vget_low_s16(q14s16); - d29s16 = vget_high_s16(q14s16); - d30s16 = vget_low_s16(q15s16); - d31s16 = vget_high_s16(q15s16); + transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, + &q14s16, &q15s16); - q8s16 = vcombine_s16(d16s16, d24s16); // vswp d17, d24 - q9s16 = vcombine_s16(d18s16, d26s16); // vswp d19, d26 - q10s16 = vcombine_s16(d20s16, d28s16); // vswp d21, d28 - q11s16 = vcombine_s16(d22s16, d30s16); // 
vswp d23, d30 - q12s16 = vcombine_s16(d17s16, d25s16); - q13s16 = vcombine_s16(d19s16, d27s16); - q14s16 = vcombine_s16(d21s16, d29s16); - q15s16 = vcombine_s16(d23s16, d31s16); - - q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q8s16), - vreinterpretq_s32_s16(q10s16)); - q1x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q9s16), - vreinterpretq_s32_s16(q11s16)); - q2x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q12s16), - vreinterpretq_s32_s16(q14s16)); - q3x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q13s16), - vreinterpretq_s32_s16(q15s16)); - - q0x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[0]), // q8 - vreinterpretq_s16_s32(q1x2s32.val[0])); // q9 - q1x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[1]), // q10 - vreinterpretq_s16_s32(q1x2s32.val[1])); // q11 - q2x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[0]), // q12 - vreinterpretq_s16_s32(q3x2s32.val[0])); // q13 - q3x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[1]), // q14 - vreinterpretq_s16_s32(q3x2s32.val[1])); // q15 - - vst1q_s16(t_buf, q0x2s16.val[0]); - t_buf += 8; - vst1q_s16(t_buf, q0x2s16.val[1]); - t_buf += 8; - vst1q_s16(t_buf, q1x2s16.val[0]); - t_buf += 8; - vst1q_s16(t_buf, q1x2s16.val[1]); - t_buf += 8; - vst1q_s16(t_buf, q2x2s16.val[0]); - t_buf += 8; - vst1q_s16(t_buf, q2x2s16.val[1]); - t_buf += 8; - vst1q_s16(t_buf, q3x2s16.val[0]); - t_buf += 8; - vst1q_s16(t_buf, q3x2s16.val[1]); - t_buf += 8; - } - return; + vst1q_s16(t_buf, q8s16); + t_buf += 8; + vst1q_s16(t_buf, q9s16); + t_buf += 8; + vst1q_s16(t_buf, q10s16); + t_buf += 8; + vst1q_s16(t_buf, q11s16); + t_buf += 8; + vst1q_s16(t_buf, q12s16); + t_buf += 8; + vst1q_s16(t_buf, q13s16); + t_buf += 8; + vst1q_s16(t_buf, q14s16); + t_buf += 8; + vst1q_s16(t_buf, q15s16); + t_buf += 8; + } + return; } -static INLINE void idct32_bands_end_1st_pass( - int16_t *out, - int16x8_t q2s16, - int16x8_t q3s16, - int16x8_t q6s16, - int16x8_t q7s16, - int16x8_t q8s16, - int16x8_t q9s16, - int16x8_t q10s16, - int16x8_t q11s16, - int16x8_t q12s16, - 
int16x8_t q13s16, - int16x8_t q14s16, - int16x8_t q15s16) { - int16x8_t q0s16, q1s16, q4s16, q5s16; +static INLINE void idct32_bands_end_1st_pass(int16_t *out, int16x8_t q2s16, + int16x8_t q3s16, int16x8_t q6s16, + int16x8_t q7s16, int16x8_t q8s16, + int16x8_t q9s16, int16x8_t q10s16, + int16x8_t q11s16, int16x8_t q12s16, + int16x8_t q13s16, int16x8_t q14s16, + int16x8_t q15s16) { + int16x8_t q0s16, q1s16, q4s16, q5s16; - STORE_IN_OUTPUT(17, 16, 17, q6s16, q7s16); - STORE_IN_OUTPUT(17, 14, 15, q8s16, q9s16); + STORE_IN_OUTPUT(17, 16, 17, q6s16, q7s16); + STORE_IN_OUTPUT(17, 14, 15, q8s16, q9s16); - LOAD_FROM_OUTPUT(15, 30, 31, q0s16, q1s16); - q4s16 = vaddq_s16(q2s16, q1s16); - q5s16 = vaddq_s16(q3s16, q0s16); - q6s16 = vsubq_s16(q3s16, q0s16); - q7s16 = vsubq_s16(q2s16, q1s16); - STORE_IN_OUTPUT(31, 30, 31, q6s16, q7s16); - STORE_IN_OUTPUT(31, 0, 1, q4s16, q5s16); + LOAD_FROM_OUTPUT(15, 30, 31, q0s16, q1s16); + q4s16 = vaddq_s16(q2s16, q1s16); + q5s16 = vaddq_s16(q3s16, q0s16); + q6s16 = vsubq_s16(q3s16, q0s16); + q7s16 = vsubq_s16(q2s16, q1s16); + STORE_IN_OUTPUT(31, 30, 31, q6s16, q7s16); + STORE_IN_OUTPUT(31, 0, 1, q4s16, q5s16); - LOAD_FROM_OUTPUT(1, 12, 13, q0s16, q1s16); - q2s16 = vaddq_s16(q10s16, q1s16); - q3s16 = vaddq_s16(q11s16, q0s16); - q4s16 = vsubq_s16(q11s16, q0s16); - q5s16 = vsubq_s16(q10s16, q1s16); + LOAD_FROM_OUTPUT(1, 12, 13, q0s16, q1s16); + q2s16 = vaddq_s16(q10s16, q1s16); + q3s16 = vaddq_s16(q11s16, q0s16); + q4s16 = vsubq_s16(q11s16, q0s16); + q5s16 = vsubq_s16(q10s16, q1s16); - LOAD_FROM_OUTPUT(13, 18, 19, q0s16, q1s16); - q8s16 = vaddq_s16(q4s16, q1s16); - q9s16 = vaddq_s16(q5s16, q0s16); - q6s16 = vsubq_s16(q5s16, q0s16); - q7s16 = vsubq_s16(q4s16, q1s16); - STORE_IN_OUTPUT(19, 18, 19, q6s16, q7s16); - STORE_IN_OUTPUT(19, 12, 13, q8s16, q9s16); + LOAD_FROM_OUTPUT(13, 18, 19, q0s16, q1s16); + q8s16 = vaddq_s16(q4s16, q1s16); + q9s16 = vaddq_s16(q5s16, q0s16); + q6s16 = vsubq_s16(q5s16, q0s16); + q7s16 = vsubq_s16(q4s16, q1s16); + 
STORE_IN_OUTPUT(19, 18, 19, q6s16, q7s16); + STORE_IN_OUTPUT(19, 12, 13, q8s16, q9s16); - LOAD_FROM_OUTPUT(13, 28, 29, q0s16, q1s16); - q4s16 = vaddq_s16(q2s16, q1s16); - q5s16 = vaddq_s16(q3s16, q0s16); - q6s16 = vsubq_s16(q3s16, q0s16); - q7s16 = vsubq_s16(q2s16, q1s16); - STORE_IN_OUTPUT(29, 28, 29, q6s16, q7s16); - STORE_IN_OUTPUT(29, 2, 3, q4s16, q5s16); + LOAD_FROM_OUTPUT(13, 28, 29, q0s16, q1s16); + q4s16 = vaddq_s16(q2s16, q1s16); + q5s16 = vaddq_s16(q3s16, q0s16); + q6s16 = vsubq_s16(q3s16, q0s16); + q7s16 = vsubq_s16(q2s16, q1s16); + STORE_IN_OUTPUT(29, 28, 29, q6s16, q7s16); + STORE_IN_OUTPUT(29, 2, 3, q4s16, q5s16); - LOAD_FROM_OUTPUT(3, 10, 11, q0s16, q1s16); - q2s16 = vaddq_s16(q12s16, q1s16); - q3s16 = vaddq_s16(q13s16, q0s16); - q4s16 = vsubq_s16(q13s16, q0s16); - q5s16 = vsubq_s16(q12s16, q1s16); + LOAD_FROM_OUTPUT(3, 10, 11, q0s16, q1s16); + q2s16 = vaddq_s16(q12s16, q1s16); + q3s16 = vaddq_s16(q13s16, q0s16); + q4s16 = vsubq_s16(q13s16, q0s16); + q5s16 = vsubq_s16(q12s16, q1s16); - LOAD_FROM_OUTPUT(11, 20, 21, q0s16, q1s16); - q8s16 = vaddq_s16(q4s16, q1s16); - q9s16 = vaddq_s16(q5s16, q0s16); - q6s16 = vsubq_s16(q5s16, q0s16); - q7s16 = vsubq_s16(q4s16, q1s16); - STORE_IN_OUTPUT(21, 20, 21, q6s16, q7s16); - STORE_IN_OUTPUT(21, 10, 11, q8s16, q9s16); + LOAD_FROM_OUTPUT(11, 20, 21, q0s16, q1s16); + q8s16 = vaddq_s16(q4s16, q1s16); + q9s16 = vaddq_s16(q5s16, q0s16); + q6s16 = vsubq_s16(q5s16, q0s16); + q7s16 = vsubq_s16(q4s16, q1s16); + STORE_IN_OUTPUT(21, 20, 21, q6s16, q7s16); + STORE_IN_OUTPUT(21, 10, 11, q8s16, q9s16); - LOAD_FROM_OUTPUT(11, 26, 27, q0s16, q1s16); - q4s16 = vaddq_s16(q2s16, q1s16); - q5s16 = vaddq_s16(q3s16, q0s16); - q6s16 = vsubq_s16(q3s16, q0s16); - q7s16 = vsubq_s16(q2s16, q1s16); - STORE_IN_OUTPUT(27, 26, 27, q6s16, q7s16); - STORE_IN_OUTPUT(27, 4, 5, q4s16, q5s16); + LOAD_FROM_OUTPUT(11, 26, 27, q0s16, q1s16); + q4s16 = vaddq_s16(q2s16, q1s16); + q5s16 = vaddq_s16(q3s16, q0s16); + q6s16 = vsubq_s16(q3s16, q0s16); + q7s16 
= vsubq_s16(q2s16, q1s16); + STORE_IN_OUTPUT(27, 26, 27, q6s16, q7s16); + STORE_IN_OUTPUT(27, 4, 5, q4s16, q5s16); - LOAD_FROM_OUTPUT(5, 8, 9, q0s16, q1s16); - q2s16 = vaddq_s16(q14s16, q1s16); - q3s16 = vaddq_s16(q15s16, q0s16); - q4s16 = vsubq_s16(q15s16, q0s16); - q5s16 = vsubq_s16(q14s16, q1s16); + LOAD_FROM_OUTPUT(5, 8, 9, q0s16, q1s16); + q2s16 = vaddq_s16(q14s16, q1s16); + q3s16 = vaddq_s16(q15s16, q0s16); + q4s16 = vsubq_s16(q15s16, q0s16); + q5s16 = vsubq_s16(q14s16, q1s16); - LOAD_FROM_OUTPUT(9, 22, 23, q0s16, q1s16); - q8s16 = vaddq_s16(q4s16, q1s16); - q9s16 = vaddq_s16(q5s16, q0s16); - q6s16 = vsubq_s16(q5s16, q0s16); - q7s16 = vsubq_s16(q4s16, q1s16); - STORE_IN_OUTPUT(23, 22, 23, q6s16, q7s16); - STORE_IN_OUTPUT(23, 8, 9, q8s16, q9s16); + LOAD_FROM_OUTPUT(9, 22, 23, q0s16, q1s16); + q8s16 = vaddq_s16(q4s16, q1s16); + q9s16 = vaddq_s16(q5s16, q0s16); + q6s16 = vsubq_s16(q5s16, q0s16); + q7s16 = vsubq_s16(q4s16, q1s16); + STORE_IN_OUTPUT(23, 22, 23, q6s16, q7s16); + STORE_IN_OUTPUT(23, 8, 9, q8s16, q9s16); - LOAD_FROM_OUTPUT(9, 24, 25, q0s16, q1s16); - q4s16 = vaddq_s16(q2s16, q1s16); - q5s16 = vaddq_s16(q3s16, q0s16); - q6s16 = vsubq_s16(q3s16, q0s16); - q7s16 = vsubq_s16(q2s16, q1s16); - STORE_IN_OUTPUT(25, 24, 25, q6s16, q7s16); - STORE_IN_OUTPUT(25, 6, 7, q4s16, q5s16); - return; + LOAD_FROM_OUTPUT(9, 24, 25, q0s16, q1s16); + q4s16 = vaddq_s16(q2s16, q1s16); + q5s16 = vaddq_s16(q3s16, q0s16); + q6s16 = vsubq_s16(q3s16, q0s16); + q7s16 = vsubq_s16(q2s16, q1s16); + STORE_IN_OUTPUT(25, 24, 25, q6s16, q7s16); + STORE_IN_OUTPUT(25, 6, 7, q4s16, q5s16); + return; } static INLINE void idct32_bands_end_2nd_pass( - int16_t *out, - uint8_t *dest, - int stride, - int16x8_t q2s16, - int16x8_t q3s16, - int16x8_t q6s16, - int16x8_t q7s16, - int16x8_t q8s16, - int16x8_t q9s16, - int16x8_t q10s16, - int16x8_t q11s16, - int16x8_t q12s16, - int16x8_t q13s16, - int16x8_t q14s16, - int16x8_t q15s16) { - uint8_t *r6 = dest + 31 * stride; - uint8_t *r7 = dest/* + 0 * 
stride*/; - uint8_t *r9 = dest + 15 * stride; - uint8_t *r10 = dest + 16 * stride; - int str2 = stride << 1; - int16x8_t q0s16, q1s16, q4s16, q5s16; + int16_t *out, uint8_t *dest, int stride, int16x8_t q2s16, int16x8_t q3s16, + int16x8_t q6s16, int16x8_t q7s16, int16x8_t q8s16, int16x8_t q9s16, + int16x8_t q10s16, int16x8_t q11s16, int16x8_t q12s16, int16x8_t q13s16, + int16x8_t q14s16, int16x8_t q15s16) { + uint8_t *r6 = dest + 31 * stride; + uint8_t *r7 = dest /* + 0 * stride*/; + uint8_t *r9 = dest + 15 * stride; + uint8_t *r10 = dest + 16 * stride; + int str2 = stride << 1; + int16x8_t q0s16, q1s16, q4s16, q5s16; - STORE_COMBINE_CENTER_RESULTS(r10, r9); - r10 += str2; r9 -= str2; + STORE_COMBINE_CENTER_RESULTS(r10, r9); + r10 += str2; + r9 -= str2; - LOAD_FROM_OUTPUT(17, 30, 31, q0s16, q1s16) - q4s16 = vaddq_s16(q2s16, q1s16); - q5s16 = vaddq_s16(q3s16, q0s16); - q6s16 = vsubq_s16(q3s16, q0s16); - q7s16 = vsubq_s16(q2s16, q1s16); - STORE_COMBINE_EXTREME_RESULTS(r7, r6); - r7 += str2; r6 -= str2; + LOAD_FROM_OUTPUT(17, 30, 31, q0s16, q1s16) + q4s16 = vaddq_s16(q2s16, q1s16); + q5s16 = vaddq_s16(q3s16, q0s16); + q6s16 = vsubq_s16(q3s16, q0s16); + q7s16 = vsubq_s16(q2s16, q1s16); + STORE_COMBINE_EXTREME_RESULTS(r7, r6); + r7 += str2; + r6 -= str2; - LOAD_FROM_OUTPUT(31, 12, 13, q0s16, q1s16) - q2s16 = vaddq_s16(q10s16, q1s16); - q3s16 = vaddq_s16(q11s16, q0s16); - q4s16 = vsubq_s16(q11s16, q0s16); - q5s16 = vsubq_s16(q10s16, q1s16); + LOAD_FROM_OUTPUT(31, 12, 13, q0s16, q1s16) + q2s16 = vaddq_s16(q10s16, q1s16); + q3s16 = vaddq_s16(q11s16, q0s16); + q4s16 = vsubq_s16(q11s16, q0s16); + q5s16 = vsubq_s16(q10s16, q1s16); - LOAD_FROM_OUTPUT(13, 18, 19, q0s16, q1s16) - q8s16 = vaddq_s16(q4s16, q1s16); - q9s16 = vaddq_s16(q5s16, q0s16); - q6s16 = vsubq_s16(q5s16, q0s16); - q7s16 = vsubq_s16(q4s16, q1s16); - STORE_COMBINE_CENTER_RESULTS(r10, r9); - r10 += str2; r9 -= str2; + LOAD_FROM_OUTPUT(13, 18, 19, q0s16, q1s16) + q8s16 = vaddq_s16(q4s16, q1s16); + q9s16 = 
vaddq_s16(q5s16, q0s16); + q6s16 = vsubq_s16(q5s16, q0s16); + q7s16 = vsubq_s16(q4s16, q1s16); + STORE_COMBINE_CENTER_RESULTS(r10, r9); + r10 += str2; + r9 -= str2; - LOAD_FROM_OUTPUT(19, 28, 29, q0s16, q1s16) - q4s16 = vaddq_s16(q2s16, q1s16); - q5s16 = vaddq_s16(q3s16, q0s16); - q6s16 = vsubq_s16(q3s16, q0s16); - q7s16 = vsubq_s16(q2s16, q1s16); - STORE_COMBINE_EXTREME_RESULTS(r7, r6); - r7 += str2; r6 -= str2; + LOAD_FROM_OUTPUT(19, 28, 29, q0s16, q1s16) + q4s16 = vaddq_s16(q2s16, q1s16); + q5s16 = vaddq_s16(q3s16, q0s16); + q6s16 = vsubq_s16(q3s16, q0s16); + q7s16 = vsubq_s16(q2s16, q1s16); + STORE_COMBINE_EXTREME_RESULTS(r7, r6); + r7 += str2; + r6 -= str2; - LOAD_FROM_OUTPUT(29, 10, 11, q0s16, q1s16) - q2s16 = vaddq_s16(q12s16, q1s16); - q3s16 = vaddq_s16(q13s16, q0s16); - q4s16 = vsubq_s16(q13s16, q0s16); - q5s16 = vsubq_s16(q12s16, q1s16); + LOAD_FROM_OUTPUT(29, 10, 11, q0s16, q1s16) + q2s16 = vaddq_s16(q12s16, q1s16); + q3s16 = vaddq_s16(q13s16, q0s16); + q4s16 = vsubq_s16(q13s16, q0s16); + q5s16 = vsubq_s16(q12s16, q1s16); - LOAD_FROM_OUTPUT(11, 20, 21, q0s16, q1s16) - q8s16 = vaddq_s16(q4s16, q1s16); - q9s16 = vaddq_s16(q5s16, q0s16); - q6s16 = vsubq_s16(q5s16, q0s16); - q7s16 = vsubq_s16(q4s16, q1s16); - STORE_COMBINE_CENTER_RESULTS(r10, r9); - r10 += str2; r9 -= str2; + LOAD_FROM_OUTPUT(11, 20, 21, q0s16, q1s16) + q8s16 = vaddq_s16(q4s16, q1s16); + q9s16 = vaddq_s16(q5s16, q0s16); + q6s16 = vsubq_s16(q5s16, q0s16); + q7s16 = vsubq_s16(q4s16, q1s16); + STORE_COMBINE_CENTER_RESULTS(r10, r9); + r10 += str2; + r9 -= str2; - LOAD_FROM_OUTPUT(21, 26, 27, q0s16, q1s16) - q4s16 = vaddq_s16(q2s16, q1s16); - q5s16 = vaddq_s16(q3s16, q0s16); - q6s16 = vsubq_s16(q3s16, q0s16); - q7s16 = vsubq_s16(q2s16, q1s16); - STORE_COMBINE_EXTREME_RESULTS(r7, r6); - r7 += str2; r6 -= str2; + LOAD_FROM_OUTPUT(21, 26, 27, q0s16, q1s16) + q4s16 = vaddq_s16(q2s16, q1s16); + q5s16 = vaddq_s16(q3s16, q0s16); + q6s16 = vsubq_s16(q3s16, q0s16); + q7s16 = vsubq_s16(q2s16, q1s16); + 
STORE_COMBINE_EXTREME_RESULTS(r7, r6); + r7 += str2; + r6 -= str2; - LOAD_FROM_OUTPUT(27, 8, 9, q0s16, q1s16) - q2s16 = vaddq_s16(q14s16, q1s16); - q3s16 = vaddq_s16(q15s16, q0s16); - q4s16 = vsubq_s16(q15s16, q0s16); - q5s16 = vsubq_s16(q14s16, q1s16); + LOAD_FROM_OUTPUT(27, 8, 9, q0s16, q1s16) + q2s16 = vaddq_s16(q14s16, q1s16); + q3s16 = vaddq_s16(q15s16, q0s16); + q4s16 = vsubq_s16(q15s16, q0s16); + q5s16 = vsubq_s16(q14s16, q1s16); - LOAD_FROM_OUTPUT(9, 22, 23, q0s16, q1s16) - q8s16 = vaddq_s16(q4s16, q1s16); - q9s16 = vaddq_s16(q5s16, q0s16); - q6s16 = vsubq_s16(q5s16, q0s16); - q7s16 = vsubq_s16(q4s16, q1s16); - STORE_COMBINE_CENTER_RESULTS(r10, r9); + LOAD_FROM_OUTPUT(9, 22, 23, q0s16, q1s16) + q8s16 = vaddq_s16(q4s16, q1s16); + q9s16 = vaddq_s16(q5s16, q0s16); + q6s16 = vsubq_s16(q5s16, q0s16); + q7s16 = vsubq_s16(q4s16, q1s16); + STORE_COMBINE_CENTER_RESULTS(r10, r9); - LOAD_FROM_OUTPUT(23, 24, 25, q0s16, q1s16) - q4s16 = vaddq_s16(q2s16, q1s16); - q5s16 = vaddq_s16(q3s16, q0s16); - q6s16 = vsubq_s16(q3s16, q0s16); - q7s16 = vsubq_s16(q2s16, q1s16); - STORE_COMBINE_EXTREME_RESULTS(r7, r6); - return; + LOAD_FROM_OUTPUT(23, 24, 25, q0s16, q1s16) + q4s16 = vaddq_s16(q2s16, q1s16); + q5s16 = vaddq_s16(q3s16, q0s16); + q6s16 = vsubq_s16(q3s16, q0s16); + q7s16 = vsubq_s16(q2s16, q1s16); + STORE_COMBINE_EXTREME_RESULTS(r7, r6); + return; } -void vpx_idct32x32_1024_add_neon( - int16_t *input, - uint8_t *dest, - int stride) { - int i, idct32_pass_loop; - int16_t trans_buf[32 * 8]; - int16_t pass1[32 * 32]; - int16_t pass2[32 * 32]; - int16_t *out; - int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; - int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; +void vpx_idct32x32_1024_add_neon(int16_t *input, uint8_t *dest, int stride) { + int i, idct32_pass_loop; + int16_t trans_buf[32 * 8]; + int16_t pass1[32 * 32]; + int16_t pass2[32 * 32]; + int16_t *out; + int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; + int16x8_t 
q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; - for (idct32_pass_loop = 0, out = pass1; - idct32_pass_loop < 2; - idct32_pass_loop++, - input = pass1, // the input of pass2 is the result of pass1 - out = pass2) { - for (i = 0; - i < 4; i++, - input += 32 * 8, out += 8) { // idct32_bands_loop - idct32_transpose_pair(input, trans_buf); + for (idct32_pass_loop = 0, out = pass1; idct32_pass_loop < 2; + idct32_pass_loop++, + input = pass1, // the input of pass2 is the result of pass1 + out = pass2) { + for (i = 0; i < 4; i++, input += 32 * 8, out += 8) { // idct32_bands_loop + idct32_transpose_pair(input, trans_buf); - // ----------------------------------------- - // BLOCK A: 16-19,28-31 - // ----------------------------------------- - // generate 16,17,30,31 - // part of stage 1 - LOAD_FROM_TRANSPOSED(0, 1, 31) - DO_BUTTERFLY_STD(cospi_31_64, cospi_1_64, &q0s16, &q2s16) - LOAD_FROM_TRANSPOSED(31, 17, 15) - DO_BUTTERFLY_STD(cospi_15_64, cospi_17_64, &q1s16, &q3s16) - // part of stage 2 - q4s16 = vaddq_s16(q0s16, q1s16); - q13s16 = vsubq_s16(q0s16, q1s16); - q6s16 = vaddq_s16(q2s16, q3s16); - q14s16 = vsubq_s16(q2s16, q3s16); - // part of stage 3 - DO_BUTTERFLY_STD(cospi_28_64, cospi_4_64, &q5s16, &q7s16) + // ----------------------------------------- + // BLOCK A: 16-19,28-31 + // ----------------------------------------- + // generate 16,17,30,31 + // part of stage 1 + LOAD_FROM_TRANSPOSED(0, 1, 31) + DO_BUTTERFLY_STD(cospi_31_64, cospi_1_64, &q0s16, &q2s16) + LOAD_FROM_TRANSPOSED(31, 17, 15) + DO_BUTTERFLY_STD(cospi_15_64, cospi_17_64, &q1s16, &q3s16) + // part of stage 2 + q4s16 = vaddq_s16(q0s16, q1s16); + q13s16 = vsubq_s16(q0s16, q1s16); + q6s16 = vaddq_s16(q2s16, q3s16); + q14s16 = vsubq_s16(q2s16, q3s16); + // part of stage 3 + DO_BUTTERFLY_STD(cospi_28_64, cospi_4_64, &q5s16, &q7s16) - // generate 18,19,28,29 - // part of stage 1 - LOAD_FROM_TRANSPOSED(15, 9, 23) - DO_BUTTERFLY_STD(cospi_23_64, cospi_9_64, &q0s16, &q2s16) - 
LOAD_FROM_TRANSPOSED(23, 25, 7) - DO_BUTTERFLY_STD(cospi_7_64, cospi_25_64, &q1s16, &q3s16) - // part of stage 2 - q13s16 = vsubq_s16(q3s16, q2s16); - q3s16 = vaddq_s16(q3s16, q2s16); - q14s16 = vsubq_s16(q1s16, q0s16); - q2s16 = vaddq_s16(q1s16, q0s16); - // part of stage 3 - DO_BUTTERFLY_STD(-cospi_4_64, -cospi_28_64, &q1s16, &q0s16) - // part of stage 4 - q8s16 = vaddq_s16(q4s16, q2s16); - q9s16 = vaddq_s16(q5s16, q0s16); - q10s16 = vaddq_s16(q7s16, q1s16); - q15s16 = vaddq_s16(q6s16, q3s16); - q13s16 = vsubq_s16(q5s16, q0s16); - q14s16 = vsubq_s16(q7s16, q1s16); - STORE_IN_OUTPUT(0, 16, 31, q8s16, q15s16) - STORE_IN_OUTPUT(31, 17, 30, q9s16, q10s16) - // part of stage 5 - DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q0s16, &q1s16) - STORE_IN_OUTPUT(30, 29, 18, q1s16, q0s16) - // part of stage 4 - q13s16 = vsubq_s16(q4s16, q2s16); - q14s16 = vsubq_s16(q6s16, q3s16); - // part of stage 5 - DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q4s16, &q6s16) - STORE_IN_OUTPUT(18, 19, 28, q4s16, q6s16) + // generate 18,19,28,29 + // part of stage 1 + LOAD_FROM_TRANSPOSED(15, 9, 23) + DO_BUTTERFLY_STD(cospi_23_64, cospi_9_64, &q0s16, &q2s16) + LOAD_FROM_TRANSPOSED(23, 25, 7) + DO_BUTTERFLY_STD(cospi_7_64, cospi_25_64, &q1s16, &q3s16) + // part of stage 2 + q13s16 = vsubq_s16(q3s16, q2s16); + q3s16 = vaddq_s16(q3s16, q2s16); + q14s16 = vsubq_s16(q1s16, q0s16); + q2s16 = vaddq_s16(q1s16, q0s16); + // part of stage 3 + DO_BUTTERFLY_STD(-cospi_4_64, -cospi_28_64, &q1s16, &q0s16) + // part of stage 4 + q8s16 = vaddq_s16(q4s16, q2s16); + q9s16 = vaddq_s16(q5s16, q0s16); + q10s16 = vaddq_s16(q7s16, q1s16); + q15s16 = vaddq_s16(q6s16, q3s16); + q13s16 = vsubq_s16(q5s16, q0s16); + q14s16 = vsubq_s16(q7s16, q1s16); + STORE_IN_OUTPUT(0, 16, 31, q8s16, q15s16) + STORE_IN_OUTPUT(31, 17, 30, q9s16, q10s16) + // part of stage 5 + DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q0s16, &q1s16) + STORE_IN_OUTPUT(30, 29, 18, q1s16, q0s16) + // part of stage 4 + q13s16 = vsubq_s16(q4s16, q2s16); + 
q14s16 = vsubq_s16(q6s16, q3s16); + // part of stage 5 + DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q4s16, &q6s16) + STORE_IN_OUTPUT(18, 19, 28, q4s16, q6s16) - // ----------------------------------------- - // BLOCK B: 20-23,24-27 - // ----------------------------------------- - // generate 20,21,26,27 - // part of stage 1 - LOAD_FROM_TRANSPOSED(7, 5, 27) - DO_BUTTERFLY_STD(cospi_27_64, cospi_5_64, &q0s16, &q2s16) - LOAD_FROM_TRANSPOSED(27, 21, 11) - DO_BUTTERFLY_STD(cospi_11_64, cospi_21_64, &q1s16, &q3s16) - // part of stage 2 - q13s16 = vsubq_s16(q0s16, q1s16); - q0s16 = vaddq_s16(q0s16, q1s16); - q14s16 = vsubq_s16(q2s16, q3s16); - q2s16 = vaddq_s16(q2s16, q3s16); - // part of stage 3 - DO_BUTTERFLY_STD(cospi_12_64, cospi_20_64, &q1s16, &q3s16) + // ----------------------------------------- + // BLOCK B: 20-23,24-27 + // ----------------------------------------- + // generate 20,21,26,27 + // part of stage 1 + LOAD_FROM_TRANSPOSED(7, 5, 27) + DO_BUTTERFLY_STD(cospi_27_64, cospi_5_64, &q0s16, &q2s16) + LOAD_FROM_TRANSPOSED(27, 21, 11) + DO_BUTTERFLY_STD(cospi_11_64, cospi_21_64, &q1s16, &q3s16) + // part of stage 2 + q13s16 = vsubq_s16(q0s16, q1s16); + q0s16 = vaddq_s16(q0s16, q1s16); + q14s16 = vsubq_s16(q2s16, q3s16); + q2s16 = vaddq_s16(q2s16, q3s16); + // part of stage 3 + DO_BUTTERFLY_STD(cospi_12_64, cospi_20_64, &q1s16, &q3s16) - // generate 22,23,24,25 - // part of stage 1 - LOAD_FROM_TRANSPOSED(11, 13, 19) - DO_BUTTERFLY_STD(cospi_19_64, cospi_13_64, &q5s16, &q7s16) - LOAD_FROM_TRANSPOSED(19, 29, 3) - DO_BUTTERFLY_STD(cospi_3_64, cospi_29_64, &q4s16, &q6s16) - // part of stage 2 - q14s16 = vsubq_s16(q4s16, q5s16); - q5s16 = vaddq_s16(q4s16, q5s16); - q13s16 = vsubq_s16(q6s16, q7s16); - q6s16 = vaddq_s16(q6s16, q7s16); - // part of stage 3 - DO_BUTTERFLY_STD(-cospi_20_64, -cospi_12_64, &q4s16, &q7s16) - // part of stage 4 - q10s16 = vaddq_s16(q7s16, q1s16); - q11s16 = vaddq_s16(q5s16, q0s16); - q12s16 = vaddq_s16(q6s16, q2s16); - q15s16 = 
vaddq_s16(q4s16, q3s16); - // part of stage 6 - LOAD_FROM_OUTPUT(28, 16, 17, q14s16, q13s16) - q8s16 = vaddq_s16(q14s16, q11s16); - q9s16 = vaddq_s16(q13s16, q10s16); - q13s16 = vsubq_s16(q13s16, q10s16); - q11s16 = vsubq_s16(q14s16, q11s16); - STORE_IN_OUTPUT(17, 17, 16, q9s16, q8s16) - LOAD_FROM_OUTPUT(16, 30, 31, q14s16, q9s16) - q8s16 = vsubq_s16(q9s16, q12s16); - q10s16 = vaddq_s16(q14s16, q15s16); - q14s16 = vsubq_s16(q14s16, q15s16); - q12s16 = vaddq_s16(q9s16, q12s16); - STORE_IN_OUTPUT(31, 30, 31, q10s16, q12s16) - // part of stage 7 - DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16) - STORE_IN_OUTPUT(31, 25, 22, q14s16, q13s16) - q13s16 = q11s16; - q14s16 = q8s16; - DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16) - STORE_IN_OUTPUT(22, 24, 23, q14s16, q13s16) - // part of stage 4 - q14s16 = vsubq_s16(q5s16, q0s16); - q13s16 = vsubq_s16(q6s16, q2s16); - DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q5s16, &q6s16); - q14s16 = vsubq_s16(q7s16, q1s16); - q13s16 = vsubq_s16(q4s16, q3s16); - DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q0s16, &q1s16); - // part of stage 6 - LOAD_FROM_OUTPUT(23, 18, 19, q14s16, q13s16) - q8s16 = vaddq_s16(q14s16, q1s16); - q9s16 = vaddq_s16(q13s16, q6s16); - q13s16 = vsubq_s16(q13s16, q6s16); - q1s16 = vsubq_s16(q14s16, q1s16); - STORE_IN_OUTPUT(19, 18, 19, q8s16, q9s16) - LOAD_FROM_OUTPUT(19, 28, 29, q8s16, q9s16) - q14s16 = vsubq_s16(q8s16, q5s16); - q10s16 = vaddq_s16(q8s16, q5s16); - q11s16 = vaddq_s16(q9s16, q0s16); - q0s16 = vsubq_s16(q9s16, q0s16); - STORE_IN_OUTPUT(29, 28, 29, q10s16, q11s16) - // part of stage 7 - DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16) - STORE_IN_OUTPUT(29, 20, 27, q13s16, q14s16) - DO_BUTTERFLY(q0s16, q1s16, cospi_16_64, cospi_16_64, - &q1s16, &q0s16); - STORE_IN_OUTPUT(27, 21, 26, q1s16, q0s16) + // generate 22,23,24,25 + // part of stage 1 + LOAD_FROM_TRANSPOSED(11, 13, 19) + DO_BUTTERFLY_STD(cospi_19_64, cospi_13_64, &q5s16, &q7s16) + 
LOAD_FROM_TRANSPOSED(19, 29, 3) + DO_BUTTERFLY_STD(cospi_3_64, cospi_29_64, &q4s16, &q6s16) + // part of stage 2 + q14s16 = vsubq_s16(q4s16, q5s16); + q5s16 = vaddq_s16(q4s16, q5s16); + q13s16 = vsubq_s16(q6s16, q7s16); + q6s16 = vaddq_s16(q6s16, q7s16); + // part of stage 3 + DO_BUTTERFLY_STD(-cospi_20_64, -cospi_12_64, &q4s16, &q7s16) + // part of stage 4 + q10s16 = vaddq_s16(q7s16, q1s16); + q11s16 = vaddq_s16(q5s16, q0s16); + q12s16 = vaddq_s16(q6s16, q2s16); + q15s16 = vaddq_s16(q4s16, q3s16); + // part of stage 6 + LOAD_FROM_OUTPUT(28, 16, 17, q14s16, q13s16) + q8s16 = vaddq_s16(q14s16, q11s16); + q9s16 = vaddq_s16(q13s16, q10s16); + q13s16 = vsubq_s16(q13s16, q10s16); + q11s16 = vsubq_s16(q14s16, q11s16); + STORE_IN_OUTPUT(17, 17, 16, q9s16, q8s16) + LOAD_FROM_OUTPUT(16, 30, 31, q14s16, q9s16) + q8s16 = vsubq_s16(q9s16, q12s16); + q10s16 = vaddq_s16(q14s16, q15s16); + q14s16 = vsubq_s16(q14s16, q15s16); + q12s16 = vaddq_s16(q9s16, q12s16); + STORE_IN_OUTPUT(31, 30, 31, q10s16, q12s16) + // part of stage 7 + DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16) + STORE_IN_OUTPUT(31, 25, 22, q14s16, q13s16) + q13s16 = q11s16; + q14s16 = q8s16; + DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16) + STORE_IN_OUTPUT(22, 24, 23, q14s16, q13s16) + // part of stage 4 + q14s16 = vsubq_s16(q5s16, q0s16); + q13s16 = vsubq_s16(q6s16, q2s16); + DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q5s16, &q6s16); + q14s16 = vsubq_s16(q7s16, q1s16); + q13s16 = vsubq_s16(q4s16, q3s16); + DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q0s16, &q1s16); + // part of stage 6 + LOAD_FROM_OUTPUT(23, 18, 19, q14s16, q13s16) + q8s16 = vaddq_s16(q14s16, q1s16); + q9s16 = vaddq_s16(q13s16, q6s16); + q13s16 = vsubq_s16(q13s16, q6s16); + q1s16 = vsubq_s16(q14s16, q1s16); + STORE_IN_OUTPUT(19, 18, 19, q8s16, q9s16) + LOAD_FROM_OUTPUT(19, 28, 29, q8s16, q9s16) + q14s16 = vsubq_s16(q8s16, q5s16); + q10s16 = vaddq_s16(q8s16, q5s16); + q11s16 = vaddq_s16(q9s16, q0s16); + q0s16 = 
vsubq_s16(q9s16, q0s16); + STORE_IN_OUTPUT(29, 28, 29, q10s16, q11s16) + // part of stage 7 + DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16) + STORE_IN_OUTPUT(29, 20, 27, q13s16, q14s16) + DO_BUTTERFLY(q0s16, q1s16, cospi_16_64, cospi_16_64, &q1s16, &q0s16); + STORE_IN_OUTPUT(27, 21, 26, q1s16, q0s16) - // ----------------------------------------- - // BLOCK C: 8-10,11-15 - // ----------------------------------------- - // generate 8,9,14,15 - // part of stage 2 - LOAD_FROM_TRANSPOSED(3, 2, 30) - DO_BUTTERFLY_STD(cospi_30_64, cospi_2_64, &q0s16, &q2s16) - LOAD_FROM_TRANSPOSED(30, 18, 14) - DO_BUTTERFLY_STD(cospi_14_64, cospi_18_64, &q1s16, &q3s16) - // part of stage 3 - q13s16 = vsubq_s16(q0s16, q1s16); - q0s16 = vaddq_s16(q0s16, q1s16); - q14s16 = vsubq_s16(q2s16, q3s16); - q2s16 = vaddq_s16(q2s16, q3s16); - // part of stage 4 - DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q1s16, &q3s16) + // ----------------------------------------- + // BLOCK C: 8-10,11-15 + // ----------------------------------------- + // generate 8,9,14,15 + // part of stage 2 + LOAD_FROM_TRANSPOSED(3, 2, 30) + DO_BUTTERFLY_STD(cospi_30_64, cospi_2_64, &q0s16, &q2s16) + LOAD_FROM_TRANSPOSED(30, 18, 14) + DO_BUTTERFLY_STD(cospi_14_64, cospi_18_64, &q1s16, &q3s16) + // part of stage 3 + q13s16 = vsubq_s16(q0s16, q1s16); + q0s16 = vaddq_s16(q0s16, q1s16); + q14s16 = vsubq_s16(q2s16, q3s16); + q2s16 = vaddq_s16(q2s16, q3s16); + // part of stage 4 + DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q1s16, &q3s16) - // generate 10,11,12,13 - // part of stage 2 - LOAD_FROM_TRANSPOSED(14, 10, 22) - DO_BUTTERFLY_STD(cospi_22_64, cospi_10_64, &q5s16, &q7s16) - LOAD_FROM_TRANSPOSED(22, 26, 6) - DO_BUTTERFLY_STD(cospi_6_64, cospi_26_64, &q4s16, &q6s16) - // part of stage 3 - q14s16 = vsubq_s16(q4s16, q5s16); - q5s16 = vaddq_s16(q4s16, q5s16); - q13s16 = vsubq_s16(q6s16, q7s16); - q6s16 = vaddq_s16(q6s16, q7s16); - // part of stage 4 - DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q4s16, &q7s16) - // 
part of stage 5 - q8s16 = vaddq_s16(q0s16, q5s16); - q9s16 = vaddq_s16(q1s16, q7s16); - q13s16 = vsubq_s16(q1s16, q7s16); - q14s16 = vsubq_s16(q3s16, q4s16); - q10s16 = vaddq_s16(q3s16, q4s16); - q15s16 = vaddq_s16(q2s16, q6s16); - STORE_IN_OUTPUT(26, 8, 15, q8s16, q15s16) - STORE_IN_OUTPUT(15, 9, 14, q9s16, q10s16) - // part of stage 6 - DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16) - STORE_IN_OUTPUT(14, 13, 10, q3s16, q1s16) - q13s16 = vsubq_s16(q0s16, q5s16); - q14s16 = vsubq_s16(q2s16, q6s16); - DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16) - STORE_IN_OUTPUT(10, 11, 12, q1s16, q3s16) + // generate 10,11,12,13 + // part of stage 2 + LOAD_FROM_TRANSPOSED(14, 10, 22) + DO_BUTTERFLY_STD(cospi_22_64, cospi_10_64, &q5s16, &q7s16) + LOAD_FROM_TRANSPOSED(22, 26, 6) + DO_BUTTERFLY_STD(cospi_6_64, cospi_26_64, &q4s16, &q6s16) + // part of stage 3 + q14s16 = vsubq_s16(q4s16, q5s16); + q5s16 = vaddq_s16(q4s16, q5s16); + q13s16 = vsubq_s16(q6s16, q7s16); + q6s16 = vaddq_s16(q6s16, q7s16); + // part of stage 4 + DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q4s16, &q7s16) + // part of stage 5 + q8s16 = vaddq_s16(q0s16, q5s16); + q9s16 = vaddq_s16(q1s16, q7s16); + q13s16 = vsubq_s16(q1s16, q7s16); + q14s16 = vsubq_s16(q3s16, q4s16); + q10s16 = vaddq_s16(q3s16, q4s16); + q15s16 = vaddq_s16(q2s16, q6s16); + STORE_IN_OUTPUT(26, 8, 15, q8s16, q15s16) + STORE_IN_OUTPUT(15, 9, 14, q9s16, q10s16) + // part of stage 6 + DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16) + STORE_IN_OUTPUT(14, 13, 10, q3s16, q1s16) + q13s16 = vsubq_s16(q0s16, q5s16); + q14s16 = vsubq_s16(q2s16, q6s16); + DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16) + STORE_IN_OUTPUT(10, 11, 12, q1s16, q3s16) - // ----------------------------------------- - // BLOCK D: 0-3,4-7 - // ----------------------------------------- - // generate 4,5,6,7 - // part of stage 3 - LOAD_FROM_TRANSPOSED(6, 4, 28) - DO_BUTTERFLY_STD(cospi_28_64, cospi_4_64, &q0s16, &q2s16) - 
LOAD_FROM_TRANSPOSED(28, 20, 12) - DO_BUTTERFLY_STD(cospi_12_64, cospi_20_64, &q1s16, &q3s16) - // part of stage 4 - q13s16 = vsubq_s16(q0s16, q1s16); - q0s16 = vaddq_s16(q0s16, q1s16); - q14s16 = vsubq_s16(q2s16, q3s16); - q2s16 = vaddq_s16(q2s16, q3s16); - // part of stage 5 - DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16) + // ----------------------------------------- + // BLOCK D: 0-3,4-7 + // ----------------------------------------- + // generate 4,5,6,7 + // part of stage 3 + LOAD_FROM_TRANSPOSED(6, 4, 28) + DO_BUTTERFLY_STD(cospi_28_64, cospi_4_64, &q0s16, &q2s16) + LOAD_FROM_TRANSPOSED(28, 20, 12) + DO_BUTTERFLY_STD(cospi_12_64, cospi_20_64, &q1s16, &q3s16) + // part of stage 4 + q13s16 = vsubq_s16(q0s16, q1s16); + q0s16 = vaddq_s16(q0s16, q1s16); + q14s16 = vsubq_s16(q2s16, q3s16); + q2s16 = vaddq_s16(q2s16, q3s16); + // part of stage 5 + DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16) - // generate 0,1,2,3 - // part of stage 4 - LOAD_FROM_TRANSPOSED(12, 0, 16) - DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q5s16, &q7s16) - LOAD_FROM_TRANSPOSED(16, 8, 24) - DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q14s16, &q6s16) - // part of stage 5 - q4s16 = vaddq_s16(q7s16, q6s16); - q7s16 = vsubq_s16(q7s16, q6s16); - q6s16 = vsubq_s16(q5s16, q14s16); - q5s16 = vaddq_s16(q5s16, q14s16); - // part of stage 6 - q8s16 = vaddq_s16(q4s16, q2s16); - q9s16 = vaddq_s16(q5s16, q3s16); - q10s16 = vaddq_s16(q6s16, q1s16); - q11s16 = vaddq_s16(q7s16, q0s16); - q12s16 = vsubq_s16(q7s16, q0s16); - q13s16 = vsubq_s16(q6s16, q1s16); - q14s16 = vsubq_s16(q5s16, q3s16); - q15s16 = vsubq_s16(q4s16, q2s16); - // part of stage 7 - LOAD_FROM_OUTPUT(12, 14, 15, q0s16, q1s16) - q2s16 = vaddq_s16(q8s16, q1s16); - q3s16 = vaddq_s16(q9s16, q0s16); - q4s16 = vsubq_s16(q9s16, q0s16); - q5s16 = vsubq_s16(q8s16, q1s16); - LOAD_FROM_OUTPUT(15, 16, 17, q0s16, q1s16) - q8s16 = vaddq_s16(q4s16, q1s16); - q9s16 = vaddq_s16(q5s16, q0s16); - q6s16 = vsubq_s16(q5s16, q0s16); - q7s16 
= vsubq_s16(q4s16, q1s16); + // generate 0,1,2,3 + // part of stage 4 + LOAD_FROM_TRANSPOSED(12, 0, 16) + DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q5s16, &q7s16) + LOAD_FROM_TRANSPOSED(16, 8, 24) + DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q14s16, &q6s16) + // part of stage 5 + q4s16 = vaddq_s16(q7s16, q6s16); + q7s16 = vsubq_s16(q7s16, q6s16); + q6s16 = vsubq_s16(q5s16, q14s16); + q5s16 = vaddq_s16(q5s16, q14s16); + // part of stage 6 + q8s16 = vaddq_s16(q4s16, q2s16); + q9s16 = vaddq_s16(q5s16, q3s16); + q10s16 = vaddq_s16(q6s16, q1s16); + q11s16 = vaddq_s16(q7s16, q0s16); + q12s16 = vsubq_s16(q7s16, q0s16); + q13s16 = vsubq_s16(q6s16, q1s16); + q14s16 = vsubq_s16(q5s16, q3s16); + q15s16 = vsubq_s16(q4s16, q2s16); + // part of stage 7 + LOAD_FROM_OUTPUT(12, 14, 15, q0s16, q1s16) + q2s16 = vaddq_s16(q8s16, q1s16); + q3s16 = vaddq_s16(q9s16, q0s16); + q4s16 = vsubq_s16(q9s16, q0s16); + q5s16 = vsubq_s16(q8s16, q1s16); + LOAD_FROM_OUTPUT(15, 16, 17, q0s16, q1s16) + q8s16 = vaddq_s16(q4s16, q1s16); + q9s16 = vaddq_s16(q5s16, q0s16); + q6s16 = vsubq_s16(q5s16, q0s16); + q7s16 = vsubq_s16(q4s16, q1s16); - if (idct32_pass_loop == 0) { - idct32_bands_end_1st_pass(out, - q2s16, q3s16, q6s16, q7s16, q8s16, q9s16, - q10s16, q11s16, q12s16, q13s16, q14s16, q15s16); - } else { - idct32_bands_end_2nd_pass(out, dest, stride, - q2s16, q3s16, q6s16, q7s16, q8s16, q9s16, - q10s16, q11s16, q12s16, q13s16, q14s16, q15s16); - dest += 8; - } - } + if (idct32_pass_loop == 0) { + idct32_bands_end_1st_pass(out, q2s16, q3s16, q6s16, q7s16, q8s16, q9s16, + q10s16, q11s16, q12s16, q13s16, q14s16, + q15s16); + } else { + idct32_bands_end_2nd_pass(out, dest, stride, q2s16, q3s16, q6s16, q7s16, + q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, + q14s16, q15s16); + dest += 8; + } } - return; + } + return; } diff --git a/libs/libvpx/vpx_dsp/arm/idct4x4_1_add_neon.c b/libs/libvpx/vpx_dsp/arm/idct4x4_1_add_neon.c index ea618700c9..9f999e979d 100644 --- 
a/libs/libvpx/vpx_dsp/arm/idct4x4_1_add_neon.c +++ b/libs/libvpx/vpx_dsp/arm/idct4x4_1_add_neon.c @@ -13,38 +13,34 @@ #include "vpx_dsp/inv_txfm.h" #include "vpx_ports/mem.h" -void vpx_idct4x4_1_add_neon( - int16_t *input, - uint8_t *dest, - int dest_stride) { - uint8x8_t d6u8; - uint32x2_t d2u32 = vdup_n_u32(0); - uint16x8_t q8u16; - int16x8_t q0s16; - uint8_t *d1, *d2; - int16_t i, a1, cospi_16_64 = 11585; - int16_t out = dct_const_round_shift(input[0] * cospi_16_64); - out = dct_const_round_shift(out * cospi_16_64); - a1 = ROUND_POWER_OF_TWO(out, 4); +void vpx_idct4x4_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { + uint8x8_t d6u8; + uint32x2_t d2u32 = vdup_n_u32(0); + uint16x8_t q8u16; + int16x8_t q0s16; + uint8_t *d1, *d2; + int16_t i, a1, cospi_16_64 = 11585; + int16_t out = dct_const_round_shift(input[0] * cospi_16_64); + out = dct_const_round_shift(out * cospi_16_64); + a1 = ROUND_POWER_OF_TWO(out, 4); - q0s16 = vdupq_n_s16(a1); + q0s16 = vdupq_n_s16(a1); - // dc_only_idct_add - d1 = d2 = dest; - for (i = 0; i < 2; i++) { - d2u32 = vld1_lane_u32((const uint32_t *)d1, d2u32, 0); - d1 += dest_stride; - d2u32 = vld1_lane_u32((const uint32_t *)d1, d2u32, 1); - d1 += dest_stride; + // dc_only_idct_add + d1 = d2 = dest; + for (i = 0; i < 2; i++) { + d2u32 = vld1_lane_u32((const uint32_t *)d1, d2u32, 0); + d1 += dest_stride; + d2u32 = vld1_lane_u32((const uint32_t *)d1, d2u32, 1); + d1 += dest_stride; - q8u16 = vaddw_u8(vreinterpretq_u16_s16(q0s16), - vreinterpret_u8_u32(d2u32)); - d6u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); + q8u16 = vaddw_u8(vreinterpretq_u16_s16(q0s16), vreinterpret_u8_u32(d2u32)); + d6u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); - vst1_lane_u32((uint32_t *)d2, vreinterpret_u32_u8(d6u8), 0); - d2 += dest_stride; - vst1_lane_u32((uint32_t *)d2, vreinterpret_u32_u8(d6u8), 1); - d2 += dest_stride; - } - return; + vst1_lane_u32((uint32_t *)d2, vreinterpret_u32_u8(d6u8), 0); + d2 += dest_stride; + vst1_lane_u32((uint32_t *)d2, 
vreinterpret_u32_u8(d6u8), 1); + d2 += dest_stride; + } + return; } diff --git a/libs/libvpx/vpx_dsp/arm/idct4x4_add_neon.c b/libs/libvpx/vpx_dsp/arm/idct4x4_add_neon.c index 3c975c99b7..382626928b 100644 --- a/libs/libvpx/vpx_dsp/arm/idct4x4_add_neon.c +++ b/libs/libvpx/vpx_dsp/arm/idct4x4_add_neon.c @@ -10,142 +10,137 @@ #include -void vpx_idct4x4_16_add_neon( - int16_t *input, - uint8_t *dest, - int dest_stride) { - uint8x8_t d26u8, d27u8; - uint32x2_t d26u32, d27u32; - uint16x8_t q8u16, q9u16; - int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16; - int16x4_t d22s16, d23s16, d24s16, d26s16, d27s16, d28s16, d29s16; - int16x8_t q8s16, q9s16, q13s16, q14s16; - int32x4_t q1s32, q13s32, q14s32, q15s32; - int16x4x2_t d0x2s16, d1x2s16; - int32x4x2_t q0x2s32; - uint8_t *d; - int16_t cospi_8_64 = 15137; - int16_t cospi_16_64 = 11585; - int16_t cospi_24_64 = 6270; +void vpx_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { + uint8x8_t d26u8, d27u8; + uint32x2_t d26u32, d27u32; + uint16x8_t q8u16, q9u16; + int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16; + int16x4_t d22s16, d23s16, d24s16, d26s16, d27s16, d28s16, d29s16; + int16x8_t q8s16, q9s16, q13s16, q14s16; + int32x4_t q1s32, q13s32, q14s32, q15s32; + int16x4x2_t d0x2s16, d1x2s16; + int32x4x2_t q0x2s32; + uint8_t *d; + int16_t cospi_8_64 = 15137; + int16_t cospi_16_64 = 11585; + int16_t cospi_24_64 = 6270; - d26u32 = d27u32 = vdup_n_u32(0); + d26u32 = d27u32 = vdup_n_u32(0); - q8s16 = vld1q_s16(input); - q9s16 = vld1q_s16(input + 8); + q8s16 = vld1q_s16(input); + q9s16 = vld1q_s16(input + 8); - d16s16 = vget_low_s16(q8s16); - d17s16 = vget_high_s16(q8s16); - d18s16 = vget_low_s16(q9s16); - d19s16 = vget_high_s16(q9s16); + d16s16 = vget_low_s16(q8s16); + d17s16 = vget_high_s16(q8s16); + d18s16 = vget_low_s16(q9s16); + d19s16 = vget_high_s16(q9s16); - d0x2s16 = vtrn_s16(d16s16, d17s16); - d1x2s16 = vtrn_s16(d18s16, d19s16); - q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]); - 
q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]); + d0x2s16 = vtrn_s16(d16s16, d17s16); + d1x2s16 = vtrn_s16(d18s16, d19s16); + q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]); + q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]); - d20s16 = vdup_n_s16(cospi_8_64); - d21s16 = vdup_n_s16(cospi_16_64); + d20s16 = vdup_n_s16(cospi_8_64); + d21s16 = vdup_n_s16(cospi_16_64); - q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q8s16), - vreinterpretq_s32_s16(q9s16)); - d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0])); - d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0])); - d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1])); - d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1])); + q0x2s32 = + vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q9s16)); + d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0])); + d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0])); + d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1])); + d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1])); - d22s16 = vdup_n_s16(cospi_24_64); + d22s16 = vdup_n_s16(cospi_24_64); - // stage 1 - d23s16 = vadd_s16(d16s16, d18s16); - d24s16 = vsub_s16(d16s16, d18s16); + // stage 1 + d23s16 = vadd_s16(d16s16, d18s16); + d24s16 = vsub_s16(d16s16, d18s16); - q15s32 = vmull_s16(d17s16, d22s16); - q1s32 = vmull_s16(d17s16, d20s16); - q13s32 = vmull_s16(d23s16, d21s16); - q14s32 = vmull_s16(d24s16, d21s16); + q15s32 = vmull_s16(d17s16, d22s16); + q1s32 = vmull_s16(d17s16, d20s16); + q13s32 = vmull_s16(d23s16, d21s16); + q14s32 = vmull_s16(d24s16, d21s16); - q15s32 = vmlsl_s16(q15s32, d19s16, d20s16); - q1s32 = vmlal_s16(q1s32, d19s16, d22s16); + q15s32 = vmlsl_s16(q15s32, d19s16, d20s16); + q1s32 = vmlal_s16(q1s32, d19s16, d22s16); - d26s16 = vqrshrn_n_s32(q13s32, 14); - d27s16 = vqrshrn_n_s32(q14s32, 14); - d29s16 = vqrshrn_n_s32(q15s32, 14); - d28s16 = vqrshrn_n_s32(q1s32, 14); - q13s16 = vcombine_s16(d26s16, d27s16); - q14s16 
= vcombine_s16(d28s16, d29s16); + d26s16 = vqrshrn_n_s32(q13s32, 14); + d27s16 = vqrshrn_n_s32(q14s32, 14); + d29s16 = vqrshrn_n_s32(q15s32, 14); + d28s16 = vqrshrn_n_s32(q1s32, 14); + q13s16 = vcombine_s16(d26s16, d27s16); + q14s16 = vcombine_s16(d28s16, d29s16); - // stage 2 - q8s16 = vaddq_s16(q13s16, q14s16); - q9s16 = vsubq_s16(q13s16, q14s16); + // stage 2 + q8s16 = vaddq_s16(q13s16, q14s16); + q9s16 = vsubq_s16(q13s16, q14s16); - d16s16 = vget_low_s16(q8s16); - d17s16 = vget_high_s16(q8s16); - d18s16 = vget_high_s16(q9s16); // vswp d18 d19 - d19s16 = vget_low_s16(q9s16); + d16s16 = vget_low_s16(q8s16); + d17s16 = vget_high_s16(q8s16); + d18s16 = vget_high_s16(q9s16); // vswp d18 d19 + d19s16 = vget_low_s16(q9s16); - d0x2s16 = vtrn_s16(d16s16, d17s16); - d1x2s16 = vtrn_s16(d18s16, d19s16); - q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]); - q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]); + d0x2s16 = vtrn_s16(d16s16, d17s16); + d1x2s16 = vtrn_s16(d18s16, d19s16); + q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]); + q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]); - q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q8s16), - vreinterpretq_s32_s16(q9s16)); - d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0])); - d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0])); - d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1])); - d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1])); + q0x2s32 = + vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q9s16)); + d16s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[0])); + d17s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[0])); + d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1])); + d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1])); - // do the transform on columns - // stage 1 - d23s16 = vadd_s16(d16s16, d18s16); - d24s16 = vsub_s16(d16s16, d18s16); + // do the transform on columns + // stage 1 + d23s16 = vadd_s16(d16s16, d18s16); + 
d24s16 = vsub_s16(d16s16, d18s16); - q15s32 = vmull_s16(d17s16, d22s16); - q1s32 = vmull_s16(d17s16, d20s16); - q13s32 = vmull_s16(d23s16, d21s16); - q14s32 = vmull_s16(d24s16, d21s16); + q15s32 = vmull_s16(d17s16, d22s16); + q1s32 = vmull_s16(d17s16, d20s16); + q13s32 = vmull_s16(d23s16, d21s16); + q14s32 = vmull_s16(d24s16, d21s16); - q15s32 = vmlsl_s16(q15s32, d19s16, d20s16); - q1s32 = vmlal_s16(q1s32, d19s16, d22s16); + q15s32 = vmlsl_s16(q15s32, d19s16, d20s16); + q1s32 = vmlal_s16(q1s32, d19s16, d22s16); - d26s16 = vqrshrn_n_s32(q13s32, 14); - d27s16 = vqrshrn_n_s32(q14s32, 14); - d29s16 = vqrshrn_n_s32(q15s32, 14); - d28s16 = vqrshrn_n_s32(q1s32, 14); - q13s16 = vcombine_s16(d26s16, d27s16); - q14s16 = vcombine_s16(d28s16, d29s16); + d26s16 = vqrshrn_n_s32(q13s32, 14); + d27s16 = vqrshrn_n_s32(q14s32, 14); + d29s16 = vqrshrn_n_s32(q15s32, 14); + d28s16 = vqrshrn_n_s32(q1s32, 14); + q13s16 = vcombine_s16(d26s16, d27s16); + q14s16 = vcombine_s16(d28s16, d29s16); - // stage 2 - q8s16 = vaddq_s16(q13s16, q14s16); - q9s16 = vsubq_s16(q13s16, q14s16); + // stage 2 + q8s16 = vaddq_s16(q13s16, q14s16); + q9s16 = vsubq_s16(q13s16, q14s16); - q8s16 = vrshrq_n_s16(q8s16, 4); - q9s16 = vrshrq_n_s16(q9s16, 4); + q8s16 = vrshrq_n_s16(q8s16, 4); + q9s16 = vrshrq_n_s16(q9s16, 4); - d = dest; - d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 0); - d += dest_stride; - d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 1); - d += dest_stride; - d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 1); - d += dest_stride; - d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 0); + d = dest; + d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 0); + d += dest_stride; + d26u32 = vld1_lane_u32((const uint32_t *)d, d26u32, 1); + d += dest_stride; + d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 1); + d += dest_stride; + d27u32 = vld1_lane_u32((const uint32_t *)d, d27u32, 0); - q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), - vreinterpret_u8_u32(d26u32)); - q9u16 = 
vaddw_u8(vreinterpretq_u16_s16(q9s16), - vreinterpret_u8_u32(d27u32)); + q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u32(d26u32)); + q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u32(d27u32)); - d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); - d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); + d26u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); + d27u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); - d = dest; - vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 0); - d += dest_stride; - vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 1); - d += dest_stride; - vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 1); - d += dest_stride; - vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 0); - return; + d = dest; + vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 0); + d += dest_stride; + vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d26u8), 1); + d += dest_stride; + vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 1); + d += dest_stride; + vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 0); + return; } diff --git a/libs/libvpx/vpx_dsp/arm/idct8x8_1_add_neon.c b/libs/libvpx/vpx_dsp/arm/idct8x8_1_add_neon.c index c1b801fad5..e3db0b876b 100644 --- a/libs/libvpx/vpx_dsp/arm/idct8x8_1_add_neon.c +++ b/libs/libvpx/vpx_dsp/arm/idct8x8_1_add_neon.c @@ -13,52 +13,49 @@ #include "vpx_dsp/inv_txfm.h" #include "vpx_ports/mem.h" -void vpx_idct8x8_1_add_neon( - int16_t *input, - uint8_t *dest, - int dest_stride) { - uint8x8_t d2u8, d3u8, d30u8, d31u8; - uint64x1_t d2u64, d3u64, d4u64, d5u64; - uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16; - int16x8_t q0s16; - uint8_t *d1, *d2; - int16_t i, a1, cospi_16_64 = 11585; - int16_t out = dct_const_round_shift(input[0] * cospi_16_64); - out = dct_const_round_shift(out * cospi_16_64); - a1 = ROUND_POWER_OF_TWO(out, 5); +void vpx_idct8x8_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { + uint8x8_t d2u8, d3u8, d30u8, d31u8; + 
uint64x1_t d2u64, d3u64, d4u64, d5u64; + uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16; + int16x8_t q0s16; + uint8_t *d1, *d2; + int16_t i, a1, cospi_16_64 = 11585; + int16_t out = dct_const_round_shift(input[0] * cospi_16_64); + out = dct_const_round_shift(out * cospi_16_64); + a1 = ROUND_POWER_OF_TWO(out, 5); - q0s16 = vdupq_n_s16(a1); - q0u16 = vreinterpretq_u16_s16(q0s16); + q0s16 = vdupq_n_s16(a1); + q0u16 = vreinterpretq_u16_s16(q0s16); - d1 = d2 = dest; - for (i = 0; i < 2; i++) { - d2u64 = vld1_u64((const uint64_t *)d1); - d1 += dest_stride; - d3u64 = vld1_u64((const uint64_t *)d1); - d1 += dest_stride; - d4u64 = vld1_u64((const uint64_t *)d1); - d1 += dest_stride; - d5u64 = vld1_u64((const uint64_t *)d1); - d1 += dest_stride; + d1 = d2 = dest; + for (i = 0; i < 2; i++) { + d2u64 = vld1_u64((const uint64_t *)d1); + d1 += dest_stride; + d3u64 = vld1_u64((const uint64_t *)d1); + d1 += dest_stride; + d4u64 = vld1_u64((const uint64_t *)d1); + d1 += dest_stride; + d5u64 = vld1_u64((const uint64_t *)d1); + d1 += dest_stride; - q9u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d2u64)); - q10u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d3u64)); - q11u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d4u64)); - q12u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d5u64)); + q9u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d2u64)); + q10u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d3u64)); + q11u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d4u64)); + q12u16 = vaddw_u8(q0u16, vreinterpret_u8_u64(d5u64)); - d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); - d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); - d30u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); - d31u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16)); + d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); + d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); + d30u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); + d31u8 = vqmovun_s16(vreinterpretq_s16_u16(q12u16)); - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); - d2 += dest_stride; - 
vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d30u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d31u8)); - d2 += dest_stride; - } - return; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d30u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d31u8)); + d2 += dest_stride; + } + return; } diff --git a/libs/libvpx/vpx_dsp/arm/idct8x8_add_neon.c b/libs/libvpx/vpx_dsp/arm/idct8x8_add_neon.c index 4b2c2a6f83..82b3182568 100644 --- a/libs/libvpx/vpx_dsp/arm/idct8x8_add_neon.c +++ b/libs/libvpx/vpx_dsp/arm/idct8x8_add_neon.c @@ -11,530 +11,435 @@ #include #include "./vpx_config.h" +#include "vpx_dsp/arm/transpose_neon.h" #include "vpx_dsp/txfm_common.h" -static INLINE void TRANSPOSE8X8( - int16x8_t *q8s16, - int16x8_t *q9s16, - int16x8_t *q10s16, - int16x8_t *q11s16, - int16x8_t *q12s16, - int16x8_t *q13s16, - int16x8_t *q14s16, - int16x8_t *q15s16) { - int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; - int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; - int32x4x2_t q0x2s32, q1x2s32, q2x2s32, q3x2s32; - int16x8x2_t q0x2s16, q1x2s16, q2x2s16, q3x2s16; +static INLINE void IDCT8x8_1D(int16x8_t *q8s16, int16x8_t *q9s16, + int16x8_t *q10s16, int16x8_t *q11s16, + int16x8_t *q12s16, int16x8_t *q13s16, + int16x8_t *q14s16, int16x8_t *q15s16) { + int16x4_t d0s16, d1s16, d2s16, d3s16; + int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; + int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; + int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; + int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; + int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32; + int32x4_t q10s32, q11s32, q12s32, 
q13s32, q15s32; - d16s16 = vget_low_s16(*q8s16); - d17s16 = vget_high_s16(*q8s16); - d18s16 = vget_low_s16(*q9s16); - d19s16 = vget_high_s16(*q9s16); - d20s16 = vget_low_s16(*q10s16); - d21s16 = vget_high_s16(*q10s16); - d22s16 = vget_low_s16(*q11s16); - d23s16 = vget_high_s16(*q11s16); - d24s16 = vget_low_s16(*q12s16); - d25s16 = vget_high_s16(*q12s16); - d26s16 = vget_low_s16(*q13s16); - d27s16 = vget_high_s16(*q13s16); - d28s16 = vget_low_s16(*q14s16); - d29s16 = vget_high_s16(*q14s16); - d30s16 = vget_low_s16(*q15s16); - d31s16 = vget_high_s16(*q15s16); + d0s16 = vdup_n_s16(cospi_28_64); + d1s16 = vdup_n_s16(cospi_4_64); + d2s16 = vdup_n_s16(cospi_12_64); + d3s16 = vdup_n_s16(cospi_20_64); - *q8s16 = vcombine_s16(d16s16, d24s16); // vswp d17, d24 - *q9s16 = vcombine_s16(d18s16, d26s16); // vswp d19, d26 - *q10s16 = vcombine_s16(d20s16, d28s16); // vswp d21, d28 - *q11s16 = vcombine_s16(d22s16, d30s16); // vswp d23, d30 - *q12s16 = vcombine_s16(d17s16, d25s16); - *q13s16 = vcombine_s16(d19s16, d27s16); - *q14s16 = vcombine_s16(d21s16, d29s16); - *q15s16 = vcombine_s16(d23s16, d31s16); + d16s16 = vget_low_s16(*q8s16); + d17s16 = vget_high_s16(*q8s16); + d18s16 = vget_low_s16(*q9s16); + d19s16 = vget_high_s16(*q9s16); + d20s16 = vget_low_s16(*q10s16); + d21s16 = vget_high_s16(*q10s16); + d22s16 = vget_low_s16(*q11s16); + d23s16 = vget_high_s16(*q11s16); + d24s16 = vget_low_s16(*q12s16); + d25s16 = vget_high_s16(*q12s16); + d26s16 = vget_low_s16(*q13s16); + d27s16 = vget_high_s16(*q13s16); + d28s16 = vget_low_s16(*q14s16); + d29s16 = vget_high_s16(*q14s16); + d30s16 = vget_low_s16(*q15s16); + d31s16 = vget_high_s16(*q15s16); - q0x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q8s16), - vreinterpretq_s32_s16(*q10s16)); - q1x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q9s16), - vreinterpretq_s32_s16(*q11s16)); - q2x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q12s16), - vreinterpretq_s32_s16(*q14s16)); - q3x2s32 = vtrnq_s32(vreinterpretq_s32_s16(*q13s16), - 
vreinterpretq_s32_s16(*q15s16)); + q2s32 = vmull_s16(d18s16, d0s16); + q3s32 = vmull_s16(d19s16, d0s16); + q5s32 = vmull_s16(d26s16, d2s16); + q6s32 = vmull_s16(d27s16, d2s16); - q0x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[0]), // q8 - vreinterpretq_s16_s32(q1x2s32.val[0])); // q9 - q1x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[1]), // q10 - vreinterpretq_s16_s32(q1x2s32.val[1])); // q11 - q2x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[0]), // q12 - vreinterpretq_s16_s32(q3x2s32.val[0])); // q13 - q3x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[1]), // q14 - vreinterpretq_s16_s32(q3x2s32.val[1])); // q15 + q2s32 = vmlsl_s16(q2s32, d30s16, d1s16); + q3s32 = vmlsl_s16(q3s32, d31s16, d1s16); + q5s32 = vmlsl_s16(q5s32, d22s16, d3s16); + q6s32 = vmlsl_s16(q6s32, d23s16, d3s16); - *q8s16 = q0x2s16.val[0]; - *q9s16 = q0x2s16.val[1]; - *q10s16 = q1x2s16.val[0]; - *q11s16 = q1x2s16.val[1]; - *q12s16 = q2x2s16.val[0]; - *q13s16 = q2x2s16.val[1]; - *q14s16 = q3x2s16.val[0]; - *q15s16 = q3x2s16.val[1]; - return; + d8s16 = vqrshrn_n_s32(q2s32, 14); + d9s16 = vqrshrn_n_s32(q3s32, 14); + d10s16 = vqrshrn_n_s32(q5s32, 14); + d11s16 = vqrshrn_n_s32(q6s32, 14); + q4s16 = vcombine_s16(d8s16, d9s16); + q5s16 = vcombine_s16(d10s16, d11s16); + + q2s32 = vmull_s16(d18s16, d1s16); + q3s32 = vmull_s16(d19s16, d1s16); + q9s32 = vmull_s16(d26s16, d3s16); + q13s32 = vmull_s16(d27s16, d3s16); + + q2s32 = vmlal_s16(q2s32, d30s16, d0s16); + q3s32 = vmlal_s16(q3s32, d31s16, d0s16); + q9s32 = vmlal_s16(q9s32, d22s16, d2s16); + q13s32 = vmlal_s16(q13s32, d23s16, d2s16); + + d14s16 = vqrshrn_n_s32(q2s32, 14); + d15s16 = vqrshrn_n_s32(q3s32, 14); + d12s16 = vqrshrn_n_s32(q9s32, 14); + d13s16 = vqrshrn_n_s32(q13s32, 14); + q6s16 = vcombine_s16(d12s16, d13s16); + q7s16 = vcombine_s16(d14s16, d15s16); + + d0s16 = vdup_n_s16(cospi_16_64); + + q2s32 = vmull_s16(d16s16, d0s16); + q3s32 = vmull_s16(d17s16, d0s16); + q13s32 = vmull_s16(d16s16, d0s16); + q15s32 = 
vmull_s16(d17s16, d0s16); + + q2s32 = vmlal_s16(q2s32, d24s16, d0s16); + q3s32 = vmlal_s16(q3s32, d25s16, d0s16); + q13s32 = vmlsl_s16(q13s32, d24s16, d0s16); + q15s32 = vmlsl_s16(q15s32, d25s16, d0s16); + + d0s16 = vdup_n_s16(cospi_24_64); + d1s16 = vdup_n_s16(cospi_8_64); + + d18s16 = vqrshrn_n_s32(q2s32, 14); + d19s16 = vqrshrn_n_s32(q3s32, 14); + d22s16 = vqrshrn_n_s32(q13s32, 14); + d23s16 = vqrshrn_n_s32(q15s32, 14); + *q9s16 = vcombine_s16(d18s16, d19s16); + *q11s16 = vcombine_s16(d22s16, d23s16); + + q2s32 = vmull_s16(d20s16, d0s16); + q3s32 = vmull_s16(d21s16, d0s16); + q8s32 = vmull_s16(d20s16, d1s16); + q12s32 = vmull_s16(d21s16, d1s16); + + q2s32 = vmlsl_s16(q2s32, d28s16, d1s16); + q3s32 = vmlsl_s16(q3s32, d29s16, d1s16); + q8s32 = vmlal_s16(q8s32, d28s16, d0s16); + q12s32 = vmlal_s16(q12s32, d29s16, d0s16); + + d26s16 = vqrshrn_n_s32(q2s32, 14); + d27s16 = vqrshrn_n_s32(q3s32, 14); + d30s16 = vqrshrn_n_s32(q8s32, 14); + d31s16 = vqrshrn_n_s32(q12s32, 14); + *q13s16 = vcombine_s16(d26s16, d27s16); + *q15s16 = vcombine_s16(d30s16, d31s16); + + q0s16 = vaddq_s16(*q9s16, *q15s16); + q1s16 = vaddq_s16(*q11s16, *q13s16); + q2s16 = vsubq_s16(*q11s16, *q13s16); + q3s16 = vsubq_s16(*q9s16, *q15s16); + + *q13s16 = vsubq_s16(q4s16, q5s16); + q4s16 = vaddq_s16(q4s16, q5s16); + *q14s16 = vsubq_s16(q7s16, q6s16); + q7s16 = vaddq_s16(q7s16, q6s16); + d26s16 = vget_low_s16(*q13s16); + d27s16 = vget_high_s16(*q13s16); + d28s16 = vget_low_s16(*q14s16); + d29s16 = vget_high_s16(*q14s16); + + d16s16 = vdup_n_s16(cospi_16_64); + + q9s32 = vmull_s16(d28s16, d16s16); + q10s32 = vmull_s16(d29s16, d16s16); + q11s32 = vmull_s16(d28s16, d16s16); + q12s32 = vmull_s16(d29s16, d16s16); + + q9s32 = vmlsl_s16(q9s32, d26s16, d16s16); + q10s32 = vmlsl_s16(q10s32, d27s16, d16s16); + q11s32 = vmlal_s16(q11s32, d26s16, d16s16); + q12s32 = vmlal_s16(q12s32, d27s16, d16s16); + + d10s16 = vqrshrn_n_s32(q9s32, 14); + d11s16 = vqrshrn_n_s32(q10s32, 14); + d12s16 = vqrshrn_n_s32(q11s32, 14); + 
d13s16 = vqrshrn_n_s32(q12s32, 14); + q5s16 = vcombine_s16(d10s16, d11s16); + q6s16 = vcombine_s16(d12s16, d13s16); + + *q8s16 = vaddq_s16(q0s16, q7s16); + *q9s16 = vaddq_s16(q1s16, q6s16); + *q10s16 = vaddq_s16(q2s16, q5s16); + *q11s16 = vaddq_s16(q3s16, q4s16); + *q12s16 = vsubq_s16(q3s16, q4s16); + *q13s16 = vsubq_s16(q2s16, q5s16); + *q14s16 = vsubq_s16(q1s16, q6s16); + *q15s16 = vsubq_s16(q0s16, q7s16); + return; } -static INLINE void IDCT8x8_1D( - int16x8_t *q8s16, - int16x8_t *q9s16, - int16x8_t *q10s16, - int16x8_t *q11s16, - int16x8_t *q12s16, - int16x8_t *q13s16, - int16x8_t *q14s16, - int16x8_t *q15s16) { - int16x4_t d0s16, d1s16, d2s16, d3s16; - int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; - int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16; - int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16; - int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; - int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32; - int32x4_t q10s32, q11s32, q12s32, q13s32, q15s32; +void vpx_idct8x8_64_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { + uint8_t *d1, *d2; + uint8x8_t d0u8, d1u8, d2u8, d3u8; + uint64x1_t d0u64, d1u64, d2u64, d3u64; + int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; + uint16x8_t q8u16, q9u16, q10u16, q11u16; - d0s16 = vdup_n_s16(cospi_28_64); - d1s16 = vdup_n_s16(cospi_4_64); - d2s16 = vdup_n_s16(cospi_12_64); - d3s16 = vdup_n_s16(cospi_20_64); + q8s16 = vld1q_s16(input); + q9s16 = vld1q_s16(input + 8); + q10s16 = vld1q_s16(input + 16); + q11s16 = vld1q_s16(input + 24); + q12s16 = vld1q_s16(input + 32); + q13s16 = vld1q_s16(input + 40); + q14s16 = vld1q_s16(input + 48); + q15s16 = vld1q_s16(input + 56); - d16s16 = vget_low_s16(*q8s16); - d17s16 = vget_high_s16(*q8s16); - d18s16 = vget_low_s16(*q9s16); - d19s16 = vget_high_s16(*q9s16); - d20s16 = vget_low_s16(*q10s16); - d21s16 = vget_high_s16(*q10s16); - d22s16 = vget_low_s16(*q11s16); - 
d23s16 = vget_high_s16(*q11s16); - d24s16 = vget_low_s16(*q12s16); - d25s16 = vget_high_s16(*q12s16); - d26s16 = vget_low_s16(*q13s16); - d27s16 = vget_high_s16(*q13s16); - d28s16 = vget_low_s16(*q14s16); - d29s16 = vget_high_s16(*q14s16); - d30s16 = vget_low_s16(*q15s16); - d31s16 = vget_high_s16(*q15s16); + transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); - q2s32 = vmull_s16(d18s16, d0s16); - q3s32 = vmull_s16(d19s16, d0s16); - q5s32 = vmull_s16(d26s16, d2s16); - q6s32 = vmull_s16(d27s16, d2s16); + IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); - q2s32 = vmlsl_s16(q2s32, d30s16, d1s16); - q3s32 = vmlsl_s16(q3s32, d31s16, d1s16); - q5s32 = vmlsl_s16(q5s32, d22s16, d3s16); - q6s32 = vmlsl_s16(q6s32, d23s16, d3s16); + transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); - d8s16 = vqrshrn_n_s32(q2s32, 14); - d9s16 = vqrshrn_n_s32(q3s32, 14); - d10s16 = vqrshrn_n_s32(q5s32, 14); - d11s16 = vqrshrn_n_s32(q6s32, 14); - q4s16 = vcombine_s16(d8s16, d9s16); - q5s16 = vcombine_s16(d10s16, d11s16); + IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); - q2s32 = vmull_s16(d18s16, d1s16); - q3s32 = vmull_s16(d19s16, d1s16); - q9s32 = vmull_s16(d26s16, d3s16); - q13s32 = vmull_s16(d27s16, d3s16); + q8s16 = vrshrq_n_s16(q8s16, 5); + q9s16 = vrshrq_n_s16(q9s16, 5); + q10s16 = vrshrq_n_s16(q10s16, 5); + q11s16 = vrshrq_n_s16(q11s16, 5); + q12s16 = vrshrq_n_s16(q12s16, 5); + q13s16 = vrshrq_n_s16(q13s16, 5); + q14s16 = vrshrq_n_s16(q14s16, 5); + q15s16 = vrshrq_n_s16(q15s16, 5); - q2s32 = vmlal_s16(q2s32, d30s16, d0s16); - q3s32 = vmlal_s16(q3s32, d31s16, d0s16); - q9s32 = vmlal_s16(q9s32, d22s16, d2s16); - q13s32 = vmlal_s16(q13s32, d23s16, d2s16); + d1 = d2 = dest; - d14s16 = vqrshrn_n_s32(q2s32, 14); - d15s16 = vqrshrn_n_s32(q3s32, 14); - d12s16 = vqrshrn_n_s32(q9s32, 14); - d13s16 = vqrshrn_n_s32(q13s32, 14); - q6s16 = 
vcombine_s16(d12s16, d13s16); - q7s16 = vcombine_s16(d14s16, d15s16); + d0u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + d1u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + d2u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + d3u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; - d0s16 = vdup_n_s16(cospi_16_64); + q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u64(d0u64)); + q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u64(d1u64)); + q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), vreinterpret_u8_u64(d2u64)); + q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), vreinterpret_u8_u64(d3u64)); - q2s32 = vmull_s16(d16s16, d0s16); - q3s32 = vmull_s16(d17s16, d0s16); - q13s32 = vmull_s16(d16s16, d0s16); - q15s32 = vmull_s16(d17s16, d0s16); + d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); + d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); + d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); + d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); - q2s32 = vmlal_s16(q2s32, d24s16, d0s16); - q3s32 = vmlal_s16(q3s32, d25s16, d0s16); - q13s32 = vmlsl_s16(q13s32, d24s16, d0s16); - q15s32 = vmlsl_s16(q15s32, d25s16, d0s16); + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); + d2 += dest_stride; - d0s16 = vdup_n_s16(cospi_24_64); - d1s16 = vdup_n_s16(cospi_8_64); + q8s16 = q12s16; + q9s16 = q13s16; + q10s16 = q14s16; + q11s16 = q15s16; - d18s16 = vqrshrn_n_s32(q2s32, 14); - d19s16 = vqrshrn_n_s32(q3s32, 14); - d22s16 = vqrshrn_n_s32(q13s32, 14); - d23s16 = vqrshrn_n_s32(q15s32, 14); - *q9s16 = vcombine_s16(d18s16, d19s16); - *q11s16 = vcombine_s16(d22s16, d23s16); + d0u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + d1u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + d2u64 = 
vld1_u64((uint64_t *)d1); + d1 += dest_stride; + d3u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; - q2s32 = vmull_s16(d20s16, d0s16); - q3s32 = vmull_s16(d21s16, d0s16); - q8s32 = vmull_s16(d20s16, d1s16); - q12s32 = vmull_s16(d21s16, d1s16); + q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u64(d0u64)); + q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u64(d1u64)); + q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), vreinterpret_u8_u64(d2u64)); + q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), vreinterpret_u8_u64(d3u64)); - q2s32 = vmlsl_s16(q2s32, d28s16, d1s16); - q3s32 = vmlsl_s16(q3s32, d29s16, d1s16); - q8s32 = vmlal_s16(q8s32, d28s16, d0s16); - q12s32 = vmlal_s16(q12s32, d29s16, d0s16); + d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); + d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); + d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); + d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); - d26s16 = vqrshrn_n_s32(q2s32, 14); - d27s16 = vqrshrn_n_s32(q3s32, 14); - d30s16 = vqrshrn_n_s32(q8s32, 14); - d31s16 = vqrshrn_n_s32(q12s32, 14); - *q13s16 = vcombine_s16(d26s16, d27s16); - *q15s16 = vcombine_s16(d30s16, d31s16); - - q0s16 = vaddq_s16(*q9s16, *q15s16); - q1s16 = vaddq_s16(*q11s16, *q13s16); - q2s16 = vsubq_s16(*q11s16, *q13s16); - q3s16 = vsubq_s16(*q9s16, *q15s16); - - *q13s16 = vsubq_s16(q4s16, q5s16); - q4s16 = vaddq_s16(q4s16, q5s16); - *q14s16 = vsubq_s16(q7s16, q6s16); - q7s16 = vaddq_s16(q7s16, q6s16); - d26s16 = vget_low_s16(*q13s16); - d27s16 = vget_high_s16(*q13s16); - d28s16 = vget_low_s16(*q14s16); - d29s16 = vget_high_s16(*q14s16); - - d16s16 = vdup_n_s16(cospi_16_64); - - q9s32 = vmull_s16(d28s16, d16s16); - q10s32 = vmull_s16(d29s16, d16s16); - q11s32 = vmull_s16(d28s16, d16s16); - q12s32 = vmull_s16(d29s16, d16s16); - - q9s32 = vmlsl_s16(q9s32, d26s16, d16s16); - q10s32 = vmlsl_s16(q10s32, d27s16, d16s16); - q11s32 = vmlal_s16(q11s32, d26s16, d16s16); - q12s32 = vmlal_s16(q12s32, d27s16, d16s16); - 
- d10s16 = vqrshrn_n_s32(q9s32, 14); - d11s16 = vqrshrn_n_s32(q10s32, 14); - d12s16 = vqrshrn_n_s32(q11s32, 14); - d13s16 = vqrshrn_n_s32(q12s32, 14); - q5s16 = vcombine_s16(d10s16, d11s16); - q6s16 = vcombine_s16(d12s16, d13s16); - - *q8s16 = vaddq_s16(q0s16, q7s16); - *q9s16 = vaddq_s16(q1s16, q6s16); - *q10s16 = vaddq_s16(q2s16, q5s16); - *q11s16 = vaddq_s16(q3s16, q4s16); - *q12s16 = vsubq_s16(q3s16, q4s16); - *q13s16 = vsubq_s16(q2s16, q5s16); - *q14s16 = vsubq_s16(q1s16, q6s16); - *q15s16 = vsubq_s16(q0s16, q7s16); - return; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); + d2 += dest_stride; + return; } -void vpx_idct8x8_64_add_neon( - int16_t *input, - uint8_t *dest, - int dest_stride) { - uint8_t *d1, *d2; - uint8x8_t d0u8, d1u8, d2u8, d3u8; - uint64x1_t d0u64, d1u64, d2u64, d3u64; - int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; - uint16x8_t q8u16, q9u16, q10u16, q11u16; +void vpx_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { + uint8_t *d1, *d2; + uint8x8_t d0u8, d1u8, d2u8, d3u8; + int16x4_t d10s16, d11s16, d12s16, d13s16, d16s16; + int16x4_t d26s16, d27s16, d28s16, d29s16; + uint64x1_t d0u64, d1u64, d2u64, d3u64; + int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; + int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; + uint16x8_t q8u16, q9u16, q10u16, q11u16; + int32x4_t q9s32, q10s32, q11s32, q12s32; - q8s16 = vld1q_s16(input); - q9s16 = vld1q_s16(input + 8); - q10s16 = vld1q_s16(input + 16); - q11s16 = vld1q_s16(input + 24); - q12s16 = vld1q_s16(input + 32); - q13s16 = vld1q_s16(input + 40); - q14s16 = vld1q_s16(input + 48); - q15s16 = vld1q_s16(input + 56); + q8s16 = vld1q_s16(input); + q9s16 = vld1q_s16(input + 8); + q10s16 = 
vld1q_s16(input + 16); + q11s16 = vld1q_s16(input + 24); + q12s16 = vld1q_s16(input + 32); + q13s16 = vld1q_s16(input + 40); + q14s16 = vld1q_s16(input + 48); + q15s16 = vld1q_s16(input + 56); - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); + transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); - IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); + // First transform rows + // stage 1 + q0s16 = vdupq_n_s16(cospi_28_64 * 2); + q1s16 = vdupq_n_s16(cospi_4_64 * 2); - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); + q4s16 = vqrdmulhq_s16(q9s16, q0s16); - IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); + q0s16 = vdupq_n_s16(-cospi_20_64 * 2); - q8s16 = vrshrq_n_s16(q8s16, 5); - q9s16 = vrshrq_n_s16(q9s16, 5); - q10s16 = vrshrq_n_s16(q10s16, 5); - q11s16 = vrshrq_n_s16(q11s16, 5); - q12s16 = vrshrq_n_s16(q12s16, 5); - q13s16 = vrshrq_n_s16(q13s16, 5); - q14s16 = vrshrq_n_s16(q14s16, 5); - q15s16 = vrshrq_n_s16(q15s16, 5); + q7s16 = vqrdmulhq_s16(q9s16, q1s16); - d1 = d2 = dest; + q1s16 = vdupq_n_s16(cospi_12_64 * 2); - d0u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d1u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d2u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d3u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; + q5s16 = vqrdmulhq_s16(q11s16, q0s16); - q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), - vreinterpret_u8_u64(d0u64)); - q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), - vreinterpret_u8_u64(d1u64)); - q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), - vreinterpret_u8_u64(d2u64)); - q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), - vreinterpret_u8_u64(d3u64)); + q0s16 = vdupq_n_s16(cospi_16_64 * 2); - d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); - d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); - d2u8 = 
vqmovun_s16(vreinterpretq_s16_u16(q10u16)); - d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); + q6s16 = vqrdmulhq_s16(q11s16, q1s16); - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); - d2 += dest_stride; + // stage 2 & stage 3 - even half + q1s16 = vdupq_n_s16(cospi_24_64 * 2); - q8s16 = q12s16; - q9s16 = q13s16; - q10s16 = q14s16; - q11s16 = q15s16; + q9s16 = vqrdmulhq_s16(q8s16, q0s16); - d0u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d1u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d2u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d3u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; + q0s16 = vdupq_n_s16(cospi_8_64 * 2); - q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), - vreinterpret_u8_u64(d0u64)); - q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), - vreinterpret_u8_u64(d1u64)); - q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), - vreinterpret_u8_u64(d2u64)); - q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), - vreinterpret_u8_u64(d3u64)); + q13s16 = vqrdmulhq_s16(q10s16, q1s16); - d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); - d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); - d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); - d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); + q15s16 = vqrdmulhq_s16(q10s16, q0s16); - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); - d2 += dest_stride; - return; -} - -void vpx_idct8x8_12_add_neon( - int16_t *input, - uint8_t *dest, - int dest_stride) { - uint8_t *d1, *d2; - uint8x8_t d0u8, d1u8, d2u8, d3u8; - int16x4_t d10s16, 
d11s16, d12s16, d13s16, d16s16; - int16x4_t d26s16, d27s16, d28s16, d29s16; - uint64x1_t d0u64, d1u64, d2u64, d3u64; - int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16; - int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16; - uint16x8_t q8u16, q9u16, q10u16, q11u16; - int32x4_t q9s32, q10s32, q11s32, q12s32; - - q8s16 = vld1q_s16(input); - q9s16 = vld1q_s16(input + 8); - q10s16 = vld1q_s16(input + 16); - q11s16 = vld1q_s16(input + 24); - q12s16 = vld1q_s16(input + 32); - q13s16 = vld1q_s16(input + 40); - q14s16 = vld1q_s16(input + 48); - q15s16 = vld1q_s16(input + 56); - - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - - // First transform rows - // stage 1 - q0s16 = vdupq_n_s16(cospi_28_64 * 2); - q1s16 = vdupq_n_s16(cospi_4_64 * 2); - - q4s16 = vqrdmulhq_s16(q9s16, q0s16); - - q0s16 = vdupq_n_s16(-cospi_20_64 * 2); - - q7s16 = vqrdmulhq_s16(q9s16, q1s16); - - q1s16 = vdupq_n_s16(cospi_12_64 * 2); - - q5s16 = vqrdmulhq_s16(q11s16, q0s16); - - q0s16 = vdupq_n_s16(cospi_16_64 * 2); - - q6s16 = vqrdmulhq_s16(q11s16, q1s16); - - // stage 2 & stage 3 - even half - q1s16 = vdupq_n_s16(cospi_24_64 * 2); - - q9s16 = vqrdmulhq_s16(q8s16, q0s16); - - q0s16 = vdupq_n_s16(cospi_8_64 * 2); - - q13s16 = vqrdmulhq_s16(q10s16, q1s16); - - q15s16 = vqrdmulhq_s16(q10s16, q0s16); - - // stage 3 -odd half - q0s16 = vaddq_s16(q9s16, q15s16); - q1s16 = vaddq_s16(q9s16, q13s16); - q2s16 = vsubq_s16(q9s16, q13s16); - q3s16 = vsubq_s16(q9s16, q15s16); - - // stage 2 - odd half - q13s16 = vsubq_s16(q4s16, q5s16); - q4s16 = vaddq_s16(q4s16, q5s16); - q14s16 = vsubq_s16(q7s16, q6s16); - q7s16 = vaddq_s16(q7s16, q6s16); - d26s16 = vget_low_s16(q13s16); - d27s16 = vget_high_s16(q13s16); - d28s16 = vget_low_s16(q14s16); - d29s16 = vget_high_s16(q14s16); - - d16s16 = vdup_n_s16(cospi_16_64); - q9s32 = vmull_s16(d28s16, d16s16); - q10s32 = vmull_s16(d29s16, d16s16); - q11s32 = vmull_s16(d28s16, d16s16); - q12s32 = 
vmull_s16(d29s16, d16s16); - - q9s32 = vmlsl_s16(q9s32, d26s16, d16s16); - q10s32 = vmlsl_s16(q10s32, d27s16, d16s16); - q11s32 = vmlal_s16(q11s32, d26s16, d16s16); - q12s32 = vmlal_s16(q12s32, d27s16, d16s16); - - d10s16 = vqrshrn_n_s32(q9s32, 14); - d11s16 = vqrshrn_n_s32(q10s32, 14); - d12s16 = vqrshrn_n_s32(q11s32, 14); - d13s16 = vqrshrn_n_s32(q12s32, 14); - q5s16 = vcombine_s16(d10s16, d11s16); - q6s16 = vcombine_s16(d12s16, d13s16); - - // stage 4 - q8s16 = vaddq_s16(q0s16, q7s16); - q9s16 = vaddq_s16(q1s16, q6s16); - q10s16 = vaddq_s16(q2s16, q5s16); - q11s16 = vaddq_s16(q3s16, q4s16); - q12s16 = vsubq_s16(q3s16, q4s16); - q13s16 = vsubq_s16(q2s16, q5s16); - q14s16 = vsubq_s16(q1s16, q6s16); - q15s16 = vsubq_s16(q0s16, q7s16); - - TRANSPOSE8X8(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - - IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, - &q12s16, &q13s16, &q14s16, &q15s16); - - q8s16 = vrshrq_n_s16(q8s16, 5); - q9s16 = vrshrq_n_s16(q9s16, 5); - q10s16 = vrshrq_n_s16(q10s16, 5); - q11s16 = vrshrq_n_s16(q11s16, 5); - q12s16 = vrshrq_n_s16(q12s16, 5); - q13s16 = vrshrq_n_s16(q13s16, 5); - q14s16 = vrshrq_n_s16(q14s16, 5); - q15s16 = vrshrq_n_s16(q15s16, 5); - - d1 = d2 = dest; - - d0u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d1u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d2u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d3u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - - q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), - vreinterpret_u8_u64(d0u64)); - q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), - vreinterpret_u8_u64(d1u64)); - q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), - vreinterpret_u8_u64(d2u64)); - q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), - vreinterpret_u8_u64(d3u64)); - - d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); - d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); - d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); - d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); - 
- vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); - d2 += dest_stride; - - q8s16 = q12s16; - q9s16 = q13s16; - q10s16 = q14s16; - q11s16 = q15s16; - - d0u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d1u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d2u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - d3u64 = vld1_u64((uint64_t *)d1); - d1 += dest_stride; - - q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), - vreinterpret_u8_u64(d0u64)); - q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), - vreinterpret_u8_u64(d1u64)); - q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), - vreinterpret_u8_u64(d2u64)); - q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), - vreinterpret_u8_u64(d3u64)); - - d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); - d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); - d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); - d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); - - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); - d2 += dest_stride; - vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); - d2 += dest_stride; - return; + // stage 3 -odd half + q0s16 = vaddq_s16(q9s16, q15s16); + q1s16 = vaddq_s16(q9s16, q13s16); + q2s16 = vsubq_s16(q9s16, q13s16); + q3s16 = vsubq_s16(q9s16, q15s16); + + // stage 2 - odd half + q13s16 = vsubq_s16(q4s16, q5s16); + q4s16 = vaddq_s16(q4s16, q5s16); + q14s16 = vsubq_s16(q7s16, q6s16); + q7s16 = vaddq_s16(q7s16, q6s16); + d26s16 = vget_low_s16(q13s16); + d27s16 = vget_high_s16(q13s16); + d28s16 = vget_low_s16(q14s16); + d29s16 = vget_high_s16(q14s16); + + d16s16 = vdup_n_s16(cospi_16_64); + q9s32 = vmull_s16(d28s16, 
d16s16); + q10s32 = vmull_s16(d29s16, d16s16); + q11s32 = vmull_s16(d28s16, d16s16); + q12s32 = vmull_s16(d29s16, d16s16); + + q9s32 = vmlsl_s16(q9s32, d26s16, d16s16); + q10s32 = vmlsl_s16(q10s32, d27s16, d16s16); + q11s32 = vmlal_s16(q11s32, d26s16, d16s16); + q12s32 = vmlal_s16(q12s32, d27s16, d16s16); + + d10s16 = vqrshrn_n_s32(q9s32, 14); + d11s16 = vqrshrn_n_s32(q10s32, 14); + d12s16 = vqrshrn_n_s32(q11s32, 14); + d13s16 = vqrshrn_n_s32(q12s32, 14); + q5s16 = vcombine_s16(d10s16, d11s16); + q6s16 = vcombine_s16(d12s16, d13s16); + + // stage 4 + q8s16 = vaddq_s16(q0s16, q7s16); + q9s16 = vaddq_s16(q1s16, q6s16); + q10s16 = vaddq_s16(q2s16, q5s16); + q11s16 = vaddq_s16(q3s16, q4s16); + q12s16 = vsubq_s16(q3s16, q4s16); + q13s16 = vsubq_s16(q2s16, q5s16); + q14s16 = vsubq_s16(q1s16, q6s16); + q15s16 = vsubq_s16(q0s16, q7s16); + + transpose_s16_8x8(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); + + IDCT8x8_1D(&q8s16, &q9s16, &q10s16, &q11s16, &q12s16, &q13s16, &q14s16, + &q15s16); + + q8s16 = vrshrq_n_s16(q8s16, 5); + q9s16 = vrshrq_n_s16(q9s16, 5); + q10s16 = vrshrq_n_s16(q10s16, 5); + q11s16 = vrshrq_n_s16(q11s16, 5); + q12s16 = vrshrq_n_s16(q12s16, 5); + q13s16 = vrshrq_n_s16(q13s16, 5); + q14s16 = vrshrq_n_s16(q14s16, 5); + q15s16 = vrshrq_n_s16(q15s16, 5); + + d1 = d2 = dest; + + d0u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + d1u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + d2u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + d3u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + + q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u64(d0u64)); + q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u64(d1u64)); + q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), vreinterpret_u8_u64(d2u64)); + q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), vreinterpret_u8_u64(d3u64)); + + d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); + d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); + d2u8 = 
vqmovun_s16(vreinterpretq_s16_u16(q10u16)); + d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); + + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); + d2 += dest_stride; + + q8s16 = q12s16; + q9s16 = q13s16; + q10s16 = q14s16; + q11s16 = q15s16; + + d0u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + d1u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + d2u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + d3u64 = vld1_u64((uint64_t *)d1); + d1 += dest_stride; + + q8u16 = vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_u64(d0u64)); + q9u16 = vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_u64(d1u64)); + q10u16 = vaddw_u8(vreinterpretq_u16_s16(q10s16), vreinterpret_u8_u64(d2u64)); + q11u16 = vaddw_u8(vreinterpretq_u16_s16(q11s16), vreinterpret_u8_u64(d3u64)); + + d0u8 = vqmovun_s16(vreinterpretq_s16_u16(q8u16)); + d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q9u16)); + d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q10u16)); + d3u8 = vqmovun_s16(vreinterpretq_s16_u16(q11u16)); + + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d0u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d1u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d2u8)); + d2 += dest_stride; + vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8)); + d2 += dest_stride; + return; } diff --git a/libs/libvpx/vpx_dsp/arm/intrapred_neon.c b/libs/libvpx/vpx_dsp/arm/intrapred_neon.c index 0a376104d2..38e79ed69d 100644 --- a/libs/libvpx/vpx_dsp/arm/intrapred_neon.c +++ b/libs/libvpx/vpx_dsp/arm/intrapred_neon.c @@ -18,43 +18,40 @@ // DC 4x4 // 'do_above' and 'do_left' facilitate branch removal when inlined. 
-static INLINE void dc_4x4(uint8_t *dst, ptrdiff_t stride, - const uint8_t *above, const uint8_t *left, - int do_above, int do_left) { - uint16x8_t sum_top; - uint16x8_t sum_left; - uint8x8_t dc0; +static INLINE void dc_4x4(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, + const uint8_t *left, int do_above, int do_left) { + uint16x4_t sum_top; + uint16x4_t sum_left; + uint16x4_t dc0; if (do_above) { const uint8x8_t A = vld1_u8(above); // top row const uint16x4_t p0 = vpaddl_u8(A); // cascading summation of the top - const uint16x4_t p1 = vpadd_u16(p0, p0); - sum_top = vcombine_u16(p1, p1); + sum_top = vpadd_u16(p0, p0); } if (do_left) { - const uint8x8_t L = vld1_u8(left); // left border + const uint8x8_t L = vld1_u8(left); // left border const uint16x4_t p0 = vpaddl_u8(L); // cascading summation of the left - const uint16x4_t p1 = vpadd_u16(p0, p0); - sum_left = vcombine_u16(p1, p1); + sum_left = vpadd_u16(p0, p0); } if (do_above && do_left) { - const uint16x8_t sum = vaddq_u16(sum_left, sum_top); - dc0 = vrshrn_n_u16(sum, 3); + const uint16x4_t sum = vadd_u16(sum_left, sum_top); + dc0 = vrshr_n_u16(sum, 3); } else if (do_above) { - dc0 = vrshrn_n_u16(sum_top, 2); + dc0 = vrshr_n_u16(sum_top, 2); } else if (do_left) { - dc0 = vrshrn_n_u16(sum_left, 2); + dc0 = vrshr_n_u16(sum_left, 2); } else { - dc0 = vdup_n_u8(0x80); + dc0 = vdup_n_u16(0x80); } { - const uint8x8_t dc = vdup_lane_u8(dc0, 0); + const uint8x8_t dc = vdup_lane_u8(vreinterpret_u8_u16(dc0), 0); int i; for (i = 0; i < 4; ++i) { - vst1_lane_u32((uint32_t*)(dst + i * stride), vreinterpret_u32_u8(dc), 0); + vst1_lane_u32((uint32_t *)(dst + i * stride), vreinterpret_u32_u8(dc), 0); } } } @@ -87,9 +84,8 @@ void vpx_dc_128_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, // DC 8x8 // 'do_above' and 'do_left' facilitate branch removal when inlined. 
-static INLINE void dc_8x8(uint8_t *dst, ptrdiff_t stride, - const uint8_t *above, const uint8_t *left, - int do_above, int do_left) { +static INLINE void dc_8x8(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, + const uint8_t *left, int do_above, int do_left) { uint16x8_t sum_top; uint16x8_t sum_left; uint8x8_t dc0; @@ -103,7 +99,7 @@ static INLINE void dc_8x8(uint8_t *dst, ptrdiff_t stride, } if (do_left) { - const uint8x8_t L = vld1_u8(left); // left border + const uint8x8_t L = vld1_u8(left); // left border const uint16x4_t p0 = vpaddl_u8(L); // cascading summation of the left const uint16x4_t p1 = vpadd_u16(p0, p0); const uint16x4_t p2 = vpadd_u16(p1, p1); @@ -125,7 +121,7 @@ static INLINE void dc_8x8(uint8_t *dst, ptrdiff_t stride, const uint8x8_t dc = vdup_lane_u8(dc0, 0); int i; for (i = 0; i < 8; ++i) { - vst1_u32((uint32_t*)(dst + i * stride), vreinterpret_u32_u8(dc)); + vst1_u32((uint32_t *)(dst + i * stride), vreinterpret_u32_u8(dc)); } } } @@ -167,7 +163,7 @@ static INLINE void dc_16x16(uint8_t *dst, ptrdiff_t stride, if (do_above) { const uint8x16_t A = vld1q_u8(above); // top row - const uint16x8_t p0 = vpaddlq_u8(A); // cascading summation of the top + const uint16x8_t p0 = vpaddlq_u8(A); // cascading summation of the top const uint16x4_t p1 = vadd_u16(vget_low_u16(p0), vget_high_u16(p0)); const uint16x4_t p2 = vpadd_u16(p1, p1); const uint16x4_t p3 = vpadd_u16(p2, p2); @@ -425,8 +421,7 @@ void vpx_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, (void)left; d0u8 = vld1_u8(above); - for (i = 0; i < 8; i++, dst += stride) - vst1_u8(dst, d0u8); + for (i = 0; i < 8; i++, dst += stride) vst1_u8(dst, d0u8); } void vpx_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, @@ -436,8 +431,7 @@ void vpx_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, (void)left; q0u8 = vld1q_u8(above); - for (i = 0; i < 16; i++, dst += stride) - vst1q_u8(dst, q0u8); + for (i = 0; i < 16; i++, dst += stride) vst1q_u8(dst, q0u8); } void 
vpx_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, @@ -608,8 +602,8 @@ void vpx_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, q3u16 = vsubl_u8(vreinterpret_u8_u32(d2u32), d0u8); for (i = 0; i < 4; i++, dst += stride) { q1u16 = vdupq_n_u16((uint16_t)left[i]); - q1s16 = vaddq_s16(vreinterpretq_s16_u16(q1u16), - vreinterpretq_s16_u16(q3u16)); + q1s16 = + vaddq_s16(vreinterpretq_s16_u16(q1u16), vreinterpretq_s16_u16(q3u16)); d0u8 = vqmovun_s16(q1s16); vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0); } @@ -631,26 +625,26 @@ void vpx_tm_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, d20u16 = vget_low_u16(q10u16); for (j = 0; j < 2; j++, d20u16 = vget_high_u16(q10u16)) { q0u16 = vdupq_lane_u16(d20u16, 0); - q0s16 = vaddq_s16(vreinterpretq_s16_u16(q3u16), - vreinterpretq_s16_u16(q0u16)); + q0s16 = + vaddq_s16(vreinterpretq_s16_u16(q3u16), vreinterpretq_s16_u16(q0u16)); d0u8 = vqmovun_s16(q0s16); vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8)); dst += stride; q0u16 = vdupq_lane_u16(d20u16, 1); - q0s16 = vaddq_s16(vreinterpretq_s16_u16(q3u16), - vreinterpretq_s16_u16(q0u16)); + q0s16 = + vaddq_s16(vreinterpretq_s16_u16(q3u16), vreinterpretq_s16_u16(q0u16)); d0u8 = vqmovun_s16(q0s16); vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8)); dst += stride; q0u16 = vdupq_lane_u16(d20u16, 2); - q0s16 = vaddq_s16(vreinterpretq_s16_u16(q3u16), - vreinterpretq_s16_u16(q0u16)); + q0s16 = + vaddq_s16(vreinterpretq_s16_u16(q3u16), vreinterpretq_s16_u16(q0u16)); d0u8 = vqmovun_s16(q0s16); vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8)); dst += stride; q0u16 = vdupq_lane_u16(d20u16, 3); - q0s16 = vaddq_s16(vreinterpretq_s16_u16(q3u16), - vreinterpretq_s16_u16(q0u16)); + q0s16 = + vaddq_s16(vreinterpretq_s16_u16(q3u16), vreinterpretq_s16_u16(q0u16)); d0u8 = vqmovun_s16(q0s16); vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8)); dst += stride; @@ -677,14 +671,14 @@ void vpx_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, for (j = 0; j < 2; 
j++, d20u16 = vget_high_u16(q10u16)) { q0u16 = vdupq_lane_u16(d20u16, 0); q8u16 = vdupq_lane_u16(d20u16, 1); - q1s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), - vreinterpretq_s16_u16(q2u16)); - q0s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), - vreinterpretq_s16_u16(q3u16)); - q11s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16), - vreinterpretq_s16_u16(q2u16)); - q8s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16), - vreinterpretq_s16_u16(q3u16)); + q1s16 = + vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q2u16)); + q0s16 = + vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q3u16)); + q11s16 = + vaddq_s16(vreinterpretq_s16_u16(q8u16), vreinterpretq_s16_u16(q2u16)); + q8s16 = + vaddq_s16(vreinterpretq_s16_u16(q8u16), vreinterpretq_s16_u16(q3u16)); d2u8 = vqmovun_s16(q1s16); d3u8 = vqmovun_s16(q0s16); d22u8 = vqmovun_s16(q11s16); @@ -698,14 +692,14 @@ void vpx_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, q0u16 = vdupq_lane_u16(d20u16, 2); q8u16 = vdupq_lane_u16(d20u16, 3); - q1s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), - vreinterpretq_s16_u16(q2u16)); - q0s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), - vreinterpretq_s16_u16(q3u16)); - q11s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16), - vreinterpretq_s16_u16(q2u16)); - q8s16 = vaddq_s16(vreinterpretq_s16_u16(q8u16), - vreinterpretq_s16_u16(q3u16)); + q1s16 = + vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q2u16)); + q0s16 = + vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q3u16)); + q11s16 = + vaddq_s16(vreinterpretq_s16_u16(q8u16), vreinterpretq_s16_u16(q2u16)); + q8s16 = + vaddq_s16(vreinterpretq_s16_u16(q8u16), vreinterpretq_s16_u16(q3u16)); d2u8 = vqmovun_s16(q1s16); d3u8 = vqmovun_s16(q0s16); d22u8 = vqmovun_s16(q11s16); @@ -742,10 +736,10 @@ void vpx_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, d6u16 = vget_low_u16(q3u16); for (j = 0; j < 2; j++, d6u16 = vget_high_u16(q3u16)) { q0u16 = vdupq_lane_u16(d6u16, 0); - q12s16 = 
vaddq_s16(vreinterpretq_s16_u16(q0u16), - vreinterpretq_s16_u16(q8u16)); - q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), - vreinterpretq_s16_u16(q9u16)); + q12s16 = + vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q8u16)); + q13s16 = + vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q9u16)); q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q10u16)); q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), @@ -761,10 +755,10 @@ void vpx_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, dst += stride; q0u16 = vdupq_lane_u16(d6u16, 1); - q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), - vreinterpretq_s16_u16(q8u16)); - q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), - vreinterpretq_s16_u16(q9u16)); + q12s16 = + vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q8u16)); + q13s16 = + vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q9u16)); q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q10u16)); q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), @@ -780,10 +774,10 @@ void vpx_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, dst += stride; q0u16 = vdupq_lane_u16(d6u16, 2); - q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), - vreinterpretq_s16_u16(q8u16)); - q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), - vreinterpretq_s16_u16(q9u16)); + q12s16 = + vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q8u16)); + q13s16 = + vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q9u16)); q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q10u16)); q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), @@ -799,10 +793,10 @@ void vpx_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, dst += stride; q0u16 = vdupq_lane_u16(d6u16, 3); - q12s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), - vreinterpretq_s16_u16(q8u16)); - q13s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), - vreinterpretq_s16_u16(q9u16)); + q12s16 = + 
vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q8u16)); + q13s16 = + vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q9u16)); q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q10u16)); q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16), diff --git a/libs/libvpx/vpx_dsp/arm/loopfilter_16_neon.c b/libs/libvpx/vpx_dsp/arm/loopfilter_16_neon.c deleted file mode 100644 index d24e6adc8a..0000000000 --- a/libs/libvpx/vpx_dsp/arm/loopfilter_16_neon.c +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - -#include "./vpx_dsp_rtcd.h" -#include "./vpx_config.h" -#include "vpx/vpx_integer.h" - -static INLINE void loop_filter_neon_16( - uint8x16_t qblimit, // blimit - uint8x16_t qlimit, // limit - uint8x16_t qthresh, // thresh - uint8x16_t q3, // p3 - uint8x16_t q4, // p2 - uint8x16_t q5, // p1 - uint8x16_t q6, // p0 - uint8x16_t q7, // q0 - uint8x16_t q8, // q1 - uint8x16_t q9, // q2 - uint8x16_t q10, // q3 - uint8x16_t *q5r, // p1 - uint8x16_t *q6r, // p0 - uint8x16_t *q7r, // q0 - uint8x16_t *q8r) { // q1 - uint8x16_t q1u8, q2u8, q11u8, q12u8, q13u8, q14u8, q15u8; - int16x8_t q2s16, q11s16; - uint16x8_t q4u16; - int8x16_t q0s8, q1s8, q2s8, q11s8, q12s8, q13s8; - int8x8_t d2s8, d3s8; - - q11u8 = vabdq_u8(q3, q4); - q12u8 = vabdq_u8(q4, q5); - q13u8 = vabdq_u8(q5, q6); - q14u8 = vabdq_u8(q8, q7); - q3 = vabdq_u8(q9, q8); - q4 = vabdq_u8(q10, q9); - - q11u8 = vmaxq_u8(q11u8, q12u8); - q12u8 = vmaxq_u8(q13u8, q14u8); - q3 = vmaxq_u8(q3, q4); - q15u8 = vmaxq_u8(q11u8, q12u8); - - q9 = vabdq_u8(q6, q7); - - // vp8_hevmask - q13u8 = 
vcgtq_u8(q13u8, qthresh); - q14u8 = vcgtq_u8(q14u8, qthresh); - q15u8 = vmaxq_u8(q15u8, q3); - - q2u8 = vabdq_u8(q5, q8); - q9 = vqaddq_u8(q9, q9); - - q15u8 = vcgeq_u8(qlimit, q15u8); - - // vp8_filter() function - // convert to signed - q10 = vdupq_n_u8(0x80); - q8 = veorq_u8(q8, q10); - q7 = veorq_u8(q7, q10); - q6 = veorq_u8(q6, q10); - q5 = veorq_u8(q5, q10); - - q2u8 = vshrq_n_u8(q2u8, 1); - q9 = vqaddq_u8(q9, q2u8); - - q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)), - vget_low_s8(vreinterpretq_s8_u8(q6))); - q11s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)), - vget_high_s8(vreinterpretq_s8_u8(q6))); - - q9 = vcgeq_u8(qblimit, q9); - - q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5), - vreinterpretq_s8_u8(q8)); - - q14u8 = vorrq_u8(q13u8, q14u8); - - q4u16 = vdupq_n_u16(3); - q2s16 = vmulq_s16(q2s16, vreinterpretq_s16_u16(q4u16)); - q11s16 = vmulq_s16(q11s16, vreinterpretq_s16_u16(q4u16)); - - q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q14u8); - q15u8 = vandq_u8(q15u8, q9); - - q1s8 = vreinterpretq_s8_u8(q1u8); - q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8)); - q11s16 = vaddw_s8(q11s16, vget_high_s8(q1s8)); - - q4 = vdupq_n_u8(3); - q9 = vdupq_n_u8(4); - // vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0)) - d2s8 = vqmovn_s16(q2s16); - d3s8 = vqmovn_s16(q11s16); - q1s8 = vcombine_s8(d2s8, d3s8); - q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q15u8); - q1s8 = vreinterpretq_s8_u8(q1u8); - - q2s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q4)); - q1s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q9)); - q2s8 = vshrq_n_s8(q2s8, 3); - q1s8 = vshrq_n_s8(q1s8, 3); - - q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q2s8); - q0s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q1s8); - - q1s8 = vrshrq_n_s8(q1s8, 1); - q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8)); - - q13s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q1s8); - q12s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q1s8); - - *q8r = veorq_u8(vreinterpretq_u8_s8(q12s8), q10); - *q7r = veorq_u8(vreinterpretq_u8_s8(q0s8), q10); - *q6r = 
veorq_u8(vreinterpretq_u8_s8(q11s8), q10); - *q5r = veorq_u8(vreinterpretq_u8_s8(q13s8), q10); - return; -} - -void vpx_lpf_horizontal_4_dual_neon(uint8_t *s, int p /* pitch */, - const uint8_t *blimit0, - const uint8_t *limit0, - const uint8_t *thresh0, - const uint8_t *blimit1, - const uint8_t *limit1, - const uint8_t *thresh1) { - uint8x8_t dblimit0, dlimit0, dthresh0, dblimit1, dlimit1, dthresh1; - uint8x16_t qblimit, qlimit, qthresh; - uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q8u8, q9u8, q10u8; - - dblimit0 = vld1_u8(blimit0); - dlimit0 = vld1_u8(limit0); - dthresh0 = vld1_u8(thresh0); - dblimit1 = vld1_u8(blimit1); - dlimit1 = vld1_u8(limit1); - dthresh1 = vld1_u8(thresh1); - qblimit = vcombine_u8(dblimit0, dblimit1); - qlimit = vcombine_u8(dlimit0, dlimit1); - qthresh = vcombine_u8(dthresh0, dthresh1); - - s -= (p << 2); - - q3u8 = vld1q_u8(s); - s += p; - q4u8 = vld1q_u8(s); - s += p; - q5u8 = vld1q_u8(s); - s += p; - q6u8 = vld1q_u8(s); - s += p; - q7u8 = vld1q_u8(s); - s += p; - q8u8 = vld1q_u8(s); - s += p; - q9u8 = vld1q_u8(s); - s += p; - q10u8 = vld1q_u8(s); - - loop_filter_neon_16(qblimit, qlimit, qthresh, - q3u8, q4u8, q5u8, q6u8, q7u8, q8u8, q9u8, q10u8, - &q5u8, &q6u8, &q7u8, &q8u8); - - s -= (p * 5); - vst1q_u8(s, q5u8); - s += p; - vst1q_u8(s, q6u8); - s += p; - vst1q_u8(s, q7u8); - s += p; - vst1q_u8(s, q8u8); - return; -} diff --git a/libs/libvpx/vpx_dsp/arm/loopfilter_4_neon.asm b/libs/libvpx/vpx_dsp/arm/loopfilter_4_neon.asm index e45e34cd4c..9371158984 100644 --- a/libs/libvpx/vpx_dsp/arm/loopfilter_4_neon.asm +++ b/libs/libvpx/vpx_dsp/arm/loopfilter_4_neon.asm @@ -16,37 +16,28 @@ ; Currently vpx only works on iterations 8 at a time. The vp8 loop filter ; works on 16 iterations at a time. -; TODO(fgalligan): See about removing the count code as this function is only -; called with a count of 1. 
; ; void vpx_lpf_horizontal_4_neon(uint8_t *s, ; int p /* pitch */, ; const uint8_t *blimit, ; const uint8_t *limit, -; const uint8_t *thresh, -; int count) +; const uint8_t *thresh) ; ; r0 uint8_t *s, ; r1 int p, /* pitch */ ; r2 const uint8_t *blimit, ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, -; sp+4 int count |vpx_lpf_horizontal_4_neon| PROC push {lr} vld1.8 {d0[]}, [r2] ; duplicate *blimit - ldr r12, [sp, #8] ; load count ldr r2, [sp, #4] ; load thresh add r1, r1, r1 ; double pitch - cmp r12, #0 - beq end_vpx_lf_h_edge - vld1.8 {d1[]}, [r3] ; duplicate *limit vld1.8 {d2[]}, [r2] ; duplicate *thresh -count_lf_h_loop sub r2, r0, r1, lsl #1 ; move src pointer down by 4 lines add r3, r2, r1, lsr #1 ; set to 3 lines down @@ -69,47 +60,34 @@ count_lf_h_loop vst1.u8 {d6}, [r2@64], r1 ; store oq0 vst1.u8 {d7}, [r3@64], r1 ; store oq1 - add r0, r0, #8 - subs r12, r12, #1 - bne count_lf_h_loop - -end_vpx_lf_h_edge pop {pc} ENDP ; |vpx_lpf_horizontal_4_neon| ; Currently vpx only works on iterations 8 at a time. The vp8 loop filter ; works on 16 iterations at a time. -; TODO(fgalligan): See about removing the count code as this function is only -; called with a count of 1. 
; ; void vpx_lpf_vertical_4_neon(uint8_t *s, ; int p /* pitch */, ; const uint8_t *blimit, ; const uint8_t *limit, -; const uint8_t *thresh, -; int count) +; const uint8_t *thresh) ; ; r0 uint8_t *s, ; r1 int p, /* pitch */ ; r2 const uint8_t *blimit, ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, -; sp+4 int count |vpx_lpf_vertical_4_neon| PROC push {lr} vld1.8 {d0[]}, [r2] ; duplicate *blimit - ldr r12, [sp, #8] ; load count vld1.8 {d1[]}, [r3] ; duplicate *limit ldr r3, [sp, #4] ; load thresh sub r2, r0, #4 ; move s pointer down by 4 columns - cmp r12, #0 - beq end_vpx_lf_v_edge vld1.8 {d2[]}, [r3] ; duplicate *thresh -count_lf_v_loop vld1.u8 {d3}, [r2], r1 ; load s data vld1.u8 {d4}, [r2], r1 vld1.u8 {d5}, [r2], r1 @@ -149,12 +127,6 @@ count_lf_v_loop vst4.8 {d4[6], d5[6], d6[6], d7[6]}, [r0], r1 vst4.8 {d4[7], d5[7], d6[7], d7[7]}, [r0] - add r0, r0, r1, lsl #3 ; s += pitch * 8 - subs r12, r12, #1 - subne r2, r0, #4 ; move s pointer down by 4 columns - bne count_lf_v_loop - -end_vpx_lf_v_edge pop {pc} ENDP ; |vpx_lpf_vertical_4_neon| diff --git a/libs/libvpx/vpx_dsp/arm/loopfilter_4_neon.c b/libs/libvpx/vpx_dsp/arm/loopfilter_4_neon.c deleted file mode 100644 index 7ad411aea2..0000000000 --- a/libs/libvpx/vpx_dsp/arm/loopfilter_4_neon.c +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include - -#include "./vpx_dsp_rtcd.h" - -static INLINE void loop_filter_neon( - uint8x8_t dblimit, // flimit - uint8x8_t dlimit, // limit - uint8x8_t dthresh, // thresh - uint8x8_t d3u8, // p3 - uint8x8_t d4u8, // p2 - uint8x8_t d5u8, // p1 - uint8x8_t d6u8, // p0 - uint8x8_t d7u8, // q0 - uint8x8_t d16u8, // q1 - uint8x8_t d17u8, // q2 - uint8x8_t d18u8, // q3 - uint8x8_t *d4ru8, // p1 - uint8x8_t *d5ru8, // p0 - uint8x8_t *d6ru8, // q0 - uint8x8_t *d7ru8) { // q1 - uint8x8_t d19u8, d20u8, d21u8, d22u8, d23u8, d27u8, d28u8; - int16x8_t q12s16; - int8x8_t d19s8, d20s8, d21s8, d26s8, d27s8, d28s8; - - d19u8 = vabd_u8(d3u8, d4u8); - d20u8 = vabd_u8(d4u8, d5u8); - d21u8 = vabd_u8(d5u8, d6u8); - d22u8 = vabd_u8(d16u8, d7u8); - d3u8 = vabd_u8(d17u8, d16u8); - d4u8 = vabd_u8(d18u8, d17u8); - - d19u8 = vmax_u8(d19u8, d20u8); - d20u8 = vmax_u8(d21u8, d22u8); - d3u8 = vmax_u8(d3u8, d4u8); - d23u8 = vmax_u8(d19u8, d20u8); - - d17u8 = vabd_u8(d6u8, d7u8); - - d21u8 = vcgt_u8(d21u8, dthresh); - d22u8 = vcgt_u8(d22u8, dthresh); - d23u8 = vmax_u8(d23u8, d3u8); - - d28u8 = vabd_u8(d5u8, d16u8); - d17u8 = vqadd_u8(d17u8, d17u8); - - d23u8 = vcge_u8(dlimit, d23u8); - - d18u8 = vdup_n_u8(0x80); - d5u8 = veor_u8(d5u8, d18u8); - d6u8 = veor_u8(d6u8, d18u8); - d7u8 = veor_u8(d7u8, d18u8); - d16u8 = veor_u8(d16u8, d18u8); - - d28u8 = vshr_n_u8(d28u8, 1); - d17u8 = vqadd_u8(d17u8, d28u8); - - d19u8 = vdup_n_u8(3); - - d28s8 = vsub_s8(vreinterpret_s8_u8(d7u8), - vreinterpret_s8_u8(d6u8)); - - d17u8 = vcge_u8(dblimit, d17u8); - - d27s8 = vqsub_s8(vreinterpret_s8_u8(d5u8), - vreinterpret_s8_u8(d16u8)); - - d22u8 = vorr_u8(d21u8, d22u8); - - q12s16 = vmull_s8(d28s8, vreinterpret_s8_u8(d19u8)); - - d27u8 = vand_u8(vreinterpret_u8_s8(d27s8), d22u8); - d23u8 = vand_u8(d23u8, d17u8); - - q12s16 = vaddw_s8(q12s16, vreinterpret_s8_u8(d27u8)); - - d17u8 = vdup_n_u8(4); - - d27s8 = vqmovn_s16(q12s16); - d27u8 = vand_u8(vreinterpret_u8_s8(d27s8), d23u8); - d27s8 = vreinterpret_s8_u8(d27u8); 
- - d28s8 = vqadd_s8(d27s8, vreinterpret_s8_u8(d19u8)); - d27s8 = vqadd_s8(d27s8, vreinterpret_s8_u8(d17u8)); - d28s8 = vshr_n_s8(d28s8, 3); - d27s8 = vshr_n_s8(d27s8, 3); - - d19s8 = vqadd_s8(vreinterpret_s8_u8(d6u8), d28s8); - d26s8 = vqsub_s8(vreinterpret_s8_u8(d7u8), d27s8); - - d27s8 = vrshr_n_s8(d27s8, 1); - d27s8 = vbic_s8(d27s8, vreinterpret_s8_u8(d22u8)); - - d21s8 = vqadd_s8(vreinterpret_s8_u8(d5u8), d27s8); - d20s8 = vqsub_s8(vreinterpret_s8_u8(d16u8), d27s8); - - *d4ru8 = veor_u8(vreinterpret_u8_s8(d21s8), d18u8); - *d5ru8 = veor_u8(vreinterpret_u8_s8(d19s8), d18u8); - *d6ru8 = veor_u8(vreinterpret_u8_s8(d26s8), d18u8); - *d7ru8 = veor_u8(vreinterpret_u8_s8(d20s8), d18u8); - return; -} - -void vpx_lpf_horizontal_4_neon( - uint8_t *src, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { - int i; - uint8_t *s, *psrc; - uint8x8_t dblimit, dlimit, dthresh; - uint8x8_t d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8; - - if (count == 0) // end_vpx_lf_h_edge - return; - - dblimit = vld1_u8(blimit); - dlimit = vld1_u8(limit); - dthresh = vld1_u8(thresh); - - psrc = src - (pitch << 2); - for (i = 0; i < count; i++) { - s = psrc + i * 8; - - d3u8 = vld1_u8(s); - s += pitch; - d4u8 = vld1_u8(s); - s += pitch; - d5u8 = vld1_u8(s); - s += pitch; - d6u8 = vld1_u8(s); - s += pitch; - d7u8 = vld1_u8(s); - s += pitch; - d16u8 = vld1_u8(s); - s += pitch; - d17u8 = vld1_u8(s); - s += pitch; - d18u8 = vld1_u8(s); - - loop_filter_neon(dblimit, dlimit, dthresh, - d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8, - &d4u8, &d5u8, &d6u8, &d7u8); - - s -= (pitch * 5); - vst1_u8(s, d4u8); - s += pitch; - vst1_u8(s, d5u8); - s += pitch; - vst1_u8(s, d6u8); - s += pitch; - vst1_u8(s, d7u8); - } - return; -} - -void vpx_lpf_vertical_4_neon( - uint8_t *src, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { - int i, pitch8; - uint8_t *s; - uint8x8_t dblimit, dlimit, dthresh; - 
uint8x8_t d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8; - uint32x2x2_t d2tmp0, d2tmp1, d2tmp2, d2tmp3; - uint16x4x2_t d2tmp4, d2tmp5, d2tmp6, d2tmp7; - uint8x8x2_t d2tmp8, d2tmp9, d2tmp10, d2tmp11; - uint8x8x4_t d4Result; - - if (count == 0) // end_vpx_lf_h_edge - return; - - dblimit = vld1_u8(blimit); - dlimit = vld1_u8(limit); - dthresh = vld1_u8(thresh); - - pitch8 = pitch * 8; - for (i = 0; i < count; i++, src += pitch8) { - s = src - (i + 1) * 4; - - d3u8 = vld1_u8(s); - s += pitch; - d4u8 = vld1_u8(s); - s += pitch; - d5u8 = vld1_u8(s); - s += pitch; - d6u8 = vld1_u8(s); - s += pitch; - d7u8 = vld1_u8(s); - s += pitch; - d16u8 = vld1_u8(s); - s += pitch; - d17u8 = vld1_u8(s); - s += pitch; - d18u8 = vld1_u8(s); - - d2tmp0 = vtrn_u32(vreinterpret_u32_u8(d3u8), - vreinterpret_u32_u8(d7u8)); - d2tmp1 = vtrn_u32(vreinterpret_u32_u8(d4u8), - vreinterpret_u32_u8(d16u8)); - d2tmp2 = vtrn_u32(vreinterpret_u32_u8(d5u8), - vreinterpret_u32_u8(d17u8)); - d2tmp3 = vtrn_u32(vreinterpret_u32_u8(d6u8), - vreinterpret_u32_u8(d18u8)); - - d2tmp4 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[0]), - vreinterpret_u16_u32(d2tmp2.val[0])); - d2tmp5 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[0]), - vreinterpret_u16_u32(d2tmp3.val[0])); - d2tmp6 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[1]), - vreinterpret_u16_u32(d2tmp2.val[1])); - d2tmp7 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[1]), - vreinterpret_u16_u32(d2tmp3.val[1])); - - d2tmp8 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[0]), - vreinterpret_u8_u16(d2tmp5.val[0])); - d2tmp9 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[1]), - vreinterpret_u8_u16(d2tmp5.val[1])); - d2tmp10 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[0]), - vreinterpret_u8_u16(d2tmp7.val[0])); - d2tmp11 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[1]), - vreinterpret_u8_u16(d2tmp7.val[1])); - - d3u8 = d2tmp8.val[0]; - d4u8 = d2tmp8.val[1]; - d5u8 = d2tmp9.val[0]; - d6u8 = d2tmp9.val[1]; - d7u8 = d2tmp10.val[0]; - d16u8 = d2tmp10.val[1]; - d17u8 = d2tmp11.val[0]; - d18u8 = 
d2tmp11.val[1]; - - loop_filter_neon(dblimit, dlimit, dthresh, - d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8, - &d4u8, &d5u8, &d6u8, &d7u8); - - d4Result.val[0] = d4u8; - d4Result.val[1] = d5u8; - d4Result.val[2] = d6u8; - d4Result.val[3] = d7u8; - - src -= 2; - vst4_lane_u8(src, d4Result, 0); - src += pitch; - vst4_lane_u8(src, d4Result, 1); - src += pitch; - vst4_lane_u8(src, d4Result, 2); - src += pitch; - vst4_lane_u8(src, d4Result, 3); - src += pitch; - vst4_lane_u8(src, d4Result, 4); - src += pitch; - vst4_lane_u8(src, d4Result, 5); - src += pitch; - vst4_lane_u8(src, d4Result, 6); - src += pitch; - vst4_lane_u8(src, d4Result, 7); - } - return; -} diff --git a/libs/libvpx/vpx_dsp/arm/loopfilter_8_neon.asm b/libs/libvpx/vpx_dsp/arm/loopfilter_8_neon.asm index e81734c046..a042d40acb 100644 --- a/libs/libvpx/vpx_dsp/arm/loopfilter_8_neon.asm +++ b/libs/libvpx/vpx_dsp/arm/loopfilter_8_neon.asm @@ -9,42 +9,35 @@ ; EXPORT |vpx_lpf_horizontal_8_neon| + EXPORT |vpx_lpf_horizontal_8_dual_neon| EXPORT |vpx_lpf_vertical_8_neon| + EXPORT |vpx_lpf_vertical_8_dual_neon| ARM AREA ||.text||, CODE, READONLY, ALIGN=2 ; Currently vpx only works on iterations 8 at a time. The vp8 loop filter ; works on 16 iterations at a time. -; TODO(fgalligan): See about removing the count code as this function is only -; called with a count of 1. 
; ; void vpx_lpf_horizontal_8_neon(uint8_t *s, int p, ; const uint8_t *blimit, ; const uint8_t *limit, -; const uint8_t *thresh, -; int count) +; const uint8_t *thresh) ; r0 uint8_t *s, ; r1 int p, /* pitch */ ; r2 const uint8_t *blimit, ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, -; sp+4 int count |vpx_lpf_horizontal_8_neon| PROC push {r4-r5, lr} vld1.8 {d0[]}, [r2] ; duplicate *blimit - ldr r12, [sp, #16] ; load count ldr r2, [sp, #12] ; load thresh add r1, r1, r1 ; double pitch - cmp r12, #0 - beq end_vpx_mblf_h_edge - vld1.8 {d1[]}, [r3] ; duplicate *limit vld1.8 {d2[]}, [r2] ; duplicate *thresh -count_mblf_h_loop sub r3, r0, r1, lsl #1 ; move src pointer down by 4 lines add r2, r3, r1, lsr #1 ; set to 3 lines down @@ -69,43 +62,64 @@ count_mblf_h_loop vst1.u8 {d4}, [r2@64], r1 ; store oq1 vst1.u8 {d5}, [r3@64], r1 ; store oq2 - add r0, r0, #8 - subs r12, r12, #1 - bne count_mblf_h_loop - -end_vpx_mblf_h_edge pop {r4-r5, pc} ENDP ; |vpx_lpf_horizontal_8_neon| +;void vpx_lpf_horizontal_8_dual_neon(uint8_t *s, +; int p, +; const uint8_t *blimit0, +; const uint8_t *limit0, +; const uint8_t *thresh0, +; const uint8_t *blimit1, +; const uint8_t *limit1, +; const uint8_t *thresh1) +; r0 uint8_t *s, +; r1 int p, /* pitch */ +; r2 const uint8_t *blimit0, +; r3 const uint8_t *limit0, +; sp const uint8_t *thresh0, +; sp + 4 const uint8_t *blimit1, +; sp + 8 const uint8_t *limit1, +; sp + 12 const uint8_t *thresh1, +|vpx_lpf_horizontal_8_dual_neon| PROC + push {r0-r1, lr} + ldr lr, [sp, #12] + push {lr} ; thresh0 + bl vpx_lpf_horizontal_8_neon + + ldr r2, [sp, #20] ; blimit1 + ldr r3, [sp, #24] ; limit1 + ldr lr, [sp, #28] + str lr, [sp, #16] ; thresh1 + add sp, #4 + pop {r0-r1, lr} + add r0, #8 ; s + 8 + b vpx_lpf_horizontal_8_neon + ENDP ; |vpx_lpf_horizontal_8_dual_neon| + ; void vpx_lpf_vertical_8_neon(uint8_t *s, ; int pitch, ; const uint8_t *blimit, ; const uint8_t *limit, -; const uint8_t *thresh, -; int count) +; const uint8_t *thresh) ; ; r0 uint8_t 
*s, ; r1 int pitch, ; r2 const uint8_t *blimit, ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, -; sp+4 int count |vpx_lpf_vertical_8_neon| PROC push {r4-r5, lr} vld1.8 {d0[]}, [r2] ; duplicate *blimit - ldr r12, [sp, #16] ; load count vld1.8 {d1[]}, [r3] ; duplicate *limit ldr r3, [sp, #12] ; load thresh sub r2, r0, #4 ; move s pointer down by 4 columns - cmp r12, #0 - beq end_vpx_mblf_v_edge vld1.8 {d2[]}, [r3] ; duplicate *thresh -count_mblf_v_loop vld1.u8 {d3}, [r2], r1 ; load s data vld1.u8 {d4}, [r2], r1 vld1.u8 {d5}, [r2], r1 @@ -156,15 +170,41 @@ count_mblf_v_loop vst2.8 {d4[6], d5[6]}, [r3], r1 vst2.8 {d4[7], d5[7]}, [r3] - add r0, r0, r1, lsl #3 ; s += pitch * 8 - subs r12, r12, #1 - subne r2, r0, #4 ; move s pointer down by 4 columns - bne count_mblf_v_loop - -end_vpx_mblf_v_edge pop {r4-r5, pc} ENDP ; |vpx_lpf_vertical_8_neon| +;void vpx_lpf_vertical_8_dual_neon(uint8_t *s, +; int pitch, +; const uint8_t *blimit0, +; const uint8_t *limit0, +; const uint8_t *thresh0, +; const uint8_t *blimit1, +; const uint8_t *limit1, +; const uint8_t *thresh1) +; r0 uint8_t *s, +; r1 int pitch +; r2 const uint8_t *blimit0, +; r3 const uint8_t *limit0, +; sp const uint8_t *thresh0, +; sp + 4 const uint8_t *blimit1, +; sp + 8 const uint8_t *limit1, +; sp + 12 const uint8_t *thresh1, +|vpx_lpf_vertical_8_dual_neon| PROC + push {r0-r1, lr} + ldr lr, [sp, #12] + push {lr} ; thresh0 + bl vpx_lpf_vertical_8_neon + + ldr r2, [sp, #20] ; blimit1 + ldr r3, [sp, #24] ; limit1 + ldr lr, [sp, #28] + str lr, [sp, #16] ; thresh1 + add sp, #4 + pop {r0-r1, lr} + add r0, r1, lsl #3 ; s + 8 * pitch + b vpx_lpf_vertical_8_neon + ENDP ; |vpx_lpf_vertical_8_dual_neon| + ; void vpx_mbloop_filter_neon(); ; This is a helper function for the loopfilters. The invidual functions do the ; necessary load, transpose (if necessary) and store. 
The function does not use diff --git a/libs/libvpx/vpx_dsp/arm/loopfilter_8_neon.c b/libs/libvpx/vpx_dsp/arm/loopfilter_8_neon.c deleted file mode 100644 index a887e2ee54..0000000000 --- a/libs/libvpx/vpx_dsp/arm/loopfilter_8_neon.c +++ /dev/null @@ -1,453 +0,0 @@ -/* - * Copyright (c) 2014 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include - -#include "./vpx_dsp_rtcd.h" - -static INLINE void mbloop_filter_neon( - uint8x8_t dblimit, // mblimit - uint8x8_t dlimit, // limit - uint8x8_t dthresh, // thresh - uint8x8_t d3u8, // p2 - uint8x8_t d4u8, // p2 - uint8x8_t d5u8, // p1 - uint8x8_t d6u8, // p0 - uint8x8_t d7u8, // q0 - uint8x8_t d16u8, // q1 - uint8x8_t d17u8, // q2 - uint8x8_t d18u8, // q3 - uint8x8_t *d0ru8, // p1 - uint8x8_t *d1ru8, // p1 - uint8x8_t *d2ru8, // p0 - uint8x8_t *d3ru8, // q0 - uint8x8_t *d4ru8, // q1 - uint8x8_t *d5ru8) { // q1 - uint32_t flat; - uint8x8_t d0u8, d1u8, d2u8, d19u8, d20u8, d21u8, d22u8, d23u8, d24u8; - uint8x8_t d25u8, d26u8, d27u8, d28u8, d29u8, d30u8, d31u8; - int16x8_t q15s16; - uint16x8_t q10u16, q14u16; - int8x8_t d21s8, d24s8, d25s8, d26s8, d28s8, d29s8, d30s8; - - d19u8 = vabd_u8(d3u8, d4u8); - d20u8 = vabd_u8(d4u8, d5u8); - d21u8 = vabd_u8(d5u8, d6u8); - d22u8 = vabd_u8(d16u8, d7u8); - d23u8 = vabd_u8(d17u8, d16u8); - d24u8 = vabd_u8(d18u8, d17u8); - - d19u8 = vmax_u8(d19u8, d20u8); - d20u8 = vmax_u8(d21u8, d22u8); - - d25u8 = vabd_u8(d6u8, d4u8); - - d23u8 = vmax_u8(d23u8, d24u8); - - d26u8 = vabd_u8(d7u8, d17u8); - - d19u8 = vmax_u8(d19u8, d20u8); - - d24u8 = vabd_u8(d6u8, d7u8); - d27u8 = vabd_u8(d3u8, d6u8); - d28u8 = vabd_u8(d18u8, d7u8); - - d19u8 = vmax_u8(d19u8, d23u8); - - 
d23u8 = vabd_u8(d5u8, d16u8); - d24u8 = vqadd_u8(d24u8, d24u8); - - - d19u8 = vcge_u8(dlimit, d19u8); - - - d25u8 = vmax_u8(d25u8, d26u8); - d26u8 = vmax_u8(d27u8, d28u8); - - d23u8 = vshr_n_u8(d23u8, 1); - - d25u8 = vmax_u8(d25u8, d26u8); - - d24u8 = vqadd_u8(d24u8, d23u8); - - d20u8 = vmax_u8(d20u8, d25u8); - - d23u8 = vdup_n_u8(1); - d24u8 = vcge_u8(dblimit, d24u8); - - d21u8 = vcgt_u8(d21u8, dthresh); - - d20u8 = vcge_u8(d23u8, d20u8); - - d19u8 = vand_u8(d19u8, d24u8); - - d23u8 = vcgt_u8(d22u8, dthresh); - - d20u8 = vand_u8(d20u8, d19u8); - - d22u8 = vdup_n_u8(0x80); - - d23u8 = vorr_u8(d21u8, d23u8); - - q10u16 = vcombine_u16(vreinterpret_u16_u8(d20u8), - vreinterpret_u16_u8(d21u8)); - - d30u8 = vshrn_n_u16(q10u16, 4); - flat = vget_lane_u32(vreinterpret_u32_u8(d30u8), 0); - - if (flat == 0xffffffff) { // Check for all 1's, power_branch_only - d27u8 = vdup_n_u8(3); - d21u8 = vdup_n_u8(2); - q14u16 = vaddl_u8(d6u8, d7u8); - q14u16 = vmlal_u8(q14u16, d3u8, d27u8); - q14u16 = vmlal_u8(q14u16, d4u8, d21u8); - q14u16 = vaddw_u8(q14u16, d5u8); - *d0ru8 = vqrshrn_n_u16(q14u16, 3); - - q14u16 = vsubw_u8(q14u16, d3u8); - q14u16 = vsubw_u8(q14u16, d4u8); - q14u16 = vaddw_u8(q14u16, d5u8); - q14u16 = vaddw_u8(q14u16, d16u8); - *d1ru8 = vqrshrn_n_u16(q14u16, 3); - - q14u16 = vsubw_u8(q14u16, d3u8); - q14u16 = vsubw_u8(q14u16, d5u8); - q14u16 = vaddw_u8(q14u16, d6u8); - q14u16 = vaddw_u8(q14u16, d17u8); - *d2ru8 = vqrshrn_n_u16(q14u16, 3); - - q14u16 = vsubw_u8(q14u16, d3u8); - q14u16 = vsubw_u8(q14u16, d6u8); - q14u16 = vaddw_u8(q14u16, d7u8); - q14u16 = vaddw_u8(q14u16, d18u8); - *d3ru8 = vqrshrn_n_u16(q14u16, 3); - - q14u16 = vsubw_u8(q14u16, d4u8); - q14u16 = vsubw_u8(q14u16, d7u8); - q14u16 = vaddw_u8(q14u16, d16u8); - q14u16 = vaddw_u8(q14u16, d18u8); - *d4ru8 = vqrshrn_n_u16(q14u16, 3); - - q14u16 = vsubw_u8(q14u16, d5u8); - q14u16 = vsubw_u8(q14u16, d16u8); - q14u16 = vaddw_u8(q14u16, d17u8); - q14u16 = vaddw_u8(q14u16, d18u8); - *d5ru8 = vqrshrn_n_u16(q14u16, 
3); - } else { - d21u8 = veor_u8(d7u8, d22u8); - d24u8 = veor_u8(d6u8, d22u8); - d25u8 = veor_u8(d5u8, d22u8); - d26u8 = veor_u8(d16u8, d22u8); - - d27u8 = vdup_n_u8(3); - - d28s8 = vsub_s8(vreinterpret_s8_u8(d21u8), vreinterpret_s8_u8(d24u8)); - d29s8 = vqsub_s8(vreinterpret_s8_u8(d25u8), vreinterpret_s8_u8(d26u8)); - - q15s16 = vmull_s8(d28s8, vreinterpret_s8_u8(d27u8)); - - d29s8 = vand_s8(d29s8, vreinterpret_s8_u8(d23u8)); - - q15s16 = vaddw_s8(q15s16, d29s8); - - d29u8 = vdup_n_u8(4); - - d28s8 = vqmovn_s16(q15s16); - - d28s8 = vand_s8(d28s8, vreinterpret_s8_u8(d19u8)); - - d30s8 = vqadd_s8(d28s8, vreinterpret_s8_u8(d27u8)); - d29s8 = vqadd_s8(d28s8, vreinterpret_s8_u8(d29u8)); - d30s8 = vshr_n_s8(d30s8, 3); - d29s8 = vshr_n_s8(d29s8, 3); - - d24s8 = vqadd_s8(vreinterpret_s8_u8(d24u8), d30s8); - d21s8 = vqsub_s8(vreinterpret_s8_u8(d21u8), d29s8); - - d29s8 = vrshr_n_s8(d29s8, 1); - d29s8 = vbic_s8(d29s8, vreinterpret_s8_u8(d23u8)); - - d25s8 = vqadd_s8(vreinterpret_s8_u8(d25u8), d29s8); - d26s8 = vqsub_s8(vreinterpret_s8_u8(d26u8), d29s8); - - if (flat == 0) { // filter_branch_only - *d0ru8 = d4u8; - *d1ru8 = veor_u8(vreinterpret_u8_s8(d25s8), d22u8); - *d2ru8 = veor_u8(vreinterpret_u8_s8(d24s8), d22u8); - *d3ru8 = veor_u8(vreinterpret_u8_s8(d21s8), d22u8); - *d4ru8 = veor_u8(vreinterpret_u8_s8(d26s8), d22u8); - *d5ru8 = d17u8; - return; - } - - d21u8 = veor_u8(vreinterpret_u8_s8(d21s8), d22u8); - d24u8 = veor_u8(vreinterpret_u8_s8(d24s8), d22u8); - d25u8 = veor_u8(vreinterpret_u8_s8(d25s8), d22u8); - d26u8 = veor_u8(vreinterpret_u8_s8(d26s8), d22u8); - - d23u8 = vdup_n_u8(2); - q14u16 = vaddl_u8(d6u8, d7u8); - q14u16 = vmlal_u8(q14u16, d3u8, d27u8); - q14u16 = vmlal_u8(q14u16, d4u8, d23u8); - - d0u8 = vbsl_u8(d20u8, dblimit, d4u8); - - q14u16 = vaddw_u8(q14u16, d5u8); - - d1u8 = vbsl_u8(d20u8, dlimit, d25u8); - - d30u8 = vqrshrn_n_u16(q14u16, 3); - - q14u16 = vsubw_u8(q14u16, d3u8); - q14u16 = vsubw_u8(q14u16, d4u8); - q14u16 = vaddw_u8(q14u16, d5u8); - 
q14u16 = vaddw_u8(q14u16, d16u8); - - d2u8 = vbsl_u8(d20u8, dthresh, d24u8); - - d31u8 = vqrshrn_n_u16(q14u16, 3); - - q14u16 = vsubw_u8(q14u16, d3u8); - q14u16 = vsubw_u8(q14u16, d5u8); - q14u16 = vaddw_u8(q14u16, d6u8); - q14u16 = vaddw_u8(q14u16, d17u8); - - *d0ru8 = vbsl_u8(d20u8, d30u8, d0u8); - - d23u8 = vqrshrn_n_u16(q14u16, 3); - - q14u16 = vsubw_u8(q14u16, d3u8); - q14u16 = vsubw_u8(q14u16, d6u8); - q14u16 = vaddw_u8(q14u16, d7u8); - - *d1ru8 = vbsl_u8(d20u8, d31u8, d1u8); - - q14u16 = vaddw_u8(q14u16, d18u8); - - *d2ru8 = vbsl_u8(d20u8, d23u8, d2u8); - - d22u8 = vqrshrn_n_u16(q14u16, 3); - - q14u16 = vsubw_u8(q14u16, d4u8); - q14u16 = vsubw_u8(q14u16, d7u8); - q14u16 = vaddw_u8(q14u16, d16u8); - - d3u8 = vbsl_u8(d20u8, d3u8, d21u8); - - q14u16 = vaddw_u8(q14u16, d18u8); - - d4u8 = vbsl_u8(d20u8, d4u8, d26u8); - - d6u8 = vqrshrn_n_u16(q14u16, 3); - - q14u16 = vsubw_u8(q14u16, d5u8); - q14u16 = vsubw_u8(q14u16, d16u8); - q14u16 = vaddw_u8(q14u16, d17u8); - q14u16 = vaddw_u8(q14u16, d18u8); - - d5u8 = vbsl_u8(d20u8, d5u8, d17u8); - - d7u8 = vqrshrn_n_u16(q14u16, 3); - - *d3ru8 = vbsl_u8(d20u8, d22u8, d3u8); - *d4ru8 = vbsl_u8(d20u8, d6u8, d4u8); - *d5ru8 = vbsl_u8(d20u8, d7u8, d5u8); - } - return; -} - -void vpx_lpf_horizontal_8_neon( - uint8_t *src, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { - int i; - uint8_t *s, *psrc; - uint8x8_t dblimit, dlimit, dthresh; - uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8; - uint8x8_t d16u8, d17u8, d18u8; - - if (count == 0) // end_vpx_mblf_h_edge - return; - - dblimit = vld1_u8(blimit); - dlimit = vld1_u8(limit); - dthresh = vld1_u8(thresh); - - psrc = src - (pitch << 2); - for (i = 0; i < count; i++) { - s = psrc + i * 8; - - d3u8 = vld1_u8(s); - s += pitch; - d4u8 = vld1_u8(s); - s += pitch; - d5u8 = vld1_u8(s); - s += pitch; - d6u8 = vld1_u8(s); - s += pitch; - d7u8 = vld1_u8(s); - s += pitch; - d16u8 = vld1_u8(s); - s += pitch; - d17u8 = vld1_u8(s); 
- s += pitch; - d18u8 = vld1_u8(s); - - mbloop_filter_neon(dblimit, dlimit, dthresh, - d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8, - &d0u8, &d1u8, &d2u8, &d3u8, &d4u8, &d5u8); - - s -= (pitch * 6); - vst1_u8(s, d0u8); - s += pitch; - vst1_u8(s, d1u8); - s += pitch; - vst1_u8(s, d2u8); - s += pitch; - vst1_u8(s, d3u8); - s += pitch; - vst1_u8(s, d4u8); - s += pitch; - vst1_u8(s, d5u8); - } - return; -} - -void vpx_lpf_vertical_8_neon( - uint8_t *src, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { - int i; - uint8_t *s; - uint8x8_t dblimit, dlimit, dthresh; - uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8; - uint8x8_t d16u8, d17u8, d18u8; - uint32x2x2_t d2tmp0, d2tmp1, d2tmp2, d2tmp3; - uint16x4x2_t d2tmp4, d2tmp5, d2tmp6, d2tmp7; - uint8x8x2_t d2tmp8, d2tmp9, d2tmp10, d2tmp11; - uint8x8x4_t d4Result; - uint8x8x2_t d2Result; - - if (count == 0) - return; - - dblimit = vld1_u8(blimit); - dlimit = vld1_u8(limit); - dthresh = vld1_u8(thresh); - - for (i = 0; i < count; i++) { - s = src + (i * (pitch << 3)) - 4; - - d3u8 = vld1_u8(s); - s += pitch; - d4u8 = vld1_u8(s); - s += pitch; - d5u8 = vld1_u8(s); - s += pitch; - d6u8 = vld1_u8(s); - s += pitch; - d7u8 = vld1_u8(s); - s += pitch; - d16u8 = vld1_u8(s); - s += pitch; - d17u8 = vld1_u8(s); - s += pitch; - d18u8 = vld1_u8(s); - - d2tmp0 = vtrn_u32(vreinterpret_u32_u8(d3u8), - vreinterpret_u32_u8(d7u8)); - d2tmp1 = vtrn_u32(vreinterpret_u32_u8(d4u8), - vreinterpret_u32_u8(d16u8)); - d2tmp2 = vtrn_u32(vreinterpret_u32_u8(d5u8), - vreinterpret_u32_u8(d17u8)); - d2tmp3 = vtrn_u32(vreinterpret_u32_u8(d6u8), - vreinterpret_u32_u8(d18u8)); - - d2tmp4 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[0]), - vreinterpret_u16_u32(d2tmp2.val[0])); - d2tmp5 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[0]), - vreinterpret_u16_u32(d2tmp3.val[0])); - d2tmp6 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[1]), - vreinterpret_u16_u32(d2tmp2.val[1])); - d2tmp7 = 
vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[1]), - vreinterpret_u16_u32(d2tmp3.val[1])); - - d2tmp8 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[0]), - vreinterpret_u8_u16(d2tmp5.val[0])); - d2tmp9 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[1]), - vreinterpret_u8_u16(d2tmp5.val[1])); - d2tmp10 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[0]), - vreinterpret_u8_u16(d2tmp7.val[0])); - d2tmp11 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[1]), - vreinterpret_u8_u16(d2tmp7.val[1])); - - d3u8 = d2tmp8.val[0]; - d4u8 = d2tmp8.val[1]; - d5u8 = d2tmp9.val[0]; - d6u8 = d2tmp9.val[1]; - d7u8 = d2tmp10.val[0]; - d16u8 = d2tmp10.val[1]; - d17u8 = d2tmp11.val[0]; - d18u8 = d2tmp11.val[1]; - - mbloop_filter_neon(dblimit, dlimit, dthresh, - d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8, - &d0u8, &d1u8, &d2u8, &d3u8, &d4u8, &d5u8); - - d4Result.val[0] = d0u8; - d4Result.val[1] = d1u8; - d4Result.val[2] = d2u8; - d4Result.val[3] = d3u8; - - d2Result.val[0] = d4u8; - d2Result.val[1] = d5u8; - - s = src - 3; - vst4_lane_u8(s, d4Result, 0); - s += pitch; - vst4_lane_u8(s, d4Result, 1); - s += pitch; - vst4_lane_u8(s, d4Result, 2); - s += pitch; - vst4_lane_u8(s, d4Result, 3); - s += pitch; - vst4_lane_u8(s, d4Result, 4); - s += pitch; - vst4_lane_u8(s, d4Result, 5); - s += pitch; - vst4_lane_u8(s, d4Result, 6); - s += pitch; - vst4_lane_u8(s, d4Result, 7); - - s = src + 1; - vst2_lane_u8(s, d2Result, 0); - s += pitch; - vst2_lane_u8(s, d2Result, 1); - s += pitch; - vst2_lane_u8(s, d2Result, 2); - s += pitch; - vst2_lane_u8(s, d2Result, 3); - s += pitch; - vst2_lane_u8(s, d2Result, 4); - s += pitch; - vst2_lane_u8(s, d2Result, 5); - s += pitch; - vst2_lane_u8(s, d2Result, 6); - s += pitch; - vst2_lane_u8(s, d2Result, 7); - } - return; -} diff --git a/libs/libvpx/vpx_dsp/arm/loopfilter_mb_neon.asm b/libs/libvpx/vpx_dsp/arm/loopfilter_mb_neon.asm index 20d9cfb113..5279ecfb7f 100644 --- a/libs/libvpx/vpx_dsp/arm/loopfilter_mb_neon.asm +++ b/libs/libvpx/vpx_dsp/arm/loopfilter_mb_neon.asm @@ -8,27 +8,29 
@@ ; be found in the AUTHORS file in the root of the source tree. ; - EXPORT |vpx_lpf_horizontal_16_neon| + EXPORT |vpx_lpf_horizontal_edge_8_neon| + EXPORT |vpx_lpf_horizontal_edge_16_neon| EXPORT |vpx_lpf_vertical_16_neon| + EXPORT |vpx_lpf_vertical_16_dual_neon| ARM AREA ||.text||, CODE, READONLY, ALIGN=2 -; void vpx_lpf_horizontal_16_neon(uint8_t *s, int p, -; const uint8_t *blimit, -; const uint8_t *limit, -; const uint8_t *thresh -; int count) +; void mb_lpf_horizontal_edge(uint8_t *s, int p, +; const uint8_t *blimit, +; const uint8_t *limit, +; const uint8_t *thresh, +; int count) ; r0 uint8_t *s, ; r1 int p, /* pitch */ ; r2 const uint8_t *blimit, ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, -|vpx_lpf_horizontal_16_neon| PROC +; r12 int count +|mb_lpf_horizontal_edge| PROC push {r4-r8, lr} vpush {d8-d15} ldr r4, [sp, #88] ; load thresh - ldr r12, [sp, #92] ; load count h_count vld1.8 {d16[]}, [r2] ; load *blimit @@ -115,22 +117,51 @@ h_next vpop {d8-d15} pop {r4-r8, pc} - ENDP ; |vpx_lpf_horizontal_16_neon| + ENDP ; |mb_lpf_horizontal_edge| -; void vpx_lpf_vertical_16_neon(uint8_t *s, int p, -; const uint8_t *blimit, -; const uint8_t *limit, -; const uint8_t *thresh) +; void vpx_lpf_horizontal_edge_8_neon(uint8_t *s, int pitch, +; const uint8_t *blimit, +; const uint8_t *limit, +; const uint8_t *thresh) +; r0 uint8_t *s, +; r1 int pitch, +; r2 const uint8_t *blimit, +; r3 const uint8_t *limit, +; sp const uint8_t *thresh +|vpx_lpf_horizontal_edge_8_neon| PROC + mov r12, #1 + b mb_lpf_horizontal_edge + ENDP ; |vpx_lpf_horizontal_edge_8_neon| + +; void vpx_lpf_horizontal_edge_16_neon(uint8_t *s, int pitch, +; const uint8_t *blimit, +; const uint8_t *limit, +; const uint8_t *thresh) +; r0 uint8_t *s, +; r1 int pitch, +; r2 const uint8_t *blimit, +; r3 const uint8_t *limit, +; sp const uint8_t *thresh +|vpx_lpf_horizontal_edge_16_neon| PROC + mov r12, #2 + b mb_lpf_horizontal_edge + ENDP ; |vpx_lpf_horizontal_edge_16_neon| + +; void 
mb_lpf_vertical_edge_w(uint8_t *s, int p, const uint8_t *blimit, +; const uint8_t *limit, const uint8_t *thresh, +; int count) { ; r0 uint8_t *s, ; r1 int p, /* pitch */ ; r2 const uint8_t *blimit, ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, -|vpx_lpf_vertical_16_neon| PROC +; r12 int count +|mb_lpf_vertical_edge_w| PROC push {r4-r8, lr} vpush {d8-d15} ldr r4, [sp, #88] ; load thresh +v_count vld1.8 {d16[]}, [r2] ; load *blimit vld1.8 {d17[]}, [r3] ; load *limit vld1.8 {d18[]}, [r4] ; load *thresh @@ -183,20 +214,21 @@ h_next ; flat && mask were not set for any of the channels. Just store the values ; from filter. - sub r8, r0, #2 + sub r0, #2 vswp d23, d25 - vst4.8 {d23[0], d24[0], d25[0], d26[0]}, [r8], r1 - vst4.8 {d23[1], d24[1], d25[1], d26[1]}, [r8], r1 - vst4.8 {d23[2], d24[2], d25[2], d26[2]}, [r8], r1 - vst4.8 {d23[3], d24[3], d25[3], d26[3]}, [r8], r1 - vst4.8 {d23[4], d24[4], d25[4], d26[4]}, [r8], r1 - vst4.8 {d23[5], d24[5], d25[5], d26[5]}, [r8], r1 - vst4.8 {d23[6], d24[6], d25[6], d26[6]}, [r8], r1 - vst4.8 {d23[7], d24[7], d25[7], d26[7]}, [r8], r1 + vst4.8 {d23[0], d24[0], d25[0], d26[0]}, [r0], r1 + vst4.8 {d23[1], d24[1], d25[1], d26[1]}, [r0], r1 + vst4.8 {d23[2], d24[2], d25[2], d26[2]}, [r0], r1 + vst4.8 {d23[3], d24[3], d25[3], d26[3]}, [r0], r1 + vst4.8 {d23[4], d24[4], d25[4], d26[4]}, [r0], r1 + vst4.8 {d23[5], d24[5], d25[5], d26[5]}, [r0], r1 + vst4.8 {d23[6], d24[6], d25[6], d26[6]}, [r0], r1 + vst4.8 {d23[7], d24[7], d25[7], d26[7]}, [r0], r1 + add r0, #2 - b v_end + b v_next v_mbfilter tst r7, #2 @@ -223,7 +255,7 @@ v_mbfilter vst3.8 {d18[7], d19[7], d20[7]}, [r8], r1 vst3.8 {d21[7], d22[7], d23[7]}, [r0], r1 - b v_end + b v_next v_wide_mbfilter sub r8, r0, #8 @@ -275,12 +307,40 @@ v_wide_mbfilter vst1.8 {d19}, [r8@64], r1 vst1.8 {d15}, [r0@64], r1 -v_end +v_next + subs r12, #1 + bne v_count + vpop {d8-d15} pop {r4-r8, pc} + ENDP ; |mb_lpf_vertical_edge_w| + +; void vpx_lpf_vertical_16_neon(uint8_t *s, int p, const uint8_t 
*blimit, +; const uint8_t *limit, const uint8_t *thresh) +; r0 uint8_t *s, +; r1 int p, /* pitch */ +; r2 const uint8_t *blimit, +; r3 const uint8_t *limit, +; sp const uint8_t *thresh +|vpx_lpf_vertical_16_neon| PROC + mov r12, #1 + b mb_lpf_vertical_edge_w ENDP ; |vpx_lpf_vertical_16_neon| +; void vpx_lpf_vertical_16_dual_neon(uint8_t *s, int p, const uint8_t *blimit, +; const uint8_t *limit, +; const uint8_t *thresh) +; r0 uint8_t *s, +; r1 int p, /* pitch */ +; r2 const uint8_t *blimit, +; r3 const uint8_t *limit, +; sp const uint8_t *thresh +|vpx_lpf_vertical_16_dual_neon| PROC + mov r12, #2 + b mb_lpf_vertical_edge_w + ENDP ; |vpx_lpf_vertical_16_dual_neon| + ; void vpx_wide_mbfilter_neon(); ; This is a helper function for the loopfilters. The invidual functions do the ; necessary load, transpose (if necessary) and store. diff --git a/libs/libvpx/vpx_dsp/arm/loopfilter_mb_neon.c b/libs/libvpx/vpx_dsp/arm/loopfilter_mb_neon.c new file mode 100644 index 0000000000..f95267472c --- /dev/null +++ b/libs/libvpx/vpx_dsp/arm/loopfilter_mb_neon.c @@ -0,0 +1,1093 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include +#include "./vpx_config.h" +#include "./vpx_dsp_rtcd.h" +#include "vpx_dsp/arm/transpose_neon.h" + +// For all the static inline functions, the functions ending with '_8' process +// 8 samples in a bunch, and the functions ending with '_16' process 16 samples +// in a bunch. 
+ +#define FUN_LOAD_THRESH(w, r) \ + static INLINE void load_thresh_##w( \ + const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, \ + uint8x##w##_t *blimit_vec, uint8x##w##_t *limit_vec, \ + uint8x##w##_t *thresh_vec) { \ + *blimit_vec = vld1##r##dup_u8(blimit); \ + *limit_vec = vld1##r##dup_u8(limit); \ + *thresh_vec = vld1##r##dup_u8(thresh); \ + } + +FUN_LOAD_THRESH(8, _) // load_thresh_8 +FUN_LOAD_THRESH(16, q_) // load_thresh_16 +#undef FUN_LOAD_THRESH + +static INLINE void load_thresh_8_dual( + const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, + const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, + uint8x16_t *blimit_vec, uint8x16_t *limit_vec, uint8x16_t *thresh_vec) { + *blimit_vec = vcombine_u8(vld1_dup_u8(blimit0), vld1_dup_u8(blimit1)); + *limit_vec = vcombine_u8(vld1_dup_u8(limit0), vld1_dup_u8(limit1)); + *thresh_vec = vcombine_u8(vld1_dup_u8(thresh0), vld1_dup_u8(thresh1)); +} + +// Here flat is 64-bit long, with each 8-bit (or 4-bit) chunk being a mask of a +// pixel. When used to control filter branches, we only detect whether it is all +// 0s or all 1s. We pairwise add flat to a 32-bit long number flat_status. +// flat equals 0 if and only if flat_status equals 0. +// flat equals -1 (all 1s) if and only if flat_status equals -2. (This is true +// because each mask occupies more than 1 bit.) +static INLINE uint32_t calc_flat_status_8(uint8x8_t flat) { + return vget_lane_u32( + vreinterpret_u32_u64(vpaddl_u32(vreinterpret_u32_u8(flat))), 0); +} + +// Here flat is 128-bit long, with each 8-bit chunk being a mask of a pixel. +// When used to control filter branches, we only detect whether it is all 0s or +// all 1s. We narrowing shift right each 16-bit chunk by 4 arithmetically, so +// we get a 64-bit long number, with each 4-bit chunk being a mask of a pixel. +// Then we pairwise add flat to a 32-bit long number flat_status. +// flat equals 0 if and only if flat_status equals 0. 
+// flat equals -1 (all 1s) if and only if flat_status equals -2. (This is true +// because each mask occupies more than 1 bit.) +static INLINE uint32_t calc_flat_status_16(uint8x16_t flat) { + const uint8x8_t flat_4bit = + vreinterpret_u8_s8(vshrn_n_s16(vreinterpretq_s16_u8(flat), 4)); + return calc_flat_status_8(flat_4bit); +} + +#define FUN_FILTER_HEV_MASK4(w, r) \ + static INLINE uint8x##w##_t filter_hev_mask4_##w( \ + const uint8x##w##_t limit, const uint8x##w##_t blimit, \ + const uint8x##w##_t thresh, const uint8x##w##_t p3, \ + const uint8x##w##_t p2, const uint8x##w##_t p1, const uint8x##w##_t p0, \ + const uint8x##w##_t q0, const uint8x##w##_t q1, const uint8x##w##_t q2, \ + const uint8x##w##_t q3, uint8x##w##_t *hev, uint8x##w##_t *mask) { \ + uint8x##w##_t max, t0, t1; \ + \ + max = vabd##r##u8(p1, p0); \ + max = vmax##r##u8(max, vabd##r##u8(q1, q0)); \ + *hev = vcgt##r##u8(max, thresh); \ + *mask = vmax##r##u8(max, vabd##r##u8(p3, p2)); \ + *mask = vmax##r##u8(*mask, vabd##r##u8(p2, p1)); \ + *mask = vmax##r##u8(*mask, vabd##r##u8(q2, q1)); \ + *mask = vmax##r##u8(*mask, vabd##r##u8(q3, q2)); \ + t0 = vabd##r##u8(p0, q0); \ + t1 = vabd##r##u8(p1, q1); \ + t0 = vqadd##r##u8(t0, t0); \ + t1 = vshr##r##n_u8(t1, 1); \ + t0 = vqadd##r##u8(t0, t1); \ + *mask = vcle##r##u8(*mask, limit); \ + t0 = vcle##r##u8(t0, blimit); \ + *mask = vand##r##u8(*mask, t0); \ + \ + return max; \ + } + +FUN_FILTER_HEV_MASK4(8, _) // filter_hev_mask4_8 +FUN_FILTER_HEV_MASK4(16, q_) // filter_hev_mask4_16 +#undef FUN_FILTER_HEV_MASK4 + +#define FUN_FILTER_FLAT_HEV_MASK(w, r) \ + static INLINE uint8x##w##_t filter_flat_hev_mask_##w( \ + const uint8x##w##_t limit, const uint8x##w##_t blimit, \ + const uint8x##w##_t thresh, const uint8x##w##_t p3, \ + const uint8x##w##_t p2, const uint8x##w##_t p1, const uint8x##w##_t p0, \ + const uint8x##w##_t q0, const uint8x##w##_t q1, const uint8x##w##_t q2, \ + const uint8x##w##_t q3, uint8x##w##_t *flat, uint32_t *flat_status, \ + 
uint8x##w##_t *hev) { \ + uint8x##w##_t max, mask; \ + \ + max = filter_hev_mask4_##w(limit, blimit, thresh, p3, p2, p1, p0, q0, q1, \ + q2, q3, hev, &mask); \ + *flat = vmax##r##u8(max, vabd##r##u8(p2, p0)); \ + *flat = vmax##r##u8(*flat, vabd##r##u8(q2, q0)); \ + *flat = vmax##r##u8(*flat, vabd##r##u8(p3, p0)); \ + *flat = vmax##r##u8(*flat, vabd##r##u8(q3, q0)); \ + *flat = vcle##r##u8(*flat, vdup##r##n_u8(1)); /* flat_mask4() */ \ + *flat = vand##r##u8(*flat, mask); \ + *flat_status = calc_flat_status_##w(*flat); \ + \ + return mask; \ + } + +FUN_FILTER_FLAT_HEV_MASK(8, _) // filter_flat_hev_mask_8 +FUN_FILTER_FLAT_HEV_MASK(16, q_) // filter_flat_hev_mask_16 +#undef FUN_FILTER_FLAT_HEV_MASK + +#define FUN_FLAT_MASK5(w, r) \ + static INLINE uint8x##w##_t flat_mask5_##w( \ + const uint8x##w##_t p4, const uint8x##w##_t p3, const uint8x##w##_t p2, \ + const uint8x##w##_t p1, const uint8x##w##_t p0, const uint8x##w##_t q0, \ + const uint8x##w##_t q1, const uint8x##w##_t q2, const uint8x##w##_t q3, \ + const uint8x##w##_t q4, const uint8x##w##_t flat, \ + uint32_t *flat2_status) { \ + uint8x##w##_t flat2 = vabd##r##u8(p4, p0); \ + flat2 = vmax##r##u8(flat2, vabd##r##u8(p3, p0)); \ + flat2 = vmax##r##u8(flat2, vabd##r##u8(p2, p0)); \ + flat2 = vmax##r##u8(flat2, vabd##r##u8(p1, p0)); \ + flat2 = vmax##r##u8(flat2, vabd##r##u8(q1, q0)); \ + flat2 = vmax##r##u8(flat2, vabd##r##u8(q2, q0)); \ + flat2 = vmax##r##u8(flat2, vabd##r##u8(q3, q0)); \ + flat2 = vmax##r##u8(flat2, vabd##r##u8(q4, q0)); \ + flat2 = vcle##r##u8(flat2, vdup##r##n_u8(1)); \ + flat2 = vand##r##u8(flat2, flat); \ + *flat2_status = calc_flat_status_##w(flat2); \ + \ + return flat2; \ + } + +FUN_FLAT_MASK5(8, _) // flat_mask5_8 +FUN_FLAT_MASK5(16, q_) // flat_mask5_16 +#undef FUN_FLAT_MASK5 + +#define FUN_FLIP_SIGN(w, r) \ + static INLINE int8x##w##_t flip_sign_##w(const uint8x##w##_t v) { \ + const uint8x##w##_t sign_bit = vdup##r##n_u8(0x80); \ + return vreinterpret##r##s8_u8(veor##r##u8(v, 
sign_bit)); \ + } + +FUN_FLIP_SIGN(8, _) // flip_sign_8 +FUN_FLIP_SIGN(16, q_) // flip_sign_16 +#undef FUN_FLIP_SIGN + +#define FUN_FLIP_SIGN_BACK(w, r) \ + static INLINE uint8x##w##_t flip_sign_back_##w(const int8x##w##_t v) { \ + const int8x##w##_t sign_bit = vdup##r##n_s8(0x80); \ + return vreinterpret##r##u8_s8(veor##r##s8(v, sign_bit)); \ + } + +FUN_FLIP_SIGN_BACK(8, _) // flip_sign_back_8 +FUN_FLIP_SIGN_BACK(16, q_) // flip_sign_back_16 +#undef FUN_FLIP_SIGN_BACK + +static INLINE void filter_update_8(const uint8x8_t sub0, const uint8x8_t sub1, + const uint8x8_t add0, const uint8x8_t add1, + uint16x8_t *sum) { + *sum = vsubw_u8(*sum, sub0); + *sum = vsubw_u8(*sum, sub1); + *sum = vaddw_u8(*sum, add0); + *sum = vaddw_u8(*sum, add1); +} + +static INLINE void filter_update_16(const uint8x16_t sub0, + const uint8x16_t sub1, + const uint8x16_t add0, + const uint8x16_t add1, uint16x8_t *sum0, + uint16x8_t *sum1) { + *sum0 = vsubw_u8(*sum0, vget_low_u8(sub0)); + *sum1 = vsubw_u8(*sum1, vget_high_u8(sub0)); + *sum0 = vsubw_u8(*sum0, vget_low_u8(sub1)); + *sum1 = vsubw_u8(*sum1, vget_high_u8(sub1)); + *sum0 = vaddw_u8(*sum0, vget_low_u8(add0)); + *sum1 = vaddw_u8(*sum1, vget_high_u8(add0)); + *sum0 = vaddw_u8(*sum0, vget_low_u8(add1)); + *sum1 = vaddw_u8(*sum1, vget_high_u8(add1)); +} + +static INLINE uint8x8_t calc_7_tap_filter_8_kernel(const uint8x8_t sub0, + const uint8x8_t sub1, + const uint8x8_t add0, + const uint8x8_t add1, + uint16x8_t *sum) { + filter_update_8(sub0, sub1, add0, add1, sum); + return vrshrn_n_u16(*sum, 3); +} + +static INLINE uint8x16_t calc_7_tap_filter_16_kernel( + const uint8x16_t sub0, const uint8x16_t sub1, const uint8x16_t add0, + const uint8x16_t add1, uint16x8_t *sum0, uint16x8_t *sum1) { + filter_update_16(sub0, sub1, add0, add1, sum0, sum1); + return vcombine_u8(vrshrn_n_u16(*sum0, 3), vrshrn_n_u16(*sum1, 3)); +} + +static INLINE uint8x8_t apply_15_tap_filter_8_kernel( + const uint8x8_t flat, const uint8x8_t sub0, const uint8x8_t sub1, 
+ const uint8x8_t add0, const uint8x8_t add1, const uint8x8_t in, + uint16x8_t *sum) { + filter_update_8(sub0, sub1, add0, add1, sum); + return vbsl_u8(flat, vrshrn_n_u16(*sum, 4), in); +} + +static INLINE uint8x16_t apply_15_tap_filter_16_kernel( + const uint8x16_t flat, const uint8x16_t sub0, const uint8x16_t sub1, + const uint8x16_t add0, const uint8x16_t add1, const uint8x16_t in, + uint16x8_t *sum0, uint16x8_t *sum1) { + uint8x16_t t; + filter_update_16(sub0, sub1, add0, add1, sum0, sum1); + t = vcombine_u8(vrshrn_n_u16(*sum0, 4), vrshrn_n_u16(*sum1, 4)); + return vbslq_u8(flat, t, in); +} + +// 7-tap filter [1, 1, 1, 2, 1, 1, 1] +static INLINE void calc_7_tap_filter_8(const uint8x8_t p3, const uint8x8_t p2, + const uint8x8_t p1, const uint8x8_t p0, + const uint8x8_t q0, const uint8x8_t q1, + const uint8x8_t q2, const uint8x8_t q3, + uint8x8_t *op2, uint8x8_t *op1, + uint8x8_t *op0, uint8x8_t *oq0, + uint8x8_t *oq1, uint8x8_t *oq2) { + uint16x8_t sum; + sum = vaddl_u8(p3, p3); // 2*p3 + sum = vaddw_u8(sum, p3); // 3*p3 + sum = vaddw_u8(sum, p2); // 3*p3+p2 + sum = vaddw_u8(sum, p2); // 3*p3+2*p2 + sum = vaddw_u8(sum, p1); // 3*p3+2*p2+p1 + sum = vaddw_u8(sum, p0); // 3*p3+2*p2+p1+p0 + sum = vaddw_u8(sum, q0); // 3*p3+2*p2+p1+p0+q0 + *op2 = vrshrn_n_u16(sum, 3); + *op1 = calc_7_tap_filter_8_kernel(p3, p2, p1, q1, &sum); + *op0 = calc_7_tap_filter_8_kernel(p3, p1, p0, q2, &sum); + *oq0 = calc_7_tap_filter_8_kernel(p3, p0, q0, q3, &sum); + *oq1 = calc_7_tap_filter_8_kernel(p2, q0, q1, q3, &sum); + *oq2 = calc_7_tap_filter_8_kernel(p1, q1, q2, q3, &sum); +} + +static INLINE void calc_7_tap_filter_16( + const uint8x16_t p3, const uint8x16_t p2, const uint8x16_t p1, + const uint8x16_t p0, const uint8x16_t q0, const uint8x16_t q1, + const uint8x16_t q2, const uint8x16_t q3, uint8x16_t *op2, uint8x16_t *op1, + uint8x16_t *op0, uint8x16_t *oq0, uint8x16_t *oq1, uint8x16_t *oq2) { + uint16x8_t sum0, sum1; + sum0 = vaddl_u8(vget_low_u8(p3), vget_low_u8(p3)); // 2*p3 + 
sum1 = vaddl_u8(vget_high_u8(p3), vget_high_u8(p3)); // 2*p3 + sum0 = vaddw_u8(sum0, vget_low_u8(p3)); // 3*p3 + sum1 = vaddw_u8(sum1, vget_high_u8(p3)); // 3*p3 + sum0 = vaddw_u8(sum0, vget_low_u8(p2)); // 3*p3+p2 + sum1 = vaddw_u8(sum1, vget_high_u8(p2)); // 3*p3+p2 + sum0 = vaddw_u8(sum0, vget_low_u8(p2)); // 3*p3+2*p2 + sum1 = vaddw_u8(sum1, vget_high_u8(p2)); // 3*p3+2*p2 + sum0 = vaddw_u8(sum0, vget_low_u8(p1)); // 3*p3+2*p2+p1 + sum1 = vaddw_u8(sum1, vget_high_u8(p1)); // 3*p3+2*p2+p1 + sum0 = vaddw_u8(sum0, vget_low_u8(p0)); // 3*p3+2*p2+p1+p0 + sum1 = vaddw_u8(sum1, vget_high_u8(p0)); // 3*p3+2*p2+p1+p0 + sum0 = vaddw_u8(sum0, vget_low_u8(q0)); // 3*p3+2*p2+p1+p0+q0 + sum1 = vaddw_u8(sum1, vget_high_u8(q0)); // 3*p3+2*p2+p1+p0+q0 + *op2 = vcombine_u8(vrshrn_n_u16(sum0, 3), vrshrn_n_u16(sum1, 3)); + *op1 = calc_7_tap_filter_16_kernel(p3, p2, p1, q1, &sum0, &sum1); + *op0 = calc_7_tap_filter_16_kernel(p3, p1, p0, q2, &sum0, &sum1); + *oq0 = calc_7_tap_filter_16_kernel(p3, p0, q0, q3, &sum0, &sum1); + *oq1 = calc_7_tap_filter_16_kernel(p2, q0, q1, q3, &sum0, &sum1); + *oq2 = calc_7_tap_filter_16_kernel(p1, q1, q2, q3, &sum0, &sum1); +} + +#define FUN_APPLY_7_TAP_FILTER(w, r) \ + static INLINE void apply_7_tap_filter_##w( \ + const uint8x##w##_t flat, const uint8x##w##_t p3, \ + const uint8x##w##_t p2, const uint8x##w##_t p1, const uint8x##w##_t p0, \ + const uint8x##w##_t q0, const uint8x##w##_t q1, const uint8x##w##_t q2, \ + const uint8x##w##_t q3, uint8x##w##_t *op2, uint8x##w##_t *op1, \ + uint8x##w##_t *op0, uint8x##w##_t *oq0, uint8x##w##_t *oq1, \ + uint8x##w##_t *oq2) { \ + uint8x##w##_t tp1, tp0, tq0, tq1; \ + calc_7_tap_filter_##w(p3, p2, p1, p0, q0, q1, q2, q3, op2, &tp1, &tp0, \ + &tq0, &tq1, oq2); \ + *op2 = vbsl##r##u8(flat, *op2, p2); \ + *op1 = vbsl##r##u8(flat, tp1, *op1); \ + *op0 = vbsl##r##u8(flat, tp0, *op0); \ + *oq0 = vbsl##r##u8(flat, tq0, *oq0); \ + *oq1 = vbsl##r##u8(flat, tq1, *oq1); \ + *oq2 = vbsl##r##u8(flat, *oq2, q2); \ + } + 
+FUN_APPLY_7_TAP_FILTER(8, _) // apply_7_tap_filter_8 +FUN_APPLY_7_TAP_FILTER(16, q_) // apply_7_tap_filter_16 +#undef FUN_APPLY_7_TAP_FILTER + +// 15-tap filter [1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1] +static INLINE void apply_15_tap_filter_8( + const uint8x8_t flat2, const uint8x8_t p7, const uint8x8_t p6, + const uint8x8_t p5, const uint8x8_t p4, const uint8x8_t p3, + const uint8x8_t p2, const uint8x8_t p1, const uint8x8_t p0, + const uint8x8_t q0, const uint8x8_t q1, const uint8x8_t q2, + const uint8x8_t q3, const uint8x8_t q4, const uint8x8_t q5, + const uint8x8_t q6, const uint8x8_t q7, uint8x8_t *op6, uint8x8_t *op5, + uint8x8_t *op4, uint8x8_t *op3, uint8x8_t *op2, uint8x8_t *op1, + uint8x8_t *op0, uint8x8_t *oq0, uint8x8_t *oq1, uint8x8_t *oq2, + uint8x8_t *oq3, uint8x8_t *oq4, uint8x8_t *oq5, uint8x8_t *oq6) { + uint16x8_t sum; + sum = vshll_n_u8(p7, 3); // 8*p7 + sum = vsubw_u8(sum, p7); // 7*p7 + sum = vaddw_u8(sum, p6); // 7*p7+p6 + sum = vaddw_u8(sum, p6); // 7*p7+2*p6 + sum = vaddw_u8(sum, p5); // 7*p7+2*p6+p5 + sum = vaddw_u8(sum, p4); // 7*p7+2*p6+p5+p4 + sum = vaddw_u8(sum, p3); // 7*p7+2*p6+p5+p4+p3 + sum = vaddw_u8(sum, p2); // 7*p7+2*p6+p5+p4+p3+p2 + sum = vaddw_u8(sum, p1); // 7*p7+2*p6+p5+p4+p3+p2+p1 + sum = vaddw_u8(sum, p0); // 7*p7+2*p6+p5+p4+p3+p2+p1+p0 + sum = vaddw_u8(sum, q0); // 7*p7+2*p6+p5+p4+p3+p2+p1+p0+q0 + *op6 = vbsl_u8(flat2, vrshrn_n_u16(sum, 4), p6); + *op5 = apply_15_tap_filter_8_kernel(flat2, p7, p6, p5, q1, p5, &sum); + *op4 = apply_15_tap_filter_8_kernel(flat2, p7, p5, p4, q2, p4, &sum); + *op3 = apply_15_tap_filter_8_kernel(flat2, p7, p4, p3, q3, p3, &sum); + *op2 = apply_15_tap_filter_8_kernel(flat2, p7, p3, p2, q4, *op2, &sum); + *op1 = apply_15_tap_filter_8_kernel(flat2, p7, p2, p1, q5, *op1, &sum); + *op0 = apply_15_tap_filter_8_kernel(flat2, p7, p1, p0, q6, *op0, &sum); + *oq0 = apply_15_tap_filter_8_kernel(flat2, p7, p0, q0, q7, *oq0, &sum); + *oq1 = apply_15_tap_filter_8_kernel(flat2, p6, q0, q1, q7, *oq1, 
&sum); + *oq2 = apply_15_tap_filter_8_kernel(flat2, p5, q1, q2, q7, *oq2, &sum); + *oq3 = apply_15_tap_filter_8_kernel(flat2, p4, q2, q3, q7, q3, &sum); + *oq4 = apply_15_tap_filter_8_kernel(flat2, p3, q3, q4, q7, q4, &sum); + *oq5 = apply_15_tap_filter_8_kernel(flat2, p2, q4, q5, q7, q5, &sum); + *oq6 = apply_15_tap_filter_8_kernel(flat2, p1, q5, q6, q7, q6, &sum); +} + +static INLINE void apply_15_tap_filter_16( + const uint8x16_t flat2, const uint8x16_t p7, const uint8x16_t p6, + const uint8x16_t p5, const uint8x16_t p4, const uint8x16_t p3, + const uint8x16_t p2, const uint8x16_t p1, const uint8x16_t p0, + const uint8x16_t q0, const uint8x16_t q1, const uint8x16_t q2, + const uint8x16_t q3, const uint8x16_t q4, const uint8x16_t q5, + const uint8x16_t q6, const uint8x16_t q7, uint8x16_t *op6, uint8x16_t *op5, + uint8x16_t *op4, uint8x16_t *op3, uint8x16_t *op2, uint8x16_t *op1, + uint8x16_t *op0, uint8x16_t *oq0, uint8x16_t *oq1, uint8x16_t *oq2, + uint8x16_t *oq3, uint8x16_t *oq4, uint8x16_t *oq5, uint8x16_t *oq6) { + uint16x8_t sum0, sum1; + uint8x16_t t; + sum0 = vshll_n_u8(vget_low_u8(p7), 3); // 8*p7 + sum1 = vshll_n_u8(vget_high_u8(p7), 3); // 8*p7 + sum0 = vsubw_u8(sum0, vget_low_u8(p7)); // 7*p7 + sum1 = vsubw_u8(sum1, vget_high_u8(p7)); // 7*p7 + sum0 = vaddw_u8(sum0, vget_low_u8(p6)); // 7*p7+p6 + sum1 = vaddw_u8(sum1, vget_high_u8(p6)); // 7*p7+p6 + sum0 = vaddw_u8(sum0, vget_low_u8(p6)); // 7*p7+2*p6 + sum1 = vaddw_u8(sum1, vget_high_u8(p6)); // 7*p7+2*p6 + sum0 = vaddw_u8(sum0, vget_low_u8(p5)); // 7*p7+2*p6+p5 + sum1 = vaddw_u8(sum1, vget_high_u8(p5)); // 7*p7+2*p6+p5 + sum0 = vaddw_u8(sum0, vget_low_u8(p4)); // 7*p7+2*p6+p5+p4 + sum1 = vaddw_u8(sum1, vget_high_u8(p4)); // 7*p7+2*p6+p5+p4 + sum0 = vaddw_u8(sum0, vget_low_u8(p3)); // 7*p7+2*p6+p5+p4+p3 + sum1 = vaddw_u8(sum1, vget_high_u8(p3)); // 7*p7+2*p6+p5+p4+p3 + sum0 = vaddw_u8(sum0, vget_low_u8(p2)); // 7*p7+2*p6+p5+p4+p3+p2 + sum1 = vaddw_u8(sum1, vget_high_u8(p2)); // 7*p7+2*p6+p5+p4+p3+p2 
+ sum0 = vaddw_u8(sum0, vget_low_u8(p1)); // 7*p7+2*p6+p5+p4+p3+p2+p1 + sum1 = vaddw_u8(sum1, vget_high_u8(p1)); // 7*p7+2*p6+p5+p4+p3+p2+p1 + sum0 = vaddw_u8(sum0, vget_low_u8(p0)); // 7*p7+2*p6+p5+p4+p3+p2+p1+p0 + sum1 = vaddw_u8(sum1, vget_high_u8(p0)); // 7*p7+2*p6+p5+p4+p3+p2+p1+p0 + sum0 = vaddw_u8(sum0, vget_low_u8(q0)); // 7*p7+2*p6+p5+p4+p3+p2+p1+p0+q0 + sum1 = vaddw_u8(sum1, vget_high_u8(q0)); // 7*p7+2*p6+p5+p4+p3+p2+p1+p0+q0 + t = vcombine_u8(vrshrn_n_u16(sum0, 4), vrshrn_n_u16(sum1, 4)); + *op6 = vbslq_u8(flat2, t, p6); + *op5 = apply_15_tap_filter_16_kernel(flat2, p7, p6, p5, q1, p5, &sum0, &sum1); + *op4 = apply_15_tap_filter_16_kernel(flat2, p7, p5, p4, q2, p4, &sum0, &sum1); + *op3 = apply_15_tap_filter_16_kernel(flat2, p7, p4, p3, q3, p3, &sum0, &sum1); + *op2 = + apply_15_tap_filter_16_kernel(flat2, p7, p3, p2, q4, *op2, &sum0, &sum1); + *op1 = + apply_15_tap_filter_16_kernel(flat2, p7, p2, p1, q5, *op1, &sum0, &sum1); + *op0 = + apply_15_tap_filter_16_kernel(flat2, p7, p1, p0, q6, *op0, &sum0, &sum1); + *oq0 = + apply_15_tap_filter_16_kernel(flat2, p7, p0, q0, q7, *oq0, &sum0, &sum1); + *oq1 = + apply_15_tap_filter_16_kernel(flat2, p6, q0, q1, q7, *oq1, &sum0, &sum1); + *oq2 = + apply_15_tap_filter_16_kernel(flat2, p5, q1, q2, q7, *oq2, &sum0, &sum1); + *oq3 = apply_15_tap_filter_16_kernel(flat2, p4, q2, q3, q7, q3, &sum0, &sum1); + *oq4 = apply_15_tap_filter_16_kernel(flat2, p3, q3, q4, q7, q4, &sum0, &sum1); + *oq5 = apply_15_tap_filter_16_kernel(flat2, p2, q4, q5, q7, q5, &sum0, &sum1); + *oq6 = apply_15_tap_filter_16_kernel(flat2, p1, q5, q6, q7, q6, &sum0, &sum1); +} + +#define FUN_FILTER4(w, r) \ + static INLINE void filter4_##w( \ + const uint8x##w##_t mask, const uint8x##w##_t hev, \ + const uint8x##w##_t p1, const uint8x##w##_t p0, const uint8x##w##_t q0, \ + const uint8x##w##_t q1, uint8x##w##_t *op1, uint8x##w##_t *op0, \ + uint8x##w##_t *oq0, uint8x##w##_t *oq1) { \ + int8x##w##_t filter, filter1, filter2, t; \ + int8x##w##_t ps1 = 
flip_sign_##w(p1); \ + int8x##w##_t ps0 = flip_sign_##w(p0); \ + int8x##w##_t qs0 = flip_sign_##w(q0); \ + int8x##w##_t qs1 = flip_sign_##w(q1); \ + \ + /* add outer taps if we have high edge variance */ \ + filter = vqsub##r##s8(ps1, qs1); \ + filter = vand##r##s8(filter, vreinterpret##r##s8_u8(hev)); \ + t = vqsub##r##s8(qs0, ps0); \ + \ + /* inner taps */ \ + filter = vqadd##r##s8(filter, t); \ + filter = vqadd##r##s8(filter, t); \ + filter = vqadd##r##s8(filter, t); \ + filter = vand##r##s8(filter, vreinterpret##r##s8_u8(mask)); \ + \ + /* save bottom 3 bits so that we round one side +4 and the other +3 */ \ + /* if it equals 4 we'll set to adjust by -1 to account for the fact */ \ + /* we'd round 3 the other way */ \ + filter1 = vshr##r##n_s8(vqadd##r##s8(filter, vdup##r##n_s8(4)), 3); \ + filter2 = vshr##r##n_s8(vqadd##r##s8(filter, vdup##r##n_s8(3)), 3); \ + \ + qs0 = vqsub##r##s8(qs0, filter1); \ + ps0 = vqadd##r##s8(ps0, filter2); \ + *oq0 = flip_sign_back_##w(qs0); \ + *op0 = flip_sign_back_##w(ps0); \ + \ + /* outer tap adjustments */ \ + filter = vrshr##r##n_s8(filter1, 1); \ + filter = vbic##r##s8(filter, vreinterpret##r##s8_u8(hev)); \ + \ + qs1 = vqsub##r##s8(qs1, filter); \ + ps1 = vqadd##r##s8(ps1, filter); \ + *oq1 = flip_sign_back_##w(qs1); \ + *op1 = flip_sign_back_##w(ps1); \ + } + +FUN_FILTER4(8, _) // filter4_8 +FUN_FILTER4(16, q_) // filter4_16 +#undef FUN_FILTER4 + +#define FUN_FILTER8(w) \ + static INLINE void filter8_##w( \ + const uint8x##w##_t mask, const uint8x##w##_t flat, \ + const uint32_t flat_status, const uint8x##w##_t hev, \ + const uint8x##w##_t p3, const uint8x##w##_t p2, const uint8x##w##_t p1, \ + const uint8x##w##_t p0, const uint8x##w##_t q0, const uint8x##w##_t q1, \ + const uint8x##w##_t q2, const uint8x##w##_t q3, uint8x##w##_t *op2, \ + uint8x##w##_t *op1, uint8x##w##_t *op0, uint8x##w##_t *oq0, \ + uint8x##w##_t *oq1, uint8x##w##_t *oq2) { \ + if (flat_status != (uint32_t)-2) { \ + filter4_##w(mask, hev, p1, p0, q0, 
q1, op1, op0, oq0, oq1); \ + *op2 = p2; \ + *oq2 = q2; \ + if (flat_status) { \ + apply_7_tap_filter_##w(flat, p3, p2, p1, p0, q0, q1, q2, q3, op2, op1, \ + op0, oq0, oq1, oq2); \ + } \ + } else { \ + calc_7_tap_filter_##w(p3, p2, p1, p0, q0, q1, q2, q3, op2, op1, op0, \ + oq0, oq1, oq2); \ + } \ + } + +FUN_FILTER8(8) // filter8_8 +FUN_FILTER8(16) // filter8_16 +#undef FUN_FILTER8 + +#define FUN_FILTER16(w) \ + static INLINE void filter16_##w( \ + const uint8x##w##_t mask, const uint8x##w##_t flat, \ + const uint32_t flat_status, const uint8x##w##_t flat2, \ + const uint32_t flat2_status, const uint8x##w##_t hev, \ + const uint8x##w##_t p7, const uint8x##w##_t p6, const uint8x##w##_t p5, \ + const uint8x##w##_t p4, const uint8x##w##_t p3, const uint8x##w##_t p2, \ + const uint8x##w##_t p1, const uint8x##w##_t p0, const uint8x##w##_t q0, \ + const uint8x##w##_t q1, const uint8x##w##_t q2, const uint8x##w##_t q3, \ + const uint8x##w##_t q4, const uint8x##w##_t q5, const uint8x##w##_t q6, \ + const uint8x##w##_t q7, uint8x##w##_t *op6, uint8x##w##_t *op5, \ + uint8x##w##_t *op4, uint8x##w##_t *op3, uint8x##w##_t *op2, \ + uint8x##w##_t *op1, uint8x##w##_t *op0, uint8x##w##_t *oq0, \ + uint8x##w##_t *oq1, uint8x##w##_t *oq2, uint8x##w##_t *oq3, \ + uint8x##w##_t *oq4, uint8x##w##_t *oq5, uint8x##w##_t *oq6) { \ + if (flat_status != (uint32_t)-2) { \ + filter4_##w(mask, hev, p1, p0, q0, q1, op1, op0, oq0, oq1); \ + } \ + \ + if (flat_status) { \ + *op2 = p2; \ + *oq2 = q2; \ + if (flat2_status != (uint32_t)-2) { \ + apply_7_tap_filter_##w(flat, p3, p2, p1, p0, q0, q1, q2, q3, op2, op1, \ + op0, oq0, oq1, oq2); \ + } \ + if (flat2_status) { \ + apply_15_tap_filter_##w(flat2, p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, \ + q2, q3, q4, q5, q6, q7, op6, op5, op4, op3, \ + op2, op1, op0, oq0, oq1, oq2, oq3, oq4, oq5, \ + oq6); \ + } \ + } \ + } + +FUN_FILTER16(8) // filter16_8 +FUN_FILTER16(16) // filter16_16 +#undef FUN_FILTER16 + +#define FUN_LOAD8(w, r) \ + static INLINE void 
load_##w##x8( \ + const uint8_t *s, const int p, uint8x##w##_t *p3, uint8x##w##_t *p2, \ + uint8x##w##_t *p1, uint8x##w##_t *p0, uint8x##w##_t *q0, \ + uint8x##w##_t *q1, uint8x##w##_t *q2, uint8x##w##_t *q3) { \ + *p3 = vld1##r##u8(s); \ + s += p; \ + *p2 = vld1##r##u8(s); \ + s += p; \ + *p1 = vld1##r##u8(s); \ + s += p; \ + *p0 = vld1##r##u8(s); \ + s += p; \ + *q0 = vld1##r##u8(s); \ + s += p; \ + *q1 = vld1##r##u8(s); \ + s += p; \ + *q2 = vld1##r##u8(s); \ + s += p; \ + *q3 = vld1##r##u8(s); \ + } + +FUN_LOAD8(8, _) // load_8x8 +FUN_LOAD8(16, q_) // load_16x8 +#undef FUN_LOAD8 + +#define FUN_LOAD16(w, r) \ + static INLINE void load_##w##x16( \ + const uint8_t *s, const int p, uint8x##w##_t *s0, uint8x##w##_t *s1, \ + uint8x##w##_t *s2, uint8x##w##_t *s3, uint8x##w##_t *s4, \ + uint8x##w##_t *s5, uint8x##w##_t *s6, uint8x##w##_t *s7, \ + uint8x##w##_t *s8, uint8x##w##_t *s9, uint8x##w##_t *s10, \ + uint8x##w##_t *s11, uint8x##w##_t *s12, uint8x##w##_t *s13, \ + uint8x##w##_t *s14, uint8x##w##_t *s15) { \ + *s0 = vld1##r##u8(s); \ + s += p; \ + *s1 = vld1##r##u8(s); \ + s += p; \ + *s2 = vld1##r##u8(s); \ + s += p; \ + *s3 = vld1##r##u8(s); \ + s += p; \ + *s4 = vld1##r##u8(s); \ + s += p; \ + *s5 = vld1##r##u8(s); \ + s += p; \ + *s6 = vld1##r##u8(s); \ + s += p; \ + *s7 = vld1##r##u8(s); \ + s += p; \ + *s8 = vld1##r##u8(s); \ + s += p; \ + *s9 = vld1##r##u8(s); \ + s += p; \ + *s10 = vld1##r##u8(s); \ + s += p; \ + *s11 = vld1##r##u8(s); \ + s += p; \ + *s12 = vld1##r##u8(s); \ + s += p; \ + *s13 = vld1##r##u8(s); \ + s += p; \ + *s14 = vld1##r##u8(s); \ + s += p; \ + *s15 = vld1##r##u8(s); \ + } + +FUN_LOAD16(8, _) // load_8x16 +FUN_LOAD16(16, q_) // load_16x16 +#undef FUN_LOAD16 + +#define FUN_STORE4(w, r) \ + static INLINE void store_##w##x4( \ + uint8_t *s, const int p, const uint8x##w##_t s0, const uint8x##w##_t s1, \ + const uint8x##w##_t s2, const uint8x##w##_t s3) { \ + vst1##r##u8(s, s0); \ + s += p; \ + vst1##r##u8(s, s1); \ + s += p; \ + 
vst1##r##u8(s, s2); \ + s += p; \ + vst1##r##u8(s, s3); \ + } + +FUN_STORE4(8, _) // store_8x4 +FUN_STORE4(16, q_) // store_16x4 +#undef FUN_STORE4 + +#define FUN_STORE6(w, r) \ + static INLINE void store_##w##x6( \ + uint8_t *s, const int p, const uint8x##w##_t s0, const uint8x##w##_t s1, \ + const uint8x##w##_t s2, const uint8x##w##_t s3, const uint8x##w##_t s4, \ + const uint8x##w##_t s5) { \ + vst1##r##u8(s, s0); \ + s += p; \ + vst1##r##u8(s, s1); \ + s += p; \ + vst1##r##u8(s, s2); \ + s += p; \ + vst1##r##u8(s, s3); \ + s += p; \ + vst1##r##u8(s, s4); \ + s += p; \ + vst1##r##u8(s, s5); \ + } + +FUN_STORE6(8, _) // store_8x6 +FUN_STORE6(16, q_) // store_16x6 +#undef FUN_STORE6 + +static INLINE void store_4x8(uint8_t *s, const int p, const uint8x8_t p1, + const uint8x8_t p0, const uint8x8_t q0, + const uint8x8_t q1) { + uint8x8x4_t o; + + o.val[0] = p1; + o.val[1] = p0; + o.val[2] = q0; + o.val[3] = q1; + vst4_lane_u8(s, o, 0); + s += p; + vst4_lane_u8(s, o, 1); + s += p; + vst4_lane_u8(s, o, 2); + s += p; + vst4_lane_u8(s, o, 3); + s += p; + vst4_lane_u8(s, o, 4); + s += p; + vst4_lane_u8(s, o, 5); + s += p; + vst4_lane_u8(s, o, 6); + s += p; + vst4_lane_u8(s, o, 7); +} + +static INLINE void store_6x8(uint8_t *s, const int p, const uint8x8_t s0, + const uint8x8_t s1, const uint8x8_t s2, + const uint8x8_t s3, const uint8x8_t s4, + const uint8x8_t s5) { + uint8x8x3_t o0, o1; + + o0.val[0] = s0; + o0.val[1] = s1; + o0.val[2] = s2; + o1.val[0] = s3; + o1.val[1] = s4; + o1.val[2] = s5; + vst3_lane_u8(s - 3, o0, 0); + vst3_lane_u8(s + 0, o1, 0); + s += p; + vst3_lane_u8(s - 3, o0, 1); + vst3_lane_u8(s + 0, o1, 1); + s += p; + vst3_lane_u8(s - 3, o0, 2); + vst3_lane_u8(s + 0, o1, 2); + s += p; + vst3_lane_u8(s - 3, o0, 3); + vst3_lane_u8(s + 0, o1, 3); + s += p; + vst3_lane_u8(s - 3, o0, 4); + vst3_lane_u8(s + 0, o1, 4); + s += p; + vst3_lane_u8(s - 3, o0, 5); + vst3_lane_u8(s + 0, o1, 5); + s += p; + vst3_lane_u8(s - 3, o0, 6); + vst3_lane_u8(s + 0, o1, 6); + s += 
p; + vst3_lane_u8(s - 3, o0, 7); + vst3_lane_u8(s + 0, o1, 7); +} + +#define FUN_STORE8(w, r) \ + static INLINE void store_##w##x8( \ + uint8_t *s, const int p, const uint8x##w##_t s0, const uint8x##w##_t s1, \ + const uint8x##w##_t s2, const uint8x##w##_t s3, const uint8x##w##_t s4, \ + const uint8x##w##_t s5, const uint8x##w##_t s6, \ + const uint8x##w##_t s7) { \ + vst1##r##u8(s, s0); \ + s += p; \ + vst1##r##u8(s, s1); \ + s += p; \ + vst1##r##u8(s, s2); \ + s += p; \ + vst1##r##u8(s, s3); \ + s += p; \ + vst1##r##u8(s, s4); \ + s += p; \ + vst1##r##u8(s, s5); \ + s += p; \ + vst1##r##u8(s, s6); \ + s += p; \ + vst1##r##u8(s, s7); \ + } + +FUN_STORE8(8, _) // store_8x8 +FUN_STORE8(16, q_) // store_16x8 +#undef FUN_STORE8 + +#define FUN_STORE14(w, r) \ + static INLINE void store_##w##x14( \ + uint8_t *s, const int p, const uint8x##w##_t p6, const uint8x##w##_t p5, \ + const uint8x##w##_t p4, const uint8x##w##_t p3, const uint8x##w##_t p2, \ + const uint8x##w##_t p1, const uint8x##w##_t p0, const uint8x##w##_t q0, \ + const uint8x##w##_t q1, const uint8x##w##_t q2, const uint8x##w##_t q3, \ + const uint8x##w##_t q4, const uint8x##w##_t q5, const uint8x##w##_t q6, \ + const uint32_t flat_status, const uint32_t flat2_status) { \ + if (flat_status) { \ + if (flat2_status) { \ + vst1##r##u8(s - 7 * p, p6); \ + vst1##r##u8(s - 6 * p, p5); \ + vst1##r##u8(s - 5 * p, p4); \ + vst1##r##u8(s - 4 * p, p3); \ + vst1##r##u8(s + 3 * p, q3); \ + vst1##r##u8(s + 4 * p, q4); \ + vst1##r##u8(s + 5 * p, q5); \ + vst1##r##u8(s + 6 * p, q6); \ + } \ + vst1##r##u8(s - 3 * p, p2); \ + vst1##r##u8(s + 2 * p, q2); \ + } \ + vst1##r##u8(s - 2 * p, p1); \ + vst1##r##u8(s - 1 * p, p0); \ + vst1##r##u8(s + 0 * p, q0); \ + vst1##r##u8(s + 1 * p, q1); \ + } + +FUN_STORE14(8, _) // store_8x14 +FUN_STORE14(16, q_) // store_16x14 +#undef FUN_STORE14 + +static INLINE void store_16x16(uint8_t *s, const int p, const uint8x16_t s0, + const uint8x16_t s1, const uint8x16_t s2, + const uint8x16_t s3, 
const uint8x16_t s4, + const uint8x16_t s5, const uint8x16_t s6, + const uint8x16_t s7, const uint8x16_t s8, + const uint8x16_t s9, const uint8x16_t s10, + const uint8x16_t s11, const uint8x16_t s12, + const uint8x16_t s13, const uint8x16_t s14, + const uint8x16_t s15) { + vst1q_u8(s, s0); + s += p; + vst1q_u8(s, s1); + s += p; + vst1q_u8(s, s2); + s += p; + vst1q_u8(s, s3); + s += p; + vst1q_u8(s, s4); + s += p; + vst1q_u8(s, s5); + s += p; + vst1q_u8(s, s6); + s += p; + vst1q_u8(s, s7); + s += p; + vst1q_u8(s, s8); + s += p; + vst1q_u8(s, s9); + s += p; + vst1q_u8(s, s10); + s += p; + vst1q_u8(s, s11); + s += p; + vst1q_u8(s, s12); + s += p; + vst1q_u8(s, s13); + s += p; + vst1q_u8(s, s14); + s += p; + vst1q_u8(s, s15); +} + +#define FUN_HOR_4_KERNEL(name, w) \ + static INLINE void lpf_horizontal_4##name##kernel( \ + uint8_t *s, const int p, const uint8x##w##_t blimit, \ + const uint8x##w##_t limit, const uint8x##w##_t thresh) { \ + uint8x##w##_t p3, p2, p1, p0, q0, q1, q2, q3, mask, hev; \ + \ + load_##w##x8(s - 4 * p, p, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); \ + filter_hev_mask4_##w(limit, blimit, thresh, p3, p2, p1, p0, q0, q1, q2, \ + q3, &hev, &mask); \ + filter4_##w(mask, hev, p1, p0, q0, q1, &p1, &p0, &q0, &q1); \ + store_##w##x4(s - 2 * p, p, p1, p0, q0, q1); \ + } + +FUN_HOR_4_KERNEL(_, 8) // lpf_horizontal_4_kernel +FUN_HOR_4_KERNEL(_dual_, 16) // lpf_horizontal_4_dual_kernel +#undef FUN_HOR_4_KERNEL + +void vpx_lpf_horizontal_4_neon(uint8_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh) { + uint8x8_t blimit_vec, limit_vec, thresh_vec; + load_thresh_8(blimit, limit, thresh, &blimit_vec, &limit_vec, &thresh_vec); + lpf_horizontal_4_kernel(s, p, blimit_vec, limit_vec, thresh_vec); +} + +void vpx_lpf_horizontal_4_dual_neon(uint8_t *s, int p, const uint8_t *blimit0, + const uint8_t *limit0, + const uint8_t *thresh0, + const uint8_t *blimit1, + const uint8_t *limit1, + const uint8_t *thresh1) { + uint8x16_t blimit_vec, 
limit_vec, thresh_vec; + load_thresh_8_dual(blimit0, limit0, thresh0, blimit1, limit1, thresh1, + &blimit_vec, &limit_vec, &thresh_vec); + lpf_horizontal_4_dual_kernel(s, p, blimit_vec, limit_vec, thresh_vec); +} + +void vpx_lpf_vertical_4_neon(uint8_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh) { + uint8x8_t blimit_vec, limit_vec, thresh_vec, p3, p2, p1, p0, q0, q1, q2, q3, + mask, hev; + load_thresh_8(blimit, limit, thresh, &blimit_vec, &limit_vec, &thresh_vec); + load_8x8(s - 4, p, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); + transpose_u8_8x8(&p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); + filter_hev_mask4_8(limit_vec, blimit_vec, thresh_vec, p3, p2, p1, p0, q0, q1, + q2, q3, &hev, &mask); + filter4_8(mask, hev, p1, p0, q0, q1, &p1, &p0, &q0, &q1); + store_4x8(s - 2, p, p1, p0, q0, q1); +} + +void vpx_lpf_vertical_4_dual_neon(uint8_t *s, int p, const uint8_t *blimit0, + const uint8_t *limit0, const uint8_t *thresh0, + const uint8_t *blimit1, const uint8_t *limit1, + const uint8_t *thresh1) { + uint8x16_t blimit_vec, limit_vec, thresh_vec, p3, p2, p1, p0, q0, q1, q2, q3, + mask, hev; + uint8x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, + s15; + + load_thresh_8_dual(blimit0, limit0, thresh0, blimit1, limit1, thresh1, + &blimit_vec, &limit_vec, &thresh_vec); + load_8x16(s - 4, p, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8, &s9, &s10, + &s11, &s12, &s13, &s14, &s15); + transpose_u8_8x16(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, + s14, s15, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); + filter_hev_mask4_16(limit_vec, blimit_vec, thresh_vec, p3, p2, p1, p0, q0, q1, + q2, q3, &hev, &mask); + filter4_16(mask, hev, p1, p0, q0, q1, &p1, &p0, &q0, &q1); + s -= 2; + store_4x8(s, p, vget_low_u8(p1), vget_low_u8(p0), vget_low_u8(q0), + vget_low_u8(q1)); + store_4x8(s + 8 * p, p, vget_high_u8(p1), vget_high_u8(p0), vget_high_u8(q0), + vget_high_u8(q1)); +} + +void vpx_lpf_horizontal_8_neon(uint8_t *s, int p, 
const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh) { + uint8x8_t blimit_vec, limit_vec, thresh_vec, p3, p2, p1, p0, q0, q1, q2, q3, + op2, op1, op0, oq0, oq1, oq2, mask, flat, hev; + uint32_t flat_status; + + load_thresh_8(blimit, limit, thresh, &blimit_vec, &limit_vec, &thresh_vec); + load_8x8(s - 4 * p, p, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); + mask = filter_flat_hev_mask_8(limit_vec, blimit_vec, thresh_vec, p3, p2, p1, + p0, q0, q1, q2, q3, &flat, &flat_status, &hev); + filter8_8(mask, flat, flat_status, hev, p3, p2, p1, p0, q0, q1, q2, q3, &op2, + &op1, &op0, &oq0, &oq1, &oq2); + store_8x6(s - 3 * p, p, op2, op1, op0, oq0, oq1, oq2); +} + +void vpx_lpf_horizontal_8_dual_neon(uint8_t *s, int p, const uint8_t *blimit0, + const uint8_t *limit0, + const uint8_t *thresh0, + const uint8_t *blimit1, + const uint8_t *limit1, + const uint8_t *thresh1) { + uint8x16_t blimit_vec, limit_vec, thresh_vec, p3, p2, p1, p0, q0, q1, q2, q3, + op2, op1, op0, oq0, oq1, oq2, mask, flat, hev; + uint32_t flat_status; + + load_thresh_8_dual(blimit0, limit0, thresh0, blimit1, limit1, thresh1, + &blimit_vec, &limit_vec, &thresh_vec); + load_16x8(s - 4 * p, p, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); + mask = filter_flat_hev_mask_16(limit_vec, blimit_vec, thresh_vec, p3, p2, p1, + p0, q0, q1, q2, q3, &flat, &flat_status, &hev); + filter8_16(mask, flat, flat_status, hev, p3, p2, p1, p0, q0, q1, q2, q3, &op2, + &op1, &op0, &oq0, &oq1, &oq2); + store_16x6(s - 3 * p, p, op2, op1, op0, oq0, oq1, oq2); +} + +void vpx_lpf_vertical_8_neon(uint8_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh) { + uint8x8_t blimit_vec, limit_vec, thresh_vec, p3, p2, p1, p0, q0, q1, q2, q3, + op2, op1, op0, oq0, oq1, oq2, mask, flat, hev; + uint32_t flat_status; + + load_thresh_8(blimit, limit, thresh, &blimit_vec, &limit_vec, &thresh_vec); + load_8x8(s - 4, p, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); + transpose_u8_8x8(&p3, &p2, &p1, &p0, &q0, &q1, &q2, 
&q3); + mask = filter_flat_hev_mask_8(limit_vec, blimit_vec, thresh_vec, p3, p2, p1, + p0, q0, q1, q2, q3, &flat, &flat_status, &hev); + filter8_8(mask, flat, flat_status, hev, p3, p2, p1, p0, q0, q1, q2, q3, &op2, + &op1, &op0, &oq0, &oq1, &oq2); + // Note: tranpose + store_8x8() is faster than store_6x8(). + transpose_u8_8x8(&p3, &op2, &op1, &op0, &oq0, &oq1, &oq2, &q3); + store_8x8(s - 4, p, p3, op2, op1, op0, oq0, oq1, oq2, q3); +} + +void vpx_lpf_vertical_8_dual_neon(uint8_t *s, int p, const uint8_t *blimit0, + const uint8_t *limit0, const uint8_t *thresh0, + const uint8_t *blimit1, const uint8_t *limit1, + const uint8_t *thresh1) { + uint8x16_t blimit_vec, limit_vec, thresh_vec, p3, p2, p1, p0, q0, q1, q2, q3, + op2, op1, op0, oq0, oq1, oq2, mask, flat, hev; + uint8x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, + s15; + uint32_t flat_status; + + load_thresh_8_dual(blimit0, limit0, thresh0, blimit1, limit1, thresh1, + &blimit_vec, &limit_vec, &thresh_vec); + load_8x16(s - 4, p, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8, &s9, &s10, + &s11, &s12, &s13, &s14, &s15); + transpose_u8_8x16(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, + s14, s15, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); + mask = filter_flat_hev_mask_16(limit_vec, blimit_vec, thresh_vec, p3, p2, p1, + p0, q0, q1, q2, q3, &flat, &flat_status, &hev); + filter8_16(mask, flat, flat_status, hev, p3, p2, p1, p0, q0, q1, q2, q3, &op2, + &op1, &op0, &oq0, &oq1, &oq2); + // Note: store_6x8() twice is faster than tranpose + store_8x16(). 
+ store_6x8(s, p, vget_low_u8(op2), vget_low_u8(op1), vget_low_u8(op0), + vget_low_u8(oq0), vget_low_u8(oq1), vget_low_u8(oq2)); + store_6x8(s + 8 * p, p, vget_high_u8(op2), vget_high_u8(op1), + vget_high_u8(op0), vget_high_u8(oq0), vget_high_u8(oq1), + vget_high_u8(oq2)); +} + +#define FUN_LPF_16_KERNEL(name, w) \ + static INLINE void lpf_16##name##kernel( \ + const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, \ + const uint8x##w##_t p7, const uint8x##w##_t p6, const uint8x##w##_t p5, \ + const uint8x##w##_t p4, const uint8x##w##_t p3, const uint8x##w##_t p2, \ + const uint8x##w##_t p1, const uint8x##w##_t p0, const uint8x##w##_t q0, \ + const uint8x##w##_t q1, const uint8x##w##_t q2, const uint8x##w##_t q3, \ + const uint8x##w##_t q4, const uint8x##w##_t q5, const uint8x##w##_t q6, \ + const uint8x##w##_t q7, uint8x##w##_t *op6, uint8x##w##_t *op5, \ + uint8x##w##_t *op4, uint8x##w##_t *op3, uint8x##w##_t *op2, \ + uint8x##w##_t *op1, uint8x##w##_t *op0, uint8x##w##_t *oq0, \ + uint8x##w##_t *oq1, uint8x##w##_t *oq2, uint8x##w##_t *oq3, \ + uint8x##w##_t *oq4, uint8x##w##_t *oq5, uint8x##w##_t *oq6, \ + uint32_t *flat_status, uint32_t *flat2_status) { \ + uint8x##w##_t blimit_vec, limit_vec, thresh_vec, mask, flat, flat2, hev; \ + \ + load_thresh_##w(blimit, limit, thresh, &blimit_vec, &limit_vec, \ + &thresh_vec); \ + mask = filter_flat_hev_mask_##w(limit_vec, blimit_vec, thresh_vec, p3, p2, \ + p1, p0, q0, q1, q2, q3, &flat, \ + flat_status, &hev); \ + flat2 = flat_mask5_##w(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, \ + flat2_status); \ + filter16_##w(mask, flat, *flat_status, flat2, *flat2_status, hev, p7, p6, \ + p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7, op6, \ + op5, op4, op3, op2, op1, op0, oq0, oq1, oq2, oq3, oq4, oq5, \ + oq6); \ + } + +FUN_LPF_16_KERNEL(_, 8) // lpf_16_kernel +FUN_LPF_16_KERNEL(_dual_, 16) // lpf_16_dual_kernel +#undef FUN_LPF_16_KERNEL + +void vpx_lpf_horizontal_edge_8_neon(uint8_t *s, int p, const 
uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh) { + uint8x8_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7, op6, + op5, op4, op3, op2, op1, op0, oq0, oq1, oq2, oq3, oq4, oq5, oq6; + uint32_t flat_status, flat2_status; + + load_8x16(s - 8 * p, p, &p7, &p6, &p5, &p4, &p3, &p2, &p1, &p0, &q0, &q1, &q2, + &q3, &q4, &q5, &q6, &q7); + lpf_16_kernel(blimit, limit, thresh, p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, + q2, q3, q4, q5, q6, q7, &op6, &op5, &op4, &op3, &op2, &op1, + &op0, &oq0, &oq1, &oq2, &oq3, &oq4, &oq5, &oq6, &flat_status, + &flat2_status); + store_8x14(s, p, op6, op5, op4, op3, op2, op1, op0, oq0, oq1, oq2, oq3, oq4, + oq5, oq6, flat_status, flat2_status); +} + +void vpx_lpf_horizontal_edge_16_neon(uint8_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh) { + uint8x16_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7, + op6, op5, op4, op3, op2, op1, op0, oq0, oq1, oq2, oq3, oq4, oq5, oq6; + uint32_t flat_status, flat2_status; + + load_16x8(s - 4 * p, p, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); + p7 = vld1q_u8(s - 8 * p); + p6 = vld1q_u8(s - 7 * p); + p5 = vld1q_u8(s - 6 * p); + p4 = vld1q_u8(s - 5 * p); + q4 = vld1q_u8(s + 4 * p); + q5 = vld1q_u8(s + 5 * p); + q6 = vld1q_u8(s + 6 * p); + q7 = vld1q_u8(s + 7 * p); + lpf_16_dual_kernel(blimit, limit, thresh, p7, p6, p5, p4, p3, p2, p1, p0, q0, + q1, q2, q3, q4, q5, q6, q7, &op6, &op5, &op4, &op3, &op2, + &op1, &op0, &oq0, &oq1, &oq2, &oq3, &oq4, &oq5, &oq6, + &flat_status, &flat2_status); + store_16x14(s, p, op6, op5, op4, op3, op2, op1, op0, oq0, oq1, oq2, oq3, oq4, + oq5, oq6, flat_status, flat2_status); +} + +void vpx_lpf_vertical_16_neon(uint8_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh) { + uint8x8_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7, op6, + op5, op4, op3, op2, op1, op0, oq0, oq1, oq2, oq3, oq4, oq5, oq6; + uint8x16_t s0, s1, s2, s3, s4, s5, s6, s7; + 
uint32_t flat_status, flat2_status; + + s -= 8; + load_16x8(s, p, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7); + transpose_u8_16x8(s0, s1, s2, s3, s4, s5, s6, s7, &p7, &p6, &p5, &p4, &p3, + &p2, &p1, &p0, &q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7); + lpf_16_kernel(blimit, limit, thresh, p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, + q2, q3, q4, q5, q6, q7, &op6, &op5, &op4, &op3, &op2, &op1, + &op0, &oq0, &oq1, &oq2, &oq3, &oq4, &oq5, &oq6, &flat_status, + &flat2_status); + if (flat_status) { + if (flat2_status) { + transpose_u8_8x16(p7, op6, op5, op4, op3, op2, op1, op0, oq0, oq1, oq2, + oq3, oq4, oq5, oq6, q7, &s0, &s1, &s2, &s3, &s4, &s5, + &s6, &s7); + store_16x8(s, p, s0, s1, s2, s3, s4, s5, s6, s7); + } else { + // Note: tranpose + store_8x8() is faster than store_6x8(). + transpose_u8_8x8(&p3, &op2, &op1, &op0, &oq0, &oq1, &oq2, &q3); + store_8x8(s + 4, p, p3, op2, op1, op0, oq0, oq1, oq2, q3); + } + } else { + store_4x8(s + 6, p, op1, op0, oq0, oq1); + } +} + +void vpx_lpf_vertical_16_dual_neon(uint8_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh) { + uint8x16_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7, + op6, op5, op4, op3, op2, op1, op0, oq0, oq1, oq2, oq3, oq4, oq5, oq6; + uint8x16_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, + s15; + uint32_t flat_status, flat2_status; + + s -= 8; + load_16x16(s, p, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8, &s9, &s10, &s11, + &s12, &s13, &s14, &s15); + transpose_u8_16x16(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, + s14, s15, &p7, &p6, &p5, &p4, &p3, &p2, &p1, &p0, &q0, &q1, + &q2, &q3, &q4, &q5, &q6, &q7); + lpf_16_dual_kernel(blimit, limit, thresh, p7, p6, p5, p4, p3, p2, p1, p0, q0, + q1, q2, q3, q4, q5, q6, q7, &op6, &op5, &op4, &op3, &op2, + &op1, &op0, &oq0, &oq1, &oq2, &oq3, &oq4, &oq5, &oq6, + &flat_status, &flat2_status); + if (flat_status) { + if (flat2_status) { + transpose_u8_16x16(p7, op6, op5, op4, op3, op2, op1, op0, 
oq0, oq1, oq2, + oq3, oq4, oq5, oq6, q7, &s0, &s1, &s2, &s3, &s4, &s5, + &s6, &s7, &s8, &s9, &s10, &s11, &s12, &s13, &s14, + &s15); + store_16x16(s, p, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, + s13, s14, s15); + } else { + // Note: store_6x8() twice is faster than tranpose + store_8x16(). + s += 8; + store_6x8(s, p, vget_low_u8(op2), vget_low_u8(op1), vget_low_u8(op0), + vget_low_u8(oq0), vget_low_u8(oq1), vget_low_u8(oq2)); + store_6x8(s + 8 * p, p, vget_high_u8(op2), vget_high_u8(op1), + vget_high_u8(op0), vget_high_u8(oq0), vget_high_u8(oq1), + vget_high_u8(oq2)); + } + } else { + s += 6; + store_4x8(s, p, vget_low_u8(op1), vget_low_u8(op0), vget_low_u8(oq0), + vget_low_u8(oq1)); + store_4x8(s + 8 * p, p, vget_high_u8(op1), vget_high_u8(op0), + vget_high_u8(oq0), vget_high_u8(oq1)); + } +} diff --git a/libs/libvpx/vpx_dsp/arm/loopfilter_neon.c b/libs/libvpx/vpx_dsp/arm/loopfilter_neon.c index eff87d29bd..ced5aef0ab 100644 --- a/libs/libvpx/vpx_dsp/arm/loopfilter_neon.c +++ b/libs/libvpx/vpx_dsp/arm/loopfilter_neon.c @@ -14,45 +14,10 @@ #include "./vpx_config.h" #include "vpx/vpx_integer.h" -void vpx_lpf_vertical_4_dual_neon(uint8_t *s, int p, - const uint8_t *blimit0, - const uint8_t *limit0, - const uint8_t *thresh0, - const uint8_t *blimit1, - const uint8_t *limit1, +void vpx_lpf_vertical_4_dual_neon(uint8_t *s, int p, const uint8_t *blimit0, + const uint8_t *limit0, const uint8_t *thresh0, + const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_vertical_4_neon(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_vertical_4_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1); + vpx_lpf_vertical_4_neon(s, p, blimit0, limit0, thresh0); + vpx_lpf_vertical_4_neon(s + 8 * p, p, blimit1, limit1, thresh1); } - -#if HAVE_NEON_ASM -void vpx_lpf_horizontal_8_dual_neon(uint8_t *s, int p /* pitch */, - const uint8_t *blimit0, - const uint8_t *limit0, - const uint8_t *thresh0, - const uint8_t *blimit1, - const uint8_t *limit1, - const 
uint8_t *thresh1) { - vpx_lpf_horizontal_8_neon(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_horizontal_8_neon(s + 8, p, blimit1, limit1, thresh1, 1); -} - -void vpx_lpf_vertical_8_dual_neon(uint8_t *s, int p, - const uint8_t *blimit0, - const uint8_t *limit0, - const uint8_t *thresh0, - const uint8_t *blimit1, - const uint8_t *limit1, - const uint8_t *thresh1) { - vpx_lpf_vertical_8_neon(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_vertical_8_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1); -} - -void vpx_lpf_vertical_16_dual_neon(uint8_t *s, int p, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh) { - vpx_lpf_vertical_16_neon(s, p, blimit, limit, thresh); - vpx_lpf_vertical_16_neon(s + 8 * p, p, blimit, limit, thresh); -} -#endif // HAVE_NEON_ASM diff --git a/libs/libvpx/vpx_dsp/arm/sad4d_neon.c b/libs/libvpx/vpx_dsp/arm/sad4d_neon.c index c7704dc1be..dc20398000 100644 --- a/libs/libvpx/vpx_dsp/arm/sad4d_neon.c +++ b/libs/libvpx/vpx_dsp/arm/sad4d_neon.c @@ -16,10 +16,10 @@ static INLINE unsigned int horizontal_long_add_16x8(const uint16x8_t vec_lo, const uint16x8_t vec_hi) { - const uint32x4_t vec_l_lo = vaddl_u16(vget_low_u16(vec_lo), - vget_high_u16(vec_lo)); - const uint32x4_t vec_l_hi = vaddl_u16(vget_low_u16(vec_hi), - vget_high_u16(vec_hi)); + const uint32x4_t vec_l_lo = + vaddl_u16(vget_low_u16(vec_lo), vget_high_u16(vec_lo)); + const uint32x4_t vec_l_hi = + vaddl_u16(vget_low_u16(vec_hi), vget_high_u16(vec_hi)); const uint32x4_t a = vaddq_u32(vec_l_lo, vec_l_hi); const uint64x2_t b = vpaddlq_u32(a); const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), @@ -33,8 +33,7 @@ static INLINE unsigned int horizontal_long_add_16x8(const uint16x8_t vec_lo, static void sad_neon_64(const uint8x16_t vec_src_00, const uint8x16_t vec_src_16, const uint8x16_t vec_src_32, - const uint8x16_t vec_src_48, - const uint8_t *ref, + const uint8x16_t vec_src_48, const uint8_t *ref, uint16x8_t *vec_sum_ref_lo, uint16x8_t *vec_sum_ref_hi) { 
const uint8x16_t vec_ref_00 = vld1q_u8(ref); @@ -63,8 +62,7 @@ static void sad_neon_64(const uint8x16_t vec_src_00, // Calculate the absolute difference of 32 bytes from vec_src_00, vec_src_16, // and ref. Accumulate partial sums in vec_sum_ref_lo and vec_sum_ref_hi. static void sad_neon_32(const uint8x16_t vec_src_00, - const uint8x16_t vec_src_16, - const uint8_t *ref, + const uint8x16_t vec_src_16, const uint8_t *ref, uint16x8_t *vec_sum_ref_lo, uint16x8_t *vec_sum_ref_hi) { const uint8x16_t vec_ref_00 = vld1q_u8(ref); @@ -81,7 +79,7 @@ static void sad_neon_32(const uint8x16_t vec_src_00, } void vpx_sad64x64x4d_neon(const uint8_t *src, int src_stride, - const uint8_t* const ref[4], int ref_stride, + const uint8_t *const ref[4], int ref_stride, uint32_t *res) { int i; uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0); @@ -127,7 +125,7 @@ void vpx_sad64x64x4d_neon(const uint8_t *src, int src_stride, } void vpx_sad32x32x4d_neon(const uint8_t *src, int src_stride, - const uint8_t* const ref[4], int ref_stride, + const uint8_t *const ref[4], int ref_stride, uint32_t *res) { int i; uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0); @@ -148,14 +146,14 @@ void vpx_sad32x32x4d_neon(const uint8_t *src, int src_stride, const uint8x16_t vec_src_00 = vld1q_u8(src); const uint8x16_t vec_src_16 = vld1q_u8(src + 16); - sad_neon_32(vec_src_00, vec_src_16, ref0, - &vec_sum_ref0_lo, &vec_sum_ref0_hi); - sad_neon_32(vec_src_00, vec_src_16, ref1, - &vec_sum_ref1_lo, &vec_sum_ref1_hi); - sad_neon_32(vec_src_00, vec_src_16, ref2, - &vec_sum_ref2_lo, &vec_sum_ref2_hi); - sad_neon_32(vec_src_00, vec_src_16, ref3, - &vec_sum_ref3_lo, &vec_sum_ref3_hi); + sad_neon_32(vec_src_00, vec_src_16, ref0, &vec_sum_ref0_lo, + &vec_sum_ref0_hi); + sad_neon_32(vec_src_00, vec_src_16, ref1, &vec_sum_ref1_lo, + &vec_sum_ref1_hi); + sad_neon_32(vec_src_00, vec_src_16, ref2, &vec_sum_ref2_lo, + &vec_sum_ref2_hi); + sad_neon_32(vec_src_00, vec_src_16, ref3, &vec_sum_ref3_lo, + &vec_sum_ref3_hi); src += src_stride; 
ref0 += ref_stride; @@ -171,7 +169,7 @@ void vpx_sad32x32x4d_neon(const uint8_t *src, int src_stride, } void vpx_sad16x16x4d_neon(const uint8_t *src, int src_stride, - const uint8_t* const ref[4], int ref_stride, + const uint8_t *const ref[4], int ref_stride, uint32_t *res) { int i; uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0); @@ -195,20 +193,20 @@ void vpx_sad16x16x4d_neon(const uint8_t *src, int src_stride, const uint8x16_t vec_ref2 = vld1q_u8(ref2); const uint8x16_t vec_ref3 = vld1q_u8(ref3); - vec_sum_ref0_lo = vabal_u8(vec_sum_ref0_lo, vget_low_u8(vec_src), - vget_low_u8(vec_ref0)); + vec_sum_ref0_lo = + vabal_u8(vec_sum_ref0_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref0)); vec_sum_ref0_hi = vabal_u8(vec_sum_ref0_hi, vget_high_u8(vec_src), vget_high_u8(vec_ref0)); - vec_sum_ref1_lo = vabal_u8(vec_sum_ref1_lo, vget_low_u8(vec_src), - vget_low_u8(vec_ref1)); + vec_sum_ref1_lo = + vabal_u8(vec_sum_ref1_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref1)); vec_sum_ref1_hi = vabal_u8(vec_sum_ref1_hi, vget_high_u8(vec_src), vget_high_u8(vec_ref1)); - vec_sum_ref2_lo = vabal_u8(vec_sum_ref2_lo, vget_low_u8(vec_src), - vget_low_u8(vec_ref2)); + vec_sum_ref2_lo = + vabal_u8(vec_sum_ref2_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref2)); vec_sum_ref2_hi = vabal_u8(vec_sum_ref2_hi, vget_high_u8(vec_src), vget_high_u8(vec_ref2)); - vec_sum_ref3_lo = vabal_u8(vec_sum_ref3_lo, vget_low_u8(vec_src), - vget_low_u8(vec_ref3)); + vec_sum_ref3_lo = + vabal_u8(vec_sum_ref3_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref3)); vec_sum_ref3_hi = vabal_u8(vec_sum_ref3_hi, vget_high_u8(vec_src), vget_high_u8(vec_ref3)); diff --git a/libs/libvpx/vpx_dsp/arm/sad_media.asm b/libs/libvpx/vpx_dsp/arm/sad_media.asm deleted file mode 100644 index aed1d3a22e..0000000000 --- a/libs/libvpx/vpx_dsp/arm/sad_media.asm +++ /dev/null @@ -1,95 +0,0 @@ -; -; Copyright (c) 2011 The WebM project authors. All Rights Reserved. 
-; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - - - EXPORT |vpx_sad16x16_media| - - ARM - REQUIRE8 - PRESERVE8 - - AREA ||.text||, CODE, READONLY, ALIGN=2 - -; r0 const unsigned char *src_ptr -; r1 int src_stride -; r2 const unsigned char *ref_ptr -; r3 int ref_stride -|vpx_sad16x16_media| PROC - stmfd sp!, {r4-r12, lr} - - pld [r0, r1, lsl #0] - pld [r2, r3, lsl #0] - pld [r0, r1, lsl #1] - pld [r2, r3, lsl #1] - - mov r4, #0 ; sad = 0; - mov r5, #8 ; loop count - -loop - ; 1st row - ldr r6, [r0, #0x0] ; load 4 src pixels (1A) - ldr r8, [r2, #0x0] ; load 4 ref pixels (1A) - ldr r7, [r0, #0x4] ; load 4 src pixels (1A) - ldr r9, [r2, #0x4] ; load 4 ref pixels (1A) - ldr r10, [r0, #0x8] ; load 4 src pixels (1B) - ldr r11, [r0, #0xC] ; load 4 src pixels (1B) - - usada8 r4, r8, r6, r4 ; calculate sad for 4 pixels - usad8 r8, r7, r9 ; calculate sad for 4 pixels - - ldr r12, [r2, #0x8] ; load 4 ref pixels (1B) - ldr lr, [r2, #0xC] ; load 4 ref pixels (1B) - - add r0, r0, r1 ; set src pointer to next row - add r2, r2, r3 ; set dst pointer to next row - - pld [r0, r1, lsl #1] - pld [r2, r3, lsl #1] - - usada8 r4, r10, r12, r4 ; calculate sad for 4 pixels - usada8 r8, r11, lr, r8 ; calculate sad for 4 pixels - - ldr r6, [r0, #0x0] ; load 4 src pixels (2A) - ldr r7, [r0, #0x4] ; load 4 src pixels (2A) - add r4, r4, r8 ; add partial sad values - - ; 2nd row - ldr r8, [r2, #0x0] ; load 4 ref pixels (2A) - ldr r9, [r2, #0x4] ; load 4 ref pixels (2A) - ldr r10, [r0, #0x8] ; load 4 src pixels (2B) - ldr r11, [r0, #0xC] ; load 4 src pixels (2B) - - usada8 r4, r6, r8, r4 ; calculate sad for 4 pixels - usad8 r8, r7, r9 ; calculate sad for 4 pixels - - ldr r12, [r2, #0x8] ; load 4 ref pixels (2B) - ldr 
lr, [r2, #0xC] ; load 4 ref pixels (2B) - - add r0, r0, r1 ; set src pointer to next row - add r2, r2, r3 ; set dst pointer to next row - - usada8 r4, r10, r12, r4 ; calculate sad for 4 pixels - usada8 r8, r11, lr, r8 ; calculate sad for 4 pixels - - pld [r0, r1, lsl #1] - pld [r2, r3, lsl #1] - - subs r5, r5, #1 ; decrement loop counter - add r4, r4, r8 ; add partial sad values - - bne loop - - mov r0, r4 ; return sad - ldmfd sp!, {r4-r12, pc} - - ENDP - - END - diff --git a/libs/libvpx/vpx_dsp/arm/sad_neon.c b/libs/libvpx/vpx_dsp/arm/sad_neon.c index 173f08ac3c..ff3228768c 100644 --- a/libs/libvpx/vpx_dsp/arm/sad_neon.c +++ b/libs/libvpx/vpx_dsp/arm/sad_neon.c @@ -14,114 +14,105 @@ #include "vpx/vpx_integer.h" -unsigned int vpx_sad8x16_neon( - unsigned char *src_ptr, - int src_stride, - unsigned char *ref_ptr, - int ref_stride) { - uint8x8_t d0, d8; - uint16x8_t q12; - uint32x4_t q1; - uint64x2_t q3; - uint32x2_t d5; - int i; +unsigned int vpx_sad8x16_neon(unsigned char *src_ptr, int src_stride, + unsigned char *ref_ptr, int ref_stride) { + uint8x8_t d0, d8; + uint16x8_t q12; + uint32x4_t q1; + uint64x2_t q3; + uint32x2_t d5; + int i; + d0 = vld1_u8(src_ptr); + src_ptr += src_stride; + d8 = vld1_u8(ref_ptr); + ref_ptr += ref_stride; + q12 = vabdl_u8(d0, d8); + + for (i = 0; i < 15; i++) { d0 = vld1_u8(src_ptr); src_ptr += src_stride; d8 = vld1_u8(ref_ptr); ref_ptr += ref_stride; - q12 = vabdl_u8(d0, d8); + q12 = vabal_u8(q12, d0, d8); + } - for (i = 0; i < 15; i++) { - d0 = vld1_u8(src_ptr); - src_ptr += src_stride; - d8 = vld1_u8(ref_ptr); - ref_ptr += ref_stride; - q12 = vabal_u8(q12, d0, d8); - } + q1 = vpaddlq_u16(q12); + q3 = vpaddlq_u32(q1); + d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)), + vreinterpret_u32_u64(vget_high_u64(q3))); - q1 = vpaddlq_u16(q12); - q3 = vpaddlq_u32(q1); - d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)), - vreinterpret_u32_u64(vget_high_u64(q3))); - - return vget_lane_u32(d5, 0); + return vget_lane_u32(d5, 0); } 
-unsigned int vpx_sad4x4_neon( - unsigned char *src_ptr, - int src_stride, - unsigned char *ref_ptr, - int ref_stride) { - uint8x8_t d0, d8; - uint16x8_t q12; - uint32x2_t d1; - uint64x1_t d3; - int i; +unsigned int vpx_sad4x4_neon(unsigned char *src_ptr, int src_stride, + unsigned char *ref_ptr, int ref_stride) { + uint8x8_t d0, d8; + uint16x8_t q12; + uint32x2_t d1; + uint64x1_t d3; + int i; + d0 = vld1_u8(src_ptr); + src_ptr += src_stride; + d8 = vld1_u8(ref_ptr); + ref_ptr += ref_stride; + q12 = vabdl_u8(d0, d8); + + for (i = 0; i < 3; i++) { d0 = vld1_u8(src_ptr); src_ptr += src_stride; d8 = vld1_u8(ref_ptr); ref_ptr += ref_stride; - q12 = vabdl_u8(d0, d8); + q12 = vabal_u8(q12, d0, d8); + } - for (i = 0; i < 3; i++) { - d0 = vld1_u8(src_ptr); - src_ptr += src_stride; - d8 = vld1_u8(ref_ptr); - ref_ptr += ref_stride; - q12 = vabal_u8(q12, d0, d8); - } + d1 = vpaddl_u16(vget_low_u16(q12)); + d3 = vpaddl_u32(d1); - d1 = vpaddl_u16(vget_low_u16(q12)); - d3 = vpaddl_u32(d1); - - return vget_lane_u32(vreinterpret_u32_u64(d3), 0); + return vget_lane_u32(vreinterpret_u32_u64(d3), 0); } -unsigned int vpx_sad16x8_neon( - unsigned char *src_ptr, - int src_stride, - unsigned char *ref_ptr, - int ref_stride) { - uint8x16_t q0, q4; - uint16x8_t q12, q13; - uint32x4_t q1; - uint64x2_t q3; - uint32x2_t d5; - int i; +unsigned int vpx_sad16x8_neon(unsigned char *src_ptr, int src_stride, + unsigned char *ref_ptr, int ref_stride) { + uint8x16_t q0, q4; + uint16x8_t q12, q13; + uint32x4_t q1; + uint64x2_t q3; + uint32x2_t d5; + int i; + q0 = vld1q_u8(src_ptr); + src_ptr += src_stride; + q4 = vld1q_u8(ref_ptr); + ref_ptr += ref_stride; + q12 = vabdl_u8(vget_low_u8(q0), vget_low_u8(q4)); + q13 = vabdl_u8(vget_high_u8(q0), vget_high_u8(q4)); + + for (i = 0; i < 7; i++) { q0 = vld1q_u8(src_ptr); src_ptr += src_stride; q4 = vld1q_u8(ref_ptr); ref_ptr += ref_stride; - q12 = vabdl_u8(vget_low_u8(q0), vget_low_u8(q4)); - q13 = vabdl_u8(vget_high_u8(q0), vget_high_u8(q4)); + q12 = 
vabal_u8(q12, vget_low_u8(q0), vget_low_u8(q4)); + q13 = vabal_u8(q13, vget_high_u8(q0), vget_high_u8(q4)); + } - for (i = 0; i < 7; i++) { - q0 = vld1q_u8(src_ptr); - src_ptr += src_stride; - q4 = vld1q_u8(ref_ptr); - ref_ptr += ref_stride; - q12 = vabal_u8(q12, vget_low_u8(q0), vget_low_u8(q4)); - q13 = vabal_u8(q13, vget_high_u8(q0), vget_high_u8(q4)); - } + q12 = vaddq_u16(q12, q13); + q1 = vpaddlq_u16(q12); + q3 = vpaddlq_u32(q1); + d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)), + vreinterpret_u32_u64(vget_high_u64(q3))); - q12 = vaddq_u16(q12, q13); - q1 = vpaddlq_u16(q12); - q3 = vpaddlq_u32(q1); - d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)), - vreinterpret_u32_u64(vget_high_u64(q3))); - - return vget_lane_u32(d5, 0); + return vget_lane_u32(d5, 0); } static INLINE unsigned int horizontal_long_add_16x8(const uint16x8_t vec_lo, const uint16x8_t vec_hi) { - const uint32x4_t vec_l_lo = vaddl_u16(vget_low_u16(vec_lo), - vget_high_u16(vec_lo)); - const uint32x4_t vec_l_hi = vaddl_u16(vget_low_u16(vec_hi), - vget_high_u16(vec_hi)); + const uint32x4_t vec_l_lo = + vaddl_u16(vget_low_u16(vec_lo), vget_high_u16(vec_lo)); + const uint32x4_t vec_l_hi = + vaddl_u16(vget_low_u16(vec_hi), vget_high_u16(vec_hi)); const uint32x4_t a = vaddq_u32(vec_l_lo, vec_l_hi); const uint64x2_t b = vpaddlq_u32(a); const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)), @@ -208,10 +199,10 @@ unsigned int vpx_sad16x16_neon(const uint8_t *src, int src_stride, const uint8x16_t vec_ref = vld1q_u8(ref); src += src_stride; ref += ref_stride; - vec_accum_lo = vabal_u8(vec_accum_lo, vget_low_u8(vec_src), - vget_low_u8(vec_ref)); - vec_accum_hi = vabal_u8(vec_accum_hi, vget_high_u8(vec_src), - vget_high_u8(vec_ref)); + vec_accum_lo = + vabal_u8(vec_accum_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref)); + vec_accum_hi = + vabal_u8(vec_accum_hi, vget_high_u8(vec_src), vget_high_u8(vec_ref)); } return horizontal_add_16x8(vaddq_u16(vec_accum_lo, vec_accum_hi)); } diff 
--git a/libs/libvpx/vpx_dsp/arm/save_reg_neon.asm b/libs/libvpx/vpx_dsp/arm/save_reg_neon.asm index c9ca10801d..9811cd5a5a 100644 --- a/libs/libvpx/vpx_dsp/arm/save_reg_neon.asm +++ b/libs/libvpx/vpx_dsp/arm/save_reg_neon.asm @@ -19,15 +19,13 @@ AREA ||.text||, CODE, READONLY, ALIGN=2 |vpx_push_neon| PROC - vst1.i64 {d8, d9, d10, d11}, [r0]! - vst1.i64 {d12, d13, d14, d15}, [r0]! + vstm r0!, {d8-d15} bx lr ENDP |vpx_pop_neon| PROC - vld1.i64 {d8, d9, d10, d11}, [r0]! - vld1.i64 {d12, d13, d14, d15}, [r0]! + vldm r0!, {d8-d15} bx lr ENDP diff --git a/libs/libvpx/vpx_dsp/arm/subpel_variance_media.c b/libs/libvpx/vpx_dsp/arm/subpel_variance_media.c deleted file mode 100644 index e7d8c85fb5..0000000000 --- a/libs/libvpx/vpx_dsp/arm/subpel_variance_media.c +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" -#include "vpx/vpx_integer.h" -#include "vpx_ports/mem.h" - -#if HAVE_MEDIA -static const int16_t bilinear_filters_media[8][2] = { - { 128, 0 }, - { 112, 16 }, - { 96, 32 }, - { 80, 48 }, - { 64, 64 }, - { 48, 80 }, - { 32, 96 }, - { 16, 112 } -}; - -extern void vpx_filter_block2d_bil_first_pass_media(const uint8_t *src_ptr, - uint16_t *dst_ptr, - uint32_t src_pitch, - uint32_t height, - uint32_t width, - const int16_t *filter); - -extern void vpx_filter_block2d_bil_second_pass_media(const uint16_t *src_ptr, - uint8_t *dst_ptr, - int32_t src_pitch, - uint32_t height, - uint32_t width, - const int16_t *filter); - - -unsigned int vpx_sub_pixel_variance8x8_media(const uint8_t *src_ptr, - int src_pixels_per_line, - int xoffset, int yoffset, - const uint8_t *dst_ptr, - int dst_pixels_per_line, - unsigned int *sse) { - uint16_t first_pass[10*8]; - uint8_t second_pass[8*8]; - const int16_t *HFilter, *VFilter; - - HFilter = bilinear_filters_media[xoffset]; - VFilter = bilinear_filters_media[yoffset]; - - vpx_filter_block2d_bil_first_pass_media(src_ptr, first_pass, - src_pixels_per_line, - 9, 8, HFilter); - vpx_filter_block2d_bil_second_pass_media(first_pass, second_pass, - 8, 8, 8, VFilter); - - return vpx_variance8x8_media(second_pass, 8, dst_ptr, - dst_pixels_per_line, sse); -} - -unsigned int vpx_sub_pixel_variance16x16_media(const uint8_t *src_ptr, - int src_pixels_per_line, - int xoffset, - int yoffset, - const uint8_t *dst_ptr, - int dst_pixels_per_line, - unsigned int *sse) { - uint16_t first_pass[36*16]; - uint8_t second_pass[20*16]; - const int16_t *HFilter, *VFilter; - unsigned int var; - - if (xoffset == 4 && yoffset == 0) { - var = vpx_variance_halfpixvar16x16_h_media(src_ptr, src_pixels_per_line, - dst_ptr, dst_pixels_per_line, - sse); - } else if (xoffset == 0 && yoffset == 4) { - var = vpx_variance_halfpixvar16x16_v_media(src_ptr, src_pixels_per_line, - dst_ptr, dst_pixels_per_line, - sse); - } 
else if (xoffset == 4 && yoffset == 4) { - var = vpx_variance_halfpixvar16x16_hv_media(src_ptr, src_pixels_per_line, - dst_ptr, dst_pixels_per_line, - sse); - } else { - HFilter = bilinear_filters_media[xoffset]; - VFilter = bilinear_filters_media[yoffset]; - - vpx_filter_block2d_bil_first_pass_media(src_ptr, first_pass, - src_pixels_per_line, - 17, 16, HFilter); - vpx_filter_block2d_bil_second_pass_media(first_pass, second_pass, - 16, 16, 16, VFilter); - - var = vpx_variance16x16_media(second_pass, 16, dst_ptr, - dst_pixels_per_line, sse); - } - return var; -} -#endif // HAVE_MEDIA diff --git a/libs/libvpx/vpx_dsp/arm/subpel_variance_neon.c b/libs/libvpx/vpx_dsp/arm/subpel_variance_neon.c index 40e2cc89b3..f044e11a15 100644 --- a/libs/libvpx/vpx_dsp/arm/subpel_variance_neon.c +++ b/libs/libvpx/vpx_dsp/arm/subpel_variance_neon.c @@ -18,14 +18,8 @@ #include "vpx_dsp/variance.h" static const uint8_t bilinear_filters[8][2] = { - { 128, 0, }, - { 112, 16, }, - { 96, 32, }, - { 80, 48, }, - { 64, 64, }, - { 48, 80, }, - { 32, 96, }, - { 16, 112, }, + { 128, 0 }, { 112, 16 }, { 96, 32 }, { 80, 48 }, + { 64, 64 }, { 48, 80 }, { 32, 96 }, { 16, 112 }, }; static void var_filter_block2d_bil_w8(const uint8_t *src_ptr, @@ -79,74 +73,61 @@ static void var_filter_block2d_bil_w16(const uint8_t *src_ptr, } } -unsigned int vpx_sub_pixel_variance8x8_neon(const uint8_t *src, - int src_stride, - int xoffset, - int yoffset, - const uint8_t *dst, - int dst_stride, +unsigned int vpx_sub_pixel_variance8x8_neon(const uint8_t *src, int src_stride, + int xoffset, int yoffset, + const uint8_t *dst, int dst_stride, unsigned int *sse) { DECLARE_ALIGNED(16, uint8_t, temp2[8 * 8]); DECLARE_ALIGNED(16, uint8_t, fdata3[9 * 8]); - var_filter_block2d_bil_w8(src, fdata3, src_stride, 1, - 9, 8, + var_filter_block2d_bil_w8(src, fdata3, src_stride, 1, 9, 8, bilinear_filters[xoffset]); - var_filter_block2d_bil_w8(fdata3, temp2, 8, 8, 8, - 8, bilinear_filters[yoffset]); + var_filter_block2d_bil_w8(fdata3, 
temp2, 8, 8, 8, 8, + bilinear_filters[yoffset]); return vpx_variance8x8_neon(temp2, 8, dst, dst_stride, sse); } unsigned int vpx_sub_pixel_variance16x16_neon(const uint8_t *src, - int src_stride, - int xoffset, - int yoffset, - const uint8_t *dst, + int src_stride, int xoffset, + int yoffset, const uint8_t *dst, int dst_stride, unsigned int *sse) { DECLARE_ALIGNED(16, uint8_t, temp2[16 * 16]); DECLARE_ALIGNED(16, uint8_t, fdata3[17 * 16]); - var_filter_block2d_bil_w16(src, fdata3, src_stride, 1, - 17, 16, + var_filter_block2d_bil_w16(src, fdata3, src_stride, 1, 17, 16, bilinear_filters[xoffset]); - var_filter_block2d_bil_w16(fdata3, temp2, 16, 16, 16, - 16, bilinear_filters[yoffset]); + var_filter_block2d_bil_w16(fdata3, temp2, 16, 16, 16, 16, + bilinear_filters[yoffset]); return vpx_variance16x16_neon(temp2, 16, dst, dst_stride, sse); } unsigned int vpx_sub_pixel_variance32x32_neon(const uint8_t *src, - int src_stride, - int xoffset, - int yoffset, - const uint8_t *dst, + int src_stride, int xoffset, + int yoffset, const uint8_t *dst, int dst_stride, unsigned int *sse) { DECLARE_ALIGNED(16, uint8_t, temp2[32 * 32]); DECLARE_ALIGNED(16, uint8_t, fdata3[33 * 32]); - var_filter_block2d_bil_w16(src, fdata3, src_stride, 1, - 33, 32, + var_filter_block2d_bil_w16(src, fdata3, src_stride, 1, 33, 32, bilinear_filters[xoffset]); - var_filter_block2d_bil_w16(fdata3, temp2, 32, 32, 32, - 32, bilinear_filters[yoffset]); + var_filter_block2d_bil_w16(fdata3, temp2, 32, 32, 32, 32, + bilinear_filters[yoffset]); return vpx_variance32x32_neon(temp2, 32, dst, dst_stride, sse); } unsigned int vpx_sub_pixel_variance64x64_neon(const uint8_t *src, - int src_stride, - int xoffset, - int yoffset, - const uint8_t *dst, + int src_stride, int xoffset, + int yoffset, const uint8_t *dst, int dst_stride, unsigned int *sse) { DECLARE_ALIGNED(16, uint8_t, temp2[64 * 64]); DECLARE_ALIGNED(16, uint8_t, fdata3[65 * 64]); - var_filter_block2d_bil_w16(src, fdata3, src_stride, 1, - 65, 64, + 
var_filter_block2d_bil_w16(src, fdata3, src_stride, 1, 65, 64, bilinear_filters[xoffset]); - var_filter_block2d_bil_w16(fdata3, temp2, 64, 64, 64, - 64, bilinear_filters[yoffset]); + var_filter_block2d_bil_w16(fdata3, temp2, 64, 64, 64, 64, + bilinear_filters[yoffset]); return vpx_variance64x64_neon(temp2, 64, dst, dst_stride, sse); } diff --git a/libs/libvpx/vpx_dsp/arm/subtract_neon.c b/libs/libvpx/vpx_dsp/arm/subtract_neon.c index 7b146095ea..ce81fb630f 100644 --- a/libs/libvpx/vpx_dsp/arm/subtract_neon.c +++ b/libs/libvpx/vpx_dsp/arm/subtract_neon.c @@ -13,10 +13,10 @@ #include "./vpx_config.h" #include "vpx/vpx_integer.h" -void vpx_subtract_block_neon(int rows, int cols, - int16_t *diff, ptrdiff_t diff_stride, - const uint8_t *src, ptrdiff_t src_stride, - const uint8_t *pred, ptrdiff_t pred_stride) { +void vpx_subtract_block_neon(int rows, int cols, int16_t *diff, + ptrdiff_t diff_stride, const uint8_t *src, + ptrdiff_t src_stride, const uint8_t *pred, + ptrdiff_t pred_stride) { int r, c; if (cols > 16) { @@ -24,38 +24,38 @@ void vpx_subtract_block_neon(int rows, int cols, for (c = 0; c < cols; c += 32) { const uint8x16_t v_src_00 = vld1q_u8(&src[c + 0]); const uint8x16_t v_src_16 = vld1q_u8(&src[c + 16]); - const uint8x16_t v_pred_00 = vld1q_u8(&pred[c + 0]); + const uint8x16_t v_pred_00 = vld1q_u8(&pred[c + 0]); const uint8x16_t v_pred_16 = vld1q_u8(&pred[c + 16]); - const uint16x8_t v_diff_lo_00 = vsubl_u8(vget_low_u8(v_src_00), - vget_low_u8(v_pred_00)); - const uint16x8_t v_diff_hi_00 = vsubl_u8(vget_high_u8(v_src_00), - vget_high_u8(v_pred_00)); - const uint16x8_t v_diff_lo_16 = vsubl_u8(vget_low_u8(v_src_16), - vget_low_u8(v_pred_16)); - const uint16x8_t v_diff_hi_16 = vsubl_u8(vget_high_u8(v_src_16), - vget_high_u8(v_pred_16)); - vst1q_s16(&diff[c + 0], vreinterpretq_s16_u16(v_diff_lo_00)); - vst1q_s16(&diff[c + 8], vreinterpretq_s16_u16(v_diff_hi_00)); + const uint16x8_t v_diff_lo_00 = + vsubl_u8(vget_low_u8(v_src_00), vget_low_u8(v_pred_00)); + const 
uint16x8_t v_diff_hi_00 = + vsubl_u8(vget_high_u8(v_src_00), vget_high_u8(v_pred_00)); + const uint16x8_t v_diff_lo_16 = + vsubl_u8(vget_low_u8(v_src_16), vget_low_u8(v_pred_16)); + const uint16x8_t v_diff_hi_16 = + vsubl_u8(vget_high_u8(v_src_16), vget_high_u8(v_pred_16)); + vst1q_s16(&diff[c + 0], vreinterpretq_s16_u16(v_diff_lo_00)); + vst1q_s16(&diff[c + 8], vreinterpretq_s16_u16(v_diff_hi_00)); vst1q_s16(&diff[c + 16], vreinterpretq_s16_u16(v_diff_lo_16)); vst1q_s16(&diff[c + 24], vreinterpretq_s16_u16(v_diff_hi_16)); } diff += diff_stride; pred += pred_stride; - src += src_stride; + src += src_stride; } } else if (cols > 8) { for (r = 0; r < rows; ++r) { const uint8x16_t v_src = vld1q_u8(&src[0]); const uint8x16_t v_pred = vld1q_u8(&pred[0]); - const uint16x8_t v_diff_lo = vsubl_u8(vget_low_u8(v_src), - vget_low_u8(v_pred)); - const uint16x8_t v_diff_hi = vsubl_u8(vget_high_u8(v_src), - vget_high_u8(v_pred)); + const uint16x8_t v_diff_lo = + vsubl_u8(vget_low_u8(v_src), vget_low_u8(v_pred)); + const uint16x8_t v_diff_hi = + vsubl_u8(vget_high_u8(v_src), vget_high_u8(v_pred)); vst1q_s16(&diff[0], vreinterpretq_s16_u16(v_diff_lo)); vst1q_s16(&diff[8], vreinterpretq_s16_u16(v_diff_hi)); diff += diff_stride; pred += pred_stride; - src += src_stride; + src += src_stride; } } else if (cols > 4) { for (r = 0; r < rows; ++r) { @@ -65,16 +65,15 @@ void vpx_subtract_block_neon(int rows, int cols, vst1q_s16(&diff[0], vreinterpretq_s16_u16(v_diff)); diff += diff_stride; pred += pred_stride; - src += src_stride; + src += src_stride; } } else { for (r = 0; r < rows; ++r) { - for (c = 0; c < cols; ++c) - diff[c] = src[c] - pred[c]; + for (c = 0; c < cols; ++c) diff[c] = src[c] - pred[c]; diff += diff_stride; pred += pred_stride; - src += src_stride; + src += src_stride; } } } diff --git a/libs/libvpx/vpx_dsp/arm/transpose_neon.h b/libs/libvpx/vpx_dsp/arm/transpose_neon.h new file mode 100644 index 0000000000..3d0b41f930 --- /dev/null +++ 
b/libs/libvpx/vpx_dsp/arm/transpose_neon.h @@ -0,0 +1,566 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VPX_DSP_ARM_TRANSPOSE_NEON_H_ +#define VPX_DSP_ARM_TRANSPOSE_NEON_H_ + +#include + +#include "./vpx_config.h" + +// Transpose 64 bit elements as follows: +// a0: 00 01 02 03 04 05 06 07 +// a1: 16 17 18 19 20 21 22 23 +// +// b0.val[0]: 00 01 02 03 16 17 18 19 +// b0.val[1]: 04 05 06 07 20 21 22 23 +static INLINE int16x8x2_t vpx_vtrnq_s64(int32x4_t a0, int32x4_t a1) { + int16x8x2_t b0; + b0.val[0] = vcombine_s16(vreinterpret_s16_s32(vget_low_s32(a0)), + vreinterpret_s16_s32(vget_low_s32(a1))); + b0.val[1] = vcombine_s16(vreinterpret_s16_s32(vget_high_s32(a0)), + vreinterpret_s16_s32(vget_high_s32(a1))); + return b0; +} + +static INLINE uint8x16x2_t vpx_vtrnq_u64(uint32x4_t a0, uint32x4_t a1) { + uint8x16x2_t b0; + b0.val[0] = vcombine_u8(vreinterpret_u8_u32(vget_low_u32(a0)), + vreinterpret_u8_u32(vget_low_u32(a1))); + b0.val[1] = vcombine_u8(vreinterpret_u8_u32(vget_high_u32(a0)), + vreinterpret_u8_u32(vget_high_u32(a1))); + return b0; +} + +// Note: Using 'd' registers or 'q' registers has almost identical speed. We use +// 'q' registers here to save some instructions. +static INLINE void transpose_u8_8x8(uint8x8_t *a0, uint8x8_t *a1, uint8x8_t *a2, + uint8x8_t *a3, uint8x8_t *a4, uint8x8_t *a5, + uint8x8_t *a6, uint8x8_t *a7) { + // Swap 8 bit elements. 
Goes from: + // a0: 00 01 02 03 04 05 06 07 + // a1: 10 11 12 13 14 15 16 17 + // a2: 20 21 22 23 24 25 26 27 + // a3: 30 31 32 33 34 35 36 37 + // a4: 40 41 42 43 44 45 46 47 + // a5: 50 51 52 53 54 55 56 57 + // a6: 60 61 62 63 64 65 66 67 + // a7: 70 71 72 73 74 75 76 77 + // to: + // b0.val[0]: 00 10 02 12 04 14 06 16 40 50 42 52 44 54 46 56 + // b0.val[1]: 01 11 03 13 05 15 07 17 41 51 43 53 45 55 47 57 + // b1.val[0]: 20 30 22 32 24 34 26 36 60 70 62 72 64 74 66 76 + // b1.val[1]: 21 31 23 33 25 35 27 37 61 71 63 73 65 75 67 77 + + const uint8x16x2_t b0 = + vtrnq_u8(vcombine_u8(*a0, *a4), vcombine_u8(*a1, *a5)); + const uint8x16x2_t b1 = + vtrnq_u8(vcombine_u8(*a2, *a6), vcombine_u8(*a3, *a7)); + + // Swap 16 bit elements resulting in: + // c0.val[0]: 00 10 20 30 04 14 24 34 40 50 60 70 44 54 64 74 + // c0.val[1]: 02 12 22 32 06 16 26 36 42 52 62 72 46 56 66 76 + // c1.val[0]: 01 11 21 31 05 15 25 35 41 51 61 71 45 55 65 75 + // c1.val[1]: 03 13 23 33 07 17 27 37 43 53 63 73 47 57 67 77 + + const uint16x8x2_t c0 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[0]), + vreinterpretq_u16_u8(b1.val[0])); + const uint16x8x2_t c1 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[1]), + vreinterpretq_u16_u8(b1.val[1])); + + // Unzip 32 bit elements resulting in: + // d0.val[0]: 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71 + // d0.val[1]: 04 14 24 34 44 54 64 74 05 15 25 35 45 55 65 75 + // d1.val[0]: 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73 + // d1.val[1]: 06 16 26 36 46 56 66 76 07 17 27 37 47 57 67 77 + const uint32x4x2_t d0 = vuzpq_u32(vreinterpretq_u32_u16(c0.val[0]), + vreinterpretq_u32_u16(c1.val[0])); + const uint32x4x2_t d1 = vuzpq_u32(vreinterpretq_u32_u16(c0.val[1]), + vreinterpretq_u32_u16(c1.val[1])); + + *a0 = vreinterpret_u8_u32(vget_low_u32(d0.val[0])); + *a1 = vreinterpret_u8_u32(vget_high_u32(d0.val[0])); + *a2 = vreinterpret_u8_u32(vget_low_u32(d1.val[0])); + *a3 = vreinterpret_u8_u32(vget_high_u32(d1.val[0])); + *a4 = 
vreinterpret_u8_u32(vget_low_u32(d0.val[1])); + *a5 = vreinterpret_u8_u32(vget_high_u32(d0.val[1])); + *a6 = vreinterpret_u8_u32(vget_low_u32(d1.val[1])); + *a7 = vreinterpret_u8_u32(vget_high_u32(d1.val[1])); +} + +static INLINE void transpose_s16_8x8(int16x8_t *a0, int16x8_t *a1, + int16x8_t *a2, int16x8_t *a3, + int16x8_t *a4, int16x8_t *a5, + int16x8_t *a6, int16x8_t *a7) { + // Swap 16 bit elements. Goes from: + // a0: 00 01 02 03 04 05 06 07 + // a1: 10 11 12 13 14 15 16 17 + // a2: 20 21 22 23 24 25 26 27 + // a3: 30 31 32 33 34 35 36 37 + // a4: 40 41 42 43 44 45 46 47 + // a5: 50 51 52 53 54 55 56 57 + // a6: 60 61 62 63 64 65 66 67 + // a7: 70 71 72 73 74 75 76 77 + // to: + // b0.val[0]: 00 10 02 12 04 14 06 16 + // b0.val[1]: 01 11 03 13 05 15 07 17 + // b1.val[0]: 20 30 22 32 24 34 26 36 + // b1.val[1]: 21 31 23 33 25 35 27 37 + // b2.val[0]: 40 50 42 52 44 54 46 56 + // b2.val[1]: 41 51 43 53 45 55 47 57 + // b3.val[0]: 60 70 62 72 64 74 66 76 + // b3.val[1]: 61 71 63 73 65 75 67 77 + + const int16x8x2_t b0 = vtrnq_s16(*a0, *a1); + const int16x8x2_t b1 = vtrnq_s16(*a2, *a3); + const int16x8x2_t b2 = vtrnq_s16(*a4, *a5); + const int16x8x2_t b3 = vtrnq_s16(*a6, *a7); + + // Swap 32 bit elements resulting in: + // c0.val[0]: 00 10 20 30 04 14 24 34 + // c0.val[1]: 02 12 22 32 06 16 26 36 + // c1.val[0]: 01 11 21 31 05 15 25 35 + // c1.val[1]: 03 13 23 33 07 17 27 37 + // c2.val[0]: 40 50 60 70 44 54 64 74 + // c2.val[1]: 42 52 62 72 46 56 66 76 + // c3.val[0]: 41 51 61 71 45 55 65 75 + // c3.val[1]: 43 53 63 73 47 57 67 77 + + const int32x4x2_t c0 = vtrnq_s32(vreinterpretq_s32_s16(b0.val[0]), + vreinterpretq_s32_s16(b1.val[0])); + const int32x4x2_t c1 = vtrnq_s32(vreinterpretq_s32_s16(b0.val[1]), + vreinterpretq_s32_s16(b1.val[1])); + const int32x4x2_t c2 = vtrnq_s32(vreinterpretq_s32_s16(b2.val[0]), + vreinterpretq_s32_s16(b3.val[0])); + const int32x4x2_t c3 = vtrnq_s32(vreinterpretq_s32_s16(b2.val[1]), + vreinterpretq_s32_s16(b3.val[1])); + + // Swap 
64 bit elements resulting in: + // d0.val[0]: 00 10 20 30 40 50 60 70 + // d0.val[1]: 04 14 24 34 44 54 64 74 + // d1.val[0]: 01 11 21 31 41 51 61 71 + // d1.val[1]: 05 15 25 35 45 55 65 75 + // d2.val[0]: 02 12 22 32 42 52 62 72 + // d2.val[1]: 06 16 26 36 46 56 66 76 + // d3.val[0]: 03 13 23 33 43 53 63 73 + // d3.val[1]: 07 17 27 37 47 57 67 77 + const int16x8x2_t d0 = vpx_vtrnq_s64(c0.val[0], c2.val[0]); + const int16x8x2_t d1 = vpx_vtrnq_s64(c1.val[0], c3.val[0]); + const int16x8x2_t d2 = vpx_vtrnq_s64(c0.val[1], c2.val[1]); + const int16x8x2_t d3 = vpx_vtrnq_s64(c1.val[1], c3.val[1]); + + *a0 = d0.val[0]; + *a1 = d1.val[0]; + *a2 = d2.val[0]; + *a3 = d3.val[0]; + *a4 = d0.val[1]; + *a5 = d1.val[1]; + *a6 = d2.val[1]; + *a7 = d3.val[1]; +} + +static INLINE void transpose_u8_16x8( + const uint8x16_t i0, const uint8x16_t i1, const uint8x16_t i2, + const uint8x16_t i3, const uint8x16_t i4, const uint8x16_t i5, + const uint8x16_t i6, const uint8x16_t i7, uint8x8_t *o0, uint8x8_t *o1, + uint8x8_t *o2, uint8x8_t *o3, uint8x8_t *o4, uint8x8_t *o5, uint8x8_t *o6, + uint8x8_t *o7, uint8x8_t *o8, uint8x8_t *o9, uint8x8_t *o10, uint8x8_t *o11, + uint8x8_t *o12, uint8x8_t *o13, uint8x8_t *o14, uint8x8_t *o15) { + // Swap 8 bit elements. 
Goes from: + // i0: 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + // i1: 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + // i2: 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + // i3: 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + // i4: 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + // i5: 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + // i6: 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + // i7: 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + // to: + // b0.val[0]: 00 10 02 12 04 14 06 16 08 18 0A 1A 0C 1C 0E 1E + // b0.val[1]: 01 11 03 13 05 15 07 17 09 19 0B 1B 0D 1D 0F 1F + // b1.val[0]: 20 30 22 32 24 34 26 36 28 38 2A 3A 2C 3C 2E 3E + // b1.val[1]: 21 31 23 33 25 35 27 37 29 39 2B 3B 2D 3D 2F 3F + // b2.val[0]: 40 50 42 52 44 54 46 56 48 58 4A 5A 4C 5C 4E 5E + // b2.val[1]: 41 51 43 53 45 55 47 57 49 59 4B 5B 4D 5D 4F 5F + // b3.val[0]: 60 70 62 72 64 74 66 76 68 78 6A 7A 6C 7C 6E 7E + // b3.val[1]: 61 71 63 73 65 75 67 77 69 79 6B 7B 6D 7D 6F 7F + const uint8x16x2_t b0 = vtrnq_u8(i0, i1); + const uint8x16x2_t b1 = vtrnq_u8(i2, i3); + const uint8x16x2_t b2 = vtrnq_u8(i4, i5); + const uint8x16x2_t b3 = vtrnq_u8(i6, i7); + + // Swap 16 bit elements resulting in: + // c0.val[0]: 00 10 20 30 04 14 24 34 08 18 28 38 0C 1C 2C 3C + // c0.val[1]: 02 12 22 32 06 16 26 36 0A 1A 2A 3A 0E 1E 2E 3E + // c1.val[0]: 01 11 21 31 05 15 25 35 09 19 29 39 0D 1D 2D 3D + // c1.val[1]: 03 13 23 33 07 17 27 37 0B 1B 2B 3B 0F 1F 2F 3F + // c2.val[0]: 40 50 60 70 44 54 64 74 48 58 68 78 4C 5C 6C 7C + // c2.val[1]: 42 52 62 72 46 56 66 76 4A 5A 6A 7A 4E 5E 6E 7E + // c3.val[0]: 41 51 61 71 45 55 65 75 49 59 69 79 4D 5D 6D 7D + // c3.val[1]: 43 53 63 73 47 57 67 77 4B 5B 6B 7B 4F 5F 6F 7F + const uint16x8x2_t c0 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[0]), + vreinterpretq_u16_u8(b1.val[0])); + const uint16x8x2_t c1 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[1]), + vreinterpretq_u16_u8(b1.val[1])); + const uint16x8x2_t c2 = vtrnq_u16(vreinterpretq_u16_u8(b2.val[0]), + 
vreinterpretq_u16_u8(b3.val[0])); + const uint16x8x2_t c3 = vtrnq_u16(vreinterpretq_u16_u8(b2.val[1]), + vreinterpretq_u16_u8(b3.val[1])); + + // Swap 32 bit elements resulting in: + // d0.val[0]: 00 10 20 30 40 50 60 70 08 18 28 38 48 58 68 78 + // d0.val[1]: 04 14 24 34 44 54 64 74 0C 1C 2C 3C 4C 5C 6C 7C + // d1.val[0]: 02 12 22 32 42 52 62 72 0A 1A 2A 3A 4A 5A 6A 7A + // d1.val[1]: 06 16 26 36 46 56 66 76 0E 1E 2E 3E 4E 5E 6E 7E + // d2.val[0]: 01 11 21 31 41 51 61 71 09 19 29 39 49 59 69 79 + // d2.val[1]: 05 15 25 35 45 55 65 75 0D 1D 2D 3D 4D 5D 6D 7D + // d3.val[0]: 03 13 23 33 43 53 63 73 0B 1B 2B 3B 4B 5B 6B 7B + // d3.val[1]: 07 17 27 37 47 57 67 77 0F 1F 2F 3F 4F 5F 6F 7F + const uint32x4x2_t d0 = vtrnq_u32(vreinterpretq_u32_u16(c0.val[0]), + vreinterpretq_u32_u16(c2.val[0])); + const uint32x4x2_t d1 = vtrnq_u32(vreinterpretq_u32_u16(c0.val[1]), + vreinterpretq_u32_u16(c2.val[1])); + const uint32x4x2_t d2 = vtrnq_u32(vreinterpretq_u32_u16(c1.val[0]), + vreinterpretq_u32_u16(c3.val[0])); + const uint32x4x2_t d3 = vtrnq_u32(vreinterpretq_u32_u16(c1.val[1]), + vreinterpretq_u32_u16(c3.val[1])); + + // Output: + // o0 : 00 10 20 30 40 50 60 70 + // o1 : 01 11 21 31 41 51 61 71 + // o2 : 02 12 22 32 42 52 62 72 + // o3 : 03 13 23 33 43 53 63 73 + // o4 : 04 14 24 34 44 54 64 74 + // o5 : 05 15 25 35 45 55 65 75 + // o6 : 06 16 26 36 46 56 66 76 + // o7 : 07 17 27 37 47 57 67 77 + // o8 : 08 18 28 38 48 58 68 78 + // o9 : 09 19 29 39 49 59 69 79 + // o10: 0A 1A 2A 3A 4A 5A 6A 7A + // o11: 0B 1B 2B 3B 4B 5B 6B 7B + // o12: 0C 1C 2C 3C 4C 5C 6C 7C + // o13: 0D 1D 2D 3D 4D 5D 6D 7D + // o14: 0E 1E 2E 3E 4E 5E 6E 7E + // o15: 0F 1F 2F 3F 4F 5F 6F 7F + *o0 = vget_low_u8(vreinterpretq_u8_u32(d0.val[0])); + *o1 = vget_low_u8(vreinterpretq_u8_u32(d2.val[0])); + *o2 = vget_low_u8(vreinterpretq_u8_u32(d1.val[0])); + *o3 = vget_low_u8(vreinterpretq_u8_u32(d3.val[0])); + *o4 = vget_low_u8(vreinterpretq_u8_u32(d0.val[1])); + *o5 = 
vget_low_u8(vreinterpretq_u8_u32(d2.val[1])); + *o6 = vget_low_u8(vreinterpretq_u8_u32(d1.val[1])); + *o7 = vget_low_u8(vreinterpretq_u8_u32(d3.val[1])); + *o8 = vget_high_u8(vreinterpretq_u8_u32(d0.val[0])); + *o9 = vget_high_u8(vreinterpretq_u8_u32(d2.val[0])); + *o10 = vget_high_u8(vreinterpretq_u8_u32(d1.val[0])); + *o11 = vget_high_u8(vreinterpretq_u8_u32(d3.val[0])); + *o12 = vget_high_u8(vreinterpretq_u8_u32(d0.val[1])); + *o13 = vget_high_u8(vreinterpretq_u8_u32(d2.val[1])); + *o14 = vget_high_u8(vreinterpretq_u8_u32(d1.val[1])); + *o15 = vget_high_u8(vreinterpretq_u8_u32(d3.val[1])); +} + +static INLINE void transpose_u8_8x16( + const uint8x8_t i0, const uint8x8_t i1, const uint8x8_t i2, + const uint8x8_t i3, const uint8x8_t i4, const uint8x8_t i5, + const uint8x8_t i6, const uint8x8_t i7, const uint8x8_t i8, + const uint8x8_t i9, const uint8x8_t i10, const uint8x8_t i11, + const uint8x8_t i12, const uint8x8_t i13, const uint8x8_t i14, + const uint8x8_t i15, uint8x16_t *o0, uint8x16_t *o1, uint8x16_t *o2, + uint8x16_t *o3, uint8x16_t *o4, uint8x16_t *o5, uint8x16_t *o6, + uint8x16_t *o7) { + // Combine 8 bit elements. 
Goes from: + // i0 : 00 01 02 03 04 05 06 07 + // i1 : 10 11 12 13 14 15 16 17 + // i2 : 20 21 22 23 24 25 26 27 + // i3 : 30 31 32 33 34 35 36 37 + // i4 : 40 41 42 43 44 45 46 47 + // i5 : 50 51 52 53 54 55 56 57 + // i6 : 60 61 62 63 64 65 66 67 + // i7 : 70 71 72 73 74 75 76 77 + // i8 : 80 81 82 83 84 85 86 87 + // i9 : 90 91 92 93 94 95 96 97 + // i10: A0 A1 A2 A3 A4 A5 A6 A7 + // i11: B0 B1 B2 B3 B4 B5 B6 B7 + // i12: C0 C1 C2 C3 C4 C5 C6 C7 + // i13: D0 D1 D2 D3 D4 D5 D6 D7 + // i14: E0 E1 E2 E3 E4 E5 E6 E7 + // i15: F0 F1 F2 F3 F4 F5 F6 F7 + // to: + // a0: 00 01 02 03 04 05 06 07 80 81 82 83 84 85 86 87 + // a1: 10 11 12 13 14 15 16 17 90 91 92 93 94 95 96 97 + // a2: 20 21 22 23 24 25 26 27 A0 A1 A2 A3 A4 A5 A6 A7 + // a3: 30 31 32 33 34 35 36 37 B0 B1 B2 B3 B4 B5 B6 B7 + // a4: 40 41 42 43 44 45 46 47 C0 C1 C2 C3 C4 C5 C6 C7 + // a5: 50 51 52 53 54 55 56 57 D0 D1 D2 D3 D4 D5 D6 D7 + // a6: 60 61 62 63 64 65 66 67 E0 E1 E2 E3 E4 E5 E6 E7 + // a7: 70 71 72 73 74 75 76 77 F0 F1 F2 F3 F4 F5 F6 F7 + const uint8x16_t a0 = vcombine_u8(i0, i8); + const uint8x16_t a1 = vcombine_u8(i1, i9); + const uint8x16_t a2 = vcombine_u8(i2, i10); + const uint8x16_t a3 = vcombine_u8(i3, i11); + const uint8x16_t a4 = vcombine_u8(i4, i12); + const uint8x16_t a5 = vcombine_u8(i5, i13); + const uint8x16_t a6 = vcombine_u8(i6, i14); + const uint8x16_t a7 = vcombine_u8(i7, i15); + + // Swap 8 bit elements resulting in: + // b0.val[0]: 00 10 02 12 04 14 06 16 80 90 82 92 84 94 86 96 + // b0.val[1]: 01 11 03 13 05 15 07 17 81 91 83 93 85 95 87 97 + // b1.val[0]: 20 30 22 32 24 34 26 36 A0 B0 A2 B2 A4 B4 A6 B6 + // b1.val[1]: 21 31 23 33 25 35 27 37 A1 B1 A3 B3 A5 B5 A7 B7 + // b2.val[0]: 40 50 42 52 44 54 46 56 C0 D0 C2 D2 C4 D4 C6 D6 + // b2.val[1]: 41 51 43 53 45 55 47 57 C1 D1 C3 D3 C5 D5 C7 D7 + // b3.val[0]: 60 70 62 72 64 74 66 76 E0 F0 E2 F2 E4 F4 E6 F6 + // b3.val[1]: 61 71 63 73 65 75 67 77 E1 F1 E3 F3 E5 F5 E7 F7 + const uint8x16x2_t b0 = vtrnq_u8(a0, a1); + const 
uint8x16x2_t b1 = vtrnq_u8(a2, a3); + const uint8x16x2_t b2 = vtrnq_u8(a4, a5); + const uint8x16x2_t b3 = vtrnq_u8(a6, a7); + + // Swap 16 bit elements resulting in: + // c0.val[0]: 00 10 20 30 04 14 24 34 80 90 A0 B0 84 94 A4 B4 + // c0.val[1]: 02 12 22 32 06 16 26 36 82 92 A2 B2 86 96 A6 B6 + // c1.val[0]: 01 11 21 31 05 15 25 35 81 91 A1 B1 85 95 A5 B5 + // c1.val[1]: 03 13 23 33 07 17 27 37 83 93 A3 B3 87 97 A7 B7 + // c2.val[0]: 40 50 60 70 44 54 64 74 C0 D0 E0 F0 C4 D4 E4 F4 + // c2.val[1]: 42 52 62 72 46 56 66 76 C2 D2 E2 F2 C6 D6 E6 F6 + // c3.val[0]: 41 51 61 71 45 55 65 75 C1 D1 E1 F1 C5 D5 E5 F5 + // c3.val[1]: 43 53 63 73 47 57 67 77 C3 D3 E3 F3 C7 D7 E7 F7 + const uint16x8x2_t c0 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[0]), + vreinterpretq_u16_u8(b1.val[0])); + const uint16x8x2_t c1 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[1]), + vreinterpretq_u16_u8(b1.val[1])); + const uint16x8x2_t c2 = vtrnq_u16(vreinterpretq_u16_u8(b2.val[0]), + vreinterpretq_u16_u8(b3.val[0])); + const uint16x8x2_t c3 = vtrnq_u16(vreinterpretq_u16_u8(b2.val[1]), + vreinterpretq_u16_u8(b3.val[1])); + + // Swap 32 bit elements resulting in: + // d0.val[0]: 00 10 20 30 40 50 60 70 80 90 A0 B0 C0 D0 E0 F0 + // d0.val[1]: 04 14 24 34 44 54 64 74 84 94 A4 B4 C4 D4 E4 F4 + // d1.val[0]: 02 12 22 32 42 52 62 72 82 92 A2 B2 C2 D2 E2 F2 + // d1.val[1]: 06 16 26 36 46 56 66 76 86 96 A6 B6 C6 D6 E6 F6 + // d2.val[0]: 01 11 21 31 41 51 61 71 81 91 A1 B1 C1 D1 E1 F1 + // d2.val[1]: 05 15 25 35 45 55 65 75 85 95 A5 B5 C5 D5 E5 F5 + // d3.val[0]: 03 13 23 33 43 53 63 73 83 93 A3 B3 C3 D3 E3 F3 + // d3.val[1]: 07 17 27 37 47 57 67 77 87 97 A7 B7 C7 D7 E7 F7 + const uint32x4x2_t d0 = vtrnq_u32(vreinterpretq_u32_u16(c0.val[0]), + vreinterpretq_u32_u16(c2.val[0])); + const uint32x4x2_t d1 = vtrnq_u32(vreinterpretq_u32_u16(c0.val[1]), + vreinterpretq_u32_u16(c2.val[1])); + const uint32x4x2_t d2 = vtrnq_u32(vreinterpretq_u32_u16(c1.val[0]), + vreinterpretq_u32_u16(c3.val[0])); + const uint32x4x2_t d3 = 
vtrnq_u32(vreinterpretq_u32_u16(c1.val[1]), + vreinterpretq_u32_u16(c3.val[1])); + + // Output: + // o0: 00 10 20 30 40 50 60 70 80 90 A0 B0 C0 D0 E0 F0 + // o1: 01 11 21 31 41 51 61 71 81 91 A1 B1 C1 D1 E1 F1 + // o2: 02 12 22 32 42 52 62 72 82 92 A2 B2 C2 D2 E2 F2 + // o3: 03 13 23 33 43 53 63 73 83 93 A3 B3 C3 D3 E3 F3 + // o4: 04 14 24 34 44 54 64 74 84 94 A4 B4 C4 D4 E4 F4 + // o5: 05 15 25 35 45 55 65 75 85 95 A5 B5 C5 D5 E5 F5 + // o6: 06 16 26 36 46 56 66 76 86 96 A6 B6 C6 D6 E6 F6 + // o7: 07 17 27 37 47 57 67 77 87 97 A7 B7 C7 D7 E7 F7 + *o0 = vreinterpretq_u8_u32(d0.val[0]); + *o1 = vreinterpretq_u8_u32(d2.val[0]); + *o2 = vreinterpretq_u8_u32(d1.val[0]); + *o3 = vreinterpretq_u8_u32(d3.val[0]); + *o4 = vreinterpretq_u8_u32(d0.val[1]); + *o5 = vreinterpretq_u8_u32(d2.val[1]); + *o6 = vreinterpretq_u8_u32(d1.val[1]); + *o7 = vreinterpretq_u8_u32(d3.val[1]); +} + +static INLINE void transpose_u8_16x16( + const uint8x16_t i0, const uint8x16_t i1, const uint8x16_t i2, + const uint8x16_t i3, const uint8x16_t i4, const uint8x16_t i5, + const uint8x16_t i6, const uint8x16_t i7, const uint8x16_t i8, + const uint8x16_t i9, const uint8x16_t i10, const uint8x16_t i11, + const uint8x16_t i12, const uint8x16_t i13, const uint8x16_t i14, + const uint8x16_t i15, uint8x16_t *o0, uint8x16_t *o1, uint8x16_t *o2, + uint8x16_t *o3, uint8x16_t *o4, uint8x16_t *o5, uint8x16_t *o6, + uint8x16_t *o7, uint8x16_t *o8, uint8x16_t *o9, uint8x16_t *o10, + uint8x16_t *o11, uint8x16_t *o12, uint8x16_t *o13, uint8x16_t *o14, + uint8x16_t *o15) { + // Swap 8 bit elements. 
Goes from: + // i0: 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F + // i1: 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F + // i2: 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 2F + // i3: 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F + // i4: 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F + // i5: 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F + // i6: 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F + // i7: 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F + // i8: 80 81 82 83 84 85 86 87 88 89 8A 8B 8C 8D 8E 8F + // i9: 90 91 92 93 94 95 96 97 98 99 9A 9B 9C 9D 9E 9F + // i10: A0 A1 A2 A3 A4 A5 A6 A7 A8 A9 AA AB AC AD AE AF + // i11: B0 B1 B2 B3 B4 B5 B6 B7 B8 B9 BA BB BC BD BE BF + // i12: C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF + // i13: D0 D1 D2 D3 D4 D5 D6 D7 D8 D9 DA DB DC DD DE DF + // i14: E0 E1 E2 E3 E4 E5 E6 E7 E8 E9 EA EB EC ED EE EF + // i15: F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 FA FB FC FD FE FF + // to: + // b0.val[0]: 00 10 02 12 04 14 06 16 08 18 0A 1A 0C 1C 0E 1E + // b0.val[1]: 01 11 03 13 05 15 07 17 09 19 0B 1B 0D 1D 0F 1F + // b1.val[0]: 20 30 22 32 24 34 26 36 28 38 2A 3A 2C 3C 2E 3E + // b1.val[1]: 21 31 23 33 25 35 27 37 29 39 2B 3B 2D 3D 2F 3F + // b2.val[0]: 40 50 42 52 44 54 46 56 48 58 4A 5A 4C 5C 4E 5E + // b2.val[1]: 41 51 43 53 45 55 47 57 49 59 4B 5B 4D 5D 4F 5F + // b3.val[0]: 60 70 62 72 64 74 66 76 68 78 6A 7A 6C 7C 6E 7E + // b3.val[1]: 61 71 63 73 65 75 67 77 69 79 6B 7B 6D 7D 6F 7F + // b4.val[0]: 80 90 82 92 84 94 86 96 88 98 8A 9A 8C 9C 8E 9E + // b4.val[1]: 81 91 83 93 85 95 87 97 89 99 8B 9B 8D 9D 8F 9F + // b5.val[0]: A0 B0 A2 B2 A4 B4 A6 B6 A8 B8 AA BA AC BC AE BE + // b5.val[1]: A1 B1 A3 B3 A5 B5 A7 B7 A9 B9 AB BB AD BD AF BF + // b6.val[0]: C0 D0 C2 D2 C4 D4 C6 D6 C8 D8 CA DA CC DC CE DE + // b6.val[1]: C1 D1 C3 D3 C5 D5 C7 D7 C9 D9 CB DB CD DD CF DF + // b7.val[0]: E0 F0 E2 F2 E4 F4 E6 F6 E8 F8 EA FA EC FC EE FE + // b7.val[1]: E1 F1 E3 F3 E5 F5 E7 F7 E9 F9 EB FB ED FD EF FF + const uint8x16x2_t b0 = 
vtrnq_u8(i0, i1); + const uint8x16x2_t b1 = vtrnq_u8(i2, i3); + const uint8x16x2_t b2 = vtrnq_u8(i4, i5); + const uint8x16x2_t b3 = vtrnq_u8(i6, i7); + const uint8x16x2_t b4 = vtrnq_u8(i8, i9); + const uint8x16x2_t b5 = vtrnq_u8(i10, i11); + const uint8x16x2_t b6 = vtrnq_u8(i12, i13); + const uint8x16x2_t b7 = vtrnq_u8(i14, i15); + + // Swap 16 bit elements resulting in: + // c0.val[0]: 00 10 20 30 04 14 24 34 08 18 28 38 0C 1C 2C 3C + // c0.val[1]: 02 12 22 32 06 16 26 36 0A 1A 2A 3A 0E 1E 2E 3E + // c1.val[0]: 01 11 21 31 05 15 25 35 09 19 29 39 0D 1D 2D 3D + // c1.val[1]: 03 13 23 33 07 17 27 37 0B 1B 2B 3B 0F 1F 2F 3F + // c2.val[0]: 40 50 60 70 44 54 64 74 48 58 68 78 4C 5C 6C 7C + // c2.val[1]: 42 52 62 72 46 56 66 76 4A 5A 6A 7A 4E 5E 6E 7E + // c3.val[0]: 41 51 61 71 45 55 65 75 49 59 69 79 4D 5D 6D 7D + // c3.val[1]: 43 53 63 73 47 57 67 77 4B 5B 6B 7B 4F 5F 6F 7F + // c4.val[0]: 80 90 A0 B0 84 94 A4 B4 88 98 A8 B8 8C 9C AC BC + // c4.val[1]: 82 92 A2 B2 86 96 A6 B6 8A 9A AA BA 8E 9E AE BE + // c5.val[0]: 81 91 A1 B1 85 95 A5 B5 89 99 A9 B9 8D 9D AD BD + // c5.val[1]: 83 93 A3 B3 87 97 A7 B7 8B 9B AB BB 8F 9F AF BF + // c6.val[0]: C0 D0 E0 F0 C4 D4 E4 F4 C8 D8 E8 F8 CC DC EC FC + // c6.val[1]: C2 D2 E2 F2 C6 D6 E6 F6 CA DA EA FA CE DE EE FE + // c7.val[0]: C1 D1 E1 F1 C5 D5 E5 F5 C9 D9 E9 F9 CD DD ED FD + // c7.val[1]: C3 D3 E3 F3 C7 D7 E7 F7 CB DB EB FB CF DF EF FF + const uint16x8x2_t c0 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[0]), + vreinterpretq_u16_u8(b1.val[0])); + const uint16x8x2_t c1 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[1]), + vreinterpretq_u16_u8(b1.val[1])); + const uint16x8x2_t c2 = vtrnq_u16(vreinterpretq_u16_u8(b2.val[0]), + vreinterpretq_u16_u8(b3.val[0])); + const uint16x8x2_t c3 = vtrnq_u16(vreinterpretq_u16_u8(b2.val[1]), + vreinterpretq_u16_u8(b3.val[1])); + const uint16x8x2_t c4 = vtrnq_u16(vreinterpretq_u16_u8(b4.val[0]), + vreinterpretq_u16_u8(b5.val[0])); + const uint16x8x2_t c5 = vtrnq_u16(vreinterpretq_u16_u8(b4.val[1]), + 
vreinterpretq_u16_u8(b5.val[1])); + const uint16x8x2_t c6 = vtrnq_u16(vreinterpretq_u16_u8(b6.val[0]), + vreinterpretq_u16_u8(b7.val[0])); + const uint16x8x2_t c7 = vtrnq_u16(vreinterpretq_u16_u8(b6.val[1]), + vreinterpretq_u16_u8(b7.val[1])); + + // Swap 32 bit elements resulting in: + // d0.val[0]: 00 10 20 30 40 50 60 70 08 18 28 38 48 58 68 78 + // d0.val[1]: 04 14 24 34 44 54 64 74 0C 1C 2C 3C 4C 5C 6C 7C + // d1.val[0]: 02 12 22 32 42 52 62 72 0A 1A 2A 3A 4A 5A 6A 7A + // d1.val[1]: 06 16 26 36 46 56 66 76 0E 1E 2E 3E 4E 5E 6E 7E + // d2.val[0]: 01 11 21 31 41 51 61 71 09 19 29 39 49 59 69 79 + // d2.val[1]: 05 15 25 35 45 55 65 75 0D 1D 2D 3D 4D 5D 6D 7D + // d3.val[0]: 03 13 23 33 43 53 63 73 0B 1B 2B 3B 4B 5B 6B 7B + // d3.val[1]: 07 17 27 37 47 57 67 77 0F 1F 2F 3F 4F 5F 6F 7F + // d4.val[0]: 80 90 A0 B0 C0 D0 E0 F0 88 98 A8 B8 C8 D8 E8 F8 + // d4.val[1]: 84 94 A4 B4 C4 D4 E4 F4 8C 9C AC BC CC DC EC FC + // d5.val[0]: 82 92 A2 B2 C2 D2 E2 F2 8A 9A AA BA CA DA EA FA + // d5.val[1]: 86 96 A6 B6 C6 D6 E6 F6 8E 9E AE BE CE DE EE FE + // d6.val[0]: 81 91 A1 B1 C1 D1 E1 F1 89 99 A9 B9 C9 D9 E9 F9 + // d6.val[1]: 85 95 A5 B5 C5 D5 E5 F5 8D 9D AD BD CD DD ED FD + // d7.val[0]: 83 93 A3 B3 C3 D3 E3 F3 8B 9B AB BB CB DB EB FB + // d7.val[1]: 87 97 A7 B7 C7 D7 E7 F7 8F 9F AF BF CF DF EF FF + const uint32x4x2_t d0 = vtrnq_u32(vreinterpretq_u32_u16(c0.val[0]), + vreinterpretq_u32_u16(c2.val[0])); + const uint32x4x2_t d1 = vtrnq_u32(vreinterpretq_u32_u16(c0.val[1]), + vreinterpretq_u32_u16(c2.val[1])); + const uint32x4x2_t d2 = vtrnq_u32(vreinterpretq_u32_u16(c1.val[0]), + vreinterpretq_u32_u16(c3.val[0])); + const uint32x4x2_t d3 = vtrnq_u32(vreinterpretq_u32_u16(c1.val[1]), + vreinterpretq_u32_u16(c3.val[1])); + const uint32x4x2_t d4 = vtrnq_u32(vreinterpretq_u32_u16(c4.val[0]), + vreinterpretq_u32_u16(c6.val[0])); + const uint32x4x2_t d5 = vtrnq_u32(vreinterpretq_u32_u16(c4.val[1]), + vreinterpretq_u32_u16(c6.val[1])); + const uint32x4x2_t d6 = 
vtrnq_u32(vreinterpretq_u32_u16(c5.val[0]), + vreinterpretq_u32_u16(c7.val[0])); + const uint32x4x2_t d7 = vtrnq_u32(vreinterpretq_u32_u16(c5.val[1]), + vreinterpretq_u32_u16(c7.val[1])); + + // Swap 64 bit elements resulting in: + // e0.val[0]: 00 10 20 30 40 50 60 70 80 90 A0 B0 C0 D0 E0 F0 + // e0.val[1]: 08 18 28 38 48 58 68 78 88 98 A8 B8 C8 D8 E8 F8 + // e1.val[0]: 01 11 21 31 41 51 61 71 84 94 A4 B4 C4 D4 E4 F4 + // e1.val[1]: 09 19 29 39 49 59 69 79 89 99 A9 B9 C9 D9 E9 F9 + // e2.val[0]: 02 12 22 32 42 52 62 72 82 92 A2 B2 C2 D2 E2 F2 + // e2.val[1]: 0A 1A 2A 3A 4A 5A 6A 7A 8A 9A AA BA CA DA EA FA + // e3.val[0]: 03 13 23 33 43 53 63 73 86 96 A6 B6 C6 D6 E6 F6 + // e3.val[1]: 0B 1B 2B 3B 4B 5B 6B 7B 8B 9B AB BB CB DB EB FB + // e4.val[0]: 04 14 24 34 44 54 64 74 81 91 A1 B1 C1 D1 E1 F1 + // e4.val[1]: 0C 1C 2C 3C 4C 5C 6C 7C 8C 9C AC BC CC DC EC FC + // e5.val[0]: 05 15 25 35 45 55 65 75 85 95 A5 B5 C5 D5 E5 F5 + // e5.val[1]: 0D 1D 2D 3D 4D 5D 6D 7D 8D 9D AD BD CD DD ED FD + // e6.val[0]: 06 16 26 36 46 56 66 76 83 93 A3 B3 C3 D3 E3 F3 + // e6.val[1]: 0E 1E 2E 3E 4E 5E 6E 7E 8E 9E AE BE CE DE EE FE + // e7.val[0]: 07 17 27 37 47 57 67 77 87 97 A7 B7 C7 D7 E7 F7 + // e7.val[1]: 0F 1F 2F 3F 4F 5F 6F 7F 8F 9F AF BF CF DF EF FF + const uint8x16x2_t e0 = vpx_vtrnq_u64(d0.val[0], d4.val[0]); + const uint8x16x2_t e1 = vpx_vtrnq_u64(d2.val[0], d6.val[0]); + const uint8x16x2_t e2 = vpx_vtrnq_u64(d1.val[0], d5.val[0]); + const uint8x16x2_t e3 = vpx_vtrnq_u64(d3.val[0], d7.val[0]); + const uint8x16x2_t e4 = vpx_vtrnq_u64(d0.val[1], d4.val[1]); + const uint8x16x2_t e5 = vpx_vtrnq_u64(d2.val[1], d6.val[1]); + const uint8x16x2_t e6 = vpx_vtrnq_u64(d1.val[1], d5.val[1]); + const uint8x16x2_t e7 = vpx_vtrnq_u64(d3.val[1], d7.val[1]); + + // Output: + // o0 : 00 10 20 30 40 50 60 70 80 90 A0 B0 C0 D0 E0 F0 + // o1 : 01 11 21 31 41 51 61 71 84 94 A4 B4 C4 D4 E4 F4 + // o2 : 02 12 22 32 42 52 62 72 82 92 A2 B2 C2 D2 E2 F2 + // o3 : 03 13 23 33 43 53 63 73 86 96 A6 B6 C6 D6 
E6 F6 + // o4 : 04 14 24 34 44 54 64 74 81 91 A1 B1 C1 D1 E1 F1 + // o5 : 05 15 25 35 45 55 65 75 85 95 A5 B5 C5 D5 E5 F5 + // o6 : 06 16 26 36 46 56 66 76 83 93 A3 B3 C3 D3 E3 F3 + // o7 : 07 17 27 37 47 57 67 77 87 97 A7 B7 C7 D7 E7 F7 + // o8 : 08 18 28 38 48 58 68 78 88 98 A8 B8 C8 D8 E8 F8 + // o9 : 09 19 29 39 49 59 69 79 89 99 A9 B9 C9 D9 E9 F9 + // o10: 0A 1A 2A 3A 4A 5A 6A 7A 8A 9A AA BA CA DA EA FA + // o11: 0B 1B 2B 3B 4B 5B 6B 7B 8B 9B AB BB CB DB EB FB + // o12: 0C 1C 2C 3C 4C 5C 6C 7C 8C 9C AC BC CC DC EC FC + // o13: 0D 1D 2D 3D 4D 5D 6D 7D 8D 9D AD BD CD DD ED FD + // o14: 0E 1E 2E 3E 4E 5E 6E 7E 8E 9E AE BE CE DE EE FE + // o15: 0F 1F 2F 3F 4F 5F 6F 7F 8F 9F AF BF CF DF EF FF + *o0 = e0.val[0]; + *o1 = e1.val[0]; + *o2 = e2.val[0]; + *o3 = e3.val[0]; + *o4 = e4.val[0]; + *o5 = e5.val[0]; + *o6 = e6.val[0]; + *o7 = e7.val[0]; + *o8 = e0.val[1]; + *o9 = e1.val[1]; + *o10 = e2.val[1]; + *o11 = e3.val[1]; + *o12 = e4.val[1]; + *o13 = e5.val[1]; + *o14 = e6.val[1]; + *o15 = e7.val[1]; +} + +#endif // VPX_DSP_ARM_TRANSPOSE_NEON_H_ diff --git a/libs/libvpx/vpx_dsp/arm/variance_halfpixvar16x16_h_media.asm b/libs/libvpx/vpx_dsp/arm/variance_halfpixvar16x16_h_media.asm deleted file mode 100644 index dab845a204..0000000000 --- a/libs/libvpx/vpx_dsp/arm/variance_halfpixvar16x16_h_media.asm +++ /dev/null @@ -1,182 +0,0 @@ -; -; Copyright (c) 2011 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - - - EXPORT |vpx_variance_halfpixvar16x16_h_media| - - ARM - REQUIRE8 - PRESERVE8 - - AREA ||.text||, CODE, READONLY, ALIGN=2 - -; r0 unsigned char *src_ptr -; r1 int source_stride -; r2 unsigned char *ref_ptr -; r3 int recon_stride -; stack unsigned int *sse -|vpx_variance_halfpixvar16x16_h_media| PROC - - stmfd sp!, {r4-r12, lr} - - pld [r0, r1, lsl #0] - pld [r2, r3, lsl #0] - - mov r8, #0 ; initialize sum = 0 - ldr r10, c80808080 - mov r11, #0 ; initialize sse = 0 - mov r12, #16 ; set loop counter to 16 (=block height) - mov lr, #0 ; constant zero -loop - ; 1st 4 pixels - ldr r4, [r0, #0] ; load 4 src pixels - ldr r6, [r0, #1] ; load 4 src pixels with 1 byte offset - ldr r5, [r2, #0] ; load 4 ref pixels - - ; bilinear interpolation - mvn r6, r6 - uhsub8 r4, r4, r6 - eor r4, r4, r10 - - usub8 r6, r4, r5 ; calculate difference - pld [r0, r1, lsl #1] - sel r7, r6, lr ; select bytes with positive difference - usub8 r6, r5, r4 ; calculate difference with reversed operands - pld [r2, r3, lsl #1] - sel r6, r6, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of positive differences - usad8 r5, r6, lr ; calculate sum of negative differences - orr r6, r6, r7 ; differences of all 4 pixels - ; calculate total sum - adds r8, r8, r4 ; add positive differences to sum - subs r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r7, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1) - - ; 2nd 4 pixels - ldr r4, [r0, #4] ; load 4 src pixels - ldr r6, [r0, #5] ; load 4 src pixels with 1 byte offset - ldr r5, [r2, #4] ; load 4 ref pixels - - ; bilinear interpolation - mvn r6, r6 - uhsub8 r4, r4, r6 - eor r4, r4, r10 - - smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2) - - usub8 r6, r4, r5 ; calculate difference - sel r7, r6, lr ; select bytes with positive 
difference - usub8 r6, r5, r4 ; calculate difference with reversed operands - sel r6, r6, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of positive differences - usad8 r5, r6, lr ; calculate sum of negative differences - orr r6, r6, r7 ; differences of all 4 pixels - - ; calculate total sum - add r8, r8, r4 ; add positive differences to sum - sub r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r7, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1) - - ; 3rd 4 pixels - ldr r4, [r0, #8] ; load 4 src pixels - ldr r6, [r0, #9] ; load 4 src pixels with 1 byte offset - ldr r5, [r2, #8] ; load 4 ref pixels - - ; bilinear interpolation - mvn r6, r6 - uhsub8 r4, r4, r6 - eor r4, r4, r10 - - smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2) - - usub8 r6, r4, r5 ; calculate difference - sel r7, r6, lr ; select bytes with positive difference - usub8 r6, r5, r4 ; calculate difference with reversed operands - sel r6, r6, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of positive differences - usad8 r5, r6, lr ; calculate sum of negative differences - orr r6, r6, r7 ; differences of all 4 pixels - - ; calculate total sum - add r8, r8, r4 ; add positive differences to sum - sub r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r7, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1) - - ; 4th 4 pixels - ldr r4, [r0, #12] ; load 4 src pixels - ldr r6, [r0, #13] ; load 4 src pixels with 1 byte offset - ldr r5, [r2, #12] ; load 4 ref pixels - - ; bilinear interpolation - mvn r6, r6 - uhsub8 r4, r4, r6 - eor r4, r4, r10 - - smlad r11, r7, r7, r11 ; dual signed 
multiply, add and accumulate (2) - - usub8 r6, r4, r5 ; calculate difference - add r0, r0, r1 ; set src_ptr to next row - sel r7, r6, lr ; select bytes with positive difference - usub8 r6, r5, r4 ; calculate difference with reversed operands - add r2, r2, r3 ; set dst_ptr to next row - sel r6, r6, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of positive differences - usad8 r5, r6, lr ; calculate sum of negative differences - orr r6, r6, r7 ; differences of all 4 pixels - - ; calculate total sum - add r8, r8, r4 ; add positive differences to sum - sub r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r7, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1) - smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2) - - subs r12, r12, #1 - - bne loop - - ; return stuff - ldr r6, [sp, #40] ; get address of sse - mul r0, r8, r8 ; sum * sum - str r11, [r6] ; store sse - sub r0, r11, r0, lsr #8 ; return (sse - ((sum * sum) >> 8)) - - ldmfd sp!, {r4-r12, pc} - - ENDP - -c80808080 - DCD 0x80808080 - - END - diff --git a/libs/libvpx/vpx_dsp/arm/variance_halfpixvar16x16_hv_media.asm b/libs/libvpx/vpx_dsp/arm/variance_halfpixvar16x16_hv_media.asm deleted file mode 100644 index 01953b7094..0000000000 --- a/libs/libvpx/vpx_dsp/arm/variance_halfpixvar16x16_hv_media.asm +++ /dev/null @@ -1,222 +0,0 @@ -; -; Copyright (c) 2011 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - - - EXPORT |vpx_variance_halfpixvar16x16_hv_media| - - ARM - REQUIRE8 - PRESERVE8 - - AREA ||.text||, CODE, READONLY, ALIGN=2 - -; r0 unsigned char *src_ptr -; r1 int source_stride -; r2 unsigned char *ref_ptr -; r3 int recon_stride -; stack unsigned int *sse -|vpx_variance_halfpixvar16x16_hv_media| PROC - - stmfd sp!, {r4-r12, lr} - - pld [r0, r1, lsl #0] - pld [r2, r3, lsl #0] - - mov r8, #0 ; initialize sum = 0 - ldr r10, c80808080 - mov r11, #0 ; initialize sse = 0 - mov r12, #16 ; set loop counter to 16 (=block height) - mov lr, #0 ; constant zero -loop - add r9, r0, r1 ; pointer to pixels on the next row - ; 1st 4 pixels - ldr r4, [r0, #0] ; load source pixels a, row N - ldr r6, [r0, #1] ; load source pixels b, row N - ldr r5, [r9, #0] ; load source pixels c, row N+1 - ldr r7, [r9, #1] ; load source pixels d, row N+1 - - ; x = (a + b + 1) >> 1, interpolate pixels horizontally on row N - mvn r6, r6 - uhsub8 r4, r4, r6 - eor r4, r4, r10 - ; y = (c + d + 1) >> 1, interpolate pixels horizontally on row N+1 - mvn r7, r7 - uhsub8 r5, r5, r7 - eor r5, r5, r10 - ; z = (x + y + 1) >> 1, interpolate half pixel values vertically - mvn r5, r5 - uhsub8 r4, r4, r5 - ldr r5, [r2, #0] ; load 4 ref pixels - eor r4, r4, r10 - - usub8 r6, r4, r5 ; calculate difference - pld [r0, r1, lsl #1] - sel r7, r6, lr ; select bytes with positive difference - usub8 r6, r5, r4 ; calculate difference with reversed operands - pld [r2, r3, lsl #1] - sel r6, r6, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of positive differences - usad8 r5, r6, lr ; calculate sum of negative differences - orr r6, r6, r7 ; differences of all 4 pixels - ; calculate total sum - adds r8, r8, r4 ; add positive differences to sum - subs r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r7, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual 
signed multiply, add and accumulate (1) - - ; 2nd 4 pixels - ldr r4, [r0, #4] ; load source pixels a, row N - ldr r6, [r0, #5] ; load source pixels b, row N - ldr r5, [r9, #4] ; load source pixels c, row N+1 - - smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2) - - ldr r7, [r9, #5] ; load source pixels d, row N+1 - - ; x = (a + b + 1) >> 1, interpolate pixels horizontally on row N - mvn r6, r6 - uhsub8 r4, r4, r6 - eor r4, r4, r10 - ; y = (c + d + 1) >> 1, interpolate pixels horizontally on row N+1 - mvn r7, r7 - uhsub8 r5, r5, r7 - eor r5, r5, r10 - ; z = (x + y + 1) >> 1, interpolate half pixel values vertically - mvn r5, r5 - uhsub8 r4, r4, r5 - ldr r5, [r2, #4] ; load 4 ref pixels - eor r4, r4, r10 - - usub8 r6, r4, r5 ; calculate difference - sel r7, r6, lr ; select bytes with positive difference - usub8 r6, r5, r4 ; calculate difference with reversed operands - sel r6, r6, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of positive differences - usad8 r5, r6, lr ; calculate sum of negative differences - orr r6, r6, r7 ; differences of all 4 pixels - - ; calculate total sum - add r8, r8, r4 ; add positive differences to sum - sub r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r7, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1) - - ; 3rd 4 pixels - ldr r4, [r0, #8] ; load source pixels a, row N - ldr r6, [r0, #9] ; load source pixels b, row N - ldr r5, [r9, #8] ; load source pixels c, row N+1 - - smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2) - - ldr r7, [r9, #9] ; load source pixels d, row N+1 - - ; x = (a + b + 1) >> 1, interpolate pixels horizontally on row N - mvn r6, r6 - uhsub8 r4, r4, r6 - eor r4, r4, r10 - ; y = (c + d + 1) >> 1, interpolate pixels horizontally on row N+1 - mvn r7, r7 - uhsub8 r5, r5, r7 
- eor r5, r5, r10 - ; z = (x + y + 1) >> 1, interpolate half pixel values vertically - mvn r5, r5 - uhsub8 r4, r4, r5 - ldr r5, [r2, #8] ; load 4 ref pixels - eor r4, r4, r10 - - usub8 r6, r4, r5 ; calculate difference - sel r7, r6, lr ; select bytes with positive difference - usub8 r6, r5, r4 ; calculate difference with reversed operands - sel r6, r6, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of positive differences - usad8 r5, r6, lr ; calculate sum of negative differences - orr r6, r6, r7 ; differences of all 4 pixels - - ; calculate total sum - add r8, r8, r4 ; add positive differences to sum - sub r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r7, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1) - - ; 4th 4 pixels - ldr r4, [r0, #12] ; load source pixels a, row N - ldr r6, [r0, #13] ; load source pixels b, row N - ldr r5, [r9, #12] ; load source pixels c, row N+1 - smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2) - ldr r7, [r9, #13] ; load source pixels d, row N+1 - - ; x = (a + b + 1) >> 1, interpolate pixels horizontally on row N - mvn r6, r6 - uhsub8 r4, r4, r6 - eor r4, r4, r10 - ; y = (c + d + 1) >> 1, interpolate pixels horizontally on row N+1 - mvn r7, r7 - uhsub8 r5, r5, r7 - eor r5, r5, r10 - ; z = (x + y + 1) >> 1, interpolate half pixel values vertically - mvn r5, r5 - uhsub8 r4, r4, r5 - ldr r5, [r2, #12] ; load 4 ref pixels - eor r4, r4, r10 - - usub8 r6, r4, r5 ; calculate difference - add r0, r0, r1 ; set src_ptr to next row - sel r7, r6, lr ; select bytes with positive difference - usub8 r6, r5, r4 ; calculate difference with reversed operands - add r2, r2, r3 ; set dst_ptr to next row - sel r6, r6, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of 
positive differences - usad8 r5, r6, lr ; calculate sum of negative differences - orr r6, r6, r7 ; differences of all 4 pixels - - ; calculate total sum - add r8, r8, r4 ; add positive differences to sum - sub r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r7, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1) - subs r12, r12, #1 - smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2) - - bne loop - - ; return stuff - ldr r6, [sp, #40] ; get address of sse - mul r0, r8, r8 ; sum * sum - str r11, [r6] ; store sse - sub r0, r11, r0, lsr #8 ; return (sse - ((sum * sum) >> 8)) - - ldmfd sp!, {r4-r12, pc} - - ENDP - -c80808080 - DCD 0x80808080 - - END diff --git a/libs/libvpx/vpx_dsp/arm/variance_halfpixvar16x16_v_media.asm b/libs/libvpx/vpx_dsp/arm/variance_halfpixvar16x16_v_media.asm deleted file mode 100644 index 0d17acb38f..0000000000 --- a/libs/libvpx/vpx_dsp/arm/variance_halfpixvar16x16_v_media.asm +++ /dev/null @@ -1,184 +0,0 @@ -; -; Copyright (c) 2011 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - - - EXPORT |vpx_variance_halfpixvar16x16_v_media| - - ARM - REQUIRE8 - PRESERVE8 - - AREA ||.text||, CODE, READONLY, ALIGN=2 - -; r0 unsigned char *src_ptr -; r1 int source_stride -; r2 unsigned char *ref_ptr -; r3 int recon_stride -; stack unsigned int *sse -|vpx_variance_halfpixvar16x16_v_media| PROC - - stmfd sp!, {r4-r12, lr} - - pld [r0, r1, lsl #0] - pld [r2, r3, lsl #0] - - mov r8, #0 ; initialize sum = 0 - ldr r10, c80808080 - mov r11, #0 ; initialize sse = 0 - mov r12, #16 ; set loop counter to 16 (=block height) - mov lr, #0 ; constant zero -loop - add r9, r0, r1 ; set src pointer to next row - ; 1st 4 pixels - ldr r4, [r0, #0] ; load 4 src pixels - ldr r6, [r9, #0] ; load 4 src pixels from next row - ldr r5, [r2, #0] ; load 4 ref pixels - - ; bilinear interpolation - mvn r6, r6 - uhsub8 r4, r4, r6 - eor r4, r4, r10 - - usub8 r6, r4, r5 ; calculate difference - pld [r0, r1, lsl #1] - sel r7, r6, lr ; select bytes with positive difference - usub8 r6, r5, r4 ; calculate difference with reversed operands - pld [r2, r3, lsl #1] - sel r6, r6, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of positive differences - usad8 r5, r6, lr ; calculate sum of negative differences - orr r6, r6, r7 ; differences of all 4 pixels - ; calculate total sum - adds r8, r8, r4 ; add positive differences to sum - subs r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r7, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1) - - ; 2nd 4 pixels - ldr r4, [r0, #4] ; load 4 src pixels - ldr r6, [r9, #4] ; load 4 src pixels from next row - ldr r5, [r2, #4] ; load 4 ref pixels - - ; bilinear interpolation - mvn r6, r6 - uhsub8 r4, r4, r6 - eor r4, r4, r10 - - smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2) - - usub8 r6, r4, r5 ; calculate difference - sel r7, 
r6, lr ; select bytes with positive difference - usub8 r6, r5, r4 ; calculate difference with reversed operands - sel r6, r6, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of positive differences - usad8 r5, r6, lr ; calculate sum of negative differences - orr r6, r6, r7 ; differences of all 4 pixels - - ; calculate total sum - add r8, r8, r4 ; add positive differences to sum - sub r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r7, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1) - - ; 3rd 4 pixels - ldr r4, [r0, #8] ; load 4 src pixels - ldr r6, [r9, #8] ; load 4 src pixels from next row - ldr r5, [r2, #8] ; load 4 ref pixels - - ; bilinear interpolation - mvn r6, r6 - uhsub8 r4, r4, r6 - eor r4, r4, r10 - - smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2) - - usub8 r6, r4, r5 ; calculate difference - sel r7, r6, lr ; select bytes with positive difference - usub8 r6, r5, r4 ; calculate difference with reversed operands - sel r6, r6, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of positive differences - usad8 r5, r6, lr ; calculate sum of negative differences - orr r6, r6, r7 ; differences of all 4 pixels - - ; calculate total sum - add r8, r8, r4 ; add positive differences to sum - sub r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r7, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1) - - ; 4th 4 pixels - ldr r4, [r0, #12] ; load 4 src pixels - ldr r6, [r9, #12] ; load 4 src pixels from next row - ldr r5, [r2, #12] ; load 4 ref pixels - - ; bilinear interpolation - mvn r6, r6 - uhsub8 r4, r4, r6 - eor r4, r4, r10 - - smlad r11, 
r7, r7, r11 ; dual signed multiply, add and accumulate (2) - - usub8 r6, r4, r5 ; calculate difference - add r0, r0, r1 ; set src_ptr to next row - sel r7, r6, lr ; select bytes with positive difference - usub8 r6, r5, r4 ; calculate difference with reversed operands - add r2, r2, r3 ; set dst_ptr to next row - sel r6, r6, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of positive differences - usad8 r5, r6, lr ; calculate sum of negative differences - orr r6, r6, r7 ; differences of all 4 pixels - - ; calculate total sum - add r8, r8, r4 ; add positive differences to sum - sub r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r7, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1) - smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2) - - - subs r12, r12, #1 - - bne loop - - ; return stuff - ldr r6, [sp, #40] ; get address of sse - mul r0, r8, r8 ; sum * sum - str r11, [r6] ; store sse - sub r0, r11, r0, lsr #8 ; return (sse - ((sum * sum) >> 8)) - - ldmfd sp!, {r4-r12, pc} - - ENDP - -c80808080 - DCD 0x80808080 - - END - diff --git a/libs/libvpx/vpx_dsp/arm/variance_media.asm b/libs/libvpx/vpx_dsp/arm/variance_media.asm deleted file mode 100644 index f7f9e14b0a..0000000000 --- a/libs/libvpx/vpx_dsp/arm/variance_media.asm +++ /dev/null @@ -1,358 +0,0 @@ -; -; Copyright (c) 2011 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - - - EXPORT |vpx_variance16x16_media| - EXPORT |vpx_variance8x8_media| - EXPORT |vpx_mse16x16_media| - - ARM - REQUIRE8 - PRESERVE8 - - AREA ||.text||, CODE, READONLY, ALIGN=2 - -; r0 unsigned char *src_ptr -; r1 int source_stride -; r2 unsigned char *ref_ptr -; r3 int recon_stride -; stack unsigned int *sse -|vpx_variance16x16_media| PROC - - stmfd sp!, {r4-r12, lr} - - pld [r0, r1, lsl #0] - pld [r2, r3, lsl #0] - - mov r8, #0 ; initialize sum = 0 - mov r11, #0 ; initialize sse = 0 - mov r12, #16 ; set loop counter to 16 (=block height) - -loop16x16 - ; 1st 4 pixels - ldr r4, [r0, #0] ; load 4 src pixels - ldr r5, [r2, #0] ; load 4 ref pixels - - mov lr, #0 ; constant zero - - usub8 r6, r4, r5 ; calculate difference - pld [r0, r1, lsl #1] - sel r7, r6, lr ; select bytes with positive difference - usub8 r9, r5, r4 ; calculate difference with reversed operands - pld [r2, r3, lsl #1] - sel r6, r9, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of positive differences - usad8 r5, r6, lr ; calculate sum of negative differences - orr r6, r6, r7 ; differences of all 4 pixels - ; calculate total sum - adds r8, r8, r4 ; add positive differences to sum - subs r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r10, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1) - - ; 2nd 4 pixels - ldr r4, [r0, #4] ; load 4 src pixels - ldr r5, [r2, #4] ; load 4 ref pixels - smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2) - - usub8 r6, r4, r5 ; calculate difference - sel r7, r6, lr ; select bytes with positive difference - usub8 r9, r5, r4 ; calculate difference with reversed operands - sel r6, r9, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of positive differences - usad8 r5, r6, lr ; calculate 
sum of negative differences - orr r6, r6, r7 ; differences of all 4 pixels - - ; calculate total sum - add r8, r8, r4 ; add positive differences to sum - sub r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r10, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1) - - ; 3rd 4 pixels - ldr r4, [r0, #8] ; load 4 src pixels - ldr r5, [r2, #8] ; load 4 ref pixels - smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2) - - usub8 r6, r4, r5 ; calculate difference - sel r7, r6, lr ; select bytes with positive difference - usub8 r9, r5, r4 ; calculate difference with reversed operands - sel r6, r9, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of positive differences - usad8 r5, r6, lr ; calculate sum of negative differences - orr r6, r6, r7 ; differences of all 4 pixels - - ; calculate total sum - add r8, r8, r4 ; add positive differences to sum - sub r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r10, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1) - - ; 4th 4 pixels - ldr r4, [r0, #12] ; load 4 src pixels - ldr r5, [r2, #12] ; load 4 ref pixels - smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2) - - usub8 r6, r4, r5 ; calculate difference - add r0, r0, r1 ; set src_ptr to next row - sel r7, r6, lr ; select bytes with positive difference - usub8 r9, r5, r4 ; calculate difference with reversed operands - add r2, r2, r3 ; set dst_ptr to next row - sel r6, r9, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r4, r7, lr ; calculate sum of positive differences - usad8 r5, r6, lr ; calculate sum of negative differences - orr r6, r6, r7 ; differences of all 
4 pixels - - ; calculate total sum - add r8, r8, r4 ; add positive differences to sum - sub r8, r8, r5 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r5, r6 ; byte (two pixels) to halfwords - uxtb16 r10, r6, ror #8 ; another two pixels to halfwords - smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1) - smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2) - - - subs r12, r12, #1 - - bne loop16x16 - - ; return stuff - ldr r6, [sp, #40] ; get address of sse - mul r0, r8, r8 ; sum * sum - str r11, [r6] ; store sse - sub r0, r11, r0, lsr #8 ; return (sse - ((sum * sum) >> 8)) - - ldmfd sp!, {r4-r12, pc} - - ENDP - -; r0 unsigned char *src_ptr -; r1 int source_stride -; r2 unsigned char *ref_ptr -; r3 int recon_stride -; stack unsigned int *sse -|vpx_variance8x8_media| PROC - - push {r4-r10, lr} - - pld [r0, r1, lsl #0] - pld [r2, r3, lsl #0] - - mov r12, #8 ; set loop counter to 8 (=block height) - mov r4, #0 ; initialize sum = 0 - mov r5, #0 ; initialize sse = 0 - -loop8x8 - ; 1st 4 pixels - ldr r6, [r0, #0x0] ; load 4 src pixels - ldr r7, [r2, #0x0] ; load 4 ref pixels - - mov lr, #0 ; constant zero - - usub8 r8, r6, r7 ; calculate difference - pld [r0, r1, lsl #1] - sel r10, r8, lr ; select bytes with positive difference - usub8 r9, r7, r6 ; calculate difference with reversed operands - pld [r2, r3, lsl #1] - sel r8, r9, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r6, r10, lr ; calculate sum of positive differences - usad8 r7, r8, lr ; calculate sum of negative differences - orr r8, r8, r10 ; differences of all 4 pixels - ; calculate total sum - add r4, r4, r6 ; add positive differences to sum - sub r4, r4, r7 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r7, r8 ; byte (two pixels) to halfwords - uxtb16 r10, r8, ror #8 ; another two pixels to halfwords - smlad r5, r7, r7, r5 ; dual signed multiply, add and accumulate (1) - - ; 2nd 4 pixels - ldr 
r6, [r0, #0x4] ; load 4 src pixels - ldr r7, [r2, #0x4] ; load 4 ref pixels - smlad r5, r10, r10, r5 ; dual signed multiply, add and accumulate (2) - - usub8 r8, r6, r7 ; calculate difference - add r0, r0, r1 ; set src_ptr to next row - sel r10, r8, lr ; select bytes with positive difference - usub8 r9, r7, r6 ; calculate difference with reversed operands - add r2, r2, r3 ; set dst_ptr to next row - sel r8, r9, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r6, r10, lr ; calculate sum of positive differences - usad8 r7, r8, lr ; calculate sum of negative differences - orr r8, r8, r10 ; differences of all 4 pixels - - ; calculate total sum - add r4, r4, r6 ; add positive differences to sum - sub r4, r4, r7 ; subtract negative differences from sum - - ; calculate sse - uxtb16 r7, r8 ; byte (two pixels) to halfwords - uxtb16 r10, r8, ror #8 ; another two pixels to halfwords - smlad r5, r7, r7, r5 ; dual signed multiply, add and accumulate (1) - subs r12, r12, #1 ; next row - smlad r5, r10, r10, r5 ; dual signed multiply, add and accumulate (2) - - bne loop8x8 - - ; return stuff - ldr r8, [sp, #32] ; get address of sse - mul r1, r4, r4 ; sum * sum - str r5, [r8] ; store sse - sub r0, r5, r1, ASR #6 ; return (sse - ((sum * sum) >> 6)) - - pop {r4-r10, pc} - - ENDP - -; r0 unsigned char *src_ptr -; r1 int source_stride -; r2 unsigned char *ref_ptr -; r3 int recon_stride -; stack unsigned int *sse -; -;note: Based on vpx_variance16x16_media. In this function, sum is never used. -; So, we can remove this part of calculation. 
- -|vpx_mse16x16_media| PROC - - push {r4-r9, lr} - - pld [r0, r1, lsl #0] - pld [r2, r3, lsl #0] - - mov r12, #16 ; set loop counter to 16 (=block height) - mov r4, #0 ; initialize sse = 0 - -loopmse - ; 1st 4 pixels - ldr r5, [r0, #0x0] ; load 4 src pixels - ldr r6, [r2, #0x0] ; load 4 ref pixels - - mov lr, #0 ; constant zero - - usub8 r8, r5, r6 ; calculate difference - pld [r0, r1, lsl #1] - sel r7, r8, lr ; select bytes with positive difference - usub8 r9, r6, r5 ; calculate difference with reversed operands - pld [r2, r3, lsl #1] - sel r8, r9, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r5, r7, lr ; calculate sum of positive differences - usad8 r6, r8, lr ; calculate sum of negative differences - orr r8, r8, r7 ; differences of all 4 pixels - - ldr r5, [r0, #0x4] ; load 4 src pixels - - ; calculate sse - uxtb16 r6, r8 ; byte (two pixels) to halfwords - uxtb16 r7, r8, ror #8 ; another two pixels to halfwords - smlad r4, r6, r6, r4 ; dual signed multiply, add and accumulate (1) - - ; 2nd 4 pixels - ldr r6, [r2, #0x4] ; load 4 ref pixels - smlad r4, r7, r7, r4 ; dual signed multiply, add and accumulate (2) - - usub8 r8, r5, r6 ; calculate difference - sel r7, r8, lr ; select bytes with positive difference - usub8 r9, r6, r5 ; calculate difference with reversed operands - sel r8, r9, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r5, r7, lr ; calculate sum of positive differences - usad8 r6, r8, lr ; calculate sum of negative differences - orr r8, r8, r7 ; differences of all 4 pixels - ldr r5, [r0, #0x8] ; load 4 src pixels - ; calculate sse - uxtb16 r6, r8 ; byte (two pixels) to halfwords - uxtb16 r7, r8, ror #8 ; another two pixels to halfwords - smlad r4, r6, r6, r4 ; dual signed multiply, add and accumulate (1) - - ; 3rd 4 pixels - ldr r6, [r2, #0x8] ; load 4 ref pixels - smlad r4, r7, r7, r4 ; dual signed multiply, add and accumulate (2) - - usub8 r8, r5, r6 ; calculate difference - sel 
r7, r8, lr ; select bytes with positive difference - usub8 r9, r6, r5 ; calculate difference with reversed operands - sel r8, r9, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r5, r7, lr ; calculate sum of positive differences - usad8 r6, r8, lr ; calculate sum of negative differences - orr r8, r8, r7 ; differences of all 4 pixels - - ldr r5, [r0, #0xc] ; load 4 src pixels - - ; calculate sse - uxtb16 r6, r8 ; byte (two pixels) to halfwords - uxtb16 r7, r8, ror #8 ; another two pixels to halfwords - smlad r4, r6, r6, r4 ; dual signed multiply, add and accumulate (1) - - ; 4th 4 pixels - ldr r6, [r2, #0xc] ; load 4 ref pixels - smlad r4, r7, r7, r4 ; dual signed multiply, add and accumulate (2) - - usub8 r8, r5, r6 ; calculate difference - add r0, r0, r1 ; set src_ptr to next row - sel r7, r8, lr ; select bytes with positive difference - usub8 r9, r6, r5 ; calculate difference with reversed operands - add r2, r2, r3 ; set dst_ptr to next row - sel r8, r9, lr ; select bytes with negative difference - - ; calculate partial sums - usad8 r5, r7, lr ; calculate sum of positive differences - usad8 r6, r8, lr ; calculate sum of negative differences - orr r8, r8, r7 ; differences of all 4 pixels - - subs r12, r12, #1 ; next row - - ; calculate sse - uxtb16 r6, r8 ; byte (two pixels) to halfwords - uxtb16 r7, r8, ror #8 ; another two pixels to halfwords - smlad r4, r6, r6, r4 ; dual signed multiply, add and accumulate (1) - smlad r4, r7, r7, r4 ; dual signed multiply, add and accumulate (2) - - bne loopmse - - ; return stuff - ldr r1, [sp, #28] ; get address of sse - mov r0, r4 ; return sse - str r4, [r1] ; store sse - - pop {r4-r9, pc} - - ENDP - - END diff --git a/libs/libvpx/vpx_dsp/arm/variance_neon.c b/libs/libvpx/vpx_dsp/arm/variance_neon.c index ede6e7bbb0..b6d7f86a4b 100644 --- a/libs/libvpx/vpx_dsp/arm/variance_neon.c +++ b/libs/libvpx/vpx_dsp/arm/variance_neon.c @@ -32,9 +32,9 @@ static INLINE int horizontal_add_s32x4(const 
int32x4_t v_32x4) { } // w * h must be less than 2048 or local variable v_sum may overflow. -static void variance_neon_w8(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - int w, int h, uint32_t *sse, int *sum) { +static void variance_neon_w8(const uint8_t *a, int a_stride, const uint8_t *b, + int b_stride, int w, int h, uint32_t *sse, + int *sum) { int i, j; int16x8_t v_sum = vdupq_n_s16(0); int32x4_t v_sse_lo = vdupq_n_s32(0); @@ -47,12 +47,10 @@ static void variance_neon_w8(const uint8_t *a, int a_stride, const uint16x8_t v_diff = vsubl_u8(v_a, v_b); const int16x8_t sv_diff = vreinterpretq_s16_u16(v_diff); v_sum = vaddq_s16(v_sum, sv_diff); - v_sse_lo = vmlal_s16(v_sse_lo, - vget_low_s16(sv_diff), - vget_low_s16(sv_diff)); - v_sse_hi = vmlal_s16(v_sse_hi, - vget_high_s16(sv_diff), - vget_high_s16(sv_diff)); + v_sse_lo = + vmlal_s16(v_sse_lo, vget_low_s16(sv_diff), vget_low_s16(sv_diff)); + v_sse_hi = + vmlal_s16(v_sse_hi, vget_high_s16(sv_diff), vget_high_s16(sv_diff)); } a += a_stride; b += b_stride; @@ -62,15 +60,13 @@ static void variance_neon_w8(const uint8_t *a, int a_stride, *sse = (unsigned int)horizontal_add_s32x4(vaddq_s32(v_sse_lo, v_sse_hi)); } -void vpx_get8x8var_neon(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - unsigned int *sse, int *sum) { +void vpx_get8x8var_neon(const uint8_t *a, int a_stride, const uint8_t *b, + int b_stride, unsigned int *sse, int *sum) { variance_neon_w8(a, a_stride, b, b_stride, 8, 8, sse, sum); } -void vpx_get16x16var_neon(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - unsigned int *sse, int *sum) { +void vpx_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b, + int b_stride, unsigned int *sse, int *sum) { variance_neon_w8(a, a_stride, b, b_stride, 16, 16, sse, sum); } @@ -79,7 +75,7 @@ unsigned int vpx_variance8x8_neon(const uint8_t *a, int a_stride, unsigned int *sse) { int sum; variance_neon_w8(a, a_stride, b, b_stride, 8, 8, sse, &sum); - 
return *sse - (((int64_t)sum * sum) >> 6); // >> 6 = / 8 * 8 + return *sse - ((sum * sum) >> 6); } unsigned int vpx_variance16x16_neon(const uint8_t *a, int a_stride, @@ -87,7 +83,7 @@ unsigned int vpx_variance16x16_neon(const uint8_t *a, int a_stride, unsigned int *sse) { int sum; variance_neon_w8(a, a_stride, b, b_stride, 16, 16, sse, &sum); - return *sse - (((int64_t)sum * sum) >> 8); // >> 8 = / 16 * 16 + return *sse - (((uint32_t)((int64_t)sum * sum)) >> 8); } unsigned int vpx_variance32x32_neon(const uint8_t *a, int a_stride, @@ -95,7 +91,7 @@ unsigned int vpx_variance32x32_neon(const uint8_t *a, int a_stride, unsigned int *sse) { int sum; variance_neon_w8(a, a_stride, b, b_stride, 32, 32, sse, &sum); - return *sse - (((int64_t)sum * sum) >> 10); // >> 10 = / 32 * 32 + return *sse - (unsigned int)(((int64_t)sum * sum) >> 10); } unsigned int vpx_variance32x64_neon(const uint8_t *a, int a_stride, @@ -104,12 +100,11 @@ unsigned int vpx_variance32x64_neon(const uint8_t *a, int a_stride, int sum1, sum2; uint32_t sse1, sse2; variance_neon_w8(a, a_stride, b, b_stride, 32, 32, &sse1, &sum1); - variance_neon_w8(a + (32 * a_stride), a_stride, - b + (32 * b_stride), b_stride, 32, 32, - &sse2, &sum2); + variance_neon_w8(a + (32 * a_stride), a_stride, b + (32 * b_stride), b_stride, + 32, 32, &sse2, &sum2); *sse = sse1 + sse2; sum1 += sum2; - return *sse - (((int64_t)sum1 * sum1) >> 11); // >> 11 = / 32 * 64 + return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 11); } unsigned int vpx_variance64x32_neon(const uint8_t *a, int a_stride, @@ -118,12 +113,11 @@ unsigned int vpx_variance64x32_neon(const uint8_t *a, int a_stride, int sum1, sum2; uint32_t sse1, sse2; variance_neon_w8(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1); - variance_neon_w8(a + (16 * a_stride), a_stride, - b + (16 * b_stride), b_stride, 64, 16, - &sse2, &sum2); + variance_neon_w8(a + (16 * a_stride), a_stride, b + (16 * b_stride), b_stride, + 64, 16, &sse2, &sum2); *sse = sse1 + sse2; sum1 += sum2; - 
return *sse - (((int64_t)sum1 * sum1) >> 11); // >> 11 = / 32 * 64 + return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 11); } unsigned int vpx_variance64x64_neon(const uint8_t *a, int a_stride, @@ -133,286 +127,273 @@ unsigned int vpx_variance64x64_neon(const uint8_t *a, int a_stride, uint32_t sse1, sse2; variance_neon_w8(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1); - variance_neon_w8(a + (16 * a_stride), a_stride, - b + (16 * b_stride), b_stride, 64, 16, - &sse2, &sum2); - sse1 += sse2; - sum1 += sum2; - - variance_neon_w8(a + (16 * 2 * a_stride), a_stride, - b + (16 * 2 * b_stride), b_stride, + variance_neon_w8(a + (16 * a_stride), a_stride, b + (16 * b_stride), b_stride, 64, 16, &sse2, &sum2); sse1 += sse2; sum1 += sum2; - variance_neon_w8(a + (16 * 3 * a_stride), a_stride, - b + (16 * 3 * b_stride), b_stride, - 64, 16, &sse2, &sum2); + variance_neon_w8(a + (16 * 2 * a_stride), a_stride, b + (16 * 2 * b_stride), + b_stride, 64, 16, &sse2, &sum2); + sse1 += sse2; + sum1 += sum2; + + variance_neon_w8(a + (16 * 3 * a_stride), a_stride, b + (16 * 3 * b_stride), + b_stride, 64, 16, &sse2, &sum2); *sse = sse1 + sse2; sum1 += sum2; - return *sse - (((int64_t)sum1 * sum1) >> 12); // >> 12 = / 64 * 64 + return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 12); } -unsigned int vpx_variance16x8_neon( - const unsigned char *src_ptr, - int source_stride, - const unsigned char *ref_ptr, - int recon_stride, - unsigned int *sse) { - int i; - int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16; - uint32x2_t d0u32, d10u32; - int64x1_t d0s64, d1s64; - uint8x16_t q0u8, q1u8, q2u8, q3u8; - uint16x8_t q11u16, q12u16, q13u16, q14u16; - int32x4_t q8s32, q9s32, q10s32; - int64x2_t q0s64, q1s64, q5s64; +unsigned int vpx_variance16x8_neon(const unsigned char *src_ptr, + int source_stride, + const unsigned char *ref_ptr, + int recon_stride, unsigned int *sse) { + int i; + int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16; + uint32x2_t 
d0u32, d10u32; + int64x1_t d0s64, d1s64; + uint8x16_t q0u8, q1u8, q2u8, q3u8; + uint16x8_t q11u16, q12u16, q13u16, q14u16; + int32x4_t q8s32, q9s32, q10s32; + int64x2_t q0s64, q1s64, q5s64; - q8s32 = vdupq_n_s32(0); - q9s32 = vdupq_n_s32(0); - q10s32 = vdupq_n_s32(0); + q8s32 = vdupq_n_s32(0); + q9s32 = vdupq_n_s32(0); + q10s32 = vdupq_n_s32(0); - for (i = 0; i < 4; i++) { - q0u8 = vld1q_u8(src_ptr); - src_ptr += source_stride; - q1u8 = vld1q_u8(src_ptr); - src_ptr += source_stride; - __builtin_prefetch(src_ptr); + for (i = 0; i < 4; i++) { + q0u8 = vld1q_u8(src_ptr); + src_ptr += source_stride; + q1u8 = vld1q_u8(src_ptr); + src_ptr += source_stride; + __builtin_prefetch(src_ptr); - q2u8 = vld1q_u8(ref_ptr); - ref_ptr += recon_stride; - q3u8 = vld1q_u8(ref_ptr); - ref_ptr += recon_stride; - __builtin_prefetch(ref_ptr); + q2u8 = vld1q_u8(ref_ptr); + ref_ptr += recon_stride; + q3u8 = vld1q_u8(ref_ptr); + ref_ptr += recon_stride; + __builtin_prefetch(ref_ptr); - q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8)); - q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8)); - q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8)); - q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8)); + q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8)); + q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8)); + q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8)); + q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8)); - d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16)); - d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16)); - q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16)); - q9s32 = vmlal_s16(q9s32, d22s16, d22s16); - q10s32 = vmlal_s16(q10s32, d23s16, d23s16); + d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16)); + d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16)); + q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16)); + q9s32 = vmlal_s16(q9s32, d22s16, d22s16); + q10s32 = vmlal_s16(q10s32, d23s16, d23s16); - d24s16 = 
vreinterpret_s16_u16(vget_low_u16(q12u16)); - d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16)); - q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16)); - q9s32 = vmlal_s16(q9s32, d24s16, d24s16); - q10s32 = vmlal_s16(q10s32, d25s16, d25s16); + d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16)); + d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16)); + q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16)); + q9s32 = vmlal_s16(q9s32, d24s16, d24s16); + q10s32 = vmlal_s16(q10s32, d25s16, d25s16); - d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16)); - d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16)); - q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16)); - q9s32 = vmlal_s16(q9s32, d26s16, d26s16); - q10s32 = vmlal_s16(q10s32, d27s16, d27s16); + d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16)); + d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16)); + q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16)); + q9s32 = vmlal_s16(q9s32, d26s16, d26s16); + q10s32 = vmlal_s16(q10s32, d27s16, d27s16); - d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16)); - d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16)); - q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16)); - q9s32 = vmlal_s16(q9s32, d28s16, d28s16); - q10s32 = vmlal_s16(q10s32, d29s16, d29s16); - } + d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16)); + d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16)); + q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16)); + q9s32 = vmlal_s16(q9s32, d28s16, d28s16); + q10s32 = vmlal_s16(q10s32, d29s16, d29s16); + } - q10s32 = vaddq_s32(q10s32, q9s32); - q0s64 = vpaddlq_s32(q8s32); - q1s64 = vpaddlq_s32(q10s32); + q10s32 = vaddq_s32(q10s32, q9s32); + q0s64 = vpaddlq_s32(q8s32); + q1s64 = vpaddlq_s32(q10s32); - d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64)); - d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64)); + d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64)); + d1s64 = vadd_s64(vget_low_s64(q1s64), 
vget_high_s64(q1s64)); - q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64), - vreinterpret_s32_s64(d0s64)); - vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0); + q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64), vreinterpret_s32_s64(d0s64)); + vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0); - d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7); - d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32); + d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7); + d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32); - return vget_lane_u32(d0u32, 0); + return vget_lane_u32(d0u32, 0); } -unsigned int vpx_variance8x16_neon( - const unsigned char *src_ptr, - int source_stride, - const unsigned char *ref_ptr, - int recon_stride, - unsigned int *sse) { - int i; - uint8x8_t d0u8, d2u8, d4u8, d6u8; - int16x4_t d22s16, d23s16, d24s16, d25s16; - uint32x2_t d0u32, d10u32; - int64x1_t d0s64, d1s64; - uint16x8_t q11u16, q12u16; - int32x4_t q8s32, q9s32, q10s32; - int64x2_t q0s64, q1s64, q5s64; +unsigned int vpx_variance8x16_neon(const unsigned char *src_ptr, + int source_stride, + const unsigned char *ref_ptr, + int recon_stride, unsigned int *sse) { + int i; + uint8x8_t d0u8, d2u8, d4u8, d6u8; + int16x4_t d22s16, d23s16, d24s16, d25s16; + uint32x2_t d0u32, d10u32; + int64x1_t d0s64, d1s64; + uint16x8_t q11u16, q12u16; + int32x4_t q8s32, q9s32, q10s32; + int64x2_t q0s64, q1s64, q5s64; - q8s32 = vdupq_n_s32(0); - q9s32 = vdupq_n_s32(0); - q10s32 = vdupq_n_s32(0); - - for (i = 0; i < 8; i++) { - d0u8 = vld1_u8(src_ptr); - src_ptr += source_stride; - d2u8 = vld1_u8(src_ptr); - src_ptr += source_stride; - __builtin_prefetch(src_ptr); - - d4u8 = vld1_u8(ref_ptr); - ref_ptr += recon_stride; - d6u8 = vld1_u8(ref_ptr); - ref_ptr += recon_stride; - __builtin_prefetch(ref_ptr); - - q11u16 = vsubl_u8(d0u8, d4u8); - q12u16 = vsubl_u8(d2u8, d6u8); - - d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16)); - d23s16 = 
vreinterpret_s16_u16(vget_high_u16(q11u16)); - q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16)); - q9s32 = vmlal_s16(q9s32, d22s16, d22s16); - q10s32 = vmlal_s16(q10s32, d23s16, d23s16); - - d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16)); - d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16)); - q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16)); - q9s32 = vmlal_s16(q9s32, d24s16, d24s16); - q10s32 = vmlal_s16(q10s32, d25s16, d25s16); - } - - q10s32 = vaddq_s32(q10s32, q9s32); - q0s64 = vpaddlq_s32(q8s32); - q1s64 = vpaddlq_s32(q10s32); - - d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64)); - d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64)); - - q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64), - vreinterpret_s32_s64(d0s64)); - vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0); - - d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7); - d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32); - - return vget_lane_u32(d0u32, 0); -} - -unsigned int vpx_mse16x16_neon( - const unsigned char *src_ptr, - int source_stride, - const unsigned char *ref_ptr, - int recon_stride, - unsigned int *sse) { - int i; - int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16; - int64x1_t d0s64; - uint8x16_t q0u8, q1u8, q2u8, q3u8; - int32x4_t q7s32, q8s32, q9s32, q10s32; - uint16x8_t q11u16, q12u16, q13u16, q14u16; - int64x2_t q1s64; - - q7s32 = vdupq_n_s32(0); - q8s32 = vdupq_n_s32(0); - q9s32 = vdupq_n_s32(0); - q10s32 = vdupq_n_s32(0); - - for (i = 0; i < 8; i++) { // mse16x16_neon_loop - q0u8 = vld1q_u8(src_ptr); - src_ptr += source_stride; - q1u8 = vld1q_u8(src_ptr); - src_ptr += source_stride; - q2u8 = vld1q_u8(ref_ptr); - ref_ptr += recon_stride; - q3u8 = vld1q_u8(ref_ptr); - ref_ptr += recon_stride; - - q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8)); - q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8)); - q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8)); - q14u16 = 
vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8)); - - d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16)); - d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16)); - q7s32 = vmlal_s16(q7s32, d22s16, d22s16); - q8s32 = vmlal_s16(q8s32, d23s16, d23s16); - - d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16)); - d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16)); - q9s32 = vmlal_s16(q9s32, d24s16, d24s16); - q10s32 = vmlal_s16(q10s32, d25s16, d25s16); - - d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16)); - d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16)); - q7s32 = vmlal_s16(q7s32, d26s16, d26s16); - q8s32 = vmlal_s16(q8s32, d27s16, d27s16); - - d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16)); - d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16)); - q9s32 = vmlal_s16(q9s32, d28s16, d28s16); - q10s32 = vmlal_s16(q10s32, d29s16, d29s16); - } - - q7s32 = vaddq_s32(q7s32, q8s32); - q9s32 = vaddq_s32(q9s32, q10s32); - q10s32 = vaddq_s32(q7s32, q9s32); - - q1s64 = vpaddlq_s32(q10s32); - d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64)); - - vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d0s64), 0); - return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0); -} - -unsigned int vpx_get4x4sse_cs_neon( - const unsigned char *src_ptr, - int source_stride, - const unsigned char *ref_ptr, - int recon_stride) { - int16x4_t d22s16, d24s16, d26s16, d28s16; - int64x1_t d0s64; - uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8; - int32x4_t q7s32, q8s32, q9s32, q10s32; - uint16x8_t q11u16, q12u16, q13u16, q14u16; - int64x2_t q1s64; + q8s32 = vdupq_n_s32(0); + q9s32 = vdupq_n_s32(0); + q10s32 = vdupq_n_s32(0); + for (i = 0; i < 8; i++) { d0u8 = vld1_u8(src_ptr); src_ptr += source_stride; - d4u8 = vld1_u8(ref_ptr); - ref_ptr += recon_stride; - d1u8 = vld1_u8(src_ptr); - src_ptr += source_stride; - d5u8 = vld1_u8(ref_ptr); - ref_ptr += recon_stride; d2u8 = vld1_u8(src_ptr); src_ptr += source_stride; + __builtin_prefetch(src_ptr); + + d4u8 = vld1_u8(ref_ptr); 
+ ref_ptr += recon_stride; d6u8 = vld1_u8(ref_ptr); ref_ptr += recon_stride; - d3u8 = vld1_u8(src_ptr); - src_ptr += source_stride; - d7u8 = vld1_u8(ref_ptr); - ref_ptr += recon_stride; + __builtin_prefetch(ref_ptr); q11u16 = vsubl_u8(d0u8, d4u8); - q12u16 = vsubl_u8(d1u8, d5u8); - q13u16 = vsubl_u8(d2u8, d6u8); - q14u16 = vsubl_u8(d3u8, d7u8); + q12u16 = vsubl_u8(d2u8, d6u8); - d22s16 = vget_low_s16(vreinterpretq_s16_u16(q11u16)); - d24s16 = vget_low_s16(vreinterpretq_s16_u16(q12u16)); - d26s16 = vget_low_s16(vreinterpretq_s16_u16(q13u16)); - d28s16 = vget_low_s16(vreinterpretq_s16_u16(q14u16)); + d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16)); + d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16)); + q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16)); + q9s32 = vmlal_s16(q9s32, d22s16, d22s16); + q10s32 = vmlal_s16(q10s32, d23s16, d23s16); - q7s32 = vmull_s16(d22s16, d22s16); - q8s32 = vmull_s16(d24s16, d24s16); - q9s32 = vmull_s16(d26s16, d26s16); - q10s32 = vmull_s16(d28s16, d28s16); + d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16)); + d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16)); + q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16)); + q9s32 = vmlal_s16(q9s32, d24s16, d24s16); + q10s32 = vmlal_s16(q10s32, d25s16, d25s16); + } - q7s32 = vaddq_s32(q7s32, q8s32); - q9s32 = vaddq_s32(q9s32, q10s32); - q9s32 = vaddq_s32(q7s32, q9s32); + q10s32 = vaddq_s32(q10s32, q9s32); + q0s64 = vpaddlq_s32(q8s32); + q1s64 = vpaddlq_s32(q10s32); - q1s64 = vpaddlq_s32(q9s32); - d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64)); + d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64)); + d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64)); - return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0); + q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64), vreinterpret_s32_s64(d0s64)); + vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0); + + d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7); + d0u32 = 
vsub_u32(vreinterpret_u32_s64(d1s64), d10u32); + + return vget_lane_u32(d0u32, 0); +} + +unsigned int vpx_mse16x16_neon(const unsigned char *src_ptr, int source_stride, + const unsigned char *ref_ptr, int recon_stride, + unsigned int *sse) { + int i; + int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16; + int64x1_t d0s64; + uint8x16_t q0u8, q1u8, q2u8, q3u8; + int32x4_t q7s32, q8s32, q9s32, q10s32; + uint16x8_t q11u16, q12u16, q13u16, q14u16; + int64x2_t q1s64; + + q7s32 = vdupq_n_s32(0); + q8s32 = vdupq_n_s32(0); + q9s32 = vdupq_n_s32(0); + q10s32 = vdupq_n_s32(0); + + for (i = 0; i < 8; i++) { // mse16x16_neon_loop + q0u8 = vld1q_u8(src_ptr); + src_ptr += source_stride; + q1u8 = vld1q_u8(src_ptr); + src_ptr += source_stride; + q2u8 = vld1q_u8(ref_ptr); + ref_ptr += recon_stride; + q3u8 = vld1q_u8(ref_ptr); + ref_ptr += recon_stride; + + q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8)); + q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8)); + q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8)); + q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8)); + + d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16)); + d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16)); + q7s32 = vmlal_s16(q7s32, d22s16, d22s16); + q8s32 = vmlal_s16(q8s32, d23s16, d23s16); + + d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16)); + d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16)); + q9s32 = vmlal_s16(q9s32, d24s16, d24s16); + q10s32 = vmlal_s16(q10s32, d25s16, d25s16); + + d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16)); + d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16)); + q7s32 = vmlal_s16(q7s32, d26s16, d26s16); + q8s32 = vmlal_s16(q8s32, d27s16, d27s16); + + d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16)); + d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16)); + q9s32 = vmlal_s16(q9s32, d28s16, d28s16); + q10s32 = vmlal_s16(q10s32, d29s16, d29s16); + } + + q7s32 = vaddq_s32(q7s32, q8s32); + q9s32 = vaddq_s32(q9s32, q10s32); + 
q10s32 = vaddq_s32(q7s32, q9s32); + + q1s64 = vpaddlq_s32(q10s32); + d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64)); + + vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d0s64), 0); + return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0); +} + +unsigned int vpx_get4x4sse_cs_neon(const unsigned char *src_ptr, + int source_stride, + const unsigned char *ref_ptr, + int recon_stride) { + int16x4_t d22s16, d24s16, d26s16, d28s16; + int64x1_t d0s64; + uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8; + int32x4_t q7s32, q8s32, q9s32, q10s32; + uint16x8_t q11u16, q12u16, q13u16, q14u16; + int64x2_t q1s64; + + d0u8 = vld1_u8(src_ptr); + src_ptr += source_stride; + d4u8 = vld1_u8(ref_ptr); + ref_ptr += recon_stride; + d1u8 = vld1_u8(src_ptr); + src_ptr += source_stride; + d5u8 = vld1_u8(ref_ptr); + ref_ptr += recon_stride; + d2u8 = vld1_u8(src_ptr); + src_ptr += source_stride; + d6u8 = vld1_u8(ref_ptr); + ref_ptr += recon_stride; + d3u8 = vld1_u8(src_ptr); + src_ptr += source_stride; + d7u8 = vld1_u8(ref_ptr); + ref_ptr += recon_stride; + + q11u16 = vsubl_u8(d0u8, d4u8); + q12u16 = vsubl_u8(d1u8, d5u8); + q13u16 = vsubl_u8(d2u8, d6u8); + q14u16 = vsubl_u8(d3u8, d7u8); + + d22s16 = vget_low_s16(vreinterpretq_s16_u16(q11u16)); + d24s16 = vget_low_s16(vreinterpretq_s16_u16(q12u16)); + d26s16 = vget_low_s16(vreinterpretq_s16_u16(q13u16)); + d28s16 = vget_low_s16(vreinterpretq_s16_u16(q14u16)); + + q7s32 = vmull_s16(d22s16, d22s16); + q8s32 = vmull_s16(d24s16, d24s16); + q9s32 = vmull_s16(d26s16, d26s16); + q10s32 = vmull_s16(d28s16, d28s16); + + q7s32 = vaddq_s32(q7s32, q8s32); + q9s32 = vaddq_s32(q9s32, q10s32); + q9s32 = vaddq_s32(q7s32, q9s32); + + q1s64 = vpaddlq_s32(q9s32); + d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64)); + + return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0); } diff --git a/libs/libvpx/vpx_dsp/arm/vpx_convolve8_avg_neon.c b/libs/libvpx/vpx_dsp/arm/vpx_convolve8_avg_neon.c index 8632250138..69cb284005 100644 --- 
a/libs/libvpx/vpx_dsp/arm/vpx_convolve8_avg_neon.c +++ b/libs/libvpx/vpx_dsp/arm/vpx_convolve8_avg_neon.c @@ -16,16 +16,11 @@ #include "vpx/vpx_integer.h" #include "vpx_ports/mem.h" -static INLINE int32x4_t MULTIPLY_BY_Q0( - int16x4_t dsrc0, - int16x4_t dsrc1, - int16x4_t dsrc2, - int16x4_t dsrc3, - int16x4_t dsrc4, - int16x4_t dsrc5, - int16x4_t dsrc6, - int16x4_t dsrc7, - int16x8_t q0s16) { +static INLINE int32x4_t MULTIPLY_BY_Q0(int16x4_t dsrc0, int16x4_t dsrc1, + int16x4_t dsrc2, int16x4_t dsrc3, + int16x4_t dsrc4, int16x4_t dsrc5, + int16x4_t dsrc6, int16x4_t dsrc7, + int16x8_t q0s16) { int32x4_t qdst; int16x4_t d0s16, d1s16; @@ -43,17 +38,12 @@ static INLINE int32x4_t MULTIPLY_BY_Q0( return qdst; } -void vpx_convolve8_avg_horiz_neon( - const uint8_t *src, - ptrdiff_t src_stride, - uint8_t *dst, - ptrdiff_t dst_stride, - const int16_t *filter_x, - int x_step_q4, - const int16_t *filter_y, // unused - int y_step_q4, // unused - int w, - int h) { +void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride, + uint8_t *dst, ptrdiff_t dst_stride, + const int16_t *filter_x, int x_step_q4, + const int16_t *filter_y, // unused + int y_step_q4, // unused + int w, int h) { int width; const uint8_t *s; uint8_t *d; @@ -76,7 +66,7 @@ void vpx_convolve8_avg_horiz_neon( q0s16 = vld1q_s16(filter_x); - src -= 3; // adjust for taps + src -= 3; // adjust for taps for (; h > 0; h -= 4) { // loop_horiz_v s = src; d24u8 = vld1_u8(s); @@ -90,8 +80,8 @@ void vpx_convolve8_avg_horiz_neon( q12u8 = vcombine_u8(d24u8, d25u8); q13u8 = vcombine_u8(d26u8, d27u8); - q0x2u16 = vtrnq_u16(vreinterpretq_u16_u8(q12u8), - vreinterpretq_u16_u8(q13u8)); + q0x2u16 = + vtrnq_u16(vreinterpretq_u16_u8(q12u8), vreinterpretq_u16_u8(q13u8)); d24u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[0])); d25u8 = vreinterpret_u8_u16(vget_high_u16(q0x2u16.val[0])); d26u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[1])); @@ -116,10 +106,8 @@ void vpx_convolve8_avg_horiz_neon( q9u16 = 
vcombine_u16(d17u16, d19u16); d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16)); - d23s16 = vreinterpret_s16_u16(vget_high_u16(q10u16)); // vmov 23 21 - for (width = w; - width > 0; - width -= 4, src += 4, dst += 4) { // loop_horiz + d23s16 = vreinterpret_s16_u16(vget_high_u16(q10u16)); // vmov 23 21 + for (width = w; width > 0; width -= 4, src += 4, dst += 4) { // loop_horiz s = src; d28u32 = vld1_dup_u32((const uint32_t *)s); s += src_stride; @@ -131,10 +119,10 @@ void vpx_convolve8_avg_horiz_neon( __builtin_prefetch(src + 64); - d0x2u16 = vtrn_u16(vreinterpret_u16_u32(d28u32), - vreinterpret_u16_u32(d31u32)); - d1x2u16 = vtrn_u16(vreinterpret_u16_u32(d29u32), - vreinterpret_u16_u32(d30u32)); + d0x2u16 = + vtrn_u16(vreinterpret_u16_u32(d28u32), vreinterpret_u16_u32(d31u32)); + d1x2u16 = + vtrn_u16(vreinterpret_u16_u32(d29u32), vreinterpret_u16_u32(d30u32)); d0x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[0]), // d28 vreinterpret_u8_u16(d1x2u16.val[0])); // d29 d1x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[1]), // d31 @@ -144,8 +132,8 @@ void vpx_convolve8_avg_horiz_neon( q14u8 = vcombine_u8(d0x2u8.val[0], d0x2u8.val[1]); q15u8 = vcombine_u8(d1x2u8.val[1], d1x2u8.val[0]); - q0x2u32 = vtrnq_u32(vreinterpretq_u32_u8(q14u8), - vreinterpretq_u32_u8(q15u8)); + q0x2u32 = + vtrnq_u32(vreinterpretq_u32_u8(q14u8), vreinterpretq_u32_u8(q15u8)); d28u8 = vreinterpret_u8_u32(vget_low_u32(q0x2u32.val[0])); d29u8 = vreinterpret_u8_u32(vget_high_u32(q0x2u32.val[0])); @@ -173,14 +161,14 @@ void vpx_convolve8_avg_horiz_neon( d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16)); d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16)); - q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d20s16, d22s16, - d18s16, d19s16, d23s16, d24s16, q0s16); - q2s32 = MULTIPLY_BY_Q0(d17s16, d20s16, d22s16, d18s16, - d19s16, d23s16, d24s16, d26s16, q0s16); - q14s32 = MULTIPLY_BY_Q0(d20s16, d22s16, d18s16, d19s16, - d23s16, d24s16, d26s16, d27s16, q0s16); - q15s32 = MULTIPLY_BY_Q0(d22s16, d18s16, d19s16, 
d23s16, - d24s16, d26s16, d27s16, d25s16, q0s16); + q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d20s16, d22s16, d18s16, d19s16, + d23s16, d24s16, q0s16); + q2s32 = MULTIPLY_BY_Q0(d17s16, d20s16, d22s16, d18s16, d19s16, d23s16, + d24s16, d26s16, q0s16); + q14s32 = MULTIPLY_BY_Q0(d20s16, d22s16, d18s16, d19s16, d23s16, d24s16, + d26s16, d27s16, q0s16); + q15s32 = MULTIPLY_BY_Q0(d22s16, d18s16, d19s16, d23s16, d24s16, d26s16, + d27s16, d25s16, q0s16); __builtin_prefetch(src + 64 + src_stride * 3); @@ -195,8 +183,7 @@ void vpx_convolve8_avg_horiz_neon( d2u8 = vqmovn_u16(q1u16); d3u8 = vqmovn_u16(q2u16); - d0x2u16 = vtrn_u16(vreinterpret_u16_u8(d2u8), - vreinterpret_u16_u8(d3u8)); + d0x2u16 = vtrn_u16(vreinterpret_u16_u8(d2u8), vreinterpret_u16_u8(d3u8)); d0x2u32 = vtrn_u32(vreinterpret_u32_u16(d0x2u16.val[0]), vreinterpret_u32_u16(d0x2u16.val[1])); d0x2u8 = vtrn_u8(vreinterpret_u8_u32(d0x2u32.val[0]), @@ -231,17 +218,12 @@ void vpx_convolve8_avg_horiz_neon( return; } -void vpx_convolve8_avg_vert_neon( - const uint8_t *src, - ptrdiff_t src_stride, - uint8_t *dst, - ptrdiff_t dst_stride, - const int16_t *filter_x, // unused - int x_step_q4, // unused - const int16_t *filter_y, - int y_step_q4, - int w, - int h) { +void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride, + uint8_t *dst, ptrdiff_t dst_stride, + const int16_t *filter_x, // unused + int x_step_q4, // unused + const int16_t *filter_y, int y_step_q4, int w, + int h) { int height; const uint8_t *s; uint8_t *d; @@ -277,8 +259,8 @@ void vpx_convolve8_avg_vert_neon( d22u32 = vld1_lane_u32((const uint32_t *)s, d22u32, 0); s += src_stride; - q8u16 = vmovl_u8(vreinterpret_u8_u32(d16u32)); - q9u16 = vmovl_u8(vreinterpret_u8_u32(d18u32)); + q8u16 = vmovl_u8(vreinterpret_u8_u32(d16u32)); + q9u16 = vmovl_u8(vreinterpret_u8_u32(d18u32)); q10u16 = vmovl_u8(vreinterpret_u8_u32(d20u32)); q11u16 = vmovl_u8(vreinterpret_u8_u32(d22u32)); @@ -319,20 +301,20 @@ void vpx_convolve8_avg_vert_neon( __builtin_prefetch(s); 
__builtin_prefetch(s + src_stride); - q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d18s16, d19s16, - d20s16, d21s16, d22s16, d24s16, q0s16); + q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, + d22s16, d24s16, q0s16); __builtin_prefetch(s + src_stride * 2); __builtin_prefetch(s + src_stride * 3); - q2s32 = MULTIPLY_BY_Q0(d17s16, d18s16, d19s16, d20s16, - d21s16, d22s16, d24s16, d26s16, q0s16); + q2s32 = MULTIPLY_BY_Q0(d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, + d24s16, d26s16, q0s16); __builtin_prefetch(d); __builtin_prefetch(d + dst_stride); - q14s32 = MULTIPLY_BY_Q0(d18s16, d19s16, d20s16, d21s16, - d22s16, d24s16, d26s16, d27s16, q0s16); + q14s32 = MULTIPLY_BY_Q0(d18s16, d19s16, d20s16, d21s16, d22s16, d24s16, + d26s16, d27s16, q0s16); __builtin_prefetch(d + dst_stride * 2); __builtin_prefetch(d + dst_stride * 3); - q15s32 = MULTIPLY_BY_Q0(d19s16, d20s16, d21s16, d22s16, - d24s16, d26s16, d27s16, d25s16, q0s16); + q15s32 = MULTIPLY_BY_Q0(d19s16, d20s16, d21s16, d22s16, d24s16, d26s16, + d27s16, d25s16, q0s16); d2u16 = vqrshrun_n_s32(q1s32, 7); d3u16 = vqrshrun_n_s32(q2s32, 7); diff --git a/libs/libvpx/vpx_dsp/arm/vpx_convolve8_neon.c b/libs/libvpx/vpx_dsp/arm/vpx_convolve8_neon.c index 9bd715e2c6..514525696b 100644 --- a/libs/libvpx/vpx_dsp/arm/vpx_convolve8_neon.c +++ b/libs/libvpx/vpx_dsp/arm/vpx_convolve8_neon.c @@ -16,16 +16,11 @@ #include "vpx/vpx_integer.h" #include "vpx_ports/mem.h" -static INLINE int32x4_t MULTIPLY_BY_Q0( - int16x4_t dsrc0, - int16x4_t dsrc1, - int16x4_t dsrc2, - int16x4_t dsrc3, - int16x4_t dsrc4, - int16x4_t dsrc5, - int16x4_t dsrc6, - int16x4_t dsrc7, - int16x8_t q0s16) { +static INLINE int32x4_t MULTIPLY_BY_Q0(int16x4_t dsrc0, int16x4_t dsrc1, + int16x4_t dsrc2, int16x4_t dsrc3, + int16x4_t dsrc4, int16x4_t dsrc5, + int16x4_t dsrc6, int16x4_t dsrc7, + int16x8_t q0s16) { int32x4_t qdst; int16x4_t d0s16, d1s16; @@ -43,17 +38,12 @@ static INLINE int32x4_t MULTIPLY_BY_Q0( return qdst; } -void 
vpx_convolve8_horiz_neon( - const uint8_t *src, - ptrdiff_t src_stride, - uint8_t *dst, - ptrdiff_t dst_stride, - const int16_t *filter_x, - int x_step_q4, - const int16_t *filter_y, // unused - int y_step_q4, // unused - int w, - int h) { +void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride, + uint8_t *dst, ptrdiff_t dst_stride, + const int16_t *filter_x, int x_step_q4, + const int16_t *filter_y, // unused + int y_step_q4, // unused + int w, int h) { int width; const uint8_t *s, *psrc; uint8_t *d, *pdst; @@ -77,9 +67,8 @@ void vpx_convolve8_horiz_neon( q0s16 = vld1q_s16(filter_x); src -= 3; // adjust for taps - for (; h > 0; h -= 4, - src += src_stride * 4, - dst += dst_stride * 4) { // loop_horiz_v + for (; h > 0; h -= 4, src += src_stride * 4, + dst += dst_stride * 4) { // loop_horiz_v s = src; d24u8 = vld1_u8(s); s += src_stride; @@ -92,8 +81,8 @@ void vpx_convolve8_horiz_neon( q12u8 = vcombine_u8(d24u8, d25u8); q13u8 = vcombine_u8(d26u8, d27u8); - q0x2u16 = vtrnq_u16(vreinterpretq_u16_u8(q12u8), - vreinterpretq_u16_u8(q13u8)); + q0x2u16 = + vtrnq_u16(vreinterpretq_u16_u8(q12u8), vreinterpretq_u16_u8(q13u8)); d24u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[0])); d25u8 = vreinterpret_u8_u16(vget_high_u16(q0x2u16.val[0])); d26u8 = vreinterpret_u8_u16(vget_low_u16(q0x2u16.val[1])); @@ -105,8 +94,8 @@ void vpx_convolve8_horiz_neon( __builtin_prefetch(src + src_stride * 5); __builtin_prefetch(src + src_stride * 6); - q8u16 = vmovl_u8(d0x2u8.val[0]); - q9u16 = vmovl_u8(d0x2u8.val[1]); + q8u16 = vmovl_u8(d0x2u8.val[0]); + q9u16 = vmovl_u8(d0x2u8.val[1]); q10u16 = vmovl_u8(d1x2u8.val[0]); q11u16 = vmovl_u8(d1x2u8.val[1]); @@ -119,8 +108,7 @@ void vpx_convolve8_horiz_neon( d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16)); d23s16 = vreinterpret_s16_u16(vget_high_u16(q10u16)); // vmov 23 21 - for (width = w, psrc = src + 7, pdst = dst; - width > 0; + for (width = w, psrc = src + 7, pdst = dst; width > 0; width -= 4, psrc += 4, pdst += 4) { // 
loop_horiz s = psrc; d28u32 = vld1_dup_u32((const uint32_t *)s); @@ -133,10 +121,10 @@ void vpx_convolve8_horiz_neon( __builtin_prefetch(psrc + 64); - d0x2u16 = vtrn_u16(vreinterpret_u16_u32(d28u32), - vreinterpret_u16_u32(d31u32)); - d1x2u16 = vtrn_u16(vreinterpret_u16_u32(d29u32), - vreinterpret_u16_u32(d30u32)); + d0x2u16 = + vtrn_u16(vreinterpret_u16_u32(d28u32), vreinterpret_u16_u32(d31u32)); + d1x2u16 = + vtrn_u16(vreinterpret_u16_u32(d29u32), vreinterpret_u16_u32(d30u32)); d0x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[0]), // d28 vreinterpret_u8_u16(d1x2u16.val[0])); // d29 d1x2u8 = vtrn_u8(vreinterpret_u8_u16(d0x2u16.val[1]), // d31 @@ -146,8 +134,8 @@ void vpx_convolve8_horiz_neon( q14u8 = vcombine_u8(d0x2u8.val[0], d0x2u8.val[1]); q15u8 = vcombine_u8(d1x2u8.val[1], d1x2u8.val[0]); - q0x2u32 = vtrnq_u32(vreinterpretq_u32_u8(q14u8), - vreinterpretq_u32_u8(q15u8)); + q0x2u32 = + vtrnq_u32(vreinterpretq_u32_u8(q14u8), vreinterpretq_u32_u8(q15u8)); d28u8 = vreinterpret_u8_u32(vget_low_u32(q0x2u32.val[0])); d29u8 = vreinterpret_u8_u32(vget_high_u32(q0x2u32.val[0])); @@ -166,14 +154,14 @@ void vpx_convolve8_horiz_neon( d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16)); d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16)); - q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d20s16, d22s16, - d18s16, d19s16, d23s16, d24s16, q0s16); - q2s32 = MULTIPLY_BY_Q0(d17s16, d20s16, d22s16, d18s16, - d19s16, d23s16, d24s16, d26s16, q0s16); - q14s32 = MULTIPLY_BY_Q0(d20s16, d22s16, d18s16, d19s16, - d23s16, d24s16, d26s16, d27s16, q0s16); - q15s32 = MULTIPLY_BY_Q0(d22s16, d18s16, d19s16, d23s16, - d24s16, d26s16, d27s16, d25s16, q0s16); + q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d20s16, d22s16, d18s16, d19s16, + d23s16, d24s16, q0s16); + q2s32 = MULTIPLY_BY_Q0(d17s16, d20s16, d22s16, d18s16, d19s16, d23s16, + d24s16, d26s16, q0s16); + q14s32 = MULTIPLY_BY_Q0(d20s16, d22s16, d18s16, d19s16, d23s16, d24s16, + d26s16, d27s16, q0s16); + q15s32 = MULTIPLY_BY_Q0(d22s16, d18s16, d19s16, 
d23s16, d24s16, d26s16, + d27s16, d25s16, q0s16); __builtin_prefetch(psrc + 60 + src_stride * 3); @@ -188,8 +176,7 @@ void vpx_convolve8_horiz_neon( d2u8 = vqmovn_u16(q1u16); d3u8 = vqmovn_u16(q2u16); - d0x2u16 = vtrn_u16(vreinterpret_u16_u8(d2u8), - vreinterpret_u16_u8(d3u8)); + d0x2u16 = vtrn_u16(vreinterpret_u16_u8(d2u8), vreinterpret_u16_u8(d3u8)); d0x2u32 = vtrn_u32(vreinterpret_u32_u16(d0x2u16.val[0]), vreinterpret_u32_u16(d0x2u16.val[1])); d0x2u8 = vtrn_u8(vreinterpret_u8_u32(d0x2u32.val[0]), @@ -217,17 +204,12 @@ void vpx_convolve8_horiz_neon( return; } -void vpx_convolve8_vert_neon( - const uint8_t *src, - ptrdiff_t src_stride, - uint8_t *dst, - ptrdiff_t dst_stride, - const int16_t *filter_x, // unused - int x_step_q4, // unused - const int16_t *filter_y, - int y_step_q4, - int w, - int h) { +void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride, + uint8_t *dst, ptrdiff_t dst_stride, + const int16_t *filter_x, // unused + int x_step_q4, // unused + const int16_t *filter_y, int y_step_q4, int w, + int h) { int height; const uint8_t *s; uint8_t *d; @@ -261,8 +243,8 @@ void vpx_convolve8_vert_neon( d22u32 = vld1_lane_u32((const uint32_t *)s, d22u32, 0); s += src_stride; - q8u16 = vmovl_u8(vreinterpret_u8_u32(d16u32)); - q9u16 = vmovl_u8(vreinterpret_u8_u32(d18u32)); + q8u16 = vmovl_u8(vreinterpret_u8_u32(d16u32)); + q9u16 = vmovl_u8(vreinterpret_u8_u32(d18u32)); q10u16 = vmovl_u8(vreinterpret_u8_u32(d20u32)); q11u16 = vmovl_u8(vreinterpret_u8_u32(d22u32)); @@ -294,20 +276,20 @@ void vpx_convolve8_vert_neon( __builtin_prefetch(d); __builtin_prefetch(d + dst_stride); - q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d18s16, d19s16, - d20s16, d21s16, d22s16, d24s16, q0s16); + q1s32 = MULTIPLY_BY_Q0(d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, + d22s16, d24s16, q0s16); __builtin_prefetch(d + dst_stride * 2); __builtin_prefetch(d + dst_stride * 3); - q2s32 = MULTIPLY_BY_Q0(d17s16, d18s16, d19s16, d20s16, - d21s16, d22s16, d24s16, d26s16, q0s16); + q2s32 
= MULTIPLY_BY_Q0(d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, + d24s16, d26s16, q0s16); __builtin_prefetch(s); __builtin_prefetch(s + src_stride); - q14s32 = MULTIPLY_BY_Q0(d18s16, d19s16, d20s16, d21s16, - d22s16, d24s16, d26s16, d27s16, q0s16); + q14s32 = MULTIPLY_BY_Q0(d18s16, d19s16, d20s16, d21s16, d22s16, d24s16, + d26s16, d27s16, q0s16); __builtin_prefetch(s + src_stride * 2); __builtin_prefetch(s + src_stride * 3); - q15s32 = MULTIPLY_BY_Q0(d19s16, d20s16, d21s16, d22s16, - d24s16, d26s16, d27s16, d25s16, q0s16); + q15s32 = MULTIPLY_BY_Q0(d19s16, d20s16, d21s16, d22s16, d24s16, d26s16, + d27s16, d25s16, q0s16); d2u16 = vqrshrun_n_s32(q1s32, 7); d3u16 = vqrshrun_n_s32(q2s32, 7); diff --git a/libs/libvpx/vpx_dsp/arm/vpx_convolve_avg_neon.c b/libs/libvpx/vpx_dsp/arm/vpx_convolve_avg_neon.c index dc58a332f8..abc2511ea2 100644 --- a/libs/libvpx/vpx_dsp/arm/vpx_convolve_avg_neon.c +++ b/libs/libvpx/vpx_dsp/arm/vpx_convolve_avg_neon.c @@ -13,34 +13,32 @@ #include "./vpx_dsp_rtcd.h" #include "vpx/vpx_integer.h" -void vpx_convolve_avg_neon( - const uint8_t *src, // r0 - ptrdiff_t src_stride, // r1 - uint8_t *dst, // r2 - ptrdiff_t dst_stride, // r3 - const int16_t *filter_x, - int filter_x_stride, - const int16_t *filter_y, - int filter_y_stride, - int w, - int h) { +void vpx_convolve_avg_neon(const uint8_t *src, // r0 + ptrdiff_t src_stride, // r1 + uint8_t *dst, // r2 + ptrdiff_t dst_stride, // r3 + const int16_t *filter_x, int filter_x_stride, + const int16_t *filter_y, int filter_y_stride, int w, + int h) { uint8_t *d; uint8x8_t d0u8, d1u8, d2u8, d3u8; uint32x2_t d0u32, d2u32; uint8x16_t q0u8, q1u8, q2u8, q3u8, q8u8, q9u8, q10u8, q11u8; - (void)filter_x; (void)filter_x_stride; - (void)filter_y; (void)filter_y_stride; + (void)filter_x; + (void)filter_x_stride; + (void)filter_y; + (void)filter_y_stride; d = dst; if (w > 32) { // avg64 for (; h > 0; h -= 1) { - q0u8 = vld1q_u8(src); - q1u8 = vld1q_u8(src + 16); - q2u8 = vld1q_u8(src + 32); - q3u8 = vld1q_u8(src 
+ 48); + q0u8 = vld1q_u8(src); + q1u8 = vld1q_u8(src + 16); + q2u8 = vld1q_u8(src + 32); + q3u8 = vld1q_u8(src + 48); src += src_stride; - q8u8 = vld1q_u8(d); - q9u8 = vld1q_u8(d + 16); + q8u8 = vld1q_u8(d); + q9u8 = vld1q_u8(d + 16); q10u8 = vld1q_u8(d + 32); q11u8 = vld1q_u8(d + 48); d += dst_stride; @@ -133,8 +131,7 @@ void vpx_convolve_avg_neon( d2u32 = vld1_lane_u32((const uint32_t *)d, d2u32, 1); d += dst_stride; - d0u8 = vrhadd_u8(vreinterpret_u8_u32(d0u32), - vreinterpret_u8_u32(d2u32)); + d0u8 = vrhadd_u8(vreinterpret_u8_u32(d0u32), vreinterpret_u8_u32(d2u32)); d0u32 = vreinterpret_u32_u8(d0u8); vst1_lane_u32((uint32_t *)dst, d0u32, 0); diff --git a/libs/libvpx/vpx_dsp/arm/vpx_convolve_copy_neon.c b/libs/libvpx/vpx_dsp/arm/vpx_convolve_copy_neon.c index d8fb97a861..fec189e0e4 100644 --- a/libs/libvpx/vpx_dsp/arm/vpx_convolve_copy_neon.c +++ b/libs/libvpx/vpx_dsp/arm/vpx_convolve_copy_neon.c @@ -13,21 +13,19 @@ #include "./vpx_dsp_rtcd.h" #include "vpx/vpx_integer.h" -void vpx_convolve_copy_neon( - const uint8_t *src, // r0 - ptrdiff_t src_stride, // r1 - uint8_t *dst, // r2 - ptrdiff_t dst_stride, // r3 - const int16_t *filter_x, - int filter_x_stride, - const int16_t *filter_y, - int filter_y_stride, - int w, - int h) { +void vpx_convolve_copy_neon(const uint8_t *src, // r0 + ptrdiff_t src_stride, // r1 + uint8_t *dst, // r2 + ptrdiff_t dst_stride, // r3 + const int16_t *filter_x, int filter_x_stride, + const int16_t *filter_y, int filter_y_stride, int w, + int h) { uint8x8_t d0u8, d2u8; uint8x16_t q0u8, q1u8, q2u8, q3u8; - (void)filter_x; (void)filter_x_stride; - (void)filter_y; (void)filter_y_stride; + (void)filter_x; + (void)filter_x_stride; + (void)filter_y; + (void)filter_y_stride; if (w > 32) { // copy64 for (; h > 0; h--) { diff --git a/libs/libvpx/vpx_dsp/arm/vpx_convolve_neon.c b/libs/libvpx/vpx_dsp/arm/vpx_convolve_neon.c index 1506ce6203..c2d5895b71 100644 --- a/libs/libvpx/vpx_dsp/arm/vpx_convolve_neon.c +++ 
b/libs/libvpx/vpx_dsp/arm/vpx_convolve_neon.c @@ -14,10 +14,9 @@ #include "vpx_dsp/vpx_dsp_common.h" #include "vpx_ports/mem.h" -void vpx_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, +void vpx_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const int16_t *filter_x, + int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { /* Given our constraints: w <= 64, h <= 64, taps == 8 we can reduce the * maximum buffer size to 64 * 64 + 7 (+ 1 to make it divisible by 4). @@ -35,23 +34,20 @@ void vpx_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride, * the temp buffer which has lots of extra room and is subsequently discarded * this is safe if somewhat less than ideal. */ - vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, - temp, 64, - filter_x, x_step_q4, filter_y, y_step_q4, - w, intermediate_height); + vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x, + x_step_q4, filter_y, y_step_q4, w, + intermediate_height); /* Step into the temp buffer 3 lines to get the actual frame data */ - vpx_convolve8_vert_neon(temp + 64 * 3, 64, - dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); } void vpx_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { DECLARE_ALIGNED(8, uint8_t, temp[64 * 72]); int intermediate_height = h + 7; @@ -61,12 +57,9 @@ void vpx_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride, /* This implementation has the same issues as above. 
In addition, we only want * to average the values after both passes. */ - vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, - temp, 64, - filter_x, x_step_q4, filter_y, y_step_q4, - w, intermediate_height); - vpx_convolve8_avg_vert_neon(temp + 64 * 3, - 64, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x, + x_step_q4, filter_y, y_step_q4, w, + intermediate_height); + vpx_convolve8_avg_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); } diff --git a/libs/libvpx/vpx_dsp/avg.c b/libs/libvpx/vpx_dsp/avg.c index 26fe7859a5..4d9abb8de3 100644 --- a/libs/libvpx/vpx_dsp/avg.c +++ b/libs/libvpx/vpx_dsp/avg.c @@ -15,8 +15,9 @@ unsigned int vpx_avg_8x8_c(const uint8_t *s, int p) { int i, j; int sum = 0; - for (i = 0; i < 8; ++i, s+=p) - for (j = 0; j < 8; sum += s[j], ++j) {} + for (i = 0; i < 8; ++i, s += p) + for (j = 0; j < 8; sum += s[j], ++j) { + } return (sum + 32) >> 6; } @@ -24,8 +25,9 @@ unsigned int vpx_avg_8x8_c(const uint8_t *s, int p) { unsigned int vpx_avg_4x4_c(const uint8_t *s, int p) { int i, j; int sum = 0; - for (i = 0; i < 4; ++i, s+=p) - for (j = 0; j < 4; sum += s[j], ++j) {} + for (i = 0; i < 4; ++i, s += p) + for (j = 0; j < 4; sum += s[j], ++j) { + } return (sum + 8) >> 4; } @@ -62,7 +64,9 @@ static void hadamard_col8(const int16_t *src_diff, int src_stride, coeff[5] = c3 - c7; } -void vpx_hadamard_8x8_c(int16_t const *src_diff, int src_stride, +// The order of the output coeff of the hadamard is not important. For +// optimization purposes the final transpose may be skipped. 
+void vpx_hadamard_8x8_c(const int16_t *src_diff, int src_stride, int16_t *coeff) { int idx; int16_t buffer[64]; @@ -78,20 +82,20 @@ void vpx_hadamard_8x8_c(int16_t const *src_diff, int src_stride, for (idx = 0; idx < 8; ++idx) { hadamard_col8(tmp_buf, 8, coeff); // tmp_buf: 12 bit // dynamic range [-2040, 2040] - coeff += 8; // coeff: 15 bit - // dynamic range [-16320, 16320] + coeff += 8; // coeff: 15 bit + // dynamic range [-16320, 16320] ++tmp_buf; } } // In place 16x16 2D Hadamard transform -void vpx_hadamard_16x16_c(int16_t const *src_diff, int src_stride, +void vpx_hadamard_16x16_c(const int16_t *src_diff, int src_stride, int16_t *coeff) { int idx; for (idx = 0; idx < 4; ++idx) { // src_diff: 9 bit, dynamic range [-255, 255] - int16_t const *src_ptr = src_diff + (idx >> 1) * 8 * src_stride - + (idx & 0x01) * 8; + const int16_t *src_ptr = + src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8; vpx_hadamard_8x8_c(src_ptr, src_stride, coeff + idx * 64); } @@ -107,8 +111,8 @@ void vpx_hadamard_16x16_c(int16_t const *src_diff, int src_stride, int16_t b2 = (a2 + a3) >> 1; // [-16320, 16320] int16_t b3 = (a2 - a3) >> 1; - coeff[0] = b0 + b2; // 16 bit, [-32640, 32640] - coeff[64] = b1 + b3; + coeff[0] = b0 + b2; // 16 bit, [-32640, 32640] + coeff[64] = b1 + b3; coeff[128] = b0 - b2; coeff[192] = b1 - b3; @@ -121,8 +125,7 @@ void vpx_hadamard_16x16_c(int16_t const *src_diff, int src_stride, int vpx_satd_c(const int16_t *coeff, int length) { int i; int satd = 0; - for (i = 0; i < length; ++i) - satd += abs(coeff[i]); + for (i = 0; i < length; ++i) satd += abs(coeff[i]); // satd: 26 bits, dynamic range [-32640 * 1024, 32640 * 1024] return satd; @@ -130,7 +133,7 @@ int vpx_satd_c(const int16_t *coeff, int length) { // Integer projection onto row vectors. // height: value range {16, 32, 64}. 
-void vpx_int_pro_row_c(int16_t hbuf[16], uint8_t const *ref, +void vpx_int_pro_row_c(int16_t hbuf[16], const uint8_t *ref, const int ref_stride, const int height) { int idx; const int norm_factor = height >> 1; @@ -138,8 +141,7 @@ void vpx_int_pro_row_c(int16_t hbuf[16], uint8_t const *ref, int i; hbuf[idx] = 0; // hbuf[idx]: 14 bit, dynamic range [0, 16320]. - for (i = 0; i < height; ++i) - hbuf[idx] += ref[i * ref_stride]; + for (i = 0; i < height; ++i) hbuf[idx] += ref[i * ref_stride]; // hbuf[idx]: 9 bit, dynamic range [0, 510]. hbuf[idx] /= norm_factor; ++ref; @@ -147,20 +149,18 @@ void vpx_int_pro_row_c(int16_t hbuf[16], uint8_t const *ref, } // width: value range {16, 32, 64}. -int16_t vpx_int_pro_col_c(uint8_t const *ref, const int width) { +int16_t vpx_int_pro_col_c(const uint8_t *ref, const int width) { int idx; int16_t sum = 0; // sum: 14 bit, dynamic range [0, 16320] - for (idx = 0; idx < width; ++idx) - sum += ref[idx]; + for (idx = 0; idx < width; ++idx) sum += ref[idx]; return sum; } // ref: [0 - 510] // src: [0 - 510] // bwl: {2, 3, 4} -int vpx_vector_var_c(int16_t const *ref, int16_t const *src, - const int bwl) { +int vpx_vector_var_c(const int16_t *ref, const int16_t *src, const int bwl) { int i; int width = 4 << bwl; int sse = 0, mean = 0, var; @@ -183,7 +183,7 @@ void vpx_minmax_8x8_c(const uint8_t *s, int p, const uint8_t *d, int dp, *max = 0; for (i = 0; i < 8; ++i, s += p, d += dp) { for (j = 0; j < 8; ++j) { - int diff = abs(s[j]-d[j]); + int diff = abs(s[j] - d[j]); *min = diff < *min ? diff : *min; *max = diff > *max ? 
diff : *max; } @@ -194,9 +194,10 @@ void vpx_minmax_8x8_c(const uint8_t *s, int p, const uint8_t *d, int dp, unsigned int vpx_highbd_avg_8x8_c(const uint8_t *s8, int p) { int i, j; int sum = 0; - const uint16_t* s = CONVERT_TO_SHORTPTR(s8); - for (i = 0; i < 8; ++i, s+=p) - for (j = 0; j < 8; sum += s[j], ++j) {} + const uint16_t *s = CONVERT_TO_SHORTPTR(s8); + for (i = 0; i < 8; ++i, s += p) + for (j = 0; j < 8; sum += s[j], ++j) { + } return (sum + 32) >> 6; } @@ -204,9 +205,10 @@ unsigned int vpx_highbd_avg_8x8_c(const uint8_t *s8, int p) { unsigned int vpx_highbd_avg_4x4_c(const uint8_t *s8, int p) { int i, j; int sum = 0; - const uint16_t* s = CONVERT_TO_SHORTPTR(s8); - for (i = 0; i < 4; ++i, s+=p) - for (j = 0; j < 4; sum += s[j], ++j) {} + const uint16_t *s = CONVERT_TO_SHORTPTR(s8); + for (i = 0; i < 4; ++i, s += p) + for (j = 0; j < 4; sum += s[j], ++j) { + } return (sum + 8) >> 4; } @@ -214,18 +216,16 @@ unsigned int vpx_highbd_avg_4x4_c(const uint8_t *s8, int p) { void vpx_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8, int dp, int *min, int *max) { int i, j; - const uint16_t* s = CONVERT_TO_SHORTPTR(s8); - const uint16_t* d = CONVERT_TO_SHORTPTR(d8); + const uint16_t *s = CONVERT_TO_SHORTPTR(s8); + const uint16_t *d = CONVERT_TO_SHORTPTR(d8); *min = 255; *max = 0; for (i = 0; i < 8; ++i, s += p, d += dp) { for (j = 0; j < 8; ++j) { - int diff = abs(s[j]-d[j]); + int diff = abs(s[j] - d[j]); *min = diff < *min ? diff : *min; *max = diff > *max ? 
diff : *max; } } } #endif // CONFIG_VP9_HIGHBITDEPTH - - diff --git a/libs/libvpx/vpx_dsp/bitreader.c b/libs/libvpx/vpx_dsp/bitreader.c index 6ad806ac3f..90cbbba53f 100644 --- a/libs/libvpx/vpx_dsp/bitreader.c +++ b/libs/libvpx/vpx_dsp/bitreader.c @@ -18,11 +18,8 @@ #include "vpx_mem/vpx_mem.h" #include "vpx_util/endian_inl.h" -int vpx_reader_init(vpx_reader *r, - const uint8_t *buffer, - size_t size, - vpx_decrypt_cb decrypt_cb, - void *decrypt_state) { +int vpx_reader_init(vpx_reader *r, const uint8_t *buffer, size_t size, + vpx_decrypt_cb decrypt_cb, void *decrypt_state) { if (size && !buffer) { return 1; } else { @@ -55,21 +52,21 @@ void vpx_reader_fill(vpx_reader *r) { buffer_start = r->clear_buffer; } if (bits_left > BD_VALUE_SIZE) { - const int bits = (shift & 0xfffffff8) + CHAR_BIT; - BD_VALUE nv; - BD_VALUE big_endian_values; - memcpy(&big_endian_values, buffer, sizeof(BD_VALUE)); + const int bits = (shift & 0xfffffff8) + CHAR_BIT; + BD_VALUE nv; + BD_VALUE big_endian_values; + memcpy(&big_endian_values, buffer, sizeof(BD_VALUE)); #if SIZE_MAX == 0xffffffffffffffffULL - big_endian_values = HToBE64(big_endian_values); + big_endian_values = HToBE64(big_endian_values); #else - big_endian_values = HToBE32(big_endian_values); + big_endian_values = HToBE32(big_endian_values); #endif - nv = big_endian_values >> (BD_VALUE_SIZE - bits); - count += bits; - buffer += (bits >> 3); - value = r->value | (nv << (shift & 0x7)); + nv = big_endian_values >> (BD_VALUE_SIZE - bits); + count += bits; + buffer += (bits >> 3); + value = r->value | (nv << (shift & 0x7)); } else { - const int bits_over = (int)(shift + CHAR_BIT - bits_left); + const int bits_over = (int)(shift + CHAR_BIT - (int)bits_left); int loop_end = 0; if (bits_over >= 0) { count += LOTS_OF_BITS; diff --git a/libs/libvpx/vpx_dsp/bitreader.h b/libs/libvpx/vpx_dsp/bitreader.h index 9a441b4107..6ee2a58632 100644 --- a/libs/libvpx/vpx_dsp/bitreader.h +++ b/libs/libvpx/vpx_dsp/bitreader.h @@ -45,11 +45,8 @@ typedef 
struct { uint8_t clear_buffer[sizeof(BD_VALUE) + 1]; } vpx_reader; -int vpx_reader_init(vpx_reader *r, - const uint8_t *buffer, - size_t size, - vpx_decrypt_cb decrypt_cb, - void *decrypt_state); +int vpx_reader_init(vpx_reader *r, const uint8_t *buffer, size_t size, + vpx_decrypt_cb decrypt_cb, void *decrypt_state); void vpx_reader_fill(vpx_reader *r); @@ -81,8 +78,7 @@ static INLINE int vpx_read(vpx_reader *r, int prob) { unsigned int range; unsigned int split = (r->range * prob + (256 - prob)) >> CHAR_BIT; - if (r->count < 0) - vpx_reader_fill(r); + if (r->count < 0) vpx_reader_fill(r); value = r->value; count = r->count; @@ -117,8 +113,7 @@ static INLINE int vpx_read_bit(vpx_reader *r) { static INLINE int vpx_read_literal(vpx_reader *r, int bits) { int literal = 0, bit; - for (bit = bits - 1; bit >= 0; bit--) - literal |= vpx_read_bit(r) << bit; + for (bit = bits - 1; bit >= 0; bit--) literal |= vpx_read_bit(r) << bit; return literal; } @@ -127,8 +122,7 @@ static INLINE int vpx_read_tree(vpx_reader *r, const vpx_tree_index *tree, const vpx_prob *probs) { vpx_tree_index i = 0; - while ((i = tree[i + vpx_read(r, probs[i >> 1])]) > 0) - continue; + while ((i = tree[i + vpx_read(r, probs[i >> 1])]) > 0) continue; return -i; } diff --git a/libs/libvpx/vpx_dsp/bitreader_buffer.c b/libs/libvpx/vpx_dsp/bitreader_buffer.c index d7b55cf9f4..e99fffb605 100644 --- a/libs/libvpx/vpx_dsp/bitreader_buffer.c +++ b/libs/libvpx/vpx_dsp/bitreader_buffer.c @@ -30,23 +30,20 @@ int vpx_rb_read_bit(struct vpx_read_bit_buffer *rb) { int vpx_rb_read_literal(struct vpx_read_bit_buffer *rb, int bits) { int value = 0, bit; - for (bit = bits - 1; bit >= 0; bit--) - value |= vpx_rb_read_bit(rb) << bit; + for (bit = bits - 1; bit >= 0; bit--) value |= vpx_rb_read_bit(rb) << bit; return value; } -int vpx_rb_read_signed_literal(struct vpx_read_bit_buffer *rb, - int bits) { +int vpx_rb_read_signed_literal(struct vpx_read_bit_buffer *rb, int bits) { const int value = vpx_rb_read_literal(rb, 
bits); return vpx_rb_read_bit(rb) ? -value : value; } -int vpx_rb_read_inv_signed_literal(struct vpx_read_bit_buffer *rb, - int bits) { +int vpx_rb_read_inv_signed_literal(struct vpx_read_bit_buffer *rb, int bits) { #if CONFIG_MISC_FIXES const int nbits = sizeof(unsigned) * 8 - bits - 1; const unsigned value = (unsigned)vpx_rb_read_literal(rb, bits + 1) << nbits; - return ((int) value) >> nbits; + return ((int)value) >> nbits; #else return vpx_rb_read_signed_literal(rb, bits); #endif diff --git a/libs/libvpx/vpx_dsp/bitwriter.c b/libs/libvpx/vpx_dsp/bitwriter.c index 5b232e346e..81e28b309f 100644 --- a/libs/libvpx/vpx_dsp/bitwriter.c +++ b/libs/libvpx/vpx_dsp/bitwriter.c @@ -14,21 +14,18 @@ void vpx_start_encode(vpx_writer *br, uint8_t *source) { br->lowvalue = 0; - br->range = 255; - br->count = -24; - br->buffer = source; - br->pos = 0; + br->range = 255; + br->count = -24; + br->buffer = source; + br->pos = 0; vpx_write_bit(br, 0); } void vpx_stop_encode(vpx_writer *br) { int i; - for (i = 0; i < 32; i++) - vpx_write_bit(br, 0); + for (i = 0; i < 32; i++) vpx_write_bit(br, 0); // Ensure there's no ambigous collision with any index marker bytes - if ((br->buffer[br->pos - 1] & 0xe0) == 0xc0) - br->buffer[br->pos++] = 0; + if ((br->buffer[br->pos - 1] & 0xe0) == 0xc0) br->buffer[br->pos++] = 0; } - diff --git a/libs/libvpx/vpx_dsp/bitwriter.h b/libs/libvpx/vpx_dsp/bitwriter.h index d904997af3..41040cf935 100644 --- a/libs/libvpx/vpx_dsp/bitwriter.h +++ b/libs/libvpx/vpx_dsp/bitwriter.h @@ -85,8 +85,7 @@ static INLINE void vpx_write_bit(vpx_writer *w, int bit) { static INLINE void vpx_write_literal(vpx_writer *w, int data, int bits) { int bit; - for (bit = bits - 1; bit >= 0; bit--) - vpx_write_bit(w, 1 & (data >> bit)); + for (bit = bits - 1; bit >= 0; bit--) vpx_write_bit(w, 1 & (data >> bit)); } #define vpx_write_prob(w, v) vpx_write_literal((w), (v), 8) diff --git a/libs/libvpx/vpx_dsp/bitwriter_buffer.c b/libs/libvpx/vpx_dsp/bitwriter_buffer.c index 
6182a72221..1043cdc784 100644 --- a/libs/libvpx/vpx_dsp/bitwriter_buffer.c +++ b/libs/libvpx/vpx_dsp/bitwriter_buffer.c @@ -22,7 +22,7 @@ void vpx_wb_write_bit(struct vpx_write_bit_buffer *wb, int bit) { const int off = (int)wb->bit_offset; const int p = off / CHAR_BIT; const int q = CHAR_BIT - 1 - off % CHAR_BIT; - if (q == CHAR_BIT -1) { + if (q == CHAR_BIT - 1) { wb->bit_buffer[p] = bit << q; } else { wb->bit_buffer[p] &= ~(1 << q); @@ -33,12 +33,11 @@ void vpx_wb_write_bit(struct vpx_write_bit_buffer *wb, int bit) { void vpx_wb_write_literal(struct vpx_write_bit_buffer *wb, int data, int bits) { int bit; - for (bit = bits - 1; bit >= 0; bit--) - vpx_wb_write_bit(wb, (data >> bit) & 1); + for (bit = bits - 1; bit >= 0; bit--) vpx_wb_write_bit(wb, (data >> bit) & 1); } -void vpx_wb_write_inv_signed_literal(struct vpx_write_bit_buffer *wb, - int data, int bits) { +void vpx_wb_write_inv_signed_literal(struct vpx_write_bit_buffer *wb, int data, + int bits) { #if CONFIG_MISC_FIXES vpx_wb_write_literal(wb, data, bits + 1); #else diff --git a/libs/libvpx/vpx_dsp/deblock.c b/libs/libvpx/vpx_dsp/deblock.c new file mode 100644 index 0000000000..589b124e26 --- /dev/null +++ b/libs/libvpx/vpx_dsp/deblock.c @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ +#include +#include "vpx/vpx_integer.h" + +const int16_t vpx_rv[] = { + 8, 5, 2, 2, 8, 12, 4, 9, 8, 3, 0, 3, 9, 0, 0, 0, 8, 3, 14, + 4, 10, 1, 11, 14, 1, 14, 9, 6, 12, 11, 8, 6, 10, 0, 0, 8, 9, 0, + 3, 14, 8, 11, 13, 4, 2, 9, 0, 3, 9, 6, 1, 2, 3, 14, 13, 1, 8, + 2, 9, 7, 3, 3, 1, 13, 13, 6, 6, 5, 2, 7, 11, 9, 11, 8, 7, 3, + 2, 0, 13, 13, 14, 4, 12, 5, 12, 10, 8, 10, 13, 10, 4, 14, 4, 10, 0, + 8, 11, 1, 13, 7, 7, 14, 6, 14, 13, 2, 13, 5, 4, 4, 0, 10, 0, 5, + 13, 2, 12, 7, 11, 13, 8, 0, 4, 10, 7, 2, 7, 2, 2, 5, 3, 4, 7, + 3, 3, 14, 14, 5, 9, 13, 3, 14, 3, 6, 3, 0, 11, 8, 13, 1, 13, 1, + 12, 0, 10, 9, 7, 6, 2, 8, 5, 2, 13, 7, 1, 13, 14, 7, 6, 7, 9, + 6, 10, 11, 7, 8, 7, 5, 14, 8, 4, 4, 0, 8, 7, 10, 0, 8, 14, 11, + 3, 12, 5, 7, 14, 3, 14, 5, 2, 6, 11, 12, 12, 8, 0, 11, 13, 1, 2, + 0, 5, 10, 14, 7, 8, 0, 4, 11, 0, 8, 0, 3, 10, 5, 8, 0, 11, 6, + 7, 8, 10, 7, 13, 9, 2, 5, 1, 5, 10, 2, 4, 3, 5, 6, 10, 8, 9, + 4, 11, 14, 0, 10, 0, 5, 13, 2, 12, 7, 11, 13, 8, 0, 4, 10, 7, 2, + 7, 2, 2, 5, 3, 4, 7, 3, 3, 14, 14, 5, 9, 13, 3, 14, 3, 6, 3, + 0, 11, 8, 13, 1, 13, 1, 12, 0, 10, 9, 7, 6, 2, 8, 5, 2, 13, 7, + 1, 13, 14, 7, 6, 7, 9, 6, 10, 11, 7, 8, 7, 5, 14, 8, 4, 4, 0, + 8, 7, 10, 0, 8, 14, 11, 3, 12, 5, 7, 14, 3, 14, 5, 2, 6, 11, 12, + 12, 8, 0, 11, 13, 1, 2, 0, 5, 10, 14, 7, 8, 0, 4, 11, 0, 8, 0, + 3, 10, 5, 8, 0, 11, 6, 7, 8, 10, 7, 13, 9, 2, 5, 1, 5, 10, 2, + 4, 3, 5, 6, 10, 8, 9, 4, 11, 14, 3, 8, 3, 7, 8, 5, 11, 4, 12, + 3, 11, 9, 14, 8, 14, 13, 4, 3, 1, 2, 14, 6, 5, 4, 4, 11, 4, 6, + 2, 1, 5, 8, 8, 12, 13, 5, 14, 10, 12, 13, 0, 9, 5, 5, 11, 10, 13, + 9, 10, 13, +}; + +void vpx_post_proc_down_and_across_mb_row_c(unsigned char *src_ptr, + unsigned char *dst_ptr, + int src_pixels_per_line, + int dst_pixels_per_line, int cols, + unsigned char *f, int size) { + unsigned char *p_src, *p_dst; + int row; + int col; + unsigned char v; + unsigned char d[4]; + + for (row = 0; row < size; row++) { + /* post_proc_down for one row */ + p_src = src_ptr; + p_dst = dst_ptr; + + for (col 
= 0; col < cols; col++) { + unsigned char p_above2 = p_src[col - 2 * src_pixels_per_line]; + unsigned char p_above1 = p_src[col - src_pixels_per_line]; + unsigned char p_below1 = p_src[col + src_pixels_per_line]; + unsigned char p_below2 = p_src[col + 2 * src_pixels_per_line]; + + v = p_src[col]; + + if ((abs(v - p_above2) < f[col]) && (abs(v - p_above1) < f[col]) && + (abs(v - p_below1) < f[col]) && (abs(v - p_below2) < f[col])) { + unsigned char k1, k2, k3; + k1 = (p_above2 + p_above1 + 1) >> 1; + k2 = (p_below2 + p_below1 + 1) >> 1; + k3 = (k1 + k2 + 1) >> 1; + v = (k3 + v + 1) >> 1; + } + + p_dst[col] = v; + } + + /* now post_proc_across */ + p_src = dst_ptr; + p_dst = dst_ptr; + + p_src[-2] = p_src[-1] = p_src[0]; + p_src[cols] = p_src[cols + 1] = p_src[cols - 1]; + + for (col = 0; col < cols; col++) { + v = p_src[col]; + + if ((abs(v - p_src[col - 2]) < f[col]) && + (abs(v - p_src[col - 1]) < f[col]) && + (abs(v - p_src[col + 1]) < f[col]) && + (abs(v - p_src[col + 2]) < f[col])) { + unsigned char k1, k2, k3; + k1 = (p_src[col - 2] + p_src[col - 1] + 1) >> 1; + k2 = (p_src[col + 2] + p_src[col + 1] + 1) >> 1; + k3 = (k1 + k2 + 1) >> 1; + v = (k3 + v + 1) >> 1; + } + + d[col & 3] = v; + + if (col >= 2) p_dst[col - 2] = d[(col - 2) & 3]; + } + + /* handle the last two pixels */ + p_dst[col - 2] = d[(col - 2) & 3]; + p_dst[col - 1] = d[(col - 1) & 3]; + + /* next row */ + src_ptr += src_pixels_per_line; + dst_ptr += dst_pixels_per_line; + } +} + +void vpx_mbpost_proc_across_ip_c(unsigned char *src, int pitch, int rows, + int cols, int flimit) { + int r, c, i; + + unsigned char *s = src; + unsigned char d[16]; + + for (r = 0; r < rows; r++) { + int sumsq = 0; + int sum = 0; + + for (i = -8; i < 0; i++) s[i] = s[0]; + + /* 17 avoids valgrind warning - we buffer values in c in d + * and only write them when we've read 8 ahead... 
+ */ + for (i = 0; i < 17; i++) s[i + cols] = s[cols - 1]; + + for (i = -8; i <= 6; i++) { + sumsq += s[i] * s[i]; + sum += s[i]; + d[i + 8] = 0; + } + + for (c = 0; c < cols + 8; c++) { + int x = s[c + 7] - s[c - 8]; + int y = s[c + 7] + s[c - 8]; + + sum += x; + sumsq += x * y; + + d[c & 15] = s[c]; + + if (sumsq * 15 - sum * sum < flimit) { + d[c & 15] = (8 + sum + s[c]) >> 4; + } + + s[c - 8] = d[(c - 8) & 15]; + } + + s += pitch; + } +} + +void vpx_mbpost_proc_down_c(unsigned char *dst, int pitch, int rows, int cols, + int flimit) { + int r, c, i; + const int16_t *rv3 = &vpx_rv[63 & rand()]; + + for (c = 0; c < cols; c++) { + unsigned char *s = &dst[c]; + int sumsq = 0; + int sum = 0; + unsigned char d[16]; + const int16_t *rv2 = rv3 + ((c * 17) & 127); + + for (i = -8; i < 0; i++) s[i * pitch] = s[0]; + + /* 17 avoids valgrind warning - we buffer values in c in d + * and only write them when we've read 8 ahead... + */ + for (i = 0; i < 17; i++) s[(i + rows) * pitch] = s[(rows - 1) * pitch]; + + for (i = -8; i <= 6; i++) { + sumsq += s[i * pitch] * s[i * pitch]; + sum += s[i * pitch]; + } + + for (r = 0; r < rows + 8; r++) { + sumsq += s[7 * pitch] * s[7 * pitch] - s[-8 * pitch] * s[-8 * pitch]; + sum += s[7 * pitch] - s[-8 * pitch]; + d[r & 15] = s[0]; + + if (sumsq * 15 - sum * sum < flimit) { + d[r & 15] = (rv2[r & 127] + sum + s[0]) >> 4; + } + if (r >= 8) s[-8 * pitch] = d[(r - 8) & 15]; + s += pitch; + } + } +} diff --git a/libs/libvpx/vpx_dsp/fastssim.c b/libs/libvpx/vpx_dsp/fastssim.c index 1405a30e00..4d5eb5a6ff 100644 --- a/libs/libvpx/vpx_dsp/fastssim.c +++ b/libs/libvpx/vpx_dsp/fastssim.c @@ -10,6 +10,7 @@ * This code was originally written by: Nathan E. Egge, at the Daala * project. 
*/ +#include #include #include #include @@ -17,19 +18,24 @@ #include "./vpx_dsp_rtcd.h" #include "vpx_dsp/ssim.h" #include "vpx_ports/system_state.h" -/* TODO(jbb): High bit depth version of this code needed */ + typedef struct fs_level fs_level; typedef struct fs_ctx fs_ctx; #define SSIM_C1 (255 * 255 * 0.01 * 0.01) #define SSIM_C2 (255 * 255 * 0.03 * 0.03) - +#if CONFIG_VP9_HIGHBITDEPTH +#define SSIM_C1_10 (1023 * 1023 * 0.01 * 0.01) +#define SSIM_C1_12 (4095 * 4095 * 0.01 * 0.01) +#define SSIM_C2_10 (1023 * 1023 * 0.03 * 0.03) +#define SSIM_C2_12 (4095 * 4095 * 0.03 * 0.03) +#endif #define FS_MINI(_a, _b) ((_a) < (_b) ? (_a) : (_b)) #define FS_MAXI(_a, _b) ((_a) > (_b) ? (_a) : (_b)) struct fs_level { - uint16_t *im1; - uint16_t *im2; + uint32_t *im1; + uint32_t *im2; double *ssim; int w; int h; @@ -49,12 +55,12 @@ static void fs_ctx_init(fs_ctx *_ctx, int _w, int _h, int _nlevels) { int l; lw = (_w + 1) >> 1; lh = (_h + 1) >> 1; - data_size = _nlevels * sizeof(fs_level) - + 2 * (lw + 8) * 8 * sizeof(*_ctx->col_buf); + data_size = + _nlevels * sizeof(fs_level) + 2 * (lw + 8) * 8 * sizeof(*_ctx->col_buf); for (l = 0; l < _nlevels; l++) { size_t im_size; size_t level_size; - im_size = lw * (size_t) lh; + im_size = lw * (size_t)lh; level_size = 2 * im_size * sizeof(*_ctx->level[l].im1); level_size += sizeof(*_ctx->level[l].ssim) - 1; level_size /= sizeof(*_ctx->level[l].ssim); @@ -64,8 +70,8 @@ static void fs_ctx_init(fs_ctx *_ctx, int _w, int _h, int _nlevels) { lw = (lw + 1) >> 1; lh = (lh + 1) >> 1; } - data = (unsigned char *) malloc(data_size); - _ctx->level = (fs_level *) data; + data = (unsigned char *)malloc(data_size); + _ctx->level = (fs_level *)data; _ctx->nlevels = _nlevels; data += _nlevels * sizeof(*_ctx->level); lw = (_w + 1) >> 1; @@ -75,31 +81,29 @@ static void fs_ctx_init(fs_ctx *_ctx, int _w, int _h, int _nlevels) { size_t level_size; _ctx->level[l].w = lw; _ctx->level[l].h = lh; - im_size = lw * (size_t) lh; + im_size = lw * (size_t)lh; 
level_size = 2 * im_size * sizeof(*_ctx->level[l].im1); level_size += sizeof(*_ctx->level[l].ssim) - 1; level_size /= sizeof(*_ctx->level[l].ssim); level_size *= sizeof(*_ctx->level[l].ssim); - _ctx->level[l].im1 = (uint16_t *) data; + _ctx->level[l].im1 = (uint32_t *)data; _ctx->level[l].im2 = _ctx->level[l].im1 + im_size; data += level_size; - _ctx->level[l].ssim = (double *) data; + _ctx->level[l].ssim = (double *)data; data += im_size * sizeof(*_ctx->level[l].ssim); lw = (lw + 1) >> 1; lh = (lh + 1) >> 1; } - _ctx->col_buf = (unsigned *) data; + _ctx->col_buf = (unsigned *)data; } -static void fs_ctx_clear(fs_ctx *_ctx) { - free(_ctx->level); -} +static void fs_ctx_clear(fs_ctx *_ctx) { free(_ctx->level); } static void fs_downsample_level(fs_ctx *_ctx, int _l) { - const uint16_t *src1; - const uint16_t *src2; - uint16_t *dst1; - uint16_t *dst2; + const uint32_t *src1; + const uint32_t *src2; + uint32_t *dst1; + uint32_t *dst2; int w2; int h2; int w; @@ -124,19 +128,20 @@ static void fs_downsample_level(fs_ctx *_ctx, int _l) { int i1; i0 = 2 * i; i1 = FS_MINI(i0 + 1, w2); - dst1[j * w + i] = src1[j0offs + i0] + src1[j0offs + i1] - + src1[j1offs + i0] + src1[j1offs + i1]; - dst2[j * w + i] = src2[j0offs + i0] + src2[j0offs + i1] - + src2[j1offs + i0] + src2[j1offs + i1]; + dst1[j * w + i] = src1[j0offs + i0] + src1[j0offs + i1] + + src1[j1offs + i0] + src1[j1offs + i1]; + dst2[j * w + i] = src2[j0offs + i0] + src2[j0offs + i1] + + src2[j1offs + i0] + src2[j1offs + i1]; } } } -static void fs_downsample_level0(fs_ctx *_ctx, const unsigned char *_src1, - int _s1ystride, const unsigned char *_src2, - int _s2ystride, int _w, int _h) { - uint16_t *dst1; - uint16_t *dst2; +static void fs_downsample_level0(fs_ctx *_ctx, const uint8_t *_src1, + int _s1ystride, const uint8_t *_src2, + int _s2ystride, int _w, int _h, uint32_t bd, + uint32_t shift) { + uint32_t *dst1; + uint32_t *dst2; int w; int h; int i; @@ -155,21 +160,34 @@ static void fs_downsample_level0(fs_ctx *_ctx, 
const unsigned char *_src1, int i1; i0 = 2 * i; i1 = FS_MINI(i0 + 1, _w); - dst1[j * w + i] = _src1[j0 * _s1ystride + i0] - + _src1[j0 * _s1ystride + i1] + _src1[j1 * _s1ystride + i0] - + _src1[j1 * _s1ystride + i1]; - dst2[j * w + i] = _src2[j0 * _s2ystride + i0] - + _src2[j0 * _s2ystride + i1] + _src2[j1 * _s2ystride + i0] - + _src2[j1 * _s2ystride + i1]; + if (bd == 8 && shift == 0) { + dst1[j * w + i] = + _src1[j0 * _s1ystride + i0] + _src1[j0 * _s1ystride + i1] + + _src1[j1 * _s1ystride + i0] + _src1[j1 * _s1ystride + i1]; + dst2[j * w + i] = + _src2[j0 * _s2ystride + i0] + _src2[j0 * _s2ystride + i1] + + _src2[j1 * _s2ystride + i0] + _src2[j1 * _s2ystride + i1]; + } else { + uint16_t *src1s = CONVERT_TO_SHORTPTR(_src1); + uint16_t *src2s = CONVERT_TO_SHORTPTR(_src2); + dst1[j * w + i] = (src1s[j0 * _s1ystride + i0] >> shift) + + (src1s[j0 * _s1ystride + i1] >> shift) + + (src1s[j1 * _s1ystride + i0] >> shift) + + (src1s[j1 * _s1ystride + i1] >> shift); + dst2[j * w + i] = (src2s[j0 * _s2ystride + i0] >> shift) + + (src2s[j0 * _s2ystride + i1] >> shift) + + (src2s[j1 * _s2ystride + i0] >> shift) + + (src2s[j1 * _s2ystride + i1] >> shift); + } } } } -static void fs_apply_luminance(fs_ctx *_ctx, int _l) { +static void fs_apply_luminance(fs_ctx *_ctx, int _l, int bit_depth) { unsigned *col_sums_x; unsigned *col_sums_y; - uint16_t *im1; - uint16_t *im2; + uint32_t *im1; + uint32_t *im2; double *ssim; double c1; int w; @@ -178,25 +196,28 @@ static void fs_apply_luminance(fs_ctx *_ctx, int _l) { int j1offs; int i; int j; + double ssim_c1 = SSIM_C1; +#if CONFIG_VP9_HIGHBITDEPTH + if (bit_depth == 10) ssim_c1 = SSIM_C1_10; + if (bit_depth == 12) ssim_c1 = SSIM_C1_12; +#else + assert(bit_depth == 8); +#endif w = _ctx->level[_l].w; h = _ctx->level[_l].h; col_sums_x = _ctx->col_buf; col_sums_y = col_sums_x + w; im1 = _ctx->level[_l].im1; im2 = _ctx->level[_l].im2; - for (i = 0; i < w; i++) - col_sums_x[i] = 5 * im1[i]; - for (i = 0; i < w; i++) - col_sums_y[i] = 5 * 
im2[i]; + for (i = 0; i < w; i++) col_sums_x[i] = 5 * im1[i]; + for (i = 0; i < w; i++) col_sums_y[i] = 5 * im2[i]; for (j = 1; j < 4; j++) { j1offs = FS_MINI(j, h - 1) * w; - for (i = 0; i < w; i++) - col_sums_x[i] += im1[j1offs + i]; - for (i = 0; i < w; i++) - col_sums_y[i] += im2[j1offs + i]; + for (i = 0; i < w; i++) col_sums_x[i] += im1[j1offs + i]; + for (i = 0; i < w; i++) col_sums_y[i] += im2[j1offs + i]; } ssim = _ctx->level[_l].ssim; - c1 = (double) (SSIM_C1 * 4096 * (1 << 4 * _l)); + c1 = (double)(ssim_c1 * 4096 * (1 << 4 * _l)); for (j = 0; j < h; j++) { unsigned mux; unsigned muy; @@ -210,8 +231,8 @@ static void fs_apply_luminance(fs_ctx *_ctx, int _l) { muy += col_sums_y[i1]; } for (i = 0; i < w; i++) { - ssim[j * w + i] *= (2 * mux * (double) muy + c1) - / (mux * (double) mux + muy * (double) muy + c1); + ssim[j * w + i] *= (2 * mux * (double)muy + c1) / + (mux * (double)mux + muy * (double)muy + c1); if (i + 1 < w) { i0 = FS_MAXI(0, i - 4); i1 = FS_MINI(i + 4, w - 1); @@ -221,82 +242,72 @@ static void fs_apply_luminance(fs_ctx *_ctx, int _l) { } if (j + 1 < h) { j0offs = FS_MAXI(0, j - 4) * w; - for (i = 0; i < w; i++) - col_sums_x[i] -= im1[j0offs + i]; - for (i = 0; i < w; i++) - col_sums_y[i] -= im2[j0offs + i]; + for (i = 0; i < w; i++) col_sums_x[i] -= im1[j0offs + i]; + for (i = 0; i < w; i++) col_sums_y[i] -= im2[j0offs + i]; j1offs = FS_MINI(j + 4, h - 1) * w; - for (i = 0; i < w; i++) - col_sums_x[i] += im1[j1offs + i]; - for (i = 0; i < w; i++) - col_sums_y[i] += im2[j1offs + i]; + for (i = 0; i < w; i++) col_sums_x[i] += im1[j1offs + i]; + for (i = 0; i < w; i++) col_sums_y[i] += im2[j1offs + i]; } } } -#define FS_COL_SET(_col, _joffs, _ioffs) \ - do { \ - unsigned gx; \ - unsigned gy; \ +#define FS_COL_SET(_col, _joffs, _ioffs) \ + do { \ + unsigned gx; \ + unsigned gy; \ gx = gx_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \ gy = gy_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \ - col_sums_gx2[(_col)] = gx * (double)gx; \ - 
col_sums_gy2[(_col)] = gy * (double)gy; \ - col_sums_gxgy[(_col)] = gx * (double)gy; \ - } \ - while (0) + col_sums_gx2[(_col)] = gx * (double)gx; \ + col_sums_gy2[(_col)] = gy * (double)gy; \ + col_sums_gxgy[(_col)] = gx * (double)gy; \ + } while (0) -#define FS_COL_ADD(_col, _joffs, _ioffs) \ - do { \ - unsigned gx; \ - unsigned gy; \ +#define FS_COL_ADD(_col, _joffs, _ioffs) \ + do { \ + unsigned gx; \ + unsigned gy; \ gx = gx_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \ gy = gy_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \ - col_sums_gx2[(_col)] += gx * (double)gx; \ - col_sums_gy2[(_col)] += gy * (double)gy; \ - col_sums_gxgy[(_col)] += gx * (double)gy; \ - } \ - while (0) + col_sums_gx2[(_col)] += gx * (double)gx; \ + col_sums_gy2[(_col)] += gy * (double)gy; \ + col_sums_gxgy[(_col)] += gx * (double)gy; \ + } while (0) -#define FS_COL_SUB(_col, _joffs, _ioffs) \ - do { \ - unsigned gx; \ - unsigned gy; \ +#define FS_COL_SUB(_col, _joffs, _ioffs) \ + do { \ + unsigned gx; \ + unsigned gy; \ gx = gx_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \ gy = gy_buf[((j + (_joffs)) & 7) * stride + i + (_ioffs)]; \ - col_sums_gx2[(_col)] -= gx * (double)gx; \ - col_sums_gy2[(_col)] -= gy * (double)gy; \ - col_sums_gxgy[(_col)] -= gx * (double)gy; \ - } \ - while (0) + col_sums_gx2[(_col)] -= gx * (double)gx; \ + col_sums_gy2[(_col)] -= gy * (double)gy; \ + col_sums_gxgy[(_col)] -= gx * (double)gy; \ + } while (0) -#define FS_COL_COPY(_col1, _col2) \ - do { \ - col_sums_gx2[(_col1)] = col_sums_gx2[(_col2)]; \ - col_sums_gy2[(_col1)] = col_sums_gy2[(_col2)]; \ +#define FS_COL_COPY(_col1, _col2) \ + do { \ + col_sums_gx2[(_col1)] = col_sums_gx2[(_col2)]; \ + col_sums_gy2[(_col1)] = col_sums_gy2[(_col2)]; \ col_sums_gxgy[(_col1)] = col_sums_gxgy[(_col2)]; \ - } \ - while (0) + } while (0) -#define FS_COL_HALVE(_col1, _col2) \ - do { \ - col_sums_gx2[(_col1)] = col_sums_gx2[(_col2)] * 0.5; \ - col_sums_gy2[(_col1)] = col_sums_gy2[(_col2)] * 0.5; \ 
+#define FS_COL_HALVE(_col1, _col2) \ + do { \ + col_sums_gx2[(_col1)] = col_sums_gx2[(_col2)] * 0.5; \ + col_sums_gy2[(_col1)] = col_sums_gy2[(_col2)] * 0.5; \ col_sums_gxgy[(_col1)] = col_sums_gxgy[(_col2)] * 0.5; \ - } \ - while (0) + } while (0) -#define FS_COL_DOUBLE(_col1, _col2) \ - do { \ - col_sums_gx2[(_col1)] = col_sums_gx2[(_col2)] * 2; \ - col_sums_gy2[(_col1)] = col_sums_gy2[(_col2)] * 2; \ +#define FS_COL_DOUBLE(_col1, _col2) \ + do { \ + col_sums_gx2[(_col1)] = col_sums_gx2[(_col2)] * 2; \ + col_sums_gy2[(_col1)] = col_sums_gy2[(_col2)] * 2; \ col_sums_gxgy[(_col1)] = col_sums_gxgy[(_col2)] * 2; \ - } \ - while (0) + } while (0) -static void fs_calc_structure(fs_ctx *_ctx, int _l) { - uint16_t *im1; - uint16_t *im2; +static void fs_calc_structure(fs_ctx *_ctx, int _l, int bit_depth) { + uint32_t *im1; + uint32_t *im2; unsigned *gx_buf; unsigned *gy_buf; double *ssim; @@ -309,6 +320,14 @@ static void fs_calc_structure(fs_ctx *_ctx, int _l) { int h; int i; int j; + double ssim_c2 = SSIM_C2; +#if CONFIG_VP9_HIGHBITDEPTH + if (bit_depth == 10) ssim_c2 = SSIM_C2_10; + if (bit_depth == 12) ssim_c2 = SSIM_C2_12; +#else + assert(bit_depth == 8); +#endif + w = _ctx->level[_l].w; h = _ctx->level[_l].h; im1 = _ctx->level[_l].im1; @@ -318,7 +337,7 @@ static void fs_calc_structure(fs_ctx *_ctx, int _l) { stride = w + 8; gy_buf = gx_buf + 8 * stride; memset(gx_buf, 0, 2 * 8 * stride * sizeof(*gx_buf)); - c2 = SSIM_C2 * (1 << 4 * _l) * 16 * 104; + c2 = ssim_c2 * (1 << 4 * _l) * 16 * 104; for (j = 0; j < h + 4; j++) { if (j < h - 1) { for (i = 0; i < w - 1; i++) { @@ -326,11 +345,11 @@ static void fs_calc_structure(fs_ctx *_ctx, int _l) { unsigned g2; unsigned gx; unsigned gy; - g1 = abs(im1[(j + 1) * w + i + 1] - im1[j * w + i]); - g2 = abs(im1[(j + 1) * w + i] - im1[j * w + i + 1]); + g1 = abs((int)im1[(j + 1) * w + i + 1] - (int)im1[j * w + i]); + g2 = abs((int)im1[(j + 1) * w + i] - (int)im1[j * w + i + 1]); gx = 4 * FS_MAXI(g1, g2) + FS_MINI(g1, g2); - g1 = 
abs(im2[(j + 1) * w + i + 1] - im2[j * w + i]); - g2 = abs(im2[(j + 1) * w + i] - im2[j * w + i + 1]); + g1 = abs((int)im2[(j + 1) * w + i + 1] - (int)im2[j * w + i]); + g2 = abs((int)im2[(j + 1) * w + i] - (int)im2[j * w + i + 1]); gy = 4 * FS_MAXI(g1, g2) + FS_MINI(g1, g2); gx_buf[(j & 7) * stride + i + 4] = gx; gy_buf[(j & 7) * stride + i + 4] = gy; @@ -359,14 +378,11 @@ static void fs_calc_structure(fs_ctx *_ctx, int _l) { double mugy2; double mugxgy; mugx2 = col_sums_gx2[0]; - for (k = 1; k < 8; k++) - mugx2 += col_sums_gx2[k]; + for (k = 1; k < 8; k++) mugx2 += col_sums_gx2[k]; mugy2 = col_sums_gy2[0]; - for (k = 1; k < 8; k++) - mugy2 += col_sums_gy2[k]; + for (k = 1; k < 8; k++) mugy2 += col_sums_gy2[k]; mugxgy = col_sums_gxgy[0]; - for (k = 1; k < 8; k++) - mugxgy += col_sums_gxgy[k]; + for (k = 1; k < 8; k++) mugxgy += col_sums_gxgy[k]; ssim[(j - 4) * w + i] = (2 * mugxgy + c2) / (mugx2 + mugy2 + c2); if (i + 1 < w) { FS_COL_SET(0, -1, 1); @@ -401,8 +417,9 @@ static void fs_calc_structure(fs_ctx *_ctx, int _l) { Matlab implementation: {0.0448, 0.2856, 0.2363, 0.1333}. 
We drop the finest scale and renormalize the rest to sum to 1.*/ -static const double FS_WEIGHTS[FS_NLEVELS] = {0.2989654541015625, - 0.3141326904296875, 0.2473602294921875, 0.1395416259765625}; +static const double FS_WEIGHTS[FS_NLEVELS] = { + 0.2989654541015625, 0.3141326904296875, 0.2473602294921875, 0.1395416259765625 +}; static double fs_average(fs_ctx *_ctx, int _l) { double *ssim; @@ -416,53 +433,58 @@ static double fs_average(fs_ctx *_ctx, int _l) { ssim = _ctx->level[_l].ssim; ret = 0; for (j = 0; j < h; j++) - for (i = 0; i < w; i++) - ret += ssim[j * w + i]; + for (i = 0; i < w; i++) ret += ssim[j * w + i]; return pow(ret / (w * h), FS_WEIGHTS[_l]); } -static double calc_ssim(const unsigned char *_src, int _systride, - const unsigned char *_dst, int _dystride, int _w, int _h) { +static double convert_ssim_db(double _ssim, double _weight) { + assert(_weight >= _ssim); + if ((_weight - _ssim) < 1e-10) return MAX_SSIM_DB; + return 10 * (log10(_weight) - log10(_weight - _ssim)); +} + +static double calc_ssim(const uint8_t *_src, int _systride, const uint8_t *_dst, + int _dystride, int _w, int _h, uint32_t _bd, + uint32_t _shift) { fs_ctx ctx; double ret; int l; ret = 1; fs_ctx_init(&ctx, _w, _h, FS_NLEVELS); - fs_downsample_level0(&ctx, _src, _systride, _dst, _dystride, _w, _h); + fs_downsample_level0(&ctx, _src, _systride, _dst, _dystride, _w, _h, _bd, + _shift); for (l = 0; l < FS_NLEVELS - 1; l++) { - fs_calc_structure(&ctx, l); + fs_calc_structure(&ctx, l, _bd); ret *= fs_average(&ctx, l); fs_downsample_level(&ctx, l + 1); } - fs_calc_structure(&ctx, l); - fs_apply_luminance(&ctx, l); + fs_calc_structure(&ctx, l, _bd); + fs_apply_luminance(&ctx, l, _bd); ret *= fs_average(&ctx, l); fs_ctx_clear(&ctx); return ret; } -static double convert_ssim_db(double _ssim, double _weight) { - return 10 * (log10(_weight) - log10(_weight - _ssim)); -} - double vpx_calc_fastssim(const YV12_BUFFER_CONFIG *source, - const YV12_BUFFER_CONFIG *dest, - double *ssim_y, double 
*ssim_u, double *ssim_v) { + const YV12_BUFFER_CONFIG *dest, double *ssim_y, + double *ssim_u, double *ssim_v, uint32_t bd, + uint32_t in_bd) { double ssimv; + uint32_t bd_shift = 0; vpx_clear_system_state(); + assert(bd >= in_bd); + bd_shift = bd - in_bd; *ssim_y = calc_ssim(source->y_buffer, source->y_stride, dest->y_buffer, dest->y_stride, source->y_crop_width, - source->y_crop_height); - + source->y_crop_height, in_bd, bd_shift); *ssim_u = calc_ssim(source->u_buffer, source->uv_stride, dest->u_buffer, dest->uv_stride, source->uv_crop_width, - source->uv_crop_height); - + source->uv_crop_height, in_bd, bd_shift); *ssim_v = calc_ssim(source->v_buffer, source->uv_stride, dest->v_buffer, dest->uv_stride, source->uv_crop_width, - source->uv_crop_height); - ssimv = (*ssim_y) * .8 + .1 * ((*ssim_u) + (*ssim_v)); + source->uv_crop_height, in_bd, bd_shift); + ssimv = (*ssim_y) * .8 + .1 * ((*ssim_u) + (*ssim_v)); return convert_ssim_db(ssimv, 1.0); } diff --git a/libs/libvpx/vpx_dsp/fwd_txfm.c b/libs/libvpx/vpx_dsp/fwd_txfm.c index 7baaa8b0d0..4e7d4053ea 100644 --- a/libs/libvpx/vpx_dsp/fwd_txfm.c +++ b/libs/libvpx/vpx_dsp/fwd_txfm.c @@ -8,6 +8,7 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ +#include "./vpx_dsp_rtcd.h" #include "vpx_dsp/fwd_txfm.h" void vpx_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) { @@ -71,8 +72,7 @@ void vpx_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) { { int i, j; for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - output[j + i * 4] = (output[j + i * 4] + 1) >> 2; + for (j = 0; j < 4; ++j) output[j + i * 4] = (output[j + i * 4] + 1) >> 2; } } } @@ -81,11 +81,9 @@ void vpx_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) { int r, c; tran_low_t sum = 0; for (r = 0; r < 4; ++r) - for (c = 0; c < 4; ++c) - sum += input[r * stride + c]; + for (c = 0; c < 4; ++c) sum += input[r * stride + c]; output[0] = sum << 1; - output[1] = 0; } void vpx_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) { @@ -133,8 +131,8 @@ void vpx_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) { x3 = s0 - s3; t0 = (x0 + x1) * cospi_16_64; t1 = (x0 - x1) * cospi_16_64; - t2 = x2 * cospi_24_64 + x3 * cospi_8_64; - t3 = -x2 * cospi_8_64 + x3 * cospi_24_64; + t2 = x2 * cospi_24_64 + x3 * cospi_8_64; + t3 = -x2 * cospi_8_64 + x3 * cospi_24_64; output[0] = (tran_low_t)fdct_round_shift(t0); output[2] = (tran_low_t)fdct_round_shift(t2); output[4] = (tran_low_t)fdct_round_shift(t1); @@ -153,24 +151,23 @@ void vpx_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) { x3 = s7 + t3; // Stage 4 - t0 = x0 * cospi_28_64 + x3 * cospi_4_64; - t1 = x1 * cospi_12_64 + x2 * cospi_20_64; + t0 = x0 * cospi_28_64 + x3 * cospi_4_64; + t1 = x1 * cospi_12_64 + x2 * cospi_20_64; t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; - t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; + t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; output[1] = (tran_low_t)fdct_round_shift(t0); output[3] = (tran_low_t)fdct_round_shift(t2); output[5] = (tran_low_t)fdct_round_shift(t1); output[7] = (tran_low_t)fdct_round_shift(t3); output += 8; } - in = intermediate; + in = intermediate; output = final_output; } 
// Rows for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - final_output[j + i * 8] /= 2; + for (j = 0; j < 8; ++j) final_output[j + i * 8] /= 2; } } @@ -178,11 +175,9 @@ void vpx_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) { int r, c; tran_low_t sum = 0; for (r = 0; r < 8; ++r) - for (c = 0; c < 8; ++c) - sum += input[r * stride + c]; + for (c = 0; c < 8; ++c) sum += input[r * stride + c]; output[0] = sum; - output[1] = 0; } void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) { @@ -215,11 +210,11 @@ void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) { input[3] = (in_pass0[3 * stride] + in_pass0[12 * stride]) * 4; input[4] = (in_pass0[4 * stride] + in_pass0[11 * stride]) * 4; input[5] = (in_pass0[5 * stride] + in_pass0[10 * stride]) * 4; - input[6] = (in_pass0[6 * stride] + in_pass0[ 9 * stride]) * 4; - input[7] = (in_pass0[7 * stride] + in_pass0[ 8 * stride]) * 4; + input[6] = (in_pass0[6 * stride] + in_pass0[9 * stride]) * 4; + input[7] = (in_pass0[7 * stride] + in_pass0[8 * stride]) * 4; // Calculate input for the next 8 results. 
- step1[0] = (in_pass0[7 * stride] - in_pass0[ 8 * stride]) * 4; - step1[1] = (in_pass0[6 * stride] - in_pass0[ 9 * stride]) * 4; + step1[0] = (in_pass0[7 * stride] - in_pass0[8 * stride]) * 4; + step1[1] = (in_pass0[6 * stride] - in_pass0[9 * stride]) * 4; step1[2] = (in_pass0[5 * stride] - in_pass0[10 * stride]) * 4; step1[3] = (in_pass0[4 * stride] - in_pass0[11 * stride]) * 4; step1[4] = (in_pass0[3 * stride] - in_pass0[12 * stride]) * 4; @@ -234,11 +229,11 @@ void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) { input[3] = ((in[3 * 16] + 1) >> 2) + ((in[12 * 16] + 1) >> 2); input[4] = ((in[4 * 16] + 1) >> 2) + ((in[11 * 16] + 1) >> 2); input[5] = ((in[5 * 16] + 1) >> 2) + ((in[10 * 16] + 1) >> 2); - input[6] = ((in[6 * 16] + 1) >> 2) + ((in[ 9 * 16] + 1) >> 2); - input[7] = ((in[7 * 16] + 1) >> 2) + ((in[ 8 * 16] + 1) >> 2); + input[6] = ((in[6 * 16] + 1) >> 2) + ((in[9 * 16] + 1) >> 2); + input[7] = ((in[7 * 16] + 1) >> 2) + ((in[8 * 16] + 1) >> 2); // Calculate input for the next 8 results. 
- step1[0] = ((in[7 * 16] + 1) >> 2) - ((in[ 8 * 16] + 1) >> 2); - step1[1] = ((in[6 * 16] + 1) >> 2) - ((in[ 9 * 16] + 1) >> 2); + step1[0] = ((in[7 * 16] + 1) >> 2) - ((in[8 * 16] + 1) >> 2); + step1[1] = ((in[6 * 16] + 1) >> 2) - ((in[9 * 16] + 1) >> 2); step1[2] = ((in[5 * 16] + 1) >> 2) - ((in[10 * 16] + 1) >> 2); step1[3] = ((in[4 * 16] + 1) >> 2) - ((in[11 * 16] + 1) >> 2); step1[4] = ((in[3 * 16] + 1) >> 2) - ((in[12 * 16] + 1) >> 2); @@ -269,7 +264,7 @@ void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) { x3 = s0 - s3; t0 = (x0 + x1) * cospi_16_64; t1 = (x0 - x1) * cospi_16_64; - t2 = x3 * cospi_8_64 + x2 * cospi_24_64; + t2 = x3 * cospi_8_64 + x2 * cospi_24_64; t3 = x3 * cospi_24_64 - x2 * cospi_8_64; out[0] = (tran_low_t)fdct_round_shift(t0); out[4] = (tran_low_t)fdct_round_shift(t2); @@ -289,10 +284,10 @@ void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) { x3 = s7 + t3; // Stage 4 - t0 = x0 * cospi_28_64 + x3 * cospi_4_64; - t1 = x1 * cospi_12_64 + x2 * cospi_20_64; + t0 = x0 * cospi_28_64 + x3 * cospi_4_64; + t1 = x1 * cospi_12_64 + x2 * cospi_20_64; t2 = x2 * cospi_12_64 + x1 * -cospi_20_64; - t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; + t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; out[2] = (tran_low_t)fdct_round_shift(t0); out[6] = (tran_low_t)fdct_round_shift(t2); out[10] = (tran_low_t)fdct_round_shift(t1); @@ -319,12 +314,12 @@ void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) { step3[6] = step1[6] + step2[5]; step3[7] = step1[7] + step2[4]; // step 4 - temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64; - temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64; + temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64; + temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64; step2[1] = fdct_round_shift(temp1); step2[2] = fdct_round_shift(temp2); temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64; - temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64; + temp2 = 
step3[1] * cospi_24_64 + step3[6] * cospi_8_64; step2[5] = fdct_round_shift(temp1); step2[6] = fdct_round_shift(temp2); // step 5 @@ -337,20 +332,20 @@ void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) { step1[6] = step3[7] - step2[6]; step1[7] = step3[7] + step2[6]; // step 6 - temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64; + temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64; temp2 = step1[1] * cospi_14_64 + step1[6] * cospi_18_64; out[1] = (tran_low_t)fdct_round_shift(temp1); out[9] = (tran_low_t)fdct_round_shift(temp2); temp1 = step1[2] * cospi_22_64 + step1[5] * cospi_10_64; - temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64; + temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64; out[5] = (tran_low_t)fdct_round_shift(temp1); out[13] = (tran_low_t)fdct_round_shift(temp2); - temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64; + temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64; temp2 = step1[2] * -cospi_10_64 + step1[5] * cospi_22_64; out[3] = (tran_low_t)fdct_round_shift(temp1); out[11] = (tran_low_t)fdct_round_shift(temp2); temp1 = step1[1] * -cospi_18_64 + step1[6] * cospi_14_64; - temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64; + temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64; out[7] = (tran_low_t)fdct_round_shift(temp1); out[15] = (tran_low_t)fdct_round_shift(temp2); } @@ -367,13 +362,11 @@ void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) { void vpx_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) { int r, c; - tran_low_t sum = 0; + int sum = 0; for (r = 0; r < 16; ++r) - for (c = 0; c < 16; ++c) - sum += input[r * stride + c]; + for (c = 0; c < 16; ++c) sum += input[r * stride + c]; - output[0] = sum >> 1; - output[1] = 0; + output[0] = (tran_low_t)(sum >> 1); } static INLINE tran_high_t dct_32_round(tran_high_t input) { @@ -677,36 +670,36 @@ void vpx_fdct32(const tran_high_t *input, tran_high_t *output, int round) { step[31] = 
output[31] + output[30]; // Final stage --- outputs indices are bit-reversed. - output[0] = step[0]; + output[0] = step[0]; output[16] = step[1]; - output[8] = step[2]; + output[8] = step[2]; output[24] = step[3]; - output[4] = step[4]; + output[4] = step[4]; output[20] = step[5]; output[12] = step[6]; output[28] = step[7]; - output[2] = step[8]; + output[2] = step[8]; output[18] = step[9]; output[10] = step[10]; output[26] = step[11]; - output[6] = step[12]; + output[6] = step[12]; output[22] = step[13]; output[14] = step[14]; output[30] = step[15]; - output[1] = dct_32_round(step[16] * cospi_31_64 + step[31] * cospi_1_64); + output[1] = dct_32_round(step[16] * cospi_31_64 + step[31] * cospi_1_64); output[17] = dct_32_round(step[17] * cospi_15_64 + step[30] * cospi_17_64); - output[9] = dct_32_round(step[18] * cospi_23_64 + step[29] * cospi_9_64); + output[9] = dct_32_round(step[18] * cospi_23_64 + step[29] * cospi_9_64); output[25] = dct_32_round(step[19] * cospi_7_64 + step[28] * cospi_25_64); - output[5] = dct_32_round(step[20] * cospi_27_64 + step[27] * cospi_5_64); + output[5] = dct_32_round(step[20] * cospi_27_64 + step[27] * cospi_5_64); output[21] = dct_32_round(step[21] * cospi_11_64 + step[26] * cospi_21_64); output[13] = dct_32_round(step[22] * cospi_19_64 + step[25] * cospi_13_64); output[29] = dct_32_round(step[23] * cospi_3_64 + step[24] * cospi_29_64); - output[3] = dct_32_round(step[24] * cospi_3_64 + step[23] * -cospi_29_64); + output[3] = dct_32_round(step[24] * cospi_3_64 + step[23] * -cospi_29_64); output[19] = dct_32_round(step[25] * cospi_19_64 + step[22] * -cospi_13_64); output[11] = dct_32_round(step[26] * cospi_11_64 + step[21] * -cospi_21_64); output[27] = dct_32_round(step[27] * cospi_27_64 + step[20] * -cospi_5_64); - output[7] = dct_32_round(step[28] * cospi_7_64 + step[19] * -cospi_25_64); + output[7] = dct_32_round(step[28] * cospi_7_64 + step[19] * -cospi_25_64); output[23] = dct_32_round(step[29] * cospi_23_64 + step[18] * 
-cospi_9_64); output[15] = dct_32_round(step[30] * cospi_15_64 + step[17] * -cospi_17_64); output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64); @@ -719,8 +712,7 @@ void vpx_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) { // Columns for (i = 0; i < 32; ++i) { tran_high_t temp_in[32], temp_out[32]; - for (j = 0; j < 32; ++j) - temp_in[j] = input[j * stride + i] * 4; + for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4; vpx_fdct32(temp_in, temp_out, 0); for (j = 0; j < 32; ++j) output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2; @@ -729,8 +721,7 @@ void vpx_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) { // Rows for (i = 0; i < 32; ++i) { tran_high_t temp_in[32], temp_out[32]; - for (j = 0; j < 32; ++j) - temp_in[j] = output[j + i * 32]; + for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32]; vpx_fdct32(temp_in, temp_out, 0); for (j = 0; j < 32; ++j) out[j + i * 32] = @@ -748,8 +739,7 @@ void vpx_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) { // Columns for (i = 0; i < 32; ++i) { tran_high_t temp_in[32], temp_out[32]; - for (j = 0; j < 32; ++j) - temp_in[j] = input[j * stride + i] * 4; + for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4; vpx_fdct32(temp_in, temp_out, 0); for (j = 0; j < 32; ++j) // TODO(cd): see quality impact of only doing @@ -761,23 +751,19 @@ void vpx_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) { // Rows for (i = 0; i < 32; ++i) { tran_high_t temp_in[32], temp_out[32]; - for (j = 0; j < 32; ++j) - temp_in[j] = output[j + i * 32]; + for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32]; vpx_fdct32(temp_in, temp_out, 1); - for (j = 0; j < 32; ++j) - out[j + i * 32] = (tran_low_t)temp_out[j]; + for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j]; } } void vpx_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) { int r, c; - tran_low_t sum = 0; + int sum = 0; for (r = 0; r < 
32; ++r) - for (c = 0; c < 32; ++c) - sum += input[r * stride + c]; + for (c = 0; c < 32; ++c) sum += input[r * stride + c]; - output[0] = sum >> 3; - output[1] = 0; + output[0] = (tran_low_t)(sum >> 3); } #if CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vpx_dsp/intrapred.c b/libs/libvpx/vpx_dsp/intrapred.c index cc4a74bd26..eca17a983e 100644 --- a/libs/libvpx/vpx_dsp/intrapred.c +++ b/libs/libvpx/vpx_dsp/intrapred.c @@ -14,17 +14,16 @@ #include "vpx_dsp/vpx_dsp_common.h" #include "vpx_mem/vpx_mem.h" -#define DST(x, y) dst[(x) + (y) * stride] +#define DST(x, y) dst[(x) + (y)*stride] #define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2) #define AVG2(a, b) (((a) + (b) + 1) >> 1) static INLINE void d207_predictor(uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left) { int r, c; - (void) above; + (void)above; // first column - for (r = 0; r < bs - 1; ++r) - dst[r * stride] = AVG2(left[r], left[r + 1]); + for (r = 0; r < bs - 1; ++r) dst[r * stride] = AVG2(left[r], left[r + 1]); dst[(bs - 1) * stride] = left[bs - 1]; dst++; @@ -36,8 +35,7 @@ static INLINE void d207_predictor(uint8_t *dst, ptrdiff_t stride, int bs, dst++; // rest of last row - for (c = 0; c < bs - 2; ++c) - dst[(bs - 1) * stride + c] = left[bs - 1]; + for (c = 0; c < bs - 2; ++c) dst[(bs - 1) * stride + c] = left[bs - 1]; for (r = bs - 2; r >= 0; --r) for (c = 0; c < bs - 2; ++c) @@ -48,13 +46,13 @@ static INLINE void d207_predictor(uint8_t *dst, ptrdiff_t stride, int bs, static INLINE void d207e_predictor(uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left) { int r, c; - (void) above; + (void)above; for (r = 0; r < bs; ++r) { for (c = 0; c < bs; ++c) { dst[c] = c & 1 ? 
AVG3(left[(c >> 1) + r], left[(c >> 1) + r + 1], left[(c >> 1) + r + 2]) - : AVG2(left[(c >> 1) + r], left[(c >> 1) + r + 1]); + : AVG2(left[(c >> 1) + r], left[(c >> 1) + r + 1]); } dst += stride; } @@ -82,12 +80,12 @@ static INLINE void d63_predictor(uint8_t *dst, ptrdiff_t stride, int bs, static INLINE void d63e_predictor(uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left) { int r, c; - (void) left; + (void)left; for (r = 0; r < bs; ++r) { for (c = 0; c < bs; ++c) { dst[c] = r & 1 ? AVG3(above[(r >> 1) + c], above[(r >> 1) + c + 1], above[(r >> 1) + c + 2]) - : AVG2(above[(r >> 1) + c], above[(r >> 1) + c + 1]); + : AVG2(above[(r >> 1) + c], above[(r >> 1) + c + 1]); } dst += stride; } @@ -117,7 +115,7 @@ static INLINE void d45_predictor(uint8_t *dst, ptrdiff_t stride, int bs, static INLINE void d45e_predictor(uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left) { int r, c; - (void) left; + (void)left; for (r = 0; r < bs; ++r) { for (c = 0; c < bs; ++c) { dst[c] = AVG3(above[r + c], above[r + c + 1], @@ -133,14 +131,12 @@ static INLINE void d117_predictor(uint8_t *dst, ptrdiff_t stride, int bs, int r, c; // first row - for (c = 0; c < bs; c++) - dst[c] = AVG2(above[c - 1], above[c]); + for (c = 0; c < bs; c++) dst[c] = AVG2(above[c - 1], above[c]); dst += stride; // second row dst[0] = AVG3(left[0], above[-1], above[0]); - for (c = 1; c < bs; c++) - dst[c] = AVG3(above[c - 2], above[c - 1], above[c]); + for (c = 1; c < bs; c++) dst[c] = AVG3(above[c - 2], above[c - 1], above[c]); dst += stride; // the rest of first col @@ -150,8 +146,7 @@ static INLINE void d117_predictor(uint8_t *dst, ptrdiff_t stride, int bs, // the rest of the block for (r = 2; r < bs; ++r) { - for (c = 1; c < bs; c++) - dst[c] = dst[-2 * stride + c - 1]; + for (c = 1; c < bs; c++) dst[c] = dst[-2 * stride + c - 1]; dst += stride; } } @@ -188,8 +183,7 @@ static INLINE void d153_predictor(uint8_t *dst, ptrdiff_t stride, int bs, const 
uint8_t *above, const uint8_t *left) { int r, c; dst[0] = AVG2(above[-1], left[0]); - for (r = 1; r < bs; r++) - dst[r * stride] = AVG2(left[r - 1], left[r]); + for (r = 1; r < bs; r++) dst[r * stride] = AVG2(left[r - 1], left[r]); dst++; dst[0] = AVG3(left[0], above[-1], above[0]); @@ -203,8 +197,7 @@ static INLINE void d153_predictor(uint8_t *dst, ptrdiff_t stride, int bs, dst += stride; for (r = 1; r < bs; ++r) { - for (c = 0; c < bs - 2; c++) - dst[c] = dst[-stride + c - 2]; + for (c = 0; c < bs - 2; c++) dst[c] = dst[-stride + c - 2]; dst += stride; } } @@ -212,7 +205,7 @@ static INLINE void d153_predictor(uint8_t *dst, ptrdiff_t stride, int bs, static INLINE void v_predictor(uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left) { int r; - (void) left; + (void)left; for (r = 0; r < bs; r++) { memcpy(dst, above, bs); @@ -223,7 +216,7 @@ static INLINE void v_predictor(uint8_t *dst, ptrdiff_t stride, int bs, static INLINE void h_predictor(uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left) { int r; - (void) above; + (void)above; for (r = 0; r < bs; r++) { memset(dst, left[r], bs); @@ -246,8 +239,8 @@ static INLINE void tm_predictor(uint8_t *dst, ptrdiff_t stride, int bs, static INLINE void dc_128_predictor(uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left) { int r; - (void) above; - (void) left; + (void)above; + (void)left; for (r = 0; r < bs; r++) { memset(dst, 128, bs); @@ -259,10 +252,9 @@ static INLINE void dc_left_predictor(uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left) { int i, r, expected_dc, sum = 0; - (void) above; + (void)above; - for (i = 0; i < bs; i++) - sum += left[i]; + for (i = 0; i < bs; i++) sum += left[i]; expected_dc = (sum + (bs >> 1)) / bs; for (r = 0; r < bs; r++) { @@ -274,10 +266,9 @@ static INLINE void dc_left_predictor(uint8_t *dst, ptrdiff_t stride, int bs, static INLINE void dc_top_predictor(uint8_t *dst, 
ptrdiff_t stride, int bs, const uint8_t *above, const uint8_t *left) { int i, r, expected_dc, sum = 0; - (void) left; + (void)left; - for (i = 0; i < bs; i++) - sum += above[i]; + for (i = 0; i < bs; i++) sum += above[i]; expected_dc = (sum + (bs >> 1)) / bs; for (r = 0; r < bs; r++) { @@ -344,14 +335,13 @@ void vpx_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const int K = left[2]; const int L = left[3]; (void)above; - DST(0, 0) = AVG2(I, J); + DST(0, 0) = AVG2(I, J); DST(2, 0) = DST(0, 1) = AVG2(J, K); DST(2, 1) = DST(0, 2) = AVG2(K, L); - DST(1, 0) = AVG3(I, J, K); + DST(1, 0) = AVG3(I, J, K); DST(3, 0) = DST(1, 1) = AVG3(J, K, L); DST(3, 1) = DST(1, 2) = AVG3(K, L, L); - DST(3, 2) = DST(2, 2) = - DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L; + DST(3, 2) = DST(2, 2) = DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L; } void vpx_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, @@ -364,17 +354,17 @@ void vpx_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const int F = above[5]; const int G = above[6]; (void)left; - DST(0, 0) = AVG2(A, B); + DST(0, 0) = AVG2(A, B); DST(1, 0) = DST(0, 2) = AVG2(B, C); DST(2, 0) = DST(1, 2) = AVG2(C, D); DST(3, 0) = DST(2, 2) = AVG2(D, E); - DST(3, 2) = AVG2(E, F); // differs from vp8 + DST(3, 2) = AVG2(E, F); // differs from vp8 - DST(0, 1) = AVG3(A, B, C); + DST(0, 1) = AVG3(A, B, C); DST(1, 1) = DST(0, 3) = AVG3(B, C, D); DST(2, 1) = DST(1, 3) = AVG3(C, D, E); DST(3, 1) = DST(2, 3) = AVG3(D, E, F); - DST(3, 3) = AVG3(E, F, G); // differs from vp8 + DST(3, 3) = AVG3(E, F, G); // differs from vp8 } void vpx_d63f_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, @@ -388,17 +378,17 @@ void vpx_d63f_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const int G = above[6]; const int H = above[7]; (void)left; - DST(0, 0) = AVG2(A, B); + DST(0, 0) = AVG2(A, B); DST(1, 0) = DST(0, 2) = AVG2(B, C); DST(2, 0) = DST(1, 2) = AVG2(C, D); DST(3, 0) = DST(2, 2) = AVG2(D, E); - DST(3, 2) = AVG3(E, F, G); + DST(3, 2) = AVG3(E, F, 
G); - DST(0, 1) = AVG3(A, B, C); + DST(0, 1) = AVG3(A, B, C); DST(1, 1) = DST(0, 3) = AVG3(B, C, D); DST(2, 1) = DST(1, 3) = AVG3(C, D, E); DST(3, 1) = DST(2, 3) = AVG3(D, E, F); - DST(3, 3) = AVG3(F, G, H); + DST(3, 3) = AVG3(F, G, H); } void vpx_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, @@ -413,13 +403,13 @@ void vpx_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const int H = above[7]; (void)stride; (void)left; - DST(0, 0) = AVG3(A, B, C); - DST(1, 0) = DST(0, 1) = AVG3(B, C, D); - DST(2, 0) = DST(1, 1) = DST(0, 2) = AVG3(C, D, E); + DST(0, 0) = AVG3(A, B, C); + DST(1, 0) = DST(0, 1) = AVG3(B, C, D); + DST(2, 0) = DST(1, 1) = DST(0, 2) = AVG3(C, D, E); DST(3, 0) = DST(2, 1) = DST(1, 2) = DST(0, 3) = AVG3(D, E, F); - DST(3, 1) = DST(2, 2) = DST(1, 3) = AVG3(E, F, G); - DST(3, 2) = DST(2, 3) = AVG3(F, G, H); - DST(3, 3) = H; // differs from vp8 + DST(3, 1) = DST(2, 2) = DST(1, 3) = AVG3(E, F, G); + DST(3, 2) = DST(2, 3) = AVG3(F, G, H); + DST(3, 3) = H; // differs from vp8 } void vpx_d45e_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, @@ -434,13 +424,13 @@ void vpx_d45e_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const int H = above[7]; (void)stride; (void)left; - DST(0, 0) = AVG3(A, B, C); - DST(1, 0) = DST(0, 1) = AVG3(B, C, D); - DST(2, 0) = DST(1, 1) = DST(0, 2) = AVG3(C, D, E); + DST(0, 0) = AVG3(A, B, C); + DST(1, 0) = DST(0, 1) = AVG3(B, C, D); + DST(2, 0) = DST(1, 1) = DST(0, 2) = AVG3(C, D, E); DST(3, 0) = DST(2, 1) = DST(1, 2) = DST(0, 3) = AVG3(D, E, F); - DST(3, 1) = DST(2, 2) = DST(1, 3) = AVG3(E, F, G); - DST(3, 2) = DST(2, 3) = AVG3(F, G, H); - DST(3, 3) = AVG3(G, H, H); + DST(3, 1) = DST(2, 2) = DST(1, 3) = AVG3(E, F, G); + DST(3, 2) = DST(2, 3) = AVG3(F, G, H); + DST(3, 3) = AVG3(G, H, H); } void vpx_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, @@ -456,14 +446,14 @@ void vpx_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, DST(0, 0) = DST(1, 2) = AVG2(X, A); DST(1, 0) = DST(2, 2) = AVG2(A, B); DST(2, 0) = DST(3, 2) 
= AVG2(B, C); - DST(3, 0) = AVG2(C, D); + DST(3, 0) = AVG2(C, D); - DST(0, 3) = AVG3(K, J, I); - DST(0, 2) = AVG3(J, I, X); + DST(0, 3) = AVG3(K, J, I); + DST(0, 2) = AVG3(J, I, X); DST(0, 1) = DST(1, 3) = AVG3(I, X, A); DST(1, 1) = DST(2, 3) = AVG3(X, A, B); DST(2, 1) = DST(3, 3) = AVG3(A, B, C); - DST(3, 1) = AVG3(B, C, D); + DST(3, 1) = AVG3(B, C, D); } void vpx_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, @@ -478,13 +468,13 @@ void vpx_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const int C = above[2]; const int D = above[3]; (void)stride; - DST(0, 3) = AVG3(J, K, L); - DST(1, 3) = DST(0, 2) = AVG3(I, J, K); - DST(2, 3) = DST(1, 2) = DST(0, 1) = AVG3(X, I, J); + DST(0, 3) = AVG3(J, K, L); + DST(1, 3) = DST(0, 2) = AVG3(I, J, K); + DST(2, 3) = DST(1, 2) = DST(0, 1) = AVG3(X, I, J); DST(3, 3) = DST(2, 2) = DST(1, 1) = DST(0, 0) = AVG3(A, X, I); - DST(3, 2) = DST(2, 1) = DST(1, 0) = AVG3(B, A, X); - DST(3, 1) = DST(2, 0) = AVG3(C, B, A); - DST(3, 0) = AVG3(D, C, B); + DST(3, 2) = DST(2, 1) = DST(1, 0) = AVG3(B, A, X); + DST(3, 1) = DST(2, 0) = AVG3(C, B, A); + DST(3, 0) = AVG3(D, C, B); } void vpx_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, @@ -501,14 +491,14 @@ void vpx_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, DST(0, 0) = DST(2, 1) = AVG2(I, X); DST(0, 1) = DST(2, 2) = AVG2(J, I); DST(0, 2) = DST(2, 3) = AVG2(K, J); - DST(0, 3) = AVG2(L, K); + DST(0, 3) = AVG2(L, K); - DST(3, 0) = AVG3(A, B, C); - DST(2, 0) = AVG3(X, A, B); + DST(3, 0) = AVG3(A, B, C); + DST(2, 0) = AVG3(X, A, B); DST(1, 0) = DST(3, 1) = AVG3(I, X, A); DST(1, 1) = DST(3, 2) = AVG3(J, I, X); DST(1, 2) = DST(3, 3) = AVG3(K, J, I); - DST(1, 3) = AVG3(L, K, J); + DST(1, 3) = AVG3(L, K, J); } #if CONFIG_VP9_HIGHBITDEPTH @@ -516,8 +506,8 @@ static INLINE void highbd_d207_predictor(uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd) { int r, c; - (void) above; - (void) bd; + (void)above; + (void)bd; // First column. 
for (r = 0; r < bs - 1; ++r) { @@ -535,8 +525,7 @@ static INLINE void highbd_d207_predictor(uint16_t *dst, ptrdiff_t stride, dst++; // Rest of last row. - for (c = 0; c < bs - 2; ++c) - dst[(bs - 1) * stride + c] = left[bs - 1]; + for (c = 0; c < bs - 2; ++c) dst[(bs - 1) * stride + c] = left[bs - 1]; for (r = bs - 2; r >= 0; --r) { for (c = 0; c < bs - 2; ++c) @@ -549,31 +538,31 @@ static INLINE void highbd_d207e_predictor(uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd) { int r, c; - (void) above; - (void) bd; + (void)above; + (void)bd; for (r = 0; r < bs; ++r) { for (c = 0; c < bs; ++c) { dst[c] = c & 1 ? AVG3(left[(c >> 1) + r], left[(c >> 1) + r + 1], left[(c >> 1) + r + 2]) - : AVG2(left[(c >> 1) + r], left[(c >> 1) + r + 1]); + : AVG2(left[(c >> 1) + r], left[(c >> 1) + r + 1]); } dst += stride; } } #endif // CONFIG_MISC_FIXES -static INLINE void highbd_d63_predictor(uint16_t *dst, ptrdiff_t stride, - int bs, const uint16_t *above, +static INLINE void highbd_d63_predictor(uint16_t *dst, ptrdiff_t stride, int bs, + const uint16_t *above, const uint16_t *left, int bd) { int r, c; - (void) left; - (void) bd; + (void)left; + (void)bd; for (r = 0; r < bs; ++r) { for (c = 0; c < bs; ++c) { dst[c] = r & 1 ? AVG3(above[(r >> 1) + c], above[(r >> 1) + c + 1], above[(r >> 1) + c + 2]) - : AVG2(above[(r >> 1) + c], above[(r >> 1) + c + 1]); + : AVG2(above[(r >> 1) + c], above[(r >> 1) + c + 1]); } dst += stride; } @@ -585,13 +574,13 @@ static INLINE void highbd_d45_predictor(uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd) { int r, c; - (void) left; - (void) bd; + (void)left; + (void)bd; for (r = 0; r < bs; ++r) { for (c = 0; c < bs; ++c) { - dst[c] = r + c + 2 < bs * 2 ? AVG3(above[r + c], above[r + c + 1], - above[r + c + 2]) - : above[bs * 2 - 1]; + dst[c] = r + c + 2 < bs * 2 + ? 
AVG3(above[r + c], above[r + c + 1], above[r + c + 2]) + : above[bs * 2 - 1]; } dst += stride; } @@ -602,8 +591,8 @@ static INLINE void highbd_d45e_predictor(uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd) { int r, c; - (void) left; - (void) bd; + (void)left; + (void)bd; for (r = 0; r < bs; ++r) { for (c = 0; c < bs; ++c) { dst[c] = AVG3(above[r + c], above[r + c + 1], @@ -618,17 +607,15 @@ static INLINE void highbd_d117_predictor(uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd) { int r, c; - (void) bd; + (void)bd; // first row - for (c = 0; c < bs; c++) - dst[c] = AVG2(above[c - 1], above[c]); + for (c = 0; c < bs; c++) dst[c] = AVG2(above[c - 1], above[c]); dst += stride; // second row dst[0] = AVG3(left[0], above[-1], above[0]); - for (c = 1; c < bs; c++) - dst[c] = AVG3(above[c - 2], above[c - 1], above[c]); + for (c = 1; c < bs; c++) dst[c] = AVG3(above[c - 2], above[c - 1], above[c]); dst += stride; // the rest of first col @@ -638,8 +625,7 @@ static INLINE void highbd_d117_predictor(uint16_t *dst, ptrdiff_t stride, // the rest of the block for (r = 2; r < bs; ++r) { - for (c = 1; c < bs; c++) - dst[c] = dst[-2 * stride + c - 1]; + for (c = 1; c < bs; c++) dst[c] = dst[-2 * stride + c - 1]; dst += stride; } } @@ -648,10 +634,9 @@ static INLINE void highbd_d135_predictor(uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd) { int r, c; - (void) bd; + (void)bd; dst[0] = AVG3(left[0], above[-1], above[0]); - for (c = 1; c < bs; c++) - dst[c] = AVG3(above[c - 2], above[c - 1], above[c]); + for (c = 1; c < bs; c++) dst[c] = AVG3(above[c - 2], above[c - 1], above[c]); dst[stride] = AVG3(above[-1], left[0], left[1]); for (r = 2; r < bs; ++r) @@ -659,8 +644,7 @@ static INLINE void highbd_d135_predictor(uint16_t *dst, ptrdiff_t stride, dst += stride; for (r = 1; r < bs; ++r) { - for (c = 1; c < bs; c++) - dst[c] = dst[-stride + c - 1]; + 
for (c = 1; c < bs; c++) dst[c] = dst[-stride + c - 1]; dst += stride; } } @@ -669,10 +653,9 @@ static INLINE void highbd_d153_predictor(uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd) { int r, c; - (void) bd; + (void)bd; dst[0] = AVG2(above[-1], left[0]); - for (r = 1; r < bs; r++) - dst[r * stride] = AVG2(left[r - 1], left[r]); + for (r = 1; r < bs; r++) dst[r * stride] = AVG2(left[r - 1], left[r]); dst++; dst[0] = AVG3(left[0], above[-1], above[0]); @@ -686,42 +669,41 @@ static INLINE void highbd_d153_predictor(uint16_t *dst, ptrdiff_t stride, dst += stride; for (r = 1; r < bs; ++r) { - for (c = 0; c < bs - 2; c++) - dst[c] = dst[-stride + c - 2]; + for (c = 0; c < bs - 2; c++) dst[c] = dst[-stride + c - 2]; dst += stride; } } -static INLINE void highbd_v_predictor(uint16_t *dst, ptrdiff_t stride, - int bs, const uint16_t *above, +static INLINE void highbd_v_predictor(uint16_t *dst, ptrdiff_t stride, int bs, + const uint16_t *above, const uint16_t *left, int bd) { int r; - (void) left; - (void) bd; + (void)left; + (void)bd; for (r = 0; r < bs; r++) { memcpy(dst, above, bs * sizeof(uint16_t)); dst += stride; } } -static INLINE void highbd_h_predictor(uint16_t *dst, ptrdiff_t stride, - int bs, const uint16_t *above, +static INLINE void highbd_h_predictor(uint16_t *dst, ptrdiff_t stride, int bs, + const uint16_t *above, const uint16_t *left, int bd) { int r; - (void) above; - (void) bd; + (void)above; + (void)bd; for (r = 0; r < bs; r++) { vpx_memset16(dst, left[r], bs); dst += stride; } } -static INLINE void highbd_tm_predictor(uint16_t *dst, ptrdiff_t stride, - int bs, const uint16_t *above, +static INLINE void highbd_tm_predictor(uint16_t *dst, ptrdiff_t stride, int bs, + const uint16_t *above, const uint16_t *left, int bd) { int r, c; int ytop_left = above[-1]; - (void) bd; + (void)bd; for (r = 0; r < bs; r++) { for (c = 0; c < bs; c++) @@ -734,8 +716,8 @@ static INLINE void highbd_dc_128_predictor(uint16_t *dst, 
ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd) { int r; - (void) above; - (void) left; + (void)above; + (void)left; for (r = 0; r < bs; r++) { vpx_memset16(dst, 128 << (bd - 8), bs); @@ -747,11 +729,10 @@ static INLINE void highbd_dc_left_predictor(uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd) { int i, r, expected_dc, sum = 0; - (void) above; - (void) bd; + (void)above; + (void)bd; - for (i = 0; i < bs; i++) - sum += left[i]; + for (i = 0; i < bs; i++) sum += left[i]; expected_dc = (sum + (bs >> 1)) / bs; for (r = 0; r < bs; r++) { @@ -764,11 +745,10 @@ static INLINE void highbd_dc_top_predictor(uint16_t *dst, ptrdiff_t stride, int bs, const uint16_t *above, const uint16_t *left, int bd) { int i, r, expected_dc, sum = 0; - (void) left; - (void) bd; + (void)left; + (void)bd; - for (i = 0; i < bs; i++) - sum += above[i]; + for (i = 0; i < bs; i++) sum += above[i]; expected_dc = (sum + (bs >> 1)) / bs; for (r = 0; r < bs; r++) { @@ -777,12 +757,12 @@ static INLINE void highbd_dc_top_predictor(uint16_t *dst, ptrdiff_t stride, } } -static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, - int bs, const uint16_t *above, +static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, int bs, + const uint16_t *above, const uint16_t *left, int bd) { int i, r, expected_dc, sum = 0; const int count = 2 * bs; - (void) bd; + (void)bd; for (i = 0; i < bs; i++) { sum += above[i]; @@ -801,22 +781,22 @@ static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, // This serves as a wrapper function, so that all the prediction functions // can be unified and accessed as a pointer array. Note that the boundary // above and left are not necessarily used all the time. 
-#define intra_pred_sized(type, size) \ - void vpx_##type##_predictor_##size##x##size##_c(uint8_t *dst, \ - ptrdiff_t stride, \ - const uint8_t *above, \ - const uint8_t *left) { \ - type##_predictor(dst, stride, size, above, left); \ +#define intra_pred_sized(type, size) \ + void vpx_##type##_predictor_##size##x##size##_c( \ + uint8_t *dst, ptrdiff_t stride, const uint8_t *above, \ + const uint8_t *left) { \ + type##_predictor(dst, stride, size, above, left); \ } #if CONFIG_VP9_HIGHBITDEPTH -#define intra_pred_highbd_sized(type, size) \ - void vpx_highbd_##type##_predictor_##size##x##size##_c( \ - uint16_t *dst, ptrdiff_t stride, const uint16_t *above, \ - const uint16_t *left, int bd) { \ +#define intra_pred_highbd_sized(type, size) \ + void vpx_highbd_##type##_predictor_##size##x##size##_c( \ + uint16_t *dst, ptrdiff_t stride, const uint16_t *above, \ + const uint16_t *left, int bd) { \ highbd_##type##_predictor(dst, stride, size, above, left, bd); \ } +/* clang-format off */ #define intra_pred_allsizes(type) \ intra_pred_sized(type, 4) \ intra_pred_sized(type, 8) \ @@ -867,4 +847,5 @@ intra_pred_allsizes(dc_128) intra_pred_allsizes(dc_left) intra_pred_allsizes(dc_top) intra_pred_allsizes(dc) +/* clang-format on */ #undef intra_pred_allsizes diff --git a/libs/libvpx/vpx_dsp/inv_txfm.c b/libs/libvpx/vpx_dsp/inv_txfm.c index a0f59bf757..1526663d89 100644 --- a/libs/libvpx/vpx_dsp/inv_txfm.c +++ b/libs/libvpx/vpx_dsp/inv_txfm.c @@ -11,11 +11,12 @@ #include #include +#include "./vpx_dsp_rtcd.h" #include "vpx_dsp/inv_txfm.h" void vpx_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) { -/* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds, - 0.5 shifts per pixel. */ + /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds, + 0.5 shifts per pixel. 
*/ int i; tran_low_t output[16]; tran_high_t a1, b1, c1, d1, e1; @@ -34,10 +35,10 @@ void vpx_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) { c1 = e1 - c1; a1 -= b1; d1 += c1; - op[0] = WRAPLOW(a1, 8); - op[1] = WRAPLOW(b1, 8); - op[2] = WRAPLOW(c1, 8); - op[3] = WRAPLOW(d1, 8); + op[0] = WRAPLOW(a1); + op[1] = WRAPLOW(b1); + op[2] = WRAPLOW(c1); + op[3] = WRAPLOW(d1); ip += 4; op += 4; } @@ -55,10 +56,10 @@ void vpx_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) { c1 = e1 - c1; a1 -= b1; d1 += c1; - dest[stride * 0] = clip_pixel_add(dest[stride * 0], a1); - dest[stride * 1] = clip_pixel_add(dest[stride * 1], b1); - dest[stride * 2] = clip_pixel_add(dest[stride * 2], c1); - dest[stride * 3] = clip_pixel_add(dest[stride * 3], d1); + dest[stride * 0] = clip_pixel_add(dest[stride * 0], WRAPLOW(a1)); + dest[stride * 1] = clip_pixel_add(dest[stride * 1], WRAPLOW(b1)); + dest[stride * 2] = clip_pixel_add(dest[stride * 2], WRAPLOW(c1)); + dest[stride * 3] = clip_pixel_add(dest[stride * 3], WRAPLOW(d1)); ip++; dest++; @@ -75,8 +76,8 @@ void vpx_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest, int dest_stride) { a1 = ip[0] >> UNIT_QUANT_SHIFT; e1 = a1 >> 1; a1 -= e1; - op[0] = WRAPLOW(a1, 8); - op[1] = op[2] = op[3] = WRAPLOW(e1, 8); + op[0] = WRAPLOW(a1); + op[1] = op[2] = op[3] = WRAPLOW(e1); ip = tmp; for (i = 0; i < 4; i++) { @@ -97,18 +98,18 @@ void idct4_c(const tran_low_t *input, tran_low_t *output) { // stage 1 temp1 = (input[0] + input[2]) * cospi_16_64; temp2 = (input[0] - input[2]) * cospi_16_64; - step[0] = WRAPLOW(dct_const_round_shift(temp1), 8); - step[1] = WRAPLOW(dct_const_round_shift(temp2), 8); + step[0] = WRAPLOW(dct_const_round_shift(temp1)); + step[1] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64; temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64; - step[2] = WRAPLOW(dct_const_round_shift(temp1), 8); - step[3] = WRAPLOW(dct_const_round_shift(temp2), 
8); + step[2] = WRAPLOW(dct_const_round_shift(temp1)); + step[3] = WRAPLOW(dct_const_round_shift(temp2)); // stage 2 - output[0] = WRAPLOW(step[0] + step[3], 8); - output[1] = WRAPLOW(step[1] + step[2], 8); - output[2] = WRAPLOW(step[1] - step[2], 8); - output[3] = WRAPLOW(step[0] - step[3], 8); + output[0] = WRAPLOW(step[0] + step[3]); + output[1] = WRAPLOW(step[1] + step[2]); + output[2] = WRAPLOW(step[1] - step[2]); + output[3] = WRAPLOW(step[0] - step[3]); } void vpx_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) { @@ -126,8 +127,7 @@ void vpx_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) { // Columns for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = out[j * 4 + i]; + for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i]; idct4_c(temp_in, temp_out); for (j = 0; j < 4; ++j) { dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], @@ -140,8 +140,8 @@ void vpx_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride) { int i; tran_high_t a1; - tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), 8); - out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), 8); + tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64)); + out = WRAPLOW(dct_const_round_shift(out * cospi_16_64)); a1 = ROUND_POWER_OF_TWO(out, 4); for (i = 0; i < 4; i++) { @@ -163,48 +163,48 @@ void idct8_c(const tran_low_t *input, tran_low_t *output) { step1[3] = input[6]; temp1 = input[1] * cospi_28_64 - input[7] * cospi_4_64; temp2 = input[1] * cospi_4_64 + input[7] * cospi_28_64; - step1[4] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[7] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[4] = WRAPLOW(dct_const_round_shift(temp1)); + step1[7] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64; temp2 = input[5] * cospi_20_64 + input[3] * cospi_12_64; - step1[5] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[6] = 
WRAPLOW(dct_const_round_shift(temp2), 8); + step1[5] = WRAPLOW(dct_const_round_shift(temp1)); + step1[6] = WRAPLOW(dct_const_round_shift(temp2)); // stage 2 temp1 = (step1[0] + step1[2]) * cospi_16_64; temp2 = (step1[0] - step1[2]) * cospi_16_64; - step2[0] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[1] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[0] = WRAPLOW(dct_const_round_shift(temp1)); + step2[1] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = step1[1] * cospi_24_64 - step1[3] * cospi_8_64; temp2 = step1[1] * cospi_8_64 + step1[3] * cospi_24_64; - step2[2] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[3] = WRAPLOW(dct_const_round_shift(temp2), 8); - step2[4] = WRAPLOW(step1[4] + step1[5], 8); - step2[5] = WRAPLOW(step1[4] - step1[5], 8); - step2[6] = WRAPLOW(-step1[6] + step1[7], 8); - step2[7] = WRAPLOW(step1[6] + step1[7], 8); + step2[2] = WRAPLOW(dct_const_round_shift(temp1)); + step2[3] = WRAPLOW(dct_const_round_shift(temp2)); + step2[4] = WRAPLOW(step1[4] + step1[5]); + step2[5] = WRAPLOW(step1[4] - step1[5]); + step2[6] = WRAPLOW(-step1[6] + step1[7]); + step2[7] = WRAPLOW(step1[6] + step1[7]); // stage 3 - step1[0] = WRAPLOW(step2[0] + step2[3], 8); - step1[1] = WRAPLOW(step2[1] + step2[2], 8); - step1[2] = WRAPLOW(step2[1] - step2[2], 8); - step1[3] = WRAPLOW(step2[0] - step2[3], 8); + step1[0] = WRAPLOW(step2[0] + step2[3]); + step1[1] = WRAPLOW(step2[1] + step2[2]); + step1[2] = WRAPLOW(step2[1] - step2[2]); + step1[3] = WRAPLOW(step2[0] - step2[3]); step1[4] = step2[4]; temp1 = (step2[6] - step2[5]) * cospi_16_64; temp2 = (step2[5] + step2[6]) * cospi_16_64; - step1[5] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[6] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[5] = WRAPLOW(dct_const_round_shift(temp1)); + step1[6] = WRAPLOW(dct_const_round_shift(temp2)); step1[7] = step2[7]; // stage 4 - output[0] = WRAPLOW(step1[0] + step1[7], 8); - output[1] = WRAPLOW(step1[1] + step1[6], 8); - output[2] = WRAPLOW(step1[2] + 
step1[5], 8); - output[3] = WRAPLOW(step1[3] + step1[4], 8); - output[4] = WRAPLOW(step1[3] - step1[4], 8); - output[5] = WRAPLOW(step1[2] - step1[5], 8); - output[6] = WRAPLOW(step1[1] - step1[6], 8); - output[7] = WRAPLOW(step1[0] - step1[7], 8); + output[0] = WRAPLOW(step1[0] + step1[7]); + output[1] = WRAPLOW(step1[1] + step1[6]); + output[2] = WRAPLOW(step1[2] + step1[5]); + output[3] = WRAPLOW(step1[3] + step1[4]); + output[4] = WRAPLOW(step1[3] - step1[4]); + output[5] = WRAPLOW(step1[2] - step1[5]); + output[6] = WRAPLOW(step1[1] - step1[6]); + output[7] = WRAPLOW(step1[0] - step1[7]); } void vpx_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) { @@ -222,8 +222,7 @@ void vpx_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) { // Then transform columns for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; + for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i]; idct8_c(temp_in, temp_out); for (j = 0; j < 8; ++j) { dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], @@ -235,12 +234,11 @@ void vpx_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) { void vpx_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) { int i, j; tran_high_t a1; - tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), 8); - out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), 8); + tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64)); + out = WRAPLOW(dct_const_round_shift(out * cospi_16_64)); a1 = ROUND_POWER_OF_TWO(out, 5); for (j = 0; j < 8; ++j) { - for (i = 0; i < 8; ++i) - dest[i] = clip_pixel_add(dest[i], a1); + for (i = 0; i < 8; ++i) dest[i] = clip_pixel_add(dest[i], a1); dest += stride; } } @@ -265,7 +263,7 @@ void iadst4_c(const tran_low_t *input, tran_low_t *output) { s4 = sinpi_1_9 * x2; s5 = sinpi_2_9 * x3; s6 = sinpi_4_9 * x3; - s7 = x0 - x2 + x3; + s7 = WRAPLOW(x0 - x2 + x3); s0 = s0 + s3 + s5; s1 = s1 - s4 - s6; @@ -276,10 
+274,10 @@ void iadst4_c(const tran_low_t *input, tran_low_t *output) { // The overall dynamic range is 14b (input) + 14b (multiplication scaling) // + 1b (addition) = 29b. // Hence the output bit depth is 15b. - output[0] = WRAPLOW(dct_const_round_shift(s0 + s3), 8); - output[1] = WRAPLOW(dct_const_round_shift(s1 + s3), 8); - output[2] = WRAPLOW(dct_const_round_shift(s2), 8); - output[3] = WRAPLOW(dct_const_round_shift(s0 + s1 - s3), 8); + output[0] = WRAPLOW(dct_const_round_shift(s0 + s3)); + output[1] = WRAPLOW(dct_const_round_shift(s1 + s3)); + output[2] = WRAPLOW(dct_const_round_shift(s2)); + output[3] = WRAPLOW(dct_const_round_shift(s0 + s1 - s3)); } void iadst8_c(const tran_low_t *input, tran_low_t *output) { @@ -295,29 +293,29 @@ void iadst8_c(const tran_low_t *input, tran_low_t *output) { tran_high_t x7 = input[6]; if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) { - output[0] = output[1] = output[2] = output[3] = output[4] - = output[5] = output[6] = output[7] = 0; + output[0] = output[1] = output[2] = output[3] = output[4] = output[5] = + output[6] = output[7] = 0; return; } // stage 1 - s0 = (int)(cospi_2_64 * x0 + cospi_30_64 * x1); - s1 = (int)(cospi_30_64 * x0 - cospi_2_64 * x1); + s0 = (int)(cospi_2_64 * x0 + cospi_30_64 * x1); + s1 = (int)(cospi_30_64 * x0 - cospi_2_64 * x1); s2 = (int)(cospi_10_64 * x2 + cospi_22_64 * x3); s3 = (int)(cospi_22_64 * x2 - cospi_10_64 * x3); s4 = (int)(cospi_18_64 * x4 + cospi_14_64 * x5); s5 = (int)(cospi_14_64 * x4 - cospi_18_64 * x5); - s6 = (int)(cospi_26_64 * x6 + cospi_6_64 * x7); - s7 = (int)(cospi_6_64 * x6 - cospi_26_64 * x7); + s6 = (int)(cospi_26_64 * x6 + cospi_6_64 * x7); + s7 = (int)(cospi_6_64 * x6 - cospi_26_64 * x7); - x0 = WRAPLOW(dct_const_round_shift(s0 + s4), 8); - x1 = WRAPLOW(dct_const_round_shift(s1 + s5), 8); - x2 = WRAPLOW(dct_const_round_shift(s2 + s6), 8); - x3 = WRAPLOW(dct_const_round_shift(s3 + s7), 8); - x4 = WRAPLOW(dct_const_round_shift(s0 - s4), 8); - x5 = 
WRAPLOW(dct_const_round_shift(s1 - s5), 8); - x6 = WRAPLOW(dct_const_round_shift(s2 - s6), 8); - x7 = WRAPLOW(dct_const_round_shift(s3 - s7), 8); + x0 = WRAPLOW(dct_const_round_shift(s0 + s4)); + x1 = WRAPLOW(dct_const_round_shift(s1 + s5)); + x2 = WRAPLOW(dct_const_round_shift(s2 + s6)); + x3 = WRAPLOW(dct_const_round_shift(s3 + s7)); + x4 = WRAPLOW(dct_const_round_shift(s0 - s4)); + x5 = WRAPLOW(dct_const_round_shift(s1 - s5)); + x6 = WRAPLOW(dct_const_round_shift(s2 - s6)); + x7 = WRAPLOW(dct_const_round_shift(s3 - s7)); // stage 2 s0 = (int)x0; @@ -329,14 +327,14 @@ void iadst8_c(const tran_low_t *input, tran_low_t *output) { s6 = (int)(-cospi_24_64 * x6 + cospi_8_64 * x7); s7 = (int)(cospi_8_64 * x6 + cospi_24_64 * x7); - x0 = WRAPLOW(s0 + s2, 8); - x1 = WRAPLOW(s1 + s3, 8); - x2 = WRAPLOW(s0 - s2, 8); - x3 = WRAPLOW(s1 - s3, 8); - x4 = WRAPLOW(dct_const_round_shift(s4 + s6), 8); - x5 = WRAPLOW(dct_const_round_shift(s5 + s7), 8); - x6 = WRAPLOW(dct_const_round_shift(s4 - s6), 8); - x7 = WRAPLOW(dct_const_round_shift(s5 - s7), 8); + x0 = WRAPLOW(s0 + s2); + x1 = WRAPLOW(s1 + s3); + x2 = WRAPLOW(s0 - s2); + x3 = WRAPLOW(s1 - s3); + x4 = WRAPLOW(dct_const_round_shift(s4 + s6)); + x5 = WRAPLOW(dct_const_round_shift(s5 + s7)); + x6 = WRAPLOW(dct_const_round_shift(s4 - s6)); + x7 = WRAPLOW(dct_const_round_shift(s5 - s7)); // stage 3 s2 = (int)(cospi_16_64 * (x2 + x3)); @@ -344,19 +342,19 @@ void iadst8_c(const tran_low_t *input, tran_low_t *output) { s6 = (int)(cospi_16_64 * (x6 + x7)); s7 = (int)(cospi_16_64 * (x6 - x7)); - x2 = WRAPLOW(dct_const_round_shift(s2), 8); - x3 = WRAPLOW(dct_const_round_shift(s3), 8); - x6 = WRAPLOW(dct_const_round_shift(s6), 8); - x7 = WRAPLOW(dct_const_round_shift(s7), 8); + x2 = WRAPLOW(dct_const_round_shift(s2)); + x3 = WRAPLOW(dct_const_round_shift(s3)); + x6 = WRAPLOW(dct_const_round_shift(s6)); + x7 = WRAPLOW(dct_const_round_shift(s7)); - output[0] = WRAPLOW(x0, 8); - output[1] = WRAPLOW(-x4, 8); - output[2] = WRAPLOW(x6, 8); - 
output[3] = WRAPLOW(-x2, 8); - output[4] = WRAPLOW(x3, 8); - output[5] = WRAPLOW(-x7, 8); - output[6] = WRAPLOW(x5, 8); - output[7] = WRAPLOW(-x1, 8); + output[0] = WRAPLOW(x0); + output[1] = WRAPLOW(-x4); + output[2] = WRAPLOW(x6); + output[3] = WRAPLOW(-x2); + output[4] = WRAPLOW(x3); + output[5] = WRAPLOW(-x7); + output[6] = WRAPLOW(x5); + output[7] = WRAPLOW(-x1); } void vpx_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) { @@ -375,8 +373,7 @@ void vpx_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) { // Then transform columns for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; + for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i]; idct8_c(temp_in, temp_out); for (j = 0; j < 8; ++j) { dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], @@ -390,22 +387,22 @@ void idct16_c(const tran_low_t *input, tran_low_t *output) { tran_high_t temp1, temp2; // stage 1 - step1[0] = input[0/2]; - step1[1] = input[16/2]; - step1[2] = input[8/2]; - step1[3] = input[24/2]; - step1[4] = input[4/2]; - step1[5] = input[20/2]; - step1[6] = input[12/2]; - step1[7] = input[28/2]; - step1[8] = input[2/2]; - step1[9] = input[18/2]; - step1[10] = input[10/2]; - step1[11] = input[26/2]; - step1[12] = input[6/2]; - step1[13] = input[22/2]; - step1[14] = input[14/2]; - step1[15] = input[30/2]; + step1[0] = input[0 / 2]; + step1[1] = input[16 / 2]; + step1[2] = input[8 / 2]; + step1[3] = input[24 / 2]; + step1[4] = input[4 / 2]; + step1[5] = input[20 / 2]; + step1[6] = input[12 / 2]; + step1[7] = input[28 / 2]; + step1[8] = input[2 / 2]; + step1[9] = input[18 / 2]; + step1[10] = input[10 / 2]; + step1[11] = input[26 / 2]; + step1[12] = input[6 / 2]; + step1[13] = input[22 / 2]; + step1[14] = input[14 / 2]; + step1[15] = input[30 / 2]; // stage 2 step2[0] = step1[0]; @@ -419,23 +416,23 @@ void idct16_c(const tran_low_t *input, tran_low_t *output) { temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64; temp2 = 
step1[8] * cospi_2_64 + step1[15] * cospi_30_64; - step2[8] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[15] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[8] = WRAPLOW(dct_const_round_shift(temp1)); + step2[15] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64; temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64; - step2[9] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[14] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[9] = WRAPLOW(dct_const_round_shift(temp1)); + step2[14] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64; temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64; - step2[10] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[13] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[10] = WRAPLOW(dct_const_round_shift(temp1)); + step2[13] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64; temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64; - step2[11] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[12] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[11] = WRAPLOW(dct_const_round_shift(temp1)); + step2[12] = WRAPLOW(dct_const_round_shift(temp2)); // stage 3 step1[0] = step2[0]; @@ -445,109 +442,109 @@ void idct16_c(const tran_low_t *input, tran_low_t *output) { temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64; temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64; - step1[4] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[7] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[4] = WRAPLOW(dct_const_round_shift(temp1)); + step1[7] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64; temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64; - step1[5] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[6] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[5] = WRAPLOW(dct_const_round_shift(temp1)); + step1[6] = 
WRAPLOW(dct_const_round_shift(temp2)); - step1[8] = WRAPLOW(step2[8] + step2[9], 8); - step1[9] = WRAPLOW(step2[8] - step2[9], 8); - step1[10] = WRAPLOW(-step2[10] + step2[11], 8); - step1[11] = WRAPLOW(step2[10] + step2[11], 8); - step1[12] = WRAPLOW(step2[12] + step2[13], 8); - step1[13] = WRAPLOW(step2[12] - step2[13], 8); - step1[14] = WRAPLOW(-step2[14] + step2[15], 8); - step1[15] = WRAPLOW(step2[14] + step2[15], 8); + step1[8] = WRAPLOW(step2[8] + step2[9]); + step1[9] = WRAPLOW(step2[8] - step2[9]); + step1[10] = WRAPLOW(-step2[10] + step2[11]); + step1[11] = WRAPLOW(step2[10] + step2[11]); + step1[12] = WRAPLOW(step2[12] + step2[13]); + step1[13] = WRAPLOW(step2[12] - step2[13]); + step1[14] = WRAPLOW(-step2[14] + step2[15]); + step1[15] = WRAPLOW(step2[14] + step2[15]); // stage 4 temp1 = (step1[0] + step1[1]) * cospi_16_64; temp2 = (step1[0] - step1[1]) * cospi_16_64; - step2[0] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[1] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[0] = WRAPLOW(dct_const_round_shift(temp1)); + step2[1] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64; temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64; - step2[2] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[3] = WRAPLOW(dct_const_round_shift(temp2), 8); - step2[4] = WRAPLOW(step1[4] + step1[5], 8); - step2[5] = WRAPLOW(step1[4] - step1[5], 8); - step2[6] = WRAPLOW(-step1[6] + step1[7], 8); - step2[7] = WRAPLOW(step1[6] + step1[7], 8); + step2[2] = WRAPLOW(dct_const_round_shift(temp1)); + step2[3] = WRAPLOW(dct_const_round_shift(temp2)); + step2[4] = WRAPLOW(step1[4] + step1[5]); + step2[5] = WRAPLOW(step1[4] - step1[5]); + step2[6] = WRAPLOW(-step1[6] + step1[7]); + step2[7] = WRAPLOW(step1[6] + step1[7]); step2[8] = step1[8]; step2[15] = step1[15]; temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64; temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64; - step2[9] = WRAPLOW(dct_const_round_shift(temp1), 
8); - step2[14] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[9] = WRAPLOW(dct_const_round_shift(temp1)); + step2[14] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64; temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64; - step2[10] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[13] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[10] = WRAPLOW(dct_const_round_shift(temp1)); + step2[13] = WRAPLOW(dct_const_round_shift(temp2)); step2[11] = step1[11]; step2[12] = step1[12]; // stage 5 - step1[0] = WRAPLOW(step2[0] + step2[3], 8); - step1[1] = WRAPLOW(step2[1] + step2[2], 8); - step1[2] = WRAPLOW(step2[1] - step2[2], 8); - step1[3] = WRAPLOW(step2[0] - step2[3], 8); + step1[0] = WRAPLOW(step2[0] + step2[3]); + step1[1] = WRAPLOW(step2[1] + step2[2]); + step1[2] = WRAPLOW(step2[1] - step2[2]); + step1[3] = WRAPLOW(step2[0] - step2[3]); step1[4] = step2[4]; temp1 = (step2[6] - step2[5]) * cospi_16_64; temp2 = (step2[5] + step2[6]) * cospi_16_64; - step1[5] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[6] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[5] = WRAPLOW(dct_const_round_shift(temp1)); + step1[6] = WRAPLOW(dct_const_round_shift(temp2)); step1[7] = step2[7]; - step1[8] = WRAPLOW(step2[8] + step2[11], 8); - step1[9] = WRAPLOW(step2[9] + step2[10], 8); - step1[10] = WRAPLOW(step2[9] - step2[10], 8); - step1[11] = WRAPLOW(step2[8] - step2[11], 8); - step1[12] = WRAPLOW(-step2[12] + step2[15], 8); - step1[13] = WRAPLOW(-step2[13] + step2[14], 8); - step1[14] = WRAPLOW(step2[13] + step2[14], 8); - step1[15] = WRAPLOW(step2[12] + step2[15], 8); + step1[8] = WRAPLOW(step2[8] + step2[11]); + step1[9] = WRAPLOW(step2[9] + step2[10]); + step1[10] = WRAPLOW(step2[9] - step2[10]); + step1[11] = WRAPLOW(step2[8] - step2[11]); + step1[12] = WRAPLOW(-step2[12] + step2[15]); + step1[13] = WRAPLOW(-step2[13] + step2[14]); + step1[14] = WRAPLOW(step2[13] + step2[14]); + step1[15] = WRAPLOW(step2[12] + 
step2[15]); // stage 6 - step2[0] = WRAPLOW(step1[0] + step1[7], 8); - step2[1] = WRAPLOW(step1[1] + step1[6], 8); - step2[2] = WRAPLOW(step1[2] + step1[5], 8); - step2[3] = WRAPLOW(step1[3] + step1[4], 8); - step2[4] = WRAPLOW(step1[3] - step1[4], 8); - step2[5] = WRAPLOW(step1[2] - step1[5], 8); - step2[6] = WRAPLOW(step1[1] - step1[6], 8); - step2[7] = WRAPLOW(step1[0] - step1[7], 8); + step2[0] = WRAPLOW(step1[0] + step1[7]); + step2[1] = WRAPLOW(step1[1] + step1[6]); + step2[2] = WRAPLOW(step1[2] + step1[5]); + step2[3] = WRAPLOW(step1[3] + step1[4]); + step2[4] = WRAPLOW(step1[3] - step1[4]); + step2[5] = WRAPLOW(step1[2] - step1[5]); + step2[6] = WRAPLOW(step1[1] - step1[6]); + step2[7] = WRAPLOW(step1[0] - step1[7]); step2[8] = step1[8]; step2[9] = step1[9]; temp1 = (-step1[10] + step1[13]) * cospi_16_64; temp2 = (step1[10] + step1[13]) * cospi_16_64; - step2[10] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[13] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[10] = WRAPLOW(dct_const_round_shift(temp1)); + step2[13] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = (-step1[11] + step1[12]) * cospi_16_64; temp2 = (step1[11] + step1[12]) * cospi_16_64; - step2[11] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[12] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[11] = WRAPLOW(dct_const_round_shift(temp1)); + step2[12] = WRAPLOW(dct_const_round_shift(temp2)); step2[14] = step1[14]; step2[15] = step1[15]; // stage 7 - output[0] = WRAPLOW(step2[0] + step2[15], 8); - output[1] = WRAPLOW(step2[1] + step2[14], 8); - output[2] = WRAPLOW(step2[2] + step2[13], 8); - output[3] = WRAPLOW(step2[3] + step2[12], 8); - output[4] = WRAPLOW(step2[4] + step2[11], 8); - output[5] = WRAPLOW(step2[5] + step2[10], 8); - output[6] = WRAPLOW(step2[6] + step2[9], 8); - output[7] = WRAPLOW(step2[7] + step2[8], 8); - output[8] = WRAPLOW(step2[7] - step2[8], 8); - output[9] = WRAPLOW(step2[6] - step2[9], 8); - output[10] = WRAPLOW(step2[5] - step2[10], 8); - output[11] = 
WRAPLOW(step2[4] - step2[11], 8); - output[12] = WRAPLOW(step2[3] - step2[12], 8); - output[13] = WRAPLOW(step2[2] - step2[13], 8); - output[14] = WRAPLOW(step2[1] - step2[14], 8); - output[15] = WRAPLOW(step2[0] - step2[15], 8); + output[0] = WRAPLOW(step2[0] + step2[15]); + output[1] = WRAPLOW(step2[1] + step2[14]); + output[2] = WRAPLOW(step2[2] + step2[13]); + output[3] = WRAPLOW(step2[3] + step2[12]); + output[4] = WRAPLOW(step2[4] + step2[11]); + output[5] = WRAPLOW(step2[5] + step2[10]); + output[6] = WRAPLOW(step2[6] + step2[9]); + output[7] = WRAPLOW(step2[7] + step2[8]); + output[8] = WRAPLOW(step2[7] - step2[8]); + output[9] = WRAPLOW(step2[6] - step2[9]); + output[10] = WRAPLOW(step2[5] - step2[10]); + output[11] = WRAPLOW(step2[4] - step2[11]); + output[12] = WRAPLOW(step2[3] - step2[12]); + output[13] = WRAPLOW(step2[2] - step2[13]); + output[14] = WRAPLOW(step2[1] - step2[14]); + output[15] = WRAPLOW(step2[0] - step2[15]); } void vpx_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest, @@ -566,8 +563,7 @@ void vpx_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest, // Then transform columns for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j * 16 + i]; + for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i]; idct16_c(temp_in, temp_out); for (j = 0; j < 16; ++j) { dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], @@ -597,21 +593,20 @@ void iadst16_c(const tran_low_t *input, tran_low_t *output) { tran_high_t x14 = input[1]; tran_high_t x15 = input[14]; - if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 - | x9 | x10 | x11 | x12 | x13 | x14 | x15)) { - output[0] = output[1] = output[2] = output[3] = output[4] - = output[5] = output[6] = output[7] = output[8] - = output[9] = output[10] = output[11] = output[12] - = output[13] = output[14] = output[15] = 0; + if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | x11 | x12 | + x13 | x14 | x15)) { + output[0] = output[1] = output[2] = output[3] = 
output[4] = output[5] = + output[6] = output[7] = output[8] = output[9] = output[10] = + output[11] = output[12] = output[13] = output[14] = output[15] = 0; return; } // stage 1 - s0 = x0 * cospi_1_64 + x1 * cospi_31_64; + s0 = x0 * cospi_1_64 + x1 * cospi_31_64; s1 = x0 * cospi_31_64 - x1 * cospi_1_64; - s2 = x2 * cospi_5_64 + x3 * cospi_27_64; + s2 = x2 * cospi_5_64 + x3 * cospi_27_64; s3 = x2 * cospi_27_64 - x3 * cospi_5_64; - s4 = x4 * cospi_9_64 + x5 * cospi_23_64; + s4 = x4 * cospi_9_64 + x5 * cospi_23_64; s5 = x4 * cospi_23_64 - x5 * cospi_9_64; s6 = x6 * cospi_13_64 + x7 * cospi_19_64; s7 = x6 * cospi_19_64 - x7 * cospi_13_64; @@ -620,26 +615,26 @@ void iadst16_c(const tran_low_t *input, tran_low_t *output) { s10 = x10 * cospi_21_64 + x11 * cospi_11_64; s11 = x10 * cospi_11_64 - x11 * cospi_21_64; s12 = x12 * cospi_25_64 + x13 * cospi_7_64; - s13 = x12 * cospi_7_64 - x13 * cospi_25_64; + s13 = x12 * cospi_7_64 - x13 * cospi_25_64; s14 = x14 * cospi_29_64 + x15 * cospi_3_64; - s15 = x14 * cospi_3_64 - x15 * cospi_29_64; + s15 = x14 * cospi_3_64 - x15 * cospi_29_64; - x0 = WRAPLOW(dct_const_round_shift(s0 + s8), 8); - x1 = WRAPLOW(dct_const_round_shift(s1 + s9), 8); - x2 = WRAPLOW(dct_const_round_shift(s2 + s10), 8); - x3 = WRAPLOW(dct_const_round_shift(s3 + s11), 8); - x4 = WRAPLOW(dct_const_round_shift(s4 + s12), 8); - x5 = WRAPLOW(dct_const_round_shift(s5 + s13), 8); - x6 = WRAPLOW(dct_const_round_shift(s6 + s14), 8); - x7 = WRAPLOW(dct_const_round_shift(s7 + s15), 8); - x8 = WRAPLOW(dct_const_round_shift(s0 - s8), 8); - x9 = WRAPLOW(dct_const_round_shift(s1 - s9), 8); - x10 = WRAPLOW(dct_const_round_shift(s2 - s10), 8); - x11 = WRAPLOW(dct_const_round_shift(s3 - s11), 8); - x12 = WRAPLOW(dct_const_round_shift(s4 - s12), 8); - x13 = WRAPLOW(dct_const_round_shift(s5 - s13), 8); - x14 = WRAPLOW(dct_const_round_shift(s6 - s14), 8); - x15 = WRAPLOW(dct_const_round_shift(s7 - s15), 8); + x0 = WRAPLOW(dct_const_round_shift(s0 + s8)); + x1 = 
WRAPLOW(dct_const_round_shift(s1 + s9)); + x2 = WRAPLOW(dct_const_round_shift(s2 + s10)); + x3 = WRAPLOW(dct_const_round_shift(s3 + s11)); + x4 = WRAPLOW(dct_const_round_shift(s4 + s12)); + x5 = WRAPLOW(dct_const_round_shift(s5 + s13)); + x6 = WRAPLOW(dct_const_round_shift(s6 + s14)); + x7 = WRAPLOW(dct_const_round_shift(s7 + s15)); + x8 = WRAPLOW(dct_const_round_shift(s0 - s8)); + x9 = WRAPLOW(dct_const_round_shift(s1 - s9)); + x10 = WRAPLOW(dct_const_round_shift(s2 - s10)); + x11 = WRAPLOW(dct_const_round_shift(s3 - s11)); + x12 = WRAPLOW(dct_const_round_shift(s4 - s12)); + x13 = WRAPLOW(dct_const_round_shift(s5 - s13)); + x14 = WRAPLOW(dct_const_round_shift(s6 - s14)); + x15 = WRAPLOW(dct_const_round_shift(s7 - s15)); // stage 2 s0 = x0; @@ -650,102 +645,102 @@ void iadst16_c(const tran_low_t *input, tran_low_t *output) { s5 = x5; s6 = x6; s7 = x7; - s8 = x8 * cospi_4_64 + x9 * cospi_28_64; - s9 = x8 * cospi_28_64 - x9 * cospi_4_64; - s10 = x10 * cospi_20_64 + x11 * cospi_12_64; - s11 = x10 * cospi_12_64 - x11 * cospi_20_64; - s12 = - x12 * cospi_28_64 + x13 * cospi_4_64; - s13 = x12 * cospi_4_64 + x13 * cospi_28_64; - s14 = - x14 * cospi_12_64 + x15 * cospi_20_64; - s15 = x14 * cospi_20_64 + x15 * cospi_12_64; + s8 = x8 * cospi_4_64 + x9 * cospi_28_64; + s9 = x8 * cospi_28_64 - x9 * cospi_4_64; + s10 = x10 * cospi_20_64 + x11 * cospi_12_64; + s11 = x10 * cospi_12_64 - x11 * cospi_20_64; + s12 = -x12 * cospi_28_64 + x13 * cospi_4_64; + s13 = x12 * cospi_4_64 + x13 * cospi_28_64; + s14 = -x14 * cospi_12_64 + x15 * cospi_20_64; + s15 = x14 * cospi_20_64 + x15 * cospi_12_64; - x0 = WRAPLOW(s0 + s4, 8); - x1 = WRAPLOW(s1 + s5, 8); - x2 = WRAPLOW(s2 + s6, 8); - x3 = WRAPLOW(s3 + s7, 8); - x4 = WRAPLOW(s0 - s4, 8); - x5 = WRAPLOW(s1 - s5, 8); - x6 = WRAPLOW(s2 - s6, 8); - x7 = WRAPLOW(s3 - s7, 8); - x8 = WRAPLOW(dct_const_round_shift(s8 + s12), 8); - x9 = WRAPLOW(dct_const_round_shift(s9 + s13), 8); - x10 = WRAPLOW(dct_const_round_shift(s10 + s14), 8); - x11 = 
WRAPLOW(dct_const_round_shift(s11 + s15), 8); - x12 = WRAPLOW(dct_const_round_shift(s8 - s12), 8); - x13 = WRAPLOW(dct_const_round_shift(s9 - s13), 8); - x14 = WRAPLOW(dct_const_round_shift(s10 - s14), 8); - x15 = WRAPLOW(dct_const_round_shift(s11 - s15), 8); + x0 = WRAPLOW(s0 + s4); + x1 = WRAPLOW(s1 + s5); + x2 = WRAPLOW(s2 + s6); + x3 = WRAPLOW(s3 + s7); + x4 = WRAPLOW(s0 - s4); + x5 = WRAPLOW(s1 - s5); + x6 = WRAPLOW(s2 - s6); + x7 = WRAPLOW(s3 - s7); + x8 = WRAPLOW(dct_const_round_shift(s8 + s12)); + x9 = WRAPLOW(dct_const_round_shift(s9 + s13)); + x10 = WRAPLOW(dct_const_round_shift(s10 + s14)); + x11 = WRAPLOW(dct_const_round_shift(s11 + s15)); + x12 = WRAPLOW(dct_const_round_shift(s8 - s12)); + x13 = WRAPLOW(dct_const_round_shift(s9 - s13)); + x14 = WRAPLOW(dct_const_round_shift(s10 - s14)); + x15 = WRAPLOW(dct_const_round_shift(s11 - s15)); // stage 3 s0 = x0; s1 = x1; s2 = x2; s3 = x3; - s4 = x4 * cospi_8_64 + x5 * cospi_24_64; + s4 = x4 * cospi_8_64 + x5 * cospi_24_64; s5 = x4 * cospi_24_64 - x5 * cospi_8_64; - s6 = - x6 * cospi_24_64 + x7 * cospi_8_64; - s7 = x6 * cospi_8_64 + x7 * cospi_24_64; + s6 = -x6 * cospi_24_64 + x7 * cospi_8_64; + s7 = x6 * cospi_8_64 + x7 * cospi_24_64; s8 = x8; s9 = x9; s10 = x10; s11 = x11; - s12 = x12 * cospi_8_64 + x13 * cospi_24_64; + s12 = x12 * cospi_8_64 + x13 * cospi_24_64; s13 = x12 * cospi_24_64 - x13 * cospi_8_64; - s14 = - x14 * cospi_24_64 + x15 * cospi_8_64; - s15 = x14 * cospi_8_64 + x15 * cospi_24_64; + s14 = -x14 * cospi_24_64 + x15 * cospi_8_64; + s15 = x14 * cospi_8_64 + x15 * cospi_24_64; - x0 = WRAPLOW(check_range(s0 + s2), 8); - x1 = WRAPLOW(check_range(s1 + s3), 8); - x2 = WRAPLOW(check_range(s0 - s2), 8); - x3 = WRAPLOW(check_range(s1 - s3), 8); - x4 = WRAPLOW(dct_const_round_shift(s4 + s6), 8); - x5 = WRAPLOW(dct_const_round_shift(s5 + s7), 8); - x6 = WRAPLOW(dct_const_round_shift(s4 - s6), 8); - x7 = WRAPLOW(dct_const_round_shift(s5 - s7), 8); - x8 = WRAPLOW(check_range(s8 + s10), 8); - x9 = 
WRAPLOW(check_range(s9 + s11), 8); - x10 = WRAPLOW(check_range(s8 - s10), 8); - x11 = WRAPLOW(check_range(s9 - s11), 8); - x12 = WRAPLOW(dct_const_round_shift(s12 + s14), 8); - x13 = WRAPLOW(dct_const_round_shift(s13 + s15), 8); - x14 = WRAPLOW(dct_const_round_shift(s12 - s14), 8); - x15 = WRAPLOW(dct_const_round_shift(s13 - s15), 8); + x0 = WRAPLOW(s0 + s2); + x1 = WRAPLOW(s1 + s3); + x2 = WRAPLOW(s0 - s2); + x3 = WRAPLOW(s1 - s3); + x4 = WRAPLOW(dct_const_round_shift(s4 + s6)); + x5 = WRAPLOW(dct_const_round_shift(s5 + s7)); + x6 = WRAPLOW(dct_const_round_shift(s4 - s6)); + x7 = WRAPLOW(dct_const_round_shift(s5 - s7)); + x8 = WRAPLOW(s8 + s10); + x9 = WRAPLOW(s9 + s11); + x10 = WRAPLOW(s8 - s10); + x11 = WRAPLOW(s9 - s11); + x12 = WRAPLOW(dct_const_round_shift(s12 + s14)); + x13 = WRAPLOW(dct_const_round_shift(s13 + s15)); + x14 = WRAPLOW(dct_const_round_shift(s12 - s14)); + x15 = WRAPLOW(dct_const_round_shift(s13 - s15)); // stage 4 - s2 = (- cospi_16_64) * (x2 + x3); + s2 = (-cospi_16_64) * (x2 + x3); s3 = cospi_16_64 * (x2 - x3); s6 = cospi_16_64 * (x6 + x7); - s7 = cospi_16_64 * (- x6 + x7); + s7 = cospi_16_64 * (-x6 + x7); s10 = cospi_16_64 * (x10 + x11); - s11 = cospi_16_64 * (- x10 + x11); - s14 = (- cospi_16_64) * (x14 + x15); + s11 = cospi_16_64 * (-x10 + x11); + s14 = (-cospi_16_64) * (x14 + x15); s15 = cospi_16_64 * (x14 - x15); - x2 = WRAPLOW(dct_const_round_shift(s2), 8); - x3 = WRAPLOW(dct_const_round_shift(s3), 8); - x6 = WRAPLOW(dct_const_round_shift(s6), 8); - x7 = WRAPLOW(dct_const_round_shift(s7), 8); - x10 = WRAPLOW(dct_const_round_shift(s10), 8); - x11 = WRAPLOW(dct_const_round_shift(s11), 8); - x14 = WRAPLOW(dct_const_round_shift(s14), 8); - x15 = WRAPLOW(dct_const_round_shift(s15), 8); + x2 = WRAPLOW(dct_const_round_shift(s2)); + x3 = WRAPLOW(dct_const_round_shift(s3)); + x6 = WRAPLOW(dct_const_round_shift(s6)); + x7 = WRAPLOW(dct_const_round_shift(s7)); + x10 = WRAPLOW(dct_const_round_shift(s10)); + x11 = 
WRAPLOW(dct_const_round_shift(s11)); + x14 = WRAPLOW(dct_const_round_shift(s14)); + x15 = WRAPLOW(dct_const_round_shift(s15)); - output[0] = WRAPLOW(x0, 8); - output[1] = WRAPLOW(-x8, 8); - output[2] = WRAPLOW(x12, 8); - output[3] = WRAPLOW(-x4, 8); - output[4] = WRAPLOW(x6, 8); - output[5] = WRAPLOW(x14, 8); - output[6] = WRAPLOW(x10, 8); - output[7] = WRAPLOW(x2, 8); - output[8] = WRAPLOW(x3, 8); - output[9] = WRAPLOW(x11, 8); - output[10] = WRAPLOW(x15, 8); - output[11] = WRAPLOW(x7, 8); - output[12] = WRAPLOW(x5, 8); - output[13] = WRAPLOW(-x13, 8); - output[14] = WRAPLOW(x9, 8); - output[15] = WRAPLOW(-x1, 8); + output[0] = WRAPLOW(x0); + output[1] = WRAPLOW(-x8); + output[2] = WRAPLOW(x12); + output[3] = WRAPLOW(-x4); + output[4] = WRAPLOW(x6); + output[5] = WRAPLOW(x14); + output[6] = WRAPLOW(x10); + output[7] = WRAPLOW(x2); + output[8] = WRAPLOW(x3); + output[9] = WRAPLOW(x11); + output[10] = WRAPLOW(x15); + output[11] = WRAPLOW(x7); + output[12] = WRAPLOW(x5); + output[13] = WRAPLOW(-x13); + output[14] = WRAPLOW(x9); + output[15] = WRAPLOW(-x1); } void vpx_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest, @@ -765,8 +760,7 @@ void vpx_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest, // Then transform columns for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j*16 + i]; + for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i]; idct16_c(temp_in, temp_out); for (j = 0; j < 16; ++j) { dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], @@ -778,12 +772,11 @@ void vpx_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest, void vpx_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) { int i, j; tran_high_t a1; - tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), 8); - out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), 8); + tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64)); + out = WRAPLOW(dct_const_round_shift(out * cospi_16_64)); a1 = 
ROUND_POWER_OF_TWO(out, 6); for (j = 0; j < 16; ++j) { - for (i = 0; i < 16; ++i) - dest[i] = clip_pixel_add(dest[i], a1); + for (i = 0; i < 16; ++i) dest[i] = clip_pixel_add(dest[i], a1); dest += stride; } } @@ -812,43 +805,43 @@ void idct32_c(const tran_low_t *input, tran_low_t *output) { temp1 = input[1] * cospi_31_64 - input[31] * cospi_1_64; temp2 = input[1] * cospi_1_64 + input[31] * cospi_31_64; - step1[16] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[31] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[16] = WRAPLOW(dct_const_round_shift(temp1)); + step1[31] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = input[17] * cospi_15_64 - input[15] * cospi_17_64; temp2 = input[17] * cospi_17_64 + input[15] * cospi_15_64; - step1[17] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[30] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[17] = WRAPLOW(dct_const_round_shift(temp1)); + step1[30] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = input[9] * cospi_23_64 - input[23] * cospi_9_64; temp2 = input[9] * cospi_9_64 + input[23] * cospi_23_64; - step1[18] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[29] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[18] = WRAPLOW(dct_const_round_shift(temp1)); + step1[29] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = input[25] * cospi_7_64 - input[7] * cospi_25_64; temp2 = input[25] * cospi_25_64 + input[7] * cospi_7_64; - step1[19] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[28] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[19] = WRAPLOW(dct_const_round_shift(temp1)); + step1[28] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = input[5] * cospi_27_64 - input[27] * cospi_5_64; temp2 = input[5] * cospi_5_64 + input[27] * cospi_27_64; - step1[20] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[27] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[20] = WRAPLOW(dct_const_round_shift(temp1)); + step1[27] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = input[21] * cospi_11_64 - input[11] 
* cospi_21_64; temp2 = input[21] * cospi_21_64 + input[11] * cospi_11_64; - step1[21] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[26] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[21] = WRAPLOW(dct_const_round_shift(temp1)); + step1[26] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = input[13] * cospi_19_64 - input[19] * cospi_13_64; temp2 = input[13] * cospi_13_64 + input[19] * cospi_19_64; - step1[22] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[25] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[22] = WRAPLOW(dct_const_round_shift(temp1)); + step1[25] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = input[29] * cospi_3_64 - input[3] * cospi_29_64; temp2 = input[29] * cospi_29_64 + input[3] * cospi_3_64; - step1[23] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[24] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[23] = WRAPLOW(dct_const_round_shift(temp1)); + step1[24] = WRAPLOW(dct_const_round_shift(temp2)); // stage 2 step2[0] = step1[0]; @@ -862,40 +855,40 @@ void idct32_c(const tran_low_t *input, tran_low_t *output) { temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64; temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64; - step2[8] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[15] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[8] = WRAPLOW(dct_const_round_shift(temp1)); + step2[15] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64; temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64; - step2[9] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[14] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[9] = WRAPLOW(dct_const_round_shift(temp1)); + step2[14] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64; temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64; - step2[10] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[13] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[10] = 
WRAPLOW(dct_const_round_shift(temp1)); + step2[13] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64; temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64; - step2[11] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[12] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[11] = WRAPLOW(dct_const_round_shift(temp1)); + step2[12] = WRAPLOW(dct_const_round_shift(temp2)); - step2[16] = WRAPLOW(step1[16] + step1[17], 8); - step2[17] = WRAPLOW(step1[16] - step1[17], 8); - step2[18] = WRAPLOW(-step1[18] + step1[19], 8); - step2[19] = WRAPLOW(step1[18] + step1[19], 8); - step2[20] = WRAPLOW(step1[20] + step1[21], 8); - step2[21] = WRAPLOW(step1[20] - step1[21], 8); - step2[22] = WRAPLOW(-step1[22] + step1[23], 8); - step2[23] = WRAPLOW(step1[22] + step1[23], 8); - step2[24] = WRAPLOW(step1[24] + step1[25], 8); - step2[25] = WRAPLOW(step1[24] - step1[25], 8); - step2[26] = WRAPLOW(-step1[26] + step1[27], 8); - step2[27] = WRAPLOW(step1[26] + step1[27], 8); - step2[28] = WRAPLOW(step1[28] + step1[29], 8); - step2[29] = WRAPLOW(step1[28] - step1[29], 8); - step2[30] = WRAPLOW(-step1[30] + step1[31], 8); - step2[31] = WRAPLOW(step1[30] + step1[31], 8); + step2[16] = WRAPLOW(step1[16] + step1[17]); + step2[17] = WRAPLOW(step1[16] - step1[17]); + step2[18] = WRAPLOW(-step1[18] + step1[19]); + step2[19] = WRAPLOW(step1[18] + step1[19]); + step2[20] = WRAPLOW(step1[20] + step1[21]); + step2[21] = WRAPLOW(step1[20] - step1[21]); + step2[22] = WRAPLOW(-step1[22] + step1[23]); + step2[23] = WRAPLOW(step1[22] + step1[23]); + step2[24] = WRAPLOW(step1[24] + step1[25]); + step2[25] = WRAPLOW(step1[24] - step1[25]); + step2[26] = WRAPLOW(-step1[26] + step1[27]); + step2[27] = WRAPLOW(step1[26] + step1[27]); + step2[28] = WRAPLOW(step1[28] + step1[29]); + step2[29] = WRAPLOW(step1[28] - step1[29]); + step2[30] = WRAPLOW(-step1[30] + step1[31]); + step2[31] = WRAPLOW(step1[30] + step1[31]); // stage 3 step1[0] = step2[0]; @@ -905,42 
+898,42 @@ void idct32_c(const tran_low_t *input, tran_low_t *output) { temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64; temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64; - step1[4] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[7] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[4] = WRAPLOW(dct_const_round_shift(temp1)); + step1[7] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64; temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64; - step1[5] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[6] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[5] = WRAPLOW(dct_const_round_shift(temp1)); + step1[6] = WRAPLOW(dct_const_round_shift(temp2)); - step1[8] = WRAPLOW(step2[8] + step2[9], 8); - step1[9] = WRAPLOW(step2[8] - step2[9], 8); - step1[10] = WRAPLOW(-step2[10] + step2[11], 8); - step1[11] = WRAPLOW(step2[10] + step2[11], 8); - step1[12] = WRAPLOW(step2[12] + step2[13], 8); - step1[13] = WRAPLOW(step2[12] - step2[13], 8); - step1[14] = WRAPLOW(-step2[14] + step2[15], 8); - step1[15] = WRAPLOW(step2[14] + step2[15], 8); + step1[8] = WRAPLOW(step2[8] + step2[9]); + step1[9] = WRAPLOW(step2[8] - step2[9]); + step1[10] = WRAPLOW(-step2[10] + step2[11]); + step1[11] = WRAPLOW(step2[10] + step2[11]); + step1[12] = WRAPLOW(step2[12] + step2[13]); + step1[13] = WRAPLOW(step2[12] - step2[13]); + step1[14] = WRAPLOW(-step2[14] + step2[15]); + step1[15] = WRAPLOW(step2[14] + step2[15]); step1[16] = step2[16]; step1[31] = step2[31]; temp1 = -step2[17] * cospi_4_64 + step2[30] * cospi_28_64; temp2 = step2[17] * cospi_28_64 + step2[30] * cospi_4_64; - step1[17] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[30] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[17] = WRAPLOW(dct_const_round_shift(temp1)); + step1[30] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = -step2[18] * cospi_28_64 - step2[29] * cospi_4_64; temp2 = -step2[18] * cospi_4_64 + step2[29] * cospi_28_64; - step1[18] = 
WRAPLOW(dct_const_round_shift(temp1), 8); - step1[29] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[18] = WRAPLOW(dct_const_round_shift(temp1)); + step1[29] = WRAPLOW(dct_const_round_shift(temp2)); step1[19] = step2[19]; step1[20] = step2[20]; temp1 = -step2[21] * cospi_20_64 + step2[26] * cospi_12_64; temp2 = step2[21] * cospi_12_64 + step2[26] * cospi_20_64; - step1[21] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[26] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[21] = WRAPLOW(dct_const_round_shift(temp1)); + step1[26] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = -step2[22] * cospi_12_64 - step2[25] * cospi_20_64; temp2 = -step2[22] * cospi_20_64 + step2[25] * cospi_12_64; - step1[22] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[25] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[22] = WRAPLOW(dct_const_round_shift(temp1)); + step1[25] = WRAPLOW(dct_const_round_shift(temp2)); step1[23] = step2[23]; step1[24] = step2[24]; step1[27] = step2[27]; @@ -949,87 +942,87 @@ void idct32_c(const tran_low_t *input, tran_low_t *output) { // stage 4 temp1 = (step1[0] + step1[1]) * cospi_16_64; temp2 = (step1[0] - step1[1]) * cospi_16_64; - step2[0] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[1] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[0] = WRAPLOW(dct_const_round_shift(temp1)); + step2[1] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64; temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64; - step2[2] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[3] = WRAPLOW(dct_const_round_shift(temp2), 8); - step2[4] = WRAPLOW(step1[4] + step1[5], 8); - step2[5] = WRAPLOW(step1[4] - step1[5], 8); - step2[6] = WRAPLOW(-step1[6] + step1[7], 8); - step2[7] = WRAPLOW(step1[6] + step1[7], 8); + step2[2] = WRAPLOW(dct_const_round_shift(temp1)); + step2[3] = WRAPLOW(dct_const_round_shift(temp2)); + step2[4] = WRAPLOW(step1[4] + step1[5]); + step2[5] = WRAPLOW(step1[4] - step1[5]); + step2[6] 
= WRAPLOW(-step1[6] + step1[7]); + step2[7] = WRAPLOW(step1[6] + step1[7]); step2[8] = step1[8]; step2[15] = step1[15]; temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64; temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64; - step2[9] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[14] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[9] = WRAPLOW(dct_const_round_shift(temp1)); + step2[14] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64; temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64; - step2[10] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[13] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[10] = WRAPLOW(dct_const_round_shift(temp1)); + step2[13] = WRAPLOW(dct_const_round_shift(temp2)); step2[11] = step1[11]; step2[12] = step1[12]; - step2[16] = WRAPLOW(step1[16] + step1[19], 8); - step2[17] = WRAPLOW(step1[17] + step1[18], 8); - step2[18] = WRAPLOW(step1[17] - step1[18], 8); - step2[19] = WRAPLOW(step1[16] - step1[19], 8); - step2[20] = WRAPLOW(-step1[20] + step1[23], 8); - step2[21] = WRAPLOW(-step1[21] + step1[22], 8); - step2[22] = WRAPLOW(step1[21] + step1[22], 8); - step2[23] = WRAPLOW(step1[20] + step1[23], 8); + step2[16] = WRAPLOW(step1[16] + step1[19]); + step2[17] = WRAPLOW(step1[17] + step1[18]); + step2[18] = WRAPLOW(step1[17] - step1[18]); + step2[19] = WRAPLOW(step1[16] - step1[19]); + step2[20] = WRAPLOW(-step1[20] + step1[23]); + step2[21] = WRAPLOW(-step1[21] + step1[22]); + step2[22] = WRAPLOW(step1[21] + step1[22]); + step2[23] = WRAPLOW(step1[20] + step1[23]); - step2[24] = WRAPLOW(step1[24] + step1[27], 8); - step2[25] = WRAPLOW(step1[25] + step1[26], 8); - step2[26] = WRAPLOW(step1[25] - step1[26], 8); - step2[27] = WRAPLOW(step1[24] - step1[27], 8); - step2[28] = WRAPLOW(-step1[28] + step1[31], 8); - step2[29] = WRAPLOW(-step1[29] + step1[30], 8); - step2[30] = WRAPLOW(step1[29] + step1[30], 8); - step2[31] = WRAPLOW(step1[28] + step1[31], 8); + 
step2[24] = WRAPLOW(step1[24] + step1[27]); + step2[25] = WRAPLOW(step1[25] + step1[26]); + step2[26] = WRAPLOW(step1[25] - step1[26]); + step2[27] = WRAPLOW(step1[24] - step1[27]); + step2[28] = WRAPLOW(-step1[28] + step1[31]); + step2[29] = WRAPLOW(-step1[29] + step1[30]); + step2[30] = WRAPLOW(step1[29] + step1[30]); + step2[31] = WRAPLOW(step1[28] + step1[31]); // stage 5 - step1[0] = WRAPLOW(step2[0] + step2[3], 8); - step1[1] = WRAPLOW(step2[1] + step2[2], 8); - step1[2] = WRAPLOW(step2[1] - step2[2], 8); - step1[3] = WRAPLOW(step2[0] - step2[3], 8); + step1[0] = WRAPLOW(step2[0] + step2[3]); + step1[1] = WRAPLOW(step2[1] + step2[2]); + step1[2] = WRAPLOW(step2[1] - step2[2]); + step1[3] = WRAPLOW(step2[0] - step2[3]); step1[4] = step2[4]; temp1 = (step2[6] - step2[5]) * cospi_16_64; temp2 = (step2[5] + step2[6]) * cospi_16_64; - step1[5] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[6] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[5] = WRAPLOW(dct_const_round_shift(temp1)); + step1[6] = WRAPLOW(dct_const_round_shift(temp2)); step1[7] = step2[7]; - step1[8] = WRAPLOW(step2[8] + step2[11], 8); - step1[9] = WRAPLOW(step2[9] + step2[10], 8); - step1[10] = WRAPLOW(step2[9] - step2[10], 8); - step1[11] = WRAPLOW(step2[8] - step2[11], 8); - step1[12] = WRAPLOW(-step2[12] + step2[15], 8); - step1[13] = WRAPLOW(-step2[13] + step2[14], 8); - step1[14] = WRAPLOW(step2[13] + step2[14], 8); - step1[15] = WRAPLOW(step2[12] + step2[15], 8); + step1[8] = WRAPLOW(step2[8] + step2[11]); + step1[9] = WRAPLOW(step2[9] + step2[10]); + step1[10] = WRAPLOW(step2[9] - step2[10]); + step1[11] = WRAPLOW(step2[8] - step2[11]); + step1[12] = WRAPLOW(-step2[12] + step2[15]); + step1[13] = WRAPLOW(-step2[13] + step2[14]); + step1[14] = WRAPLOW(step2[13] + step2[14]); + step1[15] = WRAPLOW(step2[12] + step2[15]); step1[16] = step2[16]; step1[17] = step2[17]; temp1 = -step2[18] * cospi_8_64 + step2[29] * cospi_24_64; temp2 = step2[18] * cospi_24_64 + step2[29] * cospi_8_64; - 
step1[18] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[29] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[18] = WRAPLOW(dct_const_round_shift(temp1)); + step1[29] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = -step2[19] * cospi_8_64 + step2[28] * cospi_24_64; temp2 = step2[19] * cospi_24_64 + step2[28] * cospi_8_64; - step1[19] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[28] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[19] = WRAPLOW(dct_const_round_shift(temp1)); + step1[28] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = -step2[20] * cospi_24_64 - step2[27] * cospi_8_64; temp2 = -step2[20] * cospi_8_64 + step2[27] * cospi_24_64; - step1[20] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[27] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[20] = WRAPLOW(dct_const_round_shift(temp1)); + step1[27] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = -step2[21] * cospi_24_64 - step2[26] * cospi_8_64; temp2 = -step2[21] * cospi_8_64 + step2[26] * cospi_24_64; - step1[21] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[26] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[21] = WRAPLOW(dct_const_round_shift(temp1)); + step1[26] = WRAPLOW(dct_const_round_shift(temp2)); step1[22] = step2[22]; step1[23] = step2[23]; step1[24] = step2[24]; @@ -1038,62 +1031,62 @@ void idct32_c(const tran_low_t *input, tran_low_t *output) { step1[31] = step2[31]; // stage 6 - step2[0] = WRAPLOW(step1[0] + step1[7], 8); - step2[1] = WRAPLOW(step1[1] + step1[6], 8); - step2[2] = WRAPLOW(step1[2] + step1[5], 8); - step2[3] = WRAPLOW(step1[3] + step1[4], 8); - step2[4] = WRAPLOW(step1[3] - step1[4], 8); - step2[5] = WRAPLOW(step1[2] - step1[5], 8); - step2[6] = WRAPLOW(step1[1] - step1[6], 8); - step2[7] = WRAPLOW(step1[0] - step1[7], 8); + step2[0] = WRAPLOW(step1[0] + step1[7]); + step2[1] = WRAPLOW(step1[1] + step1[6]); + step2[2] = WRAPLOW(step1[2] + step1[5]); + step2[3] = WRAPLOW(step1[3] + step1[4]); + step2[4] = WRAPLOW(step1[3] - step1[4]); + 
step2[5] = WRAPLOW(step1[2] - step1[5]); + step2[6] = WRAPLOW(step1[1] - step1[6]); + step2[7] = WRAPLOW(step1[0] - step1[7]); step2[8] = step1[8]; step2[9] = step1[9]; temp1 = (-step1[10] + step1[13]) * cospi_16_64; temp2 = (step1[10] + step1[13]) * cospi_16_64; - step2[10] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[13] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[10] = WRAPLOW(dct_const_round_shift(temp1)); + step2[13] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = (-step1[11] + step1[12]) * cospi_16_64; temp2 = (step1[11] + step1[12]) * cospi_16_64; - step2[11] = WRAPLOW(dct_const_round_shift(temp1), 8); - step2[12] = WRAPLOW(dct_const_round_shift(temp2), 8); + step2[11] = WRAPLOW(dct_const_round_shift(temp1)); + step2[12] = WRAPLOW(dct_const_round_shift(temp2)); step2[14] = step1[14]; step2[15] = step1[15]; - step2[16] = WRAPLOW(step1[16] + step1[23], 8); - step2[17] = WRAPLOW(step1[17] + step1[22], 8); - step2[18] = WRAPLOW(step1[18] + step1[21], 8); - step2[19] = WRAPLOW(step1[19] + step1[20], 8); - step2[20] = WRAPLOW(step1[19] - step1[20], 8); - step2[21] = WRAPLOW(step1[18] - step1[21], 8); - step2[22] = WRAPLOW(step1[17] - step1[22], 8); - step2[23] = WRAPLOW(step1[16] - step1[23], 8); + step2[16] = WRAPLOW(step1[16] + step1[23]); + step2[17] = WRAPLOW(step1[17] + step1[22]); + step2[18] = WRAPLOW(step1[18] + step1[21]); + step2[19] = WRAPLOW(step1[19] + step1[20]); + step2[20] = WRAPLOW(step1[19] - step1[20]); + step2[21] = WRAPLOW(step1[18] - step1[21]); + step2[22] = WRAPLOW(step1[17] - step1[22]); + step2[23] = WRAPLOW(step1[16] - step1[23]); - step2[24] = WRAPLOW(-step1[24] + step1[31], 8); - step2[25] = WRAPLOW(-step1[25] + step1[30], 8); - step2[26] = WRAPLOW(-step1[26] + step1[29], 8); - step2[27] = WRAPLOW(-step1[27] + step1[28], 8); - step2[28] = WRAPLOW(step1[27] + step1[28], 8); - step2[29] = WRAPLOW(step1[26] + step1[29], 8); - step2[30] = WRAPLOW(step1[25] + step1[30], 8); - step2[31] = WRAPLOW(step1[24] + step1[31], 8); + 
step2[24] = WRAPLOW(-step1[24] + step1[31]); + step2[25] = WRAPLOW(-step1[25] + step1[30]); + step2[26] = WRAPLOW(-step1[26] + step1[29]); + step2[27] = WRAPLOW(-step1[27] + step1[28]); + step2[28] = WRAPLOW(step1[27] + step1[28]); + step2[29] = WRAPLOW(step1[26] + step1[29]); + step2[30] = WRAPLOW(step1[25] + step1[30]); + step2[31] = WRAPLOW(step1[24] + step1[31]); // stage 7 - step1[0] = WRAPLOW(step2[0] + step2[15], 8); - step1[1] = WRAPLOW(step2[1] + step2[14], 8); - step1[2] = WRAPLOW(step2[2] + step2[13], 8); - step1[3] = WRAPLOW(step2[3] + step2[12], 8); - step1[4] = WRAPLOW(step2[4] + step2[11], 8); - step1[5] = WRAPLOW(step2[5] + step2[10], 8); - step1[6] = WRAPLOW(step2[6] + step2[9], 8); - step1[7] = WRAPLOW(step2[7] + step2[8], 8); - step1[8] = WRAPLOW(step2[7] - step2[8], 8); - step1[9] = WRAPLOW(step2[6] - step2[9], 8); - step1[10] = WRAPLOW(step2[5] - step2[10], 8); - step1[11] = WRAPLOW(step2[4] - step2[11], 8); - step1[12] = WRAPLOW(step2[3] - step2[12], 8); - step1[13] = WRAPLOW(step2[2] - step2[13], 8); - step1[14] = WRAPLOW(step2[1] - step2[14], 8); - step1[15] = WRAPLOW(step2[0] - step2[15], 8); + step1[0] = WRAPLOW(step2[0] + step2[15]); + step1[1] = WRAPLOW(step2[1] + step2[14]); + step1[2] = WRAPLOW(step2[2] + step2[13]); + step1[3] = WRAPLOW(step2[3] + step2[12]); + step1[4] = WRAPLOW(step2[4] + step2[11]); + step1[5] = WRAPLOW(step2[5] + step2[10]); + step1[6] = WRAPLOW(step2[6] + step2[9]); + step1[7] = WRAPLOW(step2[7] + step2[8]); + step1[8] = WRAPLOW(step2[7] - step2[8]); + step1[9] = WRAPLOW(step2[6] - step2[9]); + step1[10] = WRAPLOW(step2[5] - step2[10]); + step1[11] = WRAPLOW(step2[4] - step2[11]); + step1[12] = WRAPLOW(step2[3] - step2[12]); + step1[13] = WRAPLOW(step2[2] - step2[13]); + step1[14] = WRAPLOW(step2[1] - step2[14]); + step1[15] = WRAPLOW(step2[0] - step2[15]); step1[16] = step2[16]; step1[17] = step2[17]; @@ -1101,58 +1094,58 @@ void idct32_c(const tran_low_t *input, tran_low_t *output) { step1[19] = step2[19]; 
temp1 = (-step2[20] + step2[27]) * cospi_16_64; temp2 = (step2[20] + step2[27]) * cospi_16_64; - step1[20] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[27] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[20] = WRAPLOW(dct_const_round_shift(temp1)); + step1[27] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = (-step2[21] + step2[26]) * cospi_16_64; temp2 = (step2[21] + step2[26]) * cospi_16_64; - step1[21] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[26] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[21] = WRAPLOW(dct_const_round_shift(temp1)); + step1[26] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = (-step2[22] + step2[25]) * cospi_16_64; temp2 = (step2[22] + step2[25]) * cospi_16_64; - step1[22] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[25] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[22] = WRAPLOW(dct_const_round_shift(temp1)); + step1[25] = WRAPLOW(dct_const_round_shift(temp2)); temp1 = (-step2[23] + step2[24]) * cospi_16_64; temp2 = (step2[23] + step2[24]) * cospi_16_64; - step1[23] = WRAPLOW(dct_const_round_shift(temp1), 8); - step1[24] = WRAPLOW(dct_const_round_shift(temp2), 8); + step1[23] = WRAPLOW(dct_const_round_shift(temp1)); + step1[24] = WRAPLOW(dct_const_round_shift(temp2)); step1[28] = step2[28]; step1[29] = step2[29]; step1[30] = step2[30]; step1[31] = step2[31]; // final stage - output[0] = WRAPLOW(step1[0] + step1[31], 8); - output[1] = WRAPLOW(step1[1] + step1[30], 8); - output[2] = WRAPLOW(step1[2] + step1[29], 8); - output[3] = WRAPLOW(step1[3] + step1[28], 8); - output[4] = WRAPLOW(step1[4] + step1[27], 8); - output[5] = WRAPLOW(step1[5] + step1[26], 8); - output[6] = WRAPLOW(step1[6] + step1[25], 8); - output[7] = WRAPLOW(step1[7] + step1[24], 8); - output[8] = WRAPLOW(step1[8] + step1[23], 8); - output[9] = WRAPLOW(step1[9] + step1[22], 8); - output[10] = WRAPLOW(step1[10] + step1[21], 8); - output[11] = WRAPLOW(step1[11] + step1[20], 8); - output[12] = WRAPLOW(step1[12] + step1[19], 8); - 
output[13] = WRAPLOW(step1[13] + step1[18], 8); - output[14] = WRAPLOW(step1[14] + step1[17], 8); - output[15] = WRAPLOW(step1[15] + step1[16], 8); - output[16] = WRAPLOW(step1[15] - step1[16], 8); - output[17] = WRAPLOW(step1[14] - step1[17], 8); - output[18] = WRAPLOW(step1[13] - step1[18], 8); - output[19] = WRAPLOW(step1[12] - step1[19], 8); - output[20] = WRAPLOW(step1[11] - step1[20], 8); - output[21] = WRAPLOW(step1[10] - step1[21], 8); - output[22] = WRAPLOW(step1[9] - step1[22], 8); - output[23] = WRAPLOW(step1[8] - step1[23], 8); - output[24] = WRAPLOW(step1[7] - step1[24], 8); - output[25] = WRAPLOW(step1[6] - step1[25], 8); - output[26] = WRAPLOW(step1[5] - step1[26], 8); - output[27] = WRAPLOW(step1[4] - step1[27], 8); - output[28] = WRAPLOW(step1[3] - step1[28], 8); - output[29] = WRAPLOW(step1[2] - step1[29], 8); - output[30] = WRAPLOW(step1[1] - step1[30], 8); - output[31] = WRAPLOW(step1[0] - step1[31], 8); + output[0] = WRAPLOW(step1[0] + step1[31]); + output[1] = WRAPLOW(step1[1] + step1[30]); + output[2] = WRAPLOW(step1[2] + step1[29]); + output[3] = WRAPLOW(step1[3] + step1[28]); + output[4] = WRAPLOW(step1[4] + step1[27]); + output[5] = WRAPLOW(step1[5] + step1[26]); + output[6] = WRAPLOW(step1[6] + step1[25]); + output[7] = WRAPLOW(step1[7] + step1[24]); + output[8] = WRAPLOW(step1[8] + step1[23]); + output[9] = WRAPLOW(step1[9] + step1[22]); + output[10] = WRAPLOW(step1[10] + step1[21]); + output[11] = WRAPLOW(step1[11] + step1[20]); + output[12] = WRAPLOW(step1[12] + step1[19]); + output[13] = WRAPLOW(step1[13] + step1[18]); + output[14] = WRAPLOW(step1[14] + step1[17]); + output[15] = WRAPLOW(step1[15] + step1[16]); + output[16] = WRAPLOW(step1[15] - step1[16]); + output[17] = WRAPLOW(step1[14] - step1[17]); + output[18] = WRAPLOW(step1[13] - step1[18]); + output[19] = WRAPLOW(step1[12] - step1[19]); + output[20] = WRAPLOW(step1[11] - step1[20]); + output[21] = WRAPLOW(step1[10] - step1[21]); + output[22] = WRAPLOW(step1[9] - step1[22]); + 
output[23] = WRAPLOW(step1[8] - step1[23]); + output[24] = WRAPLOW(step1[7] - step1[24]); + output[25] = WRAPLOW(step1[6] - step1[25]); + output[26] = WRAPLOW(step1[5] - step1[26]); + output[27] = WRAPLOW(step1[4] - step1[27]); + output[28] = WRAPLOW(step1[3] - step1[28]); + output[29] = WRAPLOW(step1[2] - step1[29]); + output[30] = WRAPLOW(step1[1] - step1[30]); + output[31] = WRAPLOW(step1[0] - step1[31]); } void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, @@ -1165,8 +1158,7 @@ void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, // Rows for (i = 0; i < 32; ++i) { int16_t zero_coeff[16]; - for (j = 0; j < 16; ++j) - zero_coeff[j] = input[2 * j] | input[2 * j + 1]; + for (j = 0; j < 16; ++j) zero_coeff[j] = input[2 * j] | input[2 * j + 1]; for (j = 0; j < 8; ++j) zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1]; for (j = 0; j < 4; ++j) @@ -1184,8 +1176,7 @@ void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, // Columns for (i = 0; i < 32; ++i) { - for (j = 0; j < 32; ++j) - temp_in[j] = out[j * 32 + i]; + for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i]; idct32_c(temp_in, temp_out); for (j = 0; j < 32; ++j) { dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], @@ -1196,7 +1187,7 @@ void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest, int stride) { - tran_low_t out[32 * 32] = {0}; + tran_low_t out[32 * 32] = { 0 }; tran_low_t *outptr = out; int i, j; tran_low_t temp_in[32], temp_out[32]; @@ -1211,8 +1202,7 @@ void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest, // Columns for (i = 0; i < 32; ++i) { - for (j = 0; j < 32; ++j) - temp_in[j] = out[j * 32 + i]; + for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i]; idct32_c(temp_in, temp_out); for (j = 0; j < 32; ++j) { dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], @@ -1223,7 +1213,7 @@ void vpx_idct32x32_135_add_c(const 
tran_low_t *input, uint8_t *dest, void vpx_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest, int stride) { - tran_low_t out[32 * 32] = {0}; + tran_low_t out[32 * 32] = { 0 }; tran_low_t *outptr = out; int i, j; tran_low_t temp_in[32], temp_out[32]; @@ -1238,8 +1228,7 @@ void vpx_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest, // Columns for (i = 0; i < 32; ++i) { - for (j = 0; j < 32; ++j) - temp_in[j] = out[j * 32 + i]; + for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i]; idct32_c(temp_in, temp_out); for (j = 0; j < 32; ++j) { dest[j * stride + i] = clip_pixel_add(dest[j * stride + i], @@ -1252,13 +1241,12 @@ void vpx_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) { int i, j; tran_high_t a1; - tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), 8); - out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), 8); + tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64)); + out = WRAPLOW(dct_const_round_shift(out * cospi_16_64)); a1 = ROUND_POWER_OF_TWO(out, 6); for (j = 0; j < 32; ++j) { - for (i = 0; i < 32; ++i) - dest[i] = clip_pixel_add(dest[i], a1); + for (i = 0; i < 32; ++i) dest[i] = clip_pixel_add(dest[i], a1); dest += stride; } } @@ -1287,10 +1275,10 @@ void vpx_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, c1 = e1 - c1; a1 -= b1; d1 += c1; - op[0] = WRAPLOW(a1, bd); - op[1] = WRAPLOW(b1, bd); - op[2] = WRAPLOW(c1, bd); - op[3] = WRAPLOW(d1, bd); + op[0] = HIGHBD_WRAPLOW(a1, bd); + op[1] = HIGHBD_WRAPLOW(b1, bd); + op[2] = HIGHBD_WRAPLOW(c1, bd); + op[3] = HIGHBD_WRAPLOW(d1, bd); ip += 4; op += 4; } @@ -1308,10 +1296,14 @@ void vpx_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, c1 = e1 - c1; a1 -= b1; d1 += c1; - dest[stride * 0] = highbd_clip_pixel_add(dest[stride * 0], a1, bd); - dest[stride * 1] = highbd_clip_pixel_add(dest[stride * 1], b1, bd); - dest[stride * 2] = highbd_clip_pixel_add(dest[stride * 2], c1, bd); - dest[stride * 3] = 
highbd_clip_pixel_add(dest[stride * 3], d1, bd); + dest[stride * 0] = + highbd_clip_pixel_add(dest[stride * 0], HIGHBD_WRAPLOW(a1, bd), bd); + dest[stride * 1] = + highbd_clip_pixel_add(dest[stride * 1], HIGHBD_WRAPLOW(b1, bd), bd); + dest[stride * 2] = + highbd_clip_pixel_add(dest[stride * 2], HIGHBD_WRAPLOW(c1, bd), bd); + dest[stride * 3] = + highbd_clip_pixel_add(dest[stride * 3], HIGHBD_WRAPLOW(d1, bd), bd); ip++; dest++; @@ -1326,26 +1318,26 @@ void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8, const tran_low_t *ip = in; tran_low_t *op = tmp; uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - (void) bd; + (void)bd; a1 = ip[0] >> UNIT_QUANT_SHIFT; e1 = a1 >> 1; a1 -= e1; - op[0] = WRAPLOW(a1, bd); - op[1] = op[2] = op[3] = WRAPLOW(e1, bd); + op[0] = HIGHBD_WRAPLOW(a1, bd); + op[1] = op[2] = op[3] = HIGHBD_WRAPLOW(e1, bd); ip = tmp; for (i = 0; i < 4; i++) { e1 = ip[0] >> 1; a1 = ip[0] - e1; - dest[dest_stride * 0] = highbd_clip_pixel_add( - dest[dest_stride * 0], a1, bd); - dest[dest_stride * 1] = highbd_clip_pixel_add( - dest[dest_stride * 1], e1, bd); - dest[dest_stride * 2] = highbd_clip_pixel_add( - dest[dest_stride * 2], e1, bd); - dest[dest_stride * 3] = highbd_clip_pixel_add( - dest[dest_stride * 3], e1, bd); + dest[dest_stride * 0] = + highbd_clip_pixel_add(dest[dest_stride * 0], a1, bd); + dest[dest_stride * 1] = + highbd_clip_pixel_add(dest[dest_stride * 1], e1, bd); + dest[dest_stride * 2] = + highbd_clip_pixel_add(dest[dest_stride * 2], e1, bd); + dest[dest_stride * 3] = + highbd_clip_pixel_add(dest[dest_stride * 3], e1, bd); ip++; dest++; } @@ -1354,22 +1346,22 @@ void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8, void vpx_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) { tran_low_t step[4]; tran_high_t temp1, temp2; - (void) bd; + (void)bd; // stage 1 temp1 = (input[0] + input[2]) * cospi_16_64; temp2 = (input[0] - input[2]) * cospi_16_64; - step[0] = 
WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step[1] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step[0] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step[1] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64; temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64; - step[2] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step[3] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step[2] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step[3] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); // stage 2 - output[0] = WRAPLOW(step[0] + step[3], bd); - output[1] = WRAPLOW(step[1] + step[2], bd); - output[2] = WRAPLOW(step[1] - step[2], bd); - output[3] = WRAPLOW(step[0] - step[3], bd); + output[0] = HIGHBD_WRAPLOW(step[0] + step[3], bd); + output[1] = HIGHBD_WRAPLOW(step[1] + step[2], bd); + output[2] = HIGHBD_WRAPLOW(step[1] - step[2], bd); + output[3] = HIGHBD_WRAPLOW(step[0] - step[3], bd); } void vpx_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, @@ -1389,8 +1381,7 @@ void vpx_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, // Columns for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = out[j * 4 + i]; + for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i]; vpx_highbd_idct4_c(temp_in, temp_out, bd); for (j = 0; j < 4; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( @@ -1403,11 +1394,11 @@ void vpx_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8, int dest_stride, int bd) { int i; tran_high_t a1; - tran_low_t out = WRAPLOW( - highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd); + tran_low_t out = + HIGHBD_WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64), bd); uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd); + out = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(out * 
cospi_16_64), bd); a1 = ROUND_POWER_OF_TWO(out, 4); for (i = 0; i < 4; i++) { @@ -1429,39 +1420,39 @@ void vpx_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) { step1[3] = input[6]; temp1 = input[1] * cospi_28_64 - input[7] * cospi_4_64; temp2 = input[1] * cospi_4_64 + input[7] * cospi_28_64; - step1[4] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[7] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[4] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[7] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64; temp2 = input[5] * cospi_20_64 + input[3] * cospi_12_64; - step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[5] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[6] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); // stage 2 & stage 3 - even half vpx_highbd_idct4_c(step1, step1, bd); // stage 2 - odd half - step2[4] = WRAPLOW(step1[4] + step1[5], bd); - step2[5] = WRAPLOW(step1[4] - step1[5], bd); - step2[6] = WRAPLOW(-step1[6] + step1[7], bd); - step2[7] = WRAPLOW(step1[6] + step1[7], bd); + step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd); + step2[5] = HIGHBD_WRAPLOW(step1[4] - step1[5], bd); + step2[6] = HIGHBD_WRAPLOW(-step1[6] + step1[7], bd); + step2[7] = HIGHBD_WRAPLOW(step1[6] + step1[7], bd); // stage 3 - odd half step1[4] = step2[4]; temp1 = (step2[6] - step2[5]) * cospi_16_64; temp2 = (step2[5] + step2[6]) * cospi_16_64; - step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[5] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[6] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); step1[7] = step2[7]; // stage 4 - output[0] = WRAPLOW(step1[0] + step1[7], bd); - output[1] = WRAPLOW(step1[1] + 
step1[6], bd); - output[2] = WRAPLOW(step1[2] + step1[5], bd); - output[3] = WRAPLOW(step1[3] + step1[4], bd); - output[4] = WRAPLOW(step1[3] - step1[4], bd); - output[5] = WRAPLOW(step1[2] - step1[5], bd); - output[6] = WRAPLOW(step1[1] - step1[6], bd); - output[7] = WRAPLOW(step1[0] - step1[7], bd); + output[0] = HIGHBD_WRAPLOW(step1[0] + step1[7], bd); + output[1] = HIGHBD_WRAPLOW(step1[1] + step1[6], bd); + output[2] = HIGHBD_WRAPLOW(step1[2] + step1[5], bd); + output[3] = HIGHBD_WRAPLOW(step1[3] + step1[4], bd); + output[4] = HIGHBD_WRAPLOW(step1[3] - step1[4], bd); + output[5] = HIGHBD_WRAPLOW(step1[2] - step1[5], bd); + output[6] = HIGHBD_WRAPLOW(step1[1] - step1[6], bd); + output[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd); } void vpx_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8, @@ -1481,8 +1472,7 @@ void vpx_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8, // Then transform columns. for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; + for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i]; vpx_highbd_idct8_c(temp_in, temp_out, bd); for (j = 0; j < 8; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( @@ -1495,14 +1485,13 @@ void vpx_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { int i, j; tran_high_t a1; - tran_low_t out = WRAPLOW( - highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd); + tran_low_t out = + HIGHBD_WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64), bd); uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd); + out = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64), bd); a1 = ROUND_POWER_OF_TWO(out, 5); for (j = 0; j < 8; ++j) { - for (i = 0; i < 8; ++i) - dest[i] = highbd_clip_pixel_add(dest[i], a1, bd); + for (i = 0; i < 8; ++i) dest[i] = highbd_clip_pixel_add(dest[i], a1, bd); dest += stride; } } @@ -1514,7 +1503,7 @@ void 
vpx_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) { tran_low_t x1 = input[1]; tran_low_t x2 = input[2]; tran_low_t x3 = input[3]; - (void) bd; + (void)bd; if (!(x0 | x1 | x2 | x3)) { memset(output, 0, 4 * sizeof(*output)); @@ -1528,7 +1517,7 @@ void vpx_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) { s4 = sinpi_1_9 * x2; s5 = sinpi_2_9 * x3; s6 = sinpi_4_9 * x3; - s7 = (tran_high_t)(x0 - x2 + x3); + s7 = (tran_high_t)HIGHBD_WRAPLOW(x0 - x2 + x3, bd); s0 = s0 + s3 + s5; s1 = s1 - s4 - s6; @@ -1539,10 +1528,10 @@ void vpx_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) { // The overall dynamic range is 14b (input) + 14b (multiplication scaling) // + 1b (addition) = 29b. // Hence the output bit depth is 15b. - output[0] = WRAPLOW(highbd_dct_const_round_shift(s0 + s3, bd), bd); - output[1] = WRAPLOW(highbd_dct_const_round_shift(s1 + s3, bd), bd); - output[2] = WRAPLOW(highbd_dct_const_round_shift(s2, bd), bd); - output[3] = WRAPLOW(highbd_dct_const_round_shift(s0 + s1 - s3, bd), bd); + output[0] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s0 + s3), bd); + output[1] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s1 + s3), bd); + output[2] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s2), bd); + output[3] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s0 + s1 - s3), bd); } void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) { @@ -1556,7 +1545,7 @@ void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) { tran_low_t x5 = input[4]; tran_low_t x6 = input[1]; tran_low_t x7 = input[6]; - (void) bd; + (void)bd; if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) { memset(output, 0, 8 * sizeof(*output)); @@ -1564,42 +1553,42 @@ void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) { } // stage 1 - s0 = cospi_2_64 * x0 + cospi_30_64 * x1; - s1 = cospi_30_64 * x0 - cospi_2_64 * x1; + s0 = cospi_2_64 * x0 + cospi_30_64 * x1; + s1 = cospi_30_64 * x0 
- cospi_2_64 * x1; s2 = cospi_10_64 * x2 + cospi_22_64 * x3; s3 = cospi_22_64 * x2 - cospi_10_64 * x3; s4 = cospi_18_64 * x4 + cospi_14_64 * x5; s5 = cospi_14_64 * x4 - cospi_18_64 * x5; - s6 = cospi_26_64 * x6 + cospi_6_64 * x7; - s7 = cospi_6_64 * x6 - cospi_26_64 * x7; + s6 = cospi_26_64 * x6 + cospi_6_64 * x7; + s7 = cospi_6_64 * x6 - cospi_26_64 * x7; - x0 = WRAPLOW(highbd_dct_const_round_shift(s0 + s4, bd), bd); - x1 = WRAPLOW(highbd_dct_const_round_shift(s1 + s5, bd), bd); - x2 = WRAPLOW(highbd_dct_const_round_shift(s2 + s6, bd), bd); - x3 = WRAPLOW(highbd_dct_const_round_shift(s3 + s7, bd), bd); - x4 = WRAPLOW(highbd_dct_const_round_shift(s0 - s4, bd), bd); - x5 = WRAPLOW(highbd_dct_const_round_shift(s1 - s5, bd), bd); - x6 = WRAPLOW(highbd_dct_const_round_shift(s2 - s6, bd), bd); - x7 = WRAPLOW(highbd_dct_const_round_shift(s3 - s7, bd), bd); + x0 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s0 + s4), bd); + x1 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s1 + s5), bd); + x2 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s2 + s6), bd); + x3 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s3 + s7), bd); + x4 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s0 - s4), bd); + x5 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s1 - s5), bd); + x6 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s2 - s6), bd); + x7 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s3 - s7), bd); // stage 2 s0 = x0; s1 = x1; s2 = x2; s3 = x3; - s4 = cospi_8_64 * x4 + cospi_24_64 * x5; - s5 = cospi_24_64 * x4 - cospi_8_64 * x5; - s6 = -cospi_24_64 * x6 + cospi_8_64 * x7; - s7 = cospi_8_64 * x6 + cospi_24_64 * x7; + s4 = cospi_8_64 * x4 + cospi_24_64 * x5; + s5 = cospi_24_64 * x4 - cospi_8_64 * x5; + s6 = -cospi_24_64 * x6 + cospi_8_64 * x7; + s7 = cospi_8_64 * x6 + cospi_24_64 * x7; - x0 = WRAPLOW(s0 + s2, bd); - x1 = WRAPLOW(s1 + s3, bd); - x2 = WRAPLOW(s0 - s2, bd); - x3 = WRAPLOW(s1 - s3, bd); - x4 = WRAPLOW(highbd_dct_const_round_shift(s4 + s6, bd), bd); - x5 = 
WRAPLOW(highbd_dct_const_round_shift(s5 + s7, bd), bd); - x6 = WRAPLOW(highbd_dct_const_round_shift(s4 - s6, bd), bd); - x7 = WRAPLOW(highbd_dct_const_round_shift(s5 - s7, bd), bd); + x0 = HIGHBD_WRAPLOW(s0 + s2, bd); + x1 = HIGHBD_WRAPLOW(s1 + s3, bd); + x2 = HIGHBD_WRAPLOW(s0 - s2, bd); + x3 = HIGHBD_WRAPLOW(s1 - s3, bd); + x4 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s4 + s6), bd); + x5 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s5 + s7), bd); + x6 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s4 - s6), bd); + x7 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s5 - s7), bd); // stage 3 s2 = cospi_16_64 * (x2 + x3); @@ -1607,19 +1596,19 @@ void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) { s6 = cospi_16_64 * (x6 + x7); s7 = cospi_16_64 * (x6 - x7); - x2 = WRAPLOW(highbd_dct_const_round_shift(s2, bd), bd); - x3 = WRAPLOW(highbd_dct_const_round_shift(s3, bd), bd); - x6 = WRAPLOW(highbd_dct_const_round_shift(s6, bd), bd); - x7 = WRAPLOW(highbd_dct_const_round_shift(s7, bd), bd); + x2 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s2), bd); + x3 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s3), bd); + x6 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s6), bd); + x7 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s7), bd); - output[0] = WRAPLOW(x0, bd); - output[1] = WRAPLOW(-x4, bd); - output[2] = WRAPLOW(x6, bd); - output[3] = WRAPLOW(-x2, bd); - output[4] = WRAPLOW(x3, bd); - output[5] = WRAPLOW(-x7, bd); - output[6] = WRAPLOW(x5, bd); - output[7] = WRAPLOW(-x1, bd); + output[0] = HIGHBD_WRAPLOW(x0, bd); + output[1] = HIGHBD_WRAPLOW(-x4, bd); + output[2] = HIGHBD_WRAPLOW(x6, bd); + output[3] = HIGHBD_WRAPLOW(-x2, bd); + output[4] = HIGHBD_WRAPLOW(x3, bd); + output[5] = HIGHBD_WRAPLOW(-x7, bd); + output[6] = HIGHBD_WRAPLOW(x5, bd); + output[7] = HIGHBD_WRAPLOW(-x1, bd); } void vpx_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8, @@ -1639,8 +1628,7 @@ void vpx_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t 
*dest8, } // Then transform columns. for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; + for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i]; vpx_highbd_idct8_c(temp_in, temp_out, bd); for (j = 0; j < 8; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( @@ -1652,25 +1640,25 @@ void vpx_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8, void vpx_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) { tran_low_t step1[16], step2[16]; tran_high_t temp1, temp2; - (void) bd; + (void)bd; // stage 1 - step1[0] = input[0/2]; - step1[1] = input[16/2]; - step1[2] = input[8/2]; - step1[3] = input[24/2]; - step1[4] = input[4/2]; - step1[5] = input[20/2]; - step1[6] = input[12/2]; - step1[7] = input[28/2]; - step1[8] = input[2/2]; - step1[9] = input[18/2]; - step1[10] = input[10/2]; - step1[11] = input[26/2]; - step1[12] = input[6/2]; - step1[13] = input[22/2]; - step1[14] = input[14/2]; - step1[15] = input[30/2]; + step1[0] = input[0 / 2]; + step1[1] = input[16 / 2]; + step1[2] = input[8 / 2]; + step1[3] = input[24 / 2]; + step1[4] = input[4 / 2]; + step1[5] = input[20 / 2]; + step1[6] = input[12 / 2]; + step1[7] = input[28 / 2]; + step1[8] = input[2 / 2]; + step1[9] = input[18 / 2]; + step1[10] = input[10 / 2]; + step1[11] = input[26 / 2]; + step1[12] = input[6 / 2]; + step1[13] = input[22 / 2]; + step1[14] = input[14 / 2]; + step1[15] = input[30 / 2]; // stage 2 step2[0] = step1[0]; @@ -1684,23 +1672,23 @@ void vpx_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) { temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64; temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64; - step2[8] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[15] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[8] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[15] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = step1[9] * cospi_14_64 - step1[14] * 
cospi_18_64; temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64; - step2[9] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[14] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[9] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[14] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64; temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64; - step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[10] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[13] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64; temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64; - step2[11] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[12] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[11] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[12] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); // stage 3 step1[0] = step2[0]; @@ -1710,109 +1698,109 @@ void vpx_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) { temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64; temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64; - step1[4] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[7] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[4] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[7] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64; temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64; - step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[5] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[6] = 
HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); - step1[8] = WRAPLOW(step2[8] + step2[9], bd); - step1[9] = WRAPLOW(step2[8] - step2[9], bd); - step1[10] = WRAPLOW(-step2[10] + step2[11], bd); - step1[11] = WRAPLOW(step2[10] + step2[11], bd); - step1[12] = WRAPLOW(step2[12] + step2[13], bd); - step1[13] = WRAPLOW(step2[12] - step2[13], bd); - step1[14] = WRAPLOW(-step2[14] + step2[15], bd); - step1[15] = WRAPLOW(step2[14] + step2[15], bd); + step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[9], bd); + step1[9] = HIGHBD_WRAPLOW(step2[8] - step2[9], bd); + step1[10] = HIGHBD_WRAPLOW(-step2[10] + step2[11], bd); + step1[11] = HIGHBD_WRAPLOW(step2[10] + step2[11], bd); + step1[12] = HIGHBD_WRAPLOW(step2[12] + step2[13], bd); + step1[13] = HIGHBD_WRAPLOW(step2[12] - step2[13], bd); + step1[14] = HIGHBD_WRAPLOW(-step2[14] + step2[15], bd); + step1[15] = HIGHBD_WRAPLOW(step2[14] + step2[15], bd); // stage 4 temp1 = (step1[0] + step1[1]) * cospi_16_64; temp2 = (step1[0] - step1[1]) * cospi_16_64; - step2[0] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[1] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[0] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[1] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64; temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64; - step2[2] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[3] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - step2[4] = WRAPLOW(step1[4] + step1[5], bd); - step2[5] = WRAPLOW(step1[4] - step1[5], bd); - step2[6] = WRAPLOW(-step1[6] + step1[7], bd); - step2[7] = WRAPLOW(step1[6] + step1[7], bd); + step2[2] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[3] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); + step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd); + step2[5] = HIGHBD_WRAPLOW(step1[4] - step1[5], bd); + step2[6] = 
HIGHBD_WRAPLOW(-step1[6] + step1[7], bd); + step2[7] = HIGHBD_WRAPLOW(step1[6] + step1[7], bd); step2[8] = step1[8]; step2[15] = step1[15]; temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64; temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64; - step2[9] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[14] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[9] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[14] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64; temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64; - step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[10] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[13] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); step2[11] = step1[11]; step2[12] = step1[12]; // stage 5 - step1[0] = WRAPLOW(step2[0] + step2[3], bd); - step1[1] = WRAPLOW(step2[1] + step2[2], bd); - step1[2] = WRAPLOW(step2[1] - step2[2], bd); - step1[3] = WRAPLOW(step2[0] - step2[3], bd); + step1[0] = HIGHBD_WRAPLOW(step2[0] + step2[3], bd); + step1[1] = HIGHBD_WRAPLOW(step2[1] + step2[2], bd); + step1[2] = HIGHBD_WRAPLOW(step2[1] - step2[2], bd); + step1[3] = HIGHBD_WRAPLOW(step2[0] - step2[3], bd); step1[4] = step2[4]; temp1 = (step2[6] - step2[5]) * cospi_16_64; temp2 = (step2[5] + step2[6]) * cospi_16_64; - step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[5] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[6] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); step1[7] = step2[7]; - step1[8] = WRAPLOW(step2[8] + step2[11], bd); - step1[9] = WRAPLOW(step2[9] + step2[10], bd); - step1[10] = WRAPLOW(step2[9] - step2[10], bd); - step1[11] = WRAPLOW(step2[8] - step2[11], bd); - step1[12] 
= WRAPLOW(-step2[12] + step2[15], bd); - step1[13] = WRAPLOW(-step2[13] + step2[14], bd); - step1[14] = WRAPLOW(step2[13] + step2[14], bd); - step1[15] = WRAPLOW(step2[12] + step2[15], bd); + step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[11], bd); + step1[9] = HIGHBD_WRAPLOW(step2[9] + step2[10], bd); + step1[10] = HIGHBD_WRAPLOW(step2[9] - step2[10], bd); + step1[11] = HIGHBD_WRAPLOW(step2[8] - step2[11], bd); + step1[12] = HIGHBD_WRAPLOW(-step2[12] + step2[15], bd); + step1[13] = HIGHBD_WRAPLOW(-step2[13] + step2[14], bd); + step1[14] = HIGHBD_WRAPLOW(step2[13] + step2[14], bd); + step1[15] = HIGHBD_WRAPLOW(step2[12] + step2[15], bd); // stage 6 - step2[0] = WRAPLOW(step1[0] + step1[7], bd); - step2[1] = WRAPLOW(step1[1] + step1[6], bd); - step2[2] = WRAPLOW(step1[2] + step1[5], bd); - step2[3] = WRAPLOW(step1[3] + step1[4], bd); - step2[4] = WRAPLOW(step1[3] - step1[4], bd); - step2[5] = WRAPLOW(step1[2] - step1[5], bd); - step2[6] = WRAPLOW(step1[1] - step1[6], bd); - step2[7] = WRAPLOW(step1[0] - step1[7], bd); + step2[0] = HIGHBD_WRAPLOW(step1[0] + step1[7], bd); + step2[1] = HIGHBD_WRAPLOW(step1[1] + step1[6], bd); + step2[2] = HIGHBD_WRAPLOW(step1[2] + step1[5], bd); + step2[3] = HIGHBD_WRAPLOW(step1[3] + step1[4], bd); + step2[4] = HIGHBD_WRAPLOW(step1[3] - step1[4], bd); + step2[5] = HIGHBD_WRAPLOW(step1[2] - step1[5], bd); + step2[6] = HIGHBD_WRAPLOW(step1[1] - step1[6], bd); + step2[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd); step2[8] = step1[8]; step2[9] = step1[9]; temp1 = (-step1[10] + step1[13]) * cospi_16_64; temp2 = (step1[10] + step1[13]) * cospi_16_64; - step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[10] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[13] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = (-step1[11] + step1[12]) * cospi_16_64; temp2 = (step1[11] + step1[12]) * cospi_16_64; - step2[11] = 
WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[12] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[11] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[12] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); step2[14] = step1[14]; step2[15] = step1[15]; // stage 7 - output[0] = WRAPLOW(step2[0] + step2[15], bd); - output[1] = WRAPLOW(step2[1] + step2[14], bd); - output[2] = WRAPLOW(step2[2] + step2[13], bd); - output[3] = WRAPLOW(step2[3] + step2[12], bd); - output[4] = WRAPLOW(step2[4] + step2[11], bd); - output[5] = WRAPLOW(step2[5] + step2[10], bd); - output[6] = WRAPLOW(step2[6] + step2[9], bd); - output[7] = WRAPLOW(step2[7] + step2[8], bd); - output[8] = WRAPLOW(step2[7] - step2[8], bd); - output[9] = WRAPLOW(step2[6] - step2[9], bd); - output[10] = WRAPLOW(step2[5] - step2[10], bd); - output[11] = WRAPLOW(step2[4] - step2[11], bd); - output[12] = WRAPLOW(step2[3] - step2[12], bd); - output[13] = WRAPLOW(step2[2] - step2[13], bd); - output[14] = WRAPLOW(step2[1] - step2[14], bd); - output[15] = WRAPLOW(step2[0] - step2[15], bd); + output[0] = HIGHBD_WRAPLOW(step2[0] + step2[15], bd); + output[1] = HIGHBD_WRAPLOW(step2[1] + step2[14], bd); + output[2] = HIGHBD_WRAPLOW(step2[2] + step2[13], bd); + output[3] = HIGHBD_WRAPLOW(step2[3] + step2[12], bd); + output[4] = HIGHBD_WRAPLOW(step2[4] + step2[11], bd); + output[5] = HIGHBD_WRAPLOW(step2[5] + step2[10], bd); + output[6] = HIGHBD_WRAPLOW(step2[6] + step2[9], bd); + output[7] = HIGHBD_WRAPLOW(step2[7] + step2[8], bd); + output[8] = HIGHBD_WRAPLOW(step2[7] - step2[8], bd); + output[9] = HIGHBD_WRAPLOW(step2[6] - step2[9], bd); + output[10] = HIGHBD_WRAPLOW(step2[5] - step2[10], bd); + output[11] = HIGHBD_WRAPLOW(step2[4] - step2[11], bd); + output[12] = HIGHBD_WRAPLOW(step2[3] - step2[12], bd); + output[13] = HIGHBD_WRAPLOW(step2[2] - step2[13], bd); + output[14] = HIGHBD_WRAPLOW(step2[1] - step2[14], bd); + output[15] = HIGHBD_WRAPLOW(step2[0] - step2[15], 
bd); } void vpx_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8, @@ -1832,8 +1820,7 @@ void vpx_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8, // Then transform columns. for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j * 16 + i]; + for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i]; vpx_highbd_idct16_c(temp_in, temp_out, bd); for (j = 0; j < 16; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( @@ -1862,20 +1849,20 @@ void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) { tran_low_t x13 = input[12]; tran_low_t x14 = input[1]; tran_low_t x15 = input[14]; - (void) bd; + (void)bd; - if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 - | x9 | x10 | x11 | x12 | x13 | x14 | x15)) { + if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | x11 | x12 | + x13 | x14 | x15)) { memset(output, 0, 16 * sizeof(*output)); return; } // stage 1 - s0 = x0 * cospi_1_64 + x1 * cospi_31_64; + s0 = x0 * cospi_1_64 + x1 * cospi_31_64; s1 = x0 * cospi_31_64 - x1 * cospi_1_64; - s2 = x2 * cospi_5_64 + x3 * cospi_27_64; + s2 = x2 * cospi_5_64 + x3 * cospi_27_64; s3 = x2 * cospi_27_64 - x3 * cospi_5_64; - s4 = x4 * cospi_9_64 + x5 * cospi_23_64; + s4 = x4 * cospi_9_64 + x5 * cospi_23_64; s5 = x4 * cospi_23_64 - x5 * cospi_9_64; s6 = x6 * cospi_13_64 + x7 * cospi_19_64; s7 = x6 * cospi_19_64 - x7 * cospi_13_64; @@ -1884,26 +1871,26 @@ void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) { s10 = x10 * cospi_21_64 + x11 * cospi_11_64; s11 = x10 * cospi_11_64 - x11 * cospi_21_64; s12 = x12 * cospi_25_64 + x13 * cospi_7_64; - s13 = x12 * cospi_7_64 - x13 * cospi_25_64; + s13 = x12 * cospi_7_64 - x13 * cospi_25_64; s14 = x14 * cospi_29_64 + x15 * cospi_3_64; - s15 = x14 * cospi_3_64 - x15 * cospi_29_64; + s15 = x14 * cospi_3_64 - x15 * cospi_29_64; - x0 = WRAPLOW(highbd_dct_const_round_shift(s0 + s8, bd), bd); - x1 = WRAPLOW(highbd_dct_const_round_shift(s1 + s9, bd), bd); 
- x2 = WRAPLOW(highbd_dct_const_round_shift(s2 + s10, bd), bd); - x3 = WRAPLOW(highbd_dct_const_round_shift(s3 + s11, bd), bd); - x4 = WRAPLOW(highbd_dct_const_round_shift(s4 + s12, bd), bd); - x5 = WRAPLOW(highbd_dct_const_round_shift(s5 + s13, bd), bd); - x6 = WRAPLOW(highbd_dct_const_round_shift(s6 + s14, bd), bd); - x7 = WRAPLOW(highbd_dct_const_round_shift(s7 + s15, bd), bd); - x8 = WRAPLOW(highbd_dct_const_round_shift(s0 - s8, bd), bd); - x9 = WRAPLOW(highbd_dct_const_round_shift(s1 - s9, bd), bd); - x10 = WRAPLOW(highbd_dct_const_round_shift(s2 - s10, bd), bd); - x11 = WRAPLOW(highbd_dct_const_round_shift(s3 - s11, bd), bd); - x12 = WRAPLOW(highbd_dct_const_round_shift(s4 - s12, bd), bd); - x13 = WRAPLOW(highbd_dct_const_round_shift(s5 - s13, bd), bd); - x14 = WRAPLOW(highbd_dct_const_round_shift(s6 - s14, bd), bd); - x15 = WRAPLOW(highbd_dct_const_round_shift(s7 - s15, bd), bd); + x0 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s0 + s8), bd); + x1 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s1 + s9), bd); + x2 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s2 + s10), bd); + x3 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s3 + s11), bd); + x4 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s4 + s12), bd); + x5 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s5 + s13), bd); + x6 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s6 + s14), bd); + x7 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s7 + s15), bd); + x8 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s0 - s8), bd); + x9 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s1 - s9), bd); + x10 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s2 - s10), bd); + x11 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s3 - s11), bd); + x12 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s4 - s12), bd); + x13 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s5 - s13), bd); + x14 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s6 - s14), bd); + x15 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s7 - s15), bd); // stage 2 s0 = x0; @@ 
-1923,22 +1910,22 @@ void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) { s14 = -x14 * cospi_12_64 + x15 * cospi_20_64; s15 = x14 * cospi_20_64 + x15 * cospi_12_64; - x0 = WRAPLOW(s0 + s4, bd); - x1 = WRAPLOW(s1 + s5, bd); - x2 = WRAPLOW(s2 + s6, bd); - x3 = WRAPLOW(s3 + s7, bd); - x4 = WRAPLOW(s0 - s4, bd); - x5 = WRAPLOW(s1 - s5, bd); - x6 = WRAPLOW(s2 - s6, bd); - x7 = WRAPLOW(s3 - s7, bd); - x8 = WRAPLOW(highbd_dct_const_round_shift(s8 + s12, bd), bd); - x9 = WRAPLOW(highbd_dct_const_round_shift(s9 + s13, bd), bd); - x10 = WRAPLOW(highbd_dct_const_round_shift(s10 + s14, bd), bd); - x11 = WRAPLOW(highbd_dct_const_round_shift(s11 + s15, bd), bd); - x12 = WRAPLOW(highbd_dct_const_round_shift(s8 - s12, bd), bd); - x13 = WRAPLOW(highbd_dct_const_round_shift(s9 - s13, bd), bd); - x14 = WRAPLOW(highbd_dct_const_round_shift(s10 - s14, bd), bd); - x15 = WRAPLOW(highbd_dct_const_round_shift(s11 - s15, bd), bd); + x0 = HIGHBD_WRAPLOW(s0 + s4, bd); + x1 = HIGHBD_WRAPLOW(s1 + s5, bd); + x2 = HIGHBD_WRAPLOW(s2 + s6, bd); + x3 = HIGHBD_WRAPLOW(s3 + s7, bd); + x4 = HIGHBD_WRAPLOW(s0 - s4, bd); + x5 = HIGHBD_WRAPLOW(s1 - s5, bd); + x6 = HIGHBD_WRAPLOW(s2 - s6, bd); + x7 = HIGHBD_WRAPLOW(s3 - s7, bd); + x8 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s8 + s12), bd); + x9 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s9 + s13), bd); + x10 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s10 + s14), bd); + x11 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s11 + s15), bd); + x12 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s8 - s12), bd); + x13 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s9 - s13), bd); + x14 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s10 - s14), bd); + x15 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s11 - s15), bd); // stage 3 s0 = x0; @@ -1958,58 +1945,58 @@ void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) { s14 = -x14 * cospi_24_64 + x15 * cospi_8_64; s15 = x14 * cospi_8_64 + x15 * cospi_24_64; - x0 
= WRAPLOW(s0 + s2, bd); - x1 = WRAPLOW(s1 + s3, bd); - x2 = WRAPLOW(s0 - s2, bd); - x3 = WRAPLOW(s1 - s3, bd); - x4 = WRAPLOW(highbd_dct_const_round_shift(s4 + s6, bd), bd); - x5 = WRAPLOW(highbd_dct_const_round_shift(s5 + s7, bd), bd); - x6 = WRAPLOW(highbd_dct_const_round_shift(s4 - s6, bd), bd); - x7 = WRAPLOW(highbd_dct_const_round_shift(s5 - s7, bd), bd); - x8 = WRAPLOW(s8 + s10, bd); - x9 = WRAPLOW(s9 + s11, bd); - x10 = WRAPLOW(s8 - s10, bd); - x11 = WRAPLOW(s9 - s11, bd); - x12 = WRAPLOW(highbd_dct_const_round_shift(s12 + s14, bd), bd); - x13 = WRAPLOW(highbd_dct_const_round_shift(s13 + s15, bd), bd); - x14 = WRAPLOW(highbd_dct_const_round_shift(s12 - s14, bd), bd); - x15 = WRAPLOW(highbd_dct_const_round_shift(s13 - s15, bd), bd); + x0 = HIGHBD_WRAPLOW(s0 + s2, bd); + x1 = HIGHBD_WRAPLOW(s1 + s3, bd); + x2 = HIGHBD_WRAPLOW(s0 - s2, bd); + x3 = HIGHBD_WRAPLOW(s1 - s3, bd); + x4 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s4 + s6), bd); + x5 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s5 + s7), bd); + x6 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s4 - s6), bd); + x7 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s5 - s7), bd); + x8 = HIGHBD_WRAPLOW(s8 + s10, bd); + x9 = HIGHBD_WRAPLOW(s9 + s11, bd); + x10 = HIGHBD_WRAPLOW(s8 - s10, bd); + x11 = HIGHBD_WRAPLOW(s9 - s11, bd); + x12 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s12 + s14), bd); + x13 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s13 + s15), bd); + x14 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s12 - s14), bd); + x15 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s13 - s15), bd); // stage 4 - s2 = (- cospi_16_64) * (x2 + x3); + s2 = (-cospi_16_64) * (x2 + x3); s3 = cospi_16_64 * (x2 - x3); s6 = cospi_16_64 * (x6 + x7); s7 = cospi_16_64 * (-x6 + x7); s10 = cospi_16_64 * (x10 + x11); s11 = cospi_16_64 * (-x10 + x11); - s14 = (- cospi_16_64) * (x14 + x15); + s14 = (-cospi_16_64) * (x14 + x15); s15 = cospi_16_64 * (x14 - x15); - x2 = WRAPLOW(highbd_dct_const_round_shift(s2, bd), bd); - x3 = 
WRAPLOW(highbd_dct_const_round_shift(s3, bd), bd); - x6 = WRAPLOW(highbd_dct_const_round_shift(s6, bd), bd); - x7 = WRAPLOW(highbd_dct_const_round_shift(s7, bd), bd); - x10 = WRAPLOW(highbd_dct_const_round_shift(s10, bd), bd); - x11 = WRAPLOW(highbd_dct_const_round_shift(s11, bd), bd); - x14 = WRAPLOW(highbd_dct_const_round_shift(s14, bd), bd); - x15 = WRAPLOW(highbd_dct_const_round_shift(s15, bd), bd); + x2 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s2), bd); + x3 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s3), bd); + x6 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s6), bd); + x7 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s7), bd); + x10 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s10), bd); + x11 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s11), bd); + x14 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s14), bd); + x15 = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(s15), bd); - output[0] = WRAPLOW(x0, bd); - output[1] = WRAPLOW(-x8, bd); - output[2] = WRAPLOW(x12, bd); - output[3] = WRAPLOW(-x4, bd); - output[4] = WRAPLOW(x6, bd); - output[5] = WRAPLOW(x14, bd); - output[6] = WRAPLOW(x10, bd); - output[7] = WRAPLOW(x2, bd); - output[8] = WRAPLOW(x3, bd); - output[9] = WRAPLOW(x11, bd); - output[10] = WRAPLOW(x15, bd); - output[11] = WRAPLOW(x7, bd); - output[12] = WRAPLOW(x5, bd); - output[13] = WRAPLOW(-x13, bd); - output[14] = WRAPLOW(x9, bd); - output[15] = WRAPLOW(-x1, bd); + output[0] = HIGHBD_WRAPLOW(x0, bd); + output[1] = HIGHBD_WRAPLOW(-x8, bd); + output[2] = HIGHBD_WRAPLOW(x12, bd); + output[3] = HIGHBD_WRAPLOW(-x4, bd); + output[4] = HIGHBD_WRAPLOW(x6, bd); + output[5] = HIGHBD_WRAPLOW(x14, bd); + output[6] = HIGHBD_WRAPLOW(x10, bd); + output[7] = HIGHBD_WRAPLOW(x2, bd); + output[8] = HIGHBD_WRAPLOW(x3, bd); + output[9] = HIGHBD_WRAPLOW(x11, bd); + output[10] = HIGHBD_WRAPLOW(x15, bd); + output[11] = HIGHBD_WRAPLOW(x7, bd); + output[12] = HIGHBD_WRAPLOW(x5, bd); + output[13] = HIGHBD_WRAPLOW(-x13, bd); + output[14] = HIGHBD_WRAPLOW(x9, bd); 
+ output[15] = HIGHBD_WRAPLOW(-x1, bd); } void vpx_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8, @@ -2030,8 +2017,7 @@ void vpx_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8, // Then transform columns. for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j*16 + i]; + for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i]; vpx_highbd_idct16_c(temp_in, temp_out, bd); for (j = 0; j < 16; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( @@ -2044,24 +2030,23 @@ void vpx_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { int i, j; tran_high_t a1; - tran_low_t out = WRAPLOW( - highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd); + tran_low_t out = + HIGHBD_WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64), bd); uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd); + out = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64), bd); a1 = ROUND_POWER_OF_TWO(out, 6); for (j = 0; j < 16; ++j) { - for (i = 0; i < 16; ++i) - dest[i] = highbd_clip_pixel_add(dest[i], a1, bd); + for (i = 0; i < 16; ++i) dest[i] = highbd_clip_pixel_add(dest[i], a1, bd); dest += stride; } } -static void highbd_idct32_c(const tran_low_t *input, - tran_low_t *output, int bd) { +static void highbd_idct32_c(const tran_low_t *input, tran_low_t *output, + int bd) { tran_low_t step1[32], step2[32]; tran_high_t temp1, temp2; - (void) bd; + (void)bd; // stage 1 step1[0] = input[0]; @@ -2083,43 +2068,43 @@ static void highbd_idct32_c(const tran_low_t *input, temp1 = input[1] * cospi_31_64 - input[31] * cospi_1_64; temp2 = input[1] * cospi_1_64 + input[31] * cospi_31_64; - step1[16] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[31] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[16] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[31] = 
HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = input[17] * cospi_15_64 - input[15] * cospi_17_64; temp2 = input[17] * cospi_17_64 + input[15] * cospi_15_64; - step1[17] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[30] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[17] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[30] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = input[9] * cospi_23_64 - input[23] * cospi_9_64; temp2 = input[9] * cospi_9_64 + input[23] * cospi_23_64; - step1[18] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[29] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[18] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[29] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = input[25] * cospi_7_64 - input[7] * cospi_25_64; temp2 = input[25] * cospi_25_64 + input[7] * cospi_7_64; - step1[19] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[28] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[19] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[28] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = input[5] * cospi_27_64 - input[27] * cospi_5_64; temp2 = input[5] * cospi_5_64 + input[27] * cospi_27_64; - step1[20] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[27] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[20] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[27] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = input[21] * cospi_11_64 - input[11] * cospi_21_64; temp2 = input[21] * cospi_21_64 + input[11] * cospi_11_64; - step1[21] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[26] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[21] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[26] = 
HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = input[13] * cospi_19_64 - input[19] * cospi_13_64; temp2 = input[13] * cospi_13_64 + input[19] * cospi_19_64; - step1[22] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[25] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[22] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[25] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = input[29] * cospi_3_64 - input[3] * cospi_29_64; temp2 = input[29] * cospi_29_64 + input[3] * cospi_3_64; - step1[23] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[24] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[23] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[24] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); // stage 2 step2[0] = step1[0]; @@ -2133,40 +2118,40 @@ static void highbd_idct32_c(const tran_low_t *input, temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64; temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64; - step2[8] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[15] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[8] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[15] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64; temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64; - step2[9] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[14] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[9] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[14] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64; temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64; - step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + 
step2[10] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[13] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64; temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64; - step2[11] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[12] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[11] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[12] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); - step2[16] = WRAPLOW(step1[16] + step1[17], bd); - step2[17] = WRAPLOW(step1[16] - step1[17], bd); - step2[18] = WRAPLOW(-step1[18] + step1[19], bd); - step2[19] = WRAPLOW(step1[18] + step1[19], bd); - step2[20] = WRAPLOW(step1[20] + step1[21], bd); - step2[21] = WRAPLOW(step1[20] - step1[21], bd); - step2[22] = WRAPLOW(-step1[22] + step1[23], bd); - step2[23] = WRAPLOW(step1[22] + step1[23], bd); - step2[24] = WRAPLOW(step1[24] + step1[25], bd); - step2[25] = WRAPLOW(step1[24] - step1[25], bd); - step2[26] = WRAPLOW(-step1[26] + step1[27], bd); - step2[27] = WRAPLOW(step1[26] + step1[27], bd); - step2[28] = WRAPLOW(step1[28] + step1[29], bd); - step2[29] = WRAPLOW(step1[28] - step1[29], bd); - step2[30] = WRAPLOW(-step1[30] + step1[31], bd); - step2[31] = WRAPLOW(step1[30] + step1[31], bd); + step2[16] = HIGHBD_WRAPLOW(step1[16] + step1[17], bd); + step2[17] = HIGHBD_WRAPLOW(step1[16] - step1[17], bd); + step2[18] = HIGHBD_WRAPLOW(-step1[18] + step1[19], bd); + step2[19] = HIGHBD_WRAPLOW(step1[18] + step1[19], bd); + step2[20] = HIGHBD_WRAPLOW(step1[20] + step1[21], bd); + step2[21] = HIGHBD_WRAPLOW(step1[20] - step1[21], bd); + step2[22] = HIGHBD_WRAPLOW(-step1[22] + step1[23], bd); + step2[23] = HIGHBD_WRAPLOW(step1[22] + step1[23], bd); + step2[24] = HIGHBD_WRAPLOW(step1[24] + step1[25], bd); + step2[25] = HIGHBD_WRAPLOW(step1[24] - step1[25], bd); + step2[26] = HIGHBD_WRAPLOW(-step1[26] + step1[27], bd); + step2[27] = 
HIGHBD_WRAPLOW(step1[26] + step1[27], bd); + step2[28] = HIGHBD_WRAPLOW(step1[28] + step1[29], bd); + step2[29] = HIGHBD_WRAPLOW(step1[28] - step1[29], bd); + step2[30] = HIGHBD_WRAPLOW(-step1[30] + step1[31], bd); + step2[31] = HIGHBD_WRAPLOW(step1[30] + step1[31], bd); // stage 3 step1[0] = step2[0]; @@ -2176,42 +2161,42 @@ static void highbd_idct32_c(const tran_low_t *input, temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64; temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64; - step1[4] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[7] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[4] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[7] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64; temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64; - step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[5] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[6] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); - step1[8] = WRAPLOW(step2[8] + step2[9], bd); - step1[9] = WRAPLOW(step2[8] - step2[9], bd); - step1[10] = WRAPLOW(-step2[10] + step2[11], bd); - step1[11] = WRAPLOW(step2[10] + step2[11], bd); - step1[12] = WRAPLOW(step2[12] + step2[13], bd); - step1[13] = WRAPLOW(step2[12] - step2[13], bd); - step1[14] = WRAPLOW(-step2[14] + step2[15], bd); - step1[15] = WRAPLOW(step2[14] + step2[15], bd); + step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[9], bd); + step1[9] = HIGHBD_WRAPLOW(step2[8] - step2[9], bd); + step1[10] = HIGHBD_WRAPLOW(-step2[10] + step2[11], bd); + step1[11] = HIGHBD_WRAPLOW(step2[10] + step2[11], bd); + step1[12] = HIGHBD_WRAPLOW(step2[12] + step2[13], bd); + step1[13] = HIGHBD_WRAPLOW(step2[12] - step2[13], bd); + step1[14] = HIGHBD_WRAPLOW(-step2[14] + step2[15], bd); + step1[15] = HIGHBD_WRAPLOW(step2[14] + step2[15], 
bd); step1[16] = step2[16]; step1[31] = step2[31]; temp1 = -step2[17] * cospi_4_64 + step2[30] * cospi_28_64; temp2 = step2[17] * cospi_28_64 + step2[30] * cospi_4_64; - step1[17] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[30] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[17] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[30] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = -step2[18] * cospi_28_64 - step2[29] * cospi_4_64; temp2 = -step2[18] * cospi_4_64 + step2[29] * cospi_28_64; - step1[18] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[29] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[18] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[29] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); step1[19] = step2[19]; step1[20] = step2[20]; temp1 = -step2[21] * cospi_20_64 + step2[26] * cospi_12_64; temp2 = step2[21] * cospi_12_64 + step2[26] * cospi_20_64; - step1[21] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[26] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[21] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[26] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = -step2[22] * cospi_12_64 - step2[25] * cospi_20_64; temp2 = -step2[22] * cospi_20_64 + step2[25] * cospi_12_64; - step1[22] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[25] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[22] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[25] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); step1[23] = step2[23]; step1[24] = step2[24]; step1[27] = step2[27]; @@ -2220,87 +2205,87 @@ static void highbd_idct32_c(const tran_low_t *input, // stage 4 temp1 = (step1[0] + step1[1]) * cospi_16_64; temp2 = (step1[0] - step1[1]) * cospi_16_64; - step2[0] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - 
step2[1] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[0] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[1] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64; temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64; - step2[2] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[3] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); - step2[4] = WRAPLOW(step1[4] + step1[5], bd); - step2[5] = WRAPLOW(step1[4] - step1[5], bd); - step2[6] = WRAPLOW(-step1[6] + step1[7], bd); - step2[7] = WRAPLOW(step1[6] + step1[7], bd); + step2[2] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[3] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); + step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd); + step2[5] = HIGHBD_WRAPLOW(step1[4] - step1[5], bd); + step2[6] = HIGHBD_WRAPLOW(-step1[6] + step1[7], bd); + step2[7] = HIGHBD_WRAPLOW(step1[6] + step1[7], bd); step2[8] = step1[8]; step2[15] = step1[15]; temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64; temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64; - step2[9] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[14] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[9] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[14] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64; temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64; - step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[10] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[13] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); step2[11] = step1[11]; step2[12] = step1[12]; - step2[16] = WRAPLOW(step1[16] + step1[19], bd); - step2[17] = WRAPLOW(step1[17] + step1[18], bd); - step2[18] = 
WRAPLOW(step1[17] - step1[18], bd); - step2[19] = WRAPLOW(step1[16] - step1[19], bd); - step2[20] = WRAPLOW(-step1[20] + step1[23], bd); - step2[21] = WRAPLOW(-step1[21] + step1[22], bd); - step2[22] = WRAPLOW(step1[21] + step1[22], bd); - step2[23] = WRAPLOW(step1[20] + step1[23], bd); + step2[16] = HIGHBD_WRAPLOW(step1[16] + step1[19], bd); + step2[17] = HIGHBD_WRAPLOW(step1[17] + step1[18], bd); + step2[18] = HIGHBD_WRAPLOW(step1[17] - step1[18], bd); + step2[19] = HIGHBD_WRAPLOW(step1[16] - step1[19], bd); + step2[20] = HIGHBD_WRAPLOW(-step1[20] + step1[23], bd); + step2[21] = HIGHBD_WRAPLOW(-step1[21] + step1[22], bd); + step2[22] = HIGHBD_WRAPLOW(step1[21] + step1[22], bd); + step2[23] = HIGHBD_WRAPLOW(step1[20] + step1[23], bd); - step2[24] = WRAPLOW(step1[24] + step1[27], bd); - step2[25] = WRAPLOW(step1[25] + step1[26], bd); - step2[26] = WRAPLOW(step1[25] - step1[26], bd); - step2[27] = WRAPLOW(step1[24] - step1[27], bd); - step2[28] = WRAPLOW(-step1[28] + step1[31], bd); - step2[29] = WRAPLOW(-step1[29] + step1[30], bd); - step2[30] = WRAPLOW(step1[29] + step1[30], bd); - step2[31] = WRAPLOW(step1[28] + step1[31], bd); + step2[24] = HIGHBD_WRAPLOW(step1[24] + step1[27], bd); + step2[25] = HIGHBD_WRAPLOW(step1[25] + step1[26], bd); + step2[26] = HIGHBD_WRAPLOW(step1[25] - step1[26], bd); + step2[27] = HIGHBD_WRAPLOW(step1[24] - step1[27], bd); + step2[28] = HIGHBD_WRAPLOW(-step1[28] + step1[31], bd); + step2[29] = HIGHBD_WRAPLOW(-step1[29] + step1[30], bd); + step2[30] = HIGHBD_WRAPLOW(step1[29] + step1[30], bd); + step2[31] = HIGHBD_WRAPLOW(step1[28] + step1[31], bd); // stage 5 - step1[0] = WRAPLOW(step2[0] + step2[3], bd); - step1[1] = WRAPLOW(step2[1] + step2[2], bd); - step1[2] = WRAPLOW(step2[1] - step2[2], bd); - step1[3] = WRAPLOW(step2[0] - step2[3], bd); + step1[0] = HIGHBD_WRAPLOW(step2[0] + step2[3], bd); + step1[1] = HIGHBD_WRAPLOW(step2[1] + step2[2], bd); + step1[2] = HIGHBD_WRAPLOW(step2[1] - step2[2], bd); + step1[3] = 
HIGHBD_WRAPLOW(step2[0] - step2[3], bd); step1[4] = step2[4]; temp1 = (step2[6] - step2[5]) * cospi_16_64; temp2 = (step2[5] + step2[6]) * cospi_16_64; - step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[5] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[6] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); step1[7] = step2[7]; - step1[8] = WRAPLOW(step2[8] + step2[11], bd); - step1[9] = WRAPLOW(step2[9] + step2[10], bd); - step1[10] = WRAPLOW(step2[9] - step2[10], bd); - step1[11] = WRAPLOW(step2[8] - step2[11], bd); - step1[12] = WRAPLOW(-step2[12] + step2[15], bd); - step1[13] = WRAPLOW(-step2[13] + step2[14], bd); - step1[14] = WRAPLOW(step2[13] + step2[14], bd); - step1[15] = WRAPLOW(step2[12] + step2[15], bd); + step1[8] = HIGHBD_WRAPLOW(step2[8] + step2[11], bd); + step1[9] = HIGHBD_WRAPLOW(step2[9] + step2[10], bd); + step1[10] = HIGHBD_WRAPLOW(step2[9] - step2[10], bd); + step1[11] = HIGHBD_WRAPLOW(step2[8] - step2[11], bd); + step1[12] = HIGHBD_WRAPLOW(-step2[12] + step2[15], bd); + step1[13] = HIGHBD_WRAPLOW(-step2[13] + step2[14], bd); + step1[14] = HIGHBD_WRAPLOW(step2[13] + step2[14], bd); + step1[15] = HIGHBD_WRAPLOW(step2[12] + step2[15], bd); step1[16] = step2[16]; step1[17] = step2[17]; temp1 = -step2[18] * cospi_8_64 + step2[29] * cospi_24_64; temp2 = step2[18] * cospi_24_64 + step2[29] * cospi_8_64; - step1[18] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[29] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[18] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[29] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = -step2[19] * cospi_8_64 + step2[28] * cospi_24_64; temp2 = step2[19] * cospi_24_64 + step2[28] * cospi_8_64; - step1[19] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[28] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + 
step1[19] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[28] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = -step2[20] * cospi_24_64 - step2[27] * cospi_8_64; temp2 = -step2[20] * cospi_8_64 + step2[27] * cospi_24_64; - step1[20] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[27] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[20] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[27] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = -step2[21] * cospi_24_64 - step2[26] * cospi_8_64; temp2 = -step2[21] * cospi_8_64 + step2[26] * cospi_24_64; - step1[21] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[26] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[21] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[26] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); step1[22] = step2[22]; step1[23] = step2[23]; step1[24] = step2[24]; @@ -2309,62 +2294,62 @@ static void highbd_idct32_c(const tran_low_t *input, step1[31] = step2[31]; // stage 6 - step2[0] = WRAPLOW(step1[0] + step1[7], bd); - step2[1] = WRAPLOW(step1[1] + step1[6], bd); - step2[2] = WRAPLOW(step1[2] + step1[5], bd); - step2[3] = WRAPLOW(step1[3] + step1[4], bd); - step2[4] = WRAPLOW(step1[3] - step1[4], bd); - step2[5] = WRAPLOW(step1[2] - step1[5], bd); - step2[6] = WRAPLOW(step1[1] - step1[6], bd); - step2[7] = WRAPLOW(step1[0] - step1[7], bd); + step2[0] = HIGHBD_WRAPLOW(step1[0] + step1[7], bd); + step2[1] = HIGHBD_WRAPLOW(step1[1] + step1[6], bd); + step2[2] = HIGHBD_WRAPLOW(step1[2] + step1[5], bd); + step2[3] = HIGHBD_WRAPLOW(step1[3] + step1[4], bd); + step2[4] = HIGHBD_WRAPLOW(step1[3] - step1[4], bd); + step2[5] = HIGHBD_WRAPLOW(step1[2] - step1[5], bd); + step2[6] = HIGHBD_WRAPLOW(step1[1] - step1[6], bd); + step2[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd); step2[8] = step1[8]; step2[9] = step1[9]; temp1 = (-step1[10] + step1[13]) * 
cospi_16_64; temp2 = (step1[10] + step1[13]) * cospi_16_64; - step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[10] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[13] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = (-step1[11] + step1[12]) * cospi_16_64; temp2 = (step1[11] + step1[12]) * cospi_16_64; - step2[11] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step2[12] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step2[11] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step2[12] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); step2[14] = step1[14]; step2[15] = step1[15]; - step2[16] = WRAPLOW(step1[16] + step1[23], bd); - step2[17] = WRAPLOW(step1[17] + step1[22], bd); - step2[18] = WRAPLOW(step1[18] + step1[21], bd); - step2[19] = WRAPLOW(step1[19] + step1[20], bd); - step2[20] = WRAPLOW(step1[19] - step1[20], bd); - step2[21] = WRAPLOW(step1[18] - step1[21], bd); - step2[22] = WRAPLOW(step1[17] - step1[22], bd); - step2[23] = WRAPLOW(step1[16] - step1[23], bd); + step2[16] = HIGHBD_WRAPLOW(step1[16] + step1[23], bd); + step2[17] = HIGHBD_WRAPLOW(step1[17] + step1[22], bd); + step2[18] = HIGHBD_WRAPLOW(step1[18] + step1[21], bd); + step2[19] = HIGHBD_WRAPLOW(step1[19] + step1[20], bd); + step2[20] = HIGHBD_WRAPLOW(step1[19] - step1[20], bd); + step2[21] = HIGHBD_WRAPLOW(step1[18] - step1[21], bd); + step2[22] = HIGHBD_WRAPLOW(step1[17] - step1[22], bd); + step2[23] = HIGHBD_WRAPLOW(step1[16] - step1[23], bd); - step2[24] = WRAPLOW(-step1[24] + step1[31], bd); - step2[25] = WRAPLOW(-step1[25] + step1[30], bd); - step2[26] = WRAPLOW(-step1[26] + step1[29], bd); - step2[27] = WRAPLOW(-step1[27] + step1[28], bd); - step2[28] = WRAPLOW(step1[27] + step1[28], bd); - step2[29] = WRAPLOW(step1[26] + step1[29], bd); - step2[30] = WRAPLOW(step1[25] + step1[30], bd); - step2[31] = WRAPLOW(step1[24] 
+ step1[31], bd); + step2[24] = HIGHBD_WRAPLOW(-step1[24] + step1[31], bd); + step2[25] = HIGHBD_WRAPLOW(-step1[25] + step1[30], bd); + step2[26] = HIGHBD_WRAPLOW(-step1[26] + step1[29], bd); + step2[27] = HIGHBD_WRAPLOW(-step1[27] + step1[28], bd); + step2[28] = HIGHBD_WRAPLOW(step1[27] + step1[28], bd); + step2[29] = HIGHBD_WRAPLOW(step1[26] + step1[29], bd); + step2[30] = HIGHBD_WRAPLOW(step1[25] + step1[30], bd); + step2[31] = HIGHBD_WRAPLOW(step1[24] + step1[31], bd); // stage 7 - step1[0] = WRAPLOW(step2[0] + step2[15], bd); - step1[1] = WRAPLOW(step2[1] + step2[14], bd); - step1[2] = WRAPLOW(step2[2] + step2[13], bd); - step1[3] = WRAPLOW(step2[3] + step2[12], bd); - step1[4] = WRAPLOW(step2[4] + step2[11], bd); - step1[5] = WRAPLOW(step2[5] + step2[10], bd); - step1[6] = WRAPLOW(step2[6] + step2[9], bd); - step1[7] = WRAPLOW(step2[7] + step2[8], bd); - step1[8] = WRAPLOW(step2[7] - step2[8], bd); - step1[9] = WRAPLOW(step2[6] - step2[9], bd); - step1[10] = WRAPLOW(step2[5] - step2[10], bd); - step1[11] = WRAPLOW(step2[4] - step2[11], bd); - step1[12] = WRAPLOW(step2[3] - step2[12], bd); - step1[13] = WRAPLOW(step2[2] - step2[13], bd); - step1[14] = WRAPLOW(step2[1] - step2[14], bd); - step1[15] = WRAPLOW(step2[0] - step2[15], bd); + step1[0] = HIGHBD_WRAPLOW(step2[0] + step2[15], bd); + step1[1] = HIGHBD_WRAPLOW(step2[1] + step2[14], bd); + step1[2] = HIGHBD_WRAPLOW(step2[2] + step2[13], bd); + step1[3] = HIGHBD_WRAPLOW(step2[3] + step2[12], bd); + step1[4] = HIGHBD_WRAPLOW(step2[4] + step2[11], bd); + step1[5] = HIGHBD_WRAPLOW(step2[5] + step2[10], bd); + step1[6] = HIGHBD_WRAPLOW(step2[6] + step2[9], bd); + step1[7] = HIGHBD_WRAPLOW(step2[7] + step2[8], bd); + step1[8] = HIGHBD_WRAPLOW(step2[7] - step2[8], bd); + step1[9] = HIGHBD_WRAPLOW(step2[6] - step2[9], bd); + step1[10] = HIGHBD_WRAPLOW(step2[5] - step2[10], bd); + step1[11] = HIGHBD_WRAPLOW(step2[4] - step2[11], bd); + step1[12] = HIGHBD_WRAPLOW(step2[3] - step2[12], bd); + step1[13] = 
HIGHBD_WRAPLOW(step2[2] - step2[13], bd); + step1[14] = HIGHBD_WRAPLOW(step2[1] - step2[14], bd); + step1[15] = HIGHBD_WRAPLOW(step2[0] - step2[15], bd); step1[16] = step2[16]; step1[17] = step2[17]; @@ -2372,58 +2357,58 @@ static void highbd_idct32_c(const tran_low_t *input, step1[19] = step2[19]; temp1 = (-step2[20] + step2[27]) * cospi_16_64; temp2 = (step2[20] + step2[27]) * cospi_16_64; - step1[20] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[27] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[20] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[27] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = (-step2[21] + step2[26]) * cospi_16_64; temp2 = (step2[21] + step2[26]) * cospi_16_64; - step1[21] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[26] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[21] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[26] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = (-step2[22] + step2[25]) * cospi_16_64; temp2 = (step2[22] + step2[25]) * cospi_16_64; - step1[22] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[25] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[22] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[25] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); temp1 = (-step2[23] + step2[24]) * cospi_16_64; temp2 = (step2[23] + step2[24]) * cospi_16_64; - step1[23] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd); - step1[24] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); + step1[23] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp1), bd); + step1[24] = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(temp2), bd); step1[28] = step2[28]; step1[29] = step2[29]; step1[30] = step2[30]; step1[31] = step2[31]; // final stage - output[0] = WRAPLOW(step1[0] + step1[31], bd); - output[1] = WRAPLOW(step1[1] + step1[30], bd); - 
output[2] = WRAPLOW(step1[2] + step1[29], bd); - output[3] = WRAPLOW(step1[3] + step1[28], bd); - output[4] = WRAPLOW(step1[4] + step1[27], bd); - output[5] = WRAPLOW(step1[5] + step1[26], bd); - output[6] = WRAPLOW(step1[6] + step1[25], bd); - output[7] = WRAPLOW(step1[7] + step1[24], bd); - output[8] = WRAPLOW(step1[8] + step1[23], bd); - output[9] = WRAPLOW(step1[9] + step1[22], bd); - output[10] = WRAPLOW(step1[10] + step1[21], bd); - output[11] = WRAPLOW(step1[11] + step1[20], bd); - output[12] = WRAPLOW(step1[12] + step1[19], bd); - output[13] = WRAPLOW(step1[13] + step1[18], bd); - output[14] = WRAPLOW(step1[14] + step1[17], bd); - output[15] = WRAPLOW(step1[15] + step1[16], bd); - output[16] = WRAPLOW(step1[15] - step1[16], bd); - output[17] = WRAPLOW(step1[14] - step1[17], bd); - output[18] = WRAPLOW(step1[13] - step1[18], bd); - output[19] = WRAPLOW(step1[12] - step1[19], bd); - output[20] = WRAPLOW(step1[11] - step1[20], bd); - output[21] = WRAPLOW(step1[10] - step1[21], bd); - output[22] = WRAPLOW(step1[9] - step1[22], bd); - output[23] = WRAPLOW(step1[8] - step1[23], bd); - output[24] = WRAPLOW(step1[7] - step1[24], bd); - output[25] = WRAPLOW(step1[6] - step1[25], bd); - output[26] = WRAPLOW(step1[5] - step1[26], bd); - output[27] = WRAPLOW(step1[4] - step1[27], bd); - output[28] = WRAPLOW(step1[3] - step1[28], bd); - output[29] = WRAPLOW(step1[2] - step1[29], bd); - output[30] = WRAPLOW(step1[1] - step1[30], bd); - output[31] = WRAPLOW(step1[0] - step1[31], bd); + output[0] = HIGHBD_WRAPLOW(step1[0] + step1[31], bd); + output[1] = HIGHBD_WRAPLOW(step1[1] + step1[30], bd); + output[2] = HIGHBD_WRAPLOW(step1[2] + step1[29], bd); + output[3] = HIGHBD_WRAPLOW(step1[3] + step1[28], bd); + output[4] = HIGHBD_WRAPLOW(step1[4] + step1[27], bd); + output[5] = HIGHBD_WRAPLOW(step1[5] + step1[26], bd); + output[6] = HIGHBD_WRAPLOW(step1[6] + step1[25], bd); + output[7] = HIGHBD_WRAPLOW(step1[7] + step1[24], bd); + output[8] = HIGHBD_WRAPLOW(step1[8] + 
step1[23], bd); + output[9] = HIGHBD_WRAPLOW(step1[9] + step1[22], bd); + output[10] = HIGHBD_WRAPLOW(step1[10] + step1[21], bd); + output[11] = HIGHBD_WRAPLOW(step1[11] + step1[20], bd); + output[12] = HIGHBD_WRAPLOW(step1[12] + step1[19], bd); + output[13] = HIGHBD_WRAPLOW(step1[13] + step1[18], bd); + output[14] = HIGHBD_WRAPLOW(step1[14] + step1[17], bd); + output[15] = HIGHBD_WRAPLOW(step1[15] + step1[16], bd); + output[16] = HIGHBD_WRAPLOW(step1[15] - step1[16], bd); + output[17] = HIGHBD_WRAPLOW(step1[14] - step1[17], bd); + output[18] = HIGHBD_WRAPLOW(step1[13] - step1[18], bd); + output[19] = HIGHBD_WRAPLOW(step1[12] - step1[19], bd); + output[20] = HIGHBD_WRAPLOW(step1[11] - step1[20], bd); + output[21] = HIGHBD_WRAPLOW(step1[10] - step1[21], bd); + output[22] = HIGHBD_WRAPLOW(step1[9] - step1[22], bd); + output[23] = HIGHBD_WRAPLOW(step1[8] - step1[23], bd); + output[24] = HIGHBD_WRAPLOW(step1[7] - step1[24], bd); + output[25] = HIGHBD_WRAPLOW(step1[6] - step1[25], bd); + output[26] = HIGHBD_WRAPLOW(step1[5] - step1[26], bd); + output[27] = HIGHBD_WRAPLOW(step1[4] - step1[27], bd); + output[28] = HIGHBD_WRAPLOW(step1[3] - step1[28], bd); + output[29] = HIGHBD_WRAPLOW(step1[2] - step1[29], bd); + output[30] = HIGHBD_WRAPLOW(step1[1] - step1[30], bd); + output[31] = HIGHBD_WRAPLOW(step1[0] - step1[31], bd); } void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8, @@ -2437,8 +2422,7 @@ void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8, // Rows for (i = 0; i < 32; ++i) { tran_low_t zero_coeff[16]; - for (j = 0; j < 16; ++j) - zero_coeff[j] = input[2 * j] | input[2 * j + 1]; + for (j = 0; j < 16; ++j) zero_coeff[j] = input[2 * j] | input[2 * j + 1]; for (j = 0; j < 8; ++j) zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1]; for (j = 0; j < 4; ++j) @@ -2456,8 +2440,7 @@ void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8, // Columns for (i = 0; i < 32; ++i) { - for (j = 0; j < 
32; ++j) - temp_in[j] = out[j * 32 + i]; + for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i]; highbd_idct32_c(temp_in, temp_out, bd); for (j = 0; j < 32; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( @@ -2468,7 +2451,7 @@ void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8, void vpx_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { - tran_low_t out[32 * 32] = {0}; + tran_low_t out[32 * 32] = { 0 }; tran_low_t *outptr = out; int i, j; tran_low_t temp_in[32], temp_out[32]; @@ -2483,8 +2466,7 @@ void vpx_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8, } // Columns for (i = 0; i < 32; ++i) { - for (j = 0; j < 32; ++j) - temp_in[j] = out[j * 32 + i]; + for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i]; highbd_idct32_c(temp_in, temp_out, bd); for (j = 0; j < 32; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( @@ -2499,14 +2481,13 @@ void vpx_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8, int a1; uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); - tran_low_t out = WRAPLOW( - highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd); - out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd); + tran_low_t out = + HIGHBD_WRAPLOW(highbd_dct_const_round_shift(input[0] * cospi_16_64), bd); + out = HIGHBD_WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64), bd); a1 = ROUND_POWER_OF_TWO(out, 6); for (j = 0; j < 32; ++j) { - for (i = 0; i < 32; ++i) - dest[i] = highbd_clip_pixel_add(dest[i], a1, bd); + for (i = 0; i < 32; ++i) dest[i] = highbd_clip_pixel_add(dest[i], a1, bd); dest += stride; } } diff --git a/libs/libvpx/vpx_dsp/inv_txfm.h b/libs/libvpx/vpx_dsp/inv_txfm.h index 23588139ed..e530730d57 100644 --- a/libs/libvpx/vpx_dsp/inv_txfm.h +++ b/libs/libvpx/vpx_dsp/inv_txfm.h @@ -21,7 +21,7 @@ extern "C" { #endif -static INLINE tran_low_t check_range(tran_high_t input) { +static INLINE tran_high_t check_range(tran_high_t input) { #if 
CONFIG_COEFFICIENT_RANGE_CHECKING // For valid VP9 input streams, intermediate stage coefficients should always // stay within the range of a signed 16 bit integer. Coefficients can go out @@ -32,17 +32,16 @@ static INLINE tran_low_t check_range(tran_high_t input) { assert(INT16_MIN <= input); assert(input <= INT16_MAX); #endif // CONFIG_COEFFICIENT_RANGE_CHECKING - return (tran_low_t)input; + return input; } -static INLINE tran_low_t dct_const_round_shift(tran_high_t input) { +static INLINE tran_high_t dct_const_round_shift(tran_high_t input) { tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS); - return check_range(rv); + return (tran_high_t)rv; } #if CONFIG_VP9_HIGHBITDEPTH -static INLINE tran_low_t highbd_check_range(tran_high_t input, - int bd) { +static INLINE tran_high_t highbd_check_range(tran_high_t input, int bd) { #if CONFIG_COEFFICIENT_RANGE_CHECKING // For valid highbitdepth VP9 streams, intermediate stage coefficients will // stay within the ranges: @@ -53,16 +52,15 @@ static INLINE tran_low_t highbd_check_range(tran_high_t input, const int32_t int_min = -int_max - 1; assert(int_min <= input); assert(input <= int_max); - (void) int_min; + (void)int_min; #endif // CONFIG_COEFFICIENT_RANGE_CHECKING - (void) bd; - return (tran_low_t)input; + (void)bd; + return input; } -static INLINE tran_low_t highbd_dct_const_round_shift(tran_high_t input, - int bd) { +static INLINE tran_high_t highbd_dct_const_round_shift(tran_high_t input) { tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS); - return highbd_check_range(rv, bd); + return (tran_high_t)rv; } #endif // CONFIG_VP9_HIGHBITDEPTH @@ -83,9 +81,19 @@ static INLINE tran_low_t highbd_dct_const_round_shift(tran_high_t input, // bd of 10 uses trans_low with 18bits, need to remove 14bits // bd of 12 uses trans_low with 20bits, need to remove 12bits // bd of x uses trans_low with 8+x bits, need to remove 24-x bits -#define WRAPLOW(x, bd) ((((int32_t)(x)) << (24 - bd)) >> (24 - bd)) -#else -#define 
WRAPLOW(x, bd) ((int32_t)(x)) + +#define WRAPLOW(x) ((((int32_t)check_range(x)) << 16) >> 16) +#if CONFIG_VP9_HIGHBITDEPTH +#define HIGHBD_WRAPLOW(x, bd) \ + ((((int32_t)highbd_check_range((x), bd)) << (24 - bd)) >> (24 - bd)) +#endif // CONFIG_VP9_HIGHBITDEPTH + +#else // CONFIG_EMULATE_HARDWARE + +#define WRAPLOW(x) ((int32_t)check_range(x)) +#if CONFIG_VP9_HIGHBITDEPTH +#define HIGHBD_WRAPLOW(x, bd) ((int32_t)highbd_check_range((x), bd)) +#endif // CONFIG_VP9_HIGHBITDEPTH #endif // CONFIG_EMULATE_HARDWARE void idct4_c(const tran_low_t *input, tran_low_t *output); @@ -107,14 +115,14 @@ void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd); static INLINE uint16_t highbd_clip_pixel_add(uint16_t dest, tran_high_t trans, int bd) { - trans = WRAPLOW(trans, bd); - return clip_pixel_highbd(WRAPLOW(dest + trans, bd), bd); + trans = HIGHBD_WRAPLOW(trans, bd); + return clip_pixel_highbd(dest + (int)trans, bd); } #endif static INLINE uint8_t clip_pixel_add(uint8_t dest, tran_high_t trans) { - trans = WRAPLOW(trans, 8); - return clip_pixel(WRAPLOW(dest + trans, 8)); + trans = WRAPLOW(trans); + return clip_pixel(dest + (int)trans); } #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vpx_dsp/loopfilter.c b/libs/libvpx/vpx_dsp/loopfilter.c index 66f4d9576c..2f7eff8b05 100644 --- a/libs/libvpx/vpx_dsp/loopfilter.c +++ b/libs/libvpx/vpx_dsp/loopfilter.c @@ -11,6 +11,7 @@ #include #include "./vpx_config.h" +#include "./vpx_dsp_rtcd.h" #include "vpx_dsp/vpx_dsp_common.h" #include "vpx_ports/mem.h" @@ -21,23 +22,18 @@ static INLINE int8_t signed_char_clamp(int t) { #if CONFIG_VP9_HIGHBITDEPTH static INLINE int16_t signed_char_clamp_high(int t, int bd) { switch (bd) { - case 10: - return (int16_t)clamp(t, -128*4, 128*4-1); - case 12: - return (int16_t)clamp(t, -128*16, 128*16-1); + case 10: return (int16_t)clamp(t, -128 * 4, 128 * 4 - 1); + case 12: return (int16_t)clamp(t, -128 * 16, 128 * 16 - 1); case 8: - default: - return (int16_t)clamp(t, 
-128, 128-1); + default: return (int16_t)clamp(t, -128, 128 - 1); } } #endif -// should we apply any filter at all: 11111111 yes, 00000000 no -static INLINE int8_t filter_mask(uint8_t limit, uint8_t blimit, - uint8_t p3, uint8_t p2, - uint8_t p1, uint8_t p0, - uint8_t q0, uint8_t q1, - uint8_t q2, uint8_t q3) { +// Should we apply any filter at all: 11111111 yes, 00000000 no +static INLINE int8_t filter_mask(uint8_t limit, uint8_t blimit, uint8_t p3, + uint8_t p2, uint8_t p1, uint8_t p0, uint8_t q0, + uint8_t q1, uint8_t q2, uint8_t q3) { int8_t mask = 0; mask |= (abs(p3 - p2) > limit) * -1; mask |= (abs(p2 - p1) > limit) * -1; @@ -45,14 +41,12 @@ static INLINE int8_t filter_mask(uint8_t limit, uint8_t blimit, mask |= (abs(q1 - q0) > limit) * -1; mask |= (abs(q2 - q1) > limit) * -1; mask |= (abs(q3 - q2) > limit) * -1; - mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1; + mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1; return ~mask; } -static INLINE int8_t flat_mask4(uint8_t thresh, - uint8_t p3, uint8_t p2, - uint8_t p1, uint8_t p0, - uint8_t q0, uint8_t q1, +static INLINE int8_t flat_mask4(uint8_t thresh, uint8_t p3, uint8_t p2, + uint8_t p1, uint8_t p0, uint8_t q0, uint8_t q1, uint8_t q2, uint8_t q3) { int8_t mask = 0; mask |= (abs(p1 - p0) > thresh) * -1; @@ -64,24 +58,22 @@ static INLINE int8_t flat_mask4(uint8_t thresh, return ~mask; } -static INLINE int8_t flat_mask5(uint8_t thresh, - uint8_t p4, uint8_t p3, - uint8_t p2, uint8_t p1, - uint8_t p0, uint8_t q0, - uint8_t q1, uint8_t q2, - uint8_t q3, uint8_t q4) { +static INLINE int8_t flat_mask5(uint8_t thresh, uint8_t p4, uint8_t p3, + uint8_t p2, uint8_t p1, uint8_t p0, uint8_t q0, + uint8_t q1, uint8_t q2, uint8_t q3, + uint8_t q4) { int8_t mask = ~flat_mask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3); mask |= (abs(p4 - p0) > thresh) * -1; mask |= (abs(q4 - q0) > thresh) * -1; return ~mask; } -// is there high edge variance internal edge: 11111111 yes, 00000000 no +// Is there high 
edge variance internal edge: 11111111 yes, 00000000 no static INLINE int8_t hev_mask(uint8_t thresh, uint8_t p1, uint8_t p0, uint8_t q0, uint8_t q1) { int8_t hev = 0; - hev |= (abs(p1 - p0) > thresh) * -1; - hev |= (abs(q1 - q0) > thresh) * -1; + hev |= (abs(p1 - p0) > thresh) * -1; + hev |= (abs(q1 - q0) > thresh) * -1; return hev; } @@ -89,10 +81,10 @@ static INLINE void filter4(int8_t mask, uint8_t thresh, uint8_t *op1, uint8_t *op0, uint8_t *oq0, uint8_t *oq1) { int8_t filter1, filter2; - const int8_t ps1 = (int8_t) *op1 ^ 0x80; - const int8_t ps0 = (int8_t) *op0 ^ 0x80; - const int8_t qs0 = (int8_t) *oq0 ^ 0x80; - const int8_t qs1 = (int8_t) *oq1 ^ 0x80; + const int8_t ps1 = (int8_t)*op1 ^ 0x80; + const int8_t ps0 = (int8_t)*op0 ^ 0x80; + const int8_t qs0 = (int8_t)*oq0 ^ 0x80; + const int8_t qs1 = (int8_t)*oq1 ^ 0x80; const uint8_t hev = hev_mask(thresh, *op1, *op0, *oq0, *oq1); // add outer taps if we have high edge variance @@ -119,16 +111,16 @@ static INLINE void filter4(int8_t mask, uint8_t thresh, uint8_t *op1, void vpx_lpf_horizontal_4_c(uint8_t *s, int p /* pitch */, const uint8_t *blimit, const uint8_t *limit, - const uint8_t *thresh, int count) { + const uint8_t *thresh) { int i; // loop filter designed to work using chars so that we can make maximum use // of 8 bit simd instructions. 
- for (i = 0; i < 8 * count; ++i) { + for (i = 0; i < 8; ++i) { const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p]; - const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p]; - const int8_t mask = filter_mask(*limit, *blimit, - p3, p2, p1, p0, q0, q1, q2, q3); + const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p]; + const int8_t mask = + filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); filter4(mask, *thresh, s - 2 * p, s - 1 * p, s, s + 1 * p); ++s; } @@ -138,22 +130,21 @@ void vpx_lpf_horizontal_4_dual_c(uint8_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1, 1); + vpx_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0); + vpx_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1); } void vpx_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count) { + const uint8_t *limit, const uint8_t *thresh) { int i; // loop filter designed to work using chars so that we can make maximum use // of 8 bit simd instructions. 
- for (i = 0; i < 8 * count; ++i) { + for (i = 0; i < 8; ++i) { const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1]; - const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3]; - const int8_t mask = filter_mask(*limit, *blimit, - p3, p2, p1, p0, q0, q1, q2, q3); + const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3]; + const int8_t mask = + filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); filter4(mask, *thresh, s - 2, s - 1, s, s + 1); s += pitch; } @@ -163,15 +154,13 @@ void vpx_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0, 1); - vpx_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, - thresh1, 1); + vpx_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0); + vpx_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1); } static INLINE void filter8(int8_t mask, uint8_t thresh, uint8_t flat, - uint8_t *op3, uint8_t *op2, - uint8_t *op1, uint8_t *op0, - uint8_t *oq0, uint8_t *oq1, + uint8_t *op3, uint8_t *op2, uint8_t *op1, + uint8_t *op0, uint8_t *oq0, uint8_t *oq1, uint8_t *oq2, uint8_t *oq3) { if (flat && mask) { const uint8_t p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0; @@ -185,26 +174,25 @@ static INLINE void filter8(int8_t mask, uint8_t thresh, uint8_t flat, *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + 2 * q1 + q2 + q3 + q3, 3); *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + 2 * q2 + q3 + q3 + q3, 3); } else { - filter4(mask, thresh, op1, op0, oq0, oq1); + filter4(mask, thresh, op1, op0, oq0, oq1); } } void vpx_lpf_horizontal_8_c(uint8_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count) { + const uint8_t *limit, const uint8_t *thresh) { int i; // loop filter designed to work using chars so that we can make maximum use // of 8 bit simd instructions. 
- for (i = 0; i < 8 * count; ++i) { + for (i = 0; i < 8; ++i) { const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p]; const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p]; - const int8_t mask = filter_mask(*limit, *blimit, - p3, p2, p1, p0, q0, q1, q2, q3); + const int8_t mask = + filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3); - filter8(mask, *thresh, flat, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, - s, s + 1 * p, s + 2 * p, s + 3 * p); + filter8(mask, *thresh, flat, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, s, + s + 1 * p, s + 2 * p, s + 3 * p); ++s; } } @@ -213,23 +201,22 @@ void vpx_lpf_horizontal_8_dual_c(uint8_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1, 1); + vpx_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0); + vpx_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1); } void vpx_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count) { + const uint8_t *limit, const uint8_t *thresh) { int i; - for (i = 0; i < 8 * count; ++i) { + for (i = 0; i < 8; ++i) { const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1]; const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3]; - const int8_t mask = filter_mask(*limit, *blimit, - p3, p2, p1, p0, q0, q1, q2, q3); + const int8_t mask = + filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3); - filter8(mask, *thresh, flat, s - 4, s - 3, s - 2, s - 1, - s, s + 1, s + 2, s + 3); + filter8(mask, *thresh, flat, s - 4, s - 3, s - 2, s - 1, s, s + 1, s + 2, + s + 3); s += pitch; } } @@ -238,65 +225,67 @@ void 
vpx_lpf_vertical_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0, 1); - vpx_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, - thresh1, 1); + vpx_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0); + vpx_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1); } -static INLINE void filter16(int8_t mask, uint8_t thresh, - uint8_t flat, uint8_t flat2, - uint8_t *op7, uint8_t *op6, - uint8_t *op5, uint8_t *op4, - uint8_t *op3, uint8_t *op2, - uint8_t *op1, uint8_t *op0, - uint8_t *oq0, uint8_t *oq1, - uint8_t *oq2, uint8_t *oq3, - uint8_t *oq4, uint8_t *oq5, +static INLINE void filter16(int8_t mask, uint8_t thresh, uint8_t flat, + uint8_t flat2, uint8_t *op7, uint8_t *op6, + uint8_t *op5, uint8_t *op4, uint8_t *op3, + uint8_t *op2, uint8_t *op1, uint8_t *op0, + uint8_t *oq0, uint8_t *oq1, uint8_t *oq2, + uint8_t *oq3, uint8_t *oq4, uint8_t *oq5, uint8_t *oq6, uint8_t *oq7) { if (flat2 && flat && mask) { - const uint8_t p7 = *op7, p6 = *op6, p5 = *op5, p4 = *op4, - p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0; + const uint8_t p7 = *op7, p6 = *op6, p5 = *op5, p4 = *op4, p3 = *op3, + p2 = *op2, p1 = *op1, p0 = *op0; - const uint8_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3, - q4 = *oq4, q5 = *oq5, q6 = *oq6, q7 = *oq7; + const uint8_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3, q4 = *oq4, + q5 = *oq5, q6 = *oq6, q7 = *oq7; // 15-tap filter [1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1] - *op6 = ROUND_POWER_OF_TWO(p7 * 7 + p6 * 2 + p5 + p4 + p3 + p2 + p1 + p0 + - q0, 4); - *op5 = ROUND_POWER_OF_TWO(p7 * 6 + p6 + p5 * 2 + p4 + p3 + p2 + p1 + p0 + - q0 + q1, 4); - *op4 = ROUND_POWER_OF_TWO(p7 * 5 + p6 + p5 + p4 * 2 + p3 + p2 + p1 + p0 + - q0 + q1 + q2, 4); - *op3 = ROUND_POWER_OF_TWO(p7 * 4 + p6 + p5 + p4 + p3 * 2 + p2 + p1 + p0 + - q0 + q1 + q2 + q3, 4); - *op2 = 
ROUND_POWER_OF_TWO(p7 * 3 + p6 + p5 + p4 + p3 + p2 * 2 + p1 + p0 + - q0 + q1 + q2 + q3 + q4, 4); + *op6 = ROUND_POWER_OF_TWO( + p7 * 7 + p6 * 2 + p5 + p4 + p3 + p2 + p1 + p0 + q0, 4); + *op5 = ROUND_POWER_OF_TWO( + p7 * 6 + p6 + p5 * 2 + p4 + p3 + p2 + p1 + p0 + q0 + q1, 4); + *op4 = ROUND_POWER_OF_TWO( + p7 * 5 + p6 + p5 + p4 * 2 + p3 + p2 + p1 + p0 + q0 + q1 + q2, 4); + *op3 = ROUND_POWER_OF_TWO( + p7 * 4 + p6 + p5 + p4 + p3 * 2 + p2 + p1 + p0 + q0 + q1 + q2 + q3, 4); + *op2 = ROUND_POWER_OF_TWO( + p7 * 3 + p6 + p5 + p4 + p3 + p2 * 2 + p1 + p0 + q0 + q1 + q2 + q3 + q4, + 4); *op1 = ROUND_POWER_OF_TWO(p7 * 2 + p6 + p5 + p4 + p3 + p2 + p1 * 2 + p0 + - q0 + q1 + q2 + q3 + q4 + q5, 4); - *op0 = ROUND_POWER_OF_TWO(p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 + - q0 + q1 + q2 + q3 + q4 + q5 + q6, 4); - *oq0 = ROUND_POWER_OF_TWO(p6 + p5 + p4 + p3 + p2 + p1 + p0 + - q0 * 2 + q1 + q2 + q3 + q4 + q5 + q6 + q7, 4); - *oq1 = ROUND_POWER_OF_TWO(p5 + p4 + p3 + p2 + p1 + p0 + - q0 + q1 * 2 + q2 + q3 + q4 + q5 + q6 + q7 * 2, 4); - *oq2 = ROUND_POWER_OF_TWO(p4 + p3 + p2 + p1 + p0 + - q0 + q1 + q2 * 2 + q3 + q4 + q5 + q6 + q7 * 3, 4); - *oq3 = ROUND_POWER_OF_TWO(p3 + p2 + p1 + p0 + - q0 + q1 + q2 + q3 * 2 + q4 + q5 + q6 + q7 * 4, 4); - *oq4 = ROUND_POWER_OF_TWO(p2 + p1 + p0 + - q0 + q1 + q2 + q3 + q4 * 2 + q5 + q6 + q7 * 5, 4); - *oq5 = ROUND_POWER_OF_TWO(p1 + p0 + - q0 + q1 + q2 + q3 + q4 + q5 * 2 + q6 + q7 * 6, 4); - *oq6 = ROUND_POWER_OF_TWO(p0 + - q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 + q7 * 7, 4); + q0 + q1 + q2 + q3 + q4 + q5, + 4); + *op0 = ROUND_POWER_OF_TWO(p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 + q0 + + q1 + q2 + q3 + q4 + q5 + q6, + 4); + *oq0 = ROUND_POWER_OF_TWO(p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 * 2 + q1 + + q2 + q3 + q4 + q5 + q6 + q7, + 4); + *oq1 = ROUND_POWER_OF_TWO(p5 + p4 + p3 + p2 + p1 + p0 + q0 + q1 * 2 + q2 + + q3 + q4 + q5 + q6 + q7 * 2, + 4); + *oq2 = ROUND_POWER_OF_TWO( + p4 + p3 + p2 + p1 + p0 + q0 + q1 + q2 * 2 + q3 + q4 + q5 + q6 + q7 * 3, + 4); + 
*oq3 = ROUND_POWER_OF_TWO( + p3 + p2 + p1 + p0 + q0 + q1 + q2 + q3 * 2 + q4 + q5 + q6 + q7 * 4, 4); + *oq4 = ROUND_POWER_OF_TWO( + p2 + p1 + p0 + q0 + q1 + q2 + q3 + q4 * 2 + q5 + q6 + q7 * 5, 4); + *oq5 = ROUND_POWER_OF_TWO( + p1 + p0 + q0 + q1 + q2 + q3 + q4 + q5 * 2 + q6 + q7 * 6, 4); + *oq6 = ROUND_POWER_OF_TWO( + p0 + q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 + q7 * 7, 4); } else { filter8(mask, thresh, flat, op3, op2, op1, op0, oq0, oq1, oq2, oq3); } } -void vpx_lpf_horizontal_16_c(uint8_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count) { +static void mb_lpf_horizontal_edge_w(uint8_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh, int count) { int i; // loop filter designed to work using chars so that we can make maximum use @@ -304,41 +293,48 @@ void vpx_lpf_horizontal_16_c(uint8_t *s, int p, const uint8_t *blimit, for (i = 0; i < 8 * count; ++i) { const uint8_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p]; const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p]; - const int8_t mask = filter_mask(*limit, *blimit, - p3, p2, p1, p0, q0, q1, q2, q3); + const int8_t mask = + filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3); - const int8_t flat2 = flat_mask5(1, - s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], p0, - q0, s[4 * p], s[5 * p], s[6 * p], s[7 * p]); + const int8_t flat2 = + flat_mask5(1, s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], p0, q0, + s[4 * p], s[5 * p], s[6 * p], s[7 * p]); - filter16(mask, *thresh, flat, flat2, - s - 8 * p, s - 7 * p, s - 6 * p, s - 5 * p, - s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, - s, s + 1 * p, s + 2 * p, s + 3 * p, - s + 4 * p, s + 5 * p, s + 6 * p, s + 7 * p); + filter16(mask, *thresh, flat, flat2, s - 8 * p, s - 7 * p, s - 6 * p, + s - 5 * p, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, s, + s + 1 * p, s + 2 * p, s + 3 * p, s + 4 * p, s + 5 
* p, s + 6 * p, + s + 7 * p); ++s; } } -static void mb_lpf_vertical_edge_w(uint8_t *s, int p, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, +void vpx_lpf_horizontal_edge_8_c(uint8_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh) { + mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 1); +} + +void vpx_lpf_horizontal_edge_16_c(uint8_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh) { + mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 2); +} + +static void mb_lpf_vertical_edge_w(uint8_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh, int count) { int i; for (i = 0; i < count; ++i) { const uint8_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1]; - const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3]; - const int8_t mask = filter_mask(*limit, *blimit, - p3, p2, p1, p0, q0, q1, q2, q3); + const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3]; + const int8_t mask = + filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); const int8_t flat = flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3); - const int8_t flat2 = flat_mask5(1, s[-8], s[-7], s[-6], s[-5], p0, - q0, s[4], s[5], s[6], s[7]); + const int8_t flat2 = flat_mask5(1, s[-8], s[-7], s[-6], s[-5], p0, q0, s[4], + s[5], s[6], s[7]); - filter16(mask, *thresh, flat, flat2, - s - 8, s - 7, s - 6, s - 5, s - 4, s - 3, s - 2, s - 1, - s, s + 1, s + 2, s + 3, s + 4, s + 5, s + 6, s + 7); + filter16(mask, *thresh, flat, flat2, s - 8, s - 7, s - 6, s - 5, s - 4, + s - 3, s - 2, s - 1, s, s + 1, s + 2, s + 3, s + 4, s + 5, s + 6, + s + 7); s += p; } } @@ -356,9 +352,8 @@ void vpx_lpf_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit, #if CONFIG_VP9_HIGHBITDEPTH // Should we apply any filter at all: 11111111 yes, 00000000 no ? 
static INLINE int8_t highbd_filter_mask(uint8_t limit, uint8_t blimit, - uint16_t p3, uint16_t p2, - uint16_t p1, uint16_t p0, - uint16_t q0, uint16_t q1, + uint16_t p3, uint16_t p2, uint16_t p1, + uint16_t p0, uint16_t q0, uint16_t q1, uint16_t q2, uint16_t q3, int bd) { int8_t mask = 0; int16_t limit16 = (uint16_t)limit << (bd - 8); @@ -369,15 +364,14 @@ static INLINE int8_t highbd_filter_mask(uint8_t limit, uint8_t blimit, mask |= (abs(q1 - q0) > limit16) * -1; mask |= (abs(q2 - q1) > limit16) * -1; mask |= (abs(q3 - q2) > limit16) * -1; - mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit16) * -1; + mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit16) * -1; return ~mask; } -static INLINE int8_t highbd_flat_mask4(uint8_t thresh, - uint16_t p3, uint16_t p2, - uint16_t p1, uint16_t p0, - uint16_t q0, uint16_t q1, - uint16_t q2, uint16_t q3, int bd) { +static INLINE int8_t highbd_flat_mask4(uint8_t thresh, uint16_t p3, uint16_t p2, + uint16_t p1, uint16_t p0, uint16_t q0, + uint16_t q1, uint16_t q2, uint16_t q3, + int bd) { int8_t mask = 0; int16_t thresh16 = (uint16_t)thresh << (bd - 8); mask |= (abs(p1 - p0) > thresh16) * -1; @@ -389,11 +383,9 @@ static INLINE int8_t highbd_flat_mask4(uint8_t thresh, return ~mask; } -static INLINE int8_t highbd_flat_mask5(uint8_t thresh, - uint16_t p4, uint16_t p3, - uint16_t p2, uint16_t p1, - uint16_t p0, uint16_t q0, - uint16_t q1, uint16_t q2, +static INLINE int8_t highbd_flat_mask5(uint8_t thresh, uint16_t p4, uint16_t p3, + uint16_t p2, uint16_t p1, uint16_t p0, + uint16_t q0, uint16_t q1, uint16_t q2, uint16_t q3, uint16_t q4, int bd) { int8_t mask = ~highbd_flat_mask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3, bd); int16_t thresh16 = (uint16_t)thresh << (bd - 8); @@ -450,12 +442,12 @@ static INLINE void highbd_filter4(int8_t mask, uint8_t thresh, uint16_t *op1, void vpx_highbd_lpf_horizontal_4_c(uint16_t *s, int p /* pitch */, const uint8_t *blimit, const uint8_t *limit, - const uint8_t *thresh, int count, int bd) { + 
const uint8_t *thresh, int bd) { int i; // loop filter designed to work using chars so that we can make maximum use // of 8 bit simd instructions. - for (i = 0; i < 8 * count; ++i) { + for (i = 0; i < 8; ++i) { const uint16_t p3 = s[-4 * p]; const uint16_t p2 = s[-3 * p]; const uint16_t p1 = s[-2 * p]; @@ -464,59 +456,50 @@ void vpx_highbd_lpf_horizontal_4_c(uint16_t *s, int p /* pitch */, const uint16_t q1 = s[1 * p]; const uint16_t q2 = s[2 * p]; const uint16_t q3 = s[3 * p]; - const int8_t mask = highbd_filter_mask(*limit, *blimit, - p3, p2, p1, p0, q0, q1, q2, q3, bd); + const int8_t mask = + highbd_filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3, bd); highbd_filter4(mask, *thresh, s - 2 * p, s - 1 * p, s, s + 1 * p, bd); ++s; } } -void vpx_highbd_lpf_horizontal_4_dual_c(uint16_t *s, int p, - const uint8_t *blimit0, - const uint8_t *limit0, - const uint8_t *thresh0, - const uint8_t *blimit1, - const uint8_t *limit1, - const uint8_t *thresh1, - int bd) { - vpx_highbd_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0, 1, bd); - vpx_highbd_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1, 1, bd); +void vpx_highbd_lpf_horizontal_4_dual_c( + uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, + const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, + const uint8_t *thresh1, int bd) { + vpx_highbd_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0, bd); + vpx_highbd_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1, bd); } void vpx_highbd_lpf_vertical_4_c(uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, - int count, int bd) { + int bd) { int i; // loop filter designed to work using chars so that we can make maximum use // of 8 bit simd instructions. 
- for (i = 0; i < 8 * count; ++i) { + for (i = 0; i < 8; ++i) { const uint16_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1]; - const uint16_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3]; - const int8_t mask = highbd_filter_mask(*limit, *blimit, - p3, p2, p1, p0, q0, q1, q2, q3, bd); + const uint16_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3]; + const int8_t mask = + highbd_filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3, bd); highbd_filter4(mask, *thresh, s - 2, s - 1, s, s + 1, bd); s += pitch; } } -void vpx_highbd_lpf_vertical_4_dual_c(uint16_t *s, int pitch, - const uint8_t *blimit0, - const uint8_t *limit0, - const uint8_t *thresh0, - const uint8_t *blimit1, - const uint8_t *limit1, - const uint8_t *thresh1, - int bd) { - vpx_highbd_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0, 1, bd); - vpx_highbd_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, - thresh1, 1, bd); +void vpx_highbd_lpf_vertical_4_dual_c( + uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, + const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, + const uint8_t *thresh1, int bd) { + vpx_highbd_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0, bd); + vpx_highbd_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, + bd); } static INLINE void highbd_filter8(int8_t mask, uint8_t thresh, uint8_t flat, - uint16_t *op3, uint16_t *op2, - uint16_t *op1, uint16_t *op0, - uint16_t *oq0, uint16_t *oq1, + uint16_t *op3, uint16_t *op2, uint16_t *op1, + uint16_t *op0, uint16_t *oq0, uint16_t *oq1, uint16_t *oq2, uint16_t *oq3, int bd) { if (flat && mask) { const uint16_t p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0; @@ -530,86 +513,72 @@ static INLINE void highbd_filter8(int8_t mask, uint8_t thresh, uint8_t flat, *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + 2 * q1 + q2 + q3 + q3, 3); *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + 2 * q2 + q3 + q3 + q3, 3); } else { - highbd_filter4(mask, thresh, op1, op0, oq0, oq1, bd); + highbd_filter4(mask, 
thresh, op1, op0, oq0, oq1, bd); } } void vpx_highbd_lpf_horizontal_8_c(uint16_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, - int count, int bd) { + int bd) { int i; // loop filter designed to work using chars so that we can make maximum use // of 8 bit simd instructions. - for (i = 0; i < 8 * count; ++i) { + for (i = 0; i < 8; ++i) { const uint16_t p3 = s[-4 * p], p2 = s[-3 * p], p1 = s[-2 * p], p0 = s[-p]; const uint16_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p]; - const int8_t mask = highbd_filter_mask(*limit, *blimit, - p3, p2, p1, p0, q0, q1, q2, q3, bd); - const int8_t flat = highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3, - bd); - highbd_filter8(mask, *thresh, flat, - s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, - s, s + 1 * p, s + 2 * p, s + 3 * p, bd); + const int8_t mask = + highbd_filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3, bd); + const int8_t flat = + highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3, bd); + highbd_filter8(mask, *thresh, flat, s - 4 * p, s - 3 * p, s - 2 * p, + s - 1 * p, s, s + 1 * p, s + 2 * p, s + 3 * p, bd); ++s; } } -void vpx_highbd_lpf_horizontal_8_dual_c(uint16_t *s, int p, - const uint8_t *blimit0, - const uint8_t *limit0, - const uint8_t *thresh0, - const uint8_t *blimit1, - const uint8_t *limit1, - const uint8_t *thresh1, - int bd) { - vpx_highbd_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0, 1, bd); - vpx_highbd_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1, 1, bd); +void vpx_highbd_lpf_horizontal_8_dual_c( + uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, + const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, + const uint8_t *thresh1, int bd) { + vpx_highbd_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0, bd); + vpx_highbd_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1, bd); } void vpx_highbd_lpf_vertical_8_c(uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t 
*thresh, - int count, int bd) { + int bd) { int i; - for (i = 0; i < 8 * count; ++i) { + for (i = 0; i < 8; ++i) { const uint16_t p3 = s[-4], p2 = s[-3], p1 = s[-2], p0 = s[-1]; const uint16_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3]; - const int8_t mask = highbd_filter_mask(*limit, *blimit, - p3, p2, p1, p0, q0, q1, q2, q3, bd); - const int8_t flat = highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3, - bd); - highbd_filter8(mask, *thresh, flat, - s - 4, s - 3, s - 2, s - 1, - s, s + 1, s + 2, s + 3, - bd); + const int8_t mask = + highbd_filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3, bd); + const int8_t flat = + highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3, bd); + highbd_filter8(mask, *thresh, flat, s - 4, s - 3, s - 2, s - 1, s, s + 1, + s + 2, s + 3, bd); s += pitch; } } -void vpx_highbd_lpf_vertical_8_dual_c(uint16_t *s, int pitch, - const uint8_t *blimit0, - const uint8_t *limit0, - const uint8_t *thresh0, - const uint8_t *blimit1, - const uint8_t *limit1, - const uint8_t *thresh1, - int bd) { - vpx_highbd_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0, 1, bd); - vpx_highbd_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, - thresh1, 1, bd); +void vpx_highbd_lpf_vertical_8_dual_c( + uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, + const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, + const uint8_t *thresh1, int bd) { + vpx_highbd_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0, bd); + vpx_highbd_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, + bd); } -static INLINE void highbd_filter16(int8_t mask, uint8_t thresh, - uint8_t flat, uint8_t flat2, - uint16_t *op7, uint16_t *op6, - uint16_t *op5, uint16_t *op4, - uint16_t *op3, uint16_t *op2, - uint16_t *op1, uint16_t *op0, - uint16_t *oq0, uint16_t *oq1, - uint16_t *oq2, uint16_t *oq3, - uint16_t *oq4, uint16_t *oq5, +static INLINE void highbd_filter16(int8_t mask, uint8_t thresh, uint8_t flat, + uint8_t flat2, 
uint16_t *op7, uint16_t *op6, + uint16_t *op5, uint16_t *op4, uint16_t *op3, + uint16_t *op2, uint16_t *op1, uint16_t *op0, + uint16_t *oq0, uint16_t *oq1, uint16_t *oq2, + uint16_t *oq3, uint16_t *oq4, uint16_t *oq5, uint16_t *oq6, uint16_t *oq7, int bd) { if (flat2 && flat && mask) { const uint16_t p7 = *op7; @@ -630,43 +599,51 @@ static INLINE void highbd_filter16(int8_t mask, uint8_t thresh, const uint16_t q7 = *oq7; // 15-tap filter [1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1] - *op6 = ROUND_POWER_OF_TWO(p7 * 7 + p6 * 2 + p5 + p4 + p3 + p2 + p1 + p0 + - q0, 4); - *op5 = ROUND_POWER_OF_TWO(p7 * 6 + p6 + p5 * 2 + p4 + p3 + p2 + p1 + p0 + - q0 + q1, 4); - *op4 = ROUND_POWER_OF_TWO(p7 * 5 + p6 + p5 + p4 * 2 + p3 + p2 + p1 + p0 + - q0 + q1 + q2, 4); - *op3 = ROUND_POWER_OF_TWO(p7 * 4 + p6 + p5 + p4 + p3 * 2 + p2 + p1 + p0 + - q0 + q1 + q2 + q3, 4); - *op2 = ROUND_POWER_OF_TWO(p7 * 3 + p6 + p5 + p4 + p3 + p2 * 2 + p1 + p0 + - q0 + q1 + q2 + q3 + q4, 4); + *op6 = ROUND_POWER_OF_TWO( + p7 * 7 + p6 * 2 + p5 + p4 + p3 + p2 + p1 + p0 + q0, 4); + *op5 = ROUND_POWER_OF_TWO( + p7 * 6 + p6 + p5 * 2 + p4 + p3 + p2 + p1 + p0 + q0 + q1, 4); + *op4 = ROUND_POWER_OF_TWO( + p7 * 5 + p6 + p5 + p4 * 2 + p3 + p2 + p1 + p0 + q0 + q1 + q2, 4); + *op3 = ROUND_POWER_OF_TWO( + p7 * 4 + p6 + p5 + p4 + p3 * 2 + p2 + p1 + p0 + q0 + q1 + q2 + q3, 4); + *op2 = ROUND_POWER_OF_TWO( + p7 * 3 + p6 + p5 + p4 + p3 + p2 * 2 + p1 + p0 + q0 + q1 + q2 + q3 + q4, + 4); *op1 = ROUND_POWER_OF_TWO(p7 * 2 + p6 + p5 + p4 + p3 + p2 + p1 * 2 + p0 + - q0 + q1 + q2 + q3 + q4 + q5, 4); - *op0 = ROUND_POWER_OF_TWO(p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 + - q0 + q1 + q2 + q3 + q4 + q5 + q6, 4); - *oq0 = ROUND_POWER_OF_TWO(p6 + p5 + p4 + p3 + p2 + p1 + p0 + - q0 * 2 + q1 + q2 + q3 + q4 + q5 + q6 + q7, 4); - *oq1 = ROUND_POWER_OF_TWO(p5 + p4 + p3 + p2 + p1 + p0 + - q0 + q1 * 2 + q2 + q3 + q4 + q5 + q6 + q7 * 2, 4); - *oq2 = ROUND_POWER_OF_TWO(p4 + p3 + p2 + p1 + p0 + - q0 + q1 + q2 * 2 + q3 + q4 + q5 + q6 + q7 * 
3, 4); - *oq3 = ROUND_POWER_OF_TWO(p3 + p2 + p1 + p0 + - q0 + q1 + q2 + q3 * 2 + q4 + q5 + q6 + q7 * 4, 4); - *oq4 = ROUND_POWER_OF_TWO(p2 + p1 + p0 + - q0 + q1 + q2 + q3 + q4 * 2 + q5 + q6 + q7 * 5, 4); - *oq5 = ROUND_POWER_OF_TWO(p1 + p0 + - q0 + q1 + q2 + q3 + q4 + q5 * 2 + q6 + q7 * 6, 4); - *oq6 = ROUND_POWER_OF_TWO(p0 + - q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 + q7 * 7, 4); + q0 + q1 + q2 + q3 + q4 + q5, + 4); + *op0 = ROUND_POWER_OF_TWO(p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 + q0 + + q1 + q2 + q3 + q4 + q5 + q6, + 4); + *oq0 = ROUND_POWER_OF_TWO(p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 * 2 + q1 + + q2 + q3 + q4 + q5 + q6 + q7, + 4); + *oq1 = ROUND_POWER_OF_TWO(p5 + p4 + p3 + p2 + p1 + p0 + q0 + q1 * 2 + q2 + + q3 + q4 + q5 + q6 + q7 * 2, + 4); + *oq2 = ROUND_POWER_OF_TWO( + p4 + p3 + p2 + p1 + p0 + q0 + q1 + q2 * 2 + q3 + q4 + q5 + q6 + q7 * 3, + 4); + *oq3 = ROUND_POWER_OF_TWO( + p3 + p2 + p1 + p0 + q0 + q1 + q2 + q3 * 2 + q4 + q5 + q6 + q7 * 4, 4); + *oq4 = ROUND_POWER_OF_TWO( + p2 + p1 + p0 + q0 + q1 + q2 + q3 + q4 * 2 + q5 + q6 + q7 * 5, 4); + *oq5 = ROUND_POWER_OF_TWO( + p1 + p0 + q0 + q1 + q2 + q3 + q4 + q5 * 2 + q6 + q7 * 6, 4); + *oq6 = ROUND_POWER_OF_TWO( + p0 + q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 + q7 * 7, 4); } else { highbd_filter8(mask, thresh, flat, op3, op2, op1, op0, oq0, oq1, oq2, oq3, bd); } } -void vpx_highbd_lpf_horizontal_16_c(uint16_t *s, int p, const uint8_t *blimit, - const uint8_t *limit, const uint8_t *thresh, - int count, int bd) { +static void highbd_mb_lpf_horizontal_edge_w(uint16_t *s, int p, + const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh, int count, + int bd) { int i; // loop filter designed to work using chars so that we can make maximum use @@ -680,29 +657,41 @@ void vpx_highbd_lpf_horizontal_16_c(uint16_t *s, int p, const uint8_t *blimit, const uint16_t q1 = s[1 * p]; const uint16_t q2 = s[2 * p]; const uint16_t q3 = s[3 * p]; - const int8_t mask = highbd_filter_mask(*limit, *blimit, - p3, p2, p1, 
p0, q0, q1, q2, q3, bd); - const int8_t flat = highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3, - bd); - const int8_t flat2 = highbd_flat_mask5( - 1, s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], p0, - q0, s[4 * p], s[5 * p], s[6 * p], s[7 * p], bd); + const int8_t mask = + highbd_filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3, bd); + const int8_t flat = + highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3, bd); + const int8_t flat2 = + highbd_flat_mask5(1, s[-8 * p], s[-7 * p], s[-6 * p], s[-5 * p], p0, q0, + s[4 * p], s[5 * p], s[6 * p], s[7 * p], bd); - highbd_filter16(mask, *thresh, flat, flat2, - s - 8 * p, s - 7 * p, s - 6 * p, s - 5 * p, - s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, - s, s + 1 * p, s + 2 * p, s + 3 * p, - s + 4 * p, s + 5 * p, s + 6 * p, s + 7 * p, - bd); + highbd_filter16(mask, *thresh, flat, flat2, s - 8 * p, s - 7 * p, s - 6 * p, + s - 5 * p, s - 4 * p, s - 3 * p, s - 2 * p, s - 1 * p, s, + s + 1 * p, s + 2 * p, s + 3 * p, s + 4 * p, s + 5 * p, + s + 6 * p, s + 7 * p, bd); ++s; } } +void vpx_highbd_lpf_horizontal_edge_8_c(uint16_t *s, int p, + const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh, int bd) { + highbd_mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 1, bd); +} + +void vpx_highbd_lpf_horizontal_edge_16_c(uint16_t *s, int p, + const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh, int bd) { + highbd_mb_lpf_horizontal_edge_w(s, p, blimit, limit, thresh, 2, bd); +} + static void highbd_mb_lpf_vertical_edge_w(uint16_t *s, int p, const uint8_t *blimit, const uint8_t *limit, - const uint8_t *thresh, - int count, int bd) { + const uint8_t *thresh, int count, + int bd) { int i; for (i = 0; i < count; ++i) { @@ -714,17 +703,16 @@ static void highbd_mb_lpf_vertical_edge_w(uint16_t *s, int p, const uint16_t q1 = s[1]; const uint16_t q2 = s[2]; const uint16_t q3 = s[3]; - const int8_t mask = highbd_filter_mask(*limit, *blimit, - p3, p2, p1, p0, q0, q1, q2, q3, bd); - const int8_t 
flat = highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3, - bd); + const int8_t mask = + highbd_filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3, bd); + const int8_t flat = + highbd_flat_mask4(1, p3, p2, p1, p0, q0, q1, q2, q3, bd); const int8_t flat2 = highbd_flat_mask5(1, s[-8], s[-7], s[-6], s[-5], p0, q0, s[4], s[5], s[6], s[7], bd); - highbd_filter16(mask, *thresh, flat, flat2, - s - 8, s - 7, s - 6, s - 5, s - 4, s - 3, s - 2, s - 1, - s, s + 1, s + 2, s + 3, s + 4, s + 5, s + 6, s + 7, - bd); + highbd_filter16(mask, *thresh, flat, flat2, s - 8, s - 7, s - 6, s - 5, + s - 4, s - 3, s - 2, s - 1, s, s + 1, s + 2, s + 3, s + 4, + s + 5, s + 6, s + 7, bd); s += p; } } @@ -738,8 +726,7 @@ void vpx_highbd_lpf_vertical_16_c(uint16_t *s, int p, const uint8_t *blimit, void vpx_highbd_lpf_vertical_16_dual_c(uint16_t *s, int p, const uint8_t *blimit, const uint8_t *limit, - const uint8_t *thresh, - int bd) { + const uint8_t *thresh, int bd) { highbd_mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16, bd); } #endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vpx_dsp/mips/add_noise_msa.c b/libs/libvpx/vpx_dsp/mips/add_noise_msa.c new file mode 100644 index 0000000000..e372b9d8c9 --- /dev/null +++ b/libs/libvpx/vpx_dsp/mips/add_noise_msa.c @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2015 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include +#include "./macros_msa.h" + +void vpx_plane_add_noise_msa(uint8_t *start_ptr, const int8_t *noise, + int blackclamp, int whiteclamp, int width, + int height, int32_t pitch) { + uint32_t i, j; + + for (i = 0; i < height / 2; ++i) { + uint8_t *pos0_ptr = start_ptr + (2 * i) * pitch; + const int8_t *ref0_ptr = noise + (rand() & 0xff); + uint8_t *pos1_ptr = start_ptr + (2 * i + 1) * pitch; + const int8_t *ref1_ptr = noise + (rand() & 0xff); + for (j = width / 16; j--;) { + v16i8 temp00_s, temp01_s; + v16u8 temp00, temp01, black_clamp, white_clamp; + v16u8 pos0, ref0, pos1, ref1; + v16i8 const127 = __msa_ldi_b(127); + + pos0 = LD_UB(pos0_ptr); + ref0 = LD_UB(ref0_ptr); + pos1 = LD_UB(pos1_ptr); + ref1 = LD_UB(ref1_ptr); + black_clamp = (v16u8)__msa_fill_b(blackclamp); + white_clamp = (v16u8)__msa_fill_b(whiteclamp); + temp00 = (pos0 < black_clamp); + pos0 = __msa_bmnz_v(pos0, black_clamp, temp00); + temp01 = (pos1 < black_clamp); + pos1 = __msa_bmnz_v(pos1, black_clamp, temp01); + XORI_B2_128_UB(pos0, pos1); + temp00_s = __msa_adds_s_b((v16i8)white_clamp, const127); + temp00 = (v16u8)(temp00_s < pos0); + pos0 = (v16u8)__msa_bmnz_v((v16u8)pos0, (v16u8)temp00_s, temp00); + temp01_s = __msa_adds_s_b((v16i8)white_clamp, const127); + temp01 = (temp01_s < pos1); + pos1 = (v16u8)__msa_bmnz_v((v16u8)pos1, (v16u8)temp01_s, temp01); + XORI_B2_128_UB(pos0, pos1); + pos0 += ref0; + ST_UB(pos0, pos0_ptr); + pos1 += ref1; + ST_UB(pos1, pos1_ptr); + pos0_ptr += 16; + pos1_ptr += 16; + ref0_ptr += 16; + ref1_ptr += 16; + } + } +} diff --git a/libs/libvpx/vpx_dsp/mips/common_dspr2.h b/libs/libvpx/vpx_dsp/mips/common_dspr2.h index 7a10bf1c40..0a42f5cec2 100644 --- a/libs/libvpx/vpx_dsp/mips/common_dspr2.h +++ b/libs/libvpx/vpx_dsp/mips/common_dspr2.h @@ -24,37 +24,21 @@ extern "C" { extern uint8_t *vpx_ff_cropTbl; // From "vpx_dsp/mips/intrapred4_dspr2.c" static INLINE void prefetch_load(const unsigned char *src) { - __asm__ __volatile__ ( - "pref 0, 0(%[src]) \n\t" - : 
- : [src] "r" (src) - ); + __asm__ __volatile__("pref 0, 0(%[src]) \n\t" : : [src] "r"(src)); } /* prefetch data for store */ static INLINE void prefetch_store(unsigned char *dst) { - __asm__ __volatile__ ( - "pref 1, 0(%[dst]) \n\t" - : - : [dst] "r" (dst) - ); + __asm__ __volatile__("pref 1, 0(%[dst]) \n\t" : : [dst] "r"(dst)); } static INLINE void prefetch_load_streamed(const unsigned char *src) { - __asm__ __volatile__ ( - "pref 4, 0(%[src]) \n\t" - : - : [src] "r" (src) - ); + __asm__ __volatile__("pref 4, 0(%[src]) \n\t" : : [src] "r"(src)); } /* prefetch data for store */ static INLINE void prefetch_store_streamed(unsigned char *dst) { - __asm__ __volatile__ ( - "pref 5, 0(%[dst]) \n\t" - : - : [dst] "r" (dst) - ); + __asm__ __volatile__("pref 5, 0(%[dst]) \n\t" : : [dst] "r"(dst)); } #endif // #if HAVE_DSPR2 #ifdef __cplusplus diff --git a/libs/libvpx/vpx_dsp/mips/convolve2_avg_dspr2.c b/libs/libvpx/vpx_dsp/mips/convolve2_avg_dspr2.c index 3c767672fb..ae88eddfd6 100644 --- a/libs/libvpx/vpx_dsp/mips/convolve2_avg_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/convolve2_avg_dspr2.c @@ -18,25 +18,22 @@ #include "vpx_ports/mem.h" #if HAVE_DSPR2 -static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_y, - int32_t w, +static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src, int32_t src_stride, + uint8_t *dst, int32_t dst_stride, + const int16_t *filter_y, int32_t w, int32_t h) { - int32_t x, y; + int32_t x, y; const uint8_t *src_ptr; - uint8_t *dst_ptr; - uint8_t *cm = vpx_ff_cropTbl; - uint32_t vector4a = 64; - uint32_t load1, load2; - uint32_t p1, p2; - uint32_t scratch1, scratch2; - uint32_t store1, store2; - int32_t Temp1, Temp2; + uint8_t *dst_ptr; + uint8_t *cm = vpx_ff_cropTbl; + uint32_t vector4a = 64; + uint32_t load1, load2; + uint32_t p1, p2; + uint32_t scratch1, scratch2; + uint32_t store1, store2; + int32_t Temp1, Temp2; const int16_t *filter = &filter_y[3]; - 
uint32_t filter45; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -48,7 +45,7 @@ static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src, src_ptr = src + x; dst_ptr = dst + x; - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[load1], 0(%[src_ptr]) \n\t" "add %[src_ptr], %[src_ptr], %[src_stride] \n\t" "ulw %[load2], 0(%[src_ptr]) \n\t" @@ -105,16 +102,13 @@ static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src, "sb %[store1], 2(%[dst_ptr]) \n\t" "sb %[store2], 3(%[dst_ptr]) \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [p1] "=&r" (p1), [p2] "=&r" (p2), - [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [store1] "=&r" (store1), [store2] "=&r" (store2), - [src_ptr] "+r" (src_ptr) - : [filter45] "r" (filter45), [vector4a] "r" (vector4a), - [src_stride] "r" (src_stride), [cm] "r" (cm), - [dst_ptr] "r" (dst_ptr) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), + [p2] "=&r"(p2), [scratch1] "=&r"(scratch1), + [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [store1] "=&r"(store1), + [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr) + : [filter45] "r"(filter45), [vector4a] "r"(vector4a), + [src_stride] "r"(src_stride), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr)); } /* Next row... 
*/ @@ -124,23 +118,21 @@ static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src, } static void convolve_bi_avg_vert_64_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, - const int16_t *filter_y, - int32_t h) { - int32_t x, y; + const int16_t *filter_y, int32_t h) { + int32_t x, y; const uint8_t *src_ptr; - uint8_t *dst_ptr; - uint8_t *cm = vpx_ff_cropTbl; - uint32_t vector4a = 64; - uint32_t load1, load2; - uint32_t p1, p2; - uint32_t scratch1, scratch2; - uint32_t store1, store2; - int32_t Temp1, Temp2; + uint8_t *dst_ptr; + uint8_t *cm = vpx_ff_cropTbl; + uint32_t vector4a = 64; + uint32_t load1, load2; + uint32_t p1, p2; + uint32_t scratch1, scratch2; + uint32_t store1, store2; + int32_t Temp1, Temp2; const int16_t *filter = &filter_y[3]; - uint32_t filter45;; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -153,7 +145,7 @@ static void convolve_bi_avg_vert_64_dspr2(const uint8_t *src, src_ptr = src + x; dst_ptr = dst + x; - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[load1], 0(%[src_ptr]) \n\t" "add %[src_ptr], %[src_ptr], %[src_stride] \n\t" "ulw %[load2], 0(%[src_ptr]) \n\t" @@ -210,16 +202,13 @@ static void convolve_bi_avg_vert_64_dspr2(const uint8_t *src, "sb %[store1], 2(%[dst_ptr]) \n\t" "sb %[store2], 3(%[dst_ptr]) \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [p1] "=&r" (p1), [p2] "=&r" (p2), - [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [store1] "=&r" (store1), [store2] "=&r" (store2), - [src_ptr] "+r" (src_ptr) - : [filter45] "r" (filter45), [vector4a] "r" (vector4a), - [src_stride] "r" (src_stride), [cm] "r" (cm), - [dst_ptr] "r" (dst_ptr) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), + [p2] "=&r"(p2), [scratch1] "=&r"(scratch1), + [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [store1] "=&r"(store1), + [store2] "=&r"(store2), 
[src_ptr] "+r"(src_ptr) + : [filter45] "r"(filter45), [vector4a] "r"(vector4a), + [src_stride] "r"(src_stride), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr)); } /* Next row... */ @@ -231,18 +220,16 @@ static void convolve_bi_avg_vert_64_dspr2(const uint8_t *src, void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { uint32_t pos = 38; assert(y_step_q4 == 16); /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); prefetch_store(dst); @@ -251,22 +238,17 @@ void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, case 8: case 16: case 32: - convolve_bi_avg_vert_4_dspr2(src, src_stride, - dst, dst_stride, - filter_y, w, h); + convolve_bi_avg_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y, + w, h); break; case 64: prefetch_store(dst + 32); - convolve_bi_avg_vert_64_dspr2(src, src_stride, - dst, dst_stride, - filter_y, h); + convolve_bi_avg_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y, + h); break; default: - vpx_convolve8_avg_vert_c(src, src_stride, - dst, dst_stride, - filter_x, x_step_q4, - filter_y, y_step_q4, - w, h); + vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } diff --git a/libs/libvpx/vpx_dsp/mips/convolve2_avg_horiz_dspr2.c b/libs/libvpx/vpx_dsp/mips/convolve2_avg_horiz_dspr2.c index 932a73d39b..e944207b6e 100644 --- a/libs/libvpx/vpx_dsp/mips/convolve2_avg_horiz_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/convolve2_avg_horiz_dspr2.c @@ -19,20 +19,18 @@ #if HAVE_DSPR2 static void convolve_bi_avg_horiz_4_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t 
dst_stride, - const int16_t *filter_x0, - int32_t h) { + const int16_t *filter_x0, int32_t h) { int32_t y; uint8_t *cm = vpx_ff_cropTbl; - int32_t Temp1, Temp2, Temp3, Temp4; + int32_t Temp1, Temp2, Temp3, Temp4; uint32_t vector4a = 64; uint32_t tp1, tp2; uint32_t p1, p2, p3; uint32_t tn1, tn2; const int16_t *filter = &filter_x0[3]; - uint32_t filter45; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -42,7 +40,7 @@ static void convolve_bi_avg_horiz_4_dspr2(const uint8_t *src, prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 4(%[src]) \n\t" @@ -61,51 +59,49 @@ static void convolve_bi_avg_horiz_4_dspr2(const uint8_t *src, "dpa.w.ph $ac2, %[p2], %[filter45] \n\t" "extp %[Temp3], $ac2, 31 \n\t" - "lbu %[p2], 3(%[dst]) \n\t" /* load odd 2 */ + "lbu %[p2], 3(%[dst]) \n\t" /* load odd 2 */ /* odd 1. pixel */ - "lbux %[tp1], %[Temp1](%[cm]) \n\t" /* even 1 */ + "lbux %[tp1], %[Temp1](%[cm]) \n\t" /* even 1 */ "mtlo %[vector4a], $ac3 \n\t" "mthi $zero, $ac3 \n\t" - "lbu %[Temp1], 1(%[dst]) \n\t" /* load odd 1 */ + "lbu %[Temp1], 1(%[dst]) \n\t" /* load odd 1 */ "preceu.ph.qbr %[p1], %[tp2] \n\t" "preceu.ph.qbl %[p3], %[tp2] \n\t" "dpa.w.ph $ac3, %[p1], %[filter45] \n\t" "extp %[Temp2], $ac3, 31 \n\t" - "lbu %[tn2], 0(%[dst]) \n\t" /* load even 1 */ + "lbu %[tn2], 0(%[dst]) \n\t" /* load even 1 */ /* odd 2. 
pixel */ - "lbux %[tp2], %[Temp3](%[cm]) \n\t" /* even 2 */ + "lbux %[tp2], %[Temp3](%[cm]) \n\t" /* even 2 */ "mtlo %[vector4a], $ac2 \n\t" "mthi $zero, $ac2 \n\t" - "lbux %[tn1], %[Temp2](%[cm]) \n\t" /* odd 1 */ - "addqh_r.w %[tn2], %[tn2], %[tp1] \n\t" /* average even 1 */ + "lbux %[tn1], %[Temp2](%[cm]) \n\t" /* odd 1 */ + "addqh_r.w %[tn2], %[tn2], %[tp1] \n\t" /* average even 1 */ "dpa.w.ph $ac2, %[p3], %[filter45] \n\t" "extp %[Temp4], $ac2, 31 \n\t" - "lbu %[tp1], 2(%[dst]) \n\t" /* load even 2 */ - "sb %[tn2], 0(%[dst]) \n\t" /* store even 1 */ + "lbu %[tp1], 2(%[dst]) \n\t" /* load even 2 */ + "sb %[tn2], 0(%[dst]) \n\t" /* store even 1 */ /* clamp */ - "addqh_r.w %[Temp1], %[Temp1], %[tn1] \n\t" /* average odd 1 */ - "lbux %[p3], %[Temp4](%[cm]) \n\t" /* odd 2 */ - "sb %[Temp1], 1(%[dst]) \n\t" /* store odd 1 */ + "addqh_r.w %[Temp1], %[Temp1], %[tn1] \n\t" /* average odd 1 */ + "lbux %[p3], %[Temp4](%[cm]) \n\t" /* odd 2 */ + "sb %[Temp1], 1(%[dst]) \n\t" /* store odd 1 */ - "addqh_r.w %[tp1], %[tp1], %[tp2] \n\t" /* average even 2 */ - "sb %[tp1], 2(%[dst]) \n\t" /* store even 2 */ + "addqh_r.w %[tp1], %[tp1], %[tp2] \n\t" /* average even 2 */ + "sb %[tp1], 2(%[dst]) \n\t" /* store even 2 */ - "addqh_r.w %[p2], %[p2], %[p3] \n\t" /* average odd 2 */ - "sb %[p2], 3(%[dst]) \n\t" /* store odd 2 */ + "addqh_r.w %[p2], %[p2], %[p3] \n\t" /* average odd 2 */ + "sb %[p2], 3(%[dst]) \n\t" /* store odd 2 */ - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [tn1] "=&r" (tn1), [tn2] "=&r" (tn2), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4) - : [filter45] "r" (filter45), [vector4a] "r" (vector4a), - [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1), + [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), + [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=&r"(Temp4) + 
: [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm), + [dst] "r"(dst), [src] "r"(src)); /* Next row... */ src += src_stride; @@ -114,11 +110,9 @@ static void convolve_bi_avg_horiz_4_dspr2(const uint8_t *src, } static void convolve_bi_avg_horiz_8_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h) { + int32_t src_stride, uint8_t *dst, + int32_t dst_stride, + const int16_t *filter_x0, int32_t h) { int32_t y; uint8_t *cm = vpx_ff_cropTbl; uint32_t vector4a = 64; @@ -127,7 +121,7 @@ static void convolve_bi_avg_horiz_8_dspr2(const uint8_t *src, uint32_t p1, p2, p3, p4, n1; uint32_t st0, st1; const int16_t *filter = &filter_x0[3]; - uint32_t filter45;; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -137,7 +131,7 @@ static void convolve_bi_avg_horiz_8_dspr2(const uint8_t *src, prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 4(%[src]) \n\t" @@ -246,15 +240,12 @@ static void convolve_bi_avg_horiz_8_dspr2(const uint8_t *src, "sb %[tp4], 5(%[dst]) \n\t" "sb %[tp1], 7(%[dst]) \n\t" - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [tp3] "=&r" (tp3), [tp4] "=&r" (tp4), - [st0] "=&r" (st0), [st1] "=&r" (st1), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [n1] "=&r" (n1), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3) - : [filter45] "r" (filter45), [vector4a] "r" (vector4a), - [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), + [tp4] "=&r"(tp4), [st0] "=&r"(st0), [st1] "=&r"(st1), [p1] "=&r"(p1), + [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4), [n1] "=&r"(n1), + [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3) + : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm), + [dst] "r"(dst), [src] "r"(src)); /* Next row... 
*/ src += src_stride; @@ -263,12 +254,10 @@ static void convolve_bi_avg_horiz_8_dspr2(const uint8_t *src, } static void convolve_bi_avg_horiz_16_dspr2(const uint8_t *src_ptr, - int32_t src_stride, - uint8_t *dst_ptr, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h, - int32_t count) { + int32_t src_stride, uint8_t *dst_ptr, + int32_t dst_stride, + const int16_t *filter_x0, int32_t h, + int32_t count) { int32_t y, c; const uint8_t *src; uint8_t *dst; @@ -279,7 +268,7 @@ static void convolve_bi_avg_horiz_16_dspr2(const uint8_t *src_ptr, uint32_t p1, p2, p3, p4, p5; uint32_t st1, st2, st3; const int16_t *filter = &filter_x0[3]; - uint32_t filter45;; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -293,7 +282,7 @@ static void convolve_bi_avg_horiz_16_dspr2(const uint8_t *src_ptr, prefetch_store(dst_ptr + dst_stride); for (c = 0; c < count; c++) { - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[qload1], 0(%[src]) \n\t" "ulw %[qload2], 4(%[src]) \n\t" @@ -493,14 +482,13 @@ static void convolve_bi_avg_horiz_16_dspr2(const uint8_t *src_ptr, "sb %[qload3], 13(%[dst]) \n\t" /* store odd 7 to dst */ "sb %[qload1], 15(%[dst]) \n\t" /* store odd 8 to dst */ - : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), - [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [qload3] "=&r" (qload3), [p5] "=&r" (p5), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3) - : [filter45] "r" (filter45), [vector_64] "r" (vector_64), - [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src) - ); + : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [st1] "=&r"(st1), + [st2] "=&r"(st2), [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), + [p3] "=&r"(p3), [p4] "=&r"(p4), [qload3] "=&r"(qload3), + [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), + [Temp3] "=&r"(Temp3) + : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm), + [dst] "r"(dst), [src] 
"r"(src)); src += 16; dst += 16; @@ -513,11 +501,10 @@ static void convolve_bi_avg_horiz_16_dspr2(const uint8_t *src_ptr, } static void convolve_bi_avg_horiz_64_dspr2(const uint8_t *src_ptr, - int32_t src_stride, - uint8_t *dst_ptr, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h) { + int32_t src_stride, uint8_t *dst_ptr, + int32_t dst_stride, + const int16_t *filter_x0, + int32_t h) { int32_t y, c; const uint8_t *src; uint8_t *dst; @@ -528,7 +515,7 @@ static void convolve_bi_avg_horiz_64_dspr2(const uint8_t *src_ptr, uint32_t p1, p2, p3, p4, p5; uint32_t st1, st2, st3; const int16_t *filter = &filter_x0[3]; - uint32_t filter45;; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -544,7 +531,7 @@ static void convolve_bi_avg_horiz_64_dspr2(const uint8_t *src_ptr, prefetch_store(dst_ptr + dst_stride + 32); for (c = 0; c < 4; c++) { - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[qload1], 0(%[src]) \n\t" "ulw %[qload2], 4(%[src]) \n\t" @@ -744,14 +731,13 @@ static void convolve_bi_avg_horiz_64_dspr2(const uint8_t *src_ptr, "sb %[qload3], 13(%[dst]) \n\t" /* store odd 7 to dst */ "sb %[qload1], 15(%[dst]) \n\t" /* store odd 8 to dst */ - : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), - [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [qload3] "=&r" (qload3), [p5] "=&r" (p5), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3) - : [filter45] "r" (filter45), [vector_64] "r" (vector_64), - [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src) - ); + : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [st1] "=&r"(st1), + [st2] "=&r"(st2), [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), + [p3] "=&r"(p3), [p4] "=&r"(p4), [qload3] "=&r"(qload3), + [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), + [Temp3] "=&r"(Temp3) + : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm), + [dst] "r"(dst), [src] "r"(src)); src += 16; 
dst += 16; @@ -773,11 +759,9 @@ void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, assert(x_step_q4 == 16); /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); /* prefetch data to cache memory */ prefetch_load(src); @@ -786,39 +770,31 @@ void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, switch (w) { case 4: - convolve_bi_avg_horiz_4_dspr2(src, src_stride, - dst, dst_stride, - filter_x, h); + convolve_bi_avg_horiz_4_dspr2(src, src_stride, dst, dst_stride, filter_x, + h); break; case 8: - convolve_bi_avg_horiz_8_dspr2(src, src_stride, - dst, dst_stride, - filter_x, h); + convolve_bi_avg_horiz_8_dspr2(src, src_stride, dst, dst_stride, filter_x, + h); break; case 16: - convolve_bi_avg_horiz_16_dspr2(src, src_stride, - dst, dst_stride, - filter_x, h, 1); + convolve_bi_avg_horiz_16_dspr2(src, src_stride, dst, dst_stride, filter_x, + h, 1); break; case 32: - convolve_bi_avg_horiz_16_dspr2(src, src_stride, - dst, dst_stride, - filter_x, h, 2); + convolve_bi_avg_horiz_16_dspr2(src, src_stride, dst, dst_stride, filter_x, + h, 2); break; case 64: prefetch_load(src + 64); prefetch_store(dst + 32); - convolve_bi_avg_horiz_64_dspr2(src, src_stride, - dst, dst_stride, - filter_x, h); + convolve_bi_avg_horiz_64_dspr2(src, src_stride, dst, dst_stride, filter_x, + h); break; default: - vpx_convolve8_avg_horiz_c(src, src_stride, - dst, dst_stride, - filter_x, x_step_q4, - filter_y, y_step_q4, - w, h); + vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } diff --git a/libs/libvpx/vpx_dsp/mips/convolve2_dspr2.c b/libs/libvpx/vpx_dsp/mips/convolve2_dspr2.c index d111029d42..e355ba3a06 100644 --- a/libs/libvpx/vpx_dsp/mips/convolve2_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/convolve2_dspr2.c @@ -18,21 +18,18 @@ #include 
"vpx_ports/mem.h" #if HAVE_DSPR2 -static void convolve_bi_horiz_4_transposed_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h) { - int32_t y; - uint8_t *cm = vpx_ff_cropTbl; - uint8_t *dst_ptr; - int32_t Temp1, Temp2; - uint32_t vector4a = 64; - uint32_t tp1, tp2; - uint32_t p1, p2; +static void convolve_bi_horiz_4_transposed_dspr2( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + const int16_t *filter_x0, int32_t h) { + int32_t y; + uint8_t *cm = vpx_ff_cropTbl; + uint8_t *dst_ptr; + int32_t Temp1, Temp2; + uint32_t vector4a = 64; + uint32_t tp1, tp2; + uint32_t p1, p2; const int16_t *filter = &filter_x0[3]; - uint32_t filter45; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -42,7 +39,7 @@ static void convolve_bi_horiz_4_transposed_dspr2(const uint8_t *src, prefetch_load(src + src_stride); prefetch_load(src + src_stride + 32); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 4(%[src]) \n\t" @@ -94,13 +91,10 @@ static void convolve_bi_horiz_4_transposed_dspr2(const uint8_t *src, "sb %[p2], 0(%[dst_ptr]) \n\t" "addu %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t" - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [p1] "=&r" (p1), [p2] "=&r" (p2), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [dst_ptr] "+r" (dst_ptr) - : [filter45] "r" (filter45),[vector4a] "r" (vector4a), - [cm] "r" (cm), [src] "r" (src), [dst_stride] "r" (dst_stride) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [p1] "=&r"(p1), [p2] "=&r"(p2), + [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [dst_ptr] "+r"(dst_ptr) + : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm), + [src] "r"(src), [dst_stride] "r"(dst_stride)); /* Next row... 
*/ src += src_stride; @@ -108,12 +102,9 @@ static void convolve_bi_horiz_4_transposed_dspr2(const uint8_t *src, } } -static void convolve_bi_horiz_8_transposed_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h) { +static void convolve_bi_horiz_8_transposed_dspr2( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + const int16_t *filter_x0, int32_t h) { int32_t y; uint8_t *cm = vpx_ff_cropTbl; uint8_t *dst_ptr; @@ -124,7 +115,7 @@ static void convolve_bi_horiz_8_transposed_dspr2(const uint8_t *src, uint8_t *odd_dst; uint32_t dst_pitch_2 = (dst_stride << 1); const int16_t *filter = &filter_x0[3]; - uint32_t filter45; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -136,7 +127,7 @@ static void convolve_bi_horiz_8_transposed_dspr2(const uint8_t *src, dst_ptr = dst; odd_dst = (dst_ptr + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 4(%[src]) \n\t" @@ -180,7 +171,8 @@ static void convolve_bi_horiz_8_transposed_dspr2(const uint8_t *src, "dpa.w.ph $ac2, %[p4], %[filter45] \n\t" "extp %[Temp3], $ac2, 31 \n\t" - "lbux %[Temp1], %[p3](%[cm]) \n\t" + "lbux %[Temp1], %[p3](%[cm]) " + "\n\t" /* odd 1. 
pixel */ "mtlo %[vector4a], $ac1 \n\t" @@ -231,13 +223,12 @@ static void convolve_bi_horiz_8_transposed_dspr2(const uint8_t *src, "sb %[p1], 0(%[odd_dst]) \n\t" - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), - [dst_ptr] "+r" (dst_ptr), [odd_dst] "+r" (odd_dst) - : [filter45] "r" (filter45),[vector4a] "r" (vector4a), [cm] "r" (cm), - [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), [p1] "=&r"(p1), + [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), [dst_ptr] "+r"(dst_ptr), + [odd_dst] "+r"(odd_dst) + : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm), + [src] "r"(src), [dst_pitch_2] "r"(dst_pitch_2)); /* Next row... */ src += src_stride; @@ -245,26 +236,22 @@ static void convolve_bi_horiz_8_transposed_dspr2(const uint8_t *src, } } -static void convolve_bi_horiz_16_transposed_dspr2(const uint8_t *src_ptr, - int32_t src_stride, - uint8_t *dst_ptr, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h, - int32_t count) { - int32_t c, y; +static void convolve_bi_horiz_16_transposed_dspr2( + const uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr, + int32_t dst_stride, const int16_t *filter_x0, int32_t h, int32_t count) { + int32_t c, y; const uint8_t *src; - uint8_t *dst; - uint8_t *cm = vpx_ff_cropTbl; - uint32_t vector_64 = 64; - int32_t Temp1, Temp2, Temp3; - uint32_t qload1, qload2; - uint32_t p1, p2, p3, p4, p5; - uint32_t st1, st2, st3; - uint32_t dst_pitch_2 = (dst_stride << 1); - uint8_t *odd_dst; + uint8_t *dst; + uint8_t *cm = vpx_ff_cropTbl; + uint32_t vector_64 = 64; + int32_t Temp1, Temp2, Temp3; + uint32_t qload1, qload2; + uint32_t p1, p2, p3, p4, p5; + uint32_t st1, st2, st3; + uint32_t dst_pitch_2 = (dst_stride << 1); + uint8_t *odd_dst; const 
int16_t *filter = &filter_x0[3]; - uint32_t filter45; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -279,193 +266,329 @@ static void convolve_bi_horiz_16_transposed_dspr2(const uint8_t *src_ptr, odd_dst = (dst + dst_stride); for (c = 0; c < count; c++) { - __asm__ __volatile__ ( - "ulw %[qload1], 0(%[src]) \n\t" - "ulw %[qload2], 4(%[src]) \n\t" + __asm__ __volatile__( + "ulw %[qload1], 0(%[src]) " + "\n\t" + "ulw %[qload2], 4(%[src]) " + "\n\t" /* even 1. pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* even 1 */ - "mthi $zero, $ac1 \n\t" - "mtlo %[vector_64], $ac2 \n\t" /* even 2 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbr %[p1], %[qload1] \n\t" - "preceu.ph.qbl %[p2], %[qload1] \n\t" - "preceu.ph.qbr %[p3], %[qload2] \n\t" - "preceu.ph.qbl %[p4], %[qload2] \n\t" - "ulw %[qload1], 8(%[src]) \n\t" - "dpa.w.ph $ac1, %[p1], %[filter45] \n\t" /* even 1 */ - "extp %[Temp1], $ac1, 31 \n\t" /* even 1 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* even 1 */ + "mthi $zero, $ac1 " + "\n\t" + "mtlo %[vector_64], $ac2 " + "\n\t" /* even 2 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbr %[p1], %[qload1] " + "\n\t" + "preceu.ph.qbl %[p2], %[qload1] " + "\n\t" + "preceu.ph.qbr %[p3], %[qload2] " + "\n\t" + "preceu.ph.qbl %[p4], %[qload2] " + "\n\t" + "ulw %[qload1], 8(%[src]) " + "\n\t" + "dpa.w.ph $ac1, %[p1], %[filter45] " + "\n\t" /* even 1 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* even 1 */ /* even 2. 
pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* even 3 */ - "mthi $zero, $ac3 \n\t" - "preceu.ph.qbr %[p1], %[qload1] \n\t" - "preceu.ph.qbl %[p5], %[qload1] \n\t" - "ulw %[qload2], 12(%[src]) \n\t" - "dpa.w.ph $ac2, %[p2], %[filter45] \n\t" /* even 1 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 1 */ - "extp %[Temp2], $ac2, 31 \n\t" /* even 1 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* even 3 */ + "mthi $zero, $ac3 " + "\n\t" + "preceu.ph.qbr %[p1], %[qload1] " + "\n\t" + "preceu.ph.qbl %[p5], %[qload1] " + "\n\t" + "ulw %[qload2], 12(%[src]) " + "\n\t" + "dpa.w.ph $ac2, %[p2], %[filter45] " + "\n\t" /* even 1 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* even 1 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* even 1 */ /* even 3. pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* even 4 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbr %[p2], %[qload2] \n\t" - "sb %[st1], 0(%[dst]) \n\t" /* even 1 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac3, %[p3], %[filter45] \n\t" /* even 3 */ - "extp %[Temp3], $ac3, 31 \n\t" /* even 3 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 1 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* even 4 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbr %[p2], %[qload2] " + "\n\t" + "sb %[st1], 0(%[dst]) " + "\n\t" /* even 1 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + " \n\t" + "dpa.w.ph $ac3, %[p3], %[filter45] " + "\n\t" /* even 3 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* even 3 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* even 1 */ /* even 4. 
pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* even 5 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbl %[p3], %[qload2] \n\t" - "sb %[st2], 0(%[dst]) \n\t" /* even 2 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac1, %[p4], %[filter45] \n\t" /* even 4 */ - "extp %[Temp1], $ac1, 31 \n\t" /* even 4 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 3 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* even 5 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbl %[p3], %[qload2] " + "\n\t" + "sb %[st2], 0(%[dst]) " + "\n\t" /* even 2 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac1, %[p4], %[filter45] " + "\n\t" /* even 4 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* even 4 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* even 3 */ /* even 5. pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* even 6 */ - "mthi $zero, $ac3 \n\t" - "sb %[st3], 0(%[dst]) \n\t" /* even 3 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac2, %[p1], %[filter45] \n\t" /* even 5 */ - "extp %[Temp2], $ac2, 31 \n\t" /* even 5 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 4 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* even 6 */ + "mthi $zero, $ac3 " + "\n\t" + "sb %[st3], 0(%[dst]) " + "\n\t" /* even 3 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac2, %[p1], %[filter45] " + "\n\t" /* even 5 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* even 5 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* even 4 */ /* even 6. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* even 7 */ - "mthi $zero, $ac1 \n\t" - "sb %[st1], 0(%[dst]) \n\t" /* even 4 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "ulw %[qload1], 20(%[src]) \n\t" - "dpa.w.ph $ac3, %[p5], %[filter45] \n\t" /* even 6 */ - "extp %[Temp3], $ac3, 31 \n\t" /* even 6 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 5 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* even 7 */ + "mthi $zero, $ac1 " + "\n\t" + "sb %[st1], 0(%[dst]) " + "\n\t" /* even 4 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload1], 20(%[src]) " + "\n\t" + "dpa.w.ph $ac3, %[p5], %[filter45] " + "\n\t" /* even 6 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* even 6 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* even 5 */ /* even 7. pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* even 8 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbr %[p5], %[qload1] \n\t" - "sb %[st2], 0(%[dst]) \n\t" /* even 5 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac1, %[p2], %[filter45] \n\t" /* even 7 */ - "extp %[Temp1], $ac1, 31 \n\t" /* even 7 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 6 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* even 8 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbr %[p5], %[qload1] " + "\n\t" + "sb %[st2], 0(%[dst]) " + "\n\t" /* even 5 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac1, %[p2], %[filter45] " + "\n\t" /* even 7 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* even 7 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* even 6 */ /* even 8. 
pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* odd 1 */ - "mthi $zero, $ac3 \n\t" - "dpa.w.ph $ac2, %[p3], %[filter45] \n\t" /* even 8 */ - "sb %[st3], 0(%[dst]) \n\t" /* even 6 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "extp %[Temp2], $ac2, 31 \n\t" /* even 8 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 7 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* odd 1 */ + "mthi $zero, $ac3 " + "\n\t" + "dpa.w.ph $ac2, %[p3], %[filter45] " + "\n\t" /* even 8 */ + "sb %[st3], 0(%[dst]) " + "\n\t" /* even 6 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "extp %[Temp2], $ac2, 31 " + "\n\t" /* even 8 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* even 7 */ /* ODD pixels */ - "ulw %[qload1], 1(%[src]) \n\t" - "ulw %[qload2], 5(%[src]) \n\t" + "ulw %[qload1], 1(%[src]) " + "\n\t" + "ulw %[qload2], 5(%[src]) " + "\n\t" /* odd 1. pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* odd 2 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbr %[p1], %[qload1] \n\t" - "preceu.ph.qbl %[p2], %[qload1] \n\t" - "preceu.ph.qbr %[p3], %[qload2] \n\t" - "preceu.ph.qbl %[p4], %[qload2] \n\t" - "sb %[st1], 0(%[dst]) \n\t" /* even 7 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "ulw %[qload2], 9(%[src]) \n\t" - "dpa.w.ph $ac3, %[p1], %[filter45] \n\t" /* odd 1 */ - "extp %[Temp3], $ac3, 31 \n\t" /* odd 1 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 8 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* odd 2 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbr %[p1], %[qload1] " + "\n\t" + "preceu.ph.qbl %[p2], %[qload1] " + "\n\t" + "preceu.ph.qbr %[p3], %[qload2] " + "\n\t" + "preceu.ph.qbl %[p4], %[qload2] " + "\n\t" + "sb %[st1], 0(%[dst]) " + "\n\t" /* even 7 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload2], 9(%[src]) " + "\n\t" + "dpa.w.ph $ac3, %[p1], %[filter45] " + "\n\t" /* odd 1 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* odd 1 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* even 8 */ /* odd 2. 
pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* odd 3 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbr %[p1], %[qload2] \n\t" - "preceu.ph.qbl %[p5], %[qload2] \n\t" - "sb %[st2], 0(%[dst]) \n\t" /* even 8 */ - "ulw %[qload1], 13(%[src]) \n\t" - "dpa.w.ph $ac1, %[p2], %[filter45] \n\t" /* odd 2 */ - "extp %[Temp1], $ac1, 31 \n\t" /* odd 2 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 1 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* odd 3 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbr %[p1], %[qload2] " + "\n\t" + "preceu.ph.qbl %[p5], %[qload2] " + "\n\t" + "sb %[st2], 0(%[dst]) " + "\n\t" /* even 8 */ + "ulw %[qload1], 13(%[src]) " + "\n\t" + "dpa.w.ph $ac1, %[p2], %[filter45] " + "\n\t" /* odd 2 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* odd 2 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* odd 1 */ /* odd 3. pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* odd 4 */ - "mthi $zero, $ac3 \n\t" - "preceu.ph.qbr %[p2], %[qload1] \n\t" - "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 1 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac2, %[p3], %[filter45] \n\t" /* odd 3 */ - "extp %[Temp2], $ac2, 31 \n\t" /* odd 3 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 2 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* odd 4 */ + "mthi $zero, $ac3 " + "\n\t" + "preceu.ph.qbr %[p2], %[qload1] " + "\n\t" + "sb %[st3], 0(%[odd_dst]) " + "\n\t" /* odd 1 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac2, %[p3], %[filter45] " + "\n\t" /* odd 3 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* odd 3 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* odd 2 */ /* odd 4. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* odd 5 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbl %[p3], %[qload1] \n\t" - "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 2 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac3, %[p4], %[filter45] \n\t" /* odd 4 */ - "extp %[Temp3], $ac3, 31 \n\t" /* odd 4 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 3 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* odd 5 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbl %[p3], %[qload1] " + "\n\t" + "sb %[st1], 0(%[odd_dst]) " + "\n\t" /* odd 2 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac3, %[p4], %[filter45] " + "\n\t" /* odd 4 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* odd 4 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* odd 3 */ /* odd 5. pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* odd 6 */ - "mthi $zero, $ac2 \n\t" - "sb %[st2], 0(%[odd_dst]) \n\t" /* odd 3 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac1, %[p1], %[filter45] \n\t" /* odd 5 */ - "extp %[Temp1], $ac1, 31 \n\t" /* odd 5 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 4 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* odd 6 */ + "mthi $zero, $ac2 " + "\n\t" + "sb %[st2], 0(%[odd_dst]) " + "\n\t" /* odd 3 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac1, %[p1], %[filter45] " + "\n\t" /* odd 5 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* odd 5 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* odd 4 */ /* odd 6. 
pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* odd 7 */ - "mthi $zero, $ac3 \n\t" - "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 4 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "ulw %[qload1], 21(%[src]) \n\t" - "dpa.w.ph $ac2, %[p5], %[filter45] \n\t" /* odd 6 */ - "extp %[Temp2], $ac2, 31 \n\t" /* odd 6 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 5 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* odd 7 */ + "mthi $zero, $ac3 " + "\n\t" + "sb %[st3], 0(%[odd_dst]) " + "\n\t" /* odd 4 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload1], 21(%[src]) " + "\n\t" + "dpa.w.ph $ac2, %[p5], %[filter45] " + "\n\t" /* odd 6 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* odd 6 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* odd 5 */ /* odd 7. pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* odd 8 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbr %[p5], %[qload1] \n\t" - "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 5 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac3, %[p2], %[filter45] \n\t" /* odd 7 */ - "extp %[Temp3], $ac3, 31 \n\t" /* odd 7 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* odd 8 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbr %[p5], %[qload1] " + "\n\t" + "sb %[st1], 0(%[odd_dst]) " + "\n\t" /* odd 5 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac3, %[p2], %[filter45] " + "\n\t" /* odd 7 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* odd 7 */ /* odd 8. 
pixel */ - "dpa.w.ph $ac1, %[p3], %[filter45] \n\t" /* odd 8 */ - "extp %[Temp1], $ac1, 31 \n\t" /* odd 8 */ + "dpa.w.ph $ac1, %[p3], %[filter45] " + "\n\t" /* odd 8 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* odd 8 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 6 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 7 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 8 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* odd 6 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* odd 7 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* odd 8 */ - "sb %[st2], 0(%[odd_dst]) \n\t" /* odd 6 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" + "sb %[st2], 0(%[odd_dst]) " + "\n\t" /* odd 6 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" - "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 7 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" + "sb %[st3], 0(%[odd_dst]) " + "\n\t" /* odd 7 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" - "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 8 */ + "sb %[st1], 0(%[odd_dst]) " + "\n\t" /* odd 8 */ - : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5), - [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), - [dst] "+r" (dst), [odd_dst] "+r" (odd_dst) - : [filter45] "r" (filter45), [vector_64] "r" (vector_64), - [cm] "r" (cm), - [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2) - ); + : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [p5] "=&r"(p5), + [st1] "=&r"(st1), [st2] "=&r"(st2), [st3] "=&r"(st3), + [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4), + [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [dst] "+r"(dst), [odd_dst] "+r"(odd_dst) + : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm), + [src] "r"(src), [dst_pitch_2] "r"(dst_pitch_2)); src += 16; dst = (dst_ptr + ((c + 1) * 16 * dst_stride)); @@ -478,25 +601,22 @@ 
static void convolve_bi_horiz_16_transposed_dspr2(const uint8_t *src_ptr, } } -static void convolve_bi_horiz_64_transposed_dspr2(const uint8_t *src_ptr, - int32_t src_stride, - uint8_t *dst_ptr, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h) { - int32_t c, y; +static void convolve_bi_horiz_64_transposed_dspr2( + const uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr, + int32_t dst_stride, const int16_t *filter_x0, int32_t h) { + int32_t c, y; const uint8_t *src; - uint8_t *dst; - uint8_t *cm = vpx_ff_cropTbl; - uint32_t vector_64 = 64; - int32_t Temp1, Temp2, Temp3; - uint32_t qload1, qload2; - uint32_t p1, p2, p3, p4, p5; - uint32_t st1, st2, st3; - uint32_t dst_pitch_2 = (dst_stride << 1); - uint8_t *odd_dst; + uint8_t *dst; + uint8_t *cm = vpx_ff_cropTbl; + uint32_t vector_64 = 64; + int32_t Temp1, Temp2, Temp3; + uint32_t qload1, qload2; + uint32_t p1, p2, p3, p4, p5; + uint32_t st1, st2, st3; + uint32_t dst_pitch_2 = (dst_stride << 1); + uint8_t *odd_dst; const int16_t *filter = &filter_x0[3]; - uint32_t filter45; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -512,193 +632,329 @@ static void convolve_bi_horiz_64_transposed_dspr2(const uint8_t *src_ptr, odd_dst = (dst + dst_stride); for (c = 0; c < 4; c++) { - __asm__ __volatile__ ( - "ulw %[qload1], 0(%[src]) \n\t" - "ulw %[qload2], 4(%[src]) \n\t" + __asm__ __volatile__( + "ulw %[qload1], 0(%[src]) " + "\n\t" + "ulw %[qload2], 4(%[src]) " + "\n\t" /* even 1. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* even 1 */ - "mthi $zero, $ac1 \n\t" - "mtlo %[vector_64], $ac2 \n\t" /* even 2 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbr %[p1], %[qload1] \n\t" - "preceu.ph.qbl %[p2], %[qload1] \n\t" - "preceu.ph.qbr %[p3], %[qload2] \n\t" - "preceu.ph.qbl %[p4], %[qload2] \n\t" - "ulw %[qload1], 8(%[src]) \n\t" - "dpa.w.ph $ac1, %[p1], %[filter45] \n\t" /* even 1 */ - "extp %[Temp1], $ac1, 31 \n\t" /* even 1 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* even 1 */ + "mthi $zero, $ac1 " + "\n\t" + "mtlo %[vector_64], $ac2 " + "\n\t" /* even 2 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbr %[p1], %[qload1] " + "\n\t" + "preceu.ph.qbl %[p2], %[qload1] " + "\n\t" + "preceu.ph.qbr %[p3], %[qload2] " + "\n\t" + "preceu.ph.qbl %[p4], %[qload2] " + "\n\t" + "ulw %[qload1], 8(%[src]) " + "\n\t" + "dpa.w.ph $ac1, %[p1], %[filter45] " + "\n\t" /* even 1 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* even 1 */ /* even 2. pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* even 3 */ - "mthi $zero, $ac3 \n\t" - "preceu.ph.qbr %[p1], %[qload1] \n\t" - "preceu.ph.qbl %[p5], %[qload1] \n\t" - "ulw %[qload2], 12(%[src]) \n\t" - "dpa.w.ph $ac2, %[p2], %[filter45] \n\t" /* even 1 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 1 */ - "extp %[Temp2], $ac2, 31 \n\t" /* even 1 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* even 3 */ + "mthi $zero, $ac3 " + "\n\t" + "preceu.ph.qbr %[p1], %[qload1] " + "\n\t" + "preceu.ph.qbl %[p5], %[qload1] " + "\n\t" + "ulw %[qload2], 12(%[src]) " + "\n\t" + "dpa.w.ph $ac2, %[p2], %[filter45] " + "\n\t" /* even 1 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* even 1 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* even 1 */ /* even 3. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* even 4 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbr %[p2], %[qload2] \n\t" - "sb %[st1], 0(%[dst]) \n\t" /* even 1 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac3, %[p3], %[filter45] \n\t" /* even 3 */ - "extp %[Temp3], $ac3, 31 \n\t" /* even 3 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 1 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* even 4 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbr %[p2], %[qload2] " + "\n\t" + "sb %[st1], 0(%[dst]) " + "\n\t" /* even 1 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + " \n\t" + "dpa.w.ph $ac3, %[p3], %[filter45] " + "\n\t" /* even 3 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* even 3 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* even 1 */ /* even 4. pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* even 5 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbl %[p3], %[qload2] \n\t" - "sb %[st2], 0(%[dst]) \n\t" /* even 2 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac1, %[p4], %[filter45] \n\t" /* even 4 */ - "extp %[Temp1], $ac1, 31 \n\t" /* even 4 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 3 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* even 5 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbl %[p3], %[qload2] " + "\n\t" + "sb %[st2], 0(%[dst]) " + "\n\t" /* even 2 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac1, %[p4], %[filter45] " + "\n\t" /* even 4 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* even 4 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* even 3 */ /* even 5. 
pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* even 6 */ - "mthi $zero, $ac3 \n\t" - "sb %[st3], 0(%[dst]) \n\t" /* even 3 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac2, %[p1], %[filter45] \n\t" /* even 5 */ - "extp %[Temp2], $ac2, 31 \n\t" /* even 5 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 4 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* even 6 */ + "mthi $zero, $ac3 " + "\n\t" + "sb %[st3], 0(%[dst]) " + "\n\t" /* even 3 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac2, %[p1], %[filter45] " + "\n\t" /* even 5 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* even 5 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* even 4 */ /* even 6. pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* even 7 */ - "mthi $zero, $ac1 \n\t" - "sb %[st1], 0(%[dst]) \n\t" /* even 4 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "ulw %[qload1], 20(%[src]) \n\t" - "dpa.w.ph $ac3, %[p5], %[filter45] \n\t" /* even 6 */ - "extp %[Temp3], $ac3, 31 \n\t" /* even 6 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 5 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* even 7 */ + "mthi $zero, $ac1 " + "\n\t" + "sb %[st1], 0(%[dst]) " + "\n\t" /* even 4 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload1], 20(%[src]) " + "\n\t" + "dpa.w.ph $ac3, %[p5], %[filter45] " + "\n\t" /* even 6 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* even 6 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* even 5 */ /* even 7. 
pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* even 8 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbr %[p5], %[qload1] \n\t" - "sb %[st2], 0(%[dst]) \n\t" /* even 5 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac1, %[p2], %[filter45] \n\t" /* even 7 */ - "extp %[Temp1], $ac1, 31 \n\t" /* even 7 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 6 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* even 8 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbr %[p5], %[qload1] " + "\n\t" + "sb %[st2], 0(%[dst]) " + "\n\t" /* even 5 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac1, %[p2], %[filter45] " + "\n\t" /* even 7 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* even 7 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* even 6 */ /* even 8. pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* odd 1 */ - "mthi $zero, $ac3 \n\t" - "dpa.w.ph $ac2, %[p3], %[filter45] \n\t" /* even 8 */ - "sb %[st3], 0(%[dst]) \n\t" /* even 6 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "extp %[Temp2], $ac2, 31 \n\t" /* even 8 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 7 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* odd 1 */ + "mthi $zero, $ac3 " + "\n\t" + "dpa.w.ph $ac2, %[p3], %[filter45] " + "\n\t" /* even 8 */ + "sb %[st3], 0(%[dst]) " + "\n\t" /* even 6 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "extp %[Temp2], $ac2, 31 " + "\n\t" /* even 8 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* even 7 */ /* ODD pixels */ - "ulw %[qload1], 1(%[src]) \n\t" - "ulw %[qload2], 5(%[src]) \n\t" + "ulw %[qload1], 1(%[src]) " + "\n\t" + "ulw %[qload2], 5(%[src]) " + "\n\t" /* odd 1. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* odd 2 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbr %[p1], %[qload1] \n\t" - "preceu.ph.qbl %[p2], %[qload1] \n\t" - "preceu.ph.qbr %[p3], %[qload2] \n\t" - "preceu.ph.qbl %[p4], %[qload2] \n\t" - "sb %[st1], 0(%[dst]) \n\t" /* even 7 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "ulw %[qload2], 9(%[src]) \n\t" - "dpa.w.ph $ac3, %[p1], %[filter45] \n\t" /* odd 1 */ - "extp %[Temp3], $ac3, 31 \n\t" /* odd 1 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 8 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* odd 2 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbr %[p1], %[qload1] " + "\n\t" + "preceu.ph.qbl %[p2], %[qload1] " + "\n\t" + "preceu.ph.qbr %[p3], %[qload2] " + "\n\t" + "preceu.ph.qbl %[p4], %[qload2] " + "\n\t" + "sb %[st1], 0(%[dst]) " + "\n\t" /* even 7 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload2], 9(%[src]) " + "\n\t" + "dpa.w.ph $ac3, %[p1], %[filter45] " + "\n\t" /* odd 1 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* odd 1 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* even 8 */ /* odd 2. pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* odd 3 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbr %[p1], %[qload2] \n\t" - "preceu.ph.qbl %[p5], %[qload2] \n\t" - "sb %[st2], 0(%[dst]) \n\t" /* even 8 */ - "ulw %[qload1], 13(%[src]) \n\t" - "dpa.w.ph $ac1, %[p2], %[filter45] \n\t" /* odd 2 */ - "extp %[Temp1], $ac1, 31 \n\t" /* odd 2 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 1 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* odd 3 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbr %[p1], %[qload2] " + "\n\t" + "preceu.ph.qbl %[p5], %[qload2] " + "\n\t" + "sb %[st2], 0(%[dst]) " + "\n\t" /* even 8 */ + "ulw %[qload1], 13(%[src]) " + "\n\t" + "dpa.w.ph $ac1, %[p2], %[filter45] " + "\n\t" /* odd 2 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* odd 2 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* odd 1 */ /* odd 3. 
pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* odd 4 */ - "mthi $zero, $ac3 \n\t" - "preceu.ph.qbr %[p2], %[qload1] \n\t" - "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 1 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac2, %[p3], %[filter45] \n\t" /* odd 3 */ - "extp %[Temp2], $ac2, 31 \n\t" /* odd 3 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 2 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* odd 4 */ + "mthi $zero, $ac3 " + "\n\t" + "preceu.ph.qbr %[p2], %[qload1] " + "\n\t" + "sb %[st3], 0(%[odd_dst]) " + "\n\t" /* odd 1 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac2, %[p3], %[filter45] " + "\n\t" /* odd 3 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* odd 3 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* odd 2 */ /* odd 4. pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* odd 5 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbl %[p3], %[qload1] \n\t" - "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 2 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac3, %[p4], %[filter45] \n\t" /* odd 4 */ - "extp %[Temp3], $ac3, 31 \n\t" /* odd 4 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 3 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* odd 5 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbl %[p3], %[qload1] " + "\n\t" + "sb %[st1], 0(%[odd_dst]) " + "\n\t" /* odd 2 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac3, %[p4], %[filter45] " + "\n\t" /* odd 4 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* odd 4 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* odd 3 */ /* odd 5. 
pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* odd 6 */ - "mthi $zero, $ac2 \n\t" - "sb %[st2], 0(%[odd_dst]) \n\t" /* odd 3 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac1, %[p1], %[filter45] \n\t" /* odd 5 */ - "extp %[Temp1], $ac1, 31 \n\t" /* odd 5 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 4 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* odd 6 */ + "mthi $zero, $ac2 " + "\n\t" + "sb %[st2], 0(%[odd_dst]) " + "\n\t" /* odd 3 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac1, %[p1], %[filter45] " + "\n\t" /* odd 5 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* odd 5 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* odd 4 */ /* odd 6. pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* odd 7 */ - "mthi $zero, $ac3 \n\t" - "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 4 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "ulw %[qload1], 21(%[src]) \n\t" - "dpa.w.ph $ac2, %[p5], %[filter45] \n\t" /* odd 6 */ - "extp %[Temp2], $ac2, 31 \n\t" /* odd 6 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 5 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* odd 7 */ + "mthi $zero, $ac3 " + "\n\t" + "sb %[st3], 0(%[odd_dst]) " + "\n\t" /* odd 4 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload1], 21(%[src]) " + "\n\t" + "dpa.w.ph $ac2, %[p5], %[filter45] " + "\n\t" /* odd 6 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* odd 6 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* odd 5 */ /* odd 7. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* odd 8 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbr %[p5], %[qload1] \n\t" - "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 5 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac3, %[p2], %[filter45] \n\t" /* odd 7 */ - "extp %[Temp3], $ac3, 31 \n\t" /* odd 7 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* odd 8 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbr %[p5], %[qload1] " + "\n\t" + "sb %[st1], 0(%[odd_dst]) " + "\n\t" /* odd 5 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac3, %[p2], %[filter45] " + "\n\t" /* odd 7 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* odd 7 */ /* odd 8. pixel */ - "dpa.w.ph $ac1, %[p3], %[filter45] \n\t" /* odd 8 */ - "extp %[Temp1], $ac1, 31 \n\t" /* odd 8 */ + "dpa.w.ph $ac1, %[p3], %[filter45] " + "\n\t" /* odd 8 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* odd 8 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 6 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 7 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 8 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* odd 6 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* odd 7 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* odd 8 */ - "sb %[st2], 0(%[odd_dst]) \n\t" /* odd 6 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" + "sb %[st2], 0(%[odd_dst]) " + "\n\t" /* odd 6 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" - "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 7 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" + "sb %[st3], 0(%[odd_dst]) " + "\n\t" /* odd 7 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" - "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 8 */ + "sb %[st1], 0(%[odd_dst]) " + "\n\t" /* odd 8 */ - : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5), - [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), - [dst] 
"+r" (dst), [odd_dst] "+r" (odd_dst) - : [filter45] "r" (filter45), [vector_64] "r" (vector_64), - [cm] "r" (cm), - [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2) - ); + : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [p5] "=&r"(p5), + [st1] "=&r"(st1), [st2] "=&r"(st2), [st3] "=&r"(st3), + [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4), + [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [dst] "+r"(dst), [odd_dst] "+r"(odd_dst) + : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm), + [src] "r"(src), [dst_pitch_2] "r"(dst_pitch_2)); src += 16; dst = (dst_ptr + ((c + 1) * 16 * dst_stride)); @@ -731,18 +987,15 @@ void convolve_bi_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride, } } -void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter, - int w, int h) { +void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const int16_t *filter, int w, + int h) { uint32_t pos = 38; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); /* prefetch data to cache memory */ prefetch_load(src); @@ -750,32 +1003,26 @@ void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, switch (w) { case 4: - convolve_bi_horiz_4_transposed_dspr2(src, src_stride, - dst, dst_stride, + convolve_bi_horiz_4_transposed_dspr2(src, src_stride, dst, dst_stride, filter, h); break; case 8: - convolve_bi_horiz_8_transposed_dspr2(src, src_stride, - dst, dst_stride, + convolve_bi_horiz_8_transposed_dspr2(src, src_stride, dst, dst_stride, filter, h); break; case 16: case 32: - convolve_bi_horiz_16_transposed_dspr2(src, src_stride, - dst, dst_stride, - filter, h, - (w/16)); + convolve_bi_horiz_16_transposed_dspr2(src, src_stride, dst, dst_stride, + filter, h, (w / 16)); break; case 64: 
prefetch_load(src + 32); - convolve_bi_horiz_64_transposed_dspr2(src, src_stride, - dst, dst_stride, + convolve_bi_horiz_64_transposed_dspr2(src, src_stride, dst, dst_stride, filter, h); break; default: - convolve_bi_horiz_transposed(src, src_stride, - dst, dst_stride, - filter, w, h); + convolve_bi_horiz_transposed(src, src_stride, dst, dst_stride, filter, w, + h); break; } } diff --git a/libs/libvpx/vpx_dsp/mips/convolve2_horiz_dspr2.c b/libs/libvpx/vpx_dsp/mips/convolve2_horiz_dspr2.c index 9fe1a3454b..5cc06b5f26 100644 --- a/libs/libvpx/vpx_dsp/mips/convolve2_horiz_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/convolve2_horiz_dspr2.c @@ -18,12 +18,9 @@ #include "vpx_ports/mem.h" #if HAVE_DSPR2 -static void convolve_bi_horiz_4_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h) { +static void convolve_bi_horiz_4_dspr2(const uint8_t *src, int32_t src_stride, + uint8_t *dst, int32_t dst_stride, + const int16_t *filter_x0, int32_t h) { int32_t y; uint8_t *cm = vpx_ff_cropTbl; int32_t Temp1, Temp2, Temp3, Temp4; @@ -31,7 +28,7 @@ static void convolve_bi_horiz_4_dspr2(const uint8_t *src, uint32_t tp1, tp2; uint32_t p1, p2; const int16_t *filter = &filter_x0[3]; - uint32_t filter45;; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -41,7 +38,7 @@ static void convolve_bi_horiz_4_dspr2(const uint8_t *src, prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 4(%[src]) \n\t" @@ -86,13 +83,11 @@ static void convolve_bi_horiz_4_dspr2(const uint8_t *src, "sb %[tp2], 2(%[dst]) \n\t" "sb %[p2], 3(%[dst]) \n\t" - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [p1] "=&r" (p1), [p2] "=&r" (p2), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4) - : [filter45] "r" (filter45), [vector4a] "r" (vector4a), - [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src) 
- ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [p1] "=&r"(p1), [p2] "=&r"(p2), + [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [Temp4] "=&r"(Temp4) + : [filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm), + [dst] "r"(dst), [src] "r"(src)); /* Next row... */ src += src_stride; @@ -100,12 +95,9 @@ static void convolve_bi_horiz_4_dspr2(const uint8_t *src, } } -static void convolve_bi_horiz_8_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h) { +static void convolve_bi_horiz_8_dspr2(const uint8_t *src, int32_t src_stride, + uint8_t *dst, int32_t dst_stride, + const int16_t *filter_x0, int32_t h) { int32_t y; uint8_t *cm = vpx_ff_cropTbl; uint32_t vector4a = 64; @@ -114,7 +106,7 @@ static void convolve_bi_horiz_8_dspr2(const uint8_t *src, uint32_t p1, p2, p3, p4; uint32_t st0, st1; const int16_t *filter = &filter_x0[3]; - uint32_t filter45;; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -124,7 +116,7 @@ static void convolve_bi_horiz_8_dspr2(const uint8_t *src, prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 4(%[src]) \n\t" @@ -210,13 +202,12 @@ static void convolve_bi_horiz_8_dspr2(const uint8_t *src, "sb %[p2], 5(%[dst]) \n\t" "sb %[p1], 7(%[dst]) \n\t" - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3), - [st0] "=&r" (st0), [st1] "=&r" (st1), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3) - : [filter45] "r" (filter45), [vector4a] "r" (vector4a), - [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), + [st0] "=&r"(st0), [st1] "=&r"(st1), [p1] "=&r"(p1), [p2] "=&r"(p2), + [p3] "=&r"(p3), [p4] "=&r"(p4), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3) + : 
[filter45] "r"(filter45), [vector4a] "r"(vector4a), [cm] "r"(cm), + [dst] "r"(dst), [src] "r"(src)); /* Next row... */ src += src_stride; @@ -225,11 +216,9 @@ static void convolve_bi_horiz_8_dspr2(const uint8_t *src, } static void convolve_bi_horiz_16_dspr2(const uint8_t *src_ptr, - int32_t src_stride, - uint8_t *dst_ptr, + int32_t src_stride, uint8_t *dst_ptr, int32_t dst_stride, - const int16_t *filter_x0, - int32_t h, + const int16_t *filter_x0, int32_t h, int32_t count) { int32_t y, c; const uint8_t *src; @@ -241,7 +230,7 @@ static void convolve_bi_horiz_16_dspr2(const uint8_t *src_ptr, uint32_t p1, p2, p3, p4, p5; uint32_t st1, st2, st3; const int16_t *filter = &filter_x0[3]; - uint32_t filter45;; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -255,7 +244,7 @@ static void convolve_bi_horiz_16_dspr2(const uint8_t *src_ptr, prefetch_store(dst_ptr + dst_stride); for (c = 0; c < count; c++) { - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[qload1], 0(%[src]) \n\t" "ulw %[qload2], 4(%[src]) \n\t" @@ -413,14 +402,13 @@ static void convolve_bi_horiz_16_dspr2(const uint8_t *src_ptr, "sb %[st3], 13(%[dst]) \n\t" /* odd 7 */ "sb %[st1], 15(%[dst]) \n\t" /* odd 8 */ - : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3), - [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [p5] "=&r" (p5), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3) - : [filter45] "r" (filter45), [vector_64] "r" (vector_64), - [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src) - ); + : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), + [qload3] "=&r"(qload3), [st1] "=&r"(st1), [st2] "=&r"(st2), + [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), + [p4] "=&r"(p4), [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3) + : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm), + [dst] "r"(dst), [src] 
"r"(src)); src += 16; dst += 16; @@ -433,11 +421,9 @@ static void convolve_bi_horiz_16_dspr2(const uint8_t *src_ptr, } static void convolve_bi_horiz_64_dspr2(const uint8_t *src_ptr, - int32_t src_stride, - uint8_t *dst_ptr, + int32_t src_stride, uint8_t *dst_ptr, int32_t dst_stride, - const int16_t *filter_x0, - int32_t h) { + const int16_t *filter_x0, int32_t h) { int32_t y, c; const uint8_t *src; uint8_t *dst; @@ -448,7 +434,7 @@ static void convolve_bi_horiz_64_dspr2(const uint8_t *src_ptr, uint32_t p1, p2, p3, p4, p5; uint32_t st1, st2, st3; const int16_t *filter = &filter_x0[3]; - uint32_t filter45;; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -464,7 +450,7 @@ static void convolve_bi_horiz_64_dspr2(const uint8_t *src_ptr, prefetch_store(dst_ptr + dst_stride + 32); for (c = 0; c < 4; c++) { - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[qload1], 0(%[src]) \n\t" "ulw %[qload2], 4(%[src]) \n\t" @@ -622,14 +608,13 @@ static void convolve_bi_horiz_64_dspr2(const uint8_t *src_ptr, "sb %[st3], 13(%[dst]) \n\t" /* odd 7 */ "sb %[st1], 15(%[dst]) \n\t" /* odd 8 */ - : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3), - [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [p5] "=&r" (p5), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3) - : [filter45] "r" (filter45), [vector_64] "r" (vector_64), - [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src) - ); + : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), + [qload3] "=&r"(qload3), [st1] "=&r"(st1), [st2] "=&r"(st2), + [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), + [p4] "=&r"(p4), [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3) + : [filter45] "r"(filter45), [vector_64] "r"(vector_64), [cm] "r"(cm), + [dst] "r"(dst), [src] "r"(src)); src += 16; dst += 16; @@ -644,8 +629,8 @@ static void convolve_bi_horiz_64_dspr2(const 
uint8_t *src_ptr, void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { uint32_t pos = 38; assert(x_step_q4 == 16); @@ -653,11 +638,9 @@ void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, prefetch_load((const uint8_t *)filter_x); /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); /* prefetch data to cache memory */ prefetch_load(src); @@ -666,39 +649,31 @@ void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, switch (w) { case 4: - convolve_bi_horiz_4_dspr2(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filter_x, (int32_t)h); + convolve_bi_horiz_4_dspr2(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filter_x, (int32_t)h); break; case 8: - convolve_bi_horiz_8_dspr2(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filter_x, (int32_t)h); + convolve_bi_horiz_8_dspr2(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filter_x, (int32_t)h); break; case 16: - convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filter_x, (int32_t)h, 1); + convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filter_x, (int32_t)h, 1); break; case 32: - convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filter_x, (int32_t)h, 2); + convolve_bi_horiz_16_dspr2(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filter_x, (int32_t)h, 2); break; case 64: prefetch_load(src + 64); prefetch_store(dst + 32); - convolve_bi_horiz_64_dspr2(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filter_x, (int32_t)h); + convolve_bi_horiz_64_dspr2(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, 
filter_x, (int32_t)h); break; default: - vpx_convolve8_horiz_c(src, src_stride, - dst, dst_stride, - filter_x, x_step_q4, - filter_y, y_step_q4, - w, h); + vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } diff --git a/libs/libvpx/vpx_dsp/mips/convolve2_vert_dspr2.c b/libs/libvpx/vpx_dsp/mips/convolve2_vert_dspr2.c index dde6ffd54f..eb1975e447 100644 --- a/libs/libvpx/vpx_dsp/mips/convolve2_vert_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/convolve2_vert_dspr2.c @@ -18,25 +18,22 @@ #include "vpx_ports/mem.h" #if HAVE_DSPR2 -static void convolve_bi_vert_4_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_y, - int32_t w, +static void convolve_bi_vert_4_dspr2(const uint8_t *src, int32_t src_stride, + uint8_t *dst, int32_t dst_stride, + const int16_t *filter_y, int32_t w, int32_t h) { - int32_t x, y; + int32_t x, y; const uint8_t *src_ptr; - uint8_t *dst_ptr; - uint8_t *cm = vpx_ff_cropTbl; - uint32_t vector4a = 64; - uint32_t load1, load2; - uint32_t p1, p2; - uint32_t scratch1; - uint32_t store1, store2; - int32_t Temp1, Temp2; + uint8_t *dst_ptr; + uint8_t *cm = vpx_ff_cropTbl; + uint32_t vector4a = 64; + uint32_t load1, load2; + uint32_t p1, p2; + uint32_t scratch1; + uint32_t store1, store2; + int32_t Temp1, Temp2; const int16_t *filter = &filter_y[3]; - uint32_t filter45; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -48,7 +45,7 @@ static void convolve_bi_vert_4_dspr2(const uint8_t *src, src_ptr = src + x; dst_ptr = dst + x; - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[load1], 0(%[src_ptr]) \n\t" "add %[src_ptr], %[src_ptr], %[src_stride] \n\t" "ulw %[load2], 0(%[src_ptr]) \n\t" @@ -98,16 +95,12 @@ static void convolve_bi_vert_4_dspr2(const uint8_t *src, "sb %[store1], 2(%[dst_ptr]) \n\t" "sb %[store2], 3(%[dst_ptr]) \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [p1] "=&r" (p1), [p2] "=&r" (p2), - 
[scratch1] "=&r" (scratch1), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [store1] "=&r" (store1), [store2] "=&r" (store2), - [src_ptr] "+r" (src_ptr) - : [filter45] "r" (filter45),[vector4a] "r" (vector4a), - [src_stride] "r" (src_stride), - [cm] "r" (cm), [dst_ptr] "r" (dst_ptr) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), + [p2] "=&r"(p2), [scratch1] "=&r"(scratch1), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [store1] "=&r"(store1), + [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr) + : [filter45] "r"(filter45), [vector4a] "r"(vector4a), + [src_stride] "r"(src_stride), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr)); } /* Next row... */ @@ -116,24 +109,21 @@ static void convolve_bi_vert_4_dspr2(const uint8_t *src, } } -static void convolve_bi_vert_64_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_y, - int32_t h) { - int32_t x, y; +static void convolve_bi_vert_64_dspr2(const uint8_t *src, int32_t src_stride, + uint8_t *dst, int32_t dst_stride, + const int16_t *filter_y, int32_t h) { + int32_t x, y; const uint8_t *src_ptr; - uint8_t *dst_ptr; - uint8_t *cm = vpx_ff_cropTbl; - uint32_t vector4a = 64; - uint32_t load1, load2; - uint32_t p1, p2; - uint32_t scratch1; - uint32_t store1, store2; - int32_t Temp1, Temp2; + uint8_t *dst_ptr; + uint8_t *cm = vpx_ff_cropTbl; + uint32_t vector4a = 64; + uint32_t load1, load2; + uint32_t p1, p2; + uint32_t scratch1; + uint32_t store1, store2; + int32_t Temp1, Temp2; const int16_t *filter = &filter_y[3]; - uint32_t filter45; + uint32_t filter45; filter45 = ((const int32_t *)filter)[0]; @@ -145,7 +135,7 @@ static void convolve_bi_vert_64_dspr2(const uint8_t *src, src_ptr = src + x; dst_ptr = dst + x; - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[load1], 0(%[src_ptr]) \n\t" "add %[src_ptr], %[src_ptr], %[src_stride] \n\t" "ulw %[load2], 0(%[src_ptr]) \n\t" @@ -195,16 +185,12 @@ static void convolve_bi_vert_64_dspr2(const uint8_t *src, 
"sb %[store1], 2(%[dst_ptr]) \n\t" "sb %[store2], 3(%[dst_ptr]) \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [p1] "=&r" (p1), [p2] "=&r" (p2), - [scratch1] "=&r" (scratch1), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [store1] "=&r" (store1), [store2] "=&r" (store2), - [src_ptr] "+r" (src_ptr) - : [filter45] "r" (filter45),[vector4a] "r" (vector4a), - [src_stride] "r" (src_stride), - [cm] "r" (cm), [dst_ptr] "r" (dst_ptr) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), + [p2] "=&r"(p2), [scratch1] "=&r"(scratch1), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [store1] "=&r"(store1), + [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr) + : [filter45] "r"(filter45), [vector4a] "r"(vector4a), + [src_stride] "r"(src_stride), [cm] "r"(cm), [dst_ptr] "r"(dst_ptr)); } /* Next row... */ @@ -216,42 +202,34 @@ static void convolve_bi_vert_64_dspr2(const uint8_t *src, void vpx_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { uint32_t pos = 38; assert(y_step_q4 == 16); /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); prefetch_store(dst); switch (w) { - case 4 : - case 8 : - case 16 : - case 32 : - convolve_bi_vert_4_dspr2(src, src_stride, - dst, dst_stride, - filter_y, w, h); + case 4: + case 8: + case 16: + case 32: + convolve_bi_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y, w, + h); break; - case 64 : + case 64: prefetch_store(dst + 32); - convolve_bi_vert_64_dspr2(src, src_stride, - dst, dst_stride, - filter_y, h); + convolve_bi_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y, h); break; default: - vpx_convolve8_vert_c(src, src_stride, - dst, dst_stride, - filter_x, 
x_step_q4, - filter_y, y_step_q4, - w, h); + vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } diff --git a/libs/libvpx/vpx_dsp/mips/convolve8_avg_dspr2.c b/libs/libvpx/vpx_dsp/mips/convolve8_avg_dspr2.c index 43da9e54fb..31812299c3 100644 --- a/libs/libvpx/vpx_dsp/mips/convolve8_avg_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/convolve8_avg_dspr2.c @@ -18,25 +18,22 @@ #include "vpx_ports/mem.h" #if HAVE_DSPR2 -static void convolve_avg_vert_4_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_y, - int32_t w, +static void convolve_avg_vert_4_dspr2(const uint8_t *src, int32_t src_stride, + uint8_t *dst, int32_t dst_stride, + const int16_t *filter_y, int32_t w, int32_t h) { - int32_t x, y; + int32_t x, y; const uint8_t *src_ptr; - uint8_t *dst_ptr; - uint8_t *cm = vpx_ff_cropTbl; - uint32_t vector4a = 64; - uint32_t load1, load2, load3, load4; - uint32_t p1, p2; - uint32_t n1, n2; - uint32_t scratch1, scratch2; - uint32_t store1, store2; - int32_t vector1b, vector2b, vector3b, vector4b; - int32_t Temp1, Temp2; + uint8_t *dst_ptr; + uint8_t *cm = vpx_ff_cropTbl; + uint32_t vector4a = 64; + uint32_t load1, load2, load3, load4; + uint32_t p1, p2; + uint32_t n1, n2; + uint32_t scratch1, scratch2; + uint32_t store1, store2; + int32_t vector1b, vector2b, vector3b, vector4b; + int32_t Temp1, Temp2; vector1b = ((const int32_t *)filter_y)[0]; vector2b = ((const int32_t *)filter_y)[1]; @@ -53,7 +50,7 @@ static void convolve_avg_vert_4_dspr2(const uint8_t *src, src_ptr = src + x; dst_ptr = dst + x; - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[load1], 0(%[src_ptr]) \n\t" "add %[src_ptr], %[src_ptr], %[src_stride] \n\t" "ulw %[load2], 0(%[src_ptr]) \n\t" @@ -160,18 +157,16 @@ static void convolve_avg_vert_4_dspr2(const uint8_t *src, "sb %[store1], 2(%[dst_ptr]) \n\t" "sb %[store2], 3(%[dst_ptr]) \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - 
[load3] "=&r" (load3), [load4] "=&r" (load4), - [p1] "=&r" (p1), [p2] "=&r" (p2), [n1] "=&r" (n1), [n2] "=&r" (n2), - [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [store1] "=&r" (store1), [store2] "=&r" (store2), - [src_ptr] "+r" (src_ptr) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), [vector4b] "r" (vector4b), - [vector4a] "r" (vector4a), - [src_stride] "r" (src_stride), [cm] "r" (cm), [dst_ptr] "r" (dst_ptr) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2), + [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1), + [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [store1] "=&r"(store1), + [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4b] "r"(vector4b), + [vector4a] "r"(vector4a), [src_stride] "r"(src_stride), + [cm] "r"(cm), [dst_ptr] "r"(dst_ptr)); } /* Next row... 
*/ @@ -180,24 +175,21 @@ static void convolve_avg_vert_4_dspr2(const uint8_t *src, } } -static void convolve_avg_vert_64_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_y, - int32_t h) { - int32_t x, y; +static void convolve_avg_vert_64_dspr2(const uint8_t *src, int32_t src_stride, + uint8_t *dst, int32_t dst_stride, + const int16_t *filter_y, int32_t h) { + int32_t x, y; const uint8_t *src_ptr; - uint8_t *dst_ptr; - uint8_t *cm = vpx_ff_cropTbl; - uint32_t vector4a = 64; - uint32_t load1, load2, load3, load4; - uint32_t p1, p2; - uint32_t n1, n2; - uint32_t scratch1, scratch2; - uint32_t store1, store2; - int32_t vector1b, vector2b, vector3b, vector4b; - int32_t Temp1, Temp2; + uint8_t *dst_ptr; + uint8_t *cm = vpx_ff_cropTbl; + uint32_t vector4a = 64; + uint32_t load1, load2, load3, load4; + uint32_t p1, p2; + uint32_t n1, n2; + uint32_t scratch1, scratch2; + uint32_t store1, store2; + int32_t vector1b, vector2b, vector3b, vector4b; + int32_t Temp1, Temp2; vector1b = ((const int32_t *)filter_y)[0]; vector2b = ((const int32_t *)filter_y)[1]; @@ -215,7 +207,7 @@ static void convolve_avg_vert_64_dspr2(const uint8_t *src, src_ptr = src + x; dst_ptr = dst + x; - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[load1], 0(%[src_ptr]) \n\t" "add %[src_ptr], %[src_ptr], %[src_stride] \n\t" "ulw %[load2], 0(%[src_ptr]) \n\t" @@ -322,18 +314,16 @@ static void convolve_avg_vert_64_dspr2(const uint8_t *src, "sb %[store1], 2(%[dst_ptr]) \n\t" "sb %[store2], 3(%[dst_ptr]) \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [p1] "=&r" (p1), [p2] "=&r" (p2), [n1] "=&r" (n1), [n2] "=&r" (n2), - [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [store1] "=&r" (store1), [store2] "=&r" (store2), - [src_ptr] "+r" (src_ptr) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), 
[vector4b] "r" (vector4b), - [vector4a] "r" (vector4a), - [src_stride] "r" (src_stride), [cm] "r" (cm), [dst_ptr] "r" (dst_ptr) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2), + [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1), + [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [store1] "=&r"(store1), + [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4b] "r"(vector4b), + [vector4a] "r"(vector4a), [src_stride] "r"(src_stride), + [cm] "r"(cm), [dst_ptr] "r"(dst_ptr)); } /* Next row... */ @@ -345,26 +335,21 @@ static void convolve_avg_vert_64_dspr2(const uint8_t *src, void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { assert(y_step_q4 == 16); assert(((const int32_t *)filter_y)[1] != 0x800000); if (((const int32_t *)filter_y)[0] == 0) { - vpx_convolve2_avg_vert_dspr2(src, src_stride, - dst, dst_stride, - filter_x, x_step_q4, - filter_y, y_step_q4, - w, h); + vpx_convolve2_avg_vert_dspr2(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); } else { uint32_t pos = 38; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); prefetch_store(dst); @@ -373,22 +358,17 @@ void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, case 8: case 16: case 32: - convolve_avg_vert_4_dspr2(src, src_stride, - dst, dst_stride, - filter_y, w, h); + convolve_avg_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y, w, + h); break; case 64: prefetch_store(dst + 32); - 
convolve_avg_vert_64_dspr2(src, src_stride, - dst, dst_stride, - filter_y, h); + convolve_avg_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y, + h); break; default: - vpx_convolve8_avg_vert_c(src, src_stride, - dst, dst_stride, - filter_x, x_step_q4, - filter_y, y_step_q4, - w, h); + vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } @@ -397,8 +377,8 @@ void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, void vpx_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { /* Fixed size intermediate buffer places limits on parameters. */ DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]); int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7; @@ -408,27 +388,20 @@ void vpx_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride, assert(x_step_q4 == 16); assert(y_step_q4 == 16); - if (intermediate_height < h) - intermediate_height = h; + if (intermediate_height < h) intermediate_height = h; - vpx_convolve8_horiz(src - (src_stride * 3), src_stride, - temp, 64, - filter_x, x_step_q4, - filter_y, y_step_q4, - w, intermediate_height); + vpx_convolve8_horiz(src - (src_stride * 3), src_stride, temp, 64, filter_x, + x_step_q4, filter_y, y_step_q4, w, intermediate_height); - vpx_convolve8_avg_vert(temp + 64 * 3, 64, - dst, dst_stride, - filter_x, x_step_q4, - filter_y, y_step_q4, - w, h); + vpx_convolve8_avg_vert(temp + 64 * 3, 64, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); } void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int filter_x_stride, - const int16_t *filter_y, int filter_y_stride, - int w, int h) { + const int16_t *filter_y, int filter_y_stride, int w, + int h) 
{ int x, y; uint32_t tp1, tp2, tn1; uint32_t tp3, tp4, tn2; @@ -441,21 +414,19 @@ void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride, switch (w) { case 4: /* 1 word storage */ - for (y = h; y--; ) { + for (y = h; y--;) { prefetch_load(src + src_stride); prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 0(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ - "sw %[tn1], 0(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "sw %[tn1], 0(%[dst]) \n\t" /* store */ - : [tn1] "=&r" (tn1), [tp1] "=&r" (tp1), - [tp2] "=&r" (tp2) - : [src] "r" (src), [dst] "r" (dst) - ); + : [tn1] "=&r"(tn1), [tp1] "=&r"(tp1), [tp2] "=&r"(tp2) + : [src] "r"(src), [dst] "r"(dst)); src += src_stride; dst += dst_stride; @@ -463,26 +434,24 @@ void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride, break; case 8: /* 2 word storage */ - for (y = h; y--; ) { + for (y = h; y--;) { prefetch_load(src + src_stride); prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 0(%[dst]) \n\t" "ulw %[tp3], 4(%[src]) \n\t" "ulw %[tp4], 4(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ - "sw %[tn1], 0(%[dst]) \n\t" /* store */ - "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ - "sw %[tn2], 4(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "sw %[tn1], 0(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ + "sw %[tn2], 4(%[dst]) \n\t" /* store */ - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [tp3] "=&r" (tp3), [tp4] "=&r" (tp4), - [tn1] "=&r" (tn1), [tn2] "=&r" (tn2) - : [src] "r" (src), [dst] "r" (dst) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), + [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2) 
+ : [src] "r"(src), [dst] "r"(dst)); src += src_stride; dst += dst_stride; @@ -490,34 +459,32 @@ void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride, break; case 16: /* 4 word storage */ - for (y = h; y--; ) { + for (y = h; y--;) { prefetch_load(src + src_stride); prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 0(%[dst]) \n\t" "ulw %[tp3], 4(%[src]) \n\t" "ulw %[tp4], 4(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ "ulw %[tp1], 8(%[src]) \n\t" "ulw %[tp2], 8(%[dst]) \n\t" - "sw %[tn1], 0(%[dst]) \n\t" /* store */ - "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ - "sw %[tn2], 4(%[dst]) \n\t" /* store */ + "sw %[tn1], 0(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ + "sw %[tn2], 4(%[dst]) \n\t" /* store */ "ulw %[tp3], 12(%[src]) \n\t" "ulw %[tp4], 12(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ - "sw %[tn1], 8(%[dst]) \n\t" /* store */ - "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ - "sw %[tn2], 12(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "sw %[tn1], 8(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ + "sw %[tn2], 12(%[dst]) \n\t" /* store */ - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [tp3] "=&r" (tp3), [tp4] "=&r" (tp4), - [tn1] "=&r" (tn1), [tn2] "=&r" (tn2) - : [src] "r" (src), [dst] "r" (dst) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), + [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2) + : [src] "r"(src), [dst] "r"(dst)); src += src_stride; dst += dst_stride; @@ -525,50 +492,48 @@ void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride, break; case 32: /* 8 word storage */ - for (y = h; y--; ) { + for (y = h; y--;) { prefetch_load(src + src_stride); 
prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 0(%[dst]) \n\t" "ulw %[tp3], 4(%[src]) \n\t" "ulw %[tp4], 4(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ "ulw %[tp1], 8(%[src]) \n\t" "ulw %[tp2], 8(%[dst]) \n\t" - "sw %[tn1], 0(%[dst]) \n\t" /* store */ - "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ - "sw %[tn2], 4(%[dst]) \n\t" /* store */ + "sw %[tn1], 0(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ + "sw %[tn2], 4(%[dst]) \n\t" /* store */ "ulw %[tp3], 12(%[src]) \n\t" "ulw %[tp4], 12(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ "ulw %[tp1], 16(%[src]) \n\t" "ulw %[tp2], 16(%[dst]) \n\t" - "sw %[tn1], 8(%[dst]) \n\t" /* store */ - "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ - "sw %[tn2], 12(%[dst]) \n\t" /* store */ + "sw %[tn1], 8(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ + "sw %[tn2], 12(%[dst]) \n\t" /* store */ "ulw %[tp3], 20(%[src]) \n\t" "ulw %[tp4], 20(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ "ulw %[tp1], 24(%[src]) \n\t" "ulw %[tp2], 24(%[dst]) \n\t" - "sw %[tn1], 16(%[dst]) \n\t" /* store */ - "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ - "sw %[tn2], 20(%[dst]) \n\t" /* store */ + "sw %[tn1], 16(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ + "sw %[tn2], 20(%[dst]) \n\t" /* store */ "ulw %[tp3], 28(%[src]) \n\t" "ulw %[tp4], 28(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ - "sw %[tn1], 24(%[dst]) \n\t" /* store */ - "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ - "sw %[tn2], 28(%[dst]) \n\t" /* store */ + 
"adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "sw %[tn1], 24(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ + "sw %[tn2], 28(%[dst]) \n\t" /* store */ - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [tp3] "=&r" (tp3), [tp4] "=&r" (tp4), - [tn1] "=&r" (tn1), [tn2] "=&r" (tn2) - : [src] "r" (src), [dst] "r" (dst) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), + [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2) + : [src] "r"(src), [dst] "r"(dst)); src += src_stride; dst += dst_stride; @@ -579,84 +544,82 @@ void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride, prefetch_store(dst + 32); /* 16 word storage */ - for (y = h; y--; ) { + for (y = h; y--;) { prefetch_load(src + src_stride); prefetch_load(src + src_stride + 32); prefetch_load(src + src_stride + 64); prefetch_store(dst + dst_stride); prefetch_store(dst + dst_stride + 32); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 0(%[dst]) \n\t" "ulw %[tp3], 4(%[src]) \n\t" "ulw %[tp4], 4(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ "ulw %[tp1], 8(%[src]) \n\t" "ulw %[tp2], 8(%[dst]) \n\t" - "sw %[tn1], 0(%[dst]) \n\t" /* store */ - "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ - "sw %[tn2], 4(%[dst]) \n\t" /* store */ + "sw %[tn1], 0(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ + "sw %[tn2], 4(%[dst]) \n\t" /* store */ "ulw %[tp3], 12(%[src]) \n\t" "ulw %[tp4], 12(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ "ulw %[tp1], 16(%[src]) \n\t" "ulw %[tp2], 16(%[dst]) \n\t" - "sw %[tn1], 8(%[dst]) \n\t" /* store */ - "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ - "sw %[tn2], 12(%[dst]) \n\t" /* store */ + "sw %[tn1], 8(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* 
average */ + "sw %[tn2], 12(%[dst]) \n\t" /* store */ "ulw %[tp3], 20(%[src]) \n\t" "ulw %[tp4], 20(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ "ulw %[tp1], 24(%[src]) \n\t" "ulw %[tp2], 24(%[dst]) \n\t" - "sw %[tn1], 16(%[dst]) \n\t" /* store */ - "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ - "sw %[tn2], 20(%[dst]) \n\t" /* store */ + "sw %[tn1], 16(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ + "sw %[tn2], 20(%[dst]) \n\t" /* store */ "ulw %[tp3], 28(%[src]) \n\t" "ulw %[tp4], 28(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ "ulw %[tp1], 32(%[src]) \n\t" "ulw %[tp2], 32(%[dst]) \n\t" - "sw %[tn1], 24(%[dst]) \n\t" /* store */ - "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ - "sw %[tn2], 28(%[dst]) \n\t" /* store */ + "sw %[tn1], 24(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ + "sw %[tn2], 28(%[dst]) \n\t" /* store */ "ulw %[tp3], 36(%[src]) \n\t" "ulw %[tp4], 36(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ "ulw %[tp1], 40(%[src]) \n\t" "ulw %[tp2], 40(%[dst]) \n\t" - "sw %[tn1], 32(%[dst]) \n\t" /* store */ - "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ - "sw %[tn2], 36(%[dst]) \n\t" /* store */ + "sw %[tn1], 32(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ + "sw %[tn2], 36(%[dst]) \n\t" /* store */ "ulw %[tp3], 44(%[src]) \n\t" "ulw %[tp4], 44(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ "ulw %[tp1], 48(%[src]) \n\t" "ulw %[tp2], 48(%[dst]) \n\t" - "sw %[tn1], 40(%[dst]) \n\t" /* store */ - "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ - "sw %[tn2], 44(%[dst]) \n\t" /* store */ + "sw 
%[tn1], 40(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ + "sw %[tn2], 44(%[dst]) \n\t" /* store */ "ulw %[tp3], 52(%[src]) \n\t" "ulw %[tp4], 52(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ "ulw %[tp1], 56(%[src]) \n\t" "ulw %[tp2], 56(%[dst]) \n\t" - "sw %[tn1], 48(%[dst]) \n\t" /* store */ - "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ - "sw %[tn2], 52(%[dst]) \n\t" /* store */ + "sw %[tn1], 48(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ + "sw %[tn2], 52(%[dst]) \n\t" /* store */ "ulw %[tp3], 60(%[src]) \n\t" "ulw %[tp4], 60(%[dst]) \n\t" - "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ - "sw %[tn1], 56(%[dst]) \n\t" /* store */ - "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ - "sw %[tn2], 60(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn1], %[tp2], %[tp1] \n\t" /* average */ + "sw %[tn1], 56(%[dst]) \n\t" /* store */ + "adduh_r.qb %[tn2], %[tp3], %[tp4] \n\t" /* average */ + "sw %[tn2], 60(%[dst]) \n\t" /* store */ - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [tp3] "=&r" (tp3), [tp4] "=&r" (tp4), - [tn1] "=&r" (tn1), [tn2] "=&r" (tn2) - : [src] "r" (src), [dst] "r" (dst) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), + [tp4] "=&r"(tp4), [tn1] "=&r"(tn1), [tn2] "=&r"(tn2) + : [src] "r"(src), [dst] "r"(dst)); src += src_stride; dst += dst_stride; diff --git a/libs/libvpx/vpx_dsp/mips/convolve8_avg_horiz_dspr2.c b/libs/libvpx/vpx_dsp/mips/convolve8_avg_horiz_dspr2.c index db0c2a4da5..9a9bab25a5 100644 --- a/libs/libvpx/vpx_dsp/mips/convolve8_avg_horiz_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/convolve8_avg_horiz_dspr2.c @@ -18,16 +18,13 @@ #include "vpx_ports/mem.h" #if HAVE_DSPR2 -static void convolve_avg_horiz_4_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h) { +static void 
convolve_avg_horiz_4_dspr2(const uint8_t *src, int32_t src_stride, + uint8_t *dst, int32_t dst_stride, + const int16_t *filter_x0, int32_t h) { int32_t y; uint8_t *cm = vpx_ff_cropTbl; - int32_t vector1b, vector2b, vector3b, vector4b; - int32_t Temp1, Temp2, Temp3, Temp4; + int32_t vector1b, vector2b, vector3b, vector4b; + int32_t Temp1, Temp2, Temp3, Temp4; uint32_t vector4a = 64; uint32_t tp1, tp2; uint32_t p1, p2, p3, p4; @@ -45,7 +42,7 @@ static void convolve_avg_horiz_4_dspr2(const uint8_t *src, prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 4(%[src]) \n\t" @@ -76,13 +73,13 @@ static void convolve_avg_horiz_4_dspr2(const uint8_t *src, "dpa.w.ph $ac2, %[p1], %[vector4b] \n\t" "extp %[Temp3], $ac2, 31 \n\t" - "lbu %[p2], 3(%[dst]) \n\t" /* load odd 2 */ + "lbu %[p2], 3(%[dst]) \n\t" /* load odd 2 */ /* odd 1. pixel */ - "lbux %[tp1], %[Temp1](%[cm]) \n\t" /* even 1 */ + "lbux %[tp1], %[Temp1](%[cm]) \n\t" /* even 1 */ "mtlo %[vector4a], $ac3 \n\t" "mthi $zero, $ac3 \n\t" - "lbu %[Temp1], 1(%[dst]) \n\t" /* load odd 1 */ + "lbu %[Temp1], 1(%[dst]) \n\t" /* load odd 1 */ "preceu.ph.qbr %[n1], %[tp2] \n\t" "preceu.ph.qbl %[n2], %[tp2] \n\t" "preceu.ph.qbr %[n3], %[tn2] \n\t" @@ -93,46 +90,44 @@ static void convolve_avg_horiz_4_dspr2(const uint8_t *src, "dpa.w.ph $ac3, %[n4], %[vector4b] \n\t" "extp %[Temp2], $ac3, 31 \n\t" - "lbu %[tn2], 0(%[dst]) \n\t" /* load even 1 */ + "lbu %[tn2], 0(%[dst]) \n\t" /* load even 1 */ /* odd 2. 
pixel */ - "lbux %[tp2], %[Temp3](%[cm]) \n\t" /* even 2 */ + "lbux %[tp2], %[Temp3](%[cm]) \n\t" /* even 2 */ "mtlo %[vector4a], $ac2 \n\t" "mthi $zero, $ac2 \n\t" "preceu.ph.qbr %[n1], %[tn1] \n\t" - "lbux %[tn1], %[Temp2](%[cm]) \n\t" /* odd 1 */ - "addqh_r.w %[tn2], %[tn2], %[tp1] \n\t" /* average even 1 */ + "lbux %[tn1], %[Temp2](%[cm]) \n\t" /* odd 1 */ + "addqh_r.w %[tn2], %[tn2], %[tp1] \n\t" /* average even 1 */ "dpa.w.ph $ac2, %[n2], %[vector1b] \n\t" "dpa.w.ph $ac2, %[n3], %[vector2b] \n\t" "dpa.w.ph $ac2, %[n4], %[vector3b] \n\t" "dpa.w.ph $ac2, %[n1], %[vector4b] \n\t" "extp %[Temp4], $ac2, 31 \n\t" - "lbu %[tp1], 2(%[dst]) \n\t" /* load even 2 */ - "sb %[tn2], 0(%[dst]) \n\t" /* store even 1 */ + "lbu %[tp1], 2(%[dst]) \n\t" /* load even 2 */ + "sb %[tn2], 0(%[dst]) \n\t" /* store even 1 */ /* clamp */ - "addqh_r.w %[Temp1], %[Temp1], %[tn1] \n\t" /* average odd 1 */ - "lbux %[n2], %[Temp4](%[cm]) \n\t" /* odd 2 */ - "sb %[Temp1], 1(%[dst]) \n\t" /* store odd 1 */ + "addqh_r.w %[Temp1], %[Temp1], %[tn1] \n\t" /* average odd 1 */ + "lbux %[n2], %[Temp4](%[cm]) \n\t" /* odd 2 */ + "sb %[Temp1], 1(%[dst]) \n\t" /* store odd 1 */ - "addqh_r.w %[tp1], %[tp1], %[tp2] \n\t" /* average even 2 */ - "sb %[tp1], 2(%[dst]) \n\t" /* store even 2 */ + "addqh_r.w %[tp1], %[tp1], %[tp2] \n\t" /* average even 2 */ + "sb %[tp1], 2(%[dst]) \n\t" /* store even 2 */ - "addqh_r.w %[p2], %[p2], %[n2] \n\t" /* average odd 2 */ - "sb %[p2], 3(%[dst]) \n\t" /* store odd 2 */ + "addqh_r.w %[p2], %[p2], %[n2] \n\t" /* average odd 2 */ + "sb %[p2], 3(%[dst]) \n\t" /* store odd 2 */ - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [tn1] "=&r" (tn1), [tn2] "=&r" (tn2), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3), [n4] "=&r" (n4), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), 
[vector4b] "r" (vector4b), - [vector4a] "r" (vector4a), - [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1), + [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), + [p4] "=&r"(p4), [n1] "=&r"(n1), [n2] "=&r"(n2), [n3] "=&r"(n3), + [n4] "=&r"(n4), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), + [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4b] "r"(vector4b), + [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst), + [src] "r"(src)); /* Next row... */ src += src_stride; @@ -140,12 +135,9 @@ static void convolve_avg_horiz_4_dspr2(const uint8_t *src, } } -static void convolve_avg_horiz_8_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h) { +static void convolve_avg_horiz_8_dspr2(const uint8_t *src, int32_t src_stride, + uint8_t *dst, int32_t dst_stride, + const int16_t *filter_x0, int32_t h) { int32_t y; uint8_t *cm = vpx_ff_cropTbl; uint32_t vector4a = 64; @@ -167,7 +159,7 @@ static void convolve_avg_horiz_8_dspr2(const uint8_t *src, prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 4(%[src]) \n\t" @@ -309,17 +301,15 @@ static void convolve_avg_horiz_8_dspr2(const uint8_t *src, "sb %[tn3], 5(%[dst]) \n\t" "sb %[tn1], 7(%[dst]) \n\t" - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [tn1] "=&r" (tn1), [tn2] "=&r" (tn2), [tn3] "=&r" (tn3), - [st0] "=&r" (st0), [st1] "=&r" (st1), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [n1] "=&r" (n1), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), [vector4b] "r" (vector4b), - [vector4a] "r" (vector4a), - [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src) - ); + : 
[tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1), + [tn2] "=&r"(tn2), [tn3] "=&r"(tn3), [st0] "=&r"(st0), + [st1] "=&r"(st1), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), + [p4] "=&r"(p4), [n1] "=&r"(n1), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4b] "r"(vector4b), + [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst), + [src] "r"(src)); /* Next row... */ src += src_stride; @@ -328,11 +318,9 @@ static void convolve_avg_horiz_8_dspr2(const uint8_t *src, } static void convolve_avg_horiz_16_dspr2(const uint8_t *src_ptr, - int32_t src_stride, - uint8_t *dst_ptr, + int32_t src_stride, uint8_t *dst_ptr, int32_t dst_stride, - const int16_t *filter_x0, - int32_t h, + const int16_t *filter_x0, int32_t h, int32_t count) { int32_t y, c; const uint8_t *src; @@ -360,7 +348,7 @@ static void convolve_avg_horiz_16_dspr2(const uint8_t *src_ptr, prefetch_store(dst_ptr + dst_stride); for (c = 0; c < count; c++) { - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[qload1], 0(%[src]) \n\t" "ulw %[qload2], 4(%[src]) \n\t" @@ -618,16 +606,15 @@ static void convolve_avg_horiz_16_dspr2(const uint8_t *src_ptr, "sb %[qload3], 13(%[dst]) \n\t" /* store odd 7 to dst */ "sb %[qload1], 15(%[dst]) \n\t" /* store odd 8 to dst */ - : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), - [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [qload3] "=&r" (qload3), [p5] "=&r" (p5), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3) - : [filter12] "r" (filter12), [filter34] "r" (filter34), - [filter56] "r" (filter56), [filter78] "r" (filter78), - [vector_64] "r" (vector_64), - [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src) - ); + : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [st1] "=&r"(st1), + [st2] "=&r"(st2), [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), + [p3] 
"=&r"(p3), [p4] "=&r"(p4), [qload3] "=&r"(qload3), + [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), + [Temp3] "=&r"(Temp3) + : [filter12] "r"(filter12), [filter34] "r"(filter34), + [filter56] "r"(filter56), [filter78] "r"(filter78), + [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst), + [src] "r"(src)); src += 16; dst += 16; @@ -640,11 +627,9 @@ static void convolve_avg_horiz_16_dspr2(const uint8_t *src_ptr, } static void convolve_avg_horiz_64_dspr2(const uint8_t *src_ptr, - int32_t src_stride, - uint8_t *dst_ptr, + int32_t src_stride, uint8_t *dst_ptr, int32_t dst_stride, - const int16_t *filter_x0, - int32_t h) { + const int16_t *filter_x0, int32_t h) { int32_t y, c; const uint8_t *src; uint8_t *dst; @@ -673,7 +658,7 @@ static void convolve_avg_horiz_64_dspr2(const uint8_t *src_ptr, prefetch_store(dst_ptr + dst_stride + 32); for (c = 0; c < 4; c++) { - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[qload1], 0(%[src]) \n\t" "ulw %[qload2], 4(%[src]) \n\t" @@ -931,16 +916,15 @@ static void convolve_avg_horiz_64_dspr2(const uint8_t *src_ptr, "sb %[qload3], 13(%[dst]) \n\t" /* store odd 7 to dst */ "sb %[qload1], 15(%[dst]) \n\t" /* store odd 8 to dst */ - : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), - [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [qload3] "=&r" (qload3), [p5] "=&r" (p5), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3) - : [filter12] "r" (filter12), [filter34] "r" (filter34), - [filter56] "r" (filter56), [filter78] "r" (filter78), - [vector_64] "r" (vector_64), - [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src) - ); + : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [st1] "=&r"(st1), + [st2] "=&r"(st2), [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), + [p3] "=&r"(p3), [p4] "=&r"(p4), [qload3] "=&r"(qload3), + [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), + [Temp3] "=&r"(Temp3) + : [filter12] 
"r"(filter12), [filter34] "r"(filter34), + [filter56] "r"(filter56), [filter78] "r"(filter78), + [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst), + [src] "r"(src)); src += 16; dst += 16; @@ -961,22 +945,17 @@ void vpx_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, assert(((const int32_t *)filter_x)[1] != 0x800000); if (((const int32_t *)filter_x)[0] == 0) { - vpx_convolve2_avg_horiz_dspr2(src, src_stride, - dst, dst_stride, - filter_x, x_step_q4, - filter_y, y_step_q4, - w, h); + vpx_convolve2_avg_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); } else { uint32_t pos = 38; src -= 3; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); /* prefetch data to cache memory */ prefetch_load(src); @@ -985,39 +964,32 @@ void vpx_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, switch (w) { case 4: - convolve_avg_horiz_4_dspr2(src, src_stride, - dst, dst_stride, - filter_x, h); + convolve_avg_horiz_4_dspr2(src, src_stride, dst, dst_stride, filter_x, + h); break; case 8: - convolve_avg_horiz_8_dspr2(src, src_stride, - dst, dst_stride, - filter_x, h); + convolve_avg_horiz_8_dspr2(src, src_stride, dst, dst_stride, filter_x, + h); break; case 16: - convolve_avg_horiz_16_dspr2(src, src_stride, - dst, dst_stride, - filter_x, h, 1); + convolve_avg_horiz_16_dspr2(src, src_stride, dst, dst_stride, filter_x, + h, 1); break; case 32: - convolve_avg_horiz_16_dspr2(src, src_stride, - dst, dst_stride, - filter_x, h, 2); + convolve_avg_horiz_16_dspr2(src, src_stride, dst, dst_stride, filter_x, + h, 2); break; case 64: prefetch_load(src + 64); prefetch_store(dst + 32); - convolve_avg_horiz_64_dspr2(src, src_stride, - dst, dst_stride, - filter_x, h); + convolve_avg_horiz_64_dspr2(src, src_stride, dst, dst_stride, filter_x, + h); break; default: - 
vpx_convolve8_avg_horiz_c(src + 3, src_stride, - dst, dst_stride, - filter_x, x_step_q4, - filter_y, y_step_q4, - w, h); + vpx_convolve8_avg_horiz_c(src + 3, src_stride, dst, dst_stride, + filter_x, x_step_q4, filter_y, y_step_q4, w, + h); break; } } diff --git a/libs/libvpx/vpx_dsp/mips/convolve8_dspr2.c b/libs/libvpx/vpx_dsp/mips/convolve8_dspr2.c index ddad186922..f6812c7d04 100644 --- a/libs/libvpx/vpx_dsp/mips/convolve8_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/convolve8_dspr2.c @@ -19,8 +19,7 @@ #if HAVE_DSPR2 static void convolve_horiz_4_transposed_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, const int16_t *filter_x0, int32_t h) { @@ -45,7 +44,7 @@ static void convolve_horiz_4_transposed_dspr2(const uint8_t *src, prefetch_load(src + src_stride); prefetch_load(src + src_stride + 32); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 4(%[src]) \n\t" @@ -118,15 +117,14 @@ static void convolve_horiz_4_transposed_dspr2(const uint8_t *src, "sb %[p2], 0(%[dst_ptr]) \n\t" "addu %[dst_ptr], %[dst_ptr], %[dst_stride] \n\t" - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tn1] "=&r" (tn1), [tn2] "=&r" (tn2), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4), - [dst_ptr] "+r" (dst_ptr) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), [vector4b] "r" (vector4b), - [vector4a] "r" (vector4a), - [cm] "r" (cm), [src] "r" (src), [dst_stride] "r" (dst_stride) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1), + [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), + [p4] "=&r"(p4), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), + [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4), [dst_ptr] "+r"(dst_ptr) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4b] "r"(vector4b), 
+ [vector4a] "r"(vector4a), [cm] "r"(cm), [src] "r"(src), + [dst_stride] "r"(dst_stride)); /* Next row... */ src += src_stride; @@ -135,8 +133,7 @@ static void convolve_horiz_4_transposed_dspr2(const uint8_t *src, } static void convolve_horiz_8_transposed_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, const int16_t *filter_x0, int32_t h) { @@ -164,7 +161,7 @@ static void convolve_horiz_8_transposed_dspr2(const uint8_t *src, dst_ptr = dst; odd_dst = (dst_ptr + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp2], 0(%[src]) \n\t" "ulw %[tp1], 4(%[src]) \n\t" @@ -293,16 +290,14 @@ static void convolve_horiz_8_transposed_dspr2(const uint8_t *src, "sb %[n1], 0(%[odd_dst]) \n\t" - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), [tp3] "=&r" (tp3), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [n1] "=&r" (n1), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), - [dst_ptr] "+r" (dst_ptr), [odd_dst] "+r" (odd_dst) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), [vector4b] "r" (vector4b), - [vector4a] "r" (vector4a), [cm] "r" (cm), - [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), [p1] "=&r"(p1), + [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4), [n1] "=&r"(n1), + [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [dst_ptr] "+r"(dst_ptr), [odd_dst] "+r"(odd_dst) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4b] "r"(vector4b), + [vector4a] "r"(vector4a), [cm] "r"(cm), [src] "r"(src), + [dst_pitch_2] "r"(dst_pitch_2)); /* Next row... 
*/ src += src_stride; @@ -310,25 +305,21 @@ static void convolve_horiz_8_transposed_dspr2(const uint8_t *src, } } -static void convolve_horiz_16_transposed_dspr2(const uint8_t *src_ptr, - int32_t src_stride, - uint8_t *dst_ptr, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h, - int32_t count) { +static void convolve_horiz_16_transposed_dspr2( + const uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr, + int32_t dst_stride, const int16_t *filter_x0, int32_t h, int32_t count) { int32_t c, y; const uint8_t *src; uint8_t *dst; uint8_t *cm = vpx_ff_cropTbl; uint32_t vector_64 = 64; - int32_t filter12, filter34, filter56, filter78; - int32_t Temp1, Temp2, Temp3; + int32_t filter12, filter34, filter56, filter78; + int32_t Temp1, Temp2, Temp3; uint32_t qload1, qload2; uint32_t p1, p2, p3, p4, p5; uint32_t st1, st2, st3; uint32_t dst_pitch_2 = (dst_stride << 1); - uint8_t *odd_dst; + uint8_t *odd_dst; filter12 = ((const int32_t *)filter_x0)[0]; filter34 = ((const int32_t *)filter_x0)[1]; @@ -346,248 +337,439 @@ static void convolve_horiz_16_transposed_dspr2(const uint8_t *src_ptr, odd_dst = (dst + dst_stride); for (c = 0; c < count; c++) { - __asm__ __volatile__ ( - "ulw %[qload1], 0(%[src]) \n\t" - "ulw %[qload2], 4(%[src]) \n\t" + __asm__ __volatile__( + "ulw %[qload1], 0(%[src]) " + "\n\t" + "ulw %[qload2], 4(%[src]) " + "\n\t" /* even 1. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* even 1 */ - "mthi $zero, $ac1 \n\t" - "mtlo %[vector_64], $ac2 \n\t" /* even 2 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbr %[p3], %[qload2] \n\t" - "preceu.ph.qbl %[p4], %[qload2] \n\t" - "preceu.ph.qbr %[p1], %[qload1] \n\t" - "preceu.ph.qbl %[p2], %[qload1] \n\t" - "ulw %[qload2], 8(%[src]) \n\t" - "dpa.w.ph $ac1, %[p1], %[filter12] \n\t" /* even 1 */ - "dpa.w.ph $ac1, %[p2], %[filter34] \n\t" /* even 1 */ - "dpa.w.ph $ac1, %[p3], %[filter56] \n\t" /* even 1 */ - "dpa.w.ph $ac1, %[p4], %[filter78] \n\t" /* even 1 */ - "extp %[Temp1], $ac1, 31 \n\t" /* even 1 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* even 1 */ + "mthi $zero, $ac1 " + "\n\t" + "mtlo %[vector_64], $ac2 " + "\n\t" /* even 2 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbr %[p3], %[qload2] " + "\n\t" + "preceu.ph.qbl %[p4], %[qload2] " + "\n\t" + "preceu.ph.qbr %[p1], %[qload1] " + "\n\t" + "preceu.ph.qbl %[p2], %[qload1] " + "\n\t" + "ulw %[qload2], 8(%[src]) " + "\n\t" + "dpa.w.ph $ac1, %[p1], %[filter12] " + "\n\t" /* even 1 */ + "dpa.w.ph $ac1, %[p2], %[filter34] " + "\n\t" /* even 1 */ + "dpa.w.ph $ac1, %[p3], %[filter56] " + "\n\t" /* even 1 */ + "dpa.w.ph $ac1, %[p4], %[filter78] " + "\n\t" /* even 1 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* even 1 */ /* even 2. 
pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* even 3 */ - "mthi $zero, $ac3 \n\t" - "preceu.ph.qbr %[p1], %[qload2] \n\t" - "preceu.ph.qbl %[p5], %[qload2] \n\t" - "ulw %[qload1], 12(%[src]) \n\t" - "dpa.w.ph $ac2, %[p2], %[filter12] \n\t" /* even 1 */ - "dpa.w.ph $ac2, %[p3], %[filter34] \n\t" /* even 1 */ - "dpa.w.ph $ac2, %[p4], %[filter56] \n\t" /* even 1 */ - "dpa.w.ph $ac2, %[p1], %[filter78] \n\t" /* even 1 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 1 */ - "extp %[Temp2], $ac2, 31 \n\t" /* even 1 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* even 3 */ + "mthi $zero, $ac3 " + "\n\t" + "preceu.ph.qbr %[p1], %[qload2] " + "\n\t" + "preceu.ph.qbl %[p5], %[qload2] " + "\n\t" + "ulw %[qload1], 12(%[src]) " + "\n\t" + "dpa.w.ph $ac2, %[p2], %[filter12] " + "\n\t" /* even 1 */ + "dpa.w.ph $ac2, %[p3], %[filter34] " + "\n\t" /* even 1 */ + "dpa.w.ph $ac2, %[p4], %[filter56] " + "\n\t" /* even 1 */ + "dpa.w.ph $ac2, %[p1], %[filter78] " + "\n\t" /* even 1 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* even 1 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* even 1 */ /* even 3. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* even 4 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbr %[p2], %[qload1] \n\t" - "sb %[st1], 0(%[dst]) \n\t" /* even 1 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac3, %[p3], %[filter12] \n\t" /* even 3 */ - "dpa.w.ph $ac3, %[p4], %[filter34] \n\t" /* even 3 */ - "dpa.w.ph $ac3, %[p1], %[filter56] \n\t" /* even 3 */ - "dpa.w.ph $ac3, %[p5], %[filter78] \n\t" /* even 3 */ - "extp %[Temp3], $ac3, 31 \n\t" /* even 3 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 1 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* even 4 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbr %[p2], %[qload1] " + "\n\t" + "sb %[st1], 0(%[dst]) " + "\n\t" /* even 1 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + " \n\t" + "dpa.w.ph $ac3, %[p3], %[filter12] " + "\n\t" /* even 3 */ + "dpa.w.ph $ac3, %[p4], %[filter34] " + "\n\t" /* even 3 */ + "dpa.w.ph $ac3, %[p1], %[filter56] " + "\n\t" /* even 3 */ + "dpa.w.ph $ac3, %[p5], %[filter78] " + "\n\t" /* even 3 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* even 3 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* even 1 */ /* even 4. 
pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* even 5 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbl %[p3], %[qload1] \n\t" - "sb %[st2], 0(%[dst]) \n\t" /* even 2 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "ulw %[qload2], 16(%[src]) \n\t" - "dpa.w.ph $ac1, %[p4], %[filter12] \n\t" /* even 4 */ - "dpa.w.ph $ac1, %[p1], %[filter34] \n\t" /* even 4 */ - "dpa.w.ph $ac1, %[p5], %[filter56] \n\t" /* even 4 */ - "dpa.w.ph $ac1, %[p2], %[filter78] \n\t" /* even 4 */ - "extp %[Temp1], $ac1, 31 \n\t" /* even 4 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 3 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* even 5 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbl %[p3], %[qload1] " + "\n\t" + "sb %[st2], 0(%[dst]) " + "\n\t" /* even 2 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload2], 16(%[src]) " + "\n\t" + "dpa.w.ph $ac1, %[p4], %[filter12] " + "\n\t" /* even 4 */ + "dpa.w.ph $ac1, %[p1], %[filter34] " + "\n\t" /* even 4 */ + "dpa.w.ph $ac1, %[p5], %[filter56] " + "\n\t" /* even 4 */ + "dpa.w.ph $ac1, %[p2], %[filter78] " + "\n\t" /* even 4 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* even 4 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* even 3 */ /* even 5. 
pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* even 6 */ - "mthi $zero, $ac3 \n\t" - "preceu.ph.qbr %[p4], %[qload2] \n\t" - "sb %[st3], 0(%[dst]) \n\t" /* even 3 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac2, %[p1], %[filter12] \n\t" /* even 5 */ - "dpa.w.ph $ac2, %[p5], %[filter34] \n\t" /* even 5 */ - "dpa.w.ph $ac2, %[p2], %[filter56] \n\t" /* even 5 */ - "dpa.w.ph $ac2, %[p3], %[filter78] \n\t" /* even 5 */ - "extp %[Temp2], $ac2, 31 \n\t" /* even 5 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 4 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* even 6 */ + "mthi $zero, $ac3 " + "\n\t" + "preceu.ph.qbr %[p4], %[qload2] " + "\n\t" + "sb %[st3], 0(%[dst]) " + "\n\t" /* even 3 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac2, %[p1], %[filter12] " + "\n\t" /* even 5 */ + "dpa.w.ph $ac2, %[p5], %[filter34] " + "\n\t" /* even 5 */ + "dpa.w.ph $ac2, %[p2], %[filter56] " + "\n\t" /* even 5 */ + "dpa.w.ph $ac2, %[p3], %[filter78] " + "\n\t" /* even 5 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* even 5 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* even 4 */ /* even 6. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* even 7 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbl %[p1], %[qload2] \n\t" - "sb %[st1], 0(%[dst]) \n\t" /* even 4 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "ulw %[qload1], 20(%[src]) \n\t" - "dpa.w.ph $ac3, %[p5], %[filter12] \n\t" /* even 6 */ - "dpa.w.ph $ac3, %[p2], %[filter34] \n\t" /* even 6 */ - "dpa.w.ph $ac3, %[p3], %[filter56] \n\t" /* even 6 */ - "dpa.w.ph $ac3, %[p4], %[filter78] \n\t" /* even 6 */ - "extp %[Temp3], $ac3, 31 \n\t" /* even 6 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 5 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* even 7 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbl %[p1], %[qload2] " + "\n\t" + "sb %[st1], 0(%[dst]) " + "\n\t" /* even 4 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload1], 20(%[src]) " + "\n\t" + "dpa.w.ph $ac3, %[p5], %[filter12] " + "\n\t" /* even 6 */ + "dpa.w.ph $ac3, %[p2], %[filter34] " + "\n\t" /* even 6 */ + "dpa.w.ph $ac3, %[p3], %[filter56] " + "\n\t" /* even 6 */ + "dpa.w.ph $ac3, %[p4], %[filter78] " + "\n\t" /* even 6 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* even 6 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* even 5 */ /* even 7. 
pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* even 8 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbr %[p5], %[qload1] \n\t" - "sb %[st2], 0(%[dst]) \n\t" /* even 5 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac1, %[p2], %[filter12] \n\t" /* even 7 */ - "dpa.w.ph $ac1, %[p3], %[filter34] \n\t" /* even 7 */ - "dpa.w.ph $ac1, %[p4], %[filter56] \n\t" /* even 7 */ - "dpa.w.ph $ac1, %[p1], %[filter78] \n\t" /* even 7 */ - "extp %[Temp1], $ac1, 31 \n\t" /* even 7 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 6 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* even 8 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbr %[p5], %[qload1] " + "\n\t" + "sb %[st2], 0(%[dst]) " + "\n\t" /* even 5 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac1, %[p2], %[filter12] " + "\n\t" /* even 7 */ + "dpa.w.ph $ac1, %[p3], %[filter34] " + "\n\t" /* even 7 */ + "dpa.w.ph $ac1, %[p4], %[filter56] " + "\n\t" /* even 7 */ + "dpa.w.ph $ac1, %[p1], %[filter78] " + "\n\t" /* even 7 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* even 7 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* even 6 */ /* even 8. 
pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* odd 1 */ - "mthi $zero, $ac3 \n\t" - "dpa.w.ph $ac2, %[p3], %[filter12] \n\t" /* even 8 */ - "dpa.w.ph $ac2, %[p4], %[filter34] \n\t" /* even 8 */ - "sb %[st3], 0(%[dst]) \n\t" /* even 6 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac2, %[p1], %[filter56] \n\t" /* even 8 */ - "dpa.w.ph $ac2, %[p5], %[filter78] \n\t" /* even 8 */ - "extp %[Temp2], $ac2, 31 \n\t" /* even 8 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 7 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* odd 1 */ + "mthi $zero, $ac3 " + "\n\t" + "dpa.w.ph $ac2, %[p3], %[filter12] " + "\n\t" /* even 8 */ + "dpa.w.ph $ac2, %[p4], %[filter34] " + "\n\t" /* even 8 */ + "sb %[st3], 0(%[dst]) " + "\n\t" /* even 6 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac2, %[p1], %[filter56] " + "\n\t" /* even 8 */ + "dpa.w.ph $ac2, %[p5], %[filter78] " + "\n\t" /* even 8 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* even 8 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* even 7 */ /* ODD pixels */ - "ulw %[qload1], 1(%[src]) \n\t" - "ulw %[qload2], 5(%[src]) \n\t" + "ulw %[qload1], 1(%[src]) " + "\n\t" + "ulw %[qload2], 5(%[src]) " + "\n\t" /* odd 1. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* odd 2 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbr %[p1], %[qload1] \n\t" - "preceu.ph.qbl %[p2], %[qload1] \n\t" - "preceu.ph.qbr %[p3], %[qload2] \n\t" - "preceu.ph.qbl %[p4], %[qload2] \n\t" - "sb %[st1], 0(%[dst]) \n\t" /* even 7 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "ulw %[qload2], 9(%[src]) \n\t" - "dpa.w.ph $ac3, %[p1], %[filter12] \n\t" /* odd 1 */ - "dpa.w.ph $ac3, %[p2], %[filter34] \n\t" /* odd 1 */ - "dpa.w.ph $ac3, %[p3], %[filter56] \n\t" /* odd 1 */ - "dpa.w.ph $ac3, %[p4], %[filter78] \n\t" /* odd 1 */ - "extp %[Temp3], $ac3, 31 \n\t" /* odd 1 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 8 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* odd 2 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbr %[p1], %[qload1] " + "\n\t" + "preceu.ph.qbl %[p2], %[qload1] " + "\n\t" + "preceu.ph.qbr %[p3], %[qload2] " + "\n\t" + "preceu.ph.qbl %[p4], %[qload2] " + "\n\t" + "sb %[st1], 0(%[dst]) " + "\n\t" /* even 7 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload2], 9(%[src]) " + "\n\t" + "dpa.w.ph $ac3, %[p1], %[filter12] " + "\n\t" /* odd 1 */ + "dpa.w.ph $ac3, %[p2], %[filter34] " + "\n\t" /* odd 1 */ + "dpa.w.ph $ac3, %[p3], %[filter56] " + "\n\t" /* odd 1 */ + "dpa.w.ph $ac3, %[p4], %[filter78] " + "\n\t" /* odd 1 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* odd 1 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* even 8 */ /* odd 2. 
pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* odd 3 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbr %[p1], %[qload2] \n\t" - "preceu.ph.qbl %[p5], %[qload2] \n\t" - "sb %[st2], 0(%[dst]) \n\t" /* even 8 */ - "ulw %[qload1], 13(%[src]) \n\t" - "dpa.w.ph $ac1, %[p2], %[filter12] \n\t" /* odd 2 */ - "dpa.w.ph $ac1, %[p3], %[filter34] \n\t" /* odd 2 */ - "dpa.w.ph $ac1, %[p4], %[filter56] \n\t" /* odd 2 */ - "dpa.w.ph $ac1, %[p1], %[filter78] \n\t" /* odd 2 */ - "extp %[Temp1], $ac1, 31 \n\t" /* odd 2 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 1 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* odd 3 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbr %[p1], %[qload2] " + "\n\t" + "preceu.ph.qbl %[p5], %[qload2] " + "\n\t" + "sb %[st2], 0(%[dst]) " + "\n\t" /* even 8 */ + "ulw %[qload1], 13(%[src]) " + "\n\t" + "dpa.w.ph $ac1, %[p2], %[filter12] " + "\n\t" /* odd 2 */ + "dpa.w.ph $ac1, %[p3], %[filter34] " + "\n\t" /* odd 2 */ + "dpa.w.ph $ac1, %[p4], %[filter56] " + "\n\t" /* odd 2 */ + "dpa.w.ph $ac1, %[p1], %[filter78] " + "\n\t" /* odd 2 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* odd 2 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* odd 1 */ /* odd 3. 
pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* odd 4 */ - "mthi $zero, $ac3 \n\t" - "preceu.ph.qbr %[p2], %[qload1] \n\t" - "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 1 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac2, %[p3], %[filter12] \n\t" /* odd 3 */ - "dpa.w.ph $ac2, %[p4], %[filter34] \n\t" /* odd 3 */ - "dpa.w.ph $ac2, %[p1], %[filter56] \n\t" /* odd 3 */ - "dpa.w.ph $ac2, %[p5], %[filter78] \n\t" /* odd 3 */ - "extp %[Temp2], $ac2, 31 \n\t" /* odd 3 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 2 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* odd 4 */ + "mthi $zero, $ac3 " + "\n\t" + "preceu.ph.qbr %[p2], %[qload1] " + "\n\t" + "sb %[st3], 0(%[odd_dst]) " + "\n\t" /* odd 1 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac2, %[p3], %[filter12] " + "\n\t" /* odd 3 */ + "dpa.w.ph $ac2, %[p4], %[filter34] " + "\n\t" /* odd 3 */ + "dpa.w.ph $ac2, %[p1], %[filter56] " + "\n\t" /* odd 3 */ + "dpa.w.ph $ac2, %[p5], %[filter78] " + "\n\t" /* odd 3 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* odd 3 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* odd 2 */ /* odd 4. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* odd 5 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbl %[p3], %[qload1] \n\t" - "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 2 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "ulw %[qload2], 17(%[src]) \n\t" - "dpa.w.ph $ac3, %[p4], %[filter12] \n\t" /* odd 4 */ - "dpa.w.ph $ac3, %[p1], %[filter34] \n\t" /* odd 4 */ - "dpa.w.ph $ac3, %[p5], %[filter56] \n\t" /* odd 4 */ - "dpa.w.ph $ac3, %[p2], %[filter78] \n\t" /* odd 4 */ - "extp %[Temp3], $ac3, 31 \n\t" /* odd 4 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 3 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* odd 5 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbl %[p3], %[qload1] " + "\n\t" + "sb %[st1], 0(%[odd_dst]) " + "\n\t" /* odd 2 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload2], 17(%[src]) " + "\n\t" + "dpa.w.ph $ac3, %[p4], %[filter12] " + "\n\t" /* odd 4 */ + "dpa.w.ph $ac3, %[p1], %[filter34] " + "\n\t" /* odd 4 */ + "dpa.w.ph $ac3, %[p5], %[filter56] " + "\n\t" /* odd 4 */ + "dpa.w.ph $ac3, %[p2], %[filter78] " + "\n\t" /* odd 4 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* odd 4 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* odd 3 */ /* odd 5. 
pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* odd 6 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbr %[p4], %[qload2] \n\t" - "sb %[st2], 0(%[odd_dst]) \n\t" /* odd 3 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac1, %[p1], %[filter12] \n\t" /* odd 5 */ - "dpa.w.ph $ac1, %[p5], %[filter34] \n\t" /* odd 5 */ - "dpa.w.ph $ac1, %[p2], %[filter56] \n\t" /* odd 5 */ - "dpa.w.ph $ac1, %[p3], %[filter78] \n\t" /* odd 5 */ - "extp %[Temp1], $ac1, 31 \n\t" /* odd 5 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 4 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* odd 6 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbr %[p4], %[qload2] " + "\n\t" + "sb %[st2], 0(%[odd_dst]) " + "\n\t" /* odd 3 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac1, %[p1], %[filter12] " + "\n\t" /* odd 5 */ + "dpa.w.ph $ac1, %[p5], %[filter34] " + "\n\t" /* odd 5 */ + "dpa.w.ph $ac1, %[p2], %[filter56] " + "\n\t" /* odd 5 */ + "dpa.w.ph $ac1, %[p3], %[filter78] " + "\n\t" /* odd 5 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* odd 5 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* odd 4 */ /* odd 6. 
pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* odd 7 */ - "mthi $zero, $ac3 \n\t" - "preceu.ph.qbl %[p1], %[qload2] \n\t" - "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 4 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "ulw %[qload1], 21(%[src]) \n\t" - "dpa.w.ph $ac2, %[p5], %[filter12] \n\t" /* odd 6 */ - "dpa.w.ph $ac2, %[p2], %[filter34] \n\t" /* odd 6 */ - "dpa.w.ph $ac2, %[p3], %[filter56] \n\t" /* odd 6 */ - "dpa.w.ph $ac2, %[p4], %[filter78] \n\t" /* odd 6 */ - "extp %[Temp2], $ac2, 31 \n\t" /* odd 6 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 5 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* odd 7 */ + "mthi $zero, $ac3 " + "\n\t" + "preceu.ph.qbl %[p1], %[qload2] " + "\n\t" + "sb %[st3], 0(%[odd_dst]) " + "\n\t" /* odd 4 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload1], 21(%[src]) " + "\n\t" + "dpa.w.ph $ac2, %[p5], %[filter12] " + "\n\t" /* odd 6 */ + "dpa.w.ph $ac2, %[p2], %[filter34] " + "\n\t" /* odd 6 */ + "dpa.w.ph $ac2, %[p3], %[filter56] " + "\n\t" /* odd 6 */ + "dpa.w.ph $ac2, %[p4], %[filter78] " + "\n\t" /* odd 6 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* odd 6 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* odd 5 */ /* odd 7. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* odd 8 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbr %[p5], %[qload1] \n\t" - "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 5 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac3, %[p2], %[filter12] \n\t" /* odd 7 */ - "dpa.w.ph $ac3, %[p3], %[filter34] \n\t" /* odd 7 */ - "dpa.w.ph $ac3, %[p4], %[filter56] \n\t" /* odd 7 */ - "dpa.w.ph $ac3, %[p1], %[filter78] \n\t" /* odd 7 */ - "extp %[Temp3], $ac3, 31 \n\t" /* odd 7 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* odd 8 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbr %[p5], %[qload1] " + "\n\t" + "sb %[st1], 0(%[odd_dst]) " + "\n\t" /* odd 5 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac3, %[p2], %[filter12] " + "\n\t" /* odd 7 */ + "dpa.w.ph $ac3, %[p3], %[filter34] " + "\n\t" /* odd 7 */ + "dpa.w.ph $ac3, %[p4], %[filter56] " + "\n\t" /* odd 7 */ + "dpa.w.ph $ac3, %[p1], %[filter78] " + "\n\t" /* odd 7 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* odd 7 */ /* odd 8. 
pixel */ - "dpa.w.ph $ac1, %[p3], %[filter12] \n\t" /* odd 8 */ - "dpa.w.ph $ac1, %[p4], %[filter34] \n\t" /* odd 8 */ - "dpa.w.ph $ac1, %[p1], %[filter56] \n\t" /* odd 8 */ - "dpa.w.ph $ac1, %[p5], %[filter78] \n\t" /* odd 8 */ - "extp %[Temp1], $ac1, 31 \n\t" /* odd 8 */ + "dpa.w.ph $ac1, %[p3], %[filter12] " + "\n\t" /* odd 8 */ + "dpa.w.ph $ac1, %[p4], %[filter34] " + "\n\t" /* odd 8 */ + "dpa.w.ph $ac1, %[p1], %[filter56] " + "\n\t" /* odd 8 */ + "dpa.w.ph $ac1, %[p5], %[filter78] " + "\n\t" /* odd 8 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* odd 8 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 6 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 7 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 8 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* odd 6 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* odd 7 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* odd 8 */ - "sb %[st2], 0(%[odd_dst]) \n\t" /* odd 6 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" + "sb %[st2], 0(%[odd_dst]) " + "\n\t" /* odd 6 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" - "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 7 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" + "sb %[st3], 0(%[odd_dst]) " + "\n\t" /* odd 7 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" - "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 8 */ + "sb %[st1], 0(%[odd_dst]) " + "\n\t" /* odd 8 */ - : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5), - [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), - [dst] "+r" (dst), [odd_dst] "+r" (odd_dst) - : [filter12] "r" (filter12), [filter34] "r" (filter34), - [filter56] "r" (filter56), [filter78] "r" (filter78), - [vector_64] "r" (vector_64), [cm] "r" (cm), - [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2) - ); + : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [p5] "=&r"(p5), + [st1] 
"=&r"(st1), [st2] "=&r"(st2), [st3] "=&r"(st3), + [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4), + [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [dst] "+r"(dst), [odd_dst] "+r"(odd_dst) + : [filter12] "r"(filter12), [filter34] "r"(filter34), + [filter56] "r"(filter56), [filter78] "r"(filter78), + [vector_64] "r"(vector_64), [cm] "r"(cm), [src] "r"(src), + [dst_pitch_2] "r"(dst_pitch_2)); src += 16; dst = (dst_ptr + ((c + 1) * 16 * dst_stride)); @@ -601,24 +783,21 @@ static void convolve_horiz_16_transposed_dspr2(const uint8_t *src_ptr, } } -static void convolve_horiz_64_transposed_dspr2(const uint8_t *src_ptr, - int32_t src_stride, - uint8_t *dst_ptr, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h) { +static void convolve_horiz_64_transposed_dspr2( + const uint8_t *src_ptr, int32_t src_stride, uint8_t *dst_ptr, + int32_t dst_stride, const int16_t *filter_x0, int32_t h) { int32_t c, y; const uint8_t *src; uint8_t *dst; uint8_t *cm = vpx_ff_cropTbl; uint32_t vector_64 = 64; - int32_t filter12, filter34, filter56, filter78; - int32_t Temp1, Temp2, Temp3; + int32_t filter12, filter34, filter56, filter78; + int32_t Temp1, Temp2, Temp3; uint32_t qload1, qload2; uint32_t p1, p2, p3, p4, p5; uint32_t st1, st2, st3; uint32_t dst_pitch_2 = (dst_stride << 1); - uint8_t *odd_dst; + uint8_t *odd_dst; filter12 = ((const int32_t *)filter_x0)[0]; filter34 = ((const int32_t *)filter_x0)[1]; @@ -637,248 +816,439 @@ static void convolve_horiz_64_transposed_dspr2(const uint8_t *src_ptr, odd_dst = (dst + dst_stride); for (c = 0; c < 4; c++) { - __asm__ __volatile__ ( - "ulw %[qload1], 0(%[src]) \n\t" - "ulw %[qload2], 4(%[src]) \n\t" + __asm__ __volatile__( + "ulw %[qload1], 0(%[src]) " + "\n\t" + "ulw %[qload2], 4(%[src]) " + "\n\t" /* even 1. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* even 1 */ - "mthi $zero, $ac1 \n\t" - "mtlo %[vector_64], $ac2 \n\t" /* even 2 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbr %[p3], %[qload2] \n\t" - "preceu.ph.qbl %[p4], %[qload2] \n\t" - "preceu.ph.qbr %[p1], %[qload1] \n\t" - "preceu.ph.qbl %[p2], %[qload1] \n\t" - "ulw %[qload2], 8(%[src]) \n\t" - "dpa.w.ph $ac1, %[p1], %[filter12] \n\t" /* even 1 */ - "dpa.w.ph $ac1, %[p2], %[filter34] \n\t" /* even 1 */ - "dpa.w.ph $ac1, %[p3], %[filter56] \n\t" /* even 1 */ - "dpa.w.ph $ac1, %[p4], %[filter78] \n\t" /* even 1 */ - "extp %[Temp1], $ac1, 31 \n\t" /* even 1 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* even 1 */ + "mthi $zero, $ac1 " + "\n\t" + "mtlo %[vector_64], $ac2 " + "\n\t" /* even 2 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbr %[p3], %[qload2] " + "\n\t" + "preceu.ph.qbl %[p4], %[qload2] " + "\n\t" + "preceu.ph.qbr %[p1], %[qload1] " + "\n\t" + "preceu.ph.qbl %[p2], %[qload1] " + "\n\t" + "ulw %[qload2], 8(%[src]) " + "\n\t" + "dpa.w.ph $ac1, %[p1], %[filter12] " + "\n\t" /* even 1 */ + "dpa.w.ph $ac1, %[p2], %[filter34] " + "\n\t" /* even 1 */ + "dpa.w.ph $ac1, %[p3], %[filter56] " + "\n\t" /* even 1 */ + "dpa.w.ph $ac1, %[p4], %[filter78] " + "\n\t" /* even 1 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* even 1 */ /* even 2. 
pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* even 3 */ - "mthi $zero, $ac3 \n\t" - "preceu.ph.qbr %[p1], %[qload2] \n\t" - "preceu.ph.qbl %[p5], %[qload2] \n\t" - "ulw %[qload1], 12(%[src]) \n\t" - "dpa.w.ph $ac2, %[p2], %[filter12] \n\t" /* even 1 */ - "dpa.w.ph $ac2, %[p3], %[filter34] \n\t" /* even 1 */ - "dpa.w.ph $ac2, %[p4], %[filter56] \n\t" /* even 1 */ - "dpa.w.ph $ac2, %[p1], %[filter78] \n\t" /* even 1 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 1 */ - "extp %[Temp2], $ac2, 31 \n\t" /* even 1 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* even 3 */ + "mthi $zero, $ac3 " + "\n\t" + "preceu.ph.qbr %[p1], %[qload2] " + "\n\t" + "preceu.ph.qbl %[p5], %[qload2] " + "\n\t" + "ulw %[qload1], 12(%[src]) " + "\n\t" + "dpa.w.ph $ac2, %[p2], %[filter12] " + "\n\t" /* even 1 */ + "dpa.w.ph $ac2, %[p3], %[filter34] " + "\n\t" /* even 1 */ + "dpa.w.ph $ac2, %[p4], %[filter56] " + "\n\t" /* even 1 */ + "dpa.w.ph $ac2, %[p1], %[filter78] " + "\n\t" /* even 1 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* even 1 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* even 1 */ /* even 3. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* even 4 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbr %[p2], %[qload1] \n\t" - "sb %[st1], 0(%[dst]) \n\t" /* even 1 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac3, %[p3], %[filter12] \n\t" /* even 3 */ - "dpa.w.ph $ac3, %[p4], %[filter34] \n\t" /* even 3 */ - "dpa.w.ph $ac3, %[p1], %[filter56] \n\t" /* even 3 */ - "dpa.w.ph $ac3, %[p5], %[filter78] \n\t" /* even 3 */ - "extp %[Temp3], $ac3, 31 \n\t" /* even 3 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 1 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* even 4 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbr %[p2], %[qload1] " + "\n\t" + "sb %[st1], 0(%[dst]) " + "\n\t" /* even 1 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + " \n\t" + "dpa.w.ph $ac3, %[p3], %[filter12] " + "\n\t" /* even 3 */ + "dpa.w.ph $ac3, %[p4], %[filter34] " + "\n\t" /* even 3 */ + "dpa.w.ph $ac3, %[p1], %[filter56] " + "\n\t" /* even 3 */ + "dpa.w.ph $ac3, %[p5], %[filter78] " + "\n\t" /* even 3 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* even 3 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* even 1 */ /* even 4. 
pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* even 5 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbl %[p3], %[qload1] \n\t" - "sb %[st2], 0(%[dst]) \n\t" /* even 2 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "ulw %[qload2], 16(%[src]) \n\t" - "dpa.w.ph $ac1, %[p4], %[filter12] \n\t" /* even 4 */ - "dpa.w.ph $ac1, %[p1], %[filter34] \n\t" /* even 4 */ - "dpa.w.ph $ac1, %[p5], %[filter56] \n\t" /* even 4 */ - "dpa.w.ph $ac1, %[p2], %[filter78] \n\t" /* even 4 */ - "extp %[Temp1], $ac1, 31 \n\t" /* even 4 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 3 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* even 5 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbl %[p3], %[qload1] " + "\n\t" + "sb %[st2], 0(%[dst]) " + "\n\t" /* even 2 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload2], 16(%[src]) " + "\n\t" + "dpa.w.ph $ac1, %[p4], %[filter12] " + "\n\t" /* even 4 */ + "dpa.w.ph $ac1, %[p1], %[filter34] " + "\n\t" /* even 4 */ + "dpa.w.ph $ac1, %[p5], %[filter56] " + "\n\t" /* even 4 */ + "dpa.w.ph $ac1, %[p2], %[filter78] " + "\n\t" /* even 4 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* even 4 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* even 3 */ /* even 5. 
pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* even 6 */ - "mthi $zero, $ac3 \n\t" - "preceu.ph.qbr %[p4], %[qload2] \n\t" - "sb %[st3], 0(%[dst]) \n\t" /* even 3 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac2, %[p1], %[filter12] \n\t" /* even 5 */ - "dpa.w.ph $ac2, %[p5], %[filter34] \n\t" /* even 5 */ - "dpa.w.ph $ac2, %[p2], %[filter56] \n\t" /* even 5 */ - "dpa.w.ph $ac2, %[p3], %[filter78] \n\t" /* even 5 */ - "extp %[Temp2], $ac2, 31 \n\t" /* even 5 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 4 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* even 6 */ + "mthi $zero, $ac3 " + "\n\t" + "preceu.ph.qbr %[p4], %[qload2] " + "\n\t" + "sb %[st3], 0(%[dst]) " + "\n\t" /* even 3 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac2, %[p1], %[filter12] " + "\n\t" /* even 5 */ + "dpa.w.ph $ac2, %[p5], %[filter34] " + "\n\t" /* even 5 */ + "dpa.w.ph $ac2, %[p2], %[filter56] " + "\n\t" /* even 5 */ + "dpa.w.ph $ac2, %[p3], %[filter78] " + "\n\t" /* even 5 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* even 5 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* even 4 */ /* even 6. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* even 7 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbl %[p1], %[qload2] \n\t" - "sb %[st1], 0(%[dst]) \n\t" /* even 4 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "ulw %[qload1], 20(%[src]) \n\t" - "dpa.w.ph $ac3, %[p5], %[filter12] \n\t" /* even 6 */ - "dpa.w.ph $ac3, %[p2], %[filter34] \n\t" /* even 6 */ - "dpa.w.ph $ac3, %[p3], %[filter56] \n\t" /* even 6 */ - "dpa.w.ph $ac3, %[p4], %[filter78] \n\t" /* even 6 */ - "extp %[Temp3], $ac3, 31 \n\t" /* even 6 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 5 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* even 7 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbl %[p1], %[qload2] " + "\n\t" + "sb %[st1], 0(%[dst]) " + "\n\t" /* even 4 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload1], 20(%[src]) " + "\n\t" + "dpa.w.ph $ac3, %[p5], %[filter12] " + "\n\t" /* even 6 */ + "dpa.w.ph $ac3, %[p2], %[filter34] " + "\n\t" /* even 6 */ + "dpa.w.ph $ac3, %[p3], %[filter56] " + "\n\t" /* even 6 */ + "dpa.w.ph $ac3, %[p4], %[filter78] " + "\n\t" /* even 6 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* even 6 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* even 5 */ /* even 7. 
pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* even 8 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbr %[p5], %[qload1] \n\t" - "sb %[st2], 0(%[dst]) \n\t" /* even 5 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac1, %[p2], %[filter12] \n\t" /* even 7 */ - "dpa.w.ph $ac1, %[p3], %[filter34] \n\t" /* even 7 */ - "dpa.w.ph $ac1, %[p4], %[filter56] \n\t" /* even 7 */ - "dpa.w.ph $ac1, %[p1], %[filter78] \n\t" /* even 7 */ - "extp %[Temp1], $ac1, 31 \n\t" /* even 7 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* even 6 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* even 8 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbr %[p5], %[qload1] " + "\n\t" + "sb %[st2], 0(%[dst]) " + "\n\t" /* even 5 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac1, %[p2], %[filter12] " + "\n\t" /* even 7 */ + "dpa.w.ph $ac1, %[p3], %[filter34] " + "\n\t" /* even 7 */ + "dpa.w.ph $ac1, %[p4], %[filter56] " + "\n\t" /* even 7 */ + "dpa.w.ph $ac1, %[p1], %[filter78] " + "\n\t" /* even 7 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* even 7 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* even 6 */ /* even 8. 
pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* odd 1 */ - "mthi $zero, $ac3 \n\t" - "dpa.w.ph $ac2, %[p3], %[filter12] \n\t" /* even 8 */ - "dpa.w.ph $ac2, %[p4], %[filter34] \n\t" /* even 8 */ - "sb %[st3], 0(%[dst]) \n\t" /* even 6 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac2, %[p1], %[filter56] \n\t" /* even 8 */ - "dpa.w.ph $ac2, %[p5], %[filter78] \n\t" /* even 8 */ - "extp %[Temp2], $ac2, 31 \n\t" /* even 8 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* even 7 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* odd 1 */ + "mthi $zero, $ac3 " + "\n\t" + "dpa.w.ph $ac2, %[p3], %[filter12] " + "\n\t" /* even 8 */ + "dpa.w.ph $ac2, %[p4], %[filter34] " + "\n\t" /* even 8 */ + "sb %[st3], 0(%[dst]) " + "\n\t" /* even 6 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac2, %[p1], %[filter56] " + "\n\t" /* even 8 */ + "dpa.w.ph $ac2, %[p5], %[filter78] " + "\n\t" /* even 8 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* even 8 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* even 7 */ /* ODD pixels */ - "ulw %[qload1], 1(%[src]) \n\t" - "ulw %[qload2], 5(%[src]) \n\t" + "ulw %[qload1], 1(%[src]) " + "\n\t" + "ulw %[qload2], 5(%[src]) " + "\n\t" /* odd 1. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* odd 2 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbr %[p1], %[qload1] \n\t" - "preceu.ph.qbl %[p2], %[qload1] \n\t" - "preceu.ph.qbr %[p3], %[qload2] \n\t" - "preceu.ph.qbl %[p4], %[qload2] \n\t" - "sb %[st1], 0(%[dst]) \n\t" /* even 7 */ - "addu %[dst], %[dst], %[dst_pitch_2] \n\t" - "ulw %[qload2], 9(%[src]) \n\t" - "dpa.w.ph $ac3, %[p1], %[filter12] \n\t" /* odd 1 */ - "dpa.w.ph $ac3, %[p2], %[filter34] \n\t" /* odd 1 */ - "dpa.w.ph $ac3, %[p3], %[filter56] \n\t" /* odd 1 */ - "dpa.w.ph $ac3, %[p4], %[filter78] \n\t" /* odd 1 */ - "extp %[Temp3], $ac3, 31 \n\t" /* odd 1 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* even 8 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* odd 2 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbr %[p1], %[qload1] " + "\n\t" + "preceu.ph.qbl %[p2], %[qload1] " + "\n\t" + "preceu.ph.qbr %[p3], %[qload2] " + "\n\t" + "preceu.ph.qbl %[p4], %[qload2] " + "\n\t" + "sb %[st1], 0(%[dst]) " + "\n\t" /* even 7 */ + "addu %[dst], %[dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload2], 9(%[src]) " + "\n\t" + "dpa.w.ph $ac3, %[p1], %[filter12] " + "\n\t" /* odd 1 */ + "dpa.w.ph $ac3, %[p2], %[filter34] " + "\n\t" /* odd 1 */ + "dpa.w.ph $ac3, %[p3], %[filter56] " + "\n\t" /* odd 1 */ + "dpa.w.ph $ac3, %[p4], %[filter78] " + "\n\t" /* odd 1 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* odd 1 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* even 8 */ /* odd 2. 
pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* odd 3 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbr %[p1], %[qload2] \n\t" - "preceu.ph.qbl %[p5], %[qload2] \n\t" - "sb %[st2], 0(%[dst]) \n\t" /* even 8 */ - "ulw %[qload1], 13(%[src]) \n\t" - "dpa.w.ph $ac1, %[p2], %[filter12] \n\t" /* odd 2 */ - "dpa.w.ph $ac1, %[p3], %[filter34] \n\t" /* odd 2 */ - "dpa.w.ph $ac1, %[p4], %[filter56] \n\t" /* odd 2 */ - "dpa.w.ph $ac1, %[p1], %[filter78] \n\t" /* odd 2 */ - "extp %[Temp1], $ac1, 31 \n\t" /* odd 2 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 1 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* odd 3 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbr %[p1], %[qload2] " + "\n\t" + "preceu.ph.qbl %[p5], %[qload2] " + "\n\t" + "sb %[st2], 0(%[dst]) " + "\n\t" /* even 8 */ + "ulw %[qload1], 13(%[src]) " + "\n\t" + "dpa.w.ph $ac1, %[p2], %[filter12] " + "\n\t" /* odd 2 */ + "dpa.w.ph $ac1, %[p3], %[filter34] " + "\n\t" /* odd 2 */ + "dpa.w.ph $ac1, %[p4], %[filter56] " + "\n\t" /* odd 2 */ + "dpa.w.ph $ac1, %[p1], %[filter78] " + "\n\t" /* odd 2 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* odd 2 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* odd 1 */ /* odd 3. 
pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* odd 4 */ - "mthi $zero, $ac3 \n\t" - "preceu.ph.qbr %[p2], %[qload1] \n\t" - "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 1 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac2, %[p3], %[filter12] \n\t" /* odd 3 */ - "dpa.w.ph $ac2, %[p4], %[filter34] \n\t" /* odd 3 */ - "dpa.w.ph $ac2, %[p1], %[filter56] \n\t" /* odd 3 */ - "dpa.w.ph $ac2, %[p5], %[filter78] \n\t" /* odd 3 */ - "extp %[Temp2], $ac2, 31 \n\t" /* odd 3 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 2 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* odd 4 */ + "mthi $zero, $ac3 " + "\n\t" + "preceu.ph.qbr %[p2], %[qload1] " + "\n\t" + "sb %[st3], 0(%[odd_dst]) " + "\n\t" /* odd 1 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac2, %[p3], %[filter12] " + "\n\t" /* odd 3 */ + "dpa.w.ph $ac2, %[p4], %[filter34] " + "\n\t" /* odd 3 */ + "dpa.w.ph $ac2, %[p1], %[filter56] " + "\n\t" /* odd 3 */ + "dpa.w.ph $ac2, %[p5], %[filter78] " + "\n\t" /* odd 3 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* odd 3 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* odd 2 */ /* odd 4. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* odd 5 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbl %[p3], %[qload1] \n\t" - "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 2 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "ulw %[qload2], 17(%[src]) \n\t" - "dpa.w.ph $ac3, %[p4], %[filter12] \n\t" /* odd 4 */ - "dpa.w.ph $ac3, %[p1], %[filter34] \n\t" /* odd 4 */ - "dpa.w.ph $ac3, %[p5], %[filter56] \n\t" /* odd 4 */ - "dpa.w.ph $ac3, %[p2], %[filter78] \n\t" /* odd 4 */ - "extp %[Temp3], $ac3, 31 \n\t" /* odd 4 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 3 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* odd 5 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbl %[p3], %[qload1] " + "\n\t" + "sb %[st1], 0(%[odd_dst]) " + "\n\t" /* odd 2 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload2], 17(%[src]) " + "\n\t" + "dpa.w.ph $ac3, %[p4], %[filter12] " + "\n\t" /* odd 4 */ + "dpa.w.ph $ac3, %[p1], %[filter34] " + "\n\t" /* odd 4 */ + "dpa.w.ph $ac3, %[p5], %[filter56] " + "\n\t" /* odd 4 */ + "dpa.w.ph $ac3, %[p2], %[filter78] " + "\n\t" /* odd 4 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* odd 4 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* odd 3 */ /* odd 5. 
pixel */ - "mtlo %[vector_64], $ac2 \n\t" /* odd 6 */ - "mthi $zero, $ac2 \n\t" - "preceu.ph.qbr %[p4], %[qload2] \n\t" - "sb %[st2], 0(%[odd_dst]) \n\t" /* odd 3 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac1, %[p1], %[filter12] \n\t" /* odd 5 */ - "dpa.w.ph $ac1, %[p5], %[filter34] \n\t" /* odd 5 */ - "dpa.w.ph $ac1, %[p2], %[filter56] \n\t" /* odd 5 */ - "dpa.w.ph $ac1, %[p3], %[filter78] \n\t" /* odd 5 */ - "extp %[Temp1], $ac1, 31 \n\t" /* odd 5 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 4 */ + "mtlo %[vector_64], $ac2 " + "\n\t" /* odd 6 */ + "mthi $zero, $ac2 " + "\n\t" + "preceu.ph.qbr %[p4], %[qload2] " + "\n\t" + "sb %[st2], 0(%[odd_dst]) " + "\n\t" /* odd 3 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac1, %[p1], %[filter12] " + "\n\t" /* odd 5 */ + "dpa.w.ph $ac1, %[p5], %[filter34] " + "\n\t" /* odd 5 */ + "dpa.w.ph $ac1, %[p2], %[filter56] " + "\n\t" /* odd 5 */ + "dpa.w.ph $ac1, %[p3], %[filter78] " + "\n\t" /* odd 5 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* odd 5 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* odd 4 */ /* odd 6. 
pixel */ - "mtlo %[vector_64], $ac3 \n\t" /* odd 7 */ - "mthi $zero, $ac3 \n\t" - "preceu.ph.qbl %[p1], %[qload2] \n\t" - "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 4 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "ulw %[qload1], 21(%[src]) \n\t" - "dpa.w.ph $ac2, %[p5], %[filter12] \n\t" /* odd 6 */ - "dpa.w.ph $ac2, %[p2], %[filter34] \n\t" /* odd 6 */ - "dpa.w.ph $ac2, %[p3], %[filter56] \n\t" /* odd 6 */ - "dpa.w.ph $ac2, %[p4], %[filter78] \n\t" /* odd 6 */ - "extp %[Temp2], $ac2, 31 \n\t" /* odd 6 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 5 */ + "mtlo %[vector_64], $ac3 " + "\n\t" /* odd 7 */ + "mthi $zero, $ac3 " + "\n\t" + "preceu.ph.qbl %[p1], %[qload2] " + "\n\t" + "sb %[st3], 0(%[odd_dst]) " + "\n\t" /* odd 4 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "ulw %[qload1], 21(%[src]) " + "\n\t" + "dpa.w.ph $ac2, %[p5], %[filter12] " + "\n\t" /* odd 6 */ + "dpa.w.ph $ac2, %[p2], %[filter34] " + "\n\t" /* odd 6 */ + "dpa.w.ph $ac2, %[p3], %[filter56] " + "\n\t" /* odd 6 */ + "dpa.w.ph $ac2, %[p4], %[filter78] " + "\n\t" /* odd 6 */ + "extp %[Temp2], $ac2, 31 " + "\n\t" /* odd 6 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* odd 5 */ /* odd 7. 
pixel */ - "mtlo %[vector_64], $ac1 \n\t" /* odd 8 */ - "mthi $zero, $ac1 \n\t" - "preceu.ph.qbr %[p5], %[qload1] \n\t" - "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 5 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" - "dpa.w.ph $ac3, %[p2], %[filter12] \n\t" /* odd 7 */ - "dpa.w.ph $ac3, %[p3], %[filter34] \n\t" /* odd 7 */ - "dpa.w.ph $ac3, %[p4], %[filter56] \n\t" /* odd 7 */ - "dpa.w.ph $ac3, %[p1], %[filter78] \n\t" /* odd 7 */ - "extp %[Temp3], $ac3, 31 \n\t" /* odd 7 */ + "mtlo %[vector_64], $ac1 " + "\n\t" /* odd 8 */ + "mthi $zero, $ac1 " + "\n\t" + "preceu.ph.qbr %[p5], %[qload1] " + "\n\t" + "sb %[st1], 0(%[odd_dst]) " + "\n\t" /* odd 5 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" + "dpa.w.ph $ac3, %[p2], %[filter12] " + "\n\t" /* odd 7 */ + "dpa.w.ph $ac3, %[p3], %[filter34] " + "\n\t" /* odd 7 */ + "dpa.w.ph $ac3, %[p4], %[filter56] " + "\n\t" /* odd 7 */ + "dpa.w.ph $ac3, %[p1], %[filter78] " + "\n\t" /* odd 7 */ + "extp %[Temp3], $ac3, 31 " + "\n\t" /* odd 7 */ /* odd 8. 
pixel */ - "dpa.w.ph $ac1, %[p3], %[filter12] \n\t" /* odd 8 */ - "dpa.w.ph $ac1, %[p4], %[filter34] \n\t" /* odd 8 */ - "dpa.w.ph $ac1, %[p1], %[filter56] \n\t" /* odd 8 */ - "dpa.w.ph $ac1, %[p5], %[filter78] \n\t" /* odd 8 */ - "extp %[Temp1], $ac1, 31 \n\t" /* odd 8 */ + "dpa.w.ph $ac1, %[p3], %[filter12] " + "\n\t" /* odd 8 */ + "dpa.w.ph $ac1, %[p4], %[filter34] " + "\n\t" /* odd 8 */ + "dpa.w.ph $ac1, %[p1], %[filter56] " + "\n\t" /* odd 8 */ + "dpa.w.ph $ac1, %[p5], %[filter78] " + "\n\t" /* odd 8 */ + "extp %[Temp1], $ac1, 31 " + "\n\t" /* odd 8 */ - "lbux %[st2], %[Temp2](%[cm]) \n\t" /* odd 6 */ - "lbux %[st3], %[Temp3](%[cm]) \n\t" /* odd 7 */ - "lbux %[st1], %[Temp1](%[cm]) \n\t" /* odd 8 */ + "lbux %[st2], %[Temp2](%[cm]) " + "\n\t" /* odd 6 */ + "lbux %[st3], %[Temp3](%[cm]) " + "\n\t" /* odd 7 */ + "lbux %[st1], %[Temp1](%[cm]) " + "\n\t" /* odd 8 */ - "sb %[st2], 0(%[odd_dst]) \n\t" /* odd 6 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" + "sb %[st2], 0(%[odd_dst]) " + "\n\t" /* odd 6 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" - "sb %[st3], 0(%[odd_dst]) \n\t" /* odd 7 */ - "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] \n\t" + "sb %[st3], 0(%[odd_dst]) " + "\n\t" /* odd 7 */ + "addu %[odd_dst], %[odd_dst], %[dst_pitch_2] " + "\n\t" - "sb %[st1], 0(%[odd_dst]) \n\t" /* odd 8 */ + "sb %[st1], 0(%[odd_dst]) " + "\n\t" /* odd 8 */ - : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [p5] "=&r" (p5), - [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), - [dst] "+r" (dst), [odd_dst] "+r" (odd_dst) - : [filter12] "r" (filter12), [filter34] "r" (filter34), - [filter56] "r" (filter56), [filter78] "r" (filter78), - [vector_64] "r" (vector_64), [cm] "r" (cm), - [src] "r" (src), [dst_pitch_2] "r" (dst_pitch_2) - ); + : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), [p5] "=&r"(p5), + [st1] 
"=&r"(st1), [st2] "=&r"(st2), [st3] "=&r"(st3), + [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4), + [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3), + [dst] "+r"(dst), [odd_dst] "+r"(odd_dst) + : [filter12] "r"(filter12), [filter34] "r"(filter34), + [filter56] "r"(filter56), [filter78] "r"(filter78), + [vector_64] "r"(vector_64), [cm] "r"(cm), [src] "r"(src), + [dst_pitch_2] "r"(dst_pitch_2)); src += 16; dst = (dst_ptr + ((c + 1) * 16 * dst_stride)); @@ -901,8 +1271,7 @@ void convolve_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride, for (x = 0; x < w; ++x) { int sum = 0; - for (k = 0; k < 8; ++k) - sum += src[x + k] * filter[k]; + for (k = 0; k < 8; ++k) sum += src[x + k] * filter[k]; dst[x * dst_stride] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); } @@ -913,8 +1282,7 @@ void convolve_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride, } void copy_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - int w, int h) { + uint8_t *dst, ptrdiff_t dst_stride, int w, int h) { int x, y; for (y = 0; y < h; ++y) { @@ -927,10 +1295,9 @@ void copy_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride, } } -void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, +void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const int16_t *filter_x, + int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { DECLARE_ALIGNED(32, uint8_t, temp[64 * 135]); int32_t intermediate_height = ((h * y_step_q4) >> 4) + 7; @@ -941,27 +1308,20 @@ void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, assert(((const int32_t *)filter_x)[1] != 0x800000); assert(((const int32_t *)filter_y)[1] != 0x800000); - /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" 
- : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); - if (intermediate_height < h) - intermediate_height = h; + if (intermediate_height < h) intermediate_height = h; /* copy the src to dst */ if (filter_x[3] == 0x80) { - copy_horiz_transposed(src - src_stride * 3, src_stride, - temp, intermediate_height, - w, intermediate_height); + copy_horiz_transposed(src - src_stride * 3, src_stride, temp, + intermediate_height, w, intermediate_height); } else if (((const int32_t *)filter_x)[0] == 0) { - vpx_convolve2_dspr2(src - src_stride * 3, src_stride, - temp, intermediate_height, - filter_x, - w, intermediate_height); + vpx_convolve2_dspr2(src - src_stride * 3, src_stride, temp, + intermediate_height, filter_x, w, intermediate_height); } else { src -= (src_stride * 3 + 3); @@ -971,31 +1331,29 @@ void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, switch (w) { case 4: - convolve_horiz_4_transposed_dspr2(src, src_stride, - temp, intermediate_height, - filter_x, intermediate_height); + convolve_horiz_4_transposed_dspr2(src, src_stride, temp, + intermediate_height, filter_x, + intermediate_height); break; case 8: - convolve_horiz_8_transposed_dspr2(src, src_stride, - temp, intermediate_height, - filter_x, intermediate_height); + convolve_horiz_8_transposed_dspr2(src, src_stride, temp, + intermediate_height, filter_x, + intermediate_height); break; case 16: case 32: - convolve_horiz_16_transposed_dspr2(src, src_stride, - temp, intermediate_height, - filter_x, intermediate_height, - (w/16)); + convolve_horiz_16_transposed_dspr2(src, src_stride, temp, + intermediate_height, filter_x, + intermediate_height, (w / 16)); break; case 64: prefetch_load(src + 32); - convolve_horiz_64_transposed_dspr2(src, src_stride, - temp, intermediate_height, - filter_x, intermediate_height); + convolve_horiz_64_transposed_dspr2(src, src_stride, temp, + intermediate_height, filter_x, + intermediate_height); break; default: - 
convolve_horiz_transposed(src, src_stride, - temp, intermediate_height, + convolve_horiz_transposed(src, src_stride, temp, intermediate_height, filter_x, w, intermediate_height); break; } @@ -1003,40 +1361,31 @@ void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, /* copy the src to dst */ if (filter_y[3] == 0x80) { - copy_horiz_transposed(temp + 3, intermediate_height, - dst, dst_stride, - h, w); + copy_horiz_transposed(temp + 3, intermediate_height, dst, dst_stride, h, w); } else if (((const int32_t *)filter_y)[0] == 0) { - vpx_convolve2_dspr2(temp + 3, intermediate_height, - dst, dst_stride, - filter_y, - h, w); + vpx_convolve2_dspr2(temp + 3, intermediate_height, dst, dst_stride, + filter_y, h, w); } else { switch (h) { case 4: - convolve_horiz_4_transposed_dspr2(temp, intermediate_height, - dst, dst_stride, - filter_y, w); + convolve_horiz_4_transposed_dspr2(temp, intermediate_height, dst, + dst_stride, filter_y, w); break; case 8: - convolve_horiz_8_transposed_dspr2(temp, intermediate_height, - dst, dst_stride, - filter_y, w); + convolve_horiz_8_transposed_dspr2(temp, intermediate_height, dst, + dst_stride, filter_y, w); break; case 16: case 32: - convolve_horiz_16_transposed_dspr2(temp, intermediate_height, - dst, dst_stride, - filter_y, w, (h/16)); + convolve_horiz_16_transposed_dspr2(temp, intermediate_height, dst, + dst_stride, filter_y, w, (h / 16)); break; case 64: - convolve_horiz_64_transposed_dspr2(temp, intermediate_height, - dst, dst_stride, - filter_y, w); + convolve_horiz_64_transposed_dspr2(temp, intermediate_height, dst, + dst_stride, filter_y, w); break; default: - convolve_horiz_transposed(temp, intermediate_height, - dst, dst_stride, + convolve_horiz_transposed(temp, intermediate_height, dst, dst_stride, filter_y, h, w); break; } @@ -1056,97 +1405,90 @@ void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride, prefetch_store(dst); switch (w) { - case 4: - { + case 4: { uint32_t tp1; /* 1 word storage */ - for (y = 
h; y--; ) { + for (y = h; y--;) { prefetch_load(src + src_stride); prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], (%[src]) \n\t" - "sw %[tp1], (%[dst]) \n\t" /* store */ + "sw %[tp1], (%[dst]) \n\t" /* store */ - : [tp1] "=&r" (tp1) - : [src] "r" (src), [dst] "r" (dst) - ); + : [tp1] "=&r"(tp1) + : [src] "r"(src), [dst] "r"(dst)); src += src_stride; dst += dst_stride; } - } break; - case 8: - { + } + case 8: { uint32_t tp1, tp2; /* 2 word storage */ - for (y = h; y--; ) { + for (y = h; y--;) { prefetch_load(src + src_stride); prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 4(%[src]) \n\t" - "sw %[tp1], 0(%[dst]) \n\t" /* store */ - "sw %[tp2], 4(%[dst]) \n\t" /* store */ + "sw %[tp1], 0(%[dst]) \n\t" /* store */ + "sw %[tp2], 4(%[dst]) \n\t" /* store */ - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2) - : [src] "r" (src), [dst] "r" (dst) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2) + : [src] "r"(src), [dst] "r"(dst)); src += src_stride; dst += dst_stride; } - } break; - case 16: - { + } + case 16: { uint32_t tp1, tp2, tp3, tp4; /* 4 word storage */ - for (y = h; y--; ) { + for (y = h; y--;) { prefetch_load(src + src_stride); prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 4(%[src]) \n\t" "ulw %[tp3], 8(%[src]) \n\t" "ulw %[tp4], 12(%[src]) \n\t" - "sw %[tp1], 0(%[dst]) \n\t" /* store */ - "sw %[tp2], 4(%[dst]) \n\t" /* store */ - "sw %[tp3], 8(%[dst]) \n\t" /* store */ - "sw %[tp4], 12(%[dst]) \n\t" /* store */ + "sw %[tp1], 0(%[dst]) \n\t" /* store */ + "sw %[tp2], 4(%[dst]) \n\t" /* store */ + "sw %[tp3], 8(%[dst]) \n\t" /* store */ + "sw %[tp4], 12(%[dst]) \n\t" /* store */ - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [tp3] "=&r" (tp3), [tp4] "=&r" (tp4) - : 
[src] "r" (src), [dst] "r" (dst) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), + [tp4] "=&r"(tp4) + : [src] "r"(src), [dst] "r"(dst)); src += src_stride; dst += dst_stride; } - } break; - case 32: - { + } + case 32: { uint32_t tp1, tp2, tp3, tp4; uint32_t tp5, tp6, tp7, tp8; /* 8 word storage */ - for (y = h; y--; ) { + for (y = h; y--;) { prefetch_load(src + src_stride); prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 4(%[src]) \n\t" "ulw %[tp3], 8(%[src]) \n\t" @@ -1156,29 +1498,26 @@ void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride, "ulw %[tp7], 24(%[src]) \n\t" "ulw %[tp8], 28(%[src]) \n\t" - "sw %[tp1], 0(%[dst]) \n\t" /* store */ - "sw %[tp2], 4(%[dst]) \n\t" /* store */ - "sw %[tp3], 8(%[dst]) \n\t" /* store */ - "sw %[tp4], 12(%[dst]) \n\t" /* store */ - "sw %[tp5], 16(%[dst]) \n\t" /* store */ - "sw %[tp6], 20(%[dst]) \n\t" /* store */ - "sw %[tp7], 24(%[dst]) \n\t" /* store */ - "sw %[tp8], 28(%[dst]) \n\t" /* store */ + "sw %[tp1], 0(%[dst]) \n\t" /* store */ + "sw %[tp2], 4(%[dst]) \n\t" /* store */ + "sw %[tp3], 8(%[dst]) \n\t" /* store */ + "sw %[tp4], 12(%[dst]) \n\t" /* store */ + "sw %[tp5], 16(%[dst]) \n\t" /* store */ + "sw %[tp6], 20(%[dst]) \n\t" /* store */ + "sw %[tp7], 24(%[dst]) \n\t" /* store */ + "sw %[tp8], 28(%[dst]) \n\t" /* store */ - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [tp3] "=&r" (tp3), [tp4] "=&r" (tp4), - [tp5] "=&r" (tp5), [tp6] "=&r" (tp6), - [tp7] "=&r" (tp7), [tp8] "=&r" (tp8) - : [src] "r" (src), [dst] "r" (dst) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), + [tp4] "=&r"(tp4), [tp5] "=&r"(tp5), [tp6] "=&r"(tp6), + [tp7] "=&r"(tp7), [tp8] "=&r"(tp8) + : [src] "r"(src), [dst] "r"(dst)); src += src_stride; dst += dst_stride; } - } break; - case 64: - { + } + case 64: { uint32_t tp1, tp2, tp3, tp4; uint32_t tp5, tp6, tp7, tp8; @@ -1186,14 +1525,14 @@ void 
vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride, prefetch_store(dst + 32); /* 16 word storage */ - for (y = h; y--; ) { + for (y = h; y--;) { prefetch_load(src + src_stride); prefetch_load(src + src_stride + 32); prefetch_load(src + src_stride + 64); prefetch_store(dst + dst_stride); prefetch_store(dst + dst_stride + 32); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 4(%[src]) \n\t" "ulw %[tp3], 8(%[src]) \n\t" @@ -1203,14 +1542,14 @@ void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride, "ulw %[tp7], 24(%[src]) \n\t" "ulw %[tp8], 28(%[src]) \n\t" - "sw %[tp1], 0(%[dst]) \n\t" /* store */ - "sw %[tp2], 4(%[dst]) \n\t" /* store */ - "sw %[tp3], 8(%[dst]) \n\t" /* store */ - "sw %[tp4], 12(%[dst]) \n\t" /* store */ - "sw %[tp5], 16(%[dst]) \n\t" /* store */ - "sw %[tp6], 20(%[dst]) \n\t" /* store */ - "sw %[tp7], 24(%[dst]) \n\t" /* store */ - "sw %[tp8], 28(%[dst]) \n\t" /* store */ + "sw %[tp1], 0(%[dst]) \n\t" /* store */ + "sw %[tp2], 4(%[dst]) \n\t" /* store */ + "sw %[tp3], 8(%[dst]) \n\t" /* store */ + "sw %[tp4], 12(%[dst]) \n\t" /* store */ + "sw %[tp5], 16(%[dst]) \n\t" /* store */ + "sw %[tp6], 20(%[dst]) \n\t" /* store */ + "sw %[tp7], 24(%[dst]) \n\t" /* store */ + "sw %[tp8], 28(%[dst]) \n\t" /* store */ "ulw %[tp1], 32(%[src]) \n\t" "ulw %[tp2], 36(%[src]) \n\t" @@ -1221,29 +1560,27 @@ void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride, "ulw %[tp7], 56(%[src]) \n\t" "ulw %[tp8], 60(%[src]) \n\t" - "sw %[tp1], 32(%[dst]) \n\t" /* store */ - "sw %[tp2], 36(%[dst]) \n\t" /* store */ - "sw %[tp3], 40(%[dst]) \n\t" /* store */ - "sw %[tp4], 44(%[dst]) \n\t" /* store */ - "sw %[tp5], 48(%[dst]) \n\t" /* store */ - "sw %[tp6], 52(%[dst]) \n\t" /* store */ - "sw %[tp7], 56(%[dst]) \n\t" /* store */ - "sw %[tp8], 60(%[dst]) \n\t" /* store */ + "sw %[tp1], 32(%[dst]) \n\t" /* store */ + "sw %[tp2], 36(%[dst]) \n\t" /* store */ + "sw %[tp3], 40(%[dst]) \n\t" /* store 
*/ + "sw %[tp4], 44(%[dst]) \n\t" /* store */ + "sw %[tp5], 48(%[dst]) \n\t" /* store */ + "sw %[tp6], 52(%[dst]) \n\t" /* store */ + "sw %[tp7], 56(%[dst]) \n\t" /* store */ + "sw %[tp8], 60(%[dst]) \n\t" /* store */ - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [tp3] "=&r" (tp3), [tp4] "=&r" (tp4), - [tp5] "=&r" (tp5), [tp6] "=&r" (tp6), - [tp7] "=&r" (tp7), [tp8] "=&r" (tp8) - : [src] "r" (src), [dst] "r" (dst) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tp3] "=&r"(tp3), + [tp4] "=&r"(tp4), [tp5] "=&r"(tp5), [tp6] "=&r"(tp6), + [tp7] "=&r"(tp7), [tp8] "=&r"(tp8) + : [src] "r"(src), [dst] "r"(dst)); src += src_stride; dst += dst_stride; } - } break; + } default: - for (y = h; y--; ) { + for (y = h; y--;) { for (x = 0; x < w; ++x) { dst[x] = src[x]; } diff --git a/libs/libvpx/vpx_dsp/mips/convolve8_horiz_dspr2.c b/libs/libvpx/vpx_dsp/mips/convolve8_horiz_dspr2.c index ae78bab892..196a0a2f0b 100644 --- a/libs/libvpx/vpx_dsp/mips/convolve8_horiz_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/convolve8_horiz_dspr2.c @@ -18,12 +18,9 @@ #include "vpx_ports/mem.h" #if HAVE_DSPR2 -static void convolve_horiz_4_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h) { +static void convolve_horiz_4_dspr2(const uint8_t *src, int32_t src_stride, + uint8_t *dst, int32_t dst_stride, + const int16_t *filter_x0, int32_t h) { int32_t y; uint8_t *cm = vpx_ff_cropTbl; int32_t vector1b, vector2b, vector3b, vector4b; @@ -45,7 +42,7 @@ static void convolve_horiz_4_dspr2(const uint8_t *src, prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 4(%[src]) \n\t" @@ -111,17 +108,15 @@ static void convolve_horiz_4_dspr2(const uint8_t *src, "sb %[tp2], 2(%[dst]) \n\t" "sb %[n2], 3(%[dst]) \n\t" - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [tn1] "=&r" (tn1), [tn2] "=&r" (tn2), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" 
(p3), [p4] "=&r" (p4), - [n1] "=&r" (n1), [n2] "=&r" (n2), [n3] "=&r" (n3), [n4] "=&r" (n4), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [Temp3] "=&r" (Temp3), [Temp4] "=&r" (Temp4) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), [vector4b] "r" (vector4b), - [vector4a] "r" (vector4a), - [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1), + [tn2] "=&r"(tn2), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), + [p4] "=&r"(p4), [n1] "=&r"(n1), [n2] "=&r"(n2), [n3] "=&r"(n3), + [n4] "=&r"(n4), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), + [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4b] "r"(vector4b), + [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst), + [src] "r"(src)); /* Next row... */ src += src_stride; @@ -129,12 +124,9 @@ static void convolve_horiz_4_dspr2(const uint8_t *src, } } -static void convolve_horiz_8_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h) { +static void convolve_horiz_8_dspr2(const uint8_t *src, int32_t src_stride, + uint8_t *dst, int32_t dst_stride, + const int16_t *filter_x0, int32_t h) { int32_t y; uint8_t *cm = vpx_ff_cropTbl; uint32_t vector4a = 64; @@ -156,7 +148,7 @@ static void convolve_horiz_8_dspr2(const uint8_t *src, prefetch_load(src + src_stride + 32); prefetch_store(dst + dst_stride); - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[tp1], 0(%[src]) \n\t" "ulw %[tp2], 4(%[src]) \n\t" @@ -275,17 +267,15 @@ static void convolve_horiz_8_dspr2(const uint8_t *src, "sb %[p2], 5(%[dst]) \n\t" "sb %[n1], 7(%[dst]) \n\t" - : [tp1] "=&r" (tp1), [tp2] "=&r" (tp2), - [tn1] "=&r" (tn1), [tn2] "=&r" (tn2), [tn3] "=&r" (tn3), - [st0] "=&r" (st0), [st1] "=&r" (st1), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [n1] "=&r" (n1), - [Temp1] "=&r" 
(Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), [vector4b] "r" (vector4b), - [vector4a] "r" (vector4a), - [cm] "r" (cm), [dst] "r" (dst), [src] "r" (src) - ); + : [tp1] "=&r"(tp1), [tp2] "=&r"(tp2), [tn1] "=&r"(tn1), + [tn2] "=&r"(tn2), [tn3] "=&r"(tn3), [st0] "=&r"(st0), + [st1] "=&r"(st1), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), + [p4] "=&r"(p4), [n1] "=&r"(n1), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4b] "r"(vector4b), + [vector4a] "r"(vector4a), [cm] "r"(cm), [dst] "r"(dst), + [src] "r"(src)); /* Next row... */ src += src_stride; @@ -293,12 +283,9 @@ static void convolve_horiz_8_dspr2(const uint8_t *src, } } -static void convolve_horiz_16_dspr2(const uint8_t *src_ptr, - int32_t src_stride, - uint8_t *dst_ptr, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h, +static void convolve_horiz_16_dspr2(const uint8_t *src_ptr, int32_t src_stride, + uint8_t *dst_ptr, int32_t dst_stride, + const int16_t *filter_x0, int32_t h, int32_t count) { int32_t y, c; const uint8_t *src; @@ -326,7 +313,7 @@ static void convolve_horiz_16_dspr2(const uint8_t *src_ptr, prefetch_store(dst_ptr + dst_stride); for (c = 0; c < count; c++) { - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[qload1], 0(%[src]) \n\t" "ulw %[qload2], 4(%[src]) \n\t" @@ -542,17 +529,15 @@ static void convolve_horiz_16_dspr2(const uint8_t *src_ptr, "sb %[st3], 13(%[dst]) \n\t" /* odd 7 */ "sb %[st1], 15(%[dst]) \n\t" /* odd 8 */ - : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3), - [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [p5] "=&r" (p5), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3) - : [filter12] "r" (filter12), [filter34] "r" (filter34), - 
[filter56] "r" (filter56), [filter78] "r" (filter78), - [vector_64] "r" (vector_64), - [cm] "r" (cm), [dst] "r" (dst), - [src] "r" (src) - ); + : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), + [qload3] "=&r"(qload3), [st1] "=&r"(st1), [st2] "=&r"(st2), + [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), + [p4] "=&r"(p4), [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3) + : [filter12] "r"(filter12), [filter34] "r"(filter34), + [filter56] "r"(filter56), [filter78] "r"(filter78), + [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst), + [src] "r"(src)); src += 16; dst += 16; @@ -564,12 +549,9 @@ static void convolve_horiz_16_dspr2(const uint8_t *src_ptr, } } -static void convolve_horiz_64_dspr2(const uint8_t *src_ptr, - int32_t src_stride, - uint8_t *dst_ptr, - int32_t dst_stride, - const int16_t *filter_x0, - int32_t h) { +static void convolve_horiz_64_dspr2(const uint8_t *src_ptr, int32_t src_stride, + uint8_t *dst_ptr, int32_t dst_stride, + const int16_t *filter_x0, int32_t h) { int32_t y, c; const uint8_t *src; uint8_t *dst; @@ -598,7 +580,7 @@ static void convolve_horiz_64_dspr2(const uint8_t *src_ptr, prefetch_store(dst_ptr + dst_stride + 32); for (c = 0; c < 4; c++) { - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[qload1], 0(%[src]) \n\t" "ulw %[qload2], 4(%[src]) \n\t" @@ -814,17 +796,15 @@ static void convolve_horiz_64_dspr2(const uint8_t *src_ptr, "sb %[st3], 13(%[dst]) \n\t" /* odd 7 */ "sb %[st1], 15(%[dst]) \n\t" /* odd 8 */ - : [qload1] "=&r" (qload1), [qload2] "=&r" (qload2), [qload3] "=&r" (qload3), - [st1] "=&r" (st1), [st2] "=&r" (st2), [st3] "=&r" (st3), - [p1] "=&r" (p1), [p2] "=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4), - [p5] "=&r" (p5), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3) - : [filter12] "r" (filter12), [filter34] "r" (filter34), - [filter56] "r" (filter56), [filter78] "r" (filter78), - [vector_64] "r" (vector_64), - [cm] "r" (cm), [dst] "r" 
(dst), - [src] "r" (src) - ); + : [qload1] "=&r"(qload1), [qload2] "=&r"(qload2), + [qload3] "=&r"(qload3), [st1] "=&r"(st1), [st2] "=&r"(st2), + [st3] "=&r"(st3), [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), + [p4] "=&r"(p4), [p5] "=&r"(p5), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [Temp3] "=&r"(Temp3) + : [filter12] "r"(filter12), [filter34] "r"(filter34), + [filter56] "r"(filter56), [filter78] "r"(filter78), + [vector_64] "r"(vector_64), [cm] "r"(cm), [dst] "r"(dst), + [src] "r"(src)); src += 16; dst += 16; @@ -839,17 +819,14 @@ static void convolve_horiz_64_dspr2(const uint8_t *src_ptr, void vpx_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { assert(x_step_q4 == 16); assert(((const int32_t *)filter_x)[1] != 0x800000); if (((const int32_t *)filter_x)[0] == 0) { - vpx_convolve2_horiz_dspr2(src, src_stride, - dst, dst_stride, - filter_x, x_step_q4, - filter_y, y_step_q4, - w, h); + vpx_convolve2_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); } else { uint32_t pos = 38; @@ -857,11 +834,9 @@ void vpx_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, src -= 3; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); /* prefetch data to cache memory */ prefetch_load(src); @@ -870,39 +845,31 @@ void vpx_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, switch (w) { case 4: - convolve_horiz_4_dspr2(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filter_x, (int32_t)h); + convolve_horiz_4_dspr2(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filter_x, (int32_t)h); break; case 8: - convolve_horiz_8_dspr2(src, (int32_t)src_stride, - dst, 
(int32_t)dst_stride, - filter_x, (int32_t)h); + convolve_horiz_8_dspr2(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filter_x, (int32_t)h); break; case 16: - convolve_horiz_16_dspr2(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filter_x, (int32_t)h, 1); + convolve_horiz_16_dspr2(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filter_x, (int32_t)h, 1); break; case 32: - convolve_horiz_16_dspr2(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filter_x, (int32_t)h, 2); + convolve_horiz_16_dspr2(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filter_x, (int32_t)h, 2); break; case 64: prefetch_load(src + 64); prefetch_store(dst + 32); - convolve_horiz_64_dspr2(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filter_x, (int32_t)h); + convolve_horiz_64_dspr2(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filter_x, (int32_t)h); break; default: - vpx_convolve8_horiz_c(src + 3, src_stride, - dst, dst_stride, - filter_x, x_step_q4, - filter_y, y_step_q4, - w, h); + vpx_convolve8_horiz_c(src + 3, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } diff --git a/libs/libvpx/vpx_dsp/mips/convolve8_vert_dspr2.c b/libs/libvpx/vpx_dsp/mips/convolve8_vert_dspr2.c index d553828c59..ad107d5c47 100644 --- a/libs/libvpx/vpx_dsp/mips/convolve8_vert_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/convolve8_vert_dspr2.c @@ -18,12 +18,9 @@ #include "vpx_ports/mem.h" #if HAVE_DSPR2 -static void convolve_vert_4_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_y, - int32_t w, +static void convolve_vert_4_dspr2(const uint8_t *src, int32_t src_stride, + uint8_t *dst, int32_t dst_stride, + const int16_t *filter_y, int32_t w, int32_t h) { int32_t x, y; const uint8_t *src_ptr; @@ -53,7 +50,7 @@ static void convolve_vert_4_dspr2(const uint8_t *src, src_ptr = src + x; dst_ptr = dst + x; - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[load1], 
0(%[src_ptr]) \n\t" "add %[src_ptr], %[src_ptr], %[src_stride] \n\t" "ulw %[load2], 0(%[src_ptr]) \n\t" @@ -152,19 +149,16 @@ static void convolve_vert_4_dspr2(const uint8_t *src, "sb %[store1], 2(%[dst_ptr]) \n\t" "sb %[store2], 3(%[dst_ptr]) \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [p1] "=&r" (p1), [p2] "=&r" (p2), - [n1] "=&r" (n1), [n2] "=&r" (n2), - [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [store1] "=&r" (store1), [store2] "=&r" (store2), - [src_ptr] "+r" (src_ptr) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), [vector4b] "r" (vector4b), - [vector4a] "r" (vector4a), [src_stride] "r" (src_stride), - [cm] "r" (cm), [dst_ptr] "r" (dst_ptr) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2), + [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1), + [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [store1] "=&r"(store1), + [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4b] "r"(vector4b), + [vector4a] "r"(vector4a), [src_stride] "r"(src_stride), + [cm] "r"(cm), [dst_ptr] "r"(dst_ptr)); } /* Next row... 
*/ @@ -173,12 +167,9 @@ static void convolve_vert_4_dspr2(const uint8_t *src, } } -static void convolve_vert_64_dspr2(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - const int16_t *filter_y, - int32_t h) { +static void convolve_vert_64_dspr2(const uint8_t *src, int32_t src_stride, + uint8_t *dst, int32_t dst_stride, + const int16_t *filter_y, int32_t h) { int32_t x, y; const uint8_t *src_ptr; uint8_t *dst_ptr; @@ -208,7 +199,7 @@ static void convolve_vert_64_dspr2(const uint8_t *src, src_ptr = src + x; dst_ptr = dst + x; - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[load1], 0(%[src_ptr]) \n\t" "add %[src_ptr], %[src_ptr], %[src_stride] \n\t" "ulw %[load2], 0(%[src_ptr]) \n\t" @@ -307,19 +298,16 @@ static void convolve_vert_64_dspr2(const uint8_t *src, "sb %[store1], 2(%[dst_ptr]) \n\t" "sb %[store2], 3(%[dst_ptr]) \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [p1] "=&r" (p1), [p2] "=&r" (p2), - [n1] "=&r" (n1), [n2] "=&r" (n2), - [scratch1] "=&r" (scratch1), [scratch2] "=&r" (scratch2), - [Temp1] "=&r" (Temp1), [Temp2] "=&r" (Temp2), - [store1] "=&r" (store1), [store2] "=&r" (store2), - [src_ptr] "+r" (src_ptr) - : [vector1b] "r" (vector1b), [vector2b] "r" (vector2b), - [vector3b] "r" (vector3b), [vector4b] "r" (vector4b), - [vector4a] "r" (vector4a), [src_stride] "r" (src_stride), - [cm] "r" (cm), [dst_ptr] "r" (dst_ptr) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2), + [n1] "=&r"(n1), [n2] "=&r"(n2), [scratch1] "=&r"(scratch1), + [scratch2] "=&r"(scratch2), [Temp1] "=&r"(Temp1), + [Temp2] "=&r"(Temp2), [store1] "=&r"(store1), + [store2] "=&r"(store2), [src_ptr] "+r"(src_ptr) + : [vector1b] "r"(vector1b), [vector2b] "r"(vector2b), + [vector3b] "r"(vector3b), [vector4b] "r"(vector4b), + [vector4a] "r"(vector4a), [src_stride] "r"(src_stride), + [cm] "r"(cm), [dst_ptr] "r"(dst_ptr)); 
} /* Next row... */ @@ -331,50 +319,38 @@ static void convolve_vert_64_dspr2(const uint8_t *src, void vpx_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { assert(y_step_q4 == 16); assert(((const int32_t *)filter_y)[1] != 0x800000); if (((const int32_t *)filter_y)[0] == 0) { - vpx_convolve2_vert_dspr2(src, src_stride, - dst, dst_stride, - filter_x, x_step_q4, - filter_y, y_step_q4, - w, h); + vpx_convolve2_vert_dspr2(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); } else { uint32_t pos = 38; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); prefetch_store(dst); switch (w) { - case 4 : - case 8 : - case 16 : - case 32 : - convolve_vert_4_dspr2(src, src_stride, - dst, dst_stride, - filter_y, w, h); + case 4: + case 8: + case 16: + case 32: + convolve_vert_4_dspr2(src, src_stride, dst, dst_stride, filter_y, w, h); break; - case 64 : + case 64: prefetch_store(dst + 32); - convolve_vert_64_dspr2(src, src_stride, - dst, dst_stride, - filter_y, h); + convolve_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y, h); break; default: - vpx_convolve8_vert_c(src, src_stride, - dst, dst_stride, - filter_x, x_step_q4, - filter_y, y_step_q4, - w, h); + vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } diff --git a/libs/libvpx/vpx_dsp/mips/convolve_common_dspr2.h b/libs/libvpx/vpx_dsp/mips/convolve_common_dspr2.h index 66d77a2854..4eee3bd5e1 100644 --- a/libs/libvpx/vpx_dsp/mips/convolve_common_dspr2.h +++ b/libs/libvpx/vpx_dsp/mips/convolve_common_dspr2.h @@ -25,8 +25,8 @@ extern "C" { void vpx_convolve2_horiz_dspr2(const uint8_t 
*src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h); + const int16_t *filter_y, int y_step_q4, int w, + int h); void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, @@ -37,19 +37,18 @@ void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h); + const int16_t *filter_y, int y_step_q4, int w, + int h); -void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter, - int w, int h); +void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const int16_t *filter, int w, + int h); void vpx_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h); + const int16_t *filter_y, int y_step_q4, int w, + int h); #endif // #if HAVE_DSPR2 #ifdef __cplusplus diff --git a/libs/libvpx/vpx_dsp/mips/deblock_msa.c b/libs/libvpx/vpx_dsp/mips/deblock_msa.c new file mode 100644 index 0000000000..402d7ed997 --- /dev/null +++ b/libs/libvpx/vpx_dsp/mips/deblock_msa.c @@ -0,0 +1,681 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include +#include "./macros_msa.h" + +extern const int16_t vpx_rv[]; + +#define VPX_TRANSPOSE8x16_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, out0, \ + out1, out2, out3, out4, out5, out6, out7, \ + out8, out9, out10, out11, out12, out13, out14, \ + out15) \ + { \ + v8i16 temp0, temp1, temp2, temp3, temp4; \ + v8i16 temp5, temp6, temp7, temp8, temp9; \ + \ + ILVR_B4_SH(in1, in0, in3, in2, in5, in4, in7, in6, temp0, temp1, temp2, \ + temp3); \ + ILVR_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5); \ + ILVRL_W2_SH(temp5, temp4, temp6, temp7); \ + ILVL_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5); \ + ILVRL_W2_SH(temp5, temp4, temp8, temp9); \ + ILVL_B4_SH(in1, in0, in3, in2, in5, in4, in7, in6, temp0, temp1, temp2, \ + temp3); \ + ILVR_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5); \ + ILVRL_W2_UB(temp5, temp4, out8, out10); \ + ILVL_H2_SH(temp1, temp0, temp3, temp2, temp4, temp5); \ + ILVRL_W2_UB(temp5, temp4, out12, out14); \ + out0 = (v16u8)temp6; \ + out2 = (v16u8)temp7; \ + out4 = (v16u8)temp8; \ + out6 = (v16u8)temp9; \ + out9 = (v16u8)__msa_ilvl_d((v2i64)out8, (v2i64)out8); \ + out11 = (v16u8)__msa_ilvl_d((v2i64)out10, (v2i64)out10); \ + out13 = (v16u8)__msa_ilvl_d((v2i64)out12, (v2i64)out12); \ + out15 = (v16u8)__msa_ilvl_d((v2i64)out14, (v2i64)out14); \ + out1 = (v16u8)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \ + out3 = (v16u8)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \ + out5 = (v16u8)__msa_ilvl_d((v2i64)out4, (v2i64)out4); \ + out7 = (v16u8)__msa_ilvl_d((v2i64)out6, (v2i64)out6); \ + } + +#define VPX_AVER_IF_RETAIN(above2_in, above1_in, src_in, below1_in, below2_in, \ + ref, out) \ + { \ + v16u8 temp0, temp1; \ + \ + temp1 = __msa_aver_u_b(above2_in, above1_in); \ + temp0 = __msa_aver_u_b(below2_in, below1_in); \ + temp1 = __msa_aver_u_b(temp1, temp0); \ + out = __msa_aver_u_b(src_in, temp1); \ + temp0 = __msa_asub_u_b(src_in, above2_in); \ + temp1 = __msa_asub_u_b(src_in, above1_in); \ + temp0 = (temp0 < ref); \ + temp1 = (temp1 < ref); \ + 
temp0 = temp0 & temp1; \ + temp1 = __msa_asub_u_b(src_in, below1_in); \ + temp1 = (temp1 < ref); \ + temp0 = temp0 & temp1; \ + temp1 = __msa_asub_u_b(src_in, below2_in); \ + temp1 = (temp1 < ref); \ + temp0 = temp0 & temp1; \ + out = __msa_bmz_v(out, src_in, temp0); \ + } + +#define TRANSPOSE12x16_B(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \ + in10, in11, in12, in13, in14, in15) \ + { \ + v8i16 temp0, temp1, temp2, temp3, temp4; \ + v8i16 temp5, temp6, temp7, temp8, temp9; \ + \ + ILVR_B2_SH(in1, in0, in3, in2, temp0, temp1); \ + ILVRL_H2_SH(temp1, temp0, temp2, temp3); \ + ILVR_B2_SH(in5, in4, in7, in6, temp0, temp1); \ + ILVRL_H2_SH(temp1, temp0, temp4, temp5); \ + ILVRL_W2_SH(temp4, temp2, temp0, temp1); \ + ILVRL_W2_SH(temp5, temp3, temp2, temp3); \ + ILVR_B2_SH(in9, in8, in11, in10, temp4, temp5); \ + ILVR_B2_SH(in9, in8, in11, in10, temp4, temp5); \ + ILVRL_H2_SH(temp5, temp4, temp6, temp7); \ + ILVR_B2_SH(in13, in12, in15, in14, temp4, temp5); \ + ILVRL_H2_SH(temp5, temp4, temp8, temp9); \ + ILVRL_W2_SH(temp8, temp6, temp4, temp5); \ + ILVRL_W2_SH(temp9, temp7, temp6, temp7); \ + ILVL_B2_SH(in1, in0, in3, in2, temp8, temp9); \ + ILVR_D2_UB(temp4, temp0, temp5, temp1, in0, in2); \ + in1 = (v16u8)__msa_ilvl_d((v2i64)temp4, (v2i64)temp0); \ + in3 = (v16u8)__msa_ilvl_d((v2i64)temp5, (v2i64)temp1); \ + ILVL_B2_SH(in5, in4, in7, in6, temp0, temp1); \ + ILVR_D2_UB(temp6, temp2, temp7, temp3, in4, in6); \ + in5 = (v16u8)__msa_ilvl_d((v2i64)temp6, (v2i64)temp2); \ + in7 = (v16u8)__msa_ilvl_d((v2i64)temp7, (v2i64)temp3); \ + ILVL_B4_SH(in9, in8, in11, in10, in13, in12, in15, in14, temp2, temp3, \ + temp4, temp5); \ + ILVR_H4_SH(temp9, temp8, temp1, temp0, temp3, temp2, temp5, temp4, temp6, \ + temp7, temp8, temp9); \ + ILVR_W2_SH(temp7, temp6, temp9, temp8, temp0, temp1); \ + in8 = (v16u8)__msa_ilvr_d((v2i64)temp1, (v2i64)temp0); \ + in9 = (v16u8)__msa_ilvl_d((v2i64)temp1, (v2i64)temp0); \ + ILVL_W2_SH(temp7, temp6, temp9, temp8, temp2, temp3); \ + in10 = 
(v16u8)__msa_ilvr_d((v2i64)temp3, (v2i64)temp2); \ + in11 = (v16u8)__msa_ilvl_d((v2i64)temp3, (v2i64)temp2); \ + } + +#define VPX_TRANSPOSE12x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, \ + in9, in10, in11) \ + { \ + v8i16 temp0, temp1, temp2, temp3; \ + v8i16 temp4, temp5, temp6, temp7; \ + \ + ILVR_B2_SH(in1, in0, in3, in2, temp0, temp1); \ + ILVRL_H2_SH(temp1, temp0, temp2, temp3); \ + ILVR_B2_SH(in5, in4, in7, in6, temp0, temp1); \ + ILVRL_H2_SH(temp1, temp0, temp4, temp5); \ + ILVRL_W2_SH(temp4, temp2, temp0, temp1); \ + ILVRL_W2_SH(temp5, temp3, temp2, temp3); \ + ILVL_B2_SH(in1, in0, in3, in2, temp4, temp5); \ + temp4 = __msa_ilvr_h(temp5, temp4); \ + ILVL_B2_SH(in5, in4, in7, in6, temp6, temp7); \ + temp5 = __msa_ilvr_h(temp7, temp6); \ + ILVRL_W2_SH(temp5, temp4, temp6, temp7); \ + in0 = (v16u8)temp0; \ + in2 = (v16u8)temp1; \ + in4 = (v16u8)temp2; \ + in6 = (v16u8)temp3; \ + in8 = (v16u8)temp6; \ + in10 = (v16u8)temp7; \ + in1 = (v16u8)__msa_ilvl_d((v2i64)temp0, (v2i64)temp0); \ + in3 = (v16u8)__msa_ilvl_d((v2i64)temp1, (v2i64)temp1); \ + in5 = (v16u8)__msa_ilvl_d((v2i64)temp2, (v2i64)temp2); \ + in7 = (v16u8)__msa_ilvl_d((v2i64)temp3, (v2i64)temp3); \ + in9 = (v16u8)__msa_ilvl_d((v2i64)temp6, (v2i64)temp6); \ + in11 = (v16u8)__msa_ilvl_d((v2i64)temp7, (v2i64)temp7); \ + } + +static void postproc_down_across_chroma_msa(uint8_t *src_ptr, uint8_t *dst_ptr, + int32_t src_stride, + int32_t dst_stride, int32_t cols, + uint8_t *f) { + uint8_t *p_src = src_ptr; + uint8_t *p_dst = dst_ptr; + uint8_t *f_orig = f; + uint8_t *p_dst_st = dst_ptr; + uint16_t col; + uint64_t out0, out1, out2, out3; + v16u8 above2, above1, below2, below1, src, ref, ref_temp; + v16u8 inter0, inter1, inter2, inter3, inter4, inter5; + v16u8 inter6, inter7, inter8, inter9, inter10, inter11; + + for (col = (cols / 16); col--;) { + ref = LD_UB(f); + LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1); + src = LD_UB(p_src); + LD_UB2(p_src + 1 * src_stride, src_stride, below1, 
below2); + VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0); + above2 = LD_UB(p_src + 3 * src_stride); + VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1); + above1 = LD_UB(p_src + 4 * src_stride); + VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2); + src = LD_UB(p_src + 5 * src_stride); + VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3); + below1 = LD_UB(p_src + 6 * src_stride); + VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4); + below2 = LD_UB(p_src + 7 * src_stride); + VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5); + above2 = LD_UB(p_src + 8 * src_stride); + VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6); + above1 = LD_UB(p_src + 9 * src_stride); + VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7); + ST_UB8(inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7, + p_dst, dst_stride); + + p_dst += 16; + p_src += 16; + f += 16; + } + + if (0 != (cols / 16)) { + ref = LD_UB(f); + LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1); + src = LD_UB(p_src); + LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2); + VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0); + above2 = LD_UB(p_src + 3 * src_stride); + VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter1); + above1 = LD_UB(p_src + 4 * src_stride); + VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2); + src = LD_UB(p_src + 5 * src_stride); + VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3); + below1 = LD_UB(p_src + 6 * src_stride); + VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4); + below2 = LD_UB(p_src + 7 * src_stride); + VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5); + above2 = LD_UB(p_src + 8 * src_stride); + VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6); + above1 = LD_UB(p_src + 9 * 
src_stride); + VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7); + out0 = __msa_copy_u_d((v2i64)inter0, 0); + out1 = __msa_copy_u_d((v2i64)inter1, 0); + out2 = __msa_copy_u_d((v2i64)inter2, 0); + out3 = __msa_copy_u_d((v2i64)inter3, 0); + SD4(out0, out1, out2, out3, p_dst, dst_stride); + + out0 = __msa_copy_u_d((v2i64)inter4, 0); + out1 = __msa_copy_u_d((v2i64)inter5, 0); + out2 = __msa_copy_u_d((v2i64)inter6, 0); + out3 = __msa_copy_u_d((v2i64)inter7, 0); + SD4(out0, out1, out2, out3, p_dst + 4 * dst_stride, dst_stride); + } + + f = f_orig; + p_dst = dst_ptr - 2; + LD_UB8(p_dst, dst_stride, inter0, inter1, inter2, inter3, inter4, inter5, + inter6, inter7); + + for (col = 0; col < (cols / 8); ++col) { + ref = LD_UB(f); + f += 8; + VPX_TRANSPOSE12x8_UB_UB(inter0, inter1, inter2, inter3, inter4, inter5, + inter6, inter7, inter8, inter9, inter10, inter11); + if (0 == col) { + above2 = inter2; + above1 = inter2; + } else { + above2 = inter0; + above1 = inter1; + } + src = inter2; + below1 = inter3; + below2 = inter4; + ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0); + VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2); + above2 = inter5; + ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1); + VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3); + above1 = inter6; + ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2); + VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4); + src = inter7; + ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3); + VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5); + below1 = inter8; + ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4); + VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6); + below2 = inter9; + ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5); + VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7); + if (col == (cols / 8 - 1)) { + above2 = inter9; + } else { + above2 = inter10; + } + 
ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6); + VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8); + if (col == (cols / 8 - 1)) { + above1 = inter9; + } else { + above1 = inter11; + } + ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7); + VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9); + TRANSPOSE8x8_UB_UB(inter2, inter3, inter4, inter5, inter6, inter7, inter8, + inter9, inter2, inter3, inter4, inter5, inter6, inter7, + inter8, inter9); + p_dst += 8; + LD_UB2(p_dst, dst_stride, inter0, inter1); + ST8x1_UB(inter2, p_dst_st); + ST8x1_UB(inter3, (p_dst_st + 1 * dst_stride)); + LD_UB2(p_dst + 2 * dst_stride, dst_stride, inter2, inter3); + ST8x1_UB(inter4, (p_dst_st + 2 * dst_stride)); + ST8x1_UB(inter5, (p_dst_st + 3 * dst_stride)); + LD_UB2(p_dst + 4 * dst_stride, dst_stride, inter4, inter5); + ST8x1_UB(inter6, (p_dst_st + 4 * dst_stride)); + ST8x1_UB(inter7, (p_dst_st + 5 * dst_stride)); + LD_UB2(p_dst + 6 * dst_stride, dst_stride, inter6, inter7); + ST8x1_UB(inter8, (p_dst_st + 6 * dst_stride)); + ST8x1_UB(inter9, (p_dst_st + 7 * dst_stride)); + p_dst_st += 8; + } +} + +static void postproc_down_across_luma_msa(uint8_t *src_ptr, uint8_t *dst_ptr, + int32_t src_stride, + int32_t dst_stride, int32_t cols, + uint8_t *f) { + uint8_t *p_src = src_ptr; + uint8_t *p_dst = dst_ptr; + uint8_t *p_dst_st = dst_ptr; + uint8_t *f_orig = f; + uint16_t col; + v16u8 above2, above1, below2, below1; + v16u8 src, ref, ref_temp; + v16u8 inter0, inter1, inter2, inter3, inter4, inter5, inter6; + v16u8 inter7, inter8, inter9, inter10, inter11; + v16u8 inter12, inter13, inter14, inter15; + + for (col = (cols / 16); col--;) { + ref = LD_UB(f); + LD_UB2(p_src - 2 * src_stride, src_stride, above2, above1); + src = LD_UB(p_src); + LD_UB2(p_src + 1 * src_stride, src_stride, below1, below2); + VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter0); + above2 = LD_UB(p_src + 3 * src_stride); + VPX_AVER_IF_RETAIN(above1, src, below1, 
below2, above2, ref, inter1); + above1 = LD_UB(p_src + 4 * src_stride); + VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter2); + src = LD_UB(p_src + 5 * src_stride); + VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter3); + below1 = LD_UB(p_src + 6 * src_stride); + VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter4); + below2 = LD_UB(p_src + 7 * src_stride); + VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter5); + above2 = LD_UB(p_src + 8 * src_stride); + VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter6); + above1 = LD_UB(p_src + 9 * src_stride); + VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter7); + src = LD_UB(p_src + 10 * src_stride); + VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter8); + below1 = LD_UB(p_src + 11 * src_stride); + VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter9); + below2 = LD_UB(p_src + 12 * src_stride); + VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter10); + above2 = LD_UB(p_src + 13 * src_stride); + VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref, inter11); + above1 = LD_UB(p_src + 14 * src_stride); + VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref, inter12); + src = LD_UB(p_src + 15 * src_stride); + VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref, inter13); + below1 = LD_UB(p_src + 16 * src_stride); + VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref, inter14); + below2 = LD_UB(p_src + 17 * src_stride); + VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref, inter15); + ST_UB8(inter0, inter1, inter2, inter3, inter4, inter5, inter6, inter7, + p_dst, dst_stride); + ST_UB8(inter8, inter9, inter10, inter11, inter12, inter13, inter14, inter15, + p_dst + 8 * dst_stride, dst_stride); + p_src += 16; + p_dst += 16; + f += 16; + } + + f = f_orig; + p_dst = dst_ptr - 2; + LD_UB8(p_dst, dst_stride, inter0, inter1, inter2, inter3, inter4, 
inter5, + inter6, inter7); + LD_UB8(p_dst + 8 * dst_stride, dst_stride, inter8, inter9, inter10, inter11, + inter12, inter13, inter14, inter15); + + for (col = 0; col < cols / 8; ++col) { + ref = LD_UB(f); + f += 8; + TRANSPOSE12x16_B(inter0, inter1, inter2, inter3, inter4, inter5, inter6, + inter7, inter8, inter9, inter10, inter11, inter12, inter13, + inter14, inter15); + if (0 == col) { + above2 = inter2; + above1 = inter2; + } else { + above2 = inter0; + above1 = inter1; + } + + src = inter2; + below1 = inter3; + below2 = inter4; + ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0); + VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter2); + above2 = inter5; + ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1); + VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter3); + above1 = inter6; + ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2); + VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter4); + src = inter7; + ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3); + VPX_AVER_IF_RETAIN(below1, below2, above2, above1, src, ref_temp, inter5); + below1 = inter8; + ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4); + VPX_AVER_IF_RETAIN(below2, above2, above1, src, below1, ref_temp, inter6); + below2 = inter9; + ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5); + VPX_AVER_IF_RETAIN(above2, above1, src, below1, below2, ref_temp, inter7); + if (col == (cols / 8 - 1)) { + above2 = inter9; + } else { + above2 = inter10; + } + ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6); + VPX_AVER_IF_RETAIN(above1, src, below1, below2, above2, ref_temp, inter8); + if (col == (cols / 8 - 1)) { + above1 = inter9; + } else { + above1 = inter11; + } + ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7); + VPX_AVER_IF_RETAIN(src, below1, below2, above2, above1, ref_temp, inter9); + VPX_TRANSPOSE8x16_UB_UB(inter2, inter3, inter4, inter5, inter6, inter7, + inter8, inter9, inter2, inter3, inter4, inter5, + inter6, inter7, inter8, inter9, inter10, 
inter11, + inter12, inter13, inter14, inter15, above2, above1); + + p_dst += 8; + LD_UB2(p_dst, dst_stride, inter0, inter1); + ST8x1_UB(inter2, p_dst_st); + ST8x1_UB(inter3, (p_dst_st + 1 * dst_stride)); + LD_UB2(p_dst + 2 * dst_stride, dst_stride, inter2, inter3); + ST8x1_UB(inter4, (p_dst_st + 2 * dst_stride)); + ST8x1_UB(inter5, (p_dst_st + 3 * dst_stride)); + LD_UB2(p_dst + 4 * dst_stride, dst_stride, inter4, inter5); + ST8x1_UB(inter6, (p_dst_st + 4 * dst_stride)); + ST8x1_UB(inter7, (p_dst_st + 5 * dst_stride)); + LD_UB2(p_dst + 6 * dst_stride, dst_stride, inter6, inter7); + ST8x1_UB(inter8, (p_dst_st + 6 * dst_stride)); + ST8x1_UB(inter9, (p_dst_st + 7 * dst_stride)); + LD_UB2(p_dst + 8 * dst_stride, dst_stride, inter8, inter9); + ST8x1_UB(inter10, (p_dst_st + 8 * dst_stride)); + ST8x1_UB(inter11, (p_dst_st + 9 * dst_stride)); + LD_UB2(p_dst + 10 * dst_stride, dst_stride, inter10, inter11); + ST8x1_UB(inter12, (p_dst_st + 10 * dst_stride)); + ST8x1_UB(inter13, (p_dst_st + 11 * dst_stride)); + LD_UB2(p_dst + 12 * dst_stride, dst_stride, inter12, inter13); + ST8x1_UB(inter14, (p_dst_st + 12 * dst_stride)); + ST8x1_UB(inter15, (p_dst_st + 13 * dst_stride)); + LD_UB2(p_dst + 14 * dst_stride, dst_stride, inter14, inter15); + ST8x1_UB(above2, (p_dst_st + 14 * dst_stride)); + ST8x1_UB(above1, (p_dst_st + 15 * dst_stride)); + p_dst_st += 8; + } +} + +void vpx_post_proc_down_and_across_mb_row_msa(uint8_t *src, uint8_t *dst, + int32_t src_stride, + int32_t dst_stride, int32_t cols, + uint8_t *f, int32_t size) { + if (8 == size) { + postproc_down_across_chroma_msa(src, dst, src_stride, dst_stride, cols, f); + } else if (16 == size) { + postproc_down_across_luma_msa(src, dst, src_stride, dst_stride, cols, f); + } +} + +void vpx_mbpost_proc_across_ip_msa(uint8_t *src_ptr, int32_t pitch, + int32_t rows, int32_t cols, int32_t flimit) { + int32_t row, col, cnt; + uint8_t *src_dup = src_ptr; + v16u8 src0, src, tmp_orig; + v16u8 tmp = { 0 }; + v16i8 zero = { 0 }; + v8u16 
sum_h, src_r_h, src_l_h; + v4u32 src_r_w, src_l_w; + v4i32 flimit_vec; + + flimit_vec = __msa_fill_w(flimit); + for (row = rows; row--;) { + int32_t sum_sq = 0; + int32_t sum = 0; + src0 = (v16u8)__msa_fill_b(src_dup[0]); + ST8x1_UB(src0, (src_dup - 8)); + + src0 = (v16u8)__msa_fill_b(src_dup[cols - 1]); + ST_UB(src0, src_dup + cols); + src_dup[cols + 16] = src_dup[cols - 1]; + tmp_orig = (v16u8)__msa_ldi_b(0); + tmp_orig[15] = tmp[15]; + src = LD_UB(src_dup - 8); + src[15] = 0; + ILVRL_B2_UH(zero, src, src_r_h, src_l_h); + src_r_w = __msa_dotp_u_w(src_r_h, src_r_h); + src_l_w = __msa_dotp_u_w(src_l_h, src_l_h); + sum_sq = HADD_SW_S32(src_r_w); + sum_sq += HADD_SW_S32(src_l_w); + sum_h = __msa_hadd_u_h(src, src); + sum = HADD_UH_U32(sum_h); + { + v16u8 src7, src8, src_r, src_l; + v16i8 mask; + v8u16 add_r, add_l; + v8i16 sub_r, sub_l, sum_r, sum_l, mask0, mask1; + v4i32 sum_sq0, sum_sq1, sum_sq2, sum_sq3; + v4i32 sub0, sub1, sub2, sub3; + v4i32 sum0_w, sum1_w, sum2_w, sum3_w; + v4i32 mul0, mul1, mul2, mul3; + v4i32 total0, total1, total2, total3; + v8i16 const8 = __msa_fill_h(8); + + src7 = LD_UB(src_dup + 7); + src8 = LD_UB(src_dup - 8); + for (col = 0; col < (cols >> 4); ++col) { + ILVRL_B2_UB(src7, src8, src_r, src_l); + HSUB_UB2_SH(src_r, src_l, sub_r, sub_l); + + sum_r[0] = sum + sub_r[0]; + for (cnt = 0; cnt < 7; ++cnt) { + sum_r[cnt + 1] = sum_r[cnt] + sub_r[cnt + 1]; + } + sum_l[0] = sum_r[7] + sub_l[0]; + for (cnt = 0; cnt < 7; ++cnt) { + sum_l[cnt + 1] = sum_l[cnt] + sub_l[cnt + 1]; + } + sum = sum_l[7]; + src = LD_UB(src_dup + 16 * col); + ILVRL_B2_UH(zero, src, src_r_h, src_l_h); + src7 = (v16u8)((const8 + sum_r + (v8i16)src_r_h) >> 4); + src8 = (v16u8)((const8 + sum_l + (v8i16)src_l_h) >> 4); + tmp = (v16u8)__msa_pckev_b((v16i8)src8, (v16i8)src7); + + HADD_UB2_UH(src_r, src_l, add_r, add_l); + UNPCK_SH_SW(sub_r, sub0, sub1); + UNPCK_SH_SW(sub_l, sub2, sub3); + ILVR_H2_SW(zero, add_r, zero, add_l, sum0_w, sum2_w); + ILVL_H2_SW(zero, add_r, zero, add_l, 
sum1_w, sum3_w); + MUL4(sum0_w, sub0, sum1_w, sub1, sum2_w, sub2, sum3_w, sub3, mul0, mul1, + mul2, mul3); + sum_sq0[0] = sum_sq + mul0[0]; + for (cnt = 0; cnt < 3; ++cnt) { + sum_sq0[cnt + 1] = sum_sq0[cnt] + mul0[cnt + 1]; + } + sum_sq1[0] = sum_sq0[3] + mul1[0]; + for (cnt = 0; cnt < 3; ++cnt) { + sum_sq1[cnt + 1] = sum_sq1[cnt] + mul1[cnt + 1]; + } + sum_sq2[0] = sum_sq1[3] + mul2[0]; + for (cnt = 0; cnt < 3; ++cnt) { + sum_sq2[cnt + 1] = sum_sq2[cnt] + mul2[cnt + 1]; + } + sum_sq3[0] = sum_sq2[3] + mul3[0]; + for (cnt = 0; cnt < 3; ++cnt) { + sum_sq3[cnt + 1] = sum_sq3[cnt] + mul3[cnt + 1]; + } + sum_sq = sum_sq3[3]; + + UNPCK_SH_SW(sum_r, sum0_w, sum1_w); + UNPCK_SH_SW(sum_l, sum2_w, sum3_w); + total0 = sum_sq0 * __msa_ldi_w(15); + total0 -= sum0_w * sum0_w; + total1 = sum_sq1 * __msa_ldi_w(15); + total1 -= sum1_w * sum1_w; + total2 = sum_sq2 * __msa_ldi_w(15); + total2 -= sum2_w * sum2_w; + total3 = sum_sq3 * __msa_ldi_w(15); + total3 -= sum3_w * sum3_w; + total0 = (total0 < flimit_vec); + total1 = (total1 < flimit_vec); + total2 = (total2 < flimit_vec); + total3 = (total3 < flimit_vec); + PCKEV_H2_SH(total1, total0, total3, total2, mask0, mask1); + mask = __msa_pckev_b((v16i8)mask1, (v16i8)mask0); + tmp = __msa_bmz_v(tmp, src, (v16u8)mask); + + if (col == 0) { + uint64_t src_d; + + src_d = __msa_copy_u_d((v2i64)tmp_orig, 1); + SD(src_d, (src_dup - 8)); + } + + src7 = LD_UB(src_dup + 16 * (col + 1) + 7); + src8 = LD_UB(src_dup + 16 * (col + 1) - 8); + ST_UB(tmp, (src_dup + (16 * col))); + } + + src_dup += pitch; + } + } +} + +void vpx_mbpost_proc_down_msa(uint8_t *dst_ptr, int32_t pitch, int32_t rows, + int32_t cols, int32_t flimit) { + int32_t row, col, cnt, i; + const int16_t *rv3 = &vpx_rv[63 & rand()]; + v4i32 flimit_vec; + v16u8 dst7, dst8, dst_r_b, dst_l_b; + v16i8 mask; + v8u16 add_r, add_l; + v8i16 dst_r_h, dst_l_h, sub_r, sub_l, mask0, mask1; + v4i32 sub0, sub1, sub2, sub3, total0, total1, total2, total3; + + flimit_vec = __msa_fill_w(flimit); + + 
for (col = 0; col < (cols >> 4); ++col) { + uint8_t *dst_tmp = &dst_ptr[col << 4]; + v16u8 dst; + v16i8 zero = { 0 }; + v16u8 tmp[16]; + v8i16 mult0, mult1, rv2_0, rv2_1; + v8i16 sum0_h = { 0 }; + v8i16 sum1_h = { 0 }; + v4i32 mul0 = { 0 }; + v4i32 mul1 = { 0 }; + v4i32 mul2 = { 0 }; + v4i32 mul3 = { 0 }; + v4i32 sum0_w, sum1_w, sum2_w, sum3_w; + v4i32 add0, add1, add2, add3; + const int16_t *rv2[16]; + + dst = LD_UB(dst_tmp); + for (cnt = (col << 4), i = 0; i < 16; ++cnt) { + rv2[i] = rv3 + ((cnt * 17) & 127); + ++i; + } + for (cnt = -8; cnt < 0; ++cnt) { + ST_UB(dst, dst_tmp + cnt * pitch); + } + + dst = LD_UB((dst_tmp + (rows - 1) * pitch)); + for (cnt = rows; cnt < rows + 17; ++cnt) { + ST_UB(dst, dst_tmp + cnt * pitch); + } + for (cnt = -8; cnt <= 6; ++cnt) { + dst = LD_UB(dst_tmp + (cnt * pitch)); + UNPCK_UB_SH(dst, dst_r_h, dst_l_h); + MUL2(dst_r_h, dst_r_h, dst_l_h, dst_l_h, mult0, mult1); + mul0 += (v4i32)__msa_ilvr_h((v8i16)zero, (v8i16)mult0); + mul1 += (v4i32)__msa_ilvl_h((v8i16)zero, (v8i16)mult0); + mul2 += (v4i32)__msa_ilvr_h((v8i16)zero, (v8i16)mult1); + mul3 += (v4i32)__msa_ilvl_h((v8i16)zero, (v8i16)mult1); + ADD2(sum0_h, dst_r_h, sum1_h, dst_l_h, sum0_h, sum1_h); + } + + for (row = 0; row < (rows + 8); ++row) { + for (i = 0; i < 8; ++i) { + rv2_0[i] = *(rv2[i] + (row & 127)); + rv2_1[i] = *(rv2[i + 8] + (row & 127)); + } + dst7 = LD_UB(dst_tmp + (7 * pitch)); + dst8 = LD_UB(dst_tmp - (8 * pitch)); + ILVRL_B2_UB(dst7, dst8, dst_r_b, dst_l_b); + + HSUB_UB2_SH(dst_r_b, dst_l_b, sub_r, sub_l); + UNPCK_SH_SW(sub_r, sub0, sub1); + UNPCK_SH_SW(sub_l, sub2, sub3); + sum0_h += sub_r; + sum1_h += sub_l; + + HADD_UB2_UH(dst_r_b, dst_l_b, add_r, add_l); + + ILVRL_H2_SW(zero, add_r, add0, add1); + ILVRL_H2_SW(zero, add_l, add2, add3); + mul0 += add0 * sub0; + mul1 += add1 * sub1; + mul2 += add2 * sub2; + mul3 += add3 * sub3; + dst = LD_UB(dst_tmp); + ILVRL_B2_SH(zero, dst, dst_r_h, dst_l_h); + dst7 = (v16u8)((rv2_0 + sum0_h + dst_r_h) >> 4); + dst8 = 
(v16u8)((rv2_1 + sum1_h + dst_l_h) >> 4); + tmp[row & 15] = (v16u8)__msa_pckev_b((v16i8)dst8, (v16i8)dst7); + + UNPCK_SH_SW(sum0_h, sum0_w, sum1_w); + UNPCK_SH_SW(sum1_h, sum2_w, sum3_w); + total0 = mul0 * __msa_ldi_w(15); + total0 -= sum0_w * sum0_w; + total1 = mul1 * __msa_ldi_w(15); + total1 -= sum1_w * sum1_w; + total2 = mul2 * __msa_ldi_w(15); + total2 -= sum2_w * sum2_w; + total3 = mul3 * __msa_ldi_w(15); + total3 -= sum3_w * sum3_w; + total0 = (total0 < flimit_vec); + total1 = (total1 < flimit_vec); + total2 = (total2 < flimit_vec); + total3 = (total3 < flimit_vec); + PCKEV_H2_SH(total1, total0, total3, total2, mask0, mask1); + mask = __msa_pckev_b((v16i8)mask1, (v16i8)mask0); + tmp[row & 15] = __msa_bmz_v(tmp[row & 15], dst, (v16u8)mask); + + if (row >= 8) { + ST_UB(tmp[(row - 8) & 15], (dst_tmp - 8 * pitch)); + } + + dst_tmp += pitch; + } + } +} diff --git a/libs/libvpx/vpx_dsp/mips/fwd_dct32x32_msa.c b/libs/libvpx/vpx_dsp/mips/fwd_dct32x32_msa.c index 2115a348c2..e41a904808 100644 --- a/libs/libvpx/vpx_dsp/mips/fwd_dct32x32_msa.c +++ b/libs/libvpx/vpx_dsp/mips/fwd_dct32x32_msa.c @@ -27,10 +27,10 @@ static void fdct8x32_1d_column_load_butterfly(const int16_t *input, SLLI_4V(in4, in5, in6, in7, 2); SLLI_4V(in0_1, in1_1, in2_1, in3_1, 2); SLLI_4V(in4_1, in5_1, in6_1, in7_1, 2); - BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, - step0, step1, step2, step3, in4, in5, in6, in7); - BUTTERFLY_8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, - step0_1, step1_1, step2_1, step3_1, in4_1, in5_1, in6_1, in7_1); + BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, step0, step1, step2, + step3, in4, in5, in6, in7); + BUTTERFLY_8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, step0_1, + step1_1, step2_1, step3_1, in4_1, in5_1, in6_1, in7_1); ST_SH4(step0, step1, step2, step3, temp_buff, 8); ST_SH4(in4, in5, in6, in7, temp_buff + (28 * 8), 8); ST_SH4(step0_1, step1_1, step2_1, step3_1, temp_buff + (4 * 8), 8); @@ -45,10 +45,10 @@ static void 
fdct8x32_1d_column_load_butterfly(const int16_t *input, SLLI_4V(in4, in5, in6, in7, 2); SLLI_4V(in0_1, in1_1, in2_1, in3_1, 2); SLLI_4V(in4_1, in5_1, in6_1, in7_1, 2); - BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, - step0, step1, step2, step3, in4, in5, in6, in7); - BUTTERFLY_8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, - step0_1, step1_1, step2_1, step3_1, in4_1, in5_1, in6_1, in7_1); + BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, step0, step1, step2, + step3, in4, in5, in6, in7); + BUTTERFLY_8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, step0_1, + step1_1, step2_1, step3_1, in4_1, in5_1, in6_1, in7_1); ST_SH4(step0, step1, step2, step3, temp_buff + (8 * 8), 8); ST_SH4(in4, in5, in6, in7, temp_buff + (20 * 8), 8); ST_SH4(step0_1, step1_1, step2_1, step3_1, temp_buff + (12 * 8), 8); @@ -64,12 +64,12 @@ static void fdct8x32_1d_column_even_store(int16_t *input, int16_t *temp) { /* fdct even */ LD_SH4(input, 8, in0, in1, in2, in3); LD_SH4(input + 96, 8, in12, in13, in14, in15); - BUTTERFLY_8(in0, in1, in2, in3, in12, in13, in14, in15, - vec0, vec1, vec2, vec3, in12, in13, in14, in15); + BUTTERFLY_8(in0, in1, in2, in3, in12, in13, in14, in15, vec0, vec1, vec2, + vec3, in12, in13, in14, in15); LD_SH4(input + 32, 8, in4, in5, in6, in7); LD_SH4(input + 64, 8, in8, in9, in10, in11); - BUTTERFLY_8(in4, in5, in6, in7, in8, in9, in10, in11, - vec4, vec5, vec6, vec7, in8, in9, in10, in11); + BUTTERFLY_8(in4, in5, in6, in7, in8, in9, in10, in11, vec4, vec5, vec6, vec7, + in8, in9, in10, in11); /* Stage 3 */ ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3); @@ -258,28 +258,26 @@ static void fdct8x32_1d_row_load_butterfly(int16_t *temp_buff, LD_SH8(temp_buff, 32, in0, in1, in2, in3, in4, in5, in6, in7); LD_SH8(temp_buff + 24, 32, in8, in9, in10, in11, in12, in13, in14, in15); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in8, in9, in10, 
in11, in12, in13, in14, in15, - in8, in9, in10, in11, in12, in13, in14, in15); - BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, - in8, in9, in10, in11, in12, in13, in14, in15, - step0, step1, step2, step3, step4, step5, step6, step7, - in8, in9, in10, in11, in12, in13, in14, in15); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); + TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9, + in10, in11, in12, in13, in14, in15); + BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, + in12, in13, in14, in15, step0, step1, step2, step3, step4, step5, + step6, step7, in8, in9, in10, in11, in12, in13, in14, in15); ST_SH8(step0, step1, step2, step3, step4, step5, step6, step7, output, 8); ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, (output + 24 * 8), 8); /* 2nd set */ LD_SH8(temp_buff + 8, 32, in0, in1, in2, in3, in4, in5, in6, in7); LD_SH8(temp_buff + 16, 32, in8, in9, in10, in11, in12, in13, in14, in15); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, - in8, in9, in10, in11, in12, in13, in14, in15); - BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, - in8, in9, in10, in11, in12, in13, in14, in15, - step0, step1, step2, step3, step4, step5, step6, step7, - in8, in9, in10, in11, in12, in13, in14, in15); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); + TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9, + in10, in11, in12, in13, in14, in15); + BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, + in12, in13, in14, in15, step0, step1, step2, step3, step4, step5, + step6, step7, in8, in9, in10, in11, in12, in13, in14, in15); ST_SH8(step0, step1, step2, step3, step4, step5, step6, step7, (output + 8 * 8), 8); ST_SH8(in8, in9, in10, in11, 
in12, in13, in14, in15, (output + 16 * 8), 8); @@ -299,10 +297,9 @@ static void fdct8x32_1d_row_even_4x(int16_t *input, int16_t *interm_ptr, LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7); LD_SH8(input + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15); - BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, - in8, in9, in10, in11, in12, in13, in14, in15, - vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, - in8, in9, in10, in11, in12, in13, in14, in15); + BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, + in12, in13, in14, in15, vec0, vec1, vec2, vec3, vec4, vec5, vec6, + vec7, in8, in9, in10, in11, in12, in13, in14, in15); ST_SH8(vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, interm_ptr, 8); ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, interm_ptr + 64, 8); @@ -315,19 +312,19 @@ static void fdct8x32_1d_row_even_4x(int16_t *input, int16_t *interm_ptr, UNPCK_SH_SW(vec5, vec5_l, vec5_r); UNPCK_SH_SW(vec6, vec6_l, vec6_r); UNPCK_SH_SW(vec7, vec7_l, vec7_r); - ADD4(vec0_r, vec7_r, vec1_r, vec6_r, vec2_r, vec5_r, vec3_r, vec4_r, - tmp0_w, tmp1_w, tmp2_w, tmp3_w); + ADD4(vec0_r, vec7_r, vec1_r, vec6_r, vec2_r, vec5_r, vec3_r, vec4_r, tmp0_w, + tmp1_w, tmp2_w, tmp3_w); BUTTERFLY_4(tmp0_w, tmp1_w, tmp2_w, tmp3_w, vec4_r, vec6_r, vec7_r, vec5_r); - ADD4(vec0_l, vec7_l, vec1_l, vec6_l, vec2_l, vec5_l, vec3_l, vec4_l, - vec0_r, vec1_r, vec2_r, vec3_r); + ADD4(vec0_l, vec7_l, vec1_l, vec6_l, vec2_l, vec5_l, vec3_l, vec4_l, vec0_r, + vec1_r, vec2_r, vec3_r); tmp3_w = vec0_r + vec3_r; vec0_r = vec0_r - vec3_r; vec3_r = vec1_r + vec2_r; vec1_r = vec1_r - vec2_r; - DOTP_CONST_PAIR_W(vec4_r, vec6_r, tmp3_w, vec3_r, cospi_16_64, - cospi_16_64, vec4_r, tmp3_w, vec6_r, vec3_r); + DOTP_CONST_PAIR_W(vec4_r, vec6_r, tmp3_w, vec3_r, cospi_16_64, cospi_16_64, + vec4_r, tmp3_w, vec6_r, vec3_r); FDCT32_POSTPROC_NEG_W(vec4_r); FDCT32_POSTPROC_NEG_W(tmp3_w); FDCT32_POSTPROC_NEG_W(vec6_r); @@ -335,8 +332,8 @@ static void 
fdct8x32_1d_row_even_4x(int16_t *input, int16_t *interm_ptr, PCKEV_H2_SH(vec4_r, tmp3_w, vec6_r, vec3_r, vec4, vec5); ST_SH2(vec5, vec4, out, 8); - DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64, - cospi_8_64, vec4_r, tmp3_w, vec6_r, vec3_r); + DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64, cospi_8_64, + vec4_r, tmp3_w, vec6_r, vec3_r); FDCT32_POSTPROC_NEG_W(vec4_r); FDCT32_POSTPROC_NEG_W(tmp3_w); FDCT32_POSTPROC_NEG_W(vec6_r); @@ -401,10 +398,9 @@ static void fdct8x32_1d_row_even(int16_t *temp, int16_t *out) { LD_SH8(temp, 8, in0, in1, in2, in3, in4, in5, in6, in7); LD_SH8(temp + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15); - BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, - in8, in9, in10, in11, in12, in13, in14, in15, - vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, - in8, in9, in10, in11, in12, in13, in14, in15); + BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, + in12, in13, in14, in15, vec0, vec1, vec2, vec3, vec4, vec5, vec6, + vec7, in8, in9, in10, in11, in12, in13, in14, in15); /* Stage 3 */ ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3); @@ -610,8 +606,8 @@ static void fdct8x32_1d_row_transpose_store(int16_t *temp, int16_t *output) { in3 = LD_SH(temp + 192); in5 = LD_SH(temp + 216); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); /* 2nd set */ in0_1 = LD_SH(temp + 16); @@ -637,10 +633,10 @@ static void fdct8x32_1d_row_transpose_store(int16_t *temp, int16_t *output) { in6 = LD_SH(temp + 104); in7 = LD_SH(temp + 144); - ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, - output + 8, 32); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, output + 8, + 32); + 
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output + 16, 32); /* 4th set */ @@ -655,12 +651,11 @@ static void fdct8x32_1d_row_transpose_store(int16_t *temp, int16_t *output) { TRANSPOSE8x8_SH_SH(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1); - ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, - output + 24, 32); + ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, output + 24, + 32); } -static void fdct32x8_1d_row(int16_t *temp, int16_t *temp_buf, - int16_t *output) { +static void fdct32x8_1d_row(int16_t *temp, int16_t *temp_buf, int16_t *output) { fdct8x32_1d_row_load_butterfly(temp, temp_buf); fdct8x32_1d_row_even(temp_buf, temp_buf); fdct8x32_1d_row_odd(temp_buf + 128, temp, temp_buf + 128); @@ -706,10 +701,9 @@ static void fdct8x32_1d_row_even_rd(int16_t *temp, int16_t *out) { LD_SH8(temp, 8, in0, in1, in2, in3, in4, in5, in6, in7); LD_SH8(temp + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15); - BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, - in8, in9, in10, in11, in12, in13, in14, in15, - vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, - in8, in9, in10, in11, in12, in13, in14, in15); + BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, + in12, in13, in14, in15, vec0, vec1, vec2, vec3, vec4, vec5, vec6, + vec7, in8, in9, in10, in11, in12, in13, in14, in15); FDCT_POSTPROC_2V_NEG_H(vec0, vec1); FDCT_POSTPROC_2V_NEG_H(vec2, vec3); FDCT_POSTPROC_2V_NEG_H(vec4, vec5); @@ -933,23 +927,21 @@ void vpx_fdct32x32_rd_msa(const int16_t *input, int16_t *out, } void vpx_fdct32x32_1_msa(const int16_t *input, int16_t *out, int32_t stride) { - out[1] = 0; - - out[0] = LD_HADD(input, stride); - out[0] += LD_HADD(input + 8, stride); - out[0] += LD_HADD(input + 16, stride); - out[0] += LD_HADD(input + 24, stride); - out[0] += LD_HADD(input + 32 * 8, 
stride); - out[0] += LD_HADD(input + 32 * 8 + 8, stride); - out[0] += LD_HADD(input + 32 * 8 + 16, stride); - out[0] += LD_HADD(input + 32 * 8 + 24, stride); - out[0] += LD_HADD(input + 32 * 16, stride); - out[0] += LD_HADD(input + 32 * 16 + 8, stride); - out[0] += LD_HADD(input + 32 * 16 + 16, stride); - out[0] += LD_HADD(input + 32 * 16 + 24, stride); - out[0] += LD_HADD(input + 32 * 24, stride); - out[0] += LD_HADD(input + 32 * 24 + 8, stride); - out[0] += LD_HADD(input + 32 * 24 + 16, stride); - out[0] += LD_HADD(input + 32 * 24 + 24, stride); - out[0] >>= 3; + int sum = LD_HADD(input, stride); + sum += LD_HADD(input + 8, stride); + sum += LD_HADD(input + 16, stride); + sum += LD_HADD(input + 24, stride); + sum += LD_HADD(input + 32 * 8, stride); + sum += LD_HADD(input + 32 * 8 + 8, stride); + sum += LD_HADD(input + 32 * 8 + 16, stride); + sum += LD_HADD(input + 32 * 8 + 24, stride); + sum += LD_HADD(input + 32 * 16, stride); + sum += LD_HADD(input + 32 * 16 + 8, stride); + sum += LD_HADD(input + 32 * 16 + 16, stride); + sum += LD_HADD(input + 32 * 16 + 24, stride); + sum += LD_HADD(input + 32 * 24, stride); + sum += LD_HADD(input + 32 * 24 + 8, stride); + sum += LD_HADD(input + 32 * 24 + 16, stride); + sum += LD_HADD(input + 32 * 24 + 24, stride); + out[0] = (int16_t)(sum >> 3); } diff --git a/libs/libvpx/vpx_dsp/mips/fwd_txfm_msa.c b/libs/libvpx/vpx_dsp/mips/fwd_txfm_msa.c index f66dd5fce2..fdead50503 100644 --- a/libs/libvpx/vpx_dsp/mips/fwd_txfm_msa.c +++ b/libs/libvpx/vpx_dsp/mips/fwd_txfm_msa.c @@ -18,24 +18,24 @@ void fdct8x16_1d_column(const int16_t *input, int16_t *tmp_ptr, v8i16 stp21, stp22, stp23, stp24, stp25, stp26, stp30; v8i16 stp31, stp32, stp33, stp34, stp35, stp36, stp37; v8i16 vec0, vec1, vec2, vec3, vec4, vec5, cnst0, cnst1, cnst4, cnst5; - v8i16 coeff = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, - -cospi_8_64, -cospi_24_64, cospi_12_64, cospi_20_64 }; - v8i16 coeff1 = { cospi_2_64, cospi_30_64, cospi_14_64, cospi_18_64, - 
cospi_10_64, cospi_22_64, cospi_6_64, cospi_26_64 }; - v8i16 coeff2 = { -cospi_2_64, -cospi_10_64, -cospi_18_64, -cospi_26_64, - 0, 0, 0, 0 }; + v8i16 coeff = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, + -cospi_8_64, -cospi_24_64, cospi_12_64, cospi_20_64 }; + v8i16 coeff1 = { cospi_2_64, cospi_30_64, cospi_14_64, cospi_18_64, + cospi_10_64, cospi_22_64, cospi_6_64, cospi_26_64 }; + v8i16 coeff2 = { + -cospi_2_64, -cospi_10_64, -cospi_18_64, -cospi_26_64, 0, 0, 0, 0 + }; - LD_SH16(input, src_stride, - in0, in1, in2, in3, in4, in5, in6, in7, - in8, in9, in10, in11, in12, in13, in14, in15); + LD_SH16(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, + in10, in11, in12, in13, in14, in15); SLLI_4V(in0, in1, in2, in3, 2); SLLI_4V(in4, in5, in6, in7, 2); SLLI_4V(in8, in9, in10, in11, 2); SLLI_4V(in12, in13, in14, in15, 2); ADD4(in0, in15, in1, in14, in2, in13, in3, in12, tmp0, tmp1, tmp2, tmp3); ADD4(in4, in11, in5, in10, in6, in9, in7, in8, tmp4, tmp5, tmp6, tmp7); - FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, - tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7); + FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1, + tmp2, tmp3, tmp4, tmp5, tmp6, tmp7); ST_SH8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp_ptr, 32); SUB4(in0, in15, in1, in14, in2, in13, in3, in12, in15, in14, in13, in12); SUB4(in4, in11, in5, in10, in6, in9, in7, in8, in11, in10, in9, in8); @@ -137,10 +137,10 @@ void fdct16x8_1d_row(int16_t *input, int16_t *output) { LD_SH8(input, 16, in0, in1, in2, in3, in4, in5, in6, in7); LD_SH8((input + 8), 16, in8, in9, in10, in11, in12, in13, in14, in15); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, - in8, in9, in10, in11, in12, in13, in14, in15); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); + 
TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9, + in10, in11, in12, in13, in14, in15); ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3); ADD4(in4, 1, in5, 1, in6, 1, in7, 1, in4, in5, in6, in7); ADD4(in8, 1, in9, 1, in10, 1, in11, 1, in8, in9, in10, in11); @@ -150,19 +150,19 @@ void fdct16x8_1d_row(int16_t *input, int16_t *output) { SRA_4V(in8, in9, in10, in11, 2); SRA_4V(in12, in13, in14, in15, 2); BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, - in12, in13, in14, in15, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, - tmp6, tmp7, in8, in9, in10, in11, in12, in13, in14, in15); + in12, in13, in14, in15, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, + tmp7, in8, in9, in10, in11, in12, in13, in14, in15); ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, input, 16); - FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, - tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7); + FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1, + tmp2, tmp3, tmp4, tmp5, tmp6, tmp7); LD_SH8(input, 16, in8, in9, in10, in11, in12, in13, in14, in15); - FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, - tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3); + FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15, in0, in1, in2, in3, + in4, in5, in6, in7); + TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, tmp0, in0, + tmp1, in1, tmp2, in2, tmp3, in3); ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, output, 16); - TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, - tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7); + TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, tmp4, in4, + tmp5, in5, tmp6, in6, tmp7, in7); ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, output + 8, 16); } @@ -203,14 +203,14 @@ void vpx_fdct8x8_msa(const int16_t *input, int16_t *output, 
LD_SH8(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7); SLLI_4V(in0, in1, in2, in3, 2); SLLI_4V(in4, in5, in6, in7, 2); - VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, + in5, in6, in7); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); + VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, + in5, in6, in7); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7); ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8); } @@ -237,11 +237,9 @@ void vpx_fdct16x16_msa(const int16_t *input, int16_t *output, } void vpx_fdct16x16_1_msa(const int16_t *input, int16_t *out, int32_t stride) { - out[1] = 0; - - out[0] = LD_HADD(input, stride); - out[0] += LD_HADD(input + 8, stride); - out[0] += LD_HADD(input + 16 * 8, stride); - out[0] += LD_HADD(input + 16 * 8 + 8, stride); - out[0] >>= 1; + int sum = LD_HADD(input, stride); + sum += LD_HADD(input + 8, stride); + sum += LD_HADD(input + 16 * 8, stride); + sum += LD_HADD(input + 16 * 8 + 8, stride); + out[0] = (int16_t)(sum >> 1); } diff --git a/libs/libvpx/vpx_dsp/mips/fwd_txfm_msa.h b/libs/libvpx/vpx_dsp/mips/fwd_txfm_msa.h index d1e160eed5..db5e90e7b9 100644 --- a/libs/libvpx/vpx_dsp/mips/fwd_txfm_msa.h +++ b/libs/libvpx/vpx_dsp/mips/fwd_txfm_msa.h @@ -14,358 +14,365 @@ #include "vpx_dsp/mips/txfm_macros_msa.h" #include "vpx_dsp/txfm_common.h" -#define LD_HADD(psrc, stride) ({ \ - v8i16 in0_m, in1_m, in2_m, in3_m, 
in4_m, in5_m, in6_m, in7_m; \ - v4i32 vec_w_m; \ - \ - LD_SH4((psrc), stride, in0_m, in1_m, in2_m, in3_m); \ - ADD2(in0_m, in1_m, in2_m, in3_m, in0_m, in2_m); \ - LD_SH4(((psrc) + 4 * stride), stride, in4_m, in5_m, in6_m, in7_m); \ - ADD4(in4_m, in5_m, in6_m, in7_m, in0_m, in2_m, in4_m, in6_m, \ - in4_m, in6_m, in0_m, in4_m); \ - in0_m += in4_m; \ - \ - vec_w_m = __msa_hadd_s_w(in0_m, in0_m); \ - HADD_SW_S32(vec_w_m); \ -}) +#define LD_HADD(psrc, stride) \ + ({ \ + v8i16 in0_m, in1_m, in2_m, in3_m, in4_m, in5_m, in6_m, in7_m; \ + v4i32 vec_w_m; \ + \ + LD_SH4((psrc), stride, in0_m, in1_m, in2_m, in3_m); \ + ADD2(in0_m, in1_m, in2_m, in3_m, in0_m, in2_m); \ + LD_SH4(((psrc) + 4 * stride), stride, in4_m, in5_m, in6_m, in7_m); \ + ADD4(in4_m, in5_m, in6_m, in7_m, in0_m, in2_m, in4_m, in6_m, in4_m, in6_m, \ + in0_m, in4_m); \ + in0_m += in4_m; \ + \ + vec_w_m = __msa_hadd_s_w(in0_m, in0_m); \ + HADD_SW_S32(vec_w_m); \ + }) -#define VP9_FDCT4(in0, in1, in2, in3, out0, out1, out2, out3) { \ - v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m; \ - v8i16 vec0_m, vec1_m, vec2_m, vec3_m; \ - v4i32 vec4_m, vec5_m, vec6_m, vec7_m; \ - v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, \ - cospi_24_64, -cospi_8_64, 0, 0, 0 }; \ - \ - BUTTERFLY_4(in0, in1, in2, in3, vec0_m, vec1_m, vec2_m, vec3_m); \ - ILVR_H2_SH(vec1_m, vec0_m, vec3_m, vec2_m, vec0_m, vec2_m); \ - SPLATI_H2_SH(coeff_m, 0, 1, cnst0_m, cnst1_m); \ - cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ - vec5_m = __msa_dotp_s_w(vec0_m, cnst1_m); \ - \ - SPLATI_H2_SH(coeff_m, 4, 3, cnst2_m, cnst3_m); \ - cnst2_m = __msa_ilvev_h(cnst3_m, cnst2_m); \ - vec7_m = __msa_dotp_s_w(vec2_m, cnst2_m); \ - \ - vec4_m = __msa_dotp_s_w(vec0_m, cnst0_m); \ - cnst2_m = __msa_splati_h(coeff_m, 2); \ - cnst2_m = __msa_ilvev_h(cnst2_m, cnst3_m); \ - vec6_m = __msa_dotp_s_w(vec2_m, cnst2_m); \ - \ - SRARI_W4_SW(vec4_m, vec5_m, vec6_m, vec7_m, DCT_CONST_BITS); \ - PCKEV_H4_SH(vec4_m, vec4_m, vec5_m, vec5_m, vec6_m, vec6_m, \ - vec7_m, vec7_m, 
out0, out2, out1, out3); \ -} +#define VP9_FDCT4(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m; \ + v8i16 vec0_m, vec1_m, vec2_m, vec3_m; \ + v4i32 vec4_m, vec5_m, vec6_m, vec7_m; \ + v8i16 coeff_m = { \ + cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, -cospi_8_64, 0, 0, 0 \ + }; \ + \ + BUTTERFLY_4(in0, in1, in2, in3, vec0_m, vec1_m, vec2_m, vec3_m); \ + ILVR_H2_SH(vec1_m, vec0_m, vec3_m, vec2_m, vec0_m, vec2_m); \ + SPLATI_H2_SH(coeff_m, 0, 1, cnst0_m, cnst1_m); \ + cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ + vec5_m = __msa_dotp_s_w(vec0_m, cnst1_m); \ + \ + SPLATI_H2_SH(coeff_m, 4, 3, cnst2_m, cnst3_m); \ + cnst2_m = __msa_ilvev_h(cnst3_m, cnst2_m); \ + vec7_m = __msa_dotp_s_w(vec2_m, cnst2_m); \ + \ + vec4_m = __msa_dotp_s_w(vec0_m, cnst0_m); \ + cnst2_m = __msa_splati_h(coeff_m, 2); \ + cnst2_m = __msa_ilvev_h(cnst2_m, cnst3_m); \ + vec6_m = __msa_dotp_s_w(vec2_m, cnst2_m); \ + \ + SRARI_W4_SW(vec4_m, vec5_m, vec6_m, vec7_m, DCT_CONST_BITS); \ + PCKEV_H4_SH(vec4_m, vec4_m, vec5_m, vec5_m, vec6_m, vec6_m, vec7_m, \ + vec7_m, out0, out2, out1, out3); \ + } -#define SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7) { \ - v8i16 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \ - \ - SRLI_H4_SH(in0, in1, in2, in3, vec0_m, vec1_m, vec2_m, vec3_m, 15); \ - SRLI_H4_SH(in4, in5, in6, in7, vec4_m, vec5_m, vec6_m, vec7_m, 15); \ - AVE_SH4_SH(vec0_m, in0, vec1_m, in1, vec2_m, in2, vec3_m, in3, \ - in0, in1, in2, in3); \ - AVE_SH4_SH(vec4_m, in4, vec5_m, in5, vec6_m, in6, vec7_m, in7, \ - in4, in5, in6, in7); \ -} +#define SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7) \ + { \ + v8i16 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \ + \ + SRLI_H4_SH(in0, in1, in2, in3, vec0_m, vec1_m, vec2_m, vec3_m, 15); \ + SRLI_H4_SH(in4, in5, in6, in7, vec4_m, vec5_m, vec6_m, vec7_m, 15); \ + AVE_SH4_SH(vec0_m, in0, vec1_m, in1, vec2_m, in2, vec3_m, in3, in0, in1, \ + in2, in3); \ + 
AVE_SH4_SH(vec4_m, in4, vec5_m, in5, vec6_m, in6, vec7_m, in7, in4, in5, \ + in6, in7); \ + } -#define VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m; \ - v8i16 s7_m, x0_m, x1_m, x2_m, x3_m; \ - v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, \ - cospi_24_64, cospi_4_64, cospi_28_64, \ - cospi_12_64, cospi_20_64 }; \ - \ - /* FDCT stage1 */ \ - BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, \ - s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m); \ - BUTTERFLY_4(s0_m, s1_m, s2_m, s3_m, x0_m, x1_m, x2_m, x3_m); \ - ILVL_H2_SH(x1_m, x0_m, x3_m, x2_m, s0_m, s2_m); \ - ILVR_H2_SH(x1_m, x0_m, x3_m, x2_m, s1_m, s3_m); \ - SPLATI_H2_SH(coeff_m, 0, 1, x0_m, x1_m); \ - x1_m = __msa_ilvev_h(x1_m, x0_m); \ - out4 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \ - \ - SPLATI_H2_SH(coeff_m, 2, 3, x2_m, x3_m); \ - x2_m = -x2_m; \ - x2_m = __msa_ilvev_h(x3_m, x2_m); \ - out6 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \ - \ - out0 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \ - x2_m = __msa_splati_h(coeff_m, 2); \ - x2_m = __msa_ilvev_h(x2_m, x3_m); \ - out2 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \ - \ - /* stage2 */ \ - ILVRL_H2_SH(s5_m, s6_m, s1_m, s0_m); \ - \ - s6_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \ - s5_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \ - \ - /* stage3 */ \ - BUTTERFLY_4(s4_m, s7_m, s6_m, s5_m, x0_m, x3_m, x2_m, x1_m); \ - \ - /* stage4 */ \ - ILVL_H2_SH(x3_m, x0_m, x2_m, x1_m, s4_m, s6_m); \ - ILVR_H2_SH(x3_m, x0_m, x2_m, x1_m, s5_m, s7_m); \ - \ - SPLATI_H2_SH(coeff_m, 4, 5, x0_m, x1_m); \ - x1_m = __msa_ilvev_h(x0_m, x1_m); \ - out1 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x1_m); \ - \ - SPLATI_H2_SH(coeff_m, 6, 7, x2_m, x3_m); \ - x2_m = __msa_ilvev_h(x3_m, x2_m); \ - out5 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \ - \ - x1_m = __msa_splati_h(coeff_m, 5); \ - x0_m = -x0_m; \ - x0_m = __msa_ilvev_h(x1_m, x0_m); \ - out7 = 
DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x0_m); \ - \ - x2_m = __msa_splati_h(coeff_m, 6); \ - x3_m = -x3_m; \ - x2_m = __msa_ilvev_h(x2_m, x3_m); \ - out3 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \ -} +#define VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \ + out3, out4, out5, out6, out7) \ + { \ + v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m; \ + v8i16 s7_m, x0_m, x1_m, x2_m, x3_m; \ + v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \ + cospi_4_64, cospi_28_64, cospi_12_64, cospi_20_64 }; \ + \ + /* FDCT stage1 */ \ + BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, s0_m, s1_m, s2_m, \ + s3_m, s4_m, s5_m, s6_m, s7_m); \ + BUTTERFLY_4(s0_m, s1_m, s2_m, s3_m, x0_m, x1_m, x2_m, x3_m); \ + ILVL_H2_SH(x1_m, x0_m, x3_m, x2_m, s0_m, s2_m); \ + ILVR_H2_SH(x1_m, x0_m, x3_m, x2_m, s1_m, s3_m); \ + SPLATI_H2_SH(coeff_m, 0, 1, x0_m, x1_m); \ + x1_m = __msa_ilvev_h(x1_m, x0_m); \ + out4 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \ + \ + SPLATI_H2_SH(coeff_m, 2, 3, x2_m, x3_m); \ + x2_m = -x2_m; \ + x2_m = __msa_ilvev_h(x3_m, x2_m); \ + out6 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \ + \ + out0 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \ + x2_m = __msa_splati_h(coeff_m, 2); \ + x2_m = __msa_ilvev_h(x2_m, x3_m); \ + out2 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \ + \ + /* stage2 */ \ + ILVRL_H2_SH(s5_m, s6_m, s1_m, s0_m); \ + \ + s6_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \ + s5_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \ + \ + /* stage3 */ \ + BUTTERFLY_4(s4_m, s7_m, s6_m, s5_m, x0_m, x3_m, x2_m, x1_m); \ + \ + /* stage4 */ \ + ILVL_H2_SH(x3_m, x0_m, x2_m, x1_m, s4_m, s6_m); \ + ILVR_H2_SH(x3_m, x0_m, x2_m, x1_m, s5_m, s7_m); \ + \ + SPLATI_H2_SH(coeff_m, 4, 5, x0_m, x1_m); \ + x1_m = __msa_ilvev_h(x0_m, x1_m); \ + out1 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x1_m); \ + \ + SPLATI_H2_SH(coeff_m, 6, 7, x2_m, x3_m); \ + x2_m = __msa_ilvev_h(x3_m, x2_m); \ + out5 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \ + \ + x1_m = 
__msa_splati_h(coeff_m, 5); \ + x0_m = -x0_m; \ + x0_m = __msa_ilvev_h(x1_m, x0_m); \ + out7 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x0_m); \ + \ + x2_m = __msa_splati_h(coeff_m, 6); \ + x3_m = -x3_m; \ + x2_m = __msa_ilvev_h(x2_m, x3_m); \ + out3 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \ + } -#define FDCT8x16_EVEN(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m; \ - v8i16 x0_m, x1_m, x2_m, x3_m; \ - v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \ - cospi_4_64, cospi_28_64, cospi_12_64, cospi_20_64 }; \ +#define FDCT8x16_EVEN(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3, out4, out5, out6, out7) \ + { \ + v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m; \ + v8i16 x0_m, x1_m, x2_m, x3_m; \ + v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \ + cospi_4_64, cospi_28_64, cospi_12_64, cospi_20_64 }; \ \ - /* FDCT stage1 */ \ - BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, \ - s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m); \ - BUTTERFLY_4(s0_m, s1_m, s2_m, s3_m, x0_m, x1_m, x2_m, x3_m); \ - ILVL_H2_SH(x1_m, x0_m, x3_m, x2_m, s0_m, s2_m); \ - ILVR_H2_SH(x1_m, x0_m, x3_m, x2_m, s1_m, s3_m); \ - SPLATI_H2_SH(coeff_m, 0, 1, x0_m, x1_m); \ - x1_m = __msa_ilvev_h(x1_m, x0_m); \ - out4 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \ + /* FDCT stage1 */ \ + BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, s0_m, s1_m, s2_m, \ + s3_m, s4_m, s5_m, s6_m, s7_m); \ + BUTTERFLY_4(s0_m, s1_m, s2_m, s3_m, x0_m, x1_m, x2_m, x3_m); \ + ILVL_H2_SH(x1_m, x0_m, x3_m, x2_m, s0_m, s2_m); \ + ILVR_H2_SH(x1_m, x0_m, x3_m, x2_m, s1_m, s3_m); \ + SPLATI_H2_SH(coeff_m, 0, 1, x0_m, x1_m); \ + x1_m = __msa_ilvev_h(x1_m, x0_m); \ + out4 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \ \ - SPLATI_H2_SH(coeff_m, 2, 3, x2_m, x3_m); \ - x2_m = -x2_m; \ - x2_m = __msa_ilvev_h(x3_m, x2_m); \ - out6 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, 
x2_m); \ + SPLATI_H2_SH(coeff_m, 2, 3, x2_m, x3_m); \ + x2_m = -x2_m; \ + x2_m = __msa_ilvev_h(x3_m, x2_m); \ + out6 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \ \ - out0 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \ - x2_m = __msa_splati_h(coeff_m, 2); \ - x2_m = __msa_ilvev_h(x2_m, x3_m); \ - out2 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \ + out0 = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \ + x2_m = __msa_splati_h(coeff_m, 2); \ + x2_m = __msa_ilvev_h(x2_m, x3_m); \ + out2 = DOT_SHIFT_RIGHT_PCK_H(s2_m, s3_m, x2_m); \ \ - /* stage2 */ \ - ILVRL_H2_SH(s5_m, s6_m, s1_m, s0_m); \ + /* stage2 */ \ + ILVRL_H2_SH(s5_m, s6_m, s1_m, s0_m); \ \ - s6_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \ - s5_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \ + s6_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x0_m); \ + s5_m = DOT_SHIFT_RIGHT_PCK_H(s0_m, s1_m, x1_m); \ \ - /* stage3 */ \ - BUTTERFLY_4(s4_m, s7_m, s6_m, s5_m, x0_m, x3_m, x2_m, x1_m); \ + /* stage3 */ \ + BUTTERFLY_4(s4_m, s7_m, s6_m, s5_m, x0_m, x3_m, x2_m, x1_m); \ \ - /* stage4 */ \ - ILVL_H2_SH(x3_m, x0_m, x2_m, x1_m, s4_m, s6_m); \ - ILVR_H2_SH(x3_m, x0_m, x2_m, x1_m, s5_m, s7_m); \ + /* stage4 */ \ + ILVL_H2_SH(x3_m, x0_m, x2_m, x1_m, s4_m, s6_m); \ + ILVR_H2_SH(x3_m, x0_m, x2_m, x1_m, s5_m, s7_m); \ \ - SPLATI_H2_SH(coeff_m, 4, 5, x0_m, x1_m); \ - x1_m = __msa_ilvev_h(x0_m, x1_m); \ - out1 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x1_m); \ + SPLATI_H2_SH(coeff_m, 4, 5, x0_m, x1_m); \ + x1_m = __msa_ilvev_h(x0_m, x1_m); \ + out1 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x1_m); \ \ - SPLATI_H2_SH(coeff_m, 6, 7, x2_m, x3_m); \ - x2_m = __msa_ilvev_h(x3_m, x2_m); \ - out5 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \ + SPLATI_H2_SH(coeff_m, 6, 7, x2_m, x3_m); \ + x2_m = __msa_ilvev_h(x3_m, x2_m); \ + out5 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \ \ - x1_m = __msa_splati_h(coeff_m, 5); \ - x0_m = -x0_m; \ - x0_m = __msa_ilvev_h(x1_m, x0_m); \ - out7 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x0_m); \ + x1_m = __msa_splati_h(coeff_m, 5); \ + 
x0_m = -x0_m; \ + x0_m = __msa_ilvev_h(x1_m, x0_m); \ + out7 = DOT_SHIFT_RIGHT_PCK_H(s4_m, s5_m, x0_m); \ \ - x2_m = __msa_splati_h(coeff_m, 6); \ - x3_m = -x3_m; \ - x2_m = __msa_ilvev_h(x2_m, x3_m); \ - out3 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \ -} + x2_m = __msa_splati_h(coeff_m, 6); \ + x3_m = -x3_m; \ + x2_m = __msa_ilvev_h(x2_m, x3_m); \ + out3 = DOT_SHIFT_RIGHT_PCK_H(s6_m, s7_m, x2_m); \ + } -#define FDCT8x16_ODD(input0, input1, input2, input3, \ - input4, input5, input6, input7, \ - out1, out3, out5, out7, \ - out9, out11, out13, out15) { \ - v8i16 stp21_m, stp22_m, stp23_m, stp24_m, stp25_m, stp26_m; \ - v8i16 stp30_m, stp31_m, stp32_m, stp33_m, stp34_m, stp35_m; \ - v8i16 stp36_m, stp37_m, vec0_m, vec1_m; \ - v8i16 vec2_m, vec3_m, vec4_m, vec5_m, vec6_m; \ - v8i16 cnst0_m, cnst1_m, cnst4_m, cnst5_m; \ - v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, \ - cospi_24_64, -cospi_8_64, -cospi_24_64, \ - cospi_12_64, cospi_20_64 }; \ - v8i16 coeff1_m = { cospi_2_64, cospi_30_64, cospi_14_64, \ - cospi_18_64, cospi_10_64, cospi_22_64, \ - cospi_6_64, cospi_26_64 }; \ - v8i16 coeff2_m = { -cospi_2_64, -cospi_10_64, -cospi_18_64, \ - -cospi_26_64, 0, 0, 0, 0 }; \ - \ - /* stp 1 */ \ - ILVL_H2_SH(input2, input5, input3, input4, vec2_m, vec4_m); \ - ILVR_H2_SH(input2, input5, input3, input4, vec3_m, vec5_m); \ - \ - cnst4_m = __msa_splati_h(coeff_m, 0); \ - stp25_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst4_m); \ - \ - cnst5_m = __msa_splati_h(coeff_m, 1); \ - cnst5_m = __msa_ilvev_h(cnst5_m, cnst4_m); \ - stp22_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst5_m); \ - stp24_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst4_m); \ - stp23_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst5_m); \ - \ - /* stp2 */ \ - BUTTERFLY_4(input0, input1, stp22_m, stp23_m, \ - stp30_m, stp31_m, stp32_m, stp33_m); \ - BUTTERFLY_4(input7, input6, stp25_m, stp24_m, \ - stp37_m, stp36_m, stp35_m, stp34_m); \ - \ - ILVL_H2_SH(stp36_m, stp31_m, stp35_m, stp32_m, vec2_m, 
vec4_m); \ - ILVR_H2_SH(stp36_m, stp31_m, stp35_m, stp32_m, vec3_m, vec5_m); \ - \ - SPLATI_H2_SH(coeff_m, 2, 3, cnst0_m, cnst1_m); \ - cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \ - stp26_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \ - \ - cnst0_m = __msa_splati_h(coeff_m, 4); \ - cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ - stp21_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \ - \ - SPLATI_H2_SH(coeff_m, 5, 2, cnst0_m, cnst1_m); \ - cnst1_m = __msa_ilvev_h(cnst0_m, cnst1_m); \ - stp25_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst1_m); \ - \ - cnst0_m = __msa_splati_h(coeff_m, 3); \ - cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ - stp22_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst1_m); \ - \ - /* stp4 */ \ - BUTTERFLY_4(stp30_m, stp37_m, stp26_m, stp21_m, \ - vec6_m, vec2_m, vec4_m, vec5_m); \ - BUTTERFLY_4(stp33_m, stp34_m, stp25_m, stp22_m, \ - stp21_m, stp23_m, stp24_m, stp31_m); \ - \ - ILVRL_H2_SH(vec2_m, vec6_m, vec1_m, vec0_m); \ - SPLATI_H2_SH(coeff1_m, 0, 1, cnst0_m, cnst1_m); \ - cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \ - \ - out1 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ - \ - cnst0_m = __msa_splati_h(coeff2_m, 0); \ - cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ - out15 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ - \ - ILVRL_H2_SH(vec4_m, vec5_m, vec1_m, vec0_m); \ - SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \ - cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ - \ - out9 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \ - \ - cnst1_m = __msa_splati_h(coeff2_m, 2); \ - cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \ - out7 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ - \ - ILVRL_H2_SH(stp23_m, stp21_m, vec1_m, vec0_m); \ - SPLATI_H2_SH(coeff1_m, 4, 5, cnst0_m, cnst1_m); \ - cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \ - out5 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ - \ - cnst0_m = __msa_splati_h(coeff2_m, 1); \ - cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ - out11 = 
DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ - \ - ILVRL_H2_SH(stp24_m, stp31_m, vec1_m, vec0_m); \ - SPLATI_H2_SH(coeff1_m, 6, 7, cnst0_m, cnst1_m); \ - cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ - \ - out13 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \ - \ - cnst1_m = __msa_splati_h(coeff2_m, 3); \ - cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \ - out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ -} +#define FDCT8x16_ODD(input0, input1, input2, input3, input4, input5, input6, \ + input7, out1, out3, out5, out7, out9, out11, out13, \ + out15) \ + { \ + v8i16 stp21_m, stp22_m, stp23_m, stp24_m, stp25_m, stp26_m; \ + v8i16 stp30_m, stp31_m, stp32_m, stp33_m, stp34_m, stp35_m; \ + v8i16 stp36_m, stp37_m, vec0_m, vec1_m; \ + v8i16 vec2_m, vec3_m, vec4_m, vec5_m, vec6_m; \ + v8i16 cnst0_m, cnst1_m, cnst4_m, cnst5_m; \ + v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \ + -cospi_8_64, -cospi_24_64, cospi_12_64, cospi_20_64 }; \ + v8i16 coeff1_m = { cospi_2_64, cospi_30_64, cospi_14_64, cospi_18_64, \ + cospi_10_64, cospi_22_64, cospi_6_64, cospi_26_64 }; \ + v8i16 coeff2_m = { \ + -cospi_2_64, -cospi_10_64, -cospi_18_64, -cospi_26_64, 0, 0, 0, 0 \ + }; \ + \ + /* stp 1 */ \ + ILVL_H2_SH(input2, input5, input3, input4, vec2_m, vec4_m); \ + ILVR_H2_SH(input2, input5, input3, input4, vec3_m, vec5_m); \ + \ + cnst4_m = __msa_splati_h(coeff_m, 0); \ + stp25_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst4_m); \ + \ + cnst5_m = __msa_splati_h(coeff_m, 1); \ + cnst5_m = __msa_ilvev_h(cnst5_m, cnst4_m); \ + stp22_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst5_m); \ + stp24_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst4_m); \ + stp23_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst5_m); \ + \ + /* stp2 */ \ + BUTTERFLY_4(input0, input1, stp22_m, stp23_m, stp30_m, stp31_m, stp32_m, \ + stp33_m); \ + BUTTERFLY_4(input7, input6, stp25_m, stp24_m, stp37_m, stp36_m, stp35_m, \ + stp34_m); \ + \ + ILVL_H2_SH(stp36_m, stp31_m, stp35_m, stp32_m, 
vec2_m, vec4_m); \ + ILVR_H2_SH(stp36_m, stp31_m, stp35_m, stp32_m, vec3_m, vec5_m); \ + \ + SPLATI_H2_SH(coeff_m, 2, 3, cnst0_m, cnst1_m); \ + cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \ + stp26_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \ + \ + cnst0_m = __msa_splati_h(coeff_m, 4); \ + cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ + stp21_m = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \ + \ + SPLATI_H2_SH(coeff_m, 5, 2, cnst0_m, cnst1_m); \ + cnst1_m = __msa_ilvev_h(cnst0_m, cnst1_m); \ + stp25_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst1_m); \ + \ + cnst0_m = __msa_splati_h(coeff_m, 3); \ + cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ + stp22_m = DOT_SHIFT_RIGHT_PCK_H(vec4_m, vec5_m, cnst1_m); \ + \ + /* stp4 */ \ + BUTTERFLY_4(stp30_m, stp37_m, stp26_m, stp21_m, vec6_m, vec2_m, vec4_m, \ + vec5_m); \ + BUTTERFLY_4(stp33_m, stp34_m, stp25_m, stp22_m, stp21_m, stp23_m, stp24_m, \ + stp31_m); \ + \ + ILVRL_H2_SH(vec2_m, vec6_m, vec1_m, vec0_m); \ + SPLATI_H2_SH(coeff1_m, 0, 1, cnst0_m, cnst1_m); \ + cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \ + \ + out1 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ + \ + cnst0_m = __msa_splati_h(coeff2_m, 0); \ + cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ + out15 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ + \ + ILVRL_H2_SH(vec4_m, vec5_m, vec1_m, vec0_m); \ + SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \ + cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ + \ + out9 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \ + \ + cnst1_m = __msa_splati_h(coeff2_m, 2); \ + cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \ + out7 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ + \ + ILVRL_H2_SH(stp23_m, stp21_m, vec1_m, vec0_m); \ + SPLATI_H2_SH(coeff1_m, 4, 5, cnst0_m, cnst1_m); \ + cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \ + out5 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ + \ + cnst0_m = __msa_splati_h(coeff2_m, 1); \ + cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ + out11 = 
DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ + \ + ILVRL_H2_SH(stp24_m, stp31_m, vec1_m, vec0_m); \ + SPLATI_H2_SH(coeff1_m, 6, 7, cnst0_m, cnst1_m); \ + cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ + \ + out13 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \ + \ + cnst1_m = __msa_splati_h(coeff2_m, 3); \ + cnst0_m = __msa_ilvev_h(cnst0_m, cnst1_m); \ + out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ + } -#define FDCT_POSTPROC_2V_NEG_H(vec0, vec1) { \ - v8i16 tp0_m, tp1_m; \ - v8i16 one_m = __msa_ldi_h(1); \ - \ - tp0_m = __msa_clti_s_h(vec0, 0); \ - tp1_m = __msa_clti_s_h(vec1, 0); \ - vec0 += 1; \ - vec1 += 1; \ - tp0_m = one_m & tp0_m; \ - tp1_m = one_m & tp1_m; \ - vec0 += tp0_m; \ - vec1 += tp1_m; \ - vec0 >>= 2; \ - vec1 >>= 2; \ -} +#define FDCT_POSTPROC_2V_NEG_H(vec0, vec1) \ + { \ + v8i16 tp0_m, tp1_m; \ + v8i16 one_m = __msa_ldi_h(1); \ + \ + tp0_m = __msa_clti_s_h(vec0, 0); \ + tp1_m = __msa_clti_s_h(vec1, 0); \ + vec0 += 1; \ + vec1 += 1; \ + tp0_m = one_m & tp0_m; \ + tp1_m = one_m & tp1_m; \ + vec0 += tp0_m; \ + vec1 += tp1_m; \ + vec0 >>= 2; \ + vec1 >>= 2; \ + } -#define FDCT32_POSTPROC_NEG_W(vec) { \ - v4i32 temp_m; \ - v4i32 one_m = __msa_ldi_w(1); \ - \ - temp_m = __msa_clti_s_w(vec, 0); \ - vec += 1; \ - temp_m = one_m & temp_m; \ - vec += temp_m; \ - vec >>= 2; \ -} +#define FDCT32_POSTPROC_NEG_W(vec) \ + { \ + v4i32 temp_m; \ + v4i32 one_m = __msa_ldi_w(1); \ + \ + temp_m = __msa_clti_s_w(vec, 0); \ + vec += 1; \ + temp_m = one_m & temp_m; \ + vec += temp_m; \ + vec >>= 2; \ + } -#define FDCT32_POSTPROC_2V_POS_H(vec0, vec1) { \ - v8i16 tp0_m, tp1_m; \ - v8i16 one = __msa_ldi_h(1); \ +#define FDCT32_POSTPROC_2V_POS_H(vec0, vec1) \ + { \ + v8i16 tp0_m, tp1_m; \ + v8i16 one = __msa_ldi_h(1); \ \ - tp0_m = __msa_clei_s_h(vec0, 0); \ - tp1_m = __msa_clei_s_h(vec1, 0); \ - tp0_m = (v8i16)__msa_xori_b((v16u8)tp0_m, 255); \ - tp1_m = (v8i16)__msa_xori_b((v16u8)tp1_m, 255); \ - vec0 += 1; \ - vec1 += 1; \ - tp0_m = one & tp0_m; \ - 
tp1_m = one & tp1_m; \ - vec0 += tp0_m; \ - vec1 += tp1_m; \ - vec0 >>= 2; \ - vec1 >>= 2; \ -} + tp0_m = __msa_clei_s_h(vec0, 0); \ + tp1_m = __msa_clei_s_h(vec1, 0); \ + tp0_m = (v8i16)__msa_xori_b((v16u8)tp0_m, 255); \ + tp1_m = (v8i16)__msa_xori_b((v16u8)tp1_m, 255); \ + vec0 += 1; \ + vec1 += 1; \ + tp0_m = one & tp0_m; \ + tp1_m = one & tp1_m; \ + vec0 += tp0_m; \ + vec1 += tp1_m; \ + vec0 >>= 2; \ + vec1 >>= 2; \ + } -#define DOTP_CONST_PAIR_W(reg0_left, reg1_left, reg0_right, \ - reg1_right, const0, const1, \ - out0, out1, out2, out3) { \ - v4i32 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m; \ - v2i64 tp0_m, tp1_m, tp2_m, tp3_m; \ - v4i32 k0_m = __msa_fill_w((int32_t) const0); \ - \ - s0_m = __msa_fill_w((int32_t) const1); \ - k0_m = __msa_ilvev_w(s0_m, k0_m); \ - \ - ILVRL_W2_SW(-reg1_left, reg0_left, s1_m, s0_m); \ - ILVRL_W2_SW(reg0_left, reg1_left, s3_m, s2_m); \ - ILVRL_W2_SW(-reg1_right, reg0_right, s5_m, s4_m); \ - ILVRL_W2_SW(reg0_right, reg1_right, s7_m, s6_m); \ - \ - DOTP_SW2_SD(s0_m, s1_m, k0_m, k0_m, tp0_m, tp1_m); \ - DOTP_SW2_SD(s4_m, s5_m, k0_m, k0_m, tp2_m, tp3_m); \ - tp0_m = __msa_srari_d(tp0_m, DCT_CONST_BITS); \ - tp1_m = __msa_srari_d(tp1_m, DCT_CONST_BITS); \ - tp2_m = __msa_srari_d(tp2_m, DCT_CONST_BITS); \ - tp3_m = __msa_srari_d(tp3_m, DCT_CONST_BITS); \ - out0 = __msa_pckev_w((v4i32)tp0_m, (v4i32)tp1_m); \ - out1 = __msa_pckev_w((v4i32)tp2_m, (v4i32)tp3_m); \ - \ - DOTP_SW2_SD(s2_m, s3_m, k0_m, k0_m, tp0_m, tp1_m); \ - DOTP_SW2_SD(s6_m, s7_m, k0_m, k0_m, tp2_m, tp3_m); \ - tp0_m = __msa_srari_d(tp0_m, DCT_CONST_BITS); \ - tp1_m = __msa_srari_d(tp1_m, DCT_CONST_BITS); \ - tp2_m = __msa_srari_d(tp2_m, DCT_CONST_BITS); \ - tp3_m = __msa_srari_d(tp3_m, DCT_CONST_BITS); \ - out2 = __msa_pckev_w((v4i32)tp0_m, (v4i32)tp1_m); \ - out3 = __msa_pckev_w((v4i32)tp2_m, (v4i32)tp3_m); \ -} +#define DOTP_CONST_PAIR_W(reg0_left, reg1_left, reg0_right, reg1_right, \ + const0, const1, out0, out1, out2, out3) \ + { \ + v4i32 s0_m, s1_m, s2_m, 
s3_m, s4_m, s5_m, s6_m, s7_m; \ + v2i64 tp0_m, tp1_m, tp2_m, tp3_m; \ + v4i32 k0_m = __msa_fill_w((int32_t)const0); \ + \ + s0_m = __msa_fill_w((int32_t)const1); \ + k0_m = __msa_ilvev_w(s0_m, k0_m); \ + \ + ILVRL_W2_SW(-reg1_left, reg0_left, s1_m, s0_m); \ + ILVRL_W2_SW(reg0_left, reg1_left, s3_m, s2_m); \ + ILVRL_W2_SW(-reg1_right, reg0_right, s5_m, s4_m); \ + ILVRL_W2_SW(reg0_right, reg1_right, s7_m, s6_m); \ + \ + DOTP_SW2_SD(s0_m, s1_m, k0_m, k0_m, tp0_m, tp1_m); \ + DOTP_SW2_SD(s4_m, s5_m, k0_m, k0_m, tp2_m, tp3_m); \ + tp0_m = __msa_srari_d(tp0_m, DCT_CONST_BITS); \ + tp1_m = __msa_srari_d(tp1_m, DCT_CONST_BITS); \ + tp2_m = __msa_srari_d(tp2_m, DCT_CONST_BITS); \ + tp3_m = __msa_srari_d(tp3_m, DCT_CONST_BITS); \ + out0 = __msa_pckev_w((v4i32)tp0_m, (v4i32)tp1_m); \ + out1 = __msa_pckev_w((v4i32)tp2_m, (v4i32)tp3_m); \ + \ + DOTP_SW2_SD(s2_m, s3_m, k0_m, k0_m, tp0_m, tp1_m); \ + DOTP_SW2_SD(s6_m, s7_m, k0_m, k0_m, tp2_m, tp3_m); \ + tp0_m = __msa_srari_d(tp0_m, DCT_CONST_BITS); \ + tp1_m = __msa_srari_d(tp1_m, DCT_CONST_BITS); \ + tp2_m = __msa_srari_d(tp2_m, DCT_CONST_BITS); \ + tp3_m = __msa_srari_d(tp3_m, DCT_CONST_BITS); \ + out2 = __msa_pckev_w((v4i32)tp0_m, (v4i32)tp1_m); \ + out3 = __msa_pckev_w((v4i32)tp2_m, (v4i32)tp3_m); \ + } void fdct8x16_1d_column(const int16_t *input, int16_t *tmp_ptr, int32_t src_stride); diff --git a/libs/libvpx/vpx_dsp/mips/idct16x16_msa.c b/libs/libvpx/vpx_dsp/mips/idct16x16_msa.c index 5faac715e8..2a211c5677 100644 --- a/libs/libvpx/vpx_dsp/mips/idct16x16_msa.c +++ b/libs/libvpx/vpx_dsp/mips/idct16x16_msa.c @@ -20,10 +20,10 @@ void vpx_idct16_1d_rows_msa(const int16_t *input, int16_t *output) { input += 8; LD_SH8(input, 16, reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15); - TRANSPOSE8x8_SH_SH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, - reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7); - TRANSPOSE8x8_SH_SH(reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15, - reg8, reg9, reg10, reg11, reg12, reg13, reg14, 
reg15); + TRANSPOSE8x8_SH_SH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg0, reg1, + reg2, reg3, reg4, reg5, reg6, reg7); + TRANSPOSE8x8_SH_SH(reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15, reg8, + reg9, reg10, reg11, reg12, reg13, reg14, reg15); DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14); DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6); BUTTERFLY_4(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2); @@ -93,13 +93,13 @@ void vpx_idct16_1d_rows_msa(const int16_t *input, int16_t *output) { reg3 = tmp7; /* transpose block */ - TRANSPOSE8x8_SH_SH(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14, - reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14); + TRANSPOSE8x8_SH_SH(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14, reg0, + reg2, reg4, reg6, reg8, reg10, reg12, reg14); ST_SH8(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14, output, 16); /* transpose block */ - TRANSPOSE8x8_SH_SH(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15, - reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15); + TRANSPOSE8x8_SH_SH(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15, reg3, + reg13, reg11, reg5, reg7, reg9, reg1, reg15); ST_SH8(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15, (output + 8), 16); } @@ -233,7 +233,7 @@ void vpx_idct16x16_10_add_msa(const int16_t *input, uint8_t *dst, /* short case just considers top 4 rows as valid output */ out += 4 * 16; for (i = 12; i--;) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sw $zero, 0(%[out]) \n\t" "sw $zero, 4(%[out]) \n\t" "sw $zero, 8(%[out]) \n\t" @@ -244,8 +244,7 @@ void vpx_idct16x16_10_add_msa(const int16_t *input, uint8_t *dst, "sw $zero, 28(%[out]) \n\t" : - : [out] "r" (out) - ); + : [out] "r"(out)); out += 16; } @@ -283,8 +282,8 @@ void vpx_idct16x16_1_add_msa(const int16_t *input, uint8_t *dst, ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7); CLIP_SH4_0_255(res0, res1, res2, res3); CLIP_SH4_0_255(res4, res5, res6, res7); - 
PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3, - tmp0, tmp1, tmp2, tmp3); + PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3, tmp0, tmp1, + tmp2, tmp3); ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride); dst += (4 * dst_stride); } @@ -295,29 +294,28 @@ void vpx_iadst16_1d_rows_msa(const int16_t *input, int16_t *output) { v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15; /* load input data */ - LD_SH16(input, 8, - l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14, l7, l15); - TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, - l0, l1, l2, l3, l4, l5, l6, l7); - TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, - l8, l9, l10, l11, l12, l13, l14, l15); + LD_SH16(input, 8, l0, l8, l1, l9, l2, l10, l3, l11, l4, l12, l5, l13, l6, l14, + l7, l15); + TRANSPOSE8x8_SH_SH(l0, l1, l2, l3, l4, l5, l6, l7, l0, l1, l2, l3, l4, l5, l6, + l7); + TRANSPOSE8x8_SH_SH(l8, l9, l10, l11, l12, l13, l14, l15, l8, l9, l10, l11, + l12, l13, l14, l15); /* ADST in horizontal */ - VP9_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7, - l8, l9, l10, l11, l12, l13, l14, l15, - r0, r1, r2, r3, r4, r5, r6, r7, - r8, r9, r10, r11, r12, r13, r14, r15); + VP9_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, + l14, l15, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, + r12, r13, r14, r15); l1 = -r8; l3 = -r4; l13 = -r13; l15 = -r1; - TRANSPOSE8x8_SH_SH(r0, l1, r12, l3, r6, r14, r10, r2, - l0, l1, l2, l3, l4, l5, l6, l7); + TRANSPOSE8x8_SH_SH(r0, l1, r12, l3, r6, r14, r10, r2, l0, l1, l2, l3, l4, l5, + l6, l7); ST_SH8(l0, l1, l2, l3, l4, l5, l6, l7, output, 16); - TRANSPOSE8x8_SH_SH(r3, r11, r15, r7, r5, l13, r9, l15, - l8, l9, l10, l11, l12, l13, l14, l15); + TRANSPOSE8x8_SH_SH(r3, r11, r15, r7, r5, l13, r9, l15, l8, l9, l10, l11, l12, + l13, l14, l15); ST_SH8(l8, l9, l10, l11, l12, l13, l14, l15, (output + 8), 16); } diff --git a/libs/libvpx/vpx_dsp/mips/idct32x32_msa.c b/libs/libvpx/vpx_dsp/mips/idct32x32_msa.c 
index d5b3966e0e..2ea6136f9b 100644 --- a/libs/libvpx/vpx_dsp/mips/idct32x32_msa.c +++ b/libs/libvpx/vpx_dsp/mips/idct32x32_msa.c @@ -17,10 +17,10 @@ static void idct32x8_row_transpose_store(const int16_t *input, /* 1st & 2nd 8x8 */ LD_SH8(input, 32, m0, n0, m1, n1, m2, n2, m3, n3); LD_SH8((input + 8), 32, m4, n4, m5, n5, m6, n6, m7, n7); - TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, - m0, n0, m1, n1, m2, n2, m3, n3); - TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, - m4, n4, m5, n5, m6, n6, m7, n7); + TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2, m3, + n3); + TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6, m7, + n7); ST_SH8(m0, n0, m1, n1, m2, n2, m3, n3, (tmp_buf), 8); ST_SH4(m4, n4, m5, n5, (tmp_buf + 8 * 8), 8); ST_SH4(m6, n6, m7, n7, (tmp_buf + 12 * 8), 8); @@ -28,10 +28,10 @@ static void idct32x8_row_transpose_store(const int16_t *input, /* 3rd & 4th 8x8 */ LD_SH8((input + 16), 32, m0, n0, m1, n1, m2, n2, m3, n3); LD_SH8((input + 24), 32, m4, n4, m5, n5, m6, n6, m7, n7); - TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, - m0, n0, m1, n1, m2, n2, m3, n3); - TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, - m4, n4, m5, n5, m6, n6, m7, n7); + TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2, m3, + n3); + TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6, m7, + n7); ST_SH4(m0, n0, m1, n1, (tmp_buf + 16 * 8), 8); ST_SH4(m2, n2, m3, n3, (tmp_buf + 20 * 8), 8); ST_SH4(m4, n4, m5, n5, (tmp_buf + 24 * 8), 8); @@ -186,8 +186,7 @@ static void idct32x8_row_odd_process_store(int16_t *tmp_buf, DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7); /* 4 Stores */ - SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, - vec0, vec1, vec2, vec3); + SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec0, vec1, vec2, vec3); DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1); DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, 
loc2, loc3); @@ -198,8 +197,7 @@ static void idct32x8_row_odd_process_store(int16_t *tmp_buf, ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8); /* 4 Stores */ - ADD4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, - vec1, vec2, vec0, vec3); + ADD4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec1, vec2, vec0, vec3); BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2); ST_SH(reg0, (tmp_odd_buf + 13 * 8)); ST_SH(reg1, (tmp_odd_buf + 14 * 8)); @@ -213,8 +211,7 @@ static void idct32x8_row_odd_process_store(int16_t *tmp_buf, LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3); LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7); - ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, - loc0, loc1, loc2, loc3); + ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3); ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8); SUB2(reg0, reg4, reg1, reg5, vec0, vec1); @@ -228,8 +225,7 @@ static void idct32x8_row_odd_process_store(int16_t *tmp_buf, LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3); LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7); - ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, - loc0, loc1, loc2, loc3); + ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3); ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8); SUB2(reg0, reg4, reg3, reg7, vec0, vec1); @@ -242,8 +238,7 @@ static void idct32x8_row_odd_process_store(int16_t *tmp_buf, static void idct_butterfly_transpose_store(int16_t *tmp_buf, int16_t *tmp_eve_buf, - int16_t *tmp_odd_buf, - int16_t *dst) { + int16_t *tmp_odd_buf, int16_t *dst) { v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7; @@ -317,26 +312,26 @@ static void idct_butterfly_transpose_store(int16_t *tmp_buf, /* Transpose : 16 vectors */ /* 1st & 2nd 8x8 */ - TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, - m0, n0, m1, n1, m2, n2, m3, n3); + TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, 
n0, m1, n1, m2, n2, m3, + n3); ST_SH4(m0, n0, m1, n1, (dst + 0), 32); ST_SH4(m2, n2, m3, n3, (dst + 4 * 32), 32); - TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, - m4, n4, m5, n5, m6, n6, m7, n7); + TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6, m7, + n7); ST_SH4(m4, n4, m5, n5, (dst + 8), 32); ST_SH4(m6, n6, m7, n7, (dst + 8 + 4 * 32), 32); /* 3rd & 4th 8x8 */ LD_SH8((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3); LD_SH8((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7); - TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, - m0, n0, m1, n1, m2, n2, m3, n3); + TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, m0, n0, m1, n1, m2, n2, m3, + n3); ST_SH4(m0, n0, m1, n1, (dst + 16), 32); ST_SH4(m2, n2, m3, n3, (dst + 16 + 4 * 32), 32); - TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, - m4, n4, m5, n5, m6, n6, m7, n7); + TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, m4, n4, m5, n5, m6, n6, m7, + n7); ST_SH4(m4, n4, m5, n5, (dst + 24), 32); ST_SH4(m6, n6, m7, n7, (dst + 24 + 4 * 32), 32); } @@ -349,8 +344,8 @@ static void idct32x8_1d_rows_msa(const int16_t *input, int16_t *output) { idct32x8_row_transpose_store(input, &tmp_buf[0]); idct32x8_row_even_process_store(&tmp_buf[0], &tmp_eve_buf[0]); idct32x8_row_odd_process_store(&tmp_buf[0], &tmp_odd_buf[0]); - idct_butterfly_transpose_store(&tmp_buf[0], &tmp_eve_buf[0], - &tmp_odd_buf[0], output); + idct_butterfly_transpose_store(&tmp_buf[0], &tmp_eve_buf[0], &tmp_odd_buf[0], + output); } static void idct8x32_column_even_process_store(int16_t *tmp_buf, @@ -541,8 +536,7 @@ static void idct8x32_column_odd_process_store(int16_t *tmp_buf, } static void idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf, - int16_t *tmp_odd_buf, - uint8_t *dst, + int16_t *tmp_odd_buf, uint8_t *dst, int32_t dst_stride) { v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7; @@ -563,8 +557,8 @@ static void 
idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf, SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m6, m2, m4, m0); SRARI_H4_SH(m0, m2, m4, m6, 6); - VP9_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride), - m0, m2, m4, m6); + VP9_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride), m0, m2, m4, + m6); /* Load 8 & Store 8 */ vec0 = LD_SH(tmp_odd_buf + 4 * 8); @@ -578,13 +572,12 @@ static void idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf, ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7); SRARI_H4_SH(m1, m3, m5, m7, 6); - VP9_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride), - m1, m3, m5, m7); + VP9_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride), m1, m3, m5, m7); SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m7, m3, m5, m1); SRARI_H4_SH(m1, m3, m5, m7, 6); - VP9_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride), - m1, m3, m5, m7); + VP9_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride), m1, m3, m5, + m7); /* Load 8 & Store 8 */ vec0 = LD_SH(tmp_odd_buf + 2 * 8); @@ -598,13 +591,12 @@ static void idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf, ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6); SRARI_H4_SH(n0, n2, n4, n6, 6); - VP9_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride), - n0, n2, n4, n6); + VP9_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride), n0, n2, n4, n6); SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n6, n2, n4, n0); SRARI_H4_SH(n0, n2, n4, n6, 6); - VP9_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride), - n0, n2, n4, n6); + VP9_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride), n0, n2, n4, + n6); /* Load 8 & Store 8 */ vec0 = LD_SH(tmp_odd_buf + 5 * 8); @@ -618,13 +610,12 @@ static void idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf, ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7); SRARI_H4_SH(n1, n3, n5, n7, 6); - VP9_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * 
dst_stride), - n1, n3, n5, n7); + VP9_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride), n1, n3, n5, n7); SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n7, n3, n5, n1); SRARI_H4_SH(n1, n3, n5, n7, 6); - VP9_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride), - n1, n3, n5, n7); + VP9_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride), n1, n3, n5, + n7); } static void idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, @@ -634,8 +625,8 @@ static void idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, idct8x32_column_even_process_store(input, &tmp_eve_buf[0]); idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]); - idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0], - dst, dst_stride); + idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0], dst, + dst_stride); } void vpx_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dst, @@ -665,7 +656,7 @@ void vpx_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst, int16_t *out_ptr = out_arr; for (i = 32; i--;) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sw $zero, 0(%[out_ptr]) \n\t" "sw $zero, 4(%[out_ptr]) \n\t" "sw $zero, 8(%[out_ptr]) \n\t" @@ -684,8 +675,7 @@ void vpx_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst, "sw $zero, 60(%[out_ptr]) \n\t" : - : [out_ptr] "r" (out_ptr) - ); + : [out_ptr] "r"(out_ptr)); out_ptr += 32; } @@ -728,8 +718,8 @@ void vpx_idct32x32_1_add_msa(const int16_t *input, uint8_t *dst, ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7); CLIP_SH4_0_255(res0, res1, res2, res3); CLIP_SH4_0_255(res4, res5, res6, res7); - PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3, - tmp0, tmp1, tmp2, tmp3); + PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3, tmp0, tmp1, + tmp2, tmp3); ST_UB2(tmp0, tmp1, dst, 16); dst += dst_stride; diff --git a/libs/libvpx/vpx_dsp/mips/idct4x4_msa.c b/libs/libvpx/vpx_dsp/mips/idct4x4_msa.c index f289d8edab..0a85742f10 100644 --- 
a/libs/libvpx/vpx_dsp/mips/idct4x4_msa.c +++ b/libs/libvpx/vpx_dsp/mips/idct4x4_msa.c @@ -42,8 +42,8 @@ void vpx_iwht4x4_16_add_msa(const int16_t *input, uint8_t *dst, in0_r -= in3_r; in2_r += in1_r; - PCKEV_H4_SH(in0_r, in0_r, in1_r, in1_r, in2_r, in2_r, in3_r, in3_r, - in0, in1, in2, in3); + PCKEV_H4_SH(in0_r, in0_r, in1_r, in1_r, in2_r, in2_r, in3_r, in3_r, in0, in1, + in2, in3); ADDBLK_ST4x4_UB(in0, in3, in1, in2, dst, dst_stride); } diff --git a/libs/libvpx/vpx_dsp/mips/idct8x8_msa.c b/libs/libvpx/vpx_dsp/mips/idct8x8_msa.c index fd667e4566..7f77d20191 100644 --- a/libs/libvpx/vpx_dsp/mips/idct8x8_msa.c +++ b/libs/libvpx/vpx_dsp/mips/idct8x8_msa.c @@ -18,17 +18,17 @@ void vpx_idct8x8_64_add_msa(const int16_t *input, uint8_t *dst, LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7); /* rows transform */ - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); /* 1D idct8x8 */ - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); /* columns transform */ - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); /* 1D idct8x8 */ - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); /* final rounding (add 2^4, divide by 2^5) and shift */ SRARI_H4_SH(in0, in1, in2, in3, 5); SRARI_H4_SH(in4, in5, in6, in7, 5); @@ -82,12 +82,12 @@ void vpx_idct8x8_12_add_msa(const int16_t *input, uint8_t *dst, PCKEV_H2_SH(zero, tmp0, zero, tmp1, s2, s3); /* stage4 */ - BUTTERFLY_8(m0, m1, m2, m3, 
s4, s2, s3, s7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + BUTTERFLY_8(m0, m1, m2, m3, s4, s2, s3, s7, in0, in1, in2, in3, in4, in5, in6, + in7); + TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); + VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); /* final rounding (add 2^4, divide by 2^5) and shift */ SRARI_H4_SH(in0, in1, in2, in3, 5); diff --git a/libs/libvpx/vpx_dsp/mips/intrapred16_dspr2.c b/libs/libvpx/vpx_dsp/mips/intrapred16_dspr2.c index 11444c718e..3e29d0ac39 100644 --- a/libs/libvpx/vpx_dsp/mips/intrapred16_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/intrapred16_dspr2.c @@ -13,10 +13,10 @@ #if HAVE_DSPR2 void vpx_h_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { - int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; - int32_t tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16; + int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; + int32_t tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16; - __asm__ __volatile__ ( + __asm__ __volatile__( "lb %[tmp1], (%[left]) \n\t" "lb %[tmp2], 1(%[left]) \n\t" "lb %[tmp3], 2(%[left]) \n\t" @@ -146,26 +146,23 @@ void vpx_h_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride, "sw %[tmp16], 8(%[dst]) \n\t" "sw %[tmp16], 12(%[dst]) \n\t" - : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), - [tmp3] "=&r" (tmp3), [tmp4] "=&r" (tmp4), - [tmp5] "=&r" (tmp5), [tmp7] "=&r" (tmp7), - [tmp6] "=&r" (tmp6), [tmp8] "=&r" (tmp8), - [tmp9] "=&r" (tmp9), [tmp10] "=&r" (tmp10), - [tmp11] "=&r" (tmp11), [tmp12] "=&r" (tmp12), - [tmp13] "=&r" (tmp13), [tmp14] "=&r" (tmp14), - [tmp15] "=&r" (tmp15), [tmp16] "=&r" (tmp16) - : [left] "r" (left), [dst] "r" (dst), [stride] "r" (stride) - 
); + : [tmp1] "=&r"(tmp1), [tmp2] "=&r"(tmp2), [tmp3] "=&r"(tmp3), + [tmp4] "=&r"(tmp4), [tmp5] "=&r"(tmp5), [tmp7] "=&r"(tmp7), + [tmp6] "=&r"(tmp6), [tmp8] "=&r"(tmp8), [tmp9] "=&r"(tmp9), + [tmp10] "=&r"(tmp10), [tmp11] "=&r"(tmp11), [tmp12] "=&r"(tmp12), + [tmp13] "=&r"(tmp13), [tmp14] "=&r"(tmp14), [tmp15] "=&r"(tmp15), + [tmp16] "=&r"(tmp16) + : [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride)); } void vpx_dc_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { - int32_t expected_dc; - int32_t average; - int32_t tmp, above1, above_l1, above_r1, left1, left_r1, left_l1; - int32_t above2, left2; + int32_t expected_dc; + int32_t average; + int32_t tmp, above1, above_l1, above_r1, left1, left_r1, left_l1; + int32_t above2, left2; - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[above1], (%[above]) \n\t" "lw %[above2], 4(%[above]) \n\t" "lw %[left1], (%[left]) \n\t" @@ -316,14 +313,12 @@ void vpx_dc_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride, "sw %[expected_dc], 8(%[dst]) \n\t" "sw %[expected_dc], 12(%[dst]) \n\t" - : [left1] "=&r" (left1), [above1] "=&r" (above1), - [left_l1] "=&r" (left_l1), [above_l1] "=&r" (above_l1), - [left_r1] "=&r" (left_r1), [above_r1] "=&r" (above_r1), - [above2] "=&r" (above2), [left2] "=&r" (left2), - [average] "=&r" (average), [tmp] "=&r" (tmp), - [expected_dc] "=&r" (expected_dc) - : [above] "r" (above), [left] "r" (left), - [dst] "r" (dst), [stride] "r" (stride) - ); + : [left1] "=&r"(left1), [above1] "=&r"(above1), [left_l1] "=&r"(left_l1), + [above_l1] "=&r"(above_l1), [left_r1] "=&r"(left_r1), + [above_r1] "=&r"(above_r1), [above2] "=&r"(above2), + [left2] "=&r"(left2), [average] "=&r"(average), [tmp] "=&r"(tmp), + [expected_dc] "=&r"(expected_dc) + : [above] "r"(above), [left] "r"(left), [dst] "r"(dst), + [stride] "r"(stride)); } #endif // #if HAVE_DSPR2 diff --git a/libs/libvpx/vpx_dsp/mips/intrapred4_dspr2.c b/libs/libvpx/vpx_dsp/mips/intrapred4_dspr2.c index 
03baf4c9cc..9f51d50c75 100644 --- a/libs/libvpx/vpx_dsp/mips/intrapred4_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/intrapred4_dspr2.c @@ -13,9 +13,9 @@ #if HAVE_DSPR2 void vpx_h_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { - int32_t tmp1, tmp2, tmp3, tmp4; + int32_t tmp1, tmp2, tmp3, tmp4; - __asm__ __volatile__ ( + __asm__ __volatile__( "lb %[tmp1], (%[left]) \n\t" "lb %[tmp2], 1(%[left]) \n\t" "lb %[tmp3], 2(%[left]) \n\t" @@ -32,19 +32,18 @@ void vpx_h_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, "add %[dst], %[dst], %[stride] \n\t" "sw %[tmp4], (%[dst]) \n\t" - : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), - [tmp3] "=&r" (tmp3), [tmp4] "=&r" (tmp4) - : [left] "r" (left), [dst] "r" (dst), [stride] "r" (stride) - ); + : [tmp1] "=&r"(tmp1), [tmp2] "=&r"(tmp2), [tmp3] "=&r"(tmp3), + [tmp4] "=&r"(tmp4) + : [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride)); } void vpx_dc_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { - int32_t expected_dc; - int32_t average; - int32_t tmp, above_c, above_l, above_r, left_c, left_r, left_l; + int32_t expected_dc; + int32_t average; + int32_t tmp, above_c, above_l, above_r, left_c, left_r, left_l; - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[above_c], (%[above]) \n\t" "lw %[left_c], (%[left]) \n\t" @@ -70,27 +69,26 @@ void vpx_dc_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, "add %[dst], %[dst], %[stride] \n\t" "sw %[expected_dc], (%[dst]) \n\t" - : [above_c] "=&r" (above_c), [above_l] "=&r" (above_l), - [above_r] "=&r" (above_r), [left_c] "=&r" (left_c), - [left_l] "=&r" (left_l), [left_r] "=&r" (left_r), - [average] "=&r" (average), [tmp] "=&r" (tmp), - [expected_dc] "=&r" (expected_dc) - : [above] "r" (above), [left] "r" (left), - [dst] "r" (dst), [stride] "r" (stride) - ); + : [above_c] "=&r"(above_c), [above_l] "=&r"(above_l), + [above_r] "=&r"(above_r), [left_c] "=&r"(left_c), + [left_l] "=&r"(left_l), 
[left_r] "=&r"(left_r), + [average] "=&r"(average), [tmp] "=&r"(tmp), + [expected_dc] "=&r"(expected_dc) + : [above] "r"(above), [left] "r"(left), [dst] "r"(dst), + [stride] "r"(stride)); } void vpx_tm_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { - int32_t abovel, abover; - int32_t left0, left1, left2, left3; - int32_t res0, res1; - int32_t resl; - int32_t resr; - int32_t top_left; - uint8_t *cm = vpx_ff_cropTbl; + int32_t abovel, abover; + int32_t left0, left1, left2, left3; + int32_t res0, res1; + int32_t resl; + int32_t resr; + int32_t top_left; + uint8_t *cm = vpx_ff_cropTbl; - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[resl], (%[above]) \n\t" "lbu %[left0], (%[left]) \n\t" @@ -174,7 +172,6 @@ void vpx_tm_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, "sra %[res0], %[res0], 16 \n\t" "lbux %[res0], %[res0](%[cm]) \n\t" - "sra %[res1], %[resr], 16 \n\t" "lbux %[res1], %[res1](%[cm]) \n\t" "sb %[res0], (%[dst]) \n\t" @@ -183,7 +180,6 @@ void vpx_tm_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, "sra %[res0], %[res0], 16 \n\t" "lbux %[res0], %[res0](%[cm]) \n\t" - "sb %[res1], 1(%[dst]) \n\t" "sra %[res1], %[resl], 16 \n\t" "lbux %[res1], %[res1](%[cm]) \n\t" @@ -218,12 +214,11 @@ void vpx_tm_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, "sb %[res0], 2(%[dst]) \n\t" "sb %[res1], 3(%[dst]) \n\t" - : [abovel] "=&r" (abovel), [abover] "=&r" (abover), - [left0] "=&r" (left0), [left1] "=&r" (left1), [left2] "=&r" (left2), - [res0] "=&r" (res0), [res1] "=&r" (res1), [left3] "=&r" (left3), - [resl] "=&r" (resl), [resr] "=&r" (resr), [top_left] "=&r" (top_left) - : [above] "r" (above), [left] "r" (left), - [dst] "r" (dst), [stride] "r" (stride), [cm] "r" (cm) - ); + : [abovel] "=&r"(abovel), [abover] "=&r"(abover), [left0] "=&r"(left0), + [left1] "=&r"(left1), [left2] "=&r"(left2), [res0] "=&r"(res0), + [res1] "=&r"(res1), [left3] "=&r"(left3), [resl] "=&r"(resl), + [resr] "=&r"(resr), 
[top_left] "=&r"(top_left) + : [above] "r"(above), [left] "r"(left), [dst] "r"(dst), + [stride] "r"(stride), [cm] "r"(cm)); } #endif // #if HAVE_DSPR2 diff --git a/libs/libvpx/vpx_dsp/mips/intrapred8_dspr2.c b/libs/libvpx/vpx_dsp/mips/intrapred8_dspr2.c index 196ff5a062..eac79d5100 100644 --- a/libs/libvpx/vpx_dsp/mips/intrapred8_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/intrapred8_dspr2.c @@ -13,9 +13,9 @@ #if HAVE_DSPR2 void vpx_h_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { - int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; + int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; - __asm__ __volatile__ ( + __asm__ __volatile__( "lb %[tmp1], (%[left]) \n\t" "lb %[tmp2], 1(%[left]) \n\t" "lb %[tmp3], 2(%[left]) \n\t" @@ -58,23 +58,20 @@ void vpx_h_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride, "sw %[tmp8], (%[dst]) \n\t" "sw %[tmp8], 4(%[dst]) \n\t" - : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), - [tmp3] "=&r" (tmp3), [tmp4] "=&r" (tmp4), - [tmp5] "=&r" (tmp5), [tmp7] "=&r" (tmp7), - [tmp6] "=&r" (tmp6), [tmp8] "=&r" (tmp8) - : [left] "r" (left), [dst] "r" (dst), - [stride] "r" (stride) - ); + : [tmp1] "=&r"(tmp1), [tmp2] "=&r"(tmp2), [tmp3] "=&r"(tmp3), + [tmp4] "=&r"(tmp4), [tmp5] "=&r"(tmp5), [tmp7] "=&r"(tmp7), + [tmp6] "=&r"(tmp6), [tmp8] "=&r"(tmp8) + : [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride)); } void vpx_dc_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { - int32_t expected_dc; - int32_t average; - int32_t tmp, above1, above_l1, above_r1, left1, left_r1, left_l1; - int32_t above2, above_l2, above_r2, left2, left_r2, left_l2; + int32_t expected_dc; + int32_t average; + int32_t tmp, above1, above_l1, above_r1, left1, left_r1, left_l1; + int32_t above2, above_l2, above_r2, left2, left_r2, left_l2; - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[above1], (%[above]) \n\t" "lw %[above2], 4(%[above]) \n\t" "lw %[left1], (%[left]) \n\t" 
@@ -137,30 +134,29 @@ void vpx_dc_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride, "sw %[expected_dc], (%[dst]) \n\t" "sw %[expected_dc], 4(%[dst]) \n\t" - : [above1] "=&r" (above1), [above_l1] "=&r" (above_l1), - [above_r1] "=&r" (above_r1), [left1] "=&r" (left1), - [left_l1] "=&r" (left_l1), [left_r1] "=&r" (left_r1), - [above2] "=&r" (above2), [above_l2] "=&r" (above_l2), - [above_r2] "=&r" (above_r2), [left2] "=&r" (left2), - [left_l2] "=&r" (left_l2), [left_r2] "=&r" (left_r2), - [average] "=&r" (average), [tmp] "=&r" (tmp), - [expected_dc] "=&r" (expected_dc) - : [above] "r" (above), [left] "r" (left), [dst] "r" (dst), - [stride] "r" (stride) - ); + : [above1] "=&r"(above1), [above_l1] "=&r"(above_l1), + [above_r1] "=&r"(above_r1), [left1] "=&r"(left1), + [left_l1] "=&r"(left_l1), [left_r1] "=&r"(left_r1), + [above2] "=&r"(above2), [above_l2] "=&r"(above_l2), + [above_r2] "=&r"(above_r2), [left2] "=&r"(left2), + [left_l2] "=&r"(left_l2), [left_r2] "=&r"(left_r2), + [average] "=&r"(average), [tmp] "=&r"(tmp), + [expected_dc] "=&r"(expected_dc) + : [above] "r"(above), [left] "r"(left), [dst] "r"(dst), + [stride] "r"(stride)); } void vpx_tm_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { - int32_t abovel, abover; - int32_t abovel_1, abover_1; - int32_t left0; - int32_t res0, res1, res2, res3; - int32_t reshw; - int32_t top_left; - uint8_t *cm = vpx_ff_cropTbl; + int32_t abovel, abover; + int32_t abovel_1, abover_1; + int32_t left0; + int32_t res0, res1, res2, res3; + int32_t reshw; + int32_t top_left; + uint8_t *cm = vpx_ff_cropTbl; - __asm__ __volatile__ ( + __asm__ __volatile__( "ulw %[reshw], (%[above]) \n\t" "ulw %[top_left], 4(%[above]) \n\t" @@ -595,13 +591,12 @@ void vpx_tm_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride, "sb %[res2], 6(%[dst]) \n\t" "sb %[res3], 7(%[dst]) \n\t" - : [abovel] "=&r" (abovel), [abover] "=&r" (abover), - [abovel_1] "=&r" (abovel_1), [abover_1] "=&r" (abover_1), - [left0] 
"=&r" (left0), [res2] "=&r" (res2), [res3] "=&r" (res3), - [res0] "=&r" (res0), [res1] "=&r" (res1), - [reshw] "=&r" (reshw), [top_left] "=&r" (top_left) - : [above] "r" (above), [left] "r" (left), - [dst] "r" (dst), [stride] "r" (stride), [cm] "r" (cm) - ); + : [abovel] "=&r"(abovel), [abover] "=&r"(abover), + [abovel_1] "=&r"(abovel_1), [abover_1] "=&r"(abover_1), + [left0] "=&r"(left0), [res2] "=&r"(res2), [res3] "=&r"(res3), + [res0] "=&r"(res0), [res1] "=&r"(res1), [reshw] "=&r"(reshw), + [top_left] "=&r"(top_left) + : [above] "r"(above), [left] "r"(left), [dst] "r"(dst), + [stride] "r"(stride), [cm] "r"(cm)); } #endif // #if HAVE_DSPR2 diff --git a/libs/libvpx/vpx_dsp/mips/intrapred_msa.c b/libs/libvpx/vpx_dsp/mips/intrapred_msa.c index f6fbe40162..b5ee943031 100644 --- a/libs/libvpx/vpx_dsp/mips/intrapred_msa.c +++ b/libs/libvpx/vpx_dsp/mips/intrapred_msa.c @@ -11,10 +11,11 @@ #include "./vpx_dsp_rtcd.h" #include "vpx_dsp/mips/macros_msa.h" -#define IPRED_SUBS_UH2_UH(in0, in1, out0, out1) { \ - out0 = __msa_subs_u_h(out0, in0); \ - out1 = __msa_subs_u_h(out1, in1); \ -} +#define IPRED_SUBS_UH2_UH(in0, in1, out0, out1) \ + { \ + out0 = __msa_subs_u_h(out0, in0); \ + out1 = __msa_subs_u_h(out1, in1); \ + } static void intra_predict_vert_4x4_msa(const uint8_t *src, uint8_t *dst, int32_t dst_stride) { @@ -150,8 +151,8 @@ static void intra_predict_horiz_32x32_msa(const uint8_t *src, uint8_t *dst, } static void intra_predict_dc_4x4_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t dst_stride) { + const uint8_t *src_left, uint8_t *dst, + int32_t dst_stride) { uint32_t val0, val1; v16i8 store, src = { 0 }; v8u16 sum_h; @@ -199,8 +200,8 @@ static void intra_predict_128dc_4x4_msa(uint8_t *dst, int32_t dst_stride) { } static void intra_predict_dc_8x8_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t dst_stride) { + const uint8_t *src_left, uint8_t *dst, + int32_t dst_stride) { uint64_t val0, val1; v16i8 store; 
v16u8 src = { 0 }; @@ -260,8 +261,8 @@ static void intra_predict_128dc_8x8_msa(uint8_t *dst, int32_t dst_stride) { } static void intra_predict_dc_16x16_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t dst_stride) { + const uint8_t *src_left, uint8_t *dst, + int32_t dst_stride) { v16u8 top, left, out; v8u16 sum_h, sum_top, sum_left; v4u32 sum_w; @@ -313,8 +314,8 @@ static void intra_predict_128dc_16x16_msa(uint8_t *dst, int32_t dst_stride) { } static void intra_predict_dc_32x32_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t dst_stride) { + const uint8_t *src_left, uint8_t *dst, + int32_t dst_stride) { uint32_t row; v16u8 top0, top1, left0, left1, out; v8u16 sum_h, sum_top0, sum_top1, sum_left0, sum_left1; @@ -381,8 +382,8 @@ static void intra_predict_128dc_32x32_msa(uint8_t *dst, int32_t dst_stride) { } static void intra_predict_tm_4x4_msa(const uint8_t *src_top_ptr, - const uint8_t *src_left, - uint8_t *dst, int32_t dst_stride) { + const uint8_t *src_left, uint8_t *dst, + int32_t dst_stride) { uint32_t val; uint8_t top_left = src_top_ptr[-1]; v16i8 src_left0, src_left1, src_left2, src_left3, tmp0, tmp1, src_top = { 0 }; @@ -409,8 +410,8 @@ static void intra_predict_tm_4x4_msa(const uint8_t *src_top_ptr, } static void intra_predict_tm_8x8_msa(const uint8_t *src_top_ptr, - const uint8_t *src_left, - uint8_t *dst, int32_t dst_stride) { + const uint8_t *src_left, uint8_t *dst, + int32_t dst_stride) { uint64_t val; uint8_t top_left = src_top_ptr[-1]; uint32_t loop_cnt; @@ -442,8 +443,8 @@ static void intra_predict_tm_8x8_msa(const uint8_t *src_top_ptr, } static void intra_predict_tm_16x16_msa(const uint8_t *src_top_ptr, - const uint8_t *src_left, - uint8_t *dst, int32_t dst_stride) { + const uint8_t *src_left, uint8_t *dst, + int32_t dst_stride) { uint8_t top_left = src_top_ptr[-1]; uint32_t loop_cnt; v16i8 src_top, src_left0, src_left1, src_left2, src_left3; @@ -491,8 +492,8 @@ static void 
intra_predict_tm_16x16_msa(const uint8_t *src_top_ptr, } static void intra_predict_tm_32x32_msa(const uint8_t *src_top, - const uint8_t *src_left, - uint8_t *dst, int32_t dst_stride) { + const uint8_t *src_left, uint8_t *dst, + int32_t dst_stride) { uint8_t top_left = src_top[-1]; uint32_t loop_cnt; v16i8 src_top0, src_top1, src_left0, src_left1, src_left2, src_left3; diff --git a/libs/libvpx/vpx_dsp/mips/inv_txfm_dspr2.h b/libs/libvpx/vpx_dsp/mips/inv_txfm_dspr2.h index abd8509118..edd54aec5e 100644 --- a/libs/libvpx/vpx_dsp/mips/inv_txfm_dspr2.h +++ b/libs/libvpx/vpx_dsp/mips/inv_txfm_dspr2.h @@ -23,31 +23,39 @@ extern "C" { #endif #if HAVE_DSPR2 -#define DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input) ({ \ +#define DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input) \ + ({ \ \ - int32_t tmp, out; \ - int dct_cost_rounding = DCT_CONST_ROUNDING; \ - int in = input; \ + int32_t tmp, out; \ + int dct_cost_rounding = DCT_CONST_ROUNDING; \ + int in = input; \ \ - __asm__ __volatile__ ( \ - /* out = dct_const_round_shift(input_dc * cospi_16_64); */ \ - "mtlo %[dct_cost_rounding], $ac1 \n\t"\ - "mthi $zero, $ac1 \n\t"\ - "madd $ac1, %[in], %[cospi_16_64] \n\t"\ - "extp %[tmp], $ac1, 31 \n\t"\ + __asm__ __volatile__(/* out = dct_const_round_shift(dc * cospi_16_64); */ \ + "mtlo %[dct_cost_rounding], $ac1 " \ + " \n\t" \ + "mthi $zero, $ac1 " \ + " \n\t" \ + "madd $ac1, %[in], " \ + "%[cospi_16_64] \n\t" \ + "extp %[tmp], $ac1, " \ + "31 \n\t" \ \ - /* out = dct_const_round_shift(out * cospi_16_64); */ \ - "mtlo %[dct_cost_rounding], $ac2 \n\t"\ - "mthi $zero, $ac2 \n\t"\ - "madd $ac2, %[tmp], %[cospi_16_64] \n\t"\ - "extp %[out], $ac2, 31 \n\t"\ + /* out = dct_const_round_shift(out * cospi_16_64); */ \ + "mtlo %[dct_cost_rounding], $ac2 " \ + " \n\t" \ + "mthi $zero, $ac2 " \ + " \n\t" \ + "madd $ac2, %[tmp], " \ + "%[cospi_16_64] \n\t" \ + "extp %[out], $ac2, " \ + "31 \n\t" \ \ - : [tmp] "=&r" (tmp), [out] "=r" (out) \ - : [in] "r" (in), \ - [dct_cost_rounding] "r" 
(dct_cost_rounding), \ - [cospi_16_64] "r" (cospi_16_64) \ - ); \ - out; }) + : [tmp] "=&r"(tmp), [out] "=r"(out) \ + : [in] "r"(in), \ + [dct_cost_rounding] "r"(dct_cost_rounding), \ + [cospi_16_64] "r"(cospi_16_64)); \ + out; \ + }) void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, int dest_stride); @@ -59,10 +67,8 @@ void idct8_rows_dspr2(const int16_t *input, int16_t *output, uint32_t no_rows); void idct8_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, int dest_stride); void iadst8_dspr2(const int16_t *input, int16_t *output); -void idct16_rows_dspr2(const int16_t *input, int16_t *output, - uint32_t no_rows); -void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, - int dest_stride); +void idct16_rows_dspr2(const int16_t *input, int16_t *output, uint32_t no_rows); +void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, int dest_stride); void iadst16_dspr2(const int16_t *input, int16_t *output); #endif // #if HAVE_DSPR2 diff --git a/libs/libvpx/vpx_dsp/mips/inv_txfm_msa.h b/libs/libvpx/vpx_dsp/mips/inv_txfm_msa.h index 1458561a61..8b8a4173d2 100644 --- a/libs/libvpx/vpx_dsp/mips/inv_txfm_msa.h +++ b/libs/libvpx/vpx_dsp/mips/inv_txfm_msa.h @@ -15,391 +15,392 @@ #include "vpx_dsp/mips/txfm_macros_msa.h" #include "vpx_dsp/txfm_common.h" -#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \ - v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \ - v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \ - cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \ - v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, \ - -cospi_16_64, cospi_24_64, -cospi_24_64, 0, 0 }; \ - \ - SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m); \ - cnst2_m = -cnst0_m; \ - ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \ - SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m); \ - cnst4_m = -cnst2_m; \ - 
ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \ - \ - ILVRL_H2_SH(in0, in7, vec1_m, vec0_m); \ - ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \ - DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \ - cnst1_m, cnst2_m, cnst3_m, in7, in0, \ - in4, in3); \ - \ - SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m); \ - cnst2_m = -cnst0_m; \ - ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \ - SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m); \ - cnst4_m = -cnst2_m; \ - ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \ - \ - ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \ - ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \ - \ - DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \ - cnst1_m, cnst2_m, cnst3_m, in5, in2, \ - in6, in1); \ - BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \ - out7 = -s0_m; \ - out0 = s1_m; \ - \ - SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, \ - cnst0_m, cnst1_m, cnst2_m, cnst3_m); \ - \ - ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m); \ - cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ - cnst1_m = cnst0_m; \ - \ - ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \ - ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \ - DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \ - cnst2_m, cnst3_m, cnst1_m, out1, out6, \ - s0_m, s1_m); \ - \ - SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \ - cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ - \ - ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \ - ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m); \ - out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ - out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \ - out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \ - out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \ - \ - out1 = -out1; \ - out3 = -out3; \ - out5 = -out5; \ -} +#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \ + out3, out4, out5, out6, out7) \ + { \ + v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, 
cnst4_m; \ + v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \ + v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \ + cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \ + v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \ + cospi_24_64, -cospi_24_64, 0, 0 }; \ + \ + SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m); \ + cnst2_m = -cnst0_m; \ + ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \ + SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m); \ + cnst4_m = -cnst2_m; \ + ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \ + \ + ILVRL_H2_SH(in0, in7, vec1_m, vec0_m); \ + ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \ + DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m, \ + cnst2_m, cnst3_m, in7, in0, in4, in3); \ + \ + SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m); \ + cnst2_m = -cnst0_m; \ + ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \ + SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m); \ + cnst4_m = -cnst2_m; \ + ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \ + \ + ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \ + ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \ + \ + DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst1_m, \ + cnst2_m, cnst3_m, in5, in2, in6, in1); \ + BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \ + out7 = -s0_m; \ + out0 = s1_m; \ + \ + SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, cnst0_m, cnst1_m, cnst2_m, cnst3_m); \ + \ + ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m); \ + cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ + cnst1_m = cnst0_m; \ + \ + ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \ + ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \ + DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, cnst2_m, \ + cnst3_m, cnst1_m, out1, out6, s0_m, s1_m); \ + \ + SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \ + cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ + \ + ILVRL_H2_SH(in2, in5, vec1_m, 
vec0_m); \ + ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m); \ + out3 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ + out4 = DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \ + out2 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \ + out5 = DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \ + \ + out1 = -out1; \ + out3 = -out3; \ + out5 = -out5; \ + } -#define VP9_SET_COSPI_PAIR(c0_h, c1_h) ({ \ - v8i16 out0_m, r0_m, r1_m; \ - \ - r0_m = __msa_fill_h(c0_h); \ - r1_m = __msa_fill_h(c1_h); \ - out0_m = __msa_ilvev_h(r1_m, r0_m); \ - \ - out0_m; \ -}) +#define VP9_SET_COSPI_PAIR(c0_h, c1_h) \ + ({ \ + v8i16 out0_m, r0_m, r1_m; \ + \ + r0_m = __msa_fill_h(c0_h); \ + r1_m = __msa_fill_h(c1_h); \ + out0_m = __msa_ilvev_h(r1_m, r0_m); \ + \ + out0_m; \ + }) -#define VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3) { \ - uint8_t *dst_m = (uint8_t *) (dst); \ - v16u8 dst0_m, dst1_m, dst2_m, dst3_m; \ - v16i8 tmp0_m, tmp1_m; \ - v16i8 zero_m = { 0 }; \ - v8i16 res0_m, res1_m, res2_m, res3_m; \ - \ - LD_UB4(dst_m, dst_stride, dst0_m, dst1_m, dst2_m, dst3_m); \ - ILVR_B4_SH(zero_m, dst0_m, zero_m, dst1_m, zero_m, dst2_m, \ - zero_m, dst3_m, res0_m, res1_m, res2_m, res3_m); \ - ADD4(res0_m, in0, res1_m, in1, res2_m, in2, res3_m, in3, \ - res0_m, res1_m, res2_m, res3_m); \ - CLIP_SH4_0_255(res0_m, res1_m, res2_m, res3_m); \ - PCKEV_B2_SB(res1_m, res0_m, res3_m, res2_m, tmp0_m, tmp1_m); \ - ST8x4_UB(tmp0_m, tmp1_m, dst_m, dst_stride); \ -} +#define VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3) \ + { \ + uint8_t *dst_m = (uint8_t *)(dst); \ + v16u8 dst0_m, dst1_m, dst2_m, dst3_m; \ + v16i8 tmp0_m, tmp1_m; \ + v16i8 zero_m = { 0 }; \ + v8i16 res0_m, res1_m, res2_m, res3_m; \ + \ + LD_UB4(dst_m, dst_stride, dst0_m, dst1_m, dst2_m, dst3_m); \ + ILVR_B4_SH(zero_m, dst0_m, zero_m, dst1_m, zero_m, dst2_m, zero_m, dst3_m, \ + res0_m, res1_m, res2_m, res3_m); \ + ADD4(res0_m, in0, res1_m, in1, res2_m, in2, res3_m, in3, res0_m, res1_m, \ + res2_m, res3_m); \ + 
CLIP_SH4_0_255(res0_m, res1_m, res2_m, res3_m); \ + PCKEV_B2_SB(res1_m, res0_m, res3_m, res2_m, tmp0_m, tmp1_m); \ + ST8x4_UB(tmp0_m, tmp1_m, dst_m, dst_stride); \ + } -#define VP9_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3) { \ - v8i16 c0_m, c1_m, c2_m, c3_m; \ - v8i16 step0_m, step1_m; \ - v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - \ - c0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \ - c1_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \ - step0_m = __msa_ilvr_h(in2, in0); \ - DOTP_SH2_SW(step0_m, step0_m, c0_m, c1_m, tmp0_m, tmp1_m); \ - \ - c2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \ - c3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \ - step1_m = __msa_ilvr_h(in3, in1); \ - DOTP_SH2_SW(step1_m, step1_m, c2_m, c3_m, tmp2_m, tmp3_m); \ - SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \ - \ - PCKEV_H2_SW(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp0_m, tmp2_m); \ - SLDI_B2_0_SW(tmp0_m, tmp2_m, tmp1_m, tmp3_m, 8); \ - BUTTERFLY_4((v8i16)tmp0_m, (v8i16)tmp1_m, \ - (v8i16)tmp2_m, (v8i16)tmp3_m, \ - out0, out1, out2, out3); \ -} +#define VP9_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + v8i16 c0_m, c1_m, c2_m, c3_m; \ + v8i16 step0_m, step1_m; \ + v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + \ + c0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \ + c1_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \ + step0_m = __msa_ilvr_h(in2, in0); \ + DOTP_SH2_SW(step0_m, step0_m, c0_m, c1_m, tmp0_m, tmp1_m); \ + \ + c2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \ + c3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \ + step1_m = __msa_ilvr_h(in3, in1); \ + DOTP_SH2_SW(step1_m, step1_m, c2_m, c3_m, tmp2_m, tmp3_m); \ + SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \ + \ + PCKEV_H2_SW(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp0_m, tmp2_m); \ + SLDI_B2_0_SW(tmp0_m, tmp2_m, tmp1_m, tmp3_m, 8); \ + BUTTERFLY_4((v8i16)tmp0_m, (v8i16)tmp1_m, (v8i16)tmp2_m, (v8i16)tmp3_m, \ + out0, out1, out2, out3); \ + } -#define 
VP9_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3) { \ - v8i16 res0_m, res1_m, c0_m, c1_m; \ - v8i16 k1_m, k2_m, k3_m, k4_m; \ - v8i16 zero_m = { 0 }; \ - v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - v4i32 int0_m, int1_m, int2_m, int3_m; \ - v8i16 mask_m = { sinpi_1_9, sinpi_2_9, sinpi_3_9, \ - sinpi_4_9, -sinpi_1_9, -sinpi_2_9, -sinpi_3_9, \ - -sinpi_4_9 }; \ - \ - SPLATI_H4_SH(mask_m, 3, 0, 1, 2, c0_m, c1_m, k1_m, k2_m); \ - ILVEV_H2_SH(c0_m, c1_m, k1_m, k2_m, c0_m, c1_m); \ - ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m); \ - DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp2_m, tmp1_m); \ - int0_m = tmp2_m + tmp1_m; \ - \ - SPLATI_H2_SH(mask_m, 4, 7, k4_m, k3_m); \ - ILVEV_H2_SH(k4_m, k1_m, k3_m, k2_m, c0_m, c1_m); \ - DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m); \ - int1_m = tmp0_m + tmp1_m; \ - \ - c0_m = __msa_splati_h(mask_m, 6); \ - ILVL_H2_SH(k2_m, c0_m, zero_m, k2_m, c0_m, c1_m); \ - ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m); \ - DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m); \ - int2_m = tmp0_m + tmp1_m; \ - \ - c0_m = __msa_splati_h(mask_m, 6); \ - c0_m = __msa_ilvev_h(c0_m, k1_m); \ - \ - res0_m = __msa_ilvr_h((in1), (in3)); \ - tmp0_m = __msa_dotp_s_w(res0_m, c0_m); \ - int3_m = tmp2_m + tmp0_m; \ - \ - res0_m = __msa_ilvr_h((in2), (in3)); \ - c1_m = __msa_ilvev_h(k4_m, k3_m); \ - \ - tmp2_m = __msa_dotp_s_w(res0_m, c1_m); \ - res1_m = __msa_ilvr_h((in0), (in2)); \ - c1_m = __msa_ilvev_h(k1_m, zero_m); \ - \ - tmp3_m = __msa_dotp_s_w(res1_m, c1_m); \ - int3_m += tmp2_m; \ - int3_m += tmp3_m; \ - \ - SRARI_W4_SW(int0_m, int1_m, int2_m, int3_m, DCT_CONST_BITS); \ - PCKEV_H2_SH(int0_m, int0_m, int1_m, int1_m, out0, out1); \ - PCKEV_H2_SH(int2_m, int2_m, int3_m, int3_m, out2, out3); \ -} +#define VP9_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + v8i16 res0_m, res1_m, c0_m, c1_m; \ + v8i16 k1_m, k2_m, k3_m, k4_m; \ + v8i16 zero_m = { 0 }; \ + v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + v4i32 int0_m, int1_m, int2_m, 
int3_m; \ + v8i16 mask_m = { sinpi_1_9, sinpi_2_9, sinpi_3_9, sinpi_4_9, \ + -sinpi_1_9, -sinpi_2_9, -sinpi_3_9, -sinpi_4_9 }; \ + \ + SPLATI_H4_SH(mask_m, 3, 0, 1, 2, c0_m, c1_m, k1_m, k2_m); \ + ILVEV_H2_SH(c0_m, c1_m, k1_m, k2_m, c0_m, c1_m); \ + ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m); \ + DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp2_m, tmp1_m); \ + int0_m = tmp2_m + tmp1_m; \ + \ + SPLATI_H2_SH(mask_m, 4, 7, k4_m, k3_m); \ + ILVEV_H2_SH(k4_m, k1_m, k3_m, k2_m, c0_m, c1_m); \ + DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m); \ + int1_m = tmp0_m + tmp1_m; \ + \ + c0_m = __msa_splati_h(mask_m, 6); \ + ILVL_H2_SH(k2_m, c0_m, zero_m, k2_m, c0_m, c1_m); \ + ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m); \ + DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m); \ + int2_m = tmp0_m + tmp1_m; \ + \ + c0_m = __msa_splati_h(mask_m, 6); \ + c0_m = __msa_ilvev_h(c0_m, k1_m); \ + \ + res0_m = __msa_ilvr_h((in1), (in3)); \ + tmp0_m = __msa_dotp_s_w(res0_m, c0_m); \ + int3_m = tmp2_m + tmp0_m; \ + \ + res0_m = __msa_ilvr_h((in2), (in3)); \ + c1_m = __msa_ilvev_h(k4_m, k3_m); \ + \ + tmp2_m = __msa_dotp_s_w(res0_m, c1_m); \ + res1_m = __msa_ilvr_h((in0), (in2)); \ + c1_m = __msa_ilvev_h(k1_m, zero_m); \ + \ + tmp3_m = __msa_dotp_s_w(res1_m, c1_m); \ + int3_m += tmp2_m; \ + int3_m += tmp3_m; \ + \ + SRARI_W4_SW(int0_m, int1_m, int2_m, int3_m, DCT_CONST_BITS); \ + PCKEV_H2_SH(int0_m, int0_m, int1_m, int1_m, out0, out1); \ + PCKEV_H2_SH(int2_m, int2_m, int3_m, int3_m, out2, out3); \ + } -#define VP9_SET_CONST_PAIR(mask_h, idx1_h, idx2_h) ({ \ - v8i16 c0_m, c1_m; \ - \ - SPLATI_H2_SH(mask_h, idx1_h, idx2_h, c0_m, c1_m); \ - c0_m = __msa_ilvev_h(c1_m, c0_m); \ - \ - c0_m; \ -}) +#define VP9_SET_CONST_PAIR(mask_h, idx1_h, idx2_h) \ + ({ \ + v8i16 c0_m, c1_m; \ + \ + SPLATI_H2_SH(mask_h, idx1_h, idx2_h, c0_m, c1_m); \ + c0_m = __msa_ilvev_h(c1_m, c0_m); \ + \ + c0_m; \ + }) /* multiply and add macro */ -#define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, 
cst3, \ - out0, out1, out2, out3) { \ - v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m; \ - v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - \ - ILVRL_H2_SH(inp1, inp0, madd_s1_m, madd_s0_m); \ - ILVRL_H2_SH(inp3, inp2, madd_s3_m, madd_s2_m); \ - DOTP_SH4_SW(madd_s1_m, madd_s0_m, madd_s1_m, madd_s0_m, \ - cst0, cst0, cst1, cst1, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ - SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \ - PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out1); \ - DOTP_SH4_SW(madd_s3_m, madd_s2_m, madd_s3_m, madd_s2_m, \ - cst2, cst2, cst3, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ - SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \ - PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out2, out3); \ -} +#define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, out0, out1, \ + out2, out3) \ + { \ + v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m; \ + v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + \ + ILVRL_H2_SH(inp1, inp0, madd_s1_m, madd_s0_m); \ + ILVRL_H2_SH(inp3, inp2, madd_s3_m, madd_s2_m); \ + DOTP_SH4_SW(madd_s1_m, madd_s0_m, madd_s1_m, madd_s0_m, cst0, cst0, cst1, \ + cst1, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ + SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \ + PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out1); \ + DOTP_SH4_SW(madd_s3_m, madd_s2_m, madd_s3_m, madd_s2_m, cst2, cst2, cst3, \ + cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ + SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \ + PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out2, out3); \ + } /* idct 8x8 macro */ -#define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - v8i16 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m; \ - v8i16 k0_m, k1_m, k2_m, k3_m, res0_m, res1_m, res2_m, res3_m; \ - v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - v8i16 mask_m = { cospi_28_64, cospi_4_64, cospi_20_64, cospi_12_64, \ - cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 }; \ - \ - k0_m = 
VP9_SET_CONST_PAIR(mask_m, 0, 5); \ - k1_m = VP9_SET_CONST_PAIR(mask_m, 1, 0); \ - k2_m = VP9_SET_CONST_PAIR(mask_m, 6, 3); \ - k3_m = VP9_SET_CONST_PAIR(mask_m, 3, 2); \ - VP9_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5); \ - SUB2(in1, in3, in7, in5, res0_m, res1_m); \ - k0_m = VP9_SET_CONST_PAIR(mask_m, 4, 7); \ - k1_m = __msa_splati_h(mask_m, 4); \ - \ - ILVRL_H2_SH(res0_m, res1_m, res2_m, res3_m); \ - DOTP_SH4_SW(res2_m, res3_m, res2_m, res3_m, k0_m, k0_m, k1_m, k1_m, \ - tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ - SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \ - tp4_m = in1 + in3; \ - PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tp5_m, tp6_m); \ - tp7_m = in7 + in5; \ - k2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \ - k3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \ - VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m, \ - in0, in4, in2, in6); \ - BUTTERFLY_4(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m); \ - BUTTERFLY_8(tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m, \ - out0, out1, out2, out3, out4, out5, out6, out7); \ -} +#define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3, out4, out5, out6, out7) \ + { \ + v8i16 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m; \ + v8i16 k0_m, k1_m, k2_m, k3_m, res0_m, res1_m, res2_m, res3_m; \ + v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + v8i16 mask_m = { cospi_28_64, cospi_4_64, cospi_20_64, cospi_12_64, \ + cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 }; \ + \ + k0_m = VP9_SET_CONST_PAIR(mask_m, 0, 5); \ + k1_m = VP9_SET_CONST_PAIR(mask_m, 1, 0); \ + k2_m = VP9_SET_CONST_PAIR(mask_m, 6, 3); \ + k3_m = VP9_SET_CONST_PAIR(mask_m, 3, 2); \ + VP9_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5); \ + SUB2(in1, in3, in7, in5, res0_m, res1_m); \ + k0_m = VP9_SET_CONST_PAIR(mask_m, 4, 7); \ + k1_m = __msa_splati_h(mask_m, 4); \ + \ + ILVRL_H2_SH(res0_m, res1_m, res2_m, res3_m); \ + DOTP_SH4_SW(res2_m, 
res3_m, res2_m, res3_m, k0_m, k0_m, k1_m, k1_m, \ + tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ + SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \ + tp4_m = in1 + in3; \ + PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tp5_m, tp6_m); \ + tp7_m = in7 + in5; \ + k2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \ + k3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \ + VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m, in0, in4, in2, in6); \ + BUTTERFLY_4(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m); \ + BUTTERFLY_8(tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m, out0, \ + out1, out2, out3, out4, out5, out6, out7); \ + } -#define VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - v4i32 r0_m, r1_m, r2_m, r3_m, r4_m, r5_m, r6_m, r7_m; \ - v4i32 m0_m, m1_m, m2_m, m3_m, t0_m, t1_m; \ - v8i16 res0_m, res1_m, res2_m, res3_m, k0_m, k1_m, in_s0, in_s1; \ - v8i16 mask1_m = { cospi_2_64, cospi_30_64, -cospi_2_64, \ - cospi_10_64, cospi_22_64, -cospi_10_64, cospi_18_64, cospi_14_64 }; \ - v8i16 mask2_m = { cospi_14_64, -cospi_18_64, cospi_26_64, \ - cospi_6_64, -cospi_26_64, cospi_8_64, cospi_24_64, -cospi_8_64 }; \ - v8i16 mask3_m = { -cospi_24_64, cospi_8_64, cospi_16_64, \ - -cospi_16_64, 0, 0, 0, 0 }; \ - \ - k0_m = VP9_SET_CONST_PAIR(mask1_m, 0, 1); \ - k1_m = VP9_SET_CONST_PAIR(mask1_m, 1, 2); \ - ILVRL_H2_SH(in1, in0, in_s1, in_s0); \ - DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \ - r0_m, r1_m, r2_m, r3_m); \ - k0_m = VP9_SET_CONST_PAIR(mask1_m, 6, 7); \ - k1_m = VP9_SET_CONST_PAIR(mask2_m, 0, 1); \ - ILVRL_H2_SH(in5, in4, in_s1, in_s0); \ - DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \ - r4_m, r5_m, r6_m, r7_m); \ - ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \ - PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res0_m, res1_m); \ - SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, 
r6_m, r3_m, r7_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \ - PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, t0_m, t1_m); \ - k0_m = VP9_SET_CONST_PAIR(mask1_m, 3, 4); \ - k1_m = VP9_SET_CONST_PAIR(mask1_m, 4, 5); \ - ILVRL_H2_SH(in3, in2, in_s1, in_s0); \ - DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \ - r0_m, r1_m, r2_m, r3_m); \ - k0_m = VP9_SET_CONST_PAIR(mask2_m, 2, 3); \ - k1_m = VP9_SET_CONST_PAIR(mask2_m, 3, 4); \ - ILVRL_H2_SH(in7, in6, in_s1, in_s0); \ - DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \ - r4_m, r5_m, r6_m, r7_m); \ - ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \ - PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res2_m, res3_m); \ - SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \ - PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, r2_m, r3_m); \ - ILVRL_H2_SW(r3_m, r2_m, m2_m, m3_m); \ - BUTTERFLY_4(res0_m, res1_m, res3_m, res2_m, out0, in7, in4, in3); \ - k0_m = VP9_SET_CONST_PAIR(mask2_m, 5, 6); \ - k1_m = VP9_SET_CONST_PAIR(mask2_m, 6, 7); \ - ILVRL_H2_SH(t1_m, t0_m, in_s1, in_s0); \ - DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \ - r0_m, r1_m, r2_m, r3_m); \ - k1_m = VP9_SET_CONST_PAIR(mask3_m, 0, 1); \ - DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m, \ - r4_m, r5_m, r6_m, r7_m); \ - ADD4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \ - PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in1, out6); \ - SUB4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \ - PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in2, in5); \ - k0_m = VP9_SET_CONST_PAIR(mask3_m, 2, 2); \ - k1_m = VP9_SET_CONST_PAIR(mask3_m, 2, 3); \ - ILVRL_H2_SH(in4, 
in3, in_s1, in_s0); \ - DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \ - PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in3, out4); \ - ILVRL_H2_SW(in5, in2, m2_m, m3_m); \ - DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \ - PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, out2, in5); \ - \ - out1 = -in1; \ - out3 = -in3; \ - out5 = -in5; \ - out7 = -in7; \ -} +#define VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3, out4, out5, out6, out7) \ + { \ + v4i32 r0_m, r1_m, r2_m, r3_m, r4_m, r5_m, r6_m, r7_m; \ + v4i32 m0_m, m1_m, m2_m, m3_m, t0_m, t1_m; \ + v8i16 res0_m, res1_m, res2_m, res3_m, k0_m, k1_m, in_s0, in_s1; \ + v8i16 mask1_m = { cospi_2_64, cospi_30_64, -cospi_2_64, cospi_10_64, \ + cospi_22_64, -cospi_10_64, cospi_18_64, cospi_14_64 }; \ + v8i16 mask2_m = { cospi_14_64, -cospi_18_64, cospi_26_64, cospi_6_64, \ + -cospi_26_64, cospi_8_64, cospi_24_64, -cospi_8_64 }; \ + v8i16 mask3_m = { \ + -cospi_24_64, cospi_8_64, cospi_16_64, -cospi_16_64, 0, 0, 0, 0 \ + }; \ + \ + k0_m = VP9_SET_CONST_PAIR(mask1_m, 0, 1); \ + k1_m = VP9_SET_CONST_PAIR(mask1_m, 1, 2); \ + ILVRL_H2_SH(in1, in0, in_s1, in_s0); \ + DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r0_m, \ + r1_m, r2_m, r3_m); \ + k0_m = VP9_SET_CONST_PAIR(mask1_m, 6, 7); \ + k1_m = VP9_SET_CONST_PAIR(mask2_m, 0, 1); \ + ILVRL_H2_SH(in5, in4, in_s1, in_s0); \ + DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r4_m, \ + r5_m, r6_m, r7_m); \ + ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, m0_m, m1_m, m2_m, \ + m3_m); \ + SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \ + PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res0_m, res1_m); \ + SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, m0_m, m1_m, m2_m, \ + m3_m); \ + SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, 
DCT_CONST_BITS); \ + PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, t0_m, t1_m); \ + k0_m = VP9_SET_CONST_PAIR(mask1_m, 3, 4); \ + k1_m = VP9_SET_CONST_PAIR(mask1_m, 4, 5); \ + ILVRL_H2_SH(in3, in2, in_s1, in_s0); \ + DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r0_m, \ + r1_m, r2_m, r3_m); \ + k0_m = VP9_SET_CONST_PAIR(mask2_m, 2, 3); \ + k1_m = VP9_SET_CONST_PAIR(mask2_m, 3, 4); \ + ILVRL_H2_SH(in7, in6, in_s1, in_s0); \ + DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r4_m, \ + r5_m, r6_m, r7_m); \ + ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, m0_m, m1_m, m2_m, \ + m3_m); \ + SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \ + PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res2_m, res3_m); \ + SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, m0_m, m1_m, m2_m, \ + m3_m); \ + SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \ + PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, r2_m, r3_m); \ + ILVRL_H2_SW(r3_m, r2_m, m2_m, m3_m); \ + BUTTERFLY_4(res0_m, res1_m, res3_m, res2_m, out0, in7, in4, in3); \ + k0_m = VP9_SET_CONST_PAIR(mask2_m, 5, 6); \ + k1_m = VP9_SET_CONST_PAIR(mask2_m, 6, 7); \ + ILVRL_H2_SH(t1_m, t0_m, in_s1, in_s0); \ + DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, r0_m, \ + r1_m, r2_m, r3_m); \ + k1_m = VP9_SET_CONST_PAIR(mask3_m, 0, 1); \ + DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m, r4_m, r5_m, \ + r6_m, r7_m); \ + ADD4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m, m0_m, m1_m, m2_m, \ + m3_m); \ + SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \ + PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in1, out6); \ + SUB4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m, m0_m, m1_m, m2_m, \ + m3_m); \ + SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \ + PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in2, in5); \ + k0_m = VP9_SET_CONST_PAIR(mask3_m, 2, 2); \ + k1_m = VP9_SET_CONST_PAIR(mask3_m, 2, 3); \ + ILVRL_H2_SH(in4, in3, in_s1, in_s0); \ + DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, 
m0_m, \ + m1_m, m2_m, m3_m); \ + SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \ + PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in3, out4); \ + ILVRL_H2_SW(in5, in2, m2_m, m3_m); \ + DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m, m0_m, m1_m, \ + m2_m, m3_m); \ + SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, DCT_CONST_BITS); \ + PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, out2, in5); \ + \ + out1 = -in1; \ + out3 = -in3; \ + out5 = -in5; \ + out7 = -in7; \ + } -#define VP9_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8, \ - r9, r10, r11, r12, r13, r14, r15, \ - out0, out1, out2, out3, out4, out5, \ - out6, out7, out8, out9, out10, out11, \ - out12, out13, out14, out15) { \ - v8i16 g0_m, g1_m, g2_m, g3_m, g4_m, g5_m, g6_m, g7_m; \ - v8i16 g8_m, g9_m, g10_m, g11_m, g12_m, g13_m, g14_m, g15_m; \ - v8i16 h0_m, h1_m, h2_m, h3_m, h4_m, h5_m, h6_m, h7_m; \ - v8i16 h8_m, h9_m, h10_m, h11_m; \ - v8i16 k0_m, k1_m, k2_m, k3_m; \ - \ - /* stage 1 */ \ - k0_m = VP9_SET_COSPI_PAIR(cospi_1_64, cospi_31_64); \ - k1_m = VP9_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64); \ - k2_m = VP9_SET_COSPI_PAIR(cospi_17_64, cospi_15_64); \ - k3_m = VP9_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64); \ - MADD_BF(r15, r0, r7, r8, k0_m, k1_m, k2_m, k3_m, \ - g0_m, g1_m, g2_m, g3_m); \ - k0_m = VP9_SET_COSPI_PAIR(cospi_5_64, cospi_27_64); \ - k1_m = VP9_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64); \ - k2_m = VP9_SET_COSPI_PAIR(cospi_21_64, cospi_11_64); \ - k3_m = VP9_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64); \ - MADD_BF(r13, r2, r5, r10, k0_m, k1_m, k2_m, k3_m, \ - g4_m, g5_m, g6_m, g7_m); \ - k0_m = VP9_SET_COSPI_PAIR(cospi_9_64, cospi_23_64); \ - k1_m = VP9_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64); \ - k2_m = VP9_SET_COSPI_PAIR(cospi_25_64, cospi_7_64); \ - k3_m = VP9_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64); \ - MADD_BF(r11, r4, r3, r12, k0_m, k1_m, k2_m, k3_m, \ - g8_m, g9_m, g10_m, g11_m); \ - k0_m = VP9_SET_COSPI_PAIR(cospi_13_64, cospi_19_64); \ - k1_m = VP9_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64); \ - 
k2_m = VP9_SET_COSPI_PAIR(cospi_29_64, cospi_3_64); \ - k3_m = VP9_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64); \ - MADD_BF(r9, r6, r1, r14, k0_m, k1_m, k2_m, k3_m, \ - g12_m, g13_m, g14_m, g15_m); \ - \ - /* stage 2 */ \ - k0_m = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64); \ - k1_m = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64); \ - k2_m = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64); \ - MADD_BF(g1_m, g3_m, g9_m, g11_m, k0_m, k1_m, k2_m, k0_m, \ - h0_m, h1_m, h2_m, h3_m); \ - k0_m = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64); \ - k1_m = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64); \ - k2_m = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64); \ - MADD_BF(g7_m, g5_m, g15_m, g13_m, k0_m, k1_m, k2_m, k0_m, \ - h4_m, h5_m, h6_m, h7_m); \ - BUTTERFLY_4(h0_m, h2_m, h6_m, h4_m, out8, out9, out11, out10); \ - BUTTERFLY_8(g0_m, g2_m, g4_m, g6_m, g14_m, g12_m, g10_m, g8_m, \ - h8_m, h9_m, h10_m, h11_m, h6_m, h4_m, h2_m, h0_m); \ - \ - /* stage 3 */ \ - BUTTERFLY_4(h8_m, h9_m, h11_m, h10_m, out0, out1, h11_m, h10_m); \ - k0_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \ - k1_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \ - k2_m = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64); \ - MADD_BF(h0_m, h2_m, h4_m, h6_m, k0_m, k1_m, k2_m, k0_m, \ - out4, out6, out5, out7); \ - MADD_BF(h1_m, h3_m, h5_m, h7_m, k0_m, k1_m, k2_m, k0_m, \ - out12, out14, out13, out15); \ - \ - /* stage 4 */ \ - k0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \ - k1_m = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64); \ - k2_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \ - k3_m = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64); \ - MADD_SHORT(h10_m, h11_m, k1_m, k2_m, out2, out3); \ - MADD_SHORT(out6, out7, k0_m, k3_m, out6, out7); \ - MADD_SHORT(out10, out11, k0_m, k3_m, out10, out11); \ - MADD_SHORT(out14, out15, k1_m, k2_m, out14, out15); \ -} +#define VP9_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, \ + r12, r13, r14, r15, out0, out1, out2, out3, out4, \ + out5, out6, 
out7, out8, out9, out10, out11, out12, \ + out13, out14, out15) \ + { \ + v8i16 g0_m, g1_m, g2_m, g3_m, g4_m, g5_m, g6_m, g7_m; \ + v8i16 g8_m, g9_m, g10_m, g11_m, g12_m, g13_m, g14_m, g15_m; \ + v8i16 h0_m, h1_m, h2_m, h3_m, h4_m, h5_m, h6_m, h7_m; \ + v8i16 h8_m, h9_m, h10_m, h11_m; \ + v8i16 k0_m, k1_m, k2_m, k3_m; \ + \ + /* stage 1 */ \ + k0_m = VP9_SET_COSPI_PAIR(cospi_1_64, cospi_31_64); \ + k1_m = VP9_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64); \ + k2_m = VP9_SET_COSPI_PAIR(cospi_17_64, cospi_15_64); \ + k3_m = VP9_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64); \ + MADD_BF(r15, r0, r7, r8, k0_m, k1_m, k2_m, k3_m, g0_m, g1_m, g2_m, g3_m); \ + k0_m = VP9_SET_COSPI_PAIR(cospi_5_64, cospi_27_64); \ + k1_m = VP9_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64); \ + k2_m = VP9_SET_COSPI_PAIR(cospi_21_64, cospi_11_64); \ + k3_m = VP9_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64); \ + MADD_BF(r13, r2, r5, r10, k0_m, k1_m, k2_m, k3_m, g4_m, g5_m, g6_m, g7_m); \ + k0_m = VP9_SET_COSPI_PAIR(cospi_9_64, cospi_23_64); \ + k1_m = VP9_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64); \ + k2_m = VP9_SET_COSPI_PAIR(cospi_25_64, cospi_7_64); \ + k3_m = VP9_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64); \ + MADD_BF(r11, r4, r3, r12, k0_m, k1_m, k2_m, k3_m, g8_m, g9_m, g10_m, \ + g11_m); \ + k0_m = VP9_SET_COSPI_PAIR(cospi_13_64, cospi_19_64); \ + k1_m = VP9_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64); \ + k2_m = VP9_SET_COSPI_PAIR(cospi_29_64, cospi_3_64); \ + k3_m = VP9_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64); \ + MADD_BF(r9, r6, r1, r14, k0_m, k1_m, k2_m, k3_m, g12_m, g13_m, g14_m, \ + g15_m); \ + \ + /* stage 2 */ \ + k0_m = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64); \ + k1_m = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64); \ + k2_m = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64); \ + MADD_BF(g1_m, g3_m, g9_m, g11_m, k0_m, k1_m, k2_m, k0_m, h0_m, h1_m, h2_m, \ + h3_m); \ + k0_m = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64); \ + k1_m = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64); \ + k2_m = 
VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64); \ + MADD_BF(g7_m, g5_m, g15_m, g13_m, k0_m, k1_m, k2_m, k0_m, h4_m, h5_m, \ + h6_m, h7_m); \ + BUTTERFLY_4(h0_m, h2_m, h6_m, h4_m, out8, out9, out11, out10); \ + BUTTERFLY_8(g0_m, g2_m, g4_m, g6_m, g14_m, g12_m, g10_m, g8_m, h8_m, h9_m, \ + h10_m, h11_m, h6_m, h4_m, h2_m, h0_m); \ + \ + /* stage 3 */ \ + BUTTERFLY_4(h8_m, h9_m, h11_m, h10_m, out0, out1, h11_m, h10_m); \ + k0_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \ + k1_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \ + k2_m = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64); \ + MADD_BF(h0_m, h2_m, h4_m, h6_m, k0_m, k1_m, k2_m, k0_m, out4, out6, out5, \ + out7); \ + MADD_BF(h1_m, h3_m, h5_m, h7_m, k0_m, k1_m, k2_m, k0_m, out12, out14, \ + out13, out15); \ + \ + /* stage 4 */ \ + k0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \ + k1_m = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64); \ + k2_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \ + k3_m = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64); \ + MADD_SHORT(h10_m, h11_m, k1_m, k2_m, out2, out3); \ + MADD_SHORT(out6, out7, k0_m, k3_m, out6, out7); \ + MADD_SHORT(out10, out11, k0_m, k3_m, out10, out11); \ + MADD_SHORT(out14, out15, k1_m, k2_m, out14, out15); \ + } void vpx_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, int32_t dst_stride); diff --git a/libs/libvpx/vpx_dsp/mips/itrans16_dspr2.c b/libs/libvpx/vpx_dsp/mips/itrans16_dspr2.c index 6d41e6190b..0ec0c2059f 100644 --- a/libs/libvpx/vpx_dsp/mips/itrans16_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/itrans16_dspr2.c @@ -26,11 +26,11 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output, int result1, result2, result3, result4; const int const_2_power_13 = 8192; - for (i = no_rows; i--; ) { + for (i = no_rows; i--;) { /* prefetch row */ prefetch_load((const uint8_t *)(input + 16)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 0(%[input]) \n\t" "lh %[load2], 16(%[input]) \n\t" "lh %[load3], 8(%[input]) \n\t" @@ 
-64,19 +64,18 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output, "sub %[step1_2], %[step2_1], %[step2_2] \n\t" "sub %[step1_3], %[step2_0], %[step2_3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [result1] "=&r" (result1), [result2] "=&r" (result2), - [step2_0] "=&r" (step2_0), [step2_1] "=&r" (step2_1), - [step2_2] "=&r" (step2_2), [step2_3] "=&r" (step2_3), - [step1_0] "=r" (step1_0), [step1_1] "=r" (step1_1), - [step1_2] "=r" (step1_2), [step1_3] "=r" (step1_3) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64), - [cospi_16_64] "r" (cospi_16_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [result1] "=&r"(result1), + [result2] "=&r"(result2), [step2_0] "=&r"(step2_0), + [step2_1] "=&r"(step2_1), [step2_2] "=&r"(step2_2), + [step2_3] "=&r"(step2_3), [step1_0] "=r"(step1_0), + [step1_1] "=r"(step1_1), [step1_2] "=r"(step1_2), + [step1_3] "=r"(step1_3) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64), + [cospi_16_64] "r"(cospi_16_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load5], 2(%[input]) \n\t" "lh %[load6], 30(%[input]) \n\t" "lh %[load7], 18(%[input]) \n\t" @@ -126,19 +125,18 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output, "add %[step2_8], %[result1], %[result2] \n\t" "add %[step2_15], %[result4], %[result3] \n\t" - : [load5] "=&r" (load5), [load6] "=&r" (load6), - [load7] "=&r" (load7), [load8] "=&r" (load8), - [result1] "=&r" (result1), [result2] "=&r" (result2), - [result3] "=&r" (result3), [result4] "=&r" (result4), - [step2_8] "=r" (step2_8), [step2_15] "=r" (step2_15), - [step2_9] "=r" (step2_9), [step2_14] "=r" (step2_14) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_30_64] "r" (cospi_30_64), 
[cospi_2_64] "r" (cospi_2_64), - [cospi_14_64] "r" (cospi_14_64), [cospi_18_64] "r" (cospi_18_64), - [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64) - ); + : [load5] "=&r"(load5), [load6] "=&r"(load6), [load7] "=&r"(load7), + [load8] "=&r"(load8), [result1] "=&r"(result1), + [result2] "=&r"(result2), [result3] "=&r"(result3), + [result4] "=&r"(result4), [step2_8] "=r"(step2_8), + [step2_15] "=r"(step2_15), [step2_9] "=r"(step2_9), + [step2_14] "=r"(step2_14) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_30_64] "r"(cospi_30_64), [cospi_2_64] "r"(cospi_2_64), + [cospi_14_64] "r"(cospi_14_64), [cospi_18_64] "r"(cospi_18_64), + [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 10(%[input]) \n\t" "lh %[load2], 22(%[input]) \n\t" "lh %[load3], 26(%[input]) \n\t" @@ -188,19 +186,18 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output, "add %[step2_11], %[result1], %[result2] \n\t" "add %[step2_12], %[result4], %[result3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [result1] "=&r" (result1), [result2] "=&r" (result2), - [result3] "=&r" (result3), [result4] "=&r" (result4), - [step2_10] "=r" (step2_10), [step2_11] "=r" (step2_11), - [step2_12] "=r" (step2_12), [step2_13] "=r" (step2_13) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_22_64] "r" (cospi_22_64), [cospi_10_64] "r" (cospi_10_64), - [cospi_6_64] "r" (cospi_6_64), [cospi_26_64] "r" (cospi_26_64), - [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [result1] "=&r"(result1), + [result2] "=&r"(result2), [result3] "=&r"(result3), + [result4] "=&r"(result4), [step2_10] "=r"(step2_10), + [step2_11] "=r"(step2_11), [step2_12] "=r"(step2_12), + [step2_13] "=r"(step2_13) + : [const_2_power_13] 
"r"(const_2_power_13), [input] "r"(input), + [cospi_22_64] "r"(cospi_22_64), [cospi_10_64] "r"(cospi_10_64), + [cospi_6_64] "r"(cospi_6_64), [cospi_26_64] "r"(cospi_26_64), + [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load5], 4(%[input]) \n\t" "lh %[load6], 28(%[input]) \n\t" "lh %[load7], 20(%[input]) \n\t" @@ -253,19 +250,18 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output, "add %[step1_4], %[result1], %[result2] \n\t" "add %[step1_7], %[result4], %[result3] \n\t" - : [load5] "=&r" (load5), [load6] "=&r" (load6), - [load7] "=&r" (load7), [load8] "=&r" (load8), - [result1] "=&r" (result1), [result2] "=&r" (result2), - [result3] "=&r" (result3), [result4] "=&r" (result4), - [step1_4] "=r" (step1_4), [step1_5] "=r" (step1_5), - [step1_6] "=r" (step1_6), [step1_7] "=r" (step1_7) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64), - [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64), - [cospi_16_64] "r" (cospi_16_64) - ); + : [load5] "=&r"(load5), [load6] "=&r"(load6), [load7] "=&r"(load7), + [load8] "=&r"(load8), [result1] "=&r"(result1), + [result2] "=&r"(result2), [result3] "=&r"(result3), + [result4] "=&r"(result4), [step1_4] "=r"(step1_4), + [step1_5] "=r"(step1_5), [step1_6] "=r"(step1_6), + [step1_7] "=r"(step1_7) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_20_64] "r"(cospi_20_64), [cospi_12_64] "r"(cospi_12_64), + [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64), + [cospi_16_64] "r"(cospi_16_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "mtlo %[const_2_power_13], $ac1 \n\t" @@ -305,18 +301,16 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output, "extp %[step1_11], $ac2, 31 \n\t" "extp %[step1_12], $ac3, 31 \n\t" - : [load5] "=&r" (load5), [load6] "=&r" 
(load6), - [step1_10] "=r" (step1_10), [step1_11] "=r" (step1_11), - [step1_12] "=r" (step1_12), [step1_13] "=r" (step1_13) - : [const_2_power_13] "r" (const_2_power_13), - [step2_14] "r" (step2_14), [step2_13] "r" (step2_13), - [step2_9] "r" (step2_9), [step2_10] "r" (step2_10), - [step2_15] "r" (step2_15), [step2_12] "r" (step2_12), - [step2_8] "r" (step2_8), [step2_11] "r" (step2_11), - [cospi_16_64] "r" (cospi_16_64) - ); + : [load5] "=&r"(load5), [load6] "=&r"(load6), [step1_10] "=r"(step1_10), + [step1_11] "=r"(step1_11), [step1_12] "=r"(step1_12), + [step1_13] "=r"(step1_13) + : [const_2_power_13] "r"(const_2_power_13), [step2_14] "r"(step2_14), + [step2_13] "r"(step2_13), [step2_9] "r"(step2_9), + [step2_10] "r"(step2_10), [step2_15] "r"(step2_15), + [step2_12] "r"(step2_12), [step2_8] "r"(step2_8), + [step2_11] "r"(step2_11), [cospi_16_64] "r"(cospi_16_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "add %[load5], %[step1_0], %[step1_7] \n\t" "add %[load5], %[load5], %[step2_12] \n\t" "add %[load5], %[load5], %[step2_15] \n\t" @@ -350,17 +344,15 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output, "sh %[load5], 448(%[output]) \n\t" "sh %[load6], 480(%[output]) \n\t" - : [load5] "=&r" (load5), [load6] "=&r" (load6) - : [output] "r" (output), - [step1_0] "r" (step1_0), [step1_1] "r" (step1_1), - [step1_6] "r" (step1_6), [step1_7] "r" (step1_7), - [step2_8] "r" (step2_8), [step2_9] "r" (step2_9), - [step2_10] "r" (step2_10), [step2_11] "r" (step2_11), - [step2_12] "r" (step2_12), [step2_13] "r" (step2_13), - [step2_14] "r" (step2_14), [step2_15] "r" (step2_15) - ); + : [load5] "=&r"(load5), [load6] "=&r"(load6) + : [output] "r"(output), [step1_0] "r"(step1_0), [step1_1] "r"(step1_1), + [step1_6] "r"(step1_6), [step1_7] "r"(step1_7), + [step2_8] "r"(step2_8), [step2_9] "r"(step2_9), + [step2_10] "r"(step2_10), [step2_11] "r"(step2_11), + [step2_12] "r"(step2_12), [step2_13] "r"(step2_13), + [step2_14] "r"(step2_14), [step2_15] 
"r"(step2_15)); - __asm__ __volatile__ ( + __asm__ __volatile__( "add %[load5], %[step1_2], %[step1_5] \n\t" "add %[load5], %[load5], %[step1_13] \n\t" "add %[load6], %[step1_3], %[step1_4] \n\t" @@ -386,21 +378,18 @@ void idct16_rows_dspr2(const int16_t *input, int16_t *output, "sh %[load5], 384(%[output]) \n\t" "sh %[load6], 416(%[output]) \n\t" - : [load5] "=&r" (load5), [load6] "=&r" (load6) - : [output] "r" (output), - [step1_2] "r" (step1_2), [step1_3] "r" (step1_3), - [step1_4] "r" (step1_4), [step1_5] "r" (step1_5), - [step1_10] "r" (step1_10), [step1_11] "r" (step1_11), - [step1_12] "r" (step1_12), [step1_13] "r" (step1_13) - ); + : [load5] "=&r"(load5), [load6] "=&r"(load6) + : [output] "r"(output), [step1_2] "r"(step1_2), [step1_3] "r"(step1_3), + [step1_4] "r"(step1_4), [step1_5] "r"(step1_5), + [step1_10] "r"(step1_10), [step1_11] "r"(step1_11), + [step1_12] "r"(step1_12), [step1_13] "r"(step1_13)); input += 16; output += 1; } } -void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, - int dest_stride) { +void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, int dest_stride) { int i; int step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6, step1_7; int step1_8, step1_9, step1_10, step1_11; @@ -416,9 +405,9 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, /* prefetch vpx_ff_cropTbl */ prefetch_load(vpx_ff_cropTbl); - prefetch_load(vpx_ff_cropTbl + 32); - prefetch_load(vpx_ff_cropTbl + 64); - prefetch_load(vpx_ff_cropTbl + 96); + prefetch_load(vpx_ff_cropTbl + 32); + prefetch_load(vpx_ff_cropTbl + 64); + prefetch_load(vpx_ff_cropTbl + 96); prefetch_load(vpx_ff_cropTbl + 128); prefetch_load(vpx_ff_cropTbl + 160); prefetch_load(vpx_ff_cropTbl + 192); @@ -426,7 +415,7 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, for (i = 0; i < 16; ++i) { dest_pix = (dest + i); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 0(%[input]) \n\t" "lh %[load2], 16(%[input]) \n\t" "lh %[load3], 
8(%[input]) \n\t" @@ -460,19 +449,18 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "sub %[step1_2], %[step2_1], %[step2_2] \n\t" "sub %[step1_3], %[step2_0], %[step2_3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [result1] "=&r" (result1), [result2] "=&r" (result2), - [step2_0] "=&r" (step2_0), [step2_1] "=&r" (step2_1), - [step2_2] "=&r" (step2_2), [step2_3] "=&r" (step2_3), - [step1_0] "=r" (step1_0), [step1_1] "=r" (step1_1), - [step1_2] "=r" (step1_2), [step1_3] "=r" (step1_3) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64), - [cospi_16_64] "r" (cospi_16_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [result1] "=&r"(result1), + [result2] "=&r"(result2), [step2_0] "=&r"(step2_0), + [step2_1] "=&r"(step2_1), [step2_2] "=&r"(step2_2), + [step2_3] "=&r"(step2_3), [step1_0] "=r"(step1_0), + [step1_1] "=r"(step1_1), [step1_2] "=r"(step1_2), + [step1_3] "=r"(step1_3) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64), + [cospi_16_64] "r"(cospi_16_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load5], 2(%[input]) \n\t" "lh %[load6], 30(%[input]) \n\t" "lh %[load7], 18(%[input]) \n\t" @@ -522,19 +510,18 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "add %[step2_8], %[result1], %[result2] \n\t" "add %[step2_15], %[result4], %[result3] \n\t" - : [load5] "=&r" (load5), [load6] "=&r" (load6), - [load7] "=&r" (load7), [load8] "=&r" (load8), - [result1] "=&r" (result1), [result2] "=&r" (result2), - [result3] "=&r" (result3), [result4] "=&r" (result4), - [step2_8] "=r" (step2_8), [step2_15] "=r" (step2_15), - [step2_9] "=r" (step2_9), [step2_14] "=r" (step2_14) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_30_64] "r" 
(cospi_30_64), [cospi_2_64] "r" (cospi_2_64), - [cospi_14_64] "r" (cospi_14_64), [cospi_18_64] "r" (cospi_18_64), - [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64) - ); + : [load5] "=&r"(load5), [load6] "=&r"(load6), [load7] "=&r"(load7), + [load8] "=&r"(load8), [result1] "=&r"(result1), + [result2] "=&r"(result2), [result3] "=&r"(result3), + [result4] "=&r"(result4), [step2_8] "=r"(step2_8), + [step2_15] "=r"(step2_15), [step2_9] "=r"(step2_9), + [step2_14] "=r"(step2_14) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_30_64] "r"(cospi_30_64), [cospi_2_64] "r"(cospi_2_64), + [cospi_14_64] "r"(cospi_14_64), [cospi_18_64] "r"(cospi_18_64), + [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 10(%[input]) \n\t" "lh %[load2], 22(%[input]) \n\t" "lh %[load3], 26(%[input]) \n\t" @@ -584,19 +571,18 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "add %[step2_11], %[result1], %[result2] \n\t" "add %[step2_12], %[result4], %[result3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [result1] "=&r" (result1), [result2] "=&r" (result2), - [result3] "=&r" (result3), [result4] "=&r" (result4), - [step2_10] "=r" (step2_10), [step2_11] "=r" (step2_11), - [step2_12] "=r" (step2_12), [step2_13] "=r" (step2_13) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_22_64] "r" (cospi_22_64), [cospi_10_64] "r" (cospi_10_64), - [cospi_6_64] "r" (cospi_6_64), [cospi_26_64] "r" (cospi_26_64), - [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [result1] "=&r"(result1), + [result2] "=&r"(result2), [result3] "=&r"(result3), + [result4] "=&r"(result4), [step2_10] "=r"(step2_10), + [step2_11] "=r"(step2_11), [step2_12] "=r"(step2_12), + [step2_13] "=r"(step2_13) + : 
[const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_22_64] "r"(cospi_22_64), [cospi_10_64] "r"(cospi_10_64), + [cospi_6_64] "r"(cospi_6_64), [cospi_26_64] "r"(cospi_26_64), + [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load5], 4(%[input]) \n\t" "lh %[load6], 28(%[input]) \n\t" "lh %[load7], 20(%[input]) \n\t" @@ -650,19 +636,18 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "add %[step1_4], %[result1], %[result2] \n\t" "add %[step1_7], %[result4], %[result3] \n\t" - : [load5] "=&r" (load5), [load6] "=&r" (load6), - [load7] "=&r" (load7), [load8] "=&r" (load8), - [result1] "=&r" (result1), [result2] "=&r" (result2), - [result3] "=&r" (result3), [result4] "=&r" (result4), - [step1_4] "=r" (step1_4), [step1_5] "=r" (step1_5), - [step1_6] "=r" (step1_6), [step1_7] "=r" (step1_7) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64), - [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64), - [cospi_16_64] "r" (cospi_16_64) - ); + : [load5] "=&r"(load5), [load6] "=&r"(load6), [load7] "=&r"(load7), + [load8] "=&r"(load8), [result1] "=&r"(result1), + [result2] "=&r"(result2), [result3] "=&r"(result3), + [result4] "=&r"(result4), [step1_4] "=r"(step1_4), + [step1_5] "=r"(step1_5), [step1_6] "=r"(step1_6), + [step1_7] "=r"(step1_7) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_20_64] "r"(cospi_20_64), [cospi_12_64] "r"(cospi_12_64), + [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64), + [cospi_16_64] "r"(cospi_16_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "mtlo %[const_2_power_13], $ac1 \n\t" @@ -702,23 +687,21 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "extp %[step1_11], $ac2, 31 \n\t" "extp %[step1_12], $ac3, 31 \n\t" - : [load5] "=&r" 
(load5), [load6] "=&r" (load6), - [step1_10] "=r" (step1_10), [step1_11] "=r" (step1_11), - [step1_12] "=r" (step1_12), [step1_13] "=r" (step1_13) - : [const_2_power_13] "r" (const_2_power_13), - [step2_14] "r" (step2_14), [step2_13] "r" (step2_13), - [step2_9] "r" (step2_9), [step2_10] "r" (step2_10), - [step2_15] "r" (step2_15), [step2_12] "r" (step2_12), - [step2_8] "r" (step2_8), [step2_11] "r" (step2_11), - [cospi_16_64] "r" (cospi_16_64) - ); + : [load5] "=&r"(load5), [load6] "=&r"(load6), [step1_10] "=r"(step1_10), + [step1_11] "=r"(step1_11), [step1_12] "=r"(step1_12), + [step1_13] "=r"(step1_13) + : [const_2_power_13] "r"(const_2_power_13), [step2_14] "r"(step2_14), + [step2_13] "r"(step2_13), [step2_9] "r"(step2_9), + [step2_10] "r"(step2_10), [step2_15] "r"(step2_15), + [step2_12] "r"(step2_12), [step2_8] "r"(step2_8), + [step2_11] "r"(step2_11), [cospi_16_64] "r"(cospi_16_64)); step1_8 = step2_8 + step2_11; step1_9 = step2_9 + step2_10; step1_14 = step2_13 + step2_14; step1_15 = step2_12 + step2_15; - __asm__ __volatile__ ( + __asm__ __volatile__( "lbu %[load7], 0(%[dest_pix]) \n\t" "add %[load5], %[step1_0], %[step1_7] \n\t" "add %[load5], %[load5], %[step1_15] \n\t" @@ -870,18 +853,16 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "lbux %[load6], %[load8](%[cm]) \n\t" "sb %[load6], 0(%[dest_pix]) \n\t" - : [load5] "=&r" (load5), [load6] "=&r" (load6), [load7] "=&r" (load7), - [load8] "=&r" (load8), [dest_pix] "+r" (dest_pix) - : [cm] "r" (cm), [dest_stride] "r" (dest_stride), - [step1_0] "r" (step1_0), [step1_1] "r" (step1_1), - [step1_2] "r" (step1_2), [step1_3] "r" (step1_3), - [step1_4] "r" (step1_4), [step1_5] "r" (step1_5), - [step1_6] "r" (step1_6), [step1_7] "r" (step1_7), - [step1_8] "r" (step1_8), [step1_9] "r" (step1_9), - [step1_10] "r" (step1_10), [step1_11] "r" (step1_11), - [step1_12] "r" (step1_12), [step1_13] "r" (step1_13), - [step1_14] "r" (step1_14), [step1_15] "r" (step1_15) - ); + : [load5] "=&r"(load5), [load6] 
"=&r"(load6), [load7] "=&r"(load7), + [load8] "=&r"(load8), [dest_pix] "+r"(dest_pix) + : + [cm] "r"(cm), [dest_stride] "r"(dest_stride), [step1_0] "r"(step1_0), + [step1_1] "r"(step1_1), [step1_2] "r"(step1_2), [step1_3] "r"(step1_3), + [step1_4] "r"(step1_4), [step1_5] "r"(step1_5), [step1_6] "r"(step1_6), + [step1_7] "r"(step1_7), [step1_8] "r"(step1_8), [step1_9] "r"(step1_9), + [step1_10] "r"(step1_10), [step1_11] "r"(step1_11), + [step1_12] "r"(step1_12), [step1_13] "r"(step1_13), + [step1_14] "r"(step1_14), [step1_15] "r"(step1_15)); input += 16; } @@ -889,15 +870,11 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, void vpx_idct16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, int dest_stride) { - DECLARE_ALIGNED(32, int16_t, out[16 * 16]); + DECLARE_ALIGNED(32, int16_t, out[16 * 16]); uint32_t pos = 45; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" : : [pos] "r"(pos)); // First transform rows idct16_rows_dspr2(input, out, 16); @@ -908,17 +885,13 @@ void vpx_idct16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, void vpx_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest, int dest_stride) { - DECLARE_ALIGNED(32, int16_t, out[16 * 16]); + DECLARE_ALIGNED(32, int16_t, out[16 * 16]); int16_t *outptr = out; uint32_t i; uint32_t pos = 45; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" : : [pos] "r"(pos)); // First transform rows. Since all non-zero dct coefficients are in // upper-left 4x4 area, we only need to calculate first 4 rows here. 
@@ -926,7 +899,7 @@ void vpx_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest, outptr += 4; for (i = 0; i < 6; ++i) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sw $zero, 0(%[outptr]) \n\t" "sw $zero, 32(%[outptr]) \n\t" "sw $zero, 64(%[outptr]) \n\t" @@ -945,8 +918,7 @@ void vpx_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest, "sw $zero, 480(%[outptr]) \n\t" : - : [outptr] "r" (outptr) - ); + : [outptr] "r"(outptr)); outptr += 2; } @@ -966,35 +938,31 @@ void vpx_idct16x16_1_add_dspr2(const int16_t *input, uint8_t *dest, int32_t vector_1, vector_2, vector_3, vector_4; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + : + : [pos] "r"(pos)); out = DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input[0]); - __asm__ __volatile__ ( + __asm__ __volatile__( "addi %[out], %[out], 32 \n\t" "sra %[a1], %[out], 6 \n\t" - : [out] "+r" (out), [a1] "=r" (a1) - : - ); + : [out] "+r"(out), [a1] "=r"(a1) + :); if (a1 < 0) { /* use quad-byte * input and output memory are four byte aligned */ - __asm__ __volatile__ ( + __asm__ __volatile__( "abs %[absa1], %[a1] \n\t" "replv.qb %[vector_a1], %[absa1] \n\t" - : [absa1] "=r" (absa1), [vector_a1] "=r" (vector_a1) - : [a1] "r" (a1) - ); + : [absa1] "=r"(absa1), [vector_a1] "=r"(vector_a1) + : [a1] "r"(a1)); for (r = 16; r--;) { - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[t1], 0(%[dest]) \n\t" "lw %[t2], 4(%[dest]) \n\t" "lw %[t3], 8(%[dest]) \n\t" @@ -1009,25 +977,22 @@ void vpx_idct16x16_1_add_dspr2(const int16_t *input, uint8_t *dest, "sw %[vector_4], 12(%[dest]) \n\t" "add %[dest], %[dest], %[dest_stride] \n\t" - : [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3), [t4] "=&r" (t4), - [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2), - [vector_3] "=&r" (vector_3), [vector_4] "=&r" (vector_4), - [dest] "+&r" (dest) - : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1) - ); 
+ : [t1] "=&r"(t1), [t2] "=&r"(t2), [t3] "=&r"(t3), [t4] "=&r"(t4), + [vector_1] "=&r"(vector_1), [vector_2] "=&r"(vector_2), + [vector_3] "=&r"(vector_3), [vector_4] "=&r"(vector_4), + [dest] "+&r"(dest) + : [dest_stride] "r"(dest_stride), [vector_a1] "r"(vector_a1)); } } else { /* use quad-byte * input and output memory are four byte aligned */ - __asm__ __volatile__ ( - "replv.qb %[vector_a1], %[a1] \n\t" + __asm__ __volatile__("replv.qb %[vector_a1], %[a1] \n\t" - : [vector_a1] "=r" (vector_a1) - : [a1] "r" (a1) - ); + : [vector_a1] "=r"(vector_a1) + : [a1] "r"(a1)); for (r = 16; r--;) { - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[t1], 0(%[dest]) \n\t" "lw %[t2], 4(%[dest]) \n\t" "lw %[t3], 8(%[dest]) \n\t" @@ -1042,12 +1007,11 @@ void vpx_idct16x16_1_add_dspr2(const int16_t *input, uint8_t *dest, "sw %[vector_4], 12(%[dest]) \n\t" "add %[dest], %[dest], %[dest_stride] \n\t" - : [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3), [t4] "=&r" (t4), - [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2), - [vector_3] "=&r" (vector_3), [vector_4] "=&r" (vector_4), - [dest] "+&r" (dest) - : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1) - ); + : [t1] "=&r"(t1), [t2] "=&r"(t2), [t3] "=&r"(t3), [t4] "=&r"(t4), + [vector_1] "=&r"(vector_1), [vector_2] "=&r"(vector_2), + [vector_3] "=&r"(vector_3), [vector_4] "=&r"(vector_4), + [dest] "+&r"(dest) + : [dest_stride] "r"(dest_stride), [vector_a1] "r"(vector_a1)); } } } @@ -1072,21 +1036,20 @@ void iadst16_dspr2(const int16_t *input, int16_t *output) { int x14 = input[1]; int x15 = input[14]; - if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 - | x9 | x10 | x11 | x12 | x13 | x14 | x15)) { - output[0] = output[1] = output[2] = output[3] = output[4] - = output[5] = output[6] = output[7] = output[8] - = output[9] = output[10] = output[11] = output[12] - = output[13] = output[14] = output[15] = 0; + if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7 | x8 | x9 | x10 | x11 | x12 | + x13 | x14 | x15)) { + 
output[0] = output[1] = output[2] = output[3] = output[4] = output[5] = + output[6] = output[7] = output[8] = output[9] = output[10] = + output[11] = output[12] = output[13] = output[14] = output[15] = 0; return; } // stage 1 - s0 = x0 * cospi_1_64 + x1 * cospi_31_64; + s0 = x0 * cospi_1_64 + x1 * cospi_31_64; s1 = x0 * cospi_31_64 - x1 * cospi_1_64; - s2 = x2 * cospi_5_64 + x3 * cospi_27_64; + s2 = x2 * cospi_5_64 + x3 * cospi_27_64; s3 = x2 * cospi_27_64 - x3 * cospi_5_64; - s4 = x4 * cospi_9_64 + x5 * cospi_23_64; + s4 = x4 * cospi_9_64 + x5 * cospi_23_64; s5 = x4 * cospi_23_64 - x5 * cospi_9_64; s6 = x6 * cospi_13_64 + x7 * cospi_19_64; s7 = x6 * cospi_19_64 - x7 * cospi_13_64; @@ -1095,9 +1058,9 @@ void iadst16_dspr2(const int16_t *input, int16_t *output) { s10 = x10 * cospi_21_64 + x11 * cospi_11_64; s11 = x10 * cospi_11_64 - x11 * cospi_21_64; s12 = x12 * cospi_25_64 + x13 * cospi_7_64; - s13 = x12 * cospi_7_64 - x13 * cospi_25_64; + s13 = x12 * cospi_7_64 - x13 * cospi_25_64; s14 = x14 * cospi_29_64 + x15 * cospi_3_64; - s15 = x14 * cospi_3_64 - x15 * cospi_29_64; + s15 = x14 * cospi_3_64 - x15 * cospi_29_64; x0 = dct_const_round_shift(s0 + s8); x1 = dct_const_round_shift(s1 + s9); @@ -1107,8 +1070,8 @@ void iadst16_dspr2(const int16_t *input, int16_t *output) { x5 = dct_const_round_shift(s5 + s13); x6 = dct_const_round_shift(s6 + s14); x7 = dct_const_round_shift(s7 + s15); - x8 = dct_const_round_shift(s0 - s8); - x9 = dct_const_round_shift(s1 - s9); + x8 = dct_const_round_shift(s0 - s8); + x9 = dct_const_round_shift(s1 - s9); x10 = dct_const_round_shift(s2 - s10); x11 = dct_const_round_shift(s3 - s11); x12 = dct_const_round_shift(s4 - s12); @@ -1125,14 +1088,14 @@ void iadst16_dspr2(const int16_t *input, int16_t *output) { s5 = x5; s6 = x6; s7 = x7; - s8 = x8 * cospi_4_64 + x9 * cospi_28_64; - s9 = x8 * cospi_28_64 - x9 * cospi_4_64; - s10 = x10 * cospi_20_64 + x11 * cospi_12_64; - s11 = x10 * cospi_12_64 - x11 * cospi_20_64; - s12 = - x12 * cospi_28_64 + 
x13 * cospi_4_64; - s13 = x12 * cospi_4_64 + x13 * cospi_28_64; - s14 = - x14 * cospi_12_64 + x15 * cospi_20_64; - s15 = x14 * cospi_20_64 + x15 * cospi_12_64; + s8 = x8 * cospi_4_64 + x9 * cospi_28_64; + s9 = x8 * cospi_28_64 - x9 * cospi_4_64; + s10 = x10 * cospi_20_64 + x11 * cospi_12_64; + s11 = x10 * cospi_12_64 - x11 * cospi_20_64; + s12 = -x12 * cospi_28_64 + x13 * cospi_4_64; + s13 = x12 * cospi_4_64 + x13 * cospi_28_64; + s14 = -x14 * cospi_12_64 + x15 * cospi_20_64; + s15 = x14 * cospi_20_64 + x15 * cospi_12_64; x0 = s0 + s4; x1 = s1 + s5; @@ -1156,18 +1119,18 @@ void iadst16_dspr2(const int16_t *input, int16_t *output) { s1 = x1; s2 = x2; s3 = x3; - s4 = x4 * cospi_8_64 + x5 * cospi_24_64; + s4 = x4 * cospi_8_64 + x5 * cospi_24_64; s5 = x4 * cospi_24_64 - x5 * cospi_8_64; - s6 = - x6 * cospi_24_64 + x7 * cospi_8_64; - s7 = x6 * cospi_8_64 + x7 * cospi_24_64; + s6 = -x6 * cospi_24_64 + x7 * cospi_8_64; + s7 = x6 * cospi_8_64 + x7 * cospi_24_64; s8 = x8; s9 = x9; s10 = x10; s11 = x11; - s12 = x12 * cospi_8_64 + x13 * cospi_24_64; + s12 = x12 * cospi_8_64 + x13 * cospi_24_64; s13 = x12 * cospi_24_64 - x13 * cospi_8_64; - s14 = - x14 * cospi_24_64 + x15 * cospi_8_64; - s15 = x14 * cospi_8_64 + x15 * cospi_24_64; + s14 = -x14 * cospi_24_64 + x15 * cospi_8_64; + s15 = x14 * cospi_8_64 + x15 * cospi_24_64; x0 = s0 + s2; x1 = s1 + s3; @@ -1187,13 +1150,13 @@ void iadst16_dspr2(const int16_t *input, int16_t *output) { x15 = dct_const_round_shift(s13 - s15); // stage 4 - s2 = (- cospi_16_64) * (x2 + x3); + s2 = (-cospi_16_64) * (x2 + x3); s3 = cospi_16_64 * (x2 - x3); s6 = cospi_16_64 * (x6 + x7); - s7 = cospi_16_64 * (- x6 + x7); + s7 = cospi_16_64 * (-x6 + x7); s10 = cospi_16_64 * (x10 + x11); - s11 = cospi_16_64 * (- x10 + x11); - s14 = (- cospi_16_64) * (x14 + x15); + s11 = cospi_16_64 * (-x10 + x11); + s14 = (-cospi_16_64) * (x14 + x15); s15 = cospi_16_64 * (x14 - x15); x2 = dct_const_round_shift(s2); @@ -1205,23 +1168,22 @@ void iadst16_dspr2(const int16_t 
*input, int16_t *output) { x14 = dct_const_round_shift(s14); x15 = dct_const_round_shift(s15); - output[0] = x0; + output[0] = x0; output[1] = -x8; - output[2] = x12; + output[2] = x12; output[3] = -x4; - output[4] = x6; - output[5] = x14; - output[6] = x10; - output[7] = x2; - output[8] = x3; - output[9] = x11; - output[10] = x15; - output[11] = x7; - output[12] = x5; + output[4] = x6; + output[5] = x14; + output[6] = x10; + output[7] = x2; + output[8] = x3; + output[9] = x11; + output[10] = x15; + output[11] = x7; + output[12] = x5; output[13] = -x13; - output[14] = x9; + output[14] = x9; output[15] = -x1; } - #endif // HAVE_DSPR2 diff --git a/libs/libvpx/vpx_dsp/mips/itrans32_cols_dspr2.c b/libs/libvpx/vpx_dsp/mips/itrans32_cols_dspr2.c index 553acb0f5b..ce25d55c9c 100644 --- a/libs/libvpx/vpx_dsp/mips/itrans32_cols_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/itrans32_cols_dspr2.c @@ -39,9 +39,9 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, /* prefetch vpx_ff_cropTbl */ prefetch_load(vpx_ff_cropTbl); - prefetch_load(vpx_ff_cropTbl + 32); - prefetch_load(vpx_ff_cropTbl + 64); - prefetch_load(vpx_ff_cropTbl + 96); + prefetch_load(vpx_ff_cropTbl + 32); + prefetch_load(vpx_ff_cropTbl + 64); + prefetch_load(vpx_ff_cropTbl + 96); prefetch_load(vpx_ff_cropTbl + 128); prefetch_load(vpx_ff_cropTbl + 160); prefetch_load(vpx_ff_cropTbl + 192); @@ -51,7 +51,7 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, dest_pix = dest + i; dest_pix1 = dest + i + 31 * dest_stride; - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 2(%[input]) \n\t" "lh %[load2], 62(%[input]) \n\t" "lh %[load3], 34(%[input]) \n\t" @@ -101,18 +101,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "add %[step1_16], %[temp0], %[temp1] \n\t" "add %[step1_31], %[temp2], %[temp3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3), - [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] 
"=&r" (temp2), [temp3] "=&r" (temp3), - [step1_16] "=r" (step1_16), [step1_17] "=r" (step1_17), - [step1_30] "=r" (step1_30), [step1_31] "=r" (step1_31) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_31_64] "r" (cospi_31_64), [cospi_1_64] "r" (cospi_1_64), - [cospi_4_64] "r" (cospi_4_64), [cospi_17_64] "r" (cospi_17_64), - [cospi_15_64] "r" (cospi_15_64), [cospi_28_64] "r" (cospi_28_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_16] "=r"(step1_16), + [step1_17] "=r"(step1_17), [step1_30] "=r"(step1_30), + [step1_31] "=r"(step1_31) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_31_64] "r"(cospi_31_64), [cospi_1_64] "r"(cospi_1_64), + [cospi_4_64] "r"(cospi_4_64), [cospi_17_64] "r"(cospi_17_64), + [cospi_15_64] "r"(cospi_15_64), [cospi_28_64] "r"(cospi_28_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 18(%[input]) \n\t" "lh %[load2], 46(%[input]) \n\t" "lh %[load3], 50(%[input]) \n\t" @@ -162,18 +161,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "add %[step1_19], %[temp0], %[temp1] \n\t" "add %[step1_28], %[temp2], %[temp3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3), - [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - [step1_18] "=r" (step1_18), [step1_19] "=r" (step1_19), - [step1_28] "=r" (step1_28), [step1_29] "=r" (step1_29) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_23_64] "r" (cospi_23_64), [cospi_9_64] "r" (cospi_9_64), - [cospi_4_64] "r" (cospi_4_64), [cospi_7_64] "r" (cospi_7_64), - [cospi_25_64] "r" (cospi_25_64), [cospi_28_64] "r" (cospi_28_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [temp0] "=&r"(temp0), 
[temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_18] "=r"(step1_18), + [step1_19] "=r"(step1_19), [step1_28] "=r"(step1_28), + [step1_29] "=r"(step1_29) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_23_64] "r"(cospi_23_64), [cospi_9_64] "r"(cospi_9_64), + [cospi_4_64] "r"(cospi_4_64), [cospi_7_64] "r"(cospi_7_64), + [cospi_25_64] "r"(cospi_25_64), [cospi_28_64] "r"(cospi_28_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 10(%[input]) \n\t" "lh %[load2], 54(%[input]) \n\t" "lh %[load3], 42(%[input]) \n\t" @@ -223,18 +221,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "add %[step1_20], %[temp0], %[temp1] \n\t" "add %[step1_27], %[temp2], %[temp3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3), - [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - [step1_20] "=r" (step1_20), [step1_21] "=r" (step1_21), - [step1_26] "=r" (step1_26), [step1_27] "=r" (step1_27) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_27_64] "r" (cospi_27_64), [cospi_5_64] "r" (cospi_5_64), - [cospi_11_64] "r" (cospi_11_64), [cospi_21_64] "r" (cospi_21_64), - [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_20] "=r"(step1_20), + [step1_21] "=r"(step1_21), [step1_26] "=r"(step1_26), + [step1_27] "=r"(step1_27) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_27_64] "r"(cospi_27_64), [cospi_5_64] "r"(cospi_5_64), + [cospi_11_64] "r"(cospi_11_64), [cospi_21_64] "r"(cospi_21_64), + [cospi_12_64] "r"(cospi_12_64), [cospi_20_64] "r"(cospi_20_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 26(%[input]) \n\t" "lh %[load2], 
38(%[input]) \n\t" "lh %[load3], 58(%[input]) \n\t" @@ -280,18 +277,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "add %[step1_23], %[temp0], %[temp1] \n\t" "add %[step1_24], %[temp2], %[temp3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3), - [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - [step1_22] "=r" (step1_22), [step1_23] "=r" (step1_23), - [step1_24] "=r" (step1_24), [step1_25] "=r" (step1_25) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_19_64] "r" (cospi_19_64), [cospi_13_64] "r" (cospi_13_64), - [cospi_3_64] "r" (cospi_3_64), [cospi_29_64] "r" (cospi_29_64), - [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_22] "=r"(step1_22), + [step1_23] "=r"(step1_23), [step1_24] "=r"(step1_24), + [step1_25] "=r"(step1_25) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_19_64] "r"(cospi_19_64), [cospi_13_64] "r"(cospi_13_64), + [cospi_3_64] "r"(cospi_3_64), [cospi_29_64] "r"(cospi_29_64), + [cospi_12_64] "r"(cospi_12_64), [cospi_20_64] "r"(cospi_20_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 4(%[input]) \n\t" "lh %[load2], 60(%[input]) \n\t" "lh %[load3], 36(%[input]) \n\t" @@ -337,18 +333,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "add %[step2_8], %[temp0], %[temp1] \n\t" "add %[step2_15], %[temp2], %[temp3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3), - [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - [step2_8] "=r" (step2_8), [step2_9] "=r" (step2_9), - [step2_14] "=r" (step2_14), [step2_15] "=r" (step2_15) - : 
[const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_30_64] "r" (cospi_30_64), [cospi_2_64] "r" (cospi_2_64), - [cospi_14_64] "r" (cospi_14_64), [cospi_18_64] "r" (cospi_18_64), - [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step2_8] "=r"(step2_8), + [step2_9] "=r"(step2_9), [step2_14] "=r"(step2_14), + [step2_15] "=r"(step2_15) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_30_64] "r"(cospi_30_64), [cospi_2_64] "r"(cospi_2_64), + [cospi_14_64] "r"(cospi_14_64), [cospi_18_64] "r"(cospi_18_64), + [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 20(%[input]) \n\t" "lh %[load2], 44(%[input]) \n\t" "lh %[load3], 52(%[input]) \n\t" @@ -394,18 +389,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "add %[step2_11], %[temp0], %[temp1] \n\t" "add %[step2_12], %[temp2], %[temp3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), [load3] "=&r" (load3), - [load4] "=&r" (load4), [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - [step2_10] "=r" (step2_10), [step2_11] "=r" (step2_11), - [step2_12] "=r" (step2_12), [step2_13] "=r" (step2_13) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_22_64] "r" (cospi_22_64), [cospi_10_64] "r" (cospi_10_64), - [cospi_6_64] "r" (cospi_6_64), [cospi_26_64] "r" (cospi_26_64), - [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step2_10] "=r"(step2_10), + [step2_11] "=r"(step2_11), [step2_12] "=r"(step2_12), + 
[step2_13] "=r"(step2_13) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_22_64] "r"(cospi_22_64), [cospi_10_64] "r"(cospi_10_64), + [cospi_6_64] "r"(cospi_6_64), [cospi_26_64] "r"(cospi_26_64), + [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "sub %[temp0], %[step2_14], %[step2_13] \n\t" @@ -440,33 +434,31 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "extp %[step3_11], $ac2, 31 \n\t" "extp %[step3_12], $ac3, 31 \n\t" - : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [step3_8] "=r" (step3_8), [step3_9] "=r" (step3_9), - [step3_10] "=r" (step3_10), [step3_11] "=r" (step3_11), - [step3_12] "=r" (step3_12), [step3_13] "=r" (step3_13), - [step3_14] "=r" (step3_14), [step3_15] "=r" (step3_15) - : [const_2_power_13] "r" (const_2_power_13), [step2_8] "r" (step2_8), - [step2_9] "r" (step2_9), [step2_10] "r" (step2_10), - [step2_11] "r" (step2_11), [step2_12] "r" (step2_12), - [step2_13] "r" (step2_13), [step2_14] "r" (step2_14), - [step2_15] "r" (step2_15), [cospi_16_64] "r" (cospi_16_64) - ); + : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [step3_8] "=r"(step3_8), + [step3_9] "=r"(step3_9), [step3_10] "=r"(step3_10), + [step3_11] "=r"(step3_11), [step3_12] "=r"(step3_12), + [step3_13] "=r"(step3_13), [step3_14] "=r"(step3_14), + [step3_15] "=r"(step3_15) + : [const_2_power_13] "r"(const_2_power_13), [step2_8] "r"(step2_8), + [step2_9] "r"(step2_9), [step2_10] "r"(step2_10), + [step2_11] "r"(step2_11), [step2_12] "r"(step2_12), + [step2_13] "r"(step2_13), [step2_14] "r"(step2_14), + [step2_15] "r"(step2_15), [cospi_16_64] "r"(cospi_16_64)); step2_18 = step1_17 - step1_18; step2_29 = step1_30 - step1_29; - __asm__ __volatile__ ( + __asm__ __volatile__( "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "msub $ac0, %[step2_18], %[cospi_8_64] \n\t" "madd $ac0, %[step2_29], %[cospi_24_64] 
\n\t" "extp %[step3_18], $ac0, 31 \n\t" - : [step3_18] "=r" (step3_18) - : [const_2_power_13] "r" (const_2_power_13), - [step2_18] "r" (step2_18), [step2_29] "r" (step2_29), - [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64) - ); + : [step3_18] "=r"(step3_18) + : [const_2_power_13] "r"(const_2_power_13), [step2_18] "r"(step2_18), + [step2_29] "r"(step2_29), [cospi_24_64] "r"(cospi_24_64), + [cospi_8_64] "r"(cospi_8_64)); temp21 = step2_18 * cospi_24_64 + step2_29 * cospi_8_64; step3_29 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; @@ -474,18 +466,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, step2_19 = step1_16 - step1_19; step2_28 = step1_31 - step1_28; - __asm__ __volatile__ ( + __asm__ __volatile__( "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "msub $ac0, %[step2_19], %[cospi_8_64] \n\t" "madd $ac0, %[step2_28], %[cospi_24_64] \n\t" "extp %[step3_19], $ac0, 31 \n\t" - : [step3_19] "=r" (step3_19) - : [const_2_power_13] "r" (const_2_power_13), - [step2_19] "r" (step2_19), [step2_28] "r" (step2_28), - [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64) - ); + : [step3_19] "=r"(step3_19) + : [const_2_power_13] "r"(const_2_power_13), [step2_19] "r"(step2_19), + [step2_28] "r"(step2_28), [cospi_24_64] "r"(cospi_24_64), + [cospi_8_64] "r"(cospi_8_64)); temp21 = step2_19 * cospi_24_64 + step2_28 * cospi_8_64; step3_28 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; @@ -498,18 +489,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, step2_20 = step1_23 - step1_20; step2_27 = step1_24 - step1_27; - __asm__ __volatile__ ( + __asm__ __volatile__( "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "msub $ac0, %[step2_20], %[cospi_24_64] \n\t" "msub $ac0, %[step2_27], %[cospi_8_64] \n\t" "extp %[step3_20], $ac0, 31 \n\t" - : [step3_20] "=r" (step3_20) - : [const_2_power_13] "r" (const_2_power_13), - [step2_20] "r" (step2_20), [step2_27] "r" (step2_27), - [cospi_24_64] 
"r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64) - ); + : [step3_20] "=r"(step3_20) + : [const_2_power_13] "r"(const_2_power_13), [step2_20] "r"(step2_20), + [step2_27] "r"(step2_27), [cospi_24_64] "r"(cospi_24_64), + [cospi_8_64] "r"(cospi_8_64)); temp21 = -step2_20 * cospi_8_64 + step2_27 * cospi_24_64; step3_27 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; @@ -517,18 +507,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, step2_21 = step1_22 - step1_21; step2_26 = step1_25 - step1_26; - __asm__ __volatile__ ( + __asm__ __volatile__( "mtlo %[const_2_power_13], $ac1 \n\t" "mthi $zero, $ac1 \n\t" "msub $ac1, %[step2_21], %[cospi_24_64] \n\t" "msub $ac1, %[step2_26], %[cospi_8_64] \n\t" "extp %[step3_21], $ac1, 31 \n\t" - : [step3_21] "=r" (step3_21) - : [const_2_power_13] "r" (const_2_power_13), - [step2_21] "r" (step2_21), [step2_26] "r" (step2_26), - [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64) - ); + : [step3_21] "=r"(step3_21) + : [const_2_power_13] "r"(const_2_power_13), [step2_21] "r"(step2_21), + [step2_26] "r"(step2_26), [cospi_24_64] "r"(cospi_24_64), + [cospi_8_64] "r"(cospi_8_64)); temp21 = -step2_21 * cospi_8_64 + step2_26 * cospi_24_64; step3_26 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; @@ -556,7 +545,7 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, step2_30 = step3_30 + step3_25; step2_31 = step3_31 + step3_24; - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 0(%[input]) \n\t" "lh %[load2], 32(%[input]) \n\t" "lh %[load3], 16(%[input]) \n\t" @@ -588,19 +577,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "sub %[step1_2], %[temp1], %[temp2] \n\t" "sub %[step1_3], %[temp0], %[temp3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [result1] "=&r" (result1), [result2] "=&r" (result2), - [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - 
[step1_0] "=r" (step1_0), [step1_1] "=r" (step1_1), - [step1_2] "=r" (step1_2), [step1_3] "=r" (step1_3) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64), - [cospi_16_64] "r" (cospi_16_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [result1] "=&r"(result1), + [result2] "=&r"(result2), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_0] "=r"(step1_0), + [step1_1] "=r"(step1_1), [step1_2] "=r"(step1_2), + [step1_3] "=r"(step1_3) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64), + [cospi_16_64] "r"(cospi_16_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 8(%[input]) \n\t" "lh %[load2], 56(%[input]) \n\t" "lh %[load3], 40(%[input]) \n\t" @@ -649,17 +636,15 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "add %[step1_4], %[temp0], %[temp1] \n\t" "add %[step1_7], %[temp3], %[temp2] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - [step1_4] "=r" (step1_4), [step1_5] "=r" (step1_5), - [step1_6] "=r" (step1_6), [step1_7] "=r" (step1_7) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64), - [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64), - [cospi_16_64] "r" (cospi_16_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_4] "=r"(step1_4), + [step1_5] "=r"(step1_5), [step1_6] "=r"(step1_6), + [step1_7] "=r"(step1_7) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + 
[cospi_20_64] "r"(cospi_20_64), [cospi_12_64] "r"(cospi_12_64), + [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64), + [cospi_16_64] "r"(cospi_16_64)); step2_0 = step1_0 + step1_7; step2_1 = step1_1 + step1_6; @@ -688,67 +673,63 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, step1_14 = step2_1 - step3_14; step1_15 = step2_0 - step3_15; - __asm__ __volatile__ ( + __asm__ __volatile__( "sub %[temp0], %[step2_27], %[step2_20] \n\t" "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "madd $ac0, %[temp0], %[cospi_16_64] \n\t" "extp %[step1_20], $ac0, 31 \n\t" - : [temp0] "=&r" (temp0), [step1_20] "=r" (step1_20) - : [const_2_power_13] "r" (const_2_power_13), [step2_20] "r" (step2_20), - [step2_27] "r" (step2_27), [cospi_16_64] "r" (cospi_16_64) - ); + : [temp0] "=&r"(temp0), [step1_20] "=r"(step1_20) + : [const_2_power_13] "r"(const_2_power_13), [step2_20] "r"(step2_20), + [step2_27] "r"(step2_27), [cospi_16_64] "r"(cospi_16_64)); temp21 = (step2_20 + step2_27) * cospi_16_64; step1_27 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; - __asm__ __volatile__ ( + __asm__ __volatile__( "sub %[temp0], %[step2_26], %[step2_21] \n\t" "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "madd $ac0, %[temp0], %[cospi_16_64] \n\t" "extp %[step1_21], $ac0, 31 \n\t" - : [temp0] "=&r" (temp0), [step1_21] "=r" (step1_21) - : [const_2_power_13] "r" (const_2_power_13), [step2_26] "r" (step2_26), - [step2_21] "r" (step2_21), [cospi_16_64] "r" (cospi_16_64) - ); + : [temp0] "=&r"(temp0), [step1_21] "=r"(step1_21) + : [const_2_power_13] "r"(const_2_power_13), [step2_26] "r"(step2_26), + [step2_21] "r"(step2_21), [cospi_16_64] "r"(cospi_16_64)); temp21 = (step2_21 + step2_26) * cospi_16_64; step1_26 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; - __asm__ __volatile__ ( + __asm__ __volatile__( "sub %[temp0], %[step2_25], %[step2_22] \n\t" "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "madd $ac0, %[temp0], 
%[cospi_16_64] \n\t" "extp %[step1_22], $ac0, 31 \n\t" - : [temp0] "=&r" (temp0), [step1_22] "=r" (step1_22) - : [const_2_power_13] "r" (const_2_power_13), [step2_25] "r" (step2_25), - [step2_22] "r" (step2_22), [cospi_16_64] "r" (cospi_16_64) - ); + : [temp0] "=&r"(temp0), [step1_22] "=r"(step1_22) + : [const_2_power_13] "r"(const_2_power_13), [step2_25] "r"(step2_25), + [step2_22] "r"(step2_22), [cospi_16_64] "r"(cospi_16_64)); temp21 = (step2_22 + step2_25) * cospi_16_64; step1_25 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; - __asm__ __volatile__ ( + __asm__ __volatile__( "sub %[temp0], %[step2_24], %[step2_23] \n\t" "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "madd $ac0, %[temp0], %[cospi_16_64] \n\t" "extp %[step1_23], $ac0, 31 \n\t" - : [temp0] "=&r" (temp0), [step1_23] "=r" (step1_23) - : [const_2_power_13] "r" (const_2_power_13), [step2_24] "r" (step2_24), - [step2_23] "r" (step2_23), [cospi_16_64] "r" (cospi_16_64) - ); + : [temp0] "=&r"(temp0), [step1_23] "=r"(step1_23) + : [const_2_power_13] "r"(const_2_power_13), [step2_24] "r"(step2_24), + [step2_23] "r"(step2_23), [cospi_16_64] "r"(cospi_16_64)); temp21 = (step2_23 + step2_24) * cospi_16_64; step1_24 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; - __asm__ __volatile__ ( + __asm__ __volatile__( "lbu %[temp2], 0(%[dest_pix]) \n\t" "add %[temp0], %[step1_0], %[step2_31] \n\t" "addi %[temp0], %[temp0], 32 \n\t" @@ -783,21 +764,20 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "sb %[temp1], 0(%[dest_pix]) \n\t" "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t" - : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2), - [temp3] "=&r" (temp3), [dest_pix] "+r" (dest_pix) - : [cm] "r" (cm), [dest_stride] "r" (dest_stride), - [step1_0] "r" (step1_0), [step1_1] "r" (step1_1), - [step1_2] "r" (step1_2), [step1_3] "r" (step1_3), - [step2_28] "r" (step2_28), [step2_29] "r" (step2_29), - [step2_30] "r" (step2_30), [step2_31] "r" (step2_31) - ); + 
: [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2), + [temp3] "=&r"(temp3), [dest_pix] "+r"(dest_pix) + : [cm] "r"(cm), [dest_stride] "r"(dest_stride), [step1_0] "r"(step1_0), + [step1_1] "r"(step1_1), [step1_2] "r"(step1_2), + [step1_3] "r"(step1_3), [step2_28] "r"(step2_28), + [step2_29] "r"(step2_29), [step2_30] "r"(step2_30), + [step2_31] "r"(step2_31)); step3_12 = ROUND_POWER_OF_TWO((step1_3 - step2_28), 6); step3_13 = ROUND_POWER_OF_TWO((step1_2 - step2_29), 6); step3_14 = ROUND_POWER_OF_TWO((step1_1 - step2_30), 6); step3_15 = ROUND_POWER_OF_TWO((step1_0 - step2_31), 6); - __asm__ __volatile__ ( + __asm__ __volatile__( "lbu %[temp2], 0(%[dest_pix1]) \n\t" "add %[temp2], %[temp2], %[step3_15] \n\t" "lbux %[temp0], %[temp2](%[cm]) \n\t" @@ -820,14 +800,13 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "sb %[temp1], 0(%[dest_pix1]) \n\t" "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t" - : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2), - [temp3] "=&r" (temp3), [dest_pix1] "+r" (dest_pix1) - : [cm] "r" (cm), [dest_stride] "r" (dest_stride), - [step3_12] "r" (step3_12), [step3_13] "r" (step3_13), - [step3_14] "r" (step3_14), [step3_15] "r" (step3_15) - ); + : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2), + [temp3] "=&r"(temp3), [dest_pix1] "+r"(dest_pix1) + : [cm] "r"(cm), [dest_stride] "r"(dest_stride), + [step3_12] "r"(step3_12), [step3_13] "r"(step3_13), + [step3_14] "r"(step3_14), [step3_15] "r"(step3_15)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lbu %[temp2], 0(%[dest_pix]) \n\t" "add %[temp0], %[step1_4], %[step1_27] \n\t" "addi %[temp0], %[temp0], 32 \n\t" @@ -862,21 +841,20 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "sb %[temp1], 0(%[dest_pix]) \n\t" "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t" - : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2), - [temp3] "=&r" (temp3), [dest_pix] "+r" (dest_pix) - : [cm] 
"r" (cm), [dest_stride] "r" (dest_stride), - [step1_4] "r" (step1_4), [step1_5] "r" (step1_5), - [step1_6] "r" (step1_6), [step1_7] "r" (step1_7), - [step1_24] "r" (step1_24), [step1_25] "r" (step1_25), - [step1_26] "r" (step1_26), [step1_27] "r" (step1_27) - ); + : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2), + [temp3] "=&r"(temp3), [dest_pix] "+r"(dest_pix) + : [cm] "r"(cm), [dest_stride] "r"(dest_stride), [step1_4] "r"(step1_4), + [step1_5] "r"(step1_5), [step1_6] "r"(step1_6), + [step1_7] "r"(step1_7), [step1_24] "r"(step1_24), + [step1_25] "r"(step1_25), [step1_26] "r"(step1_26), + [step1_27] "r"(step1_27)); step3_12 = ROUND_POWER_OF_TWO((step1_7 - step1_24), 6); step3_13 = ROUND_POWER_OF_TWO((step1_6 - step1_25), 6); step3_14 = ROUND_POWER_OF_TWO((step1_5 - step1_26), 6); step3_15 = ROUND_POWER_OF_TWO((step1_4 - step1_27), 6); - __asm__ __volatile__ ( + __asm__ __volatile__( "lbu %[temp2], 0(%[dest_pix1]) \n\t" "add %[temp2], %[temp2], %[step3_15] \n\t" "lbux %[temp0], %[temp2](%[cm]) \n\t" @@ -899,14 +877,13 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "sb %[temp1], 0(%[dest_pix1]) \n\t" "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t" - : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2), - [temp3] "=&r" (temp3), [dest_pix1] "+r" (dest_pix1) - : [cm] "r" (cm), [dest_stride] "r" (dest_stride), - [step3_12] "r" (step3_12), [step3_13] "r" (step3_13), - [step3_14] "r" (step3_14), [step3_15] "r" (step3_15) - ); + : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2), + [temp3] "=&r"(temp3), [dest_pix1] "+r"(dest_pix1) + : [cm] "r"(cm), [dest_stride] "r"(dest_stride), + [step3_12] "r"(step3_12), [step3_13] "r"(step3_13), + [step3_14] "r"(step3_14), [step3_15] "r"(step3_15)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lbu %[temp2], 0(%[dest_pix]) \n\t" "add %[temp0], %[step1_8], %[step1_23] \n\t" "addi %[temp0], %[temp0], 32 \n\t" @@ -941,21 +918,20 @@ void 
vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "sb %[temp1], 0(%[dest_pix]) \n\t" "addu %[dest_pix], %[dest_pix], %[dest_stride] \n\t" - : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2), - [temp3] "=&r" (temp3), [dest_pix] "+r" (dest_pix) - : [cm] "r" (cm), [dest_stride] "r" (dest_stride), - [step1_8] "r" (step1_8), [step1_9] "r" (step1_9), - [step1_10] "r" (step1_10), [step1_11] "r" (step1_11), - [step1_20] "r" (step1_20), [step1_21] "r" (step1_21), - [step1_22] "r" (step1_22), [step1_23] "r" (step1_23) - ); + : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2), + [temp3] "=&r"(temp3), [dest_pix] "+r"(dest_pix) + : [cm] "r"(cm), [dest_stride] "r"(dest_stride), [step1_8] "r"(step1_8), + [step1_9] "r"(step1_9), [step1_10] "r"(step1_10), + [step1_11] "r"(step1_11), [step1_20] "r"(step1_20), + [step1_21] "r"(step1_21), [step1_22] "r"(step1_22), + [step1_23] "r"(step1_23)); step3_12 = ROUND_POWER_OF_TWO((step1_11 - step1_20), 6); step3_13 = ROUND_POWER_OF_TWO((step1_10 - step1_21), 6); step3_14 = ROUND_POWER_OF_TWO((step1_9 - step1_22), 6); step3_15 = ROUND_POWER_OF_TWO((step1_8 - step1_23), 6); - __asm__ __volatile__ ( + __asm__ __volatile__( "lbu %[temp2], 0(%[dest_pix1]) \n\t" "add %[temp2], %[temp2], %[step3_15] \n\t" "lbux %[temp0], %[temp2](%[cm]) \n\t" @@ -978,14 +954,13 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "sb %[temp1], 0(%[dest_pix1]) \n\t" "subu %[dest_pix1], %[dest_pix1], %[dest_stride] \n\t" - : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2), - [temp3] "=&r" (temp3), [dest_pix1] "+r" (dest_pix1) - : [cm] "r" (cm), [dest_stride] "r" (dest_stride), - [step3_12] "r" (step3_12), [step3_13] "r" (step3_13), - [step3_14] "r" (step3_14), [step3_15] "r" (step3_15) - ); + : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2), + [temp3] "=&r"(temp3), [dest_pix1] "+r"(dest_pix1) + : [cm] "r"(cm), [dest_stride] "r"(dest_stride), + [step3_12] "r"(step3_12), 
[step3_13] "r"(step3_13), + [step3_14] "r"(step3_14), [step3_15] "r"(step3_15)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lbu %[temp2], 0(%[dest_pix]) \n\t" "add %[temp0], %[step1_12], %[step2_19] \n\t" "addi %[temp0], %[temp0], 32 \n\t" @@ -1019,21 +994,20 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "lbux %[temp1], %[temp3](%[cm]) \n\t" "sb %[temp1], 0(%[dest_pix]) \n\t" - : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2), - [temp3] "=&r" (temp3), [dest_pix] "+r" (dest_pix) - : [cm] "r" (cm), [dest_stride] "r" (dest_stride), - [step1_12] "r" (step1_12), [step1_13] "r" (step1_13), - [step1_14] "r" (step1_14), [step1_15] "r" (step1_15), - [step2_16] "r" (step2_16), [step2_17] "r" (step2_17), - [step2_18] "r" (step2_18), [step2_19] "r" (step2_19) - ); + : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2), + [temp3] "=&r"(temp3), [dest_pix] "+r"(dest_pix) + : [cm] "r"(cm), [dest_stride] "r"(dest_stride), + [step1_12] "r"(step1_12), [step1_13] "r"(step1_13), + [step1_14] "r"(step1_14), [step1_15] "r"(step1_15), + [step2_16] "r"(step2_16), [step2_17] "r"(step2_17), + [step2_18] "r"(step2_18), [step2_19] "r"(step2_19)); step3_12 = ROUND_POWER_OF_TWO((step1_15 - step2_16), 6); step3_13 = ROUND_POWER_OF_TWO((step1_14 - step2_17), 6); step3_14 = ROUND_POWER_OF_TWO((step1_13 - step2_18), 6); step3_15 = ROUND_POWER_OF_TWO((step1_12 - step2_19), 6); - __asm__ __volatile__ ( + __asm__ __volatile__( "lbu %[temp2], 0(%[dest_pix1]) \n\t" "add %[temp2], %[temp2], %[step3_15] \n\t" "lbux %[temp0], %[temp2](%[cm]) \n\t" @@ -1055,12 +1029,11 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, "lbux %[temp1], %[temp3](%[cm]) \n\t" "sb %[temp1], 0(%[dest_pix1]) \n\t" - : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), [temp2] "=&r" (temp2), - [temp3] "=&r" (temp3), [dest_pix1] "+r" (dest_pix1) - : [cm] "r" (cm), [dest_stride] "r" (dest_stride), - [step3_12] "r" (step3_12), [step3_13] "r" (step3_13), - 
[step3_14] "r" (step3_14), [step3_15] "r" (step3_15) - ); + : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [temp2] "=&r"(temp2), + [temp3] "=&r"(temp3), [dest_pix1] "+r"(dest_pix1) + : [cm] "r"(cm), [dest_stride] "r"(dest_stride), + [step3_12] "r"(step3_12), [step3_13] "r"(step3_13), + [step3_14] "r"(step3_14), [step3_15] "r"(step3_15)); input += 32; } diff --git a/libs/libvpx/vpx_dsp/mips/itrans32_dspr2.c b/libs/libvpx/vpx_dsp/mips/itrans32_dspr2.c index 523da1df1b..d71c5ffed5 100644 --- a/libs/libvpx/vpx_dsp/mips/itrans32_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/itrans32_dspr2.c @@ -40,16 +40,16 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, const int const_2_power_13 = 8192; const int32_t *input_int; - for (i = no_rows; i--; ) { + for (i = no_rows; i--;) { input_int = (const int32_t *)input; - if (!(input_int[0] | input_int[1] | input_int[2] | input_int[3] | - input_int[4] | input_int[5] | input_int[6] | input_int[7] | - input_int[8] | input_int[9] | input_int[10] | input_int[11] | + if (!(input_int[0] | input_int[1] | input_int[2] | input_int[3] | + input_int[4] | input_int[5] | input_int[6] | input_int[7] | + input_int[8] | input_int[9] | input_int[10] | input_int[11] | input_int[12] | input_int[13] | input_int[14] | input_int[15])) { input += 32; - __asm__ __volatile__ ( + __asm__ __volatile__( "sh $zero, 0(%[output]) \n\t" "sh $zero, 64(%[output]) \n\t" "sh $zero, 128(%[output]) \n\t" @@ -84,8 +84,7 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, "sh $zero, 1984(%[output]) \n\t" : - : [output] "r" (output) - ); + : [output] "r"(output)); output += 1; @@ -96,7 +95,7 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, prefetch_load((const uint8_t *)(input + 32)); prefetch_load((const uint8_t *)(input + 48)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 2(%[input]) \n\t" "lh %[load2], 62(%[input]) \n\t" "lh %[load3], 34(%[input]) \n\t" @@ -146,19 +145,17 @@ static void 
idct32_rows_dspr2(const int16_t *input, int16_t *output, "add %[step1_16], %[temp0], %[temp1] \n\t" "add %[step1_31], %[temp2], %[temp3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - [step1_16] "=r" (step1_16), [step1_17] "=r" (step1_17), - [step1_30] "=r" (step1_30), [step1_31] "=r" (step1_31) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_31_64] "r" (cospi_31_64), [cospi_1_64] "r" (cospi_1_64), - [cospi_4_64] "r" (cospi_4_64), [cospi_17_64] "r" (cospi_17_64), - [cospi_15_64] "r" (cospi_15_64), [cospi_28_64] "r" (cospi_28_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_16] "=r"(step1_16), + [step1_17] "=r"(step1_17), [step1_30] "=r"(step1_30), + [step1_31] "=r"(step1_31) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_31_64] "r"(cospi_31_64), [cospi_1_64] "r"(cospi_1_64), + [cospi_4_64] "r"(cospi_4_64), [cospi_17_64] "r"(cospi_17_64), + [cospi_15_64] "r"(cospi_15_64), [cospi_28_64] "r"(cospi_28_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 18(%[input]) \n\t" "lh %[load2], 46(%[input]) \n\t" "lh %[load3], 50(%[input]) \n\t" @@ -208,19 +205,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, "add %[step1_19], %[temp0], %[temp1] \n\t" "add %[step1_28], %[temp2], %[temp3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - [step1_18] "=r" (step1_18), [step1_19] "=r" (step1_19), - [step1_28] "=r" (step1_28), [step1_29] "=r" (step1_29) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_23_64] 
"r" (cospi_23_64), [cospi_9_64] "r" (cospi_9_64), - [cospi_4_64] "r" (cospi_4_64), [cospi_7_64] "r" (cospi_7_64), - [cospi_25_64] "r" (cospi_25_64), [cospi_28_64] "r" (cospi_28_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_18] "=r"(step1_18), + [step1_19] "=r"(step1_19), [step1_28] "=r"(step1_28), + [step1_29] "=r"(step1_29) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_23_64] "r"(cospi_23_64), [cospi_9_64] "r"(cospi_9_64), + [cospi_4_64] "r"(cospi_4_64), [cospi_7_64] "r"(cospi_7_64), + [cospi_25_64] "r"(cospi_25_64), [cospi_28_64] "r"(cospi_28_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 10(%[input]) \n\t" "lh %[load2], 54(%[input]) \n\t" "lh %[load3], 42(%[input]) \n\t" @@ -270,19 +265,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, "add %[step1_20], %[temp0], %[temp1] \n\t" "add %[step1_27], %[temp2], %[temp3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - [step1_20] "=r" (step1_20), [step1_21] "=r" (step1_21), - [step1_26] "=r" (step1_26), [step1_27] "=r" (step1_27) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_27_64] "r" (cospi_27_64), [cospi_5_64] "r" (cospi_5_64), - [cospi_11_64] "r" (cospi_11_64), [cospi_21_64] "r" (cospi_21_64), - [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_20] "=r"(step1_20), + [step1_21] "=r"(step1_21), [step1_26] "=r"(step1_26), + [step1_27] "=r"(step1_27) + : [const_2_power_13] "r"(const_2_power_13), [input] 
"r"(input), + [cospi_27_64] "r"(cospi_27_64), [cospi_5_64] "r"(cospi_5_64), + [cospi_11_64] "r"(cospi_11_64), [cospi_21_64] "r"(cospi_21_64), + [cospi_12_64] "r"(cospi_12_64), [cospi_20_64] "r"(cospi_20_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 26(%[input]) \n\t" "lh %[load2], 38(%[input]) \n\t" "lh %[load3], 58(%[input]) \n\t" @@ -332,19 +325,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, "add %[step1_23], %[temp0], %[temp1] \n\t" "add %[step1_24], %[temp2], %[temp3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - [step1_22] "=r" (step1_22), [step1_23] "=r" (step1_23), - [step1_24] "=r" (step1_24), [step1_25] "=r" (step1_25) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_19_64] "r" (cospi_19_64), [cospi_13_64] "r" (cospi_13_64), - [cospi_3_64] "r" (cospi_3_64), [cospi_29_64] "r" (cospi_29_64), - [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_22] "=r"(step1_22), + [step1_23] "=r"(step1_23), [step1_24] "=r"(step1_24), + [step1_25] "=r"(step1_25) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_19_64] "r"(cospi_19_64), [cospi_13_64] "r"(cospi_13_64), + [cospi_3_64] "r"(cospi_3_64), [cospi_29_64] "r"(cospi_29_64), + [cospi_12_64] "r"(cospi_12_64), [cospi_20_64] "r"(cospi_20_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 4(%[input]) \n\t" "lh %[load2], 60(%[input]) \n\t" "lh %[load3], 36(%[input]) \n\t" @@ -394,19 +385,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, "add %[step2_8], %[temp0], %[temp1] \n\t" "add %[step2_15], %[temp2], %[temp3] \n\t" - 
: [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - [step2_8] "=r" (step2_8), [step2_9] "=r" (step2_9), - [step2_14] "=r" (step2_14), [step2_15] "=r" (step2_15) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_30_64] "r" (cospi_30_64), [cospi_2_64] "r" (cospi_2_64), - [cospi_14_64] "r" (cospi_14_64), [cospi_18_64] "r" (cospi_18_64), - [cospi_8_64] "r" (cospi_8_64), [cospi_24_64] "r" (cospi_24_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step2_8] "=r"(step2_8), + [step2_9] "=r"(step2_9), [step2_14] "=r"(step2_14), + [step2_15] "=r"(step2_15) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_30_64] "r"(cospi_30_64), [cospi_2_64] "r"(cospi_2_64), + [cospi_14_64] "r"(cospi_14_64), [cospi_18_64] "r"(cospi_18_64), + [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 20(%[input]) \n\t" "lh %[load2], 44(%[input]) \n\t" "lh %[load3], 52(%[input]) \n\t" @@ -456,19 +445,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, "add %[step2_11], %[temp0], %[temp1] \n\t" "add %[step2_12], %[temp2], %[temp3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - [step2_10] "=r" (step2_10), [step2_11] "=r" (step2_11), - [step2_12] "=r" (step2_12), [step2_13] "=r" (step2_13) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_22_64] "r" (cospi_22_64), [cospi_10_64] "r" (cospi_10_64), - [cospi_6_64] "r" (cospi_6_64), [cospi_26_64] "r" (cospi_26_64), - [cospi_8_64] "r" (cospi_8_64), 
[cospi_24_64] "r" (cospi_24_64) - ); + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step2_10] "=r"(step2_10), + [step2_11] "=r"(step2_11), [step2_12] "=r"(step2_12), + [step2_13] "=r"(step2_13) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_22_64] "r"(cospi_22_64), [cospi_10_64] "r"(cospi_10_64), + [cospi_6_64] "r"(cospi_6_64), [cospi_26_64] "r"(cospi_26_64), + [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64)); - __asm__ __volatile__ ( + __asm__ __volatile__( "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "sub %[temp0], %[step2_14], %[step2_13] \n\t" @@ -507,34 +494,31 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, "extp %[step3_11], $ac2, 31 \n\t" "extp %[step3_12], $ac3, 31 \n\t" - : [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [step3_8] "=r" (step3_8), [step3_9] "=r" (step3_9), - [step3_10] "=r" (step3_10), [step3_11] "=r" (step3_11), - [step3_12] "=r" (step3_12), [step3_13] "=r" (step3_13), - [step3_14] "=r" (step3_14), [step3_15] "=r" (step3_15) - : [const_2_power_13] "r" (const_2_power_13), - [step2_8] "r" (step2_8), [step2_9] "r" (step2_9), - [step2_10] "r" (step2_10), [step2_11] "r" (step2_11), - [step2_12] "r" (step2_12), [step2_13] "r" (step2_13), - [step2_14] "r" (step2_14), [step2_15] "r" (step2_15), - [cospi_16_64] "r" (cospi_16_64) - ); + : [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), [step3_8] "=r"(step3_8), + [step3_9] "=r"(step3_9), [step3_10] "=r"(step3_10), + [step3_11] "=r"(step3_11), [step3_12] "=r"(step3_12), + [step3_13] "=r"(step3_13), [step3_14] "=r"(step3_14), + [step3_15] "=r"(step3_15) + : [const_2_power_13] "r"(const_2_power_13), [step2_8] "r"(step2_8), + [step2_9] "r"(step2_9), [step2_10] "r"(step2_10), + [step2_11] "r"(step2_11), [step2_12] "r"(step2_12), + [step2_13] "r"(step2_13), [step2_14] "r"(step2_14), + 
[step2_15] "r"(step2_15), [cospi_16_64] "r"(cospi_16_64)); step2_18 = step1_17 - step1_18; step2_29 = step1_30 - step1_29; - __asm__ __volatile__ ( + __asm__ __volatile__( "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "msub $ac0, %[step2_18], %[cospi_8_64] \n\t" "madd $ac0, %[step2_29], %[cospi_24_64] \n\t" "extp %[step3_18], $ac0, 31 \n\t" - : [step3_18] "=r" (step3_18) - : [const_2_power_13] "r" (const_2_power_13), - [step2_18] "r" (step2_18), [step2_29] "r" (step2_29), - [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64) - ); + : [step3_18] "=r"(step3_18) + : [const_2_power_13] "r"(const_2_power_13), [step2_18] "r"(step2_18), + [step2_29] "r"(step2_29), [cospi_24_64] "r"(cospi_24_64), + [cospi_8_64] "r"(cospi_8_64)); temp21 = step2_18 * cospi_24_64 + step2_29 * cospi_8_64; step3_29 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; @@ -542,18 +526,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, step2_19 = step1_16 - step1_19; step2_28 = step1_31 - step1_28; - __asm__ __volatile__ ( + __asm__ __volatile__( "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "msub $ac0, %[step2_19], %[cospi_8_64] \n\t" "madd $ac0, %[step2_28], %[cospi_24_64] \n\t" "extp %[step3_19], $ac0, 31 \n\t" - : [step3_19] "=r" (step3_19) - : [const_2_power_13] "r" (const_2_power_13), - [step2_19] "r" (step2_19), [step2_28] "r" (step2_28), - [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64) - ); + : [step3_19] "=r"(step3_19) + : [const_2_power_13] "r"(const_2_power_13), [step2_19] "r"(step2_19), + [step2_28] "r"(step2_28), [cospi_24_64] "r"(cospi_24_64), + [cospi_8_64] "r"(cospi_8_64)); temp21 = step2_19 * cospi_24_64 + step2_28 * cospi_8_64; step3_28 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; @@ -566,18 +549,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, step2_20 = step1_23 - step1_20; step2_27 = step1_24 - step1_27; - __asm__ __volatile__ ( + __asm__ __volatile__( "mtlo 
%[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "msub $ac0, %[step2_20], %[cospi_24_64] \n\t" "msub $ac0, %[step2_27], %[cospi_8_64] \n\t" "extp %[step3_20], $ac0, 31 \n\t" - : [step3_20] "=r" (step3_20) - : [const_2_power_13] "r" (const_2_power_13), - [step2_20] "r" (step2_20), [step2_27] "r" (step2_27), - [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64) - ); + : [step3_20] "=r"(step3_20) + : [const_2_power_13] "r"(const_2_power_13), [step2_20] "r"(step2_20), + [step2_27] "r"(step2_27), [cospi_24_64] "r"(cospi_24_64), + [cospi_8_64] "r"(cospi_8_64)); temp21 = -step2_20 * cospi_8_64 + step2_27 * cospi_24_64; step3_27 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; @@ -585,18 +567,17 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, step2_21 = step1_22 - step1_21; step2_26 = step1_25 - step1_26; - __asm__ __volatile__ ( + __asm__ __volatile__( "mtlo %[const_2_power_13], $ac1 \n\t" "mthi $zero, $ac1 \n\t" "msub $ac1, %[step2_21], %[cospi_24_64] \n\t" "msub $ac1, %[step2_26], %[cospi_8_64] \n\t" "extp %[step3_21], $ac1, 31 \n\t" - : [step3_21] "=r" (step3_21) - : [const_2_power_13] "r" (const_2_power_13), - [step2_21] "r" (step2_21), [step2_26] "r" (step2_26), - [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64) - ); + : [step3_21] "=r"(step3_21) + : [const_2_power_13] "r"(const_2_power_13), [step2_21] "r"(step2_21), + [step2_26] "r"(step2_26), [cospi_24_64] "r"(cospi_24_64), + [cospi_8_64] "r"(cospi_8_64)); temp21 = -step2_21 * cospi_8_64 + step2_26 * cospi_24_64; step3_26 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; @@ -624,7 +605,7 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, step2_30 = step3_30 + step3_25; step2_31 = step3_31 + step3_24; - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 0(%[input]) \n\t" "lh %[load2], 32(%[input]) \n\t" "lh %[load3], 16(%[input]) \n\t" @@ -658,20 +639,19 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, 
"sub %[step1_2], %[temp1], %[temp2] \n\t" "sub %[step1_3], %[temp0], %[temp3] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [result1] "=&r" (result1), [result2] "=&r" (result2), - [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - [step1_0] "=r" (step1_0), [step1_1] "=r" (step1_1), - [step1_2] "=r" (step1_2), [step1_3] "=r" (step1_3) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_16_64] "r" (cospi_16_64), - [cospi_24_64] "r" (cospi_24_64), [cospi_8_64] "r" (cospi_8_64) + : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [result1] "=&r"(result1), + [result2] "=&r"(result2), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_0] "=r"(step1_0), + [step1_1] "=r"(step1_1), [step1_2] "=r"(step1_2), + [step1_3] "=r"(step1_3) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_16_64] "r"(cospi_16_64), [cospi_24_64] "r"(cospi_24_64), + [cospi_8_64] "r"(cospi_8_64) - ); + ); - __asm__ __volatile__ ( + __asm__ __volatile__( "lh %[load1], 8(%[input]) \n\t" "lh %[load2], 56(%[input]) \n\t" "lh %[load3], 40(%[input]) \n\t" @@ -724,17 +704,15 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, "add %[step1_4], %[temp0], %[temp1] \n\t" "add %[step1_7], %[temp3], %[temp2] \n\t" - : [load1] "=&r" (load1), [load2] "=&r" (load2), - [load3] "=&r" (load3), [load4] "=&r" (load4), - [temp0] "=&r" (temp0), [temp1] "=&r" (temp1), - [temp2] "=&r" (temp2), [temp3] "=&r" (temp3), - [step1_4] "=r" (step1_4), [step1_5] "=r" (step1_5), - [step1_6] "=r" (step1_6), [step1_7] "=r" (step1_7) - : [const_2_power_13] "r" (const_2_power_13), [input] "r" (input), - [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64), - [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64), - [cospi_16_64] "r" (cospi_16_64) - ); + : [load1] 
"=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), + [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1), + [temp2] "=&r"(temp2), [temp3] "=&r"(temp3), [step1_4] "=r"(step1_4), + [step1_5] "=r"(step1_5), [step1_6] "=r"(step1_6), + [step1_7] "=r"(step1_7) + : [const_2_power_13] "r"(const_2_power_13), [input] "r"(input), + [cospi_20_64] "r"(cospi_20_64), [cospi_12_64] "r"(cospi_12_64), + [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64), + [cospi_16_64] "r"(cospi_16_64)); step2_0 = step1_0 + step1_7; step2_1 = step1_1 + step1_6; @@ -762,66 +740,58 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, step1_14 = step2_1 - step3_14; step1_15 = step2_0 - step3_15; - __asm__ __volatile__ ( + __asm__ __volatile__( "sub %[temp0], %[step2_27], %[step2_20] \n\t" "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "madd $ac0, %[temp0], %[cospi_16_64] \n\t" "extp %[step1_20], $ac0, 31 \n\t" - : [temp0] "=&r" (temp0), [step1_20] "=r" (step1_20) - : [const_2_power_13] "r" (const_2_power_13), - [step2_20] "r" (step2_20), [step2_27] "r" (step2_27), - [cospi_16_64] "r" (cospi_16_64) - ); + : [temp0] "=&r"(temp0), [step1_20] "=r"(step1_20) + : [const_2_power_13] "r"(const_2_power_13), [step2_20] "r"(step2_20), + [step2_27] "r"(step2_27), [cospi_16_64] "r"(cospi_16_64)); temp21 = (step2_20 + step2_27) * cospi_16_64; step1_27 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; - __asm__ __volatile__ ( + __asm__ __volatile__( "sub %[temp0], %[step2_26], %[step2_21] \n\t" "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "madd $ac0, %[temp0], %[cospi_16_64] \n\t" "extp %[step1_21], $ac0, 31 \n\t" - : [temp0] "=&r" (temp0), [step1_21] "=r" (step1_21) - : [const_2_power_13] "r" (const_2_power_13), - [step2_26] "r" (step2_26), [step2_21] "r" (step2_21), - [cospi_16_64] "r" (cospi_16_64) - ); + : [temp0] "=&r"(temp0), [step1_21] "=r"(step1_21) + : [const_2_power_13] "r"(const_2_power_13), [step2_26] "r"(step2_26), + 
[step2_21] "r"(step2_21), [cospi_16_64] "r"(cospi_16_64)); temp21 = (step2_21 + step2_26) * cospi_16_64; step1_26 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; - __asm__ __volatile__ ( + __asm__ __volatile__( "sub %[temp0], %[step2_25], %[step2_22] \n\t" "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "madd $ac0, %[temp0], %[cospi_16_64] \n\t" "extp %[step1_22], $ac0, 31 \n\t" - : [temp0] "=&r" (temp0), [step1_22] "=r" (step1_22) - : [const_2_power_13] "r" (const_2_power_13), - [step2_25] "r" (step2_25), [step2_22] "r" (step2_22), - [cospi_16_64] "r" (cospi_16_64) - ); + : [temp0] "=&r"(temp0), [step1_22] "=r"(step1_22) + : [const_2_power_13] "r"(const_2_power_13), [step2_25] "r"(step2_25), + [step2_22] "r"(step2_22), [cospi_16_64] "r"(cospi_16_64)); temp21 = (step2_22 + step2_25) * cospi_16_64; step1_25 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; - __asm__ __volatile__ ( + __asm__ __volatile__( "sub %[temp0], %[step2_24], %[step2_23] \n\t" "mtlo %[const_2_power_13], $ac0 \n\t" "mthi $zero, $ac0 \n\t" "madd $ac0, %[temp0], %[cospi_16_64] \n\t" "extp %[step1_23], $ac0, 31 \n\t" - : [temp0] "=&r" (temp0), [step1_23] "=r" (step1_23) - : [const_2_power_13] "r" (const_2_power_13), - [step2_24] "r" (step2_24), [step2_23] "r" (step2_23), - [cospi_16_64] "r" (cospi_16_64) - ); + : [temp0] "=&r"(temp0), [step1_23] "=r"(step1_23) + : [const_2_power_13] "r"(const_2_power_13), [step2_24] "r"(step2_24), + [step2_23] "r"(step2_23), [cospi_16_64] "r"(cospi_16_64)); temp21 = (step2_23 + step2_24) * cospi_16_64; step1_24 = (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS; @@ -867,16 +837,14 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, void vpx_idct32x32_1024_add_dspr2(const int16_t *input, uint8_t *dest, int dest_stride) { - DECLARE_ALIGNED(32, int16_t, out[32 * 32]); + DECLARE_ALIGNED(32, int16_t, out[32 * 32]); int16_t *outptr = out; uint32_t pos = 45; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp 
%[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); // Rows idct32_rows_dspr2(input, outptr, 32); @@ -887,23 +855,21 @@ void vpx_idct32x32_1024_add_dspr2(const int16_t *input, uint8_t *dest, void vpx_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest, int stride) { - DECLARE_ALIGNED(32, int16_t, out[32 * 32]); + DECLARE_ALIGNED(32, int16_t, out[32 * 32]); int16_t *outptr = out; uint32_t i; uint32_t pos = 45; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); // Rows idct32_rows_dspr2(input, outptr, 8); outptr += 8; - __asm__ __volatile__ ( + __asm__ __volatile__( "sw $zero, 0(%[outptr]) \n\t" "sw $zero, 4(%[outptr]) \n\t" "sw $zero, 8(%[outptr]) \n\t" @@ -918,13 +884,12 @@ void vpx_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest, "sw $zero, 44(%[outptr]) \n\t" : - : [outptr] "r" (outptr) - ); + : [outptr] "r"(outptr)); for (i = 0; i < 31; ++i) { outptr += 32; - __asm__ __volatile__ ( + __asm__ __volatile__( "sw $zero, 0(%[outptr]) \n\t" "sw $zero, 4(%[outptr]) \n\t" "sw $zero, 8(%[outptr]) \n\t" @@ -939,8 +904,7 @@ void vpx_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest, "sw $zero, 44(%[outptr]) \n\t" : - : [outptr] "r" (outptr) - ); + : [outptr] "r"(outptr)); } // Columns @@ -949,43 +913,39 @@ void vpx_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest, void vpx_idct32x32_1_add_dspr2(const int16_t *input, uint8_t *dest, int stride) { - int r, out; - int32_t a1, absa1; - int32_t vector_a1; - int32_t t1, t2, t3, t4; - int32_t vector_1, vector_2, vector_3, vector_4; - uint32_t pos = 45; + int r, out; + int32_t a1, absa1; + int32_t vector_a1; + int32_t t1, t2, t3, t4; + int32_t vector_1, vector_2, vector_3, vector_4; + uint32_t pos = 45; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" + 
__asm__ __volatile__("wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + : + : [pos] "r"(pos)); out = DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input[0]); - __asm__ __volatile__ ( + __asm__ __volatile__( "addi %[out], %[out], 32 \n\t" "sra %[a1], %[out], 6 \n\t" - : [out] "+r" (out), [a1] "=r" (a1) - : - ); + : [out] "+r"(out), [a1] "=r"(a1) + :); if (a1 < 0) { /* use quad-byte * input and output memory are four byte aligned */ - __asm__ __volatile__ ( + __asm__ __volatile__( "abs %[absa1], %[a1] \n\t" "replv.qb %[vector_a1], %[absa1] \n\t" - : [absa1] "=r" (absa1), [vector_a1] "=r" (vector_a1) - : [a1] "r" (a1) - ); + : [absa1] "=r"(absa1), [vector_a1] "=r"(vector_a1) + : [a1] "r"(a1)); for (r = 32; r--;) { - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[t1], 0(%[dest]) \n\t" "lw %[t2], 4(%[dest]) \n\t" "lw %[t3], 8(%[dest]) \n\t" @@ -1014,25 +974,22 @@ void vpx_idct32x32_1_add_dspr2(const int16_t *input, uint8_t *dest, "add %[dest], %[dest], %[stride] \n\t" - : [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3), [t4] "=&r" (t4), - [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2), - [vector_3] "=&r" (vector_3), [vector_4] "=&r" (vector_4), - [dest] "+&r" (dest) - : [stride] "r" (stride), [vector_a1] "r" (vector_a1) - ); + : [t1] "=&r"(t1), [t2] "=&r"(t2), [t3] "=&r"(t3), [t4] "=&r"(t4), + [vector_1] "=&r"(vector_1), [vector_2] "=&r"(vector_2), + [vector_3] "=&r"(vector_3), [vector_4] "=&r"(vector_4), + [dest] "+&r"(dest) + : [stride] "r"(stride), [vector_a1] "r"(vector_a1)); } } else { /* use quad-byte * input and output memory are four byte aligned */ - __asm__ __volatile__ ( - "replv.qb %[vector_a1], %[a1] \n\t" + __asm__ __volatile__("replv.qb %[vector_a1], %[a1] \n\t" - : [vector_a1] "=r" (vector_a1) - : [a1] "r" (a1) - ); + : [vector_a1] "=r"(vector_a1) + : [a1] "r"(a1)); for (r = 32; r--;) { - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[t1], 0(%[dest]) \n\t" "lw %[t2], 4(%[dest]) \n\t" "lw %[t3], 8(%[dest]) \n\t" @@ -1061,12 +1018,11 
@@ void vpx_idct32x32_1_add_dspr2(const int16_t *input, uint8_t *dest, "add %[dest], %[dest], %[stride] \n\t" - : [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3), [t4] "=&r" (t4), - [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2), - [vector_3] "=&r" (vector_3), [vector_4] "=&r" (vector_4), - [dest] "+&r" (dest) - : [stride] "r" (stride), [vector_a1] "r" (vector_a1) - ); + : [t1] "=&r"(t1), [t2] "=&r"(t2), [t3] "=&r"(t3), [t4] "=&r"(t4), + [vector_1] "=&r"(vector_1), [vector_2] "=&r"(vector_2), + [vector_3] "=&r"(vector_3), [vector_4] "=&r"(vector_4), + [dest] "+&r"(dest) + : [stride] "r"(stride), [vector_a1] "r"(vector_a1)); } } } diff --git a/libs/libvpx/vpx_dsp/mips/itrans4_dspr2.c b/libs/libvpx/vpx_dsp/mips/itrans4_dspr2.c index ecb8bd3de7..516ea80f4a 100644 --- a/libs/libvpx/vpx_dsp/mips/itrans4_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/itrans4_dspr2.c @@ -15,13 +15,13 @@ #if HAVE_DSPR2 void vpx_idct4_rows_dspr2(const int16_t *input, int16_t *output) { - int16_t step_0, step_1, step_2, step_3; - int Temp0, Temp1, Temp2, Temp3; + int16_t step_0, step_1, step_2, step_3; + int Temp0, Temp1, Temp2, Temp3; const int const_2_power_13 = 8192; - int i; + int i; - for (i = 4; i--; ) { - __asm__ __volatile__ ( + for (i = 4; i--;) { + __asm__ __volatile__( /* temp_1 = (input[0] + input[2]) * cospi_16_64; step_0 = dct_const_round_shift(temp_1); @@ -83,16 +83,12 @@ void vpx_idct4_rows_dspr2(const int16_t *input, int16_t *output) { "sub %[Temp3], %[step_0], %[step_3] \n\t" "sh %[Temp3], 24(%[output]) \n\t" - : [Temp0] "=&r" (Temp0), [Temp1] "=&r" (Temp1), - [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), - [step_0] "=&r" (step_0), [step_1] "=&r" (step_1), - [step_2] "=&r" (step_2), [step_3] "=&r" (step_3), - [output] "+r" (output) - : [const_2_power_13] "r" (const_2_power_13), - [cospi_8_64] "r" (cospi_8_64), [cospi_16_64] "r" (cospi_16_64), - [cospi_24_64] "r" (cospi_24_64), - [input] "r" (input) - ); + : [Temp0] "=&r"(Temp0), [Temp1] "=&r"(Temp1), [Temp2] 
"=&r"(Temp2), + [Temp3] "=&r"(Temp3), [step_0] "=&r"(step_0), [step_1] "=&r"(step_1), + [step_2] "=&r"(step_2), [step_3] "=&r"(step_3), [output] "+r"(output) + : [const_2_power_13] "r"(const_2_power_13), + [cospi_8_64] "r"(cospi_8_64), [cospi_16_64] "r"(cospi_16_64), + [cospi_24_64] "r"(cospi_24_64), [input] "r"(input)); input += 4; output += 1; @@ -101,27 +97,27 @@ void vpx_idct4_rows_dspr2(const int16_t *input, int16_t *output) { void vpx_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, int dest_stride) { - int16_t step_0, step_1, step_2, step_3; - int Temp0, Temp1, Temp2, Temp3; + int16_t step_0, step_1, step_2, step_3; + int Temp0, Temp1, Temp2, Temp3; const int const_2_power_13 = 8192; - int i; - uint8_t *dest_pix; - uint8_t *cm = vpx_ff_cropTbl; + int i; + uint8_t *dest_pix; + uint8_t *cm = vpx_ff_cropTbl; /* prefetch vpx_ff_cropTbl */ prefetch_load(vpx_ff_cropTbl); - prefetch_load(vpx_ff_cropTbl + 32); - prefetch_load(vpx_ff_cropTbl + 64); - prefetch_load(vpx_ff_cropTbl + 96); + prefetch_load(vpx_ff_cropTbl + 32); + prefetch_load(vpx_ff_cropTbl + 64); + prefetch_load(vpx_ff_cropTbl + 96); prefetch_load(vpx_ff_cropTbl + 128); prefetch_load(vpx_ff_cropTbl + 160); prefetch_load(vpx_ff_cropTbl + 192); prefetch_load(vpx_ff_cropTbl + 224); for (i = 0; i < 4; ++i) { - dest_pix = (dest + i); + dest_pix = (dest + i); - __asm__ __volatile__ ( + __asm__ __volatile__( /* temp_1 = (input[0] + input[2]) * cospi_16_64; step_0 = dct_const_round_shift(temp_1); @@ -206,16 +202,14 @@ void vpx_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, "lbux %[Temp2], %[Temp1](%[cm]) \n\t" "sb %[Temp2], 0(%[dest_pix]) \n\t" - : [Temp0] "=&r" (Temp0), [Temp1] "=&r" (Temp1), - [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), - [step_0] "=&r" (step_0), [step_1] "=&r" (step_1), - [step_2] "=&r" (step_2), [step_3] "=&r" (step_3), - [dest_pix] "+r" (dest_pix) - : [const_2_power_13] "r" (const_2_power_13), - [cospi_8_64] "r" (cospi_8_64), [cospi_16_64] "r" (cospi_16_64), - 
[cospi_24_64] "r" (cospi_24_64), - [input] "r" (input), [cm] "r" (cm), [dest_stride] "r" (dest_stride) - ); + : [Temp0] "=&r"(Temp0), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), + [Temp3] "=&r"(Temp3), [step_0] "=&r"(step_0), [step_1] "=&r"(step_1), + [step_2] "=&r"(step_2), [step_3] "=&r"(step_3), + [dest_pix] "+r"(dest_pix) + : [const_2_power_13] "r"(const_2_power_13), + [cospi_8_64] "r"(cospi_8_64), [cospi_16_64] "r"(cospi_16_64), + [cospi_24_64] "r"(cospi_24_64), [input] "r"(input), [cm] "r"(cm), + [dest_stride] "r"(dest_stride)); input += 4; } @@ -228,11 +222,9 @@ void vpx_idct4x4_16_add_dspr2(const int16_t *input, uint8_t *dest, uint32_t pos = 45; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" + : + : [pos] "r"(pos)); // Rows vpx_idct4_rows_dspr2(input, outptr); @@ -243,73 +235,63 @@ void vpx_idct4x4_16_add_dspr2(const int16_t *input, uint8_t *dest, void vpx_idct4x4_1_add_dspr2(const int16_t *input, uint8_t *dest, int dest_stride) { - int a1, absa1; - int r; - int32_t out; - int t2, vector_a1, vector_a; - uint32_t pos = 45; - int16_t input_dc = input[0]; + int a1, absa1; + int r; + int32_t out; + int t2, vector_a1, vector_a; + uint32_t pos = 45; + int16_t input_dc = input[0]; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + : + : [pos] "r"(pos)); out = DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input_dc); - __asm__ __volatile__ ( + __asm__ __volatile__( "addi %[out], %[out], 8 \n\t" "sra %[a1], %[out], 4 \n\t" - : [out] "+r" (out), [a1] "=r" (a1) - : - ); + : [out] "+r"(out), [a1] "=r"(a1) + :); if (a1 < 0) { /* use quad-byte * input and output memory are four byte aligned */ - __asm__ __volatile__ ( + __asm__ __volatile__( "abs %[absa1], %[a1] \n\t" "replv.qb %[vector_a1], %[absa1] \n\t" - : [absa1] "=r" (absa1), [vector_a1] "=r" 
(vector_a1) - : [a1] "r" (a1) - ); + : [absa1] "=r"(absa1), [vector_a1] "=r"(vector_a1) + : [a1] "r"(a1)); for (r = 4; r--;) { - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[t2], 0(%[dest]) \n\t" "subu_s.qb %[vector_a], %[t2], %[vector_a1] \n\t" "sw %[vector_a], 0(%[dest]) \n\t" "add %[dest], %[dest], %[dest_stride] \n\t" - : [t2] "=&r" (t2), [vector_a] "=&r" (vector_a), - [dest] "+&r" (dest) - : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1) - ); + : [t2] "=&r"(t2), [vector_a] "=&r"(vector_a), [dest] "+&r"(dest) + : [dest_stride] "r"(dest_stride), [vector_a1] "r"(vector_a1)); } } else { /* use quad-byte * input and output memory are four byte aligned */ - __asm__ __volatile__ ( - "replv.qb %[vector_a1], %[a1] \n\t" - : [vector_a1] "=r" (vector_a1) - : [a1] "r" (a1) - ); + __asm__ __volatile__("replv.qb %[vector_a1], %[a1] \n\t" + : [vector_a1] "=r"(vector_a1) + : [a1] "r"(a1)); for (r = 4; r--;) { - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[t2], 0(%[dest]) \n\t" "addu_s.qb %[vector_a], %[t2], %[vector_a1] \n\t" "sw %[vector_a], 0(%[dest]) \n\t" "add %[dest], %[dest], %[dest_stride] \n\t" - : [t2] "=&r" (t2), [vector_a] "=&r" (vector_a), - [dest] "+&r" (dest) - : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1) - ); + : [t2] "=&r"(t2), [vector_a] "=&r"(vector_a), [dest] "+&r"(dest) + : [dest_stride] "r"(dest_stride), [vector_a1] "r"(vector_a1)); } } } diff --git a/libs/libvpx/vpx_dsp/mips/itrans8_dspr2.c b/libs/libvpx/vpx_dsp/mips/itrans8_dspr2.c index 823e845d59..08a6c78b6e 100644 --- a/libs/libvpx/vpx_dsp/mips/itrans8_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/itrans8_dspr2.c @@ -20,8 +20,8 @@ void idct8_rows_dspr2(const int16_t *input, int16_t *output, uint32_t no_rows) { int Temp0, Temp1, Temp2, Temp3, Temp4; int i; - for (i = no_rows; i--; ) { - __asm__ __volatile__ ( + for (i = no_rows; i--;) { + __asm__ __volatile__( /* temp_1 = (input[0] + input[4]) * cospi_16_64; step2_0 = dct_const_round_shift(temp_1); @@ -174,20 
+174,18 @@ void idct8_rows_dspr2(const int16_t *input, int16_t *output, uint32_t no_rows) { "sub %[Temp1], %[step1_0], %[step1_7] \n\t" "sh %[Temp1], 112(%[output]) \n\t" - : [step1_0] "=&r" (step1_0), [step1_1] "=&r" (step1_1), - [step1_2] "=&r" (step1_2), [step1_3] "=&r" (step1_3), - [step1_4] "=&r" (step1_4), [step1_5] "=&r" (step1_5), - [step1_6] "=&r" (step1_6), [step1_7] "=&r" (step1_7), - [Temp0] "=&r" (Temp0), [Temp1] "=&r" (Temp1), - [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), - [Temp4] "=&r" (Temp4) - : [const_2_power_13] "r" (const_2_power_13), - [cospi_16_64] "r" (cospi_16_64), [cospi_28_64] "r" (cospi_28_64), - [cospi_4_64] "r" (cospi_4_64), [cospi_12_64] "r" (cospi_12_64), - [cospi_20_64] "r" (cospi_20_64), [cospi_8_64] "r" (cospi_8_64), - [cospi_24_64] "r" (cospi_24_64), - [output] "r" (output), [input] "r" (input) - ); + : [step1_0] "=&r"(step1_0), [step1_1] "=&r"(step1_1), + [step1_2] "=&r"(step1_2), [step1_3] "=&r"(step1_3), + [step1_4] "=&r"(step1_4), [step1_5] "=&r"(step1_5), + [step1_6] "=&r"(step1_6), [step1_7] "=&r"(step1_7), + [Temp0] "=&r"(Temp0), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), + [Temp3] "=&r"(Temp3), [Temp4] "=&r"(Temp4) + : [const_2_power_13] "r"(const_2_power_13), + [cospi_16_64] "r"(cospi_16_64), [cospi_28_64] "r"(cospi_28_64), + [cospi_4_64] "r"(cospi_4_64), [cospi_12_64] "r"(cospi_12_64), + [cospi_20_64] "r"(cospi_20_64), [cospi_8_64] "r"(cospi_8_64), + [cospi_24_64] "r"(cospi_24_64), [output] "r"(output), + [input] "r"(input)); input += 8; output += 1; @@ -205,18 +203,18 @@ void idct8_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, /* prefetch vpx_ff_cropTbl */ prefetch_load(vpx_ff_cropTbl); - prefetch_load(vpx_ff_cropTbl + 32); - prefetch_load(vpx_ff_cropTbl + 64); - prefetch_load(vpx_ff_cropTbl + 96); + prefetch_load(vpx_ff_cropTbl + 32); + prefetch_load(vpx_ff_cropTbl + 64); + prefetch_load(vpx_ff_cropTbl + 96); prefetch_load(vpx_ff_cropTbl + 128); prefetch_load(vpx_ff_cropTbl + 160); 
prefetch_load(vpx_ff_cropTbl + 192); prefetch_load(vpx_ff_cropTbl + 224); for (i = 0; i < 8; ++i) { - dest_pix = (dest + i); + dest_pix = (dest + i); - __asm__ __volatile__ ( + __asm__ __volatile__( /* temp_1 = (input[0] + input[4]) * cospi_16_64; step2_0 = dct_const_round_shift(temp_1); @@ -423,20 +421,18 @@ void idct8_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, "lbux %[Temp2], %[Temp1](%[cm]) \n\t" "sb %[Temp2], 0(%[dest_pix]) \n\t" - : [step1_0] "=&r" (step1_0), [step1_1] "=&r" (step1_1), - [step1_2] "=&r" (step1_2), [step1_3] "=&r" (step1_3), - [step1_4] "=&r" (step1_4), [step1_5] "=&r" (step1_5), - [step1_6] "=&r" (step1_6), [step1_7] "=&r" (step1_7), - [Temp0] "=&r" (Temp0), [Temp1] "=&r" (Temp1), - [Temp2] "=&r" (Temp2), [Temp3] "=&r" (Temp3), - [dest_pix] "+r" (dest_pix) - : [const_2_power_13] "r" (const_2_power_13), - [cospi_16_64] "r" (cospi_16_64), [cospi_28_64] "r" (cospi_28_64), - [cospi_4_64] "r" (cospi_4_64), [cospi_12_64] "r" (cospi_12_64), - [cospi_20_64] "r" (cospi_20_64), [cospi_8_64] "r" (cospi_8_64), - [cospi_24_64] "r" (cospi_24_64), - [input] "r" (input), [cm] "r" (cm), [dest_stride] "r" (dest_stride) - ); + : [step1_0] "=&r"(step1_0), [step1_1] "=&r"(step1_1), + [step1_2] "=&r"(step1_2), [step1_3] "=&r"(step1_3), + [step1_4] "=&r"(step1_4), [step1_5] "=&r"(step1_5), + [step1_6] "=&r"(step1_6), [step1_7] "=&r"(step1_7), + [Temp0] "=&r"(Temp0), [Temp1] "=&r"(Temp1), [Temp2] "=&r"(Temp2), + [Temp3] "=&r"(Temp3), [dest_pix] "+r"(dest_pix) + : [const_2_power_13] "r"(const_2_power_13), + [cospi_16_64] "r"(cospi_16_64), [cospi_28_64] "r"(cospi_28_64), + [cospi_4_64] "r"(cospi_4_64), [cospi_12_64] "r"(cospi_12_64), + [cospi_20_64] "r"(cospi_20_64), [cospi_8_64] "r"(cospi_8_64), + [cospi_24_64] "r"(cospi_24_64), [input] "r"(input), [cm] "r"(cm), + [dest_stride] "r"(dest_stride)); input += 8; } @@ -449,11 +445,7 @@ void vpx_idct8x8_64_add_dspr2(const int16_t *input, uint8_t *dest, uint32_t pos = 45; /* bit positon for extract from acc */ - 
__asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" : : [pos] "r"(pos)); // First transform rows idct8_rows_dspr2(input, outptr, 8); @@ -469,18 +461,14 @@ void vpx_idct8x8_12_add_dspr2(const int16_t *input, uint8_t *dest, uint32_t pos = 45; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" : : [pos] "r"(pos)); // First transform rows idct8_rows_dspr2(input, outptr, 4); outptr += 4; - __asm__ __volatile__ ( + __asm__ __volatile__( "sw $zero, 0(%[outptr]) \n\t" "sw $zero, 4(%[outptr]) \n\t" "sw $zero, 16(%[outptr]) \n\t" @@ -499,9 +487,7 @@ void vpx_idct8x8_12_add_dspr2(const int16_t *input, uint8_t *dest, "sw $zero, 116(%[outptr]) \n\t" : - : [outptr] "r" (outptr) - ); - + : [outptr] "r"(outptr)); // Then transform columns and add to dest idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride); @@ -516,35 +502,31 @@ void vpx_idct8x8_1_add_dspr2(const int16_t *input, uint8_t *dest, int32_t t1, t2, vector_a1, vector_1, vector_2; /* bit positon for extract from acc */ - __asm__ __volatile__ ( - "wrdsp %[pos], 1 \n\t" + __asm__ __volatile__("wrdsp %[pos], 1 \n\t" - : - : [pos] "r" (pos) - ); + : + : [pos] "r"(pos)); out = DCT_CONST_ROUND_SHIFT_TWICE_COSPI_16_64(input[0]); - __asm__ __volatile__ ( + __asm__ __volatile__( "addi %[out], %[out], 16 \n\t" "sra %[a1], %[out], 5 \n\t" - : [out] "+r" (out), [a1] "=r" (a1) - : - ); + : [out] "+r"(out), [a1] "=r"(a1) + :); if (a1 < 0) { /* use quad-byte * input and output memory are four byte aligned */ - __asm__ __volatile__ ( + __asm__ __volatile__( "abs %[absa1], %[a1] \n\t" "replv.qb %[vector_a1], %[absa1] \n\t" - : [absa1] "=r" (absa1), [vector_a1] "=r" (vector_a1) - : [a1] "r" (a1) - ); + : [absa1] "=r"(absa1), [vector_a1] "=r"(vector_a1) + : [a1] "r"(a1)); for (r = 8; r--;) { - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[t1], 
0(%[dest]) \n\t" "lw %[t2], 4(%[dest]) \n\t" "subu_s.qb %[vector_1], %[t1], %[vector_a1] \n\t" @@ -553,24 +535,20 @@ void vpx_idct8x8_1_add_dspr2(const int16_t *input, uint8_t *dest, "sw %[vector_2], 4(%[dest]) \n\t" "add %[dest], %[dest], %[dest_stride] \n\t" - : [t1] "=&r" (t1), [t2] "=&r" (t2), - [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2), - [dest] "+&r" (dest) - : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1) - ); + : [t1] "=&r"(t1), [t2] "=&r"(t2), [vector_1] "=&r"(vector_1), + [vector_2] "=&r"(vector_2), [dest] "+&r"(dest) + : [dest_stride] "r"(dest_stride), [vector_a1] "r"(vector_a1)); } } else { /* use quad-byte * input and output memory are four byte aligned */ - __asm__ __volatile__ ( - "replv.qb %[vector_a1], %[a1] \n\t" + __asm__ __volatile__("replv.qb %[vector_a1], %[a1] \n\t" - : [vector_a1] "=r" (vector_a1) - : [a1] "r" (a1) - ); + : [vector_a1] "=r"(vector_a1) + : [a1] "r"(a1)); for (r = 8; r--;) { - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[t1], 0(%[dest]) \n\t" "lw %[t2], 4(%[dest]) \n\t" "addu_s.qb %[vector_1], %[t1], %[vector_a1] \n\t" @@ -579,11 +557,9 @@ void vpx_idct8x8_1_add_dspr2(const int16_t *input, uint8_t *dest, "sw %[vector_2], 4(%[dest]) \n\t" "add %[dest], %[dest], %[dest_stride] \n\t" - : [t1] "=&r" (t1), [t2] "=&r" (t2), - [vector_1] "=&r" (vector_1), [vector_2] "=&r" (vector_2), - [dest] "+r" (dest) - : [dest_stride] "r" (dest_stride), [vector_a1] "r" (vector_a1) - ); + : [t1] "=&r"(t1), [t2] "=&r"(t2), [vector_1] "=&r"(vector_1), + [vector_2] "=&r"(vector_2), [dest] "+r"(dest) + : [dest_stride] "r"(dest_stride), [vector_a1] "r"(vector_a1)); } } } @@ -602,20 +578,20 @@ void iadst8_dspr2(const int16_t *input, int16_t *output) { x7 = input[6]; if (!(x0 | x1 | x2 | x3 | x4 | x5 | x6 | x7)) { - output[0] = output[1] = output[2] = output[3] = output[4] - = output[5] = output[6] = output[7] = 0; + output[0] = output[1] = output[2] = output[3] = output[4] = output[5] = + output[6] = output[7] = 0; 
return; } // stage 1 - s0 = cospi_2_64 * x0 + cospi_30_64 * x1; - s1 = cospi_30_64 * x0 - cospi_2_64 * x1; + s0 = cospi_2_64 * x0 + cospi_30_64 * x1; + s1 = cospi_30_64 * x0 - cospi_2_64 * x1; s2 = cospi_10_64 * x2 + cospi_22_64 * x3; s3 = cospi_22_64 * x2 - cospi_10_64 * x3; s4 = cospi_18_64 * x4 + cospi_14_64 * x5; s5 = cospi_14_64 * x4 - cospi_18_64 * x5; - s6 = cospi_26_64 * x6 + cospi_6_64 * x7; - s7 = cospi_6_64 * x6 - cospi_26_64 * x7; + s6 = cospi_26_64 * x6 + cospi_6_64 * x7; + s7 = cospi_6_64 * x6 - cospi_26_64 * x7; x0 = ROUND_POWER_OF_TWO((s0 + s4), DCT_CONST_BITS); x1 = ROUND_POWER_OF_TWO((s1 + s5), DCT_CONST_BITS); @@ -631,10 +607,10 @@ void iadst8_dspr2(const int16_t *input, int16_t *output) { s1 = x1; s2 = x2; s3 = x3; - s4 = cospi_8_64 * x4 + cospi_24_64 * x5; - s5 = cospi_24_64 * x4 - cospi_8_64 * x5; - s6 = -cospi_24_64 * x6 + cospi_8_64 * x7; - s7 = cospi_8_64 * x6 + cospi_24_64 * x7; + s4 = cospi_8_64 * x4 + cospi_24_64 * x5; + s5 = cospi_24_64 * x4 - cospi_8_64 * x5; + s6 = -cospi_24_64 * x6 + cospi_8_64 * x7; + s7 = cospi_8_64 * x6 + cospi_24_64 * x7; x0 = s0 + s2; x1 = s1 + s3; @@ -656,13 +632,13 @@ void iadst8_dspr2(const int16_t *input, int16_t *output) { x6 = ROUND_POWER_OF_TWO((s6), DCT_CONST_BITS); x7 = ROUND_POWER_OF_TWO((s7), DCT_CONST_BITS); - output[0] = x0; + output[0] = x0; output[1] = -x4; - output[2] = x6; + output[2] = x6; output[3] = -x2; - output[4] = x3; + output[4] = x3; output[5] = -x7; - output[6] = x5; + output[6] = x5; output[7] = -x1; } #endif // HAVE_DSPR2 diff --git a/libs/libvpx/vpx_dsp/mips/loopfilter_16_msa.c b/libs/libvpx/vpx_dsp/mips/loopfilter_16_msa.c index b7c9f7bd0e..2c8a10fc5d 100644 --- a/libs/libvpx/vpx_dsp/mips/loopfilter_16_msa.c +++ b/libs/libvpx/vpx_dsp/mips/loopfilter_16_msa.c @@ -11,8 +11,7 @@ #include "vpx_ports/mem.h" #include "vpx_dsp/mips/loopfilter_msa.h" -int32_t vpx_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch, - uint8_t *filter48, +int32_t vpx_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t 
pitch, uint8_t *filter48, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, const uint8_t *thresh_ptr) { @@ -33,8 +32,8 @@ int32_t vpx_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch, limit = (v16u8)__msa_fill_b(*limit_ptr); /* mask and hev */ - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat); VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out); @@ -43,9 +42,8 @@ int32_t vpx_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch, return 1; } else { - ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, - zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, - q2_r, q3_r); + ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero, + q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r); VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r); @@ -107,8 +105,8 @@ void vpx_hz_lpf_t16_16w(uint8_t *src, int32_t pitch, uint8_t *filter48) { } else { src -= 7 * pitch; - ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, p3, zero, p2, - zero, p1, zero, p0, p7_r_in, p6_r_in, p5_r_in, p4_r_in, p3_r_in, + ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, p3, zero, p2, zero, + p1, zero, p0, p7_r_in, p6_r_in, p5_r_in, p4_r_in, p3_r_in, p2_r_in, p1_r_in, p0_r_in); q0_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q0); @@ -408,8 +406,7 @@ void vpx_hz_lpf_t16_16w(uint8_t *src, int32_t pitch, uint8_t *filter48) { void vpx_lpf_horizontal_16_dual_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, - const uint8_t *thresh_ptr, - int32_t count) { + const uint8_t *thresh_ptr, int32_t count) { DECLARE_ALIGNED(32, uint8_t, filter48[16 * 8]); uint8_t early_exit = 0; @@ -423,11 +420,10 @@ void vpx_lpf_horizontal_16_dual_msa(uint8_t 
*src, int32_t pitch, } } -void vpx_lpf_horizontal_16_msa(uint8_t *src, int32_t pitch, - const uint8_t *b_limit_ptr, - const uint8_t *limit_ptr, - const uint8_t *thresh_ptr, - int32_t count) { +static void mb_lpf_horizontal_edge(uint8_t *src, int32_t pitch, + const uint8_t *b_limit_ptr, + const uint8_t *limit_ptr, + const uint8_t *thresh_ptr, int32_t count) { if (1 == count) { uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d; uint64_t dword0, dword1; @@ -449,8 +445,8 @@ void vpx_lpf_horizontal_16_msa(uint8_t *src, int32_t pitch, b_limit = (v16u8)__msa_fill_b(*b_limit_ptr); limit = (v16u8)__msa_fill_b(*limit_ptr); - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat); VP9_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out); @@ -472,9 +468,8 @@ void vpx_lpf_horizontal_16_msa(uint8_t *src, int32_t pitch, p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8); /* convert 16 bit output data into 8 bit */ - PCKEV_B4_SH(zero, p2_filter8, zero, p1_filter8, zero, p0_filter8, - zero, q0_filter8, p2_filter8, p1_filter8, p0_filter8, - q0_filter8); + PCKEV_B4_SH(zero, p2_filter8, zero, p1_filter8, zero, p0_filter8, zero, + q0_filter8, p2_filter8, p1_filter8, p0_filter8, q0_filter8); PCKEV_B2_SH(zero, q1_filter8, zero, q2_filter8, q1_filter8, q2_filter8); /* store pixel values */ @@ -648,14 +643,28 @@ void vpx_lpf_horizontal_16_msa(uint8_t *src, int32_t pitch, } } +void vpx_lpf_horizontal_edge_8_msa(uint8_t *src, int32_t pitch, + const uint8_t *b_limit_ptr, + const uint8_t *limit_ptr, + const uint8_t *thresh_ptr) { + mb_lpf_horizontal_edge(src, pitch, b_limit_ptr, limit_ptr, thresh_ptr, 1); +} + +void vpx_lpf_horizontal_edge_16_msa(uint8_t *src, int32_t pitch, + const uint8_t *b_limit_ptr, + const uint8_t *limit_ptr, + const uint8_t *thresh_ptr) { + mb_lpf_horizontal_edge(src, pitch, 
b_limit_ptr, limit_ptr, thresh_ptr, 2); +} + static void transpose_16x8_to_8x16(uint8_t *input, int32_t in_pitch, uint8_t *output, int32_t out_pitch) { v16u8 p7_org, p6_org, p5_org, p4_org, p3_org, p2_org, p1_org, p0_org; v16i8 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; - LD_UB8(input, in_pitch, - p7_org, p6_org, p5_org, p4_org, p3_org, p2_org, p1_org, p0_org); + LD_UB8(input, in_pitch, p7_org, p6_org, p5_org, p4_org, p3_org, p2_org, + p1_org, p0_org); /* 8x8 transpose */ TRANSPOSE8x8_UB_UB(p7_org, p6_org, p5_org, p4_org, p3_org, p2_org, p1_org, p0_org, p7, p6, p5, p4, p3, p2, p1, p0); @@ -685,8 +694,8 @@ static void transpose_8x16_to_16x8(uint8_t *input, int32_t in_pitch, ST_UB8(p7_o, p6_o, p5_o, p4_o, p3_o, p2_o, p1_o, p0_o, output, out_pitch); } -static void transpose_16x16(uint8_t *input, int32_t in_pitch, - uint8_t *output, int32_t out_pitch) { +static void transpose_16x16(uint8_t *input, int32_t in_pitch, uint8_t *output, + int32_t out_pitch) { v16u8 row0, row1, row2, row3, row4, row5, row6, row7; v16u8 row8, row9, row10, row11, row12, row13, row14, row15; v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; @@ -695,12 +704,11 @@ static void transpose_16x16(uint8_t *input, int32_t in_pitch, LD_UB8(input, in_pitch, row0, row1, row2, row3, row4, row5, row6, row7); input += (8 * in_pitch); - LD_UB8(input, in_pitch, - row8, row9, row10, row11, row12, row13, row14, row15); + LD_UB8(input, in_pitch, row8, row9, row10, row11, row12, row13, row14, row15); - TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, - row8, row9, row10, row11, row12, row13, row14, row15, - p7, p6, p5, p4, p3, p2, p1, p0); + TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8, + row9, row10, row11, row12, row13, row14, row15, p7, p6, + p5, p4, p3, p2, p1, p0); /* transpose 16x8 matrix into 8x16 */ /* total 8 intermediate register and 32 instructions */ @@ -765,8 
+773,8 @@ int32_t vpx_vt_lpf_t4_and_t8_8w(uint8_t *src, uint8_t *filter48, limit = (v16u8)__msa_fill_b(*limit_ptr); /* mask and hev */ - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); /* flat4 */ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat); /* filter4 */ @@ -780,9 +788,8 @@ int32_t vpx_vt_lpf_t4_and_t8_8w(uint8_t *src, uint8_t *filter48, ST4x8_UB(vec2, vec3, (src_org - 2), pitch_org); return 1; } else { - ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, - zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, - q3_r); + ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero, + q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r); VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r); @@ -850,9 +857,9 @@ int32_t vpx_vt_lpf_t16_8w(uint8_t *src, uint8_t *src_org, int32_t pitch, } else { src -= 7 * 16; - ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, p3, zero, p2, - zero, p1, zero, p0, p7_r_in, p6_r_in, p5_r_in, p4_r_in, - p3_r_in, p2_r_in, p1_r_in, p0_r_in); + ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, p3, zero, p2, zero, + p1, zero, p0, p7_r_in, p6_r_in, p5_r_in, p4_r_in, p3_r_in, + p2_r_in, p1_r_in, p0_r_in); q0_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q0); tmp0_r = p7_r_in << 3; @@ -1042,9 +1049,9 @@ void vpx_lpf_vertical_16_msa(uint8_t *src, int32_t pitch, transpose_16x8_to_8x16(src - 8, pitch, transposed_input, 16); - early_exit = vpx_vt_lpf_t4_and_t8_8w((transposed_input + 16 * 8), - &filter48[0], src, pitch, b_limit_ptr, - limit_ptr, thresh_ptr); + early_exit = + vpx_vt_lpf_t4_and_t8_8w((transposed_input + 16 * 8), &filter48[0], src, + pitch, b_limit_ptr, limit_ptr, thresh_ptr); if (0 == early_exit) { early_exit = vpx_vt_lpf_t16_8w((transposed_input + 16 * 8), src, pitch, @@ 
-1079,8 +1086,8 @@ int32_t vpx_vt_lpf_t4_and_t8_16w(uint8_t *src, uint8_t *filter48, limit = (v16u8)__msa_fill_b(*limit_ptr); /* mask and hev */ - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); /* flat4 */ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat); /* filter4 */ @@ -1099,9 +1106,8 @@ int32_t vpx_vt_lpf_t4_and_t8_16w(uint8_t *src, uint8_t *filter48, return 1; } else { - ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, - zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, - q3_r); + ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero, + q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r); VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r); ILVL_B4_UH(zero, p3, zero, p2, zero, p1, zero, p0, p3_l, p2_l, p1_l, p0_l); @@ -1182,9 +1188,9 @@ int32_t vpx_vt_lpf_t16_16w(uint8_t *src, uint8_t *src_org, int32_t pitch, } else { src -= 7 * 16; - ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, p3, zero, p2, - zero, p1, zero, p0, p7_r_in, p6_r_in, p5_r_in, p4_r_in, - p3_r_in, p2_r_in, p1_r_in, p0_r_in); + ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, p3, zero, p2, zero, + p1, zero, p0, p7_r_in, p6_r_in, p5_r_in, p4_r_in, p3_r_in, + p2_r_in, p1_r_in, p0_r_in); q0_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q0); tmp0_r = p7_r_in << 3; @@ -1465,9 +1471,9 @@ void vpx_lpf_vertical_16_dual_msa(uint8_t *src, int32_t pitch, transpose_16x16((src - 8), pitch, &transposed_input[0], 16); - early_exit = vpx_vt_lpf_t4_and_t8_16w((transposed_input + 16 * 8), - &filter48[0], src, pitch, b_limit_ptr, - limit_ptr, thresh_ptr); + early_exit = + vpx_vt_lpf_t4_and_t8_16w((transposed_input + 16 * 8), &filter48[0], src, + pitch, b_limit_ptr, limit_ptr, thresh_ptr); if (0 == early_exit) { early_exit = 
vpx_vt_lpf_t16_16w((transposed_input + 16 * 8), src, pitch, diff --git a/libs/libvpx/vpx_dsp/mips/loopfilter_4_msa.c b/libs/libvpx/vpx_dsp/mips/loopfilter_4_msa.c index daf5f38bf7..e0079665f7 100644 --- a/libs/libvpx/vpx_dsp/mips/loopfilter_4_msa.c +++ b/libs/libvpx/vpx_dsp/mips/loopfilter_4_msa.c @@ -13,14 +13,11 @@ void vpx_lpf_horizontal_4_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, - const uint8_t *thresh_ptr, - int32_t count) { + const uint8_t *thresh_ptr) { uint64_t p1_d, p0_d, q0_d, q1_d; v16u8 mask, hev, flat, thresh, b_limit, limit; v16u8 p3, p2, p1, p0, q3, q2, q1, q0, p1_out, p0_out, q0_out, q1_out; - (void)count; - /* load vector elements */ LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); @@ -28,8 +25,8 @@ void vpx_lpf_horizontal_4_msa(uint8_t *src, int32_t pitch, b_limit = (v16u8)__msa_fill_b(*b_limit_ptr); limit = (v16u8)__msa_fill_b(*limit_ptr); - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); VP9_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out); p1_d = __msa_copy_u_d((v2i64)p1_out, 0); @@ -64,8 +61,8 @@ void vpx_lpf_horizontal_4_dual_msa(uint8_t *src, int32_t pitch, limit1 = (v16u8)__msa_fill_b(*limit1_ptr); limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, - hev, mask, flat); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev, + mask, flat); VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1); ST_UB4(p1, p0, q0, q1, (src - 2 * pitch), pitch); @@ -74,24 +71,21 @@ void vpx_lpf_horizontal_4_dual_msa(uint8_t *src, int32_t pitch, void vpx_lpf_vertical_4_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, - const uint8_t *thresh_ptr, - int32_t count) { + const uint8_t *thresh_ptr) { 
v16u8 mask, hev, flat, limit, thresh, b_limit; v16u8 p3, p2, p1, p0, q3, q2, q1, q0; v8i16 vec0, vec1, vec2, vec3; - (void)count; - LD_UB8((src - 4), pitch, p3, p2, p1, p0, q0, q1, q2, q3); thresh = (v16u8)__msa_fill_b(*thresh_ptr); b_limit = (v16u8)__msa_fill_b(*b_limit_ptr); limit = (v16u8)__msa_fill_b(*limit_ptr); - TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3, - p3, p2, p1, p0, q0, q1, q2, q3); - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); + TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3, p3, p2, p1, p0, q0, q1, q2, + q3); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); VP9_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1); ILVR_B2_SH(p0, p1, q1, q0, vec0, vec1); ILVRL_H2_SH(vec1, vec0, vec2, vec3); @@ -117,12 +111,12 @@ void vpx_lpf_vertical_4_dual_msa(uint8_t *src, int32_t pitch, v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; LD_UB8(src - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7); - LD_UB8(src - 4 + (8 * pitch), pitch, - row8, row9, row10, row11, row12, row13, row14, row15); + LD_UB8(src - 4 + (8 * pitch), pitch, row8, row9, row10, row11, row12, row13, + row14, row15); - TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, - row8, row9, row10, row11, row12, row13, row14, row15, - p3, p2, p1, p0, q0, q1, q2, q3); + TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, row8, + row9, row10, row11, row12, row13, row14, row15, p3, p2, + p1, p0, q0, q1, q2, q3); thresh0 = (v16u8)__msa_fill_b(*thresh0_ptr); thresh1 = (v16u8)__msa_fill_b(*thresh1_ptr); @@ -136,8 +130,8 @@ void vpx_lpf_vertical_4_dual_msa(uint8_t *src, int32_t pitch, limit1 = (v16u8)__msa_fill_b(*limit1_ptr); limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, - hev, mask, flat); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, hev, + mask, flat); 
VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1); ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1); ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3); diff --git a/libs/libvpx/vpx_dsp/mips/loopfilter_8_msa.c b/libs/libvpx/vpx_dsp/mips/loopfilter_8_msa.c index 00b6db5509..403e5dc51b 100644 --- a/libs/libvpx/vpx_dsp/mips/loopfilter_8_msa.c +++ b/libs/libvpx/vpx_dsp/mips/loopfilter_8_msa.c @@ -13,8 +13,7 @@ void vpx_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, - const uint8_t *thresh_ptr, - int32_t count) { + const uint8_t *thresh_ptr) { uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d; v16u8 mask, hev, flat, thresh, b_limit, limit; v16u8 p3, p2, p1, p0, q3, q2, q1, q0; @@ -23,8 +22,6 @@ void vpx_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch, v8u16 p3_r, p2_r, p1_r, p0_r, q3_r, q2_r, q1_r, q0_r; v16i8 zero = { 0 }; - (void)count; - /* load vector elements */ LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); @@ -32,8 +29,8 @@ void vpx_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch, b_limit = (v16u8)__msa_fill_b(*b_limit_ptr); limit = (v16u8)__msa_fill_b(*limit_ptr); - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat); VP9_LPF_FILTER4_8W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out); @@ -46,16 +43,14 @@ void vpx_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch, q1_d = __msa_copy_u_d((v2i64)q1_out, 0); SD4(p1_d, p0_d, q0_d, q1_d, (src - 2 * pitch), pitch); } else { - ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, - zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, - q2_r, q3_r); + ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero, + q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r); VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filter8, 
p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8); /* convert 16 bit output data into 8 bit */ - PCKEV_B4_SH(zero, p2_filter8, zero, p1_filter8, zero, p0_filter8, - zero, q0_filter8, p2_filter8, p1_filter8, p0_filter8, - q0_filter8); + PCKEV_B4_SH(zero, p2_filter8, zero, p1_filter8, zero, p0_filter8, zero, + q0_filter8, p2_filter8, p1_filter8, p0_filter8, q0_filter8); PCKEV_B2_SH(zero, q1_filter8, zero, q2_filter8, q1_filter8, q2_filter8); /* store pixel values */ @@ -83,13 +78,10 @@ void vpx_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch, } } -void vpx_lpf_horizontal_8_dual_msa(uint8_t *src, int32_t pitch, - const uint8_t *b_limit0, - const uint8_t *limit0, - const uint8_t *thresh0, - const uint8_t *b_limit1, - const uint8_t *limit1, - const uint8_t *thresh1) { +void vpx_lpf_horizontal_8_dual_msa( + uint8_t *src, int32_t pitch, const uint8_t *b_limit0, const uint8_t *limit0, + const uint8_t *thresh0, const uint8_t *b_limit1, const uint8_t *limit1, + const uint8_t *thresh1) { v16u8 p3, p2, p1, p0, q3, q2, q1, q0; v16u8 p2_out, p1_out, p0_out, q0_out, q1_out, q2_out; v16u8 flat, mask, hev, tmp, thresh, b_limit, limit; @@ -115,17 +107,16 @@ void vpx_lpf_horizontal_8_dual_msa(uint8_t *src, int32_t pitch, limit = (v16u8)__msa_ilvr_d((v2i64)tmp, (v2i64)limit); /* mask and hev */ - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat); VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out, q1_out); if (__msa_test_bz_v(flat)) { ST_UB4(p1_out, p0_out, q0_out, q1_out, (src - 2 * pitch), pitch); } else { - ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, - zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, - q2_r, q3_r); + ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero, + q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, 
q3_r); VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r); @@ -161,8 +152,7 @@ void vpx_lpf_horizontal_8_dual_msa(uint8_t *src, int32_t pitch, void vpx_lpf_vertical_8_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, - const uint8_t *thresh_ptr, - int32_t count) { + const uint8_t *thresh_ptr) { v16u8 p3, p2, p1, p0, q3, q2, q1, q0; v16u8 p1_out, p0_out, q0_out, q1_out; v16u8 flat, mask, hev, thresh, b_limit, limit; @@ -171,21 +161,19 @@ void vpx_lpf_vertical_8_msa(uint8_t *src, int32_t pitch, v16u8 zero = { 0 }; v8i16 vec0, vec1, vec2, vec3, vec4; - (void)count; - /* load vector elements */ LD_UB8(src - 4, pitch, p3, p2, p1, p0, q0, q1, q2, q3); - TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3, - p3, p2, p1, p0, q0, q1, q2, q3); + TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3, p3, p2, p1, p0, q0, q1, q2, + q3); thresh = (v16u8)__msa_fill_b(*thresh_ptr); b_limit = (v16u8)__msa_fill_b(*b_limit_ptr); limit = (v16u8)__msa_fill_b(*limit_ptr); /* mask and hev */ - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); /* flat4 */ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat); /* filter4 */ @@ -203,9 +191,8 @@ void vpx_lpf_vertical_8_msa(uint8_t *src, int32_t pitch, src += 4 * pitch; ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch); } else { - ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, - zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, - q3_r); + ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero, + q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r); VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r); /* convert 16 bit output data into 8 bit */ @@ -238,11 +225,9 @@ void 
vpx_lpf_vertical_8_msa(uint8_t *src, int32_t pitch, } void vpx_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch, - const uint8_t *b_limit0, - const uint8_t *limit0, + const uint8_t *b_limit0, const uint8_t *limit0, const uint8_t *thresh0, - const uint8_t *b_limit1, - const uint8_t *limit1, + const uint8_t *b_limit1, const uint8_t *limit1, const uint8_t *thresh1) { uint8_t *temp_src; v16u8 p3, p2, p1, p0, q3, q2, q1, q0; @@ -263,9 +248,9 @@ void vpx_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch, LD_UB8(temp_src, pitch, q3, q2, q1, q0, row12, row13, row14, row15); /* transpose 16x8 matrix into 8x16 */ - TRANSPOSE16x8_UB_UB(p0, p1, p2, p3, row4, row5, row6, row7, - q3, q2, q1, q0, row12, row13, row14, row15, - p3, p2, p1, p0, q0, q1, q2, q3); + TRANSPOSE16x8_UB_UB(p0, p1, p2, p3, row4, row5, row6, row7, q3, q2, q1, q0, + row12, row13, row14, row15, p3, p2, p1, p0, q0, q1, q2, + q3); thresh = (v16u8)__msa_fill_b(*thresh0); vec0 = (v8i16)__msa_fill_b(*thresh1); @@ -280,8 +265,8 @@ void vpx_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch, limit = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)limit); /* mask and hev */ - LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, - hev, mask, flat); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, + mask, flat); /* flat4 */ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat); /* filter4 */ @@ -298,9 +283,8 @@ void vpx_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch, src += 8 * pitch; ST4x8_UB(vec4, vec5, src, pitch); } else { - ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, - zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, - q3_r); + ILVR_B8_UH(zero, p3, zero, p2, zero, p1, zero, p0, zero, q0, zero, q1, zero, + q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r); VP9_FILTER8(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r); diff --git 
a/libs/libvpx/vpx_dsp/mips/loopfilter_filters_dspr2.c b/libs/libvpx/vpx_dsp/mips/loopfilter_filters_dspr2.c index 99a96d89b9..f1743679a7 100644 --- a/libs/libvpx/vpx_dsp/mips/loopfilter_filters_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/loopfilter_filters_dspr2.c @@ -19,34 +19,30 @@ #include "vpx_mem/vpx_mem.h" #if HAVE_DSPR2 -void vpx_lpf_horizontal_4_dspr2(unsigned char *s, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { - uint8_t i; - uint32_t mask; - uint32_t hev; - uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; - uint8_t *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6; - uint32_t thresh_vec, flimit_vec, limit_vec; - uint32_t uflimit, ulimit, uthresh; +void vpx_lpf_horizontal_4_dspr2(unsigned char *s, int pitch, + const uint8_t *blimit, const uint8_t *limit, + const uint8_t *thresh) { + uint8_t i; + uint32_t mask; + uint32_t hev; + uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; + uint8_t *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6; + uint32_t thresh_vec, flimit_vec, limit_vec; + uint32_t uflimit, ulimit, uthresh; uflimit = *blimit; ulimit = *limit; uthresh = *thresh; /* create quad-byte */ - __asm__ __volatile__ ( + __asm__ __volatile__( "replv.qb %[thresh_vec], %[uthresh] \n\t" "replv.qb %[flimit_vec], %[uflimit] \n\t" "replv.qb %[limit_vec], %[ulimit] \n\t" - : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec), - [limit_vec] "=r" (limit_vec) - : [uthresh] "r" (uthresh), [uflimit] "r" (uflimit), [ulimit] "r" (ulimit) - ); + : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec), + [limit_vec] "=r"(limit_vec) + : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit)); /* prefetch data for store */ prefetch_store(s); @@ -63,49 +59,44 @@ void vpx_lpf_horizontal_4_dspr2(unsigned char *s, s5 = s4 + pitch; s6 = s5 + pitch; - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[p1], (%[s1]) \n\t" "lw %[p2], (%[s2]) \n\t" "lw %[p3], (%[s3]) \n\t" "lw %[p4], (%[s4]) \n\t" - : [p1] "=&r" (p1), [p2] 
"=&r" (p2), [p3] "=&r" (p3), [p4] "=&r" (p4) - : [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4) - ); + : [p1] "=&r"(p1), [p2] "=&r"(p2), [p3] "=&r"(p3), [p4] "=&r"(p4) + : [s1] "r"(s1), [s2] "r"(s2), [s3] "r"(s3), [s4] "r"(s4)); /* if (p1 - p4 == 0) and (p2 - p3 == 0) mask will be zero and filtering is not needed */ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[pm1], (%[sm1]) \n\t" "lw %[p0], (%[s0]) \n\t" "lw %[p5], (%[s5]) \n\t" "lw %[p6], (%[s6]) \n\t" - : [pm1] "=&r" (pm1), [p0] "=&r" (p0), [p5] "=&r" (p5), - [p6] "=&r" (p6) - : [sm1] "r" (sm1), [s0] "r" (s0), [s5] "r" (s5), [s6] "r" (s6) - ); + : [pm1] "=&r"(pm1), [p0] "=&r"(p0), [p5] "=&r"(p5), [p6] "=&r"(p6) + : [sm1] "r"(sm1), [s0] "r"(s0), [s5] "r"(s5), [s6] "r"(s6)); - filter_hev_mask_dspr2(limit_vec, flimit_vec, p1, p2, - pm1, p0, p3, p4, p5, p6, - thresh_vec, &hev, &mask); + filter_hev_mask_dspr2(limit_vec, flimit_vec, p1, p2, pm1, p0, p3, p4, p5, + p6, thresh_vec, &hev, &mask); /* if mask == 0 do filtering is not needed */ if (mask) { /* filtering */ filter_dspr2(mask, hev, &p1, &p2, &p3, &p4); - __asm__ __volatile__ ( + __asm__ __volatile__( "sw %[p1], (%[s1]) \n\t" "sw %[p2], (%[s2]) \n\t" "sw %[p3], (%[s3]) \n\t" "sw %[p4], (%[s4]) \n\t" : - : [p1] "r" (p1), [p2] "r" (p2), [p3] "r" (p3), [p4] "r" (p4), - [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4) - ); + : [p1] "r"(p1), [p2] "r"(p2), [p3] "r"(p3), [p4] "r"(p4), + [s1] "r"(s1), [s2] "r"(s2), [s3] "r"(s3), [s4] "r"(s4)); } } @@ -113,34 +104,30 @@ void vpx_lpf_horizontal_4_dspr2(unsigned char *s, } } -void vpx_lpf_vertical_4_dspr2(unsigned char *s, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { - uint8_t i; - uint32_t mask, hev; - uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; - uint8_t *s1, *s2, *s3, *s4; - uint32_t prim1, prim2, sec3, sec4, prim3, prim4; - uint32_t thresh_vec, flimit_vec, limit_vec; - uint32_t uflimit, 
ulimit, uthresh; +void vpx_lpf_vertical_4_dspr2(unsigned char *s, int pitch, + const uint8_t *blimit, const uint8_t *limit, + const uint8_t *thresh) { + uint8_t i; + uint32_t mask, hev; + uint32_t pm1, p0, p1, p2, p3, p4, p5, p6; + uint8_t *s1, *s2, *s3, *s4; + uint32_t prim1, prim2, sec3, sec4, prim3, prim4; + uint32_t thresh_vec, flimit_vec, limit_vec; + uint32_t uflimit, ulimit, uthresh; uflimit = *blimit; ulimit = *limit; uthresh = *thresh; /* create quad-byte */ - __asm__ __volatile__ ( + __asm__ __volatile__( "replv.qb %[thresh_vec], %[uthresh] \n\t" "replv.qb %[flimit_vec], %[uflimit] \n\t" "replv.qb %[limit_vec], %[ulimit] \n\t" - : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec), - [limit_vec] "=r" (limit_vec) - : [uthresh] "r" (uthresh), [uflimit] "r" (uflimit), [ulimit] "r" (ulimit) - ); + : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec), + [limit_vec] "=r"(limit_vec) + : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit)); /* prefetch data for store */ prefetch_store(s + pitch); @@ -150,22 +137,22 @@ void vpx_lpf_vertical_4_dspr2(unsigned char *s, s2 = s + pitch; s3 = s2 + pitch; s4 = s3 + pitch; - s = s4 + pitch; + s = s4 + pitch; /* load quad-byte vectors * memory is 4 byte aligned */ - p2 = *((uint32_t *)(s1 - 4)); - p6 = *((uint32_t *)(s1)); - p1 = *((uint32_t *)(s2 - 4)); - p5 = *((uint32_t *)(s2)); - p0 = *((uint32_t *)(s3 - 4)); - p4 = *((uint32_t *)(s3)); + p2 = *((uint32_t *)(s1 - 4)); + p6 = *((uint32_t *)(s1)); + p1 = *((uint32_t *)(s2 - 4)); + p5 = *((uint32_t *)(s2)); + p0 = *((uint32_t *)(s3 - 4)); + p4 = *((uint32_t *)(s3)); pm1 = *((uint32_t *)(s4 - 4)); - p3 = *((uint32_t *)(s4)); + p3 = *((uint32_t *)(s4)); /* transpose pm1, p0, p1, p2 */ - __asm__ __volatile__ ( + __asm__ __volatile__( "precrq.qb.ph %[prim1], %[p2], %[p1] \n\t" "precr.qb.ph %[prim2], %[p2], %[p1] \n\t" "precrq.qb.ph %[prim3], %[p0], %[pm1] \n\t" @@ -181,15 +168,13 @@ void vpx_lpf_vertical_4_dspr2(unsigned char *s, 
"append %[p1], %[sec3], 16 \n\t" "append %[pm1], %[sec4], 16 \n\t" - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0), + [pm1] "+r"(pm1), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); /* transpose p3, p4, p5, p6 */ - __asm__ __volatile__ ( + __asm__ __volatile__( "precrq.qb.ph %[prim1], %[p6], %[p5] \n\t" "precr.qb.ph %[prim2], %[p6], %[p5] \n\t" "precrq.qb.ph %[prim3], %[p4], %[p3] \n\t" @@ -205,20 +190,17 @@ void vpx_lpf_vertical_4_dspr2(unsigned char *s, "append %[p5], %[sec3], 16 \n\t" "append %[p3], %[sec4], 16 \n\t" - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p6] "+r" (p6), [p5] "+r" (p5), [p4] "+r" (p4), [p3] "+r" (p3), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p6] "+r"(p6), [p5] "+r"(p5), [p4] "+r"(p4), + [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); /* if (p1 - p4 == 0) and (p2 - p3 == 0) * mask will be zero and filtering is not needed */ if (!(((p1 - p4) == 0) && ((p2 - p3) == 0))) { - filter_hev_mask_dspr2(limit_vec, flimit_vec, p1, p2, pm1, - p0, p3, p4, p5, p6, thresh_vec, - &hev, &mask); + filter_hev_mask_dspr2(limit_vec, flimit_vec, p1, p2, pm1, p0, p3, p4, p5, + p6, thresh_vec, &hev, &mask); /* if mask == 0 do filtering is not needed */ if (mask) { @@ -229,130 +211,113 @@ void vpx_lpf_vertical_4_dspr2(unsigned char *s, * don't use transpose on output data * because memory isn't aligned */ - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p4], 1(%[s4]) \n\t" "sb %[p3], 0(%[s4]) \n\t" "sb %[p2], -1(%[s4]) \n\t" "sb %[p1], -2(%[s4]) \n\t" : - : [p4] "r" (p4), [p3] "r" (p3), 
[p2] "r" (p2), [p1] "r" (p1), - [s4] "r" (s4) - ); + : [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1), + [s4] "r"(s4)); - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p4], %[p4], 8 \n\t" "srl %[p3], %[p3], 8 \n\t" "srl %[p2], %[p2], 8 \n\t" "srl %[p1], %[p1], 8 \n\t" - : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1) - : - ); + : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1) + :); - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p4], 1(%[s3]) \n\t" "sb %[p3], 0(%[s3]) \n\t" "sb %[p2], -1(%[s3]) \n\t" "sb %[p1], -2(%[s3]) \n\t" - : [p1] "+r" (p1) - : [p4] "r" (p4), [p3] "r" (p3), [p2] "r" (p2), [s3] "r" (s3) - ); + : [p1] "+r"(p1) + : [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), [s3] "r"(s3)); - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p4], %[p4], 8 \n\t" "srl %[p3], %[p3], 8 \n\t" "srl %[p2], %[p2], 8 \n\t" "srl %[p1], %[p1], 8 \n\t" - : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1) - : - ); + : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1) + :); - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p4], 1(%[s2]) \n\t" "sb %[p3], 0(%[s2]) \n\t" "sb %[p2], -1(%[s2]) \n\t" "sb %[p1], -2(%[s2]) \n\t" : - : [p4] "r" (p4), [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1), - [s2] "r" (s2) - ); + : [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1), + [s2] "r"(s2)); - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p4], %[p4], 8 \n\t" "srl %[p3], %[p3], 8 \n\t" "srl %[p2], %[p2], 8 \n\t" "srl %[p1], %[p1], 8 \n\t" - : [p4] "+r" (p4), [p3] "+r" (p3), [p2] "+r" (p2), [p1] "+r" (p1) - : - ); + : [p4] "+r"(p4), [p3] "+r"(p3), [p2] "+r"(p2), [p1] "+r"(p1) + :); - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p4], 1(%[s1]) \n\t" "sb %[p3], 0(%[s1]) \n\t" "sb %[p2], -1(%[s1]) \n\t" "sb %[p1], -2(%[s1]) \n\t" : - : [p4] "r" (p4), [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1), - [s1] "r" (s1) - ); + : [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1), + [s1] "r"(s1)); } } } 
} -void vpx_lpf_horizontal_4_dual_dspr2(uint8_t *s, int p /* pitch */, - const uint8_t *blimit0, - const uint8_t *limit0, - const uint8_t *thresh0, - const uint8_t *blimit1, - const uint8_t *limit1, - const uint8_t *thresh1) { - vpx_lpf_horizontal_4_dspr2(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_horizontal_4_dspr2(s + 8, p, blimit1, limit1, thresh1, 1); +void vpx_lpf_horizontal_4_dual_dspr2( + uint8_t *s, int p /* pitch */, const uint8_t *blimit0, + const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, + const uint8_t *limit1, const uint8_t *thresh1) { + vpx_lpf_horizontal_4_dspr2(s, p, blimit0, limit0, thresh0); + vpx_lpf_horizontal_4_dspr2(s + 8, p, blimit1, limit1, thresh1); } -void vpx_lpf_horizontal_8_dual_dspr2(uint8_t *s, int p /* pitch */, - const uint8_t *blimit0, - const uint8_t *limit0, - const uint8_t *thresh0, - const uint8_t *blimit1, - const uint8_t *limit1, - const uint8_t *thresh1) { - vpx_lpf_horizontal_8_dspr2(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_horizontal_8_dspr2(s + 8, p, blimit1, limit1, thresh1, 1); +void vpx_lpf_horizontal_8_dual_dspr2( + uint8_t *s, int p /* pitch */, const uint8_t *blimit0, + const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, + const uint8_t *limit1, const uint8_t *thresh1) { + vpx_lpf_horizontal_8_dspr2(s, p, blimit0, limit0, thresh0); + vpx_lpf_horizontal_8_dspr2(s + 8, p, blimit1, limit1, thresh1); } -void vpx_lpf_vertical_4_dual_dspr2(uint8_t *s, int p, - const uint8_t *blimit0, +void vpx_lpf_vertical_4_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_vertical_4_dspr2(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_vertical_4_dspr2(s + 8 * p, p, blimit1, limit1, thresh1, 1); + vpx_lpf_vertical_4_dspr2(s, p, blimit0, limit0, thresh0); + vpx_lpf_vertical_4_dspr2(s + 8 * p, p, blimit1, limit1, thresh1); } -void 
vpx_lpf_vertical_8_dual_dspr2(uint8_t *s, int p, - const uint8_t *blimit0, +void vpx_lpf_vertical_8_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_vertical_8_dspr2(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_vertical_8_dspr2(s + 8 * p, p, blimit1, limit1, thresh1, - 1); + vpx_lpf_vertical_8_dspr2(s, p, blimit0, limit0, thresh0); + vpx_lpf_vertical_8_dspr2(s + 8 * p, p, blimit1, limit1, thresh1); } -void vpx_lpf_vertical_16_dual_dspr2(uint8_t *s, int p, - const uint8_t *blimit, +void vpx_lpf_vertical_16_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh) { vpx_lpf_vertical_16_dspr2(s, p, blimit, limit, thresh); diff --git a/libs/libvpx/vpx_dsp/mips/loopfilter_filters_dspr2.h b/libs/libvpx/vpx_dsp/mips/loopfilter_filters_dspr2.h index 4a1506ba12..5b0c73345b 100644 --- a/libs/libvpx/vpx_dsp/mips/loopfilter_filters_dspr2.h +++ b/libs/libvpx/vpx_dsp/mips/loopfilter_filters_dspr2.h @@ -24,22 +24,21 @@ extern "C" { #if HAVE_DSPR2 /* inputs & outputs are quad-byte vectors */ -static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, - uint32_t *ps1, uint32_t *ps0, - uint32_t *qs0, uint32_t *qs1) { - int32_t vpx_filter_l, vpx_filter_r; - int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r; - int32_t subr_r, subr_l; - uint32_t t1, t2, HWM, t3; - uint32_t hev_l, hev_r, mask_l, mask_r, invhev_l, invhev_r; - int32_t vps1, vps0, vqs0, vqs1; - int32_t vps1_l, vps1_r, vps0_l, vps0_r, vqs0_l, vqs0_r, vqs1_l, vqs1_r; - uint32_t N128; +static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, uint32_t *ps1, + uint32_t *ps0, uint32_t *qs0, uint32_t *qs1) { + int32_t vpx_filter_l, vpx_filter_r; + int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r; + int32_t subr_r, subr_l; + uint32_t t1, t2, HWM, t3; + uint32_t hev_l, hev_r, mask_l, mask_r, invhev_l, invhev_r; + int32_t vps1, vps0, vqs0, vqs1; + 
int32_t vps1_l, vps1_r, vps0_l, vps0_r, vqs0_l, vqs0_r, vqs1_l, vqs1_r; + uint32_t N128; N128 = 0x80808080; - t1 = 0x03000300; - t2 = 0x04000400; - t3 = 0x01000100; + t1 = 0x03000300; + t2 = 0x04000400; + t3 = 0x01000100; HWM = 0xFF00FF00; vps0 = (*ps0) ^ N128; @@ -72,7 +71,7 @@ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, hev_r = hev << 8; hev_r = hev_r & HWM; - __asm__ __volatile__ ( + __asm__ __volatile__( /* vpx_filter = vp8_signed_char_clamp(ps1 - qs1); */ "subq_s.ph %[vpx_filter_l], %[vps1_l], %[vqs1_l] \n\t" "subq_s.ph %[vpx_filter_r], %[vps1_r], %[vqs1_r] \n\t" @@ -99,20 +98,17 @@ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, "and %[vpx_filter_l], %[vpx_filter_l], %[mask_l] \n\t" "and %[vpx_filter_r], %[vpx_filter_r], %[mask_r] \n\t" - : [vpx_filter_l] "=&r" (vpx_filter_l), - [vpx_filter_r] "=&r" (vpx_filter_r), - [subr_l] "=&r" (subr_l), [subr_r] "=&r" (subr_r), - [invhev_l] "=&r" (invhev_l), [invhev_r] "=&r" (invhev_r) - : [vps0_l] "r" (vps0_l), [vps0_r] "r" (vps0_r), [vps1_l] "r" (vps1_l), - [vps1_r] "r" (vps1_r), [vqs0_l] "r" (vqs0_l), [vqs0_r] "r" (vqs0_r), - [vqs1_l] "r" (vqs1_l), [vqs1_r] "r" (vqs1_r), - [mask_l] "r" (mask_l), [mask_r] "r" (mask_r), - [hev_l] "r" (hev_l), [hev_r] "r" (hev_r), - [HWM] "r" (HWM) - ); + : [vpx_filter_l] "=&r"(vpx_filter_l), [vpx_filter_r] "=&r"(vpx_filter_r), + [subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r), + [invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r) + : [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l), + [vps1_r] "r"(vps1_r), [vqs0_l] "r"(vqs0_l), [vqs0_r] "r"(vqs0_r), + [vqs1_l] "r"(vqs1_l), [vqs1_r] "r"(vqs1_r), [mask_l] "r"(mask_l), + [mask_r] "r"(mask_r), [hev_l] "r"(hev_l), [hev_r] "r"(hev_r), + [HWM] "r"(HWM)); /* save bottom 3 bits so that we round one side +4 and the other +3 */ - __asm__ __volatile__ ( + __asm__ __volatile__( /* Filter2 = vp8_signed_char_clamp(vpx_filter + 3) >>= 3; */ "addq_s.ph %[Filter1_l], %[vpx_filter_l], %[t2] \n\t" "addq_s.ph 
%[Filter1_r], %[vpx_filter_r], %[t2] \n\t" @@ -137,15 +133,14 @@ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, "subq_s.ph %[vqs0_l], %[vqs0_l], %[Filter1_l] \n\t" "subq_s.ph %[vqs0_r], %[vqs0_r], %[Filter1_r] \n\t" - : [Filter1_l] "=&r" (Filter1_l), [Filter1_r] "=&r" (Filter1_r), - [Filter2_l] "=&r" (Filter2_l), [Filter2_r] "=&r" (Filter2_r), - [vps0_l] "+r" (vps0_l), [vps0_r] "+r" (vps0_r), - [vqs0_l] "+r" (vqs0_l), [vqs0_r] "+r" (vqs0_r) - : [t1] "r" (t1), [t2] "r" (t2), [HWM] "r" (HWM), - [vpx_filter_l] "r" (vpx_filter_l), [vpx_filter_r] "r" (vpx_filter_r) - ); + : [Filter1_l] "=&r"(Filter1_l), [Filter1_r] "=&r"(Filter1_r), + [Filter2_l] "=&r"(Filter2_l), [Filter2_r] "=&r"(Filter2_r), + [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l), + [vqs0_r] "+r"(vqs0_r) + : [t1] "r"(t1), [t2] "r"(t2), [HWM] "r"(HWM), + [vpx_filter_l] "r"(vpx_filter_l), [vpx_filter_r] "r"(vpx_filter_r)); - __asm__ __volatile__ ( + __asm__ __volatile__( /* (vpx_filter += 1) >>= 1 */ "addqh.ph %[Filter1_l], %[Filter1_l], %[t3] \n\t" "addqh.ph %[Filter1_r], %[Filter1_r], %[t3] \n\t" @@ -162,11 +157,10 @@ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, "subq_s.ph %[vqs1_l], %[vqs1_l], %[Filter1_l] \n\t" "subq_s.ph %[vqs1_r], %[vqs1_r], %[Filter1_r] \n\t" - : [Filter1_l] "+r" (Filter1_l), [Filter1_r] "+r" (Filter1_r), - [vps1_l] "+r" (vps1_l), [vps1_r] "+r" (vps1_r), - [vqs1_l] "+r" (vqs1_l), [vqs1_r] "+r" (vqs1_r) - : [t3] "r" (t3), [invhev_l] "r" (invhev_l), [invhev_r] "r" (invhev_r) - ); + : [Filter1_l] "+r"(Filter1_l), [Filter1_r] "+r"(Filter1_r), + [vps1_l] "+r"(vps1_l), [vps1_r] "+r"(vps1_r), [vqs1_l] "+r"(vqs1_l), + [vqs1_r] "+r"(vqs1_r) + : [t3] "r"(t3), [invhev_l] "r"(invhev_l), [invhev_r] "r"(invhev_r)); /* Create quad-bytes from halfword pairs */ vqs0_l = vqs0_l & HWM; @@ -174,16 +168,15 @@ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, vps0_l = vps0_l & HWM; vps1_l = vps1_l & HWM; - __asm__ __volatile__ ( + __asm__ 
__volatile__( "shrl.ph %[vqs0_r], %[vqs0_r], 8 \n\t" "shrl.ph %[vps0_r], %[vps0_r], 8 \n\t" "shrl.ph %[vqs1_r], %[vqs1_r], 8 \n\t" "shrl.ph %[vps1_r], %[vps1_r], 8 \n\t" - : [vps1_r] "+r" (vps1_r), [vqs1_r] "+r" (vqs1_r), - [vps0_r] "+r" (vps0_r), [vqs0_r] "+r" (vqs0_r) - : - ); + : [vps1_r] "+r"(vps1_r), [vqs1_r] "+r"(vqs1_r), [vps0_r] "+r"(vps0_r), + [vqs0_r] "+r"(vqs0_r) + :); vqs0 = vqs0_l | vqs0_r; vqs1 = vqs1_l | vqs1_r; @@ -196,24 +189,23 @@ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, *qs1 = vqs1 ^ N128; } -static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev, - uint32_t ps1, uint32_t ps0, - uint32_t qs0, uint32_t qs1, +static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev, uint32_t ps1, + uint32_t ps0, uint32_t qs0, uint32_t qs1, uint32_t *p1_f0, uint32_t *p0_f0, uint32_t *q0_f0, uint32_t *q1_f0) { - int32_t vpx_filter_l, vpx_filter_r; - int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r; - int32_t subr_r, subr_l; - uint32_t t1, t2, HWM, t3; - uint32_t hev_l, hev_r, mask_l, mask_r, invhev_l, invhev_r; - int32_t vps1, vps0, vqs0, vqs1; - int32_t vps1_l, vps1_r, vps0_l, vps0_r, vqs0_l, vqs0_r, vqs1_l, vqs1_r; - uint32_t N128; + int32_t vpx_filter_l, vpx_filter_r; + int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r; + int32_t subr_r, subr_l; + uint32_t t1, t2, HWM, t3; + uint32_t hev_l, hev_r, mask_l, mask_r, invhev_l, invhev_r; + int32_t vps1, vps0, vqs0, vqs1; + int32_t vps1_l, vps1_r, vps0_l, vps0_r, vqs0_l, vqs0_r, vqs1_l, vqs1_r; + uint32_t N128; N128 = 0x80808080; - t1 = 0x03000300; - t2 = 0x04000400; - t3 = 0x01000100; + t1 = 0x03000300; + t2 = 0x04000400; + t3 = 0x01000100; HWM = 0xFF00FF00; vps0 = (ps0) ^ N128; @@ -246,7 +238,7 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev, hev_r = hev << 8; hev_r = hev_r & HWM; - __asm__ __volatile__ ( + __asm__ __volatile__( /* vpx_filter = vp8_signed_char_clamp(ps1 - qs1); */ "subq_s.ph %[vpx_filter_l], %[vps1_l], %[vqs1_l] \n\t" "subq_s.ph %[vpx_filter_r], 
%[vps1_r], %[vqs1_r] \n\t" @@ -273,19 +265,17 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev, "and %[vpx_filter_l], %[vpx_filter_l], %[mask_l] \n\t" "and %[vpx_filter_r], %[vpx_filter_r], %[mask_r] \n\t" - : [vpx_filter_l] "=&r" (vpx_filter_l), - [vpx_filter_r] "=&r" (vpx_filter_r), - [subr_l] "=&r" (subr_l), [subr_r] "=&r" (subr_r), - [invhev_l] "=&r" (invhev_l), [invhev_r] "=&r" (invhev_r) - : [vps0_l] "r" (vps0_l), [vps0_r] "r" (vps0_r), [vps1_l] "r" (vps1_l), - [vps1_r] "r" (vps1_r), [vqs0_l] "r" (vqs0_l), [vqs0_r] "r" (vqs0_r), - [vqs1_l] "r" (vqs1_l), [vqs1_r] "r" (vqs1_r), - [mask_l] "r" (mask_l), [mask_r] "r" (mask_r), - [hev_l] "r" (hev_l), [hev_r] "r" (hev_r), [HWM] "r" (HWM) - ); + : [vpx_filter_l] "=&r"(vpx_filter_l), [vpx_filter_r] "=&r"(vpx_filter_r), + [subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r), + [invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r) + : [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l), + [vps1_r] "r"(vps1_r), [vqs0_l] "r"(vqs0_l), [vqs0_r] "r"(vqs0_r), + [vqs1_l] "r"(vqs1_l), [vqs1_r] "r"(vqs1_r), [mask_l] "r"(mask_l), + [mask_r] "r"(mask_r), [hev_l] "r"(hev_l), [hev_r] "r"(hev_r), + [HWM] "r"(HWM)); /* save bottom 3 bits so that we round one side +4 and the other +3 */ - __asm__ __volatile__ ( + __asm__ __volatile__( /* Filter2 = vp8_signed_char_clamp(vpx_filter + 3) >>= 3; */ "addq_s.ph %[Filter1_l], %[vpx_filter_l], %[t2] \n\t" "addq_s.ph %[Filter1_r], %[vpx_filter_r], %[t2] \n\t" @@ -310,15 +300,14 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev, "subq_s.ph %[vqs0_l], %[vqs0_l], %[Filter1_l] \n\t" "subq_s.ph %[vqs0_r], %[vqs0_r], %[Filter1_r] \n\t" - : [Filter1_l] "=&r" (Filter1_l), [Filter1_r] "=&r" (Filter1_r), - [Filter2_l] "=&r" (Filter2_l), [Filter2_r] "=&r" (Filter2_r), - [vps0_l] "+r" (vps0_l), [vps0_r] "+r" (vps0_r), - [vqs0_l] "+r" (vqs0_l), [vqs0_r] "+r" (vqs0_r) - : [t1] "r" (t1), [t2] "r" (t2), [HWM] "r" (HWM), - [vpx_filter_l] "r" (vpx_filter_l), [vpx_filter_r] 
"r" (vpx_filter_r) - ); + : [Filter1_l] "=&r"(Filter1_l), [Filter1_r] "=&r"(Filter1_r), + [Filter2_l] "=&r"(Filter2_l), [Filter2_r] "=&r"(Filter2_r), + [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l), + [vqs0_r] "+r"(vqs0_r) + : [t1] "r"(t1), [t2] "r"(t2), [HWM] "r"(HWM), + [vpx_filter_l] "r"(vpx_filter_l), [vpx_filter_r] "r"(vpx_filter_r)); - __asm__ __volatile__ ( + __asm__ __volatile__( /* (vpx_filter += 1) >>= 1 */ "addqh.ph %[Filter1_l], %[Filter1_l], %[t3] \n\t" "addqh.ph %[Filter1_r], %[Filter1_r], %[t3] \n\t" @@ -335,11 +324,10 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev, "subq_s.ph %[vqs1_l], %[vqs1_l], %[Filter1_l] \n\t" "subq_s.ph %[vqs1_r], %[vqs1_r], %[Filter1_r] \n\t" - : [Filter1_l] "+r" (Filter1_l), [Filter1_r] "+r" (Filter1_r), - [vps1_l] "+r" (vps1_l), [vps1_r] "+r" (vps1_r), - [vqs1_l] "+r" (vqs1_l), [vqs1_r] "+r" (vqs1_r) - : [t3] "r" (t3), [invhev_l] "r" (invhev_l), [invhev_r] "r" (invhev_r) - ); + : [Filter1_l] "+r"(Filter1_l), [Filter1_r] "+r"(Filter1_r), + [vps1_l] "+r"(vps1_l), [vps1_r] "+r"(vps1_r), [vqs1_l] "+r"(vqs1_l), + [vqs1_r] "+r"(vqs1_r) + : [t3] "r"(t3), [invhev_l] "r"(invhev_l), [invhev_r] "r"(invhev_r)); /* Create quad-bytes from halfword pairs */ vqs0_l = vqs0_l & HWM; @@ -347,16 +335,15 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev, vps0_l = vps0_l & HWM; vps1_l = vps1_l & HWM; - __asm__ __volatile__ ( + __asm__ __volatile__( "shrl.ph %[vqs0_r], %[vqs0_r], 8 \n\t" "shrl.ph %[vps0_r], %[vps0_r], 8 \n\t" "shrl.ph %[vqs1_r], %[vqs1_r], 8 \n\t" "shrl.ph %[vps1_r], %[vps1_r], 8 \n\t" - : [vps1_r] "+r" (vps1_r), [vqs1_r] "+r" (vqs1_r), - [vps0_r] "+r" (vps0_r), [vqs0_r] "+r" (vqs0_r) - : - ); + : [vps1_r] "+r"(vps1_r), [vqs1_r] "+r"(vqs1_r), [vps0_r] "+r"(vps0_r), + [vqs0_r] "+r"(vqs0_r) + :); vqs0 = vqs0_l | vqs0_r; vqs1 = vqs1_l | vqs1_r; @@ -369,18 +356,17 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev, *q1_f0 = vqs1 ^ N128; } -static INLINE void 
mbfilter_dspr2(uint32_t *op3, uint32_t *op2, - uint32_t *op1, uint32_t *op0, - uint32_t *oq0, uint32_t *oq1, +static INLINE void mbfilter_dspr2(uint32_t *op3, uint32_t *op2, uint32_t *op1, + uint32_t *op0, uint32_t *oq0, uint32_t *oq1, uint32_t *oq2, uint32_t *oq3) { /* use a 7 tap filter [1, 1, 1, 2, 1, 1, 1] for flat line */ const uint32_t p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0; const uint32_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3; - uint32_t res_op2, res_op1, res_op0; - uint32_t res_oq0, res_oq1, res_oq2; - uint32_t tmp; - uint32_t add_p210_q012; - uint32_t u32Four = 0x00040004; + uint32_t res_op2, res_op1, res_op0; + uint32_t res_oq0, res_oq1, res_oq2; + uint32_t tmp; + uint32_t add_p210_q012; + uint32_t u32Four = 0x00040004; /* *op2 = ROUND_POWER_OF_TWO(p3 + p3 + p3 + p2 + p2 + p1 + p0 + q0, 3) 1 */ /* *op1 = ROUND_POWER_OF_TWO(p3 + p3 + p2 + p1 + p1 + p0 + q0 + q1, 3) 2 */ @@ -389,7 +375,7 @@ static INLINE void mbfilter_dspr2(uint32_t *op3, uint32_t *op2, /* *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + q1 + q1 + q2 + q3 + q3, 3) 5 */ /* *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + q2 + q2 + q3 + q3 + q3, 3) 6 */ - __asm__ __volatile__ ( + __asm__ __volatile__( "addu.ph %[add_p210_q012], %[p2], %[p1] \n\t" "addu.ph %[add_p210_q012], %[add_p210_q012], %[p0] \n\t" "addu.ph %[add_p210_q012], %[add_p210_q012], %[q0] \n\t" @@ -428,15 +414,12 @@ static INLINE void mbfilter_dspr2(uint32_t *op3, uint32_t *op2, "shrl.ph %[res_op0], %[res_op0], 3 \n\t" "shrl.ph %[res_oq2], %[res_oq2], 3 \n\t" - : [add_p210_q012] "=&r" (add_p210_q012), - [tmp] "=&r" (tmp), [res_op2] "=&r" (res_op2), - [res_op1] "=&r" (res_op1), [res_op0] "=&r" (res_op0), - [res_oq0] "=&r" (res_oq0), [res_oq1] "=&r" (res_oq1), - [res_oq2] "=&r" (res_oq2) - : [p0] "r" (p0), [q0] "r" (q0), [p1] "r" (p1), [q1] "r" (q1), - [p2] "r" (p2), [q2] "r" (q2), [p3] "r" (p3), [q3] "r" (q3), - [u32Four] "r" (u32Four) - ); + : [add_p210_q012] "=&r"(add_p210_q012), [tmp] "=&r"(tmp), + [res_op2] "=&r"(res_op2), [res_op1] 
"=&r"(res_op1), + [res_op0] "=&r"(res_op0), [res_oq0] "=&r"(res_oq0), + [res_oq1] "=&r"(res_oq1), [res_oq2] "=&r"(res_oq2) + : [p0] "r"(p0), [q0] "r"(q0), [p1] "r"(p1), [q1] "r"(q1), [p2] "r"(p2), + [q2] "r"(q2), [p3] "r"(p3), [q3] "r"(q3), [u32Four] "r"(u32Four)); *op2 = res_op2; *op1 = res_op1; @@ -446,20 +429,18 @@ static INLINE void mbfilter_dspr2(uint32_t *op3, uint32_t *op2, *oq2 = res_oq2; } -static INLINE void mbfilter1_dspr2(uint32_t p3, uint32_t p2, - uint32_t p1, uint32_t p0, - uint32_t q0, uint32_t q1, - uint32_t q2, uint32_t q3, - uint32_t *op2_f1, +static INLINE void mbfilter1_dspr2(uint32_t p3, uint32_t p2, uint32_t p1, + uint32_t p0, uint32_t q0, uint32_t q1, + uint32_t q2, uint32_t q3, uint32_t *op2_f1, uint32_t *op1_f1, uint32_t *op0_f1, uint32_t *oq0_f1, uint32_t *oq1_f1, uint32_t *oq2_f1) { /* use a 7 tap filter [1, 1, 1, 2, 1, 1, 1] for flat line */ - uint32_t res_op2, res_op1, res_op0; - uint32_t res_oq0, res_oq1, res_oq2; - uint32_t tmp; - uint32_t add_p210_q012; - uint32_t u32Four = 0x00040004; + uint32_t res_op2, res_op1, res_op0; + uint32_t res_oq0, res_oq1, res_oq2; + uint32_t tmp; + uint32_t add_p210_q012; + uint32_t u32Four = 0x00040004; /* *op2 = ROUND_POWER_OF_TWO(p3 + p3 + p3 + p2 + p2 + p1 + p0 + q0, 3) 1 */ /* *op1 = ROUND_POWER_OF_TWO(p3 + p3 + p2 + p1 + p1 + p0 + q0 + q1, 3) 2 */ @@ -468,7 +449,7 @@ static INLINE void mbfilter1_dspr2(uint32_t p3, uint32_t p2, /* *oq1 = ROUND_POWER_OF_TWO(p1 + p0 + q0 + q1 + q1 + q2 + q3 + q3, 3) 5 */ /* *oq2 = ROUND_POWER_OF_TWO(p0 + q0 + q1 + q2 + q2 + q3 + q3 + q3, 3) 6 */ - __asm__ __volatile__ ( + __asm__ __volatile__( "addu.ph %[add_p210_q012], %[p2], %[p1] \n\t" "addu.ph %[add_p210_q012], %[add_p210_q012], %[p0] \n\t" "addu.ph %[add_p210_q012], %[add_p210_q012], %[q0] \n\t" @@ -507,14 +488,12 @@ static INLINE void mbfilter1_dspr2(uint32_t p3, uint32_t p2, "shrl.ph %[res_op0], %[res_op0], 3 \n\t" "shrl.ph %[res_oq2], %[res_oq2], 3 \n\t" - : [add_p210_q012] "=&r" (add_p210_q012), [tmp] "=&r" 
(tmp), - [res_op2] "=&r" (res_op2), [res_op1] "=&r" (res_op1), - [res_op0] "=&r" (res_op0), [res_oq0] "=&r" (res_oq0), - [res_oq1] "=&r" (res_oq1), [res_oq2] "=&r" (res_oq2) - : [p0] "r" (p0), [q0] "r" (q0), [p1] "r" (p1), [q1] "r" (q1), - [p2] "r" (p2), [q2] "r" (q2), [p3] "r" (p3), [q3] "r" (q3), - [u32Four] "r" (u32Four) - ); + : [add_p210_q012] "=&r"(add_p210_q012), [tmp] "=&r"(tmp), + [res_op2] "=&r"(res_op2), [res_op1] "=&r"(res_op1), + [res_op0] "=&r"(res_op0), [res_oq0] "=&r"(res_oq0), + [res_oq1] "=&r"(res_oq1), [res_oq2] "=&r"(res_oq2) + : [p0] "r"(p0), [q0] "r"(q0), [p1] "r"(p1), [q1] "r"(q1), [p2] "r"(p2), + [q2] "r"(q2), [p3] "r"(p3), [q3] "r"(q3), [u32Four] "r"(u32Four)); *op2_f1 = res_op2; *op1_f1 = res_op1; @@ -524,25 +503,22 @@ static INLINE void mbfilter1_dspr2(uint32_t p3, uint32_t p2, *oq2_f1 = res_oq2; } -static INLINE void wide_mbfilter_dspr2(uint32_t *op7, uint32_t *op6, - uint32_t *op5, uint32_t *op4, - uint32_t *op3, uint32_t *op2, - uint32_t *op1, uint32_t *op0, - uint32_t *oq0, uint32_t *oq1, - uint32_t *oq2, uint32_t *oq3, - uint32_t *oq4, uint32_t *oq5, - uint32_t *oq6, uint32_t *oq7) { +static INLINE void wide_mbfilter_dspr2( + uint32_t *op7, uint32_t *op6, uint32_t *op5, uint32_t *op4, uint32_t *op3, + uint32_t *op2, uint32_t *op1, uint32_t *op0, uint32_t *oq0, uint32_t *oq1, + uint32_t *oq2, uint32_t *oq3, uint32_t *oq4, uint32_t *oq5, uint32_t *oq6, + uint32_t *oq7) { const uint32_t p7 = *op7, p6 = *op6, p5 = *op5, p4 = *op4; const uint32_t p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0; const uint32_t q0 = *oq0, q1 = *oq1, q2 = *oq2, q3 = *oq3; const uint32_t q4 = *oq4, q5 = *oq5, q6 = *oq6, q7 = *oq7; - uint32_t res_op6, res_op5, res_op4, res_op3, res_op2, res_op1, res_op0; - uint32_t res_oq0, res_oq1, res_oq2, res_oq3, res_oq4, res_oq5, res_oq6; - uint32_t tmp; - uint32_t add_p6toq6; - uint32_t u32Eight = 0x00080008; + uint32_t res_op6, res_op5, res_op4, res_op3, res_op2, res_op1, res_op0; + uint32_t res_oq0, res_oq1, res_oq2, 
res_oq3, res_oq4, res_oq5, res_oq6; + uint32_t tmp; + uint32_t add_p6toq6; + uint32_t u32Eight = 0x00080008; - __asm__ __volatile__ ( + __asm__ __volatile__( /* addition of p6,p5,p4,p3,p2,p1,p0,q0,q1,q2,q3,q4,q5,q6 which is used most of the time */ "addu.ph %[add_p6toq6], %[p6], %[p5] \n\t" @@ -560,15 +536,13 @@ static INLINE void wide_mbfilter_dspr2(uint32_t *op7, uint32_t *op6, "addu.ph %[add_p6toq6], %[add_p6toq6], %[q6] \n\t" "addu.ph %[add_p6toq6], %[add_p6toq6], %[u32Eight] \n\t" - : [add_p6toq6] "=&r" (add_p6toq6) - : [p6] "r" (p6), [p5] "r" (p5), [p4] "r" (p4), - [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0), - [q0] "r" (q0), [q1] "r" (q1), [q2] "r" (q2), [q3] "r" (q3), - [q4] "r" (q4), [q5] "r" (q5), [q6] "r" (q6), - [u32Eight] "r" (u32Eight) - ); + : [add_p6toq6] "=&r"(add_p6toq6) + : [p6] "r"(p6), [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), + [p1] "r"(p1), [p0] "r"(p0), [q0] "r"(q0), [q1] "r"(q1), [q2] "r"(q2), + [q3] "r"(q3), [q4] "r"(q4), [q5] "r"(q5), [q6] "r"(q6), + [u32Eight] "r"(u32Eight)); - __asm__ __volatile__ ( + __asm__ __volatile__( /* *op6 = ROUND_POWER_OF_TWO(p7 * 7 + p6 * 2 + p5 + p4 + p3 + p2 + p1 + p0 + q0, 4) */ "shll.ph %[tmp], %[p7], 3 \n\t" @@ -643,16 +617,14 @@ static INLINE void wide_mbfilter_dspr2(uint32_t *op7, uint32_t *op6, "addu.ph %[res_op0], %[res_op0], %[add_p6toq6] \n\t" "shrl.ph %[res_op0], %[res_op0], 4 \n\t" - : [res_op6] "=&r" (res_op6), [res_op5] "=&r" (res_op5), - [res_op4] "=&r" (res_op4), [res_op3] "=&r" (res_op3), - [res_op2] "=&r" (res_op2), [res_op1] "=&r" (res_op1), - [res_op0] "=&r" (res_op0), [tmp] "=&r" (tmp) - : [p7] "r" (p7), [p6] "r" (p6), [p5] "r" (p5), [p4] "r" (p4), - [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0), - [q2] "r" (q2), [q1] "r" (q1), - [q3] "r" (q3), [q4] "r" (q4), [q5] "r" (q5), [q6] "r" (q6), - [add_p6toq6] "r" (add_p6toq6) - ); + : [res_op6] "=&r"(res_op6), [res_op5] "=&r"(res_op5), + [res_op4] "=&r"(res_op4), [res_op3] "=&r"(res_op3), + [res_op2] 
"=&r"(res_op2), [res_op1] "=&r"(res_op1), + [res_op0] "=&r"(res_op0), [tmp] "=&r"(tmp) + : [p7] "r"(p7), [p6] "r"(p6), [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), + [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0), [q2] "r"(q2), [q1] "r"(q1), + [q3] "r"(q3), [q4] "r"(q4), [q5] "r"(q5), [q6] "r"(q6), + [add_p6toq6] "r"(add_p6toq6)); *op6 = res_op6; *op5 = res_op5; @@ -662,7 +634,7 @@ static INLINE void wide_mbfilter_dspr2(uint32_t *op7, uint32_t *op6, *op1 = res_op1; *op0 = res_op0; - __asm__ __volatile__ ( + __asm__ __volatile__( /* *oq0 = ROUND_POWER_OF_TWO(p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 * 2 + q1 + q2 + q3 + q4 + q5 + q6 + q7, 4); */ "addu.ph %[res_oq0], %[q7], %[q0] \n\t" @@ -737,16 +709,14 @@ static INLINE void wide_mbfilter_dspr2(uint32_t *op7, uint32_t *op6, "subu.ph %[res_oq6], %[res_oq6], %[p6] \n\t" "shrl.ph %[res_oq6], %[res_oq6], 4 \n\t" - : [res_oq6] "=&r" (res_oq6), [res_oq5] "=&r" (res_oq5), - [res_oq4] "=&r" (res_oq4), [res_oq3] "=&r" (res_oq3), - [res_oq2] "=&r" (res_oq2), [res_oq1] "=&r" (res_oq1), - [res_oq0] "=&r" (res_oq0), [tmp] "=&r" (tmp) - : [q7] "r" (q7), [q6] "r" (q6), [q5] "r" (q5), [q4] "r" (q4), - [q3] "r" (q3), [q2] "r" (q2), [q1] "r" (q1), [q0] "r" (q0), - [p1] "r" (p1), [p2] "r" (p2), - [p3] "r" (p3), [p4] "r" (p4), [p5] "r" (p5), [p6] "r" (p6), - [add_p6toq6] "r" (add_p6toq6) - ); + : [res_oq6] "=&r"(res_oq6), [res_oq5] "=&r"(res_oq5), + [res_oq4] "=&r"(res_oq4), [res_oq3] "=&r"(res_oq3), + [res_oq2] "=&r"(res_oq2), [res_oq1] "=&r"(res_oq1), + [res_oq0] "=&r"(res_oq0), [tmp] "=&r"(tmp) + : [q7] "r"(q7), [q6] "r"(q6), [q5] "r"(q5), [q4] "r"(q4), [q3] "r"(q3), + [q2] "r"(q2), [q1] "r"(q1), [q0] "r"(q0), [p1] "r"(p1), [p2] "r"(p2), + [p3] "r"(p3), [p4] "r"(p4), [p5] "r"(p5), [p6] "r"(p6), + [add_p6toq6] "r"(add_p6toq6)); *oq0 = res_oq0; *oq1 = res_oq1; diff --git a/libs/libvpx/vpx_dsp/mips/loopfilter_macros_dspr2.h b/libs/libvpx/vpx_dsp/mips/loopfilter_macros_dspr2.h index 994ff185a2..38ed0b2a63 100644 --- 
a/libs/libvpx/vpx_dsp/mips/loopfilter_macros_dspr2.h +++ b/libs/libvpx/vpx_dsp/mips/loopfilter_macros_dspr2.h @@ -22,453 +22,410 @@ extern "C" { #endif #if HAVE_DSPR2 -#define STORE_F0() { \ - __asm__ __volatile__ ( \ - "sb %[q1_f0], 1(%[s4]) \n\t" \ - "sb %[q0_f0], 0(%[s4]) \n\t" \ - "sb %[p0_f0], -1(%[s4]) \n\t" \ - "sb %[p1_f0], -2(%[s4]) \n\t" \ - \ - : \ - : [q1_f0] "r" (q1_f0), [q0_f0] "r" (q0_f0), \ - [p0_f0] "r" (p0_f0), [p1_f0] "r" (p1_f0), \ - [s4] "r" (s4) \ - ); \ - \ - __asm__ __volatile__ ( \ - "srl %[q1_f0], %[q1_f0], 8 \n\t" \ - "srl %[q0_f0], %[q0_f0], 8 \n\t" \ - "srl %[p0_f0], %[p0_f0], 8 \n\t" \ - "srl %[p1_f0], %[p1_f0], 8 \n\t" \ - \ - : [q1_f0] "+r" (q1_f0), [q0_f0] "+r" (q0_f0), \ - [p0_f0] "+r" (p0_f0), [p1_f0] "+r" (p1_f0) \ - : \ - ); \ - \ - __asm__ __volatile__ ( \ - "sb %[q1_f0], 1(%[s3]) \n\t" \ - "sb %[q0_f0], 0(%[s3]) \n\t" \ - "sb %[p0_f0], -1(%[s3]) \n\t" \ - "sb %[p1_f0], -2(%[s3]) \n\t" \ - \ - : [p1_f0] "+r" (p1_f0) \ - : [q1_f0] "r" (q1_f0), [q0_f0] "r" (q0_f0), \ - [s3] "r" (s3), [p0_f0] "r" (p0_f0) \ - ); \ - \ - __asm__ __volatile__ ( \ - "srl %[q1_f0], %[q1_f0], 8 \n\t" \ - "srl %[q0_f0], %[q0_f0], 8 \n\t" \ - "srl %[p0_f0], %[p0_f0], 8 \n\t" \ - "srl %[p1_f0], %[p1_f0], 8 \n\t" \ - \ - : [q1_f0] "+r" (q1_f0), [q0_f0] "+r" (q0_f0), \ - [p0_f0] "+r" (p0_f0), [p1_f0] "+r" (p1_f0) \ - : \ - ); \ - \ - __asm__ __volatile__ ( \ - "sb %[q1_f0], 1(%[s2]) \n\t" \ - "sb %[q0_f0], 0(%[s2]) \n\t" \ - "sb %[p0_f0], -1(%[s2]) \n\t" \ - "sb %[p1_f0], -2(%[s2]) \n\t" \ - \ - : \ - : [q1_f0] "r" (q1_f0), [q0_f0] "r" (q0_f0), \ - [p0_f0] "r" (p0_f0), [p1_f0] "r" (p1_f0), \ - [s2] "r" (s2) \ - ); \ - \ - __asm__ __volatile__ ( \ - "srl %[q1_f0], %[q1_f0], 8 \n\t" \ - "srl %[q0_f0], %[q0_f0], 8 \n\t" \ - "srl %[p0_f0], %[p0_f0], 8 \n\t" \ - "srl %[p1_f0], %[p1_f0], 8 \n\t" \ - \ - : [q1_f0] "+r" (q1_f0), [q0_f0] "+r" (q0_f0), \ - [p0_f0] "+r" (p0_f0), [p1_f0] "+r" (p1_f0) \ - : \ - ); \ - \ - __asm__ __volatile__ ( \ - "sb %[q1_f0], 1(%[s1]) 
\n\t" \ - "sb %[q0_f0], 0(%[s1]) \n\t" \ - "sb %[p0_f0], -1(%[s1]) \n\t" \ - "sb %[p1_f0], -2(%[s1]) \n\t" \ - \ - : \ - : [q1_f0] "r" (q1_f0), [q0_f0] "r" (q0_f0), \ - [p0_f0] "r" (p0_f0), [p1_f0] "r" (p1_f0), \ - [s1] "r" (s1) \ - ); \ -} +#define STORE_F0() \ + { \ + __asm__ __volatile__( \ + "sb %[q1_f0], 1(%[s4]) \n\t" \ + "sb %[q0_f0], 0(%[s4]) \n\t" \ + "sb %[p0_f0], -1(%[s4]) \n\t" \ + "sb %[p1_f0], -2(%[s4]) \n\t" \ + \ + : \ + : [q1_f0] "r"(q1_f0), [q0_f0] "r"(q0_f0), [p0_f0] "r"(p0_f0), \ + [p1_f0] "r"(p1_f0), [s4] "r"(s4)); \ + \ + __asm__ __volatile__( \ + "srl %[q1_f0], %[q1_f0], 8 \n\t" \ + "srl %[q0_f0], %[q0_f0], 8 \n\t" \ + "srl %[p0_f0], %[p0_f0], 8 \n\t" \ + "srl %[p1_f0], %[p1_f0], 8 \n\t" \ + \ + : [q1_f0] "+r"(q1_f0), [q0_f0] "+r"(q0_f0), [p0_f0] "+r"(p0_f0), \ + [p1_f0] "+r"(p1_f0) \ + :); \ + \ + __asm__ __volatile__( \ + "sb %[q1_f0], 1(%[s3]) \n\t" \ + "sb %[q0_f0], 0(%[s3]) \n\t" \ + "sb %[p0_f0], -1(%[s3]) \n\t" \ + "sb %[p1_f0], -2(%[s3]) \n\t" \ + \ + : [p1_f0] "+r"(p1_f0) \ + : [q1_f0] "r"(q1_f0), [q0_f0] "r"(q0_f0), [s3] "r"(s3), \ + [p0_f0] "r"(p0_f0)); \ + \ + __asm__ __volatile__( \ + "srl %[q1_f0], %[q1_f0], 8 \n\t" \ + "srl %[q0_f0], %[q0_f0], 8 \n\t" \ + "srl %[p0_f0], %[p0_f0], 8 \n\t" \ + "srl %[p1_f0], %[p1_f0], 8 \n\t" \ + \ + : [q1_f0] "+r"(q1_f0), [q0_f0] "+r"(q0_f0), [p0_f0] "+r"(p0_f0), \ + [p1_f0] "+r"(p1_f0) \ + :); \ + \ + __asm__ __volatile__( \ + "sb %[q1_f0], 1(%[s2]) \n\t" \ + "sb %[q0_f0], 0(%[s2]) \n\t" \ + "sb %[p0_f0], -1(%[s2]) \n\t" \ + "sb %[p1_f0], -2(%[s2]) \n\t" \ + \ + : \ + : [q1_f0] "r"(q1_f0), [q0_f0] "r"(q0_f0), [p0_f0] "r"(p0_f0), \ + [p1_f0] "r"(p1_f0), [s2] "r"(s2)); \ + \ + __asm__ __volatile__( \ + "srl %[q1_f0], %[q1_f0], 8 \n\t" \ + "srl %[q0_f0], %[q0_f0], 8 \n\t" \ + "srl %[p0_f0], %[p0_f0], 8 \n\t" \ + "srl %[p1_f0], %[p1_f0], 8 \n\t" \ + \ + : [q1_f0] "+r"(q1_f0), [q0_f0] "+r"(q0_f0), [p0_f0] "+r"(p0_f0), \ + [p1_f0] "+r"(p1_f0) \ + :); \ + \ + __asm__ __volatile__( \ + "sb %[q1_f0], 
1(%[s1]) \n\t" \ + "sb %[q0_f0], 0(%[s1]) \n\t" \ + "sb %[p0_f0], -1(%[s1]) \n\t" \ + "sb %[p1_f0], -2(%[s1]) \n\t" \ + \ + : \ + : [q1_f0] "r"(q1_f0), [q0_f0] "r"(q0_f0), [p0_f0] "r"(p0_f0), \ + [p1_f0] "r"(p1_f0), [s1] "r"(s1)); \ + } -#define STORE_F1() { \ - __asm__ __volatile__ ( \ - "sb %[q2_r], 2(%[s4]) \n\t" \ - "sb %[q1_r], 1(%[s4]) \n\t" \ - "sb %[q0_r], 0(%[s4]) \n\t" \ - "sb %[p0_r], -1(%[s4]) \n\t" \ - "sb %[p1_r], -2(%[s4]) \n\t" \ - "sb %[p2_r], -3(%[s4]) \n\t" \ - \ - : \ - : [q2_r] "r" (q2_r), [q1_r] "r" (q1_r), [q0_r] "r" (q0_r), \ - [p0_r] "r" (p0_r), [p1_r] "r" (p1_r), [p2_r] "r" (p2_r), \ - [s4] "r" (s4) \ - ); \ - \ - __asm__ __volatile__ ( \ - "srl %[q2_r], %[q2_r], 16 \n\t" \ - "srl %[q1_r], %[q1_r], 16 \n\t" \ - "srl %[q0_r], %[q0_r], 16 \n\t" \ - "srl %[p0_r], %[p0_r], 16 \n\t" \ - "srl %[p1_r], %[p1_r], 16 \n\t" \ - "srl %[p2_r], %[p2_r], 16 \n\t" \ - \ - : [q2_r] "+r" (q2_r), [q1_r] "+r" (q1_r), [q0_r] "+r" (q0_r), \ - [p0_r] "+r" (p0_r), [p1_r] "+r" (p1_r), [p2_r] "+r" (p2_r) \ - : \ - ); \ - \ - __asm__ __volatile__ ( \ - "sb %[q2_r], 2(%[s3]) \n\t" \ - "sb %[q1_r], 1(%[s3]) \n\t" \ - "sb %[q0_r], 0(%[s3]) \n\t" \ - "sb %[p0_r], -1(%[s3]) \n\t" \ - "sb %[p1_r], -2(%[s3]) \n\t" \ - "sb %[p2_r], -3(%[s3]) \n\t" \ - \ - : \ - : [q2_r] "r" (q2_r), [q1_r] "r" (q1_r), [q0_r] "r" (q0_r), \ - [p0_r] "r" (p0_r), [p1_r] "r" (p1_r), [p2_r] "r" (p2_r), \ - [s3] "r" (s3) \ - ); \ - \ - __asm__ __volatile__ ( \ - "sb %[q2_l], 2(%[s2]) \n\t" \ - "sb %[q1_l], 1(%[s2]) \n\t" \ - "sb %[q0_l], 0(%[s2]) \n\t" \ - "sb %[p0_l], -1(%[s2]) \n\t" \ - "sb %[p1_l], -2(%[s2]) \n\t" \ - "sb %[p2_l], -3(%[s2]) \n\t" \ - \ - : \ - : [q2_l] "r" (q2_l), [q1_l] "r" (q1_l), [q0_l] "r" (q0_l), \ - [p0_l] "r" (p0_l), [p1_l] "r" (p1_l), [p2_l] "r" (p2_l), \ - [s2] "r" (s2) \ - ); \ - \ - __asm__ __volatile__ ( \ - "srl %[q2_l], %[q2_l], 16 \n\t" \ - "srl %[q1_l], %[q1_l], 16 \n\t" \ - "srl %[q0_l], %[q0_l], 16 \n\t" \ - "srl %[p0_l], %[p0_l], 16 \n\t" \ - "srl %[p1_l], 
%[p1_l], 16 \n\t" \ - "srl %[p2_l], %[p2_l], 16 \n\t" \ - \ - : [q2_l] "+r" (q2_l), [q1_l] "+r" (q1_l), [q0_l] "+r" (q0_l), \ - [p0_l] "+r" (p0_l), [p1_l] "+r" (p1_l), [p2_l] "+r" (p2_l) \ - : \ - ); \ - \ - __asm__ __volatile__ ( \ - "sb %[q2_l], 2(%[s1]) \n\t" \ - "sb %[q1_l], 1(%[s1]) \n\t" \ - "sb %[q0_l], 0(%[s1]) \n\t" \ - "sb %[p0_l], -1(%[s1]) \n\t" \ - "sb %[p1_l], -2(%[s1]) \n\t" \ - "sb %[p2_l], -3(%[s1]) \n\t" \ - \ - : \ - : [q2_l] "r" (q2_l), [q1_l] "r" (q1_l), [q0_l] "r" (q0_l), \ - [p0_l] "r" (p0_l), [p1_l] "r" (p1_l), [p2_l] "r" (p2_l), \ - [s1] "r" (s1) \ - ); \ -} +#define STORE_F1() \ + { \ + __asm__ __volatile__( \ + "sb %[q2_r], 2(%[s4]) \n\t" \ + "sb %[q1_r], 1(%[s4]) \n\t" \ + "sb %[q0_r], 0(%[s4]) \n\t" \ + "sb %[p0_r], -1(%[s4]) \n\t" \ + "sb %[p1_r], -2(%[s4]) \n\t" \ + "sb %[p2_r], -3(%[s4]) \n\t" \ + \ + : \ + : [q2_r] "r"(q2_r), [q1_r] "r"(q1_r), [q0_r] "r"(q0_r), \ + [p0_r] "r"(p0_r), [p1_r] "r"(p1_r), [p2_r] "r"(p2_r), [s4] "r"(s4)); \ + \ + __asm__ __volatile__( \ + "srl %[q2_r], %[q2_r], 16 \n\t" \ + "srl %[q1_r], %[q1_r], 16 \n\t" \ + "srl %[q0_r], %[q0_r], 16 \n\t" \ + "srl %[p0_r], %[p0_r], 16 \n\t" \ + "srl %[p1_r], %[p1_r], 16 \n\t" \ + "srl %[p2_r], %[p2_r], 16 \n\t" \ + \ + : [q2_r] "+r"(q2_r), [q1_r] "+r"(q1_r), [q0_r] "+r"(q0_r), \ + [p0_r] "+r"(p0_r), [p1_r] "+r"(p1_r), [p2_r] "+r"(p2_r) \ + :); \ + \ + __asm__ __volatile__( \ + "sb %[q2_r], 2(%[s3]) \n\t" \ + "sb %[q1_r], 1(%[s3]) \n\t" \ + "sb %[q0_r], 0(%[s3]) \n\t" \ + "sb %[p0_r], -1(%[s3]) \n\t" \ + "sb %[p1_r], -2(%[s3]) \n\t" \ + "sb %[p2_r], -3(%[s3]) \n\t" \ + \ + : \ + : [q2_r] "r"(q2_r), [q1_r] "r"(q1_r), [q0_r] "r"(q0_r), \ + [p0_r] "r"(p0_r), [p1_r] "r"(p1_r), [p2_r] "r"(p2_r), [s3] "r"(s3)); \ + \ + __asm__ __volatile__( \ + "sb %[q2_l], 2(%[s2]) \n\t" \ + "sb %[q1_l], 1(%[s2]) \n\t" \ + "sb %[q0_l], 0(%[s2]) \n\t" \ + "sb %[p0_l], -1(%[s2]) \n\t" \ + "sb %[p1_l], -2(%[s2]) \n\t" \ + "sb %[p2_l], -3(%[s2]) \n\t" \ + \ + : \ + : [q2_l] "r"(q2_l), [q1_l] 
"r"(q1_l), [q0_l] "r"(q0_l), \ + [p0_l] "r"(p0_l), [p1_l] "r"(p1_l), [p2_l] "r"(p2_l), [s2] "r"(s2)); \ + \ + __asm__ __volatile__( \ + "srl %[q2_l], %[q2_l], 16 \n\t" \ + "srl %[q1_l], %[q1_l], 16 \n\t" \ + "srl %[q0_l], %[q0_l], 16 \n\t" \ + "srl %[p0_l], %[p0_l], 16 \n\t" \ + "srl %[p1_l], %[p1_l], 16 \n\t" \ + "srl %[p2_l], %[p2_l], 16 \n\t" \ + \ + : [q2_l] "+r"(q2_l), [q1_l] "+r"(q1_l), [q0_l] "+r"(q0_l), \ + [p0_l] "+r"(p0_l), [p1_l] "+r"(p1_l), [p2_l] "+r"(p2_l) \ + :); \ + \ + __asm__ __volatile__( \ + "sb %[q2_l], 2(%[s1]) \n\t" \ + "sb %[q1_l], 1(%[s1]) \n\t" \ + "sb %[q0_l], 0(%[s1]) \n\t" \ + "sb %[p0_l], -1(%[s1]) \n\t" \ + "sb %[p1_l], -2(%[s1]) \n\t" \ + "sb %[p2_l], -3(%[s1]) \n\t" \ + \ + : \ + : [q2_l] "r"(q2_l), [q1_l] "r"(q1_l), [q0_l] "r"(q0_l), \ + [p0_l] "r"(p0_l), [p1_l] "r"(p1_l), [p2_l] "r"(p2_l), [s1] "r"(s1)); \ + } -#define STORE_F2() { \ - __asm__ __volatile__ ( \ - "sb %[q6_r], 6(%[s4]) \n\t" \ - "sb %[q5_r], 5(%[s4]) \n\t" \ - "sb %[q4_r], 4(%[s4]) \n\t" \ - "sb %[q3_r], 3(%[s4]) \n\t" \ - "sb %[q2_r], 2(%[s4]) \n\t" \ - "sb %[q1_r], 1(%[s4]) \n\t" \ - "sb %[q0_r], 0(%[s4]) \n\t" \ - "sb %[p0_r], -1(%[s4]) \n\t" \ - "sb %[p1_r], -2(%[s4]) \n\t" \ - "sb %[p2_r], -3(%[s4]) \n\t" \ - "sb %[p3_r], -4(%[s4]) \n\t" \ - "sb %[p4_r], -5(%[s4]) \n\t" \ - "sb %[p5_r], -6(%[s4]) \n\t" \ - "sb %[p6_r], -7(%[s4]) \n\t" \ - \ - : \ - : [q6_r] "r" (q6_r), [q5_r] "r" (q5_r), [q4_r] "r" (q4_r), \ - [q3_r] "r" (q3_r), [q2_r] "r" (q2_r), [q1_r] "r" (q1_r), \ - [q0_r] "r" (q0_r), \ - [p0_r] "r" (p0_r), [p1_r] "r" (p1_r), [p2_r] "r" (p2_r), \ - [p3_r] "r" (p3_r), [p4_r] "r" (p4_r), [p5_r] "r" (p5_r), \ - [p6_r] "r" (p6_r), \ - [s4] "r" (s4) \ - ); \ - \ - __asm__ __volatile__ ( \ - "srl %[q6_r], %[q6_r], 16 \n\t" \ - "srl %[q5_r], %[q5_r], 16 \n\t" \ - "srl %[q4_r], %[q4_r], 16 \n\t" \ - "srl %[q3_r], %[q3_r], 16 \n\t" \ - "srl %[q2_r], %[q2_r], 16 \n\t" \ - "srl %[q1_r], %[q1_r], 16 \n\t" \ - "srl %[q0_r], %[q0_r], 16 \n\t" \ - "srl %[p0_r], %[p0_r], 
16 \n\t" \ - "srl %[p1_r], %[p1_r], 16 \n\t" \ - "srl %[p2_r], %[p2_r], 16 \n\t" \ - "srl %[p3_r], %[p3_r], 16 \n\t" \ - "srl %[p4_r], %[p4_r], 16 \n\t" \ - "srl %[p5_r], %[p5_r], 16 \n\t" \ - "srl %[p6_r], %[p6_r], 16 \n\t" \ - \ - : [q6_r] "+r" (q6_r), [q5_r] "+r" (q5_r), [q4_r] "+r" (q4_r), \ - [q3_r] "+r" (q3_r), [q2_r] "+r" (q2_r), [q1_r] "+r" (q1_r), \ - [q0_r] "+r" (q0_r), \ - [p0_r] "+r" (p0_r), [p1_r] "+r" (p1_r), [p2_r] "+r" (p2_r), \ - [p3_r] "+r" (p3_r), [p4_r] "+r" (p4_r), [p5_r] "+r" (p5_r), \ - [p6_r] "+r" (p6_r) \ - : \ - ); \ - \ - __asm__ __volatile__ ( \ - "sb %[q6_r], 6(%[s3]) \n\t" \ - "sb %[q5_r], 5(%[s3]) \n\t" \ - "sb %[q4_r], 4(%[s3]) \n\t" \ - "sb %[q3_r], 3(%[s3]) \n\t" \ - "sb %[q2_r], 2(%[s3]) \n\t" \ - "sb %[q1_r], 1(%[s3]) \n\t" \ - "sb %[q0_r], 0(%[s3]) \n\t" \ - "sb %[p0_r], -1(%[s3]) \n\t" \ - "sb %[p1_r], -2(%[s3]) \n\t" \ - "sb %[p2_r], -3(%[s3]) \n\t" \ - "sb %[p3_r], -4(%[s3]) \n\t" \ - "sb %[p4_r], -5(%[s3]) \n\t" \ - "sb %[p5_r], -6(%[s3]) \n\t" \ - "sb %[p6_r], -7(%[s3]) \n\t" \ - \ - : \ - : [q6_r] "r" (q6_r), [q5_r] "r" (q5_r), [q4_r] "r" (q4_r), \ - [q3_r] "r" (q3_r), [q2_r] "r" (q2_r), [q1_r] "r" (q1_r), \ - [q0_r] "r" (q0_r), \ - [p0_r] "r" (p0_r), [p1_r] "r" (p1_r), [p2_r] "r" (p2_r), \ - [p3_r] "r" (p3_r), [p4_r] "r" (p4_r), [p5_r] "r" (p5_r), \ - [p6_r] "r" (p6_r), \ - [s3] "r" (s3) \ - ); \ - \ - __asm__ __volatile__ ( \ - "sb %[q6_l], 6(%[s2]) \n\t" \ - "sb %[q5_l], 5(%[s2]) \n\t" \ - "sb %[q4_l], 4(%[s2]) \n\t" \ - "sb %[q3_l], 3(%[s2]) \n\t" \ - "sb %[q2_l], 2(%[s2]) \n\t" \ - "sb %[q1_l], 1(%[s2]) \n\t" \ - "sb %[q0_l], 0(%[s2]) \n\t" \ - "sb %[p0_l], -1(%[s2]) \n\t" \ - "sb %[p1_l], -2(%[s2]) \n\t" \ - "sb %[p2_l], -3(%[s2]) \n\t" \ - "sb %[p3_l], -4(%[s2]) \n\t" \ - "sb %[p4_l], -5(%[s2]) \n\t" \ - "sb %[p5_l], -6(%[s2]) \n\t" \ - "sb %[p6_l], -7(%[s2]) \n\t" \ - \ - : \ - : [q6_l] "r" (q6_l), [q5_l] "r" (q5_l), [q4_l] "r" (q4_l), \ - [q3_l] "r" (q3_l), [q2_l] "r" (q2_l), [q1_l] "r" (q1_l), \ - [q0_l] "r" 
(q0_l), \ - [p0_l] "r" (p0_l), [p1_l] "r" (p1_l), [p2_l] "r" (p2_l), \ - [p3_l] "r" (p3_l), [p4_l] "r" (p4_l), [p5_l] "r" (p5_l), \ - [p6_l] "r" (p6_l), \ - [s2] "r" (s2) \ - ); \ - \ - __asm__ __volatile__ ( \ - "srl %[q6_l], %[q6_l], 16 \n\t" \ - "srl %[q5_l], %[q5_l], 16 \n\t" \ - "srl %[q4_l], %[q4_l], 16 \n\t" \ - "srl %[q3_l], %[q3_l], 16 \n\t" \ - "srl %[q2_l], %[q2_l], 16 \n\t" \ - "srl %[q1_l], %[q1_l], 16 \n\t" \ - "srl %[q0_l], %[q0_l], 16 \n\t" \ - "srl %[p0_l], %[p0_l], 16 \n\t" \ - "srl %[p1_l], %[p1_l], 16 \n\t" \ - "srl %[p2_l], %[p2_l], 16 \n\t" \ - "srl %[p3_l], %[p3_l], 16 \n\t" \ - "srl %[p4_l], %[p4_l], 16 \n\t" \ - "srl %[p5_l], %[p5_l], 16 \n\t" \ - "srl %[p6_l], %[p6_l], 16 \n\t" \ - \ - : [q6_l] "+r" (q6_l), [q5_l] "+r" (q5_l), [q4_l] "+r" (q4_l), \ - [q3_l] "+r" (q3_l), [q2_l] "+r" (q2_l), [q1_l] "+r" (q1_l), \ - [q0_l] "+r" (q0_l), \ - [p0_l] "+r" (p0_l), [p1_l] "+r" (p1_l), [p2_l] "+r" (p2_l), \ - [p3_l] "+r" (p3_l), [p4_l] "+r" (p4_l), [p5_l] "+r" (p5_l), \ - [p6_l] "+r" (p6_l) \ - : \ - ); \ - \ - __asm__ __volatile__ ( \ - "sb %[q6_l], 6(%[s1]) \n\t" \ - "sb %[q5_l], 5(%[s1]) \n\t" \ - "sb %[q4_l], 4(%[s1]) \n\t" \ - "sb %[q3_l], 3(%[s1]) \n\t" \ - "sb %[q2_l], 2(%[s1]) \n\t" \ - "sb %[q1_l], 1(%[s1]) \n\t" \ - "sb %[q0_l], 0(%[s1]) \n\t" \ - "sb %[p0_l], -1(%[s1]) \n\t" \ - "sb %[p1_l], -2(%[s1]) \n\t" \ - "sb %[p2_l], -3(%[s1]) \n\t" \ - "sb %[p3_l], -4(%[s1]) \n\t" \ - "sb %[p4_l], -5(%[s1]) \n\t" \ - "sb %[p5_l], -6(%[s1]) \n\t" \ - "sb %[p6_l], -7(%[s1]) \n\t" \ - \ - : \ - : [q6_l] "r" (q6_l), [q5_l] "r" (q5_l), [q4_l] "r" (q4_l), \ - [q3_l] "r" (q3_l), [q2_l] "r" (q2_l), [q1_l] "r" (q1_l), \ - [q0_l] "r" (q0_l), \ - [p0_l] "r" (p0_l), [p1_l] "r" (p1_l), [p2_l] "r" (p2_l), \ - [p3_l] "r" (p3_l), [p4_l] "r" (p4_l), [p5_l] "r" (p5_l), \ - [p6_l] "r" (p6_l), \ - [s1] "r" (s1) \ - ); \ -} +#define STORE_F2() \ + { \ + __asm__ __volatile__( \ + "sb %[q6_r], 6(%[s4]) \n\t" \ + "sb %[q5_r], 5(%[s4]) \n\t" \ + "sb %[q4_r], 4(%[s4]) 
\n\t" \ + "sb %[q3_r], 3(%[s4]) \n\t" \ + "sb %[q2_r], 2(%[s4]) \n\t" \ + "sb %[q1_r], 1(%[s4]) \n\t" \ + "sb %[q0_r], 0(%[s4]) \n\t" \ + "sb %[p0_r], -1(%[s4]) \n\t" \ + "sb %[p1_r], -2(%[s4]) \n\t" \ + "sb %[p2_r], -3(%[s4]) \n\t" \ + "sb %[p3_r], -4(%[s4]) \n\t" \ + "sb %[p4_r], -5(%[s4]) \n\t" \ + "sb %[p5_r], -6(%[s4]) \n\t" \ + "sb %[p6_r], -7(%[s4]) \n\t" \ + \ + : \ + : [q6_r] "r"(q6_r), [q5_r] "r"(q5_r), [q4_r] "r"(q4_r), \ + [q3_r] "r"(q3_r), [q2_r] "r"(q2_r), [q1_r] "r"(q1_r), \ + [q0_r] "r"(q0_r), [p0_r] "r"(p0_r), [p1_r] "r"(p1_r), \ + [p2_r] "r"(p2_r), [p3_r] "r"(p3_r), [p4_r] "r"(p4_r), \ + [p5_r] "r"(p5_r), [p6_r] "r"(p6_r), [s4] "r"(s4)); \ + \ + __asm__ __volatile__( \ + "srl %[q6_r], %[q6_r], 16 \n\t" \ + "srl %[q5_r], %[q5_r], 16 \n\t" \ + "srl %[q4_r], %[q4_r], 16 \n\t" \ + "srl %[q3_r], %[q3_r], 16 \n\t" \ + "srl %[q2_r], %[q2_r], 16 \n\t" \ + "srl %[q1_r], %[q1_r], 16 \n\t" \ + "srl %[q0_r], %[q0_r], 16 \n\t" \ + "srl %[p0_r], %[p0_r], 16 \n\t" \ + "srl %[p1_r], %[p1_r], 16 \n\t" \ + "srl %[p2_r], %[p2_r], 16 \n\t" \ + "srl %[p3_r], %[p3_r], 16 \n\t" \ + "srl %[p4_r], %[p4_r], 16 \n\t" \ + "srl %[p5_r], %[p5_r], 16 \n\t" \ + "srl %[p6_r], %[p6_r], 16 \n\t" \ + \ + : [q6_r] "+r"(q6_r), [q5_r] "+r"(q5_r), [q4_r] "+r"(q4_r), \ + [q3_r] "+r"(q3_r), [q2_r] "+r"(q2_r), [q1_r] "+r"(q1_r), \ + [q0_r] "+r"(q0_r), [p0_r] "+r"(p0_r), [p1_r] "+r"(p1_r), \ + [p2_r] "+r"(p2_r), [p3_r] "+r"(p3_r), [p4_r] "+r"(p4_r), \ + [p5_r] "+r"(p5_r), [p6_r] "+r"(p6_r) \ + :); \ + \ + __asm__ __volatile__( \ + "sb %[q6_r], 6(%[s3]) \n\t" \ + "sb %[q5_r], 5(%[s3]) \n\t" \ + "sb %[q4_r], 4(%[s3]) \n\t" \ + "sb %[q3_r], 3(%[s3]) \n\t" \ + "sb %[q2_r], 2(%[s3]) \n\t" \ + "sb %[q1_r], 1(%[s3]) \n\t" \ + "sb %[q0_r], 0(%[s3]) \n\t" \ + "sb %[p0_r], -1(%[s3]) \n\t" \ + "sb %[p1_r], -2(%[s3]) \n\t" \ + "sb %[p2_r], -3(%[s3]) \n\t" \ + "sb %[p3_r], -4(%[s3]) \n\t" \ + "sb %[p4_r], -5(%[s3]) \n\t" \ + "sb %[p5_r], -6(%[s3]) \n\t" \ + "sb %[p6_r], -7(%[s3]) \n\t" \ + \ + : \ + : 
[q6_r] "r"(q6_r), [q5_r] "r"(q5_r), [q4_r] "r"(q4_r), \ + [q3_r] "r"(q3_r), [q2_r] "r"(q2_r), [q1_r] "r"(q1_r), \ + [q0_r] "r"(q0_r), [p0_r] "r"(p0_r), [p1_r] "r"(p1_r), \ + [p2_r] "r"(p2_r), [p3_r] "r"(p3_r), [p4_r] "r"(p4_r), \ + [p5_r] "r"(p5_r), [p6_r] "r"(p6_r), [s3] "r"(s3)); \ + \ + __asm__ __volatile__( \ + "sb %[q6_l], 6(%[s2]) \n\t" \ + "sb %[q5_l], 5(%[s2]) \n\t" \ + "sb %[q4_l], 4(%[s2]) \n\t" \ + "sb %[q3_l], 3(%[s2]) \n\t" \ + "sb %[q2_l], 2(%[s2]) \n\t" \ + "sb %[q1_l], 1(%[s2]) \n\t" \ + "sb %[q0_l], 0(%[s2]) \n\t" \ + "sb %[p0_l], -1(%[s2]) \n\t" \ + "sb %[p1_l], -2(%[s2]) \n\t" \ + "sb %[p2_l], -3(%[s2]) \n\t" \ + "sb %[p3_l], -4(%[s2]) \n\t" \ + "sb %[p4_l], -5(%[s2]) \n\t" \ + "sb %[p5_l], -6(%[s2]) \n\t" \ + "sb %[p6_l], -7(%[s2]) \n\t" \ + \ + : \ + : [q6_l] "r"(q6_l), [q5_l] "r"(q5_l), [q4_l] "r"(q4_l), \ + [q3_l] "r"(q3_l), [q2_l] "r"(q2_l), [q1_l] "r"(q1_l), \ + [q0_l] "r"(q0_l), [p0_l] "r"(p0_l), [p1_l] "r"(p1_l), \ + [p2_l] "r"(p2_l), [p3_l] "r"(p3_l), [p4_l] "r"(p4_l), \ + [p5_l] "r"(p5_l), [p6_l] "r"(p6_l), [s2] "r"(s2)); \ + \ + __asm__ __volatile__( \ + "srl %[q6_l], %[q6_l], 16 \n\t" \ + "srl %[q5_l], %[q5_l], 16 \n\t" \ + "srl %[q4_l], %[q4_l], 16 \n\t" \ + "srl %[q3_l], %[q3_l], 16 \n\t" \ + "srl %[q2_l], %[q2_l], 16 \n\t" \ + "srl %[q1_l], %[q1_l], 16 \n\t" \ + "srl %[q0_l], %[q0_l], 16 \n\t" \ + "srl %[p0_l], %[p0_l], 16 \n\t" \ + "srl %[p1_l], %[p1_l], 16 \n\t" \ + "srl %[p2_l], %[p2_l], 16 \n\t" \ + "srl %[p3_l], %[p3_l], 16 \n\t" \ + "srl %[p4_l], %[p4_l], 16 \n\t" \ + "srl %[p5_l], %[p5_l], 16 \n\t" \ + "srl %[p6_l], %[p6_l], 16 \n\t" \ + \ + : [q6_l] "+r"(q6_l), [q5_l] "+r"(q5_l), [q4_l] "+r"(q4_l), \ + [q3_l] "+r"(q3_l), [q2_l] "+r"(q2_l), [q1_l] "+r"(q1_l), \ + [q0_l] "+r"(q0_l), [p0_l] "+r"(p0_l), [p1_l] "+r"(p1_l), \ + [p2_l] "+r"(p2_l), [p3_l] "+r"(p3_l), [p4_l] "+r"(p4_l), \ + [p5_l] "+r"(p5_l), [p6_l] "+r"(p6_l) \ + :); \ + \ + __asm__ __volatile__( \ + "sb %[q6_l], 6(%[s1]) \n\t" \ + "sb %[q5_l], 5(%[s1]) \n\t" \ + 
"sb %[q4_l], 4(%[s1]) \n\t" \ + "sb %[q3_l], 3(%[s1]) \n\t" \ + "sb %[q2_l], 2(%[s1]) \n\t" \ + "sb %[q1_l], 1(%[s1]) \n\t" \ + "sb %[q0_l], 0(%[s1]) \n\t" \ + "sb %[p0_l], -1(%[s1]) \n\t" \ + "sb %[p1_l], -2(%[s1]) \n\t" \ + "sb %[p2_l], -3(%[s1]) \n\t" \ + "sb %[p3_l], -4(%[s1]) \n\t" \ + "sb %[p4_l], -5(%[s1]) \n\t" \ + "sb %[p5_l], -6(%[s1]) \n\t" \ + "sb %[p6_l], -7(%[s1]) \n\t" \ + \ + : \ + : [q6_l] "r"(q6_l), [q5_l] "r"(q5_l), [q4_l] "r"(q4_l), \ + [q3_l] "r"(q3_l), [q2_l] "r"(q2_l), [q1_l] "r"(q1_l), \ + [q0_l] "r"(q0_l), [p0_l] "r"(p0_l), [p1_l] "r"(p1_l), \ + [p2_l] "r"(p2_l), [p3_l] "r"(p3_l), [p4_l] "r"(p4_l), \ + [p5_l] "r"(p5_l), [p6_l] "r"(p6_l), [s1] "r"(s1)); \ + } -#define PACK_LEFT_0TO3() { \ - __asm__ __volatile__ ( \ - "preceu.ph.qbl %[p3_l], %[p3] \n\t" \ - "preceu.ph.qbl %[p2_l], %[p2] \n\t" \ - "preceu.ph.qbl %[p1_l], %[p1] \n\t" \ - "preceu.ph.qbl %[p0_l], %[p0] \n\t" \ - "preceu.ph.qbl %[q0_l], %[q0] \n\t" \ - "preceu.ph.qbl %[q1_l], %[q1] \n\t" \ - "preceu.ph.qbl %[q2_l], %[q2] \n\t" \ - "preceu.ph.qbl %[q3_l], %[q3] \n\t" \ - \ - : [p3_l] "=&r" (p3_l), [p2_l] "=&r" (p2_l), \ - [p1_l] "=&r" (p1_l), [p0_l] "=&r" (p0_l), \ - [q0_l] "=&r" (q0_l), [q1_l] "=&r" (q1_l), \ - [q2_l] "=&r" (q2_l), [q3_l] "=&r" (q3_l) \ - : [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0), \ - [q0] "r" (q0), [q1] "r" (q1), [q2] "r" (q2), [q3] "r" (q3) \ - ); \ -} +#define PACK_LEFT_0TO3() \ + { \ + __asm__ __volatile__( \ + "preceu.ph.qbl %[p3_l], %[p3] \n\t" \ + "preceu.ph.qbl %[p2_l], %[p2] \n\t" \ + "preceu.ph.qbl %[p1_l], %[p1] \n\t" \ + "preceu.ph.qbl %[p0_l], %[p0] \n\t" \ + "preceu.ph.qbl %[q0_l], %[q0] \n\t" \ + "preceu.ph.qbl %[q1_l], %[q1] \n\t" \ + "preceu.ph.qbl %[q2_l], %[q2] \n\t" \ + "preceu.ph.qbl %[q3_l], %[q3] \n\t" \ + \ + : [p3_l] "=&r"(p3_l), [p2_l] "=&r"(p2_l), [p1_l] "=&r"(p1_l), \ + [p0_l] "=&r"(p0_l), [q0_l] "=&r"(q0_l), [q1_l] "=&r"(q1_l), \ + [q2_l] "=&r"(q2_l), [q3_l] "=&r"(q3_l) \ + : [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1), 
[p0] "r"(p0), \ + [q0] "r"(q0), [q1] "r"(q1), [q2] "r"(q2), [q3] "r"(q3)); \ + } -#define PACK_LEFT_4TO7() { \ - __asm__ __volatile__ ( \ - "preceu.ph.qbl %[p7_l], %[p7] \n\t" \ - "preceu.ph.qbl %[p6_l], %[p6] \n\t" \ - "preceu.ph.qbl %[p5_l], %[p5] \n\t" \ - "preceu.ph.qbl %[p4_l], %[p4] \n\t" \ - "preceu.ph.qbl %[q4_l], %[q4] \n\t" \ - "preceu.ph.qbl %[q5_l], %[q5] \n\t" \ - "preceu.ph.qbl %[q6_l], %[q6] \n\t" \ - "preceu.ph.qbl %[q7_l], %[q7] \n\t" \ - \ - : [p7_l] "=&r" (p7_l), [p6_l] "=&r" (p6_l), \ - [p5_l] "=&r" (p5_l), [p4_l] "=&r" (p4_l), \ - [q4_l] "=&r" (q4_l), [q5_l] "=&r" (q5_l), \ - [q6_l] "=&r" (q6_l), [q7_l] "=&r" (q7_l) \ - : [p7] "r" (p7), [p6] "r" (p6), [p5] "r" (p5), [p4] "r" (p4), \ - [q4] "r" (q4), [q5] "r" (q5), [q6] "r" (q6), [q7] "r" (q7) \ - ); \ -} +#define PACK_LEFT_4TO7() \ + { \ + __asm__ __volatile__( \ + "preceu.ph.qbl %[p7_l], %[p7] \n\t" \ + "preceu.ph.qbl %[p6_l], %[p6] \n\t" \ + "preceu.ph.qbl %[p5_l], %[p5] \n\t" \ + "preceu.ph.qbl %[p4_l], %[p4] \n\t" \ + "preceu.ph.qbl %[q4_l], %[q4] \n\t" \ + "preceu.ph.qbl %[q5_l], %[q5] \n\t" \ + "preceu.ph.qbl %[q6_l], %[q6] \n\t" \ + "preceu.ph.qbl %[q7_l], %[q7] \n\t" \ + \ + : [p7_l] "=&r"(p7_l), [p6_l] "=&r"(p6_l), [p5_l] "=&r"(p5_l), \ + [p4_l] "=&r"(p4_l), [q4_l] "=&r"(q4_l), [q5_l] "=&r"(q5_l), \ + [q6_l] "=&r"(q6_l), [q7_l] "=&r"(q7_l) \ + : [p7] "r"(p7), [p6] "r"(p6), [p5] "r"(p5), [p4] "r"(p4), \ + [q4] "r"(q4), [q5] "r"(q5), [q6] "r"(q6), [q7] "r"(q7)); \ + } -#define PACK_RIGHT_0TO3() { \ - __asm__ __volatile__ ( \ - "preceu.ph.qbr %[p3_r], %[p3] \n\t" \ - "preceu.ph.qbr %[p2_r], %[p2] \n\t" \ - "preceu.ph.qbr %[p1_r], %[p1] \n\t" \ - "preceu.ph.qbr %[p0_r], %[p0] \n\t" \ - "preceu.ph.qbr %[q0_r], %[q0] \n\t" \ - "preceu.ph.qbr %[q1_r], %[q1] \n\t" \ - "preceu.ph.qbr %[q2_r], %[q2] \n\t" \ - "preceu.ph.qbr %[q3_r], %[q3] \n\t" \ - \ - : [p3_r] "=&r" (p3_r), [p2_r] "=&r" (p2_r), \ - [p1_r] "=&r" (p1_r), [p0_r] "=&r" (p0_r), \ - [q0_r] "=&r" (q0_r), [q1_r] "=&r" (q1_r), \ - 
[q2_r] "=&r" (q2_r), [q3_r] "=&r" (q3_r) \ - : [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0), \ - [q0] "r" (q0), [q1] "r" (q1), [q2] "r" (q2), [q3] "r" (q3) \ - ); \ -} +#define PACK_RIGHT_0TO3() \ + { \ + __asm__ __volatile__( \ + "preceu.ph.qbr %[p3_r], %[p3] \n\t" \ + "preceu.ph.qbr %[p2_r], %[p2] \n\t" \ + "preceu.ph.qbr %[p1_r], %[p1] \n\t" \ + "preceu.ph.qbr %[p0_r], %[p0] \n\t" \ + "preceu.ph.qbr %[q0_r], %[q0] \n\t" \ + "preceu.ph.qbr %[q1_r], %[q1] \n\t" \ + "preceu.ph.qbr %[q2_r], %[q2] \n\t" \ + "preceu.ph.qbr %[q3_r], %[q3] \n\t" \ + \ + : [p3_r] "=&r"(p3_r), [p2_r] "=&r"(p2_r), [p1_r] "=&r"(p1_r), \ + [p0_r] "=&r"(p0_r), [q0_r] "=&r"(q0_r), [q1_r] "=&r"(q1_r), \ + [q2_r] "=&r"(q2_r), [q3_r] "=&r"(q3_r) \ + : [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0), \ + [q0] "r"(q0), [q1] "r"(q1), [q2] "r"(q2), [q3] "r"(q3)); \ + } -#define PACK_RIGHT_4TO7() { \ - __asm__ __volatile__ ( \ - "preceu.ph.qbr %[p7_r], %[p7] \n\t" \ - "preceu.ph.qbr %[p6_r], %[p6] \n\t" \ - "preceu.ph.qbr %[p5_r], %[p5] \n\t" \ - "preceu.ph.qbr %[p4_r], %[p4] \n\t" \ - "preceu.ph.qbr %[q4_r], %[q4] \n\t" \ - "preceu.ph.qbr %[q5_r], %[q5] \n\t" \ - "preceu.ph.qbr %[q6_r], %[q6] \n\t" \ - "preceu.ph.qbr %[q7_r], %[q7] \n\t" \ - \ - : [p7_r] "=&r" (p7_r), [p6_r] "=&r" (p6_r), \ - [p5_r] "=&r" (p5_r), [p4_r] "=&r" (p4_r), \ - [q4_r] "=&r" (q4_r), [q5_r] "=&r" (q5_r), \ - [q6_r] "=&r" (q6_r), [q7_r] "=&r" (q7_r) \ - : [p7] "r" (p7), [p6] "r" (p6), [p5] "r" (p5), [p4] "r" (p4), \ - [q4] "r" (q4), [q5] "r" (q5), [q6] "r" (q6), [q7] "r" (q7) \ - ); \ -} +#define PACK_RIGHT_4TO7() \ + { \ + __asm__ __volatile__( \ + "preceu.ph.qbr %[p7_r], %[p7] \n\t" \ + "preceu.ph.qbr %[p6_r], %[p6] \n\t" \ + "preceu.ph.qbr %[p5_r], %[p5] \n\t" \ + "preceu.ph.qbr %[p4_r], %[p4] \n\t" \ + "preceu.ph.qbr %[q4_r], %[q4] \n\t" \ + "preceu.ph.qbr %[q5_r], %[q5] \n\t" \ + "preceu.ph.qbr %[q6_r], %[q6] \n\t" \ + "preceu.ph.qbr %[q7_r], %[q7] \n\t" \ + \ + : [p7_r] "=&r"(p7_r), [p6_r] 
"=&r"(p6_r), [p5_r] "=&r"(p5_r), \ + [p4_r] "=&r"(p4_r), [q4_r] "=&r"(q4_r), [q5_r] "=&r"(q5_r), \ + [q6_r] "=&r"(q6_r), [q7_r] "=&r"(q7_r) \ + : [p7] "r"(p7), [p6] "r"(p6), [p5] "r"(p5), [p4] "r"(p4), \ + [q4] "r"(q4), [q5] "r"(q5), [q6] "r"(q6), [q7] "r"(q7)); \ + } -#define COMBINE_LEFT_RIGHT_0TO2() { \ - __asm__ __volatile__ ( \ - "precr.qb.ph %[p2], %[p2_l], %[p2_r] \n\t" \ - "precr.qb.ph %[p1], %[p1_l], %[p1_r] \n\t" \ - "precr.qb.ph %[p0], %[p0_l], %[p0_r] \n\t" \ - "precr.qb.ph %[q0], %[q0_l], %[q0_r] \n\t" \ - "precr.qb.ph %[q1], %[q1_l], %[q1_r] \n\t" \ - "precr.qb.ph %[q2], %[q2_l], %[q2_r] \n\t" \ - \ - : [p2] "=&r" (p2), [p1] "=&r" (p1), [p0] "=&r" (p0), \ - [q0] "=&r" (q0), [q1] "=&r" (q1), [q2] "=&r" (q2) \ - : [p2_l] "r" (p2_l), [p2_r] "r" (p2_r), \ - [p1_l] "r" (p1_l), [p1_r] "r" (p1_r), \ - [p0_l] "r" (p0_l), [p0_r] "r" (p0_r), \ - [q0_l] "r" (q0_l), [q0_r] "r" (q0_r), \ - [q1_l] "r" (q1_l), [q1_r] "r" (q1_r), \ - [q2_l] "r" (q2_l), [q2_r] "r" (q2_r) \ - ); \ -} +#define COMBINE_LEFT_RIGHT_0TO2() \ + { \ + __asm__ __volatile__( \ + "precr.qb.ph %[p2], %[p2_l], %[p2_r] \n\t" \ + "precr.qb.ph %[p1], %[p1_l], %[p1_r] \n\t" \ + "precr.qb.ph %[p0], %[p0_l], %[p0_r] \n\t" \ + "precr.qb.ph %[q0], %[q0_l], %[q0_r] \n\t" \ + "precr.qb.ph %[q1], %[q1_l], %[q1_r] \n\t" \ + "precr.qb.ph %[q2], %[q2_l], %[q2_r] \n\t" \ + \ + : [p2] "=&r"(p2), [p1] "=&r"(p1), [p0] "=&r"(p0), [q0] "=&r"(q0), \ + [q1] "=&r"(q1), [q2] "=&r"(q2) \ + : [p2_l] "r"(p2_l), [p2_r] "r"(p2_r), [p1_l] "r"(p1_l), \ + [p1_r] "r"(p1_r), [p0_l] "r"(p0_l), [p0_r] "r"(p0_r), \ + [q0_l] "r"(q0_l), [q0_r] "r"(q0_r), [q1_l] "r"(q1_l), \ + [q1_r] "r"(q1_r), [q2_l] "r"(q2_l), [q2_r] "r"(q2_r)); \ + } -#define COMBINE_LEFT_RIGHT_3TO6() { \ - __asm__ __volatile__ ( \ - "precr.qb.ph %[p6], %[p6_l], %[p6_r] \n\t" \ - "precr.qb.ph %[p5], %[p5_l], %[p5_r] \n\t" \ - "precr.qb.ph %[p4], %[p4_l], %[p4_r] \n\t" \ - "precr.qb.ph %[p3], %[p3_l], %[p3_r] \n\t" \ - "precr.qb.ph %[q3], %[q3_l], %[q3_r] \n\t" \ - 
"precr.qb.ph %[q4], %[q4_l], %[q4_r] \n\t" \ - "precr.qb.ph %[q5], %[q5_l], %[q5_r] \n\t" \ - "precr.qb.ph %[q6], %[q6_l], %[q6_r] \n\t" \ - \ - : [p6] "=&r" (p6),[p5] "=&r" (p5), \ - [p4] "=&r" (p4),[p3] "=&r" (p3), \ - [q3] "=&r" (q3),[q4] "=&r" (q4), \ - [q5] "=&r" (q5),[q6] "=&r" (q6) \ - : [p6_l] "r" (p6_l), [p5_l] "r" (p5_l), \ - [p4_l] "r" (p4_l), [p3_l] "r" (p3_l), \ - [p6_r] "r" (p6_r), [p5_r] "r" (p5_r), \ - [p4_r] "r" (p4_r), [p3_r] "r" (p3_r), \ - [q3_l] "r" (q3_l), [q4_l] "r" (q4_l), \ - [q5_l] "r" (q5_l), [q6_l] "r" (q6_l), \ - [q3_r] "r" (q3_r), [q4_r] "r" (q4_r), \ - [q5_r] "r" (q5_r), [q6_r] "r" (q6_r) \ - ); \ -} +#define COMBINE_LEFT_RIGHT_3TO6() \ + { \ + __asm__ __volatile__( \ + "precr.qb.ph %[p6], %[p6_l], %[p6_r] \n\t" \ + "precr.qb.ph %[p5], %[p5_l], %[p5_r] \n\t" \ + "precr.qb.ph %[p4], %[p4_l], %[p4_r] \n\t" \ + "precr.qb.ph %[p3], %[p3_l], %[p3_r] \n\t" \ + "precr.qb.ph %[q3], %[q3_l], %[q3_r] \n\t" \ + "precr.qb.ph %[q4], %[q4_l], %[q4_r] \n\t" \ + "precr.qb.ph %[q5], %[q5_l], %[q5_r] \n\t" \ + "precr.qb.ph %[q6], %[q6_l], %[q6_r] \n\t" \ + \ + : [p6] "=&r"(p6), [p5] "=&r"(p5), [p4] "=&r"(p4), [p3] "=&r"(p3), \ + [q3] "=&r"(q3), [q4] "=&r"(q4), [q5] "=&r"(q5), [q6] "=&r"(q6) \ + : [p6_l] "r"(p6_l), [p5_l] "r"(p5_l), [p4_l] "r"(p4_l), \ + [p3_l] "r"(p3_l), [p6_r] "r"(p6_r), [p5_r] "r"(p5_r), \ + [p4_r] "r"(p4_r), [p3_r] "r"(p3_r), [q3_l] "r"(q3_l), \ + [q4_l] "r"(q4_l), [q5_l] "r"(q5_l), [q6_l] "r"(q6_l), \ + [q3_r] "r"(q3_r), [q4_r] "r"(q4_r), [q5_r] "r"(q5_r), \ + [q6_r] "r"(q6_r)); \ + } #endif // #if HAVE_DSPR2 #ifdef __cplusplus diff --git a/libs/libvpx/vpx_dsp/mips/loopfilter_masks_dspr2.h b/libs/libvpx/vpx_dsp/mips/loopfilter_masks_dspr2.h index 2c964afaa7..ee11142266 100644 --- a/libs/libvpx/vpx_dsp/mips/loopfilter_masks_dspr2.h +++ b/libs/libvpx/vpx_dsp/mips/loopfilter_masks_dspr2.h @@ -25,18 +25,17 @@ extern "C" { /* processing 4 pixels at the same time * compute hev and mask in the same function */ static INLINE void 
filter_hev_mask_dspr2(uint32_t limit, uint32_t flimit, - uint32_t p1, uint32_t p0, - uint32_t p3, uint32_t p2, - uint32_t q0, uint32_t q1, + uint32_t p1, uint32_t p0, uint32_t p3, + uint32_t p2, uint32_t q0, uint32_t q1, uint32_t q2, uint32_t q3, uint32_t thresh, uint32_t *hev, uint32_t *mask) { - uint32_t c, r, r3, r_k; - uint32_t s1, s2, s3; - uint32_t ones = 0xFFFFFFFF; - uint32_t hev1; + uint32_t c, r, r3, r_k; + uint32_t s1, s2, s3; + uint32_t ones = 0xFFFFFFFF; + uint32_t hev1; - __asm__ __volatile__ ( + __asm__ __volatile__( /* mask |= (abs(p3 - p2) > limit) */ "subu_s.qb %[c], %[p3], %[p2] \n\t" "subu_s.qb %[r_k], %[p2], %[p3] \n\t" @@ -88,14 +87,12 @@ static INLINE void filter_hev_mask_dspr2(uint32_t limit, uint32_t flimit, "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t" "or %[r], %[r], %[c] \n\t" - : [c] "=&r" (c), [r_k] "=&r" (r_k), - [r] "=&r" (r), [r3] "=&r" (r3) - : [limit] "r" (limit), [p3] "r" (p3), [p2] "r" (p2), - [p1] "r" (p1), [p0] "r" (p0), [q1] "r" (q1), [q0] "r" (q0), - [q2] "r" (q2), [q3] "r" (q3), [thresh] "r" (thresh) - ); + : [c] "=&r"(c), [r_k] "=&r"(r_k), [r] "=&r"(r), [r3] "=&r"(r3) + : [limit] "r"(limit), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1), + [p0] "r"(p0), [q1] "r"(q1), [q0] "r"(q0), [q2] "r"(q2), [q3] "r"(q3), + [thresh] "r"(thresh)); - __asm__ __volatile__ ( + __asm__ __volatile__( /* abs(p0 - q0) */ "subu_s.qb %[c], %[p0], %[q0] \n\t" "subu_s.qb %[r_k], %[q0], %[p0] \n\t" @@ -119,34 +116,27 @@ static INLINE void filter_hev_mask_dspr2(uint32_t limit, uint32_t flimit, "wrdsp %[r] \n\t" "pick.qb %[s2], $0, %[ones] \n\t" - : [c] "=&r" (c), [r_k] "=&r" (r_k), [s1] "=&r" (s1), [hev1] "=&r" (hev1), - [s2] "=&r" (s2), [r] "+r" (r), [s3] "=&r" (s3) - : [p0] "r" (p0), [q0] "r" (q0), [p1] "r" (p1), [r3] "r" (r3), - [q1] "r" (q1), [ones] "r" (ones), [flimit] "r" (flimit) - ); + : [c] "=&r"(c), [r_k] "=&r"(r_k), [s1] "=&r"(s1), [hev1] "=&r"(hev1), + [s2] "=&r"(s2), [r] "+r"(r), [s3] "=&r"(s3) + : [p0] "r"(p0), [q0] "r"(q0), [p1] "r"(p1), [r3] 
"r"(r3), [q1] "r"(q1), + [ones] "r"(ones), [flimit] "r"(flimit)); *hev = hev1; *mask = s2; } -static INLINE void filter_hev_mask_flatmask4_dspr2(uint32_t limit, - uint32_t flimit, - uint32_t thresh, - uint32_t p1, uint32_t p0, - uint32_t p3, uint32_t p2, - uint32_t q0, uint32_t q1, - uint32_t q2, uint32_t q3, - uint32_t *hev, - uint32_t *mask, - uint32_t *flat) { - uint32_t c, r, r3, r_k, r_flat; - uint32_t s1, s2, s3; - uint32_t ones = 0xFFFFFFFF; - uint32_t flat_thresh = 0x01010101; - uint32_t hev1; - uint32_t flat1; +static INLINE void filter_hev_mask_flatmask4_dspr2( + uint32_t limit, uint32_t flimit, uint32_t thresh, uint32_t p1, uint32_t p0, + uint32_t p3, uint32_t p2, uint32_t q0, uint32_t q1, uint32_t q2, + uint32_t q3, uint32_t *hev, uint32_t *mask, uint32_t *flat) { + uint32_t c, r, r3, r_k, r_flat; + uint32_t s1, s2, s3; + uint32_t ones = 0xFFFFFFFF; + uint32_t flat_thresh = 0x01010101; + uint32_t hev1; + uint32_t flat1; - __asm__ __volatile__ ( + __asm__ __volatile__( /* mask |= (abs(p3 - p2) > limit) */ "subu_s.qb %[c], %[p3], %[p2] \n\t" "subu_s.qb %[r_k], %[p2], %[p3] \n\t" @@ -236,15 +226,13 @@ static INLINE void filter_hev_mask_flatmask4_dspr2(uint32_t limit, "cmpgu.lt.qb %[c], %[limit], %[r_k] \n\t" "or %[r], %[r], %[c] \n\t" - : [c] "=&r" (c), [r_k] "=&r" (r_k), [r] "=&r" (r), [r3] "=&r" (r3), - [r_flat] "=&r" (r_flat), [flat1] "=&r" (flat1) - : [limit] "r" (limit), [p3] "r" (p3), [p2] "r" (p2), - [p1] "r" (p1), [p0] "r" (p0), [q1] "r" (q1), [q0] "r" (q0), - [q2] "r" (q2), [q3] "r" (q3), [thresh] "r" (thresh), - [flat_thresh] "r" (flat_thresh), [ones] "r" (ones) - ); + : [c] "=&r"(c), [r_k] "=&r"(r_k), [r] "=&r"(r), [r3] "=&r"(r3), + [r_flat] "=&r"(r_flat), [flat1] "=&r"(flat1) + : [limit] "r"(limit), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1), + [p0] "r"(p0), [q1] "r"(q1), [q0] "r"(q0), [q2] "r"(q2), [q3] "r"(q3), + [thresh] "r"(thresh), [flat_thresh] "r"(flat_thresh), [ones] "r"(ones)); - __asm__ __volatile__ ( + __asm__ __volatile__( /* abs(p0 - 
q0) */ "subu_s.qb %[c], %[p0], %[q0] \n\t" "subu_s.qb %[r_k], %[q0], %[p0] \n\t" @@ -268,29 +256,25 @@ static INLINE void filter_hev_mask_flatmask4_dspr2(uint32_t limit, "wrdsp %[r] \n\t" "pick.qb %[s2], $0, %[ones] \n\t" - : [c] "=&r" (c), [r_k] "=&r" (r_k), [s1] "=&r" (s1), [hev1] "=&r" (hev1), - [s2] "=&r" (s2), [r] "+r" (r), [s3] "=&r" (s3) - : [p0] "r" (p0), [q0] "r" (q0), [p1] "r" (p1), [r3] "r" (r3), - [q1] "r" (q1), [ones] "r" (ones), [flimit] "r" (flimit) - ); + : [c] "=&r"(c), [r_k] "=&r"(r_k), [s1] "=&r"(s1), [hev1] "=&r"(hev1), + [s2] "=&r"(s2), [r] "+r"(r), [s3] "=&r"(s3) + : [p0] "r"(p0), [q0] "r"(q0), [p1] "r"(p1), [r3] "r"(r3), [q1] "r"(q1), + [ones] "r"(ones), [flimit] "r"(flimit)); *hev = hev1; *mask = s2; *flat = flat1; } -static INLINE void flatmask5(uint32_t p4, uint32_t p3, - uint32_t p2, uint32_t p1, - uint32_t p0, uint32_t q0, - uint32_t q1, uint32_t q2, - uint32_t q3, uint32_t q4, - uint32_t *flat2) { - uint32_t c, r, r_k, r_flat; - uint32_t ones = 0xFFFFFFFF; - uint32_t flat_thresh = 0x01010101; - uint32_t flat1, flat3; +static INLINE void flatmask5(uint32_t p4, uint32_t p3, uint32_t p2, uint32_t p1, + uint32_t p0, uint32_t q0, uint32_t q1, uint32_t q2, + uint32_t q3, uint32_t q4, uint32_t *flat2) { + uint32_t c, r, r_k, r_flat; + uint32_t ones = 0xFFFFFFFF; + uint32_t flat_thresh = 0x01010101; + uint32_t flat1, flat3; - __asm__ __volatile__ ( + __asm__ __volatile__( /* flat |= (abs(p4 - p0) > thresh) */ "subu_s.qb %[c], %[p4], %[p0] \n\t" "subu_s.qb %[r_k], %[p0], %[p4] \n\t" @@ -355,13 +339,11 @@ static INLINE void flatmask5(uint32_t p4, uint32_t p3, /* flat & flatmask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3) */ "and %[flat1], %[flat3], %[flat1] \n\t" - : [c] "=&r" (c), [r_k] "=&r" (r_k), [r] "=&r" (r), - [r_flat] "=&r" (r_flat), [flat1] "=&r" (flat1), [flat3] "=&r" (flat3) - : [p4] "r" (p4), [p3] "r" (p3), [p2] "r" (p2), - [p1] "r" (p1), [p0] "r" (p0), [q0] "r" (q0), [q1] "r" (q1), - [q2] "r" (q2), [q3] "r" (q3), [q4] "r" (q4), - 
[flat_thresh] "r" (flat_thresh), [ones] "r" (ones) - ); + : [c] "=&r"(c), [r_k] "=&r"(r_k), [r] "=&r"(r), [r_flat] "=&r"(r_flat), + [flat1] "=&r"(flat1), [flat3] "=&r"(flat3) + : [p4] "r"(p4), [p3] "r"(p3), [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0), + [q0] "r"(q0), [q1] "r"(q1), [q2] "r"(q2), [q3] "r"(q3), [q4] "r"(q4), + [flat_thresh] "r"(flat_thresh), [ones] "r"(ones)); *flat2 = flat1; } diff --git a/libs/libvpx/vpx_dsp/mips/loopfilter_mb_dspr2.c b/libs/libvpx/vpx_dsp/mips/loopfilter_mb_dspr2.c index 4138f56978..e42479257c 100644 --- a/libs/libvpx/vpx_dsp/mips/loopfilter_mb_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/loopfilter_mb_dspr2.c @@ -19,37 +19,33 @@ #include "vpx_mem/vpx_mem.h" #if HAVE_DSPR2 -void vpx_lpf_horizontal_8_dspr2(unsigned char *s, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { - uint32_t mask; - uint32_t hev, flat; - uint8_t i; - uint8_t *sp3, *sp2, *sp1, *sp0, *sq0, *sq1, *sq2, *sq3; - uint32_t thresh_vec, flimit_vec, limit_vec; - uint32_t uflimit, ulimit, uthresh; - uint32_t p1_f0, p0_f0, q0_f0, q1_f0; - uint32_t p3, p2, p1, p0, q0, q1, q2, q3; - uint32_t p0_l, p1_l, p2_l, p3_l, q0_l, q1_l, q2_l, q3_l; - uint32_t p0_r, p1_r, p2_r, p3_r, q0_r, q1_r, q2_r, q3_r; +void vpx_lpf_horizontal_8_dspr2(unsigned char *s, int pitch, + const uint8_t *blimit, const uint8_t *limit, + const uint8_t *thresh) { + uint32_t mask; + uint32_t hev, flat; + uint8_t i; + uint8_t *sp3, *sp2, *sp1, *sp0, *sq0, *sq1, *sq2, *sq3; + uint32_t thresh_vec, flimit_vec, limit_vec; + uint32_t uflimit, ulimit, uthresh; + uint32_t p1_f0, p0_f0, q0_f0, q1_f0; + uint32_t p3, p2, p1, p0, q0, q1, q2, q3; + uint32_t p0_l, p1_l, p2_l, p3_l, q0_l, q1_l, q2_l, q3_l; + uint32_t p0_r, p1_r, p2_r, p3_r, q0_r, q1_r, q2_r, q3_r; uflimit = *blimit; - ulimit = *limit; + ulimit = *limit; uthresh = *thresh; /* create quad-byte */ - __asm__ __volatile__ ( + __asm__ __volatile__( "replv.qb %[thresh_vec], %[uthresh] \n\t" "replv.qb %[flimit_vec], 
%[uflimit] \n\t" "replv.qb %[limit_vec], %[ulimit] \n\t" - : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec), - [limit_vec] "=r" (limit_vec) - : [uthresh] "r" (uthresh), [uflimit] "r" (uflimit), [ulimit] "r" (ulimit) - ); + : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec), + [limit_vec] "=r"(limit_vec) + : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit)); /* prefetch data for store */ prefetch_store(s); @@ -64,7 +60,7 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s, sq2 = sq1 + pitch; sq3 = sq2 + pitch; - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[p3], (%[sp3]) \n\t" "lw %[p2], (%[sp2]) \n\t" "lw %[p1], (%[sp1]) \n\t" @@ -74,46 +70,39 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s, "lw %[q2], (%[sq2]) \n\t" "lw %[q3], (%[sq3]) \n\t" - : [p3] "=&r" (p3), [p2] "=&r" (p2), [p1] "=&r" (p1), [p0] "=&r" (p0), - [q3] "=&r" (q3), [q2] "=&r" (q2), [q1] "=&r" (q1), [q0] "=&r" (q0) - : [sp3] "r" (sp3), [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq3] "r" (sq3), [sq2] "r" (sq2), [sq1] "r" (sq1), [sq0] "r" (sq0) - ); + : [p3] "=&r"(p3), [p2] "=&r"(p2), [p1] "=&r"(p1), [p0] "=&r"(p0), + [q3] "=&r"(q3), [q2] "=&r"(q2), [q1] "=&r"(q1), [q0] "=&r"(q0) + : [sp3] "r"(sp3), [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), + [sq3] "r"(sq3), [sq2] "r"(sq2), [sq1] "r"(sq1), [sq0] "r"(sq0)); - filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec, - p1, p0, p3, p2, q0, q1, q2, q3, - &hev, &mask, &flat); + filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec, p1, p0, + p3, p2, q0, q1, q2, q3, &hev, &mask, &flat); if ((flat == 0) && (mask != 0)) { - filter1_dspr2(mask, hev, p1, p0, q0, q1, - &p1_f0, &p0_f0, &q0_f0, &q1_f0); + filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0); - __asm__ __volatile__ ( + __asm__ __volatile__( "sw %[p1_f0], (%[sp1]) \n\t" "sw %[p0_f0], (%[sp0]) \n\t" "sw %[q0_f0], (%[sq0]) \n\t" "sw %[q1_f0], (%[sq1]) \n\t" : - : [p1_f0] "r" 
(p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), + [sq1] "r"(sq1)); } else if ((mask & flat) == 0xFFFFFFFF) { /* left 2 element operation */ PACK_LEFT_0TO3() - mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, - &q0_l, &q1_l, &q2_l, &q3_l); + mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l); /* right 2 element operation */ PACK_RIGHT_0TO3() - mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, - &q0_r, &q1_r, &q2_r, &q3_r); + mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r); COMBINE_LEFT_RIGHT_0TO2() - __asm__ __volatile__ ( + __asm__ __volatile__( "sw %[p2], (%[sp2]) \n\t" "sw %[p1], (%[sp1]) \n\t" "sw %[p0], (%[sp0]) \n\t" @@ -122,28 +111,23 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s, "sw %[q2], (%[sq2]) \n\t" : - : [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0), - [q0] "r" (q0), [q1] "r" (q1), [q2] "r" (q2), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2) - ); + : [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0), [q0] "r"(q0), + [q1] "r"(q1), [q2] "r"(q2), [sp2] "r"(sp2), [sp1] "r"(sp1), + [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2)); } else if ((flat != 0) && (mask != 0)) { /* filtering */ - filter1_dspr2(mask, hev, p1, p0, q0, q1, - &p1_f0, &p0_f0, &q0_f0, &q1_f0); + filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0); /* left 2 element operation */ PACK_LEFT_0TO3() - mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, - &q0_l, &q1_l, &q2_l, &q3_l); + mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l); /* right 2 element operation */ PACK_RIGHT_0TO3() - mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, - &q0_r, &q1_r, &q2_r, &q3_r); + mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r); 
if (mask & flat & 0x000000FF) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_r], (%[sp2]) \n\t" "sb %[p1_r], (%[sp1]) \n\t" "sb %[p0_r], (%[sp0]) \n\t" @@ -152,27 +136,24 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s, "sb %[q2_r], (%[sq2]) \n\t" : - : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r), - [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2) - ); + : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r), + [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r), + [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), + [sq1] "r"(sq1), [sq2] "r"(sq2)); } else if (mask & 0x000000FF) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], (%[sp1]) \n\t" "sb %[p0_f0], (%[sp0]) \n\t" "sb %[q0_f0], (%[sq0]) \n\t" "sb %[q1_f0], (%[sq1]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), + [sq0] "r"(sq0), [sq1] "r"(sq1)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p2_r], %[p2_r], 16 \n\t" "srl %[p1_r], %[p1_r], 16 \n\t" "srl %[p0_r], %[p0_r], 16 \n\t" @@ -184,15 +165,14 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s, "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p2_r] "+r" (p2_r), [p1_r] "+r" (p1_r), [p0_r] "+r" (p0_r), - [q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r), [q2_r] "+r" (q2_r), - [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p2_r] "+r"(p2_r), [p1_r] "+r"(p1_r), [p0_r] "+r"(p0_r), + [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r), + [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & 0x0000FF00) { - __asm__ 
__volatile__ ( + __asm__ __volatile__( "sb %[p2_r], +1(%[sp2]) \n\t" "sb %[p1_r], +1(%[sp1]) \n\t" "sb %[p0_r], +1(%[sp0]) \n\t" @@ -201,41 +181,36 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s, "sb %[q2_r], +1(%[sq2]) \n\t" : - : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r), - [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2) - ); + : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r), + [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r), + [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), + [sq1] "r"(sq1), [sq2] "r"(sq2)); } else if (mask & 0x0000FF00) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], +1(%[sp1]) \n\t" "sb %[p0_f0], +1(%[sp0]) \n\t" "sb %[q0_f0], +1(%[sq0]) \n\t" "sb %[q1_f0], +1(%[sq1]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), + [sq0] "r"(sq0), [sq1] "r"(sq1)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p1_f0], %[p1_f0], 8 \n\t" "srl %[p0_f0], %[p0_f0], 8 \n\t" "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), - [q0] "+r" (q0), [q1] "+r" (q1), [q2] "+r" (q2), - [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0), [q0] "+r"(q0), + [q1] "+r"(q1), [q2] "+r"(q2), [p1_f0] "+r"(p1_f0), + [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), [q1_f0] "+r"(q1_f0) + :); if (mask & flat & 0x00FF0000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_l], +2(%[sp2]) \n\t" "sb %[p1_l], +2(%[sp1]) \n\t" "sb %[p0_l], +2(%[sp0]) \n\t" @@ -244,27 +219,24 @@ void 
vpx_lpf_horizontal_8_dspr2(unsigned char *s, "sb %[q2_l], +2(%[sq2]) \n\t" : - : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l), - [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2) - ); + : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l), + [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l), + [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), + [sq1] "r"(sq1), [sq2] "r"(sq2)); } else if (mask & 0x00FF0000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], +2(%[sp1]) \n\t" "sb %[p0_f0], +2(%[sp0]) \n\t" "sb %[q0_f0], +2(%[sq0]) \n\t" "sb %[q1_f0], +2(%[sq1]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), + [sq0] "r"(sq0), [sq1] "r"(sq1)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p2_l], %[p2_l], 16 \n\t" "srl %[p1_l], %[p1_l], 16 \n\t" "srl %[p0_l], %[p0_l], 16 \n\t" @@ -276,15 +248,14 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s, "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p2_l] "+r" (p2_l), [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l), - [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l), - [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p2_l] "+r"(p2_l), [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l), + [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l), + [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & 0xFF000000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_l], +3(%[sp2]) \n\t" "sb %[p1_l], +3(%[sp1]) \n\t" "sb %[p0_l], +3(%[sp0]) \n\t" @@ -293,24 +264,21 @@ void 
vpx_lpf_horizontal_8_dspr2(unsigned char *s, "sb %[q2_l], +3(%[sq2]) \n\t" : - : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l), - [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2) - ); + : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l), + [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l), + [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), + [sq1] "r"(sq1), [sq2] "r"(sq2)); } else if (mask & 0xFF000000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], +3(%[sp1]) \n\t" "sb %[p0_f0], +3(%[sp0]) \n\t" "sb %[q0_f0], +3(%[sq0]) \n\t" "sb %[q1_f0], +3(%[sq1]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), + [sq0] "r"(sq0), [sq1] "r"(sq1)); } } @@ -318,37 +286,33 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s, } } -void vpx_lpf_vertical_8_dspr2(unsigned char *s, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { - uint8_t i; - uint32_t mask, hev, flat; - uint8_t *s1, *s2, *s3, *s4; - uint32_t prim1, prim2, sec3, sec4, prim3, prim4; - uint32_t thresh_vec, flimit_vec, limit_vec; - uint32_t uflimit, ulimit, uthresh; - uint32_t p3, p2, p1, p0, q3, q2, q1, q0; - uint32_t p1_f0, p0_f0, q0_f0, q1_f0; - uint32_t p0_l, p1_l, p2_l, p3_l, q0_l, q1_l, q2_l, q3_l; - uint32_t p0_r, p1_r, p2_r, p3_r, q0_r, q1_r, q2_r, q3_r; +void vpx_lpf_vertical_8_dspr2(unsigned char *s, int pitch, + const uint8_t *blimit, const uint8_t *limit, + const uint8_t *thresh) { + uint8_t i; + uint32_t mask, hev, flat; + uint8_t *s1, *s2, *s3, *s4; + uint32_t prim1, prim2, sec3, sec4, prim3, prim4; + uint32_t thresh_vec, flimit_vec, limit_vec; + uint32_t 
uflimit, ulimit, uthresh; + uint32_t p3, p2, p1, p0, q3, q2, q1, q0; + uint32_t p1_f0, p0_f0, q0_f0, q1_f0; + uint32_t p0_l, p1_l, p2_l, p3_l, q0_l, q1_l, q2_l, q3_l; + uint32_t p0_r, p1_r, p2_r, p3_r, q0_r, q1_r, q2_r, q3_r; uflimit = *blimit; - ulimit = *limit; + ulimit = *limit; uthresh = *thresh; /* create quad-byte */ - __asm__ __volatile__ ( + __asm__ __volatile__( "replv.qb %[thresh_vec], %[uthresh] \n\t" "replv.qb %[flimit_vec], %[uflimit] \n\t" "replv.qb %[limit_vec], %[ulimit] \n\t" - : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec), - [limit_vec] "=r" (limit_vec) - : [uthresh] "r" (uthresh), [uflimit] "r" (uflimit), [ulimit] "r" (ulimit) - ); + : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec), + [limit_vec] "=r"(limit_vec) + : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit)); prefetch_store(s + pitch); @@ -357,9 +321,9 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s, s2 = s + pitch; s3 = s2 + pitch; s4 = s3 + pitch; - s = s4 + pitch; + s = s4 + pitch; - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[p0], -4(%[s1]) \n\t" "lw %[p1], -4(%[s2]) \n\t" "lw %[p2], -4(%[s3]) \n\t" @@ -369,10 +333,9 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s, "lw %[q1], (%[s3]) \n\t" "lw %[q0], (%[s4]) \n\t" - : [p3] "=&r" (p3), [p2] "=&r" (p2), [p1] "=&r" (p1), [p0] "=&r" (p0), - [q0] "=&r" (q0), [q1] "=&r" (q1), [q2] "=&r" (q2), [q3] "=&r" (q3) - : [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4) - ); + : [p3] "=&r"(p3), [p2] "=&r"(p2), [p1] "=&r"(p1), [p0] "=&r"(p0), + [q0] "=&r"(q0), [q1] "=&r"(q1), [q2] "=&r"(q2), [q3] "=&r"(q3) + : [s1] "r"(s1), [s2] "r"(s2), [s3] "r"(s3), [s4] "r"(s4)); /* transpose p3, p2, p1, p0 original (when loaded from memory) @@ -389,7 +352,7 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s, p2 p3_1 p2_1 p1_1 p0_1 p3 p3_0 p2_0 p1_0 p0_0 */ - __asm__ __volatile__ ( + __asm__ __volatile__( "precrq.qb.ph %[prim1], %[p0], %[p1] \n\t" "precr.qb.ph %[prim2], %[p0], %[p1] \n\t" 
"precrq.qb.ph %[prim3], %[p2], %[p3] \n\t" @@ -405,12 +368,10 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s, "append %[p1], %[sec3], 16 \n\t" "append %[p3], %[sec4], 16 \n\t" - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p0] "+r" (p0), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p0] "+r"(p0), [p1] "+r"(p1), [p2] "+r"(p2), + [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); /* transpose q0, q1, q2, q3 original (when loaded from memory) @@ -427,7 +388,7 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s, q1 q0_1 q1_1 q2_1 q3_1 q0 q0_0 q1_0 q2_0 q3_0 */ - __asm__ __volatile__ ( + __asm__ __volatile__( "precrq.qb.ph %[prim1], %[q3], %[q2] \n\t" "precr.qb.ph %[prim2], %[q3], %[q2] \n\t" "precrq.qb.ph %[prim3], %[q1], %[q0] \n\t" @@ -443,49 +404,40 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s, "append %[q2], %[sec3], 16 \n\t" "append %[q0], %[sec4], 16 \n\t" - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [q3] "+r" (q3), [q2] "+r" (q2), [q1] "+r" (q1), [q0] "+r" (q0), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [q3] "+r"(q3), [q2] "+r"(q2), [q1] "+r"(q1), + [q0] "+r"(q0), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); - filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec, - p1, p0, p3, p2, q0, q1, q2, q3, - &hev, &mask, &flat); + filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec, p1, p0, + p3, p2, q0, q1, q2, q3, &hev, &mask, &flat); if ((flat == 0) && (mask != 0)) { - filter1_dspr2(mask, hev, p1, p0, q0, q1, - &p1_f0, &p0_f0, &q0_f0, &q1_f0); + filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0); STORE_F0() } else if ((mask & flat) == 0xFFFFFFFF) { /* 
left 2 element operation */ PACK_LEFT_0TO3() - mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, - &q0_l, &q1_l, &q2_l, &q3_l); + mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l); /* right 2 element operation */ PACK_RIGHT_0TO3() - mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, - &q0_r, &q1_r, &q2_r, &q3_r); + mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r); STORE_F1() } else if ((flat != 0) && (mask != 0)) { - filter1_dspr2(mask, hev, p1, p0, q0, q1, - &p1_f0, &p0_f0, &q0_f0, &q1_f0); + filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0); /* left 2 element operation */ PACK_LEFT_0TO3() - mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, - &q0_l, &q1_l, &q2_l, &q3_l); + mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l); /* right 2 element operation */ PACK_RIGHT_0TO3() - mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, - &q0_r, &q1_r, &q2_r, &q3_r); + mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r); if (mask & flat & 0x000000FF) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_r], -3(%[s4]) \n\t" "sb %[p1_r], -2(%[s4]) \n\t" "sb %[p0_r], -1(%[s4]) \n\t" @@ -494,25 +446,22 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s, "sb %[q2_r], +2(%[s4]) \n\t" : - : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r), - [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r), - [s4] "r" (s4) - ); + : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r), + [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r), + [s4] "r"(s4)); } else if (mask & 0x000000FF) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], -2(%[s4]) \n\t" "sb %[p0_f0], -1(%[s4]) \n\t" "sb %[q0_f0], (%[s4]) \n\t" "sb %[q1_f0], +1(%[s4]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [s4] "r" (s4) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [s4] "r"(s4)); } - __asm__ __volatile__ ( + __asm__ 
__volatile__( "srl %[p2_r], %[p2_r], 16 \n\t" "srl %[p1_r], %[p1_r], 16 \n\t" "srl %[p0_r], %[p0_r], 16 \n\t" @@ -524,15 +473,14 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s, "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p2_r] "+r" (p2_r), [p1_r] "+r" (p1_r), [p0_r] "+r" (p0_r), - [q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r), [q2_r] "+r" (q2_r), - [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p2_r] "+r"(p2_r), [p1_r] "+r"(p1_r), [p0_r] "+r"(p0_r), + [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r), + [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & 0x0000FF00) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_r], -3(%[s3]) \n\t" "sb %[p1_r], -2(%[s3]) \n\t" "sb %[p0_r], -1(%[s3]) \n\t" @@ -541,66 +489,58 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s, "sb %[q2_r], +2(%[s3]) \n\t" : - : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r), - [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r), - [s3] "r" (s3) - ); + : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r), + [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r), + [s3] "r"(s3)); } else if (mask & 0x0000FF00) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], -2(%[s3]) \n\t" "sb %[p0_f0], -1(%[s3]) \n\t" "sb %[q0_f0], (%[s3]) \n\t" "sb %[q1_f0], +1(%[s3]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [s3] "r" (s3) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [s3] "r"(s3)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p1_f0], %[p1_f0], 8 \n\t" "srl %[p0_f0], %[p0_f0], 8 \n\t" "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), - [q0] "+r" (q0), [q1] "+r" (q1), [q2] "+r" (q2), - [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), 
[q1_f0] "+r" (q1_f0) - : - ); + : [p2] "+r"(p2), [p1] "+r"(p1), [p0] "+r"(p0), [q0] "+r"(q0), + [q1] "+r"(q1), [q2] "+r"(q2), [p1_f0] "+r"(p1_f0), + [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), [q1_f0] "+r"(q1_f0) + :); if (mask & flat & 0x00FF0000) { - __asm__ __volatile__ ( - "sb %[p2_l], -3(%[s2]) \n\t" - "sb %[p1_l], -2(%[s2]) \n\t" - "sb %[p0_l], -1(%[s2]) \n\t" - "sb %[q0_l], (%[s2]) \n\t" - "sb %[q1_l], +1(%[s2]) \n\t" - "sb %[q2_l], +2(%[s2]) \n\t" + __asm__ __volatile__( + "sb %[p2_l], -3(%[s2]) \n\t" + "sb %[p1_l], -2(%[s2]) \n\t" + "sb %[p0_l], -1(%[s2]) \n\t" + "sb %[q0_l], (%[s2]) \n\t" + "sb %[q1_l], +1(%[s2]) \n\t" + "sb %[q2_l], +2(%[s2]) \n\t" - : - : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l), - [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l), - [s2] "r" (s2) - ); + : + : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l), + [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l), + [s2] "r"(s2)); } else if (mask & 0x00FF0000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], -2(%[s2]) \n\t" "sb %[p0_f0], -1(%[s2]) \n\t" "sb %[q0_f0], (%[s2]) \n\t" "sb %[q1_f0], +1(%[s2]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [s2] "r" (s2) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [s2] "r"(s2)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p2_l], %[p2_l], 16 \n\t" "srl %[p1_l], %[p1_l], 16 \n\t" "srl %[p0_l], %[p0_l], 16 \n\t" @@ -612,15 +552,14 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s, "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p2_l] "+r" (p2_l), [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l), - [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l), - [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p2_l] "+r"(p2_l), [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l), + [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l), + [p1_f0] 
"+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & 0xFF000000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_l], -3(%[s1]) \n\t" "sb %[p1_l], -2(%[s1]) \n\t" "sb %[p0_l], -1(%[s1]) \n\t" @@ -629,21 +568,19 @@ void vpx_lpf_vertical_8_dspr2(unsigned char *s, "sb %[q2_l], +2(%[s1]) \n\t" : - : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l), - [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l), - [s1] "r" (s1) - ); + : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l), + [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l), + [s1] "r"(s1)); } else if (mask & 0xFF000000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], -2(%[s1]) \n\t" "sb %[p0_f0], -1(%[s1]) \n\t" "sb %[q0_f0], (%[s1]) \n\t" "sb %[q1_f0], +1(%[s1]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), [q0_f0] "r" (q0_f0), - [q1_f0] "r" (q1_f0), [s1] "r" (s1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [s1] "r"(s1)); } } } diff --git a/libs/libvpx/vpx_dsp/mips/loopfilter_mb_horiz_dspr2.c b/libs/libvpx/vpx_dsp/mips/loopfilter_mb_horiz_dspr2.c index 8a48650738..6325762a2a 100644 --- a/libs/libvpx/vpx_dsp/mips/loopfilter_mb_horiz_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/loopfilter_mb_horiz_dspr2.c @@ -19,42 +19,38 @@ #include "vpx_mem/vpx_mem.h" #if HAVE_DSPR2 -void vpx_lpf_horizontal_16_dspr2(unsigned char *s, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count) { - uint32_t mask; - uint32_t hev, flat, flat2; - uint8_t i; - uint8_t *sp7, *sp6, *sp5, *sp4, *sp3, *sp2, *sp1, *sp0; - uint8_t *sq0, *sq1, *sq2, *sq3, *sq4, *sq5, *sq6, *sq7; - uint32_t thresh_vec, flimit_vec, limit_vec; - uint32_t uflimit, ulimit, uthresh; - uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; - uint32_t p1_f0, p0_f0, q0_f0, q1_f0; - uint32_t p7_l, p6_l, p5_l, p4_l, p3_l, p2_l, p1_l, p0_l; - uint32_t q0_l, q1_l, q2_l, q3_l, 
q4_l, q5_l, q6_l, q7_l; - uint32_t p7_r, p6_r, p5_r, p4_r, p3_r, p2_r, p1_r, p0_r; - uint32_t q0_r, q1_r, q2_r, q3_r, q4_r, q5_r, q6_r, q7_r; - uint32_t p2_l_f1, p1_l_f1, p0_l_f1, p2_r_f1, p1_r_f1, p0_r_f1; - uint32_t q0_l_f1, q1_l_f1, q2_l_f1, q0_r_f1, q1_r_f1, q2_r_f1; +static void mb_lpf_horizontal_edge(unsigned char *s, int pitch, + const uint8_t *blimit, const uint8_t *limit, + const uint8_t *thresh, int count) { + uint32_t mask; + uint32_t hev, flat, flat2; + uint8_t i; + uint8_t *sp7, *sp6, *sp5, *sp4, *sp3, *sp2, *sp1, *sp0; + uint8_t *sq0, *sq1, *sq2, *sq3, *sq4, *sq5, *sq6, *sq7; + uint32_t thresh_vec, flimit_vec, limit_vec; + uint32_t uflimit, ulimit, uthresh; + uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; + uint32_t p1_f0, p0_f0, q0_f0, q1_f0; + uint32_t p7_l, p6_l, p5_l, p4_l, p3_l, p2_l, p1_l, p0_l; + uint32_t q0_l, q1_l, q2_l, q3_l, q4_l, q5_l, q6_l, q7_l; + uint32_t p7_r, p6_r, p5_r, p4_r, p3_r, p2_r, p1_r, p0_r; + uint32_t q0_r, q1_r, q2_r, q3_r, q4_r, q5_r, q6_r, q7_r; + uint32_t p2_l_f1, p1_l_f1, p0_l_f1, p2_r_f1, p1_r_f1, p0_r_f1; + uint32_t q0_l_f1, q1_l_f1, q2_l_f1, q0_r_f1, q1_r_f1, q2_r_f1; uflimit = *blimit; - ulimit = *limit; + ulimit = *limit; uthresh = *thresh; /* create quad-byte */ - __asm__ __volatile__ ( + __asm__ __volatile__( "replv.qb %[thresh_vec], %[uthresh] \n\t" "replv.qb %[flimit_vec], %[uflimit] \n\t" "replv.qb %[limit_vec], %[ulimit] \n\t" - : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec), - [limit_vec] "=r" (limit_vec) - : [uthresh] "r" (uthresh), [uflimit] "r" (uflimit), [ulimit] "r" (ulimit) - ); + : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec), + [limit_vec] "=r"(limit_vec) + : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit)); /* prefetch data for store */ prefetch_store(s); @@ -77,7 +73,7 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, sq6 = sq5 + pitch; sq7 = sq6 + pitch; - __asm__ __volatile__ ( + __asm__ __volatile__( "lw 
%[p7], (%[sp7]) \n\t" "lw %[p6], (%[sp6]) \n\t" "lw %[p5], (%[sp5]) \n\t" @@ -87,13 +83,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "lw %[p1], (%[sp1]) \n\t" "lw %[p0], (%[sp0]) \n\t" - : [p3] "=&r" (p3), [p2] "=&r" (p2), [p1] "=&r" (p1), [p0] "=&r" (p0), - [p7] "=&r" (p7), [p6] "=&r" (p6), [p5] "=&r" (p5), [p4] "=&r" (p4) - : [sp3] "r" (sp3), [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sp4] "r" (sp4), [sp5] "r" (sp5), [sp6] "r" (sp6), [sp7] "r" (sp7) - ); + : [p3] "=&r"(p3), [p2] "=&r"(p2), [p1] "=&r"(p1), [p0] "=&r"(p0), + [p7] "=&r"(p7), [p6] "=&r"(p6), [p5] "=&r"(p5), [p4] "=&r"(p4) + : [sp3] "r"(sp3), [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), + [sp4] "r"(sp4), [sp5] "r"(sp5), [sp6] "r"(sp6), [sp7] "r"(sp7)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[q0], (%[sq0]) \n\t" "lw %[q1], (%[sq1]) \n\t" "lw %[q2], (%[sq2]) \n\t" @@ -103,57 +98,50 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "lw %[q6], (%[sq6]) \n\t" "lw %[q7], (%[sq7]) \n\t" - : [q3] "=&r" (q3), [q2] "=&r" (q2), [q1] "=&r" (q1), [q0] "=&r" (q0), - [q7] "=&r" (q7), [q6] "=&r" (q6), [q5] "=&r" (q5), [q4] "=&r" (q4) - : [sq3] "r" (sq3), [sq2] "r" (sq2), [sq1] "r" (sq1), [sq0] "r" (sq0), - [sq4] "r" (sq4), [sq5] "r" (sq5), [sq6] "r" (sq6), [sq7] "r" (sq7) - ); + : [q3] "=&r"(q3), [q2] "=&r"(q2), [q1] "=&r"(q1), [q0] "=&r"(q0), + [q7] "=&r"(q7), [q6] "=&r"(q6), [q5] "=&r"(q5), [q4] "=&r"(q4) + : [sq3] "r"(sq3), [sq2] "r"(sq2), [sq1] "r"(sq1), [sq0] "r"(sq0), + [sq4] "r"(sq4), [sq5] "r"(sq5), [sq6] "r"(sq6), [sq7] "r"(sq7)); - filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec, - p1, p0, p3, p2, q0, q1, q2, q3, - &hev, &mask, &flat); + filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec, p1, p0, + p3, p2, q0, q1, q2, q3, &hev, &mask, &flat); flatmask5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, &flat2); /* f0 */ if (((flat2 == 0) && (flat == 0) && (mask != 0)) || ((flat2 != 0) && (flat == 0) && (mask != 0))) { - filter1_dspr2(mask, 
hev, p1, p0, q0, q1, - &p1_f0, &p0_f0, &q0_f0, &q1_f0); + filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0); - __asm__ __volatile__ ( + __asm__ __volatile__( "sw %[p1_f0], (%[sp1]) \n\t" "sw %[p0_f0], (%[sp0]) \n\t" "sw %[q0_f0], (%[sq0]) \n\t" "sw %[q1_f0], (%[sq1]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), + [sq1] "r"(sq1)); } else if ((flat2 == 0XFFFFFFFF) && (flat == 0xFFFFFFFF) && (mask == 0xFFFFFFFF)) { /* f2 */ PACK_LEFT_0TO3() PACK_LEFT_4TO7() - wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l, - &p3_l, &p2_l, &p1_l, &p0_l, - &q0_l, &q1_l, &q2_l, &q3_l, - &q4_l, &q5_l, &q6_l, &q7_l); + wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l, &p3_l, &p2_l, &p1_l, + &p0_l, &q0_l, &q1_l, &q2_l, &q3_l, &q4_l, &q5_l, + &q6_l, &q7_l); PACK_RIGHT_0TO3() PACK_RIGHT_4TO7() - wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r, - &p3_r, &p2_r, &p1_r, &p0_r, - &q0_r, &q1_r, &q2_r, &q3_r, - &q4_r, &q5_r, &q6_r, &q7_r); + wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r, &p3_r, &p2_r, &p1_r, + &p0_r, &q0_r, &q1_r, &q2_r, &q3_r, &q4_r, &q5_r, + &q6_r, &q7_r); COMBINE_LEFT_RIGHT_0TO2() COMBINE_LEFT_RIGHT_3TO6() - __asm__ __volatile__ ( + __asm__ __volatile__( "sw %[p6], (%[sp6]) \n\t" "sw %[p5], (%[sp5]) \n\t" "sw %[p4], (%[sp4]) \n\t" @@ -163,13 +151,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sw %[p0], (%[sp0]) \n\t" : - : [p6] "r" (p6), [p5] "r" (p5), [p4] "r" (p4), [p3] "r" (p3), - [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0), - [sp6] "r" (sp6), [sp5] "r" (sp5), [sp4] "r" (sp4), [sp3] "r" (sp3), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0) - ); + : [p6] "r"(p6), [p5] "r"(p5), [p4] "r"(p4), [p3] "r"(p3), + [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0), [sp6] "r"(sp6), + [sp5] "r"(sp5), 
[sp4] "r"(sp4), [sp3] "r"(sp3), [sp2] "r"(sp2), + [sp1] "r"(sp1), [sp0] "r"(sp0)); - __asm__ __volatile__ ( + __asm__ __volatile__( "sw %[q6], (%[sq6]) \n\t" "sw %[q5], (%[sq5]) \n\t" "sw %[q4], (%[sq4]) \n\t" @@ -179,26 +166,23 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sw %[q0], (%[sq0]) \n\t" : - : [q6] "r" (q6), [q5] "r" (q5), [q4] "r" (q4), [q3] "r" (q3), - [q2] "r" (q2), [q1] "r" (q1), [q0] "r" (q0), - [sq6] "r" (sq6), [sq5] "r" (sq5), [sq4] "r" (sq4), [sq3] "r" (sq3), - [sq2] "r" (sq2), [sq1] "r" (sq1), [sq0] "r" (sq0) - ); + : [q6] "r"(q6), [q5] "r"(q5), [q4] "r"(q4), [q3] "r"(q3), + [q2] "r"(q2), [q1] "r"(q1), [q0] "r"(q0), [sq6] "r"(sq6), + [sq5] "r"(sq5), [sq4] "r"(sq4), [sq3] "r"(sq3), [sq2] "r"(sq2), + [sq1] "r"(sq1), [sq0] "r"(sq0)); } else if ((flat2 == 0) && (flat == 0xFFFFFFFF) && (mask == 0xFFFFFFFF)) { /* f1 */ /* left 2 element operation */ PACK_LEFT_0TO3() - mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, - &q0_l, &q1_l, &q2_l, &q3_l); + mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l); /* right 2 element operation */ PACK_RIGHT_0TO3() - mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, - &q0_r, &q1_r, &q2_r, &q3_r); + mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r); COMBINE_LEFT_RIGHT_0TO2() - __asm__ __volatile__ ( + __asm__ __volatile__( "sw %[p2], (%[sp2]) \n\t" "sw %[p1], (%[sp1]) \n\t" "sw %[p0], (%[sp0]) \n\t" @@ -207,28 +191,23 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sw %[q2], (%[sq2]) \n\t" : - : [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0), - [q0] "r" (q0), [q1] "r" (q1), [q2] "r" (q2), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2) - ); + : [p2] "r"(p2), [p1] "r"(p1), [p0] "r"(p0), [q0] "r"(q0), + [q1] "r"(q1), [q2] "r"(q2), [sp2] "r"(sp2), [sp1] "r"(sp1), + [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2)); } else if ((flat2 == 0) && (flat != 0) && (mask != 0)) { /* f0+f1 */ - filter1_dspr2(mask, hev, 
p1, p0, q0, q1, - &p1_f0, &p0_f0, &q0_f0, &q1_f0); + filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0); /* left 2 element operation */ PACK_LEFT_0TO3() - mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, - &q0_l, &q1_l, &q2_l, &q3_l); + mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l); /* right 2 element operation */ PACK_RIGHT_0TO3() - mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, - &q0_r, &q1_r, &q2_r, &q3_r); + mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r); if (mask & flat & 0x000000FF) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_r], (%[sp2]) \n\t" "sb %[p1_r], (%[sp1]) \n\t" "sb %[p0_r], (%[sp0]) \n\t" @@ -237,27 +216,24 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[q2_r], (%[sq2]) \n\t" : - : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r), - [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2) - ); + : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r), + [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r), + [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), + [sq1] "r"(sq1), [sq2] "r"(sq2)); } else if (mask & 0x000000FF) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], (%[sp1]) \n\t" "sb %[p0_f0], (%[sp0]) \n\t" "sb %[q0_f0], (%[sq0]) \n\t" "sb %[q1_f0], (%[sq1]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), + [sq0] "r"(sq0), [sq1] "r"(sq1)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p2_r], %[p2_r], 16 \n\t" "srl %[p1_r], %[p1_r], 16 \n\t" "srl %[p0_r], %[p0_r], 16 \n\t" @@ -269,15 +245,14 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "srl %[q0_f0], 
%[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p2_r] "+r" (p2_r), [p1_r] "+r" (p1_r), [p0_r] "+r" (p0_r), - [q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r), [q2_r] "+r" (q2_r), - [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p2_r] "+r"(p2_r), [p1_r] "+r"(p1_r), [p0_r] "+r"(p0_r), + [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r), + [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & 0x0000FF00) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_r], +1(%[sp2]) \n\t" "sb %[p1_r], +1(%[sp1]) \n\t" "sb %[p0_r], +1(%[sp0]) \n\t" @@ -286,39 +261,35 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[q2_r], +1(%[sq2]) \n\t" : - : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r), - [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2) - ); + : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r), + [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r), + [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), + [sq1] "r"(sq1), [sq2] "r"(sq2)); } else if (mask & 0x0000FF00) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], +1(%[sp1]) \n\t" "sb %[p0_f0], +1(%[sp0]) \n\t" "sb %[q0_f0], +1(%[sq0]) \n\t" "sb %[q1_f0], +1(%[sq1]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), + [sq0] "r"(sq0), [sq1] "r"(sq1)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p1_f0], %[p1_f0], 8 \n\t" "srl %[p0_f0], %[p0_f0], 8 \n\t" "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] 
"+r" (q1_f0) - : - ); + : [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & 0x00FF0000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_l], +2(%[sp2]) \n\t" "sb %[p1_l], +2(%[sp1]) \n\t" "sb %[p0_l], +2(%[sp0]) \n\t" @@ -327,27 +298,24 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[q2_l], +2(%[sq2]) \n\t" : - : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l), - [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2) - ); + : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l), + [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l), + [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), + [sq1] "r"(sq1), [sq2] "r"(sq2)); } else if (mask & 0x00FF0000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], +2(%[sp1]) \n\t" "sb %[p0_f0], +2(%[sp0]) \n\t" "sb %[q0_f0], +2(%[sq0]) \n\t" "sb %[q1_f0], +2(%[sq1]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), + [sq0] "r"(sq0), [sq1] "r"(sq1)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p2_l], %[p2_l], 16 \n\t" "srl %[p1_l], %[p1_l], 16 \n\t" "srl %[p0_l], %[p0_l], 16 \n\t" @@ -359,15 +327,14 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p2_l] "+r" (p2_l), [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l), - [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l), - [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p2_l] "+r"(p2_l), [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l), + [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l), + 
[p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & 0xFF000000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_l], +3(%[sp2]) \n\t" "sb %[p1_l], +3(%[sp1]) \n\t" "sb %[p0_l], +3(%[sp0]) \n\t" @@ -376,61 +343,51 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[q2_l], +3(%[sq2]) \n\t" : - : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l), - [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2) - ); + : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l), + [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l), + [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), + [sq1] "r"(sq1), [sq2] "r"(sq2)); } else if (mask & 0xFF000000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], +3(%[sp1]) \n\t" "sb %[p0_f0], +3(%[sp0]) \n\t" "sb %[q0_f0], +3(%[sq0]) \n\t" "sb %[q1_f0], +3(%[sq1]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), + [sq0] "r"(sq0), [sq1] "r"(sq1)); } } else if ((flat2 != 0) && (flat != 0) && (mask != 0)) { /* f0 + f1 + f2 */ /* f0 function */ - filter1_dspr2(mask, hev, p1, p0, q0, q1, - &p1_f0, &p0_f0, &q0_f0, &q1_f0); + filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0); /* f1 function */ /* left 2 element operation */ PACK_LEFT_0TO3() - mbfilter1_dspr2(p3_l, p2_l, p1_l, p0_l, - q0_l, q1_l, q2_l, q3_l, - &p2_l_f1, &p1_l_f1, &p0_l_f1, - &q0_l_f1, &q1_l_f1, &q2_l_f1); + mbfilter1_dspr2(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, &p2_l_f1, + &p1_l_f1, &p0_l_f1, &q0_l_f1, &q1_l_f1, &q2_l_f1); /* right 2 element operation */ PACK_RIGHT_0TO3() - mbfilter1_dspr2(p3_r, p2_r, 
p1_r, p0_r, - q0_r, q1_r, q2_r, q3_r, - &p2_r_f1, &p1_r_f1, &p0_r_f1, - &q0_r_f1, &q1_r_f1, &q2_r_f1); + mbfilter1_dspr2(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, &p2_r_f1, + &p1_r_f1, &p0_r_f1, &q0_r_f1, &q1_r_f1, &q2_r_f1); /* f2 function */ PACK_LEFT_4TO7() - wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l, - &p3_l, &p2_l, &p1_l, &p0_l, - &q0_l, &q1_l, &q2_l, &q3_l, - &q4_l, &q5_l, &q6_l, &q7_l); + wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l, &p3_l, &p2_l, &p1_l, + &p0_l, &q0_l, &q1_l, &q2_l, &q3_l, &q4_l, &q5_l, + &q6_l, &q7_l); PACK_RIGHT_4TO7() - wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r, - &p3_r, &p2_r, &p1_r, &p0_r, - &q0_r, &q1_r, &q2_r, &q3_r, - &q4_r, &q5_r, &q6_r, &q7_r); + wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r, &p3_r, &p2_r, &p1_r, + &p0_r, &q0_r, &q1_r, &q2_r, &q3_r, &q4_r, &q5_r, + &q6_r, &q7_r); if (mask & flat & flat2 & 0x000000FF) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p6_r], (%[sp6]) \n\t" "sb %[p5_r], (%[sp5]) \n\t" "sb %[p4_r], (%[sp4]) \n\t" @@ -440,14 +397,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[p0_r], (%[sp0]) \n\t" : - : [p6_r] "r" (p6_r), [p5_r] "r" (p5_r), [p4_r] "r" (p4_r), - [p3_r] "r" (p3_r), [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), - [sp6] "r" (sp6), [sp5] "r" (sp5), [sp4] "r" (sp4), - [sp3] "r" (sp3), [sp2] "r" (sp2), [sp1] "r" (sp1), - [p0_r] "r" (p0_r), [sp0] "r" (sp0) - ); + : [p6_r] "r"(p6_r), [p5_r] "r"(p5_r), [p4_r] "r"(p4_r), + [p3_r] "r"(p3_r), [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), + [sp6] "r"(sp6), [sp5] "r"(sp5), [sp4] "r"(sp4), [sp3] "r"(sp3), + [sp2] "r"(sp2), [sp1] "r"(sp1), [p0_r] "r"(p0_r), [sp0] "r"(sp0)); - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[q0_r], (%[sq0]) \n\t" "sb %[q1_r], (%[sq1]) \n\t" "sb %[q2_r], (%[sq2]) \n\t" @@ -457,15 +412,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[q6_r], (%[sq6]) \n\t" : - : [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r), - [q3_r] "r" (q3_r), [q4_r] "r" (q4_r), [q5_r] "r" (q5_r), - 
[q6_r] "r" (q6_r), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2), - [sq3] "r" (sq3), [sq4] "r" (sq4), [sq5] "r" (sq5), - [sq6] "r" (sq6) - ); + : [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r), + [q3_r] "r"(q3_r), [q4_r] "r"(q4_r), [q5_r] "r"(q5_r), + [q6_r] "r"(q6_r), [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2), + [sq3] "r"(sq3), [sq4] "r"(sq4), [sq5] "r"(sq5), [sq6] "r"(sq6)); } else if (mask & flat & 0x000000FF) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_r_f1], (%[sp2]) \n\t" "sb %[p1_r_f1], (%[sp1]) \n\t" "sb %[p0_r_f1], (%[sp0]) \n\t" @@ -474,27 +426,25 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[q2_r_f1], (%[sq2]) \n\t" : - : [p2_r_f1] "r" (p2_r_f1), [p1_r_f1] "r" (p1_r_f1), - [p0_r_f1] "r" (p0_r_f1), [q0_r_f1] "r" (q0_r_f1), - [q1_r_f1] "r" (q1_r_f1), [q2_r_f1] "r" (q2_r_f1), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2) - ); + : [p2_r_f1] "r"(p2_r_f1), [p1_r_f1] "r"(p1_r_f1), + [p0_r_f1] "r"(p0_r_f1), [q0_r_f1] "r"(q0_r_f1), + [q1_r_f1] "r"(q1_r_f1), [q2_r_f1] "r"(q2_r_f1), [sp2] "r"(sp2), + [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1), + [sq2] "r"(sq2)); } else if (mask & 0x000000FF) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], (%[sp1]) \n\t" "sb %[p0_f0], (%[sp0]) \n\t" "sb %[q0_f0], (%[sq0]) \n\t" "sb %[q1_f0], (%[sq1]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), [q0_f0] "r" (q0_f0), - [q1_f0] "r" (q1_f0), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), + [sq0] "r"(sq0), [sq1] "r"(sq1)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p6_r], %[p6_r], 16 \n\t" "srl %[p5_r], %[p5_r], 16 \n\t" "srl %[p4_r], %[p4_r], 16 \n\t" @@ -510,15 +460,14 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "srl %[q5_r], %[q5_r], 16 \n\t" "srl %[q6_r], %[q6_r], 16 
\n\t" - : [q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r), [q2_r] "+r" (q2_r), - [q3_r] "+r" (q3_r), [q4_r] "+r" (q4_r), [q5_r] "+r" (q5_r), - [p6_r] "+r" (p6_r), [p5_r] "+r" (p5_r), [p4_r] "+r" (p4_r), - [p3_r] "+r" (p3_r), [p2_r] "+r" (p2_r), [p1_r] "+r" (p1_r), - [q6_r] "+r" (q6_r), [p0_r] "+r" (p0_r) - : - ); + : [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r), + [q3_r] "+r"(q3_r), [q4_r] "+r"(q4_r), [q5_r] "+r"(q5_r), + [p6_r] "+r"(p6_r), [p5_r] "+r"(p5_r), [p4_r] "+r"(p4_r), + [p3_r] "+r"(p3_r), [p2_r] "+r"(p2_r), [p1_r] "+r"(p1_r), + [q6_r] "+r"(q6_r), [p0_r] "+r"(p0_r) + :); - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p2_r_f1], %[p2_r_f1], 16 \n\t" "srl %[p1_r_f1], %[p1_r_f1], 16 \n\t" "srl %[p0_r_f1], %[p0_r_f1], 16 \n\t" @@ -530,16 +479,15 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p2_r_f1] "+r" (p2_r_f1), [p1_r_f1] "+r" (p1_r_f1), - [p0_r_f1] "+r" (p0_r_f1), [q0_r_f1] "+r" (q0_r_f1), - [q1_r_f1] "+r" (q1_r_f1), [q2_r_f1] "+r" (q2_r_f1), - [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p2_r_f1] "+r"(p2_r_f1), [p1_r_f1] "+r"(p1_r_f1), + [p0_r_f1] "+r"(p0_r_f1), [q0_r_f1] "+r"(q0_r_f1), + [q1_r_f1] "+r"(q1_r_f1), [q2_r_f1] "+r"(q2_r_f1), + [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & flat2 & 0x0000FF00) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p6_r], +1(%[sp6]) \n\t" "sb %[p5_r], +1(%[sp5]) \n\t" "sb %[p4_r], +1(%[sp4]) \n\t" @@ -549,14 +497,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[p0_r], +1(%[sp0]) \n\t" : - : [p6_r] "r" (p6_r), [p5_r] "r" (p5_r), [p4_r] "r" (p4_r), - [p3_r] "r" (p3_r), [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), - [p0_r] "r" (p0_r), [sp6] "r" (sp6), [sp5] "r" (sp5), - [sp4] "r" (sp4), [sp3] "r" (sp3), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0) - ); + : [p6_r] "r"(p6_r), [p5_r] "r"(p5_r), 
[p4_r] "r"(p4_r), + [p3_r] "r"(p3_r), [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), + [p0_r] "r"(p0_r), [sp6] "r"(sp6), [sp5] "r"(sp5), [sp4] "r"(sp4), + [sp3] "r"(sp3), [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0)); - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[q0_r], +1(%[sq0]) \n\t" "sb %[q1_r], +1(%[sq1]) \n\t" "sb %[q2_r], +1(%[sq2]) \n\t" @@ -566,14 +512,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[q6_r], +1(%[sq6]) \n\t" : - : [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r), - [q3_r] "r" (q3_r), [q4_r] "r" (q4_r), [q5_r] "r" (q5_r), - [q6_r] "r" (q6_r), [sq0] "r" (sq0), [sq1] "r" (sq1), - [sq2] "r" (sq2), [sq3] "r" (sq3), - [sq4] "r" (sq4), [sq5] "r" (sq5), [sq6] "r" (sq6) - ); + : [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r), + [q3_r] "r"(q3_r), [q4_r] "r"(q4_r), [q5_r] "r"(q5_r), + [q6_r] "r"(q6_r), [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2), + [sq3] "r"(sq3), [sq4] "r"(sq4), [sq5] "r"(sq5), [sq6] "r"(sq6)); } else if (mask & flat & 0x0000FF00) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_r_f1], +1(%[sp2]) \n\t" "sb %[p1_r_f1], +1(%[sp1]) \n\t" "sb %[p0_r_f1], +1(%[sp0]) \n\t" @@ -582,39 +526,36 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[q2_r_f1], +1(%[sq2]) \n\t" : - : [p2_r_f1] "r" (p2_r_f1), [p1_r_f1] "r" (p1_r_f1), - [p0_r_f1] "r" (p0_r_f1), [q0_r_f1] "r" (q0_r_f1), - [q1_r_f1] "r" (q1_r_f1), [q2_r_f1] "r" (q2_r_f1), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2) - ); + : [p2_r_f1] "r"(p2_r_f1), [p1_r_f1] "r"(p1_r_f1), + [p0_r_f1] "r"(p0_r_f1), [q0_r_f1] "r"(q0_r_f1), + [q1_r_f1] "r"(q1_r_f1), [q2_r_f1] "r"(q2_r_f1), [sp2] "r"(sp2), + [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1), + [sq2] "r"(sq2)); } else if (mask & 0x0000FF00) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], +1(%[sp1]) \n\t" "sb %[p0_f0], +1(%[sp0]) \n\t" "sb %[q0_f0], +1(%[sq0]) \n\t" "sb %[q1_f0], +1(%[sq1]) \n\t" : - : 
[p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), [q0_f0] "r" (q0_f0), - [q1_f0] "r" (q1_f0), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), + [sq0] "r"(sq0), [sq1] "r"(sq1)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p1_f0], %[p1_f0], 8 \n\t" "srl %[p0_f0], %[p0_f0], 8 \n\t" "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & flat2 & 0x00FF0000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p6_l], +2(%[sp6]) \n\t" "sb %[p5_l], +2(%[sp5]) \n\t" "sb %[p4_l], +2(%[sp4]) \n\t" @@ -624,14 +565,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[p0_l], +2(%[sp0]) \n\t" : - : [p6_l] "r" (p6_l), [p5_l] "r" (p5_l), [p4_l] "r" (p4_l), - [p3_l] "r" (p3_l), [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), - [p0_l] "r" (p0_l), [sp6] "r" (sp6), [sp5] "r" (sp5), - [sp4] "r" (sp4), [sp3] "r" (sp3), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0) - ); + : [p6_l] "r"(p6_l), [p5_l] "r"(p5_l), [p4_l] "r"(p4_l), + [p3_l] "r"(p3_l), [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), + [p0_l] "r"(p0_l), [sp6] "r"(sp6), [sp5] "r"(sp5), [sp4] "r"(sp4), + [sp3] "r"(sp3), [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0)); - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[q0_l], +2(%[sq0]) \n\t" "sb %[q1_l], +2(%[sq1]) \n\t" "sb %[q2_l], +2(%[sq2]) \n\t" @@ -641,14 +580,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[q6_l], +2(%[sq6]) \n\t" : - : [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l), - [q3_l] "r" (q3_l), [q4_l] "r" (q4_l), [q5_l] "r" (q5_l), - [q6_l] "r" (q6_l), [sq0] "r" (sq0), [sq1] "r" (sq1), - [sq2] "r" (sq2), [sq3] "r" (sq3), - [sq4] "r" (sq4), [sq5] "r" (sq5), [sq6] "r" 
(sq6) - ); + : [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l), + [q3_l] "r"(q3_l), [q4_l] "r"(q4_l), [q5_l] "r"(q5_l), + [q6_l] "r"(q6_l), [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2), + [sq3] "r"(sq3), [sq4] "r"(sq4), [sq5] "r"(sq5), [sq6] "r"(sq6)); } else if (mask & flat & 0x00FF0000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_l_f1], +2(%[sp2]) \n\t" "sb %[p1_l_f1], +2(%[sp1]) \n\t" "sb %[p0_l_f1], +2(%[sp0]) \n\t" @@ -657,27 +594,25 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[q2_l_f1], +2(%[sq2]) \n\t" : - : [p2_l_f1] "r" (p2_l_f1), [p1_l_f1] "r" (p1_l_f1), - [p0_l_f1] "r" (p0_l_f1), [q0_l_f1] "r" (q0_l_f1), - [q1_l_f1] "r" (q1_l_f1), [q2_l_f1] "r" (q2_l_f1), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2) - ); + : [p2_l_f1] "r"(p2_l_f1), [p1_l_f1] "r"(p1_l_f1), + [p0_l_f1] "r"(p0_l_f1), [q0_l_f1] "r"(q0_l_f1), + [q1_l_f1] "r"(q1_l_f1), [q2_l_f1] "r"(q2_l_f1), [sp2] "r"(sp2), + [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1), + [sq2] "r"(sq2)); } else if (mask & 0x00FF0000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], +2(%[sp1]) \n\t" "sb %[p0_f0], +2(%[sp0]) \n\t" "sb %[q0_f0], +2(%[sq0]) \n\t" "sb %[q1_f0], +2(%[sq1]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), [q0_f0] "r" (q0_f0), - [q1_f0] "r" (q1_f0), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), + [sq0] "r"(sq0), [sq1] "r"(sq1)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p6_l], %[p6_l], 16 \n\t" "srl %[p5_l], %[p5_l], 16 \n\t" "srl %[p4_l], %[p4_l], 16 \n\t" @@ -693,15 +628,14 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "srl %[q5_l], %[q5_l], 16 \n\t" "srl %[q6_l], %[q6_l], 16 \n\t" - : [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l), - [q3_l] "+r" (q3_l), [q4_l] "+r" (q4_l), [q5_l] 
"+r" (q5_l), - [q6_l] "+r" (q6_l), [p6_l] "+r" (p6_l), [p5_l] "+r" (p5_l), - [p4_l] "+r" (p4_l), [p3_l] "+r" (p3_l), [p2_l] "+r" (p2_l), - [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l) - : - ); + : [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l), + [q3_l] "+r"(q3_l), [q4_l] "+r"(q4_l), [q5_l] "+r"(q5_l), + [q6_l] "+r"(q6_l), [p6_l] "+r"(p6_l), [p5_l] "+r"(p5_l), + [p4_l] "+r"(p4_l), [p3_l] "+r"(p3_l), [p2_l] "+r"(p2_l), + [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l) + :); - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p2_l_f1], %[p2_l_f1], 16 \n\t" "srl %[p1_l_f1], %[p1_l_f1], 16 \n\t" "srl %[p0_l_f1], %[p0_l_f1], 16 \n\t" @@ -713,16 +647,15 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p2_l_f1] "+r" (p2_l_f1), [p1_l_f1] "+r" (p1_l_f1), - [p0_l_f1] "+r" (p0_l_f1), [q0_l_f1] "+r" (q0_l_f1), - [q1_l_f1] "+r" (q1_l_f1), [q2_l_f1] "+r" (q2_l_f1), - [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p2_l_f1] "+r"(p2_l_f1), [p1_l_f1] "+r"(p1_l_f1), + [p0_l_f1] "+r"(p0_l_f1), [q0_l_f1] "+r"(q0_l_f1), + [q1_l_f1] "+r"(q1_l_f1), [q2_l_f1] "+r"(q2_l_f1), + [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & flat2 & 0xFF000000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p6_l], +3(%[sp6]) \n\t" "sb %[p5_l], +3(%[sp5]) \n\t" "sb %[p4_l], +3(%[sp4]) \n\t" @@ -732,14 +665,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[p0_l], +3(%[sp0]) \n\t" : - : [p6_l] "r" (p6_l), [p5_l] "r" (p5_l), [p4_l] "r" (p4_l), - [p3_l] "r" (p3_l), [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), - [p0_l] "r" (p0_l), [sp6] "r" (sp6), [sp5] "r" (sp5), - [sp4] "r" (sp4), [sp3] "r" (sp3), [sp2] "r" (sp2), - [sp1] "r" (sp1), [sp0] "r" (sp0) - ); + : [p6_l] "r"(p6_l), [p5_l] "r"(p5_l), [p4_l] "r"(p4_l), + [p3_l] "r"(p3_l), [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), + [p0_l] "r"(p0_l), [sp6] "r"(sp6), [sp5] 
"r"(sp5), [sp4] "r"(sp4), + [sp3] "r"(sp3), [sp2] "r"(sp2), [sp1] "r"(sp1), [sp0] "r"(sp0)); - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[q0_l], +3(%[sq0]) \n\t" "sb %[q1_l], +3(%[sq1]) \n\t" "sb %[q2_l], +3(%[sq2]) \n\t" @@ -749,15 +680,12 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[q6_l], +3(%[sq6]) \n\t" : - : [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), - [q2_l] "r" (q2_l), [q3_l] "r" (q3_l), - [q4_l] "r" (q4_l), [q5_l] "r" (q5_l), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2), - [sq3] "r" (sq3), [sq4] "r" (sq4), [sq5] "r" (sq5), - [q6_l] "r" (q6_l), [sq6] "r" (sq6) - ); + : [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l), + [q3_l] "r"(q3_l), [q4_l] "r"(q4_l), [q5_l] "r"(q5_l), + [sq0] "r"(sq0), [sq1] "r"(sq1), [sq2] "r"(sq2), [sq3] "r"(sq3), + [sq4] "r"(sq4), [sq5] "r"(sq5), [q6_l] "r"(q6_l), [sq6] "r"(sq6)); } else if (mask & flat & 0xFF000000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_l_f1], +3(%[sp2]) \n\t" "sb %[p1_l_f1], +3(%[sp1]) \n\t" "sb %[p0_l_f1], +3(%[sp0]) \n\t" @@ -766,29 +694,40 @@ void vpx_lpf_horizontal_16_dspr2(unsigned char *s, "sb %[q2_l_f1], +3(%[sq2]) \n\t" : - : [p2_l_f1] "r" (p2_l_f1), [p1_l_f1] "r" (p1_l_f1), - [p0_l_f1] "r" (p0_l_f1), [q0_l_f1] "r" (q0_l_f1), - [q1_l_f1] "r" (q1_l_f1), [q2_l_f1] "r" (q2_l_f1), - [sp2] "r" (sp2), [sp1] "r" (sp1), [sp0] "r" (sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1), [sq2] "r" (sq2) - ); + : [p2_l_f1] "r"(p2_l_f1), [p1_l_f1] "r"(p1_l_f1), + [p0_l_f1] "r"(p0_l_f1), [q0_l_f1] "r"(q0_l_f1), + [q1_l_f1] "r"(q1_l_f1), [q2_l_f1] "r"(q2_l_f1), [sp2] "r"(sp2), + [sp1] "r"(sp1), [sp0] "r"(sp0), [sq0] "r"(sq0), [sq1] "r"(sq1), + [sq2] "r"(sq2)); } else if (mask & 0xFF000000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], +3(%[sp1]) \n\t" "sb %[p0_f0], +3(%[sp0]) \n\t" "sb %[q0_f0], +3(%[sq0]) \n\t" "sb %[q1_f0], +3(%[sq1]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [sp1] "r" (sp1), [sp0] "r" 
(sp0), - [sq0] "r" (sq0), [sq1] "r" (sq1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [sp1] "r"(sp1), [sp0] "r"(sp0), + [sq0] "r"(sq0), [sq1] "r"(sq1)); } } s = s + 4; } } + +void vpx_lpf_horizontal_edge_8_dspr2(unsigned char *s, int pitch, + const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh) { + mb_lpf_horizontal_edge(s, pitch, blimit, limit, thresh, 1); +} + +void vpx_lpf_horizontal_edge_16_dspr2(unsigned char *s, int pitch, + const uint8_t *blimit, + const uint8_t *limit, + const uint8_t *thresh) { + mb_lpf_horizontal_edge(s, pitch, blimit, limit, thresh, 2); +} #endif // #if HAVE_DSPR2 diff --git a/libs/libvpx/vpx_dsp/mips/loopfilter_mb_vert_dspr2.c b/libs/libvpx/vpx_dsp/mips/loopfilter_mb_vert_dspr2.c index e580f014e9..96e8d8858a 100644 --- a/libs/libvpx/vpx_dsp/mips/loopfilter_mb_vert_dspr2.c +++ b/libs/libvpx/vpx_dsp/mips/loopfilter_mb_vert_dspr2.c @@ -19,40 +19,36 @@ #include "vpx_mem/vpx_mem.h" #if HAVE_DSPR2 -void vpx_lpf_vertical_16_dspr2(uint8_t *s, - int pitch, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh) { - uint8_t i; - uint32_t mask, hev, flat, flat2; - uint8_t *s1, *s2, *s3, *s4; - uint32_t prim1, prim2, sec3, sec4, prim3, prim4; - uint32_t thresh_vec, flimit_vec, limit_vec; - uint32_t uflimit, ulimit, uthresh; - uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; - uint32_t p1_f0, p0_f0, q0_f0, q1_f0; - uint32_t p7_l, p6_l, p5_l, p4_l, p3_l, p2_l, p1_l, p0_l; - uint32_t q0_l, q1_l, q2_l, q3_l, q4_l, q5_l, q6_l, q7_l; - uint32_t p7_r, p6_r, p5_r, p4_r, p3_r, p2_r, p1_r, p0_r; - uint32_t q0_r, q1_r, q2_r, q3_r, q4_r, q5_r, q6_r, q7_r; - uint32_t p2_l_f1, p1_l_f1, p0_l_f1, p2_r_f1, p1_r_f1, p0_r_f1; - uint32_t q0_l_f1, q1_l_f1, q2_l_f1, q0_r_f1, q1_r_f1, q2_r_f1; +void vpx_lpf_vertical_16_dspr2(uint8_t *s, int pitch, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh) { + uint8_t i; + uint32_t mask, hev, flat, flat2; 
+ uint8_t *s1, *s2, *s3, *s4; + uint32_t prim1, prim2, sec3, sec4, prim3, prim4; + uint32_t thresh_vec, flimit_vec, limit_vec; + uint32_t uflimit, ulimit, uthresh; + uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; + uint32_t p1_f0, p0_f0, q0_f0, q1_f0; + uint32_t p7_l, p6_l, p5_l, p4_l, p3_l, p2_l, p1_l, p0_l; + uint32_t q0_l, q1_l, q2_l, q3_l, q4_l, q5_l, q6_l, q7_l; + uint32_t p7_r, p6_r, p5_r, p4_r, p3_r, p2_r, p1_r, p0_r; + uint32_t q0_r, q1_r, q2_r, q3_r, q4_r, q5_r, q6_r, q7_r; + uint32_t p2_l_f1, p1_l_f1, p0_l_f1, p2_r_f1, p1_r_f1, p0_r_f1; + uint32_t q0_l_f1, q1_l_f1, q2_l_f1, q0_r_f1, q1_r_f1, q2_r_f1; uflimit = *blimit; ulimit = *limit; uthresh = *thresh; /* create quad-byte */ - __asm__ __volatile__ ( + __asm__ __volatile__( "replv.qb %[thresh_vec], %[uthresh] \n\t" "replv.qb %[flimit_vec], %[uflimit] \n\t" "replv.qb %[limit_vec], %[ulimit] \n\t" - : [thresh_vec] "=&r" (thresh_vec), [flimit_vec] "=&r" (flimit_vec), - [limit_vec] "=r" (limit_vec) - : [uthresh] "r" (uthresh), [uflimit] "r" (uflimit), [ulimit] "r" (ulimit) - ); + : [thresh_vec] "=&r"(thresh_vec), [flimit_vec] "=&r"(flimit_vec), + [limit_vec] "=r"(limit_vec) + : [uthresh] "r"(uthresh), [uflimit] "r"(uflimit), [ulimit] "r"(ulimit)); prefetch_store(s + pitch); @@ -61,9 +57,9 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, s2 = s + pitch; s3 = s2 + pitch; s4 = s3 + pitch; - s = s4 + pitch; + s = s4 + pitch; - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[p0], -4(%[s1]) \n\t" "lw %[p1], -4(%[s2]) \n\t" "lw %[p2], -4(%[s3]) \n\t" @@ -73,13 +69,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "lw %[p6], -8(%[s3]) \n\t" "lw %[p7], -8(%[s4]) \n\t" - : [p3] "=&r" (p3), [p2] "=&r" (p2), [p1] "=&r" (p1), - [p0] "=&r" (p0), [p7] "=&r" (p7), [p6] "=&r" (p6), - [p5] "=&r" (p5), [p4] "=&r" (p4) - : [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4) - ); + : [p3] "=&r"(p3), [p2] "=&r"(p2), [p1] "=&r"(p1), [p0] "=&r"(p0), + [p7] "=&r"(p7), [p6] "=&r"(p6), [p5] "=&r"(p5), 
[p4] "=&r"(p4) + : [s1] "r"(s1), [s2] "r"(s2), [s3] "r"(s3), [s4] "r"(s4)); - __asm__ __volatile__ ( + __asm__ __volatile__( "lw %[q3], (%[s1]) \n\t" "lw %[q2], (%[s2]) \n\t" "lw %[q1], (%[s3]) \n\t" @@ -89,11 +83,9 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "lw %[q5], +4(%[s3]) \n\t" "lw %[q4], +4(%[s4]) \n\t" - : [q3] "=&r" (q3), [q2] "=&r" (q2), [q1] "=&r" (q1), - [q0] "=&r" (q0), [q7] "=&r" (q7), [q6] "=&r" (q6), - [q5] "=&r" (q5), [q4] "=&r" (q4) - : [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4) - ); + : [q3] "=&r"(q3), [q2] "=&r"(q2), [q1] "=&r"(q1), [q0] "=&r"(q0), + [q7] "=&r"(q7), [q6] "=&r"(q6), [q5] "=&r"(q5), [q4] "=&r"(q4) + : [s1] "r"(s1), [s2] "r"(s2), [s3] "r"(s3), [s4] "r"(s4)); /* transpose p3, p2, p1, p0 original (when loaded from memory) @@ -110,7 +102,7 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, p2 p3_1 p2_1 p1_1 p0_1 p3 p3_0 p2_0 p1_0 p0_0 */ - __asm__ __volatile__ ( + __asm__ __volatile__( "precrq.qb.ph %[prim1], %[p0], %[p1] \n\t" "precr.qb.ph %[prim2], %[p0], %[p1] \n\t" "precrq.qb.ph %[prim3], %[p2], %[p3] \n\t" @@ -126,12 +118,10 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "append %[p1], %[sec3], 16 \n\t" "append %[p3], %[sec4], 16 \n\t" - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p0] "+r" (p0), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p0] "+r"(p0), [p1] "+r"(p1), [p2] "+r"(p2), + [p3] "+r"(p3), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); /* transpose q0, q1, q2, q3 original (when loaded from memory) @@ -148,7 +138,7 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, q1 q0_1 q1_1 q2_1 q3_1 q0 q0_0 q1_0 q2_0 q3_0 */ - __asm__ __volatile__ ( + __asm__ __volatile__( "precrq.qb.ph %[prim1], %[q3], %[q2] \n\t" "precr.qb.ph %[prim2], %[q3], %[q2] \n\t" "precrq.qb.ph %[prim3], %[q1], %[q0] \n\t" @@ -164,12 +154,10 @@ void 
vpx_lpf_vertical_16_dspr2(uint8_t *s, "append %[q2], %[sec3], 16 \n\t" "append %[q0], %[sec4], 16 \n\t" - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [q3] "+r" (q3), [q2] "+r" (q2), [q1] "+r" (q1), [q0] "+r" (q0), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [q3] "+r"(q3), [q2] "+r"(q2), [q1] "+r"(q1), + [q0] "+r"(q0), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); /* transpose p7, p6, p5, p4 original (when loaded from memory) @@ -186,7 +174,7 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, p6 p7_1 p6_1 p5_1 p4_1 p7 p7_0 p6_0 p5_0 p4_0 */ - __asm__ __volatile__ ( + __asm__ __volatile__( "precrq.qb.ph %[prim1], %[p4], %[p5] \n\t" "precr.qb.ph %[prim2], %[p4], %[p5] \n\t" "precrq.qb.ph %[prim3], %[p6], %[p7] \n\t" @@ -202,12 +190,10 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "append %[p5], %[sec3], 16 \n\t" "append %[p7], %[sec4], 16 \n\t" - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" (prim4), - [p4] "+r" (p4), [p5] "+r" (p5), [p6] "+r" (p6), [p7] "+r" (p7), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [p4] "+r"(p4), [p5] "+r"(p5), [p6] "+r"(p6), + [p7] "+r"(p7), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); /* transpose q4, q5, q6, q7 original (when loaded from memory) @@ -224,7 +210,7 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, q5 q4_1 q5_1 q26_1 q7_1 q4 q4_0 q5_0 q26_0 q7_0 */ - __asm__ __volatile__ ( + __asm__ __volatile__( "precrq.qb.ph %[prim1], %[q7], %[q6] \n\t" "precr.qb.ph %[prim2], %[q7], %[q6] \n\t" "precrq.qb.ph %[prim3], %[q5], %[q4] \n\t" @@ -240,71 +226,60 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "append %[q6], %[sec3], 16 \n\t" "append %[q4], %[sec4], 16 \n\t" - : [prim1] "=&r" (prim1), [prim2] "=&r" (prim2), - [prim3] "=&r" (prim3), [prim4] "=&r" 
(prim4), - [q7] "+r" (q7), [q6] "+r" (q6), [q5] "+r" (q5), [q4] "+r" (q4), - [sec3] "=&r" (sec3), [sec4] "=&r" (sec4) - : - ); + : [prim1] "=&r"(prim1), [prim2] "=&r"(prim2), [prim3] "=&r"(prim3), + [prim4] "=&r"(prim4), [q7] "+r"(q7), [q6] "+r"(q6), [q5] "+r"(q5), + [q4] "+r"(q4), [sec3] "=&r"(sec3), [sec4] "=&r"(sec4) + :); - filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec, - p1, p0, p3, p2, q0, q1, q2, q3, - &hev, &mask, &flat); + filter_hev_mask_flatmask4_dspr2(limit_vec, flimit_vec, thresh_vec, p1, p0, + p3, p2, q0, q1, q2, q3, &hev, &mask, &flat); flatmask5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, &flat2); /* f0 */ if (((flat2 == 0) && (flat == 0) && (mask != 0)) || ((flat2 != 0) && (flat == 0) && (mask != 0))) { - filter1_dspr2(mask, hev, p1, p0, q0, q1, - &p1_f0, &p0_f0, &q0_f0, &q1_f0); + filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0); STORE_F0() } else if ((flat2 == 0XFFFFFFFF) && (flat == 0xFFFFFFFF) && (mask == 0xFFFFFFFF)) { /* f2 */ PACK_LEFT_0TO3() PACK_LEFT_4TO7() - wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l, - &p3_l, &p2_l, &p1_l, &p0_l, - &q0_l, &q1_l, &q2_l, &q3_l, - &q4_l, &q5_l, &q6_l, &q7_l); + wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l, &p3_l, &p2_l, &p1_l, + &p0_l, &q0_l, &q1_l, &q2_l, &q3_l, &q4_l, &q5_l, + &q6_l, &q7_l); PACK_RIGHT_0TO3() PACK_RIGHT_4TO7() - wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r, - &p3_r, &p2_r, &p1_r, &p0_r, - &q0_r, &q1_r, &q2_r, &q3_r, - &q4_r, &q5_r, &q6_r, &q7_r); + wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r, &p3_r, &p2_r, &p1_r, + &p0_r, &q0_r, &q1_r, &q2_r, &q3_r, &q4_r, &q5_r, + &q6_r, &q7_r); STORE_F2() } else if ((flat2 == 0) && (flat == 0xFFFFFFFF) && (mask == 0xFFFFFFFF)) { /* f1 */ PACK_LEFT_0TO3() - mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, - &q0_l, &q1_l, &q2_l, &q3_l); + mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l); PACK_RIGHT_0TO3() - mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, - &q0_r, &q1_r, &q2_r, &q3_r); + 
mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r); STORE_F1() } else if ((flat2 == 0) && (flat != 0) && (mask != 0)) { /* f0 + f1 */ - filter1_dspr2(mask, hev, p1, p0, q0, q1, - &p1_f0, &p0_f0, &q0_f0, &q1_f0); + filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0); /* left 2 element operation */ PACK_LEFT_0TO3() - mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, - &q0_l, &q1_l, &q2_l, &q3_l); + mbfilter_dspr2(&p3_l, &p2_l, &p1_l, &p0_l, &q0_l, &q1_l, &q2_l, &q3_l); /* right 2 element operation */ PACK_RIGHT_0TO3() - mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, - &q0_r, &q1_r, &q2_r, &q3_r); + mbfilter_dspr2(&p3_r, &p2_r, &p1_r, &p0_r, &q0_r, &q1_r, &q2_r, &q3_r); if (mask & flat & 0x000000FF) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_r], -3(%[s4]) \n\t" "sb %[p1_r], -2(%[s4]) \n\t" "sb %[p0_r], -1(%[s4]) \n\t" @@ -313,25 +288,22 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "sb %[q2_r], +2(%[s4]) \n\t" : - : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r), - [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r), - [s4] "r" (s4) - ); + : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r), + [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r), + [s4] "r"(s4)); } else if (mask & 0x000000FF) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], -2(%[s4]) \n\t" "sb %[p0_f0], -1(%[s4]) \n\t" "sb %[q0_f0], (%[s4]) \n\t" "sb %[q1_f0], +1(%[s4]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [s4] "r" (s4) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [s4] "r"(s4)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p2_r], %[p2_r], 16 \n\t" "srl %[p1_r], %[p1_r], 16 \n\t" "srl %[p0_r], %[p0_r], 16 \n\t" @@ -343,15 +315,14 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p2_r] "+r" (p2_r), [p1_r] "+r" (p1_r), [p0_r] "+r" (p0_r), - 
[q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r), [q2_r] "+r" (q2_r), - [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p2_r] "+r"(p2_r), [p1_r] "+r"(p1_r), [p0_r] "+r"(p0_r), + [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r), + [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & 0x0000FF00) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_r], -3(%[s3]) \n\t" "sb %[p1_r], -2(%[s3]) \n\t" "sb %[p0_r], -1(%[s3]) \n\t" @@ -360,64 +331,57 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "sb %[q2_r], +2(%[s3]) \n\t" : - : [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), [p0_r] "r" (p0_r), - [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), [q2_r] "r" (q2_r), - [s3] "r" (s3) - ); + : [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), [p0_r] "r"(p0_r), + [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r), + [s3] "r"(s3)); } else if (mask & 0x0000FF00) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], -2(%[s3]) \n\t" "sb %[p0_f0], -1(%[s3]) \n\t" "sb %[q0_f0], (%[s3]) \n\t" "sb %[q1_f0], +1(%[s3]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [s3] "r" (s3) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [s3] "r"(s3)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p1_f0], %[p1_f0], 8 \n\t" "srl %[p0_f0], %[p0_f0], 8 \n\t" "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & 0x00FF0000) { - __asm__ __volatile__ ( - "sb %[p2_l], -3(%[s2]) \n\t" - "sb %[p1_l], -2(%[s2]) \n\t" - "sb %[p0_l], -1(%[s2]) \n\t" - "sb %[q0_l], (%[s2]) \n\t" - "sb %[q1_l], +1(%[s2]) \n\t" - "sb %[q2_l], +2(%[s2]) \n\t" + __asm__ __volatile__( + "sb %[p2_l], -3(%[s2]) \n\t" + 
"sb %[p1_l], -2(%[s2]) \n\t" + "sb %[p0_l], -1(%[s2]) \n\t" + "sb %[q0_l], (%[s2]) \n\t" + "sb %[q1_l], +1(%[s2]) \n\t" + "sb %[q2_l], +2(%[s2]) \n\t" - : - : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l), - [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l), - [s2] "r" (s2) - ); + : + : [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), [p0_l] "r"(p0_l), + [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l), + [s2] "r"(s2)); } else if (mask & 0x00FF0000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], -2(%[s2]) \n\t" "sb %[p0_f0], -1(%[s2]) \n\t" "sb %[q0_f0], (%[s2]) \n\t" "sb %[q1_f0], +1(%[s2]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [s2] "r" (s2) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [s2] "r"(s2)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p2_l], %[p2_l], 16 \n\t" "srl %[p1_l], %[p1_l], 16 \n\t" "srl %[p0_l], %[p0_l], 16 \n\t" @@ -429,15 +393,14 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p2_l] "+r" (p2_l), [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l), - [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l), - [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p2_l] "+r"(p2_l), [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l), + [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l), + [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & 0xFF000000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_l], -3(%[s1]) \n\t" "sb %[p1_l], -2(%[s1]) \n\t" "sb %[p0_l], -1(%[s1]) \n\t" @@ -446,54 +409,44 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "sb %[q2_l], +2(%[s1]) \n\t" : - : [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), [p0_l] "r" (p0_l), - [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l), - [s1] "r" (s1) - ); + : [p2_l] "r"(p2_l), [p1_l] 
"r"(p1_l), [p0_l] "r"(p0_l), + [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l), + [s1] "r"(s1)); } else if (mask & 0xFF000000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], -2(%[s1]) \n\t" "sb %[p0_f0], -1(%[s1]) \n\t" "sb %[q0_f0], (%[s1]) \n\t" "sb %[q1_f0], +1(%[s1]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [s1] "r" (s1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [s1] "r"(s1)); } } else if ((flat2 != 0) && (flat != 0) && (mask != 0)) { /* f0+f1+f2 */ - filter1_dspr2(mask, hev, p1, p0, q0, q1, - &p1_f0, &p0_f0, &q0_f0, &q1_f0); + filter1_dspr2(mask, hev, p1, p0, q0, q1, &p1_f0, &p0_f0, &q0_f0, &q1_f0); PACK_LEFT_0TO3() - mbfilter1_dspr2(p3_l, p2_l, p1_l, p0_l, - q0_l, q1_l, q2_l, q3_l, - &p2_l_f1, &p1_l_f1, &p0_l_f1, - &q0_l_f1, &q1_l_f1, &q2_l_f1); + mbfilter1_dspr2(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, &p2_l_f1, + &p1_l_f1, &p0_l_f1, &q0_l_f1, &q1_l_f1, &q2_l_f1); PACK_RIGHT_0TO3() - mbfilter1_dspr2(p3_r, p2_r, p1_r, p0_r, - q0_r, q1_r, q2_r, q3_r, - &p2_r_f1, &p1_r_f1, &p0_r_f1, - &q0_r_f1, &q1_r_f1, &q2_r_f1); + mbfilter1_dspr2(p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r, &p2_r_f1, + &p1_r_f1, &p0_r_f1, &q0_r_f1, &q1_r_f1, &q2_r_f1); PACK_LEFT_4TO7() - wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l, - &p3_l, &p2_l, &p1_l, &p0_l, - &q0_l, &q1_l, &q2_l, &q3_l, - &q4_l, &q5_l, &q6_l, &q7_l); + wide_mbfilter_dspr2(&p7_l, &p6_l, &p5_l, &p4_l, &p3_l, &p2_l, &p1_l, + &p0_l, &q0_l, &q1_l, &q2_l, &q3_l, &q4_l, &q5_l, + &q6_l, &q7_l); PACK_RIGHT_4TO7() - wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r, - &p3_r, &p2_r, &p1_r, &p0_r, - &q0_r, &q1_r, &q2_r, &q3_r, - &q4_r, &q5_r, &q6_r, &q7_r); + wide_mbfilter_dspr2(&p7_r, &p6_r, &p5_r, &p4_r, &p3_r, &p2_r, &p1_r, + &p0_r, &q0_r, &q1_r, &q2_r, &q3_r, &q4_r, &q5_r, + &q6_r, &q7_r); if (mask & flat & flat2 & 0x000000FF) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p6_r], 
-7(%[s4]) \n\t" "sb %[p5_r], -6(%[s4]) \n\t" "sb %[p4_r], -5(%[s4]) \n\t" @@ -503,13 +456,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "sb %[p0_r], -1(%[s4]) \n\t" : - : [p6_r] "r" (p6_r), [p5_r] "r" (p5_r), - [p4_r] "r" (p4_r), [p3_r] "r" (p3_r), - [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), - [p0_r] "r" (p0_r), [s4] "r" (s4) - ); + : [p6_r] "r"(p6_r), [p5_r] "r"(p5_r), [p4_r] "r"(p4_r), + [p3_r] "r"(p3_r), [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), + [p0_r] "r"(p0_r), [s4] "r"(s4)); - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[q0_r], (%[s4]) \n\t" "sb %[q1_r], +1(%[s4]) \n\t" "sb %[q2_r], +2(%[s4]) \n\t" @@ -519,13 +470,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "sb %[q6_r], +6(%[s4]) \n\t" : - : [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), - [q2_r] "r" (q2_r), [q3_r] "r" (q3_r), - [q4_r] "r" (q4_r), [q5_r] "r" (q5_r), - [q6_r] "r" (q6_r), [s4] "r" (s4) - ); + : [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r), + [q3_r] "r"(q3_r), [q4_r] "r"(q4_r), [q5_r] "r"(q5_r), + [q6_r] "r"(q6_r), [s4] "r"(s4)); } else if (mask & flat & 0x000000FF) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_r_f1], -3(%[s4]) \n\t" "sb %[p1_r_f1], -2(%[s4]) \n\t" "sb %[p0_r_f1], -1(%[s4]) \n\t" @@ -534,26 +483,22 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "sb %[q2_r_f1], +2(%[s4]) \n\t" : - : [p2_r_f1] "r" (p2_r_f1), [p1_r_f1] "r" (p1_r_f1), - [p0_r_f1] "r" (p0_r_f1), [q0_r_f1] "r" (q0_r_f1), - [q1_r_f1] "r" (q1_r_f1), [q2_r_f1] "r" (q2_r_f1), - [s4] "r" (s4) - ); + : [p2_r_f1] "r"(p2_r_f1), [p1_r_f1] "r"(p1_r_f1), + [p0_r_f1] "r"(p0_r_f1), [q0_r_f1] "r"(q0_r_f1), + [q1_r_f1] "r"(q1_r_f1), [q2_r_f1] "r"(q2_r_f1), [s4] "r"(s4)); } else if (mask & 0x000000FF) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], -2(%[s4]) \n\t" "sb %[p0_f0], -1(%[s4]) \n\t" "sb %[q0_f0], (%[s4]) \n\t" "sb %[q1_f0], +1(%[s4]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [s4] "r" (s4) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] 
"r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [s4] "r"(s4)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p6_r], %[p6_r], 16 \n\t" "srl %[p5_r], %[p5_r], 16 \n\t" "srl %[p4_r], %[p4_r], 16 \n\t" @@ -569,17 +514,14 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "srl %[q5_r], %[q5_r], 16 \n\t" "srl %[q6_r], %[q6_r], 16 \n\t" - : [q0_r] "+r" (q0_r), [q1_r] "+r" (q1_r), - [q2_r] "+r" (q2_r), [q3_r] "+r" (q3_r), - [q4_r] "+r" (q4_r), [q5_r] "+r" (q5_r), - [q6_r] "+r" (q6_r), [p6_r] "+r" (p6_r), - [p5_r] "+r" (p5_r), [p4_r] "+r" (p4_r), - [p3_r] "+r" (p3_r), [p2_r] "+r" (p2_r), - [p1_r] "+r" (p1_r), [p0_r] "+r" (p0_r) - : - ); + : [q0_r] "+r"(q0_r), [q1_r] "+r"(q1_r), [q2_r] "+r"(q2_r), + [q3_r] "+r"(q3_r), [q4_r] "+r"(q4_r), [q5_r] "+r"(q5_r), + [q6_r] "+r"(q6_r), [p6_r] "+r"(p6_r), [p5_r] "+r"(p5_r), + [p4_r] "+r"(p4_r), [p3_r] "+r"(p3_r), [p2_r] "+r"(p2_r), + [p1_r] "+r"(p1_r), [p0_r] "+r"(p0_r) + :); - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p2_r_f1], %[p2_r_f1], 16 \n\t" "srl %[p1_r_f1], %[p1_r_f1], 16 \n\t" "srl %[p0_r_f1], %[p0_r_f1], 16 \n\t" @@ -591,16 +533,15 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p2_r_f1] "+r" (p2_r_f1), [p1_r_f1] "+r" (p1_r_f1), - [p0_r_f1] "+r" (p0_r_f1), [q0_r_f1] "+r" (q0_r_f1), - [q1_r_f1] "+r" (q1_r_f1), [q2_r_f1] "+r" (q2_r_f1), - [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p2_r_f1] "+r"(p2_r_f1), [p1_r_f1] "+r"(p1_r_f1), + [p0_r_f1] "+r"(p0_r_f1), [q0_r_f1] "+r"(q0_r_f1), + [q1_r_f1] "+r"(q1_r_f1), [q2_r_f1] "+r"(q2_r_f1), + [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & flat2 & 0x0000FF00) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p6_r], -7(%[s3]) \n\t" "sb %[p5_r], -6(%[s3]) \n\t" "sb %[p4_r], -5(%[s3]) \n\t" @@ -610,12 +551,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "sb %[p0_r], 
-1(%[s3]) \n\t" : - : [p6_r] "r" (p6_r), [p5_r] "r" (p5_r), [p4_r] "r" (p4_r), - [p3_r] "r" (p3_r), [p2_r] "r" (p2_r), [p1_r] "r" (p1_r), - [p0_r] "r" (p0_r), [s3] "r" (s3) - ); + : [p6_r] "r"(p6_r), [p5_r] "r"(p5_r), [p4_r] "r"(p4_r), + [p3_r] "r"(p3_r), [p2_r] "r"(p2_r), [p1_r] "r"(p1_r), + [p0_r] "r"(p0_r), [s3] "r"(s3)); - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[q0_r], (%[s3]) \n\t" "sb %[q1_r], +1(%[s3]) \n\t" "sb %[q2_r], +2(%[s3]) \n\t" @@ -625,13 +565,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "sb %[q6_r], +6(%[s3]) \n\t" : - : [q0_r] "r" (q0_r), [q1_r] "r" (q1_r), - [q2_r] "r" (q2_r), [q3_r] "r" (q3_r), - [q4_r] "r" (q4_r), [q5_r] "r" (q5_r), - [q6_r] "r" (q6_r), [s3] "r" (s3) - ); + : [q0_r] "r"(q0_r), [q1_r] "r"(q1_r), [q2_r] "r"(q2_r), + [q3_r] "r"(q3_r), [q4_r] "r"(q4_r), [q5_r] "r"(q5_r), + [q6_r] "r"(q6_r), [s3] "r"(s3)); } else if (mask & flat & 0x0000FF00) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_r_f1], -3(%[s3]) \n\t" "sb %[p1_r_f1], -2(%[s3]) \n\t" "sb %[p0_r_f1], -1(%[s3]) \n\t" @@ -640,38 +578,33 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "sb %[q2_r_f1], +2(%[s3]) \n\t" : - : [p2_r_f1] "r" (p2_r_f1), [p1_r_f1] "r" (p1_r_f1), - [p0_r_f1] "r" (p0_r_f1), [q0_r_f1] "r" (q0_r_f1), - [q1_r_f1] "r" (q1_r_f1), [q2_r_f1] "r" (q2_r_f1), - [s3] "r" (s3) - ); + : [p2_r_f1] "r"(p2_r_f1), [p1_r_f1] "r"(p1_r_f1), + [p0_r_f1] "r"(p0_r_f1), [q0_r_f1] "r"(q0_r_f1), + [q1_r_f1] "r"(q1_r_f1), [q2_r_f1] "r"(q2_r_f1), [s3] "r"(s3)); } else if (mask & 0x0000FF00) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], -2(%[s3]) \n\t" "sb %[p0_f0], -1(%[s3]) \n\t" "sb %[q0_f0], (%[s3]) \n\t" "sb %[q1_f0], +1(%[s3]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [s3] "r" (s3) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [s3] "r"(s3)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p1_f0], %[p1_f0], 8 \n\t" "srl 
%[p0_f0], %[p0_f0], 8 \n\t" "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & flat2 & 0x00FF0000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p6_l], -7(%[s2]) \n\t" "sb %[p5_l], -6(%[s2]) \n\t" "sb %[p4_l], -5(%[s2]) \n\t" @@ -681,12 +614,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "sb %[p0_l], -1(%[s2]) \n\t" : - : [p6_l] "r" (p6_l), [p5_l] "r" (p5_l), [p4_l] "r" (p4_l), - [p3_l] "r" (p3_l), [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), - [p0_l] "r" (p0_l), [s2] "r" (s2) - ); + : [p6_l] "r"(p6_l), [p5_l] "r"(p5_l), [p4_l] "r"(p4_l), + [p3_l] "r"(p3_l), [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), + [p0_l] "r"(p0_l), [s2] "r"(s2)); - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[q0_l], (%[s2]) \n\t" "sb %[q1_l], +1(%[s2]) \n\t" "sb %[q2_l], +2(%[s2]) \n\t" @@ -696,12 +628,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "sb %[q6_l], +6(%[s2]) \n\t" : - : [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l), - [q3_l] "r" (q3_l), [q4_l] "r" (q4_l), [q5_l] "r" (q5_l), - [q6_l] "r" (q6_l), [s2] "r" (s2) - ); + : [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l), + [q3_l] "r"(q3_l), [q4_l] "r"(q4_l), [q5_l] "r"(q5_l), + [q6_l] "r"(q6_l), [s2] "r"(s2)); } else if (mask & flat & 0x00FF0000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_l_f1], -3(%[s2]) \n\t" "sb %[p1_l_f1], -2(%[s2]) \n\t" "sb %[p0_l_f1], -1(%[s2]) \n\t" @@ -710,26 +641,22 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "sb %[q2_l_f1], +2(%[s2]) \n\t" : - : [p2_l_f1] "r" (p2_l_f1), [p1_l_f1] "r" (p1_l_f1), - [p0_l_f1] "r" (p0_l_f1), [q0_l_f1] "r" (q0_l_f1), - [q1_l_f1] "r" (q1_l_f1), [q2_l_f1] "r" (q2_l_f1), - [s2] "r" (s2) - ); + : [p2_l_f1] "r"(p2_l_f1), [p1_l_f1] "r"(p1_l_f1), + [p0_l_f1] "r"(p0_l_f1), [q0_l_f1] "r"(q0_l_f1), + [q1_l_f1] 
"r"(q1_l_f1), [q2_l_f1] "r"(q2_l_f1), [s2] "r"(s2)); } else if (mask & 0x00FF0000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], -2(%[s2]) \n\t" "sb %[p0_f0], -1(%[s2]) \n\t" "sb %[q0_f0], (%[s2]) \n\t" "sb %[q1_f0], +1(%[s2]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [s2] "r" (s2) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [s2] "r"(s2)); } - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p6_l], %[p6_l], 16 \n\t" "srl %[p5_l], %[p5_l], 16 \n\t" "srl %[p4_l], %[p4_l], 16 \n\t" @@ -745,15 +672,14 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "srl %[q5_l], %[q5_l], 16 \n\t" "srl %[q6_l], %[q6_l], 16 \n\t" - : [q0_l] "+r" (q0_l), [q1_l] "+r" (q1_l), [q2_l] "+r" (q2_l), - [q3_l] "+r" (q3_l), [q4_l] "+r" (q4_l), [q5_l] "+r" (q5_l), - [q6_l] "+r" (q6_l), [p6_l] "+r" (p6_l), [p5_l] "+r" (p5_l), - [p4_l] "+r" (p4_l), [p3_l] "+r" (p3_l), [p2_l] "+r" (p2_l), - [p1_l] "+r" (p1_l), [p0_l] "+r" (p0_l) - : - ); + : [q0_l] "+r"(q0_l), [q1_l] "+r"(q1_l), [q2_l] "+r"(q2_l), + [q3_l] "+r"(q3_l), [q4_l] "+r"(q4_l), [q5_l] "+r"(q5_l), + [q6_l] "+r"(q6_l), [p6_l] "+r"(p6_l), [p5_l] "+r"(p5_l), + [p4_l] "+r"(p4_l), [p3_l] "+r"(p3_l), [p2_l] "+r"(p2_l), + [p1_l] "+r"(p1_l), [p0_l] "+r"(p0_l) + :); - __asm__ __volatile__ ( + __asm__ __volatile__( "srl %[p2_l_f1], %[p2_l_f1], 16 \n\t" "srl %[p1_l_f1], %[p1_l_f1], 16 \n\t" "srl %[p0_l_f1], %[p0_l_f1], 16 \n\t" @@ -765,16 +691,15 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "srl %[q0_f0], %[q0_f0], 8 \n\t" "srl %[q1_f0], %[q1_f0], 8 \n\t" - : [p2_l_f1] "+r" (p2_l_f1), [p1_l_f1] "+r" (p1_l_f1), - [p0_l_f1] "+r" (p0_l_f1), [q0_l_f1] "+r" (q0_l_f1), - [q1_l_f1] "+r" (q1_l_f1), [q2_l_f1] "+r" (q2_l_f1), - [p1_f0] "+r" (p1_f0), [p0_f0] "+r" (p0_f0), - [q0_f0] "+r" (q0_f0), [q1_f0] "+r" (q1_f0) - : - ); + : [p2_l_f1] "+r"(p2_l_f1), [p1_l_f1] "+r"(p1_l_f1), + [p0_l_f1] "+r"(p0_l_f1), [q0_l_f1] "+r"(q0_l_f1), + 
[q1_l_f1] "+r"(q1_l_f1), [q2_l_f1] "+r"(q2_l_f1), + [p1_f0] "+r"(p1_f0), [p0_f0] "+r"(p0_f0), [q0_f0] "+r"(q0_f0), + [q1_f0] "+r"(q1_f0) + :); if (mask & flat & flat2 & 0xFF000000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p6_l], -7(%[s1]) \n\t" "sb %[p5_l], -6(%[s1]) \n\t" "sb %[p4_l], -5(%[s1]) \n\t" @@ -784,13 +709,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "sb %[p0_l], -1(%[s1]) \n\t" : - : [p6_l] "r" (p6_l), [p5_l] "r" (p5_l), [p4_l] "r" (p4_l), - [p3_l] "r" (p3_l), [p2_l] "r" (p2_l), [p1_l] "r" (p1_l), - [p0_l] "r" (p0_l), - [s1] "r" (s1) - ); + : [p6_l] "r"(p6_l), [p5_l] "r"(p5_l), [p4_l] "r"(p4_l), + [p3_l] "r"(p3_l), [p2_l] "r"(p2_l), [p1_l] "r"(p1_l), + [p0_l] "r"(p0_l), [s1] "r"(s1)); - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[q0_l], (%[s1]) \n\t" "sb %[q1_l], 1(%[s1]) \n\t" "sb %[q2_l], 2(%[s1]) \n\t" @@ -800,13 +723,11 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "sb %[q6_l], 6(%[s1]) \n\t" : - : [q0_l] "r" (q0_l), [q1_l] "r" (q1_l), [q2_l] "r" (q2_l), - [q3_l] "r" (q3_l), [q4_l] "r" (q4_l), [q5_l] "r" (q5_l), - [q6_l] "r" (q6_l), - [s1] "r" (s1) - ); + : [q0_l] "r"(q0_l), [q1_l] "r"(q1_l), [q2_l] "r"(q2_l), + [q3_l] "r"(q3_l), [q4_l] "r"(q4_l), [q5_l] "r"(q5_l), + [q6_l] "r"(q6_l), [s1] "r"(s1)); } else if (mask & flat & 0xFF000000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p2_l_f1], -3(%[s1]) \n\t" "sb %[p1_l_f1], -2(%[s1]) \n\t" "sb %[p0_l_f1], -1(%[s1]) \n\t" @@ -815,23 +736,19 @@ void vpx_lpf_vertical_16_dspr2(uint8_t *s, "sb %[q2_l_f1], +2(%[s1]) \n\t" : - : [p2_l_f1] "r" (p2_l_f1), [p1_l_f1] "r" (p1_l_f1), - [p0_l_f1] "r" (p0_l_f1), [q0_l_f1] "r" (q0_l_f1), - [q1_l_f1] "r" (q1_l_f1), [q2_l_f1] "r" (q2_l_f1), - [s1] "r" (s1) - ); + : [p2_l_f1] "r"(p2_l_f1), [p1_l_f1] "r"(p1_l_f1), + [p0_l_f1] "r"(p0_l_f1), [q0_l_f1] "r"(q0_l_f1), + [q1_l_f1] "r"(q1_l_f1), [q2_l_f1] "r"(q2_l_f1), [s1] "r"(s1)); } else if (mask & 0xFF000000) { - __asm__ __volatile__ ( + __asm__ __volatile__( "sb %[p1_f0], -2(%[s1]) 
\n\t" "sb %[p0_f0], -1(%[s1]) \n\t" "sb %[q0_f0], (%[s1]) \n\t" "sb %[q1_f0], +1(%[s1]) \n\t" : - : [p1_f0] "r" (p1_f0), [p0_f0] "r" (p0_f0), - [q0_f0] "r" (q0_f0), [q1_f0] "r" (q1_f0), - [s1] "r" (s1) - ); + : [p1_f0] "r"(p1_f0), [p0_f0] "r"(p0_f0), [q0_f0] "r"(q0_f0), + [q1_f0] "r"(q1_f0), [s1] "r"(s1)); } } } diff --git a/libs/libvpx/vpx_dsp/mips/loopfilter_msa.h b/libs/libvpx/vpx_dsp/mips/loopfilter_msa.h index 62b170610b..4063e5e6b8 100644 --- a/libs/libvpx/vpx_dsp/mips/loopfilter_msa.h +++ b/libs/libvpx/vpx_dsp/mips/loopfilter_msa.h @@ -13,234 +13,238 @@ #include "vpx_dsp/mips/macros_msa.h" -#define VP9_LPF_FILTER4_8W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in, \ - p1_out, p0_out, q0_out, q1_out) { \ - v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \ - v16i8 filt, filt1, filt2, cnst4b, cnst3b; \ - v8i16 q0_sub_p0_r, filt_r, cnst3h; \ - \ - p1_m = (v16i8)__msa_xori_b(p1_in, 0x80); \ - p0_m = (v16i8)__msa_xori_b(p0_in, 0x80); \ - q0_m = (v16i8)__msa_xori_b(q0_in, 0x80); \ - q1_m = (v16i8)__msa_xori_b(q1_in, 0x80); \ - \ - filt = __msa_subs_s_b(p1_m, q1_m); \ - filt = filt & (v16i8)hev_in; \ - q0_sub_p0 = q0_m - p0_m; \ - filt_sign = __msa_clti_s_b(filt, 0); \ - \ - cnst3h = __msa_ldi_h(3); \ - q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0, q0_sub_p0); \ - q0_sub_p0_r = __msa_dotp_s_h((v16i8)q0_sub_p0_r, (v16i8)cnst3h); \ - filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt); \ - filt_r += q0_sub_p0_r; \ - filt_r = __msa_sat_s_h(filt_r, 7); \ - \ - /* combine left and right part */ \ - filt = __msa_pckev_b((v16i8)filt_r, (v16i8)filt_r); \ - \ - filt = filt & (v16i8)mask_in; \ - cnst4b = __msa_ldi_b(4); \ - filt1 = __msa_adds_s_b(filt, cnst4b); \ - filt1 >>= 3; \ - \ - cnst3b = __msa_ldi_b(3); \ - filt2 = __msa_adds_s_b(filt, cnst3b); \ - filt2 >>= 3; \ - \ - q0_m = __msa_subs_s_b(q0_m, filt1); \ - q0_out = __msa_xori_b((v16u8)q0_m, 0x80); \ - p0_m = __msa_adds_s_b(p0_m, filt2); \ - p0_out = __msa_xori_b((v16u8)p0_m, 0x80); \ - \ - filt = __msa_srari_b(filt1, 1); \ 
- hev_in = __msa_xori_b((v16u8)hev_in, 0xff); \ - filt = filt & (v16i8)hev_in; \ - \ - q1_m = __msa_subs_s_b(q1_m, filt); \ - q1_out = __msa_xori_b((v16u8)q1_m, 0x80); \ - p1_m = __msa_adds_s_b(p1_m, filt); \ - p1_out = __msa_xori_b((v16u8)p1_m, 0x80); \ -} +#define VP9_LPF_FILTER4_8W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in, \ + p1_out, p0_out, q0_out, q1_out) \ + { \ + v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \ + v16i8 filt, filt1, filt2, cnst4b, cnst3b; \ + v8i16 q0_sub_p0_r, filt_r, cnst3h; \ + \ + p1_m = (v16i8)__msa_xori_b(p1_in, 0x80); \ + p0_m = (v16i8)__msa_xori_b(p0_in, 0x80); \ + q0_m = (v16i8)__msa_xori_b(q0_in, 0x80); \ + q1_m = (v16i8)__msa_xori_b(q1_in, 0x80); \ + \ + filt = __msa_subs_s_b(p1_m, q1_m); \ + filt = filt & (v16i8)hev_in; \ + q0_sub_p0 = q0_m - p0_m; \ + filt_sign = __msa_clti_s_b(filt, 0); \ + \ + cnst3h = __msa_ldi_h(3); \ + q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0, q0_sub_p0); \ + q0_sub_p0_r = __msa_dotp_s_h((v16i8)q0_sub_p0_r, (v16i8)cnst3h); \ + filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt); \ + filt_r += q0_sub_p0_r; \ + filt_r = __msa_sat_s_h(filt_r, 7); \ + \ + /* combine left and right part */ \ + filt = __msa_pckev_b((v16i8)filt_r, (v16i8)filt_r); \ + \ + filt = filt & (v16i8)mask_in; \ + cnst4b = __msa_ldi_b(4); \ + filt1 = __msa_adds_s_b(filt, cnst4b); \ + filt1 >>= 3; \ + \ + cnst3b = __msa_ldi_b(3); \ + filt2 = __msa_adds_s_b(filt, cnst3b); \ + filt2 >>= 3; \ + \ + q0_m = __msa_subs_s_b(q0_m, filt1); \ + q0_out = __msa_xori_b((v16u8)q0_m, 0x80); \ + p0_m = __msa_adds_s_b(p0_m, filt2); \ + p0_out = __msa_xori_b((v16u8)p0_m, 0x80); \ + \ + filt = __msa_srari_b(filt1, 1); \ + hev_in = __msa_xori_b((v16u8)hev_in, 0xff); \ + filt = filt & (v16i8)hev_in; \ + \ + q1_m = __msa_subs_s_b(q1_m, filt); \ + q1_out = __msa_xori_b((v16u8)q1_m, 0x80); \ + p1_m = __msa_adds_s_b(p1_m, filt); \ + p1_out = __msa_xori_b((v16u8)p1_m, 0x80); \ + } -#define VP9_LPF_FILTER4_4W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in, \ - 
p1_out, p0_out, q0_out, q1_out) { \ - v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \ - v16i8 filt, filt1, filt2, cnst4b, cnst3b; \ - v8i16 q0_sub_p0_r, q0_sub_p0_l, filt_l, filt_r, cnst3h; \ - \ - p1_m = (v16i8)__msa_xori_b(p1_in, 0x80); \ - p0_m = (v16i8)__msa_xori_b(p0_in, 0x80); \ - q0_m = (v16i8)__msa_xori_b(q0_in, 0x80); \ - q1_m = (v16i8)__msa_xori_b(q1_in, 0x80); \ - \ - filt = __msa_subs_s_b(p1_m, q1_m); \ - \ - filt = filt & (v16i8)hev_in; \ - \ - q0_sub_p0 = q0_m - p0_m; \ - filt_sign = __msa_clti_s_b(filt, 0); \ - \ - cnst3h = __msa_ldi_h(3); \ - q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0, q0_sub_p0); \ - q0_sub_p0_r = __msa_dotp_s_h((v16i8)q0_sub_p0_r, (v16i8)cnst3h); \ - filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt); \ - filt_r += q0_sub_p0_r; \ - filt_r = __msa_sat_s_h(filt_r, 7); \ - \ - q0_sub_p0_l = (v8i16)__msa_ilvl_b(q0_sub_p0, q0_sub_p0); \ - q0_sub_p0_l = __msa_dotp_s_h((v16i8)q0_sub_p0_l, (v16i8)cnst3h); \ - filt_l = (v8i16)__msa_ilvl_b(filt_sign, filt); \ - filt_l += q0_sub_p0_l; \ - filt_l = __msa_sat_s_h(filt_l, 7); \ - \ - filt = __msa_pckev_b((v16i8)filt_l, (v16i8)filt_r); \ - filt = filt & (v16i8)mask_in; \ - \ - cnst4b = __msa_ldi_b(4); \ - filt1 = __msa_adds_s_b(filt, cnst4b); \ - filt1 >>= 3; \ - \ - cnst3b = __msa_ldi_b(3); \ - filt2 = __msa_adds_s_b(filt, cnst3b); \ - filt2 >>= 3; \ - \ - q0_m = __msa_subs_s_b(q0_m, filt1); \ - q0_out = __msa_xori_b((v16u8)q0_m, 0x80); \ - p0_m = __msa_adds_s_b(p0_m, filt2); \ - p0_out = __msa_xori_b((v16u8)p0_m, 0x80); \ - \ - filt = __msa_srari_b(filt1, 1); \ - hev_in = __msa_xori_b((v16u8)hev_in, 0xff); \ - filt = filt & (v16i8)hev_in; \ - \ - q1_m = __msa_subs_s_b(q1_m, filt); \ - q1_out = __msa_xori_b((v16u8)q1_m, 0x80); \ - p1_m = __msa_adds_s_b(p1_m, filt); \ - p1_out = __msa_xori_b((v16u8)p1_m, 0x80); \ -} +#define VP9_LPF_FILTER4_4W(p1_in, p0_in, q0_in, q1_in, mask_in, hev_in, \ + p1_out, p0_out, q0_out, q1_out) \ + { \ + v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \ + v16i8 
filt, filt1, filt2, cnst4b, cnst3b; \ + v8i16 q0_sub_p0_r, q0_sub_p0_l, filt_l, filt_r, cnst3h; \ + \ + p1_m = (v16i8)__msa_xori_b(p1_in, 0x80); \ + p0_m = (v16i8)__msa_xori_b(p0_in, 0x80); \ + q0_m = (v16i8)__msa_xori_b(q0_in, 0x80); \ + q1_m = (v16i8)__msa_xori_b(q1_in, 0x80); \ + \ + filt = __msa_subs_s_b(p1_m, q1_m); \ + \ + filt = filt & (v16i8)hev_in; \ + \ + q0_sub_p0 = q0_m - p0_m; \ + filt_sign = __msa_clti_s_b(filt, 0); \ + \ + cnst3h = __msa_ldi_h(3); \ + q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0, q0_sub_p0); \ + q0_sub_p0_r = __msa_dotp_s_h((v16i8)q0_sub_p0_r, (v16i8)cnst3h); \ + filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt); \ + filt_r += q0_sub_p0_r; \ + filt_r = __msa_sat_s_h(filt_r, 7); \ + \ + q0_sub_p0_l = (v8i16)__msa_ilvl_b(q0_sub_p0, q0_sub_p0); \ + q0_sub_p0_l = __msa_dotp_s_h((v16i8)q0_sub_p0_l, (v16i8)cnst3h); \ + filt_l = (v8i16)__msa_ilvl_b(filt_sign, filt); \ + filt_l += q0_sub_p0_l; \ + filt_l = __msa_sat_s_h(filt_l, 7); \ + \ + filt = __msa_pckev_b((v16i8)filt_l, (v16i8)filt_r); \ + filt = filt & (v16i8)mask_in; \ + \ + cnst4b = __msa_ldi_b(4); \ + filt1 = __msa_adds_s_b(filt, cnst4b); \ + filt1 >>= 3; \ + \ + cnst3b = __msa_ldi_b(3); \ + filt2 = __msa_adds_s_b(filt, cnst3b); \ + filt2 >>= 3; \ + \ + q0_m = __msa_subs_s_b(q0_m, filt1); \ + q0_out = __msa_xori_b((v16u8)q0_m, 0x80); \ + p0_m = __msa_adds_s_b(p0_m, filt2); \ + p0_out = __msa_xori_b((v16u8)p0_m, 0x80); \ + \ + filt = __msa_srari_b(filt1, 1); \ + hev_in = __msa_xori_b((v16u8)hev_in, 0xff); \ + filt = filt & (v16i8)hev_in; \ + \ + q1_m = __msa_subs_s_b(q1_m, filt); \ + q1_out = __msa_xori_b((v16u8)q1_m, 0x80); \ + p1_m = __msa_adds_s_b(p1_m, filt); \ + p1_out = __msa_xori_b((v16u8)p1_m, 0x80); \ + } -#define VP9_FLAT4(p3_in, p2_in, p0_in, q0_in, q2_in, q3_in, flat_out) { \ - v16u8 tmp, p2_a_sub_p0, q2_a_sub_q0, p3_a_sub_p0, q3_a_sub_q0; \ - v16u8 zero_in = { 0 }; \ - \ - tmp = __msa_ori_b(zero_in, 1); \ - p2_a_sub_p0 = __msa_asub_u_b(p2_in, p0_in); \ - q2_a_sub_q0 = 
__msa_asub_u_b(q2_in, q0_in); \ - p3_a_sub_p0 = __msa_asub_u_b(p3_in, p0_in); \ - q3_a_sub_q0 = __msa_asub_u_b(q3_in, q0_in); \ - \ - p2_a_sub_p0 = __msa_max_u_b(p2_a_sub_p0, q2_a_sub_q0); \ - flat_out = __msa_max_u_b(p2_a_sub_p0, flat_out); \ - p3_a_sub_p0 = __msa_max_u_b(p3_a_sub_p0, q3_a_sub_q0); \ - flat_out = __msa_max_u_b(p3_a_sub_p0, flat_out); \ - \ - flat_out = (tmp < (v16u8)flat_out); \ - flat_out = __msa_xori_b(flat_out, 0xff); \ - flat_out = flat_out & (mask); \ -} +#define VP9_FLAT4(p3_in, p2_in, p0_in, q0_in, q2_in, q3_in, flat_out) \ + { \ + v16u8 tmp, p2_a_sub_p0, q2_a_sub_q0, p3_a_sub_p0, q3_a_sub_q0; \ + v16u8 zero_in = { 0 }; \ + \ + tmp = __msa_ori_b(zero_in, 1); \ + p2_a_sub_p0 = __msa_asub_u_b(p2_in, p0_in); \ + q2_a_sub_q0 = __msa_asub_u_b(q2_in, q0_in); \ + p3_a_sub_p0 = __msa_asub_u_b(p3_in, p0_in); \ + q3_a_sub_q0 = __msa_asub_u_b(q3_in, q0_in); \ + \ + p2_a_sub_p0 = __msa_max_u_b(p2_a_sub_p0, q2_a_sub_q0); \ + flat_out = __msa_max_u_b(p2_a_sub_p0, flat_out); \ + p3_a_sub_p0 = __msa_max_u_b(p3_a_sub_p0, q3_a_sub_q0); \ + flat_out = __msa_max_u_b(p3_a_sub_p0, flat_out); \ + \ + flat_out = (tmp < (v16u8)flat_out); \ + flat_out = __msa_xori_b(flat_out, 0xff); \ + flat_out = flat_out & (mask); \ + } -#define VP9_FLAT5(p7_in, p6_in, p5_in, p4_in, p0_in, q0_in, q4_in, \ - q5_in, q6_in, q7_in, flat_in, flat2_out) { \ - v16u8 tmp, zero_in = { 0 }; \ - v16u8 p4_a_sub_p0, q4_a_sub_q0, p5_a_sub_p0, q5_a_sub_q0; \ - v16u8 p6_a_sub_p0, q6_a_sub_q0, p7_a_sub_p0, q7_a_sub_q0; \ - \ - tmp = __msa_ori_b(zero_in, 1); \ - p4_a_sub_p0 = __msa_asub_u_b(p4_in, p0_in); \ - q4_a_sub_q0 = __msa_asub_u_b(q4_in, q0_in); \ - p5_a_sub_p0 = __msa_asub_u_b(p5_in, p0_in); \ - q5_a_sub_q0 = __msa_asub_u_b(q5_in, q0_in); \ - p6_a_sub_p0 = __msa_asub_u_b(p6_in, p0_in); \ - q6_a_sub_q0 = __msa_asub_u_b(q6_in, q0_in); \ - p7_a_sub_p0 = __msa_asub_u_b(p7_in, p0_in); \ - q7_a_sub_q0 = __msa_asub_u_b(q7_in, q0_in); \ - \ - p4_a_sub_p0 = __msa_max_u_b(p4_a_sub_p0, q4_a_sub_q0); \ 
- flat2_out = __msa_max_u_b(p5_a_sub_p0, q5_a_sub_q0); \ - flat2_out = __msa_max_u_b(p4_a_sub_p0, flat2_out); \ - p6_a_sub_p0 = __msa_max_u_b(p6_a_sub_p0, q6_a_sub_q0); \ - flat2_out = __msa_max_u_b(p6_a_sub_p0, flat2_out); \ - p7_a_sub_p0 = __msa_max_u_b(p7_a_sub_p0, q7_a_sub_q0); \ - flat2_out = __msa_max_u_b(p7_a_sub_p0, flat2_out); \ - \ - flat2_out = (tmp < (v16u8)flat2_out); \ - flat2_out = __msa_xori_b(flat2_out, 0xff); \ - flat2_out = flat2_out & flat_in; \ -} +#define VP9_FLAT5(p7_in, p6_in, p5_in, p4_in, p0_in, q0_in, q4_in, q5_in, \ + q6_in, q7_in, flat_in, flat2_out) \ + { \ + v16u8 tmp, zero_in = { 0 }; \ + v16u8 p4_a_sub_p0, q4_a_sub_q0, p5_a_sub_p0, q5_a_sub_q0; \ + v16u8 p6_a_sub_p0, q6_a_sub_q0, p7_a_sub_p0, q7_a_sub_q0; \ + \ + tmp = __msa_ori_b(zero_in, 1); \ + p4_a_sub_p0 = __msa_asub_u_b(p4_in, p0_in); \ + q4_a_sub_q0 = __msa_asub_u_b(q4_in, q0_in); \ + p5_a_sub_p0 = __msa_asub_u_b(p5_in, p0_in); \ + q5_a_sub_q0 = __msa_asub_u_b(q5_in, q0_in); \ + p6_a_sub_p0 = __msa_asub_u_b(p6_in, p0_in); \ + q6_a_sub_q0 = __msa_asub_u_b(q6_in, q0_in); \ + p7_a_sub_p0 = __msa_asub_u_b(p7_in, p0_in); \ + q7_a_sub_q0 = __msa_asub_u_b(q7_in, q0_in); \ + \ + p4_a_sub_p0 = __msa_max_u_b(p4_a_sub_p0, q4_a_sub_q0); \ + flat2_out = __msa_max_u_b(p5_a_sub_p0, q5_a_sub_q0); \ + flat2_out = __msa_max_u_b(p4_a_sub_p0, flat2_out); \ + p6_a_sub_p0 = __msa_max_u_b(p6_a_sub_p0, q6_a_sub_q0); \ + flat2_out = __msa_max_u_b(p6_a_sub_p0, flat2_out); \ + p7_a_sub_p0 = __msa_max_u_b(p7_a_sub_p0, q7_a_sub_q0); \ + flat2_out = __msa_max_u_b(p7_a_sub_p0, flat2_out); \ + \ + flat2_out = (tmp < (v16u8)flat2_out); \ + flat2_out = __msa_xori_b(flat2_out, 0xff); \ + flat2_out = flat2_out & flat_in; \ + } -#define VP9_FILTER8(p3_in, p2_in, p1_in, p0_in, \ - q0_in, q1_in, q2_in, q3_in, \ - p2_filt8_out, p1_filt8_out, p0_filt8_out, \ - q0_filt8_out, q1_filt8_out, q2_filt8_out) { \ - v8u16 tmp0, tmp1, tmp2; \ - \ - tmp2 = p2_in + p1_in + p0_in; \ - tmp0 = p3_in << 1; \ - \ - tmp0 = tmp0 + 
tmp2 + q0_in; \ - tmp1 = tmp0 + p3_in + p2_in; \ - p2_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3); \ - \ - tmp1 = tmp0 + p1_in + q1_in; \ - p1_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3); \ - \ - tmp1 = q2_in + q1_in + q0_in; \ - tmp2 = tmp2 + tmp1; \ - tmp0 = tmp2 + (p0_in); \ - tmp0 = tmp0 + (p3_in); \ - p0_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp0, 3); \ - \ - tmp0 = q2_in + q3_in; \ - tmp0 = p0_in + tmp1 + tmp0; \ - tmp1 = q3_in + q3_in; \ - tmp1 = tmp1 + tmp0; \ - q2_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3); \ - \ - tmp0 = tmp2 + q3_in; \ - tmp1 = tmp0 + q0_in; \ - q0_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3); \ - \ - tmp1 = tmp0 - p2_in; \ - tmp0 = q1_in + q3_in; \ - tmp1 = tmp0 + tmp1; \ - q1_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3); \ -} +#define VP9_FILTER8(p3_in, p2_in, p1_in, p0_in, q0_in, q1_in, q2_in, q3_in, \ + p2_filt8_out, p1_filt8_out, p0_filt8_out, q0_filt8_out, \ + q1_filt8_out, q2_filt8_out) \ + { \ + v8u16 tmp0, tmp1, tmp2; \ + \ + tmp2 = p2_in + p1_in + p0_in; \ + tmp0 = p3_in << 1; \ + \ + tmp0 = tmp0 + tmp2 + q0_in; \ + tmp1 = tmp0 + p3_in + p2_in; \ + p2_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3); \ + \ + tmp1 = tmp0 + p1_in + q1_in; \ + p1_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3); \ + \ + tmp1 = q2_in + q1_in + q0_in; \ + tmp2 = tmp2 + tmp1; \ + tmp0 = tmp2 + (p0_in); \ + tmp0 = tmp0 + (p3_in); \ + p0_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp0, 3); \ + \ + tmp0 = q2_in + q3_in; \ + tmp0 = p0_in + tmp1 + tmp0; \ + tmp1 = q3_in + q3_in; \ + tmp1 = tmp1 + tmp0; \ + q2_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3); \ + \ + tmp0 = tmp2 + q3_in; \ + tmp1 = tmp0 + q0_in; \ + q0_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3); \ + \ + tmp1 = tmp0 - p2_in; \ + tmp0 = q1_in + q3_in; \ + tmp1 = tmp0 + tmp1; \ + q1_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3); \ + } -#define LPF_MASK_HEV(p3_in, p2_in, p1_in, p0_in, \ - q0_in, q1_in, q2_in, q3_in, \ - limit_in, b_limit_in, thresh_in, \ - 
hev_out, mask_out, flat_out) { \ - v16u8 p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m; \ - v16u8 p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m; \ - \ - /* absolute subtraction of pixel values */ \ - p3_asub_p2_m = __msa_asub_u_b(p3_in, p2_in); \ - p2_asub_p1_m = __msa_asub_u_b(p2_in, p1_in); \ - p1_asub_p0_m = __msa_asub_u_b(p1_in, p0_in); \ - q1_asub_q0_m = __msa_asub_u_b(q1_in, q0_in); \ - q2_asub_q1_m = __msa_asub_u_b(q2_in, q1_in); \ - q3_asub_q2_m = __msa_asub_u_b(q3_in, q2_in); \ - p0_asub_q0_m = __msa_asub_u_b(p0_in, q0_in); \ - p1_asub_q1_m = __msa_asub_u_b(p1_in, q1_in); \ - \ - /* calculation of hev */ \ - flat_out = __msa_max_u_b(p1_asub_p0_m, q1_asub_q0_m); \ - hev_out = thresh_in < (v16u8)flat_out; \ - \ - /* calculation of mask */ \ - p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p0_asub_q0_m); \ - p1_asub_q1_m >>= 1; \ - p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p1_asub_q1_m); \ - \ - mask_out = b_limit_in < p0_asub_q0_m; \ - mask_out = __msa_max_u_b(flat_out, mask_out); \ - p3_asub_p2_m = __msa_max_u_b(p3_asub_p2_m, p2_asub_p1_m); \ - mask_out = __msa_max_u_b(p3_asub_p2_m, mask_out); \ - q2_asub_q1_m = __msa_max_u_b(q2_asub_q1_m, q3_asub_q2_m); \ - mask_out = __msa_max_u_b(q2_asub_q1_m, mask_out); \ - \ - mask_out = limit_in < (v16u8)mask_out; \ - mask_out = __msa_xori_b(mask_out, 0xff); \ -} -#endif /* VPX_DSP_LOOPFILTER_MSA_H_ */ +#define LPF_MASK_HEV(p3_in, p2_in, p1_in, p0_in, q0_in, q1_in, q2_in, q3_in, \ + limit_in, b_limit_in, thresh_in, hev_out, mask_out, \ + flat_out) \ + { \ + v16u8 p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m; \ + v16u8 p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m; \ + \ + /* absolute subtraction of pixel values */ \ + p3_asub_p2_m = __msa_asub_u_b(p3_in, p2_in); \ + p2_asub_p1_m = __msa_asub_u_b(p2_in, p1_in); \ + p1_asub_p0_m = __msa_asub_u_b(p1_in, p0_in); \ + q1_asub_q0_m = __msa_asub_u_b(q1_in, q0_in); \ + q2_asub_q1_m = __msa_asub_u_b(q2_in, q1_in); \ + q3_asub_q2_m = 
__msa_asub_u_b(q3_in, q2_in); \ + p0_asub_q0_m = __msa_asub_u_b(p0_in, q0_in); \ + p1_asub_q1_m = __msa_asub_u_b(p1_in, q1_in); \ + \ + /* calculation of hev */ \ + flat_out = __msa_max_u_b(p1_asub_p0_m, q1_asub_q0_m); \ + hev_out = thresh_in < (v16u8)flat_out; \ + \ + /* calculation of mask */ \ + p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p0_asub_q0_m); \ + p1_asub_q1_m >>= 1; \ + p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p1_asub_q1_m); \ + \ + mask_out = b_limit_in < p0_asub_q0_m; \ + mask_out = __msa_max_u_b(flat_out, mask_out); \ + p3_asub_p2_m = __msa_max_u_b(p3_asub_p2_m, p2_asub_p1_m); \ + mask_out = __msa_max_u_b(p3_asub_p2_m, mask_out); \ + q2_asub_q1_m = __msa_max_u_b(q2_asub_q1_m, q3_asub_q2_m); \ + mask_out = __msa_max_u_b(q2_asub_q1_m, mask_out); \ + \ + mask_out = limit_in < (v16u8)mask_out; \ + mask_out = __msa_xori_b(mask_out, 0xff); \ + } +#endif /* VPX_DSP_LOOPFILTER_MSA_H_ */ diff --git a/libs/libvpx/vpx_dsp/mips/macros_msa.h b/libs/libvpx/vpx_dsp/mips/macros_msa.h index 91e3615cf8..f498fbe9de 100644 --- a/libs/libvpx/vpx_dsp/mips/macros_msa.h +++ b/libs/libvpx/vpx_dsp/mips/macros_msa.h @@ -38,194 +38,186 @@ #define ST_SW(...) 
ST_W(v4i32, __VA_ARGS__) #if (__mips_isa_rev >= 6) -#define LH(psrc) ({ \ - const uint8_t *psrc_m = (const uint8_t *)(psrc); \ - uint16_t val_m; \ - \ - __asm__ __volatile__ ( \ - "lh %[val_m], %[psrc_m] \n\t" \ - \ - : [val_m] "=r" (val_m) \ - : [psrc_m] "m" (*psrc_m) \ - ); \ - \ - val_m; \ -}) +#define LH(psrc) \ + ({ \ + const uint8_t *psrc_m = (const uint8_t *)(psrc); \ + uint16_t val_m; \ + \ + __asm__ __volatile__("lh %[val_m], %[psrc_m] \n\t" \ + \ + : [val_m] "=r"(val_m) \ + : [psrc_m] "m"(*psrc_m)); \ + \ + val_m; \ + }) -#define LW(psrc) ({ \ - const uint8_t *psrc_m = (const uint8_t *)(psrc); \ - uint32_t val_m; \ - \ - __asm__ __volatile__ ( \ - "lw %[val_m], %[psrc_m] \n\t" \ - \ - : [val_m] "=r" (val_m) \ - : [psrc_m] "m" (*psrc_m) \ - ); \ - \ - val_m; \ -}) +#define LW(psrc) \ + ({ \ + const uint8_t *psrc_m = (const uint8_t *)(psrc); \ + uint32_t val_m; \ + \ + __asm__ __volatile__("lw %[val_m], %[psrc_m] \n\t" \ + \ + : [val_m] "=r"(val_m) \ + : [psrc_m] "m"(*psrc_m)); \ + \ + val_m; \ + }) #if (__mips == 64) -#define LD(psrc) ({ \ - const uint8_t *psrc_m = (const uint8_t *)(psrc); \ - uint64_t val_m = 0; \ - \ - __asm__ __volatile__ ( \ - "ld %[val_m], %[psrc_m] \n\t" \ - \ - : [val_m] "=r" (val_m) \ - : [psrc_m] "m" (*psrc_m) \ - ); \ - \ - val_m; \ -}) +#define LD(psrc) \ + ({ \ + const uint8_t *psrc_m = (const uint8_t *)(psrc); \ + uint64_t val_m = 0; \ + \ + __asm__ __volatile__("ld %[val_m], %[psrc_m] \n\t" \ + \ + : [val_m] "=r"(val_m) \ + : [psrc_m] "m"(*psrc_m)); \ + \ + val_m; \ + }) #else // !(__mips == 64) -#define LD(psrc) ({ \ - const uint8_t *psrc_m = (const uint8_t *)(psrc); \ - uint32_t val0_m, val1_m; \ - uint64_t val_m = 0; \ - \ - val0_m = LW(psrc_m); \ - val1_m = LW(psrc_m + 4); \ - \ - val_m = (uint64_t)(val1_m); \ - val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); \ - val_m = (uint64_t)(val_m | (uint64_t)val0_m); \ - \ - val_m; \ -}) +#define LD(psrc) \ + ({ \ + const uint8_t *psrc_m = (const uint8_t *)(psrc); \ + 
uint32_t val0_m, val1_m; \ + uint64_t val_m = 0; \ + \ + val0_m = LW(psrc_m); \ + val1_m = LW(psrc_m + 4); \ + \ + val_m = (uint64_t)(val1_m); \ + val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); \ + val_m = (uint64_t)(val_m | (uint64_t)val0_m); \ + \ + val_m; \ + }) #endif // (__mips == 64) -#define SH(val, pdst) { \ - uint8_t *pdst_m = (uint8_t *)(pdst); \ - const uint16_t val_m = (val); \ - \ - __asm__ __volatile__ ( \ - "sh %[val_m], %[pdst_m] \n\t" \ - \ - : [pdst_m] "=m" (*pdst_m) \ - : [val_m] "r" (val_m) \ - ); \ -} +#define SH(val, pdst) \ + { \ + uint8_t *pdst_m = (uint8_t *)(pdst); \ + const uint16_t val_m = (val); \ + \ + __asm__ __volatile__("sh %[val_m], %[pdst_m] \n\t" \ + \ + : [pdst_m] "=m"(*pdst_m) \ + : [val_m] "r"(val_m)); \ + } -#define SW(val, pdst) { \ - uint8_t *pdst_m = (uint8_t *)(pdst); \ - const uint32_t val_m = (val); \ - \ - __asm__ __volatile__ ( \ - "sw %[val_m], %[pdst_m] \n\t" \ - \ - : [pdst_m] "=m" (*pdst_m) \ - : [val_m] "r" (val_m) \ - ); \ -} +#define SW(val, pdst) \ + { \ + uint8_t *pdst_m = (uint8_t *)(pdst); \ + const uint32_t val_m = (val); \ + \ + __asm__ __volatile__("sw %[val_m], %[pdst_m] \n\t" \ + \ + : [pdst_m] "=m"(*pdst_m) \ + : [val_m] "r"(val_m)); \ + } -#define SD(val, pdst) { \ - uint8_t *pdst_m = (uint8_t *)(pdst); \ - const uint64_t val_m = (val); \ - \ - __asm__ __volatile__ ( \ - "sd %[val_m], %[pdst_m] \n\t" \ - \ - : [pdst_m] "=m" (*pdst_m) \ - : [val_m] "r" (val_m) \ - ); \ -} +#define SD(val, pdst) \ + { \ + uint8_t *pdst_m = (uint8_t *)(pdst); \ + const uint64_t val_m = (val); \ + \ + __asm__ __volatile__("sd %[val_m], %[pdst_m] \n\t" \ + \ + : [pdst_m] "=m"(*pdst_m) \ + : [val_m] "r"(val_m)); \ + } #else // !(__mips_isa_rev >= 6) -#define LH(psrc) ({ \ - const uint8_t *psrc_m = (const uint8_t *)(psrc); \ - uint16_t val_m; \ - \ - __asm__ __volatile__ ( \ - "ulh %[val_m], %[psrc_m] \n\t" \ - \ - : [val_m] "=r" (val_m) \ - : [psrc_m] "m" (*psrc_m) \ - ); \ - \ - val_m; \ -}) +#define LH(psrc) \ + 
({ \ + const uint8_t *psrc_m = (const uint8_t *)(psrc); \ + uint16_t val_m; \ + \ + __asm__ __volatile__("ulh %[val_m], %[psrc_m] \n\t" \ + \ + : [val_m] "=r"(val_m) \ + : [psrc_m] "m"(*psrc_m)); \ + \ + val_m; \ + }) -#define LW(psrc) ({ \ - const uint8_t *psrc_m = (const uint8_t *)(psrc); \ - uint32_t val_m; \ - \ - __asm__ __volatile__ ( \ - "ulw %[val_m], %[psrc_m] \n\t" \ - \ - : [val_m] "=r" (val_m) \ - : [psrc_m] "m" (*psrc_m) \ - ); \ - \ - val_m; \ -}) +#define LW(psrc) \ + ({ \ + const uint8_t *psrc_m = (const uint8_t *)(psrc); \ + uint32_t val_m; \ + \ + __asm__ __volatile__("ulw %[val_m], %[psrc_m] \n\t" \ + \ + : [val_m] "=r"(val_m) \ + : [psrc_m] "m"(*psrc_m)); \ + \ + val_m; \ + }) #if (__mips == 64) -#define LD(psrc) ({ \ - const uint8_t *psrc_m = (const uint8_t *)(psrc); \ - uint64_t val_m = 0; \ - \ - __asm__ __volatile__ ( \ - "uld %[val_m], %[psrc_m] \n\t" \ - \ - : [val_m] "=r" (val_m) \ - : [psrc_m] "m" (*psrc_m) \ - ); \ - \ - val_m; \ -}) +#define LD(psrc) \ + ({ \ + const uint8_t *psrc_m = (const uint8_t *)(psrc); \ + uint64_t val_m = 0; \ + \ + __asm__ __volatile__("uld %[val_m], %[psrc_m] \n\t" \ + \ + : [val_m] "=r"(val_m) \ + : [psrc_m] "m"(*psrc_m)); \ + \ + val_m; \ + }) #else // !(__mips == 64) -#define LD(psrc) ({ \ - const uint8_t *psrc_m1 = (const uint8_t *)(psrc); \ - uint32_t val0_m, val1_m; \ - uint64_t val_m = 0; \ - \ - val0_m = LW(psrc_m1); \ - val1_m = LW(psrc_m1 + 4); \ - \ - val_m = (uint64_t)(val1_m); \ - val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); \ - val_m = (uint64_t)(val_m | (uint64_t)val0_m); \ - \ - val_m; \ -}) +#define LD(psrc) \ + ({ \ + const uint8_t *psrc_m1 = (const uint8_t *)(psrc); \ + uint32_t val0_m, val1_m; \ + uint64_t val_m = 0; \ + \ + val0_m = LW(psrc_m1); \ + val1_m = LW(psrc_m1 + 4); \ + \ + val_m = (uint64_t)(val1_m); \ + val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); \ + val_m = (uint64_t)(val_m | (uint64_t)val0_m); \ + \ + val_m; \ + }) #endif // (__mips == 64) -#define 
SH(val, pdst) { \ - uint8_t *pdst_m = (uint8_t *)(pdst); \ - const uint16_t val_m = (val); \ - \ - __asm__ __volatile__ ( \ - "ush %[val_m], %[pdst_m] \n\t" \ - \ - : [pdst_m] "=m" (*pdst_m) \ - : [val_m] "r" (val_m) \ - ); \ -} +#define SH(val, pdst) \ + { \ + uint8_t *pdst_m = (uint8_t *)(pdst); \ + const uint16_t val_m = (val); \ + \ + __asm__ __volatile__("ush %[val_m], %[pdst_m] \n\t" \ + \ + : [pdst_m] "=m"(*pdst_m) \ + : [val_m] "r"(val_m)); \ + } -#define SW(val, pdst) { \ - uint8_t *pdst_m = (uint8_t *)(pdst); \ - const uint32_t val_m = (val); \ - \ - __asm__ __volatile__ ( \ - "usw %[val_m], %[pdst_m] \n\t" \ - \ - : [pdst_m] "=m" (*pdst_m) \ - : [val_m] "r" (val_m) \ - ); \ -} +#define SW(val, pdst) \ + { \ + uint8_t *pdst_m = (uint8_t *)(pdst); \ + const uint32_t val_m = (val); \ + \ + __asm__ __volatile__("usw %[val_m], %[pdst_m] \n\t" \ + \ + : [pdst_m] "=m"(*pdst_m) \ + : [val_m] "r"(val_m)); \ + } -#define SD(val, pdst) { \ - uint8_t *pdst_m1 = (uint8_t *)(pdst); \ - uint32_t val0_m, val1_m; \ - \ - val0_m = (uint32_t)((val) & 0x00000000FFFFFFFF); \ - val1_m = (uint32_t)(((val) >> 32) & 0x00000000FFFFFFFF); \ - \ - SW(val0_m, pdst_m1); \ - SW(val1_m, pdst_m1 + 4); \ -} +#define SD(val, pdst) \ + { \ + uint8_t *pdst_m1 = (uint8_t *)(pdst); \ + uint32_t val0_m, val1_m; \ + \ + val0_m = (uint32_t)((val)&0x00000000FFFFFFFF); \ + val1_m = (uint32_t)(((val) >> 32) & 0x00000000FFFFFFFF); \ + \ + SW(val0_m, pdst_m1); \ + SW(val1_m, pdst_m1 + 4); \ + } #endif // (__mips_isa_rev >= 6) /* Description : Load 4 words with stride @@ -236,12 +228,13 @@ Load word in 'out2' from (psrc + 2 * stride) Load word in 'out3' from (psrc + 3 * stride) */ -#define LW4(psrc, stride, out0, out1, out2, out3) { \ - out0 = LW((psrc)); \ - out1 = LW((psrc) + stride); \ - out2 = LW((psrc) + 2 * stride); \ - out3 = LW((psrc) + 3 * stride); \ -} +#define LW4(psrc, stride, out0, out1, out2, out3) \ + { \ + out0 = LW((psrc)); \ + out1 = LW((psrc) + stride); \ + out2 = LW((psrc) + 2 * 
stride); \ + out3 = LW((psrc) + 3 * stride); \ + } /* Description : Load double words with stride Arguments : Inputs - psrc, stride @@ -249,14 +242,16 @@ Details : Load double word in 'out0' from (psrc) Load double word in 'out1' from (psrc + stride) */ -#define LD2(psrc, stride, out0, out1) { \ - out0 = LD((psrc)); \ - out1 = LD((psrc) + stride); \ -} -#define LD4(psrc, stride, out0, out1, out2, out3) { \ - LD2((psrc), stride, out0, out1); \ - LD2((psrc) + 2 * stride, stride, out2, out3); \ -} +#define LD2(psrc, stride, out0, out1) \ + { \ + out0 = LD((psrc)); \ + out1 = LD((psrc) + stride); \ + } +#define LD4(psrc, stride, out0, out1, out2, out3) \ + { \ + LD2((psrc), stride, out0, out1); \ + LD2((psrc) + 2 * stride, stride, out2, out3); \ + } /* Description : Store 4 words with stride Arguments : Inputs - in0, in1, in2, in3, pdst, stride @@ -265,12 +260,13 @@ Store word from 'in2' to (pdst + 2 * stride) Store word from 'in3' to (pdst + 3 * stride) */ -#define SW4(in0, in1, in2, in3, pdst, stride) { \ - SW(in0, (pdst)) \ - SW(in1, (pdst) + stride); \ - SW(in2, (pdst) + 2 * stride); \ - SW(in3, (pdst) + 3 * stride); \ -} +#define SW4(in0, in1, in2, in3, pdst, stride) \ + { \ + SW(in0, (pdst)) \ + SW(in1, (pdst) + stride); \ + SW(in2, (pdst) + 2 * stride); \ + SW(in3, (pdst) + 3 * stride); \ + } /* Description : Store 4 double words with stride Arguments : Inputs - in0, in1, in2, in3, pdst, stride @@ -279,12 +275,13 @@ Store double word from 'in2' to (pdst + 2 * stride) Store double word from 'in3' to (pdst + 3 * stride) */ -#define SD4(in0, in1, in2, in3, pdst, stride) { \ - SD(in0, (pdst)) \ - SD(in1, (pdst) + stride); \ - SD(in2, (pdst) + 2 * stride); \ - SD(in3, (pdst) + 3 * stride); \ -} +#define SD4(in0, in1, in2, in3, pdst, stride) \ + { \ + SD(in0, (pdst)) \ + SD(in1, (pdst) + stride); \ + SD(in2, (pdst) + 2 * stride); \ + SD(in3, (pdst) + 3 * stride); \ + } /* Description : Load vectors with 16 byte elements with stride Arguments : Inputs - psrc, stride @@ 
-293,45 +290,50 @@ Details : Load 16 byte elements in 'out0' from (psrc) Load 16 byte elements in 'out1' from (psrc + stride) */ -#define LD_B2(RTYPE, psrc, stride, out0, out1) { \ - out0 = LD_B(RTYPE, (psrc)); \ - out1 = LD_B(RTYPE, (psrc) + stride); \ -} +#define LD_B2(RTYPE, psrc, stride, out0, out1) \ + { \ + out0 = LD_B(RTYPE, (psrc)); \ + out1 = LD_B(RTYPE, (psrc) + stride); \ + } #define LD_UB2(...) LD_B2(v16u8, __VA_ARGS__) #define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__) -#define LD_B3(RTYPE, psrc, stride, out0, out1, out2) { \ - LD_B2(RTYPE, (psrc), stride, out0, out1); \ - out2 = LD_B(RTYPE, (psrc) + 2 * stride); \ -} +#define LD_B3(RTYPE, psrc, stride, out0, out1, out2) \ + { \ + LD_B2(RTYPE, (psrc), stride, out0, out1); \ + out2 = LD_B(RTYPE, (psrc) + 2 * stride); \ + } #define LD_UB3(...) LD_B3(v16u8, __VA_ARGS__) -#define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) { \ - LD_B2(RTYPE, (psrc), stride, out0, out1); \ - LD_B2(RTYPE, (psrc) + 2 * stride , stride, out2, out3); \ -} +#define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) \ + { \ + LD_B2(RTYPE, (psrc), stride, out0, out1); \ + LD_B2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \ + } #define LD_UB4(...) LD_B4(v16u8, __VA_ARGS__) #define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__) -#define LD_B5(RTYPE, psrc, stride, out0, out1, out2, out3, out4) { \ - LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3); \ - out4 = LD_B(RTYPE, (psrc) + 4 * stride); \ -} +#define LD_B5(RTYPE, psrc, stride, out0, out1, out2, out3, out4) \ + { \ + LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3); \ + out4 = LD_B(RTYPE, (psrc) + 4 * stride); \ + } #define LD_UB5(...) LD_B5(v16u8, __VA_ARGS__) #define LD_SB5(...) 
LD_B5(v16i8, __VA_ARGS__) -#define LD_B7(RTYPE, psrc, stride, \ - out0, out1, out2, out3, out4, out5, out6) { \ - LD_B5(RTYPE, (psrc), stride, out0, out1, out2, out3, out4); \ - LD_B2(RTYPE, (psrc) + 5 * stride, stride, out5, out6); \ -} +#define LD_B7(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5, out6) \ + { \ + LD_B5(RTYPE, (psrc), stride, out0, out1, out2, out3, out4); \ + LD_B2(RTYPE, (psrc) + 5 * stride, stride, out5, out6); \ + } #define LD_SB7(...) LD_B7(v16i8, __VA_ARGS__) -#define LD_B8(RTYPE, psrc, stride, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3); \ - LD_B4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7); \ -} +#define LD_B8(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5, out6, \ + out7) \ + { \ + LD_B4(RTYPE, (psrc), stride, out0, out1, out2, out3); \ + LD_B4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7); \ + } #define LD_UB8(...) LD_B8(v16u8, __VA_ARGS__) #define LD_SB8(...) LD_B8(v16i8, __VA_ARGS__) @@ -341,33 +343,36 @@ Details : Load 8 halfword elements in 'out0' from (psrc) Load 8 halfword elements in 'out1' from (psrc + stride) */ -#define LD_H2(RTYPE, psrc, stride, out0, out1) { \ - out0 = LD_H(RTYPE, (psrc)); \ - out1 = LD_H(RTYPE, (psrc) + (stride)); \ -} +#define LD_H2(RTYPE, psrc, stride, out0, out1) \ + { \ + out0 = LD_H(RTYPE, (psrc)); \ + out1 = LD_H(RTYPE, (psrc) + (stride)); \ + } #define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__) -#define LD_H4(RTYPE, psrc, stride, out0, out1, out2, out3) { \ - LD_H2(RTYPE, (psrc), stride, out0, out1); \ - LD_H2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \ -} +#define LD_H4(RTYPE, psrc, stride, out0, out1, out2, out3) \ + { \ + LD_H2(RTYPE, (psrc), stride, out0, out1); \ + LD_H2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \ + } #define LD_SH4(...) 
LD_H4(v8i16, __VA_ARGS__) -#define LD_H8(RTYPE, psrc, stride, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - LD_H4(RTYPE, (psrc), stride, out0, out1, out2, out3); \ - LD_H4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7); \ -} +#define LD_H8(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5, out6, \ + out7) \ + { \ + LD_H4(RTYPE, (psrc), stride, out0, out1, out2, out3); \ + LD_H4(RTYPE, (psrc) + 4 * stride, stride, out4, out5, out6, out7); \ + } #define LD_SH8(...) LD_H8(v8i16, __VA_ARGS__) -#define LD_H16(RTYPE, psrc, stride, \ - out0, out1, out2, out3, out4, out5, out6, out7, \ - out8, out9, out10, out11, out12, out13, out14, out15) { \ - LD_H8(RTYPE, (psrc), stride, \ - out0, out1, out2, out3, out4, out5, out6, out7); \ - LD_H8(RTYPE, (psrc) + 8 * stride, stride, \ - out8, out9, out10, out11, out12, out13, out14, out15); \ -} +#define LD_H16(RTYPE, psrc, stride, out0, out1, out2, out3, out4, out5, out6, \ + out7, out8, out9, out10, out11, out12, out13, out14, out15) \ + { \ + LD_H8(RTYPE, (psrc), stride, out0, out1, out2, out3, out4, out5, out6, \ + out7); \ + LD_H8(RTYPE, (psrc) + 8 * stride, stride, out8, out9, out10, out11, out12, \ + out13, out14, out15); \ + } #define LD_SH16(...) 
LD_H16(v8i16, __VA_ARGS__) /* Description : Load 4x4 block of signed halfword elements from 1D source @@ -375,45 +380,49 @@ Arguments : Input - psrc Outputs - out0, out1, out2, out3 */ -#define LD4x4_SH(psrc, out0, out1, out2, out3) { \ - out0 = LD_SH(psrc); \ - out2 = LD_SH(psrc + 8); \ - out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \ - out3 = (v8i16)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \ -} +#define LD4x4_SH(psrc, out0, out1, out2, out3) \ + { \ + out0 = LD_SH(psrc); \ + out2 = LD_SH(psrc + 8); \ + out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \ + out3 = (v8i16)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \ + } /* Description : Load 2 vectors of signed word elements with stride Arguments : Inputs - psrc, stride Outputs - out0, out1 Return Type - signed word */ -#define LD_SW2(psrc, stride, out0, out1) { \ - out0 = LD_SW((psrc)); \ - out1 = LD_SW((psrc) + stride); \ -} +#define LD_SW2(psrc, stride, out0, out1) \ + { \ + out0 = LD_SW((psrc)); \ + out1 = LD_SW((psrc) + stride); \ + } /* Description : Store vectors of 16 byte elements with stride Arguments : Inputs - in0, in1, pdst, stride Details : Store 16 byte elements from 'in0' to (pdst) Store 16 byte elements from 'in1' to (pdst + stride) */ -#define ST_B2(RTYPE, in0, in1, pdst, stride) { \ - ST_B(RTYPE, in0, (pdst)); \ - ST_B(RTYPE, in1, (pdst) + stride); \ -} +#define ST_B2(RTYPE, in0, in1, pdst, stride) \ + { \ + ST_B(RTYPE, in0, (pdst)); \ + ST_B(RTYPE, in1, (pdst) + stride); \ + } #define ST_UB2(...) ST_B2(v16u8, __VA_ARGS__) -#define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) { \ - ST_B2(RTYPE, in0, in1, (pdst), stride); \ - ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \ -} +#define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \ + { \ + ST_B2(RTYPE, in0, in1, (pdst), stride); \ + ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \ + } #define ST_UB4(...) 
ST_B4(v16u8, __VA_ARGS__) -#define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - pdst, stride) { \ - ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \ - ST_B4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \ -} +#define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ + { \ + ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \ + ST_B4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \ + } #define ST_UB8(...) ST_B8(v16u8, __VA_ARGS__) /* Description : Store vectors of 8 halfword elements with stride @@ -421,22 +430,25 @@ Details : Store 8 halfword elements from 'in0' to (pdst) Store 8 halfword elements from 'in1' to (pdst + stride) */ -#define ST_H2(RTYPE, in0, in1, pdst, stride) { \ - ST_H(RTYPE, in0, (pdst)); \ - ST_H(RTYPE, in1, (pdst) + stride); \ -} +#define ST_H2(RTYPE, in0, in1, pdst, stride) \ + { \ + ST_H(RTYPE, in0, (pdst)); \ + ST_H(RTYPE, in1, (pdst) + stride); \ + } #define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__) -#define ST_H4(RTYPE, in0, in1, in2, in3, pdst, stride) { \ - ST_H2(RTYPE, in0, in1, (pdst), stride); \ - ST_H2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \ -} +#define ST_H4(RTYPE, in0, in1, in2, in3, pdst, stride) \ + { \ + ST_H2(RTYPE, in0, in1, (pdst), stride); \ + ST_H2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \ + } #define ST_SH4(...) ST_H4(v8i16, __VA_ARGS__) -#define ST_H8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) { \ - ST_H4(RTYPE, in0, in1, in2, in3, (pdst), stride); \ - ST_H4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \ -} +#define ST_H8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, pdst, stride) \ + { \ + ST_H4(RTYPE, in0, in1, in2, in3, (pdst), stride); \ + ST_H4(RTYPE, in4, in5, in6, in7, (pdst) + 4 * stride, stride); \ + } #define ST_SH8(...) 
ST_H8(v8i16, __VA_ARGS__) /* Description : Store vectors of word elements with stride @@ -444,10 +456,11 @@ Details : Store 4 word elements from 'in0' to (pdst) Store 4 word elements from 'in1' to (pdst + stride) */ -#define ST_SW2(in0, in1, pdst, stride) { \ - ST_SW(in0, (pdst)); \ - ST_SW(in1, (pdst) + stride); \ -} +#define ST_SW2(in0, in1, pdst, stride) \ + { \ + ST_SW(in0, (pdst)); \ + ST_SW(in1, (pdst) + stride); \ + } /* Description : Store 2x4 byte block to destination memory from input vector Arguments : Inputs - in, stidx, pdst, stride @@ -460,20 +473,21 @@ Index 'stidx+3' halfword element from 'in' vector is copied to the GP register and stored to (pdst + 3 * stride) */ -#define ST2x4_UB(in, stidx, pdst, stride) { \ - uint16_t out0_m, out1_m, out2_m, out3_m; \ - uint8_t *pblk_2x4_m = (uint8_t *)(pdst); \ - \ - out0_m = __msa_copy_u_h((v8i16)in, (stidx)); \ - out1_m = __msa_copy_u_h((v8i16)in, (stidx + 1)); \ - out2_m = __msa_copy_u_h((v8i16)in, (stidx + 2)); \ - out3_m = __msa_copy_u_h((v8i16)in, (stidx + 3)); \ - \ - SH(out0_m, pblk_2x4_m); \ - SH(out1_m, pblk_2x4_m + stride); \ - SH(out2_m, pblk_2x4_m + 2 * stride); \ - SH(out3_m, pblk_2x4_m + 3 * stride); \ -} +#define ST2x4_UB(in, stidx, pdst, stride) \ + { \ + uint16_t out0_m, out1_m, out2_m, out3_m; \ + uint8_t *pblk_2x4_m = (uint8_t *)(pdst); \ + \ + out0_m = __msa_copy_u_h((v8i16)in, (stidx)); \ + out1_m = __msa_copy_u_h((v8i16)in, (stidx + 1)); \ + out2_m = __msa_copy_u_h((v8i16)in, (stidx + 2)); \ + out3_m = __msa_copy_u_h((v8i16)in, (stidx + 3)); \ + \ + SH(out0_m, pblk_2x4_m); \ + SH(out1_m, pblk_2x4_m + stride); \ + SH(out2_m, pblk_2x4_m + 2 * stride); \ + SH(out3_m, pblk_2x4_m + 3 * stride); \ + } /* Description : Store 4x2 byte block to destination memory from input vector Arguments : Inputs - in, pdst, stride @@ -482,16 +496,17 @@ Index 1 word element from 'in' vector is copied to the GP register and stored to (pdst + stride) */ -#define ST4x2_UB(in, pdst, stride) { \ - uint32_t out0_m, 
out1_m; \ - uint8_t *pblk_4x2_m = (uint8_t *)(pdst); \ - \ - out0_m = __msa_copy_u_w((v4i32)in, 0); \ - out1_m = __msa_copy_u_w((v4i32)in, 1); \ - \ - SW(out0_m, pblk_4x2_m); \ - SW(out1_m, pblk_4x2_m + stride); \ -} +#define ST4x2_UB(in, pdst, stride) \ + { \ + uint32_t out0_m, out1_m; \ + uint8_t *pblk_4x2_m = (uint8_t *)(pdst); \ + \ + out0_m = __msa_copy_u_w((v4i32)in, 0); \ + out1_m = __msa_copy_u_w((v4i32)in, 1); \ + \ + SW(out0_m, pblk_4x2_m); \ + SW(out1_m, pblk_4x2_m + stride); \ + } /* Description : Store 4x4 byte block to destination memory from input vector Arguments : Inputs - in0, in1, pdst, stride @@ -504,35 +519,38 @@ 'Idx3' word element from input vector 'in0' is copied to the GP register and stored to (pdst + 3 * stride) */ -#define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) { \ - uint32_t out0_m, out1_m, out2_m, out3_m; \ - uint8_t *pblk_4x4_m = (uint8_t *)(pdst); \ - \ - out0_m = __msa_copy_u_w((v4i32)in0, idx0); \ - out1_m = __msa_copy_u_w((v4i32)in0, idx1); \ - out2_m = __msa_copy_u_w((v4i32)in1, idx2); \ - out3_m = __msa_copy_u_w((v4i32)in1, idx3); \ - \ - SW4(out0_m, out1_m, out2_m, out3_m, pblk_4x4_m, stride); \ -} -#define ST4x8_UB(in0, in1, pdst, stride) { \ - uint8_t *pblk_4x8 = (uint8_t *)(pdst); \ - \ - ST4x4_UB(in0, in0, 0, 1, 2, 3, pblk_4x8, stride); \ - ST4x4_UB(in1, in1, 0, 1, 2, 3, pblk_4x8 + 4 * stride, stride); \ -} +#define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) \ + { \ + uint32_t out0_m, out1_m, out2_m, out3_m; \ + uint8_t *pblk_4x4_m = (uint8_t *)(pdst); \ + \ + out0_m = __msa_copy_u_w((v4i32)in0, idx0); \ + out1_m = __msa_copy_u_w((v4i32)in0, idx1); \ + out2_m = __msa_copy_u_w((v4i32)in1, idx2); \ + out3_m = __msa_copy_u_w((v4i32)in1, idx3); \ + \ + SW4(out0_m, out1_m, out2_m, out3_m, pblk_4x4_m, stride); \ + } +#define ST4x8_UB(in0, in1, pdst, stride) \ + { \ + uint8_t *pblk_4x8 = (uint8_t *)(pdst); \ + \ + ST4x4_UB(in0, in0, 0, 1, 2, 3, pblk_4x8, stride); \ + ST4x4_UB(in1, in1, 0, 1, 2, 3, 
pblk_4x8 + 4 * stride, stride); \ + } /* Description : Store 8x1 byte block to destination memory from input vector Arguments : Inputs - in, pdst Details : Index 0 double word element from 'in' vector is copied to the GP register and stored to (pdst) */ -#define ST8x1_UB(in, pdst) { \ - uint64_t out0_m; \ - \ - out0_m = __msa_copy_u_d((v2i64)in, 0); \ - SD(out0_m, pdst); \ -} +#define ST8x1_UB(in, pdst) \ + { \ + uint64_t out0_m; \ + \ + out0_m = __msa_copy_u_d((v2i64)in, 0); \ + SD(out0_m, pdst); \ + } /* Description : Store 8x2 byte block to destination memory from input vector Arguments : Inputs - in, pdst, stride @@ -541,16 +559,17 @@ Index 1 double word element from 'in' vector is copied to the GP register and stored to (pdst + stride) */ -#define ST8x2_UB(in, pdst, stride) { \ - uint64_t out0_m, out1_m; \ - uint8_t *pblk_8x2_m = (uint8_t *)(pdst); \ - \ - out0_m = __msa_copy_u_d((v2i64)in, 0); \ - out1_m = __msa_copy_u_d((v2i64)in, 1); \ - \ - SD(out0_m, pblk_8x2_m); \ - SD(out1_m, pblk_8x2_m + stride); \ -} +#define ST8x2_UB(in, pdst, stride) \ + { \ + uint64_t out0_m, out1_m; \ + uint8_t *pblk_8x2_m = (uint8_t *)(pdst); \ + \ + out0_m = __msa_copy_u_d((v2i64)in, 0); \ + out1_m = __msa_copy_u_d((v2i64)in, 1); \ + \ + SD(out0_m, pblk_8x2_m); \ + SD(out1_m, pblk_8x2_m + stride); \ + } /* Description : Store 8x4 byte block to destination memory from input vectors @@ -564,17 +583,18 @@ Index 1 double word element from 'in1' vector is copied to the GP register and stored to (pdst + 3 * stride) */ -#define ST8x4_UB(in0, in1, pdst, stride) { \ - uint64_t out0_m, out1_m, out2_m, out3_m; \ - uint8_t *pblk_8x4_m = (uint8_t *)(pdst); \ - \ - out0_m = __msa_copy_u_d((v2i64)in0, 0); \ - out1_m = __msa_copy_u_d((v2i64)in0, 1); \ - out2_m = __msa_copy_u_d((v2i64)in1, 0); \ - out3_m = __msa_copy_u_d((v2i64)in1, 1); \ - \ - SD4(out0_m, out1_m, out2_m, out3_m, pblk_8x4_m, stride); \ -} +#define ST8x4_UB(in0, in1, pdst, stride) \ + { \ + uint64_t out0_m, out1_m, out2_m, 
out3_m; \ + uint8_t *pblk_8x4_m = (uint8_t *)(pdst); \ + \ + out0_m = __msa_copy_u_d((v2i64)in0, 0); \ + out1_m = __msa_copy_u_d((v2i64)in0, 1); \ + out2_m = __msa_copy_u_d((v2i64)in1, 0); \ + out3_m = __msa_copy_u_d((v2i64)in1, 1); \ + \ + SD4(out0_m, out1_m, out2_m, out3_m, pblk_8x4_m, stride); \ + } /* Description : average with rounding (in0 + in1 + 1) / 2. Arguments : Inputs - in0, in1, in2, in3, @@ -584,17 +604,19 @@ each unsigned byte element from 'in1' vector. Then the average with rounding is calculated and written to 'out0' */ -#define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_aver_u_b((v16u8)in0, (v16u8)in1); \ - out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3); \ -} +#define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_aver_u_b((v16u8)in0, (v16u8)in1); \ + out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3); \ + } #define AVER_UB2_UB(...) AVER_UB2(v16u8, __VA_ARGS__) -#define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) { \ - AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) \ - AVER_UB2(RTYPE, in4, in5, in6, in7, out2, out3) \ -} +#define AVER_UB4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) \ + AVER_UB2(RTYPE, in4, in5, in6, in7, out2, out3) \ + } #define AVER_UB4_UB(...) 
AVER_UB4(v16u8, __VA_ARGS__) /* Description : Immediate number of elements to slide with zero @@ -604,18 +626,20 @@ Details : Byte elements from 'zero_m' vector are slid into 'in0' by value specified in the 'slide_val' */ -#define SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val) { \ - v16i8 zero_m = { 0 }; \ - out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val); \ - out1 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in1, slide_val); \ -} +#define SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val) \ + { \ + v16i8 zero_m = { 0 }; \ + out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val); \ + out1 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in1, slide_val); \ + } #define SLDI_B2_0_SW(...) SLDI_B2_0(v4i32, __VA_ARGS__) -#define SLDI_B4_0(RTYPE, in0, in1, in2, in3, \ - out0, out1, out2, out3, slide_val) { \ - SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val); \ - SLDI_B2_0(RTYPE, in2, in3, out2, out3, slide_val); \ -} +#define SLDI_B4_0(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3, \ + slide_val) \ + { \ + SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val); \ + SLDI_B2_0(RTYPE, in2, in3, out2, out3, slide_val); \ + } #define SLDI_B4_0_UB(...) SLDI_B4_0(v16u8, __VA_ARGS__) /* Description : Immediate number of elements to slide @@ -625,18 +649,20 @@ Details : Byte elements from 'in0_0' vector are slid into 'in1_0' by value specified in the 'slide_val' */ -#define SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val) { \ - out0 = (RTYPE)__msa_sldi_b((v16i8)in0_0, (v16i8)in1_0, slide_val); \ - out1 = (RTYPE)__msa_sldi_b((v16i8)in0_1, (v16i8)in1_1, slide_val); \ -} +#define SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val) \ + { \ + out0 = (RTYPE)__msa_sldi_b((v16i8)in0_0, (v16i8)in1_0, slide_val); \ + out1 = (RTYPE)__msa_sldi_b((v16i8)in0_1, (v16i8)in1_1, slide_val); \ + } #define SLDI_B2_UB(...) SLDI_B2(v16u8, __VA_ARGS__) #define SLDI_B2_SH(...) 
SLDI_B2(v8i16, __VA_ARGS__) -#define SLDI_B3(RTYPE, in0_0, in0_1, in0_2, in1_0, in1_1, in1_2, \ - out0, out1, out2, slide_val) { \ - SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val) \ - out2 = (RTYPE)__msa_sldi_b((v16i8)in0_2, (v16i8)in1_2, slide_val); \ -} +#define SLDI_B3(RTYPE, in0_0, in0_1, in0_2, in1_0, in1_1, in1_2, out0, out1, \ + out2, slide_val) \ + { \ + SLDI_B2(RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val) \ + out2 = (RTYPE)__msa_sldi_b((v16i8)in0_2, (v16i8)in1_2, slide_val); \ + } #define SLDI_B3_SB(...) SLDI_B3(v16i8, __VA_ARGS__) #define SLDI_B3_UH(...) SLDI_B3(v8u16, __VA_ARGS__) @@ -647,19 +673,21 @@ Details : Byte elements from 'in0' & 'in1' are copied selectively to 'out0' as per control vector 'mask0' */ -#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) { \ - out0 = (RTYPE)__msa_vshf_b((v16i8)mask0, (v16i8)in1, (v16i8)in0); \ - out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \ -} +#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_vshf_b((v16i8)mask0, (v16i8)in1, (v16i8)in0); \ + out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \ + } #define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__) #define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__) #define VSHF_B2_UH(...) VSHF_B2(v8u16, __VA_ARGS__) -#define VSHF_B4(RTYPE, in0, in1, mask0, mask1, mask2, mask3, \ - out0, out1, out2, out3) { \ - VSHF_B2(RTYPE, in0, in1, in0, in1, mask0, mask1, out0, out1); \ - VSHF_B2(RTYPE, in0, in1, in0, in1, mask2, mask3, out2, out3); \ -} +#define VSHF_B4(RTYPE, in0, in1, mask0, mask1, mask2, mask3, out0, out1, out2, \ + out3) \ + { \ + VSHF_B2(RTYPE, in0, in1, in0, in1, mask0, mask1, out0, out1); \ + VSHF_B2(RTYPE, in0, in1, in0, in1, mask2, mask3, out2, out3); \ + } #define VSHF_B4_SB(...) VSHF_B4(v16i8, __VA_ARGS__) #define VSHF_B4_SH(...) 
VSHF_B4(v8i16, __VA_ARGS__) @@ -673,18 +701,19 @@ The multiplication result of adjacent odd-even elements are added together and written to the 'out0' vector */ -#define DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) { \ - out0 = (RTYPE)__msa_dotp_u_h((v16u8)mult0, (v16u8)cnst0); \ - out1 = (RTYPE)__msa_dotp_u_h((v16u8)mult1, (v16u8)cnst1); \ -} +#define DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_dotp_u_h((v16u8)mult0, (v16u8)cnst0); \ + out1 = (RTYPE)__msa_dotp_u_h((v16u8)mult1, (v16u8)cnst1); \ + } #define DOTP_UB2_UH(...) DOTP_UB2(v8u16, __VA_ARGS__) -#define DOTP_UB4(RTYPE, mult0, mult1, mult2, mult3, \ - cnst0, cnst1, cnst2, cnst3, \ - out0, out1, out2, out3) { \ - DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ - DOTP_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ -} +#define DOTP_UB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \ + cnst3, out0, out1, out2, out3) \ + { \ + DOTP_UB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + DOTP_UB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ + } #define DOTP_UB4_UH(...) DOTP_UB4(v8u16, __VA_ARGS__) /* Description : Dot product of byte vector elements @@ -697,17 +726,19 @@ The multiplication result of adjacent odd-even elements are added together and written to the 'out0' vector */ -#define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) { \ - out0 = (RTYPE)__msa_dotp_s_h((v16i8)mult0, (v16i8)cnst0); \ - out1 = (RTYPE)__msa_dotp_s_h((v16i8)mult1, (v16i8)cnst1); \ -} +#define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_dotp_s_h((v16i8)mult0, (v16i8)cnst0); \ + out1 = (RTYPE)__msa_dotp_s_h((v16i8)mult1, (v16i8)cnst1); \ + } #define DOTP_SB2_SH(...) 
DOTP_SB2(v8i16, __VA_ARGS__) -#define DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3, \ - cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) { \ - DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ - DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ -} +#define DOTP_SB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \ + cnst3, out0, out1, out2, out3) \ + { \ + DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + DOTP_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ + } #define DOTP_SB4_SH(...) DOTP_SB4(v8i16, __VA_ARGS__) /* Description : Dot product of halfword vector elements @@ -720,18 +751,19 @@ The multiplication result of adjacent odd-even elements are added together and written to the 'out0' vector */ -#define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) { \ - out0 = (RTYPE)__msa_dotp_s_w((v8i16)mult0, (v8i16)cnst0); \ - out1 = (RTYPE)__msa_dotp_s_w((v8i16)mult1, (v8i16)cnst1); \ -} +#define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_dotp_s_w((v8i16)mult0, (v8i16)cnst0); \ + out1 = (RTYPE)__msa_dotp_s_w((v8i16)mult1, (v8i16)cnst1); \ + } #define DOTP_SH2_SW(...) DOTP_SH2(v4i32, __VA_ARGS__) -#define DOTP_SH4(RTYPE, mult0, mult1, mult2, mult3, \ - cnst0, cnst1, cnst2, cnst3, \ - out0, out1, out2, out3) { \ - DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ - DOTP_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ -} +#define DOTP_SH4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \ + cnst3, out0, out1, out2, out3) \ + { \ + DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + DOTP_SH2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ + } #define DOTP_SH4_SW(...) 
DOTP_SH4(v4i32, __VA_ARGS__) /* Description : Dot product of word vector elements @@ -744,10 +776,11 @@ The multiplication result of adjacent odd-even elements are added together and written to the 'out0' vector */ -#define DOTP_SW2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) { \ - out0 = (RTYPE)__msa_dotp_s_d((v4i32)mult0, (v4i32)cnst0); \ - out1 = (RTYPE)__msa_dotp_s_d((v4i32)mult1, (v4i32)cnst1); \ -} +#define DOTP_SW2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_dotp_s_d((v4i32)mult0, (v4i32)cnst0); \ + out1 = (RTYPE)__msa_dotp_s_d((v4i32)mult1, (v4i32)cnst1); \ + } #define DOTP_SW2_SD(...) DOTP_SW2(v2i64, __VA_ARGS__) /* Description : Dot product & addition of byte vector elements @@ -760,17 +793,19 @@ The multiplication result of adjacent odd-even elements are added to the 'out0' vector */ -#define DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) { \ - out0 = (RTYPE)__msa_dpadd_s_h((v8i16)out0, (v16i8)mult0, (v16i8)cnst0); \ - out1 = (RTYPE)__msa_dpadd_s_h((v8i16)out1, (v16i8)mult1, (v16i8)cnst1); \ -} +#define DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_dpadd_s_h((v8i16)out0, (v16i8)mult0, (v16i8)cnst0); \ + out1 = (RTYPE)__msa_dpadd_s_h((v8i16)out1, (v16i8)mult1, (v16i8)cnst1); \ + } #define DPADD_SB2_SH(...) DPADD_SB2(v8i16, __VA_ARGS__) -#define DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3, \ - cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3) { \ - DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ - DPADD_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ -} +#define DPADD_SB4(RTYPE, mult0, mult1, mult2, mult3, cnst0, cnst1, cnst2, \ + cnst3, out0, out1, out2, out3) \ + { \ + DPADD_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1); \ + DPADD_SB2(RTYPE, mult2, mult3, cnst2, cnst3, out2, out3); \ + } #define DPADD_SB4_SH(...) 
DPADD_SB4(v8i16, __VA_ARGS__) /* Description : Dot product & addition of halfword vector elements @@ -783,10 +818,11 @@ The multiplication result of adjacent odd-even elements are added to the 'out0' vector */ -#define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) { \ - out0 = (RTYPE)__msa_dpadd_s_w((v4i32)out0, (v8i16)mult0, (v8i16)cnst0); \ - out1 = (RTYPE)__msa_dpadd_s_w((v4i32)out1, (v8i16)mult1, (v8i16)cnst1); \ -} +#define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_dpadd_s_w((v4i32)out0, (v8i16)mult0, (v8i16)cnst0); \ + out1 = (RTYPE)__msa_dpadd_s_w((v4i32)out1, (v8i16)mult1, (v8i16)cnst1); \ + } #define DPADD_SH2_SW(...) DPADD_SH2(v4i32, __VA_ARGS__) /* Description : Dot product & addition of double word vector elements @@ -799,10 +835,11 @@ The multiplication result of adjacent odd-even elements are added to the 'out0' vector */ -#define DPADD_SD2(RTYPE, mult0, mult1, out0, out1) { \ - out0 = (RTYPE)__msa_dpadd_s_d((v2i64)out0, (v4i32)mult0, (v4i32)mult0); \ - out1 = (RTYPE)__msa_dpadd_s_d((v2i64)out1, (v4i32)mult1, (v4i32)mult1); \ -} +#define DPADD_SD2(RTYPE, mult0, mult1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_dpadd_s_d((v2i64)out0, (v4i32)mult0, (v4i32)mult0); \ + out1 = (RTYPE)__msa_dpadd_s_d((v2i64)out1, (v4i32)mult1, (v4i32)mult1); \ + } #define DPADD_SD2_SD(...) DPADD_SD2(v2i64, __VA_ARGS__) /* Description : Minimum values between unsigned elements of @@ -813,16 +850,18 @@ Details : Minimum of unsigned halfword element values from 'in0' and 'min_vec' are written to output vector 'in0' */ -#define MIN_UH2(RTYPE, in0, in1, min_vec) { \ - in0 = (RTYPE)__msa_min_u_h((v8u16)in0, min_vec); \ - in1 = (RTYPE)__msa_min_u_h((v8u16)in1, min_vec); \ -} +#define MIN_UH2(RTYPE, in0, in1, min_vec) \ + { \ + in0 = (RTYPE)__msa_min_u_h((v8u16)in0, min_vec); \ + in1 = (RTYPE)__msa_min_u_h((v8u16)in1, min_vec); \ + } #define MIN_UH2_UH(...) 
MIN_UH2(v8u16, __VA_ARGS__) -#define MIN_UH4(RTYPE, in0, in1, in2, in3, min_vec) { \ - MIN_UH2(RTYPE, in0, in1, min_vec); \ - MIN_UH2(RTYPE, in2, in3, min_vec); \ -} +#define MIN_UH4(RTYPE, in0, in1, in2, in3, min_vec) \ + { \ + MIN_UH2(RTYPE, in0, in1, min_vec); \ + MIN_UH2(RTYPE, in2, in3, min_vec); \ + } #define MIN_UH4_UH(...) MIN_UH4(v8u16, __VA_ARGS__) /* Description : Clips all signed halfword elements of input vector @@ -831,22 +870,25 @@ Output - out_m Return Type - signed halfword */ -#define CLIP_SH_0_255(in) ({ \ - v8i16 max_m = __msa_ldi_h(255); \ - v8i16 out_m; \ - \ - out_m = __msa_maxi_s_h((v8i16)in, 0); \ - out_m = __msa_min_s_h((v8i16)max_m, (v8i16)out_m); \ - out_m; \ -}) -#define CLIP_SH2_0_255(in0, in1) { \ - in0 = CLIP_SH_0_255(in0); \ - in1 = CLIP_SH_0_255(in1); \ -} -#define CLIP_SH4_0_255(in0, in1, in2, in3) { \ - CLIP_SH2_0_255(in0, in1); \ - CLIP_SH2_0_255(in2, in3); \ -} +#define CLIP_SH_0_255(in) \ + ({ \ + v8i16 max_m = __msa_ldi_h(255); \ + v8i16 out_m; \ + \ + out_m = __msa_maxi_s_h((v8i16)in, 0); \ + out_m = __msa_min_s_h((v8i16)max_m, (v8i16)out_m); \ + out_m; \ + }) +#define CLIP_SH2_0_255(in0, in1) \ + { \ + in0 = CLIP_SH_0_255(in0); \ + in1 = CLIP_SH_0_255(in1); \ + } +#define CLIP_SH4_0_255(in0, in1, in2, in3) \ + { \ + CLIP_SH2_0_255(in0, in1); \ + CLIP_SH2_0_255(in2, in3); \ + } /* Description : Horizontal addition of 4 signed word elements of input vector Arguments : Input - in (signed word vector) @@ -855,16 +897,17 @@ Details : 4 signed word elements of 'in' vector are added together and the resulting integer sum is returned */ -#define HADD_SW_S32(in) ({ \ - v2i64 res0_m, res1_m; \ - int32_t sum_m; \ - \ - res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in); \ - res1_m = __msa_splati_d(res0_m, 1); \ - res0_m = res0_m + res1_m; \ - sum_m = __msa_copy_s_w((v4i32)res0_m, 0); \ - sum_m; \ -}) +#define HADD_SW_S32(in) \ + ({ \ + v2i64 res0_m, res1_m; \ + int32_t sum_m; \ + \ + res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in); \ + 
res1_m = __msa_splati_d(res0_m, 1); \ + res0_m = res0_m + res1_m; \ + sum_m = __msa_copy_s_w((v4i32)res0_m, 0); \ + sum_m; \ + }) /* Description : Horizontal addition of 8 unsigned halfword elements Arguments : Inputs - in (unsigned halfword vector) @@ -873,18 +916,19 @@ Details : 8 unsigned halfword elements of input vector are added together and the resulting integer sum is returned */ -#define HADD_UH_U32(in) ({ \ - v4u32 res_m; \ - v2u64 res0_m, res1_m; \ - uint32_t sum_m; \ - \ - res_m = __msa_hadd_u_w((v8u16)in, (v8u16)in); \ - res0_m = __msa_hadd_u_d(res_m, res_m); \ - res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1); \ - res0_m = res0_m + res1_m; \ - sum_m = __msa_copy_u_w((v4i32)res0_m, 0); \ - sum_m; \ -}) +#define HADD_UH_U32(in) \ + ({ \ + v4u32 res_m; \ + v2u64 res0_m, res1_m; \ + uint32_t sum_m; \ + \ + res_m = __msa_hadd_u_w((v8u16)in, (v8u16)in); \ + res0_m = __msa_hadd_u_d(res_m, res_m); \ + res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1); \ + res0_m = res0_m + res1_m; \ + sum_m = __msa_copy_u_w((v4i32)res0_m, 0); \ + sum_m; \ + }) /* Description : Horizontal addition of unsigned byte vector elements Arguments : Inputs - in0, in1 @@ -894,16 +938,18 @@ even unsigned byte element from 'in0' (pairwise) and the halfword result is written to 'out0' */ -#define HADD_UB2(RTYPE, in0, in1, out0, out1) { \ - out0 = (RTYPE)__msa_hadd_u_h((v16u8)in0, (v16u8)in0); \ - out1 = (RTYPE)__msa_hadd_u_h((v16u8)in1, (v16u8)in1); \ -} +#define HADD_UB2(RTYPE, in0, in1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_hadd_u_h((v16u8)in0, (v16u8)in0); \ + out1 = (RTYPE)__msa_hadd_u_h((v16u8)in1, (v16u8)in1); \ + } #define HADD_UB2_UH(...) 
HADD_UB2(v8u16, __VA_ARGS__) -#define HADD_UB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) { \ - HADD_UB2(RTYPE, in0, in1, out0, out1); \ - HADD_UB2(RTYPE, in2, in3, out2, out3); \ -} +#define HADD_UB4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + HADD_UB2(RTYPE, in0, in1, out0, out1); \ + HADD_UB2(RTYPE, in2, in3, out2, out3); \ + } #define HADD_UB4_UH(...) HADD_UB4(v8u16, __VA_ARGS__) /* Description : Horizontal subtraction of unsigned byte vector elements @@ -914,10 +960,11 @@ even unsigned byte element from 'in0' (pairwise) and the halfword result is written to 'out0' */ -#define HSUB_UB2(RTYPE, in0, in1, out0, out1) { \ - out0 = (RTYPE)__msa_hsub_u_h((v16u8)in0, (v16u8)in0); \ - out1 = (RTYPE)__msa_hsub_u_h((v16u8)in1, (v16u8)in1); \ -} +#define HSUB_UB2(RTYPE, in0, in1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_hsub_u_h((v16u8)in0, (v16u8)in0); \ + out1 = (RTYPE)__msa_hsub_u_h((v16u8)in1, (v16u8)in1); \ + } #define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__) /* Description : SAD (Sum of Absolute Difference) @@ -928,18 +975,19 @@ 'ref0' is calculated and preserved in 'diff0'. Then even-odd pairs are added together to generate 8 halfword results. 
*/ -#define SAD_UB2_UH(in0, in1, ref0, ref1) ({ \ - v16u8 diff0_m, diff1_m; \ - v8u16 sad_m = { 0 }; \ - \ - diff0_m = __msa_asub_u_b((v16u8)in0, (v16u8)ref0); \ - diff1_m = __msa_asub_u_b((v16u8)in1, (v16u8)ref1); \ - \ - sad_m += __msa_hadd_u_h((v16u8)diff0_m, (v16u8)diff0_m); \ - sad_m += __msa_hadd_u_h((v16u8)diff1_m, (v16u8)diff1_m); \ - \ - sad_m; \ -}) +#define SAD_UB2_UH(in0, in1, ref0, ref1) \ + ({ \ + v16u8 diff0_m, diff1_m; \ + v8u16 sad_m = { 0 }; \ + \ + diff0_m = __msa_asub_u_b((v16u8)in0, (v16u8)ref0); \ + diff1_m = __msa_asub_u_b((v16u8)in1, (v16u8)ref1); \ + \ + sad_m += __msa_hadd_u_h((v16u8)diff0_m, (v16u8)diff0_m); \ + sad_m += __msa_hadd_u_h((v16u8)diff1_m, (v16u8)diff1_m); \ + \ + sad_m; \ + }) /* Description : Horizontal subtraction of signed halfword vector elements Arguments : Inputs - in0, in1 @@ -949,10 +997,11 @@ even signed halfword element from 'in0' (pairwise) and the word result is written to 'out0' */ -#define HSUB_UH2(RTYPE, in0, in1, out0, out1) { \ - out0 = (RTYPE)__msa_hsub_s_w((v8i16)in0, (v8i16)in0); \ - out1 = (RTYPE)__msa_hsub_s_w((v8i16)in1, (v8i16)in1); \ -} +#define HSUB_UH2(RTYPE, in0, in1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_hsub_s_w((v8i16)in0, (v8i16)in0); \ + out1 = (RTYPE)__msa_hsub_s_w((v8i16)in1, (v8i16)in1); \ + } #define HSUB_UH2_SW(...) HSUB_UH2(v4i32, __VA_ARGS__) /* Description : Set element n input vector to GPR value @@ -961,25 +1010,28 @@ Return Type - as per RTYPE Details : Set element 0 in vector 'out' to value specified in 'in0' */ -#define INSERT_W2(RTYPE, in0, in1, out) { \ - out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \ - out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \ -} +#define INSERT_W2(RTYPE, in0, in1, out) \ + { \ + out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \ + out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \ + } #define INSERT_W2_SB(...) 
INSERT_W2(v16i8, __VA_ARGS__) -#define INSERT_W4(RTYPE, in0, in1, in2, in3, out) { \ - out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \ - out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \ - out = (RTYPE)__msa_insert_w((v4i32)out, 2, in2); \ - out = (RTYPE)__msa_insert_w((v4i32)out, 3, in3); \ -} +#define INSERT_W4(RTYPE, in0, in1, in2, in3, out) \ + { \ + out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \ + out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \ + out = (RTYPE)__msa_insert_w((v4i32)out, 2, in2); \ + out = (RTYPE)__msa_insert_w((v4i32)out, 3, in3); \ + } #define INSERT_W4_UB(...) INSERT_W4(v16u8, __VA_ARGS__) #define INSERT_W4_SB(...) INSERT_W4(v16i8, __VA_ARGS__) -#define INSERT_D2(RTYPE, in0, in1, out) { \ - out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \ - out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \ -} +#define INSERT_D2(RTYPE, in0, in1, out) \ + { \ + out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \ + out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \ + } #define INSERT_D2_UB(...) INSERT_D2(v16u8, __VA_ARGS__) #define INSERT_D2_SB(...) INSERT_D2(v16i8, __VA_ARGS__) @@ -990,10 +1042,11 @@ Details : Even byte elements of 'in0' and 'in1' are interleaved and written to 'out0' */ -#define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_ilvev_b((v16i8)in1, (v16i8)in0); \ - out1 = (RTYPE)__msa_ilvev_b((v16i8)in3, (v16i8)in2); \ -} +#define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvev_b((v16i8)in1, (v16i8)in0); \ + out1 = (RTYPE)__msa_ilvev_b((v16i8)in3, (v16i8)in2); \ + } #define ILVEV_B2_UB(...) ILVEV_B2(v16u8, __VA_ARGS__) #define ILVEV_B2_SH(...) 
ILVEV_B2(v8i16, __VA_ARGS__) @@ -1004,10 +1057,11 @@ Details : Even halfword elements of 'in0' and 'in1' are interleaved and written to 'out0' */ -#define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0); \ - out1 = (RTYPE)__msa_ilvev_h((v8i16)in3, (v8i16)in2); \ -} +#define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0); \ + out1 = (RTYPE)__msa_ilvev_h((v8i16)in3, (v8i16)in2); \ + } #define ILVEV_H2_UB(...) ILVEV_H2(v16u8, __VA_ARGS__) #define ILVEV_H2_SH(...) ILVEV_H2(v8i16, __VA_ARGS__) #define ILVEV_H2_SW(...) ILVEV_H2(v4i32, __VA_ARGS__) @@ -1019,10 +1073,11 @@ Details : Even word elements of 'in0' and 'in1' are interleaved and written to 'out0' */ -#define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0); \ - out1 = (RTYPE)__msa_ilvev_w((v4i32)in3, (v4i32)in2); \ -} +#define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0); \ + out1 = (RTYPE)__msa_ilvev_w((v4i32)in3, (v4i32)in2); \ + } #define ILVEV_W2_SB(...) ILVEV_W2(v16i8, __VA_ARGS__) /* Description : Interleave even double word elements from vectors @@ -1032,10 +1087,11 @@ Details : Even double word elements of 'in0' and 'in1' are interleaved and written to 'out0' */ -#define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_ilvev_d((v2i64)in1, (v2i64)in0); \ - out1 = (RTYPE)__msa_ilvev_d((v2i64)in3, (v2i64)in2); \ -} +#define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvev_d((v2i64)in1, (v2i64)in0); \ + out1 = (RTYPE)__msa_ilvev_d((v2i64)in3, (v2i64)in2); \ + } #define ILVEV_D2_UB(...) ILVEV_D2(v16u8, __VA_ARGS__) /* Description : Interleave left half of byte elements from vectors @@ -1045,21 +1101,24 @@ Details : Left half of byte elements of 'in0' and 'in1' are interleaved and written to 'out0'. 
*/ -#define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \ - out1 = (RTYPE)__msa_ilvl_b((v16i8)in2, (v16i8)in3); \ -} +#define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \ + out1 = (RTYPE)__msa_ilvl_b((v16i8)in2, (v16i8)in3); \ + } #define ILVL_B2_UB(...) ILVL_B2(v16u8, __VA_ARGS__) #define ILVL_B2_SB(...) ILVL_B2(v16i8, __VA_ARGS__) #define ILVL_B2_UH(...) ILVL_B2(v8u16, __VA_ARGS__) #define ILVL_B2_SH(...) ILVL_B2(v8i16, __VA_ARGS__) -#define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) { \ - ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ - ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ -} +#define ILVL_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ + ILVL_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ + } #define ILVL_B4_SB(...) ILVL_B4(v16i8, __VA_ARGS__) +#define ILVL_B4_SH(...) ILVL_B4(v8i16, __VA_ARGS__) #define ILVL_B4_UH(...) ILVL_B4(v8u16, __VA_ARGS__) /* Description : Interleave left half of halfword elements from vectors @@ -1069,11 +1128,13 @@ Details : Left half of halfword elements of 'in0' and 'in1' are interleaved and written to 'out0'. */ -#define ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \ - out1 = (RTYPE)__msa_ilvl_h((v8i16)in2, (v8i16)in3); \ -} +#define ILVL_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \ + out1 = (RTYPE)__msa_ilvl_h((v8i16)in2, (v8i16)in3); \ + } #define ILVL_H2_SH(...) ILVL_H2(v8i16, __VA_ARGS__) +#define ILVL_H2_SW(...) 
ILVL_H2(v4i32, __VA_ARGS__) /* Description : Interleave left half of word elements from vectors Arguments : Inputs - in0, in1, in2, in3 @@ -1082,10 +1143,11 @@ Details : Left half of word elements of 'in0' and 'in1' are interleaved and written to 'out0'. */ -#define ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \ - out1 = (RTYPE)__msa_ilvl_w((v4i32)in2, (v4i32)in3); \ -} +#define ILVL_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \ + out1 = (RTYPE)__msa_ilvl_w((v4i32)in2, (v4i32)in3); \ + } #define ILVL_W2_UB(...) ILVL_W2(v16u8, __VA_ARGS__) #define ILVL_W2_SH(...) ILVL_W2(v8i16, __VA_ARGS__) @@ -1096,33 +1158,36 @@ Details : Right half of byte elements of 'in0' and 'in1' are interleaved and written to out0. */ -#define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \ - out1 = (RTYPE)__msa_ilvr_b((v16i8)in2, (v16i8)in3); \ -} +#define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \ + out1 = (RTYPE)__msa_ilvr_b((v16i8)in2, (v16i8)in3); \ + } #define ILVR_B2_UB(...) ILVR_B2(v16u8, __VA_ARGS__) #define ILVR_B2_SB(...) ILVR_B2(v16i8, __VA_ARGS__) #define ILVR_B2_UH(...) ILVR_B2(v8u16, __VA_ARGS__) #define ILVR_B2_SH(...) ILVR_B2(v8i16, __VA_ARGS__) -#define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) { \ - ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ - ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ -} +#define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ + ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ + } #define ILVR_B4_UB(...) ILVR_B4(v16u8, __VA_ARGS__) #define ILVR_B4_SB(...) ILVR_B4(v16i8, __VA_ARGS__) #define ILVR_B4_UH(...) ILVR_B4(v8u16, __VA_ARGS__) #define ILVR_B4_SH(...) 
ILVR_B4(v8i16, __VA_ARGS__) -#define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - in8, in9, in10, in11, in12, in13, in14, in15, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3); \ - ILVR_B4(RTYPE, in8, in9, in10, in11, in12, in13, in14, in15, \ - out4, out5, out6, out7); \ -} +#define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, \ + in11, in12, in13, in14, in15, out0, out1, out2, out3, out4, \ + out5, out6, out7) \ + { \ + ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \ + out3); \ + ILVR_B4(RTYPE, in8, in9, in10, in11, in12, in13, in14, in15, out4, out5, \ + out6, out7); \ + } #define ILVR_B8_UH(...) ILVR_B8(v8u16, __VA_ARGS__) /* Description : Interleave right half of halfword elements from vectors @@ -1132,31 +1197,36 @@ Details : Right half of halfword elements of 'in0' and 'in1' are interleaved and written to 'out0'. */ -#define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \ - out1 = (RTYPE)__msa_ilvr_h((v8i16)in2, (v8i16)in3); \ -} +#define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \ + out1 = (RTYPE)__msa_ilvr_h((v8i16)in2, (v8i16)in3); \ + } #define ILVR_H2_SH(...) ILVR_H2(v8i16, __VA_ARGS__) +#define ILVR_H2_SW(...) ILVR_H2(v4i32, __VA_ARGS__) -#define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) { \ - ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \ - ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \ -} +#define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \ + ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \ + } #define ILVR_H4_SH(...) 
ILVR_H4(v8i16, __VA_ARGS__) -#define ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \ - out1 = (RTYPE)__msa_ilvr_w((v4i32)in2, (v4i32)in3); \ -} +#define ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \ + out1 = (RTYPE)__msa_ilvr_w((v4i32)in2, (v4i32)in3); \ + } #define ILVR_W2_UB(...) ILVR_W2(v16u8, __VA_ARGS__) #define ILVR_W2_SH(...) ILVR_W2(v8i16, __VA_ARGS__) -#define ILVR_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) { \ - ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1); \ - ILVR_W2(RTYPE, in4, in5, in6, in7, out2, out3); \ -} +#define ILVR_W4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + ILVR_W2(RTYPE, in0, in1, in2, in3, out0, out1); \ + ILVR_W2(RTYPE, in4, in5, in6, in7, out2, out3); \ + } #define ILVR_W4_UB(...) ILVR_W4(v16u8, __VA_ARGS__) /* Description : Interleave right half of double word elements from vectors @@ -1166,25 +1236,28 @@ Details : Right half of double word elements of 'in0' and 'in1' are interleaved and written to 'out0'. */ -#define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_ilvr_d((v2i64)(in0), (v2i64)(in1)); \ - out1 = (RTYPE)__msa_ilvr_d((v2i64)(in2), (v2i64)(in3)); \ -} +#define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvr_d((v2i64)(in0), (v2i64)(in1)); \ + out1 = (RTYPE)__msa_ilvr_d((v2i64)(in2), (v2i64)(in3)); \ + } #define ILVR_D2_UB(...) ILVR_D2(v16u8, __VA_ARGS__) #define ILVR_D2_SB(...) ILVR_D2(v16i8, __VA_ARGS__) #define ILVR_D2_SH(...) 
ILVR_D2(v8i16, __VA_ARGS__) -#define ILVR_D3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) { \ - ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \ - out2 = (RTYPE)__msa_ilvr_d((v2i64)(in4), (v2i64)(in5)); \ -} +#define ILVR_D3(RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2) \ + { \ + ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \ + out2 = (RTYPE)__msa_ilvr_d((v2i64)(in4), (v2i64)(in5)); \ + } #define ILVR_D3_SB(...) ILVR_D3(v16i8, __VA_ARGS__) -#define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) { \ - ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \ - ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \ -} +#define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \ + ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \ + } #define ILVR_D4_SB(...) ILVR_D4(v16i8, __VA_ARGS__) #define ILVR_D4_UB(...) ILVR_D4(v16u8, __VA_ARGS__) @@ -1195,26 +1268,30 @@ Details : Right half of byte elements from 'in0' and 'in1' are interleaved and written to 'out0' */ -#define ILVRL_B2(RTYPE, in0, in1, out0, out1) { \ - out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \ - out1 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \ -} +#define ILVRL_B2(RTYPE, in0, in1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \ + out1 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \ + } #define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__) #define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__) #define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__) #define ILVRL_B2_SH(...) 
ILVRL_B2(v8i16, __VA_ARGS__) -#define ILVRL_H2(RTYPE, in0, in1, out0, out1) { \ - out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \ - out1 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \ -} +#define ILVRL_H2(RTYPE, in0, in1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \ + out1 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \ + } #define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__) #define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__) -#define ILVRL_W2(RTYPE, in0, in1, out0, out1) { \ - out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \ - out1 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \ -} +#define ILVRL_W2(RTYPE, in0, in1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \ + out1 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \ + } +#define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__) #define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__) #define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__) @@ -1228,16 +1305,18 @@ value generated with (sat_val + 1) bit range. The results are written in place */ -#define SAT_UH2(RTYPE, in0, in1, sat_val) { \ - in0 = (RTYPE)__msa_sat_u_h((v8u16)in0, sat_val); \ - in1 = (RTYPE)__msa_sat_u_h((v8u16)in1, sat_val); \ -} +#define SAT_UH2(RTYPE, in0, in1, sat_val) \ + { \ + in0 = (RTYPE)__msa_sat_u_h((v8u16)in0, sat_val); \ + in1 = (RTYPE)__msa_sat_u_h((v8u16)in1, sat_val); \ + } #define SAT_UH2_UH(...) SAT_UH2(v8u16, __VA_ARGS__) -#define SAT_UH4(RTYPE, in0, in1, in2, in3, sat_val) { \ - SAT_UH2(RTYPE, in0, in1, sat_val); \ - SAT_UH2(RTYPE, in2, in3, sat_val) \ -} +#define SAT_UH4(RTYPE, in0, in1, in2, in3, sat_val) \ + { \ + SAT_UH2(RTYPE, in0, in1, sat_val); \ + SAT_UH2(RTYPE, in2, in3, sat_val) \ + } #define SAT_UH4_UH(...) 
SAT_UH4(v8u16, __VA_ARGS__) /* Description : Saturate the halfword element values to the max @@ -1250,16 +1329,18 @@ value generated with (sat_val + 1) bit range The results are written in place */ -#define SAT_SH2(RTYPE, in0, in1, sat_val) { \ - in0 = (RTYPE)__msa_sat_s_h((v8i16)in0, sat_val); \ - in1 = (RTYPE)__msa_sat_s_h((v8i16)in1, sat_val); \ -} +#define SAT_SH2(RTYPE, in0, in1, sat_val) \ + { \ + in0 = (RTYPE)__msa_sat_s_h((v8i16)in0, sat_val); \ + in1 = (RTYPE)__msa_sat_s_h((v8i16)in1, sat_val); \ + } #define SAT_SH2_SH(...) SAT_SH2(v8i16, __VA_ARGS__) -#define SAT_SH4(RTYPE, in0, in1, in2, in3, sat_val) { \ - SAT_SH2(RTYPE, in0, in1, sat_val); \ - SAT_SH2(RTYPE, in2, in3, sat_val); \ -} +#define SAT_SH4(RTYPE, in0, in1, in2, in3, sat_val) \ + { \ + SAT_SH2(RTYPE, in0, in1, sat_val); \ + SAT_SH2(RTYPE, in2, in3, sat_val); \ + } #define SAT_SH4_SH(...) SAT_SH4(v8i16, __VA_ARGS__) /* Description : Indexed halfword element values are replicated to all @@ -1271,17 +1352,18 @@ elements in 'out0' vector Valid index range for halfword operation is 0-7 */ -#define SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1) { \ - out0 = (RTYPE)__msa_splati_h((v8i16)in, idx0); \ - out1 = (RTYPE)__msa_splati_h((v8i16)in, idx1); \ -} +#define SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1) \ + { \ + out0 = (RTYPE)__msa_splati_h((v8i16)in, idx0); \ + out1 = (RTYPE)__msa_splati_h((v8i16)in, idx1); \ + } #define SPLATI_H2_SH(...) SPLATI_H2(v8i16, __VA_ARGS__) -#define SPLATI_H4(RTYPE, in, idx0, idx1, idx2, idx3, \ - out0, out1, out2, out3) { \ - SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1); \ - SPLATI_H2(RTYPE, in, idx2, idx3, out2, out3); \ -} +#define SPLATI_H4(RTYPE, in, idx0, idx1, idx2, idx3, out0, out1, out2, out3) \ + { \ + SPLATI_H2(RTYPE, in, idx0, idx1, out0, out1); \ + SPLATI_H2(RTYPE, in, idx2, idx3, out2, out3); \ + } #define SPLATI_H4_SB(...) SPLATI_H4(v16i8, __VA_ARGS__) #define SPLATI_H4_SH(...) 
SPLATI_H4(v8i16, __VA_ARGS__) @@ -1293,19 +1375,21 @@ 'out0' & even byte elements of 'in1' are copied to the right half of 'out0'. */ -#define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_pckev_b((v16i8)in0, (v16i8)in1); \ - out1 = (RTYPE)__msa_pckev_b((v16i8)in2, (v16i8)in3); \ -} +#define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_pckev_b((v16i8)in0, (v16i8)in1); \ + out1 = (RTYPE)__msa_pckev_b((v16i8)in2, (v16i8)in3); \ + } #define PCKEV_B2_SB(...) PCKEV_B2(v16i8, __VA_ARGS__) #define PCKEV_B2_UB(...) PCKEV_B2(v16u8, __VA_ARGS__) #define PCKEV_B2_SH(...) PCKEV_B2(v8i16, __VA_ARGS__) -#define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) { \ - PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ - PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ -} +#define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ + PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ + } #define PCKEV_B4_SB(...) PCKEV_B4(v16i8, __VA_ARGS__) #define PCKEV_B4_UB(...) PCKEV_B4(v16u8, __VA_ARGS__) #define PCKEV_B4_SH(...) PCKEV_B4(v8i16, __VA_ARGS__) @@ -1318,18 +1402,20 @@ 'out0' & even halfword elements of 'in1' are copied to the right half of 'out0'. */ -#define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_pckev_h((v8i16)in0, (v8i16)in1); \ - out1 = (RTYPE)__msa_pckev_h((v8i16)in2, (v8i16)in3); \ -} +#define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_pckev_h((v8i16)in0, (v8i16)in1); \ + out1 = (RTYPE)__msa_pckev_h((v8i16)in2, (v8i16)in3); \ + } #define PCKEV_H2_SH(...) PCKEV_H2(v8i16, __VA_ARGS__) #define PCKEV_H2_SW(...) 
PCKEV_H2(v4i32, __VA_ARGS__) -#define PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) { \ - PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1); \ - PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3); \ -} +#define PCKEV_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1); \ + PCKEV_H2(RTYPE, in4, in5, in6, in7, out2, out3); \ + } #define PCKEV_H4_SH(...) PCKEV_H4(v8i16, __VA_ARGS__) /* Description : Pack even double word elements of vector pairs @@ -1340,18 +1426,20 @@ 'out0' & even double elements of 'in1' are copied to the right half of 'out0'. */ -#define PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_pckev_d((v2i64)in0, (v2i64)in1); \ - out1 = (RTYPE)__msa_pckev_d((v2i64)in2, (v2i64)in3); \ -} +#define PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_pckev_d((v2i64)in0, (v2i64)in1); \ + out1 = (RTYPE)__msa_pckev_d((v2i64)in2, (v2i64)in3); \ + } #define PCKEV_D2_UB(...) PCKEV_D2(v16u8, __VA_ARGS__) #define PCKEV_D2_SH(...) PCKEV_D2(v8i16, __VA_ARGS__) -#define PCKEV_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) { \ - PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1); \ - PCKEV_D2(RTYPE, in4, in5, in6, in7, out2, out3); \ -} +#define PCKEV_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + PCKEV_D2(RTYPE, in0, in1, in2, in3, out0, out1); \ + PCKEV_D2(RTYPE, in4, in5, in6, in7, out2, out3); \ + } #define PCKEV_D4_UB(...) PCKEV_D4(v16u8, __VA_ARGS__) /* Description : Each byte element is logically xor'ed with immediate 128 @@ -1361,30 +1449,34 @@ Details : Each unsigned byte element from input vector 'in0' is logically xor'ed with 128 and the result is stored in-place. 
*/ -#define XORI_B2_128(RTYPE, in0, in1) { \ - in0 = (RTYPE)__msa_xori_b((v16u8)in0, 128); \ - in1 = (RTYPE)__msa_xori_b((v16u8)in1, 128); \ -} +#define XORI_B2_128(RTYPE, in0, in1) \ + { \ + in0 = (RTYPE)__msa_xori_b((v16u8)in0, 128); \ + in1 = (RTYPE)__msa_xori_b((v16u8)in1, 128); \ + } #define XORI_B2_128_UB(...) XORI_B2_128(v16u8, __VA_ARGS__) #define XORI_B2_128_SB(...) XORI_B2_128(v16i8, __VA_ARGS__) -#define XORI_B3_128(RTYPE, in0, in1, in2) { \ - XORI_B2_128(RTYPE, in0, in1); \ - in2 = (RTYPE)__msa_xori_b((v16u8)in2, 128); \ -} +#define XORI_B3_128(RTYPE, in0, in1, in2) \ + { \ + XORI_B2_128(RTYPE, in0, in1); \ + in2 = (RTYPE)__msa_xori_b((v16u8)in2, 128); \ + } #define XORI_B3_128_SB(...) XORI_B3_128(v16i8, __VA_ARGS__) -#define XORI_B4_128(RTYPE, in0, in1, in2, in3) { \ - XORI_B2_128(RTYPE, in0, in1); \ - XORI_B2_128(RTYPE, in2, in3); \ -} +#define XORI_B4_128(RTYPE, in0, in1, in2, in3) \ + { \ + XORI_B2_128(RTYPE, in0, in1); \ + XORI_B2_128(RTYPE, in2, in3); \ + } #define XORI_B4_128_UB(...) XORI_B4_128(v16u8, __VA_ARGS__) #define XORI_B4_128_SB(...) XORI_B4_128(v16i8, __VA_ARGS__) -#define XORI_B7_128(RTYPE, in0, in1, in2, in3, in4, in5, in6) { \ - XORI_B4_128(RTYPE, in0, in1, in2, in3); \ - XORI_B3_128(RTYPE, in4, in5, in6); \ -} +#define XORI_B7_128(RTYPE, in0, in1, in2, in3, in4, in5, in6) \ + { \ + XORI_B4_128(RTYPE, in0, in1, in2, in3); \ + XORI_B3_128(RTYPE, in4, in5, in6); \ + } #define XORI_B7_128_SB(...) XORI_B7_128(v16i8, __VA_ARGS__) /* Description : Average of signed halfword elements -> (a + b) / 2 @@ -1396,13 +1488,14 @@ in one extra bit in the result. 
The result is then divided by 2 and written to 'out0' */ -#define AVE_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) { \ - out0 = (RTYPE)__msa_ave_s_h((v8i16)in0, (v8i16)in1); \ - out1 = (RTYPE)__msa_ave_s_h((v8i16)in2, (v8i16)in3); \ - out2 = (RTYPE)__msa_ave_s_h((v8i16)in4, (v8i16)in5); \ - out3 = (RTYPE)__msa_ave_s_h((v8i16)in6, (v8i16)in7); \ -} +#define AVE_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + out0 = (RTYPE)__msa_ave_s_h((v8i16)in0, (v8i16)in1); \ + out1 = (RTYPE)__msa_ave_s_h((v8i16)in2, (v8i16)in3); \ + out2 = (RTYPE)__msa_ave_s_h((v8i16)in4, (v8i16)in5); \ + out3 = (RTYPE)__msa_ave_s_h((v8i16)in6, (v8i16)in7); \ + } #define AVE_SH4_SH(...) AVE_SH4(v8i16, __VA_ARGS__) /* Description : Addition of signed halfword elements and signed saturation @@ -1413,17 +1506,19 @@ halfword elements of 'in1'. The result is then signed saturated between halfword data type range */ -#define ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1) { \ - out0 = (RTYPE)__msa_adds_s_h((v8i16)in0, (v8i16)in1); \ - out1 = (RTYPE)__msa_adds_s_h((v8i16)in2, (v8i16)in3); \ -} +#define ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1) \ + { \ + out0 = (RTYPE)__msa_adds_s_h((v8i16)in0, (v8i16)in1); \ + out1 = (RTYPE)__msa_adds_s_h((v8i16)in2, (v8i16)in3); \ + } #define ADDS_SH2_SH(...) ADDS_SH2(v8i16, __VA_ARGS__) -#define ADDS_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) { \ - ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1); \ - ADDS_SH2(RTYPE, in4, in5, in6, in7, out2, out3); \ -} +#define ADDS_SH4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3) \ + { \ + ADDS_SH2(RTYPE, in0, in1, in2, in3, out0, out1); \ + ADDS_SH2(RTYPE, in4, in5, in6, in7, out2, out3); \ + } #define ADDS_SH4_SH(...) 
ADDS_SH4(v8i16, __VA_ARGS__) /* Description : Shift left all elements of vector (generic for all data types) @@ -1433,12 +1528,13 @@ Details : Each element of vector 'in0' is left shifted by 'shift' and the result is written in-place. */ -#define SLLI_4V(in0, in1, in2, in3, shift) { \ - in0 = in0 << shift; \ - in1 = in1 << shift; \ - in2 = in2 << shift; \ - in3 = in3 << shift; \ -} +#define SLLI_4V(in0, in1, in2, in3, shift) \ + { \ + in0 = in0 << shift; \ + in1 = in1 << shift; \ + in2 = in2 << shift; \ + in3 = in3 << shift; \ + } /* Description : Arithmetic shift right all elements of vector (generic for all data types) @@ -1448,12 +1544,13 @@ Details : Each element of vector 'in0' is right shifted by 'shift' and the result is written in-place. 'shift' is a GP variable. */ -#define SRA_4V(in0, in1, in2, in3, shift) { \ - in0 = in0 >> shift; \ - in1 = in1 >> shift; \ - in2 = in2 >> shift; \ - in3 = in3 >> shift; \ -} +#define SRA_4V(in0, in1, in2, in3, shift) \ + { \ + in0 = in0 >> shift; \ + in1 = in1 >> shift; \ + in2 = in2 >> shift; \ + in3 = in3 >> shift; \ + } /* Description : Shift right arithmetic rounded words Arguments : Inputs - in0, in1, shift @@ -1465,15 +1562,17 @@ rounding and the result is written in-place. 'shift' is a vector. */ -#define SRAR_W2(RTYPE, in0, in1, shift) { \ - in0 = (RTYPE)__msa_srar_w((v4i32)in0, (v4i32)shift); \ - in1 = (RTYPE)__msa_srar_w((v4i32)in1, (v4i32)shift); \ -} +#define SRAR_W2(RTYPE, in0, in1, shift) \ + { \ + in0 = (RTYPE)__msa_srar_w((v4i32)in0, (v4i32)shift); \ + in1 = (RTYPE)__msa_srar_w((v4i32)in1, (v4i32)shift); \ + } -#define SRAR_W4(RTYPE, in0, in1, in2, in3, shift) { \ - SRAR_W2(RTYPE, in0, in1, shift) \ - SRAR_W2(RTYPE, in2, in3, shift) \ -} +#define SRAR_W4(RTYPE, in0, in1, in2, in3, shift) \ + { \ + SRAR_W2(RTYPE, in0, in1, shift) \ + SRAR_W2(RTYPE, in2, in3, shift) \ + } #define SRAR_W4_SW(...) 
SRAR_W4(v4i32, __VA_ARGS__) /* Description : Shift right arithmetic rounded (immediate) @@ -1485,30 +1584,34 @@ shifted value for rounding and the result is written in-place. 'shift' is an immediate value. */ -#define SRARI_H2(RTYPE, in0, in1, shift) { \ - in0 = (RTYPE)__msa_srari_h((v8i16)in0, shift); \ - in1 = (RTYPE)__msa_srari_h((v8i16)in1, shift); \ -} +#define SRARI_H2(RTYPE, in0, in1, shift) \ + { \ + in0 = (RTYPE)__msa_srari_h((v8i16)in0, shift); \ + in1 = (RTYPE)__msa_srari_h((v8i16)in1, shift); \ + } #define SRARI_H2_UH(...) SRARI_H2(v8u16, __VA_ARGS__) #define SRARI_H2_SH(...) SRARI_H2(v8i16, __VA_ARGS__) -#define SRARI_H4(RTYPE, in0, in1, in2, in3, shift) { \ - SRARI_H2(RTYPE, in0, in1, shift); \ - SRARI_H2(RTYPE, in2, in3, shift); \ -} +#define SRARI_H4(RTYPE, in0, in1, in2, in3, shift) \ + { \ + SRARI_H2(RTYPE, in0, in1, shift); \ + SRARI_H2(RTYPE, in2, in3, shift); \ + } #define SRARI_H4_UH(...) SRARI_H4(v8u16, __VA_ARGS__) #define SRARI_H4_SH(...) SRARI_H4(v8i16, __VA_ARGS__) -#define SRARI_W2(RTYPE, in0, in1, shift) { \ - in0 = (RTYPE)__msa_srari_w((v4i32)in0, shift); \ - in1 = (RTYPE)__msa_srari_w((v4i32)in1, shift); \ -} +#define SRARI_W2(RTYPE, in0, in1, shift) \ + { \ + in0 = (RTYPE)__msa_srari_w((v4i32)in0, shift); \ + in1 = (RTYPE)__msa_srari_w((v4i32)in1, shift); \ + } #define SRARI_W2_SW(...) SRARI_W2(v4i32, __VA_ARGS__) -#define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) { \ - SRARI_W2(RTYPE, in0, in1, shift); \ - SRARI_W2(RTYPE, in2, in3, shift); \ -} +#define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) \ + { \ + SRARI_W2(RTYPE, in0, in1, shift); \ + SRARI_W2(RTYPE, in2, in3, shift); \ + } #define SRARI_W4_SW(...) SRARI_W4(v4i32, __VA_ARGS__) /* Description : Logical shift right all elements of vector (immediate) @@ -1518,12 +1621,13 @@ Details : Each element of vector 'in0' is right shifted by 'shift' and the result is written in-place. 'shift' is an immediate value. 
*/ -#define SRLI_H4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3, shift) { \ - out0 = (RTYPE)__msa_srli_h((v8i16)in0, shift); \ - out1 = (RTYPE)__msa_srli_h((v8i16)in1, shift); \ - out2 = (RTYPE)__msa_srli_h((v8i16)in2, shift); \ - out3 = (RTYPE)__msa_srli_h((v8i16)in3, shift); \ -} +#define SRLI_H4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3, shift) \ + { \ + out0 = (RTYPE)__msa_srli_h((v8i16)in0, shift); \ + out1 = (RTYPE)__msa_srli_h((v8i16)in1, shift); \ + out2 = (RTYPE)__msa_srli_h((v8i16)in2, shift); \ + out3 = (RTYPE)__msa_srli_h((v8i16)in3, shift); \ + } #define SRLI_H4_SH(...) SRLI_H4(v8i16, __VA_ARGS__) /* Description : Multiplication of pairs of vectors @@ -1532,15 +1636,16 @@ Details : Each element from 'in0' is multiplied with elements from 'in1' and the result is written to 'out0' */ -#define MUL2(in0, in1, in2, in3, out0, out1) { \ - out0 = in0 * in1; \ - out1 = in2 * in3; \ -} -#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) { \ - MUL2(in0, in1, in2, in3, out0, out1); \ - MUL2(in4, in5, in6, in7, out2, out3); \ -} +#define MUL2(in0, in1, in2, in3, out0, out1) \ + { \ + out0 = in0 * in1; \ + out1 = in2 * in3; \ + } +#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \ + { \ + MUL2(in0, in1, in2, in3, out0, out1); \ + MUL2(in4, in5, in6, in7, out2, out3); \ + } /* Description : Addition of 2 pairs of vectors Arguments : Inputs - in0, in1, in2, in3 @@ -1548,15 +1653,16 @@ Details : Each element in 'in0' is added to 'in1' and result is written to 'out0'. 
*/ -#define ADD2(in0, in1, in2, in3, out0, out1) { \ - out0 = in0 + in1; \ - out1 = in2 + in3; \ -} -#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) { \ - ADD2(in0, in1, in2, in3, out0, out1); \ - ADD2(in4, in5, in6, in7, out2, out3); \ -} +#define ADD2(in0, in1, in2, in3, out0, out1) \ + { \ + out0 = in0 + in1; \ + out1 = in2 + in3; \ + } +#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \ + { \ + ADD2(in0, in1, in2, in3, out0, out1); \ + ADD2(in4, in5, in6, in7, out2, out3); \ + } /* Description : Subtraction of 2 pairs of vectors Arguments : Inputs - in0, in1, in2, in3 @@ -1564,17 +1670,18 @@ Details : Each element in 'in1' is subtracted from 'in0' and result is written to 'out0'. */ -#define SUB2(in0, in1, in2, in3, out0, out1) { \ - out0 = in0 - in1; \ - out1 = in2 - in3; \ -} -#define SUB4(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3) { \ - out0 = in0 - in1; \ - out1 = in2 - in3; \ - out2 = in4 - in5; \ - out3 = in6 - in7; \ -} +#define SUB2(in0, in1, in2, in3, out0, out1) \ + { \ + out0 = in0 - in1; \ + out1 = in2 - in3; \ + } +#define SUB4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \ + { \ + out0 = in0 - in1; \ + out1 = in2 - in3; \ + out2 = in4 - in5; \ + out3 = in6 - in7; \ + } /* Description : Sign extend halfword elements from right half of the vector Arguments : Input - in (halfword vector) @@ -1584,12 +1691,13 @@ extracted and interleaved with same vector 'in0' to generate 4 word elements keeping sign intact */ -#define UNPCK_R_SH_SW(in, out) { \ - v8i16 sign_m; \ - \ - sign_m = __msa_clti_s_h((v8i16)in, 0); \ - out = (v4i32)__msa_ilvr_h(sign_m, (v8i16)in); \ -} +#define UNPCK_R_SH_SW(in, out) \ + { \ + v8i16 sign_m; \ + \ + sign_m = __msa_clti_s_h((v8i16)in, 0); \ + out = (v4i32)__msa_ilvr_h(sign_m, (v8i16)in); \ + } /* Description : Zero extend unsigned byte elements to halfword elements Arguments : Input - in (unsigned byte vector) @@ -1598,11 
+1706,12 @@ Details : Zero extended right half of vector is returned in 'out0' Zero extended left half of vector is returned in 'out1' */ -#define UNPCK_UB_SH(in, out0, out1) { \ - v16i8 zero_m = { 0 }; \ - \ - ILVRL_B2_SH(zero_m, in, out0, out1); \ -} +#define UNPCK_UB_SH(in, out0, out1) \ + { \ + v16i8 zero_m = { 0 }; \ + \ + ILVRL_B2_SH(zero_m, in, out0, out1); \ + } /* Description : Sign extend halfword elements from input vector and return the result in pair of vectors @@ -1615,91 +1724,96 @@ Then interleaved left with same vector 'in0' to generate 4 signed word elements in 'out1' */ -#define UNPCK_SH_SW(in, out0, out1) { \ - v8i16 tmp_m; \ - \ - tmp_m = __msa_clti_s_h((v8i16)in, 0); \ - ILVRL_H2_SW(tmp_m, in, out0, out1); \ -} +#define UNPCK_SH_SW(in, out0, out1) \ + { \ + v8i16 tmp_m; \ + \ + tmp_m = __msa_clti_s_h((v8i16)in, 0); \ + ILVRL_H2_SW(tmp_m, in, out0, out1); \ + } /* Description : Butterfly of 4 input vectors Arguments : Inputs - in0, in1, in2, in3 Outputs - out0, out1, out2, out3 Details : Butterfly operation */ -#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) { \ - out0 = in0 + in3; \ - out1 = in1 + in2; \ - \ - out2 = in1 - in2; \ - out3 = in0 - in3; \ -} +#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + out0 = in0 + in3; \ + out1 = in1 + in2; \ + \ + out2 = in1 - in2; \ + out3 = in0 - in3; \ + } /* Description : Butterfly of 8 input vectors Arguments : Inputs - in0 ... in7 Outputs - out0 .. 
out7 Details : Butterfly operation */ -#define BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - out0 = in0 + in7; \ - out1 = in1 + in6; \ - out2 = in2 + in5; \ - out3 = in3 + in4; \ - \ - out4 = in3 - in4; \ - out5 = in2 - in5; \ - out6 = in1 - in6; \ - out7 = in0 - in7; \ -} +#define BUTTERFLY_8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, \ + out3, out4, out5, out6, out7) \ + { \ + out0 = in0 + in7; \ + out1 = in1 + in6; \ + out2 = in2 + in5; \ + out3 = in3 + in4; \ + \ + out4 = in3 - in4; \ + out5 = in2 - in5; \ + out6 = in1 - in6; \ + out7 = in0 - in7; \ + } /* Description : Butterfly of 16 input vectors Arguments : Inputs - in0 ... in15 Outputs - out0 .. out15 Details : Butterfly operation */ -#define BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, \ - in8, in9, in10, in11, in12, in13, in14, in15, \ - out0, out1, out2, out3, out4, out5, out6, out7, \ - out8, out9, out10, out11, out12, out13, out14, out15) { \ - out0 = in0 + in15; \ - out1 = in1 + in14; \ - out2 = in2 + in13; \ - out3 = in3 + in12; \ - out4 = in4 + in11; \ - out5 = in5 + in10; \ - out6 = in6 + in9; \ - out7 = in7 + in8; \ +#define BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, \ + in11, in12, in13, in14, in15, out0, out1, out2, out3, \ + out4, out5, out6, out7, out8, out9, out10, out11, out12, \ + out13, out14, out15) \ + { \ + out0 = in0 + in15; \ + out1 = in1 + in14; \ + out2 = in2 + in13; \ + out3 = in3 + in12; \ + out4 = in4 + in11; \ + out5 = in5 + in10; \ + out6 = in6 + in9; \ + out7 = in7 + in8; \ \ - out8 = in7 - in8; \ - out9 = in6 - in9; \ - out10 = in5 - in10; \ - out11 = in4 - in11; \ - out12 = in3 - in12; \ - out13 = in2 - in13; \ - out14 = in1 - in14; \ - out15 = in0 - in15; \ -} + out8 = in7 - in8; \ + out9 = in6 - in9; \ + out10 = in5 - in10; \ + out11 = in4 - in11; \ + out12 = in3 - in12; \ + out13 = in2 - in13; \ + out14 = in1 - in14; \ + out15 = in0 - in15; \ + } /* 
Description : Transpose input 8x8 byte block Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7 Outputs - out0, out1, out2, out3, out4, out5, out6, out7 Return Type - as per RTYPE */ -#define TRANSPOSE8x8_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \ - \ - ILVR_B4_SB(in2, in0, in3, in1, in6, in4, in7, in5, \ - tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ - ILVRL_B2_SB(tmp1_m, tmp0_m, tmp4_m, tmp5_m); \ - ILVRL_B2_SB(tmp3_m, tmp2_m, tmp6_m, tmp7_m); \ - ILVRL_W2(RTYPE, tmp6_m, tmp4_m, out0, out2); \ - ILVRL_W2(RTYPE, tmp7_m, tmp5_m, out4, out6); \ - SLDI_B2_0(RTYPE, out0, out2, out1, out3, 8); \ - SLDI_B2_0(RTYPE, out4, out6, out5, out7, 8); \ -} +#define TRANSPOSE8x8_UB(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, \ + out1, out2, out3, out4, out5, out6, out7) \ + { \ + v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \ + \ + ILVR_B4_SB(in2, in0, in3, in1, in6, in4, in7, in5, tmp0_m, tmp1_m, tmp2_m, \ + tmp3_m); \ + ILVRL_B2_SB(tmp1_m, tmp0_m, tmp4_m, tmp5_m); \ + ILVRL_B2_SB(tmp3_m, tmp2_m, tmp6_m, tmp7_m); \ + ILVRL_W2(RTYPE, tmp6_m, tmp4_m, out0, out2); \ + ILVRL_W2(RTYPE, tmp7_m, tmp5_m, out4, out6); \ + SLDI_B2_0(RTYPE, out0, out2, out1, out3, 8); \ + SLDI_B2_0(RTYPE, out4, out6, out5, out7, 8); \ + } #define TRANSPOSE8x8_UB_UB(...) 
TRANSPOSE8x8_UB(v16u8, __VA_ARGS__) /* Description : Transpose 16x8 block into 8x16 with byte elements in vectors @@ -1708,128 +1822,133 @@ Outputs - out0, out1, out2, out3, out4, out5, out6, out7 Return Type - unsigned byte */ -#define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \ - in8, in9, in10, in11, in12, in13, in14, in15, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - v16u8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \ - \ - ILVEV_D2_UB(in0, in8, in1, in9, out7, out6); \ - ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \ - ILVEV_D2_UB(in4, in12, in5, in13, out3, out2); \ - ILVEV_D2_UB(in6, in14, in7, in15, out1, out0); \ - \ - tmp0_m = (v16u8)__msa_ilvev_b((v16i8)out6, (v16i8)out7); \ - tmp4_m = (v16u8)__msa_ilvod_b((v16i8)out6, (v16i8)out7); \ - tmp1_m = (v16u8)__msa_ilvev_b((v16i8)out4, (v16i8)out5); \ - tmp5_m = (v16u8)__msa_ilvod_b((v16i8)out4, (v16i8)out5); \ - out5 = (v16u8)__msa_ilvev_b((v16i8)out2, (v16i8)out3); \ - tmp6_m = (v16u8)__msa_ilvod_b((v16i8)out2, (v16i8)out3); \ - out7 = (v16u8)__msa_ilvev_b((v16i8)out0, (v16i8)out1); \ - tmp7_m = (v16u8)__msa_ilvod_b((v16i8)out0, (v16i8)out1); \ - \ - ILVEV_H2_UB(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m); \ - out0 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ - out4 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ - \ - tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp1_m, (v8i16)tmp0_m); \ - tmp3_m = (v16u8)__msa_ilvod_h((v8i16)out7, (v8i16)out5); \ - out2 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ - out6 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ - \ - ILVEV_H2_UB(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m); \ - out1 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ - out5 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ - \ - tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp5_m, (v8i16)tmp4_m); \ - tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp5_m, (v8i16)tmp4_m); \ - tmp3_m = 
(v16u8)__msa_ilvod_h((v8i16)tmp7_m, (v8i16)tmp6_m); \ - tmp3_m = (v16u8)__msa_ilvod_h((v8i16)tmp7_m, (v8i16)tmp6_m); \ - out3 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ - out7 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ -} +#define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \ + in10, in11, in12, in13, in14, in15, out0, out1, \ + out2, out3, out4, out5, out6, out7) \ + { \ + v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + v16u8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \ + \ + ILVEV_D2_UB(in0, in8, in1, in9, out7, out6); \ + ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \ + ILVEV_D2_UB(in4, in12, in5, in13, out3, out2); \ + ILVEV_D2_UB(in6, in14, in7, in15, out1, out0); \ + \ + tmp0_m = (v16u8)__msa_ilvev_b((v16i8)out6, (v16i8)out7); \ + tmp4_m = (v16u8)__msa_ilvod_b((v16i8)out6, (v16i8)out7); \ + tmp1_m = (v16u8)__msa_ilvev_b((v16i8)out4, (v16i8)out5); \ + tmp5_m = (v16u8)__msa_ilvod_b((v16i8)out4, (v16i8)out5); \ + out5 = (v16u8)__msa_ilvev_b((v16i8)out2, (v16i8)out3); \ + tmp6_m = (v16u8)__msa_ilvod_b((v16i8)out2, (v16i8)out3); \ + out7 = (v16u8)__msa_ilvev_b((v16i8)out0, (v16i8)out1); \ + tmp7_m = (v16u8)__msa_ilvod_b((v16i8)out0, (v16i8)out1); \ + \ + ILVEV_H2_UB(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m); \ + out0 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ + out4 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ + \ + tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp1_m, (v8i16)tmp0_m); \ + tmp3_m = (v16u8)__msa_ilvod_h((v8i16)out7, (v8i16)out5); \ + out2 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ + out6 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ + \ + ILVEV_H2_UB(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m); \ + out1 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ + out5 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ + \ + tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp5_m, (v8i16)tmp4_m); \ + tmp2_m = (v16u8)__msa_ilvod_h((v8i16)tmp5_m, (v8i16)tmp4_m); \ + tmp3_m = 
(v16u8)__msa_ilvod_h((v8i16)tmp7_m, (v8i16)tmp6_m); \ + tmp3_m = (v16u8)__msa_ilvod_h((v8i16)tmp7_m, (v8i16)tmp6_m); \ + out3 = (v16u8)__msa_ilvev_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ + out7 = (v16u8)__msa_ilvod_w((v4i32)tmp3_m, (v4i32)tmp2_m); \ + } /* Description : Transpose 4x4 block with half word elements in vectors Arguments : Inputs - in0, in1, in2, in3 Outputs - out0, out1, out2, out3 Return Type - signed halfword */ -#define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) { \ - v8i16 s0_m, s1_m; \ - \ - ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m); \ - ILVRL_W2_SH(s1_m, s0_m, out0, out2); \ - out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \ - out3 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out2); \ -} +#define TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + v8i16 s0_m, s1_m; \ + \ + ILVR_H2_SH(in1, in0, in3, in2, s0_m, s1_m); \ + ILVRL_W2_SH(s1_m, s0_m, out0, out2); \ + out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \ + out3 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out2); \ + } /* Description : Transpose 4x8 block with half word elements in vectors Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7 Outputs - out0, out1, out2, out3, out4, out5, out6, out7 Return Type - signed halfword */ -#define TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - v8i16 tmp0_n, tmp1_n, tmp2_n, tmp3_n; \ - v8i16 zero_m = { 0 }; \ - \ - ILVR_H4_SH(in1, in0, in3, in2, in5, in4, in7, in6, \ - tmp0_n, tmp1_n, tmp2_n, tmp3_n); \ - ILVRL_W2_SH(tmp1_n, tmp0_n, tmp0_m, tmp2_m); \ - ILVRL_W2_SH(tmp3_n, tmp2_n, tmp1_m, tmp3_m); \ - \ - out0 = (v8i16)__msa_ilvr_d((v2i64)tmp1_m, (v2i64)tmp0_m); \ - out1 = (v8i16)__msa_ilvl_d((v2i64)tmp1_m, (v2i64)tmp0_m); \ - out2 = (v8i16)__msa_ilvr_d((v2i64)tmp3_m, (v2i64)tmp2_m); \ - out3 = (v8i16)__msa_ilvl_d((v2i64)tmp3_m, (v2i64)tmp2_m); \ - \ - out4 = zero_m; \ - out5 = zero_m; \ - 
out6 = zero_m; \ - out7 = zero_m; \ -} +#define TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3, out4, out5, out6, out7) \ + { \ + v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + v8i16 tmp0_n, tmp1_n, tmp2_n, tmp3_n; \ + v8i16 zero_m = { 0 }; \ + \ + ILVR_H4_SH(in1, in0, in3, in2, in5, in4, in7, in6, tmp0_n, tmp1_n, tmp2_n, \ + tmp3_n); \ + ILVRL_W2_SH(tmp1_n, tmp0_n, tmp0_m, tmp2_m); \ + ILVRL_W2_SH(tmp3_n, tmp2_n, tmp1_m, tmp3_m); \ + \ + out0 = (v8i16)__msa_ilvr_d((v2i64)tmp1_m, (v2i64)tmp0_m); \ + out1 = (v8i16)__msa_ilvl_d((v2i64)tmp1_m, (v2i64)tmp0_m); \ + out2 = (v8i16)__msa_ilvr_d((v2i64)tmp3_m, (v2i64)tmp2_m); \ + out3 = (v8i16)__msa_ilvl_d((v2i64)tmp3_m, (v2i64)tmp2_m); \ + \ + out4 = zero_m; \ + out5 = zero_m; \ + out6 = zero_m; \ + out7 = zero_m; \ + } /* Description : Transpose 8x4 block with half word elements in vectors Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7 Outputs - out0, out1, out2, out3, out4, out5, out6, out7 Return Type - signed halfword */ -#define TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) { \ - v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - \ - ILVR_H2_SH(in1, in0, in3, in2, tmp0_m, tmp1_m); \ - ILVL_H2_SH(in1, in0, in3, in2, tmp2_m, tmp3_m); \ - ILVR_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out2); \ - ILVL_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out1, out3); \ -} +#define TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + \ + ILVR_H2_SH(in1, in0, in3, in2, tmp0_m, tmp1_m); \ + ILVL_H2_SH(in1, in0, in3, in2, tmp2_m, tmp3_m); \ + ILVR_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out2); \ + ILVL_W2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out1, out3); \ + } /* Description : Transpose 8x8 block with half word elements in vectors Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7 Outputs - out0, out1, out2, out3, out4, out5, out6, out7 Return Type - as per RTYPE */ -#define TRANSPOSE8x8_H(RTYPE, in0, in1, in2, 
in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - v8i16 s0_m, s1_m; \ - v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - v8i16 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \ - \ - ILVR_H2_SH(in6, in4, in7, in5, s0_m, s1_m); \ - ILVRL_H2_SH(s1_m, s0_m, tmp0_m, tmp1_m); \ - ILVL_H2_SH(in6, in4, in7, in5, s0_m, s1_m); \ - ILVRL_H2_SH(s1_m, s0_m, tmp2_m, tmp3_m); \ - ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \ - ILVRL_H2_SH(s1_m, s0_m, tmp4_m, tmp5_m); \ - ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \ - ILVRL_H2_SH(s1_m, s0_m, tmp6_m, tmp7_m); \ - PCKEV_D4(RTYPE, tmp0_m, tmp4_m, tmp1_m, tmp5_m, tmp2_m, tmp6_m, \ - tmp3_m, tmp7_m, out0, out2, out4, out6); \ - out1 = (RTYPE)__msa_pckod_d((v2i64)tmp0_m, (v2i64)tmp4_m); \ - out3 = (RTYPE)__msa_pckod_d((v2i64)tmp1_m, (v2i64)tmp5_m); \ - out5 = (RTYPE)__msa_pckod_d((v2i64)tmp2_m, (v2i64)tmp6_m); \ - out7 = (RTYPE)__msa_pckod_d((v2i64)tmp3_m, (v2i64)tmp7_m); \ -} +#define TRANSPOSE8x8_H(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, out0, \ + out1, out2, out3, out4, out5, out6, out7) \ + { \ + v8i16 s0_m, s1_m; \ + v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + v8i16 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \ + \ + ILVR_H2_SH(in6, in4, in7, in5, s0_m, s1_m); \ + ILVRL_H2_SH(s1_m, s0_m, tmp0_m, tmp1_m); \ + ILVL_H2_SH(in6, in4, in7, in5, s0_m, s1_m); \ + ILVRL_H2_SH(s1_m, s0_m, tmp2_m, tmp3_m); \ + ILVR_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \ + ILVRL_H2_SH(s1_m, s0_m, tmp4_m, tmp5_m); \ + ILVL_H2_SH(in2, in0, in3, in1, s0_m, s1_m); \ + ILVRL_H2_SH(s1_m, s0_m, tmp6_m, tmp7_m); \ + PCKEV_D4(RTYPE, tmp0_m, tmp4_m, tmp1_m, tmp5_m, tmp2_m, tmp6_m, tmp3_m, \ + tmp7_m, out0, out2, out4, out6); \ + out1 = (RTYPE)__msa_pckod_d((v2i64)tmp0_m, (v2i64)tmp4_m); \ + out3 = (RTYPE)__msa_pckod_d((v2i64)tmp1_m, (v2i64)tmp5_m); \ + out5 = (RTYPE)__msa_pckod_d((v2i64)tmp2_m, (v2i64)tmp6_m); \ + out7 = (RTYPE)__msa_pckod_d((v2i64)tmp3_m, (v2i64)tmp7_m); \ + } #define TRANSPOSE8x8_SH_SH(...) 
TRANSPOSE8x8_H(v8i16, __VA_ARGS__) /* Description : Transpose 4x4 block with word elements in vectors @@ -1837,40 +1956,42 @@ Outputs - out0, out1, out2, out3 Return Type - signed word */ -#define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3) { \ - v4i32 s0_m, s1_m, s2_m, s3_m; \ - \ - ILVRL_W2_SW(in1, in0, s0_m, s1_m); \ - ILVRL_W2_SW(in3, in2, s2_m, s3_m); \ - \ - out0 = (v4i32)__msa_ilvr_d((v2i64)s2_m, (v2i64)s0_m); \ - out1 = (v4i32)__msa_ilvl_d((v2i64)s2_m, (v2i64)s0_m); \ - out2 = (v4i32)__msa_ilvr_d((v2i64)s3_m, (v2i64)s1_m); \ - out3 = (v4i32)__msa_ilvl_d((v2i64)s3_m, (v2i64)s1_m); \ -} +#define TRANSPOSE4x4_SW_SW(in0, in1, in2, in3, out0, out1, out2, out3) \ + { \ + v4i32 s0_m, s1_m, s2_m, s3_m; \ + \ + ILVRL_W2_SW(in1, in0, s0_m, s1_m); \ + ILVRL_W2_SW(in3, in2, s2_m, s3_m); \ + \ + out0 = (v4i32)__msa_ilvr_d((v2i64)s2_m, (v2i64)s0_m); \ + out1 = (v4i32)__msa_ilvl_d((v2i64)s2_m, (v2i64)s0_m); \ + out2 = (v4i32)__msa_ilvr_d((v2i64)s3_m, (v2i64)s1_m); \ + out3 = (v4i32)__msa_ilvl_d((v2i64)s3_m, (v2i64)s1_m); \ + } /* Description : Add block 4x4 Arguments : Inputs - in0, in1, in2, in3, pdst, stride Details : Least significant 4 bytes from each input vector are added to the destination bytes, clipped between 0-255 and stored. 
*/ -#define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride) { \ - uint32_t src0_m, src1_m, src2_m, src3_m; \ - v8i16 inp0_m, inp1_m, res0_m, res1_m; \ - v16i8 dst0_m = { 0 }; \ - v16i8 dst1_m = { 0 }; \ - v16i8 zero_m = { 0 }; \ - \ - ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m) \ - LW4(pdst, stride, src0_m, src1_m, src2_m, src3_m); \ - INSERT_W2_SB(src0_m, src1_m, dst0_m); \ - INSERT_W2_SB(src2_m, src3_m, dst1_m); \ - ILVR_B2_SH(zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m); \ - ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m); \ - CLIP_SH2_0_255(res0_m, res1_m); \ - PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m); \ - ST4x4_UB(dst0_m, dst1_m, 0, 1, 0, 1, pdst, stride); \ -} +#define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride) \ + { \ + uint32_t src0_m, src1_m, src2_m, src3_m; \ + v8i16 inp0_m, inp1_m, res0_m, res1_m; \ + v16i8 dst0_m = { 0 }; \ + v16i8 dst1_m = { 0 }; \ + v16i8 zero_m = { 0 }; \ + \ + ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m) \ + LW4(pdst, stride, src0_m, src1_m, src2_m, src3_m); \ + INSERT_W2_SB(src0_m, src1_m, dst0_m); \ + INSERT_W2_SB(src2_m, src3_m, dst1_m); \ + ILVR_B2_SH(zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m); \ + ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m); \ + CLIP_SH2_0_255(res0_m, res1_m); \ + PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m); \ + ST4x4_UB(dst0_m, dst1_m, 0, 1, 0, 1, pdst, stride); \ + } /* Description : Pack even elements of input vectors & xor with 128 Arguments : Inputs - in0, in1 @@ -1880,53 +2001,57 @@ together in one vector and the resulting vector is xor'ed with 128 to shift the range from signed to unsigned byte */ -#define PCKEV_XORI128_UB(in0, in1) ({ \ - v16u8 out_m; \ - \ - out_m = (v16u8)__msa_pckev_b((v16i8)in1, (v16i8)in0); \ - out_m = (v16u8)__msa_xori_b((v16u8)out_m, 128); \ - out_m; \ -}) +#define PCKEV_XORI128_UB(in0, in1) \ + ({ \ + v16u8 out_m; \ + \ + out_m = (v16u8)__msa_pckev_b((v16i8)in1, (v16i8)in0); \ + out_m = 
(v16u8)__msa_xori_b((v16u8)out_m, 128); \ + out_m; \ + }) /* Description : Converts inputs to unsigned bytes, interleave, average & store as 8x4 unsigned byte block Arguments : Inputs - in0, in1, in2, in3, dst0, dst1, dst2, dst3, pdst, stride */ -#define CONVERT_UB_AVG_ST8x4_UB(in0, in1, in2, in3, \ - dst0, dst1, dst2, dst3, pdst, stride) { \ - v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - uint8_t *pdst_m = (uint8_t *)(pdst); \ - \ - tmp0_m = PCKEV_XORI128_UB(in0, in1); \ - tmp1_m = PCKEV_XORI128_UB(in2, in3); \ - ILVR_D2_UB(dst1, dst0, dst3, dst2, tmp2_m, tmp3_m); \ - AVER_UB2_UB(tmp0_m, tmp2_m, tmp1_m, tmp3_m, tmp0_m, tmp1_m); \ - ST8x4_UB(tmp0_m, tmp1_m, pdst_m, stride); \ -} +#define CONVERT_UB_AVG_ST8x4_UB(in0, in1, in2, in3, dst0, dst1, dst2, dst3, \ + pdst, stride) \ + { \ + v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + uint8_t *pdst_m = (uint8_t *)(pdst); \ + \ + tmp0_m = PCKEV_XORI128_UB(in0, in1); \ + tmp1_m = PCKEV_XORI128_UB(in2, in3); \ + ILVR_D2_UB(dst1, dst0, dst3, dst2, tmp2_m, tmp3_m); \ + AVER_UB2_UB(tmp0_m, tmp2_m, tmp1_m, tmp3_m, tmp0_m, tmp1_m); \ + ST8x4_UB(tmp0_m, tmp1_m, pdst_m, stride); \ + } /* Description : Pack even byte elements and store byte vector in destination memory Arguments : Inputs - in0, in1, pdst */ -#define PCKEV_ST_SB(in0, in1, pdst) { \ - v16i8 tmp_m; \ - \ - tmp_m = __msa_pckev_b((v16i8)in1, (v16i8)in0); \ - ST_SB(tmp_m, (pdst)); \ -} +#define PCKEV_ST_SB(in0, in1, pdst) \ + { \ + v16i8 tmp_m; \ + \ + tmp_m = __msa_pckev_b((v16i8)in1, (v16i8)in0); \ + ST_SB(tmp_m, (pdst)); \ + } /* Description : Horizontal 2 tap filter kernel code Arguments : Inputs - in0, in1, mask, coeff, shift */ -#define HORIZ_2TAP_FILT_UH(in0, in1, mask, coeff, shift) ({ \ - v16i8 tmp0_m; \ - v8u16 tmp1_m; \ - \ - tmp0_m = __msa_vshf_b((v16i8)mask, (v16i8)in1, (v16i8)in0); \ - tmp1_m = __msa_dotp_u_h((v16u8)tmp0_m, (v16u8)coeff); \ - tmp1_m = (v8u16)__msa_srari_h((v8i16)tmp1_m, shift); \ - \ - tmp1_m; \ -}) -#endif /* VPX_DSP_MIPS_MACROS_MSA_H_ */ +#define 
HORIZ_2TAP_FILT_UH(in0, in1, mask, coeff, shift) \ + ({ \ + v16i8 tmp0_m; \ + v8u16 tmp1_m; \ + \ + tmp0_m = __msa_vshf_b((v16i8)mask, (v16i8)in1, (v16i8)in0); \ + tmp1_m = __msa_dotp_u_h((v16u8)tmp0_m, (v16u8)coeff); \ + tmp1_m = (v8u16)__msa_srari_h((v8i16)tmp1_m, shift); \ + \ + tmp1_m; \ + }) +#endif /* VPX_DSP_MIPS_MACROS_MSA_H_ */ diff --git a/libs/libvpx/vpx_dsp/mips/sad_msa.c b/libs/libvpx/vpx_dsp/mips/sad_msa.c index 3bdec28e6e..6455814e1b 100644 --- a/libs/libvpx/vpx_dsp/mips/sad_msa.c +++ b/libs/libvpx/vpx_dsp/mips/sad_msa.c @@ -11,12 +11,13 @@ #include "./vpx_dsp_rtcd.h" #include "vpx_dsp/mips/macros_msa.h" -#define SAD_INSVE_W4(RTYPE, in0, in1, in2, in3, out) { \ - out = (RTYPE)__msa_insve_w((v4i32)out, 0, (v4i32)in0); \ - out = (RTYPE)__msa_insve_w((v4i32)out, 1, (v4i32)in1); \ - out = (RTYPE)__msa_insve_w((v4i32)out, 2, (v4i32)in2); \ - out = (RTYPE)__msa_insve_w((v4i32)out, 3, (v4i32)in3); \ -} +#define SAD_INSVE_W4(RTYPE, in0, in1, in2, in3, out) \ + { \ + out = (RTYPE)__msa_insve_w((v4i32)out, 0, (v4i32)in0); \ + out = (RTYPE)__msa_insve_w((v4i32)out, 1, (v4i32)in1); \ + out = (RTYPE)__msa_insve_w((v4i32)out, 2, (v4i32)in2); \ + out = (RTYPE)__msa_insve_w((v4i32)out, 3, (v4i32)in3); \ + } #define SAD_INSVE_W4_UB(...) 
SAD_INSVE_W4(v16u8, __VA_ARGS__) static uint32_t sad_4width_msa(const uint8_t *src_ptr, int32_t src_stride, @@ -58,8 +59,8 @@ static uint32_t sad_8width_msa(const uint8_t *src, int32_t src_stride, LD_UB4(ref, ref_stride, ref0, ref1, ref2, ref3); ref += (4 * ref_stride); - PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, - src0, src1, ref0, ref1); + PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1, + ref0, ref1); sad += SAD_UB2_UH(src0, src1, ref0, ref1); } @@ -214,8 +215,8 @@ static void sad_8width_x3_msa(const uint8_t *src, int32_t src_stride, src += (4 * src_stride); LD_UB4(ref, ref_stride, ref00, ref11, ref22, ref33); ref += (4 * ref_stride); - PCKEV_D4_UB(src1, src0, src3, src2, ref11, ref00, ref33, ref22, - src0, src1, ref0, ref1); + PCKEV_D4_UB(src1, src0, src3, src2, ref11, ref00, ref33, ref22, src0, src1, + ref0, ref1); sad0 += SAD_UB2_UH(src0, src1, ref0, ref1); SLDI_B2_UB(ref00, ref11, ref00, ref11, ref00, ref11, 1); @@ -473,8 +474,8 @@ static void sad_8width_x8_msa(const uint8_t *src, int32_t src_stride, src += (4 * src_stride); LD_UB4(ref, ref_stride, ref00, ref11, ref22, ref33); ref += (4 * ref_stride); - PCKEV_D4_UB(src1, src0, src3, src2, ref11, ref00, ref33, ref22, - src0, src1, ref0, ref1); + PCKEV_D4_UB(src1, src0, src3, src2, ref11, ref00, ref33, ref22, src0, src1, + ref0, ref1); sad0 += SAD_UB2_UH(src0, src1, ref0, ref1); SLDI_B2_UB(ref00, ref11, ref00, ref11, ref00, ref11, 1); @@ -793,9 +794,9 @@ static void sad_64width_x8_msa(const uint8_t *src, int32_t src_stride, } static void sad_4width_x4d_msa(const uint8_t *src_ptr, int32_t src_stride, - const uint8_t * const aref_ptr[], - int32_t ref_stride, - int32_t height, uint32_t *sad_array) { + const uint8_t *const aref_ptr[], + int32_t ref_stride, int32_t height, + uint32_t *sad_array) { const uint8_t *ref0_ptr, *ref1_ptr, *ref2_ptr, *ref3_ptr; int32_t ht_cnt; uint32_t src0, src1, src2, src3; @@ -854,9 +855,9 @@ static void sad_4width_x4d_msa(const uint8_t 
*src_ptr, int32_t src_stride, } static void sad_8width_x4d_msa(const uint8_t *src_ptr, int32_t src_stride, - const uint8_t * const aref_ptr[], - int32_t ref_stride, - int32_t height, uint32_t *sad_array) { + const uint8_t *const aref_ptr[], + int32_t ref_stride, int32_t height, + uint32_t *sad_array) { int32_t ht_cnt; const uint8_t *ref0_ptr, *ref1_ptr, *ref2_ptr, *ref3_ptr; v16u8 src0, src1, src2, src3; @@ -905,9 +906,9 @@ static void sad_8width_x4d_msa(const uint8_t *src_ptr, int32_t src_stride, } static void sad_16width_x4d_msa(const uint8_t *src_ptr, int32_t src_stride, - const uint8_t * const aref_ptr[], - int32_t ref_stride, - int32_t height, uint32_t *sad_array) { + const uint8_t *const aref_ptr[], + int32_t ref_stride, int32_t height, + uint32_t *sad_array) { int32_t ht_cnt; const uint8_t *ref0_ptr, *ref1_ptr, *ref2_ptr, *ref3_ptr; v16u8 src, ref0, ref1, ref2, ref3, diff; @@ -970,9 +971,9 @@ static void sad_16width_x4d_msa(const uint8_t *src_ptr, int32_t src_stride, } static void sad_32width_x4d_msa(const uint8_t *src, int32_t src_stride, - const uint8_t * const aref_ptr[], - int32_t ref_stride, - int32_t height, uint32_t *sad_array) { + const uint8_t *const aref_ptr[], + int32_t ref_stride, int32_t height, + uint32_t *sad_array) { const uint8_t *ref0_ptr, *ref1_ptr, *ref2_ptr, *ref3_ptr; int32_t ht_cnt; v16u8 src0, src1, ref0, ref1; @@ -1014,9 +1015,9 @@ static void sad_32width_x4d_msa(const uint8_t *src, int32_t src_stride, } static void sad_64width_x4d_msa(const uint8_t *src, int32_t src_stride, - const uint8_t * const aref_ptr[], - int32_t ref_stride, - int32_t height, uint32_t *sad_array) { + const uint8_t *const aref_ptr[], + int32_t ref_stride, int32_t height, + uint32_t *sad_array) { const uint8_t *ref0_ptr, *ref1_ptr, *ref2_ptr, *ref3_ptr; int32_t ht_cnt; v16u8 src0, src1, src2, src3; @@ -1114,8 +1115,8 @@ static uint32_t avgsad_8width_msa(const uint8_t *src, int32_t src_stride, ref += (4 * ref_stride); LD_UB2(sec_pred, 16, pred0, pred1); sec_pred 
+= 32; - PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, - src0, src1, ref0, ref1); + PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1, + ref0, ref1); AVER_UB2_UB(pred0, ref0, pred1, ref1, diff0, diff1); sad += SAD_UB2_UH(src0, src1, diff0, diff1); } @@ -1213,8 +1214,8 @@ static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride, ref += ref_stride; LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3); sec_pred += 64; - AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3, - comp0, comp1, comp2, comp3); + AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3, comp0, + comp1, comp2, comp3); sad0 += SAD_UB2_UH(src0, src1, comp0, comp1); sad1 += SAD_UB2_UH(src2, src3, comp2, comp3); @@ -1224,8 +1225,8 @@ static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride, ref += ref_stride; LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3); sec_pred += 64; - AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3, - comp0, comp1, comp2, comp3); + AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3, comp0, + comp1, comp2, comp3); sad0 += SAD_UB2_UH(src0, src1, comp0, comp1); sad1 += SAD_UB2_UH(src2, src3, comp2, comp3); @@ -1235,8 +1236,8 @@ static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride, ref += ref_stride; LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3); sec_pred += 64; - AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3, - comp0, comp1, comp2, comp3); + AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3, comp0, + comp1, comp2, comp3); sad0 += SAD_UB2_UH(src0, src1, comp0, comp1); sad1 += SAD_UB2_UH(src2, src3, comp2, comp3); @@ -1246,8 +1247,8 @@ static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride, ref += ref_stride; LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3); sec_pred += 64; - AVER_UB4_UB(pred0, ref0, pred1, ref1, pred2, ref2, pred3, ref3, - comp0, comp1, comp2, comp3); + AVER_UB4_UB(pred0, ref0, 
pred1, ref1, pred2, ref2, pred3, ref3, comp0, + comp1, comp2, comp3); sad0 += SAD_UB2_UH(src0, src1, comp0, comp1); sad1 += SAD_UB2_UH(src2, src3, comp2, comp3); } @@ -1258,180 +1259,180 @@ static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride, return HADD_SW_S32(sad); } -#define VPX_SAD_4xHEIGHT_MSA(height) \ -uint32_t vpx_sad4x##height##_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride) { \ - return sad_4width_msa(src, src_stride, ref, ref_stride, height); \ -} +#define VPX_SAD_4xHEIGHT_MSA(height) \ + uint32_t vpx_sad4x##height##_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride) { \ + return sad_4width_msa(src, src_stride, ref, ref_stride, height); \ + } -#define VPX_SAD_8xHEIGHT_MSA(height) \ -uint32_t vpx_sad8x##height##_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride) { \ - return sad_8width_msa(src, src_stride, ref, ref_stride, height); \ -} +#define VPX_SAD_8xHEIGHT_MSA(height) \ + uint32_t vpx_sad8x##height##_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride) { \ + return sad_8width_msa(src, src_stride, ref, ref_stride, height); \ + } -#define VPX_SAD_16xHEIGHT_MSA(height) \ -uint32_t vpx_sad16x##height##_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride) { \ - return sad_16width_msa(src, src_stride, ref, ref_stride, height); \ -} +#define VPX_SAD_16xHEIGHT_MSA(height) \ + uint32_t vpx_sad16x##height##_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride) { \ + return sad_16width_msa(src, src_stride, ref, ref_stride, height); \ + } -#define VPX_SAD_32xHEIGHT_MSA(height) \ -uint32_t vpx_sad32x##height##_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride) { \ - return sad_32width_msa(src, src_stride, ref, ref_stride, height); \ -} +#define VPX_SAD_32xHEIGHT_MSA(height) \ + uint32_t 
vpx_sad32x##height##_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride) { \ + return sad_32width_msa(src, src_stride, ref, ref_stride, height); \ + } -#define VPX_SAD_64xHEIGHT_MSA(height) \ -uint32_t vpx_sad64x##height##_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride) { \ - return sad_64width_msa(src, src_stride, ref, ref_stride, height); \ -} +#define VPX_SAD_64xHEIGHT_MSA(height) \ + uint32_t vpx_sad64x##height##_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride) { \ + return sad_64width_msa(src, src_stride, ref, ref_stride, height); \ + } -#define VPX_SAD_4xHEIGHTx3_MSA(height) \ -void vpx_sad4x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride, \ - uint32_t *sads) { \ - sad_4width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \ -} +#define VPX_SAD_4xHEIGHTx3_MSA(height) \ + void vpx_sad4x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride, \ + uint32_t *sads) { \ + sad_4width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \ + } -#define VPX_SAD_8xHEIGHTx3_MSA(height) \ -void vpx_sad8x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride, \ - uint32_t *sads) { \ - sad_8width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \ -} +#define VPX_SAD_8xHEIGHTx3_MSA(height) \ + void vpx_sad8x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride, \ + uint32_t *sads) { \ + sad_8width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \ + } -#define VPX_SAD_16xHEIGHTx3_MSA(height) \ -void vpx_sad16x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride, \ - uint32_t *sads) { \ - sad_16width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \ -} +#define VPX_SAD_16xHEIGHTx3_MSA(height) \ + void 
vpx_sad16x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride, \ + uint32_t *sads) { \ + sad_16width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \ + } -#define VPX_SAD_32xHEIGHTx3_MSA(height) \ -void vpx_sad32x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride, \ - uint32_t *sads) { \ - sad_32width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \ -} +#define VPX_SAD_32xHEIGHTx3_MSA(height) \ + void vpx_sad32x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride, \ + uint32_t *sads) { \ + sad_32width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \ + } -#define VPX_SAD_64xHEIGHTx3_MSA(height) \ -void vpx_sad64x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride, \ - uint32_t *sads) { \ - sad_64width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \ -} +#define VPX_SAD_64xHEIGHTx3_MSA(height) \ + void vpx_sad64x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride, \ + uint32_t *sads) { \ + sad_64width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \ + } -#define VPX_SAD_4xHEIGHTx8_MSA(height) \ -void vpx_sad4x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride, \ - uint32_t *sads) { \ - sad_4width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \ -} +#define VPX_SAD_4xHEIGHTx8_MSA(height) \ + void vpx_sad4x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride, \ + uint32_t *sads) { \ + sad_4width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \ + } -#define VPX_SAD_8xHEIGHTx8_MSA(height) \ -void vpx_sad8x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride, \ - uint32_t *sads) { \ - sad_8width_x8_msa(src, src_stride, ref, 
ref_stride, height, sads); \ -} +#define VPX_SAD_8xHEIGHTx8_MSA(height) \ + void vpx_sad8x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride, \ + uint32_t *sads) { \ + sad_8width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \ + } -#define VPX_SAD_16xHEIGHTx8_MSA(height) \ -void vpx_sad16x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride, \ - uint32_t *sads) { \ - sad_16width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \ -} +#define VPX_SAD_16xHEIGHTx8_MSA(height) \ + void vpx_sad16x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride, \ + uint32_t *sads) { \ + sad_16width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \ + } -#define VPX_SAD_32xHEIGHTx8_MSA(height) \ -void vpx_sad32x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride, \ - uint32_t *sads) { \ - sad_32width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \ -} +#define VPX_SAD_32xHEIGHTx8_MSA(height) \ + void vpx_sad32x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride, \ + uint32_t *sads) { \ + sad_32width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \ + } -#define VPX_SAD_64xHEIGHTx8_MSA(height) \ -void vpx_sad64x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride, \ - uint32_t *sads) { \ - sad_64width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \ -} +#define VPX_SAD_64xHEIGHTx8_MSA(height) \ + void vpx_sad64x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride, \ + uint32_t *sads) { \ + sad_64width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \ + } -#define VPX_SAD_4xHEIGHTx4D_MSA(height) \ -void vpx_sad4x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *const refs[], 
\ - int32_t ref_stride, uint32_t *sads) { \ - sad_4width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \ -} +#define VPX_SAD_4xHEIGHTx4D_MSA(height) \ + void vpx_sad4x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *const refs[], \ + int32_t ref_stride, uint32_t *sads) { \ + sad_4width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \ + } -#define VPX_SAD_8xHEIGHTx4D_MSA(height) \ -void vpx_sad8x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *const refs[], \ - int32_t ref_stride, uint32_t *sads) { \ - sad_8width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \ -} +#define VPX_SAD_8xHEIGHTx4D_MSA(height) \ + void vpx_sad8x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *const refs[], \ + int32_t ref_stride, uint32_t *sads) { \ + sad_8width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \ + } -#define VPX_SAD_16xHEIGHTx4D_MSA(height) \ -void vpx_sad16x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *const refs[], \ - int32_t ref_stride, uint32_t *sads) { \ - sad_16width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \ -} +#define VPX_SAD_16xHEIGHTx4D_MSA(height) \ + void vpx_sad16x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *const refs[], \ + int32_t ref_stride, uint32_t *sads) { \ + sad_16width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \ + } -#define VPX_SAD_32xHEIGHTx4D_MSA(height) \ -void vpx_sad32x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *const refs[], \ - int32_t ref_stride, uint32_t *sads) { \ - sad_32width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \ -} +#define VPX_SAD_32xHEIGHTx4D_MSA(height) \ + void vpx_sad32x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *const refs[], \ + int32_t ref_stride, uint32_t *sads) { \ + sad_32width_x4d_msa(src, src_stride, refs, 
ref_stride, height, sads); \ + } -#define VPX_SAD_64xHEIGHTx4D_MSA(height) \ -void vpx_sad64x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *const refs[], \ - int32_t ref_stride, uint32_t *sads) { \ - sad_64width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \ -} +#define VPX_SAD_64xHEIGHTx4D_MSA(height) \ + void vpx_sad64x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *const refs[], \ + int32_t ref_stride, uint32_t *sads) { \ + sad_64width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \ + } -#define VPX_AVGSAD_4xHEIGHT_MSA(height) \ -uint32_t vpx_sad4x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride, \ - const uint8_t *second_pred) { \ - return avgsad_4width_msa(src, src_stride, ref, ref_stride, \ - height, second_pred); \ -} +#define VPX_AVGSAD_4xHEIGHT_MSA(height) \ + uint32_t vpx_sad4x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride, \ + const uint8_t *second_pred) { \ + return avgsad_4width_msa(src, src_stride, ref, ref_stride, height, \ + second_pred); \ + } -#define VPX_AVGSAD_8xHEIGHT_MSA(height) \ -uint32_t vpx_sad8x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride, \ - const uint8_t *second_pred) { \ - return avgsad_8width_msa(src, src_stride, ref, ref_stride, \ - height, second_pred); \ -} +#define VPX_AVGSAD_8xHEIGHT_MSA(height) \ + uint32_t vpx_sad8x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \ + const uint8_t *ref, int32_t ref_stride, \ + const uint8_t *second_pred) { \ + return avgsad_8width_msa(src, src_stride, ref, ref_stride, height, \ + second_pred); \ + } -#define VPX_AVGSAD_16xHEIGHT_MSA(height) \ -uint32_t vpx_sad16x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride, \ - const uint8_t *second_pred) { \ - return avgsad_16width_msa(src, 
src_stride, ref, ref_stride, \ - height, second_pred); \ -} +#define VPX_AVGSAD_16xHEIGHT_MSA(height) \ + uint32_t vpx_sad16x##height##_avg_msa( \ + const uint8_t *src, int32_t src_stride, const uint8_t *ref, \ + int32_t ref_stride, const uint8_t *second_pred) { \ + return avgsad_16width_msa(src, src_stride, ref, ref_stride, height, \ + second_pred); \ + } -#define VPX_AVGSAD_32xHEIGHT_MSA(height) \ -uint32_t vpx_sad32x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride, \ - const uint8_t *second_pred) { \ - return avgsad_32width_msa(src, src_stride, ref, ref_stride, \ - height, second_pred); \ -} +#define VPX_AVGSAD_32xHEIGHT_MSA(height) \ + uint32_t vpx_sad32x##height##_avg_msa( \ + const uint8_t *src, int32_t src_stride, const uint8_t *ref, \ + int32_t ref_stride, const uint8_t *second_pred) { \ + return avgsad_32width_msa(src, src_stride, ref, ref_stride, height, \ + second_pred); \ + } -#define VPX_AVGSAD_64xHEIGHT_MSA(height) \ -uint32_t vpx_sad64x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \ - const uint8_t *ref, int32_t ref_stride, \ - const uint8_t *second_pred) { \ - return avgsad_64width_msa(src, src_stride, ref, ref_stride, \ - height, second_pred); \ -} +#define VPX_AVGSAD_64xHEIGHT_MSA(height) \ + uint32_t vpx_sad64x##height##_avg_msa( \ + const uint8_t *src, int32_t src_stride, const uint8_t *ref, \ + int32_t ref_stride, const uint8_t *second_pred) { \ + return avgsad_64width_msa(src, src_stride, ref, ref_stride, height, \ + second_pred); \ + } // 64x64 VPX_SAD_64xHEIGHT_MSA(64); diff --git a/libs/libvpx/vpx_dsp/mips/sub_pixel_variance_msa.c b/libs/libvpx/vpx_dsp/mips/sub_pixel_variance_msa.c index a592a2d078..313e06f92d 100644 --- a/libs/libvpx/vpx_dsp/mips/sub_pixel_variance_msa.c +++ b/libs/libvpx/vpx_dsp/mips/sub_pixel_variance_msa.c @@ -14,29 +14,23 @@ #include "vpx_dsp/variance.h" static const uint8_t bilinear_filters_msa[8][2] = { - { 128, 0, }, - { 112, 16, }, - { 96, 32, }, - { 
80, 48, }, - { 64, 64, }, - { 48, 80, }, - { 32, 96, }, - { 16, 112, }, + { 128, 0 }, { 112, 16 }, { 96, 32 }, { 80, 48 }, + { 64, 64 }, { 48, 80 }, { 32, 96 }, { 16, 112 }, }; -#define CALC_MSE_AVG_B(src, ref, var, sub) { \ - v16u8 src_l0_m, src_l1_m; \ - v8i16 res_l0_m, res_l1_m; \ - \ - ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m); \ - HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m); \ - DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \ - \ - sub += res_l0_m + res_l1_m; \ -} +#define CALC_MSE_AVG_B(src, ref, var, sub) \ + { \ + v16u8 src_l0_m, src_l1_m; \ + v8i16 res_l0_m, res_l1_m; \ + \ + ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m); \ + HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m); \ + DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \ + \ + sub += res_l0_m + res_l1_m; \ + } -#define VARIANCE_WxH(sse, diff, shift) \ - sse - (((uint32_t)diff * diff) >> shift) +#define VARIANCE_WxH(sse, diff, shift) sse - (((uint32_t)diff * diff) >> shift) #define VARIANCE_LARGE_WxH(sse, diff, shift) \ sse - (((int64_t)diff * diff) >> shift) @@ -45,8 +39,7 @@ static uint32_t avg_sse_diff_4width_msa(const uint8_t *src_ptr, int32_t src_stride, const uint8_t *ref_ptr, int32_t ref_stride, - const uint8_t *sec_pred, - int32_t height, + const uint8_t *sec_pred, int32_t height, int32_t *diff) { int32_t ht_cnt; uint32_t src0, src1, src2, src3; @@ -81,8 +74,7 @@ static uint32_t avg_sse_diff_8width_msa(const uint8_t *src_ptr, int32_t src_stride, const uint8_t *ref_ptr, int32_t ref_stride, - const uint8_t *sec_pred, - int32_t height, + const uint8_t *sec_pred, int32_t height, int32_t *diff) { int32_t ht_cnt; v16u8 src0, src1, src2, src3; @@ -99,8 +91,8 @@ static uint32_t avg_sse_diff_8width_msa(const uint8_t *src_ptr, LD_UB4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3); ref_ptr += (4 * ref_stride); - PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, - src0, src1, ref0, ref1); + PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, 
src1, + ref0, ref1); AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1); CALC_MSE_AVG_B(src0, ref0, var, avg); CALC_MSE_AVG_B(src1, ref1, var, avg); @@ -117,8 +109,7 @@ static uint32_t avg_sse_diff_16width_msa(const uint8_t *src_ptr, const uint8_t *ref_ptr, int32_t ref_stride, const uint8_t *sec_pred, - int32_t height, - int32_t *diff) { + int32_t height, int32_t *diff) { int32_t ht_cnt; v16u8 src, ref, pred; v8i16 avg = { 0 }; @@ -173,8 +164,7 @@ static uint32_t avg_sse_diff_32width_msa(const uint8_t *src_ptr, const uint8_t *ref_ptr, int32_t ref_stride, const uint8_t *sec_pred, - int32_t height, - int32_t *diff) { + int32_t height, int32_t *diff) { int32_t ht_cnt; v16u8 src0, src1, ref0, ref1, pred0, pred1; v8i16 avg = { 0 }; @@ -232,8 +222,7 @@ static uint32_t avg_sse_diff_32x64_msa(const uint8_t *src_ptr, int32_t src_stride, const uint8_t *ref_ptr, int32_t ref_stride, - const uint8_t *sec_pred, - int32_t *diff) { + const uint8_t *sec_pred, int32_t *diff) { int32_t ht_cnt; v16u8 src0, src1, ref0, ref1, pred0, pred1; v8i16 avg0 = { 0 }; @@ -293,8 +282,7 @@ static uint32_t avg_sse_diff_64x32_msa(const uint8_t *src_ptr, int32_t src_stride, const uint8_t *ref_ptr, int32_t ref_stride, - const uint8_t *sec_pred, - int32_t *diff) { + const uint8_t *sec_pred, int32_t *diff) { int32_t ht_cnt; v16u8 src0, src1, src2, src3; v16u8 ref0, ref1, ref2, ref3; @@ -310,8 +298,8 @@ static uint32_t avg_sse_diff_64x32_msa(const uint8_t *src_ptr, src_ptr += src_stride; LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3); ref_ptr += ref_stride; - AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, - src0, src1, src2, src3); + AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, src0, src1, + src2, src3); CALC_MSE_AVG_B(src0, ref0, var, avg0); CALC_MSE_AVG_B(src2, ref2, var, avg0); CALC_MSE_AVG_B(src1, ref1, var, avg1); @@ -323,8 +311,8 @@ static uint32_t avg_sse_diff_64x32_msa(const uint8_t *src_ptr, src_ptr += src_stride; LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3); 
ref_ptr += ref_stride; - AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, - src0, src1, src2, src3); + AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, src0, src1, + src2, src3); CALC_MSE_AVG_B(src0, ref0, var, avg0); CALC_MSE_AVG_B(src2, ref2, var, avg0); CALC_MSE_AVG_B(src1, ref1, var, avg1); @@ -343,8 +331,7 @@ static uint32_t avg_sse_diff_64x64_msa(const uint8_t *src_ptr, int32_t src_stride, const uint8_t *ref_ptr, int32_t ref_stride, - const uint8_t *sec_pred, - int32_t *diff) { + const uint8_t *sec_pred, int32_t *diff) { int32_t ht_cnt; v16u8 src0, src1, src2, src3; v16u8 ref0, ref1, ref2, ref3; @@ -362,8 +349,8 @@ static uint32_t avg_sse_diff_64x64_msa(const uint8_t *src_ptr, src_ptr += src_stride; LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3); ref_ptr += ref_stride; - AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, - src0, src1, src2, src3); + AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, src0, src1, + src2, src3); CALC_MSE_AVG_B(src0, ref0, var, avg0); CALC_MSE_AVG_B(src1, ref1, var, avg1); CALC_MSE_AVG_B(src2, ref2, var, avg2); @@ -375,8 +362,8 @@ static uint32_t avg_sse_diff_64x64_msa(const uint8_t *src_ptr, src_ptr += src_stride; LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3); ref_ptr += ref_stride; - AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, - src0, src1, src2, src3); + AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, src0, src1, + src2, src3); CALC_MSE_AVG_B(src0, ref0, var, avg0); CALC_MSE_AVG_B(src1, ref1, var, avg1); CALC_MSE_AVG_B(src2, ref2, var, avg2); @@ -392,13 +379,9 @@ static uint32_t avg_sse_diff_64x64_msa(const uint8_t *src_ptr, return HADD_SW_S32(var); } -static uint32_t sub_pixel_sse_diff_4width_h_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_sse_diff_4width_h_msa( + const uint8_t *src, int32_t src_stride, const 
uint8_t *dst, + int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) { int16_t filtval; uint32_t loop_cnt; uint32_t ref0, ref1, ref2, ref3; @@ -420,11 +403,11 @@ static uint32_t sub_pixel_sse_diff_4width_h_msa(const uint8_t *src, INSERT_W4_UB(ref0, ref1, ref2, ref3, ref); VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, + vec2, vec3); SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); - PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, - src0, src1, src2, src3); + PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1, + src2, src3); ILVEV_W2_SB(src0, src1, src2, src3, src0, src2); src0 = (v16i8)__msa_ilvev_d((v2i64)src2, (v2i64)src0); CALC_MSE_AVG_B(src0, ref, var, avg); @@ -436,13 +419,9 @@ static uint32_t sub_pixel_sse_diff_4width_h_msa(const uint8_t *src, return HADD_SW_S32(var); } -static uint32_t sub_pixel_sse_diff_8width_h_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_sse_diff_8width_h_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) { int16_t filtval; uint32_t loop_cnt; v16u8 filt0, out, ref0, ref1, ref2, ref3; @@ -464,11 +443,11 @@ static uint32_t sub_pixel_sse_diff_8width_h_msa(const uint8_t *src, PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1); VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, + vec2, vec3); 
SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); - PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, - src0, src1, src2, src3); + PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1, + src2, src3); out = (v16u8)__msa_ilvev_d((v2i64)src1, (v2i64)src0); CALC_MSE_AVG_B(out, ref0, var, avg); out = (v16u8)__msa_ilvev_d((v2i64)src3, (v2i64)src2); @@ -481,13 +460,9 @@ static uint32_t sub_pixel_sse_diff_8width_h_msa(const uint8_t *src, return HADD_SW_S32(var); } -static uint32_t sub_pixel_sse_diff_16width_h_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_sse_diff_16width_h_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) { int16_t filtval; uint32_t loop_cnt; v16i8 src0, src1, src2, src3, src4, src5, src6, src7; @@ -512,14 +487,14 @@ static uint32_t sub_pixel_sse_diff_16width_h_msa(const uint8_t *src, VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); VSHF_B2_UH(src4, src4, src5, src5, mask, mask, vec4, vec5); VSHF_B2_UH(src6, src6, src7, src7, mask, mask, vec6, vec7); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - out0, out1, out2, out3); - DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, - out4, out5, out6, out7); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1, + out2, out3); + DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5, + out6, out7); SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS); SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS); - PCKEV_B4_SB(out1, out0, out3, out2, out5, out4, out7, out6, - src0, src1, src2, src3); + PCKEV_B4_SB(out1, out0, out3, out2, out5, out4, out7, out6, src0, src1, + src2, src3); CALC_MSE_AVG_B(src0, dst0, var, avg); CALC_MSE_AVG_B(src1, dst1, var, avg); CALC_MSE_AVG_B(src2, dst2, var, 
avg); @@ -532,13 +507,9 @@ static uint32_t sub_pixel_sse_diff_16width_h_msa(const uint8_t *src, return HADD_SW_S32(var); } -static uint32_t sub_pixel_sse_diff_32width_h_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_sse_diff_32width_h_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) { uint32_t loop_cnt, sse = 0; int32_t diff0[2]; @@ -554,13 +525,9 @@ static uint32_t sub_pixel_sse_diff_32width_h_msa(const uint8_t *src, return sse; } -static uint32_t sub_pixel_sse_diff_64width_h_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_sse_diff_64width_h_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) { uint32_t loop_cnt, sse = 0; int32_t diff0[4]; @@ -576,13 +543,9 @@ static uint32_t sub_pixel_sse_diff_64width_h_msa(const uint8_t *src, return sse; } -static uint32_t sub_pixel_sse_diff_4width_v_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_sse_diff_4width_v_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) { int16_t filtval; uint32_t loop_cnt; uint32_t ref0, ref1, ref2, ref3; @@ -608,8 +571,8 @@ static uint32_t sub_pixel_sse_diff_4width_v_msa(const uint8_t *src, dst += (4 * dst_stride); INSERT_W4_UB(ref0, ref1, ref2, ref3, ref); - ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, - src10_r, src21_r, src32_r, src43_r); + ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r, + 
src32_r, src43_r); ILVR_D2_UB(src21_r, src10_r, src43_r, src32_r, src2110, src4332); DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1); SRARI_H2_UH(tmp0, tmp1, FILTER_BITS); @@ -624,13 +587,9 @@ static uint32_t sub_pixel_sse_diff_4width_v_msa(const uint8_t *src, return HADD_SW_S32(var); } -static uint32_t sub_pixel_sse_diff_8width_v_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_sse_diff_8width_v_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) { int16_t filtval; uint32_t loop_cnt; v16u8 src0, src1, src2, src3, src4; @@ -654,10 +613,10 @@ static uint32_t sub_pixel_sse_diff_8width_v_msa(const uint8_t *src, dst += (4 * dst_stride); PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1); - ILVR_B4_UH(src1, src0, src2, src1, src3, src2, src4, src3, - vec0, vec1, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - tmp0, tmp1, tmp2, tmp3); + ILVR_B4_UH(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2, + vec3); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1, + tmp2, tmp3); SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS); PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, src0, src1); CALC_MSE_AVG_B(src0, ref0, var, avg); @@ -671,13 +630,9 @@ static uint32_t sub_pixel_sse_diff_8width_v_msa(const uint8_t *src, return HADD_SW_S32(var); } -static uint32_t sub_pixel_sse_diff_16width_v_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_sse_diff_16width_v_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) { int16_t filtval; uint32_t loop_cnt; v16u8 ref0, ref1, ref2, ref3; @@ -734,13 
+689,9 @@ static uint32_t sub_pixel_sse_diff_16width_v_msa(const uint8_t *src, return HADD_SW_S32(var); } -static uint32_t sub_pixel_sse_diff_32width_v_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_sse_diff_32width_v_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) { uint32_t loop_cnt, sse = 0; int32_t diff0[2]; @@ -756,13 +707,9 @@ static uint32_t sub_pixel_sse_diff_32width_v_msa(const uint8_t *src, return sse; } -static uint32_t sub_pixel_sse_diff_64width_v_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_sse_diff_64width_v_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) { uint32_t loop_cnt, sse = 0; int32_t diff0[4]; @@ -778,14 +725,10 @@ static uint32_t sub_pixel_sse_diff_64width_v_msa(const uint8_t *src, return sse; } -static uint32_t sub_pixel_sse_diff_4width_hv_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *filter_horiz, - const uint8_t *filter_vert, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_sse_diff_4width_hv_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *filter_horiz, const uint8_t *filter_vert, + int32_t height, int32_t *diff) { int16_t filtval; uint32_t loop_cnt; uint32_t ref0, ref1, ref2, ref3; @@ -831,14 +774,10 @@ static uint32_t sub_pixel_sse_diff_4width_hv_msa(const uint8_t *src, return HADD_SW_S32(var); } -static uint32_t sub_pixel_sse_diff_8width_hv_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *filter_horiz, 
- const uint8_t *filter_vert, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_sse_diff_8width_hv_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *filter_horiz, const uint8_t *filter_vert, + int32_t height, int32_t *diff) { int16_t filtval; uint32_t loop_cnt; v16u8 ref0, ref1, ref2, ref3; @@ -892,14 +831,10 @@ static uint32_t sub_pixel_sse_diff_8width_hv_msa(const uint8_t *src, return HADD_SW_S32(var); } -static uint32_t sub_pixel_sse_diff_16width_hv_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *filter_horiz, - const uint8_t *filter_vert, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_sse_diff_16width_hv_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *filter_horiz, const uint8_t *filter_vert, + int32_t height, int32_t *diff) { int16_t filtval; uint32_t loop_cnt; v16u8 src0, src1, src2, src3, src4, src5, src6, src7; @@ -969,14 +904,10 @@ static uint32_t sub_pixel_sse_diff_16width_hv_msa(const uint8_t *src, return HADD_SW_S32(var); } -static uint32_t sub_pixel_sse_diff_32width_hv_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *filter_horiz, - const uint8_t *filter_vert, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_sse_diff_32width_hv_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *filter_horiz, const uint8_t *filter_vert, + int32_t height, int32_t *diff) { uint32_t loop_cnt, sse = 0; int32_t diff0[2]; @@ -993,14 +924,10 @@ static uint32_t sub_pixel_sse_diff_32width_hv_msa(const uint8_t *src, return sse; } -static uint32_t sub_pixel_sse_diff_64width_hv_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *filter_horiz, - const uint8_t *filter_vert, - int32_t height, - int32_t *diff) { 
+static uint32_t sub_pixel_sse_diff_64width_hv_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *filter_horiz, const uint8_t *filter_vert, + int32_t height, int32_t *diff) { uint32_t loop_cnt, sse = 0; int32_t diff0[4]; @@ -1017,14 +944,10 @@ static uint32_t sub_pixel_sse_diff_64width_hv_msa(const uint8_t *src, return sse; } -static uint32_t sub_pixel_avg_sse_diff_4width_h_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_avg_sse_diff_4width_h_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter, + int32_t height, int32_t *diff) { int16_t filtval; uint32_t loop_cnt; uint32_t ref0, ref1, ref2, ref3; @@ -1049,11 +972,11 @@ static uint32_t sub_pixel_avg_sse_diff_4width_h_msa(const uint8_t *src, INSERT_W4_UB(ref0, ref1, ref2, ref3, ref); VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, + vec2, vec3); SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); - PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, - src0, src1, src2, src3); + PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1, + src2, src3); ILVEV_W2_SB(src0, src1, src2, src3, src0, src2); out = (v16u8)__msa_ilvev_d((v2i64)src2, (v2i64)src0); out = __msa_aver_u_b(out, pred); @@ -1066,14 +989,10 @@ static uint32_t sub_pixel_avg_sse_diff_4width_h_msa(const uint8_t *src, return HADD_SW_S32(var); } -static uint32_t sub_pixel_avg_sse_diff_8width_h_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *sec_pred, - 
const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_avg_sse_diff_8width_h_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter, + int32_t height, int32_t *diff) { int16_t filtval; uint32_t loop_cnt; v16u8 out, pred, filt0; @@ -1096,11 +1015,11 @@ static uint32_t sub_pixel_avg_sse_diff_8width_h_msa(const uint8_t *src, PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1); VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - vec0, vec1, vec2, vec3); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, + vec2, vec3); SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); - PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, - src0, src1, src2, src3); + PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1, + src2, src3); out = (v16u8)__msa_ilvev_d((v2i64)src1, (v2i64)src0); pred = LD_UB(sec_pred); @@ -1120,15 +1039,10 @@ static uint32_t sub_pixel_avg_sse_diff_8width_h_msa(const uint8_t *src, return HADD_SW_S32(var); } -static uint32_t subpel_avg_ssediff_16w_h_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter, - int32_t height, - int32_t *diff, - int32_t width) { +static uint32_t subpel_avg_ssediff_16w_h_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter, + int32_t height, int32_t *diff, int32_t width) { int16_t filtval; uint32_t loop_cnt; v16i8 src0, src1, src2, src3, src4, src5, src6, src7; @@ -1157,16 +1071,16 @@ static uint32_t subpel_avg_ssediff_16w_h_msa(const uint8_t *src, VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3); VSHF_B2_UH(src4, src4, src5, src5, mask, mask, vec4, vec5); 
VSHF_B2_UH(src6, src6, src7, src7, mask, mask, vec6, vec7); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - out0, out1, out2, out3); - DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, - out4, out5, out6, out7); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1, + out2, out3); + DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5, + out6, out7); SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS); SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS); - PCKEV_B4_UB(out1, out0, out3, out2, out5, out4, out7, out6, - tmp0, tmp1, tmp2, tmp3); - AVER_UB4_UB(tmp0, pred0, tmp1, pred1, tmp2, pred2, tmp3, pred3, - tmp0, tmp1, tmp2, tmp3); + PCKEV_B4_UB(out1, out0, out3, out2, out5, out4, out7, out6, tmp0, tmp1, + tmp2, tmp3); + AVER_UB4_UB(tmp0, pred0, tmp1, pred1, tmp2, pred2, tmp3, pred3, tmp0, tmp1, + tmp2, tmp3); CALC_MSE_AVG_B(tmp0, dst0, var, avg); CALC_MSE_AVG_B(tmp1, dst1, var, avg); @@ -1180,33 +1094,25 @@ static uint32_t subpel_avg_ssediff_16w_h_msa(const uint8_t *src, return HADD_SW_S32(var); } -static uint32_t sub_pixel_avg_sse_diff_16width_h_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_avg_sse_diff_16width_h_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter, + int32_t height, int32_t *diff) { return subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride, sec_pred, filter, height, diff, 16); } -static uint32_t sub_pixel_avg_sse_diff_32width_h_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_avg_sse_diff_32width_h_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + 
int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter, + int32_t height, int32_t *diff) { uint32_t loop_cnt, sse = 0; int32_t diff0[2]; for (loop_cnt = 0; loop_cnt < 2; ++loop_cnt) { - sse += subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride, - sec_pred, filter, height, - &diff0[loop_cnt], 32); + sse += + subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride, sec_pred, + filter, height, &diff0[loop_cnt], 32); src += 16; dst += 16; sec_pred += 16; @@ -1217,21 +1123,17 @@ static uint32_t sub_pixel_avg_sse_diff_32width_h_msa(const uint8_t *src, return sse; } -static uint32_t sub_pixel_avg_sse_diff_64width_h_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_avg_sse_diff_64width_h_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter, + int32_t height, int32_t *diff) { uint32_t loop_cnt, sse = 0; int32_t diff0[4]; for (loop_cnt = 0; loop_cnt < 4; ++loop_cnt) { - sse += subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride, - sec_pred, filter, height, - &diff0[loop_cnt], 64); + sse += + subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride, sec_pred, + filter, height, &diff0[loop_cnt], 64); src += 16; dst += 16; sec_pred += 16; @@ -1242,14 +1144,10 @@ static uint32_t sub_pixel_avg_sse_diff_64width_h_msa(const uint8_t *src, return sse; } -static uint32_t sub_pixel_avg_sse_diff_4width_v_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_avg_sse_diff_4width_v_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter, + int32_t height, int32_t *diff) { int16_t 
filtval; uint32_t loop_cnt; uint32_t ref0, ref1, ref2, ref3; @@ -1276,8 +1174,8 @@ static uint32_t sub_pixel_avg_sse_diff_4width_v_msa(const uint8_t *src, dst += (4 * dst_stride); INSERT_W4_UB(ref0, ref1, ref2, ref3, ref); - ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, - src10_r, src21_r, src32_r, src43_r); + ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r, + src32_r, src43_r); ILVR_D2_UB(src21_r, src10_r, src43_r, src32_r, src2110, src4332); DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1); SRARI_H2_UH(tmp0, tmp1, FILTER_BITS); @@ -1294,14 +1192,10 @@ static uint32_t sub_pixel_avg_sse_diff_4width_v_msa(const uint8_t *src, return HADD_SW_S32(var); } -static uint32_t sub_pixel_avg_sse_diff_8width_v_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_avg_sse_diff_8width_v_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter, + int32_t height, int32_t *diff) { int16_t filtval; uint32_t loop_cnt; v16u8 src0, src1, src2, src3, src4; @@ -1326,10 +1220,10 @@ static uint32_t sub_pixel_avg_sse_diff_8width_v_msa(const uint8_t *src, LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3); dst += (4 * dst_stride); PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1); - ILVR_B4_UH(src1, src0, src2, src1, src3, src2, src4, src3, - vec0, vec1, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, - tmp0, tmp1, tmp2, tmp3); + ILVR_B4_UH(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2, + vec3); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1, + tmp2, tmp3); SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS); PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, src0, src1); AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1); @@ -1345,15 +1239,10 @@ static uint32_t 
sub_pixel_avg_sse_diff_8width_v_msa(const uint8_t *src, return HADD_SW_S32(var); } -static uint32_t subpel_avg_ssediff_16w_v_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter, - int32_t height, - int32_t *diff, - int32_t width) { +static uint32_t subpel_avg_ssediff_16w_v_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter, + int32_t height, int32_t *diff, int32_t width) { int16_t filtval; uint32_t loop_cnt; v16u8 ref0, ref1, ref2, ref3; @@ -1401,8 +1290,8 @@ static uint32_t subpel_avg_ssediff_16w_v_msa(const uint8_t *src, LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3); dst += (4 * dst_stride); - AVER_UB4_UB(out0, pred0, out1, pred1, out2, pred2, out3, pred3, - out0, out1, out2, out3); + AVER_UB4_UB(out0, pred0, out1, pred1, out2, pred2, out3, pred3, out0, out1, + out2, out3); CALC_MSE_AVG_B(out0, ref0, var, avg); CALC_MSE_AVG_B(out1, ref1, var, avg); @@ -1416,33 +1305,25 @@ static uint32_t subpel_avg_ssediff_16w_v_msa(const uint8_t *src, return HADD_SW_S32(var); } -static uint32_t sub_pixel_avg_sse_diff_16width_v_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_avg_sse_diff_16width_v_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter, + int32_t height, int32_t *diff) { return subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride, sec_pred, filter, height, diff, 16); } -static uint32_t sub_pixel_avg_sse_diff_32width_v_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_avg_sse_diff_32width_v_msa( 
+ const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter, + int32_t height, int32_t *diff) { uint32_t loop_cnt, sse = 0; int32_t diff0[2]; for (loop_cnt = 0; loop_cnt < 2; ++loop_cnt) { - sse += subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride, - sec_pred, filter, height, - &diff0[loop_cnt], 32); + sse += + subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride, sec_pred, + filter, height, &diff0[loop_cnt], 32); src += 16; dst += 16; sec_pred += 16; @@ -1453,21 +1334,17 @@ static uint32_t sub_pixel_avg_sse_diff_32width_v_msa(const uint8_t *src, return sse; } -static uint32_t sub_pixel_avg_sse_diff_64width_v_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter, - int32_t height, - int32_t *diff) { +static uint32_t sub_pixel_avg_sse_diff_64width_v_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter, + int32_t height, int32_t *diff) { uint32_t loop_cnt, sse = 0; int32_t diff0[4]; for (loop_cnt = 0; loop_cnt < 4; ++loop_cnt) { - sse += subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride, - sec_pred, filter, height, - &diff0[loop_cnt], 64); + sse += + subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride, sec_pred, + filter, height, &diff0[loop_cnt], 64); src += 16; dst += 16; sec_pred += 16; @@ -1479,11 +1356,9 @@ static uint32_t sub_pixel_avg_sse_diff_64width_v_msa(const uint8_t *src, } static uint32_t sub_pixel_avg_sse_diff_4width_hv_msa( - const uint8_t *src, int32_t src_stride, - const uint8_t *dst, int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter_horiz, const uint8_t *filter_vert, - int32_t height, int32_t *diff) { + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz, + const 
uint8_t *filter_vert, int32_t height, int32_t *diff) { int16_t filtval; uint32_t loop_cnt; uint32_t ref0, ref1, ref2, ref3; @@ -1532,11 +1407,9 @@ static uint32_t sub_pixel_avg_sse_diff_4width_hv_msa( } static uint32_t sub_pixel_avg_sse_diff_8width_hv_msa( - const uint8_t *src, int32_t src_stride, - const uint8_t *dst, int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter_horiz, const uint8_t *filter_vert, - int32_t height, int32_t *diff) { + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz, + const uint8_t *filter_vert, int32_t height, int32_t *diff) { int16_t filtval; uint32_t loop_cnt; v16u8 ref0, ref1, ref2, ref3; @@ -1598,16 +1471,10 @@ static uint32_t sub_pixel_avg_sse_diff_8width_hv_msa( return HADD_SW_S32(var); } -static uint32_t subpel_avg_ssediff_16w_hv_msa(const uint8_t *src, - int32_t src_stride, - const uint8_t *dst, - int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter_horiz, - const uint8_t *filter_vert, - int32_t height, - int32_t *diff, - int32_t width) { +static uint32_t subpel_avg_ssediff_16w_hv_msa( + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz, + const uint8_t *filter_vert, int32_t height, int32_t *diff, int32_t width) { int16_t filtval; uint32_t loop_cnt; v16u8 src0, src1, src2, src3, src4, src5, src6, src7; @@ -1669,8 +1536,8 @@ static uint32_t subpel_avg_ssediff_16w_hv_msa(const uint8_t *src, LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3); dst += (4 * dst_stride); - AVER_UB4_UB(out0, pred0, out1, pred1, out2, pred2, out3, pred3, - out0, out1, out2, out3); + AVER_UB4_UB(out0, pred0, out1, pred1, out2, pred2, out3, pred3, out0, out1, + out2, out3); CALC_MSE_AVG_B(out0, ref0, var, avg); CALC_MSE_AVG_B(out1, ref1, var, avg); @@ -1685,22 +1552,18 @@ static uint32_t subpel_avg_ssediff_16w_hv_msa(const uint8_t *src, } static uint32_t 
sub_pixel_avg_sse_diff_16width_hv_msa( - const uint8_t *src, int32_t src_stride, - const uint8_t *dst, int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter_horiz, const uint8_t *filter_vert, - int32_t height, int32_t *diff) { + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz, + const uint8_t *filter_vert, int32_t height, int32_t *diff) { return subpel_avg_ssediff_16w_hv_msa(src, src_stride, dst, dst_stride, sec_pred, filter_horiz, filter_vert, height, diff, 16); } static uint32_t sub_pixel_avg_sse_diff_32width_hv_msa( - const uint8_t *src, int32_t src_stride, - const uint8_t *dst, int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter_horiz, const uint8_t *filter_vert, - int32_t height, int32_t *diff) { + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz, + const uint8_t *filter_vert, int32_t height, int32_t *diff) { uint32_t loop_cnt, sse = 0; int32_t diff0[2]; @@ -1719,11 +1582,9 @@ static uint32_t sub_pixel_avg_sse_diff_32width_hv_msa( } static uint32_t sub_pixel_avg_sse_diff_64width_hv_msa( - const uint8_t *src, int32_t src_stride, - const uint8_t *dst, int32_t dst_stride, - const uint8_t *sec_pred, - const uint8_t *filter_horiz, const uint8_t *filter_vert, - int32_t height, int32_t *diff) { + const uint8_t *src, int32_t src_stride, const uint8_t *dst, + int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz, + const uint8_t *filter_vert, int32_t height, int32_t *diff) { uint32_t loop_cnt, sse = 0; int32_t diff0[4]; @@ -1756,47 +1617,40 @@ static uint32_t sub_pixel_avg_sse_diff_64width_hv_msa( #define VARIANCE_64Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11); #define VARIANCE_64Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 12); -#define VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(wd, ht) \ -uint32_t vpx_sub_pixel_variance##wd##x##ht##_msa(const 
uint8_t *src, \ - int32_t src_stride, \ - int32_t xoffset, \ - int32_t yoffset, \ - const uint8_t *ref, \ - int32_t ref_stride, \ - uint32_t *sse) { \ - int32_t diff; \ - uint32_t var; \ - const uint8_t *h_filter = bilinear_filters_msa[xoffset]; \ - const uint8_t *v_filter = bilinear_filters_msa[yoffset]; \ - \ - if (yoffset) { \ - if (xoffset) { \ - *sse = sub_pixel_sse_diff_##wd##width_hv_msa(src, src_stride, \ - ref, ref_stride, \ - h_filter, v_filter, \ - ht, &diff); \ - } else { \ - *sse = sub_pixel_sse_diff_##wd##width_v_msa(src, src_stride, \ - ref, ref_stride, \ - v_filter, ht, &diff); \ - } \ - \ - var = VARIANCE_##wd##Wx##ht##H(*sse, diff); \ - } else { \ - if (xoffset) { \ - *sse = sub_pixel_sse_diff_##wd##width_h_msa(src, src_stride, \ - ref, ref_stride, \ - h_filter, ht, &diff); \ - \ - var = VARIANCE_##wd##Wx##ht##H(*sse, diff); \ - } else { \ - var = vpx_variance##wd##x##ht##_msa(src, src_stride, \ - ref, ref_stride, sse); \ - } \ - } \ - \ - return var; \ -} +#define VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(wd, ht) \ + uint32_t vpx_sub_pixel_variance##wd##x##ht##_msa( \ + const uint8_t *src, int32_t src_stride, int32_t xoffset, \ + int32_t yoffset, const uint8_t *ref, int32_t ref_stride, \ + uint32_t *sse) { \ + int32_t diff; \ + uint32_t var; \ + const uint8_t *h_filter = bilinear_filters_msa[xoffset]; \ + const uint8_t *v_filter = bilinear_filters_msa[yoffset]; \ + \ + if (yoffset) { \ + if (xoffset) { \ + *sse = sub_pixel_sse_diff_##wd##width_hv_msa( \ + src, src_stride, ref, ref_stride, h_filter, v_filter, ht, &diff); \ + } else { \ + *sse = sub_pixel_sse_diff_##wd##width_v_msa( \ + src, src_stride, ref, ref_stride, v_filter, ht, &diff); \ + } \ + \ + var = VARIANCE_##wd##Wx##ht##H(*sse, diff); \ + } else { \ + if (xoffset) { \ + *sse = sub_pixel_sse_diff_##wd##width_h_msa( \ + src, src_stride, ref, ref_stride, h_filter, ht, &diff); \ + \ + var = VARIANCE_##wd##Wx##ht##H(*sse, diff); \ + } else { \ + var = vpx_variance##wd##x##ht##_msa(src, src_stride, 
ref, ref_stride, \ + sse); \ + } \ + } \ + \ + return var; \ + } VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 4); VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 8); @@ -1817,42 +1671,37 @@ VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 32); VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 64); #define VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(wd, ht) \ -uint32_t vpx_sub_pixel_avg_variance##wd##x##ht##_msa( \ - const uint8_t *src_ptr, int32_t src_stride, \ - int32_t xoffset, int32_t yoffset, \ - const uint8_t *ref_ptr, int32_t ref_stride, \ - uint32_t *sse, const uint8_t *sec_pred) { \ - int32_t diff; \ - const uint8_t *h_filter = bilinear_filters_msa[xoffset]; \ - const uint8_t *v_filter = bilinear_filters_msa[yoffset]; \ + uint32_t vpx_sub_pixel_avg_variance##wd##x##ht##_msa( \ + const uint8_t *src_ptr, int32_t src_stride, int32_t xoffset, \ + int32_t yoffset, const uint8_t *ref_ptr, int32_t ref_stride, \ + uint32_t *sse, const uint8_t *sec_pred) { \ + int32_t diff; \ + const uint8_t *h_filter = bilinear_filters_msa[xoffset]; \ + const uint8_t *v_filter = bilinear_filters_msa[yoffset]; \ \ - if (yoffset) { \ - if (xoffset) { \ - *sse = sub_pixel_avg_sse_diff_##wd##width_hv_msa(src_ptr, src_stride, \ - ref_ptr, ref_stride, \ - sec_pred, h_filter, \ - v_filter, ht, &diff); \ + if (yoffset) { \ + if (xoffset) { \ + *sse = sub_pixel_avg_sse_diff_##wd##width_hv_msa( \ + src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter, \ + v_filter, ht, &diff); \ + } else { \ + *sse = sub_pixel_avg_sse_diff_##wd##width_v_msa( \ + src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, v_filter, ht, \ + &diff); \ + } \ } else { \ - *sse = sub_pixel_avg_sse_diff_##wd##width_v_msa(src_ptr, src_stride, \ - ref_ptr, ref_stride, \ - sec_pred, v_filter, \ - ht, &diff); \ + if (xoffset) { \ + *sse = sub_pixel_avg_sse_diff_##wd##width_h_msa( \ + src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter, ht, \ + &diff); \ + } else { \ + *sse = avg_sse_diff_##wd##width_msa(src_ptr, src_stride, ref_ptr, \ + ref_stride, sec_pred, ht, 
&diff); \ + } \ } \ - } else { \ - if (xoffset) { \ - *sse = sub_pixel_avg_sse_diff_##wd##width_h_msa(src_ptr, src_stride, \ - ref_ptr, ref_stride, \ - sec_pred, h_filter, \ - ht, &diff); \ - } else { \ - *sse = avg_sse_diff_##wd##width_msa(src_ptr, src_stride, \ - ref_ptr, ref_stride, \ - sec_pred, ht, &diff); \ - } \ - } \ \ - return VARIANCE_##wd##Wx##ht##H(*sse, diff); \ -} + return VARIANCE_##wd##Wx##ht##H(*sse, diff); \ + } VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 4); VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 8); @@ -1870,11 +1719,9 @@ VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 32); uint32_t vpx_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr, int32_t src_stride, - int32_t xoffset, - int32_t yoffset, + int32_t xoffset, int32_t yoffset, const uint8_t *ref_ptr, - int32_t ref_stride, - uint32_t *sse, + int32_t ref_stride, uint32_t *sse, const uint8_t *sec_pred) { int32_t diff; const uint8_t *h_filter = bilinear_filters_msa[xoffset]; @@ -1882,22 +1729,19 @@ uint32_t vpx_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr, if (yoffset) { if (xoffset) { - *sse = sub_pixel_avg_sse_diff_32width_hv_msa(src_ptr, src_stride, - ref_ptr, ref_stride, - sec_pred, h_filter, - v_filter, 64, &diff); + *sse = sub_pixel_avg_sse_diff_32width_hv_msa( + src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter, + v_filter, 64, &diff); } else { - *sse = sub_pixel_avg_sse_diff_32width_v_msa(src_ptr, src_stride, - ref_ptr, ref_stride, - sec_pred, v_filter, - 64, &diff); + *sse = sub_pixel_avg_sse_diff_32width_v_msa(src_ptr, src_stride, ref_ptr, + ref_stride, sec_pred, + v_filter, 64, &diff); } } else { if (xoffset) { - *sse = sub_pixel_avg_sse_diff_32width_h_msa(src_ptr, src_stride, - ref_ptr, ref_stride, - sec_pred, h_filter, - 64, &diff); + *sse = sub_pixel_avg_sse_diff_32width_h_msa(src_ptr, src_stride, ref_ptr, + ref_stride, sec_pred, + h_filter, 64, &diff); } else { *sse = avg_sse_diff_32x64_msa(src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, &diff); @@ -1907,46 
+1751,38 @@ uint32_t vpx_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr, return VARIANCE_32Wx64H(*sse, diff); } -#define VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(ht) \ -uint32_t vpx_sub_pixel_avg_variance64x##ht##_msa(const uint8_t *src_ptr, \ - int32_t src_stride, \ - int32_t xoffset, \ - int32_t yoffset, \ - const uint8_t *ref_ptr, \ - int32_t ref_stride, \ - uint32_t *sse, \ - const uint8_t *sec_pred) { \ - int32_t diff; \ - const uint8_t *h_filter = bilinear_filters_msa[xoffset]; \ - const uint8_t *v_filter = bilinear_filters_msa[yoffset]; \ - \ - if (yoffset) { \ - if (xoffset) { \ - *sse = sub_pixel_avg_sse_diff_64width_hv_msa(src_ptr, src_stride, \ - ref_ptr, ref_stride, \ - sec_pred, h_filter, \ - v_filter, ht, &diff); \ - } else { \ - *sse = sub_pixel_avg_sse_diff_64width_v_msa(src_ptr, src_stride, \ - ref_ptr, ref_stride, \ - sec_pred, v_filter, \ - ht, &diff); \ - } \ - } else { \ - if (xoffset) { \ - *sse = sub_pixel_avg_sse_diff_64width_h_msa(src_ptr, src_stride, \ - ref_ptr, ref_stride, \ - sec_pred, h_filter, \ - ht, &diff); \ - } else { \ - *sse = avg_sse_diff_64x##ht##_msa(src_ptr, src_stride, \ - ref_ptr, ref_stride, \ - sec_pred, &diff); \ - } \ - } \ - \ - return VARIANCE_64Wx##ht##H(*sse, diff); \ -} +#define VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(ht) \ + uint32_t vpx_sub_pixel_avg_variance64x##ht##_msa( \ + const uint8_t *src_ptr, int32_t src_stride, int32_t xoffset, \ + int32_t yoffset, const uint8_t *ref_ptr, int32_t ref_stride, \ + uint32_t *sse, const uint8_t *sec_pred) { \ + int32_t diff; \ + const uint8_t *h_filter = bilinear_filters_msa[xoffset]; \ + const uint8_t *v_filter = bilinear_filters_msa[yoffset]; \ + \ + if (yoffset) { \ + if (xoffset) { \ + *sse = sub_pixel_avg_sse_diff_64width_hv_msa( \ + src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter, \ + v_filter, ht, &diff); \ + } else { \ + *sse = sub_pixel_avg_sse_diff_64width_v_msa( \ + src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, v_filter, ht, \ + &diff); 
\ + } \ + } else { \ + if (xoffset) { \ + *sse = sub_pixel_avg_sse_diff_64width_h_msa( \ + src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter, ht, \ + &diff); \ + } else { \ + *sse = avg_sse_diff_64x##ht##_msa(src_ptr, src_stride, ref_ptr, \ + ref_stride, sec_pred, &diff); \ + } \ + } \ + \ + return VARIANCE_64Wx##ht##H(*sse, diff); \ + } VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(32); VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(64); diff --git a/libs/libvpx/vpx_dsp/mips/subtract_msa.c b/libs/libvpx/vpx_dsp/mips/subtract_msa.c index 9ac43c5cd5..391a7ebf66 100644 --- a/libs/libvpx/vpx_dsp/mips/subtract_msa.c +++ b/libs/libvpx/vpx_dsp/mips/subtract_msa.c @@ -68,8 +68,8 @@ static void sub_blk_16x16_msa(const uint8_t *src, int32_t src_stride, LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); src += (8 * src_stride); - LD_SB8(pred, pred_stride, - pred0, pred1, pred2, pred3, pred4, pred5, pred6, pred7); + LD_SB8(pred, pred_stride, pred0, pred1, pred2, pred3, pred4, pred5, pred6, + pred7); pred += (8 * pred_stride); ILVRL_B2_UB(src0, pred0, src_l0, src_l1); @@ -226,31 +226,31 @@ static void sub_blk_64x64_msa(const uint8_t *src, int32_t src_stride, } } -void vpx_subtract_block_msa(int32_t rows, int32_t cols, - int16_t *diff_ptr, ptrdiff_t diff_stride, - const uint8_t *src_ptr, ptrdiff_t src_stride, - const uint8_t *pred_ptr, ptrdiff_t pred_stride) { +void vpx_subtract_block_msa(int32_t rows, int32_t cols, int16_t *diff_ptr, + ptrdiff_t diff_stride, const uint8_t *src_ptr, + ptrdiff_t src_stride, const uint8_t *pred_ptr, + ptrdiff_t pred_stride) { if (rows == cols) { switch (rows) { case 4: - sub_blk_4x4_msa(src_ptr, src_stride, pred_ptr, pred_stride, - diff_ptr, diff_stride); + sub_blk_4x4_msa(src_ptr, src_stride, pred_ptr, pred_stride, diff_ptr, + diff_stride); break; case 8: - sub_blk_8x8_msa(src_ptr, src_stride, pred_ptr, pred_stride, - diff_ptr, diff_stride); + sub_blk_8x8_msa(src_ptr, src_stride, pred_ptr, pred_stride, diff_ptr, + 
diff_stride); break; case 16: - sub_blk_16x16_msa(src_ptr, src_stride, pred_ptr, pred_stride, - diff_ptr, diff_stride); + sub_blk_16x16_msa(src_ptr, src_stride, pred_ptr, pred_stride, diff_ptr, + diff_stride); break; case 32: - sub_blk_32x32_msa(src_ptr, src_stride, pred_ptr, pred_stride, - diff_ptr, diff_stride); + sub_blk_32x32_msa(src_ptr, src_stride, pred_ptr, pred_stride, diff_ptr, + diff_stride); break; case 64: - sub_blk_64x64_msa(src_ptr, src_stride, pred_ptr, pred_stride, - diff_ptr, diff_stride); + sub_blk_64x64_msa(src_ptr, src_stride, pred_ptr, pred_stride, diff_ptr, + diff_stride); break; default: vpx_subtract_block_c(rows, cols, diff_ptr, diff_stride, src_ptr, diff --git a/libs/libvpx/vpx_dsp/mips/txfm_macros_msa.h b/libs/libvpx/vpx_dsp/mips/txfm_macros_msa.h index 68c63d56f6..da100f6a98 100644 --- a/libs/libvpx/vpx_dsp/mips/txfm_macros_msa.h +++ b/libs/libvpx/vpx_dsp/mips/txfm_macros_msa.h @@ -13,81 +13,84 @@ #include "vpx_dsp/mips/macros_msa.h" -#define DOTP_CONST_PAIR(reg0, reg1, cnst0, cnst1, out0, out1) { \ - v8i16 k0_m = __msa_fill_h(cnst0); \ - v4i32 s0_m, s1_m, s2_m, s3_m; \ - \ - s0_m = (v4i32)__msa_fill_h(cnst1); \ - k0_m = __msa_ilvev_h((v8i16)s0_m, k0_m); \ - \ - ILVRL_H2_SW((-reg1), reg0, s1_m, s0_m); \ - ILVRL_H2_SW(reg0, reg1, s3_m, s2_m); \ - DOTP_SH2_SW(s1_m, s0_m, k0_m, k0_m, s1_m, s0_m); \ - SRARI_W2_SW(s1_m, s0_m, DCT_CONST_BITS); \ - out0 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m); \ - \ - DOTP_SH2_SW(s3_m, s2_m, k0_m, k0_m, s1_m, s0_m); \ - SRARI_W2_SW(s1_m, s0_m, DCT_CONST_BITS); \ - out1 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m); \ -} +#define DOTP_CONST_PAIR(reg0, reg1, cnst0, cnst1, out0, out1) \ + { \ + v8i16 k0_m = __msa_fill_h(cnst0); \ + v4i32 s0_m, s1_m, s2_m, s3_m; \ + \ + s0_m = (v4i32)__msa_fill_h(cnst1); \ + k0_m = __msa_ilvev_h((v8i16)s0_m, k0_m); \ + \ + ILVRL_H2_SW((-reg1), reg0, s1_m, s0_m); \ + ILVRL_H2_SW(reg0, reg1, s3_m, s2_m); \ + DOTP_SH2_SW(s1_m, s0_m, k0_m, k0_m, s1_m, s0_m); \ + SRARI_W2_SW(s1_m, s0_m, 
DCT_CONST_BITS); \ + out0 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m); \ + \ + DOTP_SH2_SW(s3_m, s2_m, k0_m, k0_m, s1_m, s0_m); \ + SRARI_W2_SW(s1_m, s0_m, DCT_CONST_BITS); \ + out1 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m); \ + } -#define DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7, \ - dst0, dst1, dst2, dst3) { \ - v4i32 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m; \ - v4i32 tp5_m, tp6_m, tp7_m, tp8_m, tp9_m; \ - \ - DOTP_SH4_SW(in0, in1, in0, in1, in4, in4, in5, in5, \ - tp0_m, tp2_m, tp3_m, tp4_m); \ - DOTP_SH4_SW(in2, in3, in2, in3, in6, in6, in7, in7, \ - tp5_m, tp6_m, tp7_m, tp8_m); \ - BUTTERFLY_4(tp0_m, tp3_m, tp7_m, tp5_m, tp1_m, tp9_m, tp7_m, tp5_m); \ - BUTTERFLY_4(tp2_m, tp4_m, tp8_m, tp6_m, tp3_m, tp0_m, tp4_m, tp2_m); \ - SRARI_W4_SW(tp1_m, tp9_m, tp7_m, tp5_m, DCT_CONST_BITS); \ - SRARI_W4_SW(tp3_m, tp0_m, tp4_m, tp2_m, DCT_CONST_BITS); \ - PCKEV_H4_SH(tp1_m, tp3_m, tp9_m, tp0_m, tp7_m, tp4_m, tp5_m, tp2_m, \ - dst0, dst1, dst2, dst3); \ -} +#define DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7, dst0, \ + dst1, dst2, dst3) \ + { \ + v4i32 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m; \ + v4i32 tp5_m, tp6_m, tp7_m, tp8_m, tp9_m; \ + \ + DOTP_SH4_SW(in0, in1, in0, in1, in4, in4, in5, in5, tp0_m, tp2_m, tp3_m, \ + tp4_m); \ + DOTP_SH4_SW(in2, in3, in2, in3, in6, in6, in7, in7, tp5_m, tp6_m, tp7_m, \ + tp8_m); \ + BUTTERFLY_4(tp0_m, tp3_m, tp7_m, tp5_m, tp1_m, tp9_m, tp7_m, tp5_m); \ + BUTTERFLY_4(tp2_m, tp4_m, tp8_m, tp6_m, tp3_m, tp0_m, tp4_m, tp2_m); \ + SRARI_W4_SW(tp1_m, tp9_m, tp7_m, tp5_m, DCT_CONST_BITS); \ + SRARI_W4_SW(tp3_m, tp0_m, tp4_m, tp2_m, DCT_CONST_BITS); \ + PCKEV_H4_SH(tp1_m, tp3_m, tp9_m, tp0_m, tp7_m, tp4_m, tp5_m, tp2_m, dst0, \ + dst1, dst2, dst3); \ + } -#define DOT_SHIFT_RIGHT_PCK_H(in0, in1, in2) ({ \ - v8i16 dst_m; \ - v4i32 tp0_m, tp1_m; \ - \ - DOTP_SH2_SW(in0, in1, in2, in2, tp1_m, tp0_m); \ - SRARI_W2_SW(tp1_m, tp0_m, DCT_CONST_BITS); \ - dst_m = __msa_pckev_h((v8i16)tp1_m, (v8i16)tp0_m); \ - \ - dst_m; \ -}) 
+#define DOT_SHIFT_RIGHT_PCK_H(in0, in1, in2) \ + ({ \ + v8i16 dst_m; \ + v4i32 tp0_m, tp1_m; \ + \ + DOTP_SH2_SW(in0, in1, in2, in2, tp1_m, tp0_m); \ + SRARI_W2_SW(tp1_m, tp0_m, DCT_CONST_BITS); \ + dst_m = __msa_pckev_h((v8i16)tp1_m, (v8i16)tp0_m); \ + \ + dst_m; \ + }) -#define MADD_SHORT(m0, m1, c0, c1, res0, res1) { \ - v4i32 madd0_m, madd1_m, madd2_m, madd3_m; \ - v8i16 madd_s0_m, madd_s1_m; \ - \ - ILVRL_H2_SH(m1, m0, madd_s0_m, madd_s1_m); \ - DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s0_m, madd_s1_m, \ - c0, c0, c1, c1, madd0_m, madd1_m, madd2_m, madd3_m); \ - SRARI_W4_SW(madd0_m, madd1_m, madd2_m, madd3_m, DCT_CONST_BITS); \ - PCKEV_H2_SH(madd1_m, madd0_m, madd3_m, madd2_m, res0, res1); \ -} +#define MADD_SHORT(m0, m1, c0, c1, res0, res1) \ + { \ + v4i32 madd0_m, madd1_m, madd2_m, madd3_m; \ + v8i16 madd_s0_m, madd_s1_m; \ + \ + ILVRL_H2_SH(m1, m0, madd_s0_m, madd_s1_m); \ + DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s0_m, madd_s1_m, c0, c0, c1, c1, \ + madd0_m, madd1_m, madd2_m, madd3_m); \ + SRARI_W4_SW(madd0_m, madd1_m, madd2_m, madd3_m, DCT_CONST_BITS); \ + PCKEV_H2_SH(madd1_m, madd0_m, madd3_m, madd2_m, res0, res1); \ + } -#define MADD_BF(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, \ - out0, out1, out2, out3) { \ - v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m; \ - v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m, m4_m, m5_m; \ - \ - ILVRL_H2_SH(inp1, inp0, madd_s0_m, madd_s1_m); \ - ILVRL_H2_SH(inp3, inp2, madd_s2_m, madd_s3_m); \ - DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m, \ - cst0, cst0, cst2, cst2, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ - BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m, \ - m4_m, m5_m, tmp3_m, tmp2_m); \ - SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \ - PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out0, out1); \ - DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m, \ - cst1, cst1, cst3, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ - BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m, \ - m4_m, m5_m, tmp3_m, tmp2_m); \ - 
SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \ - PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out2, out3); \ -} +#define MADD_BF(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, out0, out1, \ + out2, out3) \ + { \ + v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m; \ + v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m, m4_m, m5_m; \ + \ + ILVRL_H2_SH(inp1, inp0, madd_s0_m, madd_s1_m); \ + ILVRL_H2_SH(inp3, inp2, madd_s2_m, madd_s3_m); \ + DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m, cst0, cst0, cst2, \ + cst2, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ + BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m, m4_m, m5_m, tmp3_m, tmp2_m); \ + SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \ + PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out0, out1); \ + DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m, cst1, cst1, cst3, \ + cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ + BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m, m4_m, m5_m, tmp3_m, tmp2_m); \ + SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, DCT_CONST_BITS); \ + PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out2, out3); \ + } #endif // VPX_DSP_MIPS_TXFM_MACROS_MIPS_MSA_H_ diff --git a/libs/libvpx/vpx_dsp/mips/variance_msa.c b/libs/libvpx/vpx_dsp/mips/variance_msa.c index 33e175560f..085990e484 100644 --- a/libs/libvpx/vpx_dsp/mips/variance_msa.c +++ b/libs/libvpx/vpx_dsp/mips/variance_msa.c @@ -11,28 +11,29 @@ #include "./vpx_dsp_rtcd.h" #include "vpx_dsp/mips/macros_msa.h" -#define CALC_MSE_B(src, ref, var) { \ - v16u8 src_l0_m, src_l1_m; \ - v8i16 res_l0_m, res_l1_m; \ - \ - ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m); \ - HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m); \ - DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \ -} +#define CALC_MSE_B(src, ref, var) \ + { \ + v16u8 src_l0_m, src_l1_m; \ + v8i16 res_l0_m, res_l1_m; \ + \ + ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m); \ + HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m); \ + DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \ + } -#define 
CALC_MSE_AVG_B(src, ref, var, sub) { \ - v16u8 src_l0_m, src_l1_m; \ - v8i16 res_l0_m, res_l1_m; \ - \ - ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m); \ - HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m); \ - DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \ - \ - sub += res_l0_m + res_l1_m; \ -} +#define CALC_MSE_AVG_B(src, ref, var, sub) \ + { \ + v16u8 src_l0_m, src_l1_m; \ + v8i16 res_l0_m, res_l1_m; \ + \ + ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m); \ + HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m); \ + DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \ + \ + sub += res_l0_m + res_l1_m; \ + } -#define VARIANCE_WxH(sse, diff, shift) \ - sse - (((uint32_t)diff * diff) >> shift) +#define VARIANCE_WxH(sse, diff, shift) sse - (((uint32_t)diff * diff) >> shift) #define VARIANCE_LARGE_WxH(sse, diff, shift) \ sse - (((int64_t)diff * diff) >> shift) @@ -80,8 +81,8 @@ static uint32_t sse_diff_8width_msa(const uint8_t *src_ptr, int32_t src_stride, LD_UB4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3); ref_ptr += (4 * ref_stride); - PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, - src0, src1, ref0, ref1); + PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1, + ref0, ref1); CALC_MSE_AVG_B(src0, ref0, var, avg); CALC_MSE_AVG_B(src1, ref1, var, avg); } @@ -370,8 +371,8 @@ static uint32_t sse_8width_msa(const uint8_t *src_ptr, int32_t src_stride, LD_UB4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3); ref_ptr += (4 * ref_stride); - PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, - src0, src1, ref0, ref1); + PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1, + ref0, ref1); CALC_MSE_B(src0, ref0, var); CALC_MSE_B(src1, ref1, var); } @@ -526,19 +527,17 @@ uint32_t vpx_get4x4sse_cs_msa(const uint8_t *src_ptr, int32_t src_stride, #define VARIANCE_64Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11); #define VARIANCE_64Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 12); -#define 
VPX_VARIANCE_WDXHT_MSA(wd, ht) \ -uint32_t vpx_variance##wd##x##ht##_msa(const uint8_t *src, \ - int32_t src_stride, \ - const uint8_t *ref, \ - int32_t ref_stride, \ - uint32_t *sse) { \ - int32_t diff; \ - \ - *sse = sse_diff_##wd##width_msa(src, src_stride, ref, ref_stride, \ - ht, &diff); \ - \ - return VARIANCE_##wd##Wx##ht##H(*sse, diff); \ -} +#define VPX_VARIANCE_WDXHT_MSA(wd, ht) \ + uint32_t vpx_variance##wd##x##ht##_msa( \ + const uint8_t *src, int32_t src_stride, const uint8_t *ref, \ + int32_t ref_stride, uint32_t *sse) { \ + int32_t diff; \ + \ + *sse = \ + sse_diff_##wd##width_msa(src, src_stride, ref, ref_stride, ht, &diff); \ + \ + return VARIANCE_##wd##Wx##ht##H(*sse, diff); \ + } VPX_VARIANCE_WDXHT_MSA(4, 4); VPX_VARIANCE_WDXHT_MSA(4, 8); @@ -585,8 +584,7 @@ uint32_t vpx_variance64x64_msa(const uint8_t *src, int32_t src_stride, } uint32_t vpx_mse8x8_msa(const uint8_t *src, int32_t src_stride, - const uint8_t *ref, int32_t ref_stride, - uint32_t *sse) { + const uint8_t *ref, int32_t ref_stride, uint32_t *sse) { *sse = sse_8width_msa(src, src_stride, ref, ref_stride, 8); return *sse; @@ -617,17 +615,15 @@ uint32_t vpx_mse16x16_msa(const uint8_t *src, int32_t src_stride, } void vpx_get8x8var_msa(const uint8_t *src, int32_t src_stride, - const uint8_t *ref, int32_t ref_stride, - uint32_t *sse, int32_t *sum) { + const uint8_t *ref, int32_t ref_stride, uint32_t *sse, + int32_t *sum) { *sse = sse_diff_8width_msa(src, src_stride, ref, ref_stride, 8, sum); } void vpx_get16x16var_msa(const uint8_t *src, int32_t src_stride, - const uint8_t *ref, int32_t ref_stride, - uint32_t *sse, int32_t *sum) { + const uint8_t *ref, int32_t ref_stride, uint32_t *sse, + int32_t *sum) { *sse = sse_diff_16width_msa(src, src_stride, ref, ref_stride, 16, sum); } -uint32_t vpx_get_mb_ss_msa(const int16_t *src) { - return get_mb_ss_msa(src); -} +uint32_t vpx_get_mb_ss_msa(const int16_t *src) { return get_mb_ss_msa(src); } diff --git 
a/libs/libvpx/vpx_dsp/mips/vpx_convolve8_avg_horiz_msa.c b/libs/libvpx/vpx_dsp/mips/vpx_convolve8_avg_horiz_msa.c index f6244d834b..ad2af28669 100644 --- a/libs/libvpx/vpx_dsp/mips/vpx_convolve8_avg_horiz_msa.c +++ b/libs/libvpx/vpx_dsp/mips/vpx_convolve8_avg_horiz_msa.c @@ -13,8 +13,7 @@ #include "vpx_dsp/mips/vpx_convolve_msa.h" static void common_hz_8t_and_aver_dst_4x4_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, int8_t *filter) { v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; @@ -48,8 +47,7 @@ static void common_hz_8t_and_aver_dst_4x4_msa(const uint8_t *src, } static void common_hz_8t_and_aver_dst_4x8_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, int8_t *filter) { v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; @@ -92,10 +90,8 @@ static void common_hz_8t_and_aver_dst_4x8_msa(const uint8_t *src, } static void common_hz_8t_and_aver_dst_4w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter, + int32_t src_stride, uint8_t *dst, + int32_t dst_stride, int8_t *filter, int32_t height) { if (4 == height) { common_hz_8t_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, filter); @@ -105,10 +101,8 @@ static void common_hz_8t_and_aver_dst_4w_msa(const uint8_t *src, } static void common_hz_8t_and_aver_dst_8w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter, + int32_t src_stride, uint8_t *dst, + int32_t dst_stride, int8_t *filter, int32_t height) { int32_t loop_cnt; v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; @@ -136,18 +130,16 @@ static void common_hz_8t_and_aver_dst_8w_msa(const uint8_t *src, LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS); SAT_SH4_SH(out0, out1, out2, out3, 7); - CONVERT_UB_AVG_ST8x4_UB(out0, out1, out2, out3, dst0, dst1, 
dst2, dst3, - dst, dst_stride); + CONVERT_UB_AVG_ST8x4_UB(out0, out1, out2, out3, dst0, dst1, dst2, dst3, dst, + dst_stride); dst += (4 * dst_stride); } } static void common_hz_8t_and_aver_dst_16w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, - int8_t *filter, - int32_t height) { + int8_t *filter, int32_t height) { int32_t loop_cnt; v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; v16u8 mask0, mask1, mask2, mask3, dst0, dst1; @@ -199,11 +191,9 @@ static void common_hz_8t_and_aver_dst_16w_msa(const uint8_t *src, } static void common_hz_8t_and_aver_dst_32w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, - int8_t *filter, - int32_t height) { + int8_t *filter, int32_t height) { uint32_t loop_cnt; v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; v16u8 dst1, dst2, mask0, mask1, mask2, mask3; @@ -256,11 +246,9 @@ static void common_hz_8t_and_aver_dst_32w_msa(const uint8_t *src, } static void common_hz_8t_and_aver_dst_64w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, - int8_t *filter, - int32_t height) { + int8_t *filter, int32_t height) { uint32_t loop_cnt, cnt; v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3; v16u8 dst1, dst2, mask0, mask1, mask2, mask3; @@ -318,8 +306,7 @@ static void common_hz_8t_and_aver_dst_64w_msa(const uint8_t *src, } static void common_hz_2t_and_aver_dst_4x4_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, int8_t *filter) { v16i8 src0, src1, src2, src3, mask; @@ -344,8 +331,7 @@ static void common_hz_2t_and_aver_dst_4x4_msa(const uint8_t *src, } static void common_hz_2t_and_aver_dst_4x8_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, int8_t *filter) { v16i8 src0, src1, src2, 
src3, src4, src5, src6, src7, mask; @@ -378,10 +364,8 @@ static void common_hz_2t_and_aver_dst_4x8_msa(const uint8_t *src, } static void common_hz_2t_and_aver_dst_4w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter, + int32_t src_stride, uint8_t *dst, + int32_t dst_stride, int8_t *filter, int32_t height) { if (4 == height) { common_hz_2t_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, filter); @@ -391,8 +375,7 @@ static void common_hz_2t_and_aver_dst_4w_msa(const uint8_t *src, } static void common_hz_2t_and_aver_dst_8x4_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, int8_t *filter) { v16i8 src0, src1, src2, src3, mask; @@ -412,16 +395,13 @@ static void common_hz_2t_and_aver_dst_8x4_msa(const uint8_t *src, vec2, vec3); SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); - PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, - dst, dst_stride); + PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, dst, + dst_stride); } -static void common_hz_2t_and_aver_dst_8x8mult_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter, - int32_t height) { +static void common_hz_2t_and_aver_dst_8x8mult_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter, int32_t height) { v16i8 src0, src1, src2, src3, mask; v16u8 filt0, dst0, dst1, dst2, dst3; v8u16 vec0, vec1, vec2, vec3, filt; @@ -442,8 +422,8 @@ static void common_hz_2t_and_aver_dst_8x8mult_msa(const uint8_t *src, LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); LD_SB4(src, src_stride, src0, src1, src2, src3); src += (4 * src_stride); - PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, - dst, dst_stride); + PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, dst, + dst_stride); dst += (4 * dst_stride); 
VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); @@ -452,8 +432,8 @@ static void common_hz_2t_and_aver_dst_8x8mult_msa(const uint8_t *src, vec2, vec3); SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); - PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, - dst, dst_stride); + PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, dst, + dst_stride); dst += (4 * dst_stride); if (16 == height) { @@ -467,8 +447,8 @@ static void common_hz_2t_and_aver_dst_8x8mult_msa(const uint8_t *src, SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); LD_SB4(src, src_stride, src0, src1, src2, src3); - PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, - dst, dst_stride); + PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, dst, + dst_stride); dst += (4 * dst_stride); VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); @@ -477,16 +457,14 @@ static void common_hz_2t_and_aver_dst_8x8mult_msa(const uint8_t *src, vec2, vec3); SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); - PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, - dst, dst_stride); + PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3, dst, + dst_stride); } } static void common_hz_2t_and_aver_dst_8w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter, + int32_t src_stride, uint8_t *dst, + int32_t dst_stride, int8_t *filter, int32_t height) { if (4 == height) { common_hz_2t_and_aver_dst_8x4_msa(src, src_stride, dst, dst_stride, filter); @@ -497,11 +475,9 @@ static void common_hz_2t_and_aver_dst_8w_msa(const uint8_t *src, } static void common_hz_2t_and_aver_dst_16w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, - int8_t *filter, - int32_t height) { + int8_t 
*filter, int32_t height) { uint32_t loop_cnt; v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; v16u8 filt0, dst0, dst1, dst2, dst3; @@ -566,11 +542,9 @@ static void common_hz_2t_and_aver_dst_16w_msa(const uint8_t *src, } static void common_hz_2t_and_aver_dst_32w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, - int8_t *filter, - int32_t height) { + int8_t *filter, int32_t height) { uint32_t loop_cnt; v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; v16u8 filt0, dst0, dst1, dst2, dst3; @@ -617,11 +591,9 @@ static void common_hz_2t_and_aver_dst_32w_msa(const uint8_t *src, } static void common_hz_2t_and_aver_dst_64w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, - int8_t *filter, - int32_t height) { + int8_t *filter, int32_t height) { uint32_t loop_cnt; v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; v16u8 filt0, dst0, dst1, dst2, dst3; @@ -662,8 +634,8 @@ static void common_hz_2t_and_aver_dst_64w_msa(const uint8_t *src, void vpx_convolve8_avg_horiz_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { int8_t cnt, filt_hor[8]; assert(x_step_q4 == 16); @@ -676,67 +648,55 @@ void vpx_convolve8_avg_horiz_msa(const uint8_t *src, ptrdiff_t src_stride, if (((const int32_t *)filter_x)[0] == 0) { switch (w) { case 4: - common_hz_2t_and_aver_dst_4w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_hor[3], h); + common_hz_2t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_hor[3], h); break; case 8: - common_hz_2t_and_aver_dst_8w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_hor[3], h); + common_hz_2t_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst, + 
(int32_t)dst_stride, &filt_hor[3], h); break; case 16: - common_hz_2t_and_aver_dst_16w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_hor[3], h); + common_hz_2t_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_hor[3], h); break; case 32: - common_hz_2t_and_aver_dst_32w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_hor[3], h); + common_hz_2t_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_hor[3], h); break; case 64: - common_hz_2t_and_aver_dst_64w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_hor[3], h); + common_hz_2t_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_hor[3], h); break; default: - vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } else { switch (w) { case 4: - common_hz_8t_and_aver_dst_4w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_hor, h); + common_hz_8t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_hor, h); break; case 8: - common_hz_8t_and_aver_dst_8w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_hor, h); + common_hz_8t_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_hor, h); break; case 16: - common_hz_8t_and_aver_dst_16w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_hor, h); + common_hz_8t_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_hor, h); break; case 32: - common_hz_8t_and_aver_dst_32w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_hor, h); + common_hz_8t_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_hor, h); break; case 64: - common_hz_8t_and_aver_dst_64w_msa(src, (int32_t)src_stride, - dst, 
(int32_t)dst_stride, - filt_hor, h); + common_hz_8t_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_hor, h); break; default: - vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } diff --git a/libs/libvpx/vpx_dsp/mips/vpx_convolve8_avg_msa.c b/libs/libvpx/vpx_dsp/mips/vpx_convolve8_avg_msa.c index 2abde6de83..1cfa63201c 100644 --- a/libs/libvpx/vpx_dsp/mips/vpx_convolve8_avg_msa.c +++ b/libs/libvpx/vpx_dsp/mips/vpx_convolve8_avg_msa.c @@ -12,13 +12,9 @@ #include "./vpx_dsp_rtcd.h" #include "vpx_dsp/mips/vpx_convolve_msa.h" -static void common_hv_8ht_8vt_and_aver_dst_4w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter_horiz, - int8_t *filter_vert, - int32_t height) { +static void common_hv_8ht_8vt_and_aver_dst_4w_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter_horiz, int8_t *filter_vert, int32_t height) { uint32_t loop_cnt; v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; v16u8 dst0, dst1, dst2, dst3, mask0, mask1, mask2, mask3, tmp0, tmp1; @@ -64,15 +60,15 @@ static void common_hv_8ht_8vt_and_aver_dst_4w_msa(const uint8_t *src, src += (4 * src_stride); LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); - hz_out7 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); + hz_out7 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3, filt_hz0, + filt_hz1, filt_hz2, filt_hz3); hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8); vec3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); res0 = FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3, filt_vt0, filt_vt1, filt_vt2, filt_vt3); - hz_out9 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, 
filt_hz2, filt_hz3); + hz_out9 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3, filt_hz0, + filt_hz1, filt_hz2, filt_hz3); hz_out8 = (v8i16)__msa_sldi_b((v16i8)hz_out9, (v16i8)hz_out7, 8); vec4 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8); res1 = FILT_8TAP_DPADD_S_H(vec1, vec2, vec3, vec4, filt_vt0, filt_vt1, @@ -94,13 +90,9 @@ static void common_hv_8ht_8vt_and_aver_dst_4w_msa(const uint8_t *src, } } -static void common_hv_8ht_8vt_and_aver_dst_8w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter_horiz, - int8_t *filter_vert, - int32_t height) { +static void common_hv_8ht_8vt_and_aver_dst_8w_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter_horiz, int8_t *filter_vert, int32_t height) { uint32_t loop_cnt; v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3; @@ -154,20 +146,20 @@ static void common_hv_8ht_8vt_and_aver_dst_8w_msa(const uint8_t *src, LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); - hz_out7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); + hz_out7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3, filt_hz0, + filt_hz1, filt_hz2, filt_hz3); out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); tmp0 = FILT_8TAP_DPADD_S_H(out0, out1, out2, out3, filt_vt0, filt_vt1, filt_vt2, filt_vt3); - hz_out8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); + hz_out8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3, filt_hz0, + filt_hz1, filt_hz2, filt_hz3); out7 = (v8i16)__msa_ilvev_b((v16i8)hz_out8, (v16i8)hz_out7); tmp1 = FILT_8TAP_DPADD_S_H(out4, out5, out6, out7, filt_vt0, filt_vt1, filt_vt2, filt_vt3); - hz_out9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); + hz_out9 = HORIZ_8TAP_FILT(src9, src9, mask0, 
mask1, mask2, mask3, filt_hz0, + filt_hz1, filt_hz2, filt_hz3); out8 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8); tmp2 = FILT_8TAP_DPADD_S_H(out1, out2, out3, out8, filt_vt0, filt_vt1, filt_vt2, filt_vt3); @@ -180,8 +172,8 @@ static void common_hv_8ht_8vt_and_aver_dst_8w_msa(const uint8_t *src, SRARI_H4_SH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS); SAT_SH4_SH(tmp0, tmp1, tmp2, tmp3, 7); - CONVERT_UB_AVG_ST8x4_UB(tmp0, tmp1, tmp2, tmp3, dst0, dst1, dst2, dst3, - dst, dst_stride); + CONVERT_UB_AVG_ST8x4_UB(tmp0, tmp1, tmp2, tmp3, dst0, dst1, dst2, dst3, dst, + dst_stride); dst += (4 * dst_stride); hz_out6 = hz_out10; @@ -194,13 +186,9 @@ static void common_hv_8ht_8vt_and_aver_dst_8w_msa(const uint8_t *src, } } -static void common_hv_8ht_8vt_and_aver_dst_16w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter_horiz, - int8_t *filter_vert, - int32_t height) { +static void common_hv_8ht_8vt_and_aver_dst_16w_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter_horiz, int8_t *filter_vert, int32_t height) { int32_t multiple8_cnt; for (multiple8_cnt = 2; multiple8_cnt--;) { common_hv_8ht_8vt_and_aver_dst_8w_msa(src, src_stride, dst, dst_stride, @@ -210,13 +198,9 @@ static void common_hv_8ht_8vt_and_aver_dst_16w_msa(const uint8_t *src, } } -static void common_hv_8ht_8vt_and_aver_dst_32w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter_horiz, - int8_t *filter_vert, - int32_t height) { +static void common_hv_8ht_8vt_and_aver_dst_32w_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter_horiz, int8_t *filter_vert, int32_t height) { int32_t multiple8_cnt; for (multiple8_cnt = 4; multiple8_cnt--;) { common_hv_8ht_8vt_and_aver_dst_8w_msa(src, src_stride, dst, dst_stride, @@ -226,13 +210,9 @@ static void common_hv_8ht_8vt_and_aver_dst_32w_msa(const uint8_t *src, } } -static void 
common_hv_8ht_8vt_and_aver_dst_64w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter_horiz, - int8_t *filter_vert, - int32_t height) { +static void common_hv_8ht_8vt_and_aver_dst_64w_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter_horiz, int8_t *filter_vert, int32_t height) { int32_t multiple8_cnt; for (multiple8_cnt = 8; multiple8_cnt--;) { common_hv_8ht_8vt_and_aver_dst_8w_msa(src, src_stride, dst, dst_stride, @@ -242,12 +222,9 @@ static void common_hv_8ht_8vt_and_aver_dst_64w_msa(const uint8_t *src, } } -static void common_hv_2ht_2vt_and_aver_dst_4x4_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter_horiz, - int8_t *filter_vert) { +static void common_hv_2ht_2vt_and_aver_dst_4x4_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter_horiz, int8_t *filter_vert) { v16i8 src0, src1, src2, src3, src4, mask; v16u8 filt_hz, filt_vt, vec0, vec1; v16u8 dst0, dst1, dst2, dst3, res0, res1; @@ -280,12 +257,9 @@ static void common_hv_2ht_2vt_and_aver_dst_4x4_msa(const uint8_t *src, ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride); } -static void common_hv_2ht_2vt_and_aver_dst_4x8_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter_horiz, - int8_t *filter_vert) { +static void common_hv_2ht_2vt_and_aver_dst_4x8_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter_horiz, int8_t *filter_vert) { v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, mask; v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3, res0, res1, res2, res3; v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; @@ -316,29 +290,25 @@ static void common_hv_2ht_2vt_and_aver_dst_4x8_msa(const uint8_t *src, hz_out7 = (v8u16)__msa_pckod_d((v2i64)hz_out8, (v2i64)hz_out6); LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, 
dst5, dst6, dst7); - ILVR_W4_UB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst2, - dst4, dst6); + ILVR_W4_UB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst2, dst4, + dst6); ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); ILVEV_B2_UB(hz_out4, hz_out5, hz_out6, hz_out7, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, - tmp0, tmp1, tmp2, tmp3); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, tmp0, + tmp1, tmp2, tmp3); SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS); - PCKEV_B4_UB(tmp0, tmp0, tmp1, tmp1, tmp2, tmp2, tmp3, tmp3, res0, res1, - res2, res3); - AVER_UB4_UB(res0, dst0, res1, dst2, res2, dst4, res3, dst6, res0, res1, - res2, res3); + PCKEV_B4_UB(tmp0, tmp0, tmp1, tmp1, tmp2, tmp2, tmp3, tmp3, res0, res1, res2, + res3); + AVER_UB4_UB(res0, dst0, res1, dst2, res2, dst4, res3, dst6, res0, res1, res2, + res3); ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride); dst += (4 * dst_stride); ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride); } -static void common_hv_2ht_2vt_and_aver_dst_4w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter_horiz, - int8_t *filter_vert, - int32_t height) { +static void common_hv_2ht_2vt_and_aver_dst_4w_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter_horiz, int8_t *filter_vert, int32_t height) { if (4 == height) { common_hv_2ht_2vt_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, filter_horiz, filter_vert); @@ -348,12 +318,9 @@ static void common_hv_2ht_2vt_and_aver_dst_4w_msa(const uint8_t *src, } } -static void common_hv_2ht_2vt_and_aver_dst_8x4_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter_horiz, - int8_t *filter_vert) { +static void common_hv_2ht_2vt_and_aver_dst_8x4_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter_horiz, int8_t 
*filter_vert) { v16i8 src0, src1, src2, src3, src4, mask; v16u8 filt_hz, filt_vt, dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3; v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3; @@ -390,17 +357,13 @@ static void common_hv_2ht_2vt_and_aver_dst_8x4_msa(const uint8_t *src, tmp3 = __msa_dotp_u_h(vec3, filt_vt); SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS); - PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, - dst, dst_stride); + PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, dst, + dst_stride); } -static void common_hv_2ht_2vt_and_aver_dst_8x8mult_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter_horiz, - int8_t *filter_vert, - int32_t height) { +static void common_hv_2ht_2vt_and_aver_dst_8x8mult_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter_horiz, int8_t *filter_vert, int32_t height) { uint32_t loop_cnt; v16i8 src0, src1, src2, src3, src4, mask; v16u8 filt_hz, filt_vt, vec0, dst0, dst1, dst2, dst3; @@ -445,36 +408,27 @@ static void common_hv_2ht_2vt_and_aver_dst_8x8mult_msa(const uint8_t *src, SRARI_H2_UH(tmp2, tmp3, FILTER_BITS); LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); - PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, - dst, dst_stride); + PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, dst, + dst_stride); dst += (4 * dst_stride); } } -static void common_hv_2ht_2vt_and_aver_dst_8w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter_horiz, - int8_t *filter_vert, - int32_t height) { +static void common_hv_2ht_2vt_and_aver_dst_8w_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter_horiz, int8_t *filter_vert, int32_t height) { if (4 == height) { common_hv_2ht_2vt_and_aver_dst_8x4_msa(src, src_stride, dst, dst_stride, filter_horiz, filter_vert); } else { - 
common_hv_2ht_2vt_and_aver_dst_8x8mult_msa(src, src_stride, dst, dst_stride, - filter_horiz, filter_vert, - height); + common_hv_2ht_2vt_and_aver_dst_8x8mult_msa( + src, src_stride, dst, dst_stride, filter_horiz, filter_vert, height); } } -static void common_hv_2ht_2vt_and_aver_dst_16w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter_horiz, - int8_t *filter_vert, - int32_t height) { +static void common_hv_2ht_2vt_and_aver_dst_16w_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter_horiz, int8_t *filter_vert, int32_t height) { uint32_t loop_cnt; v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; v16u8 filt_hz, filt_vt, vec0, vec1, dst0, dst1, dst2, dst3; @@ -536,13 +490,9 @@ static void common_hv_2ht_2vt_and_aver_dst_16w_msa(const uint8_t *src, } } -static void common_hv_2ht_2vt_and_aver_dst_32w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter_horiz, - int8_t *filter_vert, - int32_t height) { +static void common_hv_2ht_2vt_and_aver_dst_32w_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter_horiz, int8_t *filter_vert, int32_t height) { int32_t multiple8_cnt; for (multiple8_cnt = 2; multiple8_cnt--;) { common_hv_2ht_2vt_and_aver_dst_16w_msa(src, src_stride, dst, dst_stride, @@ -552,13 +502,9 @@ static void common_hv_2ht_2vt_and_aver_dst_32w_msa(const uint8_t *src, } } -static void common_hv_2ht_2vt_and_aver_dst_64w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter_horiz, - int8_t *filter_vert, - int32_t height) { +static void common_hv_2ht_2vt_and_aver_dst_64w_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter_horiz, int8_t *filter_vert, int32_t height) { int32_t multiple8_cnt; for (multiple8_cnt = 4; multiple8_cnt--;) { common_hv_2ht_2vt_and_aver_dst_16w_msa(src, 
src_stride, dst, dst_stride, @@ -571,8 +517,8 @@ static void common_hv_2ht_2vt_and_aver_dst_64w_msa(const uint8_t *src, void vpx_convolve8_avg_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { int8_t cnt, filt_hor[8], filt_ver[8]; assert(x_step_q4 == 16); @@ -589,72 +535,69 @@ void vpx_convolve8_avg_msa(const uint8_t *src, ptrdiff_t src_stride, ((const int32_t *)filter_y)[0] == 0) { switch (w) { case 4: - common_hv_2ht_2vt_and_aver_dst_4w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_hor[3], &filt_ver[3], h); + common_hv_2ht_2vt_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_hor[3], + &filt_ver[3], h); break; case 8: - common_hv_2ht_2vt_and_aver_dst_8w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_hor[3], &filt_ver[3], h); + common_hv_2ht_2vt_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_hor[3], + &filt_ver[3], h); break; case 16: - common_hv_2ht_2vt_and_aver_dst_16w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_hv_2ht_2vt_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_hor[3], &filt_ver[3], h); break; case 32: - common_hv_2ht_2vt_and_aver_dst_32w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_hv_2ht_2vt_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_hor[3], &filt_ver[3], h); break; case 64: - common_hv_2ht_2vt_and_aver_dst_64w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_hv_2ht_2vt_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_hor[3], &filt_ver[3], h); break; default: - vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_avg_c(src, 
src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } else if (((const int32_t *)filter_x)[0] == 0 || ((const int32_t *)filter_y)[0] == 0) { - vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, + filter_y, y_step_q4, w, h); } else { switch (w) { case 4: - common_hv_8ht_8vt_and_aver_dst_4w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_hor, filt_ver, h); + common_hv_8ht_8vt_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_hor, + filt_ver, h); break; case 8: - common_hv_8ht_8vt_and_aver_dst_8w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_hor, filt_ver, h); + common_hv_8ht_8vt_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_hor, + filt_ver, h); break; case 16: - common_hv_8ht_8vt_and_aver_dst_16w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_hor, filt_ver, h); + common_hv_8ht_8vt_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_hor, + filt_ver, h); break; case 32: - common_hv_8ht_8vt_and_aver_dst_32w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_hor, filt_ver, h); + common_hv_8ht_8vt_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_hor, + filt_ver, h); break; case 64: - common_hv_8ht_8vt_and_aver_dst_64w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_hor, filt_ver, h); + common_hv_8ht_8vt_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_hor, + filt_ver, h); break; default: - vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } diff --git 
a/libs/libvpx/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c b/libs/libvpx/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c index 0164e41aa1..146ce3b2f5 100644 --- a/libs/libvpx/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c +++ b/libs/libvpx/vpx_dsp/mips/vpx_convolve8_avg_vert_msa.c @@ -13,10 +13,8 @@ #include "vpx_dsp/mips/vpx_convolve_msa.h" static void common_vt_8t_and_aver_dst_4w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter, + int32_t src_stride, uint8_t *dst, + int32_t dst_stride, int8_t *filter, int32_t height) { uint32_t loop_cnt; v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; @@ -73,10 +71,8 @@ static void common_vt_8t_and_aver_dst_4w_msa(const uint8_t *src, } static void common_vt_8t_and_aver_dst_8w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter, + int32_t src_stride, uint8_t *dst, + int32_t dst_stride, int8_t *filter, int32_t height) { uint32_t loop_cnt; v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; @@ -106,18 +102,18 @@ static void common_vt_8t_and_aver_dst_8w_msa(const uint8_t *src, XORI_B4_128_SB(src7, src8, src9, src10); ILVR_B4_SB(src7, src6, src8, src7, src9, src8, src10, src9, src76_r, src87_r, src98_r, src109_r); - out0 = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0, - filt1, filt2, filt3); - out1 = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0, - filt1, filt2, filt3); - out2 = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0, - filt1, filt2, filt3); + out0 = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r, filt0, filt1, + filt2, filt3); + out1 = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r, filt0, filt1, + filt2, filt3); + out2 = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r, filt0, filt1, + filt2, filt3); out3 = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r, filt0, filt1, filt2, filt3); SRARI_H4_SH(out0, out1, out2, 
out3, FILTER_BITS); SAT_SH4_SH(out0, out1, out2, out3, 7); - CONVERT_UB_AVG_ST8x4_UB(out0, out1, out2, out3, dst0, dst1, dst2, dst3, - dst, dst_stride); + CONVERT_UB_AVG_ST8x4_UB(out0, out1, out2, out3, dst0, dst1, dst2, dst3, dst, + dst_stride); dst += (4 * dst_stride); src10_r = src54_r; @@ -130,13 +126,9 @@ static void common_vt_8t_and_aver_dst_8w_msa(const uint8_t *src, } } -static void common_vt_8t_and_aver_dst_16w_mult_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter, - int32_t height, - int32_t width) { +static void common_vt_8t_and_aver_dst_16w_mult_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter, int32_t height, int32_t width) { const uint8_t *src_tmp; uint8_t *dst_tmp; uint32_t loop_cnt, cnt; @@ -227,38 +219,31 @@ static void common_vt_8t_and_aver_dst_16w_mult_msa(const uint8_t *src, } static void common_vt_8t_and_aver_dst_16w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, - int8_t *filter, - int32_t height) { + int8_t *filter, int32_t height) { common_vt_8t_and_aver_dst_16w_mult_msa(src, src_stride, dst, dst_stride, filter, height, 16); } static void common_vt_8t_and_aver_dst_32w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, - int8_t *filter, - int32_t height) { + int8_t *filter, int32_t height) { common_vt_8t_and_aver_dst_16w_mult_msa(src, src_stride, dst, dst_stride, filter, height, 32); } static void common_vt_8t_and_aver_dst_64w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, - int8_t *filter, - int32_t height) { + int8_t *filter, int32_t height) { common_vt_8t_and_aver_dst_16w_mult_msa(src, src_stride, dst, dst_stride, filter, height, 64); } static void common_vt_2t_and_aver_dst_4x4_msa(const uint8_t *src, - int32_t src_stride, - uint8_t 
*dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, int8_t *filter) { v16i8 src0, src1, src2, src3, src4; @@ -292,8 +277,7 @@ static void common_vt_2t_and_aver_dst_4x4_msa(const uint8_t *src, } static void common_vt_2t_and_aver_dst_4x8_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, int8_t *filter) { v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; @@ -311,15 +295,15 @@ static void common_vt_2t_and_aver_dst_4x8_msa(const uint8_t *src, src8 = LD_SB(src); LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7); - ILVR_W4_UB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst1, - dst2, dst3); + ILVR_W4_UB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst1, dst2, + dst3); ILVR_D2_UB(dst1, dst0, dst3, dst2, dst0, dst1); ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r, src32_r, src43_r); ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r, src76_r, src87_r); - ILVR_D4_UB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, - src87_r, src76_r, src2110, src4332, src6554, src8776); + ILVR_D4_UB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, src87_r, + src76_r, src2110, src4332, src6554, src8776); DOTP_UB4_UH(src2110, src4332, src6554, src8776, filt0, filt0, filt0, filt0, tmp0, tmp1, tmp2, tmp3); SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS); @@ -331,10 +315,8 @@ static void common_vt_2t_and_aver_dst_4x8_msa(const uint8_t *src, } static void common_vt_2t_and_aver_dst_4w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter, + int32_t src_stride, uint8_t *dst, + int32_t dst_stride, int8_t *filter, int32_t height) { if (4 == height) { common_vt_2t_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, filter); @@ -344,8 +326,7 @@ static void common_vt_2t_and_aver_dst_4w_msa(const uint8_t *src, } static void common_vt_2t_and_aver_dst_8x4_msa(const uint8_t 
*src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, int8_t *filter) { v16u8 src0, src1, src2, src3, src4; @@ -364,16 +345,13 @@ static void common_vt_2t_and_aver_dst_8x4_msa(const uint8_t *src, DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1, tmp2, tmp3); SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS); - PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, - dst, dst_stride); + PCKEV_AVG_ST8x4_UB(tmp0, dst0, tmp1, dst1, tmp2, dst2, tmp3, dst3, dst, + dst_stride); } -static void common_vt_2t_and_aver_dst_8x8mult_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter, - int32_t height) { +static void common_vt_2t_and_aver_dst_8x8mult_msa( + const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, + int8_t *filter, int32_t height) { uint32_t loop_cnt; v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8; v16u8 dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8; @@ -393,22 +371,22 @@ static void common_vt_2t_and_aver_dst_8x8mult_msa(const uint8_t *src, src += (8 * src_stride); LD_UB8(dst, dst_stride, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8); - ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, - vec2, vec3); - ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5, - vec6, vec7); + ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2, + vec3); + ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5, vec6, + vec7); DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1, tmp2, tmp3); SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS); - PCKEV_AVG_ST8x4_UB(tmp0, dst1, tmp1, dst2, tmp2, dst3, tmp3, dst4, - dst, dst_stride); + PCKEV_AVG_ST8x4_UB(tmp0, dst1, tmp1, dst2, tmp2, dst3, tmp3, dst4, dst, + dst_stride); dst += (4 * dst_stride); DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, tmp0, tmp1, tmp2, tmp3); 
SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS); - PCKEV_AVG_ST8x4_UB(tmp0, dst5, tmp1, dst6, tmp2, dst7, tmp3, dst8, - dst, dst_stride); + PCKEV_AVG_ST8x4_UB(tmp0, dst5, tmp1, dst6, tmp2, dst7, tmp3, dst8, dst, + dst_stride); dst += (4 * dst_stride); src0 = src8; @@ -416,10 +394,8 @@ static void common_vt_2t_and_aver_dst_8x8mult_msa(const uint8_t *src, } static void common_vt_2t_and_aver_dst_8w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, - int32_t dst_stride, - int8_t *filter, + int32_t src_stride, uint8_t *dst, + int32_t dst_stride, int8_t *filter, int32_t height) { if (4 == height) { common_vt_2t_and_aver_dst_8x4_msa(src, src_stride, dst, dst_stride, filter); @@ -430,11 +406,9 @@ static void common_vt_2t_and_aver_dst_8w_msa(const uint8_t *src, } static void common_vt_2t_and_aver_dst_16w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, - int8_t *filter, - int32_t height) { + int8_t *filter, int32_t height) { uint32_t loop_cnt; v16u8 src0, src1, src2, src3, src4, dst0, dst1, dst2, dst3, filt0; v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; @@ -481,11 +455,9 @@ static void common_vt_2t_and_aver_dst_16w_msa(const uint8_t *src, } static void common_vt_2t_and_aver_dst_32w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, - int8_t *filter, - int32_t height) { + int8_t *filter, int32_t height) { uint32_t loop_cnt; v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9; v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; @@ -554,11 +526,9 @@ static void common_vt_2t_and_aver_dst_32w_msa(const uint8_t *src, } static void common_vt_2t_and_aver_dst_64w_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, - int8_t *filter, - int32_t height) { + int8_t *filter, int32_t height) { uint32_t loop_cnt; v16u8 src0, src1, src2, src3, src4, src5; 
v16u8 src6, src7, src8, src9, src10, src11, filt0; @@ -636,8 +606,8 @@ static void common_vt_2t_and_aver_dst_64w_msa(const uint8_t *src, void vpx_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { int8_t cnt, filt_ver[8]; assert(y_step_q4 == 16); @@ -650,68 +620,56 @@ void vpx_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride, if (((const int32_t *)filter_y)[0] == 0) { switch (w) { case 4: - common_vt_2t_and_aver_dst_4w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_ver[3], h); + common_vt_2t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_ver[3], h); break; case 8: - common_vt_2t_and_aver_dst_8w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_ver[3], h); + common_vt_2t_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_ver[3], h); break; case 16: - common_vt_2t_and_aver_dst_16w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_ver[3], h); + common_vt_2t_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_ver[3], h); break; case 32: - common_vt_2t_and_aver_dst_32w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_ver[3], h); + common_vt_2t_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_ver[3], h); break; case 64: - common_vt_2t_and_aver_dst_64w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_ver[3], h); + common_vt_2t_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_ver[3], h); break; default: - vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, 
y_step_q4, w, h); break; } } else { switch (w) { case 4: - common_vt_8t_and_aver_dst_4w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_ver, h); + common_vt_8t_and_aver_dst_4w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_ver, h); break; case 8: - common_vt_8t_and_aver_dst_8w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_ver, h); + common_vt_8t_and_aver_dst_8w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_ver, h); break; case 16: - common_vt_8t_and_aver_dst_16w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_ver, h); + common_vt_8t_and_aver_dst_16w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_ver, h); break; case 32: - common_vt_8t_and_aver_dst_32w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_ver, h); + common_vt_8t_and_aver_dst_32w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_ver, h); break; case 64: - common_vt_8t_and_aver_dst_64w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_ver, h); + common_vt_8t_and_aver_dst_64w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_ver, h); break; default: - vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } diff --git a/libs/libvpx/vpx_dsp/mips/vpx_convolve8_horiz_msa.c b/libs/libvpx/vpx_dsp/mips/vpx_convolve8_horiz_msa.c index dbd120b0d5..9e8bf7b519 100644 --- a/libs/libvpx/vpx_dsp/mips/vpx_convolve8_horiz_msa.c +++ b/libs/libvpx/vpx_dsp/mips/vpx_convolve8_horiz_msa.c @@ -325,7 +325,7 @@ static void common_hz_2t_4x4_msa(const uint8_t *src, int32_t src_stride, /* rearranging filter */ filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); + filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); LD_SB4(src, src_stride, src0, src1, src2, src3); VSHF_B2_UB(src0, src1, 
src2, src3, mask, mask, vec0, vec1); @@ -347,7 +347,7 @@ static void common_hz_2t_4x8_msa(const uint8_t *src, int32_t src_stride, /* rearranging filter */ filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); + filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); @@ -355,8 +355,8 @@ static void common_hz_2t_4x8_msa(const uint8_t *src, int32_t src_stride, DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5, vec6, vec7); SRARI_H4_UH(vec4, vec5, vec6, vec7, FILTER_BITS); - PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, - res2, res3); + PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2, + res3); ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride); dst += (4 * dst_stride); ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride); @@ -383,7 +383,7 @@ static void common_hz_2t_8x4_msa(const uint8_t *src, int32_t src_stride, /* rearranging filter */ filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); + filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); LD_SB4(src, src_stride, src0, src1, src2, src3); VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); @@ -406,7 +406,7 @@ static void common_hz_2t_8x8mult_msa(const uint8_t *src, int32_t src_stride, /* rearranging filter */ filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); + filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); LD_SB4(src, src_stride, src0, src1, src2, src3); src += (4 * src_stride); @@ -482,7 +482,7 @@ static void common_hz_2t_16w_msa(const uint8_t *src, int32_t src_stride, /* rearranging filter */ filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); + filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); LD_SB4(src, src_stride, src0, src2, src4, src6); LD_SB4(src + 8, src_stride, src1, src3, src5, src7); @@ -545,7 +545,7 @@ static void 
common_hz_2t_32w_msa(const uint8_t *src, int32_t src_stride, /* rearranging filter */ filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); + filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); for (loop_cnt = height >> 1; loop_cnt--;) { src0 = LD_SB(src); @@ -590,7 +590,7 @@ static void common_hz_2t_64w_msa(const uint8_t *src, int32_t src_stride, /* rearranging filter */ filt = LD_UH(filter); - filt0 = (v16u8) __msa_splati_h((v8i16) filt, 0); + filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0); for (loop_cnt = height; loop_cnt--;) { src0 = LD_SB(src); @@ -622,8 +622,8 @@ static void common_hz_2t_64w_msa(const uint8_t *src, int32_t src_stride, void vpx_convolve8_horiz_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { int8_t cnt, filt_hor[8]; assert(x_step_q4 == 16); @@ -636,67 +636,55 @@ void vpx_convolve8_horiz_msa(const uint8_t *src, ptrdiff_t src_stride, if (((const int32_t *)filter_x)[0] == 0) { switch (w) { case 4: - common_hz_2t_4w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_hz_2t_4w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, &filt_hor[3], h); break; case 8: - common_hz_2t_8w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_hz_2t_8w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, &filt_hor[3], h); break; case 16: - common_hz_2t_16w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_hz_2t_16w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, &filt_hor[3], h); break; case 32: - common_hz_2t_32w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_hz_2t_32w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, &filt_hor[3], h); break; case 64: - common_hz_2t_64w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_hz_2t_64w_msa(src, 
(int32_t)src_stride, dst, (int32_t)dst_stride, &filt_hor[3], h); break; default: - vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } else { switch (w) { case 4: - common_hz_8t_4w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_hz_8t_4w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, filt_hor, h); break; case 8: - common_hz_8t_8w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_hz_8t_8w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, filt_hor, h); break; case 16: - common_hz_8t_16w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_hz_8t_16w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, filt_hor, h); break; case 32: - common_hz_8t_32w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_hz_8t_32w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, filt_hor, h); break; case 64: - common_hz_8t_64w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_hz_8t_64w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, filt_hor, h); break; default: - vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } diff --git a/libs/libvpx/vpx_dsp/mips/vpx_convolve8_msa.c b/libs/libvpx/vpx_dsp/mips/vpx_convolve8_msa.c index 7546f13150..b16ec57886 100644 --- a/libs/libvpx/vpx_dsp/mips/vpx_convolve8_msa.c +++ b/libs/libvpx/vpx_dsp/mips/vpx_convolve8_msa.c @@ -69,15 +69,15 @@ static void common_hv_8ht_8vt_4w_msa(const uint8_t *src, int32_t src_stride, XORI_B4_128_SB(src7, src8, src9, src10); src += (4 * src_stride); - hz_out7 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); + 
hz_out7 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3, filt_hz0, + filt_hz1, filt_hz2, filt_hz3); hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8); out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); tmp0 = FILT_8TAP_DPADD_S_H(out0, out1, out2, out3, filt_vt0, filt_vt1, filt_vt2, filt_vt3); - hz_out9 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); + hz_out9 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3, filt_hz0, + filt_hz1, filt_hz2, filt_hz3); hz_out8 = (v8i16)__msa_sldi_b((v16i8)hz_out9, (v16i8)hz_out7, 8); out4 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8); tmp1 = FILT_8TAP_DPADD_S_H(out1, out2, out3, out4, filt_vt0, filt_vt1, @@ -151,20 +151,20 @@ static void common_hv_8ht_8vt_8w_msa(const uint8_t *src, int32_t src_stride, XORI_B4_128_SB(src7, src8, src9, src10); - hz_out7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); + hz_out7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3, filt_hz0, + filt_hz1, filt_hz2, filt_hz3); out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); tmp0 = FILT_8TAP_DPADD_S_H(out0, out1, out2, out3, filt_vt0, filt_vt1, filt_vt2, filt_vt3); - hz_out8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); + hz_out8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3, filt_hz0, + filt_hz1, filt_hz2, filt_hz3); out7 = (v8i16)__msa_ilvev_b((v16i8)hz_out8, (v16i8)hz_out7); tmp1 = FILT_8TAP_DPADD_S_H(out4, out5, out6, out7, filt_vt0, filt_vt1, filt_vt2, filt_vt3); - hz_out9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3, - filt_hz0, filt_hz1, filt_hz2, filt_hz3); + hz_out9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3, filt_hz0, + filt_hz1, filt_hz2, filt_hz3); out8 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8); tmp2 = FILT_8TAP_DPADD_S_H(out1, out2, out3, out8, filt_vt0, filt_vt1, 
filt_vt2, filt_vt3); @@ -295,11 +295,11 @@ static void common_hv_2ht_2vt_4x8_msa(const uint8_t *src, int32_t src_stride, ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); ILVEV_B2_UB(hz_out4, hz_out5, hz_out6, hz_out7, vec2, vec3); - DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, - vec4, vec5, vec6, vec7); + DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, vec4, + vec5, vec6, vec7); SRARI_H4_UH(vec4, vec5, vec6, vec7, FILTER_BITS); - PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, - res2, res3); + PCKEV_B4_SB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2, + res3); ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride); dst += (4 * dst_stride); ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride); @@ -361,12 +361,10 @@ static void common_hv_2ht_2vt_8x4_msa(const uint8_t *src, int32_t src_stride, } static void common_hv_2ht_2vt_8x8mult_msa(const uint8_t *src, - int32_t src_stride, - uint8_t *dst, + int32_t src_stride, uint8_t *dst, int32_t dst_stride, int8_t *filter_horiz, - int8_t *filter_vert, - int32_t height) { + int8_t *filter_vert, int32_t height) { uint32_t loop_cnt; v16i8 src0, src1, src2, src3, src4, mask, out0, out1; v16u8 filt_hz, filt_vt, vec0; @@ -542,11 +540,10 @@ static void common_hv_2ht_2vt_64w_msa(const uint8_t *src, int32_t src_stride, } } -void vpx_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int32_t x_step_q4, - const int16_t *filter_y, int32_t y_step_q4, - int32_t w, int32_t h) { +void vpx_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const int16_t *filter_x, + int32_t x_step_q4, const int16_t *filter_y, + int32_t y_step_q4, int32_t w, int32_t h) { int8_t cnt, filt_hor[8], filt_ver[8]; assert(x_step_q4 == 16); @@ -563,72 +560,69 @@ void vpx_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride, ((const int32_t *)filter_y)[0] == 0) 
{ switch (w) { case 4: - common_hv_2ht_2vt_4w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_hor[3], &filt_ver[3], (int32_t)h); + common_hv_2ht_2vt_4w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_hor[3], + &filt_ver[3], (int32_t)h); break; case 8: - common_hv_2ht_2vt_8w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_hor[3], &filt_ver[3], (int32_t)h); + common_hv_2ht_2vt_8w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_hor[3], + &filt_ver[3], (int32_t)h); break; case 16: - common_hv_2ht_2vt_16w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_hor[3], &filt_ver[3], (int32_t)h); + common_hv_2ht_2vt_16w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_hor[3], + &filt_ver[3], (int32_t)h); break; case 32: - common_hv_2ht_2vt_32w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_hor[3], &filt_ver[3], (int32_t)h); + common_hv_2ht_2vt_32w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_hor[3], + &filt_ver[3], (int32_t)h); break; case 64: - common_hv_2ht_2vt_64w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - &filt_hor[3], &filt_ver[3], (int32_t)h); + common_hv_2ht_2vt_64w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, &filt_hor[3], + &filt_ver[3], (int32_t)h); break; default: - vpx_convolve8_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, + filter_y, y_step_q4, w, h); break; } } else if (((const int32_t *)filter_x)[0] == 0 || ((const int32_t *)filter_y)[0] == 0) { - vpx_convolve8_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, + filter_y, y_step_q4, w, h); } else { switch (w) { case 4: - common_hv_8ht_8vt_4w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_hor, filt_ver, 
(int32_t)h); + common_hv_8ht_8vt_4w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_hor, filt_ver, + (int32_t)h); break; case 8: - common_hv_8ht_8vt_8w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_hor, filt_ver, (int32_t)h); + common_hv_8ht_8vt_8w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_hor, filt_ver, + (int32_t)h); break; case 16: - common_hv_8ht_8vt_16w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_hor, filt_ver, (int32_t)h); + common_hv_8ht_8vt_16w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_hor, filt_ver, + (int32_t)h); break; case 32: - common_hv_8ht_8vt_32w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_hor, filt_ver, (int32_t)h); + common_hv_8ht_8vt_32w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_hor, filt_ver, + (int32_t)h); break; case 64: - common_hv_8ht_8vt_64w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, - filt_hor, filt_ver, (int32_t)h); + common_hv_8ht_8vt_64w_msa(src, (int32_t)src_stride, dst, + (int32_t)dst_stride, filt_hor, filt_ver, + (int32_t)h); break; default: - vpx_convolve8_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, + filter_y, y_step_q4, w, h); break; } } diff --git a/libs/libvpx/vpx_dsp/mips/vpx_convolve8_vert_msa.c b/libs/libvpx/vpx_dsp/mips/vpx_convolve8_vert_msa.c index 527d457199..410682271f 100644 --- a/libs/libvpx/vpx_dsp/mips/vpx_convolve8_vert_msa.c +++ b/libs/libvpx/vpx_dsp/mips/vpx_convolve8_vert_msa.c @@ -222,11 +222,11 @@ static void common_vt_8t_16w_mult_msa(const uint8_t *src, int32_t src_stride, LD_SB7(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6); XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6); src_tmp += (7 * src_stride); - ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, - src32_r, src54_r, src21_r); + 
ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r, + src54_r, src21_r); ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r); - ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_l, - src32_l, src54_l, src21_l); + ILVL_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_l, src32_l, + src54_l, src21_l); ILVL_B2_SB(src4, src3, src6, src5, src43_l, src65_l); for (loop_cnt = (height >> 2); loop_cnt--;) { @@ -344,8 +344,8 @@ static void common_vt_2t_4x8_msa(const uint8_t *src, int32_t src_stride, src32_r, src43_r); ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7, src54_r, src65_r, src76_r, src87_r); - ILVR_D4_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, - src87_r, src76_r, src2110, src4332, src6554, src8776); + ILVR_D4_SB(src21_r, src10_r, src43_r, src32_r, src65_r, src54_r, src87_r, + src76_r, src2110, src4332, src6554, src8776); DOTP_UB4_UH(src2110, src4332, src6554, src8776, filt0, filt0, filt0, filt0, tmp0, tmp1, tmp2, tmp3); SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS); @@ -407,10 +407,10 @@ static void common_vt_2t_8x8mult_msa(const uint8_t *src, int32_t src_stride, LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8); src += (8 * src_stride); - ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, - vec2, vec3); - ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5, - vec6, vec7); + ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2, + vec3); + ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, vec4, vec5, vec6, + vec7); DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1, tmp2, tmp3); SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS); @@ -629,8 +629,8 @@ static void common_vt_2t_64w_msa(const uint8_t *src, int32_t src_stride, void vpx_convolve8_vert_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const 
int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { int8_t cnt, filt_ver[8]; assert(y_step_q4 == 16); @@ -643,67 +643,55 @@ void vpx_convolve8_vert_msa(const uint8_t *src, ptrdiff_t src_stride, if (((const int32_t *)filter_y)[0] == 0) { switch (w) { case 4: - common_vt_2t_4w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_vt_2t_4w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, &filt_ver[3], h); break; case 8: - common_vt_2t_8w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_vt_2t_8w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, &filt_ver[3], h); break; case 16: - common_vt_2t_16w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_vt_2t_16w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, &filt_ver[3], h); break; case 32: - common_vt_2t_32w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_vt_2t_32w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, &filt_ver[3], h); break; case 64: - common_vt_2t_64w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_vt_2t_64w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, &filt_ver[3], h); break; default: - vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } else { switch (w) { case 4: - common_vt_8t_4w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_vt_8t_4w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, filt_ver, h); break; case 8: - common_vt_8t_8w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_vt_8t_8w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, filt_ver, h); break; case 16: - common_vt_8t_16w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_vt_8t_16w_msa(src, (int32_t)src_stride, dst, 
(int32_t)dst_stride, filt_ver, h); break; case 32: - common_vt_8t_32w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_vt_8t_32w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, filt_ver, h); break; case 64: - common_vt_8t_64w_msa(src, (int32_t)src_stride, - dst, (int32_t)dst_stride, + common_vt_8t_64w_msa(src, (int32_t)src_stride, dst, (int32_t)dst_stride, filt_ver, h); break; default: - vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, - filter_x, x_step_q4, filter_y, y_step_q4, - w, h); + vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, + x_step_q4, filter_y, y_step_q4, w, h); break; } } diff --git a/libs/libvpx/vpx_dsp/mips/vpx_convolve_avg_msa.c b/libs/libvpx/vpx_dsp/mips/vpx_convolve_avg_msa.c index 4c3d978031..45399bad85 100644 --- a/libs/libvpx/vpx_dsp/mips/vpx_convolve_avg_msa.c +++ b/libs/libvpx/vpx_dsp/mips/vpx_convolve_avg_msa.c @@ -10,8 +10,8 @@ #include "vpx_dsp/mips/macros_msa.h" -static void avg_width4_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, int32_t height) { +static void avg_width4_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, + int32_t dst_stride, int32_t height) { int32_t cnt; uint32_t out0, out1, out2, out3; v16u8 src0, src1, src2, src3; @@ -24,8 +24,8 @@ static void avg_width4_msa(const uint8_t *src, int32_t src_stride, LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); - AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, - dst0, dst1, dst2, dst3); + AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1, + dst2, dst3); out0 = __msa_copy_u_w((v4i32)dst0, 0); out1 = __msa_copy_u_w((v4i32)dst1, 0); @@ -53,8 +53,8 @@ static void avg_width4_msa(const uint8_t *src, int32_t src_stride, } } -static void avg_width8_msa(const uint8_t *src, int32_t src_stride, - uint8_t *dst, int32_t dst_stride, int32_t height) { +static void avg_width8_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, + int32_t dst_stride, int32_t height) { 
int32_t cnt; uint64_t out0, out1, out2, out3; v16u8 src0, src1, src2, src3; @@ -65,8 +65,8 @@ static void avg_width8_msa(const uint8_t *src, int32_t src_stride, src += (4 * src_stride); LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); - AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, - dst0, dst1, dst2, dst3); + AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1, + dst2, dst3); out0 = __msa_copy_u_d((v2i64)dst0, 0); out1 = __msa_copy_u_d((v2i64)dst1, 0); @@ -88,10 +88,10 @@ static void avg_width16_msa(const uint8_t *src, int32_t src_stride, src += (8 * src_stride); LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7); - AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, - dst0, dst1, dst2, dst3); - AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, - dst4, dst5, dst6, dst7); + AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1, + dst2, dst3); + AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, dst4, dst5, + dst6, dst7); ST_UB8(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst, dst_stride); dst += (8 * dst_stride); } @@ -120,14 +120,14 @@ static void avg_width32_msa(const uint8_t *src, int32_t src_stride, LD_UB4(dst_dup + 16, dst_stride, dst9, dst11, dst13, dst15); dst_dup += (4 * dst_stride); - AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, - dst0, dst1, dst2, dst3); - AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, - dst4, dst5, dst6, dst7); - AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11, - dst8, dst9, dst10, dst11); - AVER_UB4_UB(src12, dst12, src13, dst13, src14, dst14, src15, dst15, - dst12, dst13, dst14, dst15); + AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1, + dst2, dst3); + AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, dst4, dst5, + dst6, dst7); + AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11, dst8, dst9, + dst10, dst11); + AVER_UB4_UB(src12, dst12, src13, 
dst13, src14, dst14, src15, dst15, dst12, + dst13, dst14, dst15); ST_UB4(dst0, dst2, dst4, dst6, dst, dst_stride); ST_UB4(dst1, dst3, dst5, dst7, dst + 16, dst_stride); @@ -166,14 +166,14 @@ static void avg_width64_msa(const uint8_t *src, int32_t src_stride, LD_UB4(dst_dup, 16, dst12, dst13, dst14, dst15); dst_dup += dst_stride; - AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, - dst0, dst1, dst2, dst3); - AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, - dst4, dst5, dst6, dst7); - AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11, - dst8, dst9, dst10, dst11); - AVER_UB4_UB(src12, dst12, src13, dst13, src14, dst14, src15, dst15, - dst12, dst13, dst14, dst15); + AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1, + dst2, dst3); + AVER_UB4_UB(src4, dst4, src5, dst5, src6, dst6, src7, dst7, dst4, dst5, + dst6, dst7); + AVER_UB4_UB(src8, dst8, src9, dst9, src10, dst10, src11, dst11, dst8, dst9, + dst10, dst11); + AVER_UB4_UB(src12, dst12, src13, dst13, src14, dst14, src15, dst15, dst12, + dst13, dst14, dst15); ST_UB4(dst0, dst1, dst2, dst3, dst, 16); dst += dst_stride; diff --git a/libs/libvpx/vpx_dsp/mips/vpx_convolve_copy_msa.c b/libs/libvpx/vpx_dsp/mips/vpx_convolve_copy_msa.c index ba4012281e..c3d87a4ab8 100644 --- a/libs/libvpx/vpx_dsp/mips/vpx_convolve_copy_msa.c +++ b/libs/libvpx/vpx_dsp/mips/vpx_convolve_copy_msa.c @@ -105,12 +105,12 @@ static void copy_16multx8mult_msa(const uint8_t *src, int32_t src_stride, dst_tmp = dst; for (loop_cnt = (height >> 3); loop_cnt--;) { - LD_UB8(src_tmp, src_stride, - src0, src1, src2, src3, src4, src5, src6, src7); + LD_UB8(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6, + src7); src_tmp += (8 * src_stride); - ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, - dst_tmp, dst_stride); + ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst_tmp, + dst_stride); dst_tmp += (8 * dst_stride); } diff --git a/libs/libvpx/vpx_dsp/mips/vpx_convolve_msa.h 
b/libs/libvpx/vpx_dsp/mips/vpx_convolve_msa.h index e0013983ae..198c21ed20 100644 --- a/libs/libvpx/vpx_dsp/mips/vpx_convolve_msa.h +++ b/libs/libvpx/vpx_dsp/mips/vpx_convolve_msa.h @@ -16,104 +16,109 @@ extern const uint8_t mc_filt_mask_arr[16 * 3]; -#define FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3, \ - filt0, filt1, filt2, filt3) ({ \ - v8i16 tmp0, tmp1; \ - \ - tmp0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0); \ - tmp0 = __msa_dpadd_s_h(tmp0, (v16i8)vec1, (v16i8)filt1); \ - tmp1 = __msa_dotp_s_h((v16i8)vec2, (v16i8)filt2); \ - tmp1 = __msa_dpadd_s_h(tmp1, (v16i8)vec3, (v16i8)filt3); \ - tmp0 = __msa_adds_s_h(tmp0, tmp1); \ - \ - tmp0; \ -}) - -#define HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3, \ - filt_h0, filt_h1, filt_h2, filt_h3) ({ \ - v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \ - v8i16 hz_out_m; \ - \ - VSHF_B4_SB(src0, src1, mask0, mask1, mask2, mask3, \ - vec0_m, vec1_m, vec2_m, vec3_m); \ - hz_out_m = FILT_8TAP_DPADD_S_H(vec0_m, vec1_m, vec2_m, vec3_m, \ - filt_h0, filt_h1, filt_h2, filt_h3); \ - \ - hz_out_m = __msa_srari_h(hz_out_m, FILTER_BITS); \ - hz_out_m = __msa_sat_s_h(hz_out_m, 7); \ - \ - hz_out_m; \ -}) - -#define HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, \ - mask0, mask1, mask2, mask3, \ - filt0, filt1, filt2, filt3, \ - out0, out1) { \ - v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \ - v8i16 res0_m, res1_m, res2_m, res3_m; \ +#define FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3, filt0, filt1, filt2, \ + filt3) \ + ({ \ + v8i16 tmp0, tmp1; \ \ - VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \ - DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, res0_m, res1_m); \ - VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \ - DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, res0_m, res1_m); \ - VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m); \ - DOTP_SB2_SH(vec4_m, vec5_m, filt2, filt2, res2_m, res3_m); \ - VSHF_B2_SB(src0, src1, src2, src3, mask3, mask3, vec6_m, 
vec7_m); \ - DPADD_SB2_SH(vec6_m, vec7_m, filt3, filt3, res2_m, res3_m); \ - ADDS_SH2_SH(res0_m, res2_m, res1_m, res3_m, out0, out1); \ -} + tmp0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0); \ + tmp0 = __msa_dpadd_s_h(tmp0, (v16i8)vec1, (v16i8)filt1); \ + tmp1 = __msa_dotp_s_h((v16i8)vec2, (v16i8)filt2); \ + tmp1 = __msa_dpadd_s_h(tmp1, (v16i8)vec3, (v16i8)filt3); \ + tmp0 = __msa_adds_s_h(tmp0, tmp1); \ + \ + tmp0; \ + }) -#define HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, \ - mask0, mask1, mask2, mask3, \ - filt0, filt1, filt2, filt3, \ - out0, out1, out2, out3) { \ - v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \ - v8i16 res0_m, res1_m, res2_m, res3_m, res4_m, res5_m, res6_m, res7_m; \ - \ - VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, vec1_m); \ - VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \ - DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt0, filt0, filt0, filt0, \ - res0_m, res1_m, res2_m, res3_m); \ - VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec0_m, vec1_m); \ - VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2_m, vec3_m); \ - DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt2, filt2, filt2, filt2, \ - res4_m, res5_m, res6_m, res7_m); \ - VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec4_m, vec5_m); \ - VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec6_m, vec7_m); \ - DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt1, filt1, filt1, filt1, \ - res0_m, res1_m, res2_m, res3_m); \ - VSHF_B2_SB(src0, src0, src1, src1, mask3, mask3, vec4_m, vec5_m); \ - VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec6_m, vec7_m); \ - DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt3, filt3, filt3, filt3, \ - res4_m, res5_m, res6_m, res7_m); \ - ADDS_SH4_SH(res0_m, res4_m, res1_m, res5_m, res2_m, res6_m, res3_m, \ - res7_m, out0, out1, out2, out3); \ -} +#define HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3, filt_h0, \ + filt_h1, filt_h2, filt_h3) \ + ({ \ + v16i8 vec0_m, vec1_m, 
vec2_m, vec3_m; \ + v8i16 hz_out_m; \ + \ + VSHF_B4_SB(src0, src1, mask0, mask1, mask2, mask3, vec0_m, vec1_m, vec2_m, \ + vec3_m); \ + hz_out_m = FILT_8TAP_DPADD_S_H(vec0_m, vec1_m, vec2_m, vec3_m, filt_h0, \ + filt_h1, filt_h2, filt_h3); \ + \ + hz_out_m = __msa_srari_h(hz_out_m, FILTER_BITS); \ + hz_out_m = __msa_sat_s_h(hz_out_m, 7); \ + \ + hz_out_m; \ + }) -#define PCKEV_XORI128_AVG_ST_UB(in0, in1, dst, pdst) { \ - v16u8 tmp_m; \ - \ - tmp_m = PCKEV_XORI128_UB(in1, in0); \ - tmp_m = __msa_aver_u_b(tmp_m, (v16u8)dst); \ - ST_UB(tmp_m, (pdst)); \ -} +#define HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ + mask2, mask3, filt0, filt1, filt2, filt3, \ + out0, out1) \ + { \ + v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \ + v8i16 res0_m, res1_m, res2_m, res3_m; \ + \ + VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \ + DOTP_SB2_SH(vec0_m, vec1_m, filt0, filt0, res0_m, res1_m); \ + VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \ + DPADD_SB2_SH(vec2_m, vec3_m, filt1, filt1, res0_m, res1_m); \ + VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m); \ + DOTP_SB2_SH(vec4_m, vec5_m, filt2, filt2, res2_m, res3_m); \ + VSHF_B2_SB(src0, src1, src2, src3, mask3, mask3, vec6_m, vec7_m); \ + DPADD_SB2_SH(vec6_m, vec7_m, filt3, filt3, res2_m, res3_m); \ + ADDS_SH2_SH(res0_m, res2_m, res1_m, res3_m, out0, out1); \ + } -#define PCKEV_AVG_ST_UB(in0, in1, dst, pdst) { \ - v16u8 tmp_m; \ - \ - tmp_m = (v16u8)__msa_pckev_b((v16i8)in0, (v16i8)in1); \ - tmp_m = __msa_aver_u_b(tmp_m, (v16u8)dst); \ - ST_UB(tmp_m, (pdst)); \ -} +#define HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, \ + mask2, mask3, filt0, filt1, filt2, filt3, \ + out0, out1, out2, out3) \ + { \ + v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \ + v8i16 res0_m, res1_m, res2_m, res3_m, res4_m, res5_m, res6_m, res7_m; \ + \ + VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0_m, 
vec1_m); \ + VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \ + DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt0, filt0, filt0, filt0, \ + res0_m, res1_m, res2_m, res3_m); \ + VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec0_m, vec1_m); \ + VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2_m, vec3_m); \ + DOTP_SB4_SH(vec0_m, vec1_m, vec2_m, vec3_m, filt2, filt2, filt2, filt2, \ + res4_m, res5_m, res6_m, res7_m); \ + VSHF_B2_SB(src0, src0, src1, src1, mask1, mask1, vec4_m, vec5_m); \ + VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec6_m, vec7_m); \ + DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt1, filt1, filt1, filt1, \ + res0_m, res1_m, res2_m, res3_m); \ + VSHF_B2_SB(src0, src0, src1, src1, mask3, mask3, vec4_m, vec5_m); \ + VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec6_m, vec7_m); \ + DPADD_SB4_SH(vec4_m, vec5_m, vec6_m, vec7_m, filt3, filt3, filt3, filt3, \ + res4_m, res5_m, res6_m, res7_m); \ + ADDS_SH4_SH(res0_m, res4_m, res1_m, res5_m, res2_m, res6_m, res3_m, \ + res7_m, out0, out1, out2, out3); \ + } -#define PCKEV_AVG_ST8x4_UB(in1, dst0, in2, dst1, in3, dst2, in4, dst3, \ - pdst, stride) { \ - v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - uint8_t *pdst_m = (uint8_t *)(pdst); \ - \ - PCKEV_B2_UB(in2, in1, in4, in3, tmp0_m, tmp1_m); \ - PCKEV_D2_UB(dst1, dst0, dst3, dst2, tmp2_m, tmp3_m); \ - AVER_UB2_UB(tmp0_m, tmp2_m, tmp1_m, tmp3_m, tmp0_m, tmp1_m); \ - ST8x4_UB(tmp0_m, tmp1_m, pdst_m, stride); \ -} -#endif /* VPX_DSP_MIPS_VPX_CONVOLVE_MSA_H_ */ +#define PCKEV_XORI128_AVG_ST_UB(in0, in1, dst, pdst) \ + { \ + v16u8 tmp_m; \ + \ + tmp_m = PCKEV_XORI128_UB(in1, in0); \ + tmp_m = __msa_aver_u_b(tmp_m, (v16u8)dst); \ + ST_UB(tmp_m, (pdst)); \ + } + +#define PCKEV_AVG_ST_UB(in0, in1, dst, pdst) \ + { \ + v16u8 tmp_m; \ + \ + tmp_m = (v16u8)__msa_pckev_b((v16i8)in0, (v16i8)in1); \ + tmp_m = __msa_aver_u_b(tmp_m, (v16u8)dst); \ + ST_UB(tmp_m, (pdst)); \ + } + +#define PCKEV_AVG_ST8x4_UB(in1, dst0, in2, dst1, in3, dst2, in4, 
dst3, pdst, \ + stride) \ + { \ + v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ + uint8_t *pdst_m = (uint8_t *)(pdst); \ + \ + PCKEV_B2_UB(in2, in1, in4, in3, tmp0_m, tmp1_m); \ + PCKEV_D2_UB(dst1, dst0, dst3, dst2, tmp2_m, tmp3_m); \ + AVER_UB2_UB(tmp0_m, tmp2_m, tmp1_m, tmp3_m, tmp0_m, tmp1_m); \ + ST8x4_UB(tmp0_m, tmp1_m, pdst_m, stride); \ + } +#endif /* VPX_DSP_MIPS_VPX_CONVOLVE_MSA_H_ */ diff --git a/libs/libvpx/vp10/decoder/dsubexp.h b/libs/libvpx/vpx_dsp/postproc.h similarity index 59% rename from libs/libvpx/vp10/decoder/dsubexp.h rename to libs/libvpx/vpx_dsp/postproc.h index 1a7ed99104..43cb5c8e8d 100644 --- a/libs/libvpx/vp10/decoder/dsubexp.h +++ b/libs/libvpx/vpx_dsp/postproc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 The WebM project authors. All Rights Reserved. + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source @@ -8,20 +8,18 @@ * be found in the AUTHORS file in the root of the source tree. */ - -#ifndef VP10_DECODER_DSUBEXP_H_ -#define VP10_DECODER_DSUBEXP_H_ - -#include "vpx_dsp/bitreader.h" +#ifndef VPX_DSP_POSTPROC_H_ +#define VPX_DSP_POSTPROC_H_ #ifdef __cplusplus extern "C" { #endif -void vp10_diff_update_prob(vpx_reader *r, vpx_prob* p); +// Fills a noise buffer with gaussian noise strength determined by sigma. 
+int vpx_setup_noise(double sigma, int8_t *noise, int size); #ifdef __cplusplus -} // extern "C" +} #endif -#endif // VP10_DECODER_DSUBEXP_H_ +#endif // VPX_DSP_POSTPROC_H_ diff --git a/libs/libvpx/vpx_dsp/prob.c b/libs/libvpx/vpx_dsp/prob.c index 639d24dd2f..819e95062e 100644 --- a/libs/libvpx/vpx_dsp/prob.c +++ b/libs/libvpx/vpx_dsp/prob.c @@ -11,22 +11,16 @@ #include "./prob.h" const uint8_t vpx_norm[256] = { - 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0 }; static unsigned int tree_merge_probs_impl(unsigned int i, @@ -35,13 +29,13 @@ static unsigned int tree_merge_probs_impl(unsigned int i, const unsigned int *counts, vpx_prob *probs) { const int l = tree[i]; - const unsigned int left_count = (l <= 0) - ? counts[-l] - : tree_merge_probs_impl(l, tree, pre_probs, counts, probs); + const unsigned int left_count = + (l <= 0) ? counts[-l] + : tree_merge_probs_impl(l, tree, pre_probs, counts, probs); const int r = tree[i + 1]; - const unsigned int right_count = (r <= 0) - ? counts[-r] - : tree_merge_probs_impl(r, tree, pre_probs, counts, probs); + const unsigned int right_count = + (r <= 0) ? counts[-r] + : tree_merge_probs_impl(r, tree, pre_probs, counts, probs); const unsigned int ct[2] = { left_count, right_count }; probs[i >> 1] = mode_mv_merge_probs(pre_probs[i >> 1], ct); return left_count + right_count; diff --git a/libs/libvpx/vpx_dsp/prob.h b/libs/libvpx/vpx_dsp/prob.h index c3cb103ffb..3127a00bb9 100644 --- a/libs/libvpx/vpx_dsp/prob.h +++ b/libs/libvpx/vpx_dsp/prob.h @@ -24,11 +24,11 @@ typedef uint8_t vpx_prob; #define MAX_PROB 255 -#define vpx_prob_half ((vpx_prob) 128) +#define vpx_prob_half ((vpx_prob)128) typedef int8_t vpx_tree_index; -#define TREE_SIZE(leaf_count) (2 * (leaf_count) - 2) +#define TREE_SIZE(leaf_count) (2 * (leaf_count)-2) #define vpx_complement(x) (255 - x) @@ -47,11 +47,12 @@ static INLINE vpx_prob clip_prob(int p) { return (p > 255) ? 255 : (p < 1) ? 1 : p; } -static INLINE vpx_prob get_prob(int num, int den) { - return (den == 0) ? 
128u : clip_prob(((int64_t)num * 256 + (den >> 1)) / den); +static INLINE vpx_prob get_prob(unsigned int num, unsigned int den) { + if (den == 0) return 128u; + return clip_prob((int)(((int64_t)num * 256 + (den >> 1)) / den)); } -static INLINE vpx_prob get_binary_prob(int n0, int n1) { +static INLINE vpx_prob get_binary_prob(unsigned int n0, unsigned int n1) { return get_prob(n0, n0 + n1); } @@ -60,8 +61,7 @@ static INLINE vpx_prob weighted_prob(int prob1, int prob2, int factor) { return ROUND_POWER_OF_TWO(prob1 * (256 - factor) + prob2 * factor, 8); } -static INLINE vpx_prob merge_probs(vpx_prob pre_prob, - const unsigned int ct[2], +static INLINE vpx_prob merge_probs(vpx_prob pre_prob, const unsigned int ct[2], unsigned int count_sat, unsigned int max_update_factor) { const vpx_prob prob = get_binary_prob(ct[0], ct[1]); @@ -72,7 +72,7 @@ static INLINE vpx_prob merge_probs(vpx_prob pre_prob, // MODE_MV_MAX_UPDATE_FACTOR (128) * count / MODE_MV_COUNT_SAT; static const int count_to_update_factor[MODE_MV_COUNT_SAT + 1] = { - 0, 6, 12, 19, 25, 32, 38, 44, 51, 57, 64, + 0, 6, 12, 19, 25, 32, 38, 44, 51, 57, 64, 70, 76, 83, 89, 96, 102, 108, 115, 121, 128 }; @@ -84,8 +84,7 @@ static INLINE vpx_prob mode_mv_merge_probs(vpx_prob pre_prob, } else { const unsigned int count = VPXMIN(den, MODE_MV_COUNT_SAT); const unsigned int factor = count_to_update_factor[count]; - const vpx_prob prob = - clip_prob(((int64_t)(ct[0]) * 256 + (den >> 1)) / den); + const vpx_prob prob = get_prob(ct[0], den); return weighted_prob(pre_prob, prob, factor); } } @@ -93,7 +92,6 @@ static INLINE vpx_prob mode_mv_merge_probs(vpx_prob pre_prob, void vpx_tree_merge_probs(const vpx_tree_index *tree, const vpx_prob *pre_probs, const unsigned int *counts, vpx_prob *probs); - DECLARE_ALIGNED(16, extern const uint8_t, vpx_norm[256]); #ifdef __cplusplus diff --git a/libs/libvpx/vpx_dsp/psnr.c b/libs/libvpx/vpx_dsp/psnr.c new file mode 100644 index 0000000000..47afd4388a --- /dev/null +++ 
b/libs/libvpx/vpx_dsp/psnr.c @@ -0,0 +1,281 @@ +/* +* Copyright (c) 2016 The WebM project authors. All Rights Reserved. +* +* Use of this source code is governed by a BSD-style license +* that can be found in the LICENSE file in the root of the source +* tree. An additional intellectual property rights grant can be found +* in the file PATENTS. All contributing project authors may +* be found in the AUTHORS file in the root of the source tree. +*/ + +#include +#include +#include "./vpx_dsp_rtcd.h" +#include "vpx_dsp/psnr.h" +#include "vpx_scale/yv12config.h" + +double vpx_sse_to_psnr(double samples, double peak, double sse) { + if (sse > 0.0) { + const double psnr = 10.0 * log10(samples * peak * peak / sse); + return psnr > MAX_PSNR ? MAX_PSNR : psnr; + } else { + return MAX_PSNR; + } +} + +/* TODO(yaowu): The block_variance calls the unoptimized versions of variance() +* and highbd_8_variance(). It should not. +*/ +static void encoder_variance(const uint8_t *a, int a_stride, const uint8_t *b, + int b_stride, int w, int h, unsigned int *sse, + int *sum) { + int i, j; + + *sum = 0; + *sse = 0; + + for (i = 0; i < h; i++) { + for (j = 0; j < w; j++) { + const int diff = a[j] - b[j]; + *sum += diff; + *sse += diff * diff; + } + + a += a_stride; + b += b_stride; + } +} + +#if CONFIG_VP9_HIGHBITDEPTH +static void encoder_highbd_variance64(const uint8_t *a8, int a_stride, + const uint8_t *b8, int b_stride, int w, + int h, uint64_t *sse, int64_t *sum) { + int i, j; + + uint16_t *a = CONVERT_TO_SHORTPTR(a8); + uint16_t *b = CONVERT_TO_SHORTPTR(b8); + *sum = 0; + *sse = 0; + + for (i = 0; i < h; i++) { + for (j = 0; j < w; j++) { + const int diff = a[j] - b[j]; + *sum += diff; + *sse += diff * diff; + } + a += a_stride; + b += b_stride; + } +} + +static void encoder_highbd_8_variance(const uint8_t *a8, int a_stride, + const uint8_t *b8, int b_stride, int w, + int h, unsigned int *sse, int *sum) { + uint64_t sse_long = 0; + int64_t sum_long = 0; + 
encoder_highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, + &sum_long); + *sse = (unsigned int)sse_long; + *sum = (int)sum_long; +} +#endif // CONFIG_VP9_HIGHBITDEPTH + +static int64_t get_sse(const uint8_t *a, int a_stride, const uint8_t *b, + int b_stride, int width, int height) { + const int dw = width % 16; + const int dh = height % 16; + int64_t total_sse = 0; + unsigned int sse = 0; + int sum = 0; + int x, y; + + if (dw > 0) { + encoder_variance(&a[width - dw], a_stride, &b[width - dw], b_stride, dw, + height, &sse, &sum); + total_sse += sse; + } + + if (dh > 0) { + encoder_variance(&a[(height - dh) * a_stride], a_stride, + &b[(height - dh) * b_stride], b_stride, width - dw, dh, + &sse, &sum); + total_sse += sse; + } + + for (y = 0; y < height / 16; ++y) { + const uint8_t *pa = a; + const uint8_t *pb = b; + for (x = 0; x < width / 16; ++x) { + vpx_mse16x16(pa, a_stride, pb, b_stride, &sse); + total_sse += sse; + + pa += 16; + pb += 16; + } + + a += 16 * a_stride; + b += 16 * b_stride; + } + + return total_sse; +} + +#if CONFIG_VP9_HIGHBITDEPTH +static int64_t highbd_get_sse_shift(const uint8_t *a8, int a_stride, + const uint8_t *b8, int b_stride, int width, + int height, unsigned int input_shift) { + const uint16_t *a = CONVERT_TO_SHORTPTR(a8); + const uint16_t *b = CONVERT_TO_SHORTPTR(b8); + int64_t total_sse = 0; + int x, y; + for (y = 0; y < height; ++y) { + for (x = 0; x < width; ++x) { + int64_t diff; + diff = (a[x] >> input_shift) - (b[x] >> input_shift); + total_sse += diff * diff; + } + a += a_stride; + b += b_stride; + } + return total_sse; +} + +static int64_t highbd_get_sse(const uint8_t *a, int a_stride, const uint8_t *b, + int b_stride, int width, int height) { + int64_t total_sse = 0; + int x, y; + const int dw = width % 16; + const int dh = height % 16; + unsigned int sse = 0; + int sum = 0; + if (dw > 0) { + encoder_highbd_8_variance(&a[width - dw], a_stride, &b[width - dw], + b_stride, dw, height, &sse, &sum); + total_sse += sse; 
+ } + if (dh > 0) { + encoder_highbd_8_variance(&a[(height - dh) * a_stride], a_stride, + &b[(height - dh) * b_stride], b_stride, + width - dw, dh, &sse, &sum); + total_sse += sse; + } + for (y = 0; y < height / 16; ++y) { + const uint8_t *pa = a; + const uint8_t *pb = b; + for (x = 0; x < width / 16; ++x) { + vpx_highbd_8_mse16x16(pa, a_stride, pb, b_stride, &sse); + total_sse += sse; + pa += 16; + pb += 16; + } + a += 16 * a_stride; + b += 16 * b_stride; + } + return total_sse; +} +#endif // CONFIG_VP9_HIGHBITDEPTH + +int64_t vpx_get_y_sse(const YV12_BUFFER_CONFIG *a, + const YV12_BUFFER_CONFIG *b) { + assert(a->y_crop_width == b->y_crop_width); + assert(a->y_crop_height == b->y_crop_height); + + return get_sse(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride, + a->y_crop_width, a->y_crop_height); +} + +#if CONFIG_VP9_HIGHBITDEPTH +int64_t vpx_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a, + const YV12_BUFFER_CONFIG *b) { + assert(a->y_crop_width == b->y_crop_width); + assert(a->y_crop_height == b->y_crop_height); + assert((a->flags & YV12_FLAG_HIGHBITDEPTH) != 0); + assert((b->flags & YV12_FLAG_HIGHBITDEPTH) != 0); + + return highbd_get_sse(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride, + a->y_crop_width, a->y_crop_height); +} +#endif // CONFIG_VP9_HIGHBITDEPTH + +#if CONFIG_VP9_HIGHBITDEPTH +void vpx_calc_highbd_psnr(const YV12_BUFFER_CONFIG *a, + const YV12_BUFFER_CONFIG *b, PSNR_STATS *psnr, + uint32_t bit_depth, uint32_t in_bit_depth) { + const int widths[3] = { a->y_crop_width, a->uv_crop_width, a->uv_crop_width }; + const int heights[3] = { a->y_crop_height, a->uv_crop_height, + a->uv_crop_height }; + const uint8_t *a_planes[3] = { a->y_buffer, a->u_buffer, a->v_buffer }; + const int a_strides[3] = { a->y_stride, a->uv_stride, a->uv_stride }; + const uint8_t *b_planes[3] = { b->y_buffer, b->u_buffer, b->v_buffer }; + const int b_strides[3] = { b->y_stride, b->uv_stride, b->uv_stride }; + int i; + uint64_t total_sse = 0; + uint32_t total_samples = 0; 
+ const double peak = (double)((1 << in_bit_depth) - 1); + const unsigned int input_shift = bit_depth - in_bit_depth; + + for (i = 0; i < 3; ++i) { + const int w = widths[i]; + const int h = heights[i]; + const uint32_t samples = w * h; + uint64_t sse; + if (a->flags & YV12_FLAG_HIGHBITDEPTH) { + if (input_shift) { + sse = highbd_get_sse_shift(a_planes[i], a_strides[i], b_planes[i], + b_strides[i], w, h, input_shift); + } else { + sse = highbd_get_sse(a_planes[i], a_strides[i], b_planes[i], + b_strides[i], w, h); + } + } else { + sse = get_sse(a_planes[i], a_strides[i], b_planes[i], b_strides[i], w, h); + } + psnr->sse[1 + i] = sse; + psnr->samples[1 + i] = samples; + psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse); + + total_sse += sse; + total_samples += samples; + } + + psnr->sse[0] = total_sse; + psnr->samples[0] = total_samples; + psnr->psnr[0] = + vpx_sse_to_psnr((double)total_samples, peak, (double)total_sse); +} + +#endif // !CONFIG_VP9_HIGHBITDEPTH + +void vpx_calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b, + PSNR_STATS *psnr) { + static const double peak = 255.0; + const int widths[3] = { a->y_crop_width, a->uv_crop_width, a->uv_crop_width }; + const int heights[3] = { a->y_crop_height, a->uv_crop_height, + a->uv_crop_height }; + const uint8_t *a_planes[3] = { a->y_buffer, a->u_buffer, a->v_buffer }; + const int a_strides[3] = { a->y_stride, a->uv_stride, a->uv_stride }; + const uint8_t *b_planes[3] = { b->y_buffer, b->u_buffer, b->v_buffer }; + const int b_strides[3] = { b->y_stride, b->uv_stride, b->uv_stride }; + int i; + uint64_t total_sse = 0; + uint32_t total_samples = 0; + + for (i = 0; i < 3; ++i) { + const int w = widths[i]; + const int h = heights[i]; + const uint32_t samples = w * h; + const uint64_t sse = + get_sse(a_planes[i], a_strides[i], b_planes[i], b_strides[i], w, h); + psnr->sse[1 + i] = sse; + psnr->samples[1 + i] = samples; + psnr->psnr[1 + i] = vpx_sse_to_psnr(samples, peak, (double)sse); + + 
total_sse += sse; + total_samples += samples; + } + + psnr->sse[0] = total_sse; + psnr->samples[0] = total_samples; + psnr->psnr[0] = + vpx_sse_to_psnr((double)total_samples, peak, (double)total_sse); +} diff --git a/libs/libvpx/vpx_dsp/psnr.h b/libs/libvpx/vpx_dsp/psnr.h new file mode 100644 index 0000000000..f321131d0b --- /dev/null +++ b/libs/libvpx/vpx_dsp/psnr.h @@ -0,0 +1,57 @@ +/* +* Copyright (c) 2016 The WebM project authors. All Rights Reserved. +* +* Use of this source code is governed by a BSD-style license +* that can be found in the LICENSE file in the root of the source +* tree. An additional intellectual property rights grant can be found +* in the file PATENTS. All contributing project authors may +* be found in the AUTHORS file in the root of the source tree. +*/ + +#ifndef VPX_DSP_PSNR_H_ +#define VPX_DSP_PSNR_H_ + +#include "vpx_scale/yv12config.h" + +#define MAX_PSNR 100.0 + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + double psnr[4]; // total/y/u/v + uint64_t sse[4]; // total/y/u/v + uint32_t samples[4]; // total/y/u/v +} PSNR_STATS; + +// TODO(dkovalev) change vpx_sse_to_psnr signature: double -> int64_t + +/*!\brief Converts SSE to PSNR +* +* Converts sum of squared errros (SSE) to peak signal-to-noise ratio (PNSR). 
+* +* \param[in] samples Number of samples +* \param[in] peak Max sample value +* \param[in] sse Sum of squared errors +*/ +double vpx_sse_to_psnr(double samples, double peak, double sse); +int64_t vpx_get_y_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b); +#if CONFIG_VP9_HIGHBITDEPTH +int64_t vpx_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a, + const YV12_BUFFER_CONFIG *b); +void vpx_calc_highbd_psnr(const YV12_BUFFER_CONFIG *a, + const YV12_BUFFER_CONFIG *b, PSNR_STATS *psnr, + unsigned int bit_depth, unsigned int in_bit_depth); +#endif +void vpx_calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b, + PSNR_STATS *psnr); + +double vpx_psnrhvs(const YV12_BUFFER_CONFIG *source, + const YV12_BUFFER_CONFIG *dest, double *phvs_y, + double *phvs_u, double *phvs_v, uint32_t bd, uint32_t in_bd); + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // VPX_DSP_PSNR_H_ diff --git a/libs/libvpx/vpx_dsp/psnrhvs.c b/libs/libvpx/vpx_dsp/psnrhvs.c index 3001705791..b3910152c4 100644 --- a/libs/libvpx/vpx_dsp/psnrhvs.c +++ b/libs/libvpx/vpx_dsp/psnrhvs.c @@ -10,6 +10,7 @@ * This code was originally written by: Gregory Maxwell, at the Daala * project. 
*/ +#include #include #include #include @@ -18,90 +19,122 @@ #include "./vpx_dsp_rtcd.h" #include "vpx_dsp/ssim.h" #include "vpx_ports/system_state.h" +#include "vpx_dsp/psnr.h" #if !defined(M_PI) -# define M_PI (3.141592653589793238462643) +#define M_PI (3.141592653589793238462643) #endif #include static void od_bin_fdct8x8(tran_low_t *y, int ystride, const int16_t *x, int xstride) { - (void) xstride; + int i, j; + (void)xstride; vpx_fdct8x8(x, y, ystride); + for (i = 0; i < 8; i++) + for (j = 0; j < 8; j++) + *(y + ystride * i + j) = (*(y + ystride * i + j) + 4) >> 3; } +#if CONFIG_VP9_HIGHBITDEPTH +static void hbd_od_bin_fdct8x8(tran_low_t *y, int ystride, const int16_t *x, + int xstride) { + int i, j; + (void)xstride; + vpx_highbd_fdct8x8(x, y, ystride); + for (i = 0; i < 8; i++) + for (j = 0; j < 8; j++) + *(y + ystride * i + j) = (*(y + ystride * i + j) + 4) >> 3; +} +#endif /* Normalized inverse quantization matrix for 8x8 DCT at the point of * transparency. This is not the JPEG based matrix from the paper, this one gives a slightly higher MOS agreement.*/ -static const float csf_y[8][8] = { - {1.6193873005, 2.2901594831, 2.08509755623, 1.48366094411, 1.00227514334, - 0.678296995242, 0.466224900598, 0.3265091542}, - {2.2901594831, 1.94321815382, 2.04793073064, 1.68731108984, 1.2305666963, - 0.868920337363, 0.61280991668, 0.436405793551}, - {2.08509755623, 2.04793073064, 1.34329019223, 1.09205635862, 0.875748795257, - 0.670882927016, 0.501731932449, 0.372504254596}, - {1.48366094411, 1.68731108984, 1.09205635862, 0.772819797575, - 0.605636379554, 0.48309405692, 0.380429446972, 0.295774038565}, - {1.00227514334, 1.2305666963, 0.875748795257, 0.605636379554, - 0.448996256676, 0.352889268808, 0.283006984131, 0.226951348204}, - {0.678296995242, 0.868920337363, 0.670882927016, 0.48309405692, - 0.352889268808, 0.27032073436, 0.215017739696, 0.17408067321}, - {0.466224900598, 0.61280991668, 0.501731932449, 0.380429446972, - 0.283006984131, 0.215017739696, 
0.168869545842, 0.136153931001}, - {0.3265091542, 0.436405793551, 0.372504254596, 0.295774038565, - 0.226951348204, 0.17408067321, 0.136153931001, 0.109083846276}}; -static const float csf_cb420[8][8] = { - {1.91113096927, 2.46074210438, 1.18284184739, 1.14982565193, 1.05017074788, - 0.898018824055, 0.74725392039, 0.615105596242}, - {2.46074210438, 1.58529308355, 1.21363250036, 1.38190029285, 1.33100189972, - 1.17428548929, 0.996404342439, 0.830890433625}, - {1.18284184739, 1.21363250036, 0.978712413627, 1.02624506078, 1.03145147362, - 0.960060382087, 0.849823426169, 0.731221236837}, - {1.14982565193, 1.38190029285, 1.02624506078, 0.861317501629, - 0.801821139099, 0.751437590932, 0.685398513368, 0.608694761374}, - {1.05017074788, 1.33100189972, 1.03145147362, 0.801821139099, - 0.676555426187, 0.605503172737, 0.55002013668, 0.495804539034}, - {0.898018824055, 1.17428548929, 0.960060382087, 0.751437590932, - 0.605503172737, 0.514674450957, 0.454353482512, 0.407050308965}, - {0.74725392039, 0.996404342439, 0.849823426169, 0.685398513368, - 0.55002013668, 0.454353482512, 0.389234902883, 0.342353999733}, - {0.615105596242, 0.830890433625, 0.731221236837, 0.608694761374, - 0.495804539034, 0.407050308965, 0.342353999733, 0.295530605237}}; -static const float csf_cr420[8][8] = { - {2.03871978502, 2.62502345193, 1.26180942886, 1.11019789803, 1.01397751469, - 0.867069376285, 0.721500455585, 0.593906509971}, - {2.62502345193, 1.69112867013, 1.17180569821, 1.3342742857, 1.28513006198, - 1.13381474809, 0.962064122248, 0.802254508198}, - {1.26180942886, 1.17180569821, 0.944981930573, 0.990876405848, - 0.995903384143, 0.926972725286, 0.820534991409, 0.706020324706}, - {1.11019789803, 1.3342742857, 0.990876405848, 0.831632933426, 0.77418706195, - 0.725539939514, 0.661776842059, 0.587716619023}, - {1.01397751469, 1.28513006198, 0.995903384143, 0.77418706195, - 0.653238524286, 0.584635025748, 0.531064164893, 0.478717061273}, - {0.867069376285, 1.13381474809, 0.926972725286, 
0.725539939514, - 0.584635025748, 0.496936637883, 0.438694579826, 0.393021669543}, - {0.721500455585, 0.962064122248, 0.820534991409, 0.661776842059, - 0.531064164893, 0.438694579826, 0.375820256136, 0.330555063063}, - {0.593906509971, 0.802254508198, 0.706020324706, 0.587716619023, - 0.478717061273, 0.393021669543, 0.330555063063, 0.285345396658}}; +static const double csf_y[8][8] = { + { 1.6193873005, 2.2901594831, 2.08509755623, 1.48366094411, 1.00227514334, + 0.678296995242, 0.466224900598, 0.3265091542 }, + { 2.2901594831, 1.94321815382, 2.04793073064, 1.68731108984, 1.2305666963, + 0.868920337363, 0.61280991668, 0.436405793551 }, + { 2.08509755623, 2.04793073064, 1.34329019223, 1.09205635862, 0.875748795257, + 0.670882927016, 0.501731932449, 0.372504254596 }, + { 1.48366094411, 1.68731108984, 1.09205635862, 0.772819797575, 0.605636379554, + 0.48309405692, 0.380429446972, 0.295774038565 }, + { 1.00227514334, 1.2305666963, 0.875748795257, 0.605636379554, 0.448996256676, + 0.352889268808, 0.283006984131, 0.226951348204 }, + { 0.678296995242, 0.868920337363, 0.670882927016, 0.48309405692, + 0.352889268808, 0.27032073436, 0.215017739696, 0.17408067321 }, + { 0.466224900598, 0.61280991668, 0.501731932449, 0.380429446972, + 0.283006984131, 0.215017739696, 0.168869545842, 0.136153931001 }, + { 0.3265091542, 0.436405793551, 0.372504254596, 0.295774038565, + 0.226951348204, 0.17408067321, 0.136153931001, 0.109083846276 } +}; +static const double csf_cb420[8][8] = { + { 1.91113096927, 2.46074210438, 1.18284184739, 1.14982565193, 1.05017074788, + 0.898018824055, 0.74725392039, 0.615105596242 }, + { 2.46074210438, 1.58529308355, 1.21363250036, 1.38190029285, 1.33100189972, + 1.17428548929, 0.996404342439, 0.830890433625 }, + { 1.18284184739, 1.21363250036, 0.978712413627, 1.02624506078, 1.03145147362, + 0.960060382087, 0.849823426169, 0.731221236837 }, + { 1.14982565193, 1.38190029285, 1.02624506078, 0.861317501629, 0.801821139099, + 0.751437590932, 0.685398513368, 
0.608694761374 }, + { 1.05017074788, 1.33100189972, 1.03145147362, 0.801821139099, 0.676555426187, + 0.605503172737, 0.55002013668, 0.495804539034 }, + { 0.898018824055, 1.17428548929, 0.960060382087, 0.751437590932, + 0.605503172737, 0.514674450957, 0.454353482512, 0.407050308965 }, + { 0.74725392039, 0.996404342439, 0.849823426169, 0.685398513368, + 0.55002013668, 0.454353482512, 0.389234902883, 0.342353999733 }, + { 0.615105596242, 0.830890433625, 0.731221236837, 0.608694761374, + 0.495804539034, 0.407050308965, 0.342353999733, 0.295530605237 } +}; +static const double csf_cr420[8][8] = { + { 2.03871978502, 2.62502345193, 1.26180942886, 1.11019789803, 1.01397751469, + 0.867069376285, 0.721500455585, 0.593906509971 }, + { 2.62502345193, 1.69112867013, 1.17180569821, 1.3342742857, 1.28513006198, + 1.13381474809, 0.962064122248, 0.802254508198 }, + { 1.26180942886, 1.17180569821, 0.944981930573, 0.990876405848, + 0.995903384143, 0.926972725286, 0.820534991409, 0.706020324706 }, + { 1.11019789803, 1.3342742857, 0.990876405848, 0.831632933426, 0.77418706195, + 0.725539939514, 0.661776842059, 0.587716619023 }, + { 1.01397751469, 1.28513006198, 0.995903384143, 0.77418706195, 0.653238524286, + 0.584635025748, 0.531064164893, 0.478717061273 }, + { 0.867069376285, 1.13381474809, 0.926972725286, 0.725539939514, + 0.584635025748, 0.496936637883, 0.438694579826, 0.393021669543 }, + { 0.721500455585, 0.962064122248, 0.820534991409, 0.661776842059, + 0.531064164893, 0.438694579826, 0.375820256136, 0.330555063063 }, + { 0.593906509971, 0.802254508198, 0.706020324706, 0.587716619023, + 0.478717061273, 0.393021669543, 0.330555063063, 0.285345396658 } +}; -static double convert_score_db(double _score, double _weight) { - return 10 * (log10(255 * 255) - log10(_weight * _score)); +static double convert_score_db(double _score, double _weight, int bit_depth) { + int16_t pix_max = 255; + assert(_score * _weight >= 0.0); + if (bit_depth == 10) + pix_max = 1023; + else if (bit_depth == 
12) + pix_max = 4095; + + if (_weight * _score < pix_max * pix_max * 1e-10) return MAX_PSNR; + return 10 * (log10(pix_max * pix_max) - log10(_weight * _score)); } -static double calc_psnrhvs(const unsigned char *_src, int _systride, - const unsigned char *_dst, int _dystride, - double _par, int _w, int _h, int _step, - const float _csf[8][8]) { - float ret; +static double calc_psnrhvs(const unsigned char *src, int _systride, + const unsigned char *dst, int _dystride, double _par, + int _w, int _h, int _step, const double _csf[8][8], + uint32_t bit_depth, uint32_t _shift) { + double ret; + const uint8_t *_src8 = src; + const uint8_t *_dst8 = dst; + const uint16_t *_src16 = CONVERT_TO_SHORTPTR(src); + const uint16_t *_dst16 = CONVERT_TO_SHORTPTR(dst); int16_t dct_s[8 * 8], dct_d[8 * 8]; tran_low_t dct_s_coef[8 * 8], dct_d_coef[8 * 8]; - float mask[8][8]; + double mask[8][8]; int pixels; int x; int y; - (void) _par; + (void)_par; ret = pixels = 0; + /*In the PSNR-HVS-M paper[1] the authors describe the construction of their masking table as "we have used the quantization table for the color component Y of JPEG [6] that has been also obtained on the @@ -120,29 +153,34 @@ static double calc_psnrhvs(const unsigned char *_src, int _systride, Electronics VPQM-07, Scottsdale, Arizona, USA, 25-26 January, 2007, 4 p.*/ for (x = 0; x < 8; x++) for (y = 0; y < 8; y++) - mask[x][y] = (_csf[x][y] * 0.3885746225901003) - * (_csf[x][y] * 0.3885746225901003); + mask[x][y] = + (_csf[x][y] * 0.3885746225901003) * (_csf[x][y] * 0.3885746225901003); for (y = 0; y < _h - 7; y += _step) { for (x = 0; x < _w - 7; x += _step) { int i; int j; - float s_means[4]; - float d_means[4]; - float s_vars[4]; - float d_vars[4]; - float s_gmean = 0; - float d_gmean = 0; - float s_gvar = 0; - float d_gvar = 0; - float s_mask = 0; - float d_mask = 0; + double s_means[4]; + double d_means[4]; + double s_vars[4]; + double d_vars[4]; + double s_gmean = 0; + double d_gmean = 0; + double s_gvar = 0; + double 
d_gvar = 0; + double s_mask = 0; + double d_mask = 0; for (i = 0; i < 4; i++) s_means[i] = d_means[i] = s_vars[i] = d_vars[i] = 0; for (i = 0; i < 8; i++) { for (j = 0; j < 8; j++) { int sub = ((i & 12) >> 2) + ((j & 12) >> 1); - dct_s[i * 8 + j] = _src[(y + i) * _systride + (j + x)]; - dct_d[i * 8 + j] = _dst[(y + i) * _dystride + (j + x)]; + if (bit_depth == 8 && _shift == 0) { + dct_s[i * 8 + j] = _src8[(y + i) * _systride + (j + x)]; + dct_d[i * 8 + j] = _dst8[(y + i) * _dystride + (j + x)]; + } else if (bit_depth == 10 || bit_depth == 12) { + dct_s[i * 8 + j] = _src16[(y + i) * _systride + (j + x)] >> _shift; + dct_d[i * 8 + j] = _dst16[(y + i) * _dystride + (j + x)] >> _shift; + } s_gmean += dct_s[i * 8 + j]; d_gmean += dct_d[i * 8 + j]; s_means[sub] += dct_s[i * 8 + j]; @@ -151,33 +189,37 @@ static double calc_psnrhvs(const unsigned char *_src, int _systride, } s_gmean /= 64.f; d_gmean /= 64.f; - for (i = 0; i < 4; i++) - s_means[i] /= 16.f; - for (i = 0; i < 4; i++) - d_means[i] /= 16.f; + for (i = 0; i < 4; i++) s_means[i] /= 16.f; + for (i = 0; i < 4; i++) d_means[i] /= 16.f; for (i = 0; i < 8; i++) { for (j = 0; j < 8; j++) { int sub = ((i & 12) >> 2) + ((j & 12) >> 1); s_gvar += (dct_s[i * 8 + j] - s_gmean) * (dct_s[i * 8 + j] - s_gmean); d_gvar += (dct_d[i * 8 + j] - d_gmean) * (dct_d[i * 8 + j] - d_gmean); - s_vars[sub] += (dct_s[i * 8 + j] - s_means[sub]) - * (dct_s[i * 8 + j] - s_means[sub]); - d_vars[sub] += (dct_d[i * 8 + j] - d_means[sub]) - * (dct_d[i * 8 + j] - d_means[sub]); + s_vars[sub] += (dct_s[i * 8 + j] - s_means[sub]) * + (dct_s[i * 8 + j] - s_means[sub]); + d_vars[sub] += (dct_d[i * 8 + j] - d_means[sub]) * + (dct_d[i * 8 + j] - d_means[sub]); } } s_gvar *= 1 / 63.f * 64; d_gvar *= 1 / 63.f * 64; - for (i = 0; i < 4; i++) - s_vars[i] *= 1 / 15.f * 16; - for (i = 0; i < 4; i++) - d_vars[i] *= 1 / 15.f * 16; + for (i = 0; i < 4; i++) s_vars[i] *= 1 / 15.f * 16; + for (i = 0; i < 4; i++) d_vars[i] *= 1 / 15.f * 16; if (s_gvar > 0) s_gvar 
= (s_vars[0] + s_vars[1] + s_vars[2] + s_vars[3]) / s_gvar; if (d_gvar > 0) d_gvar = (d_vars[0] + d_vars[1] + d_vars[2] + d_vars[3]) / d_gvar; - od_bin_fdct8x8(dct_s_coef, 8, dct_s, 8); - od_bin_fdct8x8(dct_d_coef, 8, dct_d, 8); +#if CONFIG_VP9_HIGHBITDEPTH + if (bit_depth == 10 || bit_depth == 12) { + hbd_od_bin_fdct8x8(dct_s_coef, 8, dct_s, 8); + hbd_od_bin_fdct8x8(dct_d_coef, 8, dct_d, 8); + } +#endif + if (bit_depth == 8) { + od_bin_fdct8x8(dct_s_coef, 8, dct_s, 8); + od_bin_fdct8x8(dct_d_coef, 8, dct_d, 8); + } for (i = 0; i < 8; i++) for (j = (i == 0); j < 8; j++) s_mask += dct_s_coef[i * 8 + j] * dct_s_coef[i * 8 + j] * mask[i][j]; @@ -186,12 +228,11 @@ static double calc_psnrhvs(const unsigned char *_src, int _systride, d_mask += dct_d_coef[i * 8 + j] * dct_d_coef[i * 8 + j] * mask[i][j]; s_mask = sqrt(s_mask * s_gvar) / 32.f; d_mask = sqrt(d_mask * d_gvar) / 32.f; - if (d_mask > s_mask) - s_mask = d_mask; + if (d_mask > s_mask) s_mask = d_mask; for (i = 0; i < 8; i++) { for (j = 0; j < 8; j++) { - float err; - err = fabs((float)(dct_s_coef[i * 8 + j] - dct_d_coef[i * 8 + j])); + double err; + err = fabs((double)(dct_s_coef[i * 8 + j] - dct_d_coef[i * 8 + j])); if (i != 0 || j != 0) err = err < s_mask / mask[i][j] ? 
0 : err - s_mask / mask[i][j]; ret += (err * _csf[i][j]) * (err * _csf[i][j]); @@ -200,28 +241,35 @@ static double calc_psnrhvs(const unsigned char *_src, int _systride, } } } + if (pixels <= 0) return 0; ret /= pixels; return ret; } -double vpx_psnrhvs(const YV12_BUFFER_CONFIG *source, + +double vpx_psnrhvs(const YV12_BUFFER_CONFIG *src, const YV12_BUFFER_CONFIG *dest, double *y_psnrhvs, - double *u_psnrhvs, double *v_psnrhvs) { + double *u_psnrhvs, double *v_psnrhvs, uint32_t bd, + uint32_t in_bd) { double psnrhvs; const double par = 1.0; const int step = 7; + uint32_t bd_shift = 0; vpx_clear_system_state(); - *y_psnrhvs = calc_psnrhvs(source->y_buffer, source->y_stride, dest->y_buffer, - dest->y_stride, par, source->y_crop_width, - source->y_crop_height, step, csf_y); - *u_psnrhvs = calc_psnrhvs(source->u_buffer, source->uv_stride, dest->u_buffer, - dest->uv_stride, par, source->uv_crop_width, - source->uv_crop_height, step, csf_cb420); + assert(bd == 8 || bd == 10 || bd == 12); + assert(bd >= in_bd); - *v_psnrhvs = calc_psnrhvs(source->v_buffer, source->uv_stride, dest->v_buffer, - dest->uv_stride, par, source->uv_crop_width, - source->uv_crop_height, step, csf_cr420); + bd_shift = bd - in_bd; + + *y_psnrhvs = calc_psnrhvs(src->y_buffer, src->y_stride, dest->y_buffer, + dest->y_stride, par, src->y_crop_width, + src->y_crop_height, step, csf_y, bd, bd_shift); + *u_psnrhvs = calc_psnrhvs(src->u_buffer, src->uv_stride, dest->u_buffer, + dest->uv_stride, par, src->uv_crop_width, + src->uv_crop_height, step, csf_cb420, bd, bd_shift); + *v_psnrhvs = calc_psnrhvs(src->v_buffer, src->uv_stride, dest->v_buffer, + dest->uv_stride, par, src->uv_crop_width, + src->uv_crop_height, step, csf_cr420, bd, bd_shift); psnrhvs = (*y_psnrhvs) * .8 + .1 * ((*u_psnrhvs) + (*v_psnrhvs)); - - return convert_score_db(psnrhvs, 1.0); + return convert_score_db(psnrhvs, 1.0, in_bd); } diff --git a/libs/libvpx/vpx_dsp/quantize.c b/libs/libvpx/vpx_dsp/quantize.c index e4e741a908..3c7f9832f7 
100644 --- a/libs/libvpx/vpx_dsp/quantize.c +++ b/libs/libvpx/vpx_dsp/quantize.c @@ -8,11 +8,11 @@ * be found in the AUTHORS file in the root of the source tree. */ +#include "./vpx_dsp_rtcd.h" #include "vpx_dsp/quantize.h" #include "vpx_mem/vpx_mem.h" -void vpx_quantize_dc(const tran_low_t *coeff_ptr, - int n_coeffs, int skip_block, +void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block, const int16_t *round_ptr, const int16_t quant, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, uint16_t *eob_ptr) { @@ -28,20 +28,19 @@ void vpx_quantize_dc(const tran_low_t *coeff_ptr, if (!skip_block) { tmp = clamp(abs_coeff + round_ptr[rc != 0], INT16_MIN, INT16_MAX); tmp = (tmp * quant) >> 16; - qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign; + qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign; dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr; - if (tmp) - eob = 0; + if (tmp) eob = 0; } *eob_ptr = eob + 1; } #if CONFIG_VP9_HIGHBITDEPTH -void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, - int n_coeffs, int skip_block, - const int16_t *round_ptr, const int16_t quant, - tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, - const int16_t dequant_ptr, uint16_t *eob_ptr) { +void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, + int skip_block, const int16_t *round_ptr, + const int16_t quant, tran_low_t *qcoeff_ptr, + tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, + uint16_t *eob_ptr) { int eob = -1; memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr)); @@ -52,11 +51,10 @@ void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, const int coeff_sign = (coeff >> 31); const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign; const int64_t tmp = abs_coeff + round_ptr[0]; - const uint32_t abs_qcoeff = (uint32_t)((tmp * quant) >> 16); + const int abs_qcoeff = (int)((tmp * quant) >> 16); qcoeff_ptr[0] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign); dqcoeff_ptr[0] = qcoeff_ptr[0] * dequant_ptr; - if 
(abs_qcoeff) - eob = 0; + if (abs_qcoeff) eob = 0; } *eob_ptr = eob + 1; } @@ -80,19 +78,16 @@ void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, tmp = clamp(abs_coeff + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1), INT16_MIN, INT16_MAX); tmp = (tmp * quant) >> 15; - qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign; + qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign; dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr / 2; - if (tmp) - eob = 0; + if (tmp) eob = 0; } *eob_ptr = eob + 1; } #if CONFIG_VP9_HIGHBITDEPTH -void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, - int skip_block, - const int16_t *round_ptr, - const int16_t quant, +void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, + const int16_t *round_ptr, const int16_t quant, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, @@ -108,27 +103,25 @@ void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, const int coeff_sign = (coeff >> 31); const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign; const int64_t tmp = abs_coeff + ROUND_POWER_OF_TWO(round_ptr[0], 1); - const uint32_t abs_qcoeff = (uint32_t)((tmp * quant) >> 15); + const int abs_qcoeff = (int)((tmp * quant) >> 15); qcoeff_ptr[0] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign); dqcoeff_ptr[0] = qcoeff_ptr[0] * dequant_ptr / 2; - if (abs_qcoeff) - eob = 0; + if (abs_qcoeff) eob = 0; } *eob_ptr = eob + 1; } #endif void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, - int skip_block, - const int16_t *zbin_ptr, const int16_t *round_ptr, - const int16_t *quant_ptr, const int16_t *quant_shift_ptr, - tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, - const int16_t *scan, const int16_t *iscan) { + int skip_block, const int16_t *zbin_ptr, + const int16_t *round_ptr, const int16_t *quant_ptr, + const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, + tran_low_t *dqcoeff_ptr, const int16_t 
*dequant_ptr, + uint16_t *eob_ptr, const int16_t *scan, + const int16_t *iscan) { int i, non_zero_count = (int)n_coeffs, eob = -1; - const int zbins[2] = {zbin_ptr[0], zbin_ptr[1]}; - const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1}; + const int zbins[2] = { zbin_ptr[0], zbin_ptr[1] }; + const int nzbins[2] = { zbins[0] * -1, zbins[1] * -1 }; (void)iscan; memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr)); @@ -157,12 +150,12 @@ void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, if (abs_coeff >= zbins[rc != 0]) { int tmp = clamp(abs_coeff + round_ptr[rc != 0], INT16_MIN, INT16_MAX); tmp = ((((tmp * quant_ptr[rc != 0]) >> 16) + tmp) * - quant_shift_ptr[rc != 0]) >> 16; // quantization - qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign; + quant_shift_ptr[rc != 0]) >> + 16; // quantization + qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign; dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0]; - if (tmp) - eob = i; + if (tmp) eob = i; } } } @@ -175,12 +168,11 @@ void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, const int16_t *scan, - const int16_t *iscan) { + const int16_t *dequant_ptr, uint16_t *eob_ptr, + const int16_t *scan, const int16_t *iscan) { int i, non_zero_count = (int)n_coeffs, eob = -1; - const int zbins[2] = {zbin_ptr[0], zbin_ptr[1]}; - const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1}; + const int zbins[2] = { zbin_ptr[0], zbin_ptr[1] }; + const int nzbins[2] = { zbins[0] * -1, zbins[1] * -1 }; (void)iscan; memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr)); @@ -213,8 +205,7 @@ void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, (uint32_t)((tmp2 * quant_shift_ptr[rc != 0]) >> 16); qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign); dqcoeff_ptr[rc] = qcoeff_ptr[rc] * 
dequant_ptr[rc != 0]; - if (abs_qcoeff) - eob = i; + if (abs_qcoeff) eob = i; } } } @@ -223,17 +214,15 @@ void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, #endif void vpx_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, - int skip_block, - const int16_t *zbin_ptr, const int16_t *round_ptr, - const int16_t *quant_ptr, + int skip_block, const int16_t *zbin_ptr, + const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, + const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan) { - const int zbins[2] = {ROUND_POWER_OF_TWO(zbin_ptr[0], 1), - ROUND_POWER_OF_TWO(zbin_ptr[1], 1)}; - const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1}; + const int zbins[2] = { ROUND_POWER_OF_TWO(zbin_ptr[0], 1), + ROUND_POWER_OF_TWO(zbin_ptr[1], 1) }; + const int nzbins[2] = { zbins[0] * -1, zbins[1] * -1 }; int idx = 0; int idx_arr[1024]; @@ -266,33 +255,28 @@ void vpx_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, abs_coeff += ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1); abs_coeff = clamp(abs_coeff, INT16_MIN, INT16_MAX); tmp = ((((abs_coeff * quant_ptr[rc != 0]) >> 16) + abs_coeff) * - quant_shift_ptr[rc != 0]) >> 15; + quant_shift_ptr[rc != 0]) >> + 15; qcoeff_ptr[rc] = (tmp ^ coeff_sign) - coeff_sign; dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2; - if (tmp) - eob = idx_arr[i]; + if (tmp) eob = idx_arr[i]; } } *eob_ptr = eob + 1; } #if CONFIG_VP9_HIGHBITDEPTH -void vpx_highbd_quantize_b_32x32_c(const tran_low_t *coeff_ptr, - intptr_t n_coeffs, int skip_block, - const int16_t *zbin_ptr, - const int16_t *round_ptr, - const int16_t *quant_ptr, - const int16_t *quant_shift_ptr, - tran_low_t *qcoeff_ptr, - tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, - const int16_t *scan, const int16_t *iscan) { - const int zbins[2] = 
{ROUND_POWER_OF_TWO(zbin_ptr[0], 1), - ROUND_POWER_OF_TWO(zbin_ptr[1], 1)}; - const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1}; +void vpx_highbd_quantize_b_32x32_c( + const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, + const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, + const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, + tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, + const int16_t *scan, const int16_t *iscan) { + const int zbins[2] = { ROUND_POWER_OF_TWO(zbin_ptr[0], 1), + ROUND_POWER_OF_TWO(zbin_ptr[1], 1) }; + const int nzbins[2] = { zbins[0] * -1, zbins[1] * -1 }; int idx = 0; int idx_arr[1024]; @@ -321,15 +305,14 @@ void vpx_highbd_quantize_b_32x32_c(const tran_low_t *coeff_ptr, const int coeff = coeff_ptr[rc]; const int coeff_sign = (coeff >> 31); const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign; - const int64_t tmp1 = abs_coeff - + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1); + const int64_t tmp1 = + abs_coeff + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1); const int64_t tmp2 = ((tmp1 * quant_ptr[rc != 0]) >> 16) + tmp1; const uint32_t abs_qcoeff = (uint32_t)((tmp2 * quant_shift_ptr[rc != 0]) >> 15); qcoeff_ptr[rc] = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign); dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2; - if (abs_qcoeff) - eob = idx_arr[i]; + if (abs_qcoeff) eob = idx_arr[i]; } } *eob_ptr = eob + 1; diff --git a/libs/libvpx/vpx_dsp/quantize.h b/libs/libvpx/vpx_dsp/quantize.h index 89ec597924..e132845463 100644 --- a/libs/libvpx/vpx_dsp/quantize.h +++ b/libs/libvpx/vpx_dsp/quantize.h @@ -18,8 +18,7 @@ extern "C" { #endif -void vpx_quantize_dc(const tran_low_t *coeff_ptr, - int n_coeffs, int skip_block, +void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block, const int16_t *round_ptr, const int16_t quant_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, uint16_t *eob_ptr); @@ -29,19 +28,17 @@ void 
vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, const int16_t dequant_ptr, uint16_t *eob_ptr); #if CONFIG_VP9_HIGHBITDEPTH -void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, - int n_coeffs, int skip_block, - const int16_t *round_ptr, const int16_t quant_ptr, - tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, - const int16_t dequant_ptr, uint16_t *eob_ptr); -void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, - int skip_block, +void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, + int skip_block, const int16_t *round_ptr, + const int16_t quant_ptr, tran_low_t *qcoeff_ptr, + tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, + uint16_t *eob_ptr); +void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, const int16_t *round_ptr, const int16_t quant_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, - const int16_t dequant_ptr, - uint16_t *eob_ptr); + const int16_t dequant_ptr, uint16_t *eob_ptr); #endif #ifdef __cplusplus diff --git a/libs/libvpx/vpx_dsp/sad.c b/libs/libvpx/vpx_dsp/sad.c index c0c3ff9964..c80ef729bf 100644 --- a/libs/libvpx/vpx_dsp/sad.c +++ b/libs/libvpx/vpx_dsp/sad.c @@ -17,15 +17,13 @@ #include "vpx_ports/mem.h" /* Sum the difference between every corresponding element of the buffers. */ -static INLINE unsigned int sad(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - int width, int height) { +static INLINE unsigned int sad(const uint8_t *a, int a_stride, const uint8_t *b, + int b_stride, int width, int height) { int y, x; unsigned int sad = 0; for (y = 0; y < height; y++) { - for (x = 0; x < width; x++) - sad += abs(a[x] - b[x]); + for (x = 0; x < width; x++) sad += abs(a[x] - b[x]); a += a_stride; b += b_stride; @@ -33,81 +31,43 @@ static INLINE unsigned int sad(const uint8_t *a, int a_stride, return sad; } -// TODO(johannkoenig): this moved to vpx_dsp, should be able to clean this up. 
-/* Remove dependency on vp9 variance function by duplicating vp9_comp_avg_pred. - * The function averages every corresponding element of the buffers and stores - * the value in a third buffer, comp_pred. - * pred and comp_pred are assumed to have stride = width - * In the usage below comp_pred is a local array. - */ -static INLINE void avg_pred(uint8_t *comp_pred, const uint8_t *pred, int width, - int height, const uint8_t *ref, int ref_stride) { - int i, j; - - for (i = 0; i < height; i++) { - for (j = 0; j < width; j++) { - const int tmp = pred[j] + ref[j]; - comp_pred[j] = ROUND_POWER_OF_TWO(tmp, 1); - } - comp_pred += width; - pred += width; - ref += ref_stride; +#define sadMxN(m, n) \ + unsigned int vpx_sad##m##x##n##_c(const uint8_t *src, int src_stride, \ + const uint8_t *ref, int ref_stride) { \ + return sad(src, src_stride, ref, ref_stride, m, n); \ + } \ + unsigned int vpx_sad##m##x##n##_avg_c(const uint8_t *src, int src_stride, \ + const uint8_t *ref, int ref_stride, \ + const uint8_t *second_pred) { \ + uint8_t comp_pred[m * n]; \ + vpx_comp_avg_pred_c(comp_pred, second_pred, m, n, ref, ref_stride); \ + return sad(src, src_stride, comp_pred, m, m, n); \ } -} - -#if CONFIG_VP9_HIGHBITDEPTH -static INLINE void highbd_avg_pred(uint16_t *comp_pred, const uint8_t *pred8, - int width, int height, const uint8_t *ref8, - int ref_stride) { - int i, j; - uint16_t *pred = CONVERT_TO_SHORTPTR(pred8); - uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); - for (i = 0; i < height; i++) { - for (j = 0; j < width; j++) { - const int tmp = pred[j] + ref[j]; - comp_pred[j] = ROUND_POWER_OF_TWO(tmp, 1); - } - comp_pred += width; - pred += width; - ref += ref_stride; - } -} -#endif // CONFIG_VP9_HIGHBITDEPTH - -#define sadMxN(m, n) \ -unsigned int vpx_sad##m##x##n##_c(const uint8_t *src, int src_stride, \ - const uint8_t *ref, int ref_stride) { \ - return sad(src, src_stride, ref, ref_stride, m, n); \ -} \ -unsigned int vpx_sad##m##x##n##_avg_c(const uint8_t *src, int src_stride, 
\ - const uint8_t *ref, int ref_stride, \ - const uint8_t *second_pred) { \ - uint8_t comp_pred[m * n]; \ - avg_pred(comp_pred, second_pred, m, n, ref, ref_stride); \ - return sad(src, src_stride, comp_pred, m, m, n); \ -} // depending on call sites, pass **ref_array to avoid & in subsequent call and // de-dup with 4D below. -#define sadMxNxK(m, n, k) \ -void vpx_sad##m##x##n##x##k##_c(const uint8_t *src, int src_stride, \ - const uint8_t *ref_array, int ref_stride, \ - uint32_t *sad_array) { \ - int i; \ - for (i = 0; i < k; ++i) \ - sad_array[i] = vpx_sad##m##x##n##_c(src, src_stride, &ref_array[i], ref_stride); \ -} +#define sadMxNxK(m, n, k) \ + void vpx_sad##m##x##n##x##k##_c(const uint8_t *src, int src_stride, \ + const uint8_t *ref_array, int ref_stride, \ + uint32_t *sad_array) { \ + int i; \ + for (i = 0; i < k; ++i) \ + sad_array[i] = \ + vpx_sad##m##x##n##_c(src, src_stride, &ref_array[i], ref_stride); \ + } // This appears to be equivalent to the above when k == 4 and refs is const -#define sadMxNx4D(m, n) \ -void vpx_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \ - const uint8_t *const ref_array[], int ref_stride, \ - uint32_t *sad_array) { \ - int i; \ - for (i = 0; i < 4; ++i) \ - sad_array[i] = vpx_sad##m##x##n##_c(src, src_stride, ref_array[i], ref_stride); \ -} +#define sadMxNx4D(m, n) \ + void vpx_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \ + const uint8_t *const ref_array[], \ + int ref_stride, uint32_t *sad_array) { \ + int i; \ + for (i = 0; i < 4; ++i) \ + sad_array[i] = \ + vpx_sad##m##x##n##_c(src, src_stride, ref_array[i], ref_stride); \ + } +/* clang-format off */ // 64x64 sadMxN(64, 64) sadMxNxK(64, 64, 3) @@ -175,18 +135,18 @@ sadMxN(4, 4) sadMxNxK(4, 4, 3) sadMxNxK(4, 4, 8) sadMxNx4D(4, 4) +/* clang-format on */ #if CONFIG_VP9_HIGHBITDEPTH -static INLINE unsigned int highbd_sad(const uint8_t *a8, int a_stride, - const uint8_t *b8, int b_stride, - int width, int height) { + static INLINE + unsigned int 
highbd_sad(const uint8_t *a8, int a_stride, const uint8_t *b8, + int b_stride, int width, int height) { int y, x; unsigned int sad = 0; const uint16_t *a = CONVERT_TO_SHORTPTR(a8); const uint16_t *b = CONVERT_TO_SHORTPTR(b8); for (y = 0; y < height; y++) { - for (x = 0; x < width; x++) - sad += abs(a[x] - b[x]); + for (x = 0; x < width; x++) sad += abs(a[x] - b[x]); a += a_stride; b += b_stride; @@ -201,8 +161,7 @@ static INLINE unsigned int highbd_sadb(const uint8_t *a8, int a_stride, unsigned int sad = 0; const uint16_t *a = CONVERT_TO_SHORTPTR(a8); for (y = 0; y < height; y++) { - for (x = 0; x < width; x++) - sad += abs(a[x] - b[x]); + for (x = 0; x < width; x++) sad += abs(a[x] - b[x]); a += a_stride; b += b_stride; @@ -210,43 +169,43 @@ static INLINE unsigned int highbd_sadb(const uint8_t *a8, int a_stride, return sad; } -#define highbd_sadMxN(m, n) \ -unsigned int vpx_highbd_sad##m##x##n##_c(const uint8_t *src, int src_stride, \ - const uint8_t *ref, int ref_stride) { \ - return highbd_sad(src, src_stride, ref, ref_stride, m, n); \ -} \ -unsigned int vpx_highbd_sad##m##x##n##_avg_c(const uint8_t *src, \ - int src_stride, \ - const uint8_t *ref, \ - int ref_stride, \ - const uint8_t *second_pred) { \ - uint16_t comp_pred[m * n]; \ - highbd_avg_pred(comp_pred, second_pred, m, n, ref, ref_stride); \ - return highbd_sadb(src, src_stride, comp_pred, m, m, n); \ -} +#define highbd_sadMxN(m, n) \ + unsigned int vpx_highbd_sad##m##x##n##_c(const uint8_t *src, int src_stride, \ + const uint8_t *ref, \ + int ref_stride) { \ + return highbd_sad(src, src_stride, ref, ref_stride, m, n); \ + } \ + unsigned int vpx_highbd_sad##m##x##n##_avg_c( \ + const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \ + const uint8_t *second_pred) { \ + uint16_t comp_pred[m * n]; \ + vpx_highbd_comp_avg_pred_c(comp_pred, second_pred, m, n, ref, ref_stride); \ + return highbd_sadb(src, src_stride, comp_pred, m, m, n); \ + } -#define highbd_sadMxNxK(m, n, k) \ -void 
vpx_highbd_sad##m##x##n##x##k##_c(const uint8_t *src, int src_stride, \ - const uint8_t *ref_array, int ref_stride, \ - uint32_t *sad_array) { \ - int i; \ - for (i = 0; i < k; ++i) { \ - sad_array[i] = vpx_highbd_sad##m##x##n##_c(src, src_stride, &ref_array[i], \ - ref_stride); \ - } \ -} +#define highbd_sadMxNxK(m, n, k) \ + void vpx_highbd_sad##m##x##n##x##k##_c( \ + const uint8_t *src, int src_stride, const uint8_t *ref_array, \ + int ref_stride, uint32_t *sad_array) { \ + int i; \ + for (i = 0; i < k; ++i) { \ + sad_array[i] = vpx_highbd_sad##m##x##n##_c(src, src_stride, \ + &ref_array[i], ref_stride); \ + } \ + } -#define highbd_sadMxNx4D(m, n) \ -void vpx_highbd_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \ - const uint8_t *const ref_array[], \ - int ref_stride, uint32_t *sad_array) { \ - int i; \ - for (i = 0; i < 4; ++i) { \ - sad_array[i] = vpx_highbd_sad##m##x##n##_c(src, src_stride, ref_array[i], \ - ref_stride); \ - } \ -} +#define highbd_sadMxNx4D(m, n) \ + void vpx_highbd_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \ + const uint8_t *const ref_array[], \ + int ref_stride, uint32_t *sad_array) { \ + int i; \ + for (i = 0; i < 4; ++i) { \ + sad_array[i] = vpx_highbd_sad##m##x##n##_c(src, src_stride, \ + ref_array[i], ref_stride); \ + } \ + } +/* clang-format off */ // 64x64 highbd_sadMxN(64, 64) highbd_sadMxNxK(64, 64, 3) @@ -314,5 +273,6 @@ highbd_sadMxN(4, 4) highbd_sadMxNxK(4, 4, 3) highbd_sadMxNxK(4, 4, 8) highbd_sadMxNx4D(4, 4) +/* clang-format on */ #endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vpx_dsp/ssim.c b/libs/libvpx/vpx_dsp/ssim.c index cfe5bb331c..7a29bd29f9 100644 --- a/libs/libvpx/vpx_dsp/ssim.c +++ b/libs/libvpx/vpx_dsp/ssim.c @@ -8,14 +8,15 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ +#include #include #include "./vpx_dsp_rtcd.h" #include "vpx_dsp/ssim.h" #include "vpx_ports/mem.h" #include "vpx_ports/system_state.h" -void vpx_ssim_parms_16x16_c(const uint8_t *s, int sp, const uint8_t *r, - int rp, uint32_t *sum_s, uint32_t *sum_r, +void vpx_ssim_parms_16x16_c(const uint8_t *s, int sp, const uint8_t *r, int rp, + uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr) { int i, j; @@ -30,9 +31,8 @@ void vpx_ssim_parms_16x16_c(const uint8_t *s, int sp, const uint8_t *r, } } void vpx_ssim_parms_8x8_c(const uint8_t *s, int sp, const uint8_t *r, int rp, - uint32_t *sum_s, uint32_t *sum_r, - uint32_t *sum_sq_s, uint32_t *sum_sq_r, - uint32_t *sum_sxr) { + uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, + uint32_t *sum_sq_r, uint32_t *sum_sxr) { int i, j; for (i = 0; i < 8; i++, s += sp, r += rp) { for (j = 0; j < 8; j++) { @@ -46,9 +46,8 @@ void vpx_ssim_parms_8x8_c(const uint8_t *s, int sp, const uint8_t *r, int rp, } #if CONFIG_VP9_HIGHBITDEPTH -void vpx_highbd_ssim_parms_8x8_c(const uint16_t *s, int sp, - const uint16_t *r, int rp, - uint32_t *sum_s, uint32_t *sum_r, +void vpx_highbd_ssim_parms_8x8_c(const uint16_t *s, int sp, const uint16_t *r, + int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr) { int i, j; @@ -64,25 +63,39 @@ void vpx_highbd_ssim_parms_8x8_c(const uint16_t *s, int sp, } #endif // CONFIG_VP9_HIGHBITDEPTH -static const int64_t cc1 = 26634; // (64^2*(.01*255)^2 -static const int64_t cc2 = 239708; // (64^2*(.03*255)^2 +static const int64_t cc1 = 26634; // (64^2*(.01*255)^2 +static const int64_t cc2 = 239708; // (64^2*(.03*255)^2 +static const int64_t cc1_10 = 428658; // (64^2*(.01*1023)^2 +static const int64_t cc2_10 = 3857925; // (64^2*(.03*1023)^2 +static const int64_t cc1_12 = 6868593; // (64^2*(.01*4095)^2 +static const int64_t cc2_12 = 61817334; // (64^2*(.03*4095)^2 -static double similarity(uint32_t sum_s, uint32_t sum_r, - 
uint32_t sum_sq_s, uint32_t sum_sq_r, - uint32_t sum_sxr, int count) { +static double similarity(uint32_t sum_s, uint32_t sum_r, uint32_t sum_sq_s, + uint32_t sum_sq_r, uint32_t sum_sxr, int count, + uint32_t bd) { int64_t ssim_n, ssim_d; int64_t c1, c2; + if (bd == 8) { + // scale the constants by number of pixels + c1 = (cc1 * count * count) >> 12; + c2 = (cc2 * count * count) >> 12; + } else if (bd == 10) { + c1 = (cc1_10 * count * count) >> 12; + c2 = (cc2_10 * count * count) >> 12; + } else if (bd == 12) { + c1 = (cc1_12 * count * count) >> 12; + c2 = (cc2_12 * count * count) >> 12; + } else { + c1 = c2 = 0; + assert(0); + } - // scale the constants by number of pixels - c1 = (cc1 * count * count) >> 12; - c2 = (cc2 * count * count) >> 12; - - ssim_n = (2 * sum_s * sum_r + c1) * ((int64_t) 2 * count * sum_sxr - - (int64_t) 2 * sum_s * sum_r + c2); + ssim_n = (2 * sum_s * sum_r + c1) * + ((int64_t)2 * count * sum_sxr - (int64_t)2 * sum_s * sum_r + c2); ssim_d = (sum_s * sum_s + sum_r * sum_r + c1) * ((int64_t)count * sum_sq_s - (int64_t)sum_s * sum_s + - (int64_t)count * sum_sq_r - (int64_t) sum_r * sum_r + c2); + (int64_t)count * sum_sq_r - (int64_t)sum_r * sum_r + c2); return ssim_n * 1.0 / ssim_d; } @@ -91,22 +104,17 @@ static double ssim_8x8(const uint8_t *s, int sp, const uint8_t *r, int rp) { uint32_t sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0; vpx_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr); - return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64); + return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64, 8); } #if CONFIG_VP9_HIGHBITDEPTH static double highbd_ssim_8x8(const uint16_t *s, int sp, const uint16_t *r, - int rp, unsigned int bd) { + int rp, uint32_t bd, uint32_t shift) { uint32_t sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0; - const int oshift = bd - 8; vpx_highbd_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr); - return 
similarity(sum_s >> oshift, - sum_r >> oshift, - sum_sq_s >> (2 * oshift), - sum_sq_r >> (2 * oshift), - sum_sxr >> (2 * oshift), - 64); + return similarity(sum_s >> shift, sum_r >> shift, sum_sq_s >> (2 * shift), + sum_sq_r >> (2 * shift), sum_sxr >> (2 * shift), 64, bd); } #endif // CONFIG_VP9_HIGHBITDEPTH @@ -136,7 +144,7 @@ static double vpx_ssim2(const uint8_t *img1, const uint8_t *img2, #if CONFIG_VP9_HIGHBITDEPTH static double vpx_highbd_ssim2(const uint8_t *img1, const uint8_t *img2, int stride_img1, int stride_img2, int width, - int height, unsigned int bd) { + int height, uint32_t bd, uint32_t shift) { int i, j; int samples = 0; double ssim_total = 0; @@ -146,8 +154,8 @@ static double vpx_highbd_ssim2(const uint8_t *img1, const uint8_t *img2, i += 4, img1 += stride_img1 * 4, img2 += stride_img2 * 4) { for (j = 0; j <= width - 8; j += 4) { double v = highbd_ssim_8x8(CONVERT_TO_SHORTPTR(img1 + j), stride_img1, - CONVERT_TO_SHORTPTR(img2 + j), stride_img2, - bd); + CONVERT_TO_SHORTPTR(img2 + j), stride_img2, bd, + shift); ssim_total += v; samples++; } @@ -158,22 +166,18 @@ static double vpx_highbd_ssim2(const uint8_t *img1, const uint8_t *img2, #endif // CONFIG_VP9_HIGHBITDEPTH double vpx_calc_ssim(const YV12_BUFFER_CONFIG *source, - const YV12_BUFFER_CONFIG *dest, - double *weight) { + const YV12_BUFFER_CONFIG *dest, double *weight) { double a, b, c; double ssimv; - a = vpx_ssim2(source->y_buffer, dest->y_buffer, - source->y_stride, dest->y_stride, - source->y_crop_width, source->y_crop_height); + a = vpx_ssim2(source->y_buffer, dest->y_buffer, source->y_stride, + dest->y_stride, source->y_crop_width, source->y_crop_height); - b = vpx_ssim2(source->u_buffer, dest->u_buffer, - source->uv_stride, dest->uv_stride, - source->uv_crop_width, source->uv_crop_height); + b = vpx_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride, + dest->uv_stride, source->uv_crop_width, source->uv_crop_height); - c = vpx_ssim2(source->v_buffer, dest->v_buffer, - 
source->uv_stride, dest->uv_stride, - source->uv_crop_width, source->uv_crop_height); + c = vpx_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride, + dest->uv_stride, source->uv_crop_width, source->uv_crop_height); ssimv = a * .8 + .1 * (b + c); @@ -182,31 +186,6 @@ double vpx_calc_ssim(const YV12_BUFFER_CONFIG *source, return ssimv; } -double vpx_calc_ssimg(const YV12_BUFFER_CONFIG *source, - const YV12_BUFFER_CONFIG *dest, - double *ssim_y, double *ssim_u, double *ssim_v) { - double ssim_all = 0; - double a, b, c; - - a = vpx_ssim2(source->y_buffer, dest->y_buffer, - source->y_stride, dest->y_stride, - source->y_crop_width, source->y_crop_height); - - b = vpx_ssim2(source->u_buffer, dest->u_buffer, - source->uv_stride, dest->uv_stride, - source->uv_crop_width, source->uv_crop_height); - - c = vpx_ssim2(source->v_buffer, dest->v_buffer, - source->uv_stride, dest->uv_stride, - source->uv_crop_width, source->uv_crop_height); - *ssim_y = a; - *ssim_u = b; - *ssim_v = c; - ssim_all = (a * 4 + b + c) / 6; - - return ssim_all; -} - // traditional ssim as per: http://en.wikipedia.org/wiki/Structural_similarity // // Re working out the math -> @@ -242,13 +221,13 @@ static double ssimv_similarity(const Ssimv *sv, int64_t n) { const int64_t c2 = (cc2 * n * n) >> 12; const double l = 1.0 * (2 * sv->sum_s * sv->sum_r + c1) / - (sv->sum_s * sv->sum_s + sv->sum_r * sv->sum_r + c1); + (sv->sum_s * sv->sum_s + sv->sum_r * sv->sum_r + c1); // Since these variables are unsigned sums, convert to double so // math is done in double arithmetic. 
- const double v = (2.0 * n * sv->sum_sxr - 2 * sv->sum_s * sv->sum_r + c2) - / (n * sv->sum_sq_s - sv->sum_s * sv->sum_s + n * sv->sum_sq_r - - sv->sum_r * sv->sum_r + c2); + const double v = (2.0 * n * sv->sum_sxr - 2 * sv->sum_s * sv->sum_r + c2) / + (n * sv->sum_sq_s - sv->sum_s * sv->sum_s + + n * sv->sum_sq_r - sv->sum_r * sv->sum_r + c2); return l * v; } @@ -277,24 +256,21 @@ static double ssimv_similarity2(const Ssimv *sv, int64_t n) { // Since these variables are unsigned, sums convert to double so // math is done in double arithmetic. - const double v = (2.0 * n * sv->sum_sxr - 2 * sv->sum_s * sv->sum_r + c2) - / (n * sv->sum_sq_s - sv->sum_s * sv->sum_s + - n * sv->sum_sq_r - sv->sum_r * sv->sum_r + c2); + const double v = (2.0 * n * sv->sum_sxr - 2 * sv->sum_s * sv->sum_r + c2) / + (n * sv->sum_sq_s - sv->sum_s * sv->sum_s + + n * sv->sum_sq_r - sv->sum_r * sv->sum_r + c2); return l * v; } static void ssimv_parms(uint8_t *img1, int img1_pitch, uint8_t *img2, int img2_pitch, Ssimv *sv) { - vpx_ssim_parms_8x8(img1, img1_pitch, img2, img2_pitch, - &sv->sum_s, &sv->sum_r, &sv->sum_sq_s, &sv->sum_sq_r, - &sv->sum_sxr); + vpx_ssim_parms_8x8(img1, img1_pitch, img2, img2_pitch, &sv->sum_s, &sv->sum_r, + &sv->sum_sq_s, &sv->sum_sq_r, &sv->sum_sxr); } -double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, - uint8_t *img2, int img2_pitch, - int width, int height, - Ssimv *sv2, Metrics *m, - int do_inconsistency) { +double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2, + int img2_pitch, int width, int height, Ssimv *sv2, + Metrics *m, int do_inconsistency) { double dssim_total = 0; double ssim_total = 0; double ssim2_total = 0; @@ -305,10 +281,10 @@ double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, double old_ssim_total = 0; vpx_clear_system_state(); // We can sample points as frequently as we like start with 1 per 4x4. 
- for (i = 0; i < height; i += 4, - img1 += img1_pitch * 4, img2 += img2_pitch * 4) { + for (i = 0; i < height; + i += 4, img1 += img1_pitch * 4, img2 += img2_pitch * 4) { for (j = 0; j < width; j += 4, ++c) { - Ssimv sv = {0}; + Ssimv sv = { 0 }; double ssim; double ssim2; double dssim; @@ -394,27 +370,29 @@ double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, // This measures how much consistent variance is in two consecutive // source frames. 1.0 means they have exactly the same variance. - const double variance_term = (2.0 * var_old * var_new + c1) / + const double variance_term = + (2.0 * var_old * var_new + c1) / (1.0 * var_old * var_old + 1.0 * var_new * var_new + c1); // This measures how consistent the local mean are between two // consecutive frames. 1.0 means they have exactly the same mean. - const double mean_term = (2.0 * mean_old * mean_new + c2) / + const double mean_term = + (2.0 * mean_old * mean_new + c2) / (1.0 * mean_old * mean_old + 1.0 * mean_new * mean_new + c2); // This measures how consistent the ssims of two // consecutive frames is. 1.0 means they are exactly the same. - double ssim_term = pow((2.0 * ssim_old * ssim_new + c3) / - (ssim_old * ssim_old + ssim_new * ssim_new + c3), - 5); + double ssim_term = + pow((2.0 * ssim_old * ssim_new + c3) / + (ssim_old * ssim_old + ssim_new * ssim_new + c3), + 5); double this_inconsistency; // Floating point math sometimes makes this > 1 by a tiny bit. // We want the metric to scale between 0 and 1.0 so we can convert // it to an snr scaled value. - if (ssim_term > 1) - ssim_term = 1; + if (ssim_term > 1) ssim_term = 1; // This converts the consistency metric to an inconsistency metric // ( so we can scale it like psnr to something like sum square error. 
@@ -442,8 +420,7 @@ double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, ssim2_total *= norm; m->ssim2 = ssim2_total; m->ssim = ssim_total; - if (old_ssim_total == 0) - inconsistency_total = 0; + if (old_ssim_total == 0) inconsistency_total = 0; m->ssimc = inconsistency_total; @@ -451,25 +428,28 @@ double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, return inconsistency_total; } - #if CONFIG_VP9_HIGHBITDEPTH double vpx_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source, - const YV12_BUFFER_CONFIG *dest, - double *weight, unsigned int bd) { + const YV12_BUFFER_CONFIG *dest, double *weight, + uint32_t bd, uint32_t in_bd) { double a, b, c; double ssimv; + uint32_t shift = 0; - a = vpx_highbd_ssim2(source->y_buffer, dest->y_buffer, - source->y_stride, dest->y_stride, - source->y_crop_width, source->y_crop_height, bd); + assert(bd >= in_bd); + shift = bd - in_bd; - b = vpx_highbd_ssim2(source->u_buffer, dest->u_buffer, - source->uv_stride, dest->uv_stride, - source->uv_crop_width, source->uv_crop_height, bd); + a = vpx_highbd_ssim2(source->y_buffer, dest->y_buffer, source->y_stride, + dest->y_stride, source->y_crop_width, + source->y_crop_height, in_bd, shift); - c = vpx_highbd_ssim2(source->v_buffer, dest->v_buffer, - source->uv_stride, dest->uv_stride, - source->uv_crop_width, source->uv_crop_height, bd); + b = vpx_highbd_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride, + dest->uv_stride, source->uv_crop_width, + source->uv_crop_height, in_bd, shift); + + c = vpx_highbd_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride, + dest->uv_stride, source->uv_crop_width, + source->uv_crop_height, in_bd, shift); ssimv = a * .8 + .1 * (b + c); @@ -478,28 +458,4 @@ double vpx_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source, return ssimv; } -double vpx_highbd_calc_ssimg(const YV12_BUFFER_CONFIG *source, - const YV12_BUFFER_CONFIG *dest, double *ssim_y, - double *ssim_u, double *ssim_v, unsigned int bd) { - double ssim_all = 0; - double a, b, c; - - a 
= vpx_highbd_ssim2(source->y_buffer, dest->y_buffer, - source->y_stride, dest->y_stride, - source->y_crop_width, source->y_crop_height, bd); - - b = vpx_highbd_ssim2(source->u_buffer, dest->u_buffer, - source->uv_stride, dest->uv_stride, - source->uv_crop_width, source->uv_crop_height, bd); - - c = vpx_highbd_ssim2(source->v_buffer, dest->v_buffer, - source->uv_stride, dest->uv_stride, - source->uv_crop_width, source->uv_crop_height, bd); - *ssim_y = a; - *ssim_u = b; - *ssim_v = c; - ssim_all = (a * 4 + b + c) / 6; - - return ssim_all; -} #endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vpx_dsp/ssim.h b/libs/libvpx/vpx_dsp/ssim.h index 132f7f9e19..4f2bb1d556 100644 --- a/libs/libvpx/vpx_dsp/ssim.h +++ b/libs/libvpx/vpx_dsp/ssim.h @@ -11,6 +11,8 @@ #ifndef VPX_DSP_SSIM_H_ #define VPX_DSP_SSIM_H_ +#define MAX_SSIM_DB 100.0; + #ifdef __cplusplus extern "C" { #endif @@ -61,37 +63,21 @@ typedef struct { } Metrics; double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2, - int img2_pitch, int width, int height, Ssimv *sv2, - Metrics *m, int do_inconsistency); + int img2_pitch, int width, int height, Ssimv *sv2, + Metrics *m, int do_inconsistency); double vpx_calc_ssim(const YV12_BUFFER_CONFIG *source, - const YV12_BUFFER_CONFIG *dest, - double *weight); - -double vpx_calc_ssimg(const YV12_BUFFER_CONFIG *source, - const YV12_BUFFER_CONFIG *dest, - double *ssim_y, double *ssim_u, double *ssim_v); + const YV12_BUFFER_CONFIG *dest, double *weight); double vpx_calc_fastssim(const YV12_BUFFER_CONFIG *source, - const YV12_BUFFER_CONFIG *dest, - double *ssim_y, double *ssim_u, double *ssim_v); - -double vpx_psnrhvs(const YV12_BUFFER_CONFIG *source, - const YV12_BUFFER_CONFIG *dest, - double *ssim_y, double *ssim_u, double *ssim_v); + const YV12_BUFFER_CONFIG *dest, double *ssim_y, + double *ssim_u, double *ssim_v, uint32_t bd, + uint32_t in_bd); #if CONFIG_VP9_HIGHBITDEPTH double vpx_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source, - const 
YV12_BUFFER_CONFIG *dest, - double *weight, - unsigned int bd); - -double vpx_highbd_calc_ssimg(const YV12_BUFFER_CONFIG *source, - const YV12_BUFFER_CONFIG *dest, - double *ssim_y, - double *ssim_u, - double *ssim_v, - unsigned int bd); + const YV12_BUFFER_CONFIG *dest, double *weight, + uint32_t bd, uint32_t in_bd); #endif // CONFIG_VP9_HIGHBITDEPTH #ifdef __cplusplus diff --git a/libs/libvpx/vpx_dsp/subtract.c b/libs/libvpx/vpx_dsp/subtract.c index 556e0134f3..95e7071b27 100644 --- a/libs/libvpx/vpx_dsp/subtract.c +++ b/libs/libvpx/vpx_dsp/subtract.c @@ -16,32 +16,30 @@ #include "vpx/vpx_integer.h" #include "vpx_ports/mem.h" -void vpx_subtract_block_c(int rows, int cols, - int16_t *diff, ptrdiff_t diff_stride, - const uint8_t *src, ptrdiff_t src_stride, - const uint8_t *pred, ptrdiff_t pred_stride) { +void vpx_subtract_block_c(int rows, int cols, int16_t *diff, + ptrdiff_t diff_stride, const uint8_t *src, + ptrdiff_t src_stride, const uint8_t *pred, + ptrdiff_t pred_stride) { int r, c; for (r = 0; r < rows; r++) { - for (c = 0; c < cols; c++) - diff[c] = src[c] - pred[c]; + for (c = 0; c < cols; c++) diff[c] = src[c] - pred[c]; diff += diff_stride; pred += pred_stride; - src += src_stride; + src += src_stride; } } #if CONFIG_VP9_HIGHBITDEPTH -void vpx_highbd_subtract_block_c(int rows, int cols, - int16_t *diff, ptrdiff_t diff_stride, - const uint8_t *src8, ptrdiff_t src_stride, - const uint8_t *pred8, ptrdiff_t pred_stride, - int bd) { +void vpx_highbd_subtract_block_c(int rows, int cols, int16_t *diff, + ptrdiff_t diff_stride, const uint8_t *src8, + ptrdiff_t src_stride, const uint8_t *pred8, + ptrdiff_t pred_stride, int bd) { int r, c; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *pred = CONVERT_TO_SHORTPTR(pred8); - (void) bd; + (void)bd; for (r = 0; r < rows; r++) { for (c = 0; c < cols; c++) { @@ -50,7 +48,7 @@ void vpx_highbd_subtract_block_c(int rows, int cols, diff += diff_stride; pred += pred_stride; - src += src_stride; + src += src_stride; } } 
#endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vpx_dsp/sum_squares.c b/libs/libvpx/vpx_dsp/sum_squares.c new file mode 100644 index 0000000000..7c535ac2db --- /dev/null +++ b/libs/libvpx/vpx_dsp/sum_squares.c @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#include "./vpx_dsp_rtcd.h" + +uint64_t vpx_sum_squares_2d_i16_c(const int16_t *src, int src_stride, + int size) { + int r, c; + uint64_t ss = 0; + + for (r = 0; r < size; r++) { + for (c = 0; c < size; c++) { + const int16_t v = src[c]; + ss += v * v; + } + src += src_stride; + } + + return ss; +} diff --git a/libs/libvpx/vpx_dsp/txfm_common.h b/libs/libvpx/vpx_dsp/txfm_common.h index 442e6a57b5..fd27f928ed 100644 --- a/libs/libvpx/vpx_dsp/txfm_common.h +++ b/libs/libvpx/vpx_dsp/txfm_common.h @@ -15,7 +15,7 @@ // Constants and Macros used by all idct/dct functions #define DCT_CONST_BITS 14 -#define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1)) +#define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1)) #define UNIT_QUANT_SHIFT 2 #define UNIT_QUANT_FACTOR (1 << UNIT_QUANT_SHIFT) @@ -25,15 +25,15 @@ // printf("static const int cospi_%d_64 = %.0f;\n", i, // round(16384 * cos(i*M_PI/64))); // Note: sin(k*Pi/64) = cos((32-k)*Pi/64) -static const tran_high_t cospi_1_64 = 16364; -static const tran_high_t cospi_2_64 = 16305; -static const tran_high_t cospi_3_64 = 16207; -static const tran_high_t cospi_4_64 = 16069; -static const tran_high_t cospi_5_64 = 15893; -static const tran_high_t cospi_6_64 = 15679; -static const tran_high_t cospi_7_64 = 15426; -static const tran_high_t cospi_8_64 = 15137; -static const tran_high_t cospi_9_64 = 
14811; +static const tran_high_t cospi_1_64 = 16364; +static const tran_high_t cospi_2_64 = 16305; +static const tran_high_t cospi_3_64 = 16207; +static const tran_high_t cospi_4_64 = 16069; +static const tran_high_t cospi_5_64 = 15893; +static const tran_high_t cospi_6_64 = 15679; +static const tran_high_t cospi_7_64 = 15426; +static const tran_high_t cospi_8_64 = 15137; +static const tran_high_t cospi_9_64 = 14811; static const tran_high_t cospi_10_64 = 14449; static const tran_high_t cospi_11_64 = 14053; static const tran_high_t cospi_12_64 = 13623; diff --git a/libs/libvpx/vpx_dsp/variance.c b/libs/libvpx/vpx_dsp/variance.c index e8bddb0a0e..d1fa0560d5 100644 --- a/libs/libvpx/vpx_dsp/variance.c +++ b/libs/libvpx/vpx_dsp/variance.c @@ -17,18 +17,12 @@ #include "vpx_dsp/variance.h" static const uint8_t bilinear_filters[8][2] = { - { 128, 0 }, - { 112, 16 }, - { 96, 32 }, - { 80, 48 }, - { 64, 64 }, - { 48, 80 }, - { 32, 96 }, - { 16, 112 }, + { 128, 0 }, { 112, 16 }, { 96, 32 }, { 80, 48 }, + { 64, 64 }, { 48, 80 }, { 32, 96 }, { 16, 112 }, }; -uint32_t vpx_get4x4sse_cs_c(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride) { +uint32_t vpx_get4x4sse_cs_c(const uint8_t *a, int a_stride, const uint8_t *b, + int b_stride) { int distortion = 0; int r, c; @@ -55,31 +49,8 @@ uint32_t vpx_get_mb_ss_c(const int16_t *a) { return sum; } -uint32_t vpx_variance_halfpixvar16x16_h_c(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - uint32_t *sse) { - return vpx_sub_pixel_variance16x16_c(a, a_stride, 4, 0, - b, b_stride, sse); -} - - -uint32_t vpx_variance_halfpixvar16x16_v_c(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - uint32_t *sse) { - return vpx_sub_pixel_variance16x16_c(a, a_stride, 0, 4, - b, b_stride, sse); -} - -uint32_t vpx_variance_halfpixvar16x16_hv_c(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - uint32_t *sse) { - return vpx_sub_pixel_variance16x16_c(a, a_stride, 4, 4, - b, b_stride, sse); 
-} - -static void variance(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - int w, int h, uint32_t *sse, int *sum) { +static void variance(const uint8_t *a, int a_stride, const uint8_t *b, + int b_stride, int w, int h, uint32_t *sse, int *sum) { int i, j; *sum = 0; @@ -115,9 +86,8 @@ static void var_filter_block2d_bil_first_pass(const uint8_t *a, uint16_t *b, for (i = 0; i < output_height; ++i) { for (j = 0; j < output_width; ++j) { - b[j] = ROUND_POWER_OF_TWO((int)a[0] * filter[0] + - (int)a[pixel_step] * filter[1], - FILTER_BITS); + b[j] = ROUND_POWER_OF_TWO( + (int)a[0] * filter[0] + (int)a[pixel_step] * filter[1], FILTER_BITS); ++a; } @@ -142,13 +112,12 @@ static void var_filter_block2d_bil_second_pass(const uint16_t *a, uint8_t *b, unsigned int output_height, unsigned int output_width, const uint8_t *filter) { - unsigned int i, j; + unsigned int i, j; for (i = 0; i < output_height; ++i) { for (j = 0; j < output_width; ++j) { - b[j] = ROUND_POWER_OF_TWO((int)a[0] * filter[0] + - (int)a[pixel_step] * filter[1], - FILTER_BITS); + b[j] = ROUND_POWER_OF_TWO( + (int)a[0] * filter[0] + (int)a[pixel_step] * filter[1], FILTER_BITS); ++a; } @@ -157,82 +126,78 @@ static void var_filter_block2d_bil_second_pass(const uint16_t *a, uint8_t *b, } } -#define VAR(W, H) \ -uint32_t vpx_variance##W##x##H##_c(const uint8_t *a, int a_stride, \ - const uint8_t *b, int b_stride, \ - uint32_t *sse) { \ - int sum; \ - variance(a, a_stride, b, b_stride, W, H, sse, &sum); \ - return *sse - (((int64_t)sum * sum) / (W * H)); \ -} +#define VAR(W, H) \ + uint32_t vpx_variance##W##x##H##_c(const uint8_t *a, int a_stride, \ + const uint8_t *b, int b_stride, \ + uint32_t *sse) { \ + int sum; \ + variance(a, a_stride, b, b_stride, W, H, sse, &sum); \ + return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \ + } -#define SUBPIX_VAR(W, H) \ -uint32_t vpx_sub_pixel_variance##W##x##H##_c(const uint8_t *a, int a_stride, \ - int xoffset, int yoffset, \ - const uint8_t *b, int 
b_stride, \ - uint32_t *sse) { \ - uint16_t fdata3[(H + 1) * W]; \ - uint8_t temp2[H * W]; \ -\ - var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \ - bilinear_filters[xoffset]); \ - var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ - bilinear_filters[yoffset]); \ -\ - return vpx_variance##W##x##H##_c(temp2, W, b, b_stride, sse); \ -} +#define SUBPIX_VAR(W, H) \ + uint32_t vpx_sub_pixel_variance##W##x##H##_c( \ + const uint8_t *a, int a_stride, int xoffset, int yoffset, \ + const uint8_t *b, int b_stride, uint32_t *sse) { \ + uint16_t fdata3[(H + 1) * W]; \ + uint8_t temp2[H * W]; \ + \ + var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \ + bilinear_filters[xoffset]); \ + var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ + bilinear_filters[yoffset]); \ + \ + return vpx_variance##W##x##H##_c(temp2, W, b, b_stride, sse); \ + } -#define SUBPIX_AVG_VAR(W, H) \ -uint32_t vpx_sub_pixel_avg_variance##W##x##H##_c(const uint8_t *a, \ - int a_stride, \ - int xoffset, int yoffset, \ - const uint8_t *b, \ - int b_stride, \ - uint32_t *sse, \ - const uint8_t *second_pred) { \ - uint16_t fdata3[(H + 1) * W]; \ - uint8_t temp2[H * W]; \ - DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \ -\ - var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \ - bilinear_filters[xoffset]); \ - var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ - bilinear_filters[yoffset]); \ -\ - vpx_comp_avg_pred(temp3, second_pred, W, H, temp2, W); \ -\ - return vpx_variance##W##x##H##_c(temp3, W, b, b_stride, sse); \ -} +#define SUBPIX_AVG_VAR(W, H) \ + uint32_t vpx_sub_pixel_avg_variance##W##x##H##_c( \ + const uint8_t *a, int a_stride, int xoffset, int yoffset, \ + const uint8_t *b, int b_stride, uint32_t *sse, \ + const uint8_t *second_pred) { \ + uint16_t fdata3[(H + 1) * W]; \ + uint8_t temp2[H * W]; \ + DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \ + \ + var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H 
+ 1, W, \ + bilinear_filters[xoffset]); \ + var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ + bilinear_filters[yoffset]); \ + \ + vpx_comp_avg_pred(temp3, second_pred, W, H, temp2, W); \ + \ + return vpx_variance##W##x##H##_c(temp3, W, b, b_stride, sse); \ + } /* Identical to the variance call except it takes an additional parameter, sum, * and returns that value using pass-by-reference instead of returning * sse - sum^2 / w*h */ -#define GET_VAR(W, H) \ -void vpx_get##W##x##H##var_c(const uint8_t *a, int a_stride, \ - const uint8_t *b, int b_stride, \ - uint32_t *sse, int *sum) { \ - variance(a, a_stride, b, b_stride, W, H, sse, sum); \ -} +#define GET_VAR(W, H) \ + void vpx_get##W##x##H##var_c(const uint8_t *a, int a_stride, \ + const uint8_t *b, int b_stride, uint32_t *sse, \ + int *sum) { \ + variance(a, a_stride, b, b_stride, W, H, sse, sum); \ + } /* Identical to the variance call except it does not calculate the * sse - sum^2 / w*h and returns sse in addtion to modifying the passed in * variable. */ -#define MSE(W, H) \ -uint32_t vpx_mse##W##x##H##_c(const uint8_t *a, int a_stride, \ - const uint8_t *b, int b_stride, \ - uint32_t *sse) { \ - int sum; \ - variance(a, a_stride, b, b_stride, W, H, sse, &sum); \ - return *sse; \ -} +#define MSE(W, H) \ + uint32_t vpx_mse##W##x##H##_c(const uint8_t *a, int a_stride, \ + const uint8_t *b, int b_stride, \ + uint32_t *sse) { \ + int sum; \ + variance(a, a_stride, b, b_stride, W, H, sse, &sum); \ + return *sse; \ + } /* All three forms of the variance are available in the same sizes. 
*/ #define VARIANCES(W, H) \ - VAR(W, H) \ - SUBPIX_VAR(W, H) \ - SUBPIX_AVG_VAR(W, H) + VAR(W, H) \ + SUBPIX_VAR(W, H) \ + SUBPIX_AVG_VAR(W, H) VARIANCES(64, 64) VARIANCES(64, 32) @@ -256,9 +221,8 @@ MSE(16, 8) MSE(8, 16) MSE(8, 8) -void vpx_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, - int width, int height, - const uint8_t *ref, int ref_stride) { +void vpx_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width, + int height, const uint8_t *ref, int ref_stride) { int i, j; for (i = 0; i < height; ++i) { @@ -273,9 +237,9 @@ void vpx_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, } #if CONFIG_VP9_HIGHBITDEPTH -static void highbd_variance64(const uint8_t *a8, int a_stride, - const uint8_t *b8, int b_stride, - int w, int h, uint64_t *sse, uint64_t *sum) { +static void highbd_variance64(const uint8_t *a8, int a_stride, + const uint8_t *b8, int b_stride, int w, int h, + uint64_t *sse, int64_t *sum) { int i, j; uint16_t *a = CONVERT_TO_SHORTPTR(a8); @@ -294,133 +258,121 @@ static void highbd_variance64(const uint8_t *a8, int a_stride, } } -static void highbd_8_variance(const uint8_t *a8, int a_stride, - const uint8_t *b8, int b_stride, - int w, int h, uint32_t *sse, int *sum) { +static void highbd_8_variance(const uint8_t *a8, int a_stride, + const uint8_t *b8, int b_stride, int w, int h, + uint32_t *sse, int *sum) { uint64_t sse_long = 0; - uint64_t sum_long = 0; + int64_t sum_long = 0; highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long); *sse = (uint32_t)sse_long; *sum = (int)sum_long; } -static void highbd_10_variance(const uint8_t *a8, int a_stride, - const uint8_t *b8, int b_stride, - int w, int h, uint32_t *sse, int *sum) { +static void highbd_10_variance(const uint8_t *a8, int a_stride, + const uint8_t *b8, int b_stride, int w, int h, + uint32_t *sse, int *sum) { uint64_t sse_long = 0; - uint64_t sum_long = 0; + int64_t sum_long = 0; highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long); 
*sse = (uint32_t)ROUND_POWER_OF_TWO(sse_long, 4); *sum = (int)ROUND_POWER_OF_TWO(sum_long, 2); } -static void highbd_12_variance(const uint8_t *a8, int a_stride, - const uint8_t *b8, int b_stride, - int w, int h, uint32_t *sse, int *sum) { +static void highbd_12_variance(const uint8_t *a8, int a_stride, + const uint8_t *b8, int b_stride, int w, int h, + uint32_t *sse, int *sum) { uint64_t sse_long = 0; - uint64_t sum_long = 0; + int64_t sum_long = 0; highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long); *sse = (uint32_t)ROUND_POWER_OF_TWO(sse_long, 8); *sum = (int)ROUND_POWER_OF_TWO(sum_long, 4); } -#define HIGHBD_VAR(W, H) \ -uint32_t vpx_highbd_8_variance##W##x##H##_c(const uint8_t *a, \ - int a_stride, \ - const uint8_t *b, \ - int b_stride, \ - uint32_t *sse) { \ - int sum; \ - highbd_8_variance(a, a_stride, b, b_stride, W, H, sse, &sum); \ - return *sse - (((int64_t)sum * sum) / (W * H)); \ -} \ -\ -uint32_t vpx_highbd_10_variance##W##x##H##_c(const uint8_t *a, \ - int a_stride, \ - const uint8_t *b, \ - int b_stride, \ - uint32_t *sse) { \ - int sum; \ - highbd_10_variance(a, a_stride, b, b_stride, W, H, sse, &sum); \ - return *sse - (((int64_t)sum * sum) / (W * H)); \ -} \ -\ -uint32_t vpx_highbd_12_variance##W##x##H##_c(const uint8_t *a, \ - int a_stride, \ - const uint8_t *b, \ - int b_stride, \ - uint32_t *sse) { \ - int sum; \ - highbd_12_variance(a, a_stride, b, b_stride, W, H, sse, &sum); \ - return *sse - (((int64_t)sum * sum) / (W * H)); \ -} +#define HIGHBD_VAR(W, H) \ + uint32_t vpx_highbd_8_variance##W##x##H##_c(const uint8_t *a, int a_stride, \ + const uint8_t *b, int b_stride, \ + uint32_t *sse) { \ + int sum; \ + highbd_8_variance(a, a_stride, b, b_stride, W, H, sse, &sum); \ + return *sse - (((int64_t)sum * sum) / (W * H)); \ + } \ + \ + uint32_t vpx_highbd_10_variance##W##x##H##_c(const uint8_t *a, int a_stride, \ + const uint8_t *b, int b_stride, \ + uint32_t *sse) { \ + int sum; \ + int64_t var; \ + 
highbd_10_variance(a, a_stride, b, b_stride, W, H, sse, &sum); \ + var = (int64_t)(*sse) - (((int64_t)sum * sum) / (W * H)); \ + return (var >= 0) ? (uint32_t)var : 0; \ + } \ + \ + uint32_t vpx_highbd_12_variance##W##x##H##_c(const uint8_t *a, int a_stride, \ + const uint8_t *b, int b_stride, \ + uint32_t *sse) { \ + int sum; \ + int64_t var; \ + highbd_12_variance(a, a_stride, b, b_stride, W, H, sse, &sum); \ + var = (int64_t)(*sse) - (((int64_t)sum * sum) / (W * H)); \ + return (var >= 0) ? (uint32_t)var : 0; \ + } -#define HIGHBD_GET_VAR(S) \ -void vpx_highbd_8_get##S##x##S##var_c(const uint8_t *src, int src_stride, \ - const uint8_t *ref, int ref_stride, \ - uint32_t *sse, int *sum) { \ - highbd_8_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \ -} \ -\ -void vpx_highbd_10_get##S##x##S##var_c(const uint8_t *src, int src_stride, \ - const uint8_t *ref, int ref_stride, \ - uint32_t *sse, int *sum) { \ - highbd_10_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \ -} \ -\ -void vpx_highbd_12_get##S##x##S##var_c(const uint8_t *src, int src_stride, \ - const uint8_t *ref, int ref_stride, \ - uint32_t *sse, int *sum) { \ - highbd_12_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \ -} +#define HIGHBD_GET_VAR(S) \ + void vpx_highbd_8_get##S##x##S##var_c(const uint8_t *src, int src_stride, \ + const uint8_t *ref, int ref_stride, \ + uint32_t *sse, int *sum) { \ + highbd_8_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \ + } \ + \ + void vpx_highbd_10_get##S##x##S##var_c(const uint8_t *src, int src_stride, \ + const uint8_t *ref, int ref_stride, \ + uint32_t *sse, int *sum) { \ + highbd_10_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \ + } \ + \ + void vpx_highbd_12_get##S##x##S##var_c(const uint8_t *src, int src_stride, \ + const uint8_t *ref, int ref_stride, \ + uint32_t *sse, int *sum) { \ + highbd_12_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \ + } -#define HIGHBD_MSE(W, H) \ -uint32_t 
vpx_highbd_8_mse##W##x##H##_c(const uint8_t *src, \ - int src_stride, \ - const uint8_t *ref, \ - int ref_stride, \ - uint32_t *sse) { \ - int sum; \ - highbd_8_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum); \ - return *sse; \ -} \ -\ -uint32_t vpx_highbd_10_mse##W##x##H##_c(const uint8_t *src, \ - int src_stride, \ - const uint8_t *ref, \ - int ref_stride, \ - uint32_t *sse) { \ - int sum; \ - highbd_10_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum); \ - return *sse; \ -} \ -\ -uint32_t vpx_highbd_12_mse##W##x##H##_c(const uint8_t *src, \ - int src_stride, \ - const uint8_t *ref, \ - int ref_stride, \ - uint32_t *sse) { \ - int sum; \ - highbd_12_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum); \ - return *sse; \ -} +#define HIGHBD_MSE(W, H) \ + uint32_t vpx_highbd_8_mse##W##x##H##_c(const uint8_t *src, int src_stride, \ + const uint8_t *ref, int ref_stride, \ + uint32_t *sse) { \ + int sum; \ + highbd_8_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum); \ + return *sse; \ + } \ + \ + uint32_t vpx_highbd_10_mse##W##x##H##_c(const uint8_t *src, int src_stride, \ + const uint8_t *ref, int ref_stride, \ + uint32_t *sse) { \ + int sum; \ + highbd_10_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum); \ + return *sse; \ + } \ + \ + uint32_t vpx_highbd_12_mse##W##x##H##_c(const uint8_t *src, int src_stride, \ + const uint8_t *ref, int ref_stride, \ + uint32_t *sse) { \ + int sum; \ + highbd_12_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum); \ + return *sse; \ + } static void highbd_var_filter_block2d_bil_first_pass( - const uint8_t *src_ptr8, - uint16_t *output_ptr, - unsigned int src_pixels_per_line, - int pixel_step, - unsigned int output_height, - unsigned int output_width, + const uint8_t *src_ptr8, uint16_t *output_ptr, + unsigned int src_pixels_per_line, int pixel_step, + unsigned int output_height, unsigned int output_width, const uint8_t *filter) { unsigned int i, j; uint16_t *src_ptr = 
CONVERT_TO_SHORTPTR(src_ptr8); for (i = 0; i < output_height; ++i) { for (j = 0; j < output_width; ++j) { - output_ptr[j] = - ROUND_POWER_OF_TWO((int)src_ptr[0] * filter[0] + - (int)src_ptr[pixel_step] * filter[1], - FILTER_BITS); + output_ptr[j] = ROUND_POWER_OF_TWO( + (int)src_ptr[0] * filter[0] + (int)src_ptr[pixel_step] * filter[1], + FILTER_BITS); ++src_ptr; } @@ -432,21 +384,17 @@ static void highbd_var_filter_block2d_bil_first_pass( } static void highbd_var_filter_block2d_bil_second_pass( - const uint16_t *src_ptr, - uint16_t *output_ptr, - unsigned int src_pixels_per_line, - unsigned int pixel_step, - unsigned int output_height, - unsigned int output_width, + const uint16_t *src_ptr, uint16_t *output_ptr, + unsigned int src_pixels_per_line, unsigned int pixel_step, + unsigned int output_height, unsigned int output_width, const uint8_t *filter) { - unsigned int i, j; + unsigned int i, j; for (i = 0; i < output_height; ++i) { for (j = 0; j < output_width; ++j) { - output_ptr[j] = - ROUND_POWER_OF_TWO((int)src_ptr[0] * filter[0] + - (int)src_ptr[pixel_step] * filter[1], - FILTER_BITS); + output_ptr[j] = ROUND_POWER_OF_TWO( + (int)src_ptr[0] * filter[0] + (int)src_ptr[pixel_step] * filter[1], + FILTER_BITS); ++src_ptr; } @@ -455,130 +403,118 @@ static void highbd_var_filter_block2d_bil_second_pass( } } -#define HIGHBD_SUBPIX_VAR(W, H) \ -uint32_t vpx_highbd_8_sub_pixel_variance##W##x##H##_c( \ - const uint8_t *src, int src_stride, \ - int xoffset, int yoffset, \ - const uint8_t *dst, int dst_stride, \ - uint32_t *sse) { \ - uint16_t fdata3[(H + 1) * W]; \ - uint16_t temp2[H * W]; \ -\ - highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \ - W, bilinear_filters[xoffset]); \ - highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ - bilinear_filters[yoffset]); \ -\ - return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, dst, \ - dst_stride, sse); \ -} \ -\ -uint32_t 
vpx_highbd_10_sub_pixel_variance##W##x##H##_c( \ - const uint8_t *src, int src_stride, \ - int xoffset, int yoffset, \ - const uint8_t *dst, int dst_stride, \ - uint32_t *sse) { \ - uint16_t fdata3[(H + 1) * W]; \ - uint16_t temp2[H * W]; \ -\ - highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \ - W, bilinear_filters[xoffset]); \ - highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ - bilinear_filters[yoffset]); \ -\ - return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \ - W, dst, dst_stride, sse); \ -} \ -\ -uint32_t vpx_highbd_12_sub_pixel_variance##W##x##H##_c( \ - const uint8_t *src, int src_stride, \ - int xoffset, int yoffset, \ - const uint8_t *dst, int dst_stride, \ - uint32_t *sse) { \ - uint16_t fdata3[(H + 1) * W]; \ - uint16_t temp2[H * W]; \ -\ - highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \ - W, bilinear_filters[xoffset]); \ - highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ - bilinear_filters[yoffset]); \ -\ - return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \ - W, dst, dst_stride, sse); \ -} +#define HIGHBD_SUBPIX_VAR(W, H) \ + uint32_t vpx_highbd_8_sub_pixel_variance##W##x##H##_c( \ + const uint8_t *src, int src_stride, int xoffset, int yoffset, \ + const uint8_t *dst, int dst_stride, uint32_t *sse) { \ + uint16_t fdata3[(H + 1) * W]; \ + uint16_t temp2[H * W]; \ + \ + highbd_var_filter_block2d_bil_first_pass( \ + src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]); \ + highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ + bilinear_filters[yoffset]); \ + \ + return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \ + dst, dst_stride, sse); \ + } \ + \ + uint32_t vpx_highbd_10_sub_pixel_variance##W##x##H##_c( \ + const uint8_t *src, int src_stride, int xoffset, int yoffset, \ + const uint8_t *dst, int dst_stride, uint32_t *sse) { \ + uint16_t fdata3[(H + 1) * W]; \ 
+ uint16_t temp2[H * W]; \ + \ + highbd_var_filter_block2d_bil_first_pass( \ + src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]); \ + highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ + bilinear_filters[yoffset]); \ + \ + return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \ + dst, dst_stride, sse); \ + } \ + \ + uint32_t vpx_highbd_12_sub_pixel_variance##W##x##H##_c( \ + const uint8_t *src, int src_stride, int xoffset, int yoffset, \ + const uint8_t *dst, int dst_stride, uint32_t *sse) { \ + uint16_t fdata3[(H + 1) * W]; \ + uint16_t temp2[H * W]; \ + \ + highbd_var_filter_block2d_bil_first_pass( \ + src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]); \ + highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ + bilinear_filters[yoffset]); \ + \ + return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \ + dst, dst_stride, sse); \ + } -#define HIGHBD_SUBPIX_AVG_VAR(W, H) \ -uint32_t vpx_highbd_8_sub_pixel_avg_variance##W##x##H##_c( \ - const uint8_t *src, int src_stride, \ - int xoffset, int yoffset, \ - const uint8_t *dst, int dst_stride, \ - uint32_t *sse, \ - const uint8_t *second_pred) { \ - uint16_t fdata3[(H + 1) * W]; \ - uint16_t temp2[H * W]; \ - DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \ -\ - highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \ - W, bilinear_filters[xoffset]); \ - highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ - bilinear_filters[yoffset]); \ -\ - vpx_highbd_comp_avg_pred(temp3, second_pred, W, H, \ - CONVERT_TO_BYTEPTR(temp2), W); \ -\ - return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, dst, \ - dst_stride, sse); \ -} \ -\ -uint32_t vpx_highbd_10_sub_pixel_avg_variance##W##x##H##_c( \ - const uint8_t *src, int src_stride, \ - int xoffset, int yoffset, \ - const uint8_t *dst, int dst_stride, \ - uint32_t *sse, \ - const uint8_t *second_pred) { \ - uint16_t 
fdata3[(H + 1) * W]; \ - uint16_t temp2[H * W]; \ - DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \ -\ - highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \ - W, bilinear_filters[xoffset]); \ - highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ - bilinear_filters[yoffset]); \ -\ - vpx_highbd_comp_avg_pred(temp3, second_pred, W, H, \ - CONVERT_TO_BYTEPTR(temp2), W); \ -\ - return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), \ - W, dst, dst_stride, sse); \ -} \ -\ -uint32_t vpx_highbd_12_sub_pixel_avg_variance##W##x##H##_c( \ - const uint8_t *src, int src_stride, \ - int xoffset, int yoffset, \ - const uint8_t *dst, int dst_stride, \ - uint32_t *sse, \ - const uint8_t *second_pred) { \ - uint16_t fdata3[(H + 1) * W]; \ - uint16_t temp2[H * W]; \ - DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \ -\ - highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \ - W, bilinear_filters[xoffset]); \ - highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ - bilinear_filters[yoffset]); \ -\ - vpx_highbd_comp_avg_pred(temp3, second_pred, W, H, \ - CONVERT_TO_BYTEPTR(temp2), W); \ -\ - return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), \ - W, dst, dst_stride, sse); \ -} +#define HIGHBD_SUBPIX_AVG_VAR(W, H) \ + uint32_t vpx_highbd_8_sub_pixel_avg_variance##W##x##H##_c( \ + const uint8_t *src, int src_stride, int xoffset, int yoffset, \ + const uint8_t *dst, int dst_stride, uint32_t *sse, \ + const uint8_t *second_pred) { \ + uint16_t fdata3[(H + 1) * W]; \ + uint16_t temp2[H * W]; \ + DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \ + \ + highbd_var_filter_block2d_bil_first_pass( \ + src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]); \ + highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ + bilinear_filters[yoffset]); \ + \ + vpx_highbd_comp_avg_pred(temp3, second_pred, W, H, \ + CONVERT_TO_BYTEPTR(temp2), W); \ + \ + return 
vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \ + dst, dst_stride, sse); \ + } \ + \ + uint32_t vpx_highbd_10_sub_pixel_avg_variance##W##x##H##_c( \ + const uint8_t *src, int src_stride, int xoffset, int yoffset, \ + const uint8_t *dst, int dst_stride, uint32_t *sse, \ + const uint8_t *second_pred) { \ + uint16_t fdata3[(H + 1) * W]; \ + uint16_t temp2[H * W]; \ + DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \ + \ + highbd_var_filter_block2d_bil_first_pass( \ + src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]); \ + highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ + bilinear_filters[yoffset]); \ + \ + vpx_highbd_comp_avg_pred(temp3, second_pred, W, H, \ + CONVERT_TO_BYTEPTR(temp2), W); \ + \ + return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \ + dst, dst_stride, sse); \ + } \ + \ + uint32_t vpx_highbd_12_sub_pixel_avg_variance##W##x##H##_c( \ + const uint8_t *src, int src_stride, int xoffset, int yoffset, \ + const uint8_t *dst, int dst_stride, uint32_t *sse, \ + const uint8_t *second_pred) { \ + uint16_t fdata3[(H + 1) * W]; \ + uint16_t temp2[H * W]; \ + DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \ + \ + highbd_var_filter_block2d_bil_first_pass( \ + src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]); \ + highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ + bilinear_filters[yoffset]); \ + \ + vpx_highbd_comp_avg_pred(temp3, second_pred, W, H, \ + CONVERT_TO_BYTEPTR(temp2), W); \ + \ + return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \ + dst, dst_stride, sse); \ + } /* All three forms of the variance are available in the same sizes. 
*/ #define HIGHBD_VARIANCES(W, H) \ - HIGHBD_VAR(W, H) \ - HIGHBD_SUBPIX_VAR(W, H) \ - HIGHBD_SUBPIX_AVG_VAR(W, H) + HIGHBD_VAR(W, H) \ + HIGHBD_SUBPIX_VAR(W, H) \ + HIGHBD_SUBPIX_AVG_VAR(W, H) HIGHBD_VARIANCES(64, 64) HIGHBD_VARIANCES(64, 32) diff --git a/libs/libvpx/vpx_dsp/variance.h b/libs/libvpx/vpx_dsp/variance.h index cd0fd98785..4c482551e0 100644 --- a/libs/libvpx/vpx_dsp/variance.h +++ b/libs/libvpx/vpx_dsp/variance.h @@ -22,15 +22,15 @@ extern "C" { #define FILTER_BITS 7 #define FILTER_WEIGHT 128 -typedef unsigned int(*vpx_sad_fn_t)(const uint8_t *a, int a_stride, - const uint8_t *b_ptr, int b_stride); +typedef unsigned int (*vpx_sad_fn_t)(const uint8_t *a, int a_stride, + const uint8_t *b_ptr, int b_stride); -typedef unsigned int(*vpx_sad_avg_fn_t)(const uint8_t *a_ptr, int a_stride, - const uint8_t *b_ptr, int b_stride, - const uint8_t *second_pred); +typedef unsigned int (*vpx_sad_avg_fn_t)(const uint8_t *a_ptr, int a_stride, + const uint8_t *b_ptr, int b_stride, + const uint8_t *second_pred); -typedef void (*vp8_copy32xn_fn_t)(const uint8_t *a, int a_stride, - uint8_t *b, int b_stride, int n); +typedef void (*vp8_copy32xn_fn_t)(const uint8_t *a, int a_stride, uint8_t *b, + int b_stride, int n); typedef void (*vpx_sad_multi_fn_t)(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, @@ -38,8 +38,7 @@ typedef void (*vpx_sad_multi_fn_t)(const uint8_t *a, int a_stride, typedef void (*vpx_sad_multi_d_fn_t)(const uint8_t *a, int a_stride, const uint8_t *const b_array[], - int b_stride, - unsigned int *sad_array); + int b_stride, unsigned int *sad_array); typedef unsigned int (*vpx_variance_fn_t)(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, @@ -50,42 +49,36 @@ typedef unsigned int (*vpx_subpixvariance_fn_t)(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, unsigned int *sse); -typedef unsigned int (*vpx_subp_avg_variance_fn_t)(const uint8_t *a_ptr, - int a_stride, - int xoffset, int yoffset, - const uint8_t *b_ptr, 
- int b_stride, - unsigned int *sse, - const uint8_t *second_pred); +typedef unsigned int (*vpx_subp_avg_variance_fn_t)( + const uint8_t *a_ptr, int a_stride, int xoffset, int yoffset, + const uint8_t *b_ptr, int b_stride, unsigned int *sse, + const uint8_t *second_pred); #if CONFIG_VP8 typedef struct variance_vtable { - vpx_sad_fn_t sdf; - vpx_variance_fn_t vf; + vpx_sad_fn_t sdf; + vpx_variance_fn_t vf; vpx_subpixvariance_fn_t svf; - vpx_variance_fn_t svf_halfpix_h; - vpx_variance_fn_t svf_halfpix_v; - vpx_variance_fn_t svf_halfpix_hv; - vpx_sad_multi_fn_t sdx3f; - vpx_sad_multi_fn_t sdx8f; - vpx_sad_multi_d_fn_t sdx4df; + vpx_sad_multi_fn_t sdx3f; + vpx_sad_multi_fn_t sdx8f; + vpx_sad_multi_d_fn_t sdx4df; #if ARCH_X86 || ARCH_X86_64 - vp8_copy32xn_fn_t copymem; + vp8_copy32xn_fn_t copymem; #endif } vp8_variance_fn_ptr_t; #endif // CONFIG_VP8 -#if CONFIG_VP9 || CONFIG_VP10 +#if CONFIG_VP9 typedef struct vp9_variance_vtable { - vpx_sad_fn_t sdf; - vpx_sad_avg_fn_t sdaf; - vpx_variance_fn_t vf; - vpx_subpixvariance_fn_t svf; + vpx_sad_fn_t sdf; + vpx_sad_avg_fn_t sdaf; + vpx_variance_fn_t vf; + vpx_subpixvariance_fn_t svf; vpx_subp_avg_variance_fn_t svaf; - vpx_sad_multi_fn_t sdx3f; - vpx_sad_multi_fn_t sdx8f; - vpx_sad_multi_d_fn_t sdx4df; + vpx_sad_multi_fn_t sdx3f; + vpx_sad_multi_fn_t sdx8f; + vpx_sad_multi_d_fn_t sdx4df; } vp9_variance_fn_ptr_t; -#endif // CONFIG_VP9 || CONFIG_VP10 +#endif // CONFIG_VP9 #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/vpx_dsp/vpx_convolve.c b/libs/libvpx/vpx_dsp/vpx_convolve.c index 2d1c927cbe..f17281b369 100644 --- a/libs/libvpx/vpx_dsp/vpx_convolve.c +++ b/libs/libvpx/vpx_dsp/vpx_convolve.c @@ -21,8 +21,8 @@ static void convolve_horiz(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, - const InterpKernel *x_filters, - int x0_q4, int x_step_q4, int w, int h) { + const InterpKernel *x_filters, int x0_q4, + int x_step_q4, int w, int h) { int x, y; src -= SUBPEL_TAPS / 2 - 1; for (y = 0; y 
< h; ++y) { @@ -31,8 +31,7 @@ static void convolve_horiz(const uint8_t *src, ptrdiff_t src_stride, const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS]; const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK]; int k, sum = 0; - for (k = 0; k < SUBPEL_TAPS; ++k) - sum += src_x[k] * x_filter[k]; + for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_x[k] * x_filter[k]; dst[x] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); x_q4 += x_step_q4; } @@ -43,8 +42,8 @@ static void convolve_horiz(const uint8_t *src, ptrdiff_t src_stride, static void convolve_avg_horiz(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, - const InterpKernel *x_filters, - int x0_q4, int x_step_q4, int w, int h) { + const InterpKernel *x_filters, int x0_q4, + int x_step_q4, int w, int h) { int x, y; src -= SUBPEL_TAPS / 2 - 1; for (y = 0; y < h; ++y) { @@ -53,10 +52,9 @@ static void convolve_avg_horiz(const uint8_t *src, ptrdiff_t src_stride, const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS]; const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK]; int k, sum = 0; - for (k = 0; k < SUBPEL_TAPS; ++k) - sum += src_x[k] * x_filter[k]; - dst[x] = ROUND_POWER_OF_TWO(dst[x] + - clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1); + for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_x[k] * x_filter[k]; + dst[x] = ROUND_POWER_OF_TWO( + dst[x] + clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1); x_q4 += x_step_q4; } src += src_stride; @@ -66,8 +64,8 @@ static void convolve_avg_horiz(const uint8_t *src, ptrdiff_t src_stride, static void convolve_vert(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, - const InterpKernel *y_filters, - int y0_q4, int y_step_q4, int w, int h) { + const InterpKernel *y_filters, int y0_q4, + int y_step_q4, int w, int h) { int x, y; src -= src_stride * (SUBPEL_TAPS / 2 - 1); @@ -89,8 +87,8 @@ static void convolve_vert(const uint8_t *src, ptrdiff_t src_stride, static void convolve_avg_vert(const uint8_t *src, 
ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, - const InterpKernel *y_filters, - int y0_q4, int y_step_q4, int w, int h) { + const InterpKernel *y_filters, int y0_q4, + int y_step_q4, int w, int h) { int x, y; src -= src_stride * (SUBPEL_TAPS / 2 - 1); @@ -102,8 +100,10 @@ static void convolve_avg_vert(const uint8_t *src, ptrdiff_t src_stride, int k, sum = 0; for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_y[k * src_stride] * y_filter[k]; - dst[y * dst_stride] = ROUND_POWER_OF_TWO(dst[y * dst_stride] + - clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), 1); + dst[y * dst_stride] = ROUND_POWER_OF_TWO( + dst[y * dst_stride] + + clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)), + 1); y_q4 += y_step_q4; } ++src; @@ -111,13 +111,11 @@ static void convolve_avg_vert(const uint8_t *src, ptrdiff_t src_stride, } } -static void convolve(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const InterpKernel *const x_filters, +static void convolve(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const InterpKernel *const x_filters, int x0_q4, int x_step_q4, - const InterpKernel *const y_filters, - int y0_q4, int y_step_q4, - int w, int h) { + const InterpKernel *const y_filters, int y0_q4, + int y_step_q4, int w, int h) { // Note: Fixed size intermediate buffer, temp, places limits on parameters. // 2d filtering proceeds in 2 steps: // (1) Interpolate horizontally into an intermediate buffer, temp. @@ -132,7 +130,7 @@ static void convolve(const uint8_t *src, ptrdiff_t src_stride, // --((64 - 1) * 32 + 15) >> 4 + 8 = 135. 
uint8_t temp[135 * 64]; int intermediate_height = - (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS; + (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS; assert(w <= 64); assert(h <= 64); @@ -158,67 +156,66 @@ static int get_filter_offset(const int16_t *f, const InterpKernel *base) { void vpx_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { const InterpKernel *const filters_x = get_filter_base(filter_x); const int x0_q4 = get_filter_offset(filter_x, filters_x); (void)filter_y; (void)y_step_q4; - convolve_horiz(src, src_stride, dst, dst_stride, filters_x, - x0_q4, x_step_q4, w, h); + convolve_horiz(src, src_stride, dst, dst_stride, filters_x, x0_q4, x_step_q4, + w, h); } void vpx_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { const InterpKernel *const filters_x = get_filter_base(filter_x); const int x0_q4 = get_filter_offset(filter_x, filters_x); (void)filter_y; (void)y_step_q4; - convolve_avg_horiz(src, src_stride, dst, dst_stride, filters_x, - x0_q4, x_step_q4, w, h); + convolve_avg_horiz(src, src_stride, dst, dst_stride, filters_x, x0_q4, + x_step_q4, w, h); } void vpx_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { const InterpKernel *const filters_y = get_filter_base(filter_y); const int y0_q4 = get_filter_offset(filter_y, filters_y); (void)filter_x; (void)x_step_q4; - convolve_vert(src, src_stride, dst, dst_stride, 
filters_y, - y0_q4, y_step_q4, w, h); + convolve_vert(src, src_stride, dst, dst_stride, filters_y, y0_q4, y_step_q4, + w, h); } void vpx_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { const InterpKernel *const filters_y = get_filter_base(filter_y); const int y0_q4 = get_filter_offset(filter_y, filters_y); (void)filter_x; (void)x_step_q4; - convolve_avg_vert(src, src_stride, dst, dst_stride, filters_y, - y0_q4, y_step_q4, w, h); + convolve_avg_vert(src, src_stride, dst, dst_stride, filters_y, y0_q4, + y_step_q4, w, h); } -void vpx_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, +void vpx_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const int16_t *filter_x, + int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { const InterpKernel *const filters_x = get_filter_base(filter_x); const int x0_q4 = get_filter_offset(filter_x, filters_x); @@ -226,35 +223,34 @@ void vpx_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, const InterpKernel *const filters_y = get_filter_base(filter_y); const int y0_q4 = get_filter_offset(filter_y, filters_y); - convolve(src, src_stride, dst, dst_stride, - filters_x, x0_q4, x_step_q4, + convolve(src, src_stride, dst, dst_stride, filters_x, x0_q4, x_step_q4, filters_y, y0_q4, y_step_q4, w, h); } -void vpx_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, +void vpx_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const int16_t *filter_x, + int x_step_q4, const int16_t 
*filter_y, int y_step_q4, int w, int h) { /* Fixed size intermediate buffer places limits on parameters. */ DECLARE_ALIGNED(16, uint8_t, temp[64 * 64]); assert(w <= 64); assert(h <= 64); - vpx_convolve8_c(src, src_stride, temp, 64, - filter_x, x_step_q4, filter_y, y_step_q4, w, h); + vpx_convolve8_c(src, src_stride, temp, 64, filter_x, x_step_q4, filter_y, + y_step_q4, w, h); vpx_convolve_avg_c(temp, 64, dst, dst_stride, NULL, 0, NULL, 0, w, h); } -void vpx_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int filter_x_stride, - const int16_t *filter_y, int filter_y_stride, - int w, int h) { +void vpx_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const int16_t *filter_x, + int filter_x_stride, const int16_t *filter_y, + int filter_y_stride, int w, int h) { int r; - (void)filter_x; (void)filter_x_stride; - (void)filter_y; (void)filter_y_stride; + (void)filter_x; + (void)filter_x_stride; + (void)filter_y; + (void)filter_y_stride; for (r = h; r > 0; --r) { memcpy(dst, src, w); @@ -263,47 +259,44 @@ void vpx_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, } } -void vpx_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int filter_x_stride, - const int16_t *filter_y, int filter_y_stride, - int w, int h) { +void vpx_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const int16_t *filter_x, + int filter_x_stride, const int16_t *filter_y, + int filter_y_stride, int w, int h) { int x, y; - (void)filter_x; (void)filter_x_stride; - (void)filter_y; (void)filter_y_stride; + (void)filter_x; + (void)filter_x_stride; + (void)filter_y; + (void)filter_y_stride; for (y = 0; y < h; ++y) { - for (x = 0; x < w; ++x) - dst[x] = ROUND_POWER_OF_TWO(dst[x] + src[x], 1); + for (x = 0; x < w; ++x) dst[x] = ROUND_POWER_OF_TWO(dst[x] + src[x], 1); 
src += src_stride; dst += dst_stride; } } -void vpx_scaled_horiz_c(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, +void vpx_scaled_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const int16_t *filter_x, + int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } -void vpx_scaled_vert_c(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, +void vpx_scaled_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const int16_t *filter_x, + int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } -void vpx_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, +void vpx_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const int16_t *filter_x, + int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); @@ -312,8 +305,8 @@ void vpx_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride, void vpx_scaled_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } @@ -321,17 
+314,16 @@ void vpx_scaled_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, void vpx_scaled_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { + const int16_t *filter_y, int y_step_q4, int w, + int h) { vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } -void vpx_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h) { +void vpx_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const int16_t *filter_x, + int x_step_q4, const int16_t *filter_y, int y_step_q4, + int w, int h) { vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } @@ -339,9 +331,8 @@ void vpx_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, #if CONFIG_VP9_HIGHBITDEPTH static void highbd_convolve_horiz(const uint8_t *src8, ptrdiff_t src_stride, uint8_t *dst8, ptrdiff_t dst_stride, - const InterpKernel *x_filters, - int x0_q4, int x_step_q4, - int w, int h, int bd) { + const InterpKernel *x_filters, int x0_q4, + int x_step_q4, int w, int h, int bd) { int x, y; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); @@ -352,8 +343,7 @@ static void highbd_convolve_horiz(const uint8_t *src8, ptrdiff_t src_stride, const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS]; const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK]; int k, sum = 0; - for (k = 0; k < SUBPEL_TAPS; ++k) - sum += src_x[k] * x_filter[k]; + for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_x[k] * x_filter[k]; dst[x] = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd); x_q4 += x_step_q4; } @@ -364,9 +354,8 @@ static void highbd_convolve_horiz(const uint8_t 
*src8, ptrdiff_t src_stride, static void highbd_convolve_avg_horiz(const uint8_t *src8, ptrdiff_t src_stride, uint8_t *dst8, ptrdiff_t dst_stride, - const InterpKernel *x_filters, - int x0_q4, int x_step_q4, - int w, int h, int bd) { + const InterpKernel *x_filters, int x0_q4, + int x_step_q4, int w, int h, int bd) { int x, y; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); @@ -377,10 +366,10 @@ static void highbd_convolve_avg_horiz(const uint8_t *src8, ptrdiff_t src_stride, const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS]; const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK]; int k, sum = 0; - for (k = 0; k < SUBPEL_TAPS; ++k) - sum += src_x[k] * x_filter[k]; - dst[x] = ROUND_POWER_OF_TWO(dst[x] + - clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd), 1); + for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_x[k] * x_filter[k]; + dst[x] = ROUND_POWER_OF_TWO( + dst[x] + clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd), + 1); x_q4 += x_step_q4; } src += src_stride; @@ -390,9 +379,8 @@ static void highbd_convolve_avg_horiz(const uint8_t *src8, ptrdiff_t src_stride, static void highbd_convolve_vert(const uint8_t *src8, ptrdiff_t src_stride, uint8_t *dst8, ptrdiff_t dst_stride, - const InterpKernel *y_filters, - int y0_q4, int y_step_q4, int w, int h, - int bd) { + const InterpKernel *y_filters, int y0_q4, + int y_step_q4, int w, int h, int bd) { int x, y; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); @@ -405,8 +393,8 @@ static void highbd_convolve_vert(const uint8_t *src8, ptrdiff_t src_stride, int k, sum = 0; for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_y[k * src_stride] * y_filter[k]; - dst[y * dst_stride] = clip_pixel_highbd( - ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd); + dst[y * dst_stride] = + clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd); y_q4 += y_step_q4; } ++src; @@ -416,9 +404,8 @@ static void highbd_convolve_vert(const uint8_t *src8, 
ptrdiff_t src_stride, static void highbd_convolve_avg_vert(const uint8_t *src8, ptrdiff_t src_stride, uint8_t *dst8, ptrdiff_t dst_stride, - const InterpKernel *y_filters, - int y0_q4, int y_step_q4, int w, int h, - int bd) { + const InterpKernel *y_filters, int y0_q4, + int y_step_q4, int w, int h, int bd) { int x, y; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); @@ -431,8 +418,10 @@ static void highbd_convolve_avg_vert(const uint8_t *src8, ptrdiff_t src_stride, int k, sum = 0; for (k = 0; k < SUBPEL_TAPS; ++k) sum += src_y[k * src_stride] * y_filter[k]; - dst[y * dst_stride] = ROUND_POWER_OF_TWO(dst[y * dst_stride] + - clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd), 1); + dst[y * dst_stride] = ROUND_POWER_OF_TWO( + dst[y * dst_stride] + + clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd), + 1); y_q4 += y_step_q4; } ++src; @@ -442,11 +431,9 @@ static void highbd_convolve_avg_vert(const uint8_t *src8, ptrdiff_t src_stride, static void highbd_convolve(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, - const InterpKernel *const x_filters, - int x0_q4, int x_step_q4, - const InterpKernel *const y_filters, - int y0_q4, int y_step_q4, - int w, int h, int bd) { + const InterpKernel *const x_filters, int x0_q4, + int x_step_q4, const InterpKernel *const y_filters, + int y0_q4, int y_step_q4, int w, int h, int bd) { // Note: Fixed size intermediate buffer, temp, places limits on parameters. // 2d filtering proceeds in 2 steps: // (1) Interpolate horizontally into an intermediate buffer, temp. @@ -461,35 +448,33 @@ static void highbd_convolve(const uint8_t *src, ptrdiff_t src_stride, // --((64 - 1) * 32 + 15) >> 4 + 8 = 135. 
uint16_t temp[64 * 135]; int intermediate_height = - (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS; + (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS; assert(w <= 64); assert(h <= 64); assert(y_step_q4 <= 32); assert(x_step_q4 <= 32); - highbd_convolve_horiz(src - src_stride * (SUBPEL_TAPS / 2 - 1), - src_stride, CONVERT_TO_BYTEPTR(temp), 64, - x_filters, x0_q4, x_step_q4, w, - intermediate_height, bd); + highbd_convolve_horiz(src - src_stride * (SUBPEL_TAPS / 2 - 1), src_stride, + CONVERT_TO_BYTEPTR(temp), 64, x_filters, x0_q4, + x_step_q4, w, intermediate_height, bd); highbd_convolve_vert(CONVERT_TO_BYTEPTR(temp) + 64 * (SUBPEL_TAPS / 2 - 1), - 64, dst, dst_stride, y_filters, y0_q4, y_step_q4, - w, h, bd); + 64, dst, dst_stride, y_filters, y0_q4, y_step_q4, w, h, + bd); } - void vpx_highbd_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h, int bd) { + const int16_t *filter_y, int y_step_q4, int w, + int h, int bd) { const InterpKernel *const filters_x = get_filter_base(filter_x); const int x0_q4 = get_filter_offset(filter_x, filters_x); (void)filter_y; (void)y_step_q4; - highbd_convolve_horiz(src, src_stride, dst, dst_stride, filters_x, - x0_q4, x_step_q4, w, h, bd); + highbd_convolve_horiz(src, src_stride, dst, dst_stride, filters_x, x0_q4, + x_step_q4, w, h, bd); } void vpx_highbd_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, @@ -502,22 +487,22 @@ void vpx_highbd_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, (void)filter_y; (void)y_step_q4; - highbd_convolve_avg_horiz(src, src_stride, dst, dst_stride, filters_x, - x0_q4, x_step_q4, w, h, bd); + highbd_convolve_avg_horiz(src, src_stride, dst, dst_stride, filters_x, x0_q4, + x_step_q4, w, h, bd); } void vpx_highbd_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, 
const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h, int bd) { + const int16_t *filter_y, int y_step_q4, int w, + int h, int bd) { const InterpKernel *const filters_y = get_filter_base(filter_y); const int y0_q4 = get_filter_offset(filter_y, filters_y); (void)filter_x; (void)x_step_q4; - highbd_convolve_vert(src, src_stride, dst, dst_stride, filters_y, - y0_q4, y_step_q4, w, h, bd); + highbd_convolve_vert(src, src_stride, dst, dst_stride, filters_y, y0_q4, + y_step_q4, w, h, bd); } void vpx_highbd_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, @@ -530,31 +515,30 @@ void vpx_highbd_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, (void)filter_x; (void)x_step_q4; - highbd_convolve_avg_vert(src, src_stride, dst, dst_stride, filters_y, - y0_q4, y_step_q4, w, h, bd); + highbd_convolve_avg_vert(src, src_stride, dst, dst_stride, filters_y, y0_q4, + y_step_q4, w, h, bd); } void vpx_highbd_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h, int bd) { + const int16_t *filter_y, int y_step_q4, int w, + int h, int bd) { const InterpKernel *const filters_x = get_filter_base(filter_x); const int x0_q4 = get_filter_offset(filter_x, filters_x); const InterpKernel *const filters_y = get_filter_base(filter_y); const int y0_q4 = get_filter_offset(filter_y, filters_y); - highbd_convolve(src, src_stride, dst, dst_stride, - filters_x, x0_q4, x_step_q4, + highbd_convolve(src, src_stride, dst, dst_stride, filters_x, x0_q4, x_step_q4, filters_y, y0_q4, y_step_q4, w, h, bd); } void vpx_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h, int bd) { + const int16_t *filter_y, int y_step_q4, int w, + int h, int bd) { // Fixed size intermediate 
buffer places limits on parameters. DECLARE_ALIGNED(16, uint16_t, temp[64 * 64]); assert(w <= 64); @@ -562,8 +546,8 @@ void vpx_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, vpx_highbd_convolve8_c(src, src_stride, CONVERT_TO_BYTEPTR(temp), 64, filter_x, x_step_q4, filter_y, y_step_q4, w, h, bd); - vpx_highbd_convolve_avg_c(CONVERT_TO_BYTEPTR(temp), 64, dst, dst_stride, - NULL, 0, NULL, 0, w, h, bd); + vpx_highbd_convolve_avg_c(CONVERT_TO_BYTEPTR(temp), 64, dst, dst_stride, NULL, + 0, NULL, 0, w, h, bd); } void vpx_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride, diff --git a/libs/libvpx/vpx_dsp/vpx_convolve.h b/libs/libvpx/vpx_dsp/vpx_convolve.h index 9ed3f1750f..ee9744b3ae 100644 --- a/libs/libvpx/vpx_dsp/vpx_convolve.h +++ b/libs/libvpx/vpx_dsp/vpx_convolve.h @@ -20,8 +20,8 @@ extern "C" { typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, - int w, int h); + const int16_t *filter_y, int y_step_q4, int w, + int h); #if CONFIG_VP9_HIGHBITDEPTH typedef void (*highbd_convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride, diff --git a/libs/libvpx/vpx_dsp/vpx_dsp.mk b/libs/libvpx/vpx_dsp/vpx_dsp.mk index e394688c71..a78041ce7b 100644 --- a/libs/libvpx/vpx_dsp/vpx_dsp.mk +++ b/libs/libvpx/vpx_dsp/vpx_dsp.mk @@ -22,6 +22,8 @@ DSP_SRCS-yes += bitwriter.h DSP_SRCS-yes += bitwriter.c DSP_SRCS-yes += bitwriter_buffer.c DSP_SRCS-yes += bitwriter_buffer.h +DSP_SRCS-yes += psnr.c +DSP_SRCS-yes += psnr.h DSP_SRCS-$(CONFIG_INTERNAL_STATS) += ssim.c DSP_SRCS-$(CONFIG_INTERNAL_STATS) += ssim.h DSP_SRCS-$(CONFIG_INTERNAL_STATS) += psnrhvs.c @@ -38,20 +40,26 @@ endif # intra predictions DSP_SRCS-yes += intrapred.c -ifeq ($(CONFIG_USE_X86INC),yes) DSP_SRCS-$(HAVE_SSE) += x86/intrapred_sse2.asm DSP_SRCS-$(HAVE_SSE2) += x86/intrapred_sse2.asm DSP_SRCS-$(HAVE_SSSE3) += x86/intrapred_ssse3.asm 
DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_8t_ssse3.asm -endif # CONFIG_USE_X86INC ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes) -ifeq ($(CONFIG_USE_X86INC),yes) DSP_SRCS-$(HAVE_SSE) += x86/highbd_intrapred_sse2.asm DSP_SRCS-$(HAVE_SSE2) += x86/highbd_intrapred_sse2.asm -endif # CONFIG_USE_X86INC endif # CONFIG_VP9_HIGHBITDEPTH +ifneq ($(filter yes,$(CONFIG_POSTPROC) $(CONFIG_VP9_POSTPROC)),) +DSP_SRCS-yes += add_noise.c +DSP_SRCS-yes += deblock.c +DSP_SRCS-yes += postproc.h +DSP_SRCS-$(HAVE_MSA) += mips/add_noise_msa.c +DSP_SRCS-$(HAVE_MSA) += mips/deblock_msa.c +DSP_SRCS-$(HAVE_SSE2) += x86/add_noise_sse2.asm +DSP_SRCS-$(HAVE_SSE2) += x86/deblock_sse2.asm +endif # CONFIG_POSTPROC + DSP_SRCS-$(HAVE_NEON_ASM) += arm/intrapred_neon_asm$(ASM) DSP_SRCS-$(HAVE_NEON) += arm/intrapred_neon.c DSP_SRCS-$(HAVE_MSA) += mips/intrapred_msa.c @@ -79,9 +87,8 @@ ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes) DSP_SRCS-$(HAVE_SSE2) += x86/vpx_high_subpixel_8t_sse2.asm DSP_SRCS-$(HAVE_SSE2) += x86/vpx_high_subpixel_bilinear_sse2.asm endif -ifeq ($(CONFIG_USE_X86INC),yes) + DSP_SRCS-$(HAVE_SSE2) += x86/vpx_convolve_copy_sse2.asm -endif ifeq ($(HAVE_NEON_ASM),yes) DSP_SRCS-yes += arm/vpx_convolve_copy_neon_asm$(ASM) @@ -128,19 +135,16 @@ DSP_SRCS-yes += loopfilter.c DSP_SRCS-$(ARCH_X86)$(ARCH_X86_64) += x86/loopfilter_sse2.c DSP_SRCS-$(HAVE_AVX2) += x86/loopfilter_avx2.c -DSP_SRCS-$(HAVE_MMX) += x86/loopfilter_mmx.asm -DSP_SRCS-$(HAVE_NEON) += arm/loopfilter_neon.c ifeq ($(HAVE_NEON_ASM),yes) +DSP_SRCS-yes += arm/loopfilter_neon.c DSP_SRCS-yes += arm/loopfilter_mb_neon$(ASM) DSP_SRCS-yes += arm/loopfilter_16_neon$(ASM) DSP_SRCS-yes += arm/loopfilter_8_neon$(ASM) DSP_SRCS-yes += arm/loopfilter_4_neon$(ASM) else ifeq ($(HAVE_NEON),yes) -DSP_SRCS-yes += arm/loopfilter_16_neon.c -DSP_SRCS-yes += arm/loopfilter_8_neon.c -DSP_SRCS-yes += arm/loopfilter_4_neon.c +DSP_SRCS-yes += arm/loopfilter_mb_neon.c endif # HAVE_NEON endif # HAVE_NEON_ASM @@ -164,7 +168,7 @@ DSP_SRCS-yes += txfm_common.h 
DSP_SRCS-$(HAVE_SSE2) += x86/txfm_common_sse2.h DSP_SRCS-$(HAVE_MSA) += mips/txfm_macros_msa.h # forward transform -ifneq ($(filter yes,$(CONFIG_VP9_ENCODER) $(CONFIG_VP10_ENCODER)),) +ifeq ($(CONFIG_VP9_ENCODER),yes) DSP_SRCS-yes += fwd_txfm.c DSP_SRCS-yes += fwd_txfm.h DSP_SRCS-$(HAVE_SSE2) += x86/fwd_txfm_sse2.h @@ -172,30 +176,26 @@ DSP_SRCS-$(HAVE_SSE2) += x86/fwd_txfm_sse2.c DSP_SRCS-$(HAVE_SSE2) += x86/fwd_txfm_impl_sse2.h DSP_SRCS-$(HAVE_SSE2) += x86/fwd_dct32x32_impl_sse2.h ifeq ($(ARCH_X86_64),yes) -ifeq ($(CONFIG_USE_X86INC),yes) DSP_SRCS-$(HAVE_SSSE3) += x86/fwd_txfm_ssse3_x86_64.asm endif -endif DSP_SRCS-$(HAVE_AVX2) += x86/fwd_txfm_avx2.c DSP_SRCS-$(HAVE_AVX2) += x86/fwd_dct32x32_impl_avx2.h DSP_SRCS-$(HAVE_NEON) += arm/fwd_txfm_neon.c DSP_SRCS-$(HAVE_MSA) += mips/fwd_txfm_msa.h DSP_SRCS-$(HAVE_MSA) += mips/fwd_txfm_msa.c DSP_SRCS-$(HAVE_MSA) += mips/fwd_dct32x32_msa.c -endif # CONFIG_VP9_ENCODER || CONFIG_VP10_ENCODER +endif # CONFIG_VP9_ENCODER # inverse transform -ifneq ($(filter yes,$(CONFIG_VP9) $(CONFIG_VP10)),) +ifeq ($(CONFIG_VP9),yes) DSP_SRCS-yes += inv_txfm.h DSP_SRCS-yes += inv_txfm.c DSP_SRCS-$(HAVE_SSE2) += x86/inv_txfm_sse2.h DSP_SRCS-$(HAVE_SSE2) += x86/inv_txfm_sse2.c -ifeq ($(CONFIG_USE_X86INC),yes) DSP_SRCS-$(HAVE_SSE2) += x86/inv_wht_sse2.asm ifeq ($(ARCH_X86_64),yes) DSP_SRCS-$(HAVE_SSSE3) += x86/inv_txfm_ssse3_x86_64.asm endif # ARCH_X86_64 -endif # CONFIG_USE_X86INC ifeq ($(HAVE_NEON_ASM),yes) DSP_SRCS-yes += arm/save_reg_neon$(ASM) @@ -235,10 +235,10 @@ DSP_SRCS-$(HAVE_DSPR2) += mips/itrans16_dspr2.c DSP_SRCS-$(HAVE_DSPR2) += mips/itrans32_dspr2.c DSP_SRCS-$(HAVE_DSPR2) += mips/itrans32_cols_dspr2.c endif # CONFIG_VP9_HIGHBITDEPTH -endif # CONFIG_VP9 || CONFIG_VP10 +endif # CONFIG_VP9 # quantization -ifneq ($(filter yes, $(CONFIG_VP9_ENCODER) $(CONFIG_VP10_ENCODER)),) +ifeq ($(CONFIG_VP9_ENCODER),yes) DSP_SRCS-yes += quantize.c DSP_SRCS-yes += quantize.h @@ -247,30 +247,28 @@ ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes) 
DSP_SRCS-$(HAVE_SSE2) += x86/highbd_quantize_intrin_sse2.c endif ifeq ($(ARCH_X86_64),yes) -ifeq ($(CONFIG_USE_X86INC),yes) DSP_SRCS-$(HAVE_SSSE3) += x86/quantize_ssse3_x86_64.asm DSP_SRCS-$(HAVE_AVX) += x86/quantize_avx_x86_64.asm endif -endif # avg DSP_SRCS-yes += avg.c DSP_SRCS-$(HAVE_SSE2) += x86/avg_intrin_sse2.c DSP_SRCS-$(HAVE_NEON) += arm/avg_neon.c DSP_SRCS-$(HAVE_MSA) += mips/avg_msa.c +DSP_SRCS-$(HAVE_NEON) += arm/hadamard_neon.c ifeq ($(ARCH_X86_64),yes) -ifeq ($(CONFIG_USE_X86INC),yes) DSP_SRCS-$(HAVE_SSSE3) += x86/avg_ssse3_x86_64.asm endif -endif -endif # CONFIG_VP9_ENCODER || CONFIG_VP10_ENCODER +endif # CONFIG_VP9_ENCODER ifeq ($(CONFIG_ENCODERS),yes) DSP_SRCS-yes += sad.c DSP_SRCS-yes += subtract.c +DSP_SRCS-yes += sum_squares.c +DSP_SRCS-$(HAVE_SSE2) += x86/sum_squares_sse2.c -DSP_SRCS-$(HAVE_MEDIA) += arm/sad_media$(ASM) DSP_SRCS-$(HAVE_NEON) += arm/sad4d_neon.c DSP_SRCS-$(HAVE_NEON) += arm/sad_neon.c DSP_SRCS-$(HAVE_NEON) += arm/subtract_neon.c @@ -278,14 +276,12 @@ DSP_SRCS-$(HAVE_NEON) += arm/subtract_neon.c DSP_SRCS-$(HAVE_MSA) += mips/sad_msa.c DSP_SRCS-$(HAVE_MSA) += mips/subtract_msa.c -DSP_SRCS-$(HAVE_MMX) += x86/sad_mmx.asm DSP_SRCS-$(HAVE_SSE3) += x86/sad_sse3.asm DSP_SRCS-$(HAVE_SSSE3) += x86/sad_ssse3.asm DSP_SRCS-$(HAVE_SSE4_1) += x86/sad_sse4.asm DSP_SRCS-$(HAVE_AVX2) += x86/sad4d_avx2.c DSP_SRCS-$(HAVE_AVX2) += x86/sad_avx2.c -ifeq ($(CONFIG_USE_X86INC),yes) DSP_SRCS-$(HAVE_SSE) += x86/sad4d_sse2.asm DSP_SRCS-$(HAVE_SSE) += x86/sad_sse2.asm DSP_SRCS-$(HAVE_SSE2) += x86/sad4d_sse2.asm @@ -296,7 +292,6 @@ ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes) DSP_SRCS-$(HAVE_SSE2) += x86/highbd_sad4d_sse2.asm DSP_SRCS-$(HAVE_SSE2) += x86/highbd_sad_sse2.asm endif # CONFIG_VP9_HIGHBITDEPTH -endif # CONFIG_USE_X86INC endif # CONFIG_ENCODERS @@ -304,24 +299,14 @@ ifneq ($(filter yes,$(CONFIG_ENCODERS) $(CONFIG_POSTPROC) $(CONFIG_VP9_POSTPROC) DSP_SRCS-yes += variance.c DSP_SRCS-yes += variance.h -DSP_SRCS-$(HAVE_MEDIA) += 
arm/bilinear_filter_media$(ASM) -DSP_SRCS-$(HAVE_MEDIA) += arm/subpel_variance_media.c -DSP_SRCS-$(HAVE_MEDIA) += arm/variance_halfpixvar16x16_h_media$(ASM) -DSP_SRCS-$(HAVE_MEDIA) += arm/variance_halfpixvar16x16_hv_media$(ASM) -DSP_SRCS-$(HAVE_MEDIA) += arm/variance_halfpixvar16x16_v_media$(ASM) -DSP_SRCS-$(HAVE_MEDIA) += arm/variance_media$(ASM) DSP_SRCS-$(HAVE_NEON) += arm/subpel_variance_neon.c DSP_SRCS-$(HAVE_NEON) += arm/variance_neon.c DSP_SRCS-$(HAVE_MSA) += mips/variance_msa.c DSP_SRCS-$(HAVE_MSA) += mips/sub_pixel_variance_msa.c -DSP_SRCS-$(HAVE_MMX) += x86/variance_mmx.c -DSP_SRCS-$(HAVE_MMX) += x86/variance_impl_mmx.asm DSP_SRCS-$(HAVE_SSE) += x86/variance_sse2.c DSP_SRCS-$(HAVE_SSE2) += x86/variance_sse2.c # Contains SSE2 and SSSE3 -DSP_SRCS-$(HAVE_SSE2) += x86/halfpix_variance_sse2.c -DSP_SRCS-$(HAVE_SSE2) += x86/halfpix_variance_impl_sse2.asm DSP_SRCS-$(HAVE_AVX2) += x86/variance_avx2.c DSP_SRCS-$(HAVE_AVX2) += x86/variance_impl_avx2.c @@ -329,20 +314,19 @@ ifeq ($(ARCH_X86_64),yes) DSP_SRCS-$(HAVE_SSE2) += x86/ssim_opt_x86_64.asm endif # ARCH_X86_64 -ifeq ($(CONFIG_USE_X86INC),yes) DSP_SRCS-$(HAVE_SSE) += x86/subpel_variance_sse2.asm DSP_SRCS-$(HAVE_SSE2) += x86/subpel_variance_sse2.asm # Contains SSE2 and SSSE3 -endif # CONFIG_USE_X86INC ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes) DSP_SRCS-$(HAVE_SSE2) += x86/highbd_variance_sse2.c DSP_SRCS-$(HAVE_SSE2) += x86/highbd_variance_impl_sse2.asm -ifeq ($(CONFIG_USE_X86INC),yes) DSP_SRCS-$(HAVE_SSE2) += x86/highbd_subpel_variance_impl_sse2.asm -endif # CONFIG_USE_X86INC endif # CONFIG_VP9_HIGHBITDEPTH endif # CONFIG_ENCODERS || CONFIG_POSTPROC || CONFIG_VP9_POSTPROC +# Neon utilities +DSP_SRCS-$(HAVE_NEON) += arm/transpose_neon.h + DSP_SRCS-no += $(DSP_SRCS_REMOVE-yes) DSP_SRCS-yes += vpx_dsp_rtcd.c diff --git a/libs/libvpx/vpx_dsp/vpx_dsp_common.h b/libs/libvpx/vpx_dsp/vpx_dsp_common.h index a9e180e793..49d36e5458 100644 --- a/libs/libvpx/vpx_dsp/vpx_dsp_common.h +++ b/libs/libvpx/vpx_dsp/vpx_dsp_common.h @@ 
-8,12 +8,11 @@ * be found in the AUTHORS file in the root of the source tree. */ -#ifndef VPX_DSP_COMMON_H_ -#define VPX_DSP_COMMON_H_ +#ifndef VPX_DSP_VPX_DSP_COMMON_H_ +#define VPX_DSP_VPX_DSP_COMMON_H_ #include "./vpx_config.h" #include "vpx/vpx_integer.h" -#include "vpx_dsp/vpx_dsp_common.h" #include "vpx_ports/mem.h" #ifdef __cplusplus @@ -23,6 +22,13 @@ extern "C" { #define VPXMIN(x, y) (((x) < (y)) ? (x) : (y)) #define VPXMAX(x, y) (((x) > (y)) ? (x) : (y)) +#define VPX_SWAP(type, a, b) \ + do { \ + type c = (b); \ + b = a; \ + a = c; \ + } while (0) + #if CONFIG_VP9_HIGHBITDEPTH // Note: // tran_low_t is the datatype used for final transform coefficients. @@ -53,12 +59,9 @@ static INLINE double fclamp(double value, double low, double high) { static INLINE uint16_t clip_pixel_highbd(int val, int bd) { switch (bd) { case 8: - default: - return (uint16_t)clamp(val, 0, 255); - case 10: - return (uint16_t)clamp(val, 0, 1023); - case 12: - return (uint16_t)clamp(val, 0, 4095); + default: return (uint16_t)clamp(val, 0, 255); + case 10: return (uint16_t)clamp(val, 0, 1023); + case 12: return (uint16_t)clamp(val, 0, 4095); } } #endif // CONFIG_VP9_HIGHBITDEPTH @@ -67,4 +70,4 @@ static INLINE uint16_t clip_pixel_highbd(int val, int bd) { } // extern "C" #endif -#endif // VPX_DSP_COMMON_H_ +#endif // VPX_DSP_VPX_DSP_COMMON_H_ diff --git a/libs/libvpx/vpx_dsp/vpx_dsp_rtcd.c b/libs/libvpx/vpx_dsp/vpx_dsp_rtcd.c index 5fe27b614b..030c456d39 100644 --- a/libs/libvpx/vpx_dsp/vpx_dsp_rtcd.c +++ b/libs/libvpx/vpx_dsp/vpx_dsp_rtcd.c @@ -12,6 +12,4 @@ #include "./vpx_dsp_rtcd.h" #include "vpx_ports/vpx_once.h" -void vpx_dsp_rtcd() { - once(setup_rtcd_internal); -} +void vpx_dsp_rtcd() { once(setup_rtcd_internal); } diff --git a/libs/libvpx/vpx_dsp/vpx_dsp_rtcd_defs.pl b/libs/libvpx/vpx_dsp/vpx_dsp_rtcd_defs.pl index 73726d217c..9fea2d1cf2 100644 --- a/libs/libvpx/vpx_dsp/vpx_dsp_rtcd_defs.pl +++ b/libs/libvpx/vpx_dsp/vpx_dsp_rtcd_defs.pl @@ -11,29 +11,6 @@ EOF } forward_decls 
qw/vpx_dsp_forward_decls/; -# x86inc.asm had specific constraints. break it out so it's easy to disable. -# zero all the variables to avoid tricky else conditions. -$mmx_x86inc = $sse_x86inc = $sse2_x86inc = $ssse3_x86inc = $avx_x86inc = - $avx2_x86inc = ''; -$mmx_x86_64_x86inc = $sse_x86_64_x86inc = $sse2_x86_64_x86inc = - $ssse3_x86_64_x86inc = $avx_x86_64_x86inc = $avx2_x86_64_x86inc = ''; -if (vpx_config("CONFIG_USE_X86INC") eq "yes") { - $mmx_x86inc = 'mmx'; - $sse_x86inc = 'sse'; - $sse2_x86inc = 'sse2'; - $ssse3_x86inc = 'ssse3'; - $avx_x86inc = 'avx'; - $avx2_x86inc = 'avx2'; - if ($opts{arch} eq "x86_64") { - $mmx_x86_64_x86inc = 'mmx'; - $sse_x86_64_x86inc = 'sse'; - $sse2_x86_64_x86inc = 'sse2'; - $ssse3_x86_64_x86inc = 'ssse3'; - $avx_x86_64_x86inc = 'avx'; - $avx2_x86_64_x86inc = 'avx2'; - } -} - # optimizations which depend on multiple features $avx2_ssse3 = ''; if ((vpx_config("HAVE_AVX2") eq "yes") && (vpx_config("HAVE_SSSE3") eq "yes")) { @@ -55,19 +32,19 @@ if ($opts{arch} eq "x86_64") { # add_proto qw/void vpx_d207_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_d207_predictor_4x4/, "$ssse3_x86inc"; +specialize qw/vpx_d207_predictor_4x4 sse2/; add_proto qw/void vpx_d207e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_d207e_predictor_4x4/; add_proto qw/void vpx_d45_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_d45_predictor_4x4 neon/, "$ssse3_x86inc"; +specialize qw/vpx_d45_predictor_4x4 neon sse2/; add_proto qw/void vpx_d45e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_d45e_predictor_4x4/; add_proto qw/void vpx_d63_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_d63_predictor_4x4/, "$ssse3_x86inc"; +specialize 
qw/vpx_d63_predictor_4x4 ssse3/; add_proto qw/void vpx_d63e_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_d63e_predictor_4x4/; @@ -76,7 +53,7 @@ add_proto qw/void vpx_d63f_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, co specialize qw/vpx_d63f_predictor_4x4/; add_proto qw/void vpx_h_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_h_predictor_4x4 neon dspr2 msa/, "$sse2_x86inc"; +specialize qw/vpx_h_predictor_4x4 neon dspr2 msa sse2/; add_proto qw/void vpx_he_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_he_predictor_4x4/; @@ -88,49 +65,49 @@ add_proto qw/void vpx_d135_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, co specialize qw/vpx_d135_predictor_4x4 neon/; add_proto qw/void vpx_d153_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_d153_predictor_4x4/, "$ssse3_x86inc"; +specialize qw/vpx_d153_predictor_4x4 ssse3/; add_proto qw/void vpx_v_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_v_predictor_4x4 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_v_predictor_4x4 neon msa sse2/; add_proto qw/void vpx_ve_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_ve_predictor_4x4/; add_proto qw/void vpx_tm_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_tm_predictor_4x4 neon dspr2 msa/, "$sse2_x86inc"; +specialize qw/vpx_tm_predictor_4x4 neon dspr2 msa sse2/; add_proto qw/void vpx_dc_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_dc_predictor_4x4 dspr2 msa neon/, "$sse2_x86inc"; +specialize qw/vpx_dc_predictor_4x4 dspr2 msa neon sse2/; add_proto 
qw/void vpx_dc_top_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_dc_top_predictor_4x4 msa neon/, "$sse2_x86inc"; +specialize qw/vpx_dc_top_predictor_4x4 msa neon sse2/; add_proto qw/void vpx_dc_left_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_dc_left_predictor_4x4 msa neon/, "$sse2_x86inc"; +specialize qw/vpx_dc_left_predictor_4x4 msa neon sse2/; add_proto qw/void vpx_dc_128_predictor_4x4/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_dc_128_predictor_4x4 msa neon/, "$sse2_x86inc"; +specialize qw/vpx_dc_128_predictor_4x4 msa neon sse2/; add_proto qw/void vpx_d207_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_d207_predictor_8x8/, "$ssse3_x86inc"; +specialize qw/vpx_d207_predictor_8x8 ssse3/; add_proto qw/void vpx_d207e_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_d207e_predictor_8x8/; add_proto qw/void vpx_d45_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_d45_predictor_8x8 neon/, "$ssse3_x86inc"; +specialize qw/vpx_d45_predictor_8x8 neon sse2/; add_proto qw/void vpx_d45e_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_d45e_predictor_8x8/; add_proto qw/void vpx_d63_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_d63_predictor_8x8/, "$ssse3_x86inc"; +specialize qw/vpx_d63_predictor_8x8 ssse3/; add_proto qw/void vpx_d63e_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_d63e_predictor_8x8/; add_proto qw/void vpx_h_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const 
uint8_t *left"; -specialize qw/vpx_h_predictor_8x8 neon dspr2 msa/, "$sse2_x86inc"; +specialize qw/vpx_h_predictor_8x8 neon dspr2 msa sse2/; add_proto qw/void vpx_d117_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_d117_predictor_8x8/; @@ -139,46 +116,46 @@ add_proto qw/void vpx_d135_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, co specialize qw/vpx_d135_predictor_8x8/; add_proto qw/void vpx_d153_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_d153_predictor_8x8/, "$ssse3_x86inc"; +specialize qw/vpx_d153_predictor_8x8 ssse3/; add_proto qw/void vpx_v_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_v_predictor_8x8 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_v_predictor_8x8 neon msa sse2/; add_proto qw/void vpx_tm_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_tm_predictor_8x8 neon dspr2 msa/, "$sse2_x86inc"; +specialize qw/vpx_tm_predictor_8x8 neon dspr2 msa sse2/; add_proto qw/void vpx_dc_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_dc_predictor_8x8 dspr2 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_dc_predictor_8x8 dspr2 neon msa sse2/; add_proto qw/void vpx_dc_top_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_dc_top_predictor_8x8 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_dc_top_predictor_8x8 neon msa sse2/; add_proto qw/void vpx_dc_left_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_dc_left_predictor_8x8 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_dc_left_predictor_8x8 neon msa sse2/; add_proto qw/void vpx_dc_128_predictor_8x8/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const 
uint8_t *left"; -specialize qw/vpx_dc_128_predictor_8x8 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_dc_128_predictor_8x8 neon msa sse2/; add_proto qw/void vpx_d207_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_d207_predictor_16x16/, "$ssse3_x86inc"; +specialize qw/vpx_d207_predictor_16x16 ssse3/; add_proto qw/void vpx_d207e_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_d207e_predictor_16x16/; add_proto qw/void vpx_d45_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_d45_predictor_16x16 neon/, "$ssse3_x86inc"; +specialize qw/vpx_d45_predictor_16x16 neon ssse3/; add_proto qw/void vpx_d45e_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_d45e_predictor_16x16/; add_proto qw/void vpx_d63_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_d63_predictor_16x16/, "$ssse3_x86inc"; +specialize qw/vpx_d63_predictor_16x16 ssse3/; add_proto qw/void vpx_d63e_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_d63e_predictor_16x16/; add_proto qw/void vpx_h_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_h_predictor_16x16 neon dspr2 msa/, "$sse2_x86inc"; +specialize qw/vpx_h_predictor_16x16 neon dspr2 msa sse2/; add_proto qw/void vpx_d117_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_d117_predictor_16x16/; @@ -187,46 +164,46 @@ add_proto qw/void vpx_d135_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, specialize qw/vpx_d135_predictor_16x16/; add_proto qw/void vpx_d153_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const 
uint8_t *left"; -specialize qw/vpx_d153_predictor_16x16/, "$ssse3_x86inc"; +specialize qw/vpx_d153_predictor_16x16 ssse3/; add_proto qw/void vpx_v_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_v_predictor_16x16 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_v_predictor_16x16 neon msa sse2/; add_proto qw/void vpx_tm_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_tm_predictor_16x16 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_tm_predictor_16x16 neon msa sse2/; add_proto qw/void vpx_dc_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_dc_predictor_16x16 dspr2 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_dc_predictor_16x16 dspr2 neon msa sse2/; add_proto qw/void vpx_dc_top_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_dc_top_predictor_16x16 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_dc_top_predictor_16x16 neon msa sse2/; add_proto qw/void vpx_dc_left_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_dc_left_predictor_16x16 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_dc_left_predictor_16x16 neon msa sse2/; add_proto qw/void vpx_dc_128_predictor_16x16/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_dc_128_predictor_16x16 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_dc_128_predictor_16x16 neon msa sse2/; add_proto qw/void vpx_d207_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_d207_predictor_32x32/, "$ssse3_x86inc"; +specialize qw/vpx_d207_predictor_32x32 ssse3/; add_proto qw/void vpx_d207e_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize 
qw/vpx_d207e_predictor_32x32/; add_proto qw/void vpx_d45_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_d45_predictor_32x32/, "$ssse3_x86inc"; +specialize qw/vpx_d45_predictor_32x32 ssse3/; add_proto qw/void vpx_d45e_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_d45e_predictor_32x32/; add_proto qw/void vpx_d63_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_d63_predictor_32x32/, "$ssse3_x86inc"; +specialize qw/vpx_d63_predictor_32x32 ssse3/; add_proto qw/void vpx_d63e_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_d63e_predictor_32x32/; add_proto qw/void vpx_h_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_h_predictor_32x32 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_h_predictor_32x32 neon msa sse2/; add_proto qw/void vpx_d117_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; specialize qw/vpx_d117_predictor_32x32/; @@ -235,25 +212,25 @@ add_proto qw/void vpx_d135_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, specialize qw/vpx_d135_predictor_32x32/; add_proto qw/void vpx_d153_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_d153_predictor_32x32/, "$ssse3_x86inc"; +specialize qw/vpx_d153_predictor_32x32 ssse3/; add_proto qw/void vpx_v_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_v_predictor_32x32 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_v_predictor_32x32 neon msa sse2/; add_proto qw/void vpx_tm_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_tm_predictor_32x32 neon 
msa/, "$sse2_x86inc"; +specialize qw/vpx_tm_predictor_32x32 neon msa sse2/; add_proto qw/void vpx_dc_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_dc_predictor_32x32 msa neon/, "$sse2_x86inc"; +specialize qw/vpx_dc_predictor_32x32 msa neon sse2/; add_proto qw/void vpx_dc_top_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_dc_top_predictor_32x32 msa neon/, "$sse2_x86inc"; +specialize qw/vpx_dc_top_predictor_32x32 msa neon sse2/; add_proto qw/void vpx_dc_left_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_dc_left_predictor_32x32 msa neon/, "$sse2_x86inc"; +specialize qw/vpx_dc_left_predictor_32x32 msa neon sse2/; add_proto qw/void vpx_dc_128_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"; -specialize qw/vpx_dc_128_predictor_32x32 msa neon/, "$sse2_x86inc"; +specialize qw/vpx_dc_128_predictor_32x32 msa neon sse2/; # High bitdepth functions if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { @@ -288,13 +265,13 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vpx_highbd_d153_predictor_4x4/; add_proto qw/void vpx_highbd_v_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; - specialize qw/vpx_highbd_v_predictor_4x4/, "$sse2_x86inc"; + specialize qw/vpx_highbd_v_predictor_4x4 sse2/; add_proto qw/void vpx_highbd_tm_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; - specialize qw/vpx_highbd_tm_predictor_4x4/, "$sse2_x86inc"; + specialize qw/vpx_highbd_tm_predictor_4x4 sse2/; add_proto qw/void vpx_highbd_dc_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; - specialize qw/vpx_highbd_dc_predictor_4x4/, "$sse2_x86inc"; + specialize 
qw/vpx_highbd_dc_predictor_4x4 sse2/; add_proto qw/void vpx_highbd_dc_top_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; specialize qw/vpx_highbd_dc_top_predictor_4x4/; @@ -336,13 +313,13 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vpx_highbd_d153_predictor_8x8/; add_proto qw/void vpx_highbd_v_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; - specialize qw/vpx_highbd_v_predictor_8x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_v_predictor_8x8 sse2/; add_proto qw/void vpx_highbd_tm_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; - specialize qw/vpx_highbd_tm_predictor_8x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_tm_predictor_8x8 sse2/; add_proto qw/void vpx_highbd_dc_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; - specialize qw/vpx_highbd_dc_predictor_8x8/, "$sse2_x86inc";; + specialize qw/vpx_highbd_dc_predictor_8x8 sse2/; add_proto qw/void vpx_highbd_dc_top_predictor_8x8/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; specialize qw/vpx_highbd_dc_top_predictor_8x8/; @@ -384,13 +361,13 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vpx_highbd_d153_predictor_16x16/; add_proto qw/void vpx_highbd_v_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; - specialize qw/vpx_highbd_v_predictor_16x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_v_predictor_16x16 sse2/; add_proto qw/void vpx_highbd_tm_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; - specialize qw/vpx_highbd_tm_predictor_16x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_tm_predictor_16x16 sse2/; add_proto qw/void vpx_highbd_dc_predictor_16x16/, "uint16_t *dst, 
ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; - specialize qw/vpx_highbd_dc_predictor_16x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_dc_predictor_16x16 sse2/; add_proto qw/void vpx_highbd_dc_top_predictor_16x16/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; specialize qw/vpx_highbd_dc_top_predictor_16x16/; @@ -432,13 +409,13 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vpx_highbd_d153_predictor_32x32/; add_proto qw/void vpx_highbd_v_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; - specialize qw/vpx_highbd_v_predictor_32x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_v_predictor_32x32 sse2/; add_proto qw/void vpx_highbd_tm_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; - specialize qw/vpx_highbd_tm_predictor_32x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_tm_predictor_32x32 sse2/; add_proto qw/void vpx_highbd_dc_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; - specialize qw/vpx_highbd_dc_predictor_32x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_dc_predictor_32x32 sse2/; add_proto qw/void vpx_highbd_dc_top_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd"; specialize qw/vpx_highbd_dc_top_predictor_32x32/; @@ -454,10 +431,10 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { # Sub Pixel Filters # add_proto qw/void vpx_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"; -specialize qw/vpx_convolve_copy neon dspr2 msa/, "$sse2_x86inc"; +specialize qw/vpx_convolve_copy neon dspr2 msa sse2/; add_proto qw/void vpx_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, 
ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"; -specialize qw/vpx_convolve_avg neon dspr2 msa/, "$sse2_x86inc"; +specialize qw/vpx_convolve_avg neon dspr2 msa sse2/; add_proto qw/void vpx_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h"; specialize qw/vpx_convolve8 sse2 ssse3 neon dspr2 msa/, "$avx2_ssse3"; @@ -500,10 +477,10 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { # Sub Pixel Filters # add_proto qw/void vpx_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps"; - specialize qw/vpx_highbd_convolve_copy/, "$sse2_x86inc"; + specialize qw/vpx_highbd_convolve_copy sse2/; add_proto qw/void vpx_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps"; - specialize qw/vpx_highbd_convolve_avg/, "$sse2_x86inc"; + specialize qw/vpx_highbd_convolve_avg sse2/; add_proto qw/void vpx_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps"; specialize qw/vpx_highbd_convolve8/, "$sse2_x86_64"; @@ -528,39 +505,37 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { # Loopfilter # add_proto qw/void vpx_lpf_vertical_16/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh"; -specialize qw/vpx_lpf_vertical_16 sse2 neon_asm dspr2 msa/; -$vpx_lpf_vertical_16_neon_asm=vpx_lpf_vertical_16_neon; +specialize qw/vpx_lpf_vertical_16 sse2 neon dspr2 msa/; add_proto qw/void 
vpx_lpf_vertical_16_dual/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh"; -specialize qw/vpx_lpf_vertical_16_dual sse2 neon_asm dspr2 msa/; -$vpx_lpf_vertical_16_dual_neon_asm=vpx_lpf_vertical_16_dual_neon; +specialize qw/vpx_lpf_vertical_16_dual sse2 neon dspr2 msa/; -add_proto qw/void vpx_lpf_vertical_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"; +add_proto qw/void vpx_lpf_vertical_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh"; specialize qw/vpx_lpf_vertical_8 sse2 neon dspr2 msa/; add_proto qw/void vpx_lpf_vertical_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1"; -specialize qw/vpx_lpf_vertical_8_dual sse2 neon_asm dspr2 msa/; -$vpx_lpf_vertical_8_dual_neon_asm=vpx_lpf_vertical_8_dual_neon; +specialize qw/vpx_lpf_vertical_8_dual sse2 neon dspr2 msa/; -add_proto qw/void vpx_lpf_vertical_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"; -specialize qw/vpx_lpf_vertical_4 mmx neon dspr2 msa/; +add_proto qw/void vpx_lpf_vertical_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh"; +specialize qw/vpx_lpf_vertical_4 sse2 neon dspr2 msa/; add_proto qw/void vpx_lpf_vertical_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1"; specialize qw/vpx_lpf_vertical_4_dual sse2 neon dspr2 msa/; -add_proto qw/void vpx_lpf_horizontal_16/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"; -specialize qw/vpx_lpf_horizontal_16 sse2 avx2 neon_asm dspr2 msa/; -$vpx_lpf_horizontal_16_neon_asm=vpx_lpf_horizontal_16_neon; +add_proto 
qw/void vpx_lpf_horizontal_edge_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh"; +specialize qw/vpx_lpf_horizontal_edge_8 sse2 avx2 neon dspr2 msa/; -add_proto qw/void vpx_lpf_horizontal_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"; +add_proto qw/void vpx_lpf_horizontal_edge_16/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh"; +specialize qw/vpx_lpf_horizontal_edge_16 sse2 avx2 neon dspr2 msa/; + +add_proto qw/void vpx_lpf_horizontal_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh"; specialize qw/vpx_lpf_horizontal_8 sse2 neon dspr2 msa/; add_proto qw/void vpx_lpf_horizontal_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1"; -specialize qw/vpx_lpf_horizontal_8_dual sse2 neon_asm dspr2 msa/; -$vpx_lpf_horizontal_8_dual_neon_asm=vpx_lpf_horizontal_8_dual_neon; +specialize qw/vpx_lpf_horizontal_8_dual sse2 neon dspr2 msa/; -add_proto qw/void vpx_lpf_horizontal_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count"; -specialize qw/vpx_lpf_horizontal_4 mmx neon dspr2 msa/; +add_proto qw/void vpx_lpf_horizontal_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh"; +specialize qw/vpx_lpf_horizontal_4 sse2 neon dspr2 msa/; add_proto qw/void vpx_lpf_horizontal_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1"; specialize qw/vpx_lpf_horizontal_4_dual sse2 neon dspr2 msa/; @@ -572,28 +547,31 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { add_proto qw/void vpx_highbd_lpf_vertical_16_dual/, "uint16_t *s, int pitch, const uint8_t 
*blimit, const uint8_t *limit, const uint8_t *thresh, int bd"; specialize qw/vpx_highbd_lpf_vertical_16_dual sse2/; - add_proto qw/void vpx_highbd_lpf_vertical_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count, int bd"; + add_proto qw/void vpx_highbd_lpf_vertical_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd"; specialize qw/vpx_highbd_lpf_vertical_8 sse2/; add_proto qw/void vpx_highbd_lpf_vertical_8_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd"; specialize qw/vpx_highbd_lpf_vertical_8_dual sse2/; - add_proto qw/void vpx_highbd_lpf_vertical_4/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count, int bd"; + add_proto qw/void vpx_highbd_lpf_vertical_4/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd"; specialize qw/vpx_highbd_lpf_vertical_4 sse2/; add_proto qw/void vpx_highbd_lpf_vertical_4_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd"; specialize qw/vpx_highbd_lpf_vertical_4_dual sse2/; - add_proto qw/void vpx_highbd_lpf_horizontal_16/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count, int bd"; - specialize qw/vpx_highbd_lpf_horizontal_16 sse2/; + add_proto qw/void vpx_highbd_lpf_horizontal_edge_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd"; + specialize qw/vpx_highbd_lpf_horizontal_edge_8 sse2/; - add_proto qw/void vpx_highbd_lpf_horizontal_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count, int bd"; + add_proto 
qw/void vpx_highbd_lpf_horizontal_edge_16/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd"; + specialize qw/vpx_highbd_lpf_horizontal_edge_16 sse2/; + + add_proto qw/void vpx_highbd_lpf_horizontal_8/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd"; specialize qw/vpx_highbd_lpf_horizontal_8 sse2/; add_proto qw/void vpx_highbd_lpf_horizontal_8_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd"; specialize qw/vpx_highbd_lpf_horizontal_8_dual sse2/; - add_proto qw/void vpx_highbd_lpf_horizontal_4/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count, int bd"; + add_proto qw/void vpx_highbd_lpf_horizontal_4/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd"; specialize qw/vpx_highbd_lpf_horizontal_4 sse2/; add_proto qw/void vpx_highbd_lpf_horizontal_4_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd"; @@ -607,7 +585,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { # # Forward transform # -if ((vpx_config("CONFIG_VP9_ENCODER") eq "yes") || (vpx_config("CONFIG_VP10_ENCODER") eq "yes")) { +if (vpx_config("CONFIG_VP9_ENCODER") eq "yes") { if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { add_proto qw/void vpx_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride"; specialize qw/vpx_fdct4x4 sse2/; @@ -667,7 +645,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vpx_fdct4x4_1 sse2/; add_proto qw/void vpx_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride"; - specialize qw/vpx_fdct8x8 sse2 neon msa/, "$ssse3_x86_64_x86inc"; + specialize qw/vpx_fdct8x8 
sse2 neon msa/, "$ssse3_x86_64"; add_proto qw/void vpx_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride"; specialize qw/vpx_fdct8x8_1 sse2 neon msa/; @@ -687,11 +665,11 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { add_proto qw/void vpx_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride"; specialize qw/vpx_fdct32x32_1 sse2 msa/; } # CONFIG_VP9_HIGHBITDEPTH -} # CONFIG_VP9_ENCODER || CONFIG_VP10_ENCODER +} # CONFIG_VP9_ENCODER # # Inverse transform -if ((vpx_config("CONFIG_VP9") eq "yes") || (vpx_config("CONFIG_VP10") eq "yes")) { +if (vpx_config("CONFIG_VP9") eq "yes") { if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { # Note as optimized versions of these functions are added we need to add a check to ensure # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only. @@ -699,7 +677,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vpx_iwht4x4_1_add/; add_proto qw/void vpx_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; - specialize qw/vpx_iwht4x4_16_add/, "$sse2_x86inc"; + specialize qw/vpx_iwht4x4_16_add sse2/; add_proto qw/void vpx_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd"; specialize qw/vpx_highbd_idct4x4_1_add/; @@ -717,7 +695,7 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vpx_highbd_idct32x32_34_add/; add_proto qw/void vpx_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd"; - specialize qw/vpx_highbd_idct32x32_1_add/; + specialize qw/vpx_highbd_idct32x32_1_add sse2/; add_proto qw/void vpx_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd"; specialize qw/vpx_highbd_iwht4x4_1_add/; @@ -785,10 +763,10 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vpx_idct4x4_1_add sse2/; add_proto qw/void vpx_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; - 
specialize qw/vpx_idct8x8_64_add sse2/, "$ssse3_x86_64_x86inc"; + specialize qw/vpx_idct8x8_64_add sse2/, "$ssse3_x86_64"; add_proto qw/void vpx_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; - specialize qw/vpx_idct8x8_12_add sse2/, "$ssse3_x86_64_x86inc"; + specialize qw/vpx_idct8x8_12_add sse2/, "$ssse3_x86_64"; add_proto qw/void vpx_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; specialize qw/vpx_idct8x8_1_add sse2/; @@ -803,15 +781,15 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vpx_idct16x16_1_add sse2/; add_proto qw/void vpx_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; - specialize qw/vpx_idct32x32_1024_add sse2/, "$ssse3_x86_64_x86inc"; + specialize qw/vpx_idct32x32_1024_add sse2/, "$ssse3_x86_64"; add_proto qw/void vpx_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; - specialize qw/vpx_idct32x32_135_add sse2/, "$ssse3_x86_64_x86inc"; + specialize qw/vpx_idct32x32_135_add sse2/, "$ssse3_x86_64"; # Need to add 135 eob idct32x32 implementations. 
$vpx_idct32x32_135_add_sse2=vpx_idct32x32_1024_add_sse2; add_proto qw/void vpx_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; - specialize qw/vpx_idct32x32_34_add sse2/, "$ssse3_x86_64_x86inc"; + specialize qw/vpx_idct32x32_34_add sse2/, "$ssse3_x86_64"; add_proto qw/void vpx_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; specialize qw/vpx_idct32x32_1_add sse2/; @@ -886,10 +864,10 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vpx_idct8x8_1_add sse2 neon dspr2 msa/; add_proto qw/void vpx_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; - specialize qw/vpx_idct8x8_64_add sse2 neon dspr2 msa/, "$ssse3_x86_64_x86inc"; + specialize qw/vpx_idct8x8_64_add sse2 neon dspr2 msa/, "$ssse3_x86_64"; add_proto qw/void vpx_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; - specialize qw/vpx_idct8x8_12_add sse2 neon dspr2 msa/, "$ssse3_x86_64_x86inc"; + specialize qw/vpx_idct8x8_12_add sse2 neon dspr2 msa/, "$ssse3_x86_64"; add_proto qw/void vpx_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; specialize qw/vpx_idct16x16_1_add sse2 neon dspr2 msa/; @@ -901,10 +879,10 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vpx_idct16x16_10_add sse2 neon dspr2 msa/; add_proto qw/void vpx_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; - specialize qw/vpx_idct32x32_1024_add sse2 neon dspr2 msa/, "$ssse3_x86_64_x86inc"; + specialize qw/vpx_idct32x32_1024_add sse2 neon dspr2 msa/, "$ssse3_x86_64"; add_proto qw/void vpx_idct32x32_135_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; - specialize qw/vpx_idct32x32_135_add sse2 neon dspr2 msa/, "$ssse3_x86_64_x86inc"; + specialize qw/vpx_idct32x32_135_add sse2 neon dspr2 msa/, "$ssse3_x86_64"; # Need to add 135 eob idct32x32 implementations. 
$vpx_idct32x32_135_add_sse2=vpx_idct32x32_1024_add_sse2; $vpx_idct32x32_135_add_neon=vpx_idct32x32_1024_add_neon; @@ -912,9 +890,9 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { $vpx_idct32x32_135_add_msa=vpx_idct32x32_1024_add_msa; add_proto qw/void vpx_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; - specialize qw/vpx_idct32x32_34_add sse2 neon_asm dspr2 msa/, "$ssse3_x86_64_x86inc"; + specialize qw/vpx_idct32x32_34_add sse2 neon dspr2 msa/, "$ssse3_x86_64"; # Need to add 34 eob idct32x32 neon implementation. - $vpx_idct32x32_34_add_neon_asm=vpx_idct32x32_1024_add_neon; + $vpx_idct32x32_34_add_neon=vpx_idct32x32_1024_add_neon; add_proto qw/void vpx_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; specialize qw/vpx_idct32x32_1_add sse2 neon dspr2 msa/; @@ -923,20 +901,20 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vpx_iwht4x4_1_add msa/; add_proto qw/void vpx_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride"; - specialize qw/vpx_iwht4x4_16_add msa/, "$sse2_x86inc"; + specialize qw/vpx_iwht4x4_16_add msa sse2/; } # CONFIG_EMULATE_HARDWARE } # CONFIG_VP9_HIGHBITDEPTH -} # CONFIG_VP9 || CONFIG_VP10 +} # CONFIG_VP9 # # Quantization # -if ((vpx_config("CONFIG_VP9_ENCODER") eq "yes") || (vpx_config("CONFIG_VP10_ENCODER") eq "yes")) { +if (vpx_config("CONFIG_VP9_ENCODER") eq "yes") { add_proto qw/void vpx_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan"; - specialize qw/vpx_quantize_b sse2/, "$ssse3_x86_64_x86inc", "$avx_x86_64_x86inc"; + specialize qw/vpx_quantize_b sse2/, "$ssse3_x86_64", "$avx_x86_64"; add_proto qw/void vpx_quantize_b_32x32/, "const tran_low_t *coeff_ptr, 
intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan"; - specialize qw/vpx_quantize_b_32x32/, "$ssse3_x86_64_x86inc", "$avx_x86_64_x86inc"; + specialize qw/vpx_quantize_b_32x32/, "$ssse3_x86_64", "$avx_x86_64"; if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { add_proto qw/void vpx_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan"; @@ -945,61 +923,61 @@ if ((vpx_config("CONFIG_VP9_ENCODER") eq "yes") || (vpx_config("CONFIG_VP10_ENCO add_proto qw/void vpx_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan"; specialize qw/vpx_highbd_quantize_b_32x32 sse2/; } # CONFIG_VP9_HIGHBITDEPTH -} # CONFIG_VP9_ENCODER || CONFIG_VP10_ENCODER +} # CONFIG_VP9_ENCODER if (vpx_config("CONFIG_ENCODERS") eq "yes") { # # Block subtraction # add_proto qw/void vpx_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride"; -specialize qw/vpx_subtract_block neon msa/, "$sse2_x86inc"; +specialize qw/vpx_subtract_block neon msa sse2/; # # Single block SAD # add_proto qw/unsigned int vpx_sad64x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; -specialize 
qw/vpx_sad64x64 avx2 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_sad64x64 avx2 neon msa sse2/; add_proto qw/unsigned int vpx_sad64x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; -specialize qw/vpx_sad64x32 avx2 msa/, "$sse2_x86inc"; +specialize qw/vpx_sad64x32 avx2 msa sse2/; add_proto qw/unsigned int vpx_sad32x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; -specialize qw/vpx_sad32x64 avx2 msa/, "$sse2_x86inc"; +specialize qw/vpx_sad32x64 avx2 msa sse2/; add_proto qw/unsigned int vpx_sad32x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; -specialize qw/vpx_sad32x32 avx2 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_sad32x32 avx2 neon msa sse2/; add_proto qw/unsigned int vpx_sad32x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; -specialize qw/vpx_sad32x16 avx2 msa/, "$sse2_x86inc"; +specialize qw/vpx_sad32x16 avx2 msa sse2/; add_proto qw/unsigned int vpx_sad16x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; -specialize qw/vpx_sad16x32 msa/, "$sse2_x86inc"; +specialize qw/vpx_sad16x32 msa sse2/; add_proto qw/unsigned int vpx_sad16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; -specialize qw/vpx_sad16x16 mmx media neon msa/, "$sse2_x86inc"; +specialize qw/vpx_sad16x16 neon msa sse2/; add_proto qw/unsigned int vpx_sad16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; -specialize qw/vpx_sad16x8 mmx neon msa/, "$sse2_x86inc"; +specialize qw/vpx_sad16x8 neon msa sse2/; add_proto qw/unsigned int vpx_sad8x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; -specialize qw/vpx_sad8x16 mmx neon msa/, "$sse2_x86inc"; +specialize qw/vpx_sad8x16 neon msa sse2/; add_proto qw/unsigned int vpx_sad8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int 
ref_stride"; -specialize qw/vpx_sad8x8 mmx neon msa/, "$sse2_x86inc"; +specialize qw/vpx_sad8x8 neon msa sse2/; add_proto qw/unsigned int vpx_sad8x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; -specialize qw/vpx_sad8x4 msa/, "$sse2_x86inc"; +specialize qw/vpx_sad8x4 msa sse2/; add_proto qw/unsigned int vpx_sad4x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; -specialize qw/vpx_sad4x8 msa/, "$sse2_x86inc"; +specialize qw/vpx_sad4x8 msa sse2/; add_proto qw/unsigned int vpx_sad4x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; -specialize qw/vpx_sad4x4 mmx neon msa/, "$sse2_x86inc"; +specialize qw/vpx_sad4x4 neon msa sse2/; # # Avg # -if ((vpx_config("CONFIG_VP9_ENCODER") eq "yes") || (vpx_config("CONFIG_VP10_ENCODER") eq "yes")) { +if (vpx_config("CONFIG_VP9_ENCODER") eq "yes") { add_proto qw/unsigned int vpx_avg_8x8/, "const uint8_t *, int p"; specialize qw/vpx_avg_8x8 sse2 neon msa/; @@ -1007,65 +985,65 @@ if ((vpx_config("CONFIG_VP9_ENCODER") eq "yes") || (vpx_config("CONFIG_VP10_ENCO specialize qw/vpx_avg_4x4 sse2 neon msa/; add_proto qw/void vpx_minmax_8x8/, "const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max"; - specialize qw/vpx_minmax_8x8 sse2/; + specialize qw/vpx_minmax_8x8 sse2 neon/; - add_proto qw/void vpx_hadamard_8x8/, "int16_t const *src_diff, int src_stride, int16_t *coeff"; - specialize qw/vpx_hadamard_8x8 sse2/, "$ssse3_x86_64_x86inc"; + add_proto qw/void vpx_hadamard_8x8/, "const int16_t *src_diff, int src_stride, int16_t *coeff"; + specialize qw/vpx_hadamard_8x8 sse2 neon/, "$ssse3_x86_64"; - add_proto qw/void vpx_hadamard_16x16/, "int16_t const *src_diff, int src_stride, int16_t *coeff"; - specialize qw/vpx_hadamard_16x16 sse2/; + add_proto qw/void vpx_hadamard_16x16/, "const int16_t *src_diff, int src_stride, int16_t *coeff"; + specialize qw/vpx_hadamard_16x16 sse2 neon/; add_proto qw/int vpx_satd/, "const 
int16_t *coeff, int length"; specialize qw/vpx_satd sse2 neon/; - add_proto qw/void vpx_int_pro_row/, "int16_t *hbuf, uint8_t const *ref, const int ref_stride, const int height"; + add_proto qw/void vpx_int_pro_row/, "int16_t *hbuf, const uint8_t *ref, const int ref_stride, const int height"; specialize qw/vpx_int_pro_row sse2 neon/; - add_proto qw/int16_t vpx_int_pro_col/, "uint8_t const *ref, const int width"; + add_proto qw/int16_t vpx_int_pro_col/, "const uint8_t *ref, const int width"; specialize qw/vpx_int_pro_col sse2 neon/; - add_proto qw/int vpx_vector_var/, "int16_t const *ref, int16_t const *src, const int bwl"; + add_proto qw/int vpx_vector_var/, "const int16_t *ref, const int16_t *src, const int bwl"; specialize qw/vpx_vector_var neon sse2/; -} # CONFIG_VP9_ENCODER || CONFIG_VP10_ENCODER +} # CONFIG_VP9_ENCODER add_proto qw/unsigned int vpx_sad64x64_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; -specialize qw/vpx_sad64x64_avg avx2 msa/, "$sse2_x86inc"; +specialize qw/vpx_sad64x64_avg avx2 msa sse2/; add_proto qw/unsigned int vpx_sad64x32_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; -specialize qw/vpx_sad64x32_avg avx2 msa/, "$sse2_x86inc"; +specialize qw/vpx_sad64x32_avg avx2 msa sse2/; add_proto qw/unsigned int vpx_sad32x64_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; -specialize qw/vpx_sad32x64_avg avx2 msa/, "$sse2_x86inc"; +specialize qw/vpx_sad32x64_avg avx2 msa sse2/; add_proto qw/unsigned int vpx_sad32x32_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; -specialize qw/vpx_sad32x32_avg avx2 msa/, "$sse2_x86inc"; +specialize qw/vpx_sad32x32_avg avx2 msa sse2/; add_proto qw/unsigned int vpx_sad32x16_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int 
ref_stride, const uint8_t *second_pred"; -specialize qw/vpx_sad32x16_avg avx2 msa/, "$sse2_x86inc"; +specialize qw/vpx_sad32x16_avg avx2 msa sse2/; add_proto qw/unsigned int vpx_sad16x32_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; -specialize qw/vpx_sad16x32_avg msa/, "$sse2_x86inc"; +specialize qw/vpx_sad16x32_avg msa sse2/; add_proto qw/unsigned int vpx_sad16x16_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; -specialize qw/vpx_sad16x16_avg msa/, "$sse2_x86inc"; +specialize qw/vpx_sad16x16_avg msa sse2/; add_proto qw/unsigned int vpx_sad16x8_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; -specialize qw/vpx_sad16x8_avg msa/, "$sse2_x86inc"; +specialize qw/vpx_sad16x8_avg msa sse2/; add_proto qw/unsigned int vpx_sad8x16_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; -specialize qw/vpx_sad8x16_avg msa/, "$sse2_x86inc"; +specialize qw/vpx_sad8x16_avg msa sse2/; add_proto qw/unsigned int vpx_sad8x8_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; -specialize qw/vpx_sad8x8_avg msa/, "$sse2_x86inc"; +specialize qw/vpx_sad8x8_avg msa sse2/; add_proto qw/unsigned int vpx_sad8x4_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; -specialize qw/vpx_sad8x4_avg msa/, "$sse2_x86inc"; +specialize qw/vpx_sad8x4_avg msa sse2/; add_proto qw/unsigned int vpx_sad4x8_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; -specialize qw/vpx_sad4x8_avg msa/, "$sse2_x86inc"; +specialize qw/vpx_sad4x8_avg msa sse2/; add_proto qw/unsigned int vpx_sad4x4_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int 
ref_stride, const uint8_t *second_pred"; -specialize qw/vpx_sad4x4_avg msa/, "$sse2_x86inc"; +specialize qw/vpx_sad4x4_avg msa sse2/; # # Multi-block SAD, comparing a reference to N blocks 1 pixel apart horizontally @@ -1124,43 +1102,46 @@ specialize qw/vpx_sad4x4x8 sse4_1 msa/; # Multi-block SAD, comparing a reference to N independent blocks # add_proto qw/void vpx_sad64x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array"; -specialize qw/vpx_sad64x64x4d avx2 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_sad64x64x4d avx2 neon msa sse2/; add_proto qw/void vpx_sad64x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array"; -specialize qw/vpx_sad64x32x4d msa/, "$sse2_x86inc"; +specialize qw/vpx_sad64x32x4d msa sse2/; add_proto qw/void vpx_sad32x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array"; -specialize qw/vpx_sad32x64x4d msa/, "$sse2_x86inc"; +specialize qw/vpx_sad32x64x4d msa sse2/; add_proto qw/void vpx_sad32x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array"; -specialize qw/vpx_sad32x32x4d avx2 neon msa/, "$sse2_x86inc"; +specialize qw/vpx_sad32x32x4d avx2 neon msa sse2/; add_proto qw/void vpx_sad32x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array"; -specialize qw/vpx_sad32x16x4d msa/, "$sse2_x86inc"; +specialize qw/vpx_sad32x16x4d msa sse2/; add_proto qw/void vpx_sad16x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array"; -specialize qw/vpx_sad16x32x4d msa/, "$sse2_x86inc"; +specialize qw/vpx_sad16x32x4d msa sse2/; add_proto qw/void vpx_sad16x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array"; 
-specialize qw/vpx_sad16x16x4d neon msa/, "$sse2_x86inc"; +specialize qw/vpx_sad16x16x4d neon msa sse2/; add_proto qw/void vpx_sad16x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array"; -specialize qw/vpx_sad16x8x4d msa/, "$sse2_x86inc"; +specialize qw/vpx_sad16x8x4d msa sse2/; add_proto qw/void vpx_sad8x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array"; -specialize qw/vpx_sad8x16x4d msa/, "$sse2_x86inc"; +specialize qw/vpx_sad8x16x4d msa sse2/; add_proto qw/void vpx_sad8x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array"; -specialize qw/vpx_sad8x8x4d msa/, "$sse2_x86inc"; +specialize qw/vpx_sad8x8x4d msa sse2/; add_proto qw/void vpx_sad8x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array"; -specialize qw/vpx_sad8x4x4d msa/, "$sse2_x86inc"; +specialize qw/vpx_sad8x4x4d msa sse2/; add_proto qw/void vpx_sad4x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array"; -specialize qw/vpx_sad4x8x4d msa/, "$sse2_x86inc"; +specialize qw/vpx_sad4x8x4d msa sse2/; add_proto qw/void vpx_sad4x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t * const ref_ptr[], int ref_stride, uint32_t *sad_array"; -specialize qw/vpx_sad4x4x4d msa/, "$sse2_x86inc"; +specialize qw/vpx_sad4x4x4d msa sse2/; + +add_proto qw/uint64_t vpx_sum_squares_2d_i16/, "const int16_t *src, int stride, int size"; +specialize qw/vpx_sum_squares_2d_i16 sse2/; # # Structured Similarity (SSIM) @@ -1184,37 +1165,37 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { # Single block SAD # add_proto qw/unsigned int vpx_highbd_sad64x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; - specialize qw/vpx_highbd_sad64x64/, "$sse2_x86inc"; + 
specialize qw/vpx_highbd_sad64x64 sse2/; add_proto qw/unsigned int vpx_highbd_sad64x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; - specialize qw/vpx_highbd_sad64x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad64x32 sse2/; add_proto qw/unsigned int vpx_highbd_sad32x64/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; - specialize qw/vpx_highbd_sad32x64/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad32x64 sse2/; add_proto qw/unsigned int vpx_highbd_sad32x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; - specialize qw/vpx_highbd_sad32x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad32x32 sse2/; add_proto qw/unsigned int vpx_highbd_sad32x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; - specialize qw/vpx_highbd_sad32x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad32x16 sse2/; add_proto qw/unsigned int vpx_highbd_sad16x32/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; - specialize qw/vpx_highbd_sad16x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad16x32 sse2/; add_proto qw/unsigned int vpx_highbd_sad16x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; - specialize qw/vpx_highbd_sad16x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad16x16 sse2/; add_proto qw/unsigned int vpx_highbd_sad16x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; - specialize qw/vpx_highbd_sad16x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad16x8 sse2/; add_proto qw/unsigned int vpx_highbd_sad8x16/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; - specialize qw/vpx_highbd_sad8x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad8x16 sse2/; add_proto qw/unsigned int vpx_highbd_sad8x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; - specialize 
qw/vpx_highbd_sad8x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad8x8 sse2/; add_proto qw/unsigned int vpx_highbd_sad8x4/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; - specialize qw/vpx_highbd_sad8x4/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad8x4 sse2/; add_proto qw/unsigned int vpx_highbd_sad4x8/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride"; specialize qw/vpx_highbd_sad4x8/; @@ -1233,37 +1214,37 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { specialize qw/vpx_highbd_minmax_8x8/; add_proto qw/unsigned int vpx_highbd_sad64x64_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; - specialize qw/vpx_highbd_sad64x64_avg/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad64x64_avg sse2/; add_proto qw/unsigned int vpx_highbd_sad64x32_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; - specialize qw/vpx_highbd_sad64x32_avg/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad64x32_avg sse2/; add_proto qw/unsigned int vpx_highbd_sad32x64_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; - specialize qw/vpx_highbd_sad32x64_avg/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad32x64_avg sse2/; add_proto qw/unsigned int vpx_highbd_sad32x32_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; - specialize qw/vpx_highbd_sad32x32_avg/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad32x32_avg sse2/; add_proto qw/unsigned int vpx_highbd_sad32x16_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; - specialize qw/vpx_highbd_sad32x16_avg/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad32x16_avg sse2/; add_proto qw/unsigned int vpx_highbd_sad16x32_avg/, "const uint8_t *src_ptr, int 
src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; - specialize qw/vpx_highbd_sad16x32_avg/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad16x32_avg sse2/; add_proto qw/unsigned int vpx_highbd_sad16x16_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; - specialize qw/vpx_highbd_sad16x16_avg/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad16x16_avg sse2/; add_proto qw/unsigned int vpx_highbd_sad16x8_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; - specialize qw/vpx_highbd_sad16x8_avg/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad16x8_avg sse2/; add_proto qw/unsigned int vpx_highbd_sad8x16_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; - specialize qw/vpx_highbd_sad8x16_avg/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad8x16_avg sse2/; add_proto qw/unsigned int vpx_highbd_sad8x8_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; - specialize qw/vpx_highbd_sad8x8_avg/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad8x8_avg sse2/; add_proto qw/unsigned int vpx_highbd_sad8x4_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; - specialize qw/vpx_highbd_sad8x4_avg/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad8x4_avg sse2/; add_proto qw/unsigned int vpx_highbd_sad4x8_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred"; specialize qw/vpx_highbd_sad4x8_avg/; @@ -1328,43 +1309,43 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { # Multi-block SAD, comparing a reference to N independent blocks # add_proto qw/void vpx_highbd_sad64x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array"; 
- specialize qw/vpx_highbd_sad64x64x4d/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad64x64x4d sse2/; add_proto qw/void vpx_highbd_sad64x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array"; - specialize qw/vpx_highbd_sad64x32x4d/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad64x32x4d sse2/; add_proto qw/void vpx_highbd_sad32x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array"; - specialize qw/vpx_highbd_sad32x64x4d/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad32x64x4d sse2/; add_proto qw/void vpx_highbd_sad32x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array"; - specialize qw/vpx_highbd_sad32x32x4d/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad32x32x4d sse2/; add_proto qw/void vpx_highbd_sad32x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array"; - specialize qw/vpx_highbd_sad32x16x4d/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad32x16x4d sse2/; add_proto qw/void vpx_highbd_sad16x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array"; - specialize qw/vpx_highbd_sad16x32x4d/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad16x32x4d sse2/; add_proto qw/void vpx_highbd_sad16x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array"; - specialize qw/vpx_highbd_sad16x16x4d/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad16x16x4d sse2/; add_proto qw/void vpx_highbd_sad16x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array"; - specialize qw/vpx_highbd_sad16x8x4d/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad16x8x4d sse2/; add_proto qw/void vpx_highbd_sad8x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* 
const ref_ptr[], int ref_stride, uint32_t *sad_array"; - specialize qw/vpx_highbd_sad8x16x4d/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad8x16x4d sse2/; add_proto qw/void vpx_highbd_sad8x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array"; - specialize qw/vpx_highbd_sad8x8x4d/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad8x8x4d sse2/; add_proto qw/void vpx_highbd_sad8x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array"; - specialize qw/vpx_highbd_sad8x4x4d/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad8x4x4d sse2/; add_proto qw/void vpx_highbd_sad4x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array"; - specialize qw/vpx_highbd_sad4x8x4d/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad4x8x4d sse2/; add_proto qw/void vpx_highbd_sad4x4x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, uint32_t *sad_array"; - specialize qw/vpx_highbd_sad4x4x4d/, "$sse2_x86inc"; + specialize qw/vpx_highbd_sad4x4x4d sse2/; # # Structured Similarity (SSIM) @@ -1400,16 +1381,16 @@ add_proto qw/unsigned int vpx_variance16x32/, "const uint8_t *src_ptr, int sourc specialize qw/vpx_variance16x32 sse2 msa/; add_proto qw/unsigned int vpx_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; - specialize qw/vpx_variance16x16 mmx sse2 avx2 media neon msa/; + specialize qw/vpx_variance16x16 sse2 avx2 neon msa/; add_proto qw/unsigned int vpx_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; - specialize qw/vpx_variance16x8 mmx sse2 neon msa/; + specialize qw/vpx_variance16x8 sse2 neon msa/; add_proto qw/unsigned int vpx_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned 
int *sse"; - specialize qw/vpx_variance8x16 mmx sse2 neon msa/; + specialize qw/vpx_variance8x16 sse2 neon msa/; add_proto qw/unsigned int vpx_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; - specialize qw/vpx_variance8x8 mmx sse2 media neon msa/; + specialize qw/vpx_variance8x8 sse2 neon msa/; add_proto qw/unsigned int vpx_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; specialize qw/vpx_variance8x4 sse2 msa/; @@ -1418,7 +1399,7 @@ add_proto qw/unsigned int vpx_variance4x8/, "const uint8_t *src_ptr, int source_ specialize qw/vpx_variance4x8 sse2 msa/; add_proto qw/unsigned int vpx_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; - specialize qw/vpx_variance4x4 mmx sse2 msa/; + specialize qw/vpx_variance4x4 sse2 msa/; # # Specialty Variance @@ -1427,10 +1408,10 @@ add_proto qw/void vpx_get16x16var/, "const uint8_t *src_ptr, int source_stride, specialize qw/vpx_get16x16var sse2 avx2 neon msa/; add_proto qw/void vpx_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum"; - specialize qw/vpx_get8x8var mmx sse2 neon msa/; + specialize qw/vpx_get8x8var sse2 neon msa/; add_proto qw/unsigned int vpx_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse"; - specialize qw/vpx_mse16x16 mmx sse2 avx2 media neon msa/; + specialize qw/vpx_mse16x16 sse2 avx2 neon msa/; add_proto qw/unsigned int vpx_mse16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse"; specialize qw/vpx_mse16x8 sse2 msa/; @@ -1442,7 +1423,7 @@ add_proto qw/unsigned int vpx_mse8x8/, "const uint8_t *src_ptr, int source_stri specialize qw/vpx_mse8x8 sse2 msa/; add_proto qw/unsigned int vpx_get_mb_ss/, "const int16_t *"; 
- specialize qw/vpx_get_mb_ss mmx sse2 msa/; + specialize qw/vpx_get_mb_ss sse2 msa/; add_proto qw/unsigned int vpx_get4x4sse_cs/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride"; specialize qw/vpx_get4x4sse_cs neon msa/; @@ -1453,94 +1434,82 @@ add_proto qw/void vpx_comp_avg_pred/, "uint8_t *comp_pred, const uint8_t *pred, # Subpixel Variance # add_proto qw/uint32_t vpx_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_sub_pixel_variance64x64 avx2 neon msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_variance64x64 avx2 neon msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_sub_pixel_variance64x32 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_variance64x32 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_sub_pixel_variance32x64 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_variance32x64 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_sub_pixel_variance32x32 avx2 neon msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_variance32x32 avx2 neon msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_sub_pixel_variance32x16 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize 
qw/vpx_sub_pixel_variance32x16 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_sub_pixel_variance16x32 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_variance16x32 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_sub_pixel_variance16x16 mmx media neon msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_variance16x16 neon msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_sub_pixel_variance16x8 mmx msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_variance16x8 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_sub_pixel_variance8x16 mmx msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_variance8x16 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_sub_pixel_variance8x8 mmx media neon msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_variance8x8 neon msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_sub_pixel_variance8x4 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_variance8x4 msa 
sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_sub_pixel_variance4x8 msa/, "$sse_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_variance4x8 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_sub_pixel_variance4x4 mmx msa/, "$sse_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_variance4x4 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_sub_pixel_avg_variance64x64 avx2 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_avg_variance64x64 avx2 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_sub_pixel_avg_variance64x32 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_avg_variance64x32 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_sub_pixel_avg_variance32x64 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_avg_variance32x64 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize 
qw/vpx_sub_pixel_avg_variance32x32 avx2 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_avg_variance32x32 avx2 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_sub_pixel_avg_variance32x16 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_avg_variance32x16 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_sub_pixel_avg_variance16x32 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_avg_variance16x32 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_sub_pixel_avg_variance16x16 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_avg_variance16x16 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_sub_pixel_avg_variance16x8 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_avg_variance16x8 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_sub_pixel_avg_variance8x16 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_avg_variance8x16 msa sse2 ssse3/; add_proto qw/uint32_t 
vpx_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_sub_pixel_avg_variance8x8 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_avg_variance8x8 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_sub_pixel_avg_variance8x4 msa/, "$sse2_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_avg_variance8x4 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_sub_pixel_avg_variance4x8 msa/, "$sse_x86inc", "$ssse3_x86inc"; + specialize qw/vpx_sub_pixel_avg_variance4x8 msa sse2 ssse3/; add_proto qw/uint32_t vpx_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_sub_pixel_avg_variance4x4 msa/, "$sse_x86inc", "$ssse3_x86inc"; - -# -# Specialty Subpixel -# -add_proto qw/uint32_t vpx_variance_halfpixvar16x16_h/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_variance_halfpixvar16x16_h mmx sse2 media/; - -add_proto qw/uint32_t vpx_variance_halfpixvar16x16_v/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_variance_halfpixvar16x16_v mmx sse2 media/; - -add_proto qw/uint32_t vpx_variance_halfpixvar16x16_hv/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, uint32_t 
*sse"; - specialize qw/vpx_variance_halfpixvar16x16_hv mmx sse2 media/; + specialize qw/vpx_sub_pixel_avg_variance4x4 msa sse2 ssse3/; if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { add_proto qw/unsigned int vpx_highbd_12_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse"; @@ -1684,222 +1653,243 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") { # Subpixel Variance # add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_12_sub_pixel_variance64x64/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_variance64x64 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_12_sub_pixel_variance64x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_variance64x32 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_12_sub_pixel_variance32x64/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_variance32x64 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_12_sub_pixel_variance32x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_variance32x32 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_12_sub_pixel_variance32x16/, "$sse2_x86inc"; + 
specialize qw/vpx_highbd_12_sub_pixel_variance32x16 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_12_sub_pixel_variance16x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_variance16x32 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_12_sub_pixel_variance16x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_variance16x16 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_12_sub_pixel_variance16x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_variance16x8 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_12_sub_pixel_variance8x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_variance8x16 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_12_sub_pixel_variance8x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_variance8x8 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_12_sub_pixel_variance8x4/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_variance8x4 sse2/; add_proto qw/uint32_t 
vpx_highbd_12_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_10_sub_pixel_variance64x64/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_variance64x64 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_10_sub_pixel_variance64x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_variance64x32 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_10_sub_pixel_variance32x64/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_variance32x64 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_10_sub_pixel_variance32x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_variance32x32 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_10_sub_pixel_variance32x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_variance32x16 sse2/; add_proto qw/uint32_t 
vpx_highbd_10_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_10_sub_pixel_variance16x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_variance16x32 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_10_sub_pixel_variance16x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_variance16x16 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_10_sub_pixel_variance16x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_variance16x8 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_10_sub_pixel_variance8x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_variance8x16 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_10_sub_pixel_variance8x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_variance8x8 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_10_sub_pixel_variance8x4/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_variance8x4 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int 
xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_8_sub_pixel_variance64x64/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_variance64x64 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_8_sub_pixel_variance64x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_variance64x32 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_8_sub_pixel_variance32x64/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_variance32x64 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_8_sub_pixel_variance32x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_variance32x32 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_8_sub_pixel_variance32x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_variance32x16 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t 
*ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_8_sub_pixel_variance16x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_variance16x32 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_8_sub_pixel_variance16x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_variance16x16 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_8_sub_pixel_variance16x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_variance16x8 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_8_sub_pixel_variance8x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_variance8x16 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_8_sub_pixel_variance8x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_variance8x8 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; - specialize qw/vpx_highbd_8_sub_pixel_variance8x4/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_variance8x4 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_variance4x4/, "const 
uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse"; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_12_sub_pixel_avg_variance64x64/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_avg_variance64x64 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_12_sub_pixel_avg_variance64x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_avg_variance64x32 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_12_sub_pixel_avg_variance32x64/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_avg_variance32x64 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_12_sub_pixel_avg_variance32x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_avg_variance32x32 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_12_sub_pixel_avg_variance32x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_avg_variance32x16 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance16x32/, "const 
uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_12_sub_pixel_avg_variance16x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_avg_variance16x32 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_12_sub_pixel_avg_variance16x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_avg_variance16x16 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_12_sub_pixel_avg_variance16x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_avg_variance16x8 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_12_sub_pixel_avg_variance8x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_avg_variance8x16 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_12_sub_pixel_avg_variance8x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_avg_variance8x8 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_12_sub_pixel_avg_variance8x4/, 
"$sse2_x86inc"; + specialize qw/vpx_highbd_12_sub_pixel_avg_variance8x4 sse2/; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; add_proto qw/uint32_t vpx_highbd_12_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_10_sub_pixel_avg_variance64x64/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_avg_variance64x64 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_10_sub_pixel_avg_variance64x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_avg_variance64x32 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_10_sub_pixel_avg_variance32x64/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_avg_variance32x64 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_10_sub_pixel_avg_variance32x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_avg_variance32x32 sse2/; add_proto qw/uint32_t 
vpx_highbd_10_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_10_sub_pixel_avg_variance32x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_avg_variance32x16 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_10_sub_pixel_avg_variance16x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_avg_variance16x32 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_10_sub_pixel_avg_variance16x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_avg_variance16x16 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_10_sub_pixel_avg_variance16x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_avg_variance16x8 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_10_sub_pixel_avg_variance8x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_avg_variance8x16 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - 
specialize qw/vpx_highbd_10_sub_pixel_avg_variance8x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_avg_variance8x8 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_10_sub_pixel_avg_variance8x4/, "$sse2_x86inc"; + specialize qw/vpx_highbd_10_sub_pixel_avg_variance8x4 sse2/; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; add_proto qw/uint32_t vpx_highbd_10_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_8_sub_pixel_avg_variance64x64/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_avg_variance64x64 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_8_sub_pixel_avg_variance64x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_avg_variance64x32 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_8_sub_pixel_avg_variance32x64/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_avg_variance32x64 sse2/; 
add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_8_sub_pixel_avg_variance32x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_avg_variance32x32 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_8_sub_pixel_avg_variance32x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_avg_variance32x16 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_8_sub_pixel_avg_variance16x32/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_avg_variance16x32 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_8_sub_pixel_avg_variance16x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_avg_variance16x16 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_8_sub_pixel_avg_variance16x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_avg_variance16x8 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t 
*second_pred"; - specialize qw/vpx_highbd_8_sub_pixel_avg_variance8x16/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_avg_variance8x16 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_8_sub_pixel_avg_variance8x8/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_avg_variance8x8 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; - specialize qw/vpx_highbd_8_sub_pixel_avg_variance8x4/, "$sse2_x86inc"; + specialize qw/vpx_highbd_8_sub_pixel_avg_variance8x4 sse2/; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; add_proto qw/uint32_t vpx_highbd_8_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred"; } # CONFIG_VP9_HIGHBITDEPTH + +# +# Post Processing +# +if (vpx_config("CONFIG_POSTPROC") eq "yes" || vpx_config("CONFIG_VP9_POSTPROC") eq "yes") { + add_proto qw/void vpx_plane_add_noise/, "uint8_t *start, const int8_t *noise, int blackclamp, int whiteclamp, int width, int height, int pitch"; + specialize qw/vpx_plane_add_noise sse2 msa/; + + add_proto qw/void vpx_mbpost_proc_down/, "unsigned char *dst, int pitch, int rows, int cols,int flimit"; + specialize qw/vpx_mbpost_proc_down sse2 msa/; + $vpx_mbpost_proc_down_sse2=vpx_mbpost_proc_down_xmm; + + add_proto qw/void vpx_mbpost_proc_across_ip/, "unsigned char *dst, int pitch, int rows, int cols,int flimit"; + specialize qw/vpx_mbpost_proc_across_ip 
sse2 msa/; + $vpx_mbpost_proc_across_ip_sse2=vpx_mbpost_proc_across_ip_xmm; + + add_proto qw/void vpx_post_proc_down_and_across_mb_row/, "unsigned char *src, unsigned char *dst, int src_pitch, int dst_pitch, int cols, unsigned char *flimits, int size"; + specialize qw/vpx_post_proc_down_and_across_mb_row sse2 msa/; + +} + } # CONFIG_ENCODERS || CONFIG_POSTPROC || CONFIG_VP9_POSTPROC 1; diff --git a/libs/libvpx/vpx_dsp/vpx_filter.h b/libs/libvpx/vpx_dsp/vpx_filter.h index 2617febf3b..6cea251bcc 100644 --- a/libs/libvpx/vpx_dsp/vpx_filter.h +++ b/libs/libvpx/vpx_dsp/vpx_filter.h @@ -13,7 +13,6 @@ #include "vpx/vpx_integer.h" - #ifdef __cplusplus extern "C" { #endif diff --git a/libs/libvpx/vpx_dsp/x86/add_noise_sse2.asm b/libs/libvpx/vpx_dsp/x86/add_noise_sse2.asm new file mode 100644 index 0000000000..f758da22dc --- /dev/null +++ b/libs/libvpx/vpx_dsp/x86/add_noise_sse2.asm @@ -0,0 +1,86 @@ +; +; Copyright (c) 2010 The WebM project authors. All Rights Reserved. +; +; Use of this source code is governed by a BSD-style license +; that can be found in the LICENSE file in the root of the source +; tree. An additional intellectual property rights grant can be found +; in the file PATENTS. All contributing project authors may +; be found in the AUTHORS file in the root of the source tree. 
+; + + +%include "vpx_ports/x86_abi_support.asm" + +;void vpx_plane_add_noise_sse2(uint8_t *start, const int8_t *noise, +; int blackclamp, int whiteclamp, +; int width, int height, int pitch) +global sym(vpx_plane_add_noise_sse2) PRIVATE +sym(vpx_plane_add_noise_sse2): + push rbp + mov rbp, rsp + SHADOW_ARGS_TO_STACK 7 + GET_GOT rbx + push rsi + push rdi + + mov rdx, 0x01010101 + mov rax, arg(2) + mul rdx + movd xmm3, rax + pshufd xmm3, xmm3, 0 ; xmm3 is 16 copies of char in blackclamp + + mov rdx, 0x01010101 + mov rax, arg(3) + mul rdx + movd xmm4, rax + pshufd xmm4, xmm4, 0 ; xmm4 is 16 copies of char in whiteclamp + + movdqu xmm5, xmm3 ; both clamp = black clamp + white clamp + paddusb xmm5, xmm4 + +.addnoise_loop: + call sym(LIBVPX_RAND) WRT_PLT + mov rcx, arg(1) ;noise + and rax, 0xff + add rcx, rax + + mov rdi, rcx + movsxd rcx, dword arg(4) ;[Width] + mov rsi, arg(0) ;Pos + xor rax, rax + +.addnoise_nextset: + movdqu xmm1,[rsi+rax] ; get the source + + psubusb xmm1, xmm3 ; subtract black clamp + paddusb xmm1, xmm5 ; add both clamp + psubusb xmm1, xmm4 ; subtract whiteclamp + + movdqu xmm2,[rdi+rax] ; get the noise for this line + paddb xmm1,xmm2 ; add it in + movdqu [rsi+rax],xmm1 ; store the result + + add rax,16 ; move to the next line + + cmp rax, rcx + jl .addnoise_nextset + + movsxd rax, dword arg(6) ; Pitch + add arg(0), rax ; Start += Pitch + sub dword arg(5), 1 ; Height -= 1 + jg .addnoise_loop + + ; begin epilog + pop rdi + pop rsi + RESTORE_GOT + UNSHADOW_ARGS + pop rbp + ret + +SECTION_RODATA +align 16 +rd42: + times 8 dw 0x04 +four8s: + times 4 dd 8 diff --git a/libs/libvpx/vpx_dsp/x86/avg_intrin_sse2.c b/libs/libvpx/vpx_dsp/x86/avg_intrin_sse2.c index f9af6cf974..b0a104bad0 100644 --- a/libs/libvpx/vpx_dsp/x86/avg_intrin_sse2.c +++ b/libs/libvpx/vpx_dsp/x86/avg_intrin_sse2.c @@ -16,7 +16,7 @@ void vpx_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max) { __m128i u0, s0, d0, diff, maxabsdiff, minabsdiff, 
negdiff, absdiff0, absdiff; - u0 = _mm_setzero_si128(); + u0 = _mm_setzero_si128(); // Row 0 s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0); d0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(d)), u0); @@ -94,7 +94,7 @@ void vpx_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp, unsigned int vpx_avg_8x8_sse2(const uint8_t *s, int p) { __m128i s0, s1, u0; unsigned int avg = 0; - u0 = _mm_setzero_si128(); + u0 = _mm_setzero_si128(); s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0); s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + p)), u0); s0 = _mm_adds_epu16(s0, s1); @@ -121,7 +121,7 @@ unsigned int vpx_avg_8x8_sse2(const uint8_t *s, int p) { unsigned int vpx_avg_4x4_sse2(const uint8_t *s, int p) { __m128i s0, s1, u0; unsigned int avg = 0; - u0 = _mm_setzero_si128(); + u0 = _mm_setzero_si128(); s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0); s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + p)), u0); s0 = _mm_adds_epu16(s0, s1); @@ -248,8 +248,8 @@ void vpx_hadamard_16x16_sse2(int16_t const *src_diff, int src_stride, int16_t *coeff) { int idx; for (idx = 0; idx < 4; ++idx) { - int16_t const *src_ptr = src_diff + (idx >> 1) * 8 * src_stride - + (idx & 0x01) * 8; + int16_t const *src_ptr = + src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8; vpx_hadamard_8x8_sse2(src_ptr, src_stride, coeff + idx * 64); } @@ -309,7 +309,7 @@ int vpx_satd_sse2(const int16_t *coeff, int length) { return _mm_cvtsi128_si32(accum); } -void vpx_int_pro_row_sse2(int16_t *hbuf, uint8_t const*ref, +void vpx_int_pro_row_sse2(int16_t *hbuf, uint8_t const *ref, const int ref_stride, const int height) { int idx; __m128i zero = _mm_setzero_si128(); @@ -378,8 +378,7 @@ int16_t vpx_int_pro_col_sse2(uint8_t const *ref, const int width) { return _mm_extract_epi16(s0, 0); } -int vpx_vector_var_sse2(int16_t const *ref, int16_t const *src, - const int bwl) { +int vpx_vector_var_sse2(int16_t const 
*ref, int16_t const *src, const int bwl) { int idx; int width = 4 << bwl; int16_t mean; @@ -398,23 +397,23 @@ int vpx_vector_var_sse2(int16_t const *ref, int16_t const *src, diff = _mm_subs_epi16(v0, v1); sum = _mm_add_epi16(sum, diff); - v0 = _mm_madd_epi16(diff, diff); + v0 = _mm_madd_epi16(diff, diff); sse = _mm_add_epi32(sse, v0); ref += 8; src += 8; } - v0 = _mm_srli_si128(sum, 8); + v0 = _mm_srli_si128(sum, 8); sum = _mm_add_epi16(sum, v0); - v0 = _mm_srli_epi64(sum, 32); + v0 = _mm_srli_epi64(sum, 32); sum = _mm_add_epi16(sum, v0); - v0 = _mm_srli_epi32(sum, 16); + v0 = _mm_srli_epi32(sum, 16); sum = _mm_add_epi16(sum, v0); - v1 = _mm_srli_si128(sse, 8); + v1 = _mm_srli_si128(sse, 8); sse = _mm_add_epi32(sse, v1); - v1 = _mm_srli_epi64(sse, 32); + v1 = _mm_srli_epi64(sse, 32); sse = _mm_add_epi32(sse, v1); mean = _mm_extract_epi16(sum, 0); diff --git a/libs/libvpx/vpx_dsp/x86/convolve.h b/libs/libvpx/vpx_dsp/x86/convolve.h index b6fbfcf928..2a0516cdc1 100644 --- a/libs/libvpx/vpx_dsp/x86/convolve.h +++ b/libs/libvpx/vpx_dsp/x86/convolve.h @@ -16,275 +16,179 @@ #include "vpx/vpx_integer.h" #include "vpx_ports/mem.h" -typedef void filter8_1dfunction ( - const uint8_t *src_ptr, - ptrdiff_t src_pitch, - uint8_t *output_ptr, - ptrdiff_t out_pitch, - uint32_t output_height, - const int16_t *filter -); +typedef void filter8_1dfunction(const uint8_t *src_ptr, ptrdiff_t src_pitch, + uint8_t *output_ptr, ptrdiff_t out_pitch, + uint32_t output_height, const int16_t *filter); -#define FUN_CONV_1D(name, step_q4, filter, dir, src_start, avg, opt) \ - void vpx_convolve8_##name##_##opt(const uint8_t *src, ptrdiff_t src_stride, \ - uint8_t *dst, ptrdiff_t dst_stride, \ - const int16_t *filter_x, int x_step_q4, \ - const int16_t *filter_y, int y_step_q4, \ - int w, int h) { \ - assert(filter[3] != 128); \ - assert(step_q4 == 16); \ - if (filter[0] || filter[1] || filter[2]) { \ - while (w >= 16) { \ - vpx_filter_block1d16_##dir##8_##avg##opt(src_start, \ - src_stride, \ - 
dst, \ - dst_stride, \ - h, \ - filter); \ - src += 16; \ - dst += 16; \ - w -= 16; \ - } \ - while (w >= 8) { \ - vpx_filter_block1d8_##dir##8_##avg##opt(src_start, \ - src_stride, \ - dst, \ - dst_stride, \ - h, \ - filter); \ - src += 8; \ - dst += 8; \ - w -= 8; \ - } \ - while (w >= 4) { \ - vpx_filter_block1d4_##dir##8_##avg##opt(src_start, \ - src_stride, \ - dst, \ - dst_stride, \ - h, \ - filter); \ - src += 4; \ - dst += 4; \ - w -= 4; \ - } \ - } else { \ - while (w >= 16) { \ - vpx_filter_block1d16_##dir##2_##avg##opt(src, \ - src_stride, \ - dst, \ - dst_stride, \ - h, \ - filter); \ - src += 16; \ - dst += 16; \ - w -= 16; \ - } \ - while (w >= 8) { \ - vpx_filter_block1d8_##dir##2_##avg##opt(src, \ - src_stride, \ - dst, \ - dst_stride, \ - h, \ - filter); \ - src += 8; \ - dst += 8; \ - w -= 8; \ - } \ - while (w >= 4) { \ - vpx_filter_block1d4_##dir##2_##avg##opt(src, \ - src_stride, \ - dst, \ - dst_stride, \ - h, \ - filter); \ - src += 4; \ - dst += 4; \ - w -= 4; \ - } \ - } \ -} +#define FUN_CONV_1D(name, step_q4, filter, dir, src_start, avg, opt) \ + void vpx_convolve8_##name##_##opt( \ + const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, \ + ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, \ + const int16_t *filter_y, int y_step_q4, int w, int h) { \ + assert(filter[3] != 128); \ + assert(step_q4 == 16); \ + if (filter[0] | filter[1] | filter[2]) { \ + while (w >= 16) { \ + vpx_filter_block1d16_##dir##8_##avg##opt(src_start, src_stride, dst, \ + dst_stride, h, filter); \ + src += 16; \ + dst += 16; \ + w -= 16; \ + } \ + if (w == 8) { \ + vpx_filter_block1d8_##dir##8_##avg##opt(src_start, src_stride, dst, \ + dst_stride, h, filter); \ + } else if (w == 4) { \ + vpx_filter_block1d4_##dir##8_##avg##opt(src_start, src_stride, dst, \ + dst_stride, h, filter); \ + } \ + } else { \ + while (w >= 16) { \ + vpx_filter_block1d16_##dir##2_##avg##opt(src, src_stride, dst, \ + dst_stride, h, filter); \ + src += 16; \ + dst += 16; \ + w 
-= 16; \ + } \ + if (w == 8) { \ + vpx_filter_block1d8_##dir##2_##avg##opt(src, src_stride, dst, \ + dst_stride, h, filter); \ + } else if (w == 4) { \ + vpx_filter_block1d4_##dir##2_##avg##opt(src, src_stride, dst, \ + dst_stride, h, filter); \ + } \ + } \ + } -#define FUN_CONV_2D(avg, opt) \ -void vpx_convolve8_##avg##opt(const uint8_t *src, ptrdiff_t src_stride, \ - uint8_t *dst, ptrdiff_t dst_stride, \ - const int16_t *filter_x, int x_step_q4, \ - const int16_t *filter_y, int y_step_q4, \ - int w, int h) { \ - assert(filter_x[3] != 128); \ - assert(filter_y[3] != 128); \ - assert(w <= 64); \ - assert(h <= 64); \ - assert(x_step_q4 == 16); \ - assert(y_step_q4 == 16); \ - if (filter_x[0] || filter_x[1] || filter_x[2]|| \ - filter_y[0] || filter_y[1] || filter_y[2]) { \ - DECLARE_ALIGNED(16, uint8_t, fdata2[64 * 71]); \ - vpx_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, fdata2, 64, \ - filter_x, x_step_q4, filter_y, y_step_q4, \ - w, h + 7); \ - vpx_convolve8_##avg##vert_##opt(fdata2 + 3 * 64, 64, dst, dst_stride, \ - filter_x, x_step_q4, filter_y, \ - y_step_q4, w, h); \ - } else { \ - DECLARE_ALIGNED(16, uint8_t, fdata2[64 * 65]); \ - vpx_convolve8_horiz_##opt(src, src_stride, fdata2, 64, \ - filter_x, x_step_q4, filter_y, y_step_q4, \ - w, h + 1); \ - vpx_convolve8_##avg##vert_##opt(fdata2, 64, dst, dst_stride, \ - filter_x, x_step_q4, filter_y, \ - y_step_q4, w, h); \ - } \ -} +#define FUN_CONV_2D(avg, opt) \ + void vpx_convolve8_##avg##opt( \ + const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, \ + ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, \ + const int16_t *filter_y, int y_step_q4, int w, int h) { \ + assert(filter_x[3] != 128); \ + assert(filter_y[3] != 128); \ + assert(w <= 64); \ + assert(h <= 64); \ + assert(x_step_q4 == 16); \ + assert(y_step_q4 == 16); \ + if (filter_x[0] | filter_x[1] | filter_x[2]) { \ + DECLARE_ALIGNED(16, uint8_t, fdata2[64 * 71]); \ + vpx_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, 
fdata2, 64, \ + filter_x, x_step_q4, filter_y, y_step_q4, w, \ + h + 7); \ + vpx_convolve8_##avg##vert_##opt(fdata2 + 3 * 64, 64, dst, dst_stride, \ + filter_x, x_step_q4, filter_y, \ + y_step_q4, w, h); \ + } else { \ + DECLARE_ALIGNED(16, uint8_t, fdata2[64 * 65]); \ + vpx_convolve8_horiz_##opt(src, src_stride, fdata2, 64, filter_x, \ + x_step_q4, filter_y, y_step_q4, w, h + 1); \ + vpx_convolve8_##avg##vert_##opt(fdata2, 64, dst, dst_stride, filter_x, \ + x_step_q4, filter_y, y_step_q4, w, h); \ + } \ + } #if CONFIG_VP9_HIGHBITDEPTH -typedef void highbd_filter8_1dfunction ( - const uint16_t *src_ptr, - const ptrdiff_t src_pitch, - uint16_t *output_ptr, - ptrdiff_t out_pitch, - unsigned int output_height, - const int16_t *filter, - int bd -); +typedef void highbd_filter8_1dfunction(const uint16_t *src_ptr, + const ptrdiff_t src_pitch, + uint16_t *output_ptr, + ptrdiff_t out_pitch, + unsigned int output_height, + const int16_t *filter, int bd); #define HIGH_FUN_CONV_1D(name, step_q4, filter, dir, src_start, avg, opt) \ - void vpx_highbd_convolve8_##name##_##opt(const uint8_t *src8, \ - ptrdiff_t src_stride, \ - uint8_t *dst8, \ - ptrdiff_t dst_stride, \ - const int16_t *filter_x, \ - int x_step_q4, \ - const int16_t *filter_y, \ - int y_step_q4, \ - int w, int h, int bd) { \ - if (step_q4 == 16 && filter[3] != 128) { \ - uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ - uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ - if (filter[0] || filter[1] || filter[2]) { \ - while (w >= 16) { \ - vpx_highbd_filter_block1d16_##dir##8_##avg##opt(src_start, \ - src_stride, \ - dst, \ - dst_stride, \ - h, \ - filter, \ - bd); \ - src += 16; \ - dst += 16; \ - w -= 16; \ - } \ - while (w >= 8) { \ - vpx_highbd_filter_block1d8_##dir##8_##avg##opt(src_start, \ - src_stride, \ - dst, \ - dst_stride, \ - h, \ - filter, \ - bd); \ - src += 8; \ - dst += 8; \ - w -= 8; \ - } \ - while (w >= 4) { \ - vpx_highbd_filter_block1d4_##dir##8_##avg##opt(src_start, \ - src_stride, \ - dst, \ - 
dst_stride, \ - h, \ - filter, \ - bd); \ - src += 4; \ - dst += 4; \ - w -= 4; \ - } \ - } else { \ - while (w >= 16) { \ - vpx_highbd_filter_block1d16_##dir##2_##avg##opt(src, \ - src_stride, \ - dst, \ - dst_stride, \ - h, \ - filter, \ - bd); \ - src += 16; \ - dst += 16; \ - w -= 16; \ - } \ - while (w >= 8) { \ - vpx_highbd_filter_block1d8_##dir##2_##avg##opt(src, \ - src_stride, \ - dst, \ - dst_stride, \ - h, \ - filter, \ - bd); \ - src += 8; \ - dst += 8; \ - w -= 8; \ - } \ - while (w >= 4) { \ - vpx_highbd_filter_block1d4_##dir##2_##avg##opt(src, \ - src_stride, \ - dst, \ - dst_stride, \ - h, \ - filter, \ - bd); \ - src += 4; \ - dst += 4; \ - w -= 4; \ - } \ - } \ - } \ - if (w) { \ - vpx_highbd_convolve8_##name##_c(src8, src_stride, dst8, dst_stride, \ - filter_x, x_step_q4, filter_y, y_step_q4, \ - w, h, bd); \ - } \ -} + void vpx_highbd_convolve8_##name##_##opt( \ + const uint8_t *src8, ptrdiff_t src_stride, uint8_t *dst8, \ + ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, \ + const int16_t *filter_y, int y_step_q4, int w, int h, int bd) { \ + if (step_q4 == 16 && filter[3] != 128) { \ + uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ + uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ + if (filter[0] | filter[1] | filter[2]) { \ + while (w >= 16) { \ + vpx_highbd_filter_block1d16_##dir##8_##avg##opt( \ + src_start, src_stride, dst, dst_stride, h, filter, bd); \ + src += 16; \ + dst += 16; \ + w -= 16; \ + } \ + while (w >= 8) { \ + vpx_highbd_filter_block1d8_##dir##8_##avg##opt( \ + src_start, src_stride, dst, dst_stride, h, filter, bd); \ + src += 8; \ + dst += 8; \ + w -= 8; \ + } \ + while (w >= 4) { \ + vpx_highbd_filter_block1d4_##dir##8_##avg##opt( \ + src_start, src_stride, dst, dst_stride, h, filter, bd); \ + src += 4; \ + dst += 4; \ + w -= 4; \ + } \ + } else { \ + while (w >= 16) { \ + vpx_highbd_filter_block1d16_##dir##2_##avg##opt( \ + src, src_stride, dst, dst_stride, h, filter, bd); \ + src += 16; \ + dst += 16; \ + w -= 16; 
\ + } \ + while (w >= 8) { \ + vpx_highbd_filter_block1d8_##dir##2_##avg##opt( \ + src, src_stride, dst, dst_stride, h, filter, bd); \ + src += 8; \ + dst += 8; \ + w -= 8; \ + } \ + while (w >= 4) { \ + vpx_highbd_filter_block1d4_##dir##2_##avg##opt( \ + src, src_stride, dst, dst_stride, h, filter, bd); \ + src += 4; \ + dst += 4; \ + w -= 4; \ + } \ + } \ + } \ + if (w) { \ + vpx_highbd_convolve8_##name##_c(src8, src_stride, dst8, dst_stride, \ + filter_x, x_step_q4, filter_y, \ + y_step_q4, w, h, bd); \ + } \ + } -#define HIGH_FUN_CONV_2D(avg, opt) \ -void vpx_highbd_convolve8_##avg##opt(const uint8_t *src, ptrdiff_t src_stride, \ - uint8_t *dst, ptrdiff_t dst_stride, \ - const int16_t *filter_x, int x_step_q4, \ - const int16_t *filter_y, int y_step_q4, \ - int w, int h, int bd) { \ - assert(w <= 64); \ - assert(h <= 64); \ - if (x_step_q4 == 16 && y_step_q4 == 16) { \ - if (filter_x[0] || filter_x[1] || filter_x[2] || filter_x[3] == 128 || \ - filter_y[0] || filter_y[1] || filter_y[2] || filter_y[3] == 128) { \ - DECLARE_ALIGNED(16, uint16_t, fdata2[64 * 71]); \ - vpx_highbd_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, \ - CONVERT_TO_BYTEPTR(fdata2), 64, \ - filter_x, x_step_q4, \ - filter_y, y_step_q4, \ - w, h + 7, bd); \ - vpx_highbd_convolve8_##avg##vert_##opt(CONVERT_TO_BYTEPTR(fdata2) + 192, \ - 64, dst, dst_stride, \ - filter_x, x_step_q4, \ - filter_y, y_step_q4, \ - w, h, bd); \ - } else { \ - DECLARE_ALIGNED(16, uint16_t, fdata2[64 * 65]); \ - vpx_highbd_convolve8_horiz_##opt(src, src_stride, \ - CONVERT_TO_BYTEPTR(fdata2), 64, \ - filter_x, x_step_q4, \ - filter_y, y_step_q4, \ - w, h + 1, bd); \ - vpx_highbd_convolve8_##avg##vert_##opt(CONVERT_TO_BYTEPTR(fdata2), 64, \ - dst, dst_stride, \ - filter_x, x_step_q4, \ - filter_y, y_step_q4, \ - w, h, bd); \ - } \ - } else { \ - vpx_highbd_convolve8_##avg##c(src, src_stride, dst, dst_stride, \ - filter_x, x_step_q4, filter_y, y_step_q4, w, \ - h, bd); \ - } \ -} +#define HIGH_FUN_CONV_2D(avg, 
opt) \ + void vpx_highbd_convolve8_##avg##opt( \ + const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, \ + ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, \ + const int16_t *filter_y, int y_step_q4, int w, int h, int bd) { \ + assert(w <= 64); \ + assert(h <= 64); \ + if (x_step_q4 == 16 && y_step_q4 == 16) { \ + if ((filter_x[0] | filter_x[1] | filter_x[2]) || filter_x[3] == 128) { \ + DECLARE_ALIGNED(16, uint16_t, fdata2[64 * 71]); \ + vpx_highbd_convolve8_horiz_##opt( \ + src - 3 * src_stride, src_stride, CONVERT_TO_BYTEPTR(fdata2), 64, \ + filter_x, x_step_q4, filter_y, y_step_q4, w, h + 7, bd); \ + vpx_highbd_convolve8_##avg##vert_##opt( \ + CONVERT_TO_BYTEPTR(fdata2) + 192, 64, dst, dst_stride, filter_x, \ + x_step_q4, filter_y, y_step_q4, w, h, bd); \ + } else { \ + DECLARE_ALIGNED(16, uint16_t, fdata2[64 * 65]); \ + vpx_highbd_convolve8_horiz_##opt( \ + src, src_stride, CONVERT_TO_BYTEPTR(fdata2), 64, filter_x, \ + x_step_q4, filter_y, y_step_q4, w, h + 1, bd); \ + vpx_highbd_convolve8_##avg##vert_##opt( \ + CONVERT_TO_BYTEPTR(fdata2), 64, dst, dst_stride, filter_x, \ + x_step_q4, filter_y, y_step_q4, w, h, bd); \ + } \ + } else { \ + vpx_highbd_convolve8_##avg##c(src, src_stride, dst, dst_stride, \ + filter_x, x_step_q4, filter_y, y_step_q4, \ + w, h, bd); \ + } \ + } #endif // CONFIG_VP9_HIGHBITDEPTH #endif // VPX_DSP_X86_CONVOLVE_H_ diff --git a/libs/libvpx/vp8/common/x86/postproc_sse2.asm b/libs/libvpx/vpx_dsp/x86/deblock_sse2.asm similarity index 86% rename from libs/libvpx/vp8/common/x86/postproc_sse2.asm rename to libs/libvpx/vpx_dsp/x86/deblock_sse2.asm index fed4ee5ccf..6df360df44 100644 --- a/libs/libvpx/vp8/common/x86/postproc_sse2.asm +++ b/libs/libvpx/vpx_dsp/x86/deblock_sse2.asm @@ -83,7 +83,7 @@ add rbx, 16 %endmacro -;void vp8_post_proc_down_and_across_mb_row_sse2 +;void vpx_post_proc_down_and_across_mb_row_sse2 ;( ; unsigned char *src_ptr, ; unsigned char *dst_ptr, @@ -93,8 +93,8 @@ ; int *flimits, ; int size ;) -global 
sym(vp8_post_proc_down_and_across_mb_row_sse2) PRIVATE -sym(vp8_post_proc_down_and_across_mb_row_sse2): +global sym(vpx_post_proc_down_and_across_mb_row_sse2) PRIVATE +sym(vpx_post_proc_down_and_across_mb_row_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 7 @@ -198,7 +198,7 @@ sym(vp8_post_proc_down_and_across_mb_row_sse2): UPDATE_FLIMIT jmp .acrossnextcol -.acrossdone +.acrossdone: ; last 16 pixels movq QWORD PTR [rdi+rdx-16], mm0 @@ -230,11 +230,11 @@ sym(vp8_post_proc_down_and_across_mb_row_sse2): ret %undef flimit -;void vp8_mbpost_proc_down_xmm(unsigned char *dst, +;void vpx_mbpost_proc_down_xmm(unsigned char *dst, ; int pitch, int rows, int cols,int flimit) -extern sym(vp8_rv) -global sym(vp8_mbpost_proc_down_xmm) PRIVATE -sym(vp8_mbpost_proc_down_xmm): +extern sym(vpx_rv) +global sym(vpx_mbpost_proc_down_xmm) PRIVATE +sym(vpx_mbpost_proc_down_xmm): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 5 @@ -257,7 +257,7 @@ sym(vp8_mbpost_proc_down_xmm): %define flimit4 [rsp+128] %if ABI_IS_32BIT=0 - lea r8, [GLOBAL(sym(vp8_rv))] + lea r8, [GLOBAL(sym(vpx_rv))] %endif ;rows +=8; @@ -278,7 +278,7 @@ sym(vp8_mbpost_proc_down_xmm): lea rdi, [rdi+rdx] movq xmm1, QWORD ptr[rdi] ; first row mov rcx, 8 -.init_borderd ; initialize borders +.init_borderd: ; initialize borders lea rdi, [rdi + rax] movq [rdi], xmm1 @@ -291,7 +291,7 @@ sym(vp8_mbpost_proc_down_xmm): mov rdi, rsi movq xmm1, QWORD ptr[rdi] ; first row mov rcx, 8 -.init_border ; initialize borders +.init_border: ; initialize borders lea rdi, [rdi + rax] movq [rdi], xmm1 @@ -403,13 +403,13 @@ sym(vp8_mbpost_proc_down_xmm): and rcx, 127 %if ABI_IS_32BIT=1 && CONFIG_PIC=1 push rax - lea rax, [GLOBAL(sym(vp8_rv))] - movdqu xmm4, [rax + rcx*2] ;vp8_rv[rcx*2] + lea rax, [GLOBAL(sym(vpx_rv))] + movdqu xmm4, [rax + rcx*2] ;vpx_rv[rcx*2] pop rax %elif ABI_IS_32BIT=0 - movdqu xmm4, [r8 + rcx*2] ;vp8_rv[rcx*2] + movdqu xmm4, [r8 + rcx*2] ;vpx_rv[rcx*2] %else - movdqu xmm4, [sym(vp8_rv) + rcx*2] + movdqu xmm4, [sym(vpx_rv) + 
rcx*2] %endif paddw xmm1, xmm4 @@ -434,7 +434,7 @@ sym(vp8_mbpost_proc_down_xmm): movq mm0, [rsp + rcx*8] ;d[rcx*8] movq [rsi], mm0 -.skip_assignment +.skip_assignment: lea rsi, [rsi+rax] lea rdi, [rdi+rax] @@ -462,10 +462,10 @@ sym(vp8_mbpost_proc_down_xmm): %undef flimit4 -;void vp8_mbpost_proc_across_ip_xmm(unsigned char *src, +;void vpx_mbpost_proc_across_ip_xmm(unsigned char *src, ; int pitch, int rows, int cols,int flimit) -global sym(vp8_mbpost_proc_across_ip_xmm) PRIVATE -sym(vp8_mbpost_proc_across_ip_xmm): +global sym(vpx_mbpost_proc_across_ip_xmm) PRIVATE +sym(vpx_mbpost_proc_across_ip_xmm): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 5 @@ -655,68 +655,6 @@ sym(vp8_mbpost_proc_across_ip_xmm): %undef flimit4 -;void vp8_plane_add_noise_wmt (unsigned char *Start, unsigned char *noise, -; unsigned char blackclamp[16], -; unsigned char whiteclamp[16], -; unsigned char bothclamp[16], -; unsigned int Width, unsigned int Height, int Pitch) -global sym(vp8_plane_add_noise_wmt) PRIVATE -sym(vp8_plane_add_noise_wmt): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 8 - GET_GOT rbx - push rsi - push rdi - ; end prolog - -.addnoise_loop: - call sym(LIBVPX_RAND) WRT_PLT - mov rcx, arg(1) ;noise - and rax, 0xff - add rcx, rax - - ; we rely on the fact that the clamping vectors are stored contiguously - ; in black/white/both order. 
Note that we have to reload this here because - ; rdx could be trashed by rand() - mov rdx, arg(2) ; blackclamp - - - mov rdi, rcx - movsxd rcx, dword arg(5) ;[Width] - mov rsi, arg(0) ;Pos - xor rax,rax - -.addnoise_nextset: - movdqu xmm1,[rsi+rax] ; get the source - - psubusb xmm1, [rdx] ;blackclamp ; clamp both sides so we don't outrange adding noise - paddusb xmm1, [rdx+32] ;bothclamp - psubusb xmm1, [rdx+16] ;whiteclamp - - movdqu xmm2,[rdi+rax] ; get the noise for this line - paddb xmm1,xmm2 ; add it in - movdqu [rsi+rax],xmm1 ; store the result - - add rax,16 ; move to the next line - - cmp rax, rcx - jl .addnoise_nextset - - movsxd rax, dword arg(7) ; Pitch - add arg(0), rax ; Start += Pitch - sub dword arg(6), 1 ; Height -= 1 - jg .addnoise_loop - - ; begin epilog - pop rdi - pop rsi - RESTORE_GOT - UNSHADOW_ARGS - pop rbp - ret - - SECTION_RODATA align 16 four8s: diff --git a/libs/libvpx/vpx_dsp/x86/fwd_dct32x32_impl_avx2.h b/libs/libvpx/vpx_dsp/x86/fwd_dct32x32_impl_avx2.h index 4df39dff86..39d3a3f59c 100644 --- a/libs/libvpx/vpx_dsp/x86/fwd_dct32x32_impl_avx2.h +++ b/libs/libvpx/vpx_dsp/x86/fwd_dct32x32_impl_avx2.h @@ -10,17 +10,18 @@ #include // AVX2 +#include "./vpx_dsp_rtcd.h" #include "vpx_dsp/txfm_common.h" -#define pair256_set_epi16(a, b) \ +#define pair256_set_epi16(a, b) \ _mm256_set_epi16((int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \ (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \ (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \ (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a)) -#define pair256_set_epi32(a, b) \ - _mm256_set_epi32((int)(b), (int)(a), (int)(b), (int)(a), \ - (int)(b), (int)(a), (int)(b), (int)(a)) +#define pair256_set_epi32(a, b) \ + _mm256_set_epi32((int)(b), (int)(a), (int)(b), (int)(a), (int)(b), (int)(a), \ + (int)(b), (int)(a)) #if FDCT32x32_HIGH_PRECISION static INLINE __m256i k_madd_epi32_avx2(__m256i a, __m256i b) { @@ -39,8 +40,7 @@ static INLINE __m256i k_packs_epi64_avx2(__m256i a, 
__m256i b) { } #endif -void FDCT32x32_2D_AVX2(const int16_t *input, - int16_t *output_org, int stride) { +void FDCT32x32_2D_AVX2(const int16_t *input, int16_t *output_org, int stride) { // Calculate pre-multiplied strides const int str1 = stride; const int str2 = 2 * stride; @@ -52,43 +52,45 @@ void FDCT32x32_2D_AVX2(const int16_t *input, // it's a pair of them that we need to repeat four times. This is done // by constructing the 32 bit constant corresponding to that pair. const __m256i k__cospi_p16_p16 = _mm256_set1_epi16((int16_t)cospi_16_64); - const __m256i k__cospi_p16_m16 = pair256_set_epi16(+cospi_16_64, -cospi_16_64); - const __m256i k__cospi_m08_p24 = pair256_set_epi16(-cospi_8_64, cospi_24_64); + const __m256i k__cospi_p16_m16 = + pair256_set_epi16(+cospi_16_64, -cospi_16_64); + const __m256i k__cospi_m08_p24 = pair256_set_epi16(-cospi_8_64, cospi_24_64); const __m256i k__cospi_m24_m08 = pair256_set_epi16(-cospi_24_64, -cospi_8_64); - const __m256i k__cospi_p24_p08 = pair256_set_epi16(+cospi_24_64, cospi_8_64); - const __m256i k__cospi_p12_p20 = pair256_set_epi16(+cospi_12_64, cospi_20_64); - const __m256i k__cospi_m20_p12 = pair256_set_epi16(-cospi_20_64, cospi_12_64); - const __m256i k__cospi_m04_p28 = pair256_set_epi16(-cospi_4_64, cospi_28_64); - const __m256i k__cospi_p28_p04 = pair256_set_epi16(+cospi_28_64, cospi_4_64); + const __m256i k__cospi_p24_p08 = pair256_set_epi16(+cospi_24_64, cospi_8_64); + const __m256i k__cospi_p12_p20 = pair256_set_epi16(+cospi_12_64, cospi_20_64); + const __m256i k__cospi_m20_p12 = pair256_set_epi16(-cospi_20_64, cospi_12_64); + const __m256i k__cospi_m04_p28 = pair256_set_epi16(-cospi_4_64, cospi_28_64); + const __m256i k__cospi_p28_p04 = pair256_set_epi16(+cospi_28_64, cospi_4_64); const __m256i k__cospi_m28_m04 = pair256_set_epi16(-cospi_28_64, -cospi_4_64); - const __m256i k__cospi_m12_m20 = pair256_set_epi16(-cospi_12_64, -cospi_20_64); - const __m256i k__cospi_p30_p02 = pair256_set_epi16(+cospi_30_64, 
cospi_2_64); - const __m256i k__cospi_p14_p18 = pair256_set_epi16(+cospi_14_64, cospi_18_64); - const __m256i k__cospi_p22_p10 = pair256_set_epi16(+cospi_22_64, cospi_10_64); - const __m256i k__cospi_p06_p26 = pair256_set_epi16(+cospi_6_64, cospi_26_64); - const __m256i k__cospi_m26_p06 = pair256_set_epi16(-cospi_26_64, cospi_6_64); - const __m256i k__cospi_m10_p22 = pair256_set_epi16(-cospi_10_64, cospi_22_64); - const __m256i k__cospi_m18_p14 = pair256_set_epi16(-cospi_18_64, cospi_14_64); - const __m256i k__cospi_m02_p30 = pair256_set_epi16(-cospi_2_64, cospi_30_64); - const __m256i k__cospi_p31_p01 = pair256_set_epi16(+cospi_31_64, cospi_1_64); - const __m256i k__cospi_p15_p17 = pair256_set_epi16(+cospi_15_64, cospi_17_64); - const __m256i k__cospi_p23_p09 = pair256_set_epi16(+cospi_23_64, cospi_9_64); - const __m256i k__cospi_p07_p25 = pair256_set_epi16(+cospi_7_64, cospi_25_64); - const __m256i k__cospi_m25_p07 = pair256_set_epi16(-cospi_25_64, cospi_7_64); - const __m256i k__cospi_m09_p23 = pair256_set_epi16(-cospi_9_64, cospi_23_64); - const __m256i k__cospi_m17_p15 = pair256_set_epi16(-cospi_17_64, cospi_15_64); - const __m256i k__cospi_m01_p31 = pair256_set_epi16(-cospi_1_64, cospi_31_64); - const __m256i k__cospi_p27_p05 = pair256_set_epi16(+cospi_27_64, cospi_5_64); - const __m256i k__cospi_p11_p21 = pair256_set_epi16(+cospi_11_64, cospi_21_64); - const __m256i k__cospi_p19_p13 = pair256_set_epi16(+cospi_19_64, cospi_13_64); - const __m256i k__cospi_p03_p29 = pair256_set_epi16(+cospi_3_64, cospi_29_64); - const __m256i k__cospi_m29_p03 = pair256_set_epi16(-cospi_29_64, cospi_3_64); - const __m256i k__cospi_m13_p19 = pair256_set_epi16(-cospi_13_64, cospi_19_64); - const __m256i k__cospi_m21_p11 = pair256_set_epi16(-cospi_21_64, cospi_11_64); - const __m256i k__cospi_m05_p27 = pair256_set_epi16(-cospi_5_64, cospi_27_64); + const __m256i k__cospi_m12_m20 = + pair256_set_epi16(-cospi_12_64, -cospi_20_64); + const __m256i k__cospi_p30_p02 = 
pair256_set_epi16(+cospi_30_64, cospi_2_64); + const __m256i k__cospi_p14_p18 = pair256_set_epi16(+cospi_14_64, cospi_18_64); + const __m256i k__cospi_p22_p10 = pair256_set_epi16(+cospi_22_64, cospi_10_64); + const __m256i k__cospi_p06_p26 = pair256_set_epi16(+cospi_6_64, cospi_26_64); + const __m256i k__cospi_m26_p06 = pair256_set_epi16(-cospi_26_64, cospi_6_64); + const __m256i k__cospi_m10_p22 = pair256_set_epi16(-cospi_10_64, cospi_22_64); + const __m256i k__cospi_m18_p14 = pair256_set_epi16(-cospi_18_64, cospi_14_64); + const __m256i k__cospi_m02_p30 = pair256_set_epi16(-cospi_2_64, cospi_30_64); + const __m256i k__cospi_p31_p01 = pair256_set_epi16(+cospi_31_64, cospi_1_64); + const __m256i k__cospi_p15_p17 = pair256_set_epi16(+cospi_15_64, cospi_17_64); + const __m256i k__cospi_p23_p09 = pair256_set_epi16(+cospi_23_64, cospi_9_64); + const __m256i k__cospi_p07_p25 = pair256_set_epi16(+cospi_7_64, cospi_25_64); + const __m256i k__cospi_m25_p07 = pair256_set_epi16(-cospi_25_64, cospi_7_64); + const __m256i k__cospi_m09_p23 = pair256_set_epi16(-cospi_9_64, cospi_23_64); + const __m256i k__cospi_m17_p15 = pair256_set_epi16(-cospi_17_64, cospi_15_64); + const __m256i k__cospi_m01_p31 = pair256_set_epi16(-cospi_1_64, cospi_31_64); + const __m256i k__cospi_p27_p05 = pair256_set_epi16(+cospi_27_64, cospi_5_64); + const __m256i k__cospi_p11_p21 = pair256_set_epi16(+cospi_11_64, cospi_21_64); + const __m256i k__cospi_p19_p13 = pair256_set_epi16(+cospi_19_64, cospi_13_64); + const __m256i k__cospi_p03_p29 = pair256_set_epi16(+cospi_3_64, cospi_29_64); + const __m256i k__cospi_m29_p03 = pair256_set_epi16(-cospi_29_64, cospi_3_64); + const __m256i k__cospi_m13_p19 = pair256_set_epi16(-cospi_13_64, cospi_19_64); + const __m256i k__cospi_m21_p11 = pair256_set_epi16(-cospi_21_64, cospi_11_64); + const __m256i k__cospi_m05_p27 = pair256_set_epi16(-cospi_5_64, cospi_27_64); const __m256i k__DCT_CONST_ROUNDING = _mm256_set1_epi32(DCT_CONST_ROUNDING); const __m256i kZero = 
_mm256_set1_epi16(0); - const __m256i kOne = _mm256_set1_epi16(1); + const __m256i kOne = _mm256_set1_epi16(1); // Do the two transform/transpose passes int pass; for (pass = 0; pass < 2; ++pass) { @@ -103,125 +105,149 @@ void FDCT32x32_2D_AVX2(const int16_t *input, // Note: even though all the loads below are aligned, using the aligned // intrinsic make the code slightly slower. if (0 == pass) { - const int16_t *in = &input[column_start]; + const int16_t *in = &input[column_start]; // step1[i] = (in[ 0 * stride] + in[(32 - 1) * stride]) << 2; // Note: the next four blocks could be in a loop. That would help the // instruction cache but is actually slower. { - const int16_t *ina = in + 0 * str1; - const int16_t *inb = in + 31 * str1; - __m256i *step1a = &step1[ 0]; + const int16_t *ina = in + 0 * str1; + const int16_t *inb = in + 31 * str1; + __m256i *step1a = &step1[0]; __m256i *step1b = &step1[31]; - const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina)); - const __m256i ina1 = _mm256_loadu_si256((const __m256i *)(ina + str1)); - const __m256i ina2 = _mm256_loadu_si256((const __m256i *)(ina + str2)); - const __m256i ina3 = _mm256_loadu_si256((const __m256i *)(ina + str3)); - const __m256i inb3 = _mm256_loadu_si256((const __m256i *)(inb - str3)); - const __m256i inb2 = _mm256_loadu_si256((const __m256i *)(inb - str2)); - const __m256i inb1 = _mm256_loadu_si256((const __m256i *)(inb - str1)); - const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb)); - step1a[ 0] = _mm256_add_epi16(ina0, inb0); - step1a[ 1] = _mm256_add_epi16(ina1, inb1); - step1a[ 2] = _mm256_add_epi16(ina2, inb2); - step1a[ 3] = _mm256_add_epi16(ina3, inb3); + const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina)); + const __m256i ina1 = + _mm256_loadu_si256((const __m256i *)(ina + str1)); + const __m256i ina2 = + _mm256_loadu_si256((const __m256i *)(ina + str2)); + const __m256i ina3 = + _mm256_loadu_si256((const __m256i *)(ina + str3)); + const __m256i inb3 = + 
_mm256_loadu_si256((const __m256i *)(inb - str3)); + const __m256i inb2 = + _mm256_loadu_si256((const __m256i *)(inb - str2)); + const __m256i inb1 = + _mm256_loadu_si256((const __m256i *)(inb - str1)); + const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb)); + step1a[0] = _mm256_add_epi16(ina0, inb0); + step1a[1] = _mm256_add_epi16(ina1, inb1); + step1a[2] = _mm256_add_epi16(ina2, inb2); + step1a[3] = _mm256_add_epi16(ina3, inb3); step1b[-3] = _mm256_sub_epi16(ina3, inb3); step1b[-2] = _mm256_sub_epi16(ina2, inb2); step1b[-1] = _mm256_sub_epi16(ina1, inb1); step1b[-0] = _mm256_sub_epi16(ina0, inb0); - step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2); - step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2); - step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2); - step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2); + step1a[0] = _mm256_slli_epi16(step1a[0], 2); + step1a[1] = _mm256_slli_epi16(step1a[1], 2); + step1a[2] = _mm256_slli_epi16(step1a[2], 2); + step1a[3] = _mm256_slli_epi16(step1a[3], 2); step1b[-3] = _mm256_slli_epi16(step1b[-3], 2); step1b[-2] = _mm256_slli_epi16(step1b[-2], 2); step1b[-1] = _mm256_slli_epi16(step1b[-1], 2); step1b[-0] = _mm256_slli_epi16(step1b[-0], 2); } { - const int16_t *ina = in + 4 * str1; - const int16_t *inb = in + 27 * str1; - __m256i *step1a = &step1[ 4]; + const int16_t *ina = in + 4 * str1; + const int16_t *inb = in + 27 * str1; + __m256i *step1a = &step1[4]; __m256i *step1b = &step1[27]; - const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina)); - const __m256i ina1 = _mm256_loadu_si256((const __m256i *)(ina + str1)); - const __m256i ina2 = _mm256_loadu_si256((const __m256i *)(ina + str2)); - const __m256i ina3 = _mm256_loadu_si256((const __m256i *)(ina + str3)); - const __m256i inb3 = _mm256_loadu_si256((const __m256i *)(inb - str3)); - const __m256i inb2 = _mm256_loadu_si256((const __m256i *)(inb - str2)); - const __m256i inb1 = _mm256_loadu_si256((const __m256i *)(inb - str1)); - const __m256i inb0 = 
_mm256_loadu_si256((const __m256i *)(inb)); - step1a[ 0] = _mm256_add_epi16(ina0, inb0); - step1a[ 1] = _mm256_add_epi16(ina1, inb1); - step1a[ 2] = _mm256_add_epi16(ina2, inb2); - step1a[ 3] = _mm256_add_epi16(ina3, inb3); + const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina)); + const __m256i ina1 = + _mm256_loadu_si256((const __m256i *)(ina + str1)); + const __m256i ina2 = + _mm256_loadu_si256((const __m256i *)(ina + str2)); + const __m256i ina3 = + _mm256_loadu_si256((const __m256i *)(ina + str3)); + const __m256i inb3 = + _mm256_loadu_si256((const __m256i *)(inb - str3)); + const __m256i inb2 = + _mm256_loadu_si256((const __m256i *)(inb - str2)); + const __m256i inb1 = + _mm256_loadu_si256((const __m256i *)(inb - str1)); + const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb)); + step1a[0] = _mm256_add_epi16(ina0, inb0); + step1a[1] = _mm256_add_epi16(ina1, inb1); + step1a[2] = _mm256_add_epi16(ina2, inb2); + step1a[3] = _mm256_add_epi16(ina3, inb3); step1b[-3] = _mm256_sub_epi16(ina3, inb3); step1b[-2] = _mm256_sub_epi16(ina2, inb2); step1b[-1] = _mm256_sub_epi16(ina1, inb1); step1b[-0] = _mm256_sub_epi16(ina0, inb0); - step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2); - step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2); - step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2); - step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2); + step1a[0] = _mm256_slli_epi16(step1a[0], 2); + step1a[1] = _mm256_slli_epi16(step1a[1], 2); + step1a[2] = _mm256_slli_epi16(step1a[2], 2); + step1a[3] = _mm256_slli_epi16(step1a[3], 2); step1b[-3] = _mm256_slli_epi16(step1b[-3], 2); step1b[-2] = _mm256_slli_epi16(step1b[-2], 2); step1b[-1] = _mm256_slli_epi16(step1b[-1], 2); step1b[-0] = _mm256_slli_epi16(step1b[-0], 2); } { - const int16_t *ina = in + 8 * str1; - const int16_t *inb = in + 23 * str1; - __m256i *step1a = &step1[ 8]; + const int16_t *ina = in + 8 * str1; + const int16_t *inb = in + 23 * str1; + __m256i *step1a = &step1[8]; __m256i *step1b = &step1[23]; - const 
__m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina)); - const __m256i ina1 = _mm256_loadu_si256((const __m256i *)(ina + str1)); - const __m256i ina2 = _mm256_loadu_si256((const __m256i *)(ina + str2)); - const __m256i ina3 = _mm256_loadu_si256((const __m256i *)(ina + str3)); - const __m256i inb3 = _mm256_loadu_si256((const __m256i *)(inb - str3)); - const __m256i inb2 = _mm256_loadu_si256((const __m256i *)(inb - str2)); - const __m256i inb1 = _mm256_loadu_si256((const __m256i *)(inb - str1)); - const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb)); - step1a[ 0] = _mm256_add_epi16(ina0, inb0); - step1a[ 1] = _mm256_add_epi16(ina1, inb1); - step1a[ 2] = _mm256_add_epi16(ina2, inb2); - step1a[ 3] = _mm256_add_epi16(ina3, inb3); + const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina)); + const __m256i ina1 = + _mm256_loadu_si256((const __m256i *)(ina + str1)); + const __m256i ina2 = + _mm256_loadu_si256((const __m256i *)(ina + str2)); + const __m256i ina3 = + _mm256_loadu_si256((const __m256i *)(ina + str3)); + const __m256i inb3 = + _mm256_loadu_si256((const __m256i *)(inb - str3)); + const __m256i inb2 = + _mm256_loadu_si256((const __m256i *)(inb - str2)); + const __m256i inb1 = + _mm256_loadu_si256((const __m256i *)(inb - str1)); + const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb)); + step1a[0] = _mm256_add_epi16(ina0, inb0); + step1a[1] = _mm256_add_epi16(ina1, inb1); + step1a[2] = _mm256_add_epi16(ina2, inb2); + step1a[3] = _mm256_add_epi16(ina3, inb3); step1b[-3] = _mm256_sub_epi16(ina3, inb3); step1b[-2] = _mm256_sub_epi16(ina2, inb2); step1b[-1] = _mm256_sub_epi16(ina1, inb1); step1b[-0] = _mm256_sub_epi16(ina0, inb0); - step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2); - step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2); - step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2); - step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2); + step1a[0] = _mm256_slli_epi16(step1a[0], 2); + step1a[1] = _mm256_slli_epi16(step1a[1], 2); + step1a[2] = 
_mm256_slli_epi16(step1a[2], 2); + step1a[3] = _mm256_slli_epi16(step1a[3], 2); step1b[-3] = _mm256_slli_epi16(step1b[-3], 2); step1b[-2] = _mm256_slli_epi16(step1b[-2], 2); step1b[-1] = _mm256_slli_epi16(step1b[-1], 2); step1b[-0] = _mm256_slli_epi16(step1b[-0], 2); } { - const int16_t *ina = in + 12 * str1; - const int16_t *inb = in + 19 * str1; + const int16_t *ina = in + 12 * str1; + const int16_t *inb = in + 19 * str1; __m256i *step1a = &step1[12]; __m256i *step1b = &step1[19]; - const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina)); - const __m256i ina1 = _mm256_loadu_si256((const __m256i *)(ina + str1)); - const __m256i ina2 = _mm256_loadu_si256((const __m256i *)(ina + str2)); - const __m256i ina3 = _mm256_loadu_si256((const __m256i *)(ina + str3)); - const __m256i inb3 = _mm256_loadu_si256((const __m256i *)(inb - str3)); - const __m256i inb2 = _mm256_loadu_si256((const __m256i *)(inb - str2)); - const __m256i inb1 = _mm256_loadu_si256((const __m256i *)(inb - str1)); - const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb)); - step1a[ 0] = _mm256_add_epi16(ina0, inb0); - step1a[ 1] = _mm256_add_epi16(ina1, inb1); - step1a[ 2] = _mm256_add_epi16(ina2, inb2); - step1a[ 3] = _mm256_add_epi16(ina3, inb3); + const __m256i ina0 = _mm256_loadu_si256((const __m256i *)(ina)); + const __m256i ina1 = + _mm256_loadu_si256((const __m256i *)(ina + str1)); + const __m256i ina2 = + _mm256_loadu_si256((const __m256i *)(ina + str2)); + const __m256i ina3 = + _mm256_loadu_si256((const __m256i *)(ina + str3)); + const __m256i inb3 = + _mm256_loadu_si256((const __m256i *)(inb - str3)); + const __m256i inb2 = + _mm256_loadu_si256((const __m256i *)(inb - str2)); + const __m256i inb1 = + _mm256_loadu_si256((const __m256i *)(inb - str1)); + const __m256i inb0 = _mm256_loadu_si256((const __m256i *)(inb)); + step1a[0] = _mm256_add_epi16(ina0, inb0); + step1a[1] = _mm256_add_epi16(ina1, inb1); + step1a[2] = _mm256_add_epi16(ina2, inb2); + step1a[3] = 
_mm256_add_epi16(ina3, inb3); step1b[-3] = _mm256_sub_epi16(ina3, inb3); step1b[-2] = _mm256_sub_epi16(ina2, inb2); step1b[-1] = _mm256_sub_epi16(ina1, inb1); step1b[-0] = _mm256_sub_epi16(ina0, inb0); - step1a[ 0] = _mm256_slli_epi16(step1a[ 0], 2); - step1a[ 1] = _mm256_slli_epi16(step1a[ 1], 2); - step1a[ 2] = _mm256_slli_epi16(step1a[ 2], 2); - step1a[ 3] = _mm256_slli_epi16(step1a[ 3], 2); + step1a[0] = _mm256_slli_epi16(step1a[0], 2); + step1a[1] = _mm256_slli_epi16(step1a[1], 2); + step1a[2] = _mm256_slli_epi16(step1a[2], 2); + step1a[3] = _mm256_slli_epi16(step1a[3], 2); step1b[-3] = _mm256_slli_epi16(step1b[-3], 2); step1b[-2] = _mm256_slli_epi16(step1b[-2], 2); step1b[-1] = _mm256_slli_epi16(step1b[-1], 2); @@ -236,52 +262,52 @@ void FDCT32x32_2D_AVX2(const int16_t *input, // Note: the next four blocks could be in a loop. That would help the // instruction cache but is actually slower. { - __m256i in00 = _mm256_loadu_si256((const __m256i *)(in + 0 * 32)); - __m256i in01 = _mm256_loadu_si256((const __m256i *)(in + 1 * 32)); - __m256i in02 = _mm256_loadu_si256((const __m256i *)(in + 2 * 32)); - __m256i in03 = _mm256_loadu_si256((const __m256i *)(in + 3 * 32)); - __m256i in28 = _mm256_loadu_si256((const __m256i *)(in + 28 * 32)); - __m256i in29 = _mm256_loadu_si256((const __m256i *)(in + 29 * 32)); - __m256i in30 = _mm256_loadu_si256((const __m256i *)(in + 30 * 32)); - __m256i in31 = _mm256_loadu_si256((const __m256i *)(in + 31 * 32)); - step1[ 0] = _mm256_add_epi16(in00, in31); - step1[ 1] = _mm256_add_epi16(in01, in30); - step1[ 2] = _mm256_add_epi16(in02, in29); - step1[ 3] = _mm256_add_epi16(in03, in28); + __m256i in00 = _mm256_loadu_si256((const __m256i *)(in + 0 * 32)); + __m256i in01 = _mm256_loadu_si256((const __m256i *)(in + 1 * 32)); + __m256i in02 = _mm256_loadu_si256((const __m256i *)(in + 2 * 32)); + __m256i in03 = _mm256_loadu_si256((const __m256i *)(in + 3 * 32)); + __m256i in28 = _mm256_loadu_si256((const __m256i *)(in + 28 * 32)); + __m256i 
in29 = _mm256_loadu_si256((const __m256i *)(in + 29 * 32)); + __m256i in30 = _mm256_loadu_si256((const __m256i *)(in + 30 * 32)); + __m256i in31 = _mm256_loadu_si256((const __m256i *)(in + 31 * 32)); + step1[0] = _mm256_add_epi16(in00, in31); + step1[1] = _mm256_add_epi16(in01, in30); + step1[2] = _mm256_add_epi16(in02, in29); + step1[3] = _mm256_add_epi16(in03, in28); step1[28] = _mm256_sub_epi16(in03, in28); step1[29] = _mm256_sub_epi16(in02, in29); step1[30] = _mm256_sub_epi16(in01, in30); step1[31] = _mm256_sub_epi16(in00, in31); } { - __m256i in04 = _mm256_loadu_si256((const __m256i *)(in + 4 * 32)); - __m256i in05 = _mm256_loadu_si256((const __m256i *)(in + 5 * 32)); - __m256i in06 = _mm256_loadu_si256((const __m256i *)(in + 6 * 32)); - __m256i in07 = _mm256_loadu_si256((const __m256i *)(in + 7 * 32)); - __m256i in24 = _mm256_loadu_si256((const __m256i *)(in + 24 * 32)); - __m256i in25 = _mm256_loadu_si256((const __m256i *)(in + 25 * 32)); - __m256i in26 = _mm256_loadu_si256((const __m256i *)(in + 26 * 32)); - __m256i in27 = _mm256_loadu_si256((const __m256i *)(in + 27 * 32)); - step1[ 4] = _mm256_add_epi16(in04, in27); - step1[ 5] = _mm256_add_epi16(in05, in26); - step1[ 6] = _mm256_add_epi16(in06, in25); - step1[ 7] = _mm256_add_epi16(in07, in24); + __m256i in04 = _mm256_loadu_si256((const __m256i *)(in + 4 * 32)); + __m256i in05 = _mm256_loadu_si256((const __m256i *)(in + 5 * 32)); + __m256i in06 = _mm256_loadu_si256((const __m256i *)(in + 6 * 32)); + __m256i in07 = _mm256_loadu_si256((const __m256i *)(in + 7 * 32)); + __m256i in24 = _mm256_loadu_si256((const __m256i *)(in + 24 * 32)); + __m256i in25 = _mm256_loadu_si256((const __m256i *)(in + 25 * 32)); + __m256i in26 = _mm256_loadu_si256((const __m256i *)(in + 26 * 32)); + __m256i in27 = _mm256_loadu_si256((const __m256i *)(in + 27 * 32)); + step1[4] = _mm256_add_epi16(in04, in27); + step1[5] = _mm256_add_epi16(in05, in26); + step1[6] = _mm256_add_epi16(in06, in25); + step1[7] = _mm256_add_epi16(in07, 
in24); step1[24] = _mm256_sub_epi16(in07, in24); step1[25] = _mm256_sub_epi16(in06, in25); step1[26] = _mm256_sub_epi16(in05, in26); step1[27] = _mm256_sub_epi16(in04, in27); } { - __m256i in08 = _mm256_loadu_si256((const __m256i *)(in + 8 * 32)); - __m256i in09 = _mm256_loadu_si256((const __m256i *)(in + 9 * 32)); - __m256i in10 = _mm256_loadu_si256((const __m256i *)(in + 10 * 32)); - __m256i in11 = _mm256_loadu_si256((const __m256i *)(in + 11 * 32)); - __m256i in20 = _mm256_loadu_si256((const __m256i *)(in + 20 * 32)); - __m256i in21 = _mm256_loadu_si256((const __m256i *)(in + 21 * 32)); - __m256i in22 = _mm256_loadu_si256((const __m256i *)(in + 22 * 32)); - __m256i in23 = _mm256_loadu_si256((const __m256i *)(in + 23 * 32)); - step1[ 8] = _mm256_add_epi16(in08, in23); - step1[ 9] = _mm256_add_epi16(in09, in22); + __m256i in08 = _mm256_loadu_si256((const __m256i *)(in + 8 * 32)); + __m256i in09 = _mm256_loadu_si256((const __m256i *)(in + 9 * 32)); + __m256i in10 = _mm256_loadu_si256((const __m256i *)(in + 10 * 32)); + __m256i in11 = _mm256_loadu_si256((const __m256i *)(in + 11 * 32)); + __m256i in20 = _mm256_loadu_si256((const __m256i *)(in + 20 * 32)); + __m256i in21 = _mm256_loadu_si256((const __m256i *)(in + 21 * 32)); + __m256i in22 = _mm256_loadu_si256((const __m256i *)(in + 22 * 32)); + __m256i in23 = _mm256_loadu_si256((const __m256i *)(in + 23 * 32)); + step1[8] = _mm256_add_epi16(in08, in23); + step1[9] = _mm256_add_epi16(in09, in22); step1[10] = _mm256_add_epi16(in10, in21); step1[11] = _mm256_add_epi16(in11, in20); step1[20] = _mm256_sub_epi16(in11, in20); @@ -290,14 +316,14 @@ void FDCT32x32_2D_AVX2(const int16_t *input, step1[23] = _mm256_sub_epi16(in08, in23); } { - __m256i in12 = _mm256_loadu_si256((const __m256i *)(in + 12 * 32)); - __m256i in13 = _mm256_loadu_si256((const __m256i *)(in + 13 * 32)); - __m256i in14 = _mm256_loadu_si256((const __m256i *)(in + 14 * 32)); - __m256i in15 = _mm256_loadu_si256((const __m256i *)(in + 15 * 32)); - __m256i 
in16 = _mm256_loadu_si256((const __m256i *)(in + 16 * 32)); - __m256i in17 = _mm256_loadu_si256((const __m256i *)(in + 17 * 32)); - __m256i in18 = _mm256_loadu_si256((const __m256i *)(in + 18 * 32)); - __m256i in19 = _mm256_loadu_si256((const __m256i *)(in + 19 * 32)); + __m256i in12 = _mm256_loadu_si256((const __m256i *)(in + 12 * 32)); + __m256i in13 = _mm256_loadu_si256((const __m256i *)(in + 13 * 32)); + __m256i in14 = _mm256_loadu_si256((const __m256i *)(in + 14 * 32)); + __m256i in15 = _mm256_loadu_si256((const __m256i *)(in + 15 * 32)); + __m256i in16 = _mm256_loadu_si256((const __m256i *)(in + 16 * 32)); + __m256i in17 = _mm256_loadu_si256((const __m256i *)(in + 17 * 32)); + __m256i in18 = _mm256_loadu_si256((const __m256i *)(in + 18 * 32)); + __m256i in19 = _mm256_loadu_si256((const __m256i *)(in + 19 * 32)); step1[12] = _mm256_add_epi16(in12, in19); step1[13] = _mm256_add_epi16(in13, in18); step1[14] = _mm256_add_epi16(in14, in17); @@ -310,16 +336,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, } // Stage 2 { - step2[ 0] = _mm256_add_epi16(step1[0], step1[15]); - step2[ 1] = _mm256_add_epi16(step1[1], step1[14]); - step2[ 2] = _mm256_add_epi16(step1[2], step1[13]); - step2[ 3] = _mm256_add_epi16(step1[3], step1[12]); - step2[ 4] = _mm256_add_epi16(step1[4], step1[11]); - step2[ 5] = _mm256_add_epi16(step1[5], step1[10]); - step2[ 6] = _mm256_add_epi16(step1[6], step1[ 9]); - step2[ 7] = _mm256_add_epi16(step1[7], step1[ 8]); - step2[ 8] = _mm256_sub_epi16(step1[7], step1[ 8]); - step2[ 9] = _mm256_sub_epi16(step1[6], step1[ 9]); + step2[0] = _mm256_add_epi16(step1[0], step1[15]); + step2[1] = _mm256_add_epi16(step1[1], step1[14]); + step2[2] = _mm256_add_epi16(step1[2], step1[13]); + step2[3] = _mm256_add_epi16(step1[3], step1[12]); + step2[4] = _mm256_add_epi16(step1[4], step1[11]); + step2[5] = _mm256_add_epi16(step1[5], step1[10]); + step2[6] = _mm256_add_epi16(step1[6], step1[9]); + step2[7] = _mm256_add_epi16(step1[7], step1[8]); + step2[8] = 
_mm256_sub_epi16(step1[7], step1[8]); + step2[9] = _mm256_sub_epi16(step1[6], step1[9]); step2[10] = _mm256_sub_epi16(step1[5], step1[10]); step2[11] = _mm256_sub_epi16(step1[4], step1[11]); step2[12] = _mm256_sub_epi16(step1[3], step1[12]); @@ -353,22 +379,38 @@ void FDCT32x32_2D_AVX2(const int16_t *input, const __m256i s2_27_2 = _mm256_madd_epi16(s2_20_0, k__cospi_p16_p16); const __m256i s2_27_3 = _mm256_madd_epi16(s2_20_1, k__cospi_p16_p16); // dct_const_round_shift - const __m256i s2_20_4 = _mm256_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING); - const __m256i s2_20_5 = _mm256_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING); - const __m256i s2_21_4 = _mm256_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING); - const __m256i s2_21_5 = _mm256_add_epi32(s2_21_3, k__DCT_CONST_ROUNDING); - const __m256i s2_22_4 = _mm256_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING); - const __m256i s2_22_5 = _mm256_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING); - const __m256i s2_23_4 = _mm256_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING); - const __m256i s2_23_5 = _mm256_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING); - const __m256i s2_24_4 = _mm256_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING); - const __m256i s2_24_5 = _mm256_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING); - const __m256i s2_25_4 = _mm256_add_epi32(s2_25_2, k__DCT_CONST_ROUNDING); - const __m256i s2_25_5 = _mm256_add_epi32(s2_25_3, k__DCT_CONST_ROUNDING); - const __m256i s2_26_4 = _mm256_add_epi32(s2_26_2, k__DCT_CONST_ROUNDING); - const __m256i s2_26_5 = _mm256_add_epi32(s2_26_3, k__DCT_CONST_ROUNDING); - const __m256i s2_27_4 = _mm256_add_epi32(s2_27_2, k__DCT_CONST_ROUNDING); - const __m256i s2_27_5 = _mm256_add_epi32(s2_27_3, k__DCT_CONST_ROUNDING); + const __m256i s2_20_4 = + _mm256_add_epi32(s2_20_2, k__DCT_CONST_ROUNDING); + const __m256i s2_20_5 = + _mm256_add_epi32(s2_20_3, k__DCT_CONST_ROUNDING); + const __m256i s2_21_4 = + _mm256_add_epi32(s2_21_2, k__DCT_CONST_ROUNDING); + const __m256i s2_21_5 = + _mm256_add_epi32(s2_21_3, 
k__DCT_CONST_ROUNDING); + const __m256i s2_22_4 = + _mm256_add_epi32(s2_22_2, k__DCT_CONST_ROUNDING); + const __m256i s2_22_5 = + _mm256_add_epi32(s2_22_3, k__DCT_CONST_ROUNDING); + const __m256i s2_23_4 = + _mm256_add_epi32(s2_23_2, k__DCT_CONST_ROUNDING); + const __m256i s2_23_5 = + _mm256_add_epi32(s2_23_3, k__DCT_CONST_ROUNDING); + const __m256i s2_24_4 = + _mm256_add_epi32(s2_24_2, k__DCT_CONST_ROUNDING); + const __m256i s2_24_5 = + _mm256_add_epi32(s2_24_3, k__DCT_CONST_ROUNDING); + const __m256i s2_25_4 = + _mm256_add_epi32(s2_25_2, k__DCT_CONST_ROUNDING); + const __m256i s2_25_5 = + _mm256_add_epi32(s2_25_3, k__DCT_CONST_ROUNDING); + const __m256i s2_26_4 = + _mm256_add_epi32(s2_26_2, k__DCT_CONST_ROUNDING); + const __m256i s2_26_5 = + _mm256_add_epi32(s2_26_3, k__DCT_CONST_ROUNDING); + const __m256i s2_27_4 = + _mm256_add_epi32(s2_27_2, k__DCT_CONST_ROUNDING); + const __m256i s2_27_5 = + _mm256_add_epi32(s2_27_3, k__DCT_CONST_ROUNDING); const __m256i s2_20_6 = _mm256_srai_epi32(s2_20_4, DCT_CONST_BITS); const __m256i s2_20_7 = _mm256_srai_epi32(s2_20_5, DCT_CONST_BITS); const __m256i s2_21_6 = _mm256_srai_epi32(s2_21_4, DCT_CONST_BITS); @@ -400,49 +442,49 @@ void FDCT32x32_2D_AVX2(const int16_t *input, // dump the magnitude by half, hence the intermediate values are within // the range of 16 bits. 
if (1 == pass) { - __m256i s3_00_0 = _mm256_cmpgt_epi16(kZero,step2[ 0]); - __m256i s3_01_0 = _mm256_cmpgt_epi16(kZero,step2[ 1]); - __m256i s3_02_0 = _mm256_cmpgt_epi16(kZero,step2[ 2]); - __m256i s3_03_0 = _mm256_cmpgt_epi16(kZero,step2[ 3]); - __m256i s3_04_0 = _mm256_cmpgt_epi16(kZero,step2[ 4]); - __m256i s3_05_0 = _mm256_cmpgt_epi16(kZero,step2[ 5]); - __m256i s3_06_0 = _mm256_cmpgt_epi16(kZero,step2[ 6]); - __m256i s3_07_0 = _mm256_cmpgt_epi16(kZero,step2[ 7]); - __m256i s2_08_0 = _mm256_cmpgt_epi16(kZero,step2[ 8]); - __m256i s2_09_0 = _mm256_cmpgt_epi16(kZero,step2[ 9]); - __m256i s3_10_0 = _mm256_cmpgt_epi16(kZero,step2[10]); - __m256i s3_11_0 = _mm256_cmpgt_epi16(kZero,step2[11]); - __m256i s3_12_0 = _mm256_cmpgt_epi16(kZero,step2[12]); - __m256i s3_13_0 = _mm256_cmpgt_epi16(kZero,step2[13]); - __m256i s2_14_0 = _mm256_cmpgt_epi16(kZero,step2[14]); - __m256i s2_15_0 = _mm256_cmpgt_epi16(kZero,step2[15]); - __m256i s3_16_0 = _mm256_cmpgt_epi16(kZero,step1[16]); - __m256i s3_17_0 = _mm256_cmpgt_epi16(kZero,step1[17]); - __m256i s3_18_0 = _mm256_cmpgt_epi16(kZero,step1[18]); - __m256i s3_19_0 = _mm256_cmpgt_epi16(kZero,step1[19]); - __m256i s3_20_0 = _mm256_cmpgt_epi16(kZero,step2[20]); - __m256i s3_21_0 = _mm256_cmpgt_epi16(kZero,step2[21]); - __m256i s3_22_0 = _mm256_cmpgt_epi16(kZero,step2[22]); - __m256i s3_23_0 = _mm256_cmpgt_epi16(kZero,step2[23]); - __m256i s3_24_0 = _mm256_cmpgt_epi16(kZero,step2[24]); - __m256i s3_25_0 = _mm256_cmpgt_epi16(kZero,step2[25]); - __m256i s3_26_0 = _mm256_cmpgt_epi16(kZero,step2[26]); - __m256i s3_27_0 = _mm256_cmpgt_epi16(kZero,step2[27]); - __m256i s3_28_0 = _mm256_cmpgt_epi16(kZero,step1[28]); - __m256i s3_29_0 = _mm256_cmpgt_epi16(kZero,step1[29]); - __m256i s3_30_0 = _mm256_cmpgt_epi16(kZero,step1[30]); - __m256i s3_31_0 = _mm256_cmpgt_epi16(kZero,step1[31]); + __m256i s3_00_0 = _mm256_cmpgt_epi16(kZero, step2[0]); + __m256i s3_01_0 = _mm256_cmpgt_epi16(kZero, step2[1]); + __m256i s3_02_0 = 
_mm256_cmpgt_epi16(kZero, step2[2]); + __m256i s3_03_0 = _mm256_cmpgt_epi16(kZero, step2[3]); + __m256i s3_04_0 = _mm256_cmpgt_epi16(kZero, step2[4]); + __m256i s3_05_0 = _mm256_cmpgt_epi16(kZero, step2[5]); + __m256i s3_06_0 = _mm256_cmpgt_epi16(kZero, step2[6]); + __m256i s3_07_0 = _mm256_cmpgt_epi16(kZero, step2[7]); + __m256i s2_08_0 = _mm256_cmpgt_epi16(kZero, step2[8]); + __m256i s2_09_0 = _mm256_cmpgt_epi16(kZero, step2[9]); + __m256i s3_10_0 = _mm256_cmpgt_epi16(kZero, step2[10]); + __m256i s3_11_0 = _mm256_cmpgt_epi16(kZero, step2[11]); + __m256i s3_12_0 = _mm256_cmpgt_epi16(kZero, step2[12]); + __m256i s3_13_0 = _mm256_cmpgt_epi16(kZero, step2[13]); + __m256i s2_14_0 = _mm256_cmpgt_epi16(kZero, step2[14]); + __m256i s2_15_0 = _mm256_cmpgt_epi16(kZero, step2[15]); + __m256i s3_16_0 = _mm256_cmpgt_epi16(kZero, step1[16]); + __m256i s3_17_0 = _mm256_cmpgt_epi16(kZero, step1[17]); + __m256i s3_18_0 = _mm256_cmpgt_epi16(kZero, step1[18]); + __m256i s3_19_0 = _mm256_cmpgt_epi16(kZero, step1[19]); + __m256i s3_20_0 = _mm256_cmpgt_epi16(kZero, step2[20]); + __m256i s3_21_0 = _mm256_cmpgt_epi16(kZero, step2[21]); + __m256i s3_22_0 = _mm256_cmpgt_epi16(kZero, step2[22]); + __m256i s3_23_0 = _mm256_cmpgt_epi16(kZero, step2[23]); + __m256i s3_24_0 = _mm256_cmpgt_epi16(kZero, step2[24]); + __m256i s3_25_0 = _mm256_cmpgt_epi16(kZero, step2[25]); + __m256i s3_26_0 = _mm256_cmpgt_epi16(kZero, step2[26]); + __m256i s3_27_0 = _mm256_cmpgt_epi16(kZero, step2[27]); + __m256i s3_28_0 = _mm256_cmpgt_epi16(kZero, step1[28]); + __m256i s3_29_0 = _mm256_cmpgt_epi16(kZero, step1[29]); + __m256i s3_30_0 = _mm256_cmpgt_epi16(kZero, step1[30]); + __m256i s3_31_0 = _mm256_cmpgt_epi16(kZero, step1[31]); - step2[ 0] = _mm256_sub_epi16(step2[ 0], s3_00_0); - step2[ 1] = _mm256_sub_epi16(step2[ 1], s3_01_0); - step2[ 2] = _mm256_sub_epi16(step2[ 2], s3_02_0); - step2[ 3] = _mm256_sub_epi16(step2[ 3], s3_03_0); - step2[ 4] = _mm256_sub_epi16(step2[ 4], s3_04_0); - step2[ 5] = 
_mm256_sub_epi16(step2[ 5], s3_05_0); - step2[ 6] = _mm256_sub_epi16(step2[ 6], s3_06_0); - step2[ 7] = _mm256_sub_epi16(step2[ 7], s3_07_0); - step2[ 8] = _mm256_sub_epi16(step2[ 8], s2_08_0); - step2[ 9] = _mm256_sub_epi16(step2[ 9], s2_09_0); + step2[0] = _mm256_sub_epi16(step2[0], s3_00_0); + step2[1] = _mm256_sub_epi16(step2[1], s3_01_0); + step2[2] = _mm256_sub_epi16(step2[2], s3_02_0); + step2[3] = _mm256_sub_epi16(step2[3], s3_03_0); + step2[4] = _mm256_sub_epi16(step2[4], s3_04_0); + step2[5] = _mm256_sub_epi16(step2[5], s3_05_0); + step2[6] = _mm256_sub_epi16(step2[6], s3_06_0); + step2[7] = _mm256_sub_epi16(step2[7], s3_07_0); + step2[8] = _mm256_sub_epi16(step2[8], s2_08_0); + step2[9] = _mm256_sub_epi16(step2[9], s2_09_0); step2[10] = _mm256_sub_epi16(step2[10], s3_10_0); step2[11] = _mm256_sub_epi16(step2[11], s3_11_0); step2[12] = _mm256_sub_epi16(step2[12], s3_12_0); @@ -466,16 +508,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, step1[30] = _mm256_sub_epi16(step1[30], s3_30_0); step1[31] = _mm256_sub_epi16(step1[31], s3_31_0); - step2[ 0] = _mm256_add_epi16(step2[ 0], kOne); - step2[ 1] = _mm256_add_epi16(step2[ 1], kOne); - step2[ 2] = _mm256_add_epi16(step2[ 2], kOne); - step2[ 3] = _mm256_add_epi16(step2[ 3], kOne); - step2[ 4] = _mm256_add_epi16(step2[ 4], kOne); - step2[ 5] = _mm256_add_epi16(step2[ 5], kOne); - step2[ 6] = _mm256_add_epi16(step2[ 6], kOne); - step2[ 7] = _mm256_add_epi16(step2[ 7], kOne); - step2[ 8] = _mm256_add_epi16(step2[ 8], kOne); - step2[ 9] = _mm256_add_epi16(step2[ 9], kOne); + step2[0] = _mm256_add_epi16(step2[0], kOne); + step2[1] = _mm256_add_epi16(step2[1], kOne); + step2[2] = _mm256_add_epi16(step2[2], kOne); + step2[3] = _mm256_add_epi16(step2[3], kOne); + step2[4] = _mm256_add_epi16(step2[4], kOne); + step2[5] = _mm256_add_epi16(step2[5], kOne); + step2[6] = _mm256_add_epi16(step2[6], kOne); + step2[7] = _mm256_add_epi16(step2[7], kOne); + step2[8] = _mm256_add_epi16(step2[8], kOne); + step2[9] = 
_mm256_add_epi16(step2[9], kOne); step2[10] = _mm256_add_epi16(step2[10], kOne); step2[11] = _mm256_add_epi16(step2[11], kOne); step2[12] = _mm256_add_epi16(step2[12], kOne); @@ -499,16 +541,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, step1[30] = _mm256_add_epi16(step1[30], kOne); step1[31] = _mm256_add_epi16(step1[31], kOne); - step2[ 0] = _mm256_srai_epi16(step2[ 0], 2); - step2[ 1] = _mm256_srai_epi16(step2[ 1], 2); - step2[ 2] = _mm256_srai_epi16(step2[ 2], 2); - step2[ 3] = _mm256_srai_epi16(step2[ 3], 2); - step2[ 4] = _mm256_srai_epi16(step2[ 4], 2); - step2[ 5] = _mm256_srai_epi16(step2[ 5], 2); - step2[ 6] = _mm256_srai_epi16(step2[ 6], 2); - step2[ 7] = _mm256_srai_epi16(step2[ 7], 2); - step2[ 8] = _mm256_srai_epi16(step2[ 8], 2); - step2[ 9] = _mm256_srai_epi16(step2[ 9], 2); + step2[0] = _mm256_srai_epi16(step2[0], 2); + step2[1] = _mm256_srai_epi16(step2[1], 2); + step2[2] = _mm256_srai_epi16(step2[2], 2); + step2[3] = _mm256_srai_epi16(step2[3], 2); + step2[4] = _mm256_srai_epi16(step2[4], 2); + step2[5] = _mm256_srai_epi16(step2[5], 2); + step2[6] = _mm256_srai_epi16(step2[6], 2); + step2[7] = _mm256_srai_epi16(step2[7], 2); + step2[8] = _mm256_srai_epi16(step2[8], 2); + step2[9] = _mm256_srai_epi16(step2[9], 2); step2[10] = _mm256_srai_epi16(step2[10], 2); step2[11] = _mm256_srai_epi16(step2[11], 2); step2[12] = _mm256_srai_epi16(step2[12], 2); @@ -537,674 +579,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, #if FDCT32x32_HIGH_PRECISION if (pass == 0) { #endif - // Stage 3 - { - step3[0] = _mm256_add_epi16(step2[(8 - 1)], step2[0]); - step3[1] = _mm256_add_epi16(step2[(8 - 2)], step2[1]); - step3[2] = _mm256_add_epi16(step2[(8 - 3)], step2[2]); - step3[3] = _mm256_add_epi16(step2[(8 - 4)], step2[3]); - step3[4] = _mm256_sub_epi16(step2[(8 - 5)], step2[4]); - step3[5] = _mm256_sub_epi16(step2[(8 - 6)], step2[5]); - step3[6] = _mm256_sub_epi16(step2[(8 - 7)], step2[6]); - step3[7] = _mm256_sub_epi16(step2[(8 - 8)], step2[7]); - } - { - 
const __m256i s3_10_0 = _mm256_unpacklo_epi16(step2[13], step2[10]); - const __m256i s3_10_1 = _mm256_unpackhi_epi16(step2[13], step2[10]); - const __m256i s3_11_0 = _mm256_unpacklo_epi16(step2[12], step2[11]); - const __m256i s3_11_1 = _mm256_unpackhi_epi16(step2[12], step2[11]); - const __m256i s3_10_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_m16); - const __m256i s3_10_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_m16); - const __m256i s3_11_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_m16); - const __m256i s3_11_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_m16); - const __m256i s3_12_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_p16); - const __m256i s3_12_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_p16); - const __m256i s3_13_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_p16); - const __m256i s3_13_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_p16); - // dct_const_round_shift - const __m256i s3_10_4 = _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING); - const __m256i s3_10_5 = _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING); - const __m256i s3_11_4 = _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING); - const __m256i s3_11_5 = _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING); - const __m256i s3_12_4 = _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING); - const __m256i s3_12_5 = _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING); - const __m256i s3_13_4 = _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING); - const __m256i s3_13_5 = _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING); - const __m256i s3_10_6 = _mm256_srai_epi32(s3_10_4, DCT_CONST_BITS); - const __m256i s3_10_7 = _mm256_srai_epi32(s3_10_5, DCT_CONST_BITS); - const __m256i s3_11_6 = _mm256_srai_epi32(s3_11_4, DCT_CONST_BITS); - const __m256i s3_11_7 = _mm256_srai_epi32(s3_11_5, DCT_CONST_BITS); - const __m256i s3_12_6 = _mm256_srai_epi32(s3_12_4, DCT_CONST_BITS); - const __m256i s3_12_7 = _mm256_srai_epi32(s3_12_5, DCT_CONST_BITS); - const __m256i s3_13_6 = _mm256_srai_epi32(s3_13_4, DCT_CONST_BITS); - const __m256i 
s3_13_7 = _mm256_srai_epi32(s3_13_5, DCT_CONST_BITS); - // Combine - step3[10] = _mm256_packs_epi32(s3_10_6, s3_10_7); - step3[11] = _mm256_packs_epi32(s3_11_6, s3_11_7); - step3[12] = _mm256_packs_epi32(s3_12_6, s3_12_7); - step3[13] = _mm256_packs_epi32(s3_13_6, s3_13_7); - } - { - step3[16] = _mm256_add_epi16(step2[23], step1[16]); - step3[17] = _mm256_add_epi16(step2[22], step1[17]); - step3[18] = _mm256_add_epi16(step2[21], step1[18]); - step3[19] = _mm256_add_epi16(step2[20], step1[19]); - step3[20] = _mm256_sub_epi16(step1[19], step2[20]); - step3[21] = _mm256_sub_epi16(step1[18], step2[21]); - step3[22] = _mm256_sub_epi16(step1[17], step2[22]); - step3[23] = _mm256_sub_epi16(step1[16], step2[23]); - step3[24] = _mm256_sub_epi16(step1[31], step2[24]); - step3[25] = _mm256_sub_epi16(step1[30], step2[25]); - step3[26] = _mm256_sub_epi16(step1[29], step2[26]); - step3[27] = _mm256_sub_epi16(step1[28], step2[27]); - step3[28] = _mm256_add_epi16(step2[27], step1[28]); - step3[29] = _mm256_add_epi16(step2[26], step1[29]); - step3[30] = _mm256_add_epi16(step2[25], step1[30]); - step3[31] = _mm256_add_epi16(step2[24], step1[31]); - } - - // Stage 4 - { - step1[ 0] = _mm256_add_epi16(step3[ 3], step3[ 0]); - step1[ 1] = _mm256_add_epi16(step3[ 2], step3[ 1]); - step1[ 2] = _mm256_sub_epi16(step3[ 1], step3[ 2]); - step1[ 3] = _mm256_sub_epi16(step3[ 0], step3[ 3]); - step1[ 8] = _mm256_add_epi16(step3[11], step2[ 8]); - step1[ 9] = _mm256_add_epi16(step3[10], step2[ 9]); - step1[10] = _mm256_sub_epi16(step2[ 9], step3[10]); - step1[11] = _mm256_sub_epi16(step2[ 8], step3[11]); - step1[12] = _mm256_sub_epi16(step2[15], step3[12]); - step1[13] = _mm256_sub_epi16(step2[14], step3[13]); - step1[14] = _mm256_add_epi16(step3[13], step2[14]); - step1[15] = _mm256_add_epi16(step3[12], step2[15]); - } - { - const __m256i s1_05_0 = _mm256_unpacklo_epi16(step3[6], step3[5]); - const __m256i s1_05_1 = _mm256_unpackhi_epi16(step3[6], step3[5]); - const __m256i s1_05_2 = 
_mm256_madd_epi16(s1_05_0, k__cospi_p16_m16); - const __m256i s1_05_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_m16); - const __m256i s1_06_2 = _mm256_madd_epi16(s1_05_0, k__cospi_p16_p16); - const __m256i s1_06_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_p16); - // dct_const_round_shift - const __m256i s1_05_4 = _mm256_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING); - const __m256i s1_05_5 = _mm256_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING); - const __m256i s1_06_4 = _mm256_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING); - const __m256i s1_06_5 = _mm256_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING); - const __m256i s1_05_6 = _mm256_srai_epi32(s1_05_4, DCT_CONST_BITS); - const __m256i s1_05_7 = _mm256_srai_epi32(s1_05_5, DCT_CONST_BITS); - const __m256i s1_06_6 = _mm256_srai_epi32(s1_06_4, DCT_CONST_BITS); - const __m256i s1_06_7 = _mm256_srai_epi32(s1_06_5, DCT_CONST_BITS); - // Combine - step1[5] = _mm256_packs_epi32(s1_05_6, s1_05_7); - step1[6] = _mm256_packs_epi32(s1_06_6, s1_06_7); - } - { - const __m256i s1_18_0 = _mm256_unpacklo_epi16(step3[18], step3[29]); - const __m256i s1_18_1 = _mm256_unpackhi_epi16(step3[18], step3[29]); - const __m256i s1_19_0 = _mm256_unpacklo_epi16(step3[19], step3[28]); - const __m256i s1_19_1 = _mm256_unpackhi_epi16(step3[19], step3[28]); - const __m256i s1_20_0 = _mm256_unpacklo_epi16(step3[20], step3[27]); - const __m256i s1_20_1 = _mm256_unpackhi_epi16(step3[20], step3[27]); - const __m256i s1_21_0 = _mm256_unpacklo_epi16(step3[21], step3[26]); - const __m256i s1_21_1 = _mm256_unpackhi_epi16(step3[21], step3[26]); - const __m256i s1_18_2 = _mm256_madd_epi16(s1_18_0, k__cospi_m08_p24); - const __m256i s1_18_3 = _mm256_madd_epi16(s1_18_1, k__cospi_m08_p24); - const __m256i s1_19_2 = _mm256_madd_epi16(s1_19_0, k__cospi_m08_p24); - const __m256i s1_19_3 = _mm256_madd_epi16(s1_19_1, k__cospi_m08_p24); - const __m256i s1_20_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m24_m08); - const __m256i s1_20_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m24_m08); - 
const __m256i s1_21_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m24_m08); - const __m256i s1_21_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m24_m08); - const __m256i s1_26_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m08_p24); - const __m256i s1_26_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m08_p24); - const __m256i s1_27_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m08_p24); - const __m256i s1_27_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m08_p24); - const __m256i s1_28_2 = _mm256_madd_epi16(s1_19_0, k__cospi_p24_p08); - const __m256i s1_28_3 = _mm256_madd_epi16(s1_19_1, k__cospi_p24_p08); - const __m256i s1_29_2 = _mm256_madd_epi16(s1_18_0, k__cospi_p24_p08); - const __m256i s1_29_3 = _mm256_madd_epi16(s1_18_1, k__cospi_p24_p08); - // dct_const_round_shift - const __m256i s1_18_4 = _mm256_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING); - const __m256i s1_18_5 = _mm256_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING); - const __m256i s1_19_4 = _mm256_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING); - const __m256i s1_19_5 = _mm256_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING); - const __m256i s1_20_4 = _mm256_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING); - const __m256i s1_20_5 = _mm256_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING); - const __m256i s1_21_4 = _mm256_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING); - const __m256i s1_21_5 = _mm256_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING); - const __m256i s1_26_4 = _mm256_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING); - const __m256i s1_26_5 = _mm256_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING); - const __m256i s1_27_4 = _mm256_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING); - const __m256i s1_27_5 = _mm256_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING); - const __m256i s1_28_4 = _mm256_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING); - const __m256i s1_28_5 = _mm256_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING); - const __m256i s1_29_4 = _mm256_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING); - const __m256i s1_29_5 = _mm256_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING); - const __m256i s1_18_6 = 
_mm256_srai_epi32(s1_18_4, DCT_CONST_BITS); - const __m256i s1_18_7 = _mm256_srai_epi32(s1_18_5, DCT_CONST_BITS); - const __m256i s1_19_6 = _mm256_srai_epi32(s1_19_4, DCT_CONST_BITS); - const __m256i s1_19_7 = _mm256_srai_epi32(s1_19_5, DCT_CONST_BITS); - const __m256i s1_20_6 = _mm256_srai_epi32(s1_20_4, DCT_CONST_BITS); - const __m256i s1_20_7 = _mm256_srai_epi32(s1_20_5, DCT_CONST_BITS); - const __m256i s1_21_6 = _mm256_srai_epi32(s1_21_4, DCT_CONST_BITS); - const __m256i s1_21_7 = _mm256_srai_epi32(s1_21_5, DCT_CONST_BITS); - const __m256i s1_26_6 = _mm256_srai_epi32(s1_26_4, DCT_CONST_BITS); - const __m256i s1_26_7 = _mm256_srai_epi32(s1_26_5, DCT_CONST_BITS); - const __m256i s1_27_6 = _mm256_srai_epi32(s1_27_4, DCT_CONST_BITS); - const __m256i s1_27_7 = _mm256_srai_epi32(s1_27_5, DCT_CONST_BITS); - const __m256i s1_28_6 = _mm256_srai_epi32(s1_28_4, DCT_CONST_BITS); - const __m256i s1_28_7 = _mm256_srai_epi32(s1_28_5, DCT_CONST_BITS); - const __m256i s1_29_6 = _mm256_srai_epi32(s1_29_4, DCT_CONST_BITS); - const __m256i s1_29_7 = _mm256_srai_epi32(s1_29_5, DCT_CONST_BITS); - // Combine - step1[18] = _mm256_packs_epi32(s1_18_6, s1_18_7); - step1[19] = _mm256_packs_epi32(s1_19_6, s1_19_7); - step1[20] = _mm256_packs_epi32(s1_20_6, s1_20_7); - step1[21] = _mm256_packs_epi32(s1_21_6, s1_21_7); - step1[26] = _mm256_packs_epi32(s1_26_6, s1_26_7); - step1[27] = _mm256_packs_epi32(s1_27_6, s1_27_7); - step1[28] = _mm256_packs_epi32(s1_28_6, s1_28_7); - step1[29] = _mm256_packs_epi32(s1_29_6, s1_29_7); - } - // Stage 5 - { - step2[4] = _mm256_add_epi16(step1[5], step3[4]); - step2[5] = _mm256_sub_epi16(step3[4], step1[5]); - step2[6] = _mm256_sub_epi16(step3[7], step1[6]); - step2[7] = _mm256_add_epi16(step1[6], step3[7]); - } - { - const __m256i out_00_0 = _mm256_unpacklo_epi16(step1[0], step1[1]); - const __m256i out_00_1 = _mm256_unpackhi_epi16(step1[0], step1[1]); - const __m256i out_08_0 = _mm256_unpacklo_epi16(step1[2], step1[3]); - const __m256i out_08_1 = 
_mm256_unpackhi_epi16(step1[2], step1[3]); - const __m256i out_00_2 = _mm256_madd_epi16(out_00_0, k__cospi_p16_p16); - const __m256i out_00_3 = _mm256_madd_epi16(out_00_1, k__cospi_p16_p16); - const __m256i out_16_2 = _mm256_madd_epi16(out_00_0, k__cospi_p16_m16); - const __m256i out_16_3 = _mm256_madd_epi16(out_00_1, k__cospi_p16_m16); - const __m256i out_08_2 = _mm256_madd_epi16(out_08_0, k__cospi_p24_p08); - const __m256i out_08_3 = _mm256_madd_epi16(out_08_1, k__cospi_p24_p08); - const __m256i out_24_2 = _mm256_madd_epi16(out_08_0, k__cospi_m08_p24); - const __m256i out_24_3 = _mm256_madd_epi16(out_08_1, k__cospi_m08_p24); - // dct_const_round_shift - const __m256i out_00_4 = _mm256_add_epi32(out_00_2, k__DCT_CONST_ROUNDING); - const __m256i out_00_5 = _mm256_add_epi32(out_00_3, k__DCT_CONST_ROUNDING); - const __m256i out_16_4 = _mm256_add_epi32(out_16_2, k__DCT_CONST_ROUNDING); - const __m256i out_16_5 = _mm256_add_epi32(out_16_3, k__DCT_CONST_ROUNDING); - const __m256i out_08_4 = _mm256_add_epi32(out_08_2, k__DCT_CONST_ROUNDING); - const __m256i out_08_5 = _mm256_add_epi32(out_08_3, k__DCT_CONST_ROUNDING); - const __m256i out_24_4 = _mm256_add_epi32(out_24_2, k__DCT_CONST_ROUNDING); - const __m256i out_24_5 = _mm256_add_epi32(out_24_3, k__DCT_CONST_ROUNDING); - const __m256i out_00_6 = _mm256_srai_epi32(out_00_4, DCT_CONST_BITS); - const __m256i out_00_7 = _mm256_srai_epi32(out_00_5, DCT_CONST_BITS); - const __m256i out_16_6 = _mm256_srai_epi32(out_16_4, DCT_CONST_BITS); - const __m256i out_16_7 = _mm256_srai_epi32(out_16_5, DCT_CONST_BITS); - const __m256i out_08_6 = _mm256_srai_epi32(out_08_4, DCT_CONST_BITS); - const __m256i out_08_7 = _mm256_srai_epi32(out_08_5, DCT_CONST_BITS); - const __m256i out_24_6 = _mm256_srai_epi32(out_24_4, DCT_CONST_BITS); - const __m256i out_24_7 = _mm256_srai_epi32(out_24_5, DCT_CONST_BITS); - // Combine - out[ 0] = _mm256_packs_epi32(out_00_6, out_00_7); - out[16] = _mm256_packs_epi32(out_16_6, out_16_7); - out[ 8] = 
_mm256_packs_epi32(out_08_6, out_08_7); - out[24] = _mm256_packs_epi32(out_24_6, out_24_7); - } - { - const __m256i s2_09_0 = _mm256_unpacklo_epi16(step1[ 9], step1[14]); - const __m256i s2_09_1 = _mm256_unpackhi_epi16(step1[ 9], step1[14]); - const __m256i s2_10_0 = _mm256_unpacklo_epi16(step1[10], step1[13]); - const __m256i s2_10_1 = _mm256_unpackhi_epi16(step1[10], step1[13]); - const __m256i s2_09_2 = _mm256_madd_epi16(s2_09_0, k__cospi_m08_p24); - const __m256i s2_09_3 = _mm256_madd_epi16(s2_09_1, k__cospi_m08_p24); - const __m256i s2_10_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m24_m08); - const __m256i s2_10_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m24_m08); - const __m256i s2_13_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m08_p24); - const __m256i s2_13_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m08_p24); - const __m256i s2_14_2 = _mm256_madd_epi16(s2_09_0, k__cospi_p24_p08); - const __m256i s2_14_3 = _mm256_madd_epi16(s2_09_1, k__cospi_p24_p08); - // dct_const_round_shift - const __m256i s2_09_4 = _mm256_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING); - const __m256i s2_09_5 = _mm256_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING); - const __m256i s2_10_4 = _mm256_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING); - const __m256i s2_10_5 = _mm256_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING); - const __m256i s2_13_4 = _mm256_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING); - const __m256i s2_13_5 = _mm256_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING); - const __m256i s2_14_4 = _mm256_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING); - const __m256i s2_14_5 = _mm256_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING); - const __m256i s2_09_6 = _mm256_srai_epi32(s2_09_4, DCT_CONST_BITS); - const __m256i s2_09_7 = _mm256_srai_epi32(s2_09_5, DCT_CONST_BITS); - const __m256i s2_10_6 = _mm256_srai_epi32(s2_10_4, DCT_CONST_BITS); - const __m256i s2_10_7 = _mm256_srai_epi32(s2_10_5, DCT_CONST_BITS); - const __m256i s2_13_6 = _mm256_srai_epi32(s2_13_4, DCT_CONST_BITS); - const __m256i s2_13_7 = _mm256_srai_epi32(s2_13_5, 
DCT_CONST_BITS); - const __m256i s2_14_6 = _mm256_srai_epi32(s2_14_4, DCT_CONST_BITS); - const __m256i s2_14_7 = _mm256_srai_epi32(s2_14_5, DCT_CONST_BITS); - // Combine - step2[ 9] = _mm256_packs_epi32(s2_09_6, s2_09_7); - step2[10] = _mm256_packs_epi32(s2_10_6, s2_10_7); - step2[13] = _mm256_packs_epi32(s2_13_6, s2_13_7); - step2[14] = _mm256_packs_epi32(s2_14_6, s2_14_7); - } - { - step2[16] = _mm256_add_epi16(step1[19], step3[16]); - step2[17] = _mm256_add_epi16(step1[18], step3[17]); - step2[18] = _mm256_sub_epi16(step3[17], step1[18]); - step2[19] = _mm256_sub_epi16(step3[16], step1[19]); - step2[20] = _mm256_sub_epi16(step3[23], step1[20]); - step2[21] = _mm256_sub_epi16(step3[22], step1[21]); - step2[22] = _mm256_add_epi16(step1[21], step3[22]); - step2[23] = _mm256_add_epi16(step1[20], step3[23]); - step2[24] = _mm256_add_epi16(step1[27], step3[24]); - step2[25] = _mm256_add_epi16(step1[26], step3[25]); - step2[26] = _mm256_sub_epi16(step3[25], step1[26]); - step2[27] = _mm256_sub_epi16(step3[24], step1[27]); - step2[28] = _mm256_sub_epi16(step3[31], step1[28]); - step2[29] = _mm256_sub_epi16(step3[30], step1[29]); - step2[30] = _mm256_add_epi16(step1[29], step3[30]); - step2[31] = _mm256_add_epi16(step1[28], step3[31]); - } - // Stage 6 - { - const __m256i out_04_0 = _mm256_unpacklo_epi16(step2[4], step2[7]); - const __m256i out_04_1 = _mm256_unpackhi_epi16(step2[4], step2[7]); - const __m256i out_20_0 = _mm256_unpacklo_epi16(step2[5], step2[6]); - const __m256i out_20_1 = _mm256_unpackhi_epi16(step2[5], step2[6]); - const __m256i out_12_0 = _mm256_unpacklo_epi16(step2[5], step2[6]); - const __m256i out_12_1 = _mm256_unpackhi_epi16(step2[5], step2[6]); - const __m256i out_28_0 = _mm256_unpacklo_epi16(step2[4], step2[7]); - const __m256i out_28_1 = _mm256_unpackhi_epi16(step2[4], step2[7]); - const __m256i out_04_2 = _mm256_madd_epi16(out_04_0, k__cospi_p28_p04); - const __m256i out_04_3 = _mm256_madd_epi16(out_04_1, k__cospi_p28_p04); - const __m256i 
out_20_2 = _mm256_madd_epi16(out_20_0, k__cospi_p12_p20); - const __m256i out_20_3 = _mm256_madd_epi16(out_20_1, k__cospi_p12_p20); - const __m256i out_12_2 = _mm256_madd_epi16(out_12_0, k__cospi_m20_p12); - const __m256i out_12_3 = _mm256_madd_epi16(out_12_1, k__cospi_m20_p12); - const __m256i out_28_2 = _mm256_madd_epi16(out_28_0, k__cospi_m04_p28); - const __m256i out_28_3 = _mm256_madd_epi16(out_28_1, k__cospi_m04_p28); - // dct_const_round_shift - const __m256i out_04_4 = _mm256_add_epi32(out_04_2, k__DCT_CONST_ROUNDING); - const __m256i out_04_5 = _mm256_add_epi32(out_04_3, k__DCT_CONST_ROUNDING); - const __m256i out_20_4 = _mm256_add_epi32(out_20_2, k__DCT_CONST_ROUNDING); - const __m256i out_20_5 = _mm256_add_epi32(out_20_3, k__DCT_CONST_ROUNDING); - const __m256i out_12_4 = _mm256_add_epi32(out_12_2, k__DCT_CONST_ROUNDING); - const __m256i out_12_5 = _mm256_add_epi32(out_12_3, k__DCT_CONST_ROUNDING); - const __m256i out_28_4 = _mm256_add_epi32(out_28_2, k__DCT_CONST_ROUNDING); - const __m256i out_28_5 = _mm256_add_epi32(out_28_3, k__DCT_CONST_ROUNDING); - const __m256i out_04_6 = _mm256_srai_epi32(out_04_4, DCT_CONST_BITS); - const __m256i out_04_7 = _mm256_srai_epi32(out_04_5, DCT_CONST_BITS); - const __m256i out_20_6 = _mm256_srai_epi32(out_20_4, DCT_CONST_BITS); - const __m256i out_20_7 = _mm256_srai_epi32(out_20_5, DCT_CONST_BITS); - const __m256i out_12_6 = _mm256_srai_epi32(out_12_4, DCT_CONST_BITS); - const __m256i out_12_7 = _mm256_srai_epi32(out_12_5, DCT_CONST_BITS); - const __m256i out_28_6 = _mm256_srai_epi32(out_28_4, DCT_CONST_BITS); - const __m256i out_28_7 = _mm256_srai_epi32(out_28_5, DCT_CONST_BITS); - // Combine - out[ 4] = _mm256_packs_epi32(out_04_6, out_04_7); - out[20] = _mm256_packs_epi32(out_20_6, out_20_7); - out[12] = _mm256_packs_epi32(out_12_6, out_12_7); - out[28] = _mm256_packs_epi32(out_28_6, out_28_7); - } - { - step3[ 8] = _mm256_add_epi16(step2[ 9], step1[ 8]); - step3[ 9] = _mm256_sub_epi16(step1[ 8], step2[ 9]); - 
step3[10] = _mm256_sub_epi16(step1[11], step2[10]); - step3[11] = _mm256_add_epi16(step2[10], step1[11]); - step3[12] = _mm256_add_epi16(step2[13], step1[12]); - step3[13] = _mm256_sub_epi16(step1[12], step2[13]); - step3[14] = _mm256_sub_epi16(step1[15], step2[14]); - step3[15] = _mm256_add_epi16(step2[14], step1[15]); - } - { - const __m256i s3_17_0 = _mm256_unpacklo_epi16(step2[17], step2[30]); - const __m256i s3_17_1 = _mm256_unpackhi_epi16(step2[17], step2[30]); - const __m256i s3_18_0 = _mm256_unpacklo_epi16(step2[18], step2[29]); - const __m256i s3_18_1 = _mm256_unpackhi_epi16(step2[18], step2[29]); - const __m256i s3_21_0 = _mm256_unpacklo_epi16(step2[21], step2[26]); - const __m256i s3_21_1 = _mm256_unpackhi_epi16(step2[21], step2[26]); - const __m256i s3_22_0 = _mm256_unpacklo_epi16(step2[22], step2[25]); - const __m256i s3_22_1 = _mm256_unpackhi_epi16(step2[22], step2[25]); - const __m256i s3_17_2 = _mm256_madd_epi16(s3_17_0, k__cospi_m04_p28); - const __m256i s3_17_3 = _mm256_madd_epi16(s3_17_1, k__cospi_m04_p28); - const __m256i s3_18_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m28_m04); - const __m256i s3_18_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m28_m04); - const __m256i s3_21_2 = _mm256_madd_epi16(s3_21_0, k__cospi_m20_p12); - const __m256i s3_21_3 = _mm256_madd_epi16(s3_21_1, k__cospi_m20_p12); - const __m256i s3_22_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m12_m20); - const __m256i s3_22_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m12_m20); - const __m256i s3_25_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m20_p12); - const __m256i s3_25_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m20_p12); - const __m256i s3_26_2 = _mm256_madd_epi16(s3_21_0, k__cospi_p12_p20); - const __m256i s3_26_3 = _mm256_madd_epi16(s3_21_1, k__cospi_p12_p20); - const __m256i s3_29_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m04_p28); - const __m256i s3_29_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m04_p28); - const __m256i s3_30_2 = _mm256_madd_epi16(s3_17_0, k__cospi_p28_p04); - const __m256i 
s3_30_3 = _mm256_madd_epi16(s3_17_1, k__cospi_p28_p04); - // dct_const_round_shift - const __m256i s3_17_4 = _mm256_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING); - const __m256i s3_17_5 = _mm256_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING); - const __m256i s3_18_4 = _mm256_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING); - const __m256i s3_18_5 = _mm256_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING); - const __m256i s3_21_4 = _mm256_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING); - const __m256i s3_21_5 = _mm256_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING); - const __m256i s3_22_4 = _mm256_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING); - const __m256i s3_22_5 = _mm256_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING); - const __m256i s3_17_6 = _mm256_srai_epi32(s3_17_4, DCT_CONST_BITS); - const __m256i s3_17_7 = _mm256_srai_epi32(s3_17_5, DCT_CONST_BITS); - const __m256i s3_18_6 = _mm256_srai_epi32(s3_18_4, DCT_CONST_BITS); - const __m256i s3_18_7 = _mm256_srai_epi32(s3_18_5, DCT_CONST_BITS); - const __m256i s3_21_6 = _mm256_srai_epi32(s3_21_4, DCT_CONST_BITS); - const __m256i s3_21_7 = _mm256_srai_epi32(s3_21_5, DCT_CONST_BITS); - const __m256i s3_22_6 = _mm256_srai_epi32(s3_22_4, DCT_CONST_BITS); - const __m256i s3_22_7 = _mm256_srai_epi32(s3_22_5, DCT_CONST_BITS); - const __m256i s3_25_4 = _mm256_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING); - const __m256i s3_25_5 = _mm256_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING); - const __m256i s3_26_4 = _mm256_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING); - const __m256i s3_26_5 = _mm256_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING); - const __m256i s3_29_4 = _mm256_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING); - const __m256i s3_29_5 = _mm256_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING); - const __m256i s3_30_4 = _mm256_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING); - const __m256i s3_30_5 = _mm256_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING); - const __m256i s3_25_6 = _mm256_srai_epi32(s3_25_4, DCT_CONST_BITS); - const __m256i s3_25_7 = _mm256_srai_epi32(s3_25_5, DCT_CONST_BITS); 
- const __m256i s3_26_6 = _mm256_srai_epi32(s3_26_4, DCT_CONST_BITS); - const __m256i s3_26_7 = _mm256_srai_epi32(s3_26_5, DCT_CONST_BITS); - const __m256i s3_29_6 = _mm256_srai_epi32(s3_29_4, DCT_CONST_BITS); - const __m256i s3_29_7 = _mm256_srai_epi32(s3_29_5, DCT_CONST_BITS); - const __m256i s3_30_6 = _mm256_srai_epi32(s3_30_4, DCT_CONST_BITS); - const __m256i s3_30_7 = _mm256_srai_epi32(s3_30_5, DCT_CONST_BITS); - // Combine - step3[17] = _mm256_packs_epi32(s3_17_6, s3_17_7); - step3[18] = _mm256_packs_epi32(s3_18_6, s3_18_7); - step3[21] = _mm256_packs_epi32(s3_21_6, s3_21_7); - step3[22] = _mm256_packs_epi32(s3_22_6, s3_22_7); - // Combine - step3[25] = _mm256_packs_epi32(s3_25_6, s3_25_7); - step3[26] = _mm256_packs_epi32(s3_26_6, s3_26_7); - step3[29] = _mm256_packs_epi32(s3_29_6, s3_29_7); - step3[30] = _mm256_packs_epi32(s3_30_6, s3_30_7); - } - // Stage 7 - { - const __m256i out_02_0 = _mm256_unpacklo_epi16(step3[ 8], step3[15]); - const __m256i out_02_1 = _mm256_unpackhi_epi16(step3[ 8], step3[15]); - const __m256i out_18_0 = _mm256_unpacklo_epi16(step3[ 9], step3[14]); - const __m256i out_18_1 = _mm256_unpackhi_epi16(step3[ 9], step3[14]); - const __m256i out_10_0 = _mm256_unpacklo_epi16(step3[10], step3[13]); - const __m256i out_10_1 = _mm256_unpackhi_epi16(step3[10], step3[13]); - const __m256i out_26_0 = _mm256_unpacklo_epi16(step3[11], step3[12]); - const __m256i out_26_1 = _mm256_unpackhi_epi16(step3[11], step3[12]); - const __m256i out_02_2 = _mm256_madd_epi16(out_02_0, k__cospi_p30_p02); - const __m256i out_02_3 = _mm256_madd_epi16(out_02_1, k__cospi_p30_p02); - const __m256i out_18_2 = _mm256_madd_epi16(out_18_0, k__cospi_p14_p18); - const __m256i out_18_3 = _mm256_madd_epi16(out_18_1, k__cospi_p14_p18); - const __m256i out_10_2 = _mm256_madd_epi16(out_10_0, k__cospi_p22_p10); - const __m256i out_10_3 = _mm256_madd_epi16(out_10_1, k__cospi_p22_p10); - const __m256i out_26_2 = _mm256_madd_epi16(out_26_0, k__cospi_p06_p26); - const __m256i 
out_26_3 = _mm256_madd_epi16(out_26_1, k__cospi_p06_p26); - const __m256i out_06_2 = _mm256_madd_epi16(out_26_0, k__cospi_m26_p06); - const __m256i out_06_3 = _mm256_madd_epi16(out_26_1, k__cospi_m26_p06); - const __m256i out_22_2 = _mm256_madd_epi16(out_10_0, k__cospi_m10_p22); - const __m256i out_22_3 = _mm256_madd_epi16(out_10_1, k__cospi_m10_p22); - const __m256i out_14_2 = _mm256_madd_epi16(out_18_0, k__cospi_m18_p14); - const __m256i out_14_3 = _mm256_madd_epi16(out_18_1, k__cospi_m18_p14); - const __m256i out_30_2 = _mm256_madd_epi16(out_02_0, k__cospi_m02_p30); - const __m256i out_30_3 = _mm256_madd_epi16(out_02_1, k__cospi_m02_p30); - // dct_const_round_shift - const __m256i out_02_4 = _mm256_add_epi32(out_02_2, k__DCT_CONST_ROUNDING); - const __m256i out_02_5 = _mm256_add_epi32(out_02_3, k__DCT_CONST_ROUNDING); - const __m256i out_18_4 = _mm256_add_epi32(out_18_2, k__DCT_CONST_ROUNDING); - const __m256i out_18_5 = _mm256_add_epi32(out_18_3, k__DCT_CONST_ROUNDING); - const __m256i out_10_4 = _mm256_add_epi32(out_10_2, k__DCT_CONST_ROUNDING); - const __m256i out_10_5 = _mm256_add_epi32(out_10_3, k__DCT_CONST_ROUNDING); - const __m256i out_26_4 = _mm256_add_epi32(out_26_2, k__DCT_CONST_ROUNDING); - const __m256i out_26_5 = _mm256_add_epi32(out_26_3, k__DCT_CONST_ROUNDING); - const __m256i out_06_4 = _mm256_add_epi32(out_06_2, k__DCT_CONST_ROUNDING); - const __m256i out_06_5 = _mm256_add_epi32(out_06_3, k__DCT_CONST_ROUNDING); - const __m256i out_22_4 = _mm256_add_epi32(out_22_2, k__DCT_CONST_ROUNDING); - const __m256i out_22_5 = _mm256_add_epi32(out_22_3, k__DCT_CONST_ROUNDING); - const __m256i out_14_4 = _mm256_add_epi32(out_14_2, k__DCT_CONST_ROUNDING); - const __m256i out_14_5 = _mm256_add_epi32(out_14_3, k__DCT_CONST_ROUNDING); - const __m256i out_30_4 = _mm256_add_epi32(out_30_2, k__DCT_CONST_ROUNDING); - const __m256i out_30_5 = _mm256_add_epi32(out_30_3, k__DCT_CONST_ROUNDING); - const __m256i out_02_6 = _mm256_srai_epi32(out_02_4, DCT_CONST_BITS); - 
const __m256i out_02_7 = _mm256_srai_epi32(out_02_5, DCT_CONST_BITS); - const __m256i out_18_6 = _mm256_srai_epi32(out_18_4, DCT_CONST_BITS); - const __m256i out_18_7 = _mm256_srai_epi32(out_18_5, DCT_CONST_BITS); - const __m256i out_10_6 = _mm256_srai_epi32(out_10_4, DCT_CONST_BITS); - const __m256i out_10_7 = _mm256_srai_epi32(out_10_5, DCT_CONST_BITS); - const __m256i out_26_6 = _mm256_srai_epi32(out_26_4, DCT_CONST_BITS); - const __m256i out_26_7 = _mm256_srai_epi32(out_26_5, DCT_CONST_BITS); - const __m256i out_06_6 = _mm256_srai_epi32(out_06_4, DCT_CONST_BITS); - const __m256i out_06_7 = _mm256_srai_epi32(out_06_5, DCT_CONST_BITS); - const __m256i out_22_6 = _mm256_srai_epi32(out_22_4, DCT_CONST_BITS); - const __m256i out_22_7 = _mm256_srai_epi32(out_22_5, DCT_CONST_BITS); - const __m256i out_14_6 = _mm256_srai_epi32(out_14_4, DCT_CONST_BITS); - const __m256i out_14_7 = _mm256_srai_epi32(out_14_5, DCT_CONST_BITS); - const __m256i out_30_6 = _mm256_srai_epi32(out_30_4, DCT_CONST_BITS); - const __m256i out_30_7 = _mm256_srai_epi32(out_30_5, DCT_CONST_BITS); - // Combine - out[ 2] = _mm256_packs_epi32(out_02_6, out_02_7); - out[18] = _mm256_packs_epi32(out_18_6, out_18_7); - out[10] = _mm256_packs_epi32(out_10_6, out_10_7); - out[26] = _mm256_packs_epi32(out_26_6, out_26_7); - out[ 6] = _mm256_packs_epi32(out_06_6, out_06_7); - out[22] = _mm256_packs_epi32(out_22_6, out_22_7); - out[14] = _mm256_packs_epi32(out_14_6, out_14_7); - out[30] = _mm256_packs_epi32(out_30_6, out_30_7); - } - { - step1[16] = _mm256_add_epi16(step3[17], step2[16]); - step1[17] = _mm256_sub_epi16(step2[16], step3[17]); - step1[18] = _mm256_sub_epi16(step2[19], step3[18]); - step1[19] = _mm256_add_epi16(step3[18], step2[19]); - step1[20] = _mm256_add_epi16(step3[21], step2[20]); - step1[21] = _mm256_sub_epi16(step2[20], step3[21]); - step1[22] = _mm256_sub_epi16(step2[23], step3[22]); - step1[23] = _mm256_add_epi16(step3[22], step2[23]); - step1[24] = _mm256_add_epi16(step3[25], 
step2[24]); - step1[25] = _mm256_sub_epi16(step2[24], step3[25]); - step1[26] = _mm256_sub_epi16(step2[27], step3[26]); - step1[27] = _mm256_add_epi16(step3[26], step2[27]); - step1[28] = _mm256_add_epi16(step3[29], step2[28]); - step1[29] = _mm256_sub_epi16(step2[28], step3[29]); - step1[30] = _mm256_sub_epi16(step2[31], step3[30]); - step1[31] = _mm256_add_epi16(step3[30], step2[31]); - } - // Final stage --- outputs indices are bit-reversed. - { - const __m256i out_01_0 = _mm256_unpacklo_epi16(step1[16], step1[31]); - const __m256i out_01_1 = _mm256_unpackhi_epi16(step1[16], step1[31]); - const __m256i out_17_0 = _mm256_unpacklo_epi16(step1[17], step1[30]); - const __m256i out_17_1 = _mm256_unpackhi_epi16(step1[17], step1[30]); - const __m256i out_09_0 = _mm256_unpacklo_epi16(step1[18], step1[29]); - const __m256i out_09_1 = _mm256_unpackhi_epi16(step1[18], step1[29]); - const __m256i out_25_0 = _mm256_unpacklo_epi16(step1[19], step1[28]); - const __m256i out_25_1 = _mm256_unpackhi_epi16(step1[19], step1[28]); - const __m256i out_01_2 = _mm256_madd_epi16(out_01_0, k__cospi_p31_p01); - const __m256i out_01_3 = _mm256_madd_epi16(out_01_1, k__cospi_p31_p01); - const __m256i out_17_2 = _mm256_madd_epi16(out_17_0, k__cospi_p15_p17); - const __m256i out_17_3 = _mm256_madd_epi16(out_17_1, k__cospi_p15_p17); - const __m256i out_09_2 = _mm256_madd_epi16(out_09_0, k__cospi_p23_p09); - const __m256i out_09_3 = _mm256_madd_epi16(out_09_1, k__cospi_p23_p09); - const __m256i out_25_2 = _mm256_madd_epi16(out_25_0, k__cospi_p07_p25); - const __m256i out_25_3 = _mm256_madd_epi16(out_25_1, k__cospi_p07_p25); - const __m256i out_07_2 = _mm256_madd_epi16(out_25_0, k__cospi_m25_p07); - const __m256i out_07_3 = _mm256_madd_epi16(out_25_1, k__cospi_m25_p07); - const __m256i out_23_2 = _mm256_madd_epi16(out_09_0, k__cospi_m09_p23); - const __m256i out_23_3 = _mm256_madd_epi16(out_09_1, k__cospi_m09_p23); - const __m256i out_15_2 = _mm256_madd_epi16(out_17_0, k__cospi_m17_p15); - const 
__m256i out_15_3 = _mm256_madd_epi16(out_17_1, k__cospi_m17_p15); - const __m256i out_31_2 = _mm256_madd_epi16(out_01_0, k__cospi_m01_p31); - const __m256i out_31_3 = _mm256_madd_epi16(out_01_1, k__cospi_m01_p31); - // dct_const_round_shift - const __m256i out_01_4 = _mm256_add_epi32(out_01_2, k__DCT_CONST_ROUNDING); - const __m256i out_01_5 = _mm256_add_epi32(out_01_3, k__DCT_CONST_ROUNDING); - const __m256i out_17_4 = _mm256_add_epi32(out_17_2, k__DCT_CONST_ROUNDING); - const __m256i out_17_5 = _mm256_add_epi32(out_17_3, k__DCT_CONST_ROUNDING); - const __m256i out_09_4 = _mm256_add_epi32(out_09_2, k__DCT_CONST_ROUNDING); - const __m256i out_09_5 = _mm256_add_epi32(out_09_3, k__DCT_CONST_ROUNDING); - const __m256i out_25_4 = _mm256_add_epi32(out_25_2, k__DCT_CONST_ROUNDING); - const __m256i out_25_5 = _mm256_add_epi32(out_25_3, k__DCT_CONST_ROUNDING); - const __m256i out_07_4 = _mm256_add_epi32(out_07_2, k__DCT_CONST_ROUNDING); - const __m256i out_07_5 = _mm256_add_epi32(out_07_3, k__DCT_CONST_ROUNDING); - const __m256i out_23_4 = _mm256_add_epi32(out_23_2, k__DCT_CONST_ROUNDING); - const __m256i out_23_5 = _mm256_add_epi32(out_23_3, k__DCT_CONST_ROUNDING); - const __m256i out_15_4 = _mm256_add_epi32(out_15_2, k__DCT_CONST_ROUNDING); - const __m256i out_15_5 = _mm256_add_epi32(out_15_3, k__DCT_CONST_ROUNDING); - const __m256i out_31_4 = _mm256_add_epi32(out_31_2, k__DCT_CONST_ROUNDING); - const __m256i out_31_5 = _mm256_add_epi32(out_31_3, k__DCT_CONST_ROUNDING); - const __m256i out_01_6 = _mm256_srai_epi32(out_01_4, DCT_CONST_BITS); - const __m256i out_01_7 = _mm256_srai_epi32(out_01_5, DCT_CONST_BITS); - const __m256i out_17_6 = _mm256_srai_epi32(out_17_4, DCT_CONST_BITS); - const __m256i out_17_7 = _mm256_srai_epi32(out_17_5, DCT_CONST_BITS); - const __m256i out_09_6 = _mm256_srai_epi32(out_09_4, DCT_CONST_BITS); - const __m256i out_09_7 = _mm256_srai_epi32(out_09_5, DCT_CONST_BITS); - const __m256i out_25_6 = _mm256_srai_epi32(out_25_4, DCT_CONST_BITS); - 
const __m256i out_25_7 = _mm256_srai_epi32(out_25_5, DCT_CONST_BITS); - const __m256i out_07_6 = _mm256_srai_epi32(out_07_4, DCT_CONST_BITS); - const __m256i out_07_7 = _mm256_srai_epi32(out_07_5, DCT_CONST_BITS); - const __m256i out_23_6 = _mm256_srai_epi32(out_23_4, DCT_CONST_BITS); - const __m256i out_23_7 = _mm256_srai_epi32(out_23_5, DCT_CONST_BITS); - const __m256i out_15_6 = _mm256_srai_epi32(out_15_4, DCT_CONST_BITS); - const __m256i out_15_7 = _mm256_srai_epi32(out_15_5, DCT_CONST_BITS); - const __m256i out_31_6 = _mm256_srai_epi32(out_31_4, DCT_CONST_BITS); - const __m256i out_31_7 = _mm256_srai_epi32(out_31_5, DCT_CONST_BITS); - // Combine - out[ 1] = _mm256_packs_epi32(out_01_6, out_01_7); - out[17] = _mm256_packs_epi32(out_17_6, out_17_7); - out[ 9] = _mm256_packs_epi32(out_09_6, out_09_7); - out[25] = _mm256_packs_epi32(out_25_6, out_25_7); - out[ 7] = _mm256_packs_epi32(out_07_6, out_07_7); - out[23] = _mm256_packs_epi32(out_23_6, out_23_7); - out[15] = _mm256_packs_epi32(out_15_6, out_15_7); - out[31] = _mm256_packs_epi32(out_31_6, out_31_7); - } - { - const __m256i out_05_0 = _mm256_unpacklo_epi16(step1[20], step1[27]); - const __m256i out_05_1 = _mm256_unpackhi_epi16(step1[20], step1[27]); - const __m256i out_21_0 = _mm256_unpacklo_epi16(step1[21], step1[26]); - const __m256i out_21_1 = _mm256_unpackhi_epi16(step1[21], step1[26]); - const __m256i out_13_0 = _mm256_unpacklo_epi16(step1[22], step1[25]); - const __m256i out_13_1 = _mm256_unpackhi_epi16(step1[22], step1[25]); - const __m256i out_29_0 = _mm256_unpacklo_epi16(step1[23], step1[24]); - const __m256i out_29_1 = _mm256_unpackhi_epi16(step1[23], step1[24]); - const __m256i out_05_2 = _mm256_madd_epi16(out_05_0, k__cospi_p27_p05); - const __m256i out_05_3 = _mm256_madd_epi16(out_05_1, k__cospi_p27_p05); - const __m256i out_21_2 = _mm256_madd_epi16(out_21_0, k__cospi_p11_p21); - const __m256i out_21_3 = _mm256_madd_epi16(out_21_1, k__cospi_p11_p21); - const __m256i out_13_2 = 
_mm256_madd_epi16(out_13_0, k__cospi_p19_p13); - const __m256i out_13_3 = _mm256_madd_epi16(out_13_1, k__cospi_p19_p13); - const __m256i out_29_2 = _mm256_madd_epi16(out_29_0, k__cospi_p03_p29); - const __m256i out_29_3 = _mm256_madd_epi16(out_29_1, k__cospi_p03_p29); - const __m256i out_03_2 = _mm256_madd_epi16(out_29_0, k__cospi_m29_p03); - const __m256i out_03_3 = _mm256_madd_epi16(out_29_1, k__cospi_m29_p03); - const __m256i out_19_2 = _mm256_madd_epi16(out_13_0, k__cospi_m13_p19); - const __m256i out_19_3 = _mm256_madd_epi16(out_13_1, k__cospi_m13_p19); - const __m256i out_11_2 = _mm256_madd_epi16(out_21_0, k__cospi_m21_p11); - const __m256i out_11_3 = _mm256_madd_epi16(out_21_1, k__cospi_m21_p11); - const __m256i out_27_2 = _mm256_madd_epi16(out_05_0, k__cospi_m05_p27); - const __m256i out_27_3 = _mm256_madd_epi16(out_05_1, k__cospi_m05_p27); - // dct_const_round_shift - const __m256i out_05_4 = _mm256_add_epi32(out_05_2, k__DCT_CONST_ROUNDING); - const __m256i out_05_5 = _mm256_add_epi32(out_05_3, k__DCT_CONST_ROUNDING); - const __m256i out_21_4 = _mm256_add_epi32(out_21_2, k__DCT_CONST_ROUNDING); - const __m256i out_21_5 = _mm256_add_epi32(out_21_3, k__DCT_CONST_ROUNDING); - const __m256i out_13_4 = _mm256_add_epi32(out_13_2, k__DCT_CONST_ROUNDING); - const __m256i out_13_5 = _mm256_add_epi32(out_13_3, k__DCT_CONST_ROUNDING); - const __m256i out_29_4 = _mm256_add_epi32(out_29_2, k__DCT_CONST_ROUNDING); - const __m256i out_29_5 = _mm256_add_epi32(out_29_3, k__DCT_CONST_ROUNDING); - const __m256i out_03_4 = _mm256_add_epi32(out_03_2, k__DCT_CONST_ROUNDING); - const __m256i out_03_5 = _mm256_add_epi32(out_03_3, k__DCT_CONST_ROUNDING); - const __m256i out_19_4 = _mm256_add_epi32(out_19_2, k__DCT_CONST_ROUNDING); - const __m256i out_19_5 = _mm256_add_epi32(out_19_3, k__DCT_CONST_ROUNDING); - const __m256i out_11_4 = _mm256_add_epi32(out_11_2, k__DCT_CONST_ROUNDING); - const __m256i out_11_5 = _mm256_add_epi32(out_11_3, k__DCT_CONST_ROUNDING); - const __m256i 
out_27_4 = _mm256_add_epi32(out_27_2, k__DCT_CONST_ROUNDING); - const __m256i out_27_5 = _mm256_add_epi32(out_27_3, k__DCT_CONST_ROUNDING); - const __m256i out_05_6 = _mm256_srai_epi32(out_05_4, DCT_CONST_BITS); - const __m256i out_05_7 = _mm256_srai_epi32(out_05_5, DCT_CONST_BITS); - const __m256i out_21_6 = _mm256_srai_epi32(out_21_4, DCT_CONST_BITS); - const __m256i out_21_7 = _mm256_srai_epi32(out_21_5, DCT_CONST_BITS); - const __m256i out_13_6 = _mm256_srai_epi32(out_13_4, DCT_CONST_BITS); - const __m256i out_13_7 = _mm256_srai_epi32(out_13_5, DCT_CONST_BITS); - const __m256i out_29_6 = _mm256_srai_epi32(out_29_4, DCT_CONST_BITS); - const __m256i out_29_7 = _mm256_srai_epi32(out_29_5, DCT_CONST_BITS); - const __m256i out_03_6 = _mm256_srai_epi32(out_03_4, DCT_CONST_BITS); - const __m256i out_03_7 = _mm256_srai_epi32(out_03_5, DCT_CONST_BITS); - const __m256i out_19_6 = _mm256_srai_epi32(out_19_4, DCT_CONST_BITS); - const __m256i out_19_7 = _mm256_srai_epi32(out_19_5, DCT_CONST_BITS); - const __m256i out_11_6 = _mm256_srai_epi32(out_11_4, DCT_CONST_BITS); - const __m256i out_11_7 = _mm256_srai_epi32(out_11_5, DCT_CONST_BITS); - const __m256i out_27_6 = _mm256_srai_epi32(out_27_4, DCT_CONST_BITS); - const __m256i out_27_7 = _mm256_srai_epi32(out_27_5, DCT_CONST_BITS); - // Combine - out[ 5] = _mm256_packs_epi32(out_05_6, out_05_7); - out[21] = _mm256_packs_epi32(out_21_6, out_21_7); - out[13] = _mm256_packs_epi32(out_13_6, out_13_7); - out[29] = _mm256_packs_epi32(out_29_6, out_29_7); - out[ 3] = _mm256_packs_epi32(out_03_6, out_03_7); - out[19] = _mm256_packs_epi32(out_19_6, out_19_7); - out[11] = _mm256_packs_epi32(out_11_6, out_11_7); - out[27] = _mm256_packs_epi32(out_27_6, out_27_7); - } -#if FDCT32x32_HIGH_PRECISION - } else { - __m256i lstep1[64], lstep2[64], lstep3[64]; - __m256i u[32], v[32], sign[16]; - const __m256i K32One = _mm256_set_epi32(1, 1, 1, 1, 1, 1, 1, 1); - // start using 32-bit operations - // stage 3 + // Stage 3 { - // expanding to 
32-bit length priori to addition operations - lstep2[ 0] = _mm256_unpacklo_epi16(step2[ 0], kZero); - lstep2[ 1] = _mm256_unpackhi_epi16(step2[ 0], kZero); - lstep2[ 2] = _mm256_unpacklo_epi16(step2[ 1], kZero); - lstep2[ 3] = _mm256_unpackhi_epi16(step2[ 1], kZero); - lstep2[ 4] = _mm256_unpacklo_epi16(step2[ 2], kZero); - lstep2[ 5] = _mm256_unpackhi_epi16(step2[ 2], kZero); - lstep2[ 6] = _mm256_unpacklo_epi16(step2[ 3], kZero); - lstep2[ 7] = _mm256_unpackhi_epi16(step2[ 3], kZero); - lstep2[ 8] = _mm256_unpacklo_epi16(step2[ 4], kZero); - lstep2[ 9] = _mm256_unpackhi_epi16(step2[ 4], kZero); - lstep2[10] = _mm256_unpacklo_epi16(step2[ 5], kZero); - lstep2[11] = _mm256_unpackhi_epi16(step2[ 5], kZero); - lstep2[12] = _mm256_unpacklo_epi16(step2[ 6], kZero); - lstep2[13] = _mm256_unpackhi_epi16(step2[ 6], kZero); - lstep2[14] = _mm256_unpacklo_epi16(step2[ 7], kZero); - lstep2[15] = _mm256_unpackhi_epi16(step2[ 7], kZero); - lstep2[ 0] = _mm256_madd_epi16(lstep2[ 0], kOne); - lstep2[ 1] = _mm256_madd_epi16(lstep2[ 1], kOne); - lstep2[ 2] = _mm256_madd_epi16(lstep2[ 2], kOne); - lstep2[ 3] = _mm256_madd_epi16(lstep2[ 3], kOne); - lstep2[ 4] = _mm256_madd_epi16(lstep2[ 4], kOne); - lstep2[ 5] = _mm256_madd_epi16(lstep2[ 5], kOne); - lstep2[ 6] = _mm256_madd_epi16(lstep2[ 6], kOne); - lstep2[ 7] = _mm256_madd_epi16(lstep2[ 7], kOne); - lstep2[ 8] = _mm256_madd_epi16(lstep2[ 8], kOne); - lstep2[ 9] = _mm256_madd_epi16(lstep2[ 9], kOne); - lstep2[10] = _mm256_madd_epi16(lstep2[10], kOne); - lstep2[11] = _mm256_madd_epi16(lstep2[11], kOne); - lstep2[12] = _mm256_madd_epi16(lstep2[12], kOne); - lstep2[13] = _mm256_madd_epi16(lstep2[13], kOne); - lstep2[14] = _mm256_madd_epi16(lstep2[14], kOne); - lstep2[15] = _mm256_madd_epi16(lstep2[15], kOne); - - lstep3[ 0] = _mm256_add_epi32(lstep2[14], lstep2[ 0]); - lstep3[ 1] = _mm256_add_epi32(lstep2[15], lstep2[ 1]); - lstep3[ 2] = _mm256_add_epi32(lstep2[12], lstep2[ 2]); - lstep3[ 3] = _mm256_add_epi32(lstep2[13], lstep2[ 
3]); - lstep3[ 4] = _mm256_add_epi32(lstep2[10], lstep2[ 4]); - lstep3[ 5] = _mm256_add_epi32(lstep2[11], lstep2[ 5]); - lstep3[ 6] = _mm256_add_epi32(lstep2[ 8], lstep2[ 6]); - lstep3[ 7] = _mm256_add_epi32(lstep2[ 9], lstep2[ 7]); - lstep3[ 8] = _mm256_sub_epi32(lstep2[ 6], lstep2[ 8]); - lstep3[ 9] = _mm256_sub_epi32(lstep2[ 7], lstep2[ 9]); - lstep3[10] = _mm256_sub_epi32(lstep2[ 4], lstep2[10]); - lstep3[11] = _mm256_sub_epi32(lstep2[ 5], lstep2[11]); - lstep3[12] = _mm256_sub_epi32(lstep2[ 2], lstep2[12]); - lstep3[13] = _mm256_sub_epi32(lstep2[ 3], lstep2[13]); - lstep3[14] = _mm256_sub_epi32(lstep2[ 0], lstep2[14]); - lstep3[15] = _mm256_sub_epi32(lstep2[ 1], lstep2[15]); + step3[0] = _mm256_add_epi16(step2[(8 - 1)], step2[0]); + step3[1] = _mm256_add_epi16(step2[(8 - 2)], step2[1]); + step3[2] = _mm256_add_epi16(step2[(8 - 3)], step2[2]); + step3[3] = _mm256_add_epi16(step2[(8 - 4)], step2[3]); + step3[4] = _mm256_sub_epi16(step2[(8 - 5)], step2[4]); + step3[5] = _mm256_sub_epi16(step2[(8 - 6)], step2[5]); + step3[6] = _mm256_sub_epi16(step2[(8 - 7)], step2[6]); + step3[7] = _mm256_sub_epi16(step2[(8 - 8)], step2[7]); } { const __m256i s3_10_0 = _mm256_unpacklo_epi16(step2[13], step2[10]); @@ -1220,14 +604,860 @@ void FDCT32x32_2D_AVX2(const int16_t *input, const __m256i s3_13_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_p16); const __m256i s3_13_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_p16); // dct_const_round_shift - const __m256i s3_10_4 = _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING); - const __m256i s3_10_5 = _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING); - const __m256i s3_11_4 = _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING); - const __m256i s3_11_5 = _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING); - const __m256i s3_12_4 = _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING); - const __m256i s3_12_5 = _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING); - const __m256i s3_13_4 = _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING); - const __m256i 
s3_13_5 = _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING); + const __m256i s3_10_4 = + _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING); + const __m256i s3_10_5 = + _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING); + const __m256i s3_11_4 = + _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING); + const __m256i s3_11_5 = + _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING); + const __m256i s3_12_4 = + _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING); + const __m256i s3_12_5 = + _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING); + const __m256i s3_13_4 = + _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING); + const __m256i s3_13_5 = + _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING); + const __m256i s3_10_6 = _mm256_srai_epi32(s3_10_4, DCT_CONST_BITS); + const __m256i s3_10_7 = _mm256_srai_epi32(s3_10_5, DCT_CONST_BITS); + const __m256i s3_11_6 = _mm256_srai_epi32(s3_11_4, DCT_CONST_BITS); + const __m256i s3_11_7 = _mm256_srai_epi32(s3_11_5, DCT_CONST_BITS); + const __m256i s3_12_6 = _mm256_srai_epi32(s3_12_4, DCT_CONST_BITS); + const __m256i s3_12_7 = _mm256_srai_epi32(s3_12_5, DCT_CONST_BITS); + const __m256i s3_13_6 = _mm256_srai_epi32(s3_13_4, DCT_CONST_BITS); + const __m256i s3_13_7 = _mm256_srai_epi32(s3_13_5, DCT_CONST_BITS); + // Combine + step3[10] = _mm256_packs_epi32(s3_10_6, s3_10_7); + step3[11] = _mm256_packs_epi32(s3_11_6, s3_11_7); + step3[12] = _mm256_packs_epi32(s3_12_6, s3_12_7); + step3[13] = _mm256_packs_epi32(s3_13_6, s3_13_7); + } + { + step3[16] = _mm256_add_epi16(step2[23], step1[16]); + step3[17] = _mm256_add_epi16(step2[22], step1[17]); + step3[18] = _mm256_add_epi16(step2[21], step1[18]); + step3[19] = _mm256_add_epi16(step2[20], step1[19]); + step3[20] = _mm256_sub_epi16(step1[19], step2[20]); + step3[21] = _mm256_sub_epi16(step1[18], step2[21]); + step3[22] = _mm256_sub_epi16(step1[17], step2[22]); + step3[23] = _mm256_sub_epi16(step1[16], step2[23]); + step3[24] = _mm256_sub_epi16(step1[31], step2[24]); + step3[25] = 
_mm256_sub_epi16(step1[30], step2[25]); + step3[26] = _mm256_sub_epi16(step1[29], step2[26]); + step3[27] = _mm256_sub_epi16(step1[28], step2[27]); + step3[28] = _mm256_add_epi16(step2[27], step1[28]); + step3[29] = _mm256_add_epi16(step2[26], step1[29]); + step3[30] = _mm256_add_epi16(step2[25], step1[30]); + step3[31] = _mm256_add_epi16(step2[24], step1[31]); + } + + // Stage 4 + { + step1[0] = _mm256_add_epi16(step3[3], step3[0]); + step1[1] = _mm256_add_epi16(step3[2], step3[1]); + step1[2] = _mm256_sub_epi16(step3[1], step3[2]); + step1[3] = _mm256_sub_epi16(step3[0], step3[3]); + step1[8] = _mm256_add_epi16(step3[11], step2[8]); + step1[9] = _mm256_add_epi16(step3[10], step2[9]); + step1[10] = _mm256_sub_epi16(step2[9], step3[10]); + step1[11] = _mm256_sub_epi16(step2[8], step3[11]); + step1[12] = _mm256_sub_epi16(step2[15], step3[12]); + step1[13] = _mm256_sub_epi16(step2[14], step3[13]); + step1[14] = _mm256_add_epi16(step3[13], step2[14]); + step1[15] = _mm256_add_epi16(step3[12], step2[15]); + } + { + const __m256i s1_05_0 = _mm256_unpacklo_epi16(step3[6], step3[5]); + const __m256i s1_05_1 = _mm256_unpackhi_epi16(step3[6], step3[5]); + const __m256i s1_05_2 = _mm256_madd_epi16(s1_05_0, k__cospi_p16_m16); + const __m256i s1_05_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_m16); + const __m256i s1_06_2 = _mm256_madd_epi16(s1_05_0, k__cospi_p16_p16); + const __m256i s1_06_3 = _mm256_madd_epi16(s1_05_1, k__cospi_p16_p16); + // dct_const_round_shift + const __m256i s1_05_4 = + _mm256_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING); + const __m256i s1_05_5 = + _mm256_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING); + const __m256i s1_06_4 = + _mm256_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING); + const __m256i s1_06_5 = + _mm256_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING); + const __m256i s1_05_6 = _mm256_srai_epi32(s1_05_4, DCT_CONST_BITS); + const __m256i s1_05_7 = _mm256_srai_epi32(s1_05_5, DCT_CONST_BITS); + const __m256i s1_06_6 = _mm256_srai_epi32(s1_06_4, DCT_CONST_BITS); 
+ const __m256i s1_06_7 = _mm256_srai_epi32(s1_06_5, DCT_CONST_BITS); + // Combine + step1[5] = _mm256_packs_epi32(s1_05_6, s1_05_7); + step1[6] = _mm256_packs_epi32(s1_06_6, s1_06_7); + } + { + const __m256i s1_18_0 = _mm256_unpacklo_epi16(step3[18], step3[29]); + const __m256i s1_18_1 = _mm256_unpackhi_epi16(step3[18], step3[29]); + const __m256i s1_19_0 = _mm256_unpacklo_epi16(step3[19], step3[28]); + const __m256i s1_19_1 = _mm256_unpackhi_epi16(step3[19], step3[28]); + const __m256i s1_20_0 = _mm256_unpacklo_epi16(step3[20], step3[27]); + const __m256i s1_20_1 = _mm256_unpackhi_epi16(step3[20], step3[27]); + const __m256i s1_21_0 = _mm256_unpacklo_epi16(step3[21], step3[26]); + const __m256i s1_21_1 = _mm256_unpackhi_epi16(step3[21], step3[26]); + const __m256i s1_18_2 = _mm256_madd_epi16(s1_18_0, k__cospi_m08_p24); + const __m256i s1_18_3 = _mm256_madd_epi16(s1_18_1, k__cospi_m08_p24); + const __m256i s1_19_2 = _mm256_madd_epi16(s1_19_0, k__cospi_m08_p24); + const __m256i s1_19_3 = _mm256_madd_epi16(s1_19_1, k__cospi_m08_p24); + const __m256i s1_20_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m24_m08); + const __m256i s1_20_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m24_m08); + const __m256i s1_21_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m24_m08); + const __m256i s1_21_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m24_m08); + const __m256i s1_26_2 = _mm256_madd_epi16(s1_21_0, k__cospi_m08_p24); + const __m256i s1_26_3 = _mm256_madd_epi16(s1_21_1, k__cospi_m08_p24); + const __m256i s1_27_2 = _mm256_madd_epi16(s1_20_0, k__cospi_m08_p24); + const __m256i s1_27_3 = _mm256_madd_epi16(s1_20_1, k__cospi_m08_p24); + const __m256i s1_28_2 = _mm256_madd_epi16(s1_19_0, k__cospi_p24_p08); + const __m256i s1_28_3 = _mm256_madd_epi16(s1_19_1, k__cospi_p24_p08); + const __m256i s1_29_2 = _mm256_madd_epi16(s1_18_0, k__cospi_p24_p08); + const __m256i s1_29_3 = _mm256_madd_epi16(s1_18_1, k__cospi_p24_p08); + // dct_const_round_shift + const __m256i s1_18_4 = + _mm256_add_epi32(s1_18_2, 
k__DCT_CONST_ROUNDING); + const __m256i s1_18_5 = + _mm256_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING); + const __m256i s1_19_4 = + _mm256_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING); + const __m256i s1_19_5 = + _mm256_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING); + const __m256i s1_20_4 = + _mm256_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING); + const __m256i s1_20_5 = + _mm256_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING); + const __m256i s1_21_4 = + _mm256_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING); + const __m256i s1_21_5 = + _mm256_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING); + const __m256i s1_26_4 = + _mm256_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING); + const __m256i s1_26_5 = + _mm256_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING); + const __m256i s1_27_4 = + _mm256_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING); + const __m256i s1_27_5 = + _mm256_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING); + const __m256i s1_28_4 = + _mm256_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING); + const __m256i s1_28_5 = + _mm256_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING); + const __m256i s1_29_4 = + _mm256_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING); + const __m256i s1_29_5 = + _mm256_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING); + const __m256i s1_18_6 = _mm256_srai_epi32(s1_18_4, DCT_CONST_BITS); + const __m256i s1_18_7 = _mm256_srai_epi32(s1_18_5, DCT_CONST_BITS); + const __m256i s1_19_6 = _mm256_srai_epi32(s1_19_4, DCT_CONST_BITS); + const __m256i s1_19_7 = _mm256_srai_epi32(s1_19_5, DCT_CONST_BITS); + const __m256i s1_20_6 = _mm256_srai_epi32(s1_20_4, DCT_CONST_BITS); + const __m256i s1_20_7 = _mm256_srai_epi32(s1_20_5, DCT_CONST_BITS); + const __m256i s1_21_6 = _mm256_srai_epi32(s1_21_4, DCT_CONST_BITS); + const __m256i s1_21_7 = _mm256_srai_epi32(s1_21_5, DCT_CONST_BITS); + const __m256i s1_26_6 = _mm256_srai_epi32(s1_26_4, DCT_CONST_BITS); + const __m256i s1_26_7 = _mm256_srai_epi32(s1_26_5, DCT_CONST_BITS); + const __m256i s1_27_6 = _mm256_srai_epi32(s1_27_4, DCT_CONST_BITS); + const __m256i s1_27_7 = 
_mm256_srai_epi32(s1_27_5, DCT_CONST_BITS); + const __m256i s1_28_6 = _mm256_srai_epi32(s1_28_4, DCT_CONST_BITS); + const __m256i s1_28_7 = _mm256_srai_epi32(s1_28_5, DCT_CONST_BITS); + const __m256i s1_29_6 = _mm256_srai_epi32(s1_29_4, DCT_CONST_BITS); + const __m256i s1_29_7 = _mm256_srai_epi32(s1_29_5, DCT_CONST_BITS); + // Combine + step1[18] = _mm256_packs_epi32(s1_18_6, s1_18_7); + step1[19] = _mm256_packs_epi32(s1_19_6, s1_19_7); + step1[20] = _mm256_packs_epi32(s1_20_6, s1_20_7); + step1[21] = _mm256_packs_epi32(s1_21_6, s1_21_7); + step1[26] = _mm256_packs_epi32(s1_26_6, s1_26_7); + step1[27] = _mm256_packs_epi32(s1_27_6, s1_27_7); + step1[28] = _mm256_packs_epi32(s1_28_6, s1_28_7); + step1[29] = _mm256_packs_epi32(s1_29_6, s1_29_7); + } + // Stage 5 + { + step2[4] = _mm256_add_epi16(step1[5], step3[4]); + step2[5] = _mm256_sub_epi16(step3[4], step1[5]); + step2[6] = _mm256_sub_epi16(step3[7], step1[6]); + step2[7] = _mm256_add_epi16(step1[6], step3[7]); + } + { + const __m256i out_00_0 = _mm256_unpacklo_epi16(step1[0], step1[1]); + const __m256i out_00_1 = _mm256_unpackhi_epi16(step1[0], step1[1]); + const __m256i out_08_0 = _mm256_unpacklo_epi16(step1[2], step1[3]); + const __m256i out_08_1 = _mm256_unpackhi_epi16(step1[2], step1[3]); + const __m256i out_00_2 = + _mm256_madd_epi16(out_00_0, k__cospi_p16_p16); + const __m256i out_00_3 = + _mm256_madd_epi16(out_00_1, k__cospi_p16_p16); + const __m256i out_16_2 = + _mm256_madd_epi16(out_00_0, k__cospi_p16_m16); + const __m256i out_16_3 = + _mm256_madd_epi16(out_00_1, k__cospi_p16_m16); + const __m256i out_08_2 = + _mm256_madd_epi16(out_08_0, k__cospi_p24_p08); + const __m256i out_08_3 = + _mm256_madd_epi16(out_08_1, k__cospi_p24_p08); + const __m256i out_24_2 = + _mm256_madd_epi16(out_08_0, k__cospi_m08_p24); + const __m256i out_24_3 = + _mm256_madd_epi16(out_08_1, k__cospi_m08_p24); + // dct_const_round_shift + const __m256i out_00_4 = + _mm256_add_epi32(out_00_2, k__DCT_CONST_ROUNDING); + const __m256i 
out_00_5 = + _mm256_add_epi32(out_00_3, k__DCT_CONST_ROUNDING); + const __m256i out_16_4 = + _mm256_add_epi32(out_16_2, k__DCT_CONST_ROUNDING); + const __m256i out_16_5 = + _mm256_add_epi32(out_16_3, k__DCT_CONST_ROUNDING); + const __m256i out_08_4 = + _mm256_add_epi32(out_08_2, k__DCT_CONST_ROUNDING); + const __m256i out_08_5 = + _mm256_add_epi32(out_08_3, k__DCT_CONST_ROUNDING); + const __m256i out_24_4 = + _mm256_add_epi32(out_24_2, k__DCT_CONST_ROUNDING); + const __m256i out_24_5 = + _mm256_add_epi32(out_24_3, k__DCT_CONST_ROUNDING); + const __m256i out_00_6 = _mm256_srai_epi32(out_00_4, DCT_CONST_BITS); + const __m256i out_00_7 = _mm256_srai_epi32(out_00_5, DCT_CONST_BITS); + const __m256i out_16_6 = _mm256_srai_epi32(out_16_4, DCT_CONST_BITS); + const __m256i out_16_7 = _mm256_srai_epi32(out_16_5, DCT_CONST_BITS); + const __m256i out_08_6 = _mm256_srai_epi32(out_08_4, DCT_CONST_BITS); + const __m256i out_08_7 = _mm256_srai_epi32(out_08_5, DCT_CONST_BITS); + const __m256i out_24_6 = _mm256_srai_epi32(out_24_4, DCT_CONST_BITS); + const __m256i out_24_7 = _mm256_srai_epi32(out_24_5, DCT_CONST_BITS); + // Combine + out[0] = _mm256_packs_epi32(out_00_6, out_00_7); + out[16] = _mm256_packs_epi32(out_16_6, out_16_7); + out[8] = _mm256_packs_epi32(out_08_6, out_08_7); + out[24] = _mm256_packs_epi32(out_24_6, out_24_7); + } + { + const __m256i s2_09_0 = _mm256_unpacklo_epi16(step1[9], step1[14]); + const __m256i s2_09_1 = _mm256_unpackhi_epi16(step1[9], step1[14]); + const __m256i s2_10_0 = _mm256_unpacklo_epi16(step1[10], step1[13]); + const __m256i s2_10_1 = _mm256_unpackhi_epi16(step1[10], step1[13]); + const __m256i s2_09_2 = _mm256_madd_epi16(s2_09_0, k__cospi_m08_p24); + const __m256i s2_09_3 = _mm256_madd_epi16(s2_09_1, k__cospi_m08_p24); + const __m256i s2_10_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m24_m08); + const __m256i s2_10_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m24_m08); + const __m256i s2_13_2 = _mm256_madd_epi16(s2_10_0, k__cospi_m08_p24); + const 
__m256i s2_13_3 = _mm256_madd_epi16(s2_10_1, k__cospi_m08_p24); + const __m256i s2_14_2 = _mm256_madd_epi16(s2_09_0, k__cospi_p24_p08); + const __m256i s2_14_3 = _mm256_madd_epi16(s2_09_1, k__cospi_p24_p08); + // dct_const_round_shift + const __m256i s2_09_4 = + _mm256_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING); + const __m256i s2_09_5 = + _mm256_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING); + const __m256i s2_10_4 = + _mm256_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING); + const __m256i s2_10_5 = + _mm256_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING); + const __m256i s2_13_4 = + _mm256_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING); + const __m256i s2_13_5 = + _mm256_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING); + const __m256i s2_14_4 = + _mm256_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING); + const __m256i s2_14_5 = + _mm256_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING); + const __m256i s2_09_6 = _mm256_srai_epi32(s2_09_4, DCT_CONST_BITS); + const __m256i s2_09_7 = _mm256_srai_epi32(s2_09_5, DCT_CONST_BITS); + const __m256i s2_10_6 = _mm256_srai_epi32(s2_10_4, DCT_CONST_BITS); + const __m256i s2_10_7 = _mm256_srai_epi32(s2_10_5, DCT_CONST_BITS); + const __m256i s2_13_6 = _mm256_srai_epi32(s2_13_4, DCT_CONST_BITS); + const __m256i s2_13_7 = _mm256_srai_epi32(s2_13_5, DCT_CONST_BITS); + const __m256i s2_14_6 = _mm256_srai_epi32(s2_14_4, DCT_CONST_BITS); + const __m256i s2_14_7 = _mm256_srai_epi32(s2_14_5, DCT_CONST_BITS); + // Combine + step2[9] = _mm256_packs_epi32(s2_09_6, s2_09_7); + step2[10] = _mm256_packs_epi32(s2_10_6, s2_10_7); + step2[13] = _mm256_packs_epi32(s2_13_6, s2_13_7); + step2[14] = _mm256_packs_epi32(s2_14_6, s2_14_7); + } + { + step2[16] = _mm256_add_epi16(step1[19], step3[16]); + step2[17] = _mm256_add_epi16(step1[18], step3[17]); + step2[18] = _mm256_sub_epi16(step3[17], step1[18]); + step2[19] = _mm256_sub_epi16(step3[16], step1[19]); + step2[20] = _mm256_sub_epi16(step3[23], step1[20]); + step2[21] = _mm256_sub_epi16(step3[22], step1[21]); + step2[22] = 
_mm256_add_epi16(step1[21], step3[22]); + step2[23] = _mm256_add_epi16(step1[20], step3[23]); + step2[24] = _mm256_add_epi16(step1[27], step3[24]); + step2[25] = _mm256_add_epi16(step1[26], step3[25]); + step2[26] = _mm256_sub_epi16(step3[25], step1[26]); + step2[27] = _mm256_sub_epi16(step3[24], step1[27]); + step2[28] = _mm256_sub_epi16(step3[31], step1[28]); + step2[29] = _mm256_sub_epi16(step3[30], step1[29]); + step2[30] = _mm256_add_epi16(step1[29], step3[30]); + step2[31] = _mm256_add_epi16(step1[28], step3[31]); + } + // Stage 6 + { + const __m256i out_04_0 = _mm256_unpacklo_epi16(step2[4], step2[7]); + const __m256i out_04_1 = _mm256_unpackhi_epi16(step2[4], step2[7]); + const __m256i out_20_0 = _mm256_unpacklo_epi16(step2[5], step2[6]); + const __m256i out_20_1 = _mm256_unpackhi_epi16(step2[5], step2[6]); + const __m256i out_12_0 = _mm256_unpacklo_epi16(step2[5], step2[6]); + const __m256i out_12_1 = _mm256_unpackhi_epi16(step2[5], step2[6]); + const __m256i out_28_0 = _mm256_unpacklo_epi16(step2[4], step2[7]); + const __m256i out_28_1 = _mm256_unpackhi_epi16(step2[4], step2[7]); + const __m256i out_04_2 = + _mm256_madd_epi16(out_04_0, k__cospi_p28_p04); + const __m256i out_04_3 = + _mm256_madd_epi16(out_04_1, k__cospi_p28_p04); + const __m256i out_20_2 = + _mm256_madd_epi16(out_20_0, k__cospi_p12_p20); + const __m256i out_20_3 = + _mm256_madd_epi16(out_20_1, k__cospi_p12_p20); + const __m256i out_12_2 = + _mm256_madd_epi16(out_12_0, k__cospi_m20_p12); + const __m256i out_12_3 = + _mm256_madd_epi16(out_12_1, k__cospi_m20_p12); + const __m256i out_28_2 = + _mm256_madd_epi16(out_28_0, k__cospi_m04_p28); + const __m256i out_28_3 = + _mm256_madd_epi16(out_28_1, k__cospi_m04_p28); + // dct_const_round_shift + const __m256i out_04_4 = + _mm256_add_epi32(out_04_2, k__DCT_CONST_ROUNDING); + const __m256i out_04_5 = + _mm256_add_epi32(out_04_3, k__DCT_CONST_ROUNDING); + const __m256i out_20_4 = + _mm256_add_epi32(out_20_2, k__DCT_CONST_ROUNDING); + const __m256i 
out_20_5 = + _mm256_add_epi32(out_20_3, k__DCT_CONST_ROUNDING); + const __m256i out_12_4 = + _mm256_add_epi32(out_12_2, k__DCT_CONST_ROUNDING); + const __m256i out_12_5 = + _mm256_add_epi32(out_12_3, k__DCT_CONST_ROUNDING); + const __m256i out_28_4 = + _mm256_add_epi32(out_28_2, k__DCT_CONST_ROUNDING); + const __m256i out_28_5 = + _mm256_add_epi32(out_28_3, k__DCT_CONST_ROUNDING); + const __m256i out_04_6 = _mm256_srai_epi32(out_04_4, DCT_CONST_BITS); + const __m256i out_04_7 = _mm256_srai_epi32(out_04_5, DCT_CONST_BITS); + const __m256i out_20_6 = _mm256_srai_epi32(out_20_4, DCT_CONST_BITS); + const __m256i out_20_7 = _mm256_srai_epi32(out_20_5, DCT_CONST_BITS); + const __m256i out_12_6 = _mm256_srai_epi32(out_12_4, DCT_CONST_BITS); + const __m256i out_12_7 = _mm256_srai_epi32(out_12_5, DCT_CONST_BITS); + const __m256i out_28_6 = _mm256_srai_epi32(out_28_4, DCT_CONST_BITS); + const __m256i out_28_7 = _mm256_srai_epi32(out_28_5, DCT_CONST_BITS); + // Combine + out[4] = _mm256_packs_epi32(out_04_6, out_04_7); + out[20] = _mm256_packs_epi32(out_20_6, out_20_7); + out[12] = _mm256_packs_epi32(out_12_6, out_12_7); + out[28] = _mm256_packs_epi32(out_28_6, out_28_7); + } + { + step3[8] = _mm256_add_epi16(step2[9], step1[8]); + step3[9] = _mm256_sub_epi16(step1[8], step2[9]); + step3[10] = _mm256_sub_epi16(step1[11], step2[10]); + step3[11] = _mm256_add_epi16(step2[10], step1[11]); + step3[12] = _mm256_add_epi16(step2[13], step1[12]); + step3[13] = _mm256_sub_epi16(step1[12], step2[13]); + step3[14] = _mm256_sub_epi16(step1[15], step2[14]); + step3[15] = _mm256_add_epi16(step2[14], step1[15]); + } + { + const __m256i s3_17_0 = _mm256_unpacklo_epi16(step2[17], step2[30]); + const __m256i s3_17_1 = _mm256_unpackhi_epi16(step2[17], step2[30]); + const __m256i s3_18_0 = _mm256_unpacklo_epi16(step2[18], step2[29]); + const __m256i s3_18_1 = _mm256_unpackhi_epi16(step2[18], step2[29]); + const __m256i s3_21_0 = _mm256_unpacklo_epi16(step2[21], step2[26]); + const __m256i 
s3_21_1 = _mm256_unpackhi_epi16(step2[21], step2[26]); + const __m256i s3_22_0 = _mm256_unpacklo_epi16(step2[22], step2[25]); + const __m256i s3_22_1 = _mm256_unpackhi_epi16(step2[22], step2[25]); + const __m256i s3_17_2 = _mm256_madd_epi16(s3_17_0, k__cospi_m04_p28); + const __m256i s3_17_3 = _mm256_madd_epi16(s3_17_1, k__cospi_m04_p28); + const __m256i s3_18_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m28_m04); + const __m256i s3_18_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m28_m04); + const __m256i s3_21_2 = _mm256_madd_epi16(s3_21_0, k__cospi_m20_p12); + const __m256i s3_21_3 = _mm256_madd_epi16(s3_21_1, k__cospi_m20_p12); + const __m256i s3_22_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m12_m20); + const __m256i s3_22_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m12_m20); + const __m256i s3_25_2 = _mm256_madd_epi16(s3_22_0, k__cospi_m20_p12); + const __m256i s3_25_3 = _mm256_madd_epi16(s3_22_1, k__cospi_m20_p12); + const __m256i s3_26_2 = _mm256_madd_epi16(s3_21_0, k__cospi_p12_p20); + const __m256i s3_26_3 = _mm256_madd_epi16(s3_21_1, k__cospi_p12_p20); + const __m256i s3_29_2 = _mm256_madd_epi16(s3_18_0, k__cospi_m04_p28); + const __m256i s3_29_3 = _mm256_madd_epi16(s3_18_1, k__cospi_m04_p28); + const __m256i s3_30_2 = _mm256_madd_epi16(s3_17_0, k__cospi_p28_p04); + const __m256i s3_30_3 = _mm256_madd_epi16(s3_17_1, k__cospi_p28_p04); + // dct_const_round_shift + const __m256i s3_17_4 = + _mm256_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING); + const __m256i s3_17_5 = + _mm256_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING); + const __m256i s3_18_4 = + _mm256_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING); + const __m256i s3_18_5 = + _mm256_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING); + const __m256i s3_21_4 = + _mm256_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING); + const __m256i s3_21_5 = + _mm256_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING); + const __m256i s3_22_4 = + _mm256_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING); + const __m256i s3_22_5 = + _mm256_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING); 
+ const __m256i s3_17_6 = _mm256_srai_epi32(s3_17_4, DCT_CONST_BITS); + const __m256i s3_17_7 = _mm256_srai_epi32(s3_17_5, DCT_CONST_BITS); + const __m256i s3_18_6 = _mm256_srai_epi32(s3_18_4, DCT_CONST_BITS); + const __m256i s3_18_7 = _mm256_srai_epi32(s3_18_5, DCT_CONST_BITS); + const __m256i s3_21_6 = _mm256_srai_epi32(s3_21_4, DCT_CONST_BITS); + const __m256i s3_21_7 = _mm256_srai_epi32(s3_21_5, DCT_CONST_BITS); + const __m256i s3_22_6 = _mm256_srai_epi32(s3_22_4, DCT_CONST_BITS); + const __m256i s3_22_7 = _mm256_srai_epi32(s3_22_5, DCT_CONST_BITS); + const __m256i s3_25_4 = + _mm256_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING); + const __m256i s3_25_5 = + _mm256_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING); + const __m256i s3_26_4 = + _mm256_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING); + const __m256i s3_26_5 = + _mm256_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING); + const __m256i s3_29_4 = + _mm256_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING); + const __m256i s3_29_5 = + _mm256_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING); + const __m256i s3_30_4 = + _mm256_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING); + const __m256i s3_30_5 = + _mm256_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING); + const __m256i s3_25_6 = _mm256_srai_epi32(s3_25_4, DCT_CONST_BITS); + const __m256i s3_25_7 = _mm256_srai_epi32(s3_25_5, DCT_CONST_BITS); + const __m256i s3_26_6 = _mm256_srai_epi32(s3_26_4, DCT_CONST_BITS); + const __m256i s3_26_7 = _mm256_srai_epi32(s3_26_5, DCT_CONST_BITS); + const __m256i s3_29_6 = _mm256_srai_epi32(s3_29_4, DCT_CONST_BITS); + const __m256i s3_29_7 = _mm256_srai_epi32(s3_29_5, DCT_CONST_BITS); + const __m256i s3_30_6 = _mm256_srai_epi32(s3_30_4, DCT_CONST_BITS); + const __m256i s3_30_7 = _mm256_srai_epi32(s3_30_5, DCT_CONST_BITS); + // Combine + step3[17] = _mm256_packs_epi32(s3_17_6, s3_17_7); + step3[18] = _mm256_packs_epi32(s3_18_6, s3_18_7); + step3[21] = _mm256_packs_epi32(s3_21_6, s3_21_7); + step3[22] = _mm256_packs_epi32(s3_22_6, s3_22_7); + // Combine + step3[25] = 
_mm256_packs_epi32(s3_25_6, s3_25_7); + step3[26] = _mm256_packs_epi32(s3_26_6, s3_26_7); + step3[29] = _mm256_packs_epi32(s3_29_6, s3_29_7); + step3[30] = _mm256_packs_epi32(s3_30_6, s3_30_7); + } + // Stage 7 + { + const __m256i out_02_0 = _mm256_unpacklo_epi16(step3[8], step3[15]); + const __m256i out_02_1 = _mm256_unpackhi_epi16(step3[8], step3[15]); + const __m256i out_18_0 = _mm256_unpacklo_epi16(step3[9], step3[14]); + const __m256i out_18_1 = _mm256_unpackhi_epi16(step3[9], step3[14]); + const __m256i out_10_0 = _mm256_unpacklo_epi16(step3[10], step3[13]); + const __m256i out_10_1 = _mm256_unpackhi_epi16(step3[10], step3[13]); + const __m256i out_26_0 = _mm256_unpacklo_epi16(step3[11], step3[12]); + const __m256i out_26_1 = _mm256_unpackhi_epi16(step3[11], step3[12]); + const __m256i out_02_2 = + _mm256_madd_epi16(out_02_0, k__cospi_p30_p02); + const __m256i out_02_3 = + _mm256_madd_epi16(out_02_1, k__cospi_p30_p02); + const __m256i out_18_2 = + _mm256_madd_epi16(out_18_0, k__cospi_p14_p18); + const __m256i out_18_3 = + _mm256_madd_epi16(out_18_1, k__cospi_p14_p18); + const __m256i out_10_2 = + _mm256_madd_epi16(out_10_0, k__cospi_p22_p10); + const __m256i out_10_3 = + _mm256_madd_epi16(out_10_1, k__cospi_p22_p10); + const __m256i out_26_2 = + _mm256_madd_epi16(out_26_0, k__cospi_p06_p26); + const __m256i out_26_3 = + _mm256_madd_epi16(out_26_1, k__cospi_p06_p26); + const __m256i out_06_2 = + _mm256_madd_epi16(out_26_0, k__cospi_m26_p06); + const __m256i out_06_3 = + _mm256_madd_epi16(out_26_1, k__cospi_m26_p06); + const __m256i out_22_2 = + _mm256_madd_epi16(out_10_0, k__cospi_m10_p22); + const __m256i out_22_3 = + _mm256_madd_epi16(out_10_1, k__cospi_m10_p22); + const __m256i out_14_2 = + _mm256_madd_epi16(out_18_0, k__cospi_m18_p14); + const __m256i out_14_3 = + _mm256_madd_epi16(out_18_1, k__cospi_m18_p14); + const __m256i out_30_2 = + _mm256_madd_epi16(out_02_0, k__cospi_m02_p30); + const __m256i out_30_3 = + _mm256_madd_epi16(out_02_1, 
k__cospi_m02_p30); + // dct_const_round_shift + const __m256i out_02_4 = + _mm256_add_epi32(out_02_2, k__DCT_CONST_ROUNDING); + const __m256i out_02_5 = + _mm256_add_epi32(out_02_3, k__DCT_CONST_ROUNDING); + const __m256i out_18_4 = + _mm256_add_epi32(out_18_2, k__DCT_CONST_ROUNDING); + const __m256i out_18_5 = + _mm256_add_epi32(out_18_3, k__DCT_CONST_ROUNDING); + const __m256i out_10_4 = + _mm256_add_epi32(out_10_2, k__DCT_CONST_ROUNDING); + const __m256i out_10_5 = + _mm256_add_epi32(out_10_3, k__DCT_CONST_ROUNDING); + const __m256i out_26_4 = + _mm256_add_epi32(out_26_2, k__DCT_CONST_ROUNDING); + const __m256i out_26_5 = + _mm256_add_epi32(out_26_3, k__DCT_CONST_ROUNDING); + const __m256i out_06_4 = + _mm256_add_epi32(out_06_2, k__DCT_CONST_ROUNDING); + const __m256i out_06_5 = + _mm256_add_epi32(out_06_3, k__DCT_CONST_ROUNDING); + const __m256i out_22_4 = + _mm256_add_epi32(out_22_2, k__DCT_CONST_ROUNDING); + const __m256i out_22_5 = + _mm256_add_epi32(out_22_3, k__DCT_CONST_ROUNDING); + const __m256i out_14_4 = + _mm256_add_epi32(out_14_2, k__DCT_CONST_ROUNDING); + const __m256i out_14_5 = + _mm256_add_epi32(out_14_3, k__DCT_CONST_ROUNDING); + const __m256i out_30_4 = + _mm256_add_epi32(out_30_2, k__DCT_CONST_ROUNDING); + const __m256i out_30_5 = + _mm256_add_epi32(out_30_3, k__DCT_CONST_ROUNDING); + const __m256i out_02_6 = _mm256_srai_epi32(out_02_4, DCT_CONST_BITS); + const __m256i out_02_7 = _mm256_srai_epi32(out_02_5, DCT_CONST_BITS); + const __m256i out_18_6 = _mm256_srai_epi32(out_18_4, DCT_CONST_BITS); + const __m256i out_18_7 = _mm256_srai_epi32(out_18_5, DCT_CONST_BITS); + const __m256i out_10_6 = _mm256_srai_epi32(out_10_4, DCT_CONST_BITS); + const __m256i out_10_7 = _mm256_srai_epi32(out_10_5, DCT_CONST_BITS); + const __m256i out_26_6 = _mm256_srai_epi32(out_26_4, DCT_CONST_BITS); + const __m256i out_26_7 = _mm256_srai_epi32(out_26_5, DCT_CONST_BITS); + const __m256i out_06_6 = _mm256_srai_epi32(out_06_4, DCT_CONST_BITS); + const __m256i out_06_7 
= _mm256_srai_epi32(out_06_5, DCT_CONST_BITS); + const __m256i out_22_6 = _mm256_srai_epi32(out_22_4, DCT_CONST_BITS); + const __m256i out_22_7 = _mm256_srai_epi32(out_22_5, DCT_CONST_BITS); + const __m256i out_14_6 = _mm256_srai_epi32(out_14_4, DCT_CONST_BITS); + const __m256i out_14_7 = _mm256_srai_epi32(out_14_5, DCT_CONST_BITS); + const __m256i out_30_6 = _mm256_srai_epi32(out_30_4, DCT_CONST_BITS); + const __m256i out_30_7 = _mm256_srai_epi32(out_30_5, DCT_CONST_BITS); + // Combine + out[2] = _mm256_packs_epi32(out_02_6, out_02_7); + out[18] = _mm256_packs_epi32(out_18_6, out_18_7); + out[10] = _mm256_packs_epi32(out_10_6, out_10_7); + out[26] = _mm256_packs_epi32(out_26_6, out_26_7); + out[6] = _mm256_packs_epi32(out_06_6, out_06_7); + out[22] = _mm256_packs_epi32(out_22_6, out_22_7); + out[14] = _mm256_packs_epi32(out_14_6, out_14_7); + out[30] = _mm256_packs_epi32(out_30_6, out_30_7); + } + { + step1[16] = _mm256_add_epi16(step3[17], step2[16]); + step1[17] = _mm256_sub_epi16(step2[16], step3[17]); + step1[18] = _mm256_sub_epi16(step2[19], step3[18]); + step1[19] = _mm256_add_epi16(step3[18], step2[19]); + step1[20] = _mm256_add_epi16(step3[21], step2[20]); + step1[21] = _mm256_sub_epi16(step2[20], step3[21]); + step1[22] = _mm256_sub_epi16(step2[23], step3[22]); + step1[23] = _mm256_add_epi16(step3[22], step2[23]); + step1[24] = _mm256_add_epi16(step3[25], step2[24]); + step1[25] = _mm256_sub_epi16(step2[24], step3[25]); + step1[26] = _mm256_sub_epi16(step2[27], step3[26]); + step1[27] = _mm256_add_epi16(step3[26], step2[27]); + step1[28] = _mm256_add_epi16(step3[29], step2[28]); + step1[29] = _mm256_sub_epi16(step2[28], step3[29]); + step1[30] = _mm256_sub_epi16(step2[31], step3[30]); + step1[31] = _mm256_add_epi16(step3[30], step2[31]); + } + // Final stage --- outputs indices are bit-reversed. 
+ { + const __m256i out_01_0 = _mm256_unpacklo_epi16(step1[16], step1[31]); + const __m256i out_01_1 = _mm256_unpackhi_epi16(step1[16], step1[31]); + const __m256i out_17_0 = _mm256_unpacklo_epi16(step1[17], step1[30]); + const __m256i out_17_1 = _mm256_unpackhi_epi16(step1[17], step1[30]); + const __m256i out_09_0 = _mm256_unpacklo_epi16(step1[18], step1[29]); + const __m256i out_09_1 = _mm256_unpackhi_epi16(step1[18], step1[29]); + const __m256i out_25_0 = _mm256_unpacklo_epi16(step1[19], step1[28]); + const __m256i out_25_1 = _mm256_unpackhi_epi16(step1[19], step1[28]); + const __m256i out_01_2 = + _mm256_madd_epi16(out_01_0, k__cospi_p31_p01); + const __m256i out_01_3 = + _mm256_madd_epi16(out_01_1, k__cospi_p31_p01); + const __m256i out_17_2 = + _mm256_madd_epi16(out_17_0, k__cospi_p15_p17); + const __m256i out_17_3 = + _mm256_madd_epi16(out_17_1, k__cospi_p15_p17); + const __m256i out_09_2 = + _mm256_madd_epi16(out_09_0, k__cospi_p23_p09); + const __m256i out_09_3 = + _mm256_madd_epi16(out_09_1, k__cospi_p23_p09); + const __m256i out_25_2 = + _mm256_madd_epi16(out_25_0, k__cospi_p07_p25); + const __m256i out_25_3 = + _mm256_madd_epi16(out_25_1, k__cospi_p07_p25); + const __m256i out_07_2 = + _mm256_madd_epi16(out_25_0, k__cospi_m25_p07); + const __m256i out_07_3 = + _mm256_madd_epi16(out_25_1, k__cospi_m25_p07); + const __m256i out_23_2 = + _mm256_madd_epi16(out_09_0, k__cospi_m09_p23); + const __m256i out_23_3 = + _mm256_madd_epi16(out_09_1, k__cospi_m09_p23); + const __m256i out_15_2 = + _mm256_madd_epi16(out_17_0, k__cospi_m17_p15); + const __m256i out_15_3 = + _mm256_madd_epi16(out_17_1, k__cospi_m17_p15); + const __m256i out_31_2 = + _mm256_madd_epi16(out_01_0, k__cospi_m01_p31); + const __m256i out_31_3 = + _mm256_madd_epi16(out_01_1, k__cospi_m01_p31); + // dct_const_round_shift + const __m256i out_01_4 = + _mm256_add_epi32(out_01_2, k__DCT_CONST_ROUNDING); + const __m256i out_01_5 = + _mm256_add_epi32(out_01_3, k__DCT_CONST_ROUNDING); + const __m256i 
out_17_4 = + _mm256_add_epi32(out_17_2, k__DCT_CONST_ROUNDING); + const __m256i out_17_5 = + _mm256_add_epi32(out_17_3, k__DCT_CONST_ROUNDING); + const __m256i out_09_4 = + _mm256_add_epi32(out_09_2, k__DCT_CONST_ROUNDING); + const __m256i out_09_5 = + _mm256_add_epi32(out_09_3, k__DCT_CONST_ROUNDING); + const __m256i out_25_4 = + _mm256_add_epi32(out_25_2, k__DCT_CONST_ROUNDING); + const __m256i out_25_5 = + _mm256_add_epi32(out_25_3, k__DCT_CONST_ROUNDING); + const __m256i out_07_4 = + _mm256_add_epi32(out_07_2, k__DCT_CONST_ROUNDING); + const __m256i out_07_5 = + _mm256_add_epi32(out_07_3, k__DCT_CONST_ROUNDING); + const __m256i out_23_4 = + _mm256_add_epi32(out_23_2, k__DCT_CONST_ROUNDING); + const __m256i out_23_5 = + _mm256_add_epi32(out_23_3, k__DCT_CONST_ROUNDING); + const __m256i out_15_4 = + _mm256_add_epi32(out_15_2, k__DCT_CONST_ROUNDING); + const __m256i out_15_5 = + _mm256_add_epi32(out_15_3, k__DCT_CONST_ROUNDING); + const __m256i out_31_4 = + _mm256_add_epi32(out_31_2, k__DCT_CONST_ROUNDING); + const __m256i out_31_5 = + _mm256_add_epi32(out_31_3, k__DCT_CONST_ROUNDING); + const __m256i out_01_6 = _mm256_srai_epi32(out_01_4, DCT_CONST_BITS); + const __m256i out_01_7 = _mm256_srai_epi32(out_01_5, DCT_CONST_BITS); + const __m256i out_17_6 = _mm256_srai_epi32(out_17_4, DCT_CONST_BITS); + const __m256i out_17_7 = _mm256_srai_epi32(out_17_5, DCT_CONST_BITS); + const __m256i out_09_6 = _mm256_srai_epi32(out_09_4, DCT_CONST_BITS); + const __m256i out_09_7 = _mm256_srai_epi32(out_09_5, DCT_CONST_BITS); + const __m256i out_25_6 = _mm256_srai_epi32(out_25_4, DCT_CONST_BITS); + const __m256i out_25_7 = _mm256_srai_epi32(out_25_5, DCT_CONST_BITS); + const __m256i out_07_6 = _mm256_srai_epi32(out_07_4, DCT_CONST_BITS); + const __m256i out_07_7 = _mm256_srai_epi32(out_07_5, DCT_CONST_BITS); + const __m256i out_23_6 = _mm256_srai_epi32(out_23_4, DCT_CONST_BITS); + const __m256i out_23_7 = _mm256_srai_epi32(out_23_5, DCT_CONST_BITS); + const __m256i out_15_6 = 
_mm256_srai_epi32(out_15_4, DCT_CONST_BITS); + const __m256i out_15_7 = _mm256_srai_epi32(out_15_5, DCT_CONST_BITS); + const __m256i out_31_6 = _mm256_srai_epi32(out_31_4, DCT_CONST_BITS); + const __m256i out_31_7 = _mm256_srai_epi32(out_31_5, DCT_CONST_BITS); + // Combine + out[1] = _mm256_packs_epi32(out_01_6, out_01_7); + out[17] = _mm256_packs_epi32(out_17_6, out_17_7); + out[9] = _mm256_packs_epi32(out_09_6, out_09_7); + out[25] = _mm256_packs_epi32(out_25_6, out_25_7); + out[7] = _mm256_packs_epi32(out_07_6, out_07_7); + out[23] = _mm256_packs_epi32(out_23_6, out_23_7); + out[15] = _mm256_packs_epi32(out_15_6, out_15_7); + out[31] = _mm256_packs_epi32(out_31_6, out_31_7); + } + { + const __m256i out_05_0 = _mm256_unpacklo_epi16(step1[20], step1[27]); + const __m256i out_05_1 = _mm256_unpackhi_epi16(step1[20], step1[27]); + const __m256i out_21_0 = _mm256_unpacklo_epi16(step1[21], step1[26]); + const __m256i out_21_1 = _mm256_unpackhi_epi16(step1[21], step1[26]); + const __m256i out_13_0 = _mm256_unpacklo_epi16(step1[22], step1[25]); + const __m256i out_13_1 = _mm256_unpackhi_epi16(step1[22], step1[25]); + const __m256i out_29_0 = _mm256_unpacklo_epi16(step1[23], step1[24]); + const __m256i out_29_1 = _mm256_unpackhi_epi16(step1[23], step1[24]); + const __m256i out_05_2 = + _mm256_madd_epi16(out_05_0, k__cospi_p27_p05); + const __m256i out_05_3 = + _mm256_madd_epi16(out_05_1, k__cospi_p27_p05); + const __m256i out_21_2 = + _mm256_madd_epi16(out_21_0, k__cospi_p11_p21); + const __m256i out_21_3 = + _mm256_madd_epi16(out_21_1, k__cospi_p11_p21); + const __m256i out_13_2 = + _mm256_madd_epi16(out_13_0, k__cospi_p19_p13); + const __m256i out_13_3 = + _mm256_madd_epi16(out_13_1, k__cospi_p19_p13); + const __m256i out_29_2 = + _mm256_madd_epi16(out_29_0, k__cospi_p03_p29); + const __m256i out_29_3 = + _mm256_madd_epi16(out_29_1, k__cospi_p03_p29); + const __m256i out_03_2 = + _mm256_madd_epi16(out_29_0, k__cospi_m29_p03); + const __m256i out_03_3 = + 
_mm256_madd_epi16(out_29_1, k__cospi_m29_p03); + const __m256i out_19_2 = + _mm256_madd_epi16(out_13_0, k__cospi_m13_p19); + const __m256i out_19_3 = + _mm256_madd_epi16(out_13_1, k__cospi_m13_p19); + const __m256i out_11_2 = + _mm256_madd_epi16(out_21_0, k__cospi_m21_p11); + const __m256i out_11_3 = + _mm256_madd_epi16(out_21_1, k__cospi_m21_p11); + const __m256i out_27_2 = + _mm256_madd_epi16(out_05_0, k__cospi_m05_p27); + const __m256i out_27_3 = + _mm256_madd_epi16(out_05_1, k__cospi_m05_p27); + // dct_const_round_shift + const __m256i out_05_4 = + _mm256_add_epi32(out_05_2, k__DCT_CONST_ROUNDING); + const __m256i out_05_5 = + _mm256_add_epi32(out_05_3, k__DCT_CONST_ROUNDING); + const __m256i out_21_4 = + _mm256_add_epi32(out_21_2, k__DCT_CONST_ROUNDING); + const __m256i out_21_5 = + _mm256_add_epi32(out_21_3, k__DCT_CONST_ROUNDING); + const __m256i out_13_4 = + _mm256_add_epi32(out_13_2, k__DCT_CONST_ROUNDING); + const __m256i out_13_5 = + _mm256_add_epi32(out_13_3, k__DCT_CONST_ROUNDING); + const __m256i out_29_4 = + _mm256_add_epi32(out_29_2, k__DCT_CONST_ROUNDING); + const __m256i out_29_5 = + _mm256_add_epi32(out_29_3, k__DCT_CONST_ROUNDING); + const __m256i out_03_4 = + _mm256_add_epi32(out_03_2, k__DCT_CONST_ROUNDING); + const __m256i out_03_5 = + _mm256_add_epi32(out_03_3, k__DCT_CONST_ROUNDING); + const __m256i out_19_4 = + _mm256_add_epi32(out_19_2, k__DCT_CONST_ROUNDING); + const __m256i out_19_5 = + _mm256_add_epi32(out_19_3, k__DCT_CONST_ROUNDING); + const __m256i out_11_4 = + _mm256_add_epi32(out_11_2, k__DCT_CONST_ROUNDING); + const __m256i out_11_5 = + _mm256_add_epi32(out_11_3, k__DCT_CONST_ROUNDING); + const __m256i out_27_4 = + _mm256_add_epi32(out_27_2, k__DCT_CONST_ROUNDING); + const __m256i out_27_5 = + _mm256_add_epi32(out_27_3, k__DCT_CONST_ROUNDING); + const __m256i out_05_6 = _mm256_srai_epi32(out_05_4, DCT_CONST_BITS); + const __m256i out_05_7 = _mm256_srai_epi32(out_05_5, DCT_CONST_BITS); + const __m256i out_21_6 = 
_mm256_srai_epi32(out_21_4, DCT_CONST_BITS); + const __m256i out_21_7 = _mm256_srai_epi32(out_21_5, DCT_CONST_BITS); + const __m256i out_13_6 = _mm256_srai_epi32(out_13_4, DCT_CONST_BITS); + const __m256i out_13_7 = _mm256_srai_epi32(out_13_5, DCT_CONST_BITS); + const __m256i out_29_6 = _mm256_srai_epi32(out_29_4, DCT_CONST_BITS); + const __m256i out_29_7 = _mm256_srai_epi32(out_29_5, DCT_CONST_BITS); + const __m256i out_03_6 = _mm256_srai_epi32(out_03_4, DCT_CONST_BITS); + const __m256i out_03_7 = _mm256_srai_epi32(out_03_5, DCT_CONST_BITS); + const __m256i out_19_6 = _mm256_srai_epi32(out_19_4, DCT_CONST_BITS); + const __m256i out_19_7 = _mm256_srai_epi32(out_19_5, DCT_CONST_BITS); + const __m256i out_11_6 = _mm256_srai_epi32(out_11_4, DCT_CONST_BITS); + const __m256i out_11_7 = _mm256_srai_epi32(out_11_5, DCT_CONST_BITS); + const __m256i out_27_6 = _mm256_srai_epi32(out_27_4, DCT_CONST_BITS); + const __m256i out_27_7 = _mm256_srai_epi32(out_27_5, DCT_CONST_BITS); + // Combine + out[5] = _mm256_packs_epi32(out_05_6, out_05_7); + out[21] = _mm256_packs_epi32(out_21_6, out_21_7); + out[13] = _mm256_packs_epi32(out_13_6, out_13_7); + out[29] = _mm256_packs_epi32(out_29_6, out_29_7); + out[3] = _mm256_packs_epi32(out_03_6, out_03_7); + out[19] = _mm256_packs_epi32(out_19_6, out_19_7); + out[11] = _mm256_packs_epi32(out_11_6, out_11_7); + out[27] = _mm256_packs_epi32(out_27_6, out_27_7); + } +#if FDCT32x32_HIGH_PRECISION + } else { + __m256i lstep1[64], lstep2[64], lstep3[64]; + __m256i u[32], v[32], sign[16]; + const __m256i K32One = _mm256_set_epi32(1, 1, 1, 1, 1, 1, 1, 1); + // start using 32-bit operations + // stage 3 + { + // expanding to 32-bit length priori to addition operations + lstep2[0] = _mm256_unpacklo_epi16(step2[0], kZero); + lstep2[1] = _mm256_unpackhi_epi16(step2[0], kZero); + lstep2[2] = _mm256_unpacklo_epi16(step2[1], kZero); + lstep2[3] = _mm256_unpackhi_epi16(step2[1], kZero); + lstep2[4] = _mm256_unpacklo_epi16(step2[2], kZero); + lstep2[5] = 
_mm256_unpackhi_epi16(step2[2], kZero); + lstep2[6] = _mm256_unpacklo_epi16(step2[3], kZero); + lstep2[7] = _mm256_unpackhi_epi16(step2[3], kZero); + lstep2[8] = _mm256_unpacklo_epi16(step2[4], kZero); + lstep2[9] = _mm256_unpackhi_epi16(step2[4], kZero); + lstep2[10] = _mm256_unpacklo_epi16(step2[5], kZero); + lstep2[11] = _mm256_unpackhi_epi16(step2[5], kZero); + lstep2[12] = _mm256_unpacklo_epi16(step2[6], kZero); + lstep2[13] = _mm256_unpackhi_epi16(step2[6], kZero); + lstep2[14] = _mm256_unpacklo_epi16(step2[7], kZero); + lstep2[15] = _mm256_unpackhi_epi16(step2[7], kZero); + lstep2[0] = _mm256_madd_epi16(lstep2[0], kOne); + lstep2[1] = _mm256_madd_epi16(lstep2[1], kOne); + lstep2[2] = _mm256_madd_epi16(lstep2[2], kOne); + lstep2[3] = _mm256_madd_epi16(lstep2[3], kOne); + lstep2[4] = _mm256_madd_epi16(lstep2[4], kOne); + lstep2[5] = _mm256_madd_epi16(lstep2[5], kOne); + lstep2[6] = _mm256_madd_epi16(lstep2[6], kOne); + lstep2[7] = _mm256_madd_epi16(lstep2[7], kOne); + lstep2[8] = _mm256_madd_epi16(lstep2[8], kOne); + lstep2[9] = _mm256_madd_epi16(lstep2[9], kOne); + lstep2[10] = _mm256_madd_epi16(lstep2[10], kOne); + lstep2[11] = _mm256_madd_epi16(lstep2[11], kOne); + lstep2[12] = _mm256_madd_epi16(lstep2[12], kOne); + lstep2[13] = _mm256_madd_epi16(lstep2[13], kOne); + lstep2[14] = _mm256_madd_epi16(lstep2[14], kOne); + lstep2[15] = _mm256_madd_epi16(lstep2[15], kOne); + + lstep3[0] = _mm256_add_epi32(lstep2[14], lstep2[0]); + lstep3[1] = _mm256_add_epi32(lstep2[15], lstep2[1]); + lstep3[2] = _mm256_add_epi32(lstep2[12], lstep2[2]); + lstep3[3] = _mm256_add_epi32(lstep2[13], lstep2[3]); + lstep3[4] = _mm256_add_epi32(lstep2[10], lstep2[4]); + lstep3[5] = _mm256_add_epi32(lstep2[11], lstep2[5]); + lstep3[6] = _mm256_add_epi32(lstep2[8], lstep2[6]); + lstep3[7] = _mm256_add_epi32(lstep2[9], lstep2[7]); + lstep3[8] = _mm256_sub_epi32(lstep2[6], lstep2[8]); + lstep3[9] = _mm256_sub_epi32(lstep2[7], lstep2[9]); + lstep3[10] = _mm256_sub_epi32(lstep2[4], 
lstep2[10]); + lstep3[11] = _mm256_sub_epi32(lstep2[5], lstep2[11]); + lstep3[12] = _mm256_sub_epi32(lstep2[2], lstep2[12]); + lstep3[13] = _mm256_sub_epi32(lstep2[3], lstep2[13]); + lstep3[14] = _mm256_sub_epi32(lstep2[0], lstep2[14]); + lstep3[15] = _mm256_sub_epi32(lstep2[1], lstep2[15]); + } + { + const __m256i s3_10_0 = _mm256_unpacklo_epi16(step2[13], step2[10]); + const __m256i s3_10_1 = _mm256_unpackhi_epi16(step2[13], step2[10]); + const __m256i s3_11_0 = _mm256_unpacklo_epi16(step2[12], step2[11]); + const __m256i s3_11_1 = _mm256_unpackhi_epi16(step2[12], step2[11]); + const __m256i s3_10_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_m16); + const __m256i s3_10_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_m16); + const __m256i s3_11_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_m16); + const __m256i s3_11_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_m16); + const __m256i s3_12_2 = _mm256_madd_epi16(s3_11_0, k__cospi_p16_p16); + const __m256i s3_12_3 = _mm256_madd_epi16(s3_11_1, k__cospi_p16_p16); + const __m256i s3_13_2 = _mm256_madd_epi16(s3_10_0, k__cospi_p16_p16); + const __m256i s3_13_3 = _mm256_madd_epi16(s3_10_1, k__cospi_p16_p16); + // dct_const_round_shift + const __m256i s3_10_4 = + _mm256_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING); + const __m256i s3_10_5 = + _mm256_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING); + const __m256i s3_11_4 = + _mm256_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING); + const __m256i s3_11_5 = + _mm256_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING); + const __m256i s3_12_4 = + _mm256_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING); + const __m256i s3_12_5 = + _mm256_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING); + const __m256i s3_13_4 = + _mm256_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING); + const __m256i s3_13_5 = + _mm256_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING); lstep3[20] = _mm256_srai_epi32(s3_10_4, DCT_CONST_BITS); lstep3[21] = _mm256_srai_epi32(s3_10_5, DCT_CONST_BITS); lstep3[22] = _mm256_srai_epi32(s3_11_4, DCT_CONST_BITS); @@ -1342,10 
+1572,10 @@ void FDCT32x32_2D_AVX2(const int16_t *input, // stage 4 { // expanding to 32-bit length priori to addition operations - lstep2[16] = _mm256_unpacklo_epi16(step2[ 8], kZero); - lstep2[17] = _mm256_unpackhi_epi16(step2[ 8], kZero); - lstep2[18] = _mm256_unpacklo_epi16(step2[ 9], kZero); - lstep2[19] = _mm256_unpackhi_epi16(step2[ 9], kZero); + lstep2[16] = _mm256_unpacklo_epi16(step2[8], kZero); + lstep2[17] = _mm256_unpackhi_epi16(step2[8], kZero); + lstep2[18] = _mm256_unpacklo_epi16(step2[9], kZero); + lstep2[19] = _mm256_unpackhi_epi16(step2[9], kZero); lstep2[28] = _mm256_unpacklo_epi16(step2[14], kZero); lstep2[29] = _mm256_unpackhi_epi16(step2[14], kZero); lstep2[30] = _mm256_unpacklo_epi16(step2[15], kZero); @@ -1359,14 +1589,14 @@ void FDCT32x32_2D_AVX2(const int16_t *input, lstep2[30] = _mm256_madd_epi16(lstep2[30], kOne); lstep2[31] = _mm256_madd_epi16(lstep2[31], kOne); - lstep1[ 0] = _mm256_add_epi32(lstep3[ 6], lstep3[ 0]); - lstep1[ 1] = _mm256_add_epi32(lstep3[ 7], lstep3[ 1]); - lstep1[ 2] = _mm256_add_epi32(lstep3[ 4], lstep3[ 2]); - lstep1[ 3] = _mm256_add_epi32(lstep3[ 5], lstep3[ 3]); - lstep1[ 4] = _mm256_sub_epi32(lstep3[ 2], lstep3[ 4]); - lstep1[ 5] = _mm256_sub_epi32(lstep3[ 3], lstep3[ 5]); - lstep1[ 6] = _mm256_sub_epi32(lstep3[ 0], lstep3[ 6]); - lstep1[ 7] = _mm256_sub_epi32(lstep3[ 1], lstep3[ 7]); + lstep1[0] = _mm256_add_epi32(lstep3[6], lstep3[0]); + lstep1[1] = _mm256_add_epi32(lstep3[7], lstep3[1]); + lstep1[2] = _mm256_add_epi32(lstep3[4], lstep3[2]); + lstep1[3] = _mm256_add_epi32(lstep3[5], lstep3[3]); + lstep1[4] = _mm256_sub_epi32(lstep3[2], lstep3[4]); + lstep1[5] = _mm256_sub_epi32(lstep3[3], lstep3[5]); + lstep1[6] = _mm256_sub_epi32(lstep3[0], lstep3[6]); + lstep1[7] = _mm256_sub_epi32(lstep3[1], lstep3[7]); lstep1[16] = _mm256_add_epi32(lstep3[22], lstep2[16]); lstep1[17] = _mm256_add_epi32(lstep3[23], lstep2[17]); lstep1[18] = _mm256_add_epi32(lstep3[20], lstep2[18]); @@ -1385,57 +1615,62 @@ void 
FDCT32x32_2D_AVX2(const int16_t *input, lstep1[31] = _mm256_add_epi32(lstep3[25], lstep2[31]); } { - // to be continued... - // - const __m256i k32_p16_p16 = pair256_set_epi32(cospi_16_64, cospi_16_64); - const __m256i k32_p16_m16 = pair256_set_epi32(cospi_16_64, -cospi_16_64); + // to be continued... + // + const __m256i k32_p16_p16 = + pair256_set_epi32(cospi_16_64, cospi_16_64); + const __m256i k32_p16_m16 = + pair256_set_epi32(cospi_16_64, -cospi_16_64); - u[0] = _mm256_unpacklo_epi32(lstep3[12], lstep3[10]); - u[1] = _mm256_unpackhi_epi32(lstep3[12], lstep3[10]); - u[2] = _mm256_unpacklo_epi32(lstep3[13], lstep3[11]); - u[3] = _mm256_unpackhi_epi32(lstep3[13], lstep3[11]); + u[0] = _mm256_unpacklo_epi32(lstep3[12], lstep3[10]); + u[1] = _mm256_unpackhi_epi32(lstep3[12], lstep3[10]); + u[2] = _mm256_unpacklo_epi32(lstep3[13], lstep3[11]); + u[3] = _mm256_unpackhi_epi32(lstep3[13], lstep3[11]); - // TODO(jingning): manually inline k_madd_epi32_avx2_ to further hide - // instruction latency. - v[ 0] = k_madd_epi32_avx2(u[0], k32_p16_m16); - v[ 1] = k_madd_epi32_avx2(u[1], k32_p16_m16); - v[ 2] = k_madd_epi32_avx2(u[2], k32_p16_m16); - v[ 3] = k_madd_epi32_avx2(u[3], k32_p16_m16); - v[ 4] = k_madd_epi32_avx2(u[0], k32_p16_p16); - v[ 5] = k_madd_epi32_avx2(u[1], k32_p16_p16); - v[ 6] = k_madd_epi32_avx2(u[2], k32_p16_p16); - v[ 7] = k_madd_epi32_avx2(u[3], k32_p16_p16); + // TODO(jingning): manually inline k_madd_epi32_avx2_ to further hide + // instruction latency. 
+ v[0] = k_madd_epi32_avx2(u[0], k32_p16_m16); + v[1] = k_madd_epi32_avx2(u[1], k32_p16_m16); + v[2] = k_madd_epi32_avx2(u[2], k32_p16_m16); + v[3] = k_madd_epi32_avx2(u[3], k32_p16_m16); + v[4] = k_madd_epi32_avx2(u[0], k32_p16_p16); + v[5] = k_madd_epi32_avx2(u[1], k32_p16_p16); + v[6] = k_madd_epi32_avx2(u[2], k32_p16_p16); + v[7] = k_madd_epi32_avx2(u[3], k32_p16_p16); - u[0] = k_packs_epi64_avx2(v[0], v[1]); - u[1] = k_packs_epi64_avx2(v[2], v[3]); - u[2] = k_packs_epi64_avx2(v[4], v[5]); - u[3] = k_packs_epi64_avx2(v[6], v[7]); + u[0] = k_packs_epi64_avx2(v[0], v[1]); + u[1] = k_packs_epi64_avx2(v[2], v[3]); + u[2] = k_packs_epi64_avx2(v[4], v[5]); + u[3] = k_packs_epi64_avx2(v[6], v[7]); - v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING); - v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING); - v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING); - v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING); - lstep1[10] = _mm256_srai_epi32(v[0], DCT_CONST_BITS); - lstep1[11] = _mm256_srai_epi32(v[1], DCT_CONST_BITS); - lstep1[12] = _mm256_srai_epi32(v[2], DCT_CONST_BITS); - lstep1[13] = _mm256_srai_epi32(v[3], DCT_CONST_BITS); + lstep1[10] = _mm256_srai_epi32(v[0], DCT_CONST_BITS); + lstep1[11] = _mm256_srai_epi32(v[1], DCT_CONST_BITS); + lstep1[12] = _mm256_srai_epi32(v[2], DCT_CONST_BITS); + lstep1[13] = _mm256_srai_epi32(v[3], DCT_CONST_BITS); } { - const __m256i k32_m08_p24 = pair256_set_epi32(-cospi_8_64, cospi_24_64); - const __m256i k32_m24_m08 = pair256_set_epi32(-cospi_24_64, -cospi_8_64); - const __m256i k32_p24_p08 = pair256_set_epi32(cospi_24_64, cospi_8_64); + const __m256i k32_m08_p24 = + pair256_set_epi32(-cospi_8_64, cospi_24_64); + const __m256i k32_m24_m08 = + pair256_set_epi32(-cospi_24_64, -cospi_8_64); + const __m256i 
k32_p24_p08 = + pair256_set_epi32(cospi_24_64, cospi_8_64); - u[ 0] = _mm256_unpacklo_epi32(lstep3[36], lstep3[58]); - u[ 1] = _mm256_unpackhi_epi32(lstep3[36], lstep3[58]); - u[ 2] = _mm256_unpacklo_epi32(lstep3[37], lstep3[59]); - u[ 3] = _mm256_unpackhi_epi32(lstep3[37], lstep3[59]); - u[ 4] = _mm256_unpacklo_epi32(lstep3[38], lstep3[56]); - u[ 5] = _mm256_unpackhi_epi32(lstep3[38], lstep3[56]); - u[ 6] = _mm256_unpacklo_epi32(lstep3[39], lstep3[57]); - u[ 7] = _mm256_unpackhi_epi32(lstep3[39], lstep3[57]); - u[ 8] = _mm256_unpacklo_epi32(lstep3[40], lstep3[54]); - u[ 9] = _mm256_unpackhi_epi32(lstep3[40], lstep3[54]); + u[0] = _mm256_unpacklo_epi32(lstep3[36], lstep3[58]); + u[1] = _mm256_unpackhi_epi32(lstep3[36], lstep3[58]); + u[2] = _mm256_unpacklo_epi32(lstep3[37], lstep3[59]); + u[3] = _mm256_unpackhi_epi32(lstep3[37], lstep3[59]); + u[4] = _mm256_unpacklo_epi32(lstep3[38], lstep3[56]); + u[5] = _mm256_unpackhi_epi32(lstep3[38], lstep3[56]); + u[6] = _mm256_unpacklo_epi32(lstep3[39], lstep3[57]); + u[7] = _mm256_unpackhi_epi32(lstep3[39], lstep3[57]); + u[8] = _mm256_unpacklo_epi32(lstep3[40], lstep3[54]); + u[9] = _mm256_unpackhi_epi32(lstep3[40], lstep3[54]); u[10] = _mm256_unpacklo_epi32(lstep3[41], lstep3[55]); u[11] = _mm256_unpackhi_epi32(lstep3[41], lstep3[55]); u[12] = _mm256_unpacklo_epi32(lstep3[42], lstep3[52]); @@ -1443,16 +1678,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = _mm256_unpacklo_epi32(lstep3[43], lstep3[53]); u[15] = _mm256_unpackhi_epi32(lstep3[43], lstep3[53]); - v[ 0] = k_madd_epi32_avx2(u[ 0], k32_m08_p24); - v[ 1] = k_madd_epi32_avx2(u[ 1], k32_m08_p24); - v[ 2] = k_madd_epi32_avx2(u[ 2], k32_m08_p24); - v[ 3] = k_madd_epi32_avx2(u[ 3], k32_m08_p24); - v[ 4] = k_madd_epi32_avx2(u[ 4], k32_m08_p24); - v[ 5] = k_madd_epi32_avx2(u[ 5], k32_m08_p24); - v[ 6] = k_madd_epi32_avx2(u[ 6], k32_m08_p24); - v[ 7] = k_madd_epi32_avx2(u[ 7], k32_m08_p24); - v[ 8] = k_madd_epi32_avx2(u[ 8], k32_m24_m08); - v[ 9] = 
k_madd_epi32_avx2(u[ 9], k32_m24_m08); + v[0] = k_madd_epi32_avx2(u[0], k32_m08_p24); + v[1] = k_madd_epi32_avx2(u[1], k32_m08_p24); + v[2] = k_madd_epi32_avx2(u[2], k32_m08_p24); + v[3] = k_madd_epi32_avx2(u[3], k32_m08_p24); + v[4] = k_madd_epi32_avx2(u[4], k32_m08_p24); + v[5] = k_madd_epi32_avx2(u[5], k32_m08_p24); + v[6] = k_madd_epi32_avx2(u[6], k32_m08_p24); + v[7] = k_madd_epi32_avx2(u[7], k32_m08_p24); + v[8] = k_madd_epi32_avx2(u[8], k32_m24_m08); + v[9] = k_madd_epi32_avx2(u[9], k32_m24_m08); v[10] = k_madd_epi32_avx2(u[10], k32_m24_m08); v[11] = k_madd_epi32_avx2(u[11], k32_m24_m08); v[12] = k_madd_epi32_avx2(u[12], k32_m24_m08); @@ -1463,29 +1698,29 @@ void FDCT32x32_2D_AVX2(const int16_t *input, v[17] = k_madd_epi32_avx2(u[13], k32_m08_p24); v[18] = k_madd_epi32_avx2(u[14], k32_m08_p24); v[19] = k_madd_epi32_avx2(u[15], k32_m08_p24); - v[20] = k_madd_epi32_avx2(u[ 8], k32_m08_p24); - v[21] = k_madd_epi32_avx2(u[ 9], k32_m08_p24); + v[20] = k_madd_epi32_avx2(u[8], k32_m08_p24); + v[21] = k_madd_epi32_avx2(u[9], k32_m08_p24); v[22] = k_madd_epi32_avx2(u[10], k32_m08_p24); v[23] = k_madd_epi32_avx2(u[11], k32_m08_p24); - v[24] = k_madd_epi32_avx2(u[ 4], k32_p24_p08); - v[25] = k_madd_epi32_avx2(u[ 5], k32_p24_p08); - v[26] = k_madd_epi32_avx2(u[ 6], k32_p24_p08); - v[27] = k_madd_epi32_avx2(u[ 7], k32_p24_p08); - v[28] = k_madd_epi32_avx2(u[ 0], k32_p24_p08); - v[29] = k_madd_epi32_avx2(u[ 1], k32_p24_p08); - v[30] = k_madd_epi32_avx2(u[ 2], k32_p24_p08); - v[31] = k_madd_epi32_avx2(u[ 3], k32_p24_p08); + v[24] = k_madd_epi32_avx2(u[4], k32_p24_p08); + v[25] = k_madd_epi32_avx2(u[5], k32_p24_p08); + v[26] = k_madd_epi32_avx2(u[6], k32_p24_p08); + v[27] = k_madd_epi32_avx2(u[7], k32_p24_p08); + v[28] = k_madd_epi32_avx2(u[0], k32_p24_p08); + v[29] = k_madd_epi32_avx2(u[1], k32_p24_p08); + v[30] = k_madd_epi32_avx2(u[2], k32_p24_p08); + v[31] = k_madd_epi32_avx2(u[3], k32_p24_p08); - u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]); - u[ 1] = k_packs_epi64_avx2(v[ 
2], v[ 3]); - u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]); - u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]); - u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]); - u[ 5] = k_packs_epi64_avx2(v[10], v[11]); - u[ 6] = k_packs_epi64_avx2(v[12], v[13]); - u[ 7] = k_packs_epi64_avx2(v[14], v[15]); - u[ 8] = k_packs_epi64_avx2(v[16], v[17]); - u[ 9] = k_packs_epi64_avx2(v[18], v[19]); + u[0] = k_packs_epi64_avx2(v[0], v[1]); + u[1] = k_packs_epi64_avx2(v[2], v[3]); + u[2] = k_packs_epi64_avx2(v[4], v[5]); + u[3] = k_packs_epi64_avx2(v[6], v[7]); + u[4] = k_packs_epi64_avx2(v[8], v[9]); + u[5] = k_packs_epi64_avx2(v[10], v[11]); + u[6] = k_packs_epi64_avx2(v[12], v[13]); + u[7] = k_packs_epi64_avx2(v[14], v[15]); + u[8] = k_packs_epi64_avx2(v[16], v[17]); + u[9] = k_packs_epi64_avx2(v[18], v[19]); u[10] = k_packs_epi64_avx2(v[20], v[21]); u[11] = k_packs_epi64_avx2(v[22], v[23]); u[12] = k_packs_epi64_avx2(v[24], v[25]); @@ -1493,16 +1728,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = k_packs_epi64_avx2(v[28], v[29]); u[15] = k_packs_epi64_avx2(v[30], v[31]); - v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); - v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); - v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); - v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); - v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); - v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); - v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); - v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); - v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); - v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); + v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING); + v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING); + v[6] = 
_mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING); + v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING); + v[8] = _mm256_add_epi32(u[8], k__DCT_CONST_ROUNDING); + v[9] = _mm256_add_epi32(u[9], k__DCT_CONST_ROUNDING); v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING); v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING); v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING); @@ -1510,16 +1745,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING); v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING); - lstep1[36] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS); - lstep1[37] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS); - lstep1[38] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS); - lstep1[39] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS); - lstep1[40] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS); - lstep1[41] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS); - lstep1[42] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS); - lstep1[43] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS); - lstep1[52] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS); - lstep1[53] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS); + lstep1[36] = _mm256_srai_epi32(v[0], DCT_CONST_BITS); + lstep1[37] = _mm256_srai_epi32(v[1], DCT_CONST_BITS); + lstep1[38] = _mm256_srai_epi32(v[2], DCT_CONST_BITS); + lstep1[39] = _mm256_srai_epi32(v[3], DCT_CONST_BITS); + lstep1[40] = _mm256_srai_epi32(v[4], DCT_CONST_BITS); + lstep1[41] = _mm256_srai_epi32(v[5], DCT_CONST_BITS); + lstep1[42] = _mm256_srai_epi32(v[6], DCT_CONST_BITS); + lstep1[43] = _mm256_srai_epi32(v[7], DCT_CONST_BITS); + lstep1[52] = _mm256_srai_epi32(v[8], DCT_CONST_BITS); + lstep1[53] = _mm256_srai_epi32(v[9], DCT_CONST_BITS); lstep1[54] = _mm256_srai_epi32(v[10], DCT_CONST_BITS); lstep1[55] = _mm256_srai_epi32(v[11], DCT_CONST_BITS); lstep1[56] = _mm256_srai_epi32(v[12], DCT_CONST_BITS); @@ -1529,20 +1764,24 @@ void FDCT32x32_2D_AVX2(const int16_t *input, } // stage 5 { - lstep2[ 8] = _mm256_add_epi32(lstep1[10], lstep3[ 8]); 
- lstep2[ 9] = _mm256_add_epi32(lstep1[11], lstep3[ 9]); - lstep2[10] = _mm256_sub_epi32(lstep3[ 8], lstep1[10]); - lstep2[11] = _mm256_sub_epi32(lstep3[ 9], lstep1[11]); + lstep2[8] = _mm256_add_epi32(lstep1[10], lstep3[8]); + lstep2[9] = _mm256_add_epi32(lstep1[11], lstep3[9]); + lstep2[10] = _mm256_sub_epi32(lstep3[8], lstep1[10]); + lstep2[11] = _mm256_sub_epi32(lstep3[9], lstep1[11]); lstep2[12] = _mm256_sub_epi32(lstep3[14], lstep1[12]); lstep2[13] = _mm256_sub_epi32(lstep3[15], lstep1[13]); lstep2[14] = _mm256_add_epi32(lstep1[12], lstep3[14]); lstep2[15] = _mm256_add_epi32(lstep1[13], lstep3[15]); } { - const __m256i k32_p16_p16 = pair256_set_epi32(cospi_16_64, cospi_16_64); - const __m256i k32_p16_m16 = pair256_set_epi32(cospi_16_64, -cospi_16_64); - const __m256i k32_p24_p08 = pair256_set_epi32(cospi_24_64, cospi_8_64); - const __m256i k32_m08_p24 = pair256_set_epi32(-cospi_8_64, cospi_24_64); + const __m256i k32_p16_p16 = + pair256_set_epi32(cospi_16_64, cospi_16_64); + const __m256i k32_p16_m16 = + pair256_set_epi32(cospi_16_64, -cospi_16_64); + const __m256i k32_p24_p08 = + pair256_set_epi32(cospi_24_64, cospi_8_64); + const __m256i k32_m08_p24 = + pair256_set_epi32(-cospi_8_64, cospi_24_64); u[0] = _mm256_unpacklo_epi32(lstep1[0], lstep1[2]); u[1] = _mm256_unpackhi_epi32(lstep1[0], lstep1[2]); @@ -1555,16 +1794,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, // TODO(jingning): manually inline k_madd_epi32_avx2_ to further hide // instruction latency. 
- v[ 0] = k_madd_epi32_avx2(u[0], k32_p16_p16); - v[ 1] = k_madd_epi32_avx2(u[1], k32_p16_p16); - v[ 2] = k_madd_epi32_avx2(u[2], k32_p16_p16); - v[ 3] = k_madd_epi32_avx2(u[3], k32_p16_p16); - v[ 4] = k_madd_epi32_avx2(u[0], k32_p16_m16); - v[ 5] = k_madd_epi32_avx2(u[1], k32_p16_m16); - v[ 6] = k_madd_epi32_avx2(u[2], k32_p16_m16); - v[ 7] = k_madd_epi32_avx2(u[3], k32_p16_m16); - v[ 8] = k_madd_epi32_avx2(u[4], k32_p24_p08); - v[ 9] = k_madd_epi32_avx2(u[5], k32_p24_p08); + v[0] = k_madd_epi32_avx2(u[0], k32_p16_p16); + v[1] = k_madd_epi32_avx2(u[1], k32_p16_p16); + v[2] = k_madd_epi32_avx2(u[2], k32_p16_p16); + v[3] = k_madd_epi32_avx2(u[3], k32_p16_p16); + v[4] = k_madd_epi32_avx2(u[0], k32_p16_m16); + v[5] = k_madd_epi32_avx2(u[1], k32_p16_m16); + v[6] = k_madd_epi32_avx2(u[2], k32_p16_m16); + v[7] = k_madd_epi32_avx2(u[3], k32_p16_m16); + v[8] = k_madd_epi32_avx2(u[4], k32_p24_p08); + v[9] = k_madd_epi32_avx2(u[5], k32_p24_p08); v[10] = k_madd_epi32_avx2(u[6], k32_p24_p08); v[11] = k_madd_epi32_avx2(u[7], k32_p24_p08); v[12] = k_madd_epi32_avx2(u[4], k32_m08_p24); @@ -1599,14 +1838,14 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS); u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS); - sign[0] = _mm256_cmpgt_epi32(kZero,u[0]); - sign[1] = _mm256_cmpgt_epi32(kZero,u[1]); - sign[2] = _mm256_cmpgt_epi32(kZero,u[2]); - sign[3] = _mm256_cmpgt_epi32(kZero,u[3]); - sign[4] = _mm256_cmpgt_epi32(kZero,u[4]); - sign[5] = _mm256_cmpgt_epi32(kZero,u[5]); - sign[6] = _mm256_cmpgt_epi32(kZero,u[6]); - sign[7] = _mm256_cmpgt_epi32(kZero,u[7]); + sign[0] = _mm256_cmpgt_epi32(kZero, u[0]); + sign[1] = _mm256_cmpgt_epi32(kZero, u[1]); + sign[2] = _mm256_cmpgt_epi32(kZero, u[2]); + sign[3] = _mm256_cmpgt_epi32(kZero, u[3]); + sign[4] = _mm256_cmpgt_epi32(kZero, u[4]); + sign[5] = _mm256_cmpgt_epi32(kZero, u[5]); + sign[6] = _mm256_cmpgt_epi32(kZero, u[6]); + sign[7] = _mm256_cmpgt_epi32(kZero, u[7]); u[0] = _mm256_sub_epi32(u[0], 
sign[0]); u[1] = _mm256_sub_epi32(u[1], sign[1]); @@ -1636,15 +1875,18 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[7] = _mm256_srai_epi32(u[7], 2); // Combine - out[ 0] = _mm256_packs_epi32(u[0], u[1]); + out[0] = _mm256_packs_epi32(u[0], u[1]); out[16] = _mm256_packs_epi32(u[2], u[3]); - out[ 8] = _mm256_packs_epi32(u[4], u[5]); + out[8] = _mm256_packs_epi32(u[4], u[5]); out[24] = _mm256_packs_epi32(u[6], u[7]); } { - const __m256i k32_m08_p24 = pair256_set_epi32(-cospi_8_64, cospi_24_64); - const __m256i k32_m24_m08 = pair256_set_epi32(-cospi_24_64, -cospi_8_64); - const __m256i k32_p24_p08 = pair256_set_epi32(cospi_24_64, cospi_8_64); + const __m256i k32_m08_p24 = + pair256_set_epi32(-cospi_8_64, cospi_24_64); + const __m256i k32_m24_m08 = + pair256_set_epi32(-cospi_24_64, -cospi_8_64); + const __m256i k32_p24_p08 = + pair256_set_epi32(cospi_24_64, cospi_8_64); u[0] = _mm256_unpacklo_epi32(lstep1[18], lstep1[28]); u[1] = _mm256_unpackhi_epi32(lstep1[18], lstep1[28]); @@ -1663,8 +1905,8 @@ void FDCT32x32_2D_AVX2(const int16_t *input, v[5] = k_madd_epi32_avx2(u[5], k32_m24_m08); v[6] = k_madd_epi32_avx2(u[6], k32_m24_m08); v[7] = k_madd_epi32_avx2(u[7], k32_m24_m08); - v[ 8] = k_madd_epi32_avx2(u[4], k32_m08_p24); - v[ 9] = k_madd_epi32_avx2(u[5], k32_m08_p24); + v[8] = k_madd_epi32_avx2(u[4], k32_m08_p24); + v[9] = k_madd_epi32_avx2(u[5], k32_m08_p24); v[10] = k_madd_epi32_avx2(u[6], k32_m08_p24); v[11] = k_madd_epi32_avx2(u[7], k32_m08_p24); v[12] = k_madd_epi32_avx2(u[0], k32_p24_p08); @@ -1735,15 +1977,19 @@ void FDCT32x32_2D_AVX2(const int16_t *input, } // stage 6 { - const __m256i k32_p28_p04 = pair256_set_epi32(cospi_28_64, cospi_4_64); - const __m256i k32_p12_p20 = pair256_set_epi32(cospi_12_64, cospi_20_64); - const __m256i k32_m20_p12 = pair256_set_epi32(-cospi_20_64, cospi_12_64); - const __m256i k32_m04_p28 = pair256_set_epi32(-cospi_4_64, cospi_28_64); + const __m256i k32_p28_p04 = + pair256_set_epi32(cospi_28_64, cospi_4_64); + const __m256i 
k32_p12_p20 = + pair256_set_epi32(cospi_12_64, cospi_20_64); + const __m256i k32_m20_p12 = + pair256_set_epi32(-cospi_20_64, cospi_12_64); + const __m256i k32_m04_p28 = + pair256_set_epi32(-cospi_4_64, cospi_28_64); - u[0] = _mm256_unpacklo_epi32(lstep2[ 8], lstep2[14]); - u[1] = _mm256_unpackhi_epi32(lstep2[ 8], lstep2[14]); - u[2] = _mm256_unpacklo_epi32(lstep2[ 9], lstep2[15]); - u[3] = _mm256_unpackhi_epi32(lstep2[ 9], lstep2[15]); + u[0] = _mm256_unpacklo_epi32(lstep2[8], lstep2[14]); + u[1] = _mm256_unpackhi_epi32(lstep2[8], lstep2[14]); + u[2] = _mm256_unpacklo_epi32(lstep2[9], lstep2[15]); + u[3] = _mm256_unpackhi_epi32(lstep2[9], lstep2[15]); u[4] = _mm256_unpacklo_epi32(lstep2[10], lstep2[12]); u[5] = _mm256_unpackhi_epi32(lstep2[10], lstep2[12]); u[6] = _mm256_unpacklo_epi32(lstep2[11], lstep2[13]); @@ -1752,10 +1998,10 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[9] = _mm256_unpackhi_epi32(lstep2[10], lstep2[12]); u[10] = _mm256_unpacklo_epi32(lstep2[11], lstep2[13]); u[11] = _mm256_unpackhi_epi32(lstep2[11], lstep2[13]); - u[12] = _mm256_unpacklo_epi32(lstep2[ 8], lstep2[14]); - u[13] = _mm256_unpackhi_epi32(lstep2[ 8], lstep2[14]); - u[14] = _mm256_unpacklo_epi32(lstep2[ 9], lstep2[15]); - u[15] = _mm256_unpackhi_epi32(lstep2[ 9], lstep2[15]); + u[12] = _mm256_unpacklo_epi32(lstep2[8], lstep2[14]); + u[13] = _mm256_unpackhi_epi32(lstep2[8], lstep2[14]); + u[14] = _mm256_unpacklo_epi32(lstep2[9], lstep2[15]); + u[15] = _mm256_unpackhi_epi32(lstep2[9], lstep2[15]); v[0] = k_madd_epi32_avx2(u[0], k32_p28_p04); v[1] = k_madd_epi32_avx2(u[1], k32_p28_p04); @@ -1765,8 +2011,8 @@ void FDCT32x32_2D_AVX2(const int16_t *input, v[5] = k_madd_epi32_avx2(u[5], k32_p12_p20); v[6] = k_madd_epi32_avx2(u[6], k32_p12_p20); v[7] = k_madd_epi32_avx2(u[7], k32_p12_p20); - v[ 8] = k_madd_epi32_avx2(u[ 8], k32_m20_p12); - v[ 9] = k_madd_epi32_avx2(u[ 9], k32_m20_p12); + v[8] = k_madd_epi32_avx2(u[8], k32_m20_p12); + v[9] = k_madd_epi32_avx2(u[9], k32_m20_p12); v[10] = 
k_madd_epi32_avx2(u[10], k32_m20_p12); v[11] = k_madd_epi32_avx2(u[11], k32_m20_p12); v[12] = k_madd_epi32_avx2(u[12], k32_m04_p28); @@ -1801,14 +2047,14 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS); u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS); - sign[0] = _mm256_cmpgt_epi32(kZero,u[0]); - sign[1] = _mm256_cmpgt_epi32(kZero,u[1]); - sign[2] = _mm256_cmpgt_epi32(kZero,u[2]); - sign[3] = _mm256_cmpgt_epi32(kZero,u[3]); - sign[4] = _mm256_cmpgt_epi32(kZero,u[4]); - sign[5] = _mm256_cmpgt_epi32(kZero,u[5]); - sign[6] = _mm256_cmpgt_epi32(kZero,u[6]); - sign[7] = _mm256_cmpgt_epi32(kZero,u[7]); + sign[0] = _mm256_cmpgt_epi32(kZero, u[0]); + sign[1] = _mm256_cmpgt_epi32(kZero, u[1]); + sign[2] = _mm256_cmpgt_epi32(kZero, u[2]); + sign[3] = _mm256_cmpgt_epi32(kZero, u[3]); + sign[4] = _mm256_cmpgt_epi32(kZero, u[4]); + sign[5] = _mm256_cmpgt_epi32(kZero, u[5]); + sign[6] = _mm256_cmpgt_epi32(kZero, u[6]); + sign[7] = _mm256_cmpgt_epi32(kZero, u[7]); u[0] = _mm256_sub_epi32(u[0], sign[0]); u[1] = _mm256_sub_epi32(u[1], sign[1]); @@ -1837,7 +2083,7 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[6] = _mm256_srai_epi32(u[6], 2); u[7] = _mm256_srai_epi32(u[7], 2); - out[ 4] = _mm256_packs_epi32(u[0], u[1]); + out[4] = _mm256_packs_epi32(u[0], u[1]); out[20] = _mm256_packs_epi32(u[2], u[3]); out[12] = _mm256_packs_epi32(u[4], u[5]); out[28] = _mm256_packs_epi32(u[6], u[7]); @@ -1861,24 +2107,29 @@ void FDCT32x32_2D_AVX2(const int16_t *input, lstep3[31] = _mm256_add_epi32(lstep2[29], lstep1[31]); } { - const __m256i k32_m04_p28 = pair256_set_epi32(-cospi_4_64, cospi_28_64); - const __m256i k32_m28_m04 = pair256_set_epi32(-cospi_28_64, -cospi_4_64); - const __m256i k32_m20_p12 = pair256_set_epi32(-cospi_20_64, cospi_12_64); - const __m256i k32_m12_m20 = pair256_set_epi32(-cospi_12_64, - -cospi_20_64); - const __m256i k32_p12_p20 = pair256_set_epi32(cospi_12_64, cospi_20_64); - const __m256i k32_p28_p04 = 
pair256_set_epi32(cospi_28_64, cospi_4_64); + const __m256i k32_m04_p28 = + pair256_set_epi32(-cospi_4_64, cospi_28_64); + const __m256i k32_m28_m04 = + pair256_set_epi32(-cospi_28_64, -cospi_4_64); + const __m256i k32_m20_p12 = + pair256_set_epi32(-cospi_20_64, cospi_12_64); + const __m256i k32_m12_m20 = + pair256_set_epi32(-cospi_12_64, -cospi_20_64); + const __m256i k32_p12_p20 = + pair256_set_epi32(cospi_12_64, cospi_20_64); + const __m256i k32_p28_p04 = + pair256_set_epi32(cospi_28_64, cospi_4_64); - u[ 0] = _mm256_unpacklo_epi32(lstep2[34], lstep2[60]); - u[ 1] = _mm256_unpackhi_epi32(lstep2[34], lstep2[60]); - u[ 2] = _mm256_unpacklo_epi32(lstep2[35], lstep2[61]); - u[ 3] = _mm256_unpackhi_epi32(lstep2[35], lstep2[61]); - u[ 4] = _mm256_unpacklo_epi32(lstep2[36], lstep2[58]); - u[ 5] = _mm256_unpackhi_epi32(lstep2[36], lstep2[58]); - u[ 6] = _mm256_unpacklo_epi32(lstep2[37], lstep2[59]); - u[ 7] = _mm256_unpackhi_epi32(lstep2[37], lstep2[59]); - u[ 8] = _mm256_unpacklo_epi32(lstep2[42], lstep2[52]); - u[ 9] = _mm256_unpackhi_epi32(lstep2[42], lstep2[52]); + u[0] = _mm256_unpacklo_epi32(lstep2[34], lstep2[60]); + u[1] = _mm256_unpackhi_epi32(lstep2[34], lstep2[60]); + u[2] = _mm256_unpacklo_epi32(lstep2[35], lstep2[61]); + u[3] = _mm256_unpackhi_epi32(lstep2[35], lstep2[61]); + u[4] = _mm256_unpacklo_epi32(lstep2[36], lstep2[58]); + u[5] = _mm256_unpackhi_epi32(lstep2[36], lstep2[58]); + u[6] = _mm256_unpacklo_epi32(lstep2[37], lstep2[59]); + u[7] = _mm256_unpackhi_epi32(lstep2[37], lstep2[59]); + u[8] = _mm256_unpacklo_epi32(lstep2[42], lstep2[52]); + u[9] = _mm256_unpackhi_epi32(lstep2[42], lstep2[52]); u[10] = _mm256_unpacklo_epi32(lstep2[43], lstep2[53]); u[11] = _mm256_unpackhi_epi32(lstep2[43], lstep2[53]); u[12] = _mm256_unpacklo_epi32(lstep2[44], lstep2[50]); @@ -1886,16 +2137,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = _mm256_unpacklo_epi32(lstep2[45], lstep2[51]); u[15] = _mm256_unpackhi_epi32(lstep2[45], lstep2[51]); - v[ 0] = 
k_madd_epi32_avx2(u[ 0], k32_m04_p28); - v[ 1] = k_madd_epi32_avx2(u[ 1], k32_m04_p28); - v[ 2] = k_madd_epi32_avx2(u[ 2], k32_m04_p28); - v[ 3] = k_madd_epi32_avx2(u[ 3], k32_m04_p28); - v[ 4] = k_madd_epi32_avx2(u[ 4], k32_m28_m04); - v[ 5] = k_madd_epi32_avx2(u[ 5], k32_m28_m04); - v[ 6] = k_madd_epi32_avx2(u[ 6], k32_m28_m04); - v[ 7] = k_madd_epi32_avx2(u[ 7], k32_m28_m04); - v[ 8] = k_madd_epi32_avx2(u[ 8], k32_m20_p12); - v[ 9] = k_madd_epi32_avx2(u[ 9], k32_m20_p12); + v[0] = k_madd_epi32_avx2(u[0], k32_m04_p28); + v[1] = k_madd_epi32_avx2(u[1], k32_m04_p28); + v[2] = k_madd_epi32_avx2(u[2], k32_m04_p28); + v[3] = k_madd_epi32_avx2(u[3], k32_m04_p28); + v[4] = k_madd_epi32_avx2(u[4], k32_m28_m04); + v[5] = k_madd_epi32_avx2(u[5], k32_m28_m04); + v[6] = k_madd_epi32_avx2(u[6], k32_m28_m04); + v[7] = k_madd_epi32_avx2(u[7], k32_m28_m04); + v[8] = k_madd_epi32_avx2(u[8], k32_m20_p12); + v[9] = k_madd_epi32_avx2(u[9], k32_m20_p12); v[10] = k_madd_epi32_avx2(u[10], k32_m20_p12); v[11] = k_madd_epi32_avx2(u[11], k32_m20_p12); v[12] = k_madd_epi32_avx2(u[12], k32_m12_m20); @@ -1906,29 +2157,29 @@ void FDCT32x32_2D_AVX2(const int16_t *input, v[17] = k_madd_epi32_avx2(u[13], k32_m20_p12); v[18] = k_madd_epi32_avx2(u[14], k32_m20_p12); v[19] = k_madd_epi32_avx2(u[15], k32_m20_p12); - v[20] = k_madd_epi32_avx2(u[ 8], k32_p12_p20); - v[21] = k_madd_epi32_avx2(u[ 9], k32_p12_p20); + v[20] = k_madd_epi32_avx2(u[8], k32_p12_p20); + v[21] = k_madd_epi32_avx2(u[9], k32_p12_p20); v[22] = k_madd_epi32_avx2(u[10], k32_p12_p20); v[23] = k_madd_epi32_avx2(u[11], k32_p12_p20); - v[24] = k_madd_epi32_avx2(u[ 4], k32_m04_p28); - v[25] = k_madd_epi32_avx2(u[ 5], k32_m04_p28); - v[26] = k_madd_epi32_avx2(u[ 6], k32_m04_p28); - v[27] = k_madd_epi32_avx2(u[ 7], k32_m04_p28); - v[28] = k_madd_epi32_avx2(u[ 0], k32_p28_p04); - v[29] = k_madd_epi32_avx2(u[ 1], k32_p28_p04); - v[30] = k_madd_epi32_avx2(u[ 2], k32_p28_p04); - v[31] = k_madd_epi32_avx2(u[ 3], k32_p28_p04); + v[24] = 
k_madd_epi32_avx2(u[4], k32_m04_p28); + v[25] = k_madd_epi32_avx2(u[5], k32_m04_p28); + v[26] = k_madd_epi32_avx2(u[6], k32_m04_p28); + v[27] = k_madd_epi32_avx2(u[7], k32_m04_p28); + v[28] = k_madd_epi32_avx2(u[0], k32_p28_p04); + v[29] = k_madd_epi32_avx2(u[1], k32_p28_p04); + v[30] = k_madd_epi32_avx2(u[2], k32_p28_p04); + v[31] = k_madd_epi32_avx2(u[3], k32_p28_p04); - u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]); - u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]); - u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]); - u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]); - u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]); - u[ 5] = k_packs_epi64_avx2(v[10], v[11]); - u[ 6] = k_packs_epi64_avx2(v[12], v[13]); - u[ 7] = k_packs_epi64_avx2(v[14], v[15]); - u[ 8] = k_packs_epi64_avx2(v[16], v[17]); - u[ 9] = k_packs_epi64_avx2(v[18], v[19]); + u[0] = k_packs_epi64_avx2(v[0], v[1]); + u[1] = k_packs_epi64_avx2(v[2], v[3]); + u[2] = k_packs_epi64_avx2(v[4], v[5]); + u[3] = k_packs_epi64_avx2(v[6], v[7]); + u[4] = k_packs_epi64_avx2(v[8], v[9]); + u[5] = k_packs_epi64_avx2(v[10], v[11]); + u[6] = k_packs_epi64_avx2(v[12], v[13]); + u[7] = k_packs_epi64_avx2(v[14], v[15]); + u[8] = k_packs_epi64_avx2(v[16], v[17]); + u[9] = k_packs_epi64_avx2(v[18], v[19]); u[10] = k_packs_epi64_avx2(v[20], v[21]); u[11] = k_packs_epi64_avx2(v[22], v[23]); u[12] = k_packs_epi64_avx2(v[24], v[25]); @@ -1936,16 +2187,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = k_packs_epi64_avx2(v[28], v[29]); u[15] = k_packs_epi64_avx2(v[30], v[31]); - v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); - v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); - v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); - v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); - v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); - v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); - v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); - v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); - v[ 8] = 
_mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); - v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); + v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING); + v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING); + v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING); + v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING); + v[8] = _mm256_add_epi32(u[8], k__DCT_CONST_ROUNDING); + v[9] = _mm256_add_epi32(u[9], k__DCT_CONST_ROUNDING); v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING); v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING); v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING); @@ -1953,16 +2204,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING); v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING); - lstep3[34] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS); - lstep3[35] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS); - lstep3[36] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS); - lstep3[37] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS); - lstep3[42] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS); - lstep3[43] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS); - lstep3[44] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS); - lstep3[45] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS); - lstep3[50] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS); - lstep3[51] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS); + lstep3[34] = _mm256_srai_epi32(v[0], DCT_CONST_BITS); + lstep3[35] = _mm256_srai_epi32(v[1], DCT_CONST_BITS); + lstep3[36] = _mm256_srai_epi32(v[2], DCT_CONST_BITS); + lstep3[37] = _mm256_srai_epi32(v[3], DCT_CONST_BITS); + lstep3[42] = _mm256_srai_epi32(v[4], DCT_CONST_BITS); + lstep3[43] = _mm256_srai_epi32(v[5], DCT_CONST_BITS); + lstep3[44] = _mm256_srai_epi32(v[6], DCT_CONST_BITS); + lstep3[45] = 
_mm256_srai_epi32(v[7], DCT_CONST_BITS); + lstep3[50] = _mm256_srai_epi32(v[8], DCT_CONST_BITS); + lstep3[51] = _mm256_srai_epi32(v[9], DCT_CONST_BITS); lstep3[52] = _mm256_srai_epi32(v[10], DCT_CONST_BITS); lstep3[53] = _mm256_srai_epi32(v[11], DCT_CONST_BITS); lstep3[58] = _mm256_srai_epi32(v[12], DCT_CONST_BITS); @@ -1972,25 +2223,33 @@ void FDCT32x32_2D_AVX2(const int16_t *input, } // stage 7 { - const __m256i k32_p30_p02 = pair256_set_epi32(cospi_30_64, cospi_2_64); - const __m256i k32_p14_p18 = pair256_set_epi32(cospi_14_64, cospi_18_64); - const __m256i k32_p22_p10 = pair256_set_epi32(cospi_22_64, cospi_10_64); - const __m256i k32_p06_p26 = pair256_set_epi32(cospi_6_64, cospi_26_64); - const __m256i k32_m26_p06 = pair256_set_epi32(-cospi_26_64, cospi_6_64); - const __m256i k32_m10_p22 = pair256_set_epi32(-cospi_10_64, cospi_22_64); - const __m256i k32_m18_p14 = pair256_set_epi32(-cospi_18_64, cospi_14_64); - const __m256i k32_m02_p30 = pair256_set_epi32(-cospi_2_64, cospi_30_64); + const __m256i k32_p30_p02 = + pair256_set_epi32(cospi_30_64, cospi_2_64); + const __m256i k32_p14_p18 = + pair256_set_epi32(cospi_14_64, cospi_18_64); + const __m256i k32_p22_p10 = + pair256_set_epi32(cospi_22_64, cospi_10_64); + const __m256i k32_p06_p26 = + pair256_set_epi32(cospi_6_64, cospi_26_64); + const __m256i k32_m26_p06 = + pair256_set_epi32(-cospi_26_64, cospi_6_64); + const __m256i k32_m10_p22 = + pair256_set_epi32(-cospi_10_64, cospi_22_64); + const __m256i k32_m18_p14 = + pair256_set_epi32(-cospi_18_64, cospi_14_64); + const __m256i k32_m02_p30 = + pair256_set_epi32(-cospi_2_64, cospi_30_64); - u[ 0] = _mm256_unpacklo_epi32(lstep3[16], lstep3[30]); - u[ 1] = _mm256_unpackhi_epi32(lstep3[16], lstep3[30]); - u[ 2] = _mm256_unpacklo_epi32(lstep3[17], lstep3[31]); - u[ 3] = _mm256_unpackhi_epi32(lstep3[17], lstep3[31]); - u[ 4] = _mm256_unpacklo_epi32(lstep3[18], lstep3[28]); - u[ 5] = _mm256_unpackhi_epi32(lstep3[18], lstep3[28]); - u[ 6] = 
_mm256_unpacklo_epi32(lstep3[19], lstep3[29]); - u[ 7] = _mm256_unpackhi_epi32(lstep3[19], lstep3[29]); - u[ 8] = _mm256_unpacklo_epi32(lstep3[20], lstep3[26]); - u[ 9] = _mm256_unpackhi_epi32(lstep3[20], lstep3[26]); + u[0] = _mm256_unpacklo_epi32(lstep3[16], lstep3[30]); + u[1] = _mm256_unpackhi_epi32(lstep3[16], lstep3[30]); + u[2] = _mm256_unpacklo_epi32(lstep3[17], lstep3[31]); + u[3] = _mm256_unpackhi_epi32(lstep3[17], lstep3[31]); + u[4] = _mm256_unpacklo_epi32(lstep3[18], lstep3[28]); + u[5] = _mm256_unpackhi_epi32(lstep3[18], lstep3[28]); + u[6] = _mm256_unpacklo_epi32(lstep3[19], lstep3[29]); + u[7] = _mm256_unpackhi_epi32(lstep3[19], lstep3[29]); + u[8] = _mm256_unpacklo_epi32(lstep3[20], lstep3[26]); + u[9] = _mm256_unpackhi_epi32(lstep3[20], lstep3[26]); u[10] = _mm256_unpacklo_epi32(lstep3[21], lstep3[27]); u[11] = _mm256_unpackhi_epi32(lstep3[21], lstep3[27]); u[12] = _mm256_unpacklo_epi32(lstep3[22], lstep3[24]); @@ -1998,16 +2257,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = _mm256_unpacklo_epi32(lstep3[23], lstep3[25]); u[15] = _mm256_unpackhi_epi32(lstep3[23], lstep3[25]); - v[ 0] = k_madd_epi32_avx2(u[ 0], k32_p30_p02); - v[ 1] = k_madd_epi32_avx2(u[ 1], k32_p30_p02); - v[ 2] = k_madd_epi32_avx2(u[ 2], k32_p30_p02); - v[ 3] = k_madd_epi32_avx2(u[ 3], k32_p30_p02); - v[ 4] = k_madd_epi32_avx2(u[ 4], k32_p14_p18); - v[ 5] = k_madd_epi32_avx2(u[ 5], k32_p14_p18); - v[ 6] = k_madd_epi32_avx2(u[ 6], k32_p14_p18); - v[ 7] = k_madd_epi32_avx2(u[ 7], k32_p14_p18); - v[ 8] = k_madd_epi32_avx2(u[ 8], k32_p22_p10); - v[ 9] = k_madd_epi32_avx2(u[ 9], k32_p22_p10); + v[0] = k_madd_epi32_avx2(u[0], k32_p30_p02); + v[1] = k_madd_epi32_avx2(u[1], k32_p30_p02); + v[2] = k_madd_epi32_avx2(u[2], k32_p30_p02); + v[3] = k_madd_epi32_avx2(u[3], k32_p30_p02); + v[4] = k_madd_epi32_avx2(u[4], k32_p14_p18); + v[5] = k_madd_epi32_avx2(u[5], k32_p14_p18); + v[6] = k_madd_epi32_avx2(u[6], k32_p14_p18); + v[7] = k_madd_epi32_avx2(u[7], k32_p14_p18); + v[8] = 
k_madd_epi32_avx2(u[8], k32_p22_p10); + v[9] = k_madd_epi32_avx2(u[9], k32_p22_p10); v[10] = k_madd_epi32_avx2(u[10], k32_p22_p10); v[11] = k_madd_epi32_avx2(u[11], k32_p22_p10); v[12] = k_madd_epi32_avx2(u[12], k32_p06_p26); @@ -2018,29 +2277,29 @@ void FDCT32x32_2D_AVX2(const int16_t *input, v[17] = k_madd_epi32_avx2(u[13], k32_m26_p06); v[18] = k_madd_epi32_avx2(u[14], k32_m26_p06); v[19] = k_madd_epi32_avx2(u[15], k32_m26_p06); - v[20] = k_madd_epi32_avx2(u[ 8], k32_m10_p22); - v[21] = k_madd_epi32_avx2(u[ 9], k32_m10_p22); + v[20] = k_madd_epi32_avx2(u[8], k32_m10_p22); + v[21] = k_madd_epi32_avx2(u[9], k32_m10_p22); v[22] = k_madd_epi32_avx2(u[10], k32_m10_p22); v[23] = k_madd_epi32_avx2(u[11], k32_m10_p22); - v[24] = k_madd_epi32_avx2(u[ 4], k32_m18_p14); - v[25] = k_madd_epi32_avx2(u[ 5], k32_m18_p14); - v[26] = k_madd_epi32_avx2(u[ 6], k32_m18_p14); - v[27] = k_madd_epi32_avx2(u[ 7], k32_m18_p14); - v[28] = k_madd_epi32_avx2(u[ 0], k32_m02_p30); - v[29] = k_madd_epi32_avx2(u[ 1], k32_m02_p30); - v[30] = k_madd_epi32_avx2(u[ 2], k32_m02_p30); - v[31] = k_madd_epi32_avx2(u[ 3], k32_m02_p30); + v[24] = k_madd_epi32_avx2(u[4], k32_m18_p14); + v[25] = k_madd_epi32_avx2(u[5], k32_m18_p14); + v[26] = k_madd_epi32_avx2(u[6], k32_m18_p14); + v[27] = k_madd_epi32_avx2(u[7], k32_m18_p14); + v[28] = k_madd_epi32_avx2(u[0], k32_m02_p30); + v[29] = k_madd_epi32_avx2(u[1], k32_m02_p30); + v[30] = k_madd_epi32_avx2(u[2], k32_m02_p30); + v[31] = k_madd_epi32_avx2(u[3], k32_m02_p30); - u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]); - u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]); - u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]); - u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]); - u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]); - u[ 5] = k_packs_epi64_avx2(v[10], v[11]); - u[ 6] = k_packs_epi64_avx2(v[12], v[13]); - u[ 7] = k_packs_epi64_avx2(v[14], v[15]); - u[ 8] = k_packs_epi64_avx2(v[16], v[17]); - u[ 9] = k_packs_epi64_avx2(v[18], v[19]); + u[0] = k_packs_epi64_avx2(v[0], v[1]); + u[1] = 
k_packs_epi64_avx2(v[2], v[3]); + u[2] = k_packs_epi64_avx2(v[4], v[5]); + u[3] = k_packs_epi64_avx2(v[6], v[7]); + u[4] = k_packs_epi64_avx2(v[8], v[9]); + u[5] = k_packs_epi64_avx2(v[10], v[11]); + u[6] = k_packs_epi64_avx2(v[12], v[13]); + u[7] = k_packs_epi64_avx2(v[14], v[15]); + u[8] = k_packs_epi64_avx2(v[16], v[17]); + u[9] = k_packs_epi64_avx2(v[18], v[19]); u[10] = k_packs_epi64_avx2(v[20], v[21]); u[11] = k_packs_epi64_avx2(v[22], v[23]); u[12] = k_packs_epi64_avx2(v[24], v[25]); @@ -2048,16 +2307,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = k_packs_epi64_avx2(v[28], v[29]); u[15] = k_packs_epi64_avx2(v[30], v[31]); - v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); - v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); - v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); - v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); - v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); - v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); - v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); - v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); - v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); - v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); + v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING); + v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING); + v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING); + v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING); + v[8] = _mm256_add_epi32(u[8], k__DCT_CONST_ROUNDING); + v[9] = _mm256_add_epi32(u[9], k__DCT_CONST_ROUNDING); v[10] = _mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING); v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING); v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING); @@ -2065,16 +2324,16 @@ void 
FDCT32x32_2D_AVX2(const int16_t *input, v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING); v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING); - u[ 0] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS); - u[ 1] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS); - u[ 2] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS); - u[ 3] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS); - u[ 4] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS); - u[ 5] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS); - u[ 6] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS); - u[ 7] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS); - u[ 8] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS); - u[ 9] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS); + u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS); + u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS); + u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS); + u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS); + u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS); + u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS); + u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS); + u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS); + u[8] = _mm256_srai_epi32(v[8], DCT_CONST_BITS); + u[9] = _mm256_srai_epi32(v[9], DCT_CONST_BITS); u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS); u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS); u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS); @@ -2082,33 +2341,33 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS); u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS); - v[ 0] = _mm256_cmpgt_epi32(kZero,u[ 0]); - v[ 1] = _mm256_cmpgt_epi32(kZero,u[ 1]); - v[ 2] = _mm256_cmpgt_epi32(kZero,u[ 2]); - v[ 3] = _mm256_cmpgt_epi32(kZero,u[ 3]); - v[ 4] = _mm256_cmpgt_epi32(kZero,u[ 4]); - v[ 5] = _mm256_cmpgt_epi32(kZero,u[ 5]); - v[ 6] = _mm256_cmpgt_epi32(kZero,u[ 6]); - v[ 7] = _mm256_cmpgt_epi32(kZero,u[ 7]); - v[ 8] = _mm256_cmpgt_epi32(kZero,u[ 8]); - v[ 9] = _mm256_cmpgt_epi32(kZero,u[ 9]); - v[10] = _mm256_cmpgt_epi32(kZero,u[10]); - v[11] = 
_mm256_cmpgt_epi32(kZero,u[11]); - v[12] = _mm256_cmpgt_epi32(kZero,u[12]); - v[13] = _mm256_cmpgt_epi32(kZero,u[13]); - v[14] = _mm256_cmpgt_epi32(kZero,u[14]); - v[15] = _mm256_cmpgt_epi32(kZero,u[15]); + v[0] = _mm256_cmpgt_epi32(kZero, u[0]); + v[1] = _mm256_cmpgt_epi32(kZero, u[1]); + v[2] = _mm256_cmpgt_epi32(kZero, u[2]); + v[3] = _mm256_cmpgt_epi32(kZero, u[3]); + v[4] = _mm256_cmpgt_epi32(kZero, u[4]); + v[5] = _mm256_cmpgt_epi32(kZero, u[5]); + v[6] = _mm256_cmpgt_epi32(kZero, u[6]); + v[7] = _mm256_cmpgt_epi32(kZero, u[7]); + v[8] = _mm256_cmpgt_epi32(kZero, u[8]); + v[9] = _mm256_cmpgt_epi32(kZero, u[9]); + v[10] = _mm256_cmpgt_epi32(kZero, u[10]); + v[11] = _mm256_cmpgt_epi32(kZero, u[11]); + v[12] = _mm256_cmpgt_epi32(kZero, u[12]); + v[13] = _mm256_cmpgt_epi32(kZero, u[13]); + v[14] = _mm256_cmpgt_epi32(kZero, u[14]); + v[15] = _mm256_cmpgt_epi32(kZero, u[15]); - u[ 0] = _mm256_sub_epi32(u[ 0], v[ 0]); - u[ 1] = _mm256_sub_epi32(u[ 1], v[ 1]); - u[ 2] = _mm256_sub_epi32(u[ 2], v[ 2]); - u[ 3] = _mm256_sub_epi32(u[ 3], v[ 3]); - u[ 4] = _mm256_sub_epi32(u[ 4], v[ 4]); - u[ 5] = _mm256_sub_epi32(u[ 5], v[ 5]); - u[ 6] = _mm256_sub_epi32(u[ 6], v[ 6]); - u[ 7] = _mm256_sub_epi32(u[ 7], v[ 7]); - u[ 8] = _mm256_sub_epi32(u[ 8], v[ 8]); - u[ 9] = _mm256_sub_epi32(u[ 9], v[ 9]); + u[0] = _mm256_sub_epi32(u[0], v[0]); + u[1] = _mm256_sub_epi32(u[1], v[1]); + u[2] = _mm256_sub_epi32(u[2], v[2]); + u[3] = _mm256_sub_epi32(u[3], v[3]); + u[4] = _mm256_sub_epi32(u[4], v[4]); + u[5] = _mm256_sub_epi32(u[5], v[5]); + u[6] = _mm256_sub_epi32(u[6], v[6]); + u[7] = _mm256_sub_epi32(u[7], v[7]); + u[8] = _mm256_sub_epi32(u[8], v[8]); + u[9] = _mm256_sub_epi32(u[9], v[9]); u[10] = _mm256_sub_epi32(u[10], v[10]); u[11] = _mm256_sub_epi32(u[11], v[11]); u[12] = _mm256_sub_epi32(u[12], v[12]); @@ -2116,16 +2375,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = _mm256_sub_epi32(u[14], v[14]); u[15] = _mm256_sub_epi32(u[15], v[15]); - v[ 0] = _mm256_add_epi32(u[ 
0], K32One); - v[ 1] = _mm256_add_epi32(u[ 1], K32One); - v[ 2] = _mm256_add_epi32(u[ 2], K32One); - v[ 3] = _mm256_add_epi32(u[ 3], K32One); - v[ 4] = _mm256_add_epi32(u[ 4], K32One); - v[ 5] = _mm256_add_epi32(u[ 5], K32One); - v[ 6] = _mm256_add_epi32(u[ 6], K32One); - v[ 7] = _mm256_add_epi32(u[ 7], K32One); - v[ 8] = _mm256_add_epi32(u[ 8], K32One); - v[ 9] = _mm256_add_epi32(u[ 9], K32One); + v[0] = _mm256_add_epi32(u[0], K32One); + v[1] = _mm256_add_epi32(u[1], K32One); + v[2] = _mm256_add_epi32(u[2], K32One); + v[3] = _mm256_add_epi32(u[3], K32One); + v[4] = _mm256_add_epi32(u[4], K32One); + v[5] = _mm256_add_epi32(u[5], K32One); + v[6] = _mm256_add_epi32(u[6], K32One); + v[7] = _mm256_add_epi32(u[7], K32One); + v[8] = _mm256_add_epi32(u[8], K32One); + v[9] = _mm256_add_epi32(u[9], K32One); v[10] = _mm256_add_epi32(u[10], K32One); v[11] = _mm256_add_epi32(u[11], K32One); v[12] = _mm256_add_epi32(u[12], K32One); @@ -2133,16 +2392,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, v[14] = _mm256_add_epi32(u[14], K32One); v[15] = _mm256_add_epi32(u[15], K32One); - u[ 0] = _mm256_srai_epi32(v[ 0], 2); - u[ 1] = _mm256_srai_epi32(v[ 1], 2); - u[ 2] = _mm256_srai_epi32(v[ 2], 2); - u[ 3] = _mm256_srai_epi32(v[ 3], 2); - u[ 4] = _mm256_srai_epi32(v[ 4], 2); - u[ 5] = _mm256_srai_epi32(v[ 5], 2); - u[ 6] = _mm256_srai_epi32(v[ 6], 2); - u[ 7] = _mm256_srai_epi32(v[ 7], 2); - u[ 8] = _mm256_srai_epi32(v[ 8], 2); - u[ 9] = _mm256_srai_epi32(v[ 9], 2); + u[0] = _mm256_srai_epi32(v[0], 2); + u[1] = _mm256_srai_epi32(v[1], 2); + u[2] = _mm256_srai_epi32(v[2], 2); + u[3] = _mm256_srai_epi32(v[3], 2); + u[4] = _mm256_srai_epi32(v[4], 2); + u[5] = _mm256_srai_epi32(v[5], 2); + u[6] = _mm256_srai_epi32(v[6], 2); + u[7] = _mm256_srai_epi32(v[7], 2); + u[8] = _mm256_srai_epi32(v[8], 2); + u[9] = _mm256_srai_epi32(v[9], 2); u[10] = _mm256_srai_epi32(v[10], 2); u[11] = _mm256_srai_epi32(v[11], 2); u[12] = _mm256_srai_epi32(v[12], 2); @@ -2150,11 +2409,11 @@ void 
FDCT32x32_2D_AVX2(const int16_t *input, u[14] = _mm256_srai_epi32(v[14], 2); u[15] = _mm256_srai_epi32(v[15], 2); - out[ 2] = _mm256_packs_epi32(u[0], u[1]); + out[2] = _mm256_packs_epi32(u[0], u[1]); out[18] = _mm256_packs_epi32(u[2], u[3]); out[10] = _mm256_packs_epi32(u[4], u[5]); out[26] = _mm256_packs_epi32(u[6], u[7]); - out[ 6] = _mm256_packs_epi32(u[8], u[9]); + out[6] = _mm256_packs_epi32(u[8], u[9]); out[22] = _mm256_packs_epi32(u[10], u[11]); out[14] = _mm256_packs_epi32(u[12], u[13]); out[30] = _mm256_packs_epi32(u[14], u[15]); @@ -2195,25 +2454,33 @@ void FDCT32x32_2D_AVX2(const int16_t *input, } // stage 8 { - const __m256i k32_p31_p01 = pair256_set_epi32(cospi_31_64, cospi_1_64); - const __m256i k32_p15_p17 = pair256_set_epi32(cospi_15_64, cospi_17_64); - const __m256i k32_p23_p09 = pair256_set_epi32(cospi_23_64, cospi_9_64); - const __m256i k32_p07_p25 = pair256_set_epi32(cospi_7_64, cospi_25_64); - const __m256i k32_m25_p07 = pair256_set_epi32(-cospi_25_64, cospi_7_64); - const __m256i k32_m09_p23 = pair256_set_epi32(-cospi_9_64, cospi_23_64); - const __m256i k32_m17_p15 = pair256_set_epi32(-cospi_17_64, cospi_15_64); - const __m256i k32_m01_p31 = pair256_set_epi32(-cospi_1_64, cospi_31_64); + const __m256i k32_p31_p01 = + pair256_set_epi32(cospi_31_64, cospi_1_64); + const __m256i k32_p15_p17 = + pair256_set_epi32(cospi_15_64, cospi_17_64); + const __m256i k32_p23_p09 = + pair256_set_epi32(cospi_23_64, cospi_9_64); + const __m256i k32_p07_p25 = + pair256_set_epi32(cospi_7_64, cospi_25_64); + const __m256i k32_m25_p07 = + pair256_set_epi32(-cospi_25_64, cospi_7_64); + const __m256i k32_m09_p23 = + pair256_set_epi32(-cospi_9_64, cospi_23_64); + const __m256i k32_m17_p15 = + pair256_set_epi32(-cospi_17_64, cospi_15_64); + const __m256i k32_m01_p31 = + pair256_set_epi32(-cospi_1_64, cospi_31_64); - u[ 0] = _mm256_unpacklo_epi32(lstep1[32], lstep1[62]); - u[ 1] = _mm256_unpackhi_epi32(lstep1[32], lstep1[62]); - u[ 2] = _mm256_unpacklo_epi32(lstep1[33], 
lstep1[63]); - u[ 3] = _mm256_unpackhi_epi32(lstep1[33], lstep1[63]); - u[ 4] = _mm256_unpacklo_epi32(lstep1[34], lstep1[60]); - u[ 5] = _mm256_unpackhi_epi32(lstep1[34], lstep1[60]); - u[ 6] = _mm256_unpacklo_epi32(lstep1[35], lstep1[61]); - u[ 7] = _mm256_unpackhi_epi32(lstep1[35], lstep1[61]); - u[ 8] = _mm256_unpacklo_epi32(lstep1[36], lstep1[58]); - u[ 9] = _mm256_unpackhi_epi32(lstep1[36], lstep1[58]); + u[0] = _mm256_unpacklo_epi32(lstep1[32], lstep1[62]); + u[1] = _mm256_unpackhi_epi32(lstep1[32], lstep1[62]); + u[2] = _mm256_unpacklo_epi32(lstep1[33], lstep1[63]); + u[3] = _mm256_unpackhi_epi32(lstep1[33], lstep1[63]); + u[4] = _mm256_unpacklo_epi32(lstep1[34], lstep1[60]); + u[5] = _mm256_unpackhi_epi32(lstep1[34], lstep1[60]); + u[6] = _mm256_unpacklo_epi32(lstep1[35], lstep1[61]); + u[7] = _mm256_unpackhi_epi32(lstep1[35], lstep1[61]); + u[8] = _mm256_unpacklo_epi32(lstep1[36], lstep1[58]); + u[9] = _mm256_unpackhi_epi32(lstep1[36], lstep1[58]); u[10] = _mm256_unpacklo_epi32(lstep1[37], lstep1[59]); u[11] = _mm256_unpackhi_epi32(lstep1[37], lstep1[59]); u[12] = _mm256_unpacklo_epi32(lstep1[38], lstep1[56]); @@ -2221,16 +2488,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = _mm256_unpacklo_epi32(lstep1[39], lstep1[57]); u[15] = _mm256_unpackhi_epi32(lstep1[39], lstep1[57]); - v[ 0] = k_madd_epi32_avx2(u[ 0], k32_p31_p01); - v[ 1] = k_madd_epi32_avx2(u[ 1], k32_p31_p01); - v[ 2] = k_madd_epi32_avx2(u[ 2], k32_p31_p01); - v[ 3] = k_madd_epi32_avx2(u[ 3], k32_p31_p01); - v[ 4] = k_madd_epi32_avx2(u[ 4], k32_p15_p17); - v[ 5] = k_madd_epi32_avx2(u[ 5], k32_p15_p17); - v[ 6] = k_madd_epi32_avx2(u[ 6], k32_p15_p17); - v[ 7] = k_madd_epi32_avx2(u[ 7], k32_p15_p17); - v[ 8] = k_madd_epi32_avx2(u[ 8], k32_p23_p09); - v[ 9] = k_madd_epi32_avx2(u[ 9], k32_p23_p09); + v[0] = k_madd_epi32_avx2(u[0], k32_p31_p01); + v[1] = k_madd_epi32_avx2(u[1], k32_p31_p01); + v[2] = k_madd_epi32_avx2(u[2], k32_p31_p01); + v[3] = k_madd_epi32_avx2(u[3], k32_p31_p01); + 
v[4] = k_madd_epi32_avx2(u[4], k32_p15_p17); + v[5] = k_madd_epi32_avx2(u[5], k32_p15_p17); + v[6] = k_madd_epi32_avx2(u[6], k32_p15_p17); + v[7] = k_madd_epi32_avx2(u[7], k32_p15_p17); + v[8] = k_madd_epi32_avx2(u[8], k32_p23_p09); + v[9] = k_madd_epi32_avx2(u[9], k32_p23_p09); v[10] = k_madd_epi32_avx2(u[10], k32_p23_p09); v[11] = k_madd_epi32_avx2(u[11], k32_p23_p09); v[12] = k_madd_epi32_avx2(u[12], k32_p07_p25); @@ -2241,29 +2508,29 @@ void FDCT32x32_2D_AVX2(const int16_t *input, v[17] = k_madd_epi32_avx2(u[13], k32_m25_p07); v[18] = k_madd_epi32_avx2(u[14], k32_m25_p07); v[19] = k_madd_epi32_avx2(u[15], k32_m25_p07); - v[20] = k_madd_epi32_avx2(u[ 8], k32_m09_p23); - v[21] = k_madd_epi32_avx2(u[ 9], k32_m09_p23); + v[20] = k_madd_epi32_avx2(u[8], k32_m09_p23); + v[21] = k_madd_epi32_avx2(u[9], k32_m09_p23); v[22] = k_madd_epi32_avx2(u[10], k32_m09_p23); v[23] = k_madd_epi32_avx2(u[11], k32_m09_p23); - v[24] = k_madd_epi32_avx2(u[ 4], k32_m17_p15); - v[25] = k_madd_epi32_avx2(u[ 5], k32_m17_p15); - v[26] = k_madd_epi32_avx2(u[ 6], k32_m17_p15); - v[27] = k_madd_epi32_avx2(u[ 7], k32_m17_p15); - v[28] = k_madd_epi32_avx2(u[ 0], k32_m01_p31); - v[29] = k_madd_epi32_avx2(u[ 1], k32_m01_p31); - v[30] = k_madd_epi32_avx2(u[ 2], k32_m01_p31); - v[31] = k_madd_epi32_avx2(u[ 3], k32_m01_p31); + v[24] = k_madd_epi32_avx2(u[4], k32_m17_p15); + v[25] = k_madd_epi32_avx2(u[5], k32_m17_p15); + v[26] = k_madd_epi32_avx2(u[6], k32_m17_p15); + v[27] = k_madd_epi32_avx2(u[7], k32_m17_p15); + v[28] = k_madd_epi32_avx2(u[0], k32_m01_p31); + v[29] = k_madd_epi32_avx2(u[1], k32_m01_p31); + v[30] = k_madd_epi32_avx2(u[2], k32_m01_p31); + v[31] = k_madd_epi32_avx2(u[3], k32_m01_p31); - u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]); - u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]); - u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]); - u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]); - u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]); - u[ 5] = k_packs_epi64_avx2(v[10], v[11]); - u[ 6] = k_packs_epi64_avx2(v[12], 
v[13]); - u[ 7] = k_packs_epi64_avx2(v[14], v[15]); - u[ 8] = k_packs_epi64_avx2(v[16], v[17]); - u[ 9] = k_packs_epi64_avx2(v[18], v[19]); + u[0] = k_packs_epi64_avx2(v[0], v[1]); + u[1] = k_packs_epi64_avx2(v[2], v[3]); + u[2] = k_packs_epi64_avx2(v[4], v[5]); + u[3] = k_packs_epi64_avx2(v[6], v[7]); + u[4] = k_packs_epi64_avx2(v[8], v[9]); + u[5] = k_packs_epi64_avx2(v[10], v[11]); + u[6] = k_packs_epi64_avx2(v[12], v[13]); + u[7] = k_packs_epi64_avx2(v[14], v[15]); + u[8] = k_packs_epi64_avx2(v[16], v[17]); + u[9] = k_packs_epi64_avx2(v[18], v[19]); u[10] = k_packs_epi64_avx2(v[20], v[21]); u[11] = k_packs_epi64_avx2(v[22], v[23]); u[12] = k_packs_epi64_avx2(v[24], v[25]); @@ -2271,16 +2538,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = k_packs_epi64_avx2(v[28], v[29]); u[15] = k_packs_epi64_avx2(v[30], v[31]); - v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); - v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); - v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); - v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); - v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); - v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); - v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); - v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); - v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); - v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); + v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING); + v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING); + v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING); + v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING); + v[8] = _mm256_add_epi32(u[8], k__DCT_CONST_ROUNDING); + v[9] = _mm256_add_epi32(u[9], k__DCT_CONST_ROUNDING); v[10] = 
_mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING); v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING); v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING); @@ -2288,16 +2555,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING); v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING); - u[ 0] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS); - u[ 1] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS); - u[ 2] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS); - u[ 3] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS); - u[ 4] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS); - u[ 5] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS); - u[ 6] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS); - u[ 7] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS); - u[ 8] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS); - u[ 9] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS); + u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS); + u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS); + u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS); + u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS); + u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS); + u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS); + u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS); + u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS); + u[8] = _mm256_srai_epi32(v[8], DCT_CONST_BITS); + u[9] = _mm256_srai_epi32(v[9], DCT_CONST_BITS); u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS); u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS); u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS); @@ -2305,33 +2572,33 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS); u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS); - v[ 0] = _mm256_cmpgt_epi32(kZero,u[ 0]); - v[ 1] = _mm256_cmpgt_epi32(kZero,u[ 1]); - v[ 2] = _mm256_cmpgt_epi32(kZero,u[ 2]); - v[ 3] = _mm256_cmpgt_epi32(kZero,u[ 3]); - v[ 4] = _mm256_cmpgt_epi32(kZero,u[ 4]); - v[ 5] = _mm256_cmpgt_epi32(kZero,u[ 5]); - v[ 6] = _mm256_cmpgt_epi32(kZero,u[ 6]); - v[ 7] = 
_mm256_cmpgt_epi32(kZero,u[ 7]); - v[ 8] = _mm256_cmpgt_epi32(kZero,u[ 8]); - v[ 9] = _mm256_cmpgt_epi32(kZero,u[ 9]); - v[10] = _mm256_cmpgt_epi32(kZero,u[10]); - v[11] = _mm256_cmpgt_epi32(kZero,u[11]); - v[12] = _mm256_cmpgt_epi32(kZero,u[12]); - v[13] = _mm256_cmpgt_epi32(kZero,u[13]); - v[14] = _mm256_cmpgt_epi32(kZero,u[14]); - v[15] = _mm256_cmpgt_epi32(kZero,u[15]); + v[0] = _mm256_cmpgt_epi32(kZero, u[0]); + v[1] = _mm256_cmpgt_epi32(kZero, u[1]); + v[2] = _mm256_cmpgt_epi32(kZero, u[2]); + v[3] = _mm256_cmpgt_epi32(kZero, u[3]); + v[4] = _mm256_cmpgt_epi32(kZero, u[4]); + v[5] = _mm256_cmpgt_epi32(kZero, u[5]); + v[6] = _mm256_cmpgt_epi32(kZero, u[6]); + v[7] = _mm256_cmpgt_epi32(kZero, u[7]); + v[8] = _mm256_cmpgt_epi32(kZero, u[8]); + v[9] = _mm256_cmpgt_epi32(kZero, u[9]); + v[10] = _mm256_cmpgt_epi32(kZero, u[10]); + v[11] = _mm256_cmpgt_epi32(kZero, u[11]); + v[12] = _mm256_cmpgt_epi32(kZero, u[12]); + v[13] = _mm256_cmpgt_epi32(kZero, u[13]); + v[14] = _mm256_cmpgt_epi32(kZero, u[14]); + v[15] = _mm256_cmpgt_epi32(kZero, u[15]); - u[ 0] = _mm256_sub_epi32(u[ 0], v[ 0]); - u[ 1] = _mm256_sub_epi32(u[ 1], v[ 1]); - u[ 2] = _mm256_sub_epi32(u[ 2], v[ 2]); - u[ 3] = _mm256_sub_epi32(u[ 3], v[ 3]); - u[ 4] = _mm256_sub_epi32(u[ 4], v[ 4]); - u[ 5] = _mm256_sub_epi32(u[ 5], v[ 5]); - u[ 6] = _mm256_sub_epi32(u[ 6], v[ 6]); - u[ 7] = _mm256_sub_epi32(u[ 7], v[ 7]); - u[ 8] = _mm256_sub_epi32(u[ 8], v[ 8]); - u[ 9] = _mm256_sub_epi32(u[ 9], v[ 9]); + u[0] = _mm256_sub_epi32(u[0], v[0]); + u[1] = _mm256_sub_epi32(u[1], v[1]); + u[2] = _mm256_sub_epi32(u[2], v[2]); + u[3] = _mm256_sub_epi32(u[3], v[3]); + u[4] = _mm256_sub_epi32(u[4], v[4]); + u[5] = _mm256_sub_epi32(u[5], v[5]); + u[6] = _mm256_sub_epi32(u[6], v[6]); + u[7] = _mm256_sub_epi32(u[7], v[7]); + u[8] = _mm256_sub_epi32(u[8], v[8]); + u[9] = _mm256_sub_epi32(u[9], v[9]); u[10] = _mm256_sub_epi32(u[10], v[10]); u[11] = _mm256_sub_epi32(u[11], v[11]); u[12] = _mm256_sub_epi32(u[12], v[12]); @@ 
-2373,35 +2640,43 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = _mm256_srai_epi32(v[14], 2); u[15] = _mm256_srai_epi32(v[15], 2); - out[ 1] = _mm256_packs_epi32(u[0], u[1]); + out[1] = _mm256_packs_epi32(u[0], u[1]); out[17] = _mm256_packs_epi32(u[2], u[3]); - out[ 9] = _mm256_packs_epi32(u[4], u[5]); + out[9] = _mm256_packs_epi32(u[4], u[5]); out[25] = _mm256_packs_epi32(u[6], u[7]); - out[ 7] = _mm256_packs_epi32(u[8], u[9]); + out[7] = _mm256_packs_epi32(u[8], u[9]); out[23] = _mm256_packs_epi32(u[10], u[11]); out[15] = _mm256_packs_epi32(u[12], u[13]); out[31] = _mm256_packs_epi32(u[14], u[15]); } { - const __m256i k32_p27_p05 = pair256_set_epi32(cospi_27_64, cospi_5_64); - const __m256i k32_p11_p21 = pair256_set_epi32(cospi_11_64, cospi_21_64); - const __m256i k32_p19_p13 = pair256_set_epi32(cospi_19_64, cospi_13_64); - const __m256i k32_p03_p29 = pair256_set_epi32(cospi_3_64, cospi_29_64); - const __m256i k32_m29_p03 = pair256_set_epi32(-cospi_29_64, cospi_3_64); - const __m256i k32_m13_p19 = pair256_set_epi32(-cospi_13_64, cospi_19_64); - const __m256i k32_m21_p11 = pair256_set_epi32(-cospi_21_64, cospi_11_64); - const __m256i k32_m05_p27 = pair256_set_epi32(-cospi_5_64, cospi_27_64); + const __m256i k32_p27_p05 = + pair256_set_epi32(cospi_27_64, cospi_5_64); + const __m256i k32_p11_p21 = + pair256_set_epi32(cospi_11_64, cospi_21_64); + const __m256i k32_p19_p13 = + pair256_set_epi32(cospi_19_64, cospi_13_64); + const __m256i k32_p03_p29 = + pair256_set_epi32(cospi_3_64, cospi_29_64); + const __m256i k32_m29_p03 = + pair256_set_epi32(-cospi_29_64, cospi_3_64); + const __m256i k32_m13_p19 = + pair256_set_epi32(-cospi_13_64, cospi_19_64); + const __m256i k32_m21_p11 = + pair256_set_epi32(-cospi_21_64, cospi_11_64); + const __m256i k32_m05_p27 = + pair256_set_epi32(-cospi_5_64, cospi_27_64); - u[ 0] = _mm256_unpacklo_epi32(lstep1[40], lstep1[54]); - u[ 1] = _mm256_unpackhi_epi32(lstep1[40], lstep1[54]); - u[ 2] = _mm256_unpacklo_epi32(lstep1[41], 
lstep1[55]); - u[ 3] = _mm256_unpackhi_epi32(lstep1[41], lstep1[55]); - u[ 4] = _mm256_unpacklo_epi32(lstep1[42], lstep1[52]); - u[ 5] = _mm256_unpackhi_epi32(lstep1[42], lstep1[52]); - u[ 6] = _mm256_unpacklo_epi32(lstep1[43], lstep1[53]); - u[ 7] = _mm256_unpackhi_epi32(lstep1[43], lstep1[53]); - u[ 8] = _mm256_unpacklo_epi32(lstep1[44], lstep1[50]); - u[ 9] = _mm256_unpackhi_epi32(lstep1[44], lstep1[50]); + u[0] = _mm256_unpacklo_epi32(lstep1[40], lstep1[54]); + u[1] = _mm256_unpackhi_epi32(lstep1[40], lstep1[54]); + u[2] = _mm256_unpacklo_epi32(lstep1[41], lstep1[55]); + u[3] = _mm256_unpackhi_epi32(lstep1[41], lstep1[55]); + u[4] = _mm256_unpacklo_epi32(lstep1[42], lstep1[52]); + u[5] = _mm256_unpackhi_epi32(lstep1[42], lstep1[52]); + u[6] = _mm256_unpacklo_epi32(lstep1[43], lstep1[53]); + u[7] = _mm256_unpackhi_epi32(lstep1[43], lstep1[53]); + u[8] = _mm256_unpacklo_epi32(lstep1[44], lstep1[50]); + u[9] = _mm256_unpackhi_epi32(lstep1[44], lstep1[50]); u[10] = _mm256_unpacklo_epi32(lstep1[45], lstep1[51]); u[11] = _mm256_unpackhi_epi32(lstep1[45], lstep1[51]); u[12] = _mm256_unpacklo_epi32(lstep1[46], lstep1[48]); @@ -2409,16 +2684,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = _mm256_unpacklo_epi32(lstep1[47], lstep1[49]); u[15] = _mm256_unpackhi_epi32(lstep1[47], lstep1[49]); - v[ 0] = k_madd_epi32_avx2(u[ 0], k32_p27_p05); - v[ 1] = k_madd_epi32_avx2(u[ 1], k32_p27_p05); - v[ 2] = k_madd_epi32_avx2(u[ 2], k32_p27_p05); - v[ 3] = k_madd_epi32_avx2(u[ 3], k32_p27_p05); - v[ 4] = k_madd_epi32_avx2(u[ 4], k32_p11_p21); - v[ 5] = k_madd_epi32_avx2(u[ 5], k32_p11_p21); - v[ 6] = k_madd_epi32_avx2(u[ 6], k32_p11_p21); - v[ 7] = k_madd_epi32_avx2(u[ 7], k32_p11_p21); - v[ 8] = k_madd_epi32_avx2(u[ 8], k32_p19_p13); - v[ 9] = k_madd_epi32_avx2(u[ 9], k32_p19_p13); + v[0] = k_madd_epi32_avx2(u[0], k32_p27_p05); + v[1] = k_madd_epi32_avx2(u[1], k32_p27_p05); + v[2] = k_madd_epi32_avx2(u[2], k32_p27_p05); + v[3] = k_madd_epi32_avx2(u[3], k32_p27_p05); + 
v[4] = k_madd_epi32_avx2(u[4], k32_p11_p21); + v[5] = k_madd_epi32_avx2(u[5], k32_p11_p21); + v[6] = k_madd_epi32_avx2(u[6], k32_p11_p21); + v[7] = k_madd_epi32_avx2(u[7], k32_p11_p21); + v[8] = k_madd_epi32_avx2(u[8], k32_p19_p13); + v[9] = k_madd_epi32_avx2(u[9], k32_p19_p13); v[10] = k_madd_epi32_avx2(u[10], k32_p19_p13); v[11] = k_madd_epi32_avx2(u[11], k32_p19_p13); v[12] = k_madd_epi32_avx2(u[12], k32_p03_p29); @@ -2429,29 +2704,29 @@ void FDCT32x32_2D_AVX2(const int16_t *input, v[17] = k_madd_epi32_avx2(u[13], k32_m29_p03); v[18] = k_madd_epi32_avx2(u[14], k32_m29_p03); v[19] = k_madd_epi32_avx2(u[15], k32_m29_p03); - v[20] = k_madd_epi32_avx2(u[ 8], k32_m13_p19); - v[21] = k_madd_epi32_avx2(u[ 9], k32_m13_p19); + v[20] = k_madd_epi32_avx2(u[8], k32_m13_p19); + v[21] = k_madd_epi32_avx2(u[9], k32_m13_p19); v[22] = k_madd_epi32_avx2(u[10], k32_m13_p19); v[23] = k_madd_epi32_avx2(u[11], k32_m13_p19); - v[24] = k_madd_epi32_avx2(u[ 4], k32_m21_p11); - v[25] = k_madd_epi32_avx2(u[ 5], k32_m21_p11); - v[26] = k_madd_epi32_avx2(u[ 6], k32_m21_p11); - v[27] = k_madd_epi32_avx2(u[ 7], k32_m21_p11); - v[28] = k_madd_epi32_avx2(u[ 0], k32_m05_p27); - v[29] = k_madd_epi32_avx2(u[ 1], k32_m05_p27); - v[30] = k_madd_epi32_avx2(u[ 2], k32_m05_p27); - v[31] = k_madd_epi32_avx2(u[ 3], k32_m05_p27); + v[24] = k_madd_epi32_avx2(u[4], k32_m21_p11); + v[25] = k_madd_epi32_avx2(u[5], k32_m21_p11); + v[26] = k_madd_epi32_avx2(u[6], k32_m21_p11); + v[27] = k_madd_epi32_avx2(u[7], k32_m21_p11); + v[28] = k_madd_epi32_avx2(u[0], k32_m05_p27); + v[29] = k_madd_epi32_avx2(u[1], k32_m05_p27); + v[30] = k_madd_epi32_avx2(u[2], k32_m05_p27); + v[31] = k_madd_epi32_avx2(u[3], k32_m05_p27); - u[ 0] = k_packs_epi64_avx2(v[ 0], v[ 1]); - u[ 1] = k_packs_epi64_avx2(v[ 2], v[ 3]); - u[ 2] = k_packs_epi64_avx2(v[ 4], v[ 5]); - u[ 3] = k_packs_epi64_avx2(v[ 6], v[ 7]); - u[ 4] = k_packs_epi64_avx2(v[ 8], v[ 9]); - u[ 5] = k_packs_epi64_avx2(v[10], v[11]); - u[ 6] = k_packs_epi64_avx2(v[12], 
v[13]); - u[ 7] = k_packs_epi64_avx2(v[14], v[15]); - u[ 8] = k_packs_epi64_avx2(v[16], v[17]); - u[ 9] = k_packs_epi64_avx2(v[18], v[19]); + u[0] = k_packs_epi64_avx2(v[0], v[1]); + u[1] = k_packs_epi64_avx2(v[2], v[3]); + u[2] = k_packs_epi64_avx2(v[4], v[5]); + u[3] = k_packs_epi64_avx2(v[6], v[7]); + u[4] = k_packs_epi64_avx2(v[8], v[9]); + u[5] = k_packs_epi64_avx2(v[10], v[11]); + u[6] = k_packs_epi64_avx2(v[12], v[13]); + u[7] = k_packs_epi64_avx2(v[14], v[15]); + u[8] = k_packs_epi64_avx2(v[16], v[17]); + u[9] = k_packs_epi64_avx2(v[18], v[19]); u[10] = k_packs_epi64_avx2(v[20], v[21]); u[11] = k_packs_epi64_avx2(v[22], v[23]); u[12] = k_packs_epi64_avx2(v[24], v[25]); @@ -2459,16 +2734,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = k_packs_epi64_avx2(v[28], v[29]); u[15] = k_packs_epi64_avx2(v[30], v[31]); - v[ 0] = _mm256_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); - v[ 1] = _mm256_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); - v[ 2] = _mm256_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); - v[ 3] = _mm256_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); - v[ 4] = _mm256_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); - v[ 5] = _mm256_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); - v[ 6] = _mm256_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); - v[ 7] = _mm256_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); - v[ 8] = _mm256_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); - v[ 9] = _mm256_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); + v[0] = _mm256_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm256_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm256_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm256_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[4] = _mm256_add_epi32(u[4], k__DCT_CONST_ROUNDING); + v[5] = _mm256_add_epi32(u[5], k__DCT_CONST_ROUNDING); + v[6] = _mm256_add_epi32(u[6], k__DCT_CONST_ROUNDING); + v[7] = _mm256_add_epi32(u[7], k__DCT_CONST_ROUNDING); + v[8] = _mm256_add_epi32(u[8], k__DCT_CONST_ROUNDING); + v[9] = _mm256_add_epi32(u[9], k__DCT_CONST_ROUNDING); v[10] = 
_mm256_add_epi32(u[10], k__DCT_CONST_ROUNDING); v[11] = _mm256_add_epi32(u[11], k__DCT_CONST_ROUNDING); v[12] = _mm256_add_epi32(u[12], k__DCT_CONST_ROUNDING); @@ -2476,16 +2751,16 @@ void FDCT32x32_2D_AVX2(const int16_t *input, v[14] = _mm256_add_epi32(u[14], k__DCT_CONST_ROUNDING); v[15] = _mm256_add_epi32(u[15], k__DCT_CONST_ROUNDING); - u[ 0] = _mm256_srai_epi32(v[ 0], DCT_CONST_BITS); - u[ 1] = _mm256_srai_epi32(v[ 1], DCT_CONST_BITS); - u[ 2] = _mm256_srai_epi32(v[ 2], DCT_CONST_BITS); - u[ 3] = _mm256_srai_epi32(v[ 3], DCT_CONST_BITS); - u[ 4] = _mm256_srai_epi32(v[ 4], DCT_CONST_BITS); - u[ 5] = _mm256_srai_epi32(v[ 5], DCT_CONST_BITS); - u[ 6] = _mm256_srai_epi32(v[ 6], DCT_CONST_BITS); - u[ 7] = _mm256_srai_epi32(v[ 7], DCT_CONST_BITS); - u[ 8] = _mm256_srai_epi32(v[ 8], DCT_CONST_BITS); - u[ 9] = _mm256_srai_epi32(v[ 9], DCT_CONST_BITS); + u[0] = _mm256_srai_epi32(v[0], DCT_CONST_BITS); + u[1] = _mm256_srai_epi32(v[1], DCT_CONST_BITS); + u[2] = _mm256_srai_epi32(v[2], DCT_CONST_BITS); + u[3] = _mm256_srai_epi32(v[3], DCT_CONST_BITS); + u[4] = _mm256_srai_epi32(v[4], DCT_CONST_BITS); + u[5] = _mm256_srai_epi32(v[5], DCT_CONST_BITS); + u[6] = _mm256_srai_epi32(v[6], DCT_CONST_BITS); + u[7] = _mm256_srai_epi32(v[7], DCT_CONST_BITS); + u[8] = _mm256_srai_epi32(v[8], DCT_CONST_BITS); + u[9] = _mm256_srai_epi32(v[9], DCT_CONST_BITS); u[10] = _mm256_srai_epi32(v[10], DCT_CONST_BITS); u[11] = _mm256_srai_epi32(v[11], DCT_CONST_BITS); u[12] = _mm256_srai_epi32(v[12], DCT_CONST_BITS); @@ -2493,33 +2768,33 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = _mm256_srai_epi32(v[14], DCT_CONST_BITS); u[15] = _mm256_srai_epi32(v[15], DCT_CONST_BITS); - v[ 0] = _mm256_cmpgt_epi32(kZero,u[ 0]); - v[ 1] = _mm256_cmpgt_epi32(kZero,u[ 1]); - v[ 2] = _mm256_cmpgt_epi32(kZero,u[ 2]); - v[ 3] = _mm256_cmpgt_epi32(kZero,u[ 3]); - v[ 4] = _mm256_cmpgt_epi32(kZero,u[ 4]); - v[ 5] = _mm256_cmpgt_epi32(kZero,u[ 5]); - v[ 6] = _mm256_cmpgt_epi32(kZero,u[ 6]); - v[ 7] = 
_mm256_cmpgt_epi32(kZero,u[ 7]); - v[ 8] = _mm256_cmpgt_epi32(kZero,u[ 8]); - v[ 9] = _mm256_cmpgt_epi32(kZero,u[ 9]); - v[10] = _mm256_cmpgt_epi32(kZero,u[10]); - v[11] = _mm256_cmpgt_epi32(kZero,u[11]); - v[12] = _mm256_cmpgt_epi32(kZero,u[12]); - v[13] = _mm256_cmpgt_epi32(kZero,u[13]); - v[14] = _mm256_cmpgt_epi32(kZero,u[14]); - v[15] = _mm256_cmpgt_epi32(kZero,u[15]); + v[0] = _mm256_cmpgt_epi32(kZero, u[0]); + v[1] = _mm256_cmpgt_epi32(kZero, u[1]); + v[2] = _mm256_cmpgt_epi32(kZero, u[2]); + v[3] = _mm256_cmpgt_epi32(kZero, u[3]); + v[4] = _mm256_cmpgt_epi32(kZero, u[4]); + v[5] = _mm256_cmpgt_epi32(kZero, u[5]); + v[6] = _mm256_cmpgt_epi32(kZero, u[6]); + v[7] = _mm256_cmpgt_epi32(kZero, u[7]); + v[8] = _mm256_cmpgt_epi32(kZero, u[8]); + v[9] = _mm256_cmpgt_epi32(kZero, u[9]); + v[10] = _mm256_cmpgt_epi32(kZero, u[10]); + v[11] = _mm256_cmpgt_epi32(kZero, u[11]); + v[12] = _mm256_cmpgt_epi32(kZero, u[12]); + v[13] = _mm256_cmpgt_epi32(kZero, u[13]); + v[14] = _mm256_cmpgt_epi32(kZero, u[14]); + v[15] = _mm256_cmpgt_epi32(kZero, u[15]); - u[ 0] = _mm256_sub_epi32(u[ 0], v[ 0]); - u[ 1] = _mm256_sub_epi32(u[ 1], v[ 1]); - u[ 2] = _mm256_sub_epi32(u[ 2], v[ 2]); - u[ 3] = _mm256_sub_epi32(u[ 3], v[ 3]); - u[ 4] = _mm256_sub_epi32(u[ 4], v[ 4]); - u[ 5] = _mm256_sub_epi32(u[ 5], v[ 5]); - u[ 6] = _mm256_sub_epi32(u[ 6], v[ 6]); - u[ 7] = _mm256_sub_epi32(u[ 7], v[ 7]); - u[ 8] = _mm256_sub_epi32(u[ 8], v[ 8]); - u[ 9] = _mm256_sub_epi32(u[ 9], v[ 9]); + u[0] = _mm256_sub_epi32(u[0], v[0]); + u[1] = _mm256_sub_epi32(u[1], v[1]); + u[2] = _mm256_sub_epi32(u[2], v[2]); + u[3] = _mm256_sub_epi32(u[3], v[3]); + u[4] = _mm256_sub_epi32(u[4], v[4]); + u[5] = _mm256_sub_epi32(u[5], v[5]); + u[6] = _mm256_sub_epi32(u[6], v[6]); + u[7] = _mm256_sub_epi32(u[7], v[7]); + u[8] = _mm256_sub_epi32(u[8], v[8]); + u[9] = _mm256_sub_epi32(u[9], v[9]); u[10] = _mm256_sub_epi32(u[10], v[10]); u[11] = _mm256_sub_epi32(u[11], v[11]); u[12] = _mm256_sub_epi32(u[12], v[12]); @@ 
-2561,11 +2836,11 @@ void FDCT32x32_2D_AVX2(const int16_t *input, u[14] = _mm256_srai_epi32(v[14], 2); u[15] = _mm256_srai_epi32(v[15], 2); - out[ 5] = _mm256_packs_epi32(u[0], u[1]); + out[5] = _mm256_packs_epi32(u[0], u[1]); out[21] = _mm256_packs_epi32(u[2], u[3]); out[13] = _mm256_packs_epi32(u[4], u[5]); out[29] = _mm256_packs_epi32(u[6], u[7]); - out[ 3] = _mm256_packs_epi32(u[8], u[9]); + out[3] = _mm256_packs_epi32(u[8], u[9]); out[19] = _mm256_packs_epi32(u[10], u[11]); out[11] = _mm256_packs_epi32(u[12], u[13]); out[27] = _mm256_packs_epi32(u[14], u[15]); @@ -2575,13 +2850,13 @@ void FDCT32x32_2D_AVX2(const int16_t *input, // Transpose the results, do it as four 8x8 transposes. { int transpose_block; - int16_t *output_currStep,*output_nextStep; - if (0 == pass){ - output_currStep = &intermediate[column_start * 32]; - output_nextStep = &intermediate[(column_start + 8) * 32]; - } else{ - output_currStep = &output_org[column_start * 32]; - output_nextStep = &output_org[(column_start + 8) * 32]; + int16_t *output_currStep, *output_nextStep; + if (0 == pass) { + output_currStep = &intermediate[column_start * 32]; + output_nextStep = &intermediate[(column_start + 8) * 32]; + } else { + output_currStep = &output_org[column_start * 32]; + output_nextStep = &output_org[(column_start + 8) * 32]; } for (transpose_block = 0; transpose_block < 4; ++transpose_block) { __m256i *this_out = &out[8 * transpose_block]; @@ -2684,23 +2959,39 @@ void FDCT32x32_2D_AVX2(const int16_t *input, } // Note: even though all these stores are aligned, using the aligned // intrinsic make the code slightly slower. 
- _mm_storeu_si128((__m128i *)(output_currStep + 0 * 32), _mm256_castsi256_si128(tr2_0)); - _mm_storeu_si128((__m128i *)(output_currStep + 1 * 32), _mm256_castsi256_si128(tr2_1)); - _mm_storeu_si128((__m128i *)(output_currStep + 2 * 32), _mm256_castsi256_si128(tr2_2)); - _mm_storeu_si128((__m128i *)(output_currStep + 3 * 32), _mm256_castsi256_si128(tr2_3)); - _mm_storeu_si128((__m128i *)(output_currStep + 4 * 32), _mm256_castsi256_si128(tr2_4)); - _mm_storeu_si128((__m128i *)(output_currStep + 5 * 32), _mm256_castsi256_si128(tr2_5)); - _mm_storeu_si128((__m128i *)(output_currStep + 6 * 32), _mm256_castsi256_si128(tr2_6)); - _mm_storeu_si128((__m128i *)(output_currStep + 7 * 32), _mm256_castsi256_si128(tr2_7)); + _mm_storeu_si128((__m128i *)(output_currStep + 0 * 32), + _mm256_castsi256_si128(tr2_0)); + _mm_storeu_si128((__m128i *)(output_currStep + 1 * 32), + _mm256_castsi256_si128(tr2_1)); + _mm_storeu_si128((__m128i *)(output_currStep + 2 * 32), + _mm256_castsi256_si128(tr2_2)); + _mm_storeu_si128((__m128i *)(output_currStep + 3 * 32), + _mm256_castsi256_si128(tr2_3)); + _mm_storeu_si128((__m128i *)(output_currStep + 4 * 32), + _mm256_castsi256_si128(tr2_4)); + _mm_storeu_si128((__m128i *)(output_currStep + 5 * 32), + _mm256_castsi256_si128(tr2_5)); + _mm_storeu_si128((__m128i *)(output_currStep + 6 * 32), + _mm256_castsi256_si128(tr2_6)); + _mm_storeu_si128((__m128i *)(output_currStep + 7 * 32), + _mm256_castsi256_si128(tr2_7)); - _mm_storeu_si128((__m128i *)(output_nextStep + 0 * 32), _mm256_extractf128_si256(tr2_0,1)); - _mm_storeu_si128((__m128i *)(output_nextStep + 1 * 32), _mm256_extractf128_si256(tr2_1,1)); - _mm_storeu_si128((__m128i *)(output_nextStep + 2 * 32), _mm256_extractf128_si256(tr2_2,1)); - _mm_storeu_si128((__m128i *)(output_nextStep + 3 * 32), _mm256_extractf128_si256(tr2_3,1)); - _mm_storeu_si128((__m128i *)(output_nextStep + 4 * 32), _mm256_extractf128_si256(tr2_4,1)); - _mm_storeu_si128((__m128i *)(output_nextStep + 5 * 32), 
_mm256_extractf128_si256(tr2_5,1)); - _mm_storeu_si128((__m128i *)(output_nextStep + 6 * 32), _mm256_extractf128_si256(tr2_6,1)); - _mm_storeu_si128((__m128i *)(output_nextStep + 7 * 32), _mm256_extractf128_si256(tr2_7,1)); + _mm_storeu_si128((__m128i *)(output_nextStep + 0 * 32), + _mm256_extractf128_si256(tr2_0, 1)); + _mm_storeu_si128((__m128i *)(output_nextStep + 1 * 32), + _mm256_extractf128_si256(tr2_1, 1)); + _mm_storeu_si128((__m128i *)(output_nextStep + 2 * 32), + _mm256_extractf128_si256(tr2_2, 1)); + _mm_storeu_si128((__m128i *)(output_nextStep + 3 * 32), + _mm256_extractf128_si256(tr2_3, 1)); + _mm_storeu_si128((__m128i *)(output_nextStep + 4 * 32), + _mm256_extractf128_si256(tr2_4, 1)); + _mm_storeu_si128((__m128i *)(output_nextStep + 5 * 32), + _mm256_extractf128_si256(tr2_5, 1)); + _mm_storeu_si128((__m128i *)(output_nextStep + 6 * 32), + _mm256_extractf128_si256(tr2_6, 1)); + _mm_storeu_si128((__m128i *)(output_nextStep + 7 * 32), + _mm256_extractf128_si256(tr2_7, 1)); // Process next 8x8 output_currStep += 8; output_nextStep += 8; diff --git a/libs/libvpx/vpx_dsp/x86/fwd_dct32x32_impl_sse2.h b/libs/libvpx/vpx_dsp/x86/fwd_dct32x32_impl_sse2.h index b85ae103fa..3744333909 100644 --- a/libs/libvpx/vpx_dsp/x86/fwd_dct32x32_impl_sse2.h +++ b/libs/libvpx/vpx_dsp/x86/fwd_dct32x32_impl_sse2.h @@ -22,42 +22,37 @@ #define SUB_EPI16 _mm_subs_epi16 #if FDCT32x32_HIGH_PRECISION void vpx_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) { - int i, j; - for (i = 0; i < 32; ++i) { - tran_high_t temp_in[32], temp_out[32]; - for (j = 0; j < 32; ++j) - temp_in[j] = intermediate[j * 32 + i]; - vpx_fdct32(temp_in, temp_out, 0); - for (j = 0; j < 32; ++j) - out[j + i * 32] = - (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2); - } + int i, j; + for (i = 0; i < 32; ++i) { + tran_high_t temp_in[32], temp_out[32]; + for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i]; + vpx_fdct32(temp_in, temp_out, 0); + for (j = 0; j < 32; ++j) + out[j + i 
* 32] = + (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2); + } } - #define HIGH_FDCT32x32_2D_C vpx_highbd_fdct32x32_c - #define HIGH_FDCT32x32_2D_ROWS_C vpx_fdct32x32_rows_c +#define HIGH_FDCT32x32_2D_C vpx_highbd_fdct32x32_c +#define HIGH_FDCT32x32_2D_ROWS_C vpx_fdct32x32_rows_c #else void vpx_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) { - int i, j; - for (i = 0; i < 32; ++i) { - tran_high_t temp_in[32], temp_out[32]; - for (j = 0; j < 32; ++j) - temp_in[j] = intermediate[j * 32 + i]; - vpx_fdct32(temp_in, temp_out, 1); - for (j = 0; j < 32; ++j) - out[j + i * 32] = (tran_low_t)temp_out[j]; - } + int i, j; + for (i = 0; i < 32; ++i) { + tran_high_t temp_in[32], temp_out[32]; + for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i]; + vpx_fdct32(temp_in, temp_out, 1); + for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j]; + } } - #define HIGH_FDCT32x32_2D_C vpx_highbd_fdct32x32_rd_c - #define HIGH_FDCT32x32_2D_ROWS_C vpx_fdct32x32_rd_rows_c +#define HIGH_FDCT32x32_2D_C vpx_highbd_fdct32x32_rd_c +#define HIGH_FDCT32x32_2D_ROWS_C vpx_fdct32x32_rd_rows_c #endif // FDCT32x32_HIGH_PRECISION #else #define ADD_EPI16 _mm_add_epi16 #define SUB_EPI16 _mm_sub_epi16 #endif // DCT_HIGH_BIT_DEPTH - -void FDCT32x32_2D(const int16_t *input, - tran_low_t *output_org, int stride) { +void FDCT32x32_2D(const int16_t *input, tran_low_t *output_org, int stride) { // Calculate pre-multiplied strides const int str1 = stride; const int str2 = 2 * stride; @@ -70,42 +65,42 @@ void FDCT32x32_2D(const int16_t *input, // by constructing the 32 bit constant corresponding to that pair. 
const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64); const __m128i k__cospi_p16_m16 = pair_set_epi16(+cospi_16_64, -cospi_16_64); - const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); + const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64); - const __m128i k__cospi_p24_p08 = pair_set_epi16(+cospi_24_64, cospi_8_64); - const __m128i k__cospi_p12_p20 = pair_set_epi16(+cospi_12_64, cospi_20_64); - const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); - const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); - const __m128i k__cospi_p28_p04 = pair_set_epi16(+cospi_28_64, cospi_4_64); + const __m128i k__cospi_p24_p08 = pair_set_epi16(+cospi_24_64, cospi_8_64); + const __m128i k__cospi_p12_p20 = pair_set_epi16(+cospi_12_64, cospi_20_64); + const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64); + const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); + const __m128i k__cospi_p28_p04 = pair_set_epi16(+cospi_28_64, cospi_4_64); const __m128i k__cospi_m28_m04 = pair_set_epi16(-cospi_28_64, -cospi_4_64); const __m128i k__cospi_m12_m20 = pair_set_epi16(-cospi_12_64, -cospi_20_64); - const __m128i k__cospi_p30_p02 = pair_set_epi16(+cospi_30_64, cospi_2_64); - const __m128i k__cospi_p14_p18 = pair_set_epi16(+cospi_14_64, cospi_18_64); - const __m128i k__cospi_p22_p10 = pair_set_epi16(+cospi_22_64, cospi_10_64); - const __m128i k__cospi_p06_p26 = pair_set_epi16(+cospi_6_64, cospi_26_64); - const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64); - const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64); - const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64); - const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64); - const __m128i k__cospi_p31_p01 = pair_set_epi16(+cospi_31_64, 
cospi_1_64); - const __m128i k__cospi_p15_p17 = pair_set_epi16(+cospi_15_64, cospi_17_64); - const __m128i k__cospi_p23_p09 = pair_set_epi16(+cospi_23_64, cospi_9_64); - const __m128i k__cospi_p07_p25 = pair_set_epi16(+cospi_7_64, cospi_25_64); - const __m128i k__cospi_m25_p07 = pair_set_epi16(-cospi_25_64, cospi_7_64); - const __m128i k__cospi_m09_p23 = pair_set_epi16(-cospi_9_64, cospi_23_64); - const __m128i k__cospi_m17_p15 = pair_set_epi16(-cospi_17_64, cospi_15_64); - const __m128i k__cospi_m01_p31 = pair_set_epi16(-cospi_1_64, cospi_31_64); - const __m128i k__cospi_p27_p05 = pair_set_epi16(+cospi_27_64, cospi_5_64); - const __m128i k__cospi_p11_p21 = pair_set_epi16(+cospi_11_64, cospi_21_64); - const __m128i k__cospi_p19_p13 = pair_set_epi16(+cospi_19_64, cospi_13_64); - const __m128i k__cospi_p03_p29 = pair_set_epi16(+cospi_3_64, cospi_29_64); - const __m128i k__cospi_m29_p03 = pair_set_epi16(-cospi_29_64, cospi_3_64); - const __m128i k__cospi_m13_p19 = pair_set_epi16(-cospi_13_64, cospi_19_64); - const __m128i k__cospi_m21_p11 = pair_set_epi16(-cospi_21_64, cospi_11_64); - const __m128i k__cospi_m05_p27 = pair_set_epi16(-cospi_5_64, cospi_27_64); + const __m128i k__cospi_p30_p02 = pair_set_epi16(+cospi_30_64, cospi_2_64); + const __m128i k__cospi_p14_p18 = pair_set_epi16(+cospi_14_64, cospi_18_64); + const __m128i k__cospi_p22_p10 = pair_set_epi16(+cospi_22_64, cospi_10_64); + const __m128i k__cospi_p06_p26 = pair_set_epi16(+cospi_6_64, cospi_26_64); + const __m128i k__cospi_m26_p06 = pair_set_epi16(-cospi_26_64, cospi_6_64); + const __m128i k__cospi_m10_p22 = pair_set_epi16(-cospi_10_64, cospi_22_64); + const __m128i k__cospi_m18_p14 = pair_set_epi16(-cospi_18_64, cospi_14_64); + const __m128i k__cospi_m02_p30 = pair_set_epi16(-cospi_2_64, cospi_30_64); + const __m128i k__cospi_p31_p01 = pair_set_epi16(+cospi_31_64, cospi_1_64); + const __m128i k__cospi_p15_p17 = pair_set_epi16(+cospi_15_64, cospi_17_64); + const __m128i k__cospi_p23_p09 = 
pair_set_epi16(+cospi_23_64, cospi_9_64); + const __m128i k__cospi_p07_p25 = pair_set_epi16(+cospi_7_64, cospi_25_64); + const __m128i k__cospi_m25_p07 = pair_set_epi16(-cospi_25_64, cospi_7_64); + const __m128i k__cospi_m09_p23 = pair_set_epi16(-cospi_9_64, cospi_23_64); + const __m128i k__cospi_m17_p15 = pair_set_epi16(-cospi_17_64, cospi_15_64); + const __m128i k__cospi_m01_p31 = pair_set_epi16(-cospi_1_64, cospi_31_64); + const __m128i k__cospi_p27_p05 = pair_set_epi16(+cospi_27_64, cospi_5_64); + const __m128i k__cospi_p11_p21 = pair_set_epi16(+cospi_11_64, cospi_21_64); + const __m128i k__cospi_p19_p13 = pair_set_epi16(+cospi_19_64, cospi_13_64); + const __m128i k__cospi_p03_p29 = pair_set_epi16(+cospi_3_64, cospi_29_64); + const __m128i k__cospi_m29_p03 = pair_set_epi16(-cospi_29_64, cospi_3_64); + const __m128i k__cospi_m13_p19 = pair_set_epi16(-cospi_13_64, cospi_19_64); + const __m128i k__cospi_m21_p11 = pair_set_epi16(-cospi_21_64, cospi_11_64); + const __m128i k__cospi_m05_p27 = pair_set_epi16(-cospi_5_64, cospi_27_64); const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); const __m128i kZero = _mm_set1_epi16(0); - const __m128i kOne = _mm_set1_epi16(1); + const __m128i kOne = _mm_set1_epi16(1); // Do the two transform/transpose passes int pass; #if DCT_HIGH_BIT_DEPTH @@ -123,125 +118,125 @@ void FDCT32x32_2D(const int16_t *input, // Note: even though all the loads below are aligned, using the aligned // intrinsic make the code slightly slower. if (0 == pass) { - const int16_t *in = &input[column_start]; + const int16_t *in = &input[column_start]; // step1[i] = (in[ 0 * stride] + in[(32 - 1) * stride]) << 2; // Note: the next four blocks could be in a loop. That would help the // instruction cache but is actually slower. 
{ - const int16_t *ina = in + 0 * str1; - const int16_t *inb = in + 31 * str1; - __m128i *step1a = &step1[ 0]; + const int16_t *ina = in + 0 * str1; + const int16_t *inb = in + 31 * str1; + __m128i *step1a = &step1[0]; __m128i *step1b = &step1[31]; - const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); - const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); - const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); - const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3)); - const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); - const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); - const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1)); - const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); - step1a[ 0] = _mm_add_epi16(ina0, inb0); - step1a[ 1] = _mm_add_epi16(ina1, inb1); - step1a[ 2] = _mm_add_epi16(ina2, inb2); - step1a[ 3] = _mm_add_epi16(ina3, inb3); + const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); + const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); + const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); + const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3)); + const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); + const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); + const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1)); + const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); + step1a[0] = _mm_add_epi16(ina0, inb0); + step1a[1] = _mm_add_epi16(ina1, inb1); + step1a[2] = _mm_add_epi16(ina2, inb2); + step1a[3] = _mm_add_epi16(ina3, inb3); step1b[-3] = _mm_sub_epi16(ina3, inb3); step1b[-2] = _mm_sub_epi16(ina2, inb2); step1b[-1] = _mm_sub_epi16(ina1, inb1); step1b[-0] = _mm_sub_epi16(ina0, inb0); - step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2); - step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2); - step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2); - step1a[ 3] 
= _mm_slli_epi16(step1a[ 3], 2); + step1a[0] = _mm_slli_epi16(step1a[0], 2); + step1a[1] = _mm_slli_epi16(step1a[1], 2); + step1a[2] = _mm_slli_epi16(step1a[2], 2); + step1a[3] = _mm_slli_epi16(step1a[3], 2); step1b[-3] = _mm_slli_epi16(step1b[-3], 2); step1b[-2] = _mm_slli_epi16(step1b[-2], 2); step1b[-1] = _mm_slli_epi16(step1b[-1], 2); step1b[-0] = _mm_slli_epi16(step1b[-0], 2); } { - const int16_t *ina = in + 4 * str1; - const int16_t *inb = in + 27 * str1; - __m128i *step1a = &step1[ 4]; + const int16_t *ina = in + 4 * str1; + const int16_t *inb = in + 27 * str1; + __m128i *step1a = &step1[4]; __m128i *step1b = &step1[27]; - const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); - const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); - const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); - const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3)); - const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); - const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); - const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1)); - const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); - step1a[ 0] = _mm_add_epi16(ina0, inb0); - step1a[ 1] = _mm_add_epi16(ina1, inb1); - step1a[ 2] = _mm_add_epi16(ina2, inb2); - step1a[ 3] = _mm_add_epi16(ina3, inb3); + const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); + const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); + const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); + const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3)); + const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); + const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); + const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1)); + const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); + step1a[0] = _mm_add_epi16(ina0, inb0); + step1a[1] = _mm_add_epi16(ina1, inb1); + 
step1a[2] = _mm_add_epi16(ina2, inb2); + step1a[3] = _mm_add_epi16(ina3, inb3); step1b[-3] = _mm_sub_epi16(ina3, inb3); step1b[-2] = _mm_sub_epi16(ina2, inb2); step1b[-1] = _mm_sub_epi16(ina1, inb1); step1b[-0] = _mm_sub_epi16(ina0, inb0); - step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2); - step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2); - step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2); - step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2); + step1a[0] = _mm_slli_epi16(step1a[0], 2); + step1a[1] = _mm_slli_epi16(step1a[1], 2); + step1a[2] = _mm_slli_epi16(step1a[2], 2); + step1a[3] = _mm_slli_epi16(step1a[3], 2); step1b[-3] = _mm_slli_epi16(step1b[-3], 2); step1b[-2] = _mm_slli_epi16(step1b[-2], 2); step1b[-1] = _mm_slli_epi16(step1b[-1], 2); step1b[-0] = _mm_slli_epi16(step1b[-0], 2); } { - const int16_t *ina = in + 8 * str1; - const int16_t *inb = in + 23 * str1; - __m128i *step1a = &step1[ 8]; + const int16_t *ina = in + 8 * str1; + const int16_t *inb = in + 23 * str1; + __m128i *step1a = &step1[8]; __m128i *step1b = &step1[23]; - const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); - const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); - const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); - const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3)); - const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); - const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); - const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1)); - const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); - step1a[ 0] = _mm_add_epi16(ina0, inb0); - step1a[ 1] = _mm_add_epi16(ina1, inb1); - step1a[ 2] = _mm_add_epi16(ina2, inb2); - step1a[ 3] = _mm_add_epi16(ina3, inb3); + const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); + const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); + const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); + const __m128i ina3 = 
_mm_loadu_si128((const __m128i *)(ina + str3)); + const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); + const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); + const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1)); + const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); + step1a[0] = _mm_add_epi16(ina0, inb0); + step1a[1] = _mm_add_epi16(ina1, inb1); + step1a[2] = _mm_add_epi16(ina2, inb2); + step1a[3] = _mm_add_epi16(ina3, inb3); step1b[-3] = _mm_sub_epi16(ina3, inb3); step1b[-2] = _mm_sub_epi16(ina2, inb2); step1b[-1] = _mm_sub_epi16(ina1, inb1); step1b[-0] = _mm_sub_epi16(ina0, inb0); - step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2); - step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2); - step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2); - step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2); + step1a[0] = _mm_slli_epi16(step1a[0], 2); + step1a[1] = _mm_slli_epi16(step1a[1], 2); + step1a[2] = _mm_slli_epi16(step1a[2], 2); + step1a[3] = _mm_slli_epi16(step1a[3], 2); step1b[-3] = _mm_slli_epi16(step1b[-3], 2); step1b[-2] = _mm_slli_epi16(step1b[-2], 2); step1b[-1] = _mm_slli_epi16(step1b[-1], 2); step1b[-0] = _mm_slli_epi16(step1b[-0], 2); } { - const int16_t *ina = in + 12 * str1; - const int16_t *inb = in + 19 * str1; + const int16_t *ina = in + 12 * str1; + const int16_t *inb = in + 19 * str1; __m128i *step1a = &step1[12]; __m128i *step1b = &step1[19]; - const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); - const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); - const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); - const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3)); - const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); - const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); - const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1)); - const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); - step1a[ 0] = _mm_add_epi16(ina0, 
inb0); - step1a[ 1] = _mm_add_epi16(ina1, inb1); - step1a[ 2] = _mm_add_epi16(ina2, inb2); - step1a[ 3] = _mm_add_epi16(ina3, inb3); + const __m128i ina0 = _mm_loadu_si128((const __m128i *)(ina)); + const __m128i ina1 = _mm_loadu_si128((const __m128i *)(ina + str1)); + const __m128i ina2 = _mm_loadu_si128((const __m128i *)(ina + str2)); + const __m128i ina3 = _mm_loadu_si128((const __m128i *)(ina + str3)); + const __m128i inb3 = _mm_loadu_si128((const __m128i *)(inb - str3)); + const __m128i inb2 = _mm_loadu_si128((const __m128i *)(inb - str2)); + const __m128i inb1 = _mm_loadu_si128((const __m128i *)(inb - str1)); + const __m128i inb0 = _mm_loadu_si128((const __m128i *)(inb)); + step1a[0] = _mm_add_epi16(ina0, inb0); + step1a[1] = _mm_add_epi16(ina1, inb1); + step1a[2] = _mm_add_epi16(ina2, inb2); + step1a[3] = _mm_add_epi16(ina3, inb3); step1b[-3] = _mm_sub_epi16(ina3, inb3); step1b[-2] = _mm_sub_epi16(ina2, inb2); step1b[-1] = _mm_sub_epi16(ina1, inb1); step1b[-0] = _mm_sub_epi16(ina0, inb0); - step1a[ 0] = _mm_slli_epi16(step1a[ 0], 2); - step1a[ 1] = _mm_slli_epi16(step1a[ 1], 2); - step1a[ 2] = _mm_slli_epi16(step1a[ 2], 2); - step1a[ 3] = _mm_slli_epi16(step1a[ 3], 2); + step1a[0] = _mm_slli_epi16(step1a[0], 2); + step1a[1] = _mm_slli_epi16(step1a[1], 2); + step1a[2] = _mm_slli_epi16(step1a[2], 2); + step1a[3] = _mm_slli_epi16(step1a[3], 2); step1b[-3] = _mm_slli_epi16(step1b[-3], 2); step1b[-2] = _mm_slli_epi16(step1b[-2], 2); step1b[-1] = _mm_slli_epi16(step1b[-1], 2); @@ -256,14 +251,14 @@ void FDCT32x32_2D(const int16_t *input, // Note: the next four blocks could be in a loop. That would help the // instruction cache but is actually slower. 
{ - __m128i in00 = _mm_loadu_si128((const __m128i *)(in + 0 * 32)); - __m128i in01 = _mm_loadu_si128((const __m128i *)(in + 1 * 32)); - __m128i in02 = _mm_loadu_si128((const __m128i *)(in + 2 * 32)); - __m128i in03 = _mm_loadu_si128((const __m128i *)(in + 3 * 32)); - __m128i in28 = _mm_loadu_si128((const __m128i *)(in + 28 * 32)); - __m128i in29 = _mm_loadu_si128((const __m128i *)(in + 29 * 32)); - __m128i in30 = _mm_loadu_si128((const __m128i *)(in + 30 * 32)); - __m128i in31 = _mm_loadu_si128((const __m128i *)(in + 31 * 32)); + __m128i in00 = _mm_loadu_si128((const __m128i *)(in + 0 * 32)); + __m128i in01 = _mm_loadu_si128((const __m128i *)(in + 1 * 32)); + __m128i in02 = _mm_loadu_si128((const __m128i *)(in + 2 * 32)); + __m128i in03 = _mm_loadu_si128((const __m128i *)(in + 3 * 32)); + __m128i in28 = _mm_loadu_si128((const __m128i *)(in + 28 * 32)); + __m128i in29 = _mm_loadu_si128((const __m128i *)(in + 29 * 32)); + __m128i in30 = _mm_loadu_si128((const __m128i *)(in + 30 * 32)); + __m128i in31 = _mm_loadu_si128((const __m128i *)(in + 31 * 32)); step1[0] = ADD_EPI16(in00, in31); step1[1] = ADD_EPI16(in01, in30); step1[2] = ADD_EPI16(in02, in29); @@ -283,14 +278,14 @@ void FDCT32x32_2D(const int16_t *input, #endif // DCT_HIGH_BIT_DEPTH } { - __m128i in04 = _mm_loadu_si128((const __m128i *)(in + 4 * 32)); - __m128i in05 = _mm_loadu_si128((const __m128i *)(in + 5 * 32)); - __m128i in06 = _mm_loadu_si128((const __m128i *)(in + 6 * 32)); - __m128i in07 = _mm_loadu_si128((const __m128i *)(in + 7 * 32)); - __m128i in24 = _mm_loadu_si128((const __m128i *)(in + 24 * 32)); - __m128i in25 = _mm_loadu_si128((const __m128i *)(in + 25 * 32)); - __m128i in26 = _mm_loadu_si128((const __m128i *)(in + 26 * 32)); - __m128i in27 = _mm_loadu_si128((const __m128i *)(in + 27 * 32)); + __m128i in04 = _mm_loadu_si128((const __m128i *)(in + 4 * 32)); + __m128i in05 = _mm_loadu_si128((const __m128i *)(in + 5 * 32)); + __m128i in06 = _mm_loadu_si128((const __m128i *)(in + 6 * 32)); + 
__m128i in07 = _mm_loadu_si128((const __m128i *)(in + 7 * 32)); + __m128i in24 = _mm_loadu_si128((const __m128i *)(in + 24 * 32)); + __m128i in25 = _mm_loadu_si128((const __m128i *)(in + 25 * 32)); + __m128i in26 = _mm_loadu_si128((const __m128i *)(in + 26 * 32)); + __m128i in27 = _mm_loadu_si128((const __m128i *)(in + 27 * 32)); step1[4] = ADD_EPI16(in04, in27); step1[5] = ADD_EPI16(in05, in26); step1[6] = ADD_EPI16(in06, in25); @@ -310,14 +305,14 @@ void FDCT32x32_2D(const int16_t *input, #endif // DCT_HIGH_BIT_DEPTH } { - __m128i in08 = _mm_loadu_si128((const __m128i *)(in + 8 * 32)); - __m128i in09 = _mm_loadu_si128((const __m128i *)(in + 9 * 32)); - __m128i in10 = _mm_loadu_si128((const __m128i *)(in + 10 * 32)); - __m128i in11 = _mm_loadu_si128((const __m128i *)(in + 11 * 32)); - __m128i in20 = _mm_loadu_si128((const __m128i *)(in + 20 * 32)); - __m128i in21 = _mm_loadu_si128((const __m128i *)(in + 21 * 32)); - __m128i in22 = _mm_loadu_si128((const __m128i *)(in + 22 * 32)); - __m128i in23 = _mm_loadu_si128((const __m128i *)(in + 23 * 32)); + __m128i in08 = _mm_loadu_si128((const __m128i *)(in + 8 * 32)); + __m128i in09 = _mm_loadu_si128((const __m128i *)(in + 9 * 32)); + __m128i in10 = _mm_loadu_si128((const __m128i *)(in + 10 * 32)); + __m128i in11 = _mm_loadu_si128((const __m128i *)(in + 11 * 32)); + __m128i in20 = _mm_loadu_si128((const __m128i *)(in + 20 * 32)); + __m128i in21 = _mm_loadu_si128((const __m128i *)(in + 21 * 32)); + __m128i in22 = _mm_loadu_si128((const __m128i *)(in + 22 * 32)); + __m128i in23 = _mm_loadu_si128((const __m128i *)(in + 23 * 32)); step1[8] = ADD_EPI16(in08, in23); step1[9] = ADD_EPI16(in09, in22); step1[10] = ADD_EPI16(in10, in21); @@ -337,14 +332,14 @@ void FDCT32x32_2D(const int16_t *input, #endif // DCT_HIGH_BIT_DEPTH } { - __m128i in12 = _mm_loadu_si128((const __m128i *)(in + 12 * 32)); - __m128i in13 = _mm_loadu_si128((const __m128i *)(in + 13 * 32)); - __m128i in14 = _mm_loadu_si128((const __m128i *)(in + 14 * 32)); - 
__m128i in15 = _mm_loadu_si128((const __m128i *)(in + 15 * 32)); - __m128i in16 = _mm_loadu_si128((const __m128i *)(in + 16 * 32)); - __m128i in17 = _mm_loadu_si128((const __m128i *)(in + 17 * 32)); - __m128i in18 = _mm_loadu_si128((const __m128i *)(in + 18 * 32)); - __m128i in19 = _mm_loadu_si128((const __m128i *)(in + 19 * 32)); + __m128i in12 = _mm_loadu_si128((const __m128i *)(in + 12 * 32)); + __m128i in13 = _mm_loadu_si128((const __m128i *)(in + 13 * 32)); + __m128i in14 = _mm_loadu_si128((const __m128i *)(in + 14 * 32)); + __m128i in15 = _mm_loadu_si128((const __m128i *)(in + 15 * 32)); + __m128i in16 = _mm_loadu_si128((const __m128i *)(in + 16 * 32)); + __m128i in17 = _mm_loadu_si128((const __m128i *)(in + 17 * 32)); + __m128i in18 = _mm_loadu_si128((const __m128i *)(in + 18 * 32)); + __m128i in19 = _mm_loadu_si128((const __m128i *)(in + 19 * 32)); step1[12] = ADD_EPI16(in12, in19); step1[13] = ADD_EPI16(in13, in18); step1[14] = ADD_EPI16(in14, in17); @@ -372,10 +367,10 @@ void FDCT32x32_2D(const int16_t *input, step2[3] = ADD_EPI16(step1[3], step1[12]); step2[4] = ADD_EPI16(step1[4], step1[11]); step2[5] = ADD_EPI16(step1[5], step1[10]); - step2[6] = ADD_EPI16(step1[6], step1[ 9]); - step2[7] = ADD_EPI16(step1[7], step1[ 8]); - step2[8] = SUB_EPI16(step1[7], step1[ 8]); - step2[9] = SUB_EPI16(step1[6], step1[ 9]); + step2[6] = ADD_EPI16(step1[6], step1[9]); + step2[7] = ADD_EPI16(step1[7], step1[8]); + step2[8] = SUB_EPI16(step1[7], step1[8]); + step2[9] = SUB_EPI16(step1[6], step1[9]); step2[10] = SUB_EPI16(step1[5], step1[10]); step2[11] = SUB_EPI16(step1[4], step1[11]); step2[12] = SUB_EPI16(step1[3], step1[12]); @@ -384,9 +379,8 @@ void FDCT32x32_2D(const int16_t *input, step2[15] = SUB_EPI16(step1[0], step1[15]); #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x16( - &step2[0], &step2[1], &step2[2], &step2[3], - &step2[4], &step2[5], &step2[6], &step2[7], - &step2[8], &step2[9], &step2[10], &step2[11], + &step2[0], &step2[1], &step2[2], 
&step2[3], &step2[4], &step2[5], + &step2[6], &step2[7], &step2[8], &step2[9], &step2[10], &step2[11], &step2[12], &step2[13], &step2[14], &step2[15]); if (overflow) { if (pass == 0) @@ -482,16 +476,16 @@ void FDCT32x32_2D(const int16_t *input, // dump the magnitude by half, hence the intermediate values are within // the range of 16 bits. if (1 == pass) { - __m128i s3_00_0 = _mm_cmplt_epi16(step2[ 0], kZero); - __m128i s3_01_0 = _mm_cmplt_epi16(step2[ 1], kZero); - __m128i s3_02_0 = _mm_cmplt_epi16(step2[ 2], kZero); - __m128i s3_03_0 = _mm_cmplt_epi16(step2[ 3], kZero); - __m128i s3_04_0 = _mm_cmplt_epi16(step2[ 4], kZero); - __m128i s3_05_0 = _mm_cmplt_epi16(step2[ 5], kZero); - __m128i s3_06_0 = _mm_cmplt_epi16(step2[ 6], kZero); - __m128i s3_07_0 = _mm_cmplt_epi16(step2[ 7], kZero); - __m128i s2_08_0 = _mm_cmplt_epi16(step2[ 8], kZero); - __m128i s2_09_0 = _mm_cmplt_epi16(step2[ 9], kZero); + __m128i s3_00_0 = _mm_cmplt_epi16(step2[0], kZero); + __m128i s3_01_0 = _mm_cmplt_epi16(step2[1], kZero); + __m128i s3_02_0 = _mm_cmplt_epi16(step2[2], kZero); + __m128i s3_03_0 = _mm_cmplt_epi16(step2[3], kZero); + __m128i s3_04_0 = _mm_cmplt_epi16(step2[4], kZero); + __m128i s3_05_0 = _mm_cmplt_epi16(step2[5], kZero); + __m128i s3_06_0 = _mm_cmplt_epi16(step2[6], kZero); + __m128i s3_07_0 = _mm_cmplt_epi16(step2[7], kZero); + __m128i s2_08_0 = _mm_cmplt_epi16(step2[8], kZero); + __m128i s2_09_0 = _mm_cmplt_epi16(step2[9], kZero); __m128i s3_10_0 = _mm_cmplt_epi16(step2[10], kZero); __m128i s3_11_0 = _mm_cmplt_epi16(step2[11], kZero); __m128i s3_12_0 = _mm_cmplt_epi16(step2[12], kZero); @@ -515,16 +509,16 @@ void FDCT32x32_2D(const int16_t *input, __m128i s3_30_0 = _mm_cmplt_epi16(step1[30], kZero); __m128i s3_31_0 = _mm_cmplt_epi16(step1[31], kZero); - step2[0] = SUB_EPI16(step2[ 0], s3_00_0); - step2[1] = SUB_EPI16(step2[ 1], s3_01_0); - step2[2] = SUB_EPI16(step2[ 2], s3_02_0); - step2[3] = SUB_EPI16(step2[ 3], s3_03_0); - step2[4] = SUB_EPI16(step2[ 4], s3_04_0); - 
step2[5] = SUB_EPI16(step2[ 5], s3_05_0); - step2[6] = SUB_EPI16(step2[ 6], s3_06_0); - step2[7] = SUB_EPI16(step2[ 7], s3_07_0); - step2[8] = SUB_EPI16(step2[ 8], s2_08_0); - step2[9] = SUB_EPI16(step2[ 9], s2_09_0); + step2[0] = SUB_EPI16(step2[0], s3_00_0); + step2[1] = SUB_EPI16(step2[1], s3_01_0); + step2[2] = SUB_EPI16(step2[2], s3_02_0); + step2[3] = SUB_EPI16(step2[3], s3_03_0); + step2[4] = SUB_EPI16(step2[4], s3_04_0); + step2[5] = SUB_EPI16(step2[5], s3_05_0); + step2[6] = SUB_EPI16(step2[6], s3_06_0); + step2[7] = SUB_EPI16(step2[7], s3_07_0); + step2[8] = SUB_EPI16(step2[8], s2_08_0); + step2[9] = SUB_EPI16(step2[9], s2_09_0); step2[10] = SUB_EPI16(step2[10], s3_10_0); step2[11] = SUB_EPI16(step2[11], s3_11_0); step2[12] = SUB_EPI16(step2[12], s3_12_0); @@ -549,29 +543,27 @@ void FDCT32x32_2D(const int16_t *input, step1[31] = SUB_EPI16(step1[31], s3_31_0); #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x32( - &step2[0], &step2[1], &step2[2], &step2[3], - &step2[4], &step2[5], &step2[6], &step2[7], - &step2[8], &step2[9], &step2[10], &step2[11], - &step2[12], &step2[13], &step2[14], &step2[15], - &step1[16], &step1[17], &step1[18], &step1[19], - &step2[20], &step2[21], &step2[22], &step2[23], - &step2[24], &step2[25], &step2[26], &step2[27], - &step1[28], &step1[29], &step1[30], &step1[31]); + &step2[0], &step2[1], &step2[2], &step2[3], &step2[4], &step2[5], + &step2[6], &step2[7], &step2[8], &step2[9], &step2[10], &step2[11], + &step2[12], &step2[13], &step2[14], &step2[15], &step1[16], + &step1[17], &step1[18], &step1[19], &step2[20], &step2[21], + &step2[22], &step2[23], &step2[24], &step2[25], &step2[26], + &step2[27], &step1[28], &step1[29], &step1[30], &step1[31]); if (overflow) { HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); return; } #endif // DCT_HIGH_BIT_DEPTH - step2[0] = _mm_add_epi16(step2[ 0], kOne); - step2[1] = _mm_add_epi16(step2[ 1], kOne); - step2[2] = _mm_add_epi16(step2[ 2], kOne); - step2[3] = _mm_add_epi16(step2[ 3], 
kOne); - step2[4] = _mm_add_epi16(step2[ 4], kOne); - step2[5] = _mm_add_epi16(step2[ 5], kOne); - step2[6] = _mm_add_epi16(step2[ 6], kOne); - step2[7] = _mm_add_epi16(step2[ 7], kOne); - step2[8] = _mm_add_epi16(step2[ 8], kOne); - step2[9] = _mm_add_epi16(step2[ 9], kOne); + step2[0] = _mm_add_epi16(step2[0], kOne); + step2[1] = _mm_add_epi16(step2[1], kOne); + step2[2] = _mm_add_epi16(step2[2], kOne); + step2[3] = _mm_add_epi16(step2[3], kOne); + step2[4] = _mm_add_epi16(step2[4], kOne); + step2[5] = _mm_add_epi16(step2[5], kOne); + step2[6] = _mm_add_epi16(step2[6], kOne); + step2[7] = _mm_add_epi16(step2[7], kOne); + step2[8] = _mm_add_epi16(step2[8], kOne); + step2[9] = _mm_add_epi16(step2[9], kOne); step2[10] = _mm_add_epi16(step2[10], kOne); step2[11] = _mm_add_epi16(step2[11], kOne); step2[12] = _mm_add_epi16(step2[12], kOne); @@ -595,16 +587,16 @@ void FDCT32x32_2D(const int16_t *input, step1[30] = _mm_add_epi16(step1[30], kOne); step1[31] = _mm_add_epi16(step1[31], kOne); - step2[0] = _mm_srai_epi16(step2[ 0], 2); - step2[1] = _mm_srai_epi16(step2[ 1], 2); - step2[2] = _mm_srai_epi16(step2[ 2], 2); - step2[3] = _mm_srai_epi16(step2[ 3], 2); - step2[4] = _mm_srai_epi16(step2[ 4], 2); - step2[5] = _mm_srai_epi16(step2[ 5], 2); - step2[6] = _mm_srai_epi16(step2[ 6], 2); - step2[7] = _mm_srai_epi16(step2[ 7], 2); - step2[8] = _mm_srai_epi16(step2[ 8], 2); - step2[9] = _mm_srai_epi16(step2[ 9], 2); + step2[0] = _mm_srai_epi16(step2[0], 2); + step2[1] = _mm_srai_epi16(step2[1], 2); + step2[2] = _mm_srai_epi16(step2[2], 2); + step2[3] = _mm_srai_epi16(step2[3], 2); + step2[4] = _mm_srai_epi16(step2[4], 2); + step2[5] = _mm_srai_epi16(step2[5], 2); + step2[6] = _mm_srai_epi16(step2[6], 2); + step2[7] = _mm_srai_epi16(step2[7], 2); + step2[8] = _mm_srai_epi16(step2[8], 2); + step2[9] = _mm_srai_epi16(step2[9], 2); step2[10] = _mm_srai_epi16(step2[10], 2); step2[11] = _mm_srai_epi16(step2[11], 2); step2[12] = _mm_srai_epi16(step2[12], 2); @@ -633,821 +625,884 @@ 
void FDCT32x32_2D(const int16_t *input, #if FDCT32x32_HIGH_PRECISION if (pass == 0) { #endif - // Stage 3 - { - step3[0] = ADD_EPI16(step2[(8 - 1)], step2[0]); - step3[1] = ADD_EPI16(step2[(8 - 2)], step2[1]); - step3[2] = ADD_EPI16(step2[(8 - 3)], step2[2]); - step3[3] = ADD_EPI16(step2[(8 - 4)], step2[3]); - step3[4] = SUB_EPI16(step2[(8 - 5)], step2[4]); - step3[5] = SUB_EPI16(step2[(8 - 6)], step2[5]); - step3[6] = SUB_EPI16(step2[(8 - 7)], step2[6]); - step3[7] = SUB_EPI16(step2[(8 - 8)], step2[7]); + // Stage 3 + { + step3[0] = ADD_EPI16(step2[(8 - 1)], step2[0]); + step3[1] = ADD_EPI16(step2[(8 - 2)], step2[1]); + step3[2] = ADD_EPI16(step2[(8 - 3)], step2[2]); + step3[3] = ADD_EPI16(step2[(8 - 4)], step2[3]); + step3[4] = SUB_EPI16(step2[(8 - 5)], step2[4]); + step3[5] = SUB_EPI16(step2[(8 - 6)], step2[5]); + step3[6] = SUB_EPI16(step2[(8 - 7)], step2[6]); + step3[7] = SUB_EPI16(step2[(8 - 8)], step2[7]); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step3[0], &step3[1], &step3[2], - &step3[3], &step3[4], &step3[5], - &step3[6], &step3[7]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = check_epi16_overflow_x8(&step3[0], &step3[1], &step3[2], + &step3[3], &step3[4], &step3[5], + &step3[6], &step3[7]); + if (overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]); - const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]); - const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]); - const __m128i s3_11_1 = _mm_unpackhi_epi16(step2[12], step2[11]); - const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16); - const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16); - const __m128i 
s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16); - const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16); - const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16); - const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16); - const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16); - const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16); - // dct_const_round_shift - const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING); - const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING); - const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING); - const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING); - const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING); - const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING); - const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING); - const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING); - const __m128i s3_10_6 = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS); - const __m128i s3_10_7 = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS); - const __m128i s3_11_6 = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS); - const __m128i s3_11_7 = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS); - const __m128i s3_12_6 = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS); - const __m128i s3_12_7 = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS); - const __m128i s3_13_6 = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS); - const __m128i s3_13_7 = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS); - // Combine - step3[10] = _mm_packs_epi32(s3_10_6, s3_10_7); - step3[11] = _mm_packs_epi32(s3_11_6, s3_11_7); - step3[12] = _mm_packs_epi32(s3_12_6, s3_12_7); - step3[13] = _mm_packs_epi32(s3_13_6, s3_13_7); + } + { + const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]); + const __m128i s3_10_1 = _mm_unpackhi_epi16(step2[13], step2[10]); + const __m128i s3_11_0 = _mm_unpacklo_epi16(step2[12], step2[11]); + const __m128i s3_11_1 = 
_mm_unpackhi_epi16(step2[12], step2[11]); + const __m128i s3_10_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_m16); + const __m128i s3_10_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_m16); + const __m128i s3_11_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_m16); + const __m128i s3_11_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_m16); + const __m128i s3_12_2 = _mm_madd_epi16(s3_11_0, k__cospi_p16_p16); + const __m128i s3_12_3 = _mm_madd_epi16(s3_11_1, k__cospi_p16_p16); + const __m128i s3_13_2 = _mm_madd_epi16(s3_10_0, k__cospi_p16_p16); + const __m128i s3_13_3 = _mm_madd_epi16(s3_10_1, k__cospi_p16_p16); + // dct_const_round_shift + const __m128i s3_10_4 = _mm_add_epi32(s3_10_2, k__DCT_CONST_ROUNDING); + const __m128i s3_10_5 = _mm_add_epi32(s3_10_3, k__DCT_CONST_ROUNDING); + const __m128i s3_11_4 = _mm_add_epi32(s3_11_2, k__DCT_CONST_ROUNDING); + const __m128i s3_11_5 = _mm_add_epi32(s3_11_3, k__DCT_CONST_ROUNDING); + const __m128i s3_12_4 = _mm_add_epi32(s3_12_2, k__DCT_CONST_ROUNDING); + const __m128i s3_12_5 = _mm_add_epi32(s3_12_3, k__DCT_CONST_ROUNDING); + const __m128i s3_13_4 = _mm_add_epi32(s3_13_2, k__DCT_CONST_ROUNDING); + const __m128i s3_13_5 = _mm_add_epi32(s3_13_3, k__DCT_CONST_ROUNDING); + const __m128i s3_10_6 = _mm_srai_epi32(s3_10_4, DCT_CONST_BITS); + const __m128i s3_10_7 = _mm_srai_epi32(s3_10_5, DCT_CONST_BITS); + const __m128i s3_11_6 = _mm_srai_epi32(s3_11_4, DCT_CONST_BITS); + const __m128i s3_11_7 = _mm_srai_epi32(s3_11_5, DCT_CONST_BITS); + const __m128i s3_12_6 = _mm_srai_epi32(s3_12_4, DCT_CONST_BITS); + const __m128i s3_12_7 = _mm_srai_epi32(s3_12_5, DCT_CONST_BITS); + const __m128i s3_13_6 = _mm_srai_epi32(s3_13_4, DCT_CONST_BITS); + const __m128i s3_13_7 = _mm_srai_epi32(s3_13_5, DCT_CONST_BITS); + // Combine + step3[10] = _mm_packs_epi32(s3_10_6, s3_10_7); + step3[11] = _mm_packs_epi32(s3_11_6, s3_11_7); + step3[12] = _mm_packs_epi32(s3_12_6, s3_12_7); + step3[13] = _mm_packs_epi32(s3_13_6, s3_13_7); #if DCT_HIGH_BIT_DEPTH - overflow = 
check_epi16_overflow_x4(&step3[10], &step3[11], - &step3[12], &step3[13]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = check_epi16_overflow_x4(&step3[10], &step3[11], &step3[12], + &step3[13]); + if (overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } - { - step3[16] = ADD_EPI16(step2[23], step1[16]); - step3[17] = ADD_EPI16(step2[22], step1[17]); - step3[18] = ADD_EPI16(step2[21], step1[18]); - step3[19] = ADD_EPI16(step2[20], step1[19]); - step3[20] = SUB_EPI16(step1[19], step2[20]); - step3[21] = SUB_EPI16(step1[18], step2[21]); - step3[22] = SUB_EPI16(step1[17], step2[22]); - step3[23] = SUB_EPI16(step1[16], step2[23]); - step3[24] = SUB_EPI16(step1[31], step2[24]); - step3[25] = SUB_EPI16(step1[30], step2[25]); - step3[26] = SUB_EPI16(step1[29], step2[26]); - step3[27] = SUB_EPI16(step1[28], step2[27]); - step3[28] = ADD_EPI16(step2[27], step1[28]); - step3[29] = ADD_EPI16(step2[26], step1[29]); - step3[30] = ADD_EPI16(step2[25], step1[30]); - step3[31] = ADD_EPI16(step2[24], step1[31]); + } + { + step3[16] = ADD_EPI16(step2[23], step1[16]); + step3[17] = ADD_EPI16(step2[22], step1[17]); + step3[18] = ADD_EPI16(step2[21], step1[18]); + step3[19] = ADD_EPI16(step2[20], step1[19]); + step3[20] = SUB_EPI16(step1[19], step2[20]); + step3[21] = SUB_EPI16(step1[18], step2[21]); + step3[22] = SUB_EPI16(step1[17], step2[22]); + step3[23] = SUB_EPI16(step1[16], step2[23]); + step3[24] = SUB_EPI16(step1[31], step2[24]); + step3[25] = SUB_EPI16(step1[30], step2[25]); + step3[26] = SUB_EPI16(step1[29], step2[26]); + step3[27] = SUB_EPI16(step1[28], step2[27]); + step3[28] = ADD_EPI16(step2[27], step1[28]); + step3[29] = ADD_EPI16(step2[26], step1[29]); + step3[30] = ADD_EPI16(step2[25], step1[30]); + step3[31] = 
ADD_EPI16(step2[24], step1[31]); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x16( - &step3[16], &step3[17], &step3[18], &step3[19], - &step3[20], &step3[21], &step3[22], &step3[23], - &step3[24], &step3[25], &step3[26], &step3[27], - &step3[28], &step3[29], &step3[30], &step3[31]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = check_epi16_overflow_x16( + &step3[16], &step3[17], &step3[18], &step3[19], &step3[20], + &step3[21], &step3[22], &step3[23], &step3[24], &step3[25], + &step3[26], &step3[27], &step3[28], &step3[29], &step3[30], + &step3[31]); + if (overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } + } - // Stage 4 - { - step1[0] = ADD_EPI16(step3[ 3], step3[ 0]); - step1[1] = ADD_EPI16(step3[ 2], step3[ 1]); - step1[2] = SUB_EPI16(step3[ 1], step3[ 2]); - step1[3] = SUB_EPI16(step3[ 0], step3[ 3]); - step1[8] = ADD_EPI16(step3[11], step2[ 8]); - step1[9] = ADD_EPI16(step3[10], step2[ 9]); - step1[10] = SUB_EPI16(step2[ 9], step3[10]); - step1[11] = SUB_EPI16(step2[ 8], step3[11]); - step1[12] = SUB_EPI16(step2[15], step3[12]); - step1[13] = SUB_EPI16(step2[14], step3[13]); - step1[14] = ADD_EPI16(step3[13], step2[14]); - step1[15] = ADD_EPI16(step3[12], step2[15]); + // Stage 4 + { + step1[0] = ADD_EPI16(step3[3], step3[0]); + step1[1] = ADD_EPI16(step3[2], step3[1]); + step1[2] = SUB_EPI16(step3[1], step3[2]); + step1[3] = SUB_EPI16(step3[0], step3[3]); + step1[8] = ADD_EPI16(step3[11], step2[8]); + step1[9] = ADD_EPI16(step3[10], step2[9]); + step1[10] = SUB_EPI16(step2[9], step3[10]); + step1[11] = SUB_EPI16(step2[8], step3[11]); + step1[12] = SUB_EPI16(step2[15], step3[12]); + step1[13] = SUB_EPI16(step2[14], step3[13]); + step1[14] = ADD_EPI16(step3[13], step2[14]); + step1[15] = 
ADD_EPI16(step3[12], step2[15]); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x16( - &step1[0], &step1[1], &step1[2], &step1[3], - &step1[4], &step1[5], &step1[6], &step1[7], - &step1[8], &step1[9], &step1[10], &step1[11], - &step1[12], &step1[13], &step1[14], &step1[15]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = check_epi16_overflow_x16( + &step1[0], &step1[1], &step1[2], &step1[3], &step1[4], &step1[5], + &step1[6], &step1[7], &step1[8], &step1[9], &step1[10], + &step1[11], &step1[12], &step1[13], &step1[14], &step1[15]); + if (overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i s1_05_0 = _mm_unpacklo_epi16(step3[6], step3[5]); - const __m128i s1_05_1 = _mm_unpackhi_epi16(step3[6], step3[5]); - const __m128i s1_05_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_m16); - const __m128i s1_05_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_m16); - const __m128i s1_06_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_p16); - const __m128i s1_06_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_p16); - // dct_const_round_shift - const __m128i s1_05_4 = _mm_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING); - const __m128i s1_05_5 = _mm_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING); - const __m128i s1_06_4 = _mm_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING); - const __m128i s1_06_5 = _mm_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING); - const __m128i s1_05_6 = _mm_srai_epi32(s1_05_4, DCT_CONST_BITS); - const __m128i s1_05_7 = _mm_srai_epi32(s1_05_5, DCT_CONST_BITS); - const __m128i s1_06_6 = _mm_srai_epi32(s1_06_4, DCT_CONST_BITS); - const __m128i s1_06_7 = _mm_srai_epi32(s1_06_5, DCT_CONST_BITS); - // Combine - step1[5] = _mm_packs_epi32(s1_05_6, s1_05_7); - step1[6] = _mm_packs_epi32(s1_06_6, s1_06_7); + } + { + const __m128i 
s1_05_0 = _mm_unpacklo_epi16(step3[6], step3[5]); + const __m128i s1_05_1 = _mm_unpackhi_epi16(step3[6], step3[5]); + const __m128i s1_05_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_m16); + const __m128i s1_05_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_m16); + const __m128i s1_06_2 = _mm_madd_epi16(s1_05_0, k__cospi_p16_p16); + const __m128i s1_06_3 = _mm_madd_epi16(s1_05_1, k__cospi_p16_p16); + // dct_const_round_shift + const __m128i s1_05_4 = _mm_add_epi32(s1_05_2, k__DCT_CONST_ROUNDING); + const __m128i s1_05_5 = _mm_add_epi32(s1_05_3, k__DCT_CONST_ROUNDING); + const __m128i s1_06_4 = _mm_add_epi32(s1_06_2, k__DCT_CONST_ROUNDING); + const __m128i s1_06_5 = _mm_add_epi32(s1_06_3, k__DCT_CONST_ROUNDING); + const __m128i s1_05_6 = _mm_srai_epi32(s1_05_4, DCT_CONST_BITS); + const __m128i s1_05_7 = _mm_srai_epi32(s1_05_5, DCT_CONST_BITS); + const __m128i s1_06_6 = _mm_srai_epi32(s1_06_4, DCT_CONST_BITS); + const __m128i s1_06_7 = _mm_srai_epi32(s1_06_5, DCT_CONST_BITS); + // Combine + step1[5] = _mm_packs_epi32(s1_05_6, s1_05_7); + step1[6] = _mm_packs_epi32(s1_06_6, s1_06_7); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x2(&step1[5], &step1[6]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = check_epi16_overflow_x2(&step1[5], &step1[6]); + if (overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i s1_18_0 = _mm_unpacklo_epi16(step3[18], step3[29]); - const __m128i s1_18_1 = _mm_unpackhi_epi16(step3[18], step3[29]); - const __m128i s1_19_0 = _mm_unpacklo_epi16(step3[19], step3[28]); - const __m128i s1_19_1 = _mm_unpackhi_epi16(step3[19], step3[28]); - const __m128i s1_20_0 = _mm_unpacklo_epi16(step3[20], step3[27]); - const __m128i s1_20_1 = _mm_unpackhi_epi16(step3[20], step3[27]); - 
const __m128i s1_21_0 = _mm_unpacklo_epi16(step3[21], step3[26]); - const __m128i s1_21_1 = _mm_unpackhi_epi16(step3[21], step3[26]); - const __m128i s1_18_2 = _mm_madd_epi16(s1_18_0, k__cospi_m08_p24); - const __m128i s1_18_3 = _mm_madd_epi16(s1_18_1, k__cospi_m08_p24); - const __m128i s1_19_2 = _mm_madd_epi16(s1_19_0, k__cospi_m08_p24); - const __m128i s1_19_3 = _mm_madd_epi16(s1_19_1, k__cospi_m08_p24); - const __m128i s1_20_2 = _mm_madd_epi16(s1_20_0, k__cospi_m24_m08); - const __m128i s1_20_3 = _mm_madd_epi16(s1_20_1, k__cospi_m24_m08); - const __m128i s1_21_2 = _mm_madd_epi16(s1_21_0, k__cospi_m24_m08); - const __m128i s1_21_3 = _mm_madd_epi16(s1_21_1, k__cospi_m24_m08); - const __m128i s1_26_2 = _mm_madd_epi16(s1_21_0, k__cospi_m08_p24); - const __m128i s1_26_3 = _mm_madd_epi16(s1_21_1, k__cospi_m08_p24); - const __m128i s1_27_2 = _mm_madd_epi16(s1_20_0, k__cospi_m08_p24); - const __m128i s1_27_3 = _mm_madd_epi16(s1_20_1, k__cospi_m08_p24); - const __m128i s1_28_2 = _mm_madd_epi16(s1_19_0, k__cospi_p24_p08); - const __m128i s1_28_3 = _mm_madd_epi16(s1_19_1, k__cospi_p24_p08); - const __m128i s1_29_2 = _mm_madd_epi16(s1_18_0, k__cospi_p24_p08); - const __m128i s1_29_3 = _mm_madd_epi16(s1_18_1, k__cospi_p24_p08); - // dct_const_round_shift - const __m128i s1_18_4 = _mm_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING); - const __m128i s1_18_5 = _mm_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING); - const __m128i s1_19_4 = _mm_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING); - const __m128i s1_19_5 = _mm_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING); - const __m128i s1_20_4 = _mm_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING); - const __m128i s1_20_5 = _mm_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING); - const __m128i s1_21_4 = _mm_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING); - const __m128i s1_21_5 = _mm_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING); - const __m128i s1_26_4 = _mm_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING); - const __m128i s1_26_5 = _mm_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING); - 
const __m128i s1_27_4 = _mm_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING); - const __m128i s1_27_5 = _mm_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING); - const __m128i s1_28_4 = _mm_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING); - const __m128i s1_28_5 = _mm_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING); - const __m128i s1_29_4 = _mm_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING); - const __m128i s1_29_5 = _mm_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING); - const __m128i s1_18_6 = _mm_srai_epi32(s1_18_4, DCT_CONST_BITS); - const __m128i s1_18_7 = _mm_srai_epi32(s1_18_5, DCT_CONST_BITS); - const __m128i s1_19_6 = _mm_srai_epi32(s1_19_4, DCT_CONST_BITS); - const __m128i s1_19_7 = _mm_srai_epi32(s1_19_5, DCT_CONST_BITS); - const __m128i s1_20_6 = _mm_srai_epi32(s1_20_4, DCT_CONST_BITS); - const __m128i s1_20_7 = _mm_srai_epi32(s1_20_5, DCT_CONST_BITS); - const __m128i s1_21_6 = _mm_srai_epi32(s1_21_4, DCT_CONST_BITS); - const __m128i s1_21_7 = _mm_srai_epi32(s1_21_5, DCT_CONST_BITS); - const __m128i s1_26_6 = _mm_srai_epi32(s1_26_4, DCT_CONST_BITS); - const __m128i s1_26_7 = _mm_srai_epi32(s1_26_5, DCT_CONST_BITS); - const __m128i s1_27_6 = _mm_srai_epi32(s1_27_4, DCT_CONST_BITS); - const __m128i s1_27_7 = _mm_srai_epi32(s1_27_5, DCT_CONST_BITS); - const __m128i s1_28_6 = _mm_srai_epi32(s1_28_4, DCT_CONST_BITS); - const __m128i s1_28_7 = _mm_srai_epi32(s1_28_5, DCT_CONST_BITS); - const __m128i s1_29_6 = _mm_srai_epi32(s1_29_4, DCT_CONST_BITS); - const __m128i s1_29_7 = _mm_srai_epi32(s1_29_5, DCT_CONST_BITS); - // Combine - step1[18] = _mm_packs_epi32(s1_18_6, s1_18_7); - step1[19] = _mm_packs_epi32(s1_19_6, s1_19_7); - step1[20] = _mm_packs_epi32(s1_20_6, s1_20_7); - step1[21] = _mm_packs_epi32(s1_21_6, s1_21_7); - step1[26] = _mm_packs_epi32(s1_26_6, s1_26_7); - step1[27] = _mm_packs_epi32(s1_27_6, s1_27_7); - step1[28] = _mm_packs_epi32(s1_28_6, s1_28_7); - step1[29] = _mm_packs_epi32(s1_29_6, s1_29_7); + } + { + const __m128i s1_18_0 = _mm_unpacklo_epi16(step3[18], step3[29]); + const 
__m128i s1_18_1 = _mm_unpackhi_epi16(step3[18], step3[29]); + const __m128i s1_19_0 = _mm_unpacklo_epi16(step3[19], step3[28]); + const __m128i s1_19_1 = _mm_unpackhi_epi16(step3[19], step3[28]); + const __m128i s1_20_0 = _mm_unpacklo_epi16(step3[20], step3[27]); + const __m128i s1_20_1 = _mm_unpackhi_epi16(step3[20], step3[27]); + const __m128i s1_21_0 = _mm_unpacklo_epi16(step3[21], step3[26]); + const __m128i s1_21_1 = _mm_unpackhi_epi16(step3[21], step3[26]); + const __m128i s1_18_2 = _mm_madd_epi16(s1_18_0, k__cospi_m08_p24); + const __m128i s1_18_3 = _mm_madd_epi16(s1_18_1, k__cospi_m08_p24); + const __m128i s1_19_2 = _mm_madd_epi16(s1_19_0, k__cospi_m08_p24); + const __m128i s1_19_3 = _mm_madd_epi16(s1_19_1, k__cospi_m08_p24); + const __m128i s1_20_2 = _mm_madd_epi16(s1_20_0, k__cospi_m24_m08); + const __m128i s1_20_3 = _mm_madd_epi16(s1_20_1, k__cospi_m24_m08); + const __m128i s1_21_2 = _mm_madd_epi16(s1_21_0, k__cospi_m24_m08); + const __m128i s1_21_3 = _mm_madd_epi16(s1_21_1, k__cospi_m24_m08); + const __m128i s1_26_2 = _mm_madd_epi16(s1_21_0, k__cospi_m08_p24); + const __m128i s1_26_3 = _mm_madd_epi16(s1_21_1, k__cospi_m08_p24); + const __m128i s1_27_2 = _mm_madd_epi16(s1_20_0, k__cospi_m08_p24); + const __m128i s1_27_3 = _mm_madd_epi16(s1_20_1, k__cospi_m08_p24); + const __m128i s1_28_2 = _mm_madd_epi16(s1_19_0, k__cospi_p24_p08); + const __m128i s1_28_3 = _mm_madd_epi16(s1_19_1, k__cospi_p24_p08); + const __m128i s1_29_2 = _mm_madd_epi16(s1_18_0, k__cospi_p24_p08); + const __m128i s1_29_3 = _mm_madd_epi16(s1_18_1, k__cospi_p24_p08); + // dct_const_round_shift + const __m128i s1_18_4 = _mm_add_epi32(s1_18_2, k__DCT_CONST_ROUNDING); + const __m128i s1_18_5 = _mm_add_epi32(s1_18_3, k__DCT_CONST_ROUNDING); + const __m128i s1_19_4 = _mm_add_epi32(s1_19_2, k__DCT_CONST_ROUNDING); + const __m128i s1_19_5 = _mm_add_epi32(s1_19_3, k__DCT_CONST_ROUNDING); + const __m128i s1_20_4 = _mm_add_epi32(s1_20_2, k__DCT_CONST_ROUNDING); + const __m128i s1_20_5 = 
_mm_add_epi32(s1_20_3, k__DCT_CONST_ROUNDING); + const __m128i s1_21_4 = _mm_add_epi32(s1_21_2, k__DCT_CONST_ROUNDING); + const __m128i s1_21_5 = _mm_add_epi32(s1_21_3, k__DCT_CONST_ROUNDING); + const __m128i s1_26_4 = _mm_add_epi32(s1_26_2, k__DCT_CONST_ROUNDING); + const __m128i s1_26_5 = _mm_add_epi32(s1_26_3, k__DCT_CONST_ROUNDING); + const __m128i s1_27_4 = _mm_add_epi32(s1_27_2, k__DCT_CONST_ROUNDING); + const __m128i s1_27_5 = _mm_add_epi32(s1_27_3, k__DCT_CONST_ROUNDING); + const __m128i s1_28_4 = _mm_add_epi32(s1_28_2, k__DCT_CONST_ROUNDING); + const __m128i s1_28_5 = _mm_add_epi32(s1_28_3, k__DCT_CONST_ROUNDING); + const __m128i s1_29_4 = _mm_add_epi32(s1_29_2, k__DCT_CONST_ROUNDING); + const __m128i s1_29_5 = _mm_add_epi32(s1_29_3, k__DCT_CONST_ROUNDING); + const __m128i s1_18_6 = _mm_srai_epi32(s1_18_4, DCT_CONST_BITS); + const __m128i s1_18_7 = _mm_srai_epi32(s1_18_5, DCT_CONST_BITS); + const __m128i s1_19_6 = _mm_srai_epi32(s1_19_4, DCT_CONST_BITS); + const __m128i s1_19_7 = _mm_srai_epi32(s1_19_5, DCT_CONST_BITS); + const __m128i s1_20_6 = _mm_srai_epi32(s1_20_4, DCT_CONST_BITS); + const __m128i s1_20_7 = _mm_srai_epi32(s1_20_5, DCT_CONST_BITS); + const __m128i s1_21_6 = _mm_srai_epi32(s1_21_4, DCT_CONST_BITS); + const __m128i s1_21_7 = _mm_srai_epi32(s1_21_5, DCT_CONST_BITS); + const __m128i s1_26_6 = _mm_srai_epi32(s1_26_4, DCT_CONST_BITS); + const __m128i s1_26_7 = _mm_srai_epi32(s1_26_5, DCT_CONST_BITS); + const __m128i s1_27_6 = _mm_srai_epi32(s1_27_4, DCT_CONST_BITS); + const __m128i s1_27_7 = _mm_srai_epi32(s1_27_5, DCT_CONST_BITS); + const __m128i s1_28_6 = _mm_srai_epi32(s1_28_4, DCT_CONST_BITS); + const __m128i s1_28_7 = _mm_srai_epi32(s1_28_5, DCT_CONST_BITS); + const __m128i s1_29_6 = _mm_srai_epi32(s1_29_4, DCT_CONST_BITS); + const __m128i s1_29_7 = _mm_srai_epi32(s1_29_5, DCT_CONST_BITS); + // Combine + step1[18] = _mm_packs_epi32(s1_18_6, s1_18_7); + step1[19] = _mm_packs_epi32(s1_19_6, s1_19_7); + step1[20] = _mm_packs_epi32(s1_20_6, 
s1_20_7); + step1[21] = _mm_packs_epi32(s1_21_6, s1_21_7); + step1[26] = _mm_packs_epi32(s1_26_6, s1_26_7); + step1[27] = _mm_packs_epi32(s1_27_6, s1_27_7); + step1[28] = _mm_packs_epi32(s1_28_6, s1_28_7); + step1[29] = _mm_packs_epi32(s1_29_6, s1_29_7); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step1[18], &step1[19], &step1[20], - &step1[21], &step1[26], &step1[27], - &step1[28], &step1[29]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = check_epi16_overflow_x8(&step1[18], &step1[19], &step1[20], + &step1[21], &step1[26], &step1[27], + &step1[28], &step1[29]); + if (overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } - // Stage 5 - { - step2[4] = ADD_EPI16(step1[5], step3[4]); - step2[5] = SUB_EPI16(step3[4], step1[5]); - step2[6] = SUB_EPI16(step3[7], step1[6]); - step2[7] = ADD_EPI16(step1[6], step3[7]); + } + // Stage 5 + { + step2[4] = ADD_EPI16(step1[5], step3[4]); + step2[5] = SUB_EPI16(step3[4], step1[5]); + step2[6] = SUB_EPI16(step3[7], step1[6]); + step2[7] = ADD_EPI16(step1[6], step3[7]); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&step2[4], &step2[5], - &step2[6], &step2[7]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = check_epi16_overflow_x4(&step2[4], &step2[5], &step2[6], + &step2[7]); + if (overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i out_00_0 = _mm_unpacklo_epi16(step1[0], step1[1]); - const __m128i out_00_1 = _mm_unpackhi_epi16(step1[0], step1[1]); - const __m128i 
out_08_0 = _mm_unpacklo_epi16(step1[2], step1[3]); - const __m128i out_08_1 = _mm_unpackhi_epi16(step1[2], step1[3]); - const __m128i out_00_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_p16); - const __m128i out_00_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_p16); - const __m128i out_16_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_m16); - const __m128i out_16_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_m16); - const __m128i out_08_2 = _mm_madd_epi16(out_08_0, k__cospi_p24_p08); - const __m128i out_08_3 = _mm_madd_epi16(out_08_1, k__cospi_p24_p08); - const __m128i out_24_2 = _mm_madd_epi16(out_08_0, k__cospi_m08_p24); - const __m128i out_24_3 = _mm_madd_epi16(out_08_1, k__cospi_m08_p24); - // dct_const_round_shift - const __m128i out_00_4 = _mm_add_epi32(out_00_2, k__DCT_CONST_ROUNDING); - const __m128i out_00_5 = _mm_add_epi32(out_00_3, k__DCT_CONST_ROUNDING); - const __m128i out_16_4 = _mm_add_epi32(out_16_2, k__DCT_CONST_ROUNDING); - const __m128i out_16_5 = _mm_add_epi32(out_16_3, k__DCT_CONST_ROUNDING); - const __m128i out_08_4 = _mm_add_epi32(out_08_2, k__DCT_CONST_ROUNDING); - const __m128i out_08_5 = _mm_add_epi32(out_08_3, k__DCT_CONST_ROUNDING); - const __m128i out_24_4 = _mm_add_epi32(out_24_2, k__DCT_CONST_ROUNDING); - const __m128i out_24_5 = _mm_add_epi32(out_24_3, k__DCT_CONST_ROUNDING); - const __m128i out_00_6 = _mm_srai_epi32(out_00_4, DCT_CONST_BITS); - const __m128i out_00_7 = _mm_srai_epi32(out_00_5, DCT_CONST_BITS); - const __m128i out_16_6 = _mm_srai_epi32(out_16_4, DCT_CONST_BITS); - const __m128i out_16_7 = _mm_srai_epi32(out_16_5, DCT_CONST_BITS); - const __m128i out_08_6 = _mm_srai_epi32(out_08_4, DCT_CONST_BITS); - const __m128i out_08_7 = _mm_srai_epi32(out_08_5, DCT_CONST_BITS); - const __m128i out_24_6 = _mm_srai_epi32(out_24_4, DCT_CONST_BITS); - const __m128i out_24_7 = _mm_srai_epi32(out_24_5, DCT_CONST_BITS); - // Combine - out[ 0] = _mm_packs_epi32(out_00_6, out_00_7); - out[16] = _mm_packs_epi32(out_16_6, out_16_7); - out[ 8] = 
_mm_packs_epi32(out_08_6, out_08_7); - out[24] = _mm_packs_epi32(out_24_6, out_24_7); + } + { + const __m128i out_00_0 = _mm_unpacklo_epi16(step1[0], step1[1]); + const __m128i out_00_1 = _mm_unpackhi_epi16(step1[0], step1[1]); + const __m128i out_08_0 = _mm_unpacklo_epi16(step1[2], step1[3]); + const __m128i out_08_1 = _mm_unpackhi_epi16(step1[2], step1[3]); + const __m128i out_00_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_p16); + const __m128i out_00_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_p16); + const __m128i out_16_2 = _mm_madd_epi16(out_00_0, k__cospi_p16_m16); + const __m128i out_16_3 = _mm_madd_epi16(out_00_1, k__cospi_p16_m16); + const __m128i out_08_2 = _mm_madd_epi16(out_08_0, k__cospi_p24_p08); + const __m128i out_08_3 = _mm_madd_epi16(out_08_1, k__cospi_p24_p08); + const __m128i out_24_2 = _mm_madd_epi16(out_08_0, k__cospi_m08_p24); + const __m128i out_24_3 = _mm_madd_epi16(out_08_1, k__cospi_m08_p24); + // dct_const_round_shift + const __m128i out_00_4 = + _mm_add_epi32(out_00_2, k__DCT_CONST_ROUNDING); + const __m128i out_00_5 = + _mm_add_epi32(out_00_3, k__DCT_CONST_ROUNDING); + const __m128i out_16_4 = + _mm_add_epi32(out_16_2, k__DCT_CONST_ROUNDING); + const __m128i out_16_5 = + _mm_add_epi32(out_16_3, k__DCT_CONST_ROUNDING); + const __m128i out_08_4 = + _mm_add_epi32(out_08_2, k__DCT_CONST_ROUNDING); + const __m128i out_08_5 = + _mm_add_epi32(out_08_3, k__DCT_CONST_ROUNDING); + const __m128i out_24_4 = + _mm_add_epi32(out_24_2, k__DCT_CONST_ROUNDING); + const __m128i out_24_5 = + _mm_add_epi32(out_24_3, k__DCT_CONST_ROUNDING); + const __m128i out_00_6 = _mm_srai_epi32(out_00_4, DCT_CONST_BITS); + const __m128i out_00_7 = _mm_srai_epi32(out_00_5, DCT_CONST_BITS); + const __m128i out_16_6 = _mm_srai_epi32(out_16_4, DCT_CONST_BITS); + const __m128i out_16_7 = _mm_srai_epi32(out_16_5, DCT_CONST_BITS); + const __m128i out_08_6 = _mm_srai_epi32(out_08_4, DCT_CONST_BITS); + const __m128i out_08_7 = _mm_srai_epi32(out_08_5, DCT_CONST_BITS); + const 
__m128i out_24_6 = _mm_srai_epi32(out_24_4, DCT_CONST_BITS); + const __m128i out_24_7 = _mm_srai_epi32(out_24_5, DCT_CONST_BITS); + // Combine + out[0] = _mm_packs_epi32(out_00_6, out_00_7); + out[16] = _mm_packs_epi32(out_16_6, out_16_7); + out[8] = _mm_packs_epi32(out_08_6, out_08_7); + out[24] = _mm_packs_epi32(out_24_6, out_24_7); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&out[0], &out[16], - &out[8], &out[24]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = + check_epi16_overflow_x4(&out[0], &out[16], &out[8], &out[24]); + if (overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i s2_09_0 = _mm_unpacklo_epi16(step1[ 9], step1[14]); - const __m128i s2_09_1 = _mm_unpackhi_epi16(step1[ 9], step1[14]); - const __m128i s2_10_0 = _mm_unpacklo_epi16(step1[10], step1[13]); - const __m128i s2_10_1 = _mm_unpackhi_epi16(step1[10], step1[13]); - const __m128i s2_09_2 = _mm_madd_epi16(s2_09_0, k__cospi_m08_p24); - const __m128i s2_09_3 = _mm_madd_epi16(s2_09_1, k__cospi_m08_p24); - const __m128i s2_10_2 = _mm_madd_epi16(s2_10_0, k__cospi_m24_m08); - const __m128i s2_10_3 = _mm_madd_epi16(s2_10_1, k__cospi_m24_m08); - const __m128i s2_13_2 = _mm_madd_epi16(s2_10_0, k__cospi_m08_p24); - const __m128i s2_13_3 = _mm_madd_epi16(s2_10_1, k__cospi_m08_p24); - const __m128i s2_14_2 = _mm_madd_epi16(s2_09_0, k__cospi_p24_p08); - const __m128i s2_14_3 = _mm_madd_epi16(s2_09_1, k__cospi_p24_p08); - // dct_const_round_shift - const __m128i s2_09_4 = _mm_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING); - const __m128i s2_09_5 = _mm_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING); - const __m128i s2_10_4 = _mm_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING); - const __m128i s2_10_5 = _mm_add_epi32(s2_10_3, 
k__DCT_CONST_ROUNDING); - const __m128i s2_13_4 = _mm_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING); - const __m128i s2_13_5 = _mm_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING); - const __m128i s2_14_4 = _mm_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING); - const __m128i s2_14_5 = _mm_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING); - const __m128i s2_09_6 = _mm_srai_epi32(s2_09_4, DCT_CONST_BITS); - const __m128i s2_09_7 = _mm_srai_epi32(s2_09_5, DCT_CONST_BITS); - const __m128i s2_10_6 = _mm_srai_epi32(s2_10_4, DCT_CONST_BITS); - const __m128i s2_10_7 = _mm_srai_epi32(s2_10_5, DCT_CONST_BITS); - const __m128i s2_13_6 = _mm_srai_epi32(s2_13_4, DCT_CONST_BITS); - const __m128i s2_13_7 = _mm_srai_epi32(s2_13_5, DCT_CONST_BITS); - const __m128i s2_14_6 = _mm_srai_epi32(s2_14_4, DCT_CONST_BITS); - const __m128i s2_14_7 = _mm_srai_epi32(s2_14_5, DCT_CONST_BITS); - // Combine - step2[ 9] = _mm_packs_epi32(s2_09_6, s2_09_7); - step2[10] = _mm_packs_epi32(s2_10_6, s2_10_7); - step2[13] = _mm_packs_epi32(s2_13_6, s2_13_7); - step2[14] = _mm_packs_epi32(s2_14_6, s2_14_7); + } + { + const __m128i s2_09_0 = _mm_unpacklo_epi16(step1[9], step1[14]); + const __m128i s2_09_1 = _mm_unpackhi_epi16(step1[9], step1[14]); + const __m128i s2_10_0 = _mm_unpacklo_epi16(step1[10], step1[13]); + const __m128i s2_10_1 = _mm_unpackhi_epi16(step1[10], step1[13]); + const __m128i s2_09_2 = _mm_madd_epi16(s2_09_0, k__cospi_m08_p24); + const __m128i s2_09_3 = _mm_madd_epi16(s2_09_1, k__cospi_m08_p24); + const __m128i s2_10_2 = _mm_madd_epi16(s2_10_0, k__cospi_m24_m08); + const __m128i s2_10_3 = _mm_madd_epi16(s2_10_1, k__cospi_m24_m08); + const __m128i s2_13_2 = _mm_madd_epi16(s2_10_0, k__cospi_m08_p24); + const __m128i s2_13_3 = _mm_madd_epi16(s2_10_1, k__cospi_m08_p24); + const __m128i s2_14_2 = _mm_madd_epi16(s2_09_0, k__cospi_p24_p08); + const __m128i s2_14_3 = _mm_madd_epi16(s2_09_1, k__cospi_p24_p08); + // dct_const_round_shift + const __m128i s2_09_4 = _mm_add_epi32(s2_09_2, k__DCT_CONST_ROUNDING); + const 
__m128i s2_09_5 = _mm_add_epi32(s2_09_3, k__DCT_CONST_ROUNDING); + const __m128i s2_10_4 = _mm_add_epi32(s2_10_2, k__DCT_CONST_ROUNDING); + const __m128i s2_10_5 = _mm_add_epi32(s2_10_3, k__DCT_CONST_ROUNDING); + const __m128i s2_13_4 = _mm_add_epi32(s2_13_2, k__DCT_CONST_ROUNDING); + const __m128i s2_13_5 = _mm_add_epi32(s2_13_3, k__DCT_CONST_ROUNDING); + const __m128i s2_14_4 = _mm_add_epi32(s2_14_2, k__DCT_CONST_ROUNDING); + const __m128i s2_14_5 = _mm_add_epi32(s2_14_3, k__DCT_CONST_ROUNDING); + const __m128i s2_09_6 = _mm_srai_epi32(s2_09_4, DCT_CONST_BITS); + const __m128i s2_09_7 = _mm_srai_epi32(s2_09_5, DCT_CONST_BITS); + const __m128i s2_10_6 = _mm_srai_epi32(s2_10_4, DCT_CONST_BITS); + const __m128i s2_10_7 = _mm_srai_epi32(s2_10_5, DCT_CONST_BITS); + const __m128i s2_13_6 = _mm_srai_epi32(s2_13_4, DCT_CONST_BITS); + const __m128i s2_13_7 = _mm_srai_epi32(s2_13_5, DCT_CONST_BITS); + const __m128i s2_14_6 = _mm_srai_epi32(s2_14_4, DCT_CONST_BITS); + const __m128i s2_14_7 = _mm_srai_epi32(s2_14_5, DCT_CONST_BITS); + // Combine + step2[9] = _mm_packs_epi32(s2_09_6, s2_09_7); + step2[10] = _mm_packs_epi32(s2_10_6, s2_10_7); + step2[13] = _mm_packs_epi32(s2_13_6, s2_13_7); + step2[14] = _mm_packs_epi32(s2_14_6, s2_14_7); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&step2[9], &step2[10], - &step2[13], &step2[14]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = check_epi16_overflow_x4(&step2[9], &step2[10], &step2[13], + &step2[14]); + if (overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } - { - step2[16] = ADD_EPI16(step1[19], step3[16]); - step2[17] = ADD_EPI16(step1[18], step3[17]); - step2[18] = SUB_EPI16(step3[17], step1[18]); - step2[19] = SUB_EPI16(step3[16], step1[19]); - 
step2[20] = SUB_EPI16(step3[23], step1[20]); - step2[21] = SUB_EPI16(step3[22], step1[21]); - step2[22] = ADD_EPI16(step1[21], step3[22]); - step2[23] = ADD_EPI16(step1[20], step3[23]); - step2[24] = ADD_EPI16(step1[27], step3[24]); - step2[25] = ADD_EPI16(step1[26], step3[25]); - step2[26] = SUB_EPI16(step3[25], step1[26]); - step2[27] = SUB_EPI16(step3[24], step1[27]); - step2[28] = SUB_EPI16(step3[31], step1[28]); - step2[29] = SUB_EPI16(step3[30], step1[29]); - step2[30] = ADD_EPI16(step1[29], step3[30]); - step2[31] = ADD_EPI16(step1[28], step3[31]); + } + { + step2[16] = ADD_EPI16(step1[19], step3[16]); + step2[17] = ADD_EPI16(step1[18], step3[17]); + step2[18] = SUB_EPI16(step3[17], step1[18]); + step2[19] = SUB_EPI16(step3[16], step1[19]); + step2[20] = SUB_EPI16(step3[23], step1[20]); + step2[21] = SUB_EPI16(step3[22], step1[21]); + step2[22] = ADD_EPI16(step1[21], step3[22]); + step2[23] = ADD_EPI16(step1[20], step3[23]); + step2[24] = ADD_EPI16(step1[27], step3[24]); + step2[25] = ADD_EPI16(step1[26], step3[25]); + step2[26] = SUB_EPI16(step3[25], step1[26]); + step2[27] = SUB_EPI16(step3[24], step1[27]); + step2[28] = SUB_EPI16(step3[31], step1[28]); + step2[29] = SUB_EPI16(step3[30], step1[29]); + step2[30] = ADD_EPI16(step1[29], step3[30]); + step2[31] = ADD_EPI16(step1[28], step3[31]); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x16( - &step2[16], &step2[17], &step2[18], &step2[19], - &step2[20], &step2[21], &step2[22], &step2[23], - &step2[24], &step2[25], &step2[26], &step2[27], - &step2[28], &step2[29], &step2[30], &step2[31]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = check_epi16_overflow_x16( + &step2[16], &step2[17], &step2[18], &step2[19], &step2[20], + &step2[21], &step2[22], &step2[23], &step2[24], &step2[25], + &step2[26], &step2[27], &step2[28], &step2[29], &step2[30], + &step2[31]); + if 
(overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } - // Stage 6 - { - const __m128i out_04_0 = _mm_unpacklo_epi16(step2[4], step2[7]); - const __m128i out_04_1 = _mm_unpackhi_epi16(step2[4], step2[7]); - const __m128i out_20_0 = _mm_unpacklo_epi16(step2[5], step2[6]); - const __m128i out_20_1 = _mm_unpackhi_epi16(step2[5], step2[6]); - const __m128i out_12_0 = _mm_unpacklo_epi16(step2[5], step2[6]); - const __m128i out_12_1 = _mm_unpackhi_epi16(step2[5], step2[6]); - const __m128i out_28_0 = _mm_unpacklo_epi16(step2[4], step2[7]); - const __m128i out_28_1 = _mm_unpackhi_epi16(step2[4], step2[7]); - const __m128i out_04_2 = _mm_madd_epi16(out_04_0, k__cospi_p28_p04); - const __m128i out_04_3 = _mm_madd_epi16(out_04_1, k__cospi_p28_p04); - const __m128i out_20_2 = _mm_madd_epi16(out_20_0, k__cospi_p12_p20); - const __m128i out_20_3 = _mm_madd_epi16(out_20_1, k__cospi_p12_p20); - const __m128i out_12_2 = _mm_madd_epi16(out_12_0, k__cospi_m20_p12); - const __m128i out_12_3 = _mm_madd_epi16(out_12_1, k__cospi_m20_p12); - const __m128i out_28_2 = _mm_madd_epi16(out_28_0, k__cospi_m04_p28); - const __m128i out_28_3 = _mm_madd_epi16(out_28_1, k__cospi_m04_p28); - // dct_const_round_shift - const __m128i out_04_4 = _mm_add_epi32(out_04_2, k__DCT_CONST_ROUNDING); - const __m128i out_04_5 = _mm_add_epi32(out_04_3, k__DCT_CONST_ROUNDING); - const __m128i out_20_4 = _mm_add_epi32(out_20_2, k__DCT_CONST_ROUNDING); - const __m128i out_20_5 = _mm_add_epi32(out_20_3, k__DCT_CONST_ROUNDING); - const __m128i out_12_4 = _mm_add_epi32(out_12_2, k__DCT_CONST_ROUNDING); - const __m128i out_12_5 = _mm_add_epi32(out_12_3, k__DCT_CONST_ROUNDING); - const __m128i out_28_4 = _mm_add_epi32(out_28_2, k__DCT_CONST_ROUNDING); - const __m128i out_28_5 = _mm_add_epi32(out_28_3, k__DCT_CONST_ROUNDING); - const __m128i out_04_6 = _mm_srai_epi32(out_04_4, 
DCT_CONST_BITS); - const __m128i out_04_7 = _mm_srai_epi32(out_04_5, DCT_CONST_BITS); - const __m128i out_20_6 = _mm_srai_epi32(out_20_4, DCT_CONST_BITS); - const __m128i out_20_7 = _mm_srai_epi32(out_20_5, DCT_CONST_BITS); - const __m128i out_12_6 = _mm_srai_epi32(out_12_4, DCT_CONST_BITS); - const __m128i out_12_7 = _mm_srai_epi32(out_12_5, DCT_CONST_BITS); - const __m128i out_28_6 = _mm_srai_epi32(out_28_4, DCT_CONST_BITS); - const __m128i out_28_7 = _mm_srai_epi32(out_28_5, DCT_CONST_BITS); - // Combine - out[4] = _mm_packs_epi32(out_04_6, out_04_7); - out[20] = _mm_packs_epi32(out_20_6, out_20_7); - out[12] = _mm_packs_epi32(out_12_6, out_12_7); - out[28] = _mm_packs_epi32(out_28_6, out_28_7); + } + // Stage 6 + { + const __m128i out_04_0 = _mm_unpacklo_epi16(step2[4], step2[7]); + const __m128i out_04_1 = _mm_unpackhi_epi16(step2[4], step2[7]); + const __m128i out_20_0 = _mm_unpacklo_epi16(step2[5], step2[6]); + const __m128i out_20_1 = _mm_unpackhi_epi16(step2[5], step2[6]); + const __m128i out_12_0 = _mm_unpacklo_epi16(step2[5], step2[6]); + const __m128i out_12_1 = _mm_unpackhi_epi16(step2[5], step2[6]); + const __m128i out_28_0 = _mm_unpacklo_epi16(step2[4], step2[7]); + const __m128i out_28_1 = _mm_unpackhi_epi16(step2[4], step2[7]); + const __m128i out_04_2 = _mm_madd_epi16(out_04_0, k__cospi_p28_p04); + const __m128i out_04_3 = _mm_madd_epi16(out_04_1, k__cospi_p28_p04); + const __m128i out_20_2 = _mm_madd_epi16(out_20_0, k__cospi_p12_p20); + const __m128i out_20_3 = _mm_madd_epi16(out_20_1, k__cospi_p12_p20); + const __m128i out_12_2 = _mm_madd_epi16(out_12_0, k__cospi_m20_p12); + const __m128i out_12_3 = _mm_madd_epi16(out_12_1, k__cospi_m20_p12); + const __m128i out_28_2 = _mm_madd_epi16(out_28_0, k__cospi_m04_p28); + const __m128i out_28_3 = _mm_madd_epi16(out_28_1, k__cospi_m04_p28); + // dct_const_round_shift + const __m128i out_04_4 = + _mm_add_epi32(out_04_2, k__DCT_CONST_ROUNDING); + const __m128i out_04_5 = + _mm_add_epi32(out_04_3, 
k__DCT_CONST_ROUNDING); + const __m128i out_20_4 = + _mm_add_epi32(out_20_2, k__DCT_CONST_ROUNDING); + const __m128i out_20_5 = + _mm_add_epi32(out_20_3, k__DCT_CONST_ROUNDING); + const __m128i out_12_4 = + _mm_add_epi32(out_12_2, k__DCT_CONST_ROUNDING); + const __m128i out_12_5 = + _mm_add_epi32(out_12_3, k__DCT_CONST_ROUNDING); + const __m128i out_28_4 = + _mm_add_epi32(out_28_2, k__DCT_CONST_ROUNDING); + const __m128i out_28_5 = + _mm_add_epi32(out_28_3, k__DCT_CONST_ROUNDING); + const __m128i out_04_6 = _mm_srai_epi32(out_04_4, DCT_CONST_BITS); + const __m128i out_04_7 = _mm_srai_epi32(out_04_5, DCT_CONST_BITS); + const __m128i out_20_6 = _mm_srai_epi32(out_20_4, DCT_CONST_BITS); + const __m128i out_20_7 = _mm_srai_epi32(out_20_5, DCT_CONST_BITS); + const __m128i out_12_6 = _mm_srai_epi32(out_12_4, DCT_CONST_BITS); + const __m128i out_12_7 = _mm_srai_epi32(out_12_5, DCT_CONST_BITS); + const __m128i out_28_6 = _mm_srai_epi32(out_28_4, DCT_CONST_BITS); + const __m128i out_28_7 = _mm_srai_epi32(out_28_5, DCT_CONST_BITS); + // Combine + out[4] = _mm_packs_epi32(out_04_6, out_04_7); + out[20] = _mm_packs_epi32(out_20_6, out_20_7); + out[12] = _mm_packs_epi32(out_12_6, out_12_7); + out[28] = _mm_packs_epi32(out_28_6, out_28_7); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&out[4], &out[20], - &out[12], &out[28]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = + check_epi16_overflow_x4(&out[4], &out[20], &out[12], &out[28]); + if (overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } - { - step3[8] = ADD_EPI16(step2[ 9], step1[ 8]); - step3[9] = SUB_EPI16(step1[ 8], step2[ 9]); - step3[10] = SUB_EPI16(step1[11], step2[10]); - step3[11] = ADD_EPI16(step2[10], step1[11]); - step3[12] = 
ADD_EPI16(step2[13], step1[12]); - step3[13] = SUB_EPI16(step1[12], step2[13]); - step3[14] = SUB_EPI16(step1[15], step2[14]); - step3[15] = ADD_EPI16(step2[14], step1[15]); + } + { + step3[8] = ADD_EPI16(step2[9], step1[8]); + step3[9] = SUB_EPI16(step1[8], step2[9]); + step3[10] = SUB_EPI16(step1[11], step2[10]); + step3[11] = ADD_EPI16(step2[10], step1[11]); + step3[12] = ADD_EPI16(step2[13], step1[12]); + step3[13] = SUB_EPI16(step1[12], step2[13]); + step3[14] = SUB_EPI16(step1[15], step2[14]); + step3[15] = ADD_EPI16(step2[14], step1[15]); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step3[8], &step3[9], &step3[10], - &step3[11], &step3[12], &step3[13], - &step3[14], &step3[15]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = check_epi16_overflow_x8(&step3[8], &step3[9], &step3[10], + &step3[11], &step3[12], &step3[13], + &step3[14], &step3[15]); + if (overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i s3_17_0 = _mm_unpacklo_epi16(step2[17], step2[30]); - const __m128i s3_17_1 = _mm_unpackhi_epi16(step2[17], step2[30]); - const __m128i s3_18_0 = _mm_unpacklo_epi16(step2[18], step2[29]); - const __m128i s3_18_1 = _mm_unpackhi_epi16(step2[18], step2[29]); - const __m128i s3_21_0 = _mm_unpacklo_epi16(step2[21], step2[26]); - const __m128i s3_21_1 = _mm_unpackhi_epi16(step2[21], step2[26]); - const __m128i s3_22_0 = _mm_unpacklo_epi16(step2[22], step2[25]); - const __m128i s3_22_1 = _mm_unpackhi_epi16(step2[22], step2[25]); - const __m128i s3_17_2 = _mm_madd_epi16(s3_17_0, k__cospi_m04_p28); - const __m128i s3_17_3 = _mm_madd_epi16(s3_17_1, k__cospi_m04_p28); - const __m128i s3_18_2 = _mm_madd_epi16(s3_18_0, k__cospi_m28_m04); - const __m128i s3_18_3 = 
_mm_madd_epi16(s3_18_1, k__cospi_m28_m04); - const __m128i s3_21_2 = _mm_madd_epi16(s3_21_0, k__cospi_m20_p12); - const __m128i s3_21_3 = _mm_madd_epi16(s3_21_1, k__cospi_m20_p12); - const __m128i s3_22_2 = _mm_madd_epi16(s3_22_0, k__cospi_m12_m20); - const __m128i s3_22_3 = _mm_madd_epi16(s3_22_1, k__cospi_m12_m20); - const __m128i s3_25_2 = _mm_madd_epi16(s3_22_0, k__cospi_m20_p12); - const __m128i s3_25_3 = _mm_madd_epi16(s3_22_1, k__cospi_m20_p12); - const __m128i s3_26_2 = _mm_madd_epi16(s3_21_0, k__cospi_p12_p20); - const __m128i s3_26_3 = _mm_madd_epi16(s3_21_1, k__cospi_p12_p20); - const __m128i s3_29_2 = _mm_madd_epi16(s3_18_0, k__cospi_m04_p28); - const __m128i s3_29_3 = _mm_madd_epi16(s3_18_1, k__cospi_m04_p28); - const __m128i s3_30_2 = _mm_madd_epi16(s3_17_0, k__cospi_p28_p04); - const __m128i s3_30_3 = _mm_madd_epi16(s3_17_1, k__cospi_p28_p04); - // dct_const_round_shift - const __m128i s3_17_4 = _mm_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING); - const __m128i s3_17_5 = _mm_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING); - const __m128i s3_18_4 = _mm_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING); - const __m128i s3_18_5 = _mm_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING); - const __m128i s3_21_4 = _mm_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING); - const __m128i s3_21_5 = _mm_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING); - const __m128i s3_22_4 = _mm_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING); - const __m128i s3_22_5 = _mm_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING); - const __m128i s3_17_6 = _mm_srai_epi32(s3_17_4, DCT_CONST_BITS); - const __m128i s3_17_7 = _mm_srai_epi32(s3_17_5, DCT_CONST_BITS); - const __m128i s3_18_6 = _mm_srai_epi32(s3_18_4, DCT_CONST_BITS); - const __m128i s3_18_7 = _mm_srai_epi32(s3_18_5, DCT_CONST_BITS); - const __m128i s3_21_6 = _mm_srai_epi32(s3_21_4, DCT_CONST_BITS); - const __m128i s3_21_7 = _mm_srai_epi32(s3_21_5, DCT_CONST_BITS); - const __m128i s3_22_6 = _mm_srai_epi32(s3_22_4, DCT_CONST_BITS); - const __m128i s3_22_7 = 
_mm_srai_epi32(s3_22_5, DCT_CONST_BITS); - const __m128i s3_25_4 = _mm_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING); - const __m128i s3_25_5 = _mm_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING); - const __m128i s3_26_4 = _mm_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING); - const __m128i s3_26_5 = _mm_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING); - const __m128i s3_29_4 = _mm_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING); - const __m128i s3_29_5 = _mm_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING); - const __m128i s3_30_4 = _mm_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING); - const __m128i s3_30_5 = _mm_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING); - const __m128i s3_25_6 = _mm_srai_epi32(s3_25_4, DCT_CONST_BITS); - const __m128i s3_25_7 = _mm_srai_epi32(s3_25_5, DCT_CONST_BITS); - const __m128i s3_26_6 = _mm_srai_epi32(s3_26_4, DCT_CONST_BITS); - const __m128i s3_26_7 = _mm_srai_epi32(s3_26_5, DCT_CONST_BITS); - const __m128i s3_29_6 = _mm_srai_epi32(s3_29_4, DCT_CONST_BITS); - const __m128i s3_29_7 = _mm_srai_epi32(s3_29_5, DCT_CONST_BITS); - const __m128i s3_30_6 = _mm_srai_epi32(s3_30_4, DCT_CONST_BITS); - const __m128i s3_30_7 = _mm_srai_epi32(s3_30_5, DCT_CONST_BITS); - // Combine - step3[17] = _mm_packs_epi32(s3_17_6, s3_17_7); - step3[18] = _mm_packs_epi32(s3_18_6, s3_18_7); - step3[21] = _mm_packs_epi32(s3_21_6, s3_21_7); - step3[22] = _mm_packs_epi32(s3_22_6, s3_22_7); - // Combine - step3[25] = _mm_packs_epi32(s3_25_6, s3_25_7); - step3[26] = _mm_packs_epi32(s3_26_6, s3_26_7); - step3[29] = _mm_packs_epi32(s3_29_6, s3_29_7); - step3[30] = _mm_packs_epi32(s3_30_6, s3_30_7); + } + { + const __m128i s3_17_0 = _mm_unpacklo_epi16(step2[17], step2[30]); + const __m128i s3_17_1 = _mm_unpackhi_epi16(step2[17], step2[30]); + const __m128i s3_18_0 = _mm_unpacklo_epi16(step2[18], step2[29]); + const __m128i s3_18_1 = _mm_unpackhi_epi16(step2[18], step2[29]); + const __m128i s3_21_0 = _mm_unpacklo_epi16(step2[21], step2[26]); + const __m128i s3_21_1 = _mm_unpackhi_epi16(step2[21], step2[26]); + 
const __m128i s3_22_0 = _mm_unpacklo_epi16(step2[22], step2[25]); + const __m128i s3_22_1 = _mm_unpackhi_epi16(step2[22], step2[25]); + const __m128i s3_17_2 = _mm_madd_epi16(s3_17_0, k__cospi_m04_p28); + const __m128i s3_17_3 = _mm_madd_epi16(s3_17_1, k__cospi_m04_p28); + const __m128i s3_18_2 = _mm_madd_epi16(s3_18_0, k__cospi_m28_m04); + const __m128i s3_18_3 = _mm_madd_epi16(s3_18_1, k__cospi_m28_m04); + const __m128i s3_21_2 = _mm_madd_epi16(s3_21_0, k__cospi_m20_p12); + const __m128i s3_21_3 = _mm_madd_epi16(s3_21_1, k__cospi_m20_p12); + const __m128i s3_22_2 = _mm_madd_epi16(s3_22_0, k__cospi_m12_m20); + const __m128i s3_22_3 = _mm_madd_epi16(s3_22_1, k__cospi_m12_m20); + const __m128i s3_25_2 = _mm_madd_epi16(s3_22_0, k__cospi_m20_p12); + const __m128i s3_25_3 = _mm_madd_epi16(s3_22_1, k__cospi_m20_p12); + const __m128i s3_26_2 = _mm_madd_epi16(s3_21_0, k__cospi_p12_p20); + const __m128i s3_26_3 = _mm_madd_epi16(s3_21_1, k__cospi_p12_p20); + const __m128i s3_29_2 = _mm_madd_epi16(s3_18_0, k__cospi_m04_p28); + const __m128i s3_29_3 = _mm_madd_epi16(s3_18_1, k__cospi_m04_p28); + const __m128i s3_30_2 = _mm_madd_epi16(s3_17_0, k__cospi_p28_p04); + const __m128i s3_30_3 = _mm_madd_epi16(s3_17_1, k__cospi_p28_p04); + // dct_const_round_shift + const __m128i s3_17_4 = _mm_add_epi32(s3_17_2, k__DCT_CONST_ROUNDING); + const __m128i s3_17_5 = _mm_add_epi32(s3_17_3, k__DCT_CONST_ROUNDING); + const __m128i s3_18_4 = _mm_add_epi32(s3_18_2, k__DCT_CONST_ROUNDING); + const __m128i s3_18_5 = _mm_add_epi32(s3_18_3, k__DCT_CONST_ROUNDING); + const __m128i s3_21_4 = _mm_add_epi32(s3_21_2, k__DCT_CONST_ROUNDING); + const __m128i s3_21_5 = _mm_add_epi32(s3_21_3, k__DCT_CONST_ROUNDING); + const __m128i s3_22_4 = _mm_add_epi32(s3_22_2, k__DCT_CONST_ROUNDING); + const __m128i s3_22_5 = _mm_add_epi32(s3_22_3, k__DCT_CONST_ROUNDING); + const __m128i s3_17_6 = _mm_srai_epi32(s3_17_4, DCT_CONST_BITS); + const __m128i s3_17_7 = _mm_srai_epi32(s3_17_5, DCT_CONST_BITS); + const __m128i 
s3_18_6 = _mm_srai_epi32(s3_18_4, DCT_CONST_BITS); + const __m128i s3_18_7 = _mm_srai_epi32(s3_18_5, DCT_CONST_BITS); + const __m128i s3_21_6 = _mm_srai_epi32(s3_21_4, DCT_CONST_BITS); + const __m128i s3_21_7 = _mm_srai_epi32(s3_21_5, DCT_CONST_BITS); + const __m128i s3_22_6 = _mm_srai_epi32(s3_22_4, DCT_CONST_BITS); + const __m128i s3_22_7 = _mm_srai_epi32(s3_22_5, DCT_CONST_BITS); + const __m128i s3_25_4 = _mm_add_epi32(s3_25_2, k__DCT_CONST_ROUNDING); + const __m128i s3_25_5 = _mm_add_epi32(s3_25_3, k__DCT_CONST_ROUNDING); + const __m128i s3_26_4 = _mm_add_epi32(s3_26_2, k__DCT_CONST_ROUNDING); + const __m128i s3_26_5 = _mm_add_epi32(s3_26_3, k__DCT_CONST_ROUNDING); + const __m128i s3_29_4 = _mm_add_epi32(s3_29_2, k__DCT_CONST_ROUNDING); + const __m128i s3_29_5 = _mm_add_epi32(s3_29_3, k__DCT_CONST_ROUNDING); + const __m128i s3_30_4 = _mm_add_epi32(s3_30_2, k__DCT_CONST_ROUNDING); + const __m128i s3_30_5 = _mm_add_epi32(s3_30_3, k__DCT_CONST_ROUNDING); + const __m128i s3_25_6 = _mm_srai_epi32(s3_25_4, DCT_CONST_BITS); + const __m128i s3_25_7 = _mm_srai_epi32(s3_25_5, DCT_CONST_BITS); + const __m128i s3_26_6 = _mm_srai_epi32(s3_26_4, DCT_CONST_BITS); + const __m128i s3_26_7 = _mm_srai_epi32(s3_26_5, DCT_CONST_BITS); + const __m128i s3_29_6 = _mm_srai_epi32(s3_29_4, DCT_CONST_BITS); + const __m128i s3_29_7 = _mm_srai_epi32(s3_29_5, DCT_CONST_BITS); + const __m128i s3_30_6 = _mm_srai_epi32(s3_30_4, DCT_CONST_BITS); + const __m128i s3_30_7 = _mm_srai_epi32(s3_30_5, DCT_CONST_BITS); + // Combine + step3[17] = _mm_packs_epi32(s3_17_6, s3_17_7); + step3[18] = _mm_packs_epi32(s3_18_6, s3_18_7); + step3[21] = _mm_packs_epi32(s3_21_6, s3_21_7); + step3[22] = _mm_packs_epi32(s3_22_6, s3_22_7); + // Combine + step3[25] = _mm_packs_epi32(s3_25_6, s3_25_7); + step3[26] = _mm_packs_epi32(s3_26_6, s3_26_7); + step3[29] = _mm_packs_epi32(s3_29_6, s3_29_7); + step3[30] = _mm_packs_epi32(s3_30_6, s3_30_7); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step3[17], 
&step3[18], &step3[21], - &step3[22], &step3[25], &step3[26], - &step3[29], &step3[30]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = check_epi16_overflow_x8(&step3[17], &step3[18], &step3[21], + &step3[22], &step3[25], &step3[26], + &step3[29], &step3[30]); + if (overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } - // Stage 7 - { - const __m128i out_02_0 = _mm_unpacklo_epi16(step3[ 8], step3[15]); - const __m128i out_02_1 = _mm_unpackhi_epi16(step3[ 8], step3[15]); - const __m128i out_18_0 = _mm_unpacklo_epi16(step3[ 9], step3[14]); - const __m128i out_18_1 = _mm_unpackhi_epi16(step3[ 9], step3[14]); - const __m128i out_10_0 = _mm_unpacklo_epi16(step3[10], step3[13]); - const __m128i out_10_1 = _mm_unpackhi_epi16(step3[10], step3[13]); - const __m128i out_26_0 = _mm_unpacklo_epi16(step3[11], step3[12]); - const __m128i out_26_1 = _mm_unpackhi_epi16(step3[11], step3[12]); - const __m128i out_02_2 = _mm_madd_epi16(out_02_0, k__cospi_p30_p02); - const __m128i out_02_3 = _mm_madd_epi16(out_02_1, k__cospi_p30_p02); - const __m128i out_18_2 = _mm_madd_epi16(out_18_0, k__cospi_p14_p18); - const __m128i out_18_3 = _mm_madd_epi16(out_18_1, k__cospi_p14_p18); - const __m128i out_10_2 = _mm_madd_epi16(out_10_0, k__cospi_p22_p10); - const __m128i out_10_3 = _mm_madd_epi16(out_10_1, k__cospi_p22_p10); - const __m128i out_26_2 = _mm_madd_epi16(out_26_0, k__cospi_p06_p26); - const __m128i out_26_3 = _mm_madd_epi16(out_26_1, k__cospi_p06_p26); - const __m128i out_06_2 = _mm_madd_epi16(out_26_0, k__cospi_m26_p06); - const __m128i out_06_3 = _mm_madd_epi16(out_26_1, k__cospi_m26_p06); - const __m128i out_22_2 = _mm_madd_epi16(out_10_0, k__cospi_m10_p22); - const __m128i out_22_3 = _mm_madd_epi16(out_10_1, 
k__cospi_m10_p22); - const __m128i out_14_2 = _mm_madd_epi16(out_18_0, k__cospi_m18_p14); - const __m128i out_14_3 = _mm_madd_epi16(out_18_1, k__cospi_m18_p14); - const __m128i out_30_2 = _mm_madd_epi16(out_02_0, k__cospi_m02_p30); - const __m128i out_30_3 = _mm_madd_epi16(out_02_1, k__cospi_m02_p30); - // dct_const_round_shift - const __m128i out_02_4 = _mm_add_epi32(out_02_2, k__DCT_CONST_ROUNDING); - const __m128i out_02_5 = _mm_add_epi32(out_02_3, k__DCT_CONST_ROUNDING); - const __m128i out_18_4 = _mm_add_epi32(out_18_2, k__DCT_CONST_ROUNDING); - const __m128i out_18_5 = _mm_add_epi32(out_18_3, k__DCT_CONST_ROUNDING); - const __m128i out_10_4 = _mm_add_epi32(out_10_2, k__DCT_CONST_ROUNDING); - const __m128i out_10_5 = _mm_add_epi32(out_10_3, k__DCT_CONST_ROUNDING); - const __m128i out_26_4 = _mm_add_epi32(out_26_2, k__DCT_CONST_ROUNDING); - const __m128i out_26_5 = _mm_add_epi32(out_26_3, k__DCT_CONST_ROUNDING); - const __m128i out_06_4 = _mm_add_epi32(out_06_2, k__DCT_CONST_ROUNDING); - const __m128i out_06_5 = _mm_add_epi32(out_06_3, k__DCT_CONST_ROUNDING); - const __m128i out_22_4 = _mm_add_epi32(out_22_2, k__DCT_CONST_ROUNDING); - const __m128i out_22_5 = _mm_add_epi32(out_22_3, k__DCT_CONST_ROUNDING); - const __m128i out_14_4 = _mm_add_epi32(out_14_2, k__DCT_CONST_ROUNDING); - const __m128i out_14_5 = _mm_add_epi32(out_14_3, k__DCT_CONST_ROUNDING); - const __m128i out_30_4 = _mm_add_epi32(out_30_2, k__DCT_CONST_ROUNDING); - const __m128i out_30_5 = _mm_add_epi32(out_30_3, k__DCT_CONST_ROUNDING); - const __m128i out_02_6 = _mm_srai_epi32(out_02_4, DCT_CONST_BITS); - const __m128i out_02_7 = _mm_srai_epi32(out_02_5, DCT_CONST_BITS); - const __m128i out_18_6 = _mm_srai_epi32(out_18_4, DCT_CONST_BITS); - const __m128i out_18_7 = _mm_srai_epi32(out_18_5, DCT_CONST_BITS); - const __m128i out_10_6 = _mm_srai_epi32(out_10_4, DCT_CONST_BITS); - const __m128i out_10_7 = _mm_srai_epi32(out_10_5, DCT_CONST_BITS); - const __m128i out_26_6 = _mm_srai_epi32(out_26_4, 
DCT_CONST_BITS); - const __m128i out_26_7 = _mm_srai_epi32(out_26_5, DCT_CONST_BITS); - const __m128i out_06_6 = _mm_srai_epi32(out_06_4, DCT_CONST_BITS); - const __m128i out_06_7 = _mm_srai_epi32(out_06_5, DCT_CONST_BITS); - const __m128i out_22_6 = _mm_srai_epi32(out_22_4, DCT_CONST_BITS); - const __m128i out_22_7 = _mm_srai_epi32(out_22_5, DCT_CONST_BITS); - const __m128i out_14_6 = _mm_srai_epi32(out_14_4, DCT_CONST_BITS); - const __m128i out_14_7 = _mm_srai_epi32(out_14_5, DCT_CONST_BITS); - const __m128i out_30_6 = _mm_srai_epi32(out_30_4, DCT_CONST_BITS); - const __m128i out_30_7 = _mm_srai_epi32(out_30_5, DCT_CONST_BITS); - // Combine - out[ 2] = _mm_packs_epi32(out_02_6, out_02_7); - out[18] = _mm_packs_epi32(out_18_6, out_18_7); - out[10] = _mm_packs_epi32(out_10_6, out_10_7); - out[26] = _mm_packs_epi32(out_26_6, out_26_7); - out[ 6] = _mm_packs_epi32(out_06_6, out_06_7); - out[22] = _mm_packs_epi32(out_22_6, out_22_7); - out[14] = _mm_packs_epi32(out_14_6, out_14_7); - out[30] = _mm_packs_epi32(out_30_6, out_30_7); + } + // Stage 7 + { + const __m128i out_02_0 = _mm_unpacklo_epi16(step3[8], step3[15]); + const __m128i out_02_1 = _mm_unpackhi_epi16(step3[8], step3[15]); + const __m128i out_18_0 = _mm_unpacklo_epi16(step3[9], step3[14]); + const __m128i out_18_1 = _mm_unpackhi_epi16(step3[9], step3[14]); + const __m128i out_10_0 = _mm_unpacklo_epi16(step3[10], step3[13]); + const __m128i out_10_1 = _mm_unpackhi_epi16(step3[10], step3[13]); + const __m128i out_26_0 = _mm_unpacklo_epi16(step3[11], step3[12]); + const __m128i out_26_1 = _mm_unpackhi_epi16(step3[11], step3[12]); + const __m128i out_02_2 = _mm_madd_epi16(out_02_0, k__cospi_p30_p02); + const __m128i out_02_3 = _mm_madd_epi16(out_02_1, k__cospi_p30_p02); + const __m128i out_18_2 = _mm_madd_epi16(out_18_0, k__cospi_p14_p18); + const __m128i out_18_3 = _mm_madd_epi16(out_18_1, k__cospi_p14_p18); + const __m128i out_10_2 = _mm_madd_epi16(out_10_0, k__cospi_p22_p10); + const __m128i out_10_3 = 
_mm_madd_epi16(out_10_1, k__cospi_p22_p10); + const __m128i out_26_2 = _mm_madd_epi16(out_26_0, k__cospi_p06_p26); + const __m128i out_26_3 = _mm_madd_epi16(out_26_1, k__cospi_p06_p26); + const __m128i out_06_2 = _mm_madd_epi16(out_26_0, k__cospi_m26_p06); + const __m128i out_06_3 = _mm_madd_epi16(out_26_1, k__cospi_m26_p06); + const __m128i out_22_2 = _mm_madd_epi16(out_10_0, k__cospi_m10_p22); + const __m128i out_22_3 = _mm_madd_epi16(out_10_1, k__cospi_m10_p22); + const __m128i out_14_2 = _mm_madd_epi16(out_18_0, k__cospi_m18_p14); + const __m128i out_14_3 = _mm_madd_epi16(out_18_1, k__cospi_m18_p14); + const __m128i out_30_2 = _mm_madd_epi16(out_02_0, k__cospi_m02_p30); + const __m128i out_30_3 = _mm_madd_epi16(out_02_1, k__cospi_m02_p30); + // dct_const_round_shift + const __m128i out_02_4 = + _mm_add_epi32(out_02_2, k__DCT_CONST_ROUNDING); + const __m128i out_02_5 = + _mm_add_epi32(out_02_3, k__DCT_CONST_ROUNDING); + const __m128i out_18_4 = + _mm_add_epi32(out_18_2, k__DCT_CONST_ROUNDING); + const __m128i out_18_5 = + _mm_add_epi32(out_18_3, k__DCT_CONST_ROUNDING); + const __m128i out_10_4 = + _mm_add_epi32(out_10_2, k__DCT_CONST_ROUNDING); + const __m128i out_10_5 = + _mm_add_epi32(out_10_3, k__DCT_CONST_ROUNDING); + const __m128i out_26_4 = + _mm_add_epi32(out_26_2, k__DCT_CONST_ROUNDING); + const __m128i out_26_5 = + _mm_add_epi32(out_26_3, k__DCT_CONST_ROUNDING); + const __m128i out_06_4 = + _mm_add_epi32(out_06_2, k__DCT_CONST_ROUNDING); + const __m128i out_06_5 = + _mm_add_epi32(out_06_3, k__DCT_CONST_ROUNDING); + const __m128i out_22_4 = + _mm_add_epi32(out_22_2, k__DCT_CONST_ROUNDING); + const __m128i out_22_5 = + _mm_add_epi32(out_22_3, k__DCT_CONST_ROUNDING); + const __m128i out_14_4 = + _mm_add_epi32(out_14_2, k__DCT_CONST_ROUNDING); + const __m128i out_14_5 = + _mm_add_epi32(out_14_3, k__DCT_CONST_ROUNDING); + const __m128i out_30_4 = + _mm_add_epi32(out_30_2, k__DCT_CONST_ROUNDING); + const __m128i out_30_5 = + _mm_add_epi32(out_30_3, 
k__DCT_CONST_ROUNDING); + const __m128i out_02_6 = _mm_srai_epi32(out_02_4, DCT_CONST_BITS); + const __m128i out_02_7 = _mm_srai_epi32(out_02_5, DCT_CONST_BITS); + const __m128i out_18_6 = _mm_srai_epi32(out_18_4, DCT_CONST_BITS); + const __m128i out_18_7 = _mm_srai_epi32(out_18_5, DCT_CONST_BITS); + const __m128i out_10_6 = _mm_srai_epi32(out_10_4, DCT_CONST_BITS); + const __m128i out_10_7 = _mm_srai_epi32(out_10_5, DCT_CONST_BITS); + const __m128i out_26_6 = _mm_srai_epi32(out_26_4, DCT_CONST_BITS); + const __m128i out_26_7 = _mm_srai_epi32(out_26_5, DCT_CONST_BITS); + const __m128i out_06_6 = _mm_srai_epi32(out_06_4, DCT_CONST_BITS); + const __m128i out_06_7 = _mm_srai_epi32(out_06_5, DCT_CONST_BITS); + const __m128i out_22_6 = _mm_srai_epi32(out_22_4, DCT_CONST_BITS); + const __m128i out_22_7 = _mm_srai_epi32(out_22_5, DCT_CONST_BITS); + const __m128i out_14_6 = _mm_srai_epi32(out_14_4, DCT_CONST_BITS); + const __m128i out_14_7 = _mm_srai_epi32(out_14_5, DCT_CONST_BITS); + const __m128i out_30_6 = _mm_srai_epi32(out_30_4, DCT_CONST_BITS); + const __m128i out_30_7 = _mm_srai_epi32(out_30_5, DCT_CONST_BITS); + // Combine + out[2] = _mm_packs_epi32(out_02_6, out_02_7); + out[18] = _mm_packs_epi32(out_18_6, out_18_7); + out[10] = _mm_packs_epi32(out_10_6, out_10_7); + out[26] = _mm_packs_epi32(out_26_6, out_26_7); + out[6] = _mm_packs_epi32(out_06_6, out_06_7); + out[22] = _mm_packs_epi32(out_22_6, out_22_7); + out[14] = _mm_packs_epi32(out_14_6, out_14_7); + out[30] = _mm_packs_epi32(out_30_6, out_30_7); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10], - &out[26], &out[6], &out[22], - &out[14], &out[30]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = + check_epi16_overflow_x8(&out[2], &out[18], &out[10], &out[26], + &out[6], &out[22], &out[14], &out[30]); + if (overflow) { + if (pass == 0) + 
HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } - { - step1[16] = ADD_EPI16(step3[17], step2[16]); - step1[17] = SUB_EPI16(step2[16], step3[17]); - step1[18] = SUB_EPI16(step2[19], step3[18]); - step1[19] = ADD_EPI16(step3[18], step2[19]); - step1[20] = ADD_EPI16(step3[21], step2[20]); - step1[21] = SUB_EPI16(step2[20], step3[21]); - step1[22] = SUB_EPI16(step2[23], step3[22]); - step1[23] = ADD_EPI16(step3[22], step2[23]); - step1[24] = ADD_EPI16(step3[25], step2[24]); - step1[25] = SUB_EPI16(step2[24], step3[25]); - step1[26] = SUB_EPI16(step2[27], step3[26]); - step1[27] = ADD_EPI16(step3[26], step2[27]); - step1[28] = ADD_EPI16(step3[29], step2[28]); - step1[29] = SUB_EPI16(step2[28], step3[29]); - step1[30] = SUB_EPI16(step2[31], step3[30]); - step1[31] = ADD_EPI16(step3[30], step2[31]); + } + { + step1[16] = ADD_EPI16(step3[17], step2[16]); + step1[17] = SUB_EPI16(step2[16], step3[17]); + step1[18] = SUB_EPI16(step2[19], step3[18]); + step1[19] = ADD_EPI16(step3[18], step2[19]); + step1[20] = ADD_EPI16(step3[21], step2[20]); + step1[21] = SUB_EPI16(step2[20], step3[21]); + step1[22] = SUB_EPI16(step2[23], step3[22]); + step1[23] = ADD_EPI16(step3[22], step2[23]); + step1[24] = ADD_EPI16(step3[25], step2[24]); + step1[25] = SUB_EPI16(step2[24], step3[25]); + step1[26] = SUB_EPI16(step2[27], step3[26]); + step1[27] = ADD_EPI16(step3[26], step2[27]); + step1[28] = ADD_EPI16(step3[29], step2[28]); + step1[29] = SUB_EPI16(step2[28], step3[29]); + step1[30] = SUB_EPI16(step2[31], step3[30]); + step1[31] = ADD_EPI16(step3[30], step2[31]); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x16( - &step1[16], &step1[17], &step1[18], &step1[19], - &step1[20], &step1[21], &step1[22], &step1[23], - &step1[24], &step1[25], &step1[26], &step1[27], - &step1[28], &step1[29], &step1[30], &step1[31]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, 
output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = check_epi16_overflow_x16( + &step1[16], &step1[17], &step1[18], &step1[19], &step1[20], + &step1[21], &step1[22], &step1[23], &step1[24], &step1[25], + &step1[26], &step1[27], &step1[28], &step1[29], &step1[30], + &step1[31]); + if (overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } - // Final stage --- outputs indices are bit-reversed. - { - const __m128i out_01_0 = _mm_unpacklo_epi16(step1[16], step1[31]); - const __m128i out_01_1 = _mm_unpackhi_epi16(step1[16], step1[31]); - const __m128i out_17_0 = _mm_unpacklo_epi16(step1[17], step1[30]); - const __m128i out_17_1 = _mm_unpackhi_epi16(step1[17], step1[30]); - const __m128i out_09_0 = _mm_unpacklo_epi16(step1[18], step1[29]); - const __m128i out_09_1 = _mm_unpackhi_epi16(step1[18], step1[29]); - const __m128i out_25_0 = _mm_unpacklo_epi16(step1[19], step1[28]); - const __m128i out_25_1 = _mm_unpackhi_epi16(step1[19], step1[28]); - const __m128i out_01_2 = _mm_madd_epi16(out_01_0, k__cospi_p31_p01); - const __m128i out_01_3 = _mm_madd_epi16(out_01_1, k__cospi_p31_p01); - const __m128i out_17_2 = _mm_madd_epi16(out_17_0, k__cospi_p15_p17); - const __m128i out_17_3 = _mm_madd_epi16(out_17_1, k__cospi_p15_p17); - const __m128i out_09_2 = _mm_madd_epi16(out_09_0, k__cospi_p23_p09); - const __m128i out_09_3 = _mm_madd_epi16(out_09_1, k__cospi_p23_p09); - const __m128i out_25_2 = _mm_madd_epi16(out_25_0, k__cospi_p07_p25); - const __m128i out_25_3 = _mm_madd_epi16(out_25_1, k__cospi_p07_p25); - const __m128i out_07_2 = _mm_madd_epi16(out_25_0, k__cospi_m25_p07); - const __m128i out_07_3 = _mm_madd_epi16(out_25_1, k__cospi_m25_p07); - const __m128i out_23_2 = _mm_madd_epi16(out_09_0, k__cospi_m09_p23); - const __m128i out_23_3 = _mm_madd_epi16(out_09_1, k__cospi_m09_p23); - const 
__m128i out_15_2 = _mm_madd_epi16(out_17_0, k__cospi_m17_p15); - const __m128i out_15_3 = _mm_madd_epi16(out_17_1, k__cospi_m17_p15); - const __m128i out_31_2 = _mm_madd_epi16(out_01_0, k__cospi_m01_p31); - const __m128i out_31_3 = _mm_madd_epi16(out_01_1, k__cospi_m01_p31); - // dct_const_round_shift - const __m128i out_01_4 = _mm_add_epi32(out_01_2, k__DCT_CONST_ROUNDING); - const __m128i out_01_5 = _mm_add_epi32(out_01_3, k__DCT_CONST_ROUNDING); - const __m128i out_17_4 = _mm_add_epi32(out_17_2, k__DCT_CONST_ROUNDING); - const __m128i out_17_5 = _mm_add_epi32(out_17_3, k__DCT_CONST_ROUNDING); - const __m128i out_09_4 = _mm_add_epi32(out_09_2, k__DCT_CONST_ROUNDING); - const __m128i out_09_5 = _mm_add_epi32(out_09_3, k__DCT_CONST_ROUNDING); - const __m128i out_25_4 = _mm_add_epi32(out_25_2, k__DCT_CONST_ROUNDING); - const __m128i out_25_5 = _mm_add_epi32(out_25_3, k__DCT_CONST_ROUNDING); - const __m128i out_07_4 = _mm_add_epi32(out_07_2, k__DCT_CONST_ROUNDING); - const __m128i out_07_5 = _mm_add_epi32(out_07_3, k__DCT_CONST_ROUNDING); - const __m128i out_23_4 = _mm_add_epi32(out_23_2, k__DCT_CONST_ROUNDING); - const __m128i out_23_5 = _mm_add_epi32(out_23_3, k__DCT_CONST_ROUNDING); - const __m128i out_15_4 = _mm_add_epi32(out_15_2, k__DCT_CONST_ROUNDING); - const __m128i out_15_5 = _mm_add_epi32(out_15_3, k__DCT_CONST_ROUNDING); - const __m128i out_31_4 = _mm_add_epi32(out_31_2, k__DCT_CONST_ROUNDING); - const __m128i out_31_5 = _mm_add_epi32(out_31_3, k__DCT_CONST_ROUNDING); - const __m128i out_01_6 = _mm_srai_epi32(out_01_4, DCT_CONST_BITS); - const __m128i out_01_7 = _mm_srai_epi32(out_01_5, DCT_CONST_BITS); - const __m128i out_17_6 = _mm_srai_epi32(out_17_4, DCT_CONST_BITS); - const __m128i out_17_7 = _mm_srai_epi32(out_17_5, DCT_CONST_BITS); - const __m128i out_09_6 = _mm_srai_epi32(out_09_4, DCT_CONST_BITS); - const __m128i out_09_7 = _mm_srai_epi32(out_09_5, DCT_CONST_BITS); - const __m128i out_25_6 = _mm_srai_epi32(out_25_4, DCT_CONST_BITS); - const 
__m128i out_25_7 = _mm_srai_epi32(out_25_5, DCT_CONST_BITS); - const __m128i out_07_6 = _mm_srai_epi32(out_07_4, DCT_CONST_BITS); - const __m128i out_07_7 = _mm_srai_epi32(out_07_5, DCT_CONST_BITS); - const __m128i out_23_6 = _mm_srai_epi32(out_23_4, DCT_CONST_BITS); - const __m128i out_23_7 = _mm_srai_epi32(out_23_5, DCT_CONST_BITS); - const __m128i out_15_6 = _mm_srai_epi32(out_15_4, DCT_CONST_BITS); - const __m128i out_15_7 = _mm_srai_epi32(out_15_5, DCT_CONST_BITS); - const __m128i out_31_6 = _mm_srai_epi32(out_31_4, DCT_CONST_BITS); - const __m128i out_31_7 = _mm_srai_epi32(out_31_5, DCT_CONST_BITS); - // Combine - out[ 1] = _mm_packs_epi32(out_01_6, out_01_7); - out[17] = _mm_packs_epi32(out_17_6, out_17_7); - out[ 9] = _mm_packs_epi32(out_09_6, out_09_7); - out[25] = _mm_packs_epi32(out_25_6, out_25_7); - out[ 7] = _mm_packs_epi32(out_07_6, out_07_7); - out[23] = _mm_packs_epi32(out_23_6, out_23_7); - out[15] = _mm_packs_epi32(out_15_6, out_15_7); - out[31] = _mm_packs_epi32(out_31_6, out_31_7); + } + // Final stage --- outputs indices are bit-reversed. 
+ { + const __m128i out_01_0 = _mm_unpacklo_epi16(step1[16], step1[31]); + const __m128i out_01_1 = _mm_unpackhi_epi16(step1[16], step1[31]); + const __m128i out_17_0 = _mm_unpacklo_epi16(step1[17], step1[30]); + const __m128i out_17_1 = _mm_unpackhi_epi16(step1[17], step1[30]); + const __m128i out_09_0 = _mm_unpacklo_epi16(step1[18], step1[29]); + const __m128i out_09_1 = _mm_unpackhi_epi16(step1[18], step1[29]); + const __m128i out_25_0 = _mm_unpacklo_epi16(step1[19], step1[28]); + const __m128i out_25_1 = _mm_unpackhi_epi16(step1[19], step1[28]); + const __m128i out_01_2 = _mm_madd_epi16(out_01_0, k__cospi_p31_p01); + const __m128i out_01_3 = _mm_madd_epi16(out_01_1, k__cospi_p31_p01); + const __m128i out_17_2 = _mm_madd_epi16(out_17_0, k__cospi_p15_p17); + const __m128i out_17_3 = _mm_madd_epi16(out_17_1, k__cospi_p15_p17); + const __m128i out_09_2 = _mm_madd_epi16(out_09_0, k__cospi_p23_p09); + const __m128i out_09_3 = _mm_madd_epi16(out_09_1, k__cospi_p23_p09); + const __m128i out_25_2 = _mm_madd_epi16(out_25_0, k__cospi_p07_p25); + const __m128i out_25_3 = _mm_madd_epi16(out_25_1, k__cospi_p07_p25); + const __m128i out_07_2 = _mm_madd_epi16(out_25_0, k__cospi_m25_p07); + const __m128i out_07_3 = _mm_madd_epi16(out_25_1, k__cospi_m25_p07); + const __m128i out_23_2 = _mm_madd_epi16(out_09_0, k__cospi_m09_p23); + const __m128i out_23_3 = _mm_madd_epi16(out_09_1, k__cospi_m09_p23); + const __m128i out_15_2 = _mm_madd_epi16(out_17_0, k__cospi_m17_p15); + const __m128i out_15_3 = _mm_madd_epi16(out_17_1, k__cospi_m17_p15); + const __m128i out_31_2 = _mm_madd_epi16(out_01_0, k__cospi_m01_p31); + const __m128i out_31_3 = _mm_madd_epi16(out_01_1, k__cospi_m01_p31); + // dct_const_round_shift + const __m128i out_01_4 = + _mm_add_epi32(out_01_2, k__DCT_CONST_ROUNDING); + const __m128i out_01_5 = + _mm_add_epi32(out_01_3, k__DCT_CONST_ROUNDING); + const __m128i out_17_4 = + _mm_add_epi32(out_17_2, k__DCT_CONST_ROUNDING); + const __m128i out_17_5 = + 
_mm_add_epi32(out_17_3, k__DCT_CONST_ROUNDING); + const __m128i out_09_4 = + _mm_add_epi32(out_09_2, k__DCT_CONST_ROUNDING); + const __m128i out_09_5 = + _mm_add_epi32(out_09_3, k__DCT_CONST_ROUNDING); + const __m128i out_25_4 = + _mm_add_epi32(out_25_2, k__DCT_CONST_ROUNDING); + const __m128i out_25_5 = + _mm_add_epi32(out_25_3, k__DCT_CONST_ROUNDING); + const __m128i out_07_4 = + _mm_add_epi32(out_07_2, k__DCT_CONST_ROUNDING); + const __m128i out_07_5 = + _mm_add_epi32(out_07_3, k__DCT_CONST_ROUNDING); + const __m128i out_23_4 = + _mm_add_epi32(out_23_2, k__DCT_CONST_ROUNDING); + const __m128i out_23_5 = + _mm_add_epi32(out_23_3, k__DCT_CONST_ROUNDING); + const __m128i out_15_4 = + _mm_add_epi32(out_15_2, k__DCT_CONST_ROUNDING); + const __m128i out_15_5 = + _mm_add_epi32(out_15_3, k__DCT_CONST_ROUNDING); + const __m128i out_31_4 = + _mm_add_epi32(out_31_2, k__DCT_CONST_ROUNDING); + const __m128i out_31_5 = + _mm_add_epi32(out_31_3, k__DCT_CONST_ROUNDING); + const __m128i out_01_6 = _mm_srai_epi32(out_01_4, DCT_CONST_BITS); + const __m128i out_01_7 = _mm_srai_epi32(out_01_5, DCT_CONST_BITS); + const __m128i out_17_6 = _mm_srai_epi32(out_17_4, DCT_CONST_BITS); + const __m128i out_17_7 = _mm_srai_epi32(out_17_5, DCT_CONST_BITS); + const __m128i out_09_6 = _mm_srai_epi32(out_09_4, DCT_CONST_BITS); + const __m128i out_09_7 = _mm_srai_epi32(out_09_5, DCT_CONST_BITS); + const __m128i out_25_6 = _mm_srai_epi32(out_25_4, DCT_CONST_BITS); + const __m128i out_25_7 = _mm_srai_epi32(out_25_5, DCT_CONST_BITS); + const __m128i out_07_6 = _mm_srai_epi32(out_07_4, DCT_CONST_BITS); + const __m128i out_07_7 = _mm_srai_epi32(out_07_5, DCT_CONST_BITS); + const __m128i out_23_6 = _mm_srai_epi32(out_23_4, DCT_CONST_BITS); + const __m128i out_23_7 = _mm_srai_epi32(out_23_5, DCT_CONST_BITS); + const __m128i out_15_6 = _mm_srai_epi32(out_15_4, DCT_CONST_BITS); + const __m128i out_15_7 = _mm_srai_epi32(out_15_5, DCT_CONST_BITS); + const __m128i out_31_6 = _mm_srai_epi32(out_31_4, 
DCT_CONST_BITS); + const __m128i out_31_7 = _mm_srai_epi32(out_31_5, DCT_CONST_BITS); + // Combine + out[1] = _mm_packs_epi32(out_01_6, out_01_7); + out[17] = _mm_packs_epi32(out_17_6, out_17_7); + out[9] = _mm_packs_epi32(out_09_6, out_09_7); + out[25] = _mm_packs_epi32(out_25_6, out_25_7); + out[7] = _mm_packs_epi32(out_07_6, out_07_7); + out[23] = _mm_packs_epi32(out_23_6, out_23_7); + out[15] = _mm_packs_epi32(out_15_6, out_15_7); + out[31] = _mm_packs_epi32(out_31_6, out_31_7); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9], - &out[25], &out[7], &out[23], - &out[15], &out[31]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = + check_epi16_overflow_x8(&out[1], &out[17], &out[9], &out[25], + &out[7], &out[23], &out[15], &out[31]); + if (overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } - { - const __m128i out_05_0 = _mm_unpacklo_epi16(step1[20], step1[27]); - const __m128i out_05_1 = _mm_unpackhi_epi16(step1[20], step1[27]); - const __m128i out_21_0 = _mm_unpacklo_epi16(step1[21], step1[26]); - const __m128i out_21_1 = _mm_unpackhi_epi16(step1[21], step1[26]); - const __m128i out_13_0 = _mm_unpacklo_epi16(step1[22], step1[25]); - const __m128i out_13_1 = _mm_unpackhi_epi16(step1[22], step1[25]); - const __m128i out_29_0 = _mm_unpacklo_epi16(step1[23], step1[24]); - const __m128i out_29_1 = _mm_unpackhi_epi16(step1[23], step1[24]); - const __m128i out_05_2 = _mm_madd_epi16(out_05_0, k__cospi_p27_p05); - const __m128i out_05_3 = _mm_madd_epi16(out_05_1, k__cospi_p27_p05); - const __m128i out_21_2 = _mm_madd_epi16(out_21_0, k__cospi_p11_p21); - const __m128i out_21_3 = _mm_madd_epi16(out_21_1, k__cospi_p11_p21); - const __m128i out_13_2 = _mm_madd_epi16(out_13_0, 
k__cospi_p19_p13); - const __m128i out_13_3 = _mm_madd_epi16(out_13_1, k__cospi_p19_p13); - const __m128i out_29_2 = _mm_madd_epi16(out_29_0, k__cospi_p03_p29); - const __m128i out_29_3 = _mm_madd_epi16(out_29_1, k__cospi_p03_p29); - const __m128i out_03_2 = _mm_madd_epi16(out_29_0, k__cospi_m29_p03); - const __m128i out_03_3 = _mm_madd_epi16(out_29_1, k__cospi_m29_p03); - const __m128i out_19_2 = _mm_madd_epi16(out_13_0, k__cospi_m13_p19); - const __m128i out_19_3 = _mm_madd_epi16(out_13_1, k__cospi_m13_p19); - const __m128i out_11_2 = _mm_madd_epi16(out_21_0, k__cospi_m21_p11); - const __m128i out_11_3 = _mm_madd_epi16(out_21_1, k__cospi_m21_p11); - const __m128i out_27_2 = _mm_madd_epi16(out_05_0, k__cospi_m05_p27); - const __m128i out_27_3 = _mm_madd_epi16(out_05_1, k__cospi_m05_p27); - // dct_const_round_shift - const __m128i out_05_4 = _mm_add_epi32(out_05_2, k__DCT_CONST_ROUNDING); - const __m128i out_05_5 = _mm_add_epi32(out_05_3, k__DCT_CONST_ROUNDING); - const __m128i out_21_4 = _mm_add_epi32(out_21_2, k__DCT_CONST_ROUNDING); - const __m128i out_21_5 = _mm_add_epi32(out_21_3, k__DCT_CONST_ROUNDING); - const __m128i out_13_4 = _mm_add_epi32(out_13_2, k__DCT_CONST_ROUNDING); - const __m128i out_13_5 = _mm_add_epi32(out_13_3, k__DCT_CONST_ROUNDING); - const __m128i out_29_4 = _mm_add_epi32(out_29_2, k__DCT_CONST_ROUNDING); - const __m128i out_29_5 = _mm_add_epi32(out_29_3, k__DCT_CONST_ROUNDING); - const __m128i out_03_4 = _mm_add_epi32(out_03_2, k__DCT_CONST_ROUNDING); - const __m128i out_03_5 = _mm_add_epi32(out_03_3, k__DCT_CONST_ROUNDING); - const __m128i out_19_4 = _mm_add_epi32(out_19_2, k__DCT_CONST_ROUNDING); - const __m128i out_19_5 = _mm_add_epi32(out_19_3, k__DCT_CONST_ROUNDING); - const __m128i out_11_4 = _mm_add_epi32(out_11_2, k__DCT_CONST_ROUNDING); - const __m128i out_11_5 = _mm_add_epi32(out_11_3, k__DCT_CONST_ROUNDING); - const __m128i out_27_4 = _mm_add_epi32(out_27_2, k__DCT_CONST_ROUNDING); - const __m128i out_27_5 = 
_mm_add_epi32(out_27_3, k__DCT_CONST_ROUNDING); - const __m128i out_05_6 = _mm_srai_epi32(out_05_4, DCT_CONST_BITS); - const __m128i out_05_7 = _mm_srai_epi32(out_05_5, DCT_CONST_BITS); - const __m128i out_21_6 = _mm_srai_epi32(out_21_4, DCT_CONST_BITS); - const __m128i out_21_7 = _mm_srai_epi32(out_21_5, DCT_CONST_BITS); - const __m128i out_13_6 = _mm_srai_epi32(out_13_4, DCT_CONST_BITS); - const __m128i out_13_7 = _mm_srai_epi32(out_13_5, DCT_CONST_BITS); - const __m128i out_29_6 = _mm_srai_epi32(out_29_4, DCT_CONST_BITS); - const __m128i out_29_7 = _mm_srai_epi32(out_29_5, DCT_CONST_BITS); - const __m128i out_03_6 = _mm_srai_epi32(out_03_4, DCT_CONST_BITS); - const __m128i out_03_7 = _mm_srai_epi32(out_03_5, DCT_CONST_BITS); - const __m128i out_19_6 = _mm_srai_epi32(out_19_4, DCT_CONST_BITS); - const __m128i out_19_7 = _mm_srai_epi32(out_19_5, DCT_CONST_BITS); - const __m128i out_11_6 = _mm_srai_epi32(out_11_4, DCT_CONST_BITS); - const __m128i out_11_7 = _mm_srai_epi32(out_11_5, DCT_CONST_BITS); - const __m128i out_27_6 = _mm_srai_epi32(out_27_4, DCT_CONST_BITS); - const __m128i out_27_7 = _mm_srai_epi32(out_27_5, DCT_CONST_BITS); - // Combine - out[ 5] = _mm_packs_epi32(out_05_6, out_05_7); - out[21] = _mm_packs_epi32(out_21_6, out_21_7); - out[13] = _mm_packs_epi32(out_13_6, out_13_7); - out[29] = _mm_packs_epi32(out_29_6, out_29_7); - out[ 3] = _mm_packs_epi32(out_03_6, out_03_7); - out[19] = _mm_packs_epi32(out_19_6, out_19_7); - out[11] = _mm_packs_epi32(out_11_6, out_11_7); - out[27] = _mm_packs_epi32(out_27_6, out_27_7); + } + { + const __m128i out_05_0 = _mm_unpacklo_epi16(step1[20], step1[27]); + const __m128i out_05_1 = _mm_unpackhi_epi16(step1[20], step1[27]); + const __m128i out_21_0 = _mm_unpacklo_epi16(step1[21], step1[26]); + const __m128i out_21_1 = _mm_unpackhi_epi16(step1[21], step1[26]); + const __m128i out_13_0 = _mm_unpacklo_epi16(step1[22], step1[25]); + const __m128i out_13_1 = _mm_unpackhi_epi16(step1[22], step1[25]); + const __m128i 
out_29_0 = _mm_unpacklo_epi16(step1[23], step1[24]); + const __m128i out_29_1 = _mm_unpackhi_epi16(step1[23], step1[24]); + const __m128i out_05_2 = _mm_madd_epi16(out_05_0, k__cospi_p27_p05); + const __m128i out_05_3 = _mm_madd_epi16(out_05_1, k__cospi_p27_p05); + const __m128i out_21_2 = _mm_madd_epi16(out_21_0, k__cospi_p11_p21); + const __m128i out_21_3 = _mm_madd_epi16(out_21_1, k__cospi_p11_p21); + const __m128i out_13_2 = _mm_madd_epi16(out_13_0, k__cospi_p19_p13); + const __m128i out_13_3 = _mm_madd_epi16(out_13_1, k__cospi_p19_p13); + const __m128i out_29_2 = _mm_madd_epi16(out_29_0, k__cospi_p03_p29); + const __m128i out_29_3 = _mm_madd_epi16(out_29_1, k__cospi_p03_p29); + const __m128i out_03_2 = _mm_madd_epi16(out_29_0, k__cospi_m29_p03); + const __m128i out_03_3 = _mm_madd_epi16(out_29_1, k__cospi_m29_p03); + const __m128i out_19_2 = _mm_madd_epi16(out_13_0, k__cospi_m13_p19); + const __m128i out_19_3 = _mm_madd_epi16(out_13_1, k__cospi_m13_p19); + const __m128i out_11_2 = _mm_madd_epi16(out_21_0, k__cospi_m21_p11); + const __m128i out_11_3 = _mm_madd_epi16(out_21_1, k__cospi_m21_p11); + const __m128i out_27_2 = _mm_madd_epi16(out_05_0, k__cospi_m05_p27); + const __m128i out_27_3 = _mm_madd_epi16(out_05_1, k__cospi_m05_p27); + // dct_const_round_shift + const __m128i out_05_4 = + _mm_add_epi32(out_05_2, k__DCT_CONST_ROUNDING); + const __m128i out_05_5 = + _mm_add_epi32(out_05_3, k__DCT_CONST_ROUNDING); + const __m128i out_21_4 = + _mm_add_epi32(out_21_2, k__DCT_CONST_ROUNDING); + const __m128i out_21_5 = + _mm_add_epi32(out_21_3, k__DCT_CONST_ROUNDING); + const __m128i out_13_4 = + _mm_add_epi32(out_13_2, k__DCT_CONST_ROUNDING); + const __m128i out_13_5 = + _mm_add_epi32(out_13_3, k__DCT_CONST_ROUNDING); + const __m128i out_29_4 = + _mm_add_epi32(out_29_2, k__DCT_CONST_ROUNDING); + const __m128i out_29_5 = + _mm_add_epi32(out_29_3, k__DCT_CONST_ROUNDING); + const __m128i out_03_4 = + _mm_add_epi32(out_03_2, k__DCT_CONST_ROUNDING); + const __m128i 
out_03_5 = + _mm_add_epi32(out_03_3, k__DCT_CONST_ROUNDING); + const __m128i out_19_4 = + _mm_add_epi32(out_19_2, k__DCT_CONST_ROUNDING); + const __m128i out_19_5 = + _mm_add_epi32(out_19_3, k__DCT_CONST_ROUNDING); + const __m128i out_11_4 = + _mm_add_epi32(out_11_2, k__DCT_CONST_ROUNDING); + const __m128i out_11_5 = + _mm_add_epi32(out_11_3, k__DCT_CONST_ROUNDING); + const __m128i out_27_4 = + _mm_add_epi32(out_27_2, k__DCT_CONST_ROUNDING); + const __m128i out_27_5 = + _mm_add_epi32(out_27_3, k__DCT_CONST_ROUNDING); + const __m128i out_05_6 = _mm_srai_epi32(out_05_4, DCT_CONST_BITS); + const __m128i out_05_7 = _mm_srai_epi32(out_05_5, DCT_CONST_BITS); + const __m128i out_21_6 = _mm_srai_epi32(out_21_4, DCT_CONST_BITS); + const __m128i out_21_7 = _mm_srai_epi32(out_21_5, DCT_CONST_BITS); + const __m128i out_13_6 = _mm_srai_epi32(out_13_4, DCT_CONST_BITS); + const __m128i out_13_7 = _mm_srai_epi32(out_13_5, DCT_CONST_BITS); + const __m128i out_29_6 = _mm_srai_epi32(out_29_4, DCT_CONST_BITS); + const __m128i out_29_7 = _mm_srai_epi32(out_29_5, DCT_CONST_BITS); + const __m128i out_03_6 = _mm_srai_epi32(out_03_4, DCT_CONST_BITS); + const __m128i out_03_7 = _mm_srai_epi32(out_03_5, DCT_CONST_BITS); + const __m128i out_19_6 = _mm_srai_epi32(out_19_4, DCT_CONST_BITS); + const __m128i out_19_7 = _mm_srai_epi32(out_19_5, DCT_CONST_BITS); + const __m128i out_11_6 = _mm_srai_epi32(out_11_4, DCT_CONST_BITS); + const __m128i out_11_7 = _mm_srai_epi32(out_11_5, DCT_CONST_BITS); + const __m128i out_27_6 = _mm_srai_epi32(out_27_4, DCT_CONST_BITS); + const __m128i out_27_7 = _mm_srai_epi32(out_27_5, DCT_CONST_BITS); + // Combine + out[5] = _mm_packs_epi32(out_05_6, out_05_7); + out[21] = _mm_packs_epi32(out_21_6, out_21_7); + out[13] = _mm_packs_epi32(out_13_6, out_13_7); + out[29] = _mm_packs_epi32(out_29_6, out_29_7); + out[3] = _mm_packs_epi32(out_03_6, out_03_7); + out[19] = _mm_packs_epi32(out_19_6, out_19_7); + out[11] = _mm_packs_epi32(out_11_6, out_11_7); + out[27] = 
_mm_packs_epi32(out_27_6, out_27_7); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13], - &out[29], &out[3], &out[19], - &out[11], &out[27]); - if (overflow) { - if (pass == 0) - HIGH_FDCT32x32_2D_C(input, output_org, stride); - else - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = + check_epi16_overflow_x8(&out[5], &out[21], &out[13], &out[29], + &out[3], &out[19], &out[11], &out[27]); + if (overflow) { + if (pass == 0) + HIGH_FDCT32x32_2D_C(input, output_org, stride); + else + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - } + } #if FDCT32x32_HIGH_PRECISION } else { __m128i lstep1[64], lstep2[64], lstep3[64]; @@ -1457,32 +1512,32 @@ void FDCT32x32_2D(const int16_t *input, // stage 3 { // expanding to 32-bit length priori to addition operations - lstep2[ 0] = _mm_unpacklo_epi16(step2[ 0], kZero); - lstep2[ 1] = _mm_unpackhi_epi16(step2[ 0], kZero); - lstep2[ 2] = _mm_unpacklo_epi16(step2[ 1], kZero); - lstep2[ 3] = _mm_unpackhi_epi16(step2[ 1], kZero); - lstep2[ 4] = _mm_unpacklo_epi16(step2[ 2], kZero); - lstep2[ 5] = _mm_unpackhi_epi16(step2[ 2], kZero); - lstep2[ 6] = _mm_unpacklo_epi16(step2[ 3], kZero); - lstep2[ 7] = _mm_unpackhi_epi16(step2[ 3], kZero); - lstep2[ 8] = _mm_unpacklo_epi16(step2[ 4], kZero); - lstep2[ 9] = _mm_unpackhi_epi16(step2[ 4], kZero); - lstep2[10] = _mm_unpacklo_epi16(step2[ 5], kZero); - lstep2[11] = _mm_unpackhi_epi16(step2[ 5], kZero); - lstep2[12] = _mm_unpacklo_epi16(step2[ 6], kZero); - lstep2[13] = _mm_unpackhi_epi16(step2[ 6], kZero); - lstep2[14] = _mm_unpacklo_epi16(step2[ 7], kZero); - lstep2[15] = _mm_unpackhi_epi16(step2[ 7], kZero); - lstep2[ 0] = _mm_madd_epi16(lstep2[ 0], kOne); - lstep2[ 1] = _mm_madd_epi16(lstep2[ 1], kOne); - lstep2[ 2] = _mm_madd_epi16(lstep2[ 2], kOne); - lstep2[ 3] = _mm_madd_epi16(lstep2[ 3], kOne); - lstep2[ 4] = _mm_madd_epi16(lstep2[ 4], kOne); - lstep2[ 5] = 
_mm_madd_epi16(lstep2[ 5], kOne); - lstep2[ 6] = _mm_madd_epi16(lstep2[ 6], kOne); - lstep2[ 7] = _mm_madd_epi16(lstep2[ 7], kOne); - lstep2[ 8] = _mm_madd_epi16(lstep2[ 8], kOne); - lstep2[ 9] = _mm_madd_epi16(lstep2[ 9], kOne); + lstep2[0] = _mm_unpacklo_epi16(step2[0], kZero); + lstep2[1] = _mm_unpackhi_epi16(step2[0], kZero); + lstep2[2] = _mm_unpacklo_epi16(step2[1], kZero); + lstep2[3] = _mm_unpackhi_epi16(step2[1], kZero); + lstep2[4] = _mm_unpacklo_epi16(step2[2], kZero); + lstep2[5] = _mm_unpackhi_epi16(step2[2], kZero); + lstep2[6] = _mm_unpacklo_epi16(step2[3], kZero); + lstep2[7] = _mm_unpackhi_epi16(step2[3], kZero); + lstep2[8] = _mm_unpacklo_epi16(step2[4], kZero); + lstep2[9] = _mm_unpackhi_epi16(step2[4], kZero); + lstep2[10] = _mm_unpacklo_epi16(step2[5], kZero); + lstep2[11] = _mm_unpackhi_epi16(step2[5], kZero); + lstep2[12] = _mm_unpacklo_epi16(step2[6], kZero); + lstep2[13] = _mm_unpackhi_epi16(step2[6], kZero); + lstep2[14] = _mm_unpacklo_epi16(step2[7], kZero); + lstep2[15] = _mm_unpackhi_epi16(step2[7], kZero); + lstep2[0] = _mm_madd_epi16(lstep2[0], kOne); + lstep2[1] = _mm_madd_epi16(lstep2[1], kOne); + lstep2[2] = _mm_madd_epi16(lstep2[2], kOne); + lstep2[3] = _mm_madd_epi16(lstep2[3], kOne); + lstep2[4] = _mm_madd_epi16(lstep2[4], kOne); + lstep2[5] = _mm_madd_epi16(lstep2[5], kOne); + lstep2[6] = _mm_madd_epi16(lstep2[6], kOne); + lstep2[7] = _mm_madd_epi16(lstep2[7], kOne); + lstep2[8] = _mm_madd_epi16(lstep2[8], kOne); + lstep2[9] = _mm_madd_epi16(lstep2[9], kOne); lstep2[10] = _mm_madd_epi16(lstep2[10], kOne); lstep2[11] = _mm_madd_epi16(lstep2[11], kOne); lstep2[12] = _mm_madd_epi16(lstep2[12], kOne); @@ -1490,22 +1545,22 @@ void FDCT32x32_2D(const int16_t *input, lstep2[14] = _mm_madd_epi16(lstep2[14], kOne); lstep2[15] = _mm_madd_epi16(lstep2[15], kOne); - lstep3[ 0] = _mm_add_epi32(lstep2[14], lstep2[ 0]); - lstep3[ 1] = _mm_add_epi32(lstep2[15], lstep2[ 1]); - lstep3[ 2] = _mm_add_epi32(lstep2[12], lstep2[ 2]); - lstep3[ 3] = 
_mm_add_epi32(lstep2[13], lstep2[ 3]); - lstep3[ 4] = _mm_add_epi32(lstep2[10], lstep2[ 4]); - lstep3[ 5] = _mm_add_epi32(lstep2[11], lstep2[ 5]); - lstep3[ 6] = _mm_add_epi32(lstep2[ 8], lstep2[ 6]); - lstep3[ 7] = _mm_add_epi32(lstep2[ 9], lstep2[ 7]); - lstep3[ 8] = _mm_sub_epi32(lstep2[ 6], lstep2[ 8]); - lstep3[ 9] = _mm_sub_epi32(lstep2[ 7], lstep2[ 9]); - lstep3[10] = _mm_sub_epi32(lstep2[ 4], lstep2[10]); - lstep3[11] = _mm_sub_epi32(lstep2[ 5], lstep2[11]); - lstep3[12] = _mm_sub_epi32(lstep2[ 2], lstep2[12]); - lstep3[13] = _mm_sub_epi32(lstep2[ 3], lstep2[13]); - lstep3[14] = _mm_sub_epi32(lstep2[ 0], lstep2[14]); - lstep3[15] = _mm_sub_epi32(lstep2[ 1], lstep2[15]); + lstep3[0] = _mm_add_epi32(lstep2[14], lstep2[0]); + lstep3[1] = _mm_add_epi32(lstep2[15], lstep2[1]); + lstep3[2] = _mm_add_epi32(lstep2[12], lstep2[2]); + lstep3[3] = _mm_add_epi32(lstep2[13], lstep2[3]); + lstep3[4] = _mm_add_epi32(lstep2[10], lstep2[4]); + lstep3[5] = _mm_add_epi32(lstep2[11], lstep2[5]); + lstep3[6] = _mm_add_epi32(lstep2[8], lstep2[6]); + lstep3[7] = _mm_add_epi32(lstep2[9], lstep2[7]); + lstep3[8] = _mm_sub_epi32(lstep2[6], lstep2[8]); + lstep3[9] = _mm_sub_epi32(lstep2[7], lstep2[9]); + lstep3[10] = _mm_sub_epi32(lstep2[4], lstep2[10]); + lstep3[11] = _mm_sub_epi32(lstep2[5], lstep2[11]); + lstep3[12] = _mm_sub_epi32(lstep2[2], lstep2[12]); + lstep3[13] = _mm_sub_epi32(lstep2[3], lstep2[13]); + lstep3[14] = _mm_sub_epi32(lstep2[0], lstep2[14]); + lstep3[15] = _mm_sub_epi32(lstep2[1], lstep2[15]); } { const __m128i s3_10_0 = _mm_unpacklo_epi16(step2[13], step2[10]); @@ -1643,10 +1698,10 @@ void FDCT32x32_2D(const int16_t *input, // stage 4 { // expanding to 32-bit length priori to addition operations - lstep2[16] = _mm_unpacklo_epi16(step2[ 8], kZero); - lstep2[17] = _mm_unpackhi_epi16(step2[ 8], kZero); - lstep2[18] = _mm_unpacklo_epi16(step2[ 9], kZero); - lstep2[19] = _mm_unpackhi_epi16(step2[ 9], kZero); + lstep2[16] = _mm_unpacklo_epi16(step2[8], kZero); + 
lstep2[17] = _mm_unpackhi_epi16(step2[8], kZero); + lstep2[18] = _mm_unpacklo_epi16(step2[9], kZero); + lstep2[19] = _mm_unpackhi_epi16(step2[9], kZero); lstep2[28] = _mm_unpacklo_epi16(step2[14], kZero); lstep2[29] = _mm_unpackhi_epi16(step2[14], kZero); lstep2[30] = _mm_unpacklo_epi16(step2[15], kZero); @@ -1660,14 +1715,14 @@ void FDCT32x32_2D(const int16_t *input, lstep2[30] = _mm_madd_epi16(lstep2[30], kOne); lstep2[31] = _mm_madd_epi16(lstep2[31], kOne); - lstep1[ 0] = _mm_add_epi32(lstep3[ 6], lstep3[ 0]); - lstep1[ 1] = _mm_add_epi32(lstep3[ 7], lstep3[ 1]); - lstep1[ 2] = _mm_add_epi32(lstep3[ 4], lstep3[ 2]); - lstep1[ 3] = _mm_add_epi32(lstep3[ 5], lstep3[ 3]); - lstep1[ 4] = _mm_sub_epi32(lstep3[ 2], lstep3[ 4]); - lstep1[ 5] = _mm_sub_epi32(lstep3[ 3], lstep3[ 5]); - lstep1[ 6] = _mm_sub_epi32(lstep3[ 0], lstep3[ 6]); - lstep1[ 7] = _mm_sub_epi32(lstep3[ 1], lstep3[ 7]); + lstep1[0] = _mm_add_epi32(lstep3[6], lstep3[0]); + lstep1[1] = _mm_add_epi32(lstep3[7], lstep3[1]); + lstep1[2] = _mm_add_epi32(lstep3[4], lstep3[2]); + lstep1[3] = _mm_add_epi32(lstep3[5], lstep3[3]); + lstep1[4] = _mm_sub_epi32(lstep3[2], lstep3[4]); + lstep1[5] = _mm_sub_epi32(lstep3[3], lstep3[5]); + lstep1[6] = _mm_sub_epi32(lstep3[0], lstep3[6]); + lstep1[7] = _mm_sub_epi32(lstep3[1], lstep3[7]); lstep1[16] = _mm_add_epi32(lstep3[22], lstep2[16]); lstep1[17] = _mm_add_epi32(lstep3[23], lstep2[17]); lstep1[18] = _mm_add_epi32(lstep3[20], lstep2[18]); @@ -1686,64 +1741,64 @@ void FDCT32x32_2D(const int16_t *input, lstep1[31] = _mm_add_epi32(lstep3[25], lstep2[31]); } { - // to be continued... - // - const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64); - const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64); + // to be continued... 
+ // + const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64); + const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64); - u[0] = _mm_unpacklo_epi32(lstep3[12], lstep3[10]); - u[1] = _mm_unpackhi_epi32(lstep3[12], lstep3[10]); - u[2] = _mm_unpacklo_epi32(lstep3[13], lstep3[11]); - u[3] = _mm_unpackhi_epi32(lstep3[13], lstep3[11]); + u[0] = _mm_unpacklo_epi32(lstep3[12], lstep3[10]); + u[1] = _mm_unpackhi_epi32(lstep3[12], lstep3[10]); + u[2] = _mm_unpacklo_epi32(lstep3[13], lstep3[11]); + u[3] = _mm_unpackhi_epi32(lstep3[13], lstep3[11]); - // TODO(jingning): manually inline k_madd_epi32_ to further hide - // instruction latency. - v[0] = k_madd_epi32(u[0], k32_p16_m16); - v[1] = k_madd_epi32(u[1], k32_p16_m16); - v[2] = k_madd_epi32(u[2], k32_p16_m16); - v[3] = k_madd_epi32(u[3], k32_p16_m16); - v[4] = k_madd_epi32(u[0], k32_p16_p16); - v[5] = k_madd_epi32(u[1], k32_p16_p16); - v[6] = k_madd_epi32(u[2], k32_p16_p16); - v[7] = k_madd_epi32(u[3], k32_p16_p16); + // TODO(jingning): manually inline k_madd_epi32_ to further hide + // instruction latency. 
+ v[0] = k_madd_epi32(u[0], k32_p16_m16); + v[1] = k_madd_epi32(u[1], k32_p16_m16); + v[2] = k_madd_epi32(u[2], k32_p16_m16); + v[3] = k_madd_epi32(u[3], k32_p16_m16); + v[4] = k_madd_epi32(u[0], k32_p16_p16); + v[5] = k_madd_epi32(u[1], k32_p16_p16); + v[6] = k_madd_epi32(u[2], k32_p16_p16); + v[7] = k_madd_epi32(u[3], k32_p16_p16); #if DCT_HIGH_BIT_DEPTH - overflow = k_check_epi32_overflow_8(&v[0], &v[1], &v[2], &v[3], - &v[4], &v[5], &v[6], &v[7], &kZero); - if (overflow) { - HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); - return; - } + overflow = k_check_epi32_overflow_8(&v[0], &v[1], &v[2], &v[3], &v[4], + &v[5], &v[6], &v[7], &kZero); + if (overflow) { + HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); + return; + } #endif // DCT_HIGH_BIT_DEPTH - u[0] = k_packs_epi64(v[0], v[1]); - u[1] = k_packs_epi64(v[2], v[3]); - u[2] = k_packs_epi64(v[4], v[5]); - u[3] = k_packs_epi64(v[6], v[7]); + u[0] = k_packs_epi64(v[0], v[1]); + u[1] = k_packs_epi64(v[2], v[3]); + u[2] = k_packs_epi64(v[4], v[5]); + u[3] = k_packs_epi64(v[6], v[7]); - v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); - v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); - v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); - v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); - lstep1[10] = _mm_srai_epi32(v[0], DCT_CONST_BITS); - lstep1[11] = _mm_srai_epi32(v[1], DCT_CONST_BITS); - lstep1[12] = _mm_srai_epi32(v[2], DCT_CONST_BITS); - lstep1[13] = _mm_srai_epi32(v[3], DCT_CONST_BITS); + lstep1[10] = _mm_srai_epi32(v[0], DCT_CONST_BITS); + lstep1[11] = _mm_srai_epi32(v[1], DCT_CONST_BITS); + lstep1[12] = _mm_srai_epi32(v[2], DCT_CONST_BITS); + lstep1[13] = _mm_srai_epi32(v[3], DCT_CONST_BITS); } { const __m128i k32_m08_p24 = pair_set_epi32(-cospi_8_64, cospi_24_64); const __m128i 
k32_m24_m08 = pair_set_epi32(-cospi_24_64, -cospi_8_64); const __m128i k32_p24_p08 = pair_set_epi32(cospi_24_64, cospi_8_64); - u[ 0] = _mm_unpacklo_epi32(lstep3[36], lstep3[58]); - u[ 1] = _mm_unpackhi_epi32(lstep3[36], lstep3[58]); - u[ 2] = _mm_unpacklo_epi32(lstep3[37], lstep3[59]); - u[ 3] = _mm_unpackhi_epi32(lstep3[37], lstep3[59]); - u[ 4] = _mm_unpacklo_epi32(lstep3[38], lstep3[56]); - u[ 5] = _mm_unpackhi_epi32(lstep3[38], lstep3[56]); - u[ 6] = _mm_unpacklo_epi32(lstep3[39], lstep3[57]); - u[ 7] = _mm_unpackhi_epi32(lstep3[39], lstep3[57]); - u[ 8] = _mm_unpacklo_epi32(lstep3[40], lstep3[54]); - u[ 9] = _mm_unpackhi_epi32(lstep3[40], lstep3[54]); + u[0] = _mm_unpacklo_epi32(lstep3[36], lstep3[58]); + u[1] = _mm_unpackhi_epi32(lstep3[36], lstep3[58]); + u[2] = _mm_unpacklo_epi32(lstep3[37], lstep3[59]); + u[3] = _mm_unpackhi_epi32(lstep3[37], lstep3[59]); + u[4] = _mm_unpacklo_epi32(lstep3[38], lstep3[56]); + u[5] = _mm_unpackhi_epi32(lstep3[38], lstep3[56]); + u[6] = _mm_unpacklo_epi32(lstep3[39], lstep3[57]); + u[7] = _mm_unpackhi_epi32(lstep3[39], lstep3[57]); + u[8] = _mm_unpacklo_epi32(lstep3[40], lstep3[54]); + u[9] = _mm_unpackhi_epi32(lstep3[40], lstep3[54]); u[10] = _mm_unpacklo_epi32(lstep3[41], lstep3[55]); u[11] = _mm_unpackhi_epi32(lstep3[41], lstep3[55]); u[12] = _mm_unpacklo_epi32(lstep3[42], lstep3[52]); @@ -1751,16 +1806,16 @@ void FDCT32x32_2D(const int16_t *input, u[14] = _mm_unpacklo_epi32(lstep3[43], lstep3[53]); u[15] = _mm_unpackhi_epi32(lstep3[43], lstep3[53]); - v[ 0] = k_madd_epi32(u[ 0], k32_m08_p24); - v[ 1] = k_madd_epi32(u[ 1], k32_m08_p24); - v[ 2] = k_madd_epi32(u[ 2], k32_m08_p24); - v[ 3] = k_madd_epi32(u[ 3], k32_m08_p24); - v[ 4] = k_madd_epi32(u[ 4], k32_m08_p24); - v[ 5] = k_madd_epi32(u[ 5], k32_m08_p24); - v[ 6] = k_madd_epi32(u[ 6], k32_m08_p24); - v[ 7] = k_madd_epi32(u[ 7], k32_m08_p24); - v[ 8] = k_madd_epi32(u[ 8], k32_m24_m08); - v[ 9] = k_madd_epi32(u[ 9], k32_m24_m08); + v[0] = k_madd_epi32(u[0], 
k32_m08_p24); + v[1] = k_madd_epi32(u[1], k32_m08_p24); + v[2] = k_madd_epi32(u[2], k32_m08_p24); + v[3] = k_madd_epi32(u[3], k32_m08_p24); + v[4] = k_madd_epi32(u[4], k32_m08_p24); + v[5] = k_madd_epi32(u[5], k32_m08_p24); + v[6] = k_madd_epi32(u[6], k32_m08_p24); + v[7] = k_madd_epi32(u[7], k32_m08_p24); + v[8] = k_madd_epi32(u[8], k32_m24_m08); + v[9] = k_madd_epi32(u[9], k32_m24_m08); v[10] = k_madd_epi32(u[10], k32_m24_m08); v[11] = k_madd_epi32(u[11], k32_m24_m08); v[12] = k_madd_epi32(u[12], k32_m24_m08); @@ -1771,41 +1826,40 @@ void FDCT32x32_2D(const int16_t *input, v[17] = k_madd_epi32(u[13], k32_m08_p24); v[18] = k_madd_epi32(u[14], k32_m08_p24); v[19] = k_madd_epi32(u[15], k32_m08_p24); - v[20] = k_madd_epi32(u[ 8], k32_m08_p24); - v[21] = k_madd_epi32(u[ 9], k32_m08_p24); + v[20] = k_madd_epi32(u[8], k32_m08_p24); + v[21] = k_madd_epi32(u[9], k32_m08_p24); v[22] = k_madd_epi32(u[10], k32_m08_p24); v[23] = k_madd_epi32(u[11], k32_m08_p24); - v[24] = k_madd_epi32(u[ 4], k32_p24_p08); - v[25] = k_madd_epi32(u[ 5], k32_p24_p08); - v[26] = k_madd_epi32(u[ 6], k32_p24_p08); - v[27] = k_madd_epi32(u[ 7], k32_p24_p08); - v[28] = k_madd_epi32(u[ 0], k32_p24_p08); - v[29] = k_madd_epi32(u[ 1], k32_p24_p08); - v[30] = k_madd_epi32(u[ 2], k32_p24_p08); - v[31] = k_madd_epi32(u[ 3], k32_p24_p08); + v[24] = k_madd_epi32(u[4], k32_p24_p08); + v[25] = k_madd_epi32(u[5], k32_p24_p08); + v[26] = k_madd_epi32(u[6], k32_p24_p08); + v[27] = k_madd_epi32(u[7], k32_p24_p08); + v[28] = k_madd_epi32(u[0], k32_p24_p08); + v[29] = k_madd_epi32(u[1], k32_p24_p08); + v[30] = k_madd_epi32(u[2], k32_p24_p08); + v[31] = k_madd_epi32(u[3], k32_p24_p08); #if DCT_HIGH_BIT_DEPTH overflow = k_check_epi32_overflow_32( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], - &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], - &kZero); + &v[0], 
&v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8], + &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &v[16], + &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], &v[24], + &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], &kZero); if (overflow) { HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); return; } #endif // DCT_HIGH_BIT_DEPTH - u[ 0] = k_packs_epi64(v[ 0], v[ 1]); - u[ 1] = k_packs_epi64(v[ 2], v[ 3]); - u[ 2] = k_packs_epi64(v[ 4], v[ 5]); - u[ 3] = k_packs_epi64(v[ 6], v[ 7]); - u[ 4] = k_packs_epi64(v[ 8], v[ 9]); - u[ 5] = k_packs_epi64(v[10], v[11]); - u[ 6] = k_packs_epi64(v[12], v[13]); - u[ 7] = k_packs_epi64(v[14], v[15]); - u[ 8] = k_packs_epi64(v[16], v[17]); - u[ 9] = k_packs_epi64(v[18], v[19]); + u[0] = k_packs_epi64(v[0], v[1]); + u[1] = k_packs_epi64(v[2], v[3]); + u[2] = k_packs_epi64(v[4], v[5]); + u[3] = k_packs_epi64(v[6], v[7]); + u[4] = k_packs_epi64(v[8], v[9]); + u[5] = k_packs_epi64(v[10], v[11]); + u[6] = k_packs_epi64(v[12], v[13]); + u[7] = k_packs_epi64(v[14], v[15]); + u[8] = k_packs_epi64(v[16], v[17]); + u[9] = k_packs_epi64(v[18], v[19]); u[10] = k_packs_epi64(v[20], v[21]); u[11] = k_packs_epi64(v[22], v[23]); u[12] = k_packs_epi64(v[24], v[25]); @@ -1813,16 +1867,16 @@ void FDCT32x32_2D(const int16_t *input, u[14] = k_packs_epi64(v[28], v[29]); u[15] = k_packs_epi64(v[30], v[31]); - v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); - v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); - v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); - v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); - v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); - v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); - v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); - v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); - v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); - v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); + v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = 
_mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); + v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); + v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); + v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); + v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); + v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); @@ -1830,16 +1884,16 @@ void FDCT32x32_2D(const int16_t *input, v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - lstep1[36] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS); - lstep1[37] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS); - lstep1[38] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS); - lstep1[39] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS); - lstep1[40] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS); - lstep1[41] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS); - lstep1[42] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS); - lstep1[43] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS); - lstep1[52] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS); - lstep1[53] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS); + lstep1[36] = _mm_srai_epi32(v[0], DCT_CONST_BITS); + lstep1[37] = _mm_srai_epi32(v[1], DCT_CONST_BITS); + lstep1[38] = _mm_srai_epi32(v[2], DCT_CONST_BITS); + lstep1[39] = _mm_srai_epi32(v[3], DCT_CONST_BITS); + lstep1[40] = _mm_srai_epi32(v[4], DCT_CONST_BITS); + lstep1[41] = _mm_srai_epi32(v[5], DCT_CONST_BITS); + lstep1[42] = _mm_srai_epi32(v[6], DCT_CONST_BITS); + lstep1[43] = _mm_srai_epi32(v[7], DCT_CONST_BITS); + lstep1[52] = _mm_srai_epi32(v[8], DCT_CONST_BITS); + lstep1[53] = _mm_srai_epi32(v[9], DCT_CONST_BITS); lstep1[54] = _mm_srai_epi32(v[10], DCT_CONST_BITS); lstep1[55] = _mm_srai_epi32(v[11], DCT_CONST_BITS); lstep1[56] = 
_mm_srai_epi32(v[12], DCT_CONST_BITS); @@ -1849,10 +1903,10 @@ void FDCT32x32_2D(const int16_t *input, } // stage 5 { - lstep2[ 8] = _mm_add_epi32(lstep1[10], lstep3[ 8]); - lstep2[ 9] = _mm_add_epi32(lstep1[11], lstep3[ 9]); - lstep2[10] = _mm_sub_epi32(lstep3[ 8], lstep1[10]); - lstep2[11] = _mm_sub_epi32(lstep3[ 9], lstep1[11]); + lstep2[8] = _mm_add_epi32(lstep1[10], lstep3[8]); + lstep2[9] = _mm_add_epi32(lstep1[11], lstep3[9]); + lstep2[10] = _mm_sub_epi32(lstep3[8], lstep1[10]); + lstep2[11] = _mm_sub_epi32(lstep3[9], lstep1[11]); lstep2[12] = _mm_sub_epi32(lstep3[14], lstep1[12]); lstep2[13] = _mm_sub_epi32(lstep3[15], lstep1[13]); lstep2[14] = _mm_add_epi32(lstep1[12], lstep3[14]); @@ -1875,16 +1929,16 @@ void FDCT32x32_2D(const int16_t *input, // TODO(jingning): manually inline k_madd_epi32_ to further hide // instruction latency. - v[ 0] = k_madd_epi32(u[0], k32_p16_p16); - v[ 1] = k_madd_epi32(u[1], k32_p16_p16); - v[ 2] = k_madd_epi32(u[2], k32_p16_p16); - v[ 3] = k_madd_epi32(u[3], k32_p16_p16); - v[ 4] = k_madd_epi32(u[0], k32_p16_m16); - v[ 5] = k_madd_epi32(u[1], k32_p16_m16); - v[ 6] = k_madd_epi32(u[2], k32_p16_m16); - v[ 7] = k_madd_epi32(u[3], k32_p16_m16); - v[ 8] = k_madd_epi32(u[4], k32_p24_p08); - v[ 9] = k_madd_epi32(u[5], k32_p24_p08); + v[0] = k_madd_epi32(u[0], k32_p16_p16); + v[1] = k_madd_epi32(u[1], k32_p16_p16); + v[2] = k_madd_epi32(u[2], k32_p16_p16); + v[3] = k_madd_epi32(u[3], k32_p16_p16); + v[4] = k_madd_epi32(u[0], k32_p16_m16); + v[5] = k_madd_epi32(u[1], k32_p16_m16); + v[6] = k_madd_epi32(u[2], k32_p16_m16); + v[7] = k_madd_epi32(u[3], k32_p16_m16); + v[8] = k_madd_epi32(u[4], k32_p24_p08); + v[9] = k_madd_epi32(u[5], k32_p24_p08); v[10] = k_madd_epi32(u[6], k32_p24_p08); v[11] = k_madd_epi32(u[7], k32_p24_p08); v[12] = k_madd_epi32(u[4], k32_m08_p24); @@ -1894,9 +1948,8 @@ void FDCT32x32_2D(const int16_t *input, #if DCT_HIGH_BIT_DEPTH overflow = k_check_epi32_overflow_16( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], 
&v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &kZero); + &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8], + &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &kZero); if (overflow) { HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); return; @@ -1966,13 +2019,13 @@ void FDCT32x32_2D(const int16_t *input, u[7] = _mm_srai_epi32(u[7], 2); // Combine - out[ 0] = _mm_packs_epi32(u[0], u[1]); + out[0] = _mm_packs_epi32(u[0], u[1]); out[16] = _mm_packs_epi32(u[2], u[3]); - out[ 8] = _mm_packs_epi32(u[4], u[5]); + out[8] = _mm_packs_epi32(u[4], u[5]); out[24] = _mm_packs_epi32(u[6], u[7]); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&out[0], &out[16], - &out[8], &out[24]); + overflow = + check_epi16_overflow_x4(&out[0], &out[16], &out[8], &out[24]); if (overflow) { HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); return; @@ -2001,8 +2054,8 @@ void FDCT32x32_2D(const int16_t *input, v[5] = k_madd_epi32(u[5], k32_m24_m08); v[6] = k_madd_epi32(u[6], k32_m24_m08); v[7] = k_madd_epi32(u[7], k32_m24_m08); - v[ 8] = k_madd_epi32(u[4], k32_m08_p24); - v[ 9] = k_madd_epi32(u[5], k32_m08_p24); + v[8] = k_madd_epi32(u[4], k32_m08_p24); + v[9] = k_madd_epi32(u[5], k32_m08_p24); v[10] = k_madd_epi32(u[6], k32_m08_p24); v[11] = k_madd_epi32(u[7], k32_m08_p24); v[12] = k_madd_epi32(u[0], k32_p24_p08); @@ -2012,9 +2065,8 @@ void FDCT32x32_2D(const int16_t *input, #if DCT_HIGH_BIT_DEPTH overflow = k_check_epi32_overflow_16( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &kZero); + &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8], + &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &kZero); if (overflow) { HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); return; @@ -2088,10 +2140,10 @@ void FDCT32x32_2D(const int16_t *input, const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64); const __m128i k32_m04_p28 = 
pair_set_epi32(-cospi_4_64, cospi_28_64); - u[0] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]); - u[1] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]); - u[2] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]); - u[3] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]); + u[0] = _mm_unpacklo_epi32(lstep2[8], lstep2[14]); + u[1] = _mm_unpackhi_epi32(lstep2[8], lstep2[14]); + u[2] = _mm_unpacklo_epi32(lstep2[9], lstep2[15]); + u[3] = _mm_unpackhi_epi32(lstep2[9], lstep2[15]); u[4] = _mm_unpacklo_epi32(lstep2[10], lstep2[12]); u[5] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]); u[6] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]); @@ -2100,10 +2152,10 @@ void FDCT32x32_2D(const int16_t *input, u[9] = _mm_unpackhi_epi32(lstep2[10], lstep2[12]); u[10] = _mm_unpacklo_epi32(lstep2[11], lstep2[13]); u[11] = _mm_unpackhi_epi32(lstep2[11], lstep2[13]); - u[12] = _mm_unpacklo_epi32(lstep2[ 8], lstep2[14]); - u[13] = _mm_unpackhi_epi32(lstep2[ 8], lstep2[14]); - u[14] = _mm_unpacklo_epi32(lstep2[ 9], lstep2[15]); - u[15] = _mm_unpackhi_epi32(lstep2[ 9], lstep2[15]); + u[12] = _mm_unpacklo_epi32(lstep2[8], lstep2[14]); + u[13] = _mm_unpackhi_epi32(lstep2[8], lstep2[14]); + u[14] = _mm_unpacklo_epi32(lstep2[9], lstep2[15]); + u[15] = _mm_unpackhi_epi32(lstep2[9], lstep2[15]); v[0] = k_madd_epi32(u[0], k32_p28_p04); v[1] = k_madd_epi32(u[1], k32_p28_p04); @@ -2113,8 +2165,8 @@ void FDCT32x32_2D(const int16_t *input, v[5] = k_madd_epi32(u[5], k32_p12_p20); v[6] = k_madd_epi32(u[6], k32_p12_p20); v[7] = k_madd_epi32(u[7], k32_p12_p20); - v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12); - v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12); + v[8] = k_madd_epi32(u[8], k32_m20_p12); + v[9] = k_madd_epi32(u[9], k32_m20_p12); v[10] = k_madd_epi32(u[10], k32_m20_p12); v[11] = k_madd_epi32(u[11], k32_m20_p12); v[12] = k_madd_epi32(u[12], k32_m04_p28); @@ -2124,9 +2176,8 @@ void FDCT32x32_2D(const int16_t *input, #if DCT_HIGH_BIT_DEPTH overflow = k_check_epi32_overflow_16( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], 
&v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &kZero); + &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8], + &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &kZero); if (overflow) { HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); return; @@ -2195,13 +2246,13 @@ void FDCT32x32_2D(const int16_t *input, u[6] = _mm_srai_epi32(u[6], 2); u[7] = _mm_srai_epi32(u[7], 2); - out[ 4] = _mm_packs_epi32(u[0], u[1]); + out[4] = _mm_packs_epi32(u[0], u[1]); out[20] = _mm_packs_epi32(u[2], u[3]); out[12] = _mm_packs_epi32(u[4], u[5]); out[28] = _mm_packs_epi32(u[6], u[7]); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&out[4], &out[20], - &out[12], &out[28]); + overflow = + check_epi16_overflow_x4(&out[4], &out[20], &out[12], &out[28]); if (overflow) { HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); return; @@ -2230,21 +2281,21 @@ void FDCT32x32_2D(const int16_t *input, const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64); const __m128i k32_m28_m04 = pair_set_epi32(-cospi_28_64, -cospi_4_64); const __m128i k32_m20_p12 = pair_set_epi32(-cospi_20_64, cospi_12_64); - const __m128i k32_m12_m20 = pair_set_epi32(-cospi_12_64, - -cospi_20_64); + const __m128i k32_m12_m20 = + pair_set_epi32(-cospi_12_64, -cospi_20_64); const __m128i k32_p12_p20 = pair_set_epi32(cospi_12_64, cospi_20_64); const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64); - u[ 0] = _mm_unpacklo_epi32(lstep2[34], lstep2[60]); - u[ 1] = _mm_unpackhi_epi32(lstep2[34], lstep2[60]); - u[ 2] = _mm_unpacklo_epi32(lstep2[35], lstep2[61]); - u[ 3] = _mm_unpackhi_epi32(lstep2[35], lstep2[61]); - u[ 4] = _mm_unpacklo_epi32(lstep2[36], lstep2[58]); - u[ 5] = _mm_unpackhi_epi32(lstep2[36], lstep2[58]); - u[ 6] = _mm_unpacklo_epi32(lstep2[37], lstep2[59]); - u[ 7] = _mm_unpackhi_epi32(lstep2[37], lstep2[59]); - u[ 8] = _mm_unpacklo_epi32(lstep2[42], lstep2[52]); - u[ 9] = _mm_unpackhi_epi32(lstep2[42], lstep2[52]); + u[0] = 
_mm_unpacklo_epi32(lstep2[34], lstep2[60]); + u[1] = _mm_unpackhi_epi32(lstep2[34], lstep2[60]); + u[2] = _mm_unpacklo_epi32(lstep2[35], lstep2[61]); + u[3] = _mm_unpackhi_epi32(lstep2[35], lstep2[61]); + u[4] = _mm_unpacklo_epi32(lstep2[36], lstep2[58]); + u[5] = _mm_unpackhi_epi32(lstep2[36], lstep2[58]); + u[6] = _mm_unpacklo_epi32(lstep2[37], lstep2[59]); + u[7] = _mm_unpackhi_epi32(lstep2[37], lstep2[59]); + u[8] = _mm_unpacklo_epi32(lstep2[42], lstep2[52]); + u[9] = _mm_unpackhi_epi32(lstep2[42], lstep2[52]); u[10] = _mm_unpacklo_epi32(lstep2[43], lstep2[53]); u[11] = _mm_unpackhi_epi32(lstep2[43], lstep2[53]); u[12] = _mm_unpacklo_epi32(lstep2[44], lstep2[50]); @@ -2252,16 +2303,16 @@ void FDCT32x32_2D(const int16_t *input, u[14] = _mm_unpacklo_epi32(lstep2[45], lstep2[51]); u[15] = _mm_unpackhi_epi32(lstep2[45], lstep2[51]); - v[ 0] = k_madd_epi32(u[ 0], k32_m04_p28); - v[ 1] = k_madd_epi32(u[ 1], k32_m04_p28); - v[ 2] = k_madd_epi32(u[ 2], k32_m04_p28); - v[ 3] = k_madd_epi32(u[ 3], k32_m04_p28); - v[ 4] = k_madd_epi32(u[ 4], k32_m28_m04); - v[ 5] = k_madd_epi32(u[ 5], k32_m28_m04); - v[ 6] = k_madd_epi32(u[ 6], k32_m28_m04); - v[ 7] = k_madd_epi32(u[ 7], k32_m28_m04); - v[ 8] = k_madd_epi32(u[ 8], k32_m20_p12); - v[ 9] = k_madd_epi32(u[ 9], k32_m20_p12); + v[0] = k_madd_epi32(u[0], k32_m04_p28); + v[1] = k_madd_epi32(u[1], k32_m04_p28); + v[2] = k_madd_epi32(u[2], k32_m04_p28); + v[3] = k_madd_epi32(u[3], k32_m04_p28); + v[4] = k_madd_epi32(u[4], k32_m28_m04); + v[5] = k_madd_epi32(u[5], k32_m28_m04); + v[6] = k_madd_epi32(u[6], k32_m28_m04); + v[7] = k_madd_epi32(u[7], k32_m28_m04); + v[8] = k_madd_epi32(u[8], k32_m20_p12); + v[9] = k_madd_epi32(u[9], k32_m20_p12); v[10] = k_madd_epi32(u[10], k32_m20_p12); v[11] = k_madd_epi32(u[11], k32_m20_p12); v[12] = k_madd_epi32(u[12], k32_m12_m20); @@ -2272,41 +2323,40 @@ void FDCT32x32_2D(const int16_t *input, v[17] = k_madd_epi32(u[13], k32_m20_p12); v[18] = k_madd_epi32(u[14], k32_m20_p12); v[19] = 
k_madd_epi32(u[15], k32_m20_p12); - v[20] = k_madd_epi32(u[ 8], k32_p12_p20); - v[21] = k_madd_epi32(u[ 9], k32_p12_p20); + v[20] = k_madd_epi32(u[8], k32_p12_p20); + v[21] = k_madd_epi32(u[9], k32_p12_p20); v[22] = k_madd_epi32(u[10], k32_p12_p20); v[23] = k_madd_epi32(u[11], k32_p12_p20); - v[24] = k_madd_epi32(u[ 4], k32_m04_p28); - v[25] = k_madd_epi32(u[ 5], k32_m04_p28); - v[26] = k_madd_epi32(u[ 6], k32_m04_p28); - v[27] = k_madd_epi32(u[ 7], k32_m04_p28); - v[28] = k_madd_epi32(u[ 0], k32_p28_p04); - v[29] = k_madd_epi32(u[ 1], k32_p28_p04); - v[30] = k_madd_epi32(u[ 2], k32_p28_p04); - v[31] = k_madd_epi32(u[ 3], k32_p28_p04); + v[24] = k_madd_epi32(u[4], k32_m04_p28); + v[25] = k_madd_epi32(u[5], k32_m04_p28); + v[26] = k_madd_epi32(u[6], k32_m04_p28); + v[27] = k_madd_epi32(u[7], k32_m04_p28); + v[28] = k_madd_epi32(u[0], k32_p28_p04); + v[29] = k_madd_epi32(u[1], k32_p28_p04); + v[30] = k_madd_epi32(u[2], k32_p28_p04); + v[31] = k_madd_epi32(u[3], k32_p28_p04); #if DCT_HIGH_BIT_DEPTH overflow = k_check_epi32_overflow_32( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], - &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], - &kZero); + &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8], + &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &v[16], + &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], &v[24], + &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], &kZero); if (overflow) { HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); return; } #endif // DCT_HIGH_BIT_DEPTH - u[ 0] = k_packs_epi64(v[ 0], v[ 1]); - u[ 1] = k_packs_epi64(v[ 2], v[ 3]); - u[ 2] = k_packs_epi64(v[ 4], v[ 5]); - u[ 3] = k_packs_epi64(v[ 6], v[ 7]); - u[ 4] = k_packs_epi64(v[ 8], v[ 9]); - u[ 5] = k_packs_epi64(v[10], v[11]); - u[ 6] = k_packs_epi64(v[12], v[13]); - u[ 7] = k_packs_epi64(v[14], v[15]); - 
u[ 8] = k_packs_epi64(v[16], v[17]); - u[ 9] = k_packs_epi64(v[18], v[19]); + u[0] = k_packs_epi64(v[0], v[1]); + u[1] = k_packs_epi64(v[2], v[3]); + u[2] = k_packs_epi64(v[4], v[5]); + u[3] = k_packs_epi64(v[6], v[7]); + u[4] = k_packs_epi64(v[8], v[9]); + u[5] = k_packs_epi64(v[10], v[11]); + u[6] = k_packs_epi64(v[12], v[13]); + u[7] = k_packs_epi64(v[14], v[15]); + u[8] = k_packs_epi64(v[16], v[17]); + u[9] = k_packs_epi64(v[18], v[19]); u[10] = k_packs_epi64(v[20], v[21]); u[11] = k_packs_epi64(v[22], v[23]); u[12] = k_packs_epi64(v[24], v[25]); @@ -2314,16 +2364,16 @@ void FDCT32x32_2D(const int16_t *input, u[14] = k_packs_epi64(v[28], v[29]); u[15] = k_packs_epi64(v[30], v[31]); - v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); - v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); - v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); - v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); - v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); - v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); - v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); - v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); - v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); - v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); + v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); + v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); + v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); + v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); + v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); + v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); @@ -2331,16 +2381,16 @@ void FDCT32x32_2D(const int16_t *input, 
v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - lstep3[34] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS); - lstep3[35] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS); - lstep3[36] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS); - lstep3[37] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS); - lstep3[42] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS); - lstep3[43] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS); - lstep3[44] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS); - lstep3[45] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS); - lstep3[50] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS); - lstep3[51] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS); + lstep3[34] = _mm_srai_epi32(v[0], DCT_CONST_BITS); + lstep3[35] = _mm_srai_epi32(v[1], DCT_CONST_BITS); + lstep3[36] = _mm_srai_epi32(v[2], DCT_CONST_BITS); + lstep3[37] = _mm_srai_epi32(v[3], DCT_CONST_BITS); + lstep3[42] = _mm_srai_epi32(v[4], DCT_CONST_BITS); + lstep3[43] = _mm_srai_epi32(v[5], DCT_CONST_BITS); + lstep3[44] = _mm_srai_epi32(v[6], DCT_CONST_BITS); + lstep3[45] = _mm_srai_epi32(v[7], DCT_CONST_BITS); + lstep3[50] = _mm_srai_epi32(v[8], DCT_CONST_BITS); + lstep3[51] = _mm_srai_epi32(v[9], DCT_CONST_BITS); lstep3[52] = _mm_srai_epi32(v[10], DCT_CONST_BITS); lstep3[53] = _mm_srai_epi32(v[11], DCT_CONST_BITS); lstep3[58] = _mm_srai_epi32(v[12], DCT_CONST_BITS); @@ -2353,22 +2403,22 @@ void FDCT32x32_2D(const int16_t *input, const __m128i k32_p30_p02 = pair_set_epi32(cospi_30_64, cospi_2_64); const __m128i k32_p14_p18 = pair_set_epi32(cospi_14_64, cospi_18_64); const __m128i k32_p22_p10 = pair_set_epi32(cospi_22_64, cospi_10_64); - const __m128i k32_p06_p26 = pair_set_epi32(cospi_6_64, cospi_26_64); + const __m128i k32_p06_p26 = pair_set_epi32(cospi_6_64, cospi_26_64); const __m128i k32_m26_p06 = pair_set_epi32(-cospi_26_64, cospi_6_64); const __m128i k32_m10_p22 = pair_set_epi32(-cospi_10_64, cospi_22_64); const __m128i k32_m18_p14 = pair_set_epi32(-cospi_18_64, cospi_14_64); const __m128i k32_m02_p30 = 
pair_set_epi32(-cospi_2_64, cospi_30_64); - u[ 0] = _mm_unpacklo_epi32(lstep3[16], lstep3[30]); - u[ 1] = _mm_unpackhi_epi32(lstep3[16], lstep3[30]); - u[ 2] = _mm_unpacklo_epi32(lstep3[17], lstep3[31]); - u[ 3] = _mm_unpackhi_epi32(lstep3[17], lstep3[31]); - u[ 4] = _mm_unpacklo_epi32(lstep3[18], lstep3[28]); - u[ 5] = _mm_unpackhi_epi32(lstep3[18], lstep3[28]); - u[ 6] = _mm_unpacklo_epi32(lstep3[19], lstep3[29]); - u[ 7] = _mm_unpackhi_epi32(lstep3[19], lstep3[29]); - u[ 8] = _mm_unpacklo_epi32(lstep3[20], lstep3[26]); - u[ 9] = _mm_unpackhi_epi32(lstep3[20], lstep3[26]); + u[0] = _mm_unpacklo_epi32(lstep3[16], lstep3[30]); + u[1] = _mm_unpackhi_epi32(lstep3[16], lstep3[30]); + u[2] = _mm_unpacklo_epi32(lstep3[17], lstep3[31]); + u[3] = _mm_unpackhi_epi32(lstep3[17], lstep3[31]); + u[4] = _mm_unpacklo_epi32(lstep3[18], lstep3[28]); + u[5] = _mm_unpackhi_epi32(lstep3[18], lstep3[28]); + u[6] = _mm_unpacklo_epi32(lstep3[19], lstep3[29]); + u[7] = _mm_unpackhi_epi32(lstep3[19], lstep3[29]); + u[8] = _mm_unpacklo_epi32(lstep3[20], lstep3[26]); + u[9] = _mm_unpackhi_epi32(lstep3[20], lstep3[26]); u[10] = _mm_unpacklo_epi32(lstep3[21], lstep3[27]); u[11] = _mm_unpackhi_epi32(lstep3[21], lstep3[27]); u[12] = _mm_unpacklo_epi32(lstep3[22], lstep3[24]); @@ -2376,16 +2426,16 @@ void FDCT32x32_2D(const int16_t *input, u[14] = _mm_unpacklo_epi32(lstep3[23], lstep3[25]); u[15] = _mm_unpackhi_epi32(lstep3[23], lstep3[25]); - v[ 0] = k_madd_epi32(u[ 0], k32_p30_p02); - v[ 1] = k_madd_epi32(u[ 1], k32_p30_p02); - v[ 2] = k_madd_epi32(u[ 2], k32_p30_p02); - v[ 3] = k_madd_epi32(u[ 3], k32_p30_p02); - v[ 4] = k_madd_epi32(u[ 4], k32_p14_p18); - v[ 5] = k_madd_epi32(u[ 5], k32_p14_p18); - v[ 6] = k_madd_epi32(u[ 6], k32_p14_p18); - v[ 7] = k_madd_epi32(u[ 7], k32_p14_p18); - v[ 8] = k_madd_epi32(u[ 8], k32_p22_p10); - v[ 9] = k_madd_epi32(u[ 9], k32_p22_p10); + v[0] = k_madd_epi32(u[0], k32_p30_p02); + v[1] = k_madd_epi32(u[1], k32_p30_p02); + v[2] = k_madd_epi32(u[2], 
k32_p30_p02); + v[3] = k_madd_epi32(u[3], k32_p30_p02); + v[4] = k_madd_epi32(u[4], k32_p14_p18); + v[5] = k_madd_epi32(u[5], k32_p14_p18); + v[6] = k_madd_epi32(u[6], k32_p14_p18); + v[7] = k_madd_epi32(u[7], k32_p14_p18); + v[8] = k_madd_epi32(u[8], k32_p22_p10); + v[9] = k_madd_epi32(u[9], k32_p22_p10); v[10] = k_madd_epi32(u[10], k32_p22_p10); v[11] = k_madd_epi32(u[11], k32_p22_p10); v[12] = k_madd_epi32(u[12], k32_p06_p26); @@ -2396,41 +2446,40 @@ void FDCT32x32_2D(const int16_t *input, v[17] = k_madd_epi32(u[13], k32_m26_p06); v[18] = k_madd_epi32(u[14], k32_m26_p06); v[19] = k_madd_epi32(u[15], k32_m26_p06); - v[20] = k_madd_epi32(u[ 8], k32_m10_p22); - v[21] = k_madd_epi32(u[ 9], k32_m10_p22); + v[20] = k_madd_epi32(u[8], k32_m10_p22); + v[21] = k_madd_epi32(u[9], k32_m10_p22); v[22] = k_madd_epi32(u[10], k32_m10_p22); v[23] = k_madd_epi32(u[11], k32_m10_p22); - v[24] = k_madd_epi32(u[ 4], k32_m18_p14); - v[25] = k_madd_epi32(u[ 5], k32_m18_p14); - v[26] = k_madd_epi32(u[ 6], k32_m18_p14); - v[27] = k_madd_epi32(u[ 7], k32_m18_p14); - v[28] = k_madd_epi32(u[ 0], k32_m02_p30); - v[29] = k_madd_epi32(u[ 1], k32_m02_p30); - v[30] = k_madd_epi32(u[ 2], k32_m02_p30); - v[31] = k_madd_epi32(u[ 3], k32_m02_p30); + v[24] = k_madd_epi32(u[4], k32_m18_p14); + v[25] = k_madd_epi32(u[5], k32_m18_p14); + v[26] = k_madd_epi32(u[6], k32_m18_p14); + v[27] = k_madd_epi32(u[7], k32_m18_p14); + v[28] = k_madd_epi32(u[0], k32_m02_p30); + v[29] = k_madd_epi32(u[1], k32_m02_p30); + v[30] = k_madd_epi32(u[2], k32_m02_p30); + v[31] = k_madd_epi32(u[3], k32_m02_p30); #if DCT_HIGH_BIT_DEPTH overflow = k_check_epi32_overflow_32( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], - &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], - &kZero); + &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8], + &v[9], &v[10], &v[11], 
&v[12], &v[13], &v[14], &v[15], &v[16], + &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], &v[24], + &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], &kZero); if (overflow) { HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); return; } #endif // DCT_HIGH_BIT_DEPTH - u[ 0] = k_packs_epi64(v[ 0], v[ 1]); - u[ 1] = k_packs_epi64(v[ 2], v[ 3]); - u[ 2] = k_packs_epi64(v[ 4], v[ 5]); - u[ 3] = k_packs_epi64(v[ 6], v[ 7]); - u[ 4] = k_packs_epi64(v[ 8], v[ 9]); - u[ 5] = k_packs_epi64(v[10], v[11]); - u[ 6] = k_packs_epi64(v[12], v[13]); - u[ 7] = k_packs_epi64(v[14], v[15]); - u[ 8] = k_packs_epi64(v[16], v[17]); - u[ 9] = k_packs_epi64(v[18], v[19]); + u[0] = k_packs_epi64(v[0], v[1]); + u[1] = k_packs_epi64(v[2], v[3]); + u[2] = k_packs_epi64(v[4], v[5]); + u[3] = k_packs_epi64(v[6], v[7]); + u[4] = k_packs_epi64(v[8], v[9]); + u[5] = k_packs_epi64(v[10], v[11]); + u[6] = k_packs_epi64(v[12], v[13]); + u[7] = k_packs_epi64(v[14], v[15]); + u[8] = k_packs_epi64(v[16], v[17]); + u[9] = k_packs_epi64(v[18], v[19]); u[10] = k_packs_epi64(v[20], v[21]); u[11] = k_packs_epi64(v[22], v[23]); u[12] = k_packs_epi64(v[24], v[25]); @@ -2438,16 +2487,16 @@ void FDCT32x32_2D(const int16_t *input, u[14] = k_packs_epi64(v[28], v[29]); u[15] = k_packs_epi64(v[30], v[31]); - v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); - v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); - v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); - v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); - v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); - v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); - v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); - v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); - v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); - v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); + v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); + 
v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); + v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); + v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); + v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); + v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); + v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); @@ -2455,16 +2504,16 @@ void FDCT32x32_2D(const int16_t *input, v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS); - u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS); - u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS); - u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS); - u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS); - u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS); - u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS); - u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS); - u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS); - u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS); + u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); + u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); + u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); + u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); + u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); + u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); + u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); + u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); + u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS); + u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS); u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); @@ -2472,16 +2521,16 @@ void FDCT32x32_2D(const int16_t *input, u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); - v[ 0] = _mm_cmplt_epi32(u[ 0], kZero); - 
v[ 1] = _mm_cmplt_epi32(u[ 1], kZero); - v[ 2] = _mm_cmplt_epi32(u[ 2], kZero); - v[ 3] = _mm_cmplt_epi32(u[ 3], kZero); - v[ 4] = _mm_cmplt_epi32(u[ 4], kZero); - v[ 5] = _mm_cmplt_epi32(u[ 5], kZero); - v[ 6] = _mm_cmplt_epi32(u[ 6], kZero); - v[ 7] = _mm_cmplt_epi32(u[ 7], kZero); - v[ 8] = _mm_cmplt_epi32(u[ 8], kZero); - v[ 9] = _mm_cmplt_epi32(u[ 9], kZero); + v[0] = _mm_cmplt_epi32(u[0], kZero); + v[1] = _mm_cmplt_epi32(u[1], kZero); + v[2] = _mm_cmplt_epi32(u[2], kZero); + v[3] = _mm_cmplt_epi32(u[3], kZero); + v[4] = _mm_cmplt_epi32(u[4], kZero); + v[5] = _mm_cmplt_epi32(u[5], kZero); + v[6] = _mm_cmplt_epi32(u[6], kZero); + v[7] = _mm_cmplt_epi32(u[7], kZero); + v[8] = _mm_cmplt_epi32(u[8], kZero); + v[9] = _mm_cmplt_epi32(u[9], kZero); v[10] = _mm_cmplt_epi32(u[10], kZero); v[11] = _mm_cmplt_epi32(u[11], kZero); v[12] = _mm_cmplt_epi32(u[12], kZero); @@ -2489,16 +2538,16 @@ void FDCT32x32_2D(const int16_t *input, v[14] = _mm_cmplt_epi32(u[14], kZero); v[15] = _mm_cmplt_epi32(u[15], kZero); - u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]); - u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]); - u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]); - u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]); - u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]); - u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]); - u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]); - u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]); - u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]); - u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]); + u[0] = _mm_sub_epi32(u[0], v[0]); + u[1] = _mm_sub_epi32(u[1], v[1]); + u[2] = _mm_sub_epi32(u[2], v[2]); + u[3] = _mm_sub_epi32(u[3], v[3]); + u[4] = _mm_sub_epi32(u[4], v[4]); + u[5] = _mm_sub_epi32(u[5], v[5]); + u[6] = _mm_sub_epi32(u[6], v[6]); + u[7] = _mm_sub_epi32(u[7], v[7]); + u[8] = _mm_sub_epi32(u[8], v[8]); + u[9] = _mm_sub_epi32(u[9], v[9]); u[10] = _mm_sub_epi32(u[10], v[10]); u[11] = _mm_sub_epi32(u[11], v[11]); u[12] = _mm_sub_epi32(u[12], v[12]); @@ -2506,16 +2555,16 @@ void FDCT32x32_2D(const int16_t *input, u[14] = _mm_sub_epi32(u[14], v[14]); u[15] = 
_mm_sub_epi32(u[15], v[15]); - v[ 0] = _mm_add_epi32(u[ 0], K32One); - v[ 1] = _mm_add_epi32(u[ 1], K32One); - v[ 2] = _mm_add_epi32(u[ 2], K32One); - v[ 3] = _mm_add_epi32(u[ 3], K32One); - v[ 4] = _mm_add_epi32(u[ 4], K32One); - v[ 5] = _mm_add_epi32(u[ 5], K32One); - v[ 6] = _mm_add_epi32(u[ 6], K32One); - v[ 7] = _mm_add_epi32(u[ 7], K32One); - v[ 8] = _mm_add_epi32(u[ 8], K32One); - v[ 9] = _mm_add_epi32(u[ 9], K32One); + v[0] = _mm_add_epi32(u[0], K32One); + v[1] = _mm_add_epi32(u[1], K32One); + v[2] = _mm_add_epi32(u[2], K32One); + v[3] = _mm_add_epi32(u[3], K32One); + v[4] = _mm_add_epi32(u[4], K32One); + v[5] = _mm_add_epi32(u[5], K32One); + v[6] = _mm_add_epi32(u[6], K32One); + v[7] = _mm_add_epi32(u[7], K32One); + v[8] = _mm_add_epi32(u[8], K32One); + v[9] = _mm_add_epi32(u[9], K32One); v[10] = _mm_add_epi32(u[10], K32One); v[11] = _mm_add_epi32(u[11], K32One); v[12] = _mm_add_epi32(u[12], K32One); @@ -2523,16 +2572,16 @@ void FDCT32x32_2D(const int16_t *input, v[14] = _mm_add_epi32(u[14], K32One); v[15] = _mm_add_epi32(u[15], K32One); - u[ 0] = _mm_srai_epi32(v[ 0], 2); - u[ 1] = _mm_srai_epi32(v[ 1], 2); - u[ 2] = _mm_srai_epi32(v[ 2], 2); - u[ 3] = _mm_srai_epi32(v[ 3], 2); - u[ 4] = _mm_srai_epi32(v[ 4], 2); - u[ 5] = _mm_srai_epi32(v[ 5], 2); - u[ 6] = _mm_srai_epi32(v[ 6], 2); - u[ 7] = _mm_srai_epi32(v[ 7], 2); - u[ 8] = _mm_srai_epi32(v[ 8], 2); - u[ 9] = _mm_srai_epi32(v[ 9], 2); + u[0] = _mm_srai_epi32(v[0], 2); + u[1] = _mm_srai_epi32(v[1], 2); + u[2] = _mm_srai_epi32(v[2], 2); + u[3] = _mm_srai_epi32(v[3], 2); + u[4] = _mm_srai_epi32(v[4], 2); + u[5] = _mm_srai_epi32(v[5], 2); + u[6] = _mm_srai_epi32(v[6], 2); + u[7] = _mm_srai_epi32(v[7], 2); + u[8] = _mm_srai_epi32(v[8], 2); + u[9] = _mm_srai_epi32(v[9], 2); u[10] = _mm_srai_epi32(v[10], 2); u[11] = _mm_srai_epi32(v[11], 2); u[12] = _mm_srai_epi32(v[12], 2); @@ -2540,18 +2589,18 @@ void FDCT32x32_2D(const int16_t *input, u[14] = _mm_srai_epi32(v[14], 2); u[15] = _mm_srai_epi32(v[15], 2); - 
out[ 2] = _mm_packs_epi32(u[0], u[1]); + out[2] = _mm_packs_epi32(u[0], u[1]); out[18] = _mm_packs_epi32(u[2], u[3]); out[10] = _mm_packs_epi32(u[4], u[5]); out[26] = _mm_packs_epi32(u[6], u[7]); - out[ 6] = _mm_packs_epi32(u[8], u[9]); + out[6] = _mm_packs_epi32(u[8], u[9]); out[22] = _mm_packs_epi32(u[10], u[11]); out[14] = _mm_packs_epi32(u[12], u[13]); out[30] = _mm_packs_epi32(u[14], u[15]); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&out[2], &out[18], &out[10], - &out[26], &out[6], &out[22], - &out[14], &out[30]); + overflow = + check_epi16_overflow_x8(&out[2], &out[18], &out[10], &out[26], + &out[6], &out[22], &out[14], &out[30]); if (overflow) { HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); return; @@ -2603,16 +2652,16 @@ void FDCT32x32_2D(const int16_t *input, const __m128i k32_m17_p15 = pair_set_epi32(-cospi_17_64, cospi_15_64); const __m128i k32_m01_p31 = pair_set_epi32(-cospi_1_64, cospi_31_64); - u[ 0] = _mm_unpacklo_epi32(lstep1[32], lstep1[62]); - u[ 1] = _mm_unpackhi_epi32(lstep1[32], lstep1[62]); - u[ 2] = _mm_unpacklo_epi32(lstep1[33], lstep1[63]); - u[ 3] = _mm_unpackhi_epi32(lstep1[33], lstep1[63]); - u[ 4] = _mm_unpacklo_epi32(lstep1[34], lstep1[60]); - u[ 5] = _mm_unpackhi_epi32(lstep1[34], lstep1[60]); - u[ 6] = _mm_unpacklo_epi32(lstep1[35], lstep1[61]); - u[ 7] = _mm_unpackhi_epi32(lstep1[35], lstep1[61]); - u[ 8] = _mm_unpacklo_epi32(lstep1[36], lstep1[58]); - u[ 9] = _mm_unpackhi_epi32(lstep1[36], lstep1[58]); + u[0] = _mm_unpacklo_epi32(lstep1[32], lstep1[62]); + u[1] = _mm_unpackhi_epi32(lstep1[32], lstep1[62]); + u[2] = _mm_unpacklo_epi32(lstep1[33], lstep1[63]); + u[3] = _mm_unpackhi_epi32(lstep1[33], lstep1[63]); + u[4] = _mm_unpacklo_epi32(lstep1[34], lstep1[60]); + u[5] = _mm_unpackhi_epi32(lstep1[34], lstep1[60]); + u[6] = _mm_unpacklo_epi32(lstep1[35], lstep1[61]); + u[7] = _mm_unpackhi_epi32(lstep1[35], lstep1[61]); + u[8] = _mm_unpacklo_epi32(lstep1[36], lstep1[58]); + u[9] = 
_mm_unpackhi_epi32(lstep1[36], lstep1[58]); u[10] = _mm_unpacklo_epi32(lstep1[37], lstep1[59]); u[11] = _mm_unpackhi_epi32(lstep1[37], lstep1[59]); u[12] = _mm_unpacklo_epi32(lstep1[38], lstep1[56]); @@ -2620,16 +2669,16 @@ void FDCT32x32_2D(const int16_t *input, u[14] = _mm_unpacklo_epi32(lstep1[39], lstep1[57]); u[15] = _mm_unpackhi_epi32(lstep1[39], lstep1[57]); - v[ 0] = k_madd_epi32(u[ 0], k32_p31_p01); - v[ 1] = k_madd_epi32(u[ 1], k32_p31_p01); - v[ 2] = k_madd_epi32(u[ 2], k32_p31_p01); - v[ 3] = k_madd_epi32(u[ 3], k32_p31_p01); - v[ 4] = k_madd_epi32(u[ 4], k32_p15_p17); - v[ 5] = k_madd_epi32(u[ 5], k32_p15_p17); - v[ 6] = k_madd_epi32(u[ 6], k32_p15_p17); - v[ 7] = k_madd_epi32(u[ 7], k32_p15_p17); - v[ 8] = k_madd_epi32(u[ 8], k32_p23_p09); - v[ 9] = k_madd_epi32(u[ 9], k32_p23_p09); + v[0] = k_madd_epi32(u[0], k32_p31_p01); + v[1] = k_madd_epi32(u[1], k32_p31_p01); + v[2] = k_madd_epi32(u[2], k32_p31_p01); + v[3] = k_madd_epi32(u[3], k32_p31_p01); + v[4] = k_madd_epi32(u[4], k32_p15_p17); + v[5] = k_madd_epi32(u[5], k32_p15_p17); + v[6] = k_madd_epi32(u[6], k32_p15_p17); + v[7] = k_madd_epi32(u[7], k32_p15_p17); + v[8] = k_madd_epi32(u[8], k32_p23_p09); + v[9] = k_madd_epi32(u[9], k32_p23_p09); v[10] = k_madd_epi32(u[10], k32_p23_p09); v[11] = k_madd_epi32(u[11], k32_p23_p09); v[12] = k_madd_epi32(u[12], k32_p07_p25); @@ -2640,41 +2689,40 @@ void FDCT32x32_2D(const int16_t *input, v[17] = k_madd_epi32(u[13], k32_m25_p07); v[18] = k_madd_epi32(u[14], k32_m25_p07); v[19] = k_madd_epi32(u[15], k32_m25_p07); - v[20] = k_madd_epi32(u[ 8], k32_m09_p23); - v[21] = k_madd_epi32(u[ 9], k32_m09_p23); + v[20] = k_madd_epi32(u[8], k32_m09_p23); + v[21] = k_madd_epi32(u[9], k32_m09_p23); v[22] = k_madd_epi32(u[10], k32_m09_p23); v[23] = k_madd_epi32(u[11], k32_m09_p23); - v[24] = k_madd_epi32(u[ 4], k32_m17_p15); - v[25] = k_madd_epi32(u[ 5], k32_m17_p15); - v[26] = k_madd_epi32(u[ 6], k32_m17_p15); - v[27] = k_madd_epi32(u[ 7], k32_m17_p15); - v[28] = 
k_madd_epi32(u[ 0], k32_m01_p31); - v[29] = k_madd_epi32(u[ 1], k32_m01_p31); - v[30] = k_madd_epi32(u[ 2], k32_m01_p31); - v[31] = k_madd_epi32(u[ 3], k32_m01_p31); + v[24] = k_madd_epi32(u[4], k32_m17_p15); + v[25] = k_madd_epi32(u[5], k32_m17_p15); + v[26] = k_madd_epi32(u[6], k32_m17_p15); + v[27] = k_madd_epi32(u[7], k32_m17_p15); + v[28] = k_madd_epi32(u[0], k32_m01_p31); + v[29] = k_madd_epi32(u[1], k32_m01_p31); + v[30] = k_madd_epi32(u[2], k32_m01_p31); + v[31] = k_madd_epi32(u[3], k32_m01_p31); #if DCT_HIGH_BIT_DEPTH overflow = k_check_epi32_overflow_32( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], - &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], - &kZero); + &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8], + &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &v[16], + &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], &v[24], + &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], &kZero); if (overflow) { HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); return; } #endif // DCT_HIGH_BIT_DEPTH - u[ 0] = k_packs_epi64(v[ 0], v[ 1]); - u[ 1] = k_packs_epi64(v[ 2], v[ 3]); - u[ 2] = k_packs_epi64(v[ 4], v[ 5]); - u[ 3] = k_packs_epi64(v[ 6], v[ 7]); - u[ 4] = k_packs_epi64(v[ 8], v[ 9]); - u[ 5] = k_packs_epi64(v[10], v[11]); - u[ 6] = k_packs_epi64(v[12], v[13]); - u[ 7] = k_packs_epi64(v[14], v[15]); - u[ 8] = k_packs_epi64(v[16], v[17]); - u[ 9] = k_packs_epi64(v[18], v[19]); + u[0] = k_packs_epi64(v[0], v[1]); + u[1] = k_packs_epi64(v[2], v[3]); + u[2] = k_packs_epi64(v[4], v[5]); + u[3] = k_packs_epi64(v[6], v[7]); + u[4] = k_packs_epi64(v[8], v[9]); + u[5] = k_packs_epi64(v[10], v[11]); + u[6] = k_packs_epi64(v[12], v[13]); + u[7] = k_packs_epi64(v[14], v[15]); + u[8] = k_packs_epi64(v[16], v[17]); + u[9] = k_packs_epi64(v[18], v[19]); u[10] = k_packs_epi64(v[20], 
v[21]); u[11] = k_packs_epi64(v[22], v[23]); u[12] = k_packs_epi64(v[24], v[25]); @@ -2682,16 +2730,16 @@ void FDCT32x32_2D(const int16_t *input, u[14] = k_packs_epi64(v[28], v[29]); u[15] = k_packs_epi64(v[30], v[31]); - v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); - v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); - v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); - v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); - v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); - v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); - v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); - v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); - v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); - v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); + v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); + v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); + v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); + v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); + v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); + v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); @@ -2699,16 +2747,16 @@ void FDCT32x32_2D(const int16_t *input, v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS); - u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS); - u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS); - u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS); - u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS); - u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS); - u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS); - u[ 7] = _mm_srai_epi32(v[ 
7], DCT_CONST_BITS); - u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS); - u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS); + u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); + u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); + u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); + u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); + u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); + u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); + u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); + u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); + u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS); + u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS); u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); @@ -2716,16 +2764,16 @@ void FDCT32x32_2D(const int16_t *input, u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); - v[ 0] = _mm_cmplt_epi32(u[ 0], kZero); - v[ 1] = _mm_cmplt_epi32(u[ 1], kZero); - v[ 2] = _mm_cmplt_epi32(u[ 2], kZero); - v[ 3] = _mm_cmplt_epi32(u[ 3], kZero); - v[ 4] = _mm_cmplt_epi32(u[ 4], kZero); - v[ 5] = _mm_cmplt_epi32(u[ 5], kZero); - v[ 6] = _mm_cmplt_epi32(u[ 6], kZero); - v[ 7] = _mm_cmplt_epi32(u[ 7], kZero); - v[ 8] = _mm_cmplt_epi32(u[ 8], kZero); - v[ 9] = _mm_cmplt_epi32(u[ 9], kZero); + v[0] = _mm_cmplt_epi32(u[0], kZero); + v[1] = _mm_cmplt_epi32(u[1], kZero); + v[2] = _mm_cmplt_epi32(u[2], kZero); + v[3] = _mm_cmplt_epi32(u[3], kZero); + v[4] = _mm_cmplt_epi32(u[4], kZero); + v[5] = _mm_cmplt_epi32(u[5], kZero); + v[6] = _mm_cmplt_epi32(u[6], kZero); + v[7] = _mm_cmplt_epi32(u[7], kZero); + v[8] = _mm_cmplt_epi32(u[8], kZero); + v[9] = _mm_cmplt_epi32(u[9], kZero); v[10] = _mm_cmplt_epi32(u[10], kZero); v[11] = _mm_cmplt_epi32(u[11], kZero); v[12] = _mm_cmplt_epi32(u[12], kZero); @@ -2733,16 +2781,16 @@ void FDCT32x32_2D(const int16_t *input, v[14] = _mm_cmplt_epi32(u[14], kZero); v[15] = _mm_cmplt_epi32(u[15], kZero); - u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]); - u[ 1] = 
_mm_sub_epi32(u[ 1], v[ 1]); - u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]); - u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]); - u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]); - u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]); - u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]); - u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]); - u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]); - u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]); + u[0] = _mm_sub_epi32(u[0], v[0]); + u[1] = _mm_sub_epi32(u[1], v[1]); + u[2] = _mm_sub_epi32(u[2], v[2]); + u[3] = _mm_sub_epi32(u[3], v[3]); + u[4] = _mm_sub_epi32(u[4], v[4]); + u[5] = _mm_sub_epi32(u[5], v[5]); + u[6] = _mm_sub_epi32(u[6], v[6]); + u[7] = _mm_sub_epi32(u[7], v[7]); + u[8] = _mm_sub_epi32(u[8], v[8]); + u[9] = _mm_sub_epi32(u[9], v[9]); u[10] = _mm_sub_epi32(u[10], v[10]); u[11] = _mm_sub_epi32(u[11], v[11]); u[12] = _mm_sub_epi32(u[12], v[12]); @@ -2784,18 +2832,18 @@ void FDCT32x32_2D(const int16_t *input, u[14] = _mm_srai_epi32(v[14], 2); u[15] = _mm_srai_epi32(v[15], 2); - out[ 1] = _mm_packs_epi32(u[0], u[1]); + out[1] = _mm_packs_epi32(u[0], u[1]); out[17] = _mm_packs_epi32(u[2], u[3]); - out[ 9] = _mm_packs_epi32(u[4], u[5]); + out[9] = _mm_packs_epi32(u[4], u[5]); out[25] = _mm_packs_epi32(u[6], u[7]); - out[ 7] = _mm_packs_epi32(u[8], u[9]); + out[7] = _mm_packs_epi32(u[8], u[9]); out[23] = _mm_packs_epi32(u[10], u[11]); out[15] = _mm_packs_epi32(u[12], u[13]); out[31] = _mm_packs_epi32(u[14], u[15]); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&out[1], &out[17], &out[9], - &out[25], &out[7], &out[23], - &out[15], &out[31]); + overflow = + check_epi16_overflow_x8(&out[1], &out[17], &out[9], &out[25], + &out[7], &out[23], &out[15], &out[31]); if (overflow) { HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); return; @@ -2812,16 +2860,16 @@ void FDCT32x32_2D(const int16_t *input, const __m128i k32_m21_p11 = pair_set_epi32(-cospi_21_64, cospi_11_64); const __m128i k32_m05_p27 = pair_set_epi32(-cospi_5_64, cospi_27_64); - u[ 0] = _mm_unpacklo_epi32(lstep1[40], lstep1[54]); - u[ 1] = 
_mm_unpackhi_epi32(lstep1[40], lstep1[54]); - u[ 2] = _mm_unpacklo_epi32(lstep1[41], lstep1[55]); - u[ 3] = _mm_unpackhi_epi32(lstep1[41], lstep1[55]); - u[ 4] = _mm_unpacklo_epi32(lstep1[42], lstep1[52]); - u[ 5] = _mm_unpackhi_epi32(lstep1[42], lstep1[52]); - u[ 6] = _mm_unpacklo_epi32(lstep1[43], lstep1[53]); - u[ 7] = _mm_unpackhi_epi32(lstep1[43], lstep1[53]); - u[ 8] = _mm_unpacklo_epi32(lstep1[44], lstep1[50]); - u[ 9] = _mm_unpackhi_epi32(lstep1[44], lstep1[50]); + u[0] = _mm_unpacklo_epi32(lstep1[40], lstep1[54]); + u[1] = _mm_unpackhi_epi32(lstep1[40], lstep1[54]); + u[2] = _mm_unpacklo_epi32(lstep1[41], lstep1[55]); + u[3] = _mm_unpackhi_epi32(lstep1[41], lstep1[55]); + u[4] = _mm_unpacklo_epi32(lstep1[42], lstep1[52]); + u[5] = _mm_unpackhi_epi32(lstep1[42], lstep1[52]); + u[6] = _mm_unpacklo_epi32(lstep1[43], lstep1[53]); + u[7] = _mm_unpackhi_epi32(lstep1[43], lstep1[53]); + u[8] = _mm_unpacklo_epi32(lstep1[44], lstep1[50]); + u[9] = _mm_unpackhi_epi32(lstep1[44], lstep1[50]); u[10] = _mm_unpacklo_epi32(lstep1[45], lstep1[51]); u[11] = _mm_unpackhi_epi32(lstep1[45], lstep1[51]); u[12] = _mm_unpacklo_epi32(lstep1[46], lstep1[48]); @@ -2829,16 +2877,16 @@ void FDCT32x32_2D(const int16_t *input, u[14] = _mm_unpacklo_epi32(lstep1[47], lstep1[49]); u[15] = _mm_unpackhi_epi32(lstep1[47], lstep1[49]); - v[ 0] = k_madd_epi32(u[ 0], k32_p27_p05); - v[ 1] = k_madd_epi32(u[ 1], k32_p27_p05); - v[ 2] = k_madd_epi32(u[ 2], k32_p27_p05); - v[ 3] = k_madd_epi32(u[ 3], k32_p27_p05); - v[ 4] = k_madd_epi32(u[ 4], k32_p11_p21); - v[ 5] = k_madd_epi32(u[ 5], k32_p11_p21); - v[ 6] = k_madd_epi32(u[ 6], k32_p11_p21); - v[ 7] = k_madd_epi32(u[ 7], k32_p11_p21); - v[ 8] = k_madd_epi32(u[ 8], k32_p19_p13); - v[ 9] = k_madd_epi32(u[ 9], k32_p19_p13); + v[0] = k_madd_epi32(u[0], k32_p27_p05); + v[1] = k_madd_epi32(u[1], k32_p27_p05); + v[2] = k_madd_epi32(u[2], k32_p27_p05); + v[3] = k_madd_epi32(u[3], k32_p27_p05); + v[4] = k_madd_epi32(u[4], k32_p11_p21); + v[5] = 
k_madd_epi32(u[5], k32_p11_p21); + v[6] = k_madd_epi32(u[6], k32_p11_p21); + v[7] = k_madd_epi32(u[7], k32_p11_p21); + v[8] = k_madd_epi32(u[8], k32_p19_p13); + v[9] = k_madd_epi32(u[9], k32_p19_p13); v[10] = k_madd_epi32(u[10], k32_p19_p13); v[11] = k_madd_epi32(u[11], k32_p19_p13); v[12] = k_madd_epi32(u[12], k32_p03_p29); @@ -2849,41 +2897,40 @@ void FDCT32x32_2D(const int16_t *input, v[17] = k_madd_epi32(u[13], k32_m29_p03); v[18] = k_madd_epi32(u[14], k32_m29_p03); v[19] = k_madd_epi32(u[15], k32_m29_p03); - v[20] = k_madd_epi32(u[ 8], k32_m13_p19); - v[21] = k_madd_epi32(u[ 9], k32_m13_p19); + v[20] = k_madd_epi32(u[8], k32_m13_p19); + v[21] = k_madd_epi32(u[9], k32_m13_p19); v[22] = k_madd_epi32(u[10], k32_m13_p19); v[23] = k_madd_epi32(u[11], k32_m13_p19); - v[24] = k_madd_epi32(u[ 4], k32_m21_p11); - v[25] = k_madd_epi32(u[ 5], k32_m21_p11); - v[26] = k_madd_epi32(u[ 6], k32_m21_p11); - v[27] = k_madd_epi32(u[ 7], k32_m21_p11); - v[28] = k_madd_epi32(u[ 0], k32_m05_p27); - v[29] = k_madd_epi32(u[ 1], k32_m05_p27); - v[30] = k_madd_epi32(u[ 2], k32_m05_p27); - v[31] = k_madd_epi32(u[ 3], k32_m05_p27); + v[24] = k_madd_epi32(u[4], k32_m21_p11); + v[25] = k_madd_epi32(u[5], k32_m21_p11); + v[26] = k_madd_epi32(u[6], k32_m21_p11); + v[27] = k_madd_epi32(u[7], k32_m21_p11); + v[28] = k_madd_epi32(u[0], k32_m05_p27); + v[29] = k_madd_epi32(u[1], k32_m05_p27); + v[30] = k_madd_epi32(u[2], k32_m05_p27); + v[31] = k_madd_epi32(u[3], k32_m05_p27); #if DCT_HIGH_BIT_DEPTH overflow = k_check_epi32_overflow_32( - &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], - &v[8], &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], - &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], - &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], - &kZero); + &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8], + &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15], &v[16], + &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23], &v[24], + 
&v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31], &kZero); if (overflow) { HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); return; } #endif // DCT_HIGH_BIT_DEPTH - u[ 0] = k_packs_epi64(v[ 0], v[ 1]); - u[ 1] = k_packs_epi64(v[ 2], v[ 3]); - u[ 2] = k_packs_epi64(v[ 4], v[ 5]); - u[ 3] = k_packs_epi64(v[ 6], v[ 7]); - u[ 4] = k_packs_epi64(v[ 8], v[ 9]); - u[ 5] = k_packs_epi64(v[10], v[11]); - u[ 6] = k_packs_epi64(v[12], v[13]); - u[ 7] = k_packs_epi64(v[14], v[15]); - u[ 8] = k_packs_epi64(v[16], v[17]); - u[ 9] = k_packs_epi64(v[18], v[19]); + u[0] = k_packs_epi64(v[0], v[1]); + u[1] = k_packs_epi64(v[2], v[3]); + u[2] = k_packs_epi64(v[4], v[5]); + u[3] = k_packs_epi64(v[6], v[7]); + u[4] = k_packs_epi64(v[8], v[9]); + u[5] = k_packs_epi64(v[10], v[11]); + u[6] = k_packs_epi64(v[12], v[13]); + u[7] = k_packs_epi64(v[14], v[15]); + u[8] = k_packs_epi64(v[16], v[17]); + u[9] = k_packs_epi64(v[18], v[19]); u[10] = k_packs_epi64(v[20], v[21]); u[11] = k_packs_epi64(v[22], v[23]); u[12] = k_packs_epi64(v[24], v[25]); @@ -2891,16 +2938,16 @@ void FDCT32x32_2D(const int16_t *input, u[14] = k_packs_epi64(v[28], v[29]); u[15] = k_packs_epi64(v[30], v[31]); - v[ 0] = _mm_add_epi32(u[ 0], k__DCT_CONST_ROUNDING); - v[ 1] = _mm_add_epi32(u[ 1], k__DCT_CONST_ROUNDING); - v[ 2] = _mm_add_epi32(u[ 2], k__DCT_CONST_ROUNDING); - v[ 3] = _mm_add_epi32(u[ 3], k__DCT_CONST_ROUNDING); - v[ 4] = _mm_add_epi32(u[ 4], k__DCT_CONST_ROUNDING); - v[ 5] = _mm_add_epi32(u[ 5], k__DCT_CONST_ROUNDING); - v[ 6] = _mm_add_epi32(u[ 6], k__DCT_CONST_ROUNDING); - v[ 7] = _mm_add_epi32(u[ 7], k__DCT_CONST_ROUNDING); - v[ 8] = _mm_add_epi32(u[ 8], k__DCT_CONST_ROUNDING); - v[ 9] = _mm_add_epi32(u[ 9], k__DCT_CONST_ROUNDING); + v[0] = _mm_add_epi32(u[0], k__DCT_CONST_ROUNDING); + v[1] = _mm_add_epi32(u[1], k__DCT_CONST_ROUNDING); + v[2] = _mm_add_epi32(u[2], k__DCT_CONST_ROUNDING); + v[3] = _mm_add_epi32(u[3], k__DCT_CONST_ROUNDING); + v[4] = _mm_add_epi32(u[4], k__DCT_CONST_ROUNDING); + 
v[5] = _mm_add_epi32(u[5], k__DCT_CONST_ROUNDING); + v[6] = _mm_add_epi32(u[6], k__DCT_CONST_ROUNDING); + v[7] = _mm_add_epi32(u[7], k__DCT_CONST_ROUNDING); + v[8] = _mm_add_epi32(u[8], k__DCT_CONST_ROUNDING); + v[9] = _mm_add_epi32(u[9], k__DCT_CONST_ROUNDING); v[10] = _mm_add_epi32(u[10], k__DCT_CONST_ROUNDING); v[11] = _mm_add_epi32(u[11], k__DCT_CONST_ROUNDING); v[12] = _mm_add_epi32(u[12], k__DCT_CONST_ROUNDING); @@ -2908,16 +2955,16 @@ void FDCT32x32_2D(const int16_t *input, v[14] = _mm_add_epi32(u[14], k__DCT_CONST_ROUNDING); v[15] = _mm_add_epi32(u[15], k__DCT_CONST_ROUNDING); - u[ 0] = _mm_srai_epi32(v[ 0], DCT_CONST_BITS); - u[ 1] = _mm_srai_epi32(v[ 1], DCT_CONST_BITS); - u[ 2] = _mm_srai_epi32(v[ 2], DCT_CONST_BITS); - u[ 3] = _mm_srai_epi32(v[ 3], DCT_CONST_BITS); - u[ 4] = _mm_srai_epi32(v[ 4], DCT_CONST_BITS); - u[ 5] = _mm_srai_epi32(v[ 5], DCT_CONST_BITS); - u[ 6] = _mm_srai_epi32(v[ 6], DCT_CONST_BITS); - u[ 7] = _mm_srai_epi32(v[ 7], DCT_CONST_BITS); - u[ 8] = _mm_srai_epi32(v[ 8], DCT_CONST_BITS); - u[ 9] = _mm_srai_epi32(v[ 9], DCT_CONST_BITS); + u[0] = _mm_srai_epi32(v[0], DCT_CONST_BITS); + u[1] = _mm_srai_epi32(v[1], DCT_CONST_BITS); + u[2] = _mm_srai_epi32(v[2], DCT_CONST_BITS); + u[3] = _mm_srai_epi32(v[3], DCT_CONST_BITS); + u[4] = _mm_srai_epi32(v[4], DCT_CONST_BITS); + u[5] = _mm_srai_epi32(v[5], DCT_CONST_BITS); + u[6] = _mm_srai_epi32(v[6], DCT_CONST_BITS); + u[7] = _mm_srai_epi32(v[7], DCT_CONST_BITS); + u[8] = _mm_srai_epi32(v[8], DCT_CONST_BITS); + u[9] = _mm_srai_epi32(v[9], DCT_CONST_BITS); u[10] = _mm_srai_epi32(v[10], DCT_CONST_BITS); u[11] = _mm_srai_epi32(v[11], DCT_CONST_BITS); u[12] = _mm_srai_epi32(v[12], DCT_CONST_BITS); @@ -2925,16 +2972,16 @@ void FDCT32x32_2D(const int16_t *input, u[14] = _mm_srai_epi32(v[14], DCT_CONST_BITS); u[15] = _mm_srai_epi32(v[15], DCT_CONST_BITS); - v[ 0] = _mm_cmplt_epi32(u[ 0], kZero); - v[ 1] = _mm_cmplt_epi32(u[ 1], kZero); - v[ 2] = _mm_cmplt_epi32(u[ 2], kZero); - v[ 3] = 
_mm_cmplt_epi32(u[ 3], kZero); - v[ 4] = _mm_cmplt_epi32(u[ 4], kZero); - v[ 5] = _mm_cmplt_epi32(u[ 5], kZero); - v[ 6] = _mm_cmplt_epi32(u[ 6], kZero); - v[ 7] = _mm_cmplt_epi32(u[ 7], kZero); - v[ 8] = _mm_cmplt_epi32(u[ 8], kZero); - v[ 9] = _mm_cmplt_epi32(u[ 9], kZero); + v[0] = _mm_cmplt_epi32(u[0], kZero); + v[1] = _mm_cmplt_epi32(u[1], kZero); + v[2] = _mm_cmplt_epi32(u[2], kZero); + v[3] = _mm_cmplt_epi32(u[3], kZero); + v[4] = _mm_cmplt_epi32(u[4], kZero); + v[5] = _mm_cmplt_epi32(u[5], kZero); + v[6] = _mm_cmplt_epi32(u[6], kZero); + v[7] = _mm_cmplt_epi32(u[7], kZero); + v[8] = _mm_cmplt_epi32(u[8], kZero); + v[9] = _mm_cmplt_epi32(u[9], kZero); v[10] = _mm_cmplt_epi32(u[10], kZero); v[11] = _mm_cmplt_epi32(u[11], kZero); v[12] = _mm_cmplt_epi32(u[12], kZero); @@ -2942,16 +2989,16 @@ void FDCT32x32_2D(const int16_t *input, v[14] = _mm_cmplt_epi32(u[14], kZero); v[15] = _mm_cmplt_epi32(u[15], kZero); - u[ 0] = _mm_sub_epi32(u[ 0], v[ 0]); - u[ 1] = _mm_sub_epi32(u[ 1], v[ 1]); - u[ 2] = _mm_sub_epi32(u[ 2], v[ 2]); - u[ 3] = _mm_sub_epi32(u[ 3], v[ 3]); - u[ 4] = _mm_sub_epi32(u[ 4], v[ 4]); - u[ 5] = _mm_sub_epi32(u[ 5], v[ 5]); - u[ 6] = _mm_sub_epi32(u[ 6], v[ 6]); - u[ 7] = _mm_sub_epi32(u[ 7], v[ 7]); - u[ 8] = _mm_sub_epi32(u[ 8], v[ 8]); - u[ 9] = _mm_sub_epi32(u[ 9], v[ 9]); + u[0] = _mm_sub_epi32(u[0], v[0]); + u[1] = _mm_sub_epi32(u[1], v[1]); + u[2] = _mm_sub_epi32(u[2], v[2]); + u[3] = _mm_sub_epi32(u[3], v[3]); + u[4] = _mm_sub_epi32(u[4], v[4]); + u[5] = _mm_sub_epi32(u[5], v[5]); + u[6] = _mm_sub_epi32(u[6], v[6]); + u[7] = _mm_sub_epi32(u[7], v[7]); + u[8] = _mm_sub_epi32(u[8], v[8]); + u[9] = _mm_sub_epi32(u[9], v[9]); u[10] = _mm_sub_epi32(u[10], v[10]); u[11] = _mm_sub_epi32(u[11], v[11]); u[12] = _mm_sub_epi32(u[12], v[12]); @@ -2993,18 +3040,18 @@ void FDCT32x32_2D(const int16_t *input, u[14] = _mm_srai_epi32(v[14], 2); u[15] = _mm_srai_epi32(v[15], 2); - out[ 5] = _mm_packs_epi32(u[0], u[1]); + out[5] = _mm_packs_epi32(u[0], u[1]); 
out[21] = _mm_packs_epi32(u[2], u[3]); out[13] = _mm_packs_epi32(u[4], u[5]); out[29] = _mm_packs_epi32(u[6], u[7]); - out[ 3] = _mm_packs_epi32(u[8], u[9]); + out[3] = _mm_packs_epi32(u[8], u[9]); out[19] = _mm_packs_epi32(u[10], u[11]); out[11] = _mm_packs_epi32(u[12], u[13]); out[27] = _mm_packs_epi32(u[14], u[15]); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&out[5], &out[21], &out[13], - &out[29], &out[3], &out[19], - &out[11], &out[27]); + overflow = + check_epi16_overflow_x8(&out[5], &out[21], &out[13], &out[29], + &out[3], &out[19], &out[11], &out[27]); if (overflow) { HIGH_FDCT32x32_2D_ROWS_C(intermediate, output_org); return; diff --git a/libs/libvpx/vpx_dsp/x86/fwd_txfm_avx2.c b/libs/libvpx/vpx_dsp/x86/fwd_txfm_avx2.c index 6d9da6aa89..21f11f0c3e 100644 --- a/libs/libvpx/vpx_dsp/x86/fwd_txfm_avx2.c +++ b/libs/libvpx/vpx_dsp/x86/fwd_txfm_avx2.c @@ -13,11 +13,11 @@ #define FDCT32x32_2D_AVX2 vpx_fdct32x32_rd_avx2 #define FDCT32x32_HIGH_PRECISION 0 #include "vpx_dsp/x86/fwd_dct32x32_impl_avx2.h" -#undef FDCT32x32_2D_AVX2 -#undef FDCT32x32_HIGH_PRECISION +#undef FDCT32x32_2D_AVX2 +#undef FDCT32x32_HIGH_PRECISION #define FDCT32x32_2D_AVX2 vpx_fdct32x32_avx2 #define FDCT32x32_HIGH_PRECISION 1 -#include "vpx_dsp/x86/fwd_dct32x32_impl_avx2.h" // NOLINT -#undef FDCT32x32_2D_AVX2 -#undef FDCT32x32_HIGH_PRECISION +#include "vpx_dsp/x86/fwd_dct32x32_impl_avx2.h" // NOLINT +#undef FDCT32x32_2D_AVX2 +#undef FDCT32x32_HIGH_PRECISION diff --git a/libs/libvpx/vpx_dsp/x86/fwd_txfm_impl_sse2.h b/libs/libvpx/vpx_dsp/x86/fwd_txfm_impl_sse2.h index 69889e2e98..743e55e635 100644 --- a/libs/libvpx/vpx_dsp/x86/fwd_txfm_impl_sse2.h +++ b/libs/libvpx/vpx_dsp/x86/fwd_txfm_impl_sse2.h @@ -43,44 +43,36 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) { // These are the coefficients used for the multiplies. 
// In the comments, pN means cos(N pi /64) and mN is -cos(N pi /64), // where cospi_N_64 = cos(N pi /64) - const __m128i k__cospi_A = octa_set_epi16(cospi_16_64, cospi_16_64, - cospi_16_64, cospi_16_64, - cospi_16_64, -cospi_16_64, - cospi_16_64, -cospi_16_64); - const __m128i k__cospi_B = octa_set_epi16(cospi_16_64, -cospi_16_64, - cospi_16_64, -cospi_16_64, - cospi_16_64, cospi_16_64, - cospi_16_64, cospi_16_64); - const __m128i k__cospi_C = octa_set_epi16(cospi_8_64, cospi_24_64, - cospi_8_64, cospi_24_64, - cospi_24_64, -cospi_8_64, - cospi_24_64, -cospi_8_64); - const __m128i k__cospi_D = octa_set_epi16(cospi_24_64, -cospi_8_64, - cospi_24_64, -cospi_8_64, - cospi_8_64, cospi_24_64, - cospi_8_64, cospi_24_64); - const __m128i k__cospi_E = octa_set_epi16(cospi_16_64, cospi_16_64, - cospi_16_64, cospi_16_64, - cospi_16_64, cospi_16_64, - cospi_16_64, cospi_16_64); - const __m128i k__cospi_F = octa_set_epi16(cospi_16_64, -cospi_16_64, - cospi_16_64, -cospi_16_64, - cospi_16_64, -cospi_16_64, - cospi_16_64, -cospi_16_64); - const __m128i k__cospi_G = octa_set_epi16(cospi_8_64, cospi_24_64, - cospi_8_64, cospi_24_64, - -cospi_8_64, -cospi_24_64, - -cospi_8_64, -cospi_24_64); - const __m128i k__cospi_H = octa_set_epi16(cospi_24_64, -cospi_8_64, - cospi_24_64, -cospi_8_64, - -cospi_24_64, cospi_8_64, - -cospi_24_64, cospi_8_64); + const __m128i k__cospi_A = + octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64, + cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64); + const __m128i k__cospi_B = + octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64, + cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64); + const __m128i k__cospi_C = + octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64, + cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64); + const __m128i k__cospi_D = + octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64, + cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64); + const __m128i k__cospi_E = + 
octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64, + cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64); + const __m128i k__cospi_F = + octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64, + cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64); + const __m128i k__cospi_G = + octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64, + -cospi_8_64, -cospi_24_64, -cospi_8_64, -cospi_24_64); + const __m128i k__cospi_H = + octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64, + -cospi_24_64, cospi_8_64, -cospi_24_64, cospi_8_64); const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING); // This second rounding constant saves doing some extra adds at the end - const __m128i k__DCT_CONST_ROUNDING2 = _mm_set1_epi32(DCT_CONST_ROUNDING - +(DCT_CONST_ROUNDING << 1)); - const int DCT_CONST_BITS2 = DCT_CONST_BITS + 2; + const __m128i k__DCT_CONST_ROUNDING2 = + _mm_set1_epi32(DCT_CONST_ROUNDING + (DCT_CONST_ROUNDING << 1)); + const int DCT_CONST_BITS2 = DCT_CONST_BITS + 2; const __m128i k__nonzero_bias_a = _mm_setr_epi16(0, 1, 1, 1, 1, 1, 1, 1); const __m128i k__nonzero_bias_b = _mm_setr_epi16(1, 0, 0, 0, 0, 0, 0, 0); __m128i in0, in1; @@ -90,14 +82,14 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) { #endif // Load inputs. 
- in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride)); - in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride)); - in1 = _mm_unpacklo_epi64(in1, _mm_loadl_epi64((const __m128i *) - (input + 2 * stride))); - in0 = _mm_unpacklo_epi64(in0, _mm_loadl_epi64((const __m128i *) - (input + 3 * stride))); - // in0 = [i0 i1 i2 i3 iC iD iE iF] - // in1 = [i4 i5 i6 i7 i8 i9 iA iB] + in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride)); + in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride)); + in1 = _mm_unpacklo_epi64( + in1, _mm_loadl_epi64((const __m128i *)(input + 2 * stride))); + in0 = _mm_unpacklo_epi64( + in0, _mm_loadl_epi64((const __m128i *)(input + 3 * stride))); +// in0 = [i0 i1 i2 i3 iC iD iE iF] +// in1 = [i4 i5 i6 i7 i8 i9 iA iB] #if DCT_HIGH_BIT_DEPTH // Check inputs small enough to use optimised code cmp0 = _mm_xor_si128(_mm_cmpgt_epi16(in0, _mm_set1_epi16(0x3ff)), @@ -194,8 +186,8 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) { const __m128i t0 = ADD_EPI16(in0, in1); const __m128i t1 = SUB_EPI16(in0, in1); - // t0 = [c0 c1 c8 c9 c4 c5 cC cD] - // t1 = [c3 c2 cB cA -c7 -c6 -cF -cE] +// t0 = [c0 c1 c8 c9 c4 c5 cC cD] +// t1 = [c3 c2 cB cA -c7 -c6 -cF -cE] #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x2(&t0, &t1); if (overflow) { @@ -263,7 +255,6 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) { storeu_output(&in1, output + 2 * 4); } - void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) { int pass; // Constants @@ -283,14 +274,14 @@ void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) { int overflow; #endif // Load input - __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); - __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); - __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); - __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); - __m128i in4 = _mm_load_si128((const 
__m128i *)(input + 4 * stride)); - __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride)); - __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride)); - __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride)); + __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); + __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); + __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); + __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); + __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride)); + __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride)); + __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride)); + __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride)); // Pre-condition input (shift by two) in0 = _mm_slli_epi16(in0, 2); in1 = _mm_slli_epi16(in1, 2); @@ -319,8 +310,8 @@ void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) { const __m128i q7 = SUB_EPI16(in0, in7); #if DCT_HIGH_BIT_DEPTH if (pass == 1) { - overflow = check_epi16_overflow_x8(&q0, &q1, &q2, &q3, - &q4, &q5, &q6, &q7); + overflow = + check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7); if (overflow) { vpx_highbd_fdct8x8_c(input, output, stride); return; @@ -630,22 +621,22 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { __m128i res08, res09, res10, res11, res12, res13, res14, res15; // Load and pre-condition input. 
if (0 == pass) { - in00 = _mm_load_si128((const __m128i *)(in + 0 * stride)); - in01 = _mm_load_si128((const __m128i *)(in + 1 * stride)); - in02 = _mm_load_si128((const __m128i *)(in + 2 * stride)); - in03 = _mm_load_si128((const __m128i *)(in + 3 * stride)); - in04 = _mm_load_si128((const __m128i *)(in + 4 * stride)); - in05 = _mm_load_si128((const __m128i *)(in + 5 * stride)); - in06 = _mm_load_si128((const __m128i *)(in + 6 * stride)); - in07 = _mm_load_si128((const __m128i *)(in + 7 * stride)); - in08 = _mm_load_si128((const __m128i *)(in + 8 * stride)); - in09 = _mm_load_si128((const __m128i *)(in + 9 * stride)); - in10 = _mm_load_si128((const __m128i *)(in + 10 * stride)); - in11 = _mm_load_si128((const __m128i *)(in + 11 * stride)); - in12 = _mm_load_si128((const __m128i *)(in + 12 * stride)); - in13 = _mm_load_si128((const __m128i *)(in + 13 * stride)); - in14 = _mm_load_si128((const __m128i *)(in + 14 * stride)); - in15 = _mm_load_si128((const __m128i *)(in + 15 * stride)); + in00 = _mm_load_si128((const __m128i *)(in + 0 * stride)); + in01 = _mm_load_si128((const __m128i *)(in + 1 * stride)); + in02 = _mm_load_si128((const __m128i *)(in + 2 * stride)); + in03 = _mm_load_si128((const __m128i *)(in + 3 * stride)); + in04 = _mm_load_si128((const __m128i *)(in + 4 * stride)); + in05 = _mm_load_si128((const __m128i *)(in + 5 * stride)); + in06 = _mm_load_si128((const __m128i *)(in + 6 * stride)); + in07 = _mm_load_si128((const __m128i *)(in + 7 * stride)); + in08 = _mm_load_si128((const __m128i *)(in + 8 * stride)); + in09 = _mm_load_si128((const __m128i *)(in + 9 * stride)); + in10 = _mm_load_si128((const __m128i *)(in + 10 * stride)); + in11 = _mm_load_si128((const __m128i *)(in + 11 * stride)); + in12 = _mm_load_si128((const __m128i *)(in + 12 * stride)); + in13 = _mm_load_si128((const __m128i *)(in + 13 * stride)); + in14 = _mm_load_si128((const __m128i *)(in + 14 * stride)); + in15 = _mm_load_si128((const __m128i *)(in + 15 * stride)); // x = x << 2 in00 
= _mm_slli_epi16(in00, 2); in01 = _mm_slli_epi16(in01, 2); @@ -664,22 +655,22 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { in14 = _mm_slli_epi16(in14, 2); in15 = _mm_slli_epi16(in15, 2); } else { - in00 = _mm_load_si128((const __m128i *)(in + 0 * 16)); - in01 = _mm_load_si128((const __m128i *)(in + 1 * 16)); - in02 = _mm_load_si128((const __m128i *)(in + 2 * 16)); - in03 = _mm_load_si128((const __m128i *)(in + 3 * 16)); - in04 = _mm_load_si128((const __m128i *)(in + 4 * 16)); - in05 = _mm_load_si128((const __m128i *)(in + 5 * 16)); - in06 = _mm_load_si128((const __m128i *)(in + 6 * 16)); - in07 = _mm_load_si128((const __m128i *)(in + 7 * 16)); - in08 = _mm_load_si128((const __m128i *)(in + 8 * 16)); - in09 = _mm_load_si128((const __m128i *)(in + 9 * 16)); - in10 = _mm_load_si128((const __m128i *)(in + 10 * 16)); - in11 = _mm_load_si128((const __m128i *)(in + 11 * 16)); - in12 = _mm_load_si128((const __m128i *)(in + 12 * 16)); - in13 = _mm_load_si128((const __m128i *)(in + 13 * 16)); - in14 = _mm_load_si128((const __m128i *)(in + 14 * 16)); - in15 = _mm_load_si128((const __m128i *)(in + 15 * 16)); + in00 = _mm_load_si128((const __m128i *)(in + 0 * 16)); + in01 = _mm_load_si128((const __m128i *)(in + 1 * 16)); + in02 = _mm_load_si128((const __m128i *)(in + 2 * 16)); + in03 = _mm_load_si128((const __m128i *)(in + 3 * 16)); + in04 = _mm_load_si128((const __m128i *)(in + 4 * 16)); + in05 = _mm_load_si128((const __m128i *)(in + 5 * 16)); + in06 = _mm_load_si128((const __m128i *)(in + 6 * 16)); + in07 = _mm_load_si128((const __m128i *)(in + 7 * 16)); + in08 = _mm_load_si128((const __m128i *)(in + 8 * 16)); + in09 = _mm_load_si128((const __m128i *)(in + 9 * 16)); + in10 = _mm_load_si128((const __m128i *)(in + 10 * 16)); + in11 = _mm_load_si128((const __m128i *)(in + 11 * 16)); + in12 = _mm_load_si128((const __m128i *)(in + 12 * 16)); + in13 = _mm_load_si128((const __m128i *)(in + 13 * 16)); + in14 = _mm_load_si128((const __m128i *)(in + 14 * 
16)); + in15 = _mm_load_si128((const __m128i *)(in + 15 * 16)); // x = (x + 1) >> 2 in00 = _mm_add_epi16(in00, kOne); in01 = _mm_add_epi16(in01, kOne); @@ -745,10 +736,9 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { step1_6 = SUB_EPI16(in01, in14); step1_7 = SUB_EPI16(in00, in15); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step1_0, &step1_1, - &step1_2, &step1_3, - &step1_4, &step1_5, - &step1_6, &step1_7); + overflow = + check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3, + &step1_4, &step1_5, &step1_6, &step1_7); if (overflow) { vpx_highbd_fdct16x16_c(input, output, stride); return; @@ -767,8 +757,8 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { const __m128i q6 = SUB_EPI16(input1, input6); const __m128i q7 = SUB_EPI16(input0, input7); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&q0, &q1, &q2, &q3, - &q4, &q5, &q6, &q7); + overflow = + check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7); if (overflow) { vpx_highbd_fdct16x16_c(input, output, stride); return; @@ -818,12 +808,12 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { // into 32 bits. 
const __m128i d0 = _mm_unpacklo_epi16(q6, q5); const __m128i d1 = _mm_unpackhi_epi16(q6, q5); - const __m128i r0 = mult_round_shift(&d0, &d1, &k__cospi_p16_m16, - &k__DCT_CONST_ROUNDING, - DCT_CONST_BITS); - const __m128i r1 = mult_round_shift(&d0, &d1, &k__cospi_p16_p16, - &k__DCT_CONST_ROUNDING, - DCT_CONST_BITS); + const __m128i r0 = + mult_round_shift(&d0, &d1, &k__cospi_p16_m16, + &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); + const __m128i r1 = + mult_round_shift(&d0, &d1, &k__cospi_p16_p16, + &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x2(&r0, &r1); if (overflow) { @@ -860,8 +850,8 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { res06 = mult_round_shift(&t2, &t3, &k__cospi_m20_p12, &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&res02, &res14, - &res10, &res06); + overflow = + check_epi16_overflow_x4(&res02, &res14, &res10, &res06); if (overflow) { vpx_highbd_fdct16x16_c(input, output, stride); return; @@ -888,8 +878,8 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { step2_4 = mult_round_shift(&t2, &t3, &k__cospi_p16_p16, &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5, - &step2_4); + overflow = + check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5, &step2_4); if (overflow) { vpx_highbd_fdct16x16_c(input, output, stride); return; @@ -907,10 +897,9 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { step3_6 = ADD_EPI16(step1_6, step2_5); step3_7 = ADD_EPI16(step1_7, step2_4); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step3_0, &step3_1, - &step3_2, &step3_3, - &step3_4, &step3_5, - &step3_6, &step3_7); + overflow = + check_epi16_overflow_x8(&step3_0, &step3_1, &step3_2, &step3_3, + &step3_4, &step3_5, &step3_6, &step3_7); if (overflow) { vpx_highbd_fdct16x16_c(input, output, 
stride); return; @@ -932,8 +921,8 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { step2_5 = mult_round_shift(&t2, &t3, &k__cospi_p08_m24, &k__DCT_CONST_ROUNDING, DCT_CONST_BITS); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6, - &step2_5); + overflow = + check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6, &step2_5); if (overflow) { vpx_highbd_fdct16x16_c(input, output, stride); return; @@ -951,10 +940,9 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { step1_6 = SUB_EPI16(step3_7, step2_6); step1_7 = ADD_EPI16(step3_7, step2_6); #if DCT_HIGH_BIT_DEPTH - overflow = check_epi16_overflow_x8(&step1_0, &step1_1, - &step1_2, &step1_3, - &step1_4, &step1_5, - &step1_6, &step1_7); + overflow = + check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3, + &step1_4, &step1_5, &step1_6, &step1_7); if (overflow) { vpx_highbd_fdct16x16_c(input, output, stride); return; @@ -1006,16 +994,14 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { } } // Transpose the results, do it as two 8x8 transposes. - transpose_and_output8x8(&res00, &res01, &res02, &res03, - &res04, &res05, &res06, &res07, - pass, out0, out1); - transpose_and_output8x8(&res08, &res09, &res10, &res11, - &res12, &res13, &res14, &res15, - pass, out0 + 8, out1 + 8); + transpose_and_output8x8(&res00, &res01, &res02, &res03, &res04, &res05, + &res06, &res07, pass, out0, out1); + transpose_and_output8x8(&res08, &res09, &res10, &res11, &res12, &res13, + &res14, &res15, pass, out0 + 8, out1 + 8); if (pass == 0) { - out0 += 8*16; + out0 += 8 * 16; } else { - out1 += 8*16; + out1 += 8 * 16; } } // Setup in/out for next pass. 
diff --git a/libs/libvpx/vpx_dsp/x86/fwd_txfm_sse2.c b/libs/libvpx/vpx_dsp/x86/fwd_txfm_sse2.c index bca72e8749..e14b99197f 100644 --- a/libs/libvpx/vpx_dsp/x86/fwd_txfm_sse2.c +++ b/libs/libvpx/vpx_dsp/x86/fwd_txfm_sse2.c @@ -11,6 +11,7 @@ #include // SSE2 #include "./vpx_config.h" +#include "./vpx_dsp_rtcd.h" #include "vpx_dsp/vpx_dsp_common.h" #include "vpx_dsp/x86/fwd_txfm_sse2.h" @@ -18,12 +19,12 @@ void vpx_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) { __m128i in0, in1; __m128i tmp; const __m128i zero = _mm_setzero_si128(); - in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride)); - in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride)); - in1 = _mm_unpacklo_epi64(in1, _mm_loadl_epi64((const __m128i *) - (input + 2 * stride))); - in0 = _mm_unpacklo_epi64(in0, _mm_loadl_epi64((const __m128i *) - (input + 3 * stride))); + in0 = _mm_loadl_epi64((const __m128i *)(input + 0 * stride)); + in1 = _mm_loadl_epi64((const __m128i *)(input + 1 * stride)); + in1 = _mm_unpacklo_epi64( + in1, _mm_loadl_epi64((const __m128i *)(input + 2 * stride))); + in0 = _mm_unpacklo_epi64( + in0, _mm_loadl_epi64((const __m128i *)(input + 3 * stride))); tmp = _mm_add_epi16(in0, in1); in0 = _mm_unpacklo_epi16(zero, tmp); @@ -40,23 +41,23 @@ void vpx_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) { in1 = _mm_add_epi32(tmp, in0); in0 = _mm_slli_epi32(in1, 1); - store_output(&in0, output); + output[0] = (tran_low_t)_mm_cvtsi128_si32(in0); } void vpx_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) { - __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); - __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); - __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); - __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); + __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); + __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * 
stride)); + __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); + __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); __m128i u0, u1, sum; u0 = _mm_add_epi16(in0, in1); u1 = _mm_add_epi16(in2, in3); - in0 = _mm_load_si128((const __m128i *)(input + 4 * stride)); - in1 = _mm_load_si128((const __m128i *)(input + 5 * stride)); - in2 = _mm_load_si128((const __m128i *)(input + 6 * stride)); - in3 = _mm_load_si128((const __m128i *)(input + 7 * stride)); + in0 = _mm_load_si128((const __m128i *)(input + 4 * stride)); + in1 = _mm_load_si128((const __m128i *)(input + 5 * stride)); + in2 = _mm_load_si128((const __m128i *)(input + 6 * stride)); + in3 = _mm_load_si128((const __m128i *)(input + 7 * stride)); sum = _mm_add_epi16(u0, u1); @@ -64,7 +65,7 @@ void vpx_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) { in2 = _mm_add_epi16(in2, in3); sum = _mm_add_epi16(sum, in0); - u0 = _mm_setzero_si128(); + u0 = _mm_setzero_si128(); sum = _mm_add_epi16(sum, in2); in0 = _mm_unpacklo_epi16(u0, sum); @@ -80,7 +81,7 @@ void vpx_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) { in0 = _mm_srli_si128(sum, 8); in1 = _mm_add_epi32(sum, in0); - store_output(&in1, output); + output[0] = (tran_low_t)_mm_cvtsi128_si32(in1); } void vpx_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output, @@ -91,50 +92,50 @@ void vpx_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output, int i; for (i = 0; i < 2; ++i) { - input += 8 * i; - in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); - in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); - in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); - in3 = _mm_load_si128((const __m128i *)(input + 3 * stride)); + in0 = _mm_load_si128((const __m128i *)(input + 0 * stride + 0)); + in1 = _mm_load_si128((const __m128i *)(input + 0 * stride + 8)); + in2 = _mm_load_si128((const __m128i *)(input + 1 * stride + 0)); + in3 = _mm_load_si128((const __m128i *)(input + 1 
* stride + 8)); u0 = _mm_add_epi16(in0, in1); u1 = _mm_add_epi16(in2, in3); sum = _mm_add_epi16(sum, u0); - in0 = _mm_load_si128((const __m128i *)(input + 4 * stride)); - in1 = _mm_load_si128((const __m128i *)(input + 5 * stride)); - in2 = _mm_load_si128((const __m128i *)(input + 6 * stride)); - in3 = _mm_load_si128((const __m128i *)(input + 7 * stride)); + in0 = _mm_load_si128((const __m128i *)(input + 2 * stride + 0)); + in1 = _mm_load_si128((const __m128i *)(input + 2 * stride + 8)); + in2 = _mm_load_si128((const __m128i *)(input + 3 * stride + 0)); + in3 = _mm_load_si128((const __m128i *)(input + 3 * stride + 8)); sum = _mm_add_epi16(sum, u1); - u0 = _mm_add_epi16(in0, in1); - u1 = _mm_add_epi16(in2, in3); + u0 = _mm_add_epi16(in0, in1); + u1 = _mm_add_epi16(in2, in3); sum = _mm_add_epi16(sum, u0); - in0 = _mm_load_si128((const __m128i *)(input + 8 * stride)); - in1 = _mm_load_si128((const __m128i *)(input + 9 * stride)); - in2 = _mm_load_si128((const __m128i *)(input + 10 * stride)); - in3 = _mm_load_si128((const __m128i *)(input + 11 * stride)); + in0 = _mm_load_si128((const __m128i *)(input + 4 * stride + 0)); + in1 = _mm_load_si128((const __m128i *)(input + 4 * stride + 8)); + in2 = _mm_load_si128((const __m128i *)(input + 5 * stride + 0)); + in3 = _mm_load_si128((const __m128i *)(input + 5 * stride + 8)); sum = _mm_add_epi16(sum, u1); - u0 = _mm_add_epi16(in0, in1); - u1 = _mm_add_epi16(in2, in3); + u0 = _mm_add_epi16(in0, in1); + u1 = _mm_add_epi16(in2, in3); sum = _mm_add_epi16(sum, u0); - in0 = _mm_load_si128((const __m128i *)(input + 12 * stride)); - in1 = _mm_load_si128((const __m128i *)(input + 13 * stride)); - in2 = _mm_load_si128((const __m128i *)(input + 14 * stride)); - in3 = _mm_load_si128((const __m128i *)(input + 15 * stride)); + in0 = _mm_load_si128((const __m128i *)(input + 6 * stride + 0)); + in1 = _mm_load_si128((const __m128i *)(input + 6 * stride + 8)); + in2 = _mm_load_si128((const __m128i *)(input + 7 * stride + 0)); + in3 = 
_mm_load_si128((const __m128i *)(input + 7 * stride + 8)); sum = _mm_add_epi16(sum, u1); - u0 = _mm_add_epi16(in0, in1); - u1 = _mm_add_epi16(in2, in3); + u0 = _mm_add_epi16(in0, in1); + u1 = _mm_add_epi16(in2, in3); sum = _mm_add_epi16(sum, u0); sum = _mm_add_epi16(sum, u1); + input += 8 * stride; } - u0 = _mm_setzero_si128(); + u0 = _mm_setzero_si128(); in0 = _mm_unpacklo_epi16(u0, sum); in1 = _mm_unpackhi_epi16(u0, sum); in0 = _mm_srai_epi32(in0, 16); @@ -149,7 +150,7 @@ void vpx_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output, in1 = _mm_add_epi32(sum, in0); in1 = _mm_srai_epi32(in1, 1); - store_output(&in1, output); + output[0] = (tran_low_t)_mm_cvtsi128_si32(in1); } void vpx_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output, @@ -160,53 +161,53 @@ void vpx_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output, int i; for (i = 0; i < 8; ++i) { - in0 = _mm_load_si128((const __m128i *)(input + 0)); - in1 = _mm_load_si128((const __m128i *)(input + 8)); - in2 = _mm_load_si128((const __m128i *)(input + 16)); - in3 = _mm_load_si128((const __m128i *)(input + 24)); + in0 = _mm_load_si128((const __m128i *)(input + 0)); + in1 = _mm_load_si128((const __m128i *)(input + 8)); + in2 = _mm_load_si128((const __m128i *)(input + 16)); + in3 = _mm_load_si128((const __m128i *)(input + 24)); input += stride; u0 = _mm_add_epi16(in0, in1); u1 = _mm_add_epi16(in2, in3); sum = _mm_add_epi16(sum, u0); - in0 = _mm_load_si128((const __m128i *)(input + 0)); - in1 = _mm_load_si128((const __m128i *)(input + 8)); - in2 = _mm_load_si128((const __m128i *)(input + 16)); - in3 = _mm_load_si128((const __m128i *)(input + 24)); + in0 = _mm_load_si128((const __m128i *)(input + 0)); + in1 = _mm_load_si128((const __m128i *)(input + 8)); + in2 = _mm_load_si128((const __m128i *)(input + 16)); + in3 = _mm_load_si128((const __m128i *)(input + 24)); input += stride; sum = _mm_add_epi16(sum, u1); - u0 = _mm_add_epi16(in0, in1); - u1 = _mm_add_epi16(in2, in3); + u0 = _mm_add_epi16(in0, 
in1); + u1 = _mm_add_epi16(in2, in3); sum = _mm_add_epi16(sum, u0); - in0 = _mm_load_si128((const __m128i *)(input + 0)); - in1 = _mm_load_si128((const __m128i *)(input + 8)); - in2 = _mm_load_si128((const __m128i *)(input + 16)); - in3 = _mm_load_si128((const __m128i *)(input + 24)); + in0 = _mm_load_si128((const __m128i *)(input + 0)); + in1 = _mm_load_si128((const __m128i *)(input + 8)); + in2 = _mm_load_si128((const __m128i *)(input + 16)); + in3 = _mm_load_si128((const __m128i *)(input + 24)); input += stride; sum = _mm_add_epi16(sum, u1); - u0 = _mm_add_epi16(in0, in1); - u1 = _mm_add_epi16(in2, in3); + u0 = _mm_add_epi16(in0, in1); + u1 = _mm_add_epi16(in2, in3); sum = _mm_add_epi16(sum, u0); - in0 = _mm_load_si128((const __m128i *)(input + 0)); - in1 = _mm_load_si128((const __m128i *)(input + 8)); - in2 = _mm_load_si128((const __m128i *)(input + 16)); - in3 = _mm_load_si128((const __m128i *)(input + 24)); + in0 = _mm_load_si128((const __m128i *)(input + 0)); + in1 = _mm_load_si128((const __m128i *)(input + 8)); + in2 = _mm_load_si128((const __m128i *)(input + 16)); + in3 = _mm_load_si128((const __m128i *)(input + 24)); input += stride; sum = _mm_add_epi16(sum, u1); - u0 = _mm_add_epi16(in0, in1); - u1 = _mm_add_epi16(in2, in3); + u0 = _mm_add_epi16(in0, in1); + u1 = _mm_add_epi16(in2, in3); sum = _mm_add_epi16(sum, u0); sum = _mm_add_epi16(sum, u1); } - u0 = _mm_setzero_si128(); + u0 = _mm_setzero_si128(); in0 = _mm_unpacklo_epi16(u0, sum); in1 = _mm_unpackhi_epi16(u0, sum); in0 = _mm_srai_epi32(in0, 16); @@ -221,7 +222,7 @@ void vpx_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output, in1 = _mm_add_epi32(sum, in0); in1 = _mm_srai_epi32(in1, 3); - store_output(&in1, output); + output[0] = (tran_low_t)_mm_cvtsi128_si32(in1); } #define DCT_HIGH_BIT_DEPTH 0 @@ -229,43 +230,43 @@ void vpx_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output, #define FDCT8x8_2D vpx_fdct8x8_sse2 #define FDCT16x16_2D vpx_fdct16x16_sse2 #include 
"vpx_dsp/x86/fwd_txfm_impl_sse2.h" -#undef FDCT4x4_2D -#undef FDCT8x8_2D -#undef FDCT16x16_2D +#undef FDCT4x4_2D +#undef FDCT8x8_2D +#undef FDCT16x16_2D #define FDCT32x32_2D vpx_fdct32x32_rd_sse2 #define FDCT32x32_HIGH_PRECISION 0 #include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h" -#undef FDCT32x32_2D -#undef FDCT32x32_HIGH_PRECISION +#undef FDCT32x32_2D +#undef FDCT32x32_HIGH_PRECISION #define FDCT32x32_2D vpx_fdct32x32_sse2 #define FDCT32x32_HIGH_PRECISION 1 #include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h" // NOLINT -#undef FDCT32x32_2D -#undef FDCT32x32_HIGH_PRECISION -#undef DCT_HIGH_BIT_DEPTH +#undef FDCT32x32_2D +#undef FDCT32x32_HIGH_PRECISION +#undef DCT_HIGH_BIT_DEPTH #if CONFIG_VP9_HIGHBITDEPTH #define DCT_HIGH_BIT_DEPTH 1 #define FDCT4x4_2D vpx_highbd_fdct4x4_sse2 #define FDCT8x8_2D vpx_highbd_fdct8x8_sse2 #define FDCT16x16_2D vpx_highbd_fdct16x16_sse2 -#include "vpx_dsp/x86/fwd_txfm_impl_sse2.h" // NOLINT -#undef FDCT4x4_2D -#undef FDCT8x8_2D -#undef FDCT16x16_2D +#include "vpx_dsp/x86/fwd_txfm_impl_sse2.h" // NOLINT +#undef FDCT4x4_2D +#undef FDCT8x8_2D +#undef FDCT16x16_2D #define FDCT32x32_2D vpx_highbd_fdct32x32_rd_sse2 #define FDCT32x32_HIGH_PRECISION 0 -#include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h" // NOLINT -#undef FDCT32x32_2D -#undef FDCT32x32_HIGH_PRECISION +#include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h" // NOLINT +#undef FDCT32x32_2D +#undef FDCT32x32_HIGH_PRECISION #define FDCT32x32_2D vpx_highbd_fdct32x32_sse2 #define FDCT32x32_HIGH_PRECISION 1 -#include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h" // NOLINT -#undef FDCT32x32_2D -#undef FDCT32x32_HIGH_PRECISION -#undef DCT_HIGH_BIT_DEPTH +#include "vpx_dsp/x86/fwd_dct32x32_impl_sse2.h" // NOLINT +#undef FDCT32x32_2D +#undef FDCT32x32_HIGH_PRECISION +#undef DCT_HIGH_BIT_DEPTH #endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vpx_dsp/x86/fwd_txfm_sse2.h b/libs/libvpx/vpx_dsp/x86/fwd_txfm_sse2.h index 94d5befbfe..5201e764c8 100644 --- a/libs/libvpx/vpx_dsp/x86/fwd_txfm_sse2.h +++ 
b/libs/libvpx/vpx_dsp/x86/fwd_txfm_sse2.h @@ -63,99 +63,57 @@ static INLINE int check_epi16_overflow_x4(const __m128i *preg0, return _mm_movemask_epi8(cmp0); } -static INLINE int check_epi16_overflow_x8(const __m128i *preg0, - const __m128i *preg1, - const __m128i *preg2, - const __m128i *preg3, - const __m128i *preg4, - const __m128i *preg5, - const __m128i *preg6, - const __m128i *preg7) { +static INLINE int check_epi16_overflow_x8( + const __m128i *preg0, const __m128i *preg1, const __m128i *preg2, + const __m128i *preg3, const __m128i *preg4, const __m128i *preg5, + const __m128i *preg6, const __m128i *preg7) { int res0, res1; res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3); res1 = check_epi16_overflow_x4(preg4, preg5, preg6, preg7); return res0 + res1; } -static INLINE int check_epi16_overflow_x12(const __m128i *preg0, - const __m128i *preg1, - const __m128i *preg2, - const __m128i *preg3, - const __m128i *preg4, - const __m128i *preg5, - const __m128i *preg6, - const __m128i *preg7, - const __m128i *preg8, - const __m128i *preg9, - const __m128i *preg10, - const __m128i *preg11) { +static INLINE int check_epi16_overflow_x12( + const __m128i *preg0, const __m128i *preg1, const __m128i *preg2, + const __m128i *preg3, const __m128i *preg4, const __m128i *preg5, + const __m128i *preg6, const __m128i *preg7, const __m128i *preg8, + const __m128i *preg9, const __m128i *preg10, const __m128i *preg11) { int res0, res1; res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3); res1 = check_epi16_overflow_x4(preg4, preg5, preg6, preg7); - if (!res0) - res0 = check_epi16_overflow_x4(preg8, preg9, preg10, preg11); + if (!res0) res0 = check_epi16_overflow_x4(preg8, preg9, preg10, preg11); return res0 + res1; } -static INLINE int check_epi16_overflow_x16(const __m128i *preg0, - const __m128i *preg1, - const __m128i *preg2, - const __m128i *preg3, - const __m128i *preg4, - const __m128i *preg5, - const __m128i *preg6, - const __m128i *preg7, - const __m128i 
*preg8, - const __m128i *preg9, - const __m128i *preg10, - const __m128i *preg11, - const __m128i *preg12, - const __m128i *preg13, - const __m128i *preg14, - const __m128i *preg15) { +static INLINE int check_epi16_overflow_x16( + const __m128i *preg0, const __m128i *preg1, const __m128i *preg2, + const __m128i *preg3, const __m128i *preg4, const __m128i *preg5, + const __m128i *preg6, const __m128i *preg7, const __m128i *preg8, + const __m128i *preg9, const __m128i *preg10, const __m128i *preg11, + const __m128i *preg12, const __m128i *preg13, const __m128i *preg14, + const __m128i *preg15) { int res0, res1; res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3); res1 = check_epi16_overflow_x4(preg4, preg5, preg6, preg7); if (!res0) { res0 = check_epi16_overflow_x4(preg8, preg9, preg10, preg11); - if (!res1) - res1 = check_epi16_overflow_x4(preg12, preg13, preg14, preg15); + if (!res1) res1 = check_epi16_overflow_x4(preg12, preg13, preg14, preg15); } return res0 + res1; } -static INLINE int check_epi16_overflow_x32(const __m128i *preg0, - const __m128i *preg1, - const __m128i *preg2, - const __m128i *preg3, - const __m128i *preg4, - const __m128i *preg5, - const __m128i *preg6, - const __m128i *preg7, - const __m128i *preg8, - const __m128i *preg9, - const __m128i *preg10, - const __m128i *preg11, - const __m128i *preg12, - const __m128i *preg13, - const __m128i *preg14, - const __m128i *preg15, - const __m128i *preg16, - const __m128i *preg17, - const __m128i *preg18, - const __m128i *preg19, - const __m128i *preg20, - const __m128i *preg21, - const __m128i *preg22, - const __m128i *preg23, - const __m128i *preg24, - const __m128i *preg25, - const __m128i *preg26, - const __m128i *preg27, - const __m128i *preg28, - const __m128i *preg29, - const __m128i *preg30, - const __m128i *preg31) { +static INLINE int check_epi16_overflow_x32( + const __m128i *preg0, const __m128i *preg1, const __m128i *preg2, + const __m128i *preg3, const __m128i *preg4, const __m128i 
*preg5, + const __m128i *preg6, const __m128i *preg7, const __m128i *preg8, + const __m128i *preg9, const __m128i *preg10, const __m128i *preg11, + const __m128i *preg12, const __m128i *preg13, const __m128i *preg14, + const __m128i *preg15, const __m128i *preg16, const __m128i *preg17, + const __m128i *preg18, const __m128i *preg19, const __m128i *preg20, + const __m128i *preg21, const __m128i *preg22, const __m128i *preg23, + const __m128i *preg24, const __m128i *preg25, const __m128i *preg26, + const __m128i *preg27, const __m128i *preg28, const __m128i *preg29, + const __m128i *preg30, const __m128i *preg31) { int res0, res1; res0 = check_epi16_overflow_x4(preg0, preg1, preg2, preg3); res1 = check_epi16_overflow_x4(preg4, preg5, preg6, preg7); @@ -190,36 +148,31 @@ static INLINE int k_check_epi32_overflow_4(const __m128i *preg0, __m128i reg1_shifted = _mm_slli_epi64(*preg1, 1); __m128i reg2_shifted = _mm_slli_epi64(*preg2, 1); __m128i reg3_shifted = _mm_slli_epi64(*preg3, 1); - __m128i reg0_top_dwords = _mm_shuffle_epi32( - reg0_shifted, _MM_SHUFFLE(0, 0, 3, 1)); - __m128i reg1_top_dwords = _mm_shuffle_epi32( - reg1_shifted, _MM_SHUFFLE(0, 0, 3, 1)); - __m128i reg2_top_dwords = _mm_shuffle_epi32( - reg2_shifted, _MM_SHUFFLE(0, 0, 3, 1)); - __m128i reg3_top_dwords = _mm_shuffle_epi32( - reg3_shifted, _MM_SHUFFLE(0, 0, 3, 1)); + __m128i reg0_top_dwords = + _mm_shuffle_epi32(reg0_shifted, _MM_SHUFFLE(0, 0, 3, 1)); + __m128i reg1_top_dwords = + _mm_shuffle_epi32(reg1_shifted, _MM_SHUFFLE(0, 0, 3, 1)); + __m128i reg2_top_dwords = + _mm_shuffle_epi32(reg2_shifted, _MM_SHUFFLE(0, 0, 3, 1)); + __m128i reg3_top_dwords = + _mm_shuffle_epi32(reg3_shifted, _MM_SHUFFLE(0, 0, 3, 1)); __m128i top_dwords_01 = _mm_unpacklo_epi64(reg0_top_dwords, reg1_top_dwords); __m128i top_dwords_23 = _mm_unpacklo_epi64(reg2_top_dwords, reg3_top_dwords); __m128i valid_positve_01 = _mm_cmpeq_epi32(top_dwords_01, *zero); __m128i valid_positve_23 = _mm_cmpeq_epi32(top_dwords_23, *zero); __m128i 
valid_negative_01 = _mm_cmpeq_epi32(top_dwords_01, minus_one); __m128i valid_negative_23 = _mm_cmpeq_epi32(top_dwords_23, minus_one); - int overflow_01 = _mm_movemask_epi8( - _mm_cmpeq_epi32(valid_positve_01, valid_negative_01)); - int overflow_23 = _mm_movemask_epi8( - _mm_cmpeq_epi32(valid_positve_23, valid_negative_23)); + int overflow_01 = + _mm_movemask_epi8(_mm_cmpeq_epi32(valid_positve_01, valid_negative_01)); + int overflow_23 = + _mm_movemask_epi8(_mm_cmpeq_epi32(valid_positve_23, valid_negative_23)); return (overflow_01 + overflow_23); } -static INLINE int k_check_epi32_overflow_8(const __m128i *preg0, - const __m128i *preg1, - const __m128i *preg2, - const __m128i *preg3, - const __m128i *preg4, - const __m128i *preg5, - const __m128i *preg6, - const __m128i *preg7, - const __m128i *zero) { +static INLINE int k_check_epi32_overflow_8( + const __m128i *preg0, const __m128i *preg1, const __m128i *preg2, + const __m128i *preg3, const __m128i *preg4, const __m128i *preg5, + const __m128i *preg6, const __m128i *preg7, const __m128i *zero) { int overflow = k_check_epi32_overflow_4(preg0, preg1, preg2, preg3, zero); if (!overflow) { overflow = k_check_epi32_overflow_4(preg4, preg5, preg6, preg7, zero); @@ -227,91 +180,59 @@ static INLINE int k_check_epi32_overflow_8(const __m128i *preg0, return overflow; } -static INLINE int k_check_epi32_overflow_16(const __m128i *preg0, - const __m128i *preg1, - const __m128i *preg2, - const __m128i *preg3, - const __m128i *preg4, - const __m128i *preg5, - const __m128i *preg6, - const __m128i *preg7, - const __m128i *preg8, - const __m128i *preg9, - const __m128i *preg10, - const __m128i *preg11, - const __m128i *preg12, - const __m128i *preg13, - const __m128i *preg14, - const __m128i *preg15, - const __m128i *zero) { - int overflow = k_check_epi32_overflow_4(preg0, preg1, preg2, preg3, zero); - if (!overflow) { - overflow = k_check_epi32_overflow_4(preg4, preg5, preg6, preg7, zero); - if (!overflow) { - overflow = 
k_check_epi32_overflow_4(preg8, preg9, preg10, preg11, - zero); - if (!overflow) { - overflow = k_check_epi32_overflow_4(preg12, preg13, preg14, preg15, - zero); - } - } - } - return overflow; -} - -static INLINE int k_check_epi32_overflow_32(const __m128i *preg0, - const __m128i *preg1, - const __m128i *preg2, - const __m128i *preg3, - const __m128i *preg4, - const __m128i *preg5, - const __m128i *preg6, - const __m128i *preg7, - const __m128i *preg8, - const __m128i *preg9, - const __m128i *preg10, - const __m128i *preg11, - const __m128i *preg12, - const __m128i *preg13, - const __m128i *preg14, - const __m128i *preg15, - const __m128i *preg16, - const __m128i *preg17, - const __m128i *preg18, - const __m128i *preg19, - const __m128i *preg20, - const __m128i *preg21, - const __m128i *preg22, - const __m128i *preg23, - const __m128i *preg24, - const __m128i *preg25, - const __m128i *preg26, - const __m128i *preg27, - const __m128i *preg28, - const __m128i *preg29, - const __m128i *preg30, - const __m128i *preg31, - const __m128i *zero) { +static INLINE int k_check_epi32_overflow_16( + const __m128i *preg0, const __m128i *preg1, const __m128i *preg2, + const __m128i *preg3, const __m128i *preg4, const __m128i *preg5, + const __m128i *preg6, const __m128i *preg7, const __m128i *preg8, + const __m128i *preg9, const __m128i *preg10, const __m128i *preg11, + const __m128i *preg12, const __m128i *preg13, const __m128i *preg14, + const __m128i *preg15, const __m128i *zero) { int overflow = k_check_epi32_overflow_4(preg0, preg1, preg2, preg3, zero); if (!overflow) { overflow = k_check_epi32_overflow_4(preg4, preg5, preg6, preg7, zero); if (!overflow) { overflow = k_check_epi32_overflow_4(preg8, preg9, preg10, preg11, zero); if (!overflow) { - overflow = k_check_epi32_overflow_4(preg12, preg13, preg14, preg15, - zero); + overflow = + k_check_epi32_overflow_4(preg12, preg13, preg14, preg15, zero); + } + } + } + return overflow; +} + +static INLINE int 
k_check_epi32_overflow_32( + const __m128i *preg0, const __m128i *preg1, const __m128i *preg2, + const __m128i *preg3, const __m128i *preg4, const __m128i *preg5, + const __m128i *preg6, const __m128i *preg7, const __m128i *preg8, + const __m128i *preg9, const __m128i *preg10, const __m128i *preg11, + const __m128i *preg12, const __m128i *preg13, const __m128i *preg14, + const __m128i *preg15, const __m128i *preg16, const __m128i *preg17, + const __m128i *preg18, const __m128i *preg19, const __m128i *preg20, + const __m128i *preg21, const __m128i *preg22, const __m128i *preg23, + const __m128i *preg24, const __m128i *preg25, const __m128i *preg26, + const __m128i *preg27, const __m128i *preg28, const __m128i *preg29, + const __m128i *preg30, const __m128i *preg31, const __m128i *zero) { + int overflow = k_check_epi32_overflow_4(preg0, preg1, preg2, preg3, zero); + if (!overflow) { + overflow = k_check_epi32_overflow_4(preg4, preg5, preg6, preg7, zero); + if (!overflow) { + overflow = k_check_epi32_overflow_4(preg8, preg9, preg10, preg11, zero); + if (!overflow) { + overflow = + k_check_epi32_overflow_4(preg12, preg13, preg14, preg15, zero); if (!overflow) { - overflow = k_check_epi32_overflow_4(preg16, preg17, preg18, preg19, - zero); + overflow = + k_check_epi32_overflow_4(preg16, preg17, preg18, preg19, zero); if (!overflow) { - overflow = k_check_epi32_overflow_4(preg20, preg21, - preg22, preg23, zero); + overflow = + k_check_epi32_overflow_4(preg20, preg21, preg22, preg23, zero); if (!overflow) { - overflow = k_check_epi32_overflow_4(preg24, preg25, - preg26, preg27, zero); + overflow = k_check_epi32_overflow_4(preg24, preg25, preg26, + preg27, zero); if (!overflow) { - overflow = k_check_epi32_overflow_4(preg28, preg29, - preg30, preg31, zero); + overflow = k_check_epi32_overflow_4(preg28, preg29, preg30, + preg31, zero); } } } @@ -322,7 +243,7 @@ static INLINE int k_check_epi32_overflow_32(const __m128i *preg0, return overflow; } -static INLINE void 
store_output(const __m128i *poutput, tran_low_t* dst_ptr) { +static INLINE void store_output(const __m128i *poutput, tran_low_t *dst_ptr) { #if CONFIG_VP9_HIGHBITDEPTH const __m128i zero = _mm_setzero_si128(); const __m128i sign_bits = _mm_cmplt_epi16(*poutput, zero); @@ -335,7 +256,7 @@ static INLINE void store_output(const __m128i *poutput, tran_low_t* dst_ptr) { #endif // CONFIG_VP9_HIGHBITDEPTH } -static INLINE void storeu_output(const __m128i *poutput, tran_low_t* dst_ptr) { +static INLINE void storeu_output(const __m128i *poutput, tran_low_t *dst_ptr) { #if CONFIG_VP9_HIGHBITDEPTH const __m128i zero = _mm_setzero_si128(); const __m128i sign_bits = _mm_cmplt_epi16(*poutput, zero); @@ -348,9 +269,7 @@ static INLINE void storeu_output(const __m128i *poutput, tran_low_t* dst_ptr) { #endif // CONFIG_VP9_HIGHBITDEPTH } - -static INLINE __m128i mult_round_shift(const __m128i *pin0, - const __m128i *pin1, +static INLINE __m128i mult_round_shift(const __m128i *pin0, const __m128i *pin1, const __m128i *pmultiplier, const __m128i *prounding, const int shift) { @@ -364,12 +283,10 @@ static INLINE __m128i mult_round_shift(const __m128i *pin0, } static INLINE void transpose_and_output8x8( - const __m128i *pin00, const __m128i *pin01, - const __m128i *pin02, const __m128i *pin03, - const __m128i *pin04, const __m128i *pin05, - const __m128i *pin06, const __m128i *pin07, - const int pass, int16_t* out0_ptr, - tran_low_t* out1_ptr) { + const __m128i *pin00, const __m128i *pin01, const __m128i *pin02, + const __m128i *pin03, const __m128i *pin04, const __m128i *pin05, + const __m128i *pin06, const __m128i *pin07, const int pass, + int16_t *out0_ptr, tran_low_t *out1_ptr) { // 00 01 02 03 04 05 06 07 // 10 11 12 13 14 15 16 17 // 20 21 22 23 24 25 26 27 @@ -427,14 +344,14 @@ static INLINE void transpose_and_output8x8( // 06 16 26 36 46 56 66 76 // 07 17 27 37 47 57 67 77 if (pass == 0) { - _mm_storeu_si128((__m128i*)(out0_ptr + 0 * 16), tr2_0); - 
_mm_storeu_si128((__m128i*)(out0_ptr + 1 * 16), tr2_1); - _mm_storeu_si128((__m128i*)(out0_ptr + 2 * 16), tr2_2); - _mm_storeu_si128((__m128i*)(out0_ptr + 3 * 16), tr2_3); - _mm_storeu_si128((__m128i*)(out0_ptr + 4 * 16), tr2_4); - _mm_storeu_si128((__m128i*)(out0_ptr + 5 * 16), tr2_5); - _mm_storeu_si128((__m128i*)(out0_ptr + 6 * 16), tr2_6); - _mm_storeu_si128((__m128i*)(out0_ptr + 7 * 16), tr2_7); + _mm_storeu_si128((__m128i *)(out0_ptr + 0 * 16), tr2_0); + _mm_storeu_si128((__m128i *)(out0_ptr + 1 * 16), tr2_1); + _mm_storeu_si128((__m128i *)(out0_ptr + 2 * 16), tr2_2); + _mm_storeu_si128((__m128i *)(out0_ptr + 3 * 16), tr2_3); + _mm_storeu_si128((__m128i *)(out0_ptr + 4 * 16), tr2_4); + _mm_storeu_si128((__m128i *)(out0_ptr + 5 * 16), tr2_5); + _mm_storeu_si128((__m128i *)(out0_ptr + 6 * 16), tr2_6); + _mm_storeu_si128((__m128i *)(out0_ptr + 7 * 16), tr2_7); } else { storeu_output(&tr2_0, (out1_ptr + 0 * 16)); storeu_output(&tr2_1, (out1_ptr + 1 * 16)); diff --git a/libs/libvpx/vpx_dsp/x86/halfpix_variance_impl_sse2.asm b/libs/libvpx/vpx_dsp/x86/halfpix_variance_impl_sse2.asm deleted file mode 100644 index cc26bb6124..0000000000 --- a/libs/libvpx/vpx_dsp/x86/halfpix_variance_impl_sse2.asm +++ /dev/null @@ -1,346 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - -%include "vpx_ports/x86_abi_support.asm" - -;void vpx_half_horiz_vert_variance16x_h_sse2(unsigned char *ref, -; int ref_stride, -; unsigned char *src, -; int src_stride, -; unsigned int height, -; int *sum, -; unsigned int *sumsquared) -global sym(vpx_half_horiz_vert_variance16x_h_sse2) PRIVATE -sym(vpx_half_horiz_vert_variance16x_h_sse2): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 7 - SAVE_XMM 7 - GET_GOT rbx - push rsi - push rdi - ; end prolog - - pxor xmm6, xmm6 ; error accumulator - pxor xmm7, xmm7 ; sse eaccumulator - mov rsi, arg(0) ;ref - - mov rdi, arg(2) ;src - movsxd rcx, dword ptr arg(4) ;height - movsxd rax, dword ptr arg(1) ;ref_stride - movsxd rdx, dword ptr arg(3) ;src_stride - - pxor xmm0, xmm0 ; - - movdqu xmm5, XMMWORD PTR [rsi] - movdqu xmm3, XMMWORD PTR [rsi+1] - pavgb xmm5, xmm3 ; xmm5 = avg(xmm1,xmm3) horizontal line 1 - - lea rsi, [rsi + rax] - -vpx_half_horiz_vert_variance16x_h_1: - movdqu xmm1, XMMWORD PTR [rsi] ; - movdqu xmm2, XMMWORD PTR [rsi+1] ; - pavgb xmm1, xmm2 ; xmm1 = avg(xmm1,xmm3) horizontal line i+1 - - pavgb xmm5, xmm1 ; xmm = vertical average of the above - - movdqa xmm4, xmm5 - punpcklbw xmm5, xmm0 ; xmm5 = words of above - punpckhbw xmm4, xmm0 - - movq xmm3, QWORD PTR [rdi] ; xmm3 = d0,d1,d2..d7 - punpcklbw xmm3, xmm0 ; xmm3 = words of above - psubw xmm5, xmm3 ; xmm5 -= xmm3 - - movq xmm3, QWORD PTR [rdi+8] - punpcklbw xmm3, xmm0 - psubw xmm4, xmm3 - - paddw xmm6, xmm5 ; xmm6 += accumulated column differences - paddw xmm6, xmm4 - pmaddwd xmm5, xmm5 ; xmm5 *= xmm5 - pmaddwd xmm4, xmm4 - paddd xmm7, xmm5 ; xmm7 += accumulated square column differences - paddd xmm7, xmm4 - - movdqa xmm5, xmm1 ; save xmm1 for use on the next row - - lea rsi, [rsi + rax] - lea rdi, [rdi + rdx] - - sub rcx, 1 ; - jnz vpx_half_horiz_vert_variance16x_h_1 ; - - pxor xmm1, xmm1 - pxor xmm5, xmm5 - - punpcklwd xmm0, xmm6 - punpckhwd xmm1, xmm6 - psrad xmm0, 16 - psrad xmm1, 16 - paddd xmm0, xmm1 - movdqa xmm1, xmm0 - - movdqa xmm6, xmm7 - 
punpckldq xmm6, xmm5 - punpckhdq xmm7, xmm5 - paddd xmm6, xmm7 - - punpckldq xmm0, xmm5 - punpckhdq xmm1, xmm5 - paddd xmm0, xmm1 - - movdqa xmm7, xmm6 - movdqa xmm1, xmm0 - - psrldq xmm7, 8 - psrldq xmm1, 8 - - paddd xmm6, xmm7 - paddd xmm0, xmm1 - - mov rsi, arg(5) ;[Sum] - mov rdi, arg(6) ;[SSE] - - movd [rsi], xmm0 - movd [rdi], xmm6 - - ; begin epilog - pop rdi - pop rsi - RESTORE_GOT - RESTORE_XMM - UNSHADOW_ARGS - pop rbp - ret - - -;void vpx_half_vert_variance16x_h_sse2(unsigned char *ref, -; int ref_stride, -; unsigned char *src, -; int src_stride, -; unsigned int height, -; int *sum, -; unsigned int *sumsquared) -global sym(vpx_half_vert_variance16x_h_sse2) PRIVATE -sym(vpx_half_vert_variance16x_h_sse2): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 7 - SAVE_XMM 7 - GET_GOT rbx - push rsi - push rdi - ; end prolog - - pxor xmm6, xmm6 ; error accumulator - pxor xmm7, xmm7 ; sse eaccumulator - mov rsi, arg(0) ;ref - - mov rdi, arg(2) ;src - movsxd rcx, dword ptr arg(4) ;height - movsxd rax, dword ptr arg(1) ;ref_stride - movsxd rdx, dword ptr arg(3) ;src_stride - - movdqu xmm5, XMMWORD PTR [rsi] - lea rsi, [rsi + rax ] - pxor xmm0, xmm0 - -vpx_half_vert_variance16x_h_1: - movdqu xmm3, XMMWORD PTR [rsi] - - pavgb xmm5, xmm3 ; xmm5 = avg(xmm1,xmm3) - movdqa xmm4, xmm5 - punpcklbw xmm5, xmm0 - punpckhbw xmm4, xmm0 - - movq xmm2, QWORD PTR [rdi] - punpcklbw xmm2, xmm0 - psubw xmm5, xmm2 - movq xmm2, QWORD PTR [rdi+8] - punpcklbw xmm2, xmm0 - psubw xmm4, xmm2 - - paddw xmm6, xmm5 ; xmm6 += accumulated column differences - paddw xmm6, xmm4 - pmaddwd xmm5, xmm5 ; xmm5 *= xmm5 - pmaddwd xmm4, xmm4 - paddd xmm7, xmm5 ; xmm7 += accumulated square column differences - paddd xmm7, xmm4 - - movdqa xmm5, xmm3 - - lea rsi, [rsi + rax] - lea rdi, [rdi + rdx] - - sub rcx, 1 - jnz vpx_half_vert_variance16x_h_1 - - pxor xmm1, xmm1 - pxor xmm5, xmm5 - - punpcklwd xmm0, xmm6 - punpckhwd xmm1, xmm6 - psrad xmm0, 16 - psrad xmm1, 16 - paddd xmm0, xmm1 - movdqa xmm1, xmm0 - - 
movdqa xmm6, xmm7 - punpckldq xmm6, xmm5 - punpckhdq xmm7, xmm5 - paddd xmm6, xmm7 - - punpckldq xmm0, xmm5 - punpckhdq xmm1, xmm5 - paddd xmm0, xmm1 - - movdqa xmm7, xmm6 - movdqa xmm1, xmm0 - - psrldq xmm7, 8 - psrldq xmm1, 8 - - paddd xmm6, xmm7 - paddd xmm0, xmm1 - - mov rsi, arg(5) ;[Sum] - mov rdi, arg(6) ;[SSE] - - movd [rsi], xmm0 - movd [rdi], xmm6 - - ; begin epilog - pop rdi - pop rsi - RESTORE_GOT - RESTORE_XMM - UNSHADOW_ARGS - pop rbp - ret - - -;void vpx_half_horiz_variance16x_h_sse2(unsigned char *ref, -; int ref_stride -; unsigned char *src, -; int src_stride, -; unsigned int height, -; int *sum, -; unsigned int *sumsquared) -global sym(vpx_half_horiz_variance16x_h_sse2) PRIVATE -sym(vpx_half_horiz_variance16x_h_sse2): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 7 - SAVE_XMM 7 - GET_GOT rbx - push rsi - push rdi - ; end prolog - - pxor xmm6, xmm6 ; error accumulator - pxor xmm7, xmm7 ; sse eaccumulator - mov rsi, arg(0) ;ref - - mov rdi, arg(2) ;src - movsxd rcx, dword ptr arg(4) ;height - movsxd rax, dword ptr arg(1) ;ref_stride - movsxd rdx, dword ptr arg(3) ;src_stride - - pxor xmm0, xmm0 ; - -vpx_half_horiz_variance16x_h_1: - movdqu xmm5, XMMWORD PTR [rsi] ; xmm5 = s0,s1,s2..s15 - movdqu xmm3, XMMWORD PTR [rsi+1] ; xmm3 = s1,s2,s3..s16 - - pavgb xmm5, xmm3 ; xmm5 = avg(xmm1,xmm3) - movdqa xmm1, xmm5 - punpcklbw xmm5, xmm0 ; xmm5 = words of above - punpckhbw xmm1, xmm0 - - movq xmm3, QWORD PTR [rdi] ; xmm3 = d0,d1,d2..d7 - punpcklbw xmm3, xmm0 ; xmm3 = words of above - movq xmm2, QWORD PTR [rdi+8] - punpcklbw xmm2, xmm0 - - psubw xmm5, xmm3 ; xmm5 -= xmm3 - psubw xmm1, xmm2 - paddw xmm6, xmm5 ; xmm6 += accumulated column differences - paddw xmm6, xmm1 - pmaddwd xmm5, xmm5 ; xmm5 *= xmm5 - pmaddwd xmm1, xmm1 - paddd xmm7, xmm5 ; xmm7 += accumulated square column differences - paddd xmm7, xmm1 - - lea rsi, [rsi + rax] - lea rdi, [rdi + rdx] - - sub rcx, 1 ; - jnz vpx_half_horiz_variance16x_h_1 ; - - pxor xmm1, xmm1 - pxor xmm5, xmm5 - - 
punpcklwd xmm0, xmm6 - punpckhwd xmm1, xmm6 - psrad xmm0, 16 - psrad xmm1, 16 - paddd xmm0, xmm1 - movdqa xmm1, xmm0 - - movdqa xmm6, xmm7 - punpckldq xmm6, xmm5 - punpckhdq xmm7, xmm5 - paddd xmm6, xmm7 - - punpckldq xmm0, xmm5 - punpckhdq xmm1, xmm5 - paddd xmm0, xmm1 - - movdqa xmm7, xmm6 - movdqa xmm1, xmm0 - - psrldq xmm7, 8 - psrldq xmm1, 8 - - paddd xmm6, xmm7 - paddd xmm0, xmm1 - - mov rsi, arg(5) ;[Sum] - mov rdi, arg(6) ;[SSE] - - movd [rsi], xmm0 - movd [rdi], xmm6 - - ; begin epilog - pop rdi - pop rsi - RESTORE_GOT - RESTORE_XMM - UNSHADOW_ARGS - pop rbp - ret - -SECTION_RODATA -; short xmm_bi_rd[8] = { 64, 64, 64, 64,64, 64, 64, 64}; -align 16 -xmm_bi_rd: - times 8 dw 64 -align 16 -vpx_bilinear_filters_sse2: - dw 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0 - dw 112, 112, 112, 112, 112, 112, 112, 112, 16, 16, 16, 16, 16, 16, 16, 16 - dw 96, 96, 96, 96, 96, 96, 96, 96, 32, 32, 32, 32, 32, 32, 32, 32 - dw 80, 80, 80, 80, 80, 80, 80, 80, 48, 48, 48, 48, 48, 48, 48, 48 - dw 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64 - dw 48, 48, 48, 48, 48, 48, 48, 48, 80, 80, 80, 80, 80, 80, 80, 80 - dw 32, 32, 32, 32, 32, 32, 32, 32, 96, 96, 96, 96, 96, 96, 96, 96 - dw 16, 16, 16, 16, 16, 16, 16, 16, 112, 112, 112, 112, 112, 112, 112, 112 diff --git a/libs/libvpx/vpx_dsp/x86/halfpix_variance_sse2.c b/libs/libvpx/vpx_dsp/x86/halfpix_variance_sse2.c deleted file mode 100644 index 5782155bf8..0000000000 --- a/libs/libvpx/vpx_dsp/x86/halfpix_variance_sse2.c +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" -#include "vpx/vpx_integer.h" - -void vpx_half_horiz_vert_variance16x_h_sse2(const unsigned char *ref, - int ref_stride, - const unsigned char *src, - int src_stride, - unsigned int height, - int *sum, - unsigned int *sumsquared); -void vpx_half_horiz_variance16x_h_sse2(const unsigned char *ref, int ref_stride, - const unsigned char *src, int src_stride, - unsigned int height, int *sum, - unsigned int *sumsquared); -void vpx_half_vert_variance16x_h_sse2(const unsigned char *ref, int ref_stride, - const unsigned char *src, int src_stride, - unsigned int height, int *sum, - unsigned int *sumsquared); - -uint32_t vpx_variance_halfpixvar16x16_h_sse2(const unsigned char *src, - int src_stride, - const unsigned char *dst, - int dst_stride, - uint32_t *sse) { - int xsum0; - unsigned int xxsum0; - - vpx_half_horiz_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16, - &xsum0, &xxsum0); - - *sse = xxsum0; - return (xxsum0 - (((uint32_t)xsum0 * xsum0) >> 8)); -} - -uint32_t vpx_variance_halfpixvar16x16_v_sse2(const unsigned char *src, - int src_stride, - const unsigned char *dst, - int dst_stride, - uint32_t *sse) { - int xsum0; - unsigned int xxsum0; - vpx_half_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16, - &xsum0, &xxsum0); - - *sse = xxsum0; - return (xxsum0 - (((uint32_t)xsum0 * xsum0) >> 8)); -} - - -uint32_t vpx_variance_halfpixvar16x16_hv_sse2(const unsigned char *src, - int src_stride, - const unsigned char *dst, - int dst_stride, - uint32_t *sse) { - int xsum0; - unsigned int xxsum0; - - vpx_half_horiz_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16, - &xsum0, &xxsum0); - - *sse = xxsum0; - return (xxsum0 - (((uint32_t)xsum0 * xsum0) >> 8)); -} diff --git a/libs/libvpx/vpx_dsp/x86/highbd_loopfilter_sse2.c b/libs/libvpx/vpx_dsp/x86/highbd_loopfilter_sse2.c index c4fd5e1a02..7d66411080 100644 --- a/libs/libvpx/vpx_dsp/x86/highbd_loopfilter_sse2.c +++ 
b/libs/libvpx/vpx_dsp/x86/highbd_loopfilter_sse2.c @@ -25,16 +25,13 @@ static INLINE __m128i signed_char_clamp_bd_sse2(__m128i value, int bd) { if (bd == 8) { t80 = _mm_set1_epi16(0x80); - max = _mm_subs_epi16( - _mm_subs_epi16(_mm_slli_epi16(one, 8), one), t80); + max = _mm_subs_epi16(_mm_subs_epi16(_mm_slli_epi16(one, 8), one), t80); } else if (bd == 10) { t80 = _mm_set1_epi16(0x200); - max = _mm_subs_epi16( - _mm_subs_epi16(_mm_slli_epi16(one, 10), one), t80); + max = _mm_subs_epi16(_mm_subs_epi16(_mm_slli_epi16(one, 10), one), t80); } else { // bd == 12 t80 = _mm_set1_epi16(0x800); - max = _mm_subs_epi16( - _mm_subs_epi16(_mm_slli_epi16(one, 12), one), t80); + max = _mm_subs_epi16(_mm_subs_epi16(_mm_slli_epi16(one, 12), one), t80); } min = _mm_subs_epi16(zero, t80); @@ -51,12 +48,10 @@ static INLINE __m128i signed_char_clamp_bd_sse2(__m128i value, int bd) { // TODO(debargha, peter): Break up large functions into smaller ones // in this file. -static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s, - int p, - const uint8_t *_blimit, - const uint8_t *_limit, - const uint8_t *_thresh, - int bd) { +void vpx_highbd_lpf_horizontal_edge_8_sse2(uint16_t *s, int p, + const uint8_t *_blimit, + const uint8_t *_limit, + const uint8_t *_thresh, int bd) { const __m128i zero = _mm_set1_epi16(0); const __m128i one = _mm_set1_epi16(1); __m128i blimit, limit, thresh; @@ -83,16 +78,16 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s, blimit = _mm_slli_epi16( _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 2); limit = _mm_slli_epi16( - _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 2); + _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 2); thresh = _mm_slli_epi16( - _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 2); + _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 2); } else { // bd == 12 blimit = _mm_slli_epi16( _mm_unpacklo_epi8(_mm_load_si128((const __m128i 
*)_blimit), zero), 4); limit = _mm_slli_epi16( - _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 4); + _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 4); thresh = _mm_slli_epi16( - _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 4); + _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 4); } q4 = _mm_load_si128((__m128i *)(s + 4 * p)); @@ -120,25 +115,22 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s, hev = _mm_subs_epu16(flat, thresh); hev = _mm_xor_si128(_mm_cmpeq_epi16(hev, zero), ffff); - abs_p0q0 =_mm_adds_epu16(abs_p0q0, abs_p0q0); // abs(p0 - q0) * 2 - abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1); // abs(p1 - q1) / 2 + abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0); // abs(p0 - q0) * 2 + abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1); // abs(p1 - q1) / 2 mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit); mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff); mask = _mm_and_si128(mask, _mm_adds_epu16(limit, one)); - work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p1, p0), - _mm_subs_epu16(p0, p1)), - _mm_or_si128(_mm_subs_epu16(q1, q0), - _mm_subs_epu16(q0, q1))); + work = _mm_max_epi16( + _mm_or_si128(_mm_subs_epu16(p1, p0), _mm_subs_epu16(p0, p1)), + _mm_or_si128(_mm_subs_epu16(q1, q0), _mm_subs_epu16(q0, q1))); mask = _mm_max_epi16(work, mask); - work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p1), - _mm_subs_epu16(p1, p2)), - _mm_or_si128(_mm_subs_epu16(q2, q1), - _mm_subs_epu16(q1, q2))); + work = _mm_max_epi16( + _mm_or_si128(_mm_subs_epu16(p2, p1), _mm_subs_epu16(p1, p2)), + _mm_or_si128(_mm_subs_epu16(q2, q1), _mm_subs_epu16(q1, q2))); mask = _mm_max_epi16(work, mask); - work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p3, p2), - _mm_subs_epu16(p2, p3)), - _mm_or_si128(_mm_subs_epu16(q3, q2), - _mm_subs_epu16(q2, q3))); + work = _mm_max_epi16( + _mm_or_si128(_mm_subs_epu16(p3, p2), _mm_subs_epu16(p2, p3)), + _mm_or_si128(_mm_subs_epu16(q3, q2), 
_mm_subs_epu16(q2, q3))); mask = _mm_max_epi16(work, mask); mask = _mm_subs_epu16(mask, limit); @@ -162,8 +154,8 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s, ps0 = _mm_subs_epi16(p0, t80); qs0 = _mm_subs_epi16(q0, t80); - filt = _mm_and_si128( - signed_char_clamp_bd_sse2(_mm_subs_epi16(ps1, qs1), bd), hev); + filt = _mm_and_si128(signed_char_clamp_bd_sse2(_mm_subs_epi16(ps1, qs1), bd), + hev); work_a = _mm_subs_epi16(qs0, ps0); filt = _mm_adds_epi16(filt, work_a); filt = _mm_adds_epi16(filt, work_a); @@ -177,33 +169,27 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s, filter2 = _mm_srai_epi16(filter2, 0x3); qs0 = _mm_adds_epi16( - signed_char_clamp_bd_sse2(_mm_subs_epi16(qs0, filter1), bd), - t80); + signed_char_clamp_bd_sse2(_mm_subs_epi16(qs0, filter1), bd), t80); ps0 = _mm_adds_epi16( - signed_char_clamp_bd_sse2(_mm_adds_epi16(ps0, filter2), bd), - t80); + signed_char_clamp_bd_sse2(_mm_adds_epi16(ps0, filter2), bd), t80); filt = _mm_adds_epi16(filter1, t1); filt = _mm_srai_epi16(filt, 1); filt = _mm_andnot_si128(hev, filt); - qs1 = _mm_adds_epi16( - signed_char_clamp_bd_sse2(_mm_subs_epi16(qs1, filt), bd), - t80); - ps1 = _mm_adds_epi16( - signed_char_clamp_bd_sse2(_mm_adds_epi16(ps1, filt), bd), - t80); + qs1 = _mm_adds_epi16(signed_char_clamp_bd_sse2(_mm_subs_epi16(qs1, filt), bd), + t80); + ps1 = _mm_adds_epi16(signed_char_clamp_bd_sse2(_mm_adds_epi16(ps1, filt), bd), + t80); // end highbd_filter4 // loopfilter done // highbd_flat_mask4 - flat = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p0), - _mm_subs_epu16(p0, p2)), - _mm_or_si128(_mm_subs_epu16(p3, p0), - _mm_subs_epu16(p0, p3))); - work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(q2, q0), - _mm_subs_epu16(q0, q2)), - _mm_or_si128(_mm_subs_epu16(q3, q0), - _mm_subs_epu16(q0, q3))); + flat = _mm_max_epi16( + _mm_or_si128(_mm_subs_epu16(p2, p0), _mm_subs_epu16(p0, p2)), + _mm_or_si128(_mm_subs_epu16(p3, p0), _mm_subs_epu16(p0, p3))); + work = _mm_max_epi16( + 
_mm_or_si128(_mm_subs_epu16(q2, q0), _mm_subs_epu16(q0, q2)), + _mm_or_si128(_mm_subs_epu16(q3, q0), _mm_subs_epu16(q0, q3))); flat = _mm_max_epi16(work, flat); work = _mm_max_epi16(abs_p1p0, abs_q1q0); flat = _mm_max_epi16(work, flat); @@ -231,27 +217,23 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s, // highbd_flat_mask5 (arguments passed in are p0, q0, p4-p7, q4-q7 // but referred to as p0-p4 & q0-q4 in fn) - flat2 = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p4, p0), - _mm_subs_epu16(p0, p4)), - _mm_or_si128(_mm_subs_epu16(q4, q0), - _mm_subs_epu16(q0, q4))); + flat2 = _mm_max_epi16( + _mm_or_si128(_mm_subs_epu16(p4, p0), _mm_subs_epu16(p0, p4)), + _mm_or_si128(_mm_subs_epu16(q4, q0), _mm_subs_epu16(q0, q4))); - work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p5, p0), - _mm_subs_epu16(p0, p5)), - _mm_or_si128(_mm_subs_epu16(q5, q0), - _mm_subs_epu16(q0, q5))); + work = _mm_max_epi16( + _mm_or_si128(_mm_subs_epu16(p5, p0), _mm_subs_epu16(p0, p5)), + _mm_or_si128(_mm_subs_epu16(q5, q0), _mm_subs_epu16(q0, q5))); flat2 = _mm_max_epi16(work, flat2); - work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p6, p0), - _mm_subs_epu16(p0, p6)), - _mm_or_si128(_mm_subs_epu16(q6, q0), - _mm_subs_epu16(q0, q6))); + work = _mm_max_epi16( + _mm_or_si128(_mm_subs_epu16(p6, p0), _mm_subs_epu16(p0, p6)), + _mm_or_si128(_mm_subs_epu16(q6, q0), _mm_subs_epu16(q0, q6))); flat2 = _mm_max_epi16(work, flat2); - work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p7, p0), - _mm_subs_epu16(p0, p7)), - _mm_or_si128(_mm_subs_epu16(q7, q0), - _mm_subs_epu16(q0, q7))); + work = _mm_max_epi16( + _mm_or_si128(_mm_subs_epu16(p7, p0), _mm_subs_epu16(p0, p7)), + _mm_or_si128(_mm_subs_epu16(q7, q0), _mm_subs_epu16(q0, q7))); flat2 = _mm_max_epi16(work, flat2); if (bd == 8) @@ -270,29 +252,26 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s, eight = _mm_set1_epi16(8); four = _mm_set1_epi16(4); - pixelFilter_p = _mm_add_epi16(_mm_add_epi16(p6, p5), - _mm_add_epi16(p4, 
p3)); - pixelFilter_q = _mm_add_epi16(_mm_add_epi16(q6, q5), - _mm_add_epi16(q4, q3)); + pixelFilter_p = _mm_add_epi16(_mm_add_epi16(p6, p5), _mm_add_epi16(p4, p3)); + pixelFilter_q = _mm_add_epi16(_mm_add_epi16(q6, q5), _mm_add_epi16(q4, q3)); pixetFilter_p2p1p0 = _mm_add_epi16(p0, _mm_add_epi16(p2, p1)); pixelFilter_p = _mm_add_epi16(pixelFilter_p, pixetFilter_p2p1p0); pixetFilter_q2q1q0 = _mm_add_epi16(q0, _mm_add_epi16(q2, q1)); pixelFilter_q = _mm_add_epi16(pixelFilter_q, pixetFilter_q2q1q0); - pixelFilter_p = _mm_add_epi16(eight, _mm_add_epi16(pixelFilter_p, - pixelFilter_q)); - pixetFilter_p2p1p0 = _mm_add_epi16(four, - _mm_add_epi16(pixetFilter_p2p1p0, - pixetFilter_q2q1q0)); - flat2_p0 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, - _mm_add_epi16(p7, p0)), 4); - flat2_q0 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, - _mm_add_epi16(q7, q0)), 4); - flat_p0 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0, - _mm_add_epi16(p3, p0)), 3); - flat_q0 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0, - _mm_add_epi16(q3, q0)), 3); + pixelFilter_p = + _mm_add_epi16(eight, _mm_add_epi16(pixelFilter_p, pixelFilter_q)); + pixetFilter_p2p1p0 = _mm_add_epi16( + four, _mm_add_epi16(pixetFilter_p2p1p0, pixetFilter_q2q1q0)); + flat2_p0 = + _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, _mm_add_epi16(p7, p0)), 4); + flat2_q0 = + _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, _mm_add_epi16(q7, q0)), 4); + flat_p0 = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(p3, p0)), 3); + flat_q0 = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(q3, q0)), 3); sum_p7 = _mm_add_epi16(p7, p7); sum_q7 = _mm_add_epi16(q7, q7); @@ -308,10 +287,10 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s, pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_p2p1p0, p2); pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q2); - flat_p1 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0, - _mm_add_epi16(sum_p3, p1)), 3); - flat_q1 = 
_mm_srli_epi16(_mm_add_epi16(pixetFilter_q2q1q0, - _mm_add_epi16(sum_q3, q1)), 3); + flat_p1 = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(sum_p3, p1)), 3); + flat_q1 = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_q2q1q0, _mm_add_epi16(sum_q3, q1)), 3); sum_p7 = _mm_add_epi16(sum_p7, p7); sum_q7 = _mm_add_epi16(sum_q7, q7); @@ -320,53 +299,53 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s, pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q5); pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p5); - flat2_p2 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, - _mm_add_epi16(sum_p7, p2)), 4); - flat2_q2 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q, - _mm_add_epi16(sum_q7, q2)), 4); + flat2_p2 = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p2)), 4); + flat2_q2 = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q2)), 4); pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q1); pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_q2q1q0, p1); - flat_p2 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0, - _mm_add_epi16(sum_p3, p2)), 3); - flat_q2 = _mm_srli_epi16(_mm_add_epi16(pixetFilter_q2q1q0, - _mm_add_epi16(sum_q3, q2)), 3); + flat_p2 = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(sum_p3, p2)), 3); + flat_q2 = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_q2q1q0, _mm_add_epi16(sum_q3, q2)), 3); sum_p7 = _mm_add_epi16(sum_p7, p7); sum_q7 = _mm_add_epi16(sum_q7, q7); pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q4); pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p4); - flat2_p3 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, - _mm_add_epi16(sum_p7, p3)), 4); - flat2_q3 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q, - _mm_add_epi16(sum_q7, q3)), 4); + flat2_p3 = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p3)), 4); + flat2_q3 = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q3)), 4); sum_p7 = _mm_add_epi16(sum_p7, p7); sum_q7 = 
_mm_add_epi16(sum_q7, q7); pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q3); pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p3); - flat2_p4 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, - _mm_add_epi16(sum_p7, p4)), 4); - flat2_q4 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q, - _mm_add_epi16(sum_q7, q4)), 4); + flat2_p4 = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p4)), 4); + flat2_q4 = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q4)), 4); sum_p7 = _mm_add_epi16(sum_p7, p7); sum_q7 = _mm_add_epi16(sum_q7, q7); pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q2); pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p2); - flat2_p5 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, - _mm_add_epi16(sum_p7, p5)), 4); - flat2_q5 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q, - _mm_add_epi16(sum_q7, q5)), 4); + flat2_p5 = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p5)), 4); + flat2_q5 = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q5)), 4); sum_p7 = _mm_add_epi16(sum_p7, p7); sum_q7 = _mm_add_epi16(sum_q7, q7); pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q1); pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p1); - flat2_p6 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, - _mm_add_epi16(sum_p7, p6)), 4); - flat2_q6 = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q, - _mm_add_epi16(sum_q7, q6)), 4); + flat2_p6 = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p6)), 4); + flat2_q6 = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q6)), 4); // wide flat // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -496,34 +475,18 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_8(uint16_t *s, _mm_store_si128((__m128i *)(s - 0 * p), q0); } -static void highbd_mb_lpf_horizontal_edge_w_sse2_16(uint16_t *s, - int p, - const uint8_t *_blimit, - const uint8_t *_limit, - const uint8_t *_thresh, - int bd) { - highbd_mb_lpf_horizontal_edge_w_sse2_8(s, p, _blimit, _limit, 
_thresh, bd); - highbd_mb_lpf_horizontal_edge_w_sse2_8(s + 8, p, _blimit, _limit, _thresh, - bd); -} - -// TODO(yunqingwang): remove count and call these 2 functions(8 or 16) directly. -void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p, - const uint8_t *_blimit, - const uint8_t *_limit, - const uint8_t *_thresh, - int count, int bd) { - if (count == 1) - highbd_mb_lpf_horizontal_edge_w_sse2_8(s, p, _blimit, _limit, _thresh, bd); - else - highbd_mb_lpf_horizontal_edge_w_sse2_16(s, p, _blimit, _limit, _thresh, bd); +void vpx_highbd_lpf_horizontal_edge_16_sse2(uint16_t *s, int p, + const uint8_t *_blimit, + const uint8_t *_limit, + const uint8_t *_thresh, int bd) { + vpx_highbd_lpf_horizontal_edge_8_sse2(s, p, _blimit, _limit, _thresh, bd); + vpx_highbd_lpf_horizontal_edge_8_sse2(s + 8, p, _blimit, _limit, _thresh, bd); } void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p, const uint8_t *_blimit, const uint8_t *_limit, - const uint8_t *_thresh, - int count, int bd) { + const uint8_t *_thresh, int bd) { DECLARE_ALIGNED(16, uint16_t, flat_op2[16]); DECLARE_ALIGNED(16, uint16_t, flat_op1[16]); DECLARE_ALIGNED(16, uint16_t, flat_op0[16]); @@ -556,8 +519,6 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p, __m128i work_a; __m128i filter1, filter2; - (void)count; - if (bd == 8) { blimit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero); limit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero); @@ -565,19 +526,19 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p, t80 = _mm_set1_epi16(0x80); } else if (bd == 10) { blimit = _mm_slli_epi16( - _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 2); + _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 2); limit = _mm_slli_epi16( - _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 2); + _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 2); thresh = _mm_slli_epi16( - 
_mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 2); + _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 2); t80 = _mm_set1_epi16(0x200); } else { // bd == 12 blimit = _mm_slli_epi16( - _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 4); + _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero), 4); limit = _mm_slli_epi16( - _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 4); + _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero), 4); thresh = _mm_slli_epi16( - _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 4); + _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_thresh), zero), 4); t80 = _mm_set1_epi16(0x800); } @@ -587,20 +548,16 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p, qs1 = _mm_subs_epi16(q1, t80); // filter_mask and hev_mask - abs_p1p0 = _mm_or_si128(_mm_subs_epu16(p1, p0), - _mm_subs_epu16(p0, p1)); - abs_q1q0 = _mm_or_si128(_mm_subs_epu16(q1, q0), - _mm_subs_epu16(q0, q1)); + abs_p1p0 = _mm_or_si128(_mm_subs_epu16(p1, p0), _mm_subs_epu16(p0, p1)); + abs_q1q0 = _mm_or_si128(_mm_subs_epu16(q1, q0), _mm_subs_epu16(q0, q1)); - abs_p0q0 = _mm_or_si128(_mm_subs_epu16(p0, q0), - _mm_subs_epu16(q0, p0)); - abs_p1q1 = _mm_or_si128(_mm_subs_epu16(p1, q1), - _mm_subs_epu16(q1, p1)); + abs_p0q0 = _mm_or_si128(_mm_subs_epu16(p0, q0), _mm_subs_epu16(q0, p0)); + abs_p1q1 = _mm_or_si128(_mm_subs_epu16(p1, q1), _mm_subs_epu16(q1, p1)); flat = _mm_max_epi16(abs_p1p0, abs_q1q0); hev = _mm_subs_epu16(flat, thresh); hev = _mm_xor_si128(_mm_cmpeq_epi16(hev, zero), ffff); - abs_p0q0 =_mm_adds_epu16(abs_p0q0, abs_p0q0); + abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0); abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1); mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit); mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff); @@ -612,28 +569,24 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p, mask = _mm_max_epi16(abs_q1q0, mask); // 
mask |= (abs(q1 - q0) > limit) * -1; - work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p1), - _mm_subs_epu16(p1, p2)), - _mm_or_si128(_mm_subs_epu16(q2, q1), - _mm_subs_epu16(q1, q2))); + work = _mm_max_epi16( + _mm_or_si128(_mm_subs_epu16(p2, p1), _mm_subs_epu16(p1, p2)), + _mm_or_si128(_mm_subs_epu16(q2, q1), _mm_subs_epu16(q1, q2))); mask = _mm_max_epi16(work, mask); - work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p3, p2), - _mm_subs_epu16(p2, p3)), - _mm_or_si128(_mm_subs_epu16(q3, q2), - _mm_subs_epu16(q2, q3))); + work = _mm_max_epi16( + _mm_or_si128(_mm_subs_epu16(p3, p2), _mm_subs_epu16(p2, p3)), + _mm_or_si128(_mm_subs_epu16(q3, q2), _mm_subs_epu16(q2, q3))); mask = _mm_max_epi16(work, mask); mask = _mm_subs_epu16(mask, limit); mask = _mm_cmpeq_epi16(mask, zero); // flat_mask4 - flat = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p0), - _mm_subs_epu16(p0, p2)), - _mm_or_si128(_mm_subs_epu16(q2, q0), - _mm_subs_epu16(q0, q2))); - work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p3, p0), - _mm_subs_epu16(p0, p3)), - _mm_or_si128(_mm_subs_epu16(q3, q0), - _mm_subs_epu16(q0, q3))); + flat = _mm_max_epi16( + _mm_or_si128(_mm_subs_epu16(p2, p0), _mm_subs_epu16(p0, p2)), + _mm_or_si128(_mm_subs_epu16(q2, q0), _mm_subs_epu16(q0, q2))); + work = _mm_max_epi16( + _mm_or_si128(_mm_subs_epu16(p3, p0), _mm_subs_epu16(p0, p3)), + _mm_or_si128(_mm_subs_epu16(q3, q0), _mm_subs_epu16(q0, q3))); flat = _mm_max_epi16(work, flat); flat = _mm_max_epi16(abs_p1p0, flat); flat = _mm_max_epi16(abs_q1q0, flat); @@ -756,24 +709,18 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p, _mm_store_si128((__m128i *)(s + 2 * p), q2); } -void vpx_highbd_lpf_horizontal_8_dual_sse2(uint16_t *s, int p, - const uint8_t *_blimit0, - const uint8_t *_limit0, - const uint8_t *_thresh0, - const uint8_t *_blimit1, - const uint8_t *_limit1, - const uint8_t *_thresh1, - int bd) { - vpx_highbd_lpf_horizontal_8_sse2(s, p, _blimit0, _limit0, _thresh0, 1, bd); - 
vpx_highbd_lpf_horizontal_8_sse2(s + 8, p, _blimit1, _limit1, _thresh1, - 1, bd); +void vpx_highbd_lpf_horizontal_8_dual_sse2( + uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0, + const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1, + const uint8_t *_thresh1, int bd) { + vpx_highbd_lpf_horizontal_8_sse2(s, p, _blimit0, _limit0, _thresh0, bd); + vpx_highbd_lpf_horizontal_8_sse2(s + 8, p, _blimit1, _limit1, _thresh1, bd); } void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p, const uint8_t *_blimit, const uint8_t *_limit, - const uint8_t *_thresh, - int count, int bd) { + const uint8_t *_thresh, int bd) { const __m128i zero = _mm_set1_epi16(0); __m128i blimit, limit, thresh; __m128i mask, hev, flat; @@ -785,16 +732,16 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p, __m128i q1 = _mm_loadu_si128((__m128i *)(s + 1 * p)); __m128i q2 = _mm_loadu_si128((__m128i *)(s + 2 * p)); __m128i q3 = _mm_loadu_si128((__m128i *)(s + 3 * p)); - const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu16(p1, p0), - _mm_subs_epu16(p0, p1)); - const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu16(q1, q0), - _mm_subs_epu16(q0, q1)); + const __m128i abs_p1p0 = + _mm_or_si128(_mm_subs_epu16(p1, p0), _mm_subs_epu16(p0, p1)); + const __m128i abs_q1q0 = + _mm_or_si128(_mm_subs_epu16(q1, q0), _mm_subs_epu16(q0, q1)); const __m128i ffff = _mm_cmpeq_epi16(abs_p1p0, abs_p1p0); const __m128i one = _mm_set1_epi16(1); - __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu16(p0, q0), - _mm_subs_epu16(q0, p0)); - __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu16(p1, q1), - _mm_subs_epu16(q1, p1)); + __m128i abs_p0q0 = + _mm_or_si128(_mm_subs_epu16(p0, q0), _mm_subs_epu16(q0, p0)); + __m128i abs_p1q1 = + _mm_or_si128(_mm_subs_epu16(p1, q1), _mm_subs_epu16(q1, p1)); __m128i work; const __m128i t4 = _mm_set1_epi16(4); const __m128i t3 = _mm_set1_epi16(3); @@ -813,8 +760,6 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p, __m128i work_a; __m128i filter1, 
filter2; - (void)count; - if (bd == 8) { blimit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_blimit), zero); limit = _mm_unpacklo_epi8(_mm_load_si128((const __m128i *)_limit), zero); @@ -860,7 +805,7 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p, hev = _mm_subs_epu16(flat, thresh); hev = _mm_xor_si128(_mm_cmpeq_epi16(hev, zero), ffff); - abs_p0q0 =_mm_adds_epu16(abs_p0q0, abs_p0q0); + abs_p0q0 = _mm_adds_epu16(abs_p0q0, abs_p0q0); abs_p1q1 = _mm_srli_epi16(abs_p1q1, 1); mask = _mm_subs_epu16(_mm_adds_epu16(abs_p0q0, abs_p1q1), blimit); mask = _mm_xor_si128(_mm_cmpeq_epi16(mask, zero), ffff); @@ -870,15 +815,13 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p, mask = _mm_max_epi16(flat, mask); // mask |= (abs(p1 - p0) > limit) * -1; // mask |= (abs(q1 - q0) > limit) * -1; - work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(p2, p1), - _mm_subs_epu16(p1, p2)), - _mm_or_si128(_mm_subs_epu16(p3, p2), - _mm_subs_epu16(p2, p3))); + work = _mm_max_epi16( + _mm_or_si128(_mm_subs_epu16(p2, p1), _mm_subs_epu16(p1, p2)), + _mm_or_si128(_mm_subs_epu16(p3, p2), _mm_subs_epu16(p2, p3))); mask = _mm_max_epi16(work, mask); - work = _mm_max_epi16(_mm_or_si128(_mm_subs_epu16(q2, q1), - _mm_subs_epu16(q1, q2)), - _mm_or_si128(_mm_subs_epu16(q3, q2), - _mm_subs_epu16(q2, q3))); + work = _mm_max_epi16( + _mm_or_si128(_mm_subs_epu16(q2, q1), _mm_subs_epu16(q1, q2)), + _mm_or_si128(_mm_subs_epu16(q3, q2), _mm_subs_epu16(q2, q3))); mask = _mm_max_epi16(work, mask); mask = _mm_subs_epu16(mask, limit); mask = _mm_cmpeq_epi16(mask, zero); @@ -900,8 +843,8 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p, // Filter1 >> 3 work_a = _mm_cmpgt_epi16(zero, filter1); // get the values that are <0 filter1 = _mm_srli_epi16(filter1, 3); - work_a = _mm_and_si128(work_a, tffe0); // sign bits for the values < 0 - filter1 = _mm_and_si128(filter1, t1f); // clamp the range + work_a = _mm_and_si128(work_a, tffe0); // sign bits for the values < 0 + filter1 = 
_mm_and_si128(filter1, t1f); // clamp the range filter1 = _mm_or_si128(filter1, work_a); // reinsert the sign bits // Filter2 >> 3 @@ -923,12 +866,12 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p, q0 = _mm_adds_epi16( signed_char_clamp_bd_sse2(_mm_subs_epi16(qs0, filter1), bd), t80); - q1 = _mm_adds_epi16( - signed_char_clamp_bd_sse2(_mm_subs_epi16(qs1, filt), bd), t80); + q1 = _mm_adds_epi16(signed_char_clamp_bd_sse2(_mm_subs_epi16(qs1, filt), bd), + t80); p0 = _mm_adds_epi16( signed_char_clamp_bd_sse2(_mm_adds_epi16(ps0, filter2), bd), t80); - p1 = _mm_adds_epi16( - signed_char_clamp_bd_sse2(_mm_adds_epi16(ps1, filt), bd), t80); + p1 = _mm_adds_epi16(signed_char_clamp_bd_sse2(_mm_adds_epi16(ps1, filt), bd), + t80); _mm_storeu_si128((__m128i *)(s - 2 * p), p1); _mm_storeu_si128((__m128i *)(s - 1 * p), p0); @@ -936,36 +879,38 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p, _mm_storeu_si128((__m128i *)(s + 1 * p), q1); } -void vpx_highbd_lpf_horizontal_4_dual_sse2(uint16_t *s, int p, - const uint8_t *_blimit0, - const uint8_t *_limit0, - const uint8_t *_thresh0, - const uint8_t *_blimit1, - const uint8_t *_limit1, - const uint8_t *_thresh1, - int bd) { - vpx_highbd_lpf_horizontal_4_sse2(s, p, _blimit0, _limit0, _thresh0, 1, bd); - vpx_highbd_lpf_horizontal_4_sse2(s + 8, p, _blimit1, _limit1, _thresh1, 1, - bd); +void vpx_highbd_lpf_horizontal_4_dual_sse2( + uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0, + const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1, + const uint8_t *_thresh1, int bd) { + vpx_highbd_lpf_horizontal_4_sse2(s, p, _blimit0, _limit0, _thresh0, bd); + vpx_highbd_lpf_horizontal_4_sse2(s + 8, p, _blimit1, _limit1, _thresh1, bd); } -static INLINE void highbd_transpose(uint16_t *src[], int in_p, - uint16_t *dst[], int out_p, - int num_8x8_to_transpose) { +static INLINE void highbd_transpose(uint16_t *src[], int in_p, uint16_t *dst[], + int out_p, int num_8x8_to_transpose) { int 
idx8x8 = 0; __m128i p0, p1, p2, p3, p4, p5, p6, p7, x0, x1, x2, x3, x4, x5, x6, x7; do { uint16_t *in = src[idx8x8]; uint16_t *out = dst[idx8x8]; - p0 = _mm_loadu_si128((__m128i *)(in + 0*in_p)); // 00 01 02 03 04 05 06 07 - p1 = _mm_loadu_si128((__m128i *)(in + 1*in_p)); // 10 11 12 13 14 15 16 17 - p2 = _mm_loadu_si128((__m128i *)(in + 2*in_p)); // 20 21 22 23 24 25 26 27 - p3 = _mm_loadu_si128((__m128i *)(in + 3*in_p)); // 30 31 32 33 34 35 36 37 - p4 = _mm_loadu_si128((__m128i *)(in + 4*in_p)); // 40 41 42 43 44 45 46 47 - p5 = _mm_loadu_si128((__m128i *)(in + 5*in_p)); // 50 51 52 53 54 55 56 57 - p6 = _mm_loadu_si128((__m128i *)(in + 6*in_p)); // 60 61 62 63 64 65 66 67 - p7 = _mm_loadu_si128((__m128i *)(in + 7*in_p)); // 70 71 72 73 74 75 76 77 + p0 = + _mm_loadu_si128((__m128i *)(in + 0 * in_p)); // 00 01 02 03 04 05 06 07 + p1 = + _mm_loadu_si128((__m128i *)(in + 1 * in_p)); // 10 11 12 13 14 15 16 17 + p2 = + _mm_loadu_si128((__m128i *)(in + 2 * in_p)); // 20 21 22 23 24 25 26 27 + p3 = + _mm_loadu_si128((__m128i *)(in + 3 * in_p)); // 30 31 32 33 34 35 36 37 + p4 = + _mm_loadu_si128((__m128i *)(in + 4 * in_p)); // 40 41 42 43 44 45 46 47 + p5 = + _mm_loadu_si128((__m128i *)(in + 5 * in_p)); // 50 51 52 53 54 55 56 57 + p6 = + _mm_loadu_si128((__m128i *)(in + 6 * in_p)); // 60 61 62 63 64 65 66 67 + p7 = + _mm_loadu_si128((__m128i *)(in + 7 * in_p)); // 70 71 72 73 74 75 76 77 // 00 10 01 11 02 12 03 13 x0 = _mm_unpacklo_epi16(p0, p1); // 20 30 21 31 22 32 23 33 @@ -983,9 +928,9 @@ static INLINE void highbd_transpose(uint16_t *src[], int in_p, // 01 11 21 31 41 51 61 71 x7 = _mm_unpackhi_epi64(x4, x5); - _mm_storeu_si128((__m128i *)(out + 0*out_p), x6); + _mm_storeu_si128((__m128i *)(out + 0 * out_p), x6); // 00 10 20 30 40 50 60 70 - _mm_storeu_si128((__m128i *)(out + 1*out_p), x7); + _mm_storeu_si128((__m128i *)(out + 1 * out_p), x7); // 01 11 21 31 41 51 61 71 // 02 12 22 32 03 13 23 33 @@ -997,9 +942,9 @@ static INLINE void highbd_transpose(uint16_t 
*src[], int in_p, // 03 13 23 33 43 53 63 73 x7 = _mm_unpackhi_epi64(x4, x5); - _mm_storeu_si128((__m128i *)(out + 2*out_p), x6); + _mm_storeu_si128((__m128i *)(out + 2 * out_p), x6); // 02 12 22 32 42 52 62 72 - _mm_storeu_si128((__m128i *)(out + 3*out_p), x7); + _mm_storeu_si128((__m128i *)(out + 3 * out_p), x7); // 03 13 23 33 43 53 63 73 // 04 14 05 15 06 16 07 17 @@ -1019,9 +964,9 @@ static INLINE void highbd_transpose(uint16_t *src[], int in_p, // 05 15 25 35 45 55 65 75 x7 = _mm_unpackhi_epi64(x4, x5); - _mm_storeu_si128((__m128i *)(out + 4*out_p), x6); + _mm_storeu_si128((__m128i *)(out + 4 * out_p), x6); // 04 14 24 34 44 54 64 74 - _mm_storeu_si128((__m128i *)(out + 5*out_p), x7); + _mm_storeu_si128((__m128i *)(out + 5 * out_p), x7); // 05 15 25 35 45 55 65 75 // 06 16 26 36 07 17 27 37 @@ -1033,15 +978,15 @@ static INLINE void highbd_transpose(uint16_t *src[], int in_p, // 07 17 27 37 47 57 67 77 x7 = _mm_unpackhi_epi64(x4, x5); - _mm_storeu_si128((__m128i *)(out + 6*out_p), x6); + _mm_storeu_si128((__m128i *)(out + 6 * out_p), x6); // 06 16 26 36 46 56 66 76 - _mm_storeu_si128((__m128i *)(out + 7*out_p), x7); + _mm_storeu_si128((__m128i *)(out + 7 * out_p), x7); // 07 17 27 37 47 57 67 77 } while (++idx8x8 < num_8x8_to_transpose); } -static INLINE void highbd_transpose8x16(uint16_t *in0, uint16_t *in1, - int in_p, uint16_t *out, int out_p) { +static INLINE void highbd_transpose8x16(uint16_t *in0, uint16_t *in1, int in_p, + uint16_t *out, int out_p) { uint16_t *src0[1]; uint16_t *src1[1]; uint16_t *dest0[1]; @@ -1054,15 +999,12 @@ static INLINE void highbd_transpose8x16(uint16_t *in0, uint16_t *in1, highbd_transpose(src1, in_p, dest1, out_p, 1); } -void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count, int bd) { +void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh, + int bd) { DECLARE_ALIGNED(16, 
uint16_t, t_dst[8 * 8]); uint16_t *src[1]; uint16_t *dst[1]; - (void)count; // Transpose 8x8 src[0] = s - 4; @@ -1071,8 +1013,7 @@ void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, highbd_transpose(src, p, dst, 8, 1); // Loop filtering - vpx_highbd_lpf_horizontal_4_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, 1, - bd); + vpx_highbd_lpf_horizontal_4_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, bd); src[0] = t_dst; dst[0] = s - 4; @@ -1081,14 +1022,10 @@ void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, highbd_transpose(src, 8, dst, p, 1); } -void vpx_highbd_lpf_vertical_4_dual_sse2(uint16_t *s, int p, - const uint8_t *blimit0, - const uint8_t *limit0, - const uint8_t *thresh0, - const uint8_t *blimit1, - const uint8_t *limit1, - const uint8_t *thresh1, - int bd) { +void vpx_highbd_lpf_vertical_4_dual_sse2( + uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, + const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, + const uint8_t *thresh1, int bd) { DECLARE_ALIGNED(16, uint16_t, t_dst[16 * 8]); uint16_t *src[2]; uint16_t *dst[2]; @@ -1108,15 +1045,12 @@ void vpx_highbd_lpf_vertical_4_dual_sse2(uint16_t *s, int p, highbd_transpose(src, 16, dst, p, 2); } -void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, - const uint8_t *blimit, - const uint8_t *limit, - const uint8_t *thresh, - int count, int bd) { +void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, const uint8_t *blimit, + const uint8_t *limit, const uint8_t *thresh, + int bd) { DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 8]); uint16_t *src[1]; uint16_t *dst[1]; - (void)count; // Transpose 8x8 src[0] = s - 4; @@ -1125,8 +1059,7 @@ void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, highbd_transpose(src, p, dst, 8, 1); // Loop filtering - vpx_highbd_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, 1, - bd); + vpx_highbd_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, bd); src[0] = t_dst; dst[0] = s - 4; @@ -1135,14 +1068,10 @@ void 
vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, highbd_transpose(src, 8, dst, p, 1); } -void vpx_highbd_lpf_vertical_8_dual_sse2(uint16_t *s, int p, - const uint8_t *blimit0, - const uint8_t *limit0, - const uint8_t *thresh0, - const uint8_t *blimit1, - const uint8_t *limit1, - const uint8_t *thresh1, - int bd) { +void vpx_highbd_lpf_vertical_8_dual_sse2( + uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, + const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, + const uint8_t *thresh1, int bd) { DECLARE_ALIGNED(16, uint16_t, t_dst[16 * 8]); uint16_t *src[2]; uint16_t *dst[2]; @@ -1163,11 +1092,9 @@ void vpx_highbd_lpf_vertical_8_dual_sse2(uint16_t *s, int p, highbd_transpose(src, 16, dst, p, 2); } -void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int p, - const uint8_t *blimit, +void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit, const uint8_t *limit, - const uint8_t *thresh, - int bd) { + const uint8_t *thresh, int bd) { DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 16]); uint16_t *src[2]; uint16_t *dst[2]; @@ -1181,8 +1108,8 @@ void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int p, highbd_transpose(src, p, dst, 8, 2); // Loop filtering - highbd_mb_lpf_horizontal_edge_w_sse2_8(t_dst + 8 * 8, 8, blimit, limit, - thresh, bd); + vpx_highbd_lpf_horizontal_edge_8_sse2(t_dst + 8 * 8, 8, blimit, limit, thresh, + bd); src[0] = t_dst; src[1] = t_dst + 8 * 8; dst[0] = s - 8; @@ -1192,12 +1119,10 @@ void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int p, highbd_transpose(src, 8, dst, p, 2); } -void vpx_highbd_lpf_vertical_16_dual_sse2(uint16_t *s, - int p, +void vpx_highbd_lpf_vertical_16_dual_sse2(uint16_t *s, int p, const uint8_t *blimit, const uint8_t *limit, - const uint8_t *thresh, - int bd) { + const uint8_t *thresh, int bd) { DECLARE_ALIGNED(16, uint16_t, t_dst[256]); // Transpose 16x16 @@ -1205,8 +1130,8 @@ void vpx_highbd_lpf_vertical_16_dual_sse2(uint16_t *s, highbd_transpose8x16(s, s + 8 * p, p, t_dst 
+ 8 * 16, 16); // Loop filtering - highbd_mb_lpf_horizontal_edge_w_sse2_16(t_dst + 8 * 16, 16, blimit, limit, - thresh, bd); + vpx_highbd_lpf_horizontal_edge_16_sse2(t_dst + 8 * 16, 16, blimit, limit, + thresh, bd); // Transpose back highbd_transpose8x16(t_dst, t_dst + 8 * 16, 16, s - 8, p); diff --git a/libs/libvpx/vpx_dsp/x86/highbd_quantize_intrin_sse2.c b/libs/libvpx/vpx_dsp/x86/highbd_quantize_intrin_sse2.c index fd46bef3d8..64b56223ed 100644 --- a/libs/libvpx/vpx_dsp/x86/highbd_quantize_intrin_sse2.c +++ b/libs/libvpx/vpx_dsp/x86/highbd_quantize_intrin_sse2.c @@ -15,26 +15,19 @@ #include "vpx_ports/mem.h" #if CONFIG_VP9_HIGHBITDEPTH -void vpx_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, - intptr_t count, - int skip_block, - const int16_t *zbin_ptr, +void vpx_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t count, + int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, - tran_low_t *qcoeff_ptr, - tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, - const int16_t *scan, - const int16_t *iscan) { + tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, + const int16_t *dequant_ptr, uint16_t *eob_ptr, + const int16_t *scan, const int16_t *iscan) { int i, j, non_zero_regs = (int)count / 4, eob_i = -1; __m128i zbins[2]; __m128i nzbins[2]; - zbins[0] = _mm_set_epi32((int)zbin_ptr[1], - (int)zbin_ptr[1], - (int)zbin_ptr[1], + zbins[0] = _mm_set_epi32((int)zbin_ptr[1], (int)zbin_ptr[1], (int)zbin_ptr[1], (int)zbin_ptr[0]); zbins[1] = _mm_set1_epi32((int)zbin_ptr[1]); @@ -73,14 +66,13 @@ void vpx_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, coeffs = _mm_load_si128((const __m128i *)(coeff_ptr + i * 4)); coeffs_sign = _mm_srai_epi32(coeffs, 31); - coeffs = _mm_sub_epi32( - _mm_xor_si128(coeffs, coeffs_sign), coeffs_sign); + coeffs = _mm_sub_epi32(_mm_xor_si128(coeffs, coeffs_sign), coeffs_sign); tmp1 = _mm_cmpgt_epi32(coeffs, zbins[i != 0]); tmp2 = 
_mm_cmpeq_epi32(coeffs, zbins[i != 0]); tmp1 = _mm_or_si128(tmp1, tmp2); test = _mm_movemask_epi8(tmp1); - _mm_storeu_si128((__m128i*)abs_coeff, coeffs); - _mm_storeu_si128((__m128i*)coeff_sign, coeffs_sign); + _mm_storeu_si128((__m128i *)abs_coeff, coeffs); + _mm_storeu_si128((__m128i *)coeff_sign, coeffs_sign); for (j = 0; j < 4; j++) { if (test & (1 << (4 * j))) { @@ -91,8 +83,7 @@ void vpx_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, (uint32_t)((tmp2 * quant_shift_ptr[k != 0]) >> 16); qcoeff_ptr[k] = (int)(abs_qcoeff ^ coeff_sign[j]) - coeff_sign[j]; dqcoeff_ptr[k] = qcoeff_ptr[k] * dequant_ptr[k != 0]; - if (abs_qcoeff) - eob_i = iscan[k] > eob_i ? iscan[k] : eob_i; + if (abs_qcoeff) eob_i = iscan[k] > eob_i ? iscan[k] : eob_i; } } } @@ -100,20 +91,12 @@ void vpx_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, *eob_ptr = eob_i + 1; } - -void vpx_highbd_quantize_b_32x32_sse2(const tran_low_t *coeff_ptr, - intptr_t n_coeffs, - int skip_block, - const int16_t *zbin_ptr, - const int16_t *round_ptr, - const int16_t *quant_ptr, - const int16_t *quant_shift_ptr, - tran_low_t *qcoeff_ptr, - tran_low_t *dqcoeff_ptr, - const int16_t *dequant_ptr, - uint16_t *eob_ptr, - const int16_t *scan, - const int16_t *iscan) { +void vpx_highbd_quantize_b_32x32_sse2( + const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, + const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, + const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, + tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, + const int16_t *scan, const int16_t *iscan) { __m128i zbins[2]; __m128i nzbins[2]; int idx = 0; @@ -122,10 +105,7 @@ void vpx_highbd_quantize_b_32x32_sse2(const tran_low_t *coeff_ptr, const int zbin0_tmp = ROUND_POWER_OF_TWO(zbin_ptr[0], 1); const int zbin1_tmp = ROUND_POWER_OF_TWO(zbin_ptr[1], 1); (void)scan; - zbins[0] = _mm_set_epi32(zbin1_tmp, - zbin1_tmp, - zbin1_tmp, - zbin0_tmp); + zbins[0] = _mm_set_epi32(zbin1_tmp, zbin1_tmp, 
zbin1_tmp, zbin0_tmp); zbins[1] = _mm_set1_epi32(zbin1_tmp); nzbins[0] = _mm_setzero_si128(); @@ -146,14 +126,10 @@ void vpx_highbd_quantize_b_32x32_sse2(const tran_low_t *coeff_ptr, cmp2 = _mm_cmpgt_epi32(coeffs, nzbins[i != 0]); cmp1 = _mm_and_si128(cmp1, cmp2); test = _mm_movemask_epi8(cmp1); - if (!(test & 0xf)) - idx_arr[idx++] = i * 4; - if (!(test & 0xf0)) - idx_arr[idx++] = i * 4 + 1; - if (!(test & 0xf00)) - idx_arr[idx++] = i * 4 + 2; - if (!(test & 0xf000)) - idx_arr[idx++] = i * 4 + 3; + if (!(test & 0xf)) idx_arr[idx++] = i * 4; + if (!(test & 0xf0)) idx_arr[idx++] = i * 4 + 1; + if (!(test & 0xf00)) idx_arr[idx++] = i * 4 + 2; + if (!(test & 0xf000)) idx_arr[idx++] = i * 4 + 3; } // Quantization pass: only process the coefficients selected in @@ -163,15 +139,14 @@ void vpx_highbd_quantize_b_32x32_sse2(const tran_low_t *coeff_ptr, const int coeff = coeff_ptr[rc]; const int coeff_sign = (coeff >> 31); const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign; - const int64_t tmp1 = abs_coeff - + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1); + const int64_t tmp1 = + abs_coeff + ROUND_POWER_OF_TWO(round_ptr[rc != 0], 1); const int64_t tmp2 = ((tmp1 * quant_ptr[rc != 0]) >> 16) + tmp1; const uint32_t abs_qcoeff = (uint32_t)((tmp2 * quant_shift_ptr[rc != 0]) >> 15); qcoeff_ptr[rc] = (int)(abs_qcoeff ^ coeff_sign) - coeff_sign; dqcoeff_ptr[rc] = qcoeff_ptr[rc] * dequant_ptr[rc != 0] / 2; - if (abs_qcoeff) - eob = iscan[idx_arr[i]] > eob ? iscan[idx_arr[i]] : eob; + if (abs_qcoeff) eob = iscan[idx_arr[i]] > eob ? 
iscan[idx_arr[i]] : eob; } } *eob_ptr = eob + 1; diff --git a/libs/libvpx/vpx_dsp/x86/highbd_variance_sse2.c b/libs/libvpx/vpx_dsp/x86/highbd_variance_sse2.c index 81ec5dbdb9..414ae5de1a 100644 --- a/libs/libvpx/vpx_dsp/x86/highbd_variance_sse2.c +++ b/libs/libvpx/vpx_dsp/x86/highbd_variance_sse2.c @@ -11,9 +11,9 @@ #include "vpx_ports/mem.h" -typedef uint32_t (*high_variance_fn_t) (const uint16_t *src, int src_stride, - const uint16_t *ref, int ref_stride, - uint32_t *sse, int *sum); +typedef uint32_t (*high_variance_fn_t)(const uint16_t *src, int src_stride, + const uint16_t *ref, int ref_stride, + uint32_t *sse, int *sum); uint32_t vpx_highbd_calc8x8var_sse2(const uint16_t *src, int src_stride, const uint16_t *ref, int ref_stride, @@ -24,8 +24,8 @@ uint32_t vpx_highbd_calc16x16var_sse2(const uint16_t *src, int src_stride, uint32_t *sse, int *sum); static void highbd_8_variance_sse2(const uint16_t *src, int src_stride, - const uint16_t *ref, int ref_stride, - int w, int h, uint32_t *sse, int *sum, + const uint16_t *ref, int ref_stride, int w, + int h, uint32_t *sse, int *sum, high_variance_fn_t var_fn, int block_size) { int i, j; @@ -36,8 +36,8 @@ static void highbd_8_variance_sse2(const uint16_t *src, int src_stride, for (j = 0; j < w; j += block_size) { unsigned int sse0; int sum0; - var_fn(src + src_stride * i + j, src_stride, - ref + ref_stride * i + j, ref_stride, &sse0, &sum0); + var_fn(src + src_stride * i + j, src_stride, ref + ref_stride * i + j, + ref_stride, &sse0, &sum0); *sse += sse0; *sum += sum0; } @@ -45,8 +45,8 @@ static void highbd_8_variance_sse2(const uint16_t *src, int src_stride, } static void highbd_10_variance_sse2(const uint16_t *src, int src_stride, - const uint16_t *ref, int ref_stride, - int w, int h, uint32_t *sse, int *sum, + const uint16_t *ref, int ref_stride, int w, + int h, uint32_t *sse, int *sum, high_variance_fn_t var_fn, int block_size) { int i, j; uint64_t sse_long = 0; @@ -56,8 +56,8 @@ static void 
highbd_10_variance_sse2(const uint16_t *src, int src_stride, for (j = 0; j < w; j += block_size) { unsigned int sse0; int sum0; - var_fn(src + src_stride * i + j, src_stride, - ref + ref_stride * i + j, ref_stride, &sse0, &sum0); + var_fn(src + src_stride * i + j, src_stride, ref + ref_stride * i + j, + ref_stride, &sse0, &sum0); sse_long += sse0; sum_long += sum0; } @@ -67,8 +67,8 @@ static void highbd_10_variance_sse2(const uint16_t *src, int src_stride, } static void highbd_12_variance_sse2(const uint16_t *src, int src_stride, - const uint16_t *ref, int ref_stride, - int w, int h, uint32_t *sse, int *sum, + const uint16_t *ref, int ref_stride, int w, + int h, uint32_t *sse, int *sum, high_variance_fn_t var_fn, int block_size) { int i, j; uint64_t sse_long = 0; @@ -78,8 +78,8 @@ static void highbd_12_variance_sse2(const uint16_t *src, int src_stride, for (j = 0; j < w; j += block_size) { unsigned int sse0; int sum0; - var_fn(src + src_stride * i + j, src_stride, - ref + ref_stride * i + j, ref_stride, &sse0, &sum0); + var_fn(src + src_stride * i + j, src_stride, ref + ref_stride * i + j, + ref_stride, &sse0, &sum0); sse_long += sse0; sum_long += sum0; } @@ -88,80 +88,83 @@ static void highbd_12_variance_sse2(const uint16_t *src, int src_stride, *sse = (uint32_t)ROUND_POWER_OF_TWO(sse_long, 8); } - -#define HIGH_GET_VAR(S) \ -void vpx_highbd_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \ - const uint8_t *ref8, int ref_stride, \ - uint32_t *sse, int *sum) { \ - uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ - uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ - vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, \ - sse, sum); \ -} \ -\ -void vpx_highbd_10_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \ - const uint8_t *ref8, int ref_stride, \ - uint32_t *sse, int *sum) { \ - uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ - uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ - vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, 
ref_stride, \ - sse, sum); \ - *sum = ROUND_POWER_OF_TWO(*sum, 2); \ - *sse = ROUND_POWER_OF_TWO(*sse, 4); \ -} \ -\ -void vpx_highbd_12_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \ - const uint8_t *ref8, int ref_stride, \ - uint32_t *sse, int *sum) { \ - uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ - uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ - vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, \ - sse, sum); \ - *sum = ROUND_POWER_OF_TWO(*sum, 4); \ - *sse = ROUND_POWER_OF_TWO(*sse, 8); \ -} +#define HIGH_GET_VAR(S) \ + void vpx_highbd_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \ + const uint8_t *ref8, int ref_stride, \ + uint32_t *sse, int *sum) { \ + uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ + uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ + vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \ + sum); \ + } \ + \ + void vpx_highbd_10_get##S##x##S##var_sse2( \ + const uint8_t *src8, int src_stride, const uint8_t *ref8, \ + int ref_stride, uint32_t *sse, int *sum) { \ + uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ + uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ + vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \ + sum); \ + *sum = ROUND_POWER_OF_TWO(*sum, 2); \ + *sse = ROUND_POWER_OF_TWO(*sse, 4); \ + } \ + \ + void vpx_highbd_12_get##S##x##S##var_sse2( \ + const uint8_t *src8, int src_stride, const uint8_t *ref8, \ + int ref_stride, uint32_t *sse, int *sum) { \ + uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ + uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ + vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \ + sum); \ + *sum = ROUND_POWER_OF_TWO(*sum, 4); \ + *sse = ROUND_POWER_OF_TWO(*sse, 8); \ + } HIGH_GET_VAR(16); HIGH_GET_VAR(8); #undef HIGH_GET_VAR -#define VAR_FN(w, h, block_size, shift) \ -uint32_t vpx_highbd_8_variance##w##x##h##_sse2( \ - const uint8_t *src8, int src_stride, \ - const uint8_t *ref8, int ref_stride, uint32_t 
*sse) { \ - int sum; \ - uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ - uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ - highbd_8_variance_sse2(src, src_stride, ref, ref_stride, w, h, sse, &sum, \ - vpx_highbd_calc##block_size##x##block_size##var_sse2, \ - block_size); \ - return *sse - (((int64_t)sum * sum) >> shift); \ -} \ -\ -uint32_t vpx_highbd_10_variance##w##x##h##_sse2( \ - const uint8_t *src8, int src_stride, \ - const uint8_t *ref8, int ref_stride, uint32_t *sse) { \ - int sum; \ - uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ - uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ - highbd_10_variance_sse2( \ - src, src_stride, ref, ref_stride, w, h, sse, &sum, \ - vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \ - return *sse - (((int64_t)sum * sum) >> shift); \ -} \ -\ -uint32_t vpx_highbd_12_variance##w##x##h##_sse2( \ - const uint8_t *src8, int src_stride, \ - const uint8_t *ref8, int ref_stride, uint32_t *sse) { \ - int sum; \ - uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ - uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ - highbd_12_variance_sse2( \ - src, src_stride, ref, ref_stride, w, h, sse, &sum, \ - vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \ - return *sse - (((int64_t)sum * sum) >> shift); \ -} +#define VAR_FN(w, h, block_size, shift) \ + uint32_t vpx_highbd_8_variance##w##x##h##_sse2( \ + const uint8_t *src8, int src_stride, const uint8_t *ref8, \ + int ref_stride, uint32_t *sse) { \ + int sum; \ + uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ + uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ + highbd_8_variance_sse2( \ + src, src_stride, ref, ref_stride, w, h, sse, &sum, \ + vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \ + return *sse - (((int64_t)sum * sum) >> shift); \ + } \ + \ + uint32_t vpx_highbd_10_variance##w##x##h##_sse2( \ + const uint8_t *src8, int src_stride, const uint8_t *ref8, \ + int ref_stride, uint32_t *sse) { \ + int sum; \ + int64_t var; \ + uint16_t *src = 
CONVERT_TO_SHORTPTR(src8); \ + uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ + highbd_10_variance_sse2( \ + src, src_stride, ref, ref_stride, w, h, sse, &sum, \ + vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \ + var = (int64_t)(*sse) - (((int64_t)sum * sum) >> shift); \ + return (var >= 0) ? (uint32_t)var : 0; \ + } \ + \ + uint32_t vpx_highbd_12_variance##w##x##h##_sse2( \ + const uint8_t *src8, int src_stride, const uint8_t *ref8, \ + int ref_stride, uint32_t *sse) { \ + int sum; \ + int64_t var; \ + uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ + uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ + highbd_12_variance_sse2( \ + src, src_stride, ref, ref_stride, w, h, sse, &sum, \ + vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \ + var = (int64_t)(*sse) - (((int64_t)sum * sum) >> shift); \ + return (var >= 0) ? (uint32_t)var : 0; \ + } VAR_FN(64, 64, 16, 12); VAR_FN(64, 32, 16, 11); @@ -177,13 +180,13 @@ VAR_FN(8, 8, 8, 6); #undef VAR_FN unsigned int vpx_highbd_8_mse16x16_sse2(const uint8_t *src8, int src_stride, - const uint8_t *ref8, int ref_stride, - unsigned int *sse) { + const uint8_t *ref8, int ref_stride, + unsigned int *sse) { int sum; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); - highbd_8_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, - sse, &sum, vpx_highbd_calc16x16var_sse2, 16); + highbd_8_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum, + vpx_highbd_calc16x16var_sse2, 16); return *sse; } @@ -193,8 +196,8 @@ unsigned int vpx_highbd_10_mse16x16_sse2(const uint8_t *src8, int src_stride, int sum; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); - highbd_10_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, - sse, &sum, vpx_highbd_calc16x16var_sse2, 16); + highbd_10_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum, + vpx_highbd_calc16x16var_sse2, 16); return *sse; } @@ -204,19 +207,19 @@ unsigned 
int vpx_highbd_12_mse16x16_sse2(const uint8_t *src8, int src_stride, int sum; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); - highbd_12_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, - sse, &sum, vpx_highbd_calc16x16var_sse2, 16); + highbd_12_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum, + vpx_highbd_calc16x16var_sse2, 16); return *sse; } unsigned int vpx_highbd_8_mse8x8_sse2(const uint8_t *src8, int src_stride, - const uint8_t *ref8, int ref_stride, - unsigned int *sse) { + const uint8_t *ref8, int ref_stride, + unsigned int *sse) { int sum; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); - highbd_8_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, - sse, &sum, vpx_highbd_calc8x8var_sse2, 8); + highbd_8_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, sse, &sum, + vpx_highbd_calc8x8var_sse2, 8); return *sse; } @@ -226,8 +229,8 @@ unsigned int vpx_highbd_10_mse8x8_sse2(const uint8_t *src8, int src_stride, int sum; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); - highbd_10_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, - sse, &sum, vpx_highbd_calc8x8var_sse2, 8); + highbd_10_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, sse, &sum, + vpx_highbd_calc8x8var_sse2, 8); return *sse; } @@ -237,357 +240,313 @@ unsigned int vpx_highbd_12_mse8x8_sse2(const uint8_t *src8, int src_stride, int sum; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); - highbd_12_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, - sse, &sum, vpx_highbd_calc8x8var_sse2, 8); + highbd_12_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, sse, &sum, + vpx_highbd_calc8x8var_sse2, 8); return *sse; } -#if CONFIG_USE_X86INC // The 2 unused parameters are place holders for PIC enabled build. 
// These definitions are for functions defined in // highbd_subpel_variance_impl_sse2.asm -#define DECL(w, opt) \ - int vpx_highbd_sub_pixel_variance##w##xh_##opt(const uint16_t *src, \ - ptrdiff_t src_stride, \ - int x_offset, int y_offset, \ - const uint16_t *dst, \ - ptrdiff_t dst_stride, \ - int height, \ - unsigned int *sse, \ - void *unused0, void *unused); -#define DECLS(opt1, opt2) \ - DECL(8, opt1); \ - DECL(16, opt1) - -DECLS(sse2, sse); -// TODO(johannkoenig): enable the ssse3 or delete -// DECLS(ssse3, ssse3); -#undef DECLS -#undef DECL - -#define FN(w, h, wf, wlog2, hlog2, opt, cast) \ -uint32_t vpx_highbd_8_sub_pixel_variance##w##x##h##_##opt(const uint8_t *src8, \ - int src_stride, \ - int x_offset, \ - int y_offset, \ - const uint8_t *dst8, \ - int dst_stride, \ - uint32_t *sse_ptr) { \ - uint32_t sse; \ - uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ - uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ - int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt(src, src_stride, \ - x_offset, y_offset, \ - dst, dst_stride, h, \ - &sse, NULL, NULL); \ - if (w > wf) { \ - unsigned int sse2; \ - int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(src + 16, \ - src_stride, \ - x_offset, y_offset, \ - dst + 16, \ - dst_stride, \ - h, &sse2, \ - NULL, NULL); \ - se += se2; \ - sse += sse2; \ - if (w > wf * 2) { \ - se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(src + 32, src_stride, \ - x_offset, y_offset, \ - dst + 32, dst_stride, \ - h, &sse2, NULL, NULL); \ - se += se2; \ - sse += sse2; \ - se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ - src + 48, src_stride, x_offset, y_offset, \ - dst + 48, dst_stride, h, &sse2, NULL, NULL); \ - se += se2; \ - sse += sse2; \ - } \ - } \ - *sse_ptr = sse; \ - return sse - ((cast se * se) >> (wlog2 + hlog2)); \ -} \ -\ -uint32_t vpx_highbd_10_sub_pixel_variance##w##x##h##_##opt( \ - const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ - const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \ - 
uint32_t sse; \ - uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ - uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ - int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt(src, src_stride, \ - x_offset, y_offset, \ - dst, dst_stride, \ - h, &sse, NULL, NULL); \ - if (w > wf) { \ - uint32_t sse2; \ - int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(src + 16, \ - src_stride, \ - x_offset, y_offset, \ - dst + 16, \ - dst_stride, \ - h, &sse2, \ - NULL, NULL); \ - se += se2; \ - sse += sse2; \ - if (w > wf * 2) { \ - se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(src + 32, src_stride, \ - x_offset, y_offset, \ - dst + 32, dst_stride, \ - h, &sse2, NULL, NULL); \ - se += se2; \ - sse += sse2; \ - se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt(src + 48, src_stride, \ - x_offset, y_offset, \ - dst + 48, dst_stride, \ - h, &sse2, NULL, NULL); \ - se += se2; \ - sse += sse2; \ - } \ - } \ - se = ROUND_POWER_OF_TWO(se, 2); \ - sse = ROUND_POWER_OF_TWO(sse, 4); \ - *sse_ptr = sse; \ - return sse - ((cast se * se) >> (wlog2 + hlog2)); \ -} \ -\ -uint32_t vpx_highbd_12_sub_pixel_variance##w##x##h##_##opt( \ - const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ - const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \ - int start_row; \ - uint32_t sse; \ - int se = 0; \ - uint64_t long_sse = 0; \ - uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ - uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ - for (start_row = 0; start_row < h; start_row +=16) { \ - uint32_t sse2; \ - int height = h - start_row < 16 ? 
h - start_row : 16; \ - int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ - src + (start_row * src_stride), src_stride, \ - x_offset, y_offset, dst + (start_row * dst_stride), \ - dst_stride, height, &sse2, NULL, NULL); \ - se += se2; \ - long_sse += sse2; \ - if (w > wf) { \ - se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ - src + 16 + (start_row * src_stride), src_stride, \ - x_offset, y_offset, dst + 16 + (start_row * dst_stride), \ - dst_stride, height, &sse2, NULL, NULL); \ - se += se2; \ - long_sse += sse2; \ - if (w > wf * 2) { \ - se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ - src + 32 + (start_row * src_stride), src_stride, \ - x_offset, y_offset, dst + 32 + (start_row * dst_stride), \ - dst_stride, height, &sse2, NULL, NULL); \ - se += se2; \ - long_sse += sse2; \ - se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ - src + 48 + (start_row * src_stride), src_stride, \ - x_offset, y_offset, dst + 48 + (start_row * dst_stride), \ - dst_stride, height, &sse2, NULL, NULL); \ - se += se2; \ - long_sse += sse2; \ - }\ - } \ - } \ - se = ROUND_POWER_OF_TWO(se, 4); \ - sse = (uint32_t)ROUND_POWER_OF_TWO(long_sse, 8); \ - *sse_ptr = sse; \ - return sse - ((cast se * se) >> (wlog2 + hlog2)); \ -} - -#define FNS(opt1, opt2) \ -FN(64, 64, 16, 6, 6, opt1, (int64_t)); \ -FN(64, 32, 16, 6, 5, opt1, (int64_t)); \ -FN(32, 64, 16, 5, 6, opt1, (int64_t)); \ -FN(32, 32, 16, 5, 5, opt1, (int64_t)); \ -FN(32, 16, 16, 5, 4, opt1, (int64_t)); \ -FN(16, 32, 16, 4, 5, opt1, (int64_t)); \ -FN(16, 16, 16, 4, 4, opt1, (int64_t)); \ -FN(16, 8, 16, 4, 3, opt1, (int64_t)); \ -FN(8, 16, 8, 3, 4, opt1, (int64_t)); \ -FN(8, 8, 8, 3, 3, opt1, (int64_t)); \ -FN(8, 4, 8, 3, 2, opt1, (int64_t)); - - -FNS(sse2, sse); - -#undef FNS -#undef FN - -// The 2 unused parameters are place holders for PIC enabled build. 
-#define DECL(w, opt) \ -int vpx_highbd_sub_pixel_avg_variance##w##xh_##opt(const uint16_t *src, \ - ptrdiff_t src_stride, \ - int x_offset, int y_offset, \ - const uint16_t *dst, \ - ptrdiff_t dst_stride, \ - const uint16_t *sec, \ - ptrdiff_t sec_stride, \ - int height, \ - unsigned int *sse, \ - void *unused0, void *unused); -#define DECLS(opt1) \ -DECL(16, opt1) \ -DECL(8, opt1) +#define DECL(w, opt) \ + int vpx_highbd_sub_pixel_variance##w##xh_##opt( \ + const uint16_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \ + const uint16_t *dst, ptrdiff_t dst_stride, int height, \ + unsigned int *sse, void *unused0, void *unused); +#define DECLS(opt) \ + DECL(8, opt); \ + DECL(16, opt) DECLS(sse2); -#undef DECL + #undef DECLS +#undef DECL -#define FN(w, h, wf, wlog2, hlog2, opt, cast) \ -uint32_t vpx_highbd_8_sub_pixel_avg_variance##w##x##h##_##opt( \ - const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ - const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \ - const uint8_t *sec8) { \ - uint32_t sse; \ - uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ - uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ - uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \ - int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ - src, src_stride, x_offset, \ - y_offset, dst, dst_stride, sec, w, h, &sse, NULL, NULL); \ - if (w > wf) { \ - uint32_t sse2; \ - int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ - src + 16, src_stride, x_offset, y_offset, \ - dst + 16, dst_stride, sec + 16, w, h, &sse2, NULL, NULL); \ - se += se2; \ - sse += sse2; \ - if (w > wf * 2) { \ - se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ - src + 32, src_stride, x_offset, y_offset, \ - dst + 32, dst_stride, sec + 32, w, h, &sse2, NULL, NULL); \ - se += se2; \ - sse += sse2; \ - se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ - src + 48, src_stride, x_offset, y_offset, \ - dst + 48, dst_stride, sec + 48, w, h, &sse2, NULL, NULL); \ - se += se2; \ - sse += sse2; 
\ - } \ - } \ - *sse_ptr = sse; \ - return sse - ((cast se * se) >> (wlog2 + hlog2)); \ -} \ -\ -uint32_t vpx_highbd_10_sub_pixel_avg_variance##w##x##h##_##opt( \ - const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ - const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \ - const uint8_t *sec8) { \ - uint32_t sse; \ - uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ - uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ - uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \ - int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ - src, src_stride, x_offset, \ - y_offset, dst, dst_stride, \ - sec, w, h, &sse, NULL, NULL); \ - if (w > wf) { \ - uint32_t sse2; \ - int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ - src + 16, src_stride, \ - x_offset, y_offset, \ - dst + 16, dst_stride, \ - sec + 16, w, h, &sse2, \ - NULL, NULL); \ - se += se2; \ - sse += sse2; \ - if (w > wf * 2) { \ - se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ - src + 32, src_stride, \ - x_offset, y_offset, \ - dst + 32, dst_stride, \ - sec + 32, w, h, &sse2, \ - NULL, NULL); \ - se += se2; \ - sse += sse2; \ - se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ - src + 48, src_stride, \ - x_offset, y_offset, \ - dst + 48, dst_stride, \ - sec + 48, w, h, &sse2, \ - NULL, NULL); \ - se += se2; \ - sse += sse2; \ - } \ - } \ - se = ROUND_POWER_OF_TWO(se, 2); \ - sse = ROUND_POWER_OF_TWO(sse, 4); \ - *sse_ptr = sse; \ - return sse - ((cast se * se) >> (wlog2 + hlog2)); \ -} \ -\ -uint32_t vpx_highbd_12_sub_pixel_avg_variance##w##x##h##_##opt( \ - const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ - const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \ - const uint8_t *sec8) { \ - int start_row; \ - uint32_t sse; \ - int se = 0; \ - uint64_t long_sse = 0; \ - uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ - uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ - uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \ - for (start_row = 0; start_row < h; start_row +=16) { 
\ - uint32_t sse2; \ - int height = h - start_row < 16 ? h - start_row : 16; \ - int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ - src + (start_row * src_stride), src_stride, x_offset, \ - y_offset, dst + (start_row * dst_stride), dst_stride, \ - sec + (start_row * w), w, height, &sse2, NULL, NULL); \ - se += se2; \ - long_sse += sse2; \ - if (w > wf) { \ - se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ - src + 16 + (start_row * src_stride), src_stride, \ - x_offset, y_offset, \ - dst + 16 + (start_row * dst_stride), dst_stride, \ - sec + 16 + (start_row * w), w, height, &sse2, NULL, NULL); \ - se += se2; \ - long_sse += sse2; \ - if (w > wf * 2) { \ - se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ - src + 32 + (start_row * src_stride), src_stride, \ - x_offset, y_offset, \ - dst + 32 + (start_row * dst_stride), dst_stride, \ - sec + 32 + (start_row * w), w, height, &sse2, NULL, NULL); \ - se += se2; \ - long_sse += sse2; \ - se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ - src + 48 + (start_row * src_stride), src_stride, \ - x_offset, y_offset, \ - dst + 48 + (start_row * dst_stride), dst_stride, \ - sec + 48 + (start_row * w), w, height, &sse2, NULL, NULL); \ - se += se2; \ - long_sse += sse2; \ - } \ - } \ - } \ - se = ROUND_POWER_OF_TWO(se, 4); \ - sse = (uint32_t)ROUND_POWER_OF_TWO(long_sse, 8); \ - *sse_ptr = sse; \ - return sse - ((cast se * se) >> (wlog2 + hlog2)); \ -} +#define FN(w, h, wf, wlog2, hlog2, opt, cast) \ + uint32_t vpx_highbd_8_sub_pixel_variance##w##x##h##_##opt( \ + const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ + const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \ + uint32_t sse; \ + uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ + uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ + int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + src, src_stride, x_offset, y_offset, dst, dst_stride, h, &sse, NULL, \ + NULL); \ + if (w > wf) { \ + unsigned int sse2; \ + int se2 = 
vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h, \ + &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + if (w > wf * 2) { \ + se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \ + &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \ + &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + } \ + } \ + *sse_ptr = sse; \ + return sse - ((cast se * se) >> (wlog2 + hlog2)); \ + } \ + \ + uint32_t vpx_highbd_10_sub_pixel_variance##w##x##h##_##opt( \ + const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ + const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \ + uint32_t sse; \ + uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ + uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ + int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + src, src_stride, x_offset, y_offset, dst, dst_stride, h, &sse, NULL, \ + NULL); \ + if (w > wf) { \ + uint32_t sse2; \ + int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h, \ + &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + if (w > wf * 2) { \ + se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \ + &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \ + &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + } \ + } \ + se = ROUND_POWER_OF_TWO(se, 2); \ + sse = ROUND_POWER_OF_TWO(sse, 4); \ + *sse_ptr = sse; \ + return sse - ((cast se * se) >> (wlog2 + hlog2)); \ + } \ + \ + uint32_t vpx_highbd_12_sub_pixel_variance##w##x##h##_##opt( \ + const uint8_t *src8, int src_stride, int 
x_offset, int y_offset, \ + const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \ + int start_row; \ + uint32_t sse; \ + int se = 0; \ + uint64_t long_sse = 0; \ + uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ + uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ + for (start_row = 0; start_row < h; start_row += 16) { \ + uint32_t sse2; \ + int height = h - start_row < 16 ? h - start_row : 16; \ + int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + src + (start_row * src_stride), src_stride, x_offset, y_offset, \ + dst + (start_row * dst_stride), dst_stride, height, &sse2, NULL, \ + NULL); \ + se += se2; \ + long_sse += sse2; \ + if (w > wf) { \ + se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + src + 16 + (start_row * src_stride), src_stride, x_offset, \ + y_offset, dst + 16 + (start_row * dst_stride), dst_stride, height, \ + &sse2, NULL, NULL); \ + se += se2; \ + long_sse += sse2; \ + if (w > wf * 2) { \ + se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + src + 32 + (start_row * src_stride), src_stride, x_offset, \ + y_offset, dst + 32 + (start_row * dst_stride), dst_stride, \ + height, &sse2, NULL, NULL); \ + se += se2; \ + long_sse += sse2; \ + se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + src + 48 + (start_row * src_stride), src_stride, x_offset, \ + y_offset, dst + 48 + (start_row * dst_stride), dst_stride, \ + height, &sse2, NULL, NULL); \ + se += se2; \ + long_sse += sse2; \ + } \ + } \ + } \ + se = ROUND_POWER_OF_TWO(se, 4); \ + sse = (uint32_t)ROUND_POWER_OF_TWO(long_sse, 8); \ + *sse_ptr = sse; \ + return sse - ((cast se * se) >> (wlog2 + hlog2)); \ + } - -#define FNS(opt1) \ -FN(64, 64, 16, 6, 6, opt1, (int64_t)); \ -FN(64, 32, 16, 6, 5, opt1, (int64_t)); \ -FN(32, 64, 16, 5, 6, opt1, (int64_t)); \ -FN(32, 32, 16, 5, 5, opt1, (int64_t)); \ -FN(32, 16, 16, 5, 4, opt1, (int64_t)); \ -FN(16, 32, 16, 4, 5, opt1, (int64_t)); \ -FN(16, 16, 16, 4, 4, opt1, (int64_t)); \ -FN(16, 8, 16, 4, 3, opt1, (int64_t)); \ -FN(8, 16, 8, 4, 3, opt1, 
(int64_t)); \ -FN(8, 8, 8, 3, 3, opt1, (int64_t)); \ -FN(8, 4, 8, 3, 2, opt1, (int64_t)); +#define FNS(opt) \ + FN(64, 64, 16, 6, 6, opt, (int64_t)); \ + FN(64, 32, 16, 6, 5, opt, (int64_t)); \ + FN(32, 64, 16, 5, 6, opt, (int64_t)); \ + FN(32, 32, 16, 5, 5, opt, (int64_t)); \ + FN(32, 16, 16, 5, 4, opt, (int64_t)); \ + FN(16, 32, 16, 4, 5, opt, (int64_t)); \ + FN(16, 16, 16, 4, 4, opt, (int64_t)); \ + FN(16, 8, 16, 4, 3, opt, (int64_t)); \ + FN(8, 16, 8, 3, 4, opt, (int64_t)); \ + FN(8, 8, 8, 3, 3, opt, (int64_t)); \ + FN(8, 4, 8, 3, 2, opt, (int64_t)); + +FNS(sse2); + +#undef FNS +#undef FN + +// The 2 unused parameters are place holders for PIC enabled build. +#define DECL(w, opt) \ + int vpx_highbd_sub_pixel_avg_variance##w##xh_##opt( \ + const uint16_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \ + const uint16_t *dst, ptrdiff_t dst_stride, const uint16_t *sec, \ + ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0, \ + void *unused); +#define DECLS(opt1) \ + DECL(16, opt1) \ + DECL(8, opt1) + +DECLS(sse2); +#undef DECL +#undef DECLS + +#define FN(w, h, wf, wlog2, hlog2, opt, cast) \ + uint32_t vpx_highbd_8_sub_pixel_avg_variance##w##x##h##_##opt( \ + const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ + const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \ + const uint8_t *sec8) { \ + uint32_t sse; \ + uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ + uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ + uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \ + int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \ + NULL, NULL); \ + if (w > wf) { \ + uint32_t sse2; \ + int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, \ + sec + 16, w, h, &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + if (w > wf * 2) { \ + se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + src + 
32, src_stride, x_offset, y_offset, dst + 32, dst_stride, \ + sec + 32, w, h, &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, \ + sec + 48, w, h, &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + } \ + } \ + *sse_ptr = sse; \ + return sse - ((cast se * se) >> (wlog2 + hlog2)); \ + } \ + \ + uint32_t vpx_highbd_10_sub_pixel_avg_variance##w##x##h##_##opt( \ + const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ + const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \ + const uint8_t *sec8) { \ + uint32_t sse; \ + uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ + uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ + uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \ + int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \ + NULL, NULL); \ + if (w > wf) { \ + uint32_t sse2; \ + int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, \ + sec + 16, w, h, &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + if (w > wf * 2) { \ + se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, \ + sec + 32, w, h, &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, \ + sec + 48, w, h, &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + } \ + } \ + se = ROUND_POWER_OF_TWO(se, 2); \ + sse = ROUND_POWER_OF_TWO(sse, 4); \ + *sse_ptr = sse; \ + return sse - ((cast se * se) >> (wlog2 + hlog2)); \ + } \ + \ + uint32_t vpx_highbd_12_sub_pixel_avg_variance##w##x##h##_##opt( \ + const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ + const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \ + const 
uint8_t *sec8) { \ + int start_row; \ + uint32_t sse; \ + int se = 0; \ + uint64_t long_sse = 0; \ + uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ + uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ + uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \ + for (start_row = 0; start_row < h; start_row += 16) { \ + uint32_t sse2; \ + int height = h - start_row < 16 ? h - start_row : 16; \ + int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + src + (start_row * src_stride), src_stride, x_offset, y_offset, \ + dst + (start_row * dst_stride), dst_stride, sec + (start_row * w), \ + w, height, &sse2, NULL, NULL); \ + se += se2; \ + long_sse += sse2; \ + if (w > wf) { \ + se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + src + 16 + (start_row * src_stride), src_stride, x_offset, \ + y_offset, dst + 16 + (start_row * dst_stride), dst_stride, \ + sec + 16 + (start_row * w), w, height, &sse2, NULL, NULL); \ + se += se2; \ + long_sse += sse2; \ + if (w > wf * 2) { \ + se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + src + 32 + (start_row * src_stride), src_stride, x_offset, \ + y_offset, dst + 32 + (start_row * dst_stride), dst_stride, \ + sec + 32 + (start_row * w), w, height, &sse2, NULL, NULL); \ + se += se2; \ + long_sse += sse2; \ + se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + src + 48 + (start_row * src_stride), src_stride, x_offset, \ + y_offset, dst + 48 + (start_row * dst_stride), dst_stride, \ + sec + 48 + (start_row * w), w, height, &sse2, NULL, NULL); \ + se += se2; \ + long_sse += sse2; \ + } \ + } \ + } \ + se = ROUND_POWER_OF_TWO(se, 4); \ + sse = (uint32_t)ROUND_POWER_OF_TWO(long_sse, 8); \ + *sse_ptr = sse; \ + return sse - ((cast se * se) >> (wlog2 + hlog2)); \ + } + +#define FNS(opt1) \ + FN(64, 64, 16, 6, 6, opt1, (int64_t)); \ + FN(64, 32, 16, 6, 5, opt1, (int64_t)); \ + FN(32, 64, 16, 5, 6, opt1, (int64_t)); \ + FN(32, 32, 16, 5, 5, opt1, (int64_t)); \ + FN(32, 16, 16, 5, 4, opt1, (int64_t)); \ + FN(16, 32, 16, 4, 5, opt1, 
(int64_t)); \ + FN(16, 16, 16, 4, 4, opt1, (int64_t)); \ + FN(16, 8, 16, 4, 3, opt1, (int64_t)); \ + FN(8, 16, 8, 4, 3, opt1, (int64_t)); \ + FN(8, 8, 8, 3, 3, opt1, (int64_t)); \ + FN(8, 4, 8, 3, 2, opt1, (int64_t)); FNS(sse2); #undef FNS #undef FN -#endif // CONFIG_USE_X86INC diff --git a/libs/libvpx/vpx_dsp/x86/intrapred_sse2.asm b/libs/libvpx/vpx_dsp/x86/intrapred_sse2.asm index c24d53686a..c18095c287 100644 --- a/libs/libvpx/vpx_dsp/x86/intrapred_sse2.asm +++ b/libs/libvpx/vpx_dsp/x86/intrapred_sse2.asm @@ -11,6 +11,7 @@ %include "third_party/x86inc/x86inc.asm" SECTION_RODATA +pb_1: times 16 db 1 pw_4: times 8 dw 4 pw_8: times 8 dw 8 pw_16: times 8 dw 16 @@ -23,6 +24,115 @@ pw2_32: times 8 dw 16 SECTION .text +; ------------------------------------------ +; input: x, y, z, result +; +; trick from pascal +; (x+2y+z+2)>>2 can be calculated as: +; result = avg(x,z) +; result -= xor(x,z) & 1 +; result = avg(result,y) +; ------------------------------------------ +%macro X_PLUS_2Y_PLUS_Z_PLUS_2_RSH_2 4 + pavgb %4, %1, %3 + pxor %3, %1 + pand %3, [GLOBAL(pb_1)] + psubb %4, %3 + pavgb %4, %2 +%endmacro + +INIT_XMM sse2 +cglobal d45_predictor_4x4, 3, 4, 4, dst, stride, above, goffset + GET_GOT goffsetq + + movq m0, [aboveq] + DEFINE_ARGS dst, stride, temp + psrldq m1, m0, 1 + psrldq m2, m0, 2 + X_PLUS_2Y_PLUS_Z_PLUS_2_RSH_2 m0, m1, m2, m3 + + ; store 4 lines + movd [dstq ], m3 + psrlq m3, 8 + movd [dstq+strideq ], m3 + lea dstq, [dstq+strideq*2] + psrlq m3, 8 + movd [dstq ], m3 + psrlq m3, 8 + movd [dstq+strideq ], m3 + psrlq m0, 56 + movd tempq, m0 + mov [dstq+strideq+3], tempb + + RESTORE_GOT + RET + +INIT_XMM sse2 +cglobal d45_predictor_8x8, 3, 4, 4, dst, stride, above, goffset + GET_GOT goffsetq + + movu m1, [aboveq] + pslldq m0, m1, 1 + psrldq m2, m1, 1 + DEFINE_ARGS dst, stride, stride3 + lea stride3q, [strideq*3] + X_PLUS_2Y_PLUS_Z_PLUS_2_RSH_2 m0, m1, m2, m3 + punpckhbw m0, m0 ; 7 7 + punpcklwd m0, m0 ; 7 7 7 7 + punpckldq m0, m0 ; 7 7 7 7 7 7 7 7 + punpcklqdq 
m3, m0 ; -1 0 1 2 3 4 5 6 7 7 7 7 7 7 7 7 + + ; store 4 lines + psrldq m3, 1 + movq [dstq ], m3 + psrldq m3, 1 + movq [dstq+strideq ], m3 + psrldq m3, 1 + movq [dstq+strideq*2], m3 + psrldq m3, 1 + movq [dstq+stride3q ], m3 + lea dstq, [dstq+strideq*4] + + ; store next 4 lines + psrldq m3, 1 + movq [dstq ], m3 + psrldq m3, 1 + movq [dstq+strideq ], m3 + psrldq m3, 1 + movq [dstq+strideq*2], m3 + psrldq m3, 1 + movq [dstq+stride3q ], m3 + + RESTORE_GOT + RET + +INIT_XMM sse2 +cglobal d207_predictor_4x4, 4, 4, 5, dst, stride, unused, left, goffset + GET_GOT goffsetq + + movd m0, [leftq] ; abcd [byte] + punpcklbw m4, m0, m0 ; aabb ccdd + punpcklwd m4, m4 ; aaaa bbbb cccc dddd + psrldq m4, 12 ; dddd + punpckldq m0, m4 ; abcd dddd + psrldq m1, m0, 1 ; bcdd + psrldq m2, m0, 2 ; cddd + + X_PLUS_2Y_PLUS_Z_PLUS_2_RSH_2 m0, m1, m2, m3 ; a2bc b2cd c3d d + pavgb m1, m0 ; ab, bc, cd, d [byte] + + punpcklbw m1, m3 ; ab, a2bc, bc, b2cd, cd, c3d, d, d + movd [dstq ], m1 + psrlq m1, 16 ; bc, b2cd, cd, c3d, d, d + movd [dstq+strideq], m1 + + lea dstq, [dstq+strideq*2] + psrlq m1, 16 ; cd, c3d, d, d + movd [dstq ], m1 + movd [dstq+strideq], m4 ; d, d, d, d + RESTORE_GOT + RET + INIT_XMM sse2 cglobal dc_predictor_4x4, 4, 5, 3, dst, stride, above, left, goffset GET_GOT goffsetq @@ -646,7 +756,7 @@ cglobal tm_predictor_8x8, 4, 4, 5, dst, stride, above, left psubw m0, m2 ; t1-tl t2-tl ... 
t8-tl [word] movq m2, [leftq] punpcklbw m2, m1 ; l1 l2 l3 l4 l5 l6 l7 l8 [word] -.loop +.loop: pshuflw m4, m2, 0x0 ; [63:0] l1 l1 l1 l1 [word] pshuflw m3, m2, 0x55 ; [63:0] l2 l2 l2 l2 [word] punpcklqdq m4, m4 ; l1 l1 l1 l1 l1 l1 l1 l1 [word] diff --git a/libs/libvpx/vpx_dsp/x86/intrapred_ssse3.asm b/libs/libvpx/vpx_dsp/x86/intrapred_ssse3.asm index d061278c7d..5e0139fa8d 100644 --- a/libs/libvpx/vpx_dsp/x86/intrapred_ssse3.asm +++ b/libs/libvpx/vpx_dsp/x86/intrapred_ssse3.asm @@ -13,7 +13,6 @@ SECTION_RODATA pb_1: times 16 db 1 -sh_b01234577: db 0, 1, 2, 3, 4, 5, 7, 7, 0, 0, 0, 0, 0, 0, 0, 0 sh_b12345677: db 1, 2, 3, 4, 5, 6, 7, 7, 0, 0, 0, 0, 0, 0, 0, 0 sh_b23456777: db 2, 3, 4, 5, 6, 7, 7, 7, 0, 0, 0, 0, 0, 0, 0, 0 sh_b0123456777777777: db 0, 1, 2, 3, 4, 5, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7 @@ -28,77 +27,9 @@ sh_b65432108: db 6, 5, 4, 3, 2, 1, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0 sh_b54321089: db 5, 4, 3, 2, 1, 0, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0 sh_b89abcdef: db 8, 9, 10, 11, 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0 sh_bfedcba9876543210: db 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 -sh_b1233: db 1, 2, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 -sh_b2333: db 2, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 SECTION .text -INIT_MMX ssse3 -cglobal d45_predictor_4x4, 3, 4, 4, dst, stride, above, goffset - GET_GOT goffsetq - - movq m0, [aboveq] - pshufb m2, m0, [GLOBAL(sh_b23456777)] - pshufb m1, m0, [GLOBAL(sh_b01234577)] - pshufb m0, [GLOBAL(sh_b12345677)] - pavgb m3, m2, m1 - pxor m2, m1 - pand m2, [GLOBAL(pb_1)] - psubb m3, m2 - pavgb m0, m3 - - ; store 4 lines - movd [dstq ], m0 - psrlq m0, 8 - movd [dstq+strideq], m0 - lea dstq, [dstq+strideq*2] - psrlq m0, 8 - movd [dstq ], m0 - psrlq m0, 8 - movd [dstq+strideq], m0 - - RESTORE_GOT - RET - -INIT_MMX ssse3 -cglobal d45_predictor_8x8, 3, 4, 4, dst, stride, above, goffset - GET_GOT goffsetq - - movq m0, [aboveq] - mova m1, [GLOBAL(sh_b12345677)] - DEFINE_ARGS dst, stride, stride3 - lea stride3q, [strideq*3] - pshufb m2, m0, 
[GLOBAL(sh_b23456777)] - pavgb m3, m2, m0 - pxor m2, m0 - pshufb m0, m1 - pand m2, [GLOBAL(pb_1)] - psubb m3, m2 - pavgb m0, m3 - - ; store 4 lines - movq [dstq ], m0 - pshufb m0, m1 - movq [dstq+strideq ], m0 - pshufb m0, m1 - movq [dstq+strideq*2], m0 - pshufb m0, m1 - movq [dstq+stride3q ], m0 - pshufb m0, m1 - lea dstq, [dstq+strideq*4] - - ; store next 4 lines - movq [dstq ], m0 - pshufb m0, m1 - movq [dstq+strideq ], m0 - pshufb m0, m1 - movq [dstq+strideq*2], m0 - pshufb m0, m1 - movq [dstq+stride3q ], m0 - - RESTORE_GOT - RET - INIT_XMM ssse3 cglobal d45_predictor_16x16, 3, 6, 4, dst, stride, above, dst8, line, goffset GET_GOT goffsetq @@ -715,28 +646,6 @@ cglobal d153_predictor_32x32, 4, 5, 8, dst, stride, above, left, goffset RESTORE_GOT RET -INIT_MMX ssse3 -cglobal d207_predictor_4x4, 4, 5, 4, dst, stride, unused, left, goffset - GET_GOT goffsetq - movd m0, [leftq] ; abcd [byte] - pshufb m1, m0, [GLOBAL(sh_b1233)] ; bcdd [byte] - pshufb m3, m0, [GLOBAL(sh_b2333)] ; cddd - - X_PLUS_2Y_PLUS_Z_PLUS_2_RSH_2 m0, m1, m3, m2 - pavgb m1, m0 ; ab, bc, cd, d [byte] - - punpcklbw m1, m2 ; ab, a2bc, bc, b2cd, cd, c3d, d, d - movd [dstq ], m1 - psrlq m1, 16 ; bc, b2cd, cd, c3d, d, d - movd [dstq+strideq], m1 - lea dstq, [dstq+strideq*2] - psrlq m1, 16 ; cd, c3d, d, d - movd [dstq ], m1 - pshufw m1, m1, q1111 ; d, d, d, d - movd [dstq+strideq], m1 - RESTORE_GOT - RET - INIT_XMM ssse3 cglobal d207_predictor_8x8, 4, 5, 4, dst, stride, stride3, left, goffset GET_GOT goffsetq diff --git a/libs/libvpx/vpx_dsp/x86/inv_txfm_sse2.c b/libs/libvpx/vpx_dsp/x86/inv_txfm_sse2.c index ae907fd0bd..cb56ad0789 100644 --- a/libs/libvpx/vpx_dsp/x86/inv_txfm_sse2.c +++ b/libs/libvpx/vpx_dsp/x86/inv_txfm_sse2.c @@ -12,14 +12,14 @@ #include "vpx_dsp/x86/inv_txfm_sse2.h" #include "vpx_dsp/x86/txfm_common_sse2.h" -#define RECON_AND_STORE4X4(dest, in_x) \ -{ \ - __m128i d0 = _mm_cvtsi32_si128(*(const int *)(dest)); \ - d0 = _mm_unpacklo_epi8(d0, zero); \ - d0 = _mm_add_epi16(in_x, d0); \ - d0 
= _mm_packus_epi16(d0, d0); \ - *(int *)(dest) = _mm_cvtsi128_si32(d0); \ -} +#define RECON_AND_STORE4X4(dest, in_x) \ + { \ + __m128i d0 = _mm_cvtsi32_si128(*(const int *)(dest)); \ + d0 = _mm_unpacklo_epi8(d0, zero); \ + d0 = _mm_add_epi16(in_x, d0); \ + d0 = _mm_packus_epi16(d0, d0); \ + *(int *)(dest) = _mm_cvtsi128_si32(d0); \ + } void vpx_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride) { @@ -158,8 +158,8 @@ void vpx_idct4x4_1_add_sse2(const tran_low_t *input, uint8_t *dest, const __m128i zero = _mm_setzero_si128(); int a; - a = dct_const_round_shift(input[0] * cospi_16_64); - a = dct_const_round_shift(a * cospi_16_64); + a = (int)dct_const_round_shift(input[0] * cospi_16_64); + a = (int)dct_const_round_shift(a * cospi_16_64); a = ROUND_POWER_OF_TWO(a, 4); dc_value = _mm_set1_epi16(a); @@ -263,192 +263,189 @@ void iadst4_sse2(__m128i *in) { in[1] = _mm_packs_epi32(u[2], u[3]); } -#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) \ - { \ - const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \ - const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \ - const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1); \ - const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3); \ - const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5); \ - const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7); \ - const __m128i tr0_6 = _mm_unpackhi_epi16(in4, in5); \ - const __m128i tr0_7 = _mm_unpackhi_epi16(in6, in7); \ - \ - const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \ - const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); \ - const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \ - const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); \ - const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \ - const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); \ - const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \ - const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); \ - \ - out0 = 
_mm_unpacklo_epi64(tr1_0, tr1_4); \ - out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \ - out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \ - out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \ - out4 = _mm_unpacklo_epi64(tr1_1, tr1_5); \ - out5 = _mm_unpackhi_epi64(tr1_1, tr1_5); \ - out6 = _mm_unpacklo_epi64(tr1_3, tr1_7); \ - out7 = _mm_unpackhi_epi64(tr1_3, tr1_7); \ +#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3, out4, out5, out6, out7) \ + { \ + const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \ + const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \ + const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1); \ + const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3); \ + const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5); \ + const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7); \ + const __m128i tr0_6 = _mm_unpackhi_epi16(in4, in5); \ + const __m128i tr0_7 = _mm_unpackhi_epi16(in6, in7); \ + \ + const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \ + const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3); \ + const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \ + const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3); \ + const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \ + const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7); \ + const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \ + const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7); \ + \ + out0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \ + out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \ + out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \ + out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \ + out4 = _mm_unpacklo_epi64(tr1_1, tr1_5); \ + out5 = _mm_unpackhi_epi64(tr1_1, tr1_5); \ + out6 = _mm_unpacklo_epi64(tr1_3, tr1_7); \ + out7 = _mm_unpackhi_epi64(tr1_3, tr1_7); \ } -#define TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, \ - out0, out1, out2, out3) \ - { \ - const __m128i tr0_0 = _mm_unpackhi_epi16(tmp0, tmp1); \ - const __m128i tr0_1 = _mm_unpacklo_epi16(tmp1, tmp0); \ - const __m128i tr0_4 = 
_mm_unpacklo_epi16(tmp2, tmp3); \ - const __m128i tr0_5 = _mm_unpackhi_epi16(tmp3, tmp2); \ - \ - const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \ - const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \ - const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \ - const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \ - \ - out0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \ - out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \ - out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \ - out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \ +#define TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, out0, out1, out2, out3) \ + { \ + const __m128i tr0_0 = _mm_unpackhi_epi16(tmp0, tmp1); \ + const __m128i tr0_1 = _mm_unpacklo_epi16(tmp1, tmp0); \ + const __m128i tr0_4 = _mm_unpacklo_epi16(tmp2, tmp3); \ + const __m128i tr0_5 = _mm_unpackhi_epi16(tmp3, tmp2); \ + \ + const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \ + const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1); \ + const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5); \ + const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5); \ + \ + out0 = _mm_unpacklo_epi64(tr1_0, tr1_4); \ + out1 = _mm_unpackhi_epi64(tr1_0, tr1_4); \ + out2 = _mm_unpacklo_epi64(tr1_2, tr1_6); \ + out3 = _mm_unpackhi_epi64(tr1_2, tr1_6); \ } #define TRANSPOSE_8X8_10(in0, in1, in2, in3, out0, out1) \ - { \ - const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \ - const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \ - out0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \ - out1 = _mm_unpackhi_epi32(tr0_0, tr0_1); \ + { \ + const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \ + const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \ + out0 = _mm_unpacklo_epi32(tr0_0, tr0_1); \ + out1 = _mm_unpackhi_epi32(tr0_0, tr0_1); \ } // Define Macro for multiplying elements by constants and adding them together. 
-#define MULTIPLICATION_AND_ADD(lo_0, hi_0, lo_1, hi_1, \ - cst0, cst1, cst2, cst3, res0, res1, res2, res3) \ - { \ - tmp0 = _mm_madd_epi16(lo_0, cst0); \ - tmp1 = _mm_madd_epi16(hi_0, cst0); \ - tmp2 = _mm_madd_epi16(lo_0, cst1); \ - tmp3 = _mm_madd_epi16(hi_0, cst1); \ - tmp4 = _mm_madd_epi16(lo_1, cst2); \ - tmp5 = _mm_madd_epi16(hi_1, cst2); \ - tmp6 = _mm_madd_epi16(lo_1, cst3); \ - tmp7 = _mm_madd_epi16(hi_1, cst3); \ - \ - tmp0 = _mm_add_epi32(tmp0, rounding); \ - tmp1 = _mm_add_epi32(tmp1, rounding); \ - tmp2 = _mm_add_epi32(tmp2, rounding); \ - tmp3 = _mm_add_epi32(tmp3, rounding); \ - tmp4 = _mm_add_epi32(tmp4, rounding); \ - tmp5 = _mm_add_epi32(tmp5, rounding); \ - tmp6 = _mm_add_epi32(tmp6, rounding); \ - tmp7 = _mm_add_epi32(tmp7, rounding); \ - \ - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ - tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \ - tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \ - tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \ - tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \ - \ - res0 = _mm_packs_epi32(tmp0, tmp1); \ - res1 = _mm_packs_epi32(tmp2, tmp3); \ - res2 = _mm_packs_epi32(tmp4, tmp5); \ - res3 = _mm_packs_epi32(tmp6, tmp7); \ +#define MULTIPLICATION_AND_ADD(lo_0, hi_0, lo_1, hi_1, cst0, cst1, cst2, cst3, \ + res0, res1, res2, res3) \ + { \ + tmp0 = _mm_madd_epi16(lo_0, cst0); \ + tmp1 = _mm_madd_epi16(hi_0, cst0); \ + tmp2 = _mm_madd_epi16(lo_0, cst1); \ + tmp3 = _mm_madd_epi16(hi_0, cst1); \ + tmp4 = _mm_madd_epi16(lo_1, cst2); \ + tmp5 = _mm_madd_epi16(hi_1, cst2); \ + tmp6 = _mm_madd_epi16(lo_1, cst3); \ + tmp7 = _mm_madd_epi16(hi_1, cst3); \ + \ + tmp0 = _mm_add_epi32(tmp0, rounding); \ + tmp1 = _mm_add_epi32(tmp1, rounding); \ + tmp2 = _mm_add_epi32(tmp2, rounding); \ + tmp3 = _mm_add_epi32(tmp3, rounding); \ + tmp4 = _mm_add_epi32(tmp4, rounding); \ + tmp5 = _mm_add_epi32(tmp5, 
rounding); \ + tmp6 = _mm_add_epi32(tmp6, rounding); \ + tmp7 = _mm_add_epi32(tmp7, rounding); \ + \ + tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ + tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ + tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ + tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ + tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS); \ + tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); \ + tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS); \ + tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); \ + \ + res0 = _mm_packs_epi32(tmp0, tmp1); \ + res1 = _mm_packs_epi32(tmp2, tmp3); \ + res2 = _mm_packs_epi32(tmp4, tmp5); \ + res3 = _mm_packs_epi32(tmp6, tmp7); \ } #define MULTIPLICATION_AND_ADD_2(lo_0, hi_0, cst0, cst1, res0, res1) \ - { \ - tmp0 = _mm_madd_epi16(lo_0, cst0); \ - tmp1 = _mm_madd_epi16(hi_0, cst0); \ - tmp2 = _mm_madd_epi16(lo_0, cst1); \ - tmp3 = _mm_madd_epi16(hi_0, cst1); \ - \ - tmp0 = _mm_add_epi32(tmp0, rounding); \ - tmp1 = _mm_add_epi32(tmp1, rounding); \ - tmp2 = _mm_add_epi32(tmp2, rounding); \ - tmp3 = _mm_add_epi32(tmp3, rounding); \ - \ - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ - \ - res0 = _mm_packs_epi32(tmp0, tmp1); \ - res1 = _mm_packs_epi32(tmp2, tmp3); \ + { \ + tmp0 = _mm_madd_epi16(lo_0, cst0); \ + tmp1 = _mm_madd_epi16(hi_0, cst0); \ + tmp2 = _mm_madd_epi16(lo_0, cst1); \ + tmp3 = _mm_madd_epi16(hi_0, cst1); \ + \ + tmp0 = _mm_add_epi32(tmp0, rounding); \ + tmp1 = _mm_add_epi32(tmp1, rounding); \ + tmp2 = _mm_add_epi32(tmp2, rounding); \ + tmp3 = _mm_add_epi32(tmp3, rounding); \ + \ + tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ + tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ + tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ + tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ + \ + res0 = _mm_packs_epi32(tmp0, tmp1); \ + res1 = _mm_packs_epi32(tmp2, tmp3); \ } -#define IDCT8(in0, in1, in2, 
in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) \ - { \ - /* Stage1 */ \ - { \ - const __m128i lo_17 = _mm_unpacklo_epi16(in1, in7); \ - const __m128i hi_17 = _mm_unpackhi_epi16(in1, in7); \ - const __m128i lo_35 = _mm_unpacklo_epi16(in3, in5); \ - const __m128i hi_35 = _mm_unpackhi_epi16(in3, in5); \ - \ - MULTIPLICATION_AND_ADD(lo_17, hi_17, lo_35, hi_35, stg1_0, \ - stg1_1, stg1_2, stg1_3, stp1_4, \ - stp1_7, stp1_5, stp1_6) \ - } \ - \ - /* Stage2 */ \ - { \ - const __m128i lo_04 = _mm_unpacklo_epi16(in0, in4); \ - const __m128i hi_04 = _mm_unpackhi_epi16(in0, in4); \ - const __m128i lo_26 = _mm_unpacklo_epi16(in2, in6); \ - const __m128i hi_26 = _mm_unpackhi_epi16(in2, in6); \ - \ - MULTIPLICATION_AND_ADD(lo_04, hi_04, lo_26, hi_26, stg2_0, \ - stg2_1, stg2_2, stg2_3, stp2_0, \ - stp2_1, stp2_2, stp2_3) \ - \ - stp2_4 = _mm_adds_epi16(stp1_4, stp1_5); \ - stp2_5 = _mm_subs_epi16(stp1_4, stp1_5); \ - stp2_6 = _mm_subs_epi16(stp1_7, stp1_6); \ - stp2_7 = _mm_adds_epi16(stp1_7, stp1_6); \ - } \ - \ - /* Stage3 */ \ - { \ - const __m128i lo_56 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ - const __m128i hi_56 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ - \ - stp1_0 = _mm_adds_epi16(stp2_0, stp2_3); \ - stp1_1 = _mm_adds_epi16(stp2_1, stp2_2); \ - stp1_2 = _mm_subs_epi16(stp2_1, stp2_2); \ - stp1_3 = _mm_subs_epi16(stp2_0, stp2_3); \ - \ - tmp0 = _mm_madd_epi16(lo_56, stg2_1); \ - tmp1 = _mm_madd_epi16(hi_56, stg2_1); \ - tmp2 = _mm_madd_epi16(lo_56, stg2_0); \ - tmp3 = _mm_madd_epi16(hi_56, stg2_0); \ - \ - tmp0 = _mm_add_epi32(tmp0, rounding); \ - tmp1 = _mm_add_epi32(tmp1, rounding); \ - tmp2 = _mm_add_epi32(tmp2, rounding); \ - tmp3 = _mm_add_epi32(tmp3, rounding); \ - \ - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ - \ - stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ - stp1_6 = _mm_packs_epi32(tmp2, 
tmp3); \ - } \ - \ - /* Stage4 */ \ - out0 = _mm_adds_epi16(stp1_0, stp2_7); \ - out1 = _mm_adds_epi16(stp1_1, stp1_6); \ - out2 = _mm_adds_epi16(stp1_2, stp1_5); \ - out3 = _mm_adds_epi16(stp1_3, stp2_4); \ - out4 = _mm_subs_epi16(stp1_3, stp2_4); \ - out5 = _mm_subs_epi16(stp1_2, stp1_5); \ - out6 = _mm_subs_epi16(stp1_1, stp1_6); \ - out7 = _mm_subs_epi16(stp1_0, stp2_7); \ +#define IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3, \ + out4, out5, out6, out7) \ + { \ + /* Stage1 */ \ + { \ + const __m128i lo_17 = _mm_unpacklo_epi16(in1, in7); \ + const __m128i hi_17 = _mm_unpackhi_epi16(in1, in7); \ + const __m128i lo_35 = _mm_unpacklo_epi16(in3, in5); \ + const __m128i hi_35 = _mm_unpackhi_epi16(in3, in5); \ + \ + MULTIPLICATION_AND_ADD(lo_17, hi_17, lo_35, hi_35, stg1_0, stg1_1, \ + stg1_2, stg1_3, stp1_4, stp1_7, stp1_5, stp1_6) \ + } \ + \ + /* Stage2 */ \ + { \ + const __m128i lo_04 = _mm_unpacklo_epi16(in0, in4); \ + const __m128i hi_04 = _mm_unpackhi_epi16(in0, in4); \ + const __m128i lo_26 = _mm_unpacklo_epi16(in2, in6); \ + const __m128i hi_26 = _mm_unpackhi_epi16(in2, in6); \ + \ + MULTIPLICATION_AND_ADD(lo_04, hi_04, lo_26, hi_26, stg2_0, stg2_1, \ + stg2_2, stg2_3, stp2_0, stp2_1, stp2_2, stp2_3) \ + \ + stp2_4 = _mm_adds_epi16(stp1_4, stp1_5); \ + stp2_5 = _mm_subs_epi16(stp1_4, stp1_5); \ + stp2_6 = _mm_subs_epi16(stp1_7, stp1_6); \ + stp2_7 = _mm_adds_epi16(stp1_7, stp1_6); \ + } \ + \ + /* Stage3 */ \ + { \ + const __m128i lo_56 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ + const __m128i hi_56 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ + \ + stp1_0 = _mm_adds_epi16(stp2_0, stp2_3); \ + stp1_1 = _mm_adds_epi16(stp2_1, stp2_2); \ + stp1_2 = _mm_subs_epi16(stp2_1, stp2_2); \ + stp1_3 = _mm_subs_epi16(stp2_0, stp2_3); \ + \ + tmp0 = _mm_madd_epi16(lo_56, stg2_1); \ + tmp1 = _mm_madd_epi16(hi_56, stg2_1); \ + tmp2 = _mm_madd_epi16(lo_56, stg2_0); \ + tmp3 = _mm_madd_epi16(hi_56, stg2_0); \ + \ + tmp0 = _mm_add_epi32(tmp0, rounding); \ + 
tmp1 = _mm_add_epi32(tmp1, rounding); \ + tmp2 = _mm_add_epi32(tmp2, rounding); \ + tmp3 = _mm_add_epi32(tmp3, rounding); \ + \ + tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ + tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ + tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ + tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ + \ + stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ + stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ + } \ + \ + /* Stage4 */ \ + out0 = _mm_adds_epi16(stp1_0, stp2_7); \ + out1 = _mm_adds_epi16(stp1_1, stp1_6); \ + out2 = _mm_adds_epi16(stp1_2, stp1_5); \ + out3 = _mm_adds_epi16(stp1_3, stp2_4); \ + out4 = _mm_subs_epi16(stp1_3, stp2_4); \ + out5 = _mm_subs_epi16(stp1_2, stp1_5); \ + out6 = _mm_subs_epi16(stp1_1, stp1_6); \ + out7 = _mm_subs_epi16(stp1_0, stp2_7); \ } void vpx_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, @@ -484,12 +481,12 @@ void vpx_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, // 2-D for (i = 0; i < 2; i++) { // 8x8 Transpose is copied from vpx_fdct8x8_sse2() - TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, + in4, in5, in6, in7); // 4-stage 1D idct8x8 - IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); + IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in5, + in6, in7); } // Final rounding and shift @@ -527,8 +524,8 @@ void vpx_idct8x8_1_add_sse2(const tran_low_t *input, uint8_t *dest, const __m128i zero = _mm_setzero_si128(); int a; - a = dct_const_round_shift(input[0] * cospi_16_64); - a = dct_const_round_shift(a * cospi_16_64); + a = (int)dct_const_round_shift(input[0] * cospi_16_64); + a = (int)dct_const_round_shift(a * cospi_16_64); a = ROUND_POWER_OF_TWO(a, 5); dc_value = _mm_set1_epi16(a); @@ -560,12 +557,12 @@ void idct8_sse2(__m128i *in) { __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; // 8x8 Transpose is 
copied from vpx_fdct8x8_sse2() - TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], - in0, in1, in2, in3, in4, in5, in6, in7); + TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], in0, + in1, in2, in3, in4, in5, in6, in7); // 4-stage 1D idct8x8 - IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, - in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7]); + IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in[0], in[1], in[2], in[3], + in[4], in[5], in[6], in[7]); } void iadst8_sse2(__m128i *in) { @@ -906,8 +903,8 @@ void vpx_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest, TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, in0, in1, in2, in3) - IDCT8(in0, in1, in2, in3, zero, zero, zero, zero, - in0, in1, in2, in3, in4, in5, in6, in7); + IDCT8(in0, in1, in2, in3, zero, zero, zero, zero, in0, in1, in2, in3, in4, + in5, in6, in7); // Final rounding and shift in0 = _mm_adds_epi16(in0, final_rounding); in1 = _mm_adds_epi16(in1, final_rounding); @@ -937,242 +934,234 @@ void vpx_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest, RECON_AND_STORE(dest + 7 * stride, in7); } -#define IDCT16 \ - /* Stage2 */ \ - { \ - const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], in[15]); \ - const __m128i hi_1_15 = _mm_unpackhi_epi16(in[1], in[15]); \ - const __m128i lo_9_7 = _mm_unpacklo_epi16(in[9], in[7]); \ - const __m128i hi_9_7 = _mm_unpackhi_epi16(in[9], in[7]); \ - const __m128i lo_5_11 = _mm_unpacklo_epi16(in[5], in[11]); \ - const __m128i hi_5_11 = _mm_unpackhi_epi16(in[5], in[11]); \ - const __m128i lo_13_3 = _mm_unpacklo_epi16(in[13], in[3]); \ - const __m128i hi_13_3 = _mm_unpackhi_epi16(in[13], in[3]); \ - \ - MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_9_7, hi_9_7, \ - stg2_0, stg2_1, stg2_2, stg2_3, \ - stp2_8, stp2_15, stp2_9, stp2_14) \ - \ - MULTIPLICATION_AND_ADD(lo_5_11, hi_5_11, lo_13_3, hi_13_3, \ - stg2_4, stg2_5, stg2_6, stg2_7, \ - stp2_10, stp2_13, stp2_11, stp2_12) \ - } \ - \ - /* Stage3 */ \ - { \ - const __m128i 
lo_2_14 = _mm_unpacklo_epi16(in[2], in[14]); \ - const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], in[14]); \ - const __m128i lo_10_6 = _mm_unpacklo_epi16(in[10], in[6]); \ - const __m128i hi_10_6 = _mm_unpackhi_epi16(in[10], in[6]); \ - \ - MULTIPLICATION_AND_ADD(lo_2_14, hi_2_14, lo_10_6, hi_10_6, \ - stg3_0, stg3_1, stg3_2, stg3_3, \ - stp1_4, stp1_7, stp1_5, stp1_6) \ - \ - stp1_8_0 = _mm_add_epi16(stp2_8, stp2_9); \ - stp1_9 = _mm_sub_epi16(stp2_8, stp2_9); \ - stp1_10 = _mm_sub_epi16(stp2_11, stp2_10); \ - stp1_11 = _mm_add_epi16(stp2_11, stp2_10); \ - \ - stp1_12_0 = _mm_add_epi16(stp2_12, stp2_13); \ - stp1_13 = _mm_sub_epi16(stp2_12, stp2_13); \ - stp1_14 = _mm_sub_epi16(stp2_15, stp2_14); \ - stp1_15 = _mm_add_epi16(stp2_15, stp2_14); \ - } \ - \ - /* Stage4 */ \ - { \ - const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], in[8]); \ - const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], in[8]); \ - const __m128i lo_4_12 = _mm_unpacklo_epi16(in[4], in[12]); \ - const __m128i hi_4_12 = _mm_unpackhi_epi16(in[4], in[12]); \ - \ - const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \ - const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ - const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ - \ - MULTIPLICATION_AND_ADD(lo_0_8, hi_0_8, lo_4_12, hi_4_12, \ - stg4_0, stg4_1, stg4_2, stg4_3, \ - stp2_0, stp2_1, stp2_2, stp2_3) \ - \ - stp2_4 = _mm_add_epi16(stp1_4, stp1_5); \ - stp2_5 = _mm_sub_epi16(stp1_4, stp1_5); \ - stp2_6 = _mm_sub_epi16(stp1_7, stp1_6); \ - stp2_7 = _mm_add_epi16(stp1_7, stp1_6); \ - \ - MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, \ - stg4_4, stg4_5, stg4_6, stg4_7, \ - stp2_9, stp2_14, stp2_10, stp2_13) \ - } \ - \ - /* Stage5 */ \ - { \ - const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ - const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ - \ - stp1_0 = _mm_add_epi16(stp2_0, stp2_3); \ - stp1_1 = 
_mm_add_epi16(stp2_1, stp2_2); \ - stp1_2 = _mm_sub_epi16(stp2_1, stp2_2); \ - stp1_3 = _mm_sub_epi16(stp2_0, stp2_3); \ - \ - tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \ - tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \ - tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \ - tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \ - \ - tmp0 = _mm_add_epi32(tmp0, rounding); \ - tmp1 = _mm_add_epi32(tmp1, rounding); \ - tmp2 = _mm_add_epi32(tmp2, rounding); \ - tmp3 = _mm_add_epi32(tmp3, rounding); \ - \ - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ - \ - stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ - stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ - \ - stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11); \ - stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \ - stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \ - stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11); \ - \ - stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0); \ - stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \ - stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \ - stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0); \ - } \ - \ - /* Stage6 */ \ - { \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ - const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ - const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \ - const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \ - \ - stp2_0 = _mm_add_epi16(stp1_0, stp2_7); \ - stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \ - stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \ - stp2_3 = _mm_add_epi16(stp1_3, stp2_4); \ - stp2_4 = _mm_sub_epi16(stp1_3, stp2_4); \ - stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \ - stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \ - stp2_7 = _mm_sub_epi16(stp1_0, stp2_7); \ - \ - MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \ - stg6_0, stg4_0, stg6_0, stg4_0, \ - stp2_10, stp2_13, stp2_11, stp2_12) \ +#define IDCT16 \ + /* Stage2 */ \ + { \ + const 
__m128i lo_1_15 = _mm_unpacklo_epi16(in[1], in[15]); \ + const __m128i hi_1_15 = _mm_unpackhi_epi16(in[1], in[15]); \ + const __m128i lo_9_7 = _mm_unpacklo_epi16(in[9], in[7]); \ + const __m128i hi_9_7 = _mm_unpackhi_epi16(in[9], in[7]); \ + const __m128i lo_5_11 = _mm_unpacklo_epi16(in[5], in[11]); \ + const __m128i hi_5_11 = _mm_unpackhi_epi16(in[5], in[11]); \ + const __m128i lo_13_3 = _mm_unpacklo_epi16(in[13], in[3]); \ + const __m128i hi_13_3 = _mm_unpackhi_epi16(in[13], in[3]); \ + \ + MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_9_7, hi_9_7, stg2_0, stg2_1, \ + stg2_2, stg2_3, stp2_8, stp2_15, stp2_9, stp2_14) \ + \ + MULTIPLICATION_AND_ADD(lo_5_11, hi_5_11, lo_13_3, hi_13_3, stg2_4, stg2_5, \ + stg2_6, stg2_7, stp2_10, stp2_13, stp2_11, stp2_12) \ + } \ + \ + /* Stage3 */ \ + { \ + const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], in[14]); \ + const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], in[14]); \ + const __m128i lo_10_6 = _mm_unpacklo_epi16(in[10], in[6]); \ + const __m128i hi_10_6 = _mm_unpackhi_epi16(in[10], in[6]); \ + \ + MULTIPLICATION_AND_ADD(lo_2_14, hi_2_14, lo_10_6, hi_10_6, stg3_0, stg3_1, \ + stg3_2, stg3_3, stp1_4, stp1_7, stp1_5, stp1_6) \ + \ + stp1_8_0 = _mm_add_epi16(stp2_8, stp2_9); \ + stp1_9 = _mm_sub_epi16(stp2_8, stp2_9); \ + stp1_10 = _mm_sub_epi16(stp2_11, stp2_10); \ + stp1_11 = _mm_add_epi16(stp2_11, stp2_10); \ + \ + stp1_12_0 = _mm_add_epi16(stp2_12, stp2_13); \ + stp1_13 = _mm_sub_epi16(stp2_12, stp2_13); \ + stp1_14 = _mm_sub_epi16(stp2_15, stp2_14); \ + stp1_15 = _mm_add_epi16(stp2_15, stp2_14); \ + } \ + \ + /* Stage4 */ \ + { \ + const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], in[8]); \ + const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], in[8]); \ + const __m128i lo_4_12 = _mm_unpacklo_epi16(in[4], in[12]); \ + const __m128i hi_4_12 = _mm_unpackhi_epi16(in[4], in[12]); \ + \ + const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \ + const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \ + const __m128i 
lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ + const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ + \ + MULTIPLICATION_AND_ADD(lo_0_8, hi_0_8, lo_4_12, hi_4_12, stg4_0, stg4_1, \ + stg4_2, stg4_3, stp2_0, stp2_1, stp2_2, stp2_3) \ + \ + stp2_4 = _mm_add_epi16(stp1_4, stp1_5); \ + stp2_5 = _mm_sub_epi16(stp1_4, stp1_5); \ + stp2_6 = _mm_sub_epi16(stp1_7, stp1_6); \ + stp2_7 = _mm_add_epi16(stp1_7, stp1_6); \ + \ + MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4, \ + stg4_5, stg4_6, stg4_7, stp2_9, stp2_14, stp2_10, \ + stp2_13) \ + } \ + \ + /* Stage5 */ \ + { \ + const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ + const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ + \ + stp1_0 = _mm_add_epi16(stp2_0, stp2_3); \ + stp1_1 = _mm_add_epi16(stp2_1, stp2_2); \ + stp1_2 = _mm_sub_epi16(stp2_1, stp2_2); \ + stp1_3 = _mm_sub_epi16(stp2_0, stp2_3); \ + \ + tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \ + tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \ + tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \ + tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \ + \ + tmp0 = _mm_add_epi32(tmp0, rounding); \ + tmp1 = _mm_add_epi32(tmp1, rounding); \ + tmp2 = _mm_add_epi32(tmp2, rounding); \ + tmp3 = _mm_add_epi32(tmp3, rounding); \ + \ + tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ + tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ + tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ + tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ + \ + stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ + stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ + \ + stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11); \ + stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \ + stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \ + stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11); \ + \ + stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0); \ + stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \ + stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \ + stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0); \ + } \ + \ + /* Stage6 */ \ + { \ + const __m128i lo_10_13 = 
_mm_unpacklo_epi16(stp1_10, stp1_13); \ + const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ + const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \ + const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \ + \ + stp2_0 = _mm_add_epi16(stp1_0, stp2_7); \ + stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \ + stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \ + stp2_3 = _mm_add_epi16(stp1_3, stp2_4); \ + stp2_4 = _mm_sub_epi16(stp1_3, stp2_4); \ + stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \ + stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \ + stp2_7 = _mm_sub_epi16(stp1_0, stp2_7); \ + \ + MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, stg6_0, \ + stg4_0, stg6_0, stg4_0, stp2_10, stp2_13, stp2_11, \ + stp2_12) \ } -#define IDCT16_10 \ - /* Stage2 */ \ - { \ - const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], zero); \ - const __m128i hi_1_15 = _mm_unpackhi_epi16(in[1], zero); \ - const __m128i lo_13_3 = _mm_unpacklo_epi16(zero, in[3]); \ - const __m128i hi_13_3 = _mm_unpackhi_epi16(zero, in[3]); \ - \ - MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_13_3, hi_13_3, \ - stg2_0, stg2_1, stg2_6, stg2_7, \ - stp1_8_0, stp1_15, stp1_11, stp1_12_0) \ - } \ - \ - /* Stage3 */ \ - { \ - const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], zero); \ - const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], zero); \ - \ - MULTIPLICATION_AND_ADD_2(lo_2_14, hi_2_14, \ - stg3_0, stg3_1, \ - stp2_4, stp2_7) \ - \ - stp1_9 = stp1_8_0; \ - stp1_10 = stp1_11; \ - \ - stp1_13 = stp1_12_0; \ - stp1_14 = stp1_15; \ - } \ - \ - /* Stage4 */ \ - { \ - const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], zero); \ - const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], zero); \ - \ - const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \ - const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ - const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ - \ - MULTIPLICATION_AND_ADD_2(lo_0_8, hi_0_8, \ - 
stg4_0, stg4_1, \ - stp1_0, stp1_1) \ - stp2_5 = stp2_4; \ - stp2_6 = stp2_7; \ - \ - MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, \ - stg4_4, stg4_5, stg4_6, stg4_7, \ - stp2_9, stp2_14, stp2_10, stp2_13) \ - } \ - \ - /* Stage5 */ \ - { \ - const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ - const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ - \ - stp1_2 = stp1_1; \ - stp1_3 = stp1_0; \ - \ - tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \ - tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \ - tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \ - tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \ - \ - tmp0 = _mm_add_epi32(tmp0, rounding); \ - tmp1 = _mm_add_epi32(tmp1, rounding); \ - tmp2 = _mm_add_epi32(tmp2, rounding); \ - tmp3 = _mm_add_epi32(tmp3, rounding); \ - \ - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ - \ - stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ - stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ - \ - stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11); \ - stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \ - stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \ - stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11); \ - \ - stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0); \ - stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \ - stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \ - stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0); \ - } \ - \ - /* Stage6 */ \ - { \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ - const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ - const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \ - const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \ - \ - stp2_0 = _mm_add_epi16(stp1_0, stp2_7); \ - stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \ - stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \ - stp2_3 = _mm_add_epi16(stp1_3, stp2_4); \ - stp2_4 = _mm_sub_epi16(stp1_3, stp2_4); \ - stp2_5 = 
_mm_sub_epi16(stp1_2, stp1_5); \ - stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \ - stp2_7 = _mm_sub_epi16(stp1_0, stp2_7); \ - \ - MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \ - stg6_0, stg4_0, stg6_0, stg4_0, \ - stp2_10, stp2_13, stp2_11, stp2_12) \ - } +#define IDCT16_10 \ + /* Stage2 */ \ + { \ + const __m128i lo_1_15 = _mm_unpacklo_epi16(in[1], zero); \ + const __m128i hi_1_15 = _mm_unpackhi_epi16(in[1], zero); \ + const __m128i lo_13_3 = _mm_unpacklo_epi16(zero, in[3]); \ + const __m128i hi_13_3 = _mm_unpackhi_epi16(zero, in[3]); \ + \ + MULTIPLICATION_AND_ADD(lo_1_15, hi_1_15, lo_13_3, hi_13_3, stg2_0, stg2_1, \ + stg2_6, stg2_7, stp1_8_0, stp1_15, stp1_11, \ + stp1_12_0) \ + } \ + \ + /* Stage3 */ \ + { \ + const __m128i lo_2_14 = _mm_unpacklo_epi16(in[2], zero); \ + const __m128i hi_2_14 = _mm_unpackhi_epi16(in[2], zero); \ + \ + MULTIPLICATION_AND_ADD_2(lo_2_14, hi_2_14, stg3_0, stg3_1, stp2_4, stp2_7) \ + \ + stp1_9 = stp1_8_0; \ + stp1_10 = stp1_11; \ + \ + stp1_13 = stp1_12_0; \ + stp1_14 = stp1_15; \ + } \ + \ + /* Stage4 */ \ + { \ + const __m128i lo_0_8 = _mm_unpacklo_epi16(in[0], zero); \ + const __m128i hi_0_8 = _mm_unpackhi_epi16(in[0], zero); \ + \ + const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \ + const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \ + const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ + const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ + \ + MULTIPLICATION_AND_ADD_2(lo_0_8, hi_0_8, stg4_0, stg4_1, stp1_0, stp1_1) \ + stp2_5 = stp2_4; \ + stp2_6 = stp2_7; \ + \ + MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4, \ + stg4_5, stg4_6, stg4_7, stp2_9, stp2_14, stp2_10, \ + stp2_13) \ + } \ + \ + /* Stage5 */ \ + { \ + const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ + const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ + \ + stp1_2 = stp1_1; \ + stp1_3 = stp1_0; \ + \ + tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \ + tmp1 = 
_mm_madd_epi16(hi_6_5, stg4_1); \ + tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \ + tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \ + \ + tmp0 = _mm_add_epi32(tmp0, rounding); \ + tmp1 = _mm_add_epi32(tmp1, rounding); \ + tmp2 = _mm_add_epi32(tmp2, rounding); \ + tmp3 = _mm_add_epi32(tmp3, rounding); \ + \ + tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ + tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ + tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ + tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ + \ + stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ + stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ + \ + stp1_8 = _mm_add_epi16(stp1_8_0, stp1_11); \ + stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \ + stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \ + stp1_11 = _mm_sub_epi16(stp1_8_0, stp1_11); \ + \ + stp1_12 = _mm_sub_epi16(stp1_15, stp1_12_0); \ + stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \ + stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \ + stp1_15 = _mm_add_epi16(stp1_15, stp1_12_0); \ + } \ + \ + /* Stage6 */ \ + { \ + const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ + const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ + const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \ + const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \ + \ + stp2_0 = _mm_add_epi16(stp1_0, stp2_7); \ + stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \ + stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \ + stp2_3 = _mm_add_epi16(stp1_3, stp2_4); \ + stp2_4 = _mm_sub_epi16(stp1_3, stp2_4); \ + stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \ + stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \ + stp2_7 = _mm_sub_epi16(stp1_0, stp2_7); \ + \ + MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, stg6_0, \ + stg4_0, stg6_0, stg4_0, stp2_10, stp2_13, stp2_11, \ + stp2_12) \ + } void vpx_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest, int stride) { @@ -1207,10 +1196,10 @@ void vpx_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest, __m128i in[16], l[16], r[16], *curr1; 
__m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7, - stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, - stp1_8_0, stp1_12_0; + stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, + stp1_8_0, stp1_12_0; __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7, - stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15; + stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15; __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; int i; @@ -1305,30 +1294,16 @@ void vpx_idct16x16_1_add_sse2(const tran_low_t *input, uint8_t *dest, const __m128i zero = _mm_setzero_si128(); int a, i; - a = dct_const_round_shift(input[0] * cospi_16_64); - a = dct_const_round_shift(a * cospi_16_64); + a = (int)dct_const_round_shift(input[0] * cospi_16_64); + a = (int)dct_const_round_shift(a * cospi_16_64); a = ROUND_POWER_OF_TWO(a, 6); dc_value = _mm_set1_epi16(a); - for (i = 0; i < 2; ++i) { - RECON_AND_STORE(dest + 0 * stride, dc_value); - RECON_AND_STORE(dest + 1 * stride, dc_value); - RECON_AND_STORE(dest + 2 * stride, dc_value); - RECON_AND_STORE(dest + 3 * stride, dc_value); - RECON_AND_STORE(dest + 4 * stride, dc_value); - RECON_AND_STORE(dest + 5 * stride, dc_value); - RECON_AND_STORE(dest + 6 * stride, dc_value); - RECON_AND_STORE(dest + 7 * stride, dc_value); - RECON_AND_STORE(dest + 8 * stride, dc_value); - RECON_AND_STORE(dest + 9 * stride, dc_value); - RECON_AND_STORE(dest + 10 * stride, dc_value); - RECON_AND_STORE(dest + 11 * stride, dc_value); - RECON_AND_STORE(dest + 12 * stride, dc_value); - RECON_AND_STORE(dest + 13 * stride, dc_value); - RECON_AND_STORE(dest + 14 * stride, dc_value); - RECON_AND_STORE(dest + 15 * stride, dc_value); - dest += 8; + for (i = 0; i < 16; ++i) { + RECON_AND_STORE(dest + 0, dc_value); + RECON_AND_STORE(dest + 8, dc_value); + dest += stride; } } @@ -1905,9 +1880,9 @@ static void idct16_8col(__m128i *in) { u[14] = _mm_srai_epi32(u[14], 
DCT_CONST_BITS); u[15] = _mm_srai_epi32(u[15], DCT_CONST_BITS); - s[8] = _mm_packs_epi32(u[0], u[1]); + s[8] = _mm_packs_epi32(u[0], u[1]); s[15] = _mm_packs_epi32(u[2], u[3]); - s[9] = _mm_packs_epi32(u[4], u[5]); + s[9] = _mm_packs_epi32(u[4], u[5]); s[14] = _mm_packs_epi32(u[6], u[7]); s[10] = _mm_packs_epi32(u[8], u[9]); s[13] = _mm_packs_epi32(u[10], u[11]); @@ -2035,7 +2010,7 @@ static void idct16_8col(__m128i *in) { s[7] = _mm_add_epi16(t[6], t[7]); s[8] = t[8]; s[15] = t[15]; - s[9] = _mm_packs_epi32(u[8], u[9]); + s[9] = _mm_packs_epi32(u[8], u[9]); s[14] = _mm_packs_epi32(u[10], u[11]); s[10] = _mm_packs_epi32(u[12], u[13]); s[13] = _mm_packs_epi32(u[14], u[15]); @@ -2181,11 +2156,11 @@ void vpx_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest, const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64); __m128i in[16], l[16]; - __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, - stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, - stp1_8_0, stp1_12_0; + __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_8, + stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, stp1_8_0, + stp1_12_0; __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7, - stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14; + stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14; __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; int i; // First 1-D inverse DCT @@ -2217,7 +2192,7 @@ void vpx_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest, tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS); tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS); - stp2_8 = _mm_packs_epi32(tmp0, tmp2); + stp2_8 = _mm_packs_epi32(tmp0, tmp2); stp2_11 = _mm_packs_epi32(tmp5, tmp7); } @@ -2281,9 +2256,9 @@ void vpx_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest, tmp2 = _mm_add_epi16(stp2_9, stp2_10); tmp3 = _mm_sub_epi16(stp2_9, stp2_10); - stp1_9 = _mm_unpacklo_epi64(tmp2, zero); + 
stp1_9 = _mm_unpacklo_epi64(tmp2, zero); stp1_10 = _mm_unpacklo_epi64(tmp3, zero); - stp1_8 = _mm_unpacklo_epi64(tmp0, zero); + stp1_8 = _mm_unpacklo_epi64(tmp0, zero); stp1_11 = _mm_unpacklo_epi64(tmp1, zero); stp1_13 = _mm_unpackhi_epi64(tmp3, zero); @@ -2395,650 +2370,647 @@ void vpx_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest, } } -#define LOAD_DQCOEFF(reg, input) \ - { \ +#define LOAD_DQCOEFF(reg, input) \ + { \ reg = load_input_data(input); \ - input += 8; \ - } \ + input += 8; \ + } -#define IDCT32_34 \ -/* Stage1 */ \ -{ \ - const __m128i zero = _mm_setzero_si128();\ - const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], zero); \ - const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], zero); \ - \ - const __m128i lo_25_7= _mm_unpacklo_epi16(zero, in[7]); \ - const __m128i hi_25_7 = _mm_unpackhi_epi16(zero, in[7]); \ - \ - const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], zero); \ - const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], zero); \ - \ - const __m128i lo_29_3 = _mm_unpacklo_epi16(zero, in[3]); \ - const __m128i hi_29_3 = _mm_unpackhi_epi16(zero, in[3]); \ - \ - MULTIPLICATION_AND_ADD_2(lo_1_31, hi_1_31, stg1_0, \ - stg1_1, stp1_16, stp1_31); \ - MULTIPLICATION_AND_ADD_2(lo_25_7, hi_25_7, stg1_6, \ - stg1_7, stp1_19, stp1_28); \ - MULTIPLICATION_AND_ADD_2(lo_5_27, hi_5_27, stg1_8, \ - stg1_9, stp1_20, stp1_27); \ - MULTIPLICATION_AND_ADD_2(lo_29_3, hi_29_3, stg1_14, \ - stg1_15, stp1_23, stp1_24); \ -} \ -\ -/* Stage2 */ \ -{ \ - const __m128i zero = _mm_setzero_si128();\ - const __m128i lo_2_30 = _mm_unpacklo_epi16(in[2], zero); \ - const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], zero); \ - \ - const __m128i lo_26_6 = _mm_unpacklo_epi16(zero, in[6]); \ - const __m128i hi_26_6 = _mm_unpackhi_epi16(zero, in[6]); \ - \ - MULTIPLICATION_AND_ADD_2(lo_2_30, hi_2_30, stg2_0, \ - stg2_1, stp2_8, stp2_15); \ - MULTIPLICATION_AND_ADD_2(lo_26_6, hi_26_6, stg2_6, \ - stg2_7, stp2_11, stp2_12); \ - \ - stp2_16 = stp1_16; \ - stp2_19 = stp1_19; \ - \ 
- stp2_20 = stp1_20; \ - stp2_23 = stp1_23; \ - \ - stp2_24 = stp1_24; \ - stp2_27 = stp1_27; \ - \ - stp2_28 = stp1_28; \ - stp2_31 = stp1_31; \ -} \ -\ -/* Stage3 */ \ -{ \ - const __m128i zero = _mm_setzero_si128();\ - const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], zero); \ - const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], zero); \ - \ - const __m128i lo_17_30 = _mm_unpacklo_epi16(stp1_16, stp1_31); \ - const __m128i hi_17_30 = _mm_unpackhi_epi16(stp1_16, stp1_31); \ - const __m128i lo_18_29 = _mm_unpacklo_epi16(stp1_19, stp1_28); \ - const __m128i hi_18_29 = _mm_unpackhi_epi16(stp1_19, stp1_28); \ - \ - const __m128i lo_21_26 = _mm_unpacklo_epi16(stp1_20, stp1_27); \ - const __m128i hi_21_26 = _mm_unpackhi_epi16(stp1_20, stp1_27); \ - const __m128i lo_22_25 = _mm_unpacklo_epi16(stp1_23, stp1_24); \ - const __m128i hi_22_25 = _mm_unpackhi_epi16(stp1_23, stp2_24); \ - \ - MULTIPLICATION_AND_ADD_2(lo_4_28, hi_4_28, stg3_0, \ - stg3_1, stp1_4, stp1_7); \ - \ - stp1_8 = stp2_8; \ - stp1_11 = stp2_11; \ - stp1_12 = stp2_12; \ - stp1_15 = stp2_15; \ - \ - MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4, \ - stg3_5, stg3_6, stg3_4, stp1_17, stp1_30, \ - stp1_18, stp1_29) \ - MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8, \ - stg3_9, stg3_10, stg3_8, stp1_21, stp1_26, \ - stp1_22, stp1_25) \ - \ - stp1_16 = stp2_16; \ - stp1_31 = stp2_31; \ - stp1_19 = stp2_19; \ - stp1_20 = stp2_20; \ - stp1_23 = stp2_23; \ - stp1_24 = stp2_24; \ - stp1_27 = stp2_27; \ - stp1_28 = stp2_28; \ -} \ -\ -/* Stage4 */ \ -{ \ - const __m128i zero = _mm_setzero_si128();\ - const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], zero); \ - const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], zero); \ - \ - const __m128i lo_9_14 = _mm_unpacklo_epi16(stp2_8, stp2_15); \ - const __m128i hi_9_14 = _mm_unpackhi_epi16(stp2_8, stp2_15); \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp2_11, stp2_12); \ - const __m128i hi_10_13 = _mm_unpackhi_epi16(stp2_11, 
stp2_12); \ - \ - MULTIPLICATION_AND_ADD_2(lo_0_16, hi_0_16, stg4_0, \ - stg4_1, stp2_0, stp2_1); \ - \ - stp2_4 = stp1_4; \ - stp2_5 = stp1_4; \ - stp2_6 = stp1_7; \ - stp2_7 = stp1_7; \ - \ - MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4, \ - stg4_5, stg4_6, stg4_4, stp2_9, stp2_14, \ - stp2_10, stp2_13) \ - \ - stp2_8 = stp1_8; \ - stp2_15 = stp1_15; \ - stp2_11 = stp1_11; \ - stp2_12 = stp1_12; \ - \ - stp2_16 = _mm_add_epi16(stp1_16, stp1_19); \ - stp2_17 = _mm_add_epi16(stp1_17, stp1_18); \ - stp2_18 = _mm_sub_epi16(stp1_17, stp1_18); \ - stp2_19 = _mm_sub_epi16(stp1_16, stp1_19); \ - stp2_20 = _mm_sub_epi16(stp1_23, stp1_20); \ - stp2_21 = _mm_sub_epi16(stp1_22, stp1_21); \ - stp2_22 = _mm_add_epi16(stp1_22, stp1_21); \ - stp2_23 = _mm_add_epi16(stp1_23, stp1_20); \ - \ - stp2_24 = _mm_add_epi16(stp1_24, stp1_27); \ - stp2_25 = _mm_add_epi16(stp1_25, stp1_26); \ - stp2_26 = _mm_sub_epi16(stp1_25, stp1_26); \ - stp2_27 = _mm_sub_epi16(stp1_24, stp1_27); \ - stp2_28 = _mm_sub_epi16(stp1_31, stp1_28); \ - stp2_29 = _mm_sub_epi16(stp1_30, stp1_29); \ - stp2_30 = _mm_add_epi16(stp1_29, stp1_30); \ - stp2_31 = _mm_add_epi16(stp1_28, stp1_31); \ -} \ -\ -/* Stage5 */ \ -{ \ - const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ - const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ - const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29); \ - const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \ - \ - const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28); \ - const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28); \ - const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \ - const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \ - \ - const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ - const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ - \ - stp1_0 = stp2_0; \ - stp1_1 = stp2_1; \ - stp1_2 = stp2_1; \ - stp1_3 = stp2_0; \ - \ - tmp0 = 
_mm_madd_epi16(lo_6_5, stg4_1); \ - tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \ - tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \ - tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \ - \ - tmp0 = _mm_add_epi32(tmp0, rounding); \ - tmp1 = _mm_add_epi32(tmp1, rounding); \ - tmp2 = _mm_add_epi32(tmp2, rounding); \ - tmp3 = _mm_add_epi32(tmp3, rounding); \ - \ - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ - \ - stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ - stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ - \ - stp1_4 = stp2_4; \ - stp1_7 = stp2_7; \ - \ - stp1_8 = _mm_add_epi16(stp2_8, stp2_11); \ - stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \ - stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \ - stp1_11 = _mm_sub_epi16(stp2_8, stp2_11); \ - stp1_12 = _mm_sub_epi16(stp2_15, stp2_12); \ - stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \ - stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \ - stp1_15 = _mm_add_epi16(stp2_15, stp2_12); \ - \ - stp1_16 = stp2_16; \ - stp1_17 = stp2_17; \ - \ - MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4, \ - stg4_5, stg4_4, stg4_5, stp1_18, stp1_29, \ - stp1_19, stp1_28) \ - MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6, \ - stg4_4, stg4_6, stg4_4, stp1_20, stp1_27, \ - stp1_21, stp1_26) \ - \ - stp1_22 = stp2_22; \ - stp1_23 = stp2_23; \ - stp1_24 = stp2_24; \ - stp1_25 = stp2_25; \ - stp1_30 = stp2_30; \ - stp1_31 = stp2_31; \ -} \ -\ -/* Stage6 */ \ -{ \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ - const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ - const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \ - const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \ - \ - stp2_0 = _mm_add_epi16(stp1_0, stp1_7); \ - stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \ - stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \ - stp2_3 = _mm_add_epi16(stp1_3, stp1_4); 
\ - stp2_4 = _mm_sub_epi16(stp1_3, stp1_4); \ - stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \ - stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \ - stp2_7 = _mm_sub_epi16(stp1_0, stp1_7); \ - \ - stp2_8 = stp1_8; \ - stp2_9 = stp1_9; \ - stp2_14 = stp1_14; \ - stp2_15 = stp1_15; \ - \ - MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \ - stg6_0, stg4_0, stg6_0, stg4_0, stp2_10, \ - stp2_13, stp2_11, stp2_12) \ - \ - stp2_16 = _mm_add_epi16(stp1_16, stp1_23); \ - stp2_17 = _mm_add_epi16(stp1_17, stp1_22); \ - stp2_18 = _mm_add_epi16(stp1_18, stp1_21); \ - stp2_19 = _mm_add_epi16(stp1_19, stp1_20); \ - stp2_20 = _mm_sub_epi16(stp1_19, stp1_20); \ - stp2_21 = _mm_sub_epi16(stp1_18, stp1_21); \ - stp2_22 = _mm_sub_epi16(stp1_17, stp1_22); \ - stp2_23 = _mm_sub_epi16(stp1_16, stp1_23); \ - \ - stp2_24 = _mm_sub_epi16(stp1_31, stp1_24); \ - stp2_25 = _mm_sub_epi16(stp1_30, stp1_25); \ - stp2_26 = _mm_sub_epi16(stp1_29, stp1_26); \ - stp2_27 = _mm_sub_epi16(stp1_28, stp1_27); \ - stp2_28 = _mm_add_epi16(stp1_27, stp1_28); \ - stp2_29 = _mm_add_epi16(stp1_26, stp1_29); \ - stp2_30 = _mm_add_epi16(stp1_25, stp1_30); \ - stp2_31 = _mm_add_epi16(stp1_24, stp1_31); \ -} \ -\ -/* Stage7 */ \ -{ \ - const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \ - const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \ - const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ - const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ - \ - const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25); \ - const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \ - const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24); \ - const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24); \ - \ - stp1_0 = _mm_add_epi16(stp2_0, stp2_15); \ - stp1_1 = _mm_add_epi16(stp2_1, stp2_14); \ - stp1_2 = _mm_add_epi16(stp2_2, stp2_13); \ - stp1_3 = _mm_add_epi16(stp2_3, stp2_12); \ - stp1_4 = _mm_add_epi16(stp2_4, stp2_11); \ - stp1_5 = 
_mm_add_epi16(stp2_5, stp2_10); \ - stp1_6 = _mm_add_epi16(stp2_6, stp2_9); \ - stp1_7 = _mm_add_epi16(stp2_7, stp2_8); \ - stp1_8 = _mm_sub_epi16(stp2_7, stp2_8); \ - stp1_9 = _mm_sub_epi16(stp2_6, stp2_9); \ - stp1_10 = _mm_sub_epi16(stp2_5, stp2_10); \ - stp1_11 = _mm_sub_epi16(stp2_4, stp2_11); \ - stp1_12 = _mm_sub_epi16(stp2_3, stp2_12); \ - stp1_13 = _mm_sub_epi16(stp2_2, stp2_13); \ - stp1_14 = _mm_sub_epi16(stp2_1, stp2_14); \ - stp1_15 = _mm_sub_epi16(stp2_0, stp2_15); \ - \ - stp1_16 = stp2_16; \ - stp1_17 = stp2_17; \ - stp1_18 = stp2_18; \ - stp1_19 = stp2_19; \ - \ - MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0, \ - stg4_0, stg6_0, stg4_0, stp1_20, stp1_27, \ - stp1_21, stp1_26) \ - MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0, \ - stg4_0, stg6_0, stg4_0, stp1_22, stp1_25, \ - stp1_23, stp1_24) \ - \ - stp1_28 = stp2_28; \ - stp1_29 = stp2_29; \ - stp1_30 = stp2_30; \ - stp1_31 = stp2_31; \ -} +#define IDCT32_34 \ + /* Stage1 */ \ + { \ + const __m128i zero = _mm_setzero_si128(); \ + const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], zero); \ + const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], zero); \ + \ + const __m128i lo_25_7 = _mm_unpacklo_epi16(zero, in[7]); \ + const __m128i hi_25_7 = _mm_unpackhi_epi16(zero, in[7]); \ + \ + const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], zero); \ + const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], zero); \ + \ + const __m128i lo_29_3 = _mm_unpacklo_epi16(zero, in[3]); \ + const __m128i hi_29_3 = _mm_unpackhi_epi16(zero, in[3]); \ + \ + MULTIPLICATION_AND_ADD_2(lo_1_31, hi_1_31, stg1_0, stg1_1, stp1_16, \ + stp1_31); \ + MULTIPLICATION_AND_ADD_2(lo_25_7, hi_25_7, stg1_6, stg1_7, stp1_19, \ + stp1_28); \ + MULTIPLICATION_AND_ADD_2(lo_5_27, hi_5_27, stg1_8, stg1_9, stp1_20, \ + stp1_27); \ + MULTIPLICATION_AND_ADD_2(lo_29_3, hi_29_3, stg1_14, stg1_15, stp1_23, \ + stp1_24); \ + } \ + \ + /* Stage2 */ \ + { \ + const __m128i zero = _mm_setzero_si128(); \ + const 
__m128i lo_2_30 = _mm_unpacklo_epi16(in[2], zero); \ + const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], zero); \ + \ + const __m128i lo_26_6 = _mm_unpacklo_epi16(zero, in[6]); \ + const __m128i hi_26_6 = _mm_unpackhi_epi16(zero, in[6]); \ + \ + MULTIPLICATION_AND_ADD_2(lo_2_30, hi_2_30, stg2_0, stg2_1, stp2_8, \ + stp2_15); \ + MULTIPLICATION_AND_ADD_2(lo_26_6, hi_26_6, stg2_6, stg2_7, stp2_11, \ + stp2_12); \ + \ + stp2_16 = stp1_16; \ + stp2_19 = stp1_19; \ + \ + stp2_20 = stp1_20; \ + stp2_23 = stp1_23; \ + \ + stp2_24 = stp1_24; \ + stp2_27 = stp1_27; \ + \ + stp2_28 = stp1_28; \ + stp2_31 = stp1_31; \ + } \ + \ + /* Stage3 */ \ + { \ + const __m128i zero = _mm_setzero_si128(); \ + const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], zero); \ + const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], zero); \ + \ + const __m128i lo_17_30 = _mm_unpacklo_epi16(stp1_16, stp1_31); \ + const __m128i hi_17_30 = _mm_unpackhi_epi16(stp1_16, stp1_31); \ + const __m128i lo_18_29 = _mm_unpacklo_epi16(stp1_19, stp1_28); \ + const __m128i hi_18_29 = _mm_unpackhi_epi16(stp1_19, stp1_28); \ + \ + const __m128i lo_21_26 = _mm_unpacklo_epi16(stp1_20, stp1_27); \ + const __m128i hi_21_26 = _mm_unpackhi_epi16(stp1_20, stp1_27); \ + const __m128i lo_22_25 = _mm_unpacklo_epi16(stp1_23, stp1_24); \ + const __m128i hi_22_25 = _mm_unpackhi_epi16(stp1_23, stp2_24); \ + \ + MULTIPLICATION_AND_ADD_2(lo_4_28, hi_4_28, stg3_0, stg3_1, stp1_4, \ + stp1_7); \ + \ + stp1_8 = stp2_8; \ + stp1_11 = stp2_11; \ + stp1_12 = stp2_12; \ + stp1_15 = stp2_15; \ + \ + MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4, \ + stg3_5, stg3_6, stg3_4, stp1_17, stp1_30, stp1_18, \ + stp1_29) \ + MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8, \ + stg3_9, stg3_10, stg3_8, stp1_21, stp1_26, stp1_22, \ + stp1_25) \ + \ + stp1_16 = stp2_16; \ + stp1_31 = stp2_31; \ + stp1_19 = stp2_19; \ + stp1_20 = stp2_20; \ + stp1_23 = stp2_23; \ + stp1_24 = stp2_24; \ + stp1_27 = stp2_27; \ + 
stp1_28 = stp2_28; \ + } \ + \ + /* Stage4 */ \ + { \ + const __m128i zero = _mm_setzero_si128(); \ + const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], zero); \ + const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], zero); \ + \ + const __m128i lo_9_14 = _mm_unpacklo_epi16(stp2_8, stp2_15); \ + const __m128i hi_9_14 = _mm_unpackhi_epi16(stp2_8, stp2_15); \ + const __m128i lo_10_13 = _mm_unpacklo_epi16(stp2_11, stp2_12); \ + const __m128i hi_10_13 = _mm_unpackhi_epi16(stp2_11, stp2_12); \ + \ + MULTIPLICATION_AND_ADD_2(lo_0_16, hi_0_16, stg4_0, stg4_1, stp2_0, \ + stp2_1); \ + \ + stp2_4 = stp1_4; \ + stp2_5 = stp1_4; \ + stp2_6 = stp1_7; \ + stp2_7 = stp1_7; \ + \ + MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4, \ + stg4_5, stg4_6, stg4_4, stp2_9, stp2_14, stp2_10, \ + stp2_13) \ + \ + stp2_8 = stp1_8; \ + stp2_15 = stp1_15; \ + stp2_11 = stp1_11; \ + stp2_12 = stp1_12; \ + \ + stp2_16 = _mm_add_epi16(stp1_16, stp1_19); \ + stp2_17 = _mm_add_epi16(stp1_17, stp1_18); \ + stp2_18 = _mm_sub_epi16(stp1_17, stp1_18); \ + stp2_19 = _mm_sub_epi16(stp1_16, stp1_19); \ + stp2_20 = _mm_sub_epi16(stp1_23, stp1_20); \ + stp2_21 = _mm_sub_epi16(stp1_22, stp1_21); \ + stp2_22 = _mm_add_epi16(stp1_22, stp1_21); \ + stp2_23 = _mm_add_epi16(stp1_23, stp1_20); \ + \ + stp2_24 = _mm_add_epi16(stp1_24, stp1_27); \ + stp2_25 = _mm_add_epi16(stp1_25, stp1_26); \ + stp2_26 = _mm_sub_epi16(stp1_25, stp1_26); \ + stp2_27 = _mm_sub_epi16(stp1_24, stp1_27); \ + stp2_28 = _mm_sub_epi16(stp1_31, stp1_28); \ + stp2_29 = _mm_sub_epi16(stp1_30, stp1_29); \ + stp2_30 = _mm_add_epi16(stp1_29, stp1_30); \ + stp2_31 = _mm_add_epi16(stp1_28, stp1_31); \ + } \ + \ + /* Stage5 */ \ + { \ + const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ + const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ + const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29); \ + const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \ + \ + const __m128i lo_19_28 = 
_mm_unpacklo_epi16(stp2_19, stp2_28); \ + const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28); \ + const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \ + const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \ + \ + const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ + const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ + \ + stp1_0 = stp2_0; \ + stp1_1 = stp2_1; \ + stp1_2 = stp2_1; \ + stp1_3 = stp2_0; \ + \ + tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \ + tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \ + tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \ + tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \ + \ + tmp0 = _mm_add_epi32(tmp0, rounding); \ + tmp1 = _mm_add_epi32(tmp1, rounding); \ + tmp2 = _mm_add_epi32(tmp2, rounding); \ + tmp3 = _mm_add_epi32(tmp3, rounding); \ + \ + tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ + tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ + tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ + tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ + \ + stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ + stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ + \ + stp1_4 = stp2_4; \ + stp1_7 = stp2_7; \ + \ + stp1_8 = _mm_add_epi16(stp2_8, stp2_11); \ + stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \ + stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \ + stp1_11 = _mm_sub_epi16(stp2_8, stp2_11); \ + stp1_12 = _mm_sub_epi16(stp2_15, stp2_12); \ + stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \ + stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \ + stp1_15 = _mm_add_epi16(stp2_15, stp2_12); \ + \ + stp1_16 = stp2_16; \ + stp1_17 = stp2_17; \ + \ + MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4, \ + stg4_5, stg4_4, stg4_5, stp1_18, stp1_29, stp1_19, \ + stp1_28) \ + MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6, \ + stg4_4, stg4_6, stg4_4, stp1_20, stp1_27, stp1_21, \ + stp1_26) \ + \ + stp1_22 = stp2_22; \ + stp1_23 = stp2_23; \ + stp1_24 = stp2_24; \ + stp1_25 = stp2_25; \ + stp1_30 = stp2_30; \ + stp1_31 = 
stp2_31; \ + } \ + \ + /* Stage6 */ \ + { \ + const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ + const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ + const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \ + const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \ + \ + stp2_0 = _mm_add_epi16(stp1_0, stp1_7); \ + stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \ + stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \ + stp2_3 = _mm_add_epi16(stp1_3, stp1_4); \ + stp2_4 = _mm_sub_epi16(stp1_3, stp1_4); \ + stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \ + stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \ + stp2_7 = _mm_sub_epi16(stp1_0, stp1_7); \ + \ + stp2_8 = stp1_8; \ + stp2_9 = stp1_9; \ + stp2_14 = stp1_14; \ + stp2_15 = stp1_15; \ + \ + MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, stg6_0, \ + stg4_0, stg6_0, stg4_0, stp2_10, stp2_13, stp2_11, \ + stp2_12) \ + \ + stp2_16 = _mm_add_epi16(stp1_16, stp1_23); \ + stp2_17 = _mm_add_epi16(stp1_17, stp1_22); \ + stp2_18 = _mm_add_epi16(stp1_18, stp1_21); \ + stp2_19 = _mm_add_epi16(stp1_19, stp1_20); \ + stp2_20 = _mm_sub_epi16(stp1_19, stp1_20); \ + stp2_21 = _mm_sub_epi16(stp1_18, stp1_21); \ + stp2_22 = _mm_sub_epi16(stp1_17, stp1_22); \ + stp2_23 = _mm_sub_epi16(stp1_16, stp1_23); \ + \ + stp2_24 = _mm_sub_epi16(stp1_31, stp1_24); \ + stp2_25 = _mm_sub_epi16(stp1_30, stp1_25); \ + stp2_26 = _mm_sub_epi16(stp1_29, stp1_26); \ + stp2_27 = _mm_sub_epi16(stp1_28, stp1_27); \ + stp2_28 = _mm_add_epi16(stp1_27, stp1_28); \ + stp2_29 = _mm_add_epi16(stp1_26, stp1_29); \ + stp2_30 = _mm_add_epi16(stp1_25, stp1_30); \ + stp2_31 = _mm_add_epi16(stp1_24, stp1_31); \ + } \ + \ + /* Stage7 */ \ + { \ + const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \ + const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \ + const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ + const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ + \ + const __m128i lo_22_25 = 
_mm_unpacklo_epi16(stp2_22, stp2_25); \ + const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \ + const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24); \ + const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24); \ + \ + stp1_0 = _mm_add_epi16(stp2_0, stp2_15); \ + stp1_1 = _mm_add_epi16(stp2_1, stp2_14); \ + stp1_2 = _mm_add_epi16(stp2_2, stp2_13); \ + stp1_3 = _mm_add_epi16(stp2_3, stp2_12); \ + stp1_4 = _mm_add_epi16(stp2_4, stp2_11); \ + stp1_5 = _mm_add_epi16(stp2_5, stp2_10); \ + stp1_6 = _mm_add_epi16(stp2_6, stp2_9); \ + stp1_7 = _mm_add_epi16(stp2_7, stp2_8); \ + stp1_8 = _mm_sub_epi16(stp2_7, stp2_8); \ + stp1_9 = _mm_sub_epi16(stp2_6, stp2_9); \ + stp1_10 = _mm_sub_epi16(stp2_5, stp2_10); \ + stp1_11 = _mm_sub_epi16(stp2_4, stp2_11); \ + stp1_12 = _mm_sub_epi16(stp2_3, stp2_12); \ + stp1_13 = _mm_sub_epi16(stp2_2, stp2_13); \ + stp1_14 = _mm_sub_epi16(stp2_1, stp2_14); \ + stp1_15 = _mm_sub_epi16(stp2_0, stp2_15); \ + \ + stp1_16 = stp2_16; \ + stp1_17 = stp2_17; \ + stp1_18 = stp2_18; \ + stp1_19 = stp2_19; \ + \ + MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0, \ + stg4_0, stg6_0, stg4_0, stp1_20, stp1_27, stp1_21, \ + stp1_26) \ + MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0, \ + stg4_0, stg6_0, stg4_0, stp1_22, stp1_25, stp1_23, \ + stp1_24) \ + \ + stp1_28 = stp2_28; \ + stp1_29 = stp2_29; \ + stp1_30 = stp2_30; \ + stp1_31 = stp2_31; \ + } - -#define IDCT32 \ -/* Stage1 */ \ -{ \ - const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], in[31]); \ - const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], in[31]); \ - const __m128i lo_17_15 = _mm_unpacklo_epi16(in[17], in[15]); \ - const __m128i hi_17_15 = _mm_unpackhi_epi16(in[17], in[15]); \ - \ - const __m128i lo_9_23 = _mm_unpacklo_epi16(in[9], in[23]); \ - const __m128i hi_9_23 = _mm_unpackhi_epi16(in[9], in[23]); \ - const __m128i lo_25_7= _mm_unpacklo_epi16(in[25], in[7]); \ - const __m128i hi_25_7 = _mm_unpackhi_epi16(in[25], 
in[7]); \ - \ - const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], in[27]); \ - const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], in[27]); \ - const __m128i lo_21_11 = _mm_unpacklo_epi16(in[21], in[11]); \ - const __m128i hi_21_11 = _mm_unpackhi_epi16(in[21], in[11]); \ - \ - const __m128i lo_13_19 = _mm_unpacklo_epi16(in[13], in[19]); \ - const __m128i hi_13_19 = _mm_unpackhi_epi16(in[13], in[19]); \ - const __m128i lo_29_3 = _mm_unpacklo_epi16(in[29], in[3]); \ - const __m128i hi_29_3 = _mm_unpackhi_epi16(in[29], in[3]); \ - \ - MULTIPLICATION_AND_ADD(lo_1_31, hi_1_31, lo_17_15, hi_17_15, stg1_0, \ - stg1_1, stg1_2, stg1_3, stp1_16, stp1_31, \ - stp1_17, stp1_30) \ - MULTIPLICATION_AND_ADD(lo_9_23, hi_9_23, lo_25_7, hi_25_7, stg1_4, \ - stg1_5, stg1_6, stg1_7, stp1_18, stp1_29, \ - stp1_19, stp1_28) \ - MULTIPLICATION_AND_ADD(lo_5_27, hi_5_27, lo_21_11, hi_21_11, stg1_8, \ - stg1_9, stg1_10, stg1_11, stp1_20, stp1_27, \ - stp1_21, stp1_26) \ - MULTIPLICATION_AND_ADD(lo_13_19, hi_13_19, lo_29_3, hi_29_3, stg1_12, \ - stg1_13, stg1_14, stg1_15, stp1_22, stp1_25, \ - stp1_23, stp1_24) \ -} \ -\ -/* Stage2 */ \ -{ \ - const __m128i lo_2_30 = _mm_unpacklo_epi16(in[2], in[30]); \ - const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], in[30]); \ - const __m128i lo_18_14 = _mm_unpacklo_epi16(in[18], in[14]); \ - const __m128i hi_18_14 = _mm_unpackhi_epi16(in[18], in[14]); \ - \ - const __m128i lo_10_22 = _mm_unpacklo_epi16(in[10], in[22]); \ - const __m128i hi_10_22 = _mm_unpackhi_epi16(in[10], in[22]); \ - const __m128i lo_26_6 = _mm_unpacklo_epi16(in[26], in[6]); \ - const __m128i hi_26_6 = _mm_unpackhi_epi16(in[26], in[6]); \ - \ - MULTIPLICATION_AND_ADD(lo_2_30, hi_2_30, lo_18_14, hi_18_14, stg2_0, \ - stg2_1, stg2_2, stg2_3, stp2_8, stp2_15, stp2_9, \ - stp2_14) \ - MULTIPLICATION_AND_ADD(lo_10_22, hi_10_22, lo_26_6, hi_26_6, stg2_4, \ - stg2_5, stg2_6, stg2_7, stp2_10, stp2_13, \ - stp2_11, stp2_12) \ - \ - stp2_16 = _mm_add_epi16(stp1_16, stp1_17); \ - stp2_17 = 
_mm_sub_epi16(stp1_16, stp1_17); \ - stp2_18 = _mm_sub_epi16(stp1_19, stp1_18); \ - stp2_19 = _mm_add_epi16(stp1_19, stp1_18); \ - \ - stp2_20 = _mm_add_epi16(stp1_20, stp1_21); \ - stp2_21 = _mm_sub_epi16(stp1_20, stp1_21); \ - stp2_22 = _mm_sub_epi16(stp1_23, stp1_22); \ - stp2_23 = _mm_add_epi16(stp1_23, stp1_22); \ - \ - stp2_24 = _mm_add_epi16(stp1_24, stp1_25); \ - stp2_25 = _mm_sub_epi16(stp1_24, stp1_25); \ - stp2_26 = _mm_sub_epi16(stp1_27, stp1_26); \ - stp2_27 = _mm_add_epi16(stp1_27, stp1_26); \ - \ - stp2_28 = _mm_add_epi16(stp1_28, stp1_29); \ - stp2_29 = _mm_sub_epi16(stp1_28, stp1_29); \ - stp2_30 = _mm_sub_epi16(stp1_31, stp1_30); \ - stp2_31 = _mm_add_epi16(stp1_31, stp1_30); \ -} \ -\ -/* Stage3 */ \ -{ \ - const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], in[28]); \ - const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], in[28]); \ - const __m128i lo_20_12 = _mm_unpacklo_epi16(in[20], in[12]); \ - const __m128i hi_20_12 = _mm_unpackhi_epi16(in[20], in[12]); \ - \ - const __m128i lo_17_30 = _mm_unpacklo_epi16(stp2_17, stp2_30); \ - const __m128i hi_17_30 = _mm_unpackhi_epi16(stp2_17, stp2_30); \ - const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29); \ - const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \ - \ - const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ - const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ - const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25); \ - const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \ - \ - MULTIPLICATION_AND_ADD(lo_4_28, hi_4_28, lo_20_12, hi_20_12, stg3_0, \ - stg3_1, stg3_2, stg3_3, stp1_4, stp1_7, stp1_5, \ - stp1_6) \ - \ - stp1_8 = _mm_add_epi16(stp2_8, stp2_9); \ - stp1_9 = _mm_sub_epi16(stp2_8, stp2_9); \ - stp1_10 = _mm_sub_epi16(stp2_11, stp2_10); \ - stp1_11 = _mm_add_epi16(stp2_11, stp2_10); \ - stp1_12 = _mm_add_epi16(stp2_12, stp2_13); \ - stp1_13 = _mm_sub_epi16(stp2_12, stp2_13); \ - stp1_14 = _mm_sub_epi16(stp2_15, stp2_14); 
\ - stp1_15 = _mm_add_epi16(stp2_15, stp2_14); \ - \ - MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4, \ - stg3_5, stg3_6, stg3_4, stp1_17, stp1_30, \ - stp1_18, stp1_29) \ - MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8, \ - stg3_9, stg3_10, stg3_8, stp1_21, stp1_26, \ - stp1_22, stp1_25) \ - \ - stp1_16 = stp2_16; \ - stp1_31 = stp2_31; \ - stp1_19 = stp2_19; \ - stp1_20 = stp2_20; \ - stp1_23 = stp2_23; \ - stp1_24 = stp2_24; \ - stp1_27 = stp2_27; \ - stp1_28 = stp2_28; \ -} \ -\ -/* Stage4 */ \ -{ \ - const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], in[16]); \ - const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], in[16]); \ - const __m128i lo_8_24 = _mm_unpacklo_epi16(in[8], in[24]); \ - const __m128i hi_8_24 = _mm_unpackhi_epi16(in[8], in[24]); \ - \ - const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \ - const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ - const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ - \ - MULTIPLICATION_AND_ADD(lo_0_16, hi_0_16, lo_8_24, hi_8_24, stg4_0, \ - stg4_1, stg4_2, stg4_3, stp2_0, stp2_1, \ - stp2_2, stp2_3) \ - \ - stp2_4 = _mm_add_epi16(stp1_4, stp1_5); \ - stp2_5 = _mm_sub_epi16(stp1_4, stp1_5); \ - stp2_6 = _mm_sub_epi16(stp1_7, stp1_6); \ - stp2_7 = _mm_add_epi16(stp1_7, stp1_6); \ - \ - MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4, \ - stg4_5, stg4_6, stg4_4, stp2_9, stp2_14, \ - stp2_10, stp2_13) \ - \ - stp2_8 = stp1_8; \ - stp2_15 = stp1_15; \ - stp2_11 = stp1_11; \ - stp2_12 = stp1_12; \ - \ - stp2_16 = _mm_add_epi16(stp1_16, stp1_19); \ - stp2_17 = _mm_add_epi16(stp1_17, stp1_18); \ - stp2_18 = _mm_sub_epi16(stp1_17, stp1_18); \ - stp2_19 = _mm_sub_epi16(stp1_16, stp1_19); \ - stp2_20 = _mm_sub_epi16(stp1_23, stp1_20); \ - stp2_21 = _mm_sub_epi16(stp1_22, stp1_21); \ - stp2_22 = _mm_add_epi16(stp1_22, stp1_21); \ - stp2_23 = 
_mm_add_epi16(stp1_23, stp1_20); \ - \ - stp2_24 = _mm_add_epi16(stp1_24, stp1_27); \ - stp2_25 = _mm_add_epi16(stp1_25, stp1_26); \ - stp2_26 = _mm_sub_epi16(stp1_25, stp1_26); \ - stp2_27 = _mm_sub_epi16(stp1_24, stp1_27); \ - stp2_28 = _mm_sub_epi16(stp1_31, stp1_28); \ - stp2_29 = _mm_sub_epi16(stp1_30, stp1_29); \ - stp2_30 = _mm_add_epi16(stp1_29, stp1_30); \ - stp2_31 = _mm_add_epi16(stp1_28, stp1_31); \ -} \ -\ -/* Stage5 */ \ -{ \ - const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ - const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ - const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29); \ - const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \ - \ - const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28); \ - const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28); \ - const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \ - const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \ - \ - const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ - const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ - \ - stp1_0 = _mm_add_epi16(stp2_0, stp2_3); \ - stp1_1 = _mm_add_epi16(stp2_1, stp2_2); \ - stp1_2 = _mm_sub_epi16(stp2_1, stp2_2); \ - stp1_3 = _mm_sub_epi16(stp2_0, stp2_3); \ - \ - tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \ - tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \ - tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \ - tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \ - \ - tmp0 = _mm_add_epi32(tmp0, rounding); \ - tmp1 = _mm_add_epi32(tmp1, rounding); \ - tmp2 = _mm_add_epi32(tmp2, rounding); \ - tmp3 = _mm_add_epi32(tmp3, rounding); \ - \ - tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ - tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ - tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ - tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ - \ - stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ - stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ - \ - stp1_4 = stp2_4; \ - stp1_7 = stp2_7; \ - \ - stp1_8 = 
_mm_add_epi16(stp2_8, stp2_11); \ - stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \ - stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \ - stp1_11 = _mm_sub_epi16(stp2_8, stp2_11); \ - stp1_12 = _mm_sub_epi16(stp2_15, stp2_12); \ - stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \ - stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \ - stp1_15 = _mm_add_epi16(stp2_15, stp2_12); \ - \ - stp1_16 = stp2_16; \ - stp1_17 = stp2_17; \ - \ - MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4, \ - stg4_5, stg4_4, stg4_5, stp1_18, stp1_29, \ - stp1_19, stp1_28) \ - MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6, \ - stg4_4, stg4_6, stg4_4, stp1_20, stp1_27, \ - stp1_21, stp1_26) \ - \ - stp1_22 = stp2_22; \ - stp1_23 = stp2_23; \ - stp1_24 = stp2_24; \ - stp1_25 = stp2_25; \ - stp1_30 = stp2_30; \ - stp1_31 = stp2_31; \ -} \ -\ -/* Stage6 */ \ -{ \ - const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ - const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ - const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \ - const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \ - \ - stp2_0 = _mm_add_epi16(stp1_0, stp1_7); \ - stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \ - stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \ - stp2_3 = _mm_add_epi16(stp1_3, stp1_4); \ - stp2_4 = _mm_sub_epi16(stp1_3, stp1_4); \ - stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \ - stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \ - stp2_7 = _mm_sub_epi16(stp1_0, stp1_7); \ - \ - stp2_8 = stp1_8; \ - stp2_9 = stp1_9; \ - stp2_14 = stp1_14; \ - stp2_15 = stp1_15; \ - \ - MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, \ - stg6_0, stg4_0, stg6_0, stg4_0, stp2_10, \ - stp2_13, stp2_11, stp2_12) \ - \ - stp2_16 = _mm_add_epi16(stp1_16, stp1_23); \ - stp2_17 = _mm_add_epi16(stp1_17, stp1_22); \ - stp2_18 = _mm_add_epi16(stp1_18, stp1_21); \ - stp2_19 = _mm_add_epi16(stp1_19, stp1_20); \ - stp2_20 = _mm_sub_epi16(stp1_19, stp1_20); \ - stp2_21 = 
_mm_sub_epi16(stp1_18, stp1_21); \ - stp2_22 = _mm_sub_epi16(stp1_17, stp1_22); \ - stp2_23 = _mm_sub_epi16(stp1_16, stp1_23); \ - \ - stp2_24 = _mm_sub_epi16(stp1_31, stp1_24); \ - stp2_25 = _mm_sub_epi16(stp1_30, stp1_25); \ - stp2_26 = _mm_sub_epi16(stp1_29, stp1_26); \ - stp2_27 = _mm_sub_epi16(stp1_28, stp1_27); \ - stp2_28 = _mm_add_epi16(stp1_27, stp1_28); \ - stp2_29 = _mm_add_epi16(stp1_26, stp1_29); \ - stp2_30 = _mm_add_epi16(stp1_25, stp1_30); \ - stp2_31 = _mm_add_epi16(stp1_24, stp1_31); \ -} \ -\ -/* Stage7 */ \ -{ \ - const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \ - const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \ - const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ - const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ - \ - const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25); \ - const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \ - const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24); \ - const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24); \ - \ - stp1_0 = _mm_add_epi16(stp2_0, stp2_15); \ - stp1_1 = _mm_add_epi16(stp2_1, stp2_14); \ - stp1_2 = _mm_add_epi16(stp2_2, stp2_13); \ - stp1_3 = _mm_add_epi16(stp2_3, stp2_12); \ - stp1_4 = _mm_add_epi16(stp2_4, stp2_11); \ - stp1_5 = _mm_add_epi16(stp2_5, stp2_10); \ - stp1_6 = _mm_add_epi16(stp2_6, stp2_9); \ - stp1_7 = _mm_add_epi16(stp2_7, stp2_8); \ - stp1_8 = _mm_sub_epi16(stp2_7, stp2_8); \ - stp1_9 = _mm_sub_epi16(stp2_6, stp2_9); \ - stp1_10 = _mm_sub_epi16(stp2_5, stp2_10); \ - stp1_11 = _mm_sub_epi16(stp2_4, stp2_11); \ - stp1_12 = _mm_sub_epi16(stp2_3, stp2_12); \ - stp1_13 = _mm_sub_epi16(stp2_2, stp2_13); \ - stp1_14 = _mm_sub_epi16(stp2_1, stp2_14); \ - stp1_15 = _mm_sub_epi16(stp2_0, stp2_15); \ - \ - stp1_16 = stp2_16; \ - stp1_17 = stp2_17; \ - stp1_18 = stp2_18; \ - stp1_19 = stp2_19; \ - \ - MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0, \ - stg4_0, stg6_0, 
stg4_0, stp1_20, stp1_27, \ - stp1_21, stp1_26) \ - MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0, \ - stg4_0, stg6_0, stg4_0, stp1_22, stp1_25, \ - stp1_23, stp1_24) \ - \ - stp1_28 = stp2_28; \ - stp1_29 = stp2_29; \ - stp1_30 = stp2_30; \ - stp1_31 = stp2_31; \ -} +#define IDCT32 \ + /* Stage1 */ \ + { \ + const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], in[31]); \ + const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], in[31]); \ + const __m128i lo_17_15 = _mm_unpacklo_epi16(in[17], in[15]); \ + const __m128i hi_17_15 = _mm_unpackhi_epi16(in[17], in[15]); \ + \ + const __m128i lo_9_23 = _mm_unpacklo_epi16(in[9], in[23]); \ + const __m128i hi_9_23 = _mm_unpackhi_epi16(in[9], in[23]); \ + const __m128i lo_25_7 = _mm_unpacklo_epi16(in[25], in[7]); \ + const __m128i hi_25_7 = _mm_unpackhi_epi16(in[25], in[7]); \ + \ + const __m128i lo_5_27 = _mm_unpacklo_epi16(in[5], in[27]); \ + const __m128i hi_5_27 = _mm_unpackhi_epi16(in[5], in[27]); \ + const __m128i lo_21_11 = _mm_unpacklo_epi16(in[21], in[11]); \ + const __m128i hi_21_11 = _mm_unpackhi_epi16(in[21], in[11]); \ + \ + const __m128i lo_13_19 = _mm_unpacklo_epi16(in[13], in[19]); \ + const __m128i hi_13_19 = _mm_unpackhi_epi16(in[13], in[19]); \ + const __m128i lo_29_3 = _mm_unpacklo_epi16(in[29], in[3]); \ + const __m128i hi_29_3 = _mm_unpackhi_epi16(in[29], in[3]); \ + \ + MULTIPLICATION_AND_ADD(lo_1_31, hi_1_31, lo_17_15, hi_17_15, stg1_0, \ + stg1_1, stg1_2, stg1_3, stp1_16, stp1_31, stp1_17, \ + stp1_30) \ + MULTIPLICATION_AND_ADD(lo_9_23, hi_9_23, lo_25_7, hi_25_7, stg1_4, stg1_5, \ + stg1_6, stg1_7, stp1_18, stp1_29, stp1_19, stp1_28) \ + MULTIPLICATION_AND_ADD(lo_5_27, hi_5_27, lo_21_11, hi_21_11, stg1_8, \ + stg1_9, stg1_10, stg1_11, stp1_20, stp1_27, \ + stp1_21, stp1_26) \ + MULTIPLICATION_AND_ADD(lo_13_19, hi_13_19, lo_29_3, hi_29_3, stg1_12, \ + stg1_13, stg1_14, stg1_15, stp1_22, stp1_25, \ + stp1_23, stp1_24) \ + } \ + \ + /* Stage2 */ \ + { \ + const __m128i lo_2_30 = 
_mm_unpacklo_epi16(in[2], in[30]); \ + const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], in[30]); \ + const __m128i lo_18_14 = _mm_unpacklo_epi16(in[18], in[14]); \ + const __m128i hi_18_14 = _mm_unpackhi_epi16(in[18], in[14]); \ + \ + const __m128i lo_10_22 = _mm_unpacklo_epi16(in[10], in[22]); \ + const __m128i hi_10_22 = _mm_unpackhi_epi16(in[10], in[22]); \ + const __m128i lo_26_6 = _mm_unpacklo_epi16(in[26], in[6]); \ + const __m128i hi_26_6 = _mm_unpackhi_epi16(in[26], in[6]); \ + \ + MULTIPLICATION_AND_ADD(lo_2_30, hi_2_30, lo_18_14, hi_18_14, stg2_0, \ + stg2_1, stg2_2, stg2_3, stp2_8, stp2_15, stp2_9, \ + stp2_14) \ + MULTIPLICATION_AND_ADD(lo_10_22, hi_10_22, lo_26_6, hi_26_6, stg2_4, \ + stg2_5, stg2_6, stg2_7, stp2_10, stp2_13, stp2_11, \ + stp2_12) \ + \ + stp2_16 = _mm_add_epi16(stp1_16, stp1_17); \ + stp2_17 = _mm_sub_epi16(stp1_16, stp1_17); \ + stp2_18 = _mm_sub_epi16(stp1_19, stp1_18); \ + stp2_19 = _mm_add_epi16(stp1_19, stp1_18); \ + \ + stp2_20 = _mm_add_epi16(stp1_20, stp1_21); \ + stp2_21 = _mm_sub_epi16(stp1_20, stp1_21); \ + stp2_22 = _mm_sub_epi16(stp1_23, stp1_22); \ + stp2_23 = _mm_add_epi16(stp1_23, stp1_22); \ + \ + stp2_24 = _mm_add_epi16(stp1_24, stp1_25); \ + stp2_25 = _mm_sub_epi16(stp1_24, stp1_25); \ + stp2_26 = _mm_sub_epi16(stp1_27, stp1_26); \ + stp2_27 = _mm_add_epi16(stp1_27, stp1_26); \ + \ + stp2_28 = _mm_add_epi16(stp1_28, stp1_29); \ + stp2_29 = _mm_sub_epi16(stp1_28, stp1_29); \ + stp2_30 = _mm_sub_epi16(stp1_31, stp1_30); \ + stp2_31 = _mm_add_epi16(stp1_31, stp1_30); \ + } \ + \ + /* Stage3 */ \ + { \ + const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], in[28]); \ + const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], in[28]); \ + const __m128i lo_20_12 = _mm_unpacklo_epi16(in[20], in[12]); \ + const __m128i hi_20_12 = _mm_unpackhi_epi16(in[20], in[12]); \ + \ + const __m128i lo_17_30 = _mm_unpacklo_epi16(stp2_17, stp2_30); \ + const __m128i hi_17_30 = _mm_unpackhi_epi16(stp2_17, stp2_30); \ + const __m128i lo_18_29 = 
_mm_unpacklo_epi16(stp2_18, stp2_29); \ + const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \ + \ + const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ + const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ + const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25); \ + const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \ + \ + MULTIPLICATION_AND_ADD(lo_4_28, hi_4_28, lo_20_12, hi_20_12, stg3_0, \ + stg3_1, stg3_2, stg3_3, stp1_4, stp1_7, stp1_5, \ + stp1_6) \ + \ + stp1_8 = _mm_add_epi16(stp2_8, stp2_9); \ + stp1_9 = _mm_sub_epi16(stp2_8, stp2_9); \ + stp1_10 = _mm_sub_epi16(stp2_11, stp2_10); \ + stp1_11 = _mm_add_epi16(stp2_11, stp2_10); \ + stp1_12 = _mm_add_epi16(stp2_12, stp2_13); \ + stp1_13 = _mm_sub_epi16(stp2_12, stp2_13); \ + stp1_14 = _mm_sub_epi16(stp2_15, stp2_14); \ + stp1_15 = _mm_add_epi16(stp2_15, stp2_14); \ + \ + MULTIPLICATION_AND_ADD(lo_17_30, hi_17_30, lo_18_29, hi_18_29, stg3_4, \ + stg3_5, stg3_6, stg3_4, stp1_17, stp1_30, stp1_18, \ + stp1_29) \ + MULTIPLICATION_AND_ADD(lo_21_26, hi_21_26, lo_22_25, hi_22_25, stg3_8, \ + stg3_9, stg3_10, stg3_8, stp1_21, stp1_26, stp1_22, \ + stp1_25) \ + \ + stp1_16 = stp2_16; \ + stp1_31 = stp2_31; \ + stp1_19 = stp2_19; \ + stp1_20 = stp2_20; \ + stp1_23 = stp2_23; \ + stp1_24 = stp2_24; \ + stp1_27 = stp2_27; \ + stp1_28 = stp2_28; \ + } \ + \ + /* Stage4 */ \ + { \ + const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], in[16]); \ + const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], in[16]); \ + const __m128i lo_8_24 = _mm_unpacklo_epi16(in[8], in[24]); \ + const __m128i hi_8_24 = _mm_unpackhi_epi16(in[8], in[24]); \ + \ + const __m128i lo_9_14 = _mm_unpacklo_epi16(stp1_9, stp1_14); \ + const __m128i hi_9_14 = _mm_unpackhi_epi16(stp1_9, stp1_14); \ + const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ + const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ + \ + MULTIPLICATION_AND_ADD(lo_0_16, hi_0_16, lo_8_24, hi_8_24, stg4_0, 
stg4_1, \ + stg4_2, stg4_3, stp2_0, stp2_1, stp2_2, stp2_3) \ + \ + stp2_4 = _mm_add_epi16(stp1_4, stp1_5); \ + stp2_5 = _mm_sub_epi16(stp1_4, stp1_5); \ + stp2_6 = _mm_sub_epi16(stp1_7, stp1_6); \ + stp2_7 = _mm_add_epi16(stp1_7, stp1_6); \ + \ + MULTIPLICATION_AND_ADD(lo_9_14, hi_9_14, lo_10_13, hi_10_13, stg4_4, \ + stg4_5, stg4_6, stg4_4, stp2_9, stp2_14, stp2_10, \ + stp2_13) \ + \ + stp2_8 = stp1_8; \ + stp2_15 = stp1_15; \ + stp2_11 = stp1_11; \ + stp2_12 = stp1_12; \ + \ + stp2_16 = _mm_add_epi16(stp1_16, stp1_19); \ + stp2_17 = _mm_add_epi16(stp1_17, stp1_18); \ + stp2_18 = _mm_sub_epi16(stp1_17, stp1_18); \ + stp2_19 = _mm_sub_epi16(stp1_16, stp1_19); \ + stp2_20 = _mm_sub_epi16(stp1_23, stp1_20); \ + stp2_21 = _mm_sub_epi16(stp1_22, stp1_21); \ + stp2_22 = _mm_add_epi16(stp1_22, stp1_21); \ + stp2_23 = _mm_add_epi16(stp1_23, stp1_20); \ + \ + stp2_24 = _mm_add_epi16(stp1_24, stp1_27); \ + stp2_25 = _mm_add_epi16(stp1_25, stp1_26); \ + stp2_26 = _mm_sub_epi16(stp1_25, stp1_26); \ + stp2_27 = _mm_sub_epi16(stp1_24, stp1_27); \ + stp2_28 = _mm_sub_epi16(stp1_31, stp1_28); \ + stp2_29 = _mm_sub_epi16(stp1_30, stp1_29); \ + stp2_30 = _mm_add_epi16(stp1_29, stp1_30); \ + stp2_31 = _mm_add_epi16(stp1_28, stp1_31); \ + } \ + \ + /* Stage5 */ \ + { \ + const __m128i lo_6_5 = _mm_unpacklo_epi16(stp2_6, stp2_5); \ + const __m128i hi_6_5 = _mm_unpackhi_epi16(stp2_6, stp2_5); \ + const __m128i lo_18_29 = _mm_unpacklo_epi16(stp2_18, stp2_29); \ + const __m128i hi_18_29 = _mm_unpackhi_epi16(stp2_18, stp2_29); \ + \ + const __m128i lo_19_28 = _mm_unpacklo_epi16(stp2_19, stp2_28); \ + const __m128i hi_19_28 = _mm_unpackhi_epi16(stp2_19, stp2_28); \ + const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \ + const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \ + \ + const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ + const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ + \ + stp1_0 = _mm_add_epi16(stp2_0, stp2_3); \ + stp1_1 
= _mm_add_epi16(stp2_1, stp2_2); \ + stp1_2 = _mm_sub_epi16(stp2_1, stp2_2); \ + stp1_3 = _mm_sub_epi16(stp2_0, stp2_3); \ + \ + tmp0 = _mm_madd_epi16(lo_6_5, stg4_1); \ + tmp1 = _mm_madd_epi16(hi_6_5, stg4_1); \ + tmp2 = _mm_madd_epi16(lo_6_5, stg4_0); \ + tmp3 = _mm_madd_epi16(hi_6_5, stg4_0); \ + \ + tmp0 = _mm_add_epi32(tmp0, rounding); \ + tmp1 = _mm_add_epi32(tmp1, rounding); \ + tmp2 = _mm_add_epi32(tmp2, rounding); \ + tmp3 = _mm_add_epi32(tmp3, rounding); \ + \ + tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS); \ + tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS); \ + tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS); \ + tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS); \ + \ + stp1_5 = _mm_packs_epi32(tmp0, tmp1); \ + stp1_6 = _mm_packs_epi32(tmp2, tmp3); \ + \ + stp1_4 = stp2_4; \ + stp1_7 = stp2_7; \ + \ + stp1_8 = _mm_add_epi16(stp2_8, stp2_11); \ + stp1_9 = _mm_add_epi16(stp2_9, stp2_10); \ + stp1_10 = _mm_sub_epi16(stp2_9, stp2_10); \ + stp1_11 = _mm_sub_epi16(stp2_8, stp2_11); \ + stp1_12 = _mm_sub_epi16(stp2_15, stp2_12); \ + stp1_13 = _mm_sub_epi16(stp2_14, stp2_13); \ + stp1_14 = _mm_add_epi16(stp2_14, stp2_13); \ + stp1_15 = _mm_add_epi16(stp2_15, stp2_12); \ + \ + stp1_16 = stp2_16; \ + stp1_17 = stp2_17; \ + \ + MULTIPLICATION_AND_ADD(lo_18_29, hi_18_29, lo_19_28, hi_19_28, stg4_4, \ + stg4_5, stg4_4, stg4_5, stp1_18, stp1_29, stp1_19, \ + stp1_28) \ + MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg4_6, \ + stg4_4, stg4_6, stg4_4, stp1_20, stp1_27, stp1_21, \ + stp1_26) \ + \ + stp1_22 = stp2_22; \ + stp1_23 = stp2_23; \ + stp1_24 = stp2_24; \ + stp1_25 = stp2_25; \ + stp1_30 = stp2_30; \ + stp1_31 = stp2_31; \ + } \ + \ + /* Stage6 */ \ + { \ + const __m128i lo_10_13 = _mm_unpacklo_epi16(stp1_10, stp1_13); \ + const __m128i hi_10_13 = _mm_unpackhi_epi16(stp1_10, stp1_13); \ + const __m128i lo_11_12 = _mm_unpacklo_epi16(stp1_11, stp1_12); \ + const __m128i hi_11_12 = _mm_unpackhi_epi16(stp1_11, stp1_12); \ + \ + stp2_0 = _mm_add_epi16(stp1_0, 
stp1_7); \ + stp2_1 = _mm_add_epi16(stp1_1, stp1_6); \ + stp2_2 = _mm_add_epi16(stp1_2, stp1_5); \ + stp2_3 = _mm_add_epi16(stp1_3, stp1_4); \ + stp2_4 = _mm_sub_epi16(stp1_3, stp1_4); \ + stp2_5 = _mm_sub_epi16(stp1_2, stp1_5); \ + stp2_6 = _mm_sub_epi16(stp1_1, stp1_6); \ + stp2_7 = _mm_sub_epi16(stp1_0, stp1_7); \ + \ + stp2_8 = stp1_8; \ + stp2_9 = stp1_9; \ + stp2_14 = stp1_14; \ + stp2_15 = stp1_15; \ + \ + MULTIPLICATION_AND_ADD(lo_10_13, hi_10_13, lo_11_12, hi_11_12, stg6_0, \ + stg4_0, stg6_0, stg4_0, stp2_10, stp2_13, stp2_11, \ + stp2_12) \ + \ + stp2_16 = _mm_add_epi16(stp1_16, stp1_23); \ + stp2_17 = _mm_add_epi16(stp1_17, stp1_22); \ + stp2_18 = _mm_add_epi16(stp1_18, stp1_21); \ + stp2_19 = _mm_add_epi16(stp1_19, stp1_20); \ + stp2_20 = _mm_sub_epi16(stp1_19, stp1_20); \ + stp2_21 = _mm_sub_epi16(stp1_18, stp1_21); \ + stp2_22 = _mm_sub_epi16(stp1_17, stp1_22); \ + stp2_23 = _mm_sub_epi16(stp1_16, stp1_23); \ + \ + stp2_24 = _mm_sub_epi16(stp1_31, stp1_24); \ + stp2_25 = _mm_sub_epi16(stp1_30, stp1_25); \ + stp2_26 = _mm_sub_epi16(stp1_29, stp1_26); \ + stp2_27 = _mm_sub_epi16(stp1_28, stp1_27); \ + stp2_28 = _mm_add_epi16(stp1_27, stp1_28); \ + stp2_29 = _mm_add_epi16(stp1_26, stp1_29); \ + stp2_30 = _mm_add_epi16(stp1_25, stp1_30); \ + stp2_31 = _mm_add_epi16(stp1_24, stp1_31); \ + } \ + \ + /* Stage7 */ \ + { \ + const __m128i lo_20_27 = _mm_unpacklo_epi16(stp2_20, stp2_27); \ + const __m128i hi_20_27 = _mm_unpackhi_epi16(stp2_20, stp2_27); \ + const __m128i lo_21_26 = _mm_unpacklo_epi16(stp2_21, stp2_26); \ + const __m128i hi_21_26 = _mm_unpackhi_epi16(stp2_21, stp2_26); \ + \ + const __m128i lo_22_25 = _mm_unpacklo_epi16(stp2_22, stp2_25); \ + const __m128i hi_22_25 = _mm_unpackhi_epi16(stp2_22, stp2_25); \ + const __m128i lo_23_24 = _mm_unpacklo_epi16(stp2_23, stp2_24); \ + const __m128i hi_23_24 = _mm_unpackhi_epi16(stp2_23, stp2_24); \ + \ + stp1_0 = _mm_add_epi16(stp2_0, stp2_15); \ + stp1_1 = _mm_add_epi16(stp2_1, stp2_14); \ + stp1_2 = 
_mm_add_epi16(stp2_2, stp2_13); \ + stp1_3 = _mm_add_epi16(stp2_3, stp2_12); \ + stp1_4 = _mm_add_epi16(stp2_4, stp2_11); \ + stp1_5 = _mm_add_epi16(stp2_5, stp2_10); \ + stp1_6 = _mm_add_epi16(stp2_6, stp2_9); \ + stp1_7 = _mm_add_epi16(stp2_7, stp2_8); \ + stp1_8 = _mm_sub_epi16(stp2_7, stp2_8); \ + stp1_9 = _mm_sub_epi16(stp2_6, stp2_9); \ + stp1_10 = _mm_sub_epi16(stp2_5, stp2_10); \ + stp1_11 = _mm_sub_epi16(stp2_4, stp2_11); \ + stp1_12 = _mm_sub_epi16(stp2_3, stp2_12); \ + stp1_13 = _mm_sub_epi16(stp2_2, stp2_13); \ + stp1_14 = _mm_sub_epi16(stp2_1, stp2_14); \ + stp1_15 = _mm_sub_epi16(stp2_0, stp2_15); \ + \ + stp1_16 = stp2_16; \ + stp1_17 = stp2_17; \ + stp1_18 = stp2_18; \ + stp1_19 = stp2_19; \ + \ + MULTIPLICATION_AND_ADD(lo_20_27, hi_20_27, lo_21_26, hi_21_26, stg6_0, \ + stg4_0, stg6_0, stg4_0, stp1_20, stp1_27, stp1_21, \ + stp1_26) \ + MULTIPLICATION_AND_ADD(lo_22_25, hi_22_25, lo_23_24, hi_23_24, stg6_0, \ + stg4_0, stg6_0, stg4_0, stp1_22, stp1_25, stp1_23, \ + stp1_24) \ + \ + stp1_28 = stp2_28; \ + stp1_29 = stp2_29; \ + stp1_30 = stp2_30; \ + stp1_31 = stp2_31; \ + } // Only upper-left 8x8 has non-zero coeff void vpx_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest, int stride) { const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); - const __m128i final_rounding = _mm_set1_epi16(1<<5); + const __m128i final_rounding = _mm_set1_epi16(1 << 5); // idct constants for each stage const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64); @@ -3074,15 +3046,13 @@ void vpx_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest, __m128i in[32], col[32]; __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7, - stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, - stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22, - stp1_23, stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29, - stp1_30, stp1_31; + stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, + 
stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22, stp1_23, + stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29, stp1_30, stp1_31; __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7, - stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15, - stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22, - stp2_23, stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29, - stp2_30, stp2_31; + stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15, + stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22, stp2_23, + stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29, stp2_30, stp2_31; __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; int i; @@ -3096,17 +3066,7 @@ void vpx_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest, in[6] = load_input_data(input + 192); in[7] = load_input_data(input + 224); - for (i = 8; i < 32; ++i) { - in[i] = _mm_setzero_si128(); - } - array_transpose_8x8(in, in); - // TODO(hkuang): Following transposes are unnecessary. But remove them will - // lead to performance drop on some devices. - array_transpose_8x8(in + 8, in + 8); - array_transpose_8x8(in + 16, in + 16); - array_transpose_8x8(in + 24, in + 24); - IDCT32_34 // 1_D: Store 32 intermediate results for each 8x32 block. 
@@ -3250,15 +3210,13 @@ void vpx_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest, __m128i in[32], col[128], zero_idx[16]; __m128i stp1_0, stp1_1, stp1_2, stp1_3, stp1_4, stp1_5, stp1_6, stp1_7, - stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, - stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22, - stp1_23, stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29, - stp1_30, stp1_31; + stp1_8, stp1_9, stp1_10, stp1_11, stp1_12, stp1_13, stp1_14, stp1_15, + stp1_16, stp1_17, stp1_18, stp1_19, stp1_20, stp1_21, stp1_22, stp1_23, + stp1_24, stp1_25, stp1_26, stp1_27, stp1_28, stp1_29, stp1_30, stp1_31; __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7, - stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15, - stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22, - stp2_23, stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29, - stp2_30, stp2_31; + stp2_8, stp2_9, stp2_10, stp2_11, stp2_12, stp2_13, stp2_14, stp2_15, + stp2_16, stp2_17, stp2_18, stp2_19, stp2_20, stp2_21, stp2_22, stp2_23, + stp2_24, stp2_25, stp2_26, stp2_27, stp2_28, stp2_29, stp2_30, stp2_31; __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; int i, j, i32; @@ -3476,15 +3434,15 @@ void vpx_idct32x32_1_add_sse2(const tran_low_t *input, uint8_t *dest, const __m128i zero = _mm_setzero_si128(); int a, j; - a = dct_const_round_shift(input[0] * cospi_16_64); - a = dct_const_round_shift(a * cospi_16_64); + a = (int)dct_const_round_shift(input[0] * cospi_16_64); + a = (int)dct_const_round_shift(a * cospi_16_64); a = ROUND_POWER_OF_TWO(a, 6); dc_value = _mm_set1_epi16(a); for (j = 0; j < 32; ++j) { - RECON_AND_STORE(dest + 0 + j * stride, dc_value); - RECON_AND_STORE(dest + 8 + j * stride, dc_value); + RECON_AND_STORE(dest + 0 + j * stride, dc_value); + RECON_AND_STORE(dest + 8 + j * stride, dc_value); RECON_AND_STORE(dest + 16 + j * stride, dc_value); RECON_AND_STORE(dest + 24 + j * stride, dc_value); } @@ -3609,8 
+3567,7 @@ void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8, tran_low_t temp_in[4], temp_out[4]; // Columns for (i = 0; i < 4; ++i) { - for (j = 0; j < 4; ++j) - temp_in[j] = out[j * 4 + i]; + for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i]; vpx_highbd_idct4_c(temp_in, temp_out, bd); for (j = 0; j < 4; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( @@ -3699,19 +3656,18 @@ void vpx_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8, __m128i d[8]; for (i = 0; i < 8; i++) { inptr[i] = _mm_add_epi16(inptr[i], sixteen); - d[i] = _mm_loadu_si128((const __m128i *)(dest + stride*i)); + d[i] = _mm_loadu_si128((const __m128i *)(dest + stride * i)); inptr[i] = _mm_srai_epi16(inptr[i], 5); d[i] = clamp_high_sse2(_mm_adds_epi16(d[i], inptr[i]), bd); // Store - _mm_storeu_si128((__m128i *)(dest + stride*i), d[i]); + _mm_storeu_si128((__m128i *)(dest + stride * i), d[i]); } } } else { // Run the un-optimised column transform tran_low_t temp_in[8], temp_out[8]; for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; + for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i]; vpx_highbd_idct8_c(temp_in, temp_out, bd); for (j = 0; j < 8; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( @@ -3803,19 +3759,18 @@ void vpx_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8, __m128i d[8]; for (i = 0; i < 8; i++) { inptr[i] = _mm_add_epi16(inptr[i], sixteen); - d[i] = _mm_loadu_si128((const __m128i *)(dest + stride*i)); + d[i] = _mm_loadu_si128((const __m128i *)(dest + stride * i)); inptr[i] = _mm_srai_epi16(inptr[i], 5); d[i] = clamp_high_sse2(_mm_adds_epi16(d[i], inptr[i]), bd); // Store - _mm_storeu_si128((__m128i *)(dest + stride*i), d[i]); + _mm_storeu_si128((__m128i *)(dest + stride * i), d[i]); } } } else { // Run the un-optimised column transform tran_low_t temp_in[8], temp_out[8]; for (i = 0; i < 8; ++i) { - for (j = 0; j < 8; ++j) - temp_in[j] = out[j * 8 + i]; + for (j = 0; j < 8; ++j) 
temp_in[j] = out[j * 8 + i]; vpx_highbd_idct8_c(temp_in, temp_out, bd); for (j = 0; j < 8; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( @@ -3911,25 +3866,24 @@ void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8, { __m128i d[2]; for (i = 0; i < 16; i++) { - inptr[i ] = _mm_add_epi16(inptr[i ], rounding); - inptr[i+16] = _mm_add_epi16(inptr[i+16], rounding); - d[0] = _mm_loadu_si128((const __m128i *)(dest + stride*i)); - d[1] = _mm_loadu_si128((const __m128i *)(dest + stride*i + 8)); - inptr[i ] = _mm_srai_epi16(inptr[i ], 6); - inptr[i+16] = _mm_srai_epi16(inptr[i+16], 6); - d[0] = clamp_high_sse2(_mm_add_epi16(d[0], inptr[i ]), bd); - d[1] = clamp_high_sse2(_mm_add_epi16(d[1], inptr[i+16]), bd); + inptr[i] = _mm_add_epi16(inptr[i], rounding); + inptr[i + 16] = _mm_add_epi16(inptr[i + 16], rounding); + d[0] = _mm_loadu_si128((const __m128i *)(dest + stride * i)); + d[1] = _mm_loadu_si128((const __m128i *)(dest + stride * i + 8)); + inptr[i] = _mm_srai_epi16(inptr[i], 6); + inptr[i + 16] = _mm_srai_epi16(inptr[i + 16], 6); + d[0] = clamp_high_sse2(_mm_add_epi16(d[0], inptr[i]), bd); + d[1] = clamp_high_sse2(_mm_add_epi16(d[1], inptr[i + 16]), bd); // Store - _mm_storeu_si128((__m128i *)(dest + stride*i), d[0]); - _mm_storeu_si128((__m128i *)(dest + stride*i + 8), d[1]); + _mm_storeu_si128((__m128i *)(dest + stride * i), d[0]); + _mm_storeu_si128((__m128i *)(dest + stride * i + 8), d[1]); } } } else { // Run the un-optimised column transform tran_low_t temp_in[16], temp_out[16]; for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j * 16 + i]; + for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i]; vpx_highbd_idct16_c(temp_in, temp_out, bd); for (j = 0; j < 16; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( @@ -4030,25 +3984,24 @@ void vpx_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8, { __m128i d[2]; for (i = 0; i < 16; i++) { - inptr[i ] = _mm_add_epi16(inptr[i ], rounding); - 
inptr[i+16] = _mm_add_epi16(inptr[i+16], rounding); - d[0] = _mm_loadu_si128((const __m128i *)(dest + stride*i)); - d[1] = _mm_loadu_si128((const __m128i *)(dest + stride*i + 8)); - inptr[i ] = _mm_srai_epi16(inptr[i ], 6); - inptr[i+16] = _mm_srai_epi16(inptr[i+16], 6); - d[0] = clamp_high_sse2(_mm_add_epi16(d[0], inptr[i ]), bd); - d[1] = clamp_high_sse2(_mm_add_epi16(d[1], inptr[i+16]), bd); + inptr[i] = _mm_add_epi16(inptr[i], rounding); + inptr[i + 16] = _mm_add_epi16(inptr[i + 16], rounding); + d[0] = _mm_loadu_si128((const __m128i *)(dest + stride * i)); + d[1] = _mm_loadu_si128((const __m128i *)(dest + stride * i + 8)); + inptr[i] = _mm_srai_epi16(inptr[i], 6); + inptr[i + 16] = _mm_srai_epi16(inptr[i + 16], 6); + d[0] = clamp_high_sse2(_mm_add_epi16(d[0], inptr[i]), bd); + d[1] = clamp_high_sse2(_mm_add_epi16(d[1], inptr[i + 16]), bd); // Store - _mm_storeu_si128((__m128i *)(dest + stride*i), d[0]); - _mm_storeu_si128((__m128i *)(dest + stride*i + 8), d[1]); + _mm_storeu_si128((__m128i *)(dest + stride * i), d[0]); + _mm_storeu_si128((__m128i *)(dest + stride * i + 8), d[1]); } } } else { // Run the un-optimised column transform tran_low_t temp_in[16], temp_out[16]; for (i = 0; i < 16; ++i) { - for (j = 0; j < 16; ++j) - temp_in[j] = out[j * 16 + i]; + for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i]; vpx_highbd_idct16_c(temp_in, temp_out, bd); for (j = 0; j < 16; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( @@ -4057,4 +4010,32 @@ void vpx_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8, } } } + +void vpx_highbd_idct32x32_1_add_sse2(const tran_low_t *input, uint8_t *dest8, + int stride, int bd) { + __m128i dc_value, d; + const __m128i zero = _mm_setzero_si128(); + const __m128i one = _mm_set1_epi16(1); + const __m128i max = _mm_subs_epi16(_mm_slli_epi16(one, bd), one); + int a, i, j; + uint16_t *dest = CONVERT_TO_SHORTPTR(dest8); + tran_low_t out; + + out = highbd_dct_const_round_shift(input[0] * cospi_16_64); + out = 
highbd_dct_const_round_shift(out * cospi_16_64); + a = ROUND_POWER_OF_TWO(out, 6); + + d = _mm_set1_epi32(a); + dc_value = _mm_packs_epi32(d, d); + for (i = 0; i < 32; ++i) { + for (j = 0; j < 4; ++j) { + d = _mm_loadu_si128((const __m128i *)(&dest[j * 8])); + d = _mm_adds_epi16(d, dc_value); + d = _mm_max_epi16(d, zero); + d = _mm_min_epi16(d, max); + _mm_storeu_si128((__m128i *)(&dest[j * 8]), d); + } + dest += stride; + } +} #endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vpx_dsp/x86/inv_txfm_sse2.h b/libs/libvpx/vpx_dsp/x86/inv_txfm_sse2.h index bd520c18e5..d762a04abc 100644 --- a/libs/libvpx/vpx_dsp/x86/inv_txfm_sse2.h +++ b/libs/libvpx/vpx_dsp/x86/inv_txfm_sse2.h @@ -47,16 +47,16 @@ static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) { res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7); } -#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1) \ +#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1) \ { \ const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \ const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \ \ - in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); /* i1 i0 */ \ - in1 = _mm_unpackhi_epi32(tr0_0, tr0_1); /* i3 i2 */ \ + in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); /* i1 i0 */ \ + in1 = _mm_unpackhi_epi32(tr0_0, tr0_1); /* i3 i2 */ \ } -static INLINE void array_transpose_4X8(__m128i *in, __m128i * out) { +static INLINE void array_transpose_4X8(__m128i *in, __m128i *out) { const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]); const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]); const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]); @@ -95,43 +95,43 @@ static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) { static INLINE __m128i load_input_data(const tran_low_t *data) { #if CONFIG_VP9_HIGHBITDEPTH return octa_set_epi16(data[0], data[1], data[2], data[3], data[4], data[5], - data[6], data[7]); + data[6], data[7]); #else return _mm_load_si128((const __m128i *)data); #endif } static INLINE void load_buffer_8x16(const tran_low_t 
*input, __m128i *in) { - in[0] = load_input_data(input + 0 * 16); - in[1] = load_input_data(input + 1 * 16); - in[2] = load_input_data(input + 2 * 16); - in[3] = load_input_data(input + 3 * 16); - in[4] = load_input_data(input + 4 * 16); - in[5] = load_input_data(input + 5 * 16); - in[6] = load_input_data(input + 6 * 16); - in[7] = load_input_data(input + 7 * 16); + in[0] = load_input_data(input + 0 * 16); + in[1] = load_input_data(input + 1 * 16); + in[2] = load_input_data(input + 2 * 16); + in[3] = load_input_data(input + 3 * 16); + in[4] = load_input_data(input + 4 * 16); + in[5] = load_input_data(input + 5 * 16); + in[6] = load_input_data(input + 6 * 16); + in[7] = load_input_data(input + 7 * 16); - in[8] = load_input_data(input + 8 * 16); - in[9] = load_input_data(input + 9 * 16); - in[10] = load_input_data(input + 10 * 16); - in[11] = load_input_data(input + 11 * 16); - in[12] = load_input_data(input + 12 * 16); - in[13] = load_input_data(input + 13 * 16); - in[14] = load_input_data(input + 14 * 16); - in[15] = load_input_data(input + 15 * 16); + in[8] = load_input_data(input + 8 * 16); + in[9] = load_input_data(input + 9 * 16); + in[10] = load_input_data(input + 10 * 16); + in[11] = load_input_data(input + 11 * 16); + in[12] = load_input_data(input + 12 * 16); + in[13] = load_input_data(input + 13 * 16); + in[14] = load_input_data(input + 14 * 16); + in[15] = load_input_data(input + 15 * 16); } -#define RECON_AND_STORE(dest, in_x) \ - { \ - __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \ - d0 = _mm_unpacklo_epi8(d0, zero); \ - d0 = _mm_add_epi16(in_x, d0); \ - d0 = _mm_packus_epi16(d0, d0); \ - _mm_storel_epi64((__m128i *)(dest), d0); \ +#define RECON_AND_STORE(dest, in_x) \ + { \ + __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \ + d0 = _mm_unpacklo_epi8(d0, zero); \ + d0 = _mm_add_epi16(in_x, d0); \ + d0 = _mm_packus_epi16(d0, d0); \ + _mm_storel_epi64((__m128i *)(dest), d0); \ } static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int 
stride) { - const __m128i final_rounding = _mm_set1_epi16(1<<5); + const __m128i final_rounding = _mm_set1_epi16(1 << 5); const __m128i zero = _mm_setzero_si128(); // Final rounding and shift in[0] = _mm_adds_epi16(in[0], final_rounding); @@ -168,16 +168,16 @@ static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) { in[14] = _mm_srai_epi16(in[14], 6); in[15] = _mm_srai_epi16(in[15], 6); - RECON_AND_STORE(dest + 0 * stride, in[0]); - RECON_AND_STORE(dest + 1 * stride, in[1]); - RECON_AND_STORE(dest + 2 * stride, in[2]); - RECON_AND_STORE(dest + 3 * stride, in[3]); - RECON_AND_STORE(dest + 4 * stride, in[4]); - RECON_AND_STORE(dest + 5 * stride, in[5]); - RECON_AND_STORE(dest + 6 * stride, in[6]); - RECON_AND_STORE(dest + 7 * stride, in[7]); - RECON_AND_STORE(dest + 8 * stride, in[8]); - RECON_AND_STORE(dest + 9 * stride, in[9]); + RECON_AND_STORE(dest + 0 * stride, in[0]); + RECON_AND_STORE(dest + 1 * stride, in[1]); + RECON_AND_STORE(dest + 2 * stride, in[2]); + RECON_AND_STORE(dest + 3 * stride, in[3]); + RECON_AND_STORE(dest + 4 * stride, in[4]); + RECON_AND_STORE(dest + 5 * stride, in[5]); + RECON_AND_STORE(dest + 6 * stride, in[6]); + RECON_AND_STORE(dest + 7 * stride, in[7]); + RECON_AND_STORE(dest + 8 * stride, in[8]); + RECON_AND_STORE(dest + 9 * stride, in[9]); RECON_AND_STORE(dest + 10 * stride, in[10]); RECON_AND_STORE(dest + 11 * stride, in[11]); RECON_AND_STORE(dest + 12 * stride, in[12]); diff --git a/libs/libvpx/vpx_dsp/x86/loopfilter_avx2.c b/libs/libvpx/vpx_dsp/x86/loopfilter_avx2.c index 23a97dd05f..85923b4478 100644 --- a/libs/libvpx/vpx_dsp/x86/loopfilter_avx2.c +++ b/libs/libvpx/vpx_dsp/x86/loopfilter_avx2.c @@ -8,979 +8,906 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ -#include /* AVX2 */ +#include /* AVX2 */ #include "./vpx_dsp_rtcd.h" #include "vpx_ports/mem.h" -static void mb_lpf_horizontal_edge_w_avx2_8(unsigned char *s, int p, - const unsigned char *_blimit, const unsigned char *_limit, - const unsigned char *_thresh) { - __m128i mask, hev, flat, flat2; - const __m128i zero = _mm_set1_epi16(0); - const __m128i one = _mm_set1_epi8(1); - __m128i q7p7, q6p6, q5p5, q4p4, q3p3, q2p2, q1p1, q0p0, p0q0, p1q1; - __m128i abs_p1p0; +void vpx_lpf_horizontal_edge_8_avx2(unsigned char *s, int p, + const unsigned char *_blimit, + const unsigned char *_limit, + const unsigned char *_thresh) { + __m128i mask, hev, flat, flat2; + const __m128i zero = _mm_set1_epi16(0); + const __m128i one = _mm_set1_epi8(1); + __m128i q7p7, q6p6, q5p5, q4p4, q3p3, q2p2, q1p1, q0p0, p0q0, p1q1; + __m128i abs_p1p0; - const __m128i thresh = _mm_broadcastb_epi8( - _mm_cvtsi32_si128((int) _thresh[0])); - const __m128i limit = _mm_broadcastb_epi8( - _mm_cvtsi32_si128((int) _limit[0])); - const __m128i blimit = _mm_broadcastb_epi8( - _mm_cvtsi32_si128((int) _blimit[0])); + const __m128i thresh = + _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_thresh[0])); + const __m128i limit = _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_limit[0])); + const __m128i blimit = + _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_blimit[0])); - q4p4 = _mm_loadl_epi64((__m128i *) (s - 5 * p)); - q4p4 = _mm_castps_si128( - _mm_loadh_pi(_mm_castsi128_ps(q4p4), (__m64 *) (s + 4 * p))); - q3p3 = _mm_loadl_epi64((__m128i *) (s - 4 * p)); - q3p3 = _mm_castps_si128( - _mm_loadh_pi(_mm_castsi128_ps(q3p3), (__m64 *) (s + 3 * p))); - q2p2 = _mm_loadl_epi64((__m128i *) (s - 3 * p)); - q2p2 = _mm_castps_si128( - _mm_loadh_pi(_mm_castsi128_ps(q2p2), (__m64 *) (s + 2 * p))); - q1p1 = _mm_loadl_epi64((__m128i *) (s - 2 * p)); - q1p1 = _mm_castps_si128( - _mm_loadh_pi(_mm_castsi128_ps(q1p1), (__m64 *) (s + 1 * p))); - p1q1 = _mm_shuffle_epi32(q1p1, 78); - q0p0 = _mm_loadl_epi64((__m128i *) (s - 1 * p)); - 
q0p0 = _mm_castps_si128( - _mm_loadh_pi(_mm_castsi128_ps(q0p0), (__m64 *) (s - 0 * p))); - p0q0 = _mm_shuffle_epi32(q0p0, 78); + q4p4 = _mm_loadl_epi64((__m128i *)(s - 5 * p)); + q4p4 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q4p4), (__m64 *)(s + 4 * p))); + q3p3 = _mm_loadl_epi64((__m128i *)(s - 4 * p)); + q3p3 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q3p3), (__m64 *)(s + 3 * p))); + q2p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p)); + q2p2 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q2p2), (__m64 *)(s + 2 * p))); + q1p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p)); + q1p1 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q1p1), (__m64 *)(s + 1 * p))); + p1q1 = _mm_shuffle_epi32(q1p1, 78); + q0p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p)); + q0p0 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q0p0), (__m64 *)(s - 0 * p))); + p0q0 = _mm_shuffle_epi32(q0p0, 78); + + { + __m128i abs_p1q1, abs_p0q0, abs_q1q0, fe, ff, work; + abs_p1p0 = + _mm_or_si128(_mm_subs_epu8(q1p1, q0p0), _mm_subs_epu8(q0p0, q1p1)); + abs_q1q0 = _mm_srli_si128(abs_p1p0, 8); + fe = _mm_set1_epi8(0xfe); + ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0); + abs_p0q0 = + _mm_or_si128(_mm_subs_epu8(q0p0, p0q0), _mm_subs_epu8(p0q0, q0p0)); + abs_p1q1 = + _mm_or_si128(_mm_subs_epu8(q1p1, p1q1), _mm_subs_epu8(p1q1, q1p1)); + flat = _mm_max_epu8(abs_p1p0, abs_q1q0); + hev = _mm_subs_epu8(flat, thresh); + hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff); + + abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0); + abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); + mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit); + mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff); + // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1; + mask = _mm_max_epu8(abs_p1p0, mask); + // mask |= (abs(p1 - p0) > limit) * -1; + // mask |= (abs(q1 - q0) > limit) * -1; + + work = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(q2p2, q1p1), _mm_subs_epu8(q1p1, q2p2)), + 
_mm_or_si128(_mm_subs_epu8(q3p3, q2p2), _mm_subs_epu8(q2p2, q3p3))); + mask = _mm_max_epu8(work, mask); + mask = _mm_max_epu8(mask, _mm_srli_si128(mask, 8)); + mask = _mm_subs_epu8(mask, limit); + mask = _mm_cmpeq_epi8(mask, zero); + } + + // lp filter + { + const __m128i t4 = _mm_set1_epi8(4); + const __m128i t3 = _mm_set1_epi8(3); + const __m128i t80 = _mm_set1_epi8(0x80); + const __m128i t1 = _mm_set1_epi16(0x1); + __m128i qs1ps1 = _mm_xor_si128(q1p1, t80); + __m128i qs0ps0 = _mm_xor_si128(q0p0, t80); + __m128i qs0 = _mm_xor_si128(p0q0, t80); + __m128i qs1 = _mm_xor_si128(p1q1, t80); + __m128i filt; + __m128i work_a; + __m128i filter1, filter2; + __m128i flat2_q6p6, flat2_q5p5, flat2_q4p4, flat2_q3p3, flat2_q2p2; + __m128i flat2_q1p1, flat2_q0p0, flat_q2p2, flat_q1p1, flat_q0p0; + + filt = _mm_and_si128(_mm_subs_epi8(qs1ps1, qs1), hev); + work_a = _mm_subs_epi8(qs0, qs0ps0); + filt = _mm_adds_epi8(filt, work_a); + filt = _mm_adds_epi8(filt, work_a); + filt = _mm_adds_epi8(filt, work_a); + /* (vpx_filter + 3 * (qs0 - ps0)) & mask */ + filt = _mm_and_si128(filt, mask); + + filter1 = _mm_adds_epi8(filt, t4); + filter2 = _mm_adds_epi8(filt, t3); + + filter1 = _mm_unpacklo_epi8(zero, filter1); + filter1 = _mm_srai_epi16(filter1, 0xB); + filter2 = _mm_unpacklo_epi8(zero, filter2); + filter2 = _mm_srai_epi16(filter2, 0xB); + + /* Filter1 >> 3 */ + filt = _mm_packs_epi16(filter2, _mm_subs_epi16(zero, filter1)); + qs0ps0 = _mm_xor_si128(_mm_adds_epi8(qs0ps0, filt), t80); + + /* filt >> 1 */ + filt = _mm_adds_epi16(filter1, t1); + filt = _mm_srai_epi16(filt, 1); + filt = _mm_andnot_si128(_mm_srai_epi16(_mm_unpacklo_epi8(zero, hev), 0x8), + filt); + filt = _mm_packs_epi16(filt, _mm_subs_epi16(zero, filt)); + qs1ps1 = _mm_xor_si128(_mm_adds_epi8(qs1ps1, filt), t80); + // loopfilter done { - __m128i abs_p1q1, abs_p0q0, abs_q1q0, fe, ff, work; - abs_p1p0 = _mm_or_si128(_mm_subs_epu8(q1p1, q0p0), - _mm_subs_epu8(q0p0, q1p1)); - abs_q1q0 = _mm_srli_si128(abs_p1p0, 8); - fe = 
_mm_set1_epi8(0xfe); - ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0); - abs_p0q0 = _mm_or_si128(_mm_subs_epu8(q0p0, p0q0), - _mm_subs_epu8(p0q0, q0p0)); - abs_p1q1 = _mm_or_si128(_mm_subs_epu8(q1p1, p1q1), - _mm_subs_epu8(p1q1, q1p1)); - flat = _mm_max_epu8(abs_p1p0, abs_q1q0); - hev = _mm_subs_epu8(flat, thresh); - hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff); + __m128i work; + flat = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(q2p2, q0p0), _mm_subs_epu8(q0p0, q2p2)), + _mm_or_si128(_mm_subs_epu8(q3p3, q0p0), _mm_subs_epu8(q0p0, q3p3))); + flat = _mm_max_epu8(abs_p1p0, flat); + flat = _mm_max_epu8(flat, _mm_srli_si128(flat, 8)); + flat = _mm_subs_epu8(flat, one); + flat = _mm_cmpeq_epi8(flat, zero); + flat = _mm_and_si128(flat, mask); - abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0); - abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); - mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit); - mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff); - // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1; - mask = _mm_max_epu8(abs_p1p0, mask); - // mask |= (abs(p1 - p0) > limit) * -1; - // mask |= (abs(q1 - q0) > limit) * -1; + q5p5 = _mm_loadl_epi64((__m128i *)(s - 6 * p)); + q5p5 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q5p5), (__m64 *)(s + 5 * p))); - work = _mm_max_epu8( - _mm_or_si128(_mm_subs_epu8(q2p2, q1p1), - _mm_subs_epu8(q1p1, q2p2)), - _mm_or_si128(_mm_subs_epu8(q3p3, q2p2), - _mm_subs_epu8(q2p2, q3p3))); - mask = _mm_max_epu8(work, mask); - mask = _mm_max_epu8(mask, _mm_srli_si128(mask, 8)); - mask = _mm_subs_epu8(mask, limit); - mask = _mm_cmpeq_epi8(mask, zero); + q6p6 = _mm_loadl_epi64((__m128i *)(s - 7 * p)); + q6p6 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q6p6), (__m64 *)(s + 6 * p))); + + flat2 = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(q4p4, q0p0), _mm_subs_epu8(q0p0, q4p4)), + _mm_or_si128(_mm_subs_epu8(q5p5, q0p0), _mm_subs_epu8(q0p0, q5p5))); + + q7p7 = _mm_loadl_epi64((__m128i *)(s - 8 * p)); + 
q7p7 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q7p7), (__m64 *)(s + 7 * p))); + + work = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(q6p6, q0p0), _mm_subs_epu8(q0p0, q6p6)), + _mm_or_si128(_mm_subs_epu8(q7p7, q0p0), _mm_subs_epu8(q0p0, q7p7))); + + flat2 = _mm_max_epu8(work, flat2); + flat2 = _mm_max_epu8(flat2, _mm_srli_si128(flat2, 8)); + flat2 = _mm_subs_epu8(flat2, one); + flat2 = _mm_cmpeq_epi8(flat2, zero); + flat2 = _mm_and_si128(flat2, flat); // flat2 & flat & mask } - // lp filter + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // flat and wide flat calculations { - const __m128i t4 = _mm_set1_epi8(4); - const __m128i t3 = _mm_set1_epi8(3); - const __m128i t80 = _mm_set1_epi8(0x80); - const __m128i t1 = _mm_set1_epi16(0x1); - __m128i qs1ps1 = _mm_xor_si128(q1p1, t80); - __m128i qs0ps0 = _mm_xor_si128(q0p0, t80); - __m128i qs0 = _mm_xor_si128(p0q0, t80); - __m128i qs1 = _mm_xor_si128(p1q1, t80); - __m128i filt; - __m128i work_a; - __m128i filter1, filter2; - __m128i flat2_q6p6, flat2_q5p5, flat2_q4p4, flat2_q3p3, flat2_q2p2; - __m128i flat2_q1p1, flat2_q0p0, flat_q2p2, flat_q1p1, flat_q0p0; + const __m128i eight = _mm_set1_epi16(8); + const __m128i four = _mm_set1_epi16(4); + __m128i p7_16, p6_16, p5_16, p4_16, p3_16, p2_16, p1_16, p0_16; + __m128i q7_16, q6_16, q5_16, q4_16, q3_16, q2_16, q1_16, q0_16; + __m128i pixelFilter_p, pixelFilter_q; + __m128i pixetFilter_p2p1p0, pixetFilter_q2q1q0; + __m128i sum_p7, sum_q7, sum_p3, sum_q3, res_p, res_q; - filt = _mm_and_si128(_mm_subs_epi8(qs1ps1, qs1), hev); - work_a = _mm_subs_epi8(qs0, qs0ps0); - filt = _mm_adds_epi8(filt, work_a); - filt = _mm_adds_epi8(filt, work_a); - filt = _mm_adds_epi8(filt, work_a); - /* (vpx_filter + 3 * (qs0 - ps0)) & mask */ - filt = _mm_and_si128(filt, mask); + p7_16 = _mm_unpacklo_epi8(q7p7, zero); + p6_16 = _mm_unpacklo_epi8(q6p6, zero); + p5_16 = _mm_unpacklo_epi8(q5p5, zero); + p4_16 = _mm_unpacklo_epi8(q4p4, zero); + p3_16 = _mm_unpacklo_epi8(q3p3, zero); + 
p2_16 = _mm_unpacklo_epi8(q2p2, zero); + p1_16 = _mm_unpacklo_epi8(q1p1, zero); + p0_16 = _mm_unpacklo_epi8(q0p0, zero); + q0_16 = _mm_unpackhi_epi8(q0p0, zero); + q1_16 = _mm_unpackhi_epi8(q1p1, zero); + q2_16 = _mm_unpackhi_epi8(q2p2, zero); + q3_16 = _mm_unpackhi_epi8(q3p3, zero); + q4_16 = _mm_unpackhi_epi8(q4p4, zero); + q5_16 = _mm_unpackhi_epi8(q5p5, zero); + q6_16 = _mm_unpackhi_epi8(q6p6, zero); + q7_16 = _mm_unpackhi_epi8(q7p7, zero); - filter1 = _mm_adds_epi8(filt, t4); - filter2 = _mm_adds_epi8(filt, t3); + pixelFilter_p = _mm_add_epi16(_mm_add_epi16(p6_16, p5_16), + _mm_add_epi16(p4_16, p3_16)); + pixelFilter_q = _mm_add_epi16(_mm_add_epi16(q6_16, q5_16), + _mm_add_epi16(q4_16, q3_16)); - filter1 = _mm_unpacklo_epi8(zero, filter1); - filter1 = _mm_srai_epi16(filter1, 0xB); - filter2 = _mm_unpacklo_epi8(zero, filter2); - filter2 = _mm_srai_epi16(filter2, 0xB); + pixetFilter_p2p1p0 = _mm_add_epi16(p0_16, _mm_add_epi16(p2_16, p1_16)); + pixelFilter_p = _mm_add_epi16(pixelFilter_p, pixetFilter_p2p1p0); - /* Filter1 >> 3 */ - filt = _mm_packs_epi16(filter2, _mm_subs_epi16(zero, filter1)); - qs0ps0 = _mm_xor_si128(_mm_adds_epi8(qs0ps0, filt), t80); + pixetFilter_q2q1q0 = _mm_add_epi16(q0_16, _mm_add_epi16(q2_16, q1_16)); + pixelFilter_q = _mm_add_epi16(pixelFilter_q, pixetFilter_q2q1q0); + pixelFilter_p = + _mm_add_epi16(eight, _mm_add_epi16(pixelFilter_p, pixelFilter_q)); + pixetFilter_p2p1p0 = _mm_add_epi16( + four, _mm_add_epi16(pixetFilter_p2p1p0, pixetFilter_q2q1q0)); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(p7_16, p0_16)), 4); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(q7_16, q0_16)), 4); + flat2_q0p0 = _mm_packus_epi16(res_p, res_q); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(p3_16, p0_16)), 3); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(q3_16, q0_16)), 3); - /* filt >> 1 */ - filt = _mm_adds_epi16(filter1, t1); - filt = 
_mm_srai_epi16(filt, 1); - filt = _mm_andnot_si128( - _mm_srai_epi16(_mm_unpacklo_epi8(zero, hev), 0x8), filt); - filt = _mm_packs_epi16(filt, _mm_subs_epi16(zero, filt)); - qs1ps1 = _mm_xor_si128(_mm_adds_epi8(qs1ps1, filt), t80); - // loopfilter done + flat_q0p0 = _mm_packus_epi16(res_p, res_q); - { - __m128i work; - flat = _mm_max_epu8( - _mm_or_si128(_mm_subs_epu8(q2p2, q0p0), - _mm_subs_epu8(q0p0, q2p2)), - _mm_or_si128(_mm_subs_epu8(q3p3, q0p0), - _mm_subs_epu8(q0p0, q3p3))); - flat = _mm_max_epu8(abs_p1p0, flat); - flat = _mm_max_epu8(flat, _mm_srli_si128(flat, 8)); - flat = _mm_subs_epu8(flat, one); - flat = _mm_cmpeq_epi8(flat, zero); - flat = _mm_and_si128(flat, mask); + sum_p7 = _mm_add_epi16(p7_16, p7_16); + sum_q7 = _mm_add_epi16(q7_16, q7_16); + sum_p3 = _mm_add_epi16(p3_16, p3_16); + sum_q3 = _mm_add_epi16(q3_16, q3_16); - q5p5 = _mm_loadl_epi64((__m128i *) (s - 6 * p)); - q5p5 = _mm_castps_si128( - _mm_loadh_pi(_mm_castsi128_ps(q5p5), - (__m64 *) (s + 5 * p))); + pixelFilter_q = _mm_sub_epi16(pixelFilter_p, p6_16); + pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q6_16); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p1_16)), 4); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q1_16)), 4); + flat2_q1p1 = _mm_packus_epi16(res_p, res_q); - q6p6 = _mm_loadl_epi64((__m128i *) (s - 7 * p)); - q6p6 = _mm_castps_si128( - _mm_loadh_pi(_mm_castsi128_ps(q6p6), - (__m64 *) (s + 6 * p))); + pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_p2p1p0, p2_16); + pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q2_16); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(sum_p3, p1_16)), 3); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_q2q1q0, _mm_add_epi16(sum_q3, q1_16)), 3); + flat_q1p1 = _mm_packus_epi16(res_p, res_q); - flat2 = _mm_max_epu8( - _mm_or_si128(_mm_subs_epu8(q4p4, q0p0), - _mm_subs_epu8(q0p0, q4p4)), - _mm_or_si128(_mm_subs_epu8(q5p5, q0p0), - 
_mm_subs_epu8(q0p0, q5p5))); + sum_p7 = _mm_add_epi16(sum_p7, p7_16); + sum_q7 = _mm_add_epi16(sum_q7, q7_16); + sum_p3 = _mm_add_epi16(sum_p3, p3_16); + sum_q3 = _mm_add_epi16(sum_q3, q3_16); - q7p7 = _mm_loadl_epi64((__m128i *) (s - 8 * p)); - q7p7 = _mm_castps_si128( - _mm_loadh_pi(_mm_castsi128_ps(q7p7), - (__m64 *) (s + 7 * p))); + pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q5_16); + pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p5_16); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p2_16)), 4); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q2_16)), 4); + flat2_q2p2 = _mm_packus_epi16(res_p, res_q); - work = _mm_max_epu8( - _mm_or_si128(_mm_subs_epu8(q6p6, q0p0), - _mm_subs_epu8(q0p0, q6p6)), - _mm_or_si128(_mm_subs_epu8(q7p7, q0p0), - _mm_subs_epu8(q0p0, q7p7))); + pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q1_16); + pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_q2q1q0, p1_16); - flat2 = _mm_max_epu8(work, flat2); - flat2 = _mm_max_epu8(flat2, _mm_srli_si128(flat2, 8)); - flat2 = _mm_subs_epu8(flat2, one); - flat2 = _mm_cmpeq_epi8(flat2, zero); - flat2 = _mm_and_si128(flat2, flat); // flat2 & flat & mask - } + res_p = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(sum_p3, p2_16)), 3); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_q2q1q0, _mm_add_epi16(sum_q3, q2_16)), 3); + flat_q2p2 = _mm_packus_epi16(res_p, res_q); - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - // flat and wide flat calculations - { - const __m128i eight = _mm_set1_epi16(8); - const __m128i four = _mm_set1_epi16(4); - __m128i p7_16, p6_16, p5_16, p4_16, p3_16, p2_16, p1_16, p0_16; - __m128i q7_16, q6_16, q5_16, q4_16, q3_16, q2_16, q1_16, q0_16; - __m128i pixelFilter_p, pixelFilter_q; - __m128i pixetFilter_p2p1p0, pixetFilter_q2q1q0; - __m128i sum_p7, sum_q7, sum_p3, sum_q3, res_p, res_q; + sum_p7 = _mm_add_epi16(sum_p7, p7_16); + sum_q7 = _mm_add_epi16(sum_q7, q7_16); + 
pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q4_16); + pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p4_16); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p3_16)), 4); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q3_16)), 4); + flat2_q3p3 = _mm_packus_epi16(res_p, res_q); - p7_16 = _mm_unpacklo_epi8(q7p7, zero); - p6_16 = _mm_unpacklo_epi8(q6p6, zero); - p5_16 = _mm_unpacklo_epi8(q5p5, zero); - p4_16 = _mm_unpacklo_epi8(q4p4, zero); - p3_16 = _mm_unpacklo_epi8(q3p3, zero); - p2_16 = _mm_unpacklo_epi8(q2p2, zero); - p1_16 = _mm_unpacklo_epi8(q1p1, zero); - p0_16 = _mm_unpacklo_epi8(q0p0, zero); - q0_16 = _mm_unpackhi_epi8(q0p0, zero); - q1_16 = _mm_unpackhi_epi8(q1p1, zero); - q2_16 = _mm_unpackhi_epi8(q2p2, zero); - q3_16 = _mm_unpackhi_epi8(q3p3, zero); - q4_16 = _mm_unpackhi_epi8(q4p4, zero); - q5_16 = _mm_unpackhi_epi8(q5p5, zero); - q6_16 = _mm_unpackhi_epi8(q6p6, zero); - q7_16 = _mm_unpackhi_epi8(q7p7, zero); + sum_p7 = _mm_add_epi16(sum_p7, p7_16); + sum_q7 = _mm_add_epi16(sum_q7, q7_16); + pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q3_16); + pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p3_16); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p4_16)), 4); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q4_16)), 4); + flat2_q4p4 = _mm_packus_epi16(res_p, res_q); - pixelFilter_p = _mm_add_epi16(_mm_add_epi16(p6_16, p5_16), - _mm_add_epi16(p4_16, p3_16)); - pixelFilter_q = _mm_add_epi16(_mm_add_epi16(q6_16, q5_16), - _mm_add_epi16(q4_16, q3_16)); + sum_p7 = _mm_add_epi16(sum_p7, p7_16); + sum_q7 = _mm_add_epi16(sum_q7, q7_16); + pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q2_16); + pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p2_16); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p5_16)), 4); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q5_16)), 4); + flat2_q5p5 = 
_mm_packus_epi16(res_p, res_q); - pixetFilter_p2p1p0 = _mm_add_epi16(p0_16, - _mm_add_epi16(p2_16, p1_16)); - pixelFilter_p = _mm_add_epi16(pixelFilter_p, pixetFilter_p2p1p0); - - pixetFilter_q2q1q0 = _mm_add_epi16(q0_16, - _mm_add_epi16(q2_16, q1_16)); - pixelFilter_q = _mm_add_epi16(pixelFilter_q, pixetFilter_q2q1q0); - pixelFilter_p = _mm_add_epi16(eight, - _mm_add_epi16(pixelFilter_p, pixelFilter_q)); - pixetFilter_p2p1p0 = _mm_add_epi16(four, - _mm_add_epi16(pixetFilter_p2p1p0, pixetFilter_q2q1q0)); - res_p = _mm_srli_epi16( - _mm_add_epi16(pixelFilter_p, _mm_add_epi16(p7_16, p0_16)), - 4); - res_q = _mm_srli_epi16( - _mm_add_epi16(pixelFilter_p, _mm_add_epi16(q7_16, q0_16)), - 4); - flat2_q0p0 = _mm_packus_epi16(res_p, res_q); - res_p = _mm_srli_epi16( - _mm_add_epi16(pixetFilter_p2p1p0, - _mm_add_epi16(p3_16, p0_16)), 3); - res_q = _mm_srli_epi16( - _mm_add_epi16(pixetFilter_p2p1p0, - _mm_add_epi16(q3_16, q0_16)), 3); - - flat_q0p0 = _mm_packus_epi16(res_p, res_q); - - sum_p7 = _mm_add_epi16(p7_16, p7_16); - sum_q7 = _mm_add_epi16(q7_16, q7_16); - sum_p3 = _mm_add_epi16(p3_16, p3_16); - sum_q3 = _mm_add_epi16(q3_16, q3_16); - - pixelFilter_q = _mm_sub_epi16(pixelFilter_p, p6_16); - pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q6_16); - res_p = _mm_srli_epi16( - _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p1_16)), - 4); - res_q = _mm_srli_epi16( - _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q1_16)), - 4); - flat2_q1p1 = _mm_packus_epi16(res_p, res_q); - - pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_p2p1p0, p2_16); - pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q2_16); - res_p = _mm_srli_epi16( - _mm_add_epi16(pixetFilter_p2p1p0, - _mm_add_epi16(sum_p3, p1_16)), 3); - res_q = _mm_srli_epi16( - _mm_add_epi16(pixetFilter_q2q1q0, - _mm_add_epi16(sum_q3, q1_16)), 3); - flat_q1p1 = _mm_packus_epi16(res_p, res_q); - - sum_p7 = _mm_add_epi16(sum_p7, p7_16); - sum_q7 = _mm_add_epi16(sum_q7, q7_16); - sum_p3 = _mm_add_epi16(sum_p3, p3_16); - 
sum_q3 = _mm_add_epi16(sum_q3, q3_16); - - pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q5_16); - pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p5_16); - res_p = _mm_srli_epi16( - _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p2_16)), - 4); - res_q = _mm_srli_epi16( - _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q2_16)), - 4); - flat2_q2p2 = _mm_packus_epi16(res_p, res_q); - - pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q1_16); - pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_q2q1q0, p1_16); - - res_p = _mm_srli_epi16( - _mm_add_epi16(pixetFilter_p2p1p0, - _mm_add_epi16(sum_p3, p2_16)), 3); - res_q = _mm_srli_epi16( - _mm_add_epi16(pixetFilter_q2q1q0, - _mm_add_epi16(sum_q3, q2_16)), 3); - flat_q2p2 = _mm_packus_epi16(res_p, res_q); - - sum_p7 = _mm_add_epi16(sum_p7, p7_16); - sum_q7 = _mm_add_epi16(sum_q7, q7_16); - pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q4_16); - pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p4_16); - res_p = _mm_srli_epi16( - _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p3_16)), - 4); - res_q = _mm_srli_epi16( - _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q3_16)), - 4); - flat2_q3p3 = _mm_packus_epi16(res_p, res_q); - - sum_p7 = _mm_add_epi16(sum_p7, p7_16); - sum_q7 = _mm_add_epi16(sum_q7, q7_16); - pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q3_16); - pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p3_16); - res_p = _mm_srli_epi16( - _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p4_16)), - 4); - res_q = _mm_srli_epi16( - _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q4_16)), - 4); - flat2_q4p4 = _mm_packus_epi16(res_p, res_q); - - sum_p7 = _mm_add_epi16(sum_p7, p7_16); - sum_q7 = _mm_add_epi16(sum_q7, q7_16); - pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q2_16); - pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p2_16); - res_p = _mm_srli_epi16( - _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p5_16)), - 4); - res_q = _mm_srli_epi16( - _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q5_16)), - 
4); - flat2_q5p5 = _mm_packus_epi16(res_p, res_q); - - sum_p7 = _mm_add_epi16(sum_p7, p7_16); - sum_q7 = _mm_add_epi16(sum_q7, q7_16); - pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q1_16); - pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p1_16); - res_p = _mm_srli_epi16( - _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p6_16)), - 4); - res_q = _mm_srli_epi16( - _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q6_16)), - 4); - flat2_q6p6 = _mm_packus_epi16(res_p, res_q); - } - // wide flat - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - flat = _mm_shuffle_epi32(flat, 68); - flat2 = _mm_shuffle_epi32(flat2, 68); - - q2p2 = _mm_andnot_si128(flat, q2p2); - flat_q2p2 = _mm_and_si128(flat, flat_q2p2); - q2p2 = _mm_or_si128(q2p2, flat_q2p2); - - qs1ps1 = _mm_andnot_si128(flat, qs1ps1); - flat_q1p1 = _mm_and_si128(flat, flat_q1p1); - q1p1 = _mm_or_si128(qs1ps1, flat_q1p1); - - qs0ps0 = _mm_andnot_si128(flat, qs0ps0); - flat_q0p0 = _mm_and_si128(flat, flat_q0p0); - q0p0 = _mm_or_si128(qs0ps0, flat_q0p0); - - q6p6 = _mm_andnot_si128(flat2, q6p6); - flat2_q6p6 = _mm_and_si128(flat2, flat2_q6p6); - q6p6 = _mm_or_si128(q6p6, flat2_q6p6); - _mm_storel_epi64((__m128i *) (s - 7 * p), q6p6); - _mm_storeh_pi((__m64 *) (s + 6 * p), _mm_castsi128_ps(q6p6)); - - q5p5 = _mm_andnot_si128(flat2, q5p5); - flat2_q5p5 = _mm_and_si128(flat2, flat2_q5p5); - q5p5 = _mm_or_si128(q5p5, flat2_q5p5); - _mm_storel_epi64((__m128i *) (s - 6 * p), q5p5); - _mm_storeh_pi((__m64 *) (s + 5 * p), _mm_castsi128_ps(q5p5)); - - q4p4 = _mm_andnot_si128(flat2, q4p4); - flat2_q4p4 = _mm_and_si128(flat2, flat2_q4p4); - q4p4 = _mm_or_si128(q4p4, flat2_q4p4); - _mm_storel_epi64((__m128i *) (s - 5 * p), q4p4); - _mm_storeh_pi((__m64 *) (s + 4 * p), _mm_castsi128_ps(q4p4)); - - q3p3 = _mm_andnot_si128(flat2, q3p3); - flat2_q3p3 = _mm_and_si128(flat2, flat2_q3p3); - q3p3 = _mm_or_si128(q3p3, flat2_q3p3); - _mm_storel_epi64((__m128i *) (s - 4 * p), q3p3); - _mm_storeh_pi((__m64 *) (s + 3 * p), 
_mm_castsi128_ps(q3p3)); - - q2p2 = _mm_andnot_si128(flat2, q2p2); - flat2_q2p2 = _mm_and_si128(flat2, flat2_q2p2); - q2p2 = _mm_or_si128(q2p2, flat2_q2p2); - _mm_storel_epi64((__m128i *) (s - 3 * p), q2p2); - _mm_storeh_pi((__m64 *) (s + 2 * p), _mm_castsi128_ps(q2p2)); - - q1p1 = _mm_andnot_si128(flat2, q1p1); - flat2_q1p1 = _mm_and_si128(flat2, flat2_q1p1); - q1p1 = _mm_or_si128(q1p1, flat2_q1p1); - _mm_storel_epi64((__m128i *) (s - 2 * p), q1p1); - _mm_storeh_pi((__m64 *) (s + 1 * p), _mm_castsi128_ps(q1p1)); - - q0p0 = _mm_andnot_si128(flat2, q0p0); - flat2_q0p0 = _mm_and_si128(flat2, flat2_q0p0); - q0p0 = _mm_or_si128(q0p0, flat2_q0p0); - _mm_storel_epi64((__m128i *) (s - 1 * p), q0p0); - _mm_storeh_pi((__m64 *) (s - 0 * p), _mm_castsi128_ps(q0p0)); + sum_p7 = _mm_add_epi16(sum_p7, p7_16); + sum_q7 = _mm_add_epi16(sum_q7, q7_16); + pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q1_16); + pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p1_16); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p6_16)), 4); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q6_16)), 4); + flat2_q6p6 = _mm_packus_epi16(res_p, res_q); } + // wide flat + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + flat = _mm_shuffle_epi32(flat, 68); + flat2 = _mm_shuffle_epi32(flat2, 68); + + q2p2 = _mm_andnot_si128(flat, q2p2); + flat_q2p2 = _mm_and_si128(flat, flat_q2p2); + q2p2 = _mm_or_si128(q2p2, flat_q2p2); + + qs1ps1 = _mm_andnot_si128(flat, qs1ps1); + flat_q1p1 = _mm_and_si128(flat, flat_q1p1); + q1p1 = _mm_or_si128(qs1ps1, flat_q1p1); + + qs0ps0 = _mm_andnot_si128(flat, qs0ps0); + flat_q0p0 = _mm_and_si128(flat, flat_q0p0); + q0p0 = _mm_or_si128(qs0ps0, flat_q0p0); + + q6p6 = _mm_andnot_si128(flat2, q6p6); + flat2_q6p6 = _mm_and_si128(flat2, flat2_q6p6); + q6p6 = _mm_or_si128(q6p6, flat2_q6p6); + _mm_storel_epi64((__m128i *)(s - 7 * p), q6p6); + _mm_storeh_pi((__m64 *)(s + 6 * p), _mm_castsi128_ps(q6p6)); + + q5p5 = 
_mm_andnot_si128(flat2, q5p5); + flat2_q5p5 = _mm_and_si128(flat2, flat2_q5p5); + q5p5 = _mm_or_si128(q5p5, flat2_q5p5); + _mm_storel_epi64((__m128i *)(s - 6 * p), q5p5); + _mm_storeh_pi((__m64 *)(s + 5 * p), _mm_castsi128_ps(q5p5)); + + q4p4 = _mm_andnot_si128(flat2, q4p4); + flat2_q4p4 = _mm_and_si128(flat2, flat2_q4p4); + q4p4 = _mm_or_si128(q4p4, flat2_q4p4); + _mm_storel_epi64((__m128i *)(s - 5 * p), q4p4); + _mm_storeh_pi((__m64 *)(s + 4 * p), _mm_castsi128_ps(q4p4)); + + q3p3 = _mm_andnot_si128(flat2, q3p3); + flat2_q3p3 = _mm_and_si128(flat2, flat2_q3p3); + q3p3 = _mm_or_si128(q3p3, flat2_q3p3); + _mm_storel_epi64((__m128i *)(s - 4 * p), q3p3); + _mm_storeh_pi((__m64 *)(s + 3 * p), _mm_castsi128_ps(q3p3)); + + q2p2 = _mm_andnot_si128(flat2, q2p2); + flat2_q2p2 = _mm_and_si128(flat2, flat2_q2p2); + q2p2 = _mm_or_si128(q2p2, flat2_q2p2); + _mm_storel_epi64((__m128i *)(s - 3 * p), q2p2); + _mm_storeh_pi((__m64 *)(s + 2 * p), _mm_castsi128_ps(q2p2)); + + q1p1 = _mm_andnot_si128(flat2, q1p1); + flat2_q1p1 = _mm_and_si128(flat2, flat2_q1p1); + q1p1 = _mm_or_si128(q1p1, flat2_q1p1); + _mm_storel_epi64((__m128i *)(s - 2 * p), q1p1); + _mm_storeh_pi((__m64 *)(s + 1 * p), _mm_castsi128_ps(q1p1)); + + q0p0 = _mm_andnot_si128(flat2, q0p0); + flat2_q0p0 = _mm_and_si128(flat2, flat2_q0p0); + q0p0 = _mm_or_si128(q0p0, flat2_q0p0); + _mm_storel_epi64((__m128i *)(s - 1 * p), q0p0); + _mm_storeh_pi((__m64 *)(s - 0 * p), _mm_castsi128_ps(q0p0)); + } } DECLARE_ALIGNED(32, static const uint8_t, filt_loopfilter_avx2[32]) = { - 0, 128, 1, 128, 2, 128, 3, 128, 4, 128, 5, 128, 6, 128, 7, 128, + 0, 128, 1, 128, 2, 128, 3, 128, 4, 128, 5, 128, 6, 128, 7, 128, 8, 128, 9, 128, 10, 128, 11, 128, 12, 128, 13, 128, 14, 128, 15, 128 }; -static void mb_lpf_horizontal_edge_w_avx2_16(unsigned char *s, int p, - const unsigned char *_blimit, const unsigned char *_limit, - const unsigned char *_thresh) { - __m128i mask, hev, flat, flat2; - const __m128i zero = _mm_set1_epi16(0); - const __m128i 
one = _mm_set1_epi8(1); - __m128i p7, p6, p5; - __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4; - __m128i q5, q6, q7; - __m256i p256_7, q256_7, p256_6, q256_6, p256_5, q256_5, p256_4, - q256_4, p256_3, q256_3, p256_2, q256_2, p256_1, q256_1, - p256_0, q256_0; +void vpx_lpf_horizontal_edge_16_avx2(unsigned char *s, int p, + const unsigned char *_blimit, + const unsigned char *_limit, + const unsigned char *_thresh) { + __m128i mask, hev, flat, flat2; + const __m128i zero = _mm_set1_epi16(0); + const __m128i one = _mm_set1_epi8(1); + __m128i p7, p6, p5; + __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4; + __m128i q5, q6, q7; + __m256i p256_7, q256_7, p256_6, q256_6, p256_5, q256_5, p256_4, q256_4, + p256_3, q256_3, p256_2, q256_2, p256_1, q256_1, p256_0, q256_0; - const __m128i thresh = _mm_broadcastb_epi8( - _mm_cvtsi32_si128((int) _thresh[0])); - const __m128i limit = _mm_broadcastb_epi8( - _mm_cvtsi32_si128((int) _limit[0])); - const __m128i blimit = _mm_broadcastb_epi8( - _mm_cvtsi32_si128((int) _blimit[0])); + const __m128i thresh = + _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_thresh[0])); + const __m128i limit = _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_limit[0])); + const __m128i blimit = + _mm_broadcastb_epi8(_mm_cvtsi32_si128((int)_blimit[0])); - p256_4 = _mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s - 5 * p))); - p256_3 = _mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s - 4 * p))); - p256_2 = _mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s - 3 * p))); - p256_1 = _mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s - 2 * p))); - p256_0 = _mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s - 1 * p))); - q256_0 = _mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s - 0 * p))); - q256_1 = _mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s + 1 * p))); - q256_2 = _mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s + 2 * p))); - q256_3 = 
_mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s + 3 * p))); - q256_4 = _mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s + 4 * p))); + p256_4 = + _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 5 * p))); + p256_3 = + _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 4 * p))); + p256_2 = + _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 3 * p))); + p256_1 = + _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 2 * p))); + p256_0 = + _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 1 * p))); + q256_0 = + _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s - 0 * p))); + q256_1 = + _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 1 * p))); + q256_2 = + _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 2 * p))); + q256_3 = + _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 3 * p))); + q256_4 = + _mm256_castpd_si256(_mm256_broadcast_pd((__m128d const *)(s + 4 * p))); - p4 = _mm256_castsi256_si128(p256_4); - p3 = _mm256_castsi256_si128(p256_3); - p2 = _mm256_castsi256_si128(p256_2); - p1 = _mm256_castsi256_si128(p256_1); - p0 = _mm256_castsi256_si128(p256_0); - q0 = _mm256_castsi256_si128(q256_0); - q1 = _mm256_castsi256_si128(q256_1); - q2 = _mm256_castsi256_si128(q256_2); - q3 = _mm256_castsi256_si128(q256_3); - q4 = _mm256_castsi256_si128(q256_4); + p4 = _mm256_castsi256_si128(p256_4); + p3 = _mm256_castsi256_si128(p256_3); + p2 = _mm256_castsi256_si128(p256_2); + p1 = _mm256_castsi256_si128(p256_1); + p0 = _mm256_castsi256_si128(p256_0); + q0 = _mm256_castsi256_si128(q256_0); + q1 = _mm256_castsi256_si128(q256_1); + q2 = _mm256_castsi256_si128(q256_2); + q3 = _mm256_castsi256_si128(q256_3); + q4 = _mm256_castsi256_si128(q256_4); + + { + const __m128i abs_p1p0 = + _mm_or_si128(_mm_subs_epu8(p1, p0), _mm_subs_epu8(p0, p1)); + const __m128i abs_q1q0 = + _mm_or_si128(_mm_subs_epu8(q1, q0), _mm_subs_epu8(q0, q1)); + const __m128i fe = 
_mm_set1_epi8(0xfe); + const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0); + __m128i abs_p0q0 = + _mm_or_si128(_mm_subs_epu8(p0, q0), _mm_subs_epu8(q0, p0)); + __m128i abs_p1q1 = + _mm_or_si128(_mm_subs_epu8(p1, q1), _mm_subs_epu8(q1, p1)); + __m128i work; + flat = _mm_max_epu8(abs_p1p0, abs_q1q0); + hev = _mm_subs_epu8(flat, thresh); + hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff); + + abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0); + abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); + mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit); + mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff); + // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1; + mask = _mm_max_epu8(flat, mask); + // mask |= (abs(p1 - p0) > limit) * -1; + // mask |= (abs(q1 - q0) > limit) * -1; + work = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(p2, p1), _mm_subs_epu8(p1, p2)), + _mm_or_si128(_mm_subs_epu8(p3, p2), _mm_subs_epu8(p2, p3))); + mask = _mm_max_epu8(work, mask); + work = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(q2, q1), _mm_subs_epu8(q1, q2)), + _mm_or_si128(_mm_subs_epu8(q3, q2), _mm_subs_epu8(q2, q3))); + mask = _mm_max_epu8(work, mask); + mask = _mm_subs_epu8(mask, limit); + mask = _mm_cmpeq_epi8(mask, zero); + } + + // lp filter + { + const __m128i t4 = _mm_set1_epi8(4); + const __m128i t3 = _mm_set1_epi8(3); + const __m128i t80 = _mm_set1_epi8(0x80); + const __m128i te0 = _mm_set1_epi8(0xe0); + const __m128i t1f = _mm_set1_epi8(0x1f); + const __m128i t1 = _mm_set1_epi8(0x1); + const __m128i t7f = _mm_set1_epi8(0x7f); + + __m128i ps1 = _mm_xor_si128(p1, t80); + __m128i ps0 = _mm_xor_si128(p0, t80); + __m128i qs0 = _mm_xor_si128(q0, t80); + __m128i qs1 = _mm_xor_si128(q1, t80); + __m128i filt; + __m128i work_a; + __m128i filter1, filter2; + __m128i flat2_p6, flat2_p5, flat2_p4, flat2_p3, flat2_p2, flat2_p1, + flat2_p0, flat2_q0, flat2_q1, flat2_q2, flat2_q3, flat2_q4, flat2_q5, + flat2_q6, flat_p2, flat_p1, flat_p0, flat_q0, flat_q1, 
flat_q2; + + filt = _mm_and_si128(_mm_subs_epi8(ps1, qs1), hev); + work_a = _mm_subs_epi8(qs0, ps0); + filt = _mm_adds_epi8(filt, work_a); + filt = _mm_adds_epi8(filt, work_a); + filt = _mm_adds_epi8(filt, work_a); + /* (vpx_filter + 3 * (qs0 - ps0)) & mask */ + filt = _mm_and_si128(filt, mask); + + filter1 = _mm_adds_epi8(filt, t4); + filter2 = _mm_adds_epi8(filt, t3); + + /* Filter1 >> 3 */ + work_a = _mm_cmpgt_epi8(zero, filter1); + filter1 = _mm_srli_epi16(filter1, 3); + work_a = _mm_and_si128(work_a, te0); + filter1 = _mm_and_si128(filter1, t1f); + filter1 = _mm_or_si128(filter1, work_a); + qs0 = _mm_xor_si128(_mm_subs_epi8(qs0, filter1), t80); + + /* Filter2 >> 3 */ + work_a = _mm_cmpgt_epi8(zero, filter2); + filter2 = _mm_srli_epi16(filter2, 3); + work_a = _mm_and_si128(work_a, te0); + filter2 = _mm_and_si128(filter2, t1f); + filter2 = _mm_or_si128(filter2, work_a); + ps0 = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80); + + /* filt >> 1 */ + filt = _mm_adds_epi8(filter1, t1); + work_a = _mm_cmpgt_epi8(zero, filt); + filt = _mm_srli_epi16(filt, 1); + work_a = _mm_and_si128(work_a, t80); + filt = _mm_and_si128(filt, t7f); + filt = _mm_or_si128(filt, work_a); + filt = _mm_andnot_si128(hev, filt); + ps1 = _mm_xor_si128(_mm_adds_epi8(ps1, filt), t80); + qs1 = _mm_xor_si128(_mm_subs_epi8(qs1, filt), t80); + // loopfilter done { - const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0), - _mm_subs_epu8(p0, p1)); - const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0), - _mm_subs_epu8(q0, q1)); - const __m128i fe = _mm_set1_epi8(0xfe); - const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0); - __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0), - _mm_subs_epu8(q0, p0)); - __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu8(p1, q1), - _mm_subs_epu8(q1, p1)); - __m128i work; - flat = _mm_max_epu8(abs_p1p0, abs_q1q0); - hev = _mm_subs_epu8(flat, thresh); - hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff); + __m128i work; + work = _mm_max_epu8( + 
_mm_or_si128(_mm_subs_epu8(p2, p0), _mm_subs_epu8(p0, p2)), + _mm_or_si128(_mm_subs_epu8(q2, q0), _mm_subs_epu8(q0, q2))); + flat = _mm_max_epu8(work, flat); + work = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(p3, p0), _mm_subs_epu8(p0, p3)), + _mm_or_si128(_mm_subs_epu8(q3, q0), _mm_subs_epu8(q0, q3))); + flat = _mm_max_epu8(work, flat); + work = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(p4, p0), _mm_subs_epu8(p0, p4)), + _mm_or_si128(_mm_subs_epu8(q4, q0), _mm_subs_epu8(q0, q4))); + flat = _mm_subs_epu8(flat, one); + flat = _mm_cmpeq_epi8(flat, zero); + flat = _mm_and_si128(flat, mask); - abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0); - abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); - mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit); - mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff); - // mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1; - mask = _mm_max_epu8(flat, mask); - // mask |= (abs(p1 - p0) > limit) * -1; - // mask |= (abs(q1 - q0) > limit) * -1; - work = _mm_max_epu8( - _mm_or_si128(_mm_subs_epu8(p2, p1), _mm_subs_epu8(p1, p2)), - _mm_or_si128(_mm_subs_epu8(p3, p2), _mm_subs_epu8(p2, p3))); - mask = _mm_max_epu8(work, mask); - work = _mm_max_epu8( - _mm_or_si128(_mm_subs_epu8(q2, q1), _mm_subs_epu8(q1, q2)), - _mm_or_si128(_mm_subs_epu8(q3, q2), _mm_subs_epu8(q2, q3))); - mask = _mm_max_epu8(work, mask); - mask = _mm_subs_epu8(mask, limit); - mask = _mm_cmpeq_epi8(mask, zero); + p256_5 = _mm256_castpd_si256( + _mm256_broadcast_pd((__m128d const *)(s - 6 * p))); + q256_5 = _mm256_castpd_si256( + _mm256_broadcast_pd((__m128d const *)(s + 5 * p))); + p5 = _mm256_castsi256_si128(p256_5); + q5 = _mm256_castsi256_si128(q256_5); + flat2 = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(p5, p0), _mm_subs_epu8(p0, p5)), + _mm_or_si128(_mm_subs_epu8(q5, q0), _mm_subs_epu8(q0, q5))); + + flat2 = _mm_max_epu8(work, flat2); + p256_6 = _mm256_castpd_si256( + _mm256_broadcast_pd((__m128d const *)(s - 7 * p))); + q256_6 = 
_mm256_castpd_si256( + _mm256_broadcast_pd((__m128d const *)(s + 6 * p))); + p6 = _mm256_castsi256_si128(p256_6); + q6 = _mm256_castsi256_si128(q256_6); + work = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(p6, p0), _mm_subs_epu8(p0, p6)), + _mm_or_si128(_mm_subs_epu8(q6, q0), _mm_subs_epu8(q0, q6))); + + flat2 = _mm_max_epu8(work, flat2); + + p256_7 = _mm256_castpd_si256( + _mm256_broadcast_pd((__m128d const *)(s - 8 * p))); + q256_7 = _mm256_castpd_si256( + _mm256_broadcast_pd((__m128d const *)(s + 7 * p))); + p7 = _mm256_castsi256_si128(p256_7); + q7 = _mm256_castsi256_si128(q256_7); + work = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(p7, p0), _mm_subs_epu8(p0, p7)), + _mm_or_si128(_mm_subs_epu8(q7, q0), _mm_subs_epu8(q0, q7))); + + flat2 = _mm_max_epu8(work, flat2); + flat2 = _mm_subs_epu8(flat2, one); + flat2 = _mm_cmpeq_epi8(flat2, zero); + flat2 = _mm_and_si128(flat2, flat); // flat2 & flat & mask } - // lp filter + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // flat and wide flat calculations { - const __m128i t4 = _mm_set1_epi8(4); - const __m128i t3 = _mm_set1_epi8(3); - const __m128i t80 = _mm_set1_epi8(0x80); - const __m128i te0 = _mm_set1_epi8(0xe0); - const __m128i t1f = _mm_set1_epi8(0x1f); - const __m128i t1 = _mm_set1_epi8(0x1); - const __m128i t7f = _mm_set1_epi8(0x7f); - - __m128i ps1 = _mm_xor_si128(p1, t80); - __m128i ps0 = _mm_xor_si128(p0, t80); - __m128i qs0 = _mm_xor_si128(q0, t80); - __m128i qs1 = _mm_xor_si128(q1, t80); - __m128i filt; - __m128i work_a; - __m128i filter1, filter2; - __m128i flat2_p6, flat2_p5, flat2_p4, flat2_p3, flat2_p2, flat2_p1, - flat2_p0, flat2_q0, flat2_q1, flat2_q2, flat2_q3, flat2_q4, - flat2_q5, flat2_q6, flat_p2, flat_p1, flat_p0, flat_q0, flat_q1, - flat_q2; - - filt = _mm_and_si128(_mm_subs_epi8(ps1, qs1), hev); - work_a = _mm_subs_epi8(qs0, ps0); - filt = _mm_adds_epi8(filt, work_a); - filt = _mm_adds_epi8(filt, work_a); - filt = _mm_adds_epi8(filt, work_a); - /* (vpx_filter + 3 * (qs0 - ps0)) & 
mask */ - filt = _mm_and_si128(filt, mask); - - filter1 = _mm_adds_epi8(filt, t4); - filter2 = _mm_adds_epi8(filt, t3); - - /* Filter1 >> 3 */ - work_a = _mm_cmpgt_epi8(zero, filter1); - filter1 = _mm_srli_epi16(filter1, 3); - work_a = _mm_and_si128(work_a, te0); - filter1 = _mm_and_si128(filter1, t1f); - filter1 = _mm_or_si128(filter1, work_a); - qs0 = _mm_xor_si128(_mm_subs_epi8(qs0, filter1), t80); - - /* Filter2 >> 3 */ - work_a = _mm_cmpgt_epi8(zero, filter2); - filter2 = _mm_srli_epi16(filter2, 3); - work_a = _mm_and_si128(work_a, te0); - filter2 = _mm_and_si128(filter2, t1f); - filter2 = _mm_or_si128(filter2, work_a); - ps0 = _mm_xor_si128(_mm_adds_epi8(ps0, filter2), t80); - - /* filt >> 1 */ - filt = _mm_adds_epi8(filter1, t1); - work_a = _mm_cmpgt_epi8(zero, filt); - filt = _mm_srli_epi16(filt, 1); - work_a = _mm_and_si128(work_a, t80); - filt = _mm_and_si128(filt, t7f); - filt = _mm_or_si128(filt, work_a); - filt = _mm_andnot_si128(hev, filt); - ps1 = _mm_xor_si128(_mm_adds_epi8(ps1, filt), t80); - qs1 = _mm_xor_si128(_mm_subs_epi8(qs1, filt), t80); - // loopfilter done - - { - __m128i work; - work = _mm_max_epu8( - _mm_or_si128(_mm_subs_epu8(p2, p0), _mm_subs_epu8(p0, p2)), - _mm_or_si128(_mm_subs_epu8(q2, q0), _mm_subs_epu8(q0, q2))); - flat = _mm_max_epu8(work, flat); - work = _mm_max_epu8( - _mm_or_si128(_mm_subs_epu8(p3, p0), _mm_subs_epu8(p0, p3)), - _mm_or_si128(_mm_subs_epu8(q3, q0), _mm_subs_epu8(q0, q3))); - flat = _mm_max_epu8(work, flat); - work = _mm_max_epu8( - _mm_or_si128(_mm_subs_epu8(p4, p0), _mm_subs_epu8(p0, p4)), - _mm_or_si128(_mm_subs_epu8(q4, q0), _mm_subs_epu8(q0, q4))); - flat = _mm_subs_epu8(flat, one); - flat = _mm_cmpeq_epi8(flat, zero); - flat = _mm_and_si128(flat, mask); - - p256_5 = _mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s - 6 * p))); - q256_5 = _mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s + 5 * p))); - p5 = _mm256_castsi256_si128(p256_5); - q5 = _mm256_castsi256_si128(q256_5); - 
flat2 = _mm_max_epu8( - _mm_or_si128(_mm_subs_epu8(p5, p0), _mm_subs_epu8(p0, p5)), - _mm_or_si128(_mm_subs_epu8(q5, q0), _mm_subs_epu8(q0, q5))); - - flat2 = _mm_max_epu8(work, flat2); - p256_6 = _mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s - 7 * p))); - q256_6 = _mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s + 6 * p))); - p6 = _mm256_castsi256_si128(p256_6); - q6 = _mm256_castsi256_si128(q256_6); - work = _mm_max_epu8( - _mm_or_si128(_mm_subs_epu8(p6, p0), _mm_subs_epu8(p0, p6)), - _mm_or_si128(_mm_subs_epu8(q6, q0), _mm_subs_epu8(q0, q6))); - - flat2 = _mm_max_epu8(work, flat2); - - p256_7 = _mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s - 8 * p))); - q256_7 = _mm256_castpd_si256(_mm256_broadcast_pd( - (__m128d const *)(s + 7 * p))); - p7 = _mm256_castsi256_si128(p256_7); - q7 = _mm256_castsi256_si128(q256_7); - work = _mm_max_epu8( - _mm_or_si128(_mm_subs_epu8(p7, p0), _mm_subs_epu8(p0, p7)), - _mm_or_si128(_mm_subs_epu8(q7, q0), _mm_subs_epu8(q0, q7))); - - flat2 = _mm_max_epu8(work, flat2); - flat2 = _mm_subs_epu8(flat2, one); - flat2 = _mm_cmpeq_epi8(flat2, zero); - flat2 = _mm_and_si128(flat2, flat); // flat2 & flat & mask - } - - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - // flat and wide flat calculations - { - const __m256i eight = _mm256_set1_epi16(8); - const __m256i four = _mm256_set1_epi16(4); - __m256i pixelFilter_p, pixelFilter_q, pixetFilter_p2p1p0, - pixetFilter_q2q1q0, sum_p7, sum_q7, sum_p3, sum_q3, res_p, - res_q; - - const __m256i filter = _mm256_load_si256( - (__m256i const *)filt_loopfilter_avx2); - p256_7 = _mm256_shuffle_epi8(p256_7, filter); - p256_6 = _mm256_shuffle_epi8(p256_6, filter); - p256_5 = _mm256_shuffle_epi8(p256_5, filter); - p256_4 = _mm256_shuffle_epi8(p256_4, filter); - p256_3 = _mm256_shuffle_epi8(p256_3, filter); - p256_2 = _mm256_shuffle_epi8(p256_2, filter); - p256_1 = _mm256_shuffle_epi8(p256_1, filter); - p256_0 = _mm256_shuffle_epi8(p256_0, filter); - 
q256_0 = _mm256_shuffle_epi8(q256_0, filter); - q256_1 = _mm256_shuffle_epi8(q256_1, filter); - q256_2 = _mm256_shuffle_epi8(q256_2, filter); - q256_3 = _mm256_shuffle_epi8(q256_3, filter); - q256_4 = _mm256_shuffle_epi8(q256_4, filter); - q256_5 = _mm256_shuffle_epi8(q256_5, filter); - q256_6 = _mm256_shuffle_epi8(q256_6, filter); - q256_7 = _mm256_shuffle_epi8(q256_7, filter); - - pixelFilter_p = _mm256_add_epi16(_mm256_add_epi16(p256_6, p256_5), - _mm256_add_epi16(p256_4, p256_3)); - pixelFilter_q = _mm256_add_epi16(_mm256_add_epi16(q256_6, q256_5), - _mm256_add_epi16(q256_4, q256_3)); - - pixetFilter_p2p1p0 = _mm256_add_epi16(p256_0, - _mm256_add_epi16(p256_2, p256_1)); - pixelFilter_p = _mm256_add_epi16(pixelFilter_p, pixetFilter_p2p1p0); - - pixetFilter_q2q1q0 = _mm256_add_epi16(q256_0, - _mm256_add_epi16(q256_2, q256_1)); - pixelFilter_q = _mm256_add_epi16(pixelFilter_q, pixetFilter_q2q1q0); - - pixelFilter_p = _mm256_add_epi16(eight, - _mm256_add_epi16(pixelFilter_p, pixelFilter_q)); - - pixetFilter_p2p1p0 = _mm256_add_epi16(four, - _mm256_add_epi16(pixetFilter_p2p1p0, pixetFilter_q2q1q0)); - - res_p = _mm256_srli_epi16( - _mm256_add_epi16(pixelFilter_p, - _mm256_add_epi16(p256_7, p256_0)), 4); - - flat2_p0 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), - 168)); - - res_q = _mm256_srli_epi16( - _mm256_add_epi16(pixelFilter_p, - _mm256_add_epi16(q256_7, q256_0)), 4); - - flat2_q0 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), - 168)); - - res_p = _mm256_srli_epi16( - _mm256_add_epi16(pixetFilter_p2p1p0, - _mm256_add_epi16(p256_3, p256_0)), 3); + const __m256i eight = _mm256_set1_epi16(8); + const __m256i four = _mm256_set1_epi16(4); + __m256i pixelFilter_p, pixelFilter_q, pixetFilter_p2p1p0, + pixetFilter_q2q1q0, sum_p7, sum_q7, sum_p3, sum_q3, res_p, res_q; - flat_p0 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), - 168)); + const 
__m256i filter = + _mm256_load_si256((__m256i const *)filt_loopfilter_avx2); + p256_7 = _mm256_shuffle_epi8(p256_7, filter); + p256_6 = _mm256_shuffle_epi8(p256_6, filter); + p256_5 = _mm256_shuffle_epi8(p256_5, filter); + p256_4 = _mm256_shuffle_epi8(p256_4, filter); + p256_3 = _mm256_shuffle_epi8(p256_3, filter); + p256_2 = _mm256_shuffle_epi8(p256_2, filter); + p256_1 = _mm256_shuffle_epi8(p256_1, filter); + p256_0 = _mm256_shuffle_epi8(p256_0, filter); + q256_0 = _mm256_shuffle_epi8(q256_0, filter); + q256_1 = _mm256_shuffle_epi8(q256_1, filter); + q256_2 = _mm256_shuffle_epi8(q256_2, filter); + q256_3 = _mm256_shuffle_epi8(q256_3, filter); + q256_4 = _mm256_shuffle_epi8(q256_4, filter); + q256_5 = _mm256_shuffle_epi8(q256_5, filter); + q256_6 = _mm256_shuffle_epi8(q256_6, filter); + q256_7 = _mm256_shuffle_epi8(q256_7, filter); - res_q = _mm256_srli_epi16( - _mm256_add_epi16(pixetFilter_p2p1p0, - _mm256_add_epi16(q256_3, q256_0)), 3); + pixelFilter_p = _mm256_add_epi16(_mm256_add_epi16(p256_6, p256_5), + _mm256_add_epi16(p256_4, p256_3)); + pixelFilter_q = _mm256_add_epi16(_mm256_add_epi16(q256_6, q256_5), + _mm256_add_epi16(q256_4, q256_3)); - flat_q0 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), - 168)); + pixetFilter_p2p1p0 = + _mm256_add_epi16(p256_0, _mm256_add_epi16(p256_2, p256_1)); + pixelFilter_p = _mm256_add_epi16(pixelFilter_p, pixetFilter_p2p1p0); - sum_p7 = _mm256_add_epi16(p256_7, p256_7); + pixetFilter_q2q1q0 = + _mm256_add_epi16(q256_0, _mm256_add_epi16(q256_2, q256_1)); + pixelFilter_q = _mm256_add_epi16(pixelFilter_q, pixetFilter_q2q1q0); - sum_q7 = _mm256_add_epi16(q256_7, q256_7); + pixelFilter_p = _mm256_add_epi16( + eight, _mm256_add_epi16(pixelFilter_p, pixelFilter_q)); - sum_p3 = _mm256_add_epi16(p256_3, p256_3); + pixetFilter_p2p1p0 = _mm256_add_epi16( + four, _mm256_add_epi16(pixetFilter_p2p1p0, pixetFilter_q2q1q0)); - sum_q3 = _mm256_add_epi16(q256_3, q256_3); + res_p = _mm256_srli_epi16( + 
_mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(p256_7, p256_0)), 4); - pixelFilter_q = _mm256_sub_epi16(pixelFilter_p, p256_6); + flat2_p0 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); - pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_6); + res_q = _mm256_srli_epi16( + _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(q256_7, q256_0)), 4); - res_p = _mm256_srli_epi16( - _mm256_add_epi16(pixelFilter_p, - _mm256_add_epi16(sum_p7, p256_1)), 4); + flat2_q0 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); - flat2_p1 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), - 168)); + res_p = + _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0, + _mm256_add_epi16(p256_3, p256_0)), + 3); - res_q = _mm256_srli_epi16( - _mm256_add_epi16(pixelFilter_q, - _mm256_add_epi16(sum_q7, q256_1)), 4); + flat_p0 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); - flat2_q1 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), - 168)); + res_q = + _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0, + _mm256_add_epi16(q256_3, q256_0)), + 3); - pixetFilter_q2q1q0 = _mm256_sub_epi16(pixetFilter_p2p1p0, p256_2); + flat_q0 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); - pixetFilter_p2p1p0 = _mm256_sub_epi16(pixetFilter_p2p1p0, q256_2); + sum_p7 = _mm256_add_epi16(p256_7, p256_7); - res_p = _mm256_srli_epi16( - _mm256_add_epi16(pixetFilter_p2p1p0, - _mm256_add_epi16(sum_p3, p256_1)), 3); + sum_q7 = _mm256_add_epi16(q256_7, q256_7); - flat_p1 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), - 168)); + sum_p3 = _mm256_add_epi16(p256_3, p256_3); - res_q = _mm256_srli_epi16( - _mm256_add_epi16(pixetFilter_q2q1q0, - _mm256_add_epi16(sum_q3, q256_1)), 3); + sum_q3 = _mm256_add_epi16(q256_3, q256_3); 
- flat_q1 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), - 168)); + pixelFilter_q = _mm256_sub_epi16(pixelFilter_p, p256_6); - sum_p7 = _mm256_add_epi16(sum_p7, p256_7); + pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_6); - sum_q7 = _mm256_add_epi16(sum_q7, q256_7); + res_p = _mm256_srli_epi16( + _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_1)), 4); - sum_p3 = _mm256_add_epi16(sum_p3, p256_3); + flat2_p1 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); - sum_q3 = _mm256_add_epi16(sum_q3, q256_3); + res_q = _mm256_srli_epi16( + _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_1)), 4); - pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_5); + flat2_q1 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); - pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_5); + pixetFilter_q2q1q0 = _mm256_sub_epi16(pixetFilter_p2p1p0, p256_2); - res_p = _mm256_srli_epi16( - _mm256_add_epi16(pixelFilter_p, - _mm256_add_epi16(sum_p7, p256_2)), 4); + pixetFilter_p2p1p0 = _mm256_sub_epi16(pixetFilter_p2p1p0, q256_2); - flat2_p2 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), - 168)); + res_p = + _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0, + _mm256_add_epi16(sum_p3, p256_1)), + 3); - res_q = _mm256_srli_epi16( - _mm256_add_epi16(pixelFilter_q, - _mm256_add_epi16(sum_q7, q256_2)), 4); + flat_p1 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); - flat2_q2 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), - 168)); + res_q = + _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_q2q1q0, + _mm256_add_epi16(sum_q3, q256_1)), + 3); - pixetFilter_p2p1p0 = _mm256_sub_epi16(pixetFilter_p2p1p0, q256_1); + flat_q1 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); 
- pixetFilter_q2q1q0 = _mm256_sub_epi16(pixetFilter_q2q1q0, p256_1); + sum_p7 = _mm256_add_epi16(sum_p7, p256_7); - res_p = _mm256_srli_epi16( - _mm256_add_epi16(pixetFilter_p2p1p0, - _mm256_add_epi16(sum_p3, p256_2)), 3); + sum_q7 = _mm256_add_epi16(sum_q7, q256_7); - flat_p2 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), - 168)); + sum_p3 = _mm256_add_epi16(sum_p3, p256_3); - res_q = _mm256_srli_epi16( - _mm256_add_epi16(pixetFilter_q2q1q0, - _mm256_add_epi16(sum_q3, q256_2)), 3); + sum_q3 = _mm256_add_epi16(sum_q3, q256_3); - flat_q2 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), - 168)); + pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_5); - sum_p7 = _mm256_add_epi16(sum_p7, p256_7); + pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_5); - sum_q7 = _mm256_add_epi16(sum_q7, q256_7); + res_p = _mm256_srli_epi16( + _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_2)), 4); - pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_4); + flat2_p2 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); - pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_4); + res_q = _mm256_srli_epi16( + _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_2)), 4); - res_p = _mm256_srli_epi16( - _mm256_add_epi16(pixelFilter_p, - _mm256_add_epi16(sum_p7, p256_3)), 4); + flat2_q2 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); - flat2_p3 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), - 168)); + pixetFilter_p2p1p0 = _mm256_sub_epi16(pixetFilter_p2p1p0, q256_1); - res_q = _mm256_srli_epi16( - _mm256_add_epi16(pixelFilter_q, - _mm256_add_epi16(sum_q7, q256_3)), 4); + pixetFilter_q2q1q0 = _mm256_sub_epi16(pixetFilter_q2q1q0, p256_1); - flat2_q3 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), - 168)); + res_p = + 
_mm256_srli_epi16(_mm256_add_epi16(pixetFilter_p2p1p0, + _mm256_add_epi16(sum_p3, p256_2)), + 3); - sum_p7 = _mm256_add_epi16(sum_p7, p256_7); + flat_p2 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); - sum_q7 = _mm256_add_epi16(sum_q7, q256_7); + res_q = + _mm256_srli_epi16(_mm256_add_epi16(pixetFilter_q2q1q0, + _mm256_add_epi16(sum_q3, q256_2)), + 3); - pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_3); + flat_q2 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); - pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_3); + sum_p7 = _mm256_add_epi16(sum_p7, p256_7); - res_p = _mm256_srli_epi16( - _mm256_add_epi16(pixelFilter_p, - _mm256_add_epi16(sum_p7, p256_4)), 4); + sum_q7 = _mm256_add_epi16(sum_q7, q256_7); - flat2_p4 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), - 168)); + pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_4); - res_q = _mm256_srli_epi16( - _mm256_add_epi16(pixelFilter_q, - _mm256_add_epi16(sum_q7, q256_4)), 4); + pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_4); - flat2_q4 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), - 168)); + res_p = _mm256_srli_epi16( + _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_3)), 4); - sum_p7 = _mm256_add_epi16(sum_p7, p256_7); + flat2_p3 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); - sum_q7 = _mm256_add_epi16(sum_q7, q256_7); + res_q = _mm256_srli_epi16( + _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_3)), 4); - pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_2); + flat2_q3 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); - pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_2); + sum_p7 = _mm256_add_epi16(sum_p7, p256_7); - res_p = _mm256_srli_epi16( - _mm256_add_epi16(pixelFilter_p, - 
_mm256_add_epi16(sum_p7, p256_5)), 4); + sum_q7 = _mm256_add_epi16(sum_q7, q256_7); - flat2_p5 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), - 168)); + pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_3); - res_q = _mm256_srli_epi16( - _mm256_add_epi16(pixelFilter_q, - _mm256_add_epi16(sum_q7, q256_5)), 4); + pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_3); - flat2_q5 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), - 168)); + res_p = _mm256_srli_epi16( + _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_4)), 4); - sum_p7 = _mm256_add_epi16(sum_p7, p256_7); + flat2_p4 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); - sum_q7 = _mm256_add_epi16(sum_q7, q256_7); + res_q = _mm256_srli_epi16( + _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_4)), 4); - pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_1); + flat2_q4 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); - pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_1); + sum_p7 = _mm256_add_epi16(sum_p7, p256_7); - res_p = _mm256_srli_epi16( - _mm256_add_epi16(pixelFilter_p, - _mm256_add_epi16(sum_p7, p256_6)), 4); + sum_q7 = _mm256_add_epi16(sum_q7, q256_7); - flat2_p6 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), - 168)); + pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_2); - res_q = _mm256_srli_epi16( - _mm256_add_epi16(pixelFilter_q, - _mm256_add_epi16(sum_q7, q256_6)), 4); + pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_2); - flat2_q6 = _mm256_castsi256_si128( - _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), - 168)); - } + res_p = _mm256_srli_epi16( + _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_5)), 4); - // wide flat - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + flat2_p5 = _mm256_castsi256_si128( + 
_mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); - p2 = _mm_andnot_si128(flat, p2); - flat_p2 = _mm_and_si128(flat, flat_p2); - p2 = _mm_or_si128(flat_p2, p2); + res_q = _mm256_srli_epi16( + _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_5)), 4); - p1 = _mm_andnot_si128(flat, ps1); - flat_p1 = _mm_and_si128(flat, flat_p1); - p1 = _mm_or_si128(flat_p1, p1); + flat2_q5 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); - p0 = _mm_andnot_si128(flat, ps0); - flat_p0 = _mm_and_si128(flat, flat_p0); - p0 = _mm_or_si128(flat_p0, p0); + sum_p7 = _mm256_add_epi16(sum_p7, p256_7); - q0 = _mm_andnot_si128(flat, qs0); - flat_q0 = _mm_and_si128(flat, flat_q0); - q0 = _mm_or_si128(flat_q0, q0); + sum_q7 = _mm256_add_epi16(sum_q7, q256_7); - q1 = _mm_andnot_si128(flat, qs1); - flat_q1 = _mm_and_si128(flat, flat_q1); - q1 = _mm_or_si128(flat_q1, q1); + pixelFilter_p = _mm256_sub_epi16(pixelFilter_p, q256_1); - q2 = _mm_andnot_si128(flat, q2); - flat_q2 = _mm_and_si128(flat, flat_q2); - q2 = _mm_or_si128(flat_q2, q2); + pixelFilter_q = _mm256_sub_epi16(pixelFilter_q, p256_1); - p6 = _mm_andnot_si128(flat2, p6); - flat2_p6 = _mm_and_si128(flat2, flat2_p6); - p6 = _mm_or_si128(flat2_p6, p6); - _mm_storeu_si128((__m128i *) (s - 7 * p), p6); + res_p = _mm256_srli_epi16( + _mm256_add_epi16(pixelFilter_p, _mm256_add_epi16(sum_p7, p256_6)), 4); - p5 = _mm_andnot_si128(flat2, p5); - flat2_p5 = _mm_and_si128(flat2, flat2_p5); - p5 = _mm_or_si128(flat2_p5, p5); - _mm_storeu_si128((__m128i *) (s - 6 * p), p5); + flat2_p6 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_p, res_p), 168)); - p4 = _mm_andnot_si128(flat2, p4); - flat2_p4 = _mm_and_si128(flat2, flat2_p4); - p4 = _mm_or_si128(flat2_p4, p4); - _mm_storeu_si128((__m128i *) (s - 5 * p), p4); + res_q = _mm256_srli_epi16( + _mm256_add_epi16(pixelFilter_q, _mm256_add_epi16(sum_q7, q256_6)), 4); - p3 = _mm_andnot_si128(flat2, p3); - 
flat2_p3 = _mm_and_si128(flat2, flat2_p3); - p3 = _mm_or_si128(flat2_p3, p3); - _mm_storeu_si128((__m128i *) (s - 4 * p), p3); - - p2 = _mm_andnot_si128(flat2, p2); - flat2_p2 = _mm_and_si128(flat2, flat2_p2); - p2 = _mm_or_si128(flat2_p2, p2); - _mm_storeu_si128((__m128i *) (s - 3 * p), p2); - - p1 = _mm_andnot_si128(flat2, p1); - flat2_p1 = _mm_and_si128(flat2, flat2_p1); - p1 = _mm_or_si128(flat2_p1, p1); - _mm_storeu_si128((__m128i *) (s - 2 * p), p1); - - p0 = _mm_andnot_si128(flat2, p0); - flat2_p0 = _mm_and_si128(flat2, flat2_p0); - p0 = _mm_or_si128(flat2_p0, p0); - _mm_storeu_si128((__m128i *) (s - 1 * p), p0); - - q0 = _mm_andnot_si128(flat2, q0); - flat2_q0 = _mm_and_si128(flat2, flat2_q0); - q0 = _mm_or_si128(flat2_q0, q0); - _mm_storeu_si128((__m128i *) (s - 0 * p), q0); - - q1 = _mm_andnot_si128(flat2, q1); - flat2_q1 = _mm_and_si128(flat2, flat2_q1); - q1 = _mm_or_si128(flat2_q1, q1); - _mm_storeu_si128((__m128i *) (s + 1 * p), q1); - - q2 = _mm_andnot_si128(flat2, q2); - flat2_q2 = _mm_and_si128(flat2, flat2_q2); - q2 = _mm_or_si128(flat2_q2, q2); - _mm_storeu_si128((__m128i *) (s + 2 * p), q2); - - q3 = _mm_andnot_si128(flat2, q3); - flat2_q3 = _mm_and_si128(flat2, flat2_q3); - q3 = _mm_or_si128(flat2_q3, q3); - _mm_storeu_si128((__m128i *) (s + 3 * p), q3); - - q4 = _mm_andnot_si128(flat2, q4); - flat2_q4 = _mm_and_si128(flat2, flat2_q4); - q4 = _mm_or_si128(flat2_q4, q4); - _mm_storeu_si128((__m128i *) (s + 4 * p), q4); - - q5 = _mm_andnot_si128(flat2, q5); - flat2_q5 = _mm_and_si128(flat2, flat2_q5); - q5 = _mm_or_si128(flat2_q5, q5); - _mm_storeu_si128((__m128i *) (s + 5 * p), q5); - - q6 = _mm_andnot_si128(flat2, q6); - flat2_q6 = _mm_and_si128(flat2, flat2_q6); - q6 = _mm_or_si128(flat2_q6, q6); - _mm_storeu_si128((__m128i *) (s + 6 * p), q6); + flat2_q6 = _mm256_castsi256_si128( + _mm256_permute4x64_epi64(_mm256_packus_epi16(res_q, res_q), 168)); } -} -void vpx_lpf_horizontal_16_avx2(unsigned char *s, int p, - const unsigned char *_blimit, 
const unsigned char *_limit, - const unsigned char *_thresh, int count) { - if (count == 1) - mb_lpf_horizontal_edge_w_avx2_8(s, p, _blimit, _limit, _thresh); - else - mb_lpf_horizontal_edge_w_avx2_16(s, p, _blimit, _limit, _thresh); + // wide flat + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + p2 = _mm_andnot_si128(flat, p2); + flat_p2 = _mm_and_si128(flat, flat_p2); + p2 = _mm_or_si128(flat_p2, p2); + + p1 = _mm_andnot_si128(flat, ps1); + flat_p1 = _mm_and_si128(flat, flat_p1); + p1 = _mm_or_si128(flat_p1, p1); + + p0 = _mm_andnot_si128(flat, ps0); + flat_p0 = _mm_and_si128(flat, flat_p0); + p0 = _mm_or_si128(flat_p0, p0); + + q0 = _mm_andnot_si128(flat, qs0); + flat_q0 = _mm_and_si128(flat, flat_q0); + q0 = _mm_or_si128(flat_q0, q0); + + q1 = _mm_andnot_si128(flat, qs1); + flat_q1 = _mm_and_si128(flat, flat_q1); + q1 = _mm_or_si128(flat_q1, q1); + + q2 = _mm_andnot_si128(flat, q2); + flat_q2 = _mm_and_si128(flat, flat_q2); + q2 = _mm_or_si128(flat_q2, q2); + + p6 = _mm_andnot_si128(flat2, p6); + flat2_p6 = _mm_and_si128(flat2, flat2_p6); + p6 = _mm_or_si128(flat2_p6, p6); + _mm_storeu_si128((__m128i *)(s - 7 * p), p6); + + p5 = _mm_andnot_si128(flat2, p5); + flat2_p5 = _mm_and_si128(flat2, flat2_p5); + p5 = _mm_or_si128(flat2_p5, p5); + _mm_storeu_si128((__m128i *)(s - 6 * p), p5); + + p4 = _mm_andnot_si128(flat2, p4); + flat2_p4 = _mm_and_si128(flat2, flat2_p4); + p4 = _mm_or_si128(flat2_p4, p4); + _mm_storeu_si128((__m128i *)(s - 5 * p), p4); + + p3 = _mm_andnot_si128(flat2, p3); + flat2_p3 = _mm_and_si128(flat2, flat2_p3); + p3 = _mm_or_si128(flat2_p3, p3); + _mm_storeu_si128((__m128i *)(s - 4 * p), p3); + + p2 = _mm_andnot_si128(flat2, p2); + flat2_p2 = _mm_and_si128(flat2, flat2_p2); + p2 = _mm_or_si128(flat2_p2, p2); + _mm_storeu_si128((__m128i *)(s - 3 * p), p2); + + p1 = _mm_andnot_si128(flat2, p1); + flat2_p1 = _mm_and_si128(flat2, flat2_p1); + p1 = _mm_or_si128(flat2_p1, p1); + _mm_storeu_si128((__m128i *)(s - 2 * p), p1); + + p0 = 
_mm_andnot_si128(flat2, p0); + flat2_p0 = _mm_and_si128(flat2, flat2_p0); + p0 = _mm_or_si128(flat2_p0, p0); + _mm_storeu_si128((__m128i *)(s - 1 * p), p0); + + q0 = _mm_andnot_si128(flat2, q0); + flat2_q0 = _mm_and_si128(flat2, flat2_q0); + q0 = _mm_or_si128(flat2_q0, q0); + _mm_storeu_si128((__m128i *)(s - 0 * p), q0); + + q1 = _mm_andnot_si128(flat2, q1); + flat2_q1 = _mm_and_si128(flat2, flat2_q1); + q1 = _mm_or_si128(flat2_q1, q1); + _mm_storeu_si128((__m128i *)(s + 1 * p), q1); + + q2 = _mm_andnot_si128(flat2, q2); + flat2_q2 = _mm_and_si128(flat2, flat2_q2); + q2 = _mm_or_si128(flat2_q2, q2); + _mm_storeu_si128((__m128i *)(s + 2 * p), q2); + + q3 = _mm_andnot_si128(flat2, q3); + flat2_q3 = _mm_and_si128(flat2, flat2_q3); + q3 = _mm_or_si128(flat2_q3, q3); + _mm_storeu_si128((__m128i *)(s + 3 * p), q3); + + q4 = _mm_andnot_si128(flat2, q4); + flat2_q4 = _mm_and_si128(flat2, flat2_q4); + q4 = _mm_or_si128(flat2_q4, q4); + _mm_storeu_si128((__m128i *)(s + 4 * p), q4); + + q5 = _mm_andnot_si128(flat2, q5); + flat2_q5 = _mm_and_si128(flat2, flat2_q5); + q5 = _mm_or_si128(flat2_q5, q5); + _mm_storeu_si128((__m128i *)(s + 5 * p), q5); + + q6 = _mm_andnot_si128(flat2, q6); + flat2_q6 = _mm_and_si128(flat2, flat2_q6); + q6 = _mm_or_si128(flat2_q6, q6); + _mm_storeu_si128((__m128i *)(s + 6 * p), q6); + } } diff --git a/libs/libvpx/vpx_dsp/x86/loopfilter_mmx.asm b/libs/libvpx/vpx_dsp/x86/loopfilter_mmx.asm deleted file mode 100644 index b9c18b680f..0000000000 --- a/libs/libvpx/vpx_dsp/x86/loopfilter_mmx.asm +++ /dev/null @@ -1,611 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - - -%include "vpx_ports/x86_abi_support.asm" - - -;void vpx_lpf_horizontal_4_mmx -;( -; unsigned char *src_ptr, -; int src_pixel_step, -; const char *blimit, -; const char *limit, -; const char *thresh, -; int count -;) -global sym(vpx_lpf_horizontal_4_mmx) PRIVATE -sym(vpx_lpf_horizontal_4_mmx): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 6 - GET_GOT rbx - push rsi - push rdi - ; end prolog - - ALIGN_STACK 16, rax - sub rsp, 32 ; reserve 32 bytes - %define t0 [rsp + 0] ;__declspec(align(16)) char t0[8]; - %define t1 [rsp + 16] ;__declspec(align(16)) char t1[8]; - - mov rsi, arg(0) ;src_ptr - movsxd rax, dword ptr arg(1) ;src_pixel_step ; destination pitch? - - movsxd rcx, dword ptr arg(5) ;count -.next8_h: - mov rdx, arg(3) ;limit - movq mm7, [rdx] - mov rdi, rsi ; rdi points to row +1 for indirect addressing - add rdi, rax - - ; calculate breakout conditions - movq mm2, [rdi+2*rax] ; q3 - movq mm1, [rsi+2*rax] ; q2 - movq mm6, mm1 ; q2 - psubusb mm1, mm2 ; q2-=q3 - psubusb mm2, mm6 ; q3-=q2 - por mm1, mm2 ; abs(q3-q2) - psubusb mm1, mm7 ; - - - movq mm4, [rsi+rax] ; q1 - movq mm3, mm4 ; q1 - psubusb mm4, mm6 ; q1-=q2 - psubusb mm6, mm3 ; q2-=q1 - por mm4, mm6 ; abs(q2-q1) - - psubusb mm4, mm7 - por mm1, mm4 - - movq mm4, [rsi] ; q0 - movq mm0, mm4 ; q0 - psubusb mm4, mm3 ; q0-=q1 - psubusb mm3, mm0 ; q1-=q0 - por mm4, mm3 ; abs(q0-q1) - movq t0, mm4 ; save to t0 - psubusb mm4, mm7 - por mm1, mm4 - - - neg rax ; negate pitch to deal with above border - - movq mm2, [rsi+4*rax] ; p3 - movq mm4, [rdi+4*rax] ; p2 - movq mm5, mm4 ; p2 - psubusb mm4, mm2 ; p2-=p3 - psubusb mm2, mm5 ; p3-=p2 - por mm4, mm2 ; abs(p3 - p2) - psubusb mm4, mm7 - por mm1, mm4 - - - movq mm4, [rsi+2*rax] ; p1 - movq mm3, mm4 ; p1 - psubusb mm4, mm5 ; p1-=p2 - psubusb mm5, mm3 ; p2-=p1 - por mm4, mm5 ; abs(p2 - p1) - psubusb mm4, mm7 - por mm1, mm4 - - movq mm2, mm3 ; p1 - - movq mm4, [rsi+rax] ; p0 - movq mm5, mm4 ; p0 - psubusb mm4, mm3 ; p0-=p1 - psubusb mm3, mm5 ; p1-=p0 - por mm4, 
mm3 ; abs(p1 - p0) - movq t1, mm4 ; save to t1 - psubusb mm4, mm7 - por mm1, mm4 - - movq mm3, [rdi] ; q1 - movq mm4, mm3 ; q1 - psubusb mm3, mm2 ; q1-=p1 - psubusb mm2, mm4 ; p1-=q1 - por mm2, mm3 ; abs(p1-q1) - pand mm2, [GLOBAL(tfe)] ; set lsb of each byte to zero - psrlw mm2, 1 ; abs(p1-q1)/2 - - movq mm6, mm5 ; p0 - movq mm3, [rsi] ; q0 - psubusb mm5, mm3 ; p0-=q0 - psubusb mm3, mm6 ; q0-=p0 - por mm5, mm3 ; abs(p0 - q0) - paddusb mm5, mm5 ; abs(p0-q0)*2 - paddusb mm5, mm2 ; abs (p0 - q0) *2 + abs(p1-q1)/2 - - mov rdx, arg(2) ;blimit ; get blimit - movq mm7, [rdx] ; blimit - - psubusb mm5, mm7 ; abs (p0 - q0) *2 + abs(p1-q1)/2 > blimit - por mm1, mm5 - pxor mm5, mm5 - pcmpeqb mm1, mm5 ; mask mm1 - - ; calculate high edge variance - mov rdx, arg(4) ;thresh ; get thresh - movq mm7, [rdx] ; - movq mm4, t0 ; get abs (q1 - q0) - psubusb mm4, mm7 - movq mm3, t1 ; get abs (p1 - p0) - psubusb mm3, mm7 - paddb mm4, mm3 ; abs(q1 - q0) > thresh || abs(p1 - p0) > thresh - - pcmpeqb mm4, mm5 - - pcmpeqb mm5, mm5 - pxor mm4, mm5 - - - ; start work on filters - movq mm2, [rsi+2*rax] ; p1 - movq mm7, [rdi] ; q1 - pxor mm2, [GLOBAL(t80)] ; p1 offset to convert to signed values - pxor mm7, [GLOBAL(t80)] ; q1 offset to convert to signed values - psubsb mm2, mm7 ; p1 - q1 - pand mm2, mm4 ; high var mask (hvm)(p1 - q1) - pxor mm6, [GLOBAL(t80)] ; offset to convert to signed values - pxor mm0, [GLOBAL(t80)] ; offset to convert to signed values - movq mm3, mm0 ; q0 - psubsb mm0, mm6 ; q0 - p0 - paddsb mm2, mm0 ; 1 * (q0 - p0) + hvm(p1 - q1) - paddsb mm2, mm0 ; 2 * (q0 - p0) + hvm(p1 - q1) - paddsb mm2, mm0 ; 3 * (q0 - p0) + hvm(p1 - q1) - pand mm1, mm2 ; mask filter values we don't care about - movq mm2, mm1 - paddsb mm1, [GLOBAL(t4)] ; 3* (q0 - p0) + hvm(p1 - q1) + 4 - paddsb mm2, [GLOBAL(t3)] ; 3* (q0 - p0) + hvm(p1 - q1) + 3 - - pxor mm0, mm0 ; - pxor mm5, mm5 - punpcklbw mm0, mm2 ; - punpckhbw mm5, mm2 ; - psraw mm0, 11 ; - psraw mm5, 11 - packsswb mm0, mm5 - movq mm2, mm0 ; (3* 
(q0 - p0) + hvm(p1 - q1) + 3) >> 3; - - pxor mm0, mm0 ; 0 - movq mm5, mm1 ; abcdefgh - punpcklbw mm0, mm1 ; e0f0g0h0 - psraw mm0, 11 ; sign extended shift right by 3 - pxor mm1, mm1 ; 0 - punpckhbw mm1, mm5 ; a0b0c0d0 - psraw mm1, 11 ; sign extended shift right by 3 - movq mm5, mm0 ; save results - - packsswb mm0, mm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>3 - paddsw mm5, [GLOBAL(ones)] - paddsw mm1, [GLOBAL(ones)] - psraw mm5, 1 ; partial shifted one more time for 2nd tap - psraw mm1, 1 ; partial shifted one more time for 2nd tap - packsswb mm5, mm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>4 - pandn mm4, mm5 ; high edge variance additive - - paddsb mm6, mm2 ; p0+= p0 add - pxor mm6, [GLOBAL(t80)] ; unoffset - movq [rsi+rax], mm6 ; write back - - movq mm6, [rsi+2*rax] ; p1 - pxor mm6, [GLOBAL(t80)] ; reoffset - paddsb mm6, mm4 ; p1+= p1 add - pxor mm6, [GLOBAL(t80)] ; unoffset - movq [rsi+2*rax], mm6 ; write back - - psubsb mm3, mm0 ; q0-= q0 add - pxor mm3, [GLOBAL(t80)] ; unoffset - movq [rsi], mm3 ; write back - - psubsb mm7, mm4 ; q1-= q1 add - pxor mm7, [GLOBAL(t80)] ; unoffset - movq [rdi], mm7 ; write back - - add rsi,8 - neg rax - dec rcx - jnz .next8_h - - add rsp, 32 - pop rsp - ; begin epilog - pop rdi - pop rsi - RESTORE_GOT - UNSHADOW_ARGS - pop rbp - ret - - -;void vpx_lpf_vertical_4_mmx -;( -; unsigned char *src_ptr, -; int src_pixel_step, -; const char *blimit, -; const char *limit, -; const char *thresh, -; int count -;) -global sym(vpx_lpf_vertical_4_mmx) PRIVATE -sym(vpx_lpf_vertical_4_mmx): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 6 - GET_GOT rbx - push rsi - push rdi - ; end prolog - - ALIGN_STACK 16, rax - sub rsp, 64 ; reserve 64 bytes - %define t0 [rsp + 0] ;__declspec(align(16)) char t0[8]; - %define t1 [rsp + 16] ;__declspec(align(16)) char t1[8]; - %define srct [rsp + 32] ;__declspec(align(16)) char srct[32]; - - mov rsi, arg(0) ;src_ptr - movsxd rax, dword ptr arg(1) ;src_pixel_step ; destination pitch? 
- - lea rsi, [rsi + rax*4 - 4] - - movsxd rcx, dword ptr arg(5) ;count -.next8_v: - mov rdi, rsi ; rdi points to row +1 for indirect addressing - add rdi, rax - - - ;transpose - movq mm6, [rsi+2*rax] ; 67 66 65 64 63 62 61 60 - movq mm7, mm6 ; 77 76 75 74 73 72 71 70 - - punpckhbw mm7, [rdi+2*rax] ; 77 67 76 66 75 65 74 64 - punpcklbw mm6, [rdi+2*rax] ; 73 63 72 62 71 61 70 60 - - movq mm4, [rsi] ; 47 46 45 44 43 42 41 40 - movq mm5, mm4 ; 47 46 45 44 43 42 41 40 - - punpckhbw mm5, [rsi+rax] ; 57 47 56 46 55 45 54 44 - punpcklbw mm4, [rsi+rax] ; 53 43 52 42 51 41 50 40 - - movq mm3, mm5 ; 57 47 56 46 55 45 54 44 - punpckhwd mm5, mm7 ; 77 67 57 47 76 66 56 46 - - punpcklwd mm3, mm7 ; 75 65 55 45 74 64 54 44 - movq mm2, mm4 ; 53 43 52 42 51 41 50 40 - - punpckhwd mm4, mm6 ; 73 63 53 43 72 62 52 42 - punpcklwd mm2, mm6 ; 71 61 51 41 70 60 50 40 - - neg rax - movq mm6, [rsi+rax*2] ; 27 26 25 24 23 22 21 20 - - movq mm1, mm6 ; 27 26 25 24 23 22 21 20 - punpckhbw mm6, [rsi+rax] ; 37 27 36 36 35 25 34 24 - - punpcklbw mm1, [rsi+rax] ; 33 23 32 22 31 21 30 20 - movq mm7, [rsi+rax*4]; ; 07 06 05 04 03 02 01 00 - - punpckhbw mm7, [rdi+rax*4] ; 17 07 16 06 15 05 14 04 - movq mm0, mm7 ; 17 07 16 06 15 05 14 04 - - punpckhwd mm7, mm6 ; 37 27 17 07 36 26 16 06 - punpcklwd mm0, mm6 ; 35 25 15 05 34 24 14 04 - - movq mm6, mm7 ; 37 27 17 07 36 26 16 06 - punpckhdq mm7, mm5 ; 77 67 57 47 37 27 17 07 = q3 - - punpckldq mm6, mm5 ; 76 66 56 46 36 26 16 06 = q2 - - movq mm5, mm6 ; 76 66 56 46 36 26 16 06 - psubusb mm5, mm7 ; q2-q3 - - psubusb mm7, mm6 ; q3-q2 - por mm7, mm5; ; mm7=abs (q3-q2) - - movq mm5, mm0 ; 35 25 15 05 34 24 14 04 - punpckhdq mm5, mm3 ; 75 65 55 45 35 25 15 05 = q1 - - punpckldq mm0, mm3 ; 74 64 54 44 34 24 15 04 = q0 - movq mm3, mm5 ; 75 65 55 45 35 25 15 05 = q1 - - psubusb mm3, mm6 ; q1-q2 - psubusb mm6, mm5 ; q2-q1 - - por mm6, mm3 ; mm6=abs(q2-q1) - lea rdx, srct - - movq [rdx+24], mm5 ; save q1 - movq [rdx+16], mm0 ; save q0 - - movq mm3, [rsi+rax*4] ; 07 06 
05 04 03 02 01 00 - punpcklbw mm3, [rdi+rax*4] ; 13 03 12 02 11 01 10 00 - - movq mm0, mm3 ; 13 03 12 02 11 01 10 00 - punpcklwd mm0, mm1 ; 31 21 11 01 30 20 10 00 - - punpckhwd mm3, mm1 ; 33 23 13 03 32 22 12 02 - movq mm1, mm0 ; 31 21 11 01 30 20 10 00 - - punpckldq mm0, mm2 ; 70 60 50 40 30 20 10 00 =p3 - punpckhdq mm1, mm2 ; 71 61 51 41 31 21 11 01 =p2 - - movq mm2, mm1 ; 71 61 51 41 31 21 11 01 =p2 - psubusb mm2, mm0 ; p2-p3 - - psubusb mm0, mm1 ; p3-p2 - por mm0, mm2 ; mm0=abs(p3-p2) - - movq mm2, mm3 ; 33 23 13 03 32 22 12 02 - punpckldq mm2, mm4 ; 72 62 52 42 32 22 12 02 = p1 - - punpckhdq mm3, mm4 ; 73 63 53 43 33 23 13 03 = p0 - movq [rdx+8], mm3 ; save p0 - - movq [rdx], mm2 ; save p1 - movq mm5, mm2 ; mm5 = p1 - - psubusb mm2, mm1 ; p1-p2 - psubusb mm1, mm5 ; p2-p1 - - por mm1, mm2 ; mm1=abs(p2-p1) - mov rdx, arg(3) ;limit - - movq mm4, [rdx] ; mm4 = limit - psubusb mm7, mm4 - - psubusb mm0, mm4 - psubusb mm1, mm4 - - psubusb mm6, mm4 - por mm7, mm6 - - por mm0, mm1 - por mm0, mm7 ; abs(q3-q2) > limit || abs(p3-p2) > limit ||abs(p2-p1) > limit || abs(q2-q1) > limit - - movq mm1, mm5 ; p1 - - movq mm7, mm3 ; mm3=mm7=p0 - psubusb mm7, mm5 ; p0 - p1 - - psubusb mm5, mm3 ; p1 - p0 - por mm5, mm7 ; abs(p1-p0) - - movq t0, mm5 ; save abs(p1-p0) - lea rdx, srct - - psubusb mm5, mm4 - por mm0, mm5 ; mm0=mask - - movq mm5, [rdx+16] ; mm5=q0 - movq mm7, [rdx+24] ; mm7=q1 - - movq mm6, mm5 ; mm6=q0 - movq mm2, mm7 ; q1 - psubusb mm5, mm7 ; q0-q1 - - psubusb mm7, mm6 ; q1-q0 - por mm7, mm5 ; abs(q1-q0) - - movq t1, mm7 ; save abs(q1-q0) - psubusb mm7, mm4 - - por mm0, mm7 ; mask - - movq mm5, mm2 ; q1 - psubusb mm5, mm1 ; q1-=p1 - psubusb mm1, mm2 ; p1-=q1 - por mm5, mm1 ; abs(p1-q1) - pand mm5, [GLOBAL(tfe)] ; set lsb of each byte to zero - psrlw mm5, 1 ; abs(p1-q1)/2 - - mov rdx, arg(2) ;blimit ; - - movq mm4, [rdx] ;blimit - movq mm1, mm3 ; mm1=mm3=p0 - - movq mm7, mm6 ; mm7=mm6=q0 - psubusb mm1, mm7 ; p0-q0 - - psubusb mm7, mm3 ; q0-p0 - por mm1, mm7 ; 
abs(q0-p0) - paddusb mm1, mm1 ; abs(q0-p0)*2 - paddusb mm1, mm5 ; abs (p0 - q0) *2 + abs(p1-q1)/2 - - psubusb mm1, mm4 ; abs (p0 - q0) *2 + abs(p1-q1)/2 > blimit - por mm1, mm0; ; mask - - pxor mm0, mm0 - pcmpeqb mm1, mm0 - - ; calculate high edge variance - mov rdx, arg(4) ;thresh ; get thresh - movq mm7, [rdx] - ; - movq mm4, t0 ; get abs (q1 - q0) - psubusb mm4, mm7 - - movq mm3, t1 ; get abs (p1 - p0) - psubusb mm3, mm7 - - por mm4, mm3 ; abs(q1 - q0) > thresh || abs(p1 - p0) > thresh - pcmpeqb mm4, mm0 - - pcmpeqb mm0, mm0 - pxor mm4, mm0 - - - - ; start work on filters - lea rdx, srct - - movq mm2, [rdx] ; p1 - movq mm7, [rdx+24] ; q1 - - movq mm6, [rdx+8] ; p0 - movq mm0, [rdx+16] ; q0 - - pxor mm2, [GLOBAL(t80)] ; p1 offset to convert to signed values - pxor mm7, [GLOBAL(t80)] ; q1 offset to convert to signed values - - psubsb mm2, mm7 ; p1 - q1 - pand mm2, mm4 ; high var mask (hvm)(p1 - q1) - - pxor mm6, [GLOBAL(t80)] ; offset to convert to signed values - pxor mm0, [GLOBAL(t80)] ; offset to convert to signed values - - movq mm3, mm0 ; q0 - psubsb mm0, mm6 ; q0 - p0 - - paddsb mm2, mm0 ; 1 * (q0 - p0) + hvm(p1 - q1) - paddsb mm2, mm0 ; 2 * (q0 - p0) + hvm(p1 - q1) - - paddsb mm2, mm0 ; 3 * (q0 - p0) + hvm(p1 - q1) - pand mm1, mm2 ; mask filter values we don't care about - - movq mm2, mm1 - paddsb mm1, [GLOBAL(t4)] ; 3* (q0 - p0) + hvm(p1 - q1) + 4 - - paddsb mm2, [GLOBAL(t3)] ; 3* (q0 - p0) + hvm(p1 - q1) + 3 - pxor mm0, mm0 ; - - pxor mm5, mm5 - punpcklbw mm0, mm2 ; - - punpckhbw mm5, mm2 ; - psraw mm0, 11 ; - - psraw mm5, 11 - packsswb mm0, mm5 - - movq mm2, mm0 ; (3* (q0 - p0) + hvm(p1 - q1) + 3) >> 3; - - pxor mm0, mm0 ; 0 - movq mm5, mm1 ; abcdefgh - - punpcklbw mm0, mm1 ; e0f0g0h0 - psraw mm0, 11 ; sign extended shift right by 3 - - pxor mm1, mm1 ; 0 - punpckhbw mm1, mm5 ; a0b0c0d0 - - psraw mm1, 11 ; sign extended shift right by 3 - movq mm5, mm0 ; save results - - packsswb mm0, mm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>3 - paddsw mm5, 
[GLOBAL(ones)] - - paddsw mm1, [GLOBAL(ones)] - psraw mm5, 1 ; partial shifted one more time for 2nd tap - - psraw mm1, 1 ; partial shifted one more time for 2nd tap - packsswb mm5, mm1 ; (3* (q0 - p0) + hvm(p1 - q1) + 4) >>4 - - pandn mm4, mm5 ; high edge variance additive - - paddsb mm6, mm2 ; p0+= p0 add - pxor mm6, [GLOBAL(t80)] ; unoffset - - ; mm6=p0 ; - movq mm1, [rdx] ; p1 - pxor mm1, [GLOBAL(t80)] ; reoffset - - paddsb mm1, mm4 ; p1+= p1 add - pxor mm1, [GLOBAL(t80)] ; unoffset - ; mm6 = p0 mm1 = p1 - - psubsb mm3, mm0 ; q0-= q0 add - pxor mm3, [GLOBAL(t80)] ; unoffset - - ; mm3 = q0 - psubsb mm7, mm4 ; q1-= q1 add - pxor mm7, [GLOBAL(t80)] ; unoffset - ; mm7 = q1 - - ; transpose and write back - ; mm1 = 72 62 52 42 32 22 12 02 - ; mm6 = 73 63 53 43 33 23 13 03 - ; mm3 = 74 64 54 44 34 24 14 04 - ; mm7 = 75 65 55 45 35 25 15 05 - - movq mm2, mm1 ; 72 62 52 42 32 22 12 02 - punpcklbw mm2, mm6 ; 33 32 23 22 13 12 03 02 - - movq mm4, mm3 ; 74 64 54 44 34 24 14 04 - punpckhbw mm1, mm6 ; 73 72 63 62 53 52 43 42 - - punpcklbw mm4, mm7 ; 35 34 25 24 15 14 05 04 - punpckhbw mm3, mm7 ; 75 74 65 64 55 54 45 44 - - movq mm6, mm2 ; 33 32 23 22 13 12 03 02 - punpcklwd mm2, mm4 ; 15 14 13 12 05 04 03 02 - - punpckhwd mm6, mm4 ; 35 34 33 32 25 24 23 22 - movq mm5, mm1 ; 73 72 63 62 53 52 43 42 - - punpcklwd mm1, mm3 ; 55 54 53 52 45 44 43 42 - punpckhwd mm5, mm3 ; 75 74 73 72 65 64 63 62 - - - ; mm2 = 15 14 13 12 05 04 03 02 - ; mm6 = 35 34 33 32 25 24 23 22 - ; mm5 = 55 54 53 52 45 44 43 42 - ; mm1 = 75 74 73 72 65 64 63 62 - - - - movd [rsi+rax*4+2], mm2 - psrlq mm2, 32 - - movd [rdi+rax*4+2], mm2 - movd [rsi+rax*2+2], mm6 - - psrlq mm6, 32 - movd [rsi+rax+2],mm6 - - movd [rsi+2], mm1 - psrlq mm1, 32 - - movd [rdi+2], mm1 - neg rax - - movd [rdi+rax+2],mm5 - psrlq mm5, 32 - - movd [rdi+rax*2+2], mm5 - - lea rsi, [rsi+rax*8] - dec rcx - jnz .next8_v - - add rsp, 64 - pop rsp - ; begin epilog - pop rdi - pop rsi - RESTORE_GOT - UNSHADOW_ARGS - pop rbp - ret - 
-SECTION_RODATA -align 16 -tfe: - times 8 db 0xfe -align 16 -t80: - times 8 db 0x80 -align 16 -t3: - times 8 db 0x03 -align 16 -t4: - times 8 db 0x04 -align 16 -ones: - times 4 dw 0x0001 diff --git a/libs/libvpx/vpx_dsp/x86/loopfilter_sse2.c b/libs/libvpx/vpx_dsp/x86/loopfilter_sse2.c index ed10127367..e13334ae0e 100644 --- a/libs/libvpx/vpx_dsp/x86/loopfilter_sse2.c +++ b/libs/libvpx/vpx_dsp/x86/loopfilter_sse2.c @@ -18,11 +18,221 @@ static INLINE __m128i abs_diff(__m128i a, __m128i b) { return _mm_or_si128(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a)); } -static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s, - int p, - const unsigned char *_blimit, - const unsigned char *_limit, - const unsigned char *_thresh) { +// filter_mask and hev_mask +#define FILTER_HEV_MASK \ + do { \ + /* (abs(q1 - q0), abs(p1 - p0) */ \ + __m128i flat = abs_diff(q1p1, q0p0); \ + /* abs(p1 - q1), abs(p0 - q0) */ \ + const __m128i abs_p1q1p0q0 = abs_diff(p1p0, q1q0); \ + __m128i abs_p0q0, abs_p1q1, work; \ + \ + /* const uint8_t hev = hev_mask(thresh, *op1, *op0, *oq0, *oq1); */ \ + hev = \ + _mm_unpacklo_epi8(_mm_max_epu8(flat, _mm_srli_si128(flat, 8)), zero); \ + hev = _mm_cmpgt_epi16(hev, thresh); \ + hev = _mm_packs_epi16(hev, hev); \ + \ + /* const int8_t mask = filter_mask(*limit, *blimit, */ \ + /* p3, p2, p1, p0, q0, q1, q2, q3); */ \ + abs_p0q0 = \ + _mm_adds_epu8(abs_p1q1p0q0, abs_p1q1p0q0); /* abs(p0 - q0) * 2 */ \ + abs_p1q1 = \ + _mm_unpackhi_epi8(abs_p1q1p0q0, abs_p1q1p0q0); /* abs(p1 - q1) */ \ + abs_p1q1 = _mm_srli_epi16(abs_p1q1, 9); \ + abs_p1q1 = _mm_packs_epi16(abs_p1q1, abs_p1q1); /* abs(p1 - q1) / 2 */ \ + /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 */ \ + mask = _mm_adds_epu8(abs_p0q0, abs_p1q1); \ + /* abs(p3 - p2), abs(p2 - p1) */ \ + work = abs_diff(p3p2, p2p1); \ + flat = _mm_max_epu8(work, flat); \ + /* abs(q3 - q2), abs(q2 - q1) */ \ + work = abs_diff(q3q2, q2q1); \ + flat = _mm_max_epu8(work, flat); \ + flat = _mm_max_epu8(flat, _mm_srli_si128(flat, 8)); \ + 
mask = _mm_unpacklo_epi64(mask, flat); \ + mask = _mm_subs_epu8(mask, limit); \ + mask = _mm_cmpeq_epi8(mask, zero); \ + mask = _mm_and_si128(mask, _mm_srli_si128(mask, 8)); \ + } while (0) + +#define FILTER4 \ + do { \ + const __m128i t3t4 = \ + _mm_set_epi8(3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4); \ + const __m128i t80 = _mm_set1_epi8(0x80); \ + __m128i filter, filter2filter1, work; \ + \ + ps1ps0 = _mm_xor_si128(p1p0, t80); /* ^ 0x80 */ \ + qs1qs0 = _mm_xor_si128(q1q0, t80); \ + \ + /* int8_t filter = signed_char_clamp(ps1 - qs1) & hev; */ \ + work = _mm_subs_epi8(ps1ps0, qs1qs0); \ + filter = _mm_and_si128(_mm_srli_si128(work, 8), hev); \ + /* filter = signed_char_clamp(filter + 3 * (qs0 - ps0)) & mask; */ \ + filter = _mm_subs_epi8(filter, work); \ + filter = _mm_subs_epi8(filter, work); \ + filter = _mm_subs_epi8(filter, work); /* + 3 * (qs0 - ps0) */ \ + filter = _mm_and_si128(filter, mask); /* & mask */ \ + filter = _mm_unpacklo_epi64(filter, filter); \ + \ + /* filter1 = signed_char_clamp(filter + 4) >> 3; */ \ + /* filter2 = signed_char_clamp(filter + 3) >> 3; */ \ + filter2filter1 = _mm_adds_epi8(filter, t3t4); /* signed_char_clamp */ \ + filter = _mm_unpackhi_epi8(filter2filter1, filter2filter1); \ + filter2filter1 = _mm_unpacklo_epi8(filter2filter1, filter2filter1); \ + filter2filter1 = _mm_srai_epi16(filter2filter1, 11); /* >> 3 */ \ + filter = _mm_srai_epi16(filter, 11); /* >> 3 */ \ + filter2filter1 = _mm_packs_epi16(filter2filter1, filter); \ + \ + /* filter = ROUND_POWER_OF_TWO(filter1, 1) & ~hev; */ \ + filter = _mm_subs_epi8(filter2filter1, ff); /* + 1 */ \ + filter = _mm_unpacklo_epi8(filter, filter); \ + filter = _mm_srai_epi16(filter, 9); /* round */ \ + filter = _mm_packs_epi16(filter, filter); \ + filter = _mm_andnot_si128(hev, filter); \ + \ + hev = _mm_unpackhi_epi64(filter2filter1, filter); \ + filter2filter1 = _mm_unpacklo_epi64(filter2filter1, filter); \ + \ + /* signed_char_clamp(qs1 - filter), signed_char_clamp(qs0 - filter1) 
*/ \ + qs1qs0 = _mm_subs_epi8(qs1qs0, filter2filter1); \ + /* signed_char_clamp(ps1 + filter), signed_char_clamp(ps0 + filter2) */ \ + ps1ps0 = _mm_adds_epi8(ps1ps0, hev); \ + qs1qs0 = _mm_xor_si128(qs1qs0, t80); /* ^ 0x80 */ \ + ps1ps0 = _mm_xor_si128(ps1ps0, t80); /* ^ 0x80 */ \ + } while (0) + +void vpx_lpf_horizontal_4_sse2(uint8_t *s, int p /* pitch */, + const uint8_t *_blimit, const uint8_t *_limit, + const uint8_t *_thresh) { + const __m128i zero = _mm_set1_epi16(0); + const __m128i limit = + _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *)_blimit), + _mm_loadl_epi64((const __m128i *)_limit)); + const __m128i thresh = + _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)_thresh), zero); + const __m128i ff = _mm_cmpeq_epi8(zero, zero); + __m128i q1p1, q0p0, p3p2, p2p1, p1p0, q3q2, q2q1, q1q0, ps1ps0, qs1qs0; + __m128i mask, hev; + + p3p2 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 3 * p)), + _mm_loadl_epi64((__m128i *)(s - 4 * p))); + q1p1 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 2 * p)), + _mm_loadl_epi64((__m128i *)(s + 1 * p))); + q0p0 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 1 * p)), + _mm_loadl_epi64((__m128i *)(s + 0 * p))); + q3q2 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s + 2 * p)), + _mm_loadl_epi64((__m128i *)(s + 3 * p))); + p1p0 = _mm_unpacklo_epi64(q0p0, q1p1); + p2p1 = _mm_unpacklo_epi64(q1p1, p3p2); + q1q0 = _mm_unpackhi_epi64(q0p0, q1p1); + q2q1 = _mm_unpacklo_epi64(_mm_srli_si128(q1p1, 8), q3q2); + + FILTER_HEV_MASK; + FILTER4; + + _mm_storeh_pi((__m64 *)(s - 2 * p), _mm_castsi128_ps(ps1ps0)); // *op1 + _mm_storel_epi64((__m128i *)(s - 1 * p), ps1ps0); // *op0 + _mm_storel_epi64((__m128i *)(s + 0 * p), qs1qs0); // *oq0 + _mm_storeh_pi((__m64 *)(s + 1 * p), _mm_castsi128_ps(qs1qs0)); // *oq1 +} + +void vpx_lpf_vertical_4_sse2(uint8_t *s, int p /* pitch */, + const uint8_t *_blimit, const uint8_t *_limit, + const uint8_t *_thresh) { + const __m128i zero = _mm_set1_epi16(0); + const __m128i limit = + 
_mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i *)_blimit), + _mm_loadl_epi64((const __m128i *)_limit)); + const __m128i thresh = + _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)_thresh), zero); + const __m128i ff = _mm_cmpeq_epi8(zero, zero); + __m128i x0, x1, x2, x3; + __m128i q1p1, q0p0, p3p2, p2p1, p1p0, q3q2, q2q1, q1q0, ps1ps0, qs1qs0; + __m128i mask, hev; + + // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17 + q1q0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(s + 0 * p - 4)), + _mm_loadl_epi64((__m128i *)(s + 1 * p - 4))); + + // 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37 + x1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(s + 2 * p - 4)), + _mm_loadl_epi64((__m128i *)(s + 3 * p - 4))); + + // 40 50 41 51 42 52 43 53 44 54 45 55 46 56 47 57 + x2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(s + 4 * p - 4)), + _mm_loadl_epi64((__m128i *)(s + 5 * p - 4))); + + // 60 70 61 71 62 72 63 73 64 74 65 75 66 76 67 77 + x3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i *)(s + 6 * p - 4)), + _mm_loadl_epi64((__m128i *)(s + 7 * p - 4))); + + // Transpose 8x8 + // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33 + p1p0 = _mm_unpacklo_epi16(q1q0, x1); + // 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73 + x0 = _mm_unpacklo_epi16(x2, x3); + // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71 + p3p2 = _mm_unpacklo_epi32(p1p0, x0); + // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73 + p1p0 = _mm_unpackhi_epi32(p1p0, x0); + p3p2 = _mm_unpackhi_epi64(p3p2, _mm_slli_si128(p3p2, 8)); // swap lo and high + p1p0 = _mm_unpackhi_epi64(p1p0, _mm_slli_si128(p1p0, 8)); // swap lo and high + + // 04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37 + q1q0 = _mm_unpackhi_epi16(q1q0, x1); + // 44 54 64 74 45 55 65 75 46 56 66 76 47 57 67 77 + x2 = _mm_unpackhi_epi16(x2, x3); + // 06 16 26 36 46 56 66 76 07 17 27 37 47 57 67 77 + q3q2 = _mm_unpackhi_epi32(q1q0, x2); + // 04 14 24 34 44 54 64 74 05 15 25 35 45 55 65 75 + q1q0 = _mm_unpacklo_epi32(q1q0, x2); + + q0p0 = 
_mm_unpacklo_epi64(p1p0, q1q0); + q1p1 = _mm_unpackhi_epi64(p1p0, q1q0); + p1p0 = _mm_unpacklo_epi64(q0p0, q1p1); + p2p1 = _mm_unpacklo_epi64(q1p1, p3p2); + q2q1 = _mm_unpacklo_epi64(_mm_srli_si128(q1p1, 8), q3q2); + + FILTER_HEV_MASK; + FILTER4; + + // Transpose 8x4 to 4x8 + // qs1qs0: 20 21 22 23 24 25 26 27 30 31 32 33 34 34 36 37 + // ps1ps0: 10 11 12 13 14 15 16 17 00 01 02 03 04 05 06 07 + // 00 01 02 03 04 05 06 07 10 11 12 13 14 15 16 17 + ps1ps0 = _mm_unpackhi_epi64(ps1ps0, _mm_slli_si128(ps1ps0, 8)); + // 10 30 11 31 12 32 13 33 14 34 15 35 16 36 17 37 + x0 = _mm_unpackhi_epi8(ps1ps0, qs1qs0); + // 00 20 01 21 02 22 03 23 04 24 05 25 06 26 07 27 + ps1ps0 = _mm_unpacklo_epi8(ps1ps0, qs1qs0); + // 04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37 + qs1qs0 = _mm_unpackhi_epi8(ps1ps0, x0); + // 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33 + ps1ps0 = _mm_unpacklo_epi8(ps1ps0, x0); + + *(int *)(s + 0 * p - 2) = _mm_cvtsi128_si32(ps1ps0); + ps1ps0 = _mm_srli_si128(ps1ps0, 4); + *(int *)(s + 1 * p - 2) = _mm_cvtsi128_si32(ps1ps0); + ps1ps0 = _mm_srli_si128(ps1ps0, 4); + *(int *)(s + 2 * p - 2) = _mm_cvtsi128_si32(ps1ps0); + ps1ps0 = _mm_srli_si128(ps1ps0, 4); + *(int *)(s + 3 * p - 2) = _mm_cvtsi128_si32(ps1ps0); + + *(int *)(s + 4 * p - 2) = _mm_cvtsi128_si32(qs1qs0); + qs1qs0 = _mm_srli_si128(qs1qs0, 4); + *(int *)(s + 5 * p - 2) = _mm_cvtsi128_si32(qs1qs0); + qs1qs0 = _mm_srli_si128(qs1qs0, 4); + *(int *)(s + 6 * p - 2) = _mm_cvtsi128_si32(qs1qs0); + qs1qs0 = _mm_srli_si128(qs1qs0, 4); + *(int *)(s + 7 * p - 2) = _mm_cvtsi128_si32(qs1qs0); +} + +void vpx_lpf_horizontal_edge_8_sse2(unsigned char *s, int p, + const unsigned char *_blimit, + const unsigned char *_limit, + const unsigned char *_thresh) { const __m128i zero = _mm_set1_epi16(0); const __m128i one = _mm_set1_epi8(1); const __m128i blimit = _mm_load_si128((const __m128i *)_blimit); @@ -33,27 +243,27 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s, __m128i abs_p1p0; q4p4 = 
_mm_loadl_epi64((__m128i *)(s - 5 * p)); - q4p4 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q4p4), - (__m64 *)(s + 4 * p))); + q4p4 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q4p4), (__m64 *)(s + 4 * p))); q3p3 = _mm_loadl_epi64((__m128i *)(s - 4 * p)); - q3p3 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q3p3), - (__m64 *)(s + 3 * p))); + q3p3 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q3p3), (__m64 *)(s + 3 * p))); q2p2 = _mm_loadl_epi64((__m128i *)(s - 3 * p)); - q2p2 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q2p2), - (__m64 *)(s + 2 * p))); + q2p2 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q2p2), (__m64 *)(s + 2 * p))); q1p1 = _mm_loadl_epi64((__m128i *)(s - 2 * p)); - q1p1 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q1p1), - (__m64 *)(s + 1 * p))); + q1p1 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q1p1), (__m64 *)(s + 1 * p))); p1q1 = _mm_shuffle_epi32(q1p1, 78); q0p0 = _mm_loadl_epi64((__m128i *)(s - 1 * p)); - q0p0 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q0p0), - (__m64 *)(s - 0 * p))); + q0p0 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q0p0), (__m64 *)(s - 0 * p))); p0q0 = _mm_shuffle_epi32(q0p0, 78); { __m128i abs_p1q1, abs_p0q0, abs_q1q0, fe, ff, work; abs_p1p0 = abs_diff(q1p1, q0p0); - abs_q1q0 = _mm_srli_si128(abs_p1p0, 8); + abs_q1q0 = _mm_srli_si128(abs_p1p0, 8); fe = _mm_set1_epi8(0xfe); ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0); abs_p0q0 = abs_diff(q0p0, p0q0); @@ -62,7 +272,7 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s, hev = _mm_subs_epu8(flat, thresh); hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff); - abs_p0q0 =_mm_adds_epu8(abs_p0q0, abs_p0q0); + abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0); abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit); mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff); @@ -71,8 +281,7 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s, // 
mask |= (abs(p1 - p0) > limit) * -1; // mask |= (abs(q1 - q0) > limit) * -1; - work = _mm_max_epu8(abs_diff(q2p2, q1p1), - abs_diff(q3p3, q2p2)); + work = _mm_max_epu8(abs_diff(q2p2, q1p1), abs_diff(q3p3, q2p2)); mask = _mm_max_epu8(work, mask); mask = _mm_max_epu8(mask, _mm_srli_si128(mask, 8)); mask = _mm_subs_epu8(mask, limit); @@ -134,17 +343,17 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s, flat = _mm_and_si128(flat, mask); q5p5 = _mm_loadl_epi64((__m128i *)(s - 6 * p)); - q5p5 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q5p5), - (__m64 *)(s + 5 * p))); + q5p5 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q5p5), (__m64 *)(s + 5 * p))); q6p6 = _mm_loadl_epi64((__m128i *)(s - 7 * p)); - q6p6 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q6p6), - (__m64 *)(s + 6 * p))); + q6p6 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q6p6), (__m64 *)(s + 6 * p))); flat2 = _mm_max_epu8(abs_diff(q4p4, q0p0), abs_diff(q5p5, q0p0)); q7p7 = _mm_loadl_epi64((__m128i *)(s - 8 * p)); - q7p7 = _mm_castps_si128(_mm_loadh_pi(_mm_castsi128_ps(q7p7), - (__m64 *)(s + 7 * p))); + q7p7 = _mm_castps_si128( + _mm_loadh_pi(_mm_castsi128_ps(q7p7), (__m64 *)(s + 7 * p))); work = _mm_max_epu8(abs_diff(q6p6, q0p0), abs_diff(q7p7, q0p0)); flat2 = _mm_max_epu8(work, flat2); flat2 = _mm_max_epu8(flat2, _mm_srli_si128(flat2, 8)); @@ -164,7 +373,7 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s, __m128i pixetFilter_p2p1p0, pixetFilter_q2q1q0; __m128i sum_p7, sum_q7, sum_p3, sum_q3, res_p, res_q; - p7_16 = _mm_unpacklo_epi8(q7p7, zero);; + p7_16 = _mm_unpacklo_epi8(q7p7, zero); p6_16 = _mm_unpacklo_epi8(q6p6, zero); p5_16 = _mm_unpacklo_epi8(q5p5, zero); p4_16 = _mm_unpacklo_epi8(q4p4, zero); @@ -187,24 +396,23 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s, _mm_add_epi16(q4_16, q3_16)); pixetFilter_p2p1p0 = _mm_add_epi16(p0_16, _mm_add_epi16(p2_16, p1_16)); - pixelFilter_p = _mm_add_epi16(pixelFilter_p, pixetFilter_p2p1p0); + 
pixelFilter_p = _mm_add_epi16(pixelFilter_p, pixetFilter_p2p1p0); pixetFilter_q2q1q0 = _mm_add_epi16(q0_16, _mm_add_epi16(q2_16, q1_16)); - pixelFilter_q = _mm_add_epi16(pixelFilter_q, pixetFilter_q2q1q0); - pixelFilter_p = _mm_add_epi16(eight, _mm_add_epi16(pixelFilter_p, - pixelFilter_q)); - pixetFilter_p2p1p0 = _mm_add_epi16(four, - _mm_add_epi16(pixetFilter_p2p1p0, - pixetFilter_q2q1q0)); - res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, - _mm_add_epi16(p7_16, p0_16)), 4); - res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, - _mm_add_epi16(q7_16, q0_16)), 4); + pixelFilter_q = _mm_add_epi16(pixelFilter_q, pixetFilter_q2q1q0); + pixelFilter_p = + _mm_add_epi16(eight, _mm_add_epi16(pixelFilter_p, pixelFilter_q)); + pixetFilter_p2p1p0 = _mm_add_epi16( + four, _mm_add_epi16(pixetFilter_p2p1p0, pixetFilter_q2q1q0)); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(p7_16, p0_16)), 4); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(q7_16, q0_16)), 4); flat2_q0p0 = _mm_packus_epi16(res_p, res_q); - res_p = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0, - _mm_add_epi16(p3_16, p0_16)), 3); - res_q = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0, - _mm_add_epi16(q3_16, q0_16)), 3); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(p3_16, p0_16)), 3); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(q3_16, q0_16)), 3); flat_q0p0 = _mm_packus_epi16(res_p, res_q); @@ -215,18 +423,18 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s, pixelFilter_q = _mm_sub_epi16(pixelFilter_p, p6_16); pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q6_16); - res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, - _mm_add_epi16(sum_p7, p1_16)), 4); - res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q, - _mm_add_epi16(sum_q7, q1_16)), 4); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p1_16)), 4); + res_q = _mm_srli_epi16( + 
_mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q1_16)), 4); flat2_q1p1 = _mm_packus_epi16(res_p, res_q); pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_p2p1p0, p2_16); pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q2_16); - res_p = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0, - _mm_add_epi16(sum_p3, p1_16)), 3); - res_q = _mm_srli_epi16(_mm_add_epi16(pixetFilter_q2q1q0, - _mm_add_epi16(sum_q3, q1_16)), 3); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(sum_p3, p1_16)), 3); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_q2q1q0, _mm_add_epi16(sum_q3, q1_16)), 3); flat_q1p1 = _mm_packus_epi16(res_p, res_q); sum_p7 = _mm_add_epi16(sum_p7, p7_16); @@ -236,59 +444,59 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s, pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q5_16); pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p5_16); - res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, - _mm_add_epi16(sum_p7, p2_16)), 4); - res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q, - _mm_add_epi16(sum_q7, q2_16)), 4); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p2_16)), 4); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q2_16)), 4); flat2_q2p2 = _mm_packus_epi16(res_p, res_q); pixetFilter_p2p1p0 = _mm_sub_epi16(pixetFilter_p2p1p0, q1_16); pixetFilter_q2q1q0 = _mm_sub_epi16(pixetFilter_q2q1q0, p1_16); - res_p = _mm_srli_epi16(_mm_add_epi16(pixetFilter_p2p1p0, - _mm_add_epi16(sum_p3, p2_16)), 3); - res_q = _mm_srli_epi16(_mm_add_epi16(pixetFilter_q2q1q0, - _mm_add_epi16(sum_q3, q2_16)), 3); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_p2p1p0, _mm_add_epi16(sum_p3, p2_16)), 3); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixetFilter_q2q1q0, _mm_add_epi16(sum_q3, q2_16)), 3); flat_q2p2 = _mm_packus_epi16(res_p, res_q); sum_p7 = _mm_add_epi16(sum_p7, p7_16); sum_q7 = _mm_add_epi16(sum_q7, q7_16); pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q4_16); 
pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p4_16); - res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, - _mm_add_epi16(sum_p7, p3_16)), 4); - res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q, - _mm_add_epi16(sum_q7, q3_16)), 4); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p3_16)), 4); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q3_16)), 4); flat2_q3p3 = _mm_packus_epi16(res_p, res_q); sum_p7 = _mm_add_epi16(sum_p7, p7_16); sum_q7 = _mm_add_epi16(sum_q7, q7_16); pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q3_16); pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p3_16); - res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, - _mm_add_epi16(sum_p7, p4_16)), 4); - res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q, - _mm_add_epi16(sum_q7, q4_16)), 4); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p4_16)), 4); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q4_16)), 4); flat2_q4p4 = _mm_packus_epi16(res_p, res_q); sum_p7 = _mm_add_epi16(sum_p7, p7_16); sum_q7 = _mm_add_epi16(sum_q7, q7_16); pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q2_16); pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p2_16); - res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, - _mm_add_epi16(sum_p7, p5_16)), 4); - res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q, - _mm_add_epi16(sum_q7, q5_16)), 4); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p5_16)), 4); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q5_16)), 4); flat2_q5p5 = _mm_packus_epi16(res_p, res_q); sum_p7 = _mm_add_epi16(sum_p7, p7_16); sum_q7 = _mm_add_epi16(sum_q7, q7_16); pixelFilter_p = _mm_sub_epi16(pixelFilter_p, q1_16); pixelFilter_q = _mm_sub_epi16(pixelFilter_q, p1_16); - res_p = _mm_srli_epi16(_mm_add_epi16(pixelFilter_p, - _mm_add_epi16(sum_p7, p6_16)), 4); - res_q = _mm_srli_epi16(_mm_add_epi16(pixelFilter_q, - 
_mm_add_epi16(sum_q7, q6_16)), 4); + res_p = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_p, _mm_add_epi16(sum_p7, p6_16)), 4); + res_q = _mm_srli_epi16( + _mm_add_epi16(pixelFilter_q, _mm_add_epi16(sum_q7, q6_16)), 4); flat2_q6p6 = _mm_packus_epi16(res_p, res_q); } // wide flat @@ -349,7 +557,7 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s, flat2_q0p0 = _mm_and_si128(flat2, flat2_q0p0); q0p0 = _mm_or_si128(q0p0, flat2_q0p0); _mm_storel_epi64((__m128i *)(s - 1 * p), q0p0); - _mm_storeh_pi((__m64 *)(s - 0 * p), _mm_castsi128_ps(q0p0)); + _mm_storeh_pi((__m64 *)(s - 0 * p), _mm_castsi128_ps(q0p0)); } } @@ -367,8 +575,8 @@ static INLINE __m128i filter8_mask(const __m128i *const flat, const __m128i *const other_filt, const __m128i *const f8_lo, const __m128i *const f8_hi) { - const __m128i f8 = _mm_packus_epi16(_mm_srli_epi16(*f8_lo, 3), - _mm_srli_epi16(*f8_hi, 3)); + const __m128i f8 = + _mm_packus_epi16(_mm_srli_epi16(*f8_lo, 3), _mm_srli_epi16(*f8_hi, 3)); const __m128i result = _mm_and_si128(*flat, f8); return _mm_or_si128(_mm_andnot_si128(*flat, *other_filt), result); } @@ -377,17 +585,16 @@ static INLINE __m128i filter16_mask(const __m128i *const flat, const __m128i *const other_filt, const __m128i *const f_lo, const __m128i *const f_hi) { - const __m128i f = _mm_packus_epi16(_mm_srli_epi16(*f_lo, 4), - _mm_srli_epi16(*f_hi, 4)); + const __m128i f = + _mm_packus_epi16(_mm_srli_epi16(*f_lo, 4), _mm_srli_epi16(*f_hi, 4)); const __m128i result = _mm_and_si128(*flat, f); return _mm_or_si128(_mm_andnot_si128(*flat, *other_filt), result); } -static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, - int p, - const unsigned char *_blimit, - const unsigned char *_limit, - const unsigned char *_thresh) { +void vpx_lpf_horizontal_edge_16_sse2(unsigned char *s, int p, + const unsigned char *_blimit, + const unsigned char *_limit, + const unsigned char *_thresh) { const __m128i zero = _mm_set1_epi16(0); const __m128i one = _mm_set1_epi8(1); const 
__m128i blimit = _mm_load_si128((const __m128i *)_blimit); @@ -429,7 +636,7 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, __m128i work; max_abs_p1p0q1q0 = _mm_max_epu8(abs_p1p0, abs_q1q0); - abs_p0q0 =_mm_adds_epu8(abs_p0q0, abs_p0q0); + abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0); abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit); mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff); @@ -628,16 +835,16 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, __m128i f_hi; f_lo = _mm_sub_epi16(_mm_slli_epi16(p7_lo, 3), p7_lo); // p7 * 7 - f_lo = _mm_add_epi16(_mm_slli_epi16(p6_lo, 1), - _mm_add_epi16(p4_lo, f_lo)); + f_lo = + _mm_add_epi16(_mm_slli_epi16(p6_lo, 1), _mm_add_epi16(p4_lo, f_lo)); f_lo = _mm_add_epi16(_mm_add_epi16(p3_lo, f_lo), _mm_add_epi16(p2_lo, p1_lo)); f_lo = _mm_add_epi16(_mm_add_epi16(p0_lo, q0_lo), f_lo); f_lo = _mm_add_epi16(_mm_add_epi16(p5_lo, eight), f_lo); f_hi = _mm_sub_epi16(_mm_slli_epi16(p7_hi, 3), p7_hi); // p7 * 7 - f_hi = _mm_add_epi16(_mm_slli_epi16(p6_hi, 1), - _mm_add_epi16(p4_hi, f_hi)); + f_hi = + _mm_add_epi16(_mm_slli_epi16(p6_hi, 1), _mm_add_epi16(p4_hi, f_hi)); f_hi = _mm_add_epi16(_mm_add_epi16(p3_hi, f_hi), _mm_add_epi16(p2_hi, p1_hi)); f_hi = _mm_add_epi16(_mm_add_epi16(p0_hi, q0_hi), f_hi); @@ -716,21 +923,10 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, } } -// TODO(yunqingwang): remove count and call these 2 functions(8 or 16) directly. 
-void vpx_lpf_horizontal_16_sse2(unsigned char *s, int p, - const unsigned char *_blimit, - const unsigned char *_limit, - const unsigned char *_thresh, int count) { - if (count == 1) - mb_lpf_horizontal_edge_w_sse2_8(s, p, _blimit, _limit, _thresh); - else - mb_lpf_horizontal_edge_w_sse2_16(s, p, _blimit, _limit, _thresh); -} - void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p, const unsigned char *_blimit, const unsigned char *_limit, - const unsigned char *_thresh, int count) { + const unsigned char *_thresh) { DECLARE_ALIGNED(16, unsigned char, flat_op2[16]); DECLARE_ALIGNED(16, unsigned char, flat_op1[16]); DECLARE_ALIGNED(16, unsigned char, flat_op0[16]); @@ -745,8 +941,6 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p, __m128i p3, p2, p1, p0, q0, q1, q2, q3; __m128i q3p3, q2p2, q1p1, q0p0, p1q1, p0q0; - (void)count; - q3p3 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 4 * p)), _mm_loadl_epi64((__m128i *)(s + 3 * p))); q2p2 = _mm_unpacklo_epi64(_mm_loadl_epi64((__m128i *)(s - 3 * p)), @@ -765,7 +959,7 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p, const __m128i ff = _mm_cmpeq_epi8(fe, fe); __m128i abs_p1q1, abs_p0q0, abs_q1q0, abs_p1p0, work; abs_p1p0 = abs_diff(q1p1, q0p0); - abs_q1q0 = _mm_srli_si128(abs_p1p0, 8); + abs_q1q0 = _mm_srli_si128(abs_p1p0, 8); abs_p0q0 = abs_diff(q0p0, p0q0); abs_p1q1 = abs_diff(q1p1, p1q1); @@ -773,7 +967,7 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p, hev = _mm_subs_epu8(flat, thresh); hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff); - abs_p0q0 =_mm_adds_epu8(abs_p0q0, abs_p0q0); + abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0); abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit); mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff); @@ -782,8 +976,7 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p, // mask |= (abs(p1 - p0) > limit) * -1; // mask |= (abs(q1 - q0) > limit) * -1; - work = 
_mm_max_epu8(abs_diff(q2p2, q1p1), - abs_diff(q3p3, q2p2)); + work = _mm_max_epu8(abs_diff(q2p2, q1p1), abs_diff(q3p3, q2p2)); mask = _mm_max_epu8(work, mask); mask = _mm_max_epu8(mask, _mm_srli_si128(mask, 8)); mask = _mm_subs_epu8(mask, limit); @@ -791,8 +984,7 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p, // flat_mask4 - flat = _mm_max_epu8(abs_diff(q2p2, q0p0), - abs_diff(q3p3, q0p0)); + flat = _mm_max_epu8(abs_diff(q2p2, q0p0), abs_diff(q3p3, q0p0)); flat = _mm_max_epu8(abs_p1p0, flat); flat = _mm_max_epu8(flat, _mm_srli_si128(flat, 8)); flat = _mm_subs_epu8(flat, one); @@ -857,14 +1049,14 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p, const __m128i t3 = _mm_set1_epi8(3); const __m128i t80 = _mm_set1_epi8(0x80); const __m128i t1 = _mm_set1_epi8(0x1); - const __m128i ps1 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 2 * p)), - t80); - const __m128i ps0 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 1 * p)), - t80); - const __m128i qs0 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 0 * p)), - t80); - const __m128i qs1 = _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 1 * p)), - t80); + const __m128i ps1 = + _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 2 * p)), t80); + const __m128i ps0 = + _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s - 1 * p)), t80); + const __m128i qs0 = + _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 0 * p)), t80); + const __m128i qs1 = + _mm_xor_si128(_mm_loadl_epi64((__m128i *)(s + 1 * p)), t80); __m128i filt; __m128i work_a; __m128i filter1, filter2; @@ -943,8 +1135,7 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p, } } -void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, - const uint8_t *_blimit0, +void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0, const uint8_t *_thresh0, const uint8_t *_blimit1, @@ -979,17 +1170,17 @@ void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, q2 = _mm_loadu_si128((__m128i *)(s + 2 * p)); q3 = _mm_loadu_si128((__m128i 
*)(s + 3 * p)); { - const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0), - _mm_subs_epu8(p0, p1)); - const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0), - _mm_subs_epu8(q0, q1)); + const __m128i abs_p1p0 = + _mm_or_si128(_mm_subs_epu8(p1, p0), _mm_subs_epu8(p0, p1)); + const __m128i abs_q1q0 = + _mm_or_si128(_mm_subs_epu8(q1, q0), _mm_subs_epu8(q0, q1)); const __m128i one = _mm_set1_epi8(1); const __m128i fe = _mm_set1_epi8(0xfe); const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0); - __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0), - _mm_subs_epu8(q0, p0)); - __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu8(p1, q1), - _mm_subs_epu8(q1, p1)); + __m128i abs_p0q0 = + _mm_or_si128(_mm_subs_epu8(p0, q0), _mm_subs_epu8(q0, p0)); + __m128i abs_p1q1 = + _mm_or_si128(_mm_subs_epu8(p1, q1), _mm_subs_epu8(q1, p1)); __m128i work; // filter_mask and hev_mask @@ -997,7 +1188,7 @@ void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, hev = _mm_subs_epu8(flat, thresh); hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff); - abs_p0q0 =_mm_adds_epu8(abs_p0q0, abs_p0q0); + abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0); abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit); mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff); @@ -1005,29 +1196,25 @@ void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, mask = _mm_max_epu8(flat, mask); // mask |= (abs(p1 - p0) > limit) * -1; // mask |= (abs(q1 - q0) > limit) * -1; - work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p1), - _mm_subs_epu8(p1, p2)), - _mm_or_si128(_mm_subs_epu8(p3, p2), - _mm_subs_epu8(p2, p3))); + work = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(p2, p1), _mm_subs_epu8(p1, p2)), + _mm_or_si128(_mm_subs_epu8(p3, p2), _mm_subs_epu8(p2, p3))); mask = _mm_max_epu8(work, mask); - work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q2, q1), - _mm_subs_epu8(q1, q2)), - _mm_or_si128(_mm_subs_epu8(q3, q2), - _mm_subs_epu8(q2, q3))); + work = 
_mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(q2, q1), _mm_subs_epu8(q1, q2)), + _mm_or_si128(_mm_subs_epu8(q3, q2), _mm_subs_epu8(q2, q3))); mask = _mm_max_epu8(work, mask); mask = _mm_subs_epu8(mask, limit); mask = _mm_cmpeq_epi8(mask, zero); // flat_mask4 - work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p0), - _mm_subs_epu8(p0, p2)), - _mm_or_si128(_mm_subs_epu8(q2, q0), - _mm_subs_epu8(q0, q2))); + work = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(p2, p0), _mm_subs_epu8(p0, p2)), + _mm_or_si128(_mm_subs_epu8(q2, q0), _mm_subs_epu8(q0, q2))); flat = _mm_max_epu8(work, flat); - work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p3, p0), - _mm_subs_epu8(p0, p3)), - _mm_or_si128(_mm_subs_epu8(q3, q0), - _mm_subs_epu8(q0, q3))); + work = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(p3, p0), _mm_subs_epu8(p0, p3)), + _mm_or_si128(_mm_subs_epu8(q3, q0), _mm_subs_epu8(q0, q3))); flat = _mm_max_epu8(work, flat); flat = _mm_subs_epu8(flat, one); flat = _mm_cmpeq_epi8(flat, zero); @@ -1098,14 +1285,14 @@ void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const __m128i t1 = _mm_set1_epi8(0x1); const __m128i t7f = _mm_set1_epi8(0x7f); - const __m128i ps1 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 2 * p)), - t80); - const __m128i ps0 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 1 * p)), - t80); - const __m128i qs0 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 0 * p)), - t80); - const __m128i qs1 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 1 * p)), - t80); + const __m128i ps1 = + _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 2 * p)), t80); + const __m128i ps0 = + _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 1 * p)), t80); + const __m128i qs0 = + _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 0 * p)), t80); + const __m128i qs1 = + _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 1 * p)), t80); __m128i filt; __m128i work_a; __m128i filter1, filter2; @@ -1221,23 +1408,23 @@ void vpx_lpf_horizontal_4_dual_sse2(unsigned char *s, int p, // filter_mask and hev_mask { - 
const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0), - _mm_subs_epu8(p0, p1)); - const __m128i abs_q1q0 = _mm_or_si128(_mm_subs_epu8(q1, q0), - _mm_subs_epu8(q0, q1)); + const __m128i abs_p1p0 = + _mm_or_si128(_mm_subs_epu8(p1, p0), _mm_subs_epu8(p0, p1)); + const __m128i abs_q1q0 = + _mm_or_si128(_mm_subs_epu8(q1, q0), _mm_subs_epu8(q0, q1)); const __m128i fe = _mm_set1_epi8(0xfe); const __m128i ff = _mm_cmpeq_epi8(abs_p1p0, abs_p1p0); - __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0), - _mm_subs_epu8(q0, p0)); - __m128i abs_p1q1 = _mm_or_si128(_mm_subs_epu8(p1, q1), - _mm_subs_epu8(q1, p1)); + __m128i abs_p0q0 = + _mm_or_si128(_mm_subs_epu8(p0, q0), _mm_subs_epu8(q0, p0)); + __m128i abs_p1q1 = + _mm_or_si128(_mm_subs_epu8(p1, q1), _mm_subs_epu8(q1, p1)); __m128i work; flat = _mm_max_epu8(abs_p1p0, abs_q1q0); hev = _mm_subs_epu8(flat, thresh); hev = _mm_xor_si128(_mm_cmpeq_epi8(hev, zero), ff); - abs_p0q0 =_mm_adds_epu8(abs_p0q0, abs_p0q0); + abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0); abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); mask = _mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit); mask = _mm_xor_si128(_mm_cmpeq_epi8(mask, zero), ff); @@ -1245,15 +1432,13 @@ void vpx_lpf_horizontal_4_dual_sse2(unsigned char *s, int p, mask = _mm_max_epu8(flat, mask); // mask |= (abs(p1 - p0) > limit) * -1; // mask |= (abs(q1 - q0) > limit) * -1; - work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p1), - _mm_subs_epu8(p1, p2)), - _mm_or_si128(_mm_subs_epu8(p3, p2), - _mm_subs_epu8(p2, p3))); + work = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(p2, p1), _mm_subs_epu8(p1, p2)), + _mm_or_si128(_mm_subs_epu8(p3, p2), _mm_subs_epu8(p2, p3))); mask = _mm_max_epu8(work, mask); - work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(q2, q1), - _mm_subs_epu8(q1, q2)), - _mm_or_si128(_mm_subs_epu8(q3, q2), - _mm_subs_epu8(q2, q3))); + work = _mm_max_epu8( + _mm_or_si128(_mm_subs_epu8(q2, q1), _mm_subs_epu8(q1, q2)), + _mm_or_si128(_mm_subs_epu8(q3, q2), 
_mm_subs_epu8(q2, q3))); mask = _mm_max_epu8(work, mask); mask = _mm_subs_epu8(mask, limit); mask = _mm_cmpeq_epi8(mask, zero); @@ -1269,14 +1454,14 @@ void vpx_lpf_horizontal_4_dual_sse2(unsigned char *s, int p, const __m128i t1 = _mm_set1_epi8(0x1); const __m128i t7f = _mm_set1_epi8(0x7f); - const __m128i ps1 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 2 * p)), - t80); - const __m128i ps0 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 1 * p)), - t80); - const __m128i qs0 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 0 * p)), - t80); - const __m128i qs1 = _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 1 * p)), - t80); + const __m128i ps1 = + _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 2 * p)), t80); + const __m128i ps0 = + _mm_xor_si128(_mm_loadu_si128((__m128i *)(s - 1 * p)), t80); + const __m128i qs0 = + _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 0 * p)), t80); + const __m128i qs1 = + _mm_xor_si128(_mm_loadu_si128((__m128i *)(s + 1 * p)), t80); __m128i filt; __m128i work_a; __m128i filter1, filter2; @@ -1334,44 +1519,44 @@ static INLINE void transpose8x16(unsigned char *in0, unsigned char *in1, __m128i x8, x9, x10, x11, x12, x13, x14, x15; // 2-way interleave w/hoisting of unpacks - x0 = _mm_loadl_epi64((__m128i *)in0); // 1 + x0 = _mm_loadl_epi64((__m128i *)in0); // 1 x1 = _mm_loadl_epi64((__m128i *)(in0 + in_p)); // 3 - x0 = _mm_unpacklo_epi8(x0, x1); // 1 + x0 = _mm_unpacklo_epi8(x0, x1); // 1 x2 = _mm_loadl_epi64((__m128i *)(in0 + 2 * in_p)); // 5 - x3 = _mm_loadl_epi64((__m128i *)(in0 + 3*in_p)); // 7 - x1 = _mm_unpacklo_epi8(x2, x3); // 2 + x3 = _mm_loadl_epi64((__m128i *)(in0 + 3 * in_p)); // 7 + x1 = _mm_unpacklo_epi8(x2, x3); // 2 - x4 = _mm_loadl_epi64((__m128i *)(in0 + 4*in_p)); // 9 - x5 = _mm_loadl_epi64((__m128i *)(in0 + 5*in_p)); // 11 - x2 = _mm_unpacklo_epi8(x4, x5); // 3 + x4 = _mm_loadl_epi64((__m128i *)(in0 + 4 * in_p)); // 9 + x5 = _mm_loadl_epi64((__m128i *)(in0 + 5 * in_p)); // 11 + x2 = _mm_unpacklo_epi8(x4, x5); // 3 - x6 = 
_mm_loadl_epi64((__m128i *)(in0 + 6*in_p)); // 13 - x7 = _mm_loadl_epi64((__m128i *)(in0 + 7*in_p)); // 15 - x3 = _mm_unpacklo_epi8(x6, x7); // 4 - x4 = _mm_unpacklo_epi16(x0, x1); // 9 + x6 = _mm_loadl_epi64((__m128i *)(in0 + 6 * in_p)); // 13 + x7 = _mm_loadl_epi64((__m128i *)(in0 + 7 * in_p)); // 15 + x3 = _mm_unpacklo_epi8(x6, x7); // 4 + x4 = _mm_unpacklo_epi16(x0, x1); // 9 - x8 = _mm_loadl_epi64((__m128i *)in1); // 2 + x8 = _mm_loadl_epi64((__m128i *)in1); // 2 x9 = _mm_loadl_epi64((__m128i *)(in1 + in_p)); // 4 - x8 = _mm_unpacklo_epi8(x8, x9); // 5 - x5 = _mm_unpacklo_epi16(x2, x3); // 10 + x8 = _mm_unpacklo_epi8(x8, x9); // 5 + x5 = _mm_unpacklo_epi16(x2, x3); // 10 x10 = _mm_loadl_epi64((__m128i *)(in1 + 2 * in_p)); // 6 - x11 = _mm_loadl_epi64((__m128i *)(in1 + 3*in_p)); // 8 - x9 = _mm_unpacklo_epi8(x10, x11); // 6 + x11 = _mm_loadl_epi64((__m128i *)(in1 + 3 * in_p)); // 8 + x9 = _mm_unpacklo_epi8(x10, x11); // 6 - x12 = _mm_loadl_epi64((__m128i *)(in1 + 4*in_p)); // 10 - x13 = _mm_loadl_epi64((__m128i *)(in1 + 5*in_p)); // 12 - x10 = _mm_unpacklo_epi8(x12, x13); // 7 - x12 = _mm_unpacklo_epi16(x8, x9); // 11 + x12 = _mm_loadl_epi64((__m128i *)(in1 + 4 * in_p)); // 10 + x13 = _mm_loadl_epi64((__m128i *)(in1 + 5 * in_p)); // 12 + x10 = _mm_unpacklo_epi8(x12, x13); // 7 + x12 = _mm_unpacklo_epi16(x8, x9); // 11 - x14 = _mm_loadl_epi64((__m128i *)(in1 + 6*in_p)); // 14 - x15 = _mm_loadl_epi64((__m128i *)(in1 + 7*in_p)); // 16 - x11 = _mm_unpacklo_epi8(x14, x15); // 8 - x13 = _mm_unpacklo_epi16(x10, x11); // 12 + x14 = _mm_loadl_epi64((__m128i *)(in1 + 6 * in_p)); // 14 + x15 = _mm_loadl_epi64((__m128i *)(in1 + 7 * in_p)); // 16 + x11 = _mm_unpacklo_epi8(x14, x15); // 8 + x13 = _mm_unpacklo_epi16(x10, x11); // 12 - x6 = _mm_unpacklo_epi32(x4, x5); // 13 - x7 = _mm_unpackhi_epi32(x4, x5); // 14 + x6 = _mm_unpacklo_epi32(x4, x5); // 13 + x7 = _mm_unpackhi_epi32(x4, x5); // 14 x14 = _mm_unpacklo_epi32(x12, x13); // 15 x15 = _mm_unpackhi_epi32(x12, x13); // 16 
@@ -1407,23 +1592,31 @@ static INLINE void transpose(unsigned char *src[], int in_p, unsigned char *in = src[idx8x8]; unsigned char *out = dst[idx8x8]; - x0 = _mm_loadl_epi64((__m128i *)(in + 0*in_p)); // 00 01 02 03 04 05 06 07 - x1 = _mm_loadl_epi64((__m128i *)(in + 1*in_p)); // 10 11 12 13 14 15 16 17 + x0 = + _mm_loadl_epi64((__m128i *)(in + 0 * in_p)); // 00 01 02 03 04 05 06 07 + x1 = + _mm_loadl_epi64((__m128i *)(in + 1 * in_p)); // 10 11 12 13 14 15 16 17 // 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17 x0 = _mm_unpacklo_epi8(x0, x1); - x2 = _mm_loadl_epi64((__m128i *)(in + 2*in_p)); // 20 21 22 23 24 25 26 27 - x3 = _mm_loadl_epi64((__m128i *)(in + 3*in_p)); // 30 31 32 33 34 35 36 37 + x2 = + _mm_loadl_epi64((__m128i *)(in + 2 * in_p)); // 20 21 22 23 24 25 26 27 + x3 = + _mm_loadl_epi64((__m128i *)(in + 3 * in_p)); // 30 31 32 33 34 35 36 37 // 20 30 21 31 22 32 23 33 24 34 25 35 26 36 27 37 x1 = _mm_unpacklo_epi8(x2, x3); - x4 = _mm_loadl_epi64((__m128i *)(in + 4*in_p)); // 40 41 42 43 44 45 46 47 - x5 = _mm_loadl_epi64((__m128i *)(in + 5*in_p)); // 50 51 52 53 54 55 56 57 + x4 = + _mm_loadl_epi64((__m128i *)(in + 4 * in_p)); // 40 41 42 43 44 45 46 47 + x5 = + _mm_loadl_epi64((__m128i *)(in + 5 * in_p)); // 50 51 52 53 54 55 56 57 // 40 50 41 51 42 52 43 53 44 54 45 55 46 56 47 57 x2 = _mm_unpacklo_epi8(x4, x5); - x6 = _mm_loadl_epi64((__m128i *)(in + 6*in_p)); // 60 61 62 63 64 65 66 67 - x7 = _mm_loadl_epi64((__m128i *)(in + 7*in_p)); // 70 71 72 73 74 75 76 77 + x6 = + _mm_loadl_epi64((__m128i *)(in + 6 * in_p)); // 60 61 62 63 64 65 66 67 + x7 = + _mm_loadl_epi64((__m128i *)(in + 7 * in_p)); // 70 71 72 73 74 75 76 77 // 60 70 61 71 62 72 63 73 64 74 65 75 66 76 67 77 x3 = _mm_unpacklo_epi8(x6, x7); @@ -1433,15 +1626,15 @@ static INLINE void transpose(unsigned char *src[], int in_p, x5 = _mm_unpacklo_epi16(x2, x3); // 00 10 20 30 40 50 60 70 01 11 21 31 41 51 61 71 x6 = _mm_unpacklo_epi32(x4, x5); - _mm_storel_pd((double *)(out + 0*out_p), + 
_mm_storel_pd((double *)(out + 0 * out_p), _mm_castsi128_pd(x6)); // 00 10 20 30 40 50 60 70 - _mm_storeh_pd((double *)(out + 1*out_p), + _mm_storeh_pd((double *)(out + 1 * out_p), _mm_castsi128_pd(x6)); // 01 11 21 31 41 51 61 71 // 02 12 22 32 42 52 62 72 03 13 23 33 43 53 63 73 x7 = _mm_unpackhi_epi32(x4, x5); - _mm_storel_pd((double *)(out + 2*out_p), + _mm_storel_pd((double *)(out + 2 * out_p), _mm_castsi128_pd(x7)); // 02 12 22 32 42 52 62 72 - _mm_storeh_pd((double *)(out + 3*out_p), + _mm_storeh_pd((double *)(out + 3 * out_p), _mm_castsi128_pd(x7)); // 03 13 23 33 43 53 63 73 // 04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37 @@ -1450,25 +1643,23 @@ static INLINE void transpose(unsigned char *src[], int in_p, x5 = _mm_unpackhi_epi16(x2, x3); // 04 14 24 34 44 54 64 74 05 15 25 35 45 55 65 75 x6 = _mm_unpacklo_epi32(x4, x5); - _mm_storel_pd((double *)(out + 4*out_p), + _mm_storel_pd((double *)(out + 4 * out_p), _mm_castsi128_pd(x6)); // 04 14 24 34 44 54 64 74 - _mm_storeh_pd((double *)(out + 5*out_p), + _mm_storeh_pd((double *)(out + 5 * out_p), _mm_castsi128_pd(x6)); // 05 15 25 35 45 55 65 75 // 06 16 26 36 46 56 66 76 07 17 27 37 47 57 67 77 x7 = _mm_unpackhi_epi32(x4, x5); - _mm_storel_pd((double *)(out + 6*out_p), + _mm_storel_pd((double *)(out + 6 * out_p), _mm_castsi128_pd(x7)); // 06 16 26 36 46 56 66 76 - _mm_storeh_pd((double *)(out + 7*out_p), + _mm_storeh_pd((double *)(out + 7 * out_p), _mm_castsi128_pd(x7)); // 07 17 27 37 47 57 67 77 } while (++idx8x8 < num_8x8_to_transpose); } void vpx_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0, - const uint8_t *limit0, - const uint8_t *thresh0, - const uint8_t *blimit1, - const uint8_t *limit1, + const uint8_t *limit0, const uint8_t *thresh0, + const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { DECLARE_ALIGNED(16, unsigned char, t_dst[16 * 8]); unsigned char *src[2]; @@ -1492,11 +1683,10 @@ void vpx_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t 
*blimit0, void vpx_lpf_vertical_8_sse2(unsigned char *s, int p, const unsigned char *blimit, const unsigned char *limit, - const unsigned char *thresh, int count) { + const unsigned char *thresh) { DECLARE_ALIGNED(8, unsigned char, t_dst[8 * 8]); unsigned char *src[1]; unsigned char *dst[1]; - (void)count; // Transpose 8x8 src[0] = s - 4; @@ -1505,7 +1695,7 @@ void vpx_lpf_vertical_8_sse2(unsigned char *s, int p, transpose(src, p, dst, 8, 1); // Loop filtering - vpx_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, 1); + vpx_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh); src[0] = t_dst; dst[0] = s - 4; @@ -1515,10 +1705,8 @@ void vpx_lpf_vertical_8_sse2(unsigned char *s, int p, } void vpx_lpf_vertical_8_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0, - const uint8_t *limit0, - const uint8_t *thresh0, - const uint8_t *blimit1, - const uint8_t *limit1, + const uint8_t *limit0, const uint8_t *thresh0, + const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { DECLARE_ALIGNED(16, unsigned char, t_dst[16 * 8]); unsigned char *src[2]; @@ -1557,7 +1745,7 @@ void vpx_lpf_vertical_16_sse2(unsigned char *s, int p, transpose(src, p, dst, 8, 2); // Loop filtering - mb_lpf_horizontal_edge_w_sse2_8(t_dst + 8 * 8, 8, blimit, limit, thresh); + vpx_lpf_horizontal_edge_8_sse2(t_dst + 8 * 8, 8, blimit, limit, thresh); src[0] = t_dst; src[1] = t_dst + 8 * 8; @@ -1578,8 +1766,7 @@ void vpx_lpf_vertical_16_dual_sse2(unsigned char *s, int p, transpose8x16(s, s + 8 * p, p, t_dst + 8 * 16, 16); // Loop filtering - mb_lpf_horizontal_edge_w_sse2_16(t_dst + 8 * 16, 16, blimit, limit, - thresh); + vpx_lpf_horizontal_edge_16_sse2(t_dst + 8 * 16, 16, blimit, limit, thresh); // Transpose back transpose8x16(t_dst, t_dst + 8 * 16, 16, s - 8, p); diff --git a/libs/libvpx/vpx_dsp/x86/quantize_sse2.c b/libs/libvpx/vpx_dsp/x86/quantize_sse2.c index 8aa4568d67..2c7e431c74 100644 --- a/libs/libvpx/vpx_dsp/x86/quantize_sse2.c +++ 
b/libs/libvpx/vpx_dsp/x86/quantize_sse2.c @@ -17,8 +17,9 @@ static INLINE __m128i load_coefficients(const tran_low_t *coeff_ptr) { #if CONFIG_VP9_HIGHBITDEPTH return _mm_setr_epi16((int16_t)coeff_ptr[0], (int16_t)coeff_ptr[1], - (int16_t)coeff_ptr[2], (int16_t)coeff_ptr[3], (int16_t)coeff_ptr[4], - (int16_t)coeff_ptr[5], (int16_t)coeff_ptr[6], (int16_t)coeff_ptr[7]); + (int16_t)coeff_ptr[2], (int16_t)coeff_ptr[3], + (int16_t)coeff_ptr[4], (int16_t)coeff_ptr[5], + (int16_t)coeff_ptr[6], (int16_t)coeff_ptr[7]); #else return _mm_load_si128((const __m128i *)coeff_ptr); #endif @@ -32,21 +33,20 @@ static INLINE void store_coefficients(__m128i coeff_vals, __m128i coeff_vals_lo = _mm_mullo_epi16(coeff_vals, one); __m128i coeff_vals_1 = _mm_unpacklo_epi16(coeff_vals_lo, coeff_vals_hi); __m128i coeff_vals_2 = _mm_unpackhi_epi16(coeff_vals_lo, coeff_vals_hi); - _mm_store_si128((__m128i*)(coeff_ptr), coeff_vals_1); - _mm_store_si128((__m128i*)(coeff_ptr + 4), coeff_vals_2); + _mm_store_si128((__m128i *)(coeff_ptr), coeff_vals_1); + _mm_store_si128((__m128i *)(coeff_ptr + 4), coeff_vals_2); #else - _mm_store_si128((__m128i*)(coeff_ptr), coeff_vals); + _mm_store_si128((__m128i *)(coeff_ptr), coeff_vals); #endif } -void vpx_quantize_b_sse2(const tran_low_t* coeff_ptr, intptr_t n_coeffs, - int skip_block, const int16_t* zbin_ptr, - const int16_t* round_ptr, const int16_t* quant_ptr, - const int16_t* quant_shift_ptr, tran_low_t* qcoeff_ptr, - tran_low_t* dqcoeff_ptr, const int16_t* dequant_ptr, - uint16_t* eob_ptr, - const int16_t* scan_ptr, - const int16_t* iscan_ptr) { +void vpx_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs, + int skip_block, const int16_t *zbin_ptr, + const int16_t *round_ptr, const int16_t *quant_ptr, + const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, + tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, + uint16_t *eob_ptr, const int16_t *scan_ptr, + const int16_t *iscan_ptr) { __m128i zero; (void)scan_ptr; @@ -66,13 +66,13 @@ void 
vpx_quantize_b_sse2(const tran_low_t* coeff_ptr, intptr_t n_coeffs, // Setup global values { __m128i pw_1; - zbin = _mm_load_si128((const __m128i*)zbin_ptr); - round = _mm_load_si128((const __m128i*)round_ptr); - quant = _mm_load_si128((const __m128i*)quant_ptr); + zbin = _mm_load_si128((const __m128i *)zbin_ptr); + round = _mm_load_si128((const __m128i *)round_ptr); + quant = _mm_load_si128((const __m128i *)quant_ptr); pw_1 = _mm_set1_epi16(1); zbin = _mm_sub_epi16(zbin, pw_1); - dequant = _mm_load_si128((const __m128i*)dequant_ptr); - shift = _mm_load_si128((const __m128i*)quant_shift_ptr); + dequant = _mm_load_si128((const __m128i *)dequant_ptr); + shift = _mm_load_si128((const __m128i *)quant_shift_ptr); } { @@ -138,8 +138,8 @@ void vpx_quantize_b_sse2(const tran_low_t* coeff_ptr, intptr_t n_coeffs, zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero); nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero); nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero); - iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs)); - iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1); + iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs)); + iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1); // Add one to convert from indices to counts iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0); iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1); @@ -211,8 +211,8 @@ void vpx_quantize_b_sse2(const tran_low_t* coeff_ptr, intptr_t n_coeffs, zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero); nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero); nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero); - iscan0 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs)); - iscan1 = _mm_load_si128((const __m128i*)(iscan_ptr + n_coeffs) + 1); + iscan0 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs)); + iscan1 = _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1); // Add one to convert from indices to counts iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0); iscan1 = 
_mm_sub_epi16(iscan1, nzero_coeff1); diff --git a/libs/libvpx/vpx_dsp/x86/sad4d_avx2.c b/libs/libvpx/vpx_dsp/x86/sad4d_avx2.c index 793658f9ea..962b8fb11a 100644 --- a/libs/libvpx/vpx_dsp/x86/sad4d_avx2.c +++ b/libs/libvpx/vpx_dsp/x86/sad4d_avx2.c @@ -11,10 +11,8 @@ #include "./vpx_dsp_rtcd.h" #include "vpx/vpx_integer.h" -void vpx_sad32x32x4d_avx2(const uint8_t *src, - int src_stride, - const uint8_t *const ref[4], - int ref_stride, +void vpx_sad32x32x4d_avx2(const uint8_t *src, int src_stride, + const uint8_t *const ref[4], int ref_stride, uint32_t res[4]) { __m256i src_reg, ref0_reg, ref1_reg, ref2_reg, ref3_reg; __m256i sum_ref0, sum_ref1, sum_ref2, sum_ref3; @@ -30,7 +28,7 @@ void vpx_sad32x32x4d_avx2(const uint8_t *src, sum_ref1 = _mm256_set1_epi16(0); sum_ref2 = _mm256_set1_epi16(0); sum_ref3 = _mm256_set1_epi16(0); - for (i = 0; i < 32 ; i++) { + for (i = 0; i < 32; i++) { // load src and all refs src_reg = _mm256_loadu_si256((const __m256i *)src); ref0_reg = _mm256_loadu_si256((const __m256i *)ref0); @@ -48,11 +46,11 @@ void vpx_sad32x32x4d_avx2(const uint8_t *src, sum_ref2 = _mm256_add_epi32(sum_ref2, ref2_reg); sum_ref3 = _mm256_add_epi32(sum_ref3, ref3_reg); - src+= src_stride; - ref0+= ref_stride; - ref1+= ref_stride; - ref2+= ref_stride; - ref3+= ref_stride; + src += src_stride; + ref0 += ref_stride; + ref1 += ref_stride; + ref2 += ref_stride; + ref3 += ref_stride; } { __m128i sum; @@ -81,10 +79,8 @@ void vpx_sad32x32x4d_avx2(const uint8_t *src, } } -void vpx_sad64x64x4d_avx2(const uint8_t *src, - int src_stride, - const uint8_t *const ref[4], - int ref_stride, +void vpx_sad64x64x4d_avx2(const uint8_t *src, int src_stride, + const uint8_t *const ref[4], int ref_stride, uint32_t res[4]) { __m256i src_reg, srcnext_reg, ref0_reg, ref0next_reg; __m256i ref1_reg, ref1next_reg, ref2_reg, ref2next_reg; @@ -102,7 +98,7 @@ void vpx_sad64x64x4d_avx2(const uint8_t *src, sum_ref1 = _mm256_set1_epi16(0); sum_ref2 = _mm256_set1_epi16(0); sum_ref3 = 
_mm256_set1_epi16(0); - for (i = 0; i < 64 ; i++) { + for (i = 0; i < 64; i++) { // load 64 bytes from src and all refs src_reg = _mm256_loadu_si256((const __m256i *)src); srcnext_reg = _mm256_loadu_si256((const __m256i *)(src + 32)); @@ -133,11 +129,11 @@ void vpx_sad64x64x4d_avx2(const uint8_t *src, sum_ref1 = _mm256_add_epi32(sum_ref1, ref1next_reg); sum_ref2 = _mm256_add_epi32(sum_ref2, ref2next_reg); sum_ref3 = _mm256_add_epi32(sum_ref3, ref3next_reg); - src+= src_stride; - ref0+= ref_stride; - ref1+= ref_stride; - ref2+= ref_stride; - ref3+= ref_stride; + src += src_stride; + ref0 += ref_stride; + ref1 += ref_stride; + ref2 += ref_stride; + ref3 += ref_stride; } { __m128i sum; diff --git a/libs/libvpx/vpx_dsp/x86/sad_avx2.c b/libs/libvpx/vpx_dsp/x86/sad_avx2.c index ce9ad8f780..d944134305 100644 --- a/libs/libvpx/vpx_dsp/x86/sad_avx2.c +++ b/libs/libvpx/vpx_dsp/x86/sad_avx2.c @@ -11,75 +11,74 @@ #include "./vpx_dsp_rtcd.h" #include "vpx_ports/mem.h" -#define FSAD64_H(h) \ -unsigned int vpx_sad64x##h##_avx2(const uint8_t *src_ptr, \ - int src_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride) { \ - int i, res; \ - __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \ - __m256i sum_sad = _mm256_setzero_si256(); \ - __m256i sum_sad_h; \ - __m128i sum_sad128; \ - for (i = 0 ; i < h ; i++) { \ - ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \ - ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + 32)); \ - sad1_reg = _mm256_sad_epu8(ref1_reg, \ - _mm256_loadu_si256((__m256i const *)src_ptr)); \ - sad2_reg = _mm256_sad_epu8(ref2_reg, \ - _mm256_loadu_si256((__m256i const *)(src_ptr + 32))); \ - sum_sad = _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg)); \ - ref_ptr+= ref_stride; \ - src_ptr+= src_stride; \ - } \ - sum_sad_h = _mm256_srli_si256(sum_sad, 8); \ - sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \ - sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \ - sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), 
sum_sad128); \ - res = _mm_cvtsi128_si32(sum_sad128); \ - return res; \ -} +#define FSAD64_H(h) \ + unsigned int vpx_sad64x##h##_avx2(const uint8_t *src_ptr, int src_stride, \ + const uint8_t *ref_ptr, int ref_stride) { \ + int i, res; \ + __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \ + __m256i sum_sad = _mm256_setzero_si256(); \ + __m256i sum_sad_h; \ + __m128i sum_sad128; \ + for (i = 0; i < h; i++) { \ + ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \ + ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + 32)); \ + sad1_reg = _mm256_sad_epu8( \ + ref1_reg, _mm256_loadu_si256((__m256i const *)src_ptr)); \ + sad2_reg = _mm256_sad_epu8( \ + ref2_reg, _mm256_loadu_si256((__m256i const *)(src_ptr + 32))); \ + sum_sad = \ + _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg)); \ + ref_ptr += ref_stride; \ + src_ptr += src_stride; \ + } \ + sum_sad_h = _mm256_srli_si256(sum_sad, 8); \ + sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \ + sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \ + sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \ + res = _mm_cvtsi128_si32(sum_sad128); \ + return res; \ + } -#define FSAD32_H(h) \ -unsigned int vpx_sad32x##h##_avx2(const uint8_t *src_ptr, \ - int src_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride) { \ - int i, res; \ - __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \ - __m256i sum_sad = _mm256_setzero_si256(); \ - __m256i sum_sad_h; \ - __m128i sum_sad128; \ - int ref2_stride = ref_stride << 1; \ - int src2_stride = src_stride << 1; \ - int max = h >> 1; \ - for (i = 0 ; i < max ; i++) { \ - ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \ - ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \ - sad1_reg = _mm256_sad_epu8(ref1_reg, \ - _mm256_loadu_si256((__m256i const *)src_ptr)); \ - sad2_reg = _mm256_sad_epu8(ref2_reg, \ - _mm256_loadu_si256((__m256i const *)(src_ptr + src_stride))); \ - sum_sad = _mm256_add_epi32(sum_sad, 
_mm256_add_epi32(sad1_reg, sad2_reg)); \ - ref_ptr+= ref2_stride; \ - src_ptr+= src2_stride; \ - } \ - sum_sad_h = _mm256_srli_si256(sum_sad, 8); \ - sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \ - sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \ - sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \ - res = _mm_cvtsi128_si32(sum_sad128); \ - return res; \ -} +#define FSAD32_H(h) \ + unsigned int vpx_sad32x##h##_avx2(const uint8_t *src_ptr, int src_stride, \ + const uint8_t *ref_ptr, int ref_stride) { \ + int i, res; \ + __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \ + __m256i sum_sad = _mm256_setzero_si256(); \ + __m256i sum_sad_h; \ + __m128i sum_sad128; \ + int ref2_stride = ref_stride << 1; \ + int src2_stride = src_stride << 1; \ + int max = h >> 1; \ + for (i = 0; i < max; i++) { \ + ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \ + ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \ + sad1_reg = _mm256_sad_epu8( \ + ref1_reg, _mm256_loadu_si256((__m256i const *)src_ptr)); \ + sad2_reg = _mm256_sad_epu8( \ + ref2_reg, \ + _mm256_loadu_si256((__m256i const *)(src_ptr + src_stride))); \ + sum_sad = \ + _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg)); \ + ref_ptr += ref2_stride; \ + src_ptr += src2_stride; \ + } \ + sum_sad_h = _mm256_srli_si256(sum_sad, 8); \ + sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \ + sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \ + sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \ + res = _mm_cvtsi128_si32(sum_sad128); \ + return res; \ + } -#define FSAD64 \ -FSAD64_H(64); \ -FSAD64_H(32); +#define FSAD64 \ + FSAD64_H(64); \ + FSAD64_H(32); -#define FSAD32 \ -FSAD32_H(64); \ -FSAD32_H(32); \ -FSAD32_H(16); +#define FSAD32 \ + FSAD32_H(64); \ + FSAD32_H(32); \ + FSAD32_H(16); FSAD64; FSAD32; @@ -89,88 +88,86 @@ FSAD32; #undef FSAD64_H #undef FSAD32_H -#define FSADAVG64_H(h) \ -unsigned int vpx_sad64x##h##_avg_avx2(const 
uint8_t *src_ptr, \ - int src_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - const uint8_t *second_pred) { \ - int i, res; \ - __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \ - __m256i sum_sad = _mm256_setzero_si256(); \ - __m256i sum_sad_h; \ - __m128i sum_sad128; \ - for (i = 0 ; i < h ; i++) { \ - ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \ - ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + 32)); \ - ref1_reg = _mm256_avg_epu8(ref1_reg, \ - _mm256_loadu_si256((__m256i const *)second_pred)); \ - ref2_reg = _mm256_avg_epu8(ref2_reg, \ - _mm256_loadu_si256((__m256i const *)(second_pred +32))); \ - sad1_reg = _mm256_sad_epu8(ref1_reg, \ - _mm256_loadu_si256((__m256i const *)src_ptr)); \ - sad2_reg = _mm256_sad_epu8(ref2_reg, \ - _mm256_loadu_si256((__m256i const *)(src_ptr + 32))); \ - sum_sad = _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg)); \ - ref_ptr+= ref_stride; \ - src_ptr+= src_stride; \ - second_pred+= 64; \ - } \ - sum_sad_h = _mm256_srli_si256(sum_sad, 8); \ - sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \ - sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \ - sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \ - res = _mm_cvtsi128_si32(sum_sad128); \ - return res; \ -} +#define FSADAVG64_H(h) \ + unsigned int vpx_sad64x##h##_avg_avx2( \ + const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \ + int ref_stride, const uint8_t *second_pred) { \ + int i, res; \ + __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \ + __m256i sum_sad = _mm256_setzero_si256(); \ + __m256i sum_sad_h; \ + __m128i sum_sad128; \ + for (i = 0; i < h; i++) { \ + ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \ + ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + 32)); \ + ref1_reg = _mm256_avg_epu8( \ + ref1_reg, _mm256_loadu_si256((__m256i const *)second_pred)); \ + ref2_reg = _mm256_avg_epu8( \ + ref2_reg, _mm256_loadu_si256((__m256i const *)(second_pred + 32))); \ + 
sad1_reg = _mm256_sad_epu8( \ + ref1_reg, _mm256_loadu_si256((__m256i const *)src_ptr)); \ + sad2_reg = _mm256_sad_epu8( \ + ref2_reg, _mm256_loadu_si256((__m256i const *)(src_ptr + 32))); \ + sum_sad = \ + _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg)); \ + ref_ptr += ref_stride; \ + src_ptr += src_stride; \ + second_pred += 64; \ + } \ + sum_sad_h = _mm256_srli_si256(sum_sad, 8); \ + sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \ + sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \ + sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \ + res = _mm_cvtsi128_si32(sum_sad128); \ + return res; \ + } -#define FSADAVG32_H(h) \ -unsigned int vpx_sad32x##h##_avg_avx2(const uint8_t *src_ptr, \ - int src_stride, \ - const uint8_t *ref_ptr, \ - int ref_stride, \ - const uint8_t *second_pred) { \ - int i, res; \ - __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \ - __m256i sum_sad = _mm256_setzero_si256(); \ - __m256i sum_sad_h; \ - __m128i sum_sad128; \ - int ref2_stride = ref_stride << 1; \ - int src2_stride = src_stride << 1; \ - int max = h >> 1; \ - for (i = 0 ; i < max ; i++) { \ - ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \ - ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \ - ref1_reg = _mm256_avg_epu8(ref1_reg, \ - _mm256_loadu_si256((__m256i const *)second_pred)); \ - ref2_reg = _mm256_avg_epu8(ref2_reg, \ - _mm256_loadu_si256((__m256i const *)(second_pred +32))); \ - sad1_reg = _mm256_sad_epu8(ref1_reg, \ - _mm256_loadu_si256((__m256i const *)src_ptr)); \ - sad2_reg = _mm256_sad_epu8(ref2_reg, \ - _mm256_loadu_si256((__m256i const *)(src_ptr + src_stride))); \ - sum_sad = _mm256_add_epi32(sum_sad, \ - _mm256_add_epi32(sad1_reg, sad2_reg)); \ - ref_ptr+= ref2_stride; \ - src_ptr+= src2_stride; \ - second_pred+= 64; \ - } \ - sum_sad_h = _mm256_srli_si256(sum_sad, 8); \ - sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \ - sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \ - 
sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \ - res = _mm_cvtsi128_si32(sum_sad128); \ - return res; \ -} +#define FSADAVG32_H(h) \ + unsigned int vpx_sad32x##h##_avg_avx2( \ + const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \ + int ref_stride, const uint8_t *second_pred) { \ + int i, res; \ + __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \ + __m256i sum_sad = _mm256_setzero_si256(); \ + __m256i sum_sad_h; \ + __m128i sum_sad128; \ + int ref2_stride = ref_stride << 1; \ + int src2_stride = src_stride << 1; \ + int max = h >> 1; \ + for (i = 0; i < max; i++) { \ + ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \ + ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + ref_stride)); \ + ref1_reg = _mm256_avg_epu8( \ + ref1_reg, _mm256_loadu_si256((__m256i const *)second_pred)); \ + ref2_reg = _mm256_avg_epu8( \ + ref2_reg, _mm256_loadu_si256((__m256i const *)(second_pred + 32))); \ + sad1_reg = _mm256_sad_epu8( \ + ref1_reg, _mm256_loadu_si256((__m256i const *)src_ptr)); \ + sad2_reg = _mm256_sad_epu8( \ + ref2_reg, \ + _mm256_loadu_si256((__m256i const *)(src_ptr + src_stride))); \ + sum_sad = \ + _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg)); \ + ref_ptr += ref2_stride; \ + src_ptr += src2_stride; \ + second_pred += 64; \ + } \ + sum_sad_h = _mm256_srli_si256(sum_sad, 8); \ + sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \ + sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \ + sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \ + res = _mm_cvtsi128_si32(sum_sad128); \ + return res; \ + } -#define FSADAVG64 \ -FSADAVG64_H(64); \ -FSADAVG64_H(32); +#define FSADAVG64 \ + FSADAVG64_H(64); \ + FSADAVG64_H(32); -#define FSADAVG32 \ -FSADAVG32_H(64); \ -FSADAVG32_H(32); \ -FSADAVG32_H(16); +#define FSADAVG32 \ + FSADAVG32_H(64); \ + FSADAVG32_H(32); \ + FSADAVG32_H(16); FSADAVG64; FSADAVG32; diff --git a/libs/libvpx/vpx_dsp/x86/sad_mmx.asm 
b/libs/libvpx/vpx_dsp/x86/sad_mmx.asm deleted file mode 100644 index 9968992bd1..0000000000 --- a/libs/libvpx/vpx_dsp/x86/sad_mmx.asm +++ /dev/null @@ -1,427 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. -; - - -%include "vpx_ports/x86_abi_support.asm" - -global sym(vpx_sad16x16_mmx) PRIVATE -global sym(vpx_sad8x16_mmx) PRIVATE -global sym(vpx_sad8x8_mmx) PRIVATE -global sym(vpx_sad4x4_mmx) PRIVATE -global sym(vpx_sad16x8_mmx) PRIVATE - -;unsigned int vpx_sad16x16_mmx( -; unsigned char *src_ptr, -; int src_stride, -; unsigned char *ref_ptr, -; int ref_stride) -sym(vpx_sad16x16_mmx): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 4 - push rsi - push rdi - ; end prolog - - mov rsi, arg(0) ;src_ptr - mov rdi, arg(2) ;ref_ptr - - movsxd rax, dword ptr arg(1) ;src_stride - movsxd rdx, dword ptr arg(3) ;ref_stride - - lea rcx, [rsi+rax*8] - - lea rcx, [rcx+rax*8] - pxor mm7, mm7 - - pxor mm6, mm6 - -.x16x16sad_mmx_loop: - - movq mm0, QWORD PTR [rsi] - movq mm2, QWORD PTR [rsi+8] - - movq mm1, QWORD PTR [rdi] - movq mm3, QWORD PTR [rdi+8] - - movq mm4, mm0 - movq mm5, mm2 - - psubusb mm0, mm1 - psubusb mm1, mm4 - - psubusb mm2, mm3 - psubusb mm3, mm5 - - por mm0, mm1 - por mm2, mm3 - - movq mm1, mm0 - movq mm3, mm2 - - punpcklbw mm0, mm6 - punpcklbw mm2, mm6 - - punpckhbw mm1, mm6 - punpckhbw mm3, mm6 - - paddw mm0, mm2 - paddw mm1, mm3 - - - lea rsi, [rsi+rax] - add rdi, rdx - - paddw mm7, mm0 - paddw mm7, mm1 - - cmp rsi, rcx - jne .x16x16sad_mmx_loop - - - movq mm0, mm7 - - punpcklwd mm0, mm6 - punpckhwd mm7, mm6 - - paddw mm0, mm7 - movq mm7, mm0 - - - psrlq mm0, 32 - paddw mm7, mm0 - - movq rax, mm7 - - pop 
rdi - pop rsi - mov rsp, rbp - ; begin epilog - UNSHADOW_ARGS - pop rbp - ret - - -;unsigned int vpx_sad8x16_mmx( -; unsigned char *src_ptr, -; int src_stride, -; unsigned char *ref_ptr, -; int ref_stride) -sym(vpx_sad8x16_mmx): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 4 - push rsi - push rdi - ; end prolog - - mov rsi, arg(0) ;src_ptr - mov rdi, arg(2) ;ref_ptr - - movsxd rax, dword ptr arg(1) ;src_stride - movsxd rdx, dword ptr arg(3) ;ref_stride - - lea rcx, [rsi+rax*8] - - lea rcx, [rcx+rax*8] - pxor mm7, mm7 - - pxor mm6, mm6 - -.x8x16sad_mmx_loop: - - movq mm0, QWORD PTR [rsi] - movq mm1, QWORD PTR [rdi] - - movq mm2, mm0 - psubusb mm0, mm1 - - psubusb mm1, mm2 - por mm0, mm1 - - movq mm2, mm0 - punpcklbw mm0, mm6 - - punpckhbw mm2, mm6 - lea rsi, [rsi+rax] - - add rdi, rdx - paddw mm7, mm0 - - paddw mm7, mm2 - cmp rsi, rcx - - jne .x8x16sad_mmx_loop - - movq mm0, mm7 - punpcklwd mm0, mm6 - - punpckhwd mm7, mm6 - paddw mm0, mm7 - - movq mm7, mm0 - psrlq mm0, 32 - - paddw mm7, mm0 - movq rax, mm7 - - pop rdi - pop rsi - mov rsp, rbp - ; begin epilog - UNSHADOW_ARGS - pop rbp - ret - - -;unsigned int vpx_sad8x8_mmx( -; unsigned char *src_ptr, -; int src_stride, -; unsigned char *ref_ptr, -; int ref_stride) -sym(vpx_sad8x8_mmx): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 4 - push rsi - push rdi - ; end prolog - - mov rsi, arg(0) ;src_ptr - mov rdi, arg(2) ;ref_ptr - - movsxd rax, dword ptr arg(1) ;src_stride - movsxd rdx, dword ptr arg(3) ;ref_stride - - lea rcx, [rsi+rax*8] - pxor mm7, mm7 - - pxor mm6, mm6 - -.x8x8sad_mmx_loop: - - movq mm0, QWORD PTR [rsi] - movq mm1, QWORD PTR [rdi] - - movq mm2, mm0 - psubusb mm0, mm1 - - psubusb mm1, mm2 - por mm0, mm1 - - movq mm2, mm0 - punpcklbw mm0, mm6 - - punpckhbw mm2, mm6 - paddw mm0, mm2 - - lea rsi, [rsi+rax] - add rdi, rdx - - paddw mm7, mm0 - cmp rsi, rcx - - jne .x8x8sad_mmx_loop - - movq mm0, mm7 - punpcklwd mm0, mm6 - - punpckhwd mm7, mm6 - paddw mm0, mm7 - - movq mm7, mm0 - psrlq mm0, 32 - - 
paddw mm7, mm0 - movq rax, mm7 - - pop rdi - pop rsi - mov rsp, rbp - ; begin epilog - UNSHADOW_ARGS - pop rbp - ret - - -;unsigned int vpx_sad4x4_mmx( -; unsigned char *src_ptr, -; int src_stride, -; unsigned char *ref_ptr, -; int ref_stride) -sym(vpx_sad4x4_mmx): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 4 - push rsi - push rdi - ; end prolog - - mov rsi, arg(0) ;src_ptr - mov rdi, arg(2) ;ref_ptr - - movsxd rax, dword ptr arg(1) ;src_stride - movsxd rdx, dword ptr arg(3) ;ref_stride - - movd mm0, DWORD PTR [rsi] - movd mm1, DWORD PTR [rdi] - - movd mm2, DWORD PTR [rsi+rax] - movd mm3, DWORD PTR [rdi+rdx] - - punpcklbw mm0, mm2 - punpcklbw mm1, mm3 - - movq mm2, mm0 - psubusb mm0, mm1 - - psubusb mm1, mm2 - por mm0, mm1 - - movq mm2, mm0 - pxor mm3, mm3 - - punpcklbw mm0, mm3 - punpckhbw mm2, mm3 - - paddw mm0, mm2 - - lea rsi, [rsi+rax*2] - lea rdi, [rdi+rdx*2] - - movd mm4, DWORD PTR [rsi] - movd mm5, DWORD PTR [rdi] - - movd mm6, DWORD PTR [rsi+rax] - movd mm7, DWORD PTR [rdi+rdx] - - punpcklbw mm4, mm6 - punpcklbw mm5, mm7 - - movq mm6, mm4 - psubusb mm4, mm5 - - psubusb mm5, mm6 - por mm4, mm5 - - movq mm5, mm4 - punpcklbw mm4, mm3 - - punpckhbw mm5, mm3 - paddw mm4, mm5 - - paddw mm0, mm4 - movq mm1, mm0 - - punpcklwd mm0, mm3 - punpckhwd mm1, mm3 - - paddw mm0, mm1 - movq mm1, mm0 - - psrlq mm0, 32 - paddw mm0, mm1 - - movq rax, mm0 - - pop rdi - pop rsi - mov rsp, rbp - ; begin epilog - UNSHADOW_ARGS - pop rbp - ret - - -;unsigned int vpx_sad16x8_mmx( -; unsigned char *src_ptr, -; int src_stride, -; unsigned char *ref_ptr, -; int ref_stride) -sym(vpx_sad16x8_mmx): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 4 - push rsi - push rdi - ; end prolog - - mov rsi, arg(0) ;src_ptr - mov rdi, arg(2) ;ref_ptr - - movsxd rax, dword ptr arg(1) ;src_stride - movsxd rdx, dword ptr arg(3) ;ref_stride - - lea rcx, [rsi+rax*8] - pxor mm7, mm7 - - pxor mm6, mm6 - -.x16x8sad_mmx_loop: - - movq mm0, [rsi] - movq mm1, [rdi] - - movq mm2, [rsi+8] - movq mm3, 
[rdi+8] - - movq mm4, mm0 - movq mm5, mm2 - - psubusb mm0, mm1 - psubusb mm1, mm4 - - psubusb mm2, mm3 - psubusb mm3, mm5 - - por mm0, mm1 - por mm2, mm3 - - movq mm1, mm0 - movq mm3, mm2 - - punpcklbw mm0, mm6 - punpckhbw mm1, mm6 - - punpcklbw mm2, mm6 - punpckhbw mm3, mm6 - - - paddw mm0, mm2 - paddw mm1, mm3 - - paddw mm0, mm1 - lea rsi, [rsi+rax] - - add rdi, rdx - paddw mm7, mm0 - - cmp rsi, rcx - jne .x16x8sad_mmx_loop - - movq mm0, mm7 - punpcklwd mm0, mm6 - - punpckhwd mm7, mm6 - paddw mm0, mm7 - - movq mm7, mm0 - psrlq mm0, 32 - - paddw mm7, mm0 - movq rax, mm7 - - pop rdi - pop rsi - mov rsp, rbp - ; begin epilog - UNSHADOW_ARGS - pop rbp - ret diff --git a/libs/libvpx/vpx_dsp/x86/subpel_variance_sse2.asm b/libs/libvpx/vpx_dsp/x86/subpel_variance_sse2.asm index c655e4b346..cee4468c1f 100644 --- a/libs/libvpx/vpx_dsp/x86/subpel_variance_sse2.asm +++ b/libs/libvpx/vpx_dsp/x86/subpel_variance_sse2.asm @@ -57,8 +57,8 @@ SECTION .text paddd %6, %1 %endmacro -%macro STORE_AND_RET 0 -%if mmsize == 16 +%macro STORE_AND_RET 1 +%if %1 > 4 ; if H=64 and W=16, we have 8 words of each 2(1bit)x64(6bit)x9bit=16bit ; in m6, i.e. it _exactly_ fits in a signed word per word in the xmm reg. 
; We have to sign-extend it before adding the words within the register @@ -78,16 +78,16 @@ SECTION .text movd [r1], m7 ; store sse paddd m6, m4 movd raxd, m6 ; store sum as return value -%else ; mmsize == 8 - pshufw m4, m6, 0xe - pshufw m3, m7, 0xe +%else ; 4xh + pshuflw m4, m6, 0xe + pshuflw m3, m7, 0xe paddw m6, m4 paddd m7, m3 pcmpgtw m5, m6 ; mask for 0 > x mov r1, ssem ; r1 = unsigned int *sse punpcklwd m6, m5 ; sign-extend m6 word->dword movd [r1], m7 ; store sse - pshufw m4, m6, 0xe + pshuflw m4, m6, 0xe paddd m6, m4 movd raxd, m6 ; store sum as return value %endif @@ -196,6 +196,12 @@ SECTION .text %endif %endif +%if %1 == 4 + %define movx movd +%else + %define movx movh +%endif + ASSERT %1 <= 16 ; m6 overflows if w > 16 pxor m6, m6 ; sum pxor m7, m7 ; sse @@ -228,6 +234,7 @@ SECTION .text %endif punpckhbw m2, m0, m5 punpcklbw m0, m5 + %if %2 == 0 ; !avg punpckhbw m3, m1, m5 punpcklbw m1, m5 @@ -237,24 +244,37 @@ SECTION .text add srcq, src_strideq add dstq, dst_strideq %else ; %1 < 16 - movh m0, [srcq] + movx m0, [srcq] %if %2 == 1 ; avg -%if mmsize == 16 +%if %1 > 4 movhps m0, [srcq+src_strideq] -%else ; mmsize == 8 - punpckldq m0, [srcq+src_strideq] +%else ; 4xh + movx m1, [srcq+src_strideq] + punpckldq m0, m1 %endif %else ; !avg - movh m2, [srcq+src_strideq] + movx m2, [srcq+src_strideq] %endif - movh m1, [dstq] - movh m3, [dstq+dst_strideq] + + movx m1, [dstq] + movx m3, [dstq+dst_strideq] + %if %2 == 1 ; avg +%if %1 > 4 pavgb m0, [secq] +%else + movh m2, [secq] + pavgb m0, m2 +%endif punpcklbw m3, m5 punpcklbw m1, m5 +%if %1 > 4 punpckhbw m2, m0, m5 punpcklbw m0, m5 +%else ; 4xh + punpcklbw m0, m5 + movhlps m2, m0 +%endif %else ; !avg punpcklbw m0, m5 punpcklbw m2, m5 @@ -271,10 +291,10 @@ SECTION .text %endif dec block_height jg .x_zero_y_zero_loop - STORE_AND_RET + STORE_AND_RET %1 .x_zero_y_nonzero: - cmp y_offsetd, 8 + cmp y_offsetd, 4 jne .x_zero_y_nonhalf ; x_offset == 0 && y_offset == 0.5 @@ -296,37 +316,41 @@ SECTION .text add srcq, 
src_strideq add dstq, dst_strideq %else ; %1 < 16 - movh m0, [srcq] - movh m2, [srcq+src_strideq] + movx m0, [srcq] + movx m2, [srcq+src_strideq] %if %2 == 1 ; avg -%if mmsize == 16 +%if %1 > 4 movhps m2, [srcq+src_strideq*2] -%else ; mmsize == 8 -%if %1 == 4 - movh m1, [srcq+src_strideq*2] +%else ; 4xh + movx m1, [srcq+src_strideq*2] punpckldq m2, m1 -%else - punpckldq m2, [srcq+src_strideq*2] %endif -%endif - movh m1, [dstq] -%if mmsize == 16 + movx m1, [dstq] +%if %1 > 4 movlhps m0, m2 -%else ; mmsize == 8 +%else ; 4xh punpckldq m0, m2 %endif - movh m3, [dstq+dst_strideq] + movx m3, [dstq+dst_strideq] pavgb m0, m2 punpcklbw m1, m5 +%if %1 > 4 pavgb m0, [secq] punpcklbw m3, m5 punpckhbw m2, m0, m5 punpcklbw m0, m5 +%else ; 4xh + movh m4, [secq] + pavgb m0, m4 + punpcklbw m3, m5 + punpcklbw m0, m5 + movhlps m2, m0 +%endif %else ; !avg - movh m4, [srcq+src_strideq*2] - movh m1, [dstq] + movx m4, [srcq+src_strideq*2] + movx m1, [dstq] pavgb m0, m2 - movh m3, [dstq+dst_strideq] + movx m3, [dstq+dst_strideq] pavgb m2, m4 punpcklbw m0, m5 punpcklbw m2, m5 @@ -343,7 +367,7 @@ SECTION .text %endif dec block_height jg .x_zero_y_half_loop - STORE_AND_RET + STORE_AND_RET %1 .x_zero_y_nonhalf: ; x_offset == 0 && y_offset == bilin interpolation @@ -351,7 +375,7 @@ SECTION .text lea bilin_filter, [bilin_filter_m] %endif shl y_offsetd, filter_idx_shift -%if ARCH_X86_64 && mmsize == 16 +%if ARCH_X86_64 && %1 > 4 mova m8, [bilin_filter+y_offsetq] %if notcpuflag(ssse3) ; FIXME(rbultje) don't scatter registers on x86-64 mova m9, [bilin_filter+y_offsetq+16] @@ -424,12 +448,12 @@ SECTION .text add srcq, src_strideq add dstq, dst_strideq %else ; %1 < 16 - movh m0, [srcq] - movh m2, [srcq+src_strideq] - movh m4, [srcq+src_strideq*2] - movh m3, [dstq+dst_strideq] + movx m0, [srcq] + movx m2, [srcq+src_strideq] + movx m4, [srcq+src_strideq*2] + movx m3, [dstq+dst_strideq] %if cpuflag(ssse3) - movh m1, [dstq] + movx m1, [dstq] punpcklbw m0, m2 punpcklbw m2, m4 pmaddubsw m0, filter_y_a @@ 
-449,17 +473,27 @@ SECTION .text pmullw m4, filter_y_b paddw m0, m1 paddw m2, filter_rnd - movh m1, [dstq] + movx m1, [dstq] paddw m2, m4 %endif psraw m0, 4 psraw m2, 4 %if %2 == 1 ; avg ; FIXME(rbultje) pipeline +%if %1 == 4 + movlhps m0, m2 +%endif packuswb m0, m2 +%if %1 > 4 pavgb m0, [secq] punpckhbw m2, m0, m5 punpcklbw m0, m5 +%else ; 4xh + movh m2, [secq] + pavgb m0, m2 + punpcklbw m0, m5 + movhlps m2, m0 +%endif %endif punpcklbw m1, m5 SUM_SSE m0, m1, m2, m3, m6, m7 @@ -475,10 +509,10 @@ SECTION .text %undef filter_y_a %undef filter_y_b %undef filter_rnd - STORE_AND_RET + STORE_AND_RET %1 .x_nonzero: - cmp x_offsetd, 8 + cmp x_offsetd, 4 jne .x_nonhalf ; x_offset == 0.5 test y_offsetd, y_offsetd @@ -503,30 +537,40 @@ SECTION .text add srcq, src_strideq add dstq, dst_strideq %else ; %1 < 16 - movh m0, [srcq] - movh m4, [srcq+1] + movx m0, [srcq] + movx m4, [srcq+1] %if %2 == 1 ; avg -%if mmsize == 16 +%if %1 > 4 movhps m0, [srcq+src_strideq] movhps m4, [srcq+src_strideq+1] -%else ; mmsize == 8 - punpckldq m0, [srcq+src_strideq] - punpckldq m4, [srcq+src_strideq+1] +%else ; 4xh + movx m1, [srcq+src_strideq] + punpckldq m0, m1 + movx m2, [srcq+src_strideq+1] + punpckldq m4, m2 %endif - movh m1, [dstq] - movh m3, [dstq+dst_strideq] + movx m1, [dstq] + movx m3, [dstq+dst_strideq] pavgb m0, m4 punpcklbw m3, m5 +%if %1 > 4 pavgb m0, [secq] punpcklbw m1, m5 punpckhbw m2, m0, m5 punpcklbw m0, m5 +%else ; 4xh + movh m2, [secq] + pavgb m0, m2 + punpcklbw m1, m5 + punpcklbw m0, m5 + movhlps m2, m0 +%endif %else ; !avg - movh m2, [srcq+src_strideq] - movh m1, [dstq] + movx m2, [srcq+src_strideq] + movx m1, [dstq] pavgb m0, m4 - movh m4, [srcq+src_strideq+1] - movh m3, [dstq+dst_strideq] + movx m4, [srcq+src_strideq+1] + movx m3, [dstq+dst_strideq] pavgb m2, m4 punpcklbw m0, m5 punpcklbw m2, m5 @@ -543,10 +587,10 @@ SECTION .text %endif dec block_height jg .x_half_y_zero_loop - STORE_AND_RET + STORE_AND_RET %1 .x_half_y_nonzero: - cmp y_offsetd, 8 + cmp y_offsetd, 4 jne 
.x_half_y_nonhalf ; x_offset == 0.5 && y_offset == 0.5 @@ -578,53 +622,58 @@ SECTION .text add srcq, src_strideq add dstq, dst_strideq %else ; %1 < 16 - movh m0, [srcq] - movh m3, [srcq+1] + movx m0, [srcq] + movx m3, [srcq+1] add srcq, src_strideq pavgb m0, m3 .x_half_y_half_loop: - movh m2, [srcq] - movh m3, [srcq+1] + movx m2, [srcq] + movx m3, [srcq+1] %if %2 == 1 ; avg -%if mmsize == 16 +%if %1 > 4 movhps m2, [srcq+src_strideq] movhps m3, [srcq+src_strideq+1] %else -%if %1 == 4 - movh m1, [srcq+src_strideq] + movx m1, [srcq+src_strideq] punpckldq m2, m1 - movh m1, [srcq+src_strideq+1] + movx m1, [srcq+src_strideq+1] punpckldq m3, m1 -%else - punpckldq m2, [srcq+src_strideq] - punpckldq m3, [srcq+src_strideq+1] -%endif %endif pavgb m2, m3 -%if mmsize == 16 +%if %1 > 4 movlhps m0, m2 movhlps m4, m2 -%else ; mmsize == 8 +%else ; 4xh punpckldq m0, m2 - pshufw m4, m2, 0xe + pshuflw m4, m2, 0xe %endif - movh m1, [dstq] + movx m1, [dstq] pavgb m0, m2 - movh m3, [dstq+dst_strideq] + movx m3, [dstq+dst_strideq] +%if %1 > 4 pavgb m0, [secq] +%else + movh m2, [secq] + pavgb m0, m2 +%endif punpcklbw m3, m5 punpcklbw m1, m5 +%if %1 > 4 punpckhbw m2, m0, m5 punpcklbw m0, m5 +%else + punpcklbw m0, m5 + movhlps m2, m0 +%endif %else ; !avg - movh m4, [srcq+src_strideq] - movh m1, [srcq+src_strideq+1] + movx m4, [srcq+src_strideq] + movx m1, [srcq+src_strideq+1] pavgb m2, m3 pavgb m4, m1 pavgb m0, m2 pavgb m2, m4 - movh m1, [dstq] - movh m3, [dstq+dst_strideq] + movx m1, [dstq] + movx m3, [dstq+dst_strideq] punpcklbw m0, m5 punpcklbw m2, m5 punpcklbw m3, m5 @@ -641,7 +690,7 @@ SECTION .text %endif dec block_height jg .x_half_y_half_loop - STORE_AND_RET + STORE_AND_RET %1 .x_half_y_nonhalf: ; x_offset == 0.5 && y_offset == bilin interpolation @@ -649,7 +698,7 @@ SECTION .text lea bilin_filter, [bilin_filter_m] %endif shl y_offsetd, filter_idx_shift -%if ARCH_X86_64 && mmsize == 16 +%if ARCH_X86_64 && %1 > 4 mova m8, [bilin_filter+y_offsetq] %if notcpuflag(ssse3) ; FIXME(rbultje) 
don't scatter registers on x86-64 mova m9, [bilin_filter+y_offsetq+16] @@ -724,23 +773,23 @@ SECTION .text add srcq, src_strideq add dstq, dst_strideq %else ; %1 < 16 - movh m0, [srcq] - movh m3, [srcq+1] + movx m0, [srcq] + movx m3, [srcq+1] add srcq, src_strideq pavgb m0, m3 %if notcpuflag(ssse3) punpcklbw m0, m5 %endif .x_half_y_other_loop: - movh m2, [srcq] - movh m1, [srcq+1] - movh m4, [srcq+src_strideq] - movh m3, [srcq+src_strideq+1] + movx m2, [srcq] + movx m1, [srcq+1] + movx m4, [srcq+src_strideq] + movx m3, [srcq+src_strideq+1] pavgb m2, m1 pavgb m4, m3 - movh m3, [dstq+dst_strideq] + movx m3, [dstq+dst_strideq] %if cpuflag(ssse3) - movh m1, [dstq] + movx m1, [dstq] punpcklbw m0, m2 punpcklbw m2, m4 pmaddubsw m0, filter_y_a @@ -760,16 +809,26 @@ SECTION .text pmullw m1, m4, filter_y_b paddw m2, filter_rnd paddw m2, m1 - movh m1, [dstq] + movx m1, [dstq] %endif psraw m0, 4 psraw m2, 4 %if %2 == 1 ; avg ; FIXME(rbultje) pipeline +%if %1 == 4 + movlhps m0, m2 +%endif packuswb m0, m2 +%if %1 > 4 pavgb m0, [secq] punpckhbw m2, m0, m5 punpcklbw m0, m5 +%else + movh m2, [secq] + pavgb m0, m2 + punpcklbw m0, m5 + movhlps m2, m0 +%endif %endif punpcklbw m1, m5 SUM_SSE m0, m1, m2, m3, m6, m7 @@ -786,7 +845,7 @@ SECTION .text %undef filter_y_a %undef filter_y_b %undef filter_rnd - STORE_AND_RET + STORE_AND_RET %1 .x_nonhalf: test y_offsetd, y_offsetd @@ -797,7 +856,7 @@ SECTION .text lea bilin_filter, [bilin_filter_m] %endif shl x_offsetd, filter_idx_shift -%if ARCH_X86_64 && mmsize == 16 +%if ARCH_X86_64 && %1 > 4 mova m8, [bilin_filter+x_offsetq] %if notcpuflag(ssse3) ; FIXME(rbultje) don't scatter registers on x86-64 mova m9, [bilin_filter+x_offsetq+16] @@ -865,14 +924,14 @@ SECTION .text add srcq, src_strideq add dstq, dst_strideq %else ; %1 < 16 - movh m0, [srcq] - movh m1, [srcq+1] - movh m2, [srcq+src_strideq] - movh m4, [srcq+src_strideq+1] - movh m3, [dstq+dst_strideq] + movx m0, [srcq] + movx m1, [srcq+1] + movx m2, [srcq+src_strideq] + movx m4, 
[srcq+src_strideq+1] + movx m3, [dstq+dst_strideq] %if cpuflag(ssse3) punpcklbw m0, m1 - movh m1, [dstq] + movx m1, [dstq] punpcklbw m2, m4 pmaddubsw m0, filter_x_a pmaddubsw m2, filter_x_a @@ -892,17 +951,27 @@ SECTION .text pmullw m4, filter_x_b paddw m0, m1 paddw m2, filter_rnd - movh m1, [dstq] + movx m1, [dstq] paddw m2, m4 %endif psraw m0, 4 psraw m2, 4 %if %2 == 1 ; avg ; FIXME(rbultje) pipeline +%if %1 == 4 + movlhps m0, m2 +%endif packuswb m0, m2 +%if %1 > 4 pavgb m0, [secq] punpckhbw m2, m0, m5 punpcklbw m0, m5 +%else + movh m2, [secq] + pavgb m0, m2 + punpcklbw m0, m5 + movhlps m2, m0 +%endif %endif punpcklbw m1, m5 SUM_SSE m0, m1, m2, m3, m6, m7 @@ -918,10 +987,10 @@ SECTION .text %undef filter_x_a %undef filter_x_b %undef filter_rnd - STORE_AND_RET + STORE_AND_RET %1 .x_nonhalf_y_nonzero: - cmp y_offsetd, 8 + cmp y_offsetd, 4 jne .x_nonhalf_y_nonhalf ; x_offset == bilin interpolation && y_offset == 0.5 @@ -929,7 +998,7 @@ SECTION .text lea bilin_filter, [bilin_filter_m] %endif shl x_offsetd, filter_idx_shift -%if ARCH_X86_64 && mmsize == 16 +%if ARCH_X86_64 && %1 > 4 mova m8, [bilin_filter+x_offsetq] %if notcpuflag(ssse3) ; FIXME(rbultje) don't scatter registers on x86-64 mova m9, [bilin_filter+x_offsetq+16] @@ -1037,8 +1106,8 @@ SECTION .text add srcq, src_strideq add dstq, dst_strideq %else ; %1 < 16 - movh m0, [srcq] - movh m1, [srcq+1] + movx m0, [srcq] + movx m1, [srcq+1] %if cpuflag(ssse3) punpcklbw m0, m1 pmaddubsw m0, filter_x_a @@ -1054,17 +1123,17 @@ SECTION .text add srcq, src_strideq psraw m0, 4 .x_other_y_half_loop: - movh m2, [srcq] - movh m1, [srcq+1] - movh m4, [srcq+src_strideq] - movh m3, [srcq+src_strideq+1] + movx m2, [srcq] + movx m1, [srcq+1] + movx m4, [srcq+src_strideq] + movx m3, [srcq+src_strideq+1] %if cpuflag(ssse3) punpcklbw m2, m1 punpcklbw m4, m3 pmaddubsw m2, filter_x_a pmaddubsw m4, filter_x_a - movh m1, [dstq] - movh m3, [dstq+dst_strideq] + movx m1, [dstq] + movx m3, [dstq+dst_strideq] paddw m2, filter_rnd paddw m4, 
filter_rnd %else @@ -1079,9 +1148,9 @@ SECTION .text pmullw m3, filter_x_b paddw m4, filter_rnd paddw m2, m1 - movh m1, [dstq] + movx m1, [dstq] paddw m4, m3 - movh m3, [dstq+dst_strideq] + movx m3, [dstq+dst_strideq] %endif psraw m2, 4 psraw m4, 4 @@ -1089,10 +1158,20 @@ SECTION .text pavgw m2, m4 %if %2 == 1 ; avg ; FIXME(rbultje) pipeline - also consider going to bytes here +%if %1 == 4 + movlhps m0, m2 +%endif packuswb m0, m2 +%if %1 > 4 pavgb m0, [secq] punpckhbw m2, m0, m5 punpcklbw m0, m5 +%else + movh m2, [secq] + pavgb m0, m2 + punpcklbw m0, m5 + movhlps m2, m0 +%endif %endif punpcklbw m3, m5 punpcklbw m1, m5 @@ -1110,7 +1189,7 @@ SECTION .text %undef filter_x_a %undef filter_x_b %undef filter_rnd - STORE_AND_RET + STORE_AND_RET %1 .x_nonhalf_y_nonhalf: %ifdef PIC @@ -1118,7 +1197,7 @@ SECTION .text %endif shl x_offsetd, filter_idx_shift shl y_offsetd, filter_idx_shift -%if ARCH_X86_64 && mmsize == 16 +%if ARCH_X86_64 && %1 > 4 mova m8, [bilin_filter+x_offsetq] %if notcpuflag(ssse3) ; FIXME(rbultje) don't scatter registers on x86-64 mova m9, [bilin_filter+x_offsetq+16] @@ -1261,8 +1340,8 @@ SECTION .text INC_SRC_BY_SRC_STRIDE add dstq, dst_strideq %else ; %1 < 16 - movh m0, [srcq] - movh m1, [srcq+1] + movx m0, [srcq] + movx m1, [srcq+1] %if cpuflag(ssse3) punpcklbw m0, m1 pmaddubsw m0, filter_x_a @@ -1283,20 +1362,20 @@ SECTION .text INC_SRC_BY_SRC_STRIDE .x_other_y_other_loop: - movh m2, [srcq] - movh m1, [srcq+1] + movx m2, [srcq] + movx m1, [srcq+1] INC_SRC_BY_SRC_STRIDE - movh m4, [srcq] - movh m3, [srcq+1] + movx m4, [srcq] + movx m3, [srcq+1] %if cpuflag(ssse3) punpcklbw m2, m1 punpcklbw m4, m3 pmaddubsw m2, filter_x_a pmaddubsw m4, filter_x_a - movh m3, [dstq+dst_strideq] - movh m1, [dstq] + movx m3, [dstq+dst_strideq] + movx m1, [dstq] paddw m2, filter_rnd paddw m4, filter_rnd psraw m2, 4 @@ -1335,9 +1414,9 @@ SECTION .text pmullw m1, m4, filter_y_b paddw m2, filter_rnd paddw m0, m3 - movh m3, [dstq+dst_strideq] + movx m3, [dstq+dst_strideq] paddw 
m2, m1 - movh m1, [dstq] + movx m1, [dstq] psraw m0, 4 psraw m2, 4 punpcklbw m3, m5 @@ -1345,10 +1424,20 @@ SECTION .text %endif %if %2 == 1 ; avg ; FIXME(rbultje) pipeline +%if %1 == 4 + movlhps m0, m2 +%endif packuswb m0, m2 +%if %1 > 4 pavgb m0, [secq] punpckhbw m2, m0, m5 punpcklbw m0, m5 +%else + movh m2, [secq] + pavgb m0, m2 + punpcklbw m0, m5 + movhlps m2, m0 +%endif %endif SUM_SSE m0, m1, m2, m3, m6, m7 mova m0, m4 @@ -1366,7 +1455,8 @@ SECTION .text %undef filter_y_a %undef filter_y_b %undef filter_rnd - STORE_AND_RET +%undef movx + STORE_AND_RET %1 %endmacro ; FIXME(rbultje) the non-bilinear versions (i.e. x=0,8&&y=0,8) are identical @@ -1375,26 +1465,22 @@ SECTION .text ; location in the sse/2 version, rather than duplicating that code in the ; binary. -INIT_MMX sse -SUBPEL_VARIANCE 4 INIT_XMM sse2 +SUBPEL_VARIANCE 4 SUBPEL_VARIANCE 8 SUBPEL_VARIANCE 16 -INIT_MMX ssse3 -SUBPEL_VARIANCE 4 INIT_XMM ssse3 +SUBPEL_VARIANCE 4 SUBPEL_VARIANCE 8 SUBPEL_VARIANCE 16 -INIT_MMX sse -SUBPEL_VARIANCE 4, 1 INIT_XMM sse2 +SUBPEL_VARIANCE 4, 1 SUBPEL_VARIANCE 8, 1 SUBPEL_VARIANCE 16, 1 -INIT_MMX ssse3 -SUBPEL_VARIANCE 4, 1 INIT_XMM ssse3 +SUBPEL_VARIANCE 4, 1 SUBPEL_VARIANCE 8, 1 SUBPEL_VARIANCE 16, 1 diff --git a/libs/libvpx/vpx_dsp/x86/sum_squares_sse2.c b/libs/libvpx/vpx_dsp/x86/sum_squares_sse2.c new file mode 100644 index 0000000000..bc5362e10f --- /dev/null +++ b/libs/libvpx/vpx_dsp/x86/sum_squares_sse2.c @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2016 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include +#include +#include + +#include "./vpx_dsp_rtcd.h" + +static uint64_t vpx_sum_squares_2d_i16_4x4_sse2(const int16_t *src, + int stride) { + const __m128i v_val_0_w = + _mm_loadl_epi64((const __m128i *)(src + 0 * stride)); + const __m128i v_val_1_w = + _mm_loadl_epi64((const __m128i *)(src + 1 * stride)); + const __m128i v_val_2_w = + _mm_loadl_epi64((const __m128i *)(src + 2 * stride)); + const __m128i v_val_3_w = + _mm_loadl_epi64((const __m128i *)(src + 3 * stride)); + + const __m128i v_sq_0_d = _mm_madd_epi16(v_val_0_w, v_val_0_w); + const __m128i v_sq_1_d = _mm_madd_epi16(v_val_1_w, v_val_1_w); + const __m128i v_sq_2_d = _mm_madd_epi16(v_val_2_w, v_val_2_w); + const __m128i v_sq_3_d = _mm_madd_epi16(v_val_3_w, v_val_3_w); + + const __m128i v_sum_01_d = _mm_add_epi32(v_sq_0_d, v_sq_1_d); + const __m128i v_sum_23_d = _mm_add_epi32(v_sq_2_d, v_sq_3_d); + const __m128i v_sum_0123_d = _mm_add_epi32(v_sum_01_d, v_sum_23_d); + + const __m128i v_sum_d = + _mm_add_epi32(v_sum_0123_d, _mm_srli_epi64(v_sum_0123_d, 32)); + + return (uint64_t)_mm_cvtsi128_si32(v_sum_d); +} + +// TODO(jingning): Evaluate the performance impact here. +#ifdef __GNUC__ +// This prevents GCC/Clang from inlining this function into +// vpx_sum_squares_2d_i16_sse2, which in turn saves some stack +// maintenance instructions in the common case of 4x4. 
+__attribute__((noinline)) +#endif +static uint64_t +vpx_sum_squares_2d_i16_nxn_sse2(const int16_t *src, int stride, int size) { + int r, c; + const __m128i v_zext_mask_q = _mm_set_epi32(0, 0xffffffff, 0, 0xffffffff); + __m128i v_acc_q = _mm_setzero_si128(); + + for (r = 0; r < size; r += 8) { + __m128i v_acc_d = _mm_setzero_si128(); + + for (c = 0; c < size; c += 8) { + const int16_t *b = src + c; + const __m128i v_val_0_w = + _mm_load_si128((const __m128i *)(b + 0 * stride)); + const __m128i v_val_1_w = + _mm_load_si128((const __m128i *)(b + 1 * stride)); + const __m128i v_val_2_w = + _mm_load_si128((const __m128i *)(b + 2 * stride)); + const __m128i v_val_3_w = + _mm_load_si128((const __m128i *)(b + 3 * stride)); + const __m128i v_val_4_w = + _mm_load_si128((const __m128i *)(b + 4 * stride)); + const __m128i v_val_5_w = + _mm_load_si128((const __m128i *)(b + 5 * stride)); + const __m128i v_val_6_w = + _mm_load_si128((const __m128i *)(b + 6 * stride)); + const __m128i v_val_7_w = + _mm_load_si128((const __m128i *)(b + 7 * stride)); + + const __m128i v_sq_0_d = _mm_madd_epi16(v_val_0_w, v_val_0_w); + const __m128i v_sq_1_d = _mm_madd_epi16(v_val_1_w, v_val_1_w); + const __m128i v_sq_2_d = _mm_madd_epi16(v_val_2_w, v_val_2_w); + const __m128i v_sq_3_d = _mm_madd_epi16(v_val_3_w, v_val_3_w); + const __m128i v_sq_4_d = _mm_madd_epi16(v_val_4_w, v_val_4_w); + const __m128i v_sq_5_d = _mm_madd_epi16(v_val_5_w, v_val_5_w); + const __m128i v_sq_6_d = _mm_madd_epi16(v_val_6_w, v_val_6_w); + const __m128i v_sq_7_d = _mm_madd_epi16(v_val_7_w, v_val_7_w); + + const __m128i v_sum_01_d = _mm_add_epi32(v_sq_0_d, v_sq_1_d); + const __m128i v_sum_23_d = _mm_add_epi32(v_sq_2_d, v_sq_3_d); + const __m128i v_sum_45_d = _mm_add_epi32(v_sq_4_d, v_sq_5_d); + const __m128i v_sum_67_d = _mm_add_epi32(v_sq_6_d, v_sq_7_d); + + const __m128i v_sum_0123_d = _mm_add_epi32(v_sum_01_d, v_sum_23_d); + const __m128i v_sum_4567_d = _mm_add_epi32(v_sum_45_d, v_sum_67_d); + + v_acc_d = 
_mm_add_epi32(v_acc_d, v_sum_0123_d); + v_acc_d = _mm_add_epi32(v_acc_d, v_sum_4567_d); + } + + v_acc_q = _mm_add_epi64(v_acc_q, _mm_and_si128(v_acc_d, v_zext_mask_q)); + v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_epi64(v_acc_d, 32)); + + src += 8 * stride; + } + + v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_si128(v_acc_q, 8)); + +#if ARCH_X86_64 + return (uint64_t)_mm_cvtsi128_si64(v_acc_q); +#else + { + uint64_t tmp; + _mm_storel_epi64((__m128i *)&tmp, v_acc_q); + return tmp; + } +#endif +} + +uint64_t vpx_sum_squares_2d_i16_sse2(const int16_t *src, int stride, int size) { + // 4 elements per row only requires half an XMM register, so this + // must be a special case, but also note that over 75% of all calls + // are with size == 4, so it is also the common case. + if (size == 4) { + return vpx_sum_squares_2d_i16_4x4_sse2(src, stride); + } else { + // Generic case + return vpx_sum_squares_2d_i16_nxn_sse2(src, stride, size); + } +} diff --git a/libs/libvpx/vpx_dsp/x86/txfm_common_sse2.h b/libs/libvpx/vpx_dsp/x86/txfm_common_sse2.h index 536b206876..f8edb1b787 100644 --- a/libs/libvpx/vpx_dsp/x86/txfm_common_sse2.h +++ b/libs/libvpx/vpx_dsp/x86/txfm_common_sse2.h @@ -14,15 +14,15 @@ #include #include "vpx/vpx_integer.h" -#define pair_set_epi16(a, b) \ +#define pair_set_epi16(a, b) \ _mm_set_epi16((int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \ (int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a)) -#define dual_set_epi16(a, b) \ +#define dual_set_epi16(a, b) \ _mm_set_epi16((int16_t)(b), (int16_t)(b), (int16_t)(b), (int16_t)(b), \ (int16_t)(a), (int16_t)(a), (int16_t)(a), (int16_t)(a)) -#define octa_set_epi16(a, b, c, d, e, f, g, h) \ +#define octa_set_epi16(a, b, c, d, e, f, g, h) \ _mm_setr_epi16((int16_t)(a), (int16_t)(b), (int16_t)(c), (int16_t)(d), \ (int16_t)(e), (int16_t)(f), (int16_t)(g), (int16_t)(h)) diff --git a/libs/libvpx/vpx_dsp/x86/variance_avx2.c b/libs/libvpx/vpx_dsp/x86/variance_avx2.c index 7851a98b14..8428e0520d 100644 --- 
a/libs/libvpx/vpx_dsp/x86/variance_avx2.c +++ b/libs/libvpx/vpx_dsp/x86/variance_avx2.c @@ -14,13 +14,13 @@ typedef void (*get_var_avx2)(const uint8_t *src, int src_stride, unsigned int *sse, int *sum); void vpx_get32x32var_avx2(const uint8_t *src, int src_stride, - const uint8_t *ref, int ref_stride, - unsigned int *sse, int *sum); + const uint8_t *ref, int ref_stride, unsigned int *sse, + int *sum); static void variance_avx2(const uint8_t *src, int src_stride, - const uint8_t *ref, int ref_stride, - int w, int h, unsigned int *sse, int *sum, - get_var_avx2 var_fn, int block_size) { + const uint8_t *ref, int ref_stride, int w, int h, + unsigned int *sse, int *sum, get_var_avx2 var_fn, + int block_size) { int i, j; *sse = 0; @@ -30,22 +30,21 @@ static void variance_avx2(const uint8_t *src, int src_stride, for (j = 0; j < w; j += block_size) { unsigned int sse0; int sum0; - var_fn(&src[src_stride * i + j], src_stride, - &ref[ref_stride * i + j], ref_stride, &sse0, &sum0); + var_fn(&src[src_stride * i + j], src_stride, &ref[ref_stride * i + j], + ref_stride, &sse0, &sum0); *sse += sse0; *sum += sum0; } } } - unsigned int vpx_variance16x16_avx2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; - variance_avx2(src, src_stride, ref, ref_stride, 16, 16, - sse, &sum, vpx_get16x16var_avx2, 16); - return *sse - (((unsigned int)sum * sum) >> 8); + variance_avx2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum, + vpx_get16x16var_avx2, 16); + return *sse - (((uint32_t)((int64_t)sum * sum)) >> 8); } unsigned int vpx_mse16x16_avx2(const uint8_t *src, int src_stride, @@ -60,124 +59,97 @@ unsigned int vpx_variance32x16_avx2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; - variance_avx2(src, src_stride, ref, ref_stride, 32, 16, - sse, &sum, vpx_get32x32var_avx2, 32); - return *sse - (((int64_t)sum * sum) >> 9); + variance_avx2(src, src_stride, ref, ref_stride, 32, 16, sse, 
&sum, + vpx_get32x32var_avx2, 32); + return *sse - (uint32_t)(((int64_t)sum * sum) >> 9); } unsigned int vpx_variance32x32_avx2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; - variance_avx2(src, src_stride, ref, ref_stride, 32, 32, - sse, &sum, vpx_get32x32var_avx2, 32); - return *sse - (((int64_t)sum * sum) >> 10); + variance_avx2(src, src_stride, ref, ref_stride, 32, 32, sse, &sum, + vpx_get32x32var_avx2, 32); + return *sse - (uint32_t)(((int64_t)sum * sum) >> 10); } unsigned int vpx_variance64x64_avx2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; - variance_avx2(src, src_stride, ref, ref_stride, 64, 64, - sse, &sum, vpx_get32x32var_avx2, 32); - return *sse - (((int64_t)sum * sum) >> 12); + variance_avx2(src, src_stride, ref, ref_stride, 64, 64, sse, &sum, + vpx_get32x32var_avx2, 32); + return *sse - (uint32_t)(((int64_t)sum * sum) >> 12); } unsigned int vpx_variance64x32_avx2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; - variance_avx2(src, src_stride, ref, ref_stride, 64, 32, - sse, &sum, vpx_get32x32var_avx2, 32); - return *sse - (((int64_t)sum * sum) >> 11); + variance_avx2(src, src_stride, ref, ref_stride, 64, 32, sse, &sum, + vpx_get32x32var_avx2, 32); + return *sse - (uint32_t)(((int64_t)sum * sum) >> 11); } unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride, int x_offset, int y_offset, const uint8_t *dst, int dst_stride, - int height, - unsigned int *sse); + int height, unsigned int *sse); -unsigned int vpx_sub_pixel_avg_variance32xh_avx2(const uint8_t *src, - int src_stride, - int x_offset, - int y_offset, - const uint8_t *dst, - int dst_stride, - const uint8_t *sec, - int sec_stride, - int height, - unsigned int *sseptr); +unsigned int vpx_sub_pixel_avg_variance32xh_avx2( + const uint8_t *src, int src_stride, int x_offset, int y_offset, + const uint8_t *dst, 
int dst_stride, const uint8_t *sec, int sec_stride, + int height, unsigned int *sseptr); unsigned int vpx_sub_pixel_variance64x64_avx2(const uint8_t *src, - int src_stride, - int x_offset, - int y_offset, - const uint8_t *dst, + int src_stride, int x_offset, + int y_offset, const uint8_t *dst, int dst_stride, unsigned int *sse) { unsigned int sse1; - const int se1 = vpx_sub_pixel_variance32xh_avx2(src, src_stride, x_offset, - y_offset, dst, dst_stride, - 64, &sse1); + const int se1 = vpx_sub_pixel_variance32xh_avx2( + src, src_stride, x_offset, y_offset, dst, dst_stride, 64, &sse1); unsigned int sse2; - const int se2 = vpx_sub_pixel_variance32xh_avx2(src + 32, src_stride, - x_offset, y_offset, - dst + 32, dst_stride, - 64, &sse2); + const int se2 = + vpx_sub_pixel_variance32xh_avx2(src + 32, src_stride, x_offset, y_offset, + dst + 32, dst_stride, 64, &sse2); const int se = se1 + se2; *sse = sse1 + sse2; - return *sse - (((int64_t)se * se) >> 12); + return *sse - (uint32_t)(((int64_t)se * se) >> 12); } unsigned int vpx_sub_pixel_variance32x32_avx2(const uint8_t *src, - int src_stride, - int x_offset, - int y_offset, - const uint8_t *dst, + int src_stride, int x_offset, + int y_offset, const uint8_t *dst, int dst_stride, unsigned int *sse) { - const int se = vpx_sub_pixel_variance32xh_avx2(src, src_stride, x_offset, - y_offset, dst, dst_stride, - 32, sse); - return *sse - (((int64_t)se * se) >> 10); + const int se = vpx_sub_pixel_variance32xh_avx2( + src, src_stride, x_offset, y_offset, dst, dst_stride, 32, sse); + return *sse - (uint32_t)(((int64_t)se * se) >> 10); } -unsigned int vpx_sub_pixel_avg_variance64x64_avx2(const uint8_t *src, - int src_stride, - int x_offset, - int y_offset, - const uint8_t *dst, - int dst_stride, - unsigned int *sse, - const uint8_t *sec) { +unsigned int vpx_sub_pixel_avg_variance64x64_avx2( + const uint8_t *src, int src_stride, int x_offset, int y_offset, + const uint8_t *dst, int dst_stride, unsigned int *sse, const uint8_t *sec) { 
unsigned int sse1; - const int se1 = vpx_sub_pixel_avg_variance32xh_avx2(src, src_stride, x_offset, - y_offset, dst, dst_stride, - sec, 64, 64, &sse1); + const int se1 = vpx_sub_pixel_avg_variance32xh_avx2( + src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 64, 64, &sse1); unsigned int sse2; - const int se2 = - vpx_sub_pixel_avg_variance32xh_avx2(src + 32, src_stride, x_offset, - y_offset, dst + 32, dst_stride, - sec + 32, 64, 64, &sse2); + const int se2 = vpx_sub_pixel_avg_variance32xh_avx2( + src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, sec + 32, + 64, 64, &sse2); const int se = se1 + se2; *sse = sse1 + sse2; - return *sse - (((int64_t)se * se) >> 12); + return *sse - (uint32_t)(((int64_t)se * se) >> 12); } -unsigned int vpx_sub_pixel_avg_variance32x32_avx2(const uint8_t *src, - int src_stride, - int x_offset, - int y_offset, - const uint8_t *dst, - int dst_stride, - unsigned int *sse, - const uint8_t *sec) { +unsigned int vpx_sub_pixel_avg_variance32x32_avx2( + const uint8_t *src, int src_stride, int x_offset, int y_offset, + const uint8_t *dst, int dst_stride, unsigned int *sse, const uint8_t *sec) { // Process 32 elements in parallel. 
- const int se = vpx_sub_pixel_avg_variance32xh_avx2(src, src_stride, x_offset, - y_offset, dst, dst_stride, - sec, 32, 32, sse); - return *sse - (((int64_t)se * se) >> 10); + const int se = vpx_sub_pixel_avg_variance32xh_avx2( + src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 32, 32, sse); + return *sse - (uint32_t)(((int64_t)se * se) >> 10); } diff --git a/libs/libvpx/vpx_dsp/x86/variance_impl_avx2.c b/libs/libvpx/vpx_dsp/x86/variance_impl_avx2.c index b289e9a0c7..51e6b19ad1 100644 --- a/libs/libvpx/vpx_dsp/x86/variance_impl_avx2.c +++ b/libs/libvpx/vpx_dsp/x86/variance_impl_avx2.c @@ -13,307 +13,294 @@ #include "./vpx_dsp_rtcd.h" #include "vpx_ports/mem.h" +/* clang-format off */ DECLARE_ALIGNED(32, static const uint8_t, bilinear_filters_avx2[512]) = { - 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, - 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, - 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, - 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, - 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, - 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, - 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, - 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, - 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, - 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, - 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, - 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, - 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, + 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, + 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, 16, 0, + 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, + 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, + 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, + 12, 4, 12, 4, 12, 
4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, + 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, + 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, + 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, + 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, 6, 10, + 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, + 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, 4, 12, + 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, + 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, 2, 14, }; +/* clang-format on */ +void vpx_get16x16var_avx2(const unsigned char *src_ptr, int source_stride, + const unsigned char *ref_ptr, int recon_stride, + unsigned int *SSE, int *Sum) { + __m256i src, src_expand_low, src_expand_high, ref, ref_expand_low; + __m256i ref_expand_high, madd_low, madd_high; + unsigned int i, src_2strides, ref_2strides; + __m256i zero_reg = _mm256_set1_epi16(0); + __m256i sum_ref_src = _mm256_set1_epi16(0); + __m256i madd_ref_src = _mm256_set1_epi16(0); -void vpx_get16x16var_avx2(const unsigned char *src_ptr, - int source_stride, - const unsigned char *ref_ptr, - int recon_stride, - unsigned int *SSE, - int *Sum) { - __m256i src, src_expand_low, src_expand_high, ref, ref_expand_low; - __m256i ref_expand_high, madd_low, madd_high; - unsigned int i, src_2strides, ref_2strides; - __m256i zero_reg = _mm256_set1_epi16(0); - __m256i sum_ref_src = _mm256_set1_epi16(0); - __m256i madd_ref_src = _mm256_set1_epi16(0); + // processing two strides in a 256 bit register reducing the number + // of loop stride by half (comparing to the sse2 code) + src_2strides = source_stride << 1; + ref_2strides = recon_stride << 1; + for (i = 0; i < 8; i++) { + src = _mm256_castsi128_si256(_mm_loadu_si128((__m128i const *)(src_ptr))); + src = _mm256_inserti128_si256( + src, _mm_loadu_si128((__m128i const *)(src_ptr + source_stride)), 1); - // processing two strides in a 256 
bit register reducing the number - // of loop stride by half (comparing to the sse2 code) - src_2strides = source_stride << 1; - ref_2strides = recon_stride << 1; - for (i = 0; i < 8; i++) { - src = _mm256_castsi128_si256( - _mm_loadu_si128((__m128i const *) (src_ptr))); - src = _mm256_inserti128_si256(src, - _mm_loadu_si128((__m128i const *)(src_ptr+source_stride)), 1); + ref = _mm256_castsi128_si256(_mm_loadu_si128((__m128i const *)(ref_ptr))); + ref = _mm256_inserti128_si256( + ref, _mm_loadu_si128((__m128i const *)(ref_ptr + recon_stride)), 1); - ref =_mm256_castsi128_si256( - _mm_loadu_si128((__m128i const *) (ref_ptr))); - ref = _mm256_inserti128_si256(ref, - _mm_loadu_si128((__m128i const *)(ref_ptr+recon_stride)), 1); + // expanding to 16 bit each lane + src_expand_low = _mm256_unpacklo_epi8(src, zero_reg); + src_expand_high = _mm256_unpackhi_epi8(src, zero_reg); - // expanding to 16 bit each lane - src_expand_low = _mm256_unpacklo_epi8(src, zero_reg); - src_expand_high = _mm256_unpackhi_epi8(src, zero_reg); + ref_expand_low = _mm256_unpacklo_epi8(ref, zero_reg); + ref_expand_high = _mm256_unpackhi_epi8(ref, zero_reg); - ref_expand_low = _mm256_unpacklo_epi8(ref, zero_reg); - ref_expand_high = _mm256_unpackhi_epi8(ref, zero_reg); + // src-ref + src_expand_low = _mm256_sub_epi16(src_expand_low, ref_expand_low); + src_expand_high = _mm256_sub_epi16(src_expand_high, ref_expand_high); - // src-ref - src_expand_low = _mm256_sub_epi16(src_expand_low, ref_expand_low); - src_expand_high = _mm256_sub_epi16(src_expand_high, ref_expand_high); + // madd low (src - ref) + madd_low = _mm256_madd_epi16(src_expand_low, src_expand_low); - // madd low (src - ref) - madd_low = _mm256_madd_epi16(src_expand_low, src_expand_low); + // add high to low + src_expand_low = _mm256_add_epi16(src_expand_low, src_expand_high); - // add high to low - src_expand_low = _mm256_add_epi16(src_expand_low, src_expand_high); + // madd high (src - ref) + madd_high = 
_mm256_madd_epi16(src_expand_high, src_expand_high); - // madd high (src - ref) - madd_high = _mm256_madd_epi16(src_expand_high, src_expand_high); + sum_ref_src = _mm256_add_epi16(sum_ref_src, src_expand_low); - sum_ref_src = _mm256_add_epi16(sum_ref_src, src_expand_low); + // add high to low + madd_ref_src = + _mm256_add_epi32(madd_ref_src, _mm256_add_epi32(madd_low, madd_high)); - // add high to low - madd_ref_src = _mm256_add_epi32(madd_ref_src, - _mm256_add_epi32(madd_low, madd_high)); + src_ptr += src_2strides; + ref_ptr += ref_2strides; + } - src_ptr+= src_2strides; - ref_ptr+= ref_2strides; - } + { + __m128i sum_res, madd_res; + __m128i expand_sum_low, expand_sum_high, expand_sum; + __m128i expand_madd_low, expand_madd_high, expand_madd; + __m128i ex_expand_sum_low, ex_expand_sum_high, ex_expand_sum; - { - __m128i sum_res, madd_res; - __m128i expand_sum_low, expand_sum_high, expand_sum; - __m128i expand_madd_low, expand_madd_high, expand_madd; - __m128i ex_expand_sum_low, ex_expand_sum_high, ex_expand_sum; + // extract the low lane and add it to the high lane + sum_res = _mm_add_epi16(_mm256_castsi256_si128(sum_ref_src), + _mm256_extractf128_si256(sum_ref_src, 1)); - // extract the low lane and add it to the high lane - sum_res = _mm_add_epi16(_mm256_castsi256_si128(sum_ref_src), - _mm256_extractf128_si256(sum_ref_src, 1)); + madd_res = _mm_add_epi32(_mm256_castsi256_si128(madd_ref_src), + _mm256_extractf128_si256(madd_ref_src, 1)); - madd_res = _mm_add_epi32(_mm256_castsi256_si128(madd_ref_src), - _mm256_extractf128_si256(madd_ref_src, 1)); + // padding each 2 bytes with another 2 zeroed bytes + expand_sum_low = + _mm_unpacklo_epi16(_mm256_castsi256_si128(zero_reg), sum_res); + expand_sum_high = + _mm_unpackhi_epi16(_mm256_castsi256_si128(zero_reg), sum_res); - // padding each 2 bytes with another 2 zeroed bytes - expand_sum_low = _mm_unpacklo_epi16(_mm256_castsi256_si128(zero_reg), - sum_res); - expand_sum_high = 
_mm_unpackhi_epi16(_mm256_castsi256_si128(zero_reg), - sum_res); + // shifting the sign 16 bits right + expand_sum_low = _mm_srai_epi32(expand_sum_low, 16); + expand_sum_high = _mm_srai_epi32(expand_sum_high, 16); - // shifting the sign 16 bits right - expand_sum_low = _mm_srai_epi32(expand_sum_low, 16); - expand_sum_high = _mm_srai_epi32(expand_sum_high, 16); + expand_sum = _mm_add_epi32(expand_sum_low, expand_sum_high); - expand_sum = _mm_add_epi32(expand_sum_low, expand_sum_high); + // expand each 32 bits of the madd result to 64 bits + expand_madd_low = + _mm_unpacklo_epi32(madd_res, _mm256_castsi256_si128(zero_reg)); + expand_madd_high = + _mm_unpackhi_epi32(madd_res, _mm256_castsi256_si128(zero_reg)); - // expand each 32 bits of the madd result to 64 bits - expand_madd_low = _mm_unpacklo_epi32(madd_res, - _mm256_castsi256_si128(zero_reg)); - expand_madd_high = _mm_unpackhi_epi32(madd_res, - _mm256_castsi256_si128(zero_reg)); + expand_madd = _mm_add_epi32(expand_madd_low, expand_madd_high); - expand_madd = _mm_add_epi32(expand_madd_low, expand_madd_high); + ex_expand_sum_low = + _mm_unpacklo_epi32(expand_sum, _mm256_castsi256_si128(zero_reg)); + ex_expand_sum_high = + _mm_unpackhi_epi32(expand_sum, _mm256_castsi256_si128(zero_reg)); - ex_expand_sum_low = _mm_unpacklo_epi32(expand_sum, - _mm256_castsi256_si128(zero_reg)); - ex_expand_sum_high = _mm_unpackhi_epi32(expand_sum, - _mm256_castsi256_si128(zero_reg)); + ex_expand_sum = _mm_add_epi32(ex_expand_sum_low, ex_expand_sum_high); - ex_expand_sum = _mm_add_epi32(ex_expand_sum_low, ex_expand_sum_high); + // shift 8 bytes eight + madd_res = _mm_srli_si128(expand_madd, 8); + sum_res = _mm_srli_si128(ex_expand_sum, 8); - // shift 8 bytes eight - madd_res = _mm_srli_si128(expand_madd, 8); - sum_res = _mm_srli_si128(ex_expand_sum, 8); + madd_res = _mm_add_epi32(madd_res, expand_madd); + sum_res = _mm_add_epi32(sum_res, ex_expand_sum); - madd_res = _mm_add_epi32(madd_res, expand_madd); - sum_res = 
_mm_add_epi32(sum_res, ex_expand_sum); + *((int *)SSE) = _mm_cvtsi128_si32(madd_res); - *((int*)SSE)= _mm_cvtsi128_si32(madd_res); - - *((int*)Sum)= _mm_cvtsi128_si32(sum_res); - } + *((int *)Sum) = _mm_cvtsi128_si32(sum_res); + } } -void vpx_get32x32var_avx2(const unsigned char *src_ptr, - int source_stride, - const unsigned char *ref_ptr, - int recon_stride, - unsigned int *SSE, - int *Sum) { - __m256i src, src_expand_low, src_expand_high, ref, ref_expand_low; - __m256i ref_expand_high, madd_low, madd_high; - unsigned int i; - __m256i zero_reg = _mm256_set1_epi16(0); - __m256i sum_ref_src = _mm256_set1_epi16(0); - __m256i madd_ref_src = _mm256_set1_epi16(0); +void vpx_get32x32var_avx2(const unsigned char *src_ptr, int source_stride, + const unsigned char *ref_ptr, int recon_stride, + unsigned int *SSE, int *Sum) { + __m256i src, src_expand_low, src_expand_high, ref, ref_expand_low; + __m256i ref_expand_high, madd_low, madd_high; + unsigned int i; + __m256i zero_reg = _mm256_set1_epi16(0); + __m256i sum_ref_src = _mm256_set1_epi16(0); + __m256i madd_ref_src = _mm256_set1_epi16(0); - // processing 32 elements in parallel - for (i = 0; i < 16; i++) { - src = _mm256_loadu_si256((__m256i const *) (src_ptr)); + // processing 32 elements in parallel + for (i = 0; i < 16; i++) { + src = _mm256_loadu_si256((__m256i const *)(src_ptr)); - ref = _mm256_loadu_si256((__m256i const *) (ref_ptr)); + ref = _mm256_loadu_si256((__m256i const *)(ref_ptr)); - // expanding to 16 bit each lane - src_expand_low = _mm256_unpacklo_epi8(src, zero_reg); - src_expand_high = _mm256_unpackhi_epi8(src, zero_reg); + // expanding to 16 bit each lane + src_expand_low = _mm256_unpacklo_epi8(src, zero_reg); + src_expand_high = _mm256_unpackhi_epi8(src, zero_reg); - ref_expand_low = _mm256_unpacklo_epi8(ref, zero_reg); - ref_expand_high = _mm256_unpackhi_epi8(ref, zero_reg); + ref_expand_low = _mm256_unpacklo_epi8(ref, zero_reg); + ref_expand_high = _mm256_unpackhi_epi8(ref, zero_reg); - // src-ref - 
src_expand_low = _mm256_sub_epi16(src_expand_low, ref_expand_low); - src_expand_high = _mm256_sub_epi16(src_expand_high, ref_expand_high); + // src-ref + src_expand_low = _mm256_sub_epi16(src_expand_low, ref_expand_low); + src_expand_high = _mm256_sub_epi16(src_expand_high, ref_expand_high); - // madd low (src - ref) - madd_low = _mm256_madd_epi16(src_expand_low, src_expand_low); + // madd low (src - ref) + madd_low = _mm256_madd_epi16(src_expand_low, src_expand_low); - // add high to low - src_expand_low = _mm256_add_epi16(src_expand_low, src_expand_high); + // add high to low + src_expand_low = _mm256_add_epi16(src_expand_low, src_expand_high); - // madd high (src - ref) - madd_high = _mm256_madd_epi16(src_expand_high, src_expand_high); + // madd high (src - ref) + madd_high = _mm256_madd_epi16(src_expand_high, src_expand_high); - sum_ref_src = _mm256_add_epi16(sum_ref_src, src_expand_low); + sum_ref_src = _mm256_add_epi16(sum_ref_src, src_expand_low); - // add high to low - madd_ref_src = _mm256_add_epi32(madd_ref_src, - _mm256_add_epi32(madd_low, madd_high)); + // add high to low + madd_ref_src = + _mm256_add_epi32(madd_ref_src, _mm256_add_epi32(madd_low, madd_high)); - src_ptr+= source_stride; - ref_ptr+= recon_stride; - } + src_ptr += source_stride; + ref_ptr += recon_stride; + } - { - __m256i expand_sum_low, expand_sum_high, expand_sum; - __m256i expand_madd_low, expand_madd_high, expand_madd; - __m256i ex_expand_sum_low, ex_expand_sum_high, ex_expand_sum; + { + __m256i expand_sum_low, expand_sum_high, expand_sum; + __m256i expand_madd_low, expand_madd_high, expand_madd; + __m256i ex_expand_sum_low, ex_expand_sum_high, ex_expand_sum; - // padding each 2 bytes with another 2 zeroed bytes - expand_sum_low = _mm256_unpacklo_epi16(zero_reg, sum_ref_src); - expand_sum_high = _mm256_unpackhi_epi16(zero_reg, sum_ref_src); + // padding each 2 bytes with another 2 zeroed bytes + expand_sum_low = _mm256_unpacklo_epi16(zero_reg, sum_ref_src); + expand_sum_high = 
_mm256_unpackhi_epi16(zero_reg, sum_ref_src); - // shifting the sign 16 bits right - expand_sum_low = _mm256_srai_epi32(expand_sum_low, 16); - expand_sum_high = _mm256_srai_epi32(expand_sum_high, 16); + // shifting the sign 16 bits right + expand_sum_low = _mm256_srai_epi32(expand_sum_low, 16); + expand_sum_high = _mm256_srai_epi32(expand_sum_high, 16); - expand_sum = _mm256_add_epi32(expand_sum_low, expand_sum_high); + expand_sum = _mm256_add_epi32(expand_sum_low, expand_sum_high); - // expand each 32 bits of the madd result to 64 bits - expand_madd_low = _mm256_unpacklo_epi32(madd_ref_src, zero_reg); - expand_madd_high = _mm256_unpackhi_epi32(madd_ref_src, zero_reg); + // expand each 32 bits of the madd result to 64 bits + expand_madd_low = _mm256_unpacklo_epi32(madd_ref_src, zero_reg); + expand_madd_high = _mm256_unpackhi_epi32(madd_ref_src, zero_reg); - expand_madd = _mm256_add_epi32(expand_madd_low, expand_madd_high); + expand_madd = _mm256_add_epi32(expand_madd_low, expand_madd_high); - ex_expand_sum_low = _mm256_unpacklo_epi32(expand_sum, zero_reg); - ex_expand_sum_high = _mm256_unpackhi_epi32(expand_sum, zero_reg); + ex_expand_sum_low = _mm256_unpacklo_epi32(expand_sum, zero_reg); + ex_expand_sum_high = _mm256_unpackhi_epi32(expand_sum, zero_reg); - ex_expand_sum = _mm256_add_epi32(ex_expand_sum_low, ex_expand_sum_high); + ex_expand_sum = _mm256_add_epi32(ex_expand_sum_low, ex_expand_sum_high); - // shift 8 bytes eight - madd_ref_src = _mm256_srli_si256(expand_madd, 8); - sum_ref_src = _mm256_srli_si256(ex_expand_sum, 8); + // shift 8 bytes eight + madd_ref_src = _mm256_srli_si256(expand_madd, 8); + sum_ref_src = _mm256_srli_si256(ex_expand_sum, 8); - madd_ref_src = _mm256_add_epi32(madd_ref_src, expand_madd); - sum_ref_src = _mm256_add_epi32(sum_ref_src, ex_expand_sum); + madd_ref_src = _mm256_add_epi32(madd_ref_src, expand_madd); + sum_ref_src = _mm256_add_epi32(sum_ref_src, ex_expand_sum); - // extract the low lane and the high lane and add the results - 
*((int*)SSE)= _mm_cvtsi128_si32(_mm256_castsi256_si128(madd_ref_src)) + - _mm_cvtsi128_si32(_mm256_extractf128_si256(madd_ref_src, 1)); + // extract the low lane and the high lane and add the results + *((int *)SSE) = + _mm_cvtsi128_si32(_mm256_castsi256_si128(madd_ref_src)) + + _mm_cvtsi128_si32(_mm256_extractf128_si256(madd_ref_src, 1)); - *((int*)Sum)= _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_ref_src)) + - _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_ref_src, 1)); - } + *((int *)Sum) = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_ref_src)) + + _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_ref_src, 1)); + } } -#define FILTER_SRC(filter) \ - /* filter the source */ \ +#define FILTER_SRC(filter) \ + /* filter the source */ \ exp_src_lo = _mm256_maddubs_epi16(exp_src_lo, filter); \ exp_src_hi = _mm256_maddubs_epi16(exp_src_hi, filter); \ - \ - /* add 8 to source */ \ - exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8); \ - exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8); \ - \ - /* divide source by 16 */ \ - exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4); \ + \ + /* add 8 to source */ \ + exp_src_lo = _mm256_add_epi16(exp_src_lo, pw8); \ + exp_src_hi = _mm256_add_epi16(exp_src_hi, pw8); \ + \ + /* divide source by 16 */ \ + exp_src_lo = _mm256_srai_epi16(exp_src_lo, 4); \ exp_src_hi = _mm256_srai_epi16(exp_src_hi, 4); -#define MERGE_WITH_SRC(src_reg, reg) \ +#define MERGE_WITH_SRC(src_reg, reg) \ exp_src_lo = _mm256_unpacklo_epi8(src_reg, reg); \ exp_src_hi = _mm256_unpackhi_epi8(src_reg, reg); -#define LOAD_SRC_DST \ - /* load source and destination */ \ - src_reg = _mm256_loadu_si256((__m256i const *) (src)); \ - dst_reg = _mm256_loadu_si256((__m256i const *) (dst)); +#define LOAD_SRC_DST \ + /* load source and destination */ \ + src_reg = _mm256_loadu_si256((__m256i const *)(src)); \ + dst_reg = _mm256_loadu_si256((__m256i const *)(dst)); -#define AVG_NEXT_SRC(src_reg, size_stride) \ - src_next_reg = _mm256_loadu_si256((__m256i const *) \ - (src + 
size_stride)); \ - /* average between current and next stride source */ \ +#define AVG_NEXT_SRC(src_reg, size_stride) \ + src_next_reg = _mm256_loadu_si256((__m256i const *)(src + size_stride)); \ + /* average between current and next stride source */ \ src_reg = _mm256_avg_epu8(src_reg, src_next_reg); -#define MERGE_NEXT_SRC(src_reg, size_stride) \ - src_next_reg = _mm256_loadu_si256((__m256i const *) \ - (src + size_stride)); \ +#define MERGE_NEXT_SRC(src_reg, size_stride) \ + src_next_reg = _mm256_loadu_si256((__m256i const *)(src + size_stride)); \ MERGE_WITH_SRC(src_reg, src_next_reg) -#define CALC_SUM_SSE_INSIDE_LOOP \ - /* expand each byte to 2 bytes */ \ - exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg); \ - exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg); \ - /* source - dest */ \ - exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo); \ - exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi); \ - /* caculate sum */ \ - sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo); \ +#define CALC_SUM_SSE_INSIDE_LOOP \ + /* expand each byte to 2 bytes */ \ + exp_dst_lo = _mm256_unpacklo_epi8(dst_reg, zero_reg); \ + exp_dst_hi = _mm256_unpackhi_epi8(dst_reg, zero_reg); \ + /* source - dest */ \ + exp_src_lo = _mm256_sub_epi16(exp_src_lo, exp_dst_lo); \ + exp_src_hi = _mm256_sub_epi16(exp_src_hi, exp_dst_hi); \ + /* caculate sum */ \ + sum_reg = _mm256_add_epi16(sum_reg, exp_src_lo); \ exp_src_lo = _mm256_madd_epi16(exp_src_lo, exp_src_lo); \ - sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi); \ + sum_reg = _mm256_add_epi16(sum_reg, exp_src_hi); \ exp_src_hi = _mm256_madd_epi16(exp_src_hi, exp_src_hi); \ - /* calculate sse */ \ - sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo); \ + /* calculate sse */ \ + sse_reg = _mm256_add_epi32(sse_reg, exp_src_lo); \ sse_reg = _mm256_add_epi32(sse_reg, exp_src_hi); // final calculation to sum and sse -#define CALC_SUM_AND_SSE \ - res_cmp = _mm256_cmpgt_epi16(zero_reg, sum_reg); \ - sse_reg_hi = _mm256_srli_si256(sse_reg, 
8); \ - sum_reg_lo = _mm256_unpacklo_epi16(sum_reg, res_cmp); \ - sum_reg_hi = _mm256_unpackhi_epi16(sum_reg, res_cmp); \ - sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi); \ - sum_reg = _mm256_add_epi32(sum_reg_lo, sum_reg_hi); \ - \ - sse_reg_hi = _mm256_srli_si256(sse_reg, 4); \ - sum_reg_hi = _mm256_srli_si256(sum_reg, 8); \ - \ - sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi); \ - sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi); \ - *((int*)sse)= _mm_cvtsi128_si32(_mm256_castsi256_si128(sse_reg)) + \ - _mm_cvtsi128_si32(_mm256_extractf128_si256(sse_reg, 1)); \ - sum_reg_hi = _mm256_srli_si256(sum_reg, 4); \ - sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi); \ - sum = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_reg)) + \ +#define CALC_SUM_AND_SSE \ + res_cmp = _mm256_cmpgt_epi16(zero_reg, sum_reg); \ + sse_reg_hi = _mm256_srli_si256(sse_reg, 8); \ + sum_reg_lo = _mm256_unpacklo_epi16(sum_reg, res_cmp); \ + sum_reg_hi = _mm256_unpackhi_epi16(sum_reg, res_cmp); \ + sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi); \ + sum_reg = _mm256_add_epi32(sum_reg_lo, sum_reg_hi); \ + \ + sse_reg_hi = _mm256_srli_si256(sse_reg, 4); \ + sum_reg_hi = _mm256_srli_si256(sum_reg, 8); \ + \ + sse_reg = _mm256_add_epi32(sse_reg, sse_reg_hi); \ + sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi); \ + *((int *)sse) = _mm_cvtsi128_si32(_mm256_castsi256_si128(sse_reg)) + \ + _mm_cvtsi128_si32(_mm256_extractf128_si256(sse_reg, 1)); \ + sum_reg_hi = _mm256_srli_si256(sum_reg, 4); \ + sum_reg = _mm256_add_epi32(sum_reg, sum_reg_hi); \ + sum = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_reg)) + \ _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_reg, 1)); - -unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, - int src_stride, - int x_offset, - int y_offset, - const uint8_t *dst, - int dst_stride, - int height, - unsigned int *sse) { +unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride, + int x_offset, int y_offset, + const uint8_t *dst, int 
dst_stride, + int height, unsigned int *sse) { __m256i src_reg, dst_reg, exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi; __m256i sse_reg, sum_reg, sse_reg_hi, res_cmp, sum_reg_lo, sum_reg_hi; __m256i zero_reg; @@ -325,66 +312,66 @@ unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, // x_offset = 0 and y_offset = 0 if (x_offset == 0) { if (y_offset == 0) { - for (i = 0; i < height ; i++) { + for (i = 0; i < height; i++) { LOAD_SRC_DST // expend each byte to 2 bytes MERGE_WITH_SRC(src_reg, zero_reg) CALC_SUM_SSE_INSIDE_LOOP - src+= src_stride; - dst+= dst_stride; + src += src_stride; + dst += dst_stride; } - // x_offset = 0 and y_offset = 8 + // x_offset = 0 and y_offset = 8 } else if (y_offset == 8) { __m256i src_next_reg; - for (i = 0; i < height ; i++) { + for (i = 0; i < height; i++) { LOAD_SRC_DST AVG_NEXT_SRC(src_reg, src_stride) // expend each byte to 2 bytes MERGE_WITH_SRC(src_reg, zero_reg) CALC_SUM_SSE_INSIDE_LOOP - src+= src_stride; - dst+= dst_stride; + src += src_stride; + dst += dst_stride; } - // x_offset = 0 and y_offset = bilin interpolation + // x_offset = 0 and y_offset = bilin interpolation } else { __m256i filter, pw8, src_next_reg; y_offset <<= 5; - filter = _mm256_load_si256((__m256i const *) - (bilinear_filters_avx2 + y_offset)); + filter = _mm256_load_si256( + (__m256i const *)(bilinear_filters_avx2 + y_offset)); pw8 = _mm256_set1_epi16(8); - for (i = 0; i < height ; i++) { + for (i = 0; i < height; i++) { LOAD_SRC_DST MERGE_NEXT_SRC(src_reg, src_stride) FILTER_SRC(filter) CALC_SUM_SSE_INSIDE_LOOP - src+= src_stride; - dst+= dst_stride; + src += src_stride; + dst += dst_stride; } } - // x_offset = 8 and y_offset = 0 + // x_offset = 8 and y_offset = 0 } else if (x_offset == 8) { if (y_offset == 0) { __m256i src_next_reg; - for (i = 0; i < height ; i++) { + for (i = 0; i < height; i++) { LOAD_SRC_DST AVG_NEXT_SRC(src_reg, 1) // expand each byte to 2 bytes MERGE_WITH_SRC(src_reg, zero_reg) CALC_SUM_SSE_INSIDE_LOOP - src+= 
src_stride; - dst+= dst_stride; + src += src_stride; + dst += dst_stride; } - // x_offset = 8 and y_offset = 8 + // x_offset = 8 and y_offset = 8 } else if (y_offset == 8) { __m256i src_next_reg, src_avg; // load source and another source starting from the next // following byte - src_reg = _mm256_loadu_si256((__m256i const *) (src)); + src_reg = _mm256_loadu_si256((__m256i const *)(src)); AVG_NEXT_SRC(src_reg, 1) - for (i = 0; i < height ; i++) { + for (i = 0; i < height; i++) { src_avg = src_reg; - src+= src_stride; + src += src_stride; LOAD_SRC_DST AVG_NEXT_SRC(src_reg, 1) // average between previous average to current average @@ -393,92 +380,92 @@ unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, MERGE_WITH_SRC(src_avg, zero_reg) // save current source average CALC_SUM_SSE_INSIDE_LOOP - dst+= dst_stride; + dst += dst_stride; } - // x_offset = 8 and y_offset = bilin interpolation + // x_offset = 8 and y_offset = bilin interpolation } else { __m256i filter, pw8, src_next_reg, src_avg; y_offset <<= 5; - filter = _mm256_load_si256((__m256i const *) - (bilinear_filters_avx2 + y_offset)); + filter = _mm256_load_si256( + (__m256i const *)(bilinear_filters_avx2 + y_offset)); pw8 = _mm256_set1_epi16(8); // load source and another source starting from the next // following byte - src_reg = _mm256_loadu_si256((__m256i const *) (src)); + src_reg = _mm256_loadu_si256((__m256i const *)(src)); AVG_NEXT_SRC(src_reg, 1) - for (i = 0; i < height ; i++) { + for (i = 0; i < height; i++) { // save current source average src_avg = src_reg; - src+= src_stride; + src += src_stride; LOAD_SRC_DST AVG_NEXT_SRC(src_reg, 1) MERGE_WITH_SRC(src_avg, src_reg) FILTER_SRC(filter) CALC_SUM_SSE_INSIDE_LOOP - dst+= dst_stride; + dst += dst_stride; } } - // x_offset = bilin interpolation and y_offset = 0 + // x_offset = bilin interpolation and y_offset = 0 } else { if (y_offset == 0) { __m256i filter, pw8, src_next_reg; x_offset <<= 5; - filter = _mm256_load_si256((__m256i const *) - 
(bilinear_filters_avx2 + x_offset)); + filter = _mm256_load_si256( + (__m256i const *)(bilinear_filters_avx2 + x_offset)); pw8 = _mm256_set1_epi16(8); - for (i = 0; i < height ; i++) { + for (i = 0; i < height; i++) { LOAD_SRC_DST MERGE_NEXT_SRC(src_reg, 1) FILTER_SRC(filter) CALC_SUM_SSE_INSIDE_LOOP - src+= src_stride; - dst+= dst_stride; + src += src_stride; + dst += dst_stride; } - // x_offset = bilin interpolation and y_offset = 8 + // x_offset = bilin interpolation and y_offset = 8 } else if (y_offset == 8) { __m256i filter, pw8, src_next_reg, src_pack; x_offset <<= 5; - filter = _mm256_load_si256((__m256i const *) - (bilinear_filters_avx2 + x_offset)); + filter = _mm256_load_si256( + (__m256i const *)(bilinear_filters_avx2 + x_offset)); pw8 = _mm256_set1_epi16(8); - src_reg = _mm256_loadu_si256((__m256i const *) (src)); + src_reg = _mm256_loadu_si256((__m256i const *)(src)); MERGE_NEXT_SRC(src_reg, 1) FILTER_SRC(filter) // convert each 16 bit to 8 bit to each low and high lane source - src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi); - for (i = 0; i < height ; i++) { - src+= src_stride; + src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi); + for (i = 0; i < height; i++) { + src += src_stride; LOAD_SRC_DST MERGE_NEXT_SRC(src_reg, 1) FILTER_SRC(filter) - src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi); + src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi); // average between previous pack to the current src_pack = _mm256_avg_epu8(src_pack, src_reg); MERGE_WITH_SRC(src_pack, zero_reg) CALC_SUM_SSE_INSIDE_LOOP src_pack = src_reg; - dst+= dst_stride; + dst += dst_stride; } - // x_offset = bilin interpolation and y_offset = bilin interpolation + // x_offset = bilin interpolation and y_offset = bilin interpolation } else { __m256i xfilter, yfilter, pw8, src_next_reg, src_pack; x_offset <<= 5; - xfilter = _mm256_load_si256((__m256i const *) - (bilinear_filters_avx2 + x_offset)); + xfilter = _mm256_load_si256( + (__m256i const 
*)(bilinear_filters_avx2 + x_offset)); y_offset <<= 5; - yfilter = _mm256_load_si256((__m256i const *) - (bilinear_filters_avx2 + y_offset)); + yfilter = _mm256_load_si256( + (__m256i const *)(bilinear_filters_avx2 + y_offset)); pw8 = _mm256_set1_epi16(8); // load source and another source starting from the next // following byte - src_reg = _mm256_loadu_si256((__m256i const *) (src)); + src_reg = _mm256_loadu_si256((__m256i const *)(src)); MERGE_NEXT_SRC(src_reg, 1) FILTER_SRC(xfilter) // convert each 16 bit to 8 bit to each low and high lane source src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi); - for (i = 0; i < height ; i++) { - src+= src_stride; + for (i = 0; i < height; i++) { + src += src_stride; LOAD_SRC_DST MERGE_NEXT_SRC(src_reg, 1) FILTER_SRC(xfilter) @@ -489,7 +476,7 @@ unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, FILTER_SRC(yfilter) src_pack = src_reg; CALC_SUM_SSE_INSIDE_LOOP - dst+= dst_stride; + dst += dst_stride; } } } @@ -497,16 +484,10 @@ unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, return sum; } -unsigned int vpx_sub_pixel_avg_variance32xh_avx2(const uint8_t *src, - int src_stride, - int x_offset, - int y_offset, - const uint8_t *dst, - int dst_stride, - const uint8_t *sec, - int sec_stride, - int height, - unsigned int *sse) { +unsigned int vpx_sub_pixel_avg_variance32xh_avx2( + const uint8_t *src, int src_stride, int x_offset, int y_offset, + const uint8_t *dst, int dst_stride, const uint8_t *sec, int sec_stride, + int height, unsigned int *sse) { __m256i sec_reg; __m256i src_reg, dst_reg, exp_src_lo, exp_src_hi, exp_dst_lo, exp_dst_hi; __m256i sse_reg, sum_reg, sse_reg_hi, res_cmp, sum_reg_lo, sum_reg_hi; @@ -519,190 +500,190 @@ unsigned int vpx_sub_pixel_avg_variance32xh_avx2(const uint8_t *src, // x_offset = 0 and y_offset = 0 if (x_offset == 0) { if (y_offset == 0) { - for (i = 0; i < height ; i++) { + for (i = 0; i < height; i++) { LOAD_SRC_DST - sec_reg = _mm256_loadu_si256((__m256i const 
*) (sec)); + sec_reg = _mm256_loadu_si256((__m256i const *)(sec)); src_reg = _mm256_avg_epu8(src_reg, sec_reg); - sec+= sec_stride; + sec += sec_stride; // expend each byte to 2 bytes MERGE_WITH_SRC(src_reg, zero_reg) CALC_SUM_SSE_INSIDE_LOOP - src+= src_stride; - dst+= dst_stride; + src += src_stride; + dst += dst_stride; } } else if (y_offset == 8) { __m256i src_next_reg; - for (i = 0; i < height ; i++) { + for (i = 0; i < height; i++) { LOAD_SRC_DST AVG_NEXT_SRC(src_reg, src_stride) - sec_reg = _mm256_loadu_si256((__m256i const *) (sec)); + sec_reg = _mm256_loadu_si256((__m256i const *)(sec)); src_reg = _mm256_avg_epu8(src_reg, sec_reg); - sec+= sec_stride; + sec += sec_stride; // expend each byte to 2 bytes MERGE_WITH_SRC(src_reg, zero_reg) CALC_SUM_SSE_INSIDE_LOOP - src+= src_stride; - dst+= dst_stride; + src += src_stride; + dst += dst_stride; } - // x_offset = 0 and y_offset = bilin interpolation + // x_offset = 0 and y_offset = bilin interpolation } else { __m256i filter, pw8, src_next_reg; y_offset <<= 5; - filter = _mm256_load_si256((__m256i const *) - (bilinear_filters_avx2 + y_offset)); + filter = _mm256_load_si256( + (__m256i const *)(bilinear_filters_avx2 + y_offset)); pw8 = _mm256_set1_epi16(8); - for (i = 0; i < height ; i++) { + for (i = 0; i < height; i++) { LOAD_SRC_DST MERGE_NEXT_SRC(src_reg, src_stride) FILTER_SRC(filter) src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi); - sec_reg = _mm256_loadu_si256((__m256i const *) (sec)); + sec_reg = _mm256_loadu_si256((__m256i const *)(sec)); src_reg = _mm256_avg_epu8(src_reg, sec_reg); - sec+= sec_stride; + sec += sec_stride; MERGE_WITH_SRC(src_reg, zero_reg) CALC_SUM_SSE_INSIDE_LOOP - src+= src_stride; - dst+= dst_stride; + src += src_stride; + dst += dst_stride; } } - // x_offset = 8 and y_offset = 0 + // x_offset = 8 and y_offset = 0 } else if (x_offset == 8) { if (y_offset == 0) { __m256i src_next_reg; - for (i = 0; i < height ; i++) { + for (i = 0; i < height; i++) { LOAD_SRC_DST 
AVG_NEXT_SRC(src_reg, 1) - sec_reg = _mm256_loadu_si256((__m256i const *) (sec)); + sec_reg = _mm256_loadu_si256((__m256i const *)(sec)); src_reg = _mm256_avg_epu8(src_reg, sec_reg); - sec+= sec_stride; + sec += sec_stride; // expand each byte to 2 bytes MERGE_WITH_SRC(src_reg, zero_reg) CALC_SUM_SSE_INSIDE_LOOP - src+= src_stride; - dst+= dst_stride; + src += src_stride; + dst += dst_stride; } - // x_offset = 8 and y_offset = 8 + // x_offset = 8 and y_offset = 8 } else if (y_offset == 8) { __m256i src_next_reg, src_avg; // load source and another source starting from the next // following byte - src_reg = _mm256_loadu_si256((__m256i const *) (src)); + src_reg = _mm256_loadu_si256((__m256i const *)(src)); AVG_NEXT_SRC(src_reg, 1) - for (i = 0; i < height ; i++) { + for (i = 0; i < height; i++) { // save current source average src_avg = src_reg; - src+= src_stride; + src += src_stride; LOAD_SRC_DST AVG_NEXT_SRC(src_reg, 1) // average between previous average to current average src_avg = _mm256_avg_epu8(src_avg, src_reg); - sec_reg = _mm256_loadu_si256((__m256i const *) (sec)); + sec_reg = _mm256_loadu_si256((__m256i const *)(sec)); src_avg = _mm256_avg_epu8(src_avg, sec_reg); - sec+= sec_stride; + sec += sec_stride; // expand each byte to 2 bytes MERGE_WITH_SRC(src_avg, zero_reg) CALC_SUM_SSE_INSIDE_LOOP - dst+= dst_stride; + dst += dst_stride; } - // x_offset = 8 and y_offset = bilin interpolation + // x_offset = 8 and y_offset = bilin interpolation } else { __m256i filter, pw8, src_next_reg, src_avg; y_offset <<= 5; - filter = _mm256_load_si256((__m256i const *) - (bilinear_filters_avx2 + y_offset)); + filter = _mm256_load_si256( + (__m256i const *)(bilinear_filters_avx2 + y_offset)); pw8 = _mm256_set1_epi16(8); // load source and another source starting from the next // following byte - src_reg = _mm256_loadu_si256((__m256i const *) (src)); + src_reg = _mm256_loadu_si256((__m256i const *)(src)); AVG_NEXT_SRC(src_reg, 1) - for (i = 0; i < height ; i++) { + for (i 
= 0; i < height; i++) { // save current source average src_avg = src_reg; - src+= src_stride; + src += src_stride; LOAD_SRC_DST AVG_NEXT_SRC(src_reg, 1) MERGE_WITH_SRC(src_avg, src_reg) FILTER_SRC(filter) src_avg = _mm256_packus_epi16(exp_src_lo, exp_src_hi); - sec_reg = _mm256_loadu_si256((__m256i const *) (sec)); + sec_reg = _mm256_loadu_si256((__m256i const *)(sec)); src_avg = _mm256_avg_epu8(src_avg, sec_reg); // expand each byte to 2 bytes MERGE_WITH_SRC(src_avg, zero_reg) - sec+= sec_stride; + sec += sec_stride; CALC_SUM_SSE_INSIDE_LOOP - dst+= dst_stride; + dst += dst_stride; } } - // x_offset = bilin interpolation and y_offset = 0 + // x_offset = bilin interpolation and y_offset = 0 } else { if (y_offset == 0) { __m256i filter, pw8, src_next_reg; x_offset <<= 5; - filter = _mm256_load_si256((__m256i const *) - (bilinear_filters_avx2 + x_offset)); + filter = _mm256_load_si256( + (__m256i const *)(bilinear_filters_avx2 + x_offset)); pw8 = _mm256_set1_epi16(8); - for (i = 0; i < height ; i++) { + for (i = 0; i < height; i++) { LOAD_SRC_DST MERGE_NEXT_SRC(src_reg, 1) FILTER_SRC(filter) src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi); - sec_reg = _mm256_loadu_si256((__m256i const *) (sec)); + sec_reg = _mm256_loadu_si256((__m256i const *)(sec)); src_reg = _mm256_avg_epu8(src_reg, sec_reg); MERGE_WITH_SRC(src_reg, zero_reg) - sec+= sec_stride; + sec += sec_stride; CALC_SUM_SSE_INSIDE_LOOP - src+= src_stride; - dst+= dst_stride; + src += src_stride; + dst += dst_stride; } - // x_offset = bilin interpolation and y_offset = 8 + // x_offset = bilin interpolation and y_offset = 8 } else if (y_offset == 8) { __m256i filter, pw8, src_next_reg, src_pack; x_offset <<= 5; - filter = _mm256_load_si256((__m256i const *) - (bilinear_filters_avx2 + x_offset)); + filter = _mm256_load_si256( + (__m256i const *)(bilinear_filters_avx2 + x_offset)); pw8 = _mm256_set1_epi16(8); - src_reg = _mm256_loadu_si256((__m256i const *) (src)); + src_reg = _mm256_loadu_si256((__m256i 
const *)(src)); MERGE_NEXT_SRC(src_reg, 1) FILTER_SRC(filter) // convert each 16 bit to 8 bit to each low and high lane source - src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi); - for (i = 0; i < height ; i++) { - src+= src_stride; + src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi); + for (i = 0; i < height; i++) { + src += src_stride; LOAD_SRC_DST MERGE_NEXT_SRC(src_reg, 1) FILTER_SRC(filter) - src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi); + src_reg = _mm256_packus_epi16(exp_src_lo, exp_src_hi); // average between previous pack to the current src_pack = _mm256_avg_epu8(src_pack, src_reg); - sec_reg = _mm256_loadu_si256((__m256i const *) (sec)); + sec_reg = _mm256_loadu_si256((__m256i const *)(sec)); src_pack = _mm256_avg_epu8(src_pack, sec_reg); - sec+= sec_stride; + sec += sec_stride; MERGE_WITH_SRC(src_pack, zero_reg) src_pack = src_reg; CALC_SUM_SSE_INSIDE_LOOP - dst+= dst_stride; + dst += dst_stride; } - // x_offset = bilin interpolation and y_offset = bilin interpolation + // x_offset = bilin interpolation and y_offset = bilin interpolation } else { __m256i xfilter, yfilter, pw8, src_next_reg, src_pack; x_offset <<= 5; - xfilter = _mm256_load_si256((__m256i const *) - (bilinear_filters_avx2 + x_offset)); + xfilter = _mm256_load_si256( + (__m256i const *)(bilinear_filters_avx2 + x_offset)); y_offset <<= 5; - yfilter = _mm256_load_si256((__m256i const *) - (bilinear_filters_avx2 + y_offset)); + yfilter = _mm256_load_si256( + (__m256i const *)(bilinear_filters_avx2 + y_offset)); pw8 = _mm256_set1_epi16(8); // load source and another source starting from the next // following byte - src_reg = _mm256_loadu_si256((__m256i const *) (src)); + src_reg = _mm256_loadu_si256((__m256i const *)(src)); MERGE_NEXT_SRC(src_reg, 1) FILTER_SRC(xfilter) // convert each 16 bit to 8 bit to each low and high lane source src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi); - for (i = 0; i < height ; i++) { - src+= src_stride; + for (i = 0; i < height; i++) { 
+ src += src_stride; LOAD_SRC_DST MERGE_NEXT_SRC(src_reg, 1) FILTER_SRC(xfilter) @@ -712,13 +693,13 @@ unsigned int vpx_sub_pixel_avg_variance32xh_avx2(const uint8_t *src, // filter the source FILTER_SRC(yfilter) src_pack = _mm256_packus_epi16(exp_src_lo, exp_src_hi); - sec_reg = _mm256_loadu_si256((__m256i const *) (sec)); + sec_reg = _mm256_loadu_si256((__m256i const *)(sec)); src_pack = _mm256_avg_epu8(src_pack, sec_reg); MERGE_WITH_SRC(src_pack, zero_reg) src_pack = src_reg; - sec+= sec_stride; + sec += sec_stride; CALC_SUM_SSE_INSIDE_LOOP - dst+= dst_stride; + dst += dst_stride; } } } diff --git a/libs/libvpx/vpx_dsp/x86/variance_impl_mmx.asm b/libs/libvpx/vpx_dsp/x86/variance_impl_mmx.asm deleted file mode 100644 index b8ba79b65e..0000000000 --- a/libs/libvpx/vpx_dsp/x86/variance_impl_mmx.asm +++ /dev/null @@ -1,744 +0,0 @@ -; -; Copyright (c) 2010 The WebM project authors. All Rights Reserved. -; -; Use of this source code is governed by a BSD-style license -; that can be found in the LICENSE file in the root of the source -; tree. An additional intellectual property rights grant can be found -; in the file PATENTS. All contributing project authors may -; be found in the AUTHORS file in the root of the source tree. 
-; - - -%include "vpx_ports/x86_abi_support.asm" - -%define mmx_filter_shift 7 - -;unsigned int vpx_get_mb_ss_mmx( short *src_ptr ) -global sym(vpx_get_mb_ss_mmx) PRIVATE -sym(vpx_get_mb_ss_mmx): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 7 - GET_GOT rbx - push rsi - push rdi - sub rsp, 8 - ; end prolog - - mov rax, arg(0) ;src_ptr - mov rcx, 16 - pxor mm4, mm4 - -.NEXTROW: - movq mm0, [rax] - movq mm1, [rax+8] - movq mm2, [rax+16] - movq mm3, [rax+24] - pmaddwd mm0, mm0 - pmaddwd mm1, mm1 - pmaddwd mm2, mm2 - pmaddwd mm3, mm3 - - paddd mm4, mm0 - paddd mm4, mm1 - paddd mm4, mm2 - paddd mm4, mm3 - - add rax, 32 - dec rcx - ja .NEXTROW - movq QWORD PTR [rsp], mm4 - - ;return sum[0]+sum[1]; - movsxd rax, dword ptr [rsp] - movsxd rcx, dword ptr [rsp+4] - add rax, rcx - - ; begin epilog - add rsp, 8 - pop rdi - pop rsi - RESTORE_GOT - UNSHADOW_ARGS - pop rbp - ret - -;void vpx_get8x8var_mmx -;( -; unsigned char *src_ptr, -; int source_stride, -; unsigned char *ref_ptr, -; int recon_stride, -; unsigned int *SSE, -; int *Sum -;) -global sym(vpx_get8x8var_mmx) PRIVATE -sym(vpx_get8x8var_mmx): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 6 - push rsi - push rdi - push rbx - sub rsp, 16 - ; end prolog - - pxor mm5, mm5 ; Blank mmx6 - pxor mm6, mm6 ; Blank mmx7 - pxor mm7, mm7 ; Blank mmx7 - - mov rax, arg(0) ;[src_ptr] ; Load base addresses - mov rbx, arg(2) ;[ref_ptr] - movsxd rcx, dword ptr arg(1) ;[source_stride] - movsxd rdx, dword ptr arg(3) ;[recon_stride] - - ; Row 1 - movq mm0, [rax] ; Copy eight bytes to mm0 - movq mm1, [rbx] ; Copy eight bytes to mm1 - movq mm2, mm0 ; Take copies - movq mm3, mm1 ; Take copies - - punpcklbw mm0, mm6 ; unpack to higher prrcision - punpcklbw mm1, mm6 - punpckhbw mm2, mm6 ; unpack to higher prrcision - punpckhbw mm3, mm6 - psubsw mm0, mm1 ; A-B (low order) to MM0 - psubsw mm2, mm3 ; A-B (high order) to MM2 - - paddw mm5, mm0 ; accumulate differences in mm5 - paddw mm5, mm2 ; accumulate differences in mm5 - - pmaddwd mm0, mm0 
; square and accumulate - pmaddwd mm2, mm2 ; square and accumulate - add rbx,rdx ; Inc pointer into ref data - add rax,rcx ; Inc pointer into the new data - movq mm1, [rbx] ; Copy eight bytes to mm1 - paddd mm7, mm0 ; accumulate in mm7 - paddd mm7, mm2 ; accumulate in mm7 - - ; Row 2 - movq mm0, [rax] ; Copy eight bytes to mm0 - movq mm2, mm0 ; Take copies - movq mm3, mm1 ; Take copies - - punpcklbw mm0, mm6 ; unpack to higher prrcision - punpcklbw mm1, mm6 - punpckhbw mm2, mm6 ; unpack to higher prrcision - punpckhbw mm3, mm6 - psubsw mm0, mm1 ; A-B (low order) to MM0 - psubsw mm2, mm3 ; A-B (high order) to MM2 - - paddw mm5, mm0 ; accumulate differences in mm5 - paddw mm5, mm2 ; accumulate differences in mm5 - - pmaddwd mm0, mm0 ; square and accumulate - pmaddwd mm2, mm2 ; square and accumulate - add rbx,rdx ; Inc pointer into ref data - add rax,rcx ; Inc pointer into the new data - movq mm1, [rbx] ; Copy eight bytes to mm1 - paddd mm7, mm0 ; accumulate in mm7 - paddd mm7, mm2 ; accumulate in mm7 - - ; Row 3 - movq mm0, [rax] ; Copy eight bytes to mm0 - movq mm2, mm0 ; Take copies - movq mm3, mm1 ; Take copies - - punpcklbw mm0, mm6 ; unpack to higher prrcision - punpcklbw mm1, mm6 - punpckhbw mm2, mm6 ; unpack to higher prrcision - punpckhbw mm3, mm6 - psubsw mm0, mm1 ; A-B (low order) to MM0 - psubsw mm2, mm3 ; A-B (high order) to MM2 - - paddw mm5, mm0 ; accumulate differences in mm5 - paddw mm5, mm2 ; accumulate differences in mm5 - - pmaddwd mm0, mm0 ; square and accumulate - pmaddwd mm2, mm2 ; square and accumulate - add rbx,rdx ; Inc pointer into ref data - add rax,rcx ; Inc pointer into the new data - movq mm1, [rbx] ; Copy eight bytes to mm1 - paddd mm7, mm0 ; accumulate in mm7 - paddd mm7, mm2 ; accumulate in mm7 - - ; Row 4 - movq mm0, [rax] ; Copy eight bytes to mm0 - movq mm2, mm0 ; Take copies - movq mm3, mm1 ; Take copies - - punpcklbw mm0, mm6 ; unpack to higher prrcision - punpcklbw mm1, mm6 - punpckhbw mm2, mm6 ; unpack to higher prrcision - 
punpckhbw mm3, mm6 - psubsw mm0, mm1 ; A-B (low order) to MM0 - psubsw mm2, mm3 ; A-B (high order) to MM2 - - paddw mm5, mm0 ; accumulate differences in mm5 - paddw mm5, mm2 ; accumulate differences in mm5 - - pmaddwd mm0, mm0 ; square and accumulate - pmaddwd mm2, mm2 ; square and accumulate - add rbx,rdx ; Inc pointer into ref data - add rax,rcx ; Inc pointer into the new data - movq mm1, [rbx] ; Copy eight bytes to mm1 - paddd mm7, mm0 ; accumulate in mm7 - paddd mm7, mm2 ; accumulate in mm7 - - ; Row 5 - movq mm0, [rax] ; Copy eight bytes to mm0 - movq mm2, mm0 ; Take copies - movq mm3, mm1 ; Take copies - - punpcklbw mm0, mm6 ; unpack to higher prrcision - punpcklbw mm1, mm6 - punpckhbw mm2, mm6 ; unpack to higher prrcision - punpckhbw mm3, mm6 - psubsw mm0, mm1 ; A-B (low order) to MM0 - psubsw mm2, mm3 ; A-B (high order) to MM2 - - paddw mm5, mm0 ; accumulate differences in mm5 - paddw mm5, mm2 ; accumulate differences in mm5 - - pmaddwd mm0, mm0 ; square and accumulate - pmaddwd mm2, mm2 ; square and accumulate - add rbx,rdx ; Inc pointer into ref data - add rax,rcx ; Inc pointer into the new data - movq mm1, [rbx] ; Copy eight bytes to mm1 - ; movq mm4, [rbx + rdx] - paddd mm7, mm0 ; accumulate in mm7 - paddd mm7, mm2 ; accumulate in mm7 - - ; Row 6 - movq mm0, [rax] ; Copy eight bytes to mm0 - movq mm2, mm0 ; Take copies - movq mm3, mm1 ; Take copies - - punpcklbw mm0, mm6 ; unpack to higher prrcision - punpcklbw mm1, mm6 - punpckhbw mm2, mm6 ; unpack to higher prrcision - punpckhbw mm3, mm6 - psubsw mm0, mm1 ; A-B (low order) to MM0 - psubsw mm2, mm3 ; A-B (high order) to MM2 - - paddw mm5, mm0 ; accumulate differences in mm5 - paddw mm5, mm2 ; accumulate differences in mm5 - - pmaddwd mm0, mm0 ; square and accumulate - pmaddwd mm2, mm2 ; square and accumulate - add rbx,rdx ; Inc pointer into ref data - add rax,rcx ; Inc pointer into the new data - movq mm1, [rbx] ; Copy eight bytes to mm1 - paddd mm7, mm0 ; accumulate in mm7 - paddd mm7, mm2 ; 
accumulate in mm7 - - ; Row 7 - movq mm0, [rax] ; Copy eight bytes to mm0 - movq mm2, mm0 ; Take copies - movq mm3, mm1 ; Take copies - - punpcklbw mm0, mm6 ; unpack to higher prrcision - punpcklbw mm1, mm6 - punpckhbw mm2, mm6 ; unpack to higher prrcision - punpckhbw mm3, mm6 - psubsw mm0, mm1 ; A-B (low order) to MM0 - psubsw mm2, mm3 ; A-B (high order) to MM2 - - paddw mm5, mm0 ; accumulate differences in mm5 - paddw mm5, mm2 ; accumulate differences in mm5 - - pmaddwd mm0, mm0 ; square and accumulate - pmaddwd mm2, mm2 ; square and accumulate - add rbx,rdx ; Inc pointer into ref data - add rax,rcx ; Inc pointer into the new data - movq mm1, [rbx] ; Copy eight bytes to mm1 - paddd mm7, mm0 ; accumulate in mm7 - paddd mm7, mm2 ; accumulate in mm7 - - ; Row 8 - movq mm0, [rax] ; Copy eight bytes to mm0 - movq mm2, mm0 ; Take copies - movq mm3, mm1 ; Take copies - - punpcklbw mm0, mm6 ; unpack to higher prrcision - punpcklbw mm1, mm6 - punpckhbw mm2, mm6 ; unpack to higher prrcision - punpckhbw mm3, mm6 - psubsw mm0, mm1 ; A-B (low order) to MM0 - psubsw mm2, mm3 ; A-B (high order) to MM2 - - paddw mm5, mm0 ; accumulate differences in mm5 - paddw mm5, mm2 ; accumulate differences in mm5 - - pmaddwd mm0, mm0 ; square and accumulate - pmaddwd mm2, mm2 ; square and accumulate - add rbx,rdx ; Inc pointer into ref data - add rax,rcx ; Inc pointer into the new data - paddd mm7, mm0 ; accumulate in mm7 - paddd mm7, mm2 ; accumulate in mm7 - - ; Now accumulate the final results. 
- movq QWORD PTR [rsp+8], mm5 ; copy back accumulated results into normal memory - movq QWORD PTR [rsp], mm7 ; copy back accumulated results into normal memory - movsx rdx, WORD PTR [rsp+8] - movsx rcx, WORD PTR [rsp+10] - movsx rbx, WORD PTR [rsp+12] - movsx rax, WORD PTR [rsp+14] - add rdx, rcx - add rbx, rax - add rdx, rbx ;XSum - movsxd rax, DWORD PTR [rsp] - movsxd rcx, DWORD PTR [rsp+4] - add rax, rcx ;XXSum - mov rsi, arg(4) ;SSE - mov rdi, arg(5) ;Sum - mov dword ptr [rsi], eax - mov dword ptr [rdi], edx - xor rax, rax ; return 0 - - ; begin epilog - add rsp, 16 - pop rbx - pop rdi - pop rsi - UNSHADOW_ARGS - pop rbp - ret - -;void -;vpx_get4x4var_mmx -;( -; unsigned char *src_ptr, -; int source_stride, -; unsigned char *ref_ptr, -; int recon_stride, -; unsigned int *SSE, -; int *Sum -;) -global sym(vpx_get4x4var_mmx) PRIVATE -sym(vpx_get4x4var_mmx): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 6 - push rsi - push rdi - push rbx - sub rsp, 16 - ; end prolog - - pxor mm5, mm5 ; Blank mmx6 - pxor mm6, mm6 ; Blank mmx7 - pxor mm7, mm7 ; Blank mmx7 - - mov rax, arg(0) ;[src_ptr] ; Load base addresses - mov rbx, arg(2) ;[ref_ptr] - movsxd rcx, dword ptr arg(1) ;[source_stride] - movsxd rdx, dword ptr arg(3) ;[recon_stride] - - ; Row 1 - movd mm0, [rax] ; Copy four bytes to mm0 - movd mm1, [rbx] ; Copy four bytes to mm1 - punpcklbw mm0, mm6 ; unpack to higher prrcision - punpcklbw mm1, mm6 - psubsw mm0, mm1 ; A-B (low order) to MM0 - paddw mm5, mm0 ; accumulate differences in mm5 - pmaddwd mm0, mm0 ; square and accumulate - add rbx,rdx ; Inc pointer into ref data - add rax,rcx ; Inc pointer into the new data - movd mm1, [rbx] ; Copy four bytes to mm1 - paddd mm7, mm0 ; accumulate in mm7 - - ; Row 2 - movd mm0, [rax] ; Copy four bytes to mm0 - punpcklbw mm0, mm6 ; unpack to higher prrcision - punpcklbw mm1, mm6 - psubsw mm0, mm1 ; A-B (low order) to MM0 - paddw mm5, mm0 ; accumulate differences in mm5 - - pmaddwd mm0, mm0 ; square and accumulate - add rbx,rdx 
; Inc pointer into ref data - add rax,rcx ; Inc pointer into the new data - movd mm1, [rbx] ; Copy four bytes to mm1 - paddd mm7, mm0 ; accumulate in mm7 - - ; Row 3 - movd mm0, [rax] ; Copy four bytes to mm0 - punpcklbw mm0, mm6 ; unpack to higher precision - punpcklbw mm1, mm6 - psubsw mm0, mm1 ; A-B (low order) to MM0 - paddw mm5, mm0 ; accumulate differences in mm5 - - pmaddwd mm0, mm0 ; square and accumulate - add rbx,rdx ; Inc pointer into ref data - add rax,rcx ; Inc pointer into the new data - movd mm1, [rbx] ; Copy four bytes to mm1 - paddd mm7, mm0 ; accumulate in mm7 - - ; Row 4 - movd mm0, [rax] ; Copy four bytes to mm0 - - punpcklbw mm0, mm6 ; unpack to higher prrcision - punpcklbw mm1, mm6 - psubsw mm0, mm1 ; A-B (low order) to MM0 - - paddw mm5, mm0 ; accumulate differences in mm5 - - pmaddwd mm0, mm0 ; square and accumulate - paddd mm7, mm0 ; accumulate in mm7 - - ; Now accumulate the final results. - movq QWORD PTR [rsp+8], mm5 ; copy back accumulated results into normal memory - movq QWORD PTR [rsp], mm7 ; copy back accumulated results into normal memory - movsx rdx, WORD PTR [rsp+8] - movsx rcx, WORD PTR [rsp+10] - movsx rbx, WORD PTR [rsp+12] - movsx rax, WORD PTR [rsp+14] - add rdx, rcx - add rbx, rax - add rdx, rbx ;XSum - movsxd rax, DWORD PTR [rsp] - movsxd rcx, DWORD PTR [rsp+4] - add rax, rcx ;XXSum - mov rsi, arg(4) ;SSE - mov rdi, arg(5) ;Sum - mov dword ptr [rsi], eax - mov dword ptr [rdi], edx - xor rax, rax ; return 0 - - ; begin epilog - add rsp, 16 - pop rbx - pop rdi - pop rsi - UNSHADOW_ARGS - pop rbp - ret - -;void vpx_filter_block2d_bil4x4_var_mmx -;( -; unsigned char *ref_ptr, -; int ref_pixels_per_line, -; unsigned char *src_ptr, -; int src_pixels_per_line, -; unsigned short *HFilter, -; unsigned short *VFilter, -; int *sum, -; unsigned int *sumsquared -;) -global sym(vpx_filter_block2d_bil4x4_var_mmx) PRIVATE -sym(vpx_filter_block2d_bil4x4_var_mmx): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 8 - GET_GOT rbx - push rsi - 
push rdi - sub rsp, 16 - ; end prolog - - pxor mm6, mm6 ; - pxor mm7, mm7 ; - - mov rax, arg(4) ;HFilter ; - mov rdx, arg(5) ;VFilter ; - - mov rsi, arg(0) ;ref_ptr ; - mov rdi, arg(2) ;src_ptr ; - - mov rcx, 4 ; - pxor mm0, mm0 ; - - movd mm1, [rsi] ; - movd mm3, [rsi+1] ; - - punpcklbw mm1, mm0 ; - pmullw mm1, [rax] ; - - punpcklbw mm3, mm0 ; - pmullw mm3, [rax+8] ; - - paddw mm1, mm3 ; - paddw mm1, [GLOBAL(mmx_bi_rd)] ; - - psraw mm1, mmx_filter_shift ; - movq mm5, mm1 - -%if ABI_IS_32BIT - add rsi, dword ptr arg(1) ;ref_pixels_per_line ; -%else - movsxd r8, dword ptr arg(1) ;ref_pixels_per_line ; - add rsi, r8 -%endif - -.filter_block2d_bil4x4_var_mmx_loop: - - movd mm1, [rsi] ; - movd mm3, [rsi+1] ; - - punpcklbw mm1, mm0 ; - pmullw mm1, [rax] ; - - punpcklbw mm3, mm0 ; - pmullw mm3, [rax+8] ; - - paddw mm1, mm3 ; - paddw mm1, [GLOBAL(mmx_bi_rd)] ; - - psraw mm1, mmx_filter_shift ; - movq mm3, mm5 ; - - movq mm5, mm1 ; - pmullw mm3, [rdx] ; - - pmullw mm1, [rdx+8] ; - paddw mm1, mm3 ; - - paddw mm1, [GLOBAL(mmx_bi_rd)] ; - psraw mm1, mmx_filter_shift ; - - movd mm3, [rdi] ; - punpcklbw mm3, mm0 ; - - psubw mm1, mm3 ; - paddw mm6, mm1 ; - - pmaddwd mm1, mm1 ; - paddd mm7, mm1 ; - -%if ABI_IS_32BIT - add rsi, dword ptr arg(1) ;ref_pixels_per_line ; - add rdi, dword ptr arg(3) ;src_pixels_per_line ; -%else - movsxd r8, dword ptr arg(1) ;ref_pixels_per_line - movsxd r9, dword ptr arg(3) ;src_pixels_per_line - add rsi, r8 - add rdi, r9 -%endif - sub rcx, 1 ; - jnz .filter_block2d_bil4x4_var_mmx_loop ; - - pxor mm3, mm3 ; - pxor mm2, mm2 ; - - punpcklwd mm2, mm6 ; - punpckhwd mm3, mm6 ; - - paddd mm2, mm3 ; - movq mm6, mm2 ; - - psrlq mm6, 32 ; - paddd mm2, mm6 ; - - psrad mm2, 16 ; - movq mm4, mm7 ; - - psrlq mm4, 32 ; - paddd mm4, mm7 ; - - mov rdi, arg(6) ;sum - mov rsi, arg(7) ;sumsquared - - movd dword ptr [rdi], mm2 ; - movd dword ptr [rsi], mm4 ; - - ; begin epilog - add rsp, 16 - pop rdi - pop rsi - RESTORE_GOT - UNSHADOW_ARGS - pop rbp - ret - -;void 
vpx_filter_block2d_bil_var_mmx -;( -; unsigned char *ref_ptr, -; int ref_pixels_per_line, -; unsigned char *src_ptr, -; int src_pixels_per_line, -; unsigned int Height, -; unsigned short *HFilter, -; unsigned short *VFilter, -; int *sum, -; unsigned int *sumsquared -;) -global sym(vpx_filter_block2d_bil_var_mmx) PRIVATE -sym(vpx_filter_block2d_bil_var_mmx): - push rbp - mov rbp, rsp - SHADOW_ARGS_TO_STACK 9 - GET_GOT rbx - push rsi - push rdi - sub rsp, 16 - ; end prolog - - pxor mm6, mm6 ; - pxor mm7, mm7 ; - mov rax, arg(5) ;HFilter ; - - mov rdx, arg(6) ;VFilter ; - mov rsi, arg(0) ;ref_ptr ; - - mov rdi, arg(2) ;src_ptr ; - movsxd rcx, dword ptr arg(4) ;Height ; - - pxor mm0, mm0 ; - movq mm1, [rsi] ; - - movq mm3, [rsi+1] ; - movq mm2, mm1 ; - - movq mm4, mm3 ; - punpcklbw mm1, mm0 ; - - punpckhbw mm2, mm0 ; - pmullw mm1, [rax] ; - - pmullw mm2, [rax] ; - punpcklbw mm3, mm0 ; - - punpckhbw mm4, mm0 ; - pmullw mm3, [rax+8] ; - - pmullw mm4, [rax+8] ; - paddw mm1, mm3 ; - - paddw mm2, mm4 ; - paddw mm1, [GLOBAL(mmx_bi_rd)] ; - - psraw mm1, mmx_filter_shift ; - paddw mm2, [GLOBAL(mmx_bi_rd)] ; - - psraw mm2, mmx_filter_shift ; - movq mm5, mm1 - - packuswb mm5, mm2 ; -%if ABI_IS_32BIT - add rsi, dword ptr arg(1) ;ref_pixels_per_line -%else - movsxd r8, dword ptr arg(1) ;ref_pixels_per_line - add rsi, r8 -%endif - -.filter_block2d_bil_var_mmx_loop: - - movq mm1, [rsi] ; - movq mm3, [rsi+1] ; - - movq mm2, mm1 ; - movq mm4, mm3 ; - - punpcklbw mm1, mm0 ; - punpckhbw mm2, mm0 ; - - pmullw mm1, [rax] ; - pmullw mm2, [rax] ; - - punpcklbw mm3, mm0 ; - punpckhbw mm4, mm0 ; - - pmullw mm3, [rax+8] ; - pmullw mm4, [rax+8] ; - - paddw mm1, mm3 ; - paddw mm2, mm4 ; - - paddw mm1, [GLOBAL(mmx_bi_rd)] ; - psraw mm1, mmx_filter_shift ; - - paddw mm2, [GLOBAL(mmx_bi_rd)] ; - psraw mm2, mmx_filter_shift ; - - movq mm3, mm5 ; - movq mm4, mm5 ; - - punpcklbw mm3, mm0 ; - punpckhbw mm4, mm0 ; - - movq mm5, mm1 ; - packuswb mm5, mm2 ; - - pmullw mm3, [rdx] ; - pmullw mm4, [rdx] ; - 
- pmullw mm1, [rdx+8] ; - pmullw mm2, [rdx+8] ; - - paddw mm1, mm3 ; - paddw mm2, mm4 ; - - paddw mm1, [GLOBAL(mmx_bi_rd)] ; - paddw mm2, [GLOBAL(mmx_bi_rd)] ; - - psraw mm1, mmx_filter_shift ; - psraw mm2, mmx_filter_shift ; - - movq mm3, [rdi] ; - movq mm4, mm3 ; - - punpcklbw mm3, mm0 ; - punpckhbw mm4, mm0 ; - - psubw mm1, mm3 ; - psubw mm2, mm4 ; - - paddw mm6, mm1 ; - pmaddwd mm1, mm1 ; - - paddw mm6, mm2 ; - pmaddwd mm2, mm2 ; - - paddd mm7, mm1 ; - paddd mm7, mm2 ; - -%if ABI_IS_32BIT - add rsi, dword ptr arg(1) ;ref_pixels_per_line ; - add rdi, dword ptr arg(3) ;src_pixels_per_line ; -%else - movsxd r8, dword ptr arg(1) ;ref_pixels_per_line ; - movsxd r9, dword ptr arg(3) ;src_pixels_per_line ; - add rsi, r8 - add rdi, r9 -%endif - sub rcx, 1 ; - jnz .filter_block2d_bil_var_mmx_loop ; - - pxor mm3, mm3 ; - pxor mm2, mm2 ; - - punpcklwd mm2, mm6 ; - punpckhwd mm3, mm6 ; - - paddd mm2, mm3 ; - movq mm6, mm2 ; - - psrlq mm6, 32 ; - paddd mm2, mm6 ; - - psrad mm2, 16 ; - movq mm4, mm7 ; - - psrlq mm4, 32 ; - paddd mm4, mm7 ; - - mov rdi, arg(7) ;sum - mov rsi, arg(8) ;sumsquared - - movd dword ptr [rdi], mm2 ; - movd dword ptr [rsi], mm4 ; - - ; begin epilog - add rsp, 16 - pop rdi - pop rsi - RESTORE_GOT - UNSHADOW_ARGS - pop rbp - ret - -SECTION_RODATA -;short mmx_bi_rd[4] = { 64, 64, 64, 64}; -align 16 -mmx_bi_rd: - times 4 dw 64 diff --git a/libs/libvpx/vpx_dsp/x86/variance_mmx.c b/libs/libvpx/vpx_dsp/x86/variance_mmx.c deleted file mode 100644 index f04f4e2c8f..0000000000 --- a/libs/libvpx/vpx_dsp/x86/variance_mmx.c +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Copyright (c) 2010 The WebM project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. 
- */ - -#include "./vpx_dsp_rtcd.h" - -#include "vpx_ports/mem.h" - -DECLARE_ALIGNED(16, static const int16_t, bilinear_filters_mmx[8][8]) = { - { 128, 128, 128, 128, 0, 0, 0, 0 }, - { 112, 112, 112, 112, 16, 16, 16, 16 }, - { 96, 96, 96, 96, 32, 32, 32, 32 }, - { 80, 80, 80, 80, 48, 48, 48, 48 }, - { 64, 64, 64, 64, 64, 64, 64, 64 }, - { 48, 48, 48, 48, 80, 80, 80, 80 }, - { 32, 32, 32, 32, 96, 96, 96, 96 }, - { 16, 16, 16, 16, 112, 112, 112, 112 } -}; - -extern void vpx_get4x4var_mmx(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - unsigned int *sse, int *sum); - -extern void vpx_filter_block2d_bil4x4_var_mmx(const unsigned char *ref_ptr, - int ref_pixels_per_line, - const unsigned char *src_ptr, - int src_pixels_per_line, - const int16_t *HFilter, - const int16_t *VFilter, - int *sum, - unsigned int *sumsquared); - -extern void vpx_filter_block2d_bil_var_mmx(const unsigned char *ref_ptr, - int ref_pixels_per_line, - const unsigned char *src_ptr, - int src_pixels_per_line, - unsigned int Height, - const int16_t *HFilter, - const int16_t *VFilter, - int *sum, - unsigned int *sumsquared); - - -unsigned int vpx_variance4x4_mmx(const unsigned char *a, int a_stride, - const unsigned char *b, int b_stride, - unsigned int *sse) { - unsigned int var; - int avg; - - vpx_get4x4var_mmx(a, a_stride, b, b_stride, &var, &avg); - *sse = var; - return (var - (((unsigned int)avg * avg) >> 4)); -} - -unsigned int vpx_variance8x8_mmx(const unsigned char *a, int a_stride, - const unsigned char *b, int b_stride, - unsigned int *sse) { - unsigned int var; - int avg; - - vpx_get8x8var_mmx(a, a_stride, b, b_stride, &var, &avg); - *sse = var; - - return (var - (((unsigned int)avg * avg) >> 6)); -} - -unsigned int vpx_mse16x16_mmx(const unsigned char *a, int a_stride, - const unsigned char *b, int b_stride, - unsigned int *sse) { - unsigned int sse0, sse1, sse2, sse3, var; - int sum0, sum1, sum2, sum3; - - vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0); - 
vpx_get8x8var_mmx(a + 8, a_stride, b + 8, b_stride, &sse1, &sum1); - vpx_get8x8var_mmx(a + 8 * a_stride, a_stride, - b + 8 * b_stride, b_stride, &sse2, &sum2); - vpx_get8x8var_mmx(a + 8 * a_stride + 8, a_stride, - b + 8 * b_stride + 8, b_stride, &sse3, &sum3); - - var = sse0 + sse1 + sse2 + sse3; - *sse = var; - return var; -} - -unsigned int vpx_variance16x16_mmx(const unsigned char *a, int a_stride, - const unsigned char *b, int b_stride, - unsigned int *sse) { - unsigned int sse0, sse1, sse2, sse3, var; - int sum0, sum1, sum2, sum3, avg; - - vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0); - vpx_get8x8var_mmx(a + 8, a_stride, b + 8, b_stride, &sse1, &sum1); - vpx_get8x8var_mmx(a + 8 * a_stride, a_stride, - b + 8 * b_stride, b_stride, &sse2, &sum2); - vpx_get8x8var_mmx(a + 8 * a_stride + 8, a_stride, - b + 8 * b_stride + 8, b_stride, &sse3, &sum3); - - var = sse0 + sse1 + sse2 + sse3; - avg = sum0 + sum1 + sum2 + sum3; - *sse = var; - return (var - (((unsigned int)avg * avg) >> 8)); -} - -unsigned int vpx_variance16x8_mmx(const unsigned char *a, int a_stride, - const unsigned char *b, int b_stride, - unsigned int *sse) { - unsigned int sse0, sse1, var; - int sum0, sum1, avg; - - vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0); - vpx_get8x8var_mmx(a + 8, a_stride, b + 8, b_stride, &sse1, &sum1); - - var = sse0 + sse1; - avg = sum0 + sum1; - *sse = var; - return (var - (((unsigned int)avg * avg) >> 7)); -} - -unsigned int vpx_variance8x16_mmx(const unsigned char *a, int a_stride, - const unsigned char *b, int b_stride, - unsigned int *sse) { - unsigned int sse0, sse1, var; - int sum0, sum1, avg; - - vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0); - vpx_get8x8var_mmx(a + 8 * a_stride, a_stride, - b + 8 * b_stride, b_stride, &sse1, &sum1); - - var = sse0 + sse1; - avg = sum0 + sum1; - *sse = var; - - return (var - (((unsigned int)avg * avg) >> 7)); -} - -uint32_t vpx_sub_pixel_variance4x4_mmx(const uint8_t *a, int a_stride, - int 
xoffset, int yoffset, - const uint8_t *b, int b_stride, - uint32_t *sse) { - int xsum; - unsigned int xxsum; - vpx_filter_block2d_bil4x4_var_mmx(a, a_stride, b, b_stride, - bilinear_filters_mmx[xoffset], - bilinear_filters_mmx[yoffset], - &xsum, &xxsum); - *sse = xxsum; - return (xxsum - (((unsigned int)xsum * xsum) >> 4)); -} - - -uint32_t vpx_sub_pixel_variance8x8_mmx(const uint8_t *a, int a_stride, - int xoffset, int yoffset, - const uint8_t *b, int b_stride, - uint32_t *sse) { - int xsum; - uint32_t xxsum; - vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 8, - bilinear_filters_mmx[xoffset], - bilinear_filters_mmx[yoffset], - &xsum, &xxsum); - *sse = xxsum; - return (xxsum - (((uint32_t)xsum * xsum) >> 6)); -} - -uint32_t vpx_sub_pixel_variance16x16_mmx(const uint8_t *a, int a_stride, - int xoffset, int yoffset, - const uint8_t *b, int b_stride, - uint32_t *sse) { - int xsum0, xsum1; - unsigned int xxsum0, xxsum1; - - vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 16, - bilinear_filters_mmx[xoffset], - bilinear_filters_mmx[yoffset], - &xsum0, &xxsum0); - - vpx_filter_block2d_bil_var_mmx(a + 8, a_stride, b + 8, b_stride, 16, - bilinear_filters_mmx[xoffset], - bilinear_filters_mmx[yoffset], - &xsum1, &xxsum1); - - xsum0 += xsum1; - xxsum0 += xxsum1; - - *sse = xxsum0; - return (xxsum0 - (((uint32_t)xsum0 * xsum0) >> 8)); -} - -uint32_t vpx_sub_pixel_variance16x8_mmx(const uint8_t *a, int a_stride, - int xoffset, int yoffset, - const uint8_t *b, int b_stride, - uint32_t *sse) { - int xsum0, xsum1; - unsigned int xxsum0, xxsum1; - - vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 8, - bilinear_filters_mmx[xoffset], - bilinear_filters_mmx[yoffset], - &xsum0, &xxsum0); - - vpx_filter_block2d_bil_var_mmx(a + 8, a_stride, b + 8, b_stride, 8, - bilinear_filters_mmx[xoffset], - bilinear_filters_mmx[yoffset], - &xsum1, &xxsum1); - - xsum0 += xsum1; - xxsum0 += xxsum1; - - *sse = xxsum0; - return (xxsum0 - (((uint32_t)xsum0 * xsum0) >> 7)); -} - 
-uint32_t vpx_sub_pixel_variance8x16_mmx(const uint8_t *a, int a_stride, - int xoffset, int yoffset, - const uint8_t *b, int b_stride, - uint32_t *sse) { - int xsum; - unsigned int xxsum; - vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 16, - bilinear_filters_mmx[xoffset], - bilinear_filters_mmx[yoffset], - &xsum, &xxsum); - *sse = xxsum; - return (xxsum - (((uint32_t)xsum * xsum) >> 7)); -} - -uint32_t vpx_variance_halfpixvar16x16_h_mmx(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - uint32_t *sse) { - return vpx_sub_pixel_variance16x16_mmx(a, a_stride, 4, 0, b, b_stride, sse); -} - -uint32_t vpx_variance_halfpixvar16x16_v_mmx(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - uint32_t *sse) { - return vpx_sub_pixel_variance16x16_mmx(a, a_stride, 0, 4, b, b_stride, sse); -} - -uint32_t vpx_variance_halfpixvar16x16_hv_mmx(const uint8_t *a, int a_stride, - const uint8_t *b, int b_stride, - uint32_t *sse) { - return vpx_sub_pixel_variance16x16_mmx(a, a_stride, 4, 4, b, b_stride, sse); -} diff --git a/libs/libvpx/vpx_dsp/x86/variance_sse2.c b/libs/libvpx/vpx_dsp/x86/variance_sse2.c index e6c9365ab4..1161da4914 100644 --- a/libs/libvpx/vpx_dsp/x86/variance_sse2.c +++ b/libs/libvpx/vpx_dsp/x86/variance_sse2.c @@ -15,9 +15,9 @@ #include "vpx_ports/mem.h" -typedef void (*getNxMvar_fn_t) (const unsigned char *src, int src_stride, - const unsigned char *ref, int ref_stride, - unsigned int *sse, int *sum); +typedef void (*getNxMvar_fn_t)(const unsigned char *src, int src_stride, + const unsigned char *ref, int ref_stride, + unsigned int *sse, int *sum); unsigned int vpx_get_mb_ss_sse2(const int16_t *src) { __m128i vsum = _mm_setzero_si128(); @@ -31,11 +31,12 @@ unsigned int vpx_get_mb_ss_sse2(const int16_t *src) { vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 8)); vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 4)); - return _mm_cvtsi128_si32(vsum); + return _mm_cvtsi128_si32(vsum); } -#define READ64(p, stride, i) \ - 
_mm_unpacklo_epi8(_mm_cvtsi32_si128(*(const uint32_t *)(p + i * stride)), \ +#define READ64(p, stride, i) \ + _mm_unpacklo_epi8( \ + _mm_cvtsi32_si128(*(const uint32_t *)(p + i * stride)), \ _mm_cvtsi32_si128(*(const uint32_t *)(p + (i + 1) * stride))) static void get4x4var_sse2(const uint8_t *src, int src_stride, @@ -57,32 +58,31 @@ static void get4x4var_sse2(const uint8_t *src, int src_stride, *sum = (int16_t)_mm_extract_epi16(vsum, 0); // sse - vsum = _mm_add_epi32(_mm_madd_epi16(diff0, diff0), - _mm_madd_epi16(diff1, diff1)); + vsum = + _mm_add_epi32(_mm_madd_epi16(diff0, diff0), _mm_madd_epi16(diff1, diff1)); vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 8)); vsum = _mm_add_epi32(vsum, _mm_srli_si128(vsum, 4)); *sse = _mm_cvtsi128_si32(vsum); } -void vpx_get8x8var_sse2(const uint8_t *src, int src_stride, - const uint8_t *ref, int ref_stride, - unsigned int *sse, int *sum) { +void vpx_get8x8var_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, + int ref_stride, unsigned int *sse, int *sum) { const __m128i zero = _mm_setzero_si128(); __m128i vsum = _mm_setzero_si128(); __m128i vsse = _mm_setzero_si128(); int i; for (i = 0; i < 8; i += 2) { - const __m128i src0 = _mm_unpacklo_epi8(_mm_loadl_epi64( - (const __m128i *)(src + i * src_stride)), zero); - const __m128i ref0 = _mm_unpacklo_epi8(_mm_loadl_epi64( - (const __m128i *)(ref + i * ref_stride)), zero); + const __m128i src0 = _mm_unpacklo_epi8( + _mm_loadl_epi64((const __m128i *)(src + i * src_stride)), zero); + const __m128i ref0 = _mm_unpacklo_epi8( + _mm_loadl_epi64((const __m128i *)(ref + i * ref_stride)), zero); const __m128i diff0 = _mm_sub_epi16(src0, ref0); - const __m128i src1 = _mm_unpacklo_epi8(_mm_loadl_epi64( - (const __m128i *)(src + (i + 1) * src_stride)), zero); - const __m128i ref1 = _mm_unpacklo_epi8(_mm_loadl_epi64( - (const __m128i *)(ref + (i + 1) * ref_stride)), zero); + const __m128i src1 = _mm_unpacklo_epi8( + _mm_loadl_epi64((const __m128i *)(src + (i + 1) * src_stride)), 
zero); + const __m128i ref1 = _mm_unpacklo_epi8( + _mm_loadl_epi64((const __m128i *)(ref + (i + 1) * ref_stride)), zero); const __m128i diff1 = _mm_sub_epi16(src1, ref1); vsum = _mm_add_epi16(vsum, diff0); @@ -104,8 +104,8 @@ void vpx_get8x8var_sse2(const uint8_t *src, int src_stride, } void vpx_get16x16var_sse2(const uint8_t *src, int src_stride, - const uint8_t *ref, int ref_stride, - unsigned int *sse, int *sum) { + const uint8_t *ref, int ref_stride, unsigned int *sse, + int *sum) { const __m128i zero = _mm_setzero_si128(); __m128i vsum = _mm_setzero_si128(); __m128i vsse = _mm_setzero_si128(); @@ -135,8 +135,8 @@ void vpx_get16x16var_sse2(const uint8_t *src, int src_stride, // sum vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 8)); vsum = _mm_add_epi16(vsum, _mm_srli_si128(vsum, 4)); - *sum = (int16_t)_mm_extract_epi16(vsum, 0) + - (int16_t)_mm_extract_epi16(vsum, 1); + *sum = + (int16_t)_mm_extract_epi16(vsum, 0) + (int16_t)_mm_extract_epi16(vsum, 1); // sse vsse = _mm_add_epi32(vsse, _mm_srli_si128(vsse, 8)); @@ -144,10 +144,9 @@ void vpx_get16x16var_sse2(const uint8_t *src, int src_stride, *sse = _mm_cvtsi128_si32(vsse); } - static void variance_sse2(const unsigned char *src, int src_stride, - const unsigned char *ref, int ref_stride, - int w, int h, unsigned int *sse, int *sum, + const unsigned char *ref, int ref_stride, int w, + int h, unsigned int *sse, int *sum, getNxMvar_fn_t var_fn, int block_size) { int i, j; @@ -158,8 +157,8 @@ static void variance_sse2(const unsigned char *src, int src_stride, for (j = 0; j < w; j += block_size) { unsigned int sse0; int sum0; - var_fn(src + src_stride * i + j, src_stride, - ref + ref_stride * i + j, ref_stride, &sse0, &sum0); + var_fn(src + src_stride * i + j, src_stride, ref + ref_stride * i + j, + ref_stride, &sse0, &sum0); *sse += sse0; *sum += sum0; } @@ -171,25 +170,25 @@ unsigned int vpx_variance4x4_sse2(const unsigned char *src, int src_stride, unsigned int *sse) { int sum; get4x4var_sse2(src, src_stride, 
ref, ref_stride, sse, &sum); - return *sse - (((unsigned int)sum * sum) >> 4); + return *sse - ((sum * sum) >> 4); } unsigned int vpx_variance8x4_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; - variance_sse2(src, src_stride, ref, ref_stride, 8, 4, - sse, &sum, get4x4var_sse2, 4); - return *sse - (((unsigned int)sum * sum) >> 5); + variance_sse2(src, src_stride, ref, ref_stride, 8, 4, sse, &sum, + get4x4var_sse2, 4); + return *sse - ((sum * sum) >> 5); } unsigned int vpx_variance4x8_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; - variance_sse2(src, src_stride, ref, ref_stride, 4, 8, - sse, &sum, get4x4var_sse2, 4); - return *sse - (((unsigned int)sum * sum) >> 5); + variance_sse2(src, src_stride, ref, ref_stride, 4, 8, sse, &sum, + get4x4var_sse2, 4); + return *sse - ((sum * sum) >> 5); } unsigned int vpx_variance8x8_sse2(const unsigned char *src, int src_stride, @@ -197,25 +196,25 @@ unsigned int vpx_variance8x8_sse2(const unsigned char *src, int src_stride, unsigned int *sse) { int sum; vpx_get8x8var_sse2(src, src_stride, ref, ref_stride, sse, &sum); - return *sse - (((unsigned int)sum * sum) >> 6); + return *sse - ((sum * sum) >> 6); } unsigned int vpx_variance16x8_sse2(const unsigned char *src, int src_stride, const unsigned char *ref, int ref_stride, unsigned int *sse) { int sum; - variance_sse2(src, src_stride, ref, ref_stride, 16, 8, - sse, &sum, vpx_get8x8var_sse2, 8); - return *sse - (((unsigned int)sum * sum) >> 7); + variance_sse2(src, src_stride, ref, ref_stride, 16, 8, sse, &sum, + vpx_get8x8var_sse2, 8); + return *sse - ((sum * sum) >> 7); } unsigned int vpx_variance8x16_sse2(const unsigned char *src, int src_stride, const unsigned char *ref, int ref_stride, unsigned int *sse) { int sum; - variance_sse2(src, src_stride, ref, ref_stride, 8, 16, - sse, &sum, vpx_get8x8var_sse2, 8); - return *sse - (((unsigned int)sum * sum) >> 7); + 
variance_sse2(src, src_stride, ref, ref_stride, 8, 16, sse, &sum, + vpx_get8x8var_sse2, 8); + return *sse - ((sum * sum) >> 7); } unsigned int vpx_variance16x16_sse2(const unsigned char *src, int src_stride, @@ -223,61 +222,61 @@ unsigned int vpx_variance16x16_sse2(const unsigned char *src, int src_stride, unsigned int *sse) { int sum; vpx_get16x16var_sse2(src, src_stride, ref, ref_stride, sse, &sum); - return *sse - (((unsigned int)sum * sum) >> 8); + return *sse - (((uint32_t)((int64_t)sum * sum)) >> 8); } unsigned int vpx_variance32x32_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; - variance_sse2(src, src_stride, ref, ref_stride, 32, 32, - sse, &sum, vpx_get16x16var_sse2, 16); - return *sse - (((int64_t)sum * sum) >> 10); + variance_sse2(src, src_stride, ref, ref_stride, 32, 32, sse, &sum, + vpx_get16x16var_sse2, 16); + return *sse - (unsigned int)(((int64_t)sum * sum) >> 10); } unsigned int vpx_variance32x16_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; - variance_sse2(src, src_stride, ref, ref_stride, 32, 16, - sse, &sum, vpx_get16x16var_sse2, 16); - return *sse - (((int64_t)sum * sum) >> 9); + variance_sse2(src, src_stride, ref, ref_stride, 32, 16, sse, &sum, + vpx_get16x16var_sse2, 16); + return *sse - (unsigned int)(((int64_t)sum * sum) >> 9); } unsigned int vpx_variance16x32_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; - variance_sse2(src, src_stride, ref, ref_stride, 16, 32, - sse, &sum, vpx_get16x16var_sse2, 16); - return *sse - (((int64_t)sum * sum) >> 9); + variance_sse2(src, src_stride, ref, ref_stride, 16, 32, sse, &sum, + vpx_get16x16var_sse2, 16); + return *sse - (unsigned int)(((int64_t)sum * sum) >> 9); } unsigned int vpx_variance64x64_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; - variance_sse2(src, 
src_stride, ref, ref_stride, 64, 64, - sse, &sum, vpx_get16x16var_sse2, 16); - return *sse - (((int64_t)sum * sum) >> 12); + variance_sse2(src, src_stride, ref, ref_stride, 64, 64, sse, &sum, + vpx_get16x16var_sse2, 16); + return *sse - (unsigned int)(((int64_t)sum * sum) >> 12); } unsigned int vpx_variance64x32_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; - variance_sse2(src, src_stride, ref, ref_stride, 64, 32, - sse, &sum, vpx_get16x16var_sse2, 16); - return *sse - (((int64_t)sum * sum) >> 11); + variance_sse2(src, src_stride, ref, ref_stride, 64, 32, sse, &sum, + vpx_get16x16var_sse2, 16); + return *sse - (unsigned int)(((int64_t)sum * sum) >> 11); } unsigned int vpx_variance32x64_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; - variance_sse2(src, src_stride, ref, ref_stride, 32, 64, - sse, &sum, vpx_get16x16var_sse2, 16); - return *sse - (((int64_t)sum * sum) >> 11); + variance_sse2(src, src_stride, ref, ref_stride, 32, 64, sse, &sum, + vpx_get16x16var_sse2, 16); + return *sse - (unsigned int)(((int64_t)sum * sum) >> 11); } unsigned int vpx_mse8x8_sse2(const uint8_t *src, int src_stride, @@ -308,170 +307,143 @@ unsigned int vpx_mse16x16_sse2(const uint8_t *src, int src_stride, return *sse; } -#if CONFIG_USE_X86INC // The 2 unused parameters are place holders for PIC enabled build. 
// These definitions are for functions defined in subpel_variance.asm -#define DECL(w, opt) \ - int vpx_sub_pixel_variance##w##xh_##opt(const uint8_t *src, \ - ptrdiff_t src_stride, \ - int x_offset, int y_offset, \ - const uint8_t *dst, \ - ptrdiff_t dst_stride, \ - int height, unsigned int *sse, \ - void *unused0, void *unused) +#define DECL(w, opt) \ + int vpx_sub_pixel_variance##w##xh_##opt( \ + const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \ + const uint8_t *dst, ptrdiff_t dst_stride, int height, unsigned int *sse, \ + void *unused0, void *unused) #define DECLS(opt1, opt2) \ - DECL(4, opt2); \ - DECL(8, opt1); \ + DECL(4, opt1); \ + DECL(8, opt1); \ DECL(16, opt1) -DECLS(sse2, sse); +DECLS(sse2, sse2); DECLS(ssse3, ssse3); #undef DECLS #undef DECL -#define FN(w, h, wf, wlog2, hlog2, opt, cast) \ -unsigned int vpx_sub_pixel_variance##w##x##h##_##opt(const uint8_t *src, \ - int src_stride, \ - int x_offset, \ - int y_offset, \ - const uint8_t *dst, \ - int dst_stride, \ - unsigned int *sse_ptr) { \ - unsigned int sse; \ - int se = vpx_sub_pixel_variance##wf##xh_##opt(src, src_stride, x_offset, \ - y_offset, dst, dst_stride, \ - h, &sse, NULL, NULL); \ - if (w > wf) { \ - unsigned int sse2; \ - int se2 = vpx_sub_pixel_variance##wf##xh_##opt(src + 16, src_stride, \ - x_offset, y_offset, \ - dst + 16, dst_stride, \ - h, &sse2, NULL, NULL); \ - se += se2; \ - sse += sse2; \ - if (w > wf * 2) { \ - se2 = vpx_sub_pixel_variance##wf##xh_##opt(src + 32, src_stride, \ - x_offset, y_offset, \ - dst + 32, dst_stride, \ - h, &sse2, NULL, NULL); \ - se += se2; \ - sse += sse2; \ - se2 = vpx_sub_pixel_variance##wf##xh_##opt(src + 48, src_stride, \ - x_offset, y_offset, \ - dst + 48, dst_stride, \ - h, &sse2, NULL, NULL); \ - se += se2; \ - sse += sse2; \ - } \ - } \ - *sse_ptr = sse; \ - return sse - ((cast se * se) >> (wlog2 + hlog2)); \ -} +#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast) \ + unsigned int 
vpx_sub_pixel_variance##w##x##h##_##opt( \ + const uint8_t *src, int src_stride, int x_offset, int y_offset, \ + const uint8_t *dst, int dst_stride, unsigned int *sse_ptr) { \ + unsigned int sse; \ + int se = vpx_sub_pixel_variance##wf##xh_##opt(src, src_stride, x_offset, \ + y_offset, dst, dst_stride, \ + h, &sse, NULL, NULL); \ + if (w > wf) { \ + unsigned int sse2; \ + int se2 = vpx_sub_pixel_variance##wf##xh_##opt( \ + src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h, \ + &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + if (w > wf * 2) { \ + se2 = vpx_sub_pixel_variance##wf##xh_##opt( \ + src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \ + &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + se2 = vpx_sub_pixel_variance##wf##xh_##opt( \ + src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \ + &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + } \ + } \ + *sse_ptr = sse; \ + return sse - (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2)); \ + } -#define FNS(opt1, opt2) \ -FN(64, 64, 16, 6, 6, opt1, (int64_t)); \ -FN(64, 32, 16, 6, 5, opt1, (int64_t)); \ -FN(32, 64, 16, 5, 6, opt1, (int64_t)); \ -FN(32, 32, 16, 5, 5, opt1, (int64_t)); \ -FN(32, 16, 16, 5, 4, opt1, (int64_t)); \ -FN(16, 32, 16, 4, 5, opt1, (int64_t)); \ -FN(16, 16, 16, 4, 4, opt1, (uint32_t)); \ -FN(16, 8, 16, 4, 3, opt1, (uint32_t)); \ -FN(8, 16, 8, 3, 4, opt1, (uint32_t)); \ -FN(8, 8, 8, 3, 3, opt1, (uint32_t)); \ -FN(8, 4, 8, 3, 2, opt1, (uint32_t)); \ -FN(4, 8, 4, 2, 3, opt2, (uint32_t)); \ -FN(4, 4, 4, 2, 2, opt2, (uint32_t)) +#define FNS(opt1, opt2) \ + FN(64, 64, 16, 6, 6, opt1, (int64_t), (int64_t)); \ + FN(64, 32, 16, 6, 5, opt1, (int64_t), (int64_t)); \ + FN(32, 64, 16, 5, 6, opt1, (int64_t), (int64_t)); \ + FN(32, 32, 16, 5, 5, opt1, (int64_t), (int64_t)); \ + FN(32, 16, 16, 5, 4, opt1, (int64_t), (int64_t)); \ + FN(16, 32, 16, 4, 5, opt1, (int64_t), (int64_t)); \ + FN(16, 16, 16, 4, 4, opt1, (uint32_t), 
(int64_t)); \ + FN(16, 8, 16, 4, 3, opt1, (int32_t), (int32_t)); \ + FN(8, 16, 8, 3, 4, opt1, (int32_t), (int32_t)); \ + FN(8, 8, 8, 3, 3, opt1, (int32_t), (int32_t)); \ + FN(8, 4, 8, 3, 2, opt1, (int32_t), (int32_t)); \ + FN(4, 8, 4, 2, 3, opt1, (int32_t), (int32_t)); \ + FN(4, 4, 4, 2, 2, opt1, (int32_t), (int32_t)) -FNS(sse2, sse); +FNS(sse2, sse2); FNS(ssse3, ssse3); #undef FNS #undef FN // The 2 unused parameters are place holders for PIC enabled build. -#define DECL(w, opt) \ -int vpx_sub_pixel_avg_variance##w##xh_##opt(const uint8_t *src, \ - ptrdiff_t src_stride, \ - int x_offset, int y_offset, \ - const uint8_t *dst, \ - ptrdiff_t dst_stride, \ - const uint8_t *sec, \ - ptrdiff_t sec_stride, \ - int height, unsigned int *sse, \ - void *unused0, void *unused) +#define DECL(w, opt) \ + int vpx_sub_pixel_avg_variance##w##xh_##opt( \ + const uint8_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \ + const uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *sec, \ + ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0, \ + void *unused) #define DECLS(opt1, opt2) \ -DECL(4, opt2); \ -DECL(8, opt1); \ -DECL(16, opt1) + DECL(4, opt1); \ + DECL(8, opt1); \ + DECL(16, opt1) -DECLS(sse2, sse); +DECLS(sse2, sse2); DECLS(ssse3, ssse3); #undef DECL #undef DECLS -#define FN(w, h, wf, wlog2, hlog2, opt, cast) \ -unsigned int vpx_sub_pixel_avg_variance##w##x##h##_##opt(const uint8_t *src, \ - int src_stride, \ - int x_offset, \ - int y_offset, \ - const uint8_t *dst, \ - int dst_stride, \ - unsigned int *sseptr, \ - const uint8_t *sec) { \ - unsigned int sse; \ - int se = vpx_sub_pixel_avg_variance##wf##xh_##opt(src, src_stride, x_offset, \ - y_offset, dst, dst_stride, \ - sec, w, h, &sse, NULL, \ - NULL); \ - if (w > wf) { \ - unsigned int sse2; \ - int se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt(src + 16, src_stride, \ - x_offset, y_offset, \ - dst + 16, dst_stride, \ - sec + 16, w, h, &sse2, \ - NULL, NULL); \ - se += se2; \ - sse += sse2; \ - 
if (w > wf * 2) { \ - se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt(src + 32, src_stride, \ - x_offset, y_offset, \ - dst + 32, dst_stride, \ - sec + 32, w, h, &sse2, \ - NULL, NULL); \ - se += se2; \ - sse += sse2; \ - se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt(src + 48, src_stride, \ - x_offset, y_offset, \ - dst + 48, dst_stride, \ - sec + 48, w, h, &sse2, \ - NULL, NULL); \ - se += se2; \ - sse += sse2; \ - } \ - } \ - *sseptr = sse; \ - return sse - ((cast se * se) >> (wlog2 + hlog2)); \ -} +#define FN(w, h, wf, wlog2, hlog2, opt, cast_prod, cast) \ + unsigned int vpx_sub_pixel_avg_variance##w##x##h##_##opt( \ + const uint8_t *src, int src_stride, int x_offset, int y_offset, \ + const uint8_t *dst, int dst_stride, unsigned int *sseptr, \ + const uint8_t *sec) { \ + unsigned int sse; \ + int se = vpx_sub_pixel_avg_variance##wf##xh_##opt( \ + src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \ + NULL, NULL); \ + if (w > wf) { \ + unsigned int sse2; \ + int se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt( \ + src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, \ + sec + 16, w, h, &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + if (w > wf * 2) { \ + se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt( \ + src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, \ + sec + 32, w, h, &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + se2 = vpx_sub_pixel_avg_variance##wf##xh_##opt( \ + src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, \ + sec + 48, w, h, &sse2, NULL, NULL); \ + se += se2; \ + sse += sse2; \ + } \ + } \ + *sseptr = sse; \ + return sse - (unsigned int)(cast_prod(cast se * se) >> (wlog2 + hlog2)); \ + } -#define FNS(opt1, opt2) \ -FN(64, 64, 16, 6, 6, opt1, (int64_t)); \ -FN(64, 32, 16, 6, 5, opt1, (int64_t)); \ -FN(32, 64, 16, 5, 6, opt1, (int64_t)); \ -FN(32, 32, 16, 5, 5, opt1, (int64_t)); \ -FN(32, 16, 16, 5, 4, opt1, (int64_t)); \ -FN(16, 32, 16, 4, 5, opt1, (int64_t)); \ -FN(16, 16, 
16, 4, 4, opt1, (uint32_t)); \ -FN(16, 8, 16, 4, 3, opt1, (uint32_t)); \ -FN(8, 16, 8, 3, 4, opt1, (uint32_t)); \ -FN(8, 8, 8, 3, 3, opt1, (uint32_t)); \ -FN(8, 4, 8, 3, 2, opt1, (uint32_t)); \ -FN(4, 8, 4, 2, 3, opt2, (uint32_t)); \ -FN(4, 4, 4, 2, 2, opt2, (uint32_t)) +#define FNS(opt1, opt2) \ + FN(64, 64, 16, 6, 6, opt1, (int64_t), (int64_t)); \ + FN(64, 32, 16, 6, 5, opt1, (int64_t), (int64_t)); \ + FN(32, 64, 16, 5, 6, opt1, (int64_t), (int64_t)); \ + FN(32, 32, 16, 5, 5, opt1, (int64_t), (int64_t)); \ + FN(32, 16, 16, 5, 4, opt1, (int64_t), (int64_t)); \ + FN(16, 32, 16, 4, 5, opt1, (int64_t), (int64_t)); \ + FN(16, 16, 16, 4, 4, opt1, (uint32_t), (int64_t)); \ + FN(16, 8, 16, 4, 3, opt1, (uint32_t), (int32_t)); \ + FN(8, 16, 8, 3, 4, opt1, (uint32_t), (int32_t)); \ + FN(8, 8, 8, 3, 3, opt1, (uint32_t), (int32_t)); \ + FN(8, 4, 8, 3, 2, opt1, (uint32_t), (int32_t)); \ + FN(4, 8, 4, 2, 3, opt1, (uint32_t), (int32_t)); \ + FN(4, 4, 4, 2, 2, opt1, (uint32_t), (int32_t)) FNS(sse2, sse); FNS(ssse3, ssse3); #undef FNS #undef FN -#endif // CONFIG_USE_X86INC diff --git a/libs/libvpx/vpx_dsp/x86/vpx_asm_stubs.c b/libs/libvpx/vpx_dsp/x86/vpx_asm_stubs.c index 422b0fc422..727d9d1156 100644 --- a/libs/libvpx/vpx_dsp/x86/vpx_asm_stubs.c +++ b/libs/libvpx/vpx_dsp/x86/vpx_asm_stubs.c @@ -75,7 +75,7 @@ FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_, sse2); // const int16_t *filter_y, int y_step_q4, // int w, int h); FUN_CONV_2D(, sse2); -FUN_CONV_2D(avg_ , sse2); +FUN_CONV_2D(avg_, sse2); #if CONFIG_VP9_HIGHBITDEPTH && ARCH_X86_64 highbd_filter8_1dfunction vpx_highbd_filter_block1d16_v8_sse2; @@ -157,6 +157,6 @@ HIGH_FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_, // const int16_t *filter_y, int y_step_q4, // int w, int h, int bd); HIGH_FUN_CONV_2D(, sse2); -HIGH_FUN_CONV_2D(avg_ , sse2); +HIGH_FUN_CONV_2D(avg_, sse2); #endif // CONFIG_VP9_HIGHBITDEPTH && ARCH_X86_64 #endif // HAVE_SSE2 diff --git 
a/libs/libvpx/vpx_dsp/x86/vpx_convolve_copy_sse2.asm b/libs/libvpx/vpx_dsp/x86/vpx_convolve_copy_sse2.asm index abc0270655..e2311c1167 100644 --- a/libs/libvpx/vpx_dsp/x86/vpx_convolve_copy_sse2.asm +++ b/libs/libvpx/vpx_dsp/x86/vpx_convolve_copy_sse2.asm @@ -87,7 +87,7 @@ cglobal convolve_%1, 4, 7, 4+AUX_XMM_REGS, src, src_stride, \ RET %endif -.w64 +.w64: mov r4d, dword hm .loop64: movu m0, [srcq] diff --git a/libs/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c b/libs/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c index b718678537..7c1ecc0148 100644 --- a/libs/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c +++ b/libs/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c @@ -8,10 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ -// Due to a header conflict between math.h and intrinsics includes with ceil() -// in certain configurations under vs9 this include needs to precede -// immintrin.h. - #include #include "./vpx_dsp_rtcd.h" @@ -40,35 +36,32 @@ DECLARE_ALIGNED(32, static const uint8_t, filt4_global_avx2[32]) = { }; #if defined(__clang__) -# if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ <= 3) || \ - (defined(__APPLE__) && \ - ((__clang_major__ == 4 && __clang_minor__ <= 2) || \ - (__clang_major__ == 5 && __clang_minor__ == 0))) - -# define MM256_BROADCASTSI128_SI256(x) \ - _mm_broadcastsi128_si256((__m128i const *)&(x)) -# else // clang > 3.3, and not 5.0 on macosx. -# define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x) -# endif // clang <= 3.3 +#if (__clang_major__ > 0 && __clang_major__ < 3) || \ + (__clang_major__ == 3 && __clang_minor__ <= 3) || \ + (defined(__APPLE__) && defined(__apple_build_version__) && \ + ((__clang_major__ == 4 && __clang_minor__ <= 2) || \ + (__clang_major__ == 5 && __clang_minor__ == 0))) +#define MM256_BROADCASTSI128_SI256(x) \ + _mm_broadcastsi128_si256((__m128i const *)&(x)) +#else // clang > 3.3, and not 5.0 on macosx. 
+#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x) +#endif // clang <= 3.3 #elif defined(__GNUC__) -# if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ <= 6) -# define MM256_BROADCASTSI128_SI256(x) \ - _mm_broadcastsi128_si256((__m128i const *)&(x)) -# elif __GNUC__ == 4 && __GNUC_MINOR__ == 7 -# define MM256_BROADCASTSI128_SI256(x) _mm_broadcastsi128_si256(x) -# else // gcc > 4.7 -# define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x) -# endif // gcc <= 4.6 -#else // !(gcc || clang) -# define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x) +#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ <= 6) +#define MM256_BROADCASTSI128_SI256(x) \ + _mm_broadcastsi128_si256((__m128i const *)&(x)) +#elif __GNUC__ == 4 && __GNUC_MINOR__ == 7 +#define MM256_BROADCASTSI128_SI256(x) _mm_broadcastsi128_si256(x) +#else // gcc > 4.7 +#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x) +#endif // gcc <= 4.6 +#else // !(gcc || clang) +#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x) #endif // __clang__ -static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr, - ptrdiff_t src_pixels_per_line, - uint8_t *output_ptr, - ptrdiff_t output_pitch, - uint32_t output_height, - const int16_t *filter) { +static void vpx_filter_block1d16_h8_avx2( + const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr, + ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) { __m128i filtersReg; __m256i addFilterReg64, filt1Reg, filt2Reg, filt3Reg, filt4Reg; __m256i firstFilters, secondFilters, thirdFilters, forthFilters; @@ -82,26 +75,22 @@ static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr, filtersReg = _mm_loadu_si128((const __m128i *)filter); // converting the 16 bit (short) to 8 bit (byte) and have the same data // in both lanes of 128 bit register. 
- filtersReg =_mm_packs_epi16(filtersReg, filtersReg); + filtersReg = _mm_packs_epi16(filtersReg, filtersReg); // have the same data in both lanes of a 256 bit register filtersReg32 = MM256_BROADCASTSI128_SI256(filtersReg); // duplicate only the first 16 bits (first and second byte) // across 256 bit register - firstFilters = _mm256_shuffle_epi8(filtersReg32, - _mm256_set1_epi16(0x100u)); + firstFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x100u)); // duplicate only the second 16 bits (third and forth byte) // across 256 bit register - secondFilters = _mm256_shuffle_epi8(filtersReg32, - _mm256_set1_epi16(0x302u)); + secondFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x302u)); // duplicate only the third 16 bits (fifth and sixth byte) // across 256 bit register - thirdFilters = _mm256_shuffle_epi8(filtersReg32, - _mm256_set1_epi16(0x504u)); + thirdFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x504u)); // duplicate only the forth 16 bits (seventh and eighth byte) // across 256 bit register - forthFilters = _mm256_shuffle_epi8(filtersReg32, - _mm256_set1_epi16(0x706u)); + forthFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x706u)); filt1Reg = _mm256_load_si256((__m256i const *)filt1_global_avx2); filt2Reg = _mm256_load_si256((__m256i const *)filt2_global_avx2); @@ -111,17 +100,18 @@ static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr, // multiple the size of the source and destination stride by two src_stride = src_pixels_per_line << 1; dst_stride = output_pitch << 1; - for (i = output_height; i > 1; i-=2) { + for (i = output_height; i > 1; i -= 2) { // load the 2 strides of source - srcReg32b1 = _mm256_castsi128_si256( - _mm_loadu_si128((const __m128i *)(src_ptr - 3))); - srcReg32b1 = _mm256_inserti128_si256(srcReg32b1, - _mm_loadu_si128((const __m128i *) - (src_ptr+src_pixels_per_line-3)), 1); + srcReg32b1 = + _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(src_ptr - 3))); + 
srcReg32b1 = _mm256_inserti128_si256( + srcReg32b1, + _mm_loadu_si128((const __m128i *)(src_ptr + src_pixels_per_line - 3)), + 1); // filter the source buffer - srcRegFilt32b1_1= _mm256_shuffle_epi8(srcReg32b1, filt1Reg); - srcRegFilt32b2= _mm256_shuffle_epi8(srcReg32b1, filt4Reg); + srcRegFilt32b1_1 = _mm256_shuffle_epi8(srcReg32b1, filt1Reg); + srcRegFilt32b2 = _mm256_shuffle_epi8(srcReg32b1, filt4Reg); // multiply 2 adjacent elements with the filter and add the result srcRegFilt32b1_1 = _mm256_maddubs_epi16(srcRegFilt32b1_1, firstFilters); @@ -131,28 +121,29 @@ static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr, srcRegFilt32b1_1 = _mm256_adds_epi16(srcRegFilt32b1_1, srcRegFilt32b2); // filter the source buffer - srcRegFilt32b3= _mm256_shuffle_epi8(srcReg32b1, filt2Reg); - srcRegFilt32b2= _mm256_shuffle_epi8(srcReg32b1, filt3Reg); + srcRegFilt32b3 = _mm256_shuffle_epi8(srcReg32b1, filt2Reg); + srcRegFilt32b2 = _mm256_shuffle_epi8(srcReg32b1, filt3Reg); // multiply 2 adjacent elements with the filter and add the result srcRegFilt32b3 = _mm256_maddubs_epi16(srcRegFilt32b3, secondFilters); srcRegFilt32b2 = _mm256_maddubs_epi16(srcRegFilt32b2, thirdFilters); // add and saturate the results together - srcRegFilt32b1_1 = _mm256_adds_epi16(srcRegFilt32b1_1, - _mm256_min_epi16(srcRegFilt32b3, srcRegFilt32b2)); + srcRegFilt32b1_1 = _mm256_adds_epi16( + srcRegFilt32b1_1, _mm256_min_epi16(srcRegFilt32b3, srcRegFilt32b2)); // reading 2 strides of the next 16 bytes // (part of it was being read by earlier read) - srcReg32b2 = _mm256_castsi128_si256( - _mm_loadu_si128((const __m128i *)(src_ptr + 5))); - srcReg32b2 = _mm256_inserti128_si256(srcReg32b2, - _mm_loadu_si128((const __m128i *) - (src_ptr+src_pixels_per_line+5)), 1); + srcReg32b2 = + _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(src_ptr + 5))); + srcReg32b2 = _mm256_inserti128_si256( + srcReg32b2, + _mm_loadu_si128((const __m128i *)(src_ptr + src_pixels_per_line + 5)), + 1); // add and saturate 
the results together - srcRegFilt32b1_1 = _mm256_adds_epi16(srcRegFilt32b1_1, - _mm256_max_epi16(srcRegFilt32b3, srcRegFilt32b2)); + srcRegFilt32b1_1 = _mm256_adds_epi16( + srcRegFilt32b1_1, _mm256_max_epi16(srcRegFilt32b3, srcRegFilt32b2)); // filter the source buffer srcRegFilt32b2_1 = _mm256_shuffle_epi8(srcReg32b2, filt1Reg); @@ -166,19 +157,18 @@ static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr, srcRegFilt32b2_1 = _mm256_adds_epi16(srcRegFilt32b2_1, srcRegFilt32b2); // filter the source buffer - srcRegFilt32b3= _mm256_shuffle_epi8(srcReg32b2, filt2Reg); - srcRegFilt32b2= _mm256_shuffle_epi8(srcReg32b2, filt3Reg); + srcRegFilt32b3 = _mm256_shuffle_epi8(srcReg32b2, filt2Reg); + srcRegFilt32b2 = _mm256_shuffle_epi8(srcReg32b2, filt3Reg); // multiply 2 adjacent elements with the filter and add the result srcRegFilt32b3 = _mm256_maddubs_epi16(srcRegFilt32b3, secondFilters); srcRegFilt32b2 = _mm256_maddubs_epi16(srcRegFilt32b2, thirdFilters); // add and saturate the results together - srcRegFilt32b2_1 = _mm256_adds_epi16(srcRegFilt32b2_1, - _mm256_min_epi16(srcRegFilt32b3, srcRegFilt32b2)); - srcRegFilt32b2_1 = _mm256_adds_epi16(srcRegFilt32b2_1, - _mm256_max_epi16(srcRegFilt32b3, srcRegFilt32b2)); - + srcRegFilt32b2_1 = _mm256_adds_epi16( + srcRegFilt32b2_1, _mm256_min_epi16(srcRegFilt32b3, srcRegFilt32b2)); + srcRegFilt32b2_1 = _mm256_adds_epi16( + srcRegFilt32b2_1, _mm256_max_epi16(srcRegFilt32b3, srcRegFilt32b2)); srcRegFilt32b1_1 = _mm256_adds_epi16(srcRegFilt32b1_1, addFilterReg64); @@ -191,19 +181,18 @@ static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr, // shrink to 8 bit each 16 bits, the first lane contain the first // convolve result and the second lane contain the second convolve // result - srcRegFilt32b1_1 = _mm256_packus_epi16(srcRegFilt32b1_1, - srcRegFilt32b2_1); + srcRegFilt32b1_1 = _mm256_packus_epi16(srcRegFilt32b1_1, srcRegFilt32b2_1); - src_ptr+=src_stride; + src_ptr += src_stride; // save 16 bytes - 
_mm_store_si128((__m128i*)output_ptr, - _mm256_castsi256_si128(srcRegFilt32b1_1)); + _mm_store_si128((__m128i *)output_ptr, + _mm256_castsi256_si128(srcRegFilt32b1_1)); // save the next 16 bits - _mm_store_si128((__m128i*)(output_ptr+output_pitch), - _mm256_extractf128_si256(srcRegFilt32b1_1, 1)); - output_ptr+=dst_stride; + _mm_store_si128((__m128i *)(output_ptr + output_pitch), + _mm256_extractf128_si256(srcRegFilt32b1_1, 1)); + output_ptr += dst_stride; } // if the number of strides is odd. @@ -215,83 +204,74 @@ static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr, srcReg1 = _mm_loadu_si128((const __m128i *)(src_ptr - 3)); // filter the source buffer - srcRegFilt1_1 = _mm_shuffle_epi8(srcReg1, - _mm256_castsi256_si128(filt1Reg)); - srcRegFilt2 = _mm_shuffle_epi8(srcReg1, - _mm256_castsi256_si128(filt4Reg)); + srcRegFilt1_1 = _mm_shuffle_epi8(srcReg1, _mm256_castsi256_si128(filt1Reg)); + srcRegFilt2 = _mm_shuffle_epi8(srcReg1, _mm256_castsi256_si128(filt4Reg)); // multiply 2 adjacent elements with the filter and add the result - srcRegFilt1_1 = _mm_maddubs_epi16(srcRegFilt1_1, - _mm256_castsi256_si128(firstFilters)); - srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, - _mm256_castsi256_si128(forthFilters)); + srcRegFilt1_1 = + _mm_maddubs_epi16(srcRegFilt1_1, _mm256_castsi256_si128(firstFilters)); + srcRegFilt2 = + _mm_maddubs_epi16(srcRegFilt2, _mm256_castsi256_si128(forthFilters)); // add and saturate the results together srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1, srcRegFilt2); // filter the source buffer - srcRegFilt3= _mm_shuffle_epi8(srcReg1, - _mm256_castsi256_si128(filt2Reg)); - srcRegFilt2= _mm_shuffle_epi8(srcReg1, - _mm256_castsi256_si128(filt3Reg)); + srcRegFilt3 = _mm_shuffle_epi8(srcReg1, _mm256_castsi256_si128(filt2Reg)); + srcRegFilt2 = _mm_shuffle_epi8(srcReg1, _mm256_castsi256_si128(filt3Reg)); // multiply 2 adjacent elements with the filter and add the result - srcRegFilt3 = _mm_maddubs_epi16(srcRegFilt3, - 
_mm256_castsi256_si128(secondFilters)); - srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, - _mm256_castsi256_si128(thirdFilters)); + srcRegFilt3 = + _mm_maddubs_epi16(srcRegFilt3, _mm256_castsi256_si128(secondFilters)); + srcRegFilt2 = + _mm_maddubs_epi16(srcRegFilt2, _mm256_castsi256_si128(thirdFilters)); // add and saturate the results together - srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1, - _mm_min_epi16(srcRegFilt3, srcRegFilt2)); + srcRegFilt1_1 = + _mm_adds_epi16(srcRegFilt1_1, _mm_min_epi16(srcRegFilt3, srcRegFilt2)); // reading the next 16 bytes // (part of it was being read by earlier read) srcReg2 = _mm_loadu_si128((const __m128i *)(src_ptr + 5)); // add and saturate the results together - srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1, - _mm_max_epi16(srcRegFilt3, srcRegFilt2)); + srcRegFilt1_1 = + _mm_adds_epi16(srcRegFilt1_1, _mm_max_epi16(srcRegFilt3, srcRegFilt2)); // filter the source buffer - srcRegFilt2_1 = _mm_shuffle_epi8(srcReg2, - _mm256_castsi256_si128(filt1Reg)); - srcRegFilt2 = _mm_shuffle_epi8(srcReg2, - _mm256_castsi256_si128(filt4Reg)); + srcRegFilt2_1 = _mm_shuffle_epi8(srcReg2, _mm256_castsi256_si128(filt1Reg)); + srcRegFilt2 = _mm_shuffle_epi8(srcReg2, _mm256_castsi256_si128(filt4Reg)); // multiply 2 adjacent elements with the filter and add the result - srcRegFilt2_1 = _mm_maddubs_epi16(srcRegFilt2_1, - _mm256_castsi256_si128(firstFilters)); - srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, - _mm256_castsi256_si128(forthFilters)); + srcRegFilt2_1 = + _mm_maddubs_epi16(srcRegFilt2_1, _mm256_castsi256_si128(firstFilters)); + srcRegFilt2 = + _mm_maddubs_epi16(srcRegFilt2, _mm256_castsi256_si128(forthFilters)); // add and saturate the results together srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1, srcRegFilt2); // filter the source buffer - srcRegFilt3 = _mm_shuffle_epi8(srcReg2, - _mm256_castsi256_si128(filt2Reg)); - srcRegFilt2 = _mm_shuffle_epi8(srcReg2, - _mm256_castsi256_si128(filt3Reg)); + srcRegFilt3 = _mm_shuffle_epi8(srcReg2, 
_mm256_castsi256_si128(filt2Reg)); + srcRegFilt2 = _mm_shuffle_epi8(srcReg2, _mm256_castsi256_si128(filt3Reg)); // multiply 2 adjacent elements with the filter and add the result - srcRegFilt3 = _mm_maddubs_epi16(srcRegFilt3, - _mm256_castsi256_si128(secondFilters)); - srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, - _mm256_castsi256_si128(thirdFilters)); + srcRegFilt3 = + _mm_maddubs_epi16(srcRegFilt3, _mm256_castsi256_si128(secondFilters)); + srcRegFilt2 = + _mm_maddubs_epi16(srcRegFilt2, _mm256_castsi256_si128(thirdFilters)); // add and saturate the results together - srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1, - _mm_min_epi16(srcRegFilt3, srcRegFilt2)); - srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1, - _mm_max_epi16(srcRegFilt3, srcRegFilt2)); + srcRegFilt2_1 = + _mm_adds_epi16(srcRegFilt2_1, _mm_min_epi16(srcRegFilt3, srcRegFilt2)); + srcRegFilt2_1 = + _mm_adds_epi16(srcRegFilt2_1, _mm_max_epi16(srcRegFilt3, srcRegFilt2)); + srcRegFilt1_1 = + _mm_adds_epi16(srcRegFilt1_1, _mm256_castsi256_si128(addFilterReg64)); - srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1, - _mm256_castsi256_si128(addFilterReg64)); - - srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1, - _mm256_castsi256_si128(addFilterReg64)); + srcRegFilt2_1 = + _mm_adds_epi16(srcRegFilt2_1, _mm256_castsi256_si128(addFilterReg64)); // shift by 7 bit each 16 bit srcRegFilt1_1 = _mm_srai_epi16(srcRegFilt1_1, 7); @@ -303,16 +283,13 @@ static void vpx_filter_block1d16_h8_avx2(const uint8_t *src_ptr, srcRegFilt1_1 = _mm_packus_epi16(srcRegFilt1_1, srcRegFilt2_1); // save 16 bytes - _mm_store_si128((__m128i*)output_ptr, srcRegFilt1_1); + _mm_store_si128((__m128i *)output_ptr, srcRegFilt1_1); } } -static void vpx_filter_block1d16_v8_avx2(const uint8_t *src_ptr, - ptrdiff_t src_pitch, - uint8_t *output_ptr, - ptrdiff_t out_pitch, - uint32_t output_height, - const int16_t *filter) { +static void vpx_filter_block1d16_v8_avx2( + const uint8_t *src_ptr, ptrdiff_t src_pitch, uint8_t *output_ptr, + ptrdiff_t out_pitch, 
uint32_t output_height, const int16_t *filter) { __m128i filtersReg; __m256i addFilterReg64; __m256i srcReg32b1, srcReg32b2, srcReg32b3, srcReg32b4, srcReg32b5; @@ -327,60 +304,56 @@ static void vpx_filter_block1d16_v8_avx2(const uint8_t *src_ptr, filtersReg = _mm_loadu_si128((const __m128i *)filter); // converting the 16 bit (short) to 8 bit (byte) and have the // same data in both lanes of 128 bit register. - filtersReg =_mm_packs_epi16(filtersReg, filtersReg); + filtersReg = _mm_packs_epi16(filtersReg, filtersReg); // have the same data in both lanes of a 256 bit register filtersReg32 = MM256_BROADCASTSI128_SI256(filtersReg); // duplicate only the first 16 bits (first and second byte) // across 256 bit register - firstFilters = _mm256_shuffle_epi8(filtersReg32, - _mm256_set1_epi16(0x100u)); + firstFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x100u)); // duplicate only the second 16 bits (third and forth byte) // across 256 bit register - secondFilters = _mm256_shuffle_epi8(filtersReg32, - _mm256_set1_epi16(0x302u)); + secondFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x302u)); // duplicate only the third 16 bits (fifth and sixth byte) // across 256 bit register - thirdFilters = _mm256_shuffle_epi8(filtersReg32, - _mm256_set1_epi16(0x504u)); + thirdFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x504u)); // duplicate only the forth 16 bits (seventh and eighth byte) // across 256 bit register - forthFilters = _mm256_shuffle_epi8(filtersReg32, - _mm256_set1_epi16(0x706u)); + forthFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x706u)); // multiple the size of the source and destination stride by two src_stride = src_pitch << 1; dst_stride = out_pitch << 1; // load 16 bytes 7 times in stride of src_pitch - srcReg32b1 = _mm256_castsi128_si256( - _mm_loadu_si128((const __m128i *)(src_ptr))); + srcReg32b1 = + _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(src_ptr))); srcReg32b2 = 
_mm256_castsi128_si256( - _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch))); + _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch))); srcReg32b3 = _mm256_castsi128_si256( - _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 2))); + _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 2))); srcReg32b4 = _mm256_castsi128_si256( - _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 3))); + _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 3))); srcReg32b5 = _mm256_castsi128_si256( - _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 4))); + _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 4))); srcReg32b6 = _mm256_castsi128_si256( - _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 5))); + _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 5))); srcReg32b7 = _mm256_castsi128_si256( - _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 6))); + _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 6))); // have each consecutive loads on the same 256 register srcReg32b1 = _mm256_inserti128_si256(srcReg32b1, - _mm256_castsi256_si128(srcReg32b2), 1); + _mm256_castsi256_si128(srcReg32b2), 1); srcReg32b2 = _mm256_inserti128_si256(srcReg32b2, - _mm256_castsi256_si128(srcReg32b3), 1); + _mm256_castsi256_si128(srcReg32b3), 1); srcReg32b3 = _mm256_inserti128_si256(srcReg32b3, - _mm256_castsi256_si128(srcReg32b4), 1); + _mm256_castsi256_si128(srcReg32b4), 1); srcReg32b4 = _mm256_inserti128_si256(srcReg32b4, - _mm256_castsi256_si128(srcReg32b5), 1); + _mm256_castsi256_si128(srcReg32b5), 1); srcReg32b5 = _mm256_inserti128_si256(srcReg32b5, - _mm256_castsi256_si128(srcReg32b6), 1); + _mm256_castsi256_si128(srcReg32b6), 1); srcReg32b6 = _mm256_inserti128_si256(srcReg32b6, - _mm256_castsi256_si128(srcReg32b7), 1); + _mm256_castsi256_si128(srcReg32b7), 1); // merge every two consecutive registers except the last one srcReg32b10 = _mm256_unpacklo_epi8(srcReg32b1, srcReg32b2); @@ -398,89 +371,87 @@ static void 
vpx_filter_block1d16_v8_avx2(const uint8_t *src_ptr, // save srcReg32b5 = _mm256_unpackhi_epi8(srcReg32b5, srcReg32b6); + for (i = output_height; i > 1; i -= 2) { + // load the last 2 loads of 16 bytes and have every two + // consecutive loads in the same 256 bit register + srcReg32b8 = _mm256_castsi128_si256( + _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 7))); + srcReg32b7 = _mm256_inserti128_si256(srcReg32b7, + _mm256_castsi256_si128(srcReg32b8), 1); + srcReg32b9 = _mm256_castsi128_si256( + _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 8))); + srcReg32b8 = _mm256_inserti128_si256(srcReg32b8, + _mm256_castsi256_si128(srcReg32b9), 1); - for (i = output_height; i > 1; i-=2) { - // load the last 2 loads of 16 bytes and have every two - // consecutive loads in the same 256 bit register - srcReg32b8 = _mm256_castsi128_si256( - _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 7))); - srcReg32b7 = _mm256_inserti128_si256(srcReg32b7, - _mm256_castsi256_si128(srcReg32b8), 1); - srcReg32b9 = _mm256_castsi128_si256( - _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 8))); - srcReg32b8 = _mm256_inserti128_si256(srcReg32b8, - _mm256_castsi256_si128(srcReg32b9), 1); + // merge every two consecutive registers + // save + srcReg32b4 = _mm256_unpacklo_epi8(srcReg32b7, srcReg32b8); + srcReg32b7 = _mm256_unpackhi_epi8(srcReg32b7, srcReg32b8); - // merge every two consecutive registers - // save - srcReg32b4 = _mm256_unpacklo_epi8(srcReg32b7, srcReg32b8); - srcReg32b7 = _mm256_unpackhi_epi8(srcReg32b7, srcReg32b8); + // multiply 2 adjacent elements with the filter and add the result + srcReg32b10 = _mm256_maddubs_epi16(srcReg32b10, firstFilters); + srcReg32b6 = _mm256_maddubs_epi16(srcReg32b4, forthFilters); - // multiply 2 adjacent elements with the filter and add the result - srcReg32b10 = _mm256_maddubs_epi16(srcReg32b10, firstFilters); - srcReg32b6 = _mm256_maddubs_epi16(srcReg32b4, forthFilters); + // add and saturate the results together + 
srcReg32b10 = _mm256_adds_epi16(srcReg32b10, srcReg32b6); - // add and saturate the results together - srcReg32b10 = _mm256_adds_epi16(srcReg32b10, srcReg32b6); + // multiply 2 adjacent elements with the filter and add the result + srcReg32b8 = _mm256_maddubs_epi16(srcReg32b11, secondFilters); + srcReg32b12 = _mm256_maddubs_epi16(srcReg32b2, thirdFilters); - // multiply 2 adjacent elements with the filter and add the result - srcReg32b8 = _mm256_maddubs_epi16(srcReg32b11, secondFilters); - srcReg32b12 = _mm256_maddubs_epi16(srcReg32b2, thirdFilters); + // add and saturate the results together + srcReg32b10 = _mm256_adds_epi16(srcReg32b10, + _mm256_min_epi16(srcReg32b8, srcReg32b12)); + srcReg32b10 = _mm256_adds_epi16(srcReg32b10, + _mm256_max_epi16(srcReg32b8, srcReg32b12)); - // add and saturate the results together - srcReg32b10 = _mm256_adds_epi16(srcReg32b10, - _mm256_min_epi16(srcReg32b8, srcReg32b12)); - srcReg32b10 = _mm256_adds_epi16(srcReg32b10, - _mm256_max_epi16(srcReg32b8, srcReg32b12)); + // multiply 2 adjacent elements with the filter and add the result + srcReg32b1 = _mm256_maddubs_epi16(srcReg32b1, firstFilters); + srcReg32b6 = _mm256_maddubs_epi16(srcReg32b7, forthFilters); - // multiply 2 adjacent elements with the filter and add the result - srcReg32b1 = _mm256_maddubs_epi16(srcReg32b1, firstFilters); - srcReg32b6 = _mm256_maddubs_epi16(srcReg32b7, forthFilters); + srcReg32b1 = _mm256_adds_epi16(srcReg32b1, srcReg32b6); - srcReg32b1 = _mm256_adds_epi16(srcReg32b1, srcReg32b6); + // multiply 2 adjacent elements with the filter and add the result + srcReg32b8 = _mm256_maddubs_epi16(srcReg32b3, secondFilters); + srcReg32b12 = _mm256_maddubs_epi16(srcReg32b5, thirdFilters); - // multiply 2 adjacent elements with the filter and add the result - srcReg32b8 = _mm256_maddubs_epi16(srcReg32b3, secondFilters); - srcReg32b12 = _mm256_maddubs_epi16(srcReg32b5, thirdFilters); + // add and saturate the results together + srcReg32b1 = 
_mm256_adds_epi16(srcReg32b1, + _mm256_min_epi16(srcReg32b8, srcReg32b12)); + srcReg32b1 = _mm256_adds_epi16(srcReg32b1, + _mm256_max_epi16(srcReg32b8, srcReg32b12)); - // add and saturate the results together - srcReg32b1 = _mm256_adds_epi16(srcReg32b1, - _mm256_min_epi16(srcReg32b8, srcReg32b12)); - srcReg32b1 = _mm256_adds_epi16(srcReg32b1, - _mm256_max_epi16(srcReg32b8, srcReg32b12)); + srcReg32b10 = _mm256_adds_epi16(srcReg32b10, addFilterReg64); + srcReg32b1 = _mm256_adds_epi16(srcReg32b1, addFilterReg64); - srcReg32b10 = _mm256_adds_epi16(srcReg32b10, addFilterReg64); - srcReg32b1 = _mm256_adds_epi16(srcReg32b1, addFilterReg64); + // shift by 7 bit each 16 bit + srcReg32b10 = _mm256_srai_epi16(srcReg32b10, 7); + srcReg32b1 = _mm256_srai_epi16(srcReg32b1, 7); - // shift by 7 bit each 16 bit - srcReg32b10 = _mm256_srai_epi16(srcReg32b10, 7); - srcReg32b1 = _mm256_srai_epi16(srcReg32b1, 7); + // shrink to 8 bit each 16 bits, the first lane contain the first + // convolve result and the second lane contain the second convolve + // result + srcReg32b1 = _mm256_packus_epi16(srcReg32b10, srcReg32b1); - // shrink to 8 bit each 16 bits, the first lane contain the first - // convolve result and the second lane contain the second convolve - // result - srcReg32b1 = _mm256_packus_epi16(srcReg32b10, srcReg32b1); + src_ptr += src_stride; - src_ptr+=src_stride; + // save 16 bytes + _mm_store_si128((__m128i *)output_ptr, _mm256_castsi256_si128(srcReg32b1)); - // save 16 bytes - _mm_store_si128((__m128i*)output_ptr, - _mm256_castsi256_si128(srcReg32b1)); + // save the next 16 bits + _mm_store_si128((__m128i *)(output_ptr + out_pitch), + _mm256_extractf128_si256(srcReg32b1, 1)); - // save the next 16 bits - _mm_store_si128((__m128i*)(output_ptr+out_pitch), - _mm256_extractf128_si256(srcReg32b1, 1)); + output_ptr += dst_stride; - output_ptr+=dst_stride; - - // save part of the registers for next strides - srcReg32b10 = srcReg32b11; - srcReg32b1 = srcReg32b3; - srcReg32b11 = 
srcReg32b2; - srcReg32b3 = srcReg32b5; - srcReg32b2 = srcReg32b4; - srcReg32b5 = srcReg32b7; - srcReg32b7 = srcReg32b9; + // save part of the registers for next strides + srcReg32b10 = srcReg32b11; + srcReg32b1 = srcReg32b3; + srcReg32b11 = srcReg32b2; + srcReg32b3 = srcReg32b5; + srcReg32b2 = srcReg32b4; + srcReg32b5 = srcReg32b7; + srcReg32b7 = srcReg32b9; } if (i > 0) { __m128i srcRegFilt1, srcRegFilt3, srcRegFilt4, srcRegFilt5; @@ -489,55 +460,53 @@ static void vpx_filter_block1d16_v8_avx2(const uint8_t *src_ptr, srcRegFilt8 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 7)); // merge the last 2 results together - srcRegFilt4 = _mm_unpacklo_epi8( - _mm256_castsi256_si128(srcReg32b7), srcRegFilt8); - srcRegFilt7 = _mm_unpackhi_epi8( - _mm256_castsi256_si128(srcReg32b7), srcRegFilt8); + srcRegFilt4 = + _mm_unpacklo_epi8(_mm256_castsi256_si128(srcReg32b7), srcRegFilt8); + srcRegFilt7 = + _mm_unpackhi_epi8(_mm256_castsi256_si128(srcReg32b7), srcRegFilt8); // multiply 2 adjacent elements with the filter and add the result srcRegFilt1 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b10), - _mm256_castsi256_si128(firstFilters)); - srcRegFilt4 = _mm_maddubs_epi16(srcRegFilt4, - _mm256_castsi256_si128(forthFilters)); + _mm256_castsi256_si128(firstFilters)); + srcRegFilt4 = + _mm_maddubs_epi16(srcRegFilt4, _mm256_castsi256_si128(forthFilters)); srcRegFilt3 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b1), - _mm256_castsi256_si128(firstFilters)); - srcRegFilt7 = _mm_maddubs_epi16(srcRegFilt7, - _mm256_castsi256_si128(forthFilters)); + _mm256_castsi256_si128(firstFilters)); + srcRegFilt7 = + _mm_maddubs_epi16(srcRegFilt7, _mm256_castsi256_si128(forthFilters)); // add and saturate the results together srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt4); srcRegFilt3 = _mm_adds_epi16(srcRegFilt3, srcRegFilt7); - // multiply 2 adjacent elements with the filter and add the result srcRegFilt4 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b11), - 
_mm256_castsi256_si128(secondFilters)); + _mm256_castsi256_si128(secondFilters)); srcRegFilt5 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b3), - _mm256_castsi256_si128(secondFilters)); + _mm256_castsi256_si128(secondFilters)); // multiply 2 adjacent elements with the filter and add the result srcRegFilt6 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b2), - _mm256_castsi256_si128(thirdFilters)); + _mm256_castsi256_si128(thirdFilters)); srcRegFilt7 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b5), - _mm256_castsi256_si128(thirdFilters)); + _mm256_castsi256_si128(thirdFilters)); // add and saturate the results together - srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, - _mm_min_epi16(srcRegFilt4, srcRegFilt6)); - srcRegFilt3 = _mm_adds_epi16(srcRegFilt3, - _mm_min_epi16(srcRegFilt5, srcRegFilt7)); + srcRegFilt1 = + _mm_adds_epi16(srcRegFilt1, _mm_min_epi16(srcRegFilt4, srcRegFilt6)); + srcRegFilt3 = + _mm_adds_epi16(srcRegFilt3, _mm_min_epi16(srcRegFilt5, srcRegFilt7)); // add and saturate the results together - srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, - _mm_max_epi16(srcRegFilt4, srcRegFilt6)); - srcRegFilt3 = _mm_adds_epi16(srcRegFilt3, - _mm_max_epi16(srcRegFilt5, srcRegFilt7)); + srcRegFilt1 = + _mm_adds_epi16(srcRegFilt1, _mm_max_epi16(srcRegFilt4, srcRegFilt6)); + srcRegFilt3 = + _mm_adds_epi16(srcRegFilt3, _mm_max_epi16(srcRegFilt5, srcRegFilt7)); - - srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, - _mm256_castsi256_si128(addFilterReg64)); - srcRegFilt3 = _mm_adds_epi16(srcRegFilt3, - _mm256_castsi256_si128(addFilterReg64)); + srcRegFilt1 = + _mm_adds_epi16(srcRegFilt1, _mm256_castsi256_si128(addFilterReg64)); + srcRegFilt3 = + _mm_adds_epi16(srcRegFilt3, _mm256_castsi256_si128(addFilterReg64)); // shift by 7 bit each 16 bit srcRegFilt1 = _mm_srai_epi16(srcRegFilt1, 7); @@ -549,7 +518,7 @@ static void vpx_filter_block1d16_v8_avx2(const uint8_t *src_ptr, srcRegFilt1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt3); // save 16 bytes - 
_mm_store_si128((__m128i*)output_ptr, srcRegFilt1); + _mm_store_si128((__m128i *)output_ptr, srcRegFilt1); } } @@ -579,10 +548,10 @@ filter8_1dfunction vpx_filter_block1d4_h2_ssse3; #define vpx_filter_block1d4_v8_avx2 vpx_filter_block1d4_v8_ssse3 #define vpx_filter_block1d16_v2_avx2 vpx_filter_block1d16_v2_ssse3 #define vpx_filter_block1d16_h2_avx2 vpx_filter_block1d16_h2_ssse3 -#define vpx_filter_block1d8_v2_avx2 vpx_filter_block1d8_v2_ssse3 -#define vpx_filter_block1d8_h2_avx2 vpx_filter_block1d8_h2_ssse3 -#define vpx_filter_block1d4_v2_avx2 vpx_filter_block1d4_v2_ssse3 -#define vpx_filter_block1d4_h2_avx2 vpx_filter_block1d4_h2_ssse3 +#define vpx_filter_block1d8_v2_avx2 vpx_filter_block1d8_v2_ssse3 +#define vpx_filter_block1d8_h2_avx2 vpx_filter_block1d8_h2_ssse3 +#define vpx_filter_block1d4_v2_avx2 vpx_filter_block1d4_v2_ssse3 +#define vpx_filter_block1d4_h2_avx2 vpx_filter_block1d4_h2_ssse3 // void vpx_convolve8_horiz_avx2(const uint8_t *src, ptrdiff_t src_stride, // uint8_t *dst, ptrdiff_t dst_stride, // const int16_t *filter_x, int x_step_q4, diff --git a/libs/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c b/libs/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c index 6fd52087c7..b26d97b455 100644 --- a/libs/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c +++ b/libs/libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c @@ -8,10 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ -// Due to a header conflict between math.h and intrinsics includes with ceil() -// in certain configurations under vs9 this include needs to precede -// tmmintrin.h. 
- #include #include "./vpx_dsp_rtcd.h" @@ -52,23 +48,20 @@ filter8_1dfunction vpx_filter_block1d8_v8_intrin_ssse3; filter8_1dfunction vpx_filter_block1d8_h8_intrin_ssse3; filter8_1dfunction vpx_filter_block1d4_h8_intrin_ssse3; -void vpx_filter_block1d4_h8_intrin_ssse3(const uint8_t *src_ptr, - ptrdiff_t src_pixels_per_line, - uint8_t *output_ptr, - ptrdiff_t output_pitch, - uint32_t output_height, - const int16_t *filter) { +void vpx_filter_block1d4_h8_intrin_ssse3( + const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr, + ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) { __m128i firstFilters, secondFilters, shuffle1, shuffle2; __m128i srcRegFilt1, srcRegFilt2, srcRegFilt3, srcRegFilt4; __m128i addFilterReg64, filtersReg, srcReg, minReg; unsigned int i; // create a register with 0,64,0,64,0,64,0,64,0,64,0,64,0,64,0,64 - addFilterReg64 =_mm_set1_epi32((int)0x0400040u); + addFilterReg64 = _mm_set1_epi32((int)0x0400040u); filtersReg = _mm_loadu_si128((const __m128i *)filter); // converting the 16 bit (short) to 8 bit (byte) and have the same data // in both lanes of 128 bit register. 
- filtersReg =_mm_packs_epi16(filtersReg, filtersReg); + filtersReg = _mm_packs_epi16(filtersReg, filtersReg); // duplicate only the first 16 bits in the filter into the first lane firstFilters = _mm_shufflelo_epi16(filtersReg, 0); @@ -82,23 +75,23 @@ void vpx_filter_block1d4_h8_intrin_ssse3(const uint8_t *src_ptr, secondFilters = _mm_shufflehi_epi16(secondFilters, 0xFFu); // loading the local filters - shuffle1 =_mm_load_si128((__m128i const *)filt1_4_h8); + shuffle1 = _mm_load_si128((__m128i const *)filt1_4_h8); shuffle2 = _mm_load_si128((__m128i const *)filt2_4_h8); for (i = 0; i < output_height; i++) { srcReg = _mm_loadu_si128((const __m128i *)(src_ptr - 3)); // filter the source buffer - srcRegFilt1= _mm_shuffle_epi8(srcReg, shuffle1); - srcRegFilt2= _mm_shuffle_epi8(srcReg, shuffle2); + srcRegFilt1 = _mm_shuffle_epi8(srcReg, shuffle1); + srcRegFilt2 = _mm_shuffle_epi8(srcReg, shuffle2); // multiply 2 adjacent elements with the filter and add the result srcRegFilt1 = _mm_maddubs_epi16(srcRegFilt1, firstFilters); srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, secondFilters); // extract the higher half of the lane - srcRegFilt3 = _mm_srli_si128(srcRegFilt1, 8); - srcRegFilt4 = _mm_srli_si128(srcRegFilt2, 8); + srcRegFilt3 = _mm_srli_si128(srcRegFilt1, 8); + srcRegFilt4 = _mm_srli_si128(srcRegFilt2, 8); minReg = _mm_min_epi16(srcRegFilt3, srcRegFilt2); @@ -114,21 +107,18 @@ void vpx_filter_block1d4_h8_intrin_ssse3(const uint8_t *src_ptr, // shrink to 8 bit each 16 bits srcRegFilt1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt1); - src_ptr+=src_pixels_per_line; + src_ptr += src_pixels_per_line; // save only 4 bytes - *((int*)&output_ptr[0])= _mm_cvtsi128_si32(srcRegFilt1); + *((int *)&output_ptr[0]) = _mm_cvtsi128_si32(srcRegFilt1); - output_ptr+=output_pitch; + output_ptr += output_pitch; } } -void vpx_filter_block1d8_h8_intrin_ssse3(const uint8_t *src_ptr, - ptrdiff_t src_pixels_per_line, - uint8_t *output_ptr, - ptrdiff_t output_pitch, - uint32_t output_height, - 
const int16_t *filter) { +void vpx_filter_block1d8_h8_intrin_ssse3( + const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr, + ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) { __m128i firstFilters, secondFilters, thirdFilters, forthFilters, srcReg; __m128i filt1Reg, filt2Reg, filt3Reg, filt4Reg; __m128i srcRegFilt1, srcRegFilt2, srcRegFilt3, srcRegFilt4; @@ -140,7 +130,7 @@ void vpx_filter_block1d8_h8_intrin_ssse3(const uint8_t *src_ptr, filtersReg = _mm_loadu_si128((const __m128i *)filter); // converting the 16 bit (short) to 8 bit (byte) and have the same data // in both lanes of 128 bit register. - filtersReg =_mm_packs_epi16(filtersReg, filtersReg); + filtersReg = _mm_packs_epi16(filtersReg, filtersReg); // duplicate only the first 16 bits (first and second byte) // across 128 bit register @@ -164,16 +154,16 @@ void vpx_filter_block1d8_h8_intrin_ssse3(const uint8_t *src_ptr, srcReg = _mm_loadu_si128((const __m128i *)(src_ptr - 3)); // filter the source buffer - srcRegFilt1= _mm_shuffle_epi8(srcReg, filt1Reg); - srcRegFilt2= _mm_shuffle_epi8(srcReg, filt2Reg); + srcRegFilt1 = _mm_shuffle_epi8(srcReg, filt1Reg); + srcRegFilt2 = _mm_shuffle_epi8(srcReg, filt2Reg); // multiply 2 adjacent elements with the filter and add the result srcRegFilt1 = _mm_maddubs_epi16(srcRegFilt1, firstFilters); srcRegFilt2 = _mm_maddubs_epi16(srcRegFilt2, secondFilters); // filter the source buffer - srcRegFilt3= _mm_shuffle_epi8(srcReg, filt3Reg); - srcRegFilt4= _mm_shuffle_epi8(srcReg, filt4Reg); + srcRegFilt3 = _mm_shuffle_epi8(srcReg, filt3Reg); + srcRegFilt4 = _mm_shuffle_epi8(srcReg, filt4Reg); // multiply 2 adjacent elements with the filter and add the result srcRegFilt3 = _mm_maddubs_epi16(srcRegFilt3, thirdFilters); @@ -183,7 +173,7 @@ void vpx_filter_block1d8_h8_intrin_ssse3(const uint8_t *src_ptr, minReg = _mm_min_epi16(srcRegFilt2, srcRegFilt3); srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt4); - srcRegFilt2= 
_mm_max_epi16(srcRegFilt2, srcRegFilt3); + srcRegFilt2 = _mm_max_epi16(srcRegFilt2, srcRegFilt3); srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, minReg); srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt2); srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, addFilterReg64); @@ -194,21 +184,18 @@ void vpx_filter_block1d8_h8_intrin_ssse3(const uint8_t *src_ptr, // shrink to 8 bit each 16 bits srcRegFilt1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt1); - src_ptr+=src_pixels_per_line; + src_ptr += src_pixels_per_line; // save only 8 bytes - _mm_storel_epi64((__m128i*)&output_ptr[0], srcRegFilt1); + _mm_storel_epi64((__m128i *)&output_ptr[0], srcRegFilt1); - output_ptr+=output_pitch; + output_ptr += output_pitch; } } -void vpx_filter_block1d8_v8_intrin_ssse3(const uint8_t *src_ptr, - ptrdiff_t src_pitch, - uint8_t *output_ptr, - ptrdiff_t out_pitch, - uint32_t output_height, - const int16_t *filter) { +void vpx_filter_block1d8_v8_intrin_ssse3( + const uint8_t *src_ptr, ptrdiff_t src_pitch, uint8_t *output_ptr, + ptrdiff_t out_pitch, uint32_t output_height, const int16_t *filter) { __m128i addFilterReg64, filtersReg, minReg; __m128i firstFilters, secondFilters, thirdFilters, forthFilters; __m128i srcRegFilt1, srcRegFilt2, srcRegFilt3, srcRegFilt5; @@ -221,7 +208,7 @@ void vpx_filter_block1d8_v8_intrin_ssse3(const uint8_t *src_ptr, filtersReg = _mm_loadu_si128((const __m128i *)filter); // converting the 16 bit (short) to 8 bit (byte) and have the same data // in both lanes of 128 bit register. 
- filtersReg =_mm_packs_epi16(filtersReg, filtersReg); + filtersReg = _mm_packs_epi16(filtersReg, filtersReg); // duplicate only the first 16 bits in the filter firstFilters = _mm_shuffle_epi8(filtersReg, _mm_set1_epi16(0x100u)); @@ -273,7 +260,7 @@ void vpx_filter_block1d8_v8_intrin_ssse3(const uint8_t *src_ptr, // shrink to 8 bit each 16 bits srcRegFilt1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt1); - src_ptr+=src_pitch; + src_ptr += src_pitch; // shift down a row srcReg1 = srcReg2; @@ -285,9 +272,9 @@ void vpx_filter_block1d8_v8_intrin_ssse3(const uint8_t *src_ptr, srcReg7 = srcReg8; // save only 8 bytes convolve result - _mm_storel_epi64((__m128i*)&output_ptr[0], srcRegFilt1); + _mm_storel_epi64((__m128i *)&output_ptr[0], srcRegFilt1); - output_ptr+=out_pitch; + output_ptr += out_pitch; } } @@ -343,32 +330,33 @@ FUN_CONV_1D(avg_horiz, x_step_q4, filter_x, h, src, avg_, ssse3); FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_, ssse3); -#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) { \ - const __m128i tr0_0 = _mm_unpacklo_epi8(in0, in1); \ - const __m128i tr0_1 = _mm_unpacklo_epi8(in2, in3); \ - const __m128i tr0_2 = _mm_unpacklo_epi8(in4, in5); \ - const __m128i tr0_3 = _mm_unpacklo_epi8(in6, in7); \ - \ - const __m128i tr1_0 = _mm_unpacklo_epi16(tr0_0, tr0_1); \ - const __m128i tr1_1 = _mm_unpackhi_epi16(tr0_0, tr0_1); \ - const __m128i tr1_2 = _mm_unpacklo_epi16(tr0_2, tr0_3); \ - const __m128i tr1_3 = _mm_unpackhi_epi16(tr0_2, tr0_3); \ - \ - const __m128i tr2_0 = _mm_unpacklo_epi32(tr1_0, tr1_2); \ - const __m128i tr2_1 = _mm_unpackhi_epi32(tr1_0, tr1_2); \ - const __m128i tr2_2 = _mm_unpacklo_epi32(tr1_1, tr1_3); \ - const __m128i tr2_3 = _mm_unpackhi_epi32(tr1_1, tr1_3); \ - \ - out0 = _mm_unpacklo_epi64(tr2_0, tr2_0); \ - out1 = _mm_unpackhi_epi64(tr2_0, tr2_0); \ - out2 = _mm_unpacklo_epi64(tr2_1, tr2_1); \ - out3 = _mm_unpackhi_epi64(tr2_1, tr2_1); \ - out4 = 
_mm_unpacklo_epi64(tr2_2, tr2_2); \ - out5 = _mm_unpackhi_epi64(tr2_2, tr2_2); \ - out6 = _mm_unpacklo_epi64(tr2_3, tr2_3); \ - out7 = _mm_unpackhi_epi64(tr2_3, tr2_3); \ -} +#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \ + out2, out3, out4, out5, out6, out7) \ + { \ + const __m128i tr0_0 = _mm_unpacklo_epi8(in0, in1); \ + const __m128i tr0_1 = _mm_unpacklo_epi8(in2, in3); \ + const __m128i tr0_2 = _mm_unpacklo_epi8(in4, in5); \ + const __m128i tr0_3 = _mm_unpacklo_epi8(in6, in7); \ + \ + const __m128i tr1_0 = _mm_unpacklo_epi16(tr0_0, tr0_1); \ + const __m128i tr1_1 = _mm_unpackhi_epi16(tr0_0, tr0_1); \ + const __m128i tr1_2 = _mm_unpacklo_epi16(tr0_2, tr0_3); \ + const __m128i tr1_3 = _mm_unpackhi_epi16(tr0_2, tr0_3); \ + \ + const __m128i tr2_0 = _mm_unpacklo_epi32(tr1_0, tr1_2); \ + const __m128i tr2_1 = _mm_unpackhi_epi32(tr1_0, tr1_2); \ + const __m128i tr2_2 = _mm_unpacklo_epi32(tr1_1, tr1_3); \ + const __m128i tr2_3 = _mm_unpackhi_epi32(tr1_1, tr1_3); \ + \ + out0 = _mm_unpacklo_epi64(tr2_0, tr2_0); \ + out1 = _mm_unpackhi_epi64(tr2_0, tr2_0); \ + out2 = _mm_unpacklo_epi64(tr2_1, tr2_1); \ + out3 = _mm_unpackhi_epi64(tr2_1, tr2_1); \ + out4 = _mm_unpacklo_epi64(tr2_2, tr2_2); \ + out5 = _mm_unpackhi_epi64(tr2_2, tr2_2); \ + out6 = _mm_unpacklo_epi64(tr2_3, tr2_3); \ + out7 = _mm_unpackhi_epi64(tr2_3, tr2_3); \ + } static void filter_horiz_w8_ssse3(const uint8_t *src_x, ptrdiff_t src_pitch, uint8_t *dst, const int16_t *x_filter) { @@ -424,7 +412,7 @@ static void filter_horiz_w8_ssse3(const uint8_t *src_x, ptrdiff_t src_pitch, // shrink to 8 bit each 16 bits temp = _mm_packus_epi16(temp, temp); // save only 8 bytes convolve result - _mm_storel_epi64((__m128i*)dst, temp); + _mm_storel_epi64((__m128i *)dst, temp); } static void transpose8x8_to_dst(const uint8_t *src, ptrdiff_t src_stride, @@ -440,23 +428,22 @@ static void transpose8x8_to_dst(const uint8_t *src, ptrdiff_t src_stride, G = _mm_loadl_epi64((const __m128i *)(src + 
src_stride * 6)); H = _mm_loadl_epi64((const __m128i *)(src + src_stride * 7)); - TRANSPOSE_8X8(A, B, C, D, E, F, G, H, - A, B, C, D, E, F, G, H); + TRANSPOSE_8X8(A, B, C, D, E, F, G, H, A, B, C, D, E, F, G, H); - _mm_storel_epi64((__m128i*)dst, A); - _mm_storel_epi64((__m128i*)(dst + dst_stride * 1), B); - _mm_storel_epi64((__m128i*)(dst + dst_stride * 2), C); - _mm_storel_epi64((__m128i*)(dst + dst_stride * 3), D); - _mm_storel_epi64((__m128i*)(dst + dst_stride * 4), E); - _mm_storel_epi64((__m128i*)(dst + dst_stride * 5), F); - _mm_storel_epi64((__m128i*)(dst + dst_stride * 6), G); - _mm_storel_epi64((__m128i*)(dst + dst_stride * 7), H); + _mm_storel_epi64((__m128i *)dst, A); + _mm_storel_epi64((__m128i *)(dst + dst_stride * 1), B); + _mm_storel_epi64((__m128i *)(dst + dst_stride * 2), C); + _mm_storel_epi64((__m128i *)(dst + dst_stride * 3), D); + _mm_storel_epi64((__m128i *)(dst + dst_stride * 4), E); + _mm_storel_epi64((__m128i *)(dst + dst_stride * 5), F); + _mm_storel_epi64((__m128i *)(dst + dst_stride * 6), G); + _mm_storel_epi64((__m128i *)(dst + dst_stride * 7), H); } static void scaledconvolve_horiz_w8(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, - const InterpKernel *x_filters, - int x0_q4, int x_step_q4, int w, int h) { + const InterpKernel *x_filters, int x0_q4, + int x_step_q4, int w, int h) { DECLARE_ALIGNED(16, uint8_t, temp[8 * 8]); int x, y, z; src -= SUBPEL_TAPS / 2 - 1; @@ -527,7 +514,7 @@ static void filter_horiz_w4_ssse3(const uint8_t *src_ptr, ptrdiff_t src_pitch, // 20 21 30 31 22 23 32 33 24 25 34 35 26 27 36 37 const __m128i tr0_1 = _mm_unpacklo_epi16(C, D); // 00 01 10 11 20 21 30 31 02 03 12 13 22 23 32 33 - const __m128i s1s0 = _mm_unpacklo_epi32(tr0_0, tr0_1); + const __m128i s1s0 = _mm_unpacklo_epi32(tr0_0, tr0_1); // 04 05 14 15 24 25 34 35 06 07 16 17 26 27 36 37 const __m128i s5s4 = _mm_unpackhi_epi32(tr0_0, tr0_1); // 02 03 12 13 22 23 32 33 @@ -569,16 +556,16 @@ static void 
transpose4x4_to_dst(const uint8_t *src, ptrdiff_t src_stride, C = _mm_srli_si128(A, 8); D = _mm_srli_si128(A, 12); - *(int *)(dst) = _mm_cvtsi128_si32(A); - *(int *)(dst + dst_stride) = _mm_cvtsi128_si32(B); - *(int *)(dst + dst_stride * 2) = _mm_cvtsi128_si32(C); - *(int *)(dst + dst_stride * 3) = _mm_cvtsi128_si32(D); + *(int *)(dst) = _mm_cvtsi128_si32(A); + *(int *)(dst + dst_stride) = _mm_cvtsi128_si32(B); + *(int *)(dst + dst_stride * 2) = _mm_cvtsi128_si32(C); + *(int *)(dst + dst_stride * 3) = _mm_cvtsi128_si32(D); } static void scaledconvolve_horiz_w4(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, - const InterpKernel *x_filters, - int x0_q4, int x_step_q4, int w, int h) { + const InterpKernel *x_filters, int x0_q4, + int x_step_q4, int w, int h) { DECLARE_ALIGNED(16, uint8_t, temp[4 * 4]); int x, y, z; src -= SUBPEL_TAPS / 2 - 1; @@ -652,8 +639,8 @@ static void filter_vert_w4_ssse3(const uint8_t *src_ptr, ptrdiff_t src_pitch, static void scaledconvolve_vert_w4(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, - const InterpKernel *y_filters, - int y0_q4, int y_step_q4, int w, int h) { + const InterpKernel *y_filters, int y0_q4, + int y_step_q4, int w, int h) { int y; int y_q4 = y0_q4; @@ -709,13 +696,13 @@ static void filter_vert_w8_ssse3(const uint8_t *src_ptr, ptrdiff_t src_pitch, // shrink to 8 bit each 16 bits temp = _mm_packus_epi16(temp, temp); // save only 8 bytes convolve result - _mm_storel_epi64((__m128i*)dst, temp); + _mm_storel_epi64((__m128i *)dst, temp); } static void scaledconvolve_vert_w8(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, - const InterpKernel *y_filters, - int y0_q4, int y_step_q4, int w, int h) { + const InterpKernel *y_filters, int y0_q4, + int y_step_q4, int w, int h) { int y; int y_q4 = y0_q4; @@ -798,15 +785,15 @@ static void filter_vert_w16_ssse3(const uint8_t *src_ptr, ptrdiff_t src_pitch, // result temp_hi = 
_mm_packus_epi16(temp_lo, temp_hi); src_ptr += 16; - // save 16 bytes convolve result - _mm_store_si128((__m128i*)&dst[i], temp_hi); + // save 16 bytes convolve result + _mm_store_si128((__m128i *)&dst[i], temp_hi); } } static void scaledconvolve_vert_w16(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, - const InterpKernel *y_filters, - int y0_q4, int y_step_q4, int w, int h) { + const InterpKernel *y_filters, int y0_q4, + int y_step_q4, int w, int h) { int y; int y_q4 = y0_q4; @@ -826,11 +813,9 @@ static void scaledconvolve_vert_w16(const uint8_t *src, ptrdiff_t src_stride, static void scaledconvolve2d(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, - const InterpKernel *const x_filters, - int x0_q4, int x_step_q4, - const InterpKernel *const y_filters, - int y0_q4, int y_step_q4, - int w, int h) { + const InterpKernel *const x_filters, int x0_q4, + int x_step_q4, const InterpKernel *const y_filters, + int y0_q4, int y_step_q4, int w, int h) { // Note: Fixed size intermediate buffer, temp, places limits on parameters. // 2d filtering proceeds in 2 steps: // (1) Interpolate horizontally into an intermediate buffer, temp. 
@@ -885,10 +870,9 @@ static int get_filter_offset(const int16_t *f, const InterpKernel *base) { return (int)((const InterpKernel *)(intptr_t)f - base); } -void vpx_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride, - uint8_t *dst, ptrdiff_t dst_stride, - const int16_t *filter_x, int x_step_q4, - const int16_t *filter_y, int y_step_q4, +void vpx_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, + ptrdiff_t dst_stride, const int16_t *filter_x, + int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { const InterpKernel *const filters_x = get_filter_base(filter_x); const int x0_q4 = get_filter_offset(filter_x, filters_x); @@ -896,9 +880,8 @@ void vpx_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride, const InterpKernel *const filters_y = get_filter_base(filter_y); const int y0_q4 = get_filter_offset(filter_y, filters_y); - scaledconvolve2d(src, src_stride, dst, dst_stride, - filters_x, x0_q4, x_step_q4, - filters_y, y0_q4, y_step_q4, w, h); + scaledconvolve2d(src, src_stride, dst, dst_stride, filters_x, x0_q4, + x_step_q4, filters_y, y0_q4, y_step_q4, w, h); } // void vp9_convolve8_ssse3(const uint8_t *src, ptrdiff_t src_stride, @@ -912,4 +895,4 @@ void vpx_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride, // const int16_t *filter_y, int y_step_q4, // int w, int h); FUN_CONV_2D(, ssse3); -FUN_CONV_2D(avg_ , ssse3); +FUN_CONV_2D(avg_, ssse3); diff --git a/libs/libvpx/vpx_dsp/x86/vpx_subpixel_8t_ssse3.asm b/libs/libvpx/vpx_dsp/x86/vpx_subpixel_8t_ssse3.asm index 3fbaa274cd..c1a6f23abe 100644 --- a/libs/libvpx/vpx_dsp/x86/vpx_subpixel_8t_ssse3.asm +++ b/libs/libvpx/vpx_dsp/x86/vpx_subpixel_8t_ssse3.asm @@ -16,13 +16,14 @@ pw_64: times 8 dw 64 ; %define USE_PMULHRSW ; NOTE: pmulhrsw has a latency of 5 cycles. Tests showed a performance loss ; when using this instruction. +; +; The add order below (based on ffvp9) must be followed to prevent outranges. 
+; x = k0k1 + k4k5 +; y = k2k3 + k6k7 +; z = signed SAT(x + y) SECTION .text -%if ARCH_X86_64 - %define LOCAL_VARS_SIZE 16*4 -%else - %define LOCAL_VARS_SIZE 16*6 -%endif +%define LOCAL_VARS_SIZE 16*6 %macro SETUP_LOCAL_VARS 0 ; TODO(slavarnway): using xmm registers for these on ARCH_X86_64 + @@ -49,11 +50,11 @@ SECTION .text mova k6k7, m3 %if ARCH_X86_64 %define krd m12 - %define tmp m13 + %define tmp0 [rsp + 16*4] + %define tmp1 [rsp + 16*5] mova krd, [GLOBAL(pw_64)] %else - %define tmp [rsp + 16*4] - %define krd [rsp + 16*5] + %define krd [rsp + 16*4] %if CONFIG_PIC=0 mova m6, [GLOBAL(pw_64)] %else @@ -66,55 +67,31 @@ SECTION .text %endif %endm -%macro HORIZx4_ROW 2 - mova %2, %1 - punpcklbw %1, %1 - punpckhbw %2, %2 - - mova m3, %2 - palignr %2, %1, 1 - palignr m3, %1, 5 - - pmaddubsw %2, k0k1k4k5 - pmaddubsw m3, k2k3k6k7 - - mova m4, %2 - mova m5, m3 - psrldq %2, 8 - psrldq m3, 8 - mova m6, m5 - - paddsw m4, m3 - pmaxsw m5, %2 - pminsw %2, m6 - paddsw %2, m4 - paddsw %2, m5 - paddsw %2, krd - psraw %2, 7 - packuswb %2, %2 -%endm - ;------------------------------------------------------------------------------- +%if ARCH_X86_64 + %define LOCAL_VARS_SIZE_H4 0 +%else + %define LOCAL_VARS_SIZE_H4 16*4 +%endif + %macro SUBPIX_HFILTER4 1 -cglobal filter_block1d4_%1, 6, 6+(ARCH_X86_64*2), 11, LOCAL_VARS_SIZE, \ +cglobal filter_block1d4_%1, 6, 6, 11, LOCAL_VARS_SIZE_H4, \ src, sstride, dst, dstride, height, filter mova m4, [filterq] packsswb m4, m4 %if ARCH_X86_64 - %define k0k1k4k5 m8 - %define k2k3k6k7 m9 - %define krd m10 - %define orig_height r7d + %define k0k1k4k5 m8 + %define k2k3k6k7 m9 + %define krd m10 mova krd, [GLOBAL(pw_64)] pshuflw k0k1k4k5, m4, 0b ;k0_k1 pshufhw k0k1k4k5, k0k1k4k5, 10101010b ;k0_k1_k4_k5 pshuflw k2k3k6k7, m4, 01010101b ;k2_k3 pshufhw k2k3k6k7, k2k3k6k7, 11111111b ;k2_k3_k6_k7 %else - %define k0k1k4k5 [rsp + 16*0] - %define k2k3k6k7 [rsp + 16*1] - %define krd [rsp + 16*2] - %define orig_height [rsp + 16*3] + %define k0k1k4k5 [rsp + 16*0] 
+ %define k2k3k6k7 [rsp + 16*1] + %define krd [rsp + 16*2] pshuflw m6, m4, 0b ;k0_k1 pshufhw m6, m6, 10101010b ;k0_k1_k4_k5 pshuflw m7, m4, 01010101b ;k2_k3 @@ -131,68 +108,46 @@ cglobal filter_block1d4_%1, 6, 6+(ARCH_X86_64*2), 11, LOCAL_VARS_SIZE, \ mova k2k3k6k7, m7 mova krd, m1 %endif - mov orig_height, heightd - shr heightd, 1 + dec heightd + .loop: ;Do two rows at once - movh m0, [srcq - 3] - movh m1, [srcq + 5] - punpcklqdq m0, m1 - mova m1, m0 - movh m2, [srcq + sstrideq - 3] - movh m3, [srcq + sstrideq + 5] - punpcklqdq m2, m3 - mova m3, m2 - punpcklbw m0, m0 - punpckhbw m1, m1 - punpcklbw m2, m2 - punpckhbw m3, m3 - mova m4, m1 - palignr m4, m0, 1 - pmaddubsw m4, k0k1k4k5 - palignr m1, m0, 5 + movu m4, [srcq - 3] + movu m5, [srcq + sstrideq - 3] + punpckhbw m1, m4, m4 + punpcklbw m4, m4 + punpckhbw m3, m5, m5 + punpcklbw m5, m5 + palignr m0, m1, m4, 1 + pmaddubsw m0, k0k1k4k5 + palignr m1, m4, 5 pmaddubsw m1, k2k3k6k7 - mova m7, m3 - palignr m7, m2, 1 - pmaddubsw m7, k0k1k4k5 - palignr m3, m2, 5 + palignr m2, m3, m5, 1 + pmaddubsw m2, k0k1k4k5 + palignr m3, m5, 5 pmaddubsw m3, k2k3k6k7 - mova m0, m4 - mova m5, m1 - mova m2, m7 - psrldq m4, 8 - psrldq m1, 8 - mova m6, m5 + punpckhqdq m4, m0, m2 + punpcklqdq m0, m2 + punpckhqdq m5, m1, m3 + punpcklqdq m1, m3 + paddsw m0, m4 + paddsw m1, m5 +%ifidn %1, h8_avg + movd m4, [dstq] + movd m5, [dstq + dstrideq] +%endif paddsw m0, m1 - mova m1, m3 - psrldq m7, 8 - psrldq m3, 8 - paddsw m2, m3 - mova m3, m1 - pmaxsw m5, m4 - pminsw m4, m6 - paddsw m4, m0 - paddsw m4, m5 - pmaxsw m1, m7 - pminsw m7, m3 - paddsw m7, m2 - paddsw m7, m1 - - paddsw m4, krd - psraw m4, 7 - packuswb m4, m4 - paddsw m7, krd - psraw m7, 7 - packuswb m7, m7 + paddsw m0, krd + psraw m0, 7 + packuswb m0, m0 + psrldq m1, m0, 4 %ifidn %1, h8_avg - movd m0, [dstq] - pavgb m4, m0 - movd m2, [dstq + dstrideq] - pavgb m7, m2 + pavgb m0, m4 + pavgb m1, m5 %endif - movd [dstq], m4 - movd [dstq + dstrideq], m7 + movd [dstq], m0 + movd [dstq + dstrideq], 
m1 lea srcq, [srcq + sstrideq ] prefetcht0 [srcq + 4 * sstrideq - 3] @@ -200,236 +155,175 @@ cglobal filter_block1d4_%1, 6, 6+(ARCH_X86_64*2), 11, LOCAL_VARS_SIZE, \ lea dstq, [dstq + 2 * dstrideq ] prefetcht0 [srcq + 2 * sstrideq - 3] - dec heightd - jnz .loop + sub heightd, 2 + jg .loop ; Do last row if output_height is odd - mov heightd, orig_height - and heightd, 1 - je .done + jne .done - movh m0, [srcq - 3] ; load src - movh m1, [srcq + 5] - punpcklqdq m0, m1 - - HORIZx4_ROW m0, m1 + movu m4, [srcq - 3] + punpckhbw m1, m4, m4 + punpcklbw m4, m4 + palignr m0, m1, m4, 1 + palignr m1, m4, 5 + pmaddubsw m0, k0k1k4k5 + pmaddubsw m1, k2k3k6k7 + psrldq m2, m0, 8 + psrldq m3, m1, 8 + paddsw m0, m2 + paddsw m1, m3 + paddsw m0, m1 + paddsw m0, krd + psraw m0, 7 + packuswb m0, m0 %ifidn %1, h8_avg - movd m0, [dstq] - pavgb m1, m0 + movd m4, [dstq] + pavgb m0, m4 %endif - movd [dstq], m1 -.done - RET -%endm - -%macro HORIZx8_ROW 5 - mova %2, %1 - punpcklbw %1, %1 - punpckhbw %2, %2 - - mova %3, %2 - mova %4, %2 - mova %5, %2 - - palignr %2, %1, 1 - palignr %3, %1, 5 - palignr %4, %1, 9 - palignr %5, %1, 13 - - pmaddubsw %2, k0k1 - pmaddubsw %3, k2k3 - pmaddubsw %4, k4k5 - pmaddubsw %5, k6k7 - - paddsw %2, %5 - mova %1, %3 - pminsw %3, %4 - pmaxsw %1, %4 - paddsw %2, %3 - paddsw %1, %2 - paddsw %1, krd - psraw %1, 7 - packuswb %1, %1 + movd [dstq], m0 +.done: + REP_RET %endm ;------------------------------------------------------------------------------- %macro SUBPIX_HFILTER8 1 -cglobal filter_block1d8_%1, 6, 6+(ARCH_X86_64*1), 14, LOCAL_VARS_SIZE, \ +cglobal filter_block1d8_%1, 6, 6, 14, LOCAL_VARS_SIZE, \ src, sstride, dst, dstride, height, filter mova m4, [filterq] SETUP_LOCAL_VARS -%if ARCH_X86_64 - %define orig_height r7d -%else - %define orig_height heightmp -%endif - mov orig_height, heightd - shr heightd, 1 + dec heightd .loop: - movh m0, [srcq - 3] - movh m3, [srcq + 5] - movh m4, [srcq + sstrideq - 3] - movh m7, [srcq + sstrideq + 5] - punpcklqdq m0, m3 - mova 
m1, m0 + ;Do two rows at once + movu m0, [srcq - 3] + movu m4, [srcq + sstrideq - 3] + punpckhbw m1, m0, m0 punpcklbw m0, m0 - punpckhbw m1, m1 - mova m5, m1 - palignr m5, m0, 13 + palignr m5, m1, m0, 13 pmaddubsw m5, k6k7 - mova m2, m1 - mova m3, m1 + palignr m2, m1, m0, 5 + palignr m3, m1, m0, 9 palignr m1, m0, 1 pmaddubsw m1, k0k1 - punpcklqdq m4, m7 - mova m6, m4 + punpckhbw m6, m4, m4 punpcklbw m4, m4 - palignr m2, m0, 5 - punpckhbw m6, m6 - palignr m3, m0, 9 - mova m7, m6 pmaddubsw m2, k2k3 pmaddubsw m3, k4k5 - palignr m7, m4, 13 - paddsw m1, m5 - mova m5, m6 - mova m0, m2 - palignr m5, m4, 5 - pminsw m2, m3 + palignr m7, m6, m4, 13 + palignr m0, m6, m4, 5 pmaddubsw m7, k6k7 - pmaxsw m3, m0 - paddsw m1, m2 - mova m0, m6 - palignr m6, m4, 1 - pmaddubsw m5, k2k3 paddsw m1, m3 + paddsw m2, m5 + paddsw m1, m2 +%ifidn %1, h8_avg + movh m2, [dstq] + movhps m2, [dstq + dstrideq] +%endif + palignr m5, m6, m4, 9 + palignr m6, m4, 1 + pmaddubsw m0, k2k3 pmaddubsw m6, k0k1 - palignr m0, m4, 9 paddsw m1, krd - pmaddubsw m0, k4k5 - mova m4, m5 + pmaddubsw m5, k4k5 psraw m1, 7 - pminsw m5, m0 - paddsw m6, m7 - packuswb m1, m1 - + paddsw m0, m7 paddsw m6, m5 - pmaxsw m0, m4 paddsw m6, m0 paddsw m6, krd psraw m6, 7 - packuswb m6, m6 - + packuswb m1, m6 %ifidn %1, h8_avg - movh m0, [dstq] - movh m2, [dstq + dstrideq] - pavgb m1, m0 - pavgb m6, m2 + pavgb m1, m2 %endif - movh [dstq], m1 - movh [dstq + dstrideq], m6 + movh [dstq], m1 + movhps [dstq + dstrideq], m1 lea srcq, [srcq + sstrideq ] prefetcht0 [srcq + 4 * sstrideq - 3] lea srcq, [srcq + sstrideq ] lea dstq, [dstq + 2 * dstrideq ] prefetcht0 [srcq + 2 * sstrideq - 3] - dec heightd - jnz .loop + sub heightd, 2 + jg .loop - ;Do last row if output_height is odd - mov heightd, orig_height - and heightd, 1 - je .done - - movh m0, [srcq - 3] - movh m3, [srcq + 5] - punpcklqdq m0, m3 - - HORIZx8_ROW m0, m1, m2, m3, m4 + ; Do last row if output_height is odd + jne .done + movu m0, [srcq - 3] + punpckhbw m3, m0, m0 + punpcklbw 
m0, m0 + palignr m1, m3, m0, 1 + palignr m2, m3, m0, 5 + palignr m4, m3, m0, 13 + palignr m3, m0, 9 + pmaddubsw m1, k0k1 + pmaddubsw m2, k2k3 + pmaddubsw m3, k4k5 + pmaddubsw m4, k6k7 + paddsw m1, m3 + paddsw m4, m2 + paddsw m1, m4 + paddsw m1, krd + psraw m1, 7 + packuswb m1, m1 %ifidn %1, h8_avg - movh m1, [dstq] - pavgb m0, m1 + movh m0, [dstq] + pavgb m1, m0 %endif - movh [dstq], m0 + movh [dstq], m1 .done: - RET + REP_RET %endm ;------------------------------------------------------------------------------- %macro SUBPIX_HFILTER16 1 -cglobal filter_block1d16_%1, 6, 6+(ARCH_X86_64*0), 14, LOCAL_VARS_SIZE, \ +cglobal filter_block1d16_%1, 6, 6, 14, LOCAL_VARS_SIZE, \ src, sstride, dst, dstride, height, filter mova m4, [filterq] SETUP_LOCAL_VARS + .loop: prefetcht0 [srcq + 2 * sstrideq -3] - movh m0, [srcq - 3] - movh m4, [srcq + 5] - movh m6, [srcq + 13] - punpcklqdq m0, m4 - mova m7, m0 - punpckhbw m0, m0 - mova m1, m0 - punpcklqdq m4, m6 - mova m3, m0 - punpcklbw m7, m7 - - palignr m3, m7, 13 - mova m2, m0 - pmaddubsw m3, k6k7 - palignr m0, m7, 1 + movu m0, [srcq - 3] + movu m4, [srcq - 2] pmaddubsw m0, k0k1 - palignr m1, m7, 5 - pmaddubsw m1, k2k3 - palignr m2, m7, 9 - pmaddubsw m2, k4k5 - paddsw m0, m3 - mova m3, m4 - punpckhbw m4, m4 - mova m5, m4 - punpcklbw m3, m3 - mova m7, m4 - palignr m5, m3, 5 - mova m6, m4 - palignr m4, m3, 1 pmaddubsw m4, k0k1 + movu m1, [srcq - 1] + movu m5, [srcq + 0] + pmaddubsw m1, k2k3 pmaddubsw m5, k2k3 - palignr m6, m3, 9 + movu m2, [srcq + 1] + movu m6, [srcq + 2] + pmaddubsw m2, k4k5 pmaddubsw m6, k4k5 - palignr m7, m3, 13 + movu m3, [srcq + 3] + movu m7, [srcq + 4] + pmaddubsw m3, k6k7 pmaddubsw m7, k6k7 - - mova m3, m1 - pmaxsw m1, m2 - pminsw m2, m3 paddsw m0, m2 + paddsw m1, m3 paddsw m0, m1 - paddsw m4, m7 - mova m7, m5 - pmaxsw m5, m6 - pminsw m6, m7 paddsw m4, m6 + paddsw m5, m7 paddsw m4, m5 paddsw m0, krd paddsw m4, krd psraw m0, 7 psraw m4, 7 - packuswb m0, m4 + packuswb m0, m0 + packuswb m4, m4 + punpcklbw m0, m4 
%ifidn %1, h8_avg - mova m1, [dstq] - pavgb m0, m1 + pavgb m0, [dstq] %endif lea srcq, [srcq + sstrideq] mova [dstq], m0 lea dstq, [dstq + dstrideq] dec heightd jnz .loop - RET + REP_RET %endm INIT_XMM ssse3 @@ -441,223 +335,463 @@ SUBPIX_HFILTER4 h8 SUBPIX_HFILTER4 h8_avg ;------------------------------------------------------------------------------- + +; TODO(Linfeng): Detect cpu type and choose the code with better performance. +%define X86_SUBPIX_VFILTER_PREFER_SLOW_CELERON 1 + +%if ARCH_X86_64 && X86_SUBPIX_VFILTER_PREFER_SLOW_CELERON + %define NUM_GENERAL_REG_USED 9 +%else + %define NUM_GENERAL_REG_USED 6 +%endif + %macro SUBPIX_VFILTER 2 -cglobal filter_block1d%2_%1, 6, 6+(ARCH_X86_64*3), 14, LOCAL_VARS_SIZE, \ +cglobal filter_block1d%2_%1, 6, NUM_GENERAL_REG_USED, 15, LOCAL_VARS_SIZE, \ src, sstride, dst, dstride, height, filter mova m4, [filterq] SETUP_LOCAL_VARS -%if ARCH_X86_64 - %define src1q r7 - %define sstride6q r8 - %define dst_stride dstrideq -%else - %define src1q filterq - %define sstride6q dstrideq - %define dst_stride dstridemp -%endif - mov src1q, srcq - add src1q, sstrideq - lea sstride6q, [sstrideq + sstrideq * 4] - add sstride6q, sstrideq ;pitch * 6 %ifidn %2, 8 - %define movx movh + %define movx movh %else - %define movx movd + %define movx movd %endif + + dec heightd + +%if ARCH_X86 || X86_SUBPIX_VFILTER_PREFER_SLOW_CELERON + +%if ARCH_X86_64 + %define src1q r7 + %define sstride6q r8 + %define dst_stride dstrideq +%else + %define src1q filterq + %define sstride6q dstrideq + %define dst_stride dstridemp +%endif + mov src1q, srcq + add src1q, sstrideq + lea sstride6q, [sstrideq + sstrideq * 4] + add sstride6q, sstrideq ;pitch * 6 + .loop: - movx m0, [srcq ] ;A - movx m1, [srcq + sstrideq ] ;B - punpcklbw m0, m1 ;A B - movx m2, [srcq + sstrideq * 2 ] ;C - pmaddubsw m0, k0k1 - mova m6, m2 - movx m3, [src1q + sstrideq * 2] ;D - punpcklbw m2, m3 ;C D - pmaddubsw m2, k2k3 - movx m4, [srcq + sstrideq * 4 ] ;E - mova m7, m4 - movx m5, [src1q + 
sstrideq * 4] ;F - punpcklbw m4, m5 ;E F - pmaddubsw m4, k4k5 - punpcklbw m1, m6 ;A B next iter - movx m6, [srcq + sstride6q ] ;G - punpcklbw m5, m6 ;E F next iter - punpcklbw m3, m7 ;C D next iter - pmaddubsw m5, k4k5 - movx m7, [src1q + sstride6q ] ;H - punpcklbw m6, m7 ;G H - pmaddubsw m6, k6k7 - mova tmp, m2 - pmaddubsw m3, k2k3 - pmaddubsw m1, k0k1 - pmaxsw m2, m4 - paddsw m0, m6 - movx m6, [srcq + sstrideq * 8 ] ;H next iter - punpcklbw m7, m6 - pmaddubsw m7, k6k7 - pminsw m4, tmp - paddsw m0, m4 - mova m4, m3 - paddsw m0, m2 - pminsw m3, m5 - pmaxsw m5, m4 - paddsw m0, krd - psraw m0, 7 - paddsw m1, m7 - packuswb m0, m0 + ;Do two rows at once + movx m0, [srcq ] ;A + movx m1, [src1q ] ;B + punpcklbw m0, m1 ;A B + movx m2, [srcq + sstrideq * 2 ] ;C + pmaddubsw m0, k0k1 + mova m6, m2 + movx m3, [src1q + sstrideq * 2] ;D + punpcklbw m2, m3 ;C D + pmaddubsw m2, k2k3 + movx m4, [srcq + sstrideq * 4 ] ;E + mova m7, m4 + movx m5, [src1q + sstrideq * 4] ;F + punpcklbw m4, m5 ;E F + pmaddubsw m4, k4k5 + punpcklbw m1, m6 ;A B next iter + movx m6, [srcq + sstride6q ] ;G + punpcklbw m5, m6 ;E F next iter + punpcklbw m3, m7 ;C D next iter + pmaddubsw m5, k4k5 + movx m7, [src1q + sstride6q ] ;H + punpcklbw m6, m7 ;G H + pmaddubsw m6, k6k7 + pmaddubsw m3, k2k3 + pmaddubsw m1, k0k1 + paddsw m0, m4 + paddsw m2, m6 + movx m6, [srcq + sstrideq * 8 ] ;H next iter + punpcklbw m7, m6 + pmaddubsw m7, k6k7 + paddsw m0, m2 + paddsw m0, krd + psraw m0, 7 + paddsw m1, m5 + packuswb m0, m0 - paddsw m1, m3 - paddsw m1, m5 - paddsw m1, krd - psraw m1, 7 - lea srcq, [srcq + sstrideq * 2 ] - lea src1q, [src1q + sstrideq * 2] - packuswb m1, m1 + paddsw m3, m7 + paddsw m1, m3 + paddsw m1, krd + psraw m1, 7 + lea srcq, [srcq + sstrideq * 2 ] + lea src1q, [src1q + sstrideq * 2] + packuswb m1, m1 %ifidn %1, v8_avg - movx m2, [dstq] - pavgb m0, m2 + movx m2, [dstq] + pavgb m0, m2 %endif - movx [dstq], m0 - add dstq, dst_stride + movx [dstq], m0 + add dstq, dst_stride %ifidn %1, v8_avg - movx m3, 
[dstq] - pavgb m1, m3 + movx m3, [dstq] + pavgb m1, m3 %endif - movx [dstq], m1 - add dstq, dst_stride - sub heightd, 2 - cmp heightd, 1 - jg .loop + movx [dstq], m1 + add dstq, dst_stride + sub heightd, 2 + jg .loop - cmp heightd, 0 - je .done + ; Do last row if output_height is odd + jne .done - movx m0, [srcq ] ;A - movx m1, [srcq + sstrideq ] ;B - movx m6, [srcq + sstride6q ] ;G - punpcklbw m0, m1 ;A B - movx m7, [rax + sstride6q ] ;H - pmaddubsw m0, k0k1 - movx m2, [srcq + sstrideq * 2 ] ;C - punpcklbw m6, m7 ;G H - movx m3, [rax + sstrideq * 2 ] ;D - pmaddubsw m6, k6k7 - movx m4, [srcq + sstrideq * 4 ] ;E - punpcklbw m2, m3 ;C D - movx m5, [src1q + sstrideq * 4] ;F - punpcklbw m4, m5 ;E F - pmaddubsw m2, k2k3 - pmaddubsw m4, k4k5 - paddsw m0, m6 - mova m1, m2 - pmaxsw m2, m4 - pminsw m4, m1 - paddsw m0, m4 - paddsw m0, m2 - paddsw m0, krd - psraw m0, 7 - packuswb m0, m0 + movx m0, [srcq ] ;A + movx m1, [srcq + sstrideq ] ;B + movx m6, [srcq + sstride6q ] ;G + punpcklbw m0, m1 ;A B + movx m7, [src1q + sstride6q ] ;H + pmaddubsw m0, k0k1 + movx m2, [srcq + sstrideq * 2 ] ;C + punpcklbw m6, m7 ;G H + movx m3, [src1q + sstrideq * 2] ;D + pmaddubsw m6, k6k7 + movx m4, [srcq + sstrideq * 4 ] ;E + punpcklbw m2, m3 ;C D + movx m5, [src1q + sstrideq * 4] ;F + punpcklbw m4, m5 ;E F + pmaddubsw m2, k2k3 + pmaddubsw m4, k4k5 + paddsw m2, m6 + paddsw m0, m4 + paddsw m0, m2 + paddsw m0, krd + psraw m0, 7 + packuswb m0, m0 %ifidn %1, v8_avg - movx m1, [dstq] - pavgb m0, m1 + movx m1, [dstq] + pavgb m0, m1 %endif - movx [dstq], m0 + movx [dstq], m0 + +%else + ; ARCH_X86_64 + + movx m0, [srcq ] ;A + movx m1, [srcq + sstrideq ] ;B + lea srcq, [srcq + sstrideq * 2 ] + movx m2, [srcq] ;C + movx m3, [srcq + sstrideq] ;D + lea srcq, [srcq + sstrideq * 2 ] + movx m4, [srcq] ;E + movx m5, [srcq + sstrideq] ;F + lea srcq, [srcq + sstrideq * 2 ] + movx m6, [srcq] ;G + punpcklbw m0, m1 ;A B + punpcklbw m1, m2 ;A B next iter + punpcklbw m2, m3 ;C D + punpcklbw m3, m4 ;C D next iter + 
punpcklbw m4, m5 ;E F + punpcklbw m5, m6 ;E F next iter + +.loop: + ;Do two rows at once + movx m7, [srcq + sstrideq] ;H + lea srcq, [srcq + sstrideq * 2 ] + movx m14, [srcq] ;H next iter + punpcklbw m6, m7 ;G H + punpcklbw m7, m14 ;G H next iter + pmaddubsw m8, m0, k0k1 + pmaddubsw m9, m1, k0k1 + mova m0, m2 + mova m1, m3 + pmaddubsw m10, m2, k2k3 + pmaddubsw m11, m3, k2k3 + mova m2, m4 + mova m3, m5 + pmaddubsw m4, k4k5 + pmaddubsw m5, k4k5 + paddsw m8, m4 + paddsw m9, m5 + mova m4, m6 + mova m5, m7 + pmaddubsw m6, k6k7 + pmaddubsw m7, k6k7 + paddsw m10, m6 + paddsw m11, m7 + paddsw m8, m10 + paddsw m9, m11 + mova m6, m14 + paddsw m8, krd + paddsw m9, krd + psraw m8, 7 + psraw m9, 7 +%ifidn %2, 4 + packuswb m8, m8 + packuswb m9, m9 +%else + packuswb m8, m9 +%endif + +%ifidn %1, v8_avg + movx m7, [dstq] +%ifidn %2, 4 + movx m10, [dstq + dstrideq] + pavgb m9, m10 +%else + movhpd m7, [dstq + dstrideq] +%endif + pavgb m8, m7 +%endif + movx [dstq], m8 +%ifidn %2, 4 + movx [dstq + dstrideq], m9 +%else + movhpd [dstq + dstrideq], m8 +%endif + + lea dstq, [dstq + dstrideq * 2 ] + sub heightd, 2 + jg .loop + + ; Do last row if output_height is odd + jne .done + + movx m7, [srcq + sstrideq] ;H + punpcklbw m6, m7 ;G H + pmaddubsw m0, k0k1 + pmaddubsw m2, k2k3 + pmaddubsw m4, k4k5 + pmaddubsw m6, k6k7 + paddsw m0, m4 + paddsw m2, m6 + paddsw m0, m2 + paddsw m0, krd + psraw m0, 7 + packuswb m0, m0 +%ifidn %1, v8_avg + movx m1, [dstq] + pavgb m0, m1 +%endif + movx [dstq], m0 + +%endif ; ARCH_X86_64 + .done: - RET + REP_RET + %endm ;------------------------------------------------------------------------------- %macro SUBPIX_VFILTER16 1 -cglobal filter_block1d16_%1, 6, 6+(ARCH_X86_64*3), 14, LOCAL_VARS_SIZE, \ +cglobal filter_block1d16_%1, 6, NUM_GENERAL_REG_USED, 16, LOCAL_VARS_SIZE, \ src, sstride, dst, dstride, height, filter - - mova m4, [filterq] + mova m4, [filterq] SETUP_LOCAL_VARS + +%if ARCH_X86 || X86_SUBPIX_VFILTER_PREFER_SLOW_CELERON + %if ARCH_X86_64 - %define 
src1q r7 - %define sstride6q r8 - %define dst_stride dstrideq + %define src1q r7 + %define sstride6q r8 + %define dst_stride dstrideq %else - %define src1q filterq - %define sstride6q dstrideq - %define dst_stride dstridemp + %define src1q filterq + %define sstride6q dstrideq + %define dst_stride dstridemp %endif - mov src1q, srcq - add src1q, sstrideq - lea sstride6q, [sstrideq + sstrideq * 4] - add sstride6q, sstrideq ;pitch * 6 + lea src1q, [srcq + sstrideq] + lea sstride6q, [sstrideq + sstrideq * 4] + add sstride6q, sstrideq ;pitch * 6 .loop: - movh m0, [srcq ] ;A - movh m1, [srcq + sstrideq ] ;B - movh m2, [srcq + sstrideq * 2 ] ;C - movh m3, [src1q + sstrideq * 2] ;D - movh m4, [srcq + sstrideq * 4 ] ;E - movh m5, [src1q + sstrideq * 4] ;F + movh m0, [srcq ] ;A + movh m1, [src1q ] ;B + movh m2, [srcq + sstrideq * 2 ] ;C + movh m3, [src1q + sstrideq * 2] ;D + movh m4, [srcq + sstrideq * 4 ] ;E + movh m5, [src1q + sstrideq * 4] ;F - punpcklbw m0, m1 ;A B - movh m6, [srcq + sstride6q] ;G - punpcklbw m2, m3 ;C D - movh m7, [src1q + sstride6q] ;H - punpcklbw m4, m5 ;E F - pmaddubsw m0, k0k1 - movh m3, [srcq + 8] ;A - pmaddubsw m2, k2k3 - punpcklbw m6, m7 ;G H - movh m5, [srcq + sstrideq + 8] ;B - pmaddubsw m4, k4k5 - punpcklbw m3, m5 ;A B - movh m7, [srcq + sstrideq * 2 + 8] ;C - pmaddubsw m6, k6k7 - mova m1, m2 - movh m5, [src1q + sstrideq * 2 + 8] ;D - pmaxsw m2, m4 - punpcklbw m7, m5 ;C D - pminsw m4, m1 - paddsw m0, m6 - pmaddubsw m3, k0k1 - movh m1, [srcq + sstrideq * 4 + 8] ;E - paddsw m0, m4 - pmaddubsw m7, k2k3 - movh m6, [src1q + sstrideq * 4 + 8] ;F - punpcklbw m1, m6 ;E F - paddsw m0, m2 - paddsw m0, krd - movh m2, [srcq + sstride6q + 8] ;G - pmaddubsw m1, k4k5 - movh m5, [src1q + sstride6q + 8] ;H - psraw m0, 7 - punpcklbw m2, m5 ;G H - packuswb m0, m0 - pmaddubsw m2, k6k7 -%ifidn %1, v8_avg - movh m4, [dstq] - pavgb m0, m4 -%endif - movh [dstq], m0 - mova m6, m7 - pmaxsw m7, m1 - pminsw m1, m6 - paddsw m3, m2 - paddsw m3, m1 - paddsw m3, m7 - paddsw 
m3, krd - psraw m3, 7 - packuswb m3, m3 + punpcklbw m0, m1 ;A B + movh m6, [srcq + sstride6q] ;G + punpcklbw m2, m3 ;C D + movh m7, [src1q + sstride6q] ;H + punpcklbw m4, m5 ;E F + pmaddubsw m0, k0k1 + movh m3, [srcq + 8] ;A + pmaddubsw m2, k2k3 + punpcklbw m6, m7 ;G H + movh m5, [srcq + sstrideq + 8] ;B + pmaddubsw m4, k4k5 + punpcklbw m3, m5 ;A B + movh m7, [srcq + sstrideq * 2 + 8] ;C + pmaddubsw m6, k6k7 + movh m5, [src1q + sstrideq * 2 + 8] ;D + punpcklbw m7, m5 ;C D + paddsw m2, m6 + pmaddubsw m3, k0k1 + movh m1, [srcq + sstrideq * 4 + 8] ;E + paddsw m0, m4 + pmaddubsw m7, k2k3 + movh m6, [src1q + sstrideq * 4 + 8] ;F + punpcklbw m1, m6 ;E F + paddsw m0, m2 + paddsw m0, krd + movh m2, [srcq + sstride6q + 8] ;G + pmaddubsw m1, k4k5 + movh m5, [src1q + sstride6q + 8] ;H + psraw m0, 7 + punpcklbw m2, m5 ;G H + pmaddubsw m2, k6k7 + paddsw m7, m2 + paddsw m3, m1 + paddsw m3, m7 + paddsw m3, krd + psraw m3, 7 + packuswb m0, m3 - add srcq, sstrideq - add src1q, sstrideq + add srcq, sstrideq + add src1q, sstrideq %ifidn %1, v8_avg - movh m1, [dstq + 8] - pavgb m3, m1 + pavgb m0, [dstq] %endif - movh [dstq + 8], m3 - add dstq, dst_stride - dec heightd - jnz .loop - RET + mova [dstq], m0 + add dstq, dst_stride + dec heightd + jnz .loop + REP_RET + +%else + ; ARCH_X86_64 + dec heightd + + movu m1, [srcq ] ;A + movu m3, [srcq + sstrideq ] ;B + lea srcq, [srcq + sstrideq * 2] + punpcklbw m0, m1, m3 ;A B + punpckhbw m1, m3 ;A B + movu m5, [srcq] ;C + punpcklbw m2, m3, m5 ;A B next iter + punpckhbw m3, m5 ;A B next iter + mova tmp0, m2 ;store to stack + mova tmp1, m3 ;store to stack + movu m7, [srcq + sstrideq] ;D + lea srcq, [srcq + sstrideq * 2] + punpcklbw m4, m5, m7 ;C D + punpckhbw m5, m7 ;C D + movu m9, [srcq] ;E + punpcklbw m6, m7, m9 ;C D next iter + punpckhbw m7, m9 ;C D next iter + movu m11, [srcq + sstrideq] ;F + lea srcq, [srcq + sstrideq * 2] + punpcklbw m8, m9, m11 ;E F + punpckhbw m9, m11 ;E F + movu m2, [srcq] ;G + punpcklbw m10, m11, m2 ;E F next iter + 
punpckhbw m11, m2 ;E F next iter + +.loop: + ;Do two rows at once + pmaddubsw m13, m0, k0k1 + mova m0, m4 + pmaddubsw m14, m8, k4k5 + pmaddubsw m15, m4, k2k3 + mova m4, m8 + paddsw m13, m14 + movu m3, [srcq + sstrideq] ;H + lea srcq, [srcq + sstrideq * 2] + punpcklbw m14, m2, m3 ;G H + mova m8, m14 + pmaddubsw m14, k6k7 + paddsw m15, m14 + paddsw m13, m15 + paddsw m13, krd + psraw m13, 7 + + pmaddubsw m14, m1, k0k1 + pmaddubsw m1, m9, k4k5 + pmaddubsw m15, m5, k2k3 + paddsw m14, m1 + mova m1, m5 + mova m5, m9 + punpckhbw m2, m3 ;G H + mova m9, m2 + pmaddubsw m2, k6k7 + paddsw m15, m2 + paddsw m14, m15 + paddsw m14, krd + psraw m14, 7 + packuswb m13, m14 +%ifidn %1, v8_avg + pavgb m13, [dstq] +%endif + mova [dstq], m13 + + ; next iter + pmaddubsw m15, tmp0, k0k1 + pmaddubsw m14, m10, k4k5 + pmaddubsw m13, m6, k2k3 + paddsw m15, m14 + mova tmp0, m6 + mova m6, m10 + movu m2, [srcq] ;G next iter + punpcklbw m14, m3, m2 ;G H next iter + mova m10, m14 + pmaddubsw m14, k6k7 + paddsw m13, m14 + paddsw m15, m13 + paddsw m15, krd + psraw m15, 7 + + pmaddubsw m14, tmp1, k0k1 + mova tmp1, m7 + pmaddubsw m13, m7, k2k3 + mova m7, m11 + pmaddubsw m11, k4k5 + paddsw m14, m11 + punpckhbw m3, m2 ;G H next iter + mova m11, m3 + pmaddubsw m3, k6k7 + paddsw m13, m3 + paddsw m14, m13 + paddsw m14, krd + psraw m14, 7 + packuswb m15, m14 +%ifidn %1, v8_avg + pavgb m15, [dstq + dstrideq] +%endif + mova [dstq + dstrideq], m15 + lea dstq, [dstq + dstrideq * 2] + sub heightd, 2 + jg .loop + + ; Do last row if output_height is odd + jne .done + + movu m3, [srcq + sstrideq] ;H + punpcklbw m6, m2, m3 ;G H + punpckhbw m2, m3 ;G H + pmaddubsw m0, k0k1 + pmaddubsw m1, k0k1 + pmaddubsw m4, k2k3 + pmaddubsw m5, k2k3 + pmaddubsw m8, k4k5 + pmaddubsw m9, k4k5 + pmaddubsw m6, k6k7 + pmaddubsw m2, k6k7 + paddsw m0, m8 + paddsw m1, m9 + paddsw m4, m6 + paddsw m5, m2 + paddsw m0, m4 + paddsw m1, m5 + paddsw m0, krd + paddsw m1, krd + psraw m0, 7 + psraw m1, 7 + packuswb m0, m1 +%ifidn %1, v8_avg + pavgb 
m0, [dstq] +%endif + mova [dstq], m0 + +.done: + REP_RET + +%endif ; ARCH_X86_64 + %endm INIT_XMM ssse3 diff --git a/libs/libvpx/vpx_dsp/x86/vpx_subpixel_bilinear_ssse3.asm b/libs/libvpx/vpx_dsp/x86/vpx_subpixel_bilinear_ssse3.asm index 3c8cfd2253..538b2129db 100644 --- a/libs/libvpx/vpx_dsp/x86/vpx_subpixel_bilinear_ssse3.asm +++ b/libs/libvpx/vpx_dsp/x86/vpx_subpixel_bilinear_ssse3.asm @@ -14,14 +14,14 @@ mov rdx, arg(5) ;filter ptr mov rsi, arg(0) ;src_ptr mov rdi, arg(2) ;output_ptr - mov rcx, 0x0400040 + mov ecx, 0x01000100 movdqa xmm3, [rdx] ;load filters psrldq xmm3, 6 packsswb xmm3, xmm3 pshuflw xmm3, xmm3, 0b ;k3_k4 - movq xmm2, rcx ;rounding + movd xmm2, ecx ;rounding_shift pshufd xmm2, xmm2, 0 movsxd rax, DWORD PTR arg(1) ;pixels_per_line @@ -33,8 +33,7 @@ punpcklbw xmm0, xmm1 pmaddubsw xmm0, xmm3 - paddsw xmm0, xmm2 ;rounding - psraw xmm0, 7 ;shift + pmulhrsw xmm0, xmm2 ;rounding(+64)+shift(>>7) packuswb xmm0, xmm0 ;pack to byte %if %1 @@ -51,7 +50,7 @@ mov rdx, arg(5) ;filter ptr mov rsi, arg(0) ;src_ptr mov rdi, arg(2) ;output_ptr - mov rcx, 0x0400040 + mov ecx, 0x01000100 movdqa xmm7, [rdx] ;load filters psrldq xmm7, 6 @@ -59,7 +58,7 @@ pshuflw xmm7, xmm7, 0b ;k3_k4 punpcklwd xmm7, xmm7 - movq xmm6, rcx ;rounding + movd xmm6, ecx ;rounding_shift pshufd xmm6, xmm6, 0 movsxd rax, DWORD PTR arg(1) ;pixels_per_line @@ -71,8 +70,7 @@ punpcklbw xmm0, xmm1 pmaddubsw xmm0, xmm7 - paddsw xmm0, xmm6 ;rounding - psraw xmm0, 7 ;shift + pmulhrsw xmm0, xmm6 ;rounding(+64)+shift(>>7) packuswb xmm0, xmm0 ;pack back to byte %if %1 @@ -92,10 +90,8 @@ pmaddubsw xmm0, xmm7 pmaddubsw xmm2, xmm7 - paddsw xmm0, xmm6 ;rounding - paddsw xmm2, xmm6 - psraw xmm0, 7 ;shift - psraw xmm2, 7 + pmulhrsw xmm0, xmm6 ;rounding(+64)+shift(>>7) + pmulhrsw xmm2, xmm6 packuswb xmm0, xmm2 ;pack back to byte %if %1 diff --git a/libs/libvpx/vpx_mem/include/vpx_mem_intrnl.h b/libs/libvpx/vpx_mem/include/vpx_mem_intrnl.h index c4dd78550f..2c259d322e 100644 --- 
a/libs/libvpx/vpx_mem/include/vpx_mem_intrnl.h +++ b/libs/libvpx/vpx_mem/include/vpx_mem_intrnl.h @@ -8,24 +8,24 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VPX_MEM_INCLUDE_VPX_MEM_INTRNL_H_ #define VPX_MEM_INCLUDE_VPX_MEM_INTRNL_H_ #include "./vpx_config.h" -#define ADDRESS_STORAGE_SIZE sizeof(size_t) +#define ADDRESS_STORAGE_SIZE sizeof(size_t) #ifndef DEFAULT_ALIGNMENT -# if defined(VXWORKS) -# define DEFAULT_ALIGNMENT 32 /*default addr alignment to use in -calls to vpx_* functions other -than vpx_memalign*/ -# else -# define DEFAULT_ALIGNMENT (2 * sizeof(void*)) /* NOLINT */ -# endif +#if defined(VXWORKS) +/*default addr alignment to use in calls to vpx_* functions other than + * vpx_memalign*/ +#define DEFAULT_ALIGNMENT 32 +#else +#define DEFAULT_ALIGNMENT (2 * sizeof(void *)) /* NOLINT */ +#endif #endif /*returns an addr aligned to the byte boundary specified by align*/ -#define align_addr(addr,align) (void*)(((size_t)(addr) + ((align) - 1)) & (size_t)-(align)) +#define align_addr(addr, align) \ + (void *)(((size_t)(addr) + ((align)-1)) & ~(size_t)((align)-1)) #endif // VPX_MEM_INCLUDE_VPX_MEM_INTRNL_H_ diff --git a/libs/libvpx/vpx_mem/vpx_mem.c b/libs/libvpx/vpx_mem/vpx_mem.c index b261fc0da1..c94ed52d16 100644 --- a/libs/libvpx/vpx_mem/vpx_mem.c +++ b/libs/libvpx/vpx_mem/vpx_mem.c @@ -8,48 +8,76 @@ * be found in the AUTHORS file in the root of the source tree. */ - #include "vpx_mem.h" +#include #include #include #include #include "include/vpx_mem_intrnl.h" #include "vpx/vpx_integer.h" +#if SIZE_MAX > (1ULL << 40) +#define VPX_MAX_ALLOCABLE_MEMORY (1ULL << 40) +#else +// For 32-bit targets keep this below INT_MAX to avoid valgrind warnings. +#define VPX_MAX_ALLOCABLE_MEMORY ((1ULL << 31) - (1 << 16)) +#endif + +// Returns 0 in case of overflow of nmemb * size. 
+static int check_size_argument_overflow(uint64_t nmemb, uint64_t size) { + const uint64_t total_size = nmemb * size; + if (nmemb == 0) return 1; + if (size > VPX_MAX_ALLOCABLE_MEMORY / nmemb) return 0; + if (total_size != (size_t)total_size) return 0; + + return 1; +} + +static size_t *get_malloc_address_location(void *const mem) { + return ((size_t *)mem) - 1; +} + +static uint64_t get_aligned_malloc_size(size_t size, size_t align) { + return (uint64_t)size + align - 1 + ADDRESS_STORAGE_SIZE; +} + +static void set_actual_malloc_address(void *const mem, + const void *const malloc_addr) { + size_t *const malloc_addr_location = get_malloc_address_location(mem); + *malloc_addr_location = (size_t)malloc_addr; +} + +static void *get_actual_malloc_address(void *const mem) { + size_t *const malloc_addr_location = get_malloc_address_location(mem); + return (void *)(*malloc_addr_location); +} + void *vpx_memalign(size_t align, size_t size) { - void *addr, - * x = NULL; - - addr = malloc(size + align - 1 + ADDRESS_STORAGE_SIZE); + void *x = NULL, *addr; + const uint64_t aligned_size = get_aligned_malloc_size(size, align); + if (!check_size_argument_overflow(1, aligned_size)) return NULL; + addr = malloc((size_t)aligned_size); if (addr) { - x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align); - /* save the actual malloc address */ - ((size_t *)x)[-1] = (size_t)addr; + x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, align); + set_actual_malloc_address(x, addr); } - return x; } -void *vpx_malloc(size_t size) { - return vpx_memalign(DEFAULT_ALIGNMENT, size); -} +void *vpx_malloc(size_t size) { return vpx_memalign(DEFAULT_ALIGNMENT, size); } void *vpx_calloc(size_t num, size_t size) { void *x; + if (!check_size_argument_overflow(num, size)) return NULL; - x = vpx_memalign(DEFAULT_ALIGNMENT, num * size); - - if (x) - memset(x, 0, num * size); - + x = vpx_malloc(num * size); + if (x) memset(x, 0, num * size); return x; } void *vpx_realloc(void 
*memblk, size_t size) { - void *addr, - * new_addr = NULL; - int align = DEFAULT_ALIGNMENT; + void *new_addr = NULL; /* The realloc() function changes the size of the object pointed to by @@ -64,18 +92,16 @@ void *vpx_realloc(void *memblk, size_t size) { else if (!size) vpx_free(memblk); else { - addr = (void *)(((size_t *)memblk)[-1]); - memblk = NULL; + void *addr = get_actual_malloc_address(memblk); + const uint64_t aligned_size = + get_aligned_malloc_size(size, DEFAULT_ALIGNMENT); + if (!check_size_argument_overflow(1, aligned_size)) return NULL; - new_addr = realloc(addr, size + align + ADDRESS_STORAGE_SIZE); - - if (new_addr) { - addr = new_addr; - new_addr = (void *)(((size_t) - ((unsigned char *)new_addr + ADDRESS_STORAGE_SIZE) + (align - 1)) & - (size_t) - align); - /* save the actual malloc address */ - ((size_t *)new_addr)[-1] = (size_t)addr; + addr = realloc(addr, (size_t)aligned_size); + if (addr) { + new_addr = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, + DEFAULT_ALIGNMENT); + set_actual_malloc_address(new_addr, addr); } } @@ -84,7 +110,7 @@ void *vpx_realloc(void *memblk, size_t size) { void vpx_free(void *memblk) { if (memblk) { - void *addr = (void *)(((size_t *)memblk)[-1]); + void *addr = get_actual_malloc_address(memblk); free(addr); } } @@ -93,8 +119,7 @@ void vpx_free(void *memblk) { void *vpx_memset16(void *dest, int val, size_t length) { size_t i; uint16_t *dest16 = (uint16_t *)dest; - for (i = 0; i < length; i++) - *dest16++ = val; + for (i = 0; i < length; i++) *dest16++ = val; return dest; } #endif // CONFIG_VP9_HIGHBITDEPTH diff --git a/libs/libvpx/vpx_mem/vpx_mem.h b/libs/libvpx/vpx_mem/vpx_mem.h index a006e0f00b..c14f288b89 100644 --- a/libs/libvpx/vpx_mem/vpx_mem.h +++ b/libs/libvpx/vpx_mem/vpx_mem.h @@ -8,13 +8,12 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VPX_MEM_VPX_MEM_H_ #define VPX_MEM_VPX_MEM_H_ #include "vpx_config.h" #if defined(__uClinux__) -# include +#include #endif #include @@ -24,20 +23,20 @@ extern "C" { #endif - void *vpx_memalign(size_t align, size_t size); - void *vpx_malloc(size_t size); - void *vpx_calloc(size_t num, size_t size); - void *vpx_realloc(void *memblk, size_t size); - void vpx_free(void *memblk); +void *vpx_memalign(size_t align, size_t size); +void *vpx_malloc(size_t size); +void *vpx_calloc(size_t num, size_t size); +void *vpx_realloc(void *memblk, size_t size); +void vpx_free(void *memblk); #if CONFIG_VP9_HIGHBITDEPTH - void *vpx_memset16(void *dest, int val, size_t length); +void *vpx_memset16(void *dest, int val, size_t length); #endif #include #ifdef VPX_MEM_PLTFRM -# include VPX_MEM_PLTFRM +#include VPX_MEM_PLTFRM #endif #if defined(__cplusplus) diff --git a/libs/libvpx/vpx_ports/arm.h b/libs/libvpx/vpx_ports/arm.h index 42c98f5a83..7be6104a4f 100644 --- a/libs/libvpx/vpx_ports/arm.h +++ b/libs/libvpx/vpx_ports/arm.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VPX_PORTS_ARM_H_ #define VPX_PORTS_ARM_H_ #include @@ -19,17 +18,17 @@ extern "C" { #endif /*ARMv5TE "Enhanced DSP" instructions.*/ -#define HAS_EDSP 0x01 +#define HAS_EDSP 0x01 /*ARMv6 "Parallel" or "Media" instructions.*/ #define HAS_MEDIA 0x02 /*ARMv7 optional NEON instructions.*/ -#define HAS_NEON 0x04 +#define HAS_NEON 0x04 int arm_cpu_caps(void); // Earlier gcc compilers have issues with some neon intrinsics -#if !defined(__clang__) && defined(__GNUC__) && \ - __GNUC__ == 4 && __GNUC_MINOR__ <= 6 +#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 4 && \ + __GNUC_MINOR__ <= 6 #define VPX_INCOMPATIBLE_GCC #endif @@ -38,4 +37,3 @@ int arm_cpu_caps(void); #endif #endif // VPX_PORTS_ARM_H_ - diff --git a/libs/libvpx/vpx_ports/arm_cpudetect.c b/libs/libvpx/vpx_ports/arm_cpudetect.c index 8a4b8af964..79c60f7a19 100644 --- a/libs/libvpx/vpx_ports/arm_cpudetect.c +++ b/libs/libvpx/vpx_ports/arm_cpudetect.c @@ -10,8 +10,9 @@ #include #include -#include "vpx_ports/arm.h" + #include "./vpx_config.h" +#include "vpx_ports/arm.h" #ifdef WINAPI_FAMILY #include @@ -49,9 +50,6 @@ int arm_cpu_caps(void) { return flags; } mask = arm_cpu_env_mask(); -#if HAVE_MEDIA - flags |= HAS_MEDIA; -#endif /* HAVE_MEDIA */ #if HAVE_NEON || HAVE_NEON_ASM flags |= HAS_NEON; #endif /* HAVE_NEON || HAVE_NEON_ASM */ @@ -71,33 +69,22 @@ int arm_cpu_caps(void) { return flags; } mask = arm_cpu_env_mask(); - /* MSVC has no inline __asm support for ARM, but it does let you __emit - * instructions via their assembled hex code. - * All of these instructions should be essentially nops. - */ -#if HAVE_MEDIA - if (mask & HAS_MEDIA) - __try { - /*SHADD8 r3,r3,r3*/ - __emit(0xE6333F93); - flags |= HAS_MEDIA; - } __except (GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) { - /*Ignore exception.*/ - } -} -#endif /* HAVE_MEDIA */ +/* MSVC has no inline __asm support for ARM, but it does let you __emit + * instructions via their assembled hex code. 
+ * All of these instructions should be essentially nops. + */ #if HAVE_NEON || HAVE_NEON_ASM -if (mask &HAS_NEON) { - __try { - /*VORR q0,q0,q0*/ - __emit(0xF2200150); - flags |= HAS_NEON; - } __except (GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) { - /*Ignore exception.*/ + if (mask & HAS_NEON) { + __try { + /*VORR q0,q0,q0*/ + __emit(0xF2200150); + flags |= HAS_NEON; + } __except (GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) { + /*Ignore exception.*/ + } } -} #endif /* HAVE_NEON || HAVE_NEON_ASM */ -return flags & mask; + return flags & mask; } #elif defined(__ANDROID__) /* end _MSC_VER */ @@ -113,12 +100,8 @@ int arm_cpu_caps(void) { mask = arm_cpu_env_mask(); features = android_getCpuFeatures(); -#if HAVE_MEDIA - flags |= HAS_MEDIA; -#endif /* HAVE_MEDIA */ #if HAVE_NEON || HAVE_NEON_ASM - if (features & ANDROID_CPU_ARM_FEATURE_NEON) - flags |= HAS_NEON; + if (features & ANDROID_CPU_ARM_FEATURE_NEON) flags |= HAS_NEON; #endif /* HAVE_NEON || HAVE_NEON_ASM */ return flags & mask; } @@ -155,21 +138,13 @@ int arm_cpu_caps(void) { } } #endif /* HAVE_NEON || HAVE_NEON_ASM */ -#if HAVE_MEDIA - if (memcmp(buf, "CPU architecture:", 17) == 0) { - int version; - version = atoi(buf + 17); - if (version >= 6) { - flags |= HAS_MEDIA; - } - } -#endif /* HAVE_MEDIA */ } fclose(fin); } return flags & mask; } -#else /* end __linux__ */ -#error "--enable-runtime-cpu-detect selected, but no CPU detection method " \ +#else /* end __linux__ */ +#error \ + "--enable-runtime-cpu-detect selected, but no CPU detection method " \ "available for your platform. Reconfigure with --disable-runtime-cpu-detect." 
#endif diff --git a/libs/libvpx/vpx_ports/bitops.h b/libs/libvpx/vpx_ports/bitops.h index 84ff3659fe..0ed7189ff6 100644 --- a/libs/libvpx/vpx_ports/bitops.h +++ b/libs/libvpx/vpx_ports/bitops.h @@ -16,11 +16,10 @@ #include "vpx_ports/msvc.h" #ifdef _MSC_VER -# include // the ceil() definition must precede intrin.h -# if _MSC_VER > 1310 && (defined(_M_X64) || defined(_M_IX86)) -# include -# define USE_MSC_INTRINSICS -# endif +#if defined(_M_X64) || defined(_M_IX86) +#include +#define USE_MSC_INTRINSICS +#endif #endif #ifdef __cplusplus diff --git a/libs/libvpx/vpx_ports/emmintrin_compat.h b/libs/libvpx/vpx_ports/emmintrin_compat.h index 16176383d2..903534e0c0 100644 --- a/libs/libvpx/vpx_ports/emmintrin_compat.h +++ b/libs/libvpx/vpx_ports/emmintrin_compat.h @@ -15,40 +15,40 @@ /* From emmintrin.h (gcc 4.5.3) */ /* Casts between various SP, DP, INT vector types. Note that these do no conversion of values, they just change the type. */ -extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -_mm_castpd_ps(__m128d __A) -{ - return (__m128) __A; +extern __inline __m128 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_castpd_ps(__m128d __A) { + return (__m128)__A; } -extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -_mm_castpd_si128(__m128d __A) -{ - return (__m128i) __A; +extern __inline __m128i + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_castpd_si128(__m128d __A) { + return (__m128i)__A; } -extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -_mm_castps_pd(__m128 __A) -{ - return (__m128d) __A; +extern __inline __m128d + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_castps_pd(__m128 __A) { + return (__m128d)__A; } -extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -_mm_castps_si128(__m128 __A) -{ - return (__m128i) __A; +extern 
__inline __m128i + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_castps_si128(__m128 __A) { + return (__m128i)__A; } -extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -_mm_castsi128_ps(__m128i __A) -{ - return (__m128) __A; +extern __inline __m128 + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_castsi128_ps(__m128i __A) { + return (__m128)__A; } -extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__)) -_mm_castsi128_pd(__m128i __A) -{ - return (__m128d) __A; +extern __inline __m128d + __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + _mm_castsi128_pd(__m128i __A) { + return (__m128d)__A; } #endif diff --git a/libs/libvpx/vpx_ports/mem.h b/libs/libvpx/vpx_ports/mem.h index 7502f90632..2d49b7a06d 100644 --- a/libs/libvpx/vpx_ports/mem.h +++ b/libs/libvpx/vpx_ports/mem.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VPX_PORTS_MEM_H_ #define VPX_PORTS_MEM_H_ @@ -16,12 +15,12 @@ #include "vpx/vpx_integer.h" #if (defined(__GNUC__) && __GNUC__) || defined(__SUNPRO_C) -#define DECLARE_ALIGNED(n,typ,val) typ val __attribute__ ((aligned (n))) +#define DECLARE_ALIGNED(n, typ, val) typ val __attribute__((aligned(n))) #elif defined(_MSC_VER) -#define DECLARE_ALIGNED(n,typ,val) __declspec(align(n)) typ val +#define DECLARE_ALIGNED(n, typ, val) __declspec(align(n)) typ val #else #warning No alignment directives known for this compiler. -#define DECLARE_ALIGNED(n,typ,val) typ val +#define DECLARE_ALIGNED(n, typ, val) typ val #endif /* Indicates that the usage of the specified variable has been audited to assure @@ -29,7 +28,7 @@ * warnings on gcc. 
*/ #if defined(__GNUC__) && __GNUC__ -#define UNINITIALIZED_IS_SAFE(x) x=x +#define UNINITIALIZED_IS_SAFE(x) x = x #else #define UNINITIALIZED_IS_SAFE(x) x #endif @@ -39,15 +38,25 @@ #endif /* Shift down with rounding */ -#define ROUND_POWER_OF_TWO(value, n) \ - (((value) + (1 << ((n) - 1))) >> (n)) +#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n)) +#define ROUND64_POWER_OF_TWO(value, n) (((value) + (1ULL << ((n)-1))) >> (n)) #define ALIGN_POWER_OF_TWO(value, n) \ - (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1)) + (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1)) +#define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)(x)) << 1)) #if CONFIG_VP9_HIGHBITDEPTH -#define CONVERT_TO_SHORTPTR(x) ((uint16_t*)(((uintptr_t)x) << 1)) -#define CONVERT_TO_BYTEPTR(x) ((uint8_t*)(((uintptr_t)x) >> 1)) +#define CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)(x)) >> 1)) #endif // CONFIG_VP9_HIGHBITDEPTH +#if !defined(__has_feature) +#define __has_feature(x) 0 +#endif // !defined(__has_feature) + +#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) +#define VPX_WITH_ASAN 1 +#else +#define VPX_WITH_ASAN 0 +#endif // __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) + #endif // VPX_PORTS_MEM_H_ diff --git a/libs/libvpx/vpx_ports/mem_ops.h b/libs/libvpx/vpx_ports/mem_ops.h index d4a3d773f3..343f27577c 100644 --- a/libs/libvpx/vpx_ports/mem_ops.h +++ b/libs/libvpx/vpx_ports/mem_ops.h @@ -46,36 +46,36 @@ #undef MEM_VALUE_T_SZ_BITS #define MEM_VALUE_T_SZ_BITS (sizeof(MEM_VALUE_T) << 3) -#undef mem_ops_wrap_symbol +#undef mem_ops_wrap_symbol #define mem_ops_wrap_symbol(fn) mem_ops_wrap_symbol2(fn, MEM_VALUE_T) -#undef mem_ops_wrap_symbol2 -#define mem_ops_wrap_symbol2(fn,typ) mem_ops_wrap_symbol3(fn,typ) -#undef mem_ops_wrap_symbol3 -#define mem_ops_wrap_symbol3(fn,typ) fn##_as_##typ +#undef mem_ops_wrap_symbol2 +#define mem_ops_wrap_symbol2(fn, typ) mem_ops_wrap_symbol3(fn, typ) +#undef mem_ops_wrap_symbol3 +#define 
mem_ops_wrap_symbol3(fn, typ) fn##_as_##typ /* * Include aligned access routines */ #define INCLUDED_BY_MEM_OPS_H #include "mem_ops_aligned.h" -#undef INCLUDED_BY_MEM_OPS_H +#undef INCLUDED_BY_MEM_OPS_H -#undef mem_get_be16 +#undef mem_get_be16 #define mem_get_be16 mem_ops_wrap_symbol(mem_get_be16) static unsigned MEM_VALUE_T mem_get_be16(const void *vmem) { - unsigned MEM_VALUE_T val; - const MAU_T *mem = (const MAU_T *)vmem; + unsigned MEM_VALUE_T val; + const MAU_T *mem = (const MAU_T *)vmem; val = mem[0] << 8; val |= mem[1]; return val; } -#undef mem_get_be24 +#undef mem_get_be24 #define mem_get_be24 mem_ops_wrap_symbol(mem_get_be24) static unsigned MEM_VALUE_T mem_get_be24(const void *vmem) { - unsigned MEM_VALUE_T val; - const MAU_T *mem = (const MAU_T *)vmem; + unsigned MEM_VALUE_T val; + const MAU_T *mem = (const MAU_T *)vmem; val = mem[0] << 16; val |= mem[1] << 8; @@ -83,35 +83,35 @@ static unsigned MEM_VALUE_T mem_get_be24(const void *vmem) { return val; } -#undef mem_get_be32 +#undef mem_get_be32 #define mem_get_be32 mem_ops_wrap_symbol(mem_get_be32) static unsigned MEM_VALUE_T mem_get_be32(const void *vmem) { - unsigned MEM_VALUE_T val; - const MAU_T *mem = (const MAU_T *)vmem; + unsigned MEM_VALUE_T val; + const MAU_T *mem = (const MAU_T *)vmem; - val = mem[0] << 24; + val = ((unsigned MEM_VALUE_T)mem[0]) << 24; val |= mem[1] << 16; val |= mem[2] << 8; val |= mem[3]; return val; } -#undef mem_get_le16 +#undef mem_get_le16 #define mem_get_le16 mem_ops_wrap_symbol(mem_get_le16) static unsigned MEM_VALUE_T mem_get_le16(const void *vmem) { - unsigned MEM_VALUE_T val; - const MAU_T *mem = (const MAU_T *)vmem; + unsigned MEM_VALUE_T val; + const MAU_T *mem = (const MAU_T *)vmem; val = mem[1] << 8; val |= mem[0]; return val; } -#undef mem_get_le24 +#undef mem_get_le24 #define mem_get_le24 mem_ops_wrap_symbol(mem_get_le24) static unsigned MEM_VALUE_T mem_get_le24(const void *vmem) { - unsigned MEM_VALUE_T val; - const MAU_T *mem = (const MAU_T *)vmem; + 
unsigned MEM_VALUE_T val; + const MAU_T *mem = (const MAU_T *)vmem; val = mem[2] << 16; val |= mem[1] << 8; @@ -119,26 +119,27 @@ static unsigned MEM_VALUE_T mem_get_le24(const void *vmem) { return val; } -#undef mem_get_le32 +#undef mem_get_le32 #define mem_get_le32 mem_ops_wrap_symbol(mem_get_le32) static unsigned MEM_VALUE_T mem_get_le32(const void *vmem) { - unsigned MEM_VALUE_T val; - const MAU_T *mem = (const MAU_T *)vmem; + unsigned MEM_VALUE_T val; + const MAU_T *mem = (const MAU_T *)vmem; - val = mem[3] << 24; + val = ((unsigned MEM_VALUE_T)mem[3]) << 24; val |= mem[2] << 16; val |= mem[1] << 8; val |= mem[0]; return val; } -#define mem_get_s_generic(end,sz) \ - static VPX_INLINE signed MEM_VALUE_T mem_get_s##end##sz(const void *vmem) {\ - const MAU_T *mem = (const MAU_T*)vmem;\ - signed MEM_VALUE_T val = mem_get_##end##sz(mem);\ - return (val << (MEM_VALUE_T_SZ_BITS - sz)) >> (MEM_VALUE_T_SZ_BITS - sz);\ +#define mem_get_s_generic(end, sz) \ + static VPX_INLINE signed MEM_VALUE_T mem_get_s##end##sz(const void *vmem) { \ + const MAU_T *mem = (const MAU_T *)vmem; \ + signed MEM_VALUE_T val = mem_get_##end##sz(mem); \ + return (val << (MEM_VALUE_T_SZ_BITS - sz)) >> (MEM_VALUE_T_SZ_BITS - sz); \ } +/* clang-format off */ #undef mem_get_sbe16 #define mem_get_sbe16 mem_ops_wrap_symbol(mem_get_sbe16) mem_get_s_generic(be, 16) @@ -168,8 +169,8 @@ mem_get_s_generic(le, 32) static VPX_INLINE void mem_put_be16(void *vmem, MEM_VALUE_T val) { MAU_T *mem = (MAU_T *)vmem; - mem[0] = (val >> 8) & 0xff; - mem[1] = (val >> 0) & 0xff; + mem[0] = (MAU_T)((val >> 8) & 0xff); + mem[1] = (MAU_T)((val >> 0) & 0xff); } #undef mem_put_be24 @@ -177,9 +178,9 @@ static VPX_INLINE void mem_put_be16(void *vmem, MEM_VALUE_T val) { static VPX_INLINE void mem_put_be24(void *vmem, MEM_VALUE_T val) { MAU_T *mem = (MAU_T *)vmem; - mem[0] = (val >> 16) & 0xff; - mem[1] = (val >> 8) & 0xff; - mem[2] = (val >> 0) & 0xff; + mem[0] = (MAU_T)((val >> 16) & 0xff); + mem[1] = (MAU_T)((val >> 8) & 
0xff); + mem[2] = (MAU_T)((val >> 0) & 0xff); } #undef mem_put_be32 @@ -187,10 +188,10 @@ static VPX_INLINE void mem_put_be24(void *vmem, MEM_VALUE_T val) { static VPX_INLINE void mem_put_be32(void *vmem, MEM_VALUE_T val) { MAU_T *mem = (MAU_T *)vmem; - mem[0] = (val >> 24) & 0xff; - mem[1] = (val >> 16) & 0xff; - mem[2] = (val >> 8) & 0xff; - mem[3] = (val >> 0) & 0xff; + mem[0] = (MAU_T)((val >> 24) & 0xff); + mem[1] = (MAU_T)((val >> 16) & 0xff); + mem[2] = (MAU_T)((val >> 8) & 0xff); + mem[3] = (MAU_T)((val >> 0) & 0xff); } #undef mem_put_le16 @@ -198,8 +199,8 @@ static VPX_INLINE void mem_put_be32(void *vmem, MEM_VALUE_T val) { static VPX_INLINE void mem_put_le16(void *vmem, MEM_VALUE_T val) { MAU_T *mem = (MAU_T *)vmem; - mem[0] = (val >> 0) & 0xff; - mem[1] = (val >> 8) & 0xff; + mem[0] = (MAU_T)((val >> 0) & 0xff); + mem[1] = (MAU_T)((val >> 8) & 0xff); } #undef mem_put_le24 @@ -207,9 +208,9 @@ static VPX_INLINE void mem_put_le16(void *vmem, MEM_VALUE_T val) { static VPX_INLINE void mem_put_le24(void *vmem, MEM_VALUE_T val) { MAU_T *mem = (MAU_T *)vmem; - mem[0] = (val >> 0) & 0xff; - mem[1] = (val >> 8) & 0xff; - mem[2] = (val >> 16) & 0xff; + mem[0] = (MAU_T)((val >> 0) & 0xff); + mem[1] = (MAU_T)((val >> 8) & 0xff); + mem[2] = (MAU_T)((val >> 16) & 0xff); } #undef mem_put_le32 @@ -217,10 +218,11 @@ static VPX_INLINE void mem_put_le24(void *vmem, MEM_VALUE_T val) { static VPX_INLINE void mem_put_le32(void *vmem, MEM_VALUE_T val) { MAU_T *mem = (MAU_T *)vmem; - mem[0] = (val >> 0) & 0xff; - mem[1] = (val >> 8) & 0xff; - mem[2] = (val >> 16) & 0xff; - mem[3] = (val >> 24) & 0xff; + mem[0] = (MAU_T)((val >> 0) & 0xff); + mem[1] = (MAU_T)((val >> 8) & 0xff); + mem[2] = (MAU_T)((val >> 16) & 0xff); + mem[3] = (MAU_T)((val >> 24) & 0xff); } +/* clang-format on */ #endif // VPX_PORTS_MEM_OPS_H_ diff --git a/libs/libvpx/vpx_ports/mem_ops_aligned.h b/libs/libvpx/vpx_ports/mem_ops_aligned.h index c16111fec5..ccac391ba0 100644 --- 
a/libs/libvpx/vpx_ports/mem_ops_aligned.h +++ b/libs/libvpx/vpx_ports/mem_ops_aligned.h @@ -27,86 +27,87 @@ /* Architectures that provide instructions for doing this byte swapping * could redefine these macros. */ -#define swap_endian_16(val,raw) do {\ - val = ((raw>>8) & 0x00ff) \ - | ((raw<<8) & 0xff00);\ - } while(0) -#define swap_endian_32(val,raw) do {\ - val = ((raw>>24) & 0x000000ff) \ - | ((raw>>8) & 0x0000ff00) \ - | ((raw<<8) & 0x00ff0000) \ - | ((raw<<24) & 0xff000000); \ - } while(0) -#define swap_endian_16_se(val,raw) do {\ - swap_endian_16(val,raw);\ - val = ((val << 16) >> 16);\ - } while(0) -#define swap_endian_32_se(val,raw) swap_endian_32(val,raw) +#define swap_endian_16(val, raw) \ + do { \ + val = (uint16_t)(((raw >> 8) & 0x00ff) | ((raw << 8) & 0xff00)); \ + } while (0) +#define swap_endian_32(val, raw) \ + do { \ + val = ((raw >> 24) & 0x000000ff) | ((raw >> 8) & 0x0000ff00) | \ + ((raw << 8) & 0x00ff0000) | ((raw << 24) & 0xff000000); \ + } while (0) +#define swap_endian_16_se(val, raw) \ + do { \ + swap_endian_16(val, raw); \ + val = ((val << 16) >> 16); \ + } while (0) +#define swap_endian_32_se(val, raw) swap_endian_32(val, raw) -#define mem_get_ne_aligned_generic(end,sz) \ - static VPX_INLINE unsigned MEM_VALUE_T \ - mem_get_##end##sz##_aligned(const void *vmem) {\ - const uint##sz##_t *mem = (const uint##sz##_t *)vmem;\ - return *mem;\ +#define mem_get_ne_aligned_generic(end, sz) \ + static VPX_INLINE unsigned MEM_VALUE_T mem_get_##end##sz##_aligned( \ + const void *vmem) { \ + const uint##sz##_t *mem = (const uint##sz##_t *)vmem; \ + return *mem; \ } -#define mem_get_sne_aligned_generic(end,sz) \ - static VPX_INLINE signed MEM_VALUE_T \ - mem_get_s##end##sz##_aligned(const void *vmem) {\ - const int##sz##_t *mem = (const int##sz##_t *)vmem;\ - return *mem;\ +#define mem_get_sne_aligned_generic(end, sz) \ + static VPX_INLINE signed MEM_VALUE_T mem_get_s##end##sz##_aligned( \ + const void *vmem) { \ + const int##sz##_t *mem = (const 
int##sz##_t *)vmem; \ + return *mem; \ } -#define mem_get_se_aligned_generic(end,sz) \ - static VPX_INLINE unsigned MEM_VALUE_T \ - mem_get_##end##sz##_aligned(const void *vmem) {\ - const uint##sz##_t *mem = (const uint##sz##_t *)vmem;\ - unsigned MEM_VALUE_T val, raw = *mem;\ - swap_endian_##sz(val,raw);\ - return val;\ +#define mem_get_se_aligned_generic(end, sz) \ + static VPX_INLINE unsigned MEM_VALUE_T mem_get_##end##sz##_aligned( \ + const void *vmem) { \ + const uint##sz##_t *mem = (const uint##sz##_t *)vmem; \ + unsigned MEM_VALUE_T val, raw = *mem; \ + swap_endian_##sz(val, raw); \ + return val; \ } -#define mem_get_sse_aligned_generic(end,sz) \ - static VPX_INLINE signed MEM_VALUE_T \ - mem_get_s##end##sz##_aligned(const void *vmem) {\ - const int##sz##_t *mem = (const int##sz##_t *)vmem;\ - unsigned MEM_VALUE_T val, raw = *mem;\ - swap_endian_##sz##_se(val,raw);\ - return val;\ +#define mem_get_sse_aligned_generic(end, sz) \ + static VPX_INLINE signed MEM_VALUE_T mem_get_s##end##sz##_aligned( \ + const void *vmem) { \ + const int##sz##_t *mem = (const int##sz##_t *)vmem; \ + unsigned MEM_VALUE_T val, raw = *mem; \ + swap_endian_##sz##_se(val, raw); \ + return val; \ } -#define mem_put_ne_aligned_generic(end,sz) \ - static VPX_INLINE void \ - mem_put_##end##sz##_aligned(void *vmem, MEM_VALUE_T val) {\ - uint##sz##_t *mem = (uint##sz##_t *)vmem;\ - *mem = (uint##sz##_t)val;\ +#define mem_put_ne_aligned_generic(end, sz) \ + static VPX_INLINE void mem_put_##end##sz##_aligned(void *vmem, \ + MEM_VALUE_T val) { \ + uint##sz##_t *mem = (uint##sz##_t *)vmem; \ + *mem = (uint##sz##_t)val; \ } -#define mem_put_se_aligned_generic(end,sz) \ - static VPX_INLINE void \ - mem_put_##end##sz##_aligned(void *vmem, MEM_VALUE_T val) {\ - uint##sz##_t *mem = (uint##sz##_t *)vmem, raw;\ - swap_endian_##sz(raw,val);\ - *mem = (uint##sz##_t)raw;\ +#define mem_put_se_aligned_generic(end, sz) \ + static VPX_INLINE void mem_put_##end##sz##_aligned(void *vmem, \ + MEM_VALUE_T val) 
{ \ + uint##sz##_t *mem = (uint##sz##_t *)vmem, raw; \ + swap_endian_##sz(raw, val); \ + *mem = (uint##sz##_t)raw; \ } #include "vpx_config.h" #if CONFIG_BIG_ENDIAN -#define mem_get_be_aligned_generic(sz) mem_get_ne_aligned_generic(be,sz) -#define mem_get_sbe_aligned_generic(sz) mem_get_sne_aligned_generic(be,sz) -#define mem_get_le_aligned_generic(sz) mem_get_se_aligned_generic(le,sz) -#define mem_get_sle_aligned_generic(sz) mem_get_sse_aligned_generic(le,sz) -#define mem_put_be_aligned_generic(sz) mem_put_ne_aligned_generic(be,sz) -#define mem_put_le_aligned_generic(sz) mem_put_se_aligned_generic(le,sz) +#define mem_get_be_aligned_generic(sz) mem_get_ne_aligned_generic(be, sz) +#define mem_get_sbe_aligned_generic(sz) mem_get_sne_aligned_generic(be, sz) +#define mem_get_le_aligned_generic(sz) mem_get_se_aligned_generic(le, sz) +#define mem_get_sle_aligned_generic(sz) mem_get_sse_aligned_generic(le, sz) +#define mem_put_be_aligned_generic(sz) mem_put_ne_aligned_generic(be, sz) +#define mem_put_le_aligned_generic(sz) mem_put_se_aligned_generic(le, sz) #else -#define mem_get_be_aligned_generic(sz) mem_get_se_aligned_generic(be,sz) -#define mem_get_sbe_aligned_generic(sz) mem_get_sse_aligned_generic(be,sz) -#define mem_get_le_aligned_generic(sz) mem_get_ne_aligned_generic(le,sz) -#define mem_get_sle_aligned_generic(sz) mem_get_sne_aligned_generic(le,sz) -#define mem_put_be_aligned_generic(sz) mem_put_se_aligned_generic(be,sz) -#define mem_put_le_aligned_generic(sz) mem_put_ne_aligned_generic(le,sz) +#define mem_get_be_aligned_generic(sz) mem_get_se_aligned_generic(be, sz) +#define mem_get_sbe_aligned_generic(sz) mem_get_sse_aligned_generic(be, sz) +#define mem_get_le_aligned_generic(sz) mem_get_ne_aligned_generic(le, sz) +#define mem_get_sle_aligned_generic(sz) mem_get_sne_aligned_generic(le, sz) +#define mem_put_be_aligned_generic(sz) mem_put_se_aligned_generic(be, sz) +#define mem_put_le_aligned_generic(sz) mem_put_ne_aligned_generic(le, sz) #endif +/* clang-format 
off */ #undef mem_get_be16_aligned #define mem_get_be16_aligned mem_ops_wrap_symbol(mem_get_be16_aligned) mem_get_be_aligned_generic(16) @@ -165,5 +166,6 @@ mem_put_le_aligned_generic(32) #undef swap_endian_32 #undef swap_endian_16_se #undef swap_endian_32_se +/* clang-format on */ #endif // VPX_PORTS_MEM_OPS_ALIGNED_H_ diff --git a/libs/libvpx/vpx_ports/msvc.h b/libs/libvpx/vpx_ports/msvc.h index cab77405f4..3ff71474b3 100644 --- a/libs/libvpx/vpx_ports/msvc.h +++ b/libs/libvpx/vpx_ports/msvc.h @@ -14,9 +14,9 @@ #include "./vpx_config.h" -# if _MSC_VER < 1900 // VS2015 provides snprintf -# define snprintf _snprintf -# endif // _MSC_VER < 1900 +#if _MSC_VER < 1900 // VS2015 provides snprintf +#define snprintf _snprintf +#endif // _MSC_VER < 1900 #if _MSC_VER < 1800 // VS2013 provides round #include diff --git a/libs/libvpx/vpx_ports/vpx_once.h b/libs/libvpx/vpx_ports/vpx_once.h index da04db4590..7d9fc3b406 100644 --- a/libs/libvpx/vpx_ports/vpx_once.h +++ b/libs/libvpx/vpx_ports/vpx_once.h @@ -48,102 +48,92 @@ * As a static, once_state will be zero-initialized as program start. */ static LONG once_state; -static void once(void (*func)(void)) -{ - /* Try to advance once_state from its initial value of 0 to 1. - * Only one thread can succeed in doing so. - */ - if (InterlockedCompareExchange(&once_state, 1, 0) == 0) { - /* We're the winning thread, having set once_state to 1. - * Call our function. */ - func(); - /* Now advance once_state to 2, unblocking any other threads. */ - InterlockedIncrement(&once_state); - return; - } - - /* We weren't the winning thread, but we want to block on - * the state variable so we don't return before func() - * has finished executing elsewhere. - * - * Try to advance once_state from 2 to 2, which is only possible - * after the winning thead advances it from 1 to 2. - */ - while (InterlockedCompareExchange(&once_state, 2, 2) != 2) { - /* State isn't yet 2. Try again. 
- * - * We are used for singleton initialization functions, - * which should complete quickly. Contention will likewise - * be rare, so it's worthwhile to use a simple but cpu- - * intensive busy-wait instead of successive backoff, - * waiting on a kernel object, or another heavier-weight scheme. - * - * We can at least yield our timeslice. - */ - Sleep(0); - } - - /* We've seen once_state advance to 2, so we know func() - * has been called. And we've left once_state as we found it, - * so other threads will have the same experience. - * - * It's safe to return now. - */ +static void once(void (*func)(void)) { + /* Try to advance once_state from its initial value of 0 to 1. + * Only one thread can succeed in doing so. + */ + if (InterlockedCompareExchange(&once_state, 1, 0) == 0) { + /* We're the winning thread, having set once_state to 1. + * Call our function. */ + func(); + /* Now advance once_state to 2, unblocking any other threads. */ + InterlockedIncrement(&once_state); return; -} + } + /* We weren't the winning thread, but we want to block on + * the state variable so we don't return before func() + * has finished executing elsewhere. + * + * Try to advance once_state from 2 to 2, which is only possible + * after the winning thead advances it from 1 to 2. + */ + while (InterlockedCompareExchange(&once_state, 2, 2) != 2) { + /* State isn't yet 2. Try again. + * + * We are used for singleton initialization functions, + * which should complete quickly. Contention will likewise + * be rare, so it's worthwhile to use a simple but cpu- + * intensive busy-wait instead of successive backoff, + * waiting on a kernel object, or another heavier-weight scheme. + * + * We can at least yield our timeslice. + */ + Sleep(0); + } + + /* We've seen once_state advance to 2, so we know func() + * has been called. And we've left once_state as we found it, + * so other threads will have the same experience. + * + * It's safe to return now. 
+ */ + return; +} #elif CONFIG_MULTITHREAD && defined(__OS2__) #define INCL_DOS #include -static void once(void (*func)(void)) -{ - static int done; +static void once(void (*func)(void)) { + static int done; - /* If the initialization is complete, return early. */ - if(done) - return; + /* If the initialization is complete, return early. */ + if (done) return; - /* Causes all other threads in the process to block themselves - * and give up their time slice. - */ - DosEnterCritSec(); + /* Causes all other threads in the process to block themselves + * and give up their time slice. + */ + DosEnterCritSec(); - if (!done) - { - func(); - done = 1; - } + if (!done) { + func(); + done = 1; + } - /* Restores normal thread dispatching for the current process. */ - DosExitCritSec(); + /* Restores normal thread dispatching for the current process. */ + DosExitCritSec(); } - #elif CONFIG_MULTITHREAD && HAVE_PTHREAD_H #include -static void once(void (*func)(void)) -{ - static pthread_once_t lock = PTHREAD_ONCE_INIT; - pthread_once(&lock, func); +static void once(void (*func)(void)) { + static pthread_once_t lock = PTHREAD_ONCE_INIT; + pthread_once(&lock, func); } - #else /* No-op version that performs no synchronization. *_rtcd() is idempotent, * so as long as your platform provides atomic loads/stores of pointers * no synchronization is strictly necessary. */ -static void once(void (*func)(void)) -{ - static int done; +static void once(void (*func)(void)) { + static int done; - if(!done) - { - func(); - done = 1; - } + if (!done) { + func(); + done = 1; + } } #endif diff --git a/libs/libvpx/vpx_ports/vpx_timer.h b/libs/libvpx/vpx_ports/vpx_timer.h index dd98e291c2..4aae30e947 100644 --- a/libs/libvpx/vpx_ports/vpx_timer.h +++ b/libs/libvpx/vpx_ports/vpx_timer.h @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VPX_PORTS_VPX_TIMER_H_ #define VPX_PORTS_VPX_TIMER_H_ @@ -34,30 +33,27 @@ /* timersub is not provided by msys at this time. 
*/ #ifndef timersub -#define timersub(a, b, result) \ - do { \ - (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \ +#define timersub(a, b, result) \ + do { \ + (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \ (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \ - if ((result)->tv_usec < 0) { \ - --(result)->tv_sec; \ - (result)->tv_usec += 1000000; \ - } \ + if ((result)->tv_usec < 0) { \ + --(result)->tv_sec; \ + (result)->tv_usec += 1000000; \ + } \ } while (0) #endif #endif - struct vpx_usec_timer { #if defined(_WIN32) - LARGE_INTEGER begin, end; + LARGE_INTEGER begin, end; #else struct timeval begin, end; #endif }; - -static INLINE void -vpx_usec_timer_start(struct vpx_usec_timer *t) { +static INLINE void vpx_usec_timer_start(struct vpx_usec_timer *t) { #if defined(_WIN32) QueryPerformanceCounter(&t->begin); #else @@ -65,9 +61,7 @@ vpx_usec_timer_start(struct vpx_usec_timer *t) { #endif } - -static INLINE void -vpx_usec_timer_mark(struct vpx_usec_timer *t) { +static INLINE void vpx_usec_timer_mark(struct vpx_usec_timer *t) { #if defined(_WIN32) QueryPerformanceCounter(&t->end); #else @@ -75,9 +69,7 @@ vpx_usec_timer_mark(struct vpx_usec_timer *t) { #endif } - -static INLINE int64_t -vpx_usec_timer_elapsed(struct vpx_usec_timer *t) { +static INLINE int64_t vpx_usec_timer_elapsed(struct vpx_usec_timer *t) { #if defined(_WIN32) LARGE_INTEGER freq, diff; @@ -104,16 +96,11 @@ struct vpx_usec_timer { void *dummy; }; -static INLINE void -vpx_usec_timer_start(struct vpx_usec_timer *t) { } +static INLINE void vpx_usec_timer_start(struct vpx_usec_timer *t) {} -static INLINE void -vpx_usec_timer_mark(struct vpx_usec_timer *t) { } +static INLINE void vpx_usec_timer_mark(struct vpx_usec_timer *t) {} -static INLINE int -vpx_usec_timer_elapsed(struct vpx_usec_timer *t) { - return 0; -} +static INLINE int vpx_usec_timer_elapsed(struct vpx_usec_timer *t) { return 0; } #endif /* CONFIG_OS_SUPPORT */ diff --git a/libs/libvpx/vpx_ports/x86.h b/libs/libvpx/vpx_ports/x86.h index 
5da346e58f..6ba02cf1fc 100644 --- a/libs/libvpx/vpx_ports/x86.h +++ b/libs/libvpx/vpx_ports/x86.h @@ -8,10 +8,14 @@ * be found in the AUTHORS file in the root of the source tree. */ - #ifndef VPX_PORTS_X86_H_ #define VPX_PORTS_X86_H_ #include + +#if defined(_MSC_VER) +#include /* For __cpuidex, __rdtsc */ +#endif + #include "vpx_config.h" #include "vpx/vpx_integer.h" @@ -36,72 +40,71 @@ typedef enum { VPX_CPU_VIA, VPX_CPU_LAST -} vpx_cpu_t; +} vpx_cpu_t; #if defined(__GNUC__) && __GNUC__ || defined(__ANDROID__) #if ARCH_X86_64 -#define cpuid(func, func2, ax, bx, cx, dx)\ - __asm__ __volatile__ (\ - "cpuid \n\t" \ - : "=a" (ax), "=b" (bx), "=c" (cx), "=d" (dx) \ - : "a" (func), "c" (func2)); +#define cpuid(func, func2, ax, bx, cx, dx) \ + __asm__ __volatile__("cpuid \n\t" \ + : "=a"(ax), "=b"(bx), "=c"(cx), "=d"(dx) \ + : "a"(func), "c"(func2)); #else -#define cpuid(func, func2, ax, bx, cx, dx)\ - __asm__ __volatile__ (\ - "mov %%ebx, %%edi \n\t" \ - "cpuid \n\t" \ - "xchg %%edi, %%ebx \n\t" \ - : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \ - : "a" (func), "c" (func2)); +#define cpuid(func, func2, ax, bx, cx, dx) \ + __asm__ __volatile__( \ + "mov %%ebx, %%edi \n\t" \ + "cpuid \n\t" \ + "xchg %%edi, %%ebx \n\t" \ + : "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \ + : "a"(func), "c"(func2)); #endif -#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) /* end __GNUC__ or __ANDROID__*/ +#elif defined(__SUNPRO_C) || \ + defined(__SUNPRO_CC) /* end __GNUC__ or __ANDROID__*/ #if ARCH_X86_64 -#define cpuid(func, func2, ax, bx, cx, dx)\ - asm volatile (\ - "xchg %rsi, %rbx \n\t" \ - "cpuid \n\t" \ - "movl %ebx, %edi \n\t" \ - "xchg %rsi, %rbx \n\t" \ - : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \ - : "a" (func), "c" (func2)); +#define cpuid(func, func2, ax, bx, cx, dx) \ + asm volatile( \ + "xchg %rsi, %rbx \n\t" \ + "cpuid \n\t" \ + "movl %ebx, %edi \n\t" \ + "xchg %rsi, %rbx \n\t" \ + : "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \ + : "a"(func), "c"(func2)); #else -#define 
cpuid(func, func2, ax, bx, cx, dx)\ - asm volatile (\ - "pushl %ebx \n\t" \ - "cpuid \n\t" \ - "movl %ebx, %edi \n\t" \ - "popl %ebx \n\t" \ - : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \ - : "a" (func), "c" (func2)); +#define cpuid(func, func2, ax, bx, cx, dx) \ + asm volatile( \ + "pushl %ebx \n\t" \ + "cpuid \n\t" \ + "movl %ebx, %edi \n\t" \ + "popl %ebx \n\t" \ + : "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \ + : "a"(func), "c"(func2)); #endif #else /* end __SUNPRO__ */ #if ARCH_X86_64 #if defined(_MSC_VER) && _MSC_VER > 1500 -void __cpuidex(int CPUInfo[4], int info_type, int ecxvalue); -#pragma intrinsic(__cpuidex) -#define cpuid(func, func2, a, b, c, d) do {\ - int regs[4];\ - __cpuidex(regs, func, func2); \ - a = regs[0]; b = regs[1]; c = regs[2]; d = regs[3];\ - } while(0) +#define cpuid(func, func2, a, b, c, d) \ + do { \ + int regs[4]; \ + __cpuidex(regs, func, func2); \ + a = regs[0]; \ + b = regs[1]; \ + c = regs[2]; \ + d = regs[3]; \ + } while (0) #else -void __cpuid(int CPUInfo[4], int info_type); -#pragma intrinsic(__cpuid) -#define cpuid(func, func2, a, b, c, d) do {\ - int regs[4];\ - __cpuid(regs, func); \ - a = regs[0]; b = regs[1]; c = regs[2]; d = regs[3];\ +#define cpuid(func, func2, a, b, c, d) \ + do { \ + int regs[4]; \ + __cpuid(regs, func); \ + a = regs[0]; \ + b = regs[1]; \ + c = regs[2]; \ + d = regs[3]; \ } while (0) #endif #else -#define cpuid(func, func2, a, b, c, d)\ - __asm mov eax, func\ - __asm mov ecx, func2\ - __asm cpuid\ - __asm mov a, eax\ - __asm mov b, ebx\ - __asm mov c, ecx\ - __asm mov d, edx +#define cpuid(func, func2, a, b, c, d) \ + __asm mov eax, func __asm mov ecx, func2 __asm cpuid __asm mov a, \ + eax __asm mov b, ebx __asm mov c, ecx __asm mov d, edx #endif #endif /* end others */ @@ -111,13 +114,13 @@ static INLINE uint64_t xgetbv(void) { const uint32_t ecx = 0; uint32_t eax, edx; // Use the raw opcode for xgetbv for compatibility with older toolchains. 
- __asm__ volatile ( - ".byte 0x0f, 0x01, 0xd0\n" - : "=a"(eax), "=d"(edx) : "c" (ecx)); + __asm__ volatile(".byte 0x0f, 0x01, 0xd0\n" + : "=a"(eax), "=d"(edx) + : "c"(ecx)); return ((uint64_t)edx << 32) | eax; } -#elif (defined(_M_X64) || defined(_M_IX86)) && \ - defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 160040219 // >= VS2010 SP1 +#elif (defined(_M_X64) || defined(_M_IX86)) && defined(_MSC_FULL_VER) && \ + _MSC_FULL_VER >= 160040219 // >= VS2010 SP1 #include #define xgetbv() _xgetbv(0) #elif defined(_MSC_VER) && defined(_M_IX86) @@ -143,20 +146,19 @@ static INLINE uint64_t xgetbv(void) { #endif #endif -#define HAS_MMX 0x01 -#define HAS_SSE 0x02 -#define HAS_SSE2 0x04 -#define HAS_SSE3 0x08 -#define HAS_SSSE3 0x10 -#define HAS_SSE4_1 0x20 -#define HAS_AVX 0x40 -#define HAS_AVX2 0x80 +#define HAS_MMX 0x01 +#define HAS_SSE 0x02 +#define HAS_SSE2 0x04 +#define HAS_SSE3 0x08 +#define HAS_SSSE3 0x10 +#define HAS_SSE4_1 0x20 +#define HAS_AVX 0x40 +#define HAS_AVX2 0x80 #ifndef BIT -#define BIT(n) (1< a couple of seconds), 64-bit +// counter should be used. 
+// 32-bit CPU cycle counter +static INLINE unsigned int x86_readtsc(void) { #if defined(__GNUC__) && __GNUC__ unsigned int tsc; - __asm__ __volatile__("rdtsc\n\t":"=a"(tsc):); + __asm__ __volatile__("rdtsc\n\t" : "=a"(tsc) :); return tsc; #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) unsigned int tsc; - asm volatile("rdtsc\n\t":"=a"(tsc):); + asm volatile("rdtsc\n\t" : "=a"(tsc) :); return tsc; #else #if ARCH_X86_64 return (unsigned int)__rdtsc(); #else - __asm rdtsc; + __asm rdtsc; #endif #endif } - - +// 64-bit CPU cycle counter +static INLINE uint64_t x86_readtsc64(void) { #if defined(__GNUC__) && __GNUC__ -#define x86_pause_hint()\ - __asm__ __volatile__ ("pause \n\t") + uint32_t hi, lo; + __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi)); + return ((uint64_t)hi << 32) | lo; #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) -#define x86_pause_hint()\ - asm volatile ("pause \n\t") + uint_t hi, lo; + asm volatile("rdtsc\n\t" : "=a"(lo), "=d"(hi)); + return ((uint64_t)hi << 32) | lo; #else #if ARCH_X86_64 -#define x86_pause_hint()\ - _mm_pause(); + return (uint64_t)__rdtsc(); #else -#define x86_pause_hint()\ - __asm pause + __asm rdtsc; +#endif +#endif +} + +#if defined(__GNUC__) && __GNUC__ +#define x86_pause_hint() __asm__ __volatile__("pause \n\t") +#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) +#define x86_pause_hint() asm volatile("pause \n\t") +#else +#if ARCH_X86_64 +#define x86_pause_hint() _mm_pause(); +#else +#define x86_pause_hint() __asm pause #endif #endif #if defined(__GNUC__) && __GNUC__ -static void -x87_set_control_word(unsigned short mode) { +static void x87_set_control_word(unsigned short mode) { __asm__ __volatile__("fldcw %0" : : "m"(*&mode)); } -static unsigned short -x87_get_control_word(void) { +static unsigned short x87_get_control_word(void) { unsigned short mode; - __asm__ __volatile__("fstcw %0\n\t":"=m"(*&mode):); - return mode; + __asm__ __volatile__("fstcw %0\n\t" : "=m"(*&mode) :); + return mode; } #elif 
defined(__SUNPRO_C) || defined(__SUNPRO_CC) -static void -x87_set_control_word(unsigned short mode) { +static void x87_set_control_word(unsigned short mode) { asm volatile("fldcw %0" : : "m"(*&mode)); } -static unsigned short -x87_get_control_word(void) { +static unsigned short x87_get_control_word(void) { unsigned short mode; - asm volatile("fstcw %0\n\t":"=m"(*&mode):); + asm volatile("fstcw %0\n\t" : "=m"(*&mode) :); return mode; } #elif ARCH_X86_64 /* No fldcw intrinsics on Windows x64, punt to external asm */ -extern void vpx_winx64_fldcw(unsigned short mode); +extern void vpx_winx64_fldcw(unsigned short mode); extern unsigned short vpx_winx64_fstcw(void); #define x87_set_control_word vpx_winx64_fldcw #define x87_get_control_word vpx_winx64_fstcw #else -static void -x87_set_control_word(unsigned short mode) { +static void x87_set_control_word(unsigned short mode) { __asm { fldcw mode } } -static unsigned short -x87_get_control_word(void) { +static unsigned short x87_get_control_word(void) { unsigned short mode; __asm { fstcw mode } return mode; } #endif -static INLINE unsigned int -x87_set_double_precision(void) { +static INLINE unsigned int x87_set_double_precision(void) { unsigned int mode = x87_get_control_word(); - x87_set_control_word((mode&~0x300) | 0x200); + x87_set_control_word((mode & ~0x300) | 0x200); return mode; } - extern void vpx_reset_mmx_state(void); #ifdef __cplusplus diff --git a/libs/libvpx/vpx_scale/generic/gen_scalers.c b/libs/libvpx/vpx_scale/generic/gen_scalers.c index dab324edfc..b554a56e83 100644 --- a/libs/libvpx/vpx_scale/generic/gen_scalers.c +++ b/libs/libvpx/vpx_scale/generic/gen_scalers.c @@ -19,9 +19,9 @@ * * * INPUTS : const unsigned char *source : Pointer to source data. - * unsigned int source_width : Stride of source. + * unsigned int source_width : Stride of source. * unsigned char *dest : Pointer to destination data. - * unsigned int dest_width : Stride of destination (NOT USED). 
+ * unsigned int dest_width : Stride of dest (UNUSED). * * OUTPUTS : None. * @@ -42,7 +42,7 @@ void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, unsigned char *des = dest; const unsigned char *src = source; - (void) dest_width; + (void)dest_width; for (i = 0; i < source_width; i += 5) { a = src[0]; @@ -51,7 +51,7 @@ void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, d = src[3]; e = src[4]; - des[0] = (unsigned char) a; + des[0] = (unsigned char)a; des[1] = (unsigned char)((b * 192 + c * 64 + 128) >> 8); des[2] = (unsigned char)((c * 128 + d * 128 + 128) >> 8); des[3] = (unsigned char)((d * 64 + e * 192 + 128) >> 8); @@ -61,12 +61,8 @@ void vp8_horizontal_line_5_4_scale_c(const unsigned char *source, } } - - - void vp8_vertical_band_5_4_scale_c(unsigned char *source, - unsigned int src_pitch, - unsigned char *dest, + unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width) { unsigned int i; @@ -75,33 +71,30 @@ void vp8_vertical_band_5_4_scale_c(unsigned char *source, unsigned char *src = source; for (i = 0; i < dest_width; i++) { - a = src[0 * src_pitch]; b = src[1 * src_pitch]; c = src[2 * src_pitch]; d = src[3 * src_pitch]; e = src[4 * src_pitch]; - des[0 * dest_pitch] = (unsigned char) a; + des[0 * dest_pitch] = (unsigned char)a; des[1 * dest_pitch] = (unsigned char)((b * 192 + c * 64 + 128) >> 8); des[2 * dest_pitch] = (unsigned char)((c * 128 + d * 128 + 128) >> 8); des[3 * dest_pitch] = (unsigned char)((d * 64 + e * 192 + 128) >> 8); src++; des++; - } } - /*7*************************************************************************** * * ROUTINE : vp8_horizontal_line_3_5_scale_c * * INPUTS : const unsigned char *source : Pointer to source data. - * unsigned int source_width : Stride of source. + * unsigned int source_width : Stride of source. * unsigned char *dest : Pointer to destination data. - * unsigned int dest_width : Stride of destination (NOT USED). 
+ * unsigned int dest_width : Stride of dest (UNUSED). * * OUTPUTS : None. * @@ -123,7 +116,7 @@ void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, unsigned char *des = dest; const unsigned char *src = source; - (void) dest_width; + (void)dest_width; for (i = 0; i < source_width; i += 5) { a = src[0]; @@ -132,19 +125,17 @@ void vp8_horizontal_line_5_3_scale_c(const unsigned char *source, d = src[3]; e = src[4]; - des[0] = (unsigned char) a; - des[1] = (unsigned char)((b * 85 + c * 171 + 128) >> 8); + des[0] = (unsigned char)a; + des[1] = (unsigned char)((b * 85 + c * 171 + 128) >> 8); des[2] = (unsigned char)((d * 171 + e * 85 + 128) >> 8); src += 5; des += 3; } - } void vp8_vertical_band_5_3_scale_c(unsigned char *source, - unsigned int src_pitch, - unsigned char *dest, + unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width) { unsigned int i; @@ -153,20 +144,18 @@ void vp8_vertical_band_5_3_scale_c(unsigned char *source, unsigned char *src = source; for (i = 0; i < dest_width; i++) { - a = src[0 * src_pitch]; b = src[1 * src_pitch]; c = src[2 * src_pitch]; d = src[3 * src_pitch]; e = src[4 * src_pitch]; - des[0 * dest_pitch] = (unsigned char) a; + des[0 * dest_pitch] = (unsigned char)a; des[1 * dest_pitch] = (unsigned char)((b * 85 + c * 171 + 128) >> 8); des[2 * dest_pitch] = (unsigned char)((d * 171 + e * 85 + 128) >> 8); src++; des++; - } } @@ -175,9 +164,9 @@ void vp8_vertical_band_5_3_scale_c(unsigned char *source, * ROUTINE : vp8_horizontal_line_1_2_scale_c * * INPUTS : const unsigned char *source : Pointer to source data. - * unsigned int source_width : Stride of source. + * unsigned int source_width : Stride of source. * unsigned char *dest : Pointer to destination data. - * unsigned int dest_width : Stride of destination (NOT USED). + * unsigned int dest_width : Stride of dest (UNUSED). * * OUTPUTS : None. 
* @@ -198,23 +187,22 @@ void vp8_horizontal_line_2_1_scale_c(const unsigned char *source, unsigned char *des = dest; const unsigned char *src = source; - (void) dest_width; + (void)dest_width; for (i = 0; i < source_width; i += 2) { a = src[0]; - des [0] = (unsigned char)(a); + des[0] = (unsigned char)(a); src += 2; des += 1; } } void vp8_vertical_band_2_1_scale_c(unsigned char *source, - unsigned int src_pitch, - unsigned char *dest, + unsigned int src_pitch, unsigned char *dest, unsigned int dest_pitch, unsigned int dest_width) { - (void) dest_pitch; - (void) src_pitch; + (void)dest_pitch; + (void)src_pitch; memcpy(dest, source, dest_width); } @@ -227,7 +215,7 @@ void vp8_vertical_band_2_1_scale_i_c(unsigned char *source, int temp; int width = dest_width; - (void) dest_pitch; + (void)dest_pitch; for (i = 0; i < width; i++) { temp = 8; diff --git a/libs/libvpx/vpx_scale/generic/vpx_scale.c b/libs/libvpx/vpx_scale/generic/vpx_scale.c index 15e4ba87e7..20e1ff90fd 100644 --- a/libs/libvpx/vpx_scale/generic/vpx_scale.c +++ b/libs/libvpx/vpx_scale/generic/vpx_scale.c @@ -8,7 +8,6 @@ * be found in the AUTHORS file in the root of the source tree. */ - /**************************************************************************** * * Module Title : scale.c @@ -26,8 +25,8 @@ #include "vpx_scale/yv12config.h" typedef struct { - int expanded_frame_width; - int expanded_frame_height; + int expanded_frame_width; + int expanded_frame_height; int HScale; int HRatio; @@ -44,13 +43,16 @@ typedef struct { * ROUTINE : scale1d_2t1_i * * INPUTS : const unsigned char *source : Pointer to data to be scaled. - * int source_step : Number of pixels to step on in source. - * unsigned int source_scale : Scale for source (UNUSED). - * unsigned int source_length : Length of source (UNUSED). + * int source_step : Number of pixels to step on in + * source. + * unsigned int source_scale : Scale for source (UNUSED). + * unsigned int source_length : Length of source (UNUSED). 
* unsigned char *dest : Pointer to output data array. - * int dest_step : Number of pixels to step on in destination. - * unsigned int dest_scale : Scale for destination (UNUSED). - * unsigned int dest_length : Length of destination. + * int dest_step : Number of pixels to step on in + * destination. + * unsigned int dest_scale : Scale for destination + * (UNUSED). + * unsigned int dest_length : Length of destination. * * OUTPUTS : None. * @@ -61,29 +63,22 @@ typedef struct { * SPECIAL NOTES : None. * ****************************************************************************/ -static -void scale1d_2t1_i -( - const unsigned char *source, - int source_step, - unsigned int source_scale, - unsigned int source_length, - unsigned char *dest, - int dest_step, - unsigned int dest_scale, - unsigned int dest_length -) { +static void scale1d_2t1_i(const unsigned char *source, int source_step, + unsigned int source_scale, unsigned int source_length, + unsigned char *dest, int dest_step, + unsigned int dest_scale, unsigned int dest_length) { unsigned int i, j; unsigned int temp; int source_pitch = source_step; - (void) source_length; - (void) source_scale; - (void) dest_scale; + (void)source_length; + (void)source_scale; + (void)dest_scale; source_step *= 2; dest[0] = source[0]; - for (i = dest_step, j = source_step; i < dest_length * dest_step; i += dest_step, j += source_step) { + for (i = dest_step, j = source_step; i < dest_length * dest_step; + i += dest_step, j += source_step) { temp = 8; temp += 3 * source[j - source_pitch]; temp += 10 * source[j]; @@ -98,13 +93,16 @@ void scale1d_2t1_i * ROUTINE : scale1d_2t1_ps * * INPUTS : const unsigned char *source : Pointer to data to be scaled. - * int source_step : Number of pixels to step on in source. - * unsigned int source_scale : Scale for source (UNUSED). - * unsigned int source_length : Length of source (UNUSED). + * int source_step : Number of pixels to step on in + * source. 
+ * unsigned int source_scale : Scale for source (UNUSED). + * unsigned int source_length : Length of source (UNUSED). * unsigned char *dest : Pointer to output data array. - * int dest_step : Number of pixels to step on in destination. - * unsigned int dest_scale : Scale for destination (UNUSED). - * unsigned int dest_length : Length of destination. + * int dest_step : Number of pixels to step on in + * destination. + * unsigned int dest_scale : Scale for destination + * (UNUSED). + * unsigned int dest_length : Length of destination. * * OUTPUTS : None. * @@ -115,23 +113,16 @@ void scale1d_2t1_i * SPECIAL NOTES : None. * ****************************************************************************/ -static -void scale1d_2t1_ps -( - const unsigned char *source, - int source_step, - unsigned int source_scale, - unsigned int source_length, - unsigned char *dest, - int dest_step, - unsigned int dest_scale, - unsigned int dest_length -) { +static void scale1d_2t1_ps(const unsigned char *source, int source_step, + unsigned int source_scale, + unsigned int source_length, unsigned char *dest, + int dest_step, unsigned int dest_scale, + unsigned int dest_length) { unsigned int i, j; - (void) source_length; - (void) source_scale; - (void) dest_scale; + (void)source_length; + (void)source_scale; + (void)dest_scale; source_step *= 2; j = 0; @@ -144,13 +135,15 @@ void scale1d_2t1_ps * ROUTINE : scale1d_c * * INPUTS : const unsigned char *source : Pointer to data to be scaled. - * int source_step : Number of pixels to step on in source. - * unsigned int source_scale : Scale for source. - * unsigned int source_length : Length of source (UNUSED). + * int source_step : Number of pixels to step on in + * source. + * unsigned int source_scale : Scale for source. + * unsigned int source_length : Length of source (UNUSED). * unsigned char *dest : Pointer to output data array. - * int dest_step : Number of pixels to step on in destination. 
- * unsigned int dest_scale : Scale for destination. - * unsigned int dest_length : Length of destination. + * int dest_step : Number of pixels to step on in + * destination. + * unsigned int dest_scale : Scale for destination. + * unsigned int dest_length : Length of destination. * * OUTPUTS : None. * @@ -161,18 +154,10 @@ void scale1d_2t1_ps * SPECIAL NOTES : None. * ****************************************************************************/ -static -void scale1d_c -( - const unsigned char *source, - int source_step, - unsigned int source_scale, - unsigned int source_length, - unsigned char *dest, - int dest_step, - unsigned int dest_scale, - unsigned int dest_length -) { +static void scale1d_c(const unsigned char *source, int source_step, + unsigned int source_scale, unsigned int source_length, + unsigned char *dest, int dest_step, + unsigned int dest_scale, unsigned int dest_length) { unsigned int i; unsigned int round_value = dest_scale / 2; unsigned int left_modifier = dest_scale; @@ -180,14 +165,17 @@ void scale1d_c unsigned char left_pixel = *source; unsigned char right_pixel = *(source + source_step); - (void) source_length; + (void)source_length; /* These asserts are needed if there are boundary issues... */ /*assert ( dest_scale > source_scale );*/ - /*assert ( (source_length-1) * dest_scale >= (dest_length-1) * source_scale );*/ + /*assert ( (source_length-1) * dest_scale >= (dest_length-1) * source_scale + * );*/ for (i = 0; i < dest_length * dest_step; i += dest_step) { - dest[i] = (char)((left_modifier * left_pixel + right_modifier * right_pixel + round_value) / dest_scale); + dest[i] = (char)((left_modifier * left_pixel + + right_modifier * right_pixel + round_value) / + dest_scale); right_modifier += source_scale; @@ -206,21 +194,29 @@ void scale1d_c * * ROUTINE : Scale2D * - * INPUTS : const unsigned char *source : Pointer to data to be scaled. - * int source_pitch : Stride of source image. - * unsigned int source_width : Width of input image. 
- * unsigned int source_height : Height of input image. - * unsigned char *dest : Pointer to output data array. - * int dest_pitch : Stride of destination image. - * unsigned int dest_width : Width of destination image. - * unsigned int dest_height : Height of destination image. - * unsigned char *temp_area : Pointer to temp work area. + * INPUTS : const unsigned char *source : Pointer to data to be + * scaled. + * int source_pitch : Stride of source image. + * unsigned int source_width : Width of input image. + * unsigned int source_height : Height of input image. + * unsigned char *dest : Pointer to output data + * array. + * int dest_pitch : Stride of destination + * image. + * unsigned int dest_width : Width of destination image. + * unsigned int dest_height : Height of destination + * image. + * unsigned char *temp_area : Pointer to temp work area. * unsigned char temp_area_height : Height of temp work area. - * unsigned int hscale : Horizontal scale factor numerator. - * unsigned int hratio : Horizontal scale factor denominator. - * unsigned int vscale : Vertical scale factor numerator. - * unsigned int vratio : Vertical scale factor denominator. - * unsigned int interlaced : Interlace flag. + * unsigned int hscale : Horizontal scale factor + * numerator. + * unsigned int hratio : Horizontal scale factor + * denominator. + * unsigned int vscale : Vertical scale factor + * numerator. + * unsigned int vratio : Vertical scale factor + * denominator. + * unsigned int interlaced : Interlace flag. * * OUTPUTS : None. * @@ -232,48 +228,38 @@ void scale1d_c * caching. 
* ****************************************************************************/ -static -void Scale2D -( - /*const*/ - unsigned char *source, - int source_pitch, - unsigned int source_width, - unsigned int source_height, - unsigned char *dest, - int dest_pitch, - unsigned int dest_width, - unsigned int dest_height, - unsigned char *temp_area, - unsigned char temp_area_height, - unsigned int hscale, - unsigned int hratio, - unsigned int vscale, - unsigned int vratio, - unsigned int interlaced -) { +static void Scale2D( + /*const*/ + unsigned char *source, int source_pitch, unsigned int source_width, + unsigned int source_height, unsigned char *dest, int dest_pitch, + unsigned int dest_width, unsigned int dest_height, unsigned char *temp_area, + unsigned char temp_area_height, unsigned int hscale, unsigned int hratio, + unsigned int vscale, unsigned int vratio, unsigned int interlaced) { /*unsigned*/ int i, j, k; int bands; int dest_band_height; int source_band_height; - typedef void (*Scale1D)(const unsigned char * source, int source_step, unsigned int source_scale, unsigned int source_length, - unsigned char * dest, int dest_step, unsigned int dest_scale, unsigned int dest_length); + typedef void (*Scale1D)(const unsigned char *source, int source_step, + unsigned int source_scale, unsigned int source_length, + unsigned char *dest, int dest_step, + unsigned int dest_scale, unsigned int dest_length); Scale1D Scale1Dv = scale1d_c; Scale1D Scale1Dh = scale1d_c; - void (*horiz_line_scale)(const unsigned char *, unsigned int, unsigned char *, unsigned int) = NULL; - void (*vert_band_scale)(unsigned char *, unsigned int, unsigned char *, unsigned int, unsigned int) = NULL; + void (*horiz_line_scale)(const unsigned char *, unsigned int, unsigned char *, + unsigned int) = NULL; + void (*vert_band_scale)(unsigned char *, unsigned int, unsigned char *, + unsigned int, unsigned int) = NULL; int ratio_scalable = 1; int interpolation = 0; - unsigned char *source_base; /* = 
(unsigned char *) ((source_pitch >= 0) ? source : (source + ((source_height-1) * source_pitch))); */ + unsigned char *source_base; unsigned char *line_src; - source_base = (unsigned char *)source; if (source_pitch < 0) { @@ -309,32 +295,30 @@ void Scale2D switch (vratio * 10 / vscale) { case 8: /* 4-5 Scale in vertical direction */ - vert_band_scale = vp8_vertical_band_5_4_scale; - source_band_height = 5; - dest_band_height = 4; + vert_band_scale = vp8_vertical_band_5_4_scale; + source_band_height = 5; + dest_band_height = 4; break; case 6: /* 3-5 Scale in vertical direction */ - vert_band_scale = vp8_vertical_band_5_3_scale; - source_band_height = 5; - dest_band_height = 3; + vert_band_scale = vp8_vertical_band_5_3_scale; + source_band_height = 5; + dest_band_height = 3; break; case 5: /* 1-2 Scale in vertical direction */ if (interlaced) { /* if the content is interlaced, point sampling is used */ - vert_band_scale = vp8_vertical_band_2_1_scale; + vert_band_scale = vp8_vertical_band_2_1_scale; } else { - interpolation = 1; /* if the content is progressive, interplo */ - vert_band_scale = vp8_vertical_band_2_1_scale_i; - + vert_band_scale = vp8_vertical_band_2_1_scale_i; } - source_band_height = 2; - dest_band_height = 1; + source_band_height = 2; + dest_band_height = 1; break; default: /* The ratio is not acceptable now */ @@ -349,49 +333,50 @@ void Scale2D for (k = 0; k < (int)dest_height; k++) { horiz_line_scale(source, source_width, dest, dest_width); source += source_pitch; - dest += dest_pitch; + dest += dest_pitch; } return; } if (interpolation) { - if (source < source_base) - source = source_base; + if (source < source_base) source = source_base; horiz_line_scale(source, source_width, temp_area, dest_width); } - for (k = 0; k < (int)(dest_height + dest_band_height - 1) / dest_band_height; k++) { + for (k = 0; + k < (int)(dest_height + dest_band_height - 1) / dest_band_height; + k++) { /* scale one band horizontally */ for (i = 0; i < source_band_height; 
i++) { /* Trap case where we could read off the base of the source buffer */ line_src = (unsigned char *)source + i * source_pitch; - if (line_src < source_base) - line_src = source_base; + if (line_src < source_base) line_src = source_base; horiz_line_scale(line_src, source_width, - temp_area + (i + 1)*dest_pitch, dest_width); + temp_area + (i + 1) * dest_pitch, dest_width); } /* Vertical scaling is in place */ - vert_band_scale(temp_area + dest_pitch, dest_pitch, dest, dest_pitch, dest_width); + vert_band_scale(temp_area + dest_pitch, dest_pitch, dest, dest_pitch, + dest_width); if (interpolation) - memcpy(temp_area, temp_area + source_band_height * dest_pitch, dest_width); + memcpy(temp_area, temp_area + source_band_height * dest_pitch, + dest_width); /* Next band... */ - source += (unsigned long) source_band_height * source_pitch; - dest += (unsigned long) dest_band_height * dest_pitch; + source += (unsigned long)source_band_height * source_pitch; + dest += (unsigned long)dest_band_height * dest_pitch; } return; } - if (hscale == 2 && hratio == 1) - Scale1Dh = scale1d_2t1_ps; + if (hscale == 2 && hratio == 1) Scale1Dh = scale1d_2t1_ps; if (vscale == 2 && vratio == 1) { if (interlaced) @@ -403,24 +388,27 @@ void Scale2D if (source_height == dest_height) { /* for each band of the image */ for (k = 0; k < (int)dest_height; k++) { - Scale1Dh(source, 1, hscale, source_width + 1, dest, 1, hratio, dest_width); + Scale1Dh(source, 1, hscale, source_width + 1, dest, 1, hratio, + dest_width); source += source_pitch; - dest += dest_pitch; + dest += dest_pitch; } return; } if (dest_height > source_height) { - dest_band_height = temp_area_height - 1; + dest_band_height = temp_area_height - 1; source_band_height = dest_band_height * source_height / dest_height; } else { source_band_height = temp_area_height - 1; - dest_band_height = source_band_height * vratio / vscale; + dest_band_height = source_band_height * vratio / vscale; } - /* first row needs to be done so that we can 
stay one row ahead for vertical zoom */ - Scale1Dh(source, 1, hscale, source_width + 1, temp_area, 1, hratio, dest_width); + /* first row needs to be done so that we can stay one row ahead for vertical + * zoom */ + Scale1Dh(source, 1, hscale, source_width + 1, temp_area, 1, hratio, + dest_width); /* for each band of the image */ bands = (dest_height + dest_band_height - 1) / dest_band_height; @@ -428,12 +416,13 @@ void Scale2D for (k = 0; k < bands; k++) { /* scale one band horizontally */ for (i = 1; i < source_band_height + 1; i++) { - if (k * source_band_height + i < (int) source_height) { + if (k * source_band_height + i < (int)source_height) { Scale1Dh(source + i * source_pitch, 1, hscale, source_width + 1, temp_area + i * dest_pitch, 1, hratio, dest_width); } else { /* Duplicate the last row */ /* copy temp_area row 0 over from last row in the past */ - memcpy(temp_area + i * dest_pitch, temp_area + (i - 1)*dest_pitch, dest_pitch); + memcpy(temp_area + i * dest_pitch, temp_area + (i - 1) * dest_pitch, + dest_pitch); } } @@ -448,7 +437,7 @@ void Scale2D /* move to the next band */ source += source_band_height * source_pitch; - dest += dest_band_height * dest_pitch; + dest += dest_band_height * dest_pitch; } } @@ -456,15 +445,21 @@ void Scale2D * * ROUTINE : vpx_scale_frame * - * INPUTS : YV12_BUFFER_CONFIG *src : Pointer to frame to be scaled. - * YV12_BUFFER_CONFIG *dst : Pointer to buffer to hold scaled frame. - * unsigned char *temp_area : Pointer to temp work area. + * INPUTS : YV12_BUFFER_CONFIG *src : Pointer to frame to be + * scaled. + * YV12_BUFFER_CONFIG *dst : Pointer to buffer to hold + * scaled frame. + * unsigned char *temp_area : Pointer to temp work area. * unsigned char temp_area_height : Height of temp work area. - * unsigned int hscale : Horizontal scale factor numerator. - * unsigned int hratio : Horizontal scale factor denominator. - * unsigned int vscale : Vertical scale factor numerator. 
- * unsigned int vratio : Vertical scale factor denominator. - * unsigned int interlaced : Interlace flag. + * unsigned int hscale : Horizontal scale factor + * numerator. + * unsigned int hratio : Horizontal scale factor + * denominator. + * unsigned int vscale : Vertical scale factor + * numerator. + * unsigned int vratio : Vertical scale factor + * denominator. + * unsigned int interlaced : Interlace flag. * * OUTPUTS : None. * @@ -476,56 +471,59 @@ void Scale2D * caching. * ****************************************************************************/ -void vpx_scale_frame -( - YV12_BUFFER_CONFIG *src, - YV12_BUFFER_CONFIG *dst, - unsigned char *temp_area, - unsigned char temp_height, - unsigned int hscale, - unsigned int hratio, - unsigned int vscale, - unsigned int vratio, - unsigned int interlaced -) { +void vpx_scale_frame(YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, + unsigned char *temp_area, unsigned char temp_height, + unsigned int hscale, unsigned int hratio, + unsigned int vscale, unsigned int vratio, + unsigned int interlaced) { int i; int dw = (hscale - 1 + src->y_width * hratio) / hscale; int dh = (vscale - 1 + src->y_height * vratio) / vscale; /* call our internal scaling routines!! 
*/ - Scale2D((unsigned char *) src->y_buffer, src->y_stride, src->y_width, src->y_height, - (unsigned char *) dst->y_buffer, dst->y_stride, dw, dh, + Scale2D((unsigned char *)src->y_buffer, src->y_stride, src->y_width, + src->y_height, (unsigned char *)dst->y_buffer, dst->y_stride, dw, dh, temp_area, temp_height, hscale, hratio, vscale, vratio, interlaced); if (dw < (int)dst->y_width) for (i = 0; i < dh; i++) - memset(dst->y_buffer + i * dst->y_stride + dw - 1, dst->y_buffer[i * dst->y_stride + dw - 2], dst->y_width - dw + 1); + memset(dst->y_buffer + i * dst->y_stride + dw - 1, + dst->y_buffer[i * dst->y_stride + dw - 2], dst->y_width - dw + 1); if (dh < (int)dst->y_height) for (i = dh - 1; i < (int)dst->y_height; i++) - memcpy(dst->y_buffer + i * dst->y_stride, dst->y_buffer + (dh - 2) * dst->y_stride, dst->y_width + 1); + memcpy(dst->y_buffer + i * dst->y_stride, + dst->y_buffer + (dh - 2) * dst->y_stride, dst->y_width + 1); - Scale2D((unsigned char *) src->u_buffer, src->uv_stride, src->uv_width, src->uv_height, - (unsigned char *) dst->u_buffer, dst->uv_stride, dw / 2, dh / 2, - temp_area, temp_height, hscale, hratio, vscale, vratio, interlaced); + Scale2D((unsigned char *)src->u_buffer, src->uv_stride, src->uv_width, + src->uv_height, (unsigned char *)dst->u_buffer, dst->uv_stride, + dw / 2, dh / 2, temp_area, temp_height, hscale, hratio, vscale, + vratio, interlaced); if (dw / 2 < (int)dst->uv_width) for (i = 0; i < dst->uv_height; i++) - memset(dst->u_buffer + i * dst->uv_stride + dw / 2 - 1, dst->u_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1); + memset(dst->u_buffer + i * dst->uv_stride + dw / 2 - 1, + dst->u_buffer[i * dst->uv_stride + dw / 2 - 2], + dst->uv_width - dw / 2 + 1); if (dh / 2 < (int)dst->uv_height) for (i = dh / 2 - 1; i < (int)dst->y_height / 2; i++) - memcpy(dst->u_buffer + i * dst->uv_stride, dst->u_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width); + memcpy(dst->u_buffer + i * dst->uv_stride, + dst->u_buffer + 
(dh / 2 - 2) * dst->uv_stride, dst->uv_width); - Scale2D((unsigned char *) src->v_buffer, src->uv_stride, src->uv_width, src->uv_height, - (unsigned char *) dst->v_buffer, dst->uv_stride, dw / 2, dh / 2, - temp_area, temp_height, hscale, hratio, vscale, vratio, interlaced); + Scale2D((unsigned char *)src->v_buffer, src->uv_stride, src->uv_width, + src->uv_height, (unsigned char *)dst->v_buffer, dst->uv_stride, + dw / 2, dh / 2, temp_area, temp_height, hscale, hratio, vscale, + vratio, interlaced); if (dw / 2 < (int)dst->uv_width) for (i = 0; i < dst->uv_height; i++) - memset(dst->v_buffer + i * dst->uv_stride + dw / 2 - 1, dst->v_buffer[i * dst->uv_stride + dw / 2 - 2], dst->uv_width - dw / 2 + 1); + memset(dst->v_buffer + i * dst->uv_stride + dw / 2 - 1, + dst->v_buffer[i * dst->uv_stride + dw / 2 - 2], + dst->uv_width - dw / 2 + 1); - if (dh / 2 < (int) dst->uv_height) + if (dh / 2 < (int)dst->uv_height) for (i = dh / 2 - 1; i < (int)dst->y_height / 2; i++) - memcpy(dst->v_buffer + i * dst->uv_stride, dst->v_buffer + (dh / 2 - 2)*dst->uv_stride, dst->uv_width); + memcpy(dst->v_buffer + i * dst->uv_stride, + dst->v_buffer + (dh / 2 - 2) * dst->uv_stride, dst->uv_width); } diff --git a/libs/libvpx/vpx_scale/generic/yv12config.c b/libs/libvpx/vpx_scale/generic/yv12config.c index e8fee528e7..a674eac84b 100644 --- a/libs/libvpx/vpx_scale/generic/yv12config.c +++ b/libs/libvpx/vpx_scale/generic/yv12config.c @@ -22,10 +22,9 @@ * ****************************************************************************/ #define yv12_align_addr(addr, align) \ - (void*)(((size_t)(addr) + ((align) - 1)) & (size_t)-(align)) + (void *)(((size_t)(addr) + ((align)-1)) & (size_t) - (align)) -int -vp8_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf) { +int vp8_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf) { if (ybf) { // If libvpx is using frame buffer callbacks then buffer_alloc_sz must // not be set. 
@@ -44,8 +43,8 @@ vp8_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf) { return 0; } -int vp8_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, - int width, int height, int border) { +int vp8_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, + int height, int border) { if (ybf) { int aligned_width = (width + 15) & ~15; int aligned_height = (height + 15) & ~15; @@ -64,20 +63,18 @@ int vp8_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, ybf->buffer_alloc_sz = frame_size; } - if (!ybf->buffer_alloc || ybf->buffer_alloc_sz < frame_size) - return -1; + if (!ybf->buffer_alloc || ybf->buffer_alloc_sz < frame_size) return -1; /* Only support allocating buffers that have a border that's a multiple * of 32. The border restriction is required to get 16-byte alignment of * the start of the chroma rows without introducing an arbitrary gap * between planes, which would break the semantics of things like * vpx_img_set_rect(). */ - if (border & 0x1f) - return -3; + if (border & 0x1f) return -3; ybf->y_crop_width = width; ybf->y_crop_height = height; - ybf->y_width = aligned_width; + ybf->y_width = aligned_width; ybf->y_height = aligned_height; ybf->y_stride = y_stride; @@ -95,8 +92,10 @@ int vp8_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, ybf->frame_size = frame_size; ybf->y_buffer = ybf->buffer_alloc + (border * y_stride) + border; - ybf->u_buffer = ybf->buffer_alloc + yplane_size + (border / 2 * uv_stride) + border / 2; - ybf->v_buffer = ybf->buffer_alloc + yplane_size + uvplane_size + (border / 2 * uv_stride) + border / 2; + ybf->u_buffer = + ybf->buffer_alloc + yplane_size + (border / 2 * uv_stride) + border / 2; + ybf->v_buffer = ybf->buffer_alloc + yplane_size + uvplane_size + + (border / 2 * uv_stride) + border / 2; ybf->alpha_buffer = NULL; ybf->corrupted = 0; /* assume not currupted by errors */ @@ -105,8 +104,8 @@ int vp8_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, return -2; } -int vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, - 
int width, int height, int border) { +int vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height, + int border) { if (ybf) { vp8_yv12_de_alloc_frame_buffer(ybf); return vp8_yv12_realloc_frame_buffer(ybf, width, height, border); @@ -114,7 +113,7 @@ int vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, return -2; } -#if CONFIG_VP9 || CONFIG_VP10 +#if CONFIG_VP9 // TODO(jkoleszar): Maybe replace this with struct vpx_image int vpx_free_frame_buffer(YV12_BUFFER_CONFIG *ybf) { @@ -134,31 +133,28 @@ int vpx_free_frame_buffer(YV12_BUFFER_CONFIG *ybf) { return 0; } -int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, - int width, int height, +int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height, int ss_x, int ss_y, #if CONFIG_VP9_HIGHBITDEPTH int use_highbitdepth, #endif - int border, - int byte_alignment, + int border, int byte_alignment, vpx_codec_frame_buffer_t *fb, - vpx_get_frame_buffer_cb_fn_t cb, - void *cb_priv) { + vpx_get_frame_buffer_cb_fn_t cb, void *cb_priv) { if (ybf) { const int vp9_byte_align = (byte_alignment == 0) ? 
1 : byte_alignment; const int aligned_width = (width + 7) & ~7; const int aligned_height = (height + 7) & ~7; const int y_stride = ((aligned_width + 2 * border) + 31) & ~31; - const uint64_t yplane_size = (aligned_height + 2 * border) * - (uint64_t)y_stride + byte_alignment; + const uint64_t yplane_size = + (aligned_height + 2 * border) * (uint64_t)y_stride + byte_alignment; const int uv_width = aligned_width >> ss_x; const int uv_height = aligned_height >> ss_y; const int uv_stride = y_stride >> ss_x; const int uv_border_w = border >> ss_x; const int uv_border_h = border >> ss_y; - const uint64_t uvplane_size = (uv_height + 2 * uv_border_h) * - (uint64_t)uv_stride + byte_alignment; + const uint64_t uvplane_size = + (uv_height + 2 * uv_border_h) * (uint64_t)uv_stride + byte_alignment; #if CONFIG_VP9_HIGHBITDEPTH const uint64_t frame_size = @@ -175,15 +171,12 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, assert(fb != NULL); - if (external_frame_size != (size_t)external_frame_size) - return -1; + if (external_frame_size != (size_t)external_frame_size) return -1; // Allocation to hold larger frame, or first allocation. 
- if (cb(cb_priv, (size_t)external_frame_size, fb) < 0) - return -1; + if (cb(cb_priv, (size_t)external_frame_size, fb) < 0) return -1; - if (fb->data == NULL || fb->size < external_frame_size) - return -1; + if (fb->data == NULL || fb->size < external_frame_size) return -1; ybf->buffer_alloc = (uint8_t *)yv12_align_addr(fb->data, 32); @@ -200,12 +193,10 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, vpx_free(ybf->buffer_alloc); ybf->buffer_alloc = NULL; - if (frame_size != (size_t)frame_size) - return -1; + if (frame_size != (size_t)frame_size) return -1; ybf->buffer_alloc = (uint8_t *)vpx_memalign(32, (size_t)frame_size); - if (!ybf->buffer_alloc) - return -1; + if (!ybf->buffer_alloc) return -1; ybf->buffer_alloc_sz = (int)frame_size; @@ -220,12 +211,11 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, * the start of the chroma rows without introducing an arbitrary gap * between planes, which would break the semantics of things like * vpx_img_set_rect(). */ - if (border & 0x1f) - return -3; + if (border & 0x1f) return -3; ybf->y_crop_width = width; ybf->y_crop_height = height; - ybf->y_width = aligned_width; + ybf->y_width = aligned_width; ybf->y_height = aligned_height; ybf->y_stride = y_stride; @@ -256,9 +246,10 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, ybf->u_buffer = (uint8_t *)yv12_align_addr( buf + yplane_size + (uv_border_h * uv_stride) + uv_border_w, vp9_byte_align); - ybf->v_buffer = (uint8_t *)yv12_align_addr( - buf + yplane_size + uvplane_size + (uv_border_h * uv_stride) + - uv_border_w, vp9_byte_align); + ybf->v_buffer = + (uint8_t *)yv12_align_addr(buf + yplane_size + uvplane_size + + (uv_border_h * uv_stride) + uv_border_w, + vp9_byte_align); ybf->corrupted = 0; /* assume not corrupted by errors */ return 0; @@ -266,14 +257,12 @@ int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, return -2; } -int vpx_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, - int width, int height, +int 
vpx_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height, int ss_x, int ss_y, #if CONFIG_VP9_HIGHBITDEPTH int use_highbitdepth, #endif - int border, - int byte_alignment) { + int border, int byte_alignment) { if (ybf) { vpx_free_frame_buffer(ybf); return vpx_realloc_frame_buffer(ybf, width, height, ss_x, ss_y, diff --git a/libs/libvpx/vpx_scale/generic/yv12extend.c b/libs/libvpx/vpx_scale/generic/yv12extend.c index 670144bc10..a6aaff95a0 100644 --- a/libs/libvpx/vpx_scale/generic/yv12extend.c +++ b/libs/libvpx/vpx_scale/generic/yv12extend.c @@ -19,9 +19,8 @@ #include "vp9/common/vp9_common.h" #endif -static void extend_plane(uint8_t *const src, int src_stride, - int width, int height, - int extend_top, int extend_left, +static void extend_plane(uint8_t *const src, int src_stride, int width, + int height, int extend_top, int extend_left, int extend_bottom, int extend_right) { int i; const int linesize = extend_left + extend_right + width; @@ -61,9 +60,8 @@ static void extend_plane(uint8_t *const src, int src_stride, } #if CONFIG_VP9_HIGHBITDEPTH -static void extend_plane_high(uint8_t *const src8, int src_stride, - int width, int height, - int extend_top, int extend_left, +static void extend_plane_high(uint8_t *const src8, int src_stride, int width, + int height, int extend_top, int extend_left, int extend_bottom, int extend_right) { int i; const int linesize = extend_left + extend_right + width; @@ -115,49 +113,40 @@ void vp8_yv12_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) { #if CONFIG_VP9_HIGHBITDEPTH if (ybf->flags & YV12_FLAG_HIGHBITDEPTH) { - extend_plane_high( - ybf->y_buffer, ybf->y_stride, - ybf->y_crop_width, ybf->y_crop_height, - ybf->border, ybf->border, - ybf->border + ybf->y_height - ybf->y_crop_height, - ybf->border + ybf->y_width - ybf->y_crop_width); + extend_plane_high(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width, + ybf->y_crop_height, ybf->border, ybf->border, + ybf->border + ybf->y_height - ybf->y_crop_height, + ybf->border + 
ybf->y_width - ybf->y_crop_width); - extend_plane_high( - ybf->u_buffer, ybf->uv_stride, - ybf->uv_crop_width, ybf->uv_crop_height, - uv_border, uv_border, - uv_border + ybf->uv_height - ybf->uv_crop_height, - uv_border + ybf->uv_width - ybf->uv_crop_width); + extend_plane_high(ybf->u_buffer, ybf->uv_stride, ybf->uv_crop_width, + ybf->uv_crop_height, uv_border, uv_border, + uv_border + ybf->uv_height - ybf->uv_crop_height, + uv_border + ybf->uv_width - ybf->uv_crop_width); - extend_plane_high( - ybf->v_buffer, ybf->uv_stride, - ybf->uv_crop_width, ybf->uv_crop_height, - uv_border, uv_border, - uv_border + ybf->uv_height - ybf->uv_crop_height, - uv_border + ybf->uv_width - ybf->uv_crop_width); + extend_plane_high(ybf->v_buffer, ybf->uv_stride, ybf->uv_crop_width, + ybf->uv_crop_height, uv_border, uv_border, + uv_border + ybf->uv_height - ybf->uv_crop_height, + uv_border + ybf->uv_width - ybf->uv_crop_width); return; } #endif - extend_plane(ybf->y_buffer, ybf->y_stride, - ybf->y_crop_width, ybf->y_crop_height, - ybf->border, ybf->border, + extend_plane(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width, + ybf->y_crop_height, ybf->border, ybf->border, ybf->border + ybf->y_height - ybf->y_crop_height, ybf->border + ybf->y_width - ybf->y_crop_width); - extend_plane(ybf->u_buffer, ybf->uv_stride, - ybf->uv_crop_width, ybf->uv_crop_height, - uv_border, uv_border, + extend_plane(ybf->u_buffer, ybf->uv_stride, ybf->uv_crop_width, + ybf->uv_crop_height, uv_border, uv_border, uv_border + ybf->uv_height - ybf->uv_crop_height, uv_border + ybf->uv_width - ybf->uv_crop_width); - extend_plane(ybf->v_buffer, ybf->uv_stride, - ybf->uv_crop_width, ybf->uv_crop_height, - uv_border, uv_border, + extend_plane(ybf->v_buffer, ybf->uv_stride, ybf->uv_crop_width, + ybf->uv_crop_height, uv_border, uv_border, uv_border + ybf->uv_height - ybf->uv_crop_height, uv_border + ybf->uv_width - ybf->uv_crop_width); } -#if CONFIG_VP9 || CONFIG_VP10 +#if CONFIG_VP9 static void 
extend_frame(YV12_BUFFER_CONFIG *const ybf, int ext_size) { const int c_w = ybf->uv_crop_width; const int c_h = ybf->uv_crop_height; @@ -175,29 +164,25 @@ static void extend_frame(YV12_BUFFER_CONFIG *const ybf, int ext_size) { #if CONFIG_VP9_HIGHBITDEPTH if (ybf->flags & YV12_FLAG_HIGHBITDEPTH) { - extend_plane_high(ybf->y_buffer, ybf->y_stride, - ybf->y_crop_width, ybf->y_crop_height, - ext_size, ext_size, + extend_plane_high(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width, + ybf->y_crop_height, ext_size, ext_size, ext_size + ybf->y_height - ybf->y_crop_height, ext_size + ybf->y_width - ybf->y_crop_width); - extend_plane_high(ybf->u_buffer, ybf->uv_stride, - c_w, c_h, c_et, c_el, c_eb, c_er); - extend_plane_high(ybf->v_buffer, ybf->uv_stride, - c_w, c_h, c_et, c_el, c_eb, c_er); + extend_plane_high(ybf->u_buffer, ybf->uv_stride, c_w, c_h, c_et, c_el, c_eb, + c_er); + extend_plane_high(ybf->v_buffer, ybf->uv_stride, c_w, c_h, c_et, c_el, c_eb, + c_er); return; } #endif - extend_plane(ybf->y_buffer, ybf->y_stride, - ybf->y_crop_width, ybf->y_crop_height, - ext_size, ext_size, + extend_plane(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width, + ybf->y_crop_height, ext_size, ext_size, ext_size + ybf->y_height - ybf->y_crop_height, ext_size + ybf->y_width - ybf->y_crop_width); - extend_plane(ybf->u_buffer, ybf->uv_stride, - c_w, c_h, c_et, c_el, c_eb, c_er); + extend_plane(ybf->u_buffer, ybf->uv_stride, c_w, c_h, c_et, c_el, c_eb, c_er); - extend_plane(ybf->v_buffer, ybf->uv_stride, - c_w, c_h, c_et, c_el, c_eb, c_er); + extend_plane(ybf->v_buffer, ybf->uv_stride, c_w, c_h, c_et, c_el, c_eb, c_er); } void vpx_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) { @@ -205,19 +190,20 @@ void vpx_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) { } void vpx_extend_frame_inner_borders_c(YV12_BUFFER_CONFIG *ybf) { - const int inner_bw = (ybf->border > VP9INNERBORDERINPIXELS) ? - VP9INNERBORDERINPIXELS : ybf->border; + const int inner_bw = (ybf->border > VP9INNERBORDERINPIXELS) + ? 
VP9INNERBORDERINPIXELS + : ybf->border; extend_frame(ybf, inner_bw); } #if CONFIG_VP9_HIGHBITDEPTH -void memcpy_short_addr(uint8_t *dst8, const uint8_t *src8, int num) { +static void memcpy_short_addr(uint8_t *dst8, const uint8_t *src8, int num) { uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); uint16_t *src = CONVERT_TO_SHORTPTR(src8); memcpy(dst, src, num * sizeof(uint16_t)); } #endif // CONFIG_VP9_HIGHBITDEPTH -#endif // CONFIG_VP9 || CONFIG_VP10 +#endif // CONFIG_VP9 // Copies the source image into the destination image and updates the // destination's UMV borders. diff --git a/libs/libvpx/vpx_scale/mips/dspr2/yv12extend_dspr2.c b/libs/libvpx/vpx_scale/mips/dspr2/yv12extend_dspr2.c index aab478539a..d3d1b07f45 100644 --- a/libs/libvpx/vpx_scale/mips/dspr2/yv12extend_dspr2.c +++ b/libs/libvpx/vpx_scale/mips/dspr2/yv12extend_dspr2.c @@ -16,69 +16,65 @@ #include "vpx_scale/vpx_scale.h" #if HAVE_DSPR2 -static void extend_plane(uint8_t *const src, int src_stride, - int width, int height, - int extend_top, int extend_left, +static void extend_plane(uint8_t *const src, int src_stride, int width, + int height, int extend_top, int extend_left, int extend_bottom, int extend_right) { - int i, j; - uint8_t *left_src, *right_src; - uint8_t *left_dst_start, *right_dst_start; - uint8_t *left_dst, *right_dst; - uint8_t *top_src, *bot_src; - uint8_t *top_dst, *bot_dst; - uint32_t left_pix; - uint32_t right_pix; - uint32_t linesize; + int i, j; + uint8_t *left_src, *right_src; + uint8_t *left_dst_start, *right_dst_start; + uint8_t *left_dst, *right_dst; + uint8_t *top_src, *bot_src; + uint8_t *top_dst, *bot_dst; + uint32_t left_pix; + uint32_t right_pix; + uint32_t linesize; /* copy the left and right most columns out */ - left_src = src; + left_src = src; right_src = src + width - 1; left_dst_start = src - extend_left; right_dst_start = src + width; - for (i = height; i--; ) { - left_dst = left_dst_start; + for (i = height; i--;) { + left_dst = left_dst_start; right_dst = 
right_dst_start; - __asm__ __volatile__ ( + __asm__ __volatile__( "lb %[left_pix], 0(%[left_src]) \n\t" "lb %[right_pix], 0(%[right_src]) \n\t" "replv.qb %[left_pix], %[left_pix] \n\t" "replv.qb %[right_pix], %[right_pix] \n\t" - : [left_pix] "=&r" (left_pix), [right_pix] "=&r" (right_pix) - : [left_src] "r" (left_src), [right_src] "r" (right_src) - ); + : [left_pix] "=&r"(left_pix), [right_pix] "=&r"(right_pix) + : [left_src] "r"(left_src), [right_src] "r"(right_src)); - for (j = extend_left/4; j--; ) { - __asm__ __volatile__ ( - "sw %[left_pix], 0(%[left_dst]) \n\t" - "sw %[right_pix], 0(%[right_dst]) \n\t" + for (j = extend_left / 4; j--;) { + __asm__ __volatile__( + "sw %[left_pix], 0(%[left_dst]) \n\t" + "sw %[right_pix], 0(%[right_dst]) \n\t" - : - : [left_dst] "r" (left_dst), [left_pix] "r" (left_pix), - [right_dst] "r" (right_dst), [right_pix] "r" (right_pix) - ); + : + : [left_dst] "r"(left_dst), [left_pix] "r"(left_pix), + [right_dst] "r"(right_dst), [right_pix] "r"(right_pix)); left_dst += 4; right_dst += 4; } - for (j = extend_left%4; j--; ) { - __asm__ __volatile__ ( - "sb %[left_pix], 0(%[left_dst]) \n\t" - "sb %[right_pix], 0(%[right_dst]) \n\t" + for (j = extend_left % 4; j--;) { + __asm__ __volatile__( + "sb %[left_pix], 0(%[left_dst]) \n\t" + "sb %[right_pix], 0(%[right_dst]) \n\t" - : - : [left_dst] "r" (left_dst), [left_pix] "r" (left_pix), - [right_dst] "r" (right_dst), [right_pix] "r" (right_pix) - ); + : + : [left_dst] "r"(left_dst), [left_pix] "r"(left_pix), + [right_dst] "r"(right_dst), [right_pix] "r"(right_pix)); left_dst += 1; right_dst += 1; } - left_src += src_stride; + left_src += src_stride; right_src += src_stride; left_dst_start += src_stride; right_dst_start += src_stride; @@ -90,7 +86,7 @@ static void extend_plane(uint8_t *const src, int src_stride, top_src = src - extend_left; bot_src = src + src_stride * (height - 1) - extend_left; top_dst = src + src_stride * (-extend_top) - extend_left; - bot_dst = src + src_stride * (height) 
- extend_left; + bot_dst = src + src_stride * (height)-extend_left; linesize = extend_left + extend_right + width; for (i = 0; i < extend_top; i++) { @@ -119,17 +115,14 @@ static void extend_frame(YV12_BUFFER_CONFIG *const ybf, int ext_size) { assert(ybf->y_height - ybf->y_crop_height >= 0); assert(ybf->y_width - ybf->y_crop_width >= 0); - extend_plane(ybf->y_buffer, ybf->y_stride, - ybf->y_crop_width, ybf->y_crop_height, - ext_size, ext_size, + extend_plane(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width, + ybf->y_crop_height, ext_size, ext_size, ext_size + ybf->y_height - ybf->y_crop_height, ext_size + ybf->y_width - ybf->y_crop_width); - extend_plane(ybf->u_buffer, ybf->uv_stride, - c_w, c_h, c_et, c_el, c_eb, c_er); + extend_plane(ybf->u_buffer, ybf->uv_stride, c_w, c_h, c_et, c_el, c_eb, c_er); - extend_plane(ybf->v_buffer, ybf->uv_stride, - c_w, c_h, c_et, c_el, c_eb, c_er); + extend_plane(ybf->v_buffer, ybf->uv_stride, c_w, c_h, c_et, c_el, c_eb, c_er); } void vpx_extend_frame_borders_dspr2(YV12_BUFFER_CONFIG *ybf) { @@ -137,8 +130,9 @@ void vpx_extend_frame_borders_dspr2(YV12_BUFFER_CONFIG *ybf) { } void vpx_extend_frame_inner_borders_dspr2(YV12_BUFFER_CONFIG *ybf) { - const int inner_bw = (ybf->border > VP9INNERBORDERINPIXELS) ? - VP9INNERBORDERINPIXELS : ybf->border; + const int inner_bw = (ybf->border > VP9INNERBORDERINPIXELS) + ? VP9INNERBORDERINPIXELS + : ybf->border; extend_frame(ybf, inner_bw); } #endif diff --git a/libs/libvpx/vpx_scale/vpx_scale.h b/libs/libvpx/vpx_scale/vpx_scale.h index 43fcf9d66e..478a483461 100644 --- a/libs/libvpx/vpx_scale/vpx_scale.h +++ b/libs/libvpx/vpx_scale/vpx_scale.h @@ -8,20 +8,15 @@ * be found in the AUTHORS file in the root of the source tree. 
*/ - #ifndef VPX_SCALE_VPX_SCALE_H_ #define VPX_SCALE_VPX_SCALE_H_ #include "vpx_scale/yv12config.h" -extern void vpx_scale_frame(YV12_BUFFER_CONFIG *src, - YV12_BUFFER_CONFIG *dst, - unsigned char *temp_area, - unsigned char temp_height, - unsigned int hscale, - unsigned int hratio, - unsigned int vscale, - unsigned int vratio, +extern void vpx_scale_frame(YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *dst, + unsigned char *temp_area, unsigned char temp_height, + unsigned int hscale, unsigned int hratio, + unsigned int vscale, unsigned int vratio, unsigned int interlaced); #endif // VPX_SCALE_VPX_SCALE_H_ diff --git a/libs/libvpx/vpx_scale/vpx_scale_rtcd.c b/libs/libvpx/vpx_scale/vpx_scale_rtcd.c index bea603fd10..dc4d9593a8 100644 --- a/libs/libvpx/vpx_scale/vpx_scale_rtcd.c +++ b/libs/libvpx/vpx_scale/vpx_scale_rtcd.c @@ -12,7 +12,4 @@ #include "./vpx_scale_rtcd.h" #include "vpx_ports/vpx_once.h" -void vpx_scale_rtcd() -{ - once(setup_rtcd_internal); -} +void vpx_scale_rtcd() { once(setup_rtcd_internal); } diff --git a/libs/libvpx/vpx_scale/vpx_scale_rtcd.pl b/libs/libvpx/vpx_scale/vpx_scale_rtcd.pl index 56b952ba35..44b115c7eb 100644 --- a/libs/libvpx/vpx_scale/vpx_scale_rtcd.pl +++ b/libs/libvpx/vpx_scale/vpx_scale_rtcd.pl @@ -22,7 +22,7 @@ add_proto qw/void vp8_yv12_copy_frame/, "const struct yv12_buffer_config *src_yb add_proto qw/void vpx_yv12_copy_y/, "const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc"; -if ((vpx_config("CONFIG_VP9") eq "yes") || (vpx_config("CONFIG_VP10") eq "yes")) { +if (vpx_config("CONFIG_VP9") eq "yes") { add_proto qw/void vpx_extend_frame_borders/, "struct yv12_buffer_config *ybf"; specialize qw/vpx_extend_frame_borders dspr2/; diff --git a/libs/libvpx/vpx_scale/yv12config.h b/libs/libvpx/vpx_scale/yv12config.h index 37b255d4d3..b9b3362144 100644 --- a/libs/libvpx/vpx_scale/yv12config.h +++ b/libs/libvpx/vpx_scale/yv12config.h @@ -20,28 +20,28 @@ extern "C" { #include "vpx/vpx_frame_buffer.h" #include 
"vpx/vpx_integer.h" -#define VP8BORDERINPIXELS 32 -#define VP9INNERBORDERINPIXELS 96 -#define VP9_INTERP_EXTEND 4 -#define VP9_ENC_BORDER_IN_PIXELS 160 -#define VP9_DEC_BORDER_IN_PIXELS 32 +#define VP8BORDERINPIXELS 32 +#define VP9INNERBORDERINPIXELS 96 +#define VP9_INTERP_EXTEND 4 +#define VP9_ENC_BORDER_IN_PIXELS 160 +#define VP9_DEC_BORDER_IN_PIXELS 32 typedef struct yv12_buffer_config { - int y_width; - int y_height; - int y_crop_width; - int y_crop_height; - int y_stride; + int y_width; + int y_height; + int y_crop_width; + int y_crop_height; + int y_stride; - int uv_width; - int uv_height; - int uv_crop_width; - int uv_crop_height; - int uv_stride; + int uv_width; + int uv_height; + int uv_crop_width; + int uv_crop_height; + int uv_stride; - int alpha_width; - int alpha_height; - int alpha_stride; + int alpha_width; + int alpha_height; + int alpha_stride; uint8_t *y_buffer; uint8_t *u_buffer; @@ -66,14 +66,14 @@ typedef struct yv12_buffer_config { #define YV12_FLAG_HIGHBITDEPTH 8 -int vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, - int width, int height, int border); -int vp8_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, - int width, int height, int border); +int vp8_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height, + int border); +int vp8_yv12_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, + int height, int border); int vp8_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf); -int vpx_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, - int width, int height, int ss_x, int ss_y, +int vpx_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height, + int ss_x, int ss_y, #if CONFIG_VP9_HIGHBITDEPTH int use_highbitdepth, #endif @@ -86,16 +86,14 @@ int vpx_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, // to decode the current frame. If cb is NULL, libvpx will allocate memory // internally to decode the current frame. Returns 0 on success. Returns < 0 // on failure. 
-int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, - int width, int height, int ss_x, int ss_y, +int vpx_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height, + int ss_x, int ss_y, #if CONFIG_VP9_HIGHBITDEPTH int use_highbitdepth, #endif - int border, - int byte_alignment, + int border, int byte_alignment, vpx_codec_frame_buffer_t *fb, - vpx_get_frame_buffer_cb_fn_t cb, - void *cb_priv); + vpx_get_frame_buffer_cb_fn_t cb, void *cb_priv); int vpx_free_frame_buffer(YV12_BUFFER_CONFIG *ybf); #ifdef __cplusplus diff --git a/libs/libvpx/vpx_util/endian_inl.h b/libs/libvpx/vpx_util/endian_inl.h index 37bdce1ccd..dc38774095 100644 --- a/libs/libvpx/vpx_util/endian_inl.h +++ b/libs/libvpx/vpx_util/endian_inl.h @@ -17,21 +17,20 @@ #include "vpx/vpx_integer.h" #if defined(__GNUC__) -# define LOCAL_GCC_VERSION ((__GNUC__ << 8) | __GNUC_MINOR__) -# define LOCAL_GCC_PREREQ(maj, min) \ - (LOCAL_GCC_VERSION >= (((maj) << 8) | (min))) +#define LOCAL_GCC_VERSION ((__GNUC__ << 8) | __GNUC_MINOR__) +#define LOCAL_GCC_PREREQ(maj, min) (LOCAL_GCC_VERSION >= (((maj) << 8) | (min))) #else -# define LOCAL_GCC_VERSION 0 -# define LOCAL_GCC_PREREQ(maj, min) 0 +#define LOCAL_GCC_VERSION 0 +#define LOCAL_GCC_PREREQ(maj, min) 0 #endif // handle clang compatibility #ifndef __has_builtin -# define __has_builtin(x) 0 +#define __has_builtin(x) 0 #endif // some endian fix (e.g.: mips-gcc doesn't define __BIG_ENDIAN__) -#if !defined(WORDS_BIGENDIAN) && \ +#if !defined(WORDS_BIGENDIAN) && \ (defined(__BIG_ENDIAN__) || defined(_M_PPC) || \ (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) #define WORDS_BIGENDIAN @@ -80,12 +79,11 @@ static INLINE uint16_t BSwap16(uint16_t x) { static INLINE uint32_t BSwap32(uint32_t x) { #if defined(VPX_USE_MIPS32_R2) uint32_t ret; - __asm__ volatile ( - "wsbh %[ret], %[x] \n\t" - "rotr %[ret], %[ret], 16 \n\t" - : [ret]"=r"(ret) - : [x]"r"(x) - ); + __asm__ volatile( + "wsbh %[ret], %[x] \n\t" + "rotr %[ret], %[ret], 16 \n\t" + 
: [ret] "=r"(ret) + : [x] "r"(x)); return ret; #elif defined(HAVE_BUILTIN_BSWAP32) return __builtin_bswap32(x); @@ -109,10 +107,10 @@ static INLINE uint64_t BSwap64(uint64_t x) { return swapped_bytes; #elif defined(_MSC_VER) return (uint64_t)_byteswap_uint64(x); -#else // generic code for swapping 64-bit values (suggested by bdb@) +#else // generic code for swapping 64-bit values (suggested by bdb@) x = ((x & 0xffffffff00000000ull) >> 32) | ((x & 0x00000000ffffffffull) << 32); x = ((x & 0xffff0000ffff0000ull) >> 16) | ((x & 0x0000ffff0000ffffull) << 16); - x = ((x & 0xff00ff00ff00ff00ull) >> 8) | ((x & 0x00ff00ff00ff00ffull) << 8); + x = ((x & 0xff00ff00ff00ff00ull) >> 8) | ((x & 0x00ff00ff00ff00ffull) << 8); return x; #endif // HAVE_BUILTIN_BSWAP64 } diff --git a/libs/libvpx/vpx_util/vpx_thread.c b/libs/libvpx/vpx_util/vpx_thread.c index 0bb0125bd4..04c5fb6f26 100644 --- a/libs/libvpx/vpx_util/vpx_thread.c +++ b/libs/libvpx/vpx_util/vpx_thread.c @@ -10,11 +10,10 @@ // Multi-threaded worker // // Original source: -// http://git.chromium.org/webm/libwebp.git -// 100644 blob 264210ba2807e4da47eb5d18c04cf869d89b9784 src/utils/thread.c +// https://chromium.googlesource.com/webm/libwebp #include -#include // for memset() +#include // for memset() #include "./vpx_thread.h" #include "vpx_mem/vpx_mem.h" @@ -22,8 +21,8 @@ struct VPxWorkerImpl { pthread_mutex_t mutex_; - pthread_cond_t condition_; - pthread_t thread_; + pthread_cond_t condition_; + pthread_t thread_; }; //------------------------------------------------------------------------------ @@ -31,29 +30,28 @@ struct VPxWorkerImpl { static void execute(VPxWorker *const worker); // Forward declaration. 
static THREADFN thread_loop(void *ptr) { - VPxWorker *const worker = (VPxWorker*)ptr; + VPxWorker *const worker = (VPxWorker *)ptr; int done = 0; while (!done) { pthread_mutex_lock(&worker->impl_->mutex_); - while (worker->status_ == OK) { // wait in idling mode + while (worker->status_ == OK) { // wait in idling mode pthread_cond_wait(&worker->impl_->condition_, &worker->impl_->mutex_); } if (worker->status_ == WORK) { execute(worker); worker->status_ = OK; - } else if (worker->status_ == NOT_OK) { // finish the worker + } else if (worker->status_ == NOT_OK) { // finish the worker done = 1; } // signal to the main thread that we're done (for sync()) pthread_cond_signal(&worker->impl_->condition_); pthread_mutex_unlock(&worker->impl_->mutex_); } - return THREAD_RETURN(NULL); // Thread is finished + return THREAD_RETURN(NULL); // Thread is finished } // main thread state control -static void change_state(VPxWorker *const worker, - VPxWorkerStatus new_status) { +static void change_state(VPxWorker *const worker, VPxWorkerStatus new_status) { // No-op when attempting to change state on a thread that didn't come up. // Checking status_ without acquiring the lock first would result in a data // race. 
@@ -96,7 +94,7 @@ static int reset(VPxWorker *const worker) { worker->had_error = 0; if (worker->status_ < OK) { #if CONFIG_MULTITHREAD - worker->impl_ = (VPxWorkerImpl*)vpx_calloc(1, sizeof(*worker->impl_)); + worker->impl_ = (VPxWorkerImpl *)vpx_calloc(1, sizeof(*worker->impl_)); if (worker->impl_ == NULL) { return 0; } @@ -114,7 +112,7 @@ static int reset(VPxWorker *const worker) { if (!ok) { pthread_mutex_destroy(&worker->impl_->mutex_); pthread_cond_destroy(&worker->impl_->condition_); - Error: + Error: vpx_free(worker->impl_); worker->impl_ = NULL; return 0; @@ -162,15 +160,14 @@ static void end(VPxWorker *const worker) { //------------------------------------------------------------------------------ -static VPxWorkerInterface g_worker_interface = { - init, reset, sync, launch, execute, end -}; +static VPxWorkerInterface g_worker_interface = { init, reset, sync, + launch, execute, end }; -int vpx_set_worker_interface(const VPxWorkerInterface* const winterface) { - if (winterface == NULL || - winterface->init == NULL || winterface->reset == NULL || - winterface->sync == NULL || winterface->launch == NULL || - winterface->execute == NULL || winterface->end == NULL) { +int vpx_set_worker_interface(const VPxWorkerInterface *const winterface) { + if (winterface == NULL || winterface->init == NULL || + winterface->reset == NULL || winterface->sync == NULL || + winterface->launch == NULL || winterface->execute == NULL || + winterface->end == NULL) { return 0; } g_worker_interface = *winterface; diff --git a/libs/libvpx/vpx_util/vpx_thread.h b/libs/libvpx/vpx_util/vpx_thread.h index de63c4da00..53a5f4966a 100644 --- a/libs/libvpx/vpx_util/vpx_thread.h +++ b/libs/libvpx/vpx_util/vpx_thread.h @@ -10,8 +10,7 @@ // Multi-threaded worker // // Original source: -// http://git.chromium.org/webm/libwebp.git -// 100644 blob 7bd451b124ae3b81596abfbcc823e3cb129d3a38 src/utils/thread.h +// https://chromium.googlesource.com/webm/libwebp #ifndef VPX_THREAD_H_ #define 
VPX_THREAD_H_ @@ -29,40 +28,70 @@ extern "C" { #if CONFIG_MULTITHREAD #if defined(_WIN32) && !HAVE_PTHREAD_H -#include // NOLINT +#include // NOLINT #include // NOLINT #include // NOLINT typedef HANDLE pthread_t; typedef CRITICAL_SECTION pthread_mutex_t; + +#if _WIN32_WINNT >= 0x0600 // Windows Vista / Server 2008 or greater +#define USE_WINDOWS_CONDITION_VARIABLE +typedef CONDITION_VARIABLE pthread_cond_t; +#else typedef struct { HANDLE waiting_sem_; HANDLE received_sem_; HANDLE signal_event_; } pthread_cond_t; +#endif // _WIN32_WINNT >= 0x600 + +#ifndef WINAPI_FAMILY_PARTITION +#define WINAPI_PARTITION_DESKTOP 1 +#define WINAPI_FAMILY_PARTITION(x) x +#endif + +#if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) +#define USE_CREATE_THREAD +#endif //------------------------------------------------------------------------------ // simplistic pthread emulation layer // _beginthreadex requires __stdcall +#if defined(__GNUC__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)) +#define THREADFN __attribute__((force_align_arg_pointer)) unsigned int __stdcall +#else #define THREADFN unsigned int __stdcall +#endif #define THREAD_RETURN(val) (unsigned int)((DWORD_PTR)val) -static INLINE int pthread_create(pthread_t* const thread, const void* attr, - unsigned int (__stdcall *start)(void*), - void* arg) { +#if _WIN32_WINNT >= 0x0501 // Windows XP or greater +#define WaitForSingleObject(obj, timeout) \ + WaitForSingleObjectEx(obj, timeout, FALSE /*bAlertable*/) +#endif + +static INLINE int pthread_create(pthread_t *const thread, const void *attr, + unsigned int(__stdcall *start)(void *), + void *arg) { (void)attr; - *thread = (pthread_t)_beginthreadex(NULL, /* void *security */ - 0, /* unsigned stack_size */ - start, - arg, - 0, /* unsigned initflag */ - NULL); /* unsigned *thrdaddr */ +#ifdef USE_CREATE_THREAD + *thread = CreateThread(NULL, /* lpThreadAttributes */ + 0, /* dwStackSize */ + start, arg, 0, /* dwStackSize */ + NULL); /* lpThreadId */ +#else + 
*thread = (pthread_t)_beginthreadex(NULL, /* void *security */ + 0, /* unsigned stack_size */ + start, arg, 0, /* unsigned initflag */ + NULL); /* unsigned *thrdaddr */ +#endif if (*thread == NULL) return 1; SetThreadPriority(*thread, THREAD_PRIORITY_ABOVE_NORMAL); return 0; } -static INLINE int pthread_join(pthread_t thread, void** value_ptr) { +static INLINE int pthread_join(pthread_t thread, void **value_ptr) { (void)value_ptr; return (WaitForSingleObject(thread, INFINITE) != WAIT_OBJECT_0 || CloseHandle(thread) == 0); @@ -70,9 +99,13 @@ static INLINE int pthread_join(pthread_t thread, void** value_ptr) { // Mutex static INLINE int pthread_mutex_init(pthread_mutex_t *const mutex, - void* mutexattr) { + void *mutexattr) { (void)mutexattr; +#if _WIN32_WINNT >= 0x0600 // Windows Vista / Server 2008 or greater + InitializeCriticalSectionEx(mutex, 0 /*dwSpinCount*/, 0 /*Flags*/); +#else InitializeCriticalSection(mutex); +#endif return 0; } @@ -98,29 +131,39 @@ static INLINE int pthread_mutex_destroy(pthread_mutex_t *const mutex) { // Condition static INLINE int pthread_cond_destroy(pthread_cond_t *const condition) { int ok = 1; +#ifdef USE_WINDOWS_CONDITION_VARIABLE + (void)condition; +#else ok &= (CloseHandle(condition->waiting_sem_) != 0); ok &= (CloseHandle(condition->received_sem_) != 0); ok &= (CloseHandle(condition->signal_event_) != 0); +#endif return !ok; } static INLINE int pthread_cond_init(pthread_cond_t *const condition, - void* cond_attr) { + void *cond_attr) { (void)cond_attr; +#ifdef USE_WINDOWS_CONDITION_VARIABLE + InitializeConditionVariable(condition); +#else condition->waiting_sem_ = CreateSemaphore(NULL, 0, MAX_DECODE_THREADS, NULL); condition->received_sem_ = CreateSemaphore(NULL, 0, MAX_DECODE_THREADS, NULL); condition->signal_event_ = CreateEvent(NULL, FALSE, FALSE, NULL); - if (condition->waiting_sem_ == NULL || - condition->received_sem_ == NULL || + if (condition->waiting_sem_ == NULL || condition->received_sem_ == NULL || 
condition->signal_event_ == NULL) { pthread_cond_destroy(condition); return 1; } +#endif return 0; } static INLINE int pthread_cond_signal(pthread_cond_t *const condition) { int ok = 1; +#ifdef USE_WINDOWS_CONDITION_VARIABLE + WakeConditionVariable(condition); +#else if (WaitForSingleObject(condition->waiting_sem_, 0) == WAIT_OBJECT_0) { // a thread is waiting in pthread_cond_wait: allow it to be notified ok = SetEvent(condition->signal_event_); @@ -129,42 +172,191 @@ static INLINE int pthread_cond_signal(pthread_cond_t *const condition) { ok &= (WaitForSingleObject(condition->received_sem_, INFINITE) != WAIT_OBJECT_0); } +#endif return !ok; } static INLINE int pthread_cond_wait(pthread_cond_t *const condition, pthread_mutex_t *const mutex) { int ok; +#ifdef USE_WINDOWS_CONDITION_VARIABLE + ok = SleepConditionVariableCS(condition, mutex, INFINITE); +#else // note that there is a consumer available so the signal isn't dropped in // pthread_cond_signal - if (!ReleaseSemaphore(condition->waiting_sem_, 1, NULL)) - return 1; + if (!ReleaseSemaphore(condition->waiting_sem_, 1, NULL)) return 1; // now unlock the mutex so pthread_cond_signal may be issued pthread_mutex_unlock(mutex); ok = (WaitForSingleObject(condition->signal_event_, INFINITE) == WAIT_OBJECT_0); ok &= ReleaseSemaphore(condition->received_sem_, 1, NULL); pthread_mutex_lock(mutex); +#endif return !ok; } -#else // _WIN32 -#include // NOLINT -# define THREADFN void* -# define THREAD_RETURN(val) val +#elif defined(__OS2__) +#define INCL_DOS +#include // NOLINT + +#include // NOLINT +#include // NOLINT +#include // NOLINT + +#define pthread_t TID +#define pthread_mutex_t HMTX + +typedef struct { + HEV event_sem_; + HEV ack_sem_; + volatile unsigned wait_count_; +} pthread_cond_t; + +//------------------------------------------------------------------------------ +// simplistic pthread emulation layer + +#define THREADFN void * +#define THREAD_RETURN(val) (val) + +typedef struct { + void *(*start_)(void *); + 
void *arg_; +} thread_arg; + +static void thread_start(void *arg) { + thread_arg targ = *(thread_arg *)arg; + free(arg); + + targ.start_(targ.arg_); +} + +static INLINE int pthread_create(pthread_t *const thread, const void *attr, + void *(*start)(void *), void *arg) { + int tid; + thread_arg *targ = (thread_arg *)malloc(sizeof(*targ)); + if (targ == NULL) return 1; + + (void)attr; + + targ->start_ = start; + targ->arg_ = arg; + tid = (pthread_t)_beginthread(thread_start, NULL, 1024 * 1024, targ); + if (tid == -1) { + free(targ); + return 1; + } + + *thread = tid; + return 0; +} + +static INLINE int pthread_join(pthread_t thread, void **value_ptr) { + (void)value_ptr; + return DosWaitThread(&thread, DCWW_WAIT) != 0; +} + +// Mutex +static INLINE int pthread_mutex_init(pthread_mutex_t *const mutex, + void *mutexattr) { + (void)mutexattr; + return DosCreateMutexSem(NULL, mutex, 0, FALSE) != 0; +} + +static INLINE int pthread_mutex_trylock(pthread_mutex_t *const mutex) { + return DosRequestMutexSem(*mutex, SEM_IMMEDIATE_RETURN) == 0 ? 
0 : EBUSY; +} + +static INLINE int pthread_mutex_lock(pthread_mutex_t *const mutex) { + return DosRequestMutexSem(*mutex, SEM_INDEFINITE_WAIT) != 0; +} + +static INLINE int pthread_mutex_unlock(pthread_mutex_t *const mutex) { + return DosReleaseMutexSem(*mutex) != 0; +} + +static INLINE int pthread_mutex_destroy(pthread_mutex_t *const mutex) { + return DosCloseMutexSem(*mutex) != 0; +} + +// Condition +static INLINE int pthread_cond_destroy(pthread_cond_t *const condition) { + int ok = 1; + ok &= DosCloseEventSem(condition->event_sem_) == 0; + ok &= DosCloseEventSem(condition->ack_sem_) == 0; + return !ok; +} + +static INLINE int pthread_cond_init(pthread_cond_t *const condition, + void *cond_attr) { + int ok = 1; + (void)cond_attr; + + ok &= + DosCreateEventSem(NULL, &condition->event_sem_, DCE_POSTONE, FALSE) == 0; + ok &= DosCreateEventSem(NULL, &condition->ack_sem_, DCE_POSTONE, FALSE) == 0; + if (!ok) { + pthread_cond_destroy(condition); + return 1; + } + condition->wait_count_ = 0; + return 0; +} + +static INLINE int pthread_cond_signal(pthread_cond_t *const condition) { + int ok = 1; + + if (!__atomic_cmpxchg32(&condition->wait_count_, 0, 0)) { + ok &= DosPostEventSem(condition->event_sem_) == 0; + ok &= DosWaitEventSem(condition->ack_sem_, SEM_INDEFINITE_WAIT) == 0; + } + + return !ok; +} + +static INLINE int pthread_cond_broadcast(pthread_cond_t *const condition) { + int ok = 1; + + while (!__atomic_cmpxchg32(&condition->wait_count_, 0, 0)) + ok &= pthread_cond_signal(condition) == 0; + + return !ok; +} + +static INLINE int pthread_cond_wait(pthread_cond_t *const condition, + pthread_mutex_t *const mutex) { + int ok = 1; + + __atomic_increment(&condition->wait_count_); + + ok &= pthread_mutex_unlock(mutex) == 0; + + ok &= DosWaitEventSem(condition->event_sem_, SEM_INDEFINITE_WAIT) == 0; + + __atomic_decrement(&condition->wait_count_); + + ok &= DosPostEventSem(condition->ack_sem_) == 0; + + pthread_mutex_lock(mutex); + + return !ok; +} +#else // _WIN32 
+#include // NOLINT +#define THREADFN void * +#define THREAD_RETURN(val) val #endif #endif // CONFIG_MULTITHREAD // State of the worker thread object typedef enum { - NOT_OK = 0, // object is unusable - OK, // ready to work - WORK // busy finishing the current task + NOT_OK = 0, // object is unusable + OK, // ready to work + WORK // busy finishing the current task } VPxWorkerStatus; // Function to be called by the worker thread. Takes two opaque pointers as // arguments (data1 and data2), and should return false in case of error. -typedef int (*VPxWorkerHook)(void*, void*); +typedef int (*VPxWorkerHook)(void *, void *); // Platform-dependent implementation details for the worker. typedef struct VPxWorkerImpl VPxWorkerImpl; @@ -173,10 +365,10 @@ typedef struct VPxWorkerImpl VPxWorkerImpl; typedef struct { VPxWorkerImpl *impl_; VPxWorkerStatus status_; - VPxWorkerHook hook; // hook to call - void *data1; // first argument passed to 'hook' - void *data2; // second argument passed to 'hook' - int had_error; // return value of the last call to 'hook' + VPxWorkerHook hook; // hook to call + void *data1; // first argument passed to 'hook' + void *data2; // second argument passed to 'hook' + int had_error; // return value of the last call to 'hook' } VPxWorker; // The interface for all thread-worker related functions. 
All these functions @@ -217,7 +409,7 @@ const VPxWorkerInterface *vpx_get_worker_interface(void); //------------------------------------------------------------------------------ #ifdef __cplusplus -} // extern "C" +} // extern "C" #endif #endif // VPX_THREAD_H_ diff --git a/libs/libvpx/vpxdec.c b/libs/libvpx/vpxdec.c index 285d58e1e7..ab638ec6b8 100644 --- a/libs/libvpx/vpxdec.c +++ b/libs/libvpx/vpxdec.c @@ -28,7 +28,7 @@ #include "vpx_ports/mem_ops.h" #include "vpx_ports/vpx_timer.h" -#if CONFIG_VP8_DECODER || CONFIG_VP9_DECODER || CONFIG_VP10_DECODER +#if CONFIG_VP8_DECODER || CONFIG_VP9_DECODER #include "vpx/vp8dx.h" #endif @@ -47,134 +47,126 @@ struct VpxDecInputContext { struct WebmInputContext *webm_ctx; }; -static const arg_def_t looparg = ARG_DEF( - NULL, "loops", 1, "Number of times to decode the file"); -static const arg_def_t codecarg = ARG_DEF( - NULL, "codec", 1, "Codec to use"); -static const arg_def_t use_yv12 = ARG_DEF( - NULL, "yv12", 0, "Output raw YV12 frames"); -static const arg_def_t use_i420 = ARG_DEF( - NULL, "i420", 0, "Output raw I420 frames"); -static const arg_def_t flipuvarg = ARG_DEF( - NULL, "flipuv", 0, "Flip the chroma planes in the output"); -static const arg_def_t rawvideo = ARG_DEF( - NULL, "rawvideo", 0, "Output raw YUV frames"); -static const arg_def_t noblitarg = ARG_DEF( - NULL, "noblit", 0, "Don't process the decoded frames"); -static const arg_def_t progressarg = ARG_DEF( - NULL, "progress", 0, "Show progress after each frame decodes"); -static const arg_def_t limitarg = ARG_DEF( - NULL, "limit", 1, "Stop decoding after n frames"); -static const arg_def_t skiparg = ARG_DEF( - NULL, "skip", 1, "Skip the first n input frames"); -static const arg_def_t postprocarg = ARG_DEF( - NULL, "postproc", 0, "Postprocess decoded frames"); -static const arg_def_t summaryarg = ARG_DEF( - NULL, "summary", 0, "Show timing summary"); -static const arg_def_t outputfile = ARG_DEF( - "o", "output", 1, "Output file name pattern (see below)"); 
-static const arg_def_t threadsarg = ARG_DEF( - "t", "threads", 1, "Max threads to use"); -static const arg_def_t frameparallelarg = ARG_DEF( - NULL, "frame-parallel", 0, "Frame parallel decode"); -static const arg_def_t verbosearg = ARG_DEF( - "v", "verbose", 0, "Show version string"); -static const arg_def_t error_concealment = ARG_DEF( - NULL, "error-concealment", 0, "Enable decoder error-concealment"); -static const arg_def_t scalearg = ARG_DEF( - "S", "scale", 0, "Scale output frames uniformly"); -static const arg_def_t continuearg = ARG_DEF( - "k", "keep-going", 0, "(debug) Continue decoding after error"); -static const arg_def_t fb_arg = ARG_DEF( - NULL, "frame-buffers", 1, "Number of frame buffers to use"); -static const arg_def_t md5arg = ARG_DEF( - NULL, "md5", 0, "Compute the MD5 sum of the decoded frame"); +static const arg_def_t looparg = + ARG_DEF(NULL, "loops", 1, "Number of times to decode the file"); +static const arg_def_t codecarg = ARG_DEF(NULL, "codec", 1, "Codec to use"); +static const arg_def_t use_yv12 = + ARG_DEF(NULL, "yv12", 0, "Output raw YV12 frames"); +static const arg_def_t use_i420 = + ARG_DEF(NULL, "i420", 0, "Output raw I420 frames"); +static const arg_def_t flipuvarg = + ARG_DEF(NULL, "flipuv", 0, "Flip the chroma planes in the output"); +static const arg_def_t rawvideo = + ARG_DEF(NULL, "rawvideo", 0, "Output raw YUV frames"); +static const arg_def_t noblitarg = + ARG_DEF(NULL, "noblit", 0, "Don't process the decoded frames"); +static const arg_def_t progressarg = + ARG_DEF(NULL, "progress", 0, "Show progress after each frame decodes"); +static const arg_def_t limitarg = + ARG_DEF(NULL, "limit", 1, "Stop decoding after n frames"); +static const arg_def_t skiparg = + ARG_DEF(NULL, "skip", 1, "Skip the first n input frames"); +static const arg_def_t postprocarg = + ARG_DEF(NULL, "postproc", 0, "Postprocess decoded frames"); +static const arg_def_t summaryarg = + ARG_DEF(NULL, "summary", 0, "Show timing summary"); +static const 
arg_def_t outputfile = + ARG_DEF("o", "output", 1, "Output file name pattern (see below)"); +static const arg_def_t threadsarg = + ARG_DEF("t", "threads", 1, "Max threads to use"); +static const arg_def_t frameparallelarg = + ARG_DEF(NULL, "frame-parallel", 0, "Frame parallel decode"); +static const arg_def_t verbosearg = + ARG_DEF("v", "verbose", 0, "Show version string"); +static const arg_def_t error_concealment = + ARG_DEF(NULL, "error-concealment", 0, "Enable decoder error-concealment"); +static const arg_def_t scalearg = + ARG_DEF("S", "scale", 0, "Scale output frames uniformly"); +static const arg_def_t continuearg = + ARG_DEF("k", "keep-going", 0, "(debug) Continue decoding after error"); +static const arg_def_t fb_arg = + ARG_DEF(NULL, "frame-buffers", 1, "Number of frame buffers to use"); +static const arg_def_t md5arg = + ARG_DEF(NULL, "md5", 0, "Compute the MD5 sum of the decoded frame"); #if CONFIG_VP9_HIGHBITDEPTH -static const arg_def_t outbitdeptharg = ARG_DEF( - NULL, "output-bit-depth", 1, "Output bit-depth for decoded frames"); +static const arg_def_t outbitdeptharg = + ARG_DEF(NULL, "output-bit-depth", 1, "Output bit-depth for decoded frames"); #endif -static const arg_def_t *all_args[] = { - &codecarg, &use_yv12, &use_i420, &flipuvarg, &rawvideo, &noblitarg, - &progressarg, &limitarg, &skiparg, &postprocarg, &summaryarg, &outputfile, - &threadsarg, &frameparallelarg, &verbosearg, &scalearg, &fb_arg, - &md5arg, &error_concealment, &continuearg, +static const arg_def_t *all_args[] = { &codecarg, + &use_yv12, + &use_i420, + &flipuvarg, + &rawvideo, + &noblitarg, + &progressarg, + &limitarg, + &skiparg, + &postprocarg, + &summaryarg, + &outputfile, + &threadsarg, + &frameparallelarg, + &verbosearg, + &scalearg, + &fb_arg, + &md5arg, + &error_concealment, + &continuearg, #if CONFIG_VP9_HIGHBITDEPTH - &outbitdeptharg, + &outbitdeptharg, #endif - NULL -}; + NULL }; #if CONFIG_VP8_DECODER -static const arg_def_t addnoise_level = ARG_DEF( - NULL, 
"noise-level", 1, "Enable VP8 postproc add noise"); -static const arg_def_t deblock = ARG_DEF( - NULL, "deblock", 0, "Enable VP8 deblocking"); +static const arg_def_t addnoise_level = + ARG_DEF(NULL, "noise-level", 1, "Enable VP8 postproc add noise"); +static const arg_def_t deblock = + ARG_DEF(NULL, "deblock", 0, "Enable VP8 deblocking"); static const arg_def_t demacroblock_level = ARG_DEF( NULL, "demacroblock-level", 1, "Enable VP8 demacroblocking, w/ level"); -static const arg_def_t pp_debug_info = ARG_DEF( - NULL, "pp-debug-info", 1, "Enable VP8 visible debug info"); -static const arg_def_t pp_disp_ref_frame = ARG_DEF( - NULL, "pp-dbg-ref-frame", 1, - "Display only selected reference frame per macro block"); -static const arg_def_t pp_disp_mb_modes = ARG_DEF( - NULL, "pp-dbg-mb-modes", 1, "Display only selected macro block modes"); -static const arg_def_t pp_disp_b_modes = ARG_DEF( - NULL, "pp-dbg-b-modes", 1, "Display only selected block modes"); -static const arg_def_t pp_disp_mvs = ARG_DEF( - NULL, "pp-dbg-mvs", 1, "Draw only selected motion vectors"); -static const arg_def_t mfqe = ARG_DEF( - NULL, "mfqe", 0, "Enable multiframe quality enhancement"); +static const arg_def_t mfqe = + ARG_DEF(NULL, "mfqe", 0, "Enable multiframe quality enhancement"); -static const arg_def_t *vp8_pp_args[] = { - &addnoise_level, &deblock, &demacroblock_level, &pp_debug_info, - &pp_disp_ref_frame, &pp_disp_mb_modes, &pp_disp_b_modes, &pp_disp_mvs, &mfqe, - NULL -}; +static const arg_def_t *vp8_pp_args[] = { &addnoise_level, &deblock, + &demacroblock_level, &mfqe, NULL }; #endif #if CONFIG_LIBYUV static INLINE int libyuv_scale(vpx_image_t *src, vpx_image_t *dst, - FilterModeEnum mode) { + FilterModeEnum mode) { #if CONFIG_VP9_HIGHBITDEPTH if (src->fmt == VPX_IMG_FMT_I42016) { assert(dst->fmt == VPX_IMG_FMT_I42016); - return I420Scale_16((uint16_t*)src->planes[VPX_PLANE_Y], - src->stride[VPX_PLANE_Y]/2, - (uint16_t*)src->planes[VPX_PLANE_U], - src->stride[VPX_PLANE_U]/2, - 
(uint16_t*)src->planes[VPX_PLANE_V], - src->stride[VPX_PLANE_V]/2, - src->d_w, src->d_h, - (uint16_t*)dst->planes[VPX_PLANE_Y], - dst->stride[VPX_PLANE_Y]/2, - (uint16_t*)dst->planes[VPX_PLANE_U], - dst->stride[VPX_PLANE_U]/2, - (uint16_t*)dst->planes[VPX_PLANE_V], - dst->stride[VPX_PLANE_V]/2, - dst->d_w, dst->d_h, - mode); + return I420Scale_16( + (uint16_t *)src->planes[VPX_PLANE_Y], src->stride[VPX_PLANE_Y] / 2, + (uint16_t *)src->planes[VPX_PLANE_U], src->stride[VPX_PLANE_U] / 2, + (uint16_t *)src->planes[VPX_PLANE_V], src->stride[VPX_PLANE_V] / 2, + src->d_w, src->d_h, (uint16_t *)dst->planes[VPX_PLANE_Y], + dst->stride[VPX_PLANE_Y] / 2, (uint16_t *)dst->planes[VPX_PLANE_U], + dst->stride[VPX_PLANE_U] / 2, (uint16_t *)dst->planes[VPX_PLANE_V], + dst->stride[VPX_PLANE_V] / 2, dst->d_w, dst->d_h, mode); } #endif assert(src->fmt == VPX_IMG_FMT_I420); assert(dst->fmt == VPX_IMG_FMT_I420); return I420Scale(src->planes[VPX_PLANE_Y], src->stride[VPX_PLANE_Y], src->planes[VPX_PLANE_U], src->stride[VPX_PLANE_U], - src->planes[VPX_PLANE_V], src->stride[VPX_PLANE_V], - src->d_w, src->d_h, - dst->planes[VPX_PLANE_Y], dst->stride[VPX_PLANE_Y], + src->planes[VPX_PLANE_V], src->stride[VPX_PLANE_V], src->d_w, + src->d_h, dst->planes[VPX_PLANE_Y], dst->stride[VPX_PLANE_Y], dst->planes[VPX_PLANE_U], dst->stride[VPX_PLANE_U], - dst->planes[VPX_PLANE_V], dst->stride[VPX_PLANE_V], - dst->d_w, dst->d_h, - mode); + dst->planes[VPX_PLANE_V], dst->stride[VPX_PLANE_V], dst->d_w, + dst->d_h, mode); } #endif void usage_exit(void) { int i; - fprintf(stderr, "Usage: %s filename\n\n" - "Options:\n", exec_name); + fprintf(stderr, + "Usage: %s filename\n\n" + "Options:\n", + exec_name); arg_show_usage(stderr, all_args); #if CONFIG_VP8_DECODER fprintf(stderr, "\nVP8 Postprocessing Options:\n"); @@ -193,27 +185,25 @@ void usage_exit(void) { "\n\t%% - Frame number, zero padded to places (1..9)" "\n\n Pattern arguments are only supported in conjunction " "with the --yv12 and\n --i420 options. 
If the -o option is " - "not specified, the output will be\n directed to stdout.\n" - ); + "not specified, the output will be\n directed to stdout.\n"); fprintf(stderr, "\nIncluded decoders:\n\n"); for (i = 0; i < get_vpx_decoder_count(); ++i) { const VpxInterface *const decoder = get_vpx_decoder_by_index(i); - fprintf(stderr, " %-6s - %s\n", - decoder->name, vpx_codec_iface_name(decoder->codec_interface())); + fprintf(stderr, " %-6s - %s\n", decoder->name, + vpx_codec_iface_name(decoder->codec_interface())); } exit(EXIT_FAILURE); } -static int raw_read_frame(FILE *infile, uint8_t **buffer, - size_t *bytes_read, size_t *buffer_size) { +static int raw_read_frame(FILE *infile, uint8_t **buffer, size_t *bytes_read, + size_t *buffer_size) { char raw_hdr[RAW_FRAME_HDR_SZ]; size_t frame_size = 0; if (fread(raw_hdr, RAW_FRAME_HDR_SZ, 1, infile) != 1) { - if (!feof(infile)) - warn("Failed to read RAW frame size\n"); + if (!feof(infile)) warn("Failed to read RAW frame size\n"); } else { const size_t kCorruptFrameThreshold = 256 * 1024 * 1024; const size_t kFrameTooSmallThreshold = 256 * 1024; @@ -257,17 +247,15 @@ static int read_frame(struct VpxDecInputContext *input, uint8_t **buf, switch (input->vpx_input_ctx->file_type) { #if CONFIG_WEBM_IO case FILE_TYPE_WEBM: - return webm_read_frame(input->webm_ctx, - buf, bytes_in_buffer, buffer_size); + return webm_read_frame(input->webm_ctx, buf, bytes_in_buffer); #endif case FILE_TYPE_RAW: - return raw_read_frame(input->vpx_input_ctx->file, - buf, bytes_in_buffer, buffer_size); + return raw_read_frame(input->vpx_input_ctx->file, buf, bytes_in_buffer, + buffer_size); case FILE_TYPE_IVF: - return ivf_read_frame(input->vpx_input_ctx->file, - buf, bytes_in_buffer, buffer_size); - default: - return 1; + return ivf_read_frame(input->vpx_input_ctx->file, buf, bytes_in_buffer, + buffer_size); + default: return 1; } } @@ -280,7 +268,7 @@ static void update_image_md5(const vpx_image_t *img, const int planes[3], const unsigned char *buf = 
img->planes[plane]; const int stride = img->stride[plane]; const int w = vpx_img_plane_width(img, plane) * - ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1); + ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1); const int h = vpx_img_plane_height(img, plane); for (y = 0; y < h; ++y) { @@ -326,8 +314,8 @@ static int file_is_raw(struct VpxInputContext *input) { if (mem_get_le32(buf) < 256 * 1024 * 1024) { for (i = 0; i < get_vpx_decoder_count(); ++i) { const VpxInterface *const decoder = get_vpx_decoder_by_index(i); - if (!vpx_codec_peek_stream_info(decoder->codec_interface(), - buf + 4, 32 - 4, &si)) { + if (!vpx_codec_peek_stream_info(decoder->codec_interface(), buf + 4, + 32 - 4, &si)) { is_raw = 1; input->fourcc = decoder->fourcc; input->width = si.w; @@ -346,13 +334,13 @@ static int file_is_raw(struct VpxInputContext *input) { static void show_progress(int frame_in, int frame_out, uint64_t dx_time) { fprintf(stderr, - "%d decoded frames/%d showed frames in %"PRId64" us (%.2f fps)\r", + "%d decoded frames/%d showed frames in %" PRId64 " us (%.2f fps)\r", frame_in, frame_out, dx_time, (double)frame_out * 1000000.0 / (double)dx_time); } struct ExternalFrameBuffer { - uint8_t* data; + uint8_t *data; size_t size; int in_use; }; @@ -371,23 +359,19 @@ static int get_vp9_frame_buffer(void *cb_priv, size_t min_size, int i; struct ExternalFrameBufferList *const ext_fb_list = (struct ExternalFrameBufferList *)cb_priv; - if (ext_fb_list == NULL) - return -1; + if (ext_fb_list == NULL) return -1; // Find a free frame buffer. 
for (i = 0; i < ext_fb_list->num_external_frame_buffers; ++i) { - if (!ext_fb_list->ext_fb[i].in_use) - break; + if (!ext_fb_list->ext_fb[i].in_use) break; } - if (i == ext_fb_list->num_external_frame_buffers) - return -1; + if (i == ext_fb_list->num_external_frame_buffers) return -1; if (ext_fb_list->ext_fb[i].size < min_size) { free(ext_fb_list->ext_fb[i].data); ext_fb_list->ext_fb[i].data = (uint8_t *)calloc(min_size, sizeof(uint8_t)); - if (!ext_fb_list->ext_fb[i].data) - return -1; + if (!ext_fb_list->ext_fb[i].data) return -1; ext_fb_list->ext_fb[i].size = min_size; } @@ -428,47 +412,22 @@ static void generate_filename(const char *pattern, char *out, size_t q_len, /* parse the pattern */ q[q_len - 1] = '\0'; switch (p[1]) { - case 'w': - snprintf(q, q_len - 1, "%d", d_w); - break; - case 'h': - snprintf(q, q_len - 1, "%d", d_h); - break; - case '1': - snprintf(q, q_len - 1, "%d", frame_in); - break; - case '2': - snprintf(q, q_len - 1, "%02d", frame_in); - break; - case '3': - snprintf(q, q_len - 1, "%03d", frame_in); - break; - case '4': - snprintf(q, q_len - 1, "%04d", frame_in); - break; - case '5': - snprintf(q, q_len - 1, "%05d", frame_in); - break; - case '6': - snprintf(q, q_len - 1, "%06d", frame_in); - break; - case '7': - snprintf(q, q_len - 1, "%07d", frame_in); - break; - case '8': - snprintf(q, q_len - 1, "%08d", frame_in); - break; - case '9': - snprintf(q, q_len - 1, "%09d", frame_in); - break; - default: - die("Unrecognized pattern %%%c\n", p[1]); - break; + case 'w': snprintf(q, q_len - 1, "%d", d_w); break; + case 'h': snprintf(q, q_len - 1, "%d", d_h); break; + case '1': snprintf(q, q_len - 1, "%d", frame_in); break; + case '2': snprintf(q, q_len - 1, "%02d", frame_in); break; + case '3': snprintf(q, q_len - 1, "%03d", frame_in); break; + case '4': snprintf(q, q_len - 1, "%04d", frame_in); break; + case '5': snprintf(q, q_len - 1, "%05d", frame_in); break; + case '6': snprintf(q, q_len - 1, "%06d", frame_in); break; + case '7': snprintf(q, 
q_len - 1, "%07d", frame_in); break; + case '8': snprintf(q, q_len - 1, "%08d", frame_in); break; + case '9': snprintf(q, q_len - 1, "%09d", frame_in); break; + default: die("Unrecognized pattern %%%c\n", p[1]); break; } pat_len = strlen(q); - if (pat_len >= q_len - 1) - die("Output filename too long.\n"); + if (pat_len >= q_len - 1) die("Output filename too long.\n"); q += pat_len; p += 2; q_len -= pat_len; @@ -481,8 +440,7 @@ static void generate_filename(const char *pattern, char *out, size_t q_len, else copy_len = next_pat - p; - if (copy_len >= q_len - 1) - die("Output filename too long.\n"); + if (copy_len >= q_len - 1) die("Output filename too long.\n"); memcpy(q, p, copy_len); q[copy_len] = '\0'; @@ -500,8 +458,7 @@ static int is_single_file(const char *outfile_pattern) { p = strchr(p, '%'); if (p && p[1] >= '1' && p[1] <= '9') return 0; // pattern contains sequence number, so it's not unique - if (p) - p++; + if (p) p++; } while (p); return 1; @@ -510,8 +467,7 @@ static int is_single_file(const char *outfile_pattern) { static void print_md5(unsigned char digest[16], const char *filename) { int i; - for (i = 0; i < 16; ++i) - printf("%02x", digest[i]); + for (i = 0; i < 16; ++i) printf("%02x", digest[i]); printf(" %s\n", filename); } @@ -521,8 +477,7 @@ static FILE *open_outfile(const char *name) { return stdout; } else { FILE *file = fopen(name, "wb"); - if (!file) - fatal("Failed to open output file '%s'", name); + if (!file) fatal("Failed to open output file '%s'", name); return file; } } @@ -531,65 +486,60 @@ static FILE *open_outfile(const char *name) { static int img_shifted_realloc_required(const vpx_image_t *img, const vpx_image_t *shifted, vpx_img_fmt_t required_fmt) { - return img->d_w != shifted->d_w || - img->d_h != shifted->d_h || + return img->d_w != shifted->d_w || img->d_h != shifted->d_h || required_fmt != shifted->fmt; } #endif static int main_loop(int argc, const char **argv_) { - vpx_codec_ctx_t decoder; - char *fn = NULL; - int i; - 
uint8_t *buf = NULL; - size_t bytes_in_buffer = 0, buffer_size = 0; - FILE *infile; - int frame_in = 0, frame_out = 0, flipuv = 0, noblit = 0; - int do_md5 = 0, progress = 0, frame_parallel = 0; - int stop_after = 0, postproc = 0, summary = 0, quiet = 1; - int arg_skip = 0; - int ec_enabled = 0; - int keep_going = 0; + vpx_codec_ctx_t decoder; + char *fn = NULL; + int i; + uint8_t *buf = NULL; + size_t bytes_in_buffer = 0, buffer_size = 0; + FILE *infile; + int frame_in = 0, frame_out = 0, flipuv = 0, noblit = 0; + int do_md5 = 0, progress = 0, frame_parallel = 0; + int stop_after = 0, postproc = 0, summary = 0, quiet = 1; + int arg_skip = 0; + int ec_enabled = 0; + int keep_going = 0; const VpxInterface *interface = NULL; const VpxInterface *fourcc_interface = NULL; uint64_t dx_time = 0; - struct arg arg; - char **argv, **argi, **argj; + struct arg arg; + char **argv, **argi, **argj; - int single_file; - int use_y4m = 1; - int opt_yv12 = 0; - int opt_i420 = 0; - vpx_codec_dec_cfg_t cfg = {0, 0, 0}; + int single_file; + int use_y4m = 1; + int opt_yv12 = 0; + int opt_i420 = 0; + vpx_codec_dec_cfg_t cfg = { 0, 0, 0 }; #if CONFIG_VP9_HIGHBITDEPTH - unsigned int output_bit_depth = 0; + unsigned int output_bit_depth = 0; #endif #if CONFIG_VP8_DECODER - vp8_postproc_cfg_t vp8_pp_cfg = {0}; - int vp8_dbg_color_ref_frame = 0; - int vp8_dbg_color_mb_modes = 0; - int vp8_dbg_color_b_modes = 0; - int vp8_dbg_display_mv = 0; + vp8_postproc_cfg_t vp8_pp_cfg = { 0, 0, 0 }; #endif - int frames_corrupted = 0; - int dec_flags = 0; - int do_scale = 0; - vpx_image_t *scaled_img = NULL; + int frames_corrupted = 0; + int dec_flags = 0; + int do_scale = 0; + vpx_image_t *scaled_img = NULL; #if CONFIG_VP9_HIGHBITDEPTH - vpx_image_t *img_shifted = NULL; + vpx_image_t *img_shifted = NULL; #endif - int frame_avail, got_data, flush_decoder = 0; - int num_external_frame_buffers = 0; - struct ExternalFrameBufferList ext_fb_list = {0, NULL}; + int frame_avail, got_data, flush_decoder = 0; + int 
num_external_frame_buffers = 0; + struct ExternalFrameBufferList ext_fb_list = { 0, NULL }; const char *outfile_pattern = NULL; - char outfile_name[PATH_MAX] = {0}; + char outfile_name[PATH_MAX] = { 0 }; FILE *outfile = NULL; MD5Context md5_ctx; unsigned char md5_digest[16]; - struct VpxDecInputContext input = {NULL, NULL}; + struct VpxDecInputContext input = { NULL, NULL }; struct VpxInputContext vpx_input_ctx; #if CONFIG_WEBM_IO struct WebmInputContext webm_ctx; @@ -642,7 +592,7 @@ static int main_loop(int argc, const char **argv_) { summary = 1; else if (arg_match(&arg, &threadsarg, argi)) cfg.threads = arg_parse_uint(&arg); -#if CONFIG_VP9_DECODER || CONFIG_VP10_DECODER +#if CONFIG_VP9_DECODER else if (arg_match(&arg, &frameparallelarg, argi)) frame_parallel = 1; #endif @@ -674,38 +624,6 @@ static int main_loop(int argc, const char **argv_) { } else if (arg_match(&arg, &mfqe, argi)) { postproc = 1; vp8_pp_cfg.post_proc_flag |= VP8_MFQE; - } else if (arg_match(&arg, &pp_debug_info, argi)) { - unsigned int level = arg_parse_uint(&arg); - - postproc = 1; - vp8_pp_cfg.post_proc_flag &= ~0x7; - - if (level) - vp8_pp_cfg.post_proc_flag |= level; - } else if (arg_match(&arg, &pp_disp_ref_frame, argi)) { - unsigned int flags = arg_parse_int(&arg); - if (flags) { - postproc = 1; - vp8_dbg_color_ref_frame = flags; - } - } else if (arg_match(&arg, &pp_disp_mb_modes, argi)) { - unsigned int flags = arg_parse_int(&arg); - if (flags) { - postproc = 1; - vp8_dbg_color_mb_modes = flags; - } - } else if (arg_match(&arg, &pp_disp_b_modes, argi)) { - unsigned int flags = arg_parse_int(&arg); - if (flags) { - postproc = 1; - vp8_dbg_color_b_modes = flags; - } - } else if (arg_match(&arg, &pp_disp_mvs, argi)) { - unsigned int flags = arg_parse_int(&arg); - if (flags) { - postproc = 1; - vp8_dbg_display_mv = flags; - } } else if (arg_match(&arg, &error_concealment, argi)) { ec_enabled = 1; } @@ -772,7 +690,8 @@ static int main_loop(int argc, const char **argv_) { if (use_y4m && 
!noblit) { if (!single_file) { - fprintf(stderr, "YUV4MPEG2 not supported with output patterns," + fprintf(stderr, + "YUV4MPEG2 not supported with output patterns," " try --i420 or --yv12 or --rawvideo.\n"); return EXIT_FAILURE; } @@ -780,7 +699,8 @@ static int main_loop(int argc, const char **argv_) { #if CONFIG_WEBM_IO if (vpx_input_ctx.file_type == FILE_TYPE_WEBM) { if (webm_guess_framerate(input.webm_ctx, input.vpx_input_ctx)) { - fprintf(stderr, "Failed to guess framerate -- error parsing " + fprintf(stderr, + "Failed to guess framerate -- error parsing " "webm file?\n"); return EXIT_FAILURE; } @@ -794,69 +714,32 @@ static int main_loop(int argc, const char **argv_) { else interface = fourcc_interface; - if (!interface) - interface = get_vpx_decoder_by_index(0); + if (!interface) interface = get_vpx_decoder_by_index(0); dec_flags = (postproc ? VPX_CODEC_USE_POSTPROC : 0) | (ec_enabled ? VPX_CODEC_USE_ERROR_CONCEALMENT : 0) | (frame_parallel ? VPX_CODEC_USE_FRAME_THREADING : 0); - if (vpx_codec_dec_init(&decoder, interface->codec_interface(), - &cfg, dec_flags)) { + if (vpx_codec_dec_init(&decoder, interface->codec_interface(), &cfg, + dec_flags)) { fprintf(stderr, "Failed to initialize decoder: %s\n", vpx_codec_error(&decoder)); return EXIT_FAILURE; } - if (!quiet) - fprintf(stderr, "%s\n", decoder.name); + if (!quiet) fprintf(stderr, "%s\n", decoder.name); #if CONFIG_VP8_DECODER - if (vp8_pp_cfg.post_proc_flag - && vpx_codec_control(&decoder, VP8_SET_POSTPROC, &vp8_pp_cfg)) { + if (vp8_pp_cfg.post_proc_flag && + vpx_codec_control(&decoder, VP8_SET_POSTPROC, &vp8_pp_cfg)) { fprintf(stderr, "Failed to configure postproc: %s\n", vpx_codec_error(&decoder)); return EXIT_FAILURE; } - - if (vp8_dbg_color_ref_frame - && vpx_codec_control(&decoder, VP8_SET_DBG_COLOR_REF_FRAME, - vp8_dbg_color_ref_frame)) { - fprintf(stderr, "Failed to configure reference block visualizer: %s\n", - vpx_codec_error(&decoder)); - return EXIT_FAILURE; - } - - if (vp8_dbg_color_mb_modes - 
&& vpx_codec_control(&decoder, VP8_SET_DBG_COLOR_MB_MODES, - vp8_dbg_color_mb_modes)) { - fprintf(stderr, "Failed to configure macro block visualizer: %s\n", - vpx_codec_error(&decoder)); - return EXIT_FAILURE; - } - - if (vp8_dbg_color_b_modes - && vpx_codec_control(&decoder, VP8_SET_DBG_COLOR_B_MODES, - vp8_dbg_color_b_modes)) { - fprintf(stderr, "Failed to configure block visualizer: %s\n", - vpx_codec_error(&decoder)); - return EXIT_FAILURE; - } - - if (vp8_dbg_display_mv - && vpx_codec_control(&decoder, VP8_SET_DBG_DISPLAY_MV, - vp8_dbg_display_mv)) { - fprintf(stderr, "Failed to configure motion vector visualizer: %s\n", - vpx_codec_error(&decoder)); - return EXIT_FAILURE; - } #endif - - if (arg_skip) - fprintf(stderr, "Skipping first %d frames.\n", arg_skip); + if (arg_skip) fprintf(stderr, "Skipping first %d frames.\n", arg_skip); while (arg_skip) { - if (read_frame(&input, &buf, &bytes_in_buffer, &buffer_size)) - break; + if (read_frame(&input, &buf, &bytes_in_buffer, &buffer_size)) break; arg_skip--; } @@ -864,9 +747,9 @@ static int main_loop(int argc, const char **argv_) { ext_fb_list.num_external_frame_buffers = num_external_frame_buffers; ext_fb_list.ext_fb = (struct ExternalFrameBuffer *)calloc( num_external_frame_buffers, sizeof(*ext_fb_list.ext_fb)); - if (vpx_codec_set_frame_buffer_functions( - &decoder, get_vp9_frame_buffer, release_vp9_frame_buffer, - &ext_fb_list)) { + if (vpx_codec_set_frame_buffer_functions(&decoder, get_vp9_frame_buffer, + release_vp9_frame_buffer, + &ext_fb_list)) { fprintf(stderr, "Failed to configure external frame buffers: %s\n", vpx_codec_error(&decoder)); return EXIT_FAILURE; @@ -878,10 +761,10 @@ static int main_loop(int argc, const char **argv_) { /* Decode file */ while (frame_avail || got_data) { - vpx_codec_iter_t iter = NULL; - vpx_image_t *img; + vpx_codec_iter_t iter = NULL; + vpx_image_t *img; struct vpx_usec_timer timer; - int corrupted = 0; + int corrupted = 0; frame_avail = 0; if (!stop_after || frame_in < 
stop_after) { @@ -891,16 +774,14 @@ static int main_loop(int argc, const char **argv_) { vpx_usec_timer_start(&timer); - if (vpx_codec_decode(&decoder, buf, (unsigned int)bytes_in_buffer, - NULL, 0)) { + if (vpx_codec_decode(&decoder, buf, (unsigned int)bytes_in_buffer, NULL, + 0)) { const char *detail = vpx_codec_error_detail(&decoder); - warn("Failed to decode frame %d: %s", - frame_in, vpx_codec_error(&decoder)); + warn("Failed to decode frame %d: %s", frame_in, + vpx_codec_error(&decoder)); - if (detail) - warn("Additional information: %s", detail); - if (!keep_going) - goto fail; + if (detail) warn("Additional information: %s", detail); + if (!keep_going) goto fail; } vpx_usec_timer_mark(&timer); @@ -933,17 +814,15 @@ static int main_loop(int argc, const char **argv_) { if (!frame_parallel && vpx_codec_control(&decoder, VP8D_GET_FRAME_CORRUPTED, &corrupted)) { warn("Failed VP8_GET_FRAME_CORRUPTED: %s", vpx_codec_error(&decoder)); - if (!keep_going) - goto fail; + if (!keep_going) goto fail; } frames_corrupted += corrupted; - if (progress) - show_progress(frame_in, frame_out, dx_time); + if (progress) show_progress(frame_in, frame_out, dx_time); if (!noblit && img) { - const int PLANES_YUV[] = {VPX_PLANE_Y, VPX_PLANE_U, VPX_PLANE_V}; - const int PLANES_YVU[] = {VPX_PLANE_Y, VPX_PLANE_V, VPX_PLANE_U}; + const int PLANES_YUV[] = { VPX_PLANE_Y, VPX_PLANE_U, VPX_PLANE_V }; + const int PLANES_YVU[] = { VPX_PLANE_Y, VPX_PLANE_V, VPX_PLANE_U }; const int *planes = flipuv ? 
PLANES_YVU : PLANES_YUV; if (do_scale) { @@ -967,8 +846,8 @@ static int main_loop(int argc, const char **argv_) { render_height = render_size[1]; } } - scaled_img = vpx_img_alloc(NULL, img->fmt, render_width, - render_height, 16); + scaled_img = + vpx_img_alloc(NULL, img->fmt, render_width, render_height, 16); scaled_img->bit_depth = img->bit_depth; } @@ -977,7 +856,8 @@ static int main_loop(int argc, const char **argv_) { libyuv_scale(img, scaled_img, kFilterBox); img = scaled_img; #else - fprintf(stderr, "Failed to scale output frame: %s.\n" + fprintf(stderr, + "Failed to scale output frame: %s.\n" "Scaling is disabled in this configuration. " "To enable scaling, configure with --enable-libyuv\n", vpx_codec_error(&decoder)); @@ -992,22 +872,22 @@ static int main_loop(int argc, const char **argv_) { } // Shift up or down if necessary if (output_bit_depth != 0 && output_bit_depth != img->bit_depth) { - const vpx_img_fmt_t shifted_fmt = output_bit_depth == 8 ? - img->fmt ^ (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) : - img->fmt | VPX_IMG_FMT_HIGHBITDEPTH; + const vpx_img_fmt_t shifted_fmt = + output_bit_depth == 8 + ? 
img->fmt ^ (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) + : img->fmt | VPX_IMG_FMT_HIGHBITDEPTH; if (img_shifted && img_shifted_realloc_required(img, img_shifted, shifted_fmt)) { vpx_img_free(img_shifted); img_shifted = NULL; } if (!img_shifted) { - img_shifted = vpx_img_alloc(NULL, shifted_fmt, - img->d_w, img->d_h, 16); + img_shifted = + vpx_img_alloc(NULL, shifted_fmt, img->d_w, img->d_h, 16); img_shifted->bit_depth = output_bit_depth; } if (output_bit_depth > img->bit_depth) { - vpx_img_upshift(img_shifted, img, - output_bit_depth - img->bit_depth); + vpx_img_upshift(img_shifted, img, output_bit_depth - img->bit_depth); } else { vpx_img_downshift(img_shifted, img, img->bit_depth - output_bit_depth); @@ -1018,7 +898,7 @@ static int main_loop(int argc, const char **argv_) { if (single_file) { if (use_y4m) { - char buf[Y4M_BUFFER_SIZE] = {0}; + char buf[Y4M_BUFFER_SIZE] = { 0 }; size_t len = 0; if (img->fmt == VPX_IMG_FMT_I440 || img->fmt == VPX_IMG_FMT_I44016) { fprintf(stderr, "Cannot produce y4m output for 440 sampling.\n"); @@ -1026,11 +906,9 @@ static int main_loop(int argc, const char **argv_) { } if (frame_out == 1) { // Y4M file header - len = y4m_write_file_header(buf, sizeof(buf), - vpx_input_ctx.width, - vpx_input_ctx.height, - &vpx_input_ctx.framerate, - img->fmt, img->bit_depth); + len = y4m_write_file_header( + buf, sizeof(buf), vpx_input_ctx.width, vpx_input_ctx.height, + &vpx_input_ctx.framerate, img->fmt, img->bit_depth); if (do_md5) { MD5Update(&md5_ctx, (md5byte *)buf, (unsigned int)len); } else { @@ -1058,7 +936,8 @@ static int main_loop(int argc, const char **argv_) { } if (opt_yv12) { if ((img->fmt != VPX_IMG_FMT_I420 && - img->fmt != VPX_IMG_FMT_YV12) || img->bit_depth != 8) { + img->fmt != VPX_IMG_FMT_YV12) || + img->bit_depth != 8) { fprintf(stderr, "Cannot produce yv12 output for bit-stream.\n"); goto fail; } @@ -1072,8 +951,8 @@ static int main_loop(int argc, const char **argv_) { write_image_file(img, planes, outfile); } } else { - 
generate_filename(outfile_pattern, outfile_name, PATH_MAX, - img->d_w, img->d_h, frame_in); + generate_filename(outfile_pattern, outfile_name, PATH_MAX, img->d_w, + img->d_h, frame_in); if (do_md5) { MD5Init(&md5_ctx); update_image_md5(img, planes, &md5_ctx); @@ -1118,8 +997,7 @@ fail: webm_free(input.webm_ctx); #endif - if (input.vpx_input_ctx->file_type != FILE_TYPE_WEBM) - free(buf); + if (input.vpx_input_ctx->file_type != FILE_TYPE_WEBM) free(buf); if (scaled_img) vpx_img_free(scaled_img); #if CONFIG_VP9_HIGHBITDEPTH @@ -1154,7 +1032,6 @@ int main(int argc, const char **argv_) { } } free(argv); - for (i = 0; !error && i < loops; i++) - error = main_loop(argc, argv_); + for (i = 0; !error && i < loops; i++) error = main_loop(argc, argv_); return error; } diff --git a/libs/libvpx/vpxenc.c b/libs/libvpx/vpxenc.c index f14470a6f2..6e0af57a42 100644 --- a/libs/libvpx/vpxenc.c +++ b/libs/libvpx/vpxenc.c @@ -32,10 +32,10 @@ #include "./ivfenc.h" #include "./tools_common.h" -#if CONFIG_VP8_ENCODER || CONFIG_VP9_ENCODER || CONFIG_VP10_ENCODER +#if CONFIG_VP8_ENCODER || CONFIG_VP9_ENCODER #include "vpx/vp8cx.h" #endif -#if CONFIG_VP8_DECODER || CONFIG_VP9_DECODER || CONFIG_VP10_DECODER +#if CONFIG_VP8_DECODER || CONFIG_VP9_DECODER #include "vpx/vp8dx.h" #endif @@ -51,8 +51,7 @@ #include "./y4minput.h" /* Swallow warnings about unused results of fread/fwrite */ -static size_t wrap_fread(void *ptr, size_t size, size_t nmemb, - FILE *stream) { +static size_t wrap_fread(void *ptr, size_t size, size_t nmemb, FILE *stream) { return fread(ptr, size, nmemb, stream); } #define fread wrap_fread @@ -63,7 +62,6 @@ static size_t wrap_fwrite(const void *ptr, size_t size, size_t nmemb, } #define fwrite wrap_fwrite - static const char *exec_name; static void warn_or_exit_on_errorv(vpx_codec_ctx_t *ctx, int fatal, @@ -74,11 +72,9 @@ static void warn_or_exit_on_errorv(vpx_codec_ctx_t *ctx, int fatal, vfprintf(stderr, s, ap); fprintf(stderr, ": %s\n", vpx_codec_error(ctx)); - if (detail) - 
fprintf(stderr, " %s\n", detail); + if (detail) fprintf(stderr, " %s\n", detail); - if (fatal) - exit(EXIT_FAILURE); + if (fatal) exit(EXIT_FAILURE); } } @@ -105,8 +101,7 @@ static int read_frame(struct VpxInputContext *input_ctx, vpx_image_t *img) { int shortread = 0; if (input_ctx->file_type == FILE_TYPE_Y4M) { - if (y4m_input_fetch_frame(y4m, f, img) < 1) - return 0; + if (y4m_input_fetch_frame(y4m, f, img) < 1) return 0; } else { shortread = read_yuv_frame(input_ctx, img); } @@ -128,274 +123,294 @@ static int fourcc_is_ivf(const char detect[4]) { return 0; } -static const arg_def_t debugmode = ARG_DEF( - "D", "debug", 0, "Debug mode (makes output deterministic)"); -static const arg_def_t outputfile = ARG_DEF( - "o", "output", 1, "Output filename"); -static const arg_def_t use_yv12 = ARG_DEF( - NULL, "yv12", 0, "Input file is YV12 "); -static const arg_def_t use_i420 = ARG_DEF( - NULL, "i420", 0, "Input file is I420 (default)"); -static const arg_def_t use_i422 = ARG_DEF( - NULL, "i422", 0, "Input file is I422"); -static const arg_def_t use_i444 = ARG_DEF( - NULL, "i444", 0, "Input file is I444"); -static const arg_def_t use_i440 = ARG_DEF( - NULL, "i440", 0, "Input file is I440"); -static const arg_def_t codecarg = ARG_DEF( - NULL, "codec", 1, "Codec to use"); -static const arg_def_t passes = ARG_DEF( - "p", "passes", 1, "Number of passes (1/2)"); -static const arg_def_t pass_arg = ARG_DEF( - NULL, "pass", 1, "Pass to execute (1/2)"); -static const arg_def_t fpf_name = ARG_DEF( - NULL, "fpf", 1, "First pass statistics file name"); +static const arg_def_t debugmode = + ARG_DEF("D", "debug", 0, "Debug mode (makes output deterministic)"); +static const arg_def_t outputfile = + ARG_DEF("o", "output", 1, "Output filename"); +static const arg_def_t use_yv12 = + ARG_DEF(NULL, "yv12", 0, "Input file is YV12 "); +static const arg_def_t use_i420 = + ARG_DEF(NULL, "i420", 0, "Input file is I420 (default)"); +static const arg_def_t use_i422 = + ARG_DEF(NULL, "i422", 0, 
"Input file is I422"); +static const arg_def_t use_i444 = + ARG_DEF(NULL, "i444", 0, "Input file is I444"); +static const arg_def_t use_i440 = + ARG_DEF(NULL, "i440", 0, "Input file is I440"); +static const arg_def_t codecarg = ARG_DEF(NULL, "codec", 1, "Codec to use"); +static const arg_def_t passes = + ARG_DEF("p", "passes", 1, "Number of passes (1/2)"); +static const arg_def_t pass_arg = + ARG_DEF(NULL, "pass", 1, "Pass to execute (1/2)"); +static const arg_def_t fpf_name = + ARG_DEF(NULL, "fpf", 1, "First pass statistics file name"); #if CONFIG_FP_MB_STATS -static const arg_def_t fpmbf_name = ARG_DEF( - NULL, "fpmbf", 1, "First pass block statistics file name"); +static const arg_def_t fpmbf_name = + ARG_DEF(NULL, "fpmbf", 1, "First pass block statistics file name"); #endif -static const arg_def_t limit = ARG_DEF( - NULL, "limit", 1, "Stop encoding after n input frames"); -static const arg_def_t skip = ARG_DEF( - NULL, "skip", 1, "Skip the first n input frames"); -static const arg_def_t deadline = ARG_DEF( - "d", "deadline", 1, "Deadline per frame (usec)"); -static const arg_def_t best_dl = ARG_DEF( - NULL, "best", 0, "Use Best Quality Deadline"); -static const arg_def_t good_dl = ARG_DEF( - NULL, "good", 0, "Use Good Quality Deadline"); -static const arg_def_t rt_dl = ARG_DEF( - NULL, "rt", 0, "Use Realtime Quality Deadline"); -static const arg_def_t quietarg = ARG_DEF( - "q", "quiet", 0, "Do not print encode progress"); -static const arg_def_t verbosearg = ARG_DEF( - "v", "verbose", 0, "Show encoder parameters"); -static const arg_def_t psnrarg = ARG_DEF( - NULL, "psnr", 0, "Show PSNR in status line"); +static const arg_def_t limit = + ARG_DEF(NULL, "limit", 1, "Stop encoding after n input frames"); +static const arg_def_t skip = + ARG_DEF(NULL, "skip", 1, "Skip the first n input frames"); +static const arg_def_t deadline = + ARG_DEF("d", "deadline", 1, "Deadline per frame (usec)"); +static const arg_def_t best_dl = + ARG_DEF(NULL, "best", 0, "Use Best 
Quality Deadline"); +static const arg_def_t good_dl = + ARG_DEF(NULL, "good", 0, "Use Good Quality Deadline"); +static const arg_def_t rt_dl = + ARG_DEF(NULL, "rt", 0, "Use Realtime Quality Deadline"); +static const arg_def_t quietarg = + ARG_DEF("q", "quiet", 0, "Do not print encode progress"); +static const arg_def_t verbosearg = + ARG_DEF("v", "verbose", 0, "Show encoder parameters"); +static const arg_def_t psnrarg = + ARG_DEF(NULL, "psnr", 0, "Show PSNR in status line"); static const struct arg_enum_list test_decode_enum[] = { - {"off", TEST_DECODE_OFF}, - {"fatal", TEST_DECODE_FATAL}, - {"warn", TEST_DECODE_WARN}, - {NULL, 0} + { "off", TEST_DECODE_OFF }, + { "fatal", TEST_DECODE_FATAL }, + { "warn", TEST_DECODE_WARN }, + { NULL, 0 } }; static const arg_def_t recontest = ARG_DEF_ENUM( NULL, "test-decode", 1, "Test encode/decode mismatch", test_decode_enum); -static const arg_def_t framerate = ARG_DEF( - NULL, "fps", 1, "Stream frame rate (rate/scale)"); -static const arg_def_t use_webm = ARG_DEF( - NULL, "webm", 0, "Output WebM (default when WebM IO is enabled)"); -static const arg_def_t use_ivf = ARG_DEF( - NULL, "ivf", 0, "Output IVF"); -static const arg_def_t out_part = ARG_DEF( - "P", "output-partitions", 0, - "Makes encoder output partitions. 
Requires IVF output!"); -static const arg_def_t q_hist_n = ARG_DEF( - NULL, "q-hist", 1, "Show quantizer histogram (n-buckets)"); -static const arg_def_t rate_hist_n = ARG_DEF( - NULL, "rate-hist", 1, "Show rate histogram (n-buckets)"); -static const arg_def_t disable_warnings = ARG_DEF( - NULL, "disable-warnings", 0, - "Disable warnings about potentially incorrect encode settings."); -static const arg_def_t disable_warning_prompt = ARG_DEF( - "y", "disable-warning-prompt", 0, - "Display warnings, but do not prompt user to continue."); +static const arg_def_t framerate = + ARG_DEF(NULL, "fps", 1, "Stream frame rate (rate/scale)"); +static const arg_def_t use_webm = + ARG_DEF(NULL, "webm", 0, "Output WebM (default when WebM IO is enabled)"); +static const arg_def_t use_ivf = ARG_DEF(NULL, "ivf", 0, "Output IVF"); +static const arg_def_t out_part = + ARG_DEF("P", "output-partitions", 0, + "Makes encoder output partitions. Requires IVF output!"); +static const arg_def_t q_hist_n = + ARG_DEF(NULL, "q-hist", 1, "Show quantizer histogram (n-buckets)"); +static const arg_def_t rate_hist_n = + ARG_DEF(NULL, "rate-hist", 1, "Show rate histogram (n-buckets)"); +static const arg_def_t disable_warnings = + ARG_DEF(NULL, "disable-warnings", 0, + "Disable warnings about potentially incorrect encode settings."); +static const arg_def_t disable_warning_prompt = + ARG_DEF("y", "disable-warning-prompt", 0, + "Display warnings, but do not prompt user to continue."); #if CONFIG_VP9_HIGHBITDEPTH static const arg_def_t test16bitinternalarg = ARG_DEF( NULL, "test-16bit-internal", 0, "Force use of 16 bit internal buffer"); #endif -static const arg_def_t *main_args[] = { - &debugmode, - &outputfile, &codecarg, &passes, &pass_arg, &fpf_name, &limit, &skip, - &deadline, &best_dl, &good_dl, &rt_dl, - &quietarg, &verbosearg, &psnrarg, &use_webm, &use_ivf, &out_part, &q_hist_n, - &rate_hist_n, &disable_warnings, &disable_warning_prompt, &recontest, - NULL -}; +static const arg_def_t 
*main_args[] = { &debugmode, + &outputfile, + &codecarg, + &passes, + &pass_arg, + &fpf_name, + &limit, + &skip, + &deadline, + &best_dl, + &good_dl, + &rt_dl, + &quietarg, + &verbosearg, + &psnrarg, + &use_webm, + &use_ivf, + &out_part, + &q_hist_n, + &rate_hist_n, + &disable_warnings, + &disable_warning_prompt, + &recontest, + NULL }; -static const arg_def_t usage = ARG_DEF( - "u", "usage", 1, "Usage profile number to use"); -static const arg_def_t threads = ARG_DEF( - "t", "threads", 1, "Max number of threads to use"); -static const arg_def_t profile = ARG_DEF( - NULL, "profile", 1, "Bitstream profile number to use"); +static const arg_def_t usage = + ARG_DEF("u", "usage", 1, "Usage profile number to use"); +static const arg_def_t threads = + ARG_DEF("t", "threads", 1, "Max number of threads to use"); +static const arg_def_t profile = + ARG_DEF(NULL, "profile", 1, "Bitstream profile number to use"); static const arg_def_t width = ARG_DEF("w", "width", 1, "Frame width"); static const arg_def_t height = ARG_DEF("h", "height", 1, "Frame height"); #if CONFIG_WEBM_IO static const struct arg_enum_list stereo_mode_enum[] = { - {"mono", STEREO_FORMAT_MONO}, - {"left-right", STEREO_FORMAT_LEFT_RIGHT}, - {"bottom-top", STEREO_FORMAT_BOTTOM_TOP}, - {"top-bottom", STEREO_FORMAT_TOP_BOTTOM}, - {"right-left", STEREO_FORMAT_RIGHT_LEFT}, - {NULL, 0} + { "mono", STEREO_FORMAT_MONO }, + { "left-right", STEREO_FORMAT_LEFT_RIGHT }, + { "bottom-top", STEREO_FORMAT_BOTTOM_TOP }, + { "top-bottom", STEREO_FORMAT_TOP_BOTTOM }, + { "right-left", STEREO_FORMAT_RIGHT_LEFT }, + { NULL, 0 } }; static const arg_def_t stereo_mode = ARG_DEF_ENUM( NULL, "stereo-mode", 1, "Stereo 3D video format", stereo_mode_enum); #endif static const arg_def_t timebase = ARG_DEF( NULL, "timebase", 1, "Output timestamp precision (fractional seconds)"); -static const arg_def_t error_resilient = ARG_DEF( - NULL, "error-resilient", 1, "Enable error resiliency features"); -static const arg_def_t lag_in_frames = 
ARG_DEF( - NULL, "lag-in-frames", 1, "Max number of frames to lag"); +static const arg_def_t error_resilient = + ARG_DEF(NULL, "error-resilient", 1, "Enable error resiliency features"); +static const arg_def_t lag_in_frames = + ARG_DEF(NULL, "lag-in-frames", 1, "Max number of frames to lag"); -static const arg_def_t *global_args[] = { - &use_yv12, &use_i420, &use_i422, &use_i444, &use_i440, - &usage, &threads, &profile, - &width, &height, +static const arg_def_t *global_args[] = { &use_yv12, + &use_i420, + &use_i422, + &use_i444, + &use_i440, + &usage, + &threads, + &profile, + &width, + &height, #if CONFIG_WEBM_IO - &stereo_mode, + &stereo_mode, #endif - &timebase, &framerate, - &error_resilient, + &timebase, + &framerate, + &error_resilient, #if CONFIG_VP9_HIGHBITDEPTH - &test16bitinternalarg, + &test16bitinternalarg, #endif - &lag_in_frames, NULL -}; + &lag_in_frames, + NULL }; -static const arg_def_t dropframe_thresh = ARG_DEF( - NULL, "drop-frame", 1, "Temporal resampling threshold (buf %)"); -static const arg_def_t resize_allowed = ARG_DEF( - NULL, "resize-allowed", 1, "Spatial resampling enabled (bool)"); -static const arg_def_t resize_width = ARG_DEF( - NULL, "resize-width", 1, "Width of encoded frame"); -static const arg_def_t resize_height = ARG_DEF( - NULL, "resize-height", 1, "Height of encoded frame"); -static const arg_def_t resize_up_thresh = ARG_DEF( - NULL, "resize-up", 1, "Upscale threshold (buf %)"); -static const arg_def_t resize_down_thresh = ARG_DEF( - NULL, "resize-down", 1, "Downscale threshold (buf %)"); -static const struct arg_enum_list end_usage_enum[] = { - {"vbr", VPX_VBR}, - {"cbr", VPX_CBR}, - {"cq", VPX_CQ}, - {"q", VPX_Q}, - {NULL, 0} -}; -static const arg_def_t end_usage = ARG_DEF_ENUM( - NULL, "end-usage", 1, "Rate control mode", end_usage_enum); -static const arg_def_t target_bitrate = ARG_DEF( - NULL, "target-bitrate", 1, "Bitrate (kbps)"); -static const arg_def_t min_quantizer = ARG_DEF( - NULL, "min-q", 1, "Minimum (best) 
quantizer"); -static const arg_def_t max_quantizer = ARG_DEF( - NULL, "max-q", 1, "Maximum (worst) quantizer"); -static const arg_def_t undershoot_pct = ARG_DEF( - NULL, "undershoot-pct", 1, "Datarate undershoot (min) target (%)"); -static const arg_def_t overshoot_pct = ARG_DEF( - NULL, "overshoot-pct", 1, "Datarate overshoot (max) target (%)"); -static const arg_def_t buf_sz = ARG_DEF( - NULL, "buf-sz", 1, "Client buffer size (ms)"); -static const arg_def_t buf_initial_sz = ARG_DEF( - NULL, "buf-initial-sz", 1, "Client initial buffer size (ms)"); -static const arg_def_t buf_optimal_sz = ARG_DEF( - NULL, "buf-optimal-sz", 1, "Client optimal buffer size (ms)"); +static const arg_def_t dropframe_thresh = + ARG_DEF(NULL, "drop-frame", 1, "Temporal resampling threshold (buf %)"); +static const arg_def_t resize_allowed = + ARG_DEF(NULL, "resize-allowed", 1, "Spatial resampling enabled (bool)"); +static const arg_def_t resize_width = + ARG_DEF(NULL, "resize-width", 1, "Width of encoded frame"); +static const arg_def_t resize_height = + ARG_DEF(NULL, "resize-height", 1, "Height of encoded frame"); +static const arg_def_t resize_up_thresh = + ARG_DEF(NULL, "resize-up", 1, "Upscale threshold (buf %)"); +static const arg_def_t resize_down_thresh = + ARG_DEF(NULL, "resize-down", 1, "Downscale threshold (buf %)"); +static const struct arg_enum_list end_usage_enum[] = { { "vbr", VPX_VBR }, + { "cbr", VPX_CBR }, + { "cq", VPX_CQ }, + { "q", VPX_Q }, + { NULL, 0 } }; +static const arg_def_t end_usage = + ARG_DEF_ENUM(NULL, "end-usage", 1, "Rate control mode", end_usage_enum); +static const arg_def_t target_bitrate = + ARG_DEF(NULL, "target-bitrate", 1, "Bitrate (kbps)"); +static const arg_def_t min_quantizer = + ARG_DEF(NULL, "min-q", 1, "Minimum (best) quantizer"); +static const arg_def_t max_quantizer = + ARG_DEF(NULL, "max-q", 1, "Maximum (worst) quantizer"); +static const arg_def_t undershoot_pct = + ARG_DEF(NULL, "undershoot-pct", 1, "Datarate undershoot (min) target (%)"); 
+static const arg_def_t overshoot_pct = + ARG_DEF(NULL, "overshoot-pct", 1, "Datarate overshoot (max) target (%)"); +static const arg_def_t buf_sz = + ARG_DEF(NULL, "buf-sz", 1, "Client buffer size (ms)"); +static const arg_def_t buf_initial_sz = + ARG_DEF(NULL, "buf-initial-sz", 1, "Client initial buffer size (ms)"); +static const arg_def_t buf_optimal_sz = + ARG_DEF(NULL, "buf-optimal-sz", 1, "Client optimal buffer size (ms)"); static const arg_def_t *rc_args[] = { - &dropframe_thresh, &resize_allowed, &resize_width, &resize_height, - &resize_up_thresh, &resize_down_thresh, &end_usage, &target_bitrate, - &min_quantizer, &max_quantizer, &undershoot_pct, &overshoot_pct, &buf_sz, - &buf_initial_sz, &buf_optimal_sz, NULL + &dropframe_thresh, &resize_allowed, &resize_width, &resize_height, + &resize_up_thresh, &resize_down_thresh, &end_usage, &target_bitrate, + &min_quantizer, &max_quantizer, &undershoot_pct, &overshoot_pct, + &buf_sz, &buf_initial_sz, &buf_optimal_sz, NULL }; +static const arg_def_t bias_pct = + ARG_DEF(NULL, "bias-pct", 1, "CBR/VBR bias (0=CBR, 100=VBR)"); +static const arg_def_t minsection_pct = + ARG_DEF(NULL, "minsection-pct", 1, "GOP min bitrate (% of target)"); +static const arg_def_t maxsection_pct = + ARG_DEF(NULL, "maxsection-pct", 1, "GOP max bitrate (% of target)"); +static const arg_def_t *rc_twopass_args[] = { &bias_pct, &minsection_pct, + &maxsection_pct, NULL }; -static const arg_def_t bias_pct = ARG_DEF( - NULL, "bias-pct", 1, "CBR/VBR bias (0=CBR, 100=VBR)"); -static const arg_def_t minsection_pct = ARG_DEF( - NULL, "minsection-pct", 1, "GOP min bitrate (% of target)"); -static const arg_def_t maxsection_pct = ARG_DEF( - NULL, "maxsection-pct", 1, "GOP max bitrate (% of target)"); -static const arg_def_t *rc_twopass_args[] = { - &bias_pct, &minsection_pct, &maxsection_pct, NULL -}; +static const arg_def_t kf_min_dist = + ARG_DEF(NULL, "kf-min-dist", 1, "Minimum keyframe interval (frames)"); +static const arg_def_t kf_max_dist = + 
ARG_DEF(NULL, "kf-max-dist", 1, "Maximum keyframe interval (frames)"); +static const arg_def_t kf_disabled = + ARG_DEF(NULL, "disable-kf", 0, "Disable keyframe placement"); +static const arg_def_t *kf_args[] = { &kf_min_dist, &kf_max_dist, &kf_disabled, + NULL }; - -static const arg_def_t kf_min_dist = ARG_DEF( - NULL, "kf-min-dist", 1, "Minimum keyframe interval (frames)"); -static const arg_def_t kf_max_dist = ARG_DEF( - NULL, "kf-max-dist", 1, "Maximum keyframe interval (frames)"); -static const arg_def_t kf_disabled = ARG_DEF( - NULL, "disable-kf", 0, "Disable keyframe placement"); -static const arg_def_t *kf_args[] = { - &kf_min_dist, &kf_max_dist, &kf_disabled, NULL -}; - - -static const arg_def_t noise_sens = ARG_DEF( - NULL, "noise-sensitivity", 1, "Noise sensitivity (frames to blur)"); -static const arg_def_t sharpness = ARG_DEF( - NULL, "sharpness", 1, "Loop filter sharpness (0..7)"); -static const arg_def_t static_thresh = ARG_DEF( - NULL, "static-thresh", 1, "Motion detection threshold"); -static const arg_def_t auto_altref = ARG_DEF( - NULL, "auto-alt-ref", 1, "Enable automatic alt reference frames"); -static const arg_def_t arnr_maxframes = ARG_DEF( - NULL, "arnr-maxframes", 1, "AltRef max frames (0..15)"); -static const arg_def_t arnr_strength = ARG_DEF( - NULL, "arnr-strength", 1, "AltRef filter strength (0..6)"); -static const arg_def_t arnr_type = ARG_DEF( - NULL, "arnr-type", 1, "AltRef type"); +static const arg_def_t noise_sens = + ARG_DEF(NULL, "noise-sensitivity", 1, "Noise sensitivity (frames to blur)"); +static const arg_def_t sharpness = + ARG_DEF(NULL, "sharpness", 1, "Loop filter sharpness (0..7)"); +static const arg_def_t static_thresh = + ARG_DEF(NULL, "static-thresh", 1, "Motion detection threshold"); +static const arg_def_t auto_altref = + ARG_DEF(NULL, "auto-alt-ref", 1, "Enable automatic alt reference frames"); +static const arg_def_t arnr_maxframes = + ARG_DEF(NULL, "arnr-maxframes", 1, "AltRef max frames (0..15)"); +static const 
arg_def_t arnr_strength = + ARG_DEF(NULL, "arnr-strength", 1, "AltRef filter strength (0..6)"); +static const arg_def_t arnr_type = ARG_DEF(NULL, "arnr-type", 1, "AltRef type"); static const struct arg_enum_list tuning_enum[] = { - {"psnr", VP8_TUNE_PSNR}, - {"ssim", VP8_TUNE_SSIM}, - {NULL, 0} + { "psnr", VP8_TUNE_PSNR }, { "ssim", VP8_TUNE_SSIM }, { NULL, 0 } }; -static const arg_def_t tune_ssim = ARG_DEF_ENUM( - NULL, "tune", 1, "Material to favor", tuning_enum); -static const arg_def_t cq_level = ARG_DEF( - NULL, "cq-level", 1, "Constant/Constrained Quality level"); -static const arg_def_t max_intra_rate_pct = ARG_DEF( - NULL, "max-intra-rate", 1, "Max I-frame bitrate (pct)"); +static const arg_def_t tune_ssim = + ARG_DEF_ENUM(NULL, "tune", 1, "Material to favor", tuning_enum); +static const arg_def_t cq_level = + ARG_DEF(NULL, "cq-level", 1, "Constant/Constrained Quality level"); +static const arg_def_t max_intra_rate_pct = + ARG_DEF(NULL, "max-intra-rate", 1, "Max I-frame bitrate (pct)"); #if CONFIG_VP8_ENCODER -static const arg_def_t cpu_used_vp8 = ARG_DEF( - NULL, "cpu-used", 1, "CPU Used (-16..16)"); -static const arg_def_t token_parts = ARG_DEF( - NULL, "token-parts", 1, "Number of token partitions to use, log2"); -static const arg_def_t screen_content_mode = ARG_DEF( - NULL, "screen-content-mode", 1, "Screen content mode"); +static const arg_def_t cpu_used_vp8 = + ARG_DEF(NULL, "cpu-used", 1, "CPU Used (-16..16)"); +static const arg_def_t token_parts = + ARG_DEF(NULL, "token-parts", 1, "Number of token partitions to use, log2"); +static const arg_def_t screen_content_mode = + ARG_DEF(NULL, "screen-content-mode", 1, "Screen content mode"); static const arg_def_t *vp8_args[] = { - &cpu_used_vp8, &auto_altref, &noise_sens, &sharpness, &static_thresh, - &token_parts, &arnr_maxframes, &arnr_strength, &arnr_type, - &tune_ssim, &cq_level, &max_intra_rate_pct, &screen_content_mode, - NULL -}; -static const int vp8_arg_ctrl_map[] = { - VP8E_SET_CPUUSED, 
VP8E_SET_ENABLEAUTOALTREF, - VP8E_SET_NOISE_SENSITIVITY, VP8E_SET_SHARPNESS, VP8E_SET_STATIC_THRESHOLD, - VP8E_SET_TOKEN_PARTITIONS, - VP8E_SET_ARNR_MAXFRAMES, VP8E_SET_ARNR_STRENGTH, VP8E_SET_ARNR_TYPE, - VP8E_SET_TUNING, VP8E_SET_CQ_LEVEL, VP8E_SET_MAX_INTRA_BITRATE_PCT, - VP8E_SET_SCREEN_CONTENT_MODE, - 0 + &cpu_used_vp8, &auto_altref, &noise_sens, &sharpness, + &static_thresh, &token_parts, &arnr_maxframes, &arnr_strength, + &arnr_type, &tune_ssim, &cq_level, &max_intra_rate_pct, + &screen_content_mode, NULL }; +static const int vp8_arg_ctrl_map[] = { VP8E_SET_CPUUSED, + VP8E_SET_ENABLEAUTOALTREF, + VP8E_SET_NOISE_SENSITIVITY, + VP8E_SET_SHARPNESS, + VP8E_SET_STATIC_THRESHOLD, + VP8E_SET_TOKEN_PARTITIONS, + VP8E_SET_ARNR_MAXFRAMES, + VP8E_SET_ARNR_STRENGTH, + VP8E_SET_ARNR_TYPE, + VP8E_SET_TUNING, + VP8E_SET_CQ_LEVEL, + VP8E_SET_MAX_INTRA_BITRATE_PCT, + VP8E_SET_SCREEN_CONTENT_MODE, + 0 }; #endif -#if CONFIG_VP9_ENCODER || CONFIG_VP10_ENCODER -static const arg_def_t cpu_used_vp9 = ARG_DEF( - NULL, "cpu-used", 1, "CPU Used (-8..8)"); -static const arg_def_t tile_cols = ARG_DEF( - NULL, "tile-columns", 1, "Number of tile columns to use, log2"); -static const arg_def_t tile_rows = ARG_DEF( - NULL, "tile-rows", 1, "Number of tile rows to use, log2"); -static const arg_def_t lossless = ARG_DEF( - NULL, "lossless", 1, "Lossless mode (0: false (default), 1: true)"); +#if CONFIG_VP9_ENCODER +static const arg_def_t cpu_used_vp9 = + ARG_DEF(NULL, "cpu-used", 1, "CPU Used (-8..8)"); +static const arg_def_t tile_cols = + ARG_DEF(NULL, "tile-columns", 1, "Number of tile columns to use, log2"); +static const arg_def_t tile_rows = + ARG_DEF(NULL, "tile-rows", 1, + "Number of tile rows to use, log2 (set to 0 while threads > 1)"); +static const arg_def_t lossless = + ARG_DEF(NULL, "lossless", 1, "Lossless mode (0: false (default), 1: true)"); static const arg_def_t frame_parallel_decoding = ARG_DEF( NULL, "frame-parallel", 1, "Enable frame parallel decodability features"); 
static const arg_def_t aq_mode = ARG_DEF( NULL, "aq-mode", 1, "Adaptive quantization mode (0: off (default), 1: variance 2: complexity, " "3: cyclic refresh, 4: equator360)"); -static const arg_def_t frame_periodic_boost = ARG_DEF( - NULL, "frame-boost", 1, - "Enable frame periodic boost (0: off (default), 1: on)"); +static const arg_def_t alt_ref_aq = ARG_DEF(NULL, "alt-ref-aq", 1, + "Special adaptive quantization for " + "the alternate reference frames."); +static const arg_def_t frame_periodic_boost = + ARG_DEF(NULL, "frame-boost", 1, + "Enable frame periodic boost (0: off (default), 1: on)"); static const arg_def_t gf_cbr_boost_pct = ARG_DEF( NULL, "gf-cbr-boost", 1, "Boost for Golden Frame in CBR mode (pct)"); -static const arg_def_t max_inter_rate_pct = ARG_DEF( - NULL, "max-inter-rate", 1, "Max P-frame bitrate (pct)"); +static const arg_def_t max_inter_rate_pct = + ARG_DEF(NULL, "max-inter-rate", 1, "Max P-frame bitrate (pct)"); static const arg_def_t min_gf_interval = ARG_DEF( NULL, "min-gf-interval", 1, "min gf/arf frame interval (default 0, indicating in-built behavior)"); @@ -415,92 +430,95 @@ static const struct arg_enum_list color_space_enum[] = { { NULL, 0 } }; -static const arg_def_t input_color_space = ARG_DEF_ENUM( - NULL, "color-space", 1, - "The color space of input content:", color_space_enum); +static const arg_def_t input_color_space = + ARG_DEF_ENUM(NULL, "color-space", 1, "The color space of input content:", + color_space_enum); #if CONFIG_VP9_HIGHBITDEPTH static const struct arg_enum_list bitdepth_enum[] = { - {"8", VPX_BITS_8}, - {"10", VPX_BITS_10}, - {"12", VPX_BITS_12}, - {NULL, 0} + { "8", VPX_BITS_8 }, { "10", VPX_BITS_10 }, { "12", VPX_BITS_12 }, { NULL, 0 } }; static const arg_def_t bitdeptharg = ARG_DEF_ENUM( "b", "bit-depth", 1, "Bit depth for codec (8 for version <=1, 10 or 12 for version 2)", bitdepth_enum); -static const arg_def_t inbitdeptharg = ARG_DEF( - NULL, "input-bit-depth", 1, "Bit depth of input"); +static const 
arg_def_t inbitdeptharg = + ARG_DEF(NULL, "input-bit-depth", 1, "Bit depth of input"); #endif static const struct arg_enum_list tune_content_enum[] = { - {"default", VP9E_CONTENT_DEFAULT}, - {"screen", VP9E_CONTENT_SCREEN}, - {NULL, 0} + { "default", VP9E_CONTENT_DEFAULT }, + { "screen", VP9E_CONTENT_SCREEN }, + { NULL, 0 } }; static const arg_def_t tune_content = ARG_DEF_ENUM( NULL, "tune-content", 1, "Tune content type", tune_content_enum); + +static const arg_def_t target_level = ARG_DEF( + NULL, "target-level", 1, + "Target level (255: off (default); 0: only keep level stats; 10: level 1.0;" + " 11: level 1.1; ... 62: level 6.2)"); #endif #if CONFIG_VP9_ENCODER -static const arg_def_t *vp9_args[] = { - &cpu_used_vp9, &auto_altref, &sharpness, &static_thresh, - &tile_cols, &tile_rows, &arnr_maxframes, &arnr_strength, &arnr_type, - &tune_ssim, &cq_level, &max_intra_rate_pct, &max_inter_rate_pct, - &gf_cbr_boost_pct, &lossless, - &frame_parallel_decoding, &aq_mode, &frame_periodic_boost, - &noise_sens, &tune_content, &input_color_space, - &min_gf_interval, &max_gf_interval, +static const arg_def_t *vp9_args[] = { &cpu_used_vp9, + &auto_altref, + &sharpness, + &static_thresh, + &tile_cols, + &tile_rows, + &arnr_maxframes, + &arnr_strength, + &arnr_type, + &tune_ssim, + &cq_level, + &max_intra_rate_pct, + &max_inter_rate_pct, + &gf_cbr_boost_pct, + &lossless, + &frame_parallel_decoding, + &aq_mode, + &alt_ref_aq, + &frame_periodic_boost, + &noise_sens, + &tune_content, + &input_color_space, + &min_gf_interval, + &max_gf_interval, + &target_level, #if CONFIG_VP9_HIGHBITDEPTH - &bitdeptharg, &inbitdeptharg, + &bitdeptharg, + &inbitdeptharg, #endif // CONFIG_VP9_HIGHBITDEPTH - NULL -}; -static const int vp9_arg_ctrl_map[] = { - VP8E_SET_CPUUSED, VP8E_SET_ENABLEAUTOALTREF, - VP8E_SET_SHARPNESS, VP8E_SET_STATIC_THRESHOLD, - VP9E_SET_TILE_COLUMNS, VP9E_SET_TILE_ROWS, - VP8E_SET_ARNR_MAXFRAMES, VP8E_SET_ARNR_STRENGTH, VP8E_SET_ARNR_TYPE, - VP8E_SET_TUNING, 
VP8E_SET_CQ_LEVEL, VP8E_SET_MAX_INTRA_BITRATE_PCT, - VP9E_SET_MAX_INTER_BITRATE_PCT, VP9E_SET_GF_CBR_BOOST_PCT, - VP9E_SET_LOSSLESS, VP9E_SET_FRAME_PARALLEL_DECODING, VP9E_SET_AQ_MODE, - VP9E_SET_FRAME_PERIODIC_BOOST, VP9E_SET_NOISE_SENSITIVITY, - VP9E_SET_TUNE_CONTENT, VP9E_SET_COLOR_SPACE, - VP9E_SET_MIN_GF_INTERVAL, VP9E_SET_MAX_GF_INTERVAL, - 0 -}; -#endif - -#if CONFIG_VP10_ENCODER -static const arg_def_t *vp10_args[] = { - &cpu_used_vp9, &auto_altref, &sharpness, &static_thresh, - &tile_cols, &tile_rows, &arnr_maxframes, &arnr_strength, &arnr_type, - &tune_ssim, &cq_level, &max_intra_rate_pct, &max_inter_rate_pct, - &gf_cbr_boost_pct, &lossless, - &frame_parallel_decoding, &aq_mode, &frame_periodic_boost, - &noise_sens, &tune_content, &input_color_space, - &min_gf_interval, &max_gf_interval, -#if CONFIG_VP9_HIGHBITDEPTH - &bitdeptharg, &inbitdeptharg, -#endif // CONFIG_VP9_HIGHBITDEPTH - NULL -}; -static const int vp10_arg_ctrl_map[] = { - VP8E_SET_CPUUSED, VP8E_SET_ENABLEAUTOALTREF, - VP8E_SET_SHARPNESS, VP8E_SET_STATIC_THRESHOLD, - VP9E_SET_TILE_COLUMNS, VP9E_SET_TILE_ROWS, - VP8E_SET_ARNR_MAXFRAMES, VP8E_SET_ARNR_STRENGTH, VP8E_SET_ARNR_TYPE, - VP8E_SET_TUNING, VP8E_SET_CQ_LEVEL, VP8E_SET_MAX_INTRA_BITRATE_PCT, - VP9E_SET_MAX_INTER_BITRATE_PCT, VP9E_SET_GF_CBR_BOOST_PCT, - VP9E_SET_LOSSLESS, VP9E_SET_FRAME_PARALLEL_DECODING, VP9E_SET_AQ_MODE, - VP9E_SET_FRAME_PERIODIC_BOOST, VP9E_SET_NOISE_SENSITIVITY, - VP9E_SET_TUNE_CONTENT, VP9E_SET_COLOR_SPACE, - VP9E_SET_MIN_GF_INTERVAL, VP9E_SET_MAX_GF_INTERVAL, - 0 -}; + NULL }; +static const int vp9_arg_ctrl_map[] = { VP8E_SET_CPUUSED, + VP8E_SET_ENABLEAUTOALTREF, + VP8E_SET_SHARPNESS, + VP8E_SET_STATIC_THRESHOLD, + VP9E_SET_TILE_COLUMNS, + VP9E_SET_TILE_ROWS, + VP8E_SET_ARNR_MAXFRAMES, + VP8E_SET_ARNR_STRENGTH, + VP8E_SET_ARNR_TYPE, + VP8E_SET_TUNING, + VP8E_SET_CQ_LEVEL, + VP8E_SET_MAX_INTRA_BITRATE_PCT, + VP9E_SET_MAX_INTER_BITRATE_PCT, + VP9E_SET_GF_CBR_BOOST_PCT, + VP9E_SET_LOSSLESS, + 
VP9E_SET_FRAME_PARALLEL_DECODING, + VP9E_SET_AQ_MODE, + VP9E_SET_ALT_REF_AQ, + VP9E_SET_FRAME_PERIODIC_BOOST, + VP9E_SET_NOISE_SENSITIVITY, + VP9E_SET_TUNE_CONTENT, + VP9E_SET_COLOR_SPACE, + VP9E_SET_MIN_GF_INTERVAL, + VP9E_SET_MAX_GF_INTERVAL, + VP9E_SET_TARGET_LEVEL, + 0 }; #endif static const arg_def_t *no_args[] = { NULL }; @@ -530,21 +548,17 @@ void usage_exit(void) { fprintf(stderr, "\nVP9 Specific Options:\n"); arg_show_usage(stderr, vp9_args); #endif -#if CONFIG_VP10_ENCODER - fprintf(stderr, "\nVP10 Specific Options:\n"); - arg_show_usage(stderr, vp10_args); -#endif - fprintf(stderr, "\nStream timebase (--timebase):\n" + fprintf(stderr, + "\nStream timebase (--timebase):\n" " The desired precision of timestamps in the output, expressed\n" " in fractional seconds. Default is 1/1000.\n"); fprintf(stderr, "\nIncluded encoders:\n\n"); for (i = 0; i < num_encoder; ++i) { const VpxInterface *const encoder = get_vpx_encoder_by_index(i); - const char* defstr = (i == (num_encoder - 1)) ? "(default)" : ""; - fprintf(stderr, " %-6s - %s %s\n", - encoder->name, vpx_codec_iface_name(encoder->codec_interface()), - defstr); + const char *defstr = (i == (num_encoder - 1)) ? "(default)" : ""; + fprintf(stderr, " %-6s - %s %s\n", encoder->name, + vpx_codec_iface_name(encoder->codec_interface()), defstr); } fprintf(stderr, "\n "); fprintf(stderr, "Use --codec to switch to a non-default encoder.\n\n"); @@ -552,12 +566,12 @@ void usage_exit(void) { exit(EXIT_FAILURE); } -#define mmin(a, b) ((a) < (b) ? (a) : (b)) +#define mmin(a, b) ((a) < (b) ? 
(a) : (b)) #if CONFIG_VP9_HIGHBITDEPTH static void find_mismatch_high(const vpx_image_t *const img1, - const vpx_image_t *const img2, - int yloc[4], int uloc[4], int vloc[4]) { + const vpx_image_t *const img2, int yloc[4], + int uloc[4], int vloc[4]) { uint16_t *plane1, *plane2; uint32_t stride1, stride2; const uint32_t bsize = 64; @@ -570,10 +584,10 @@ static void find_mismatch_high(const vpx_image_t *const img1, int match = 1; uint32_t i, j; yloc[0] = yloc[1] = yloc[2] = yloc[3] = -1; - plane1 = (uint16_t*)img1->planes[VPX_PLANE_Y]; - plane2 = (uint16_t*)img2->planes[VPX_PLANE_Y]; - stride1 = img1->stride[VPX_PLANE_Y]/2; - stride2 = img2->stride[VPX_PLANE_Y]/2; + plane1 = (uint16_t *)img1->planes[VPX_PLANE_Y]; + plane2 = (uint16_t *)img2->planes[VPX_PLANE_Y]; + stride1 = img1->stride[VPX_PLANE_Y] / 2; + stride2 = img2->stride[VPX_PLANE_Y] / 2; for (i = 0, match = 1; match && i < img1->d_h; i += bsize) { for (j = 0; match && j < img1->d_w; j += bsize) { int k, l; @@ -596,10 +610,10 @@ static void find_mismatch_high(const vpx_image_t *const img1, } uloc[0] = uloc[1] = uloc[2] = uloc[3] = -1; - plane1 = (uint16_t*)img1->planes[VPX_PLANE_U]; - plane2 = (uint16_t*)img2->planes[VPX_PLANE_U]; - stride1 = img1->stride[VPX_PLANE_U]/2; - stride2 = img2->stride[VPX_PLANE_U]/2; + plane1 = (uint16_t *)img1->planes[VPX_PLANE_U]; + plane2 = (uint16_t *)img2->planes[VPX_PLANE_U]; + stride1 = img1->stride[VPX_PLANE_U] / 2; + stride2 = img2->stride[VPX_PLANE_U] / 2; for (i = 0, match = 1; match && i < c_h; i += bsizey) { for (j = 0; match && j < c_w; j += bsizex) { int k, l; @@ -622,10 +636,10 @@ static void find_mismatch_high(const vpx_image_t *const img1, } vloc[0] = vloc[1] = vloc[2] = vloc[3] = -1; - plane1 = (uint16_t*)img1->planes[VPX_PLANE_V]; - plane2 = (uint16_t*)img2->planes[VPX_PLANE_V]; - stride1 = img1->stride[VPX_PLANE_V]/2; - stride2 = img2->stride[VPX_PLANE_V]/2; + plane1 = (uint16_t *)img1->planes[VPX_PLANE_V]; + plane2 = (uint16_t *)img2->planes[VPX_PLANE_V]; + 
stride1 = img1->stride[VPX_PLANE_V] / 2; + stride2 = img2->stride[VPX_PLANE_V] / 2; for (i = 0, match = 1; match && i < c_h; i += bsizey) { for (j = 0; match && j < c_w; j += bsizex) { int k, l; @@ -650,8 +664,8 @@ static void find_mismatch_high(const vpx_image_t *const img1, #endif static void find_mismatch(const vpx_image_t *const img1, - const vpx_image_t *const img2, - int yloc[4], int uloc[4], int vloc[4]) { + const vpx_image_t *const img2, int yloc[4], + int uloc[4], int vloc[4]) { const uint32_t bsize = 64; const uint32_t bsizey = bsize >> img1->y_chroma_shift; const uint32_t bsizex = bsize >> img1->x_chroma_shift; @@ -742,8 +756,7 @@ static void find_mismatch(const vpx_image_t *const img1, static int compare_img(const vpx_image_t *const img1, const vpx_image_t *const img2) { uint32_t l_w = img1->d_w; - uint32_t c_w = - (img1->d_w + img1->x_chroma_shift) >> img1->x_chroma_shift; + uint32_t c_w = (img1->d_w + img1->x_chroma_shift) >> img1->x_chroma_shift; const uint32_t c_h = (img1->d_h + img1->y_chroma_shift) >> img1->y_chroma_shift; uint32_t i; @@ -777,11 +790,8 @@ static int compare_img(const vpx_image_t *const img1, return match; } - -#define NELEMENTS(x) (sizeof(x)/sizeof(x[0])) -#if CONFIG_VP10_ENCODER -#define ARG_CTRL_CNT_MAX NELEMENTS(vp10_arg_ctrl_map) -#elif CONFIG_VP9_ENCODER +#define NELEMENTS(x) (sizeof(x) / sizeof(x[0])) +#if CONFIG_VP9_ENCODER #define ARG_CTRL_CNT_MAX NELEMENTS(vp9_arg_ctrl_map) #else #define ARG_CTRL_CNT_MAX NELEMENTS(vp8_arg_ctrl_map) @@ -789,77 +799,72 @@ static int compare_img(const vpx_image_t *const img1, #if !CONFIG_WEBM_IO typedef int stereo_format_t; -struct EbmlGlobal { int debug; }; +struct WebmOutputContext { + int debug; +}; #endif /* Per-stream configuration */ struct stream_config { - struct vpx_codec_enc_cfg cfg; - const char *out_fn; - const char *stats_fn; + struct vpx_codec_enc_cfg cfg; + const char *out_fn; + const char *stats_fn; #if CONFIG_FP_MB_STATS - const char *fpmb_stats_fn; + const char 
*fpmb_stats_fn; #endif - stereo_format_t stereo_fmt; - int arg_ctrls[ARG_CTRL_CNT_MAX][2]; - int arg_ctrl_cnt; - int write_webm; - int have_kf_max_dist; + stereo_format_t stereo_fmt; + int arg_ctrls[ARG_CTRL_CNT_MAX][2]; + int arg_ctrl_cnt; + int write_webm; #if CONFIG_VP9_HIGHBITDEPTH // whether to use 16bit internal buffers - int use_16bit_internal; + int use_16bit_internal; #endif }; - struct stream_state { - int index; - struct stream_state *next; - struct stream_config config; - FILE *file; - struct rate_hist *rate_hist; - struct EbmlGlobal ebml; - uint64_t psnr_sse_total; - uint64_t psnr_samples_total; - double psnr_totals[4]; - int psnr_count; - int counts[64]; - vpx_codec_ctx_t encoder; - unsigned int frames_out; - uint64_t cx_time; - size_t nbytes; - stats_io_t stats; + int index; + struct stream_state *next; + struct stream_config config; + FILE *file; + struct rate_hist *rate_hist; + struct WebmOutputContext webm_ctx; + uint64_t psnr_sse_total; + uint64_t psnr_samples_total; + double psnr_totals[4]; + int psnr_count; + int counts[64]; + vpx_codec_ctx_t encoder; + unsigned int frames_out; + uint64_t cx_time; + size_t nbytes; + stats_io_t stats; #if CONFIG_FP_MB_STATS - stats_io_t fpmb_stats; + stats_io_t fpmb_stats; #endif - struct vpx_image *img; - vpx_codec_ctx_t decoder; - int mismatch_seen; + struct vpx_image *img; + vpx_codec_ctx_t decoder; + int mismatch_seen; }; - -static void validate_positive_rational(const char *msg, +static void validate_positive_rational(const char *msg, struct vpx_rational *rat) { if (rat->den < 0) { rat->num *= -1; rat->den *= -1; } - if (rat->num < 0) - die("Error: %s must be positive\n", msg); + if (rat->num < 0) die("Error: %s must be positive\n", msg); - if (!rat->den) - die("Error: %s has zero denominator\n", msg); + if (!rat->den) die("Error: %s has zero denominator\n", msg); } - static void parse_global_config(struct VpxEncoderConfig *global, char **argv) { - char **argi, **argj; - struct arg arg; + char **argi, 
**argj; + struct arg arg; const int num_encoder = get_vpx_encoder_count(); - if (num_encoder < 1) - die("Error: no valid encoder available\n"); + if (num_encoder < 1) die("Error: no valid encoder available\n"); /* Initialize default parameters */ memset(global, 0, sizeof(*global)); @@ -885,8 +890,7 @@ static void parse_global_config(struct VpxEncoderConfig *global, char **argv) { global->pass = arg_parse_uint(&arg); if (global->pass < 1 || global->pass > 2) - die("Error: Invalid pass selected (%d)\n", - global->pass); + die("Error: Invalid pass selected (%d)\n", global->pass); } else if (arg_match(&arg, &usage, argi)) global->usage = arg_parse_uint(&arg); else if (arg_match(&arg, &deadline, argi)) @@ -942,39 +946,38 @@ static void parse_global_config(struct VpxEncoderConfig *global, char **argv) { if (global->pass) { /* DWIM: Assume the user meant passes=2 if pass=2 is specified */ if (global->pass > global->passes) { - warn("Assuming --pass=%d implies --passes=%d\n", - global->pass, global->pass); + warn("Assuming --pass=%d implies --passes=%d\n", global->pass, + global->pass); global->passes = global->pass; } } /* Validate global config */ if (global->passes == 0) { -#if CONFIG_VP9_ENCODER || CONFIG_VP10_ENCODER +#if CONFIG_VP9_ENCODER // Make default VP9 passes = 2 until there is a better quality 1-pass // encoder if (global->codec != NULL && global->codec->name != NULL) global->passes = (strcmp(global->codec->name, "vp9") == 0 && - global->deadline != VPX_DL_REALTIME) ? 2 : 1; + global->deadline != VPX_DL_REALTIME) + ? 2 + : 1; #else global->passes = 1; #endif } - if (global->deadline == VPX_DL_REALTIME && - global->passes > 1) { + if (global->deadline == VPX_DL_REALTIME && global->passes > 1) { warn("Enforcing one-pass encoding in realtime mode\n"); global->passes = 1; } } - static void open_input_file(struct VpxInputContext *input) { /* Parse certain options from the input file, if possible */ - input->file = strcmp(input->filename, "-") - ? 
fopen(input->filename, "rb") : set_binary_mode(stdin); + input->file = strcmp(input->filename, "-") ? fopen(input->filename, "rb") + : set_binary_mode(stdin); - if (!input->file) - fatal("Failed to open input file"); + if (!input->file) fatal("Failed to open input file"); if (!fseeko(input->file, 0, SEEK_END)) { /* Input file is seekable. Figure out how long it is, so we can get @@ -994,8 +997,7 @@ static void open_input_file(struct VpxInputContext *input) { input->detect.buf_read = fread(input->detect.buf, 1, 4, input->file); input->detect.position = 0; - if (input->detect.buf_read == 4 - && file_is_y4m(input->detect.buf)) { + if (input->detect.buf_read == 4 && file_is_y4m(input->detect.buf)) { if (y4m_input_open(&input->y4m, input->file, input->detect.buf, 4, input->only_i420) >= 0) { input->file_type = FILE_TYPE_Y4M; @@ -1016,11 +1018,9 @@ static void open_input_file(struct VpxInputContext *input) { } } - static void close_input_file(struct VpxInputContext *input) { fclose(input->file); - if (input->file_type == FILE_TYPE_Y4M) - y4m_input_close(&input->y4m); + if (input->file_type == FILE_TYPE_Y4M) y4m_input_close(&input->y4m); } static struct stream_state *new_stream(struct VpxEncoderConfig *global, @@ -1037,14 +1037,12 @@ static struct stream_state *new_stream(struct VpxEncoderConfig *global, stream->index++; prev->next = stream; } else { - vpx_codec_err_t res; + vpx_codec_err_t res; /* Populate encoder configuration */ res = vpx_codec_enc_config_default(global->codec->codec_interface(), - &stream->config.cfg, - global->usage); - if (res) - fatal("Failed to get config: %s\n", vpx_codec_err_to_string(res)); + &stream->config.cfg, global->usage); + if (res) fatal("Failed to get config: %s\n", vpx_codec_err_to_string(res)); /* Change the default timebase to a high enough value so that the * encoder will always create strictly increasing timestamps. 
@@ -1061,16 +1059,17 @@ static struct stream_state *new_stream(struct VpxEncoderConfig *global, stream->config.write_webm = 1; #if CONFIG_WEBM_IO stream->config.stereo_fmt = STEREO_FORMAT_MONO; - stream->ebml.last_pts_ns = -1; - stream->ebml.writer = NULL; - stream->ebml.segment = NULL; + stream->webm_ctx.last_pts_ns = -1; + stream->webm_ctx.writer = NULL; + stream->webm_ctx.segment = NULL; #endif /* Allows removal of the application version from the EBML tags */ - stream->ebml.debug = global->debug; + stream->webm_ctx.debug = global->debug; - /* Default lag_in_frames is 0 in realtime mode */ - if (global->deadline == VPX_DL_REALTIME) + /* Default lag_in_frames is 0 in realtime mode CBR mode*/ + if (global->deadline == VPX_DL_REALTIME && + stream->config.cfg.rc_end_usage == 1) stream->config.cfg.g_lag_in_frames = 0; } @@ -1081,18 +1080,16 @@ static struct stream_state *new_stream(struct VpxEncoderConfig *global, return stream; } - static int parse_stream_params(struct VpxEncoderConfig *global, - struct stream_state *stream, - char **argv) { - char **argi, **argj; - struct arg arg; + struct stream_state *stream, char **argv) { + char **argi, **argj; + struct arg arg; static const arg_def_t **ctrl_args = no_args; - static const int *ctrl_args_map = NULL; - struct stream_config *config = &stream->config; - int eos_mark_found = 0; + static const int *ctrl_args_map = NULL; + struct stream_config *config = &stream->config; + int eos_mark_found = 0; #if CONFIG_VP9_HIGHBITDEPTH - int test_16bit_internal = 0; + int test_16bit_internal = 0; #endif // Handle codec specific options @@ -1106,13 +1103,6 @@ static int parse_stream_params(struct VpxEncoderConfig *global, } else if (strcmp(global->codec->name, "vp9") == 0) { ctrl_args = vp9_args; ctrl_args_map = vp9_arg_ctrl_map; -#endif -#if CONFIG_VP10_ENCODER - } else if (strcmp(global->codec->name, "vp10") == 0) { - // TODO(jingning): Reuse VP9 specific encoder configuration parameters. 
- // Consider to expand this set for VP10 encoder control. - ctrl_args = vp10_args; - ctrl_args_map = vp10_arg_ctrl_map; #endif } @@ -1169,11 +1159,14 @@ static int parse_stream_params(struct VpxEncoderConfig *global, validate_positive_rational(arg.name, &config->cfg.g_timebase); } else if (arg_match(&arg, &error_resilient, argi)) { config->cfg.g_error_resilient = arg_parse_uint(&arg); + } else if (arg_match(&arg, &end_usage, argi)) { + config->cfg.rc_end_usage = arg_parse_enum_or_int(&arg); } else if (arg_match(&arg, &lag_in_frames, argi)) { config->cfg.g_lag_in_frames = arg_parse_uint(&arg); if (global->deadline == VPX_DL_REALTIME && + config->cfg.rc_end_usage == VPX_CBR && config->cfg.g_lag_in_frames != 0) { - warn("non-zero %s option ignored in realtime mode.\n", arg.name); + warn("non-zero %s option ignored in realtime CBR mode.\n", arg.name); config->cfg.g_lag_in_frames = 0; } } else if (arg_match(&arg, &dropframe_thresh, argi)) { @@ -1207,7 +1200,7 @@ static int parse_stream_params(struct VpxEncoderConfig *global, } else if (arg_match(&arg, &buf_optimal_sz, argi)) { config->cfg.rc_buf_optimal_sz = arg_parse_uint(&arg); } else if (arg_match(&arg, &bias_pct, argi)) { - config->cfg.rc_2pass_vbr_bias_pct = arg_parse_uint(&arg); + config->cfg.rc_2pass_vbr_bias_pct = arg_parse_uint(&arg); if (global->passes < 2) warn("option %s ignored in one-pass mode.\n", arg.name); } else if (arg_match(&arg, &minsection_pct, argi)) { @@ -1224,13 +1217,11 @@ static int parse_stream_params(struct VpxEncoderConfig *global, config->cfg.kf_min_dist = arg_parse_uint(&arg); } else if (arg_match(&arg, &kf_max_dist, argi)) { config->cfg.kf_max_dist = arg_parse_uint(&arg); - config->have_kf_max_dist = 1; } else if (arg_match(&arg, &kf_disabled, argi)) { config->cfg.kf_mode = VPX_KF_DISABLED; #if CONFIG_VP9_HIGHBITDEPTH } else if (arg_match(&arg, &test16bitinternalarg, argi)) { - if (strcmp(global->codec->name, "vp9") == 0 || - strcmp(global->codec->name, "vp10") == 0) { + if 
(strcmp(global->codec->name, "vp9") == 0) { test_16bit_internal = 1; } #endif @@ -1254,43 +1245,40 @@ static int parse_stream_params(struct VpxEncoderConfig *global, if (ctrl_args_map != NULL && j < (int)ARG_CTRL_CNT_MAX) { config->arg_ctrls[j][0] = ctrl_args_map[i]; config->arg_ctrls[j][1] = arg_parse_enum_or_int(&arg); - if (j == config->arg_ctrl_cnt) - config->arg_ctrl_cnt++; + if (j == config->arg_ctrl_cnt) config->arg_ctrl_cnt++; } } } - if (!match) - argj++; + if (!match) argj++; } } #if CONFIG_VP9_HIGHBITDEPTH - if (strcmp(global->codec->name, "vp9") == 0 || - strcmp(global->codec->name, "vp10") == 0) { - config->use_16bit_internal = test_16bit_internal | - (config->cfg.g_profile > 1); + if (strcmp(global->codec->name, "vp9") == 0) { + config->use_16bit_internal = + test_16bit_internal | (config->cfg.g_profile > 1); } #endif return eos_mark_found; } - -#define FOREACH_STREAM(func) \ - do { \ - struct stream_state *stream; \ +#define FOREACH_STREAM(func) \ + do { \ + struct stream_state *stream; \ for (stream = streams; stream; stream = stream->next) { \ - func; \ - } \ + func; \ + } \ } while (0) - static void validate_stream_config(const struct stream_state *stream, const struct VpxEncoderConfig *global) { const struct stream_state *streami; (void)global; if (!stream->config.cfg.g_w || !stream->config.cfg.g_h) - fatal("Stream %d: Specify stream dimensions with --width (-w) " - " and --height (-h)", stream->index); + fatal( + "Stream %d: Specify stream dimensions with --width (-w) " + " and --height (-h)", + stream->index); // Check that the codec bit depth is greater than the input bit depth. 
if (stream->config.cfg.g_input_bit_depth > @@ -1337,9 +1325,7 @@ static void validate_stream_config(const struct stream_state *stream, } } - -static void set_stream_dimensions(struct stream_state *stream, - unsigned int w, +static void set_stream_dimensions(struct stream_state *stream, unsigned int w, unsigned int h) { if (!stream->config.cfg.g_w) { if (!stream->config.cfg.g_h) @@ -1352,20 +1338,7 @@ static void set_stream_dimensions(struct stream_state *stream, } } - -static void set_default_kf_interval(struct stream_state *stream, - struct VpxEncoderConfig *global) { - /* Use a max keyframe interval of 5 seconds, if none was - * specified on the command line. - */ - if (!stream->config.have_kf_max_dist) { - double framerate = (double)global->framerate.num / global->framerate.den; - if (framerate > 0.0) - stream->config.cfg.kf_max_dist = (unsigned int)(5.0 * framerate); - } -} - -static const char* file_type_to_string(enum VideoFileType t) { +static const char *file_type_to_string(enum VideoFileType t) { switch (t) { case FILE_TYPE_RAW: return "RAW"; case FILE_TYPE_Y4M: return "Y4M"; @@ -1373,7 +1346,7 @@ static const char* file_type_to_string(enum VideoFileType t) { } } -static const char* image_format_to_string(vpx_img_fmt_t f) { +static const char *image_format_to_string(vpx_img_fmt_t f) { switch (f) { case VPX_IMG_FMT_I420: return "I420"; case VPX_IMG_FMT_I422: return "I422"; @@ -1391,7 +1364,6 @@ static const char* image_format_to_string(vpx_img_fmt_t f) { static void show_stream_config(struct stream_state *stream, struct VpxEncoderConfig *global, struct VpxInputContext *input) { - #define SHOW(field) \ fprintf(stderr, " %-28s = %d\n", #field, stream->config.cfg.field) @@ -1399,8 +1371,7 @@ static void show_stream_config(struct stream_state *stream, fprintf(stderr, "Codec: %s\n", vpx_codec_iface_name(global->codec->codec_interface())); fprintf(stderr, "Source file: %s File Type: %s Format: %s\n", - input->filename, - file_type_to_string(input->file_type), + 
input->filename, file_type_to_string(input->file_type), image_format_to_string(input->fmt)); } if (stream->next || stream->index) @@ -1443,32 +1414,26 @@ static void show_stream_config(struct stream_state *stream, SHOW(kf_max_dist); } - static void open_output_file(struct stream_state *stream, struct VpxEncoderConfig *global, const struct VpxRational *pixel_aspect_ratio) { const char *fn = stream->config.out_fn; const struct vpx_codec_enc_cfg *const cfg = &stream->config.cfg; - if (cfg->g_pass == VPX_RC_FIRST_PASS) - return; + if (cfg->g_pass == VPX_RC_FIRST_PASS) return; stream->file = strcmp(fn, "-") ? fopen(fn, "wb") : set_binary_mode(stdout); - if (!stream->file) - fatal("Failed to open output file"); + if (!stream->file) fatal("Failed to open output file"); if (stream->config.write_webm && fseek(stream->file, 0, SEEK_CUR)) fatal("WebM output to pipes not supported."); #if CONFIG_WEBM_IO if (stream->config.write_webm) { - stream->ebml.stream = stream->file; - write_webm_file_header(&stream->ebml, cfg, - &global->framerate, - stream->config.stereo_fmt, - global->codec->fourcc, - pixel_aspect_ratio); + stream->webm_ctx.stream = stream->file; + write_webm_file_header(&stream->webm_ctx, cfg, stream->config.stereo_fmt, + global->codec->fourcc, pixel_aspect_ratio); } #else (void)pixel_aspect_ratio; @@ -1479,37 +1444,31 @@ static void open_output_file(struct stream_state *stream, } } - static void close_output_file(struct stream_state *stream, unsigned int fourcc) { const struct vpx_codec_enc_cfg *const cfg = &stream->config.cfg; - if (cfg->g_pass == VPX_RC_FIRST_PASS) - return; + if (cfg->g_pass == VPX_RC_FIRST_PASS) return; #if CONFIG_WEBM_IO if (stream->config.write_webm) { - write_webm_file_footer(&stream->ebml); + write_webm_file_footer(&stream->webm_ctx); } #endif if (!stream->config.write_webm) { if (!fseek(stream->file, 0, SEEK_SET)) - ivf_write_file_header(stream->file, &stream->config.cfg, - fourcc, + ivf_write_file_header(stream->file, &stream->config.cfg, 
fourcc, stream->frames_out); } fclose(stream->file); } - static void setup_pass(struct stream_state *stream, - struct VpxEncoderConfig *global, - int pass) { + struct VpxEncoderConfig *global, int pass) { if (stream->config.stats_fn) { - if (!stats_open_file(&stream->stats, stream->config.stats_fn, - pass)) + if (!stats_open_file(&stream->stats, stream->config.stats_fn, pass)) fatal("Failed to open statistics store"); } else { if (!stats_open_mem(&stream->stats, pass)) @@ -1518,8 +1477,8 @@ static void setup_pass(struct stream_state *stream, #if CONFIG_FP_MB_STATS if (stream->config.fpmb_stats_fn) { - if (!stats_open_file(&stream->fpmb_stats, - stream->config.fpmb_stats_fn, pass)) + if (!stats_open_file(&stream->fpmb_stats, stream->config.fpmb_stats_fn, + pass)) fatal("Failed to open mb statistics store"); } else { if (!stats_open_mem(&stream->fpmb_stats, pass)) @@ -1528,8 +1487,8 @@ static void setup_pass(struct stream_state *stream, #endif stream->config.cfg.g_pass = global->passes == 2 - ? pass ? VPX_RC_LAST_PASS : VPX_RC_FIRST_PASS - : VPX_RC_ONE_PASS; + ? pass ? 
VPX_RC_LAST_PASS : VPX_RC_FIRST_PASS + : VPX_RC_ONE_PASS; if (pass) { stream->config.cfg.rc_twopass_stats_in = stats_get(&stream->stats); #if CONFIG_FP_MB_STATS @@ -1543,7 +1502,6 @@ static void setup_pass(struct stream_state *stream, stream->frames_out = 0; } - static void initialize_encoder(struct stream_state *stream, struct VpxEncoderConfig *global) { int i; @@ -1568,8 +1526,7 @@ static void initialize_encoder(struct stream_state *stream, int ctrl = stream->config.arg_ctrls[i][0]; int value = stream->config.arg_ctrls[i][1]; if (vpx_codec_control_(&stream->encoder, ctrl, value)) - fprintf(stderr, "Error: Tried to set control %d = %d\n", - ctrl, value); + fprintf(stderr, "Error: Tried to set control %d = %d\n", ctrl, value); ctx_exit_on_error(&stream->encoder, "Failed to control codec"); } @@ -1582,23 +1539,21 @@ static void initialize_encoder(struct stream_state *stream, #endif } - static void encode_frame(struct stream_state *stream, - struct VpxEncoderConfig *global, - struct vpx_image *img, + struct VpxEncoderConfig *global, struct vpx_image *img, unsigned int frames_in) { vpx_codec_pts_t frame_start, next_frame_start; struct vpx_codec_enc_cfg *cfg = &stream->config.cfg; struct vpx_usec_timer timer; - frame_start = (cfg->g_timebase.den * (int64_t)(frames_in - 1) - * global->framerate.den) - / cfg->g_timebase.num / global->framerate.num; - next_frame_start = (cfg->g_timebase.den * (int64_t)(frames_in) - * global->framerate.den) - / cfg->g_timebase.num / global->framerate.num; + frame_start = + (cfg->g_timebase.den * (int64_t)(frames_in - 1) * global->framerate.den) / + cfg->g_timebase.num / global->framerate.num; + next_frame_start = + (cfg->g_timebase.den * (int64_t)(frames_in)*global->framerate.den) / + cfg->g_timebase.num / global->framerate.num; - /* Scale if necessary */ +/* Scale if necessary */ #if CONFIG_VP9_HIGHBITDEPTH if (img) { if ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) && @@ -1609,32 +1564,28 @@ static void encode_frame(struct stream_state *stream, 
} #if CONFIG_LIBYUV if (!stream->img) { - stream->img = vpx_img_alloc(NULL, VPX_IMG_FMT_I42016, - cfg->g_w, cfg->g_h, 16); + stream->img = + vpx_img_alloc(NULL, VPX_IMG_FMT_I42016, cfg->g_w, cfg->g_h, 16); } - I420Scale_16((uint16*)img->planes[VPX_PLANE_Y], - img->stride[VPX_PLANE_Y]/2, - (uint16*)img->planes[VPX_PLANE_U], - img->stride[VPX_PLANE_U]/2, - (uint16*)img->planes[VPX_PLANE_V], - img->stride[VPX_PLANE_V]/2, - img->d_w, img->d_h, - (uint16*)stream->img->planes[VPX_PLANE_Y], - stream->img->stride[VPX_PLANE_Y]/2, - (uint16*)stream->img->planes[VPX_PLANE_U], - stream->img->stride[VPX_PLANE_U]/2, - (uint16*)stream->img->planes[VPX_PLANE_V], - stream->img->stride[VPX_PLANE_V]/2, - stream->img->d_w, stream->img->d_h, - kFilterBox); + I420Scale_16( + (uint16 *)img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y] / 2, + (uint16 *)img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U] / 2, + (uint16 *)img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V] / 2, + img->d_w, img->d_h, (uint16 *)stream->img->planes[VPX_PLANE_Y], + stream->img->stride[VPX_PLANE_Y] / 2, + (uint16 *)stream->img->planes[VPX_PLANE_U], + stream->img->stride[VPX_PLANE_U] / 2, + (uint16 *)stream->img->planes[VPX_PLANE_V], + stream->img->stride[VPX_PLANE_V] / 2, stream->img->d_w, + stream->img->d_h, kFilterBox); img = stream->img; #else - stream->encoder.err = 1; - ctx_exit_on_error(&stream->encoder, - "Stream %d: Failed to encode frame.\n" - "Scaling disabled in this configuration. \n" - "To enable, configure with --enable-libyuv\n", - stream->index); + stream->encoder.err = 1; + ctx_exit_on_error(&stream->encoder, + "Stream %d: Failed to encode frame.\n" + "Scaling disabled in this configuration. 
\n" + "To enable, configure with --enable-libyuv\n", + stream->index); #endif } } @@ -1646,20 +1597,16 @@ static void encode_frame(struct stream_state *stream, } #if CONFIG_LIBYUV if (!stream->img) - stream->img = vpx_img_alloc(NULL, VPX_IMG_FMT_I420, - cfg->g_w, cfg->g_h, 16); - I420Scale(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y], - img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U], - img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V], - img->d_w, img->d_h, - stream->img->planes[VPX_PLANE_Y], - stream->img->stride[VPX_PLANE_Y], - stream->img->planes[VPX_PLANE_U], - stream->img->stride[VPX_PLANE_U], - stream->img->planes[VPX_PLANE_V], - stream->img->stride[VPX_PLANE_V], - stream->img->d_w, stream->img->d_h, - kFilterBox); + stream->img = + vpx_img_alloc(NULL, VPX_IMG_FMT_I420, cfg->g_w, cfg->g_h, 16); + I420Scale( + img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y], + img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U], + img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V], img->d_w, img->d_h, + stream->img->planes[VPX_PLANE_Y], stream->img->stride[VPX_PLANE_Y], + stream->img->planes[VPX_PLANE_U], stream->img->stride[VPX_PLANE_U], + stream->img->planes[VPX_PLANE_V], stream->img->stride[VPX_PLANE_V], + stream->img->d_w, stream->img->d_h, kFilterBox); img = stream->img; #else stream->encoder.err = 1; @@ -1673,15 +1620,14 @@ static void encode_frame(struct stream_state *stream, vpx_usec_timer_start(&timer); vpx_codec_encode(&stream->encoder, img, frame_start, - (unsigned long)(next_frame_start - frame_start), - 0, global->deadline); + (unsigned long)(next_frame_start - frame_start), 0, + global->deadline); vpx_usec_timer_mark(&timer); stream->cx_time += vpx_usec_timer_elapsed(&timer); ctx_exit_on_error(&stream->encoder, "Stream %d: Failed to encode frame", stream->index); } - static void update_quantizer_histogram(struct stream_state *stream) { if (stream->config.cfg.g_pass != VPX_RC_FIRST_PASS) { int q; @@ -1692,10 +1638,8 @@ static void 
update_quantizer_histogram(struct stream_state *stream) { } } - static void get_cx_data(struct stream_state *stream, - struct VpxEncoderConfig *global, - int *got_data) { + struct VpxEncoderConfig *global, int *got_data) { const vpx_codec_cx_pkt_t *pkt; const struct vpx_codec_enc_cfg *cfg = &stream->config.cfg; vpx_codec_iter_t iter = NULL; @@ -1716,7 +1660,7 @@ static void get_cx_data(struct stream_state *stream, update_rate_histogram(stream->rate_hist, cfg, pkt); #if CONFIG_WEBM_IO if (stream->config.write_webm) { - write_webm_block(&stream->ebml, cfg, pkt); + write_webm_block(&stream->webm_ctx, cfg, pkt); } #endif if (!stream->config.write_webm) { @@ -1736,8 +1680,8 @@ static void get_cx_data(struct stream_state *stream, } } - (void) fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz, - stream->file); + (void)fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz, + stream->file); } stream->nbytes += pkt->data.raw.sz; @@ -1758,15 +1702,13 @@ static void get_cx_data(struct stream_state *stream, break; case VPX_CODEC_STATS_PKT: stream->frames_out++; - stats_write(&stream->stats, - pkt->data.twopass_stats.buf, + stats_write(&stream->stats, pkt->data.twopass_stats.buf, pkt->data.twopass_stats.sz); stream->nbytes += pkt->data.raw.sz; break; #if CONFIG_FP_MB_STATS case VPX_CODEC_FPMB_STATS_PKT: - stats_write(&stream->fpmb_stats, - pkt->data.firstpass_mb_stats.buf, + stats_write(&stream->fpmb_stats, pkt->data.firstpass_mb_stats.buf, pkt->data.firstpass_mb_stats.sz); stream->nbytes += pkt->data.raw.sz; break; @@ -1787,19 +1729,16 @@ static void get_cx_data(struct stream_state *stream, } break; - default: - break; + default: break; } } } - -static void show_psnr(struct stream_state *stream, double peak) { +static void show_psnr(struct stream_state *stream, double peak) { int i; double ovpsnr; - if (!stream->psnr_count) - return; + if (!stream->psnr_count) return; fprintf(stderr, "Stream %d PSNR (Overall/Avg/Y/U/V)", stream->index); ovpsnr = 
sse_to_psnr((double)stream->psnr_samples_total, peak, @@ -1812,18 +1751,16 @@ static void show_psnr(struct stream_state *stream, double peak) { fprintf(stderr, "\n"); } - static float usec_to_fps(uint64_t usec, unsigned int frames) { return (float)(usec > 0 ? frames * 1000000.0 / (float)usec : 0); } -static void test_decode(struct stream_state *stream, +static void test_decode(struct stream_state *stream, enum TestDecodeFatality fatal, const VpxInterface *codec) { vpx_image_t enc_img, dec_img; - if (stream->mismatch_seen) - return; + if (stream->mismatch_seen) return; /* Get the internal reference frame */ if (strcmp(codec->name, "vp8") == 0) { @@ -1886,10 +1823,8 @@ static void test_decode(struct stream_state *stream, " Y[%d, %d] {%d/%d}," " U[%d, %d] {%d/%d}," " V[%d, %d] {%d/%d}", - stream->index, stream->frames_out, - y[0], y[1], y[2], y[3], - u[0], u[1], u[2], u[3], - v[0], v[1], v[2], v[3]); + stream->index, stream->frames_out, y[0], y[1], y[2], + y[3], u[0], u[1], u[2], u[3], v[0], v[1], v[2], v[3]); stream->mismatch_seen = stream->frames_out; } @@ -1897,7 +1832,6 @@ static void test_decode(struct stream_state *stream, vpx_img_free(&dec_img); } - static void print_time(const char *label, int64_t etl) { int64_t hours; int64_t mins; @@ -1910,14 +1844,13 @@ static void print_time(const char *label, int64_t etl) { etl -= mins * 60; secs = etl; - fprintf(stderr, "[%3s %2"PRId64":%02"PRId64":%02"PRId64"] ", - label, hours, mins, secs); + fprintf(stderr, "[%3s %2" PRId64 ":%02" PRId64 ":%02" PRId64 "] ", label, + hours, mins, secs); } else { fprintf(stderr, "[%3s unknown] ", label); } } - int main(int argc, const char **argv_) { int pass; vpx_image_t raw; @@ -1940,8 +1873,7 @@ int main(int argc, const char **argv_) { memset(&input, 0, sizeof(input)); exec_name = argv_[0]; - if (argc < 3) - usage_exit(); + if (argc < 3) usage_exit(); /* Setup default input stream settings */ input.framerate.numerator = 30; @@ -1957,21 +1889,11 @@ int main(int argc, const char 
**argv_) { parse_global_config(&global, argv); switch (global.color_type) { - case I420: - input.fmt = VPX_IMG_FMT_I420; - break; - case I422: - input.fmt = VPX_IMG_FMT_I422; - break; - case I444: - input.fmt = VPX_IMG_FMT_I444; - break; - case I440: - input.fmt = VPX_IMG_FMT_I440; - break; - case YV12: - input.fmt = VPX_IMG_FMT_YV12; - break; + case I420: input.fmt = VPX_IMG_FMT_I420; break; + case I422: input.fmt = VPX_IMG_FMT_I422; break; + case I444: input.fmt = VPX_IMG_FMT_I444; break; + case I440: input.fmt = VPX_IMG_FMT_I440; break; + case YV12: input.fmt = VPX_IMG_FMT_YV12; break; } { @@ -1984,8 +1906,7 @@ int main(int argc, const char **argv_) { do { stream = new_stream(&global, stream); stream_cnt++; - if (!streams) - streams = stream; + if (!streams) streams = stream; } while (parse_stream_params(&global, stream, argv)); } @@ -1994,18 +1915,16 @@ int main(int argc, const char **argv_) { if (argi[0][0] == '-' && argi[0][1]) die("Error: Unrecognized option %s\n", *argi); - FOREACH_STREAM(check_encoder_config(global.disable_warning_prompt, - &global, &stream->config.cfg);); + FOREACH_STREAM(check_encoder_config(global.disable_warning_prompt, &global, + &stream->config.cfg);); /* Handle non-option arguments */ input.filename = argv[0]; - if (!input.filename) - usage_exit(); + if (!input.filename) usage_exit(); /* Decide if other chroma subsamplings than 4:2:0 are supported */ - if (global.codec->fourcc == VP9_FOURCC || global.codec->fourcc == VP10_FOURCC) - input.only_i420 = 0; + if (global.codec->fourcc == VP9_FOURCC) input.only_i420 = 0; for (pass = global.pass ? 
global.pass - 1 : 0; pass < global.passes; pass++) { int frames_in = 0, seen_frames = 0; @@ -2030,8 +1949,9 @@ int main(int argc, const char **argv_) { /* Update stream configurations from the input file's parameters */ if (!input.width || !input.height) - fatal("Specify stream dimensions with --width (-w) " - " and --height (-h)"); + fatal( + "Specify stream dimensions with --width (-w) " + " and --height (-h)"); /* If input file does not specify bit-depth but input-bit-depth parameter * exists, assume that to be the input bit-depth. However, if the @@ -2048,9 +1968,8 @@ int main(int argc, const char **argv_) { }); if (input.bit_depth > 8) input.fmt |= VPX_IMG_FMT_HIGHBITDEPTH; } else { - FOREACH_STREAM({ - stream->config.cfg.g_input_bit_depth = input.bit_depth; - }); + FOREACH_STREAM( + { stream->config.cfg.g_input_bit_depth = input.bit_depth; }); } FOREACH_STREAM(set_stream_dimensions(stream, input.width, input.height)); @@ -2060,18 +1979,20 @@ int main(int argc, const char **argv_) { * --passes=2, ensure --fpf was set. */ if (global.pass && global.passes == 2) - FOREACH_STREAM( { - if (!stream->config.stats_fn) - die("Stream %d: Must specify --fpf when --pass=%d" - " and --passes=2\n", stream->index, global.pass); - }); + FOREACH_STREAM({ + if (!stream->config.stats_fn) + die("Stream %d: Must specify --fpf when --pass=%d" + " and --passes=2\n", + stream->index, global.pass); + }); #if !CONFIG_WEBM_IO FOREACH_STREAM({ if (stream->config.write_webm) { stream->config.write_webm = 0; - warn("vpxenc was compiled without WebM container support." - "Producing IVF output"); + warn( + "vpxenc was compiled without WebM container support." 
+ "Producing IVF output"); } }); #endif @@ -2086,8 +2007,6 @@ int main(int argc, const char **argv_) { stream->config.cfg.g_timebase.num = global.framerate.den); } - FOREACH_STREAM(set_default_kf_interval(stream, &global)); - /* Show configuration */ if (global.verbose && pass == 0) FOREACH_STREAM(show_stream_config(stream, &global, &input)); @@ -2101,19 +2020,17 @@ int main(int argc, const char **argv_) { else vpx_img_alloc(&raw, input.fmt, input.width, input.height, 32); - FOREACH_STREAM(stream->rate_hist = - init_rate_histogram(&stream->config.cfg, - &global.framerate)); + FOREACH_STREAM(stream->rate_hist = init_rate_histogram( + &stream->config.cfg, &global.framerate)); } FOREACH_STREAM(setup_pass(stream, &global, pass)); - FOREACH_STREAM(open_output_file(stream, &global, - &input.pixel_aspect_ratio)); + FOREACH_STREAM( + open_output_file(stream, &global, &input.pixel_aspect_ratio)); FOREACH_STREAM(initialize_encoder(stream, &global)); #if CONFIG_VP9_HIGHBITDEPTH - if (strcmp(global.codec->name, "vp9") == 0 || - strcmp(global.codec->name, "vp10") == 0) { + if (strcmp(global.codec->name, "vp9") == 0) { // Check to see if at least one stream uses 16 bit internal. // Currently assume that the bit_depths for all streams using // highbitdepth are the same. @@ -2125,7 +2042,7 @@ int main(int argc, const char **argv_) { input_shift = 0; } else { input_shift = (int)stream->config.cfg.g_bit_depth - - stream->config.cfg.g_input_bit_depth; + stream->config.cfg.g_input_bit_depth; } }); } @@ -2140,26 +2057,23 @@ int main(int argc, const char **argv_) { if (!global.limit || frames_in < global.limit) { frame_avail = read_frame(&input, &raw); - if (frame_avail) - frames_in++; - seen_frames = frames_in > global.skip_frames ? - frames_in - global.skip_frames : 0; + if (frame_avail) frames_in++; + seen_frames = + frames_in > global.skip_frames ? 
frames_in - global.skip_frames : 0; if (!global.quiet) { float fps = usec_to_fps(cx_time, seen_frames); fprintf(stderr, "\rPass %d/%d ", pass + 1, global.passes); if (stream_cnt == 1) - fprintf(stderr, - "frame %4d/%-4d %7"PRId64"B ", - frames_in, streams->frames_out, (int64_t)streams->nbytes); + fprintf(stderr, "frame %4d/%-4d %7" PRId64 "B ", frames_in, + streams->frames_out, (int64_t)streams->nbytes); else fprintf(stderr, "frame %4d ", frames_in); - fprintf(stderr, "%7"PRId64" %s %.2f %s ", + fprintf(stderr, "%7" PRId64 " %s %.2f %s ", cx_time > 9999999 ? cx_time / 1000 : cx_time, - cx_time > 9999999 ? "ms" : "us", - fps >= 1.0 ? fps : fps * 60, + cx_time > 9999999 ? "ms" : "us", fps >= 1.0 ? fps : fps * 60, fps >= 1.0 ? "fps" : "fpm"); print_time("ETA", estimated_time_left); } @@ -2190,8 +2104,7 @@ int main(int argc, const char **argv_) { FOREACH_STREAM({ if (stream->config.use_16bit_internal) encode_frame(stream, &global, - frame_avail ? frame_to_encode : NULL, - frames_in); + frame_avail ? frame_to_encode : NULL, frames_in); else assert(0); }); @@ -2203,8 +2116,7 @@ int main(int argc, const char **argv_) { } #else vpx_usec_timer_start(&timer); - FOREACH_STREAM(encode_frame(stream, &global, - frame_avail ? &raw : NULL, + FOREACH_STREAM(encode_frame(stream, &global, frame_avail ? &raw : NULL, frames_in)); #endif vpx_usec_timer_mark(&timer); @@ -2226,8 +2138,8 @@ int main(int argc, const char **argv_) { const int64_t frame_in_lagged = (seen_frames - lagged_count) * 1000; rate = cx_time ? 
frame_in_lagged * (int64_t)1000000 / cx_time : 0; - remaining = 1000 * (global.limit - global.skip_frames - - seen_frames + lagged_count); + remaining = 1000 * (global.limit - global.skip_frames - + seen_frames + lagged_count); } else { const int64_t input_pos = ftello(input.file); const int64_t input_pos_lagged = input_pos - lagged_count; @@ -2237,9 +2149,8 @@ int main(int argc, const char **argv_) { remaining = limit - input_pos + lagged_count; } - average_rate = (average_rate <= 0) - ? rate - : (average_rate * 7 + rate) / 8; + average_rate = + (average_rate <= 0) ? rate : (average_rate * 7 + rate) / 8; estimated_time_left = average_rate ? remaining / average_rate : -1; } @@ -2248,23 +2159,23 @@ int main(int argc, const char **argv_) { } fflush(stdout); - if (!global.quiet) - fprintf(stderr, "\033[K"); + if (!global.quiet) fprintf(stderr, "\033[K"); } - if (stream_cnt > 1) - fprintf(stderr, "\n"); + if (stream_cnt > 1) fprintf(stderr, "\n"); if (!global.quiet) { - FOREACH_STREAM(fprintf(stderr, - "\rPass %d/%d frame %4d/%-4d %7"PRId64"B %7"PRId64"b/f %7"PRId64"b/s" - " %7"PRId64" %s (%.2f fps)\033[K\n", - pass + 1, - global.passes, frames_in, stream->frames_out, (int64_t)stream->nbytes, + FOREACH_STREAM(fprintf( + stderr, "\rPass %d/%d frame %4d/%-4d %7" PRId64 "B %7" PRId64 + "b/f %7" PRId64 "b/s" + " %7" PRId64 " %s (%.2f fps)\033[K\n", + pass + 1, global.passes, frames_in, stream->frames_out, + (int64_t)stream->nbytes, seen_frames ? (int64_t)(stream->nbytes * 8 / seen_frames) : 0, - seen_frames ? (int64_t)stream->nbytes * 8 * - (int64_t)global.framerate.num / global.framerate.den / - seen_frames : 0, + seen_frames + ? (int64_t)stream->nbytes * 8 * (int64_t)global.framerate.num / + global.framerate.den / seen_frames + : 0, stream->cx_time > 9999999 ? stream->cx_time / 1000 : stream->cx_time, stream->cx_time > 9999999 ? 
"ms" : "us", usec_to_fps(stream->cx_time, seen_frames))); @@ -2298,17 +2209,15 @@ int main(int argc, const char **argv_) { FOREACH_STREAM(stats_close(&stream->fpmb_stats, global.passes - 1)); #endif - if (global.pass) - break; + if (global.pass) break; } if (global.show_q_hist_buckets) - FOREACH_STREAM(show_q_histogram(stream->counts, - global.show_q_hist_buckets)); + FOREACH_STREAM( + show_q_histogram(stream->counts, global.show_q_hist_buckets)); if (global.show_rate_hist_buckets) - FOREACH_STREAM(show_rate_histogram(stream->rate_hist, - &stream->config.cfg, + FOREACH_STREAM(show_rate_histogram(stream->rate_hist, &stream->config.cfg, global.show_rate_hist_buckets)); FOREACH_STREAM(destroy_rate_histogram(stream->rate_hist)); @@ -2330,8 +2239,7 @@ int main(int argc, const char **argv_) { #endif #if CONFIG_VP9_HIGHBITDEPTH - if (allocated_raw_shift) - vpx_img_free(&raw_shift); + if (allocated_raw_shift) vpx_img_free(&raw_shift); #endif vpx_img_free(&raw); free(argv); diff --git a/libs/libvpx/vpxstats.c b/libs/libvpx/vpxstats.c index 16728ce096..142e367bb4 100644 --- a/libs/libvpx/vpxstats.c +++ b/libs/libvpx/vpxstats.c @@ -30,8 +30,7 @@ int stats_open_file(stats_io_t *stats, const char *fpf, int pass) { stats->file = fopen(fpf, "rb"); - if (stats->file == NULL) - fatal("First-pass stats file does not exist!"); + if (stats->file == NULL) fatal("First-pass stats file does not exist!"); if (fseek(stats->file, 0, SEEK_END)) fatal("First-pass stats file must be seekable!"); @@ -76,18 +75,17 @@ void stats_close(stats_io_t *stats, int last_pass) { fclose(stats->file); stats->file = NULL; } else { - if (stats->pass == last_pass) - free(stats->buf.buf); + if (stats->pass == last_pass) free(stats->buf.buf); } } void stats_write(stats_io_t *stats, const void *pkt, size_t len) { if (stats->file) { - (void) fwrite(pkt, 1, len, stats->file); + (void)fwrite(pkt, 1, len, stats->file); } else { if (stats->buf.sz + len > stats->buf_alloc_sz) { - size_t new_sz = stats->buf_alloc_sz + 
64 * 1024; - char *new_ptr = realloc(stats->buf.buf, new_sz); + size_t new_sz = stats->buf_alloc_sz + 64 * 1024; + char *new_ptr = realloc(stats->buf.buf, new_sz); if (new_ptr) { stats->buf_ptr = new_ptr + (stats->buf_ptr - (char *)stats->buf.buf); @@ -104,6 +102,4 @@ void stats_write(stats_io_t *stats, const void *pkt, size_t len) { } } -vpx_fixed_buf_t stats_get(stats_io_t *stats) { - return stats->buf; -} +vpx_fixed_buf_t stats_get(stats_io_t *stats) { return stats->buf; } diff --git a/libs/libvpx/warnings.c b/libs/libvpx/warnings.c index 7ac678ab4a..a80da527f7 100644 --- a/libs/libvpx/warnings.c +++ b/libs/libvpx/warnings.c @@ -24,7 +24,7 @@ static const char quantizer_warning_string[] = "Bad quantizer values. Quantizer values should not be equal, and should " "differ by at least 8."; static const char lag_in_frames_with_realtime[] = - "Lag in frames is ignored when deadline is set to realtime."; + "Lag in frames is ignored when deadline is set to realtime for cbr mode."; struct WarningListNode { const char *warning_string; @@ -47,8 +47,7 @@ static void add_warning(const char *warning_string, new_node->warning_string = warning_string; new_node->next_warning = NULL; - while (*node != NULL) - node = &(*node)->next_warning; + while (*node != NULL) node = &(*node)->next_warning; *node = new_node; } @@ -78,10 +77,9 @@ static void check_quantizer(int min_q, int max_q, } static void check_lag_in_frames_realtime_deadline( - int lag_in_frames, - int deadline, + int lag_in_frames, int deadline, int rc_end_usage, struct WarningList *warning_list) { - if (deadline == VPX_DL_REALTIME && lag_in_frames != 0) + if (deadline == VPX_DL_REALTIME && lag_in_frames != 0 && rc_end_usage == 1) add_warning(lag_in_frames_with_realtime, warning_list); } @@ -90,26 +88,22 @@ void check_encoder_config(int disable_prompt, const struct vpx_codec_enc_cfg *stream_config) { int num_warnings = 0; struct WarningListNode *warning = NULL; - struct WarningList warning_list = {0}; + struct WarningList 
warning_list = { 0 }; check_quantizer(stream_config->rc_min_quantizer, - stream_config->rc_max_quantizer, - &warning_list); - check_lag_in_frames_realtime_deadline(stream_config->g_lag_in_frames, - global_config->deadline, - &warning_list); + stream_config->rc_max_quantizer, &warning_list); + check_lag_in_frames_realtime_deadline( + stream_config->g_lag_in_frames, global_config->deadline, + stream_config->rc_end_usage, &warning_list); /* Count and print warnings. */ - for (warning = warning_list.warning_node; - warning != NULL; - warning = warning->next_warning, - ++num_warnings) { + for (warning = warning_list.warning_node; warning != NULL; + warning = warning->next_warning, ++num_warnings) { warn(warning->warning_string); } free_warning_list(&warning_list); if (num_warnings) { - if (!disable_prompt && !continue_prompt(num_warnings)) - exit(EXIT_FAILURE); + if (!disable_prompt && !continue_prompt(num_warnings)) exit(EXIT_FAILURE); } } diff --git a/libs/libvpx/webmdec.cc b/libs/libvpx/webmdec.cc index f541cfecc1..ed4bd700dd 100644 --- a/libs/libvpx/webmdec.cc +++ b/libs/libvpx/webmdec.cc @@ -13,20 +13,20 @@ #include #include -#include "third_party/libwebm/mkvparser.hpp" -#include "third_party/libwebm/mkvreader.hpp" +#include "third_party/libwebm/mkvparser/mkvparser.h" +#include "third_party/libwebm/mkvparser/mkvreader.h" namespace { void reset(struct WebmInputContext *const webm_ctx) { if (webm_ctx->reader != NULL) { mkvparser::MkvReader *const reader = - reinterpret_cast(webm_ctx->reader); + reinterpret_cast(webm_ctx->reader); delete reader; } if (webm_ctx->segment != NULL) { mkvparser::Segment *const segment = - reinterpret_cast(webm_ctx->segment); + reinterpret_cast(webm_ctx->segment); delete segment; } if (webm_ctx->buffer != NULL) { @@ -46,7 +46,7 @@ void reset(struct WebmInputContext *const webm_ctx) { void get_first_cluster(struct WebmInputContext *const webm_ctx) { mkvparser::Segment *const segment = - reinterpret_cast(webm_ctx->segment); + 
reinterpret_cast(webm_ctx->segment); const mkvparser::Cluster *const cluster = segment->GetFirst(); webm_ctx->cluster = cluster; } @@ -72,7 +72,7 @@ int file_is_webm(struct WebmInputContext *webm_ctx, return 0; } - mkvparser::Segment* segment; + mkvparser::Segment *segment; if (mkvparser::Segment::CreateInstance(reader, pos, segment)) { rewind_and_reset(webm_ctx, vpx_ctx); return 0; @@ -84,12 +84,12 @@ int file_is_webm(struct WebmInputContext *webm_ctx, } const mkvparser::Tracks *const tracks = segment->GetTracks(); - const mkvparser::VideoTrack* video_track = NULL; + const mkvparser::VideoTrack *video_track = NULL; for (unsigned long i = 0; i < tracks->GetTracksCount(); ++i) { - const mkvparser::Track* const track = tracks->GetTrackByIndex(i); + const mkvparser::Track *const track = tracks->GetTrackByIndex(i); if (track->GetType() == mkvparser::Track::kVideo) { - video_track = static_cast(track); - webm_ctx->video_track_index = track->GetNumber(); + video_track = static_cast(track); + webm_ctx->video_track_index = static_cast(track->GetNumber()); break; } } @@ -103,8 +103,6 @@ int file_is_webm(struct WebmInputContext *webm_ctx, vpx_ctx->fourcc = VP8_FOURCC; } else if (!strncmp(video_track->GetCodecId(), "V_VP9", 5)) { vpx_ctx->fourcc = VP9_FOURCC; - } else if (!strncmp(video_track->GetCodecId(), "V_VP10", 6)) { - vpx_ctx->fourcc = VP10_FOURCC; } else { rewind_and_reset(webm_ctx, vpx_ctx); return 0; @@ -120,9 +118,7 @@ int file_is_webm(struct WebmInputContext *webm_ctx, return 1; } -int webm_read_frame(struct WebmInputContext *webm_ctx, - uint8_t **buffer, - size_t *bytes_in_buffer, +int webm_read_frame(struct WebmInputContext *webm_ctx, uint8_t **buffer, size_t *buffer_size) { // This check is needed for frame parallel decoding, in which case this // function could be called even after it has reached end of input stream. 
@@ -130,13 +126,13 @@ int webm_read_frame(struct WebmInputContext *webm_ctx, return 1; } mkvparser::Segment *const segment = - reinterpret_cast(webm_ctx->segment); - const mkvparser::Cluster* cluster = - reinterpret_cast(webm_ctx->cluster); + reinterpret_cast(webm_ctx->segment); + const mkvparser::Cluster *cluster = + reinterpret_cast(webm_ctx->cluster); const mkvparser::Block *block = - reinterpret_cast(webm_ctx->block); + reinterpret_cast(webm_ctx->block); const mkvparser::BlockEntry *block_entry = - reinterpret_cast(webm_ctx->block_entry); + reinterpret_cast(webm_ctx->block_entry); bool block_entry_eos = false; do { long status = 0; @@ -147,7 +143,7 @@ int webm_read_frame(struct WebmInputContext *webm_ctx, } else if (block_entry_eos || block_entry->EOS()) { cluster = segment->GetNext(cluster); if (cluster == NULL || cluster->EOS()) { - *bytes_in_buffer = 0; + *buffer_size = 0; webm_ctx->reached_eos = 1; return 1; } @@ -164,7 +160,7 @@ int webm_read_frame(struct WebmInputContext *webm_ctx, } get_new_block = true; } - if (status) { + if (status || block_entry == NULL) { return -1; } if (get_new_block) { @@ -178,24 +174,23 @@ int webm_read_frame(struct WebmInputContext *webm_ctx, webm_ctx->block_entry = block_entry; webm_ctx->block = block; - const mkvparser::Block::Frame& frame = + const mkvparser::Block::Frame &frame = block->GetFrame(webm_ctx->block_frame_index); ++webm_ctx->block_frame_index; if (frame.len > static_cast(*buffer_size)) { - delete[] *buffer; + delete[] * buffer; *buffer = new uint8_t[frame.len]; if (*buffer == NULL) { return -1; } - *buffer_size = frame.len; webm_ctx->buffer = *buffer; } - *bytes_in_buffer = frame.len; + *buffer_size = frame.len; webm_ctx->timestamp_ns = block->GetTime(cluster); webm_ctx->is_key_frame = block->IsKey(); mkvparser::MkvReader *const reader = - reinterpret_cast(webm_ctx->reader); + reinterpret_cast(webm_ctx->reader); return frame.Read(reader, *buffer) ? 
-1 : 0; } @@ -203,10 +198,9 @@ int webm_guess_framerate(struct WebmInputContext *webm_ctx, struct VpxInputContext *vpx_ctx) { uint32_t i = 0; uint8_t *buffer = NULL; - size_t bytes_in_buffer = 0; size_t buffer_size = 0; while (webm_ctx->timestamp_ns < 1000000000 && i < 50) { - if (webm_read_frame(webm_ctx, &buffer, &bytes_in_buffer, &buffer_size)) { + if (webm_read_frame(webm_ctx, &buffer, &buffer_size)) { break; } ++i; @@ -226,6 +220,4 @@ int webm_guess_framerate(struct WebmInputContext *webm_ctx, return 0; } -void webm_free(struct WebmInputContext *webm_ctx) { - reset(webm_ctx); -} +void webm_free(struct WebmInputContext *webm_ctx) { reset(webm_ctx); } diff --git a/libs/libvpx/webmdec.h b/libs/libvpx/webmdec.h index 7d16380355..7dcb170caf 100644 --- a/libs/libvpx/webmdec.h +++ b/libs/libvpx/webmdec.h @@ -42,22 +42,17 @@ int file_is_webm(struct WebmInputContext *webm_ctx, // Reads a WebM Video Frame. Memory for the buffer is created, owned and managed // by this function. For the first call, |buffer| should be NULL and -// |*bytes_in_buffer| should be 0. Once all the frames are read and used, +// |*buffer_size| should be 0. Once all the frames are read and used, // webm_free() should be called, otherwise there will be a leak. // Parameters: // webm_ctx - WebmInputContext object // buffer - pointer where the frame data will be filled. -// bytes_in_buffer - pointer to buffer size. -// buffer_size - unused TODO(vigneshv): remove this +// buffer_size - pointer to buffer size. // Return values: // 0 - Success // 1 - End of Stream // -1 - Error -// TODO(vigneshv): Make the return values consistent across all functions in -// this file. -int webm_read_frame(struct WebmInputContext *webm_ctx, - uint8_t **buffer, - size_t *bytes_in_buffer, +int webm_read_frame(struct WebmInputContext *webm_ctx, uint8_t **buffer, size_t *buffer_size); // Guesses the frame rate of the input file based on the container timestamps. 
diff --git a/libs/libvpx/webmenc.cc b/libs/libvpx/webmenc.cc index d41e700443..66606674b0 100644 --- a/libs/libvpx/webmenc.cc +++ b/libs/libvpx/webmenc.cc @@ -11,22 +11,20 @@ #include -#include "third_party/libwebm/mkvmuxer.hpp" -#include "third_party/libwebm/mkvmuxerutil.hpp" -#include "third_party/libwebm/mkvwriter.hpp" +#include "third_party/libwebm/mkvmuxer/mkvmuxer.h" +#include "third_party/libwebm/mkvmuxer/mkvmuxerutil.h" +#include "third_party/libwebm/mkvmuxer/mkvwriter.h" namespace { const uint64_t kDebugTrackUid = 0xDEADBEEF; const int kVideoTrackNumber = 1; } // namespace -void write_webm_file_header(struct EbmlGlobal *glob, +void write_webm_file_header(struct WebmOutputContext *webm_ctx, const vpx_codec_enc_cfg_t *cfg, - const struct vpx_rational *fps, - stereo_format_t stereo_fmt, - unsigned int fourcc, + stereo_format_t stereo_fmt, unsigned int fourcc, const struct VpxRational *par) { - mkvmuxer::MkvWriter *const writer = new mkvmuxer::MkvWriter(glob->stream); + mkvmuxer::MkvWriter *const writer = new mkvmuxer::MkvWriter(webm_ctx->stream); mkvmuxer::Segment *const segment = new mkvmuxer::Segment(); segment->Init(writer); segment->set_mode(mkvmuxer::Segment::kFile); @@ -36,77 +34,62 @@ void write_webm_file_header(struct EbmlGlobal *glob, const uint64_t kTimecodeScale = 1000000; info->set_timecode_scale(kTimecodeScale); std::string version = "vpxenc"; - if (!glob->debug) { + if (!webm_ctx->debug) { version.append(std::string(" ") + vpx_codec_version_str()); } info->set_writing_app(version.c_str()); const uint64_t video_track_id = segment->AddVideoTrack(static_cast(cfg->g_w), - static_cast(cfg->g_h), - kVideoTrackNumber); - mkvmuxer::VideoTrack* const video_track = - static_cast( - segment->GetTrackByNumber(video_track_id)); + static_cast(cfg->g_h), kVideoTrackNumber); + mkvmuxer::VideoTrack *const video_track = static_cast( + segment->GetTrackByNumber(video_track_id)); video_track->SetStereoMode(stereo_fmt); const char *codec_id; switch (fourcc) { - case 
VP8_FOURCC: - codec_id = "V_VP8"; - break; - case VP9_FOURCC: - codec_id = "V_VP9"; - break; - case VP10_FOURCC: - codec_id = "V_VP10"; - break; - default: - codec_id = "V_VP10"; - break; + case VP8_FOURCC: codec_id = "V_VP8"; break; + case VP9_FOURCC: + default: codec_id = "V_VP9"; break; } video_track->set_codec_id(codec_id); if (par->numerator > 1 || par->denominator > 1) { // TODO(fgalligan): Add support of DisplayUnit, Display Aspect Ratio type // to WebM format. - const uint64_t display_width = - static_cast(((cfg->g_w * par->numerator * 1.0) / - par->denominator) + .5); + const uint64_t display_width = static_cast( + ((cfg->g_w * par->numerator * 1.0) / par->denominator) + .5); video_track->set_display_width(display_width); video_track->set_display_height(cfg->g_h); } - if (glob->debug) { + if (webm_ctx->debug) { video_track->set_uid(kDebugTrackUid); } - glob->writer = writer; - glob->segment = segment; + webm_ctx->writer = writer; + webm_ctx->segment = segment; } -void write_webm_block(struct EbmlGlobal *glob, +void write_webm_block(struct WebmOutputContext *webm_ctx, const vpx_codec_enc_cfg_t *cfg, const vpx_codec_cx_pkt_t *pkt) { mkvmuxer::Segment *const segment = - reinterpret_cast(glob->segment); - int64_t pts_ns = pkt->data.frame.pts * 1000000000ll * - cfg->g_timebase.num / cfg->g_timebase.den; - if (pts_ns <= glob->last_pts_ns) - pts_ns = glob->last_pts_ns + 1000000; - glob->last_pts_ns = pts_ns; + reinterpret_cast(webm_ctx->segment); + int64_t pts_ns = pkt->data.frame.pts * 1000000000ll * cfg->g_timebase.num / + cfg->g_timebase.den; + if (pts_ns <= webm_ctx->last_pts_ns) pts_ns = webm_ctx->last_pts_ns + 1000000; + webm_ctx->last_pts_ns = pts_ns; - segment->AddFrame(static_cast(pkt->data.frame.buf), - pkt->data.frame.sz, - kVideoTrackNumber, - pts_ns, + segment->AddFrame(static_cast(pkt->data.frame.buf), + pkt->data.frame.sz, kVideoTrackNumber, pts_ns, pkt->data.frame.flags & VPX_FRAME_IS_KEY); } -void write_webm_file_footer(struct EbmlGlobal *glob) { 
+void write_webm_file_footer(struct WebmOutputContext *webm_ctx) { mkvmuxer::MkvWriter *const writer = - reinterpret_cast(glob->writer); + reinterpret_cast(webm_ctx->writer); mkvmuxer::Segment *const segment = - reinterpret_cast(glob->segment); + reinterpret_cast(webm_ctx->segment); segment->Finalize(); delete segment; delete writer; - glob->writer = NULL; - glob->segment = NULL; + webm_ctx->writer = NULL; + webm_ctx->segment = NULL; } diff --git a/libs/libvpx/webmenc.h b/libs/libvpx/webmenc.h index c255d3de66..b4a9e357bb 100644 --- a/libs/libvpx/webmenc.h +++ b/libs/libvpx/webmenc.h @@ -20,8 +20,7 @@ extern "C" { #endif -/* TODO(vigneshv): Rename this struct */ -struct EbmlGlobal { +struct WebmOutputContext { int debug; FILE *stream; int64_t last_pts_ns; @@ -38,18 +37,16 @@ typedef enum stereo_format { STEREO_FORMAT_RIGHT_LEFT = 11 } stereo_format_t; -void write_webm_file_header(struct EbmlGlobal *glob, +void write_webm_file_header(struct WebmOutputContext *webm_ctx, const vpx_codec_enc_cfg_t *cfg, - const struct vpx_rational *fps, - stereo_format_t stereo_fmt, - unsigned int fourcc, + stereo_format_t stereo_fmt, unsigned int fourcc, const struct VpxRational *par); -void write_webm_block(struct EbmlGlobal *glob, +void write_webm_block(struct WebmOutputContext *webm_ctx, const vpx_codec_enc_cfg_t *cfg, const vpx_codec_cx_pkt_t *pkt); -void write_webm_file_footer(struct EbmlGlobal *glob); +void write_webm_file_footer(struct WebmOutputContext *webm_ctx); #ifdef __cplusplus } // extern "C" diff --git a/libs/libvpx/y4menc.c b/libs/libvpx/y4menc.c index b647e8dcc5..e26fcaf6ea 100644 --- a/libs/libvpx/y4menc.c +++ b/libs/libvpx/y4menc.c @@ -17,39 +17,43 @@ int y4m_write_file_header(char *buf, size_t len, int width, int height, const char *color; switch (bit_depth) { case 8: - color = fmt == VPX_IMG_FMT_444A ? "C444alpha\n" : - fmt == VPX_IMG_FMT_I444 ? "C444\n" : - fmt == VPX_IMG_FMT_I422 ? "C422\n" : - "C420jpeg\n"; + color = fmt == VPX_IMG_FMT_444A + ? 
"C444alpha\n" + : fmt == VPX_IMG_FMT_I444 ? "C444\n" : fmt == VPX_IMG_FMT_I422 + ? "C422\n" + : "C420jpeg\n"; break; case 9: - color = fmt == VPX_IMG_FMT_I44416 ? "C444p9 XYSCSS=444P9\n" : - fmt == VPX_IMG_FMT_I42216 ? "C422p9 XYSCSS=422P9\n" : - "C420p9 XYSCSS=420P9\n"; + color = fmt == VPX_IMG_FMT_I44416 + ? "C444p9 XYSCSS=444P9\n" + : fmt == VPX_IMG_FMT_I42216 ? "C422p9 XYSCSS=422P9\n" + : "C420p9 XYSCSS=420P9\n"; break; case 10: - color = fmt == VPX_IMG_FMT_I44416 ? "C444p10 XYSCSS=444P10\n" : - fmt == VPX_IMG_FMT_I42216 ? "C422p10 XYSCSS=422P10\n" : - "C420p10 XYSCSS=420P10\n"; + color = fmt == VPX_IMG_FMT_I44416 + ? "C444p10 XYSCSS=444P10\n" + : fmt == VPX_IMG_FMT_I42216 ? "C422p10 XYSCSS=422P10\n" + : "C420p10 XYSCSS=420P10\n"; break; case 12: - color = fmt == VPX_IMG_FMT_I44416 ? "C444p12 XYSCSS=444P12\n" : - fmt == VPX_IMG_FMT_I42216 ? "C422p12 XYSCSS=422P12\n" : - "C420p12 XYSCSS=420P12\n"; + color = fmt == VPX_IMG_FMT_I44416 + ? "C444p12 XYSCSS=444P12\n" + : fmt == VPX_IMG_FMT_I42216 ? "C422p12 XYSCSS=422P12\n" + : "C420p12 XYSCSS=420P12\n"; break; case 14: - color = fmt == VPX_IMG_FMT_I44416 ? "C444p14 XYSCSS=444P14\n" : - fmt == VPX_IMG_FMT_I42216 ? "C422p14 XYSCSS=422P14\n" : - "C420p14 XYSCSS=420P14\n"; + color = fmt == VPX_IMG_FMT_I44416 + ? "C444p14 XYSCSS=444P14\n" + : fmt == VPX_IMG_FMT_I42216 ? "C422p14 XYSCSS=422P14\n" + : "C420p14 XYSCSS=420P14\n"; break; case 16: - color = fmt == VPX_IMG_FMT_I44416 ? "C444p16 XYSCSS=444P16\n" : - fmt == VPX_IMG_FMT_I42216 ? "C422p16 XYSCSS=422P16\n" : - "C420p16 XYSCSS=420P16\n"; + color = fmt == VPX_IMG_FMT_I44416 + ? "C444p16 XYSCSS=444P16\n" + : fmt == VPX_IMG_FMT_I42216 ? 
"C422p16 XYSCSS=422P16\n" + : "C420p16 XYSCSS=420P16\n"; break; - default: - color = NULL; - assert(0); + default: color = NULL; assert(0); } return snprintf(buf, len, "YUV4MPEG2 W%u H%u F%u:%u I%c %s", width, height, framerate->numerator, framerate->denominator, 'p', color); diff --git a/libs/libvpx/y4minput.c b/libs/libvpx/y4minput.c index 34ea96d9d5..acf7d69fe9 100644 --- a/libs/libvpx/y4minput.c +++ b/libs/libvpx/y4minput.c @@ -25,7 +25,7 @@ static int file_read(void *buf, size_t size, FILE *file) { int file_error; size_t len = 0; do { - const size_t n = fread((uint8_t*)buf + len, 1, size - len, file); + const size_t n = fread((uint8_t *)buf + len, 1, size - len, file); len += n; file_error = ferror(file); if (file_error) { @@ -41,83 +41,83 @@ static int file_read(void *buf, size_t size, FILE *file) { } while (!feof(file) && len < size && ++retry_count < kMaxRetries); if (!feof(file) && len != size) { - fprintf(stderr, "Error reading file: %u of %u bytes read," - " error: %d, retries: %d, %d: %s\n", - (uint32_t)len, (uint32_t)size, file_error, retry_count, - errno, strerror(errno)); + fprintf(stderr, + "Error reading file: %u of %u bytes read," + " error: %d, retries: %d, %d: %s\n", + (uint32_t)len, (uint32_t)size, file_error, retry_count, errno, + strerror(errno)); } return len == size; } static int y4m_parse_tags(y4m_input *_y4m, char *_tags) { - int got_w; - int got_h; - int got_fps; - int got_interlace; - int got_par; - int got_chroma; + int got_w; + int got_h; + int got_fps; + int got_interlace; + int got_par; + int got_chroma; char *p; char *q; got_w = got_h = got_fps = got_interlace = got_par = got_chroma = 0; for (p = _tags;; p = q) { /*Skip any leading spaces.*/ - while (*p == ' ')p++; + while (*p == ' ') p++; /*If that's all we have, stop.*/ - if (p[0] == '\0')break; + if (p[0] == '\0') break; /*Find the end of this tag.*/ - for (q = p + 1; *q != '\0' && *q != ' '; q++); + for (q = p + 1; *q != '\0' && *q != ' '; q++) { + } /*Process the tag.*/ switch 
(p[0]) { case 'W': { - if (sscanf(p + 1, "%d", &_y4m->pic_w) != 1)return -1; + if (sscanf(p + 1, "%d", &_y4m->pic_w) != 1) return -1; got_w = 1; + break; } - break; case 'H': { - if (sscanf(p + 1, "%d", &_y4m->pic_h) != 1)return -1; + if (sscanf(p + 1, "%d", &_y4m->pic_h) != 1) return -1; got_h = 1; + break; } - break; case 'F': { if (sscanf(p + 1, "%d:%d", &_y4m->fps_n, &_y4m->fps_d) != 2) { return -1; } got_fps = 1; + break; } - break; case 'I': { _y4m->interlace = p[1]; got_interlace = 1; + break; } - break; case 'A': { if (sscanf(p + 1, "%d:%d", &_y4m->par_n, &_y4m->par_d) != 2) { return -1; } got_par = 1; + break; } - break; case 'C': { - if (q - p > 16)return -1; + if (q - p > 16) return -1; memcpy(_y4m->chroma_type, p + 1, q - p - 1); _y4m->chroma_type[q - p - 1] = '\0'; got_chroma = 1; + break; } - break; - /*Ignore unknown tags.*/ + /*Ignore unknown tags.*/ } } - if (!got_w || !got_h || !got_fps)return -1; - if (!got_interlace)_y4m->interlace = '?'; - if (!got_par)_y4m->par_n = _y4m->par_d = 0; + if (!got_w || !got_h || !got_fps) return -1; + if (!got_interlace) _y4m->interlace = '?'; + if (!got_par) _y4m->par_n = _y4m->par_d = 0; /*Chroma-type is not specified in older files, e.g., those generated by mplayer.*/ - if (!got_chroma)strcpy(_y4m->chroma_type, "420"); + if (!got_chroma) strcpy(_y4m->chroma_type, "420"); return 0; } - - /*All anti-aliasing filters in the following conversion functions are based on one of two window functions: The 6-tap Lanczos window (for down-sampling and shifts): @@ -140,9 +140,9 @@ static int y4m_parse_tags(y4m_input *_y4m, char *_tags) { have these steps pipelined, for less memory consumption and better cache performance, but we do them separately for simplicity.*/ -#define OC_MINI(_a,_b) ((_a)>(_b)?(_b):(_a)) -#define OC_MAXI(_a,_b) ((_a)<(_b)?(_b):(_a)) -#define OC_CLAMPI(_a,_b,_c) (OC_MAXI(_a,OC_MINI(_b,_c))) +#define OC_MINI(_a, _b) ((_a) > (_b) ? (_b) : (_a)) +#define OC_MAXI(_a, _b) ((_a) < (_b) ? 
(_b) : (_a)) +#define OC_CLAMPI(_a, _b, _c) (OC_MAXI(_a, OC_MINI(_b, _c))) /*420jpeg chroma samples are sited like: Y-------Y-------Y-------Y------- @@ -186,25 +186,36 @@ static int y4m_parse_tags(y4m_input *_y4m, char *_tags) { lines, and they are vertically co-sited with the luma samples in both the mpeg2 and jpeg cases (thus requiring no vertical resampling).*/ static void y4m_42xmpeg2_42xjpeg_helper(unsigned char *_dst, - const unsigned char *_src, int _c_w, int _c_h) { + const unsigned char *_src, int _c_w, + int _c_h) { int y; int x; for (y = 0; y < _c_h; y++) { /*Filter: [4 -17 114 35 -9 1]/128, derived from a 6-tap Lanczos window.*/ for (x = 0; x < OC_MINI(_c_w, 2); x++) { - _dst[x] = (unsigned char)OC_CLAMPI(0, (4 * _src[0] - 17 * _src[OC_MAXI(x - 1, 0)] + - 114 * _src[x] + 35 * _src[OC_MINI(x + 1, _c_w - 1)] - 9 * _src[OC_MINI(x + 2, _c_w - 1)] + - _src[OC_MINI(x + 3, _c_w - 1)] + 64) >> 7, 255); + _dst[x] = (unsigned char)OC_CLAMPI( + 0, (4 * _src[0] - 17 * _src[OC_MAXI(x - 1, 0)] + 114 * _src[x] + + 35 * _src[OC_MINI(x + 1, _c_w - 1)] - + 9 * _src[OC_MINI(x + 2, _c_w - 1)] + + _src[OC_MINI(x + 3, _c_w - 1)] + 64) >> + 7, + 255); } for (; x < _c_w - 3; x++) { - _dst[x] = (unsigned char)OC_CLAMPI(0, (4 * _src[x - 2] - 17 * _src[x - 1] + - 114 * _src[x] + 35 * _src[x + 1] - 9 * _src[x + 2] + _src[x + 3] + 64) >> 7, 255); + _dst[x] = (unsigned char)OC_CLAMPI( + 0, (4 * _src[x - 2] - 17 * _src[x - 1] + 114 * _src[x] + + 35 * _src[x + 1] - 9 * _src[x + 2] + _src[x + 3] + 64) >> + 7, + 255); } for (; x < _c_w; x++) { - _dst[x] = (unsigned char)OC_CLAMPI(0, (4 * _src[x - 2] - 17 * _src[x - 1] + - 114 * _src[x] + 35 * _src[OC_MINI(x + 1, _c_w - 1)] - 9 * _src[OC_MINI(x + 2, _c_w - 1)] + - _src[_c_w - 1] + 64) >> 7, 255); + _dst[x] = (unsigned char)OC_CLAMPI( + 0, (4 * _src[x - 2] - 17 * _src[x - 1] + 114 * _src[x] + + 35 * _src[OC_MINI(x + 1, _c_w - 1)] - + 9 * _src[OC_MINI(x + 2, _c_w - 1)] + _src[_c_w - 1] + 64) >> + 7, + 255); } _dst += _c_w; _src += _c_w; @@ 
-277,12 +288,12 @@ static void y4m_convert_42xmpeg2_42xjpeg(y4m_input *_y4m, unsigned char *_dst, static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m, unsigned char *_dst, unsigned char *_aux) { unsigned char *tmp; - int c_w; - int c_h; - int c_sz; - int pli; - int y; - int x; + int c_w; + int c_h; + int c_sz; + int pli; + int y; + int x; /*Skip past the luma data.*/ _dst += _y4m->pic_w * _y4m->pic_h; /*Compute the size of each chroma plane.*/ @@ -302,53 +313,75 @@ static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m, unsigned char *_dst, This is the same filter used above, but in the other order.*/ for (x = 0; x < c_w; x++) { for (y = 0; y < OC_MINI(c_h, 3); y++) { - _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (tmp[0] - - 9 * tmp[OC_MAXI(y - 2, 0) * c_w] + 35 * tmp[OC_MAXI(y - 1, 0) * c_w] - + 114 * tmp[y * c_w] - 17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] - + 4 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + 64) >> 7, 255); + _dst[y * c_w] = (unsigned char)OC_CLAMPI( + 0, (tmp[0] - 9 * tmp[OC_MAXI(y - 2, 0) * c_w] + + 35 * tmp[OC_MAXI(y - 1, 0) * c_w] + 114 * tmp[y * c_w] - + 17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] + + 4 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + 64) >> + 7, + 255); } for (; y < c_h - 2; y++) { - _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (tmp[(y - 3) * c_w] - - 9 * tmp[(y - 2) * c_w] + 35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] - - 17 * tmp[(y + 1) * c_w] + 4 * tmp[(y + 2) * c_w] + 64) >> 7, 255); + _dst[y * c_w] = (unsigned char)OC_CLAMPI( + 0, (tmp[(y - 3) * c_w] - 9 * tmp[(y - 2) * c_w] + + 35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] - + 17 * tmp[(y + 1) * c_w] + 4 * tmp[(y + 2) * c_w] + 64) >> + 7, + 255); } for (; y < c_h; y++) { - _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (tmp[(y - 3) * c_w] - - 9 * tmp[(y - 2) * c_w] + 35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] - - 17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] + 4 * tmp[(c_h - 1) * c_w] + 64) >> 7, 255); + _dst[y * c_w] = (unsigned char)OC_CLAMPI( + 0, (tmp[(y - 3) * c_w] - 9 * tmp[(y - 2) * 
c_w] + + 35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] - + 17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] + + 4 * tmp[(c_h - 1) * c_w] + 64) >> + 7, + 255); } _dst++; tmp++; } _dst += c_sz - c_w; tmp -= c_w; + break; } - break; case 2: { /*Slide C_r down a quarter-pel. This is the same as the horizontal filter.*/ for (x = 0; x < c_w; x++) { for (y = 0; y < OC_MINI(c_h, 2); y++) { - _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (4 * tmp[0] - - 17 * tmp[OC_MAXI(y - 1, 0) * c_w] + 114 * tmp[y * c_w] - + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] - 9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] - + tmp[OC_MINI(y + 3, c_h - 1) * c_w] + 64) >> 7, 255); + _dst[y * c_w] = (unsigned char)OC_CLAMPI( + 0, + (4 * tmp[0] - 17 * tmp[OC_MAXI(y - 1, 0) * c_w] + + 114 * tmp[y * c_w] + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] - + 9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + + tmp[OC_MINI(y + 3, c_h - 1) * c_w] + 64) >> + 7, + 255); } for (; y < c_h - 3; y++) { - _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (4 * tmp[(y - 2) * c_w] - - 17 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] + 35 * tmp[(y + 1) * c_w] - - 9 * tmp[(y + 2) * c_w] + tmp[(y + 3) * c_w] + 64) >> 7, 255); + _dst[y * c_w] = (unsigned char)OC_CLAMPI( + 0, (4 * tmp[(y - 2) * c_w] - 17 * tmp[(y - 1) * c_w] + + 114 * tmp[y * c_w] + 35 * tmp[(y + 1) * c_w] - + 9 * tmp[(y + 2) * c_w] + tmp[(y + 3) * c_w] + 64) >> + 7, + 255); } for (; y < c_h; y++) { - _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (4 * tmp[(y - 2) * c_w] - - 17 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] - - 9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + tmp[(c_h - 1) * c_w] + 64) >> 7, 255); + _dst[y * c_w] = (unsigned char)OC_CLAMPI( + 0, + (4 * tmp[(y - 2) * c_w] - 17 * tmp[(y - 1) * c_w] + + 114 * tmp[y * c_w] + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] - + 9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + tmp[(c_h - 1) * c_w] + + 64) >> + 7, + 255); } _dst++; tmp++; } + break; } - break; } /*For actual interlaced material, this would have to be done separately 
on each field, and the shift amounts would be different. @@ -363,27 +396,37 @@ static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m, unsigned char *_dst, /*Perform vertical filtering to reduce a single plane from 4:2:2 to 4:2:0. This is used as a helper by several converation routines.*/ static void y4m_422jpeg_420jpeg_helper(unsigned char *_dst, - const unsigned char *_src, int _c_w, int _c_h) { + const unsigned char *_src, int _c_w, + int _c_h) { int y; int x; /*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/ for (x = 0; x < _c_w; x++) { for (y = 0; y < OC_MINI(_c_h, 2); y += 2) { - _dst[(y >> 1)*_c_w] = OC_CLAMPI(0, (64 * _src[0] - + 78 * _src[OC_MINI(1, _c_h - 1) * _c_w] - - 17 * _src[OC_MINI(2, _c_h - 1) * _c_w] - + 3 * _src[OC_MINI(3, _c_h - 1) * _c_w] + 64) >> 7, 255); + _dst[(y >> 1) * _c_w] = + OC_CLAMPI(0, (64 * _src[0] + 78 * _src[OC_MINI(1, _c_h - 1) * _c_w] - + 17 * _src[OC_MINI(2, _c_h - 1) * _c_w] + + 3 * _src[OC_MINI(3, _c_h - 1) * _c_w] + 64) >> + 7, + 255); } for (; y < _c_h - 3; y += 2) { - _dst[(y >> 1)*_c_w] = OC_CLAMPI(0, (3 * (_src[(y - 2) * _c_w] + _src[(y + 3) * _c_w]) - - 17 * (_src[(y - 1) * _c_w] + _src[(y + 2) * _c_w]) - + 78 * (_src[y * _c_w] + _src[(y + 1) * _c_w]) + 64) >> 7, 255); + _dst[(y >> 1) * _c_w] = + OC_CLAMPI(0, (3 * (_src[(y - 2) * _c_w] + _src[(y + 3) * _c_w]) - + 17 * (_src[(y - 1) * _c_w] + _src[(y + 2) * _c_w]) + + 78 * (_src[y * _c_w] + _src[(y + 1) * _c_w]) + 64) >> + 7, + 255); } for (; y < _c_h; y += 2) { - _dst[(y >> 1)*_c_w] = OC_CLAMPI(0, (3 * (_src[(y - 2) * _c_w] - + _src[(_c_h - 1) * _c_w]) - 17 * (_src[(y - 1) * _c_w] - + _src[OC_MINI(y + 2, _c_h - 1) * _c_w]) - + 78 * (_src[y * _c_w] + _src[OC_MINI(y + 1, _c_h - 1) * _c_w]) + 64) >> 7, 255); + _dst[(y >> 1) * _c_w] = OC_CLAMPI( + 0, + (3 * (_src[(y - 2) * _c_w] + _src[(_c_h - 1) * _c_w]) - + 17 * (_src[(y - 1) * _c_w] + _src[OC_MINI(y + 2, _c_h - 1) * _c_w]) + + 78 * (_src[y * _c_w] + _src[OC_MINI(y + 1, _c_h - 1) * _c_w]) + + 
64) >> + 7, + 255); } _src++; _dst++; @@ -496,12 +539,12 @@ static void y4m_convert_422jpeg_420jpeg(y4m_input *_y4m, unsigned char *_dst, static void y4m_convert_422_420jpeg(y4m_input *_y4m, unsigned char *_dst, unsigned char *_aux) { unsigned char *tmp; - int c_w; - int c_h; - int c_sz; - int dst_c_h; - int dst_c_sz; - int pli; + int c_w; + int c_h; + int c_sz; + int dst_c_h; + int dst_c_sz; + int pli; /*Skip past the luma data.*/ _dst += _y4m->pic_w * _y4m->pic_h; /*Compute the size of each chroma plane.*/ @@ -568,16 +611,16 @@ static void y4m_convert_422_420jpeg(y4m_input *_y4m, unsigned char *_dst, static void y4m_convert_411_420jpeg(y4m_input *_y4m, unsigned char *_dst, unsigned char *_aux) { unsigned char *tmp; - int c_w; - int c_h; - int c_sz; - int dst_c_w; - int dst_c_h; - int dst_c_sz; - int tmp_sz; - int pli; - int y; - int x; + int c_w; + int c_h; + int c_sz; + int dst_c_w; + int dst_c_h; + int dst_c_sz; + int tmp_sz; + int pli; + int y; + int x; /*Skip past the luma data.*/ _dst += _y4m->pic_w * _y4m->pic_h; /*Compute the size of each chroma plane.*/ @@ -598,23 +641,42 @@ static void y4m_convert_411_420jpeg(y4m_input *_y4m, unsigned char *_dst, /*Filters: [1 110 18 -1]/128 and [-3 50 86 -5]/128, both derived from a 4-tap Mitchell window.*/ for (x = 0; x < OC_MINI(c_w, 1); x++) { - tmp[x << 1] = (unsigned char)OC_CLAMPI(0, (111 * _aux[0] - + 18 * _aux[OC_MINI(1, c_w - 1)] - _aux[OC_MINI(2, c_w - 1)] + 64) >> 7, 255); - tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(0, (47 * _aux[0] - + 86 * _aux[OC_MINI(1, c_w - 1)] - 5 * _aux[OC_MINI(2, c_w - 1)] + 64) >> 7, 255); + tmp[x << 1] = (unsigned char)OC_CLAMPI( + 0, (111 * _aux[0] + 18 * _aux[OC_MINI(1, c_w - 1)] - + _aux[OC_MINI(2, c_w - 1)] + 64) >> + 7, + 255); + tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI( + 0, (47 * _aux[0] + 86 * _aux[OC_MINI(1, c_w - 1)] - + 5 * _aux[OC_MINI(2, c_w - 1)] + 64) >> + 7, + 255); } for (; x < c_w - 2; x++) { - tmp[x << 1] = (unsigned char)OC_CLAMPI(0, (_aux[x - 1] + 110 * 
_aux[x] - + 18 * _aux[x + 1] - _aux[x + 2] + 64) >> 7, 255); - tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(0, (-3 * _aux[x - 1] + 50 * _aux[x] - + 86 * _aux[x + 1] - 5 * _aux[x + 2] + 64) >> 7, 255); + tmp[x << 1] = + (unsigned char)OC_CLAMPI(0, (_aux[x - 1] + 110 * _aux[x] + + 18 * _aux[x + 1] - _aux[x + 2] + 64) >> + 7, + 255); + tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI( + 0, (-3 * _aux[x - 1] + 50 * _aux[x] + 86 * _aux[x + 1] - + 5 * _aux[x + 2] + 64) >> + 7, + 255); } for (; x < c_w; x++) { - tmp[x << 1] = (unsigned char)OC_CLAMPI(0, (_aux[x - 1] + 110 * _aux[x] - + 18 * _aux[OC_MINI(x + 1, c_w - 1)] - _aux[c_w - 1] + 64) >> 7, 255); + tmp[x << 1] = (unsigned char)OC_CLAMPI( + 0, (_aux[x - 1] + 110 * _aux[x] + + 18 * _aux[OC_MINI(x + 1, c_w - 1)] - _aux[c_w - 1] + 64) >> + 7, + 255); if ((x << 1 | 1) < dst_c_w) { - tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(0, (-3 * _aux[x - 1] + 50 * _aux[x] - + 86 * _aux[OC_MINI(x + 1, c_w - 1)] - 5 * _aux[c_w - 1] + 64) >> 7, 255); + tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI( + 0, + (-3 * _aux[x - 1] + 50 * _aux[x] + + 86 * _aux[OC_MINI(x + 1, c_w - 1)] - 5 * _aux[c_w - 1] + 64) >> + 7, + 255); } } tmp += dst_c_w; @@ -631,16 +693,16 @@ static void y4m_convert_411_420jpeg(y4m_input *_y4m, unsigned char *_dst, static void y4m_convert_444_420jpeg(y4m_input *_y4m, unsigned char *_dst, unsigned char *_aux) { unsigned char *tmp; - int c_w; - int c_h; - int c_sz; - int dst_c_w; - int dst_c_h; - int dst_c_sz; - int tmp_sz; - int pli; - int y; - int x; + int c_w; + int c_h; + int c_sz; + int dst_c_w; + int dst_c_h; + int dst_c_sz; + int tmp_sz; + int pli; + int y; + int x; /*Skip past the luma data.*/ _dst += _y4m->pic_w * _y4m->pic_h; /*Compute the size of each chroma plane.*/ @@ -656,18 +718,27 @@ static void y4m_convert_444_420jpeg(y4m_input *_y4m, unsigned char *_dst, /*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/ for (y = 0; y < c_h; y++) { for (x = 0; x < OC_MINI(c_w, 2); x += 2) { - tmp[x >> 1] = 
OC_CLAMPI(0, (64 * _aux[0] + 78 * _aux[OC_MINI(1, c_w - 1)] - - 17 * _aux[OC_MINI(2, c_w - 1)] - + 3 * _aux[OC_MINI(3, c_w - 1)] + 64) >> 7, 255); + tmp[x >> 1] = + OC_CLAMPI(0, (64 * _aux[0] + 78 * _aux[OC_MINI(1, c_w - 1)] - + 17 * _aux[OC_MINI(2, c_w - 1)] + + 3 * _aux[OC_MINI(3, c_w - 1)] + 64) >> + 7, + 255); } for (; x < c_w - 3; x += 2) { - tmp[x >> 1] = OC_CLAMPI(0, (3 * (_aux[x - 2] + _aux[x + 3]) - - 17 * (_aux[x - 1] + _aux[x + 2]) + 78 * (_aux[x] + _aux[x + 1]) + 64) >> 7, 255); + tmp[x >> 1] = OC_CLAMPI(0, (3 * (_aux[x - 2] + _aux[x + 3]) - + 17 * (_aux[x - 1] + _aux[x + 2]) + + 78 * (_aux[x] + _aux[x + 1]) + 64) >> + 7, + 255); } for (; x < c_w; x += 2) { - tmp[x >> 1] = OC_CLAMPI(0, (3 * (_aux[x - 2] + _aux[c_w - 1]) - - 17 * (_aux[x - 1] + _aux[OC_MINI(x + 2, c_w - 1)]) + - 78 * (_aux[x] + _aux[OC_MINI(x + 1, c_w - 1)]) + 64) >> 7, 255); + tmp[x >> 1] = OC_CLAMPI( + 0, (3 * (_aux[x - 2] + _aux[c_w - 1]) - + 17 * (_aux[x - 1] + _aux[OC_MINI(x + 2, c_w - 1)]) + + 78 * (_aux[x] + _aux[OC_MINI(x + 1, c_w - 1)]) + 64) >> + 7, + 255); } tmp += dst_c_w; _aux += c_w; @@ -700,9 +771,9 @@ static void y4m_convert_null(y4m_input *_y4m, unsigned char *_dst, int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip, int only_420) { - char buffer[80] = {0}; - int ret; - int i; + char buffer[80] = { 0 }; + int ret; + int i; /*Read until newline, or 80 cols, whichever happens first.*/ for (i = 0; i < 79; i++) { if (_nskip > 0) { @@ -711,10 +782,10 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip, } else { if (!file_read(buffer + i, 1, _fin)) return -1; } - if (buffer[i] == '\n')break; + if (buffer[i] == '\n') break; } /*We skipped too much header data.*/ - if (_nskip > 0)return -1; + if (_nskip > 0) return -1; if (i == 79) { fprintf(stderr, "Error parsing header; not a YUV2MPEG2 file?\n"); return -1; @@ -733,10 +804,12 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip, return ret; } if (_y4m->interlace 
== '?') { - fprintf(stderr, "Warning: Input video interlacing format unknown; " + fprintf(stderr, + "Warning: Input video interlacing format unknown; " "assuming progressive scan.\n"); } else if (_y4m->interlace != 'p') { - fprintf(stderr, "Input video is interlaced; " + fprintf(stderr, + "Input video is interlaced; " "Only progressive scan handled.\n"); return -1; } @@ -745,9 +818,11 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip, _y4m->bit_depth = 8; if (strcmp(_y4m->chroma_type, "420") == 0 || strcmp(_y4m->chroma_type, "420jpeg") == 0) { - _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v = _y4m->dst_c_dec_v = 2; - _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h - + 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2); + _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v = + _y4m->dst_c_dec_v = 2; + _y4m->dst_buf_read_sz = + _y4m->pic_w * _y4m->pic_h + + 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2); /* Natively supported: no conversion required. */ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0; _y4m->convert = y4m_convert_null; @@ -756,9 +831,9 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip, _y4m->dst_c_dec_h = 2; _y4m->src_c_dec_v = 2; _y4m->dst_c_dec_v = 2; - _y4m->dst_buf_read_sz = 2 * (_y4m->pic_w * _y4m->pic_h + - 2 * ((_y4m->pic_w + 1) / 2) * - ((_y4m->pic_h + 1) / 2)); + _y4m->dst_buf_read_sz = + 2 * (_y4m->pic_w * _y4m->pic_h + + 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2)); /* Natively supported: no conversion required. 
*/ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0; _y4m->convert = y4m_convert_null; @@ -774,9 +849,9 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip, _y4m->dst_c_dec_h = 2; _y4m->src_c_dec_v = 2; _y4m->dst_c_dec_v = 2; - _y4m->dst_buf_read_sz = 2 * (_y4m->pic_w * _y4m->pic_h + - 2 * ((_y4m->pic_w + 1) / 2) * - ((_y4m->pic_h + 1) / 2)); + _y4m->dst_buf_read_sz = + 2 * (_y4m->pic_w * _y4m->pic_h + + 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2)); /* Natively supported: no conversion required. */ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0; _y4m->convert = y4m_convert_null; @@ -788,20 +863,23 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip, return -1; } } else if (strcmp(_y4m->chroma_type, "420mpeg2") == 0) { - _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v = _y4m->dst_c_dec_v = 2; + _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v = + _y4m->dst_c_dec_v = 2; _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h; /*Chroma filter required: read into the aux buf first.*/ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = - 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2); + 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2); _y4m->convert = y4m_convert_42xmpeg2_42xjpeg; } else if (strcmp(_y4m->chroma_type, "420paldv") == 0) { - _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v = _y4m->dst_c_dec_v = 2; + _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v = + _y4m->dst_c_dec_v = 2; _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h; /*Chroma filter required: read into the aux buf first. 
We need to make two filter passes, so we need some extra space in the aux buffer.*/ _y4m->aux_buf_sz = 3 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2); - _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2); + _y4m->aux_buf_read_sz = + 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2); _y4m->convert = y4m_convert_42xpaldv_42xjpeg; } else if (strcmp(_y4m->chroma_type, "422jpeg") == 0) { _y4m->src_c_dec_h = _y4m->dst_c_dec_h = 2; @@ -809,7 +887,8 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip, _y4m->dst_c_dec_v = 2; _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h; /*Chroma filter required: read into the aux buf first.*/ - _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h; + _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = + 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h; _y4m->convert = y4m_convert_422jpeg_420jpeg; } else if (strcmp(_y4m->chroma_type, "422") == 0) { _y4m->src_c_dec_h = 2; @@ -822,16 +901,16 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip, We need to make two filter passes, so we need some extra space in the aux buffer.*/ _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h; - _y4m->aux_buf_sz = _y4m->aux_buf_read_sz + - ((_y4m->pic_w + 1) / 2) * _y4m->pic_h; + _y4m->aux_buf_sz = + _y4m->aux_buf_read_sz + ((_y4m->pic_w + 1) / 2) * _y4m->pic_h; _y4m->convert = y4m_convert_422_420jpeg; } else { _y4m->vpx_fmt = VPX_IMG_FMT_I422; _y4m->bps = 16; _y4m->dst_c_dec_h = _y4m->src_c_dec_h; _y4m->dst_c_dec_v = _y4m->src_c_dec_v; - _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h - + 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h; + _y4m->dst_buf_read_sz = + _y4m->pic_w * _y4m->pic_h + 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h; /*Natively supported: no conversion required.*/ _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0; _y4m->convert = y4m_convert_null; @@ -878,7 +957,8 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int 
_nskip, We need to make two filter passes, so we need some extra space in the aux buffer.*/ _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 3) / 4) * _y4m->pic_h; - _y4m->aux_buf_sz = _y4m->aux_buf_read_sz + ((_y4m->pic_w + 1) / 2) * _y4m->pic_h; + _y4m->aux_buf_sz = + _y4m->aux_buf_read_sz + ((_y4m->pic_w + 1) / 2) * _y4m->pic_h; _y4m->convert = y4m_convert_411_420jpeg; } else if (strcmp(_y4m->chroma_type, "444") == 0) { _y4m->src_c_dec_h = 1; @@ -891,8 +971,8 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip, We need to make two filter passes, so we need some extra space in the aux buffer.*/ _y4m->aux_buf_read_sz = 2 * _y4m->pic_w * _y4m->pic_h; - _y4m->aux_buf_sz = _y4m->aux_buf_read_sz + - ((_y4m->pic_w + 1) / 2) * _y4m->pic_h; + _y4m->aux_buf_sz = + _y4m->aux_buf_read_sz + ((_y4m->pic_w + 1) / 2) * _y4m->pic_h; _y4m->convert = y4m_convert_444_420jpeg; } else { _y4m->vpx_fmt = VPX_IMG_FMT_I444; @@ -971,9 +1051,10 @@ int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip, } /*The size of the final frame buffers is always computed from the destination chroma decimation type.*/ - _y4m->dst_buf_sz = _y4m->pic_w * _y4m->pic_h - + 2 * ((_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h) * - ((_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v); + _y4m->dst_buf_sz = + _y4m->pic_w * _y4m->pic_h + + 2 * ((_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h) * + ((_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v); if (_y4m->bit_depth == 8) _y4m->dst_buf = (unsigned char *)malloc(_y4m->dst_buf_sz); else @@ -991,11 +1072,11 @@ void y4m_input_close(y4m_input *_y4m) { int y4m_input_fetch_frame(y4m_input *_y4m, FILE *_fin, vpx_image_t *_img) { char frame[6]; - int pic_sz; - int c_w; - int c_h; - int c_sz; - int bytes_per_sample = _y4m->bit_depth > 8 ? 2 : 1; + int pic_sz; + int c_w; + int c_h; + int c_sz; + int bytes_per_sample = _y4m->bit_depth > 8 ? 
2 : 1; /*Read and skip the frame header.*/ if (!file_read(frame, 6, _fin)) return 0; if (memcmp(frame, "FRAME", 5)) { @@ -1004,8 +1085,9 @@ int y4m_input_fetch_frame(y4m_input *_y4m, FILE *_fin, vpx_image_t *_img) { } if (frame[5] != '\n') { char c; - int j; - for (j = 0; j < 79 && file_read(&c, 1, _fin) && c != '\n'; j++) {} + int j; + for (j = 0; j < 79 && file_read(&c, 1, _fin) && c != '\n'; j++) { + } if (j == 79) { fprintf(stderr, "Error parsing Y4M frame header\n"); return -1; diff --git a/libs/libvpx/y4minput.h b/libs/libvpx/y4minput.h index 356cebbcf0..9e69ceb835 100644 --- a/libs/libvpx/y4minput.h +++ b/libs/libvpx/y4minput.h @@ -14,52 +14,46 @@ #ifndef Y4MINPUT_H_ #define Y4MINPUT_H_ -# include -# include "vpx/vpx_image.h" +#include +#include "vpx/vpx_image.h" #ifdef __cplusplus extern "C" { #endif - - typedef struct y4m_input y4m_input; - - /*The function used to perform chroma conversion.*/ -typedef void (*y4m_convert_func)(y4m_input *_y4m, - unsigned char *_dst, unsigned char *_src); - - +typedef void (*y4m_convert_func)(y4m_input *_y4m, unsigned char *_dst, + unsigned char *_src); struct y4m_input { - int pic_w; - int pic_h; - int fps_n; - int fps_d; - int par_n; - int par_d; - char interlace; - int src_c_dec_h; - int src_c_dec_v; - int dst_c_dec_h; - int dst_c_dec_v; - char chroma_type[16]; + int pic_w; + int pic_h; + int fps_n; + int fps_d; + int par_n; + int par_d; + char interlace; + int src_c_dec_h; + int src_c_dec_v; + int dst_c_dec_h; + int dst_c_dec_v; + char chroma_type[16]; /*The size of each converted frame buffer.*/ - size_t dst_buf_sz; + size_t dst_buf_sz; /*The amount to read directly into the converted frame buffer.*/ - size_t dst_buf_read_sz; + size_t dst_buf_read_sz; /*The size of the auxilliary buffer.*/ - size_t aux_buf_sz; + size_t aux_buf_sz; /*The amount to read into the auxilliary buffer.*/ - size_t aux_buf_read_sz; - y4m_convert_func convert; - unsigned char *dst_buf; - unsigned char *aux_buf; - enum vpx_img_fmt vpx_fmt; - int 
bps; - unsigned int bit_depth; + size_t aux_buf_read_sz; + y4m_convert_func convert; + unsigned char *dst_buf; + unsigned char *aux_buf; + enum vpx_img_fmt vpx_fmt; + int bps; + unsigned int bit_depth; }; int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,